commit aebc1b03fdc1a5e56f7e7bf34784f8b1d83174e2 Author: Ansible Core Team Date: Mon Mar 9 09:11:07 2020 +0000 Initial commit diff --git a/.github/workflows/collection-continuous-integration.yml b/.github/workflows/collection-continuous-integration.yml new file mode 100644 index 0000000000..dfc285be4f --- /dev/null +++ b/.github/workflows/collection-continuous-integration.yml @@ -0,0 +1,308 @@ +name: Collection test suite + +on: + push: + pull_request: + schedule: + - cron: 3 0 * * * # Run daily at 0:03 UTC + +jobs: + build-collection-artifact: + name: Build collection + runs-on: ${{ matrix.runner-os }} + strategy: + matrix: + runner-os: + - ubuntu-latest + ansible-version: + - git+https://github.com/ansible/ansible.git@devel + runner-python-version: + - 3.8 + steps: + - name: Check out ${{ github.repository }} on disk + uses: actions/checkout@master + - name: Set up Python ${{ matrix.runner-python-version }} + uses: actions/setup-python@v1 + with: + python-version: ${{ matrix.runner-python-version }} + - name: Set up pip cache + uses: actions/cache@v1 + with: + path: ~/.cache/pip + key: ${{ runner.os }}-pip-${{ hashFiles('tests/sanity/requirements.txt') }}-${{ hashFiles('tests/unit/requirements.txt') }} + restore-keys: | + ${{ runner.os }}-pip- + ${{ runner.os }}- + - name: Install Ansible ${{ matrix.ansible-version }} + run: >- + python -m + pip + install + --user + ${{ matrix.ansible-version }} + - name: Build a collection tarball + run: >- + ~/.local/bin/ansible-galaxy + collection + build + --output-path + "${GITHUB_WORKSPACE}/.cache/collection-tarballs" + - name: Store migrated collection artifacts + uses: actions/upload-artifact@v1 + with: + name: >- + collection + path: .cache/collection-tarballs + + sanity-test-collection-via-vms: + name: Sanity in VM ${{ matrix.os.vm || 'ubuntu-latest' }} + needs: + - build-collection-artifact + runs-on: ${{ matrix.os.vm || 'ubuntu-latest' }} + strategy: + fail-fast: false + matrix: + ansible-version: + - git+https://github.com/ansible/ansible.git@devel + os: + - vm: ubuntu-latest + - vm: ubuntu-16.04 + - vm: macos-latest + python-version: + - 3.8 + - 3.7 + - 3.6 + - 3.5 + - 2.7 + steps: + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v1 + with: + python-version: ${{ matrix.python-version }} + - name: Set up pip cache + uses: actions/cache@v1 + with: + path: ~/.cache/pip + key: ${{ runner.os }}-pip-${{ github.ref }}-sanity-VMs + restore-keys: | + ${{ runner.os }}-pip- + ${{ runner.os }}- + - name: Install Ansible ${{ matrix.ansible-version }} + run: >- + python -m + pip + install + --user + ${{ matrix.ansible-version }} + - name: Download migrated collection artifacts + uses: actions/download-artifact@v1 + with: + name: >- + collection + path: .cache/collection-tarballs + - name: Install the collection tarball + run: >- + ~/.local/bin/ansible-galaxy + collection + install + .cache/collection-tarballs/*.tar.gz + - name: Run collection sanity tests + run: >- + ~/.local/bin/ansible-test + sanity + --color + --requirements + --venv + --python + "${{ matrix.python-version }}" + -vvv + working-directory: >- + /${{ runner.os == 'Linux' && 'home' || 'Users' }}/runner/.ansible/collections/ansible_collections/community/general + + sanity-test-collection-via-containers: + name: Sanity in container via Python ${{ matrix.python-version }} + needs: + - build-collection-artifact + runs-on: ${{ matrix.runner-os }} + strategy: + fail-fast: false + matrix: + runner-os: + - ubuntu-latest + runner-python-version: + - 3.8 + 
ansible-version: + - git+https://github.com/ansible/ansible.git@devel + python-version: + - 3.8 + - 2.7 + - 3.7 + - 3.6 + - 3.5 + - 2.6 + steps: + - name: Set up Python ${{ matrix.runner-python-version }} + uses: actions/setup-python@v1 + with: + python-version: ${{ matrix.runner-python-version }} + - name: Set up pip cache + uses: actions/cache@v1 + with: + path: ~/.cache/pip + key: ${{ runner.os }}-pip-${{ github.ref }}-sanity-containers + restore-keys: | + ${{ runner.os }}-pip- + ${{ runner.os }}- + - name: Install Ansible ${{ matrix.ansible-version }} + run: >- + python -m + pip + install + --user + ${{ matrix.ansible-version }} + - name: Download migrated collection artifacts + uses: actions/download-artifact@v1 + with: + name: >- + collection + path: .cache/collection-tarballs + - name: Install the collection tarball + run: >- + ~/.local/bin/ansible-galaxy + collection + install + .cache/collection-tarballs/*.tar.gz + - name: Run collection sanity tests + run: >- + ~/.local/bin/ansible-test + sanity + --color + --requirements + --docker + --python + "${{ matrix.python-version }}" + -vvv + working-directory: >- + /home/runner/.ansible/collections/ansible_collections/community/general + + unit-test-collection-via-vms: + name: Units in VM ${{ matrix.os.vm || 'ubuntu-latest' }} + needs: + - build-collection-artifact + runs-on: ${{ matrix.os.vm || 'ubuntu-latest' }} + strategy: + fail-fast: false + matrix: + ansible-version: + - git+https://github.com/ansible/ansible.git@devel + os: + - vm: ubuntu-latest + - vm: ubuntu-16.04 + - vm: macos-latest + python-version: + - 3.8 + - 3.7 + - 3.6 + - 3.5 + - 2.7 + steps: + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v1 + with: + python-version: ${{ matrix.python-version }} + - name: Set up pip cache + uses: actions/cache@v1 + with: + path: ~/.cache/pip + key: ${{ runner.os }}-pip-${{ github.ref }}-units-VMs + restore-keys: | + ${{ runner.os }}-pip- + ${{ runner.os }}- + - name: Install Ansible ${{ matrix.ansible-version }} + run: >- + python -m + pip + install + --user + ${{ matrix.ansible-version }} + - name: Download migrated collection artifacts + uses: actions/download-artifact@v1 + with: + name: >- + collection + path: .cache/collection-tarballs + - name: Install the collection tarball + run: >- + ~/.local/bin/ansible-galaxy + collection + install + .cache/collection-tarballs/*.tar.gz + - name: Run collection unit tests + run: | + [[ ! -d 'tests/unit' ]] && echo This collection does not have unit tests. Skipping... 
|| \ + ~/.local/bin/ansible-test units --color --coverage --requirements --venv --python "${{ matrix.python-version }}" -vvv + working-directory: >- + /${{ runner.os == 'Linux' && 'home' || 'Users' }}/runner/.ansible/collections/ansible_collections/community/general + + unit-test-collection-via-containers: + name: Units in container ${{ matrix.container-image }} + needs: + - build-collection-artifact + runs-on: ${{ matrix.runner-os }} + strategy: + fail-fast: false + matrix: + runner-os: + - ubuntu-latest + runner-python-version: + - 3.8 + ansible-version: + - git+https://github.com/ansible/ansible.git@devel + container-image: + - fedora31 + - ubuntu1804 + - centos8 + - opensuse15 + - fedora30 + - centos7 + - opensuse15py2 + - ubuntu1604 + - centos6 + steps: + - name: Set up Python ${{ matrix.runner-python-version }} + uses: actions/setup-python@v1 + with: + python-version: ${{ matrix.runner-python-version }} + - name: Set up pip cache + uses: actions/cache@v1 + with: + path: ~/.cache/pip + key: ${{ runner.os }}-pip-${{ github.ref }}-units-containers + restore-keys: | + ${{ runner.os }}-pip- + ${{ runner.os }}- + - name: Install Ansible ${{ matrix.ansible-version }} + run: >- + python -m + pip + install + --user + ${{ matrix.ansible-version }} + - name: Download migrated collection artifacts + uses: actions/download-artifact@v1 + with: + name: >- + collection + path: .cache/collection-tarballs + - name: Install the collection tarball + run: >- + ~/.local/bin/ansible-galaxy + collection + install + .cache/collection-tarballs/*.tar.gz + - name: Run collection unit tests + run: | + [[ ! -d 'tests/unit' ]] && echo This collection does not have unit tests. Skipping... || \ + ~/.local/bin/ansible-test units --color --coverage --requirements --docker "${{ matrix.container-image }}" -vvv + working-directory: >- + /home/runner/.ansible/collections/ansible_collections/community/general \ No newline at end of file diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000000..c6fc14ad0b --- /dev/null +++ b/.gitignore @@ -0,0 +1,387 @@ + +# Created by https://www.gitignore.io/api/git,linux,pydev,python,windows,pycharm+all,jupyternotebook,vim,webstorm,emacs,dotenv +# Edit at https://www.gitignore.io/?templates=git,linux,pydev,python,windows,pycharm+all,jupyternotebook,vim,webstorm,emacs,dotenv + +### dotenv ### +.env + +### Emacs ### +# -*- mode: gitignore; -*- +*~ +\#*\# +/.emacs.desktop +/.emacs.desktop.lock +*.elc +auto-save-list +tramp +.\#* + +# Org-mode +.org-id-locations +*_archive + +# flymake-mode +*_flymake.* + +# eshell files +/eshell/history +/eshell/lastdir + +# elpa packages +/elpa/ + +# reftex files +*.rel + +# AUCTeX auto folder +/auto/ + +# cask packages +.cask/ +dist/ + +# Flycheck +flycheck_*.el + +# server auth directory +/server/ + +# projectiles files +.projectile + +# directory configuration +.dir-locals.el + +# network security +/network-security.data + + +### Git ### +# Created by git for backups. To disable backups in Git: +# $ git config --global mergetool.keepBackup false +*.orig + +# Created by git when using merge tools for conflicts +*.BACKUP.* +*.BASE.* +*.LOCAL.* +*.REMOTE.* +*_BACKUP_*.txt +*_BASE_*.txt +*_LOCAL_*.txt +*_REMOTE_*.txt + +#!! ERROR: jupyternotebook is undefined. 
Use list command to see defined gitignore types !!# + +### Linux ### + +# temporary files which can be created if a process still has a handle open of a deleted file +.fuse_hidden* + +# KDE directory preferences +.directory + +# Linux trash folder which might appear on any partition or disk +.Trash-* + +# .nfs files are created when an open file is removed but is still being accessed +.nfs* + +### PyCharm+all ### +# Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio and WebStorm +# Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839 + +# User-specific stuff +.idea/**/workspace.xml +.idea/**/tasks.xml +.idea/**/usage.statistics.xml +.idea/**/dictionaries +.idea/**/shelf + +# Generated files +.idea/**/contentModel.xml + +# Sensitive or high-churn files +.idea/**/dataSources/ +.idea/**/dataSources.ids +.idea/**/dataSources.local.xml +.idea/**/sqlDataSources.xml +.idea/**/dynamic.xml +.idea/**/uiDesigner.xml +.idea/**/dbnavigator.xml + +# Gradle +.idea/**/gradle.xml +.idea/**/libraries + +# Gradle and Maven with auto-import +# When using Gradle or Maven with auto-import, you should exclude module files, +# since they will be recreated, and may cause churn. Uncomment if using +# auto-import. +# .idea/modules.xml +# .idea/*.iml +# .idea/modules +# *.iml +# *.ipr + +# CMake +cmake-build-*/ + +# Mongo Explorer plugin +.idea/**/mongoSettings.xml + +# File-based project format +*.iws + +# IntelliJ +out/ + +# mpeltonen/sbt-idea plugin +.idea_modules/ + +# JIRA plugin +atlassian-ide-plugin.xml + +# Cursive Clojure plugin +.idea/replstate.xml + +# Crashlytics plugin (for Android Studio and IntelliJ) +com_crashlytics_export_strings.xml +crashlytics.properties +crashlytics-build.properties +fabric.properties + +# Editor-based Rest Client +.idea/httpRequests + +# Android studio 3.1+ serialized cache file +.idea/caches/build_file_checksums.ser + +### PyCharm+all Patch ### +# Ignores the whole .idea folder and all .iml files +# See https://github.com/joeblau/gitignore.io/issues/186 and https://github.com/joeblau/gitignore.io/issues/360 + +.idea/ + +# Reason: https://github.com/joeblau/gitignore.io/issues/186#issuecomment-249601023 + +*.iml +modules.xml +.idea/misc.xml +*.ipr + +# Sonarlint plugin +.idea/sonarlint + +### pydev ### +.pydevproject + +### Python ### +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# C extensions +*.so + +# Distribution / packaging +.Python +build/ +develop-eggs/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +pip-wheel-metadata/ +share/python-wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. +*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.nox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +.hypothesis/ +.pytest_cache/ + +# Translations +*.mo +*.pot + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ + +# PyBuilder +target/ + +# pyenv +.python-version + +# pipenv +# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 
+# However, in case of collaboration, if having platform-specific dependencies or dependencies +# having no cross-platform support, pipenv may install dependencies that don't work, or not +# install all needed dependencies. +#Pipfile.lock + +# celery beat schedule file +celerybeat-schedule + +# SageMath parsed files +*.sage.py + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# Mr Developer +.mr.developer.cfg +.project + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ +.dmypy.json +dmypy.json + +# Pyre type checker +.pyre/ + +### Vim ### +# Swap +[._]*.s[a-v][a-z] +[._]*.sw[a-p] +[._]s[a-rt-v][a-z] +[._]ss[a-gi-z] +[._]sw[a-p] + +# Session +Session.vim +Sessionx.vim + +# Temporary +.netrwhist +# Auto-generated tag files +tags +# Persistent undo +[._]*.un~ + +### WebStorm ### +# Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio and WebStorm +# Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839 + +# User-specific stuff + +# Generated files + +# Sensitive or high-churn files + +# Gradle + +# Gradle and Maven with auto-import +# When using Gradle or Maven with auto-import, you should exclude module files, +# since they will be recreated, and may cause churn. Uncomment if using +# auto-import. +# .idea/modules.xml +# .idea/*.iml +# .idea/modules +# *.iml +# *.ipr + +# CMake + +# Mongo Explorer plugin + +# File-based project format + +# IntelliJ + +# mpeltonen/sbt-idea plugin + +# JIRA plugin + +# Cursive Clojure plugin + +# Crashlytics plugin (for Android Studio and IntelliJ) + +# Editor-based Rest Client + +# Android studio 3.1+ serialized cache file + +### WebStorm Patch ### +# Comment Reason: https://github.com/joeblau/gitignore.io/issues/186#issuecomment-215987721 + +# *.iml +# modules.xml +# .idea/misc.xml +# *.ipr + +# Sonarlint plugin +.idea/**/sonarlint/ + +# SonarQube Plugin +.idea/**/sonarIssues.xml + +# Markdown Navigator plugin +.idea/**/markdown-navigator.xml +.idea/**/markdown-navigator/ + +### Windows ### +# Windows thumbnail cache files +Thumbs.db +Thumbs.db:encryptable +ehthumbs.db +ehthumbs_vista.db + +# Dump file +*.stackdump + +# Folder config file +[Dd]esktop.ini + +# Recycle Bin used on file shares +$RECYCLE.BIN/ + +# Windows Installer files +*.cab +*.msi +*.msix +*.msm +*.msp + +# Windows shortcuts +*.lnk + +# End of https://www.gitignore.io/api/git,linux,pydev,python,windows,pycharm+all,jupyternotebook,vim,webstorm,emacs,dotenv diff --git a/COPYING b/COPYING new file mode 100644 index 0000000000..10926e87f1 --- /dev/null +++ b/COPYING @@ -0,0 +1,675 @@ + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/> + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too.
+ + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. 
+ + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. 
This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. 
This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. 
+ + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. 
+ + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. + + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. 
+ + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. 
+ + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. + + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. 
If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. 
Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + <one line to give the program's name and a brief idea of what it does.> + Copyright (C) <year> <name of author> + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see <https://www.gnu.org/licenses/>. + +Also add information on how to contact you by electronic and paper mail. + + If the program does terminal interaction, make it output a short +notice like this when it starts in an interactive mode: + + <program> Copyright (C) <year> <name of author> + This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, your program's commands +might be different; for a GUI interface, you would use an "about box". + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU GPL, see +<https://www.gnu.org/licenses/>. + + The GNU General Public License does not permit incorporating your program +into proprietary programs. If your program is a subroutine library, you +may consider it more useful to permit linking proprietary applications with +the library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. But first, please read +<https://www.gnu.org/licenses/why-not-lgpl.html>.
+ diff --git a/README.md b/README.md new file mode 100644 index 0000000000..cea6e6c6ca --- /dev/null +++ b/README.md @@ -0,0 +1,4 @@ +[![GitHub Actions CI/CD build status — Collection test suite](https://github.com/ansible-collection-migration/community.general/workflows/Collection%20test%20suite/badge.svg?branch=master)](https://github.com/ansible-collection-migration/community.general/actions?query=workflow%3A%22Collection%20test%20suite%22) + +Ansible Collection: community.general +================================================= \ No newline at end of file diff --git a/galaxy.yml b/galaxy.yml new file mode 100644 index 0000000000..a707a25e66 --- /dev/null +++ b/galaxy.yml @@ -0,0 +1,28 @@ +namespace: community +name: general +version: 0.1.0 +readme: README.md +authors: null +description: null +license: GPL-3.0-or-later +license_file: COPYING +tags: null +dependencies: + netapp.ontap: '>=0.1.0' + community.kubernetes: '>=0.1.0' + ovirt.ovirt: '>=0.1.0' + ansible.netcommon: '>=0.1.0' + cisco.mso: '>=0.1.0' + ansible.posix: '>=0.1.0' + cisco.aci: '>=0.1.0' + cisco.intersight: '>=0.1.0' + check_point.mgmt: '>=0.1.0' + fortinet.fortios: '>=0.1.0' + openstack.cloud: '>=0.1.0' + google.cloud: '>=0.1.0' + f5networks.f5_modules: '>=0.1.0' +repository: git@github.com:ansible-collection-migration/community.general.git +documentation: https://github.com/ansible-collection-migration/community.general/tree/master/docs +homepage: https://github.com/ansible-collection-migration/community.general +issues: https://github.com/ansible-collection-migration/community.general/issues?q=is%3Aissue+is%3Aopen+sort%3Aupdated-desc +type: flatmap diff --git a/meta/action_groups.yml b/meta/action_groups.yml new file mode 100644 index 0000000000..009faecf60 --- /dev/null +++ b/meta/action_groups.yml @@ -0,0 +1,66 @@ +docker: +- docker_swarm +- docker_image_facts +- docker_service +- docker_compose +- docker_config +- docker_container +- docker_container_info +- docker_host_info +- docker_image +- docker_image_info +- docker_login +- docker_network +- docker_network_info +- docker_node +- docker_node_info +- docker_prune +- docker_secret +- docker_swarm +- docker_swarm_info +- docker_swarm_service +- docker_swarm_service_info +- docker_volume +- docker_volume_info +k8s: +- kubevirt_cdi_upload +- kubevirt_preset +- kubevirt_pvc +- kubevirt_rs +- kubevirt_template +- kubevirt_vm +os: +- os_flavor_facts +- os_image_facts +- os_keystone_domain_facts +- os_networks_facts +- os_port_facts +- os_project_facts +- os_server_facts +- os_subnets_facts +- os_user_facts +ovirt: +- ovirt_affinity_label_facts +- ovirt_api_facts +- ovirt_cluster_facts +- ovirt_datacenter_facts +- ovirt_disk_facts +- ovirt_event_facts +- ovirt_external_provider_facts +- ovirt_group_facts +- ovirt_host_facts +- ovirt_host_storage_facts +- ovirt_network_facts +- ovirt_nic_facts +- ovirt_permission_facts +- ovirt_quota_facts +- ovirt_scheduling_policy_facts +- ovirt_snapshot_facts +- ovirt_storage_domain_facts +- ovirt_storage_template_facts +- ovirt_storage_vm_facts +- ovirt_tag_facts +- ovirt_template_facts +- ovirt_user_facts +- ovirt_vm_facts +- ovirt_vmpool_facts diff --git a/meta/routing.yml b/meta/routing.yml new file mode 100644 index 0000000000..29dcf22093 --- /dev/null +++ b/meta/routing.yml @@ -0,0 +1,962 @@ +plugin_routing: + modules: + ali_instance_facts: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + cs_instance_facts: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for 
details + cs_zone_facts: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + digital_ocean: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + digital_ocean_account_facts: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + digital_ocean_certificate_facts: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + digital_ocean_domain_facts: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + digital_ocean_firewall_facts: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + digital_ocean_floating_ip_facts: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + digital_ocean_image_facts: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + digital_ocean_load_balancer_facts: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + digital_ocean_region_facts: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + digital_ocean_size_facts: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + digital_ocean_snapshot_facts: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + digital_ocean_sshkey_facts: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + digital_ocean_tag_facts: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + digital_ocean_volume_facts: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + docker_image_facts: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + docker_service: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + gcdns_record: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + gcdns_zone: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + gce: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + gcp_backend_service: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + gcp_bigquery_dataset_facts: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + gcp_bigquery_table_facts: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + gcp_cloudbuild_trigger_facts: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + gcp_compute_address_facts: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + gcp_compute_backend_bucket_facts: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + gcp_compute_backend_service_facts: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + gcp_compute_disk_facts: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + gcp_compute_firewall_facts: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + gcp_compute_forwarding_rule_facts: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + gcp_compute_global_address_facts: + deprecation: + removal_date: TBD + 
warning_text: see plugin documentation for details + gcp_compute_global_forwarding_rule_facts: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + gcp_compute_health_check_facts: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + gcp_compute_http_health_check_facts: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + gcp_compute_https_health_check_facts: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + gcp_compute_image_facts: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + gcp_compute_instance_facts: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + gcp_compute_instance_group_facts: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + gcp_compute_instance_group_manager_facts: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + gcp_compute_instance_template_facts: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + gcp_compute_interconnect_attachment_facts: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + gcp_compute_network_facts: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + gcp_compute_region_disk_facts: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + gcp_compute_route_facts: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + gcp_compute_router_facts: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + gcp_compute_ssl_certificate_facts: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + gcp_compute_ssl_policy_facts: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + gcp_compute_subnetwork_facts: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + gcp_compute_target_http_proxy_facts: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + gcp_compute_target_https_proxy_facts: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + gcp_compute_target_pool_facts: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + gcp_compute_target_ssl_proxy_facts: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + gcp_compute_target_tcp_proxy_facts: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + gcp_compute_target_vpn_gateway_facts: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + gcp_compute_url_map_facts: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + gcp_compute_vpn_tunnel_facts: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + gcp_container_cluster_facts: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + gcp_container_node_pool_facts: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + gcp_dns_managed_zone_facts: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + gcp_dns_resource_record_set_facts: + deprecation: + 
removal_date: TBD + warning_text: see plugin documentation for details + gcp_forwarding_rule: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + gcp_healthcheck: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + gcp_iam_role_facts: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + gcp_iam_service_account_facts: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + gcp_pubsub_subscription_facts: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + gcp_pubsub_topic_facts: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + gcp_redis_instance_facts: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + gcp_resourcemanager_project_facts: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + gcp_sourcerepo_repository_facts: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + gcp_spanner_database_facts: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + gcp_spanner_instance_facts: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + gcp_sql_database_facts: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + gcp_sql_instance_facts: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + gcp_sql_user_facts: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + gcp_target_proxy: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + gcp_tpu_node_facts: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + gcp_url_map: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + gcpubsub_facts: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + gcspanner: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + memset_memstore_facts: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + memset_server_facts: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + ovirt: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + online_server_facts: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + online_user_facts: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + one_image_facts: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + os_flavor_facts: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + os_image_facts: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + os_keystone_domain_facts: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + os_networks_facts: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + os_port_facts: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + os_project_facts: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + os_server_facts: + deprecation: 
+ removal_date: TBD + warning_text: see plugin documentation for details + os_subnets_facts: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + os_user_facts: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + ovirt_affinity_label_facts: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + ovirt_api_facts: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + ovirt_cluster_facts: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + ovirt_datacenter_facts: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + ovirt_disk_facts: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + ovirt_event_facts: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + ovirt_external_provider_facts: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + ovirt_group_facts: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + ovirt_host_facts: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + ovirt_host_storage_facts: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + ovirt_network_facts: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + ovirt_nic_facts: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + ovirt_permission_facts: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + ovirt_quota_facts: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + ovirt_scheduling_policy_facts: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + ovirt_snapshot_facts: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + ovirt_storage_domain_facts: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + ovirt_storage_template_facts: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + ovirt_storage_vm_facts: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + ovirt_tag_facts: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + ovirt_template_facts: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + ovirt_user_facts: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + ovirt_vm_facts: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + ovirt_vmpool_facts: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + scaleway_image_facts: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + scaleway_ip_facts: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + scaleway_organization_facts: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + scaleway_security_group_facts: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + scaleway_server_facts: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + 
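# Note: every entry in this plugin_routing.modules list has the same shape.
# Ansible consults meta/routing.yml when it resolves a plugin name and emits
# warning_text whenever a playbook still uses the deprecated name;
# removal_date: TBD means no removal release had been scheduled yet.
# A minimal sketch of the two supported entry kinds, with hypothetical
# module names used purely for illustration:
#
#   plugin_routing:
#     modules:
#       example_old_facts:                 # deprecated but still resolvable
#         deprecation:
#           removal_date: TBD
#           warning_text: see plugin documentation for details
#       example_renamed_module:            # transparently rerouted
#         redirect: community.general.example_new_module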
scaleway_snapshot_facts: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + scaleway_volume_facts: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + smartos_image_facts: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + vr_account_facts: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + vr_dns_domain: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + vr_dns_record: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + vr_firewall_group: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + vr_firewall_rule: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + vr_server: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + vr_ssh_key: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + vr_startup_script: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + vr_user: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + vultr_account_facts: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + vultr_block_storage_facts: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + vultr_dns_domain_facts: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + vultr_firewall_group_facts: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + vultr_network_facts: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + vultr_os_facts: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + vultr_plan_facts: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + vultr_region_facts: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + vultr_server_facts: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + vultr_ssh_key_facts: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + vultr_startup_script_facts: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + vultr_user_facts: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + xenserver_guest_facts: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + vertica_facts: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + onepassword_facts: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + zabbix_group_facts: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + zabbix_host_facts: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + ldap_attr: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + aci_intf_policy_fc: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + aci_intf_policy_l2: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + aci_intf_policy_lldp: + deprecation: + removal_date: TBD + 
warning_text: see plugin documentation for details + aci_intf_policy_mcp: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + aci_intf_policy_port_channel: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + aci_intf_policy_port_security: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + bigip_asm_policy: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + bigip_device_facts: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + bigip_facts: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + bigip_gtm_facts: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + bigip_iapplx_package: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + bigip_security_address_list: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + bigip_security_port_list: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + bigip_traffic_group: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + bigiq_device_facts: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + pn_cluster: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + pn_ospf: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + pn_ospfarea: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + pn_show: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + pn_trunk: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + pn_vlag: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + pn_vlan: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + pn_vrouter: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + pn_vrouterbgp: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + pn_vrouterif: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + pn_vrouterlbif: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + panos_admin: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + panos_admpwd: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + panos_cert_gen_ssh: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + panos_check: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + panos_commit: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + panos_dag: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + panos_dag_tags: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + panos_import: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + panos_interface: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + panos_lic: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for 
details + panos_loadcfg: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + panos_match_rule: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + panos_mgtconfig: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + panos_nat_rule: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + panos_object: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + panos_op: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + panos_pg: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + panos_query_rules: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + panos_restart: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + panos_sag: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + panos_security_rule: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + panos_set: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + osx_say: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + foreman: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + katello: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + hpilo_facts: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + oneview_datacenter_facts: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + oneview_enclosure_facts: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + oneview_ethernet_network_facts: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + oneview_fc_network_facts: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + oneview_fcoe_network_facts: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + oneview_logical_interconnect_group_facts: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + oneview_network_set_facts: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + oneview_san_manager_facts: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + idrac_redfish_facts: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + redfish_facts: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + github_hooks: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + github_webhook_facts: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + gitlab_hooks: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + gluster_heal_facts: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + na_cdot_aggregate: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + na_cdot_license: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + na_cdot_lun: + deprecation: + removal_date: TBD + warning_text: see 
plugin documentation for details + na_cdot_qtree: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + na_cdot_svm: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + na_cdot_user: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + na_cdot_user_role: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + na_cdot_volume: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + na_ontap_gather_facts: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + sf_account_manager: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + sf_check_connections: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + sf_snapshot_schedule_manager: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + sf_volume_access_group_manager: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + sf_volume_manager: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + purefa_facts: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + purefb_facts: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + python_requirements_facts: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + jenkins_job_facts: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details + nginx_status_facts: + deprecation: + removal_date: TBD + warning_text: see plugin documentation for details diff --git a/plugins/action/__init__.py b/plugins/action/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/plugins/action/aireos.py b/plugins/action/aireos.py new file mode 100644 index 0000000000..84daba2724 --- /dev/null +++ b/plugins/action/aireos.py @@ -0,0 +1,79 @@ +# +# (c) 2016 Red Hat Inc. +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
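# Note (illustrative sketch, not part of the original file): aireos.py below
# and its siblings (aruba, ce, cnos, enos, ironware, sros) share one pattern:
# when a task runs with connection=local they read the legacy provider dict,
# deep-copy the play context, rewrite it into a persistent network_cli
# connection, and hand the resulting socket to the module via ansible_socket.
# The playbook-side usage these plugins expect looks roughly like this
# (the host and credential values are assumptions for the sketch):
#
#   - name: gather WLC system info over the legacy provider transport
#     community.general.aireos_command:
#       commands:
#         - show sysinfo
#       provider:
#         host: 192.0.2.10
#         username: admin
#         password: "{{ wlc_password }}"
#     connection: local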
+# +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import sys +import copy + +from ansible import constants as C +from ansible_collections.ansible.netcommon.plugins.action.network import ActionModule as ActionNetworkModule +from ansible_collections.community.general.plugins.module_utils.network.aireos.aireos import aireos_provider_spec +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import load_provider +from ansible.utils.display import Display + +display = Display() + + +class ActionModule(ActionNetworkModule): + + def run(self, tmp=None, task_vars=None): + del tmp # tmp no longer has any effect + + module_name = self._task.action.split('.')[-1] + self._config_module = True if module_name == 'aireos_config' else False + + if self._play_context.connection != 'local': + return dict( + failed=True, + msg='invalid connection specified, expected connection=local, ' + 'got %s' % self._play_context.connection + ) + + provider = load_provider(aireos_provider_spec, self._task.args) + + pc = copy.deepcopy(self._play_context) + pc.connection = 'network_cli' + pc.network_os = 'aireos' + pc.remote_addr = provider['host'] or self._play_context.remote_addr + pc.port = int(provider['port'] or self._play_context.port or 22) + pc.remote_user = provider['username'] or self._play_context.connection_user + pc.password = provider['password'] or self._play_context.password + command_timeout = int(provider['timeout'] or C.PERSISTENT_COMMAND_TIMEOUT) + + display.vvv('using connection plugin %s (was local)' % pc.connection, pc.remote_addr) + connection = self._shared_loader_obj.connection_loader.get('persistent', pc, sys.stdin, task_uuid=self._task._uuid) + connection.set_options(direct={'persistent_command_timeout': command_timeout}) + + socket_path = connection.run() + display.vvvv('socket_path: %s' % socket_path, pc.remote_addr) + if not socket_path: + return {'failed': True, + 'msg': 'unable to open shell. Please see: ' + + 'https://docs.ansible.com/ansible/network_debug_troubleshooting.html#unable-to-open-shell'} + + task_vars['ansible_socket'] = socket_path + + if self._play_context.become_method == 'enable': + self._play_context.become = False + self._play_context.become_method = None + + result = super(ActionModule, self).run(task_vars=task_vars) + + return result diff --git a/plugins/action/aruba.py b/plugins/action/aruba.py new file mode 100644 index 0000000000..7ea03c8726 --- /dev/null +++ b/plugins/action/aruba.py @@ -0,0 +1,79 @@ +# +# (c) 2016 Red Hat Inc. +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+# +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import sys +import copy + +from ansible import constants as C +from ansible_collections.ansible.netcommon.plugins.action.network import ActionModule as ActionNetworkModule +from ansible_collections.community.general.plugins.module_utils.network.aruba.aruba import aruba_provider_spec +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import load_provider +from ansible.utils.display import Display + +display = Display() + + +class ActionModule(ActionNetworkModule): + + def run(self, tmp=None, task_vars=None): + del tmp # tmp no longer has any effect + + module_name = self._task.action.split('.')[-1] + self._config_module = True if module_name == 'aruba_config' else False + + if self._play_context.connection != 'local': + return dict( + failed=True, + msg='invalid connection specified, expected connection=local, ' + 'got %s' % self._play_context.connection + ) + + provider = load_provider(aruba_provider_spec, self._task.args) + + pc = copy.deepcopy(self._play_context) + pc.connection = 'network_cli' + pc.network_os = 'aruba' + pc.remote_addr = provider['host'] or self._play_context.remote_addr + pc.port = int(provider['port'] or self._play_context.port or 22) + pc.remote_user = provider['username'] or self._play_context.connection_user + pc.password = provider['password'] or self._play_context.password + pc.private_key_file = provider['ssh_keyfile'] or self._play_context.private_key_file + command_timeout = int(provider['timeout'] or C.PERSISTENT_COMMAND_TIMEOUT) + + display.vvv('using connection plugin %s (was local)' % pc.connection, pc.remote_addr) + connection = self._shared_loader_obj.connection_loader.get('persistent', pc, sys.stdin, task_uuid=self._task._uuid) + connection.set_options(direct={'persistent_command_timeout': command_timeout}) + + socket_path = connection.run() + display.vvvv('socket_path: %s' % socket_path, pc.remote_addr) + if not socket_path: + return {'failed': True, + 'msg': 'unable to open shell. Please see: ' + + 'https://docs.ansible.com/ansible/network_debug_troubleshooting.html#unable-to-open-shell'} + + task_vars['ansible_socket'] = socket_path + + if self._play_context.become_method == 'enable': + self._play_context.become = False + self._play_context.become_method = None + + result = super(ActionModule, self).run(task_vars=task_vars) + return result diff --git a/plugins/action/ce.py b/plugins/action/ce.py new file mode 100644 index 0000000000..31cf3b4e6d --- /dev/null +++ b/plugins/action/ce.py @@ -0,0 +1,89 @@ +# +# Copyright: (c) 2016, Red Hat Inc. 
+ +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import sys +import copy + +from ansible import constants as C +from ansible_collections.ansible.netcommon.plugins.action.network import ActionModule as ActionNetworkModule +from ansible_collections.community.general.plugins.module_utils.network.cloudengine.ce import ce_provider_spec +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import load_provider +from ansible.utils.display import Display + +display = Display() + +CLI_SUPPORTED_MODULES = ['ce_rollback', 'ce_mlag_interface', 'ce_startup', 'ce_config', + 'ce_command', 'ce_facts', 'ce_evpn_global', 'ce_evpn_bgp_rr', + 'ce_mtu', 'ce_evpn_bgp', 'ce_snmp_location', 'ce_snmp_contact', + 'ce_snmp_traps', 'ce_netstream_global', 'ce_netstream_aging', + 'ce_netstream_export', 'ce_netstream_template', 'ce_ntp_auth', + 'ce_stp', 'ce_vxlan_global', 'ce_vxlan_arp', 'ce_vxlan_gateway', + 'ce_acl_interface'] + + +class ActionModule(ActionNetworkModule): + + def run(self, tmp=None, task_vars=None): + del tmp # tmp no longer has any effect + + module_name = self._task.action.split('.')[-1] + self._config_module = True if module_name == 'ce_config' else False + socket_path = None + persistent_connection = self._play_context.connection.split('.')[-1] + + if self._play_context.connection == 'local': + provider = load_provider(ce_provider_spec, self._task.args) + transport = provider['transport'] or 'cli' + + display.vvvv('connection transport is %s' % transport, self._play_context.remote_addr) + + if transport == 'cli': + pc = copy.deepcopy(self._play_context) + pc.connection = 'network_cli' + pc.network_os = 'ce' + pc.remote_addr = provider['host'] or self._play_context.remote_addr + pc.port = int(provider['port'] or self._play_context.port or 22) + pc.remote_user = provider['username'] or self._play_context.connection_user + pc.password = provider['password'] or self._play_context.password + command_timeout = int(provider['timeout'] or C.PERSISTENT_COMMAND_TIMEOUT) + # dict.update() mutates in place and returns None, so its result + # must not be assigned to the task args; the updated provider dict + # is stored there explicitly once the connection is established. + provider.update( + host=pc.remote_addr, + port=pc.port, + username=pc.remote_user, + password=pc.password + ) + # ce_netconf, and any module not in CLI_SUPPORTED_MODULES, is + # forced onto the netconf transport instead of network_cli. + if module_name in ['ce_netconf'] or module_name not in CLI_SUPPORTED_MODULES: + pc.connection = 'netconf' + display.vvv('using connection plugin %s (was local)' % pc.connection, pc.remote_addr) + connection = self._shared_loader_obj.connection_loader.get('persistent', pc, sys.stdin, task_uuid=self._task._uuid) + connection.set_options(direct={'persistent_command_timeout': command_timeout}) + + socket_path = connection.run() + display.vvvv('socket_path: %s' % socket_path, pc.remote_addr) + if not socket_path: + return {'failed': True, + 'msg': 'unable to open shell. 
Please see: ' + + 'https://docs.ansible.com/ansible/network_debug_troubleshooting.html#unable-to-open-shell'} + + task_vars['ansible_socket'] = socket_path + # make sure a transport value is set in args + self._task.args['transport'] = transport + self._task.args['provider'] = provider + elif persistent_connection in ('netconf', 'network_cli'): + provider = self._task.args.get('provider', {}) + if any(provider.values()): + display.warning('provider is unnecessary when using %s and will be ignored' % self._play_context.connection) + del self._task.args['provider'] + + if (persistent_connection == 'network_cli' and module_name not in CLI_SUPPORTED_MODULES) or \ + (persistent_connection == 'netconf' and module_name in CLI_SUPPORTED_MODULES): + return {'failed': True, 'msg': "Connection type '%s' is not valid for '%s' module." + % (self._play_context.connection, self._task.action)} + + result = super(ActionModule, self).run(task_vars=task_vars) + return result diff --git a/plugins/action/ce_template.py b/plugins/action/ce_template.py new file mode 100644 index 0000000000..aaacf42b6b --- /dev/null +++ b/plugins/action/ce_template.py @@ -0,0 +1,104 @@ +# +# Copyright 2015 Peter Sprygada +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <http://www.gnu.org/licenses/>. +# +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import os +import time +import glob +from ansible.module_utils.six.moves.urllib.parse import urlsplit + +from ansible.module_utils._text import to_text +from ansible_collections.community.general.plugins.action.ce import ActionModule as _ActionModule + + +class ActionModule(_ActionModule): + + def run(self, tmp=None, task_vars=None): + + try: + self._handle_template() + except (ValueError, AttributeError) as exc: + # exceptions have no .message attribute on Python 3; render the + # exception itself instead. + return dict(failed=True, msg=to_text(exc)) + + result = super(ActionModule, self).run(tmp, task_vars) + del tmp # tmp no longer has any effect + + if self._task.args.get('backup') and result.get('__backup__'): + # User requested backup and no error occurred in module. + # NOTE: If there is a parameter error, __backup__ key may not be in results. 
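# Note on the backup flow below (comment added for clarity): _write_backup()
# stores the configuration returned by the module under a backup/ directory
# inside the playbook (or owning role) directory, as resolved by
# _get_working_path(), using the name <inventory_hostname>_config.<timestamp>,
# and removes any older backups for the same host first.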
+ self._write_backup(task_vars['inventory_hostname'], result['__backup__']) + + if '__backup__' in result: + del result['__backup__'] + + return result + + def _get_working_path(self): + cwd = self._loader.get_basedir() + if self._task._role is not None: + cwd = self._task._role._role_path + return cwd + + def _write_backup(self, host, contents): + backup_path = self._get_working_path() + '/backup' + if not os.path.exists(backup_path): + os.mkdir(backup_path) + for fn in glob.glob('%s/%s*' % (backup_path, host)): + os.remove(fn) + tstamp = time.strftime("%Y-%m-%d@%H:%M:%S", time.localtime(time.time())) + filename = '%s/%s_config.%s' % (backup_path, host, tstamp) + # use a context manager so the backup file is flushed and closed + with open(filename, 'w') as f: + f.write(contents) + + def _handle_template(self): + src = self._task.args.get('src') + if not src: + raise ValueError('missing required arguments: src') + + working_path = self._get_working_path() + + if os.path.isabs(src) or urlsplit(src).scheme: + source = src + else: + source = self._loader.path_dwim_relative(working_path, 'templates', src) + if not source: + source = self._loader.path_dwim_relative(working_path, src) + + if not os.path.exists(source): + return + + try: + with open(source, 'r') as f: + template_data = to_text(f.read()) + except IOError: + # raise instead of returning a result dict: run() ignores this + # method's return value and only reacts to exceptions. + raise ValueError('unable to load src file') + + # Create a template search path in the following order: + # [working_path, self_role_path, dependent_role_paths, dirname(source)] + searchpath = [working_path] + if self._task._role is not None: + searchpath.append(self._task._role._role_path) + # the attribute is named _block; the stray colon in the original + # check made it always false, skipping dependent-role search paths. + if hasattr(self._task, "_block"): + dep_chain = self._task._block.get_dep_chain() + if dep_chain is not None: + for role in dep_chain: + searchpath.append(role._role_path) + searchpath.append(os.path.dirname(source)) + with self._templar.set_temporary_context(searchpath=searchpath): + self._task.args['src'] = self._templar.template(template_data) diff --git a/plugins/action/cnos.py b/plugins/action/cnos.py new file mode 100644 index 0000000000..fa0332da5e --- /dev/null +++ b/plugins/action/cnos.py @@ -0,0 +1,69 @@ +# (C) 2017 Red Hat Inc. +# Copyright (C) 2017 Lenovo. +# +# GNU General Public License v3.0+ +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. 
+# +# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# +# Contains Action Plugin methods for CNOS Config Module +# Lenovo Networking +# + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import sys +import copy + +from ansible import constants as C +from ansible_collections.ansible.netcommon.plugins.action.network import ActionModule as ActionNetworkModule +from ansible_collections.community.general.plugins.module_utils.network.cnos.cnos import cnos_provider_spec +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import load_provider +from ansible.utils.display import Display + +display = Display() + + +class ActionModule(ActionNetworkModule): + + def run(self, tmp=None, task_vars=None): + del tmp # tmp no longer has any effect + + module_name = self._task.action.split('.')[-1] + self._config_module = True if module_name == 'cnos_config' else False + + if self._play_context.connection == 'local': + provider = load_provider(cnos_provider_spec, self._task.args) + pc = copy.deepcopy(self._play_context) + pc.connection = 'network_cli' + pc.network_os = 'cnos' + pc.remote_addr = provider['host'] or self._play_context.remote_addr + pc.port = provider['port'] or self._play_context.port or 22 + pc.remote_user = provider['username'] or self._play_context.connection_user + pc.password = provider['password'] or self._play_context.password + pc.private_key_file = provider['ssh_keyfile'] or self._play_context.private_key_file + command_timeout = int(provider['timeout'] or C.PERSISTENT_COMMAND_TIMEOUT) + pc.become = provider['authorize'] or True + pc.become_pass = provider['auth_pass'] + pc.become_method = 'enable' + + display.vvv('using connection plugin %s (was local)' % pc.connection, pc.remote_addr) + connection = self._shared_loader_obj.connection_loader.get('persistent', pc, sys.stdin, task_uuid=self._task._uuid) + connection.set_options(direct={'persistent_command_timeout': command_timeout}) + + socket_path = connection.run() + display.vvvv('socket_path: %s' % socket_path, pc.remote_addr) + if not socket_path: + return {'failed': True, + 'msg': 'unable to open shell. Please see: ' + + 'https://docs.ansible.com/ansible/network_debug_troubleshooting.html#unable-to-open-shell'} + + task_vars['ansible_socket'] = socket_path + + result = super(ActionModule, self).run(task_vars=task_vars) + return result diff --git a/plugins/action/edgeos_config.py b/plugins/action/edgeos_config.py new file mode 100644 index 0000000000..e1145da6b8 --- /dev/null +++ b/plugins/action/edgeos_config.py @@ -0,0 +1,36 @@ +# +# Copyright 2018 Red Hat Inc. +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
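# Note (sketch, not part of the original file): unlike the provider-based
# plugins above, edgeos_config below rejects everything except a fully
# qualified network_cli connection. An inventory that satisfies the check
# could look like this -- the host name, address and user are assumptions:
#
#   edge_routers:
#     hosts:
#       edge1:
#         ansible_host: 192.0.2.1
#     vars:
#       ansible_connection: ansible.netcommon.network_cli
#       ansible_network_os: edgeos
#       ansible_user: ubnt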
+# +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +from ansible_collections.ansible.netcommon.plugins.action.network import ActionModule as ActionNetworkModule + + +class ActionModule(ActionNetworkModule): + + def run(self, tmp=None, task_vars=None): + del tmp # tmp no longer has any effect + + self._config_module = True + + if self._play_context.connection.split('.')[-1] != 'network_cli': + return {'failed': True, 'msg': 'Connection type %s is not valid for this module. Must use fully qualified' + ' name of network_cli connection type.' % self._play_context.connection} + + return super(ActionModule, self).run(task_vars=task_vars) diff --git a/plugins/action/enos.py b/plugins/action/enos.py new file mode 100644 index 0000000000..5097d95749 --- /dev/null +++ b/plugins/action/enos.py @@ -0,0 +1,69 @@ +# (C) 2017 Red Hat Inc. +# Copyright (C) 2017 Lenovo. +# +# GNU General Public License v3.0+ +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# +# Contains Action Plugin methods for ENOS Config Module +# Lenovo Networking +# + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import sys +import copy + +from ansible import constants as C +from ansible_collections.ansible.netcommon.plugins.action.network import ActionModule as ActionNetworkModule +from ansible_collections.community.general.plugins.module_utils.network.enos.enos import enos_provider_spec +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import load_provider +from ansible.utils.display import Display + +display = Display() + + +class ActionModule(ActionNetworkModule): + + def run(self, tmp=None, task_vars=None): + del tmp # tmp no longer has any effect + + module_name = self._task.action.split('.')[-1] + self._config_module = True if module_name == 'enos_config' else False + + if self._play_context.connection == 'local': + provider = load_provider(enos_provider_spec, self._task.args) + pc = copy.deepcopy(self._play_context) + pc.connection = 'network_cli' + pc.network_os = 'enos' + pc.remote_addr = provider['host'] or self._play_context.remote_addr + pc.port = provider['port'] or self._play_context.port or 22 + pc.remote_user = provider['username'] or self._play_context.connection_user + pc.password = provider['password'] or self._play_context.password + pc.private_key_file = provider['ssh_keyfile'] or self._play_context.private_key_file + command_timeout = int(provider['timeout'] or C.PERSISTENT_COMMAND_TIMEOUT) + pc.become = provider['authorize'] or True + pc.become_pass = provider['auth_pass'] + pc.become_method = 'enable' + + display.vvv('using connection plugin %s (was local)' % pc.connection, pc.remote_addr) + connection = self._shared_loader_obj.connection_loader.get('persistent', pc, sys.stdin, task_uuid=self._task._uuid) + connection.set_options(direct={'persistent_command_timeout': command_timeout}) + + socket_path = connection.run() + display.vvvv('socket_path: %s' % socket_path, pc.remote_addr) + if not socket_path: + return {'failed': True, + 'msg': 'unable to open shell. 
Please see: ' + + 'https://docs.ansible.com/ansible/network_debug_troubleshooting.html#unable-to-open-shell'} + + task_vars['ansible_socket'] = socket_path + + result = super(ActionModule, self).run(task_vars=task_vars) + return result diff --git a/plugins/action/exos.py b/plugins/action/exos.py new file mode 100644 index 0000000000..5d91155c12 --- /dev/null +++ b/plugins/action/exos.py @@ -0,0 +1,45 @@ +# +# Copyright 2015 Peter Sprygada +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +from ansible_collections.ansible.netcommon.plugins.action.network import ActionModule as ActionNetworkModule + + +class ActionModule(ActionNetworkModule): + + EXOS_NETWORK_CLI_MODULES = ( + 'exos_facts', + 'exos_config', + 'exos_command') + + def run(self, tmp=None, task_vars=None): + del tmp # tmp no longer has any effect + + module_name = self._task.action.split('.')[-1] + self._config_module = True if module_name == 'exos_config' else False + persistent_connection = self._play_context.connection.split('.')[-1] + + if persistent_connection not in ('network_cli', 'httpapi'): + return {'failed': True, 'msg': 'Connection type %s is not valid for this module' % self._play_context.connection} + + if persistent_connection == 'network_cli' and module_name not in self.EXOS_NETWORK_CLI_MODULES: + return {'failed': True, 'msg': "Connection type %s is not valid for this module" % self._play_context.connection} + + return super(ActionModule, self).run(task_vars=task_vars) diff --git a/plugins/action/ironware.py b/plugins/action/ironware.py new file mode 100644 index 0000000000..0a75a2f365 --- /dev/null +++ b/plugins/action/ironware.py @@ -0,0 +1,80 @@ +# +# (c) 2016 Red Hat Inc. +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
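# Note (sketch, assumptions flagged): exos.py above is the only plugin in
# this group that accepts two persistent transports -- network_cli, but only
# for exos_facts/exos_config/exos_command, and httpapi for the REST-based
# modules. Group variables for the httpapi case might look like this; the
# ssl setting is illustrative:
#
#   ansible_connection: ansible.netcommon.httpapi
#   ansible_network_os: exos
#   ansible_httpapi_use_ssl: true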
+# +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import sys +import copy + +from ansible_collections.ansible.netcommon.plugins.action.network import ActionModule as ActionNetworkModule +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import load_provider +from ansible_collections.community.general.plugins.module_utils.network.ironware.ironware import ironware_provider_spec +from ansible.utils.display import Display + +display = Display() + + +class ActionModule(ActionNetworkModule): + + def run(self, tmp=None, task_vars=None): + del tmp # tmp no longer has any effect + + module_name = self._task.action.split('.')[-1] + self._config_module = True if module_name == 'ironware_config' else False + persistent_connection = self._play_context.connection.split('.')[-1] + + if persistent_connection == 'network_cli': + provider = self._task.args.get('provider', {}) + if any(provider.values()): + display.warning('provider is unnecessary when using network_cli and will be ignored') + del self._task.args['provider'] + elif self._play_context.connection == 'local': + provider = load_provider(ironware_provider_spec, self._task.args) + pc = copy.deepcopy(self._play_context) + pc.connection = 'network_cli' + pc.network_os = 'ironware' + pc.remote_addr = provider['host'] or self._play_context.remote_addr + pc.port = int(provider['port'] or self._play_context.port or 22) + pc.remote_user = provider['username'] or self._play_context.connection_user + pc.password = provider['password'] or self._play_context.password + pc.private_key_file = provider['ssh_keyfile'] or self._play_context.private_key_file + pc.become = provider['authorize'] or False + if pc.become: + pc.become_method = 'enable' + pc.become_pass = provider['auth_pass'] + + display.vvv('using connection plugin %s (was local)' % pc.connection, pc.remote_addr) + connection = self._shared_loader_obj.connection_loader.get('persistent', pc, sys.stdin, task_uuid=self._task._uuid) + + command_timeout = int(provider['timeout']) if provider['timeout'] else connection.get_option('persistent_command_timeout') + connection.set_options(direct={'persistent_command_timeout': command_timeout}) + + socket_path = connection.run() + display.vvvv('socket_path: %s' % socket_path, pc.remote_addr) + if not socket_path: + return {'failed': True, + 'msg': 'unable to open shell. Please see: ' + + 'https://docs.ansible.com/ansible/network_debug_troubleshooting.html#unable-to-open-shell'} + + task_vars['ansible_socket'] = socket_path + else: + return {'failed': True, 'msg': 'Connection type %s is not valid for this module' % self._play_context.connection} + + result = super(ActionModule, self).run(task_vars=task_vars) + return result diff --git a/plugins/action/nos_config.py b/plugins/action/nos_config.py new file mode 100644 index 0000000000..3f49e1afab --- /dev/null +++ b/plugins/action/nos_config.py @@ -0,0 +1,31 @@ +# +# (c) 2018 Extreme Networks Inc. +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. 
+# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +from ansible_collections.ansible.netcommon.plugins.action.network import ActionModule as ActionNetworkModule + + +class ActionModule(ActionNetworkModule): + + def run(self, tmp=None, task_vars=None): + del tmp # tmp no longer has any effect + + self._config_module = True + return super(ActionModule, self).run(task_vars=task_vars) diff --git a/plugins/action/onyx_config.py b/plugins/action/onyx_config.py new file mode 100644 index 0000000000..b1c9089b49 --- /dev/null +++ b/plugins/action/onyx_config.py @@ -0,0 +1,31 @@ +# +# (c) 2017, Red Hat, Inc. +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +from ansible_collections.ansible.netcommon.plugins.action.network import ActionModule as ActionNetworkModule + + +class ActionModule(ActionNetworkModule): + + def run(self, tmp=None, task_vars=None): + del tmp # tmp no longer has any effect + + self._config_module = True + return super(ActionModule, self).run(task_vars=task_vars) diff --git a/plugins/action/slxos.py b/plugins/action/slxos.py new file mode 100644 index 0000000000..cb0478ce87 --- /dev/null +++ b/plugins/action/slxos.py @@ -0,0 +1,40 @@ +# +# (c) 2018 Extreme Networks Inc. +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
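# Note (sketch, values are assumptions): nos_config and onyx_config above are
# the thinnest variant of this family -- they only mark the task as a config
# module and defer everything else to netcommon's ActionNetworkModule,
# relying on the play's own persistent connection. A task using one of them
# could look like:
#
#   - name: push a config line to a Mellanox Onyx switch
#     community.general.onyx_config:
#       lines:
#         - snmp-server community public ro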
+# +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import re + +from ansible_collections.ansible.netcommon.plugins.action.network import ActionModule as ActionNetworkModule + +PRIVATE_KEYS_RE = re.compile('__.+__') + + +class ActionModule(ActionNetworkModule): + + def run(self, tmp=None, task_vars=None): + del tmp # tmp no longer has any effect + + module_name = self._task.action.split('.')[-1] + self._config_module = True if module_name == 'slxos_config' else False + persistent_connection = self._play_context.connection.split('.')[-1] + + # note the trailing comma: ('network_cli',) is a one-element tuple, + # whereas ('network_cli') is just a string and would turn this + # membership check into a substring test. + if persistent_connection not in ('network_cli',): + return {'failed': True, 'msg': 'Connection type %s is not valid for this module' % self._play_context.connection} + return super(ActionModule, self).run(task_vars=task_vars) diff --git a/plugins/action/sros.py b/plugins/action/sros.py new file mode 100644 index 0000000000..0de5947a71 --- /dev/null +++ b/plugins/action/sros.py @@ -0,0 +1,77 @@ +# +# (c) 2016 Red Hat Inc. +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <http://www.gnu.org/licenses/>. +# +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import sys +import copy + +from ansible import constants as C +from ansible_collections.ansible.netcommon.plugins.action.network import ActionModule as ActionNetworkModule +from ansible_collections.community.general.plugins.module_utils.network.sros.sros import sros_provider_spec +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import load_provider +from ansible.utils.display import Display + +display = Display() + + +class ActionModule(ActionNetworkModule): + + def run(self, tmp=None, task_vars=None): + del tmp # tmp no longer has any effect + + module_name = self._task.action.split('.')[-1] + persistent_connection = self._play_context.connection.split('.')[-1] + + self._config_module = True if module_name == 'sros_config' else False + if persistent_connection == 'network_cli': + provider = self._task.args.get('provider', {}) + if any(provider.values()): + display.warning('provider is unnecessary when using network_cli and will be ignored') + del self._task.args['provider'] + elif self._play_context.connection == 'local': + provider = load_provider(sros_provider_spec, self._task.args) + + pc = copy.deepcopy(self._play_context) + pc.connection = 'network_cli' + pc.network_os = 'sros' + pc.remote_addr = provider['host'] or self._play_context.remote_addr + pc.port = int(provider['port'] or self._play_context.port or 22) + pc.remote_user = provider['username'] or self._play_context.connection_user + pc.password = provider['password'] or self._play_context.password + pc.private_key_file = provider['ssh_keyfile'] or self._play_context.private_key_file + command_timeout = int(provider['timeout'] or C.PERSISTENT_COMMAND_TIMEOUT) + + display.vvv('using connection plugin %s (was local)' % pc.connection, pc.remote_addr) + connection = 
+            connection.set_options(direct={'persistent_command_timeout': command_timeout})
+
+            socket_path = connection.run()
+            display.vvvv('socket_path: %s' % socket_path, pc.remote_addr)
+            if not socket_path:
+                return {'failed': True,
+                        'msg': 'unable to open shell. Please see: ' +
+                               'https://docs.ansible.com/ansible/network_debug_troubleshooting.html#unable-to-open-shell'}
+
+            task_vars['ansible_socket'] = socket_path
+        else:
+            return {'failed': True, 'msg': 'Connection type %s is not valid for this module' % self._play_context.connection}
+
+        result = super(ActionModule, self).run(task_vars=task_vars)
+        return result
diff --git a/plugins/action/voss.py b/plugins/action/voss.py
new file mode 100644
index 0000000000..e7e2ca452c
--- /dev/null
+++ b/plugins/action/voss.py
@@ -0,0 +1,36 @@
+#
+# (c) 2018 Extreme Networks Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.
+#
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible_collections.ansible.netcommon.plugins.action.network import ActionModule as ActionNetworkModule
+
+
+class ActionModule(ActionNetworkModule):
+
+    def run(self, tmp=None, task_vars=None):
+        del tmp  # tmp no longer has any effect
+
+        module_name = self._task.action.split('.')[-1]
+        self._config_module = True if module_name == 'voss_config' else False
+        persistent_connection = self._play_context.connection.split('.')[-1]
+
+        # as in slxos.py above, the trailing comma makes this a tuple
+        if persistent_connection not in ('network_cli',):
+            return {'failed': True, 'msg': 'Connection type %s is not valid for this module' % self._play_context.connection}
+        return super(ActionModule, self).run(task_vars=task_vars)
diff --git a/plugins/become/__init__.py b/plugins/become/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/plugins/become/doas.py b/plugins/become/doas.py
new file mode 100644
index 0000000000..b93c5d2c47
--- /dev/null
+++ b/plugins/become/doas.py
@@ -0,0 +1,127 @@
+# -*- coding: utf-8 -*-
+# Copyright: (c) 2018, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+    become: doas
+    short_description: Do As user
+    description:
+        - This become plugin allows your remote/login user to execute commands as another user via the doas utility.
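+    # Editor's note, minimal usage sketch (the host var values below are
+    # hypothetical). The plugin is selected with the standard become settings:
+    #
+    #     ansible_become: true
+    #     ansible_become_method: community.general.doas
+    #     ansible_become_user: webadmin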
+ author: ansible (@core) + options: + become_user: + description: User you 'become' to execute the task + ini: + - section: privilege_escalation + key: become_user + - section: doas_become_plugin + key: user + vars: + - name: ansible_become_user + - name: ansible_doas_user + env: + - name: ANSIBLE_BECOME_USER + - name: ANSIBLE_DOAS_USER + become_exe: + description: Doas executable + default: doas + ini: + - section: privilege_escalation + key: become_exe + - section: doas_become_plugin + key: executable + vars: + - name: ansible_become_exe + - name: ansible_doas_exe + env: + - name: ANSIBLE_BECOME_EXE + - name: ANSIBLE_DOAS_EXE + become_flags: + description: Options to pass to doas + default: + ini: + - section: privilege_escalation + key: become_flags + - section: doas_become_plugin + key: flags + vars: + - name: ansible_become_flags + - name: ansible_doas_flags + env: + - name: ANSIBLE_BECOME_FLAGS + - name: ANSIBLE_DOAS_FLAGS + become_pass: + description: password for doas prompt + required: False + vars: + - name: ansible_become_password + - name: ansible_become_pass + - name: ansible_doas_pass + env: + - name: ANSIBLE_BECOME_PASS + - name: ANSIBLE_DOAS_PASS + ini: + - section: doas_become_plugin + key: password + prompt_l10n: + description: + - List of localized strings to match for prompt detection + - If empty we'll use the built in one + default: [] + ini: + - section: doas_become_plugin + key: localized_prompts + vars: + - name: ansible_doas_prompt_l10n + env: + - name: ANSIBLE_DOAS_PROMPT_L10N +''' + +import re + +from ansible.module_utils._text import to_bytes +from ansible.plugins.become import BecomeBase + + +class BecomeModule(BecomeBase): + + name = 'community.general.doas' + + # messages for detecting prompted password issues + fail = ('Permission denied',) + missing = ('Authorization required',) + + def check_password_prompt(self, b_output): + ''' checks if the expected password prompt exists in b_output ''' + + # FIXME: more accurate would be: 'doas (%s@' % remote_user + # however become plugins don't have that information currently + b_prompts = [to_bytes(p) for p in self.get_option('prompt_l10n')] or [br'doas \(', br'Password:'] + b_prompt = b"|".join(b_prompts) + + return bool(re.match(b_prompt, b_output)) + + def build_become_command(self, cmd, shell): + super(BecomeModule, self).build_become_command(cmd, shell) + + if not cmd: + return cmd + + self.prompt = True + + become_exe = self.get_option('become_exe') or self.name + + flags = self.get_option('become_flags') or '' + if not self.get_option('become_pass') and '-n' not in flags: + flags += ' -n' + + user = self.get_option('become_user') or '' + if user: + user = '-u %s' % (user) + + success_cmd = self._build_success_command(cmd, shell, noexe=True) + executable = getattr(shell, 'executable', shell.SHELL_FAMILY) + + return '%s %s %s %s -c %s' % (become_exe, flags, user, executable, success_cmd) diff --git a/plugins/become/dzdo.py b/plugins/become/dzdo.py new file mode 100644 index 0000000000..6b60b6d865 --- /dev/null +++ b/plugins/become/dzdo.py @@ -0,0 +1,96 @@ +# -*- coding: utf-8 -*- +# Copyright: (c) 2018, Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = ''' + become: dzdo + short_description: Centrify's Direct Authorize + description: + - This become plugins allows your remote/login user to execute commands as another user via the dzdo utility. 
+    author: ansible (@core)
+    options:
+        become_user:
+            description: User you 'become' to execute the task
+            ini:
+              - section: privilege_escalation
+                key: become_user
+              - section: dzdo_become_plugin
+                key: user
+            vars:
+              - name: ansible_become_user
+              - name: ansible_dzdo_user
+            env:
+              - name: ANSIBLE_BECOME_USER
+              - name: ANSIBLE_DZDO_USER
+        become_exe:
+            description: Dzdo executable
+            default: dzdo
+            ini:
+              - section: privilege_escalation
+                key: become_exe
+              - section: dzdo_become_plugin
+                key: executable
+            vars:
+              - name: ansible_become_exe
+              - name: ansible_dzdo_exe
+            env:
+              - name: ANSIBLE_BECOME_EXE
+              - name: ANSIBLE_DZDO_EXE
+        become_flags:
+            description: Options to pass to dzdo
+            default: -H -S -n
+            ini:
+              - section: privilege_escalation
+                key: become_flags
+              - section: dzdo_become_plugin
+                key: flags
+            vars:
+              - name: ansible_become_flags
+              - name: ansible_dzdo_flags
+            env:
+              - name: ANSIBLE_BECOME_FLAGS
+              - name: ANSIBLE_DZDO_FLAGS
+        become_pass:
+            description: Password to pass to dzdo
+            required: False
+            vars:
+              - name: ansible_become_password
+              - name: ansible_become_pass
+              - name: ansible_dzdo_pass
+            env:
+              - name: ANSIBLE_BECOME_PASS
+              - name: ANSIBLE_DZDO_PASS
+            ini:
+              - section: dzdo_become_plugin
+                key: password
+'''
+
+from ansible.plugins.become import BecomeBase
+
+
+class BecomeModule(BecomeBase):
+
+    name = 'community.general.dzdo'
+
+    # messages for detecting prompted password issues
+    fail = ('Sorry, try again.',)
+
+    def build_become_command(self, cmd, shell):
+        super(BecomeModule, self).build_become_command(cmd, shell)
+
+        if not cmd:
+            return cmd
+
+        becomecmd = self.get_option('become_exe') or self.name
+
+        flags = self.get_option('become_flags') or ''
+        if self.get_option('become_pass'):
+            self.prompt = '[dzdo via ansible, key=%s] password:' % self._id
+            flags = '%s -p "%s"' % (flags.replace('-n', ''), self.prompt)
+
+        user = self.get_option('become_user') or ''
+        if user:
+            user = '-u %s' % (user)
+
+        return ' '.join([becomecmd, flags, user, self._build_success_command(cmd, shell)])
diff --git a/plugins/become/ksu.py b/plugins/become/ksu.py
new file mode 100644
index 0000000000..c741b61ba1
--- /dev/null
+++ b/plugins/become/ksu.py
@@ -0,0 +1,119 @@
+# -*- coding: utf-8 -*-
+# Copyright: (c) 2018, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+    become: ksu
+    short_description: Kerberos substitute user
+    description:
+        - This become plugin allows your remote/login user to execute commands as another user via the ksu utility.
+ author: ansible (@core) + options: + become_user: + description: User you 'become' to execute the task + ini: + - section: privilege_escalation + key: become_user + - section: ksu_become_plugin + key: user + vars: + - name: ansible_become_user + - name: ansible_ksu_user + env: + - name: ANSIBLE_BECOME_USER + - name: ANSIBLE_KSU_USER + required: True + become_exe: + description: Su executable + default: ksu + ini: + - section: privilege_escalation + key: become_exe + - section: ksu_become_plugin + key: executable + vars: + - name: ansible_become_exe + - name: ansible_ksu_exe + env: + - name: ANSIBLE_BECOME_EXE + - name: ANSIBLE_KSU_EXE + become_flags: + description: Options to pass to ksu + default: '' + ini: + - section: privilege_escalation + key: become_flags + - section: ksu_become_plugin + key: flags + vars: + - name: ansible_become_flags + - name: ansible_ksu_flags + env: + - name: ANSIBLE_BECOME_FLAGS + - name: ANSIBLE_KSU_FLAGS + become_pass: + description: ksu password + required: False + vars: + - name: ansible_ksu_pass + - name: ansible_become_pass + - name: ansible_become_password + env: + - name: ANSIBLE_BECOME_PASS + - name: ANSIBLE_KSU_PASS + ini: + - section: ksu_become_plugin + key: password + prompt_l10n: + description: + - List of localized strings to match for prompt detection + - If empty we'll use the built in one + default: [] + ini: + - section: ksu_become_plugin + key: localized_prompts + vars: + - name: ansible_ksu_prompt_l10n + env: + - name: ANSIBLE_KSU_PROMPT_L10N +''' + +import re + +from ansible.module_utils._text import to_bytes +from ansible.plugins.become import BecomeBase + + +class BecomeModule(BecomeBase): + + name = 'community.general.ksu' + + # messages for detecting prompted password issues + fail = ('Password incorrect',) + missing = ('No password given',) + + def check_password_prompt(self, b_output): + ''' checks if the expected password prompt exists in b_output ''' + + prompts = self.get_option('prompt_l10n') or ["Kerberos password for .*@.*:"] + b_prompt = b"|".join(to_bytes(p) for p in prompts) + + return bool(re.match(b_prompt, b_output)) + + def build_become_command(self, cmd, shell): + + super(BecomeModule, self).build_become_command(cmd, shell) + + # Prompt handling for ``ksu`` is more complicated, this + # is used to satisfy the connection plugin + self.prompt = True + + if not cmd: + return cmd + + exe = self.get_option('become_exe') or self.name + flags = self.get_option('become_flags') or '' + user = self.get_option('become_user') or '' + return '%s %s %s -e %s ' % (exe, user, flags, self._build_success_command(cmd, shell)) diff --git a/plugins/become/machinectl.py b/plugins/become/machinectl.py new file mode 100644 index 0000000000..6515badf4b --- /dev/null +++ b/plugins/become/machinectl.py @@ -0,0 +1,86 @@ +# -*- coding: utf-8 -*- +# Copyright: (c) 2018, Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = ''' + become: machinectl + short_description: Systemd's machinectl privilege escalation + description: + - This become plugins allows your remote/login user to execute commands as another user via the machinectl utility. 
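+    # Editor's note, command-shape sketch (placeholders are hypothetical):
+    # build_become_command() below assembles roughly
+    #
+    #     machinectl -q shell <become_flags> <become_user>@ <command>
+    #
+    # so the flags and user come straight from the options documented here.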
+ author: ansible (@core) + options: + become_user: + description: User you 'become' to execute the task + ini: + - section: privilege_escalation + key: become_user + - section: machinectl_become_plugin + key: user + vars: + - name: ansible_become_user + - name: ansible_machinectl_user + env: + - name: ANSIBLE_BECOME_USER + - name: ANSIBLE_MACHINECTL_USER + become_exe: + description: Machinectl executable + default: machinectl + ini: + - section: privilege_escalation + key: become_exe + - section: machinectl_become_plugin + key: executable + vars: + - name: ansible_become_exe + - name: ansible_machinectl_exe + env: + - name: ANSIBLE_BECOME_EXE + - name: ANSIBLE_MACHINECTL_EXE + become_flags: + description: Options to pass to machinectl + default: '' + ini: + - section: privilege_escalation + key: become_flags + - section: machinectl_become_plugin + key: flags + vars: + - name: ansible_become_flags + - name: ansible_machinectl_flags + env: + - name: ANSIBLE_BECOME_FLAGS + - name: ANSIBLE_MACHINECTL_FLAGS + become_pass: + description: Password for machinectl + required: False + vars: + - name: ansible_become_password + - name: ansible_become_pass + - name: ansible_machinectl_pass + env: + - name: ANSIBLE_BECOME_PASS + - name: ANSIBLE_MACHINECTL_PASS + ini: + - section: machinectl_become_plugin + key: password +''' + +from ansible.plugins.become import BecomeBase + + +class BecomeModule(BecomeBase): + + name = 'community.general.machinectl' + + def build_become_command(self, cmd, shell): + super(BecomeModule, self).build_become_command(cmd, shell) + + if not cmd: + return cmd + + become = self.get_option('become_exe') or self.name + flags = self.get_option('become_flags') or '' + user = self.get_option('become_user') or '' + return '%s -q shell %s %s@ %s' % (become, flags, user, cmd) diff --git a/plugins/become/pbrun.py b/plugins/become/pbrun.py new file mode 100644 index 0000000000..1ac4a9b6c4 --- /dev/null +++ b/plugins/become/pbrun.py @@ -0,0 +1,103 @@ +# -*- coding: utf-8 -*- +# Copyright: (c) 2018, Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = ''' + become: pbrun + short_description: PowerBroker run + description: + - This become plugins allows your remote/login user to execute commands as another user via the pbrun utility. 
+ author: ansible (@core) + options: + become_user: + description: User you 'become' to execute the task + default: '' + ini: + - section: privilege_escalation + key: become_user + - section: pbrun_become_plugin + key: user + vars: + - name: ansible_become_user + - name: ansible_pbrun_user + env: + - name: ANSIBLE_BECOME_USER + - name: ANSIBLE_PBRUN_USER + become_exe: + description: Sudo executable + default: pbrun + ini: + - section: privilege_escalation + key: become_exe + - section: pbrun_become_plugin + key: executable + vars: + - name: ansible_become_exe + - name: ansible_pbrun_exe + env: + - name: ANSIBLE_BECOME_EXE + - name: ANSIBLE_PBRUN_EXE + become_flags: + description: Options to pass to pbrun + ini: + - section: privilege_escalation + key: become_flags + - section: pbrun_become_plugin + key: flags + vars: + - name: ansible_become_flags + - name: ansible_pbrun_flags + env: + - name: ANSIBLE_BECOME_FLAGS + - name: ANSIBLE_PBRUN_FLAGS + become_pass: + description: Password for pbrun + required: False + vars: + - name: ansible_become_password + - name: ansible_become_pass + - name: ansible_pbrun_pass + env: + - name: ANSIBLE_BECOME_PASS + - name: ANSIBLE_PBRUN_PASS + ini: + - section: pbrun_become_plugin + key: password + wrap_exe: + description: Toggle to wrap the command pbrun calls in 'shell -c' or not + default: False + type: bool + ini: + - section: pbrun_become_plugin + key: wrap_execution + vars: + - name: ansible_pbrun_wrap_execution + env: + - name: ANSIBLE_PBRUN_WRAP_EXECUTION +''' + +from ansible.plugins.become import BecomeBase + + +class BecomeModule(BecomeBase): + + name = 'community.general.pbrun' + + prompt = 'Password:' + + def build_become_command(self, cmd, shell): + super(BecomeModule, self).build_become_command(cmd, shell) + + if not cmd: + return cmd + + become_exe = self.get_option('become_exe') or self.name + flags = self.get_option('become_flags') or '' + user = self.get_option('become_user') or '' + if user: + user = '-u %s' % (user) + noexe = not self.get_option('wrap_exe') + + return ' '.join([become_exe, flags, user, self._build_success_command(cmd, shell, noexe=noexe)]) diff --git a/plugins/become/pfexec.py b/plugins/become/pfexec.py new file mode 100644 index 0000000000..994bbaa17d --- /dev/null +++ b/plugins/become/pfexec.py @@ -0,0 +1,103 @@ +# -*- coding: utf-8 -*- +# Copyright: (c) 2018, Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = ''' + become: pfexec + short_description: profile based execution + description: + - This become plugins allows your remote/login user to execute commands as another user via the pfexec utility. + author: ansible (@core) + options: + become_user: + description: + - User you 'become' to execute the task + - This plugin ignores this setting as pfexec uses it's own ``exec_attr`` to figure this out, + but it is supplied here for Ansible to make decisions needed for the task execution, like file permissions. 
+ default: root + ini: + - section: privilege_escalation + key: become_user + - section: pfexec_become_plugin + key: user + vars: + - name: ansible_become_user + - name: ansible_pfexec_user + env: + - name: ANSIBLE_BECOME_USER + - name: ANSIBLE_PFEXEC_USER + become_exe: + description: Sudo executable + default: pfexec + ini: + - section: privilege_escalation + key: become_exe + - section: pfexec_become_plugin + key: executable + vars: + - name: ansible_become_exe + - name: ansible_pfexec_exe + env: + - name: ANSIBLE_BECOME_EXE + - name: ANSIBLE_PFEXEC_EXE + become_flags: + description: Options to pass to pfexec + default: -H -S -n + ini: + - section: privilege_escalation + key: become_flags + - section: pfexec_become_plugin + key: flags + vars: + - name: ansible_become_flags + - name: ansible_pfexec_flags + env: + - name: ANSIBLE_BECOME_FLAGS + - name: ANSIBLE_PFEXEC_FLAGS + become_pass: + description: pfexec password + required: False + vars: + - name: ansible_become_password + - name: ansible_become_pass + - name: ansible_pfexec_pass + env: + - name: ANSIBLE_BECOME_PASS + - name: ANSIBLE_PFEXEC_PASS + ini: + - section: pfexec_become_plugin + key: password + wrap_exe: + description: Toggle to wrap the command pfexec calls in 'shell -c' or not + default: False + type: bool + ini: + - section: pfexec_become_plugin + key: wrap_execution + vars: + - name: ansible_pfexec_wrap_execution + env: + - name: ANSIBLE_PFEXEC_WRAP_EXECUTION + note: + - This plugin ignores ``become_user`` as pfexec uses it's own ``exec_attr`` to figure this out. +''' + +from ansible.plugins.become import BecomeBase + + +class BecomeModule(BecomeBase): + + name = 'community.general.pfexec' + + def build_become_command(self, cmd, shell): + super(BecomeModule, self).build_become_command(cmd, shell) + + if not cmd: + return cmd + + exe = self.get_option('become_exe') or self.name + flags = self.get_option('become_flags') + noexe = not self.get_option('wrap_exe') + return '%s %s "%s"' % (exe, flags, self._build_success_command(cmd, shell, noexe=noexe)) diff --git a/plugins/become/pmrun.py b/plugins/become/pmrun.py new file mode 100644 index 0000000000..8d5ddfd001 --- /dev/null +++ b/plugins/become/pmrun.py @@ -0,0 +1,75 @@ +# -*- coding: utf-8 -*- +# Copyright: (c) 2018, Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = ''' + become: pmrun + short_description: Privilege Manager run + description: + - This become plugins allows your remote/login user to execute commands as another user via the pmrun utility. 
+ author: ansible (@core) + options: + become_exe: + description: Sudo executable + default: pmrun + ini: + - section: privilege_escalation + key: become_exe + - section: pmrun_become_plugin + key: executable + vars: + - name: ansible_become_exe + - name: ansible_pmrun_exe + env: + - name: ANSIBLE_BECOME_EXE + - name: ANSIBLE_PMRUN_EXE + become_flags: + description: Options to pass to pmrun + ini: + - section: privilege_escalation + key: become_flags + - section: pmrun_become_plugin + key: flags + vars: + - name: ansible_become_flags + - name: ansible_pmrun_flags + env: + - name: ANSIBLE_BECOME_FLAGS + - name: ANSIBLE_PMRUN_FLAGS + become_pass: + description: pmrun password + required: False + vars: + - name: ansible_become_password + - name: ansible_become_pass + - name: ansible_pmrun_pass + env: + - name: ANSIBLE_BECOME_PASS + - name: ANSIBLE_PMRUN_PASS + ini: + - section: pmrun_become_plugin + key: password + notes: + - This plugin ignores the become_user supplied and uses pmrun's own configuration to select the user. +''' + +from ansible.plugins.become import BecomeBase +from ansible.module_utils.six.moves import shlex_quote + + +class BecomeModule(BecomeBase): + + name = 'community.general.pmrun' + prompt = 'Enter UPM user password:' + + def build_become_command(self, cmd, shell): + super(BecomeModule, self).build_become_command(cmd, shell) + + if not cmd: + return cmd + + become = self.get_option('become_exe') or self.name + flags = self.get_option('become_flags') or '' + return '%s %s %s' % (become, flags, shlex_quote(self._build_success_command(cmd, shell))) diff --git a/plugins/become/sesu.py b/plugins/become/sesu.py new file mode 100644 index 0000000000..5773b74b23 --- /dev/null +++ b/plugins/become/sesu.py @@ -0,0 +1,89 @@ +# -*- coding: utf-8 -*- +# Copyright: (c) 2018, Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = ''' + become: sesu + short_description: CA Privileged Access Manager + description: + - This become plugins allows your remote/login user to execute commands as another user via the sesu utility. 
+ author: ansible (@nekonyuu) + options: + become_user: + description: User you 'become' to execute the task + ini: + - section: privilege_escalation + key: become_user + - section: sesu_become_plugin + key: user + vars: + - name: ansible_become_user + - name: ansible_sesu_user + env: + - name: ANSIBLE_BECOME_USER + - name: ANSIBLE_SESU_USER + become_exe: + description: sesu executable + default: sesu + ini: + - section: privilege_escalation + key: become_exe + - section: sesu_become_plugin + key: executable + vars: + - name: ansible_become_exe + - name: ansible_sesu_exe + env: + - name: ANSIBLE_BECOME_EXE + - name: ANSIBLE_SESU_EXE + become_flags: + description: Options to pass to sesu + default: -H -S -n + ini: + - section: privilege_escalation + key: become_flags + - section: sesu_become_plugin + key: flags + vars: + - name: ansible_become_flags + - name: ansible_sesu_flags + env: + - name: ANSIBLE_BECOME_FLAGS + - name: ANSIBLE_SESU_FLAGS + become_pass: + description: Password to pass to sesu + required: False + vars: + - name: ansible_become_password + - name: ansible_become_pass + - name: ansible_sesu_pass + env: + - name: ANSIBLE_BECOME_PASS + - name: ANSIBLE_SESU_PASS + ini: + - section: sesu_become_plugin + key: password +''' + +from ansible.plugins.become import BecomeBase + + +class BecomeModule(BecomeBase): + + name = 'community.general.sesu' + + _prompt = 'Please enter your password:' + fail = missing = ('Sorry, try again with sesu.',) + + def build_become_command(self, cmd, shell): + super(BecomeModule, self).build_become_command(cmd, shell) + + if not cmd: + return cmd + + become = self.get_option('become_exe') or self.name + flags = self.get_option('become_flags') or '' + user = self.get_option('become_user') or '' + return '%s %s %s -c %s' % (become, flags, user, self._build_success_command(cmd, shell)) diff --git a/plugins/cache/__init__.py b/plugins/cache/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/plugins/cache/jsonfile.py b/plugins/cache/jsonfile.py new file mode 100644 index 0000000000..90e39937d1 --- /dev/null +++ b/plugins/cache/jsonfile.py @@ -0,0 +1,62 @@ +# (c) 2014, Brian Coca, Josh Drake, et al +# (c) 2017 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = ''' + cache: jsonfile + short_description: JSON formatted files. + description: + - This cache uses JSON formatted, per host, files saved to the filesystem. + author: Ansible Core (@ansible-core) + options: + _uri: + required: True + description: + - Path in which the cache plugin will save the JSON files + env: + - name: ANSIBLE_CACHE_PLUGIN_CONNECTION + ini: + - key: fact_caching_connection + section: defaults + _prefix: + description: User defined prefix to use when creating the JSON files + env: + - name: ANSIBLE_CACHE_PLUGIN_PREFIX + ini: + - key: fact_caching_prefix + section: defaults + _timeout: + default: 86400 + description: Expiration timeout in seconds for the cache plugin data. 
Set to 0 to never expire + env: + - name: ANSIBLE_CACHE_PLUGIN_TIMEOUT + ini: + - key: fact_caching_timeout + section: defaults + type: integer +''' + +import codecs +import json + +from ansible.parsing.ajson import AnsibleJSONEncoder, AnsibleJSONDecoder +from ansible.plugins.cache import BaseFileCacheModule + + +class CacheModule(BaseFileCacheModule): + """ + A caching module backed by json files. + """ + + def _load(self, filepath): + # Valid JSON is always UTF-8 encoded. + with codecs.open(filepath, 'r', encoding='utf-8') as f: + return json.load(f, cls=AnsibleJSONDecoder) + + def _dump(self, value, filepath): + with codecs.open(filepath, 'w', encoding='utf-8') as f: + f.write(json.dumps(value, cls=AnsibleJSONEncoder, sort_keys=True, indent=4)) diff --git a/plugins/cache/memcached.py b/plugins/cache/memcached.py new file mode 100644 index 0000000000..99841d2e84 --- /dev/null +++ b/plugins/cache/memcached.py @@ -0,0 +1,242 @@ +# (c) 2014, Brian Coca, Josh Drake, et al +# (c) 2017 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = ''' + cache: memcached + short_description: Use memcached DB for cache + description: + - This cache uses JSON formatted, per host records saved in memcached. + requirements: + - memcache (python lib) + options: + _uri: + description: + - List of connection information for the memcached DBs + default: ['127.0.0.1:11211'] + type: list + env: + - name: ANSIBLE_CACHE_PLUGIN_CONNECTION + ini: + - key: fact_caching_connection + section: defaults + _prefix: + description: User defined prefix to use when creating the DB entries + default: ansible_facts + env: + - name: ANSIBLE_CACHE_PLUGIN_PREFIX + ini: + - key: fact_caching_prefix + section: defaults + _timeout: + default: 86400 + description: Expiration timeout in seconds for the cache plugin data. Set to 0 to never expire + env: + - name: ANSIBLE_CACHE_PLUGIN_TIMEOUT + ini: + - key: fact_caching_timeout + section: defaults + type: integer +''' + +import collections +import os +import time +from multiprocessing import Lock +from itertools import chain + +from ansible import constants as C +from ansible.errors import AnsibleError +from ansible.module_utils.common._collections_compat import MutableSet +from ansible.plugins.cache import BaseCacheModule +from ansible.utils.display import Display + +try: + import memcache +except ImportError: + raise AnsibleError("python-memcached is required for the memcached fact cache") + +display = Display() + + +class ProxyClientPool(object): + """ + Memcached connection pooling for thread/fork safety. Inspired by py-redis + connection pool. + + Available connections are maintained in a deque and released in a FIFO manner. 
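+
+    A minimal usage sketch (hypothetical endpoint; any memcache.Client
+    method is proxied through a pooled connection by __getattr__):
+
+        pool = ProxyClientPool(['127.0.0.1:11211'], debug=0)
+        pool.set('some_key', 'some_value')  # checked out, used, checked back in
+        value = pool.get('some_key')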
+ """ + + def __init__(self, *args, **kwargs): + self.max_connections = kwargs.pop('max_connections', 1024) + self.connection_args = args + self.connection_kwargs = kwargs + self.reset() + + def reset(self): + self.pid = os.getpid() + self._num_connections = 0 + self._available_connections = collections.deque(maxlen=self.max_connections) + self._locked_connections = set() + self._lock = Lock() + + def _check_safe(self): + if self.pid != os.getpid(): + with self._lock: + if self.pid == os.getpid(): + # bail out - another thread already acquired the lock + return + self.disconnect_all() + self.reset() + + def get_connection(self): + self._check_safe() + try: + connection = self._available_connections.popleft() + except IndexError: + connection = self.create_connection() + self._locked_connections.add(connection) + return connection + + def create_connection(self): + if self._num_connections >= self.max_connections: + raise RuntimeError("Too many memcached connections") + self._num_connections += 1 + return memcache.Client(*self.connection_args, **self.connection_kwargs) + + def release_connection(self, connection): + self._check_safe() + self._locked_connections.remove(connection) + self._available_connections.append(connection) + + def disconnect_all(self): + for conn in chain(self._available_connections, self._locked_connections): + conn.disconnect_all() + + def __getattr__(self, name): + def wrapped(*args, **kwargs): + return self._proxy_client(name, *args, **kwargs) + return wrapped + + def _proxy_client(self, name, *args, **kwargs): + conn = self.get_connection() + + try: + return getattr(conn, name)(*args, **kwargs) + finally: + self.release_connection(conn) + + +class CacheModuleKeys(MutableSet): + """ + A set subclass that keeps track of insertion time and persists + the set in memcached. 
+ """ + PREFIX = 'ansible_cache_keys' + + def __init__(self, cache, *args, **kwargs): + self._cache = cache + self._keyset = dict(*args, **kwargs) + + def __contains__(self, key): + return key in self._keyset + + def __iter__(self): + return iter(self._keyset) + + def __len__(self): + return len(self._keyset) + + def add(self, key): + self._keyset[key] = time.time() + self._cache.set(self.PREFIX, self._keyset) + + def discard(self, key): + del self._keyset[key] + self._cache.set(self.PREFIX, self._keyset) + + def remove_by_timerange(self, s_min, s_max): + for k in self._keyset.keys(): + t = self._keyset[k] + if s_min < t < s_max: + del self._keyset[k] + self._cache.set(self.PREFIX, self._keyset) + + +class CacheModule(BaseCacheModule): + + def __init__(self, *args, **kwargs): + connection = ['127.0.0.1:11211'] + + try: + super(CacheModule, self).__init__(*args, **kwargs) + if self.get_option('_uri'): + connection = self.get_option('_uri') + self._timeout = self.get_option('_timeout') + self._prefix = self.get_option('_prefix') + except KeyError: + display.deprecated('Rather than importing CacheModules directly, ' + 'use ansible.plugins.loader.cache_loader', version='2.12') + if C.CACHE_PLUGIN_CONNECTION: + connection = C.CACHE_PLUGIN_CONNECTION.split(',') + self._timeout = C.CACHE_PLUGIN_TIMEOUT + self._prefix = C.CACHE_PLUGIN_PREFIX + + self._cache = {} + self._db = ProxyClientPool(connection, debug=0) + self._keys = CacheModuleKeys(self._db, self._db.get(CacheModuleKeys.PREFIX) or []) + + def _make_key(self, key): + return "{0}{1}".format(self._prefix, key) + + def _expire_keys(self): + if self._timeout > 0: + expiry_age = time.time() - self._timeout + self._keys.remove_by_timerange(0, expiry_age) + + def get(self, key): + if key not in self._cache: + value = self._db.get(self._make_key(key)) + # guard against the key not being removed from the keyset; + # this could happen in cases where the timeout value is changed + # between invocations + if value is None: + self.delete(key) + raise KeyError + self._cache[key] = value + + return self._cache.get(key) + + def set(self, key, value): + self._db.set(self._make_key(key), value, time=self._timeout, min_compress_len=1) + self._cache[key] = value + self._keys.add(key) + + def keys(self): + self._expire_keys() + return list(iter(self._keys)) + + def contains(self, key): + self._expire_keys() + return key in self._keys + + def delete(self, key): + del self._cache[key] + self._db.delete(self._make_key(key)) + self._keys.discard(key) + + def flush(self): + for key in self.keys(): + self.delete(key) + + def copy(self): + return self._keys.copy() + + def __getstate__(self): + return dict() + + def __setstate__(self, data): + self.__init__() diff --git a/plugins/cache/pickle.py b/plugins/cache/pickle.py new file mode 100644 index 0000000000..80b00b4c7b --- /dev/null +++ b/plugins/cache/pickle.py @@ -0,0 +1,67 @@ +# (c) 2017, Brian Coca +# (c) 2017 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = ''' + cache: pickle + short_description: Pickle formatted files. + description: + - This cache uses Python's pickle serialization format, in per host files, saved to the filesystem. 
+    author: Brian Coca (@bcoca)
+    options:
+      _uri:
+        required: True
+        description:
+          - Path in which the cache plugin will save the files
+        env:
+          - name: ANSIBLE_CACHE_PLUGIN_CONNECTION
+        ini:
+          - key: fact_caching_connection
+            section: defaults
+      _prefix:
+        description: User defined prefix to use when creating the files
+        env:
+          - name: ANSIBLE_CACHE_PLUGIN_PREFIX
+        ini:
+          - key: fact_caching_prefix
+            section: defaults
+      _timeout:
+        default: 86400
+        description: Expiration timeout in seconds for the cache plugin data. Set to 0 to never expire
+        env:
+          - name: ANSIBLE_CACHE_PLUGIN_TIMEOUT
+        ini:
+          - key: fact_caching_timeout
+            section: defaults
+'''
+
+try:
+    import cPickle as pickle
+except ImportError:
+    import pickle
+
+from ansible.module_utils.six import PY3
+from ansible.plugins.cache import BaseFileCacheModule
+
+
+class CacheModule(BaseFileCacheModule):
+    """
+    A caching module backed by pickle files.
+    """
+
+    def _load(self, filepath):
+        # Pickle is a binary format
+        with open(filepath, 'rb') as f:
+            if PY3:
+                return pickle.load(f, encoding='bytes')
+            else:
+                return pickle.load(f)
+
+    def _dump(self, value, filepath):
+        with open(filepath, 'wb') as f:
+            # Use pickle protocol 2 which is compatible with Python 2.3+.
+            pickle.dump(value, f, protocol=2)
diff --git a/plugins/cache/redis.py b/plugins/cache/redis.py
new file mode 100644
index 0000000000..962ae85891
--- /dev/null
+++ b/plugins/cache/redis.py
@@ -0,0 +1,155 @@
+# (c) 2014, Brian Coca, Josh Drake, et al
+# (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+    cache: redis
+    short_description: Use Redis DB for cache
+    description:
+        - This cache uses JSON formatted, per host records saved in Redis.
+    requirements:
+      - redis>=2.4.5 (python lib)
+    options:
+      _uri:
+        description:
+          - A colon separated string of connection information for Redis.
+        required: True
+        env:
+          - name: ANSIBLE_CACHE_PLUGIN_CONNECTION
+        ini:
+          - key: fact_caching_connection
+            section: defaults
+      _prefix:
+        description: User defined prefix to use when creating the DB entries
+        default: ansible_facts
+        env:
+          - name: ANSIBLE_CACHE_PLUGIN_PREFIX
+        ini:
+          - key: fact_caching_prefix
+            section: defaults
+      _timeout:
+        default: 86400
+        description: Expiration timeout in seconds for the cache plugin data. Set to 0 to never expire
+        env:
+          - name: ANSIBLE_CACHE_PLUGIN_TIMEOUT
+        ini:
+          - key: fact_caching_timeout
+            section: defaults
+        type: integer
+'''
+
+import time
+import json
+
+from ansible import constants as C
+from ansible.errors import AnsibleError
+from ansible.parsing.ajson import AnsibleJSONEncoder, AnsibleJSONDecoder
+from ansible.plugins.cache import BaseCacheModule
+from ansible.utils.display import Display
+
+try:
+    from redis import StrictRedis, VERSION
+except ImportError:
+    raise AnsibleError("The 'redis' python module (version 2.4.5 or newer) is required for the redis fact cache, 'pip install redis'")
+
+display = Display()
+
+
+class CacheModule(BaseCacheModule):
+    """
+    A caching module backed by redis.
+
+    Keys are maintained in a zset with their score being the timestamp
+    when they are inserted. This allows for the usage of 'zremrangebyscore'
+    to expire keys. This mechanism is used instead of a pattern-matched
+    'scan', for performance.
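+
+    Sketch of the underlying commands (redis-py 3.x zadd signature; the
+    zset name matches self._keys_set below):
+
+        db.zadd('ansible_cache_keys', {'host1': time.time()})
+        db.zremrangebyscore('ansible_cache_keys', 0, time.time() - timeout)
+        db.zrange('ansible_cache_keys', 0, -1)  # keys that have not expired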
+ """ + def __init__(self, *args, **kwargs): + connection = [] + + try: + super(CacheModule, self).__init__(*args, **kwargs) + if self.get_option('_uri'): + connection = self.get_option('_uri').split(':') + self._timeout = float(self.get_option('_timeout')) + self._prefix = self.get_option('_prefix') + except KeyError: + display.deprecated('Rather than importing CacheModules directly, ' + 'use ansible.plugins.loader.cache_loader', version='2.12') + if C.CACHE_PLUGIN_CONNECTION: + connection = C.CACHE_PLUGIN_CONNECTION.split(':') + self._timeout = float(C.CACHE_PLUGIN_TIMEOUT) + self._prefix = C.CACHE_PLUGIN_PREFIX + + self._cache = {} + self._db = StrictRedis(*connection) + self._keys_set = 'ansible_cache_keys' + + def _make_key(self, key): + return self._prefix + key + + def get(self, key): + + if key not in self._cache: + value = self._db.get(self._make_key(key)) + # guard against the key not being removed from the zset; + # this could happen in cases where the timeout value is changed + # between invocations + if value is None: + self.delete(key) + raise KeyError + self._cache[key] = json.loads(value, cls=AnsibleJSONDecoder) + + return self._cache.get(key) + + def set(self, key, value): + + value2 = json.dumps(value, cls=AnsibleJSONEncoder, sort_keys=True, indent=4) + if self._timeout > 0: # a timeout of 0 is handled as meaning 'never expire' + self._db.setex(self._make_key(key), int(self._timeout), value2) + else: + self._db.set(self._make_key(key), value2) + + if VERSION[0] == 2: + self._db.zadd(self._keys_set, time.time(), key) + else: + self._db.zadd(self._keys_set, {key: time.time()}) + self._cache[key] = value + + def _expire_keys(self): + if self._timeout > 0: + expiry_age = time.time() - self._timeout + self._db.zremrangebyscore(self._keys_set, 0, expiry_age) + + def keys(self): + self._expire_keys() + return self._db.zrange(self._keys_set, 0, -1) + + def contains(self, key): + self._expire_keys() + return (self._db.zrank(self._keys_set, key) is not None) + + def delete(self, key): + if key in self._cache: + del self._cache[key] + self._db.delete(self._make_key(key)) + self._db.zrem(self._keys_set, key) + + def flush(self): + for key in self.keys(): + self.delete(key) + + def copy(self): + # TODO: there is probably a better way to do this in redis + ret = dict() + for key in self.keys(): + ret[key] = self.get(key) + return ret + + def __getstate__(self): + return dict() + + def __setstate__(self, data): + self.__init__() diff --git a/plugins/cache/yaml.py b/plugins/cache/yaml.py new file mode 100644 index 0000000000..e4c495be70 --- /dev/null +++ b/plugins/cache/yaml.py @@ -0,0 +1,64 @@ +# (c) 2017, Brian Coca +# (c) 2017 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = ''' + cache: yaml + short_description: YAML formatted files. + description: + - This cache uses YAML formatted, per host, files saved to the filesystem. 
+    author: Brian Coca (@bcoca)
+    options:
+      _uri:
+        required: True
+        description:
+          - Path in which the cache plugin will save the files
+        env:
+          - name: ANSIBLE_CACHE_PLUGIN_CONNECTION
+        ini:
+          - key: fact_caching_connection
+            section: defaults
+      _prefix:
+        description: User defined prefix to use when creating the files
+        env:
+          - name: ANSIBLE_CACHE_PLUGIN_PREFIX
+        ini:
+          - key: fact_caching_prefix
+            section: defaults
+      _timeout:
+        default: 86400
+        description: Expiration timeout in seconds for the cache plugin data. Set to 0 to never expire
+        env:
+          - name: ANSIBLE_CACHE_PLUGIN_TIMEOUT
+        ini:
+          - key: fact_caching_timeout
+            section: defaults
+        type: integer
+'''
+
+
+import codecs
+
+import yaml
+
+from ansible.parsing.yaml.loader import AnsibleLoader
+from ansible.parsing.yaml.dumper import AnsibleDumper
+from ansible.plugins.cache import BaseFileCacheModule
+
+
+class CacheModule(BaseFileCacheModule):
+    """
+    A caching module backed by yaml files.
+    """
+
+    def _load(self, filepath):
+        with codecs.open(filepath, 'r', encoding='utf-8') as f:
+            return AnsibleLoader(f).get_single_data()
+
+    def _dump(self, value, filepath):
+        with codecs.open(filepath, 'w', encoding='utf-8') as f:
+            yaml.dump(value, f, Dumper=AnsibleDumper, default_flow_style=False)
diff --git a/plugins/callback/__init__.py b/plugins/callback/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/plugins/callback/actionable.py b/plugins/callback/actionable.py
new file mode 100644
index 0000000000..caa0e3eb48
--- /dev/null
+++ b/plugins/callback/actionable.py
@@ -0,0 +1,60 @@
+# (c) 2015, Andrew Gaffney
+# (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+    callback: actionable
+    type: stdout
+    short_description: shows only items that need attention
+    description:
+      - Use this callback when you do not care about OK or Skipped results.
+      - This callback suppresses any status that is not Failed or Changed.
+ deprecated: + why: The 'default' callback plugin now supports this functionality + removed_in: '2.11' + alternative: "'default' callback plugin with 'display_skipped_hosts = no' and 'display_ok_hosts = no' options" + extends_documentation_fragment: + - default_callback + requirements: + - set as stdout callback in configuration + # Override defaults from 'default' callback plugin + options: + display_skipped_hosts: + name: Show skipped hosts + description: "Toggle to control displaying skipped task/host results in a task" + type: bool + default: no + env: + - name: DISPLAY_SKIPPED_HOSTS + deprecated: + why: environment variables without "ANSIBLE_" prefix are deprecated + version: "2.12" + alternatives: the "ANSIBLE_DISPLAY_SKIPPED_HOSTS" environment variable + - name: ANSIBLE_DISPLAY_SKIPPED_HOSTS + ini: + - key: display_skipped_hosts + section: defaults + display_ok_hosts: + name: Show 'ok' hosts + description: "Toggle to control displaying 'ok' task/host results in a task" + type: bool + default: no + env: + - name: ANSIBLE_DISPLAY_OK_HOSTS + ini: + - key: display_ok_hosts + section: defaults +''' + +from ansible.plugins.callback.default import CallbackModule as CallbackModule_default + + +class CallbackModule(CallbackModule_default): + + CALLBACK_VERSION = 2.0 + CALLBACK_TYPE = 'stdout' + CALLBACK_NAME = 'community.general.actionable' diff --git a/plugins/callback/cgroup_memory_recap.py b/plugins/callback/cgroup_memory_recap.py new file mode 100644 index 0000000000..5c1d7ac797 --- /dev/null +++ b/plugins/callback/cgroup_memory_recap.py @@ -0,0 +1,120 @@ +# -*- coding: utf-8 -*- +# (c) 2018 Matt Martz +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' + callback: cgroup_memory_recap + callback_type: aggregate + requirements: + - whitelist in configuration + - cgroups + short_description: Profiles maximum memory usage of tasks and full execution using cgroups + description: + - This is an ansible callback plugin that profiles maximum memory usage of ansible and individual tasks, and displays a recap at the end using cgroups + notes: + - Requires ansible to be run from within a cgroup, such as with C(cgexec -g memory:ansible_profile ansible-playbook ...) + - This cgroup should only be used by ansible to get accurate results + - To create the cgroup, first use a command such as C(sudo cgcreate -a ec2-user:ec2-user -t ec2-user:ec2-user -g memory:ansible_profile) + options: + max_mem_file: + required: True + description: Path to cgroups C(memory.max_usage_in_bytes) file. Example C(/sys/fs/cgroup/memory/ansible_profile/memory.max_usage_in_bytes) + env: + - name: CGROUP_MAX_MEM_FILE + ini: + - section: callback_cgroupmemrecap + key: max_mem_file + cur_mem_file: + required: True + description: Path to C(memory.usage_in_bytes) file. 
Example C(/sys/fs/cgroup/memory/ansible_profile/memory.usage_in_bytes) + env: + - name: CGROUP_CUR_MEM_FILE + ini: + - section: callback_cgroupmemrecap + key: cur_mem_file +''' + +import time +import threading + +from ansible.plugins.callback import CallbackBase + + +class MemProf(threading.Thread): + """Python thread for recording memory usage""" + def __init__(self, path, obj=None): + threading.Thread.__init__(self) + self.obj = obj + self.path = path + self.results = [] + self.running = True + + def run(self): + while self.running: + with open(self.path) as f: + val = f.read() + self.results.append(int(val.strip()) / 1024 / 1024) + time.sleep(0.001) + + +class CallbackModule(CallbackBase): + CALLBACK_VERSION = 2.0 + CALLBACK_TYPE = 'aggregate' + CALLBACK_NAME = 'community.general.cgroup_memory_recap' + CALLBACK_NEEDS_WHITELIST = True + + def __init__(self, display=None): + super(CallbackModule, self).__init__(display) + + self._task_memprof = None + + self.task_results = [] + + def set_options(self, task_keys=None, var_options=None, direct=None): + super(CallbackModule, self).set_options(task_keys=task_keys, var_options=var_options, direct=direct) + + self.cgroup_max_file = self.get_option('max_mem_file') + self.cgroup_current_file = self.get_option('cur_mem_file') + + with open(self.cgroup_max_file, 'w+') as f: + f.write('0') + + def _profile_memory(self, obj=None): + prev_task = None + results = None + try: + self._task_memprof.running = False + results = self._task_memprof.results + prev_task = self._task_memprof.obj + except AttributeError: + pass + + if obj is not None: + self._task_memprof = MemProf(self.cgroup_current_file, obj=obj) + self._task_memprof.start() + + if results is not None: + self.task_results.append((prev_task, max(results))) + + def v2_playbook_on_task_start(self, task, is_conditional): + self._profile_memory(task) + + def v2_playbook_on_stats(self, stats): + self._profile_memory() + + with open(self.cgroup_max_file) as f: + max_results = int(f.read().strip()) / 1024 / 1024 + + self._display.banner('CGROUP MEMORY RECAP') + self._display.display('Execution Maximum: %0.2fMB\n\n' % max_results) + + for task, memory in self.task_results: + self._display.display('%s (%s): %0.2fMB' % (task.get_name(), task._uuid, memory)) diff --git a/plugins/callback/context_demo.py b/plugins/callback/context_demo.py new file mode 100644 index 0000000000..9485f34f98 --- /dev/null +++ b/plugins/callback/context_demo.py @@ -0,0 +1,52 @@ +# (C) 2012, Michael DeHaan, +# (c) 2017 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = ''' + callback: context_demo + type: aggregate + short_description: demo callback that adds play/task context + description: + - Displays some play and task context along with normal output + - This is mostly for demo purposes + requirements: + - whitelist in configuration +''' + +from ansible.plugins.callback import CallbackBase + + +class CallbackModule(CallbackBase): + """ + This is a very trivial example of how any callback function can get at play and task objects. + play will be 'None' for runner invocations, and task will be None for 'setup' invocations. 
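+
+    Illustrative output of v2_on_any (the play and task names are hypothetical):
+
+        --- play: webservers task: TASK: ping ---
+          --- ARGS
+          --- KWARGS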
+ """ + CALLBACK_VERSION = 2.0 + CALLBACK_TYPE = 'aggregate' + CALLBACK_NAME = 'community.general.context_demo' + CALLBACK_NEEDS_WHITELIST = True + + def __init__(self, *args, **kwargs): + super(CallbackModule, self).__init__(*args, **kwargs) + self.task = None + self.play = None + + def v2_on_any(self, *args, **kwargs): + self._display.display("--- play: {0} task: {1} ---".format(getattr(self.play, 'name', None), self.task)) + + self._display.display(" --- ARGS ") + for i, a in enumerate(args): + self._display.display(' %s: %s' % (i, a)) + + self._display.display(" --- KWARGS ") + for k in kwargs: + self._display.display(' %s: %s' % (k, kwargs[k])) + + def v2_playbook_on_play_start(self, play): + self.play = play + + def v2_playbook_on_task_start(self, task, is_conditional): + self.task = task diff --git a/plugins/callback/counter_enabled.py b/plugins/callback/counter_enabled.py new file mode 100644 index 0000000000..2bfff6a3ef --- /dev/null +++ b/plugins/callback/counter_enabled.py @@ -0,0 +1,246 @@ +# (c) 2018, Ivan Aragones Muniesa +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +''' + Counter enabled Ansible callback plugin (See DOCUMENTATION for more information) +''' + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type +from ansible import constants as C +from ansible.plugins.callback import CallbackBase +from ansible.utils.color import colorize, hostcolor +from ansible.template import Templar +from ansible.playbook.task_include import TaskInclude + +DOCUMENTATION = ''' + callback: counter_enabled + type: stdout + short_description: adds counters to the output items (tasks and hosts/task) + description: + - Use this callback when you need a kind of progress bar on a large environments. + - You will know how many tasks has the playbook to run, and which one is actually running. + - You will know how many hosts may run a task, and which of them is actually running. + extends_documentation_fragment: + - default_callback + requirements: + - set as stdout callback in ansible.cfg (stdout_callback = counter_enabled) +''' + + +class CallbackModule(CallbackBase): + + ''' + This is the default callback interface, which simply prints messages + to stdout when new callback events are received. 
+ ''' + + CALLBACK_VERSION = 2.0 + CALLBACK_TYPE = 'stdout' + CALLBACK_NAME = 'community.general.counter_enabled' + + _task_counter = 1 + _task_total = 0 + _host_counter = 1 + _host_total = 0 + + def __init__(self): + super(CallbackModule, self).__init__() + + self._playbook = "" + self._play = "" + + def _all_vars(self, host=None, task=None): + # host and task need to be specified in case 'magic variables' (host vars, group vars, etc) + # need to be loaded as well + return self._play.get_variable_manager().get_vars( + play=self._play, + host=host, + task=task + ) + + def v2_playbook_on_start(self, playbook): + self._playbook = playbook + + def v2_playbook_on_play_start(self, play): + name = play.get_name().strip() + if not name: + msg = u"play" + else: + msg = u"PLAY [%s]" % name + + self._play = play + + self._display.banner(msg) + self._play = play + + self._host_total = len(self._all_vars()['vars']['ansible_play_hosts_all']) + self._task_total = len(self._play.get_tasks()[0]) + + def v2_playbook_on_stats(self, stats): + self._display.banner("PLAY RECAP") + + hosts = sorted(stats.processed.keys()) + for host in hosts: + stat = stats.summarize(host) + + self._display.display(u"%s : %s %s %s %s %s %s" % ( + hostcolor(host, stat), + colorize(u'ok', stat['ok'], C.COLOR_OK), + colorize(u'changed', stat['changed'], C.COLOR_CHANGED), + colorize(u'unreachable', stat['unreachable'], C.COLOR_UNREACHABLE), + colorize(u'failed', stat['failures'], C.COLOR_ERROR), + colorize(u'rescued', stat['rescued'], C.COLOR_OK), + colorize(u'ignored', stat['ignored'], C.COLOR_WARN)), + screen_only=True + ) + + self._display.display(u"%s : %s %s %s %s %s %s" % ( + hostcolor(host, stat, False), + colorize(u'ok', stat['ok'], None), + colorize(u'changed', stat['changed'], None), + colorize(u'unreachable', stat['unreachable'], None), + colorize(u'failed', stat['failures'], None), + colorize(u'rescued', stat['rescued'], None), + colorize(u'ignored', stat['ignored'], None)), + log_only=True + ) + + self._display.display("", screen_only=True) + + # print custom stats + if self._plugin_options.get('show_custom_stats', C.SHOW_CUSTOM_STATS) and stats.custom: + # fallback on constants for inherited plugins missing docs + self._display.banner("CUSTOM STATS: ") + # per host + # TODO: come up with 'pretty format' + for k in sorted(stats.custom.keys()): + if k == '_run': + continue + self._display.display('\t%s: %s' % (k, self._dump_results(stats.custom[k], indent=1).replace('\n', ''))) + + # print per run custom stats + if '_run' in stats.custom: + self._display.display("", screen_only=True) + self._display.display('\tRUN: %s' % self._dump_results(stats.custom['_run'], indent=1).replace('\n', '')) + self._display.display("", screen_only=True) + + def v2_playbook_on_task_start(self, task, is_conditional): + args = '' + # args can be specified as no_log in several places: in the task or in + # the argument spec. We can check whether the task is no_log but the + # argument spec can't be because that is only run on the target + # machine and we haven't run it there yet at this time. + # + # So we give people a config option to affect display of the args so + # that they can secure this if they feel that their stdout is insecure + # (shoulder surfing, logging stdout straight to a file, etc). 
+ if not task.no_log and C.DISPLAY_ARGS_TO_STDOUT: + args = ', '.join(('%s=%s' % a for a in task.args.items())) + args = ' %s' % args + self._display.banner("TASK %d/%d [%s%s]" % (self._task_counter, self._task_total, task.get_name().strip(), args)) + if self._display.verbosity >= 2: + path = task.get_path() + if path: + self._display.display("task path: %s" % path, color=C.COLOR_DEBUG) + self._host_counter = 0 + self._task_counter += 1 + + def v2_runner_on_ok(self, result): + + self._host_counter += 1 + + delegated_vars = result._result.get('_ansible_delegated_vars', None) + + if self._play.strategy == 'free' and self._last_task_banner != result._task._uuid: + self._print_task_banner(result._task) + + if isinstance(result._task, TaskInclude): + return + elif result._result.get('changed', False): + if delegated_vars: + msg = "changed: %d/%d [%s -> %s]" % (self._host_counter, self._host_total, result._host.get_name(), delegated_vars['ansible_host']) + else: + msg = "changed: %d/%d [%s]" % (self._host_counter, self._host_total, result._host.get_name()) + color = C.COLOR_CHANGED + else: + if delegated_vars: + msg = "ok: %d/%d [%s -> %s]" % (self._host_counter, self._host_total, result._host.get_name(), delegated_vars['ansible_host']) + else: + msg = "ok: %d/%d [%s]" % (self._host_counter, self._host_total, result._host.get_name()) + color = C.COLOR_OK + + self._handle_warnings(result._result) + + if result._task.loop and 'results' in result._result: + self._process_items(result) + else: + self._clean_results(result._result, result._task.action) + + if self._run_is_verbose(result): + msg += " => %s" % (self._dump_results(result._result),) + self._display.display(msg, color=color) + + def v2_runner_on_failed(self, result, ignore_errors=False): + + self._host_counter += 1 + + delegated_vars = result._result.get('_ansible_delegated_vars', None) + self._clean_results(result._result, result._task.action) + + if self._play.strategy == 'free' and self._last_task_banner != result._task._uuid: + self._print_task_banner(result._task) + + self._handle_exception(result._result) + self._handle_warnings(result._result) + + if result._task.loop and 'results' in result._result: + self._process_items(result) + + else: + if delegated_vars: + self._display.display("fatal: %d/%d [%s -> %s]: FAILED! => %s" % (self._host_counter, self._host_total, + result._host.get_name(), delegated_vars['ansible_host'], + self._dump_results(result._result)), + color=C.COLOR_ERROR) + else: + self._display.display("fatal: %d/%d [%s]: FAILED! 
=> %s" % (self._host_counter, self._host_total, + result._host.get_name(), self._dump_results(result._result)), + color=C.COLOR_ERROR) + + if ignore_errors: + self._display.display("...ignoring", color=C.COLOR_SKIP) + + def v2_runner_on_skipped(self, result): + self._host_counter += 1 + + if self._plugin_options.get('show_skipped_hosts', C.DISPLAY_SKIPPED_HOSTS): # fallback on constants for inherited plugins missing docs + + self._clean_results(result._result, result._task.action) + + if self._play.strategy == 'free' and self._last_task_banner != result._task._uuid: + self._print_task_banner(result._task) + + if result._task.loop and 'results' in result._result: + self._process_items(result) + else: + msg = "skipping: %d/%d [%s]" % (self._host_counter, self._host_total, result._host.get_name()) + if self._run_is_verbose(result): + msg += " => %s" % self._dump_results(result._result) + self._display.display(msg, color=C.COLOR_SKIP) + + def v2_runner_on_unreachable(self, result): + self._host_counter += 1 + + if self._play.strategy == 'free' and self._last_task_banner != result._task._uuid: + self._print_task_banner(result._task) + + delegated_vars = result._result.get('_ansible_delegated_vars', None) + if delegated_vars: + self._display.display("fatal: %d/%d [%s -> %s]: UNREACHABLE! => %s" % (self._host_counter, self._host_total, + result._host.get_name(), delegated_vars['ansible_host'], + self._dump_results(result._result)), + color=C.COLOR_UNREACHABLE) + else: + self._display.display("fatal: %d/%d [%s]: UNREACHABLE! => %s" % (self._host_counter, self._host_total, + result._host.get_name(), self._dump_results(result._result)), + color=C.COLOR_UNREACHABLE) diff --git a/plugins/callback/dense.py b/plugins/callback/dense.py new file mode 100644 index 0000000000..39693ae823 --- /dev/null +++ b/plugins/callback/dense.py @@ -0,0 +1,501 @@ +# (c) 2016, Dag Wieers +# (c) 2017 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = ''' +callback: dense +type: stdout +short_description: minimal stdout output +extends_documentation_fragment: +- default_callback +description: +- When in verbose mode it will act the same as the default callback +author: +- Dag Wieers (@dagwieers) +requirements: +- set as stdout in configuration +''' + +HAS_OD = False +try: + from collections import OrderedDict + HAS_OD = True +except ImportError: + pass + +from ansible.module_utils.six import binary_type, text_type +from ansible.module_utils.common._collections_compat import MutableMapping, MutableSequence +from ansible.plugins.callback.default import CallbackModule as CallbackModule_default +from ansible.utils.color import colorize, hostcolor +from ansible.utils.display import Display + +import sys + +display = Display() + + +# Design goals: +# +# + On screen there should only be relevant stuff +# - How far are we ? (during run, last line) +# - What issues occurred +# - What changes occurred +# - Diff output (in diff-mode) +# +# + If verbosity increases, act as default output +# So that users can easily switch to default for troubleshooting +# +# + Rewrite the output during processing +# - We use the cursor to indicate where in the task we are. +# Output after the prompt is the output of the previous task. 
+# - If we would clear the line at the start of a task, there would often +# be no information at all, so we leave it until it gets updated +# +# + Use the same color-conventions of Ansible +# +# + Ensure the verbose output (-v) is also dense. +# Remove information that is not essential (eg. timestamps, status) + + +# TODO: +# +# + Properly test for terminal capabilities, and fall back to default +# + Modify Ansible mechanism so we don't need to use sys.stdout directly +# + Find an elegant solution for progress bar line wrapping + + +# FIXME: Importing constants as C simply does not work, beats me :-/ +# from ansible import constants as C +class C: + COLOR_HIGHLIGHT = 'white' + COLOR_VERBOSE = 'blue' + COLOR_WARN = 'bright purple' + COLOR_ERROR = 'red' + COLOR_DEBUG = 'dark gray' + COLOR_DEPRECATE = 'purple' + COLOR_SKIP = 'cyan' + COLOR_UNREACHABLE = 'bright red' + COLOR_OK = 'green' + COLOR_CHANGED = 'yellow' + + +# Taken from Dstat +class vt100: + black = '\033[0;30m' + darkred = '\033[0;31m' + darkgreen = '\033[0;32m' + darkyellow = '\033[0;33m' + darkblue = '\033[0;34m' + darkmagenta = '\033[0;35m' + darkcyan = '\033[0;36m' + gray = '\033[0;37m' + + darkgray = '\033[1;30m' + red = '\033[1;31m' + green = '\033[1;32m' + yellow = '\033[1;33m' + blue = '\033[1;34m' + magenta = '\033[1;35m' + cyan = '\033[1;36m' + white = '\033[1;37m' + + blackbg = '\033[40m' + redbg = '\033[41m' + greenbg = '\033[42m' + yellowbg = '\033[43m' + bluebg = '\033[44m' + magentabg = '\033[45m' + cyanbg = '\033[46m' + whitebg = '\033[47m' + + reset = '\033[0;0m' + bold = '\033[1m' + reverse = '\033[2m' + underline = '\033[4m' + + clear = '\033[2J' +# clearline = '\033[K' + clearline = '\033[2K' + save = '\033[s' + restore = '\033[u' + save_all = '\0337' + restore_all = '\0338' + linewrap = '\033[7h' + nolinewrap = '\033[7l' + + up = '\033[1A' + down = '\033[1B' + right = '\033[1C' + left = '\033[1D' + + +colors = dict( + ok=vt100.darkgreen, + changed=vt100.darkyellow, + skipped=vt100.darkcyan, + ignored=vt100.cyanbg + vt100.red, + failed=vt100.darkred, + unreachable=vt100.red, +) + +states = ('skipped', 'ok', 'changed', 'failed', 'unreachable') + + +class CallbackModule_dense(CallbackModule_default): + + ''' + This is the dense callback interface, where screen estate is still valued. 
+ ''' + + CALLBACK_VERSION = 2.0 + CALLBACK_TYPE = 'stdout' + CALLBACK_NAME = 'dense' + + def __init__(self): + + # From CallbackModule + self._display = display + + if HAS_OD: + + self.disabled = False + self.super_ref = super(CallbackModule, self) + self.super_ref.__init__() + + # Attributes to remove from results for more density + self.removed_attributes = ( + # 'changed', + 'delta', + # 'diff', + 'end', + 'failed', + 'failed_when_result', + 'invocation', + 'start', + 'stdout_lines', + ) + + # Initiate data structures + self.hosts = OrderedDict() + self.keep = False + self.shown_title = False + self.count = dict(play=0, handler=0, task=0) + self.type = 'foo' + + # Start immediately on the first line + sys.stdout.write(vt100.reset + vt100.save + vt100.clearline) + sys.stdout.flush() + else: + display.warning("The 'dense' callback plugin requires OrderedDict which is not available in this version of python, disabling.") + self.disabled = True + + def __del__(self): + sys.stdout.write(vt100.restore + vt100.reset + '\n' + vt100.save + vt100.clearline) + + def _add_host(self, result, status): + name = result._host.get_name() + + # Add a new status in case a failed task is ignored + if status == 'failed' and result._task.ignore_errors: + status = 'ignored' + + # Check if we have to update an existing state (when looping over items) + if name not in self.hosts: + self.hosts[name] = dict(state=status) + elif states.index(self.hosts[name]['state']) < states.index(status): + self.hosts[name]['state'] = status + + # Store delegated hostname, if needed + delegated_vars = result._result.get('_ansible_delegated_vars', None) + if delegated_vars: + self.hosts[name]['delegate'] = delegated_vars['ansible_host'] + + # Print progress bar + self._display_progress(result) + +# # Ensure that tasks with changes/failures stay on-screen, and during diff-mode +# if status in ['changed', 'failed', 'unreachable'] or (result.get('_diff_mode', False) and result._resultget('diff', False)): + # Ensure that tasks with changes/failures stay on-screen + if status in ['changed', 'failed', 'unreachable']: + self.keep = True + + if self._display.verbosity == 1: + # Print task title, if needed + self._display_task_banner() + self._display_results(result, status) + + def _clean_results(self, result): + # Remove non-essential attributes + for attr in self.removed_attributes: + if attr in result: + del(result[attr]) + + # Remove empty attributes (list, dict, str) + for attr in result.copy(): + if isinstance(result[attr], (MutableSequence, MutableMapping, binary_type, text_type)): + if not result[attr]: + del(result[attr]) + + def _handle_exceptions(self, result): + if 'exception' in result: + # Remove the exception from the result so it's not shown every time + del result['exception'] + + if self._display.verbosity == 1: + return "An exception occurred during task execution. To see the full traceback, use -vvv." 
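+    # A minimal sketch of the in-place redraw technique used by
+    # _display_progress() below, assuming a vt100-compatible terminal
+    # (illustrative only, not part of the plugin):
+    #
+    #   sys.stdout.write(vt100.save)              # mark the status line once
+    #   for update in ('task 1: host1', 'task 1: host1 host2'):
+    #       sys.stdout.write(vt100.restore + vt100.clearline + update)
+    #       sys.stdout.flush()                    # rewrite the same line in place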
+ + def _display_progress(self, result=None): + # Always rewrite the complete line + sys.stdout.write(vt100.restore + vt100.reset + vt100.clearline + vt100.nolinewrap + vt100.underline) + sys.stdout.write('%s %d:' % (self.type, self.count[self.type])) + sys.stdout.write(vt100.reset) + sys.stdout.flush() + + # Print out each host in its own status-color + for name in self.hosts: + sys.stdout.write(' ') + if self.hosts[name].get('delegate', None): + sys.stdout.write(self.hosts[name]['delegate'] + '>') + sys.stdout.write(colors[self.hosts[name]['state']] + name + vt100.reset) + sys.stdout.flush() + +# if result._result.get('diff', False): +# sys.stdout.write('\n' + vt100.linewrap) + sys.stdout.write(vt100.linewrap) + +# self.keep = True + + def _display_task_banner(self): + if not self.shown_title: + self.shown_title = True + sys.stdout.write(vt100.restore + vt100.reset + vt100.clearline + vt100.underline) + sys.stdout.write('%s %d: %s' % (self.type, self.count[self.type], self.task.get_name().strip())) + sys.stdout.write(vt100.restore + vt100.reset + '\n' + vt100.save + vt100.clearline) + sys.stdout.flush() + else: + sys.stdout.write(vt100.restore + vt100.reset + vt100.clearline) + self.keep = False + + def _display_results(self, result, status): + # Leave the previous task on screen (as it has changes/errors) + if self._display.verbosity == 0 and self.keep: + sys.stdout.write(vt100.restore + vt100.reset + '\n' + vt100.save + vt100.clearline) + else: + sys.stdout.write(vt100.restore + vt100.reset + vt100.clearline) + self.keep = False + + self._clean_results(result._result) + + dump = '' + if result._task.action == 'include': + return + elif status == 'ok': + return + elif status == 'ignored': + dump = self._handle_exceptions(result._result) + elif status == 'failed': + dump = self._handle_exceptions(result._result) + elif status == 'unreachable': + dump = result._result['msg'] + + if not dump: + dump = self._dump_results(result._result) + + if result._task.loop and 'results' in result._result: + self._process_items(result) + else: + sys.stdout.write(colors[status] + status + ': ') + + delegated_vars = result._result.get('_ansible_delegated_vars', None) + if delegated_vars: + sys.stdout.write(vt100.reset + result._host.get_name() + '>' + colors[status] + delegated_vars['ansible_host']) + else: + sys.stdout.write(result._host.get_name()) + + sys.stdout.write(': ' + dump + '\n') + sys.stdout.write(vt100.reset + vt100.save + vt100.clearline) + sys.stdout.flush() + + if status == 'changed': + self._handle_warnings(result._result) + + def v2_playbook_on_play_start(self, play): + # Leave the previous task on screen (as it has changes/errors) + if self._display.verbosity == 0 and self.keep: + sys.stdout.write(vt100.restore + vt100.reset + '\n' + vt100.save + vt100.clearline + vt100.bold) + else: + sys.stdout.write(vt100.restore + vt100.reset + vt100.clearline + vt100.bold) + + # Reset at the start of each play + self.keep = False + self.count.update(dict(handler=0, task=0)) + self.count['play'] += 1 + self.play = play + + # Write the next play on screen IN UPPERCASE, and make it permanent + name = play.get_name().strip() + if not name: + name = 'unnamed' + sys.stdout.write('PLAY %d: %s' % (self.count['play'], name.upper())) + sys.stdout.write(vt100.restore + vt100.reset + '\n' + vt100.save + vt100.clearline) + sys.stdout.flush() + + def v2_playbook_on_task_start(self, task, is_conditional): + # Leave the previous task on screen (as it has changes/errors) + if self._display.verbosity == 0 and 
self.keep: + sys.stdout.write(vt100.restore + vt100.reset + '\n' + vt100.save + vt100.clearline + vt100.underline) + else: + # Do not clear line, since we want to retain the previous output + sys.stdout.write(vt100.restore + vt100.reset + vt100.underline) + + # Reset at the start of each task + self.keep = False + self.shown_title = False + self.hosts = OrderedDict() + self.task = task + self.type = 'task' + + # Enumerate task if not setup (task names are too long for dense output) + if task.get_name() != 'setup': + self.count['task'] += 1 + + # Write the next task on screen (behind the prompt is the previous output) + sys.stdout.write('%s %d.' % (self.type, self.count[self.type])) + sys.stdout.write(vt100.reset) + sys.stdout.flush() + + def v2_playbook_on_handler_task_start(self, task): + # Leave the previous task on screen (as it has changes/errors) + if self._display.verbosity == 0 and self.keep: + sys.stdout.write(vt100.restore + vt100.reset + '\n' + vt100.save + vt100.clearline + vt100.underline) + else: + sys.stdout.write(vt100.restore + vt100.reset + vt100.clearline + vt100.underline) + + # Reset at the start of each handler + self.keep = False + self.shown_title = False + self.hosts = OrderedDict() + self.task = task + self.type = 'handler' + + # Enumerate handler if not setup (handler names may be too long for dense output) + if task.get_name() != 'setup': + self.count[self.type] += 1 + + # Write the next task on screen (behind the prompt is the previous output) + sys.stdout.write('%s %d.' % (self.type, self.count[self.type])) + sys.stdout.write(vt100.reset) + sys.stdout.flush() + + def v2_playbook_on_cleanup_task_start(self, task): + # TBD + sys.stdout.write('cleanup.') + sys.stdout.flush() + + def v2_runner_on_failed(self, result, ignore_errors=False): + self._add_host(result, 'failed') + + def v2_runner_on_ok(self, result): + if result._result.get('changed', False): + self._add_host(result, 'changed') + else: + self._add_host(result, 'ok') + + def v2_runner_on_skipped(self, result): + self._add_host(result, 'skipped') + + def v2_runner_on_unreachable(self, result): + self._add_host(result, 'unreachable') + + def v2_runner_on_include(self, included_file): + pass + + def v2_runner_on_file_diff(self, result, diff): + sys.stdout.write(vt100.bold) + self.super_ref.v2_runner_on_file_diff(result, diff) + sys.stdout.write(vt100.reset) + + def v2_on_file_diff(self, result): + sys.stdout.write(vt100.bold) + self.super_ref.v2_on_file_diff(result) + sys.stdout.write(vt100.reset) + + # Old definition in v2.0 + def v2_playbook_item_on_ok(self, result): + self.v2_runner_item_on_ok(result) + + def v2_runner_item_on_ok(self, result): + if result._result.get('changed', False): + self._add_host(result, 'changed') + else: + self._add_host(result, 'ok') + + # Old definition in v2.0 + def v2_playbook_item_on_failed(self, result): + self.v2_runner_item_on_failed(result) + + def v2_runner_item_on_failed(self, result): + self._add_host(result, 'failed') + + # Old definition in v2.0 + def v2_playbook_item_on_skipped(self, result): + self.v2_runner_item_on_skipped(result) + + def v2_runner_item_on_skipped(self, result): + self._add_host(result, 'skipped') + + def v2_playbook_on_no_hosts_remaining(self): + if self._display.verbosity == 0 and self.keep: + sys.stdout.write(vt100.restore + vt100.reset + '\n' + vt100.save + vt100.clearline) + else: + sys.stdout.write(vt100.restore + vt100.reset + vt100.clearline) + self.keep = False + + sys.stdout.write(vt100.white + vt100.redbg + 'NO MORE HOSTS LEFT') + 
sys.stdout.write(vt100.restore + vt100.reset + '\n' + vt100.save + vt100.clearline) + sys.stdout.flush() + + def v2_playbook_on_include(self, included_file): + pass + + def v2_playbook_on_stats(self, stats): + if self._display.verbosity == 0 and self.keep: + sys.stdout.write(vt100.restore + vt100.reset + '\n' + vt100.save + vt100.clearline) + else: + sys.stdout.write(vt100.restore + vt100.reset + vt100.clearline) + + # In normal mode screen output should be sufficient, summary is redundant + if self._display.verbosity == 0: + return + + sys.stdout.write(vt100.bold + vt100.underline) + sys.stdout.write('SUMMARY') + + sys.stdout.write(vt100.restore + vt100.reset + '\n' + vt100.save + vt100.clearline) + sys.stdout.flush() + + hosts = sorted(stats.processed.keys()) + for h in hosts: + t = stats.summarize(h) + self._display.display( + u"%s : %s %s %s %s %s %s" % ( + hostcolor(h, t), + colorize(u'ok', t['ok'], C.COLOR_OK), + colorize(u'changed', t['changed'], C.COLOR_CHANGED), + colorize(u'unreachable', t['unreachable'], C.COLOR_UNREACHABLE), + colorize(u'failed', t['failures'], C.COLOR_ERROR), + colorize(u'rescued', t['rescued'], C.COLOR_OK), + colorize(u'ignored', t['ignored'], C.COLOR_WARN), + ), + screen_only=True + ) + + +# When using -vv or higher, simply do the default action +if display.verbosity >= 2 or not HAS_OD: + CallbackModule = CallbackModule_default +else: + CallbackModule = CallbackModule_dense diff --git a/plugins/callback/full_skip.py b/plugins/callback/full_skip.py new file mode 100644 index 0000000000..8e3fbfef24 --- /dev/null +++ b/plugins/callback/full_skip.py @@ -0,0 +1,75 @@ +# (c) 2012-2014, Michael DeHaan +# (c) 2017 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = ''' + callback: full_skip + type: stdout + short_description: suppresses tasks if all hosts skipped + description: + - Use this plugin when you do not care about any output for tasks that were completely skipped + deprecated: + why: The 'default' callback plugin now supports this functionality + removed_in: '2.11' + alternative: "'default' callback plugin with 'display_skipped_hosts = no' option" + extends_documentation_fragment: + - default_callback + requirements: + - set as stdout in configuration +''' + +from ansible.plugins.callback.default import CallbackModule as CallbackModule_default + + +class CallbackModule(CallbackModule_default): + + ''' + This is the default callback interface, which simply prints messages + to stdout when new callback events are received. 
+ ''' + + CALLBACK_VERSION = 2.0 + CALLBACK_TYPE = 'stdout' + CALLBACK_NAME = 'community.general.full_skip' + + def v2_runner_on_skipped(self, result): + self.outlines = [] + + def v2_playbook_item_on_skipped(self, result): + self.outlines = [] + + def v2_runner_item_on_skipped(self, result): + self.outlines = [] + + def v2_runner_on_failed(self, result, ignore_errors=False): + self.display() + super(CallbackModule, self).v2_runner_on_failed(result, ignore_errors) + + def v2_playbook_on_task_start(self, task, is_conditional): + self.outlines = [] + self.outlines.append("TASK [%s]" % task.get_name().strip()) + if self._display.verbosity >= 2: + path = task.get_path() + if path: + self.outlines.append("task path: %s" % path) + + def v2_playbook_item_on_ok(self, result): + self.display() + super(CallbackModule, self).v2_playbook_item_on_ok(result) + + def v2_runner_on_ok(self, result): + self.display() + super(CallbackModule, self).v2_runner_on_ok(result) + + def display(self): + if len(self.outlines) == 0: + return + (first, rest) = self.outlines[0], self.outlines[1:] + self._display.banner(first) + for line in rest: + self._display.display(line) + self.outlines = [] diff --git a/plugins/callback/hipchat.py b/plugins/callback/hipchat.py new file mode 100644 index 0000000000..74657e573d --- /dev/null +++ b/plugins/callback/hipchat.py @@ -0,0 +1,227 @@ +# (C) 2014, Matt Martz +# (c) 2017 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = ''' + callback: hipchat + callback_type: notification + requirements: + - whitelist in configuration. + - prettytable (python lib) + short_description: post task events to hipchat + description: + - This callback plugin sends status updates to a HipChat channel during playbook execution. + - Before 2.4 only environment variables were available for configuring this plugin. + options: + token: + description: HipChat API token for v1 or v2 API. + required: True + env: + - name: HIPCHAT_TOKEN + ini: + - section: callback_hipchat + key: token + api_version: + description: HipChat API version, v1 or v2. + required: False + default: v1 + env: + - name: HIPCHAT_API_VERSION + ini: + - section: callback_hipchat + key: api_version + room: + description: HipChat room to post in. + default: ansible + env: + - name: HIPCHAT_ROOM + ini: + - section: callback_hipchat + key: room + from: + description: Name to post as + default: ansible + env: + - name: HIPCHAT_FROM + ini: + - section: callback_hipchat + key: from + notify: + description: Add notify flag to important messages + type: bool + default: True + env: + - name: HIPCHAT_NOTIFY + ini: + - section: callback_hipchat + key: notify + +''' + +import os +import json + +try: + import prettytable + HAS_PRETTYTABLE = True +except ImportError: + HAS_PRETTYTABLE = False + +from ansible.plugins.callback import CallbackBase +from ansible.module_utils.six.moves.urllib.parse import urlencode +from ansible.module_utils.urls import open_url + + +class CallbackModule(CallbackBase): + """This is an example ansible callback plugin that sends status + updates to a HipChat channel during playbook execution. 
+ """ + + CALLBACK_VERSION = 2.0 + CALLBACK_TYPE = 'notification' + CALLBACK_NAME = 'community.general.hipchat' + CALLBACK_NEEDS_WHITELIST = True + + API_V1_URL = 'https://api.hipchat.com/v1/rooms/message' + API_V2_URL = 'https://api.hipchat.com/v2/' + + def __init__(self): + + super(CallbackModule, self).__init__() + + if not HAS_PRETTYTABLE: + self.disabled = True + self._display.warning('The `prettytable` python module is not installed. ' + 'Disabling the HipChat callback plugin.') + self.printed_playbook = False + self.playbook_name = None + self.play = None + + def set_options(self, task_keys=None, var_options=None, direct=None): + super(CallbackModule, self).set_options(task_keys=task_keys, var_options=var_options, direct=direct) + + self.token = self.get_option('token') + self.api_version = self.get_option('api_version') + self.from_name = self.get_option('from') + self.allow_notify = self.get_option('notify') + self.room = self.get_option('room') + + if self.token is None: + self.disabled = True + self._display.warning('HipChat token could not be loaded. The HipChat ' + 'token can be provided using the `HIPCHAT_TOKEN` ' + 'environment variable.') + + # Pick the request handler. + if self.api_version == 'v2': + self.send_msg = self.send_msg_v2 + else: + self.send_msg = self.send_msg_v1 + + def send_msg_v2(self, msg, msg_format='text', color='yellow', notify=False): + """Method for sending a message to HipChat""" + + headers = {'Authorization': 'Bearer %s' % self.token, 'Content-Type': 'application/json'} + + body = {} + body['room_id'] = self.room + body['from'] = self.from_name[:15] # max length is 15 + body['message'] = msg + body['message_format'] = msg_format + body['color'] = color + body['notify'] = self.allow_notify and notify + + data = json.dumps(body) + url = self.API_V2_URL + "room/{room_id}/notification".format(room_id=self.room) + try: + response = open_url(url, data=data, headers=headers, method='POST') + return response.read() + except Exception as ex: + self._display.warning('Could not submit message to hipchat: {0}'.format(ex)) + + def send_msg_v1(self, msg, msg_format='text', color='yellow', notify=False): + """Method for sending a message to HipChat""" + + params = {} + params['room_id'] = self.room + params['from'] = self.from_name[:15] # max length is 15 + params['message'] = msg + params['message_format'] = msg_format + params['color'] = color + params['notify'] = int(self.allow_notify and notify) + + url = ('%s?auth_token=%s' % (self.API_V1_URL, self.token)) + try: + response = open_url(url, data=urlencode(params)) + return response.read() + except Exception as ex: + self._display.warning('Could not submit message to hipchat: {0}'.format(ex)) + + def v2_playbook_on_play_start(self, play): + """Display Playbook and play start messages""" + + self.play = play + name = play.name + # This block sends information about a playbook when it starts + # The playbook object is not immediately available at + # playbook_on_start so we grab it via the play + # + # Displays info about playbook being started by a person on an + # inventory, as well as Tags, Skip Tags and Limits + if not self.printed_playbook: + self.playbook_name, _ = os.path.splitext( + os.path.basename(self.play.playbook.filename)) + host_list = self.play.playbook.inventory.host_list + inventory = os.path.basename(os.path.realpath(host_list)) + self.send_msg("%s: Playbook initiated by %s against %s" % + (self.playbook_name, + self.play.playbook.remote_user, + inventory), notify=True) + self.printed_playbook 
= True + subset = self.play.playbook.inventory._subset + skip_tags = self.play.playbook.skip_tags + self.send_msg("%s:\nTags: %s\nSkip Tags: %s\nLimit: %s" % + (self.playbook_name, + ', '.join(self.play.playbook.only_tags), + ', '.join(skip_tags) if skip_tags else None, + ', '.join(subset) if subset else subset)) + + # This is where we actually say we are starting a play + self.send_msg("%s: Starting play: %s" % + (self.playbook_name, name)) + + def playbook_on_stats(self, stats): + """Display info about playbook statistics""" + hosts = sorted(stats.processed.keys()) + + t = prettytable.PrettyTable(['Host', 'Ok', 'Changed', 'Unreachable', + 'Failures']) + + failures = False + unreachable = False + + for h in hosts: + s = stats.summarize(h) + + if s['failures'] > 0: + failures = True + if s['unreachable'] > 0: + unreachable = True + + t.add_row([h] + [s[k] for k in ['ok', 'changed', 'unreachable', + 'failures']]) + + self.send_msg("%s: Playbook complete" % self.playbook_name, + notify=True) + + if failures or unreachable: + color = 'red' + self.send_msg("%s: Failures detected" % self.playbook_name, + color=color, notify=True) + else: + color = 'green' + + self.send_msg("/code %s:\n%s" % (self.playbook_name, t), color=color) diff --git a/plugins/callback/jabber.py b/plugins/callback/jabber.py new file mode 100644 index 0000000000..efe7d9b245 --- /dev/null +++ b/plugins/callback/jabber.py @@ -0,0 +1,117 @@ +# Copyright (C) 2016 maxn nikolaev.makc@gmail.com +# Copyright (c) 2017 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = ''' + callback: jabber + type: notification + short_description: post task events to a jabber server + description: + - The chatty part of ChatOps with a Hipchat server as a target + - This callback plugin sends status updates to a HipChat channel during playbook execution. + requirements: + - xmpp (python lib https://github.com/ArchipelProject/xmpppy) + options: + server: + description: connection info to jabber server + required: True + env: + - name: JABBER_SERV + user: + description: Jabber user to authenticate as + required: True + env: + - name: JABBER_USER + password: + description: Password for the user to the jabber server + required: True + env: + - name: JABBER_PASS + to: + description: chat identifier that will receive the message + required: True + env: + - name: JABBER_TO +''' + +import os + +HAS_XMPP = True +try: + import xmpp +except ImportError: + HAS_XMPP = False + +from ansible.plugins.callback import CallbackBase + + +class CallbackModule(CallbackBase): + + CALLBACK_VERSION = 2.0 + CALLBACK_TYPE = 'notification' + CALLBACK_NAME = 'community.general.jabber' + CALLBACK_NEEDS_WHITELIST = True + + def __init__(self, display=None): + + super(CallbackModule, self).__init__(display=display) + + if not HAS_XMPP: + self._display.warning("The required python xmpp library (xmpppy) is not installed. 
" + "pip install git+https://github.com/ArchipelProject/xmpppy") + self.disabled = True + + self.serv = os.getenv('JABBER_SERV') + self.j_user = os.getenv('JABBER_USER') + self.j_pass = os.getenv('JABBER_PASS') + self.j_to = os.getenv('JABBER_TO') + + if (self.j_user or self.j_pass or self.serv or self.j_to) is None: + self.disabled = True + self._display.warning('Jabber CallBack wants the JABBER_SERV, JABBER_USER, JABBER_PASS and JABBER_TO environment variables') + + def send_msg(self, msg): + """Send message""" + jid = xmpp.JID(self.j_user) + client = xmpp.Client(self.serv, debug=[]) + client.connect(server=(self.serv, 5222)) + client.auth(jid.getNode(), self.j_pass, resource=jid.getResource()) + message = xmpp.Message(self.j_to, msg) + message.setAttr('type', 'chat') + client.send(message) + client.disconnect() + + def v2_runner_on_ok(self, result): + self._clean_results(result._result, result._task.action) + self.debug = self._dump_results(result._result) + + def v2_playbook_on_task_start(self, task, is_conditional): + self.task = task + + def v2_playbook_on_play_start(self, play): + """Display Playbook and play start messages""" + self.play = play + name = play.name + self.send_msg("Ansible starting play: %s" % (name)) + + def playbook_on_stats(self, stats): + name = self.play + hosts = sorted(stats.processed.keys()) + failures = False + unreachable = False + for h in hosts: + s = stats.summarize(h) + if s['failures'] > 0: + failures = True + if s['unreachable'] > 0: + unreachable = True + + if failures or unreachable: + out = self.debug + self.send_msg("%s: Failures detected \n%s \nHost: %s\n Failed at:\n%s" % (name, self.task, h, out)) + else: + out = self.debug + self.send_msg("Great! \n Playbook %s completed:\n%s \n Last task debug:\n %s" % (name, s, out)) diff --git a/plugins/callback/log_plays.py b/plugins/callback/log_plays.py new file mode 100644 index 0000000000..bce66d0fa5 --- /dev/null +++ b/plugins/callback/log_plays.py @@ -0,0 +1,108 @@ +# (C) 2012, Michael DeHaan, +# (c) 2017 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = ''' + callback: log_plays + type: notification + short_description: write playbook output to log file + description: + - This callback writes playbook output to a file per host in the `/var/log/ansible/hosts` directory + requirements: + - Whitelist in configuration + - A writeable /var/log/ansible/hosts directory by the user executing Ansible on the controller + options: + log_folder: + default: /var/log/ansible/hosts + description: The folder where log files will be created. + env: + - name: ANSIBLE_LOG_FOLDER + ini: + - section: callback_log_plays + key: log_folder +''' + +import os +import time +import json + +from ansible.utils.path import makedirs_safe +from ansible.module_utils._text import to_bytes +from ansible.module_utils.common._collections_compat import MutableMapping +from ansible.parsing.ajson import AnsibleJSONEncoder +from ansible.plugins.callback import CallbackBase + + +# NOTE: in Ansible 1.2 or later general logging is available without +# this plugin, just set ANSIBLE_LOG_PATH as an environment variable +# or log_path in the DEFAULTS section of your ansible configuration +# file. This callback is an example of per hosts logging for those +# that want it. 
+ + +class CallbackModule(CallbackBase): + """ + logs playbook results, per host, in /var/log/ansible/hosts + """ + CALLBACK_VERSION = 2.0 + CALLBACK_TYPE = 'notification' + CALLBACK_NAME = 'community.general.log_plays' + CALLBACK_NEEDS_WHITELIST = True + + TIME_FORMAT = "%b %d %Y %H:%M:%S" + MSG_FORMAT = "%(now)s - %(category)s - %(data)s\n\n" + + def __init__(self): + + super(CallbackModule, self).__init__() + + def set_options(self, task_keys=None, var_options=None, direct=None): + super(CallbackModule, self).set_options(task_keys=task_keys, var_options=var_options, direct=direct) + + self.log_folder = self.get_option("log_folder") + + if not os.path.exists(self.log_folder): + makedirs_safe(self.log_folder) + + def log(self, host, category, data): + if isinstance(data, MutableMapping): + if '_ansible_verbose_override' in data: + # avoid logging extraneous data + data = 'omitted' + else: + data = data.copy() + invocation = data.pop('invocation', None) + data = json.dumps(data, cls=AnsibleJSONEncoder) + if invocation is not None: + data = json.dumps(invocation) + " => %s " % data + + path = os.path.join(self.log_folder, host) + now = time.strftime(self.TIME_FORMAT, time.localtime()) + + msg = to_bytes(self.MSG_FORMAT % dict(now=now, category=category, data=data)) + with open(path, "ab") as fd: + fd.write(msg) + + def runner_on_failed(self, host, res, ignore_errors=False): + self.log(host, 'FAILED', res) + + def runner_on_ok(self, host, res): + self.log(host, 'OK', res) + + def runner_on_skipped(self, host, item=None): + self.log(host, 'SKIPPED', '...') + + def runner_on_unreachable(self, host, res): + self.log(host, 'UNREACHABLE', res) + + def runner_on_async_failed(self, host, res, jid): + self.log(host, 'ASYNC_FAILED', res) + + def playbook_on_import_for_host(self, host, imported_file): + self.log(host, 'IMPORTED', imported_file) + + def playbook_on_not_import_for_host(self, host, missing_file): + self.log(host, 'NOTIMPORTED', missing_file) diff --git a/plugins/callback/logdna.py b/plugins/callback/logdna.py new file mode 100644 index 0000000000..4899ecfc70 --- /dev/null +++ b/plugins/callback/logdna.py @@ -0,0 +1,207 @@ +# (c) 2018, Samir Musali +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = ''' + callback: logdna + callback_type: aggregate + short_description: Sends playbook logs to LogDNA + description: + - This callback will report logs from playbook actions, tasks, and events to LogDNA (https://app.logdna.com) + requirements: + - LogDNA Python Library (https://github.com/logdna/python) + - whitelisting in configuration + options: + conf_key: + required: True + description: LogDNA Ingestion Key + type: string + env: + - name: LOGDNA_INGESTION_KEY + ini: + - section: callback_logdna + key: conf_key + plugin_ignore_errors: + required: False + description: Whether to ignore errors on failing or not + type: boolean + env: + - name: ANSIBLE_IGNORE_ERRORS + ini: + - section: callback_logdna + key: plugin_ignore_errors + default: False + conf_hostname: + required: False + description: Alternative Host Name; the current host name by default + type: string + env: + - name: LOGDNA_HOSTNAME + ini: + - section: callback_logdna + key: conf_hostname + conf_tags: + required: False + description: Tags + type: string + env: + - name: LOGDNA_TAGS + ini: + - section: callback_logdna + key: conf_tags + default: ansible +''' + +import logging 
+import json +import socket +from uuid import getnode +from ansible.plugins.callback import CallbackBase +from ansible.parsing.ajson import AnsibleJSONEncoder + +try: + from logdna import LogDNAHandler + HAS_LOGDNA = True +except ImportError: + HAS_LOGDNA = False + + +# Getting MAC Address of system: +def get_mac(): + mac = "%012x" % getnode() + return ":".join(map(lambda index: mac[index:index + 2], range(int(len(mac) / 2)))) + + +# Getting hostname of system: +def get_hostname(): + return str(socket.gethostname()).split('.local')[0] + + +# Getting IP of system: +def get_ip(): + try: + return socket.gethostbyname(get_hostname()) + except Exception: + s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) + try: + s.connect(('10.255.255.255', 1)) + IP = s.getsockname()[0] + except Exception: + IP = '127.0.0.1' + finally: + s.close() + return IP + + +# Is it JSON? +def isJSONable(obj): + try: + json.dumps(obj, sort_keys=True, cls=AnsibleJSONEncoder) + return True + except Exception: + return False + + +# LogDNA Callback Module: +class CallbackModule(CallbackBase): + + CALLBACK_VERSION = 0.1 + CALLBACK_TYPE = 'aggregate' + CALLBACK_NAME = 'community.general.logdna' + CALLBACK_NEEDS_WHITELIST = True + + def __init__(self, display=None): + super(CallbackModule, self).__init__(display=display) + + self.disabled = True + self.playbook_name = None + self.playbook = None + self.conf_key = None + self.plugin_ignore_errors = None + self.conf_hostname = None + self.conf_tags = None + + def set_options(self, task_keys=None, var_options=None, direct=None): + super(CallbackModule, self).set_options(task_keys=task_keys, var_options=var_options, direct=direct) + + self.conf_key = self.get_option('conf_key') + self.plugin_ignore_errors = self.get_option('plugin_ignore_errors') + self.conf_hostname = self.get_option('conf_hostname') + self.conf_tags = self.get_option('conf_tags') + self.mac = get_mac() + self.ip = get_ip() + + if self.conf_hostname is None: + self.conf_hostname = get_hostname() + + self.conf_tags = self.conf_tags.split(',') + + if HAS_LOGDNA: + self.log = logging.getLogger('logdna') + self.log.setLevel(logging.INFO) + self.options = {'hostname': self.conf_hostname, 'mac': self.mac, 'index_meta': True} + self.log.addHandler(LogDNAHandler(self.conf_key, self.options)) + self.disabled = False + else: + self.disabled = True + self._display.warning('WARNING:\nPlease, install LogDNA Python Package: `pip install logdna`') + + def metaIndexing(self, meta): + invalidKeys = [] + ninvalidKeys = 0 + for key, value in meta.items(): + if not isJSONable(value): + invalidKeys.append(key) + ninvalidKeys += 1 + if ninvalidKeys > 0: + for key in invalidKeys: + del meta[key] + meta['__errors'] = 'These keys have been sanitized: ' + ', '.join(invalidKeys) + return meta + + def sanitizeJSON(self, data): + try: + return json.loads(json.dumps(data, sort_keys=True, cls=AnsibleJSONEncoder)) + except Exception: + return {'warnings': ['JSON Formatting Issue', json.dumps(data, sort_keys=True, cls=AnsibleJSONEncoder)]} + + def flush(self, log, options): + if HAS_LOGDNA: + self.log.info(json.dumps(log), options) + + def sendLog(self, host, category, logdata): + options = {'app': 'ansible', 'meta': {'playbook': self.playbook_name, 'host': host, 'category': category}} + logdata['info'].pop('invocation', None) + warnings = logdata['info'].pop('warnings', None) + if warnings is not None: + self.flush({'warn': warnings}, options) + self.flush(logdata, options) + + def v2_playbook_on_start(self, playbook): + self.playbook = 
playbook + self.playbook_name = playbook._file_name + + def v2_playbook_on_stats(self, stats): + result = dict() + for host in stats.processed.keys(): + result[host] = stats.summarize(host) + self.sendLog(self.conf_hostname, 'STATS', {'info': self.sanitizeJSON(result)}) + + def runner_on_failed(self, host, res, ignore_errors=False): + if self.plugin_ignore_errors: + ignore_errors = self.plugin_ignore_errors + self.sendLog(host, 'FAILED', {'info': self.sanitizeJSON(res), 'ignore_errors': ignore_errors}) + + def runner_on_ok(self, host, res): + self.sendLog(host, 'OK', {'info': self.sanitizeJSON(res)}) + + def runner_on_unreachable(self, host, res): + self.sendLog(host, 'UNREACHABLE', {'info': self.sanitizeJSON(res)}) + + def runner_on_async_failed(self, host, res, jid): + self.sendLog(host, 'ASYNC_FAILED', {'info': self.sanitizeJSON(res), 'job_id': jid}) + + def runner_on_async_ok(self, host, res, jid): + self.sendLog(host, 'ASYNC_OK', {'info': self.sanitizeJSON(res), 'job_id': jid}) diff --git a/plugins/callback/logentries.py b/plugins/callback/logentries.py new file mode 100644 index 0000000000..67e0b1ce45 --- /dev/null +++ b/plugins/callback/logentries.py @@ -0,0 +1,329 @@ +# (c) 2015, Logentries.com, Jimmy Tang +# (c) 2017 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = ''' + callback: logentries + type: notification + short_description: Sends events to Logentries + description: + - This callback plugin will generate JSON objects and send them to Logentries via TCP for auditing/debugging purposes. + - Before 2.4, if you wanted to use an ini configuration, the file must be placed in the same directory as this plugin and named logentries.ini + - In 2.4 and above you can just put it in the main Ansible configuration file. 
+ requirements: + - whitelisting in configuration + - certifi (python library) + - flatdict (python library), if you want to use the 'flatten' option + options: + api: + description: URI to the Logentries API + env: + - name: LOGENTRIES_API + default: data.logentries.com + ini: + - section: callback_logentries + key: api + port: + description: HTTP port to use when connecting to the API + env: + - name: LOGENTRIES_PORT + default: 80 + ini: + - section: callback_logentries + key: port + tls_port: + description: Port to use when connecting to the API when TLS is enabled + env: + - name: LOGENTRIES_TLS_PORT + default: 443 + ini: + - section: callback_logentries + key: tls_port + token: + description: The logentries "TCP token" + env: + - name: LOGENTRIES_ANSIBLE_TOKEN + required: True + ini: + - section: callback_logentries + key: token + use_tls: + description: + - Toggle to decide whether to use TLS to encrypt the communications with the API server + env: + - name: LOGENTRIES_USE_TLS + default: False + type: boolean + ini: + - section: callback_logentries + key: use_tls + flatten: + description: flatten complex data structures into a single dictionary with complex keys + type: boolean + default: False + env: + - name: LOGENTRIES_FLATTEN + ini: + - section: callback_logentries + key: flatten +''' + +EXAMPLES = ''' +examples: > + To enable, add this to your ansible.cfg file in the defaults block + + [defaults] + callback_whitelist = logentries + + Either set the environment variables + export LOGENTRIES_API=data.logentries.com + export LOGENTRIES_PORT=10000 + export LOGENTRIES_ANSIBLE_TOKEN=dd21fc88-f00a-43ff-b977-e3a4233c53af + + Or in the main Ansible config file + [callback_logentries] + api = data.logentries.com + port = 10000 + tls_port = 20000 + use_tls = no + token = dd21fc88-f00a-43ff-b977-e3a4233c53af + flatten = False +''' + +import os +import socket +import random +import time +import uuid + +try: + import certifi + HAS_CERTIFI = True +except ImportError: + HAS_CERTIFI = False + +try: + import flatdict + HAS_FLATDICT = True +except ImportError: + HAS_FLATDICT = False + +from ansible.module_utils._text import to_bytes, to_text +from ansible.plugins.callback import CallbackBase + +# Todo: +# * Better formatting of output before sending out to logentries data/api nodes. 
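+# Rough sketch of the framing implemented below (hypothetical token value,
+# illustrative only): put() replaces embedded newlines with the Unicode line
+# separator so a multi-line event stays a single Logentries entry, and the
+# callback's emit() prefixes every record with the TCP token:
+#
+#   line = u'%s %s\n' % (token, text.replace(u'\n', u'\u2028'))
+#   conn.send(line.encode('utf-8'))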
+ + +class PlainTextSocketAppender(object): + def __init__(self, display, LE_API='data.logentries.com', LE_PORT=80, LE_TLS_PORT=443): + + self.LE_API = LE_API + self.LE_PORT = LE_PORT + self.LE_TLS_PORT = LE_TLS_PORT + self.MIN_DELAY = 0.1 + self.MAX_DELAY = 10 + # Error message displayed when an incorrect Token has been detected + self.INVALID_TOKEN = "\n\nIt appears the LOGENTRIES_TOKEN parameter you entered is incorrect!\n\n" + # Unicode Line separator character \u2028 + self.LINE_SEP = u'\u2028' + + self._display = display + self._conn = None + + def open_connection(self): + self._conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + self._conn.connect((self.LE_API, self.LE_PORT)) + + def reopen_connection(self): + self.close_connection() + + root_delay = self.MIN_DELAY + while True: + try: + self.open_connection() + return + except Exception as e: + self._display.vvvv(u"Unable to connect to Logentries: %s" % to_text(e)) + + root_delay *= 2 + if root_delay > self.MAX_DELAY: + root_delay = self.MAX_DELAY + + wait_for = root_delay + random.uniform(0, root_delay) + + try: + self._display.vvvv("sleeping %s before retry" % wait_for) + time.sleep(wait_for) + except KeyboardInterrupt: + raise + + def close_connection(self): + if self._conn is not None: + self._conn.close() + + def put(self, data): + # Replace newlines with Unicode line separator + # for multi-line events + data = to_text(data, errors='surrogate_or_strict') + multiline = data.replace(u'\n', self.LINE_SEP) + multiline += u"\n" + # Send data, reconnect if needed + while True: + try: + self._conn.send(to_bytes(multiline, errors='surrogate_or_strict')) + except socket.error: + self.reopen_connection() + continue + break + + self.close_connection() + + +try: + import ssl + HAS_SSL = True +except ImportError: # for systems without TLS support. + SocketAppender = PlainTextSocketAppender + HAS_SSL = False +else: + + class TLSSocketAppender(PlainTextSocketAppender): + def open_connection(self): + sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + sock = ssl.wrap_socket( + sock=sock, + keyfile=None, + certfile=None, + server_side=False, + cert_reqs=ssl.CERT_REQUIRED, + ssl_version=getattr( + ssl, 'PROTOCOL_TLSv1_2', ssl.PROTOCOL_TLSv1), + ca_certs=certifi.where(), + do_handshake_on_connect=True, + suppress_ragged_eofs=True, ) + sock.connect((self.LE_API, self.LE_TLS_PORT)) + self._conn = sock + + SocketAppender = TLSSocketAppender + + +class CallbackModule(CallbackBase): + CALLBACK_VERSION = 2.0 + CALLBACK_TYPE = 'notification' + CALLBACK_NAME = 'community.general.logentries' + CALLBACK_NEEDS_WHITELIST = True + + def __init__(self): + + # TODO: allow for alternate posting methods (REST/UDP/agent/etc) + super(CallbackModule, self).__init__() + + # verify dependencies + if not HAS_SSL: + self._display.warning("Unable to import ssl module. 
Will send over port 80.") + + if not HAS_CERTIFI: + self.disabled = True + self._display.warning('The `certifi` python module is not installed.\nDisabling the Logentries callback plugin.') + + self.le_jobid = str(uuid.uuid4()) + + # FIXME: make configurable, move to options + self.timeout = 10 + + def set_options(self, task_keys=None, var_options=None, direct=None): + + super(CallbackModule, self).set_options(task_keys=task_keys, var_options=var_options, direct=direct) + + # get options + try: + self.api_url = self.get_option('api') + self.api_port = self.get_option('port') + self.api_tls_port = self.get_option('tls_port') + self.use_tls = self.get_option('use_tls') + self.flatten = self.get_option('flatten') + except KeyError as e: + self._display.warning(u"Missing option for Logentries callback plugin: %s" % to_text(e)) + self.disabled = True + + try: + self.token = self.get_option('token') + except KeyError as e: + self._display.warning('Logentries token was not provided, this is required for this callback to operate, disabling') + self.disabled = True + + if self.flatten and not HAS_FLATDICT: + self.disabled = True + self._display.warning('You have chosen to flatten and the `flatdict` python module is not installed.\nDisabling the Logentries callback plugin.') + + self._initialize_connections() + + def _initialize_connections(self): + + if not self.disabled: + if self.use_tls: + self._display.vvvv("Connecting to %s:%s with TLS" % (self.api_url, self.api_tls_port)) + self._appender = TLSSocketAppender(display=self._display, LE_API=self.api_url, LE_TLS_PORT=self.api_tls_port) + else: + self._display.vvvv("Connecting to %s:%s" % (self.api_url, self.api_port)) + self._appender = PlainTextSocketAppender(display=self._display, LE_API=self.api_url, LE_PORT=self.api_port) + self._appender.reopen_connection() + + def emit_formatted(self, record): + if self.flatten: + results = flatdict.FlatDict(record) + self.emit(self._dump_results(results)) + else: + self.emit(self._dump_results(record)) + + def emit(self, record): + msg = record.rstrip('\n') + msg = "{0} {1}".format(self.token, msg) + self._appender.put(msg) + self._display.vvvv("Sent event to logentries") + + def _set_info(self, host, res): + return {'le_jobid': self.le_jobid, 'hostname': host, 'results': res} + + def runner_on_ok(self, host, res): + results = self._set_info(host, res) + results['status'] = 'OK' + self.emit_formatted(results) + + def runner_on_failed(self, host, res, ignore_errors=False): + results = self._set_info(host, res) + results['status'] = 'FAILED' + self.emit_formatted(results) + + def runner_on_skipped(self, host, item=None): + results = self._set_info(host, item) + del results['results'] + results['status'] = 'SKIPPED' + self.emit_formatted(results) + + def runner_on_unreachable(self, host, res): + results = self._set_info(host, res) + results['status'] = 'UNREACHABLE' + self.emit_formatted(results) + + def runner_on_async_failed(self, host, res, jid): + results = self._set_info(host, res) + results['jid'] = jid + results['status'] = 'ASYNC_FAILED' + self.emit_formatted(results) + + def v2_playbook_on_play_start(self, play): + results = {} + results['le_jobid'] = self.le_jobid + results['started_by'] = os.getlogin() + if play.name: + results['play'] = play.name + results['hosts'] = play.hosts + self.emit_formatted(results) + + def playbook_on_stats(self, stats): + """ close connection """ + self._appender.close_connection() diff --git a/plugins/callback/logstash.py b/plugins/callback/logstash.py new file mode 
100644 index 0000000000..1383fab6b9 --- /dev/null +++ b/plugins/callback/logstash.py @@ -0,0 +1,228 @@ +# (C) 2016, Ievgen Khmelenko +# (C) 2017 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = ''' + callback: logstash + type: notification + short_description: Sends events to Logstash + description: + - This callback will report facts and task events to Logstash https://www.elastic.co/products/logstash + requirements: + - whitelisting in configuration + - logstash (python library) + options: + server: + description: Address of the Logstash server + env: + - name: LOGSTASH_SERVER + default: localhost + port: + description: Port on which logstash is listening + env: + - name: LOGSTASH_PORT + default: 5000 + type: + description: Message type + env: + - name: LOGSTASH_TYPE + default: ansible +''' + +import os +import json +import socket +import uuid +from datetime import datetime + +import logging + +try: + import logstash + HAS_LOGSTASH = True +except ImportError: + HAS_LOGSTASH = False + +from ansible.plugins.callback import CallbackBase + + +class CallbackModule(CallbackBase): + """ + ansible logstash callback plugin + ansible.cfg: + callback_plugins = + callback_whitelist = logstash + and put the plugin in + + logstash config: + input { + tcp { + port => 5000 + codec => json + } + } + + Requires: + python-logstash + + This plugin makes use of the following environment variables: + LOGSTASH_SERVER (optional): defaults to localhost + LOGSTASH_PORT (optional): defaults to 5000 + LOGSTASH_TYPE (optional): defaults to ansible + """ + + CALLBACK_VERSION = 2.0 + CALLBACK_TYPE = 'aggregate' + CALLBACK_NAME = 'community.general.logstash' + CALLBACK_NEEDS_WHITELIST = True + + def __init__(self): + super(CallbackModule, self).__init__() + + if not HAS_LOGSTASH: + self.disabled = True + self._display.warning("The required python-logstash is not installed. 
" + "pip install python-logstash") + else: + self.logger = logging.getLogger('python-logstash-logger') + self.logger.setLevel(logging.DEBUG) + + self.handler = logstash.TCPLogstashHandler( + os.getenv('LOGSTASH_SERVER', 'localhost'), + int(os.getenv('LOGSTASH_PORT', 5000)), + version=1, + message_type=os.getenv('LOGSTASH_TYPE', 'ansible') + ) + + self.logger.addHandler(self.handler) + self.hostname = socket.gethostname() + self.session = str(uuid.uuid1()) + self.errors = 0 + self.start_time = datetime.utcnow() + + def v2_playbook_on_start(self, playbook): + self.playbook = playbook._file_name + data = { + 'status': "OK", + 'host': self.hostname, + 'session': self.session, + 'ansible_type': "start", + 'ansible_playbook': self.playbook, + } + self.logger.info("ansible start", extra=data) + + def v2_playbook_on_stats(self, stats): + end_time = datetime.utcnow() + runtime = end_time - self.start_time + summarize_stat = {} + for host in stats.processed.keys(): + summarize_stat[host] = stats.summarize(host) + + if self.errors == 0: + status = "OK" + else: + status = "FAILED" + + data = { + 'status': status, + 'host': self.hostname, + 'session': self.session, + 'ansible_type': "finish", + 'ansible_playbook': self.playbook, + 'ansible_playbook_duration': runtime.total_seconds(), + 'ansible_result': json.dumps(summarize_stat), + } + self.logger.info("ansible stats", extra=data) + + def v2_runner_on_ok(self, result, **kwargs): + data = { + 'status': "OK", + 'host': self.hostname, + 'session': self.session, + 'ansible_type': "task", + 'ansible_playbook': self.playbook, + 'ansible_host': result._host.name, + 'ansible_task': result._task, + 'ansible_result': self._dump_results(result._result) + } + self.logger.info("ansible ok", extra=data) + + def v2_runner_on_skipped(self, result, **kwargs): + data = { + 'status': "SKIPPED", + 'host': self.hostname, + 'session': self.session, + 'ansible_type': "task", + 'ansible_playbook': self.playbook, + 'ansible_task': result._task, + 'ansible_host': result._host.name + } + self.logger.info("ansible skipped", extra=data) + + def v2_playbook_on_import_for_host(self, result, imported_file): + data = { + 'status': "IMPORTED", + 'host': self.hostname, + 'session': self.session, + 'ansible_type': "import", + 'ansible_playbook': self.playbook, + 'ansible_host': result._host.name, + 'imported_file': imported_file + } + self.logger.info("ansible import", extra=data) + + def v2_playbook_on_not_import_for_host(self, result, missing_file): + data = { + 'status': "NOT IMPORTED", + 'host': self.hostname, + 'session': self.session, + 'ansible_type': "import", + 'ansible_playbook': self.playbook, + 'ansible_host': result._host.name, + 'missing_file': missing_file + } + self.logger.info("ansible import", extra=data) + + def v2_runner_on_failed(self, result, **kwargs): + data = { + 'status': "FAILED", + 'host': self.hostname, + 'session': self.session, + 'ansible_type': "task", + 'ansible_playbook': self.playbook, + 'ansible_host': result._host.name, + 'ansible_task': result._task, + 'ansible_result': self._dump_results(result._result) + } + self.errors += 1 + self.logger.error("ansible failed", extra=data) + + def v2_runner_on_unreachable(self, result, **kwargs): + data = { + 'status': "UNREACHABLE", + 'host': self.hostname, + 'session': self.session, + 'ansible_type': "task", + 'ansible_playbook': self.playbook, + 'ansible_host': result._host.name, + 'ansible_task': result._task, + 'ansible_result': self._dump_results(result._result) + } + self.logger.error("ansible 
unreachable", extra=data) + + def v2_runner_on_async_failed(self, result, **kwargs): + data = { + 'status': "FAILED", + 'host': self.hostname, + 'session': self.session, + 'ansible_type': "task", + 'ansible_playbook': self.playbook, + 'ansible_host': result._host.name, + 'ansible_task': result._task, + 'ansible_result': self._dump_results(result._result) + } + self.errors += 1 + self.logger.error("ansible async", extra=data) diff --git a/plugins/callback/mail.py b/plugins/callback/mail.py new file mode 100644 index 0000000000..247f7671fc --- /dev/null +++ b/plugins/callback/mail.py @@ -0,0 +1,227 @@ +# -*- coding: utf-8 -*- + +# Copyright: (c) 2012, Dag Wieers +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = ''' +callback: mail +type: notification +short_description: Sends failure events via email +description: +- This callback will report failures via email +author: +- Dag Wieers (@dagwieers) +requirements: +- whitelisting in configuration +options: + mta: + description: Mail Transfer Agent, server that accepts SMTP + env: + - name: SMTPHOST + ini: + - section: callback_mail + key: smtphost + default: localhost + mtaport: + description: Mail Transfer Agent Port, port at which server SMTP + ini: + - section: callback_mail + key: smtpport + default: 25 + to: + description: Mail recipient + ini: + - section: callback_mail + key: to + default: root + sender: + description: Mail sender + ini: + - section: callback_mail + key: sender + cc: + description: CC'd recipient + ini: + - section: callback_mail + key: cc + bcc: + description: BCC'd recipient + ini: + - section: callback_mail + key: bcc +note: +- "TODO: expand configuration options now that plugins can leverage Ansible's configuration" +''' + +import json +import os +import re +import smtplib + +from ansible.module_utils.six import string_types +from ansible.module_utils._text import to_bytes +from ansible.parsing.ajson import AnsibleJSONEncoder +from ansible.plugins.callback import CallbackBase + + +class CallbackModule(CallbackBase): + ''' This Ansible callback plugin mails errors to interested parties. 
+    CALLBACK_VERSION = 2.0
+    CALLBACK_TYPE = 'notification'
+    CALLBACK_NAME = 'community.general.mail'
+    CALLBACK_NEEDS_WHITELIST = True
+
+    def __init__(self, display=None):
+        super(CallbackModule, self).__init__(display=display)
+        self.sender = None
+        self.to = 'root'
+        self.smtphost = os.getenv('SMTPHOST', 'localhost')
+        self.smtpport = 25
+        self.cc = None
+        self.bcc = None
+
+    def set_options(self, task_keys=None, var_options=None, direct=None):
+
+        super(CallbackModule, self).set_options(task_keys=task_keys, var_options=var_options, direct=direct)
+
+        self.sender = self.get_option('sender')
+        self.to = self.get_option('to')
+        self.smtphost = self.get_option('mta')
+        self.smtpport = int(self.get_option('mtaport'))
+        self.cc = self.get_option('cc')
+        self.bcc = self.get_option('bcc')
+
+    def mail(self, subject='Ansible error mail', body=None):
+        if body is None:
+            body = subject
+
+        smtp = smtplib.SMTP(self.smtphost, port=self.smtpport)
+
+        b_sender = to_bytes(self.sender)
+        b_to = to_bytes(self.to)
+        b_cc = to_bytes(self.cc)
+        b_bcc = to_bytes(self.bcc)
+        b_subject = to_bytes(subject)
+        b_body = to_bytes(body)
+
+        b_content = b'From: %s\n' % b_sender
+        b_content += b'To: %s\n' % b_to
+        if self.cc:
+            b_content += b'Cc: %s\n' % b_cc
+        b_content += b'Subject: %s\n\n' % b_subject
+        b_content += b_body
+
+        b_addresses = b_to.split(b',')
+        if self.cc:
+            b_addresses += b_cc.split(b',')
+        if self.bcc:
+            b_addresses += b_bcc.split(b',')
+
+        for b_address in b_addresses:
+            smtp.sendmail(b_sender, b_address, b_content)
+
+        smtp.quit()
+
+    def subject_msg(self, multiline, failtype, linenr):
+        return '%s: %s' % (failtype, multiline.strip('\r\n').splitlines()[linenr])
+
+    def indent(self, multiline, indent=8):
+        return re.sub('^', ' ' * indent, multiline, flags=re.MULTILINE)
+
+    def body_blob(self, multiline, texttype):
+        ''' Turn some text output into a well-indented block for sending in a mail body '''
+        intro = 'with the following %s:\n\n' % texttype
+        blob = ''
+        for line in multiline.strip('\r\n').splitlines():
+            blob += '%s\n' % line
+        return intro + self.indent(blob) + '\n'
+
+    def mail_result(self, result, failtype):
+        host = result._host.get_name()
+        if not self.sender:
+            self.sender = '"Ansible: %s" <root>' % host
+
+        # Add subject
+        if self.itembody:
+            subject = self.itemsubject
+        elif result._result.get('failed_when_result') is True:
+            subject = "Failed due to 'failed_when' condition"
+        elif result._result.get('msg'):
+            subject = self.subject_msg(result._result['msg'], failtype, 0)
+        elif result._result.get('stderr'):
+            subject = self.subject_msg(result._result['stderr'], failtype, -1)
+        elif result._result.get('stdout'):
+            subject = self.subject_msg(result._result['stdout'], failtype, -1)
+        elif result._result.get('exception'):  # Unrelated exceptions are added to output :-/
+            subject = self.subject_msg(result._result['exception'], failtype, -1)
+        else:
+            subject = '%s: %s' % (failtype, result._task.name or result._task.action)
+
+        # Make playbook name visible (e.g. in Outlook/Gmail condensed view)
+        body = 'Playbook: %s\n' % os.path.basename(self.playbook._file_name)
+        if result._task.name:
+            body += 'Task: %s\n' % result._task.name
+        body += 'Module: %s\n' % result._task.action
+        body += 'Host: %s\n' % host
+        body += '\n'
+
+        # Add task information (as much as possible)
+        body += 'The following task failed:\n\n'
+        if 'invocation' in result._result:
+            body += self.indent('%s: %s\n' % (result._task.action, json.dumps(result._result['invocation']['module_args'], indent=4)))
+        elif result._task.name:
+            body += self.indent('%s (%s)\n' % (result._task.name, result._task.action))
+        else:
+            body += self.indent('%s\n' % result._task.action)
+        body += '\n'
+
+        # Add item / message
+        if self.itembody:
+            body += self.itembody
+        elif result._result.get('failed_when_result') is True:
+            body += "due to the following condition:\n\n" + self.indent('failed_when:\n- ' + '\n- '.join(result._task.failed_when)) + '\n\n'
+        elif result._result.get('msg'):
+            body += self.body_blob(result._result['msg'], 'message')
+
+        # Add stdout / stderr / exception / warnings / deprecations
+        if result._result.get('stdout'):
+            body += self.body_blob(result._result['stdout'], 'standard output')
+        if result._result.get('stderr'):
+            body += self.body_blob(result._result['stderr'], 'error output')
+        if result._result.get('exception'):  # Unrelated exceptions are added to output :-/
+            body += self.body_blob(result._result['exception'], 'exception')
+        if result._result.get('warnings'):
+            for i in range(len(result._result.get('warnings'))):
+                body += self.body_blob(result._result['warnings'][i], 'warning %d' % (i + 1))
+        if result._result.get('deprecations'):
+            for i in range(len(result._result.get('deprecations'))):
+                body += self.body_blob(result._result['deprecations'][i], 'deprecation %d' % (i + 1))
+
+        body += 'and a complete dump of the error:\n\n'
+        body += self.indent('%s: %s' % (failtype, json.dumps(result._result, cls=AnsibleJSONEncoder, indent=4)))
+
+        self.mail(subject=subject, body=body)
+
+    def v2_playbook_on_start(self, playbook):
+        self.playbook = playbook
+        self.itembody = ''
+
+    def v2_runner_on_failed(self, result, ignore_errors=False):
+        if ignore_errors:
+            return
+
+        self.mail_result(result, 'Failed')
+
+    def v2_runner_on_unreachable(self, result):
+        self.mail_result(result, 'Unreachable')
+
+    def v2_runner_on_async_failed(self, result):
+        self.mail_result(result, 'Async failure')
+
+    def v2_runner_item_on_failed(self, result):
+        # Pass item information to task failure
+        self.itemsubject = result._result['msg']
+        self.itembody += self.body_blob(json.dumps(result._result, cls=AnsibleJSONEncoder, indent=4), "failed item dump '%(item)s'" % result._result)
diff --git a/plugins/callback/nrdp.py b/plugins/callback/nrdp.py
new file mode 100644
index 0000000000..a814a41cf3
--- /dev/null
+++ b/plugins/callback/nrdp.py
@@ -0,0 +1,188 @@
+# -*- coding: utf-8 -*-
+# (c) 2018 Remi Verchere
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type

+DOCUMENTATION = '''
+    callback: nrdp
+    type: notification
+    author: "Remi VERCHERE (@rverchere)"
+    short_description: post task results to a Nagios server through NRDP
+    description:
+        - This callback sends playbook results to Nagios.
+        - Nagios must be configured to receive passive events through NRDP.
+        - The passive check is sent to a dedicated host/service for Ansible.
+    options:
+        url:
+            description: URL of the NRDP server.
+            required: True
+            env:
+                - name: NRDP_URL
+            ini:
+                - section: callback_nrdp
+                  key: url
+        validate_certs:
+            description: (bool) validate the SSL certificate of the NRDP server. (For HTTPS URLs)
+            env:
+                - name: NRDP_VALIDATE_CERTS
+            ini:
+                - section: callback_nrdp
+                  key: validate_nrdp_certs
+                - section: callback_nrdp
+                  key: validate_certs
+            default: False
+            aliases: [ validate_nrdp_certs ]
+        token:
+            description: token allowed to push NRDP events.
+            required: True
+            env:
+                - name: NRDP_TOKEN
+            ini:
+                - section: callback_nrdp
+                  key: token
+        hostname:
+            description: hostname the passive check is linked to.
+            required: True
+            env:
+                - name: NRDP_HOSTNAME
+            ini:
+                - section: callback_nrdp
+                  key: hostname
+        servicename:
+            description: service the passive check is linked to.
+            required: True
+            env:
+                - name: NRDP_SERVICENAME
+            ini:
+                - section: callback_nrdp
+                  key: servicename
+'''
+
+import os
+import json
+
+from ansible.module_utils.six.moves.urllib.parse import urlencode
+from ansible.module_utils._text import to_bytes
+from ansible.module_utils.urls import open_url
+from ansible.plugins.callback import CallbackBase
+
+
+class CallbackModule(CallbackBase):
+    '''
+    Send ansible-playbook results to a Nagios server using the NRDP protocol.
+    '''
+
+    CALLBACK_VERSION = 2.0
+    CALLBACK_TYPE = 'notification'
+    CALLBACK_NAME = 'community.general.nrdp'
+    CALLBACK_NEEDS_WHITELIST = True
+
+    # Nagios states
+    OK = 0
+    WARNING = 1
+    CRITICAL = 2
+    UNKNOWN = 3
+
+    def __init__(self):
+        super(CallbackModule, self).__init__()
+
+        self.printed_playbook = False
+        self.playbook_name = None
+        self.play = None
+
+    def set_options(self, task_keys=None, var_options=None, direct=None):
+        super(CallbackModule, self).set_options(task_keys=task_keys, var_options=var_options, direct=direct)
+
+        self.url = self.get_option('url')
+        if not self.url.endswith('/'):
+            self.url += '/'
+        self.token = self.get_option('token')
+        self.hostname = self.get_option('hostname')
+        self.servicename = self.get_option('servicename')
+        self.validate_nrdp_certs = self.get_option('validate_certs')
+
+        # Disable the plugin if any of the required options is missing
+        if None in (self.url, self.token, self.hostname, self.servicename):
+            self._display.warning("NRDP callback wants the NRDP_URL,"
+                                  " NRDP_TOKEN, NRDP_HOSTNAME,"
+                                  " NRDP_SERVICENAME"
+                                  " environment variables."
+                                  " The NRDP callback plugin is disabled.")
+            self.disabled = True
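+
+    # Illustrative configuration sketch (not part of the original plugin),
+    # mirroring the EXAMPLES blocks of the splunk/sumologic plugins later in
+    # this patch; section and key names come from DOCUMENTATION above, the
+    # values are made up:
+    #
+    #   [defaults]
+    #   callback_whitelist = nrdp
+    #
+    #   [callback_nrdp]
+    #   url = https://nagios.example.com/nrdp/
+    #   token = secret-token
+    #   hostname = ansible-controller
+    #   servicename = ansible-playbooks
+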
+ " The NRDP callback plugin is disabled.") + self.disabled = True + + def _send_nrdp(self, state, msg): + ''' + nrpd service check send XMLDATA like this: + + + + somehost + someservice + 1 + WARNING: Danger Will Robinson!|perfdata + + + ''' + xmldata = "\n" + xmldata += "\n" + xmldata += "\n" + xmldata += "%s\n" % self.hostname + xmldata += "%s\n" % self.servicename + xmldata += "%d\n" % state + xmldata += "%s\n" % msg + xmldata += "\n" + xmldata += "\n" + + body = { + 'cmd': 'submitcheck', + 'token': self.token, + 'XMLDATA': bytes(xmldata) + } + + try: + response = open_url(self.url, + data=urlencode(body), + method='POST', + validate_certs=self.validate_nrdp_certs) + return response.read() + except Exception as ex: + self._display.warning("NRDP callback cannot send result {0}".format(ex)) + + def v2_playbook_on_play_start(self, play): + ''' + Display Playbook and play start messages + ''' + self.play = play + + def v2_playbook_on_stats(self, stats): + ''' + Display info about playbook statistics + ''' + name = self.play + gstats = "" + hosts = sorted(stats.processed.keys()) + critical = warning = 0 + for host in hosts: + stat = stats.summarize(host) + gstats += "'%s_ok'=%d '%s_changed'=%d \ + '%s_unreachable'=%d '%s_failed'=%d " % \ + (host, stat['ok'], host, stat['changed'], + host, stat['unreachable'], host, stat['failures']) + # Critical when failed tasks or unreachable host + critical += stat['failures'] + critical += stat['unreachable'] + # Warning when changed tasks + warning += stat['changed'] + + msg = "%s | %s" % (name, gstats) + if critical: + # Send Critical + self._send_nrdp(self.CRITICAL, msg) + elif warning: + # Send Warning + self._send_nrdp(self.WARNING, msg) + else: + # Send OK + self._send_nrdp(self.OK, msg) diff --git a/plugins/callback/null.py b/plugins/callback/null.py new file mode 100644 index 0000000000..d8ae7a8526 --- /dev/null +++ b/plugins/callback/null.py @@ -0,0 +1,29 @@ +# (c) 2017 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = ''' + callback: 'null' + callback_type: stdout + requirements: + - set as main display callback + short_description: Don't display stuff to screen + description: + - This callback prevents outputing events to screen +''' + +from ansible.plugins.callback import CallbackBase + + +class CallbackModule(CallbackBase): + + ''' + This callback wont print messages to stdout when new callback events are received. 
+ ''' + + CALLBACK_VERSION = 2.0 + CALLBACK_TYPE = 'stdout' + CALLBACK_NAME = 'community.general.null' diff --git a/plugins/callback/osx_say.py b/plugins/callback/osx_say.py new file mode 120000 index 0000000000..f080521d9d --- /dev/null +++ b/plugins/callback/osx_say.py @@ -0,0 +1 @@ +say.py \ No newline at end of file diff --git a/plugins/callback/say.py b/plugins/callback/say.py new file mode 100644 index 0000000000..b8e69932e1 --- /dev/null +++ b/plugins/callback/say.py @@ -0,0 +1,113 @@ +# (c) 2012, Michael DeHaan, +# (c) 2017 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = ''' + callback: say + type: notification + requirements: + - whitelisting in configuration + - the '/usr/bin/say' command line program (standard on macOS) or 'espeak' command line program + short_description: notify using software speech synthesizer + description: + - This plugin will use the 'say' or 'espeak' program to "speak" about play events. + notes: + - In 2.8, this callback has been renamed from C(osx_say) into M(say). +''' + +import distutils.spawn +import platform +import subprocess +import os + +from ansible.plugins.callback import CallbackBase + + +class CallbackModule(CallbackBase): + """ + makes Ansible much more exciting. + """ + CALLBACK_VERSION = 2.0 + CALLBACK_TYPE = 'notification' + CALLBACK_NAME = 'community.general.say' + CALLBACK_NEEDS_WHITELIST = True + + def __init__(self): + + super(CallbackModule, self).__init__() + + self.FAILED_VOICE = None + self.REGULAR_VOICE = None + self.HAPPY_VOICE = None + self.LASER_VOICE = None + + self.synthesizer = distutils.spawn.find_executable('say') + if not self.synthesizer: + self.synthesizer = distutils.spawn.find_executable('espeak') + if self.synthesizer: + self.FAILED_VOICE = 'klatt' + self.HAPPY_VOICE = 'f5' + self.LASER_VOICE = 'whisper' + elif platform.system() != 'Darwin': + # 'say' binary available, it might be GNUstep tool which doesn't support 'voice' parameter + self._display.warning("'say' executable found but system is '%s': ignoring voice parameter" % platform.system()) + else: + self.FAILED_VOICE = 'Zarvox' + self.REGULAR_VOICE = 'Trinoids' + self.HAPPY_VOICE = 'Cellos' + self.LASER_VOICE = 'Princess' + + # plugin disable itself if say is not present + # ansible will not call any callback if disabled is set to True + if not self.synthesizer: + self.disabled = True + self._display.warning("Unable to find either 'say' or 'espeak' executable, plugin %s disabled" % os.path.basename(__file__)) + + def say(self, msg, voice): + cmd = [self.synthesizer, msg] + if voice: + cmd.extend(('-v', voice)) + subprocess.call(cmd) + + def runner_on_failed(self, host, res, ignore_errors=False): + self.say("Failure on host %s" % host, self.FAILED_VOICE) + + def runner_on_ok(self, host, res): + self.say("pew", self.LASER_VOICE) + + def runner_on_skipped(self, host, item=None): + self.say("pew", self.LASER_VOICE) + + def runner_on_unreachable(self, host, res): + self.say("Failure on host %s" % host, self.FAILED_VOICE) + + def runner_on_async_ok(self, host, res, jid): + self.say("pew", self.LASER_VOICE) + + def runner_on_async_failed(self, host, res, jid): + self.say("Failure on host %s" % host, self.FAILED_VOICE) + + def playbook_on_start(self): + self.say("Running Playbook", self.REGULAR_VOICE) + + def playbook_on_notify(self, host, handler): + self.say("pew", 
self.LASER_VOICE) + + def playbook_on_task_start(self, name, is_conditional): + if not is_conditional: + self.say("Starting task: %s" % name, self.REGULAR_VOICE) + else: + self.say("Notifying task: %s" % name, self.REGULAR_VOICE) + + def playbook_on_setup(self): + self.say("Gathering facts", self.REGULAR_VOICE) + + def playbook_on_play_start(self, name): + self.say("Starting play: %s" % name, self.HAPPY_VOICE) + + def playbook_on_stats(self, stats): + self.say("Play complete", self.HAPPY_VOICE) diff --git a/plugins/callback/selective.py b/plugins/callback/selective.py new file mode 100644 index 0000000000..3c37495e31 --- /dev/null +++ b/plugins/callback/selective.py @@ -0,0 +1,275 @@ +# (c) Fastly, inc 2016 +# (c) 2017 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = ''' + callback: selective + callback_type: stdout + requirements: + - set as main display callback + short_description: only print certain tasks + description: + - This callback only prints tasks that have been tagged with `print_action` or that have failed. + This allows operators to focus on the tasks that provide value only. + - Tasks that are not printed are placed with a '.'. + - If you increase verbosity all tasks are printed. + options: + nocolor: + default: False + description: This setting allows suppressing colorizing output + env: + - name: ANSIBLE_NOCOLOR + - name: ANSIBLE_SELECTIVE_DONT_COLORIZE + ini: + - section: defaults + key: nocolor + type: boolean +''' + +EXAMPLES = """ + - debug: msg="This will not be printed" + - debug: msg="But this will" + tags: [print_action] +""" + +import difflib + +from ansible import constants as C +from ansible.plugins.callback import CallbackBase +from ansible.module_utils._text import to_text +from ansible.utils.color import codeCodes + +DONT_COLORIZE = False +COLORS = { + 'normal': '\033[0m', + 'ok': '\033[{0}m'.format(codeCodes[C.COLOR_OK]), + 'bold': '\033[1m', + 'not_so_bold': '\033[1m\033[34m', + 'changed': '\033[{0}m'.format(codeCodes[C.COLOR_CHANGED]), + 'failed': '\033[{0}m'.format(codeCodes[C.COLOR_ERROR]), + 'endc': '\033[0m', + 'skipped': '\033[{0}m'.format(codeCodes[C.COLOR_SKIP]), +} + + +def dict_diff(prv, nxt): + """Return a dict of keys that differ with another config object.""" + keys = set(prv.keys() + nxt.keys()) + result = {} + for k in keys: + if prv.get(k) != nxt.get(k): + result[k] = (prv.get(k), nxt.get(k)) + return result + + +def colorize(msg, color): + """Given a string add necessary codes to format the string.""" + if DONT_COLORIZE: + return msg + else: + return '{0}{1}{2}'.format(COLORS[color], msg, COLORS['endc']) + + +class CallbackModule(CallbackBase): + """selective.py callback plugin.""" + + CALLBACK_VERSION = 2.0 + CALLBACK_TYPE = 'stdout' + CALLBACK_NAME = 'community.general.selective' + + def __init__(self, display=None): + """selective.py callback plugin.""" + super(CallbackModule, self).__init__(display) + self.last_skipped = False + self.last_task_name = None + self.printed_last_task = False + + def set_options(self, task_keys=None, var_options=None, direct=None): + + super(CallbackModule, self).set_options(task_keys=task_keys, var_options=var_options, direct=direct) + + global DONT_COLORIZE + DONT_COLORIZE = self.get_option('nocolor') + + def _print_task(self, task_name=None): + if task_name is None: + task_name = self.last_task_name + + if not self.printed_last_task: + 
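+            # The banner for a task is printed lazily: only once the first
+            # result for that task is actually displayed.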
self.printed_last_task = True + line_length = 120 + if self.last_skipped: + print() + msg = colorize("# {0} {1}".format(task_name, + '*' * (line_length - len(task_name))), 'bold') + print(msg) + + def _indent_text(self, text, indent_level): + lines = text.splitlines() + result_lines = [] + for l in lines: + result_lines.append("{0}{1}".format(' ' * indent_level, l)) + return '\n'.join(result_lines) + + def _print_diff(self, diff, indent_level): + if isinstance(diff, dict): + try: + diff = '\n'.join(difflib.unified_diff(diff['before'].splitlines(), + diff['after'].splitlines(), + fromfile=diff.get('before_header', + 'new_file'), + tofile=diff['after_header'])) + except AttributeError: + diff = dict_diff(diff['before'], diff['after']) + if diff: + diff = colorize(str(diff), 'changed') + print(self._indent_text(diff, indent_level + 4)) + + def _print_host_or_item(self, host_or_item, changed, msg, diff, is_host, error, stdout, stderr): + if is_host: + indent_level = 0 + name = colorize(host_or_item.name, 'not_so_bold') + else: + indent_level = 4 + if isinstance(host_or_item, dict): + if 'key' in host_or_item.keys(): + host_or_item = host_or_item['key'] + name = colorize(to_text(host_or_item), 'bold') + + if error: + color = 'failed' + change_string = colorize('FAILED!!!', color) + else: + color = 'changed' if changed else 'ok' + change_string = colorize("changed={0}".format(changed), color) + + msg = colorize(msg, color) + + line_length = 120 + spaces = ' ' * (40 - len(name) - indent_level) + line = "{0} * {1}{2}- {3}".format(' ' * indent_level, name, spaces, change_string) + + if len(msg) < 50: + line += ' -- {0}'.format(msg) + print("{0} {1}---------".format(line, '-' * (line_length - len(line)))) + else: + print("{0} {1}".format(line, '-' * (line_length - len(line)))) + print(self._indent_text(msg, indent_level + 4)) + + if diff: + self._print_diff(diff, indent_level) + if stdout: + stdout = colorize(stdout, 'failed') + print(self._indent_text(stdout, indent_level + 4)) + if stderr: + stderr = colorize(stderr, 'failed') + print(self._indent_text(stderr, indent_level + 4)) + + def v2_playbook_on_play_start(self, play): + """Run on start of the play.""" + pass + + def v2_playbook_on_task_start(self, task, **kwargs): + """Run when a task starts.""" + self.last_task_name = task.get_name() + self.printed_last_task = False + + def _print_task_result(self, result, error=False, **kwargs): + """Run when a task finishes correctly.""" + + if 'print_action' in result._task.tags or error or self._display.verbosity > 1: + self._print_task() + self.last_skipped = False + msg = to_text(result._result.get('msg', '')) or\ + to_text(result._result.get('reason', '')) + + stderr = [result._result.get('exception', None), + result._result.get('module_stderr', None)] + stderr = "\n".join([e for e in stderr if e]).strip() + + self._print_host_or_item(result._host, + result._result.get('changed', False), + msg, + result._result.get('diff', None), + is_host=True, + error=error, + stdout=result._result.get('module_stdout', None), + stderr=stderr.strip(), + ) + if 'results' in result._result: + for r in result._result['results']: + failed = 'failed' in r + + stderr = [r.get('exception', None), r.get('module_stderr', None)] + stderr = "\n".join([e for e in stderr if e]).strip() + + self._print_host_or_item(r['item'], + r.get('changed', False), + to_text(r.get('msg', '')), + r.get('diff', None), + is_host=False, + error=failed, + stdout=r.get('module_stdout', None), + stderr=stderr.strip(), + ) + else: + 
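+            # Tasks without the 'print_action' tag that neither fail nor run
+            # under increased verbosity are compressed to a single '.'.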
self.last_skipped = True + print('.', end="") + + def v2_playbook_on_stats(self, stats): + """Display info about playbook statistics.""" + print() + self.printed_last_task = False + self._print_task('STATS') + + hosts = sorted(stats.processed.keys()) + for host in hosts: + s = stats.summarize(host) + + if s['failures'] or s['unreachable']: + color = 'failed' + elif s['changed']: + color = 'changed' + else: + color = 'ok' + + msg = '{0} : ok={1}\tchanged={2}\tfailed={3}\tunreachable={4}\trescued={5}\tignored={6}'.format( + host, s['ok'], s['changed'], s['failures'], s['unreachable'], s['rescued'], s['ignored']) + print(colorize(msg, color)) + + def v2_runner_on_skipped(self, result, **kwargs): + """Run when a task is skipped.""" + if self._display.verbosity > 1: + self._print_task() + self.last_skipped = False + + line_length = 120 + spaces = ' ' * (31 - len(result._host.name) - 4) + + line = " * {0}{1}- {2}".format(colorize(result._host.name, 'not_so_bold'), + spaces, + colorize("skipped", 'skipped'),) + + reason = result._result.get('skipped_reason', '') or \ + result._result.get('skip_reason', '') + if len(reason) < 50: + line += ' -- {0}'.format(reason) + print("{0} {1}---------".format(line, '-' * (line_length - len(line)))) + else: + print("{0} {1}".format(line, '-' * (line_length - len(line)))) + print(self._indent_text(reason, 8)) + print(reason) + + def v2_runner_on_ok(self, result, **kwargs): + self._print_task_result(result, error=False, **kwargs) + + def v2_runner_on_failed(self, result, **kwargs): + self._print_task_result(result, error=True, **kwargs) + + def v2_runner_on_unreachable(self, result, **kwargs): + self._print_task_result(result, error=True, **kwargs) + + v2_playbook_on_handler_task_start = v2_playbook_on_task_start diff --git a/plugins/callback/slack.py b/plugins/callback/slack.py new file mode 100644 index 0000000000..4de59d3fc7 --- /dev/null +++ b/plugins/callback/slack.py @@ -0,0 +1,250 @@ +# (C) 2014-2015, Matt Martz +# (C) 2017 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = ''' + callback: slack + callback_type: notification + requirements: + - whitelist in configuration + - prettytable (python library) + short_description: Sends play events to a Slack channel + description: + - This is an ansible callback plugin that sends status updates to a Slack channel during playbook execution. + - Before 2.4 only environment variables were available for configuring this plugin + options: + webhook_url: + required: True + description: Slack Webhook URL + env: + - name: SLACK_WEBHOOK_URL + ini: + - section: callback_slack + key: webhook_url + channel: + default: "#ansible" + description: Slack room to post in. + env: + - name: SLACK_CHANNEL + ini: + - section: callback_slack + key: channel + username: + description: Username to post as. + env: + - name: SLACK_USERNAME + default: ansible + ini: + - section: callback_slack + key: username + validate_certs: + description: validate the SSL certificate of the Slack server. 
(For HTTPS URLs) + env: + - name: SLACK_VALIDATE_CERTS + ini: + - section: callback_slack + key: validate_certs + default: True + type: bool +''' + +import json +import os +import uuid + +from ansible import context +from ansible.module_utils._text import to_text +from ansible.module_utils.urls import open_url +from ansible.plugins.callback import CallbackBase + +try: + import prettytable + HAS_PRETTYTABLE = True +except ImportError: + HAS_PRETTYTABLE = False + + +class CallbackModule(CallbackBase): + """This is an ansible callback plugin that sends status + updates to a Slack channel during playbook execution. + """ + CALLBACK_VERSION = 2.0 + CALLBACK_TYPE = 'notification' + CALLBACK_NAME = 'community.general.slack' + CALLBACK_NEEDS_WHITELIST = True + + def __init__(self, display=None): + + super(CallbackModule, self).__init__(display=display) + + if not HAS_PRETTYTABLE: + self.disabled = True + self._display.warning('The `prettytable` python module is not ' + 'installed. Disabling the Slack callback ' + 'plugin.') + + self.playbook_name = None + + # This is a 6 character identifier provided with each message + # This makes it easier to correlate messages when there are more + # than 1 simultaneous playbooks running + self.guid = uuid.uuid4().hex[:6] + + def set_options(self, task_keys=None, var_options=None, direct=None): + + super(CallbackModule, self).set_options(task_keys=task_keys, var_options=var_options, direct=direct) + + self.webhook_url = self.get_option('webhook_url') + self.channel = self.get_option('channel') + self.username = self.get_option('username') + self.show_invocation = (self._display.verbosity > 1) + self.validate_certs = self.get_option('validate_certs') + + if self.webhook_url is None: + self.disabled = True + self._display.warning('Slack Webhook URL was not provided. 
The ' + 'Slack Webhook URL can be provided using ' + 'the `SLACK_WEBHOOK_URL` environment ' + 'variable.') + + def send_msg(self, attachments): + headers = { + 'Content-type': 'application/json', + } + + payload = { + 'channel': self.channel, + 'username': self.username, + 'attachments': attachments, + 'parse': 'none', + 'icon_url': ('https://cdn2.hubspot.net/hub/330046/' + 'file-449187601-png/ansible_badge.png'), + } + + data = json.dumps(payload) + self._display.debug(data) + self._display.debug(self.webhook_url) + try: + response = open_url(self.webhook_url, data=data, validate_certs=self.validate_certs, + headers=headers) + return response.read() + except Exception as e: + self._display.warning(u'Could not submit message to Slack: %s' % + to_text(e)) + + def v2_playbook_on_start(self, playbook): + self.playbook_name = os.path.basename(playbook._file_name) + + title = [ + '*Playbook initiated* (_%s_)' % self.guid + ] + + invocation_items = [] + if context.CLIARGS and self.show_invocation: + tags = context.CLIARGS['tags'] + skip_tags = context.CLIARGS['skip_tags'] + extra_vars = context.CLIARGS['extra_vars'] + subset = context.CLIARGS['subset'] + inventory = [os.path.abspath(i) for i in context.CLIARGS['inventory']] + + invocation_items.append('Inventory: %s' % ', '.join(inventory)) + if tags and tags != ['all']: + invocation_items.append('Tags: %s' % ', '.join(tags)) + if skip_tags: + invocation_items.append('Skip Tags: %s' % ', '.join(skip_tags)) + if subset: + invocation_items.append('Limit: %s' % subset) + if extra_vars: + invocation_items.append('Extra Vars: %s' % + ' '.join(extra_vars)) + + title.append('by *%s*' % context.CLIARGS['remote_user']) + + title.append('\n\n*%s*' % self.playbook_name) + msg_items = [' '.join(title)] + if invocation_items: + msg_items.append('```\n%s\n```' % '\n'.join(invocation_items)) + + msg = '\n'.join(msg_items) + + attachments = [{ + 'fallback': msg, + 'fields': [ + { + 'value': msg + } + ], + 'color': 'warning', + 'mrkdwn_in': ['text', 'fallback', 'fields'], + }] + + self.send_msg(attachments=attachments) + + def v2_playbook_on_play_start(self, play): + """Display Play start messages""" + + name = play.name or 'Play name not specified (%s)' % play._uuid + msg = '*Starting play* (_%s_)\n\n*%s*' % (self.guid, name) + attachments = [ + { + 'fallback': msg, + 'text': msg, + 'color': 'warning', + 'mrkdwn_in': ['text', 'fallback', 'fields'], + } + ] + self.send_msg(attachments=attachments) + + def v2_playbook_on_stats(self, stats): + """Display info about playbook statistics""" + + hosts = sorted(stats.processed.keys()) + + t = prettytable.PrettyTable(['Host', 'Ok', 'Changed', 'Unreachable', + 'Failures', 'Rescued', 'Ignored']) + + failures = False + unreachable = False + + for h in hosts: + s = stats.summarize(h) + + if s['failures'] > 0: + failures = True + if s['unreachable'] > 0: + unreachable = True + + t.add_row([h] + [s[k] for k in ['ok', 'changed', 'unreachable', + 'failures', 'rescued', 'ignored']]) + + attachments = [] + msg_items = [ + '*Playbook Complete* (_%s_)' % self.guid + ] + if failures or unreachable: + color = 'danger' + msg_items.append('\n*Failed!*') + else: + color = 'good' + msg_items.append('\n*Success!*') + + msg_items.append('```\n%s\n```' % t) + + msg = '\n'.join(msg_items) + + attachments.append({ + 'fallback': msg, + 'fields': [ + { + 'value': msg + } + ], + 'color': color, + 'mrkdwn_in': ['text', 'fallback', 'fields'] + }) + + self.send_msg(attachments=attachments) diff --git a/plugins/callback/splunk.py 
b/plugins/callback/splunk.py new file mode 100644 index 0000000000..0be2bd4d09 --- /dev/null +++ b/plugins/callback/splunk.py @@ -0,0 +1,230 @@ +# -*- coding: utf-8 -*- +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = ''' + callback: splunk + type: aggregate + short_description: Sends task result events to Splunk HTTP Event Collector + author: "Stuart Hirst " + description: + - This callback plugin will send task results as JSON formatted events to a Splunk HTTP collector. + - The companion Splunk Monitoring & Diagnostics App is available here "https://splunkbase.splunk.com/app/4023/" + - Credit to "Ryan Currah (@ryancurrah)" for original source upon which this is based. + requirements: + - Whitelisting this callback plugin + - 'Create a HTTP Event Collector in Splunk' + - 'Define the url and token in ansible.cfg' + options: + url: + description: URL to the Splunk HTTP collector source + env: + - name: SPLUNK_URL + ini: + - section: callback_splunk + key: url + authtoken: + description: Token to authenticate the connection to the Splunk HTTP collector + env: + - name: SPLUNK_AUTHTOKEN + ini: + - section: callback_splunk + key: authtoken +''' + +EXAMPLES = ''' +examples: > + To enable, add this to your ansible.cfg file in the defaults block + [defaults] + callback_whitelist = splunk + Set the environment variable + export SPLUNK_URL=http://mysplunkinstance.datapaas.io:8088/services/collector/event + export SPLUNK_AUTHTOKEN=f23blad6-5965-4537-bf69-5b5a545blabla88 + Set the ansible.cfg variable in the callback_splunk block + [callback_splunk] + url = http://mysplunkinstance.datapaas.io:8088/services/collector/event + authtoken = f23blad6-5965-4537-bf69-5b5a545blabla88 +''' + +import json +import uuid +import socket +import getpass + +from datetime import datetime +from os.path import basename + +from ansible.module_utils.urls import open_url +from ansible.parsing.ajson import AnsibleJSONEncoder +from ansible.plugins.callback import CallbackBase + + +class SplunkHTTPCollectorSource(object): + def __init__(self): + self.ansible_check_mode = False + self.ansible_playbook = "" + self.ansible_version = "" + self.session = str(uuid.uuid4()) + self.host = socket.gethostname() + self.ip_address = socket.gethostbyname(socket.gethostname()) + self.user = getpass.getuser() + + def send_event(self, url, authtoken, state, result, runtime): + if result._task_fields['args'].get('_ansible_check_mode') is True: + self.ansible_check_mode = True + + if result._task_fields['args'].get('_ansible_version'): + self.ansible_version = \ + result._task_fields['args'].get('_ansible_version') + + if result._task._role: + ansible_role = str(result._task._role) + else: + ansible_role = None + + if 'args' in result._task_fields: + del result._task_fields['args'] + + data = {} + data['uuid'] = result._task._uuid + data['session'] = 
self.session + data['status'] = state + data['timestamp'] = datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S ' + '+0000') + data['host'] = self.host + data['ip_address'] = self.ip_address + data['user'] = self.user + data['runtime'] = runtime + data['ansible_version'] = self.ansible_version + data['ansible_check_mode'] = self.ansible_check_mode + data['ansible_host'] = result._host.name + data['ansible_playbook'] = self.ansible_playbook + data['ansible_role'] = ansible_role + data['ansible_task'] = result._task_fields + data['ansible_result'] = result._result + + # This wraps the json payload in and outer json event needed by Splunk + jsondata = json.dumps(data, cls=AnsibleJSONEncoder, sort_keys=True) + jsondata = '{"event":' + jsondata + "}" + + open_url( + url, + jsondata, + headers={ + 'Content-type': 'application/json', + 'Authorization': 'Splunk ' + authtoken + }, + method='POST' + ) + + +class CallbackModule(CallbackBase): + CALLBACK_VERSION = 2.0 + CALLBACK_TYPE = 'aggregate' + CALLBACK_NAME = 'community.general.splunk' + CALLBACK_NEEDS_WHITELIST = True + + def __init__(self, display=None): + super(CallbackModule, self).__init__(display=display) + self.start_datetimes = {} # Collect task start times + self.url = None + self.authtoken = None + self.splunk = SplunkHTTPCollectorSource() + + def _runtime(self, result): + return ( + datetime.utcnow() - + self.start_datetimes[result._task._uuid] + ).total_seconds() + + def set_options(self, task_keys=None, var_options=None, direct=None): + super(CallbackModule, self).set_options(task_keys=task_keys, var_options=var_options, direct=direct) + + self.url = self.get_option('url') + + if self.url is None: + self.disabled = True + self._display.warning('Splunk HTTP collector source URL was ' + 'not provided. The Splunk HTTP collector ' + 'source URL can be provided using the ' + '`SPLUNK_URL` environment variable or ' + 'in the ansible.cfg file.') + + self.authtoken = self.get_option('authtoken') + + if self.authtoken is None: + self.disabled = True + self._display.warning('Splunk HTTP collector requires an authentication' + 'token. 
The Splunk HTTP collector ' + 'authentication token can be provided using the ' + '`SPLUNK_AUTHTOKEN` environment variable or ' + 'in the ansible.cfg file.') + + def v2_playbook_on_start(self, playbook): + self.splunk.ansible_playbook = basename(playbook._file_name) + + def v2_playbook_on_task_start(self, task, is_conditional): + self.start_datetimes[task._uuid] = datetime.utcnow() + + def v2_playbook_on_handler_task_start(self, task): + self.start_datetimes[task._uuid] = datetime.utcnow() + + def v2_runner_on_ok(self, result, **kwargs): + self.splunk.send_event( + self.url, + self.authtoken, + 'OK', + result, + self._runtime(result) + ) + + def v2_runner_on_skipped(self, result, **kwargs): + self.splunk.send_event( + self.url, + self.authtoken, + 'SKIPPED', + result, + self._runtime(result) + ) + + def v2_runner_on_failed(self, result, **kwargs): + self.splunk.send_event( + self.url, + self.authtoken, + 'FAILED', + result, + self._runtime(result) + ) + + def runner_on_async_failed(self, result, **kwargs): + self.splunk.send_event( + self.url, + self.authtoken, + 'FAILED', + result, + self._runtime(result) + ) + + def v2_runner_on_unreachable(self, result, **kwargs): + self.splunk.send_event( + self.url, + self.authtoken, + 'UNREACHABLE', + result, + self._runtime(result) + ) diff --git a/plugins/callback/stderr.py b/plugins/callback/stderr.py new file mode 100644 index 0000000000..990af95b61 --- /dev/null +++ b/plugins/callback/stderr.py @@ -0,0 +1,70 @@ +# (c) 2017, Frederic Van Espen +# (c) 2017 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = ''' + callback: stderr + callback_type: stdout + requirements: + - set as main display callback + short_description: Splits output, sending failed tasks to stderr + deprecated: + why: The 'default' callback plugin now supports this functionality + removed_in: '2.11' + alternative: "'default' callback plugin with 'display_failed_stderr = yes' option" + extends_documentation_fragment: + - default_callback + description: + - This is the stderr callback plugin, it behaves like the default callback plugin but sends error output to stderr. + - Also it does not output skipped host/task/item status +''' + +from ansible import constants as C +from ansible.plugins.callback.default import CallbackModule as CallbackModule_default + + +class CallbackModule(CallbackModule_default): + + ''' + This is the stderr callback plugin, which reuses the default + callback plugin but sends error output to stderr. + ''' + + CALLBACK_VERSION = 2.0 + CALLBACK_TYPE = 'stdout' + CALLBACK_NAME = 'community.general.stderr' + + def __init__(self): + + self.super_ref = super(CallbackModule, self) + self.super_ref.__init__() + + def v2_runner_on_failed(self, result, ignore_errors=False): + + delegated_vars = result._result.get('_ansible_delegated_vars', None) + self._clean_results(result._result, result._task.action) + + if self._play.strategy == 'free' and self._last_task_banner != result._task._uuid: + self._print_task_banner(result._task) + + self._handle_exception(result._result, use_stderr=True) + self._handle_warnings(result._result) + + if result._task.loop and 'results' in result._result: + self._process_items(result) + + else: + if delegated_vars: + self._display.display("fatal: [%s -> %s]: FAILED! 
=> %s" % (result._host.get_name(), delegated_vars['ansible_host'], + self._dump_results(result._result)), color=C.COLOR_ERROR, + stderr=True) + else: + self._display.display("fatal: [%s]: FAILED! => %s" % (result._host.get_name(), self._dump_results(result._result)), + color=C.COLOR_ERROR, stderr=True) + + if ignore_errors: + self._display.display("...ignoring", color=C.COLOR_SKIP) diff --git a/plugins/callback/sumologic.py b/plugins/callback/sumologic.py new file mode 100644 index 0000000000..94aa7c93a2 --- /dev/null +++ b/plugins/callback/sumologic.py @@ -0,0 +1,201 @@ +# -*- coding: utf-8 -*- +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = ''' +callback: sumologic +type: aggregate +short_description: Sends task result events to Sumologic +author: "Ryan Currah (@ryancurrah)" +description: + - This callback plugin will send task results as JSON formatted events to a Sumologic HTTP collector source +requirements: + - Whitelisting this callback plugin + - 'Create a HTTP collector source in Sumologic and specify a custom timestamp format of C(yyyy-MM-dd HH:mm:ss ZZZZ) and a custom timestamp locator + of C("timestamp": "(.*)")' +options: + url: + description: URL to the Sumologic HTTP collector source + env: + - name: SUMOLOGIC_URL + ini: + - section: callback_sumologic + key: url +''' + +EXAMPLES = ''' +examples: > + To enable, add this to your ansible.cfg file in the defaults block + [defaults] + callback_whitelist = sumologic + + Set the environment variable + export SUMOLOGIC_URL=https://endpoint1.collection.us2.sumologic.com/receiver/v1/http/R8moSv1d8EW9LAUFZJ6dbxCFxwLH6kfCdcBfddlfxCbLuL-BN5twcTpMk__pYy_cDmp== + + Set the ansible.cfg variable in the callback_sumologic block + [callback_sumologic] + url = https://endpoint1.collection.us2.sumologic.com/receiver/v1/http/R8moSv1d8EW9LAUFZJ6dbxCFxwLH6kfCdcBfddlfxCbLuL-BN5twcTpMk__pYy_cDmp== +''' + +import json +import uuid +import socket +import getpass + +from datetime import datetime +from os.path import basename + +from ansible.module_utils.urls import open_url +from ansible.parsing.ajson import AnsibleJSONEncoder +from ansible.plugins.callback import CallbackBase + + +class SumologicHTTPCollectorSource(object): + def __init__(self): + self.ansible_check_mode = False + self.ansible_playbook = "" + self.ansible_version = "" + self.session = str(uuid.uuid4()) + self.host = socket.gethostname() + self.ip_address = socket.gethostbyname(socket.gethostname()) + self.user = getpass.getuser() + + def send_event(self, url, state, result, runtime): + if result._task_fields['args'].get('_ansible_check_mode') is True: + self.ansible_check_mode = True + + if result._task_fields['args'].get('_ansible_version'): + self.ansible_version = \ + result._task_fields['args'].get('_ansible_version') + + if result._task._role: + ansible_role = str(result._task._role) + else: + 
ansible_role = None + + if 'args' in result._task_fields: + del result._task_fields['args'] + + data = {} + data['uuid'] = result._task._uuid + data['session'] = self.session + data['status'] = state + data['timestamp'] = datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S ' + '+0000') + data['host'] = self.host + data['ip_address'] = self.ip_address + data['user'] = self.user + data['runtime'] = runtime + data['ansible_version'] = self.ansible_version + data['ansible_check_mode'] = self.ansible_check_mode + data['ansible_host'] = result._host.name + data['ansible_playbook'] = self.ansible_playbook + data['ansible_role'] = ansible_role + data['ansible_task'] = result._task_fields + data['ansible_result'] = result._result + + open_url( + url, + data=json.dumps(data, cls=AnsibleJSONEncoder, sort_keys=True), + headers={ + 'Content-type': 'application/json', + 'X-Sumo-Host': data['ansible_host'] + }, + method='POST' + ) + + +class CallbackModule(CallbackBase): + CALLBACK_VERSION = 2.0 + CALLBACK_TYPE = 'aggregate' + CALLBACK_NAME = 'community.general.sumologic' + CALLBACK_NEEDS_WHITELIST = True + + def __init__(self, display=None): + super(CallbackModule, self).__init__(display=display) + self.start_datetimes = {} # Collect task start times + self.url = None + self.sumologic = SumologicHTTPCollectorSource() + + def _runtime(self, result): + return ( + datetime.utcnow() - + self.start_datetimes[result._task._uuid] + ).total_seconds() + + def set_options(self, task_keys=None, var_options=None, direct=None): + super(CallbackModule, self).set_options(task_keys=task_keys, var_options=var_options, direct=direct) + + self.url = self.get_option('url') + + if self.url is None: + self.disabled = True + self._display.warning('Sumologic HTTP collector source URL was ' + 'not provided. 
The Sumologic HTTP collector ' + 'source URL can be provided using the ' + '`SUMOLOGIC_URL` environment variable or ' + 'in the ansible.cfg file.') + + def v2_playbook_on_start(self, playbook): + self.sumologic.ansible_playbook = basename(playbook._file_name) + + def v2_playbook_on_task_start(self, task, is_conditional): + self.start_datetimes[task._uuid] = datetime.utcnow() + + def v2_playbook_on_handler_task_start(self, task): + self.start_datetimes[task._uuid] = datetime.utcnow() + + def v2_runner_on_ok(self, result, **kwargs): + self.sumologic.send_event( + self.url, + 'OK', + result, + self._runtime(result) + ) + + def v2_runner_on_skipped(self, result, **kwargs): + self.sumologic.send_event( + self.url, + 'SKIPPED', + result, + self._runtime(result) + ) + + def v2_runner_on_failed(self, result, **kwargs): + self.sumologic.send_event( + self.url, + 'FAILED', + result, + self._runtime(result) + ) + + def runner_on_async_failed(self, result, **kwargs): + self.sumologic.send_event( + self.url, + 'FAILED', + result, + self._runtime(result) + ) + + def v2_runner_on_unreachable(self, result, **kwargs): + self.sumologic.send_event( + self.url, + 'UNREACHABLE', + result, + self._runtime(result) + ) diff --git a/plugins/callback/syslog_json.py b/plugins/callback/syslog_json.py new file mode 100644 index 0000000000..a53f359471 --- /dev/null +++ b/plugins/callback/syslog_json.py @@ -0,0 +1,104 @@ +# (c) 2017 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = ''' + callback: syslog_json + callback_type: notification + requirements: + - whitelist in configuration + short_description: sends JSON events to syslog + description: + - This plugin logs ansible-playbook and ansible runs to a syslog server in JSON format + - Before 2.9 only environment variables were available for configuration + options: + server: + description: syslog server that will receive the event + env: + - name: SYSLOG_SERVER + default: localhost + ini: + - section: callback_syslog_json + key: syslog_server + port: + description: port on which the syslog server is listening + env: + - name: SYSLOG_PORT + default: 514 + ini: + - section: callback_syslog_json + key: syslog_port + facility: + description: syslog facility to log as + env: + - name: SYSLOG_FACILITY + default: user + ini: + - section: callback_syslog_json + key: syslog_facility +''' + +import os +import json + +import logging +import logging.handlers + +import socket + +from ansible.plugins.callback import CallbackBase + + +class CallbackModule(CallbackBase): + """ + logs ansible-playbook and ansible runs to a syslog server in json format + """ + + CALLBACK_VERSION = 2.0 + CALLBACK_TYPE = 'aggregate' + CALLBACK_NAME = 'community.general.syslog_json' + CALLBACK_NEEDS_WHITELIST = True + + def __init__(self): + + super(CallbackModule, self).__init__() + + self.set_options() + + syslog_host = self.get_option("server") + syslog_port = int(self.get_option("port")) + syslog_facility = self.get_option("facility") + + self.logger = logging.getLogger('ansible logger') + self.logger.setLevel(logging.DEBUG) + + self.handler = logging.handlers.SysLogHandler( + address=(syslog_host, syslog_port), + facility=syslog_facility + ) + self.logger.addHandler(self.handler) + self.hostname = socket.gethostname() + + def runner_on_failed(self, host, res, ignore_errors=False): + 
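+        # Unlike the v2_* methods used by the plugins above, these legacy
+        # runner_on_* entry points receive the hostname as a plain string and
+        # the result as a raw dict.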
self.logger.error('%s ansible-command: task execution FAILED; host: %s; message: %s', self.hostname, host, self._dump_results(res)) + + def runner_on_ok(self, host, res): + self.logger.info('%s ansible-command: task execution OK; host: %s; message: %s', self.hostname, host, self._dump_results(res)) + + def runner_on_skipped(self, host, item=None): + self.logger.info('%s ansible-command: task execution SKIPPED; host: %s; message: %s', self.hostname, host, 'skipped') + + def runner_on_unreachable(self, host, res): + self.logger.error('%s ansible-command: task execution UNREACHABLE; host: %s; message: %s', self.hostname, host, self._dump_results(res)) + + def runner_on_async_failed(self, host, res, jid): + self.logger.error('%s ansible-command: task execution FAILED; host: %s; message: %s', self.hostname, host, self._dump_results(res)) + + def playbook_on_import_for_host(self, host, imported_file): + self.logger.info('%s ansible-command: playbook IMPORTED; host: %s; message: imported file %s', self.hostname, host, imported_file) + + def playbook_on_not_import_for_host(self, host, missing_file): + self.logger.info('%s ansible-command: playbook NOT IMPORTED; host: %s; message: missing file %s', self.hostname, host, missing_file) diff --git a/plugins/callback/unixy.py b/plugins/callback/unixy.py new file mode 100644 index 0000000000..3bd943ed2b --- /dev/null +++ b/plugins/callback/unixy.py @@ -0,0 +1,246 @@ +# Copyright: (c) 2017, Allyson Bowles <@akatch> +# Copyright: (c) 2012-2014, Michael DeHaan +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = ''' + callback: unixy + type: stdout + author: Allyson Bowles <@akatch> + short_description: condensed Ansible output + description: + - Consolidated Ansible output in the style of LINUX/UNIX startup logs. 
+ extends_documentation_fragment: + - default_callback + requirements: + - set as stdout in configuration +''' + +from os.path import basename +from ansible import constants as C +from ansible import context +from ansible.module_utils._text import to_text +from ansible.utils.color import colorize, hostcolor +from ansible.plugins.callback.default import CallbackModule as CallbackModule_default + + +class CallbackModule(CallbackModule_default): + + ''' + Design goals: + - Print consolidated output that looks like a *NIX startup log + - Defaults should avoid displaying unnecessary information wherever possible + + TODOs: + - Only display task names if the task runs on at least one host + - Add option to display all hostnames on a single line in the appropriate result color (failures may have a separate line) + - Consolidate stats display + - Display whether run is in --check mode + - Don't show play name if no hosts found + ''' + + CALLBACK_VERSION = 2.0 + CALLBACK_TYPE = 'stdout' + CALLBACK_NAME = 'community.general.unixy' + + def _run_is_verbose(self, result): + return ((self._display.verbosity > 0 or '_ansible_verbose_always' in result._result) and '_ansible_verbose_override' not in result._result) + + def _get_task_display_name(self, task): + self.task_display_name = None + display_name = task.get_name().strip().split(" : ") + + task_display_name = display_name[-1] + if task_display_name.startswith("include"): + return + else: + self.task_display_name = task_display_name + + def _preprocess_result(self, result): + self.delegated_vars = result._result.get('_ansible_delegated_vars', None) + self._handle_exception(result._result, use_stderr=self.display_failed_stderr) + self._handle_warnings(result._result) + + def _process_result_output(self, result, msg): + task_host = result._host.get_name() + task_result = "%s %s" % (task_host, msg) + + if self._run_is_verbose(result): + task_result = "%s %s: %s" % (task_host, msg, self._dump_results(result._result, indent=4)) + return task_result + + if self.delegated_vars: + task_delegate_host = self.delegated_vars['ansible_host'] + task_result = "%s -> %s %s" % (task_host, task_delegate_host, msg) + + if result._result.get('msg') and result._result.get('msg') != "All items completed": + task_result += " | msg: " + to_text(result._result.get('msg')) + + if result._result.get('stdout'): + task_result += " | stdout: " + result._result.get('stdout') + + if result._result.get('stderr'): + task_result += " | stderr: " + result._result.get('stderr') + + return task_result + + def v2_playbook_on_task_start(self, task, is_conditional): + self._get_task_display_name(task) + if self.task_display_name is not None: + self._display.display("%s..." % self.task_display_name) + + def v2_playbook_on_handler_task_start(self, task): + self._get_task_display_name(task) + if self.task_display_name is not None: + self._display.display("%s (via handler)... 
" % self.task_display_name) + + def v2_playbook_on_play_start(self, play): + name = play.get_name().strip() + if name and play.hosts: + msg = u"\n- %s on hosts: %s -" % (name, ",".join(play.hosts)) + else: + msg = u"---" + + self._display.display(msg) + + def v2_runner_on_skipped(self, result, ignore_errors=False): + if self.display_skipped_hosts: + self._preprocess_result(result) + display_color = C.COLOR_SKIP + msg = "skipped" + + task_result = self._process_result_output(result, msg) + self._display.display(" " + task_result, display_color) + else: + return + + def v2_runner_on_failed(self, result, ignore_errors=False): + self._preprocess_result(result) + display_color = C.COLOR_ERROR + msg = "failed" + item_value = self._get_item_label(result._result) + if item_value: + msg += " | item: %s" % (item_value,) + + task_result = self._process_result_output(result, msg) + self._display.display(" " + task_result, display_color, stderr=self.display_failed_stderr) + + def v2_runner_on_ok(self, result, msg="ok", display_color=C.COLOR_OK): + self._preprocess_result(result) + + result_was_changed = ('changed' in result._result and result._result['changed']) + if result_was_changed: + msg = "done" + item_value = self._get_item_label(result._result) + if item_value: + msg += " | item: %s" % (item_value,) + display_color = C.COLOR_CHANGED + task_result = self._process_result_output(result, msg) + self._display.display(" " + task_result, display_color) + elif self.display_ok_hosts: + task_result = self._process_result_output(result, msg) + self._display.display(" " + task_result, display_color) + + def v2_runner_item_on_skipped(self, result): + self.v2_runner_on_skipped(result) + + def v2_runner_item_on_failed(self, result): + self.v2_runner_on_failed(result) + + def v2_runner_item_on_ok(self, result): + self.v2_runner_on_ok(result) + + def v2_runner_on_unreachable(self, result): + self._preprocess_result(result) + + msg = "unreachable" + display_color = C.COLOR_UNREACHABLE + task_result = self._process_result_output(result, msg) + + self._display.display(" " + task_result, display_color, stderr=self.display_failed_stderr) + + def v2_on_file_diff(self, result): + if result._task.loop and 'results' in result._result: + for res in result._result['results']: + if 'diff' in res and res['diff'] and res.get('changed', False): + diff = self._get_diff(res['diff']) + if diff: + self._display.display(diff) + elif 'diff' in result._result and result._result['diff'] and result._result.get('changed', False): + diff = self._get_diff(result._result['diff']) + if diff: + self._display.display(diff) + + def v2_playbook_on_stats(self, stats): + self._display.display("\n- Play recap -", screen_only=True) + + hosts = sorted(stats.processed.keys()) + for h in hosts: + # TODO how else can we display these? 
+ t = stats.summarize(h) + + self._display.display(u" %s : %s %s %s %s %s %s" % ( + hostcolor(h, t), + colorize(u'ok', t['ok'], C.COLOR_OK), + colorize(u'changed', t['changed'], C.COLOR_CHANGED), + colorize(u'unreachable', t['unreachable'], C.COLOR_UNREACHABLE), + colorize(u'failed', t['failures'], C.COLOR_ERROR), + colorize(u'rescued', t['rescued'], C.COLOR_OK), + colorize(u'ignored', t['ignored'], C.COLOR_WARN)), + screen_only=True + ) + + self._display.display(u" %s : %s %s %s %s %s %s" % ( + hostcolor(h, t, False), + colorize(u'ok', t['ok'], None), + colorize(u'changed', t['changed'], None), + colorize(u'unreachable', t['unreachable'], None), + colorize(u'failed', t['failures'], None), + colorize(u'rescued', t['rescued'], None), + colorize(u'ignored', t['ignored'], None)), + log_only=True + ) + if stats.custom and self.show_custom_stats: + self._display.banner("CUSTOM STATS: ") + # per host + # TODO: come up with 'pretty format' + for k in sorted(stats.custom.keys()): + if k == '_run': + continue + self._display.display('\t%s: %s' % (k, self._dump_results(stats.custom[k], indent=1).replace('\n', ''))) + + # print per run custom stats + if '_run' in stats.custom: + self._display.display("", screen_only=True) + self._display.display('\tRUN: %s' % self._dump_results(stats.custom['_run'], indent=1).replace('\n', '')) + self._display.display("", screen_only=True) + + def v2_playbook_on_no_hosts_matched(self): + self._display.display(" No hosts found!", color=C.COLOR_DEBUG) + + def v2_playbook_on_no_hosts_remaining(self): + self._display.display(" Ran out of hosts!", color=C.COLOR_ERROR) + + def v2_playbook_on_start(self, playbook): + # TODO display whether this run is happening in check mode + self._display.display("Executing playbook %s" % basename(playbook._file_name)) + + # show CLI arguments + if self._display.verbosity > 3: + if context.CLIARGS.get('args'): + self._display.display('Positional arguments: %s' % ' '.join(context.CLIARGS['args']), + color=C.COLOR_VERBOSE, screen_only=True) + + for argument in (a for a in context.CLIARGS if a != 'args'): + val = context.CLIARGS[argument] + if val: + self._display.vvvv('%s: %s' % (argument, val)) + + def v2_runner_retry(self, result): + msg = " Retrying... (%d of %d)" % (result._result['attempts'], result._result['retries']) + if self._run_is_verbose(result): + msg += "Result was: %s" % self._dump_results(result._result) + self._display.display(msg, color=C.COLOR_DEBUG) diff --git a/plugins/callback/yaml.py b/plugins/callback/yaml.py new file mode 100644 index 0000000000..4cea0233b9 --- /dev/null +++ b/plugins/callback/yaml.py @@ -0,0 +1,129 @@ +# (c) 2017 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = ''' + callback: yaml + type: stdout + short_description: yaml-ized Ansible screen output + description: + - Ansible output that can be quite a bit easier to read than the + default JSON formatting. 
+    extends_documentation_fragment:
+      - default_callback
+    requirements:
+      - set as stdout in configuration
+'''
+
+import yaml
+import json
+import re
+import string
+import sys
+
+from ansible.module_utils._text import to_bytes, to_text
+from ansible.module_utils.six import string_types
+from ansible.parsing.yaml.dumper import AnsibleDumper
+from ansible.plugins.callback import CallbackBase, strip_internal_keys, module_response_deepcopy
+from ansible.plugins.callback.default import CallbackModule as Default
+
+
+# from http://stackoverflow.com/a/15423007/115478
+def should_use_block(value):
+    """Returns true if string should be in block format"""
+    for c in u"\u000a\u000d\u001c\u001d\u001e\u0085\u2028\u2029":
+        if c in value:
+            return True
+    return False
+
+
+def my_represent_scalar(self, tag, value, style=None):
+    """Uses block style for multi-line strings"""
+    if style is None:
+        if should_use_block(value):
+            style = '|'
+            # we care more about readability than accuracy, so...
+            # ...no trailing space
+            value = value.rstrip()
+            # ...and non-printable characters
+            value = ''.join(x for x in value if x in string.printable)
+            # ...tabs prevent blocks from expanding
+            value = value.expandtabs()
+            # ...and odd bits of whitespace
+            value = re.sub(r'[\x0b\x0c\r]', '', value)
+            # ...as does trailing space
+            value = re.sub(r' +\n', '\n', value)
+        else:
+            style = self.default_style
+    node = yaml.representer.ScalarNode(tag, value, style=style)
+    if self.alias_key is not None:
+        self.represented_objects[self.alias_key] = node
+    return node
+
+
+class CallbackModule(Default):
+
+    """
+    Variation of the Default output which uses nicely readable YAML instead
+    of JSON for printing results.
+    """
+
+    CALLBACK_VERSION = 2.0
+    CALLBACK_TYPE = 'stdout'
+    CALLBACK_NAME = 'community.general.yaml'
+
+    def __init__(self):
+        super(CallbackModule, self).__init__()
+        yaml.representer.BaseRepresenter.represent_scalar = my_represent_scalar
+
+    def _dump_results(self, result, indent=None, sort_keys=True, keep_invocation=False):
+        if result.get('_ansible_no_log', False):
+            return json.dumps(dict(censored="The output has been hidden due to the fact that 'no_log: true' was specified for this result"))
+
+        # All result keys starting with _ansible_ are internal, so remove them from the result before we output anything.
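+        # Illustrative only: {'_ansible_no_log': False, 'changed': True}
+        # becomes {'changed': True}; module_response_deepcopy() copies the
+        # response first so the stored result itself is not mutated.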
+ abridged_result = strip_internal_keys(module_response_deepcopy(result)) + + # remove invocation unless specifically wanting it + if not keep_invocation and self._display.verbosity < 3 and 'invocation' in result: + del abridged_result['invocation'] + + # remove diff information from screen output + if self._display.verbosity < 3 and 'diff' in result: + del abridged_result['diff'] + + # remove exception from screen output + if 'exception' in abridged_result: + del abridged_result['exception'] + + dumped = '' + + # put changed and skipped into a header line + if 'changed' in abridged_result: + dumped += 'changed=' + str(abridged_result['changed']).lower() + ' ' + del abridged_result['changed'] + + if 'skipped' in abridged_result: + dumped += 'skipped=' + str(abridged_result['skipped']).lower() + ' ' + del abridged_result['skipped'] + + # if we already have stdout, we don't need stdout_lines + if 'stdout' in abridged_result and 'stdout_lines' in abridged_result: + abridged_result['stdout_lines'] = '' + + # if we already have stderr, we don't need stderr_lines + if 'stderr' in abridged_result and 'stderr_lines' in abridged_result: + abridged_result['stderr_lines'] = '' + + if abridged_result: + dumped += '\n' + dumped += to_text(yaml.dump(abridged_result, allow_unicode=True, width=1000, Dumper=AnsibleDumper, default_flow_style=False)) + + # indent by a couple of spaces + dumped = '\n '.join(dumped.split('\n')).rstrip() + return dumped + + def _serialize_diff(self, diff): + return to_text(yaml.dump(diff, allow_unicode=True, width=1000, Dumper=AnsibleDumper, default_flow_style=False)) diff --git a/plugins/cliconf/__init__.py b/plugins/cliconf/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/plugins/cliconf/aireos.py b/plugins/cliconf/aireos.py new file mode 100644 index 0000000000..438e4972cc --- /dev/null +++ b/plugins/cliconf/aireos.py @@ -0,0 +1,95 @@ +# +# (c) 2017 Red Hat Inc. +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = ''' +--- +cliconf: aireos +short_description: Use aireos cliconf to run command on Cisco WLC platform +description: + - This aireos plugin provides low level abstraction apis for + sending and receiving CLI commands from Cisco WLC network devices. 
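+  # Typically selected through inventory, assuming the standard network_cli
+  # connection (values illustrative):
+  #   ansible_connection: ansible.netcommon.network_cli
+  #   ansible_network_os: community.general.aireos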
+''' + +import re +import json + +from itertools import chain + +from ansible.errors import AnsibleConnectionFailure +from ansible.module_utils._text import to_text +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import to_list +from ansible.plugins.cliconf import CliconfBase, enable_mode + + +class Cliconf(CliconfBase): + + def get_device_info(self): + device_info = {} + + device_info['network_os'] = 'aireos' + reply = self.get('show sysinfo') + data = to_text(reply, errors='surrogate_or_strict').strip() + + match = re.search(r'Product Version\.* (.*)', data) + if match: + device_info['network_os_version'] = match.group(1) + + match = re.search(r'System Name\.* (.*)', data, re.M) + if match: + device_info['network_os_hostname'] = match.group(1) + + reply = self.get('show inventory') + data = to_text(reply, errors='surrogate_or_strict').strip() + + match = re.search(r'DESCR: \"(.*)\"', data, re.M) + if match: + device_info['network_os_model'] = match.group(1) + return device_info + + @enable_mode + def get_config(self, source='running', format='text', flags=None): + if source not in ('running', 'startup'): + return self.invalid_params("fetching configuration from %s is not supported" % source) + if source == 'running': + cmd = 'show run-config commands' + else: + cmd = 'show run-config startup-commands' + return self.send_command(cmd) + + @enable_mode + def edit_config(self, command): + for cmd in chain(['config'], to_list(command), ['end']): + self.send_command(cmd) + + def get(self, command, prompt=None, answer=None, sendonly=False, newline=True, check_all=False): + return self.send_command(command=command, prompt=prompt, answer=answer, sendonly=sendonly, newline=newline, check_all=check_all) + + def get_capabilities(self): + result = super(Cliconf, self).get_capabilities() + return json.dumps(result) + + def set_cli_prompt_context(self): + """ + Make sure we are in the operational cli mode + :return: None + """ + if self._connection.connected: + self._update_cli_prompt_context(config_context=')#') diff --git a/plugins/cliconf/apconos.py b/plugins/cliconf/apconos.py new file mode 100644 index 0000000000..03c7e7a1de --- /dev/null +++ b/plugins/cliconf/apconos.py @@ -0,0 +1,72 @@ +# (C) 2018 Red Hat Inc. +# Copyright (C) 2019 APCON. +# +# GNU General Public License v3.0+ +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# +# Contains CLIConf Plugin methods for apconos Modules +# APCON Networking + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = ''' +--- +author: "David Li (@davidlee-ap)" +cliconf: apconos +short_description: Use apconos cliconf to run command on APCON network devices +description: + - This apconos plugin provides low level abstraction apis for + sending and receiving CLI commands from APCON network devices. 
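+  # note: parse_version()/parse_model() below are placeholders returning
+  #   empty strings, so only network_os is reliably populated for now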
+''' + +import re +import json + +from itertools import chain + +from ansible.module_utils._text import to_bytes, to_text +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import to_list +from ansible.plugins.cliconf import CliconfBase, enable_mode + + +class Cliconf(CliconfBase): + + def get_device_info(self): + device_info = {} + + device_info['network_os'] = 'apconos' + reply = self.get(b'show version') + data = to_text(reply, errors='surrogate_or_strict').strip() + if data: + device_info['network_os_version'] = self.parse_version(data) + device_info['network_os_model'] = self.parse_model(data) + + return device_info + + def parse_version(self, data): + return "" + + def parse_model(self, data): + return "" + + @enable_mode + def get_config(self, source='running', format='text'): + pass + + @enable_mode + def edit_config(self, command): + for cmd in chain([b'configure terminal'], to_list(command), [b'end']): + self.send_command(cmd) + + def get(self, command, prompt=None, answer=None, sendonly=False, check_all=False): + return self.send_command(command=command, prompt=prompt, answer=answer, sendonly=sendonly, check_all=check_all) + + def get_capabilities(self): + return json.dumps(self.get_device_info()) diff --git a/plugins/cliconf/aruba.py b/plugins/cliconf/aruba.py new file mode 100644 index 0000000000..52c0a974a4 --- /dev/null +++ b/plugins/cliconf/aruba.py @@ -0,0 +1,95 @@ +# +# (c) 2017 Red Hat Inc. +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = ''' +--- +cliconf: aruba +short_description: Use aruba cliconf to run command on Aruba platform +description: + - This aruba plugin provides low level abstraction apis for + sending and receiving CLI commands from Aruba network devices. 
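+  # edit_config wraps the supplied commands in a configuration session:
+  #   configure terminal -> <commands> -> end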
+''' + +import re +import json + +from itertools import chain + +from ansible.module_utils._text import to_bytes, to_text +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import to_list +from ansible.plugins.cliconf import CliconfBase, enable_mode + + +class Cliconf(CliconfBase): + + def get_device_info(self): + device_info = {} + + device_info['network_os'] = 'aruba' + reply = self.get('show version') + data = to_text(reply, errors='surrogate_or_strict').strip() + + match = re.search(r'Version (\S+)', data) + if match: + device_info['network_os_version'] = match.group(1) + + match = re.search(r'^MODEL: (\S+)\),', data, re.M) + if match: + device_info['network_os_model'] = match.group(1) + + reply = self.get('show hostname') + data = to_text(reply, errors='surrogate_or_strict').strip() + + match = re.search(r'^Hostname is (.+)', data, re.M) + if match: + device_info['network_os_hostname'] = match.group(1) + + return device_info + + @enable_mode + def get_config(self, source='running', format='text', flags=None): + if source not in ('running', 'startup'): + return self.invalid_params("fetching configuration from %s is not supported" % source) + if source == 'running': + cmd = 'show running-config all' + else: + cmd = 'show configuration' + return self.send_command(cmd) + + @enable_mode + def edit_config(self, command): + for cmd in chain(['configure terminal'], to_list(command), ['end']): + self.send_command(cmd) + + def get(self, command, prompt=None, answer=None, sendonly=False, newline=True, check_all=False): + return self.send_command(command=command, prompt=prompt, answer=answer, sendonly=sendonly, newline=newline, check_all=check_all) + + def get_capabilities(self): + result = super(Cliconf, self).get_capabilities() + return json.dumps(result) + + def set_cli_prompt_context(self): + """ + Make sure we are in the operational cli mode + :return: None + """ + if self._connection.connected: + self._update_cli_prompt_context(config_context=')#') diff --git a/plugins/cliconf/ce.py b/plugins/cliconf/ce.py new file mode 100644 index 0000000000..4af008c085 --- /dev/null +++ b/plugins/cliconf/ce.py @@ -0,0 +1,121 @@ +# +# (c) 2017 Red Hat Inc. +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = ''' +--- +cliconf: ce +short_description: Use ce cliconf to run command on HUAWEI CloudEngine platform +description: + - This ce plugin provides low level abstraction apis for + sending and receiving CLI commands from HUAWEI CloudEngine network devices. 
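+  # edit_config returns only the per-command device output; the responses to
+  #   the wrapping 'configure terminal' / 'end' are dropped (results[1:-1])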
+''' + +import re +import json + +from itertools import chain + +from ansible.errors import AnsibleConnectionFailure +from ansible.module_utils._text import to_text +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import to_list +from ansible.plugins.cliconf import CliconfBase, enable_mode + + +class Cliconf(CliconfBase): + + def get_device_info(self): + device_info = {} + + device_info['network_os'] = 'ce' + reply = self.get('display version') + data = to_text(reply, errors='surrogate_or_strict').strip() + + match = re.search(r'^Huawei.+\n.+\Version\s+(\S+)', data) + if match: + device_info['network_os_version'] = match.group(1).strip(',') + + match = re.search(r'^Huawei(.+)\n.+\(\S+\s+\S+\)', data, re.M) + if match: + device_info['network_os_model'] = match.group(1) + + match = re.search(r'HUAWEI\s+(\S+)\s+uptime', data, re.M) + if match: + device_info['network_os_hostname'] = match.group(1) + + return device_info + + @enable_mode + def get_config(self, source='running', format='text', flags=None): + if source not in ('running'): + return self.invalid_params("fetching configuration from %s is not supported" % source) + + if not flags: + flags = [] + + cmd = 'display current-configuration' + + return self.send_command(cmd) + + @enable_mode + def edit_config(self, command): + results = [] + for cmd in chain(['configure terminal'], to_list(command), ['end']): + if isinstance(cmd, dict): + command = cmd['command'] + prompt = cmd['prompt'] + answer = cmd['answer'] + newline = cmd.get('newline', True) + else: + command = cmd + prompt = None + answer = None + newline = True + + results.append(self.send_command(command, prompt, answer, False, newline)) + return results[1:-1] + + def get(self, command, prompt=None, answer=None, sendonly=False, newline=True, check_all=False): + return self.send_command(command=command, prompt=prompt, answer=answer, sendonly=sendonly, newline=newline, check_all=check_all) + + def get_capabilities(self): + result = super(Cliconf, self).get_capabilities() + return json.dumps(result) + + def set_cli_prompt_context(self): + """ + Make sure we are in the operational cli mode + :return: None + """ + if self._connection.connected: + out = self._connection.get_prompt() + + if out is None: + raise AnsibleConnectionFailure(message=u'cli prompt is not identified from the last received' + u' response window: %s' % self._connection._last_recv_window) + + prompt = to_text(out, errors='surrogate_then_replace').strip() + while prompt.endswith(']'): + self._connection.queue_message('vvvv', 'wrong context, sending return to device') + if prompt.startswith('[*'): + self._connection.exec_command('clear configuration candidate') + self._connection.exec_command('return') + out = self._connection.get_prompt() + prompt = to_text(out, errors='surrogate_then_replace').strip() diff --git a/plugins/cliconf/cnos.py b/plugins/cliconf/cnos.py new file mode 100644 index 0000000000..51541861c3 --- /dev/null +++ b/plugins/cliconf/cnos.py @@ -0,0 +1,135 @@ +# (C) 2017 Red Hat Inc. +# Copyright (C) 2017 Lenovo. +# +# GNU General Public License v3.0+ +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. 
+# +# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# +# Contains CLIConf Plugin methods for CNOS Modules +# Lenovo Networking +# +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = ''' +--- +cliconf: cnos +short_description: Use cnos cliconf to run command on Lenovo CNOS platform +description: + - This cnos plugin provides low level abstraction apis for + sending and receiving CLI commands from Lenovo CNOS network devices. +''' + +import re +import json + +from ansible.errors import AnsibleConnectionFailure +from ansible.module_utils.common._collections_compat import Mapping +from ansible.module_utils._text import to_bytes, to_text +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import to_list +from ansible.plugins.cliconf import CliconfBase, enable_mode + + +class Cliconf(CliconfBase): + + def get_device_info(self): + device_info = {} + + device_info['network_os'] = 'cnos' + reply = self.get('show sys-info') + data = to_text(reply, errors='surrogate_or_strict').strip() + host = self.get('show hostname') + hostname = to_text(host, errors='surrogate_or_strict').strip() + if data: + device_info['network_os_version'] = self.parse_version(data) + device_info['network_os_model'] = self.parse_model(data) + device_info['network_os_hostname'] = hostname + + return device_info + + def parse_version(self, data): + for line in data.split('\n'): + line = line.strip() + match = re.match(r'System Software Revision (.*?)', + line, re.M | re.I) + if match: + vers = line.split(':') + ver = vers[1].strip() + return ver + return "NA" + + def parse_model(self, data): + for line in data.split('\n'): + line = line.strip() + match = re.match(r'System Model (.*?)', line, re.M | re.I) + if match: + mdls = line.split(':') + mdl = mdls[1].strip() + return mdl + return "NA" + + @enable_mode + def get_config(self, source='running', format='text', flags=None): + if source not in ('running', 'startup'): + msg = "fetching configuration from %s is not supported" + return self.invalid_params(msg % source) + if source == 'running': + cmd = 'show running-config' + else: + cmd = 'show startup-config' + return self.send_command(cmd) + + @enable_mode + def edit_config(self, candidate=None, commit=True, + replace=None, comment=None): + resp = {} + results = [] + requests = [] + if commit: + self.send_command('configure terminal') + for line in to_list(candidate): + if not isinstance(line, Mapping): + line = {'command': line} + + cmd = line['command'] + if cmd != 'end' and cmd[0] != '!': + results.append(self.send_command(**line)) + requests.append(cmd) + + self.send_command('end') + else: + raise ValueError('check mode is not supported') + + resp['request'] = requests + resp['response'] = results + return resp + + def get(self, command, prompt=None, answer=None, sendonly=False, newline=True, check_all=False): + return self.send_command(command=command, prompt=prompt, answer=answer, sendonly=sendonly, newline=newline, check_all=check_all) + + def get_capabilities(self): + result = super(Cliconf, self).get_capabilities() + return json.dumps(result) + + def set_cli_prompt_context(self): + """ + Make sure we are in the operational cli mode + :return: None + """ + if self._connection.connected: + out = self._connection.get_prompt() + + if out is None: + raise AnsibleConnectionFailure(message=u'cli prompt is not identified from the last received' + u' response window: %s' % self._connection._last_recv_window) + + if 
to_text(out, errors='surrogate_then_replace').strip().endswith(')#'): + self._connection.queue_message('vvvv', 'In Config mode, sending exit to device') + self._connection.send_command('exit') + else: + self._connection.send_command('enable') diff --git a/plugins/cliconf/edgeos.py b/plugins/cliconf/edgeos.py new file mode 100644 index 0000000000..ef55889046 --- /dev/null +++ b/plugins/cliconf/edgeos.py @@ -0,0 +1,114 @@ +# Copyright: (c) 2018, Ansible Project +# GNU General Public License v3.0+ +# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = ''' +--- +cliconf: edgeos +short_description: Use edgeos cliconf to run command on EdgeOS platform +description: + - This edgeos plugin provides low level abstraction apis for + sending and receiving CLI commands from Ubiquiti EdgeOS network devices. +''' + +import re +import json + +from itertools import chain + +from ansible.errors import AnsibleConnectionFailure +from ansible.module_utils._text import to_text +from ansible.module_utils.common._collections_compat import Mapping +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import to_list +from ansible.plugins.cliconf import CliconfBase + + +class Cliconf(CliconfBase): + + def get_device_info(self): + device_info = {} + + device_info['network_os'] = 'edgeos' + reply = self.get('show version') + data = to_text(reply, errors='surrogate_or_strict').strip() + + match = re.search(r'Version:\s*v?(\S+)', data) + if match: + device_info['network_os_version'] = match.group(1) + + match = re.search(r'HW model:\s*(\S+)', data) + if match: + device_info['network_os_model'] = match.group(1) + + reply = self.get('show host name') + device_info['network_os_hostname'] = to_text(reply, errors='surrogate_or_strict').strip() + + return device_info + + def get_config(self, source='running', format='text', flags=None): + return self.send_command('show configuration commands') + + def edit_config(self, candidate=None, commit=True, replace=False, comment=None): + for cmd in chain(['configure'], to_list(candidate)): + self.send_command(cmd) + + def get(self, command, prompt=None, answer=None, sendonly=False, newline=True, check_all=False): + return self.send_command(command=command, prompt=prompt, answer=answer, sendonly=sendonly, newline=newline, check_all=check_all) + + def commit(self, comment=None): + if comment: + command = 'commit comment "{0}"'.format(comment) + else: + command = 'commit' + self.send_command(command) + + def discard_changes(self, *args, **kwargs): + self.send_command('discard') + + def run_commands(self, commands=None, check_rc=True): + if commands is None: + raise ValueError("'commands' value is required") + + responses = list() + for cmd in to_list(commands): + if not isinstance(cmd, Mapping): + cmd = {'command': cmd} + + output = cmd.pop('output', None) + if output: + raise ValueError("'output' value %s is not supported for run_commands" % output) + + try: + out = self.send_command(**cmd) + except AnsibleConnectionFailure as e: + if check_rc: + raise + out = getattr(e, 'err', e) + + responses.append(out) + + return responses + + def get_device_operations(self): + return { + 'supports_diff_replace': False, + 'supports_commit': True, + 'supports_rollback': False, + 'supports_defaults': False, + 'supports_onbox_diff': False, + 'supports_commit_comment': True, + 'supports_multiline_delimiter': False, + 'supports_diff_match': False, + 
'supports_diff_ignore_lines': False, + 'supports_generate_diff': False, + 'supports_replace': False + } + + def get_capabilities(self): + result = super(Cliconf, self).get_capabilities() + result['rpc'] += ['commit', 'discard_changes', 'run_commands'] + result['device_operations'] = self.get_device_operations() + return json.dumps(result) diff --git a/plugins/cliconf/edgeswitch.py b/plugins/cliconf/edgeswitch.py new file mode 100644 index 0000000000..eb99f23674 --- /dev/null +++ b/plugins/cliconf/edgeswitch.py @@ -0,0 +1,141 @@ +# +# (c) 2018 Red Hat Inc. +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = ''' +--- +cliconf: edgeswitch +short_description: Use edgeswitch cliconf to run command on EdgeSwitch platform +description: + - This edgeswitch plugin provides low level abstraction apis for + sending and receiving CLI commands from Ubiquiti EdgeSwitch network devices. +''' + +import re +import time +import json + +from itertools import chain + +from ansible.errors import AnsibleConnectionFailure +from ansible.module_utils._text import to_text +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.config import dumps +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import to_list +from ansible.plugins.cliconf import CliconfBase, enable_mode +from ansible.module_utils.common._collections_compat import Mapping + + +class Cliconf(CliconfBase): + + def get_device_info(self): + device_info = {} + + device_info['network_os'] = 'edgeswitch' + reply = self.get(command='show version') + data = to_text(reply, errors='surrogate_or_strict').strip() + + match = re.search(r'Software Version\.+ (.*)', data) + if match: + device_info['network_os_version'] = match.group(1).strip(',') + + match = re.search(r'^Machine Model\.+ (.*)', data, re.M) + if match: + device_info['network_os_model'] = match.group(1) + + match = re.search(r'System Name\.+ (.*)', data, re.M) + if match: + device_info['network_os_hostname'] = match.group(1) + + return device_info + + @enable_mode + def get_config(self, source='running', flags=None): + if source not in ('running', 'startup'): + raise ValueError("fetching configuration from %s is not supported" % source) + + if source == 'running': + cmd = 'show running-config ' + else: + cmd = 'show startup-config ' + + if flags: + cmd += ' '.join(to_list(flags)) + cmd = cmd.strip() + + return self.send_command(cmd) + + @enable_mode + def edit_config(self, commands): + resp = {} + + results = [] + requests = [] + self.send_command('configure') + for line in to_list(commands): + if not isinstance(line, Mapping): + line = {'command': line} + + cmd = line['command'] + if cmd != 'end' and cmd[0] != '!': + results.append(self.send_command(**line)) + requests.append(cmd) + + self.send_command('end') + + resp['request'] = requests + 
resp['response'] = results + return resp + + def get(self, command=None, prompt=None, answer=None, sendonly=False, output=None, newline=True, check_all=False): + if not command: + raise ValueError('must provide value of command to execute') + if output: + raise ValueError("'output' value %s is not supported for get" % output) + + return self.send_command(command=command, prompt=prompt, answer=answer, sendonly=sendonly, newline=newline, check_all=check_all) + + def get_capabilities(self): + result = super(Cliconf, self).get_capabilities() + result['rpc'] += ['run_commands'] + return json.dumps(result) + + def run_commands(self, commands=None, check_rc=True): + if commands is None: + raise ValueError("'commands' value is required") + + responses = list() + for cmd in to_list(commands): + if not isinstance(cmd, Mapping): + cmd = {'command': cmd} + + output = cmd.pop('output', None) + if output: + raise ValueError("'output' value %s is not supported for run_commands" % output) + + try: + out = self.send_command(**cmd) + except AnsibleConnectionFailure as e: + if check_rc: + raise + out = getattr(e, 'err', e) + + responses.append(out) + + return responses diff --git a/plugins/cliconf/enos.py b/plugins/cliconf/enos.py new file mode 100644 index 0000000000..225409f5cd --- /dev/null +++ b/plugins/cliconf/enos.py @@ -0,0 +1,103 @@ +# (C) 2017 Red Hat Inc. +# Copyright (C) 2017 Lenovo. +# +# GNU General Public License v3.0+ +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# +# Contains CLIConf Plugin methods for ENOS Modules +# Lenovo Networking +# +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = ''' +--- +cliconf: enos +short_description: Use enos cliconf to run command on Lenovo ENOS platform +description: + - This enos plugin provides low level abstraction apis for + sending and receiving CLI commands from Lenovo ENOS network devices. +''' + +import re +import json + +from itertools import chain + +from ansible.errors import AnsibleConnectionFailure +from ansible.module_utils._text import to_text +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import to_list +from ansible.plugins.cliconf import CliconfBase, enable_mode + + +class Cliconf(CliconfBase): + + def get_device_info(self): + device_info = {} + + device_info['network_os'] = 'enos' + reply = self.get('show version') + data = to_text(reply, errors='surrogate_or_strict').strip() + + match = re.search(r'^Software Version (.*?) 
', data, re.M | re.I) + if match: + device_info['network_os_version'] = match.group(1) + + match = re.search(r'^Lenovo RackSwitch (\S+)', data, re.M | re.I) + if match: + device_info['network_os_model'] = match.group(1) + + match = re.search(r'^(.+) uptime', data, re.M) + if match: + device_info['network_os_hostname'] = match.group(1) + else: + device_info['network_os_hostname'] = "NA" + + return device_info + + @enable_mode + def get_config(self, source='running', format='text', flags=None): + if source not in ('running', 'startup'): + msg = "fetching configuration from %s is not supported" + return self.invalid_params(msg % source) + if source == 'running': + cmd = 'show running-config' + else: + cmd = 'show startup-config' + return self.send_command(cmd) + + @enable_mode + def edit_config(self, command): + for cmd in chain(['configure terminal'], to_list(command), ['end']): + self.send_command(cmd) + + def get(self, command, prompt=None, answer=None, sendonly=False, newline=True, check_all=False): + return self.send_command(command=command, prompt=prompt, answer=answer, sendonly=sendonly, newline=newline, check_all=check_all) + + def get_capabilities(self): + result = super(Cliconf, self).get_capabilities() + return json.dumps(result) + + def set_cli_prompt_context(self): + """ + Make sure we are in the operational cli mode + :return: None + """ + if self._connection.connected: + out = self._connection.get_prompt() + + if out is None: + raise AnsibleConnectionFailure(message=u'cli prompt is not identified from the last received' + u' response window: %s' % self._connection._last_recv_window) + + if to_text(out, errors='surrogate_then_replace').strip().endswith(')#'): + self._connection.queue_message('vvvv', 'In Config mode, sending exit to device') + self._connection.send_command('exit') + else: + self._connection.send_command('enable') diff --git a/plugins/cliconf/eric_eccli.py b/plugins/cliconf/eric_eccli.py new file mode 100644 index 0000000000..13c5f9edc3 --- /dev/null +++ b/plugins/cliconf/eric_eccli.py @@ -0,0 +1,97 @@ +# +# Copyright (c) 2019 Ericsson AB. +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = ''' +--- +author: Ericsson IPOS OAM team +cliconf: eccli +short_description: Use eccli cliconf to run command on Ericsson ECCLI platform +description: + - This eccli plugin provides low level abstraction APIs for + sending and receiving CLI commands from Ericsson ECCLI network devices. 
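+  # run_commands accepts plain strings or dicts that are passed through to
+  #   send_command(); an illustrative mix (command names hypothetical):
+  #   ['show version', {'command': 'show clock', 'sendonly': False}]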
+''' + +from ansible.module_utils.common._collections_compat import Mapping +import collections +import re +import time +import json + +from itertools import chain + +from ansible.errors import AnsibleConnectionFailure +from ansible.module_utils._text import to_text +from ansible.module_utils.six import iteritems +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.config import NetworkConfig, dumps +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import to_list +from ansible.plugins.cliconf import CliconfBase, enable_mode + + +class Cliconf(CliconfBase): + + def get_config(self, source='running', flags=None, format=None): + return + + def edit_config(self, candidate=None, commit=True, replace=None, comment=None): + return + + def get(self, command=None, prompt=None, answer=None, sendonly=False, output=None, newline=True, check_all=False): + if not command: + raise ValueError('must provide value of command to execute') + if output: + raise ValueError("'output' value %s is not supported for get" % output) + + return self.send_command(command=command, prompt=prompt, answer=answer, sendonly=sendonly, newline=newline, check_all=check_all) + + def get_device_info(self): + device_info = {} + device_info['network_os'] = 'eric_eccli' + return device_info + + def get_capabilities(self): + result = dict() + result['rpc'] = self.get_base_rpc() + ['run_commands'] + result['network_api'] = 'cliconf' + result['device_info'] = self.get_device_info() + return json.dumps(result) + + def run_commands(self, commands=None, check_rc=True): + if commands is None: + raise ValueError("'commands' value is required") + + responses = list() + for cmd in to_list(commands): + if not isinstance(cmd, Mapping): + cmd = {'command': cmd} + + output = cmd.pop('output', None) + if output: + raise ValueError("'output' value %s is not supported for run_commands" % output) + try: + out = self.send_command(**cmd) + except AnsibleConnectionFailure as e: + if check_rc: + raise + out = getattr(e, 'err', e) + + responses.append(out) + + return responses diff --git a/plugins/cliconf/exos.py b/plugins/cliconf/exos.py new file mode 100644 index 0000000000..c9ca494688 --- /dev/null +++ b/plugins/cliconf/exos.py @@ -0,0 +1,229 @@ +# +# (c) 2017 Red Hat Inc. +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = ''' +--- +cliconf: exos +short_description: Use exos cliconf to run command on Extreme EXOS platform +description: + - This exos plugin provides low level abstraction apis for + sending and receiving CLI commands from Extreme EXOS network devices. 
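+  # e.g. get(command='show vlan', output='json') rewrites the command to
+  #   'run script cli2json.py show vlan' (see _get_command_with_output below)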
+''' + +import re +import json + +from ansible.errors import AnsibleConnectionFailure +from ansible.module_utils._text import to_bytes, to_text +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import to_list +from ansible.module_utils.connection import ConnectionError +from ansible.module_utils.common._collections_compat import Mapping +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.config import NetworkConfig, dumps +from ansible.plugins.cliconf import CliconfBase + + +class Cliconf(CliconfBase): + + def get_diff(self, candidate=None, running=None, diff_match='line', diff_ignore_lines=None, path=None, diff_replace='line'): + diff = {} + device_operations = self.get_device_operations() + option_values = self.get_option_values() + + if candidate is None and device_operations['supports_generate_diff']: + raise ValueError("candidate configuration is required to generate diff") + + if diff_match not in option_values['diff_match']: + raise ValueError("'match' value %s in invalid, valid values are %s" % (diff_match, ', '.join(option_values['diff_match']))) + + if diff_replace not in option_values['diff_replace']: + raise ValueError("'replace' value %s in invalid, valid values are %s" % (diff_replace, ', '.join(option_values['diff_replace']))) + + # prepare candidate configuration + candidate_obj = NetworkConfig(indent=1) + candidate_obj.load(candidate) + + if running and diff_match != 'none' and diff_replace != 'config': + # running configuration + running_obj = NetworkConfig(indent=1, contents=running, ignore_lines=diff_ignore_lines) + configdiffobjs = candidate_obj.difference(running_obj, path=path, match=diff_match, replace=diff_replace) + + else: + configdiffobjs = candidate_obj.items + + diff['config_diff'] = dumps(configdiffobjs, 'commands') if configdiffobjs else '' + return diff + + def get_device_info(self): + device_info = {} + device_info['network_os'] = 'exos' + + reply = self.run_commands({'command': 'show switch detail', 'output': 'text'}) + data = to_text(reply, errors='surrogate_or_strict').strip() + + match = re.search(r'ExtremeXOS version (\S+)', data) + if match: + device_info['network_os_version'] = match.group(1) + + match = re.search(r'System Type: +(\S+)', data) + if match: + device_info['network_os_model'] = match.group(1) + + match = re.search(r'SysName: +(\S+)', data) + if match: + device_info['network_os_hostname'] = match.group(1) + + return device_info + + def get_default_flag(self): + # The flag to modify the command to collect configuration with defaults + return 'detail' + + def get_config(self, source='running', format='text', flags=None): + options_values = self.get_option_values() + if format not in options_values['format']: + raise ValueError("'format' value %s is invalid. 
Valid values are %s" % (format, ','.join(options_values['format']))) + + lookup = {'running': 'show configuration', 'startup': 'debug cfgmgr show configuration file'} + if source not in lookup: + raise ValueError("fetching configuration from %s is not supported" % source) + + cmd = {'command': lookup[source], 'output': 'text'} + + if source == 'startup': + reply = self.run_commands({'command': 'show switch', 'format': 'text'}) + data = to_text(reply, errors='surrogate_or_strict').strip() + match = re.search(r'Config Selected: +(\S+)\.cfg', data, re.MULTILINE) + if match: + cmd['command'] += match.group(1) + else: + # No Startup(/Selected) Config + return {} + + cmd['command'] += ' '.join(to_list(flags)) + cmd['command'] = cmd['command'].strip() + + return self.run_commands(cmd)[0] + + def edit_config(self, candidate=None, commit=True, replace=None, diff=False, comment=None): + resp = {} + operations = self.get_device_operations() + self.check_edit_config_capability(operations, candidate, commit, replace, comment) + results = [] + requests = [] + + if commit: + for line in to_list(candidate): + if not isinstance(line, Mapping): + line = {'command': line} + results.append(self.send_command(**line)) + requests.append(line['command']) + else: + raise ValueError('check mode is not supported') + + resp['request'] = requests + resp['response'] = results + return resp + + def get(self, command, prompt=None, answer=None, sendonly=False, output=None, newline=True, check_all=False): + if output: + command = self._get_command_with_output(command, output) + return self.send_command(command=command, prompt=prompt, answer=answer, sendonly=sendonly, newline=newline, check_all=check_all) + + def run_commands(self, commands=None, check_rc=True): + if commands is None: + raise ValueError("'commands' value is required") + + responses = list() + for cmd in to_list(commands): + if not isinstance(cmd, Mapping): + cmd = {'command': cmd} + + output = cmd.pop('output', None) + if output: + cmd['command'] = self._get_command_with_output(cmd['command'], output) + + try: + out = self.send_command(**cmd) + except AnsibleConnectionFailure as e: + if check_rc is True: + raise + out = getattr(e, 'err', e) + + if out is not None: + try: + out = to_text(out, errors='surrogate_or_strict').strip() + except UnicodeError: + raise ConnectionError(message=u'Failed to decode output from %s: %s' % (cmd, to_text(out))) + + if output and output == 'json': + try: + out = json.loads(out) + except ValueError: + raise ConnectionError('Response was not valid JSON, got {0}'.format( + to_text(out) + )) + responses.append(out) + + return responses + + def get_device_operations(self): + return { + 'supports_diff_replace': False, # identify if config should be merged or replaced is supported + 'supports_commit': False, # identify if commit is supported by device or not + 'supports_rollback': False, # identify if rollback is supported or not + 'supports_defaults': True, # identify if fetching running config with default is supported + 'supports_commit_comment': False, # identify if adding comment to commit is supported of not + 'supports_onbox_diff': False, # identify if on box diff capability is supported or not + 'supports_generate_diff': True, # identify if diff capability is supported within plugin + 'supports_multiline_delimiter': False, # identify if multiline delimiter is supported within config + 'supports_diff_match': True, # identify if match is supported + 'supports_diff_ignore_lines': True, # identify if ignore line in diff is 
supported + 'supports_config_replace': False, # identify if running config replace with candidate config is supported + 'supports_admin': False, # identify if admin configure mode is supported or not + 'supports_commit_label': False, # identify if commit label is supported or not + 'supports_replace': False + } + + def get_option_values(self): + return { + 'format': ['text', 'json'], + 'diff_match': ['line', 'strict', 'exact', 'none'], + 'diff_replace': ['line', 'block'], + 'output': ['text', 'json'] + } + + def get_capabilities(self): + result = super(Cliconf, self).get_capabilities() + result['rpc'] += ['run_commmands', 'get_default_flag', 'get_diff'] + result['device_operations'] = self.get_device_operations() + result['device_info'] = self.get_device_info() + result.update(self.get_option_values()) + return json.dumps(result) + + def _get_command_with_output(self, command, output): + if output not in self.get_option_values().get('output'): + raise ValueError("'output' value is %s is invalid. Valid values are %s" % (output, ','.join(self.get_option_values().get('output')))) + + if output == 'json' and not command.startswith('run script cli2json.py'): + cmd = 'run script cli2json.py %s' % command + else: + cmd = command + return cmd diff --git a/plugins/cliconf/icx.py b/plugins/cliconf/icx.py new file mode 100644 index 0000000000..4ce6e5d130 --- /dev/null +++ b/plugins/cliconf/icx.py @@ -0,0 +1,314 @@ +# Copyright: (c) 2019, Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = ''' +--- +author: Ruckus Wireless (@Commscope) +cliconf: icx +short_description: Use icx cliconf to run command on Ruckus ICX platform +description: + - This icx plugin provides low level abstraction APIs for + sending and receiving CLI commands from Ruckus ICX network devices. +''' + + +import re +import time +import json +import os + +from itertools import chain +from ansible.errors import AnsibleConnectionFailure +from ansible.module_utils._text import to_text +from ansible.module_utils.six import iteritems +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.config import NetworkConfig, dumps +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import to_list +from ansible.plugins.cliconf import CliconfBase, enable_mode +from ansible.module_utils.common._collections_compat import Mapping + + +class Cliconf(CliconfBase): + + @enable_mode + def get_config(self, source='running', flags=None, format=None, compare=None): + if source not in ('running', 'startup'): + raise ValueError("fetching configuration from %s is not supported" % source) + + if format: + raise ValueError("'format' value %s is not supported for get_config" % format) + + if not flags: + flags = [] + + if compare is False: + return '' + else: + if source == 'running': + cmd = 'show running-config ' + else: + cmd = 'show configuration ' + + cmd += ' '.join(to_list(flags)) + cmd = cmd.strip() + + return self.send_command(cmd) + + def get_diff(self, candidate=None, running=None, diff_match='line', diff_ignore_lines=None, path=None, diff_replace='line'): + """ + Generate diff between candidate and running configuration. If the + remote host supports onbox diff capabilities ie. supports_onbox_diff in that case + candidate and running configurations are not required to be passed as argument. 
+ In case if onbox diff capability is not supported candidate argument is mandatory + and running argument is optional. + :param candidate: The configuration which is expected to be present on remote host. + :param running: The base configuration which is used to generate diff. + :param diff_match: Instructs how to match the candidate configuration with current device configuration + Valid values are 'line', 'strict', 'exact', 'none'. + 'line' - commands are matched line by line + 'strict' - command lines are matched with respect to position + 'exact' - command lines must be an equal match + 'none' - will not compare the candidate configuration with the running configuration + :param diff_ignore_lines: Use this argument to specify one or more lines that should be + ignored during the diff. This is used for lines in the configuration + that are automatically updated by the system. This argument takes + a list of regular expressions or exact line matches. + :param path: The ordered set of parents that uniquely identify the section or hierarchy + the commands should be checked against. If the parents argument + is omitted, the commands are checked against the set of top + level or global commands. + :param diff_replace: Instructs on the way to perform the configuration on the device. + If the replace argument is set to I(line) then the modified lines are + pushed to the device in configuration mode. If the replace argument is + set to I(block) then the entire command block is pushed to the device in + configuration mode if any line is not correct. + :return: Configuration diff in json format. + { + 'config_diff': '', + 'banner_diff': {} + } + + """ + diff = {} + device_operations = self.get_device_operations() + option_values = self.get_option_values() + + if candidate is None and device_operations['supports_generate_diff']: + raise ValueError("candidate configuration is required to generate diff") + + if diff_match not in option_values['diff_match']: + raise ValueError("'match' value %s in invalid, valid values are %s" % (diff_match, ', '.join(option_values['diff_match']))) + + if diff_replace not in option_values['diff_replace']: + raise ValueError("'replace' value %s in invalid, valid values are %s" % (diff_replace, ', '.join(option_values['diff_replace']))) + + # prepare candidate configuration + candidate_obj = NetworkConfig(indent=1) + want_src, want_banners = self._extract_banners(candidate) + candidate_obj.load(want_src) + + if running and diff_match != 'none': + # running configuration + have_src, have_banners = self._extract_banners(running) + + running_obj = NetworkConfig(indent=1, contents=have_src, ignore_lines=diff_ignore_lines) + configdiffobjs = candidate_obj.difference(running_obj, path=path, match=diff_match, replace=diff_replace) + + else: + configdiffobjs = candidate_obj.items + have_banners = {} + + diff['config_diff'] = dumps(configdiffobjs, 'commands') if configdiffobjs else '' + + banners = self._diff_banners(want_banners, have_banners) + diff['banner_diff'] = banners if banners else {} + return diff + + @enable_mode + def edit_config(self, candidate=None, commit=True, replace=None, comment=None): + resp = {} + operations = self.get_device_operations() + self.check_edit_config_capability(operations, candidate, commit, replace, comment) + + results = [] + requests = [] + if commit: + prompt = self._connection.get_prompt() + if (b'(config-if' in prompt) or (b'(config' in prompt) or (b'(config-lag-if' in prompt): + self.send_command('end') + + self.send_command('configure 
terminal') + + for line in to_list(candidate): + if not isinstance(line, Mapping): + line = {'command': line} + + cmd = line['command'] + if cmd != 'end' and cmd[0] != '!': + results.append(self.send_command(**line)) + requests.append(cmd) + + self.send_command('end') + else: + raise ValueError('check mode is not supported') + + resp['request'] = requests + resp['response'] = results + return resp + + def get(self, command=None, prompt=None, answer=None, sendonly=False, output=None, check_all=False): + if not command: + raise ValueError('must provide value of command to execute') + if output: + raise ValueError("'output' value %s is not supported for get" % output) + + return self.send_command(command=command, prompt=prompt, answer=answer, sendonly=sendonly, check_all=check_all) + + def scp(self, command=None, scp_user=None, scp_pass=None): + if not command: + raise ValueError('must provide value of command to execute') + prompt = ["User name:", "Password:"] + if(scp_pass is None): + answer = [scp_user, self._connection._play_context.password] + else: + answer = [scp_user, scp_pass] + return self.send_command(command=command, prompt=prompt, answer=answer, sendonly=False, check_all=True) + + def get_device_info(self): + device_info = {} + + device_info['network_os'] = 'icx' + reply = self.get(command='show version') + data = to_text(reply, errors='surrogate_or_strict').strip() + + match = re.search(r'Version (\S+)', data) + if match: + device_info['network_os_version'] = match.group(1).strip(',') + + match = re.search(r'^Cisco (.+) \(revision', data, re.M) + if match: + device_info['network_os_model'] = match.group(1) + + match = re.search(r'^(.+) uptime', data, re.M) + if match: + device_info['network_os_hostname'] = match.group(1) + + return device_info + + def get_device_operations(self): + return { + 'supports_diff_replace': True, + 'supports_commit': False, + 'supports_rollback': False, + 'supports_defaults': True, + 'supports_onbox_diff': False, + 'supports_commit_comment': False, + 'supports_multiline_delimiter': True, + 'supports_diff_match': True, + 'supports_diff_ignore_lines': True, + 'supports_generate_diff': True, + 'supports_replace': False + } + + def get_option_values(self): + return { + 'format': ['text'], + 'diff_match': ['line', 'strict', 'exact', 'none'], + 'diff_replace': ['line', 'block'], + 'output': [] + } + + def get_capabilities(self): + result = dict() + result['rpc'] = self.get_base_rpc() + ['edit_banner', 'get_diff', 'run_commands', 'get_defaults_flag'] + result['network_api'] = 'cliconf' + result['device_operations'] = self.get_device_operations() + result.update(self.get_option_values()) + return json.dumps(result) + + def edit_banner(self, candidate=None, multiline_delimiter="@", commit=True): + """ + Edit banner on remote device + :param banners: Banners to be loaded in json format + :param multiline_delimiter: Line delimiter for banner + :param commit: Boolean value that indicates if the device candidate + configuration should be pushed in the running configuration or discarded. 
+ :param diff: Boolean flag to indicate if configuration that is applied on remote host should + generated and returned in response or not + :return: Returns response of executing the configuration command received + from remote host + """ + resp = {} + banners_obj = json.loads(candidate) + results = [] + requests = [] + if commit: + for key, value in iteritems(banners_obj): + key += ' %s' % multiline_delimiter + self.send_command('config terminal', sendonly=True) + for cmd in [key, value, multiline_delimiter]: + obj = {'command': cmd, 'sendonly': True} + results.append(self.send_command(**obj)) + requests.append(cmd) + + self.send_command('end', sendonly=True) + time.sleep(0.1) + results.append(self.send_command('\n')) + requests.append('\n') + + resp['request'] = requests + resp['response'] = results + + return resp + + def run_commands(self, commands=None, check_rc=True): + if commands is None: + raise ValueError("'commands' value is required") + + responses = list() + for cmd in to_list(commands): + if not isinstance(cmd, Mapping): + cmd = {'command': cmd} + + output = cmd.pop('output', None) + if output: + raise ValueError("'output' value %s is not supported for run_commands" % output) + + try: + out = self.send_command(**cmd) + except AnsibleConnectionFailure as e: + if check_rc: + raise + out = getattr(e, 'err', to_text(e)) + + responses.append(out) + + return responses + + def _extract_banners(self, config): + banners = {} + banner_cmds = re.findall(r'^banner (\w+)', config, re.M) + for cmd in banner_cmds: + regex = r'banner %s \$(.+?)(?=\$)' % cmd + match = re.search(regex, config, re.S) + if match: + key = 'banner %s' % cmd + banners[key] = match.group(1).strip() + + for cmd in banner_cmds: + regex = r'banner %s \$(.+?)(?=\$)' % cmd + match = re.search(regex, config, re.S) + if match: + config = config.replace(str(match.group(1)), '') + + config = re.sub(r'banner \w+ \$\$', '!! banner removed', config) + return config, banners + + def _diff_banners(self, want, have): + candidate = {} + for key, value in iteritems(want): + if value != have.get(key): + candidate[key] = value + return candidate diff --git a/plugins/cliconf/ironware.py b/plugins/cliconf/ironware.py new file mode 100644 index 0000000000..cb476f924d --- /dev/null +++ b/plugins/cliconf/ironware.py @@ -0,0 +1,95 @@ +# +# (c) 2017 Red Hat Inc. +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = ''' +--- +cliconf: ironware +short_description: Use ironware cliconf to run command on Extreme Ironware platform +description: + - This ironware plugin provides low level abstraction apis for + sending and receiving CLI commands from Extreme Ironware network devices. 
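+  # note: get_config honours 'flags' only with source=running; passing them
+  #   with source=startup ('show configuration') raises ValueError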
+''' + +import re +import json + +from itertools import chain + +from ansible.module_utils._text import to_bytes, to_text +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import to_list +from ansible.plugins.cliconf import CliconfBase, enable_mode + + +class Cliconf(CliconfBase): + + def get_device_info(self): + device_info = {} + + device_info['network_os'] = 'ironware' + reply = self.send_command('show version') + data = to_text(reply, errors='surrogate_or_strict').strip() + + match = re.search(r'IronWare : Version (\S+),', data) + if match: + device_info['network_os_version'] = match.group(1) + + match = re.search(r'^(?:System Mode\:|System\:) (CES|CER|MLX|XMR)', data, re.M) + if match: + device_info['network_os_model'] = match.group(1) + + return device_info + + @enable_mode + def get_config(self, source='running', format='text', flags=None): + if source not in ('running', 'startup'): + raise ValueError("fetching configuration from %s is not supported" % source) + + if source == 'running': + cmd = 'show running-config' + if flags is not None: + cmd += ' ' + ' '.join(flags) + + else: + cmd = 'show configuration' + if flags is not None: + raise ValueError("flags are only supported with running-config") + + return self.send_command(cmd) + + @enable_mode + def edit_config(self, command): + for cmd in chain(['configure terminal'], to_list(command), ['end']): + self.send_command(cmd) + + def get(self, command, prompt=None, answer=None, sendonly=False, newline=True, check_all=False): + return self.send_command(command=command, prompt=prompt, answer=answer, sendonly=sendonly, newline=newline, check_all=check_all) + + def get_capabilities(self): + result = super(Cliconf, self).get_capabilities() + return json.dumps(result) + + def set_cli_prompt_context(self): + """ + Make sure we are in the operational cli mode + :return: None + """ + if self._connection.connected: + self._update_cli_prompt_context(config_context=')#') diff --git a/plugins/cliconf/netvisor.py b/plugins/cliconf/netvisor.py new file mode 100644 index 0000000000..51301531ce --- /dev/null +++ b/plugins/cliconf/netvisor.py @@ -0,0 +1,74 @@ +# +# (c) 2016 Red Hat Inc. +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = ''' +--- +cliconf: netvisor +short_description: Use netvisor cliconf to run command on Pluribus netvisor platform +description: + - This netvisor plugin provides low level abstraction apis for + sending and receiving CLI commands from Pluribus netvisor devices. 
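+# Illustrative sketch (editor's addition, not part of the upstream plugin):
+# the get_capabilities() implementation below serializes a structure roughly
+# like the following, so callers must json.loads() the returned string:
+#
+#   {"rpc": ["get_config", "edit_config", "..."],
+#    "network_api": "cliconf",
+#    "device_info": {"network_os": "netvisor"},
+#    "format": ["text"],
+#    "diff_match": ["line", "strict", "exact", "none"],
+#    "diff_replace": ["line", "block"],
+#    "output": []}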
+'''
+
+import json
+from ansible.plugins.cliconf import CliconfBase
+
+
+class Cliconf(CliconfBase):
+
+    def get_config(self, source='running', format='text', flags=None):
+        # note: the membership test needs a tuple; a bare ('running') is just
+        # a parenthesized string and would do a substring check instead
+        if source not in ('running',):
+            raise ValueError("fetching configuration from %s is not supported" % source)
+        cmd = 'show running-config'
+        return self.send_command(cmd)
+
+    def edit_config(self, command):
+        return
+
+    def get(self, command=None, prompt=None, answer=None, sendonly=False, output=None, newline=True, check_all=False):
+        if not command:
+            raise ValueError('must provide value of command to execute')
+        if output:
+            raise ValueError("'output' value %s is not supported for get" % output)
+
+        return self.send_command(command=command, prompt=prompt, answer=answer, sendonly=sendonly, newline=newline, check_all=check_all)
+
+    def get_option_values(self):
+        return {
+            'format': ['text'],
+            'diff_match': ['line', 'strict', 'exact', 'none'],
+            'diff_replace': ['line', 'block'],
+            'output': []
+        }
+
+    def get_capabilities(self):
+        result = dict()
+        result['rpc'] = self.get_base_rpc()
+        result['network_api'] = 'cliconf'
+        result['device_info'] = self.get_device_info()
+        result.update(self.get_option_values())
+        return json.dumps(result)
+
+    def get_device_info(self):
+        device_info = {}
+        device_info['network_os'] = 'netvisor'
+
+        return device_info
diff --git a/plugins/cliconf/nos.py b/plugins/cliconf/nos.py
new file mode 100644
index 0000000000..b1f922ac55
--- /dev/null
+++ b/plugins/cliconf/nos.py
@@ -0,0 +1,112 @@
+#
+# (c) 2018 Extreme Networks Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible.  If not, see <https://www.gnu.org/licenses/>.
+#
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+cliconf: nos
+short_description: Use nos cliconf to run command on Extreme NOS platform
+description:
+  - This nos plugin provides low level abstraction apis for
+    sending and receiving CLI commands from Extreme NOS network devices.
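+# Illustrative sketch (editor's addition, not part of the upstream plugin):
+# get_device_info() below condenses three show commands into one dict; the
+# values shown are hypothetical:
+#
+#   {'network_os': 'nos',
+#    'network_os_version': '7.2.0',
+#    'network_os_model': 'VDX6740',
+#    'network_os_hostname': 'sw0'}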
+'''
+
+import re
+import json
+
+from ansible.module_utils._text import to_text
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import to_list
+from ansible.plugins.cliconf import CliconfBase
+
+
+class Cliconf(CliconfBase):
+
+    def get_device_info(self):
+        device_info = {}
+
+        device_info['network_os'] = 'nos'
+        reply = self.get('show version')
+        data = to_text(reply, errors='surrogate_or_strict').strip()
+
+        match = re.search(r'Network Operating System Version: (\S+)', data)
+        if match:
+            device_info['network_os_version'] = match.group(1)
+
+        reply = self.get('show chassis')
+        data = to_text(reply, errors='surrogate_or_strict').strip()
+
+        match = re.search(r'^Chassis Name:(\s+)(\S+)', data, re.M)
+        if match:
+            device_info['network_os_model'] = match.group(2)
+
+        reply = self.get('show running-config | inc "switch-attributes host-name"')
+        data = to_text(reply, errors='surrogate_or_strict').strip()
+
+        match = re.search(r'switch-attributes host-name (\S+)', data, re.M)
+        if match:
+            device_info['network_os_hostname'] = match.group(1)
+
+        return device_info
+
+    def get_config(self, source='running', flags=None):
+        # membership test against a tuple; a bare string here would silently
+        # do a substring check instead
+        if source not in ('running',):
+            raise ValueError("fetching configuration from %s is not supported" % source)
+        if source == 'running':
+            cmd = 'show running-config'
+
+        flags = [] if flags is None else flags
+        cmd += ' ' + ' '.join(flags)
+        cmd = cmd.strip()
+
+        return self.send_command(cmd)
+
+    def edit_config(self, command):
+        resp = {}
+        results = []
+        requests = []
+        self.send_command('configure terminal')
+        for cmd in to_list(command):
+            if isinstance(cmd, dict):
+                command = cmd['command']
+                prompt = cmd['prompt']
+                answer = cmd['answer']
+                newline = cmd.get('newline', True)
+            else:
+                command = cmd
+                prompt = None
+                answer = None
+                newline = True
+
+            # compare the extracted command string; comparing the raw item
+            # would fail with a KeyError when a dict command is passed
+            if command != 'end' and command[0] != '!':
+                results.append(self.send_command(command, prompt, answer, False, newline))
+                requests.append(command)
+
+        self.send_command('end')
+
+        resp['request'] = requests
+        resp['response'] = results
+        return resp
+
+    def get(self, command, prompt=None, answer=None, sendonly=False, newline=True, check_all=False):
+        return self.send_command(command=command, prompt=prompt, answer=answer, sendonly=sendonly, newline=newline, check_all=check_all)
+
+    def get_capabilities(self):
+        result = super(Cliconf, self).get_capabilities()
+        return json.dumps(result)
diff --git a/plugins/cliconf/onyx.py b/plugins/cliconf/onyx.py
new file mode 100644
index 0000000000..af7690efb5
--- /dev/null
+++ b/plugins/cliconf/onyx.py
@@ -0,0 +1,77 @@
+#
+# (c) 2017 Red Hat Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible.  If not, see <https://www.gnu.org/licenses/>.
+# +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = ''' +--- +cliconf: onyx +short_description: Use onyx cliconf to run command on Mellanox ONYX platform +description: + - This onyx plugin provides low level abstraction apis for + sending and receiving CLI commands from Mellanox ONYX network devices. +''' + +import json + +from itertools import chain + +from ansible.module_utils._text import to_text +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import to_list +from ansible.plugins.cliconf import CliconfBase, enable_mode + + +class Cliconf(CliconfBase): + + def get_device_info(self): + device_info = {} + + reply = self.get('show version | json-print') + data = json.loads(reply) + device_info['network_os'] = data['Product name'] + device_info['network_os_version'] = data['Product release'] + device_info['network_os_version_summary'] = data['Version summary'] + device_info['network_os_model'] = data['Product model'] + + reply = self.get('show hosts | include Hostname') + data = to_text(reply, errors='surrogate_or_strict').strip() + hostname = data.split(':')[1] + hostname = hostname.strip() + device_info['network_os_hostname'] = hostname + + return device_info + + @enable_mode + def get_config(self, source='running', format='text', flags=None): + if source not in ('running',): + return self.invalid_params("fetching configuration from %s is not supported" % source) + cmd = 'show running-config' + return self.send_command(cmd) + + @enable_mode + def edit_config(self, command): + for cmd in chain(['configure terminal'], to_list(command), ['exit']): + self.send_command(cmd) + + def get(self, command, prompt=None, answer=None, sendonly=False, newline=True, check_all=False): + return self.send_command(command=command, prompt=prompt, answer=answer, sendonly=sendonly, newline=newline, check_all=check_all) + + def get_capabilities(self): + result = super(Cliconf, self).get_capabilities() + return json.dumps(result) diff --git a/plugins/cliconf/routeros.py b/plugins/cliconf/routeros.py new file mode 100644 index 0000000000..9e020a1b03 --- /dev/null +++ b/plugins/cliconf/routeros.py @@ -0,0 +1,78 @@ +# +# (c) 2017 Red Hat Inc. +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = ''' +--- +cliconf: routeros +short_description: Use routeros cliconf to run command on MikroTik RouterOS platform +description: + - This routeros plugin provides low level abstraction apis for + sending and receiving CLI commands from MikroTik RouterOS network devices. 
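+# Illustrative sketch (editor's addition, not part of the upstream plugin):
+# get_device_info() below scrapes plain-text RouterOS output; the sample
+# output and values are hypothetical:
+#
+#   >>> conn.get('/system resource print')   # contains e.g. "version: 6.45.1"
+#   >>> conn.get_device_info()
+#   {'network_os': 'RouterOS', 'network_os_version': '6.45.1',
+#    'network_os_model': 'RouterBOARD 952Ui-5ac2nD', 'network_os_hostname': 'rb1'}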
+''' + +import re +import json + +from itertools import chain + +from ansible.module_utils._text import to_bytes, to_text +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import to_list +from ansible.plugins.cliconf import CliconfBase, enable_mode + + +class Cliconf(CliconfBase): + + def get_device_info(self): + device_info = {} + device_info['network_os'] = 'RouterOS' + + resource = self.get('/system resource print') + data = to_text(resource, errors='surrogate_or_strict').strip() + match = re.search(r'version: (\S+)', data) + if match: + device_info['network_os_version'] = match.group(1) + + routerboard = self.get('/system routerboard print') + data = to_text(routerboard, errors='surrogate_or_strict').strip() + match = re.search(r'model: (.+)$', data, re.M) + if match: + device_info['network_os_model'] = match.group(1) + + identity = self.get('/system identity print') + data = to_text(identity, errors='surrogate_or_strict').strip() + match = re.search(r'name: (.+)$', data, re.M) + if match: + device_info['network_os_hostname'] = match.group(1) + + return device_info + + def get_config(self, source='running', format='text', flags=None): + return + + def edit_config(self, command): + return + + def get(self, command, prompt=None, answer=None, sendonly=False, newline=True, check_all=False): + return self.send_command(command=command, prompt=prompt, answer=answer, sendonly=sendonly, newline=newline, check_all=check_all) + + def get_capabilities(self): + result = super(Cliconf, self).get_capabilities() + return json.dumps(result) diff --git a/plugins/cliconf/slxos.py b/plugins/cliconf/slxos.py new file mode 100644 index 0000000000..a7809a3b95 --- /dev/null +++ b/plugins/cliconf/slxos.py @@ -0,0 +1,104 @@ +# +# (c) 2018 Extreme Networks Inc. +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = ''' +--- +cliconf: slxos +short_description: Use slxos cliconf to run command on Extreme SLX-OS platform +description: + - This slxos plugin provides low level abstraction apis for + sending and receiving CLI commands from Extreme SLX-OS network devices. 
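+# Illustrative sketch (editor's addition, not part of the upstream plugin):
+# get_config() below accepts optional flags that are appended to the base
+# command, e.g.
+#
+#   conn.get_config(source='running', flags=['interface'])
+#   # sends: show running-config interface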
+'''
+
+import re
+import json
+
+from itertools import chain
+
+from ansible.module_utils._text import to_bytes, to_text
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import to_list
+from ansible.plugins.cliconf import CliconfBase
+
+
+class Cliconf(CliconfBase):
+
+    def get_device_info(self):
+        device_info = {}
+
+        device_info['network_os'] = 'slxos'
+        reply = self.get('show version')
+        data = to_text(reply, errors='surrogate_or_strict').strip()
+
+        match = re.search(r'SLX\-OS Operating System Version: (\S+)', data)
+        if match:
+            device_info['network_os_version'] = match.group(1)
+
+        reply = self.get('show chassis')
+        data = to_text(reply, errors='surrogate_or_strict').strip()
+
+        match = re.search(r'^Chassis Name:(\s+)(\S+)', data, re.M)
+        if match:
+            device_info['network_os_model'] = match.group(2)
+
+        reply = self.get('show running-config | inc "switch-attributes host-name"')
+        data = to_text(reply, errors='surrogate_or_strict').strip()
+
+        match = re.search(r'switch-attributes host-name (\S+)', data, re.M)
+        if match:
+            device_info['network_os_hostname'] = match.group(1)
+
+        return device_info
+
+    def get_config(self, source='running', flags=None):
+        if source not in ('running', 'startup'):
+            raise ValueError("fetching configuration from %s is not supported" % source)
+        if source == 'running':
+            cmd = 'show running-config'
+        else:
+            cmd = 'show startup-config'
+
+        flags = [] if flags is None else flags
+        # join with a leading space so flags don't run into the base command
+        cmd += ' ' + ' '.join(flags)
+        cmd = cmd.strip()
+
+        return self.send_command(cmd)
+
+    def edit_config(self, command):
+        for cmd in chain(['configure terminal'], to_list(command), ['end']):
+            if isinstance(cmd, dict):
+                command = cmd['command']
+                prompt = cmd['prompt']
+                answer = cmd['answer']
+                newline = cmd.get('newline', True)
+            else:
+                command = cmd
+                prompt = None
+                answer = None
+                newline = True
+
+            self.send_command(command, prompt, answer, False, newline)
+
+    def get(self, command, prompt=None, answer=None, sendonly=False, newline=True, check_all=False):
+        return self.send_command(command=command, prompt=prompt, answer=answer, sendonly=sendonly, newline=newline, check_all=check_all)
+
+    def get_capabilities(self):
+        result = super(Cliconf, self).get_capabilities()
+        return json.dumps(result)
diff --git a/plugins/cliconf/voss.py b/plugins/cliconf/voss.py
new file mode 100644
index 0000000000..7d3d26eb7d
--- /dev/null
+++ b/plugins/cliconf/voss.py
@@ -0,0 +1,235 @@
+#
+# (c) 2018 Extreme Networks Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible.  If not, see <https://www.gnu.org/licenses/>.
+#
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+cliconf: voss
+short_description: Use voss cliconf to run command on Extreme VOSS platform
+description:
+  - This voss plugin provides low level abstraction apis for
+    sending and receiving CLI commands from Extreme VOSS network devices.
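+# Illustrative sketch (editor's addition, not part of the upstream plugin):
+# a typical module-side flow over the persistent connection, where 'conn' is
+# an ansible.module_utils.connection.Connection and 'candidate' is the
+# desired configuration text, would be:
+#
+#   running = conn.get_config(source='running')
+#   diff = conn.get_diff(candidate=candidate, running=running,
+#                        diff_match='line', diff_replace='line')
+#   if diff['config_diff']:
+#       conn.edit_config(candidate=diff['config_diff'].split('\n'))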
+''' + +import re +import json + +from ansible.errors import AnsibleConnectionFailure +from ansible.module_utils._text import to_text +from ansible.module_utils.common._collections_compat import Mapping +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.config import NetworkConfig, dumps +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import to_list +from ansible_collections.community.general.plugins.module_utils.network.voss.voss import VossNetworkConfig +from ansible.plugins.cliconf import CliconfBase, enable_mode + + +class Cliconf(CliconfBase): + + @enable_mode + def get_config(self, source='running', flags=None, format=None): + if source not in ('running', 'startup'): + raise ValueError("fetching configuration from %s is not supported" % source) + + if format: + raise ValueError("'format' value %s is not supported for get_config" % format) + + if not flags: + flags = [] + if source == 'running': + cmd = 'show running-config ' + cmd += ' '.join(to_list(flags)) + cmd = cmd.strip() + else: + cmd = 'more /intflash/config.cfg' + + return self.send_command(cmd) + + def get_diff(self, candidate=None, running=None, diff_match='line', diff_ignore_lines=None, path=None, diff_replace='line'): + """ + Generate diff between candidate and running configuration. If the + remote host supports onbox diff capabilities ie. supports_onbox_diff in that case + candidate and running configurations are not required to be passed as argument. + In case if onbox diff capability is not supported candidate argument is mandatory + and running argument is optional. + :param candidate: The configuration which is expected to be present on remote host. + :param running: The base configuration which is used to generate diff. + :param diff_match: Instructs how to match the candidate configuration with current device configuration + Valid values are 'line', 'strict', 'exact', 'none'. + 'line' - commands are matched line by line + 'strict' - command lines are matched with respect to position + 'exact' - command lines must be an equal match + 'none' - will not compare the candidate configuration with the running configuration + :param diff_ignore_lines: Use this argument to specify one or more lines that should be + ignored during the diff. This is used for lines in the configuration + that are automatically updated by the system. This argument takes + a list of regular expressions or exact line matches. + :param path: The ordered set of parents that uniquely identify the section or hierarchy + the commands should be checked against. If the parents argument + is omitted, the commands are checked against the set of top + level or global commands. + :param diff_replace: Instructs on the way to perform the configuration on the device. + If the replace argument is set to I(line) then the modified lines are + pushed to the device in configuration mode. If the replace argument is + set to I(block) then the entire command block is pushed to the device in + configuration mode if any line is not correct. + :return: Configuration diff in json format. 
+        {
+            'config_diff': '',
+        }
+
+        """
+        diff = {}
+
+        device_operations = self.get_device_operations()
+        option_values = self.get_option_values()
+
+        if candidate is None and device_operations['supports_generate_diff']:
+            raise ValueError("candidate configuration is required to generate diff")
+
+        if diff_match not in option_values['diff_match']:
+            raise ValueError("'match' value %s is invalid, valid values are %s" % (diff_match, ', '.join(option_values['diff_match'])))
+
+        if diff_replace not in option_values['diff_replace']:
+            raise ValueError("'replace' value %s is invalid, valid values are %s" % (diff_replace, ', '.join(option_values['diff_replace'])))
+
+        # prepare candidate configuration
+        candidate_obj = VossNetworkConfig(indent=0, ignore_lines=diff_ignore_lines)
+        candidate_obj.load(candidate)
+
+        if running and diff_match != 'none':
+            # running configuration
+            running_obj = VossNetworkConfig(indent=0, contents=running, ignore_lines=diff_ignore_lines)
+            configdiffobjs = candidate_obj.difference(running_obj, path=path, match=diff_match, replace=diff_replace)
+
+        else:
+            configdiffobjs = candidate_obj.items
+
+        diff['config_diff'] = dumps(configdiffobjs, 'commands') if configdiffobjs else ''
+        diff['diff_path'] = path
+        diff['diff_replace'] = diff_replace
+        return diff
+
+    @enable_mode
+    def edit_config(self, candidate=None, commit=True, replace=None, comment=None):
+        resp = {}
+        operations = self.get_device_operations()
+        self.check_edit_config_capability(operations, candidate, commit, replace, comment)
+
+        results = []
+        requests = []
+        if commit:
+            self.send_command('configure terminal')
+            for line in to_list(candidate):
+                if not isinstance(line, Mapping):
+                    line = {'command': line}
+
+                cmd = line['command']
+                if cmd != 'end' and cmd[0] != '!':
+                    results.append(self.send_command(**line))
+                    requests.append(cmd)
+
+            self.send_command('end')
+        else:
+            raise ValueError('check mode is not supported')
+
+        resp['request'] = requests
+        resp['response'] = results
+        return resp
+
+    def get(self, command, prompt=None, answer=None, sendonly=False, newline=True, check_all=False):
+        return self.send_command(command=command, prompt=prompt, answer=answer, sendonly=sendonly, newline=newline, check_all=check_all)
+
+    def get_device_info(self):
+        device_info = {}
+
+        device_info['network_os'] = 'voss'
+        reply = self.get(command='show sys-info')
+        data = to_text(reply, errors='surrogate_or_strict').strip()
+
+        match = re.search(r'SysDescr\s+: \S+ \((\S+)\)', data)
+        if match:
+            device_info['network_os_version'] = match.group(1)
+
+        match = re.search(r'Chassis\s+: (\S+)', data)
+        if match:
+            device_info['network_os_model'] = match.group(1)
+
+        match = re.search(r'SysName\s+: (\S+)', data)
+        if match:
+            device_info['network_os_hostname'] = match.group(1)
+
+        return device_info
+
+    def get_device_operations(self):
+        return {
+            'supports_diff_replace': True,
+            'supports_commit': False,
+            'supports_rollback': False,
+            'supports_defaults': True,
+            'supports_onbox_diff': False,
+            'supports_commit_comment': False,
+            'supports_multiline_delimiter': False,
+            'supports_diff_match': True,
+            'supports_diff_ignore_lines': True,
+            'supports_generate_diff': True,
+            'supports_replace': False
+        }
+
+    def get_option_values(self):
+        return {
+            'format': ['text'],
+            'diff_match': ['line', 'strict', 'exact', 'none'],
+            'diff_replace': ['line', 'block'],
+            'output': []
+        }
+
+    def get_capabilities(self):
+        result = super(Cliconf, self).get_capabilities()
+        result['rpc'] += ['get_diff', 'run_commands', 'get_defaults_flag']
+
result['device_operations'] = self.get_device_operations() + result.update(self.get_option_values()) + return json.dumps(result) + + def run_commands(self, commands=None, check_rc=True): + if commands is None: + raise ValueError("'commands' value is required") + + responses = list() + for cmd in to_list(commands): + if not isinstance(cmd, Mapping): + cmd = {'command': cmd} + + output = cmd.pop('output', None) + if output: + raise ValueError("'output' value %s is not supported for run_commands" % output) + + try: + out = self.send_command(**cmd) + except AnsibleConnectionFailure as e: + if check_rc: + raise + out = getattr(e, 'err', e) + + responses.append(out) + + return responses + + def get_defaults_flag(self): + return 'verbose' diff --git a/plugins/connection/__init__.py b/plugins/connection/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/plugins/connection/chroot.py b/plugins/connection/chroot.py new file mode 100644 index 0000000000..2b64b43b43 --- /dev/null +++ b/plugins/connection/chroot.py @@ -0,0 +1,206 @@ +# Based on local.py (c) 2012, Michael DeHaan +# +# (c) 2013, Maykel Moya +# (c) 2015, Toshio Kuratomi +# Copyright (c) 2017 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = ''' + author: Maykel Moya + connection: chroot + short_description: Interact with local chroot + description: + - Run commands or put/fetch files to an existing chroot on the Ansible controller. + options: + remote_addr: + description: + - The path of the chroot you want to access. + default: inventory_hostname + vars: + - name: ansible_host + executable: + description: + - User specified executable shell + ini: + - section: defaults + key: executable + env: + - name: ANSIBLE_EXECUTABLE + vars: + - name: ansible_executable + default: /bin/sh + chroot_exe: + description: + - User specified chroot binary + ini: + - section: chroot_connection + key: exe + env: + - name: ANSIBLE_CHROOT_EXE + vars: + - name: ansible_chroot_exe + default: chroot +''' + +import os +import os.path +import subprocess +import traceback + +from ansible.errors import AnsibleError +from ansible.module_utils.basic import is_executable +from ansible.module_utils.common.process import get_bin_path +from ansible.module_utils.six.moves import shlex_quote +from ansible.module_utils._text import to_bytes, to_native +from ansible.plugins.connection import ConnectionBase, BUFSIZE +from ansible.utils.display import Display + +display = Display() + + +class Connection(ConnectionBase): + ''' Local chroot based connections ''' + + transport = 'community.general.chroot' + has_pipelining = True + # su currently has an undiagnosed issue with calculating the file + # checksums (so copy, for instance, doesn't work right) + # Have to look into that before re-enabling this + has_tty = False + + default_user = 'root' + + def __init__(self, play_context, new_stdin, *args, **kwargs): + super(Connection, self).__init__(play_context, new_stdin, *args, **kwargs) + + self.chroot = self._play_context.remote_addr + + if os.geteuid() != 0: + raise AnsibleError("chroot connection requires running as root") + + # we're running as root on the local system so do some + # trivial checks for ensuring 'host' is actually a chroot'able dir + if not os.path.isdir(self.chroot): + raise AnsibleError("%s is not a directory" % self.chroot) + + chrootsh = os.path.join(self.chroot, 
'bin/sh')
+        # Want to check for a usable bourne shell inside the chroot.
+        # is_executable() == True is sufficient.  For symlinks it
+        # gets really complicated really fast.  So we punt on finding that
+        # out.  As long as it's a symlink we assume that it will work
+        if not (is_executable(chrootsh) or (os.path.lexists(chrootsh) and os.path.islink(chrootsh))):
+            raise AnsibleError("%s does not look like a chrootable dir (/bin/sh missing)" % self.chroot)
+
+    def _connect(self):
+        ''' connect to the chroot '''
+        if os.path.isabs(self.get_option('chroot_exe')):
+            self.chroot_cmd = self.get_option('chroot_exe')
+        else:
+            try:
+                self.chroot_cmd = get_bin_path(self.get_option('chroot_exe'))
+            except ValueError as e:
+                raise AnsibleError(to_native(e))
+
+        super(Connection, self)._connect()
+        if not self._connected:
+            display.vvv("THIS IS A LOCAL CHROOT DIR", host=self.chroot)
+            self._connected = True
+
+    def _buffered_exec_command(self, cmd, stdin=subprocess.PIPE):
+        ''' run a command on the chroot.  This is only needed for implementing
+        put_file() get_file() so that we don't have to read the whole file
+        into memory.
+
+        compared to exec_command() it loses some niceties like being able to
+        return the process's exit code immediately.
+        '''
+        executable = self.get_option('executable')
+        local_cmd = [self.chroot_cmd, self.chroot, executable, '-c', cmd]
+
+        display.vvv("EXEC %s" % (local_cmd), host=self.chroot)
+        local_cmd = [to_bytes(i, errors='surrogate_or_strict') for i in local_cmd]
+        p = subprocess.Popen(local_cmd, shell=False, stdin=stdin,
+                             stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+
+        return p
+
+    def exec_command(self, cmd, in_data=None, sudoable=False):
+        ''' run a command on the chroot '''
+        super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable)
+
+        p = self._buffered_exec_command(cmd)
+
+        stdout, stderr = p.communicate(in_data)
+        return (p.returncode, stdout, stderr)
+
+    def _prefix_login_path(self, remote_path):
+        ''' Make sure that we put files into a standard path
+
+        If a path is relative, then we need to choose where to put it.
+        ssh chooses $HOME but we aren't guaranteed that a home dir will
+        exist in any given chroot.  So for now we're choosing "/" instead.
+        This also happens to be the former default.
+ + Can revisit using $HOME instead if it's a problem + ''' + if not remote_path.startswith(os.path.sep): + remote_path = os.path.join(os.path.sep, remote_path) + return os.path.normpath(remote_path) + + def put_file(self, in_path, out_path): + ''' transfer a file from local to chroot ''' + super(Connection, self).put_file(in_path, out_path) + display.vvv("PUT %s TO %s" % (in_path, out_path), host=self.chroot) + + out_path = shlex_quote(self._prefix_login_path(out_path)) + try: + with open(to_bytes(in_path, errors='surrogate_or_strict'), 'rb') as in_file: + if not os.fstat(in_file.fileno()).st_size: + count = ' count=0' + else: + count = '' + try: + p = self._buffered_exec_command('dd of=%s bs=%s%s' % (out_path, BUFSIZE, count), stdin=in_file) + except OSError: + raise AnsibleError("chroot connection requires dd command in the chroot") + try: + stdout, stderr = p.communicate() + except Exception: + traceback.print_exc() + raise AnsibleError("failed to transfer file %s to %s" % (in_path, out_path)) + if p.returncode != 0: + raise AnsibleError("failed to transfer file %s to %s:\n%s\n%s" % (in_path, out_path, stdout, stderr)) + except IOError: + raise AnsibleError("file or module does not exist at: %s" % in_path) + + def fetch_file(self, in_path, out_path): + ''' fetch a file from chroot to local ''' + super(Connection, self).fetch_file(in_path, out_path) + display.vvv("FETCH %s TO %s" % (in_path, out_path), host=self.chroot) + + in_path = shlex_quote(self._prefix_login_path(in_path)) + try: + p = self._buffered_exec_command('dd if=%s bs=%s' % (in_path, BUFSIZE)) + except OSError: + raise AnsibleError("chroot connection requires dd command in the chroot") + + with open(to_bytes(out_path, errors='surrogate_or_strict'), 'wb+') as out_file: + try: + chunk = p.stdout.read(BUFSIZE) + while chunk: + out_file.write(chunk) + chunk = p.stdout.read(BUFSIZE) + except Exception: + traceback.print_exc() + raise AnsibleError("failed to transfer file %s to %s" % (in_path, out_path)) + stdout, stderr = p.communicate() + if p.returncode != 0: + raise AnsibleError("failed to transfer file %s to %s:\n%s\n%s" % (in_path, out_path, stdout, stderr)) + + def close(self): + ''' terminate the connection; nothing to do here ''' + super(Connection, self).close() + self._connected = False diff --git a/plugins/connection/docker.py b/plugins/connection/docker.py new file mode 100644 index 0000000000..a2daf4e191 --- /dev/null +++ b/plugins/connection/docker.py @@ -0,0 +1,356 @@ +# Based on the chroot connection plugin by Maykel Moya +# +# (c) 2014, Lorin Hochstein +# (c) 2015, Leendert Brouwer (https://github.com/objectified) +# (c) 2015, Toshio Kuratomi +# Copyright (c) 2017 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = ''' + author: + - Lorin Hochestein + - Leendert Brouwer + connection: docker + short_description: Run tasks in docker containers + description: + - Run commands or put/fetch files to an existing docker container. + options: + remote_user: + description: + - The user to execute as inside the container + vars: + - name: ansible_user + - name: ansible_docker_user + docker_extra_args: + description: + - Extra arguments to pass to the docker command line + default: '' + remote_addr: + description: + - The name of the container you want to access. 
+ default: inventory_hostname + vars: + - name: ansible_host + - name: ansible_docker_host +''' + +import distutils.spawn +import fcntl +import os +import os.path +import subprocess +import re + +from distutils.version import LooseVersion + +import ansible.constants as C +from ansible.compat import selectors +from ansible.errors import AnsibleError, AnsibleFileNotFound +from ansible.module_utils.six.moves import shlex_quote +from ansible.module_utils._text import to_bytes, to_native, to_text +from ansible.plugins.connection import ConnectionBase, BUFSIZE +from ansible.utils.display import Display + +display = Display() + + +class Connection(ConnectionBase): + ''' Local docker based connections ''' + + transport = 'community.general.docker' + has_pipelining = True + + def __init__(self, play_context, new_stdin, *args, **kwargs): + super(Connection, self).__init__(play_context, new_stdin, *args, **kwargs) + + # Note: docker supports running as non-root in some configurations. + # (For instance, setting the UNIX socket file to be readable and + # writable by a specific UNIX group and then putting users into that + # group). Therefore we don't check that the user is root when using + # this connection. But if the user is getting a permission denied + # error it probably means that docker on their system is only + # configured to be connected to by root and they are not running as + # root. + + # Windows uses Powershell modules + if getattr(self._shell, "_IS_WINDOWS", False): + self.module_implementation_preferences = ('.ps1', '.exe', '') + + if 'docker_command' in kwargs: + self.docker_cmd = kwargs['docker_command'] + else: + self.docker_cmd = distutils.spawn.find_executable('docker') + if not self.docker_cmd: + raise AnsibleError("docker command not found in PATH") + + docker_version = self._get_docker_version() + if docker_version == u'dev': + display.warning(u'Docker version number is "dev". 
Will assume latest version.') + if docker_version != u'dev' and LooseVersion(docker_version) < LooseVersion(u'1.3'): + raise AnsibleError('docker connection type requires docker 1.3 or higher') + + # The remote user we will request from docker (if supported) + self.remote_user = None + # The actual user which will execute commands in docker (if known) + self.actual_user = None + + if self._play_context.remote_user is not None: + if docker_version == u'dev' or LooseVersion(docker_version) >= LooseVersion(u'1.7'): + # Support for specifying the exec user was added in docker 1.7 + self.remote_user = self._play_context.remote_user + self.actual_user = self.remote_user + else: + self.actual_user = self._get_docker_remote_user() + + if self.actual_user != self._play_context.remote_user: + display.warning(u'docker {0} does not support remote_user, using container default: {1}' + .format(docker_version, self.actual_user or u'?')) + elif self._display.verbosity > 2: + # Since we're not setting the actual_user, look it up so we have it for logging later + # Only do this if display verbosity is high enough that we'll need the value + # This saves overhead from calling into docker when we don't need to + self.actual_user = self._get_docker_remote_user() + + @staticmethod + def _sanitize_version(version): + return re.sub(u'[^0-9a-zA-Z.]', u'', version) + + def _old_docker_version(self): + cmd_args = [] + if self._play_context.docker_extra_args: + cmd_args += self._play_context.docker_extra_args.split(' ') + + old_version_subcommand = ['version'] + + old_docker_cmd = [self.docker_cmd] + cmd_args + old_version_subcommand + p = subprocess.Popen(old_docker_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + cmd_output, err = p.communicate() + + return old_docker_cmd, to_native(cmd_output), err, p.returncode + + def _new_docker_version(self): + # no result yet, must be newer Docker version + cmd_args = [] + if self._play_context.docker_extra_args: + cmd_args += self._play_context.docker_extra_args.split(' ') + + new_version_subcommand = ['version', '--format', "'{{.Server.Version}}'"] + + new_docker_cmd = [self.docker_cmd] + cmd_args + new_version_subcommand + p = subprocess.Popen(new_docker_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + cmd_output, err = p.communicate() + return new_docker_cmd, to_native(cmd_output), err, p.returncode + + def _get_docker_version(self): + + cmd, cmd_output, err, returncode = self._old_docker_version() + if returncode == 0: + for line in to_text(cmd_output, errors='surrogate_or_strict').split(u'\n'): + if line.startswith(u'Server version:'): # old docker versions + return self._sanitize_version(line.split()[2]) + + cmd, cmd_output, err, returncode = self._new_docker_version() + if returncode: + raise AnsibleError('Docker version check (%s) failed: %s' % (to_native(cmd), to_native(err))) + + return self._sanitize_version(to_text(cmd_output, errors='surrogate_or_strict')) + + def _get_docker_remote_user(self): + """ Get the default user configured in the docker container """ + p = subprocess.Popen([self.docker_cmd, 'inspect', '--format', '{{.Config.User}}', self._play_context.remote_addr], + stdout=subprocess.PIPE, stderr=subprocess.PIPE) + + out, err = p.communicate() + out = to_text(out, errors='surrogate_or_strict') + + if p.returncode != 0: + display.warning(u'unable to retrieve default user from docker container: %s %s' % (out, to_text(err))) + return None + + # The default exec user is root, unless it was changed in the Dockerfile with USER + return 
out.strip() or u'root' + + def _build_exec_cmd(self, cmd): + """ Build the local docker exec command to run cmd on remote_host + + If remote_user is available and is supported by the docker + version we are using, it will be provided to docker exec. + """ + + local_cmd = [self.docker_cmd] + + if self._play_context.docker_extra_args: + local_cmd += self._play_context.docker_extra_args.split(' ') + + local_cmd += [b'exec'] + + if self.remote_user is not None: + local_cmd += [b'-u', self.remote_user] + + # -i is needed to keep stdin open which allows pipelining to work + local_cmd += [b'-i', self._play_context.remote_addr] + cmd + + return local_cmd + + def _connect(self, port=None): + """ Connect to the container. Nothing to do """ + super(Connection, self)._connect() + if not self._connected: + display.vvv(u"ESTABLISH DOCKER CONNECTION FOR USER: {0}".format( + self.actual_user or u'?'), host=self._play_context.remote_addr + ) + self._connected = True + + def exec_command(self, cmd, in_data=None, sudoable=False): + """ Run a command on the docker host """ + super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable) + + local_cmd = self._build_exec_cmd([self._play_context.executable, '-c', cmd]) + + display.vvv(u"EXEC {0}".format(to_text(local_cmd)), host=self._play_context.remote_addr) + display.debug("opening command with Popen()") + + local_cmd = [to_bytes(i, errors='surrogate_or_strict') for i in local_cmd] + + p = subprocess.Popen( + local_cmd, + stdin=subprocess.PIPE, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + ) + display.debug("done running command with Popen()") + + if self.become and self.become.expect_prompt() and sudoable: + fcntl.fcntl(p.stdout, fcntl.F_SETFL, fcntl.fcntl(p.stdout, fcntl.F_GETFL) | os.O_NONBLOCK) + fcntl.fcntl(p.stderr, fcntl.F_SETFL, fcntl.fcntl(p.stderr, fcntl.F_GETFL) | os.O_NONBLOCK) + selector = selectors.DefaultSelector() + selector.register(p.stdout, selectors.EVENT_READ) + selector.register(p.stderr, selectors.EVENT_READ) + + become_output = b'' + try: + while not self.become.check_success(become_output) and not self.become.check_password_prompt(become_output): + events = selector.select(self._play_context.timeout) + if not events: + stdout, stderr = p.communicate() + raise AnsibleError('timeout waiting for privilege escalation password prompt:\n' + to_native(become_output)) + + for key, event in events: + if key.fileobj == p.stdout: + chunk = p.stdout.read() + elif key.fileobj == p.stderr: + chunk = p.stderr.read() + + if not chunk: + stdout, stderr = p.communicate() + raise AnsibleError('privilege output closed while waiting for password prompt:\n' + to_native(become_output)) + become_output += chunk + finally: + selector.close() + + if not self.become.check_success(become_output): + become_pass = self.become.get_option('become_pass', playcontext=self._play_context) + p.stdin.write(to_bytes(become_pass, errors='surrogate_or_strict') + b'\n') + fcntl.fcntl(p.stdout, fcntl.F_SETFL, fcntl.fcntl(p.stdout, fcntl.F_GETFL) & ~os.O_NONBLOCK) + fcntl.fcntl(p.stderr, fcntl.F_SETFL, fcntl.fcntl(p.stderr, fcntl.F_GETFL) & ~os.O_NONBLOCK) + + display.debug("getting output with communicate()") + stdout, stderr = p.communicate(in_data) + display.debug("done communicating") + + display.debug("done with docker.exec_command()") + return (p.returncode, stdout, stderr) + + def _prefix_login_path(self, remote_path): + ''' Make sure that we put files into a standard path + + If a path is relative, then we need to choose where to put it. 
+ ssh chooses $HOME but we aren't guaranteed that a home dir will + exist in any given chroot. So for now we're choosing "/" instead. + This also happens to be the former default. + + Can revisit using $HOME instead if it's a problem + ''' + if not remote_path.startswith(os.path.sep): + remote_path = os.path.join(os.path.sep, remote_path) + return os.path.normpath(remote_path) + + def put_file(self, in_path, out_path): + """ Transfer a file from local to docker container """ + super(Connection, self).put_file(in_path, out_path) + display.vvv("PUT %s TO %s" % (in_path, out_path), host=self._play_context.remote_addr) + + out_path = self._prefix_login_path(out_path) + if not os.path.exists(to_bytes(in_path, errors='surrogate_or_strict')): + raise AnsibleFileNotFound( + "file or module does not exist: %s" % to_native(in_path)) + + out_path = shlex_quote(out_path) + # Older docker doesn't have native support for copying files into + # running containers, so we use docker exec to implement this + # Although docker version 1.8 and later provide support, the + # owner and group of the files are always set to root + with open(to_bytes(in_path, errors='surrogate_or_strict'), 'rb') as in_file: + if not os.fstat(in_file.fileno()).st_size: + count = ' count=0' + else: + count = '' + args = self._build_exec_cmd([self._play_context.executable, "-c", "dd of=%s bs=%s%s" % (out_path, BUFSIZE, count)]) + args = [to_bytes(i, errors='surrogate_or_strict') for i in args] + try: + p = subprocess.Popen(args, stdin=in_file, + stdout=subprocess.PIPE, stderr=subprocess.PIPE) + except OSError: + raise AnsibleError("docker connection requires dd command in the container to put files") + stdout, stderr = p.communicate() + + if p.returncode != 0: + raise AnsibleError("failed to transfer file %s to %s:\n%s\n%s" % + (to_native(in_path), to_native(out_path), to_native(stdout), to_native(stderr))) + + def fetch_file(self, in_path, out_path): + """ Fetch a file from container to local. 
""" + super(Connection, self).fetch_file(in_path, out_path) + display.vvv("FETCH %s TO %s" % (in_path, out_path), host=self._play_context.remote_addr) + + in_path = self._prefix_login_path(in_path) + # out_path is the final file path, but docker takes a directory, not a + # file path + out_dir = os.path.dirname(out_path) + + args = [self.docker_cmd, "cp", "%s:%s" % (self._play_context.remote_addr, in_path), out_dir] + args = [to_bytes(i, errors='surrogate_or_strict') for i in args] + + p = subprocess.Popen(args, stdin=subprocess.PIPE, + stdout=subprocess.PIPE, stderr=subprocess.PIPE) + p.communicate() + + actual_out_path = os.path.join(out_dir, os.path.basename(in_path)) + + if p.returncode != 0: + # Older docker doesn't have native support for fetching files command `cp` + # If `cp` fails, try to use `dd` instead + args = self._build_exec_cmd([self._play_context.executable, "-c", "dd if=%s bs=%s" % (in_path, BUFSIZE)]) + args = [to_bytes(i, errors='surrogate_or_strict') for i in args] + with open(to_bytes(actual_out_path, errors='surrogate_or_strict'), 'wb') as out_file: + try: + p = subprocess.Popen(args, stdin=subprocess.PIPE, + stdout=out_file, stderr=subprocess.PIPE) + except OSError: + raise AnsibleError("docker connection requires dd command in the container to put files") + stdout, stderr = p.communicate() + + if p.returncode != 0: + raise AnsibleError("failed to fetch file %s to %s:\n%s\n%s" % (in_path, out_path, stdout, stderr)) + + # Rename if needed + if actual_out_path != out_path: + os.rename(to_bytes(actual_out_path, errors='strict'), to_bytes(out_path, errors='strict')) + + def close(self): + """ Terminate the connection. Nothing to do for Docker""" + super(Connection, self).close() + self._connected = False diff --git a/plugins/connection/funcd.py b/plugins/connection/funcd.py new file mode 100644 index 0000000000..83f4a9e05e --- /dev/null +++ b/plugins/connection/funcd.py @@ -0,0 +1,102 @@ +# Based on local.py (c) 2012, Michael DeHaan +# Based on chroot.py (c) 2013, Maykel Moya +# Copyright (c) 2013, Michael Scherer +# Copyright (c) 2017 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = ''' + author: Michael Scherer (@msherer) + connection: funcd + short_description: Use funcd to connect to target + description: + - This transport permits you to use Ansible over Func. + - For people who have already setup func and that wish to play with ansible, + this permit to move gradually to ansible without having to redo completely the setup of the network. + options: + remote_addr: + description: + - The path of the chroot you want to access. 
+        default: inventory_hostname
+        vars:
+            - name: ansible_host
+            - name: ansible_func_host
+'''
+
+HAVE_FUNC = False
+try:
+    import func.overlord.client as fc
+    HAVE_FUNC = True
+except ImportError:
+    pass
+
+import os
+import tempfile
+import shutil
+
+from ansible.errors import AnsibleError
+from ansible.utils.display import Display
+
+display = Display()
+
+
+class Connection(object):
+    ''' Func-based connections '''
+
+    has_pipelining = False
+
+    def __init__(self, runner, host, port, *args, **kwargs):
+        self.runner = runner
+        self.host = host
+        # port is unused; func handles the transport on its own
+        self.port = port
+
+    def connect(self, port=None):
+        if not HAVE_FUNC:
+            raise AnsibleError("func is not installed")
+
+        self.client = fc.Client(self.host)
+        return self
+
+    def exec_command(self, cmd, become_user=None, sudoable=False, executable='/bin/sh', in_data=None):
+        ''' run a command on the remote minion '''
+
+        if in_data:
+            raise AnsibleError("Internal Error: this module does not support optimized module pipelining")
+
+        # privilege escalation is ignored entirely
+        display.vvv("EXEC %s" % (cmd), host=self.host)
+        p = self.client.command.run(cmd)[self.host]
+        return (p[0], p[1], p[2])
+
+    def _normalize_path(self, path, prefix):
+        if not path.startswith(os.path.sep):
+            path = os.path.join(os.path.sep, path)
+        normpath = os.path.normpath(path)
+        return os.path.join(prefix, normpath[1:])
+
+    def put_file(self, in_path, out_path):
+        ''' transfer a file from local to remote '''
+
+        out_path = self._normalize_path(out_path, '/')
+        display.vvv("PUT %s TO %s" % (in_path, out_path), host=self.host)
+        self.client.local.copyfile.send(in_path, out_path)
+
+    def fetch_file(self, in_path, out_path):
+        ''' fetch a file from remote to local '''
+
+        in_path = self._normalize_path(in_path, '/')
+        display.vvv("FETCH %s TO %s" % (in_path, out_path), host=self.host)
+        # need a tmp dir because getfile takes a directory as its destination
+        # while fetch_file takes a file directly
+        tmpdir = tempfile.mkdtemp(prefix="func_ansible")
+        self.client.local.getfile.get(in_path, tmpdir)
+        shutil.move(os.path.join(tmpdir, self.host, os.path.basename(in_path)), out_path)
+        shutil.rmtree(tmpdir)
+
+    def close(self):
+        ''' terminate the connection; nothing to do here '''
+        pass
diff --git a/plugins/connection/iocage.py b/plugins/connection/iocage.py
new file mode 100644
index 0000000000..d3ec7ce7c8
--- /dev/null
+++ b/plugins/connection/iocage.py
@@ -0,0 +1,82 @@
+# Based on jail.py
+# (c) 2013, Michael Scherer
+# (c) 2015, Toshio Kuratomi
+# (c) 2016, Stephan Lohse
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+    author: Stephan Lohse
+    connection: iocage
+    short_description: Run tasks in iocage jails
+    description:
+        - Run commands or put/fetch files to an existing iocage jail
+    options:
+      remote_addr:
+        description:
+            - Path to the jail
+        vars:
+            - name: ansible_host
+            - name: ansible_iocage_host
+      remote_user:
+        description:
+            - User to execute as inside the jail
+        vars:
+            - name: ansible_user
+            - name: ansible_iocage_user
+'''
+
+import subprocess
+
+from ansible_collections.community.general.plugins.connection.jail import Connection as Jail
+from ansible.module_utils._text import to_native
+from ansible.errors import AnsibleError
+from ansible.utils.display import Display
+
+display = Display()
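+# Illustrative sketch (editor's addition, not part of the upstream plugin):
+# an inventory entry targeting a jail could look like this (host and jail
+# names are hypothetical):
+#
+#   [jails]
+#   webjail ansible_connection=community.general.iocage ansible_iocage_host=webjail
+#
+# The Connection class below only resolves the iocage name to the raw jail
+# name ('ioc-<uuid>') and then defers everything else to the jail plugin.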
+ +class Connection(Jail): + ''' Local iocage based connections ''' + + transport = 'community.general.iocage' + + def __init__(self, play_context, new_stdin, *args, **kwargs): + self.ioc_jail = play_context.remote_addr + + self.iocage_cmd = Jail._search_executable('iocage') + + jail_uuid = self.get_jail_uuid() + + kwargs[Jail.modified_jailname_key] = 'ioc-{0}'.format(jail_uuid) + + display.vvv(u"Jail {iocjail} has been translated to {rawjail}".format( + iocjail=self.ioc_jail, rawjail=kwargs[Jail.modified_jailname_key]), + host=kwargs[Jail.modified_jailname_key]) + + super(Connection, self).__init__(play_context, new_stdin, *args, **kwargs) + + def get_jail_uuid(self): + p = subprocess.Popen([self.iocage_cmd, 'get', 'host_hostuuid', self.ioc_jail], + stdin=subprocess.PIPE, + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT) + + stdout, stderr = p.communicate() + + if stdout is not None: + stdout = to_native(stdout) + + if stderr is not None: + stderr = to_native(stderr) + + # otherwise p.returncode would not be set + p.wait() + + if p.returncode != 0: + raise AnsibleError(u"iocage returned an error: {0}".format(stdout)) + + return stdout.strip('\n') diff --git a/plugins/connection/jail.py b/plugins/connection/jail.py new file mode 100644 index 0000000000..7b44b9cd5f --- /dev/null +++ b/plugins/connection/jail.py @@ -0,0 +1,201 @@ +# Based on local.py by Michael DeHaan +# and chroot.py by Maykel Moya +# Copyright (c) 2013, Michael Scherer +# Copyright (c) 2015, Toshio Kuratomi +# Copyright (c) 2017 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = ''' + author: Ansible Core Team + connection: jail + short_description: Run tasks in jails + description: + - Run commands or put/fetch files to an existing jail + options: + remote_addr: + description: + - Path to the jail + default: inventory_hostname + vars: + - name: ansible_host + - name: ansible_jail_host + remote_user: + description: + - User to execute as inside the jail + vars: + - name: ansible_user + - name: ansible_jail_user +''' + +import distutils.spawn +import os +import os.path +import subprocess +import traceback +import ansible.constants as C + +from ansible.errors import AnsibleError +from ansible.module_utils.six.moves import shlex_quote +from ansible.module_utils._text import to_bytes, to_native, to_text +from ansible.plugins.connection import ConnectionBase, BUFSIZE +from ansible.utils.display import Display + +display = Display() + + +class Connection(ConnectionBase): + ''' Local BSD Jail based connections ''' + + modified_jailname_key = 'conn_jail_name' + + transport = 'community.general.jail' + # Pipelining may work. 
Someone needs to test by setting this to True and
+    # having pipelining=True in their ansible.cfg
+    has_pipelining = True
+    has_tty = False
+
+    def __init__(self, play_context, new_stdin, *args, **kwargs):
+        super(Connection, self).__init__(play_context, new_stdin, *args, **kwargs)
+
+        self.jail = self._play_context.remote_addr
+        if self.modified_jailname_key in kwargs:
+            self.jail = kwargs[self.modified_jailname_key]
+
+        if os.geteuid() != 0:
+            raise AnsibleError("jail connection requires running as root")
+
+        self.jls_cmd = self._search_executable('jls')
+        self.jexec_cmd = self._search_executable('jexec')
+
+        if self.jail not in self.list_jails():
+            raise AnsibleError("incorrect jail name %s" % self.jail)
+
+    @staticmethod
+    def _search_executable(executable):
+        cmd = distutils.spawn.find_executable(executable)
+        if not cmd:
+            raise AnsibleError("%s command not found in PATH" % executable)
+        return cmd
+
+    def list_jails(self):
+        p = subprocess.Popen([self.jls_cmd, '-q', 'name'],
+                             stdin=subprocess.PIPE,
+                             stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+
+        stdout, stderr = p.communicate()
+
+        return to_text(stdout, errors='surrogate_or_strict').split()
+
+    def _connect(self):
+        ''' connect to the jail; nothing to do here '''
+        super(Connection, self)._connect()
+        if not self._connected:
+            display.vvv(u"ESTABLISH JAIL CONNECTION FOR USER: {0}".format(self._play_context.remote_user), host=self.jail)
+            self._connected = True
+
+    def _buffered_exec_command(self, cmd, stdin=subprocess.PIPE):
+        ''' run a command on the jail.  This is only needed for implementing
+        put_file() get_file() so that we don't have to read the whole file
+        into memory.
+
+        compared to exec_command() it loses some niceties like being able to
+        return the process's exit code immediately.
+        '''
+
+        local_cmd = [self.jexec_cmd]
+        set_env = ''
+
+        if self._play_context.remote_user is not None:
+            local_cmd += ['-U', self._play_context.remote_user]
+            # update HOME since -U does not update the jail environment
+            set_env = 'HOME=~' + self._play_context.remote_user + ' '
+
+        local_cmd += [self.jail, self._play_context.executable, '-c', set_env + cmd]
+
+        display.vvv("EXEC %s" % (local_cmd,), host=self.jail)
+        local_cmd = [to_bytes(i, errors='surrogate_or_strict') for i in local_cmd]
+        p = subprocess.Popen(local_cmd, shell=False, stdin=stdin,
+                             stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+
+        return p
+
+    def exec_command(self, cmd, in_data=None, sudoable=False):
+        ''' run a command on the jail '''
+        super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable)
+
+        p = self._buffered_exec_command(cmd)
+
+        stdout, stderr = p.communicate(in_data)
+        return (p.returncode, stdout, stderr)
+
+    def _prefix_login_path(self, remote_path):
+        ''' Make sure that we put files into a standard path
+
+        If a path is relative, then we need to choose where to put it.
+        ssh chooses $HOME but we aren't guaranteed that a home dir will
+        exist in any given chroot.  So for now we're choosing "/" instead.
+        This also happens to be the former default.
+
+        Can revisit using $HOME instead if it's a problem
+        '''
+        if not remote_path.startswith(os.path.sep):
+            remote_path = os.path.join(os.path.sep, remote_path)
+        return os.path.normpath(remote_path)
+
+    def put_file(self, in_path, out_path):
+        ''' transfer a file from local to jail '''
+        super(Connection, self).put_file(in_path, out_path)
+        display.vvv("PUT %s TO %s" % (in_path, out_path), host=self.jail)
+
+        out_path = shlex_quote(self._prefix_login_path(out_path))
+        try:
+            with open(to_bytes(in_path, errors='surrogate_or_strict'), 'rb') as in_file:
+                if not os.fstat(in_file.fileno()).st_size:
+                    count = ' count=0'
+                else:
+                    count = ''
+                try:
+                    p = self._buffered_exec_command('dd of=%s bs=%s%s' % (out_path, BUFSIZE, count), stdin=in_file)
+                except OSError:
+                    raise AnsibleError("jail connection requires dd command in the jail")
+                try:
+                    stdout, stderr = p.communicate()
+                except Exception:
+                    traceback.print_exc()
+                    raise AnsibleError("failed to transfer file %s to %s" % (in_path, out_path))
+                if p.returncode != 0:
+                    raise AnsibleError("failed to transfer file %s to %s:\n%s\n%s" % (in_path, out_path, to_native(stdout), to_native(stderr)))
+        except IOError:
+            raise AnsibleError("file or module does not exist at: %s" % in_path)
+
+    def fetch_file(self, in_path, out_path):
+        ''' fetch a file from jail to local '''
+        super(Connection, self).fetch_file(in_path, out_path)
+        display.vvv("FETCH %s TO %s" % (in_path, out_path), host=self.jail)
+
+        in_path = shlex_quote(self._prefix_login_path(in_path))
+        try:
+            p = self._buffered_exec_command('dd if=%s bs=%s' % (in_path, BUFSIZE))
+        except OSError:
+            raise AnsibleError("jail connection requires dd command in the jail")
+
+        with open(to_bytes(out_path, errors='surrogate_or_strict'), 'wb+') as out_file:
+            try:
+                chunk = p.stdout.read(BUFSIZE)
+                while chunk:
+                    out_file.write(chunk)
+                    chunk = p.stdout.read(BUFSIZE)
+            except Exception:
+                traceback.print_exc()
+                raise AnsibleError("failed to transfer file %s to %s" % (in_path, out_path))
+            stdout, stderr = p.communicate()
+            if p.returncode != 0:
+                raise AnsibleError("failed to transfer file %s to %s:\n%s\n%s" % (in_path, out_path, to_native(stdout), to_native(stderr)))
+
+    def close(self):
+        ''' terminate the connection; nothing to do here '''
+        super(Connection, self).close()
+        self._connected = False
diff --git a/plugins/connection/kubectl.py b/plugins/connection/kubectl.py
new file mode 100644
index 0000000000..23b7f0d76e
--- /dev/null
+++ b/plugins/connection/kubectl.py
@@ -0,0 +1,355 @@
+# Based on the docker connection plugin
+#
+# Connection plugin for configuring kubernetes containers with kubectl
+# (c) 2017, XuXinkun
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+  author:
+    - xuxinkun
+
+  connection: kubectl
+
+  short_description: Execute tasks in pods running on Kubernetes.
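+  # Example inventory line (the pod name and namespace are hypothetical):
+  #
+  #   mypod ansible_connection=community.general.kubectl ansible_kubectl_namespace=default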
+ + description: + - Use the kubectl exec command to run tasks in, or put/fetch files to, pods running on the Kubernetes + container platform. + + + requirements: + - kubectl (go binary) + + options: + kubectl_pod: + description: + - Pod name. Required when the host name does not match pod name. + default: '' + vars: + - name: ansible_kubectl_pod + env: + - name: K8S_AUTH_POD + kubectl_container: + description: + - Container name. Required when a pod contains more than one container. + default: '' + vars: + - name: ansible_kubectl_container + env: + - name: K8S_AUTH_CONTAINER + kubectl_namespace: + description: + - The namespace of the pod + default: '' + vars: + - name: ansible_kubectl_namespace + env: + - name: K8S_AUTH_NAMESPACE + kubectl_extra_args: + description: + - Extra arguments to pass to the kubectl command line. + default: '' + vars: + - name: ansible_kubectl_extra_args + env: + - name: K8S_AUTH_EXTRA_ARGS + kubectl_kubeconfig: + description: + - Path to a kubectl config file. Defaults to I(~/.kube/config) + default: '' + vars: + - name: ansible_kubectl_kubeconfig + - name: ansible_kubectl_config + env: + - name: K8S_AUTH_KUBECONFIG + kubectl_context: + description: + - The name of a context found in the K8s config file. + default: '' + vars: + - name: ansible_kubectl_context + env: + - name: k8S_AUTH_CONTEXT + kubectl_host: + description: + - URL for accessing the API. + default: '' + vars: + - name: ansible_kubectl_host + - name: ansible_kubectl_server + env: + - name: K8S_AUTH_HOST + - name: K8S_AUTH_SERVER + kubectl_username: + description: + - Provide a username for authenticating with the API. + default: '' + vars: + - name: ansible_kubectl_username + - name: ansible_kubectl_user + env: + - name: K8S_AUTH_USERNAME + kubectl_password: + description: + - Provide a password for authenticating with the API. + default: '' + vars: + - name: ansible_kubectl_password + env: + - name: K8S_AUTH_PASSWORD + kubectl_token: + description: + - API authentication bearer token. + vars: + - name: ansible_kubectl_token + - name: ansible_kubectl_api_key + env: + - name: K8S_AUTH_TOKEN + - name: K8S_AUTH_API_KEY + client_cert: + description: + - Path to a certificate used to authenticate with the API. + default: '' + vars: + - name: ansible_kubectl_cert_file + - name: ansible_kubectl_client_cert + env: + - name: K8S_AUTH_CERT_FILE + aliases: [ kubectl_cert_file ] + client_key: + description: + - Path to a key file used to authenticate with the API. + default: '' + vars: + - name: ansible_kubectl_key_file + - name: ansible_kubectl_client_key + env: + - name: K8S_AUTH_KEY_FILE + aliases: [ kubectl_key_file ] + ca_cert: + description: + - Path to a CA certificate used to authenticate with the API. + default: '' + vars: + - name: ansible_kubectl_ssl_ca_cert + - name: ansible_kubectl_ca_cert + env: + - name: K8S_AUTH_SSL_CA_CERT + aliases: [ kubectl_ssl_ca_cert ] + validate_certs: + description: + - Whether or not to verify the API server's SSL certificate. Defaults to I(true). 
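+      # Maps to kubectl's --insecure-skip-tls-verify flag (see
+      # CONNECTION_OPTIONS below); the boolean sense is inverted, so
+      # validate_certs=no corresponds to skipping TLS verification.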
+ default: '' + vars: + - name: ansible_kubectl_verify_ssl + - name: ansible_kubectl_validate_certs + env: + - name: K8S_AUTH_VERIFY_SSL + aliases: [ kubectl_verify_ssl ] +''' + +import distutils.spawn +import os +import os.path +import subprocess + +import ansible.constants as C +from ansible.parsing.yaml.loader import AnsibleLoader +from ansible.errors import AnsibleError, AnsibleFileNotFound +from ansible.module_utils.six.moves import shlex_quote +from ansible.module_utils._text import to_bytes +from ansible.plugins.connection import ConnectionBase, BUFSIZE +from ansible.utils.display import Display + +display = Display() + + +CONNECTION_TRANSPORT = 'kubectl' + +CONNECTION_OPTIONS = { + 'kubectl_container': '-c', + 'kubectl_namespace': '-n', + 'kubectl_kubeconfig': '--kubeconfig', + 'kubectl_context': '--context', + 'kubectl_host': '--server', + 'kubectl_username': '--username', + 'kubectl_password': '--password', + 'client_cert': '--client-certificate', + 'client_key': '--client-key', + 'ca_cert': '--certificate-authority', + 'validate_certs': '--insecure-skip-tls-verify', + 'kubectl_token': '--token' +} + + +class Connection(ConnectionBase): + ''' Local kubectl based connections ''' + + transport = CONNECTION_TRANSPORT + connection_options = CONNECTION_OPTIONS + documentation = DOCUMENTATION + has_pipelining = True + transport_cmd = None + + def __init__(self, play_context, new_stdin, *args, **kwargs): + super(Connection, self).__init__(play_context, new_stdin, *args, **kwargs) + + # Note: kubectl runs commands as the user that started the container. + # It is impossible to set the remote user for a kubectl connection. + cmd_arg = '{0}_command'.format(self.transport) + if cmd_arg in kwargs: + self.transport_cmd = kwargs[cmd_arg] + else: + self.transport_cmd = distutils.spawn.find_executable(self.transport) + if not self.transport_cmd: + raise AnsibleError("{0} command not found in PATH".format(self.transport)) + + def _build_exec_cmd(self, cmd): + """ Build the local kubectl exec command to run cmd on remote_host + """ + local_cmd = [self.transport_cmd] + + # Build command options based on doc string + doc_yaml = AnsibleLoader(self.documentation).get_single_data() + for key in doc_yaml.get('options'): + if key.endswith('verify_ssl') and self.get_option(key) != '': + # Translate verify_ssl to skip_verify_ssl, and output as string + skip_verify_ssl = not self.get_option(key) + local_cmd.append(u'{0}={1}'.format(self.connection_options[key], str(skip_verify_ssl).lower())) + elif not key.endswith('container') and self.get_option(key) and self.connection_options.get(key): + cmd_arg = self.connection_options[key] + local_cmd += [cmd_arg, self.get_option(key)] + + extra_args_name = u'{0}_extra_args'.format(self.transport) + if self.get_option(extra_args_name): + local_cmd += self.get_option(extra_args_name).split(' ') + + pod = self.get_option(u'{0}_pod'.format(self.transport)) + if not pod: + pod = self._play_context.remote_addr + # -i is needed to keep stdin open which allows pipelining to work + local_cmd += ['exec', '-i', pod] + + # if the pod has more than one container, then container is required + container_arg_name = u'{0}_container'.format(self.transport) + if self.get_option(container_arg_name): + local_cmd += ['-c', self.get_option(container_arg_name)] + + local_cmd += ['--'] + cmd + + return local_cmd + + def _connect(self, port=None): + """ Connect to the container. 
Nothing to do """ + super(Connection, self)._connect() + if not self._connected: + display.vvv(u"ESTABLISH {0} CONNECTION".format(self.transport), host=self._play_context.remote_addr) + self._connected = True + + def exec_command(self, cmd, in_data=None, sudoable=False): + """ Run a command in the container """ + super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable) + + local_cmd = self._build_exec_cmd([self._play_context.executable, '-c', cmd]) + + display.vvv("EXEC %s" % (local_cmd,), host=self._play_context.remote_addr) + local_cmd = [to_bytes(i, errors='surrogate_or_strict') for i in local_cmd] + p = subprocess.Popen(local_cmd, shell=False, stdin=subprocess.PIPE, + stdout=subprocess.PIPE, stderr=subprocess.PIPE) + + stdout, stderr = p.communicate(in_data) + return (p.returncode, stdout, stderr) + + def _prefix_login_path(self, remote_path): + ''' Make sure that we put files into a standard path + + If a path is relative, then we need to choose where to put it. + ssh chooses $HOME but we aren't guaranteed that a home dir will + exist in any given chroot. So for now we're choosing "/" instead. + This also happens to be the former default. + + Can revisit using $HOME instead if it's a problem + ''' + if not remote_path.startswith(os.path.sep): + remote_path = os.path.join(os.path.sep, remote_path) + return os.path.normpath(remote_path) + + def put_file(self, in_path, out_path): + """ Transfer a file from local to the container """ + super(Connection, self).put_file(in_path, out_path) + display.vvv("PUT %s TO %s" % (in_path, out_path), host=self._play_context.remote_addr) + + out_path = self._prefix_login_path(out_path) + if not os.path.exists(to_bytes(in_path, errors='surrogate_or_strict')): + raise AnsibleFileNotFound( + "file or module does not exist: %s" % in_path) + + out_path = shlex_quote(out_path) + # kubectl doesn't have native support for copying files into + # running containers, so we use kubectl exec to implement this + with open(to_bytes(in_path, errors='surrogate_or_strict'), 'rb') as in_file: + if not os.fstat(in_file.fileno()).st_size: + count = ' count=0' + else: + count = '' + args = self._build_exec_cmd([self._play_context.executable, "-c", "dd of=%s bs=%s%s" % (out_path, BUFSIZE, count)]) + args = [to_bytes(i, errors='surrogate_or_strict') for i in args] + try: + p = subprocess.Popen(args, stdin=in_file, + stdout=subprocess.PIPE, stderr=subprocess.PIPE) + except OSError: + raise AnsibleError("kubectl connection requires dd command in the container to put files") + stdout, stderr = p.communicate() + + if p.returncode != 0: + raise AnsibleError("failed to transfer file %s to %s:\n%s\n%s" % (in_path, out_path, stdout, stderr)) + + def fetch_file(self, in_path, out_path): + """ Fetch a file from container to local. 
""" + super(Connection, self).fetch_file(in_path, out_path) + display.vvv("FETCH %s TO %s" % (in_path, out_path), host=self._play_context.remote_addr) + + in_path = self._prefix_login_path(in_path) + out_dir = os.path.dirname(out_path) + + # kubectl doesn't have native support for fetching files from + # running containers, so we use kubectl exec to implement this + args = self._build_exec_cmd([self._play_context.executable, "-c", "dd if=%s bs=%s" % (in_path, BUFSIZE)]) + args = [to_bytes(i, errors='surrogate_or_strict') for i in args] + actual_out_path = os.path.join(out_dir, os.path.basename(in_path)) + with open(to_bytes(actual_out_path, errors='surrogate_or_strict'), 'wb') as out_file: + try: + p = subprocess.Popen(args, stdin=subprocess.PIPE, + stdout=out_file, stderr=subprocess.PIPE) + except OSError: + raise AnsibleError( + "{0} connection requires dd command in the container to fetch files".format(self.transport) + ) + stdout, stderr = p.communicate() + + if p.returncode != 0: + raise AnsibleError("failed to fetch file %s to %s:\n%s\n%s" % (in_path, out_path, stdout, stderr)) + + if actual_out_path != out_path: + os.rename(to_bytes(actual_out_path, errors='strict'), to_bytes(out_path, errors='strict')) + + def close(self): + """ Terminate the connection. Nothing to do for kubectl""" + super(Connection, self).close() + self._connected = False diff --git a/plugins/connection/libvirt_lxc.py b/plugins/connection/libvirt_lxc.py new file mode 100644 index 0000000000..4f80299646 --- /dev/null +++ b/plugins/connection/libvirt_lxc.py @@ -0,0 +1,181 @@ +# Based on local.py (c) 2012, Michael DeHaan +# Based on chroot.py (c) 2013, Maykel Moya +# (c) 2013, Michael Scherer +# (c) 2015, Toshio Kuratomi +# (c) 2017 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = ''' + author: Michael Scherer + connection: libvirt_lxc + short_description: Run tasks in lxc containers via libvirt + description: + - Run commands or put/fetch files to an existing lxc container using libvirt + options: + remote_addr: + description: + - Container identifier + default: The set user as per docker's configuration + vars: + - name: ansible_host + - name: ansible_libvirt_lxc_host +''' + +import distutils.spawn +import os +import os.path +import subprocess +import traceback + +from ansible import constants as C +from ansible.errors import AnsibleError +from ansible.module_utils.six.moves import shlex_quote +from ansible.module_utils._text import to_bytes +from ansible.plugins.connection import ConnectionBase, BUFSIZE +from ansible.utils.display import Display + +display = Display() + + +class Connection(ConnectionBase): + ''' Local lxc based connections ''' + + transport = 'community.general.libvirt_lxc' + has_pipelining = True + # su currently has an undiagnosed issue with calculating the file + # checksums (so copy, for instance, doesn't work right) + # Have to look into that before re-enabling this + default_user = 'root' + has_tty = False + + def __init__(self, play_context, new_stdin, *args, **kwargs): + super(Connection, self).__init__(play_context, new_stdin, *args, **kwargs) + self.lxc = self._play_context.remote_addr + + self.virsh = self._search_executable('virsh') + + self._check_domain(self.lxc) + + def _search_executable(self, executable): + cmd = distutils.spawn.find_executable(executable) + if not cmd: + raise AnsibleError("%s command not 
found in PATH") % executable + return cmd + + def _check_domain(self, domain): + p = subprocess.Popen([self.virsh, '-q', '-c', 'lxc:///', 'dominfo', to_bytes(domain)], + stdout=subprocess.PIPE, stderr=subprocess.PIPE) + p.communicate() + if p.returncode: + raise AnsibleError("%s is not a lxc defined in libvirt" % domain) + + def _connect(self): + ''' connect to the lxc; nothing to do here ''' + super(Connection, self)._connect() + if not self._connected: + display.vvv("THIS IS A LOCAL LXC DIR", host=self.lxc) + self._connected = True + + def _buffered_exec_command(self, cmd, stdin=subprocess.PIPE): + ''' run a command on the chroot. This is only needed for implementing + put_file() get_file() so that we don't have to read the whole file + into memory. + + compared to exec_command() it looses some niceties like being able to + return the process's exit code immediately. + ''' + executable = C.DEFAULT_EXECUTABLE.split()[0] if C.DEFAULT_EXECUTABLE else '/bin/sh' + local_cmd = [self.virsh, '-q', '-c', 'lxc:///', 'lxc-enter-namespace'] + + if C.DEFAULT_LIBVIRT_LXC_NOSECLABEL: + local_cmd += ['--noseclabel'] + + local_cmd += [self.lxc, '--', executable, '-c', cmd] + + display.vvv("EXEC %s" % (local_cmd,), host=self.lxc) + local_cmd = [to_bytes(i, errors='surrogate_or_strict') for i in local_cmd] + p = subprocess.Popen(local_cmd, shell=False, stdin=stdin, + stdout=subprocess.PIPE, stderr=subprocess.PIPE) + + return p + + def exec_command(self, cmd, in_data=None, sudoable=False): + ''' run a command on the chroot ''' + super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable) + + p = self._buffered_exec_command(cmd) + + stdout, stderr = p.communicate(in_data) + return (p.returncode, stdout, stderr) + + def _prefix_login_path(self, remote_path): + ''' Make sure that we put files into a standard path + + If a path is relative, then we need to choose where to put it. + ssh chooses $HOME but we aren't guaranteed that a home dir will + exist in any given chroot. So for now we're choosing "/" instead. + This also happens to be the former default. 
+
+        Can revisit using $HOME instead if it's a problem
+        '''
+        if not remote_path.startswith(os.path.sep):
+            remote_path = os.path.join(os.path.sep, remote_path)
+        return os.path.normpath(remote_path)
+
+    def put_file(self, in_path, out_path):
+        ''' transfer a file from local to lxc '''
+        super(Connection, self).put_file(in_path, out_path)
+        display.vvv("PUT %s TO %s" % (in_path, out_path), host=self.lxc)
+
+        out_path = shlex_quote(self._prefix_login_path(out_path))
+        try:
+            with open(to_bytes(in_path, errors='surrogate_or_strict'), 'rb') as in_file:
+                if not os.fstat(in_file.fileno()).st_size:
+                    count = ' count=0'
+                else:
+                    count = ''
+                try:
+                    p = self._buffered_exec_command('dd of=%s bs=%s%s' % (out_path, BUFSIZE, count), stdin=in_file)
+                except OSError:
+                    raise AnsibleError("libvirt_lxc connection requires dd command in the container")
+                try:
+                    stdout, stderr = p.communicate()
+                except Exception:
+                    traceback.print_exc()
+                    raise AnsibleError("failed to transfer file %s to %s" % (in_path, out_path))
+                if p.returncode != 0:
+                    raise AnsibleError("failed to transfer file %s to %s:\n%s\n%s" % (in_path, out_path, stdout, stderr))
+        except IOError:
+            raise AnsibleError("file or module does not exist at: %s" % in_path)
+
+    def fetch_file(self, in_path, out_path):
+        ''' fetch a file from lxc to local '''
+        super(Connection, self).fetch_file(in_path, out_path)
+        display.vvv("FETCH %s TO %s" % (in_path, out_path), host=self.lxc)
+
+        in_path = shlex_quote(self._prefix_login_path(in_path))
+        try:
+            p = self._buffered_exec_command('dd if=%s bs=%s' % (in_path, BUFSIZE))
+        except OSError:
+            raise AnsibleError("libvirt_lxc connection requires dd command in the container")
+
+        with open(to_bytes(out_path, errors='surrogate_or_strict'), 'wb+') as out_file:
+            try:
+                chunk = p.stdout.read(BUFSIZE)
+                while chunk:
+                    out_file.write(chunk)
+                    chunk = p.stdout.read(BUFSIZE)
+            except Exception:
+                traceback.print_exc()
+                raise AnsibleError("failed to transfer file %s to %s" % (in_path, out_path))
+            stdout, stderr = p.communicate()
+            if p.returncode != 0:
+                raise AnsibleError("failed to transfer file %s to %s:\n%s\n%s" % (in_path, out_path, stdout, stderr))
+
+    def close(self):
+        ''' terminate the connection; nothing to do here '''
+        super(Connection, self).close()
+        self._connected = False
diff --git a/plugins/connection/lxc.py b/plugins/connection/lxc.py
new file mode 100644
index 0000000000..b13f9b145d
--- /dev/null
+++ b/plugins/connection/lxc.py
@@ -0,0 +1,228 @@
+# (c) 2015, Joerg Thalheim
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+  author: Joerg Thalheim
+  connection: lxc
+  short_description: Run tasks in lxc containers via lxc python library
+  description:
+    - Run commands or put/fetch files to an existing lxc container using lxc python library
+  options:
+    remote_addr:
+      description:
+        - Container identifier
+      default: inventory_hostname
+      vars:
+        - name: ansible_host
+        - name: ansible_lxc_host
+    executable:
+      default: /bin/sh
+      description:
+        - Shell executable
+      vars:
+        - name: ansible_executable
+        - name: ansible_lxc_executable
+'''
+
+import os
+import shutil
+import traceback
+import select
+import fcntl
+import errno
+
+HAS_LIBLXC = False
+try:
+    import lxc as _lxc
+    HAS_LIBLXC = True
+except ImportError:
+    pass
+
+from ansible import constants as C
+from ansible import errors
+from ansible.module_utils._text import 
to_bytes, to_native +from ansible.plugins.connection import ConnectionBase + + +class Connection(ConnectionBase): + ''' Local lxc based connections ''' + + transport = 'community.general.lxc' + has_pipelining = True + default_user = 'root' + + def __init__(self, play_context, new_stdin, *args, **kwargs): + super(Connection, self).__init__(play_context, new_stdin, *args, **kwargs) + + self.container_name = self._play_context.remote_addr + self.container = None + + def _connect(self): + ''' connect to the lxc; nothing to do here ''' + super(Connection, self)._connect() + + if not HAS_LIBLXC: + msg = "lxc bindings for python2 are not installed" + raise errors.AnsibleError(msg) + + if self.container: + return + + self._display.vvv("THIS IS A LOCAL LXC DIR", host=self.container_name) + self.container = _lxc.Container(self.container_name) + if self.container.state == "STOPPED": + raise errors.AnsibleError("%s is not running" % self.container_name) + + def _communicate(self, pid, in_data, stdin, stdout, stderr): + buf = {stdout: [], stderr: []} + read_fds = [stdout, stderr] + if in_data: + write_fds = [stdin] + else: + write_fds = [] + while len(read_fds) > 0 or len(write_fds) > 0: + try: + ready_reads, ready_writes, _ = select.select(read_fds, write_fds, []) + except select.error as e: + if e.args[0] == errno.EINTR: + continue + raise + for fd in ready_writes: + in_data = in_data[os.write(fd, in_data):] + if len(in_data) == 0: + write_fds.remove(fd) + for fd in ready_reads: + data = os.read(fd, 32768) + if not data: + read_fds.remove(fd) + buf[fd].append(data) + + (pid, returncode) = os.waitpid(pid, 0) + + return returncode, b"".join(buf[stdout]), b"".join(buf[stderr]) + + def _set_nonblocking(self, fd): + flags = fcntl.fcntl(fd, fcntl.F_GETFL) | os.O_NONBLOCK + fcntl.fcntl(fd, fcntl.F_SETFL, flags) + return fd + + def exec_command(self, cmd, in_data=None, sudoable=False): + ''' run a command on the chroot ''' + super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable) + + # python2-lxc needs bytes. python3-lxc needs text. 
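+        # What follows: a pipe pair is created per stream, the child-side
+        # ends are set non-blocking and handed to container.attach(); the
+        # parent then closes its copies of the child-side ends and
+        # multiplexes the remaining ends in _communicate() via select().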
+ executable = to_native(self._play_context.executable, errors='surrogate_or_strict') + local_cmd = [executable, '-c', to_native(cmd, errors='surrogate_or_strict')] + + read_stdout, write_stdout = None, None + read_stderr, write_stderr = None, None + read_stdin, write_stdin = None, None + + try: + read_stdout, write_stdout = os.pipe() + read_stderr, write_stderr = os.pipe() + + kwargs = { + 'stdout': self._set_nonblocking(write_stdout), + 'stderr': self._set_nonblocking(write_stderr), + 'env_policy': _lxc.LXC_ATTACH_CLEAR_ENV + } + + if in_data: + read_stdin, write_stdin = os.pipe() + kwargs['stdin'] = self._set_nonblocking(read_stdin) + + self._display.vvv("EXEC %s" % (local_cmd), host=self.container_name) + pid = self.container.attach(_lxc.attach_run_command, local_cmd, **kwargs) + if pid == -1: + msg = "failed to attach to container %s" % self.container_name + raise errors.AnsibleError(msg) + + write_stdout = os.close(write_stdout) + write_stderr = os.close(write_stderr) + if read_stdin: + read_stdin = os.close(read_stdin) + + return self._communicate(pid, + in_data, + write_stdin, + read_stdout, + read_stderr) + finally: + fds = [read_stdout, + write_stdout, + read_stderr, + write_stderr, + read_stdin, + write_stdin] + for fd in fds: + if fd: + os.close(fd) + + def put_file(self, in_path, out_path): + ''' transfer a file from local to lxc ''' + super(Connection, self).put_file(in_path, out_path) + self._display.vvv("PUT %s TO %s" % (in_path, out_path), host=self.container_name) + in_path = to_bytes(in_path, errors='surrogate_or_strict') + out_path = to_bytes(out_path, errors='surrogate_or_strict') + + if not os.path.exists(in_path): + msg = "file or module does not exist: %s" % in_path + raise errors.AnsibleFileNotFound(msg) + try: + src_file = open(in_path, "rb") + except IOError: + traceback.print_exc() + raise errors.AnsibleError("failed to open input file to %s" % in_path) + try: + def write_file(args): + with open(out_path, 'wb+') as dst_file: + shutil.copyfileobj(src_file, dst_file) + try: + self.container.attach_wait(write_file, None) + except IOError: + traceback.print_exc() + msg = "failed to transfer file to %s" % out_path + raise errors.AnsibleError(msg) + finally: + src_file.close() + + def fetch_file(self, in_path, out_path): + ''' fetch a file from lxc to local ''' + super(Connection, self).fetch_file(in_path, out_path) + self._display.vvv("FETCH %s TO %s" % (in_path, out_path), host=self.container_name) + in_path = to_bytes(in_path, errors='surrogate_or_strict') + out_path = to_bytes(out_path, errors='surrogate_or_strict') + + try: + dst_file = open(out_path, "wb") + except IOError: + traceback.print_exc() + msg = "failed to open output file %s" % out_path + raise errors.AnsibleError(msg) + try: + def write_file(args): + try: + with open(in_path, 'rb') as src_file: + shutil.copyfileobj(src_file, dst_file) + finally: + # this is needed in the lxc child process + # to flush internal python buffers + dst_file.close() + try: + self.container.attach_wait(write_file, None) + except IOError: + traceback.print_exc() + msg = "failed to transfer file from %s to %s" % (in_path, out_path) + raise errors.AnsibleError(msg) + finally: + dst_file.close() + + def close(self): + ''' terminate the connection; nothing to do here ''' + super(Connection, self).close() + self._connected = False diff --git a/plugins/connection/lxd.py b/plugins/connection/lxd.py new file mode 100644 index 0000000000..720e000ef6 --- /dev/null +++ b/plugins/connection/lxd.py @@ -0,0 +1,125 @@ +# (c) 2016 Matt Clay 
+# (c) 2017 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = ''' + author: Matt Clay + connection: lxd + short_description: Run tasks in lxc containers via lxc CLI + description: + - Run commands or put/fetch files to an existing lxc container using lxc CLI + options: + remote_addr: + description: + - Container identifier + default: inventory_hostname + vars: + - name: ansible_host + - name: ansible_lxd_host + executable: + description: + - shell to use for execution inside container + default: /bin/sh + vars: + - name: ansible_executable + - name: ansible_lxd_executable +''' + +import os +from distutils.spawn import find_executable +from subprocess import Popen, PIPE + +from ansible.errors import AnsibleError, AnsibleConnectionFailure, AnsibleFileNotFound +from ansible.module_utils._text import to_bytes, to_text +from ansible.plugins.connection import ConnectionBase + + +class Connection(ConnectionBase): + """ lxd based connections """ + + transport = 'community.general.lxd' + has_pipelining = True + default_user = 'root' + + def __init__(self, play_context, new_stdin, *args, **kwargs): + super(Connection, self).__init__(play_context, new_stdin, *args, **kwargs) + + self._host = self._play_context.remote_addr + self._lxc_cmd = find_executable("lxc") + + if not self._lxc_cmd: + raise AnsibleError("lxc command not found in PATH") + + if self._play_context.remote_user is not None and self._play_context.remote_user != 'root': + self._display.warning('lxd does not support remote_user, using container default: root') + + def _connect(self): + """connect to lxd (nothing to do here) """ + super(Connection, self)._connect() + + if not self._connected: + self._display.vvv(u"ESTABLISH LXD CONNECTION FOR USER: root", host=self._host) + self._connected = True + + def exec_command(self, cmd, in_data=None, sudoable=True): + """ execute a command on the lxd host """ + super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable) + + self._display.vvv(u"EXEC {0}".format(cmd), host=self._host) + + local_cmd = [self._lxc_cmd, "exec", self._host, "--", self._play_context.executable, "-c", cmd] + + local_cmd = [to_bytes(i, errors='surrogate_or_strict') for i in local_cmd] + in_data = to_bytes(in_data, errors='surrogate_or_strict', nonstring='passthru') + + process = Popen(local_cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE) + stdout, stderr = process.communicate(in_data) + + stdout = to_text(stdout) + stderr = to_text(stderr) + + if stderr == "error: Container is not running.\n": + raise AnsibleConnectionFailure("container not running: %s" % self._host) + + if stderr == "error: not found\n": + raise AnsibleConnectionFailure("container not found: %s" % self._host) + + return process.returncode, stdout, stderr + + def put_file(self, in_path, out_path): + """ put a file from local to lxd """ + super(Connection, self).put_file(in_path, out_path) + + self._display.vvv(u"PUT {0} TO {1}".format(in_path, out_path), host=self._host) + + if not os.path.isfile(to_bytes(in_path, errors='surrogate_or_strict')): + raise AnsibleFileNotFound("input path is not a file: %s" % in_path) + + local_cmd = [self._lxc_cmd, "file", "push", in_path, self._host + "/" + out_path] + + local_cmd = [to_bytes(i, errors='surrogate_or_strict') for i in local_cmd] + + process = Popen(local_cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE) + process.communicate() + + def 
fetch_file(self, in_path, out_path):
+        """ fetch a file from lxd to local """
+        super(Connection, self).fetch_file(in_path, out_path)
+
+        self._display.vvv(u"FETCH {0} TO {1}".format(in_path, out_path), host=self._host)
+
+        local_cmd = [self._lxc_cmd, "file", "pull", self._host + "/" + in_path, out_path]
+
+        local_cmd = [to_bytes(i, errors='surrogate_or_strict') for i in local_cmd]
+
+        process = Popen(local_cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE)
+        process.communicate()
+
+    def close(self):
+        """ close the connection (nothing to do here) """
+        super(Connection, self).close()
+
+        self._connected = False
diff --git a/plugins/connection/oc.py b/plugins/connection/oc.py
new file mode 100644
index 0000000000..a07e936c58
--- /dev/null
+++ b/plugins/connection/oc.py
@@ -0,0 +1,173 @@
+# Based on the docker connection plugin
+#
+# Connection plugin for configuring kubernetes containers with kubectl
+# (c) 2017, XuXinkun
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+  author:
+    - xuxinkun
+
+  connection: oc
+
+  short_description: Execute tasks in pods running on OpenShift.
+
+  description:
+    - Use the oc exec command to run tasks in, or put/fetch files to, pods running on the OpenShift
+      container platform.
+
+
+  requirements:
+    - oc (go binary)
+
+  options:
+    oc_pod:
+      description:
+        - Pod name. Required when the host name does not match pod name.
+      default: ''
+      vars:
+        - name: ansible_oc_pod
+      env:
+        - name: K8S_AUTH_POD
+    oc_container:
+      description:
+        - Container name. Required when a pod contains more than one container.
+      default: ''
+      vars:
+        - name: ansible_oc_container
+      env:
+        - name: K8S_AUTH_CONTAINER
+    oc_namespace:
+      description:
+        - The namespace of the pod
+      default: ''
+      vars:
+        - name: ansible_oc_namespace
+      env:
+        - name: K8S_AUTH_NAMESPACE
+    oc_extra_args:
+      description:
+        - Extra arguments to pass to the oc command line.
+      default: ''
+      vars:
+        - name: ansible_oc_extra_args
+      env:
+        - name: K8S_AUTH_EXTRA_ARGS
+    oc_kubeconfig:
+      description:
+        - Path to an oc config file. Defaults to I(~/.kube/config)
+      default: ''
+      vars:
+        - name: ansible_oc_kubeconfig
+        - name: ansible_oc_config
+      env:
+        - name: K8S_AUTH_KUBECONFIG
+    oc_context:
+      description:
+        - The name of a context found in the K8s config file.
+      default: ''
+      vars:
+        - name: ansible_oc_context
+      env:
+        - name: k8S_AUTH_CONTEXT
+    oc_host:
+      description:
+        - URL for accessing the API.
+      default: ''
+      vars:
+        - name: ansible_oc_host
+        - name: ansible_oc_server
+      env:
+        - name: K8S_AUTH_HOST
+        - name: K8S_AUTH_SERVER
+    oc_token:
+      description:
+        - API authentication bearer token.
+      vars:
+        - name: ansible_oc_token
+        - name: ansible_oc_api_key
+      env:
+        - name: K8S_AUTH_TOKEN
+        - name: K8S_AUTH_API_KEY
+    client_cert:
+      description:
+        - Path to a certificate used to authenticate with the API.
+ default: '' + vars: + - name: ansible_oc_cert_file + - name: ansible_oc_client_cert + env: + - name: K8S_AUTH_CERT_FILE + aliases: [ oc_cert_file ] + client_key: + description: + - Path to a key file used to authenticate with the API. + default: '' + vars: + - name: ansible_oc_key_file + - name: ansible_oc_client_key + env: + - name: K8S_AUTH_KEY_FILE + aliases: [ oc_key_file ] + ca_cert: + description: + - Path to a CA certificate used to authenticate with the API. + default: '' + vars: + - name: ansible_oc_ssl_ca_cert + - name: ansible_oc_ca_cert + env: + - name: K8S_AUTH_SSL_CA_CERT + aliases: [ oc_ssl_ca_cert ] + validate_certs: + description: + - Whether or not to verify the API server's SSL certificate. Defaults to I(true). + default: '' + vars: + - name: ansible_oc_verify_ssl + - name: ansible_oc_validate_certs + env: + - name: K8S_AUTH_VERIFY_SSL + aliases: [ oc_verify_ssl ] +''' + +from ansible_collections.community.general.plugins.connection.kubectl import Connection as KubectlConnection + + +CONNECTION_TRANSPORT = 'oc' + +CONNECTION_OPTIONS = { + 'oc_container': '-c', + 'oc_namespace': '-n', + 'oc_kubeconfig': '--config', + 'oc_context': '--context', + 'oc_host': '--server', + 'client_cert': '--client-certificate', + 'client_key': '--client-key', + 'ca_cert': '--certificate-authority', + 'validate_certs': '--insecure-skip-tls-verify', + 'oc_token': '--token' +} + + +class Connection(KubectlConnection): + ''' Local oc based connections ''' + transport = CONNECTION_TRANSPORT + connection_options = CONNECTION_OPTIONS + documentation = DOCUMENTATION diff --git a/plugins/connection/qubes.py b/plugins/connection/qubes.py new file mode 100644 index 0000000000..ed03b3d02d --- /dev/null +++ b/plugins/connection/qubes.py @@ -0,0 +1,159 @@ +# Based on the buildah connection plugin +# Copyright (c) 2017 Ansible Project +# 2018 Kushal Das +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# +# +# Written by: Kushal Das (https://github.com/kushaldas) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + + +DOCUMENTATION = ''' + connection: qubes + short_description: Interact with an existing QubesOS AppVM + + description: + - Run commands or put/fetch files to an existing Qubes AppVM using qubes tools. + + author: Kushal Das (@kushaldas) + + + options: + remote_addr: + description: + - vm name + default: inventory_hostname + vars: + - name: ansible_host + remote_user: + description: + - The user to execute as inside the vm. + default: The *user* account as default in Qubes OS. 
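+      # Example inventory line (the VM name is hypothetical):
+      #
+      #   work-vm ansible_connection=community.general.qubes ansible_user=user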
+      vars:
+        - name: ansible_user
+# keyword:
+#   - name: hosts
+'''
+
+import shlex
+import shutil
+
+import os
+import base64
+import subprocess
+
+import ansible.constants as C
+from ansible.module_utils._text import to_bytes, to_native
+from ansible.plugins.connection import ConnectionBase, ensure_connect
+from ansible.errors import AnsibleConnectionFailure
+from ansible.utils.display import Display
+
+display = Display()
+
+
+# this _has to be_ named Connection
+class Connection(ConnectionBase):
+    """This is a connection plugin for qubes: it uses the qvm-run binary to interact with the VMs."""
+
+    # String used to identify this Connection class from other classes
+    transport = 'community.general.qubes'
+    has_pipelining = True
+
+    def __init__(self, play_context, new_stdin, *args, **kwargs):
+        super(Connection, self).__init__(play_context, new_stdin, *args, **kwargs)
+
+        self._remote_vmname = self._play_context.remote_addr
+        self._connected = False
+        # Default username in Qubes
+        self.user = "user"
+        if self._play_context.remote_user:
+            self.user = self._play_context.remote_user
+
+    def _qubes(self, cmd=None, in_data=None, shell="qubes.VMShell"):
+        """run qvm-run executable
+
+        :param cmd: cmd string for remote system
+        :param in_data: data passed to qvm-run's stdin
+        :return: return code, stdout, stderr
+        """
+        display.vvvv("CMD: %s" % cmd)
+        if not cmd.endswith("\n"):
+            cmd = cmd + "\n"
+        local_cmd = []
+
+        # For dom0
+        local_cmd.extend(["qvm-run", "--pass-io", "--service"])
+        if self.user != "user":
+            # Means we have a remote_user value
+            local_cmd.extend(["-u", self.user])
+
+        local_cmd.append(self._remote_vmname)
+
+        local_cmd.append(shell)
+
+        local_cmd = [to_bytes(i, errors='surrogate_or_strict') for i in local_cmd]
+
+        display.vvvv("Local cmd: %s" % local_cmd)
+
+        display.vvv("RUN %s" % (local_cmd,), host=self._remote_vmname)
+        p = subprocess.Popen(local_cmd, shell=False, stdin=subprocess.PIPE,
+                             stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+
+        # Here we are writing the actual command to the remote bash
+        p.stdin.write(to_bytes(cmd, errors='surrogate_or_strict'))
+        stdout, stderr = p.communicate(input=in_data)
+        return p.returncode, stdout, stderr
+
+    def _connect(self):
+        """No persistent connection is being maintained."""
+        super(Connection, self)._connect()
+        self._connected = True
+
+    @ensure_connect
+    def exec_command(self, cmd, in_data=None, sudoable=False):
+        """Run specified command in a running QubesVM """
+        super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable)
+
+        display.vvvv("CMD IS: %s" % cmd)
+
+        rc, stdout, stderr = self._qubes(cmd)
+
+        display.vvvvv("STDOUT %r STDERR %r" % (stdout, stderr))
+        return rc, stdout, stderr
+
+    def put_file(self, in_path, out_path):
+        """ Place a local file located in 'in_path' inside VM at 'out_path' """
+        super(Connection, self).put_file(in_path, out_path)
+        display.vvv("PUT %s TO %s" % (in_path, out_path), host=self._remote_vmname)
+
+        with open(in_path, "rb") as fobj:
+            source_data = fobj.read()
+
+        retcode, dummy, dummy = self._qubes('cat > "{0}"\n'.format(out_path), source_data, "qubes.VMRootShell")
+        # if qubes.VMRootShell service not supported, fallback to qubes.VMShell and
+        # hope it will have appropriate permissions
+        if retcode == 127:
+            retcode, dummy, dummy = self._qubes('cat > "{0}"\n'.format(out_path), source_data)
+
+        if retcode != 0:
+            raise AnsibleConnectionFailure('Failed to put_file to {0}'.format(out_path))
+
+    def fetch_file(self, in_path, out_path):
+        """Obtain file specified via 
'in_path' from the container and place it at 'out_path' """ + super(Connection, self).fetch_file(in_path, out_path) + display.vvv("FETCH %s TO %s" % (in_path, out_path), host=self._remote_vmname) + + # We are running in dom0 + cmd_args_list = ["qvm-run", "--pass-io", self._remote_vmname, "cat {0}".format(in_path)] + with open(out_path, "wb") as fobj: + p = subprocess.Popen(cmd_args_list, shell=False, stdout=fobj) + p.communicate() + if p.returncode != 0: + raise AnsibleConnectionFailure('Failed to fetch file to {0}'.format(out_path)) + + def close(self): + """ Closing the connection """ + super(Connection, self).close() + self._connected = False diff --git a/plugins/connection/saltstack.py b/plugins/connection/saltstack.py new file mode 100644 index 0000000000..638b04e1b4 --- /dev/null +++ b/plugins/connection/saltstack.py @@ -0,0 +1,105 @@ +# Based on local.py (c) 2012, Michael DeHaan +# Based on chroot.py (c) 2013, Maykel Moya +# Based on func.py +# (c) 2014, Michael Scherer +# (c) 2017 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = ''' + author: Michael Scherer (@mscherer) + connection: saltstack + short_description: Allow ansible to piggyback on salt minions + description: + - This allows you to use existing Saltstack infrastructure to connect to targets. +''' + +import re +import os +import pty +import subprocess + +from ansible.module_utils._text import to_bytes, to_text +from ansible.module_utils.six.moves import cPickle + +HAVE_SALTSTACK = False +try: + import salt.client as sc + HAVE_SALTSTACK = True +except ImportError: + pass + +import os +from ansible import errors +from ansible.plugins.connection import ConnectionBase + + +class Connection(ConnectionBase): + ''' Salt-based connections ''' + + has_pipelining = False + # while the name of the product is salt, naming that module salt cause + # trouble with module import + transport = 'community.general.saltstack' + + def __init__(self, play_context, new_stdin, *args, **kwargs): + super(Connection, self).__init__(play_context, new_stdin, *args, **kwargs) + self.host = self._play_context.remote_addr + + def _connect(self): + if not HAVE_SALTSTACK: + raise errors.AnsibleError("saltstack is not installed") + + self.client = sc.LocalClient() + self._connected = True + return self + + def exec_command(self, cmd, sudoable=False, in_data=None): + ''' run a command on the remote minion ''' + super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable) + + if in_data: + raise errors.AnsibleError("Internal Error: this module does not support optimized module pipelining") + + self._display.vvv("EXEC %s" % (cmd), host=self.host) + # need to add 'true;' to work around https://github.com/saltstack/salt/issues/28077 + res = self.client.cmd(self.host, 'cmd.exec_code_all', ['bash', 'true;' + cmd]) + if self.host not in res: + raise errors.AnsibleError("Minion %s didn't answer, check if salt-minion is running and the name is correct" % self.host) + + p = res[self.host] + return (p['retcode'], p['stdout'], p['stderr']) + + def _normalize_path(self, path, prefix): + if not path.startswith(os.path.sep): + path = os.path.join(os.path.sep, path) + normpath = os.path.normpath(path) + return os.path.join(prefix, normpath[1:]) + + def put_file(self, in_path, out_path): + ''' transfer a file from local to remote ''' + + super(Connection, self).put_file(in_path, 
out_path) + + out_path = self._normalize_path(out_path, '/') + self._display.vvv("PUT %s TO %s" % (in_path, out_path), host=self.host) + with open(in_path) as in_fh: + content = in_fh.read() + self.client.cmd(self.host, 'file.write', [out_path, content]) + + # TODO test it + def fetch_file(self, in_path, out_path): + ''' fetch a file from remote to local ''' + + super(Connection, self).fetch_file(in_path, out_path) + + in_path = self._normalize_path(in_path, '/') + self._display.vvv("FETCH %s TO %s" % (in_path, out_path), host=self.host) + content = self.client.cmd(self.host, 'cp.get_file_str', [in_path])[self.host] + open(out_path, 'wb').write(content) + + def close(self): + ''' terminate the connection; nothing to do here ''' + pass diff --git a/plugins/connection/zone.py b/plugins/connection/zone.py new file mode 100644 index 0000000000..755081a8c1 --- /dev/null +++ b/plugins/connection/zone.py @@ -0,0 +1,200 @@ +# Based on local.py (c) 2012, Michael DeHaan +# and chroot.py (c) 2013, Maykel Moya +# and jail.py (c) 2013, Michael Scherer +# (c) 2015, Dagobert Michelsen +# (c) 2015, Toshio Kuratomi +# Copyright (c) 2017 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = ''' + author: Ansible Core Team + connection: zone + short_description: Run tasks in a zone instance + description: + - Run commands or put/fetch files to an existing zone + options: + remote_addr: + description: + - Zone identifier + default: inventory_hostname + vars: + - name: ansible_host + - name: ansible_zone_host +''' + +import distutils.spawn +import os +import os.path +import subprocess +import traceback + +from ansible import constants as C +from ansible.errors import AnsibleError +from ansible.module_utils.six.moves import shlex_quote +from ansible.module_utils._text import to_bytes +from ansible.plugins.connection import ConnectionBase, BUFSIZE +from ansible.utils.display import Display + +display = Display() + + +class Connection(ConnectionBase): + ''' Local zone based connections ''' + + transport = 'community.general.zone' + has_pipelining = True + has_tty = False + + def __init__(self, play_context, new_stdin, *args, **kwargs): + super(Connection, self).__init__(play_context, new_stdin, *args, **kwargs) + + self.zone = self._play_context.remote_addr + + if os.geteuid() != 0: + raise AnsibleError("zone connection requires running as root") + + self.zoneadm_cmd = to_bytes(self._search_executable('zoneadm')) + self.zlogin_cmd = to_bytes(self._search_executable('zlogin')) + + if self.zone not in self.list_zones(): + raise AnsibleError("incorrect zone name %s" % self.zone) + + @staticmethod + def _search_executable(executable): + cmd = distutils.spawn.find_executable(executable) + if not cmd: + raise AnsibleError("%s command not found in PATH" % executable) + return cmd + + def list_zones(self): + process = subprocess.Popen([self.zoneadm_cmd, 'list', '-ip'], + stdin=subprocess.PIPE, + stdout=subprocess.PIPE, stderr=subprocess.PIPE) + + zones = [] + for l in process.stdout.readlines(): + # 1:work:running:/zones/work:3126dc59-9a07-4829-cde9-a816e4c5040e:native:shared + s = l.split(':') + if s[1] != 'global': + zones.append(s[1]) + + return zones + + def get_zone_path(self): + # solaris10vm# zoneadm -z cswbuild list -p + # -:cswbuild:installed:/zones/cswbuild:479f3c4b-d0c6-e97b-cd04-fd58f2c0238e:native:shared + process = 
subprocess.Popen([self.zoneadm_cmd, '-z', to_bytes(self.zone), 'list', '-p'], + stdin=subprocess.PIPE, + stdout=subprocess.PIPE, stderr=subprocess.PIPE) + + # stdout, stderr = p.communicate() + path = process.stdout.readlines()[0].split(':')[3] + return path + '/root' + + def _connect(self): + ''' connect to the zone; nothing to do here ''' + super(Connection, self)._connect() + if not self._connected: + display.vvv("THIS IS A LOCAL ZONE DIR", host=self.zone) + self._connected = True + + def _buffered_exec_command(self, cmd, stdin=subprocess.PIPE): + ''' run a command on the zone. This is only needed for implementing + put_file() get_file() so that we don't have to read the whole file + into memory. + + compared to exec_command() it looses some niceties like being able to + return the process's exit code immediately. + ''' + # NOTE: zlogin invokes a shell (just like ssh does) so we do not pass + # this through /bin/sh -c here. Instead it goes through the shell + # that zlogin selects. + local_cmd = [self.zlogin_cmd, self.zone, cmd] + local_cmd = map(to_bytes, local_cmd) + + display.vvv("EXEC %s" % (local_cmd), host=self.zone) + p = subprocess.Popen(local_cmd, shell=False, stdin=stdin, + stdout=subprocess.PIPE, stderr=subprocess.PIPE) + + return p + + def exec_command(self, cmd, in_data=None, sudoable=False): + ''' run a command on the zone ''' + super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable) + + p = self._buffered_exec_command(cmd) + + stdout, stderr = p.communicate(in_data) + return (p.returncode, stdout, stderr) + + def _prefix_login_path(self, remote_path): + ''' Make sure that we put files into a standard path + + If a path is relative, then we need to choose where to put it. + ssh chooses $HOME but we aren't guaranteed that a home dir will + exist in any given chroot. So for now we're choosing "/" instead. + This also happens to be the former default. 
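+
+        For example, 'var/tmp/x' is rewritten to '/var/tmp/x'.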
+
+        Can revisit using $HOME instead if it's a problem
+        '''
+        if not remote_path.startswith(os.path.sep):
+            remote_path = os.path.join(os.path.sep, remote_path)
+        return os.path.normpath(remote_path)
+
+    def put_file(self, in_path, out_path):
+        ''' transfer a file from local to zone '''
+        super(Connection, self).put_file(in_path, out_path)
+        display.vvv("PUT %s TO %s" % (in_path, out_path), host=self.zone)
+
+        out_path = shlex_quote(self._prefix_login_path(out_path))
+        try:
+            with open(in_path, 'rb') as in_file:
+                if not os.fstat(in_file.fileno()).st_size:
+                    count = ' count=0'
+                else:
+                    count = ''
+                try:
+                    p = self._buffered_exec_command('dd of=%s bs=%s%s' % (out_path, BUFSIZE, count), stdin=in_file)
+                except OSError:
+                    raise AnsibleError("zone connection requires dd command in the zone")
+                try:
+                    stdout, stderr = p.communicate()
+                except Exception:
+                    traceback.print_exc()
+                    raise AnsibleError("failed to transfer file %s to %s" % (in_path, out_path))
+                if p.returncode != 0:
+                    raise AnsibleError("failed to transfer file %s to %s:\n%s\n%s" % (in_path, out_path, stdout, stderr))
+        except IOError:
+            raise AnsibleError("file or module does not exist at: %s" % in_path)
+
+    def fetch_file(self, in_path, out_path):
+        ''' fetch a file from zone to local '''
+        super(Connection, self).fetch_file(in_path, out_path)
+        display.vvv("FETCH %s TO %s" % (in_path, out_path), host=self.zone)
+
+        in_path = shlex_quote(self._prefix_login_path(in_path))
+        try:
+            p = self._buffered_exec_command('dd if=%s bs=%s' % (in_path, BUFSIZE))
+        except OSError:
+            raise AnsibleError("zone connection requires dd command in the zone")
+
+        with open(out_path, 'wb+') as out_file:
+            try:
+                chunk = p.stdout.read(BUFSIZE)
+                while chunk:
+                    out_file.write(chunk)
+                    chunk = p.stdout.read(BUFSIZE)
+            except Exception:
+                traceback.print_exc()
+                raise AnsibleError("failed to transfer file %s to %s" % (in_path, out_path))
+            stdout, stderr = p.communicate()
+            if p.returncode != 0:
+                raise AnsibleError("failed to transfer file %s to %s:\n%s\n%s" % (in_path, out_path, stdout, stderr))
+
+    def close(self):
+        ''' terminate the connection; nothing to do here '''
+        super(Connection, self).close()
+        self._connected = False
diff --git a/plugins/doc_fragments/__init__.py b/plugins/doc_fragments/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/plugins/doc_fragments/a10.py b/plugins/doc_fragments/a10.py
new file mode 100644
index 0000000000..d2d6cb502f
--- /dev/null
+++ b/plugins/doc_fragments/a10.py
@@ -0,0 +1,45 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2016, John Barker
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+
+class ModuleDocFragment(object):
+
+    # Standard files documentation fragment
+    DOCUMENTATION = r'''
+options:
+  host:
+    description:
+      - Hostname or IP of the A10 Networks device.
+    type: str
+    required: true
+  username:
+    description:
+      - An account with administrator privileges.
+    type: str
+    required: true
+    aliases: [ admin, user ]
+  password:
+    description:
+      - Password for the C(username) account.
+    type: str
+    required: true
+    aliases: [ pass, pwd ]
+  write_config:
+    description:
+      - If C(yes), any changes will cause a write of the running configuration
+        to non-volatile memory. This will save I(all) configuration changes,
+        including those that may have been made manually or through other modules,
+        so care should be taken when specifying C(yes).
+ type: bool + default: no + validate_certs: + description: + - If C(no), SSL certificates will not be validated. + - This should only be used on personally controlled devices using self-signed certificates. + type: bool + default: yes +notes: + - Requires A10 Networks aXAPI 2.1. +''' diff --git a/plugins/doc_fragments/aireos.py b/plugins/doc_fragments/aireos.py new file mode 100644 index 0000000000..b3e734a06c --- /dev/null +++ b/plugins/doc_fragments/aireos.py @@ -0,0 +1,55 @@ +# -*- coding: utf-8 -*- + +# Copyright: (c) 2017, James Mighion <@jmighion> +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + + +class ModuleDocFragment(object): + + # Standard files documentation fragment + DOCUMENTATION = r''' +options: + provider: + description: + - A dict object containing connection details. + suboptions: + host: + description: + - Specifies the DNS host name or address for connecting to the remote device over the specified transport. + - The value of host is used as the destination address for the transport. + type: str + required: true + port: + description: + - Specifies the port to use when building the connection to the remote device. + type: int + default: 22 + username: + description: + - Configures the username to use to authenticate the connection to the remote device. + - This value is used to authenticate the SSH session. + - If the value is not specified in the task, the value of environment variable + C(ANSIBLE_NET_USERNAME) will be used instead. + type: str + password: + description: + - Specifies the password to use to authenticate the connection to the remote device. + - This value is used to authenticate the SSH session. + - If the value is not specified in the task, the value of environment variable + C(ANSIBLE_NET_PASSWORD) will be used instead. + type: str + timeout: + description: + - Specifies the timeout in seconds for communicating with the network device + for either connecting or sending commands. + - If the timeout is exceeded before the operation is completed, the module will error. + type: int + default: 10 + ssh_keyfile: + description: + - Specifies the SSH key to use to authenticate the connection to the remote device. + - This value is the path to the key used to authenticate the SSH session. + - If the value is not specified in the task, the value of environment variable + C(ANSIBLE_NET_SSH_KEYFILE) will be used instead. + type: path +''' diff --git a/plugins/doc_fragments/alicloud.py b/plugins/doc_fragments/alicloud.py new file mode 100644 index 0000000000..ced80906b0 --- /dev/null +++ b/plugins/doc_fragments/alicloud.py @@ -0,0 +1,56 @@ +# -*- coding: utf-8 -*- + +# Copyright: (c) 2017 Alibaba Group Holding Limited. He Guimin +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + + +class ModuleDocFragment(object): + + # Alicloud only documentation fragment + DOCUMENTATION = r''' +options: + alicloud_access_key: + description: + - Aliyun Cloud access key. + - If not set then the value of environment variable C(ALICLOUD_ACCESS_KEY), + C(ALICLOUD_ACCESS_KEY_ID) will be used instead. + type: str + aliases: [ access_key_id, access_key ] + alicloud_secret_key: + description: + - Aliyun Cloud secret key. + - If not set then the value of environment variable C(ALICLOUD_SECRET_KEY), + C(ALICLOUD_SECRET_ACCESS_KEY) will be used instead. + type: str + aliases: [ secret_access_key, secret_key ] + alicloud_region: + description: + - The Aliyun Cloud region to use. 
+      - If not specified then the value of environment variable
+        C(ALICLOUD_REGION), C(ALICLOUD_REGION_ID) will be used instead.
+    type: str
+    aliases: [ region, region_id ]
+  alicloud_security_token:
+    description:
+      - The Aliyun Cloud security token.
+      - If not specified then the value of environment variable
+        C(ALICLOUD_SECURITY_TOKEN) will be used instead.
+    type: str
+    aliases: [ security_token ]
+author:
+- He Guimin (@xiaozhu36)
+requirements:
+- python >= 2.6
+extends_documentation_fragment:
+- community.general.alicloud
+
+notes:
+  - If parameters are not set within the module, the following
+    environment variables can be used in decreasing order of precedence
+    C(ALICLOUD_ACCESS_KEY) or C(ALICLOUD_ACCESS_KEY_ID),
+    C(ALICLOUD_SECRET_KEY) or C(ALICLOUD_SECRET_ACCESS_KEY),
+    C(ALICLOUD_REGION) or C(ALICLOUD_REGION_ID),
+    C(ALICLOUD_SECURITY_TOKEN)
+  - C(ALICLOUD_REGION) or C(ALICLOUD_REGION_ID) can typically be used to specify the
+    ALICLOUD region, when required, but this can also be configured in the footmark config file
+'''
diff --git a/plugins/doc_fragments/aruba.py b/plugins/doc_fragments/aruba.py
new file mode 100644
index 0000000000..6bd49ddf9f
--- /dev/null
+++ b/plugins/doc_fragments/aruba.py
@@ -0,0 +1,58 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2017, James Mighion <@jmighion>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+
+class ModuleDocFragment(object):
+
+    # Standard files documentation fragment
+    DOCUMENTATION = r'''
+options:
+  provider:
+    description:
+      - A dict object containing connection details.
+    suboptions:
+      host:
+        description:
+          - Specifies the DNS host name or address for connecting to the remote
+            device over the specified transport. The value of host is used as
+            the destination address for the transport.
+        type: str
+        required: true
+      port:
+        description:
+          - Specifies the port to use when building the connection to the remote
+            device.
+        type: int
+        default: 22
+      username:
+        description:
+          - Configures the username to use to authenticate the connection to
+            the remote device. This value is used to authenticate
+            the SSH session. If the value is not specified in the task, the
+            value of environment variable C(ANSIBLE_NET_USERNAME) will be used instead.
+        type: str
+      password:
+        description:
+          - Specifies the password to use to authenticate the connection to
+            the remote device. This value is used to authenticate
+            the SSH session. If the value is not specified in the task, the
+            value of environment variable C(ANSIBLE_NET_PASSWORD) will be used instead.
+        type: str
+      timeout:
+        description:
+          - Specifies the timeout in seconds for communicating with the network device
+            for either connecting or sending commands. If the timeout is
+            exceeded before the operation is completed, the module will error.
+        type: int
+        default: 10
+      ssh_keyfile:
+        description:
+          - Specifies the SSH key to use to authenticate the connection to
+            the remote device. This value is the path to the
+            key used to authenticate the SSH session. If the value is not specified
+            in the task, the value of environment variable C(ANSIBLE_NET_SSH_KEYFILE)
+            will be used instead.
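+        # Example task-level provider dict (all values hypothetical):
+        #
+        #   provider:
+        #     host: 192.0.2.10
+        #     username: admin
+        #     ssh_keyfile: /home/admin/.ssh/id_rsa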
+        type: path
+'''
diff --git a/plugins/doc_fragments/auth_basic.py b/plugins/doc_fragments/auth_basic.py
new file mode 100644
index 0000000000..64777f5921
--- /dev/null
+++ b/plugins/doc_fragments/auth_basic.py
@@ -0,0 +1,28 @@
+# -*- coding: utf-8 -*-
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+
+class ModuleDocFragment(object):
+
+    # Standard files documentation fragment
+    DOCUMENTATION = r'''
+options:
+  api_url:
+    description:
+      - The resolvable endpoint for the API.
+    type: str
+  api_username:
+    description:
+      - The username to use for authentication against the API.
+    type: str
+  api_password:
+    description:
+      - The password to use for authentication against the API.
+    type: str
+  validate_certs:
+    description:
+      - Whether or not to validate SSL certs when supplying an HTTPS endpoint.
+    type: bool
+    default: yes
+'''
diff --git a/plugins/doc_fragments/avi.py b/plugins/doc_fragments/avi.py
new file mode 100644
index 0000000000..2692e11888
--- /dev/null
+++ b/plugins/doc_fragments/avi.py
@@ -0,0 +1,96 @@
+# -*- coding: utf-8 -*-
+
+# Created on December 12, 2016
+# @author: Gaurav Rastogi (grastogi@avinetworks.com)
+# Avi Version: 16.3.4
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+
+class ModuleDocFragment(object):
+    # Avi common documentation fragment
+    DOCUMENTATION = r'''
+options:
+  controller:
+    description:
+      - IP address or hostname of the controller. The default value is the environment variable C(AVI_CONTROLLER).
+    type: str
+    default: ''
+  username:
+    description:
+      - Username used for accessing Avi controller. The default value is the environment variable C(AVI_USERNAME).
+    type: str
+    default: ''
+  password:
+    description:
+      - Password of Avi user in Avi controller. The default value is the environment variable C(AVI_PASSWORD).
+    type: str
+    default: ''
+  tenant:
+    description:
+      - Name of tenant used for all Avi API calls and context of object.
+    type: str
+    default: admin
+  tenant_uuid:
+    description:
+      - UUID of tenant used for all Avi API calls and context of object.
+    type: str
+    default: ''
+  api_version:
+    description:
+      - Avi API version to use for Avi API and objects.
+    type: str
+    default: 16.4.4
+  avi_credentials:
+    description:
+      - Avi Credentials dictionary which can be used in lieu of enumerating Avi Controller login details.
+    suboptions:
+      controller:
+        description:
+          - Avi controller IP or FQDN
+      username:
+        description:
+          - Avi controller username
+      password:
+        description:
+          - Avi controller password
+      api_version:
+        description:
+          - Avi controller version
+        default: 16.4.4
+      tenant:
+        description:
+          - Avi controller tenant
+        default: admin
+      tenant_uuid:
+        description:
+          - Avi controller tenant UUID
+      port:
+        description:
+          - Avi controller port
+      token:
+        description:
+          - Avi controller API token
+      timeout:
+        description:
+          - Avi controller request timeout
+        default: 300
+      session_id:
+        description:
+          - Avi controller API session id to reuse existing session with csrftoken
+      csrftoken:
+        description:
+          - Avi controller API csrftoken to reuse existing session with session id
+    type: dict
+  api_context:
+    description:
+      - Avi API context that includes current session ID and CSRF Token.
+      - This allows user to perform single login and re-use the session.
+    type: dict
+  avi_disable_session_cache_as_fact:
+    description:
+      - Disables caching of Avi session information as a fact.
+    type: bool
+
+notes:
+  - For more information on using Ansible to manage Avi Network devices see U(https://www.ansible.com/ansible-avi-networks).
+'''
diff --git a/plugins/doc_fragments/ce.py b/plugins/doc_fragments/ce.py
new file mode 100644
index 0000000000..0709ab26e5
--- /dev/null
+++ b/plugins/doc_fragments/ce.py
@@ -0,0 +1,60 @@
+# -*- coding: utf-8 -*-
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+
+class ModuleDocFragment(object):
+
+    # Standard files documentation fragment
+    DOCUMENTATION = r'''
+options:
+  provider:
+    description:
+      - A dict object containing connection details.
+    suboptions:
+      host:
+        description:
+          - Specifies the DNS host name or address for connecting to the remote
+            device over the specified transport. The value of host is used as
+            the destination address for the transport.
+        type: str
+        required: true
+      port:
+        description:
+          - Specifies the port to use when building the connection to the remote
+            device. This value applies to either I(cli) or I(netconf). The port
+            value will default to the appropriate transport common port if
+            none is provided in the task. (cli=22, netconf=22).
+        type: int
+        default: 0 (use common port)
+      username:
+        description:
+          - Configures the username to use to authenticate the connection to
+            the remote device. This value is used to authenticate the CLI login.
+            If the value is not specified in the task, the value of environment
+            variable C(ANSIBLE_NET_USERNAME) will be used instead.
+        type: str
+      password:
+        description:
+          - Specifies the password to use to authenticate the connection to
+            the remote device. This is a common argument used for cli
+            transports. If the value is not specified in the task, the
+            value of environment variable C(ANSIBLE_NET_PASSWORD) will be used instead.
+        type: str
+      ssh_keyfile:
+        description:
+          - Specifies the SSH key to use to authenticate the connection to
+            the remote device. This argument is used for the I(cli)
+            transport. If the value is not specified in the task, the
+            value of environment variable C(ANSIBLE_NET_SSH_KEYFILE) will be used instead.
+        type: path
+      transport:
+        description:
+          - Configures the transport connection to use when connecting to the
+            remote device. The transport argument supports connectivity to the
+            device over cli (ssh).
+        type: str
+        required: true
+        choices: [ cli, netconf ]
+        default: cli
+'''
diff --git a/plugins/doc_fragments/cloudscale.py b/plugins/doc_fragments/cloudscale.py
new file mode 100644
index 0000000000..c7c58819bd
--- /dev/null
+++ b/plugins/doc_fragments/cloudscale.py
@@ -0,0 +1,30 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2019, René Moser
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+class ModuleDocFragment(object):
+
+    # Standard cloudscale documentation fragment
+    DOCUMENTATION = '''
+options:
+  api_token:
+    description:
+      - cloudscale.ch API token.
+      - This can also be passed in the C(CLOUDSCALE_API_TOKEN) environment variable.
+    type: str
+  api_timeout:
+    description:
+      - Timeout in seconds for calls to the cloudscale.ch API.
+    default: 30
+    type: int
+notes:
+  - Instead of the api_token parameter the C(CLOUDSCALE_API_TOKEN) environment variable can be used.
+  - All operations are performed using the cloudscale.ch public API v1.
+  - "For details consult the full API documentation: U(https://www.cloudscale.ch/en/api/v1)."
+  - A valid API token is required for all operations. You can create as many tokens as you like using the cloudscale.ch control panel at
+    U(https://control.cloudscale.ch).
+'''
diff --git a/plugins/doc_fragments/cloudstack.py b/plugins/doc_fragments/cloudstack.py
new file mode 100644
index 0000000000..7e93e6cbdc
--- /dev/null
+++ b/plugins/doc_fragments/cloudstack.py
@@ -0,0 +1,71 @@
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2015, René Moser
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+class ModuleDocFragment(object):
+
+    # Standard cloudstack documentation fragment
+    DOCUMENTATION = r'''
+options:
+  api_key:
+    description:
+      - API key of the CloudStack API.
+      - If not given, the C(CLOUDSTACK_KEY) env variable is considered.
+      - As the last option, the value is taken from the ini config file, also see the notes.
+    type: str
+  api_secret:
+    description:
+      - Secret key of the CloudStack API.
+      - If not set, the C(CLOUDSTACK_SECRET) env variable is considered.
+      - As the last option, the value is taken from the ini config file, also see the notes.
+    type: str
+  api_url:
+    description:
+      - URL of the CloudStack API, e.g. https://cloud.example.com/client/api.
+      - If not given, the C(CLOUDSTACK_ENDPOINT) env variable is considered.
+      - As the last option, the value is taken from the ini config file, also see the notes.
+    type: str
+  api_http_method:
+    description:
+      - HTTP method used to query the API endpoint.
+      - If not given, the C(CLOUDSTACK_METHOD) env variable is considered.
+      - As the last option, the value is taken from the ini config file, also see the notes.
+      - Fallback value is C(get) if not specified.
+    type: str
+    choices: [ get, post ]
+  api_timeout:
+    description:
+      - HTTP timeout in seconds.
+      - If not given, the C(CLOUDSTACK_TIMEOUT) env variable is considered.
+      - As the last option, the value is taken from the ini config file, also see the notes.
+      - Fallback value is 10 seconds if not specified.
+    type: int
+  api_region:
+    description:
+      - Name of the ini section in the C(cloudstack.ini) file.
+      - If not given, the C(CLOUDSTACK_REGION) env variable is considered.
+    type: str
+    default: cloudstack
+requirements:
+  - python >= 2.6
+  - cs >= 0.6.10
+notes:
+  - Ansible uses the C(cs) library's configuration method if credentials are not
+    provided by the arguments C(api_url), C(api_key), C(api_secret).
+    Configuration is read from several locations, in the following order.
+    The C(CLOUDSTACK_ENDPOINT), C(CLOUDSTACK_KEY), C(CLOUDSTACK_SECRET) and
+    C(CLOUDSTACK_METHOD), C(CLOUDSTACK_TIMEOUT) environment variables.
+    A C(CLOUDSTACK_CONFIG) environment variable pointing to an C(.ini) file.
+    A C(cloudstack.ini) file in the current working directory.
+    A C(.cloudstack.ini) file in the user's home directory.
+    Optionally multiple credentials and endpoints can be specified using ini sections in C(cloudstack.ini).
+    Use the argument C(api_region) to select the section name, default section is C(cloudstack).
+    See U(https://github.com/exoscale/cs) for more information.
+  - A detailed guide about cloudstack modules can be found in the L(CloudStack Cloud Guide,../scenario_guides/guide_cloudstack.html).
+  - This module supports check mode.
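+# A minimal usage sketch, kept as a comment (the module, instance and section
+# names below are illustrative, not part of this fragment), showing credentials
+# being resolved from an ini section selected via I(api_region):
+#
+# - name: Gather zone info using credentials from the "exoscale" ini section
+#   cs_zone_info:
+#     api_region: exoscale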
+'''
diff --git a/plugins/doc_fragments/cnos.py b/plugins/doc_fragments/cnos.py
new file mode 100644
index 0000000000..11f59e95de
--- /dev/null
+++ b/plugins/doc_fragments/cnos.py
@@ -0,0 +1,78 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2017, Lenovo, Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+
+class ModuleDocFragment(object):
+    # Standard CNOS documentation fragment
+    DOCUMENTATION = r'''
+options:
+  outputfile:
+    description:
+      - This specifies the file path where the output of each command
+        execution is saved. Each command that is specified in the merged
+        template file and each response from the device are saved here.
+        Usually the location is the results folder, but you can
+        choose another location based on your write permission.
+    type: str
+    required: true
+  host:
+    description:
+      - This is the variable used to search the hosts file at
+        /etc/ansible/hosts and identify the IP address of the device on
+        which the template is going to be applied. Usually the Ansible
+        keyword {{ inventory_hostname }} is specified in the playbook as
+        an abstraction of the group of network elements that need to be
+        configured.
+    type: str
+    required: true
+  username:
+    description:
+      - Configures the username used to authenticate the connection to
+        the remote device. The value of the username parameter is used to
+        authenticate the SSH session. While generally the value should
+        come from the inventory file, you can also specify it as a
+        variable. This parameter is optional. If it is not specified, no
+        default value will be used.
+    type: str
+    required: true
+  password:
+    description:
+      - Configures the password used to authenticate the connection to
+        the remote device. The value of the password parameter is used to
+        authenticate the SSH session. While generally the value should
+        come from the inventory file, you can also specify it as a
+        variable. This parameter is optional. If it is not specified, no
+        default value will be used.
+    type: str
+    required: true
+  enablePassword:
+    description:
+      - Configures the password used to enter Global Configuration
+        command mode on the switch. If the switch does not request this
+        password, the parameter is ignored. While generally the value
+        should come from the inventory file, you can also specify it as a
+        variable. This parameter is optional. If it is not specified,
+        no default value will be used.
+    type: str
+  deviceType:
+    description:
+      - This specifies the type of device where the method is executed.
+        The choices NE1072T, NE1032, NE1032T, NE10032 and NE2572 were added
+        in Ansible 2.4. The choice NE0152T was added in Ansible 2.8.
+    type: str
+    required: true
+    choices:
+    - g8272_cnos
+    - g8296_cnos
+    - g8332_cnos
+    - NE0152T
+    - NE1072T
+    - NE1032
+    - NE1032T
+    - NE10032
+    - NE2572
+notes:
+  - For more information on using Ansible to manage Lenovo Network devices see U(https://www.ansible.com/ansible-lenovo).
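+# A short, hypothetical task built from the options above, kept as a comment
+# (values are illustrative; check the specific cnos_* module for its full
+# parameter list):
+#
+# - name: Back up the running configuration of a Lenovo CNOS switch
+#   cnos_backup:
+#     host: "{{ inventory_hostname }}"
+#     username: "{{ ansible_user }}"
+#     password: "{{ ansible_password }}"
+#     deviceType: g8272_cnos
+#     outputfile: ./results/cnos_backup_output.txt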
+'''
diff --git a/plugins/doc_fragments/digital_ocean.py b/plugins/doc_fragments/digital_ocean.py
new file mode 100644
index 0000000000..4422339168
--- /dev/null
+++ b/plugins/doc_fragments/digital_ocean.py
@@ -0,0 +1,30 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2018, Ansible Project
+# Copyright: (c) 2018, Abhijeet Kasurde (akasurde@redhat.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+
+class ModuleDocFragment(object):
+    # Parameters for DigitalOcean modules
+    DOCUMENTATION = r'''
+options:
+  oauth_token:
+    description:
+      - DigitalOcean OAuth token.
+      - "There are several other environment variables which can be used to provide this value."
+      - "i.e., 'DO_API_TOKEN', 'DO_API_KEY', 'DO_OAUTH_TOKEN' and 'OAUTH_TOKEN'."
+    type: str
+    aliases: [ api_token ]
+  timeout:
+    description:
+      - The timeout in seconds used for polling DigitalOcean's API.
+    type: int
+    default: 30
+  validate_certs:
+    description:
+      - If set to C(no), the SSL certificates will not be validated.
+      - This should only be set to C(no) on personally controlled sites using self-signed certificates.
+    type: bool
+    default: yes
+'''
diff --git a/plugins/doc_fragments/dimensiondata.py b/plugins/doc_fragments/dimensiondata.py
new file mode 100644
index 0000000000..1eb5f16051
--- /dev/null
+++ b/plugins/doc_fragments/dimensiondata.py
@@ -0,0 +1,48 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright: (c) 2016, Dimension Data
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# Authors:
+#   - Adam Friedman
+
+
+class ModuleDocFragment(object):
+
+    # Dimension Data doc fragment
+    DOCUMENTATION = r'''
+
+options:
+  region:
+    description:
+      - The target region.
+    choices:
+      - Regions are defined in Apache libcloud project [libcloud/common/dimensiondata.py]
+      - They are also listed in U(https://libcloud.readthedocs.io/en/latest/compute/drivers/dimensiondata.html)
+      - Note that the default value "na" stands for "North America".
+      - The module prepends 'dd-' to the region choice.
+    type: str
+    default: na
+  mcp_user:
+    description:
+      - The username used to authenticate to the CloudControl API.
+      - If not specified, will fall back to C(MCP_USER) from environment variable or C(~/.dimensiondata).
+    type: str
+  mcp_password:
+    description:
+      - The password used to authenticate to the CloudControl API.
+      - If not specified, will fall back to C(MCP_PASSWORD) from environment variable or C(~/.dimensiondata).
+      - Required if I(mcp_user) is specified.
+    type: str
+  location:
+    description:
+      - The target datacenter.
+    type: str
+    required: true
+  validate_certs:
+    description:
+      - If C(false), SSL certificates will not be validated.
+      - This should only be used on private instances of the CloudControl API that use self-signed certificates.
+    type: bool
+    default: yes
+'''
diff --git a/plugins/doc_fragments/dimensiondata_wait.py b/plugins/doc_fragments/dimensiondata_wait.py
new file mode 100644
index 0000000000..b87132678f
--- /dev/null
+++ b/plugins/doc_fragments/dimensiondata_wait.py
@@ -0,0 +1,33 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright: (c) 2016, Dimension Data
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# Authors:
+#   - Adam Friedman
+
+
+class ModuleDocFragment(object):
+
+    # Dimension Data ("wait-for-completion" parameters) doc fragment
+    DOCUMENTATION = r'''
+
+options:
+  wait:
+    description:
+      - Should we wait for the task to complete before moving on to the next.
+ type: bool + default: no + wait_time: + description: + - The maximum amount of time (in seconds) to wait for the task to complete. + - Only applicable if I(wait=true). + type: int + default: 600 + wait_poll_interval: + description: + - The amount of time (in seconds) to wait between checks for task completion. + - Only applicable if I(wait=true). + type: int + default: 2 + ''' diff --git a/plugins/doc_fragments/docker.py b/plugins/doc_fragments/docker.py new file mode 100644 index 0000000000..80975ff30f --- /dev/null +++ b/plugins/doc_fragments/docker.py @@ -0,0 +1,136 @@ +# -*- coding: utf-8 -*- + +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + + +class ModuleDocFragment(object): + + # Docker doc fragment + DOCUMENTATION = r''' + +options: + docker_host: + description: + - The URL or Unix socket path used to connect to the Docker API. To connect to a remote host, provide the + TCP connection string. For example, C(tcp://192.0.2.23:2376). If TLS is used to encrypt the connection, + the module will automatically replace C(tcp) in the connection URL with C(https). + - If the value is not specified in the task, the value of environment variable C(DOCKER_HOST) will be used + instead. If the environment variable is not set, the default value will be used. + type: str + default: unix://var/run/docker.sock + aliases: [ docker_url ] + tls_hostname: + description: + - When verifying the authenticity of the Docker Host server, provide the expected name of the server. + - If the value is not specified in the task, the value of environment variable C(DOCKER_TLS_HOSTNAME) will + be used instead. If the environment variable is not set, the default value will be used. + type: str + default: localhost + api_version: + description: + - The version of the Docker API running on the Docker Host. + - Defaults to the latest version of the API supported by Docker SDK for Python and the docker daemon. + - If the value is not specified in the task, the value of environment variable C(DOCKER_API_VERSION) will be + used instead. If the environment variable is not set, the default value will be used. + type: str + default: auto + aliases: [ docker_api_version ] + timeout: + description: + - The maximum amount of time in seconds to wait on a response from the API. + - If the value is not specified in the task, the value of environment variable C(DOCKER_TIMEOUT) will be used + instead. If the environment variable is not set, the default value will be used. + type: int + default: 60 + ca_cert: + description: + - Use a CA certificate when performing server verification by providing the path to a CA certificate file. + - If the value is not specified in the task and the environment variable C(DOCKER_CERT_PATH) is set, + the file C(ca.pem) from the directory specified in the environment variable C(DOCKER_CERT_PATH) will be used. + type: path + aliases: [ tls_ca_cert, cacert_path ] + client_cert: + description: + - Path to the client's TLS certificate file. + - If the value is not specified in the task and the environment variable C(DOCKER_CERT_PATH) is set, + the file C(cert.pem) from the directory specified in the environment variable C(DOCKER_CERT_PATH) will be used. + type: path + aliases: [ tls_client_cert, cert_path ] + client_key: + description: + - Path to the client's TLS key file. 
+      - If the value is not specified in the task and the environment variable C(DOCKER_CERT_PATH) is set,
+        the file C(key.pem) from the directory specified in the environment variable C(DOCKER_CERT_PATH) will be used.
+    type: path
+    aliases: [ tls_client_key, key_path ]
+  ssl_version:
+    description:
+      - Provide a valid SSL version number. Default value determined by ssl.py module.
+      - If the value is not specified in the task, the value of environment variable C(DOCKER_SSL_VERSION) will be
+        used instead.
+    type: str
+  tls:
+    description:
+      - Secure the connection to the API by using TLS without verifying the authenticity of the Docker host
+        server. Note that if I(validate_certs) is set to C(yes) as well, it will take precedence.
+      - If the value is not specified in the task, the value of environment variable C(DOCKER_TLS) will be used
+        instead. If the environment variable is not set, the default value will be used.
+    type: bool
+    default: no
+  validate_certs:
+    description:
+      - Secure the connection to the API by using TLS and verifying the authenticity of the Docker host server.
+      - If the value is not specified in the task, the value of environment variable C(DOCKER_TLS_VERIFY) will be
+        used instead. If the environment variable is not set, the default value will be used.
+    type: bool
+    default: no
+    aliases: [ tls_verify ]
+  debug:
+    description:
+      - Debug mode.
+    type: bool
+    default: no
+
+notes:
+  - Connect to the Docker daemon by providing parameters with each task or by defining environment variables.
+    You can define C(DOCKER_HOST), C(DOCKER_TLS_HOSTNAME), C(DOCKER_API_VERSION), C(DOCKER_CERT_PATH), C(DOCKER_SSL_VERSION),
+    C(DOCKER_TLS), C(DOCKER_TLS_VERIFY) and C(DOCKER_TIMEOUT). If you are using Docker Machine, run the script shipped
+    with the product that sets up the environment. It will set these variables for you. See
+    U(https://docs.docker.com/machine/reference/env/) for more details.
+  - When connecting to Docker daemon with TLS, you might need to install additional Python packages.
+    For the Docker SDK for Python, version 2.4 or newer, this can be done by installing C(docker[tls]) with M(pip).
+  - Note that the Docker SDK for Python only allows specifying the path to the Docker configuration for very few functions.
+    In general, it will use C($HOME/.docker/config.json) if the C(DOCKER_CONFIG) environment variable is not specified,
+    and use C($DOCKER_CONFIG/config.json) otherwise.
+'''
+
+    # Additional, more specific stuff for minimal Docker SDK for Python version < 2.0
+
+    DOCKER_PY_1_DOCUMENTATION = r'''
+options: {}
+requirements:
+  - "Docker SDK for Python: Please note that the L(docker-py,https://pypi.org/project/docker-py/)
+    Python module has been superseded by L(docker,https://pypi.org/project/docker/)
+    (see L(here,https://github.com/docker/docker-py/issues/1310) for details).
+    For Python 2.6, C(docker-py) must be used. Otherwise, it is recommended to
+    install the C(docker) Python module. Note that both modules should *not*
+    be installed at the same time. Also note that when both modules are installed
+    and one of them is uninstalled, the other might no longer function and a
+    reinstall of it is required."
+'''
+
+    # Additional, more specific stuff for minimal Docker SDK for Python version >= 2.0.
+    # Note that Docker SDK for Python >= 2.0 requires Python 2.7 or newer.
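+    # As a sketch of how a module opts into one of these fragments (the usage
+    # below is illustrative; fragment variables other than DOCUMENTATION are
+    # addressed by their lowercase attribute name):
+    #
+    #   extends_documentation_fragment:
+    #   - community.general.docker
+    #   - community.general.docker.docker_py_2_documentation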
+
+    DOCKER_PY_2_DOCUMENTATION = r'''
+options: {}
+requirements:
+  - "Python >= 2.7"
+  - "Docker SDK for Python: Please note that the L(docker-py,https://pypi.org/project/docker-py/)
+    Python module has been superseded by L(docker,https://pypi.org/project/docker/)
+    (see L(here,https://github.com/docker/docker-py/issues/1310) for details).
+    This module does *not* work with docker-py."
+'''
diff --git a/plugins/doc_fragments/emc.py b/plugins/doc_fragments/emc.py
new file mode 100644
index 0000000000..84accf42d3
--- /dev/null
+++ b/plugins/doc_fragments/emc.py
@@ -0,0 +1,42 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2018, Luca Lorenzetto (@remix_tj)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+
+class ModuleDocFragment(object):
+
+    DOCUMENTATION = r'''
+options:
+  - See respective platform section for more details.
+requirements:
+  - See respective platform section for more details.
+notes:
+  - Ansible modules are available for EMC VNX.
+'''
+
+    # Documentation fragment for VNX (emc_vnx)
+    EMC_VNX = r'''
+options:
+  sp_address:
+    description:
+      - Address of the SP of target/secondary storage.
+    type: str
+    required: true
+  sp_user:
+    description:
+      - Username for accessing SP.
+    type: str
+    default: sysadmin
+  sp_password:
+    description:
+      - Password for accessing SP.
+    type: str
+    default: sysadmin
+requirements:
+  - An EMC VNX Storage device.
+  - Ansible 2.7.
+  - storops (0.5.10 or greater). Install using 'pip install storops'.
+notes:
+  - The modules prefixed with emc_vnx are built to support the EMC VNX storage platform.
+'''
diff --git a/plugins/doc_fragments/enos.py b/plugins/doc_fragments/enos.py
new file mode 100644
index 0000000000..7b26c9056d
--- /dev/null
+++ b/plugins/doc_fragments/enos.py
@@ -0,0 +1,90 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2017, Red Hat Inc.
+# Copyright: (c) 2017, Lenovo.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+
+class ModuleDocFragment(object):
+
+    # Standard files documentation fragment
+    DOCUMENTATION = r'''
+options:
+  authorize:
+    description:
+      - Instructs the module to enter privileged mode on the remote device
+        before sending any commands. If not specified, the device will
+        attempt to execute all commands in non-privileged mode. If the value
+        is not specified in the task, the value of environment variable
+        C(ANSIBLE_NET_AUTHORIZE) will be used instead.
+    type: bool
+    default: no
+  auth_pass:
+    description:
+      - Specifies the password to use if required to enter privileged mode
+        on the remote device. If I(authorize) is false, then this argument
+        does nothing. If the value is not specified in the task, the value of
+        environment variable C(ANSIBLE_NET_AUTH_PASS) will be used instead.
+  provider:
+    description:
+      - A dict object containing connection details.
+    type: dict
+    suboptions:
+      host:
+        description:
+          - Specifies the DNS host name or address for connecting to the remote
+            device over the specified transport. The value of host is used as
+            the destination address for the transport.
+        type: str
+        required: true
+      port:
+        description:
+          - Specifies the port to use when building the connection to the remote device.
+        type: int
+        default: 22
+      username:
+        description:
+          - Configures the username to use to authenticate the connection to
+            the remote device. This value is used to authenticate
+            the SSH session.
+            If the value is not specified in the task, the
+            value of environment variable C(ANSIBLE_NET_USERNAME) will be used instead.
+        type: str
+      password:
+        description:
+          - Specifies the password to use to authenticate the connection to
+            the remote device. This value is used to authenticate
+            the SSH session. If the value is not specified in the task, the
+            value of environment variable C(ANSIBLE_NET_PASSWORD) will be used instead.
+        type: str
+      timeout:
+        description:
+          - Specifies the timeout in seconds for communicating with the network device
+            for either connecting or sending commands. If the timeout is
+            exceeded before the operation is completed, the module will error.
+        type: int
+        default: 10
+      ssh_keyfile:
+        description:
+          - Specifies the SSH key to use to authenticate the connection to
+            the remote device. This value is the path to the
+            key used to authenticate the SSH session. If the value is not specified
+            in the task, the value of environment variable C(ANSIBLE_NET_SSH_KEYFILE)
+            will be used instead.
+        type: path
+      authorize:
+        description:
+          - Instructs the module to enter privileged mode on the remote device
+            before sending any commands. If not specified, the device will
+            attempt to execute all commands in non-privileged mode. If the value
+            is not specified in the task, the value of environment variable
+            C(ANSIBLE_NET_AUTHORIZE) will be used instead.
+        type: bool
+        default: no
+      auth_pass:
+        description:
+          - Specifies the password to use if required to enter privileged mode
+            on the remote device. If I(authorize) is false, then this argument
+            does nothing. If the value is not specified in the task, the value of
+            environment variable C(ANSIBLE_NET_AUTH_PASS) will be used instead.
+        type: str
+'''
diff --git a/plugins/doc_fragments/exoscale.py b/plugins/doc_fragments/exoscale.py
new file mode 100644
index 0000000000..32719807b7
--- /dev/null
+++ b/plugins/doc_fragments/exoscale.py
@@ -0,0 +1,56 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2017, René Moser
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+class ModuleDocFragment(object):
+
+    # Standard exoscale documentation fragment
+    DOCUMENTATION = r'''
+options:
+  api_key:
+    description:
+      - API key of the Exoscale DNS API.
+      - Since 2.4, the ENV variable C(CLOUDSTACK_KEY) is used as default, when defined.
+    type: str
+  api_secret:
+    description:
+      - Secret key of the Exoscale DNS API.
+      - Since 2.4, the ENV variable C(CLOUDSTACK_SECRET) is used as default, when defined.
+    type: str
+  api_timeout:
+    description:
+      - HTTP timeout to Exoscale DNS API.
+      - Since 2.4, the ENV variable C(CLOUDSTACK_TIMEOUT) is used as default, when defined.
+    type: int
+    default: 10
+  api_region:
+    description:
+      - Name of the ini section in the C(cloudstack.ini) file.
+      - Since 2.4, the ENV variable C(CLOUDSTACK_REGION) is used as default, when defined.
+    type: str
+    default: cloudstack
+  validate_certs:
+    description:
+      - Validate SSL certs of the Exoscale DNS API.
+    type: bool
+    default: yes
+requirements:
+  - python >= 2.6
+notes:
+  - As Exoscale DNS uses the same API key and secret for all services, we reuse the config used for Exoscale Compute based on CloudStack.
+    The config is read from several locations, in the following order.
+    The C(CLOUDSTACK_KEY), C(CLOUDSTACK_SECRET) environment variables.
+    A C(CLOUDSTACK_CONFIG) environment variable pointing to an C(.ini) file.
+    A C(cloudstack.ini) file in the current working directory.
+    A C(.cloudstack.ini) file in the user's home directory.
+    Optionally multiple credentials and endpoints can be specified using ini sections in C(cloudstack.ini).
+    Use the argument C(api_region) to select the section name, default section is C(cloudstack).
+  - This module does not support multiple A records and will complain properly if you try.
+  - More information about Exoscale DNS can be found at U(https://community.exoscale.ch/documentation/dns/).
+  - This module supports check mode and diff.
+'''
diff --git a/plugins/doc_fragments/gcp.py b/plugins/doc_fragments/gcp.py
new file mode 100644
index 0000000000..308d48808b
--- /dev/null
+++ b/plugins/doc_fragments/gcp.py
@@ -0,0 +1,59 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2018, Google Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+
+class ModuleDocFragment(object):
+    # GCP doc fragment.
+    DOCUMENTATION = r'''
+options:
+  project:
+    description:
+      - The Google Cloud Platform project to use.
+    type: str
+  auth_kind:
+    description:
+      - The type of credential used.
+    type: str
+    required: true
+    choices: [ application, machineaccount, serviceaccount ]
+  service_account_contents:
+    description:
+      - The contents of a Service Account JSON file, either in a dictionary or as a JSON string that represents it.
+    type: jsonarg
+  service_account_file:
+    description:
+      - The path of a Service Account JSON file if serviceaccount is selected as type.
+    type: path
+  service_account_email:
+    description:
+      - An optional service account email address if machineaccount is selected
+        and the user does not wish to use the default email.
+    type: str
+  scopes:
+    description:
+      - Array of scopes to be used.
+    type: list
+    elements: str
+  env_type:
+    description:
+      - Specifies which Ansible environment you're running this module within.
+      - This should not be set unless you know what you're doing.
+      - This only alters the User Agent string for any API requests.
+    type: str
+notes:
+  - For authentication, you can set service_account_file using the
+    C(GCP_SERVICE_ACCOUNT_FILE) env variable.
+  - For authentication, you can set service_account_contents using the
+    C(GCP_SERVICE_ACCOUNT_CONTENTS) env variable.
+  - For authentication, you can set service_account_email using the
+    C(GCP_SERVICE_ACCOUNT_EMAIL) env variable.
+  - For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env
+    variable.
+  - For authentication, you can set scopes using the C(GCP_SCOPES) env variable.
+  - Environment variable values will only be used if the playbook values are
+    not set.
+  - The I(service_account_email) and I(service_account_file) options are
+    mutually exclusive.
+'''
diff --git a/plugins/doc_fragments/hetzner.py b/plugins/doc_fragments/hetzner.py
new file mode 100644
index 0000000000..536e1a50d6
--- /dev/null
+++ b/plugins/doc_fragments/hetzner.py
@@ -0,0 +1,20 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2019 Felix Fontein
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+
+class ModuleDocFragment(object):
+
+    # Standard files documentation fragment
+    DOCUMENTATION = r'''
+options:
+  hetzner_user:
+    description: The username for the Robot webservice user.
+    type: str
+    required: yes
+  hetzner_password:
+    description: The password for the Robot webservice user.
+    type: str
+    required: yes
+'''
diff --git a/plugins/doc_fragments/hpe3par.py b/plugins/doc_fragments/hpe3par.py
new file mode 100644
index 0000000000..2f0c69eea2
--- /dev/null
+++ b/plugins/doc_fragments/hpe3par.py
@@ -0,0 +1,32 @@
+# Copyright: (c) 2018, Hewlett Packard Enterprise Development LP
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+
+class ModuleDocFragment(object):
+
+    # HPE 3PAR doc fragment
+    DOCUMENTATION = '''
+options:
+  storage_system_ip:
+    description:
+      - The storage system IP address.
+    type: str
+    required: true
+  storage_system_password:
+    description:
+      - The storage system password.
+    type: str
+    required: true
+  storage_system_username:
+    description:
+      - The storage system user name.
+    type: str
+    required: true
+
+requirements:
+  - hpe3par_sdk >= 1.0.2. Install using 'pip install hpe3par_sdk'
+  - WSAPI service should be enabled on the 3PAR storage array.
+notes:
+  - check_mode is not supported.
+'''
diff --git a/plugins/doc_fragments/hwc.py b/plugins/doc_fragments/hwc.py
new file mode 100644
index 0000000000..e8d89b9ec0
--- /dev/null
+++ b/plugins/doc_fragments/hwc.py
@@ -0,0 +1,62 @@
+# Copyright: (c) 2018, Huawei Inc.
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+
+class ModuleDocFragment(object):
+
+    # HWC doc fragment.
+    DOCUMENTATION = '''
+options:
+  identity_endpoint:
+    description:
+      - The Identity authentication URL.
+    type: str
+    required: true
+  user:
+    description:
+      - The user name to login with (currently only user names are
+        supported, and not user IDs).
+    type: str
+    required: true
+  password:
+    description:
+      - The password to login with.
+    type: str
+    required: true
+  domain:
+    description:
+      - The name of the Domain to scope to (Identity v3).
+        (currently only domain names are supported, and not domain IDs).
+    type: str
+    required: true
+  project:
+    description:
+      - The name of the Tenant (Identity v2) or Project (Identity v3).
+        (currently only project names are supported, and not
+        project IDs).
+    type: str
+    required: true
+  region:
+    description:
+      - The region to which the project belongs.
+    type: str
+  id:
+    description:
+      - The ID of the resource to be managed.
+    type: str
+notes:
+  - For authentication, you can set identity_endpoint using the
+    C(ANSIBLE_HWC_IDENTITY_ENDPOINT) env variable.
+  - For authentication, you can set user using the
+    C(ANSIBLE_HWC_USER) env variable.
+  - For authentication, you can set password using the C(ANSIBLE_HWC_PASSWORD) env
+    variable.
+  - For authentication, you can set domain using the C(ANSIBLE_HWC_DOMAIN) env
+    variable.
+  - For authentication, you can set project using the C(ANSIBLE_HWC_PROJECT) env
+    variable.
+  - For authentication, you can set region using the C(ANSIBLE_HWC_REGION) env variable.
+  - Environment variable values will only be used if the playbook values are
+    not set.
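+# A minimal sketch, kept as a comment, of relying on the ANSIBLE_HWC_* environment
+# variables above so that only resource parameters appear in the task (the module
+# name and values are illustrative, not part of this fragment):
+#
+# - name: Create a VPC using credentials from ANSIBLE_HWC_* variables
+#   hwc_network_vpc:
+#     name: vpc-ansible
+#     cidr: 192.168.100.0/24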
+'''
diff --git a/plugins/doc_fragments/ibm_storage.py b/plugins/doc_fragments/ibm_storage.py
new file mode 100644
index 0000000000..0d8eb5fe22
--- /dev/null
+++ b/plugins/doc_fragments/ibm_storage.py
@@ -0,0 +1,37 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2018, IBM CORPORATION
+# Author(s): Tzur Eliyahu
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+class ModuleDocFragment(object):
+
+    # ibm_storage documentation fragment
+    DOCUMENTATION = r'''
+options:
+  username:
+    description:
+      - Management user on the Spectrum Accelerate storage system.
+    type: str
+    required: True
+  password:
+    description:
+      - Password for username on the Spectrum Accelerate storage system.
+    type: str
+    required: True
+  endpoints:
+    description:
+      - The hostname or management IP of Spectrum Accelerate storage system.
+    type: str
+    required: True
+notes:
+  - This module requires the pyxcli python library.
+    Use 'pip install pyxcli' in order to get pyxcli.
+requirements:
+  - python >= 2.7
+  - pyxcli
+'''
diff --git a/plugins/doc_fragments/infinibox.py b/plugins/doc_fragments/infinibox.py
new file mode 100644
index 0000000000..094ba91838
--- /dev/null
+++ b/plugins/doc_fragments/infinibox.py
@@ -0,0 +1,34 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2016, Gregory Shulov
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+
+class ModuleDocFragment(object):
+
+    # Standard Infinibox documentation fragment
+    DOCUMENTATION = r'''
+options:
+  system:
+    description:
+      - Infinibox Hostname or IPv4 Address.
+    type: str
+    required: true
+  user:
+    description:
+      - Infinibox User username with sufficient privileges (see notes).
+    required: false
+  password:
+    description:
+      - Infinibox User password.
+    type: str
+notes:
+  - This module requires the infinisdk python library.
+  - You must set INFINIBOX_USER and INFINIBOX_PASSWORD environment variables
+    if user and password arguments are not passed to the module directly.
+  - Ansible uses the infinisdk configuration file C(~/.infinidat/infinisdk.ini) if no credentials are provided.
+    See U(http://infinisdk.readthedocs.io/en/latest/getting_started.html)
+requirements:
+  - "python >= 2.7"
+  - infinisdk
+'''
diff --git a/plugins/doc_fragments/influxdb.py b/plugins/doc_fragments/influxdb.py
new file mode 100644
index 0000000000..9a10124203
--- /dev/null
+++ b/plugins/doc_fragments/influxdb.py
@@ -0,0 +1,74 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2017, Ansible Project
+# Copyright: (c) 2017, Abhijeet Kasurde (akasurde@redhat.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+
+class ModuleDocFragment(object):
+    # Parameters for influxdb modules
+    DOCUMENTATION = r'''
+options:
+  hostname:
+    description:
+      - The hostname or IP address on which InfluxDB server is listening.
+      - Since Ansible 2.5, defaulted to localhost.
+    type: str
+    default: localhost
+  username:
+    description:
+      - Username that will be used to authenticate against InfluxDB server.
+      - Alias C(login_username) added in Ansible 2.5.
+    type: str
+    default: root
+    aliases: [ login_username ]
+  password:
+    description:
+      - Password that will be used to authenticate against InfluxDB server.
+      - Alias C(login_password) added in Ansible 2.5.
+    type: str
+    default: root
+    aliases: [ login_password ]
+  port:
+    description:
+      - The port on which InfluxDB server is listening.
+    type: int
+    default: 8086
+  path:
+    description:
+      - The path on which InfluxDB server is accessible.
+    type: str
+  validate_certs:
+    description:
+      - If set to C(no), the SSL certificates will not be validated.
+      - This should only be set to C(no) on personally controlled sites using self-signed certificates.
+    type: bool
+    default: yes
+  ssl:
+    description:
+      - Use https instead of http to connect to InfluxDB server.
+    type: bool
+  timeout:
+    description:
+      - Number of seconds Requests will wait for client to establish a connection.
+    type: int
+  retries:
+    description:
+      - Number of retries client will try before aborting.
+      - C(0) indicates try until success.
+    type: int
+    default: 3
+  use_udp:
+    description:
+      - Use UDP to connect to InfluxDB server.
+    type: bool
+  udp_port:
+    description:
+      - UDP port to connect to InfluxDB server.
+    type: int
+    default: 4444
+  proxies:
+    description:
+      - HTTP(S) proxy to use for Requests to connect to InfluxDB server.
+    type: dict
+'''
diff --git a/plugins/doc_fragments/ingate.py b/plugins/doc_fragments/ingate.py
new file mode 100644
index 0000000000..ed1882d5c1
--- /dev/null
+++ b/plugins/doc_fragments/ingate.py
@@ -0,0 +1,61 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2018, Ingate Systems AB
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+
+class ModuleDocFragment(object):
+    DOCUMENTATION = r'''
+options:
+  client:
+    description:
+      - A dict object containing connection details.
+    suboptions:
+      version:
+        description:
+          - REST API version.
+        type: str
+        choices: [ v1 ]
+        default: v1
+      scheme:
+        description:
+          - Which HTTP protocol to use.
+        type: str
+        required: true
+        choices: [ http, https ]
+      address:
+        description:
+          - The hostname or IP address to the unit.
+        type: str
+        required: true
+      username:
+        description:
+          - The username of the REST API user.
+        type: str
+        required: true
+      password:
+        description:
+          - The password for the REST API user.
+        type: str
+        required: true
+      port:
+        description:
+          - Which HTTP(S) port to connect to.
+        type: int
+      timeout:
+        description:
+          - The timeout (in seconds) for REST API requests.
+        type: int
+      validate_certs:
+        description:
+          - Verify the unit's HTTPS certificate.
+        type: bool
+        default: yes
+        aliases: [ verify_ssl ]
+notes:
+  - This module requires that the Ingate Python SDK is installed on the
+    host. To install the SDK use the pip command from your shell
+    C(pip install ingatesdk).
+requirements:
+  - ingatesdk >= 1.0.6
+'''
diff --git a/plugins/doc_fragments/ipa.py b/plugins/doc_fragments/ipa.py
new file mode 100644
index 0000000000..182543beb3
--- /dev/null
+++ b/plugins/doc_fragments/ipa.py
@@ -0,0 +1,72 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2017-18, Ansible Project
+# Copyright: (c) 2017-18, Abhijeet Kasurde (akasurde@redhat.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+
+class ModuleDocFragment(object):
+    # Parameters for FreeIPA/IPA modules
+    DOCUMENTATION = r'''
+options:
+  ipa_port:
+    description:
+      - Port of FreeIPA / IPA server.
+      - If the value is not specified in the task, the value of environment variable C(IPA_PORT) will be used instead.
+      - If both the environment variable C(IPA_PORT) and the value are not specified in the task, then default value is set.
+      - Environment variable fallback mechanism is added in Ansible 2.5.
+    type: int
+    default: 443
+  ipa_host:
+    description:
+      - IP or hostname of IPA server.
+      - If the value is not specified in the task, the value of environment variable C(IPA_HOST) will be used instead.
+      - If both the environment variable C(IPA_HOST) and the value are not specified in the task, then DNS will be used to try to discover the FreeIPA server.
+      - The relevant entry needed in FreeIPA is the 'ipa-ca' entry.
+      - If neither the DNS entry, nor the environment C(IPA_HOST), nor the value are available in the task, then the default value will be used.
+      - Environment variable fallback mechanism is added in Ansible 2.5.
+    type: str
+    default: ipa.example.com
+  ipa_user:
+    description:
+      - Administrative account used on IPA server.
+      - If the value is not specified in the task, the value of environment variable C(IPA_USER) will be used instead.
+      - If both the environment variable C(IPA_USER) and the value are not specified in the task, then default value is set.
+      - Environment variable fallback mechanism is added in Ansible 2.5.
+    type: str
+    default: admin
+  ipa_pass:
+    description:
+      - Password of administrative user.
+      - If the value is not specified in the task, the value of environment variable C(IPA_PASS) will be used instead.
+      - Note that if the C(urllib_gssapi) library is available, it is possible to use GSSAPI to authenticate to FreeIPA.
+      - If the environment variable C(KRB5CCNAME) is available, the module will use this Kerberos credentials cache to authenticate to the FreeIPA server.
+      - If the environment variable C(KRB5_CLIENT_KTNAME) is available and C(KRB5CCNAME) is not, the module will use this Kerberos keytab to authenticate.
+      - If GSSAPI is not available, the use of I(ipa_pass) is required.
+      - Environment variable fallback mechanism is added in Ansible 2.5.
+    type: str
+  ipa_prot:
+    description:
+      - Protocol used by IPA server.
+      - If the value is not specified in the task, the value of environment variable C(IPA_PROT) will be used instead.
+      - If both the environment variable C(IPA_PROT) and the value are not specified in the task, then default value is set.
+      - Environment variable fallback mechanism is added in Ansible 2.5.
+    type: str
+    choices: [ http, https ]
+    default: https
+  validate_certs:
+    description:
+      - This only applies if C(ipa_prot) is I(https).
+      - If set to C(no), the SSL certificates will not be validated.
+      - This should only be set to C(no) on personally controlled sites using self-signed certificates.
+    type: bool
+    default: yes
+  ipa_timeout:
+    description:
+      - Specifies idle timeout (in seconds) for the connection.
+      - For bulk operations, you may want to increase this in order to avoid timeout from IPA server.
+      - If the value is not specified in the task, the value of environment variable C(IPA_TIMEOUT) will be used instead.
+      - If both the environment variable C(IPA_TIMEOUT) and the value are not specified in the task, then default value is set.
+ type: int + default: 10 +''' diff --git a/plugins/doc_fragments/ironware.py b/plugins/doc_fragments/ironware.py new file mode 100644 index 0000000000..afdc431038 --- /dev/null +++ b/plugins/doc_fragments/ironware.py @@ -0,0 +1,93 @@ +# -*- coding: utf-8 -*- + +# Copyright: (c) 2017, Paul Baker <@paulquack> +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + + +class ModuleDocFragment(object): + + # Standard files documentation fragment + DOCUMENTATION = r''' +options: + authorize: + description: + - B(Deprecated) + - "Starting with Ansible 2.7 we recommend using C(connection: network_cli) and C(become: yes)." + - For more information please see the L(IronWare Platform Options guide, ../network/user_guide/platform_ironware.html). + - HORIZONTALLINE + - Instructs the module to enter privileged mode on the remote device + before sending any commands. If not specified, the device will + attempt to execute all commands in non-privileged mode. If the value + is not specified in the task, the value of environment variable + C(ANSIBLE_NET_AUTHORIZE) will be used instead. + type: bool + default: no + provider: + description: + - B(Deprecated) + - "Starting with Ansible 2.7 we recommend using C(connection: network_cli) and C(become: yes)." + - For more information please see the L(IronWare Platform Options guide, ../network/user_guide/platform_ironware.html). + - HORIZONTALLINE + - A dict object containing connection details. + type: dict + suboptions: + host: + description: + - Specifies the DNS host name or address for connecting to the remote + device over the specified transport. The value of host is used as + the destination address for the transport. + type: str + port: + description: + - Specifies the port to use when building the connection to the remote + device. + type: int + default: 22 + username: + description: + - Configures the username to use to authenticate the connection to + the remote device. This value is used to authenticate + the SSH session. If the value is not specified in the task, the + value of environment variable C(ANSIBLE_NET_USERNAME) will be used instead. + type: str + password: + description: + - Specifies the password to use to authenticate the connection to + the remote device. This value is used to authenticate + the SSH session. If the value is not specified in the task, the + value of environment variable C(ANSIBLE_NET_PASSWORD) will be used instead. + type: str + ssh_keyfile: + description: + - Specifies the SSH key to use to authenticate the connection to + the remote device. This value is the path to the + key used to authenticate the SSH session. If the value is not specified + in the task, the value of environment variable C(ANSIBLE_NET_SSH_KEYFILE) + will be used instead. + type: path + authorize: + description: + - Instructs the module to enter privileged mode on the remote device + before sending any commands. If not specified, the device will + attempt to execute all commands in non-privileged mode. If the value + is not specified in the task, the value of environment variable + C(ANSIBLE_NET_AUTHORIZE) will be used instead. + type: bool + default: no + auth_pass: + description: + - Specifies the password to use if required to enter privileged mode + on the remote device. If I(authorize) is false, then this argument + does nothing. If the value is not specified in the task, the value of + environment variable C(ANSIBLE_NET_AUTH_PASS) will be used instead. 
+        type: str
+  timeout:
+    description:
+      - Specifies the idle timeout (in seconds) for the connection. Useful
+        if the console freezes before continuing. For example when saving
+        configurations.
+    type: int
+    default: 10
+notes:
+  - For more information on using Ansible to manage network devices see the :ref:`Ansible Network Guide `
+'''
diff --git a/plugins/doc_fragments/keycloak.py b/plugins/doc_fragments/keycloak.py
new file mode 100644
index 0000000000..4eedf23bd9
--- /dev/null
+++ b/plugins/doc_fragments/keycloak.py
@@ -0,0 +1,58 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2017, Eike Frost
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+
+class ModuleDocFragment(object):
+
+    # Standard documentation fragment
+    DOCUMENTATION = r'''
+options:
+  auth_keycloak_url:
+    description:
+      - URL to the Keycloak instance.
+    type: str
+    required: true
+    aliases:
+      - url
+
+  auth_client_id:
+    description:
+      - OpenID Connect I(client_id) to authenticate to the API with.
+    type: str
+    default: admin-cli
+
+  auth_realm:
+    description:
+      - Keycloak realm name to authenticate to for API access.
+    type: str
+    required: true
+
+  auth_client_secret:
+    description:
+      - Client Secret to use in conjunction with I(auth_client_id) (if required).
+    type: str
+
+  auth_username:
+    description:
+      - Username to authenticate for API access with.
+    type: str
+    required: true
+    aliases:
+      - username
+
+  auth_password:
+    description:
+      - Password to authenticate for API access with.
+    type: str
+    required: true
+    aliases:
+      - password
+
+  validate_certs:
+    description:
+      - Verify TLS certificates (do not disable this in production).
+    type: bool
+    default: yes
+'''
diff --git a/plugins/doc_fragments/kubevirt_common_options.py b/plugins/doc_fragments/kubevirt_common_options.py
new file mode 100644
index 0000000000..b014de15c4
--- /dev/null
+++ b/plugins/doc_fragments/kubevirt_common_options.py
@@ -0,0 +1,129 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2018, KubeVirt Team <@kubevirt>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+
+class ModuleDocFragment(object):
+    DOCUMENTATION = r'''
+options:
+  resource_definition:
+    description:
+      - "A partial YAML definition of the object being created/updated. Here you can define Kubernetes
+        resource parameters not covered by this module's parameters."
+      - "NOTE: I(resource_definition) has lower priority than module parameters. If you try to define e.g.
+        I(metadata.namespace) here, that value will be ignored and I(namespace) used instead."
+    aliases:
+      - definition
+      - inline
+    type: dict
+  wait:
+    description:
+      - "I(True) if the module should wait for the resource to get into desired state."
+    type: bool
+    default: yes
+  force:
+    description:
+      - If set to C(no), and I(state) is C(present), an existing object will be replaced.
+    type: bool
+    default: no
+  wait_timeout:
+    description:
+      - The amount of time in seconds the module should wait for the resource to get into desired state.
+    type: int
+    default: 120
+  wait_sleep:
+    description:
+      - Number of seconds to sleep between checks.
+    default: 5
+  memory:
+    description:
+      - The amount of memory to be requested by virtual machine.
+      - For example 1024Mi.
+    type: str
+  memory_limit:
+    description:
+      - The maximum memory to be used by virtual machine.
+      - For example 1024Mi.
+    type: str
+  machine_type:
+    description:
+      - QEMU machine type is the actual chipset of the virtual machine.
+    type: str
+  merge_type:
+    description:
+      - Whether to override the default patch merge approach with a specific type.
+      - If more than one merge type is given, the merge types will be tried in order.
+      - "Defaults to C(['strategic-merge', 'merge']), which is ideal for using the same parameters
+        on resource kinds that combine Custom Resources and built-in resources, as
+        Custom Resource Definitions typically aren't updatable by the usual strategic merge."
+      - "See U(https://kubernetes.io/docs/tasks/run-application/update-api-object-kubectl-patch/#use-a-json-merge-patch-to-update-a-deployment)"
+    type: list
+    choices: [ json, merge, strategic-merge ]
+  cpu_shares:
+    description:
+      - "Specify CPU shares."
+    type: int
+  cpu_limit:
+    description:
+      - "Is converted to its millicore value and multiplied by 100. The resulting value is the total amount of CPU time that a container can use
+        every 100ms. A virtual machine cannot use more than its share of CPU time during this interval."
+    type: int
+  cpu_cores:
+    description:
+      - "Number of CPU cores."
+    type: int
+  cpu_model:
+    description:
+      - "CPU model."
+      - "You can check list of available models here: U(https://github.com/libvirt/libvirt/blob/master/src/cpu_map/index.xml)."
+      - "I(Note:) User can define the default CPU model as I(default-cpu-model) in the I(kubevirt-config) I(ConfigMap); if not set, I(host-model) is used."
+      - "I(Note:) Be sure that the node CPU model where you run a VM has the same or higher CPU family."
+      - "I(Note:) If a CPU model wasn't defined, the VM will have a CPU model closest to one that is used on the node where the VM is running."
+    type: str
+  bootloader:
+    description:
+      - "Specify the bootloader of the virtual machine."
+      - "All virtual machines use BIOS by default for booting."
+    type: str
+  smbios_uuid:
+    description:
+      - "In order to provide a consistent view on the virtualized hardware for the guest OS, the SMBIOS UUID can be set."
+    type: str
+  cpu_features:
+    description:
+      - "List of dictionaries to fine-tune features provided by the selected CPU model."
+      - "I(Note): Policy attribute can either be omitted or contain one of the following policies: force, require, optional, disable, forbid."
+      - "I(Note): In case a policy is omitted for a feature, it will default to require."
+      - "More information about policies: U(https://libvirt.org/formatdomain.html#elementsCPU)"
+    type: list
+  headless:
+    description:
+      - "Specify if the virtual machine should have attached a minimal Video and Graphics device configuration."
+      - "By default a minimal Video and Graphics device configuration will be applied to the VirtualMachineInstance. The video device is vga
+        compatible and comes with a memory size of 16 MB."
+  hugepage_size:
+    description:
+      - "Specify huge page size."
+    type: str
+  tablets:
+    description:
+      - "Specify tablets to be used as input devices."
+    type: list
+  hostname:
+    description:
+      - "Specifies the hostname of the virtual machine. The hostname will be set either by DHCP, by cloud-init if configured, or the virtual
+        machine name will be used."
+  subdomain:
+    description:
+      - "If specified, the fully qualified virtual machine hostname will be hostname.subdomain.namespace.svc.cluster_domain. If not specified,
+        the virtual machine will not have a domain name at all. The DNS entry will resolve to the virtual machine, no matter if the virtual machine
+        itself can pick up a hostname."
+requirements:
+  - python >= 2.7
+  - openshift >= 0.8.2
+notes:
+  - "In order to use this module you have to install the OpenShift Python SDK.
+      To ensure it is installed with the correct version you can create the following task:
+      I(pip: name=openshift>=0.8.2)"
+'''
diff --git a/plugins/doc_fragments/kubevirt_vm_options.py b/plugins/doc_fragments/kubevirt_vm_options.py
new file mode 100644
index 0000000000..c1f5b69fb6
--- /dev/null
+++ b/plugins/doc_fragments/kubevirt_vm_options.py
@@ -0,0 +1,100 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2018, KubeVirt Team <@kubevirt>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+
+class ModuleDocFragment(object):
+
+    # Standard KubeVirt documentation fragment
+    DOCUMENTATION = r'''
+options:
+    disks:
+        description:
+            - List of dictionaries which specify disks of the virtual machine.
+            - "A disk can be made accessible via four different types: I(disk), I(lun), I(cdrom), I(floppy)."
+            - "All possible configuration options are available in U(https://kubevirt.io/api-reference/master/definitions.html#_v1_disk)"
+            - Each disk must have a I(volume) specified that declares the volume type of the disk.
+              All possible configuration options of volume are available in U(https://kubevirt.io/api-reference/master/definitions.html#_v1_volume).
+        type: list
+    labels:
+        description:
+            - Labels are key/value pairs that are attached to virtual machines. Labels are intended to be used to
+              specify identifying attributes of virtual machines that are meaningful and relevant to users, but do not directly
+              imply semantics to the core system. Labels can be used to organize and to select subsets of virtual machines.
+              Labels can be attached to virtual machines at creation time and subsequently added and modified at any time.
+            - "More on labels that are used for internal implementation: U(https://kubevirt.io/user-guide/#/misc/annotations_and_labels)"
+        type: dict
+    interfaces:
+        description:
+            - An interface defines a virtual network interface of a virtual machine (also called a frontend).
+            - All possible configuration options of interfaces are available in U(https://kubevirt.io/api-reference/master/definitions.html#_v1_interface)
+            - Each interface must have a I(network) specified that declares which logical or physical device it is connected to (also called a backend).
+              All possible configuration options of network are available in U(https://kubevirt.io/api-reference/master/definitions.html#_v1_network).
+        type: list
+    cloud_init_nocloud:
+        description:
+            - "Represents a cloud-init NoCloud user-data source. The NoCloud data will be added
+              as a disk to the virtual machine. A proper cloud-init installation is required inside the guest.
+              More information: U(https://kubevirt.io/api-reference/master/definitions.html#_v1_cloudinitnocloudsource)"
+        type: dict
+    affinity:
+        description:
+            - "Describes node affinity scheduling rules for the vm."
+        type: dict
+        suboptions:
+            soft:
+                description:
+                    - "The scheduler will prefer to schedule vms to nodes that satisfy the affinity expressions specified by this field, but it may choose a
+                      node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for
+                      each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute
+                      a sum by iterating through the elements of this field and adding C(weight) to the sum if the node has vms which match the corresponding
+                      C(term); the nodes with the highest sum are the most preferred."
+ type: dict + hard: + description: + - "If the affinity requirements specified by this field are not met at scheduling time, the vm will not be scheduled onto the node. If + the affinity requirements specified by this field cease to be met at some point during vm execution (e.g. due to a vm label update), the + system may or may not try to eventually evict the vm from its node. When there are multiple elements, the lists of nodes corresponding to + each C(term) are intersected, i.e. all terms must be satisfied." + type: dict + node_affinity: + description: + - "Describes vm affinity scheduling rules e.g. co-locate this vm in the same node, zone, etc. as some other vms" + type: dict + suboptions: + soft: + description: + - "The scheduler will prefer to schedule vms to nodes that satisfy the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding C(weight) to the sum if the node matches the corresponding + match_expressions; the nodes with the highest sum are the most preferred." + type: dict + hard: + description: + - "If the affinity requirements specified by this field are not met at scheduling time, the vm will not be scheduled onto the node. If + the affinity requirements specified by this field cease to be met at some point during vm execution (e.g. due to an update), the system + may or may not try to eventually evict the vm from its node." + type: dict + anti_affinity: + description: + - "Describes vm anti-affinity scheduling rules e.g. avoid putting this vm in the same node, zone, etc. as some other vms." + type: dict + suboptions: + soft: + description: + - "The scheduler will prefer to schedule vms to nodes that satisfy the anti-affinity expressions specified by this field, but it may + choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, + i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, + etc.), compute a sum by iterating through the elements of this field and adding C(weight) to the sum if the node has vms which matches + the corresponding C(term); the nodes with the highest sum are the most preferred." + type: dict + hard: + description: + - "If the anti-affinity requirements specified by this field are not met at scheduling time, the vm will not be scheduled onto the node. + If the anti-affinity requirements specified by this field cease to be met at some point during vm execution (e.g. due to a vm label + update), the system may or may not try to eventually evict the vm from its node. When there are multiple elements, the lists of nodes + corresponding to each C(term) are intersected, i.e. all terms must be satisfied." 
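Taken together, the kubevirt option fragments above translate into tasks like the following sketch. The module name community.general.kubevirt_vm matches these fragments, while the namespace, PVC name, and sizes are illustrative assumptions:

- name: Create a KubeVirt VM (illustrative values)
  community.general.kubevirt_vm:
    state: present
    name: demo-vm
    namespace: vms                            # hypothetical namespace
    memory: 1024Mi                            # requested memory, per the fragment above
    cpu_cores: 2
    labels:
      app: demo
    disks:
      - name: rootdisk
        volume:
          persistentVolumeClaim:
            claimName: demo-vm-rootdisk       # hypothetical PVC
        disk:
          bus: virtio
    interfaces:
      - name: default                         # frontend
        bridge: {}
        network:
          pod: {}                             # backend
    cloud_init_nocloud:
      userData: |-
        #cloud-config
        password: changeme
        chpasswd: { expire: False }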
+ type: dict +''' diff --git a/plugins/doc_fragments/ldap.py b/plugins/doc_fragments/ldap.py new file mode 100644 index 0000000000..a2c04962d5 --- /dev/null +++ b/plugins/doc_fragments/ldap.py @@ -0,0 +1,44 @@ +# -*- coding: utf-8 -*- + +# Copyright: (c) 2016, Peter Sagerson +# Copyright: (c) 2016, Jiri Tyr +# Copyright: (c) 2017-2018 Keller Fuchs (@KellerFuchs) +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + + +class ModuleDocFragment(object): + # Standard LDAP documentation fragment + DOCUMENTATION = r''' +options: + bind_dn: + description: + - A DN to bind with. If this is omitted, we'll try a SASL bind with the EXTERNAL mechanism. + - If this is blank, we'll use an anonymous bind. + type: str + bind_pw: + description: + - The password to use with I(bind_dn). + type: str + dn: + required: true + description: + - The DN of the entry to add or remove. + type: str + server_uri: + description: + - A URI to the LDAP server. + - The default value lets the underlying LDAP client library look for a UNIX domain socket in its default location. + type: str + default: ldapi:/// + start_tls: + description: + - If true, we'll use the START_TLS LDAP extension. + type: bool + default: no + validate_certs: + description: + - If set to C(no), SSL certificates will not be validated. + - This should only be used on sites using self-signed certificates. + type: bool + default: yes +''' diff --git a/plugins/doc_fragments/lxca_common.py b/plugins/doc_fragments/lxca_common.py new file mode 100644 index 0000000000..7f9eddd1f3 --- /dev/null +++ b/plugins/doc_fragments/lxca_common.py @@ -0,0 +1,67 @@ +# -*- coding: utf-8 -*- + +# This code is part of Ansible, but is an independent component. +# This particular file snippet, and this file snippet only, is BSD licensed. +# Modules you write using this snippet, which is embedded dynamically by +# Ansible still belong to the author of the module, and may assign their +# own license to the complete work. +# +# Copyright (C) 2017 Lenovo, Inc. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. 
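For the LDAP connection options just documented, a hedged usage sketch with the ldap_entry module (the DN, server, and password variable are illustrative):

- name: Ensure an organizational unit exists (illustrative values)
  community.general.ldap_entry:
    dn: ou=people,dc=example,dc=com
    objectClass: organizationalUnit
    server_uri: ldap://ldap.example.com       # the default ldapi:/// would use a local UNIX socket
    bind_dn: cn=admin,dc=example,dc=com       # omit for SASL EXTERNAL, leave blank for anonymous
    bind_pw: "{{ ldap_admin_password }}"
    start_tls: yes
    state: present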
+#
+
+
+class ModuleDocFragment(object):
+    # Standard Pylxca documentation fragment
+    DOCUMENTATION = r'''
+author:
+    - Naval Patel (@navalkp)
+    - Prashant Bhosale (@prabhosa)
+
+options:
+    login_user:
+        description:
+            - The username for use in HTTP basic authentication.
+        type: str
+        required: true
+
+    login_password:
+        description:
+            - The password for use in HTTP basic authentication.
+        type: str
+        required: true
+
+    auth_url:
+        description:
+            - The full HTTPS web address of the LXCA instance.
+        type: str
+        required: true
+
+requirements:
+    - pylxca
+
+notes:
+    - Additional detail about pylxca can be found at U(https://github.com/lenovo/pylxca)
+    - Playbooks using these modules can be found at U(https://github.com/lenovo/ansible.lenovo-lxca)
+    - Check mode is not supported.
+'''
diff --git a/plugins/doc_fragments/manageiq.py b/plugins/doc_fragments/manageiq.py
new file mode 100644
index 0000000000..c4b2360347
--- /dev/null
+++ b/plugins/doc_fragments/manageiq.py
@@ -0,0 +1,49 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2017, Daniel Korn
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+
+class ModuleDocFragment(object):
+
+    # Standard ManageIQ documentation fragment
+    DOCUMENTATION = r'''
+options:
+    manageiq_connection:
+        description:
+            - ManageIQ connection configuration information.
+        required: true
+        type: dict
+        suboptions:
+            url:
+                description:
+                    - ManageIQ environment URL. Defaults to the value of the C(MIQ_URL) environment variable if set; otherwise it must be passed.
+                type: str
+                required: true
+            username:
+                description:
+                    - ManageIQ username. Defaults to the value of the C(MIQ_USERNAME) environment variable if set; otherwise required if no token is passed in.
+                type: str
+            password:
+                description:
+                    - ManageIQ password. Defaults to the value of the C(MIQ_PASSWORD) environment variable if set; otherwise required if no token is passed in.
+                type: str
+            token:
+                description:
+                    - ManageIQ token. Defaults to the value of the C(MIQ_TOKEN) environment variable if set; otherwise required if no username or password is passed in.
+                type: str
+            validate_certs:
+                description:
+                    - Whether SSL certificates should be verified for HTTPS requests. Defaults to C(yes).
+                type: bool
+                default: yes
+                aliases: [ verify_ssl ]
+            ca_cert:
+                description:
+                    - The path to a CA bundle file or directory with certificates. Defaults to C(None).
+                type: path
+                aliases: [ ca_bundle_path ]
+
+requirements:
+    - 'manageiq-client U(https://github.com/ManageIQ/manageiq-api-client-python/)'
+'''
diff --git a/plugins/doc_fragments/mysql.py b/plugins/doc_fragments/mysql.py
new file mode 100644
index 0000000000..93c8d3475f
--- /dev/null
+++ b/plugins/doc_fragments/mysql.py
@@ -0,0 +1,79 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2015, Jonathan Mainguy
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+
+class ModuleDocFragment(object):
+
+    # Standard mysql documentation fragment
+    DOCUMENTATION = r'''
+options:
+    login_user:
+        description:
+            - The username used to authenticate with.
+        type: str
+    login_password:
+        description:
+            - The password used to authenticate with.
+        type: str
+    login_host:
+        description:
+            - Host running the database.
+            - In some cases for local connections the I(login_unix_socket=/path/to/mysqld/socket),
+              that is usually C(/var/run/mysqld/mysqld.sock), needs to be used instead of I(login_host=localhost).
+        type: str
+        default: localhost
+    login_port:
+        description:
+            - Port of the MySQL server. Requires I(login_host) to be defined as something other than C(localhost) if I(login_port) is used.
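A hedged sketch of the manageiq_connection dict in use. The module community.general.manageiq_user extends this fragment; the URL, credentials, and user attributes here are illustrative:

- name: Create a ManageIQ user (illustrative values)
  community.general.manageiq_user:
    manageiq_connection:
      url: https://miq.example.com            # or set MIQ_URL
      username: admin                         # or set MIQ_USERNAME, or pass a token
      password: "{{ miq_password }}"          # or set MIQ_PASSWORD
      validate_certs: yes
    userid: jdoe
    name: Jane Doe
    state: present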
+ type: int + default: 3306 + login_unix_socket: + description: + - The path to a Unix domain socket for local connections. + type: str + connect_timeout: + description: + - The connection timeout when connecting to the MySQL server. + type: int + default: 30 + config_file: + description: + - Specify a config file from which user and password are to be read. + type: path + default: '~/.my.cnf' + ca_cert: + description: + - The path to a Certificate Authority (CA) certificate. This option, if used, must specify the same certificate + as used by the server. + type: path + aliases: [ ssl_ca ] + client_cert: + description: + - The path to a client public key certificate. + type: path + aliases: [ ssl_cert ] + client_key: + description: + - The path to the client private key. + type: path + aliases: [ ssl_key ] +requirements: + - PyMySQL (Python 2.7 and Python 3.X), or + - MySQLdb (Python 2.x) +notes: + - Requires the PyMySQL (Python 2.7 and Python 3.X) or MySQL-python (Python 2.X) package on the remote host. + The Python package may be installed with apt-get install python-pymysql (Ubuntu; see M(apt)) or + yum install python2-PyMySQL (RHEL/CentOS/Fedora; see M(yum)). You can also use dnf install python2-PyMySQL + for newer versions of Fedora; see M(dnf). + - Both C(login_password) and C(login_user) are required when you are + passing credentials. If none are present, the module will attempt to read + the credentials from C(~/.my.cnf), and finally fall back to using the MySQL + default login of 'root' with no password. + - If there are problems with local connections, using I(login_unix_socket=/path/to/mysqld/socket) + instead of I(login_host=localhost) might help. As an example, the default MariaDB installation of version 10.4 + and later uses the unix_socket authentication plugin by default that + without using I(login_unix_socket=/var/run/mysqld/mysqld.sock) (the default path) + causes the error ``Host '127.0.0.1' is not allowed to connect to this MariaDB server``. +''' diff --git a/plugins/doc_fragments/netscaler.py b/plugins/doc_fragments/netscaler.py new file mode 100644 index 0000000000..98d464e36f --- /dev/null +++ b/plugins/doc_fragments/netscaler.py @@ -0,0 +1,65 @@ +# -*- coding: utf-8 -*- + +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + + +class ModuleDocFragment(object): + DOCUMENTATION = r''' + +options: + nsip: + description: + - The ip address of the netscaler appliance where the nitro API calls will be made. + - "The port can be specified with the colon (:). E.g. 192.168.1.1:555." + type: str + required: True + + nitro_user: + description: + - The username with which to authenticate to the netscaler node. + type: str + required: True + + nitro_pass: + description: + - The password with which to authenticate to the netscaler node. + type: str + required: True + + nitro_protocol: + description: + - Which protocol to use when accessing the nitro API objects. + type: str + choices: [ http, https ] + default: http + + validate_certs: + description: + - If C(no), SSL certificates will not be validated. This should only be used on personally controlled sites using self-signed certificates. + type: bool + default: yes + + nitro_timeout: + description: + - Time in seconds until a timeout error is thrown when establishing a new session with Netscaler + type: float + default: 310 + + state: + description: + - The state of the resource being configured by the module on the netscaler node. 
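The local-socket caveat in the MySQL fragment above is easiest to see in a task. A hedged sketch with community.general.mysql_db (database name and password variable are illustrative):

- name: Create a database over the local socket (illustrative values)
  community.general.mysql_db:
    name: appdb
    state: present
    login_user: root
    login_password: "{{ mysql_root_password }}"
    # On MariaDB >= 10.4, which enables the unix_socket auth plugin by default,
    # prefer the socket over login_host=localhost:
    login_unix_socket: /var/run/mysqld/mysqld.sock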
+            - When C(present) the resource will be created if needed and configured according to the module's parameters.
+            - When C(absent) the resource will be deleted from the netscaler node.
+        type: str
+        choices: [ absent, present ]
+        default: present
+
+    save_config:
+        description:
+            - If C(yes) the module will save the configuration on the netscaler node if it makes any changes.
+            - The module will not save the configuration on the netscaler node if it made no changes.
+        type: bool
+        default: yes
+notes:
+    - For more information on using Ansible to manage Citrix NetScaler Network devices see U(https://www.ansible.com/ansible-netscaler).
+'''
diff --git a/plugins/doc_fragments/nios.py b/plugins/doc_fragments/nios.py
new file mode 100644
index 0000000000..d2c4507ff5
--- /dev/null
+++ b/plugins/doc_fragments/nios.py
@@ -0,0 +1,83 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2015, Peter Sprygada
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+
+class ModuleDocFragment(object):
+
+    # Standard files documentation fragment
+    DOCUMENTATION = r'''
+options:
+    provider:
+        description:
+            - A dict object containing connection details.
+        type: dict
+        suboptions:
+            host:
+                description:
+                    - Specifies the DNS host name or address for connecting to the remote
+                      instance of NIOS WAPI over REST.
+                    - Value can also be specified using C(INFOBLOX_HOST) environment
+                      variable.
+                type: str
+                required: true
+            username:
+                description:
+                    - Configures the username to use to authenticate the connection to
+                      the remote instance of NIOS.
+                    - Value can also be specified using C(INFOBLOX_USERNAME) environment
+                      variable.
+                type: str
+            password:
+                description:
+                    - Specifies the password to use to authenticate the connection to
+                      the remote instance of NIOS.
+                    - Value can also be specified using C(INFOBLOX_PASSWORD) environment
+                      variable.
+                type: str
+            validate_certs:
+                description:
+                    - Boolean value to enable or disable verifying SSL certificates.
+                    - Value can also be specified using C(INFOBLOX_SSL_VERIFY) environment
+                      variable.
+                type: bool
+                default: no
+                aliases: [ ssl_verify ]
+            http_request_timeout:
+                description:
+                    - The amount of time to wait before receiving a response.
+                    - Value can also be specified using C(INFOBLOX_HTTP_REQUEST_TIMEOUT) environment
+                      variable.
+                type: int
+                default: 10
+            max_retries:
+                description:
+                    - Configures the number of attempted retries before the connection
+                      is declared unusable.
+                    - Value can also be specified using C(INFOBLOX_MAX_RETRIES) environment
+                      variable.
+                type: int
+                default: 3
+            wapi_version:
+                description:
+                    - Specifies the version of WAPI to use.
+                    - Value can also be specified using C(INFOBLOX_WAP_VERSION) environment
+                      variable.
+                    - Until Ansible 2.8 the default WAPI version was 1.4.
+                type: str
+                default: '2.1'
+            max_results:
+                description:
+                    - Specifies the maximum number of objects to be returned. If set to a negative
+                      number, the appliance will return an error when the number of returned
+                      objects would exceed the setting.
+                    - Value can also be specified using C(INFOBLOX_MAX_RESULTS) environment
+                      variable.
+                type: int
+                default: 1000
+notes:
+    - "This module must be run locally, which can be achieved by specifying C(connection: local)."
+    - Please read the :ref:`nios_guide` for more detailed information on how to use Infoblox with Ansible.
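Because NIOS modules must run locally, the provider dict above is normally combined with connection: local. A hedged sketch (the module name nios_zone and all values are illustrative, not taken from this commit):

- name: Configure an Infoblox DNS zone (illustrative values)
  hosts: localhost
  connection: local                           # NIOS modules must be run locally
  tasks:
    - name: Ensure the zone exists
      community.general.nios_zone:
        fqdn: ansible.example.com             # hypothetical zone
        state: present
        provider:
          host: nios.example.com              # or set INFOBLOX_HOST
          username: admin                     # or set INFOBLOX_USERNAME
          password: "{{ nios_password }}"     # or set INFOBLOX_PASSWORD
          wapi_version: '2.1'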
+ +''' diff --git a/plugins/doc_fragments/nso.py b/plugins/doc_fragments/nso.py new file mode 100644 index 0000000000..47e9acf2db --- /dev/null +++ b/plugins/doc_fragments/nso.py @@ -0,0 +1,33 @@ +# -*- coding: utf-8 -*- + +# Copyright: (c) 2017, Cisco and/or its affiliates. +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + + +class ModuleDocFragment(object): + + DOCUMENTATION = r''' +options: + url: + description: NSO JSON-RPC URL, http://localhost:8080/jsonrpc + type: str + required: true + username: + description: NSO username + type: str + required: true + password: + description: NSO password + type: str + required: true + timeout: + description: JSON-RPC request timeout in seconds + type: int + default: 300 + validate_certs: + description: When set to true, validates the SSL certificate of NSO when + using SSL + type: bool + required: false + default: false +''' diff --git a/plugins/doc_fragments/oneview.py b/plugins/doc_fragments/oneview.py new file mode 100644 index 0000000000..13802d136b --- /dev/null +++ b/plugins/doc_fragments/oneview.py @@ -0,0 +1,56 @@ +# -*- coding: utf-8 -*- +# +# Copyright: (c) 2016-2017, Hewlett Packard Enterprise Development LP +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + + +class ModuleDocFragment(object): + + # OneView doc fragment + DOCUMENTATION = r''' +options: + config: + description: + - Path to a .json configuration file containing the OneView client configuration. + The configuration file is optional and when used should be present in the host running the ansible commands. + If the file path is not provided, the configuration will be loaded from environment variables. + For links to example configuration files or how to use the environment variables verify the notes section. + type: path + +requirements: + - python >= 2.7.9 + +notes: + - "A sample configuration file for the config parameter can be found at: + U(https://github.com/HewlettPackard/oneview-ansible/blob/master/examples/oneview_config-rename.json)" + - "Check how to use environment variables for configuration at: + U(https://github.com/HewlettPackard/oneview-ansible#environment-variables)" + - "Additional Playbooks for the HPE OneView Ansible modules can be found at: + U(https://github.com/HewlettPackard/oneview-ansible/tree/master/examples)" + - "The OneView API version used will directly affect returned and expected fields in resources. + Information on setting the desired API version and can be found at: + U(https://github.com/HewlettPackard/oneview-ansible#setting-your-oneview-version)" + ''' + + VALIDATEETAG = r''' +options: + validate_etag: + description: + - When the ETag Validation is enabled, the request will be conditionally processed only if the current ETag + for the resource matches the ETag provided in the data. + type: bool + default: yes +''' + + FACTSPARAMS = r''' +options: + params: + description: + - List of params to delimit, filter and sort the list of resources. + - "params allowed: + - C(start): The first item to return, using 0-based indexing. + - C(count): The number of resources to return. + - C(filter): A general filter/query string to narrow the list of items returned. + - C(sort): The sort order of the returned data set." 
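A hedged sketch of the NSO options in use with the nso_config module. The device name and data payload are illustrative assumptions; the data keys follow NSO's YANG namespace convention:

- name: Push device config to NSO over JSON-RPC (illustrative values)
  community.general.nso_config:
    url: http://localhost:8080/jsonrpc
    username: admin
    password: "{{ nso_password }}"
    timeout: 300
    data:
      tailf-ncs:devices:
        device:
          - name: ce0                         # hypothetical managed device
            description: managed by Ansible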
+ type: dict +''' diff --git a/plugins/doc_fragments/online.py b/plugins/doc_fragments/online.py new file mode 100644 index 0000000000..08a11da356 --- /dev/null +++ b/plugins/doc_fragments/online.py @@ -0,0 +1,40 @@ +# -*- coding: utf-8 -*- + +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + + +class ModuleDocFragment(object): + + # Standard documentation fragment + DOCUMENTATION = r''' +options: + api_token: + description: + - Online OAuth token. + type: str + aliases: [ oauth_token ] + api_url: + description: + - Online API URL + type: str + default: 'https://api.online.net' + aliases: [ base_url ] + api_timeout: + description: + - HTTP timeout to Online API in seconds. + type: int + default: 30 + aliases: [ timeout ] + validate_certs: + description: + - Validate SSL certs of the Online API. + type: bool + default: yes +notes: + - Also see the API documentation on U(https://console.online.net/en/api/) + - If C(api_token) is not set within the module, the following + environment variables can be used in decreasing order of precedence + C(ONLINE_TOKEN), C(ONLINE_API_KEY), C(ONLINE_OAUTH_TOKEN), C(ONLINE_API_TOKEN) + - If one wants to use a different C(api_url) one can also set the C(ONLINE_API_URL) + environment variable. +''' diff --git a/plugins/doc_fragments/onyx.py b/plugins/doc_fragments/onyx.py new file mode 100644 index 0000000000..86bc543530 --- /dev/null +++ b/plugins/doc_fragments/onyx.py @@ -0,0 +1,73 @@ +# -*- coding: utf-8 -*- + +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + + +class ModuleDocFragment(object): + + # Standard files documentation fragment + DOCUMENTATION = r''' +options: + provider: + description: + - A dict object containing connection details. + type: dict + suboptions: + host: + description: + - Specifies the DNS host name or address for connecting to the remote + device over the specified transport. The value of host is used as + the destination address for the transport. + type: str + required: true + port: + description: + - Specifies the port to use when building the connection to the remote device. + type: int + default: 22 + username: + description: + - Configures the username to use to authenticate the connection to + the remote device. This value is used to authenticate + the SSH session. If the value is not specified in the task, the + value of environment variable C(ANSIBLE_NET_USERNAME) will be used instead. + type: str + password: + description: + - Specifies the password to use to authenticate the connection to + the remote device. This value is used to authenticate + the SSH session. If the value is not specified in the task, the + value of environment variable C(ANSIBLE_NET_PASSWORD) will be used instead. + type: str + timeout: + description: + - Specifies the timeout in seconds for communicating with the network device + for either connecting or sending commands. If the timeout is + exceeded before the operation is completed, the module will error. + type: int + default: 10 + ssh_keyfile: + description: + - Specifies the SSH key to use to authenticate the connection to + the remote device. This value is the path to the + key used to authenticate the SSH session. If the value is not specified + in the task, the value of environment variable C(ANSIBLE_NET_SSH_KEYFILE) + will be used instead. + type: path + authorize: + description: + - Instructs the module to enter privileged mode on the remote device + before sending any commands. 
If not specified, the device will + attempt to execute all commands in non-privileged mode. If the value + is not specified in the task, the value of environment variable + C(ANSIBLE_NET_AUTHORIZE) will be used instead. + type: bool + default: no + auth_pass: + description: + - Specifies the password to use if required to enter privileged mode + on the remote device. If I(authorize) is false, then this argument + does nothing. If the value is not specified in the task, the value of + environment variable C(ANSIBLE_NET_AUTH_PASS) will be used instead. + type: str +''' diff --git a/plugins/doc_fragments/opennebula.py b/plugins/doc_fragments/opennebula.py new file mode 100644 index 0000000000..b1ac91fa4a --- /dev/null +++ b/plugins/doc_fragments/opennebula.py @@ -0,0 +1,41 @@ +# -*- coding: utf-8 -*- + +# Copyright: (c) 2018, www.privaz.io Valletech AB +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + + +class ModuleDocFragment(object): + # OpenNebula common documentation + DOCUMENTATION = r''' +options: + api_url: + description: + - The ENDPOINT URL of the XMLRPC server. + - If not specified then the value of the ONE_URL environment variable, if any, is used. + type: str + aliases: + - api_endpoint + api_username: + description: + - The name of the user for XMLRPC authentication. + - If not specified then the value of the ONE_USERNAME environment variable, if any, is used. + type: str + api_password: + description: + - The password or token for XMLRPC authentication. + - If not specified then the value of the ONE_PASSWORD environment variable, if any, is used. + type: str + aliases: + - api_token + validate_certs: + description: + - Whether to validate the SSL certificates or not. + - This parameter is ignored if PYTHONHTTPSVERIFY environment variable is used. + type: bool + default: yes + wait_timeout: + description: + - Time to wait for the desired state to be reached before timeout, in seconds. + type: int + default: 300 +''' diff --git a/plugins/doc_fragments/openswitch.py b/plugins/doc_fragments/openswitch.py new file mode 100644 index 0000000000..b4eac582dc --- /dev/null +++ b/plugins/doc_fragments/openswitch.py @@ -0,0 +1,81 @@ +# -*- coding: utf-8 -*- + +# Copyright: (c) 2015, Peter Sprygada +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + + +class ModuleDocFragment(object): + + # Standard files documentation fragment + DOCUMENTATION = r''' +options: + host: + description: + - Specifies the DNS host name or address for connecting to the remote + device over the specified transport. The value of host is used as + the destination address for the transport. Note this argument + does not affect the SSH argument. + type: str + port: + description: + - Specifies the port to use when building the connection to the remote + device. This value applies to either I(cli) or I(rest). The port + value will default to the appropriate transport common port if + none is provided in the task. (cli=22, http=80, https=443). Note + this argument does not affect the SSH transport. + type: int + default: 0 (use common port) + username: + description: + - Configures the username to use to authenticate the connection to + the remote device. This value is used to authenticate + either the CLI login or the eAPI authentication depending on which + transport is used. Note this argument does not affect the SSH + transport. 
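As a hedged sketch of the OpenNebula options above in a task, community.general.one_vm is a module that extends this fragment; the endpoint and template are illustrative:

- name: Deploy an OpenNebula VM (illustrative values)
  community.general.one_vm:
    api_url: https://one.example.com/RPC2     # or set ONE_URL; alias: api_endpoint
    api_username: oneadmin                    # or set ONE_USERNAME
    api_password: "{{ one_password }}"        # or set ONE_PASSWORD; alias: api_token
    template_name: centos7                    # hypothetical template
    wait_timeout: 300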
If the value is not specified in the task, the value of + environment variable C(ANSIBLE_NET_USERNAME) will be used instead. + type: str + password: + description: + - Specifies the password to use to authenticate the connection to + the remote device. This is a common argument used for either I(cli) + or I(rest) transports. Note this argument does not affect the SSH + transport. If the value is not specified in the task, the value of + environment variable C(ANSIBLE_NET_PASSWORD) will be used instead. + type: str + timeout: + description: + - Specifies the timeout in seconds for communicating with the network device + for either connecting or sending commands. If the timeout is + exceeded before the operation is completed, the module will error. + type: int + default: 10 + ssh_keyfile: + description: + - Specifies the SSH key to use to authenticate the connection to + the remote device. This argument is only used for the I(cli) + transports. If the value is not specified in the task, the value of + environment variable C(ANSIBLE_NET_SSH_KEYFILE) will be used instead. + type: path + transport: + description: + - Configures the transport connection to use when connecting to the + remote device. The transport argument supports connectivity to the + device over ssh, cli or REST. + required: true + type: str + choices: [ cli, rest, ssh ] + default: ssh + use_ssl: + description: + - Configures the I(transport) to use SSL if set to C(yes) only when the + I(transport) argument is configured as rest. If the transport + argument is not I(rest), this value is ignored. + type: bool + default: yes + provider: + description: + - Convenience method that allows all I(openswitch) arguments to be passed as + a dict object. All constraints (required, choices, etc) must be + met either by individual arguments or values in this dict. + type: dict +''' diff --git a/plugins/doc_fragments/oracle.py b/plugins/doc_fragments/oracle.py new file mode 100644 index 0000000000..f894cae3ed --- /dev/null +++ b/plugins/doc_fragments/oracle.py @@ -0,0 +1,79 @@ +# Copyright (c) 2018, Oracle and/or its affiliates. +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + + +class ModuleDocFragment(object): + DOCUMENTATION = """ + requirements: + - "python >= 2.7" + - Python SDK for Oracle Cloud Infrastructure U(https://oracle-cloud-infrastructure-python-sdk.readthedocs.io) + notes: + - For OCI python sdk configuration, please refer to + U(https://oracle-cloud-infrastructure-python-sdk.readthedocs.io/en/latest/configuration.html) + options: + config_file_location: + description: + - Path to configuration file. If not set then the value of the OCI_CONFIG_FILE environment variable, + if any, is used. Otherwise, defaults to ~/.oci/config. + type: str + config_profile_name: + description: + - The profile to load from the config file referenced by C(config_file_location). If not set, then the + value of the OCI_CONFIG_PROFILE environment variable, if any, is used. Otherwise, defaults to the + "DEFAULT" profile in C(config_file_location). + default: "DEFAULT" + type: str + api_user: + description: + - The OCID of the user, on whose behalf, OCI APIs are invoked. If not set, then the + value of the OCI_USER_OCID environment variable, if any, is used. This option is required if the user + is not specified through a configuration file (See C(config_file_location)). To get the user's OCID, + please refer U(https://docs.us-phoenix-1.oraclecloud.com/Content/API/Concepts/apisigningkey.htm). 
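A hedged sketch of the openswitch provider dict; the module name ops_config is hypothetical here and serves only to illustrate how the transport options compose:

- name: Apply configuration over the CLI transport (hypothetical ops_config module)
  ops_config:
    lines:
      - hostname switch01
    provider:
      host: "{{ inventory_hostname }}"
      transport: cli                          # default is ssh; rest uses port 443 with use_ssl
      username: admin                         # falls back to ANSIBLE_NET_USERNAME
      password: "{{ ops_password }}"          # falls back to ANSIBLE_NET_PASSWORD
      timeout: 10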
+            type: str
+        api_user_fingerprint:
+            description:
+                - Fingerprint for the key pair being used. If not set, then the value of the OCI_USER_FINGERPRINT
+                  environment variable, if any, is used. This option is required if the key fingerprint is not
+                  specified through a configuration file (See C(config_file_location)). To get the key pair's
+                  fingerprint value please refer to
+                  U(https://docs.us-phoenix-1.oraclecloud.com/Content/API/Concepts/apisigningkey.htm).
+            type: str
+        api_user_key_file:
+            description:
+                - Full path and filename of the private key (in PEM format). If not set, then the value of the
+                  OCI_USER_KEY_FILE variable, if any, is used. This option is required if the private key is
+                  not specified through a configuration file (See C(config_file_location)). If the key is encrypted
+                  with a pass-phrase, the C(api_user_key_pass_phrase) option must also be provided.
+            type: str
+        api_user_key_pass_phrase:
+            description:
+                - Passphrase used by the key referenced in C(api_user_key_file), if it is encrypted. If not set, then
+                  the value of the OCI_USER_KEY_PASS_PHRASE variable, if any, is used. This option is required if the
+                  key passphrase is not specified through a configuration file (See C(config_file_location)).
+            type: str
+        auth_type:
+            description:
+                - The type of authentication to use for making API requests. By default C(auth_type="api_key") based
+                  authentication is performed and the API key (see I(api_user_key_file)) in your config file will be
+                  used. If this I(auth_type) module option is not specified, the value of the OCI_ANSIBLE_AUTH_TYPE,
+                  if any, is used. Use C(auth_type="instance_principal") to use instance principal based authentication
+                  when running ansible playbooks within an OCI compute instance.
+            choices: ['api_key', 'instance_principal']
+            default: 'api_key'
+            type: str
+        tenancy:
+            description:
+                - OCID of your tenancy. If not set, then the value of the OCI_TENANCY variable, if any, is
+                  used. This option is required if the tenancy OCID is not specified through a configuration file
+                  (See C(config_file_location)). To get the tenancy OCID, please refer to
+                  U(https://docs.us-phoenix-1.oraclecloud.com/Content/API/Concepts/apisigningkey.htm)
+            type: str
+        region:
+            description:
+                - The Oracle Cloud Infrastructure region to use for all OCI API requests. If not set, then the
+                  value of the OCI_REGION variable, if any, is used. This option is required if the region is
+                  not specified through a configuration file (See C(config_file_location)). Please refer to
+                  U(https://docs.us-phoenix-1.oraclecloud.com/Content/General/Concepts/regions.htm) for more information
+                  on OCI regions.
+            type: str
+    """
diff --git a/plugins/doc_fragments/oracle_creatable_resource.py b/plugins/doc_fragments/oracle_creatable_resource.py
new file mode 100644
index 0000000000..db7fe378ed
--- /dev/null
+++ b/plugins/doc_fragments/oracle_creatable_resource.py
@@ -0,0 +1,20 @@
+# Copyright (c) 2018, Oracle and/or its affiliates.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+
+class ModuleDocFragment(object):
+    DOCUMENTATION = """
+    options:
+        force_create:
+            description: Whether to attempt non-idempotent creation of a resource. By default, create resource is an
+                         idempotent operation, and doesn't create the resource if it already exists. Setting this option
+                         to true forcefully creates a copy of the resource, even if it already exists. This option is
+                         mutually exclusive with I(key_by).
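A hedged sketch of the shared OCI options; the module name oci_vcn_facts is hypothetical and the OCID is a truncated placeholder. Only the option names and environment variables come from the fragment above:

- name: Gather VCN facts (hypothetical oci_vcn_facts module, illustrative values)
  oci_vcn_facts:
    config_file_location: ~/.oci/config       # the default; OCI_CONFIG_FILE overrides
    config_profile_name: DEFAULT
    auth_type: api_key                        # or instance_principal inside an OCI instance
    region: us-phoenix-1
    tenancy: ocid1.tenancy.oc1..xxxx          # hypothetical, truncated OCID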
+ default: False + type: bool + key_by: + description: The list of comma-separated attributes of this resource which should be used to uniquely + identify an instance of the resource. By default, all the attributes of a resource except + I(freeform_tags) are used to uniquely identify a resource. + type: list + """ diff --git a/plugins/doc_fragments/oracle_display_name_option.py b/plugins/doc_fragments/oracle_display_name_option.py new file mode 100644 index 0000000000..029221c566 --- /dev/null +++ b/plugins/doc_fragments/oracle_display_name_option.py @@ -0,0 +1,12 @@ +# Copyright (c) 2018, Oracle and/or its affiliates. +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + + +class ModuleDocFragment(object): + DOCUMENTATION = """ + options: + display_name: + description: Use I(display_name) along with the other options to return only resources that match the given + display name exactly. + type: str + """ diff --git a/plugins/doc_fragments/oracle_name_option.py b/plugins/doc_fragments/oracle_name_option.py new file mode 100644 index 0000000000..302ba12ce1 --- /dev/null +++ b/plugins/doc_fragments/oracle_name_option.py @@ -0,0 +1,12 @@ +# Copyright (c) 2018, Oracle and/or its affiliates. +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + + +class ModuleDocFragment(object): + DOCUMENTATION = """ + options: + name: + description: Use I(name) along with the other options to return only resources that match the given name + exactly. + type: str + """ diff --git a/plugins/doc_fragments/oracle_tags.py b/plugins/doc_fragments/oracle_tags.py new file mode 100644 index 0000000000..d85ed60b28 --- /dev/null +++ b/plugins/doc_fragments/oracle_tags.py @@ -0,0 +1,18 @@ +# Copyright (c) 2018, Oracle and/or its affiliates. +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + + +class ModuleDocFragment(object): + DOCUMENTATION = """ + options: + defined_tags: + description: Defined tags for this resource. Each key is predefined and scoped to a namespace. For more + information, see + U(https://docs.us-phoenix-1.oraclecloud.com/Content/General/Concepts/resourcetags.htm). + type: dict + freeform_tags: + description: Free-form tags for this resource. Each tag is a simple key-value pair with no predefined name, + type, or namespace. For more information, see + U(https://docs.us-phoenix-1.oraclecloud.com/Content/General/Concepts/resourcetags.htm). + type: dict + """ diff --git a/plugins/doc_fragments/oracle_wait_options.py b/plugins/doc_fragments/oracle_wait_options.py new file mode 100644 index 0000000000..394a7cd530 --- /dev/null +++ b/plugins/doc_fragments/oracle_wait_options.py @@ -0,0 +1,22 @@ +# Copyright (c) 2018, Oracle and/or its affiliates. +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + + +class ModuleDocFragment(object): + DOCUMENTATION = """ + options: + wait: + description: Whether to wait for create or delete operation to complete. + default: yes + type: bool + wait_timeout: + description: Time, in seconds, to wait when I(wait=yes). + default: 1200 + type: int + wait_until: + description: The lifecycle state to wait for the resource to transition into when I(wait=yes). By default, + when I(wait=yes), we wait for the resource to get into ACTIVE/ATTACHED/AVAILABLE/PROVISIONED/ + RUNNING applicable lifecycle state during create operation & to get into DELETED/DETACHED/ + TERMINATED lifecycle state during delete operation. 
+ type: str + """ diff --git a/plugins/doc_fragments/ovirt_facts.py b/plugins/doc_fragments/ovirt_facts.py new file mode 100644 index 0000000000..713077eda7 --- /dev/null +++ b/plugins/doc_fragments/ovirt_facts.py @@ -0,0 +1,62 @@ +# -*- coding: utf-8 -*- + +# Copyright: (c) 2016, Red Hat, Inc. +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + + +class ModuleDocFragment(object): + + # info standard oVirt documentation fragment + DOCUMENTATION = r''' +deprecated: + removed_in: "2.10" + why: When migrating to collection we decided to use only _info modules. + alternative: Use M(ovirt_info) instead +options: + fetch_nested: + description: + - If I(yes) the module will fetch additional data from the API. + - It will fetch only IDs of nested entity. It doesn't fetch multiple levels of nested attributes. + Only the attributes of the current entity. User can configure to fetch other + attributes of the nested entities by specifying C(nested_attributes). + type: bool + nested_attributes: + description: + - Specifies list of the attributes which should be fetched from the API. + - This parameter apply only when C(fetch_nested) is I(true). + type: list + auth: + description: + - "Dictionary with values needed to create HTTP/HTTPS connection to oVirt:" + - C(username)[I(required)] - The name of the user, something like I(admin@internal). + Default value is set by I(OVIRT_USERNAME) environment variable. + - "C(password)[I(required)] - The password of the user. Default value is set by I(OVIRT_PASSWORD) environment variable." + - "C(url)- A string containing the API URL of the server, usually + something like `I(https://server.example.com/ovirt-engine/api)`. Default value is set by I(OVIRT_URL) environment variable. + Either C(url) or C(hostname) is required." + - "C(hostname) - A string containing the hostname of the server, usually + something like `I(server.example.com)`. Default value is set by I(OVIRT_HOSTNAME) environment variable. + Either C(url) or C(hostname) is required." + - "C(token) - Token to be used instead of login with username/password. Default value is set by I(OVIRT_TOKEN) environment variable." + - "C(insecure) - A boolean flag that indicates if the server TLS + certificate and host name should be checked." + - "C(ca_file) - A PEM file containing the trusted CA certificates. The + certificate presented by the server will be verified using these CA + certificates. If `C(ca_file)` parameter is not set, system wide + CA certificate store is used. Default value is set by I(OVIRT_CAFILE) environment variable." + - "C(kerberos) - A boolean flag indicating if Kerberos authentication + should be used instead of the default basic authentication." + - "C(headers) - Dictionary of HTTP headers to be added to each API call." + type: dict + required: true +requirements: + - python >= 2.7 + - ovirt-engine-sdk-python >= 4.3.0 +notes: + - "In order to use this module you have to install oVirt Python SDK. 
+ To ensure it's installed with correct version you can create the following task: + pip: name=ovirt-engine-sdk-python version=4.3.0" +''' diff --git a/plugins/doc_fragments/panos.py b/plugins/doc_fragments/panos.py new file mode 100644 index 0000000000..857f02d677 --- /dev/null +++ b/plugins/doc_fragments/panos.py @@ -0,0 +1,245 @@ +# -*- coding: utf-8 -*- + +# Copyright: (c) 2016, techbizdev +# Copyright: (c) 2018, Kevin Breit (@kbreit) +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + + +class ModuleDocFragment(object): + # Standard files documentation fragment + DOCUMENTATION = r''' +options: + ip_address: + description: + - IP address (or hostname) of PAN-OS device. + type: str + required: true + password: + description: + - Password for authentication. + type: str + required: true + username: + description: + - Username for authentication. + type: str + default: admin +''' + + PROVIDER = r''' +options: + provider: + description: + - A dict object containing connection details. + version_added: '2.8' + required: true + suboptions: + ip_address: + description: + - The IP address or hostname of the PAN-OS device being configured. + type: str + required: true + username: + description: + - The username to use for authentication. This is ignored if + I(api_key) is specified. + type: str + default: 'admin' + password: + description: + - The password to use for authentication. This is ignored if + I(api_key) is specified. + type: str + api_key: + description: + - The API key to use instead of generating it using + I(username) / I(password). + type: str + port: + description: + - The port number to connect to the PAN-OS device on. + type: int + default: 443 + serial_number: + description: + - The serial number of a firewall to use for targeted commands. + If I(ip_address) is not a Panorama PAN-OS device, then + this param is ignored. + type: str +''' + + TRANSITIONAL_PROVIDER = r''' +options: + provider: + description: + - A dict object containing connection details. + version_added: '2.8' + suboptions: + ip_address: + description: + - The IP address or hostname of the PAN-OS device being configured. + type: str + username: + description: + - The username to use for authentication. This is ignored if + I(api_key) is specified. + type: str + default: 'admin' + password: + description: + - The password to use for authentication. This is ignored if + I(api_key) is specified. + type: str + api_key: + description: + - The API key to use instead of generating it using + I(username) / I(password). + type: str + port: + description: + - The port number to connect to the PAN-OS device on. + type: int + default: 443 + serial_number: + description: + - The serial number of a firewall to use for targeted commands. + If I(ip_address) is not a Panorama PAN-OS device, then + this param is ignored. + type: str + ip_address: + description: + - B(Deprecated) + - Use I(provider) to specify PAN-OS connectivity instead. + - HORIZONTALLINE + - The IP address or hostname of the PAN-OS device being configured. + type: str + username: + description: + - B(Deprecated) + - Use I(provider) to specify PAN-OS connectivity instead. + - HORIZONTALLINE + - The username to use for authentication. This is ignored if + I(api_key) is specified. + type: str + default: 'admin' + password: + description: + - B(Deprecated) + - Use I(provider) to specify PAN-OS connectivity instead. + - HORIZONTALLINE + - The password to use for authentication. 
This is ignored if + I(api_key) is specified. + type: str + api_key: + description: + - B(Deprecated) + - Use I(provider) to specify PAN-OS connectivity instead. + - HORIZONTALLINE + - The API key to use instead of generating it using + I(username) / I(password). + type: str + port: + description: + - B(Deprecated) + - Use I(provider) to specify PAN-OS connectivity instead. + - HORIZONTALLINE + - The port number to connect to the PAN-OS device on. + type: int + default: 443 +notes: + - PAN-OS connectivity should be specified using I(provider) or the + classic PAN-OS connectivity params (I(ip_address), I(username), + I(password), I(api_key), and I(port)). If both are present, then the + classic params are ignored. +''' + + STATE = r''' +options: + state: + description: + - The state. + type: str + default: present + choices: + - present + - absent +''' + + RULEBASE = r''' +options: + rulebase: + description: + - The rulebase in which the rule is to exist. If left unspecified, + this defaults to I(rulebase=pre-rulebase) for Panorama. For + NGFW, this is always set to be I(rulebase=rulebase). + type: str + choices: + - pre-rulebase + - rulebase + - post-rulebase +''' + + VSYS_DG = r''' +options: + vsys_dg: + description: + - The vsys (for NGFW) or device group (for Panorama) this + operation should target. If left unspecified, this defaults to + I(vsys_dg=vsys1) for NGFW or I(vsys_dg=shared) for Panorama. + type: str +''' + + DEVICE_GROUP = r''' +options: + device_group: + description: + - (Panorama only) The device group the operation should target. + type: str + default: shared +''' + + VSYS_IMPORT = r''' +options: + vsys: + description: + - The vsys this object should be imported into. Objects that are + imported include interfaces, virtual routers, virtual wires, and + VLANs. Interfaces are typically imported into vsys1 if no vsys + is specified. + type: str +''' + + VSYS = r''' +options: + vsys: + description: + - The vsys this object belongs to. + type: str + default: vsys1 +''' + + TEMPLATE_ONLY = r''' +options: + template: + description: + - (Panorama only) The template this operation should target. This + param is required if the PAN-OS device is Panorama. + type: str +''' + + FULL_TEMPLATE_SUPPORT = r''' +options: + template: + description: + - (Panorama only) The template this operation should target. + Mutually exclusive with I(template_stack). + type: str + template_stack: + description: + - (Panorama only) The template stack this operation should target. + Mutually exclusive with I(template). + type: str +notes: + - If the PAN-OS to be configured is Panorama, either I(template) or + I(template_stack) must be specified. +''' diff --git a/plugins/doc_fragments/postgres.py b/plugins/doc_fragments/postgres.py new file mode 100644 index 0000000000..f093f54209 --- /dev/null +++ b/plugins/doc_fragments/postgres.py @@ -0,0 +1,59 @@ +# -*- coding: utf-8 -*- + +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + + +class ModuleDocFragment(object): + # Postgres documentation fragment + DOCUMENTATION = r''' +options: + login_user: + description: + - The username used to authenticate with. + type: str + default: postgres + login_password: + description: + - The password used to authenticate with. + type: str + login_host: + description: + - Host running the database. + type: str + login_unix_socket: + description: + - Path to a Unix domain socket for local connections. + type: str + port: + description: + - Database port to connect to. 
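A hedged sketch of the PAN-OS provider dict defined above; panos_op is used as an example module name from the PAN-OS module family, but treat the task and its values as illustrative:

- name: Run an operational command (illustrative values)
  panos_op:
    provider:
      ip_address: fw.example.com
      username: admin                         # ignored if api_key is given
      password: "{{ panos_password }}"
      port: 443
    cmd: show system info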
+ type: int + default: 5432 + aliases: [ login_port ] + ssl_mode: + description: + - Determines whether or with what priority a secure SSL TCP/IP connection will be negotiated with the server. + - See U(https://www.postgresql.org/docs/current/static/libpq-ssl.html) for more information on the modes. + - Default of C(prefer) matches libpq default. + type: str + default: prefer + choices: [ allow, disable, prefer, require, verify-ca, verify-full ] + ca_cert: + description: + - Specifies the name of a file containing SSL certificate authority (CA) certificate(s). + - If the file exists, the server's certificate will be verified to be signed by one of these authorities. + type: str + aliases: [ ssl_rootcert ] +notes: +- The default authentication assumes that you are either logging in as or sudo'ing to the C(postgres) account on the host. +- To avoid "Peer authentication failed for user postgres" error, + use postgres user as a I(become_user). +- This module uses psycopg2, a Python PostgreSQL database adapter. You must + ensure that psycopg2 is installed on the host before using this module. +- If the remote host is the PostgreSQL server (which is the default case), then + PostgreSQL must also be installed on the remote host. +- For Ubuntu-based systems, install the postgresql, libpq-dev, and python-psycopg2 packages + on the remote host before using this module. +- The ca_cert parameter requires at least Postgres version 8.4 and I(psycopg2) version 2.4.3. +requirements: [ psycopg2 ] +''' diff --git a/plugins/doc_fragments/proxysql.py b/plugins/doc_fragments/proxysql.py new file mode 100644 index 0000000000..cd67e8b160 --- /dev/null +++ b/plugins/doc_fragments/proxysql.py @@ -0,0 +1,54 @@ +# -*- coding: utf-8 -*- + +# Copyright: (c) 2017, Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt + + +class ModuleDocFragment(object): + + # Documentation fragment for ProxySQL connectivity + CONNECTIVITY = r''' +options: + login_user: + description: + - The username used to authenticate to ProxySQL admin interface. + type: str + login_password: + description: + - The password used to authenticate to ProxySQL admin interface. + type: str + login_host: + description: + - The host used to connect to ProxySQL admin interface. + type: str + default: '127.0.0.1' + login_port: + description: + - The port used to connect to ProxySQL admin interface. + type: int + default: 6032 + config_file: + description: + - Specify a config file from which I(login_user) and I(login_password) + are to be read. + type: path + default: '' +requirements: + - PyMySQL (Python 2.7 and Python 3.X), or + - MySQLdb (Python 2.x) +''' + + # Documentation fragment for managing ProxySQL configuration + MANAGING_CONFIG = r''' +options: + save_to_disk: + description: + - Save config to sqlite db on disk to persist the configuration. + type: bool + default: 'yes' + load_to_runtime: + description: + - Dynamically load config to runtime memory. 
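A hedged sketch of the PostgreSQL options with community.general.postgresql_db; the host, database name, and CA path are illustrative:

- name: Create a PostgreSQL database over verified TLS (illustrative values)
  community.general.postgresql_db:
    name: appdb
    state: present
    login_user: postgres                      # the fragment's default
    login_password: "{{ pg_password }}"
    login_host: db.example.com
    port: 5432
    ssl_mode: verify-full
    ca_cert: /etc/ssl/certs/pg-root.crt       # hypothetical CA bundle (alias: ssl_rootcert)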
+ type: bool + default: 'yes' +''' diff --git a/plugins/doc_fragments/purestorage.py b/plugins/doc_fragments/purestorage.py new file mode 100644 index 0000000000..53fe0f841e --- /dev/null +++ b/plugins/doc_fragments/purestorage.py @@ -0,0 +1,59 @@ +# -*- coding: utf-8 -*- + +# Copyright: (c) 2017, Simon Dodsley +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + + +class ModuleDocFragment(object): + + # Standard Pure Storage documentation fragment + DOCUMENTATION = r''' +options: + - See separate platform section for more details +requirements: + - See separate platform section for more details +notes: + - Ansible modules are available for the following Pure Storage products: FlashArray, FlashBlade +''' + + # Documentation fragment for FlashBlade + FB = r''' +options: + fb_url: + description: + - FlashBlade management IP address or Hostname. + type: str + api_token: + description: + - FlashBlade API token for admin privileged user. + type: str +notes: + - This module requires the C(purity_fb) Python library + - You must set C(PUREFB_URL) and C(PUREFB_API) environment variables + if I(fb_url) and I(api_token) arguments are not passed to the module directly +requirements: + - python >= 2.7 + - purity_fb >= 1.1 +''' + + # Documentation fragment for FlashArray + FA = r''' +options: + fa_url: + description: + - FlashArray management IPv4 address or Hostname. + type: str + required: true + api_token: + description: + - FlashArray API token for admin privileged user. + type: str + required: true +notes: + - This module requires the C(purestorage) Python library + - You must set C(PUREFA_URL) and C(PUREFA_API) environment variables + if I(fa_url) and I(api_token) arguments are not passed to the module directly +requirements: + - python >= 2.7 + - purestorage +''' diff --git a/plugins/doc_fragments/rabbitmq.py b/plugins/doc_fragments/rabbitmq.py new file mode 100644 index 0000000000..e26cd98924 --- /dev/null +++ b/plugins/doc_fragments/rabbitmq.py @@ -0,0 +1,56 @@ +# -*- coding: utf-8 -*- + +# Copyright: (c) 2016, Jorge Rodriguez +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + + +class ModuleDocFragment(object): + # Parameters for RabbitMQ modules + DOCUMENTATION = r''' +options: + login_user: + description: + - RabbitMQ user for connection. + type: str + default: guest + login_password: + description: + - RabbitMQ password for connection. + type: str + login_host: + description: + - RabbitMQ host for connection. + type: str + default: localhost + login_port: + description: + - RabbitMQ management API port. + type: str + default: '15672' + login_protocol: + description: + - RabbitMQ management API protocol. + type: str + choices: [ http , https ] + default: http + ca_cert: + description: + - CA certificate to verify SSL connection to management API. + type: path + aliases: [ cacert ] + client_cert: + description: + - Client certificate to send on SSL connections to management API. + type: path + aliases: [ cert ] + client_key: + description: + - Private key matching the client certificate. + type: path + aliases: [ key ] + vhost: + description: + - RabbitMQ virtual host. 
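A hedged sketch tying the ProxySQL connectivity and config-management fragments together, using community.general.proxysql_backend_servers (the backend hostname is illustrative):

- name: Register a MySQL backend in ProxySQL (illustrative values)
  community.general.proxysql_backend_servers:
    login_user: admin
    login_password: "{{ proxysql_admin_password }}"
    login_host: 127.0.0.1
    login_port: 6032
    hostname: mysql1.example.com              # backend being added
    hostgroup_id: 1
    save_to_disk: yes                         # persist to the on-disk sqlite config
    load_to_runtime: yes                      # apply to runtime immediately
    state: present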
+ type: str + default: "/" +''' diff --git a/plugins/doc_fragments/rackspace.py b/plugins/doc_fragments/rackspace.py new file mode 100644 index 0000000000..403f7359be --- /dev/null +++ b/plugins/doc_fragments/rackspace.py @@ -0,0 +1,113 @@ +# -*- coding: utf-8 -*- + +# Copyright: (c) 2014, Matt Martz +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + + +class ModuleDocFragment(object): + + # Standard Rackspace only documentation fragment + DOCUMENTATION = r''' +options: + api_key: + description: + - Rackspace API key, overrides I(credentials). + type: str + aliases: [ password ] + credentials: + description: + - File to find the Rackspace credentials in. Ignored if I(api_key) and + I(username) are provided. + type: path + aliases: [ creds_file ] + env: + description: + - Environment as configured in I(~/.pyrax.cfg), + see U(https://github.com/rackspace/pyrax/blob/master/docs/getting_started.md#pyrax-configuration). + type: str + region: + description: + - Region to create an instance in. + type: str + default: DFW + username: + description: + - Rackspace username, overrides I(credentials). + type: str + validate_certs: + description: + - Whether or not to require SSL validation of API endpoints. + type: bool + aliases: [ verify_ssl ] +requirements: + - python >= 2.6 + - pyrax +notes: + - The following environment variables can be used: C(RAX_USERNAME), + C(RAX_API_KEY), C(RAX_CREDS_FILE), C(RAX_CREDENTIALS), C(RAX_REGION). + - C(RAX_CREDENTIALS) and C(RAX_CREDS_FILE) point to a credentials file + appropriate for pyrax. See U(https://github.com/rackspace/pyrax/blob/master/docs/getting_started.md#authenticating) + - C(RAX_USERNAME) and C(RAX_API_KEY) obviate the use of a credentials file + - C(RAX_REGION) defines a Rackspace Public Cloud region (DFW, ORD, LON, ...) +''' + + # Documentation fragment including attributes to enable communication + # with other OpenStack clouds. Not all rax modules support this. + OPENSTACK = r''' +options: + api_key: + description: + - Rackspace API key, overrides I(credentials). + aliases: [ password ] + auth_endpoint: + description: + - The URI of the authentication service. + default: https://identity.api.rackspacecloud.com/v2.0/ + version_added: '1.5' + credentials: + description: + - File to find the Rackspace credentials in. Ignored if I(api_key) and + I(username) are provided. + aliases: [ creds_file ] + env: + description: + - Environment as configured in I(~/.pyrax.cfg), + see U(https://github.com/rackspace/pyrax/blob/master/docs/getting_started.md#pyrax-configuration). + version_added: '1.5' + identity_type: + description: + - Authentication mechanism to use, such as rackspace or keystone. + default: rackspace + version_added: '1.5' + region: + description: + - Region to create an instance in. + default: DFW + tenant_id: + description: + - The tenant ID used for authentication. + version_added: '1.5' + tenant_name: + description: + - The tenant name used for authentication. + version_added: '1.5' + username: + description: + - Rackspace username, overrides I(credentials). + validate_certs: + description: + - Whether or not to require SSL validation of API endpoints. + version_added: '1.5' + type: bool + aliases: [ verify_ssl ] +requirements: + - python >= 2.6 + - pyrax +notes: + - The following environment variables can be used: C(RAX_USERNAME), + C(RAX_API_KEY), C(RAX_CREDS_FILE), C(RAX_CREDENTIALS), C(RAX_REGION).
+ - C(RAX_CREDENTIALS) and C(RAX_CREDS_FILE) point to a credentials file + appropriate for pyrax. See U(https://github.com/rackspace/pyrax/blob/master/docs/getting_started.md#authenticating) + - C(RAX_USERNAME) and C(RAX_API_KEY) obviate the use of a credentials file + - C(RAX_REGION) defines a Rackspace Public Cloud region (DFW, ORD, LON, ...) +''' diff --git a/plugins/doc_fragments/scaleway.py b/plugins/doc_fragments/scaleway.py new file mode 100644 index 0000000000..844f14a209 --- /dev/null +++ b/plugins/doc_fragments/scaleway.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- + +# Copyright: (c) 2018, Yanis Guenane +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + + +class ModuleDocFragment(object): + + # Standard documentation fragment + DOCUMENTATION = r''' +options: + api_token: + description: + - Scaleway OAuth token. + type: str + aliases: [ oauth_token ] + api_url: + description: + - Scaleway API URL. + type: str + default: https://api.scaleway.com + aliases: [ base_url ] + api_timeout: + description: + - HTTP timeout to Scaleway API in seconds. + type: int + default: 30 + aliases: [ timeout ] + query_parameters: + description: + - List of parameters passed to the query string. + type: dict + default: {} + validate_certs: + description: + - Validate SSL certs of the Scaleway API. + type: bool + default: yes +notes: + - Also see the API documentation on U(https://developer.scaleway.com/). + - If C(api_token) is not set within the module, the following + environment variables can be used in decreasing order of precedence: + C(SCW_TOKEN), C(SCW_API_KEY), C(SCW_OAUTH_TOKEN) or C(SCW_API_TOKEN). + - If one wants to use a different C(api_url), one can also set the C(SCW_API_URL) + environment variable. +''' diff --git a/plugins/doc_fragments/sros.py b/plugins/doc_fragments/sros.py new file mode 100644 index 0000000000..be66404368 --- /dev/null +++ b/plugins/doc_fragments/sros.py @@ -0,0 +1,61 @@ +# -*- coding: utf-8 -*- + +# Copyright: (c) 2015, Peter Sprygada +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + + +class ModuleDocFragment(object): + + # Standard files documentation fragment + DOCUMENTATION = r''' +options: + provider: + description: + - A dict object containing connection details. + type: dict + suboptions: + host: + description: + - Specifies the DNS host name or address for connecting to the remote + device over the specified transport. The value of host is used as + the destination address for the transport. + type: str + required: true + port: + description: + - Specifies the port to use when building the connection to the remote + device. + type: int + default: 22 + username: + description: + - Configures the username to use to authenticate the connection to + the remote device. This value is used to authenticate + the SSH session. If the value is not specified in the task, the + value of environment variable C(ANSIBLE_NET_USERNAME) will be used instead. + type: str + password: + description: + - Specifies the password to use to authenticate the connection to + the remote device. This value is used to authenticate + the SSH session. If the value is not specified in the task, the + value of environment variable C(ANSIBLE_NET_PASSWORD) will be used instead. + type: str + timeout: + description: + - Specifies the timeout in seconds for communicating with the network device + for either connecting or sending commands.
If the timeout is + exceeded before the operation is completed, the module will error. + type: int + default: 10 + ssh_keyfile: + description: + - Specifies the SSH key to use to authenticate the connection to + the remote device. This value is the path to the + key used to authenticate the SSH session. If the value is not specified + in the task, the value of environment variable C(ANSIBLE_NET_SSH_KEYFILE) + will be used instead. + type: path +notes: + - For more information on using Ansible to manage Nokia SR OS network devices, see U(https://www.ansible.com/ansible-nokia). +''' diff --git a/plugins/doc_fragments/utm.py b/plugins/doc_fragments/utm.py new file mode 100644 index 0000000000..690fb5232e --- /dev/null +++ b/plugins/doc_fragments/utm.py @@ -0,0 +1,51 @@ +# -*- coding: utf-8 -*- + +# Copyright: (c) 2018, Johannes Brunswicker +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + + +class ModuleDocFragment(object): + DOCUMENTATION = r''' +options: + headers: + description: + - A dictionary of additional headers to be sent to POST and PUT requests. + - It is needed for some modules. + type: dict + required: false + utm_host: + description: + - The REST Endpoint of the Sophos UTM. + type: str + required: true + utm_port: + description: + - The port of the REST interface. + type: int + default: 4444 + utm_token: + description: + - "The token used to authenticate to the REST API. See U(https://www.sophos.com/en-us/medialibrary/\ + PDFs/documentation/UTMonAWS/Sophos-UTM-RESTful-API.pdf?la=en), Chapter 2.4.2." + type: str + required: true + utm_protocol: + description: + - The protocol of the REST Endpoint. + choices: [ http, https ] + type: str + default: https + validate_certs: + description: + - Whether the REST interface's SSL certificate should be verified or not. + type: bool + default: yes + state: + description: + - The desired state of the object. + - C(present) will create or update an object. + - C(absent) will delete an object if it was present. + type: str + choices: [ absent, present ] + default: present +''' diff --git a/plugins/doc_fragments/vexata.py b/plugins/doc_fragments/vexata.py new file mode 100644 index 0000000000..066a815d3e --- /dev/null +++ b/plugins/doc_fragments/vexata.py @@ -0,0 +1,49 @@ +# +# Copyright: (c) 2019, Sandeep Kasargod +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + + +class ModuleDocFragment(object): + + DOCUMENTATION = r''' +options: + - See respective platform section for more details +requirements: + - See respective platform section for more details +notes: + - Ansible modules are available for Vexata VX100 arrays. +''' + + # Documentation fragment for Vexata VX100 series + VX100 = r''' +options: + array: + description: + - Vexata VX100 array hostname or IPv4 address. + required: true + type: str + user: + description: + - Vexata API user with administrative privileges. + required: false + type: str + password: + description: + - Vexata API user password. + required: false + type: str + validate_certs: + description: + - Allows connection when SSL certificates are not valid. Set to C(false) when certificates are not trusted. + - If set to C(yes), please make sure Python >= 2.7.9 is installed on the given machine.
+ required: false + type: bool + default: 'no' + +requirements: + - Vexata VX100 storage array with VXOS >= v3.5.0 on storage array + - vexatapi >= 0.0.1 + - python >= 2.7 + - VEXATA_USER and VEXATA_PASSWORD environment variables must be set if + user and password arguments are not passed to the module directly. +''' diff --git a/plugins/doc_fragments/vultr.py b/plugins/doc_fragments/vultr.py new file mode 100644 index 0000000000..61d36aed20 --- /dev/null +++ b/plugins/doc_fragments/vultr.py @@ -0,0 +1,56 @@ +# -*- coding: utf-8 -*- + +# Copyright (c) 2017 René Moser +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + + +class ModuleDocFragment(object): + + # Standard documentation fragment + DOCUMENTATION = r''' +options: + api_key: + description: + - API key of the Vultr API. + - The ENV variable C(VULTR_API_KEY) is used as default, when defined. + type: str + api_timeout: + description: + - HTTP timeout to the Vultr API. + - The ENV variable C(VULTR_API_TIMEOUT) is used as default, when defined. + - Fallback value is 60 seconds if not specified. + type: int + api_retries: + description: + - Number of retries in case the Vultr API returns an HTTP 503 code. + - The ENV variable C(VULTR_API_RETRIES) is used as default, when defined. + - Fallback value is 5 retries if not specified. + type: int + api_retry_max_delay: + description: + - Retry backoff delay is exponential up to this maximum value, in seconds. + - The ENV variable C(VULTR_API_RETRY_MAX_DELAY) is used as default, when defined. + - Fallback value is 12 seconds. + type: int + api_account: + description: + - Name of the ini section in the C(vultr.ini) file. + - The ENV variable C(VULTR_API_ACCOUNT) is used as default, when defined. + type: str + default: default + api_endpoint: + description: + - URL to the API endpoint (without trailing slash). + - The ENV variable C(VULTR_API_ENDPOINT) is used as default, when defined. + - Fallback value is U(https://api.vultr.com) if not specified. + type: str + validate_certs: + description: + - Validate SSL certs of the Vultr API. + type: bool + default: yes +requirements: + - python >= 2.6 +notes: + - Also see the API documentation on U(https://www.vultr.com/api/). +''' diff --git a/plugins/doc_fragments/xenserver.py b/plugins/doc_fragments/xenserver.py new file mode 100644 index 0000000000..5be67da4c3 --- /dev/null +++ b/plugins/doc_fragments/xenserver.py @@ -0,0 +1,37 @@ +# -*- coding: utf-8 -*- + +# Copyright: (c) 2018, Bojan Vitnik +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + + +class ModuleDocFragment(object): + # Common parameters for XenServer modules + DOCUMENTATION = r''' +options: + hostname: + description: + - The hostname or IP address of the XenServer host or XenServer pool master. + - If the value is not specified in the task, the value of environment variable C(XENSERVER_HOST) will be used instead. + type: str + default: localhost + aliases: [ host, pool ] + username: + description: + - The username to use for connecting to XenServer. + - If the value is not specified in the task, the value of environment variable C(XENSERVER_USER) will be used instead. + type: str + default: root + aliases: [ admin, user ] + password: + description: + - The password to use for connecting to XenServer. + - If the value is not specified in the task, the value of environment variable C(XENSERVER_PASSWORD) will be used instead.
+ type: str + aliases: [ pass, pwd ] + validate_certs: + description: + - Allows connection when SSL certificates are not valid. Set to C(false) when certificates are not trusted. + - If the value is not specified in the task, the value of environment variable C(XENSERVER_VALIDATE_CERTS) will be used instead. + type: bool + default: yes +''' diff --git a/plugins/doc_fragments/zabbix.py b/plugins/doc_fragments/zabbix.py new file mode 100644 index 0000000000..83f5c18efc --- /dev/null +++ b/plugins/doc_fragments/zabbix.py @@ -0,0 +1,50 @@ +# -*- coding: utf-8 -*- + +# Copyright: (c) 2017, Ansible, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + + +class ModuleDocFragment(object): + + # Standard documentation fragment + DOCUMENTATION = r''' +options: + server_url: + description: + - URL of Zabbix server, with protocol (http or https). + C(url) is an alias for C(server_url). + required: true + type: str + aliases: [ url ] + login_user: + description: + - Zabbix user name. + type: str + required: true + login_password: + description: + - Zabbix user password. + type: str + required: true + http_login_user: + description: + - Basic Auth login + type: str + http_login_password: + description: + - Basic Auth password + type: str + timeout: + description: + - The timeout of API request (seconds). + type: int + default: 10 + validate_certs: + description: + - If set to False, SSL certificates will not be validated. This should only be used on personally controlled sites using self-signed certificates. + type: bool + default: yes +notes: + - If you use I(login_password=zabbix), the word "zabbix" is replaced by "********" in all module output, because I(login_password) uses C(no_log). + See L(this FAQ,https://docs.ansible.com/ansible/latest/network/user_guide/faq.html#why-is-my-output-sometimes-replaced-with) for more information. 
+''' diff --git a/plugins/filter/__init__.py b/plugins/filter/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/plugins/filter/gcp_kms_filters.py b/plugins/filter/gcp_kms_filters.py new file mode 100644 index 0000000000..9be0be0df4 --- /dev/null +++ b/plugins/filter/gcp_kms_filters.py @@ -0,0 +1,90 @@ +# (c) 2019, Eric Anderson +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +# Usage: +# vars: +# encrypted_myvar: "{{ var | b64encode | gcp_kms_encrypt(auth_kind='serviceaccount', +# service_account_file=gcp_service_account_file, projects='default', +# key_ring='key_ring', crypto_key='crypto_key') }}" +# decrypted_myvar: "{{ encrypted_myvar | gcp_kms_decrypt(auth_kind='serviceaccount', +# service_account_file=gcp_service_account_file, projects='default', +# key_ring='key_ring', crypto_key='crypto_key') }}" + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +from ansible.errors import AnsibleError +from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import GcpSession + + +class GcpMockModule(object): + def __init__(self, params): + self.params = params + + def fail_json(self, *args, **kwargs): + raise AnsibleError(kwargs['msg']) + + +class GcpKmsFilter(object): + def run(self, method, **kwargs): + params = { + 'ciphertext': kwargs.get('ciphertext', None), + 'plaintext': kwargs.get('plaintext', None), + 'additional_authenticated_data': kwargs.get('additional_authenticated_data', None), + 'key_ring': kwargs.get('key_ring', None), + 'crypto_key': kwargs.get('crypto_key', None), + 'projects': kwargs.get('projects', None), + 'scopes': kwargs.get('scopes', None), + 'locations': kwargs.get('locations', 'global'), + 'auth_kind': kwargs.get('auth_kind', None), + 'service_account_file': kwargs.get('service_account_file', None), + 'service_account_email': kwargs.get('service_account_email', None), + } + if not params['scopes']: + params['scopes'] = ['https://www.googleapis.com/auth/cloudkms'] + fake_module = GcpMockModule(params) + if method == "encrypt": + return self.kms_encrypt(fake_module) + elif method == "decrypt": + return self.kms_decrypt(fake_module) + + def kms_decrypt(self, module): + payload = {"ciphertext": module.params['ciphertext']} + + if module.params['additional_authenticated_data']: + payload['additionalAuthenticatedData'] = module.params['additional_authenticated_data'] + + auth = GcpSession(module, 'cloudkms') + url = "https://cloudkms.googleapis.com/v1/projects/{projects}/locations/{locations}/" \ + "keyRings/{key_ring}/cryptoKeys/{crypto_key}:decrypt".format(**module.params) + response = auth.post(url, body=payload) + return response.json()['plaintext'] + + def kms_encrypt(self, module): + payload = {"plaintext": module.params['plaintext']} + + if module.params['additional_authenticated_data']: + payload['additionalAuthenticatedData'] = module.params['additional_authenticated_data'] + + auth = GcpSession(module, 'cloudkms') + url = "https://cloudkms.googleapis.com/v1/projects/{projects}/locations/{locations}/" \ + "keyRings/{key_ring}/cryptoKeys/{crypto_key}:encrypt".format(**module.params) + response = auth.post(url, body=payload) + return response.json()['ciphertext'] + + +def gcp_kms_encrypt(plaintext, **kwargs): + return GcpKmsFilter().run('encrypt', plaintext=plaintext, **kwargs) + + +def gcp_kms_decrypt(ciphertext, **kwargs): + return GcpKmsFilter().run('decrypt', ciphertext=ciphertext, **kwargs) + + +class FilterModule(object): + + def
filters(self): + return { + 'gcp_kms_encrypt': gcp_kms_encrypt, + 'gcp_kms_decrypt': gcp_kms_decrypt + } diff --git a/plugins/filter/json_query.py b/plugins/filter/json_query.py new file mode 100644 index 0000000000..d1da71b476 --- /dev/null +++ b/plugins/filter/json_query.py @@ -0,0 +1,53 @@ +# (c) 2015, Filipe Niero Felisbino +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <http://www.gnu.org/licenses/>. + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +from ansible.errors import AnsibleError, AnsibleFilterError + +try: + import jmespath + HAS_LIB = True +except ImportError: + HAS_LIB = False + + +def json_query(data, expr): + '''Query data using the jmespath query language (http://jmespath.org). Example: + - debug: msg="{{ instance | json_query('tagged_instances[*].block_device_mapping.*.volume_id') }}" + ''' + if not HAS_LIB: + raise AnsibleError('You need to install "jmespath" prior to running ' + 'the json_query filter') + + try: + return jmespath.search(expr, data) + except jmespath.exceptions.JMESPathError as e: + raise AnsibleFilterError('JMESPathError in json_query filter plugin:\n%s' % e) + except Exception as e: + # For older jmespath, we can get ValueError and TypeError without much info. + raise AnsibleFilterError('Error in jmespath.search in json_query filter plugin:\n%s' % e) + + +class FilterModule(object): + ''' Query filter ''' + + def filters(self): + return { + 'json_query': json_query + } diff --git a/plugins/filter/random_mac.py b/plugins/filter/random_mac.py new file mode 100644 index 0000000000..aa9f59be08 --- /dev/null +++ b/plugins/filter/random_mac.py @@ -0,0 +1,73 @@ +# (c) 2020 Ansible Project +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
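 + +# Usage sketch (illustrative only; the prefix and seed values below are +# hypothetical examples, not shipped defaults): +# vars: +# laptop_mac: "{{ '52:54:00' | random_mac }}" +# # Passing a seed makes the generated suffix reproducible across runs: +# pinned_mac: "{{ '52:54:00' | random_mac(seed=inventory_hostname) }}"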
+ +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import re +from random import Random, SystemRandom + +from ansible.errors import AnsibleFilterError +from ansible.module_utils.six import string_types + + +def random_mac(value, seed=None): + ''' Take a string prefix and return it completed with random bytes + to form a complete 6-byte MAC address ''' + + if not isinstance(value, string_types): + raise AnsibleFilterError('Invalid value type (%s) for random_mac (%s)' % + (type(value), value)) + + value = value.lower() + mac_items = value.split(':') + + if len(mac_items) > 5: + raise AnsibleFilterError('Invalid value (%s) for random_mac: 5 colon(:) separated' + ' items max' % value) + + err = "" + for mac in mac_items: + if not mac: + err += ",empty item" + continue + if not re.match('^[a-f0-9]{2}$', mac): + err += ",%s is not a hex byte" % mac + err = err.strip(',') + + if err: + raise AnsibleFilterError('Invalid value (%s) for random_mac: %s' % (value, err)) + + if seed is None: + r = SystemRandom() + else: + r = Random(seed) + # Generate a random int between 0x1000000000 and 0xFFFFFFFFFF + v = r.randint(68719476736, 1099511627775) + # Select the first n hex chars needed to complete the input prefix + remain = 2 * (6 - len(mac_items)) + rnd = ('%x' % v)[:remain] + return value + re.sub(r'(..)', r':\1', rnd) + + +class FilterModule(object): + ''' Ansible jinja2 filters ''' + def filters(self): + return { + 'random_mac': random_mac, + } diff --git a/plugins/httpapi/__init__.py b/plugins/httpapi/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/plugins/httpapi/exos.py b/plugins/httpapi/exos.py new file mode 100644 index 0000000000..10d25dd4e9 --- /dev/null +++ b/plugins/httpapi/exos.py @@ -0,0 +1,252 @@ +# Copyright (c) 2019 Extreme Networks. +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <http://www.gnu.org/licenses/>. +# + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = ''' +--- +author: + - "Ujwal Komarla (@ujwalkomarla)" +httpapi: exos +short_description: Use EXOS REST APIs to communicate with EXOS platform +description: + - This plugin provides low-level abstraction APIs to send REST API + requests to EXOS network devices and receive JSON responses.
+''' + +import json +import re +from ansible.module_utils._text import to_text +from ansible.module_utils.connection import ConnectionError +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import to_list +from ansible.plugins.httpapi import HttpApiBase +import ansible.module_utils.six.moves.http_cookiejar as cookiejar +from ansible.module_utils.common._collections_compat import Mapping +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.config import NetworkConfig, dumps + + +class HttpApi(HttpApiBase): + + def __init__(self, *args, **kwargs): + super(HttpApi, self).__init__(*args, **kwargs) + self._device_info = None + self._auth_token = cookiejar.CookieJar() + + def login(self, username, password): + auth_path = '/auth/token' + credentials = {'username': username, 'password': password} + self.send_request(path=auth_path, data=json.dumps(credentials), method='POST') + + def logout(self): + pass + + def handle_httperror(self, exc): + return False + + def send_request(self, path, data=None, method='GET', **message_kwargs): + headers = {'Content-Type': 'application/json'} + response, response_data = self.connection.send(path, data, method=method, cookies=self._auth_token, headers=headers, **message_kwargs) + try: + if response.status == 204: + response_data = {} + else: + response_data = json.loads(to_text(response_data.getvalue())) + except ValueError: + raise ConnectionError('Response was not valid JSON, got {0}'.format( + to_text(response_data.getvalue()) + )) + return response_data + + def run_commands(self, commands, check_rc=True): + if commands is None: + raise ValueError("'commands' value is required") + + headers = {'Content-Type': 'application/json'} + responses = list() + for cmd in to_list(commands): + if not isinstance(cmd, Mapping): + cmd = {'command': cmd} + + cmd['command'] = strip_run_script_cli2json(cmd['command']) + + output = cmd.pop('output', None) + if output and output not in self.get_option_values().get('output'): + raise ValueError("'output' value %s is invalid.
Valid values are %s" % (output, ','.join(self.get_option_values().get('output')))) + + data = request_builder(cmd['command']) + + response, response_data = self.connection.send('/jsonrpc', data, cookies=self._auth_token, headers=headers, method='POST') + try: + response_data = json.loads(to_text(response_data.getvalue())) + except ValueError: + raise ConnectionError('Response was not valid JSON, got {0}'.format( + to_text(response_data.getvalue()) + )) + + if response_data.get('error', None): + raise ConnectionError("Request Error, got {0}".format(response_data['error'])) + if not response_data.get('result', None): + raise ConnectionError("Request Error, got {0}".format(response_data)) + + response_data = response_data['result'] + + if output and output == 'text': + statusOut = getKeyInResponse(response_data, 'status') + cliOut = getKeyInResponse(response_data, 'CLIoutput') + if statusOut == "ERROR": + raise ConnectionError("Command error({1}) for request {0}".format(cmd['command'], cliOut)) + if cliOut is None: + raise ValueError("Response for request {0} doesn't have the CLIoutput field, got {1}".format(cmd['command'], response_data)) + response_data = cliOut + + responses.append(response_data) + return responses + + def get_device_info(self): + device_info = {} + device_info['network_os'] = 'exos' + + reply = self.run_commands({'command': 'show switch detail', 'output': 'text'}) + data = to_text(reply, errors='surrogate_or_strict').strip() + + match = re.search(r'ExtremeXOS version (\S+)', data) + if match: + device_info['network_os_version'] = match.group(1) + + match = re.search(r'System Type: +(\S+)', data) + if match: + device_info['network_os_model'] = match.group(1) + + match = re.search(r'SysName: +(\S+)', data) + if match: + device_info['network_os_hostname'] = match.group(1) + + return device_info + + def get_device_operations(self): + return { + 'supports_diff_replace': False, # identify if config should be merged or replaced is supported + 'supports_commit': False, # identify if commit is supported by device or not + 'supports_rollback': False, # identify if rollback is supported or not + 'supports_defaults': True, # identify if fetching running config with default is supported + 'supports_commit_comment': False, # identify if adding comment to commit is supported of not + 'supports_onbox_diff': False, # identify if on box diff capability is supported or not + 'supports_generate_diff': True, # identify if diff capability is supported within plugin + 'supports_multiline_delimiter': False, # identify if multiline demiliter is supported within config + 'supports_diff_match': True, # identify if match is supported + 'supports_diff_ignore_lines': True, # identify if ignore line in diff is supported + 'supports_config_replace': False, # identify if running config replace with candidate config is supported + 'supports_admin': False, # identify if admin configure mode is supported or not + 'supports_commit_label': False # identify if commit label is supported or not + } + + def get_option_values(self): + return { + 'format': ['text', 'json'], + 'diff_match': ['line', 'strict', 'exact', 'none'], + 'diff_replace': ['line', 'block'], + 'output': ['text', 'json'] + } + + def get_capabilities(self): + result = {} + result['rpc'] = ['get_default_flag', 'run_commands', 'get_config', 'send_request', 'get_capabilities', 'get_diff'] + result['device_info'] = self.get_device_info() + result['device_operations'] = self.get_device_operations() + result.update(self.get_option_values()) + 
result['network_api'] = 'exosapi' + return json.dumps(result) + + def get_default_flag(self): + # The flag to modify the command to collect configuration with defaults + return 'detail' + + def get_diff(self, candidate=None, running=None, diff_match='line', diff_ignore_lines=None, path=None, diff_replace='line'): + diff = {} + device_operations = self.get_device_operations() + option_values = self.get_option_values() + + if candidate is None and device_operations['supports_generate_diff']: + raise ValueError("candidate configuration is required to generate diff") + + if diff_match not in option_values['diff_match']: + raise ValueError("'match' value %s is invalid, valid values are %s" % (diff_match, ', '.join(option_values['diff_match']))) + + if diff_replace not in option_values['diff_replace']: + raise ValueError("'replace' value %s is invalid, valid values are %s" % (diff_replace, ', '.join(option_values['diff_replace']))) + + # prepare candidate configuration + candidate_obj = NetworkConfig(indent=1) + candidate_obj.load(candidate) + + if running and diff_match != 'none' and diff_replace != 'config': + # running configuration + running_obj = NetworkConfig(indent=1, contents=running, ignore_lines=diff_ignore_lines) + configdiffobjs = candidate_obj.difference(running_obj, path=path, match=diff_match, replace=diff_replace) + + else: + configdiffobjs = candidate_obj.items + + diff['config_diff'] = dumps(configdiffobjs, 'commands') if configdiffobjs else '' + return diff + + def get_config(self, source='running', format='text', flags=None): + options_values = self.get_option_values() + if format not in options_values['format']: + raise ValueError("'format' value %s is invalid. Valid values are %s" % (format, ','.join(options_values['format']))) + + lookup = {'running': 'show configuration', 'startup': 'debug cfgmgr show configuration file'} + if source not in lookup: + raise ValueError("fetching configuration from %s is not supported" % source) + + cmd = {'command': lookup[source], 'output': 'text'} + + if source == 'startup': + reply = self.run_commands({'command': 'show switch', 'format': 'text'}) + data = to_text(reply, errors='surrogate_or_strict').strip() + match = re.search(r'Config Selected: +(\S+)\.cfg', data, re.MULTILINE) + if match: + cmd['command'] += match.group(1) + else: + # No Startup(/Selected) Config + return {} + + cmd['command'] += ' '.join(to_list(flags)) + cmd['command'] = cmd['command'].strip() + + return self.run_commands(cmd)[0] + + +def request_builder(command, reqid=""): + return json.dumps(dict(jsonrpc='2.0', id=reqid, method='cli', params=to_list(command))) + + +def strip_run_script_cli2json(command): + if to_text(command, errors="surrogate_then_replace").startswith('run script cli2json.py'): + command = str(command).replace('run script cli2json.py', '') + return command + + +def getKeyInResponse(response, key): + keyOut = None + for item in response: + if key in item: + keyOut = item[key] + break + return keyOut diff --git a/plugins/httpapi/fortianalyzer.py b/plugins/httpapi/fortianalyzer.py new file mode 100644 index 0000000000..1e870140a2 --- /dev/null +++ b/plugins/httpapi/fortianalyzer.py @@ -0,0 +1,453 @@ +# Copyright (c) 2018 Fortinet and/or its affiliates. +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version.
+# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <http://www.gnu.org/licenses/>. +# + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = ''' +--- +author: + - Luke Weighall (@lweighall) + - Andrew Welsh (@Ghilli3) + - Jim Huber (@p4r4n0y1ng) +httpapi: fortianalyzer +short_description: HttpApi Plugin for Fortinet FortiAnalyzer Appliance or VM. +description: + - This HttpApi plugin provides methods to connect to Fortinet FortiAnalyzer Appliance or VM via JSON RPC API. + +''' + +import json +from ansible.plugins.httpapi import HttpApiBase +from ansible.module_utils.basic import to_text +from ansible_collections.community.general.plugins.module_utils.network.fortianalyzer.common import BASE_HEADERS +from ansible_collections.community.general.plugins.module_utils.network.fortianalyzer.common import FAZBaseException +from ansible_collections.community.general.plugins.module_utils.network.fortianalyzer.common import FAZCommon +from ansible_collections.community.general.plugins.module_utils.network.fortianalyzer.common import FAZMethods + + +class HttpApi(HttpApiBase): + def __init__(self, connection): + super(HttpApi, self).__init__(connection) + self._req_id = 0 + self._sid = None + self._url = "/jsonrpc" + self._host = None + self._tools = FAZCommon + self._debug = False + self._connected_faz = None + self._last_response_msg = None + self._last_response_code = None + self._last_data_payload = None + self._last_url = None + self._last_response_raw = None + self._locked_adom_list = list() + self._locked_adoms_by_user = list() + self._uses_workspace = False + self._uses_adoms = False + self._adom_list = list() + self._logged_in_user = None + + def set_become(self, become_context): + """ + ELEVATION IS NOT REQUIRED ON FORTINET DEVICES - SKIPPED + :param become_context: Unused input. + :return: None + """ + return None + + def update_auth(self, response, response_data): + """ + TOKENS ARE NOT USED SO NO NEED TO UPDATE AUTH + :param response: Unused input. + :param response_data: Unused input. + :return: None + """ + return None + + def login(self, username, password): + """ + This function will log the plugin into FortiAnalyzer, and return the results. + :param username: Username of FortiAnalyzer Admin + :param password: Password of FortiAnalyzer Admin + + :return: Dictionary of status if it logged in or not. + """ + + self._logged_in_user = username + self.send_request(FAZMethods.EXEC, self._tools.format_request(FAZMethods.EXEC, "sys/login/user", + passwd=password, user=username,)) + + if "FortiAnalyzer object connected to FortiAnalyzer" in self.__str__(): + # If login worked, then inspect the FortiAnalyzer for Workspace Mode and its system information. + self.inspect_faz() + return + else: + raise FAZBaseException(msg="Unknown error while logging in...connection was lost during login operation..." + " Exiting") + + def inspect_faz(self): + # CHECK FOR WORKSPACE MODE TO SEE IF WE HAVE TO ENABLE ADOM LOCKS + status = self.get_system_status() + if status[0] == -11: + # THE CONNECTION GOT LOST SOMEHOW, REMOVE THE SID AND REPORT BAD LOGIN + self.logout() + raise FAZBaseException(msg="Error -11 -- the Session ID was likely malformed somehow. Contact authors."
+ " Exiting") + elif status[0] == 0: + try: + self.check_mode() + if self._uses_adoms: + self.get_adom_list() + if self._uses_workspace: + self.get_locked_adom_list() + self._connected_faz = status[1] + self._host = self._connected_faz["Hostname"] + except Exception: + pass + return + + def logout(self): + """ + This function will logout of the FortiAnalyzer. + """ + if self.sid is not None: + # IF WE WERE USING WORKSPACES, THEN CLEAN UP OUR LOCKS IF THEY STILL EXIST + if self.uses_workspace: + self.get_lock_info() + self.run_unlock() + ret_code, response = self.send_request(FAZMethods.EXEC, + self._tools.format_request(FAZMethods.EXEC, "sys/logout")) + self.sid = None + return ret_code, response + + def send_request(self, method, params): + """ + Responsible for actual sending of data to the connection httpapi base plugin. Does some formatting as well. + :param params: A formatted dictionary that was returned by self.common_datagram_params() + before being called here. + :param method: The preferred API Request method (GET, ADD, POST, etc....) + :type method: basestring + + :return: Dictionary of status if it logged in or not. + """ + + try: + if self.sid is None and params[0]["url"] != "sys/login/user": + try: + self.connection._connect() + except Exception as err: + raise FAZBaseException( + msg="An problem happened with the httpapi plugin self-init connection process. " + "Error: " + to_text(err)) + except IndexError: + raise FAZBaseException("An attempt was made at communicating with a FAZ with " + "no valid session and an incorrectly formatted request.") + except Exception: + raise FAZBaseException("An attempt was made at communicating with a FAZ with " + "no valid session and an unexpected error was discovered.") + + self._update_request_id() + json_request = { + "method": method, + "params": params, + "session": self.sid, + "id": self.req_id, + "verbose": 1 + } + data = json.dumps(json_request, ensure_ascii=False).replace('\\\\', '\\') + try: + # Sending URL and Data in Unicode, per Ansible Specifications for Connection Plugins + response, response_data = self.connection.send(path=to_text(self._url), data=to_text(data), + headers=BASE_HEADERS) + # Get Unicode Response - Must convert from StringIO to unicode first so we can do a replace function below + result = json.loads(to_text(response_data.getvalue())) + self._update_self_from_response(result, self._url, data) + return self._handle_response(result) + except Exception as err: + raise FAZBaseException(err) + + def _handle_response(self, response): + self._set_sid(response) + if isinstance(response["result"], list): + result = response["result"][0] + else: + result = response["result"] + if "data" in result: + return result["status"]["code"], result["data"] + else: + return result["status"]["code"], result + + def _update_self_from_response(self, response, url, data): + self._last_response_raw = response + if isinstance(response["result"], list): + result = response["result"][0] + else: + result = response["result"] + if "status" in result: + self._last_response_code = result["status"]["code"] + self._last_response_msg = result["status"]["message"] + self._last_url = url + self._last_data_payload = data + + def _set_sid(self, response): + if self.sid is None and "session" in response: + self.sid = response["session"] + + def return_connected_faz(self): + """ + Returns the data stored under self._connected_faz + + :return: dict + """ + try: + if self._connected_faz: + return self._connected_faz + except Exception: + raise 
FAZBaseException("Couldn't Retrieve Connected FAZ Stats") + + def get_system_status(self): + """ + Returns the system status page from the FortiAnalyzer, for logging and other uses. + return: status + """ + status = self.send_request(FAZMethods.GET, self._tools.format_request(FAZMethods.GET, "sys/status")) + return status + + @property + def debug(self): + return self._debug + + @debug.setter + def debug(self, val): + self._debug = val + + @property + def req_id(self): + return self._req_id + + @req_id.setter + def req_id(self, val): + self._req_id = val + + def _update_request_id(self, reqid=0): + self.req_id = reqid if reqid != 0 else self.req_id + 1 + + @property + def sid(self): + return self._sid + + @sid.setter + def sid(self, val): + self._sid = val + + def __str__(self): + if self.sid is not None and self.connection._url is not None: + return "FortiAnalyzer object connected to FortiAnalyzer: " + to_text(self.connection._url) + return "FortiAnalyzer object with no valid connection to a FortiAnalyzer appliance." + + ################################## + # BEGIN DATABASE LOCK CONTEXT CODE + ################################## + + @property + def uses_workspace(self): + return self._uses_workspace + + @uses_workspace.setter + def uses_workspace(self, val): + self._uses_workspace = val + + @property + def uses_adoms(self): + return self._uses_adoms + + @uses_adoms.setter + def uses_adoms(self, val): + self._uses_adoms = val + + def add_adom_to_lock_list(self, adom): + if adom not in self._locked_adom_list: + self._locked_adom_list.append(adom) + + def remove_adom_from_lock_list(self, adom): + if adom in self._locked_adom_list: + self._locked_adom_list.remove(adom) + + def check_mode(self): + """ + Checks FortiAnalyzer for the use of Workspace mode + """ + url = "/cli/global/system/global" + code, resp_obj = self.send_request(FAZMethods.GET, + self._tools.format_request(FAZMethods.GET, + url, + fields=["workspace-mode", "adom-status"])) + try: + if resp_obj["workspace-mode"] == "workflow": + self.uses_workspace = True + elif resp_obj["workspace-mode"] == "disabled": + self.uses_workspace = False + except KeyError: + self.uses_workspace = False + except Exception: + raise FAZBaseException(msg="Couldn't determine workspace-mode in the plugin") + try: + if resp_obj["adom-status"] in [1, "enable"]: + self.uses_adoms = True + else: + self.uses_adoms = False + except KeyError: + self.uses_adoms = False + except Exception: + raise FAZBaseException(msg="Couldn't determine adom-status in the plugin") + + def run_unlock(self): + """ + Checks for ADOM status, if locked, it will unlock + """ + for adom_locked in self._locked_adoms_by_user: + adom = adom_locked["adom"] + self.unlock_adom(adom) + + def lock_adom(self, adom=None, *args, **kwargs): + """ + Locks an ADOM for changes + """ + if adom: + if adom.lower() == "global": + url = "/dvmdb/global/workspace/lock/" + else: + url = "/dvmdb/adom/{adom}/workspace/lock/".format(adom=adom) + else: + url = "/dvmdb/adom/root/workspace/lock" + code, respobj = self.send_request(FAZMethods.EXEC, self._tools.format_request(FAZMethods.EXEC, url)) + if code == 0 and respobj["status"]["message"].lower() == "ok": + self.add_adom_to_lock_list(adom) + return code, respobj + + def unlock_adom(self, adom=None, *args, **kwargs): + """ + Unlocks an ADOM after changes + """ + if adom: + if adom.lower() == "global": + url = "/dvmdb/global/workspace/unlock/" + else: + url = "/dvmdb/adom/{adom}/workspace/unlock/".format(adom=adom) + else: + url = 
"/dvmdb/adom/root/workspace/unlock" + code, respobj = self.send_request(FAZMethods.EXEC, self._tools.format_request(FAZMethods.EXEC, url)) + if code == 0 and respobj["status"]["message"].lower() == "ok": + self.remove_adom_from_lock_list(adom) + return code, respobj + + def commit_changes(self, adom=None, aux=False, *args, **kwargs): + """ + Commits changes to an ADOM + """ + if adom: + if aux: + url = "/pm/config/adom/{adom}/workspace/commit".format(adom=adom) + else: + if adom.lower() == "global": + url = "/dvmdb/global/workspace/commit/" + else: + url = "/dvmdb/adom/{adom}/workspace/commit".format(adom=adom) + else: + url = "/dvmdb/adom/root/workspace/commit" + return self.send_request(FAZMethods.EXEC, self._tools.format_request(FAZMethods.EXEC, url)) + + def get_lock_info(self, adom=None): + """ + Gets ADOM lock info so it can be displayed with the error messages. Or if determined to be locked by ansible + for some reason, then unlock it. + """ + if not adom or adom == "root": + url = "/dvmdb/adom/root/workspace/lockinfo" + else: + if adom.lower() == "global": + url = "/dvmdb/global/workspace/lockinfo/" + else: + url = "/dvmdb/adom/{adom}/workspace/lockinfo/".format(adom=adom) + datagram = {} + data = self._tools.format_request(FAZMethods.GET, url, **datagram) + resp_obj = self.send_request(FAZMethods.GET, data) + code = resp_obj[0] + if code != 0: + self._module.fail_json(msg=("An error occurred trying to get the ADOM Lock Info. Error: " + to_text(resp_obj))) + elif code == 0: + try: + if resp_obj[1]["status"]["message"] == "OK": + self._lock_info = None + except Exception: + self._lock_info = resp_obj[1] + return resp_obj + + def get_adom_list(self): + """ + Gets the list of ADOMs for the FortiAnalyzer + """ + if self.uses_adoms: + url = "/dvmdb/adom" + datagram = {} + data = self._tools.format_request(FAZMethods.GET, url, **datagram) + resp_obj = self.send_request(FAZMethods.GET, data) + code = resp_obj[0] + if code != 0: + self._module.fail_json(msg=("An error occurred trying to get the ADOM Info. Error: " + to_text(resp_obj))) + elif code == 0: + num_of_adoms = len(resp_obj[1]) + append_list = ['root', ] + for adom in resp_obj[1]: + if adom["tab_status"] != "": + append_list.append(to_text(adom["name"])) + self._adom_list = append_list + return resp_obj + + def get_locked_adom_list(self): + """ + Gets the list of locked adoms + """ + try: + locked_list = list() + locked_by_user_list = list() + for adom in self._adom_list: + adom_lock_info = self.get_lock_info(adom=adom) + try: + if adom_lock_info[1]["status"]["message"] == "OK": + continue + except Exception: + pass + try: + if adom_lock_info[1][0]["lock_user"]: + locked_list.append(to_text(adom)) + if adom_lock_info[1][0]["lock_user"] == self._logged_in_user: + locked_by_user_list.append({"adom": to_text(adom), "user": to_text(adom_lock_info[1][0]["lock_user"])}) + except Exception as err: + raise FAZBaseException(err) + self._locked_adom_list = locked_list + self._locked_adoms_by_user = locked_by_user_list + + except Exception as err: + raise FAZBaseException(msg=("An error occurred while trying to get the locked adom list. Error: " + + to_text(err))) + + ################################# + # END DATABASE LOCK CONTEXT CODE + ################################# diff --git a/plugins/httpapi/fortimanager.py b/plugins/httpapi/fortimanager.py new file mode 100644 index 0000000000..1c80c2808e --- /dev/null +++ b/plugins/httpapi/fortimanager.py @@ -0,0 +1,451 @@ +# Copyright (c) 2018 Fortinet and/or its affiliates. 
+# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <http://www.gnu.org/licenses/>. +# + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +DOCUMENTATION = ''' +--- +author: + - Luke Weighall (@lweighall) + - Andrew Welsh (@Ghilli3) + - Jim Huber (@p4r4n0y1ng) +httpapi: fortimanager +short_description: HttpApi Plugin for Fortinet FortiManager Appliance or VM. +description: + - This HttpApi plugin provides methods to connect to Fortinet FortiManager Appliance or VM via JSON RPC API. +''' + +import json +from ansible.plugins.httpapi import HttpApiBase +from ansible.module_utils.basic import to_text +from ansible_collections.fortinet.fortios.plugins.module_utils.network.fortimanager.common import BASE_HEADERS +from ansible_collections.fortinet.fortios.plugins.module_utils.network.fortimanager.common import FMGBaseException +from ansible_collections.fortinet.fortios.plugins.module_utils.network.fortimanager.common import FMGRCommon +from ansible_collections.fortinet.fortios.plugins.module_utils.network.fortimanager.common import FMGRMethods + + +class HttpApi(HttpApiBase): + def __init__(self, connection): + super(HttpApi, self).__init__(connection) + self._req_id = 0 + self._sid = None + self._url = "/jsonrpc" + self._host = None + self._tools = FMGRCommon + self._debug = False + self._connected_fmgr = None + self._last_response_msg = None + self._last_response_code = None + self._last_data_payload = None + self._last_url = None + self._last_response_raw = None + self._locked_adom_list = list() + self._locked_adoms_by_user = list() + self._uses_workspace = False + self._uses_adoms = False + self._adom_list = list() + self._logged_in_user = None + + def set_become(self, become_context): + """ + ELEVATION IS NOT REQUIRED ON FORTINET DEVICES - SKIPPED. + :param become_context: Unused input. + :return: None + """ + return None + + def update_auth(self, response, response_data): + """ + TOKENS ARE NOT USED SO NO NEED TO UPDATE AUTH. + :param response: Unused input. + :param response_data: Unused input. + :return: None + """ + return None + + def login(self, username, password): + + """ + This function will log the plugin into FortiManager, and return the results. + :param username: Username of FortiManager Admin + :param password: Password of FortiManager Admin + + :return: Dictionary of status if it logged in or not. + """ + self._logged_in_user = username + self.send_request(FMGRMethods.EXEC, self._tools.format_request(FMGRMethods.EXEC, "sys/login/user", + passwd=password, user=username, )) + + if "FortiManager object connected to FortiManager" in self.__str__(): + # If login worked, then inspect the FortiManager for Workspace Mode and its system information. + self.inspect_fmgr() + return + else: + raise FMGBaseException(msg="Unknown error while logging in...connection was lost during login operation...."
+ " Exiting") + + def inspect_fmgr(self): + # CHECK FOR WORKSPACE MODE TO SEE IF WE HAVE TO ENABLE ADOM LOCKS + status = self.get_system_status() + if status[0] == -11: + # THE CONNECTION GOT LOST SOMEHOW, REMOVE THE SID AND REPORT BAD LOGIN + self.logout() + raise FMGBaseException(msg="Error -11 -- the Session ID was likely malformed somehow. Contact authors." + " Exiting") + elif status[0] == 0: + try: + self.check_mode() + if self._uses_adoms: + self.get_adom_list() + if self._uses_workspace: + self.get_locked_adom_list() + self._connected_fmgr = status[1] + self._host = self._connected_fmgr["Hostname"] + except BaseException: + pass + return + + def logout(self): + """ + This function will logout of the FortiManager. + """ + if self.sid is not None: + # IF WE WERE USING WORKSPACES, THEN CLEAN UP OUR LOCKS IF THEY STILL EXIST + if self.uses_workspace: + self.get_lock_info() + self.run_unlock() + ret_code, response = self.send_request(FMGRMethods.EXEC, + self._tools.format_request(FMGRMethods.EXEC, "sys/logout")) + self.sid = None + return ret_code, response + + def send_request(self, method, params): + """ + Responsible for actual sending of data to the connection httpapi base plugin. Does some formatting too. + :param params: A formatted dictionary that was returned by self.common_datagram_params() + before being called here. + :param method: The preferred API Request method (GET, ADD, POST, etc....) + :type method: basestring + + :return: Dictionary of status, if it logged in or not. + """ + try: + if self.sid is None and params[0]["url"] != "sys/login/user": + try: + self.connection._connect() + except Exception as err: + raise FMGBaseException( + msg="An problem happened with the httpapi plugin self-init connection process. " + "Error: " + to_text(err)) + except IndexError: + raise FMGBaseException("An attempt was made at communicating with a FMG with " + "no valid session and an incorrectly formatted request.") + except Exception as err: + raise FMGBaseException("An attempt was made at communicating with a FMG with " + "no valid session and an unexpected error was discovered. 
\n Error: " + to_text(err)) + + self._update_request_id() + json_request = { + "method": method, + "params": params, + "session": self.sid, + "id": self.req_id, + "verbose": 1 + } + data = json.dumps(json_request, ensure_ascii=False).replace('\\\\', '\\') + try: + # Sending URL and Data in Unicode, per Ansible Specifications for Connection Plugins + response, response_data = self.connection.send(path=to_text(self._url), data=to_text(data), + headers=BASE_HEADERS) + # Get Unicode Response - Must convert from StringIO to unicode first so we can do a replace function below + result = json.loads(to_text(response_data.getvalue())) + self._update_self_from_response(result, self._url, data) + return self._handle_response(result) + except Exception as err: + raise FMGBaseException(err) + + def _handle_response(self, response): + self._set_sid(response) + if isinstance(response["result"], list): + result = response["result"][0] + else: + result = response["result"] + if "data" in result: + return result["status"]["code"], result["data"] + else: + return result["status"]["code"], result + + def _update_self_from_response(self, response, url, data): + self._last_response_raw = response + if isinstance(response["result"], list): + result = response["result"][0] + else: + result = response["result"] + if "status" in result: + self._last_response_code = result["status"]["code"] + self._last_response_msg = result["status"]["message"] + self._last_url = url + self._last_data_payload = data + + def _set_sid(self, response): + if self.sid is None and "session" in response: + self.sid = response["session"] + + def return_connected_fmgr(self): + """ + Returns the data stored under self._connected_fmgr + + :return: dict + """ + try: + if self._connected_fmgr: + return self._connected_fmgr + except Exception: + raise FMGBaseException("Couldn't Retrieve Connected FMGR Stats") + + def get_system_status(self): + """ + Returns the system status page from the FortiManager, for logging and other uses. + return: status + """ + status = self.send_request(FMGRMethods.GET, self._tools.format_request(FMGRMethods.GET, "sys/status")) + return status + + @property + def debug(self): + return self._debug + + @debug.setter + def debug(self, val): + self._debug = val + + @property + def req_id(self): + return self._req_id + + @req_id.setter + def req_id(self, val): + self._req_id = val + + def _update_request_id(self, reqid=0): + self.req_id = reqid if reqid != 0 else self.req_id + 1 + + @property + def sid(self): + return self._sid + + @sid.setter + def sid(self, val): + self._sid = val + + def __str__(self): + if self.sid is not None and self.connection._url is not None: + return "FortiManager object connected to FortiManager: " + to_text(self.connection._url) + return "FortiManager object with no valid connection to a FortiManager appliance." 
+ + ################################## + # BEGIN DATABASE LOCK CONTEXT CODE + ################################## + + @property + def uses_workspace(self): + return self._uses_workspace + + @uses_workspace.setter + def uses_workspace(self, val): + self._uses_workspace = val + + @property + def uses_adoms(self): + return self._uses_adoms + + @uses_adoms.setter + def uses_adoms(self, val): + self._uses_adoms = val + + def add_adom_to_lock_list(self, adom): + if adom not in self._locked_adom_list: + self._locked_adom_list.append(adom) + + def remove_adom_from_lock_list(self, adom): + if adom in self._locked_adom_list: + self._locked_adom_list.remove(adom) + + def check_mode(self): + """ + Checks FortiManager for the use of Workspace mode + """ + url = "/cli/global/system/global" + code, resp_obj = self.send_request(FMGRMethods.GET, + self._tools.format_request(FMGRMethods.GET, + url, + fields=["workspace-mode", "adom-status"])) + try: + if resp_obj["workspace-mode"] == "workflow": + self.uses_workspace = True + elif resp_obj["workspace-mode"] == "disabled": + self.uses_workspace = False + except KeyError: + raise FMGBaseException(msg="Couldn't determine workspace-mode in the plugin") + try: + if resp_obj["adom-status"] in [1, "enable"]: + self.uses_adoms = True + else: + self.uses_adoms = False + except KeyError: + raise FMGBaseException(msg="Couldn't determine adom-status in the plugin") + + def run_unlock(self): + """ + Checks for ADOM status, if locked, it will unlock + """ + for adom_locked in self._locked_adoms_by_user: + adom = adom_locked["adom"] + self.unlock_adom(adom) + + def lock_adom(self, adom=None, *args, **kwargs): + """ + Locks an ADOM for changes + """ + if adom: + if adom.lower() == "global": + url = "/dvmdb/global/workspace/lock/" + else: + url = "/dvmdb/adom/{adom}/workspace/lock/".format(adom=adom) + else: + url = "/dvmdb/adom/root/workspace/lock" + code, respobj = self.send_request(FMGRMethods.EXEC, self._tools.format_request(FMGRMethods.EXEC, url)) + if code == 0 and respobj["status"]["message"].lower() == "ok": + self.add_adom_to_lock_list(adom) + return code, respobj + + def unlock_adom(self, adom=None, *args, **kwargs): + """ + Unlocks an ADOM after changes + """ + if adom: + if adom.lower() == "global": + url = "/dvmdb/global/workspace/unlock/" + else: + url = "/dvmdb/adom/{adom}/workspace/unlock/".format(adom=adom) + else: + url = "/dvmdb/adom/root/workspace/unlock" + code, respobj = self.send_request(FMGRMethods.EXEC, self._tools.format_request(FMGRMethods.EXEC, url)) + if code == 0 and respobj["status"]["message"].lower() == "ok": + self.remove_adom_from_lock_list(adom) + return code, respobj + + def commit_changes(self, adom=None, aux=False, *args, **kwargs): + """ + Commits changes to an ADOM + """ + if adom: + if aux: + url = "/pm/config/adom/{adom}/workspace/commit".format(adom=adom) + else: + if adom.lower() == "global": + url = "/dvmdb/global/workspace/commit/" + else: + url = "/dvmdb/adom/{adom}/workspace/commit".format(adom=adom) + else: + url = "/dvmdb/adom/root/workspace/commit" + return self.send_request(FMGRMethods.EXEC, self._tools.format_request(FMGRMethods.EXEC, url)) + + def get_lock_info(self, adom=None): + """ + Gets ADOM lock info so it can be displayed with the error messages. Or if determined to be locked by ansible + for some reason, then unlock it. 
+ """ + if not adom or adom == "root": + url = "/dvmdb/adom/root/workspace/lockinfo" + else: + if adom.lower() == "global": + url = "/dvmdb/global/workspace/lockinfo/" + else: + url = "/dvmdb/adom/{adom}/workspace/lockinfo/".format(adom=adom) + datagram = {} + data = self._tools.format_request(FMGRMethods.GET, url, **datagram) + resp_obj = self.send_request(FMGRMethods.GET, data) + code = resp_obj[0] + if code != 0: + self._module.fail_json(msg=("An error occurred trying to get the ADOM Lock Info. " + "Error: " + to_text(resp_obj))) + elif code == 0: + try: + if resp_obj[1]["status"]["message"] == "OK": + self._lock_info = None + except Exception: + self._lock_info = resp_obj[1] + return resp_obj + + def get_adom_list(self): + """ + Gets the list of ADOMs for the FortiManager + """ + if self.uses_adoms: + url = "/dvmdb/adom" + datagram = {} + data = self._tools.format_request(FMGRMethods.GET, url, **datagram) + resp_obj = self.send_request(FMGRMethods.GET, data) + code = resp_obj[0] + if code != 0: + self._module.fail_json(msg=("An error occurred trying to get the ADOM Info. " + "Error: " + to_text(resp_obj))) + elif code == 0: + num_of_adoms = len(resp_obj[1]) + append_list = ['root', ] + for adom in resp_obj[1]: + if adom["tab_status"] != "": + append_list.append(to_text(adom["name"])) + self._adom_list = append_list + return resp_obj + + def get_locked_adom_list(self): + """ + Gets the list of locked adoms + """ + try: + locked_list = list() + locked_by_user_list = list() + for adom in self._adom_list: + adom_lock_info = self.get_lock_info(adom=adom) + try: + if adom_lock_info[1]["status"]["message"] == "OK": + continue + except IndexError as err: + pass + try: + if adom_lock_info[1][0]["lock_user"]: + locked_list.append(to_text(adom)) + if adom_lock_info[1][0]["lock_user"] == self._logged_in_user: + locked_by_user_list.append({"adom": to_text(adom), + "user": to_text(adom_lock_info[1][0]["lock_user"])}) + except Exception as err: + raise FMGBaseException(err) + self._locked_adom_list = locked_list + self._locked_adoms_by_user = locked_by_user_list + + except Exception as err: + raise FMGBaseException(msg=("An error occurred while trying to get the locked adom list. Error: " + + to_text(err))) + + ################################ + # END DATABASE LOCK CONTEXT CODE + ################################ diff --git a/plugins/httpapi/ftd.py b/plugins/httpapi/ftd.py new file mode 100644 index 0000000000..73db5ef79d --- /dev/null +++ b/plugins/httpapi/ftd.py @@ -0,0 +1,386 @@ +# Copyright (c) 2018 Cisco and/or its affiliates. +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+# + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +DOCUMENTATION = ''' +--- +author: Ansible Networking Team +httpapi : ftd +short_description: HttpApi Plugin for Cisco ASA Firepower device +description: + - This HttpApi plugin provides methods to connect to Cisco ASA firepower + devices over an HTTP(S)-based API. +options: + token_path: + type: str + description: + - Specifies the api token path of the FTD device + vars: + - name: ansible_httpapi_ftd_token_path + spec_path: + type: str + description: + - Specifies the api spec path of the FTD device + default: '/apispec/ngfw.json' + vars: + - name: ansible_httpapi_ftd_spec_path +''' + +import json +import os +import re + +from ansible import __version__ as ansible_version + +from ansible.module_utils.basic import to_text +from ansible.errors import AnsibleConnectionFailure +from ansible_collections.community.general.plugins.module_utils.network.ftd.fdm_swagger_client import FdmSwaggerParser, SpecProp, FdmSwaggerValidator +from ansible_collections.community.general.plugins.module_utils.network.ftd.common import HTTPMethod, ResponseParams +from ansible.module_utils.six.moves.urllib.error import HTTPError +from ansible.module_utils.six.moves.urllib.parse import urlencode +from ansible.plugins.httpapi import HttpApiBase +from urllib3 import encode_multipart_formdata +from urllib3.fields import RequestField +from ansible.module_utils.connection import ConnectionError + +BASE_HEADERS = { + 'Content-Type': 'application/json', + 'Accept': 'application/json', + 'User-Agent': 'FTD Ansible/%s' % ansible_version +} + +TOKEN_EXPIRATION_STATUS_CODE = 408 +UNAUTHORIZED_STATUS_CODE = 401 +API_TOKEN_PATH_OPTION_NAME = 'token_path' +TOKEN_PATH_TEMPLATE = '/api/fdm/{0}/fdm/token' +GET_API_VERSIONS_PATH = '/api/versions' +DEFAULT_API_VERSIONS = ['v2', 'v1'] + +INVALID_API_TOKEN_PATH_MSG = ('The API token path is incorrect. Please, check correctness of ' + 'the `ansible_httpapi_ftd_token_path` variable in the inventory file.') +MISSING_API_TOKEN_PATH_MSG = ('Ansible could not determine the API token path automatically.
Please, ' + 'specify the `ansible_httpapi_ftd_token_path` variable in the inventory file.') + + +class HttpApi(HttpApiBase): + def __init__(self, connection): + super(HttpApi, self).__init__(connection) + self.connection = connection + self.access_token = None + self.refresh_token = None + self._api_spec = None + self._api_validator = None + self._ignore_http_errors = False + + def login(self, username, password): + def request_token_payload(username, password): + return { + 'grant_type': 'password', + 'username': username, + 'password': password + } + + def refresh_token_payload(refresh_token): + return { + 'grant_type': 'refresh_token', + 'refresh_token': refresh_token + } + + if self.refresh_token: + payload = refresh_token_payload(self.refresh_token) + elif username and password: + payload = request_token_payload(username, password) + else: + raise AnsibleConnectionFailure('Username and password are required for login in absence of refresh token') + + response = self._lookup_login_url(payload) + + try: + self.refresh_token = response['refresh_token'] + self.access_token = response['access_token'] + self.connection._auth = {'Authorization': 'Bearer %s' % self.access_token} + except KeyError: + raise ConnectionError( + 'Server returned response without token info during connection authentication: %s' % response) + + def _lookup_login_url(self, payload): + """ Try to find correct login URL and get api token using this URL. + + :param payload: Token request payload + :type payload: dict + :return: token generation response + """ + preconfigured_token_path = self._get_api_token_path() + if preconfigured_token_path: + token_paths = [preconfigured_token_path] + else: + token_paths = self._get_known_token_paths() + + for url in token_paths: + try: + response = self._send_login_request(payload, url) + + except ConnectionError as e: + self.connection.queue_message('vvvv', 'REST:request to %s failed because of connection error: %s ' % ( + url, e)) + # In the case of ConnectionError caused by HTTPError we should check response code. + # Response code 400 returned in case of invalid credentials so we should stop attempts to log in and + # inform the user. 
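+                # Any other connection error just means this candidate URL is not the
+                # token endpoint, so fall through and try the next known token path.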
+ if hasattr(e, 'http_code') and e.http_code == 400: + raise + else: + if not preconfigured_token_path: + self._set_api_token_path(url) + return response + + raise ConnectionError(INVALID_API_TOKEN_PATH_MSG if preconfigured_token_path else MISSING_API_TOKEN_PATH_MSG) + + def _send_login_request(self, payload, url): + self._display(HTTPMethod.POST, 'login', url) + response, response_data = self._send_auth_request( + url, json.dumps(payload), method=HTTPMethod.POST, headers=BASE_HEADERS + ) + self._display(HTTPMethod.POST, 'login:status_code', response.getcode()) + + response = self._response_to_json(self._get_response_value(response_data)) + return response + + def logout(self): + auth_payload = { + 'grant_type': 'revoke_token', + 'access_token': self.access_token, + 'token_to_revoke': self.refresh_token + } + + url = self._get_api_token_path() + + self._display(HTTPMethod.POST, 'logout', url) + response, dummy = self._send_auth_request(url, json.dumps(auth_payload), method=HTTPMethod.POST, + headers=BASE_HEADERS) + self._display(HTTPMethod.POST, 'logout:status_code', response.getcode()) + + self.refresh_token = None + self.access_token = None + + def _send_auth_request(self, path, data, **kwargs): + error_msg_prefix = 'Server returned an error during authentication request' + return self._send_service_request(path, error_msg_prefix, data=data, **kwargs) + + def _send_service_request(self, path, error_msg_prefix, data=None, **kwargs): + try: + self._ignore_http_errors = True + return self.connection.send(path, data, **kwargs) + except HTTPError as e: + # HttpApi connection does not read the error response from HTTPError, so we do it here and wrap it up in + # ConnectionError, so the actual error message is displayed to the user. + error_msg = self._response_to_json(to_text(e.read())) + raise ConnectionError('%s: %s' % (error_msg_prefix, error_msg), http_code=e.code) + finally: + self._ignore_http_errors = False + + def update_auth(self, response, response_data): + # With tokens, authentication should not be checked and updated on each request + return None + + def send_request(self, url_path, http_method, body_params=None, path_params=None, query_params=None): + url = construct_url_path(url_path, path_params, query_params) + data = json.dumps(body_params) if body_params else None + try: + self._display(http_method, 'url', url) + if data: + self._display(http_method, 'data', data) + + response, response_data = self.connection.send(url, data, method=http_method, headers=BASE_HEADERS) + + value = self._get_response_value(response_data) + self._display(http_method, 'response', value) + + return { + ResponseParams.SUCCESS: True, + ResponseParams.STATUS_CODE: response.getcode(), + ResponseParams.RESPONSE: self._response_to_json(value) + } + # Being invoked via JSON-RPC, this method does not serialize and pass HTTPError correctly to the method caller. + # Thus, in order to handle non-200 responses, we need to wrap them into a simple structure and pass explicitly. 
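+        # The wrapper mirrors the success branch above: ResponseParams.SUCCESS is set
+        # to False, ResponseParams.STATUS_CODE carries the HTTP status code, and
+        # ResponseParams.RESPONSE carries the parsed error body.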
+ except HTTPError as e: + error_msg = to_text(e.read()) + self._display(http_method, 'error', error_msg) + return { + ResponseParams.SUCCESS: False, + ResponseParams.STATUS_CODE: e.code, + ResponseParams.RESPONSE: self._response_to_json(error_msg) + } + + def upload_file(self, from_path, to_url): + url = construct_url_path(to_url) + self._display(HTTPMethod.POST, 'upload', url) + with open(from_path, 'rb') as src_file: + rf = RequestField('fileToUpload', src_file.read(), os.path.basename(src_file.name)) + rf.make_multipart() + body, content_type = encode_multipart_formdata([rf]) + + headers = dict(BASE_HEADERS) + headers['Content-Type'] = content_type + headers['Content-Length'] = len(body) + + dummy, response_data = self.connection.send(url, data=body, method=HTTPMethod.POST, headers=headers) + value = self._get_response_value(response_data) + self._display(HTTPMethod.POST, 'upload:response', value) + return self._response_to_json(value) + + def download_file(self, from_url, to_path, path_params=None): + url = construct_url_path(from_url, path_params=path_params) + self._display(HTTPMethod.GET, 'download', url) + response, response_data = self.connection.send(url, data=None, method=HTTPMethod.GET, headers=BASE_HEADERS) + + if os.path.isdir(to_path): + filename = extract_filename_from_headers(response.info()) + to_path = os.path.join(to_path, filename) + + with open(to_path, "wb") as output_file: + output_file.write(response_data.getvalue()) + self._display(HTTPMethod.GET, 'downloaded', to_path) + + def handle_httperror(self, exc): + is_auth_related_code = exc.code == TOKEN_EXPIRATION_STATUS_CODE or exc.code == UNAUTHORIZED_STATUS_CODE + if not self._ignore_http_errors and is_auth_related_code: + self.connection._auth = None + self.login(self.connection.get_option('remote_user'), self.connection.get_option('password')) + return True + # False means that the exception will be passed further to the caller + return False + + def _display(self, http_method, title, msg=''): + self.connection.queue_message('vvvv', 'REST:%s:%s:%s\n%s' % (http_method, self.connection._url, title, msg)) + + @staticmethod + def _get_response_value(response_data): + return to_text(response_data.getvalue()) + + def _get_api_spec_path(self): + return self.get_option('spec_path') + + def _get_known_token_paths(self): + """Generate list of token generation urls based on list of versions supported by device(if exposed via API) or + default list of API versions. + + :returns: list of token generation urls + :rtype: generator + """ + try: + api_versions = self._get_supported_api_versions() + except ConnectionError: + # API versions API is not supported we need to check all known version + api_versions = DEFAULT_API_VERSIONS + + return [TOKEN_PATH_TEMPLATE.format(version) for version in api_versions] + + def _get_supported_api_versions(self): + """ + Fetch list of API versions supported by device. 
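+        The list is requested from GET_API_VERSIONS_PATH ('/api/versions'); when that
+        endpoint is unavailable, the caller falls back to DEFAULT_API_VERSIONS.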
+ + :return: list of API versions suitable for device + :rtype: list + """ + # Try to fetch supported API version + http_method = HTTPMethod.GET + response, response_data = self._send_service_request( + path=GET_API_VERSIONS_PATH, + error_msg_prefix="Can't fetch list of supported api versions", + method=http_method, + headers=BASE_HEADERS + ) + + value = self._get_response_value(response_data) + self._display(http_method, 'response', value) + api_versions_info = self._response_to_json(value) + return api_versions_info["supportedVersions"] + + def _get_api_token_path(self): + return self.get_option(API_TOKEN_PATH_OPTION_NAME) + + def _set_api_token_path(self, url): + return self.set_option(API_TOKEN_PATH_OPTION_NAME, url) + + @staticmethod + def _response_to_json(response_text): + try: + return json.loads(response_text) if response_text else {} + # JSONDecodeError only available on Python 3.5+ + except getattr(json.decoder, 'JSONDecodeError', ValueError): + raise ConnectionError('Invalid JSON response: %s' % response_text) + + def get_operation_spec(self, operation_name): + return self.api_spec[SpecProp.OPERATIONS].get(operation_name, None) + + def get_operation_specs_by_model_name(self, model_name): + if model_name: + return self.api_spec[SpecProp.MODEL_OPERATIONS].get(model_name, None) + else: + return None + + def get_model_spec(self, model_name): + return self.api_spec[SpecProp.MODELS].get(model_name, None) + + def validate_data(self, operation_name, data): + return self.api_validator.validate_data(operation_name, data) + + def validate_query_params(self, operation_name, params): + return self.api_validator.validate_query_params(operation_name, params) + + def validate_path_params(self, operation_name, params): + return self.api_validator.validate_path_params(operation_name, params) + + @property + def api_spec(self): + if self._api_spec is None: + spec_path_url = self._get_api_spec_path() + response = self.send_request(url_path=spec_path_url, http_method=HTTPMethod.GET) + if response[ResponseParams.SUCCESS]: + self._api_spec = FdmSwaggerParser().parse_spec(response[ResponseParams.RESPONSE]) + else: + raise ConnectionError('Failed to download API specification. Status code: %s. Response: %s' % ( + response[ResponseParams.STATUS_CODE], response[ResponseParams.RESPONSE])) + return self._api_spec + + @property + def api_validator(self): + if self._api_validator is None: + self._api_validator = FdmSwaggerValidator(self.api_spec) + return self._api_validator + + +def construct_url_path(path, path_params=None, query_params=None): + url = path + if path_params: + url = url.format(**path_params) + if query_params: + url += "?" 
+ urlencode(query_params) + return url + + +def extract_filename_from_headers(response_info): + content_header_regex = r'attachment; ?filename="?([^"]+)' + match = re.match(content_header_regex, response_info.get('Content-Disposition')) + if match: + return match.group(1) + else: + raise ValueError("No appropriate Content-Disposition header is specified.") diff --git a/plugins/inventory/__init__.py b/plugins/inventory/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/plugins/inventory/cloudscale.py b/plugins/inventory/cloudscale.py new file mode 100644 index 0000000000..5f8be5d34e --- /dev/null +++ b/plugins/inventory/cloudscale.py @@ -0,0 +1,211 @@ +# -*- coding: utf-8 -*- +# +# Copyright: (c) 2018, Gaudenz Steinlin +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = ''' +--- +name: cloudscale +plugin_type: inventory +author: + - Gaudenz Steinlin (@gaudenz) +short_description: cloudscale.ch inventory source +description: + - Get inventory hosts from cloudscale.ch API + - Uses a YAML configuration file ending with either I(cloudscale.yml) or I(cloudscale.yaml) to set parameter values (also see examples). +extends_documentation_fragment: + - constructed +options: + plugin: + description: | + Token that ensures this is a source file for the 'cloudscale' + plugin. + required: True + choices: ['cloudscale'] + inventory_hostname: + description: | + What to register as the inventory hostname. + If set to 'uuid' the uuid of the server will be used and a + group will be created for the server name. + If set to 'name' the name of the server will be used unless + there is more than one server with the same name in which + case the 'uuid' logic will be used. + type: str + choices: + - name + - uuid + default: "name" + ansible_host: + description: | + Which IP address to register as the ansible_host. If the + requested value does not exist or this is set to 'none', no + ansible_host will be set. + type: str + choices: + - public_v4 + - public_v6 + - private + - none + default: public_v4 + api_token: + description: cloudscale.ch API token + env: + - name: CLOUDSCALE_API_TOKEN + type: str + api_timeout: + description: Timeout in seconds for calls to the cloudscale.ch API.
+ default: 30 + type: int +''' + +EXAMPLES = r''' +# Inventory file whose name ends with cloudscale.yml or cloudscale.yaml, in YAML format +# Example command line: ansible-inventory --list -i inventory_cloudscale.yml + +plugin: cloudscale + +# Example grouping by tag key "project" +plugin: cloudscale +keyed_groups: + - prefix: project + key: cloudscale.tags.project + +# Example grouping by key "operating_system" lowercased and prefixed with "os" +plugin: cloudscale +keyed_groups: + - prefix: os + key: cloudscale.image.operating_system | lower +''' + +from collections import defaultdict +from json import loads + +from ansible.errors import AnsibleError +from ansible_collections.community.general.plugins.module_utils.cloudscale import API_URL +from ansible.module_utils.urls import open_url +from ansible.inventory.group import to_safe_group_name +from ansible.plugins.inventory import BaseInventoryPlugin, Constructable + +iface_type_map = { + 'public_v4': ('public', 4), + 'public_v6': ('public', 6), + 'private': ('private', 4), + 'none': (None, None), +} + + +class InventoryModule(BaseInventoryPlugin, Constructable): + + NAME = 'community.general.cloudscale' + + def _get_server_list(self): + # Get list of servers from cloudscale.ch API + response = open_url( + API_URL + '/servers', + headers={'Authorization': 'Bearer %s' % self._token} + ) + return loads(response.read()) + + def verify_file(self, path): + ''' + :param path: the path to the inventory config file + :return: True if the config file can be consumed by this plugin, False otherwise + ''' + if super(InventoryModule, self).verify_file(path): + if path.endswith(('cloudscale.yml', 'cloudscale.yaml')): + return True + self.display.debug( + "cloudscale inventory filename must end with 'cloudscale.yml' or 'cloudscale.yaml'" + ) + return False + + def parse(self, inventory, loader, path, cache=True): + super(InventoryModule, self).parse(inventory, loader, path) + + self._read_config_data(path) + + self._token = self.get_option('api_token') + if not self._token: + raise AnsibleError('Could not find an API token.
Set the ' + 'CLOUDSCALE_API_TOKEN environment variable.') + + inventory_hostname = self.get_option('inventory_hostname') + if inventory_hostname not in ('name', 'uuid'): + raise AnsibleError('Invalid value for option inventory_hostname: %s' + % inventory_hostname) + + ansible_host = self.get_option('ansible_host') + if ansible_host not in iface_type_map: + raise AnsibleError('Invalid value for option ansible_host: %s' + % ansible_host) + + # Merge servers with the same name + firstpass = defaultdict(list) + for server in self._get_server_list(): + firstpass[server['name']].append(server) + + # Add servers to inventory + for name, servers in firstpass.items(): + if len(servers) == 1 and inventory_hostname == 'name': + self.inventory.add_host(name) + servers[0]['inventory_hostname'] = name + else: + # Two or more servers with the same name exist, create a group + # with this name and add the servers by UUID + group_name = to_safe_group_name(name) + if group_name not in self.inventory.groups: + self.inventory.add_group(group_name) + for server in servers: + self.inventory.add_host(server['uuid'], group_name) + server['inventory_hostname'] = server['uuid'] + + # Set variables + iface_type, iface_version = iface_type_map[ansible_host] + for server in servers: + hostname = server.pop('inventory_hostname') + if ansible_host != 'none': + addresses = [address['address'] + for interface in server['interfaces'] + for address in interface['addresses'] + if interface['type'] == iface_type + and address['version'] == iface_version] + + if len(addresses) > 0: + self.inventory.set_variable( + hostname, + 'ansible_host', + addresses[0], + ) + self.inventory.set_variable( + hostname, + 'cloudscale', + server, + ) + + variables = self.inventory.hosts[hostname].get_vars() + # Set composed variables + self._set_composite_vars( + self.get_option('compose'), + variables, + hostname, + self.get_option('strict'), + ) + + # Add host to composed groups + self._add_host_to_composed_groups( + self.get_option('groups'), + variables, + hostname, + self.get_option('strict'), + ) + + # Add host to keyed groups + self._add_host_to_keyed_groups( + self.get_option('keyed_groups'), + variables, + hostname, + self.get_option('strict'), + ) diff --git a/plugins/inventory/docker_machine.py b/plugins/inventory/docker_machine.py new file mode 100644 index 0000000000..3dcd61d25e --- /dev/null +++ b/plugins/inventory/docker_machine.py @@ -0,0 +1,256 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2019, Ximon Eighteen +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = ''' + name: docker_machine + plugin_type: inventory + author: Ximon Eighteen (@ximon18) + short_description: Docker Machine inventory source + requirements: + - L(Docker Machine,https://docs.docker.com/machine/) + extends_documentation_fragment: + - constructed + description: + - Get inventory hosts from Docker Machine. + - Uses a YAML configuration file that ends with docker_machine.(yml|yaml). + - The plugin sets standard host variables C(ansible_host), C(ansible_port), C(ansible_user) and C(ansible_ssh_private_key_file). + - The plugin stores the Docker Machine 'env' output variables in I(dm_) prefixed host variables. + + options: + plugin: + description: token that ensures this is a source file for the C(docker_machine) plugin.
+ required: yes + choices: ['docker_machine'] + daemon_env: + description: + - Whether docker daemon connection environment variables should be fetched, and how to behave if they cannot be fetched. + - With C(require) and C(require-silently), fetch them and skip any host for which they cannot be fetched. + A warning will be issued for any skipped host if the choice is C(require). + - With C(optional) and C(optional-silently), fetch them and do not skip hosts for which they cannot be fetched. + A warning will be issued for hosts where they cannot be fetched if the choice is C(optional). + - With C(skip), do not attempt to fetch the docker daemon connection environment variables. + - If fetched successfully, the variables will be prefixed with I(dm_) and stored as host variables. + type: str + choices: + - require + - require-silently + - optional + - optional-silently + - skip + default: require + running_required: + description: when true, hosts which Docker Machine indicates are in a state other than C(running) will be skipped. + type: bool + default: yes + verbose_output: + description: when true, include all available nodes metadata (e.g. Image, Region, Size) as a JSON object named C(docker_machine_node_attributes). + type: bool + default: yes +''' + +EXAMPLES = ''' +# Minimal example +plugin: docker_machine + +# Example using constructed features to create a group per Docker Machine driver +# (https://docs.docker.com/machine/drivers/), e.g.: +# $ docker-machine create --driver digitalocean ... mymachine +# $ ansible-inventory -i ./path/to/docker-machine.yml --host=mymachine +# { +# ... +# "digitalocean": { +# "hosts": [ +# "mymachine" +# ] +# ... +# } +strict: no +keyed_groups: + - separator: '' + key: docker_machine_node_attributes.DriverName + +# Example grouping hosts by Docker Machine tag +strict: no +keyed_groups: + - prefix: tag + key: 'dm_tags' + +# Example using compose to override the default SSH behaviour of asking the user to accept the remote host key +compose: + ansible_ssh_common_args: '"-o StrictHostKeyChecking=accept-new"' +''' + +from ansible.errors import AnsibleError +from ansible.module_utils._text import to_native +from ansible.module_utils._text import to_text +from ansible.module_utils.common.process import get_bin_path +from ansible.plugins.inventory import BaseInventoryPlugin, Constructable, Cacheable +from ansible.utils.display import Display + +import json +import re +import subprocess + +display = Display() + + +class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable): + ''' Host inventory parser for ansible using Docker machine as source. ''' + + NAME = 'community.general.docker_machine' + + DOCKER_MACHINE_PATH = None + + def _run_command(self, args): + if not self.DOCKER_MACHINE_PATH: + try: + self.DOCKER_MACHINE_PATH = get_bin_path('docker-machine') + except ValueError as e: + raise AnsibleError(to_native(e)) + + command = [self.DOCKER_MACHINE_PATH] + command.extend(args) + display.debug('Executing command {0}'.format(command)) + try: + result = subprocess.check_output(command) + except subprocess.CalledProcessError as e: + display.warning('Exception {0} caught while executing command {1}, this was the original exception: {2}'.format(type(e).__name__, command, e)) + raise e + + return to_text(result).strip() + + def _get_docker_daemon_variables(self, machine_name): + ''' + Capture settings from Docker Machine that would be needed to connect to the remote Docker daemon installed on + the Docker Machine remote host.
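+        Returns a list of (name, value) tuples for the DOCKER_* variables found in the
+        command output; _populate() later registers each one as a dm_-prefixed host variable.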
Note: passing '--shell=sh' is a workaround for 'Error: Unknown shell'. + ''' + try: + env_lines = self._run_command(['env', '--shell=sh', machine_name]).splitlines() + except subprocess.CalledProcessError: + # This can happen when the machine is created but provisioning is incomplete + return [] + + # example output of docker-machine env --shell=sh: + # export DOCKER_TLS_VERIFY="1" + # export DOCKER_HOST="tcp://134.209.204.160:2376" + # export DOCKER_CERT_PATH="/root/.docker/machine/machines/routinator" + # export DOCKER_MACHINE_NAME="routinator" + # # Run this command to configure your shell: + # # eval $(docker-machine env --shell=bash routinator) + + # capture any of the DOCKER_xxx variables that were output and create Ansible host vars + # with the same name and value but with a dm_ name prefix. + vars = [] + for line in env_lines: + match = re.search('(DOCKER_[^=]+)="([^"]+)"', line) + if match: + env_var_name = match.group(1) + env_var_value = match.group(2) + vars.append((env_var_name, env_var_value)) + + return vars + + def _get_machine_names(self): + # Filter out machines that are not in the Running state as we probably can't do anything useful + # with them. + ls_command = ['ls', '-q'] + if self.get_option('running_required'): + ls_command.extend(['--filter', 'state=Running']) + + try: + ls_lines = self._run_command(ls_command) + except subprocess.CalledProcessError: + return [] + + return ls_lines.splitlines() + + def _inspect_docker_machine_host(self, node): + try: + inspect_lines = self._run_command(['inspect', node]) + except subprocess.CalledProcessError: + return None + + return json.loads(inspect_lines) + + def _should_skip_host(self, machine_name, env_var_tuples, daemon_env): + if not env_var_tuples: + warning_prefix = 'Unable to fetch Docker daemon env vars from Docker Machine for host {0}'.format(machine_name) + if daemon_env in ('require', 'require-silently'): + if daemon_env == 'require': + display.warning('{0}: host will be skipped'.format(warning_prefix)) + return True + else: # 'optional', 'optional-silently' + if daemon_env == 'optional': + display.warning('{0}: host will lack dm_DOCKER_xxx variables'.format(warning_prefix)) + return False + + def _populate(self): + daemon_env = self.get_option('daemon_env') + try: + for self.node in self._get_machine_names(): + self.node_attrs = self._inspect_docker_machine_host(self.node) + if not self.node_attrs: + continue + + machine_name = self.node_attrs['Driver']['MachineName'] + + # query `docker-machine env` to obtain remote Docker daemon connection settings in the form of commands + # that could be used to set environment variables to influence a local Docker client: + if daemon_env == 'skip': + env_var_tuples = [] + else: + env_var_tuples = self._get_docker_daemon_variables(machine_name) + if self._should_skip_host(machine_name, env_var_tuples, daemon_env): + continue + + # add an entry in the inventory for this host + self.inventory.add_host(machine_name) + + # set standard Ansible remote host connection settings to details captured from `docker-machine` + # see: https://docs.ansible.com/ansible/latest/user_guide/intro_inventory.html + self.inventory.set_variable(machine_name, 'ansible_host', self.node_attrs['Driver']['IPAddress']) + self.inventory.set_variable(machine_name, 'ansible_port', self.node_attrs['Driver']['SSHPort']) + self.inventory.set_variable(machine_name, 'ansible_user', self.node_attrs['Driver']['SSHUser']) + self.inventory.set_variable(machine_name, 'ansible_ssh_private_key_file',
self.node_attrs['Driver']['SSHKeyPath']) + + # set variables based on Docker Machine tags + tags = self.node_attrs['Driver'].get('Tags') or '' + self.inventory.set_variable(machine_name, 'dm_tags', tags) + + # set variables based on Docker Machine env variables + for kv in env_var_tuples: + self.inventory.set_variable(machine_name, 'dm_{0}'.format(kv[0]), kv[1]) + + if self.get_option('verbose_output'): + self.inventory.set_variable(machine_name, 'docker_machine_node_attributes', self.node_attrs) + + # Use constructed if applicable + strict = self.get_option('strict') + + # Composed variables + self._set_composite_vars(self.get_option('compose'), self.node_attrs, machine_name, strict=strict) + + # Complex groups based on jinja2 conditionals, hosts that meet the conditional are added to group + self._add_host_to_composed_groups(self.get_option('groups'), self.node_attrs, machine_name, strict=strict) + + # Create groups based on variable values and add the corresponding hosts to it + self._add_host_to_keyed_groups(self.get_option('keyed_groups'), self.node_attrs, machine_name, strict=strict) + + except Exception as e: + raise AnsibleError('Unable to fetch hosts from Docker Machine, this was the original exception: %s' % + to_native(e), orig_exc=e) + + def verify_file(self, path): + """Return the possibility of a file being consumable by this plugin.""" + return ( + super(InventoryModule, self).verify_file(path) and + path.endswith((self.NAME + '.yaml', self.NAME + '.yml'))) + + def parse(self, inventory, loader, path, cache=True): + super(InventoryModule, self).parse(inventory, loader, path, cache) + self._read_config_data(path) + self._populate() diff --git a/plugins/inventory/docker_swarm.py b/plugins/inventory/docker_swarm.py new file mode 100644 index 0000000000..05de1a4dde --- /dev/null +++ b/plugins/inventory/docker_swarm.py @@ -0,0 +1,255 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2018, Stefan Heitmueller +# Copyright (c) 2018 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +DOCUMENTATION = ''' + name: docker_swarm + plugin_type: inventory + author: + - Stefan Heitmüller (@morph027) + short_description: Ansible dynamic inventory plugin for Docker swarm nodes. + requirements: + - python >= 2.7 + - L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 1.10.0 + extends_documentation_fragment: + - constructed + description: + - Reads inventories from the Docker swarm API. + - Uses a YAML configuration file docker_swarm.[yml|yaml]. + - "The plugin returns the following groups of swarm nodes: I(all) - all hosts; I(workers) - all worker nodes; + I(managers) - all manager nodes; I(leader) - the swarm leader node; + I(nonleaders) - all nodes except the swarm leader." + options: + plugin: + description: The name of this plugin, it should always be set to C(docker_swarm) for this plugin to + recognize it as its own. + type: str + required: true + choices: docker_swarm + docker_host: + description: + - Socket of a Docker swarm manager node (C(tcp), C(unix)). + - "Use C(unix://var/run/docker.sock) to connect via local socket." + type: str + required: true + aliases: [ docker_url ] + verbose_output: + description: Toggle to (not) include all available nodes metadata (e.g.
C(Platform), C(Architecture), C(OS), + C(EngineVersion)) + type: bool + default: yes + tls: + description: Connect using TLS without verifying the authenticity of the Docker host server. + type: bool + default: no + validate_certs: + description: Toggle if connecting using TLS with or without verifying the authenticity of the Docker + host server. + type: bool + default: no + aliases: [ tls_verify ] + client_key: + description: Path to the client's TLS key file. + type: path + aliases: [ tls_client_key, key_path ] + ca_cert: + description: Use a CA certificate when performing server verification by providing the path to a CA + certificate file. + type: path + aliases: [ tls_ca_cert, cacert_path ] + client_cert: + description: Path to the client's TLS certificate file. + type: path + aliases: [ tls_client_cert, cert_path ] + tls_hostname: + description: When verifying the authenticity of the Docker host server, provide the expected name of + the server. + type: str + ssl_version: + description: Provide a valid SSL version number. Default value determined by ssl.py module. + type: str + api_version: + description: + - The version of the Docker API running on the Docker Host. + - Defaults to the latest version of the API supported by docker-py. + type: str + aliases: [ docker_api_version ] + timeout: + description: + - The maximum amount of time in seconds to wait on a response from the API. + - If the value is not specified in the task, the value of environment variable C(DOCKER_TIMEOUT) + will be used instead. If the environment variable is not set, the default value will be used. + type: int + default: 60 + aliases: [ time_out ] + include_host_uri: + description: Toggle to return the additional attribute C(ansible_host_uri) which contains the URI of the + swarm leader in format of C(tcp://172.16.0.1:2376). This value may be used without additional + modification as value of option I(docker_host) in Docker Swarm modules when connecting via API. + The port always defaults to C(2376). + type: bool + default: no + include_host_uri_port: + description: Override the detected port number included in I(ansible_host_uri) + type: int +''' + +EXAMPLES = ''' +# Minimal example using local docker +plugin: docker_swarm +docker_host: unix://var/run/docker.sock + +# Minimal example using remote docker +plugin: docker_swarm +docker_host: tcp://my-docker-host:2375 + +# Example using remote docker with unverified TLS +plugin: docker_swarm +docker_host: tcp://my-docker-host:2376 +tls: yes + +# Example using remote docker with verified TLS and client certificate verification +plugin: docker_swarm +docker_host: tcp://my-docker-host:2376 +validate_certs: yes +ca_cert: /somewhere/ca.pem +client_key: /somewhere/key.pem +client_cert: /somewhere/cert.pem + +# Example using constructed features to create groups and set ansible_host +plugin: docker_swarm +docker_host: tcp://my-docker-host:2375 +strict: False +keyed_groups: + # add e.g. x86_64 hosts to an arch_x86_64 group + - prefix: arch + key: 'Description.Platform.Architecture' + # add e.g. linux hosts to an os_linux group + - prefix: os + key: 'Description.Platform.OS' + # create a group per node label + # e.g. 
a node labeled w/ "production" ends up in group "label_production" + # hint: labels containing special characters will be converted to safe names + - key: 'Spec.Labels' + prefix: label +''' + +from ansible.errors import AnsibleError +from ansible.module_utils._text import to_native +from ansible_collections.community.general.plugins.module_utils.docker.common import update_tls_hostname, get_connect_params +from ansible.plugins.inventory import BaseInventoryPlugin, Constructable +from ansible.parsing.utils.addresses import parse_address + +try: + import docker + HAS_DOCKER = True +except ImportError: + HAS_DOCKER = False + + +class InventoryModule(BaseInventoryPlugin, Constructable): + ''' Host inventory parser for ansible using Docker swarm as source. ''' + + NAME = 'community.general.docker_swarm' + + def _fail(self, msg): + raise AnsibleError(msg) + + def _populate(self): + raw_params = dict( + docker_host=self.get_option('docker_host'), + tls=self.get_option('tls'), + tls_verify=self.get_option('validate_certs'), + key_path=self.get_option('client_key'), + cacert_path=self.get_option('ca_cert'), + cert_path=self.get_option('client_cert'), + tls_hostname=self.get_option('tls_hostname'), + api_version=self.get_option('api_version'), + timeout=self.get_option('timeout'), + ssl_version=self.get_option('ssl_version'), + debug=None, + ) + update_tls_hostname(raw_params) + connect_params = get_connect_params(raw_params, fail_function=self._fail) + self.client = docker.DockerClient(**connect_params) + self.inventory.add_group('all') + self.inventory.add_group('manager') + self.inventory.add_group('worker') + self.inventory.add_group('leader') + self.inventory.add_group('nonleaders') + + if self.get_option('include_host_uri'): + if self.get_option('include_host_uri_port'): + host_uri_port = str(self.get_option('include_host_uri_port')) + elif self.get_option('tls') or self.get_option('validate_certs'): + host_uri_port = '2376' + else: + host_uri_port = '2375' + + try: + self.nodes = self.client.nodes.list() + for self.node in self.nodes: + self.node_attrs = self.client.nodes.get(self.node.id).attrs + self.inventory.add_host(self.node_attrs['ID']) + self.inventory.add_host(self.node_attrs['ID'], group=self.node_attrs['Spec']['Role']) + self.inventory.set_variable(self.node_attrs['ID'], 'ansible_host', + self.node_attrs['Status']['Addr']) + if self.get_option('include_host_uri'): + self.inventory.set_variable(self.node_attrs['ID'], 'ansible_host_uri', + 'tcp://' + self.node_attrs['Status']['Addr'] + ':' + host_uri_port) + if self.get_option('verbose_output'): + self.inventory.set_variable(self.node_attrs['ID'], 'docker_swarm_node_attributes', self.node_attrs) + if 'ManagerStatus' in self.node_attrs: + if self.node_attrs['ManagerStatus'].get('Leader'): + # This is workaround of bug in Docker when in some cases the Leader IP is 0.0.0.0 + # Check moby/moby#35437 for details + swarm_leader_ip = parse_address(self.node_attrs['ManagerStatus']['Addr'])[0] or \ + self.node_attrs['Status']['Addr'] + if self.get_option('include_host_uri'): + self.inventory.set_variable(self.node_attrs['ID'], 'ansible_host_uri', + 'tcp://' + swarm_leader_ip + ':' + host_uri_port) + self.inventory.set_variable(self.node_attrs['ID'], 'ansible_host', swarm_leader_ip) + self.inventory.add_host(self.node_attrs['ID'], group='leader') + else: + self.inventory.add_host(self.node_attrs['ID'], group='nonleaders') + else: + self.inventory.add_host(self.node_attrs['ID'], group='nonleaders') + # Use constructed if applicable + strict = 
self.get_option('strict') + # Composed variables + self._set_composite_vars(self.get_option('compose'), + self.node_attrs, + self.node_attrs['ID'], + strict=strict) + # Complex groups based on jinja2 conditionals, hosts that meet the conditional are added to group + self._add_host_to_composed_groups(self.get_option('groups'), + self.node_attrs, + self.node_attrs['ID'], + strict=strict) + # Create groups based on variable values and add the corresponding hosts to it + self._add_host_to_keyed_groups(self.get_option('keyed_groups'), + self.node_attrs, + self.node_attrs['ID'], + strict=strict) + except Exception as e: + raise AnsibleError('Unable to fetch hosts from Docker swarm API, this was the original exception: %s' % + to_native(e)) + + def verify_file(self, path): + """Return the possibility of a file being consumable by this plugin.""" + return ( + super(InventoryModule, self).verify_file(path) and + path.endswith((self.NAME + '.yaml', self.NAME + '.yml'))) + + def parse(self, inventory, loader, path, cache=True): + if not HAS_DOCKER: + raise AnsibleError('The Docker swarm dynamic inventory plugin requires the Docker SDK for Python: ' + 'https://github.com/docker/docker-py.') + super(InventoryModule, self).parse(inventory, loader, path, cache) + self._read_config_data(path) + self._populate() diff --git a/plugins/inventory/gitlab_runners.py b/plugins/inventory/gitlab_runners.py new file mode 100644 index 0000000000..ed8bb167ab --- /dev/null +++ b/plugins/inventory/gitlab_runners.py @@ -0,0 +1,130 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2018, Stefan Heitmueller +# Copyright (c) 2018 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +DOCUMENTATION = ''' + name: gitlab_runners + plugin_type: inventory + author: + - Stefan Heitmüller (stefan.heitmueller@gmx.com) + short_description: Ansible dynamic inventory plugin for GitLab runners. + requirements: + - python >= 2.7 + - python-gitlab > 1.8.0 + extends_documentation_fragment: + - constructed + description: + - Reads inventories from the GitLab API. + - Uses a YAML configuration file gitlab_runners.[yml|yaml]. + options: + plugin: + description: The name of this plugin, it should always be set to 'gitlab_runners' for this plugin to recognize it as its own. + type: str + required: true + choices: + - gitlab_runners + server_url: + description: The URL of the GitLab server, with protocol (i.e. http or https). + type: str + required: true + default: https://gitlab.com + api_token: + description: GitLab token for logging in. + type: str + aliases: + - private_token + - access_token + filter: + description: filter runners from GitLab API + type: str + choices: ['active', 'paused', 'online', 'specific', 'shared'] + verbose_output: + description: Toggle to (not) include all available nodes metadata + type: bool + default: yes +''' + +EXAMPLES = ''' +# gitlab_runners.yml +plugin: gitlab_runners +server_url: https://gitlab.com + +# Example using constructed features to create groups and set ansible_host +plugin: gitlab_runners +server_url: https://gitlab.com +strict: False +keyed_groups: + # add e.g. amd64 hosts to an arch_amd64 group + - prefix: arch + key: 'architecture' + # add e.g. linux hosts to an os_linux group + - prefix: os + key: 'platform' + # create a group per runner tag + # e.g.
a runner tagged w/ "production" ends up in group "label_production" + # hint: labels containing special characters will be converted to safe names + - key: 'tag_list' + prefix: tag +''' + +from ansible.errors import AnsibleError, AnsibleParserError +from ansible.module_utils._text import to_native +from ansible.plugins.inventory import BaseInventoryPlugin, Constructable + +try: + import gitlab + HAS_GITLAB = True +except ImportError: + HAS_GITLAB = False + + +class InventoryModule(BaseInventoryPlugin, Constructable): + ''' Host inventory parser for ansible using GitLab API as source. ''' + + NAME = 'community.general.gitlab_runners' + + def _populate(self): + gl = gitlab.Gitlab(self.get_option('server_url'), private_token=self.get_option('api_token')) + self.inventory.add_group('gitlab_runners') + try: + if self.get_option('filter'): + runners = gl.runners.all(scope=self.get_option('filter')) + else: + runners = gl.runners.all() + for runner in runners: + host = str(runner['id']) + ip_address = runner['ip_address'] + host_attrs = vars(gl.runners.get(runner['id']))['_attrs'] + self.inventory.add_host(host, group='gitlab_runners') + self.inventory.set_variable(host, 'ansible_host', ip_address) + if self.get_option('verbose_output'): + self.inventory.set_variable(host, 'gitlab_runner_attributes', host_attrs) + + # Use constructed if applicable + strict = self.get_option('strict') + # Composed variables + self._set_composite_vars(self.get_option('compose'), host_attrs, host, strict=strict) + # Complex groups based on jinja2 conditionals, hosts that meet the conditional are added to group + self._add_host_to_composed_groups(self.get_option('groups'), host_attrs, host, strict=strict) + # Create groups based on variable values and add the corresponding hosts to it + self._add_host_to_keyed_groups(self.get_option('keyed_groups'), host_attrs, host, strict=strict) + except Exception as e: + raise AnsibleParserError('Unable to fetch hosts from GitLab API, this was the original exception: %s' % to_native(e)) + + def verify_file(self, path): + """Return the possibility of a file being consumable by this plugin.""" + return ( + super(InventoryModule, self).verify_file(path) and + path.endswith((self.NAME + ".yaml", self.NAME + ".yml"))) + + def parse(self, inventory, loader, path, cache=True): + if not HAS_GITLAB: + raise AnsibleError('The GitLab runners dynamic inventory plugin requires python-gitlab: https://python-gitlab.readthedocs.io/en/stable/') + super(InventoryModule, self).parse(inventory, loader, path, cache) + self._read_config_data(path) + self._populate() diff --git a/plugins/inventory/kubevirt.py b/plugins/inventory/kubevirt.py new file mode 100644 index 0000000000..800c223547 --- /dev/null +++ b/plugins/inventory/kubevirt.py @@ -0,0 +1,255 @@ +# Copyright (c) 2018 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +DOCUMENTATION = ''' + name: kubevirt + plugin_type: inventory + author: + - KubeVirt Team (@kubevirt) + + short_description: KubeVirt inventory source + extends_documentation_fragment: + - inventory_cache + - constructed + description: + - Fetch running VirtualMachines for one or more namespaces. + - Groups by namespace, namespace_vms and labels. + - Uses kubevirt.(yml|yaml) YAML configuration file to set parameter values.
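+        - For each matched VM the plugin sets C(ansible_host) plus the C(labels), C(annotations),
+          C(object_type), C(resource_version) and C(uid) host variables.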
+ + options: + plugin: + description: token that ensures this is a source file for the 'kubevirt' plugin. + required: True + choices: ['kubevirt'] + type: str + host_format: + description: + - Specify the format of the host in the inventory group. + default: "{namespace}-{name}-{uid}" + connections: + type: list + description: + - Optional list of cluster connection settings. If no connections are provided, the default + I(~/.kube/config) and active context will be used, and objects will be returned for all namespaces + the active user is authorized to access. + suboptions: + name: + description: + - Optional name to assign to the cluster. If not provided, a name is constructed from the server + and port. + type: str + kubeconfig: + description: + - Path to an existing Kubernetes config file. If not provided, and no other connection + options are provided, the OpenShift client will attempt to load the default + configuration file from I(~/.kube/config.json). Can also be specified via K8S_AUTH_KUBECONFIG + environment variable. + type: str + context: + description: + - The name of a context found in the config file. Can also be specified via K8S_AUTH_CONTEXT environment + variable. + type: str + host: + description: + - Provide a URL for accessing the API. Can also be specified via K8S_AUTH_HOST environment variable. + type: str + api_key: + description: + - Token used to authenticate with the API. Can also be specified via K8S_AUTH_API_KEY environment + variable. + type: str + username: + description: + - Provide a username for authenticating with the API. Can also be specified via K8S_AUTH_USERNAME + environment variable. + type: str + password: + description: + - Provide a password for authenticating with the API. Can also be specified via K8S_AUTH_PASSWORD + environment variable. + type: str + cert_file: + description: + - Path to a certificate used to authenticate with the API. Can also be specified via K8S_AUTH_CERT_FILE + environment variable. + type: str + key_file: + description: + - Path to a key file used to authenticate with the API. Can also be specified via K8S_AUTH_HOST + environment variable. + type: str + ssl_ca_cert: + description: + - Path to a CA certificate used to authenticate with the API. Can also be specified via + K8S_AUTH_SSL_CA_CERT environment variable. + type: str + verify_ssl: + description: + - "Whether or not to verify the API server's SSL certificates. Can also be specified via + K8S_AUTH_VERIFY_SSL environment variable." + type: bool + namespaces: + description: + - List of namespaces. If not specified, will fetch all virtual machines for all namespaces user is authorized + to access. + type: list + network_name: + description: + - In case of multiple network attached to virtual machine, define which interface should be returned as primary IP + address. + type: str + api_version: + description: + - "Specify the KubeVirt API version." + type: str + annotation_variable: + description: + - "Specify the name of the annotation which provides data, which should be used as inventory host variables." + - "Note, that the value in ansible annotations should be json." 
+ type: str + default: 'ansible' + requirements: + - "openshift >= 0.6" + - "PyYAML >= 3.11" +''' + +EXAMPLES = ''' +# File must be named kubevirt.yaml or kubevirt.yml + +# Authenticate with token, and return all virtual machines for all namespaces +plugin: kubevirt +connections: + - host: https://kubevirt.io + token: xxxxxxxxxxxxxxxx + ssl_verify: false + +# Use default config (~/.kube/config) file and active context, and return vms with interfaces +# connected to network myovsnetwork and from namespace vms +plugin: kubevirt +connections: + - namespaces: + - vms + network_name: myovsnetwork +''' + +import json + +from ansible_collections.community.kubernetes.plugins.inventory.k8s import K8sInventoryException, InventoryModule as K8sInventoryModule, format_dynamic_api_exc + +try: + from openshift.dynamic.exceptions import DynamicApiError +except ImportError: + pass + + +API_VERSION = 'kubevirt.io/v1alpha3' + + +class InventoryModule(K8sInventoryModule): + NAME = 'community.general.kubevirt' + + def setup(self, config_data, cache, cache_key): + self.config_data = config_data + super(InventoryModule, self).setup(config_data, cache, cache_key) + + def fetch_objects(self, connections): + client = self.get_api_client() + vm_format = self.config_data.get('host_format', '{namespace}-{name}-{uid}') + + if connections: + for connection in connections: + client = self.get_api_client(**connection) + name = connection.get('name', self.get_default_host_name(client.configuration.host)) + if connection.get('namespaces'): + namespaces = connection['namespaces'] + else: + namespaces = self.get_available_namespaces(client) + interface_name = connection.get('network_name') + api_version = connection.get('api_version', API_VERSION) + annotation_variable = connection.get('annotation_variable', 'ansible') + for namespace in namespaces: + self.get_vms_for_namespace(client, name, namespace, vm_format, interface_name, api_version, annotation_variable) + else: + name = self.get_default_host_name(client.configuration.host) + namespaces = self.get_available_namespaces(client) + for namespace in namespaces: + self.get_vms_for_namespace(client, name, namespace, vm_format, None, api_version, annotation_variable) + + def get_vms_for_namespace(self, client, name, namespace, name_format, interface_name=None, api_version=None, annotation_variable=None): + v1_vm = client.resources.get(api_version=api_version, kind='VirtualMachineInstance') + try: + obj = v1_vm.get(namespace=namespace) + except DynamicApiError as exc: + self.display.debug(exc) + raise K8sInventoryException('Error fetching Virtual Machines list: %s' % format_dynamic_api_exc(exc)) + + namespace_group = 'namespace_{0}'.format(namespace) + namespace_vms_group = '{0}_vms'.format(namespace_group) + + name = self._sanitize_group_name(name) + namespace_group = self._sanitize_group_name(namespace_group) + namespace_vms_group = self._sanitize_group_name(namespace_vms_group) + self.inventory.add_group(name) + self.inventory.add_group(namespace_group) + self.inventory.add_child(name, namespace_group) + self.inventory.add_group(namespace_vms_group) + self.inventory.add_child(namespace_group, namespace_vms_group) + for vm in obj.items: + if not (vm.status and vm.status.interfaces): + continue + + # Find interface by its name: + if interface_name is None: + interface = vm.status.interfaces[0] + else: + interface = next( + (i for i in vm.status.interfaces if i.name == interface_name), + None + ) + + # If interface is not found or IP address is not reported skip this VM: + if 
interface is None or interface.ipAddress is None: + continue + + vm_name = name_format.format(namespace=vm.metadata.namespace, name=vm.metadata.name, uid=vm.metadata.uid) + vm_ip = interface.ipAddress + vm_annotations = {} if not vm.metadata.annotations else dict(vm.metadata.annotations) + + self.inventory.add_host(vm_name) + + if vm.metadata.labels: + # create a group for each label_value + for key, value in vm.metadata.labels: + group_name = 'label_{0}_{1}'.format(key, value) + group_name = self._sanitize_group_name(group_name) + self.inventory.add_group(group_name) + self.inventory.add_child(group_name, vm_name) + vm_labels = dict(vm.metadata.labels) + else: + vm_labels = {} + + self.inventory.add_child(namespace_vms_group, vm_name) + + # add hostvars + self.inventory.set_variable(vm_name, 'ansible_host', vm_ip) + self.inventory.set_variable(vm_name, 'labels', vm_labels) + self.inventory.set_variable(vm_name, 'annotations', vm_annotations) + self.inventory.set_variable(vm_name, 'object_type', 'vm') + self.inventory.set_variable(vm_name, 'resource_version', vm.metadata.resourceVersion) + self.inventory.set_variable(vm_name, 'uid', vm.metadata.uid) + + # Add all variables which are listed in 'ansible' annotation: + annotations_data = json.loads(vm_annotations.get(annotation_variable, "{}")) + for k, v in annotations_data.items(): + self.inventory.set_variable(vm_name, k, v) + + def verify_file(self, path): + if super(InventoryModule, self).verify_file(path): + if path.endswith(('kubevirt.yml', 'kubevirt.yaml')): + return True + return False diff --git a/plugins/inventory/linode.py b/plugins/inventory/linode.py new file mode 100644 index 0000000000..00428aa23d --- /dev/null +++ b/plugins/inventory/linode.py @@ -0,0 +1,207 @@ +# Copyright (c) 2017 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = r''' + name: linode + plugin_type: inventory + author: + - Luke Murphy (@decentral1se) + short_description: Ansible dynamic inventory plugin for Linode. + requirements: + - python >= 2.7 + - linode_api4 >= 2.0.0 + description: + - Reads inventories from the Linode API v4. + - Uses a YAML configuration file that ends with linode.(yml|yaml). + - Linode labels are used by default as the hostnames. + - The inventory groups are built from groups and not tags. + options: + plugin: + description: marks this as an instance of the 'linode' plugin + required: true + choices: ['linode'] + access_token: + description: The Linode account personal access token. + required: true + env: + - name: LINODE_ACCESS_TOKEN + regions: + description: Populate inventory with instances in this region. + default: [] + type: list + required: false + types: + description: Populate inventory with instances with this type. + default: [] + type: list + required: false +''' + +EXAMPLES = r''' +# Minimal example. `LINODE_ACCESS_TOKEN` is exposed in environment. 
+plugin: linode + +# Example with regions, types, groups and access token +plugin: linode +access_token: foobar +regions: + - eu-west +types: + - g5-standard-2 +''' + +import os + +from ansible.errors import AnsibleError, AnsibleParserError +from ansible.module_utils.six import string_types +from ansible.plugins.inventory import BaseInventoryPlugin + + +try: + from linode_api4 import LinodeClient + from linode_api4.errors import ApiError as LinodeApiError +except ImportError: + raise AnsibleError('the Linode dynamic inventory plugin requires linode_api4.') + + +class InventoryModule(BaseInventoryPlugin): + + NAME = 'community.general.linode' + + def _build_client(self): + """Build the Linode client.""" + + access_token = self.get_option('access_token') + + if access_token is None: + try: + access_token = os.environ['LINODE_ACCESS_TOKEN'] + except KeyError: + pass + + if access_token is None: + raise AnsibleError(( + 'Could not retrieve Linode access token ' + 'from plugin configuration or environment' + )) + + self.client = LinodeClient(access_token) + + def _get_instances_inventory(self): + """Retrieve Linode instance information from cloud inventory.""" + try: + self.instances = self.client.linode.instances() + except LinodeApiError as exception: + raise AnsibleError('Linode client raised: %s' % exception) + + def _add_groups(self): + """Add Linode instance groups to the dynamic inventory.""" + self.linode_groups = set( + filter(None, [ + instance.group + for instance + in self.instances + ]) + ) + + for linode_group in self.linode_groups: + self.inventory.add_group(linode_group) + + def _filter_by_config(self, regions, types): + """Filter instances by user specified configuration.""" + if regions: + self.instances = [ + instance for instance in self.instances + if instance.region.id in regions + ] + + if types: + self.instances = [ + instance for instance in self.instances + if instance.type.id in types + ] + + def _add_instances_to_groups(self): + """Add instance names to their dynamic inventory groups.""" + for instance in self.instances: + self.inventory.add_host(instance.label, group=instance.group) + + def _add_hostvars_for_instances(self): + """Add hostvars for instances in the dynamic inventory.""" + for instance in self.instances: + hostvars = instance._raw_json + for hostvar_key in hostvars: + self.inventory.set_variable( + instance.label, + hostvar_key, + hostvars[hostvar_key] + ) + + def _validate_option(self, name, desired_type, option_value): + """Validate user specified configuration data against types.""" + if isinstance(option_value, string_types) and desired_type == list: + option_value = [option_value] + + if option_value is None: + option_value = desired_type() + + if not isinstance(option_value, desired_type): + raise AnsibleParserError( + 'The option %s (%s) must be a %s' % ( + name, option_value, desired_type + ) + ) + + return option_value + + def _get_query_options(self, config_data): + """Get user specified query options from the configuration.""" + options = { + 'regions': { + 'type_to_be': list, + 'value': config_data.get('regions', []) + }, + 'types': { + 'type_to_be': list, + 'value': config_data.get('types', []) + }, + } + + for name in options: + options[name]['value'] = self._validate_option( + name, + options[name]['type_to_be'], + options[name]['value'] + ) + + regions = options['regions']['value'] + types = options['types']['value'] + + return regions, types + + def verify_file(self, path): + """Verify the Linode configuration file.""" + if 
super(InventoryModule, self).verify_file(path):
+            endings = ('linode.yaml', 'linode.yml')
+            if any((path.endswith(ending) for ending in endings)):
+                return True
+        return False
+
+    def parse(self, inventory, loader, path, cache=True):
+        """Dynamically parse the Linode cloud inventory."""
+        super(InventoryModule, self).parse(inventory, loader, path)
+
+        # Options must be loaded before the client is built, because
+        # _build_client() reads the access_token option.
+        config_data = self._read_config_data(path)
+
+        self._build_client()
+
+        self._get_instances_inventory()
+
+        regions, types = self._get_query_options(config_data)
+        self._filter_by_config(regions, types)
+
+        self._add_groups()
+        self._add_instances_to_groups()
+        self._add_hostvars_for_instances()
diff --git a/plugins/inventory/nmap.py b/plugins/inventory/nmap.py
new file mode 100644
index 0000000000..2bf2da4d82
--- /dev/null
+++ b/plugins/inventory/nmap.py
@@ -0,0 +1,167 @@
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+    name: nmap
+    plugin_type: inventory
+    short_description: Uses nmap to find hosts to target
+    description:
+        - Uses a YAML configuration file with a valid YAML extension.
+    extends_documentation_fragment:
+        - constructed
+        - inventory_cache
+    requirements:
+        - nmap CLI installed
+    options:
+        plugin:
+            description: token that ensures this is a source file for the 'nmap' plugin.
+            required: True
+            choices: ['nmap']
+        address:
+            description: Network IP or range of IPs to scan; you can use a simple range (10.2.2.15-25) or CIDR notation.
+            required: True
+        exclude:
+            description: list of addresses to exclude
+            type: list
+        ports:
+            description: Enable/disable scanning for open ports
+            type: boolean
+            default: True
+        ipv4:
+            description: use IPv4 type addresses
+            type: boolean
+            default: True
+        ipv6:
+            description: use IPv6 type addresses
+            type: boolean
+            default: True
+    notes:
+        - At least one of ipv4 or ipv6 is required to be True; both can be True, but they cannot both be False.
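+        # (parse() below enforces this note: ipv4-only appends '-4' to the
+        # nmap command line, ipv6-only appends '-6', and disabling both
+        # raises an AnsibleParserError.)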
+ - 'TODO: add OS fingerprinting' +''' +EXAMPLES = ''' + # inventory.config file in YAML format + plugin: nmap + strict: False + address: 192.168.0.0/24 +''' + +import os +import re + +from subprocess import Popen, PIPE + +from ansible import constants as C +from ansible.errors import AnsibleParserError +from ansible.module_utils._text import to_native, to_text +from ansible.plugins.inventory import BaseInventoryPlugin, Constructable, Cacheable +from ansible.module_utils.common.process import get_bin_path + + +class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable): + + NAME = 'community.general.nmap' + find_host = re.compile(r'^Nmap scan report for ([\w,.,-]+)(?: \(([\w,.,:,\[,\]]+)\))?') + find_port = re.compile(r'^(\d+)/(\w+)\s+(\w+)\s+(\w+)') + + def __init__(self): + self._nmap = None + super(InventoryModule, self).__init__() + + def verify_file(self, path): + + valid = False + if super(InventoryModule, self).verify_file(path): + file_name, ext = os.path.splitext(path) + + if not ext or ext in C.YAML_FILENAME_EXTENSIONS: + valid = True + + return valid + + def parse(self, inventory, loader, path, cache=False): + + try: + self._nmap = get_bin_path('nmap') + except ValueError as e: + raise AnsibleParserError('nmap inventory plugin requires the nmap cli tool to work: {0}'.format(to_native(e))) + + super(InventoryModule, self).parse(inventory, loader, path, cache=cache) + + self._read_config_data(path) + + # setup command + cmd = [self._nmap] + if not self._options['ports']: + cmd.append('-sP') + + if self._options['ipv4'] and not self._options['ipv6']: + cmd.append('-4') + elif self._options['ipv6'] and not self._options['ipv4']: + cmd.append('-6') + elif not self._options['ipv6'] and not self._options['ipv4']: + raise AnsibleParserError('One of ipv4 or ipv6 must be enabled for this plugin') + + if self._options['exclude']: + cmd.append('--exclude') + cmd.append(','.join(self._options['exclude'])) + + cmd.append(self._options['address']) + try: + # execute + p = Popen(cmd, stdout=PIPE, stderr=PIPE) + stdout, stderr = p.communicate() + if p.returncode != 0: + raise AnsibleParserError('Failed to run nmap, rc=%s: %s' % (p.returncode, to_native(stderr))) + + # parse results + host = None + ip = None + ports = [] + + try: + t_stdout = to_text(stdout, errors='surrogate_or_strict') + except UnicodeError as e: + raise AnsibleParserError('Invalid (non unicode) input returned: %s' % to_native(e)) + + for line in t_stdout.splitlines(): + hits = self.find_host.match(line) + if hits: + if host is not None: + self.inventory.set_variable(host, 'ports', ports) + + # if dns only shows arpa, just use ip instead as hostname + if hits.group(1).endswith('.in-addr.arpa'): + host = hits.group(2) + else: + host = hits.group(1) + + # if no reverse dns exists, just use ip instead as hostname + if hits.group(2) is not None: + ip = hits.group(2) + else: + ip = hits.group(1) + + if host is not None: + # update inventory + self.inventory.add_host(host) + self.inventory.set_variable(host, 'ip', ip) + ports = [] + continue + + host_ports = self.find_port.match(line) + if host is not None and host_ports: + ports.append({'port': host_ports.group(1), 'protocol': host_ports.group(2), 'state': host_ports.group(3), 'service': host_ports.group(4)}) + continue + + # TODO: parse more data, OS? 
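+                # Worked example for the two regexes above: the line
+                # "Nmap scan report for web01 (10.0.0.5)" yields find_host
+                # groups ('web01', '10.0.0.5'), and "22/tcp open ssh"
+                # yields find_port groups ('22', 'tcp', 'open', 'ssh').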
+ + # if any leftovers + if host and ports: + self.inventory.set_variable(host, 'ports', ports) + + except Exception as e: + raise AnsibleParserError("failed to parse %s: %s " % (to_native(path), to_native(e))) diff --git a/plugins/inventory/online.py b/plugins/inventory/online.py new file mode 100644 index 0000000000..7b0c505212 --- /dev/null +++ b/plugins/inventory/online.py @@ -0,0 +1,262 @@ +# Copyright (c) 2018 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) + + +__metaclass__ = type + +DOCUMENTATION = ''' + name: online + plugin_type: inventory + author: + - Remy Leone (@sieben) + short_description: Online inventory source + description: + - Get inventory hosts from Online + options: + plugin: + description: token that ensures this is a source file for the 'online' plugin. + required: True + choices: ['online'] + oauth_token: + required: True + description: Online OAuth token. + env: + # in order of precedence + - name: ONLINE_TOKEN + - name: ONLINE_API_KEY + - name: ONLINE_OAUTH_TOKEN + hostnames: + description: List of preference about what to use as an hostname. + type: list + default: + - public_ipv4 + choices: + - public_ipv4 + - private_ipv4 + - hostname + groups: + description: List of groups. + type: list + choices: + - location + - offer + - rpn +''' + +EXAMPLES = ''' +# online_inventory.yml file in YAML format +# Example command line: ansible-inventory --list -i online_inventory.yml + +plugin: online +hostnames: + - public_ipv4 +groups: + - location + - offer + - rpn +''' + +import json +from sys import version as python_version + +from ansible.errors import AnsibleError +from ansible.module_utils.urls import open_url +from ansible.plugins.inventory import BaseInventoryPlugin +from ansible.module_utils._text import to_native, to_text +from ansible.module_utils.ansible_release import __version__ as ansible_version +from ansible.module_utils.six.moves.urllib.parse import urljoin + + +class InventoryModule(BaseInventoryPlugin): + NAME = 'community.general.online' + API_ENDPOINT = "https://api.online.net" + + def extract_public_ipv4(self, host_infos): + try: + return host_infos["network"]["ip"][0] + except (KeyError, TypeError, IndexError): + self.display.warning("An error happened while extracting public IPv4 address. Information skipped.") + return None + + def extract_private_ipv4(self, host_infos): + try: + return host_infos["network"]["private"][0] + except (KeyError, TypeError, IndexError): + self.display.warning("An error happened while extracting private IPv4 address. Information skipped.") + return None + + def extract_os_name(self, host_infos): + try: + return host_infos["os"]["name"] + except (KeyError, TypeError): + self.display.warning("An error happened while extracting OS name. Information skipped.") + return None + + def extract_os_version(self, host_infos): + try: + return host_infos["os"]["version"] + except (KeyError, TypeError): + self.display.warning("An error happened while extracting OS version. Information skipped.") + return None + + def extract_hostname(self, host_infos): + try: + return host_infos["hostname"] + except (KeyError, TypeError): + self.display.warning("An error happened while extracting hostname. 
Information skipped.")
+            return None
+
+    def extract_location(self, host_infos):
+        try:
+            return host_infos["location"]["datacenter"]
+        except (KeyError, TypeError):
+            self.display.warning("An error happened while extracting datacenter location. Information skipped.")
+            return None
+
+    def extract_offer(self, host_infos):
+        try:
+            return host_infos["offer"]
+        except (KeyError, TypeError):
+            self.display.warning("An error happened while extracting commercial offer. Information skipped.")
+            return None
+
+    def extract_rpn(self, host_infos):
+        try:
+            return self.rpn_lookup_cache[host_infos["id"]]
+        except (KeyError, TypeError):
+            self.display.warning("An error happened while extracting RPN information. Information skipped.")
+            return None
+
+    def _fetch_information(self, url):
+        try:
+            response = open_url(url, headers=self.headers)
+        except Exception as e:
+            self.display.warning("An error happened while fetching: %s: %s" % (url, to_native(e)))
+            return None
+
+        try:
+            raw_data = to_text(response.read(), errors='surrogate_or_strict')
+        except UnicodeError:
+            raise AnsibleError("Incorrect encoding of fetched payload from Online servers")
+
+        try:
+            return json.loads(raw_data)
+        except ValueError:
+            raise AnsibleError("Incorrect JSON payload")
+
+    @staticmethod
+    def extract_rpn_lookup_cache(rpn_list):
+        lookup = {}
+        for rpn in rpn_list:
+            for member in rpn["members"]:
+                lookup[member["id"]] = rpn["name"]
+        return lookup
+
+    def _fill_host_variables(self, hostname, host_infos):
+        targeted_attributes = (
+            "offer",
+            "id",
+            "hostname",
+            "location",
+            "boot_mode",
+            "power",
+            "last_reboot",
+            "anti_ddos",
+            "hardware_watch",
+            "support"
+        )
+        for attribute in targeted_attributes:
+            self.inventory.set_variable(hostname, attribute, host_infos[attribute])
+
+        if self.extract_public_ipv4(host_infos=host_infos):
+            self.inventory.set_variable(hostname, "public_ipv4", self.extract_public_ipv4(host_infos=host_infos))
+            self.inventory.set_variable(hostname, "ansible_host", self.extract_public_ipv4(host_infos=host_infos))
+
+        if self.extract_private_ipv4(host_infos=host_infos):
+            self.inventory.set_variable(hostname, "private_ipv4", self.extract_private_ipv4(host_infos=host_infos))
+
+        if self.extract_os_name(host_infos=host_infos):
+            self.inventory.set_variable(hostname, "os_name", self.extract_os_name(host_infos=host_infos))
+
+        if self.extract_os_version(host_infos=host_infos):
+            self.inventory.set_variable(hostname, "os_version", self.extract_os_version(host_infos=host_infos))
+
+    def _filter_host(self, host_infos, hostname_preferences):
+
+        for pref in hostname_preferences:
+            if self.extractors[pref](host_infos):
+                return self.extractors[pref](host_infos)
+
+        return None
+
+    def do_server_inventory(self, host_infos, hostname_preferences, group_preferences):
+
+        hostname = self._filter_host(host_infos=host_infos,
+                                     hostname_preferences=hostname_preferences)
+
+        # No suitable hostname was found in the attributes, so the host won't be in the inventory
+        if not hostname:
+            return
+
+        self.inventory.add_host(host=hostname)
+        self._fill_host_variables(hostname=hostname, host_infos=host_infos)
+
+        for g in group_preferences:
+            group = self.group_extractors[g](host_infos)
+
+            if not group:
+                continue
+
+            self.inventory.add_group(group=group)
+            self.inventory.add_host(group=group, host=hostname)
+
+    def parse(self, inventory, loader, path, cache=True):
+        super(InventoryModule, self).parse(inventory, loader, path)
+        self._read_config_data(path=path)
+
+        token = self.get_option("oauth_token")
+        hostname_preferences = 
self.get_option("hostnames") + + group_preferences = self.get_option("groups") + if group_preferences is None: + group_preferences = [] + + self.extractors = { + "public_ipv4": self.extract_public_ipv4, + "private_ipv4": self.extract_private_ipv4, + "hostname": self.extract_hostname, + } + + self.group_extractors = { + "location": self.extract_location, + "offer": self.extract_offer, + "rpn": self.extract_rpn + } + + self.headers = { + 'Authorization': "Bearer %s" % token, + 'User-Agent': "ansible %s Python %s" % (ansible_version, python_version.split(' ')[0]), + 'Content-type': 'application/json' + } + + servers_url = urljoin(InventoryModule.API_ENDPOINT, "api/v1/server") + servers_api_path = self._fetch_information(url=servers_url) + + if "rpn" in group_preferences: + rpn_groups_url = urljoin(InventoryModule.API_ENDPOINT, "api/v1/rpn/group") + rpn_list = self._fetch_information(url=rpn_groups_url) + self.rpn_lookup_cache = self.extract_rpn_lookup_cache(rpn_list) + + for server_api_path in servers_api_path: + + server_url = urljoin(InventoryModule.API_ENDPOINT, server_api_path) + raw_server_info = self._fetch_information(url=server_url) + + if raw_server_info is None: + continue + + self.do_server_inventory(host_infos=raw_server_info, + hostname_preferences=hostname_preferences, + group_preferences=group_preferences) diff --git a/plugins/inventory/scaleway.py b/plugins/inventory/scaleway.py new file mode 100644 index 0000000000..c6c9ae0e04 --- /dev/null +++ b/plugins/inventory/scaleway.py @@ -0,0 +1,279 @@ +# Copyright (c) 2017 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +DOCUMENTATION = ''' + name: scaleway + plugin_type: inventory + author: + - Remy Leone (@sieben) + short_description: Scaleway inventory source + description: + - Get inventory hosts from Scaleway + options: + plugin: + description: token that ensures this is a source file for the 'scaleway' plugin. + required: True + choices: ['scaleway'] + regions: + description: Filter results on a specific Scaleway region + type: list + default: + - ams1 + - par1 + tags: + description: Filter results on a specific tag + type: list + oauth_token: + required: True + description: Scaleway OAuth token. + env: + # in order of precedence + - name: SCW_TOKEN + - name: SCW_API_KEY + - name: SCW_OAUTH_TOKEN + hostnames: + description: List of preference about what to use as an hostname. + type: list + default: + - public_ipv4 + choices: + - public_ipv4 + - private_ipv4 + - public_ipv6 + - hostname + - id + variables: + description: 'set individual variables: keys are variable names and + values are templates. Any value returned by the + L(Scaleway API, https://developer.scaleway.com/#servers-server-get) + can be used.' 
+ type: dict +''' + +EXAMPLES = ''' +# scaleway_inventory.yml file in YAML format +# Example command line: ansible-inventory --list -i scaleway_inventory.yml + +# use hostname as inventory_hostname +# use the private IP address to connect to the host +plugin: scaleway +regions: + - ams1 + - par1 +tags: + - foobar +hostnames: + - hostname +variables: + ansible_host: private_ip + state: state + +# use hostname as inventory_hostname and public IP address to connect to the host +plugin: scaleway +hostnames: + - hostname +regions: + - par1 +variables: + ansible_host: public_ip.address +''' + +import json + +from ansible.errors import AnsibleError +from ansible.plugins.inventory import BaseInventoryPlugin, Constructable +from ansible_collections.community.general.plugins.module_utils.scaleway import SCALEWAY_LOCATION, parse_pagination_link +from ansible.module_utils.urls import open_url +from ansible.module_utils._text import to_native + +import ansible.module_utils.six.moves.urllib.parse as urllib_parse + + +def _fetch_information(token, url): + results = [] + paginated_url = url + while True: + try: + response = open_url(paginated_url, + headers={'X-Auth-Token': token, + 'Content-type': 'application/json'}) + except Exception as e: + raise AnsibleError("Error while fetching %s: %s" % (url, to_native(e))) + try: + raw_json = json.loads(response.read()) + except ValueError: + raise AnsibleError("Incorrect JSON payload") + + try: + results.extend(raw_json["servers"]) + except KeyError: + raise AnsibleError("Incorrect format from the Scaleway API response") + + link = response.headers['Link'] + if not link: + return results + relations = parse_pagination_link(link) + if 'next' not in relations: + return results + paginated_url = urllib_parse.urljoin(paginated_url, relations['next']) + + +def _build_server_url(api_endpoint): + return "/".join([api_endpoint, "servers"]) + + +def extract_public_ipv4(server_info): + try: + return server_info["public_ip"]["address"] + except (KeyError, TypeError): + return None + + +def extract_private_ipv4(server_info): + try: + return server_info["private_ip"] + except (KeyError, TypeError): + return None + + +def extract_hostname(server_info): + try: + return server_info["hostname"] + except (KeyError, TypeError): + return None + + +def extract_server_id(server_info): + try: + return server_info["id"] + except (KeyError, TypeError): + return None + + +def extract_public_ipv6(server_info): + try: + return server_info["ipv6"]["address"] + except (KeyError, TypeError): + return None + + +def extract_tags(server_info): + try: + return server_info["tags"] + except (KeyError, TypeError): + return None + + +def extract_zone(server_info): + try: + return server_info["location"]["zone_id"] + except (KeyError, TypeError): + return None + + +extractors = { + "public_ipv4": extract_public_ipv4, + "private_ipv4": extract_private_ipv4, + "public_ipv6": extract_public_ipv6, + "hostname": extract_hostname, + "id": extract_server_id +} + + +class InventoryModule(BaseInventoryPlugin, Constructable): + NAME = 'community.general.scaleway' + + def _fill_host_variables(self, host, server_info): + targeted_attributes = ( + "arch", + "commercial_type", + "id", + "organization", + "state", + "hostname", + ) + for attribute in targeted_attributes: + self.inventory.set_variable(host, attribute, server_info[attribute]) + + self.inventory.set_variable(host, "tags", server_info["tags"]) + + if extract_public_ipv6(server_info=server_info): + self.inventory.set_variable(host, "public_ipv6", 
extract_public_ipv6(server_info=server_info)) + + if extract_public_ipv4(server_info=server_info): + self.inventory.set_variable(host, "public_ipv4", extract_public_ipv4(server_info=server_info)) + + if extract_private_ipv4(server_info=server_info): + self.inventory.set_variable(host, "private_ipv4", extract_private_ipv4(server_info=server_info)) + + def _get_zones(self, config_zones): + return set(SCALEWAY_LOCATION.keys()).intersection(config_zones) + + def match_groups(self, server_info, tags): + server_zone = extract_zone(server_info=server_info) + server_tags = extract_tags(server_info=server_info) + + # If a server does not have a zone, it means it is archived + if server_zone is None: + return set() + + # If no filtering is defined, all tags are valid groups + if tags is None: + return set(server_tags).union((server_zone,)) + + matching_tags = set(server_tags).intersection(tags) + + if not matching_tags: + return set() + else: + return matching_tags.union((server_zone,)) + + def _filter_host(self, host_infos, hostname_preferences): + + for pref in hostname_preferences: + if extractors[pref](host_infos): + return extractors[pref](host_infos) + + return None + + def do_zone_inventory(self, zone, token, tags, hostname_preferences): + self.inventory.add_group(zone) + zone_info = SCALEWAY_LOCATION[zone] + + url = _build_server_url(zone_info["api_endpoint"]) + raw_zone_hosts_infos = _fetch_information(url=url, token=token) + + for host_infos in raw_zone_hosts_infos: + + hostname = self._filter_host(host_infos=host_infos, + hostname_preferences=hostname_preferences) + + # No suitable hostname were found in the attributes and the host won't be in the inventory + if not hostname: + continue + + groups = self.match_groups(host_infos, tags) + + for group in groups: + self.inventory.add_group(group=group) + self.inventory.add_host(group=group, host=hostname) + self._fill_host_variables(host=hostname, server_info=host_infos) + + # Composed variables + self._set_composite_vars(self.get_option('variables'), host_infos, hostname, strict=False) + + def parse(self, inventory, loader, path, cache=True): + super(InventoryModule, self).parse(inventory, loader, path) + self._read_config_data(path=path) + + config_zones = self.get_option("regions") + tags = self.get_option("tags") + token = self.get_option("oauth_token") + hostname_preference = self.get_option("hostnames") + + for zone in self._get_zones(config_zones): + self.do_zone_inventory(zone=zone, token=token, tags=tags, hostname_preferences=hostname_preference) diff --git a/plugins/inventory/virtualbox.py b/plugins/inventory/virtualbox.py new file mode 100644 index 0000000000..14192b0f9c --- /dev/null +++ b/plugins/inventory/virtualbox.py @@ -0,0 +1,282 @@ +# Copyright (c) 2017 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = ''' + name: virtualbox + plugin_type: inventory + short_description: virtualbox inventory source + description: + - Get inventory hosts from the local virtualbox installation. + - Uses a YAML configuration file that ends with virtualbox.(yml|yaml) or vbox.(yml|yaml). + - The inventory_hostname is always the 'Name' of the virtualbox instance. 
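A note on the scaleway plugin above: its match_groups() encodes three rules (a server without a zone is archived and gets no groups; with no tag filter, every server tag becomes a group; otherwise only the matching tags plus the zone survive). A minimal standalone restatement of those rules, using made-up inputs and plain Python sets rather than Scaleway API objects:

    def match_groups(server_zone, server_tags, tag_filter=None):
        # Archived servers have no zone and are not grouped at all.
        if server_zone is None:
            return set()
        # Without a tag filter, every tag becomes a group, plus the zone.
        if tag_filter is None:
            return set(server_tags) | {server_zone}
        # With a filter, keep only matching tags; no match means no groups.
        matching = set(server_tags) & set(tag_filter)
        return matching | {server_zone} if matching else set()

    print(match_groups('par1', ['web', 'prod'], ['prod']))  # {'prod', 'par1'}
    print(match_groups(None, ['web', 'prod']))              # set()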
+ extends_documentation_fragment: + - constructed + - inventory_cache + options: + plugin: + description: token that ensures this is a source file for the 'virtualbox' plugin + required: True + choices: ['virtualbox'] + running_only: + description: toggles showing all vms vs only those currently running + type: boolean + default: False + settings_password_file: + description: provide a file containing the settings password (equivalent to --settingspwfile) + network_info_path: + description: property path to query for network information (ansible_host) + default: "/VirtualBox/GuestInfo/Net/0/V4/IP" + query: + description: create vars from virtualbox properties + type: dictionary + default: {} +''' + +EXAMPLES = ''' +# file must be named vbox.yaml or vbox.yml +simple_config_file: + plugin: virtualbox + settings_password_file: /etc/virtulbox/secrets + query: + logged_in_users: /VirtualBox/GuestInfo/OS/LoggedInUsersList + compose: + ansible_connection: ('indows' in vbox_Guest_OS)|ternary('winrm', 'ssh') + +# add hosts (all match with minishift vm) to the group container if any of the vms are in ansible_inventory' +plugin: virtualbox +groups: + container: "'minis' in (inventory_hostname)" +''' + +import os + +from subprocess import Popen, PIPE + +from ansible.errors import AnsibleParserError +from ansible.module_utils._text import to_bytes, to_native, to_text +from ansible.module_utils.common._collections_compat import MutableMapping +from ansible.plugins.inventory import BaseInventoryPlugin, Constructable, Cacheable +from ansible.module_utils.common.process import get_bin_path + + +class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable): + ''' Host inventory parser for ansible using local virtualbox. ''' + + NAME = 'community.general.virtualbox' + VBOX = "VBoxManage" + + def __init__(self): + self._vbox_path = None + super(InventoryModule, self).__init__() + + def _query_vbox_data(self, host, property_path): + ret = None + try: + cmd = [self._vbox_path, b'guestproperty', b'get', + to_bytes(host, errors='surrogate_or_strict'), + to_bytes(property_path, errors='surrogate_or_strict')] + x = Popen(cmd, stdout=PIPE) + ipinfo = to_text(x.stdout.read(), errors='surrogate_or_strict') + if 'Value' in ipinfo: + a, ip = ipinfo.split(':', 1) + ret = ip.strip() + except Exception: + pass + return ret + + def _set_variables(self, hostvars): + + # set vars in inventory from hostvars + for host in hostvars: + + query = self.get_option('query') + # create vars from vbox properties + if query and isinstance(query, MutableMapping): + for varname in query: + hostvars[host][varname] = self._query_vbox_data(host, query[varname]) + + strict = self.get_option('strict') + + # create composite vars + self._set_composite_vars(self.get_option('compose'), hostvars[host], host, strict=strict) + + # actually update inventory + for key in hostvars[host]: + self.inventory.set_variable(host, key, hostvars[host][key]) + + # constructed groups based on conditionals + self._add_host_to_composed_groups(self.get_option('groups'), hostvars[host], host, strict=strict) + + # constructed keyed_groups + self._add_host_to_keyed_groups(self.get_option('keyed_groups'), hostvars[host], host, strict=strict) + + def _populate_from_cache(self, source_data): + hostvars = source_data.pop('_meta', {}).get('hostvars', {}) + for group in source_data: + if group == 'all': + continue + else: + group = self.inventory.add_group(group) + hosts = source_data[group].get('hosts', []) + for host in hosts: + self._populate_host_vars([host], 
hostvars.get(host, {}), group) + self.inventory.add_child('all', group) + if not source_data: + for host in hostvars: + self.inventory.add_host(host) + self._populate_host_vars([host], hostvars.get(host, {})) + + def _populate_from_source(self, source_data, using_current_cache=False): + if using_current_cache: + self._populate_from_cache(source_data) + return source_data + + cacheable_results = {'_meta': {'hostvars': {}}} + + hostvars = {} + prevkey = pref_k = '' + current_host = None + + # needed to possibly set ansible_host + netinfo = self.get_option('network_info_path') + + for line in source_data: + line = to_text(line) + if ':' not in line: + continue + try: + k, v = line.split(':', 1) + except Exception: + # skip non splitable + continue + + if k.strip() == '': + # skip empty + continue + + v = v.strip() + # found host + if k.startswith('Name') and ',' not in v: # some setting strings appear in Name + current_host = v + if current_host not in hostvars: + hostvars[current_host] = {} + self.inventory.add_host(current_host) + + # try to get network info + netdata = self._query_vbox_data(current_host, netinfo) + if netdata: + self.inventory.set_variable(current_host, 'ansible_host', netdata) + + # found groups + elif k == 'Groups': + for group in v.split('/'): + if group: + group = self.inventory.add_group(group) + self.inventory.add_child(group, current_host) + if group not in cacheable_results: + cacheable_results[group] = {'hosts': []} + cacheable_results[group]['hosts'].append(current_host) + continue + + else: + # found vars, accumulate in hostvars for clean inventory set + pref_k = 'vbox_' + k.strip().replace(' ', '_') + if k.startswith(' '): + if prevkey not in hostvars[current_host]: + hostvars[current_host][prevkey] = {} + hostvars[current_host][prevkey][pref_k] = v + else: + if v != '': + hostvars[current_host][pref_k] = v + if self._ungrouped_host(current_host, cacheable_results): + if 'ungrouped' not in cacheable_results: + cacheable_results['ungrouped'] = {'hosts': []} + cacheable_results['ungrouped']['hosts'].append(current_host) + + prevkey = pref_k + + self._set_variables(hostvars) + for host in hostvars: + h = self.inventory.get_host(host) + cacheable_results['_meta']['hostvars'][h.name] = h.vars + + return cacheable_results + + def _ungrouped_host(self, host, inventory): + def find_host(host, inventory): + for k, v in inventory.items(): + if k == '_meta': + continue + if isinstance(v, dict): + yield self._ungrouped_host(host, v) + elif isinstance(v, list): + yield host not in v + yield True + + return all([found_host for found_host in find_host(host, inventory)]) + + def verify_file(self, path): + + valid = False + if super(InventoryModule, self).verify_file(path): + if path.endswith(('virtualbox.yaml', 'virtualbox.yml', 'vbox.yaml', 'vbox.yml')): + valid = True + return valid + + def parse(self, inventory, loader, path, cache=True): + + try: + self._vbox_path = get_bin_path(self.VBOX) + except ValueError as e: + raise AnsibleParserError(e) + + super(InventoryModule, self).parse(inventory, loader, path) + + cache_key = self.get_cache_key(path) + + config_data = self._read_config_data(path) + + # set _options from config data + self._consume_options(config_data) + + source_data = None + if cache: + cache = self.get_option('cache') + + update_cache = False + if cache: + try: + source_data = self._cache[cache_key] + except KeyError: + update_cache = True + + if not source_data: + b_pwfile = to_bytes(self.get_option('settings_password_file'), errors='surrogate_or_strict', 
nonstring='passthru') + running = self.get_option('running_only') + + # start getting data + cmd = [self._vbox_path, b'list', b'-l'] + if running: + cmd.append(b'runningvms') + else: + cmd.append(b'vms') + + if b_pwfile and os.path.exists(b_pwfile): + cmd.append(b'--settingspwfile') + cmd.append(b_pwfile) + + try: + p = Popen(cmd, stdout=PIPE) + except Exception as e: + raise AnsibleParserError(to_native(e)) + + source_data = p.stdout.read().splitlines() + + using_current_cache = cache and not update_cache + cacheable_results = self._populate_from_source(source_data, using_current_cache) + + if update_cache: + self._cache[cache_key] = cacheable_results diff --git a/plugins/inventory/vultr.py b/plugins/inventory/vultr.py new file mode 100644 index 0000000000..ff56ffa818 --- /dev/null +++ b/plugins/inventory/vultr.py @@ -0,0 +1,201 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2018, Yanis Guenane +# Copyright (c) 2019, René Moser +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +DOCUMENTATION = r''' + name: vultr + plugin_type: inventory + author: + - Yanis Guenane (@Spredzy) + - René Moser (@resmo) + short_description: Vultr inventory source + extends_documentation_fragment: + - constructed + description: + - Get inventory hosts from Vultr public cloud. + - Uses an YAML configuration file ending with either I(vultr.yml) or I(vultr.yaml) to set parameter values (also see examples). + - Uses I(api_config), I(~/.vultr.ini), I(./vultr.ini) or C(VULTR_API_CONFIG) pointing to a Vultr credentials INI file + (see U(https://docs.ansible.com/ansible/latest/scenario_guides/guide_vultr.html)). + options: + plugin: + description: Token that ensures this is a source file for the 'vultr' plugin. + type: string + required: True + choices: [ vultr ] + api_account: + description: Specify the account to be used. + type: string + default: default + api_config: + description: Path to the vultr configuration file. If not specified will be taken from regular Vultr configuration. + type: path + env: + - name: VULTR_API_CONFIG + api_key: + description: Vultr API key. If not specified will be taken from regular Vultr configuration. + type: string + env: + - name: VULTR_API_KEY + hostname: + description: Field to match the hostname. Note v4_main_ip corresponds to the main_ip field returned from the API and name to label. + type: string + default: v4_main_ip + choices: + - v4_main_ip + - v6_main_ip + - name + filter_by_tag: + description: Only return servers filtered by this tag + type: string +''' + +EXAMPLES = r''' +# inventory_vultr.yml file in YAML format +# Example command line: ansible-inventory --list -i inventory_vultr.yml + +# Group by a region as lower case and with prefix e.g. "vultr_region_amsterdam" and by OS without prefix e.g. 
"CentOS_7_x64" +plugin: vultr +keyed_groups: + - prefix: vultr_region + key: region | lower + - separator: "" + key: os + +# Pass a tag filter to the API +plugin: vultr +filter_by_tag: Cache +''' + +import json + +from ansible.errors import AnsibleError +from ansible.plugins.inventory import BaseInventoryPlugin, Constructable +from ansible.module_utils.six.moves import configparser +from ansible.module_utils.urls import open_url +from ansible.module_utils._text import to_native +from ansible_collections.community.general.plugins.module_utils.vultr import Vultr, VULTR_API_ENDPOINT, VULTR_USER_AGENT +from ansible.module_utils.six.moves.urllib.parse import quote + + +SCHEMA = { + 'SUBID': dict(key='id'), + 'label': dict(key='name'), + 'date_created': dict(), + 'allowed_bandwidth_gb': dict(convert_to='int'), + 'auto_backups': dict(key='auto_backup_enabled', convert_to='bool'), + 'current_bandwidth_gb': dict(), + 'kvm_url': dict(), + 'default_password': dict(), + 'internal_ip': dict(), + 'disk': dict(), + 'cost_per_month': dict(convert_to='float'), + 'location': dict(key='region'), + 'main_ip': dict(key='v4_main_ip'), + 'network_v4': dict(key='v4_network'), + 'gateway_v4': dict(key='v4_gateway'), + 'os': dict(), + 'pending_charges': dict(convert_to='float'), + 'power_status': dict(), + 'ram': dict(), + 'plan': dict(), + 'server_state': dict(), + 'status': dict(), + 'firewall_group': dict(), + 'tag': dict(), + 'v6_main_ip': dict(), + 'v6_network': dict(), + 'v6_network_size': dict(), + 'v6_networks': dict(), + 'vcpu_count': dict(convert_to='int'), +} + + +def _load_conf(path, account): + + if path: + conf = configparser.ConfigParser() + conf.read(path) + + if not conf._sections.get(account): + return None + + return dict(conf.items(account)) + else: + return Vultr.read_ini_config(account) + + +def _retrieve_servers(api_key, tag_filter=None): + api_url = '%s/v1/server/list' % VULTR_API_ENDPOINT + if tag_filter is not None: + api_url = api_url + '?tag=%s' % quote(tag_filter) + + try: + response = open_url( + api_url, headers={'API-Key': api_key, 'Content-type': 'application/json'}, + http_agent=VULTR_USER_AGENT, + ) + servers_list = json.loads(response.read()) + + return servers_list.values() if servers_list else [] + except ValueError: + raise AnsibleError("Incorrect JSON payload") + except Exception as e: + raise AnsibleError("Error while fetching %s: %s" % (api_url, to_native(e))) + + +class InventoryModule(BaseInventoryPlugin, Constructable): + + NAME = 'community.general.vultr' + + def verify_file(self, path): + valid = False + if super(InventoryModule, self).verify_file(path): + if path.endswith(('vultr.yaml', 'vultr.yml')): + valid = True + return valid + + def parse(self, inventory, loader, path, cache=True): + super(InventoryModule, self).parse(inventory, loader, path) + self._read_config_data(path=path) + + conf = _load_conf(self.get_option('api_config'), self.get_option('api_account')) + try: + api_key = self.get_option('api_key') or conf.get('key') + except Exception: + raise AnsibleError('Could not find an API key. 
Check inventory file and Vultr configuration files.')
+
+        hostname_preference = self.get_option('hostname')
+
+        # Add a top group 'vultr'
+        self.inventory.add_group(group='vultr')
+
+        # Filtering by tag is supported by the API via a query string
+        filter_by_tag = self.get_option('filter_by_tag')
+        for server in _retrieve_servers(api_key, filter_by_tag):
+
+            server = Vultr.normalize_result(server, SCHEMA)
+
+            self.inventory.add_host(host=server['name'], group='vultr')
+
+            for attribute, value in server.items():
+                self.inventory.set_variable(server['name'], attribute, value)
+
+            if hostname_preference != 'name':
+                self.inventory.set_variable(server['name'], 'ansible_host', server[hostname_preference])
+
+            # Use constructed if applicable
+            strict = self.get_option('strict')
+
+            # Composed variables
+            self._set_composite_vars(self.get_option('compose'), server, server['name'], strict=strict)
+
+            # Complex groups based on jinja2 conditionals, hosts that meet the conditional are added to group
+            self._add_host_to_composed_groups(self.get_option('groups'), server, server['name'], strict=strict)
+
+            # Create groups based on variable values and add the corresponding hosts to it
+            self._add_host_to_keyed_groups(self.get_option('keyed_groups'), server, server['name'], strict=strict)
diff --git a/plugins/lookup/__init__.py b/plugins/lookup/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/plugins/lookup/avi.py b/plugins/lookup/avi.py
new file mode 100644
index 0000000000..dfc8cc4f21
--- /dev/null
+++ b/plugins/lookup/avi.py
@@ -0,0 +1,127 @@
+# python 3 headers, required if submitting to Ansible
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+lookup: avi
+author: Sandeep Bandi
+short_description: Look up ``Avi`` objects.
+description:
+    - Given an object_type, fetch all the objects of that type or fetch
+      the specific object that matches the name/uuid given via options.
+    - For a single object lookup, if you want the output to be a list, you may
+      want to pass the option wantlist=True to the plugin.
+
+options:
+    obj_type:
+        description:
+            - type of object to query
+        required: True
+    obj_name:
+        description:
+            - name of the object to query
+    obj_uuid:
+        description:
+            - UUID of the object to query
+extends_documentation_fragment:
+- community.general.avi
+
+'''
+
+EXAMPLES = """
+# Lookup query for all the objects of a specific type.
+- debug: msg="{{ lookup('avi', avi_credentials=avi_credentials, obj_type='virtualservice') }}"
+# Lookup query for an object with the given name and type.
+- debug: msg="{{ lookup('avi', avi_credentials=avi_credentials, obj_name='vs1', obj_type='virtualservice', wantlist=True) }}"
+# Lookup query for an object with the given UUID and type.
+- debug: msg="{{ lookup('avi', obj_uuid='virtualservice-5c0e183a-690a-45d8-8d6f-88c30a52550d', obj_type='virtualservice') }}"
+# We can replace lookup with the query function to always get the output as a list.
+# This is helpful for looping.
+- debug: msg="{{ query('avi', obj_uuid='virtualservice-5c0e183a-690a-45d8-8d6f-88c30a52550d', obj_type='virtualservice') }}"
+"""
+
+RETURN = """
+ _raw:
+     description:
+         - One or more objects returned from ``Avi`` API.
+     type: list
+     elements: dict
+"""
+
+from ansible.module_utils._text import to_native
+from ansible.errors import AnsibleError, AnsibleParserError
+from ansible.plugins.lookup import LookupBase
+from ansible.utils.display import Display
+from ansible_collections.community.general.plugins.module_utils.network.avi.avi_api import (ApiSession,
+                                                                                            AviCredentials,
+                                                                                            AviServerError,
+                                                                                            ObjectNotFound,
+                                                                                            APIError)
+
+display = Display()
+
+
+def _api(avi_session, path, **kwargs):
+    '''
+    Generic function to handle both /<obj_type>/<obj_uuid> and /<obj_type>
+    API resource endpoints.
+    '''
+    rsp = []
+    try:
+        rsp_data = avi_session.get(path, **kwargs).json()
+        if 'results' in rsp_data:
+            rsp = rsp_data['results']
+        else:
+            rsp.append(rsp_data)
+    except ObjectNotFound as e:
+        display.warning('Resource not found. Please check obj_name/'
+                        'obj_uuid/obj_type are spelled correctly.')
+        display.v(to_native(e))
+    except (AviServerError, APIError) as e:
+        raise AnsibleError(to_native(e))
+    except Exception as e:
+        # Generic exception handling for connection failures
+        raise AnsibleError('Unable to communicate with controller '
+                           'due to error: %s' % to_native(e))
+
+    return rsp
+
+
+class LookupModule(LookupBase):
+    def run(self, terms, variables=None, avi_credentials=None, **kwargs):
+
+        api_creds = AviCredentials(**avi_credentials)
+        # Create the session using avi_credentials
+        try:
+            avi = ApiSession(avi_credentials=api_creds)
+        except Exception as e:
+            raise AnsibleError(to_native(e))
+
+        # Return an empty list if the object is not found
+        rsp = []
+        try:
+            path = kwargs.pop('obj_type')
+        except KeyError:
+            raise AnsibleError("Please pass the obj_type for lookup")
+
+        if kwargs.get('obj_name', None):
+            name = kwargs.pop('obj_name')
+            try:
+                display.v("Fetching obj: %s of type: %s" % (name, path))
+                rsp_data = avi.get_object_by_name(path, name, **kwargs)
+                if rsp_data:
+                    # Append the return data only if it is not None, i.e. an
+                    # object with the specified name is present
+                    rsp.append(rsp_data)
+            except AviServerError as e:
+                raise AnsibleError(to_native(e))
+        elif kwargs.get('obj_uuid', None):
+            obj_uuid = kwargs.pop('obj_uuid')
+            obj_path = "%s/%s" % (path, obj_uuid)
+            display.v("Fetching obj: %s of type: %s" % (obj_uuid, path))
+            rsp = _api(avi, obj_path, **kwargs)
+        else:
+            display.v("Fetching all objects of type: %s" % path)
+            rsp = _api(avi, path, **kwargs)
+
+        return rsp
diff --git a/plugins/lookup/cartesian.py b/plugins/lookup/cartesian.py
new file mode 100644
index 0000000000..19b08793bc
--- /dev/null
+++ b/plugins/lookup/cartesian.py
@@ -0,0 +1,73 @@
+# (c) 2013, Bradley Young
+# (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+    lookup: cartesian
+    short_description: returns the cartesian product of lists
+    description:
+        - Takes the input lists and returns a list that represents the product of the input lists.
+        - It is clearer with an example; it turns [1, 2, 3], [a, b] into [1, a], [1, b], [2, a], [2, b], [3, a], [3, b].
+          You can see the exact syntax in the examples section.
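The cartesian description above can be checked outside Ansible: run() (further down) is essentially itertools.product with each resulting tuple flattened back into a list. A quick plain-Python sketch of the same transformation:

    from itertools import product

    list1, list2 = [1, 2, 3], ['a', 'b']
    print([list(pair) for pair in product(list1, list2)])
    # [[1, 'a'], [1, 'b'], [2, 'a'], [2, 'b'], [3, 'a'], [3, 'b']]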
+ options: + _raw: + description: + - a set of lists + required: True +''' + +EXAMPLES = """ +- name: Example of the change in the description + debug: msg="{{ [1,2,3]|lookup('cartesian', [a, b])}}" + +- name: loops over the cartesian product of the supplied lists + debug: msg="{{item}}" + with_cartesian: + - "{{list1}}" + - "{{list2}}" + - [1,2,3,4,5,6] +""" + +RETURN = """ + _list: + description: + - list of lists composed of elements of the input lists + type: lists +""" + +from itertools import product + +from ansible.errors import AnsibleError +from ansible.plugins.lookup import LookupBase +from ansible.utils.listify import listify_lookup_plugin_terms + + +class LookupModule(LookupBase): + """ + Create the cartesian product of lists + """ + + def _lookup_variables(self, terms): + """ + Turn this: + terms == ["1,2,3", "a,b"] + into this: + terms == [[1,2,3], [a, b]] + """ + results = [] + for x in terms: + intermediate = listify_lookup_plugin_terms(x, templar=self._templar, loader=self._loader) + results.append(intermediate) + return results + + def run(self, terms, variables=None, **kwargs): + + terms = self._lookup_variables(terms) + + my_list = terms[:] + if len(my_list) == 0: + raise AnsibleError("with_cartesian requires at least one element in each list") + + return [self._flatten(x) for x in product(*my_list)] diff --git a/plugins/lookup/chef_databag.py b/plugins/lookup/chef_databag.py new file mode 100644 index 0000000000..2d831731ef --- /dev/null +++ b/plugins/lookup/chef_databag.py @@ -0,0 +1,101 @@ +# (c) 2016, Josh Bradley +# (c) 2017 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = ''' + lookup: chef_databag + short_description: fetches data from a Chef Databag + description: + - "This is a lookup plugin to provide access to chef data bags using the pychef package. + It interfaces with the chef server api using the same methods to find a knife or chef-client config file to load parameters from, + starting from either the given base path or the current working directory. 
+ The lookup order mirrors the one from Chef, all folders in the base path are walked back looking for the following configuration + file in order : .chef/knife.rb, ~/.chef/knife.rb, /etc/chef/client.rb" + requirements: + - "pychef (python library https://pychef.readthedocs.io `pip install pychef`)" + options: + name: + description: + - Name of the databag + required: True + item: + description: + - Item to fetch + required: True +''' + +EXAMPLES = """ + - debug: + msg: "{{ lookup('chef_databag', 'name=data_bag_name item=data_bag_item') }}" +""" + +RETURN = """ + _raw: + description: + - The value from the databag +""" + +from ansible.errors import AnsibleError +from ansible.plugins.lookup import LookupBase +from ansible.parsing.splitter import parse_kv + +try: + import chef + HAS_CHEF = True +except ImportError as missing_module: + HAS_CHEF = False + + +class LookupModule(LookupBase): + """ + Chef data bag lookup module + """ + def __init__(self, loader=None, templar=None, **kwargs): + + super(LookupModule, self).__init__(loader, templar, **kwargs) + + # setup vars for data bag name and data bag item + self.name = None + self.item = None + + def parse_kv_args(self, args): + """ + parse key-value style arguments + """ + + for arg in ["name", "item"]: + try: + arg_raw = args.pop(arg, None) + if arg_raw is None: + continue + parsed = str(arg_raw) + setattr(self, arg, parsed) + except ValueError: + raise AnsibleError( + "can't parse arg {0}={1} as string".format(arg, arg_raw) + ) + if args: + raise AnsibleError( + "unrecognized arguments to with_sequence: %r" % args.keys() + ) + + def run(self, terms, variables=None, **kwargs): + # Ensure pychef has been loaded + if not HAS_CHEF: + raise AnsibleError('PyChef needed for lookup plugin, try `pip install pychef`') + + for term in terms: + self.parse_kv_args(parse_kv(term)) + + api_object = chef.autoconfigure() + + if not isinstance(api_object, chef.api.ChefAPI): + raise AnsibleError('Unable to connect to Chef Server API.') + + data_bag_object = chef.DataBag(self.name) + + data_bag_item = data_bag_object[self.item] + + return [dict(data_bag_item)] diff --git a/plugins/lookup/conjur_variable.py b/plugins/lookup/conjur_variable.py new file mode 100644 index 0000000000..2e7cc1509e --- /dev/null +++ b/plugins/lookup/conjur_variable.py @@ -0,0 +1,163 @@ +# (c) 2018, Jason Vanderhoof , Oren Ben Meir +# (c) 2018 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' + lookup: conjur_variable + short_description: Fetch credentials from CyberArk Conjur. + description: + - "Retrieves credentials from Conjur using the controlling host's Conjur identity. Conjur info: U(https://www.conjur.org/)." + requirements: + - 'The controlling host running Ansible has a Conjur identity. + (More: U(https://docs.conjur.org/Latest/en/Content/Get%20Started/key_concepts/machine_identity.html))' + options: + _term: + description: Variable path + required: True + identity_file: + description: Path to the Conjur identity file. The identity file follows the netrc file format convention. + type: path + default: /etc/conjur.identity + required: False + ini: + - section: conjur, + key: identity_file_path + env: + - name: CONJUR_IDENTITY_FILE + config_file: + description: Path to the Conjur configuration file. 
The configuration file is a YAML file. + type: path + default: /etc/conjur.conf + required: False + ini: + - section: conjur, + key: config_file_path + env: + - name: CONJUR_CONFIG_FILE +''' + +EXAMPLES = """ + - debug: + msg: "{{ lookup('conjur_variable', '/path/to/secret') }}" +""" + +RETURN = """ + _raw: + description: + - Value stored in Conjur. +""" + +import os.path +from ansible.errors import AnsibleError +from ansible.plugins.lookup import LookupBase +from base64 import b64encode +from netrc import netrc +from os import environ +from time import time +from ansible.module_utils.six.moves.urllib.parse import quote_plus +import yaml + +from ansible.module_utils.urls import open_url +from ansible.utils.display import Display + +display = Display() + + +# Load configuration and return as dictionary if file is present on file system +def _load_conf_from_file(conf_path): + display.vvv('conf file: {0}'.format(conf_path)) + + if not os.path.exists(conf_path): + raise AnsibleError('Conjur configuration file `{0}` was not found on the controlling host' + .format(conf_path)) + + display.vvvv('Loading configuration from: {0}'.format(conf_path)) + with open(conf_path) as f: + config = yaml.safe_load(f.read()) + if 'account' not in config or 'appliance_url' not in config: + raise AnsibleError('{0} on the controlling host must contain an `account` and `appliance_url` entry' + .format(conf_path)) + return config + + +# Load identity and return as dictionary if file is present on file system +def _load_identity_from_file(identity_path, appliance_url): + display.vvvv('identity file: {0}'.format(identity_path)) + + if not os.path.exists(identity_path): + raise AnsibleError('Conjur identity file `{0}` was not found on the controlling host' + .format(identity_path)) + + display.vvvv('Loading identity from: {0} for {1}'.format(identity_path, appliance_url)) + + conjur_authn_url = '{0}/authn'.format(appliance_url) + identity = netrc(identity_path) + + if identity.authenticators(conjur_authn_url) is None: + raise AnsibleError('The netrc file on the controlling host does not contain an entry for: {0}' + .format(conjur_authn_url)) + + id, account, api_key = identity.authenticators(conjur_authn_url) + if not id or not api_key: + raise AnsibleError('{0} on the controlling host must contain a `login` and `password` entry for {1}' + .format(identity_path, appliance_url)) + + return {'id': id, 'api_key': api_key} + + +# Use credentials to retrieve temporary authorization token +def _fetch_conjur_token(conjur_url, account, username, api_key): + conjur_url = '{0}/authn/{1}/{2}/authenticate'.format(conjur_url, account, username) + display.vvvv('Authentication request to Conjur at: {0}, with user: {1}'.format(conjur_url, username)) + + response = open_url(conjur_url, data=api_key, method='POST') + code = response.getcode() + if code != 200: + raise AnsibleError('Failed to authenticate as \'{0}\' (got {1} response)' + .format(username, code)) + + return response.read() + + +# Retrieve Conjur variable using the temporary token +def _fetch_conjur_variable(conjur_variable, token, conjur_url, account): + token = b64encode(token) + headers = {'Authorization': 'Token token="{0}"'.format(token)} + display.vvvv('Header: {0}'.format(headers)) + + url = '{0}/secrets/{1}/variable/{2}'.format(conjur_url, account, quote_plus(conjur_variable)) + display.vvvv('Conjur Variable URL: {0}'.format(url)) + + response = open_url(url, headers=headers, method='GET') + + if response.getcode() == 200: + display.vvvv('Conjur variable {0} was 
successfully retrieved'.format(conjur_variable))
+        return [response.read()]
+    if response.getcode() == 401:
+        raise AnsibleError('Conjur request has invalid authorization credentials')
+    if response.getcode() == 403:
+        raise AnsibleError('The controlling host\'s Conjur identity does not have authorization to retrieve {0}'
+                           .format(conjur_variable))
+    if response.getcode() == 404:
+        raise AnsibleError('The variable {0} does not exist'.format(conjur_variable))
+
+    # Any other status code: return an empty list, matching the success type
+    return []
+
+
+class LookupModule(LookupBase):
+
+    def run(self, terms, variables=None, **kwargs):
+        conf_file = self.get_option('config_file')
+        conf = _load_conf_from_file(conf_file)
+
+        identity_file = self.get_option('identity_file')
+        identity = _load_identity_from_file(identity_file, conf['appliance_url'])
+
+        token = _fetch_conjur_token(conf['appliance_url'], conf['account'], identity['id'], identity['api_key'])
+        return _fetch_conjur_variable(terms[0], token, conf['appliance_url'], conf['account'])
diff --git a/plugins/lookup/consul_kv.py b/plugins/lookup/consul_kv.py
new file mode 100644
index 0000000000..ec103193af
--- /dev/null
+++ b/plugins/lookup/consul_kv.py
@@ -0,0 +1,181 @@
+# (c) 2015, Steve Gargan
+# (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+DOCUMENTATION = '''
+    lookup: consul_kv
+    short_description: Fetch metadata from a Consul key value store.
+    description:
+      - Lookup metadata for a playbook from the key value store in a Consul cluster.
+        Values can be easily set in the kv store with simple REST commands
+      - C(curl -X PUT -d 'some-value' http://localhost:8500/v1/kv/ansible/somedata)
+    requirements:
+      - 'python-consul python library U(https://python-consul.readthedocs.io/en/latest/#installation)'
+    options:
+      _raw:
+        description: List of key(s) to retrieve.
+        type: list
+        required: True
+      recurse:
+        type: boolean
+        description: If true, will retrieve all the values that have the given key as prefix.
+        default: False
+      index:
+        description:
+          - If the key has a value with the specified index then this is returned allowing access to historical values.
+      datacenter:
+        description:
+          - Retrieve the key from a consul datacenter other than the default for the consul host.
+      token:
+        description: The acl token to allow access to restricted values.
+      host:
+        default: localhost
+        description:
+          - The target to connect to, must be a resolvable address.
+            Will be determined from C(ANSIBLE_CONSUL_URL) if that is set.
+          - "C(ANSIBLE_CONSUL_URL) should look like this: C(https://my.consul.server:8500)"
+        env:
+          - name: ANSIBLE_CONSUL_URL
+        ini:
+          - section: lookup_consul
+            key: host
+      port:
+        description:
+          - The port of the target host to connect to.
+          - If you use C(ANSIBLE_CONSUL_URL) this value will be used from there.
+        default: 8500
+      scheme:
+        default: http
+        description:
+          - Whether to use http or https.
+          - If you use C(ANSIBLE_CONSUL_URL) this value will be used from there.
+      validate_certs:
+        default: True
+        description: Whether to verify the ssl connection or not.
+        env:
+          - name: ANSIBLE_CONSUL_VALIDATE_CERTS
+        ini:
+          - section: lookup_consul
+            key: validate_certs
+      client_cert:
+        description: The client cert to verify the ssl connection.
diff --git a/plugins/lookup/consul_kv.py b/plugins/lookup/consul_kv.py
new file mode 100644
index 0000000000..ec103193af
--- /dev/null
+++ b/plugins/lookup/consul_kv.py
@@ -0,0 +1,181 @@
+# (c) 2015, Steve Gargan
+# (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+DOCUMENTATION = '''
+    lookup: consul_kv
+    short_description: Fetch metadata from a Consul key-value store.
+    description:
+      - Lookup metadata for a playbook from the key-value store in a Consul cluster.
+        Values can be easily set in the kv store with simple REST commands
+      - C(curl -X PUT -d 'some-value' http://localhost:8500/v1/kv/ansible/somedata)
+    requirements:
+      - 'python-consul python library U(https://python-consul.readthedocs.io/en/latest/#installation)'
+    options:
+      _raw:
+        description: List of key(s) to retrieve.
+        type: list
+        required: True
+      recurse:
+        type: boolean
+        description: If true, will retrieve all the values that have the given key as prefix.
+        default: False
+      index:
+        description:
+          - If the key has a value with the specified index then this is returned allowing access to historical values.
+      datacenter:
+        description:
+          - Retrieve the key from a Consul datacenter other than the default for the Consul host.
+      token:
+        description: The acl token to allow access to restricted values.
+      host:
+        default: localhost
+        description:
+          - The target to connect to, must be a resolvable address.
+            Will be determined from C(ANSIBLE_CONSUL_URL) if that is set.
+          - "C(ANSIBLE_CONSUL_URL) should look like this: C(https://my.consul.server:8500)"
+        env:
+          - name: ANSIBLE_CONSUL_URL
+        ini:
+          - section: lookup_consul
+            key: host
+      port:
+        description:
+          - The port of the target host to connect to.
+          - If you use C(ANSIBLE_CONSUL_URL) this value will be used from there.
+        default: 8500
+      scheme:
+        default: http
+        description:
+          - Whether to use http or https.
+          - If you use C(ANSIBLE_CONSUL_URL) this value will be used from there.
+      validate_certs:
+        default: True
+        description: Whether to verify the ssl connection or not.
+        env:
+          - name: ANSIBLE_CONSUL_VALIDATE_CERTS
+        ini:
+          - section: lookup_consul
+            key: validate_certs
+      client_cert:
+        description: The client cert to verify the ssl connection.
+        env:
+          - name: ANSIBLE_CONSUL_CLIENT_CERT
+        ini:
+          - section: lookup_consul
+            key: client_cert
+'''
+
+EXAMPLES = """
+  - debug:
+      msg: 'key contains {{item}}'
+    with_consul_kv:
+      - 'key/to/retrieve'
+
+  - name: Parameters can be provided after the key to be more specific about what to retrieve
+    debug:
+      msg: 'key contains {{item}}'
+    with_consul_kv:
+      - 'key/to recurse=true token=E6C060A9-26FB-407A-B83E-12DDAFCB4D98'
+
+  - name: retrieving a KV from a remote cluster on a non-default port
+    debug:
+      msg: "{{ lookup('consul_kv', 'my/key', host='10.10.10.10', port='2000') }}"
+"""
+
+RETURN = """
+  _raw:
+    description:
+      - Value(s) stored in consul.
+"""
+
+import os
+from ansible.module_utils.six.moves.urllib.parse import urlparse
+from ansible.errors import AnsibleError, AnsibleAssertionError
+from ansible.plugins.lookup import LookupBase
+from ansible.module_utils._text import to_text
+
+try:
+    import consul
+
+    HAS_CONSUL = True
+except ImportError as e:
+    HAS_CONSUL = False
+
+
+class LookupModule(LookupBase):
+
+    def run(self, terms, variables=None, **kwargs):
+
+        if not HAS_CONSUL:
+            raise AnsibleError(
+                'python-consul is required for consul_kv lookup. see http://python-consul.readthedocs.org/en/latest/#installation')
+
+        values = []
+        try:
+            for term in terms:
+                params = self.parse_params(term)
+                try:
+                    url = os.environ['ANSIBLE_CONSUL_URL']
+                    validate_certs = os.environ.get('ANSIBLE_CONSUL_VALIDATE_CERTS') or True
+                    client_cert = os.environ.get('ANSIBLE_CONSUL_CLIENT_CERT') or None
+                    u = urlparse(url)
+                    consul_api = consul.Consul(host=u.hostname, port=u.port, scheme=u.scheme, verify=validate_certs,
+                                               cert=client_cert)
+                except KeyError:
+                    port = kwargs.get('port', '8500')
+                    host = kwargs.get('host', 'localhost')
+                    scheme = kwargs.get('scheme', 'http')
+                    validate_certs = kwargs.get('validate_certs', True)
+                    client_cert = kwargs.get('client_cert', None)
+                    consul_api = consul.Consul(host=host, port=port, scheme=scheme, verify=validate_certs,
+                                               cert=client_cert)
+
+                results = consul_api.kv.get(params['key'],
+                                            token=params['token'],
+                                            index=params['index'],
+                                            recurse=params['recurse'],
+                                            dc=params['datacenter'])
+                if results[1]:
+                    # responds with a single or list of result maps
+                    if isinstance(results[1], list):
+                        for r in results[1]:
+                            values.append(to_text(r['Value']))
+                    else:
+                        values.append(to_text(results[1]['Value']))
+        except Exception as e:
+            raise AnsibleError(
+                "Error locating '%s' in kv store. Error was %s" % (term, e))
+
+        return values
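Each term handed to `run()` is a key followed by optional space-separated `name=value` overrides, which `parse_params()` (below) folds into its defaults. A minimal illustration of what that split produces; the key and token are invented:

```python
# Illustrative term: the first token is the key, the rest are overrides.
term = 'key/to/retrieve recurse=true token=E6C060A9-26FB-407A-B83E-12DDAFCB4D98'
params = term.split(' ')
overrides = dict(p.split('=') for p in params[1:])
# params[0]  == 'key/to/retrieve'
# overrides  == {'recurse': 'true', 'token': 'E6C060A9-...'}
# Note the values stay strings: 'recurse=true' works because the string is truthy.
```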
+    def parse_params(self, term):
+        params = term.split(' ')
+
+        paramvals = {
+            'key': params[0],
+            'token': None,
+            'recurse': False,
+            'index': None,
+            'datacenter': None
+        }
+
+        # parameters specified?
+        try:
+            for param in params[1:]:
+                if param and len(param) > 0:
+                    name, value = param.split('=')
+                    if name not in paramvals:
+                        raise AnsibleAssertionError("%s not a valid consul lookup parameter" % name)
+                    paramvals[name] = value
+        except (ValueError, AssertionError) as e:
+            raise AnsibleError(e)
+
+        return paramvals
diff --git a/plugins/lookup/credstash.py b/plugins/lookup/credstash.py
new file mode 100644
index 0000000000..b712247e10
--- /dev/null
+++ b/plugins/lookup/credstash.py
@@ -0,0 +1,118 @@
+# (c) 2015, Ensighten
+# (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+    lookup: credstash
+    short_description: retrieve secrets from Credstash on AWS
+    requirements:
+      - credstash (python library)
+    description:
+      - "Credstash is a small utility for managing secrets using AWS's KMS and DynamoDB: https://github.com/fugue/credstash"
+    options:
+      _terms:
+        description: term or list of terms to lookup in the credential store
+        type: list
+        required: True
+      table:
+        description: name of the credstash table to query
+        default: 'credential-store'
+      version:
+        description: Credstash version
+      region:
+        description: AWS region
+      profile_name:
+        description: AWS profile to use for authentication
+        env:
+          - name: AWS_PROFILE
+      aws_access_key_id:
+        description: AWS access key ID
+        env:
+          - name: AWS_ACCESS_KEY_ID
+      aws_secret_access_key:
+        description: AWS secret access key
+        env:
+          - name: AWS_SECRET_ACCESS_KEY
+      aws_session_token:
+        description: AWS session token
+        env:
+          - name: AWS_SESSION_TOKEN
+'''
+
+EXAMPLES = """
+- name: first use credstash to store your secrets
+  shell: credstash put my-github-password secure123
+
+- name: "Test credstash lookup plugin -- get my github password"
+  debug: msg="Credstash lookup! {{ lookup('credstash', 'my-github-password') }}"
+
+- name: "Test credstash lookup plugin -- get my other password from us-west-1"
+  debug: msg="Credstash lookup! {{ lookup('credstash', 'my-other-password', region='us-west-1') }}"
+
+- name: "Test credstash lookup plugin -- get the company's github password"
+  debug: msg="Credstash lookup! 
{{ lookup('credstash', 'company-github-password', table='company-passwords') }}" + +- name: Example play using the 'context' feature + hosts: localhost + vars: + context: + app: my_app + environment: production + tasks: + + - name: "Test credstash lookup plugin -- get the password with a context passed as a variable" + debug: msg="{{ lookup('credstash', 'some-password', context=context) }}" + + - name: "Test credstash lookup plugin -- get the password with a context defined here" + debug: msg="{{ lookup('credstash', 'some-password', context=dict(app='my_app', environment='production')) }}" +""" + +RETURN = """ + _raw: + description: + - value(s) stored in Credstash +""" + +import os + +from ansible.errors import AnsibleError +from ansible.plugins.lookup import LookupBase + +CREDSTASH_INSTALLED = False + +try: + import credstash + CREDSTASH_INSTALLED = True +except ImportError: + CREDSTASH_INSTALLED = False + + +class LookupModule(LookupBase): + def run(self, terms, variables, **kwargs): + + if not CREDSTASH_INSTALLED: + raise AnsibleError('The credstash lookup plugin requires credstash to be installed.') + + ret = [] + for term in terms: + try: + version = kwargs.pop('version', '') + region = kwargs.pop('region', None) + table = kwargs.pop('table', 'credential-store') + profile_name = kwargs.pop('profile_name', os.getenv('AWS_PROFILE', None)) + aws_access_key_id = kwargs.pop('aws_access_key_id', os.getenv('AWS_ACCESS_KEY_ID', None)) + aws_secret_access_key = kwargs.pop('aws_secret_access_key', os.getenv('AWS_SECRET_ACCESS_KEY', None)) + aws_session_token = kwargs.pop('aws_session_token', os.getenv('AWS_SESSION_TOKEN', None)) + kwargs_pass = {'profile_name': profile_name, 'aws_access_key_id': aws_access_key_id, + 'aws_secret_access_key': aws_secret_access_key, 'aws_session_token': aws_session_token} + val = credstash.getSecret(term, version, region, table, context=kwargs, **kwargs_pass) + except credstash.ItemNotFound: + raise AnsibleError('Key {0} not found'.format(term)) + except Exception as e: + raise AnsibleError('Encountered exception while fetching {0}: {1}'.format(term, e)) + ret.append(val) + + return ret diff --git a/plugins/lookup/cyberarkpassword.py b/plugins/lookup/cyberarkpassword.py new file mode 100644 index 0000000000..8da79850f7 --- /dev/null +++ b/plugins/lookup/cyberarkpassword.py @@ -0,0 +1,179 @@ +# (c) 2017, Edward Nunez +# (c) 2017 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = ''' + lookup: cyberarkpassword + short_description: get secrets from CyberArk AIM + requirements: + - CyberArk AIM tool installed + description: + - Get secrets from CyberArk AIM. + options : + _command: + description: Cyberark CLI utility. + env: + - name: AIM_CLIPASSWORDSDK_CMD + default: '/opt/CARKaim/sdk/clipasswordsdk' + appid: + description: Defines the unique ID of the application that is issuing the password request. + required: True + query: + description: Describes the filter criteria for the password retrieval. + required: True + output: + description: + - Specifies the desired output fields separated by commas. 
+ - "They could be: Password, PassProps., PasswordChangeInProcess" + default: 'password' + _extra: + description: for extra_parms values please check parameters for clipasswordsdk in CyberArk's "Credential Provider and ASCP Implementation Guide" + note: + - For Ansible on windows, please change the -parameters (-p, -d, and -o) to /parameters (/p, /d, and /o) and change the location of CLIPasswordSDK.exe +''' + +EXAMPLES = """ + - name: passing options to the lookup + debug: msg={{ lookup("cyberarkpassword", cyquery)}} + vars: + cyquery: + appid: "app_ansible" + query: "safe=CyberArk_Passwords;folder=root;object=AdminPass" + output: "Password,PassProps.UserName,PassProps.Address,PasswordChangeInProcess" + + + - name: used in a loop + debug: msg={{item}} + with_cyberarkpassword: + appid: 'app_ansible' + query: 'safe=CyberArk_Passwords;folder=root;object=AdminPass' + output: 'Password,PassProps.UserName,PassProps.Address,PasswordChangeInProcess' +""" + +RETURN = """ + password: + description: + - The actual value stored + passprops: + description: properties assigned to the entry + type: dictionary + passwordchangeinprocess: + description: did the password change? +""" + +import os +import subprocess +from subprocess import PIPE +from subprocess import Popen + +from ansible.errors import AnsibleError +from ansible.plugins.lookup import LookupBase +from ansible.parsing.splitter import parse_kv +from ansible.module_utils._text import to_bytes, to_text, to_native +from ansible.utils.display import Display + +display = Display() + +CLIPASSWORDSDK_CMD = os.getenv('AIM_CLIPASSWORDSDK_CMD', '/opt/CARKaim/sdk/clipasswordsdk') + + +class CyberarkPassword: + + def __init__(self, appid=None, query=None, output=None, **kwargs): + + self.appid = appid + self.query = query + self.output = output + + # Support for Generic parameters to be able to specify + # FailRequestOnPasswordChange, Queryformat, Reason, etc. + self.extra_parms = [] + for key, value in kwargs.items(): + self.extra_parms.append('-p') + self.extra_parms.append("%s=%s" % (key, value)) + + if self.appid is None: + raise AnsibleError("CyberArk Error: No Application ID specified") + if self.query is None: + raise AnsibleError("CyberArk Error: No Vault query specified") + + if self.output is None: + # If no output is specified, return at least the password + self.output = "password" + else: + # To avoid reference issues/confusion to values, all + # output 'keys' will be in lowercase. 
+ self.output = self.output.lower() + + self.b_delimiter = b"@#@" # Known delimiter to split output results + + def get(self): + + result_dict = {} + + try: + all_parms = [ + CLIPASSWORDSDK_CMD, + 'GetPassword', + '-p', 'AppDescs.AppID=%s' % self.appid, + '-p', 'Query=%s' % self.query, + '-o', self.output, + '-d', self.b_delimiter] + all_parms.extend(self.extra_parms) + + b_credential = b"" + b_all_params = [to_bytes(v) for v in all_parms] + tmp_output, tmp_error = Popen(b_all_params, stdout=PIPE, stderr=PIPE, stdin=PIPE).communicate() + + if tmp_output: + b_credential = to_bytes(tmp_output) + + if tmp_error: + raise AnsibleError("ERROR => %s " % (tmp_error)) + + if b_credential and b_credential.endswith(b'\n'): + b_credential = b_credential[:-1] + + output_names = self.output.split(",") + output_values = b_credential.split(self.b_delimiter) + + for i in range(len(output_names)): + if output_names[i].startswith("passprops."): + if "passprops" not in result_dict: + result_dict["passprops"] = {} + output_prop_name = output_names[i][10:] + result_dict["passprops"][output_prop_name] = to_native(output_values[i]) + else: + result_dict[output_names[i]] = to_native(output_values[i]) + + except subprocess.CalledProcessError as e: + raise AnsibleError(e.output) + except OSError as e: + raise AnsibleError("ERROR - AIM not installed or clipasswordsdk not in standard location. ERROR=(%s) => %s " % (to_text(e.errno), e.strerror)) + + return [result_dict] + + +class LookupModule(LookupBase): + + """ + USAGE: + + """ + + def run(self, terms, variables=None, **kwargs): + + display.vvvv("%s" % terms) + if isinstance(terms, list): + return_values = [] + for term in terms: + display.vvvv("Term: %s" % term) + cyberark_conn = CyberarkPassword(**term) + return_values.append(cyberark_conn.get()) + return return_values + else: + cyberark_conn = CyberarkPassword(**terms) + result = cyberark_conn.get() + return result diff --git a/plugins/lookup/dig.py b/plugins/lookup/dig.py new file mode 100644 index 0000000000..29c2563903 --- /dev/null +++ b/plugins/lookup/dig.py @@ -0,0 +1,301 @@ +# (c) 2015, Jan-Piet Mens +# (c) 2017 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = ''' + lookup: dig + author: Jan-Piet Mens (@jpmens) + short_description: query DNS using the dnspython library + requirements: + - dnspython (python library, http://www.dnspython.org/) + description: + - The dig lookup runs queries against DNS servers to retrieve DNS records for a specific name (FQDN - fully qualified domain name). + It is possible to lookup any DNS record in this manner. + - There is a couple of different syntaxes that can be used to specify what record should be retrieved, and for which name. + It is also possible to explicitly specify the DNS server(s) to use for lookups. + - In its simplest form, the dig lookup plugin can be used to retrieve an IPv4 address (DNS A record) associated with FQDN + - In addition to (default) A record, it is also possible to specify a different record type that should be queried. + This can be done by either passing-in additional parameter of format qtype=TYPE to the dig lookup, or by appending /TYPE to the FQDN being queried. + - If multiple values are associated with the requested record, the results will be returned as a comma-separated list. 
+ In such cases you may want to pass option wantlist=True to the plugin, which will result in the record values being returned as a list + over which you can iterate later on. + - By default, the lookup will rely on system-wide configured DNS servers for performing the query. + It is also possible to explicitly specify DNS servers to query using the @DNS_SERVER_1,DNS_SERVER_2,...,DNS_SERVER_N notation. + This needs to be passed-in as an additional parameter to the lookup + options: + _terms: + description: domain(s) to query + qtype: + description: record type to query + default: 'A' + choices: [A, ALL, AAAA, CNAME, DNAME, DLV, DNSKEY, DS, HINFO, LOC, MX, NAPTR, NS, NSEC3PARAM, PTR, RP, RRSIG, SOA, SPF, SRV, SSHFP, TLSA, TXT] + flat: + description: If 0 each record is returned as a dictionary, otherwise a string + default: 1 + notes: + - ALL is not a record per-se, merely the listed fields are available for any record results you retrieve in the form of a dictionary. + - While the 'dig' lookup plugin supports anything which dnspython supports out of the box, only a subset can be converted into a dictionary. + - If you need to obtain the AAAA record (IPv6 address), you must specify the record type explicitly. + Syntax for specifying the record type is shown in the examples below. + - The trailing dot in most of the examples listed is purely optional, but is specified for completeness/correctness sake. +''' + +EXAMPLES = """ +- name: Simple A record (IPV4 address) lookup for example.com + debug: msg="{{ lookup('dig', 'example.com.')}}" + +- name: "The TXT record for example.org." + debug: msg="{{ lookup('dig', 'example.org.', 'qtype=TXT') }}" + +- name: "The TXT record for example.org, alternative syntax." + debug: msg="{{ lookup('dig', 'example.org./TXT') }}" + +- name: use in a loop + debug: msg="MX record for gmail.com {{ item }}" + with_items: "{{ lookup('dig', 'gmail.com./MX', wantlist=True) }}" + +- debug: msg="Reverse DNS for 192.0.2.5 is {{ lookup('dig', '192.0.2.5/PTR') }}" +- debug: msg="Reverse DNS for 192.0.2.5 is {{ lookup('dig', '5.2.0.192.in-addr.arpa./PTR') }}" +- debug: msg="Reverse DNS for 192.0.2.5 is {{ lookup('dig', '5.2.0.192.in-addr.arpa.', 'qtype=PTR') }}" +- debug: msg="Querying 198.51.100.23 for IPv4 address for example.com. produces {{ lookup('dig', 'example.com', '@198.51.100.23') }}" + +- debug: msg="XMPP service for gmail.com. 
is available at {{ item.target }} on port {{ item.port }}"
+  with_items: "{{ lookup('dig', '_xmpp-server._tcp.gmail.com./SRV', 'flat=0', wantlist=True) }}"
+"""
+
+RETURN = """
+    _list:
+        description:
+            - List of composed strings or dictionaries with key and value.
+              If a dictionary, the C(fields) map below shows the keys returned for each query type
+        fields:
+            ALL: owner, ttl, type
+            A: address
+            AAAA: address
+            CNAME: target
+            DNAME: target
+            DLV: algorithm, digest_type, key_tag, digest
+            DNSKEY: flags, algorithm, protocol, key
+            DS: algorithm, digest_type, key_tag, digest
+            HINFO: cpu, os
+            LOC: latitude, longitude, altitude, size, horizontal_precision, vertical_precision
+            MX: preference, exchange
+            NAPTR: order, preference, flags, service, regexp, replacement
+            NS: target
+            NSEC3PARAM: algorithm, flags, iterations, salt
+            PTR: target
+            RP: mbox, txt
+            SOA: mname, rname, serial, refresh, retry, expire, minimum
+            SPF: strings
+            SRV: priority, weight, port, target
+            SSHFP: algorithm, fp_type, fingerprint
+            TLSA: usage, selector, mtype, cert
+            TXT: strings
+"""
+
+from ansible.errors import AnsibleError
+from ansible.plugins.lookup import LookupBase
+from ansible.module_utils._text import to_native
+import socket
+
+try:
+    import dns.exception
+    import dns.name
+    import dns.resolver
+    import dns.reversename
+    import dns.rdataclass
+    from dns.rdatatype import (A, AAAA, CNAME, DLV, DNAME, DNSKEY, DS, HINFO, LOC,
+                               MX, NAPTR, NS, NSEC3PARAM, PTR, RP, SOA, SPF, SRV, SSHFP, TLSA, TXT)
+    HAVE_DNS = True
+except ImportError:
+    HAVE_DNS = False
+
+
+def make_rdata_dict(rdata):
+    ''' While the 'dig' lookup plugin supports anything which dnspython supports
+        out of the box, the following supported_types list describes which
+        DNS query types we can convert to a dict.
+
+        Note: adding support for RRSIG is hard work. 
:) + ''' + supported_types = { + A: ['address'], + AAAA: ['address'], + CNAME: ['target'], + DNAME: ['target'], + DLV: ['algorithm', 'digest_type', 'key_tag', 'digest'], + DNSKEY: ['flags', 'algorithm', 'protocol', 'key'], + DS: ['algorithm', 'digest_type', 'key_tag', 'digest'], + HINFO: ['cpu', 'os'], + LOC: ['latitude', 'longitude', 'altitude', 'size', 'horizontal_precision', 'vertical_precision'], + MX: ['preference', 'exchange'], + NAPTR: ['order', 'preference', 'flags', 'service', 'regexp', 'replacement'], + NS: ['target'], + NSEC3PARAM: ['algorithm', 'flags', 'iterations', 'salt'], + PTR: ['target'], + RP: ['mbox', 'txt'], + # RRSIG: ['algorithm', 'labels', 'original_ttl', 'expiration', 'inception', 'signature'], + SOA: ['mname', 'rname', 'serial', 'refresh', 'retry', 'expire', 'minimum'], + SPF: ['strings'], + SRV: ['priority', 'weight', 'port', 'target'], + SSHFP: ['algorithm', 'fp_type', 'fingerprint'], + TLSA: ['usage', 'selector', 'mtype', 'cert'], + TXT: ['strings'], + } + + rd = {} + + if rdata.rdtype in supported_types: + fields = supported_types[rdata.rdtype] + for f in fields: + val = rdata.__getattribute__(f) + + if isinstance(val, dns.name.Name): + val = dns.name.Name.to_text(val) + + if rdata.rdtype == DLV and f == 'digest': + val = dns.rdata._hexify(rdata.digest).replace(' ', '') + if rdata.rdtype == DS and f == 'digest': + val = dns.rdata._hexify(rdata.digest).replace(' ', '') + if rdata.rdtype == DNSKEY and f == 'key': + val = dns.rdata._base64ify(rdata.key).replace(' ', '') + if rdata.rdtype == NSEC3PARAM and f == 'salt': + val = dns.rdata._hexify(rdata.salt).replace(' ', '') + if rdata.rdtype == SSHFP and f == 'fingerprint': + val = dns.rdata._hexify(rdata.fingerprint).replace(' ', '') + if rdata.rdtype == TLSA and f == 'cert': + val = dns.rdata._hexify(rdata.cert).replace(' ', '') + + rd[f] = val + + return rd + + +# ============================================================== +# dig: Lookup DNS records +# +# -------------------------------------------------------------- + +class LookupModule(LookupBase): + + def run(self, terms, variables=None, **kwargs): + + ''' + terms contains a string with things to `dig' for. We support the + following formats: + example.com # A record + example.com qtype=A # same + example.com/TXT # specific qtype + example.com qtype=txt # same + 192.0.2.23/PTR # reverse PTR + ^^ shortcut for 23.2.0.192.in-addr.arpa/PTR + example.net/AAAA @nameserver # query specified server + ^^^ can be comma-sep list of names/addresses + + ... flat=0 # returns a dict; default is 1 == string + ''' + + if HAVE_DNS is False: + raise AnsibleError("The dig lookup requires the python 'dnspython' library and it is not installed") + + # Create Resolver object so that we can set NS if necessary + myres = dns.resolver.Resolver(configure=True) + edns_size = 4096 + myres.use_edns(0, ednsflags=dns.flags.DO, payload=edns_size) + + domain = None + qtype = 'A' + flat = True + rdclass = dns.rdataclass.from_text('IN') + + for t in terms: + if t.startswith('@'): # e.g. "@10.0.1.2,192.0.2.1" is ok. + nsset = t[1:].split(',') + for ns in nsset: + nameservers = [] + # Check if we have a valid IP address. If so, use that, otherwise + # try to resolve name to address using system's resolver. If that + # fails we bail out. 
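That comment describes a two-step fallback: accept a literal IPv4 address as-is, otherwise resolve the nameserver's hostname first. A stand-alone sketch of the same check, with an illustrative nameserver name; the plugin's own version follows:

```python
# Stand-alone version of the IP-vs-hostname check (nameserver name is illustrative).
import socket
import dns.resolver

ns = 'ns1.example.com'
try:
    socket.inet_aton(ns)      # already a dotted-quad IPv4 address
    nameserver = ns
except socket.error:          # not an IP; resolve the name to an address first
    nameserver = dns.resolver.query(ns)[0].address
```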
+ try: + socket.inet_aton(ns) + nameservers.append(ns) + except Exception: + try: + nsaddr = dns.resolver.query(ns)[0].address + nameservers.append(nsaddr) + except Exception as e: + raise AnsibleError("dns lookup NS: %s" % to_native(e)) + myres.nameservers = nameservers + continue + if '=' in t: + try: + opt, arg = t.split('=') + except Exception: + pass + + if opt == 'qtype': + qtype = arg.upper() + elif opt == 'flat': + flat = int(arg) + elif opt == 'class': + try: + rdclass = dns.rdataclass.from_text(arg) + except Exception as e: + raise AnsibleError("dns lookup illegal CLASS: %s" % to_native(e)) + + continue + + if '/' in t: + try: + domain, qtype = t.split('/') + except Exception: + domain = t + else: + domain = t + + # print "--- domain = {0} qtype={1} rdclass={2}".format(domain, qtype, rdclass) + + ret = [] + + if qtype.upper() == 'PTR': + try: + n = dns.reversename.from_address(domain) + domain = n.to_text() + except dns.exception.SyntaxError: + pass + except Exception as e: + raise AnsibleError("dns.reversename unhandled exception %s" % to_native(e)) + + try: + answers = myres.query(domain, qtype, rdclass=rdclass) + for rdata in answers: + s = rdata.to_text() + if qtype.upper() == 'TXT': + s = s[1:-1] # Strip outside quotes on TXT rdata + + if flat: + ret.append(s) + else: + try: + rd = make_rdata_dict(rdata) + rd['owner'] = answers.canonical_name.to_text() + rd['type'] = dns.rdatatype.to_text(rdata.rdtype) + rd['ttl'] = answers.rrset.ttl + rd['class'] = dns.rdataclass.to_text(rdata.rdclass) + + ret.append(rd) + except Exception as e: + ret.append(str(e)) + + except dns.resolver.NXDOMAIN: + ret.append('NXDOMAIN') + except dns.resolver.NoAnswer: + ret.append("") + except dns.resolver.Timeout: + ret.append('') + except dns.exception.DNSException as e: + raise AnsibleError("dns.resolver unhandled exception %s" % to_native(e)) + + return ret diff --git a/plugins/lookup/dnstxt.py b/plugins/lookup/dnstxt.py new file mode 100644 index 0000000000..10f97bfc0e --- /dev/null +++ b/plugins/lookup/dnstxt.py @@ -0,0 +1,93 @@ +# (c) 2012, Jan-Piet Mens +# (c) 2017 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = ''' + lookup: dnstxt + author: Jan-Piet Mens (@jpmens) + short_description: query a domain(s)'s DNS txt fields + requirements: + - dns/dns.resolver (python library) + description: + - Uses a python library to return the DNS TXT record for a domain. + options: + _terms: + description: domain or list of domains to query TXT records from + required: True + type: list +''' + +EXAMPLES = """ +- name: show txt entry + debug: msg="{{lookup('dnstxt', ['test.example.com'])}}" + +- name: iterate over txt entries + debug: msg="{{item}}" + with_dnstxt: + - 'test.example.com' + - 'other.example.com' + - 'last.example.com' + +- name: iterate of a comma delimited DNS TXT entry + debug: msg="{{item}}" + with_dnstxt: "{{lookup('dnstxt', ['test.example.com']).split(',')}}" +""" + +RETURN = """ + _list: + description: + - values returned by the DNS TXT record. 
+        type: list
+"""
+
+HAVE_DNS = False
+try:
+    import dns.resolver
+    from dns.exception import DNSException
+    HAVE_DNS = True
+except ImportError:
+    pass
+
+from ansible.errors import AnsibleError
+from ansible.module_utils._text import to_native
+from ansible.plugins.lookup import LookupBase
+
+# ==============================================================
+# DNSTXT: DNS TXT records
+#
+#       key=domainname
+# TODO: configurable resolver IPs
+# --------------------------------------------------------------
+
+
+class LookupModule(LookupBase):
+
+    def run(self, terms, variables=None, **kwargs):
+
+        if HAVE_DNS is False:
+            raise AnsibleError("Can't LOOKUP(dnstxt): module dns.resolver is not installed")
+
+        ret = []
+        for term in terms:
+            domain = term.split()[0]
+            string = []
+            try:
+                answers = dns.resolver.query(domain, 'TXT')
+                for rdata in answers:
+                    s = rdata.to_text()
+                    string.append(s[1:-1])  # Strip outside quotes on TXT rdata
+
+            except dns.resolver.NXDOMAIN:
+                string = 'NXDOMAIN'
+            except dns.resolver.Timeout:
+                string = ''
+            except dns.resolver.NoAnswer:
+                string = ''
+            except DNSException as e:
+                raise AnsibleError("dns.resolver unhandled exception %s" % to_native(e))
+
+            ret.append(''.join(string))
+
+        return ret
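The core of that loop is a plain dnspython TXT query with the outer quotes stripped from each record. A minimal stand-alone equivalent, with an illustrative domain:

```python
# Minimal dnspython equivalent of the dnstxt loop above (domain is illustrative).
import dns.resolver

answers = dns.resolver.query('example.com', 'TXT')
txt = ''.join(rdata.to_text()[1:-1] for rdata in answers)  # strip the outer quotes
print(txt)
```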
diff --git a/plugins/lookup/etcd.py b/plugins/lookup/etcd.py
new file mode 100644
index 0000000000..59f8df9916
--- /dev/null
+++ b/plugins/lookup/etcd.py
@@ -0,0 +1,177 @@
+# (c) 2013, Jan-Piet Mens
+# (m) 2016, Mihai Moldovanu
+# (m) 2017, Juan Manuel Parrilla
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible.  If not, see .
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+    author:
+        - Jan-Piet Mens (@jpmens)
+    lookup: etcd
+    short_description: get info from an etcd server
+    description:
+        - Retrieves data from an etcd server
+    options:
+        _terms:
+            description:
+                - the list of keys to lookup on the etcd server
+            type: list
+            elements: string
+            required: True
+        url:
+            description:
+                - The URL of the etcd server.
+            default: 'http://127.0.0.1:4001'
+            env:
+                - name: ANSIBLE_ETCD_URL
+        version:
+            description:
+                - The etcd protocol version to use.
+            default: 'v1'
+            env:
+                - name: ANSIBLE_ETCD_VERSION
+        validate_certs:
+            description:
+                - Toggle checking that the SSL certificates are valid; you normally only want to turn this off with self-signed certs.
+            default: True
+            type: boolean
+'''
+
+EXAMPLES = '''
+    - name: "a value from a locally running etcd"
+      debug: msg={{ lookup('etcd', 'foo/bar') }}
+
+    - name: "values from multiple folders on a locally running etcd"
+      debug: msg={{ lookup('etcd', 'foo', 'bar', 'baz') }}
+
+    - name: "since Ansible 2.5 you can set server options inline"
+      debug: msg="{{ lookup('etcd', 'foo', version='v2', url='http://192.168.0.27:4001') }}"
+'''
+
+RETURN = '''
+    _raw:
+        description:
+            - list of values associated with input keys
+        type: list
+        elements: strings
+'''
+
+import json
+
+from ansible.plugins.lookup import LookupBase
+from ansible.module_utils.urls import open_url
+
+# this can be made configurable, but should not use ansible.cfg
+#
+# Made module configurable from playbooks:
+# If etcd v2 running on host 192.168.1.21 on port 2379
+# we can use the following in a playbook to retrieve /tfm/network/config key
+#
+# - debug: msg={{lookup('etcd','/tfm/network/config', url='http://192.168.1.21:2379' , version='v2')}}
+#
+# Example Output:
+#
+# TASK [debug] *******************************************************************
+# ok: [localhost] => {
+#     "msg": {
+#         "Backend": {
+#             "Type": "vxlan"
+#         },
+#         "Network": "172.30.0.0/16",
+#         "SubnetLen": 24
+#     }
+# }
+
+
+class Etcd:
+    def __init__(self, url, version, validate_certs):
+        self.url = url
+        self.version = version
+        self.baseurl = '%s/%s/keys' % (self.url, self.version)
+        self.validate_certs = validate_certs
+
+    def _parse_node(self, node):
+        # This function receives the whole etcd tree. If the requested level
+        # is a directory, the recursion starts: a dict is built from its child
+        # nodes, each parsed the same way. When a plain value is reached, a
+        # key-value pair is created at that level and the recursion unwinds.
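Before the implementation just below, the recursion is easy to see against a miniature etcd v2 response; the structure follows the v2 API, but the keys and values here are invented:

```python
# A miniature etcd v2 'node' tree and a stand-alone version of the recursion.
node = {
    'dir': True, 'key': '/tfm',
    'nodes': [
        {'dir': True, 'key': '/tfm/network',
         'nodes': [{'key': '/tfm/network/config', 'value': 'vxlan'}]},
    ],
}

def parse(n):
    if n.get('dir', False):
        return {c['key'].split('/')[-1]: parse(c) for c in n.get('nodes', [])}
    return n['value']

assert parse(node) == {'network': {'config': 'vxlan'}}
```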
+ path = {} + if node.get('dir', False): + for n in node.get('nodes', []): + path[n['key'].split('/')[-1]] = self._parse_node(n) + + else: + path = node['value'] + + return path + + def get(self, key): + url = "%s/%s?recursive=true" % (self.baseurl, key) + data = None + value = {} + try: + r = open_url(url, validate_certs=self.validate_certs) + data = r.read() + except Exception: + return None + + try: + # I will not support Version 1 of etcd for folder parsing + item = json.loads(data) + if self.version == 'v1': + # When ETCD are working with just v1 + if 'value' in item: + value = item['value'] + else: + if 'node' in item: + # When a usual result from ETCD + value = self._parse_node(item['node']) + + if 'errorCode' in item: + # Here return an error when an unknown entry responds + value = "ENOENT" + except Exception: + raise + + return value + + +class LookupModule(LookupBase): + + def run(self, terms, variables, **kwargs): + + self.set_options(var_options=variables, direct=kwargs) + + validate_certs = self.get_option('validate_certs') + url = self.get_option('url') + version = self.get_option('version') + + etcd = Etcd(url=url, version=version, validate_certs=validate_certs) + + ret = [] + for term in terms: + key = term.split()[0] + value = etcd.get(key) + ret.append(value) + return ret diff --git a/plugins/lookup/filetree.py b/plugins/lookup/filetree.py new file mode 100644 index 0000000000..4cbe4a5ee9 --- /dev/null +++ b/plugins/lookup/filetree.py @@ -0,0 +1,196 @@ +# (c) 2016 Dag Wieers +# (c) 2017 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = ''' +lookup: filetree +author: Dag Wieers (@dagwieers) +short_description: recursively match all files in a directory tree +description: +- This lookup enables you to template a complete tree of files on a target system while retaining permissions and ownership. +- Supports directories, files and symlinks, including SELinux and other file properties. +- If you provide more than one path, it will implement a first_found logic, and will not process entries it already processed in previous paths. + This enables merging different trees in order of importance, or add role_vars to specific paths to influence different instances of the same role. 
+options: + _terms: + description: path(s) of files to read + required: True +''' + +EXAMPLES = """ +- name: Create directories + file: + path: /web/{{ item.path }} + state: directory + mode: '{{ item.mode }}' + with_filetree: web/ + when: item.state == 'directory' + +- name: Template files (explicitly skip directories in order to use the 'src' attribute) + template: + src: '{{ item.src }}' + dest: /web/{{ item.path }} + mode: '{{ item.mode }}' + with_filetree: web/ + when: item.state == 'file' + +- name: Recreate symlinks + file: + src: '{{ item.src }}' + dest: /web/{{ item.path }} + state: link + force: yes + mode: '{{ item.mode }}' + with_filetree: web/ + when: item.state == 'link' +""" + +RETURN = """ + _raw: + description: list of dictionaries with file information + contains: + src: + description: + - full path to file + - not returned when C(item.state) is set to C(directory) + root: + description: allows filtering by original location + path: + description: contains the relative path to root + mode: + description: TODO + state: + description: TODO + owner: + description: TODO + group: + description: TODO + seuser: + description: TODO + serole: + description: TODO + setype: + description: TODO + selevel: + description: TODO + uid: + description: TODO + gid: + description: TODO + size: + description: TODO + mtime: + description: TODO + ctime: + description: TODO +""" +import os +import pwd +import grp +import stat + +HAVE_SELINUX = False +try: + import selinux + HAVE_SELINUX = True +except ImportError: + pass + +from ansible.plugins.lookup import LookupBase +from ansible.module_utils._text import to_native, to_text +from ansible.utils.display import Display + +display = Display() + + +# If selinux fails to find a default, return an array of None +def selinux_context(path): + context = [None, None, None, None] + if HAVE_SELINUX and selinux.is_selinux_enabled(): + try: + # note: the selinux module uses byte strings on python2 and text + # strings on python3 + ret = selinux.lgetfilecon_raw(to_native(path)) + except OSError: + return context + if ret[0] != -1: + # Limit split to 4 because the selevel, the last in the list, + # may contain ':' characters + context = ret[1].split(':', 3) + return context + + +def file_props(root, path): + ''' Returns dictionary with file properties, or return None on failure ''' + abspath = os.path.join(root, path) + + try: + st = os.lstat(abspath) + except OSError as e: + display.warning('filetree: Error using stat() on path %s (%s)' % (abspath, e)) + return None + + ret = dict(root=root, path=path) + + if stat.S_ISLNK(st.st_mode): + ret['state'] = 'link' + ret['src'] = os.readlink(abspath) + elif stat.S_ISDIR(st.st_mode): + ret['state'] = 'directory' + elif stat.S_ISREG(st.st_mode): + ret['state'] = 'file' + ret['src'] = abspath + else: + display.warning('filetree: Error file type of %s is not supported' % abspath) + return None + + ret['uid'] = st.st_uid + ret['gid'] = st.st_gid + try: + ret['owner'] = pwd.getpwuid(st.st_uid).pw_name + except KeyError: + ret['owner'] = st.st_uid + try: + ret['group'] = to_text(grp.getgrgid(st.st_gid).gr_name) + except KeyError: + ret['group'] = st.st_gid + ret['mode'] = '0%03o' % (stat.S_IMODE(st.st_mode)) + ret['size'] = st.st_size + ret['mtime'] = st.st_mtime + ret['ctime'] = st.st_ctime + + if HAVE_SELINUX and selinux.is_selinux_enabled() == 1: + context = selinux_context(abspath) + ret['seuser'] = context[0] + ret['serole'] = context[1] + ret['setype'] = context[2] + ret['selevel'] = context[3] + + return ret + + 
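For orientation, `file_props()` above returns one dict per filesystem entry, shaped like the RETURN block earlier. A hypothetical result for a regular file; every value is system-dependent, and the SELinux keys appear only when SELinux is enabled:

```python
# Hypothetical output of file_props('/web', 'index.html') on a Linux host:
# {'root': '/web', 'path': 'index.html', 'state': 'file', 'src': '/web/index.html',
#  'uid': 33, 'gid': 33, 'owner': 'www-data', 'group': 'www-data',
#  'mode': '0644', 'size': 612, 'mtime': 1583740267.0, 'ctime': 1583740267.0}
```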
+class LookupModule(LookupBase): + + def run(self, terms, variables=None, **kwargs): + basedir = self.get_basedir(variables) + + ret = [] + for term in terms: + term_file = os.path.basename(term) + dwimmed_path = self._loader.path_dwim_relative(basedir, 'files', os.path.dirname(term)) + path = os.path.join(dwimmed_path, term_file) + display.debug("Walking '{0}'".format(path)) + for root, dirs, files in os.walk(path, topdown=True): + for entry in dirs + files: + relpath = os.path.relpath(os.path.join(root, entry), path) + + # Skip if relpath was already processed (from another root) + if relpath not in [entry['path'] for entry in ret]: + props = file_props(path, relpath) + if props is not None: + display.debug(" found '{0}'".format(os.path.join(path, relpath))) + ret.append(props) + + return ret diff --git a/plugins/lookup/flattened.py b/plugins/lookup/flattened.py new file mode 100644 index 0000000000..c88683d7cc --- /dev/null +++ b/plugins/lookup/flattened.py @@ -0,0 +1,83 @@ +# (c) 2013, Serge van Ginderachter +# (c) 2017 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = ''' + lookup: flattened + author: Serge van Ginderachter + short_description: return single list completely flattened + description: + - given one or more lists, this lookup will flatten any list elements found recursively until only 1 list is left. + options: + _terms: + description: lists to flatten + required: True + notes: + - unlike 'items' which only flattens 1 level, this plugin will continue to flatten until it cannot find lists anymore. + - aka highlander plugin, there can only be one (list). +''' + +EXAMPLES = """ +- name: "'unnest' all elements into single list" + debug: msg="all in one list {{lookup('flattened', [1,2,3,[5,6]], [a,b,c], [[5,6,1,3], [34,a,b,c]])}}" +""" + +RETURN = """ + _raw: + description: + - flattened list + type: list +""" +from ansible.errors import AnsibleError +from ansible.module_utils.six import string_types +from ansible.plugins.lookup import LookupBase +from ansible.utils.listify import listify_lookup_plugin_terms + + +class LookupModule(LookupBase): + + def _check_list_of_one_list(self, term): + # make sure term is not a list of one (list of one..) 
item + # return the final non list item if so + + if isinstance(term, list) and len(term) == 1: + term = term[0] + if isinstance(term, list): + term = self._check_list_of_one_list(term) + + return term + + def _do_flatten(self, terms, variables): + + ret = [] + for term in terms: + term = self._check_list_of_one_list(term) + + if term == 'None' or term == 'null': + # ignore undefined items + break + + if isinstance(term, string_types): + # convert a variable to a list + term2 = listify_lookup_plugin_terms(term, templar=self._templar, loader=self._loader) + # but avoid converting a plain string to a list of one string + if term2 != [term]: + term = term2 + + if isinstance(term, list): + # if it's a list, check recursively for items that are a list + term = self._do_flatten(term, variables) + ret.extend(term) + else: + ret.append(term) + + return ret + + def run(self, terms, variables, **kwargs): + + if not isinstance(terms, list): + raise AnsibleError("with_flattened expects a list") + + return self._do_flatten(terms, variables) diff --git a/plugins/lookup/gcp_storage_file.py b/plugins/lookup/gcp_storage_file.py new file mode 100644 index 0000000000..d73ebd2cc9 --- /dev/null +++ b/plugins/lookup/gcp_storage_file.py @@ -0,0 +1,138 @@ +# (c) 2019, Eric Anderson +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = ''' +lookup: gcp_storage_file +description: + - This lookup returns the contents from a file residing on Google Cloud Storage +short_description: Return GC Storage content +author: Eric Anderson +requirements: + - python >= 2.6 + - requests >= 2.18.4 + - google-auth >= 1.3.0 +options: + src: + description: + - Source location of file (may be local machine or cloud depending on action). + required: false + bucket: + description: + - The name of the bucket. 
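Stepping back to the flattened lookup that closed above: its behavior is easier to see in a stand-alone re-implementation of the recursion, which is a sketch rather than the plugin's exact code (the plugin additionally templates strings and drops 'None'/'null' items):

```python
# Stand-alone illustration of recursive flattening (not the plugin's exact code).
def flatten(terms):
    out = []
    for t in terms:
        out.extend(flatten(t)) if isinstance(t, list) else out.append(t)
    return out

assert flatten([1, [2, [3, [4, 5]]], [[6], 7]]) == [1, 2, 3, 4, 5, 6, 7]
```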
+ required: false +extends_documentation_fragment: +- community.general.gcp + +''' + +EXAMPLES = ''' +- debug: msg="the value of foo.txt is {{ lookup('gcp_storage_file', + bucket='gcp-bucket', src='mydir/foo.txt', project='project-name', + auth_kind='serviceaccount', service_account_file='/tmp/myserviceaccountfile.json') }}" +''' + +RETURN = ''' +_raw: + description: + - base64 encoded file content +''' + +import base64 +import json +import mimetypes +import os +import requests +from ansible.errors import AnsibleError +from ansible.plugins.lookup import LookupBase +from ansible.utils.display import Display +from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import navigate_hash, GcpSession + + +display = Display() + + +class GcpMockModule(object): + def __init__(self, params): + self.params = params + + def fail_json(self, *args, **kwargs): + raise AnsibleError(kwargs['msg']) + + def raise_for_status(self, response): + try: + response.raise_for_status() + except getattr(requests.exceptions, 'RequestException'): + self.fail_json(msg="GCP returned error: %s" % response.json()) + + +class GcpFileLookup(): + def get_file_contents(self, module): + auth = GcpSession(module, 'storage') + data = auth.get(self.media_link(module)) + return base64.b64encode(data.content.rstrip()) + + def fetch_resource(self, module, link, allow_not_found=True): + auth = GcpSession(module, 'storage') + return self.return_if_object(module, auth.get(link), allow_not_found) + + def self_link(self, module): + return "https://www.googleapis.com/storage/v1/b/{bucket}/o/{src}".format(**module.params) + + def media_link(self, module): + return "https://www.googleapis.com/storage/v1/b/{bucket}/o/{src}?alt=media".format(**module.params) + + def return_if_object(self, module, response, allow_not_found=False): + # If not found, return nothing. + if allow_not_found and response.status_code == 404: + return None + # If no content, return nothing. + if response.status_code == 204: + return None + try: + module.raise_for_status(response) + result = response.json() + except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst: + raise AnsibleError("Invalid JSON response with error: %s" % inst) + if navigate_hash(result, ['error', 'errors']): + raise AnsibleError(navigate_hash(result, ['error', 'errors'])) + return result + + def object_headers(self, module): + return { + "name": module.params['src'], + "Content-Type": mimetypes.guess_type(module.params['src'])[0], + "Content-Length": str(os.path.getsize(module.params['src'])), + } + + def run(self, terms, variables=None, **kwargs): + params = { + 'bucket': kwargs.get('bucket', None), + 'src': kwargs.get('src', None), + 'projects': kwargs.get('projects', None), + 'scopes': kwargs.get('scopes', None), + 'zones': kwargs.get('zones', None), + 'auth_kind': kwargs.get('auth_kind', None), + 'service_account_file': kwargs.get('service_account_file', None), + 'service_account_email': kwargs.get('service_account_email', None), + } + + if not params['scopes']: + params['scopes'] = ['https://www.googleapis.com/auth/devstorage.full_control'] + + fake_module = GcpMockModule(params) + + # Check if files exist. 
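The two URLs assembled by `self_link()` and `media_link()` above target the GCS JSON API: the first returns object metadata, the second the object content. Spelled out for an illustrative bucket and object name:

```python
# Illustrative endpoints for bucket 'gcp-bucket', object 'mydir/foo.txt'.
params = {'bucket': 'gcp-bucket', 'src': 'mydir/foo.txt'}
self_link = 'https://www.googleapis.com/storage/v1/b/{bucket}/o/{src}'.format(**params)
media_link = self_link + '?alt=media'
# metadata: .../b/gcp-bucket/o/mydir/foo.txt
# content:  .../b/gcp-bucket/o/mydir/foo.txt?alt=media
```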
+ remote_object = self.fetch_resource(fake_module, self.self_link(fake_module)) + if not remote_object: + raise AnsibleError("File does not exist in bucket") + + result = self.get_file_contents(fake_module) + return [result] + + +class LookupModule(LookupBase): + def run(self, terms, variables=None, **kwargs): + return GcpFileLookup().run(terms, variables=variables, **kwargs) diff --git a/plugins/lookup/hashi_vault.py b/plugins/lookup/hashi_vault.py new file mode 100644 index 0000000000..e4a761574b --- /dev/null +++ b/plugins/lookup/hashi_vault.py @@ -0,0 +1,300 @@ +# (c) 2015, Jonathan Davila +# (c) 2017 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = ''' + lookup: hashi_vault + author: Jonathan Davila + short_description: retrieve secrets from HashiCorp's vault + requirements: + - hvac (python library) + description: + - retrieve secrets from HashiCorp's vault + notes: + - Due to a current limitation in the HVAC library there won't necessarily be an error if a bad endpoint is specified. + - As of Ansible 2.10, only the latest secret is returned when specifying a KV v2 path. + options: + secret: + description: query you are making. + required: True + token: + description: vault token. + env: + - name: VAULT_TOKEN + url: + description: URL to vault service. + env: + - name: VAULT_ADDR + default: 'http://127.0.0.1:8200' + username: + description: Authentication user name. + password: + description: Authentication password. + role_id: + description: Role id for a vault AppRole auth. + env: + - name: VAULT_ROLE_ID + secret_id: + description: Secret id for a vault AppRole auth. + env: + - name: VAULT_SECRET_ID + auth_method: + description: + - Authentication method to be used. + - C(userpass) is added in version 2.8. + env: + - name: VAULT_AUTH_METHOD + choices: + - userpass + - ldap + - approle + mount_point: + description: vault mount point, only required if you have a custom mount point. + default: ldap + ca_cert: + description: path to certificate to use for authentication. + aliases: [ cacert ] + validate_certs: + description: controls verification and validation of SSL certificates, mostly you only want to turn off with self signed ones. + type: boolean + default: True + namespace: + description: namespace where secrets reside. requires HVAC 0.7.0+ and Vault 0.11+. 
+''' + +EXAMPLES = """ +- debug: + msg: "{{ lookup('hashi_vault', 'secret=secret/hello:value token=c975b780-d1be-8016-866b-01d0f9b688a5 url=http://myvault:8200')}}" + +- name: Return all secrets from a path + debug: + msg: "{{ lookup('hashi_vault', 'secret=secret/hello token=c975b780-d1be-8016-866b-01d0f9b688a5 url=http://myvault:8200')}}" + +- name: Vault that requires authentication via LDAP + debug: + msg: "{{ lookup('hashi_vault', 'secret=secret/hello:value auth_method=ldap mount_point=ldap username=myuser password=mypas url=http://myvault:8200')}}" + +- name: Vault that requires authentication via username and password + debug: + msg: "{{ lookup('hashi_vault', 'secret=secret/hello:value auth_method=userpass username=myuser password=mypas url=http://myvault:8200')}}" + +- name: Using an ssl vault + debug: + msg: "{{ lookup('hashi_vault', 'secret=secret/hola:value token=c975b780-d1be-8016-866b-01d0f9b688a5 url=https://myvault:8200 validate_certs=False')}}" + +- name: using certificate auth + debug: + msg: "{{ lookup('hashi_vault', 'secret=secret/hi:value token=xxxx-xxx-xxx url=https://myvault:8200 validate_certs=True cacert=/cacert/path/ca.pem')}}" + +- name: authenticate with a Vault app role + debug: + msg: "{{ lookup('hashi_vault', 'secret=secret/hello:value auth_method=approle role_id=myroleid secret_id=mysecretid url=http://myvault:8200')}}" + +- name: Return all secrets from a path in a namespace + debug: + msg: "{{ lookup('hashi_vault', 'secret=secret/hello token=c975b780-d1be-8016-866b-01d0f9b688a5 url=http://myvault:8200 namespace=teama/admins')}}" + +# When using KV v2 the PATH should include "data" between the secret engine mount and path (e.g. "secret/data/:path") +# see: https://www.vaultproject.io/api/secret/kv/kv-v2.html#read-secret-version +- name: Return latest KV v2 secret from path + debug: + msg: "{{ lookup('hashi_vault', 'secret=secret/data/hello token=my_vault_token url=http://myvault_url:8200') }}" + + +""" + +RETURN = """ +_raw: + description: + - secrets(s) requested +""" + +import os + +from ansible.errors import AnsibleError +from ansible.module_utils.parsing.convert_bool import boolean +from ansible.plugins.lookup import LookupBase + +HAS_HVAC = False +try: + import hvac + HAS_HVAC = True +except ImportError: + HAS_HVAC = False + + +ANSIBLE_HASHI_VAULT_ADDR = 'http://127.0.0.1:8200' + +if os.getenv('VAULT_ADDR') is not None: + ANSIBLE_HASHI_VAULT_ADDR = os.environ['VAULT_ADDR'] + + +class HashiVault: + def __init__(self, **kwargs): + + self.url = kwargs.get('url', ANSIBLE_HASHI_VAULT_ADDR) + self.namespace = kwargs.get('namespace', None) + self.avail_auth_method = ['approle', 'userpass', 'ldap'] + + # split secret arg, which has format 'secret/hello:value' into secret='secret/hello' and secret_field='value' + s = kwargs.get('secret') + if s is None: + raise AnsibleError("No secret specified for hashi_vault lookup") + + s_f = s.rsplit(':', 1) + self.secret = s_f[0] + if len(s_f) >= 2: + self.secret_field = s_f[1] + else: + self.secret_field = '' + + self.verify = self.boolean_or_cacert(kwargs.get('validate_certs', True), kwargs.get('cacert', '')) + + # If a particular backend is asked for (and its method exists) we call it, otherwise drop through to using + # token auth. This means if a particular auth backend is requested and a token is also given, then we + # ignore the token and attempt authentication against the specified backend. + # + # to enable a new auth backend, simply add a new 'def auth_' method below. 
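The `rsplit(':', 1)` in the constructor above splits a secret spec into a path plus an optional field. Stand-alone, with an invented spec string:

```python
# 'secret/hello:value' -> path 'secret/hello', field 'value';
# 'secret/hello' alone -> field defaults to '' (return the whole data dict).
s_f = 'secret/hello:value'.rsplit(':', 1)
secret = s_f[0]
secret_field = s_f[1] if len(s_f) >= 2 else ''
```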
+ # + self.auth_method = kwargs.get('auth_method', os.environ.get('VAULT_AUTH_METHOD')) + self.verify = self.boolean_or_cacert(kwargs.get('validate_certs', True), kwargs.get('cacert', '')) + if self.auth_method and self.auth_method != 'token': + try: + if self.namespace is not None: + self.client = hvac.Client(url=self.url, verify=self.verify, namespace=self.namespace) + else: + self.client = hvac.Client(url=self.url, verify=self.verify) + # prefixing with auth_ to limit which methods can be accessed + getattr(self, 'auth_' + self.auth_method)(**kwargs) + except AttributeError: + raise AnsibleError("Authentication method '%s' not supported." + " Available options are %r" % (self.auth_method, self.avail_auth_method)) + else: + self.token = kwargs.get('token', os.environ.get('VAULT_TOKEN', None)) + if self.token is None and os.environ.get('HOME'): + token_filename = os.path.join( + os.environ.get('HOME'), + '.vault-token' + ) + if os.path.exists(token_filename): + with open(token_filename) as token_file: + self.token = token_file.read().strip() + + if self.token is None: + raise AnsibleError("No Vault Token specified") + + if self.namespace is not None: + self.client = hvac.Client(url=self.url, token=self.token, verify=self.verify, namespace=self.namespace) + else: + self.client = hvac.Client(url=self.url, token=self.token, verify=self.verify) + + if not self.client.is_authenticated(): + raise AnsibleError("Invalid Hashicorp Vault Token Specified for hashi_vault lookup") + + def get(self): + data = self.client.read(self.secret) + + # Check response for KV v2 fields and flatten nested secret data. + # + # https://vaultproject.io/api/secret/kv/kv-v2.html#sample-response-1 + try: + # sentinel field checks + check_dd = data['data']['data'] + check_md = data['data']['metadata'] + # unwrap nested data + data = data['data'] + except KeyError: + pass + + if data is None: + raise AnsibleError("The secret %s doesn't seem to exist for hashi_vault lookup" % self.secret) + + if self.secret_field == '': + return data['data'] + + if self.secret_field not in data['data']: + raise AnsibleError("The secret %s does not contain the field '%s'. 
for hashi_vault lookup" % (self.secret, self.secret_field)) + + return data['data'][self.secret_field] + + def check_params(self, **kwargs): + username = kwargs.get('username') + if username is None: + raise AnsibleError("Authentication method %s requires a username" % self.auth_method) + + password = kwargs.get('password') + if password is None: + raise AnsibleError("Authentication method %s requires a password" % self.auth_method) + + mount_point = kwargs.get('mount_point') + + return username, password, mount_point + + def auth_userpass(self, **kwargs): + username, password, mount_point = self.check_params(**kwargs) + if mount_point is None: + mount_point = 'userpass' + + self.client.auth_userpass(username, password, mount_point=mount_point) + + def auth_ldap(self, **kwargs): + username, password, mount_point = self.check_params(**kwargs) + if mount_point is None: + mount_point = 'ldap' + + self.client.auth.ldap.login(username, password, mount_point=mount_point) + + def boolean_or_cacert(self, validate_certs, cacert): + validate_certs = boolean(validate_certs, strict=False) + '''' return a bool or cacert ''' + if validate_certs is True: + if cacert != '': + return cacert + else: + return True + else: + return False + + def auth_approle(self, **kwargs): + role_id = kwargs.get('role_id', os.environ.get('VAULT_ROLE_ID', None)) + if role_id is None: + raise AnsibleError("Authentication method app role requires a role_id") + + secret_id = kwargs.get('secret_id', os.environ.get('VAULT_SECRET_ID', None)) + if secret_id is None: + raise AnsibleError("Authentication method app role requires a secret_id") + + self.client.auth_approle(role_id, secret_id) + + +class LookupModule(LookupBase): + def run(self, terms, variables=None, **kwargs): + if not HAS_HVAC: + raise AnsibleError("Please pip install hvac to use the hashi_vault lookup module.") + + vault_args = terms[0].split() + vault_dict = {} + ret = [] + + for param in vault_args: + try: + key, value = param.split('=') + except ValueError: + raise AnsibleError("hashi_vault lookup plugin needs key=value pairs, but received %s" % terms) + vault_dict[key] = value + + if 'ca_cert' in vault_dict.keys(): + vault_dict['cacert'] = vault_dict['ca_cert'] + vault_dict.pop('ca_cert', None) + + vault_conn = HashiVault(**vault_dict) + + for term in terms: + key = term.split()[0] + value = vault_conn.get() + ret.append(value) + + return ret diff --git a/plugins/lookup/hiera.py b/plugins/lookup/hiera.py new file mode 100644 index 0000000000..fdf9d123ff --- /dev/null +++ b/plugins/lookup/hiera.py @@ -0,0 +1,86 @@ +# (c) 2017, Juan Manuel Parrilla +# (c) 2012-17 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = ''' + author: + - Juan Manuel Parrilla (@jparrill) + lookup: hiera + short_description: get info from hiera data + requirements: + - hiera (command line utility) + description: + - Retrieves data from an Puppetmaster node using Hiera as ENC + options: + _hiera_key: + description: + - The list of keys to lookup on the Puppetmaster + type: list + element_type: string + required: True + _bin_file: + description: + - Binary file to execute Hiera + default: '/usr/bin/hiera' + env: + - name: ANSIBLE_HIERA_BIN + _hierarchy_file: + description: + - File that describes the hierarchy of Hiera + default: '/etc/hiera.yaml' + env: + - name: ANSIBLE_HIERA_CFG +# FIXME: incomplete options .. _terms? 
environment/fqdn? +''' + +EXAMPLES = """ +# All this examples depends on hiera.yml that describes the hierarchy + +- name: "a value from Hiera 'DB'" + debug: msg={{ lookup('hiera', 'foo') }} + +- name: "a value from a Hiera 'DB' on other environment" + debug: msg={{ lookup('hiera', 'foo environment=production') }} + +- name: "a value from a Hiera 'DB' for a concrete node" + debug: msg={{ lookup('hiera', 'foo fqdn=puppet01.localdomain') }} +""" + +RETURN = """ + _raw: + description: + - a value associated with input key + type: strings +""" + +import os + +from ansible.plugins.lookup import LookupBase +from ansible.utils.cmd_functions import run_cmd + +ANSIBLE_HIERA_CFG = os.getenv('ANSIBLE_HIERA_CFG', '/etc/hiera.yaml') +ANSIBLE_HIERA_BIN = os.getenv('ANSIBLE_HIERA_BIN', '/usr/bin/hiera') + + +class Hiera(object): + def get(self, hiera_key): + pargs = [ANSIBLE_HIERA_BIN] + pargs.extend(['-c', ANSIBLE_HIERA_CFG]) + + pargs.extend(hiera_key) + + rc, output, err = run_cmd("{0} -c {1} {2}".format( + ANSIBLE_HIERA_BIN, ANSIBLE_HIERA_CFG, hiera_key[0])) + + return output.strip() + + +class LookupModule(LookupBase): + def run(self, terms, variables=''): + hiera = Hiera() + ret = [] + + ret.append(hiera.get(terms)) + return ret diff --git a/plugins/lookup/keyring.py b/plugins/lookup/keyring.py new file mode 100644 index 0000000000..d39f556621 --- /dev/null +++ b/plugins/lookup/keyring.py @@ -0,0 +1,65 @@ +# (c) 2016, Samuel Boucher +# (c) 2017 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = ''' + lookup: keyring + author: + - Samuel Boucher + requirements: + - keyring (python library) + short_description: grab secrets from the OS keyring + description: + - Allows you to access data stored in the OS provided keyring/keychain. 
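The description above maps one-to-one onto the python keyring API, so the plugin's contract can be sanity-checked interactively outside Ansible; the service and user names here are invented, and a keyring backend must be available:

```python
# Store and retrieve a secret via the OS keyring (names are examples only).
import keyring

keyring.set_password('mysql', 'joe', 's3cret')
assert keyring.get_password('mysql', 'joe') == 's3cret'
```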
+''' + +EXAMPLES = """ +- name : output secrets to screen (BAD IDEA) + debug: + msg: "Password: {{item}}" + with_keyring: + - 'servicename username' + +- name: access mysql with password from keyring + mysql_db: login_password={{lookup('keyring','mysql joe')}} login_user=joe +""" + +RETURN = """ + _raw: + description: secrets stored +""" + +HAS_KEYRING = True + +from ansible.errors import AnsibleError +from ansible.utils.display import Display + +try: + import keyring +except ImportError: + HAS_KEYRING = False + +from ansible.plugins.lookup import LookupBase + +display = Display() + + +class LookupModule(LookupBase): + + def run(self, terms, **kwargs): + if not HAS_KEYRING: + raise AnsibleError(u"Can't LOOKUP(keyring): missing required python library 'keyring'") + + display.vvvv(u"keyring: %s" % keyring.get_keyring()) + ret = [] + for term in terms: + (servicename, username) = (term.split()[0], term.split()[1]) + display.vvvv(u"username: %s, servicename: %s " % (username, servicename)) + password = keyring.get_password(servicename, username) + if password is None: + raise AnsibleError(u"servicename: %s for user %s not found" % (servicename, username)) + ret.append(password.rstrip()) + return ret diff --git a/plugins/lookup/lastpass.py b/plugins/lookup/lastpass.py new file mode 100644 index 0000000000..bd946c0d71 --- /dev/null +++ b/plugins/lookup/lastpass.py @@ -0,0 +1,97 @@ +# (c) 2016, Andrew Zenk +# (c) 2017 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = ''' + lookup: lastpass + author: + - Andrew Zenk + requirements: + - lpass (command line utility) + - must have already logged into lastpass + short_description: fetch data from lastpass + description: + - use the lpass command line utility to fetch specific fields from lastpass + options: + _terms: + description: key from which you want to retrieve the field + required: True + field: + description: field to return from lastpass + default: 'password' +''' + +EXAMPLES = """ +- name: get 'custom_field' from lastpass entry 'entry-name' + debug: + msg: "{{ lookup('lastpass', 'entry-name', field='custom_field') }}" +""" + +RETURN = """ + _raw: + description: secrets stored +""" + +from subprocess import Popen, PIPE + +from ansible.errors import AnsibleError +from ansible.module_utils._text import to_bytes, to_text +from ansible.plugins.lookup import LookupBase + + +class LPassException(AnsibleError): + pass + + +class LPass(object): + + def __init__(self, path='lpass'): + self._cli_path = path + + @property + def cli_path(self): + return self._cli_path + + @property + def logged_in(self): + out, err = self._run(self._build_args("logout"), stdin="n\n", expected_rc=1) + return err.startswith("Are you sure you would like to log out?") + + def _run(self, args, stdin=None, expected_rc=0): + p = Popen([self.cli_path] + args, stdout=PIPE, stderr=PIPE, stdin=PIPE) + out, err = p.communicate(to_bytes(stdin)) + rc = p.wait() + if rc != expected_rc: + raise LPassException(err) + return to_text(out, errors='surrogate_or_strict'), to_text(err, errors='surrogate_or_strict') + + def _build_args(self, command, args=None): + if args is None: + args = [] + args = [command] + args + args += ["--color=never"] + return args + + def get_field(self, key, field): + if field in ['username', 'password', 'url', 'notes', 'id', 'name']: + out, err = self._run(self._build_args("show", 
["--{0}".format(field), key])) + else: + out, err = self._run(self._build_args("show", ["--field={0}".format(field), key])) + return out.strip() + + +class LookupModule(LookupBase): + + def run(self, terms, variables=None, **kwargs): + lp = LPass() + + if not lp.logged_in: + raise AnsibleError("Not logged into lastpass: please run 'lpass login' first") + + field = kwargs.get('field', 'password') + values = [] + for term in terms: + values.append(lp.get_field(term, field)) + return values diff --git a/plugins/lookup/lmdb_kv.py b/plugins/lookup/lmdb_kv.py new file mode 100644 index 0000000000..0e89ca31a0 --- /dev/null +++ b/plugins/lookup/lmdb_kv.py @@ -0,0 +1,117 @@ +# (c) 2017-2018, Jan-Piet Mens +# (c) 2018 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = ''' + lookup: lmdb_kv + author: + - Jan-Piet Mens (@jpmens) + short_description: fetch data from LMDB + description: + - This lookup returns a list of results from an LMDB DB corresponding to a list of items given to it + requirements: + - lmdb (python library https://lmdb.readthedocs.io/en/release/) + options: + _terms: + description: list of keys to query + db: + description: path to LMDB database + default: 'ansible.mdb' +''' + +EXAMPLES = """ +- name: query LMDB for a list of country codes + debug: + msg: "{{ query('lmdb_kv', 'nl', 'be', 'lu', db='jp.mdb') }}" + +- name: use list of values in a loop by key wildcard + debug: + msg: "Hello from {{ item.0 }} a.k.a. {{ item.1 }}" + vars: + - lmdb_kv_db: jp.mdb + with_lmdb_kv: + - "n*" + +- name: get an item by key + assert: + that: + - item == 'Belgium' + vars: + - lmdb_kv_db: jp.mdb + with_lmdb_kv: + - be +""" + +RETURN = """ +_raw: + description: value(s) stored in LMDB +""" + + +from ansible.errors import AnsibleError +from ansible.plugins.lookup import LookupBase +from ansible.module_utils._text import to_native, to_text +HAVE_LMDB = True +try: + import lmdb +except ImportError: + HAVE_LMDB = False + + +class LookupModule(LookupBase): + + def run(self, terms, variables, **kwargs): + + ''' + terms contain any number of keys to be retrieved. 
+        If no terms are supplied, all keys from the database are
+        returned with their values; if a term ends in an asterisk,
+        all keys matching that prefix are returned.
+
+        The LMDB database defaults to 'ansible.mdb' if Ansible's
+        variable 'lmdb_kv_db' is not set:
+
+        vars:
+          - lmdb_kv_db: "jp.mdb"
+        '''
+
+        if HAVE_LMDB is False:
+            raise AnsibleError("Can't LOOKUP(lmdb_kv): this module requires lmdb to be installed")
+
+        db = variables.get('lmdb_kv_db', None)
+        if db is None:
+            db = kwargs.get('db', 'ansible.mdb')
+        db = str(db)
+
+        try:
+            env = lmdb.open(db, readonly=True)
+        except Exception as e:
+            raise AnsibleError("LMDB can't open database %s: %s" % (db, to_native(e)))
+
+        ret = []
+        if len(terms) == 0:
+            with env.begin() as txn:
+                cursor = txn.cursor()
+                cursor.first()
+                for key, value in cursor:
+                    ret.append((to_text(key), to_native(value)))
+
+        else:
+            for term in terms:
+                with env.begin() as txn:
+                    if term.endswith('*'):
+                        cursor = txn.cursor()
+                        prefix = to_text(term[:-1]).encode()  # strip asterisk
+                        # position the cursor at the first key >= prefix, then
+                        # walk forward only while keys still match the prefix
+                        cursor.set_range(prefix)
+                        while cursor.key().startswith(prefix):
+                            ret.append((to_text(cursor.key()), to_native(cursor.value())))
+                            if not cursor.next():
+                                break
+                    else:
+                        value = txn.get(to_text(term).encode())
+                        if value is not None:
+                            ret.append(to_native(value))
+
+        return ret
diff --git a/plugins/lookup/manifold.py b/plugins/lookup/manifold.py
new file mode 100644
index 0000000000..c6b55c0b9e
--- /dev/null
+++ b/plugins/lookup/manifold.py
@@ -0,0 +1,275 @@
+# (c) 2018, Arigato Machine Inc.
+# (c) 2018, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+    author:
+        - Kyrylo Galanov (galanoff@gmail.com)
+    lookup: manifold
+    short_description: get credentials from Manifold.co
+    description:
+        - Retrieves resources' credentials from Manifold.co
+    options:
+        _terms:
+            description:
+                - Optional list of resource labels to lookup on Manifold.co. If no resources are specified, all
+                  matched resources will be returned.
+            type: list
+            elements: string
+            required: False
+        api_token:
+            description:
+                - manifold API token
+            type: string
+            required: True
+            env:
+              - name: MANIFOLD_API_TOKEN
+        project:
+            description:
+                - The project label you want to get the resource for.
+            type: string
+            required: False
+        team:
+            description:
+                - The team label you want to get the resource for.
+            type: string
+            required: False
+'''
+
+EXAMPLES = '''
+    - name: all available resources
+      debug: msg="{{ lookup('manifold', api_token='SecretToken') }}"
+    - name: all available resources for a specific project in specific team
+      debug: msg="{{ lookup('manifold', api_token='SecretToken', project='project-1', team='team-2') }}"
+    - name: two specific resources
+      debug: msg="{{ lookup('manifold', 'resource-1', 'resource-2') }}"
+'''
+
+RETURN = '''
+    _raw:
+        description:
+            - dictionary of credentials ready to be consumed as environment variables. If multiple resources define
+              the same environment variable(s), the last one returned by the Manifold API will take precedence.
+ type: dict +''' +from ansible.errors import AnsibleError +from ansible.plugins.lookup import LookupBase +from ansible.module_utils.urls import open_url, ConnectionError, SSLValidationError +from ansible.module_utils.six.moves.urllib.error import HTTPError, URLError +from ansible.module_utils.six.moves.urllib.parse import urlencode +from ansible.module_utils import six +from ansible.utils.display import Display +from traceback import format_exception +import json +import sys +import os + +display = Display() + + +class ApiError(Exception): + pass + + +class ManifoldApiClient(object): + base_url = 'https://api.{api}.manifold.co/v1/{endpoint}' + http_agent = 'python-manifold-ansible-1.0.0' + + def __init__(self, token): + self._token = token + + def request(self, api, endpoint, *args, **kwargs): + """ + Send a request to API backend and pre-process a response. + :param api: API to send a request to + :type api: str + :param endpoint: API endpoint to fetch data from + :type endpoint: str + :param args: other args for open_url + :param kwargs: other kwargs for open_url + :return: server response. JSON response is automatically deserialized. + :rtype: dict | list | str + """ + + default_headers = { + 'Authorization': "Bearer {0}".format(self._token), + 'Accept': "*/*" # Otherwise server doesn't set content-type header + } + + url = self.base_url.format(api=api, endpoint=endpoint) + + headers = default_headers + arg_headers = kwargs.pop('headers', None) + if arg_headers: + headers.update(arg_headers) + + try: + display.vvvv('manifold lookup connecting to {0}'.format(url)) + response = open_url(url, headers=headers, http_agent=self.http_agent, *args, **kwargs) + data = response.read() + if response.headers.get('content-type') == 'application/json': + data = json.loads(data) + return data + except ValueError: + raise ApiError('JSON response can\'t be parsed while requesting {url}:\n{json}'.format(json=data, url=url)) + except HTTPError as e: + raise ApiError('Server returned: {err} while requesting {url}:\n{response}'.format( + err=str(e), url=url, response=e.read())) + except URLError as e: + raise ApiError('Failed lookup url for {url} : {err}'.format(url=url, err=str(e))) + except SSLValidationError as e: + raise ApiError('Error validating the server\'s certificate for {url}: {err}'.format(url=url, err=str(e))) + except ConnectionError as e: + raise ApiError('Error connecting to {url}: {err}'.format(url=url, err=str(e))) + + def get_resources(self, team_id=None, project_id=None, label=None): + """ + Get resources list + :param team_id: ID of the Team to filter resources by + :type team_id: str + :param project_id: ID of the project to filter resources by + :type project_id: str + :param label: filter resources by a label, returns a list with one or zero elements + :type label: str + :return: list of resources + :rtype: list + """ + api = 'marketplace' + endpoint = 'resources' + query_params = {} + + if team_id: + query_params['team_id'] = team_id + if project_id: + query_params['project_id'] = project_id + if label: + query_params['label'] = label + + if query_params: + endpoint += '?' 
+ urlencode(query_params) + + return self.request(api, endpoint) + + def get_teams(self, label=None): + """ + Get teams list + :param label: filter teams by a label, returns a list with one or zero elements + :type label: str + :return: list of teams + :rtype: list + """ + api = 'identity' + endpoint = 'teams' + data = self.request(api, endpoint) + # Label filtering is not supported by API, however this function provides uniform interface + if label: + data = list(filter(lambda x: x['body']['label'] == label, data)) + return data + + def get_projects(self, label=None): + """ + Get projects list + :param label: filter projects by a label, returns a list with one or zero elements + :type label: str + :return: list of projects + :rtype: list + """ + api = 'marketplace' + endpoint = 'projects' + query_params = {} + + if label: + query_params['label'] = label + + if query_params: + endpoint += '?' + urlencode(query_params) + + return self.request(api, endpoint) + + def get_credentials(self, resource_id): + """ + Get resource credentials + :param resource_id: ID of the resource to filter credentials by + :type resource_id: str + :return: + """ + api = 'marketplace' + endpoint = 'credentials?' + urlencode({'resource_id': resource_id}) + return self.request(api, endpoint) + + +class LookupModule(LookupBase): + + def run(self, terms, variables=None, api_token=None, project=None, team=None): + """ + :param terms: a list of resources lookups to run. + :param variables: ansible variables active at the time of the lookup + :param api_token: API token + :param project: optional project label + :param team: optional team label + :return: a dictionary of resources credentials + """ + + if not api_token: + api_token = os.getenv('MANIFOLD_API_TOKEN') + if not api_token: + raise AnsibleError('API token is required. 
Please set api_token parameter or MANIFOLD_API_TOKEN env var') + + try: + labels = terms + client = ManifoldApiClient(api_token) + + if team: + team_data = client.get_teams(team) + if len(team_data) == 0: + raise AnsibleError("Team '{0}' does not exist".format(team)) + team_id = team_data[0]['id'] + else: + team_id = None + + if project: + project_data = client.get_projects(project) + if len(project_data) == 0: + raise AnsibleError("Project '{0}' does not exist".format(project)) + project_id = project_data[0]['id'] + else: + project_id = None + + if len(labels) == 1: # Use server-side filtering if one resource is requested + resources_data = client.get_resources(team_id=team_id, project_id=project_id, label=labels[0]) + else: # Get all resources and optionally filter labels + resources_data = client.get_resources(team_id=team_id, project_id=project_id) + if labels: + resources_data = list(filter(lambda x: x['body']['label'] in labels, resources_data)) + + if labels and len(resources_data) < len(labels): + fetched_labels = [r['body']['label'] for r in resources_data] + not_found_labels = [label for label in labels if label not in fetched_labels] + raise AnsibleError("Resource(s) {0} do not exist".format(', '.join(not_found_labels))) + + credentials = {} + cred_map = {} + for resource in resources_data: + resource_credentials = client.get_credentials(resource['id']) + if len(resource_credentials) and resource_credentials[0]['body']['values']: + for cred_key, cred_val in six.iteritems(resource_credentials[0]['body']['values']): + label = resource['body']['label'] + if cred_key in credentials: + display.warning("'{cred_key}' with label '{old_label}' was replaced by resource data " + "with label '{new_label}'".format(cred_key=cred_key, + old_label=cred_map[cred_key], + new_label=label)) + credentials[cred_key] = cred_val + cred_map[cred_key] = label + + ret = [credentials] + return ret + except ApiError as e: + raise AnsibleError('API Error: {0}'.format(str(e))) + except AnsibleError as e: + raise e + except Exception: + exc_type, exc_value, exc_traceback = sys.exc_info() + raise AnsibleError(format_exception(exc_type, exc_value, exc_traceback)) diff --git a/plugins/lookup/nios.py b/plugins/lookup/nios.py new file mode 100644 index 0000000000..8848e8cd1a --- /dev/null +++ b/plugins/lookup/nios.py @@ -0,0 +1,118 @@ +# +# Copyright 2018 Red Hat | Ansible +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +DOCUMENTATION = ''' +--- +lookup: nios +short_description: Query Infoblox NIOS objects +description: + - Uses the Infoblox WAPI API to fetch NIOS specified objects. This lookup + supports adding additional keywords to filter the return data and specify + the desired set of returned fields. 
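+# For the manifold lookup defined above, the underlying client flow is
+# roughly the following sketch; the methods are the ones defined in that
+# plugin, while the token and labels are illustrative:
+#
+#   client = ManifoldApiClient('SecretToken')
+#   team_id = client.get_teams('team-2')[0]['id']
+#   resources = client.get_resources(team_id=team_id, label='resource-1')
+#   creds = client.get_credentials(resources[0]['id'])[0]['body']['values']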
+requirements:
+  - infoblox-client
+extends_documentation_fragment:
+- community.general.nios
+
+options:
+    _terms:
+      description: The name of the object to return from NIOS
+      required: True
+    return_fields:
+      description: The list of field names to return for the specified object.
+    filter:
+      description: a dict object that is used to filter the return objects
+    extattrs:
+      description: a dict object that is used to filter on extattrs
+'''
+
+EXAMPLES = """
+- name: fetch all networkview objects
+  set_fact:
+    networkviews: "{{ lookup('nios', 'networkview', provider={'host': 'nios01', 'username': 'admin', 'password': 'password'}) }}"
+
+- name: fetch the default dns view
+  set_fact:
+    dns_views: "{{ lookup('nios', 'view', filter={'name': 'default'}, provider={'host': 'nios01', 'username': 'admin', 'password': 'password'}) }}"
+
+# all of the examples below use credentials that are set using env variables
+# export INFOBLOX_HOST=nios01
+# export INFOBLOX_USERNAME=admin
+# export INFOBLOX_PASSWORD=admin
+
+- name: fetch all host records and include extended attributes
+  set_fact:
+    host_records: "{{ lookup('nios', 'record:host', return_fields=['extattrs', 'name', 'view', 'comment']) }}"
+
+
+- name: use env variables to pass credentials
+  set_fact:
+    networkviews: "{{ lookup('nios', 'networkview') }}"
+
+- name: get a host record
+  set_fact:
+    host: "{{ lookup('nios', 'record:host', filter={'name': 'hostname.ansible.com'}) }}"
+
+- name: get the authoritative zone from a non default dns view
+  set_fact:
+    host: "{{ lookup('nios', 'zone_auth', filter={'fqdn': 'ansible.com', 'view': 'ansible-dns'}) }}"
+"""
+
+RETURN = """
+obj_type:
+  description:
+    - The object type specified in the terms argument
+  returned: always
+  type: complex
+  contains:
+    obj_field:
+      - One or more obj_type fields as specified by return_fields argument or
+        the default set of fields as per the object type
+"""
+
+from ansible.plugins.lookup import LookupBase
+from ansible_collections.community.general.plugins.module_utils.net_tools.nios.api import WapiLookup
+from ansible_collections.community.general.plugins.module_utils.net_tools.nios.api import normalize_extattrs, flatten_extattrs
+from ansible.errors import AnsibleError
+
+
+class LookupModule(LookupBase):
+
+    def run(self, terms, variables=None, **kwargs):
+        try:
+            obj_type = terms[0]
+        except IndexError:
+            raise AnsibleError('the object_type must be specified')
+
+        return_fields = kwargs.pop('return_fields', None)
+        filter_data = kwargs.pop('filter', {})
+        extattrs = normalize_extattrs(kwargs.pop('extattrs', {}))
+        provider = kwargs.pop('provider', {})
+        wapi = WapiLookup(provider)
+        res = wapi.get_object(obj_type, filter_data, return_fields=return_fields, extattrs=extattrs)
+        if res is not None:
+            for obj in res:
+                if 'extattrs' in obj:
+                    obj['extattrs'] = flatten_extattrs(obj['extattrs'])
+        else:
+            res = []
+        return res
diff --git a/plugins/lookup/nios_next_ip.py b/plugins/lookup/nios_next_ip.py
new file mode 100644
index 0000000000..62d0954044
--- /dev/null
+++ b/plugins/lookup/nios_next_ip.py
@@ -0,0 +1,100 @@
+#
+# Copyright 2018 Red Hat | Ansible
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +DOCUMENTATION = ''' +--- +lookup: nios_next_ip +short_description: Return the next available IP address for a network +description: + - Uses the Infoblox WAPI API to return the next available IP addresses + for a given network CIDR +requirements: + - infoblox-client +extends_documentation_fragment: +- community.general.nios + +options: + _terms: + description: The CIDR network to retrieve the next addresses from + required: True + num: + description: The number of IP addresses to return + required: false + default: 1 + exclude: + description: List of IP's that need to be excluded from returned IP addresses + required: false +''' + +EXAMPLES = """ +- name: return next available IP address for network 192.168.10.0/24 + set_fact: + ipaddr: "{{ lookup('nios_next_ip', '192.168.10.0/24', provider={'host': 'nios01', 'username': 'admin', 'password': 'password'}) }}" + +- name: return the next 3 available IP addresses for network 192.168.10.0/24 + set_fact: + ipaddr: "{{ lookup('nios_next_ip', '192.168.10.0/24', num=3, provider={'host': 'nios01', 'username': 'admin', 'password': 'password'}) }}" + +- name: return the next 3 available IP addresses for network 192.168.10.0/24 excluding ip addresses - ['192.168.10.1', '192.168.10.2'] + set_fact: + ipaddr: "{{ lookup('nios_next_ip', '192.168.10.0/24', num=3, exclude=['192.168.10.1', '192.168.10.2'], + provider={'host': 'nios01', 'username': 'admin', 'password': 'password'}) }}" +""" + +RETURN = """ +_list: + description: + - The list of next IP addresses available + returned: always + type: list +""" + +from ansible.plugins.lookup import LookupBase +from ansible_collections.community.general.plugins.module_utils.net_tools.nios.api import WapiLookup +from ansible.module_utils._text import to_text +from ansible.errors import AnsibleError + + +class LookupModule(LookupBase): + + def run(self, terms, variables=None, **kwargs): + try: + network = terms[0] + except IndexError: + raise AnsibleError('missing argument in the form of A.B.C.D/E') + + provider = kwargs.pop('provider', {}) + wapi = WapiLookup(provider) + + network_obj = wapi.get_object('network', {'network': network}) + if network_obj is None: + raise AnsibleError('unable to find network object %s' % network) + + num = kwargs.get('num', 1) + exclude_ip = kwargs.get('exclude', []) + + try: + ref = network_obj[0]['_ref'] + avail_ips = wapi.call_func('next_available_ip', ref, {'num': num, 'exclude': exclude_ip}) + return [avail_ips['ips']] + except Exception as exc: + raise AnsibleError(to_text(exc)) diff --git a/plugins/lookup/nios_next_network.py b/plugins/lookup/nios_next_network.py new file mode 100644 index 0000000000..f0aab7abd7 --- /dev/null +++ b/plugins/lookup/nios_next_network.py @@ -0,0 +1,112 @@ +# +# Copyright 2018 Red Hat | Ansible +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. 
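+# The nios_next_ip lookup above reduces to this WAPI round trip; the
+# provider values and network are illustrative:
+#
+#   wapi = WapiLookup({'host': 'nios01', 'username': 'admin', 'password': 'password'})
+#   network_obj = wapi.get_object('network', {'network': '192.168.10.0/24'})
+#   ref = network_obj[0]['_ref']
+#   result = wapi.call_func('next_available_ip', ref, {'num': 1, 'exclude': []})
+#   # result['ips'] holds the list handed back to the playbook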
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see .
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+lookup: nios_next_network
+short_description: Return the next available network range for a network-container
+description:
+  - Uses the Infoblox WAPI API to return the next available network addresses for
+    a given network CIDR
+requirements:
+  - infoblox-client
+extends_documentation_fragment:
+- community.general.nios

+options:
+    _terms:
+      description: The network-container (CIDR) from which to retrieve the next available networks.
+      required: True
+    cidr:
+      description:
+        - The CIDR (prefix length) of the networks to allocate from the container. The requested
+          CIDR must be greater (more specific) than the CIDR of the parent container.
+      required: True
+      default: 24
+    num:
+      description: The number of network addresses to return from network-container
+      required: false
+      default: 1
+    exclude:
+      description: List of network addresses (CIDRs) to exclude from the returned networks.
+      required: false
+      default: ''
+'''
+
+EXAMPLES = """
+- name: return next available network for network-container 192.168.10.0/24
+  set_fact:
+    networkaddr: "{{ lookup('nios_next_network', '192.168.10.0/24', cidr=25, provider={'host': 'nios01', 'username': 'admin', 'password': 'password'}) }}"
+
+- name: return the next 2 available network addresses for network-container 192.168.10.0/24
+  set_fact:
+    networkaddr: "{{ lookup('nios_next_network', '192.168.10.0/24', cidr=25, num=2,
+                    provider={'host': 'nios01', 'username': 'admin', 'password': 'password'}) }}"
+
+- name: return the available network addresses for network-container 192.168.10.0/24 excluding network range '192.168.10.0/25'
+  set_fact:
+    networkaddr: "{{ lookup('nios_next_network', '192.168.10.0/24', cidr=25, exclude=['192.168.10.0/25'],
+                    provider={'host': 'nios01', 'username': 'admin', 'password': 'password'}) }}"
+"""
+
+RETURN = """
+_list:
+  description:
+    - The list of next network addresses available
+  returned: always
+  type: list
+"""
+
+from ansible.plugins.lookup import LookupBase
+from ansible_collections.community.general.plugins.module_utils.net_tools.nios.api import WapiLookup
+from ansible.module_utils._text import to_text
+from ansible.errors import AnsibleError
+
+
+class LookupModule(LookupBase):
+
+    def run(self, terms, variables=None, **kwargs):
+        try:
+            network = terms[0]
+        except IndexError:
+            raise AnsibleError('missing network argument in the form of A.B.C.D/E')
+
+        cidr = kwargs.get('cidr', 24)
+
+        provider = kwargs.pop('provider', {})
+        wapi = WapiLookup(provider)
+        network_obj = wapi.get_object('networkcontainer', {'network': network})
+
+        if network_obj is None:
+            raise AnsibleError('unable to find network-container object %s' % network)
+        num = kwargs.get('num', 1)
+        exclude_ip = kwargs.get('exclude', [])
+
+        try:
+            ref = network_obj[0]['_ref']
+            avail_nets = wapi.call_func('next_available_network', ref, {'cidr': cidr, 'num': num, 'exclude': exclude_ip})
+            return [avail_nets['networks']]
+
except Exception as exc: + raise AnsibleError(to_text(exc)) diff --git a/plugins/lookup/onepassword.py b/plugins/lookup/onepassword.py new file mode 100644 index 0000000000..8d44dfe98e --- /dev/null +++ b/plugins/lookup/onepassword.py @@ -0,0 +1,226 @@ +# -*- coding: utf-8 -*- +# Copyright: (c) 2018, Scott Buchanan +# Copyright: (c) 2016, Andrew Zenk (lastpass.py used as starting point) +# Copyright: (c) 2018, Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +ANSIBLE_METADATA = { + 'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community' +} + +DOCUMENTATION = ''' + lookup: onepassword + author: + - Scott Buchanan (@scottsb) + - Andrew Zenk (@azenk) + - Sam Doran (@samdoran) + requirements: + - C(op) 1Password command line utility. See U(https://support.1password.com/command-line/) + short_description: fetch field values from 1Password + description: + - C(onepassword) wraps the C(op) command line utility to fetch specific field values from 1Password. + options: + _terms: + description: identifier(s) (UUID, name, or subdomain; case-insensitive) of item(s) to retrieve. + required: True + field: + description: field to return from each matching item (case-insensitive). + default: 'password' + master_password: + description: The password used to unlock the specified vault. + aliases: ['vault_password'] + section: + description: Item section containing the field to retrieve (case-insensitive). If absent will return first match from any section. + subdomain: + description: The 1Password subdomain to authenticate against. + username: + description: The username used to sign in. + secret_key: + description: The secret key used when performing an initial sign in. + vault: + description: Vault containing the item to retrieve (case-insensitive). If absent will search all vaults. + notes: + - This lookup will use an existing 1Password session if one exists. If not, and you have already + performed an initial sign in (meaning C(~/.op/config exists)), then only the C(master_password) is required. + You may optionally specify C(subdomain) in this scenario, otherwise the last used subdomain will be used by C(op). + - This lookup can perform an initial login by providing C(subdomain), C(username), C(secret_key), and C(master_password). + - Due to the B(very) sensitive nature of these credentials, it is B(highly) recommended that you only pass in the minimal credentials + needed at any given time. Also, store these credentials in an Ansible Vault using a key that is equal to or greater in strength + to the 1Password master password. + - This lookup stores potentially sensitive data from 1Password as Ansible facts. + Facts are subject to caching if enabled, which means this data could be stored in clear text + on disk or in a database. 
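+# In terms of the OnePass helper defined below, a single field fetch is
+# roughly this sketch; the item name, vault and password are illustrative:
+#
+#   op = OnePass()
+#   op.master_password = 'correct horse battery staple'
+#   op.assert_logged_in()
+#   op.get_field('HAL 9000', 'username', vault='Discovery')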
+    - Tested with C(op) version 0.5.3
+'''
+
+EXAMPLES = """
+# These examples only work when already signed in to 1Password
+- name: Retrieve password for KITT when already signed in to 1Password
+  debug:
+    var: lookup('onepassword', 'KITT')
+
+- name: Retrieve password for Wintermute when already signed in to 1Password
+  debug:
+    var: lookup('onepassword', 'Tessier-Ashpool', section='Wintermute')
+
+- name: Retrieve username for HAL when already signed in to 1Password
+  debug:
+    var: lookup('onepassword', 'HAL 9000', field='username', vault='Discovery')
+
+- name: Retrieve password for HAL when not signed in to 1Password
+  debug:
+    var: lookup('onepassword',
+                'HAL 9000',
+                subdomain='Discovery',
+                master_password=vault_master_password)
+
+- name: Retrieve password for HAL when never signed in to 1Password
+  debug:
+    var: lookup('onepassword',
+                'HAL 9000',
+                subdomain='Discovery',
+                master_password=vault_master_password,
+                username='tweety@acme.com',
+                secret_key=vault_secret_key)
+"""
+
+RETURN = """
+  _raw:
+    description: field data requested
+"""
+
+import errno
+import json
+import os
+
+from subprocess import Popen, PIPE
+
+from ansible.plugins.lookup import LookupBase
+from ansible.errors import AnsibleLookupError
+from ansible.module_utils._text import to_bytes, to_text
+
+
+class OnePass(object):
+
+    def __init__(self, path='op'):
+        self.cli_path = path
+        self.config_file_path = os.path.expanduser('~/.op/config')
+        self.logged_in = False
+        self.token = None
+        self.subdomain = None
+        self.username = None
+        self.secret_key = None
+        self.master_password = None
+
+    def get_token(self):
+        # If the config file exists, assume an initial signin has taken place and try basic sign in
+        if os.path.isfile(self.config_file_path):
+
+            if not self.master_password:
+                raise AnsibleLookupError('Unable to sign in to 1Password. master_password is required.')
+
+            try:
+                args = ['signin', '--output=raw']
+
+                if self.subdomain:
+                    args = ['signin', self.subdomain, '--output=raw']
+
+                rc, out, err = self._run(args, command_input=to_bytes(self.master_password))
+                self.token = out.strip()
+
+            except AnsibleLookupError:
+                self.full_login()
+
+        else:
+            # Attempt a full sign in since there appears to be no existing sign in
+            self.full_login()
+
+    def assert_logged_in(self):
+        try:
+            rc, out, err = self._run(['get', 'account'], ignore_errors=True)
+            if rc == 0:
+                self.logged_in = True
+            if not self.logged_in:
+                self.get_token()
+        except OSError as e:
+            if e.errno == errno.ENOENT:
+                raise AnsibleLookupError("1Password CLI tool '%s' not installed in path on control machine" % self.cli_path)
+            raise e
+
+    def get_raw(self, item_id, vault=None):
+        args = ["get", "item", item_id]
+        if vault is not None:
+            args += ['--vault={0}'.format(vault)]
+        if not self.logged_in:
+            args += [to_bytes('--session=') + self.token]
+        rc, output, dummy = self._run(args)
+        return output
+
+    def get_field(self, item_id, field, section=None, vault=None):
+        output = self.get_raw(item_id, vault)
+        return self._parse_field(output, field, section) if output != '' else ''
+
+    def full_login(self):
+        if None in [self.subdomain, self.username, self.secret_key, self.master_password]:
+            raise AnsibleLookupError('Unable to perform initial sign in to 1Password. 
' + 'subdomain, username, secret_key, and master_password are required to perform initial sign in.') + + args = [ + 'signin', + '{0}.1password.com'.format(self.subdomain), + to_bytes(self.username), + to_bytes(self.secret_key), + '--output=raw', + ] + + rc, out, err = self._run(args, command_input=to_bytes(self.master_password)) + self.token = out.strip() + + def _run(self, args, expected_rc=0, command_input=None, ignore_errors=False): + command = [self.cli_path] + args + p = Popen(command, stdout=PIPE, stderr=PIPE, stdin=PIPE) + out, err = p.communicate(input=command_input) + rc = p.wait() + if not ignore_errors and rc != expected_rc: + raise AnsibleLookupError(to_text(err)) + return rc, out, err + + def _parse_field(self, data_json, field_name, section_title=None): + data = json.loads(data_json) + if section_title is None: + for field_data in data['details'].get('fields', []): + if field_data.get('name', '').lower() == field_name.lower(): + return field_data.get('value', '') + for section_data in data['details'].get('sections', []): + if section_title is not None and section_title.lower() != section_data['title'].lower(): + continue + for field_data in section_data.get('fields', []): + if field_data.get('t', '').lower() == field_name.lower(): + return field_data.get('v', '') + return '' + + +class LookupModule(LookupBase): + + def run(self, terms, variables=None, **kwargs): + op = OnePass() + + field = kwargs.get('field', 'password') + section = kwargs.get('section') + vault = kwargs.get('vault') + op.subdomain = kwargs.get('subdomain') + op.username = kwargs.get('username') + op.secret_key = kwargs.get('secret_key') + op.master_password = kwargs.get('master_password', kwargs.get('vault_password')) + + op.assert_logged_in() + + values = [] + for term in terms: + values.append(op.get_field(term, field, section, vault)) + return values diff --git a/plugins/lookup/onepassword_raw.py b/plugins/lookup/onepassword_raw.py new file mode 100644 index 0000000000..d9d8f2cb0a --- /dev/null +++ b/plugins/lookup/onepassword_raw.py @@ -0,0 +1,94 @@ +# -*- coding: utf-8 -*- +# Copyright: (c) 2018, Scott Buchanan +# Copyright: (c) 2016, Andrew Zenk (lastpass.py used as starting point) +# Copyright: (c) 2018, Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' + lookup: onepassword_raw + author: + - Scott Buchanan (@scottsb) + - Andrew Zenk (@azenk) + - Sam Doran (@samdoran) + requirements: + - C(op) 1Password command line utility. See U(https://support.1password.com/command-line/) + short_description: fetch an entire item from 1Password + description: + - C(onepassword_raw) wraps C(op) command line utility to fetch an entire item from 1Password + options: + _terms: + description: identifier(s) (UUID, name, or domain; case-insensitive) of item(s) to retrieve. + required: True + master_password: + description: The password used to unlock the specified vault. + aliases: ['vault_password'] + section: + description: Item section containing the field to retrieve (case-insensitive). If absent will return first match from any section. + subdomain: + description: The 1Password subdomain to authenticate against. + username: + description: The username used to sign in. 
+ secret_key: + description: The secret key used when performing an initial sign in. + vault: + description: Vault containing the item to retrieve (case-insensitive). If absent will search all vaults. + notes: + - This lookup will use an existing 1Password session if one exists. If not, and you have already + performed an initial sign in (meaning C(~/.op/config exists)), then only the C(master_password) is required. + You may optionally specify C(subdomain) in this scenario, otherwise the last used subdomain will be used by C(op). + - This lookup can perform an initial login by providing C(subdomain), C(username), C(secret_key), and C(master_password). + - Due to the B(very) sensitive nature of these credentials, it is B(highly) recommended that you only pass in the minimal credentials + needed at any given time. Also, store these credentials in an Ansible Vault using a key that is equal to or greater in strength + to the 1Password master password. + - This lookup stores potentially sensitive data from 1Password as Ansible facts. + Facts are subject to caching if enabled, which means this data could be stored in clear text + on disk or in a database. + - Tested with C(op) version 0.5.3 +''' + +EXAMPLES = """ +- name: Retrieve all data about Wintermute + debug: + var: lookup('onepassword_raw', 'Wintermute') + +- name: Retrieve all data about Wintermute when not signed in to 1Password + debug: + var: lookup('onepassword_raw', 'Wintermute', subdomain='Turing', vault_password='DmbslfLvasjdl') +""" + +RETURN = """ + _raw: + description: field data requested +""" + +import json + +from ansible_collections.community.general.plugins.lookup.onepassword import OnePass +from ansible.plugins.lookup import LookupBase + + +class LookupModule(LookupBase): + + def run(self, terms, variables=None, **kwargs): + op = OnePass() + + vault = kwargs.get('vault') + op.subdomain = kwargs.get('subdomain') + op.username = kwargs.get('username') + op.secret_key = kwargs.get('secret_key') + op.master_password = kwargs.get('master_password', kwargs.get('vault_password')) + + op.assert_logged_in() + + values = [] + for term in terms: + data = json.loads(op.get_raw(term, vault)) + values.append(data) + return values diff --git a/plugins/lookup/passwordstore.py b/plugins/lookup/passwordstore.py new file mode 100644 index 0000000000..86615cffbf --- /dev/null +++ b/plugins/lookup/passwordstore.py @@ -0,0 +1,280 @@ +# (c) 2017, Patrick Deelman +# (c) 2017 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + + +DOCUMENTATION = ''' + lookup: passwordstore + author: + - Patrick Deelman + short_description: manage passwords with passwordstore.org's pass utility + description: + - Enables Ansible to retrieve, create or update passwords from the passwordstore.org pass utility. + It also retrieves YAML style keys stored as multilines in the passwordfile. + options: + _terms: + description: query key + required: True + passwordstore: + description: location of the password store + default: '~/.password-store' + directory: + description: The directory of the password store. + env: + - name: PASSWORD_STORE_DIR + create: + description: Create the password if it does not already exist. + type: bool + default: 'no' + overwrite: + description: Overwrite the password if it does already exist. 
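+# For the onepassword_raw lookup above: each term yields the whole item as a
+# parsed JSON document, so nested data can be addressed in Jinja once the
+# returned list is unwrapped, e.g. (item name illustrative):
+#
+#   "{{ (query('onepassword_raw', 'Wintermute') | first)['details'] }}"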
+ type: bool + default: 'no' + returnall: + description: Return all the content of the password, not only the first line. + type: bool + default: 'no' + subkey: + description: Return a specific subkey of the password. When set to C(password), always returns the first line. + default: password + userpass: + description: Specify a password to save, instead of a generated one. + length: + description: The length of the generated password + type: integer + default: 16 + backup: + description: Used with C(overwrite=yes). Backup the previous password in a subkey. + type: bool + default: 'no' + nosymbols: + description: use alphanumeric characters + type: bool + default: 'no' +''' +EXAMPLES = """ +# Debug is used for examples, BAD IDEA to show passwords on screen +- name: Basic lookup. Fails if example/test doesn't exist + debug: + msg: "{{ lookup('passwordstore', 'example/test')}}" + +- name: Create pass with random 16 character password. If password exists just give the password + debug: + var: mypassword + vars: + mypassword: "{{ lookup('passwordstore', 'example/test create=true')}}" + +- name: Different size password + debug: + msg: "{{ lookup('passwordstore', 'example/test create=true length=42')}}" + +- name: Create password and overwrite the password if it exists. As a bonus, this module includes the old password inside the pass file + debug: + msg: "{{ lookup('passwordstore', 'example/test create=true overwrite=true')}}" + +- name: Create an alphanumeric password + debug: msg="{{ lookup('passwordstore', 'example/test create=true nosymbols=true') }}" + +- name: Return the value for user in the KV pair user, username + debug: + msg: "{{ lookup('passwordstore', 'example/test subkey=user')}}" + +- name: Return the entire password file content + set_fact: + passfilecontent: "{{ lookup('passwordstore', 'example/test returnall=true')}}" +""" + +RETURN = """ +_raw: + description: + - a password +""" + +import os +import subprocess +import time + +from distutils import util +from ansible.errors import AnsibleError, AnsibleAssertionError +from ansible.module_utils._text import to_bytes, to_native, to_text +from ansible.utils.encrypt import random_password +from ansible.plugins.lookup import LookupBase +from ansible import constants as C + + +# backhacked check_output with input for python 2.7 +# http://stackoverflow.com/questions/10103551/passing-data-to-subprocess-check-output +def check_output2(*popenargs, **kwargs): + if 'stdout' in kwargs: + raise ValueError('stdout argument not allowed, it will be overridden.') + if 'stderr' in kwargs: + raise ValueError('stderr argument not allowed, it will be overridden.') + if 'input' in kwargs: + if 'stdin' in kwargs: + raise ValueError('stdin and input arguments may not both be used.') + b_inputdata = to_bytes(kwargs['input'], errors='surrogate_or_strict') + del kwargs['input'] + kwargs['stdin'] = subprocess.PIPE + else: + b_inputdata = None + process = subprocess.Popen(*popenargs, stdout=subprocess.PIPE, stderr=subprocess.PIPE, **kwargs) + try: + b_out, b_err = process.communicate(b_inputdata) + except Exception: + process.kill() + process.wait() + raise + retcode = process.poll() + if retcode != 0 or \ + b'encryption failed: Unusable public key' in b_out or \ + b'encryption failed: Unusable public key' in b_err: + cmd = kwargs.get("args") + if cmd is None: + cmd = popenargs[0] + raise subprocess.CalledProcessError( + retcode, + cmd, + to_native(b_out + b_err, errors='surrogate_or_strict') + ) + return b_out + + +class LookupModule(LookupBase): + def 
parse_params(self, term): + # I went with the "traditional" param followed with space separated KV pairs. + # Waiting for final implementation of lookup parameter parsing. + # See: https://github.com/ansible/ansible/issues/12255 + params = term.split() + if len(params) > 0: + # the first param is the pass-name + self.passname = params[0] + # next parse the optional parameters in keyvalue pairs + try: + for param in params[1:]: + name, value = param.split('=') + if name not in self.paramvals: + raise AnsibleAssertionError('%s not in paramvals' % name) + self.paramvals[name] = value + except (ValueError, AssertionError) as e: + raise AnsibleError(e) + # check and convert values + try: + for key in ['create', 'returnall', 'overwrite', 'backup', 'nosymbols']: + if not isinstance(self.paramvals[key], bool): + self.paramvals[key] = util.strtobool(self.paramvals[key]) + except (ValueError, AssertionError) as e: + raise AnsibleError(e) + if not isinstance(self.paramvals['length'], int): + if self.paramvals['length'].isdigit(): + self.paramvals['length'] = int(self.paramvals['length']) + else: + raise AnsibleError("{0} is not a correct value for length".format(self.paramvals['length'])) + + # Set PASSWORD_STORE_DIR if directory is set + if self.paramvals['directory']: + if os.path.isdir(self.paramvals['directory']): + os.environ['PASSWORD_STORE_DIR'] = self.paramvals['directory'] + else: + raise AnsibleError('Passwordstore directory \'{0}\' does not exist'.format(self.paramvals['directory'])) + + def check_pass(self): + try: + self.passoutput = to_text( + check_output2(["pass", self.passname]), + errors='surrogate_or_strict' + ).splitlines() + self.password = self.passoutput[0] + self.passdict = {} + for line in self.passoutput[1:]: + if ':' in line: + name, value = line.split(':', 1) + self.passdict[name.strip()] = value.strip() + except (subprocess.CalledProcessError) as e: + if e.returncode == 1 and 'not in the password store' in e.output: + # if pass returns 1 and return string contains 'is not in the password store.' + # We need to determine if this is valid or Error. 
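+                # A missing entry is therefore only acceptable when
+                # create=true was requested, e.g.:
+                #   lookup('passwordstore', 'example/test create=true')
+                # falls through to generate_password() in run(), while a
+                # plain lookup('passwordstore', 'example/test') must raise,
+                # because the caller expected the entry to exist.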
+                if not self.paramvals['create']:
+                    raise AnsibleError('passname: {0} not found, use create=True'.format(self.passname))
+                else:
+                    return False
+            else:
+                raise AnsibleError(e)
+        return True
+
+    def get_newpass(self):
+        if self.paramvals['nosymbols']:
+            chars = C.DEFAULT_PASSWORD_CHARS[:62]
+        else:
+            chars = C.DEFAULT_PASSWORD_CHARS
+
+        if self.paramvals['userpass']:
+            newpass = self.paramvals['userpass']
+        else:
+            newpass = random_password(length=self.paramvals['length'], chars=chars)
+        return newpass
+
+    def update_password(self):
+        # generate new password, insert old lines from current result and return new password
+        newpass = self.get_newpass()
+        datetime = time.strftime("%d/%m/%Y %H:%M:%S")
+        msg = newpass + '\n'
+        if self.passoutput[1:]:
+            msg += '\n'.join(self.passoutput[1:]) + '\n'
+        if self.paramvals['backup']:
+            msg += "lookup_pass: old password was {0} (Updated on {1})\n".format(self.password, datetime)
+        try:
+            check_output2(['pass', 'insert', '-f', '-m', self.passname], input=msg)
+        except (subprocess.CalledProcessError) as e:
+            raise AnsibleError(e)
+        return newpass
+
+    def generate_password(self):
+        # generate new file and insert lookup_pass: Generated by Ansible on {date}
+        # use pwgen to generate the password and insert values with pass -m
+        newpass = self.get_newpass()
+        datetime = time.strftime("%d/%m/%Y %H:%M:%S")
+        msg = newpass + '\n' + "lookup_pass: First generated by ansible on {0}\n".format(datetime)
+        try:
+            check_output2(['pass', 'insert', '-f', '-m', self.passname], input=msg)
+        except (subprocess.CalledProcessError) as e:
+            raise AnsibleError(e)
+        return newpass
+
+    def get_passresult(self):
+        if self.paramvals['returnall']:
+            return os.linesep.join(self.passoutput)
+        if self.paramvals['subkey'] == 'password':
+            return self.password
+        else:
+            if self.paramvals['subkey'] in self.passdict:
+                return self.passdict[self.paramvals['subkey']]
+            else:
+                return None
+
+    def run(self, terms, variables, **kwargs):
+        result = []
+        self.paramvals = {
+            'subkey': 'password',
+            'directory': variables.get('passwordstore'),
+            'create': False,
+            'returnall': False,
+            'overwrite': False,
+            'nosymbols': False,
+            'userpass': '',
+            'length': 16,
+            'backup': False,
+        }
+
+        for term in terms:
+            self.parse_params(term)   # parse the input into paramvals
+            if self.check_pass():     # password exists
+                if self.paramvals['overwrite'] and self.paramvals['subkey'] == 'password':
+                    result.append(self.update_password())
+                else:
+                    result.append(self.get_passresult())
+            else:                     # password does not exist
+                if self.paramvals['create']:
+                    result.append(self.generate_password())
+        return result
diff --git a/plugins/lookup/rabbitmq.py b/plugins/lookup/rabbitmq.py
new file mode 100644
index 0000000000..727af9539a
--- /dev/null
+++ b/plugins/lookup/rabbitmq.py
@@ -0,0 +1,189 @@
+# (c) 2018, John Imison
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+    lookup: rabbitmq
+    author: John Imison <@Im0>
+    short_description: Retrieve messages from an AMQP/AMQPS RabbitMQ queue.
+    description:
+        - This lookup uses a basic get to retrieve all, or a limited number C(count), messages from a RabbitMQ queue.
+    options:
+      url:
+        description:
+          - A URI connection string to connect to the AMQP/AMQPS RabbitMQ server.
+          - For more information refer to the URI spec U(https://www.rabbitmq.com/uri-spec.html).
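+# The run() method of the passwordstore plugin above boils down to this
+# decision table:
+#
+#   entry exists, overwrite=true, subkey=password  -> update_password()
+#   entry exists, otherwise                        -> get_passresult()
+#   entry missing, create=true                     -> generate_password()
+#   entry missing, otherwise                       -> AnsibleError from check_pass()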
+ required: True + queue: + description: + - The queue to get messages from. + required: True + count: + description: + - How many messages to collect from the queue. + - If not set, defaults to retrieving all the messages from the queue. + requirements: + - The python pika package U(https://pypi.org/project/pika/). + notes: + - This lookup implements BlockingChannel.basic_get to get messages from a RabbitMQ server. + - After retrieving a message from the server, receipt of the message is acknowledged and the message on the server is deleted. + - Pika is a pure-Python implementation of the AMQP 0-9-1 protocol that tries to stay fairly independent of the underlying network support library. + - More information about pika can be found at U(https://pika.readthedocs.io/en/stable/). + - This plugin is tested against RabbitMQ. Other AMQP 0.9.1 protocol based servers may work but not tested/guaranteed. + - Assigning the return messages to a variable under C(vars) may result in unexpected results as the lookup is evaluated every time the + variable is referenced. + - Currently this plugin only handles text based messages from a queue. Unexpected results may occur when retrieving binary data. +''' + + +EXAMPLES = """ +- name: Get all messages off a queue + debug: + msg: "{{ lookup('rabbitmq', url='amqp://guest:guest@192.168.0.10:5672/%2F', queue='hello') }}" + + +# If you are intending on using the returned messages as a variable in more than +# one task (eg. debug, template), it is recommended to set_fact. + +- name: Get 2 messages off a queue and set a fact for re-use + set_fact: + messages: "{{ lookup('rabbitmq', url='amqp://guest:guest@192.168.0.10:5672/%2F', queue='hello', count=2) }}" + +- name: Dump out contents of the messages + debug: + var: messages + +""" + +RETURN = """ + _list: + description: + - A list of dictionaries with keys and value from the queue. + type: list + contains: + content_type: + description: The content_type on the message in the queue. + type: str + delivery_mode: + description: The delivery_mode on the message in the queue. + type: str + delivery_tag: + description: The delivery_tag on the message in the queue. + type: str + exchange: + description: The exchange the message came from. + type: str + message_count: + description: The message_count for the message on the queue. + type: str + msg: + description: The content of the message. + type: str + redelivered: + description: The redelivered flag. True if the message has been delivered before. + type: bool + routing_key: + description: The routing_key on the message in the queue. + type: str + headers: + description: The headers for the message returned from the queue. + type: dict + json: + description: If application/json is specified in content_type, json will be loaded into variables. 
+      type: dict
+
+"""
+
+import json
+
+from ansible.errors import AnsibleError
+from ansible.plugins.lookup import LookupBase
+from ansible.module_utils._text import to_native, to_text
+from ansible.utils.display import Display
+
+try:
+    import pika
+    from pika import spec
+    HAS_PIKA = True
+except ImportError:
+    HAS_PIKA = False
+
+display = Display()
+
+
+class LookupModule(LookupBase):
+
+    def run(self, terms, variables=None, url=None, queue=None, count=None):
+        if not HAS_PIKA:
+            raise AnsibleError('pika python package is required for rabbitmq lookup.')
+        if not url:
+            raise AnsibleError('URL is required for rabbitmq lookup.')
+        if not queue:
+            raise AnsibleError('Queue is required for rabbitmq lookup.')
+
+        display.vvv(u"terms:%s : variables:%s url:%s queue:%s count:%s" % (terms, variables, url, queue, count))
+
+        try:
+            parameters = pika.URLParameters(url)
+        except Exception as e:
+            raise AnsibleError("URL malformed: %s" % to_native(e))
+
+        try:
+            connection = pika.BlockingConnection(parameters)
+        except Exception as e:
+            raise AnsibleError("Connection issue: %s" % to_native(e))
+
+        try:
+            conn_channel = connection.channel()
+        except pika.exceptions.AMQPChannelError as e:
+            try:
+                connection.close()
+            except pika.exceptions.AMQPConnectionError as ie:
+                raise AnsibleError("Channel and connection closing issues: %s / %s" % (to_native(e), to_native(ie)))
+            raise AnsibleError("Channel issue: %s" % to_native(e))
+
+        ret = []
+        idx = 0
+
+        while True:
+            method_frame, properties, body = conn_channel.basic_get(queue=queue)
+            if method_frame:
+                display.vvv(u"%s, %s, %s " % (method_frame, properties, to_text(body)))
+
+                # TODO: In the future consider checking content_type and handle text/binary data differently.
+                msg_details = dict({
+                    'msg': to_text(body),
+                    'message_count': method_frame.message_count,
+                    'routing_key': method_frame.routing_key,
+                    'delivery_tag': method_frame.delivery_tag,
+                    'redelivered': method_frame.redelivered,
+                    'exchange': method_frame.exchange,
+                    'delivery_mode': properties.delivery_mode,
+                    'content_type': properties.content_type,
+                    'headers': properties.headers
+                })
+                if properties.content_type == 'application/json':
+                    try:
+                        msg_details['json'] = json.loads(msg_details['msg'])
+                    except ValueError as e:
+                        raise AnsibleError("Unable to decode JSON for message %s: %s" % (method_frame.delivery_tag, to_native(e)))
+
+                ret.append(msg_details)
+                conn_channel.basic_ack(method_frame.delivery_tag)
+                idx += 1
+                if method_frame.message_count == 0 or idx == count:
+                    break
+            # If we didn't get a method_frame, exit.
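+            # basic_get is a polled, non-blocking read: pika returns a
+            # (None, None, None) tuple once the queue is empty, which lands
+            # us in this branch; every message fetched above was already
+            # acknowledged, so nothing is requeued on exit.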
+            else:
+                break
+
+        if connection.is_closed:
+            return [ret]
+        else:
+            try:
+                connection.close()
+            except pika.exceptions.AMQPConnectionError:
+                pass
+            return [ret]
diff --git a/plugins/lookup/redis.py b/plugins/lookup/redis.py
new file mode 100644
index 0000000000..92933e0d2a
--- /dev/null
+++ b/plugins/lookup/redis.py
@@ -0,0 +1,111 @@
+# (c) 2012, Jan-Piet Mens
+# (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+    lookup: redis
+    author:
+      - Jan-Piet Mens (@jpmens)
+      - Ansible Core
+    short_description: fetch data from Redis
+    description:
+      - This lookup returns a list of results from a Redis DB corresponding to a list of items given to it
+    requirements:
+      - redis (python library https://github.com/andymccurdy/redis-py/)
+    options:
+      _terms:
+        description: list of keys to query
+      host:
+        description: location of Redis host
+        default: '127.0.0.1'
+        env:
+          - name: ANSIBLE_REDIS_HOST
+        ini:
+          - section: lookup_redis
+            key: host
+      port:
+        description: port on which Redis is listening
+        default: 6379
+        type: int
+        env:
+          - name: ANSIBLE_REDIS_PORT
+        ini:
+          - section: lookup_redis
+            key: port
+      socket:
+        description: path to socket on which to query Redis, this option overrides host and port options when set.
+        type: path
+        env:
+          - name: ANSIBLE_REDIS_SOCKET
+        ini:
+          - section: lookup_redis
+            key: socket
+'''

+EXAMPLES = """
+- name: query redis for somekey (default or configured settings used)
+  debug: msg="{{ lookup('redis', 'somekey') }}"
+
+- name: query redis for list of keys and non-default host and port
+  debug: msg="{{ lookup('redis', item, host='myredis.internal.com', port=2121) }}"
+  loop: '{{list_of_redis_keys}}'
+
+- name: use list directly
+  debug: msg="{{ lookup('redis', 'key1', 'key2', 'key3') }}"
+
+- name: use list directly with a socket
+  debug: msg="{{ lookup('redis', 'key1', 'key2', socket='/var/tmp/redis.sock') }}"
+
+"""
+
+RETURN = """
+_raw:
+  description: value(s) stored in Redis
+"""
+
+HAVE_REDIS = False
+try:
+    import redis
+    HAVE_REDIS = True
+except ImportError:
+    pass
+
+from ansible.module_utils._text import to_text
+from ansible.errors import AnsibleError
+from ansible.plugins.lookup import LookupBase
+
+
+class LookupModule(LookupBase):
+
+    def run(self, terms, variables, **kwargs):
+
+        if not HAVE_REDIS:
+            raise AnsibleError("Can't LOOKUP(redis): module redis is not installed")
+
+        # get options
+        self.set_options(direct=kwargs)
+
+        # setup connection
+        host = self.get_option('host')
+        port = self.get_option('port')
+        socket = self.get_option('socket')
+        if socket is None:
+            conn = redis.Redis(host=host, port=port)
+        else:
+            conn = redis.Redis(unix_socket_path=socket)
+
+        ret = []
+        for term in terms:
+            try:
+                res = conn.get(term)
+                if res is None:
+                    res = ""
+                ret.append(to_text(res))
+            except Exception as e:
+                # connection failed or key not found
+                raise AnsibleError('Encountered exception while fetching {0}: {1}'.format(term, e))
+        return ret
diff --git a/plugins/lookup/shelvefile.py b/plugins/lookup/shelvefile.py
new file mode 100644
index 0000000000..9f2d49b3c1
--- /dev/null
+++ b/plugins/lookup/shelvefile.py
@@ -0,0 +1,88 @@
+# (c) 2015, Alejandro Guirao
+# (c) 2012-17 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import (absolute_import, division, print_function)
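+# For the redis lookup above, the per-term round trip is essentially this
+# sketch; the host and key are illustrative:
+#
+#   import redis
+#   conn = redis.Redis(host='127.0.0.1', port=6379)
+#   res = conn.get('somekey')    # None when the key does not exist
+#   value = to_text(res) if res is not None else ''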
+__metaclass__ = type
+
+DOCUMENTATION = '''
+    lookup: shelvefile
+    author: Alejandro Guirao
+    short_description: read keys from Python shelve file
+    description:
+      - Read keys from Python shelve file.
+    options:
+      _terms:
+        description: sets of key value pairs of parameters
+      key:
+        description: key to query
+        required: True
+      file:
+        description: path to shelve file
+        required: True
+'''
+
+EXAMPLES = """
+- name: retrieve a string value corresponding to a key inside a Python shelve file
+  debug: msg="{{ lookup('shelvefile', 'file=path_to_some_shelve_file.db key=key_to_retrieve') }}"
+"""
+
+RETURN = """
+_list:
+  description: value(s) of key(s) in shelve file(s)
+"""
+import shelve
+
+from ansible.errors import AnsibleError, AnsibleAssertionError
+from ansible.plugins.lookup import LookupBase
+from ansible.module_utils._text import to_bytes, to_text
+
+
+class LookupModule(LookupBase):
+
+    def read_shelve(self, shelve_filename, key):
+        """
+        Read the value of "key" from a shelve file
+        """
+        d = shelve.open(to_bytes(shelve_filename))
+        res = d.get(key, None)
+        d.close()
+        return res
+
+    def run(self, terms, variables=None, **kwargs):
+
+        if not isinstance(terms, list):
+            terms = [terms]
+
+        ret = []
+
+        for term in terms:
+            paramvals = {"file": None, "key": None}
+            params = term.split()
+
+            try:
+                for param in params:
+                    name, value = param.split('=')
+                    if name not in paramvals:
+                        raise AnsibleAssertionError('%s not in paramvals' % name)
+                    paramvals[name] = value
+
+            except (ValueError, AssertionError) as e:
+                # In case "file" or "key" are not present
+                raise AnsibleError(e)
+
+            key = paramvals['key']
+
+            # Search also in the role/files directory and in the playbook directory
+            shelvefile = self.find_file_in_search_path(variables, 'files', paramvals['file'])
+
+            if shelvefile:
+                res = self.read_shelve(shelvefile, key)
+                if res is None:
+                    raise AnsibleError("Key %s not found in shelve file %s" % (key, shelvefile))
+                # Convert the value read to string
+                ret.append(to_text(res))
+                break
+            else:
+                raise AnsibleError("Could not locate shelve file in lookup: %s" % paramvals['file'])
+
+        return ret
diff --git a/plugins/module_utils/__init__.py b/plugins/module_utils/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/plugins/module_utils/alicloud_ecs.py b/plugins/module_utils/alicloud_ecs.py
new file mode 100644
index 0000000000..31b4694c0b
--- /dev/null
+++ b/plugins/module_utils/alicloud_ecs.py
@@ -0,0 +1,158 @@
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by Ansible
+# still belong to the author of the module, and may assign their own license
+# to the complete work.
+#
+# Copyright (c) 2017 Alibaba Group Holding Limited. He Guimin
+#
+# Redistribution and use in source and binary forms, with or without modification,
+# are permitted provided that the following conditions are met:
+#
+#     * Redistributions of source code must retain the above copyright
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above copyright notice,
+#       this list of conditions and the following disclaimer in the documentation
+#       and/or other materials provided with the distribution.
diff --git a/plugins/module_utils/alicloud_ecs.py b/plugins/module_utils/alicloud_ecs.py
new file mode 100644
index 0000000000..31b4694c0b
--- /dev/null
+++ b/plugins/module_utils/alicloud_ecs.py
@@ -0,0 +1,158 @@
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by Ansible
+# still belong to the author of the module, and may assign their own license
+# to the complete work.
+#
+# Copyright (c) 2017 Alibaba Group Holding Limited. He Guimin
+#
+# Redistribution and use in source and binary forms, with or without modification,
+# are permitted provided that the following conditions are met:
+#
+#    * Redistributions of source code must retain the above copyright
+#      notice, this list of conditions and the following disclaimer.
+#    * Redistributions in binary form must reproduce the above copyright notice,
+#      this list of conditions and the following disclaimer in the documentation
+#      and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+from ansible.module_utils.basic import env_fallback
+
+try:
+    import footmark
+    import footmark.ecs
+    import footmark.slb
+    import footmark.vpc
+    import footmark.rds
+    import footmark.ess
+    HAS_FOOTMARK = True
+except ImportError:
+    HAS_FOOTMARK = False
+
+
+class AnsibleACSError(Exception):
+    pass
+
+
+def acs_common_argument_spec():
+    return dict(
+        alicloud_access_key=dict(required=True, aliases=['access_key_id', 'access_key'], no_log=True,
+                                 fallback=(env_fallback, ['ALICLOUD_ACCESS_KEY', 'ALICLOUD_ACCESS_KEY_ID'])),
+        alicloud_secret_key=dict(required=True, aliases=['secret_access_key', 'secret_key'], no_log=True,
+                                 fallback=(env_fallback, ['ALICLOUD_SECRET_KEY', 'ALICLOUD_SECRET_ACCESS_KEY'])),
+        alicloud_security_token=dict(aliases=['security_token'], no_log=True,
+                                     fallback=(env_fallback, ['ALICLOUD_SECURITY_TOKEN'])),
+    )
+
+
+def ecs_argument_spec():
+    spec = acs_common_argument_spec()
+    spec.update(
+        dict(
+            alicloud_region=dict(required=True, aliases=['region', 'region_id'],
+                                 fallback=(env_fallback, ['ALICLOUD_REGION', 'ALICLOUD_REGION_ID'])),
+        )
+    )
+    return spec
+
+
+def get_acs_connection_info(module):
+
+    ecs_params = dict(acs_access_key_id=module.params.get('alicloud_access_key'),
+                      acs_secret_access_key=module.params.get('alicloud_secret_key'),
+                      security_token=module.params.get('alicloud_security_token'),
+                      user_agent='Ansible-Provider-Alicloud')
+
+    return module.params.get('alicloud_region'), ecs_params
+
+
+def connect_to_acs(acs_module, region, **params):
+    conn = acs_module.connect_to_region(region, **params)
+    if not conn:
+        if region not in [acs_module_region.id for acs_module_region in acs_module.regions()]:
+            raise AnsibleACSError(
+                "Region %s does not seem to be available for acs module %s." % (region, acs_module.__name__))
+        else:
+            raise AnsibleACSError(
+                "Unknown problem connecting to region %s for acs module %s." % (region, acs_module.__name__))
+    return conn
+
+
+def ecs_connect(module):
+    """ Return an ECS connection"""
+
+    region, ecs_params = get_acs_connection_info(module)
+    # If we have a region specified, connect to its endpoint.
+    if region:
+        try:
+            ecs = connect_to_acs(footmark.ecs, region, **ecs_params)
+        except AnsibleACSError as e:
+            module.fail_json(msg=str(e))
+    # Otherwise, no region was given, so we fall back to the old connection method
+    return ecs
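A module built on these helpers typically merges ecs_argument_spec() into its own spec and then opens a connection. A minimal sketch (the instance_id option is hypothetical, and the helpers above are assumed to be in scope):

from ansible.module_utils.basic import AnsibleModule

def main():
    argument_spec = ecs_argument_spec()
    argument_spec.update(
        instance_id=dict(type='str'),  # hypothetical module-specific option
    )
    module = AnsibleModule(argument_spec=argument_spec)
    ecs = ecs_connect(module)  # footmark ECS connection bound to alicloud_region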
+def slb_connect(module):
+    """ Return an SLB connection"""
+
+    region, slb_params = get_acs_connection_info(module)
+    # If we have a region specified, connect to its endpoint.
+    if region:
+        try:
+            slb = connect_to_acs(footmark.slb, region, **slb_params)
+        except AnsibleACSError as e:
+            module.fail_json(msg=str(e))
+    # Otherwise, no region was given, so we fall back to the old connection method
+    return slb
+
+
+def vpc_connect(module):
+    """ Return a VPC connection"""
+
+    region, vpc_params = get_acs_connection_info(module)
+    # If we have a region specified, connect to its endpoint.
+    if region:
+        try:
+            vpc = connect_to_acs(footmark.vpc, region, **vpc_params)
+        except AnsibleACSError as e:
+            module.fail_json(msg=str(e))
+    # Otherwise, no region was given, so we fall back to the old connection method
+    return vpc
+
+
+def rds_connect(module):
+    """ Return an RDS connection"""
+
+    region, rds_params = get_acs_connection_info(module)
+    # If we have a region specified, connect to its endpoint.
+    if region:
+        try:
+            rds = connect_to_acs(footmark.rds, region, **rds_params)
+        except AnsibleACSError as e:
+            module.fail_json(msg=str(e))
+    # Otherwise, no region was given, so we fall back to the old connection method
+    return rds
+
+
+def ess_connect(module):
+    """ Return an ESS connection"""
+
+    region, ess_params = get_acs_connection_info(module)
+    # If we have a region specified, connect to its endpoint.
+    if region:
+        try:
+            ess = connect_to_acs(footmark.ess, region, **ess_params)
+        except AnsibleACSError as e:
+            module.fail_json(msg=str(e))
+    # Otherwise, no region was given, so we fall back to the old connection method
+    return ess
diff --git a/plugins/module_utils/cloud.py b/plugins/module_utils/cloud.py
new file mode 100644
index 0000000000..0d29071fe1
--- /dev/null
+++ b/plugins/module_utils/cloud.py
@@ -0,0 +1,217 @@
+#
+# (c) 2016 Allen Sanabria,
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible.  If not, see <https://www.gnu.org/licenses/>.
+#
+"""
+This module adds shared support for generic cloud modules
+
+In order to use this module, include it as part of a custom
+module as shown below.
+
+from ansible.module_utils.cloud import CloudRetry
+
+The 'cloud' module provides the following common classes:
+
+    * CloudRetry
+        - The base class to be used by other cloud providers, in order to
+          provide a backoff/retry decorator based on status codes.
+
+        - Example using the AWSRetry class which inherits from CloudRetry.
+
+          @AWSRetry.exponential_backoff(retries=10, delay=3)
+          get_ec2_security_group_ids_from_names()
+
+          @AWSRetry.jittered_backoff()
+          get_ec2_security_group_ids_from_names()
+
+"""
+import random
+from functools import wraps
+import syslog
+import time
+
+
+def _exponential_backoff(retries=10, delay=2, backoff=2, max_delay=60):
+    """ Customizable exponential backoff strategy.
+    Args:
+        retries (int): Maximum number of times to retry a request.
+        delay (float): Initial (base) delay.
+        backoff (float): base of the exponent to use for exponential
+            backoff.
+        max_delay (int): Optional. If provided each delay generated is capped
+            at this amount. Defaults to 60 seconds.
+    Returns:
+        Callable that returns a generator. This generator yields durations in
+        seconds to be used as delays for an exponential backoff strategy.
+    Usage:
+        >>> backoff = _exponential_backoff()
+        >>> backoff
+        <function backoff_gen at 0x...>
+        >>> list(backoff())
+        [2, 4, 8, 16, 32, 60, 60, 60, 60, 60]
+    """
+    def backoff_gen():
+        for retry in range(0, retries):
+            sleep = delay * backoff ** retry
+            yield sleep if max_delay is None else min(sleep, max_delay)
+    return backoff_gen
+
+
+def _full_jitter_backoff(retries=10, delay=3, max_delay=60, _random=random):
+    """ Implements the "Full Jitter" backoff strategy described here
+    https://www.awsarchitectureblog.com/2015/03/backoff.html
+    Args:
+        retries (int): Maximum number of times to retry a request.
+        delay (float): Approximate number of seconds to sleep for the first
+            retry.
+        max_delay (int): The maximum number of seconds to sleep for any retry.
+        _random (random.Random or None): Makes this generator testable by
+            allowing developers to explicitly pass in a seeded Random.
+    Returns:
+        Callable that returns a generator. This generator yields durations in
+        seconds to be used as delays for a full jitter backoff strategy.
+    Usage:
+        >>> backoff = _full_jitter_backoff(retries=5)
+        >>> backoff
+        <function backoff_gen at 0x...>
+        >>> list(backoff())
+        [3, 6, 5, 23, 38]
+        >>> list(backoff())
+        [2, 1, 6, 6, 31]
+    """
+    def backoff_gen():
+        for retry in range(0, retries):
+            yield _random.randint(0, min(max_delay, delay * 2 ** retry))
+    return backoff_gen
+
+
+class CloudRetry(object):
+    """ CloudRetry can be used by any cloud provider, in order to implement a
+    backoff algorithm/retry effect based on Status Code from Exceptions.
+    """
+    # This is the base class of the exception.
+    # AWS Example botocore.exceptions.ClientError
+    base_class = None
+
+    @staticmethod
+    def status_code_from_exception(error):
+        """ Return the status code from the exception object
+        Args:
+            error (object): The exception itself.
+        """
+        pass
+
+    @staticmethod
+    def found(response_code, catch_extra_error_codes=None):
+        """ Return True if the Response Code to retry on was found.
+        Args:
+            response_code (str): This is the Response Code that is being matched against.
+        """
+        pass
+
+    @classmethod
+    def _backoff(cls, backoff_strategy, catch_extra_error_codes=None):
+        """ Retry calling the Cloud decorated function using the provided
+        backoff strategy.
+        Args:
+            backoff_strategy (callable): Callable that returns a generator. The
+            generator should yield sleep times for each retry of the decorated
+            function.
+        """
+        def deco(f):
+            @wraps(f)
+            def retry_func(*args, **kwargs):
+                for delay in backoff_strategy():
+                    try:
+                        return f(*args, **kwargs)
+                    except Exception as e:
+                        if isinstance(e, cls.base_class):
+                            response_code = cls.status_code_from_exception(e)
+                            if cls.found(response_code, catch_extra_error_codes):
+                                msg = "{0}: Retrying in {1} seconds...".format(str(e), delay)
+                                syslog.syslog(syslog.LOG_INFO, msg)
+                                time.sleep(delay)
+                            else:
+                                # Re-raise the original exception if its status code is not retryable
+                                raise e
+                        else:
+                            # Re-raise the original exception if it is not of the expected base class
+                            raise e
+                return f(*args, **kwargs)
+
+            return retry_func  # true decorator
+
+        return deco
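Concretely, a provider adopts CloudRetry by subclassing it and filling in the three hooks. A self-contained sketch with a made-up exception type (FakeAPIError and the retryable codes are illustrative, not part of this file):

import random

class FakeAPIError(Exception):  # stand-in for a provider's error type
    def __init__(self, code):
        super(FakeAPIError, self).__init__('API error %s' % code)
        self.code = code

class FakeRetry(CloudRetry):
    base_class = FakeAPIError

    @staticmethod
    def status_code_from_exception(error):
        return error.code

    @staticmethod
    def found(response_code, catch_extra_error_codes=None):
        return response_code in (429, 503)  # retry only throttling/unavailable

@FakeRetry.exponential_backoff(retries=3, delay=0.1)
def flaky_call():
    if random.random() < 0.5:
        raise FakeAPIError(429)  # retried with growing delays
    return 'ok'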
+
+    @classmethod
+    def exponential_backoff(cls, retries=10, delay=3, backoff=2, max_delay=60, catch_extra_error_codes=None):
+        """
+        Retry calling the Cloud decorated function using an exponential backoff.
+
+        Kwargs:
+            retries (int): Number of times to retry a failed request before giving up
+                default=10
+            delay (int or float): Initial delay between retries in seconds
+                default=3
+            backoff (int or float): backoff multiplier e.g. value of 2 will
+                double the delay each retry
+                default=2
+            max_delay (int or None): maximum amount of time to wait between retries.
+                default=60
+        """
+        return cls._backoff(_exponential_backoff(
+            retries=retries, delay=delay, backoff=backoff, max_delay=max_delay), catch_extra_error_codes)
+
+    @classmethod
+    def jittered_backoff(cls, retries=10, delay=3, max_delay=60, catch_extra_error_codes=None):
+        """
+        Retry calling the Cloud decorated function using a jittered backoff
+        strategy. More on this strategy here:
+
+        https://www.awsarchitectureblog.com/2015/03/backoff.html
+
+        Kwargs:
+            retries (int): Number of times to retry a failed request before giving up
+                default=10
+            delay (int): Initial delay between retries in seconds
+                default=3
+            max_delay (int): maximum amount of time to wait between retries.
+                default=60
+        """
+        return cls._backoff(_full_jitter_backoff(
+            retries=retries, delay=delay, max_delay=max_delay), catch_extra_error_codes)
+
+    @classmethod
+    def backoff(cls, tries=10, delay=3, backoff=1.1, catch_extra_error_codes=None):
+        """
+        Retry calling the Cloud decorated function using an exponential backoff.
+
+        Compatibility for the original implementation of CloudRetry.backoff that
+        did not provide configurable backoff strategies. Developers should use
+        CloudRetry.exponential_backoff instead.
+
+        Kwargs:
+            tries (int): Number of times to try (not retry) before giving up
+                default=10
+            delay (int or float): Initial delay between retries in seconds
+                default=3
+            backoff (int or float): backoff multiplier e.g.
value of 2 will + double the delay each retry + default=1.1 + """ + return cls.exponential_backoff( + retries=tries - 1, delay=delay, backoff=backoff, max_delay=None, catch_extra_error_codes=catch_extra_error_codes) diff --git a/plugins/module_utils/cloudscale.py b/plugins/module_utils/cloudscale.py new file mode 100644 index 0000000000..01f72bd21f --- /dev/null +++ b/plugins/module_utils/cloudscale.py @@ -0,0 +1,132 @@ +# -*- coding: utf-8 -*- +# +# (c) 2017, Gaudenz Steinlin +# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +from copy import deepcopy +from ansible.module_utils.basic import env_fallback +from ansible.module_utils.urls import fetch_url +from ansible.module_utils._text import to_text + +API_URL = 'https://api.cloudscale.ch/v1/' + + +def cloudscale_argument_spec(): + return dict( + api_token=dict(fallback=(env_fallback, ['CLOUDSCALE_API_TOKEN']), + no_log=True, + required=True, + type='str'), + api_timeout=dict(default=30, type='int'), + ) + + +class AnsibleCloudscaleBase(object): + + def __init__(self, module): + self._module = module + self._auth_header = {'Authorization': 'Bearer %s' % module.params['api_token']} + self._result = { + 'changed': False, + 'diff': dict(before=dict(), after=dict()), + } + + def _get(self, api_call): + resp, info = fetch_url(self._module, API_URL + api_call, + headers=self._auth_header, + timeout=self._module.params['api_timeout']) + + if info['status'] == 200: + return self._module.from_json(to_text(resp.read(), errors='surrogate_or_strict')) + elif info['status'] == 404: + return None + else: + self._module.fail_json(msg='Failure while calling the cloudscale.ch API with GET for ' + '"%s".' % api_call, fetch_url_info=info) + + def _post_or_patch(self, api_call, method, data): + # This helps with tags when we have the full API resource href to update. + if API_URL not in api_call: + api_endpoint = API_URL + api_call + else: + api_endpoint = api_call + + headers = self._auth_header.copy() + if data is not None: + # Sanitize data dictionary + # Deepcopy: Duplicate the data object for iteration, because + # iterating an object and changing it at the same time is insecure + for k, v in deepcopy(data).items(): + if v is None: + del data[k] + + data = self._module.jsonify(data) + headers['Content-type'] = 'application/json' + + resp, info = fetch_url(self._module, + api_endpoint, + headers=headers, + method=method, + data=data, + timeout=self._module.params['api_timeout']) + + if info['status'] in (200, 201): + return self._module.from_json(to_text(resp.read(), errors='surrogate_or_strict')) + elif info['status'] == 204: + return None + else: + self._module.fail_json(msg='Failure while calling the cloudscale.ch API with %s for ' + '"%s".' % (method, api_call), fetch_url_info=info) + + def _post(self, api_call, data=None): + return self._post_or_patch(api_call, 'POST', data) + + def _patch(self, api_call, data=None): + return self._post_or_patch(api_call, 'PATCH', data) + + def _delete(self, api_call): + resp, info = fetch_url(self._module, + API_URL + api_call, + headers=self._auth_header, + method='DELETE', + timeout=self._module.params['api_timeout']) + + if info['status'] == 204: + return None + else: + self._module.fail_json(msg='Failure while calling the cloudscale.ch API with DELETE for ' + '"%s".' 
% api_call, fetch_url_info=info) + + def _param_updated(self, key, resource): + param = self._module.params.get(key) + if param is None: + return False + + if resource and key in resource: + if param != resource[key]: + self._result['changed'] = True + + patch_data = { + key: param + } + + self._result['diff']['before'].update({key: resource[key]}) + self._result['diff']['after'].update(patch_data) + + if not self._module.check_mode: + href = resource.get('href') + if not href: + self._module.fail_json(msg='Unable to update %s, no href found.' % key) + + self._patch(href, patch_data) + return True + return False + + def get_result(self, resource): + if resource: + for k, v in resource.items(): + self._result[k] = v + return self._result diff --git a/plugins/module_utils/cloudstack.py b/plugins/module_utils/cloudstack.py new file mode 100644 index 0000000000..85a53b6b6e --- /dev/null +++ b/plugins/module_utils/cloudstack.py @@ -0,0 +1,664 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2015, René Moser +# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +import os +import sys +import time +import traceback + +from ansible.module_utils._text import to_text, to_native +from ansible.module_utils.basic import missing_required_lib + +CS_IMP_ERR = None +try: + from cs import CloudStack, CloudStackException, read_config + HAS_LIB_CS = True +except ImportError: + CS_IMP_ERR = traceback.format_exc() + HAS_LIB_CS = False + + +if sys.version_info > (3,): + long = int + + +def cs_argument_spec(): + return dict( + api_key=dict(default=os.environ.get('CLOUDSTACK_KEY')), + api_secret=dict(default=os.environ.get('CLOUDSTACK_SECRET'), no_log=True), + api_url=dict(default=os.environ.get('CLOUDSTACK_ENDPOINT')), + api_http_method=dict(choices=['get', 'post'], default=os.environ.get('CLOUDSTACK_METHOD')), + api_timeout=dict(type='int', default=os.environ.get('CLOUDSTACK_TIMEOUT')), + api_region=dict(default=os.environ.get('CLOUDSTACK_REGION') or 'cloudstack'), + ) + + +def cs_required_together(): + return [['api_key', 'api_secret']] + + +class AnsibleCloudStack: + + def __init__(self, module): + if not HAS_LIB_CS: + module.fail_json(msg=missing_required_lib('cs'), exception=CS_IMP_ERR) + + self.result = { + 'changed': False, + 'diff': { + 'before': dict(), + 'after': dict() + } + } + + # Common returns, will be merged with self.returns + # search_for_key: replace_with_key + self.common_returns = { + 'id': 'id', + 'name': 'name', + 'created': 'created', + 'zonename': 'zone', + 'state': 'state', + 'project': 'project', + 'account': 'account', + 'domain': 'domain', + 'displaytext': 'display_text', + 'displayname': 'display_name', + 'description': 'description', + } + + # Init returns dict for use in subclasses + self.returns = {} + # these values will be casted to int + self.returns_to_int = {} + # these keys will be compared case sensitive in self.has_changed() + self.case_sensitive_keys = [ + 'id', + 'displaytext', + 'displayname', + 'description', + ] + + self.module = module + self._cs = None + + # Helper for VPCs + self._vpc_networks_ids = None + + self.domain = None + self.account = None + self.project = None + self.ip_address = None + self.network = None + self.physical_network = None + self.vpc = None + self.zone = None + self.vm = None + self.vm_default_nic = None + self.os_type = None + self.hypervisor = None + self.capabilities = None + self.network_acl = 
None
+
+    @property
+    def cs(self):
+        if self._cs is None:
+            api_config = self.get_api_config()
+            self._cs = CloudStack(**api_config)
+        return self._cs
+
+    def get_api_config(self):
+        api_region = self.module.params.get('api_region') or os.environ.get('CLOUDSTACK_REGION')
+        try:
+            config = read_config(api_region)
+        except KeyError:
+            config = {}
+
+        api_config = {
+            'endpoint': self.module.params.get('api_url') or config.get('endpoint'),
+            'key': self.module.params.get('api_key') or config.get('key'),
+            'secret': self.module.params.get('api_secret') or config.get('secret'),
+            'timeout': self.module.params.get('api_timeout') or config.get('timeout') or 10,
+            'method': self.module.params.get('api_http_method') or config.get('method') or 'get',
+        }
+        self.result.update({
+            'api_region': api_region,
+            'api_url': api_config['endpoint'],
+            'api_key': api_config['key'],
+            'api_timeout': int(api_config['timeout']),
+            'api_http_method': api_config['method'],
+        })
+        if not all([api_config['endpoint'], api_config['key'], api_config['secret']]):
+            self.fail_json(msg="Missing api credentials: cannot authenticate")
+        return api_config
+
+    def fail_json(self, **kwargs):
+        self.result.update(kwargs)
+        self.module.fail_json(**self.result)
+
+    def get_or_fallback(self, key=None, fallback_key=None):
+        value = self.module.params.get(key)
+        if not value:
+            value = self.module.params.get(fallback_key)
+        return value
+
+    def has_changed(self, want_dict, current_dict, only_keys=None, skip_diff_for_keys=None):
+        result = False
+        for key, value in want_dict.items():
+
+            # Optionally limit by a list of keys
+            if only_keys and key not in only_keys:
+                continue
+
+            # Skip None values
+            if value is None:
+                continue
+
+            if key in current_dict:
+                if isinstance(value, (int, float, long, complex)):
+
+                    # ensure we compare the same type
+                    if isinstance(value, int):
+                        current_dict[key] = int(current_dict[key])
+                    elif isinstance(value, float):
+                        current_dict[key] = float(current_dict[key])
+                    elif isinstance(value, long):
+                        current_dict[key] = long(current_dict[key])
+                    elif isinstance(value, complex):
+                        current_dict[key] = complex(current_dict[key])
+
+                    if value != current_dict[key]:
+                        if skip_diff_for_keys and key not in skip_diff_for_keys:
+                            self.result['diff']['before'][key] = current_dict[key]
+                            self.result['diff']['after'][key] = value
+                        result = True
+                else:
+                    before_value = to_text(current_dict[key])
+                    after_value = to_text(value)
+
+                    if self.case_sensitive_keys and key in self.case_sensitive_keys:
+                        if before_value != after_value:
+                            if skip_diff_for_keys and key not in skip_diff_for_keys:
+                                self.result['diff']['before'][key] = before_value
+                                self.result['diff']['after'][key] = after_value
+                            result = True
+
+                    # Test for diff in case insensitive way
+                    elif before_value.lower() != after_value.lower():
+                        if skip_diff_for_keys and key not in skip_diff_for_keys:
+                            self.result['diff']['before'][key] = before_value
+                            self.result['diff']['after'][key] = after_value
+                        result = True
+            else:
+                if skip_diff_for_keys and key not in skip_diff_for_keys:
+                    self.result['diff']['before'][key] = None
+                    self.result['diff']['after'][key] = to_text(value)
+                result = True
+        return result
+
+    def _get_by_key(self, key=None, my_dict=None):
+        if my_dict is None:
+            my_dict = {}
+        if key:
+            if key in my_dict:
+                return my_dict[key]
+            self.fail_json(msg="Something went wrong: %s not found" % key)
+        return my_dict
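Subclasses usually pair has_changed() with query_api() (defined just below) to implement idempotent updates. A hypothetical sketch, not taken from any real module:

class AnsibleCloudStackInstance(AnsibleCloudStack):  # hypothetical subclass
    def update_instance(self):
        instance = self.get_vm()
        args = {
            'id': instance['id'],
            'displayname': self.module.params.get('display_name'),
        }
        if self.has_changed(args, instance):
            self.result['changed'] = True
            if not self.module.check_mode:
                res = self.query_api('updateVirtualMachine', **args)
                instance = res['virtualmachine']
        return instance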
+    def query_api(self, command, **args):
+        try:
+            res = getattr(self.cs, command)(**args)
+
+            if 'errortext' in res:
+                self.fail_json(msg="Failed: '%s'" % res['errortext'])
+
+        except CloudStackException as e:
+            self.fail_json(msg='CloudStackException: %s' % to_native(e))
+
+        except Exception as e:
+            self.fail_json(msg=to_native(e))
+
+        return res
+
+    def get_network_acl(self, key=None):
+        if self.network_acl is None:
+            args = {
+                'name': self.module.params.get('network_acl'),
+                'vpcid': self.get_vpc(key='id'),
+            }
+            network_acls = self.query_api('listNetworkACLLists', **args)
+            if network_acls:
+                self.network_acl = network_acls['networkacllist'][0]
+                self.result['network_acl'] = self.network_acl['name']
+        if self.network_acl:
+            return self._get_by_key(key, self.network_acl)
+        else:
+            self.fail_json(msg="Network ACL %s not found" % self.module.params.get('network_acl'))
+
+    def get_vpc(self, key=None):
+        """Return a VPC dictionary, or the value of the given key of it."""
+        if self.vpc:
+            return self._get_by_key(key, self.vpc)
+
+        vpc = self.module.params.get('vpc')
+        if not vpc:
+            vpc = os.environ.get('CLOUDSTACK_VPC')
+        if not vpc:
+            return None
+
+        args = {
+            'account': self.get_account(key='name'),
+            'domainid': self.get_domain(key='id'),
+            'projectid': self.get_project(key='id'),
+            'zoneid': self.get_zone(key='id'),
+        }
+        vpcs = self.query_api('listVPCs', **args)
+        if not vpcs:
+            self.fail_json(msg="No VPCs available.")
+
+        for v in vpcs['vpc']:
+            if vpc in [v['name'], v['displaytext'], v['id']]:
+                # Fail if the identifier matches more than one VPC
+                if self.vpc:
+                    self.fail_json(msg="More than one VPC found with the provided identifier '%s'" % vpc)
+                else:
+                    self.vpc = v
+                    self.result['vpc'] = v['name']
+        if self.vpc:
+            return self._get_by_key(key, self.vpc)
+        self.fail_json(msg="VPC '%s' not found" % vpc)
+
+    def is_vpc_network(self, network_id):
+        """Returns True if network is in VPC."""
+        # This is an efficient way to query a lot of networks at a time
+        if self._vpc_networks_ids is None:
+            args = {
+                'account': self.get_account(key='name'),
+                'domainid': self.get_domain(key='id'),
+                'projectid': self.get_project(key='id'),
+                'zoneid': self.get_zone(key='id'),
+            }
+            vpcs = self.query_api('listVPCs', **args)
+            self._vpc_networks_ids = []
+            if vpcs:
+                for vpc in vpcs['vpc']:
+                    for n in vpc.get('network', []):
+                        self._vpc_networks_ids.append(n['id'])
+        return network_id in self._vpc_networks_ids
+
+    def get_physical_network(self, key=None):
+        if self.physical_network:
+            return self._get_by_key(key, self.physical_network)
+        physical_network = self.module.params.get('physical_network')
+        args = {
+            'zoneid': self.get_zone(key='id')
+        }
+        physical_networks = self.query_api('listPhysicalNetworks', **args)
+        if not physical_networks:
+            self.fail_json(msg="No physical networks available.")
+
+        for net in physical_networks['physicalnetwork']:
+            if physical_network in [net['name'], net['id']]:
+                self.physical_network = net
+                self.result['physical_network'] = net['name']
+                return self._get_by_key(key, self.physical_network)
+        self.fail_json(msg="Physical Network '%s' not found" % physical_network)
+
+    def get_network(self, key=None):
+        """Return a network dictionary, or the value of the given key of it."""
+        if self.network:
+            return self._get_by_key(key, self.network)
+
+        network = self.module.params.get('network')
+        if not network:
+            vpc_name = self.get_vpc(key='name')
+            if vpc_name:
+                self.fail_json(msg="Could not find network for VPC '%s' due to missing argument: network" % vpc_name)
+            return None
+
+        args = {
+            'account': self.get_account(key='name'),
+            'domainid': self.get_domain(key='id'),
+            'projectid':
self.get_project(key='id'), + 'zoneid': self.get_zone(key='id'), + 'vpcid': self.get_vpc(key='id') + } + networks = self.query_api('listNetworks', **args) + if not networks: + self.fail_json(msg="No networks available.") + + for n in networks['network']: + # ignore any VPC network if vpc param is not given + if 'vpcid' in n and not self.get_vpc(key='id'): + continue + if network in [n['displaytext'], n['name'], n['id']]: + self.result['network'] = n['name'] + self.network = n + return self._get_by_key(key, self.network) + self.fail_json(msg="Network '%s' not found" % network) + + def get_project(self, key=None): + if self.project: + return self._get_by_key(key, self.project) + + project = self.module.params.get('project') + if not project: + project = os.environ.get('CLOUDSTACK_PROJECT') + if not project: + return None + args = { + 'account': self.get_account(key='name'), + 'domainid': self.get_domain(key='id') + } + projects = self.query_api('listProjects', **args) + if projects: + for p in projects['project']: + if project.lower() in [p['name'].lower(), p['id']]: + self.result['project'] = p['name'] + self.project = p + return self._get_by_key(key, self.project) + self.fail_json(msg="project '%s' not found" % project) + + def get_ip_address(self, key=None): + if self.ip_address: + return self._get_by_key(key, self.ip_address) + + ip_address = self.module.params.get('ip_address') + if not ip_address: + self.fail_json(msg="IP address param 'ip_address' is required") + + args = { + 'ipaddress': ip_address, + 'account': self.get_account(key='name'), + 'domainid': self.get_domain(key='id'), + 'projectid': self.get_project(key='id'), + 'vpcid': self.get_vpc(key='id'), + } + + ip_addresses = self.query_api('listPublicIpAddresses', **args) + + if not ip_addresses: + self.fail_json(msg="IP address '%s' not found" % args['ipaddress']) + + self.ip_address = ip_addresses['publicipaddress'][0] + return self._get_by_key(key, self.ip_address) + + def get_vm_guest_ip(self): + vm_guest_ip = self.module.params.get('vm_guest_ip') + default_nic = self.get_vm_default_nic() + + if not vm_guest_ip: + return default_nic['ipaddress'] + + for secondary_ip in default_nic['secondaryip']: + if vm_guest_ip == secondary_ip['ipaddress']: + return vm_guest_ip + self.fail_json(msg="Secondary IP '%s' not assigned to VM" % vm_guest_ip) + + def get_vm_default_nic(self): + if self.vm_default_nic: + return self.vm_default_nic + + nics = self.query_api('listNics', virtualmachineid=self.get_vm(key='id')) + if nics: + for n in nics['nic']: + if n['isdefault']: + self.vm_default_nic = n + return self.vm_default_nic + self.fail_json(msg="No default IP address of VM '%s' found" % self.module.params.get('vm')) + + def get_vm(self, key=None, filter_zone=True): + if self.vm: + return self._get_by_key(key, self.vm) + + vm = self.module.params.get('vm') + if not vm: + self.fail_json(msg="Virtual machine param 'vm' is required") + + args = { + 'account': self.get_account(key='name'), + 'domainid': self.get_domain(key='id'), + 'projectid': self.get_project(key='id'), + 'zoneid': self.get_zone(key='id') if filter_zone else None, + 'fetch_list': True, + } + vms = self.query_api('listVirtualMachines', **args) + if vms: + for v in vms: + if vm.lower() in [v['name'].lower(), v['displayname'].lower(), v['id']]: + self.vm = v + return self._get_by_key(key, self.vm) + self.fail_json(msg="Virtual machine '%s' not found" % vm) + + def get_disk_offering(self, key=None): + disk_offering = self.module.params.get('disk_offering') + if not 
disk_offering:
+            return None
+
+        # Do not add domain filter for disk offering listing.
+        disk_offerings = self.query_api('listDiskOfferings')
+        if disk_offerings:
+            for d in disk_offerings['diskoffering']:
+                if disk_offering in [d['displaytext'], d['name'], d['id']]:
+                    return self._get_by_key(key, d)
+        self.fail_json(msg="Disk offering '%s' not found" % disk_offering)
+
+    def get_zone(self, key=None):
+        if self.zone:
+            return self._get_by_key(key, self.zone)
+
+        zone = self.module.params.get('zone')
+        if not zone:
+            zone = os.environ.get('CLOUDSTACK_ZONE')
+        zones = self.query_api('listZones')
+
+        if not zones:
+            self.fail_json(msg="No zones available. Please create a zone first")
+
+        # use the first zone if no zone param given
+        if not zone:
+            self.zone = zones['zone'][0]
+            self.result['zone'] = self.zone['name']
+            return self._get_by_key(key, self.zone)
+
+        if zones:
+            for z in zones['zone']:
+                if zone.lower() in [z['name'].lower(), z['id']]:
+                    self.result['zone'] = z['name']
+                    self.zone = z
+                    return self._get_by_key(key, self.zone)
+        self.fail_json(msg="zone '%s' not found" % zone)
+
+    def get_os_type(self, key=None):
+        if self.os_type:
+            return self._get_by_key(key, self.os_type)
+
+        os_type = self.module.params.get('os_type')
+        if not os_type:
+            return None
+
+        os_types = self.query_api('listOsTypes')
+        if os_types:
+            for o in os_types['ostype']:
+                if os_type in [o['description'], o['id']]:
+                    self.os_type = o
+                    return self._get_by_key(key, self.os_type)
+        self.fail_json(msg="OS type '%s' not found" % os_type)
+
+    def get_hypervisor(self):
+        if self.hypervisor:
+            return self.hypervisor
+
+        hypervisor = self.module.params.get('hypervisor')
+        hypervisors = self.query_api('listHypervisors')
+
+        # use the first hypervisor if no hypervisor param given
+        if not hypervisor:
+            self.hypervisor = hypervisors['hypervisor'][0]['name']
+            return self.hypervisor
+
+        for h in hypervisors['hypervisor']:
+            if hypervisor.lower() == h['name'].lower():
+                self.hypervisor = h['name']
+                return self.hypervisor
+        self.fail_json(msg="Hypervisor '%s' not found" % hypervisor)
+
+    def get_account(self, key=None):
+        if self.account:
+            return self._get_by_key(key, self.account)
+
+        account = self.module.params.get('account')
+        if not account:
+            account = os.environ.get('CLOUDSTACK_ACCOUNT')
+        if not account:
+            return None
+
+        domain = self.module.params.get('domain')
+        if not domain:
+            self.fail_json(msg="Account must be specified with Domain")
+
+        args = {
+            'name': account,
+            'domainid': self.get_domain(key='id'),
+            'listall': True
+        }
+        accounts = self.query_api('listAccounts', **args)
+        if accounts:
+            self.account = accounts['account'][0]
+            self.result['account'] = self.account['name']
+            return self._get_by_key(key, self.account)
+        self.fail_json(msg="Account '%s' not found" % account)
+
+    def get_domain(self, key=None):
+        if self.domain:
+            return self._get_by_key(key, self.domain)
+
+        domain = self.module.params.get('domain')
+        if not domain:
+            domain = os.environ.get('CLOUDSTACK_DOMAIN')
+        if not domain:
+            return None
+
+        args = {
+            'listall': True,
+        }
+        domains = self.query_api('listDomains', **args)
+        if domains:
+            for d in domains['domain']:
+                if d['path'].lower() in [domain.lower(), "root/" + domain.lower(), "root" + domain.lower()]:
+                    self.domain = d
+                    self.result['domain'] = d['path']
+                    return self._get_by_key(key, self.domain)
+        self.fail_json(msg="Domain '%s' not found" % domain)
+
+    def query_tags(self, resource, resource_type):
+        args = {
+            'resourceid': resource['id'],
+            'resourcetype': resource_type,
+ } + tags = self.query_api('listTags', **args) + return self.get_tags(resource=tags, key='tag') + + def get_tags(self, resource=None, key='tags'): + existing_tags = [] + for tag in resource.get(key) or []: + existing_tags.append({'key': tag['key'], 'value': tag['value']}) + return existing_tags + + def _process_tags(self, resource, resource_type, tags, operation="create"): + if tags: + self.result['changed'] = True + if not self.module.check_mode: + args = { + 'resourceids': resource['id'], + 'resourcetype': resource_type, + 'tags': tags, + } + if operation == "create": + response = self.query_api('createTags', **args) + else: + response = self.query_api('deleteTags', **args) + self.poll_job(response) + + def _tags_that_should_exist_or_be_updated(self, resource, tags): + existing_tags = self.get_tags(resource) + return [tag for tag in tags if tag not in existing_tags] + + def _tags_that_should_not_exist(self, resource, tags): + existing_tags = self.get_tags(resource) + return [tag for tag in existing_tags if tag not in tags] + + def ensure_tags(self, resource, resource_type=None): + if not resource_type or not resource: + self.fail_json(msg="Error: Missing resource or resource_type for tags.") + + if 'tags' in resource: + tags = self.module.params.get('tags') + if tags is not None: + self._process_tags(resource, resource_type, self._tags_that_should_not_exist(resource, tags), operation="delete") + self._process_tags(resource, resource_type, self._tags_that_should_exist_or_be_updated(resource, tags)) + resource['tags'] = self.query_tags(resource=resource, resource_type=resource_type) + return resource + + def get_capabilities(self, key=None): + if self.capabilities: + return self._get_by_key(key, self.capabilities) + capabilities = self.query_api('listCapabilities') + self.capabilities = capabilities['capability'] + return self._get_by_key(key, self.capabilities) + + def poll_job(self, job=None, key=None): + if 'jobid' in job: + while True: + res = self.query_api('queryAsyncJobResult', jobid=job['jobid']) + if res['jobstatus'] != 0 and 'jobresult' in res: + + if 'errortext' in res['jobresult']: + self.fail_json(msg="Failed: '%s'" % res['jobresult']['errortext']) + + if key and key in res['jobresult']: + job = res['jobresult'][key] + + break + time.sleep(2) + return job + + def update_result(self, resource, result=None): + if result is None: + result = dict() + if resource: + returns = self.common_returns.copy() + returns.update(self.returns) + for search_key, return_key in returns.items(): + if search_key in resource: + result[return_key] = resource[search_key] + + # Bad bad API does not always return int when it should. 
+ for search_key, return_key in self.returns_to_int.items(): + if search_key in resource: + result[return_key] = int(resource[search_key]) + + if 'tags' in resource: + result['tags'] = resource['tags'] + return result + + def get_result(self, resource): + return self.update_result(resource, self.result) + + def get_result_and_facts(self, facts_name, resource): + result = self.get_result(resource) + + ansible_facts = { + facts_name: result.copy() + } + for k in ['diff', 'changed']: + if k in ansible_facts[facts_name]: + del ansible_facts[facts_name][k] + + result.update(ansible_facts=ansible_facts) + return result diff --git a/plugins/module_utils/database.py b/plugins/module_utils/database.py new file mode 100644 index 0000000000..014939a260 --- /dev/null +++ b/plugins/module_utils/database.py @@ -0,0 +1,142 @@ +# This code is part of Ansible, but is an independent component. +# This particular file snippet, and this file snippet only, is BSD licensed. +# Modules you write using this snippet, which is embedded dynamically by Ansible +# still belong to the author of the module, and may assign their own license +# to the complete work. +# +# Copyright (c) 2014, Toshio Kuratomi +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without modification, +# are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +class SQLParseError(Exception): + pass + + +class UnclosedQuoteError(SQLParseError): + pass + + +# maps a type of identifier to the maximum number of dot levels that are +# allowed to specify that identifier. 
For example, a database column can be
+# specified by up to 4 levels: database.schema.table.column
+_PG_IDENTIFIER_TO_DOT_LEVEL = dict(
+    database=1,
+    schema=2,
+    table=3,
+    column=4,
+    role=1,
+    tablespace=1,
+    sequence=3,
+    publication=1,
+)
+_MYSQL_IDENTIFIER_TO_DOT_LEVEL = dict(database=1, table=2, column=3, role=1, vars=1)
+
+
+def _find_end_quote(identifier, quote_char):
+    accumulate = 0
+    while True:
+        try:
+            quote = identifier.index(quote_char)
+        except ValueError:
+            raise UnclosedQuoteError
+        accumulate = accumulate + quote
+        try:
+            next_char = identifier[quote + 1]
+        except IndexError:
+            return accumulate
+        if next_char == quote_char:
+            try:
+                identifier = identifier[quote + 2:]
+                accumulate = accumulate + 2
+            except IndexError:
+                raise UnclosedQuoteError
+        else:
+            return accumulate
+
+
+def _identifier_parse(identifier, quote_char):
+    if not identifier:
+        raise SQLParseError('Identifier name unspecified or unquoted trailing dot')
+
+    already_quoted = False
+    if identifier.startswith(quote_char):
+        already_quoted = True
+        try:
+            end_quote = _find_end_quote(identifier[1:], quote_char=quote_char) + 1
+        except UnclosedQuoteError:
+            already_quoted = False
+        else:
+            if end_quote < len(identifier) - 1:
+                if identifier[end_quote + 1] == '.':
+                    dot = end_quote + 1
+                    first_identifier = identifier[:dot]
+                    next_identifier = identifier[dot + 1:]
+                    further_identifiers = _identifier_parse(next_identifier, quote_char)
+                    further_identifiers.insert(0, first_identifier)
+                else:
+                    raise SQLParseError('User escaped identifiers must escape extra quotes')
+            else:
+                further_identifiers = [identifier]
+
+    if not already_quoted:
+        try:
+            dot = identifier.index('.')
+        except ValueError:
+            identifier = identifier.replace(quote_char, quote_char * 2)
+            identifier = ''.join((quote_char, identifier, quote_char))
+            further_identifiers = [identifier]
+        else:
+            if dot == 0 or dot >= len(identifier) - 1:
+                identifier = identifier.replace(quote_char, quote_char * 2)
+                identifier = ''.join((quote_char, identifier, quote_char))
+                further_identifiers = [identifier]
+            else:
+                first_identifier = identifier[:dot]
+                next_identifier = identifier[dot + 1:]
+                further_identifiers = _identifier_parse(next_identifier, quote_char)
+                first_identifier = first_identifier.replace(quote_char, quote_char * 2)
+                first_identifier = ''.join((quote_char, first_identifier, quote_char))
+                further_identifiers.insert(0, first_identifier)
+
+    return further_identifiers
+
+
+def pg_quote_identifier(identifier, id_type):
+    identifier_fragments = _identifier_parse(identifier, quote_char='"')
+    if len(identifier_fragments) > _PG_IDENTIFIER_TO_DOT_LEVEL[id_type]:
+        raise SQLParseError('PostgreSQL does not support %s with more than %i dots' % (id_type, _PG_IDENTIFIER_TO_DOT_LEVEL[id_type]))
+    return '.'.join(identifier_fragments)
+
+
+def mysql_quote_identifier(identifier, id_type):
+    identifier_fragments = _identifier_parse(identifier, quote_char='`')
+    if (len(identifier_fragments) - 1) > _MYSQL_IDENTIFIER_TO_DOT_LEVEL[id_type]:
+        raise SQLParseError('MySQL does not support %s with more than %i dots' % (id_type, _MYSQL_IDENTIFIER_TO_DOT_LEVEL[id_type]))
+
+    special_cased_fragments = []
+    for fragment in identifier_fragments:
+        if fragment == '`*`':
+            special_cased_fragments.append('*')
+        else:
+            special_cased_fragments.append(fragment)
+
+    return '.'.join(special_cased_fragments)
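The net effect of the two public helpers, on a few illustrative inputs:

print(pg_quote_identifier('public.users', 'table'))     # -> "public"."users"
print(pg_quote_identifier('bad"name', 'role'))          # -> "bad""name"
print(mysql_quote_identifier('mydb.mytable', 'table'))  # -> `mydb`.`mytable`
pg_quote_identifier('a.b.c.d.e', 'column')              # raises SQLParseError (too many dots)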
diff --git a/plugins/module_utils/digital_ocean.py b/plugins/module_utils/digital_ocean.py
new file mode 100644
index 0000000000..fc30343e28
--- /dev/null
+++ b/plugins/module_utils/digital_ocean.py
@@ -0,0 +1,147 @@
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by Ansible
+# still belong to the author of the module, and may assign their own license
+# to the complete work.
+#
+# Copyright (c), Ansible Project 2017
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without modification,
+# are permitted provided that the following conditions are met:
+#
+#    * Redistributions of source code must retain the above copyright
+#      notice, this list of conditions and the following disclaimer.
+#    * Redistributions in binary form must reproduce the above copyright notice,
+#      this list of conditions and the following disclaimer in the documentation
+#      and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import json
+import os
+from ansible.module_utils.urls import fetch_url
+from ansible.module_utils._text import to_text
+from ansible.module_utils.basic import env_fallback
+
+
+class Response(object):
+
+    def __init__(self, resp, info):
+        self.body = None
+        if resp:
+            self.body = resp.read()
+        self.info = info
+
+    @property
+    def json(self):
+        if not self.body:
+            if "body" in self.info:
+                return json.loads(to_text(self.info["body"]))
+            return None
+        try:
+            return json.loads(to_text(self.body))
+        except ValueError:
+            return None
+
+    @property
+    def status_code(self):
+        return self.info["status"]
+
+
+class DigitalOceanHelper:
+
+    def __init__(self, module):
+        self.module = module
+        self.baseurl = 'https://api.digitalocean.com/v2'
+        self.timeout = module.params.get('timeout', 30)
+        self.oauth_token = module.params.get('oauth_token')
+        self.headers = {'Authorization': 'Bearer {0}'.format(self.oauth_token),
+                        'Content-type': 'application/json'}
+
+        # Check whether the API token is valid
+        response = self.get('account')
+        if response.status_code == 401:
+            self.module.fail_json(msg='Failed to login using API token, please verify validity of API token.')
+
+    def _url_builder(self, path):
+        if path[0] == '/':
+            path = path[1:]
+        return '%s/%s' % (self.baseurl, path)
+
+    def send(self, method, path, data=None):
+        url = self._url_builder(path)
+        data = self.module.jsonify(data)
+
+        resp, info = fetch_url(self.module, url, data=data, headers=self.headers, method=method, timeout=self.timeout)
+
+        return Response(resp, info)
+
+    def get(self, path, data=None):
+        return self.send('GET', path, data)
+
+    def put(self, path, data=None):
+        return self.send('PUT', path, data)
+
+    def post(self, path, data=None):
+        return self.send('POST', path, data)
+
+    def delete(self, path, data=None):
+        return self.send('DELETE', path, data)
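In a module these wrappers are used roughly as follows; note that get_paginated_data() (defined a bit further down) appends the page and per_page parameters verbatim, so base_url must end in '?' or '&'. A sketch assuming an AnsibleModule whose params carry oauth_token:

do = DigitalOceanHelper(module)

# fetch every droplet across pages
droplets = do.get_paginated_data(base_url='droplets?', data_key_name='droplets')

# one-off requests go through the thin verb wrappers
response = do.get('account')
if response.status_code != 200:
    module.fail_json(msg='GET /account failed', info=response.info)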
+
+    @staticmethod
+    def digital_ocean_argument_spec():
+        return dict(
+            validate_certs=dict(type='bool', required=False, default=True),
+            oauth_token=dict(
+                no_log=True,
+                # Support environment variable for DigitalOcean OAuth Token
+                fallback=(env_fallback, ['DO_API_TOKEN', 'DO_API_KEY', 'DO_OAUTH_TOKEN', 'OAUTH_TOKEN']),
+                required=False,
+                aliases=['api_token'],
+            ),
+            timeout=dict(type='int', default=30),
+        )
+
+    def get_paginated_data(self, base_url=None, data_key_name=None, data_per_page=40, expected_status_code=200):
+        """
+        Function to get all paginated data from given URL
+        Args:
+            base_url: Base URL to get data from
+            data_key_name: Name of data key value
+            data_per_page: Number of results per page (Default: 40)
+            expected_status_code: Expected returned code from DigitalOcean (Default: 200)
+        Returns: List of data
+
+        """
+        page = 1
+        has_next = True
+        ret_data = []
+        status_code = None
+        response = None
+        while has_next or status_code != expected_status_code:
+            required_url = "{0}page={1}&per_page={2}".format(base_url, page, data_per_page)
+            response = self.get(required_url)
+            status_code = response.status_code
+            # stop if any error during pagination
+            if status_code != expected_status_code:
+                break
+            page += 1
+            ret_data.extend(response.json[data_key_name])
+            has_next = "pages" in response.json["links"] and "next" in response.json["links"]["pages"]
+
+        if status_code != expected_status_code:
+            msg = "Failed to fetch %s from %s" % (data_key_name, base_url)
+            if response:
+                msg += " due to error: %s" % response.json['message']
+            self.module.fail_json(msg=msg)
+
+        return ret_data
diff --git a/plugins/module_utils/dimensiondata.py b/plugins/module_utils/dimensiondata.py
new file mode 100644
index 0000000000..179c3eff9c
--- /dev/null
+++ b/plugins/module_utils/dimensiondata.py
@@ -0,0 +1,338 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Dimension Data
+#
+# This module is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This software is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this software.  If not, see <https://www.gnu.org/licenses/>.
+#
+# Authors:
+#   - Aimon Bustardo
+#   - Mark Maglana
+#   - Adam Friedman
+#
+# Common functionality to be used by various module components
+
+import os
+import re
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils.six.moves import configparser
+from os.path import expanduser
+from uuid import UUID
+
+LIBCLOUD_IMP_ERR = None
+try:
+    from libcloud.common.dimensiondata import API_ENDPOINTS, DimensionDataAPIException, DimensionDataStatus
+    from libcloud.compute.base import Node, NodeLocation
+    from libcloud.compute.providers import get_driver
+    from libcloud.compute.types import Provider
+
+    import libcloud.security
+
+    HAS_LIBCLOUD = True
+except ImportError:
+    LIBCLOUD_IMP_ERR = traceback.format_exc()
+    HAS_LIBCLOUD = False
+
+# MCP 2.x version pattern for location (datacenter) names.
+#
+# Note that this is not a totally reliable way of determining MCP version.
+# Unfortunately, libcloud's NodeLocation currently makes no provision for extended properties. +# At some point we may therefore want to either enhance libcloud or enable overriding mcp_version +# by specifying it in the module parameters. +MCP_2_LOCATION_NAME_PATTERN = re.compile(r".*MCP\s?2.*") + + +class DimensionDataModule(object): + """ + The base class containing common functionality used by Dimension Data modules for Ansible. + """ + + def __init__(self, module): + """ + Create a new DimensionDataModule. + + Will fail if Apache libcloud is not present. + + :param module: The underlying Ansible module. + :type module: AnsibleModule + """ + + self.module = module + + if not HAS_LIBCLOUD: + self.module.fail_json(msg=missing_required_lib('libcloud'), exception=LIBCLOUD_IMP_ERR) + + # Credentials are common to all Dimension Data modules. + credentials = self.get_credentials() + self.user_id = credentials['user_id'] + self.key = credentials['key'] + + # Region and location are common to all Dimension Data modules. + region = self.module.params['region'] + self.region = 'dd-{0}'.format(region) + self.location = self.module.params['location'] + + libcloud.security.VERIFY_SSL_CERT = self.module.params['validate_certs'] + + self.driver = get_driver(Provider.DIMENSIONDATA)( + self.user_id, + self.key, + region=self.region + ) + + # Determine the MCP API version (this depends on the target datacenter). + self.mcp_version = self.get_mcp_version(self.location) + + # Optional "wait-for-completion" arguments + if 'wait' in self.module.params: + self.wait = self.module.params['wait'] + self.wait_time = self.module.params['wait_time'] + self.wait_poll_interval = self.module.params['wait_poll_interval'] + else: + self.wait = False + self.wait_time = 0 + self.wait_poll_interval = 0 + + def get_credentials(self): + """ + Get user_id and key from module configuration, environment, or dotfile. + Order of priority is module, environment, dotfile. + + To set in environment: + + export MCP_USER='myusername' + export MCP_PASSWORD='mypassword' + + To set in dot file place a file at ~/.dimensiondata with + the following contents: + + [dimensiondatacloud] + MCP_USER: myusername + MCP_PASSWORD: mypassword + """ + + if not HAS_LIBCLOUD: + self.module.fail_json(msg='libcloud is required for this module.') + + user_id = None + key = None + + # First, try the module configuration + if 'mcp_user' in self.module.params: + if 'mcp_password' not in self.module.params: + self.module.fail_json( + msg='"mcp_user" parameter was specified, but not "mcp_password" (either both must be specified, or neither).' + ) + + user_id = self.module.params['mcp_user'] + key = self.module.params['mcp_password'] + + # Fall back to environment + if not user_id or not key: + user_id = os.environ.get('MCP_USER', None) + key = os.environ.get('MCP_PASSWORD', None) + + # Finally, try dotfile (~/.dimensiondata) + if not user_id or not key: + home = expanduser('~') + config = configparser.RawConfigParser() + config.read("%s/.dimensiondata" % home) + + try: + user_id = config.get("dimensiondatacloud", "MCP_USER") + key = config.get("dimensiondatacloud", "MCP_PASSWORD") + except (configparser.NoSectionError, configparser.NoOptionError): + pass + + # One or more credentials not found. Function can't recover from this + # so it has to raise an error instead of fail silently. 
+        if not user_id:
+            raise MissingCredentialsError("Dimension Data user id not found")
+        elif not key:
+            raise MissingCredentialsError("Dimension Data key not found")
+
+        # Both found, return data
+        return dict(user_id=user_id, key=key)
+
+    def get_mcp_version(self, location):
+        """
+        Get the MCP version for the specified location.
+        """
+
+        location = self.driver.ex_get_location_by_id(location)
+        if MCP_2_LOCATION_NAME_PATTERN.match(location.name):
+            return '2.0'
+
+        return '1.0'
+
+    def get_network_domain(self, locator, location):
+        """
+        Retrieve a network domain by its name or Id.
+        """
+
+        if is_uuid(locator):
+            network_domain = self.driver.ex_get_network_domain(locator)
+        else:
+            matching_network_domains = [
+                network_domain for network_domain in self.driver.ex_list_network_domains(location=location)
+                if network_domain.name == locator
+            ]
+
+            if matching_network_domains:
+                network_domain = matching_network_domains[0]
+            else:
+                network_domain = None
+
+        if network_domain:
+            return network_domain
+
+        raise UnknownNetworkError("Network '%s' could not be found" % locator)
+
+    def get_vlan(self, locator, location, network_domain):
+        """
+        Get a VLAN object by its name or id
+        """
+        if is_uuid(locator):
+            vlan = self.driver.ex_get_vlan(locator)
+        else:
+            matching_vlans = [
+                vlan for vlan in self.driver.ex_list_vlans(location, network_domain)
+                if vlan.name == locator
+            ]
+
+            if matching_vlans:
+                vlan = matching_vlans[0]
+            else:
+                vlan = None
+
+        if vlan:
+            return vlan
+
+        raise UnknownVLANError("VLAN '%s' could not be found" % locator)
+
+    @staticmethod
+    def argument_spec(**additional_argument_spec):
+        """
+        Build an argument specification for a Dimension Data module.
+        :param additional_argument_spec: An optional dictionary representing the specification for additional module arguments (if any).
+        :return: A dict containing the argument specification.
+        """
+
+        spec = dict(
+            region=dict(type='str', default='na'),
+            mcp_user=dict(type='str', required=False),
+            mcp_password=dict(type='str', required=False, no_log=True),
+            location=dict(type='str', required=True),
+            validate_certs=dict(type='bool', required=False, default=True)
+        )
+
+        if additional_argument_spec:
+            spec.update(additional_argument_spec)
+
+        return spec
+
+    @staticmethod
+    def argument_spec_with_wait(**additional_argument_spec):
+        """
+        Build an argument specification for a Dimension Data module that includes "wait for completion" arguments.
+        :param additional_argument_spec: An optional dictionary representing the specification for additional module arguments (if any).
+        :return: A dict containing the argument specification.
+        """
+
+        spec = DimensionDataModule.argument_spec(
+            wait=dict(type='bool', required=False, default=False),
+            wait_time=dict(type='int', required=False, default=600),
+            wait_poll_interval=dict(type='int', required=False, default=2)
+        )
+
+        if additional_argument_spec:
+            spec.update(additional_argument_spec)
+
+        return spec
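Putting the two builders together, a Dimension Data module bootstraps itself roughly like this (the name option is a hypothetical module-specific argument; required_together() is defined just below):

from ansible.module_utils.basic import AnsibleModule

def main():
    module = AnsibleModule(
        argument_spec=DimensionDataModule.argument_spec_with_wait(
            name=dict(type='str', required=True),  # hypothetical extra argument
        ),
        required_together=DimensionDataModule.required_together(),
    )
    dd = DimensionDataModule(module)  # resolves credentials, region and MCP version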
+ """ + + required_together = [ + ['mcp_user', 'mcp_password'] + ] + + if additional_required_together: + required_together.extend(additional_required_together) + + return required_together + + +class LibcloudNotFound(Exception): + """ + Exception raised when Apache libcloud cannot be found. + """ + + pass + + +class MissingCredentialsError(Exception): + """ + Exception raised when credentials for Dimension Data CloudControl cannot be found. + """ + + pass + + +class UnknownNetworkError(Exception): + """ + Exception raised when a network or network domain cannot be found. + """ + + pass + + +class UnknownVLANError(Exception): + """ + Exception raised when a VLAN cannot be found. + """ + + pass + + +def get_dd_regions(): + """ + Get the list of available regions whose vendor is Dimension Data. + """ + + # Get endpoints + all_regions = API_ENDPOINTS.keys() + + # Only Dimension Data endpoints (no prefix) + regions = [region[3:] for region in all_regions if region.startswith('dd-')] + + return regions + + +def is_uuid(u, version=4): + """ + Test if valid v4 UUID + """ + try: + uuid_obj = UUID(u, version=version) + + return str(uuid_obj) == u + except ValueError: + return False diff --git a/plugins/module_utils/docker/__init__.py b/plugins/module_utils/docker/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/plugins/module_utils/docker/common.py b/plugins/module_utils/docker/common.py new file mode 100644 index 0000000000..03307250d6 --- /dev/null +++ b/plugins/module_utils/docker/common.py @@ -0,0 +1,1022 @@ +# +# Copyright 2016 Red Hat | Ansible +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+import os
+import platform
+import re
+import sys
+from datetime import timedelta
+from distutils.version import LooseVersion
+
+
+from ansible.module_utils.basic import AnsibleModule, env_fallback, missing_required_lib
+from ansible.module_utils.common._collections_compat import Mapping, Sequence
+from ansible.module_utils.six import string_types
+from ansible.module_utils.six.moves.urllib.parse import urlparse
+from ansible.module_utils.parsing.convert_bool import BOOLEANS_TRUE, BOOLEANS_FALSE
+
+HAS_DOCKER_PY = True
+HAS_DOCKER_PY_2 = False
+HAS_DOCKER_PY_3 = False
+HAS_DOCKER_ERROR = None
+
+try:
+    from requests.exceptions import SSLError
+    from docker import __version__ as docker_version
+    from docker.errors import APIError, NotFound, TLSParameterError
+    from docker.tls import TLSConfig
+    from docker import auth
+
+    if LooseVersion(docker_version) >= LooseVersion('3.0.0'):
+        HAS_DOCKER_PY_3 = True
+        from docker import APIClient as Client
+    elif LooseVersion(docker_version) >= LooseVersion('2.0.0'):
+        HAS_DOCKER_PY_2 = True
+        from docker import APIClient as Client
+    else:
+        from docker import Client
+
+except ImportError as exc:
+    HAS_DOCKER_ERROR = str(exc)
+    HAS_DOCKER_PY = False
+
+
+# The next 2 imports ``docker.models`` and ``docker.ssladapter`` are used
+# to ensure the user does not have both ``docker`` and ``docker-py`` modules
+# installed, as they utilize the same namespace and are incompatible
+try:
+    # docker (Docker SDK for Python >= 2.0.0)
+    import docker.models  # noqa: F401
+    HAS_DOCKER_MODELS = True
+except ImportError:
+    HAS_DOCKER_MODELS = False
+
+try:
+    # docker-py (Docker SDK for Python < 2.0.0)
+    import docker.ssladapter  # noqa: F401
+    HAS_DOCKER_SSLADAPTER = True
+except ImportError:
+    HAS_DOCKER_SSLADAPTER = False
+
+
+try:
+    from requests.exceptions import RequestException
+except ImportError:
+    # Either docker-py is no longer using requests, or docker-py isn't around either,
+    # or docker-py's dependency requests is missing. In any case, define an exception
+    # class RequestException so that our code doesn't break.
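+    # (Editor's note) This fallback lets call sites write
+    # `except RequestException:` unconditionally, whether or not the
+    # requests library is importable.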
+    class RequestException(Exception):
+        pass
+
+
+DEFAULT_DOCKER_HOST = 'unix://var/run/docker.sock'
+DEFAULT_TLS = False
+DEFAULT_TLS_VERIFY = False
+DEFAULT_TLS_HOSTNAME = 'localhost'
+MIN_DOCKER_VERSION = "1.8.0"
+DEFAULT_TIMEOUT_SECONDS = 60
+
+DOCKER_COMMON_ARGS = dict(
+    docker_host=dict(type='str', default=DEFAULT_DOCKER_HOST, fallback=(env_fallback, ['DOCKER_HOST']), aliases=['docker_url']),
+    tls_hostname=dict(type='str', default=DEFAULT_TLS_HOSTNAME, fallback=(env_fallback, ['DOCKER_TLS_HOSTNAME'])),
+    api_version=dict(type='str', default='auto', fallback=(env_fallback, ['DOCKER_API_VERSION']), aliases=['docker_api_version']),
+    timeout=dict(type='int', default=DEFAULT_TIMEOUT_SECONDS, fallback=(env_fallback, ['DOCKER_TIMEOUT'])),
+    ca_cert=dict(type='path', aliases=['tls_ca_cert', 'cacert_path']),
+    client_cert=dict(type='path', aliases=['tls_client_cert', 'cert_path']),
+    client_key=dict(type='path', aliases=['tls_client_key', 'key_path']),
+    ssl_version=dict(type='str', fallback=(env_fallback, ['DOCKER_SSL_VERSION'])),
+    tls=dict(type='bool', default=DEFAULT_TLS, fallback=(env_fallback, ['DOCKER_TLS'])),
+    validate_certs=dict(type='bool', default=DEFAULT_TLS_VERIFY, fallback=(env_fallback, ['DOCKER_TLS_VERIFY']), aliases=['tls_verify']),
+    debug=dict(type='bool', default=False)
+)
+
+DOCKER_MUTUALLY_EXCLUSIVE = []
+
+DOCKER_REQUIRED_TOGETHER = [
+    ['client_cert', 'client_key']
+]
+
+DEFAULT_DOCKER_REGISTRY = 'https://index.docker.io/v1/'
+EMAIL_REGEX = r'[^@]+@[^@]+\.[^@]+'
+BYTE_SUFFIXES = ['B', 'KB', 'MB', 'GB', 'TB', 'PB']
+
+
+if not HAS_DOCKER_PY:
+    docker_version = None
+
+    # No Docker SDK for Python. Create a placeholder client to allow
+    # instantiation of AnsibleModule and proper error handling
+    class Client(object):  # noqa: F811
+        def __init__(self, **kwargs):
+            pass
+
+    class APIError(Exception):  # noqa: F811
+        pass
+
+    class NotFound(Exception):  # noqa: F811
+        pass
+
+
+def is_image_name_id(name):
+    """Check whether the given image name is in fact an image ID (hash)."""
+    if re.match('^sha256:[0-9a-fA-F]{64}$', name):
+        return True
+    return False
+
+
+def is_valid_tag(tag, allow_empty=False):
+    """Check whether the given string is a valid docker tag name."""
+    if not tag:
+        return allow_empty
+    # See here ("Extended description") for a definition of what tags can be:
+    # https://docs.docker.com/engine/reference/commandline/tag/
+    return bool(re.match('^[a-zA-Z0-9_][a-zA-Z0-9_.-]{0,127}$', tag))
+
+
+def sanitize_result(data):
+    """Sanitize data object for return to Ansible.
+
+    When the data object contains types such as docker.types.containers.HostConfig,
+    Ansible will fail when these are returned via exit_json or fail_json.
+    HostConfig is derived from dict, but its constructor requires additional
+    arguments. This function sanitizes data structures by recursively converting
+    everything derived from dict to dict and everything derived from list (and tuple)
+    to a list.
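+
+    A minimal sketch of the behaviour (editor's illustration)::
+
+        class MyDict(dict):
+            pass
+
+        sanitize_result({'a': MyDict(b=1), 'c': (2, 3)})
+        # -> {'a': {'b': 1}, 'c': [2, 3]}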
+ """ + if isinstance(data, dict): + return dict((k, sanitize_result(v)) for k, v in data.items()) + elif isinstance(data, (list, tuple)): + return [sanitize_result(v) for v in data] + else: + return data + + +class DockerBaseClass(object): + + def __init__(self): + self.debug = False + + def log(self, msg, pretty_print=False): + pass + # if self.debug: + # log_file = open('docker.log', 'a') + # if pretty_print: + # log_file.write(json.dumps(msg, sort_keys=True, indent=4, separators=(',', ': '))) + # log_file.write(u'\n') + # else: + # log_file.write(msg + u'\n') + + +def update_tls_hostname(result): + if result['tls_hostname'] is None: + # get default machine name from the url + parsed_url = urlparse(result['docker_host']) + if ':' in parsed_url.netloc: + result['tls_hostname'] = parsed_url.netloc[:parsed_url.netloc.rindex(':')] + else: + result['tls_hostname'] = parsed_url + + +def _get_tls_config(fail_function, **kwargs): + try: + tls_config = TLSConfig(**kwargs) + return tls_config + except TLSParameterError as exc: + fail_function("TLS config error: %s" % exc) + + +def get_connect_params(auth, fail_function): + if auth['tls'] or auth['tls_verify']: + auth['docker_host'] = auth['docker_host'].replace('tcp://', 'https://') + + if auth['tls_verify'] and auth['cert_path'] and auth['key_path']: + # TLS with certs and host verification + if auth['cacert_path']: + tls_config = _get_tls_config(client_cert=(auth['cert_path'], auth['key_path']), + ca_cert=auth['cacert_path'], + verify=True, + assert_hostname=auth['tls_hostname'], + ssl_version=auth['ssl_version'], + fail_function=fail_function) + else: + tls_config = _get_tls_config(client_cert=(auth['cert_path'], auth['key_path']), + verify=True, + assert_hostname=auth['tls_hostname'], + ssl_version=auth['ssl_version'], + fail_function=fail_function) + + return dict(base_url=auth['docker_host'], + tls=tls_config, + version=auth['api_version'], + timeout=auth['timeout']) + + if auth['tls_verify'] and auth['cacert_path']: + # TLS with cacert only + tls_config = _get_tls_config(ca_cert=auth['cacert_path'], + assert_hostname=auth['tls_hostname'], + verify=True, + ssl_version=auth['ssl_version'], + fail_function=fail_function) + return dict(base_url=auth['docker_host'], + tls=tls_config, + version=auth['api_version'], + timeout=auth['timeout']) + + if auth['tls_verify']: + # TLS with verify and no certs + tls_config = _get_tls_config(verify=True, + assert_hostname=auth['tls_hostname'], + ssl_version=auth['ssl_version'], + fail_function=fail_function) + return dict(base_url=auth['docker_host'], + tls=tls_config, + version=auth['api_version'], + timeout=auth['timeout']) + + if auth['tls'] and auth['cert_path'] and auth['key_path']: + # TLS with certs and no host verification + tls_config = _get_tls_config(client_cert=(auth['cert_path'], auth['key_path']), + verify=False, + ssl_version=auth['ssl_version'], + fail_function=fail_function) + return dict(base_url=auth['docker_host'], + tls=tls_config, + version=auth['api_version'], + timeout=auth['timeout']) + + if auth['tls']: + # TLS with no certs and not host verification + tls_config = _get_tls_config(verify=False, + ssl_version=auth['ssl_version'], + fail_function=fail_function) + return dict(base_url=auth['docker_host'], + tls=tls_config, + version=auth['api_version'], + timeout=auth['timeout']) + + # No TLS + return dict(base_url=auth['docker_host'], + version=auth['api_version'], + timeout=auth['timeout']) + + +DOCKERPYUPGRADE_SWITCH_TO_DOCKER = "Try `pip uninstall docker-py` followed by `pip 
install docker`." +DOCKERPYUPGRADE_UPGRADE_DOCKER = "Use `pip install --upgrade docker` to upgrade." +DOCKERPYUPGRADE_RECOMMEND_DOCKER = ("Use `pip install --upgrade docker-py` to upgrade. " + "Hint: if you do not need Python 2.6 support, try " + "`pip uninstall docker-py` instead, followed by `pip install docker`.") + + +class AnsibleDockerClient(Client): + + def __init__(self, argument_spec=None, supports_check_mode=False, mutually_exclusive=None, + required_together=None, required_if=None, min_docker_version=MIN_DOCKER_VERSION, + min_docker_api_version=None, option_minimal_versions=None, + option_minimal_versions_ignore_params=None, fail_results=None): + + # Modules can put information in here which will always be returned + # in case client.fail() is called. + self.fail_results = fail_results or {} + + merged_arg_spec = dict() + merged_arg_spec.update(DOCKER_COMMON_ARGS) + if argument_spec: + merged_arg_spec.update(argument_spec) + self.arg_spec = merged_arg_spec + + mutually_exclusive_params = [] + mutually_exclusive_params += DOCKER_MUTUALLY_EXCLUSIVE + if mutually_exclusive: + mutually_exclusive_params += mutually_exclusive + + required_together_params = [] + required_together_params += DOCKER_REQUIRED_TOGETHER + if required_together: + required_together_params += required_together + + self.module = AnsibleModule( + argument_spec=merged_arg_spec, + supports_check_mode=supports_check_mode, + mutually_exclusive=mutually_exclusive_params, + required_together=required_together_params, + required_if=required_if) + + NEEDS_DOCKER_PY2 = (LooseVersion(min_docker_version) >= LooseVersion('2.0.0')) + + self.docker_py_version = LooseVersion(docker_version) + + if HAS_DOCKER_MODELS and HAS_DOCKER_SSLADAPTER: + self.fail("Cannot have both the docker-py and docker python modules (old and new version of Docker " + "SDK for Python) installed together as they use the same namespace and cause a corrupt " + "installation. Please uninstall both packages, and re-install only the docker-py or docker " + "python module (for %s's Python %s). It is recommended to install the docker module if no " + "support for Python 2.6 is required. Please note that simply uninstalling one of the modules " + "can leave the other module in a broken state." % (platform.node(), sys.executable)) + + if not HAS_DOCKER_PY: + if NEEDS_DOCKER_PY2: + msg = missing_required_lib("Docker SDK for Python: docker") + msg = msg + ", for example via `pip install docker`. The error was: %s" + else: + msg = missing_required_lib("Docker SDK for Python: docker (Python >= 2.7) or docker-py (Python 2.6)") + msg = msg + ", for example via `pip install docker` or `pip install docker-py` (Python 2.6). The error was: %s" + self.fail(msg % HAS_DOCKER_ERROR) + + if self.docker_py_version < LooseVersion(min_docker_version): + msg = "Error: Docker SDK for Python version is %s (%s's Python %s). Minimum version required is %s." + if not NEEDS_DOCKER_PY2: + # The minimal required version is < 2.0 (and the current version as well). + # Advertise docker (instead of docker-py) for non-Python-2.6 users. 
+ msg += DOCKERPYUPGRADE_RECOMMEND_DOCKER + elif docker_version < LooseVersion('2.0'): + msg += DOCKERPYUPGRADE_SWITCH_TO_DOCKER + else: + msg += DOCKERPYUPGRADE_UPGRADE_DOCKER + self.fail(msg % (docker_version, platform.node(), sys.executable, min_docker_version)) + + self.debug = self.module.params.get('debug') + self.check_mode = self.module.check_mode + self._connect_params = get_connect_params(self.auth_params, fail_function=self.fail) + + try: + super(AnsibleDockerClient, self).__init__(**self._connect_params) + self.docker_api_version_str = self.version()['ApiVersion'] + except APIError as exc: + self.fail("Docker API error: %s" % exc) + except Exception as exc: + self.fail("Error connecting: %s" % exc) + + self.docker_api_version = LooseVersion(self.docker_api_version_str) + if min_docker_api_version is not None: + if self.docker_api_version < LooseVersion(min_docker_api_version): + self.fail('Docker API version is %s. Minimum version required is %s.' % (self.docker_api_version_str, min_docker_api_version)) + + if option_minimal_versions is not None: + self._get_minimal_versions(option_minimal_versions, option_minimal_versions_ignore_params) + + def log(self, msg, pretty_print=False): + pass + # if self.debug: + # log_file = open('docker.log', 'a') + # if pretty_print: + # log_file.write(json.dumps(msg, sort_keys=True, indent=4, separators=(',', ': '))) + # log_file.write(u'\n') + # else: + # log_file.write(msg + u'\n') + + def fail(self, msg, **kwargs): + self.fail_results.update(kwargs) + self.module.fail_json(msg=msg, **sanitize_result(self.fail_results)) + + @staticmethod + def _get_value(param_name, param_value, env_variable, default_value): + if param_value is not None: + # take module parameter value + if param_value in BOOLEANS_TRUE: + return True + if param_value in BOOLEANS_FALSE: + return False + return param_value + + if env_variable is not None: + env_value = os.environ.get(env_variable) + if env_value is not None: + # take the env variable value + if param_name == 'cert_path': + return os.path.join(env_value, 'cert.pem') + if param_name == 'cacert_path': + return os.path.join(env_value, 'ca.pem') + if param_name == 'key_path': + return os.path.join(env_value, 'key.pem') + if env_value in BOOLEANS_TRUE: + return True + if env_value in BOOLEANS_FALSE: + return False + return env_value + + # take the default + return default_value + + @property + def auth_params(self): + # Get authentication credentials. + # Precedence: module parameters-> environment variables-> defaults. + + self.log('Getting credentials') + + params = dict() + for key in DOCKER_COMMON_ARGS: + params[key] = self.module.params.get(key) + + if self.module.params.get('use_tls'): + # support use_tls option in docker_image.py. This will be deprecated. 
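+            # (Editor's note) Mapping applied below: 'encrypt' turns on `tls`,
+            # 'verify' turns on `validate_certs`; any other value is ignored.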
+            use_tls = self.module.params.get('use_tls')
+            if use_tls == 'encrypt':
+                params['tls'] = True
+            if use_tls == 'verify':
+                params['validate_certs'] = True
+
+        result = dict(
+            docker_host=self._get_value('docker_host', params['docker_host'], 'DOCKER_HOST',
+                                        DEFAULT_DOCKER_HOST),
+            tls_hostname=self._get_value('tls_hostname', params['tls_hostname'],
+                                         'DOCKER_TLS_HOSTNAME', DEFAULT_TLS_HOSTNAME),
+            api_version=self._get_value('api_version', params['api_version'], 'DOCKER_API_VERSION',
+                                        'auto'),
+            cacert_path=self._get_value('cacert_path', params['ca_cert'], 'DOCKER_CERT_PATH', None),
+            cert_path=self._get_value('cert_path', params['client_cert'], 'DOCKER_CERT_PATH', None),
+            key_path=self._get_value('key_path', params['client_key'], 'DOCKER_CERT_PATH', None),
+            ssl_version=self._get_value('ssl_version', params['ssl_version'], 'DOCKER_SSL_VERSION', None),
+            tls=self._get_value('tls', params['tls'], 'DOCKER_TLS', DEFAULT_TLS),
+            tls_verify=self._get_value('tls_verify', params['validate_certs'], 'DOCKER_TLS_VERIFY',
+                                       DEFAULT_TLS_VERIFY),
+            timeout=self._get_value('timeout', params['timeout'], 'DOCKER_TIMEOUT',
+                                    DEFAULT_TIMEOUT_SECONDS),
+        )
+
+        update_tls_hostname(result)
+
+        return result
+
+    def _handle_ssl_error(self, error):
+        match = re.match(r"hostname.*doesn\'t match (\'.*\')", str(error))
+        if match:
+            self.fail("You asked for verification that the Docker daemon certificate's hostname matches %s. "
+                      "The actual certificate's hostname is %s. Most likely you need to set DOCKER_TLS_HOSTNAME "
+                      "or pass `tls_hostname` with a value of %s. You may also use TLS without verification by "
+                      "setting the `tls` parameter to true."
+                      % (self.auth_params['tls_hostname'], match.group(1), match.group(1)))
+        self.fail("SSL Exception: %s" % (error))
+
+    def _get_minimal_versions(self, option_minimal_versions, ignore_params=None):
+        self.option_minimal_versions = dict()
+        for option in self.module.argument_spec:
+            if ignore_params is not None:
+                if option in ignore_params:
+                    continue
+            self.option_minimal_versions[option] = dict()
+        self.option_minimal_versions.update(option_minimal_versions)
+
+        for option, data in self.option_minimal_versions.items():
+            # Test whether option is supported, and store result
+            support_docker_py = True
+            support_docker_api = True
+            if 'docker_py_version' in data:
+                support_docker_py = self.docker_py_version >= LooseVersion(data['docker_py_version'])
+            if 'docker_api_version' in data:
+                support_docker_api = self.docker_api_version >= LooseVersion(data['docker_api_version'])
+            data['supported'] = support_docker_py and support_docker_api
+            # Fail if option is not supported but used
+            if not data['supported']:
+                # Test whether option is specified
+                if 'detect_usage' in data:
+                    used = data['detect_usage'](self)
+                else:
+                    used = self.module.params.get(option) is not None
+                    if used and 'default' in self.module.argument_spec[option]:
+                        used = self.module.params[option] != self.module.argument_spec[option]['default']
+                if used:
+                    # If the option is used, compose error message.
+                    if 'usage_msg' in data:
+                        usg = data['usage_msg']
+                    else:
+                        usg = 'set %s option' % (option, )
+                    if not support_docker_api:
+                        msg = 'Docker API version is %s. Minimum version required is %s to %s.'
+                        msg = msg % (self.docker_api_version_str, data['docker_api_version'], usg)
+                    elif not support_docker_py:
+                        msg = "Docker SDK for Python version is %s (%s's Python %s). Minimum version required is %s to %s. 
" + if LooseVersion(data['docker_py_version']) < LooseVersion('2.0.0'): + msg += DOCKERPYUPGRADE_RECOMMEND_DOCKER + elif self.docker_py_version < LooseVersion('2.0.0'): + msg += DOCKERPYUPGRADE_SWITCH_TO_DOCKER + else: + msg += DOCKERPYUPGRADE_UPGRADE_DOCKER + msg = msg % (docker_version, platform.node(), sys.executable, data['docker_py_version'], usg) + else: + # should not happen + msg = 'Cannot %s with your configuration.' % (usg, ) + self.fail(msg) + + def get_container_by_id(self, container_id): + try: + self.log("Inspecting container Id %s" % container_id) + result = self.inspect_container(container=container_id) + self.log("Completed container inspection") + return result + except NotFound as dummy: + return None + except Exception as exc: + self.fail("Error inspecting container: %s" % exc) + + def get_container(self, name=None): + ''' + Lookup a container and return the inspection results. + ''' + if name is None: + return None + + search_name = name + if not name.startswith('/'): + search_name = '/' + name + + result = None + try: + for container in self.containers(all=True): + self.log("testing container: %s" % (container['Names'])) + if isinstance(container['Names'], list) and search_name in container['Names']: + result = container + break + if container['Id'].startswith(name): + result = container + break + if container['Id'] == name: + result = container + break + except SSLError as exc: + self._handle_ssl_error(exc) + except Exception as exc: + self.fail("Error retrieving container list: %s" % exc) + + if result is None: + return None + + return self.get_container_by_id(result['Id']) + + def get_network(self, name=None, network_id=None): + ''' + Lookup a network and return the inspection results. + ''' + if name is None and network_id is None: + return None + + result = None + + if network_id is None: + try: + for network in self.networks(): + self.log("testing network: %s" % (network['Name'])) + if name == network['Name']: + result = network + break + if network['Id'].startswith(name): + result = network + break + except SSLError as exc: + self._handle_ssl_error(exc) + except Exception as exc: + self.fail("Error retrieving network list: %s" % exc) + + if result is not None: + network_id = result['Id'] + + if network_id is not None: + try: + self.log("Inspecting network Id %s" % network_id) + result = self.inspect_network(network_id) + self.log("Completed network inspection") + except NotFound as dummy: + return None + except Exception as exc: + self.fail("Error inspecting network: %s" % exc) + + return result + + def find_image(self, name, tag): + ''' + Lookup an image (by name and tag) and return the inspection results. 
+ ''' + if not name: + return None + + self.log("Find image %s:%s" % (name, tag)) + images = self._image_lookup(name, tag) + if not images: + # In API <= 1.20 seeing 'docker.io/' as the name of images pulled from docker hub + registry, repo_name = auth.resolve_repository_name(name) + if registry == 'docker.io': + # If docker.io is explicitly there in name, the image + # isn't found in some cases (#41509) + self.log("Check for docker.io image: %s" % repo_name) + images = self._image_lookup(repo_name, tag) + if not images and repo_name.startswith('library/'): + # Sometimes library/xxx images are not found + lookup = repo_name[len('library/'):] + self.log("Check for docker.io image: %s" % lookup) + images = self._image_lookup(lookup, tag) + if not images: + # Last case: if docker.io wasn't there, it can be that + # the image wasn't found either (#15586) + lookup = "%s/%s" % (registry, repo_name) + self.log("Check for docker.io image: %s" % lookup) + images = self._image_lookup(lookup, tag) + + if len(images) > 1: + self.fail("Registry returned more than one result for %s:%s" % (name, tag)) + + if len(images) == 1: + try: + inspection = self.inspect_image(images[0]['Id']) + except Exception as exc: + self.fail("Error inspecting image %s:%s - %s" % (name, tag, str(exc))) + return inspection + + self.log("Image %s:%s not found." % (name, tag)) + return None + + def find_image_by_id(self, image_id): + ''' + Lookup an image (by ID) and return the inspection results. + ''' + if not image_id: + return None + + self.log("Find image %s (by ID)" % image_id) + try: + inspection = self.inspect_image(image_id) + except Exception as exc: + self.fail("Error inspecting image ID %s - %s" % (image_id, str(exc))) + return inspection + + def _image_lookup(self, name, tag): + ''' + Including a tag in the name parameter sent to the Docker SDK for Python images method + does not work consistently. Instead, get the result set for name and manually check + if the tag exists. + ''' + try: + response = self.images(name=name) + except Exception as exc: + self.fail("Error searching for image %s - %s" % (name, str(exc))) + images = response + if tag: + lookup = "%s:%s" % (name, tag) + lookup_digest = "%s@%s" % (name, tag) + images = [] + for image in response: + tags = image.get('RepoTags') + digests = image.get('RepoDigests') + if (tags and lookup in tags) or (digests and lookup_digest in digests): + images = [image] + break + return images + + def pull_image(self, name, tag="latest"): + ''' + Pull an image + ''' + self.log("Pulling image %s:%s" % (name, tag)) + old_tag = self.find_image(name, tag) + try: + for line in self.pull(name, tag=tag, stream=True, decode=True): + self.log(line, pretty_print=True) + if line.get('error'): + if line.get('errorDetail'): + error_detail = line.get('errorDetail') + self.fail("Error pulling %s - code: %s message: %s" % (name, + error_detail.get('code'), + error_detail.get('message'))) + else: + self.fail("Error pulling %s - %s" % (name, line.get('error'))) + except Exception as exc: + self.fail("Error pulling image %s:%s - %s" % (name, tag, str(exc))) + + new_tag = self.find_image(name, tag) + + return new_tag, old_tag == new_tag + + def report_warnings(self, result, warnings_key=None): + ''' + Checks result of client operation for warnings, and if present, outputs them. + + warnings_key should be a list of keys used to crawl the result dictionary. + For example, if warnings_key == ['a', 'b'], the function will consider + result['a']['b'] if these keys exist. 
If the result is a non-empty string, it + will be reported as a warning. If the result is a list, every entry will be + reported as a warning. + + In most cases (if warnings are returned at all), warnings_key should be + ['Warnings'] or ['Warning']. The default value (if not specified) is ['Warnings']. + ''' + if warnings_key is None: + warnings_key = ['Warnings'] + for key in warnings_key: + if not isinstance(result, Mapping): + return + result = result.get(key) + if isinstance(result, Sequence): + for warning in result: + self.module.warn('Docker warning: {0}'.format(warning)) + elif isinstance(result, string_types) and result: + self.module.warn('Docker warning: {0}'.format(result)) + + def inspect_distribution(self, image, **kwargs): + ''' + Get image digest by directly calling the Docker API when running Docker SDK < 4.0.0 + since prior versions did not support accessing private repositories. + ''' + if self.docker_py_version < LooseVersion('4.0.0'): + registry = auth.resolve_repository_name(image)[0] + header = auth.get_config_header(self, registry) + if header: + return self._result(self._get( + self._url('/distribution/{0}/json', image), + headers={'X-Registry-Auth': header} + ), json=True) + return super(AnsibleDockerClient, self).inspect_distribution(image, **kwargs) + + +def compare_dict_allow_more_present(av, bv): + ''' + Compare two dictionaries for whether every entry of the first is in the second. + ''' + for key, value in av.items(): + if key not in bv: + return False + if bv[key] != value: + return False + return True + + +def compare_generic(a, b, method, datatype): + ''' + Compare values a and b as described by method and datatype. + + Returns ``True`` if the values compare equal, and ``False`` if not. + + ``a`` is usually the module's parameter, while ``b`` is a property + of the current object. ``a`` must not be ``None`` (except for + ``datatype == 'value'``). + + Valid values for ``method`` are: + - ``ignore`` (always compare as equal); + - ``strict`` (only compare if really equal) + - ``allow_more_present`` (allow b to have elements which a does not have). + + Valid values for ``datatype`` are: + - ``value``: for simple values (strings, numbers, ...); + - ``list``: for ``list``s or ``tuple``s where order matters; + - ``set``: for ``list``s, ``tuple``s or ``set``s where order does not + matter; + - ``set(dict)``: for ``list``s, ``tuple``s or ``sets`` where order does + not matter and which contain ``dict``s; ``allow_more_present`` is used + for the ``dict``s, and these are assumed to be dictionaries of values; + - ``dict``: for dictionaries of values. 
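+
+    A few illustrative calls (editor's sketch)::
+
+        compare_generic('a', 'a', 'strict', 'value')                # True
+        compare_generic([1, 2], [2, 1], 'strict', 'set')            # True
+        compare_generic([1], [1, 2], 'allow_more_present', 'set')   # True
+        compare_generic([1, 2], [1], 'allow_more_present', 'set')   # False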
+ ''' + if method == 'ignore': + return True + # If a or b is None: + if a is None or b is None: + # If both are None: equality + if a == b: + return True + # Otherwise, not equal for values, and equal + # if the other is empty for set/list/dict + if datatype == 'value': + return False + # For allow_more_present, allow a to be None + if method == 'allow_more_present' and a is None: + return True + # Otherwise, the iterable object which is not None must have length 0 + return len(b if a is None else a) == 0 + # Do proper comparison (both objects not None) + if datatype == 'value': + return a == b + elif datatype == 'list': + if method == 'strict': + return a == b + else: + i = 0 + for v in a: + while i < len(b) and b[i] != v: + i += 1 + if i == len(b): + return False + i += 1 + return True + elif datatype == 'dict': + if method == 'strict': + return a == b + else: + return compare_dict_allow_more_present(a, b) + elif datatype == 'set': + set_a = set(a) + set_b = set(b) + if method == 'strict': + return set_a == set_b + else: + return set_b >= set_a + elif datatype == 'set(dict)': + for av in a: + found = False + for bv in b: + if compare_dict_allow_more_present(av, bv): + found = True + break + if not found: + return False + if method == 'strict': + # If we would know that both a and b do not contain duplicates, + # we could simply compare len(a) to len(b) to finish this test. + # We can assume that b has no duplicates (as it is returned by + # docker), but we don't know for a. + for bv in b: + found = False + for av in a: + if compare_dict_allow_more_present(av, bv): + found = True + break + if not found: + return False + return True + + +class DifferenceTracker(object): + def __init__(self): + self._diff = [] + + def add(self, name, parameter=None, active=None): + self._diff.append(dict( + name=name, + parameter=parameter, + active=active, + )) + + def merge(self, other_tracker): + self._diff.extend(other_tracker._diff) + + @property + def empty(self): + return len(self._diff) == 0 + + def get_before_after(self): + ''' + Return texts ``before`` and ``after``. + ''' + before = dict() + after = dict() + for item in self._diff: + before[item['name']] = item['active'] + after[item['name']] = item['parameter'] + return before, after + + def has_difference_for(self, name): + ''' + Returns a boolean if a difference exists for name + ''' + return any(diff for diff in self._diff if diff['name'] == name) + + def get_legacy_docker_container_diffs(self): + ''' + Return differences in the docker_container legacy format. + ''' + result = [] + for entry in self._diff: + item = dict() + item[entry['name']] = dict( + parameter=entry['parameter'], + container=entry['active'], + ) + result.append(item) + return result + + def get_legacy_docker_diffs(self): + ''' + Return differences in the docker_container legacy format. + ''' + result = [entry['name'] for entry in self._diff] + return result + + +def clean_dict_booleans_for_docker_api(data): + ''' + Go doesn't like Python booleans 'True' or 'False', while Ansible is just + fine with them in YAML. As such, they need to be converted in cases where + we pass dictionaries to the Docker API (e.g. docker_network's + driver_options and docker_prune's filters). + ''' + result = dict() + if data is not None: + for k, v in data.items(): + if v is True: + v = 'true' + elif v is False: + v = 'false' + else: + v = str(v) + result[str(k)] = v + return result + + +def convert_duration_to_nanosecond(time_str): + """ + Return time duration in nanosecond. 
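+
+    For example (editor's illustration)::
+
+        convert_duration_to_nanosecond('1m30s')  # -> 90000000000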
+ """ + if not isinstance(time_str, str): + raise ValueError('Missing unit in duration - %s' % time_str) + + regex = re.compile( + r'^(((?P\d+)h)?' + r'((?P\d+)m(?!s))?' + r'((?P\d+)s)?' + r'((?P\d+)ms)?' + r'((?P\d+)us)?)$' + ) + parts = regex.match(time_str) + + if not parts: + raise ValueError('Invalid time duration - %s' % time_str) + + parts = parts.groupdict() + time_params = {} + for (name, value) in parts.items(): + if value: + time_params[name] = int(value) + + delta = timedelta(**time_params) + time_in_nanoseconds = ( + delta.microseconds + (delta.seconds + delta.days * 24 * 3600) * 10 ** 6 + ) * 10 ** 3 + + return time_in_nanoseconds + + +def parse_healthcheck(healthcheck): + """ + Return dictionary of healthcheck parameters and boolean if + healthcheck defined in image was requested to be disabled. + """ + if (not healthcheck) or (not healthcheck.get('test')): + return None, None + + result = dict() + + # All supported healthcheck parameters + options = dict( + test='test', + interval='interval', + timeout='timeout', + start_period='start_period', + retries='retries' + ) + + duration_options = ['interval', 'timeout', 'start_period'] + + for (key, value) in options.items(): + if value in healthcheck: + if healthcheck.get(value) is None: + # due to recursive argument_spec, all keys are always present + # (but have default value None if not specified) + continue + if value in duration_options: + time = convert_duration_to_nanosecond(healthcheck.get(value)) + if time: + result[key] = time + elif healthcheck.get(value): + result[key] = healthcheck.get(value) + if key == 'test': + if isinstance(result[key], (tuple, list)): + result[key] = [str(e) for e in result[key]] + else: + result[key] = ['CMD-SHELL', str(result[key])] + elif key == 'retries': + try: + result[key] = int(result[key]) + except ValueError: + raise ValueError( + 'Cannot parse number of retries for healthcheck. ' + 'Expected an integer, got "{0}".'.format(result[key]) + ) + + if result['test'] == ['NONE']: + # If the user explicitly disables the healthcheck, return None + # as the healthcheck object, and set disable_healthcheck to True + return None, True + + return result, False + + +def omit_none_from_dict(d): + """ + Return a copy of the dictionary with all keys with value None omitted. + """ + return dict((k, v) for (k, v) in d.items() if v is not None) diff --git a/plugins/module_utils/docker/swarm.py b/plugins/module_utils/docker/swarm.py new file mode 100644 index 0000000000..610ed9a81f --- /dev/null +++ b/plugins/module_utils/docker/swarm.py @@ -0,0 +1,280 @@ +# (c) 2019 Piotr Wojciechowski (@wojciechowskipiotr) +# (c) Thierry Bouvet (@tbouvet) +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + + +import json +from time import sleep + +try: + from docker.errors import APIError, NotFound +except ImportError: + # missing Docker SDK for Python handled in ansible.module_utils.docker.common + pass + +from ansible.module_utils._text import to_native +from ansible_collections.community.general.plugins.module_utils.docker.common import ( + AnsibleDockerClient, + LooseVersion, +) + + +class AnsibleDockerSwarmClient(AnsibleDockerClient): + + def __init__(self, **kwargs): + super(AnsibleDockerSwarmClient, self).__init__(**kwargs) + + def get_swarm_node_id(self): + """ + Get the 'NodeID' of the Swarm node or 'None' if host is not in Swarm. 
It returns the NodeID
+        of the Docker host the module is executed on.
+        :return:
+            NodeID of the host, or 'None' if it is not part of a Swarm
+        """
+
+        try:
+            info = self.info()
+        except APIError as exc:
+            self.fail("Failed to get node information for %s" % to_native(exc))
+
+        if info:
+            json_str = json.dumps(info, ensure_ascii=False)
+            swarm_info = json.loads(json_str)
+            if swarm_info['Swarm']['NodeID']:
+                return swarm_info['Swarm']['NodeID']
+        return None
+
+    def check_if_swarm_node(self, node_id=None):
+        """
+        Check whether the host is part of a Docker Swarm. If 'node_id' is not provided it reads the Docker host
+        system information and looks for the relevant key in the output. If 'node_id' is provided it tries to
+        read the node information, assuming it is run on a Swarm manager. The get_node_inspect() method handles the
+        exception if it is not executed on a Swarm manager.
+
+        :param node_id: Node identifier
+        :return:
+            bool: True if node is part of Swarm, False otherwise
+        """
+
+        if node_id is None:
+            try:
+                info = self.info()
+            except APIError:
+                self.fail("Failed to get host information.")
+
+            if info:
+                json_str = json.dumps(info, ensure_ascii=False)
+                swarm_info = json.loads(json_str)
+                if swarm_info['Swarm']['NodeID']:
+                    return True
+                if swarm_info['Swarm']['LocalNodeState'] in ('active', 'pending', 'locked'):
+                    return True
+            return False
+        else:
+            try:
+                node_info = self.get_node_inspect(node_id=node_id)
+            except APIError:
+                return
+
+            if node_info['ID'] is not None:
+                return True
+            return False
+
+    def check_if_swarm_manager(self):
+        """
+        Checks whether the node role is set as Manager in the Swarm. The node is the docker host on which the module
+        action is performed. The inspect_swarm() call will fail if the node is not a manager.
+
+        :return: True if node is Swarm Manager, False otherwise
+        """
+
+        try:
+            self.inspect_swarm()
+            return True
+        except APIError:
+            return False
+
+    def fail_task_if_not_swarm_manager(self):
+        """
+        If the host is not a swarm manager then the Ansible task on this host should end with the 'failed' state.
+        """
+        if not self.check_if_swarm_manager():
+            self.fail("Error running docker swarm module: must run on swarm manager node")
+
+    def check_if_swarm_worker(self):
+        """
+        Checks whether the node role is set as Worker in the Swarm. The node is the docker host on which the module
+        action is performed. Will fail, via check_if_swarm_node(), if run on a host that is not part of a Swarm.
+
+        :return: True if node is Swarm Worker, False otherwise
+        """
+
+        if self.check_if_swarm_node() and not self.check_if_swarm_manager():
+            return True
+        return False
+
+    def check_if_swarm_node_is_down(self, node_id=None, repeat_check=1):
+        """
+        Checks whether the node status on the Swarm manager is 'down'. If node_id is provided it queries the manager
+        about the node specified in the parameter, otherwise it queries the manager itself.
If run on a Swarm Worker node or a
+        host that is not part of the Swarm it will fail the playbook.
+
+        :param repeat_check: number of check attempts with a 5 second delay between them; by default check only once
+        :param node_id: node ID or name; if None then the method will try to use the node_id of the host the module runs on
+        :return:
+            True if node is part of swarm but its state is down, False otherwise
+        """
+
+        if repeat_check < 1:
+            repeat_check = 1
+
+        if node_id is None:
+            node_id = self.get_swarm_node_id()
+
+        for retry in range(0, repeat_check):
+            if retry > 0:
+                sleep(5)
+            node_info = self.get_node_inspect(node_id=node_id)
+            if node_info['Status']['State'] == 'down':
+                return True
+        return False
+
+    def get_node_inspect(self, node_id=None, skip_missing=False):
+        """
+        Returns Swarm node info, as in the 'docker node inspect' command, about a single node
+
+        :param skip_missing: if True then the function will return None instead of failing the task
+        :param node_id: node ID or name; if None then the method will try to use the node_id of the host the module runs on
+        :return:
+            Single node information structure
+        """
+
+        if node_id is None:
+            node_id = self.get_swarm_node_id()
+
+        if node_id is None:
+            self.fail("Failed to get node information.")
+
+        try:
+            node_info = self.inspect_node(node_id=node_id)
+        except APIError as exc:
+            if exc.status_code == 503:
+                self.fail("Cannot inspect node: To inspect node execute module on Swarm Manager")
+            if exc.status_code == 404:
+                if skip_missing:
+                    return None
+            self.fail("Error while reading from Swarm manager: %s" % to_native(exc))
+        except Exception as exc:
+            self.fail("Error inspecting swarm node: %s" % exc)
+
+        json_str = json.dumps(node_info, ensure_ascii=False)
+        node_info = json.loads(json_str)
+
+        if 'ManagerStatus' in node_info:
+            if node_info['ManagerStatus'].get('Leader'):
+                # This is a workaround for a bug in Docker where in some cases the Leader IP is 0.0.0.0.
+                # Check moby/moby#35437 for details.
+                count_colons = node_info['ManagerStatus']['Addr'].count(":")
+                if count_colons == 1:
+                    swarm_leader_ip = node_info['ManagerStatus']['Addr'].split(":", 1)[0] or node_info['Status']['Addr']
+                else:
+                    swarm_leader_ip = node_info['Status']['Addr']
+                node_info['Status']['Addr'] = swarm_leader_ip
+        return node_info
+
+    def get_all_nodes_inspect(self):
+        """
+        Returns Swarm node info, as in the 'docker node inspect' command, about all registered nodes
+
+        :return:
+            Structure with information about all nodes
+        """
+        try:
+            node_info = self.nodes()
+        except APIError as exc:
+            if exc.status_code == 503:
+                self.fail("Cannot inspect node: To inspect node execute module on Swarm Manager")
+            self.fail("Error while reading from Swarm manager: %s" % to_native(exc))
+        except Exception as exc:
+            self.fail("Error inspecting swarm node: %s" % exc)
+
+        json_str = json.dumps(node_info, ensure_ascii=False)
+        node_info = json.loads(json_str)
+        return node_info
+
+    def get_all_nodes_list(self, output='short'):
+        """
+        Returns a list of nodes registered in the Swarm
+
+        :param output: Defines the format of the returned data
+        :return:
+            If 'output' is 'short' then the returned data is a list of node hostnames registered in the Swarm;
+            if 'output' is 'long' then the returned data is a list of dicts containing the attributes as in
+            the output of the 'docker node ls' command
+        """
+        nodes_list = []
+
+        nodes_inspect = self.get_all_nodes_inspect()
+        if nodes_inspect is None:
+            return None
+
+        if output == 'short':
+            for node in nodes_inspect:
+                nodes_list.append(node['Description']['Hostname'])
+        elif output == 'long':
+            for node in nodes_inspect:
+                node_property = 
{} + + node_property.update({'ID': node['ID']}) + node_property.update({'Hostname': node['Description']['Hostname']}) + node_property.update({'Status': node['Status']['State']}) + node_property.update({'Availability': node['Spec']['Availability']}) + if 'ManagerStatus' in node: + if node['ManagerStatus']['Leader'] is True: + node_property.update({'Leader': True}) + node_property.update({'ManagerStatus': node['ManagerStatus']['Reachability']}) + node_property.update({'EngineVersion': node['Description']['Engine']['EngineVersion']}) + + nodes_list.append(node_property) + else: + return None + + return nodes_list + + def get_node_name_by_id(self, nodeid): + return self.get_node_inspect(nodeid)['Description']['Hostname'] + + def get_unlock_key(self): + if self.docker_py_version < LooseVersion('2.7.0'): + return None + return super(AnsibleDockerSwarmClient, self).get_unlock_key() + + def get_service_inspect(self, service_id, skip_missing=False): + """ + Returns Swarm service info as in 'docker service inspect' command about single service + + :param service_id: service ID or name + :param skip_missing: if True then function will return None instead of failing the task + :return: + Single service information structure + """ + try: + service_info = self.inspect_service(service_id) + except NotFound as exc: + if skip_missing is False: + self.fail("Error while reading from Swarm manager: %s" % to_native(exc)) + else: + return None + except APIError as exc: + if exc.status_code == 503: + self.fail("Cannot inspect service: To inspect service execute module on Swarm Manager") + self.fail("Error inspecting swarm service: %s" % exc) + except Exception as exc: + self.fail("Error inspecting swarm service: %s" % exc) + + json_str = json.dumps(service_info, ensure_ascii=False) + service_info = json.loads(json_str) + return service_info diff --git a/plugins/module_utils/exoscale.py b/plugins/module_utils/exoscale.py new file mode 100644 index 0000000000..e56f27144f --- /dev/null +++ b/plugins/module_utils/exoscale.py @@ -0,0 +1,139 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2016, René Moser +# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +import os + +from ansible.module_utils.six.moves import configparser +from ansible.module_utils.six import integer_types, string_types +from ansible.module_utils._text import to_native, to_text +from ansible.module_utils.urls import fetch_url + +EXO_DNS_BASEURL = "https://api.exoscale.ch/dns/v1" + + +def exo_dns_argument_spec(): + return dict( + api_key=dict(default=os.environ.get('CLOUDSTACK_KEY'), no_log=True), + api_secret=dict(default=os.environ.get('CLOUDSTACK_SECRET'), no_log=True), + api_timeout=dict(type='int', default=os.environ.get('CLOUDSTACK_TIMEOUT') or 10), + api_region=dict(default=os.environ.get('CLOUDSTACK_REGION') or 'cloudstack'), + validate_certs=dict(default=True, type='bool'), + ) + + +def exo_dns_required_together(): + return [['api_key', 'api_secret']] + + +class ExoDns(object): + + def __init__(self, module): + self.module = module + + self.api_key = self.module.params.get('api_key') + self.api_secret = self.module.params.get('api_secret') + if not (self.api_key and self.api_secret): + try: + region = self.module.params.get('api_region') + config = self.read_config(ini_group=region) + self.api_key = config['key'] + self.api_secret = config['secret'] + except Exception as e: + 
self.module.fail_json(msg="Error while processing config: %s" % to_native(e)) + + self.headers = { + 'X-DNS-Token': "%s:%s" % (self.api_key, self.api_secret), + 'Content-Type': 'application/json', + 'Accept': 'application/json', + } + self.result = { + 'changed': False, + 'diff': { + 'before': {}, + 'after': {}, + } + } + + def read_config(self, ini_group=None): + if not ini_group: + ini_group = os.environ.get('CLOUDSTACK_REGION', 'cloudstack') + + keys = ['key', 'secret'] + env_conf = {} + for key in keys: + if 'CLOUDSTACK_%s' % key.upper() not in os.environ: + break + else: + env_conf[key] = os.environ['CLOUDSTACK_%s' % key.upper()] + else: + return env_conf + + # Config file: $PWD/cloudstack.ini or $HOME/.cloudstack.ini + # Last read wins in configparser + paths = ( + os.path.join(os.path.expanduser('~'), '.cloudstack.ini'), + os.path.join(os.getcwd(), 'cloudstack.ini'), + ) + # Look at CLOUDSTACK_CONFIG first if present + if 'CLOUDSTACK_CONFIG' in os.environ: + paths += (os.path.expanduser(os.environ['CLOUDSTACK_CONFIG']),) + if not any([os.path.exists(c) for c in paths]): + self.module.fail_json(msg="Config file not found. Tried : %s" % ", ".join(paths)) + + conf = configparser.ConfigParser() + conf.read(paths) + return dict(conf.items(ini_group)) + + def api_query(self, resource="/domains", method="GET", data=None): + url = EXO_DNS_BASEURL + resource + if data: + data = self.module.jsonify(data) + + response, info = fetch_url( + module=self.module, + url=url, + data=data, + method=method, + headers=self.headers, + timeout=self.module.params.get('api_timeout'), + ) + + if info['status'] not in (200, 201, 204): + self.module.fail_json(msg="%s returned %s, with body: %s" % (url, info['status'], info['msg'])) + + try: + return self.module.from_json(to_text(response.read())) + + except Exception as e: + self.module.fail_json(msg="Could not process response into json: %s" % to_native(e)) + + def has_changed(self, want_dict, current_dict, only_keys=None): + changed = False + for key, value in want_dict.items(): + # Optionally limit by a list of keys + if only_keys and key not in only_keys: + continue + # Skip None values + if value is None: + continue + if key in current_dict: + if isinstance(current_dict[key], integer_types): + if value != current_dict[key]: + self.result['diff']['before'][key] = current_dict[key] + self.result['diff']['after'][key] = value + changed = True + elif isinstance(current_dict[key], string_types): + if value.lower() != current_dict[key].lower(): + self.result['diff']['before'][key] = current_dict[key] + self.result['diff']['after'][key] = value + changed = True + else: + self.module.fail_json(msg="Unable to determine comparison for key %s" % key) + else: + self.result['diff']['after'][key] = value + changed = True + return changed diff --git a/plugins/module_utils/f5_utils.py b/plugins/module_utils/f5_utils.py new file mode 100644 index 0000000000..17994f99cf --- /dev/null +++ b/plugins/module_utils/f5_utils.py @@ -0,0 +1,383 @@ +# +# Copyright 2016 F5 Networks Inc. +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <https://www.gnu.org/licenses/>.
+
+
+# Legacy
+
+try:
+    import bigsuds
+    bigsuds_found = True
+except ImportError:
+    bigsuds_found = False
+
+
+from ansible.module_utils.basic import env_fallback
+
+
+def f5_argument_spec():
+    return dict(
+        server=dict(
+            type='str',
+            required=True,
+            fallback=(env_fallback, ['F5_SERVER'])
+        ),
+        user=dict(
+            type='str',
+            required=True,
+            fallback=(env_fallback, ['F5_USER'])
+        ),
+        password=dict(
+            type='str',
+            aliases=['pass', 'pwd'],
+            required=True,
+            no_log=True,
+            fallback=(env_fallback, ['F5_PASSWORD'])
+        ),
+        validate_certs=dict(
+            default='yes',
+            type='bool',
+            fallback=(env_fallback, ['F5_VALIDATE_CERTS'])
+        ),
+        server_port=dict(
+            type='int',
+            default=443,
+            fallback=(env_fallback, ['F5_SERVER_PORT'])
+        ),
+        state=dict(
+            type='str',
+            default='present',
+            choices=['present', 'absent']
+        ),
+        partition=dict(
+            type='str',
+            default='Common',
+            fallback=(env_fallback, ['F5_PARTITION'])
+        )
+    )
+
+
+def f5_parse_arguments(module):
+    if not bigsuds_found:
+        module.fail_json(msg="the python bigsuds module is required")
+
+    if module.params['validate_certs']:
+        import ssl
+        if not hasattr(ssl, 'SSLContext'):
+            module.fail_json(
+                msg="bigsuds does not support verifying certificates with python < 2.7.9. "
+                    "Either update python or set validate_certs=False on the task")
+
+    return (
+        module.params['server'],
+        module.params['user'],
+        module.params['password'],
+        module.params['state'],
+        module.params['partition'],
+        module.params['validate_certs'],
+        module.params['server_port']
+    )
+
+
+def bigip_api(bigip, user, password, validate_certs, port=443):
+    try:
+        if bigsuds.__version__ >= '1.0.4':
+            api = bigsuds.BIGIP(hostname=bigip, username=user, password=password, verify=validate_certs, port=port)
+        elif bigsuds.__version__ == '1.0.3':
+            api = bigsuds.BIGIP(hostname=bigip, username=user, password=password, verify=validate_certs)
+        else:
+            api = bigsuds.BIGIP(hostname=bigip, username=user, password=password)
+    except TypeError:
+        # bigsuds < 1.0.3, no verify param
+        if validate_certs:
+            # Note: verified we have SSLContext when we parsed params
+            api = bigsuds.BIGIP(hostname=bigip, username=user, password=password)
+        else:
+            import ssl
+            if hasattr(ssl, 'SSLContext'):
+                # Really, you should never do this. It disables certificate
+                # verification *globally*. But since older bigip libraries
+                # don't give us a way to toggle verification we need to
+                # disable it at the global level.
+ # From https://www.python.org/dev/peps/pep-0476/#id29 + ssl._create_default_https_context = ssl._create_unverified_context + api = bigsuds.BIGIP(hostname=bigip, username=user, password=password) + + return api + + +# Fully Qualified name (with the partition) +def fq_name(partition, name): + if name is not None and not name.startswith('/'): + return '/%s/%s' % (partition, name) + return name + + +# Fully Qualified name (with partition) for a list +def fq_list_names(partition, list_names): + if list_names is None: + return None + return map(lambda x: fq_name(partition, x), list_names) + + +def to_commands(module, commands): + spec = { + 'command': dict(key=True), + 'prompt': dict(), + 'answer': dict() + } + transform = ComplexList(spec, module) + return transform(commands) + + +def run_commands(module, commands, check_rc=True): + responses = list() + commands = to_commands(module, to_list(commands)) + for cmd in commands: + cmd = module.jsonify(cmd) + rc, out, err = exec_command(module, cmd) + if check_rc and rc != 0: + module.fail_json(msg=to_text(err, errors='surrogate_then_replace'), rc=rc) + responses.append(to_text(out, errors='surrogate_then_replace')) + return responses + + +# New style + +from abc import ABCMeta, abstractproperty +from collections import defaultdict + +try: + from f5.bigip import ManagementRoot as BigIpMgmt + from f5.bigip.contexts import TransactionContextManager as BigIpTxContext + + from f5.bigiq import ManagementRoot as BigIqMgmt + + from f5.iworkflow import ManagementRoot as iWorkflowMgmt + from icontrol.exceptions import iControlUnexpectedHTTPError + HAS_F5SDK = True +except ImportError: + HAS_F5SDK = False + + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.six import iteritems, with_metaclass +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import to_list, ComplexList +from ansible.module_utils.connection import exec_command +from ansible.module_utils._text import to_text + + +F5_COMMON_ARGS = dict( + server=dict( + type='str', + required=True, + fallback=(env_fallback, ['F5_SERVER']) + ), + user=dict( + type='str', + required=True, + fallback=(env_fallback, ['F5_USER']) + ), + password=dict( + type='str', + aliases=['pass', 'pwd'], + required=True, + no_log=True, + fallback=(env_fallback, ['F5_PASSWORD']) + ), + validate_certs=dict( + default='yes', + type='bool', + fallback=(env_fallback, ['F5_VALIDATE_CERTS']) + ), + server_port=dict( + type='int', + default=443, + fallback=(env_fallback, ['F5_SERVER_PORT']) + ), + state=dict( + type='str', + default='present', + choices=['present', 'absent'] + ), + partition=dict( + type='str', + default='Common', + fallback=(env_fallback, ['F5_PARTITION']) + ) +) + + +class AnsibleF5Client(object): + def __init__(self, argument_spec=None, supports_check_mode=False, + mutually_exclusive=None, required_together=None, + required_if=None, required_one_of=None, add_file_common_args=False, + f5_product_name='bigip', sans_state=False, sans_partition=False): + + self.f5_product_name = f5_product_name + + merged_arg_spec = dict() + merged_arg_spec.update(F5_COMMON_ARGS) + if argument_spec: + merged_arg_spec.update(argument_spec) + if sans_state: + del merged_arg_spec['state'] + if sans_partition: + del merged_arg_spec['partition'] + self.arg_spec = merged_arg_spec + + mutually_exclusive_params = [] + if mutually_exclusive: + mutually_exclusive_params += mutually_exclusive + + required_together_params = [] + if required_together: + required_together_params += 
required_together
+
+        self.module = AnsibleModule(
+            argument_spec=merged_arg_spec,
+            supports_check_mode=supports_check_mode,
+            mutually_exclusive=mutually_exclusive_params,
+            required_together=required_together_params,
+            required_if=required_if,
+            required_one_of=required_one_of,
+            add_file_common_args=add_file_common_args
+        )
+
+        self.check_mode = self.module.check_mode
+        self._connect_params = self._get_connect_params()
+
+        if 'transport' not in self.module.params or self.module.params['transport'] != 'cli':
+            try:
+                self.api = self._get_mgmt_root(
+                    f5_product_name, **self._connect_params
+                )
+            except iControlUnexpectedHTTPError as exc:
+                self.fail(str(exc))
+
+    def fail(self, msg):
+        self.module.fail_json(msg=msg)
+
+    def _get_connect_params(self):
+        params = dict(
+            user=self.module.params['user'],
+            password=self.module.params['password'],
+            server=self.module.params['server'],
+            server_port=self.module.params['server_port'],
+            validate_certs=self.module.params['validate_certs']
+        )
+        return params
+
+    def _get_mgmt_root(self, type, **kwargs):
+        if type == 'bigip':
+            return BigIpMgmt(
+                kwargs['server'],
+                kwargs['user'],
+                kwargs['password'],
+                port=kwargs['server_port'],
+                token='tmos'
+            )
+        elif type == 'iworkflow':
+            return iWorkflowMgmt(
+                kwargs['server'],
+                kwargs['user'],
+                kwargs['password'],
+                port=kwargs['server_port'],
+                token='local'
+            )
+        elif type == 'bigiq':
+            return BigIqMgmt(
+                kwargs['server'],
+                kwargs['user'],
+                kwargs['password'],
+                port=kwargs['server_port'],
+                auth_provider='local'
+            )
+
+    def reconnect(self):
+        """Attempts to reconnect to a device
+
+        The existing token from a ManagementRoot can become invalid if you,
+        for example, upgrade the device (such as is done in the *_software
+        modules).
+
+        This method can be used to reconnect to a remote device without
+        having to re-instantiate the ArgumentSpec and AnsibleF5Client classes;
+        it will use the same values that were initially provided to those
+        classes.
+
+        :return:
+        :raises iControlUnexpectedHTTPError
+        """
+        self.api = self._get_mgmt_root(
+            self.f5_product_name, **self._connect_params
+        )
+
+
+class AnsibleF5Parameters(object):
+    def __init__(self, params=None):
+        self._values = defaultdict(lambda: None)
+        self._values['__warnings'] = []
+        if params:
+            self.update(params=params)
+
+    def update(self, params=None):
+        if params:
+            for k, v in iteritems(params):
+                if self.api_map is not None and k in self.api_map:
+                    dict_to_use = self.api_map
+                    map_key = self.api_map[k]
+                else:
+                    dict_to_use = self._values
+                    map_key = k
+
+                # Handle weird API parameters like `dns.proxy.__iter__` by
+                # using a map provided by the module developer
+                class_attr = getattr(type(self), map_key, None)
+                if isinstance(class_attr, property):
+                    # There is a mapped value for the api_map key
+                    if class_attr.fset is None:
+                        # If the mapped value does not have an associated setter
+                        self._values[map_key] = v
+                    else:
+                        # The mapped value has a setter
+                        setattr(self, map_key, v)
+                else:
+                    # If the mapped value is not a @property
+                    self._values[map_key] = v
+
+    def __getattr__(self, item):
+        # Ensures that properties that weren't defined, and therefore stashed
+        # in the `_values` dict, will be retrievable.
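+        # (Editor's note) Because `_values` is a defaultdict(lambda: None),
+        # e.g. AnsibleF5Parameters(params={'server': 'x'}).user is None
+        # rather than raising AttributeError.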
+        return self._values[item]
+
+    @property
+    def partition(self):
+        if self._values['partition'] is None:
+            return 'Common'
+        return self._values['partition'].strip('/')
+
+    @partition.setter
+    def partition(self, value):
+        self._values['partition'] = value
+
+    def _filter_params(self, params):
+        return dict((k, v) for k, v in iteritems(params) if v is not None)
+
+
+class F5ModuleError(Exception):
+    pass
diff --git a/plugins/module_utils/firewalld.py b/plugins/module_utils/firewalld.py
new file mode 100644
index 0000000000..b44e0316aa
--- /dev/null
+++ b/plugins/module_utils/firewalld.py
@@ -0,0 +1,316 @@
+# -*- coding: utf-8 -*-
+#
+# (c) 2013-2018, Adam Miller (maxamillion@fedoraproject.org)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# Imports and info for sanity checking
+from distutils.version import LooseVersion
+
+FW_VERSION = None
+fw = None
+fw_offline = False
+import_failure = True
+try:
+    import firewall.config
+    FW_VERSION = firewall.config.VERSION
+
+    from firewall.client import FirewallClient
+    from firewall.client import FirewallClientZoneSettings
+    from firewall.errors import FirewallError
+    import_failure = False
+
+    try:
+        fw = FirewallClient()
+        fw.getDefaultZone()
+
+    except (AttributeError, FirewallError):
+        # Firewalld is not currently running, permanent-only operations
+        fw_offline = True
+
+        # Import other required parts of the firewalld API
+        #
+        # NOTE:
+        #  online and offline operations do not share a common firewalld API
+        try:
+            from firewall.core.fw_test import Firewall_test
+            fw = Firewall_test()
+        except (ModuleNotFoundError):
+            # In firewalld version 0.7.0 this behavior changed
+            from firewall.core.fw import Firewall
+            fw = Firewall(offline=True)
+
+        fw.start()
+except ImportError:
+    pass
+
+
+class FirewallTransaction(object):
+    """
+    FirewallTransaction
+
+    This is the base class for all firewalld transactions we might want to have
+    """
+
+    def __init__(self, module, action_args=(), zone=None, desired_state=None,
+                 permanent=False, immediate=False, enabled_values=None, disabled_values=None):
+        # type: (AnsibleModule, tuple, str, str, bool, bool, list, list)
+        """
+        Initialize the transaction
+
+        :module: AnsibleModule, instance of AnsibleModule
+        :action_args: tuple, args to pass for the action to take place
+        :zone: str, firewall zone
+        :desired_state: str, the desired state (enabled, disabled, etc)
+        :permanent: bool, action should be permanent
+        :immediate: bool, action should take place immediately
+        :enabled_values: str[], acceptable values for enabling something (default: enabled)
+        :disabled_values: str[], acceptable values for disabling something (default: disabled)
+        """
+
+        self.module = module
+        self.fw = fw
+        self.action_args = action_args
+
+        if zone:
+            self.zone = zone
+        else:
+            if fw_offline:
+                self.zone = fw.get_default_zone()
+            else:
+                self.zone = fw.getDefaultZone()
+
+        self.desired_state = desired_state
+        self.permanent = permanent
+        self.immediate = immediate
+        self.fw_offline = fw_offline
+        self.enabled_values = enabled_values or ["enabled"]
+        self.disabled_values = disabled_values or ["disabled"]
+
+        # List of messages that we'll call module.fail_json or module.exit_json
+        # with.
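+        # Populated as the transaction proceeds, e.g. ['Permanent operation'].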
+        self.msgs = []
+
+        # Allow for custom messages to be added for certain subclass transaction
+        # types
+        self.enabled_msg = None
+        self.disabled_msg = None
+
+    #####################
+    # exception handling
+    #
+    def action_handler(self, action_func, action_func_args):
+        """
+        Function to wrap calls to make actions on firewalld in try/except
+        logic and emit (hopefully) useful error messages
+        """
+
+        try:
+            return action_func(*action_func_args)
+        except Exception as e:
+
+            # If there are any commonly known errors, provide more context
+            # here to help the users diagnose what's wrong.
+            if "INVALID_SERVICE" in "%s" % e:
+                self.msgs.append("Services are defined by port/tcp relationship and named as they are in /etc/services (on most systems)")
+
+            if len(self.msgs) > 0:
+                self.module.fail_json(
+                    msg='ERROR: Exception caught: %s %s' % (e, ', '.join(self.msgs))
+                )
+            else:
+                self.module.fail_json(msg='ERROR: Exception caught: %s' % e)
+
+    def get_fw_zone_settings(self):
+        if self.fw_offline:
+            fw_zone = self.fw.config.get_zone(self.zone)
+            fw_settings = FirewallClientZoneSettings(
+                list(self.fw.config.get_zone_config(fw_zone))
+            )
+        else:
+            fw_zone = self.fw.config().getZoneByName(self.zone)
+            fw_settings = fw_zone.getSettings()
+
+        return (fw_zone, fw_settings)
+
+    def update_fw_settings(self, fw_zone, fw_settings):
+        if self.fw_offline:
+            self.fw.config.set_zone_config(fw_zone, fw_settings.settings)
+        else:
+            fw_zone.update(fw_settings)
+
+    def get_enabled_immediate(self):
+        raise NotImplementedError
+
+    def get_enabled_permanent(self):
+        raise NotImplementedError
+
+    def set_enabled_immediate(self):
+        raise NotImplementedError
+
+    def set_enabled_permanent(self):
+        raise NotImplementedError
+
+    def set_disabled_immediate(self):
+        raise NotImplementedError
+
+    def set_disabled_permanent(self):
+        raise NotImplementedError
+
+    def run(self):
+        """
+        run
+
+        This function contains the "transaction logic", wherein all
+        operations follow a similar pattern and simply call different
+        functions to carry their action out.
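+
+        A sketch of typical usage from a subclass (names hypothetical):
+
+            transaction = ServiceTransaction(
+                module, action_args=('https',), zone='public',
+                desired_state='enabled', permanent=True, immediate=True)
+            changed, msgs = transaction.run()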
+ """ + + self.changed = False + + if self.immediate and self.permanent: + is_enabled_permanent = self.action_handler( + self.get_enabled_permanent, + self.action_args + ) + is_enabled_immediate = self.action_handler( + self.get_enabled_immediate, + self.action_args + ) + self.msgs.append('Permanent and Non-Permanent(immediate) operation') + + if self.desired_state in self.enabled_values: + if not is_enabled_permanent or not is_enabled_immediate: + if self.module.check_mode: + self.module.exit_json(changed=True) + if not is_enabled_permanent: + self.action_handler( + self.set_enabled_permanent, + self.action_args + ) + self.changed = True + if not is_enabled_immediate: + self.action_handler( + self.set_enabled_immediate, + self.action_args + ) + self.changed = True + if self.changed and self.enabled_msg: + self.msgs.append(self.enabled_msg) + + elif self.desired_state in self.disabled_values: + if is_enabled_permanent or is_enabled_immediate: + if self.module.check_mode: + self.module.exit_json(changed=True) + if is_enabled_permanent: + self.action_handler( + self.set_disabled_permanent, + self.action_args + ) + self.changed = True + if is_enabled_immediate: + self.action_handler( + self.set_disabled_immediate, + self.action_args + ) + self.changed = True + if self.changed and self.disabled_msg: + self.msgs.append(self.disabled_msg) + + elif self.permanent and not self.immediate: + is_enabled = self.action_handler( + self.get_enabled_permanent, + self.action_args + ) + self.msgs.append('Permanent operation') + + if self.desired_state in self.enabled_values: + if not is_enabled: + if self.module.check_mode: + self.module.exit_json(changed=True) + + self.action_handler( + self.set_enabled_permanent, + self.action_args + ) + self.changed = True + if self.changed and self.enabled_msg: + self.msgs.append(self.enabled_msg) + + elif self.desired_state in self.disabled_values: + if is_enabled: + if self.module.check_mode: + self.module.exit_json(changed=True) + + self.action_handler( + self.set_disabled_permanent, + self.action_args + ) + self.changed = True + if self.changed and self.disabled_msg: + self.msgs.append(self.disabled_msg) + + elif self.immediate and not self.permanent: + is_enabled = self.action_handler( + self.get_enabled_immediate, + self.action_args + ) + self.msgs.append('Non-permanent operation') + + if self.desired_state in self.enabled_values: + if not is_enabled: + if self.module.check_mode: + self.module.exit_json(changed=True) + + self.action_handler( + self.set_enabled_immediate, + self.action_args + ) + self.changed = True + if self.changed and self.enabled_msg: + self.msgs.append(self.enabled_msg) + + elif self.desired_state in self.disabled_values: + if is_enabled: + if self.module.check_mode: + self.module.exit_json(changed=True) + + self.action_handler( + self.set_disabled_immediate, + self.action_args + ) + self.changed = True + if self.changed and self.disabled_msg: + self.msgs.append(self.disabled_msg) + + return (self.changed, self.msgs) + + @staticmethod + def sanity_check(module): + """ + Perform sanity checking, version checks, etc + + :module: AnsibleModule instance + """ + + if FW_VERSION and fw_offline: + # Pre-run version checking + if LooseVersion(FW_VERSION) < LooseVersion("0.3.9"): + module.fail_json(msg='unsupported version of firewalld, offline operations require >= 0.3.9 - found: {0}'.format(FW_VERSION)) + elif FW_VERSION and not fw_offline: + # Pre-run version checking + if LooseVersion(FW_VERSION) < LooseVersion("0.2.11"): + 
module.fail_json(msg='unsupported version of firewalld, requires >= 0.2.11 - found: {0}'.format(FW_VERSION))
+
+        # Check for firewalld running
+        try:
+            if fw.connected is False:
+                module.fail_json(msg='firewalld service must be running, or try with offline=true')
+        except AttributeError:
+            module.fail_json(msg="firewalld connection can't be established, "
+                                 "installed version (%s) likely too old. Requires firewalld >= 0.2.11" % FW_VERSION)
+
+        if import_failure:
+            module.fail_json(
+                msg='Python Module not found: firewalld and its python module are required for this module, '
+                    'version 0.2.11 or newer required (0.3.9 or newer for offline operations)'
+            )
diff --git a/plugins/module_utils/gcdns.py b/plugins/module_utils/gcdns.py
new file mode 100644
index 0000000000..8aff67afa2
--- /dev/null
+++ b/plugins/module_utils/gcdns.py
@@ -0,0 +1,55 @@
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by Ansible
+# still belong to the author of the module, and may assign their own license
+# to the complete work.
+#
+# Copyright (c), Franck Cuny, 2014
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without modification,
+# are permitted provided that the following conditions are met:
+#
+#    * Redistributions of source code must retain the above copyright
+#      notice, this list of conditions and the following disclaimer.
+#    * Redistributions in binary form must reproduce the above copyright notice,
+#      this list of conditions and the following disclaimer in the documentation
+#      and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# + +try: + from libcloud.dns.types import Provider + from libcloud.dns.providers import get_driver + HAS_LIBCLOUD_BASE = True +except ImportError: + HAS_LIBCLOUD_BASE = False + +from ansible_collections.community.general.plugins.module_utils.gcp import gcp_connect +from ansible_collections.community.general.plugins.module_utils.gcp import unexpected_error_msg as gcp_error + +USER_AGENT_PRODUCT = "Ansible-gcdns" +USER_AGENT_VERSION = "v1" + + +def gcdns_connect(module, provider=None): + """Return a GCP connection for Google Cloud DNS.""" + if not HAS_LIBCLOUD_BASE: + module.fail_json(msg='libcloud must be installed to use this module') + + provider = provider or Provider.GOOGLE + return gcp_connect(module, provider, get_driver, USER_AGENT_PRODUCT, USER_AGENT_VERSION) + + +def unexpected_error_msg(error): + """Create an error string based on passed in error.""" + return gcp_error(error) diff --git a/plugins/module_utils/gce.py b/plugins/module_utils/gce.py new file mode 100644 index 0000000000..053527575a --- /dev/null +++ b/plugins/module_utils/gce.py @@ -0,0 +1,54 @@ +# This code is part of Ansible, but is an independent component. +# This particular file snippet, and this file snippet only, is BSD licensed. +# Modules you write using this snippet, which is embedded dynamically by Ansible +# still belong to the author of the module, and may assign their own license +# to the complete work. +# +# Copyright (c), Franck Cuny , 2014 +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without modification, +# are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +try: + from libcloud.compute.types import Provider + from libcloud.compute.providers import get_driver + HAS_LIBCLOUD_BASE = True +except ImportError: + HAS_LIBCLOUD_BASE = False + +from ansible_collections.community.general.plugins.module_utils.gcp import gcp_connect +from ansible_collections.community.general.plugins.module_utils.gcp import unexpected_error_msg as gcp_error + +USER_AGENT_PRODUCT = "Ansible-gce" +USER_AGENT_VERSION = "v1" + + +def gce_connect(module, provider=None): + """Return a GCP connection for Google Compute Engine.""" + if not HAS_LIBCLOUD_BASE: + module.fail_json(msg='libcloud must be installed to use this module') + provider = provider or Provider.GCE + + return gcp_connect(module, provider, get_driver, USER_AGENT_PRODUCT, USER_AGENT_VERSION) + + +def unexpected_error_msg(error): + """Create an error string based on passed in error.""" + return gcp_error(error) diff --git a/plugins/module_utils/gcp.py b/plugins/module_utils/gcp.py new file mode 100644 index 0000000000..508df44ab6 --- /dev/null +++ b/plugins/module_utils/gcp.py @@ -0,0 +1,815 @@ +# This code is part of Ansible, but is an independent component. +# This particular file snippet, and this file snippet only, is BSD licensed. +# Modules you write using this snippet, which is embedded dynamically by Ansible +# still belong to the author of the module, and may assign their own license +# to the complete work. +# +# Copyright (c), Franck Cuny , 2014 +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without modification, +# are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+#
+
+import json
+import os
+import time
+import traceback
+from distutils.version import LooseVersion
+
+# libcloud
+try:
+    import libcloud
+    HAS_LIBCLOUD_BASE = True
+except ImportError:
+    HAS_LIBCLOUD_BASE = False
+
+# google-auth
+try:
+    import google.auth
+    from google.oauth2 import service_account
+    HAS_GOOGLE_AUTH = True
+except ImportError:
+    HAS_GOOGLE_AUTH = False
+
+# google-python-api
+try:
+    import google_auth_httplib2
+    from httplib2 import Http
+    from googleapiclient.http import set_user_agent
+    from googleapiclient.errors import HttpError
+    from apiclient.discovery import build
+    HAS_GOOGLE_API_LIB = True
+except ImportError:
+    HAS_GOOGLE_API_LIB = False
+
+
+import ansible.module_utils.six.moves.urllib.parse as urlparse
+
+GCP_DEFAULT_SCOPES = ['https://www.googleapis.com/auth/cloud-platform']
+
+
+def _get_gcp_ansible_credentials(module):
+    """Helper to fetch creds from AnsibleModule object."""
+    service_account_email = module.params.get('service_account_email', None)
+    # Note: pem_file is discouraged and will be deprecated
+    credentials_file = module.params.get('pem_file', None) or module.params.get(
+        'credentials_file', None)
+    project_id = module.params.get('project_id', None)
+
+    return (service_account_email, credentials_file, project_id)
+
+
+def _get_gcp_environ_var(var_name, default_value):
+    """Wrapper around os.environ.get call."""
+    return os.environ.get(
+        var_name, default_value)
+
+
+def _get_gcp_environment_credentials(service_account_email, credentials_file, project_id):
+    """Helper to look in environment variables for credentials."""
+    # If any of the values are not given as parameters, check the appropriate
+    # environment variables.
+    if not service_account_email:
+        service_account_email = _get_gcp_environ_var('GCE_EMAIL', None)
+    if not credentials_file:
+        credentials_file = _get_gcp_environ_var(
+            'GCE_CREDENTIALS_FILE_PATH', None) or _get_gcp_environ_var(
+            'GOOGLE_APPLICATION_CREDENTIALS', None) or _get_gcp_environ_var(
+            'GCE_PEM_FILE_PATH', None)
+    if not project_id:
+        project_id = _get_gcp_environ_var('GCE_PROJECT', None) or _get_gcp_environ_var(
+            'GOOGLE_CLOUD_PROJECT', None)
+    return (service_account_email, credentials_file, project_id)
+
+
+def _get_gcp_credentials(module, require_valid_json=True, check_libcloud=False):
+    """
+    Obtain GCP credentials by trying various methods.
+
+    There are 3 ways to specify GCP credentials:
+    1. Specify via Ansible module parameters (recommended).
+    2. Specify via environment variables.  Two sets of env vars are available:
+       a) GOOGLE_CLOUD_PROJECT, GOOGLE_APPLICATION_CREDENTIALS (preferred)
+       b) GCE_PROJECT, GCE_CREDENTIALS_FILE_PATH, GCE_EMAIL (legacy, not recommended; req'd if
+          using p12 key)
+    3. Specify via libcloud secrets.py file (deprecated).
+
+    There are 3 helper functions to assist in the above.
+
+    Regardless of method, the user also has the option of specifying a JSON
+    file or a p12 file as the credentials file.  JSON is strongly recommended and
+    p12 will be removed in the future.
+
+    Additionally, flags may be set to require valid json and check the libcloud
+    version.
+
+    AnsibleModule.fail_json is called only if the project_id cannot be found.
+
+    :param module: initialized Ansible module object
+    :type module: `class AnsibleModule`
+
+    :param require_valid_json: If true, require credentials to be valid JSON.  Default is True.
+    :type require_valid_json: ``bool``
+
+    :params check_libcloud: If true, check the libcloud version available to see if
+                            JSON creds are supported.
+ :type check_libcloud: ``bool`` + + :return: {'service_account_email': service_account_email, + 'credentials_file': credentials_file, + 'project_id': project_id} + :rtype: ``dict`` + """ + (service_account_email, + credentials_file, + project_id) = _get_gcp_ansible_credentials(module) + + # If any of the values are not given as parameters, check the appropriate + # environment variables. + (service_account_email, + credentials_file, + project_id) = _get_gcp_environment_credentials(service_account_email, + credentials_file, project_id) + + if credentials_file is None or project_id is None or service_account_email is None: + if check_libcloud is True: + if project_id is None: + # TODO(supertom): this message is legacy and integration tests + # depend on it. + module.fail_json(msg='Missing GCE connection parameters in libcloud ' + 'secrets file.') + else: + if project_id is None: + module.fail_json(msg=('GCP connection error: unable to determine project (%s) or ' + 'credentials file (%s)' % (project_id, credentials_file))) + # Set these fields to empty strings if they are None + # consumers of this will make the distinction between an empty string + # and None. + if credentials_file is None: + credentials_file = '' + if service_account_email is None: + service_account_email = '' + + # ensure the credentials file is found and is in the proper format. + if credentials_file: + _validate_credentials_file(module, credentials_file, + require_valid_json=require_valid_json, + check_libcloud=check_libcloud) + + return {'service_account_email': service_account_email, + 'credentials_file': credentials_file, + 'project_id': project_id} + + +def _validate_credentials_file(module, credentials_file, require_valid_json=True, check_libcloud=False): + """ + Check for valid credentials file. + + Optionally check for JSON format and if libcloud supports JSON. + + :param module: initialized Ansible module object + :type module: `class AnsibleModule` + + :param credentials_file: path to file on disk + :type credentials_file: ``str``. Complete path to file on disk. + + :param require_valid_json: This argument is ignored as of Ansible 2.7. + :type require_valid_json: ``bool`` + + :params check_libcloud: If true, check the libcloud version available to see if + JSON creds are supported. + :type check_libcloud: ``bool`` + + :returns: True + :rtype: ``bool`` + """ + try: + # Try to read credentials as JSON + with open(credentials_file) as credentials: + json.loads(credentials.read()) + # If the credentials are proper JSON and we do not have the minimum + # required libcloud version, bail out and return a descriptive + # error + if check_libcloud and LooseVersion(libcloud.__version__) < '0.17.0': + module.fail_json(msg='Using JSON credentials but libcloud minimum version not met. ' + 'Upgrade to libcloud>=0.17.0.') + return True + except IOError as e: + module.fail_json(msg='GCP Credentials File %s not found.' % + credentials_file, changed=False) + return False + except ValueError as e: + module.fail_json( + msg='Non-JSON credentials file provided. 
Please generate a new JSON key from the Google Cloud console',
+            changed=False)
+
+
+def gcp_connect(module, provider, get_driver, user_agent_product, user_agent_version):
+    """Return a Google libcloud driver connection."""
+    if not HAS_LIBCLOUD_BASE:
+        module.fail_json(msg='libcloud must be installed to use this module')
+
+    creds = _get_gcp_credentials(module,
+                                 require_valid_json=False,
+                                 check_libcloud=True)
+    try:
+        gcp = get_driver(provider)(creds['service_account_email'], creds['credentials_file'],
+                                   datacenter=module.params.get('zone', None),
+                                   project=creds['project_id'])
+        gcp.connection.user_agent_append("%s/%s" % (
+            user_agent_product, user_agent_version))
+    except (RuntimeError, ValueError) as e:
+        module.fail_json(msg=str(e), changed=False)
+    except Exception as e:
+        module.fail_json(msg=unexpected_error_msg(e), changed=False)
+
+    return gcp
+
+
+def get_google_cloud_credentials(module, scopes=None):
+    """
+    Get credentials object for use with Google Cloud client.
+
+    Attempts to obtain credentials by calling _get_gcp_credentials. If those are
+    not present, it will attempt to connect via Application Default Credentials.
+
+    To connect via libcloud, don't use this function, use gcp_connect instead.  For
+    Google Python API Client, see get_google_api_auth for how to connect.
+
+    For more information on Google's client library options for Python, see:
+    U(https://cloud.google.com/apis/docs/client-libraries-explained#google_api_client_libraries)
+
+    Google Cloud example:
+      creds, params = get_google_cloud_credentials(module, scopes)
+      pubsub_client = pubsub.Client(project=params['project_id'], credentials=creds)
+      pubsub_client.user_agent = 'ansible-pubsub-0.1'
+      ...
+
+    :param module: initialized Ansible module object
+    :type module: `class AnsibleModule`
+
+    :param scopes: list of scopes
+    :type scopes: ``list`` of URIs
+
+    :returns: A tuple containing (google authorized) credentials object and
+              params dict {'service_account_email': '...', 'credentials_file': '...', 'project_id': ...}
+    :rtype: ``tuple``
+    """
+    scopes = [] if scopes is None else scopes
+
+    if not HAS_GOOGLE_AUTH:
+        module.fail_json(msg='Please install google-auth.')
+
+    conn_params = _get_gcp_credentials(module,
+                                       require_valid_json=True,
+                                       check_libcloud=False)
+    try:
+        if conn_params['credentials_file']:
+            credentials = service_account.Credentials.from_service_account_file(
+                conn_params['credentials_file'])
+            if scopes:
+                credentials = credentials.with_scopes(scopes)
+        else:
+            (credentials, project_id) = google.auth.default(
+                scopes=scopes)
+            if project_id is not None:
+                conn_params['project_id'] = project_id
+
+        return (credentials, conn_params)
+    except Exception as e:
+        module.fail_json(msg=unexpected_error_msg(e), changed=False)
+        return (None, None)
+
+
+def get_google_api_auth(module, scopes=None, user_agent_product='ansible-python-api', user_agent_version='NA'):
+    """
+    Authentication for use with google-python-api-client.
+
+    Function calls get_google_cloud_credentials, which attempts to assemble the credentials
+    from various locations.  Next it attempts to authenticate with Google.
+
+    This function returns an httplib2 (compatible) object that can be provided to the Google Python API client.
+
+    For libcloud, don't use this function, use gcp_connect instead.  For Google Cloud, see
+    get_google_cloud_credentials for how to connect.
+
+    For more information on Google's client library options for Python, see:
+    U(https://cloud.google.com/apis/docs/client-libraries-explained#google_api_client_libraries)
+
+    Google API example:
+      http_auth, conn_params = get_google_api_auth(module, scopes, user_agent_product, user_agent_version)
+      service = build('myservice', 'v1', http=http_auth)
+      ...
+
+    :param module: initialized Ansible module object
+    :type module: `class AnsibleModule`
+
+    :param scopes: list of scopes
+    :type scopes: ``list`` of URIs
+
+    :param user_agent_product: User agent product.  e.g. 'ansible-python-api'
+    :type user_agent_product: ``str``
+
+    :param user_agent_version: Version string to append to product.  e.g. 'NA' or '0.1'
+    :type user_agent_version: ``str``
+
+    :returns: A tuple containing (google authorized) httplib2 request object and a
+              params dict {'service_account_email': '...', 'credentials_file': '...', 'project_id': ...}
+    :rtype: ``tuple``
+    """
+    scopes = [] if scopes is None else scopes
+
+    if not HAS_GOOGLE_API_LIB:
+        module.fail_json(msg="Please install google-api-python-client library")
+    if not scopes:
+        scopes = GCP_DEFAULT_SCOPES
+    try:
+        (credentials, conn_params) = get_google_cloud_credentials(module, scopes)
+        http = set_user_agent(Http(), '%s-%s' %
+                              (user_agent_product, user_agent_version))
+        http_auth = google_auth_httplib2.AuthorizedHttp(credentials, http=http)
+
+        return (http_auth, conn_params)
+    except Exception as e:
+        module.fail_json(msg=unexpected_error_msg(e), changed=False)
+        return (None, None)
+
+
+def get_google_api_client(module, service, user_agent_product, user_agent_version,
+                          scopes=None, api_version='v1'):
+    """
+    Get the discovery-based python client.  Use when a cloud client is not available.
+
+    client = get_google_api_client(module, 'compute', user_agent_product=USER_AGENT_PRODUCT,
+                                   user_agent_version=USER_AGENT_VERSION)
+
+    :returns: A tuple containing the authorized client to the specified service and a
+              params dict {'service_account_email': '...', 'credentials_file': '...', 'project_id': ...}
+    :rtype: ``tuple``
+    """
+    if not scopes:
+        scopes = GCP_DEFAULT_SCOPES
+
+    http_auth, conn_params = get_google_api_auth(module, scopes=scopes,
+                                                 user_agent_product=user_agent_product,
+                                                 user_agent_version=user_agent_version)
+    client = build(service, api_version, http=http_auth)
+
+    return (client, conn_params)
+
+
+def check_min_pkg_version(pkg_name, minimum_version):
+    """Return True if the installed version of pkg_name is >= minimum_version."""
+    from pkg_resources import get_distribution
+    try:
+        installed_version = get_distribution(pkg_name).version
+        return LooseVersion(installed_version) >= minimum_version
+    except Exception:
+        return False
+
+
+def unexpected_error_msg(error):
+    """Create an error string based on passed in error."""
+    return 'Unexpected response: (%s). Detail: %s' % (str(error), traceback.format_exc())
+
+
+def get_valid_location(module, driver, location, location_type='zone'):
+    if location_type == 'zone':
+        l = driver.ex_get_zone(location)
+    else:
+        l = driver.ex_get_region(location)
+    if l is None:
+        link = 'https://cloud.google.com/compute/docs/regions-zones/regions-zones#available'
+        module.fail_json(msg=('%s %s is invalid. Please see the list of '
+                              'available %s at %s' % (
+                                  location_type, location, location_type, link)),
+                         changed=False)
+    return l
+
+
+def check_params(params, field_list):
+    """
+    Helper to validate params.
+
+    Use this in function definitions if they require specific fields
+    to be present.
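+
+    Example (sketch, field names hypothetical):
+        check_params(module.params, [
+            {'name': 'zone', 'required': True, 'type': str},
+            {'name': 'size', 'type': int, 'min': 1, 'max': 64},
+        ])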
+ + :param params: structure that contains the fields + :type params: ``dict`` + + :param field_list: list of dict representing the fields + [{'name': str, 'required': True/False', 'type': cls}] + :type field_list: ``list`` of ``dict`` + + :return True or raises ValueError + :rtype: ``bool`` or `class:ValueError` + """ + for d in field_list: + if not d['name'] in params: + if 'required' in d and d['required'] is True: + raise ValueError(("%s is required and must be of type: %s" % + (d['name'], str(d['type'])))) + else: + if not isinstance(params[d['name']], d['type']): + raise ValueError(("%s must be of type: %s. %s (%s) provided." % ( + d['name'], str(d['type']), params[d['name']], + type(params[d['name']])))) + if 'values' in d: + if params[d['name']] not in d['values']: + raise ValueError(("%s must be one of: %s" % ( + d['name'], ','.join(d['values'])))) + if isinstance(params[d['name']], int): + if 'min' in d: + if params[d['name']] < d['min']: + raise ValueError(("%s must be greater than or equal to: %s" % ( + d['name'], d['min']))) + if 'max' in d: + if params[d['name']] > d['max']: + raise ValueError("%s must be less than or equal to: %s" % ( + d['name'], d['max'])) + return True + + +class GCPUtils(object): + """ + Helper utilities for GCP. + """ + + @staticmethod + def underscore_to_camel(txt): + return txt.split('_')[0] + ''.join(x.capitalize() or '_' for x in txt.split('_')[1:]) + + @staticmethod + def remove_non_gcp_params(params): + """ + Remove params if found. + """ + params_to_remove = ['state'] + for p in params_to_remove: + if p in params: + del params[p] + + return params + + @staticmethod + def params_to_gcp_dict(params, resource_name=None): + """ + Recursively convert ansible params to GCP Params. + + Keys are converted from snake to camelCase + ex: default_service to defaultService + + Handles lists, dicts and strings + + special provision for the resource name + """ + if not isinstance(params, dict): + return params + gcp_dict = {} + params = GCPUtils.remove_non_gcp_params(params) + for k, v in params.items(): + gcp_key = GCPUtils.underscore_to_camel(k) + if isinstance(v, dict): + retval = GCPUtils.params_to_gcp_dict(v) + gcp_dict[gcp_key] = retval + elif isinstance(v, list): + gcp_dict[gcp_key] = [GCPUtils.params_to_gcp_dict(x) for x in v] + else: + if resource_name and k == resource_name: + gcp_dict['name'] = v + else: + gcp_dict[gcp_key] = v + return gcp_dict + + @staticmethod + def execute_api_client_req(req, client=None, raw=True, + operation_timeout=180, poll_interval=5, + raise_404=True): + """ + General python api client interaction function. + + For use with google-api-python-client, or clients created + with get_google_api_client function + Not for use with Google Cloud client libraries + + For long-running operations, we make an immediate query and then + sleep poll_interval before re-querying. After the request is done + we rebuild the request with a get method and return the result. + + """ + try: + resp = req.execute() + + if not resp: + return None + + if raw: + return resp + + if resp['kind'] == 'compute#operation': + resp = GCPUtils.execute_api_client_operation_req(req, resp, + client, + operation_timeout, + poll_interval) + + if 'items' in resp: + return resp['items'] + + return resp + except HttpError as h: + # Note: 404s can be generated (incorrectly) for dependent + # resources not existing. We let the caller determine if + # they want 404s raised for their invocation. 
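+            # e.g. (sketch) execute_api_client_req(req, client=client, raw=False,
+            # raise_404=False) returns None for a missing dependent resource
+            # instead of raising HttpError.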
+ if h.resp.status == 404 and not raise_404: + return None + else: + raise + except Exception: + raise + + @staticmethod + def execute_api_client_operation_req(orig_req, op_resp, client, + operation_timeout=180, poll_interval=5): + """ + Poll an operation for a result. + """ + parsed_url = GCPUtils.parse_gcp_url(orig_req.uri) + project_id = parsed_url['project'] + resource_name = GCPUtils.get_gcp_resource_from_methodId( + orig_req.methodId) + resource = GCPUtils.build_resource_from_name(client, resource_name) + + start_time = time.time() + + complete = False + attempts = 1 + while not complete: + if start_time + operation_timeout >= time.time(): + op_req = client.globalOperations().get( + project=project_id, operation=op_resp['name']) + op_resp = op_req.execute() + if op_resp['status'] != 'DONE': + time.sleep(poll_interval) + attempts += 1 + else: + complete = True + if op_resp['operationType'] == 'delete': + # don't wait for the delete + return True + elif op_resp['operationType'] in ['insert', 'update', 'patch']: + # TODO(supertom): Isolate 'build-new-request' stuff. + resource_name_singular = GCPUtils.get_entity_name_from_resource_name( + resource_name) + if op_resp['operationType'] == 'insert' or 'entity_name' not in parsed_url: + parsed_url['entity_name'] = GCPUtils.parse_gcp_url(op_resp['targetLink'])[ + 'entity_name'] + args = {'project': project_id, + resource_name_singular: parsed_url['entity_name']} + new_req = resource.get(**args) + resp = new_req.execute() + return resp + else: + # assuming multiple entities, do a list call. + new_req = resource.list(project=project_id) + resp = new_req.execute() + return resp + else: + # operation didn't complete on time. + raise GCPOperationTimeoutError("Operation timed out: %s" % ( + op_resp['targetLink'])) + + @staticmethod + def build_resource_from_name(client, resource_name): + try: + method = getattr(client, resource_name) + return method() + except AttributeError: + raise NotImplementedError('%s is not an attribute of %s' % (resource_name, + client)) + + @staticmethod + def get_gcp_resource_from_methodId(methodId): + try: + parts = methodId.split('.') + if len(parts) != 3: + return None + else: + return parts[1] + except AttributeError: + return None + + @staticmethod + def get_entity_name_from_resource_name(resource_name): + if not resource_name: + return None + + try: + # Chop off global or region prefixes + if resource_name.startswith('global'): + resource_name = resource_name.replace('global', '') + elif resource_name.startswith('regional'): + resource_name = resource_name.replace('region', '') + + # ensure we have a lower case first letter + resource_name = resource_name[0].lower() + resource_name[1:] + + if resource_name[-3:] == 'ies': + return resource_name.replace( + resource_name[-3:], 'y') + if resource_name[-1] == 's': + return resource_name[:-1] + + return resource_name + + except AttributeError: + return None + + @staticmethod + def parse_gcp_url(url): + """ + Parse GCP urls and return dict of parts. 
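+
+        For example (sketch, project and instance names hypothetical), a
+        selfLink such as
+        https://www.googleapis.com/compute/v1/projects/my-project/zones/us-central1-a/instances/my-vm
+        yields 'service': 'compute', 'api_version': 'v1', 'project': 'my-project',
+        'zone': 'us-central1-a', 'resource_name': 'instances' and 'entity_name': 'my-vm'.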
+
+        Supported URL structures:
+        /SERVICE/VERSION/'projects'/PROJECT_ID/RESOURCE
+        /SERVICE/VERSION/'projects'/PROJECT_ID/RESOURCE/ENTITY_NAME
+        /SERVICE/VERSION/'projects'/PROJECT_ID/RESOURCE/ENTITY_NAME/METHOD_NAME
+        /SERVICE/VERSION/'projects'/PROJECT_ID/'global'/RESOURCE
+        /SERVICE/VERSION/'projects'/PROJECT_ID/'global'/RESOURCE/ENTITY_NAME
+        /SERVICE/VERSION/'projects'/PROJECT_ID/'global'/RESOURCE/ENTITY_NAME/METHOD_NAME
+        /SERVICE/VERSION/'projects'/PROJECT_ID/LOCATION_TYPE/LOCATION/RESOURCE
+        /SERVICE/VERSION/'projects'/PROJECT_ID/LOCATION_TYPE/LOCATION/RESOURCE/ENTITY_NAME
+        /SERVICE/VERSION/'projects'/PROJECT_ID/LOCATION_TYPE/LOCATION/RESOURCE/ENTITY_NAME/METHOD_NAME
+
+        :param url: GCP-generated URL, such as a selflink or resource location.
+        :type url: ``str``
+
+        :return: dictionary of parts. Includes standard components of urlparse, plus
+                 GCP-specific 'service', 'api_version', 'project' and
+                 'resource_name' keys. Optionally, 'zone', 'region', 'entity_name'
+                 and 'method_name', if applicable.
+        :rtype: ``dict``
+        """
+
+        p = urlparse.urlparse(url)
+        if not p:
+            return None
+        else:
+            # we add extra items such as
+            # zone, region and resource_name
+            url_parts = {}
+            url_parts['scheme'] = p.scheme
+            url_parts['host'] = p.netloc
+            url_parts['path'] = p.path
+            if p.path.find('/') == 0:
+                url_parts['path'] = p.path[1:]
+            url_parts['params'] = p.params
+            url_parts['fragment'] = p.fragment
+            url_parts['query'] = p.query
+            url_parts['project'] = None
+            url_parts['service'] = None
+            url_parts['api_version'] = None
+
+            path_parts = url_parts['path'].split('/')
+            url_parts['service'] = path_parts[0]
+            url_parts['api_version'] = path_parts[1]
+            if path_parts[2] == 'projects':
+                url_parts['project'] = path_parts[3]
+            else:
+                # invalid URL
+                raise GCPInvalidURLError('unable to parse: %s' % url)
+
+            if 'global' in path_parts:
+                url_parts['global'] = True
+                idx = path_parts.index('global')
+                if len(path_parts) - idx == 4:
+                    # we have a resource, entity and method_name
+                    url_parts['resource_name'] = path_parts[idx + 1]
+                    url_parts['entity_name'] = path_parts[idx + 2]
+                    url_parts['method_name'] = path_parts[idx + 3]
+
+                if len(path_parts) - idx == 3:
+                    # we have a resource and entity
+                    url_parts['resource_name'] = path_parts[idx + 1]
+                    url_parts['entity_name'] = path_parts[idx + 2]
+
+                if len(path_parts) - idx == 2:
+                    url_parts['resource_name'] = path_parts[idx + 1]
+
+                if len(path_parts) - idx < 2:
+                    # invalid URL
+                    raise GCPInvalidURLError('unable to parse: %s' % url)
+
+            elif 'regions' in path_parts or 'zones' in path_parts:
+                idx = -1
+                if 'regions' in path_parts:
+                    idx = path_parts.index('regions')
+                    url_parts['region'] = path_parts[idx + 1]
+                else:
+                    idx = path_parts.index('zones')
+                    url_parts['zone'] = path_parts[idx + 1]
+
+                if len(path_parts) - idx == 5:
+                    # we have a resource, entity and method_name
+                    url_parts['resource_name'] = path_parts[idx + 2]
+                    url_parts['entity_name'] = path_parts[idx + 3]
+                    url_parts['method_name'] = path_parts[idx + 4]
+
+                if len(path_parts) - idx == 4:
+                    # we have a resource and entity
+                    url_parts['resource_name'] = path_parts[idx + 2]
+                    url_parts['entity_name'] = path_parts[idx + 3]
+
+                if len(path_parts) - idx == 3:
+                    url_parts['resource_name'] = path_parts[idx + 2]
+
+                if len(path_parts) - idx < 3:
+                    # invalid URL
+                    raise GCPInvalidURLError('unable to parse: %s' % url)
+
+            else:
+                # no location in URL.
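+                # e.g. (sketch) .../compute/v1/projects/my-project/images/my-image:
+                # resource and entity follow directly after the project segment.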
+ idx = path_parts.index('projects') + if len(path_parts) - idx == 5: + # we have a resource, entity and method_name + url_parts['resource_name'] = path_parts[idx + 2] + url_parts['entity_name'] = path_parts[idx + 3] + url_parts['method_name'] = path_parts[idx + 4] + + if len(path_parts) - idx == 4: + # we have a resource and entity + url_parts['resource_name'] = path_parts[idx + 2] + url_parts['entity_name'] = path_parts[idx + 3] + + if len(path_parts) - idx == 3: + url_parts['resource_name'] = path_parts[idx + 2] + + if len(path_parts) - idx < 3: + # invalid URL + raise GCPInvalidURLError('unable to parse: %s' % url) + + return url_parts + + @staticmethod + def build_googleapi_url(project, api_version='v1', service='compute'): + return 'https://www.googleapis.com/%s/%s/projects/%s' % (service, api_version, project) + + @staticmethod + def filter_gcp_fields(params, excluded_fields=None): + new_params = {} + if not excluded_fields: + excluded_fields = ['creationTimestamp', 'id', 'kind', + 'selfLink', 'fingerprint', 'description'] + + if isinstance(params, list): + new_params = [GCPUtils.filter_gcp_fields( + x, excluded_fields) for x in params] + elif isinstance(params, dict): + for k in params.keys(): + if k not in excluded_fields: + new_params[k] = GCPUtils.filter_gcp_fields( + params[k], excluded_fields) + else: + new_params = params + + return new_params + + @staticmethod + def are_params_equal(p1, p2): + """ + Check if two params dicts are equal. + TODO(supertom): need a way to filter out URLs, or they need to be built + """ + filtered_p1 = GCPUtils.filter_gcp_fields(p1) + filtered_p2 = GCPUtils.filter_gcp_fields(p2) + if filtered_p1 != filtered_p2: + return False + return True + + +class GCPError(Exception): + pass + + +class GCPOperationTimeoutError(GCPError): + pass + + +class GCPInvalidURLError(GCPError): + pass diff --git a/plugins/module_utils/gitlab.py b/plugins/module_utils/gitlab.py new file mode 100644 index 0000000000..5d8a7fea2b --- /dev/null +++ b/plugins/module_utils/gitlab.py @@ -0,0 +1,104 @@ +# -*- coding: utf-8 -*- + +# Copyright: (c) 2019, Guillaume Martinez (lunik@tiwabbit.fr) +# Copyright: (c) 2018, Marcus Watkins +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import +import json +from distutils.version import StrictVersion + +from ansible.module_utils.basic import missing_required_lib +from ansible.module_utils.urls import fetch_url +from ansible.module_utils._text import to_native + +try: + from urllib import quote_plus # Python 2.X +except ImportError: + from urllib.parse import quote_plus # Python 3+ + +import traceback + +GITLAB_IMP_ERR = None +try: + import gitlab + HAS_GITLAB_PACKAGE = True +except Exception: + GITLAB_IMP_ERR = traceback.format_exc() + HAS_GITLAB_PACKAGE = False + + +def request(module, api_url, project, path, access_token, private_token, rawdata='', method='GET'): + url = "%s/v4/projects/%s%s" % (api_url, quote_plus(project), path) + headers = {} + if access_token: + headers['Authorization'] = "Bearer %s" % access_token + else: + headers['Private-Token'] = private_token + + headers['Accept'] = "application/json" + headers['Content-Type'] = "application/json" + + response, info = fetch_url(module=module, url=url, headers=headers, data=rawdata, method=method) + status = info['status'] + content = "" + if response: + content = response.read() + if status == 204: + return True, content + elif status == 200 or status == 201: + return True, json.loads(content) + 
else:
+        return False, str(status) + ": " + content
+
+
+def findProject(gitlab_instance, identifier):
+    try:
+        project = gitlab_instance.projects.get(identifier)
+    except Exception as e:
+        current_user = gitlab_instance.user
+        try:
+            project = gitlab_instance.projects.get(current_user.username + '/' + identifier)
+        except Exception as e:
+            return None
+
+    return project
+
+
+def findGroup(gitlab_instance, identifier):
+    try:
+        project = gitlab_instance.groups.get(identifier)
+    except Exception as e:
+        return None
+
+    return project
+
+
+def gitlabAuthentication(module):
+    gitlab_url = module.params['api_url']
+    validate_certs = module.params['validate_certs']
+    gitlab_user = module.params['api_username']
+    gitlab_password = module.params['api_password']
+    gitlab_token = module.params['api_token']
+
+    if not HAS_GITLAB_PACKAGE:
+        module.fail_json(msg=missing_required_lib("python-gitlab"), exception=GITLAB_IMP_ERR)
+
+    try:
+        # The python-gitlab library removed support for username/password
+        # authentication in 1.13.0.
+        # Changelog: https://github.com/python-gitlab/python-gitlab/releases/tag/v1.13.0
+        # This condition allows older versions of the library to keep working.
+        if StrictVersion(gitlab.__version__) < StrictVersion("1.13.0"):
+            gitlab_instance = gitlab.Gitlab(url=gitlab_url, ssl_verify=validate_certs, email=gitlab_user, password=gitlab_password,
+                                            private_token=gitlab_token, api_version=4)
+        else:
+            gitlab_instance = gitlab.Gitlab(url=gitlab_url, ssl_verify=validate_certs, private_token=gitlab_token, api_version=4)
+
+        gitlab_instance.auth()
+    except (gitlab.exceptions.GitlabAuthenticationError, gitlab.exceptions.GitlabGetError) as e:
+        module.fail_json(msg="Failed to connect to GitLab server: %s" % to_native(e))
+    except (gitlab.exceptions.GitlabHttpError) as e:
+        module.fail_json(msg="Failed to connect to GitLab server: %s. "
+                             "GitLab removed the Session API, as private tokens were removed from user API endpoints in version 10.2."
% to_native(e)) + + return gitlab_instance diff --git a/plugins/module_utils/heroku.py b/plugins/module_utils/heroku.py new file mode 100644 index 0000000000..b6e89614f1 --- /dev/null +++ b/plugins/module_utils/heroku.py @@ -0,0 +1,41 @@ +# Copyright: (c) 2018, Ansible Project +# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +import traceback + +from ansible.module_utils.basic import env_fallback, missing_required_lib + +HAS_HEROKU = False +HEROKU_IMP_ERR = None +try: + import heroku3 + HAS_HEROKU = True +except ImportError: + HEROKU_IMP_ERR = traceback.format_exc() + + +class HerokuHelper(): + def __init__(self, module): + self.module = module + self.check_lib() + self.api_key = module.params["api_key"] + + def check_lib(self): + if not HAS_HEROKU: + self.module.fail_json(msg=missing_required_lib('heroku3'), exception=HEROKU_IMP_ERR) + + @staticmethod + def heroku_argument_spec(): + return dict( + api_key=dict(fallback=(env_fallback, ['HEROKU_API_KEY', 'TF_VAR_HEROKU_API_KEY']), type='str', no_log=True)) + + def get_heroku_client(self): + client = heroku3.from_key(self.api_key) + + if not client.is_authenticated: + self.module.fail_json(msg='Heroku authentication failure, please check your API Key') + + return client diff --git a/plugins/module_utils/hetzner.py b/plugins/module_utils/hetzner.py new file mode 100644 index 0000000000..2bc3d1666a --- /dev/null +++ b/plugins/module_utils/hetzner.py @@ -0,0 +1,171 @@ +# -*- coding: utf-8 -*- + +# This code is part of Ansible, but is an independent component. +# This particular file snippet, and this file snippet only, is BSD licensed. +# Modules you write using this snippet, which is embedded dynamically by Ansible +# still belong to the author of the module, and may assign their own license +# to the complete work. +# +# Copyright (c), Felix Fontein , 2019 +# +# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +from ansible.module_utils.urls import fetch_url +from ansible.module_utils.six.moves.urllib.parse import urlencode + +import time + + +HETZNER_DEFAULT_ARGUMENT_SPEC = dict( + hetzner_user=dict(type='str', required=True), + hetzner_password=dict(type='str', required=True, no_log=True), +) + +# The API endpoint is fixed. +BASE_URL = "https://robot-ws.your-server.de" + + +def fetch_url_json(module, url, method='GET', timeout=10, data=None, headers=None, accept_errors=None): + ''' + Make general request to Hetzner's JSON robot API. 
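+
+    It returns a pair ``(result, error)``: ``error`` is ``None`` on success,
+    or the error code when that code is listed in ``accept_errors``; any
+    other failure calls ``module.fail_json``.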
+ ''' + module.params['url_username'] = module.params['hetzner_user'] + module.params['url_password'] = module.params['hetzner_password'] + resp, info = fetch_url(module, url, method=method, timeout=timeout, data=data, headers=headers) + try: + content = resp.read() + except AttributeError: + content = info.pop('body', None) + + if not content: + module.fail_json(msg='Cannot retrieve content from {0}'.format(url)) + + try: + result = module.from_json(content.decode('utf8')) + if 'error' in result: + if accept_errors: + if result['error']['code'] in accept_errors: + return result, result['error']['code'] + module.fail_json(msg='Request failed: {0} {1} ({2})'.format( + result['error']['status'], + result['error']['code'], + result['error']['message'] + )) + return result, None + except ValueError: + module.fail_json(msg='Cannot decode content retrieved from {0}'.format(url)) + + +class CheckDoneTimeoutException(Exception): + def __init__(self, result, error): + super(CheckDoneTimeoutException, self).__init__() + self.result = result + self.error = error + + +def fetch_url_json_with_retries(module, url, check_done_callback, check_done_delay=10, check_done_timeout=180, skip_first=False, **kwargs): + ''' + Make general request to Hetzner's JSON robot API, with retries until a condition is satisfied. + + The condition is tested by calling ``check_done_callback(result, error)``. If it is not satisfied, + it will be retried with delays ``check_done_delay`` (in seconds) until a total timeout of + ``check_done_timeout`` (in seconds) since the time the first request is started is reached. + + If ``skip_first`` is specified, will assume that a first call has already been made and will + directly start with waiting. + ''' + start_time = time.time() + if not skip_first: + result, error = fetch_url_json(module, url, **kwargs) + if check_done_callback(result, error): + return result, error + while True: + elapsed = (time.time() - start_time) + left_time = check_done_timeout - elapsed + time.sleep(max(min(check_done_delay, left_time), 0)) + result, error = fetch_url_json(module, url, **kwargs) + if check_done_callback(result, error): + return result, error + if left_time < check_done_delay: + raise CheckDoneTimeoutException(result, error) + + +# ##################################################################################### +# ## FAILOVER IP ###################################################################### + +def get_failover_record(module, ip): + ''' + Get information record of failover IP. + + See https://robot.your-server.de/doc/webservice/en.html#get-failover-failover-ip + ''' + url = "{0}/failover/{1}".format(BASE_URL, ip) + result, error = fetch_url_json(module, url) + if 'failover' not in result: + module.fail_json(msg='Cannot interpret result: {0}'.format(result)) + return result['failover'] + + +def get_failover(module, ip): + ''' + Get current routing target of failover IP. + + The value ``None`` represents unrouted. + + See https://robot.your-server.de/doc/webservice/en.html#get-failover-failover-ip + ''' + return get_failover_record(module, ip)['active_server_ip'] + + +def set_failover(module, ip, value, timeout=180): + ''' + Set current routing target of failover IP. + + Return a pair ``(value, changed)``. The value ``None`` for ``value`` represents unrouted. 
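+
+    Usage sketch (IP addresses hypothetical):
+
+        value, changed = set_failover(module, '192.0.2.1', '198.51.100.1')  # route
+        value, changed = set_failover(module, '192.0.2.1', None)            # unroute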
+
+    See https://robot.your-server.de/doc/webservice/en.html#post-failover-failover-ip
+    and https://robot.your-server.de/doc/webservice/en.html#delete-failover-failover-ip
+    '''
+    url = "{0}/failover/{1}".format(BASE_URL, ip)
+    if value is None:
+        result, error = fetch_url_json(
+            module,
+            url,
+            method='DELETE',
+            timeout=timeout,
+            accept_errors=['FAILOVER_ALREADY_ROUTED']
+        )
+    else:
+        headers = {"Content-type": "application/x-www-form-urlencoded"}
+        data = dict(
+            active_server_ip=value,
+        )
+        result, error = fetch_url_json(
+            module,
+            url,
+            method='POST',
+            timeout=timeout,
+            data=urlencode(data),
+            headers=headers,
+            accept_errors=['FAILOVER_ALREADY_ROUTED']
+        )
+    if error is not None:
+        return value, False
+    else:
+        return result['failover']['active_server_ip'], True
+
+
+def get_failover_state(value):
+    '''
+    Create result dictionary for failover IP's value.
+
+    The value ``None`` represents unrouted.
+    '''
+    return dict(
+        value=value,
+        state='routed' if value else 'unrouted'
+    )
diff --git a/plugins/module_utils/hwc_utils.py b/plugins/module_utils/hwc_utils.py
new file mode 100644
index 0000000000..52b5cb5cf8
--- /dev/null
+++ b/plugins/module_utils/hwc_utils.py
@@ -0,0 +1,438 @@
+# Copyright (c), Google Inc, 2017
+# Simplified BSD License (see licenses/simplified_bsd.txt or
+# https://opensource.org/licenses/BSD-2-Clause)
+
+import re
+import time
+import traceback
+
+THIRD_LIBRARIES_IMP_ERR = None
+try:
+    from keystoneauth1.adapter import Adapter
+    from keystoneauth1.identity import v3
+    from keystoneauth1 import session
+    HAS_THIRD_LIBRARIES = True
+except ImportError:
+    THIRD_LIBRARIES_IMP_ERR = traceback.format_exc()
+    HAS_THIRD_LIBRARIES = False
+
+from ansible.module_utils.basic import (AnsibleModule, env_fallback,
+                                        missing_required_lib)
+from ansible.module_utils._text import to_text
+
+
+class HwcModuleException(Exception):
+    def __init__(self, message):
+        super(HwcModuleException, self).__init__()
+
+        self._message = message
+
+    def __str__(self):
+        return "[HwcModuleException] message=%s" % self._message
+
+
+class HwcClientException(Exception):
+    def __init__(self, code, message):
+        super(HwcClientException, self).__init__()
+
+        self._code = code
+        self._message = message
+
+    def __str__(self):
+        msg = " code=%s," % str(self._code) if self._code != 0 else ""
+        return "[HwcClientException]%s message=%s" % (
+            msg, self._message)
+
+
+class HwcClientException404(HwcClientException):
+    def __init__(self, message):
+        super(HwcClientException404, self).__init__(404, message)
+
+    def __str__(self):
+        return "[HwcClientException404] message=%s" % self._message
+
+
+def session_method_wrapper(f):
+    def _wrap(self, url, *args, **kwargs):
+        try:
+            url = self.endpoint + url
+            r = f(self, url, *args, **kwargs)
+        except Exception as ex:
+            raise HwcClientException(
+                0, "Sending request failed, error=%s" % ex)
+
+        result = None
+        if r.content:
+            try:
+                result = r.json()
+            except Exception as ex:
+                raise HwcClientException(
+                    0, "Parsing response to json failed, error: %s" % ex)
+
+        code = r.status_code
+        if code not in [200, 201, 202, 203, 204, 205, 206, 207, 208, 226]:
+            msg = ""
+            for i in ['message', 'error.message']:
+                try:
+                    msg = navigate_value(result, i)
+                    break
+                except Exception:
+                    pass
+            else:
+                msg = str(result)
+
+            if code == 404:
+                raise HwcClientException404(msg)
+
+            raise HwcClientException(code, msg)
+
+        return result
+
+    return _wrap
+
+
+class _ServiceClient(object):
+    def __init__(self, client, endpoint, product):
+        self._client = client
+        self._endpoint = 
endpoint + self._default_header = { + 'User-Agent': "Huawei-Ansible-MM-%s" % product, + 'Accept': 'application/json', + } + + @property + def endpoint(self): + return self._endpoint + + @endpoint.setter + def endpoint(self, e): + self._endpoint = e + + @session_method_wrapper + def get(self, url, body=None, header=None, timeout=None): + return self._client.get(url, json=body, timeout=timeout, + headers=self._header(header)) + + @session_method_wrapper + def post(self, url, body=None, header=None, timeout=None): + return self._client.post(url, json=body, timeout=timeout, + headers=self._header(header)) + + @session_method_wrapper + def delete(self, url, body=None, header=None, timeout=None): + return self._client.delete(url, json=body, timeout=timeout, + headers=self._header(header)) + + @session_method_wrapper + def put(self, url, body=None, header=None, timeout=None): + return self._client.put(url, json=body, timeout=timeout, + headers=self._header(header)) + + def _header(self, header): + if header and isinstance(header, dict): + for k, v in self._default_header.items(): + if k not in header: + header[k] = v + else: + header = self._default_header + + return header + + +class Config(object): + def __init__(self, module, product): + self._project_client = None + self._domain_client = None + self._module = module + self._product = product + self._endpoints = {} + + self._validate() + self._gen_provider_client() + + @property + def module(self): + return self._module + + def client(self, region, service_type, service_level): + c = self._project_client + if service_level == "domain": + c = self._domain_client + + e = self._get_service_endpoint(c, service_type, region) + + return _ServiceClient(c, e, self._product) + + def _gen_provider_client(self): + m = self._module + p = { + "auth_url": m.params['identity_endpoint'], + "password": m.params['password'], + "username": m.params['user'], + "project_name": m.params['project'], + "user_domain_name": m.params['domain'], + "reauthenticate": True + } + + self._project_client = Adapter( + session.Session(auth=v3.Password(**p)), + raise_exc=False) + + p.pop("project_name") + self._domain_client = Adapter( + session.Session(auth=v3.Password(**p)), + raise_exc=False) + + def _get_service_endpoint(self, client, service_type, region): + k = "%s.%s" % (service_type, region if region else "") + + if k in self._endpoints: + return self._endpoints.get(k) + + url = None + try: + url = client.get_endpoint(service_type=service_type, + region_name=region, interface="public") + except Exception as ex: + raise HwcClientException( + 0, "Getting endpoint failed, error=%s" % ex) + + if url == "": + raise HwcClientException( + 0, "Can not find the enpoint for %s" % service_type) + + if url[-1] != "/": + url += "/" + + self._endpoints[k] = url + return url + + def _validate(self): + if not HAS_THIRD_LIBRARIES: + self.module.fail_json( + msg=missing_required_lib('keystoneauth1'), + exception=THIRD_LIBRARIES_IMP_ERR) + + +class HwcModule(AnsibleModule): + def __init__(self, *args, **kwargs): + arg_spec = kwargs.setdefault('argument_spec', {}) + + arg_spec.update( + dict( + identity_endpoint=dict( + required=True, type='str', + fallback=(env_fallback, ['ANSIBLE_HWC_IDENTITY_ENDPOINT']), + ), + user=dict( + required=True, type='str', + fallback=(env_fallback, ['ANSIBLE_HWC_USER']), + ), + password=dict( + required=True, type='str', no_log=True, + fallback=(env_fallback, ['ANSIBLE_HWC_PASSWORD']), + ), + domain=dict( + required=True, type='str', + fallback=(env_fallback, 
['ANSIBLE_HWC_DOMAIN']),
+                ),
+                project=dict(
+                    required=True, type='str',
+                    fallback=(env_fallback, ['ANSIBLE_HWC_PROJECT']),
+                ),
+                region=dict(
+                    type='str',
+                    fallback=(env_fallback, ['ANSIBLE_HWC_REGION']),
+                ),
+                id=dict(type='str')
+            )
+        )
+
+        super(HwcModule, self).__init__(*args, **kwargs)
+
+
+class _DictComparison(object):
+    ''' This class takes in two dictionaries `a` and `b`.
+        These are dictionaries of arbitrary depth, but made up of standard
+        Python types only.
+        This differ will compare all values in `a` to those in `b`.
+        If a value in `a` is None, it always compares as equal, meaning that
+        value does not need to be compared.
+        Note: On all lists, order does matter.
+    '''
+
+    def __init__(self, request):
+        self.request = request
+
+    def __eq__(self, other):
+        return self._compare_dicts(self.request, other.request)
+
+    def __ne__(self, other):
+        return not self.__eq__(other)
+
+    def _compare_dicts(self, dict1, dict2):
+        if dict1 is None:
+            return True
+
+        if set(dict1.keys()) != set(dict2.keys()):
+            return False
+
+        for k in dict1:
+            if not self._compare_value(dict1.get(k), dict2.get(k)):
+                return False
+
+        return True
+
+    def _compare_lists(self, list1, list2):
+        """Takes in two lists and compares them."""
+        if list1 is None:
+            return True
+
+        if len(list1) != len(list2):
+            return False
+
+        for i in range(len(list1)):
+            if not self._compare_value(list1[i], list2[i]):
+                return False
+
+        return True
+
+    def _compare_value(self, value1, value2):
+        """
+        :return: True if value1 is the same as value2, otherwise False.
+        """
+        if value1 is None:
+            return True
+
+        if not (value1 and value2):
+            return (not value1) and (not value2)
+
+        # Can assume non-None types at this point.
+        if isinstance(value1, list) and isinstance(value2, list):
+            return self._compare_lists(value1, value2)
+
+        elif isinstance(value1, dict) and isinstance(value2, dict):
+            return self._compare_dicts(value1, value2)
+
+        # Always use to_text values to avoid unicode issues.
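+        # e.g. b'abc' (a py2 str) and u'abc' compare equal once both are text.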
+        return (to_text(value1, errors='surrogate_or_strict') == to_text(
+            value2, errors='surrogate_or_strict'))
+
+
+def wait_to_finish(target, pending, refresh, timeout, min_interval=1, delay=3):
+    is_last_time = False
+    not_found_times = 0
+    wait = 0
+
+    time.sleep(delay)
+
+    end = time.time() + timeout
+    while not is_last_time:
+        if time.time() > end:
+            is_last_time = True
+
+        obj, status = refresh()
+
+        if obj is None:
+            not_found_times += 1
+
+            if not_found_times > 10:
+                raise HwcModuleException(
+                    "could not find the object after %d attempts" % not_found_times)
+        else:
+            not_found_times = 0
+
+            if status in target:
+                return obj
+
+            if pending and status not in pending:
+                raise HwcModuleException(
+                    "unexpected status (%s) occurred" % status)
+
+        if not is_last_time:
+            wait *= 2
+            if wait < min_interval:
+                wait = min_interval
+            elif wait > 10:
+                wait = 10
+
+            time.sleep(wait)
+
+    raise HwcModuleException("async wait timed out after %d seconds" % timeout)
+
+
+def navigate_value(data, index, array_index=None):
+    if array_index and (not isinstance(array_index, dict)):
+        raise HwcModuleException("array_index must be a dict")
+
+    d = data
+    for n in range(len(index)):
+        if d is None:
+            return None
+
+        if not isinstance(d, dict):
+            raise HwcModuleException(
+                "can't navigate value from a non-dict object")
+
+        i = index[n]
+        if i not in d:
+            raise HwcModuleException(
+                "navigate value failed: key (%s) does not exist in dict" % i)
+        d = d[i]
+
+        if not array_index:
+            continue
+
+        k = ".".join(index[: (n + 1)])
+        if k not in array_index:
+            continue
+
+        if d is None:
+            return None
+
+        if not isinstance(d, list):
+            raise HwcModuleException(
+                "can't navigate value from a non-list object")
+
+        j = array_index.get(k)
+        if j >= len(d):
+            raise HwcModuleException(
+                "navigate value failed: the index is out of range for the list")
+        d = d[j]
+
+    return d
+
+
+def build_path(module, path, kv=None):
+    if kv is None:
+        kv = dict()
+
+    v = {}
+    for p in re.findall(r"{[^/]*}", path):
+        n = p[1:][:-1]
+
+        if n in kv:
+            v[n] = str(kv[n])
+
+        else:
+            if n in module.params:
+                v[n] = str(module.params.get(n))
+            else:
+                v[n] = ""
+
+    return path.format(**v)
+
+
+def get_region(module):
+    if module.params['region']:
+        return module.params['region']
+
+    return module.params['project'].split("_")[0]
+
+
+def is_empty_value(v):
+    return (not v)
+
+
+def are_different_dicts(dict1, dict2):
+    return _DictComparison(dict1) != _DictComparison(dict2)
diff --git a/plugins/module_utils/ibm_sa_utils.py b/plugins/module_utils/ibm_sa_utils.py
new file mode 100644
index 0000000000..c3ab4103a9
--- /dev/null
+++ b/plugins/module_utils/ibm_sa_utils.py
@@ -0,0 +1,94 @@
+# Copyright (C) 2018 IBM CORPORATION
+# Author(s): Tzur Eliyahu
+#
+# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)

+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+import traceback
+
+from functools import wraps
+from ansible.module_utils._text import to_native
+from ansible.module_utils.basic import missing_required_lib
+
+PYXCLI_INSTALLED = True
+PYXCLI_IMP_ERR = None
+try:
+    from pyxcli import client, errors
+except ImportError:
+    PYXCLI_IMP_ERR = traceback.format_exc()
+    PYXCLI_INSTALLED = False
+
+AVAILABLE_PYXCLI_FIELDS = ['pool', 'size', 'snapshot_size',
+                           'domain', 'perf_class', 'vol',
+                           'iscsi_chap_name', 'iscsi_chap_secret',
+                           'cluster', 'host', 'lun', 'override',
+                           'fcaddress', 'iscsi_name', 'max_dms',
+                           'max_cgs', 'ldap_id', 'max_mirrors',
+                           'max_pools', 'max_volumes', 'hard_capacity',
+                           'soft_capacity']
+
+
+def xcli_wrapper(func):
+    """ Catch xcli errors and return a proper message"""
+    @wraps(func)
+    def wrapper(module, *args, **kwargs):
+        try:
+            return func(module, *args, **kwargs)
+        except errors.CommandExecutionError as e:
+            module.fail_json(msg=to_native(e))
+    return wrapper
+
+
+@xcli_wrapper
+def connect_ssl(module):
+    endpoints = module.params['endpoints']
+    username = module.params['username']
+    password = module.params['password']
+    if not (username and password and endpoints):
+        module.fail_json(
+            msg="Username, password or endpoints arguments "
+            "are missing from the module arguments")
+
+    try:
+        return client.XCLIClient.connect_multiendpoint_ssl(username,
+                                                           password,
+                                                           endpoints)
+    except errors.CommandFailedConnectionError as e:
+        module.fail_json(
+            msg="Connection with Spectrum Accelerate system has "
+            "failed: {0}.".format(to_native(e)))
+
+
+def spectrum_accelerate_spec():
+    """ Return arguments spec for AnsibleModule """
+    return dict(
+        endpoints=dict(required=True),
+        username=dict(required=True),
+        password=dict(no_log=True, required=True),
+    )
+
+
+@xcli_wrapper
+def execute_pyxcli_command(module, xcli_command, xcli_client):
+    pyxcli_args = build_pyxcli_command(module.params)
+    getattr(xcli_client.cmd, xcli_command)(**(pyxcli_args))
+    return True
+
+
+def build_pyxcli_command(fields):
+    """ Builds the args for pyxcli using the exact args from ansible"""
+    pyxcli_args = {}
+    for field in fields:
+        if not fields[field]:
+            continue
+        if field in AVAILABLE_PYXCLI_FIELDS and fields[field] != '':
+            pyxcli_args[field] = fields[field]
+    return pyxcli_args
+
+
+def is_pyxcli_installed(module):
+    if not PYXCLI_INSTALLED:
+        module.fail_json(msg=missing_required_lib('pyxcli'),
+                         exception=PYXCLI_IMP_ERR)
diff --git a/plugins/module_utils/identity/__init__.py b/plugins/module_utils/identity/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/plugins/module_utils/identity/keycloak/__init__.py b/plugins/module_utils/identity/keycloak/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/plugins/module_utils/identity/keycloak/keycloak.py b/plugins/module_utils/identity/keycloak/keycloak.py
new file mode 100644
index 0000000000..5cab048dc8
--- /dev/null
+++ b/plugins/module_utils/identity/keycloak/keycloak.py
@@ -0,0 +1,480 @@
+# Copyright (c) 2017, Eike Frost
+#
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by Ansible
+# still belong to the author of the module, and may assign their own license
+# to the complete work.
+#
+# Redistribution and use in source and binary forms, with or without modification,
+# are permitted provided that the following conditions are met:
+#
+#    * Redistributions of source code must retain the above copyright
+#      notice, this list of conditions and the following disclaimer.
+#    * Redistributions in binary form must reproduce the above copyright notice,
+#      this list of conditions and the following disclaimer in the documentation
+#      and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +import json + +from ansible.module_utils.urls import open_url +from ansible.module_utils.six.moves.urllib.parse import urlencode +from ansible.module_utils.six.moves.urllib.error import HTTPError +from ansible.module_utils._text import to_native + +URL_TOKEN = "{url}/realms/{realm}/protocol/openid-connect/token" +URL_CLIENT = "{url}/admin/realms/{realm}/clients/{id}" +URL_CLIENTS = "{url}/admin/realms/{realm}/clients" +URL_CLIENT_ROLES = "{url}/admin/realms/{realm}/clients/{id}/roles" +URL_REALM_ROLES = "{url}/admin/realms/{realm}/roles" + +URL_CLIENTTEMPLATE = "{url}/admin/realms/{realm}/client-templates/{id}" +URL_CLIENTTEMPLATES = "{url}/admin/realms/{realm}/client-templates" +URL_GROUPS = "{url}/admin/realms/{realm}/groups" +URL_GROUP = "{url}/admin/realms/{realm}/groups/{groupid}" + + +def keycloak_argument_spec(): + """ + Returns argument_spec of options common to keycloak_*-modules + + :return: argument_spec dict + """ + return dict( + auth_keycloak_url=dict(type='str', aliases=['url'], required=True), + auth_client_id=dict(type='str', default='admin-cli'), + auth_realm=dict(type='str', required=True), + auth_client_secret=dict(type='str', default=None), + auth_username=dict(type='str', aliases=['username'], required=True), + auth_password=dict(type='str', aliases=['password'], required=True, no_log=True), + validate_certs=dict(type='bool', default=True) + ) + + +def camel(words): + return words.split('_')[0] + ''.join(x.capitalize() or '_' for x in words.split('_')[1:]) + + +class KeycloakError(Exception): + pass + + +def get_token(base_url, validate_certs, auth_realm, client_id, + auth_username, auth_password, client_secret): + auth_url = URL_TOKEN.format(url=base_url, realm=auth_realm) + temp_payload = { + 'grant_type': 'password', + 'client_id': client_id, + 'client_secret': client_secret, + 'username': auth_username, + 'password': auth_password, + } + # Remove empty items, for instance missing client_secret + payload = dict( + (k, v) for k, v in temp_payload.items() if v is not None) + try: + r = json.loads(to_native(open_url(auth_url, method='POST', + validate_certs=validate_certs, + data=urlencode(payload)).read())) + except ValueError as e: + raise KeycloakError( + 'API returned invalid JSON when trying to obtain access token from %s: %s' + % (auth_url, str(e))) + except Exception as e: + raise KeycloakError('Could not obtain access token from %s: %s' + % (auth_url, str(e))) + + try: + return { + 'Authorization': 'Bearer ' + r['access_token'], + 'Content-Type': 'application/json' + } + except KeyError: + raise KeycloakError( + 'Could not obtain access token from %s' % auth_url) + + +class KeycloakAPI(object): + """ Keycloak API access; Keycloak uses OAuth 2.0 to protect its API, an access token for which + is obtained through OpenID connect + """ + def __init__(self, module, connection_header): + self.module = module + self.baseurl = 
self.module.params.get('auth_keycloak_url') + self.validate_certs = self.module.params.get('validate_certs') + self.restheaders = connection_header + + def get_clients(self, realm='master', filter=None): + """ Obtains client representations for clients in a realm + + :param realm: realm to be queried + :param filter: if defined, only the client with clientId specified in the filter is returned + :return: list of dicts of client representations + """ + clientlist_url = URL_CLIENTS.format(url=self.baseurl, realm=realm) + if filter is not None: + clientlist_url += '?clientId=%s' % filter + + try: + return json.loads(to_native(open_url(clientlist_url, method='GET', headers=self.restheaders, + validate_certs=self.validate_certs).read())) + except ValueError as e: + self.module.fail_json(msg='API returned incorrect JSON when trying to obtain list of clients for realm %s: %s' + % (realm, str(e))) + except Exception as e: + self.module.fail_json(msg='Could not obtain list of clients for realm %s: %s' + % (realm, str(e))) + + def get_client_by_clientid(self, client_id, realm='master'): + """ Get client representation by clientId + :param client_id: The clientId to be queried + :param realm: realm from which to obtain the client representation + :return: dict with a client representation or None if none matching exist + """ + r = self.get_clients(realm=realm, filter=client_id) + if len(r) > 0: + return r[0] + else: + return None + + def get_client_by_id(self, id, realm='master'): + """ Obtain client representation by id + + :param id: id (not clientId) of client to be queried + :param realm: client from this realm + :return: dict of client representation or None if none matching exist + """ + client_url = URL_CLIENT.format(url=self.baseurl, realm=realm, id=id) + + try: + return json.loads(to_native(open_url(client_url, method='GET', headers=self.restheaders, + validate_certs=self.validate_certs).read())) + + except HTTPError as e: + if e.code == 404: + return None + else: + self.module.fail_json(msg='Could not obtain client %s for realm %s: %s' + % (id, realm, str(e))) + except ValueError as e: + self.module.fail_json(msg='API returned incorrect JSON when trying to obtain client %s for realm %s: %s' + % (id, realm, str(e))) + except Exception as e: + self.module.fail_json(msg='Could not obtain client %s for realm %s: %s' + % (id, realm, str(e))) + + def get_client_id(self, client_id, realm='master'): + """ Obtain id of client by client_id + + :param client_id: client_id of client to be queried + :param realm: client template from this realm + :return: id of client (usually a UUID) + """ + result = self.get_client_by_clientid(client_id, realm) + if isinstance(result, dict) and 'id' in result: + return result['id'] + else: + return None + + def update_client(self, id, clientrep, realm="master"): + """ Update an existing client + :param id: id (not clientId) of client to be updated in Keycloak + :param clientrep: corresponding (partial/full) client representation with updates + :param realm: realm the client is in + :return: HTTPResponse object on success + """ + client_url = URL_CLIENT.format(url=self.baseurl, realm=realm, id=id) + + try: + return open_url(client_url, method='PUT', headers=self.restheaders, + data=json.dumps(clientrep), validate_certs=self.validate_certs) + except Exception as e: + self.module.fail_json(msg='Could not update client %s in realm %s: %s' + % (id, realm, str(e))) + + def create_client(self, clientrep, realm="master"): + """ Create a client in keycloak + :param clientrep: 
Client representation of client to be created. Must at least contain field clientId + :param realm: realm for client to be created + :return: HTTPResponse object on success + """ + client_url = URL_CLIENTS.format(url=self.baseurl, realm=realm) + + try: + return open_url(client_url, method='POST', headers=self.restheaders, + data=json.dumps(clientrep), validate_certs=self.validate_certs) + except Exception as e: + self.module.fail_json(msg='Could not create client %s in realm %s: %s' + % (clientrep['clientId'], realm, str(e))) + + def delete_client(self, id, realm="master"): + """ Delete a client from Keycloak + + :param id: id (not clientId) of client to be deleted + :param realm: realm of client to be deleted + :return: HTTPResponse object on success + """ + client_url = URL_CLIENT.format(url=self.baseurl, realm=realm, id=id) + + try: + return open_url(client_url, method='DELETE', headers=self.restheaders, + validate_certs=self.validate_certs) + except Exception as e: + self.module.fail_json(msg='Could not delete client %s in realm %s: %s' + % (id, realm, str(e))) + + def get_client_templates(self, realm='master'): + """ Obtains client template representations for client templates in a realm + + :param realm: realm to be queried + :return: list of dicts of client representations + """ + url = URL_CLIENTTEMPLATES.format(url=self.baseurl, realm=realm) + + try: + return json.loads(to_native(open_url(url, method='GET', headers=self.restheaders, + validate_certs=self.validate_certs).read())) + except ValueError as e: + self.module.fail_json(msg='API returned incorrect JSON when trying to obtain list of client templates for realm %s: %s' + % (realm, str(e))) + except Exception as e: + self.module.fail_json(msg='Could not obtain list of client templates for realm %s: %s' + % (realm, str(e))) + + def get_client_template_by_id(self, id, realm='master'): + """ Obtain client template representation by id + + :param id: id (not name) of client template to be queried + :param realm: client template from this realm + :return: dict of client template representation or None if none matching exist + """ + url = URL_CLIENTTEMPLATE.format(url=self.baseurl, id=id, realm=realm) + + try: + return json.loads(to_native(open_url(url, method='GET', headers=self.restheaders, + validate_certs=self.validate_certs).read())) + except ValueError as e: + self.module.fail_json(msg='API returned incorrect JSON when trying to obtain client templates %s for realm %s: %s' + % (id, realm, str(e))) + except Exception as e: + self.module.fail_json(msg='Could not obtain client template %s for realm %s: %s' + % (id, realm, str(e))) + + def get_client_template_by_name(self, name, realm='master'): + """ Obtain client template representation by name + + :param name: name of client template to be queried + :param realm: client template from this realm + :return: dict of client template representation or None if none matching exist + """ + result = self.get_client_templates(realm) + if isinstance(result, list): + result = [x for x in result if x['name'] == name] + if len(result) > 0: + return result[0] + return None + + def get_client_template_id(self, name, realm='master'): + """ Obtain client template id by name + + :param name: name of client template to be queried + :param realm: client template from this realm + :return: client template id (usually a UUID) + """ + result = self.get_client_template_by_name(name, realm) + if isinstance(result, dict) and 'id' in result: + return result['id'] + else: + return None + + def 
update_client_template(self, id, clienttrep, realm="master"):
+        """ Update an existing client template
+        :param id: id (not name) of client template to be updated in Keycloak
+        :param clienttrep: corresponding (partial/full) client template representation with updates
+        :param realm: realm the client template is in
+        :return: HTTPResponse object on success
+        """
+        url = URL_CLIENTTEMPLATE.format(url=self.baseurl, realm=realm, id=id)
+
+        try:
+            return open_url(url, method='PUT', headers=self.restheaders,
+                            data=json.dumps(clienttrep), validate_certs=self.validate_certs)
+        except Exception as e:
+            self.module.fail_json(msg='Could not update client template %s in realm %s: %s'
+                                      % (id, realm, str(e)))
+
+    def create_client_template(self, clienttrep, realm="master"):
+        """ Create a client template in keycloak
+        :param clienttrep: Client template representation of client template to be created. Must at least contain field name
+        :param realm: realm for client template to be created in
+        :return: HTTPResponse object on success
+        """
+        url = URL_CLIENTTEMPLATES.format(url=self.baseurl, realm=realm)
+
+        try:
+            return open_url(url, method='POST', headers=self.restheaders,
+                            data=json.dumps(clienttrep), validate_certs=self.validate_certs)
+        except Exception as e:
+            self.module.fail_json(msg='Could not create client template %s in realm %s: %s'
+                                      % (clienttrep['name'], realm, str(e)))
+
+    def delete_client_template(self, id, realm="master"):
+        """ Delete a client template from Keycloak
+
+        :param id: id (not name) of client template to be deleted
+        :param realm: realm of client template to be deleted
+        :return: HTTPResponse object on success
+        """
+        url = URL_CLIENTTEMPLATE.format(url=self.baseurl, realm=realm, id=id)
+
+        try:
+            return open_url(url, method='DELETE', headers=self.restheaders,
+                            validate_certs=self.validate_certs)
+        except Exception as e:
+            self.module.fail_json(msg='Could not delete client template %s in realm %s: %s'
+                                      % (id, realm, str(e)))
+
+    def get_groups(self, realm="master"):
+        """ Fetch the name and ID of all groups on the Keycloak server.
+
+        To fetch the full data of the group, make a subsequent call to
+        get_group_by_groupid, passing in the ID of the group you wish to return.
+
+        :param realm: Return the groups of this realm (default "master").
+        """
+        groups_url = URL_GROUPS.format(url=self.baseurl, realm=realm)
+        try:
+            return json.loads(to_native(open_url(groups_url, method="GET", headers=self.restheaders,
+                                                 validate_certs=self.validate_certs).read()))
+        except Exception as e:
+            self.module.fail_json(msg="Could not fetch list of groups in realm %s: %s"
+                                      % (realm, str(e)))
+
+    def get_group_by_groupid(self, gid, realm="master"):
+        """ Fetch a keycloak group from the provided realm using the group's unique ID.
+
+        If the group does not exist, None is returned.
+
+        gid is a UUID provided by the Keycloak API
+        :param gid: UUID of the group to be returned
+        :param realm: Realm in which the group resides; default 'master'.
+ """ + groups_url = URL_GROUP.format(url=self.baseurl, realm=realm, groupid=gid) + try: + return json.loads(to_native(open_url(groups_url, method="GET", headers=self.restheaders, + validate_certs=self.validate_certs).read())) + + except HTTPError as e: + if e.code == 404: + return None + else: + self.module.fail_json(msg="Could not fetch group %s in realm %s: %s" + % (gid, realm, str(e))) + except Exception as e: + self.module.fail_json(msg="Could not fetch group %s in realm %s: %s" + % (gid, realm, str(e))) + + def get_group_by_name(self, name, realm="master"): + """ Fetch a keycloak group within a realm based on its name. + + The Keycloak API does not allow filtering of the Groups resource by name. + As a result, this method first retrieves the entire list of groups - name and ID - + then performs a second query to fetch the group. + + If the group does not exist, None is returned. + :param name: Name of the group to fetch. + :param realm: Realm in which the group resides; default 'master' + """ + groups_url = URL_GROUPS.format(url=self.baseurl, realm=realm) + try: + all_groups = self.get_groups(realm=realm) + + for group in all_groups: + if group['name'] == name: + return self.get_group_by_groupid(group['id'], realm=realm) + + return None + + except Exception as e: + self.module.fail_json(msg="Could not fetch group %s in realm %s: %s" + % (name, realm, str(e))) + + def create_group(self, grouprep, realm="master"): + """ Create a Keycloak group. + + :param grouprep: a GroupRepresentation of the group to be created. Must contain at minimum the field name. + :return: HTTPResponse object on success + """ + groups_url = URL_GROUPS.format(url=self.baseurl, realm=realm) + try: + return open_url(groups_url, method='POST', headers=self.restheaders, + data=json.dumps(grouprep), validate_certs=self.validate_certs) + except Exception as e: + self.module.fail_json(msg="Could not create group %s in realm %s: %s" + % (grouprep['name'], realm, str(e))) + + def update_group(self, grouprep, realm="master"): + """ Update an existing group. + + :param grouprep: A GroupRepresentation of the updated group. + :return HTTPResponse object on success + """ + group_url = URL_GROUP.format(url=self.baseurl, realm=realm, groupid=grouprep['id']) + + try: + return open_url(group_url, method='PUT', headers=self.restheaders, + data=json.dumps(grouprep), validate_certs=self.validate_certs) + except Exception as e: + self.module.fail_json(msg='Could not update group %s in realm %s: %s' + % (grouprep['name'], realm, str(e))) + + def delete_group(self, name=None, groupid=None, realm="master"): + """ Delete a group. One of name or groupid must be provided. + + Providing the group ID is preferred as it avoids a second lookup to + convert a group name to an ID. + + :param name: The name of the group. A lookup will be performed to retrieve the group ID. + :param groupid: The ID of the group (preferred to name). + :param realm: The realm in which this group resides, default "master". + """ + + if groupid is None and name is None: + # prefer an exception since this is almost certainly a programming error in the module itself. + raise Exception("Unable to delete group - one of group ID or name must be provided.") + + # only lookup the name if groupid isn't provided. + # in the case that both are provided, prefer the ID, since it's one + # less lookup. 
+ if groupid is None and name is not None: + for group in self.get_groups(realm=realm): + if group['name'] == name: + groupid = group['id'] + break + + # if the group doesn't exist - no problem, nothing to delete. + if groupid is None: + return None + + # should have a good groupid by here. + group_url = URL_GROUP.format(realm=realm, groupid=groupid, url=self.baseurl) + try: + return open_url(group_url, method='DELETE', headers=self.restheaders, + validate_certs=self.validate_certs) + + except Exception as e: + self.module.fail_json(msg="Unable to delete group %s: %s" % (groupid, str(e))) diff --git a/plugins/module_utils/infinibox.py b/plugins/module_utils/infinibox.py new file mode 100644 index 0000000000..57ee89ec2c --- /dev/null +++ b/plugins/module_utils/infinibox.py @@ -0,0 +1,93 @@ +# -*- coding: utf-8 -*- + +# This code is part of Ansible, but is an independent component. +# This particular file snippet, and this file snippet only, is BSD licensed. +# Modules you write using this snippet, which is embedded dynamically by Ansible +# still belong to the author of the module, and may assign their own license +# to the complete work. +# +# Copyright (c), Gregory Shulov ,2016 +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without modification, +# are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
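Stepping back to the keycloak.py helpers above: a keycloak_* module wires them together roughly as sketched below. This is a hedged illustration, not code from this commit; the clientId 'my-client' and the module wiring are hypothetical.

    from ansible.module_utils.basic import AnsibleModule

    # Hypothetical consumer of the keycloak.py helpers above.
    module = AnsibleModule(argument_spec=keycloak_argument_spec())
    try:
        connection_header = get_token(
            base_url=module.params.get('auth_keycloak_url'),
            validate_certs=module.params.get('validate_certs'),
            auth_realm=module.params.get('auth_realm'),
            client_id=module.params.get('auth_client_id'),
            auth_username=module.params.get('auth_username'),
            auth_password=module.params.get('auth_password'),
            client_secret=module.params.get('auth_client_secret'))
    except KeycloakError as e:
        module.fail_json(msg=str(e))

    kc = KeycloakAPI(module, connection_header)
    # get_client_by_clientid() returns None when no client matches.
    existing = kc.get_client_by_clientid('my-client', realm='master')

All later calls on the KeycloakAPI instance reuse the bearer token captured in connection_header, which is why the modules obtain it once up front.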
+ +HAS_INFINISDK = True +try: + from infinisdk import InfiniBox, core +except ImportError: + HAS_INFINISDK = False + +from functools import wraps +from os import environ +from os import path + + +def api_wrapper(func): + """ Catch API Errors Decorator""" + @wraps(func) + def __wrapper(*args, **kwargs): + module = args[0] + try: + return func(*args, **kwargs) + except core.exceptions.APICommandException as e: + module.fail_json(msg=e.message) + except core.exceptions.SystemNotFoundException as e: + module.fail_json(msg=e.message) + except Exception: + raise + return __wrapper + + +@api_wrapper +def get_system(module): + """Return System Object or Fail""" + box = module.params['system'] + user = module.params.get('user', None) + password = module.params.get('password', None) + + if user and password: + system = InfiniBox(box, auth=(user, password)) + elif environ.get('INFINIBOX_USER') and environ.get('INFINIBOX_PASSWORD'): + system = InfiniBox(box, auth=(environ.get('INFINIBOX_USER'), environ.get('INFINIBOX_PASSWORD'))) + elif path.isfile(path.expanduser('~') + '/.infinidat/infinisdk.ini'): + system = InfiniBox(box) + else: + module.fail_json(msg="You must set INFINIBOX_USER and INFINIBOX_PASSWORD environment variables or set username/password module arguments") + + try: + system.login() + except Exception: + module.fail_json(msg="Infinibox authentication failed. Check your credentials") + return system + + +def infinibox_argument_spec(): + """Return standard base dictionary used for the argument_spec argument in AnsibleModule""" + + return dict( + system=dict(required=True), + user=dict(), + password=dict(no_log=True), + ) + + +def infinibox_required_together(): + """Return the default list used for the required_together argument to AnsibleModule""" + return [['user', 'password']] diff --git a/plugins/module_utils/influxdb.py b/plugins/module_utils/influxdb.py new file mode 100644 index 0000000000..0bdd4e6cdd --- /dev/null +++ b/plugins/module_utils/influxdb.py @@ -0,0 +1,88 @@ +# -*- coding: utf-8 -*- + +# Copyright: (c) 2017, Ansible Project +# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +import traceback + +from ansible.module_utils.basic import missing_required_lib + +REQUESTS_IMP_ERR = None +try: + import requests.exceptions + HAS_REQUESTS = True +except ImportError: + REQUESTS_IMP_ERR = traceback.format_exc() + HAS_REQUESTS = False + +INFLUXDB_IMP_ERR = None +try: + from influxdb import InfluxDBClient + from influxdb import __version__ as influxdb_version + from influxdb import exceptions + HAS_INFLUXDB = True +except ImportError: + INFLUXDB_IMP_ERR = traceback.format_exc() + HAS_INFLUXDB = False + + +class InfluxDb(): + def __init__(self, module): + self.module = module + self.params = self.module.params + self.check_lib() + self.hostname = self.params['hostname'] + self.port = self.params['port'] + self.path = self.params['path'] + self.username = self.params['username'] + self.password = self.params['password'] + self.database_name = self.params.get('database_name') + + def check_lib(self): + if not HAS_REQUESTS: + self.module.fail_json(msg=missing_required_lib('requests'), exception=REQUESTS_IMP_ERR) + + if not HAS_INFLUXDB: + self.module.fail_json(msg=missing_required_lib('influxdb'), exception=INFLUXDB_IMP_ERR) + + @staticmethod + def influxdb_argument_spec(): + return dict( + hostname=dict(type='str', default='localhost'), + 
port=dict(type='int', default=8086),
+            path=dict(type='str', default=''),
+            username=dict(type='str', default='root', aliases=['login_username']),
+            password=dict(type='str', default='root', no_log=True, aliases=['login_password']),
+            ssl=dict(type='bool', default=False),
+            validate_certs=dict(type='bool', default=True),
+            timeout=dict(type='int'),
+            retries=dict(type='int', default=3),
+            proxies=dict(type='dict', default={}),
+            use_udp=dict(type='bool', default=False),
+            udp_port=dict(type='int', default=4444),
+        )
+
+    def connect_to_influxdb(self):
+        args = dict(
+            host=self.hostname,
+            port=self.port,
+            path=self.path,
+            username=self.username,
+            password=self.password,
+            database=self.database_name,
+            ssl=self.params['ssl'],
+            verify_ssl=self.params['validate_certs'],
+            timeout=self.params['timeout'],
+            use_udp=self.params['use_udp'],
+            udp_port=self.params['udp_port'],
+            proxies=self.params['proxies'],
+        )
+        # Compare numerically; comparing the raw version strings lexicographically
+        # breaks once a component reaches two digits (e.g. "10" < "4" as strings).
+        influxdb_api_version = tuple(int(v) for v in influxdb_version.split(".")[:3])
+        if influxdb_api_version >= (4, 1, 0):
+            # retries option is added in version 4.1.0
+            args.update(retries=self.params['retries'])
+
+        return InfluxDBClient(**args)
diff --git a/plugins/module_utils/ipa.py b/plugins/module_utils/ipa.py
new file mode 100644
index 0000000000..c834c873f8
--- /dev/null
+++ b/plugins/module_utils/ipa.py
@@ -0,0 +1,226 @@
+# -*- coding: utf-8 -*-
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by Ansible
+# still belong to the author of the module, and may assign their own license
+# to the complete work.
+#
+# Copyright (c) 2016 Thomas Krahn (@Nosmoht)
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without modification,
+# are permitted provided that the following conditions are met:
+#
+#    * Redistributions of source code must retain the above copyright
+#      notice, this list of conditions and the following disclaimer.
+#    * Redistributions in binary form must reproduce the above copyright notice,
+#      this list of conditions and the following disclaimer in the documentation
+#      and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
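For reference, the InfluxDb helper above is consumed by the influxdb_* modules roughly as follows. A minimal sketch under stated assumptions: the extra database_name option and the get_list_database() call (a standard influxdb-python client method) are illustrative, not part of this commit.

    from ansible.module_utils.basic import AnsibleModule

    # Hypothetical consumer of the InfluxDb helper above.
    argument_spec = InfluxDb.influxdb_argument_spec()
    argument_spec.update(database_name=dict(type='str'))  # assumed extra option
    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)

    influx = InfluxDb(module)             # fails early if requests/influxdb are missing
    client = influx.connect_to_influxdb()
    databases = client.get_list_database()  # plain influxdb-python client from here on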
+ +import json +import os +import socket +import uuid + +import re +from ansible.module_utils._text import to_bytes, to_native, to_text +from ansible.module_utils.six import PY3 +from ansible.module_utils.six.moves.urllib.parse import quote +from ansible.module_utils.urls import fetch_url, HAS_GSSAPI +from ansible.module_utils.basic import env_fallback, AnsibleFallbackNotFound + + +def _env_then_dns_fallback(*args, **kwargs): + ''' Load value from environment or DNS in that order''' + try: + return env_fallback(*args, **kwargs) + except AnsibleFallbackNotFound: + # If no host was given, we try to guess it from IPA. + # The ipa-ca entry is a standard entry that IPA will have set for + # the CA. + try: + return socket.gethostbyaddr(socket.gethostbyname('ipa-ca'))[0] + except Exception: + raise AnsibleFallbackNotFound + + +class IPAClient(object): + def __init__(self, module, host, port, protocol): + self.host = host + self.port = port + self.protocol = protocol + self.module = module + self.headers = None + self.timeout = module.params.get('ipa_timeout') + self.use_gssapi = False + + def get_base_url(self): + return '%s://%s/ipa' % (self.protocol, self.host) + + def get_json_url(self): + return '%s/session/json' % self.get_base_url() + + def login(self, username, password): + if 'KRB5CCNAME' in os.environ and HAS_GSSAPI: + self.use_gssapi = True + elif 'KRB5_CLIENT_KTNAME' in os.environ and HAS_GSSAPI: + ccache = "MEMORY:" + str(uuid.uuid4()) + os.environ['KRB5CCNAME'] = ccache + self.use_gssapi = True + else: + if not password: + if 'KRB5CCNAME' in os.environ or 'KRB5_CLIENT_KTNAME' in os.environ: + self.module.warn("In order to use GSSAPI, you need to install 'urllib_gssapi'") + self._fail('login', 'Password is required if not using ' + 'GSSAPI. To use GSSAPI, please set the ' + 'KRB5_CLIENT_KTNAME or KRB5CCNAME (or both) ' + ' environment variables.') + url = '%s/session/login_password' % self.get_base_url() + data = 'user=%s&password=%s' % (quote(username, safe=''), quote(password, safe='')) + headers = {'referer': self.get_base_url(), + 'Content-Type': 'application/x-www-form-urlencoded', + 'Accept': 'text/plain'} + try: + resp, info = fetch_url(module=self.module, url=url, data=to_bytes(data), headers=headers, timeout=self.timeout) + status_code = info['status'] + if status_code not in [200, 201, 204]: + self._fail('login', info['msg']) + + self.headers = {'Cookie': resp.info().get('Set-Cookie')} + except Exception as e: + self._fail('login', to_native(e)) + if not self.headers: + self.headers = dict() + self.headers.update({ + 'referer': self.get_base_url(), + 'Content-Type': 'application/json', + 'Accept': 'application/json'}) + + def _fail(self, msg, e): + if 'message' in e: + err_string = e.get('message') + else: + err_string = e + self.module.fail_json(msg='%s: %s' % (msg, err_string)) + + def get_ipa_version(self): + response = self.ping()['summary'] + ipa_ver_regex = re.compile(r'IPA server version (\d\.\d\.\d).*') + version_match = ipa_ver_regex.match(response) + ipa_version = None + if version_match: + ipa_version = version_match.groups()[0] + return ipa_version + + def ping(self): + return self._post_json(method='ping', name=None) + + def _post_json(self, method, name, item=None): + if item is None: + item = {} + url = '%s/session/json' % self.get_base_url() + data = dict(method=method) + + # TODO: We should probably handle this a little better. 
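+        # The IPA JSON-RPC API expects params as a [positional, named] pair:
+        # ping/config_show take no positional name, config_mod passes the item
+        # dict as named arguments, and everything else targets a single name.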
+ if method in ('ping', 'config_show'): + data['params'] = [[], {}] + elif method == 'config_mod': + data['params'] = [[], item] + else: + data['params'] = [[name], item] + + try: + resp, info = fetch_url(module=self.module, url=url, data=to_bytes(json.dumps(data)), + headers=self.headers, timeout=self.timeout, use_gssapi=self.use_gssapi) + status_code = info['status'] + if status_code not in [200, 201, 204]: + self._fail(method, info['msg']) + except Exception as e: + self._fail('post %s' % method, to_native(e)) + + if PY3: + charset = resp.headers.get_content_charset('latin-1') + else: + response_charset = resp.headers.getparam('charset') + if response_charset: + charset = response_charset + else: + charset = 'latin-1' + resp = json.loads(to_text(resp.read(), encoding=charset), encoding=charset) + err = resp.get('error') + if err is not None: + self._fail('response %s' % method, err) + + if 'result' in resp: + result = resp.get('result') + if 'result' in result: + result = result.get('result') + if isinstance(result, list): + if len(result) > 0: + return result[0] + else: + return {} + return result + return None + + def get_diff(self, ipa_data, module_data): + result = [] + for key in module_data.keys(): + mod_value = module_data.get(key, None) + if isinstance(mod_value, list): + default = [] + else: + default = None + ipa_value = ipa_data.get(key, default) + if isinstance(ipa_value, list) and not isinstance(mod_value, list): + mod_value = [mod_value] + if isinstance(ipa_value, list) and isinstance(mod_value, list): + mod_value = sorted(mod_value) + ipa_value = sorted(ipa_value) + if mod_value != ipa_value: + result.append(key) + return result + + def modify_if_diff(self, name, ipa_list, module_list, add_method, remove_method, item=None): + changed = False + diff = list(set(ipa_list) - set(module_list)) + if len(diff) > 0: + changed = True + if not self.module.check_mode: + if item: + remove_method(name=name, item={item: diff}) + else: + remove_method(name=name, item=diff) + + diff = list(set(module_list) - set(ipa_list)) + if len(diff) > 0: + changed = True + if not self.module.check_mode: + if item: + add_method(name=name, item={item: diff}) + else: + add_method(name=name, item=diff) + + return changed + + +def ipa_argument_spec(): + return dict( + ipa_prot=dict(type='str', default='https', choices=['http', 'https'], fallback=(env_fallback, ['IPA_PROT'])), + ipa_host=dict(type='str', default='ipa.example.com', fallback=(_env_then_dns_fallback, ['IPA_HOST'])), + ipa_port=dict(type='int', default=443, fallback=(env_fallback, ['IPA_PORT'])), + ipa_user=dict(type='str', default='admin', fallback=(env_fallback, ['IPA_USER'])), + ipa_pass=dict(type='str', no_log=True, fallback=(env_fallback, ['IPA_PASS'])), + ipa_timeout=dict(type='int', default=10, fallback=(env_fallback, ['IPA_TIMEOUT'])), + validate_certs=dict(type='bool', default=True), + ) diff --git a/plugins/module_utils/known_hosts.py b/plugins/module_utils/known_hosts.py new file mode 100644 index 0000000000..42f067f225 --- /dev/null +++ b/plugins/module_utils/known_hosts.py @@ -0,0 +1,195 @@ +# This code is part of Ansible, but is an independent component. +# This particular file snippet, and this file snippet only, is BSD licensed. +# Modules you write using this snippet, which is embedded dynamically by Ansible +# still belong to the author of the module, and may assign their own license +# to the complete work. +# +# Copyright (c), Michael DeHaan , 2012-2013 +# All rights reserved. 
+# +# Redistribution and use in source and binary forms, with or without modification, +# are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +import os +import hmac +import re + +from ansible.module_utils.six.moves.urllib.parse import urlparse + +try: + from hashlib import sha1 +except ImportError: + import sha as sha1 + +HASHED_KEY_MAGIC = "|1|" + + +def is_ssh_url(url): + + """ check if url is ssh """ + + if "@" in url and "://" not in url: + return True + for scheme in "ssh://", "git+ssh://", "ssh+git://": + if url.startswith(scheme): + return True + return False + + +def get_fqdn_and_port(repo_url): + + """ chop the hostname and port out of a url """ + + fqdn = None + port = None + ipv6_re = re.compile(r'(\[[^]]*\])(?::([0-9]+))?') + if "@" in repo_url and "://" not in repo_url: + # most likely an user@host:path or user@host/path type URL + repo_url = repo_url.split("@", 1)[1] + match = ipv6_re.match(repo_url) + # For this type of URL, colon specifies the path, not the port + if match: + fqdn, path = match.groups() + elif ":" in repo_url: + fqdn = repo_url.split(":")[0] + elif "/" in repo_url: + fqdn = repo_url.split("/")[0] + elif "://" in repo_url: + # this should be something we can parse with urlparse + parts = urlparse(repo_url) + # parts[1] will be empty on python2.4 on ssh:// or git:// urls, so + # ensure we actually have a parts[1] before continuing. + if parts[1] != '': + fqdn = parts[1] + if "@" in fqdn: + fqdn = fqdn.split("@", 1)[1] + match = ipv6_re.match(fqdn) + if match: + fqdn, port = match.groups() + elif ":" in fqdn: + fqdn, port = fqdn.split(":")[0:2] + return fqdn, port + + +def check_hostkey(module, fqdn): + return not not_in_host_file(module, fqdn) + + +# this is a variant of code found in connection_plugins/paramiko.py and we should modify +# the paramiko code to import and use this. 
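Before the host-file scanning code below, a quick illustration of the URL shapes get_fqdn_and_port() above handles; the repository URLs are made up, and the expected return values (shown in comments) follow from the parsing logic above.

    # Illustrative inputs for get_fqdn_and_port():
    get_fqdn_and_port('git@github.com:ansible/ansible.git')    # ('github.com', None)
    get_fqdn_and_port('ssh://git@example.com:2222/repo.git')   # ('example.com', '2222')
    get_fqdn_and_port('git+ssh://[2001:db8::1]:22/repo.git')   # ('[2001:db8::1]', '22')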
+ +def not_in_host_file(self, host): + + if 'USER' in os.environ: + user_host_file = os.path.expandvars("~${USER}/.ssh/known_hosts") + else: + user_host_file = "~/.ssh/known_hosts" + user_host_file = os.path.expanduser(user_host_file) + + host_file_list = [] + host_file_list.append(user_host_file) + host_file_list.append("/etc/ssh/ssh_known_hosts") + host_file_list.append("/etc/ssh/ssh_known_hosts2") + host_file_list.append("/etc/openssh/ssh_known_hosts") + + hfiles_not_found = 0 + for hf in host_file_list: + if not os.path.exists(hf): + hfiles_not_found += 1 + continue + + try: + host_fh = open(hf) + except IOError: + hfiles_not_found += 1 + continue + else: + data = host_fh.read() + host_fh.close() + + for line in data.split("\n"): + if line is None or " " not in line: + continue + tokens = line.split() + if tokens[0].find(HASHED_KEY_MAGIC) == 0: + # this is a hashed known host entry + try: + (kn_salt, kn_host) = tokens[0][len(HASHED_KEY_MAGIC):].split("|", 2) + hash = hmac.new(kn_salt.decode('base64'), digestmod=sha1) + hash.update(host) + if hash.digest() == kn_host.decode('base64'): + return False + except Exception: + # invalid hashed host key, skip it + continue + else: + # standard host file entry + if host in tokens[0]: + return False + + return True + + +def add_host_key(module, fqdn, port=22, key_type="rsa", create_dir=False): + + """ use ssh-keyscan to add the hostkey """ + + keyscan_cmd = module.get_bin_path('ssh-keyscan', True) + + if 'USER' in os.environ: + user_ssh_dir = os.path.expandvars("~${USER}/.ssh/") + user_host_file = os.path.expandvars("~${USER}/.ssh/known_hosts") + else: + user_ssh_dir = "~/.ssh/" + user_host_file = "~/.ssh/known_hosts" + user_ssh_dir = os.path.expanduser(user_ssh_dir) + + if not os.path.exists(user_ssh_dir): + if create_dir: + try: + os.makedirs(user_ssh_dir, int('700', 8)) + except Exception: + module.fail_json(msg="failed to create host key directory: %s" % user_ssh_dir) + else: + module.fail_json(msg="%s does not exist" % user_ssh_dir) + elif not os.path.isdir(user_ssh_dir): + module.fail_json(msg="%s is not a directory" % user_ssh_dir) + + if port: + this_cmd = "%s -t %s -p %s %s" % (keyscan_cmd, key_type, port, fqdn) + else: + this_cmd = "%s -t %s %s" % (keyscan_cmd, key_type, fqdn) + + rc, out, err = module.run_command(this_cmd) + # ssh-keyscan gives a 0 exit code and prints nothing on timeout + if rc != 0 or not out: + msg = 'failed to retrieve hostkey' + if not out: + msg += '. "%s" returned no matches.' % this_cmd + else: + msg += ' using command "%s". 
[stdout]: %s' % (this_cmd, out) + + if err: + msg += ' [stderr]: %s' % err + + module.fail_json(msg=msg) + + module.append_to_file(user_host_file, out) + + return rc, out, err diff --git a/plugins/module_utils/kubevirt.py b/plugins/module_utils/kubevirt.py new file mode 100644 index 0000000000..b47162b295 --- /dev/null +++ b/plugins/module_utils/kubevirt.py @@ -0,0 +1,462 @@ +# -*- coding: utf-8 -*- +# + +# Copyright (c) 2018, KubeVirt Team <@kubevirt> +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from collections import defaultdict +from distutils.version import Version + +from ansible.module_utils.common import dict_transformations +from ansible.module_utils.common._collections_compat import Sequence +from ansible_collections.community.kubernetes.plugins.module_utils.k8s.common import list_dict_str +from ansible_collections.community.kubernetes.plugins.module_utils.k8s.raw import KubernetesRawModule + +import copy +import re + +MAX_SUPPORTED_API_VERSION = 'v1alpha3' +API_GROUP = 'kubevirt.io' + + +# Put all args that (can) modify 'spec:' here: +VM_SPEC_DEF_ARG_SPEC = { + 'resource_definition': { + 'type': 'dict', + 'aliases': ['definition', 'inline'] + }, + 'memory': {'type': 'str'}, + 'memory_limit': {'type': 'str'}, + 'cpu_cores': {'type': 'int'}, + 'disks': {'type': 'list'}, + 'labels': {'type': 'dict'}, + 'interfaces': {'type': 'list'}, + 'machine_type': {'type': 'str'}, + 'cloud_init_nocloud': {'type': 'dict'}, + 'bootloader': {'type': 'str'}, + 'smbios_uuid': {'type': 'str'}, + 'cpu_model': {'type': 'str'}, + 'headless': {'type': 'str'}, + 'hugepage_size': {'type': 'str'}, + 'tablets': {'type': 'list'}, + 'cpu_limit': {'type': 'int'}, + 'cpu_shares': {'type': 'int'}, + 'cpu_features': {'type': 'list'}, + 'affinity': {'type': 'dict'}, + 'anti_affinity': {'type': 'dict'}, + 'node_affinity': {'type': 'dict'}, +} +# And other common args go here: +VM_COMMON_ARG_SPEC = { + 'name': {'required': True}, + 'namespace': {'required': True}, + 'hostname': {'type': 'str'}, + 'subdomain': {'type': 'str'}, + 'state': { + 'default': 'present', + 'choices': ['present', 'absent'], + }, + 'force': { + 'type': 'bool', + 'default': False, + }, + 'merge_type': {'type': 'list', 'choices': ['json', 'merge', 'strategic-merge']}, + 'wait': {'type': 'bool', 'default': True}, + 'wait_timeout': {'type': 'int', 'default': 120}, + 'wait_sleep': {'type': 'int', 'default': 5}, +} +VM_COMMON_ARG_SPEC.update(VM_SPEC_DEF_ARG_SPEC) + + +def virtdict(): + """ + This function create dictionary, with defaults to dictionary. 
+ """ + return defaultdict(virtdict) + + +class KubeAPIVersion(Version): + component_re = re.compile(r'(\d+ | [a-z]+)', re.VERBOSE) + + def __init__(self, vstring=None): + if vstring: + self.parse(vstring) + + def parse(self, vstring): + self.vstring = vstring + components = [x for x in self.component_re.split(vstring) if x] + for i, obj in enumerate(components): + try: + components[i] = int(obj) + except ValueError: + pass + + errmsg = "version '{0}' does not conform to kubernetes api versioning guidelines".format(vstring) + c = components + + if len(c) not in (2, 4) or c[0] != 'v' or not isinstance(c[1], int): + raise ValueError(errmsg) + if len(c) == 4 and (c[2] not in ('alpha', 'beta') or not isinstance(c[3], int)): + raise ValueError(errmsg) + + self.version = components + + def __str__(self): + return self.vstring + + def __repr__(self): + return "KubeAPIVersion ('{0}')".format(str(self)) + + def _cmp(self, other): + if isinstance(other, str): + other = KubeAPIVersion(other) + + myver = self.version + otherver = other.version + + for ver in myver, otherver: + if len(ver) == 2: + ver.extend(['zeta', 9999]) + + if myver == otherver: + return 0 + if myver < otherver: + return -1 + if myver > otherver: + return 1 + + # python2 compatibility + def __cmp__(self, other): + return self._cmp(other) + + +class KubeVirtRawModule(KubernetesRawModule): + def __init__(self, *args, **kwargs): + super(KubeVirtRawModule, self).__init__(*args, **kwargs) + + @staticmethod + def merge_dicts(base_dict, merging_dicts): + """This function merges a base dictionary with one or more other dictionaries. + The base dictionary takes precedence when there is a key collision. + merging_dicts can be a dict or a list or tuple of dicts. In the latter case, the + dictionaries at the front of the list have higher precedence over the ones at the end. 
+        """
+        if not merging_dicts:
+            merging_dicts = ({},)
+
+        if not isinstance(merging_dicts, Sequence):
+            merging_dicts = (merging_dicts,)
+
+        new_dict = {}
+        for d in reversed(merging_dicts):
+            new_dict = dict_transformations.dict_merge(new_dict, d)
+
+        new_dict = dict_transformations.dict_merge(new_dict, base_dict)
+
+        return new_dict
+
+    def get_resource(self, resource):
+        try:
+            existing = resource.get(name=self.name, namespace=self.namespace)
+        except Exception:
+            existing = None
+
+        return existing
+
+    def _define_datavolumes(self, datavolumes, spec):
+        """
+        Takes the datavolumes parameter of Ansible and creates the kubevirt API
+        dataVolumeTemplates structure from it.
+        """
+        if not datavolumes:
+            return
+
+        spec['dataVolumeTemplates'] = []
+        for dv in datavolumes:
+            # Add datavolume to datavolumetemplates spec:
+            dvt = virtdict()
+            dvt['metadata']['name'] = dv.get('name')
+            dvt['spec']['pvc'] = {
+                'accessModes': dv.get('pvc').get('accessModes'),
+                'resources': {
+                    'requests': {
+                        'storage': dv.get('pvc').get('storage'),
+                    }
+                }
+            }
+            dvt['spec']['source'] = dv.get('source')
+            spec['dataVolumeTemplates'].append(dvt)
+
+            # Add datavolume to disks spec:
+            if not spec['template']['spec']['domain']['devices']['disks']:
+                spec['template']['spec']['domain']['devices']['disks'] = []
+
+            spec['template']['spec']['domain']['devices']['disks'].append(
+                {
+                    'name': dv.get('name'),
+                    'disk': dv.get('disk', {'bus': 'virtio'}),
+                }
+            )
+
+            # Add datavolume to volumes spec:
+            if not spec['template']['spec']['volumes']:
+                spec['template']['spec']['volumes'] = []
+
+            spec['template']['spec']['volumes'].append(
+                {
+                    'dataVolume': {
+                        'name': dv.get('name')
+                    },
+                    'name': dv.get('name'),
+                }
+            )
+
+    def _define_cloud_init(self, cloud_init_nocloud, template_spec):
+        """
+        Takes the user's cloud_init_nocloud parameter and fills it into the
+        kubevirt API structure. The disk name is hardcoded to ansiblecloudinitdisk.
+        """
+        if cloud_init_nocloud:
+            if not template_spec['volumes']:
+                template_spec['volumes'] = []
+            if not template_spec['domain']['devices']['disks']:
+                template_spec['domain']['devices']['disks'] = []
+
+            template_spec['volumes'].append({'name': 'ansiblecloudinitdisk', 'cloudInitNoCloud': cloud_init_nocloud})
+            template_spec['domain']['devices']['disks'].append({
+                'name': 'ansiblecloudinitdisk',
+                'disk': {'bus': 'virtio'},
+            })
+
+    def _define_interfaces(self, interfaces, template_spec, defaults):
+        """
+        Takes the interfaces parameter of Ansible and creates the kubevirt API
+        interfaces and networks structures from it.
+        """
+        if not interfaces and defaults and 'interfaces' in defaults:
+            interfaces = copy.deepcopy(defaults['interfaces'])
+            for d in interfaces:
+                d['network'] = defaults['networks'][0]
+
+        if interfaces:
+            # Extract interfaces k8s specification from interfaces list passed to Ansible:
+            spec_interfaces = []
+            for i in interfaces:
+                spec_interfaces.append(
+                    self.merge_dicts(dict((k, v) for k, v in i.items() if k != 'network'), defaults['interfaces'])
+                )
+            if 'interfaces' not in template_spec['domain']['devices']:
+                template_spec['domain']['devices']['interfaces'] = []
+            template_spec['domain']['devices']['interfaces'].extend(spec_interfaces)
+
+            # Extract networks k8s specification from interfaces list passed to Ansible:
+            spec_networks = []
+            for i in interfaces:
+                net = i['network']
+                net['name'] = i['name']
+                spec_networks.append(self.merge_dicts(net, defaults['networks']))
+            if 'networks' not in template_spec:
+                template_spec['networks'] = []
+            template_spec['networks'].extend(spec_networks)
+
+    def _define_disks(self, disks, template_spec, defaults):
+        """
+        Takes the disks parameter of Ansible and creates the kubevirt API disks
+        and volumes structures from it.
+        """
+        if not disks and defaults and 'disks' in defaults:
+            disks = copy.deepcopy(defaults['disks'])
+            for d in disks:
+                d['volume'] = defaults['volumes'][0]
+
+        if disks:
+            # Extract k8s specification from disks list passed to Ansible:
+            spec_disks = []
+            for d in disks:
+                spec_disks.append(
+                    self.merge_dicts(dict((k, v) for k, v in d.items() if k != 'volume'), defaults['disks'])
+                )
+            if 'disks' not in template_spec['domain']['devices']:
+                template_spec['domain']['devices']['disks'] = []
+            template_spec['domain']['devices']['disks'].extend(spec_disks)
+
+            # Extract volumes k8s specification from disks list passed to Ansible:
+            spec_volumes = []
+            for d in disks:
+                volume = d['volume']
+                volume['name'] = d['name']
+                spec_volumes.append(self.merge_dicts(volume, defaults['volumes']))
+            if 'volumes' not in template_spec:
+                template_spec['volumes'] = []
+            template_spec['volumes'].extend(spec_volumes)
+
+    def find_supported_resource(self, kind):
+        results = self.client.resources.search(kind=kind, group=API_GROUP)
+        if not results:
+            self.fail('Failed to find resource {0} in {1}'.format(kind, API_GROUP))
+        sr = sorted(results, key=lambda r: KubeAPIVersion(r.api_version), reverse=True)
+        for r in sr:
+            if KubeAPIVersion(r.api_version) <= KubeAPIVersion(MAX_SUPPORTED_API_VERSION):
+                return r
+        self.fail("API versions {0} are too recent. 
Max supported is {1}/{2}.".format( + str([r.api_version for r in sr]), API_GROUP, MAX_SUPPORTED_API_VERSION)) + + def _construct_vm_definition(self, kind, definition, template, params, defaults=None): + self.client = self.get_api_client() + + disks = params.get('disks', []) + memory = params.get('memory') + memory_limit = params.get('memory_limit') + cpu_cores = params.get('cpu_cores') + cpu_model = params.get('cpu_model') + cpu_features = params.get('cpu_features') + labels = params.get('labels') + datavolumes = params.get('datavolumes') + interfaces = params.get('interfaces') + bootloader = params.get('bootloader') + cloud_init_nocloud = params.get('cloud_init_nocloud') + machine_type = params.get('machine_type') + headless = params.get('headless') + smbios_uuid = params.get('smbios_uuid') + hugepage_size = params.get('hugepage_size') + tablets = params.get('tablets') + cpu_shares = params.get('cpu_shares') + cpu_limit = params.get('cpu_limit') + node_affinity = params.get('node_affinity') + vm_affinity = params.get('affinity') + vm_anti_affinity = params.get('anti_affinity') + hostname = params.get('hostname') + subdomain = params.get('subdomain') + template_spec = template['spec'] + + # Merge additional flat parameters: + if memory: + template_spec['domain']['resources']['requests']['memory'] = memory + + if cpu_shares: + template_spec['domain']['resources']['requests']['cpu'] = cpu_shares + + if cpu_limit: + template_spec['domain']['resources']['limits']['cpu'] = cpu_limit + + if tablets: + for tablet in tablets: + tablet['type'] = 'tablet' + template_spec['domain']['devices']['inputs'] = tablets + + if memory_limit: + template_spec['domain']['resources']['limits']['memory'] = memory_limit + + if hugepage_size is not None: + template_spec['domain']['memory']['hugepages']['pageSize'] = hugepage_size + + if cpu_features is not None: + template_spec['domain']['cpu']['features'] = cpu_features + + if cpu_cores is not None: + template_spec['domain']['cpu']['cores'] = cpu_cores + + if cpu_model: + template_spec['domain']['cpu']['model'] = cpu_model + + if labels: + template['metadata']['labels'] = self.merge_dicts(labels, template['metadata']['labels']) + + if machine_type: + template_spec['domain']['machine']['type'] = machine_type + + if bootloader: + template_spec['domain']['firmware']['bootloader'] = {bootloader: {}} + + if smbios_uuid: + template_spec['domain']['firmware']['uuid'] = smbios_uuid + + if headless is not None: + template_spec['domain']['devices']['autoattachGraphicsDevice'] = not headless + + if vm_affinity or vm_anti_affinity: + vms_affinity = vm_affinity or vm_anti_affinity + affinity_name = 'podAffinity' if vm_affinity else 'podAntiAffinity' + for affinity in vms_affinity.get('soft', []): + if not template_spec['affinity'][affinity_name]['preferredDuringSchedulingIgnoredDuringExecution']: + template_spec['affinity'][affinity_name]['preferredDuringSchedulingIgnoredDuringExecution'] = [] + template_spec['affinity'][affinity_name]['preferredDuringSchedulingIgnoredDuringExecution'].append({ + 'weight': affinity.get('weight'), + 'podAffinityTerm': { + 'labelSelector': { + 'matchExpressions': affinity.get('term').get('match_expressions'), + }, + 'topologyKey': affinity.get('topology_key'), + }, + }) + for affinity in vms_affinity.get('hard', []): + if not template_spec['affinity'][affinity_name]['requiredDuringSchedulingIgnoredDuringExecution']: + template_spec['affinity'][affinity_name]['requiredDuringSchedulingIgnoredDuringExecution'] = [] + 
template_spec['affinity'][affinity_name]['requiredDuringSchedulingIgnoredDuringExecution'].append({ + 'labelSelector': { + 'matchExpressions': affinity.get('term').get('match_expressions'), + }, + 'topologyKey': affinity.get('topology_key'), + }) + + if node_affinity: + for affinity in node_affinity.get('soft', []): + if not template_spec['affinity']['nodeAffinity']['preferredDuringSchedulingIgnoredDuringExecution']: + template_spec['affinity']['nodeAffinity']['preferredDuringSchedulingIgnoredDuringExecution'] = [] + template_spec['affinity']['nodeAffinity']['preferredDuringSchedulingIgnoredDuringExecution'].append({ + 'weight': affinity.get('weight'), + 'preference': { + 'matchExpressions': affinity.get('term').get('match_expressions'), + } + }) + for affinity in node_affinity.get('hard', []): + if not template_spec['affinity']['nodeAffinity']['requiredDuringSchedulingIgnoredDuringExecution']['nodeSelectorTerms']: + template_spec['affinity']['nodeAffinity']['requiredDuringSchedulingIgnoredDuringExecution']['nodeSelectorTerms'] = [] + template_spec['affinity']['nodeAffinity']['requiredDuringSchedulingIgnoredDuringExecution']['nodeSelectorTerms'].append({ + 'matchExpressions': affinity.get('term').get('match_expressions'), + }) + + if hostname: + template_spec['hostname'] = hostname + + if subdomain: + template_spec['subdomain'] = subdomain + + # Define disks + self._define_disks(disks, template_spec, defaults) + + # Define cloud init disk if defined: + # Note, that this must be called after _define_disks, so the cloud_init + # is not first in order and it's not used as boot disk: + self._define_cloud_init(cloud_init_nocloud, template_spec) + + # Define interfaces: + self._define_interfaces(interfaces, template_spec, defaults) + + # Define datavolumes: + self._define_datavolumes(datavolumes, definition['spec']) + + return self.merge_dicts(definition, self.resource_definitions[0]) + + def construct_vm_definition(self, kind, definition, template, defaults=None): + definition = self._construct_vm_definition(kind, definition, template, self.params, defaults) + resource = self.find_supported_resource(kind) + definition = self.set_defaults(resource, definition) + return resource, definition + + def construct_vm_template_definition(self, kind, definition, template, params): + definition = self._construct_vm_definition(kind, definition, template, params) + resource = self.find_resource(kind, definition['apiVersion'], fail=True) + + # Set defaults: + definition['kind'] = kind + definition['metadata']['name'] = params.get('name') + definition['metadata']['namespace'] = params.get('namespace') + + return resource, definition + + def execute_crud(self, kind, definition): + """ Module execution """ + resource = self.find_supported_resource(kind) + definition = self.set_defaults(resource, definition) + return self.perform_action(resource, definition) diff --git a/plugins/module_utils/ldap.py b/plugins/module_utils/ldap.py new file mode 100644 index 0000000000..d49d0a97e8 --- /dev/null +++ b/plugins/module_utils/ldap.py @@ -0,0 +1,78 @@ +# -*- coding: utf-8 -*- + +# Copyright: (c) 2016, Peter Sagerson +# Copyright: (c) 2016, Jiri Tyr +# Copyright: (c) 2017-2018 Keller Fuchs (@KellerFuchs) +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +import traceback +from ansible.module_utils._text import to_native + +try: + import ldap + import ldap.sasl + + HAS_LDAP = True 
+except ImportError: + HAS_LDAP = False + + +def gen_specs(**specs): + specs.update({ + 'bind_dn': dict(), + 'bind_pw': dict(default='', no_log=True), + 'dn': dict(required=True), + 'server_uri': dict(default='ldapi:///'), + 'start_tls': dict(default=False, type='bool'), + 'validate_certs': dict(default=True, type='bool'), + }) + + return specs + + +class LdapGeneric(object): + def __init__(self, module): + # Shortcuts + self.module = module + self.bind_dn = self.module.params['bind_dn'] + self.bind_pw = self.module.params['bind_pw'] + self.dn = self.module.params['dn'] + self.server_uri = self.module.params['server_uri'] + self.start_tls = self.module.params['start_tls'] + self.verify_cert = self.module.params['validate_certs'] + + # Establish connection + self.connection = self._connect_to_ldap() + + def fail(self, msg, exn): + self.module.fail_json( + msg=msg, + details=to_native(exn), + exception=traceback.format_exc() + ) + + def _connect_to_ldap(self): + if not self.verify_cert: + ldap.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, ldap.OPT_X_TLS_NEVER) + + connection = ldap.initialize(self.server_uri) + + if self.start_tls: + try: + connection.start_tls_s() + except ldap.LDAPError as e: + self.fail("Cannot start TLS.", e) + + try: + if self.bind_dn is not None: + connection.simple_bind_s(self.bind_dn, self.bind_pw) + else: + connection.sasl_interactive_bind_s('', ldap.sasl.external()) + except ldap.LDAPError as e: + self.fail("Cannot bind to the server.", e) + + return connection diff --git a/plugins/module_utils/linode.py b/plugins/module_utils/linode.py new file mode 100644 index 0000000000..a631f74b2f --- /dev/null +++ b/plugins/module_utils/linode.py @@ -0,0 +1,37 @@ +# This code is part of Ansible, but is an independent component. +# This particular file snippet, and this file snippet only, is BSD licensed. +# Modules you write using this snippet, which is embedded dynamically by Ansible +# still belong to the author of the module, and may assign their own license +# to the complete work. +# +# Copyright (c), Luke Murphy @decentral1se +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without modification, +# are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+# + + +def get_user_agent(module): + """Retrieve a user-agent to send with LinodeClient requests.""" + try: + from ansible.module_utils.ansible_release import __version__ as ansible_version + except ImportError: + ansible_version = 'unknown' + return 'Ansible-%s/%s' % (module, ansible_version) diff --git a/plugins/module_utils/lxd.py b/plugins/module_utils/lxd.py new file mode 100644 index 0000000000..c53c3a76ab --- /dev/null +++ b/plugins/module_utils/lxd.py @@ -0,0 +1,142 @@ +# -*- coding: utf-8 -*- + +# (c) 2016, Hiroaki Nakamura +# +# This code is part of Ansible, but is an independent component. +# This particular file snippet, and this file snippet only, is BSD licensed. +# Modules you write using this snippet, which is embedded dynamically by Ansible +# still belong to the author of the module, and may assign their own license +# to the complete work. +# +# Redistribution and use in source and binary forms, with or without modification, +# are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +import socket +import ssl + +from ansible.module_utils.urls import generic_urlparse +from ansible.module_utils.six.moves.urllib.parse import urlparse +from ansible.module_utils.six.moves import http_client +from ansible.module_utils._text import to_text + +# httplib/http.client connection using unix domain socket +HTTPConnection = http_client.HTTPConnection +HTTPSConnection = http_client.HTTPSConnection + +import json + + +class UnixHTTPConnection(HTTPConnection): + def __init__(self, path): + HTTPConnection.__init__(self, 'localhost') + self.path = path + + def connect(self): + sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) + sock.connect(self.path) + self.sock = sock + + +class LXDClientException(Exception): + def __init__(self, msg, **kwargs): + self.msg = msg + self.kwargs = kwargs + + +class LXDClient(object): + def __init__(self, url, key_file=None, cert_file=None, debug=False): + """LXD Client. + + :param url: The URL of the LXD server. (e.g. unix:/var/lib/lxd/unix.socket or https://127.0.0.1) + :type url: ``str`` + :param key_file: The path of the client certificate key file. + :type key_file: ``str`` + :param cert_file: The path of the client certificate file. + :type cert_file: ``str`` + :param debug: The debug flag. The request and response are stored in logs when debug is true. 
+ :type debug: ``bool`` + """ + self.url = url + self.debug = debug + self.logs = [] + if url.startswith('https:'): + self.cert_file = cert_file + self.key_file = key_file + parts = generic_urlparse(urlparse(self.url)) + ctx = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH) + ctx.load_cert_chain(cert_file, keyfile=key_file) + self.connection = HTTPSConnection(parts.get('netloc'), context=ctx) + elif url.startswith('unix:'): + unix_socket_path = url[len('unix:'):] + self.connection = UnixHTTPConnection(unix_socket_path) + else: + raise LXDClientException('URL scheme must be unix: or https:') + + def do(self, method, url, body_json=None, ok_error_codes=None, timeout=None): + resp_json = self._send_request(method, url, body_json=body_json, ok_error_codes=ok_error_codes, timeout=timeout) + if resp_json['type'] == 'async': + url = '{0}/wait'.format(resp_json['operation']) + resp_json = self._send_request('GET', url) + if resp_json['metadata']['status'] != 'Success': + self._raise_err_from_json(resp_json) + return resp_json + + def authenticate(self, trust_password): + body_json = {'type': 'client', 'password': trust_password} + return self._send_request('POST', '/1.0/certificates', body_json=body_json) + + def _send_request(self, method, url, body_json=None, ok_error_codes=None, timeout=None): + try: + body = json.dumps(body_json) + self.connection.request(method, url, body=body) + resp = self.connection.getresponse() + resp_data = resp.read() + resp_data = to_text(resp_data, errors='surrogate_or_strict') + resp_json = json.loads(resp_data) + self.logs.append({ + 'type': 'sent request', + 'request': {'method': method, 'url': url, 'json': body_json, 'timeout': timeout}, + 'response': {'json': resp_json} + }) + resp_type = resp_json.get('type', None) + if resp_type == 'error': + if ok_error_codes is not None and resp_json['error_code'] in ok_error_codes: + return resp_json + if resp_json['error'] == "Certificate already in trust store": + return resp_json + self._raise_err_from_json(resp_json) + return resp_json + except socket.error as e: + raise LXDClientException('cannot connect to the LXD server', err=e) + + def _raise_err_from_json(self, resp_json): + err_params = {} + if self.debug: + err_params['logs'] = self.logs + raise LXDClientException(self._get_err_from_resp_json(resp_json), **err_params) + + @staticmethod + def _get_err_from_resp_json(resp_json): + err = None + metadata = resp_json.get('metadata', None) + if metadata is not None: + err = metadata.get('err', None) + if err is None: + err = resp_json.get('error', None) + return err diff --git a/plugins/module_utils/manageiq.py b/plugins/module_utils/manageiq.py new file mode 100644 index 0000000000..36e130f895 --- /dev/null +++ b/plugins/module_utils/manageiq.py @@ -0,0 +1,170 @@ +# +# Copyright (c) 2017, Daniel Korn +# +# This code is part of Ansible, but is an independent component. +# This particular file snippet, and this file snippet only, is BSD licensed. +# Modules you write using this snippet, which is embedded dynamically by Ansible +# still belong to the author of the module, and may assign their own license +# to the complete work. +# +# Redistribution and use in source and binary forms, with or without modification, +# are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. 
+# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +import os +import traceback + +from ansible.module_utils.basic import missing_required_lib + +CLIENT_IMP_ERR = None +try: + from manageiq_client.api import ManageIQClient + HAS_CLIENT = True +except ImportError: + CLIENT_IMP_ERR = traceback.format_exc() + HAS_CLIENT = False + + +def manageiq_argument_spec(): + options = dict( + url=dict(default=os.environ.get('MIQ_URL', None)), + username=dict(default=os.environ.get('MIQ_USERNAME', None)), + password=dict(default=os.environ.get('MIQ_PASSWORD', None), no_log=True), + token=dict(default=os.environ.get('MIQ_TOKEN', None), no_log=True), + validate_certs=dict(default=True, type='bool', aliases=['verify_ssl']), + ca_cert=dict(required=False, default=None, aliases=['ca_bundle_path']), + ) + + return dict( + manageiq_connection=dict(type='dict', + apply_defaults=True, + options=options), + ) + + +def check_client(module): + if not HAS_CLIENT: + module.fail_json(msg=missing_required_lib('manageiq-client'), exception=CLIENT_IMP_ERR) + + +def validate_connection_params(module): + params = module.params['manageiq_connection'] + error_str = "missing required argument: manageiq_connection[{}]" + url = params['url'] + token = params['token'] + username = params['username'] + password = params['password'] + + if (url and username and password) or (url and token): + return params + for arg in ['url', 'username', 'password']: + if params[arg] in (None, ''): + module.fail_json(msg=error_str.format(arg)) + + +def manageiq_entities(): + return { + 'provider': 'providers', 'host': 'hosts', 'vm': 'vms', + 'category': 'categories', 'cluster': 'clusters', 'data store': 'data_stores', + 'group': 'groups', 'resource pool': 'resource_pools', 'service': 'services', + 'service template': 'service_templates', 'template': 'templates', + 'tenant': 'tenants', 'user': 'users', 'blueprint': 'blueprints' + } + + +class ManageIQ(object): + """ + class encapsulating ManageIQ API client. 
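+
+    A rough usage sketch (hypothetical module code; the 'userid' filter is an
+    illustrative example, not a documented parameter):
+
+        manageiq = ManageIQ(module)
+        user = manageiq.find_collection_resource_by('users', userid='jdoe')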
+ """ + + def __init__(self, module): + # handle import errors + check_client(module) + + params = validate_connection_params(module) + + url = params['url'] + username = params['username'] + password = params['password'] + token = params['token'] + verify_ssl = params['validate_certs'] + ca_bundle_path = params['ca_cert'] + + self._module = module + self._api_url = url + '/api' + self._auth = dict(user=username, password=password, token=token) + try: + self._client = ManageIQClient(self._api_url, self._auth, verify_ssl=verify_ssl, ca_bundle_path=ca_bundle_path) + except Exception as e: + self.module.fail_json(msg="failed to open connection (%s): %s" % (url, str(e))) + + @property + def module(self): + """ Ansible module module + + Returns: + the ansible module + """ + return self._module + + @property + def api_url(self): + """ Base ManageIQ API + + Returns: + the base ManageIQ API + """ + return self._api_url + + @property + def client(self): + """ ManageIQ client + + Returns: + the ManageIQ client + """ + return self._client + + def find_collection_resource_by(self, collection_name, **params): + """ Searches the collection resource by the collection name and the param passed. + + Returns: + the resource as an object if it exists in manageiq, None otherwise. + """ + try: + entity = self.client.collections.__getattribute__(collection_name).get(**params) + except ValueError: + return None + except Exception as e: + self.module.fail_json(msg="failed to find resource {error}".format(error=e)) + return vars(entity) + + def find_collection_resource_or_fail(self, collection_name, **params): + """ Searches the collection resource by the collection name and the param passed. + + Returns: + the resource as an object if it exists in manageiq, Fail otherwise. + """ + resource = self.find_collection_resource_by(collection_name, **params) + if resource: + return resource + else: + msg = "{collection_name} where {params} does not exist in manageiq".format( + collection_name=collection_name, params=str(params)) + self.module.fail_json(msg=msg) diff --git a/plugins/module_utils/memset.py b/plugins/module_utils/memset.py new file mode 100644 index 0000000000..51dce0c690 --- /dev/null +++ b/plugins/module_utils/memset.py @@ -0,0 +1,151 @@ +# This code is part of Ansible, but is an independent component. +# This particular file snippet, and this file snippet only, is BSD licensed. +# Modules you write using this snippet, which is embedded dynamically by Ansible +# still belong to the author of the module, and may assign their own license +# to the complete work. +# +# Copyright (c) 2018, Simon Weald +# +# Redistribution and use in source and binary forms, with or without modification, +# are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
+# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from ansible.module_utils.six.moves.urllib.parse import urlencode
+from ansible.module_utils.urls import open_url, urllib_error
+from ansible.module_utils.basic import json
+
+
+class Response(object):
+    '''
+    Create a response object to mimic that of requests.
+    '''
+
+    def __init__(self):
+        self.content = None
+        self.status_code = None
+
+    def json(self):
+        return json.loads(self.content)
+
+
+def memset_api_call(api_key, api_method, payload=None):
+    '''
+    Generic function which returns results back to the calling function.
+
+    Requires an API key and an API method to assemble the API URL.
+    Returns response text to be analysed.
+    '''
+    # instantiate a response object
+    response = Response()
+
+    # if we've already started preloading the payload then copy it
+    # and use that, otherwise we need to instantiate it.
+    if payload is None:
+        payload = dict()
+    else:
+        payload = payload.copy()
+
+    # set some sane defaults
+    has_failed = False
+    msg = None
+
+    data = urlencode(payload)
+    headers = {'Content-Type': 'application/x-www-form-urlencoded'}
+    api_uri_base = 'https://api.memset.com/v1/json/'
+    api_uri = '{0}{1}/' . format(api_uri_base, api_method)
+
+    try:
+        resp = open_url(api_uri, data=data, headers=headers, method="POST", force_basic_auth=True, url_username=api_key)
+        response.content = resp.read().decode('utf-8')
+        response.status_code = resp.getcode()
+    except urllib_error.HTTPError as e:
+        try:
+            errorcode = e.code
+        except AttributeError:
+            errorcode = None
+
+        has_failed = True
+        response.content = e.read().decode('utf8')
+        response.status_code = errorcode
+
+        if response.status_code is not None:
+            msg = "Memset API returned a {0} response ({1}, {2})." . format(response.status_code, response.json()['error_type'], response.json()['error'])
+        else:
+            msg = "Memset API returned an error ({0}, {1})." . format(response.json()['error_type'], response.json()['error'])
+
+    if msg is None:
+        msg = response.json()
+
+    return(has_failed, msg, response)
+
+
+def check_zone_domain(data, domain):
+    '''
+    Returns true if the domain already exists, and false if not.
+    '''
+    exists = False
+
+    if data.status_code in [201, 200]:
+        for zone_domain in data.json():
+            if zone_domain['domain'] == domain:
+                exists = True
+
+    return(exists)
+
+
+def check_zone(data, name):
+    '''
+    Returns true if the zone already exists, and false if not.
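+    Also returns a count of zones matching the given nickname, so callers can
+    detect duplicates; 'exists' is only set when exactly one zone matches.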
+ ''' + counter = 0 + exists = False + + if data.status_code in [201, 200]: + for zone in data.json(): + if zone['nickname'] == name: + counter += 1 + if counter == 1: + exists = True + + return(exists, counter) + + +def get_zone_id(zone_name, current_zones): + ''' + Returns the zone's id if it exists and is unique + ''' + zone_exists = False + zone_id, msg = None, None + zone_list = [] + + for zone in current_zones: + if zone['nickname'] == zone_name: + zone_list.append(zone['id']) + + counter = len(zone_list) + + if counter == 0: + msg = 'No matching zone found' + elif counter == 1: + zone_id = zone_list[0] + zone_exists = True + elif counter > 1: + zone_id = None + msg = 'Zone ID could not be returned as duplicate zone names were detected' + + return(zone_exists, msg, counter, zone_id) diff --git a/plugins/module_utils/mysql.py b/plugins/module_utils/mysql.py new file mode 100644 index 0000000000..46198f367b --- /dev/null +++ b/plugins/module_utils/mysql.py @@ -0,0 +1,106 @@ +# This code is part of Ansible, but is an independent component. +# This particular file snippet, and this file snippet only, is BSD licensed. +# Modules you write using this snippet, which is embedded dynamically by Ansible +# still belong to the author of the module, and may assign their own license +# to the complete work. +# +# Copyright (c), Jonathan Mainguy , 2015 +# Most of this was originally added by Sven Schliesing @muffl0n in the mysql_user.py module +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without modification, +# are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +import os + +try: + import pymysql as mysql_driver + _mysql_cursor_param = 'cursor' +except ImportError: + try: + import MySQLdb as mysql_driver + import MySQLdb.cursors + _mysql_cursor_param = 'cursorclass' + except ImportError: + mysql_driver = None + +mysql_driver_fail_msg = 'The PyMySQL (Python 2.7 and Python 3.X) or MySQL-python (Python 2.X) module is required.' 
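+
+# A rough usage sketch (hypothetical credentials) for mysql_connect() below. The
+# cursor keyword differs between the drivers ('cursor' for PyMySQL, 'cursorclass'
+# for MySQLdb), which is why _mysql_cursor_param is resolved at import time above:
+#
+#   cursor, db_conn = mysql_connect(module, login_user='app', login_password='secret',
+#                                   cursor_class='DictCursor')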
+ + +def mysql_connect(module, login_user=None, login_password=None, config_file='', ssl_cert=None, ssl_key=None, ssl_ca=None, db=None, cursor_class=None, + connect_timeout=30, autocommit=False): + config = {} + + if ssl_ca is not None or ssl_key is not None or ssl_cert is not None: + config['ssl'] = {} + + if module.params['login_unix_socket']: + config['unix_socket'] = module.params['login_unix_socket'] + else: + config['host'] = module.params['login_host'] + config['port'] = module.params['login_port'] + + if os.path.exists(config_file): + config['read_default_file'] = config_file + + # If login_user or login_password are given, they should override the + # config file + if login_user is not None: + config['user'] = login_user + if login_password is not None: + config['passwd'] = login_password + if ssl_cert is not None: + config['ssl']['cert'] = ssl_cert + if ssl_key is not None: + config['ssl']['key'] = ssl_key + if ssl_ca is not None: + config['ssl']['ca'] = ssl_ca + if db is not None: + config['db'] = db + if connect_timeout is not None: + config['connect_timeout'] = connect_timeout + + if _mysql_cursor_param == 'cursor': + # In case of PyMySQL driver: + db_connection = mysql_driver.connect(autocommit=autocommit, **config) + else: + # In case of MySQLdb driver + db_connection = mysql_driver.connect(**config) + if autocommit: + db_connection.autocommit(True) + + if cursor_class == 'DictCursor': + return db_connection.cursor(**{_mysql_cursor_param: mysql_driver.cursors.DictCursor}), db_connection + else: + return db_connection.cursor(), db_connection + + +def mysql_common_argument_spec(): + return dict( + login_user=dict(type='str', default=None), + login_password=dict(type='str', no_log=True), + login_host=dict(type='str', default='localhost'), + login_port=dict(type='int', default=3306), + login_unix_socket=dict(type='str'), + config_file=dict(type='path', default='~/.my.cnf'), + connect_timeout=dict(type='int', default=30), + client_cert=dict(type='path', aliases=['ssl_cert']), + client_key=dict(type='path', aliases=['ssl_key']), + ca_cert=dict(type='path', aliases=['ssl_ca']), + ) diff --git a/plugins/module_utils/net_tools/__init__.py b/plugins/module_utils/net_tools/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/plugins/module_utils/net_tools/netbox/__init__.py b/plugins/module_utils/net_tools/netbox/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/plugins/module_utils/net_tools/nios/__init__.py b/plugins/module_utils/net_tools/nios/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/plugins/module_utils/net_tools/nios/api.py b/plugins/module_utils/net_tools/nios/api.py new file mode 100644 index 0000000000..2a759033e2 --- /dev/null +++ b/plugins/module_utils/net_tools/nios/api.py @@ -0,0 +1,601 @@ +# This code is part of Ansible, but is an independent component. +# This particular file snippet, and this file snippet only, is BSD licensed. +# Modules you write using this snippet, which is embedded dynamically by Ansible +# still belong to the author of the module, and may assign their own license +# to the complete work. +# +# (c) 2018 Red Hat Inc. +# +# Redistribution and use in source and binary forms, with or without modification, +# are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. 
+# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# + +import os +from functools import partial +from ansible.module_utils._text import to_native +from ansible.module_utils.six import iteritems +from ansible.module_utils._text import to_text +from ansible.module_utils.basic import env_fallback + +try: + from infoblox_client.connector import Connector + from infoblox_client.exceptions import InfobloxException + HAS_INFOBLOX_CLIENT = True +except ImportError: + HAS_INFOBLOX_CLIENT = False + +# defining nios constants +NIOS_DNS_VIEW = 'view' +NIOS_NETWORK_VIEW = 'networkview' +NIOS_HOST_RECORD = 'record:host' +NIOS_IPV4_NETWORK = 'network' +NIOS_IPV6_NETWORK = 'ipv6network' +NIOS_ZONE = 'zone_auth' +NIOS_PTR_RECORD = 'record:ptr' +NIOS_A_RECORD = 'record:a' +NIOS_AAAA_RECORD = 'record:aaaa' +NIOS_CNAME_RECORD = 'record:cname' +NIOS_MX_RECORD = 'record:mx' +NIOS_SRV_RECORD = 'record:srv' +NIOS_NAPTR_RECORD = 'record:naptr' +NIOS_TXT_RECORD = 'record:txt' +NIOS_NSGROUP = 'nsgroup' +NIOS_IPV4_FIXED_ADDRESS = 'fixedaddress' +NIOS_IPV6_FIXED_ADDRESS = 'ipv6fixedaddress' +NIOS_NEXT_AVAILABLE_IP = 'func:nextavailableip' +NIOS_IPV4_NETWORK_CONTAINER = 'networkcontainer' +NIOS_IPV6_NETWORK_CONTAINER = 'ipv6networkcontainer' +NIOS_MEMBER = 'member' + +NIOS_PROVIDER_SPEC = { + 'host': dict(fallback=(env_fallback, ['INFOBLOX_HOST'])), + 'username': dict(fallback=(env_fallback, ['INFOBLOX_USERNAME'])), + 'password': dict(fallback=(env_fallback, ['INFOBLOX_PASSWORD']), no_log=True), + 'validate_certs': dict(type='bool', default=False, fallback=(env_fallback, ['INFOBLOX_SSL_VERIFY']), aliases=['ssl_verify']), + 'silent_ssl_warnings': dict(type='bool', default=True), + 'http_request_timeout': dict(type='int', default=10, fallback=(env_fallback, ['INFOBLOX_HTTP_REQUEST_TIMEOUT'])), + 'http_pool_connections': dict(type='int', default=10), + 'http_pool_maxsize': dict(type='int', default=10), + 'max_retries': dict(type='int', default=3, fallback=(env_fallback, ['INFOBLOX_MAX_RETRIES'])), + 'wapi_version': dict(default='2.1', fallback=(env_fallback, ['INFOBLOX_WAP_VERSION'])), + 'max_results': dict(type='int', default=1000, fallback=(env_fallback, ['INFOBLOX_MAX_RETRIES'])) +} + + +def get_connector(*args, **kwargs): + ''' Returns an instance of infoblox_client.connector.Connector + :params args: positional arguments are silently ignored + :params kwargs: dict that is passed to Connector init + :returns: Connector + ''' + if not HAS_INFOBLOX_CLIENT: + raise Exception('infoblox-client is required but does not appear ' + 'to be installed. 
It can be installed using the '
+                        'command `pip install infoblox-client`')
+
+    if not set(kwargs.keys()).issubset(list(NIOS_PROVIDER_SPEC.keys()) + ['ssl_verify']):
+        raise Exception('invalid or unsupported keyword argument for connector')
+    for key, value in iteritems(NIOS_PROVIDER_SPEC):
+        if key not in kwargs:
+            # apply default values from NIOS_PROVIDER_SPEC since we cannot just
+            # assume the provider values are coming from AnsibleModule
+            if 'default' in value:
+                kwargs[key] = value['default']
+
+            # override any values with env variables unless they were
+            # explicitly set
+            env = ('INFOBLOX_%s' % key).upper()
+            if env in os.environ:
+                kwargs[key] = os.environ.get(env)
+
+    if 'validate_certs' in kwargs.keys():
+        kwargs['ssl_verify'] = kwargs['validate_certs']
+        kwargs.pop('validate_certs', None)
+
+    return Connector(kwargs)
+
+
+def normalize_extattrs(value):
+    ''' Normalize the extattrs field to the expected format
+    The module accepts extattrs as key/value pairs.  This method will
+    transform the key/value pairs into a structure suitable for
+    sending across WAPI in the format of:
+        extattrs: {
+            key: {
+                value: <value>
+            }
+        }
+    '''
+    return dict([(k, {'value': v}) for k, v in iteritems(value)])
+
+
+def flatten_extattrs(value):
+    ''' Flatten the key/value struct for extattrs
+    WAPI returns the extattrs field as a dict in the form of:
+        extattrs: {
+            key: {
+                value: <value>
+            }
+        }
+    This method will flatten the structure to:
+        extattrs: {
+            key: <value>
+        }
+    '''
+    return dict([(k, v['value']) for k, v in iteritems(value)])
+
+
+def member_normalize(member_spec):
+    ''' Transforms the member module arguments into a valid WAPI struct
+    This function will transform the arguments into a structure that
+    is a valid WAPI structure in the format of:
+        {
+            key: <value>,
+        }
+    It will remove any arguments that are set to None since WAPI will error on
+    that condition.
+    The remainder of the value validation is performed by WAPI.
+    Some parameters in ib_spec are passed as a list in order to pass the validation for elements.
+    In this function, they are converted to dictionaries.
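+    For example, a vip_setting argument passed as [{'address': '192.168.1.20'}]
+    is unwrapped to {'address': '192.168.1.20'} (illustrative value only).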
+ ''' + member_elements = ['vip_setting', 'ipv6_setting', 'lan2_port_setting', 'mgmt_port_setting', + 'pre_provisioning', 'network_setting', 'v6_network_setting', + 'ha_port_setting', 'lan_port_setting', 'lan2_physical_setting', + 'lan_ha_port_setting', 'mgmt_network_setting', 'v6_mgmt_network_setting'] + for key in member_spec.keys(): + if key in member_elements and member_spec[key] is not None: + member_spec[key] = member_spec[key][0] + if isinstance(member_spec[key], dict): + member_spec[key] = member_normalize(member_spec[key]) + elif isinstance(member_spec[key], list): + for x in member_spec[key]: + if isinstance(x, dict): + x = member_normalize(x) + elif member_spec[key] is None: + del member_spec[key] + return member_spec + + +class WapiBase(object): + ''' Base class for implementing Infoblox WAPI API ''' + provider_spec = {'provider': dict(type='dict', options=NIOS_PROVIDER_SPEC)} + + def __init__(self, provider): + self.connector = get_connector(**provider) + + def __getattr__(self, name): + try: + return self.__dict__[name] + except KeyError: + if name.startswith('_'): + raise AttributeError("'%s' object has no attribute '%s'" % (self.__class__.__name__, name)) + return partial(self._invoke_method, name) + + def _invoke_method(self, name, *args, **kwargs): + try: + method = getattr(self.connector, name) + return method(*args, **kwargs) + except InfobloxException as exc: + if hasattr(self, 'handle_exception'): + self.handle_exception(name, exc) + else: + raise + + +class WapiLookup(WapiBase): + ''' Implements WapiBase for lookup plugins ''' + def handle_exception(self, method_name, exc): + if ('text' in exc.response): + raise Exception(exc.response['text']) + else: + raise Exception(exc) + + +class WapiInventory(WapiBase): + ''' Implements WapiBase for dynamic inventory script ''' + pass + + +class WapiModule(WapiBase): + ''' Implements WapiBase for executing a NIOS module ''' + def __init__(self, module): + self.module = module + provider = module.params['provider'] + try: + super(WapiModule, self).__init__(provider) + except Exception as exc: + self.module.fail_json(msg=to_text(exc)) + + def handle_exception(self, method_name, exc): + ''' Handles any exceptions raised + This method will be called if an InfobloxException is raised for + any call to the instance of Connector and also, in case of generic + exception. This method will then gracefully fail the module. 
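+        Error responses that carry a 'text' field are reported to the user
+        verbatim via fail_json; any other exception is converted to native text.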
+        :args exc: instance of InfobloxException
+        '''
+        if ('text' in exc.response):
+            self.module.fail_json(
+                msg=exc.response['text'],
+                type=exc.response['Error'].split(':')[0],
+                code=exc.response.get('code'),
+                operation=method_name
+            )
+        else:
+            self.module.fail_json(msg=to_native(exc))
+
+    def run(self, ib_obj_type, ib_spec):
+        ''' Runs the module and performs configuration tasks
+        :args ib_obj_type: the WAPI object type to operate against
+        :args ib_spec: the specification for the WAPI object as a dict
+        :returns: a results dict
+        '''
+
+        update = new_name = None
+        state = self.module.params['state']
+        if state not in ('present', 'absent'):
+            self.module.fail_json(msg='state must be one of `present`, `absent`, got `%s`' % state)
+
+        result = {'changed': False}
+
+        obj_filter = dict([(k, self.module.params[k]) for k, v in iteritems(ib_spec) if v.get('ib_req')])
+
+        # get object reference
+        ib_obj_ref, update, new_name = self.get_object_ref(self.module, ib_obj_type, obj_filter, ib_spec)
+        proposed_object = {}
+        for key, value in iteritems(ib_spec):
+            if self.module.params[key] is not None:
+                if 'transform' in value:
+                    proposed_object[key] = value['transform'](self.module)
+                else:
+                    proposed_object[key] = self.module.params[key]
+
+        # If configure_for_dns is set to False, delete the default dns set in the param, else raise an error
+        if not proposed_object.get('configure_for_dns') and proposed_object.get('view') == 'default'\
+                and ib_obj_type == NIOS_HOST_RECORD:
+            del proposed_object['view']
+        elif not proposed_object.get('configure_for_dns') and proposed_object.get('view') != 'default'\
+                and ib_obj_type == NIOS_HOST_RECORD:
+            self.module.fail_json(msg='DNS Bypass is not allowed if DNS view is set other than \'default\'')
+
+        if ib_obj_ref:
+            if len(ib_obj_ref) > 1:
+                for each in ib_obj_ref:
+                    # To check for an existing A record with the same name as the input A record, by IP
+                    if each.get('ipv4addr') and each.get('ipv4addr') == proposed_object.get('ipv4addr'):
+                        current_object = each
+                    # To check for an existing host record with the same name as the input host record, by IP
+                    elif each.get('ipv4addrs')[0].get('ipv4addr') and each.get('ipv4addrs')[0].get('ipv4addr')\
+                            == proposed_object.get('ipv4addrs')[0].get('ipv4addr'):
+                        current_object = each
+                    # Else set the current_object to the input value
+                    else:
+                        current_object = obj_filter
+                        ref = None
+            else:
+                current_object = ib_obj_ref[0]
+                if 'extattrs' in current_object:
+                    current_object['extattrs'] = flatten_extattrs(current_object['extattrs'])
+                if current_object.get('_ref'):
+                    ref = current_object.pop('_ref')
+        else:
+            current_object = obj_filter
+            ref = None
+        # checks if the object type is member to normalize the attributes being passed
+        if (ib_obj_type == NIOS_MEMBER):
+            proposed_object = member_normalize(proposed_object)
+
+        # checks if the name field has been updated
+        if update and new_name:
+            proposed_object['name'] = new_name
+
+        check_remove = []
+        if (ib_obj_type == NIOS_HOST_RECORD):
+            # this check is for idempotency: if the same IP address is passed with
+            # add, the 'add' param is removed; the same is true for the remove case.
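+            # For example (hypothetical address): if the host record already holds
+            # 10.0.0.1 and the task passes {'ipv4addr': '10.0.0.1', 'add': True},
+            # the 'add' key is dropped so no spurious update is issued.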
+ if 'ipv4addrs' in [current_object and proposed_object]: + for each in current_object['ipv4addrs']: + if each['ipv4addr'] == proposed_object['ipv4addrs'][0]['ipv4addr']: + if 'add' in proposed_object['ipv4addrs'][0]: + del proposed_object['ipv4addrs'][0]['add'] + break + check_remove += each.values() + if proposed_object['ipv4addrs'][0]['ipv4addr'] not in check_remove: + if 'remove' in proposed_object['ipv4addrs'][0]: + del proposed_object['ipv4addrs'][0]['remove'] + + res = None + modified = not self.compare_objects(current_object, proposed_object) + if 'extattrs' in proposed_object: + proposed_object['extattrs'] = normalize_extattrs(proposed_object['extattrs']) + + # Checks if nios_next_ip param is passed in ipv4addrs/ipv4addr args + proposed_object = self.check_if_nios_next_ip_exists(proposed_object) + + if state == 'present': + if ref is None: + if not self.module.check_mode: + self.create_object(ib_obj_type, proposed_object) + result['changed'] = True + # Check if NIOS_MEMBER and the flag to call function create_token is set + elif (ib_obj_type == NIOS_MEMBER) and (proposed_object['create_token']): + proposed_object = None + # the function creates a token that can be used by a pre-provisioned member to join the grid + result['api_results'] = self.call_func('create_token', ref, proposed_object) + result['changed'] = True + elif modified: + if 'ipv4addrs' in proposed_object: + if ('add' not in proposed_object['ipv4addrs'][0]) and ('remove' not in proposed_object['ipv4addrs'][0]): + self.check_if_recordname_exists(obj_filter, ib_obj_ref, ib_obj_type, current_object, proposed_object) + + if (ib_obj_type in (NIOS_HOST_RECORD, NIOS_NETWORK_VIEW, NIOS_DNS_VIEW)): + run_update = True + proposed_object = self.on_update(proposed_object, ib_spec) + if 'ipv4addrs' in proposed_object: + if ('add' or 'remove') in proposed_object['ipv4addrs'][0]: + run_update, proposed_object = self.check_if_add_remove_ip_arg_exists(proposed_object) + if run_update: + res = self.update_object(ref, proposed_object) + result['changed'] = True + else: + res = ref + if (ib_obj_type in (NIOS_A_RECORD, NIOS_AAAA_RECORD, NIOS_PTR_RECORD, NIOS_SRV_RECORD)): + # popping 'view' key as update of 'view' is not supported with respect to a:record/aaaa:record/srv:record/ptr:record + proposed_object = self.on_update(proposed_object, ib_spec) + del proposed_object['view'] + if not self.module.check_mode: + res = self.update_object(ref, proposed_object) + result['changed'] = True + elif 'network_view' in proposed_object: + proposed_object.pop('network_view') + result['changed'] = True + if not self.module.check_mode and res is None: + proposed_object = self.on_update(proposed_object, ib_spec) + self.update_object(ref, proposed_object) + result['changed'] = True + + elif state == 'absent': + if ref is not None: + if 'ipv4addrs' in proposed_object: + if 'remove' in proposed_object['ipv4addrs'][0]: + self.check_if_add_remove_ip_arg_exists(proposed_object) + self.update_object(ref, proposed_object) + result['changed'] = True + elif not self.module.check_mode: + self.delete_object(ref) + result['changed'] = True + + return result + + def check_if_recordname_exists(self, obj_filter, ib_obj_ref, ib_obj_type, current_object, proposed_object): + ''' Send POST request if host record input name and retrieved ref name is same, + but input IP and retrieved IP is different''' + + if 'name' in (obj_filter and ib_obj_ref[0]) and ib_obj_type == NIOS_HOST_RECORD: + obj_host_name = obj_filter['name'] + ref_host_name = ib_obj_ref[0]['name'] + if 
'ipv4addrs' in (current_object and proposed_object):
+            current_ip_addr = current_object['ipv4addrs'][0]['ipv4addr']
+            proposed_ip_addr = proposed_object['ipv4addrs'][0]['ipv4addr']
+        elif 'ipv6addrs' in (current_object and proposed_object):
+            current_ip_addr = current_object['ipv6addrs'][0]['ipv6addr']
+            proposed_ip_addr = proposed_object['ipv6addrs'][0]['ipv6addr']
+
+        if obj_host_name == ref_host_name and current_ip_addr != proposed_ip_addr:
+            self.create_object(ib_obj_type, proposed_object)
+
+    def check_if_nios_next_ip_exists(self, proposed_object):
+        ''' Check if the nios_next_ip argument is passed in ipaddr while creating
+            a host record; if so, reformat the proposed object's ipv4addrs to pass
+            func:nextavailableip together with the ipaddr range, so the host record
+            is created with the next available IP in a single call, avoiding any
+            race condition '''
+
+        if 'ipv4addrs' in proposed_object:
+            if 'nios_next_ip' in proposed_object['ipv4addrs'][0]['ipv4addr']:
+                ip_range = self.module._check_type_dict(proposed_object['ipv4addrs'][0]['ipv4addr'])['nios_next_ip']
+                proposed_object['ipv4addrs'][0]['ipv4addr'] = NIOS_NEXT_AVAILABLE_IP + ':' + ip_range
+        elif 'ipv4addr' in proposed_object:
+            if 'nios_next_ip' in proposed_object['ipv4addr']:
+                ip_range = self.module._check_type_dict(proposed_object['ipv4addr'])['nios_next_ip']
+                proposed_object['ipv4addr'] = NIOS_NEXT_AVAILABLE_IP + ':' + ip_range
+
+        return proposed_object
+
+    def check_if_add_remove_ip_arg_exists(self, proposed_object):
+        '''
+        Checks whether the add/remove param is passed in the args and set to true;
+        if it is, the proposed dictionary is updated so that the IP is added to or
+        removed from the existing host record.  If the user passes the argument
+        with a false value, nothing is done.
+        :returns: True if the proposed object was changed based on add/remove, and
+            also the changed proposed_object.
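+        For example (hypothetical address), {'ipv4addrs': [{'ipv4addr': '10.0.0.1', 'add': True}]}
+        is rewritten as {'ipv4addrs+': [{'ipv4addr': '10.0.0.1'}]} before the WAPI update call.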
+ ''' + update = False + if 'add' in proposed_object['ipv4addrs'][0]: + if proposed_object['ipv4addrs'][0]['add']: + proposed_object['ipv4addrs+'] = proposed_object['ipv4addrs'] + del proposed_object['ipv4addrs'] + del proposed_object['ipv4addrs+'][0]['add'] + update = True + else: + del proposed_object['ipv4addrs'][0]['add'] + elif 'remove' in proposed_object['ipv4addrs'][0]: + if proposed_object['ipv4addrs'][0]['remove']: + proposed_object['ipv4addrs-'] = proposed_object['ipv4addrs'] + del proposed_object['ipv4addrs'] + del proposed_object['ipv4addrs-'][0]['remove'] + update = True + else: + del proposed_object['ipv4addrs'][0]['remove'] + return update, proposed_object + + def issubset(self, item, objects): + ''' Checks if item is a subset of objects + :args item: the subset item to validate + :args objects: superset list of objects to validate against + :returns: True if item is a subset of one entry in objects otherwise + this method will return None + ''' + for obj in objects: + if isinstance(item, dict): + if all(entry in obj.items() for entry in item.items()): + return True + else: + if item in obj: + return True + + def compare_objects(self, current_object, proposed_object): + for key, proposed_item in iteritems(proposed_object): + current_item = current_object.get(key) + + # if proposed has a key that current doesn't then the objects are + # not equal and False will be immediately returned + if current_item is None: + return False + + elif isinstance(proposed_item, list): + for subitem in proposed_item: + if not self.issubset(subitem, current_item): + return False + + elif isinstance(proposed_item, dict): + return self.compare_objects(current_item, proposed_item) + + else: + if current_item != proposed_item: + return False + + return True + + def get_object_ref(self, module, ib_obj_type, obj_filter, ib_spec): + ''' this function gets the reference object of pre-existing nios objects ''' + + update = False + old_name = new_name = None + if ('name' in obj_filter): + # gets and returns the current object based on name/old_name passed + try: + name_obj = self.module._check_type_dict(obj_filter['name']) + old_name = name_obj['old_name'] + new_name = name_obj['new_name'] + except TypeError: + name = obj_filter['name'] + + if old_name and new_name: + if (ib_obj_type == NIOS_HOST_RECORD): + test_obj_filter = dict([('name', old_name), ('view', obj_filter['view'])]) + elif (ib_obj_type in (NIOS_AAAA_RECORD, NIOS_A_RECORD)): + test_obj_filter = obj_filter + else: + test_obj_filter = dict([('name', old_name)]) + # get the object reference + ib_obj = self.get_object(ib_obj_type, test_obj_filter, return_fields=ib_spec.keys()) + if ib_obj: + obj_filter['name'] = new_name + else: + test_obj_filter['name'] = new_name + ib_obj = self.get_object(ib_obj_type, test_obj_filter, return_fields=ib_spec.keys()) + update = True + return ib_obj, update, new_name + if (ib_obj_type == NIOS_HOST_RECORD): + # to check only by name if dns bypassing is set + if not obj_filter['configure_for_dns']: + test_obj_filter = dict([('name', name)]) + else: + test_obj_filter = dict([('name', name), ('view', obj_filter['view'])]) + elif (ib_obj_type == NIOS_IPV4_FIXED_ADDRESS or ib_obj_type == NIOS_IPV6_FIXED_ADDRESS and 'mac' in obj_filter): + test_obj_filter = dict([['mac', obj_filter['mac']]]) + elif (ib_obj_type == NIOS_A_RECORD): + # resolves issue where a_record with uppercase name was returning null and was failing + test_obj_filter = obj_filter + test_obj_filter['name'] = test_obj_filter['name'].lower() + # resolves 
issue where multiple a_records exist with the same name but different IP addresses
+                try:
+                    ipaddr_obj = self.module._check_type_dict(obj_filter['ipv4addr'])
+                    ipaddr = ipaddr_obj['old_ipv4addr']
+                except TypeError:
+                    ipaddr = obj_filter['ipv4addr']
+                test_obj_filter['ipv4addr'] = ipaddr
+            elif (ib_obj_type == NIOS_TXT_RECORD):
+                # resolves issue where multiple txt_records exist with the same name but different text
+                test_obj_filter = obj_filter
+                try:
+                    text_obj = self.module._check_type_dict(obj_filter['text'])
+                    txt = text_obj['old_text']
+                except TypeError:
+                    txt = obj_filter['text']
+                test_obj_filter['text'] = txt
+            # if test_obj_filter is empty, copy the passed obj_filter
+            else:
+                test_obj_filter = obj_filter
+            ib_obj = self.get_object(ib_obj_type, test_obj_filter.copy(), return_fields=ib_spec.keys())
+        elif (ib_obj_type == NIOS_A_RECORD):
+            # resolves issue where multiple a_records exist with the same name but different IP addresses
+            test_obj_filter = obj_filter
+            try:
+                ipaddr_obj = self.module._check_type_dict(obj_filter['ipv4addr'])
+                ipaddr = ipaddr_obj['old_ipv4addr']
+            except TypeError:
+                ipaddr = obj_filter['ipv4addr']
+            test_obj_filter['ipv4addr'] = ipaddr
+            ib_obj = self.get_object(ib_obj_type, test_obj_filter.copy(), return_fields=ib_spec.keys())
+        elif (ib_obj_type == NIOS_TXT_RECORD):
+            # resolves issue where multiple txt_records exist with the same name but different text
+            test_obj_filter = obj_filter
+            try:
+                text_obj = self.module._check_type_dict(obj_filter['text'])
+                txt = text_obj['old_text']
+            except TypeError:
+                txt = obj_filter['text']
+            test_obj_filter['text'] = txt
+            ib_obj = self.get_object(ib_obj_type, test_obj_filter.copy(), return_fields=ib_spec.keys())
+        elif (ib_obj_type == NIOS_ZONE):
+            # del key 'restart_if_needed' as nios_zone get_object fails with the key present
+            temp = ib_spec['restart_if_needed']
+            del ib_spec['restart_if_needed']
+            ib_obj = self.get_object(ib_obj_type, obj_filter.copy(), return_fields=ib_spec.keys())
+            # reinstate restart_if_needed if ib_obj is none, meaning there's no existing nios_zone ref
+            if not ib_obj:
+                ib_spec['restart_if_needed'] = temp
+        elif (ib_obj_type == NIOS_MEMBER):
+            # del key 'create_token' as nios_member get_object fails with the key present
+            temp = ib_spec['create_token']
+            del ib_spec['create_token']
+            ib_obj = self.get_object(ib_obj_type, obj_filter.copy(), return_fields=ib_spec.keys())
+            if temp:
+                # reinstate the 'create_token' key
+                ib_spec['create_token'] = temp
+        else:
+            ib_obj = self.get_object(ib_obj_type, obj_filter.copy(), return_fields=ib_spec.keys())
+        return ib_obj, update, new_name
+
+    def on_update(self, proposed_object, ib_spec):
+        ''' Event called before the update is sent to the API endpoint
+        This method will allow the final proposed object to be changed
+        and/or keys filtered before it is sent to the API endpoint to
+        be processed.
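+        Keys whose ib_spec entry sets update=False are filtered out of the
+        proposed object here before it is sent.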
+ :args proposed_object: A dict item that will be encoded and sent + the API endpoint with the updated data structure + :returns: updated object to be sent to API endpoint + ''' + keys = set() + for key, value in iteritems(proposed_object): + update = ib_spec[key].get('update', True) + if not update: + keys.add(key) + return dict([(k, v) for k, v in iteritems(proposed_object) if k not in keys]) diff --git a/plugins/module_utils/network/__init__.py b/plugins/module_utils/network/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/plugins/module_utils/network/a10/__init__.py b/plugins/module_utils/network/a10/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/plugins/module_utils/network/a10/a10.py b/plugins/module_utils/network/a10/a10.py new file mode 100644 index 0000000000..bf713702e4 --- /dev/null +++ b/plugins/module_utils/network/a10/a10.py @@ -0,0 +1,153 @@ +# This code is part of Ansible, but is an independent component. +# This particular file snippet, and this file snippet only, is BSD licensed. +# Modules you write using this snippet, which is embedded dynamically by Ansible +# still belong to the author of the module, and may assign their own license +# to the complete work. +# +# Copyright (c), Michael DeHaan , 2012-2013 +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without modification, +# are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
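+#
+# A rough usage sketch (hypothetical host, credentials and method name) for the
+# aXAPI v2 helpers defined below:
+#
+#   base_url = 'https://a10.example.com/services/rest/V2.1/?format=json'
+#   session_url = axapi_authenticate(module, base_url, 'admin', 'secret')
+#   result = axapi_call(module, session_url + '&method=slb.virtual_server.getAll')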
+ +import json + +from ansible.module_utils.urls import fetch_url + + +AXAPI_PORT_PROTOCOLS = { + 'tcp': 2, + 'udp': 3, +} + +AXAPI_VPORT_PROTOCOLS = { + 'tcp': 2, + 'udp': 3, + 'fast-http': 9, + 'http': 11, + 'https': 12, +} + + +def a10_argument_spec(): + return dict( + host=dict(type='str', required=True), + username=dict(type='str', aliases=['user', 'admin'], required=True), + password=dict(type='str', aliases=['pass', 'pwd'], required=True, no_log=True), + write_config=dict(type='bool', default=False) + ) + + +def axapi_failure(result): + if 'response' in result and result['response'].get('status') == 'fail': + return True + return False + + +def axapi_call(module, url, post=None): + ''' + Returns a datastructure based on the result of the API call + ''' + rsp, info = fetch_url(module, url, data=post) + if not rsp or info['status'] >= 400: + module.fail_json(msg="failed to connect (status code %s), error was %s" % (info['status'], info.get('msg', 'no error given'))) + try: + raw_data = rsp.read() + data = json.loads(raw_data) + except ValueError: + # at least one API call (system.action.write_config) returns + # XML even when JSON is requested, so do some minimal handling + # here to prevent failing even when the call succeeded + if 'status="ok"' in raw_data.lower(): + data = {"response": {"status": "OK"}} + else: + data = {"response": {"status": "fail", "err": {"msg": raw_data}}} + except Exception: + module.fail_json(msg="could not read the result from the host") + finally: + rsp.close() + return data + + +def axapi_authenticate(module, base_url, username, password): + url = '%s&method=authenticate&username=%s&password=%s' % (base_url, username, password) + result = axapi_call(module, url) + if axapi_failure(result): + return module.fail_json(msg=result['response']['err']['msg']) + sessid = result['session_id'] + return base_url + '&session_id=' + sessid + + +def axapi_authenticate_v3(module, base_url, username, password): + url = base_url + auth_payload = {"credentials": {"username": username, "password": password}} + result = axapi_call_v3(module, url, method='POST', body=json.dumps(auth_payload)) + if axapi_failure(result): + return module.fail_json(msg=result['response']['err']['msg']) + signature = result['authresponse']['signature'] + return signature + + +def axapi_call_v3(module, url, method=None, body=None, signature=None): + ''' + Returns a datastructure based on the result of the API call + ''' + if signature: + headers = {'content-type': 'application/json', 'Authorization': 'A10 %s' % signature} + else: + headers = {'content-type': 'application/json'} + rsp, info = fetch_url(module, url, method=method, data=body, headers=headers) + if not rsp or info['status'] >= 400: + module.fail_json(msg="failed to connect (status code %s), error was %s" % (info['status'], info.get('msg', 'no error given'))) + try: + raw_data = rsp.read() + data = json.loads(raw_data) + except ValueError: + # at least one API call (system.action.write_config) returns + # XML even when JSON is requested, so do some minimal handling + # here to prevent failing even when the call succeeded + if 'status="ok"' in raw_data.lower(): + data = {"response": {"status": "OK"}} + else: + data = {"response": {"status": "fail", "err": {"msg": raw_data}}} + except Exception: + module.fail_json(msg="could not read the result from the host") + finally: + rsp.close() + return data + + +def axapi_enabled_disabled(flag): + ''' + The axapi uses 0/1 integer values for flags, rather than strings + or booleans, so convert the 
+    given flag to a 0 or 1. For now, params
+    are specified as strings only, so that's what we check.
+    '''
+    if flag == 'enabled':
+        return 1
+    else:
+        return 0
+
+
+def axapi_get_port_protocol(protocol):
+    return AXAPI_PORT_PROTOCOLS.get(protocol.lower(), None)
+
+
+def axapi_get_vport_protocol(protocol):
+    return AXAPI_VPORT_PROTOCOLS.get(protocol.lower(), None)
diff --git a/plugins/module_utils/network/aci/__init__.py b/plugins/module_utils/network/aci/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/plugins/module_utils/network/aireos/__init__.py b/plugins/module_utils/network/aireos/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/plugins/module_utils/network/aireos/aireos.py b/plugins/module_utils/network/aireos/aireos.py
new file mode 100644
index 0000000000..e5db446ad9
--- /dev/null
+++ b/plugins/module_utils/network/aireos/aireos.py
@@ -0,0 +1,129 @@
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by Ansible
+# still belong to the author of the module, and may assign their own license
+# to the complete work.
+#
+# (c) 2016 Red Hat Inc.
+#
+# Redistribution and use in source and binary forms, with or without modification,
+# are permitted provided that the following conditions are met:
+#
+#    * Redistributions of source code must retain the above copyright
+#      notice, this list of conditions and the following disclaimer.
+#    * Redistributions in binary form must reproduce the above copyright notice,
+#      this list of conditions and the following disclaimer in the documentation
+#      and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
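+#
+# Usage sketch (illustrative only, not part of this file): a command module
+# built on these helpers would typically merge the shared provider spec
+# defined below into its own argument spec and reuse run_commands(), e.g.:
+#
+#     from ansible.module_utils.basic import AnsibleModule
+#
+#     argument_spec = dict(commands=dict(type='list', required=True))
+#     argument_spec.update(aireos_argument_spec)
+#     module = AnsibleModule(argument_spec=argument_spec)
+#     responses = run_commands(module, module.params['commands'])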
+# +from ansible.module_utils._text import to_text +from ansible.module_utils.basic import env_fallback +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import to_list, ComplexList +from ansible.module_utils.connection import exec_command + +_DEVICE_CONFIGS = {} + +aireos_provider_spec = { + 'host': dict(), + 'port': dict(type='int'), + 'username': dict(fallback=(env_fallback, ['ANSIBLE_NET_USERNAME'])), + 'password': dict(fallback=(env_fallback, ['ANSIBLE_NET_PASSWORD']), no_log=True), + 'ssh_keyfile': dict(fallback=(env_fallback, ['ANSIBLE_NET_SSH_KEYFILE']), type='path'), + 'timeout': dict(type='int'), +} +aireos_argument_spec = { + 'provider': dict(type='dict', options=aireos_provider_spec) +} + +aireos_top_spec = { + 'host': dict(removed_in_version=2.9), + 'port': dict(removed_in_version=2.9, type='int'), + 'username': dict(removed_in_version=2.9), + 'password': dict(removed_in_version=2.9, no_log=True), + 'ssh_keyfile': dict(removed_in_version=2.9, type='path'), + 'timeout': dict(removed_in_version=2.9, type='int'), +} +aireos_argument_spec.update(aireos_top_spec) + + +def sanitize(resp): + # Takes response from device and strips whitespace from all lines + # Aireos adds in extra preceding whitespace which netcfg parses as children/parents, which Aireos does not do + # Aireos also adds in trailing whitespace that is unused + cleaned = [] + for line in resp.splitlines(): + cleaned.append(line.strip()) + return '\n'.join(cleaned).strip() + + +def get_provider_argspec(): + return aireos_provider_spec + + +def check_args(module, warnings): + pass + + +def get_config(module, flags=None): + flags = [] if flags is None else flags + + cmd = 'show run-config commands ' + cmd += ' '.join(flags) + cmd = cmd.strip() + + try: + return _DEVICE_CONFIGS[cmd] + except KeyError: + rc, out, err = exec_command(module, cmd) + if rc != 0: + module.fail_json(msg='unable to retrieve current config', stderr=to_text(err, errors='surrogate_then_replace')) + cfg = sanitize(to_text(out, errors='surrogate_then_replace').strip()) + _DEVICE_CONFIGS[cmd] = cfg + return cfg + + +def to_commands(module, commands): + spec = { + 'command': dict(key=True), + 'prompt': dict(), + 'answer': dict() + } + transform = ComplexList(spec, module) + return transform(commands) + + +def run_commands(module, commands, check_rc=True): + responses = list() + commands = to_commands(module, to_list(commands)) + for cmd in commands: + cmd = module.jsonify(cmd) + rc, out, err = exec_command(module, cmd) + if check_rc and rc != 0: + module.fail_json(msg=to_text(err, errors='surrogate_then_replace'), rc=rc) + responses.append(sanitize(to_text(out, errors='surrogate_then_replace'))) + return responses + + +def load_config(module, commands): + + rc, out, err = exec_command(module, 'config') + if rc != 0: + module.fail_json(msg='unable to enter configuration mode', err=to_text(out, errors='surrogate_then_replace')) + + for command in to_list(commands): + if command == 'end': + continue + rc, out, err = exec_command(module, command) + if rc != 0: + module.fail_json(msg=to_text(err, errors='surrogate_then_replace'), command=command, rc=rc) + + exec_command(module, 'end') diff --git a/plugins/module_utils/network/aos/__init__.py b/plugins/module_utils/network/aos/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/plugins/module_utils/network/aos/aos.py b/plugins/module_utils/network/aos/aos.py new file mode 100644 index 0000000000..092bbf5b4a --- /dev/null +++ 
b/plugins/module_utils/network/aos/aos.py @@ -0,0 +1,180 @@ +# +# Copyright (c) 2017 Apstra Inc, +# +# This code is part of Ansible, but is an independent component. +# This particular file snippet, and this file snippet only, is BSD licensed. +# Modules you write using this snippet, which is embedded dynamically by Ansible +# still belong to the author of the module, and may assign their own license +# to the complete work. +# +# Redistribution and use in source and binary forms, with or without modification, +# are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# + +""" +This module adds shared support for Apstra AOS modules + +In order to use this module, include it as part of your module + +from ansible.module_utils.network.aos.aos import (check_aos_version, get_aos_session, find_collection_item, + content_to_dict, do_load_resource) + +""" +import json + +from distutils.version import LooseVersion + +try: + import yaml + HAS_YAML = True +except ImportError: + HAS_YAML = False + +try: + from apstra.aosom.session import Session + + HAS_AOS_PYEZ = True +except ImportError: + HAS_AOS_PYEZ = False + +from ansible.module_utils._text import to_native + + +def check_aos_version(module, min=False): + """ + Check if the library aos-pyez is present. + If provided, also check if the minimum version requirement is met + """ + if not HAS_AOS_PYEZ: + module.fail_json(msg='aos-pyez is not installed. Please see details ' + 'here: https://github.com/Apstra/aos-pyez') + + elif min: + import apstra.aosom + AOS_PYEZ_VERSION = apstra.aosom.__version__ + + if LooseVersion(AOS_PYEZ_VERSION) < LooseVersion(min): + module.fail_json(msg='aos-pyez >= %s is required for this module' % min) + + return True + + +def get_aos_session(module, auth): + """ + Resume an existing session and return an AOS object. 
+ + Args: + auth (dict): An AOS session as obtained by aos_login module blocks:: + + dict( token=, + server=, + port= + ) + + Return: + Aos object + """ + + check_aos_version(module) + + aos = Session() + aos.session = auth + + return aos + + +def find_collection_item(collection, item_name=False, item_id=False): + """ + Find collection_item based on name or id from a collection object + Both Collection_item and Collection Objects are provided by aos-pyez library + + Return + collection_item: object corresponding to the collection type + """ + my_dict = None + + if item_name: + my_dict = collection.find(label=item_name) + elif item_id: + my_dict = collection.find(uid=item_id) + + if my_dict is None: + return collection[''] + else: + return my_dict + + +def content_to_dict(module, content): + """ + Convert 'content' into a Python Dict based on 'content_format' + """ + + # if not HAS_YAML: + # module.fail_json(msg="Python Library Yaml is not present, mandatory to use 'content'") + + content_dict = None + + # try: + # content_dict = json.loads(content.replace("\'", '"')) + # except: + # module.fail_json(msg="Unable to convert 'content' from JSON, please check if valid") + # + # elif format in ['yaml', 'var']: + + try: + content_dict = yaml.safe_load(content) + + if not isinstance(content_dict, dict): + raise Exception() + + # Check if dict is empty and return an error if it's + if not content_dict: + raise Exception() + + except Exception: + module.fail_json(msg="Unable to convert 'content' to a dict, please check if valid") + + # replace the string with the dict + module.params['content'] = content_dict + + return content_dict + + +def do_load_resource(module, collection, name): + """ + Create a new object (collection.item) by loading a datastructure directly + """ + + try: + item = find_collection_item(collection, name, '') + except Exception: + module.fail_json(msg="An error occurred while running 'find_collection_item'") + + if item.exists: + module.exit_json(changed=False, name=item.name, id=item.id, value=item.value) + + # If not in check mode, apply the changes + if not module.check_mode: + try: + item.datum = module.params['content'] + item.write() + except Exception as e: + module.fail_json(msg="Unable to write item content : %r" % to_native(e)) + + module.exit_json(changed=True, name=item.name, id=item.id, value=item.value) diff --git a/plugins/module_utils/network/apconos/__init__.py b/plugins/module_utils/network/apconos/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/plugins/module_utils/network/apconos/apconos.py b/plugins/module_utils/network/apconos/apconos.py new file mode 100644 index 0000000000..1b9eebcda8 --- /dev/null +++ b/plugins/module_utils/network/apconos/apconos.py @@ -0,0 +1,113 @@ +# This code is part of Ansible, but is an independent component. +# This particular file snippet, and this file snippet only, is BSD licensed. +# Modules you write using this snippet, which is embedded dynamically by +# Ansible still belong to the author of the module, and may assign their own +# license to the complete work. +# +# Copyright (C) 2019 APCON, Inc. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. 
+# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. +# +# Contains utility methods +# APCON Networking + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +from ansible.module_utils._text import to_text +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import EntityCollection +from ansible.module_utils.connection import Connection, exec_command +from ansible.module_utils.connection import ConnectionError + +_DEVICE_CONFIGS = {} +_CONNECTION = None + + +command_spec = { + 'command': dict(key=True), +} + + +def check_args(module, warnings): + pass + + +def get_connection(module): + global _CONNECTION + if _CONNECTION: + return _CONNECTION + _CONNECTION = Connection(module._socket_path) + + return _CONNECTION + + +def get_config(module, flags=None): + flags = [] if flags is None else flags + + cmd = ' '.join(flags).strip() + + try: + return _DEVICE_CONFIGS[cmd] + except KeyError: + conn = get_connection(module) + out = conn.get(cmd) + cfg = to_text(out, errors='surrogate_then_replace').strip() + _DEVICE_CONFIGS[cmd] = cfg + return cfg + + +def run_commands(module, commands, check_rc=True): + connection = get_connection(module) + transform = EntityCollection(module, command_spec) + commands = transform(commands) + + responses = list() + + for cmd in commands: + out = connection.get(**cmd) + responses.append(to_text(out, errors='surrogate_then_replace')) + + return responses + + +def load_config(module, config): + try: + conn = get_connection(module) + conn.edit_config(config) + except ConnectionError as exc: + module.fail_json(msg=to_text(exc)) + + +def get_defaults_flag(module): + rc, out, err = exec_command(module, 'display running-config ?') + out = to_text(out, errors='surrogate_then_replace') + + commands = set() + for line in out.splitlines(): + if line: + commands.add(line.strip().split()[0]) + + if 'all' in commands: + return 'all' + else: + return 'full' diff --git a/plugins/module_utils/network/aruba/__init__.py b/plugins/module_utils/network/aruba/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/plugins/module_utils/network/aruba/aruba.py b/plugins/module_utils/network/aruba/aruba.py new file mode 100644 index 0000000000..0499a44315 --- /dev/null +++ b/plugins/module_utils/network/aruba/aruba.py @@ -0,0 +1,131 @@ +# This code is part of Ansible, but is an independent component. +# This particular file snippet, and this file snippet only, is BSD licensed. 
+# Modules you write using this snippet, which is embedded dynamically by Ansible +# still belong to the author of the module, and may assign their own license +# to the complete work. +# +# (c) 2016 Red Hat Inc. +# +# Redistribution and use in source and binary forms, with or without modification, +# are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# + +import re + +from ansible.module_utils._text import to_text +from ansible.module_utils.basic import env_fallback +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import to_list, ComplexList +from ansible.module_utils.connection import exec_command + +_DEVICE_CONFIGS = {} + +aruba_provider_spec = { + 'host': dict(), + 'port': dict(type='int'), + 'username': dict(fallback=(env_fallback, ['ANSIBLE_NET_USERNAME'])), + 'password': dict(fallback=(env_fallback, ['ANSIBLE_NET_PASSWORD']), no_log=True), + 'ssh_keyfile': dict(fallback=(env_fallback, ['ANSIBLE_NET_SSH_KEYFILE']), type='path'), + 'timeout': dict(type='int'), +} +aruba_argument_spec = { + 'provider': dict(type='dict', options=aruba_provider_spec) +} + +aruba_top_spec = { + 'host': dict(removed_in_version=2.9), + 'port': dict(removed_in_version=2.9, type='int'), + 'username': dict(removed_in_version=2.9), + 'password': dict(removed_in_version=2.9, no_log=True), + 'ssh_keyfile': dict(removed_in_version=2.9, type='path'), + 'timeout': dict(removed_in_version=2.9, type='int'), +} + +aruba_argument_spec.update(aruba_top_spec) + + +def get_provider_argspec(): + return aruba_provider_spec + + +def check_args(module, warnings): + pass + + +def get_config(module, flags=None): + flags = [] if flags is None else flags + + cmd = 'show running-config ' + cmd += ' '.join(flags) + cmd = cmd.strip() + + try: + return _DEVICE_CONFIGS[cmd] + except KeyError: + rc, out, err = exec_command(module, cmd) + if rc != 0: + module.fail_json(msg='unable to retrieve current config', stderr=to_text(err, errors='surrogate_then_replace')) + cfg = sanitize(to_text(out, errors='surrogate_then_replace').strip()) + _DEVICE_CONFIGS[cmd] = cfg + return cfg + + +def sanitize(resp): + # Takes response from device and adjusts leading whitespace to just 1 space + cleaned = [] + for line in resp.splitlines(): + cleaned.append(re.sub(r"^\s+", " ", line)) + return '\n'.join(cleaned).strip() + + +def to_commands(module, commands): + spec = { + 'command': 
dict(key=True), + 'prompt': dict(), + 'answer': dict() + } + transform = ComplexList(spec, module) + return transform(commands) + + +def run_commands(module, commands, check_rc=True): + responses = list() + commands = to_commands(module, to_list(commands)) + for cmd in commands: + cmd = module.jsonify(cmd) + rc, out, err = exec_command(module, cmd) + if check_rc and rc != 0: + module.fail_json(msg=to_text(err, errors='surrogate_then_replace'), rc=rc) + responses.append(to_text(out, errors='surrogate_then_replace')) + return responses + + +def load_config(module, commands): + + rc, out, err = exec_command(module, 'configure terminal') + if rc != 0: + module.fail_json(msg='unable to enter configuration mode', err=to_text(out, errors='surrogate_then_replace')) + + for command in to_list(commands): + if command == 'end': + continue + rc, out, err = exec_command(module, command) + if rc != 0: + module.fail_json(msg=to_text(err, errors='surrogate_then_replace'), command=command, rc=rc) + + exec_command(module, 'end') diff --git a/plugins/module_utils/network/avi/__init__.py b/plugins/module_utils/network/avi/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/plugins/module_utils/network/avi/ansible_utils.py b/plugins/module_utils/network/avi/ansible_utils.py new file mode 100644 index 0000000000..2dea53319a --- /dev/null +++ b/plugins/module_utils/network/avi/ansible_utils.py @@ -0,0 +1,572 @@ +from __future__ import absolute_import + +""" +Created on Aug 16, 2016 + +@author: Gaurav Rastogi (grastogi@avinetworks.com) +""" +import os +import re +import logging +import sys +from copy import deepcopy +from ansible.module_utils.basic import env_fallback + +try: + from ansible_collections.community.general.plugins.module_utils.network.avi.avi_api import ( + ApiSession, ObjectNotFound, avi_sdk_syslog_logger, AviCredentials, HAS_AVI) +except ImportError: + HAS_AVI = False + + +if os.environ.get('AVI_LOG_HANDLER', '') != 'syslog': + log = logging.getLogger(__name__) +else: + # Ansible does not allow logging from the modules. + log = avi_sdk_syslog_logger() + + +def _check_type_string(x): + """ + :param x: + :return: True if it is of type string + """ + if isinstance(x, str): + return True + if sys.version_info[0] < 3: + try: + return isinstance(x, unicode) + except NameError: + return False + + +class AviCheckModeResponse(object): + """ + Class to support ansible check mode. 
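+
+    Illustrative use (mirroring how avi_ansible_api constructs it later in
+    this file; values hypothetical):
+
+        rsp = AviCheckModeResponse(obj=existing_obj)
+        rsp.json()         # -> existing_obj
+        rsp.status_code    # -> 200 by default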
+ """ + + def __init__(self, obj, status_code=200): + self.obj = obj + self.status_code = status_code + + def json(self): + return self.obj + + +def ansible_return(module, rsp, changed, req=None, existing_obj=None, + api_context=None): + """ + :param module: AnsibleModule + :param rsp: ApiResponse from avi_api + :param changed: boolean + :param req: ApiRequest to avi_api + :param existing_obj: object to be passed debug output + :param api_context: api login context + + helper function to return the right ansible based on the error code and + changed + Returns: specific ansible module exit function + """ + + if rsp is not None and rsp.status_code > 299: + return module.fail_json( + msg='Error %d Msg %s req: %s api_context:%s ' % ( + rsp.status_code, rsp.text, req, api_context)) + api_creds = AviCredentials() + api_creds.update_from_ansible_module(module) + key = '%s:%s:%s' % (api_creds.controller, api_creds.username, + api_creds.port) + disable_fact = module.params.get('avi_disable_session_cache_as_fact') + + fact_context = None + if not disable_fact: + fact_context = module.params.get('api_context', {}) + if fact_context: + fact_context.update({key: api_context}) + else: + fact_context = {key: api_context} + + obj_val = rsp.json() if rsp else existing_obj + + if (obj_val and module.params.get("obj_username", None) and + "username" in obj_val): + obj_val["obj_username"] = obj_val["username"] + if (obj_val and module.params.get("obj_password", None) and + "password" in obj_val): + obj_val["obj_password"] = obj_val["password"] + old_obj_val = existing_obj if changed and existing_obj else None + api_context_val = api_context if disable_fact else None + ansible_facts_val = dict( + avi_api_context=fact_context) if not disable_fact else {} + + return module.exit_json( + changed=changed, obj=obj_val, old_obj=old_obj_val, + ansible_facts=ansible_facts_val, api_context=api_context_val) + + +def purge_optional_fields(obj, module): + """ + It purges the optional arguments to be sent to the controller. + :param obj: dictionary of the ansible object passed as argument. + :param module: AnsibleModule + return modified obj + """ + purge_fields = [] + for param, spec in module.argument_spec.items(): + if not spec.get('required', False): + if param not in obj: + # these are ansible common items + continue + if obj[param] is None: + purge_fields.append(param) + log.debug('purging fields %s', purge_fields) + for param in purge_fields: + obj.pop(param, None) + return obj + + +def cleanup_absent_fields(obj): + """ + cleans up any field that is marked as state: absent. It needs to be removed + from the object if it is present. 
+
+    :param obj:
+    :return: Purged object
+    """
+    if type(obj) != dict:
+        return obj
+    cleanup_keys = []
+    for k, v in obj.items():
+        if type(v) == dict:
+            if (('state' in v and v['state'] == 'absent') or
+                    (v == "{'state': 'absent'}")):
+                cleanup_keys.append(k)
+            else:
+                cleanup_absent_fields(v)
+                if not v:
+                    cleanup_keys.append(k)
+        elif type(v) == list:
+            new_list = []
+            for elem in v:
+                elem = cleanup_absent_fields(elem)
+                if elem:
+                    # keep only non-empty items in the list
+                    new_list.append(elem)
+            if new_list:
+                obj[k] = new_list
+            else:
+                cleanup_keys.append(k)
+        elif isinstance(v, str):
+            if v == "{'state': 'absent'}":
+                cleanup_keys.append(k)
+    for k in cleanup_keys:
+        del obj[k]
+    return obj
+
+
+RE_REF_MATCH = re.compile(r'^/api/[\w/]+\?name\=[\w]+[^#<>]*$')
+# if HTTP ref match then strip out the #name
+HTTP_REF_MATCH = re.compile(r'https://[\w.0-9:-]+/api/.+')
+HTTP_REF_W_NAME_MATCH = re.compile(r'https://[\w.0-9:-]+/api/.*#.+')
+
+
+def ref_n_str_cmp(x, y):
+    """
+    compares two references
+    1. check for exact reference
+    2. check for obj_type/uuid
+    3. check for name
+
+    if x is ref=name then extract uuid and name from y and use it.
+    if x is http_ref then
+        strip x and y
+        compare them.
+
+    if x and y are urls then match with split on #
+    if x is a RE_REF_MATCH then extract name
+    if y is a REF_MATCH then extract name
+    :param x: first string
+    :param y: second string from controller's object
+
+    Returns
+        True if they are equivalent else False
+    """
+    if type(y) in (int, float, bool, complex):
+        y = str(y)
+        x = str(x)
+    if not (_check_type_string(x) and _check_type_string(y)):
+        return False
+    y_uuid = y_name = str(y)
+    x = str(x)
+    if RE_REF_MATCH.match(x):
+        x = x.split('name=')[1]
+    elif HTTP_REF_MATCH.match(x):
+        x = x.rsplit('#', 1)[0]
+        y = y.rsplit('#', 1)[0]
+    elif RE_REF_MATCH.match(y):
+        y = y.split('name=')[1]
+
+    if HTTP_REF_W_NAME_MATCH.match(y):
+        path = y.split('api/', 1)[1]
+        # Fetching name or uuid from path /xxxx_xx/xx/xx_x/uuid_or_name
+        uuid_or_name = path.split('/')[-1]
+        parts = uuid_or_name.rsplit('#', 1)
+        y_uuid = parts[0]
+        y_name = parts[1] if len(parts) > 1 else ''
+    # x is just a string but y is a url, so match either uuid or name
+    result = (x in (y, y_name, y_uuid))
+    if not result:
+        log.debug('x: %s y: %s y_name %s y_uuid %s',
+                  x, y, y_name, y_uuid)
+    return result
+
+
+def avi_obj_cmp(x, y, sensitive_fields=None):
+    """
+    compares whether x is fully contained in y. The comparison is different
+    from a simple dictionary compare for the following reasons
+    1. Some fields could be references. The object in controller returns the
+       full URL for those references. However, the ansible script would have
+       it specified as /api/pool?name=blah. So, the reference fields need
+       to match uuid, relative reference based on name and actual reference.
+
+    2. Optional fields with defaults: In case there are optional fields with
+       defaults then controller automatically fills it up. This would
+       cause the comparison with Ansible object specification to always return
+       changed.
+
+    3. Optional fields without defaults: This is most tricky. The issue is
+       how to specify deletion of such objects from the ansible script. If the
+       ansible playbook has the object specified as Null, the Avi controller
+       will reject it for non-Message (dict) type fields. In addition, to deal
+       with the defaults=null issue, all fields that are set to None are
+       purged out before comparing with the Avi controller's version.
+
+       So, the solution is to pass state: absent if any optional field needs
+       to be deleted from the configuration. The script returns changed=true
+       if it finds a key in the controller version that is marked with
+       state: absent in the ansible playbook. Alternatively, it returns
+       false if the key is not present in the controller object. Before doing
+       a PUT or POST, it purges the fields that are marked state: absent.
+
+    :param x: first string
+    :param y: second string from controller's object
+    :param sensitive_fields: sensitive fields to ignore for diff
+
+    Returns:
+        True if x is subset of y else False
+    """
+    if not sensitive_fields:
+        sensitive_fields = set()
+    if isinstance(x, str):
+        # Special handling for strings as they can be references.
+        return ref_n_str_cmp(x, y)
+    if type(x) not in [list, dict]:
+        # if it is not list or dict or string then simply compare the values
+        return x == y
+    if type(x) == list:
+        # should compare each item in the list and that should match
+        if len(x) != len(y):
+            log.debug('x has %d items y has %d', len(x), len(y))
+            return False
+        for i in zip(x, y):
+            if not avi_obj_cmp(i[0], i[1], sensitive_fields=sensitive_fields):
+                # no need to continue
+                return False
+
+    if type(x) == dict:
+        x.pop('_last_modified', None)
+        x.pop('tenant', None)
+        y.pop('_last_modified', None)
+        x.pop('api_version', None)
+        y.pop('api_version', None)
+        d_xks = [k for k in x.keys() if k in sensitive_fields]
+
+        if d_xks:
+            # if there is a sensitive field then always return changed
+            return False
+        # pop the keys that are marked deleted but not present in y
+        # return false if item is marked absent and is present in y
+        d_x_absent_ks = []
+        for k, v in x.items():
+            if v is None:
+                d_x_absent_ks.append(k)
+                continue
+            if isinstance(v, dict):
+                if ('state' in v) and (v['state'] == 'absent'):
+                    if type(y) == dict and k not in y:
+                        d_x_absent_ks.append(k)
+                    else:
+                        return False
+                elif not v:
+                    d_x_absent_ks.append(k)
+            elif isinstance(v, list) and not v:
+                d_x_absent_ks.append(k)
+            # Added condition to check key in dict.
+            elif isinstance(v, str) or (k in y and isinstance(y[k], str)):
+                # this is the case when ansible converts the dictionary into a
+                # string.
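+                # e.g. (illustrative) a playbook value {'state': 'absent'}
+                # can arrive here as the literal string "{'state': 'absent'}".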
+                if v == "{'state': 'absent'}" and k not in y:
+                    d_x_absent_ks.append(k)
+            elif not v and k not in y:
+                # this is the case when x has set a value that qualifies as
+                # falsy ('not v') but y does not have that key
+                d_x_absent_ks.append(k)
+        for k in d_x_absent_ks:
+            x.pop(k)
+        x_keys = set(x.keys())
+        y_keys = set(y.keys())
+        if not x_keys.issubset(y_keys):
+            # log.debug('x has %s and y has %s keys', len(x_keys), len(y_keys))
+            return False
+        for k, v in x.items():
+            if k not in y:
+                # log.debug('k %s is not in y %s', k, y)
+                return False
+            if not avi_obj_cmp(v, y[k], sensitive_fields=sensitive_fields):
+                # log.debug('k %s v %s did not match in y %s', k, v, y[k])
+                return False
+    return True
+
+
+POP_FIELDS = ['state', 'controller', 'username', 'password', 'api_version',
+              'avi_credentials', 'avi_api_update_method', 'avi_api_patch_op',
+              'api_context', 'tenant', 'tenant_uuid', 'avi_disable_session_cache_as_fact']
+
+
+def get_api_context(module, api_creds):
+    api_context = module.params.get('api_context')
+    if api_context and module.params.get('avi_disable_session_cache_as_fact'):
+        return api_context
+    elif api_context and not module.params.get(
+            'avi_disable_session_cache_as_fact'):
+        key = '%s:%s:%s' % (api_creds.controller, api_creds.username,
+                            api_creds.port)
+        return api_context.get(key)
+    else:
+        return None
+
+
+def avi_ansible_api(module, obj_type, sensitive_fields):
+    """
+    Converts the Ansible module parameters into an Avi object and invokes
+    the appropriate APIs.
+    :param module: Ansible module
+    :param obj_type: string representing Avi object type
+    :param sensitive_fields: sensitive fields to be excluded for comparison
+        purposes.
+    Returns:
+        success: module.exit_json with obj=avi object
+        failure: module.fail_json
+    """
+
+    api_creds = AviCredentials()
+    api_creds.update_from_ansible_module(module)
+    api_context = get_api_context(module, api_creds)
+    if api_context:
+        api = ApiSession.get_session(
+            api_creds.controller,
+            api_creds.username,
+            password=api_creds.password,
+            timeout=api_creds.timeout,
+            tenant=api_creds.tenant,
+            tenant_uuid=api_creds.tenant_uuid,
+            token=api_context['csrftoken'],
+            port=api_creds.port,
+            session_id=api_context['session_id'],
+            csrftoken=api_context['csrftoken'])
+    else:
+        api = ApiSession.get_session(
+            api_creds.controller,
+            api_creds.username,
+            password=api_creds.password,
+            timeout=api_creds.timeout,
+            tenant=api_creds.tenant,
+            tenant_uuid=api_creds.tenant_uuid,
+            token=api_creds.token,
+            port=api_creds.port)
+    state = module.params['state']
+    # Get the api version.
+    avi_update_method = module.params.get('avi_api_update_method', 'put')
+    avi_patch_op = module.params.get('avi_api_patch_op', 'add')
+
+    api_version = api_creds.api_version
+    name = module.params.get('name', None)
+    # Added support to get uuid
+    uuid = module.params.get('uuid', None)
+    check_mode = module.check_mode
+    if uuid and obj_type != 'cluster':
+        obj_path = '%s/%s' % (obj_type, uuid)
+    else:
+        obj_path = '%s/' % obj_type
+    obj = deepcopy(module.params)
+    tenant = obj.pop('tenant', '')
+    tenant_uuid = obj.pop('tenant_uuid', '')
+    # obj.pop('cloud_ref', None)
+    for k in POP_FIELDS:
+        obj.pop(k, None)
+    purge_optional_fields(obj, module)
+
+    # Special code to handle situation where object has a field
+    # named username. This is used in case of api/user
+    # The following code copies the username and password
+    # from the obj_username and obj_password fields.
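+    # Illustrative example (hypothetical values): module params containing
+    # {'obj_username': 'bob', 'obj_password': 's3cret'} are sent to the
+    # controller as {'username': 'bob', 'password': 's3cret'}.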
+ if 'obj_username' in obj: + obj['username'] = obj['obj_username'] + obj.pop('obj_username') + if 'obj_password' in obj: + obj['password'] = obj['obj_password'] + obj.pop('obj_password') + if 'full_name' not in obj and 'name' in obj and obj_type == "user": + obj['full_name'] = obj['name'] + # Special case as name represent full_name in user module + # As per API response, name is always same as username regardless of full_name + obj['name'] = obj['username'] + + log.info('passed object %s ', obj) + + if uuid: + # Get the object based on uuid. + try: + existing_obj = api.get( + obj_path, tenant=tenant, tenant_uuid=tenant_uuid, + params={'include_refs': '', 'include_name': ''}, + api_version=api_version) + existing_obj = existing_obj.json() + except ObjectNotFound: + existing_obj = None + elif name: + params = {'include_refs': '', 'include_name': ''} + if obj.get('cloud_ref', None): + # this is the case when gets have to be scoped with cloud + cloud = obj['cloud_ref'].split('name=')[1] + params['cloud_ref.name'] = cloud + existing_obj = api.get_object_by_name( + obj_type, name, tenant=tenant, tenant_uuid=tenant_uuid, + params=params, api_version=api_version) + + # Need to check if tenant_ref was provided and the object returned + # is actually in admin tenant. + if existing_obj and 'tenant_ref' in obj and 'tenant_ref' in existing_obj: + # https://10.10.25.42/api/tenant/admin#admin + existing_obj_tenant = existing_obj['tenant_ref'].split('#')[1] + obj_tenant = obj['tenant_ref'].split('name=')[1] + if obj_tenant != existing_obj_tenant: + existing_obj = None + else: + # added api version to avi api call. + existing_obj = api.get(obj_path, tenant=tenant, tenant_uuid=tenant_uuid, + params={'include_refs': '', 'include_name': ''}, + api_version=api_version).json() + + if state == 'absent': + rsp = None + changed = False + err = False + if not check_mode and existing_obj: + try: + if name is not None: + # added api version to avi api call. + rsp = api.delete_by_name( + obj_type, name, tenant=tenant, tenant_uuid=tenant_uuid, + api_version=api_version) + else: + # added api version to avi api call. + rsp = api.delete( + obj_path, tenant=tenant, tenant_uuid=tenant_uuid, + api_version=api_version) + except ObjectNotFound: + pass + if check_mode and existing_obj: + changed = True + + if rsp: + if rsp.status_code == 204: + changed = True + else: + err = True + if not err: + return ansible_return( + module, rsp, changed, existing_obj=existing_obj, + api_context=api.get_context()) + elif rsp: + return module.fail_json(msg=rsp.text) + + rsp = None + req = None + if existing_obj: + # this is case of modify as object exists. should find out + # if changed is true or not + if name is not None and obj_type != 'cluster': + obj_uuid = existing_obj['uuid'] + obj_path = '%s/%s' % (obj_type, obj_uuid) + if avi_update_method == 'put': + changed = not avi_obj_cmp(obj, existing_obj, sensitive_fields) + obj = cleanup_absent_fields(obj) + if changed: + req = obj + if check_mode: + # No need to process any further. + rsp = AviCheckModeResponse(obj=existing_obj) + else: + rsp = api.put( + obj_path, data=req, tenant=tenant, + tenant_uuid=tenant_uuid, api_version=api_version) + elif check_mode: + rsp = AviCheckModeResponse(obj=existing_obj) + else: + if check_mode: + # No need to process any further. 
+ rsp = AviCheckModeResponse(obj=existing_obj) + changed = True + else: + obj.pop('name', None) + patch_data = {avi_patch_op: obj} + rsp = api.patch( + obj_path, data=patch_data, tenant=tenant, + tenant_uuid=tenant_uuid, api_version=api_version) + obj = rsp.json() + changed = not avi_obj_cmp(obj, existing_obj) + if changed: + log.debug('EXISTING OBJ %s', existing_obj) + log.debug('NEW OBJ %s', obj) + else: + changed = True + req = obj + if check_mode: + rsp = AviCheckModeResponse(obj=None) + else: + rsp = api.post(obj_type, data=obj, tenant=tenant, + tenant_uuid=tenant_uuid, api_version=api_version) + return ansible_return(module, rsp, changed, req, existing_obj=existing_obj, + api_context=api.get_context()) + + +def avi_common_argument_spec(): + """ + Returns common arguments for all Avi modules + :return: dict + """ + credentials_spec = dict( + controller=dict(fallback=(env_fallback, ['AVI_CONTROLLER'])), + username=dict(fallback=(env_fallback, ['AVI_USERNAME'])), + password=dict(fallback=(env_fallback, ['AVI_PASSWORD']), no_log=True), + api_version=dict(default='16.4.4', type='str'), + tenant=dict(default='admin'), + tenant_uuid=dict(default='', type='str'), + port=dict(type='int'), + timeout=dict(default=300, type='int'), + token=dict(default='', type='str', no_log=True), + session_id=dict(default='', type='str', no_log=True), + csrftoken=dict(default='', type='str', no_log=True) + ) + + return dict( + controller=dict(fallback=(env_fallback, ['AVI_CONTROLLER'])), + username=dict(fallback=(env_fallback, ['AVI_USERNAME'])), + password=dict(fallback=(env_fallback, ['AVI_PASSWORD']), no_log=True), + tenant=dict(default='admin'), + tenant_uuid=dict(default=''), + api_version=dict(default='16.4.4', type='str'), + avi_credentials=dict(default=None, type='dict', + options=credentials_spec), + api_context=dict(type='dict'), + avi_disable_session_cache_as_fact=dict(default=False, type='bool')) diff --git a/plugins/module_utils/network/avi/avi.py b/plugins/module_utils/network/avi/avi.py new file mode 100644 index 0000000000..04cb4157f4 --- /dev/null +++ b/plugins/module_utils/network/avi/avi.py @@ -0,0 +1,38 @@ +# This code is part of Ansible, but is an independent component. +# This particular file snippet, and this file snippet only, is BSD licensed. +# Modules you write using this snippet, which is embedded dynamically by Ansible +# still belong to the author of the module, and may assign their own license +# to the complete work. +# +# Copyright (c), Gaurav Rastogi , 2017 +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without modification, +# are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
+# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +# This module initially matched the namespace of network module avi. However, +# that causes namespace import error when other modules from avi namespaces +# are imported. Added import of absolute_import to avoid import collisions for +# avi.sdk. + +from __future__ import absolute_import + +from ansible_collections.community.general.plugins.module_utils.network.avi.ansible_utils import ( + avi_ansible_api, avi_common_argument_spec, ansible_return, + avi_obj_cmp, cleanup_absent_fields, AviCheckModeResponse, HAS_AVI) diff --git a/plugins/module_utils/network/avi/avi_api.py b/plugins/module_utils/network/avi/avi_api.py new file mode 100644 index 0000000000..817f909dd0 --- /dev/null +++ b/plugins/module_utils/network/avi/avi_api.py @@ -0,0 +1,972 @@ +from __future__ import absolute_import +import os +import sys +import copy +import json +import logging +import time +from datetime import datetime, timedelta +from ssl import SSLError + + +class MockResponse(object): + def __init__(self, *args, **kwargs): + raise Exception("Requests library Response object not found. Using fake one.") + + +class MockRequestsConnectionError(Exception): + pass + + +class MockSession(object): + def __init__(self, *args, **kwargs): + raise Exception("Requests library Session object not found. Using fake one.") + + +HAS_AVI = True +try: + from requests import ConnectionError as RequestsConnectionError + from requests import Response + from requests.sessions import Session +except ImportError: + HAS_AVI = False + Response = MockResponse + RequestsConnectionError = MockRequestsConnectionError + Session = MockSession + + +logger = logging.getLogger(__name__) + +sessionDict = {} + + +def avi_timedelta(td): + ''' + This is a wrapper class to workaround python 2.6 builtin datetime.timedelta + does not have total_seconds method + :param timedelta object + ''' + if type(td) != timedelta: + raise TypeError() + if sys.version_info >= (2, 7): + ts = td.total_seconds() + else: + ts = td.seconds + (24 * 3600 * td.days) + return ts + + +def avi_sdk_syslog_logger(logger_name='avi.sdk'): + # The following sets up syslog module to log underlying avi SDK messages + # based on the environment variables: + # AVI_LOG_HANDLER: names the logging handler to use. Only syslog is + # supported. + # AVI_LOG_LEVEL: Logging level used for the avi SDK. Default is DEBUG + # AVI_SYSLOG_ADDRESS: Destination address for the syslog handler. 
+ # Default is /dev/log + from logging.handlers import SysLogHandler + lf = '[%(asctime)s] %(levelname)s [%(module)s.%(funcName)s:%(lineno)d] %(message)s' + log = logging.getLogger(logger_name) + log_level = os.environ.get('AVI_LOG_LEVEL', 'DEBUG') + if log_level: + log.setLevel(getattr(logging, log_level)) + formatter = logging.Formatter(lf) + sh = SysLogHandler(address=os.environ.get('AVI_SYSLOG_ADDRESS', '/dev/log')) + sh.setFormatter(formatter) + log.addHandler(sh) + return log + + +class ObjectNotFound(Exception): + pass + + +class APIError(Exception): + def __init__(self, arg, rsp=None): + self.args = [arg, rsp] + self.rsp = rsp + + +class AviServerError(APIError): + def __init__(self, arg, rsp=None): + super(AviServerError, self).__init__(arg, rsp) + + +class APINotImplemented(Exception): + pass + + +class ApiResponse(Response): + """ + Returns copy of the requests.Response object provides additional helper + routines + 1. obj: returns dictionary of Avi Object + """ + def __init__(self, rsp): + super(ApiResponse, self).__init__() + for k, v in list(rsp.__dict__.items()): + setattr(self, k, v) + + def json(self): + """ + Extends the session default json interface to handle special errors + and raise Exceptions + returns the Avi object as a dictionary from rsp.text + """ + if self.status_code in (200, 201): + if not self.text: + # In cases like status_code == 201 the response text could be + # empty string. + return None + return super(ApiResponse, self).json() + elif self.status_code == 204: + # No response needed; e.g., delete operation + return None + elif self.status_code == 404: + raise ObjectNotFound('HTTP Error: %s Error Msg %s' % ( + self.status_code, self.text), self) + elif self.status_code >= 500: + raise AviServerError('HTTP Error: %s Error Msg %s' % ( + self.status_code, self.text), self) + else: + raise APIError('HTTP Error: %s Error Msg %s' % ( + self.status_code, self.text), self) + + def count(self): + """ + return the number of objects in the collection response. If it is not + a collection response then it would simply return 1. 
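+
+        Illustrative example (hypothetical values): a collection GET that
+        returns {'count': 2, 'results': [...]} yields 2, while a single
+        object response yields 1.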
+ """ + obj = self.json() + if 'count' in obj: + # this was a resposne to collection + return obj['count'] + return 1 + + @staticmethod + def to_avi_response(resp): + if type(resp) == Response: + return ApiResponse(resp) + return resp + + +class AviCredentials(object): + controller = '' + username = '' + password = '' + api_version = '16.4.4' + tenant = None + tenant_uuid = None + token = None + port = None + timeout = 300 + session_id = None + csrftoken = None + + def __init__(self, **kwargs): + for k, v in kwargs.items(): + setattr(self, k, v) + + def update_from_ansible_module(self, m): + """ + :param m: ansible module + :return: + """ + if m.params.get('avi_credentials'): + for k, v in m.params['avi_credentials'].items(): + if hasattr(self, k): + setattr(self, k, v) + if m.params['controller']: + self.controller = m.params['controller'] + if m.params['username']: + self.username = m.params['username'] + if m.params['password']: + self.password = m.params['password'] + if (m.params['api_version'] and + (m.params['api_version'] != '16.4.4')): + self.api_version = m.params['api_version'] + if m.params['tenant']: + self.tenant = m.params['tenant'] + if m.params['tenant_uuid']: + self.tenant_uuid = m.params['tenant_uuid'] + if m.params.get('session_id'): + self.session_id = m.params['session_id'] + if m.params.get('csrftoken'): + self.csrftoken = m.params['csrftoken'] + + def __str__(self): + return 'controller %s user %s api %s tenant %s' % ( + self.controller, self.username, self.api_version, self.tenant) + + +class ApiSession(Session): + """ + Extends the Request library's session object to provide helper + utilities to work with Avi Controller like authentication, api massaging + etc. + """ + + # This keeps track of the process which created the cache. + # At anytime the pid of the process changes then it would create + # a new cache for that process. + AVI_SLUG = 'Slug' + SESSION_CACHE_EXPIRY = 20 * 60 + SHARED_USER_HDRS = ['X-CSRFToken', 'Session-Id', 'Referer', 'Content-Type'] + MAX_API_RETRIES = 3 + + def __init__(self, controller_ip=None, username=None, password=None, + token=None, tenant=None, tenant_uuid=None, verify=False, + port=None, timeout=60, api_version=None, + retry_conxn_errors=True, data_log=False, + avi_credentials=None, session_id=None, csrftoken=None, + lazy_authentication=False, max_api_retries=None): + """ + ApiSession takes ownership of avi_credentials and may update the + information inside it. + + Initialize new session object with authenticated token from login api. + It also keeps a cache of user sessions that are cleaned up if inactive + for more than 20 mins. + + Notes: + 01. If mode is https and port is none or 443, we don't embed the + port in the prefix. The prefix would be 'https://ip'. If port + is a non-default value then we concatenate https://ip:port + in the prefix. + 02. If mode is http and the port is none or 80, we don't embed the + port in the prefix. The prefix would be 'http://ip'. If port is + a non-default value, then we concatenate http://ip:port in + the prefix. 
+ """ + super(ApiSession, self).__init__() + if not avi_credentials: + tenant = tenant if tenant else "admin" + self.avi_credentials = AviCredentials( + controller=controller_ip, username=username, password=password, + api_version=api_version, tenant=tenant, tenant_uuid=tenant_uuid, + token=token, port=port, timeout=timeout, + session_id=session_id, csrftoken=csrftoken) + else: + self.avi_credentials = avi_credentials + self.headers = {} + self.verify = verify + self.retry_conxn_errors = retry_conxn_errors + self.remote_api_version = {} + self.session_cookie_name = '' + self.user_hdrs = {} + self.data_log = data_log + self.num_session_retries = 0 + self.retry_wait_time = 0 + self.max_session_retries = ( + self.MAX_API_RETRIES if max_api_retries is None + else int(max_api_retries)) + # Refer Notes 01 and 02 + k_port = port if port else 443 + if self.avi_credentials.controller.startswith('http'): + k_port = 80 if not self.avi_credentials.port else k_port + if self.avi_credentials.port is None or self.avi_credentials.port\ + == 80: + self.prefix = self.avi_credentials.controller + else: + self.prefix = '{x}:{y}'.format( + x=self.avi_credentials.controller, + y=self.avi_credentials.port) + else: + if port is None or port == 443: + self.prefix = 'https://{x}'.format( + x=self.avi_credentials.controller) + else: + self.prefix = 'https://{x}:{y}'.format( + x=self.avi_credentials.controller, + y=self.avi_credentials.port) + self.timeout = timeout + self.key = '%s:%s:%s' % (self.avi_credentials.controller, + self.avi_credentials.username, k_port) + # Added api token and session id to sessionDict for handle single + # session + if self.avi_credentials.csrftoken: + sessionDict[self.key] = { + 'api': self, + "csrftoken": self.avi_credentials.csrftoken, + "session_id": self.avi_credentials.session_id, + "last_used": datetime.utcnow() + } + elif lazy_authentication: + sessionDict.get(self.key, {}).update( + {'api': self, "last_used": datetime.utcnow()}) + else: + self.authenticate_session() + + self.num_session_retries = 0 + self.pid = os.getpid() + ApiSession._clean_inactive_sessions() + return + + @property + def controller_ip(self): + return self.avi_credentials.controller + + @controller_ip.setter + def controller_ip(self, controller_ip): + self.avi_credentials.controller = controller_ip + + @property + def username(self): + return self.avi_credentials.username + + @property + def connected(self): + return sessionDict.get(self.key, {}).get('connected', False) + + @username.setter + def username(self, username): + self.avi_credentials.username = username + + @property + def password(self): + return self.avi_credentials.password + + @password.setter + def password(self, password): + self.avi_credentials.password = password + + @property + def keystone_token(self): + return sessionDict.get(self.key, {}).get('csrftoken', None) + + @keystone_token.setter + def keystone_token(self, token): + sessionDict[self.key]['csrftoken'] = token + + @property + def tenant_uuid(self): + self.avi_credentials.tenant_uuid + + @tenant_uuid.setter + def tenant_uuid(self, tenant_uuid): + self.avi_credentials.tenant_uuid = tenant_uuid + + @property + def tenant(self): + return self.avi_credentials.tenant + + @tenant.setter + def tenant(self, tenant): + if tenant: + self.avi_credentials.tenant = tenant + else: + self.avi_credentials.tenant = 'admin' + + @property + def port(self): + self.avi_credentials.port + + @port.setter + def port(self, port): + self.avi_credentials.port = port + + @property + def api_version(self): + 
+        return self.avi_credentials.api_version
+
+    @api_version.setter
+    def api_version(self, api_version):
+        self.avi_credentials.api_version = api_version
+
+    @property
+    def session_id(self):
+        return sessionDict[self.key]['session_id']
+
+    def get_context(self):
+        return {
+            'session_id': sessionDict[self.key]['session_id'],
+            'csrftoken': sessionDict[self.key]['csrftoken']
+        }
+
+    @staticmethod
+    def clear_cached_sessions():
+        global sessionDict
+        sessionDict = {}
+
+    @staticmethod
+    def get_session(
+            controller_ip=None, username=None, password=None, token=None, tenant=None,
+            tenant_uuid=None, verify=False, port=None, timeout=60,
+            retry_conxn_errors=True, api_version=None, data_log=False,
+            avi_credentials=None, session_id=None, csrftoken=None,
+            lazy_authentication=False, max_api_retries=None):
+        """
+        Returns the session object for the same user and tenant.
+        Calls __init__ if the session does not exist and adds it to the
+        session cache.
+        :param controller_ip: controller IP address
+        :param username:
+        :param password:
+        :param token: Token to use; for example, a valid keystone token
+        :param tenant: Name of the tenant on Avi Controller
+        :param tenant_uuid: Don't specify tenant when using tenant_uuid
+        :param port: REST API port, if different from 443
+        :param timeout: timeout for API calls; default value is 60 seconds
+        :param retry_conxn_errors: retry on connection errors
+        :param api_version: Controller API version
+        """
+        if not avi_credentials:
+            tenant = tenant if tenant else "admin"
+            avi_credentials = AviCredentials(
+                controller=controller_ip, username=username, password=password,
+                api_version=api_version, tenant=tenant, tenant_uuid=tenant_uuid,
+                token=token, port=port, timeout=timeout,
+                session_id=session_id, csrftoken=csrftoken)
+
+        k_port = avi_credentials.port if avi_credentials.port else 443
+        if avi_credentials.controller.startswith('http'):
+            k_port = 80 if not avi_credentials.port else k_port
+        key = '%s:%s:%s' % (avi_credentials.controller,
+                            avi_credentials.username, k_port)
+        cached_session = sessionDict.get(key)
+        if cached_session:
+            user_session = cached_session['api']
+            if not (user_session.avi_credentials.csrftoken or
+                    lazy_authentication):
+                user_session.authenticate_session()
+        else:
+            user_session = ApiSession(
+                controller_ip, username, password, token=token, tenant=tenant,
+                tenant_uuid=tenant_uuid, verify=verify, port=port,
+                timeout=timeout, retry_conxn_errors=retry_conxn_errors,
+                api_version=api_version, data_log=data_log,
+                avi_credentials=avi_credentials,
+                lazy_authentication=lazy_authentication,
+                max_api_retries=max_api_retries)
+        ApiSession._clean_inactive_sessions()
+        return user_session
+
+    def reset_session(self):
+        """
+        resets and re-authenticates the current session.
+        """
+        sessionDict[self.key]['connected'] = False
+        logger.info('resetting session for %s', self.key)
+        self.user_hdrs = {}
+        for k, v in self.headers.items():
+            if k not in self.SHARED_USER_HDRS:
+                self.user_hdrs[k] = v
+        self.headers = {}
+        self.authenticate_session()
+
+    def authenticate_session(self):
+        """
+        Performs session authentication with the Avi controller, stores
+        session cookies, and sets header options like tenant.
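+        On success the csrftoken and session cookie are cached in the
+        module-level sessionDict under this session's key so that later
+        calls can reuse them (see get_context above).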
+ """ + body = {"username": self.avi_credentials.username} + if self.avi_credentials.password: + body["password"] = self.avi_credentials.password + elif self.avi_credentials.token: + body["token"] = self.avi_credentials.token + else: + raise APIError("Neither user password or token provided") + logger.debug('authenticating user %s prefix %s', + self.avi_credentials.username, self.prefix) + self.cookies.clear() + err = None + try: + rsp = super(ApiSession, self).post( + self.prefix + "/login", body, timeout=self.timeout, verify=self.verify) + + if rsp.status_code == 200: + self.num_session_retries = 0 + self.remote_api_version = rsp.json().get('version', {}) + self.session_cookie_name = rsp.json().get('session_cookie_name', 'sessionid') + self.headers.update(self.user_hdrs) + if rsp.cookies and 'csrftoken' in rsp.cookies: + csrftoken = rsp.cookies['csrftoken'] + sessionDict[self.key] = { + 'csrftoken': csrftoken, + 'session_id': rsp.cookies[self.session_cookie_name], + 'last_used': datetime.utcnow(), + 'api': self, + 'connected': True + } + logger.debug("authentication success for user %s", + self.avi_credentials.username) + return + # Check for bad request and invalid credentials response code + elif rsp.status_code in [401, 403]: + logger.error('Status Code %s msg %s', rsp.status_code, rsp.text) + err = APIError('Status Code %s msg %s' % ( + rsp.status_code, rsp.text), rsp) + raise err + else: + logger.error("Error status code %s msg %s", rsp.status_code, + rsp.text) + err = APIError('Status Code %s msg %s' % ( + rsp.status_code, rsp.text), rsp) + except (RequestsConnectionError, SSLError) as e: + if not self.retry_conxn_errors: + raise + logger.warning('Connection error retrying %s', e) + err = e + # comes here only if there was either exception or login was not + # successful + if self.retry_wait_time: + time.sleep(self.retry_wait_time) + self.num_session_retries += 1 + if self.num_session_retries > self.max_session_retries: + self.num_session_retries = 0 + logger.error("giving up after %d retries connection failure %s", + self.max_session_retries, True) + ret_err = ( + err if err else APIError("giving up after %d retries connection failure %s" % + (self.max_session_retries, True))) + raise ret_err + self.authenticate_session() + return + + def _get_api_headers(self, tenant, tenant_uuid, timeout, headers, + api_version): + """ + returns the headers that are passed to the requests.Session api calls. + """ + api_hdrs = copy.deepcopy(self.headers) + api_hdrs.update({ + "Referer": self.prefix, + "Content-Type": "application/json" + }) + api_hdrs['timeout'] = str(timeout) + if self.key in sessionDict and 'csrftoken' in sessionDict.get(self.key): + api_hdrs['X-CSRFToken'] = sessionDict.get(self.key)['csrftoken'] + else: + self.authenticate_session() + api_hdrs['X-CSRFToken'] = sessionDict.get(self.key)['csrftoken'] + if api_version: + api_hdrs['X-Avi-Version'] = api_version + elif self.avi_credentials.api_version: + api_hdrs['X-Avi-Version'] = self.avi_credentials.api_version + if tenant: + tenant_uuid = None + elif tenant_uuid: + tenant = None + else: + tenant = self.avi_credentials.tenant + tenant_uuid = self.avi_credentials.tenant_uuid + if tenant_uuid: + api_hdrs.update({"X-Avi-Tenant-UUID": "%s" % tenant_uuid}) + api_hdrs.pop("X-Avi-Tenant", None) + elif tenant: + api_hdrs.update({"X-Avi-Tenant": "%s" % tenant}) + api_hdrs.pop("X-Avi-Tenant-UUID", None) + # Override any user headers that were passed by users. 
We don't know + # when the user had updated the user_hdrs + if self.user_hdrs: + api_hdrs.update(self.user_hdrs) + if headers: + # overwrite the headers passed via the API calls. + api_hdrs.update(headers) + return api_hdrs + + def _api(self, api_name, path, tenant, tenant_uuid, data=None, + headers=None, timeout=None, api_version=None, **kwargs): + """ + It calls the requests.Session APIs and handles session expiry + and other situations where session needs to be reset. + returns ApiResponse object + :param path: takes relative path to the AVI api. + :param tenant: overrides the tenant used during session creation + :param tenant_uuid: overrides the tenant or tenant_uuid during session + creation + :param timeout: timeout for API calls; Default value is 60 seconds + :param headers: dictionary of headers that override the session + headers. + """ + if self.pid != os.getpid(): + logger.info('pid %d change detected new %d. Closing session', + self.pid, os.getpid()) + self.close() + self.pid = os.getpid() + if timeout is None: + timeout = self.timeout + fullpath = self._get_api_path(path) + fn = getattr(super(ApiSession, self), api_name) + api_hdrs = self._get_api_headers(tenant, tenant_uuid, timeout, headers, + api_version) + connection_error = False + err = None + cookies = { + 'csrftoken': api_hdrs['X-CSRFToken'], + } + try: + if self.session_cookie_name: + cookies[self.session_cookie_name] = sessionDict[self.key]['session_id'] + except KeyError: + pass + try: + if (data is not None) and (type(data) == dict): + resp = fn(fullpath, data=json.dumps(data), headers=api_hdrs, + timeout=timeout, cookies=cookies, **kwargs) + else: + resp = fn(fullpath, data=data, headers=api_hdrs, + timeout=timeout, cookies=cookies, **kwargs) + except (RequestsConnectionError, SSLError) as e: + logger.warning('Connection error retrying %s', e) + if not self.retry_conxn_errors: + raise + connection_error = True + err = e + except Exception as e: + logger.error('Error in Requests library %s', e) + raise + if not connection_error: + logger.debug('path: %s http_method: %s hdrs: %s params: ' + '%s data: %s rsp: %s', fullpath, api_name.upper(), + api_hdrs, kwargs, data, + (resp.text if self.data_log else 'None')) + if connection_error or resp.status_code in (401, 419): + if connection_error: + try: + self.close() + except Exception: + # ignoring exception in cleanup path + pass + logger.warning('Connection failed, retrying.') + # Adding sleep before retrying + if self.retry_wait_time: + time.sleep(self.retry_wait_time) + else: + logger.info('received error %d %s so resetting connection', + resp.status_code, resp.text) + ApiSession.reset_session(self) + self.num_session_retries += 1 + if self.num_session_retries > self.max_session_retries: + # Added this such that any code which re-tries can succeed + # eventually. 
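+                # Resetting the counter before giving up means the next
+                # call on this cached session starts with a fresh retry
+                # budget instead of failing immediately.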
+ self.num_session_retries = 0 + if not connection_error: + err = APIError('Status Code %s msg %s' % ( + resp.status_code, resp.text), resp) + logger.error( + "giving up after %d retries conn failure %s err %s", + self.max_session_retries, connection_error, err) + ret_err = ( + err if err else APIError("giving up after %d retries connection failure %s" % + (self.max_session_retries, True))) + raise ret_err + # should restore the updated_hdrs to one passed down + resp = self._api(api_name, path, tenant, tenant_uuid, data, + headers=headers, api_version=api_version, + timeout=timeout, **kwargs) + self.num_session_retries = 0 + + if resp.cookies and 'csrftoken' in resp.cookies: + csrftoken = resp.cookies['csrftoken'] + self.headers.update({"X-CSRFToken": csrftoken}) + self._update_session_last_used() + return ApiResponse.to_avi_response(resp) + + def get_controller_details(self): + result = { + "controller_ip": self.controller_ip, + "controller_api_version": self.remote_api_version + } + return result + + def get(self, path, tenant='', tenant_uuid='', timeout=None, params=None, + api_version=None, **kwargs): + """ + It extends the Session Library interface to add AVI API prefixes, + handle session exceptions related to authentication and update + the global user session cache. + :param path: takes relative path to the AVI api. + :param tenant: overrides the tenant used during session creation + :param tenant_uuid: overrides the tenant or tenant_uuid during session + creation + :param timeout: timeout for API calls; Default value is 60 seconds + :param params: dictionary of key value pairs to be sent as query + parameters + :param api_version: overrides x-avi-header in request header during + session creation + get method takes relative path to service and kwargs as per Session + class get method + returns session's response object + """ + return self._api('get', path, tenant, tenant_uuid, timeout=timeout, + params=params, api_version=api_version, **kwargs) + + def get_object_by_name(self, path, name, tenant='', tenant_uuid='', + timeout=None, params=None, api_version=None, + **kwargs): + """ + Helper function to access Avi REST Objects using object + type and name. It behaves like python dictionary interface where it + returns None when the object is not present in the AviController. + Internally, it transforms the request to api/path?name=... + :param path: relative path to service + :param name: name of the object + :param tenant: overrides the tenant used during session creation + :param tenant_uuid: overrides the tenant or tenant_uuid during session + creation + :param timeout: timeout for API calls; Default value is 60 seconds + :param params: dictionary of key value pairs to be sent as query + parameters + :param api_version: overrides x-avi-header in request header during + session creation + returns dictionary object if successful else None + """ + obj = None + if not params: + params = {} + params['name'] = name + resp = self.get(path, tenant=tenant, tenant_uuid=tenant_uuid, + timeout=timeout, + params=params, api_version=api_version, **kwargs) + if resp.status_code in (401, 419): + ApiSession.reset_session(self) + resp = self.get_object_by_name( + path, name, tenant, tenant_uuid, timeout=timeout, + params=params, **kwargs) + if resp.status_code > 499 or 'Invalid version' in resp.text: + logger.error('Error in get object by name for %s named %s. 
' 'Error: %s', path, name, resp.text)
+            raise AviServerError(resp.text, rsp=resp)
+        elif resp.status_code > 299:
+            return obj
+        try:
+            if 'results' in resp.json():
+                obj = resp.json()['results'][0]
+            else:
+                # For APIs returning a single object, e.g. api/cluster
+                obj = resp.json()
+        except IndexError:
+            logger.warning('Warning: Object Not found for %s named %s',
+                           path, name)
+            obj = None
+        self._update_session_last_used()
+        return obj
+
+    def post(self, path, data=None, tenant='', tenant_uuid='', timeout=None,
+             force_uuid=None, params=None, api_version=None, **kwargs):
+        """
+        It extends the Session Library interface to add AVI API prefixes,
+        handle session exceptions related to authentication and update
+        the global user session cache.
+        :param path: takes relative path to the AVI api. It is modified by
+            the library to conform to AVI Controller's REST API interface
+        :param data: dictionary of the data. Support for json string
+            is deprecated
+        :param tenant: overrides the tenant used during session creation
+        :param tenant_uuid: overrides the tenant or tenant_uuid during session
+            creation
+        :param timeout: timeout for API calls; Default value is 60 seconds
+        :param params: dictionary of key value pairs to be sent as query
+            parameters
+        :param api_version: overrides x-avi-header in request header during
+            session creation
+        returns session's response object
+        """
+        if force_uuid is not None:
+            headers = kwargs.get('headers', {})
+            headers[self.AVI_SLUG] = force_uuid
+            kwargs['headers'] = headers
+        return self._api('post', path, tenant, tenant_uuid, data=data,
+                         timeout=timeout, params=params,
+                         api_version=api_version, **kwargs)
+
+    def put(self, path, data=None, tenant='', tenant_uuid='',
+            timeout=None, params=None, api_version=None, **kwargs):
+        """
+        It extends the Session Library interface to add AVI API prefixes,
+        handle session exceptions related to authentication and update
+        the global user session cache.
+        :param path: takes relative path to the AVI api. It is modified by
+            the library to conform to AVI Controller's REST API interface
+        :param data: dictionary of the data. Support for json string
+            is deprecated
+        :param tenant: overrides the tenant used during session creation
+        :param tenant_uuid: overrides the tenant or tenant_uuid during session
+            creation
+        :param timeout: timeout for API calls; Default value is 60 seconds
+        :param params: dictionary of key value pairs to be sent as query
+            parameters
+        :param api_version: overrides x-avi-header in request header during
+            session creation
+        returns session's response object
+        """
+        return self._api('put', path, tenant, tenant_uuid, data=data,
+                         timeout=timeout, params=params,
+                         api_version=api_version, **kwargs)
+
+    def patch(self, path, data=None, tenant='', tenant_uuid='',
+              timeout=None, params=None, api_version=None, **kwargs):
+        """
+        It extends the Session Library interface to add AVI API prefixes,
+        handle session exceptions related to authentication and update
+        the global user session cache.
+        :param path: takes relative path to the AVI api. It is modified by
+            the library to conform to AVI Controller's REST API interface
+        :param data: dictionary of the data.
Support for json string is deprecated
+        :param tenant: overrides the tenant used during session creation
+        :param tenant_uuid: overrides the tenant or tenant_uuid during session
+            creation
+        :param timeout: timeout for API calls; Default value is 60 seconds
+        :param params: dictionary of key value pairs to be sent as query
+            parameters
+        :param api_version: overrides x-avi-header in request header during
+            session creation
+        returns session's response object
+        """
+        return self._api('patch', path, tenant, tenant_uuid, data=data,
+                         timeout=timeout, params=params,
+                         api_version=api_version, **kwargs)
+
+    def put_by_name(self, path, name, data=None, tenant='',
+                    tenant_uuid='', timeout=None, params=None,
+                    api_version=None, **kwargs):
+        """
+        Helper function to perform HTTP PUT on Avi REST Objects using object
+        type and name.
+        Internally, it transforms the request to api/path?name=...
+        :param path: relative path to service
+        :param name: name of the object
+        :param data: dictionary of the data. Support for json string
+            is deprecated
+        :param tenant: overrides the tenant used during session creation
+        :param tenant_uuid: overrides the tenant or tenant_uuid during session
+            creation
+        :param timeout: timeout for API calls; Default value is 60 seconds
+        :param params: dictionary of key value pairs to be sent as query
+            parameters
+        :param api_version: overrides x-avi-header in request header during
+            session creation
+        returns session's response object
+        """
+        uuid = self._get_uuid_by_name(
+            path, name, tenant, tenant_uuid, api_version=api_version)
+        path = '%s/%s' % (path, uuid)
+        return self.put(path, data, tenant, tenant_uuid, timeout=timeout,
+                        params=params, api_version=api_version, **kwargs)
+
+    def delete(self, path, tenant='', tenant_uuid='', timeout=None, params=None,
+               data=None, api_version=None, **kwargs):
+        """
+        It extends the Session Library interface to add AVI API prefixes,
+        handle session exceptions related to authentication and update
+        the global user session cache.
+        :param path: takes relative path to the AVI api. It is modified by
+            the library to conform to AVI Controller's REST API interface
+        :param tenant: overrides the tenant used during session creation
+        :param tenant_uuid: overrides the tenant or tenant_uuid during session
+            creation
+        :param timeout: timeout for API calls; Default value is 60 seconds
+        :param params: dictionary of key value pairs to be sent as query
+            parameters
+        :param data: dictionary of the data. Support for json string
+            is deprecated
+        :param api_version: overrides x-avi-header in request header during
+            session creation
+        returns session's response object
+        """
+        return self._api('delete', path, tenant, tenant_uuid, data=data,
+                         timeout=timeout, params=params,
+                         api_version=api_version, **kwargs)
+
+    def delete_by_name(self, path, name, tenant='', tenant_uuid='',
+                       timeout=None, params=None, api_version=None, **kwargs):
+        """
+        Helper function to perform HTTP DELETE on Avi REST Objects using
+        object type and name. Internally, it transforms the request to
+        api/path?name=...
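+
+        A hedged sketch (the object type and name are placeholders):
+
+            api.delete_by_name('pool', 'web-pool')
+            # resolves the uuid via GET api/pool?name=web-pool, then
+            # issues DELETE api/pool/<uuid>; raises ObjectNotFound if
+            # no object answers to that name
+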
+ :param path: relative path to service + :param name: name of the object + :param tenant: overrides the tenant used during session creation + :param tenant_uuid: overrides the tenant or tenant_uuid during session + creation + :param timeout: timeout for API calls; Default value is 60 seconds + :param params: dictionary of key value pairs to be sent as query + parameters + :param api_version: overrides x-avi-header in request header during + session creation + returns session's response object + """ + uuid = self._get_uuid_by_name(path, name, tenant, tenant_uuid, + api_version=api_version) + if not uuid: + raise ObjectNotFound("%s/?name=%s" % (path, name)) + path = '%s/%s' % (path, uuid) + return self.delete(path, tenant, tenant_uuid, timeout=timeout, + params=params, api_version=api_version, **kwargs) + + def get_obj_ref(self, obj): + """returns reference url from dict object""" + if not obj: + return None + if isinstance(obj, Response): + obj = json.loads(obj.text) + if obj.get(0, None): + return obj[0]['url'] + elif obj.get('url', None): + return obj['url'] + elif obj.get('results', None): + return obj['results'][0]['url'] + else: + return None + + def get_obj_uuid(self, obj): + """returns uuid from dict object""" + if not obj: + raise ObjectNotFound('Object %s Not found' % (obj)) + if isinstance(obj, Response): + obj = json.loads(obj.text) + if obj.get(0, None): + return obj[0]['uuid'] + elif obj.get('uuid', None): + return obj['uuid'] + elif obj.get('results', None): + return obj['results'][0]['uuid'] + else: + return None + + def _get_api_path(self, path, uuid=None): + """ + This function returns the full url from relative path and uuid. + """ + if path == 'logout': + return self.prefix + '/' + path + elif uuid: + return self.prefix + '/api/' + path + '/' + uuid + else: + return self.prefix + '/api/' + path + + def _get_uuid_by_name(self, path, name, tenant='admin', + tenant_uuid='', api_version=None): + """gets object by name and service path and returns uuid""" + resp = self.get_object_by_name( + path, name, tenant, tenant_uuid, api_version=api_version) + if not resp: + raise ObjectNotFound("%s/%s" % (path, name)) + return self.get_obj_uuid(resp) + + def _update_session_last_used(self): + if self.key in sessionDict: + sessionDict[self.key]["last_used"] = datetime.utcnow() + + @staticmethod + def _clean_inactive_sessions(): + """Removes sessions which are inactive more than 20 min""" + session_cache = sessionDict + logger.debug("cleaning inactive sessions in pid %d num elem %d", + os.getpid(), len(session_cache)) + keys_to_delete = [] + for key, session in list(session_cache.items()): + tdiff = avi_timedelta(datetime.utcnow() - session["last_used"]) + if tdiff < ApiSession.SESSION_CACHE_EXPIRY: + continue + keys_to_delete.append(key) + for key in keys_to_delete: + del session_cache[key] + logger.debug("Removed session for : %s", key) + + def delete_session(self): + """ Removes the session for cleanup""" + logger.debug("Removed session for : %s", self.key) + sessionDict.pop(self.key, None) + return +# End of file diff --git a/plugins/module_utils/network/bigswitch/__init__.py b/plugins/module_utils/network/bigswitch/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/plugins/module_utils/network/bigswitch/bigswitch.py b/plugins/module_utils/network/bigswitch/bigswitch.py new file mode 100644 index 0000000000..299fcd3310 --- /dev/null +++ b/plugins/module_utils/network/bigswitch/bigswitch.py @@ -0,0 +1,91 @@ +# This code is part of Ansible, but is an independent 
component. +# This particular file snippet, and this file snippet only, is BSD licensed. +# Modules you write using this snippet, which is embedded dynamically by Ansible +# still belong to the author of the module, and may assign their own license +# to the complete work. +# +# (c) 2016, Ted Elhourani +# +# +# Redistribution and use in source and binary forms, with or without modification, +# are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +import json + +from ansible.module_utils.urls import fetch_url + + +class Response(object): + + def __init__(self, resp, info): + self.body = None + if resp: + self.body = resp.read() + self.info = info + + @property + def json(self): + if not self.body: + if "body" in self.info: + return json.loads(self.info["body"]) + return None + try: + return json.loads(self.body) + except ValueError: + return None + + @property + def status_code(self): + return self.info["status"] + + +class Rest(object): + + def __init__(self, module, headers, baseurl): + self.module = module + self.headers = headers + self.baseurl = baseurl + + def _url_builder(self, path): + if path[0] == '/': + path = path[1:] + return '%s/%s' % (self.baseurl, path) + + def send(self, method, path, data=None, headers=None): + url = self._url_builder(path) + data = self.module.jsonify(data) + + resp, info = fetch_url(self.module, url, data=data, headers=self.headers, method=method) + + return Response(resp, info) + + def get(self, path, data=None, headers=None): + return self.send('GET', path, data, headers) + + def put(self, path, data=None, headers=None): + return self.send('PUT', path, data, headers) + + def post(self, path, data=None, headers=None): + return self.send('POST', path, data, headers) + + def patch(self, path, data=None, headers=None): + return self.send('PATCH', path, data, headers) + + def delete(self, path, data=None, headers=None): + return self.send('DELETE', path, data, headers) diff --git a/plugins/module_utils/network/checkpoint/__init__.py b/plugins/module_utils/network/checkpoint/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/plugins/module_utils/network/cloudengine/__init__.py b/plugins/module_utils/network/cloudengine/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/plugins/module_utils/network/cloudengine/ce.py b/plugins/module_utils/network/cloudengine/ce.py new file mode 100644 
index 0000000000..b9fe91ff3c --- /dev/null +++ b/plugins/module_utils/network/cloudengine/ce.py @@ -0,0 +1,421 @@ +# +# This code is part of Ansible, but is an independent component. +# +# This particular file snippet, and this file snippet only, is BSD licensed. +# Modules you write using this snippet, which is embedded dynamically by Ansible +# still belong to the author of the module, and may assign their own license +# to the complete work. +# +# (c) 2017 Red Hat, Inc. +# +# Redistribution and use in source and binary forms, with or without modification, +# are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# + +import re +import socket +import sys +import traceback + +from ansible.module_utils.basic import env_fallback +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import to_list, ComplexList +from ansible.module_utils.connection import exec_command, ConnectionError +from ansible.module_utils.six import iteritems +from ansible.module_utils._text import to_native +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.netconf import NetconfConnection + + +try: + from ncclient.xml_ import to_xml, new_ele_ns + HAS_NCCLIENT = True +except ImportError: + HAS_NCCLIENT = False + + +try: + from lxml import etree +except ImportError: + from xml.etree import ElementTree as etree + +_DEVICE_CLI_CONNECTION = None +_DEVICE_NC_CONNECTION = None + +ce_provider_spec = { + 'host': dict(), + 'port': dict(type='int'), + 'username': dict(fallback=(env_fallback, ['ANSIBLE_NET_USERNAME'])), + 'password': dict(fallback=(env_fallback, ['ANSIBLE_NET_PASSWORD']), no_log=True), + 'ssh_keyfile': dict(fallback=(env_fallback, ['ANSIBLE_NET_SSH_KEYFILE']), type='path'), + 'use_ssl': dict(type='bool'), + 'validate_certs': dict(type='bool'), + 'timeout': dict(type='int'), + 'transport': dict(default='cli', choices=['cli', 'netconf']), +} +ce_argument_spec = { + 'provider': dict(type='dict', options=ce_provider_spec), +} +ce_top_spec = { + 'host': dict(removed_in_version=2.9), + 'port': dict(removed_in_version=2.9, type='int'), + 'username': dict(removed_in_version=2.9), + 'password': dict(removed_in_version=2.9, no_log=True), + 'ssh_keyfile': dict(removed_in_version=2.9, type='path'), + 'use_ssl': dict(removed_in_version=2.9, type='bool'), + 'validate_certs': dict(removed_in_version=2.9, type='bool'), + 'timeout': 
dict(removed_in_version=2.9, type='int'),
+    'transport': dict(removed_in_version=2.9, choices=['cli', 'netconf']),
+}
+ce_argument_spec.update(ce_top_spec)
+
+
+def to_string(data):
+    return re.sub(r'|>)', r' 2 and err[0] in ["<", "["] and err[-1] in [">", "]"]:
+            continue
+        err.strip('.,\r\n\t ')
+        if err:
+            msg.append(err)
+
+    if cmd:
+        msg.insert(0, "Command: %s" % cmd)
+
+    return ", ".join(msg).capitalize() + "."
+
+
+def to_command(module, commands):
+    default_output = 'text'
+    transform = ComplexList(dict(
+        command=dict(key=True),
+        output=dict(default=default_output),
+        prompt=dict(),
+        answer=dict()
+    ), module)
+
+    commands = transform(to_list(commands))
+
+    return commands
+
+
+def get_config(module, flags=None):
+    flags = [] if flags is None else flags
+
+    conn = get_connection(module)
+    return conn.get_config(flags)
+
+
+def run_commands(module, commands, check_rc=True):
+    conn = get_connection(module)
+    return conn.run_commands(to_command(module, commands), check_rc)
+
+
+def load_config(module, config):
+    """load_config"""
+    conn = get_connection(module)
+    return conn.load_config(config)
+
+
+def ce_unknown_host_cb(host, fingerprint):
+    """ ce_unknown_host_cb """
+
+    return True
+
+
+def get_nc_set_id(xml_str):
+    """get netconf set-id value"""
+
+    result = re.findall(r'<rpc-reply.+?set-id=\"(\d+)\"', xml_str)
+    if result:
+        return result[0]
+    return None
+
+
+def get_xml_line(xml_list, index):
+    """get xml specified line valid string data"""
+
+    ele = None
+    while xml_list and not ele:
+        if index >= 0 and index >= len(xml_list):
+            return None
+        if index < 0 and abs(index) > len(xml_list):
+            return None
+
+        ele = xml_list[index]
+        if not ele.replace(" ", ""):
+            xml_list.pop(index)
+            ele = None
+
+    return ele
+
+
+def merge_nc_xml(xml1, xml2):
+    """merge xml1 and xml2"""
+
+    xml1_list = xml1.split("</data>")[0].split("\n")
+    xml2_list = xml2.split("<data>")[1].split("\n")
+
+    while True:
+        xml1_ele1 = get_xml_line(xml1_list, -1)
+        xml1_ele2 = get_xml_line(xml1_list, -2)
+        xml2_ele1 = get_xml_line(xml2_list, 0)
+        xml2_ele2 = get_xml_line(xml2_list, 1)
+        if not xml1_ele1 or not xml1_ele2 or not xml2_ele1 or not xml2_ele2:
+            return xml1
+
+        if "xmlns" in xml2_ele1:
+            xml2_ele1 = xml2_ele1.lstrip().split(" ")[0] + ">"
+        if "xmlns" in xml2_ele2:
+            xml2_ele2 = xml2_ele2.lstrip().split(" ")[0] + ">"
+        if xml1_ele1.replace(" ", "").replace("/", "") == xml2_ele1.replace(" ", "").replace("/", ""):
+            if xml1_ele2.replace(" ", "").replace("/", "") == xml2_ele2.replace(" ", "").replace("/", ""):
+                xml1_list.pop()
+                xml2_list.pop(0)
+            else:
+                break
+        else:
+            break
+
+    return "\n".join(xml1_list + xml2_list)
+
+
+def get_nc_connection(module):
+    global _DEVICE_NC_CONNECTION
+    if not _DEVICE_NC_CONNECTION:
+        load_params(module)
+        conn = NetconfConnection(module._socket_path)
+        _DEVICE_NC_CONNECTION = conn
+    return _DEVICE_NC_CONNECTION
+
+
+def set_nc_config(module, xml_str):
+    """ set_config """
+
+    conn = get_nc_connection(module)
+    try:
+        out = conn.edit_config(target='running', config=xml_str, default_operation='merge',
+                               error_option='rollback-on-error')
+    finally:
+        # conn.unlock(target = 'candidate')
+        pass
+    return to_string(to_xml(out))
+
+
+def get_nc_next(module, xml_str):
+    """ get_nc_next for exchange capability """
+
+    conn = get_nc_connection(module)
+    result = None
+    if xml_str is not None:
+        response = conn.get(xml_str, if_rpc_reply=True)
+        result = response.find('./*')
+        set_id = response.get('set-id')
+        while True and set_id is not None:
+            try:
+                fetch_node = new_ele_ns('get-next', 'http://www.huawei.com/netconf/capability/base/1.0', {'set-id': set_id})
+                next_xml = conn.dispatch_rpc(etree.tostring(fetch_node))
+                if next_xml is not None:
+                    result.extend(next_xml.find('./*'))
+                    set_id = next_xml.get('set-id')
+            except
ConnectionError: + break + if result is not None: + return etree.tostring(result) + return result + + +def get_nc_config(module, xml_str): + """ get_config """ + + conn = get_nc_connection(module) + if xml_str is not None: + response = conn.get(xml_str) + else: + return None + + return to_string(to_xml(response)) + + +def execute_nc_action(module, xml_str): + """ huawei execute-action """ + + conn = get_nc_connection(module) + response = conn.execute_action(xml_str) + return to_string(to_xml(response)) + + +def execute_nc_cli(module, xml_str): + """ huawei execute-cli """ + + if xml_str is not None: + try: + conn = get_nc_connection(module) + out = conn.execute_nc_cli(command=xml_str) + return to_string(to_xml(out)) + except Exception as exc: + raise Exception(exc) + + +def check_ip_addr(ipaddr): + """ check ip address, Supports IPv4 and IPv6 """ + + if not ipaddr or '\x00' in ipaddr: + return False + + try: + res = socket.getaddrinfo(ipaddr, 0, socket.AF_UNSPEC, + socket.SOCK_STREAM, + 0, socket.AI_NUMERICHOST) + return bool(res) + except socket.gaierror: + err = sys.exc_info()[1] + if err.args[0] == socket.EAI_NONAME: + return False + raise diff --git a/plugins/module_utils/network/cnos/__init__.py b/plugins/module_utils/network/cnos/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/plugins/module_utils/network/cnos/cnos.py b/plugins/module_utils/network/cnos/cnos.py new file mode 100644 index 0000000000..ae12a9a22d --- /dev/null +++ b/plugins/module_utils/network/cnos/cnos.py @@ -0,0 +1,660 @@ +# This code is part of Ansible, but is an independent component. +# This particular file snippet, and this file snippet only, is BSD licensed. +# Modules you write using this snippet, which is embedded dynamically by +# Ansible still belong to the author of the module, and may assign their own +# license to the complete work. +# +# Copyright (C) 2017 Lenovo, Inc. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. 
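+#
+# A hedged usage sketch for the helpers defined below (the command and
+# its prompt/answer values are placeholders):
+#
+#     from ansible_collections.community.general.plugins.module_utils.network.cnos import cnos
+#
+#     transcript = cnos.run_cnos_commands(module, [
+#         {'command': 'show vlan', 'prompt': None, 'answer': None}])
+#
+# run_cnos_commands() brackets the list with 'configure terminal'/'end'
+# and returns a '>> command' / '<< response' transcript string.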
+# +# Contains utility methods +# Lenovo Networking + +import time +import socket +import re +import json +try: + from ansible_collections.community.general.plugins.module_utils.network.cnos import cnos_errorcodes + from ansible_collections.community.general.plugins.module_utils.network.cnos import cnos_devicerules + HAS_LIB = True +except Exception: + HAS_LIB = False +from distutils.cmd import Command +from ansible.module_utils._text import to_text +from ansible.module_utils.basic import env_fallback +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import to_list, EntityCollection +from ansible.module_utils.connection import Connection, exec_command +from ansible.module_utils.connection import ConnectionError + +_DEVICE_CONFIGS = {} +_CONNECTION = None +_VALID_USER_ROLES = ['network-admin', 'network-operator'] + +cnos_provider_spec = { + 'host': dict(), + 'port': dict(type='int'), + 'username': dict(fallback=(env_fallback, ['ANSIBLE_NET_USERNAME'])), + 'password': dict(fallback=(env_fallback, ['ANSIBLE_NET_PASSWORD']), + no_log=True), + 'ssh_keyfile': dict(fallback=(env_fallback, ['ANSIBLE_NET_SSH_KEYFILE']), + type='path'), + 'authorize': dict(fallback=(env_fallback, ['ANSIBLE_NET_AUTHORIZE']), + type='bool'), + 'auth_pass': dict(fallback=(env_fallback, ['ANSIBLE_NET_AUTH_PASS']), + no_log=True), + 'timeout': dict(type='int'), + 'context': dict(), + 'passwords': dict() +} + +cnos_argument_spec = { + 'provider': dict(type='dict', options=cnos_provider_spec), +} + +command_spec = { + 'command': dict(key=True), + 'prompt': dict(), + 'answer': dict(), + 'check_all': dict() +} + + +def get_provider_argspec(): + return cnos_provider_spec + + +def check_args(module, warnings): + pass + + +def get_user_roles(): + return _VALID_USER_ROLES + + +def get_connection(module): + global _CONNECTION + if _CONNECTION: + return _CONNECTION + _CONNECTION = Connection(module._socket_path) + + context = None + try: + context = module.params['context'] + except KeyError: + context = None + + if context: + if context == 'system': + command = 'changeto system' + else: + command = 'changeto context %s' % context + _CONNECTION.get(command) + + return _CONNECTION + + +def get_config(module, flags=None): + flags = [] if flags is None else flags + + passwords = None + try: + passwords = module.params['passwords'] + except KeyError: + passwords = None + if passwords: + cmd = 'more system:running-config' + else: + cmd = 'display running-config ' + cmd += ' '.join(flags) + cmd = cmd.strip() + + try: + return _DEVICE_CONFIGS[cmd] + except KeyError: + conn = get_connection(module) + out = conn.get(cmd) + cfg = to_text(out, errors='surrogate_then_replace').strip() + _DEVICE_CONFIGS[cmd] = cfg + return cfg + + +def to_commands(module, commands): + if not isinstance(commands, list): + raise AssertionError('argument must be of type ') + + transform = EntityCollection(module, command_spec) + commands = transform(commands) + + for index, item in enumerate(commands): + if module.check_mode and not item['command'].startswith('show'): + module.warn('only show commands are supported when using check ' + 'mode, not executing `%s`' % item['command']) + + return commands + + +def run_commands(module, commands, check_rc=True): + connection = get_connection(module) + connection.get('enable') + commands = to_commands(module, to_list(commands)) + + responses = list() + + for cmd in commands: + out = connection.get(**cmd) + responses.append(to_text(out, errors='surrogate_then_replace')) + + return 
responses
+
+
+def run_cnos_commands(module, commands, check_rc=True):
+    retVal = ''
+    enter_config = {'command': 'configure terminal', 'prompt': None,
+                    'answer': None}
+    exit_config = {'command': 'end', 'prompt': None, 'answer': None}
+    commands.insert(0, enter_config)
+    commands.append(exit_config)
+    for cmd in commands:
+        retVal = retVal + '>> ' + cmd['command'] + '\n'
+    try:
+        responses = run_commands(module, commands, check_rc)
+        for response in responses:
+            retVal = retVal + '<< ' + response + '\n'
+    except Exception as e:
+        errMsg = ''
+        if hasattr(e, 'message'):
+            errMsg = e.message
+        else:
+            errMsg = str(e)
+        # Exception in Exceptions
+        if 'VLAN_ACCESS_MAP' in errMsg:
+            return retVal + '<<' + errMsg + '\n'
+        if 'confederation identifier' in errMsg:
+            return retVal + '<<' + errMsg + '\n'
+        # Add more here if required
+        retVal = retVal + '<< ' + 'Error-101 ' + errMsg + '\n'
+    return str(retVal)
+
+
+def get_capabilities(module):
+    if hasattr(module, '_cnos_capabilities'):
+        return module._cnos_capabilities
+    try:
+        capabilities = Connection(module._socket_path).get_capabilities()
+    except ConnectionError as exc:
+        module.fail_json(msg=to_text(exc, errors='surrogate_then_replace'))
+    module._cnos_capabilities = json.loads(capabilities)
+    return module._cnos_capabilities
+
+
+def load_config(module, config):
+    try:
+        conn = get_connection(module)
+        conn.get('enable')
+        resp = conn.edit_config(config)
+        return resp.get('response')
+    except ConnectionError as exc:
+        module.fail_json(msg=to_text(exc))
+
+
+def get_defaults_flag(module):
+    rc, out, err = exec_command(module, 'display running-config ?')
+    out = to_text(out, errors='surrogate_then_replace')
+
+    commands = set()
+    for line in out.splitlines():
+        if line:
+            commands.add(line.strip().split()[0])
+
+    if 'all' in commands:
+        return 'all'
+    else:
+        return 'full'
+
+
+def enterEnableModeForDevice(enablePassword, timeout, obj):
+    command = "enable\n"
+    pwdPrompt = "password:"
+    # debugOutput(enablePassword)
+    # debugOutput('\n')
+    obj.settimeout(int(timeout))
+    # Executing enable
+    obj.send(command)
+    flag = False
+    retVal = ""
+    count = 5
+    while not flag:
+        # If wait time is exceeded.
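+        # The loop polls the socket once a second for up to five tries
+        # (count), answers the "password:" prompt with the enable
+        # password, and returns once a '#' prompt is seen.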
+ if(count == 0): + flag = True + else: + count = count - 1 + # A delay of one second + time.sleep(1) + try: + buffByte = obj.recv(9999) + buff = buffByte.decode() + retVal = retVal + buff + # debugOutput(buff) + gotit = buff.find(pwdPrompt) + if(gotit != -1): + time.sleep(1) + if(enablePassword is None or enablePassword == ""): + return "\n Error-106" + obj.send(enablePassword) + obj.send("\r") + obj.send("\n") + time.sleep(1) + innerBuffByte = obj.recv(9999) + innerBuff = innerBuffByte.decode() + retVal = retVal + innerBuff + # debugOutput(innerBuff) + innerGotit = innerBuff.find("#") + if(innerGotit != -1): + return retVal + else: + gotit = buff.find("#") + if(gotit != -1): + return retVal + except Exception: + retVal = retVal + "\n Error-101" + flag = True + if(retVal == ""): + retVal = "\n Error-101" + return retVal +# EOM + + +def waitForDeviceResponse(command, prompt, timeout, obj): + obj.settimeout(int(timeout)) + obj.send(command) + flag = False + retVal = "" + while not flag: + time.sleep(1) + try: + buffByte = obj.recv(9999) + buff = buffByte.decode() + retVal = retVal + buff + # debugOutput(retVal) + gotit = buff.find(prompt) + if(gotit != -1): + flag = True + except Exception: + # debugOutput(prompt) + if prompt == "(yes/no)?": + pass + elif prompt == "Password:": + pass + else: + retVal = retVal + "\n Error-101" + flag = True + return retVal +# EOM + + +def checkOutputForError(output): + retVal = "" + index = output.lower().find('error') + startIndex = index + 6 + if(index == -1): + index = output.lower().find('invalid') + startIndex = index + 8 + if(index == -1): + index = output.lower().find('cannot be enabled in l2 interface') + startIndex = index + 34 + if(index == -1): + index = output.lower().find('incorrect') + startIndex = index + 10 + if(index == -1): + index = output.lower().find('failure') + startIndex = index + 8 + if(index == -1): + return None + + endIndex = startIndex + 3 + errorCode = output[startIndex:endIndex] + result = errorCode.isdigit() + if(result is not True): + return "Device returned an Error. 
Please check Results for more \ + information" + + errorFile = "dictionary/ErrorCodes.lvo" + try: + # with open(errorFile, 'r') as f: + f = open(errorFile, 'r') + for line in f: + if('=' in line): + data = line.split('=') + if(data[0].strip() == errorCode): + errorString = data[1].strip() + return errorString + except Exception: + errorString = cnos_errorcodes.getErrorString(errorCode) + errorString = errorString.strip() + return errorString + return "Error Code Not Found" +# EOM + + +def checkSanityofVariable(deviceType, variableId, variableValue): + retVal = "" + ruleFile = "dictionary/" + deviceType + "_rules.lvo" + ruleString = getRuleStringForVariable(deviceType, ruleFile, variableId) + retVal = validateValueAgainstRule(ruleString, variableValue) + return retVal +# EOM + + +def getRuleStringForVariable(deviceType, ruleFile, variableId): + retVal = "" + try: + # with open(ruleFile, 'r') as f: + f = open(ruleFile, 'r') + for line in f: + # debugOutput(line) + if(':' in line): + data = line.split(':') + # debugOutput(data[0]) + if(data[0].strip() == variableId): + retVal = line + except Exception: + ruleString = cnos_devicerules.getRuleString(deviceType, variableId) + retVal = ruleString.strip() + return retVal +# EOM + + +def validateValueAgainstRule(ruleString, variableValue): + + retVal = "" + if(ruleString == ""): + return 1 + rules = ruleString.split(':') + variableType = rules[1].strip() + varRange = rules[2].strip() + if(variableType == "INTEGER"): + result = checkInteger(variableValue) + if(result is True): + return "ok" + else: + return "Error-111" + elif(variableType == "FLOAT"): + result = checkFloat(variableValue) + if(result is True): + return "ok" + else: + return "Error-112" + + elif(variableType == "INTEGER_VALUE"): + int_range = varRange.split('-') + r = range(int(int_range[0].strip()), int(int_range[1].strip())) + if(checkInteger(variableValue) is not True): + return "Error-111" + result = int(variableValue) in r + if(result is True): + return "ok" + else: + return "Error-113" + + elif(variableType == "INTEGER_VALUE_RANGE"): + int_range = varRange.split('-') + varLower = int_range[0].strip() + varHigher = int_range[1].strip() + r = range(int(varLower), int(varHigher)) + val_range = variableValue.split('-') + try: + valLower = val_range[0].strip() + valHigher = val_range[1].strip() + except Exception: + return "Error-113" + if((checkInteger(valLower) is not True) or + (checkInteger(valHigher) is not True)): + # debugOutput("Error-114") + return "Error-114" + result = (int(valLower) in r) and (int(valHigher)in r) \ + and (int(valLower) < int(valHigher)) + if(result is True): + return "ok" + else: + # debugOutput("Error-113") + return "Error-113" + + elif(variableType == "INTEGER_OPTIONS"): + int_options = varRange.split(',') + if(checkInteger(variableValue) is not True): + return "Error-111" + for opt in int_options: + if(opt.strip() is variableValue): + result = True + break + if(result is True): + return "ok" + else: + return "Error-115" + + elif(variableType == "LONG"): + result = checkLong(variableValue) + if(result is True): + return "ok" + else: + return "Error-116" + + elif(variableType == "LONG_VALUE"): + long_range = varRange.split('-') + r = range(int(long_range[0].strip()), int(long_range[1].strip())) + if(checkLong(variableValue) is not True): + # debugOutput(variableValue) + return "Error-116" + result = int(variableValue) in r + if(result is True): + return "ok" + else: + return "Error-113" + + elif(variableType == "LONG_VALUE_RANGE"): + long_range = 
varRange.split('-') + r = range(int(long_range[0].strip()), int(long_range[1].strip())) + val_range = variableValue.split('-') + if((checkLong(val_range[0]) is not True) or + (checkLong(val_range[1]) is not True)): + return "Error-117" + result = (val_range[0] in r) and ( + val_range[1] in r) and (val_range[0] < val_range[1]) + if(result is True): + return "ok" + else: + return "Error-113" + elif(variableType == "LONG_OPTIONS"): + long_options = varRange.split(',') + if(checkLong(variableValue) is not True): + return "Error-116" + for opt in long_options: + if(opt.strip() == variableValue): + result = True + break + if(result is True): + return "ok" + else: + return "Error-115" + + elif(variableType == "TEXT"): + if(variableValue == ""): + return "Error-118" + if(True is isinstance(variableValue, str)): + return "ok" + else: + return "Error-119" + + elif(variableType == "NO_VALIDATION"): + if(variableValue == ""): + return "Error-118" + else: + return "ok" + + elif(variableType == "TEXT_OR_EMPTY"): + if(variableValue is None or variableValue == ""): + return "ok" + if(result == isinstance(variableValue, str)): + return "ok" + else: + return "Error-119" + + elif(variableType == "MATCH_TEXT"): + if(variableValue == ""): + return "Error-118" + if(isinstance(variableValue, str)): + if(varRange == variableValue): + return "ok" + else: + return "Error-120" + else: + return "Error-119" + + elif(variableType == "MATCH_TEXT_OR_EMPTY"): + if(variableValue is None or variableValue == ""): + return "ok" + if(isinstance(variableValue, str)): + if(varRange == variableValue): + return "ok" + else: + return "Error-120" + else: + return "Error-119" + + elif(variableType == "TEXT_OPTIONS"): + str_options = varRange.split(',') + if(isinstance(variableValue, str) is not True): + return "Error-119" + result = False + for opt in str_options: + if(opt.strip() == variableValue): + result = True + break + if(result is True): + return "ok" + else: + return "Error-115" + + elif(variableType == "TEXT_OPTIONS_OR_EMPTY"): + if(variableValue is None or variableValue == ""): + return "ok" + str_options = varRange.split(',') + if(isinstance(variableValue, str) is not True): + return "Error-119" + for opt in str_options: + if(opt.strip() == variableValue): + result = True + break + if(result is True): + return "ok" + else: + return "Error-115" + + elif(variableType == "IPV4Address"): + try: + socket.inet_pton(socket.AF_INET, variableValue) + result = True + except socket.error: + result = False + if(result is True): + return "ok" + else: + return "Error-121" + elif(variableType == "IPV4AddressWithMask"): + if(variableValue is None or variableValue == ""): + return "Error-119" + str_options = variableValue.split('/') + ipaddr = str_options[0] + mask = str_options[1] + try: + socket.inet_pton(socket.AF_INET, ipaddr) + if(checkInteger(mask) is True): + result = True + else: + result = False + except socket.error: + result = False + if(result is True): + return "ok" + else: + return "Error-121" + + elif(variableType == "IPV6Address"): + try: + socket.inet_pton(socket.AF_INET6, variableValue) + result = True + except socket.error: + result = False + if(result is True): + return "ok" + else: + return "Error-122" + + return retVal +# EOM + + +def disablePaging(remote_conn): + remote_conn.send("terminal length 0\n") + time.sleep(1) + # Clear the buffer on the screen + outputByte = remote_conn.recv(1000) + output = outputByte.decode() + return output +# EOM + + +def checkInteger(s): + try: + int(s) + return True + except 
ValueError: + return False +# EOM + + +def checkFloat(s): + try: + float(s) + return True + except ValueError: + return False +# EOM + + +def checkLong(s): + try: + int(s) + return True + except ValueError: + return False + + +def debugOutput(command): + f = open('debugOutput.txt', 'a') + f.write(str(command)) # python will convert \n to os.linesep + f.close() # you can omit in most cases as the destructor will call it +# EOM diff --git a/plugins/module_utils/network/cnos/cnos_devicerules.py b/plugins/module_utils/network/cnos/cnos_devicerules.py new file mode 100644 index 0000000000..f6c8f24ea7 --- /dev/null +++ b/plugins/module_utils/network/cnos/cnos_devicerules.py @@ -0,0 +1,1921 @@ +# This code is part of Ansible, but is an independent component. +# This particular file snippet, and this file snippet only, is BSD licensed. +# Modules you write using this snippet, which is embedded dynamically by +# Ansible still belong to the author of the module, and may assign their +# own license to the complete work. +# +# Copyright (C) 2017 Lenovo, Inc. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. 
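+#
+# A hedged sketch of how these rule tables are consumed (the device type
+# and variable id are placeholders; the validator lives in cnos.py):
+#
+#     rule = getRuleString('g8272_cnos', 'vlan_id')
+#     # -> 'vlan_id:INTEGER_VALUE:1-3999'
+#     # cnos.validateValueAgainstRule(rule, '100') then returns "ok";
+#     # an out-of-range value yields "Error-113".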
+# +# Contains device rule and methods +# Lenovo Networking + + +def getRuleString(deviceType, variableId): + retVal = variableId + ":" + if(deviceType == 'g8272_cnos'): + if variableId in g8272_cnos: + retVal = retVal + g8272_cnos[variableId] + else: + retVal = "The variable " + variableId + " is not supported" + elif(deviceType == 'g8296_cnos'): + if variableId in g8296_cnos: + retVal = retVal + g8296_cnos[variableId] + else: + retVal = "The variable " + variableId + " is not supported" + elif(deviceType == 'g8332_cnos'): + if variableId in g8332_cnos: + retVal = retVal + g8332_cnos[variableId] + else: + retVal = "The variable " + variableId + " is not supported" + elif(deviceType == 'NE1072T'): + if variableId in NE1072T: + retVal = retVal + NE1072T[variableId] + else: + retVal = "The variable " + variableId + " is not supported" + elif(deviceType == 'NE1032'): + if variableId in NE1032: + retVal = retVal + NE1032[variableId] + else: + retVal = "The variable " + variableId + " is not supported" + elif(deviceType == 'NE1032T'): + if variableId in NE1032T: + retVal = retVal + NE1032T[variableId] + else: + retVal = "The variable " + variableId + " is not supported" + elif(deviceType == 'NE10032'): + if variableId in NE10032: + retVal = retVal + NE10032[variableId] + else: + retVal = "The variable " + variableId + " is not supported" + elif(deviceType == 'NE2572'): + if variableId in NE2572: + retVal = retVal + NE2572[variableId] + else: + retVal = "The variable " + variableId + " is not supported" + elif(deviceType == 'NE0152T'): + if variableId in NE0152T: + retVal = retVal + NE0152T[variableId] + else: + retVal = "The variable " + variableId + " is not supported" + else: + if variableId in default_cnos: + retVal = retVal + default_cnos[variableId] + else: + retVal = "The variable " + variableId + " is not supported" + return retVal +# EOM + + +default_cnos = { + 'vlan_id': 'INTEGER_VALUE:1-3999', + 'vlan_id_range': 'INTEGER_VALUE_RANGE:1-3999', + 'vlan_name': 'TEXT:', + 'vlan_flood': 'TEXT_OPTIONS:ipv4,ipv6', + 'vlan_state': 'TEXT_OPTIONS:active,suspend', + 'vlan_last_member_query_interval': 'INTEGER_VALUE:1-25', + 'vlan_querier': 'IPV4Address:', + 'vlan_querier_timeout': 'INTEGER_VALUE:1-65535', + 'vlan_query_interval': 'INTEGER_VALUE:1-18000', + 'vlan_query_max_response_time': 'INTEGER_VALUE:1-25', + 'vlan_report_suppression': 'INTEGER_VALUE:1-25', + 'vlan_robustness_variable': 'INTEGER_VALUE:1-7', + 'vlan_startup_query_count': 'INTEGER_VALUE:1-10', + 'vlan_startup_query_interval': 'INTEGER_VALUE:1-18000', + 'vlan_snooping_version': 'INTEGER_VALUE:2-3', + 'vlan_access_map_name': 'TEXT: ', + 'vlan_ethernet_interface': 'TEXT:', + 'vlan_portagg_number': 'INTEGER_VALUE:1-4096', + 'vlan_accessmap_action': 'TEXT_OPTIONS:drop,forward,redirect', + 'vlan_dot1q_tag': 'MATCH_TEXT_OR_EMPTY:egress-only', + 'vlan_filter_name': 'TEXT:', + 'vlag_auto_recovery': 'INTEGER_VALUE:240-3600', + 'vlag_config_consistency': 'TEXT_OPTIONS:disable,strict', + 'vlag_instance': 'INTEGER_VALUE:1-64', + 'vlag_port_aggregation': 'INTEGER_VALUE:1-4096', + 'vlag_priority': 'INTEGER_VALUE:0-65535', + 'vlag_startup_delay': 'INTEGER_VALUE:0-3600', + 'vlag_tier_id': 'INTEGER_VALUE:1-512', + 'vlag_hlthchk_options': 'TEXT_OPTIONS:keepalive-attempts,\ + keepalive-interval,peer-ip,retry-interval', + 'vlag_keepalive_attempts': 'INTEGER_VALUE:1-24', + 'vlag_keepalive_interval': 'INTEGER_VALUE:2-300', + 'vlag_retry_interval': 'INTEGER_VALUE:1-300', + 'vlag_peerip': 'IPV4Address:', + 'vlag_peerip_vrf': 
'TEXT_OPTIONS:default,management', + 'bgp_as_number': 'NO_VALIDATION:1-4294967295', + 'bgp_address_family': 'TEXT_OPTIONS:ipv4,ipv6', + 'bgp_bgp_local_count': 'INTEGER_VALUE:2-64', + 'cluster_id_as_ip': 'IPV4Address:', + 'cluster_id_as_number': 'NO_VALIDATION:1-4294967295', + 'confederation_identifier': 'INTEGER_VALUE:1-65535', + 'condeferation_peers_as': 'INTEGER_VALUE:1-65535', + 'stalepath_delay_value': 'INTEGER_VALUE:1-3600', + 'maxas_limit_as': 'INTEGER_VALUE:1-2000', + 'neighbor_ipaddress': 'IPV4Address:', + 'neighbor_as': 'NO_VALIDATION:1-4294967295', + 'router_id': 'IPV4Address:', + 'bgp_keepalive_interval': 'INTEGER_VALUE:0-3600', + 'bgp_holdtime': 'INTEGER_VALUE:0-3600', + 'bgp_aggregate_prefix': 'IPV4AddressWithMask:', + 'addrfamily_routemap_name': 'TEXT:', + 'reachability_half_life': 'INTEGER_VALUE:1-45', + 'start_reuse_route_value': 'INTEGER_VALUE:1-20000', + 'start_suppress_route_value': 'INTEGER_VALUE:1-20000', + 'max_duration_to_suppress_route': 'INTEGER_VALUE:1-255', + 'unreachability_halftime_for_penalty': 'INTEGER_VALUE:1-45', + 'distance_external_AS': 'INTEGER_VALUE:1-255', + 'distance_internal_AS': 'INTEGER_VALUE:1-255', + 'distance_local_routes': 'INTEGER_VALUE:1-255', + 'maxpath_option': 'TEXT_OPTIONS:ebgp,ibgp', + 'maxpath_numbers': 'INTEGER_VALUE:2-32', + 'network_ip_prefix_with_mask': 'IPV4AddressWithMask:', + 'network_ip_prefix_value': 'IPV4Address:', + 'network_ip_prefix_mask': 'IPV4Address:', + 'nexthop_crtitical_delay': 'NO_VALIDATION:1-4294967295', + 'nexthop_noncrtitical_delay': 'NO_VALIDATION:1-4294967295', + 'addrfamily_redistribute_option': 'TEXT_OPTIONS:direct,ospf,\ + static', + 'bgp_neighbor_af_occurances': 'INTEGER_VALUE:1-10', + 'bgp_neighbor_af_filtername': 'TEXT:', + 'bgp_neighbor_af_maxprefix': 'INTEGER_VALUE:1-15870', + 'bgp_neighbor_af_prefixname': 'TEXT:', + 'bgp_neighbor_af_routemap': 'TEXT:', + 'bgp_neighbor_address_family': 'TEXT_OPTIONS:ipv4,ipv6', + 'bgp_neighbor_connection_retrytime': 'INTEGER_VALUE:1-65535', + 'bgp_neighbor_description': 'TEXT:', + 'bgp_neighbor_maxhopcount': 'INTEGER_VALUE:1-255', + 'bgp_neighbor_local_as': 'NO_VALIDATION:1-4294967295', + 'bgp_neighbor_maxpeers': 'INTEGER_VALUE:1-96', + 'bgp_neighbor_password': 'TEXT:', + 'bgp_neighbor_timers_Keepalive': 'INTEGER_VALUE:0-3600', + 'bgp_neighbor_timers_holdtime': 'INTEGER_VALUE:0-3600', + 'bgp_neighbor_ttl_hops': 'INTEGER_VALUE:1-254', + 'bgp_neighbor_update_options': 'TEXT_OPTIONS:ethernet,loopback,\ + vlan', + 'bgp_neighbor_update_ethernet': 'TEXT:', + 'bgp_neighbor_update_loopback': 'INTEGER_VALUE:0-7', + 'bgp_neighbor_update_vlan': 'INTEGER_VALUE:1-4094', + 'bgp_neighbor_weight': 'INTEGER_VALUE:0-65535', + 'ethernet_interface_value': 'INTEGER_VALUE:1-32', + 'ethernet_interface_range': 'INTEGER_VALUE_RANGE:1-32', + 'ethernet_interface_string': 'TEXT:', + 'loopback_interface_value': 'INTEGER_VALUE:0-7', + 'mgmt_interface_value': 'INTEGER_VALUE:0-0', + 'vlan_interface_value': 'INTEGER_VALUE:1-4094', + 'portchannel_interface_value': 'INTEGER_VALUE:1-4096', + 'portchannel_interface_range': 'INTEGER_VALUE_RANGE:1-4096', + 'portchannel_interface_string': 'TEXT:', + 'aggregation_group_no': 'INTEGER_VALUE:1-4096', + 'aggregation_group_mode': 'TEXT_OPTIONS:active,on,passive', + 'bfd_options': 'TEXT_OPTIONS:authentication,echo,interval,ipv4,\ + ipv6,neighbor', + 'bfd_interval': 'INTEGER_VALUE:50-999', + 'bfd_minrx': 'INTEGER_VALUE:50-999', + 'bfd_ multiplier': 'INTEGER_VALUE:3-50', + 'bfd_ipv4_options': 'TEXT_OPTIONS:authentication,echo,\ + interval', + 'bfd_auth_options': 
'TEXT_OPTIONS:keyed-md5,keyed-sha1,\ + meticulous-keyed-md5,meticulous-keyed-sha1,simple', + 'bfd_key_options': 'TEXT_OPTIONS:key-chain,key-id', + 'bfd_key_chain': 'TEXT:', + 'bfd_key_id': 'INTEGER_VALUE:0-255', + 'bfd_key_name': 'TEXT:', + 'bfd_neighbor_ip': 'TEXT:', + 'bfd_neighbor_options': 'TEXT_OPTIONS:admin-down,multihop,\ + non-persistent', + 'bfd_access_vlan': 'INTEGER_VALUE:1-3999', + 'bfd_bridgeport_mode': 'TEXT_OPTIONS:access,dot1q-tunnel,\ + trunk', + 'trunk_options': 'TEXT_OPTIONS:allowed,native', + 'trunk_vlanid': 'INTEGER_VALUE:1-3999', + 'portCh_description': 'TEXT:', + 'duplex_option': 'TEXT_OPTIONS:auto,full,half', + 'flowcontrol_options': 'TEXT_OPTIONS:receive,send', + 'portchannel_ip_options': 'TEXT_OPTIONS:access-group,address,\ + arp,dhcp,ospf,port,port-unreachable,redirects,router,\ + unreachables', + 'accessgroup_name': 'TEXT:', + 'portchannel_ipv4': 'IPV4Address:', + 'portchannel_ipv4_mask': 'TEXT:', + 'arp_ipaddress': 'IPV4Address:', + 'arp_macaddress': 'TEXT:', + 'arp_timeout_value': 'INTEGER_VALUE:60-28800', + 'relay_ipaddress': 'IPV4Address:', + 'ip_ospf_options': 'TEXT_OPTIONS:authentication,\ + authentication-key,bfd,cost,database-filter,dead-interval,\ + hello-interval,message-digest-key,mtu,mtu-ignore,network,\ + passive-interface,priority,retransmit-interval,shutdown,\ + transmit-delay', + 'ospf_id_decimal_value': 'NO_VALIDATION:1-4294967295', + 'ospf_id_ipaddres_value': 'IPV4Address:', + 'lacp_options': 'TEXT_OPTIONS:port-priority,suspend-individual,\ + timeout', + 'port_priority': 'INTEGER_VALUE:1-65535', + 'lldp_options': 'TEXT_OPTIONS:receive,tlv-select,transmit,\ + trap-notification', + 'lldp_tlv_options': 'TEXT_OPTIONS:link-aggregation,\ + mac-phy-status,management-address,max-frame-size,\ + port-description,port-protocol-vlan,port-vlan,power-mdi,\ + protocol-identity,system-capabilities,system-description,\ + system-name,vid-management,vlan-name', + 'load_interval_delay': 'INTEGER_VALUE:30-300', + 'load_interval_counter': 'INTEGER_VALUE:1-3', + 'mac_accessgroup_name': 'TEXT:', + 'mac_address': 'TEXT:', + 'microburst_threshold': 'NO_VALIDATION:1-4294967295', + 'mtu_value': 'INTEGER_VALUE:64-9216', + 'service_instance': 'NO_VALIDATION:1-4294967295', + 'service_policy_options': 'TEXT_OPTIONS:copp-system-policy,\ + input,output,type', + 'service_policy_name': 'TEXT:', + 'spanning_tree_options': 'TEXT_OPTIONS:bpdufilter,bpduguard,\ + cost,disable,enable,guard,link-type,mst,port,port-priority,\ + vlan', + 'spanning_tree_cost': 'NO_VALIDATION:1-200000000', + 'spanning_tree_interfacerange': 'INTEGER_VALUE_RANGE:1-3999', + 'spanning_tree_portpriority': 'TEXT_OPTIONS:0,32,64,96,128,160,\ + 192,224', + 'portchannel_ipv6_neighbor_mac': 'TEXT:', + 'portchannel_ipv6_neighbor_address': 'IPV6Address:', + 'portchannel_ipv6_linklocal': 'IPV6Address:', + 'portchannel_ipv6_dhcp_vlan': 'INTEGER_VALUE:1-4094', + 'portchannel_ipv6_dhcp_ethernet': 'TEXT:', + 'portchannel_ipv6_dhcp': 'IPV6Address:', + 'portchannel_ipv6_address': 'IPV6Address:', + 'portchannel_ipv6_options': 'TEXT_OPTIONS:address,dhcp,\ + link-local,nd,neighbor', + 'interface_speed': 'TEXT_OPTIONS:1000,10000,100000,25000,40000,50000,auto', + 'stormcontrol_options': 'TEXT_OPTIONS:broadcast,multicast,\ + unicast', + 'stormcontrol_level': 'FLOAT:', + 'portchannel_dot1q_tag': 'TEXT_OPTIONS:disable,enable,\ + egress-only', + 'vrrp_id': 'INTEGER_VALUE:1-255', +} +NE0152T = { + 'vlan_id': 'INTEGER_VALUE:1-3999', + 'vlan_id_range': 'INTEGER_VALUE_RANGE:1-3999', + 'vlan_name': 'TEXT:', + 'vlan_flood': 
'TEXT_OPTIONS:ipv4,ipv6', + 'vlan_state': 'TEXT_OPTIONS:active,suspend', + 'vlan_last_member_query_interval': 'INTEGER_VALUE:1-25', + 'vlan_querier': 'IPV4Address:', + 'vlan_querier_timeout': 'INTEGER_VALUE:1-65535', + 'vlan_query_interval': 'INTEGER_VALUE:1-18000', + 'vlan_query_max_response_time': 'INTEGER_VALUE:1-25', + 'vlan_report_suppression': 'INTEGER_VALUE:1-25', + 'vlan_robustness_variable': 'INTEGER_VALUE:1-7', + 'vlan_startup_query_count': 'INTEGER_VALUE:1-10', + 'vlan_startup_query_interval': 'INTEGER_VALUE:1-18000', + 'vlan_snooping_version': 'INTEGER_VALUE:2-3', + 'vlan_access_map_name': 'TEXT: ', + 'vlan_ethernet_interface': 'TEXT:', + 'vlan_portagg_number': 'INTEGER_VALUE:1-4096', + 'vlan_accessmap_action': 'TEXT_OPTIONS:drop,forward,redirect', + 'vlan_dot1q_tag': 'MATCH_TEXT_OR_EMPTY:egress-only', + 'vlan_filter_name': 'TEXT:', + 'vlag_auto_recovery': 'INTEGER_VALUE:240-3600', + 'vlag_config_consistency': 'TEXT_OPTIONS:disable,strict', + 'vlag_instance': 'INTEGER_VALUE:1-64', + 'vlag_port_aggregation': 'INTEGER_VALUE:1-4096', + 'vlag_priority': 'INTEGER_VALUE:0-65535', + 'vlag_startup_delay': 'INTEGER_VALUE:0-3600', + 'vlag_tier_id': 'INTEGER_VALUE:1-512', + 'vlag_hlthchk_options': 'TEXT_OPTIONS:keepalive-attempts,\ + keepalive-interval,peer-ip,retry-interval', + 'vlag_keepalive_attempts': 'INTEGER_VALUE:1-24', + 'vlag_keepalive_interval': 'INTEGER_VALUE:2-300', + 'vlag_retry_interval': 'INTEGER_VALUE:1-300', + 'vlag_peerip': 'IPV4Address:', + 'vlag_peerip_vrf': 'TEXT_OPTIONS:default,management', + 'bgp_as_number': 'NO_VALIDATION:1-4294967295', + 'bgp_address_family': 'TEXT_OPTIONS:ipv4,ipv6', + 'bgp_bgp_local_count': 'INTEGER_VALUE:2-64', + 'cluster_id_as_ip': 'IPV4Address:', + 'cluster_id_as_number': 'NO_VALIDATION:1-4294967295', + 'confederation_identifier': 'INTEGER_VALUE:1-65535', + 'condeferation_peers_as': 'INTEGER_VALUE:1-65535', + 'stalepath_delay_value': 'INTEGER_VALUE:1-3600', + 'maxas_limit_as': 'INTEGER_VALUE:1-2000', + 'neighbor_ipaddress': 'IPV4Address:', + 'neighbor_as': 'NO_VALIDATION:1-4294967295', + 'router_id': 'IPV4Address:', + 'bgp_keepalive_interval': 'INTEGER_VALUE:0-3600', + 'bgp_holdtime': 'INTEGER_VALUE:0-3600', + 'bgp_aggregate_prefix': 'IPV4AddressWithMask:', + 'addrfamily_routemap_name': 'TEXT:', + 'reachability_half_life': 'INTEGER_VALUE:1-45', + 'start_reuse_route_value': 'INTEGER_VALUE:1-20000', + 'start_suppress_route_value': 'INTEGER_VALUE:1-20000', + 'max_duration_to_suppress_route': 'INTEGER_VALUE:1-255', + 'unreachability_halftime_for_penalty': 'INTEGER_VALUE:1-45', + 'distance_external_AS': 'INTEGER_VALUE:1-255', + 'distance_internal_AS': 'INTEGER_VALUE:1-255', + 'distance_local_routes': 'INTEGER_VALUE:1-255', + 'maxpath_option': 'TEXT_OPTIONS:ebgp,ibgp', + 'maxpath_numbers': 'INTEGER_VALUE:2-32', + 'network_ip_prefix_with_mask': 'IPV4AddressWithMask:', + 'network_ip_prefix_value': 'IPV4Address:', + 'network_ip_prefix_mask': 'IPV4Address:', + 'nexthop_crtitical_delay': 'NO_VALIDATION:1-4294967295', + 'nexthop_noncrtitical_delay': 'NO_VALIDATION:1-4294967295', + 'addrfamily_redistribute_option': 'TEXT_OPTIONS:direct,ospf,\ + static', + 'bgp_neighbor_af_occurances': 'INTEGER_VALUE:1-10', + 'bgp_neighbor_af_filtername': 'TEXT:', + 'bgp_neighbor_af_maxprefix': 'INTEGER_VALUE:1-15870', + 'bgp_neighbor_af_prefixname': 'TEXT:', + 'bgp_neighbor_af_routemap': 'TEXT:', + 'bgp_neighbor_address_family': 'TEXT_OPTIONS:ipv4,ipv6', + 'bgp_neighbor_connection_retrytime': 'INTEGER_VALUE:1-65535', + 'bgp_neighbor_description': 'TEXT:', + 
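+ # Every value in these tables, here and throughout this file, encodes a
+ # validation rule as '<CHECK>:<argument>': INTEGER_VALUE and NO_VALIDATION
+ # carry a 'low-high' range, INTEGER_VALUE_RANGE bounds a user-supplied
+ # range, TEXT_OPTIONS lists the accepted keywords, MATCH_TEXT_OR_EMPTY
+ # names the one literal allowed besides the empty string, and TEXT, FLOAT,
+ # IPV4Address, IPV4AddressWithMask and IPV6Address take no argument.
+ # A consumer could split each rule on its first colon; hypothetical
+ # sketch only, the module's actual checker may differ:
+ #   check, _, arg = rule.partition(':')
+ #   if check == 'INTEGER_VALUE':
+ #       low, high = map(int, arg.split('-', 1))
+ #       ok = low <= int(value) <= high
+ #   elif check == 'TEXT_OPTIONS':
+ #       ok = value in arg.split(',')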
'bgp_neighbor_maxhopcount': 'INTEGER_VALUE:1-255', + 'bgp_neighbor_local_as': 'NO_VALIDATION:1-4294967295', + 'bgp_neighbor_maxpeers': 'INTEGER_VALUE:1-96', + 'bgp_neighbor_password': 'TEXT:', + 'bgp_neighbor_timers_Keepalive': 'INTEGER_VALUE:0-3600', + 'bgp_neighbor_timers_holdtime': 'INTEGER_VALUE:0-3600', + 'bgp_neighbor_ttl_hops': 'INTEGER_VALUE:1-254', + 'bgp_neighbor_update_options': 'TEXT_OPTIONS:ethernet,loopback,\ + vlan', + 'bgp_neighbor_update_ethernet': 'TEXT:', + 'bgp_neighbor_update_loopback': 'INTEGER_VALUE:0-7', + 'bgp_neighbor_update_vlan': 'INTEGER_VALUE:1-4094', + 'bgp_neighbor_weight': 'INTEGER_VALUE:0-65535', + 'ethernet_interface_value': 'INTEGER_VALUE:1-52', + 'ethernet_interface_range': 'INTEGER_VALUE_RANGE:1-52', + 'ethernet_interface_string': 'TEXT:', + 'loopback_interface_value': 'INTEGER_VALUE:0-7', + 'mgmt_interface_value': 'INTEGER_VALUE:0-0', + 'vlan_interface_value': 'INTEGER_VALUE:1-4094', + 'portchannel_interface_value': 'INTEGER_VALUE:1-4096', + 'portchannel_interface_range': 'INTEGER_VALUE_RANGE:1-4096', + 'portchannel_interface_string': 'TEXT:', + 'aggregation_group_no': 'INTEGER_VALUE:1-4096', + 'aggregation_group_mode': 'TEXT_OPTIONS:active,on,passive', + 'bfd_options': 'TEXT_OPTIONS:authentication,echo,interval,ipv4,\ + ipv6,neighbor', + 'bfd_interval': 'INTEGER_VALUE:50-999', + 'bfd_minrx': 'INTEGER_VALUE:50-999', + 'bfd_ multiplier': 'INTEGER_VALUE:3-50', + 'bfd_ipv4_options': 'TEXT_OPTIONS:authentication,echo,\ + interval', + 'bfd_auth_options': 'TEXT_OPTIONS:keyed-md5,keyed-sha1,\ + meticulous-keyed-md5,meticulous-keyed-sha1,simple', + 'bfd_key_options': 'TEXT_OPTIONS:key-chain,key-id', + 'bfd_key_chain': 'TEXT:', + 'bfd_key_id': 'INTEGER_VALUE:0-255', + 'bfd_key_name': 'TEXT:', + 'bfd_neighbor_ip': 'TEXT:', + 'bfd_neighbor_options': 'TEXT_OPTIONS:admin-down,multihop,\ + non-persistent', + 'bfd_access_vlan': 'INTEGER_VALUE:1-3999', + 'bfd_bridgeport_mode': 'TEXT_OPTIONS:access,dot1q-tunnel,\ + trunk', + 'trunk_options': 'TEXT_OPTIONS:allowed,native', + 'trunk_vlanid': 'INTEGER_VALUE:1-3999', + 'portCh_description': 'TEXT:', + 'duplex_option': 'TEXT_OPTIONS:auto,full,half', + 'flowcontrol_options': 'TEXT_OPTIONS:receive,send', + 'portchannel_ip_options': 'TEXT_OPTIONS:access-group,address,\ + arp,dhcp,ospf,port,port-unreachable,redirects,router,\ + unreachables', + 'accessgroup_name': 'TEXT:', + 'portchannel_ipv4': 'IPV4Address:', + 'portchannel_ipv4_mask': 'TEXT:', + 'arp_ipaddress': 'IPV4Address:', + 'arp_macaddress': 'TEXT:', + 'arp_timeout_value': 'INTEGER_VALUE:60-28800', + 'relay_ipaddress': 'IPV4Address:', + 'ip_ospf_options': 'TEXT_OPTIONS:authentication,\ + authentication-key,bfd,cost,database-filter,dead-interval,\ + hello-interval,message-digest-key,mtu,mtu-ignore,network,\ + passive-interface,priority,retransmit-interval,shutdown,\ + transmit-delay', + 'ospf_id_decimal_value': 'NO_VALIDATION:1-4294967295', + 'ospf_id_ipaddres_value': 'IPV4Address:', + 'lacp_options': 'TEXT_OPTIONS:port-priority,suspend-individual,\ + timeout', + 'port_priority': 'INTEGER_VALUE:1-65535', + 'lldp_options': 'TEXT_OPTIONS:receive,tlv-select,transmit,\ + trap-notification', + 'lldp_tlv_options': 'TEXT_OPTIONS:link-aggregation,\ + mac-phy-status,management-address,max-frame-size,\ + port-description,port-protocol-vlan,port-vlan,power-mdi,\ + protocol-identity,system-capabilities,system-description,\ + system-name,vid-management,vlan-name', + 'load_interval_delay': 'INTEGER_VALUE:30-300', + 'load_interval_counter': 'INTEGER_VALUE:1-3', + 
'mac_accessgroup_name': 'TEXT:', + 'mac_address': 'TEXT:', + 'microburst_threshold': 'NO_VALIDATION:1-4294967295', + 'mtu_value': 'INTEGER_VALUE:64-9216', + 'service_instance': 'NO_VALIDATION:1-4294967295', + 'service_policy_options': 'TEXT_OPTIONS:copp-system-policy,\ + input,output,type', + 'service_policy_name': 'TEXT:', + 'spanning_tree_options': 'TEXT_OPTIONS:bpdufilter,bpduguard,\ + cost,disable,enable,guard,link-type,mst,port,port-priority,\ + vlan', + 'spanning_tree_cost': 'NO_VALIDATION:1-200000000', + 'spanning_tree_interfacerange': 'INTEGER_VALUE_RANGE:1-3999', + 'spanning_tree_portpriority': 'TEXT_OPTIONS:0,32,64,96,128,160,\ + 192,224', + 'portchannel_ipv6_neighbor_mac': 'TEXT:', + 'portchannel_ipv6_neighbor_address': 'IPV6Address:', + 'portchannel_ipv6_linklocal': 'IPV6Address:', + 'portchannel_ipv6_dhcp_vlan': 'INTEGER_VALUE:1-4094', + 'portchannel_ipv6_dhcp_ethernet': 'TEXT:', + 'portchannel_ipv6_dhcp': 'IPV6Address:', + 'portchannel_ipv6_address': 'IPV6Address:', + 'portchannel_ipv6_options': 'TEXT_OPTIONS:address,dhcp,\ + link-local,nd,neighbor', + 'interface_speed': 'TEXT_OPTIONS:10,100,1000,10000,auto', + 'stormcontrol_options': 'TEXT_OPTIONS:broadcast,multicast,\ + unicast', + 'stormcontrol_level': 'FLOAT:', + 'portchannel_dot1q_tag': 'TEXT_OPTIONS:disable,enable,\ + egress-only', + 'vrrp_id': 'INTEGER_VALUE:1-255', +} +NE2572 = { + 'vlan_id': 'INTEGER_VALUE:1-3999', + 'vlan_id_range': 'INTEGER_VALUE_RANGE:1-3999', + 'vlan_name': 'TEXT:', + 'vlan_flood': 'TEXT_OPTIONS:ipv4,ipv6', + 'vlan_state': 'TEXT_OPTIONS:active,suspend', + 'vlan_last_member_query_interval': 'INTEGER_VALUE:1-25', + 'vlan_querier': 'IPV4Address:', + 'vlan_querier_timeout': 'INTEGER_VALUE:1-65535', + 'vlan_query_interval': 'INTEGER_VALUE:1-18000', + 'vlan_query_max_response_time': 'INTEGER_VALUE:1-25', + 'vlan_report_suppression': 'INTEGER_VALUE:1-25', + 'vlan_robustness_variable': 'INTEGER_VALUE:1-7', + 'vlan_startup_query_count': 'INTEGER_VALUE:1-10', + 'vlan_startup_query_interval': 'INTEGER_VALUE:1-18000', + 'vlan_snooping_version': 'INTEGER_VALUE:2-3', + 'vlan_access_map_name': 'TEXT: ', + 'vlan_ethernet_interface': 'TEXT:', + 'vlan_portagg_number': 'INTEGER_VALUE:1-4096', + 'vlan_accessmap_action': 'TEXT_OPTIONS:drop,forward,redirect', + 'vlan_dot1q_tag': 'MATCH_TEXT_OR_EMPTY:egress-only', + 'vlan_filter_name': 'TEXT:', + 'vlag_auto_recovery': 'INTEGER_VALUE:240-3600', + 'vlag_config_consistency': 'TEXT_OPTIONS:disable,strict', + 'vlag_instance': 'INTEGER_VALUE:1-64', + 'vlag_port_aggregation': 'INTEGER_VALUE:1-4096', + 'vlag_priority': 'INTEGER_VALUE:0-65535', + 'vlag_startup_delay': 'INTEGER_VALUE:0-3600', + 'vlag_tier_id': 'INTEGER_VALUE:1-512', + 'vlag_hlthchk_options': 'TEXT_OPTIONS:keepalive-attempts,\ + keepalive-interval,peer-ip,retry-interval', + 'vlag_keepalive_attempts': 'INTEGER_VALUE:1-24', + 'vlag_keepalive_interval': 'INTEGER_VALUE:2-300', + 'vlag_retry_interval': 'INTEGER_VALUE:1-300', + 'vlag_peerip': 'IPV4Address:', + 'vlag_peerip_vrf': 'TEXT_OPTIONS:default,management', + 'bgp_as_number': 'NO_VALIDATION:1-4294967295', + 'bgp_address_family': 'TEXT_OPTIONS:ipv4,ipv6', + 'bgp_bgp_local_count': 'INTEGER_VALUE:2-64', + 'cluster_id_as_ip': 'IPV4Address:', + 'cluster_id_as_number': 'NO_VALIDATION:1-4294967295', + 'confederation_identifier': 'INTEGER_VALUE:1-65535', + 'condeferation_peers_as': 'INTEGER_VALUE:1-65535', + 'stalepath_delay_value': 'INTEGER_VALUE:1-3600', + 'maxas_limit_as': 'INTEGER_VALUE:1-2000', + 'neighbor_ipaddress': 'IPV4Address:', + 'neighbor_as': 
'NO_VALIDATION:1-4294967295', + 'router_id': 'IPV4Address:', + 'bgp_keepalive_interval': 'INTEGER_VALUE:0-3600', + 'bgp_holdtime': 'INTEGER_VALUE:0-3600', + 'bgp_aggregate_prefix': 'IPV4AddressWithMask:', + 'addrfamily_routemap_name': 'TEXT:', + 'reachability_half_life': 'INTEGER_VALUE:1-45', + 'start_reuse_route_value': 'INTEGER_VALUE:1-20000', + 'start_suppress_route_value': 'INTEGER_VALUE:1-20000', + 'max_duration_to_suppress_route': 'INTEGER_VALUE:1-255', + 'unreachability_halftime_for_penalty': 'INTEGER_VALUE:1-45', + 'distance_external_AS': 'INTEGER_VALUE:1-255', + 'distance_internal_AS': 'INTEGER_VALUE:1-255', + 'distance_local_routes': 'INTEGER_VALUE:1-255', + 'maxpath_option': 'TEXT_OPTIONS:ebgp,ibgp', + 'maxpath_numbers': 'INTEGER_VALUE:2-32', + 'network_ip_prefix_with_mask': 'IPV4AddressWithMask:', + 'network_ip_prefix_value': 'IPV4Address:', + 'network_ip_prefix_mask': 'IPV4Address:', + 'nexthop_crtitical_delay': 'NO_VALIDATION:1-4294967295', + 'nexthop_noncrtitical_delay': 'NO_VALIDATION:1-4294967295', + 'addrfamily_redistribute_option': 'TEXT_OPTIONS:direct,ospf,\ + static', + 'bgp_neighbor_af_occurances': 'INTEGER_VALUE:1-10', + 'bgp_neighbor_af_filtername': 'TEXT:', + 'bgp_neighbor_af_maxprefix': 'INTEGER_VALUE:1-15870', + 'bgp_neighbor_af_prefixname': 'TEXT:', + 'bgp_neighbor_af_routemap': 'TEXT:', + 'bgp_neighbor_address_family': 'TEXT_OPTIONS:ipv4,ipv6', + 'bgp_neighbor_connection_retrytime': 'INTEGER_VALUE:1-65535', + 'bgp_neighbor_description': 'TEXT:', + 'bgp_neighbor_maxhopcount': 'INTEGER_VALUE:1-255', + 'bgp_neighbor_local_as': 'NO_VALIDATION:1-4294967295', + 'bgp_neighbor_maxpeers': 'INTEGER_VALUE:1-96', + 'bgp_neighbor_password': 'TEXT:', + 'bgp_neighbor_timers_Keepalive': 'INTEGER_VALUE:0-3600', + 'bgp_neighbor_timers_holdtime': 'INTEGER_VALUE:0-3600', + 'bgp_neighbor_ttl_hops': 'INTEGER_VALUE:1-254', + 'bgp_neighbor_update_options': 'TEXT_OPTIONS:ethernet,loopback,\ + vlan', + 'bgp_neighbor_update_ethernet': 'TEXT:', + 'bgp_neighbor_update_loopback': 'INTEGER_VALUE:0-7', + 'bgp_neighbor_update_vlan': 'INTEGER_VALUE:1-4094', + 'bgp_neighbor_weight': 'INTEGER_VALUE:0-65535', + 'ethernet_interface_value': 'INTEGER_VALUE:1-54', + 'ethernet_interface_range': 'INTEGER_VALUE_RANGE:1-54', + 'ethernet_interface_string': 'TEXT:', + 'loopback_interface_value': 'INTEGER_VALUE:0-7', + 'mgmt_interface_value': 'INTEGER_VALUE:0-0', + 'vlan_interface_value': 'INTEGER_VALUE:1-4094', + 'portchannel_interface_value': 'INTEGER_VALUE:1-4096', + 'portchannel_interface_range': 'INTEGER_VALUE_RANGE:1-4096', + 'portchannel_interface_string': 'TEXT:', + 'aggregation_group_no': 'INTEGER_VALUE:1-4096', + 'aggregation_group_mode': 'TEXT_OPTIONS:active,on,passive', + 'bfd_options': 'TEXT_OPTIONS:authentication,echo,interval,ipv4,\ + ipv6,neighbor', + 'bfd_interval': 'INTEGER_VALUE:50-999', + 'bfd_minrx': 'INTEGER_VALUE:50-999', + 'bfd_ multiplier': 'INTEGER_VALUE:3-50', + 'bfd_ipv4_options': 'TEXT_OPTIONS:authentication,echo,interval', + 'bfd_auth_options': 'TEXT_OPTIONS:keyed-md5,keyed-sha1,\ + meticulous-keyed-md5,meticulous-keyed-sha1,simple', + 'bfd_key_options': 'TEXT_OPTIONS:key-chain,key-id', + 'bfd_key_chain': 'TEXT:', + 'bfd_key_id': 'INTEGER_VALUE:0-255', + 'bfd_key_name': 'TEXT:', + 'bfd_neighbor_ip': 'TEXT:', + 'bfd_neighbor_options': 'TEXT_OPTIONS:admin-down,multihop,\ + non-persistent', + 'bfd_access_vlan': 'INTEGER_VALUE:1-3999', + 'bfd_bridgeport_mode': 'TEXT_OPTIONS:access,dot1q-tunnel,trunk', + 'trunk_options': 'TEXT_OPTIONS:allowed,native', + 'trunk_vlanid': 
'INTEGER_VALUE:1-3999', + 'portCh_description': 'TEXT:', + 'duplex_option': 'TEXT_OPTIONS:auto,full,half', + 'flowcontrol_options': 'TEXT_OPTIONS:receive,send', + 'portchannel_ip_options': 'TEXT_OPTIONS:access-group,address,\ + arp,dhcp,ospf,port,port-unreachable,redirects,router,\ + unreachables', + 'accessgroup_name': 'TEXT:', + 'portchannel_ipv4': 'IPV4Address:', + 'portchannel_ipv4_mask': 'TEXT:', + 'arp_ipaddress': 'IPV4Address:', + 'arp_macaddress': 'TEXT:', + 'arp_timeout_value': 'INTEGER_VALUE:60-28800', + 'relay_ipaddress': 'IPV4Address:', + 'ip_ospf_options': 'TEXT_OPTIONS:authentication,\ + authentication-key,bfd,cost,database-filter,dead-interval,\ + hello-interval,message-digest-key,mtu,mtu-ignore,network,\ + passive-interface,priority,retransmit-interval,shutdown,\ + transmit-delay', + 'ospf_id_decimal_value': 'NO_VALIDATION:1-4294967295', + 'ospf_id_ipaddres_value': 'IPV4Address:', + 'lacp_options': 'TEXT_OPTIONS:port-priority,suspend-individual,\ + timeout', + 'port_priority': 'INTEGER_VALUE:1-65535', + 'lldp_options': 'TEXT_OPTIONS:receive,tlv-select,transmit,\ + trap-notification', + 'lldp_tlv_options': 'TEXT_OPTIONS:link-aggregation,\ + mac-phy-status,management-address,max-frame-size,\ + port-description,port-protocol-vlan,port-vlan,power-mdi,\ + protocol-identity,system-capabilities,system-description,\ + system-name,vid-management,vlan-name', + 'load_interval_delay': 'INTEGER_VALUE:30-300', + 'load_interval_counter': 'INTEGER_VALUE:1-3', + 'mac_accessgroup_name': 'TEXT:', + 'mac_address': 'TEXT:', + 'microburst_threshold': 'NO_VALIDATION:1-4294967295', + 'mtu_value': 'INTEGER_VALUE:64-9216', + 'service_instance': 'NO_VALIDATION:1-4294967295', + 'service_policy_options': 'TEXT_OPTIONS:copp-system-policy,input,\ + output,type', + 'service_policy_name': 'TEXT:', + 'spanning_tree_options': 'TEXT_OPTIONS:bpdufilter,bpduguard,\ + cost,disable,enable,guard,link-type,mst,port,port-priority,vlan', + 'spanning_tree_cost': 'NO_VALIDATION:1-200000000', + 'spanning_tree_interfacerange': 'INTEGER_VALUE_RANGE:1-3999', + 'spanning_tree_portpriority': 'TEXT_OPTIONS:0,32,64,96,128,160,\ + 192,224', + 'portchannel_ipv6_neighbor_mac': 'TEXT:', + 'portchannel_ipv6_neighbor_address': 'IPV6Address:', + 'portchannel_ipv6_linklocal': 'IPV6Address:', + 'portchannel_ipv6_dhcp_vlan': 'INTEGER_VALUE:1-4094', + 'portchannel_ipv6_dhcp_ethernet': 'TEXT:', + 'portchannel_ipv6_dhcp': 'IPV6Address:', + 'portchannel_ipv6_address': 'IPV6Address:', + 'portchannel_ipv6_options': 'TEXT_OPTIONS:address,dhcp,\ + link-local,nd,neighbor', + 'interface_speed': 'TEXT_OPTIONS:10000,100000,25000,40000,50000,auto', + 'stormcontrol_options': 'TEXT_OPTIONS:broadcast,multicast,\ + unicast', + 'stormcontrol_level': 'FLOAT:', + 'portchannel_dot1q_tag': 'TEXT_OPTIONS:disable,enable,\ + egress-only', + 'vrrp_id': 'INTEGER_VALUE:1-255', +} +NE1032T = { + 'vlan_id': 'INTEGER_VALUE:1-3999', + 'vlan_id_range': 'INTEGER_VALUE_RANGE:1-3999', + 'vlan_name': 'TEXT:', + 'vlan_flood': 'TEXT_OPTIONS:ipv4,ipv6', + 'vlan_state': 'TEXT_OPTIONS:active,suspend', + 'vlan_last_member_query_interval': 'INTEGER_VALUE:1-25', + 'vlan_querier': 'IPV4Address:', + 'vlan_querier_timeout': 'INTEGER_VALUE:1-65535', + 'vlan_query_interval': 'INTEGER_VALUE:1-18000', + 'vlan_query_max_response_time': 'INTEGER_VALUE:1-25', + 'vlan_report_suppression': 'INTEGER_VALUE:1-25', + 'vlan_robustness_variable': 'INTEGER_VALUE:1-7', + 'vlan_startup_query_count': 'INTEGER_VALUE:1-10', + 'vlan_startup_query_interval': 'INTEGER_VALUE:1-18000', + 
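+ # The IGMP snooping bounds above (querier timers, query counts and
+ # intervals, robustness) repeat unchanged in every device table here.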
'vlan_snooping_version': 'INTEGER_VALUE:2-3', + 'vlan_access_map_name': 'TEXT: ', + 'vlan_ethernet_interface': 'TEXT:', + 'vlan_portagg_number': 'INTEGER_VALUE:1-4096', + 'vlan_accessmap_action': 'TEXT_OPTIONS:drop,forward,redirect', + 'vlan_dot1q_tag': 'MATCH_TEXT_OR_EMPTY:egress-only', + 'vlan_filter_name': 'TEXT:', + 'vlag_auto_recovery': 'INTEGER_VALUE:240-3600', + 'vlag_config_consistency': 'TEXT_OPTIONS:disable,strict', + 'vlag_instance': 'INTEGER_VALUE:1-64', + 'vlag_port_aggregation': 'INTEGER_VALUE:1-4096', + 'vlag_priority': 'INTEGER_VALUE:0-65535', + 'vlag_startup_delay': 'INTEGER_VALUE:0-3600', + 'vlag_tier_id': 'INTEGER_VALUE:1-512', + 'vlag_hlthchk_options': 'TEXT_OPTIONS:keepalive-attempts,\ + keepalive-interval,peer-ip,retry-interval', + 'vlag_keepalive_attempts': 'INTEGER_VALUE:1-24', + 'vlag_keepalive_interval': 'INTEGER_VALUE:2-300', + 'vlag_retry_interval': 'INTEGER_VALUE:1-300', + 'vlag_peerip': 'IPV4Address:', + 'vlag_peerip_vrf': 'TEXT_OPTIONS:default,management', + 'bgp_as_number': 'NO_VALIDATION:1-4294967295', + 'bgp_address_family': 'TEXT_OPTIONS:ipv4,ipv6', + 'bgp_bgp_local_count': 'INTEGER_VALUE:2-64', + 'cluster_id_as_ip': 'IPV4Address:', + 'cluster_id_as_number': 'NO_VALIDATION:1-4294967295', + 'confederation_identifier': 'INTEGER_VALUE:1-65535', + 'condeferation_peers_as': 'INTEGER_VALUE:1-65535', + 'stalepath_delay_value': 'INTEGER_VALUE:1-3600', + 'maxas_limit_as': 'INTEGER_VALUE:1-2000', + 'neighbor_ipaddress': 'IPV4Address:', + 'neighbor_as': 'NO_VALIDATION:1-4294967295', + 'router_id': 'IPV4Address:', + 'bgp_keepalive_interval': 'INTEGER_VALUE:0-3600', + 'bgp_holdtime': 'INTEGER_VALUE:0-3600', + 'bgp_aggregate_prefix': 'IPV4AddressWithMask:', + 'addrfamily_routemap_name': 'TEXT:', + 'reachability_half_life': 'INTEGER_VALUE:1-45', + 'start_reuse_route_value': 'INTEGER_VALUE:1-20000', + 'start_suppress_route_value': 'INTEGER_VALUE:1-20000', + 'max_duration_to_suppress_route': 'INTEGER_VALUE:1-255', + 'unreachability_halftime_for_penalty': 'INTEGER_VALUE:1-45', + 'distance_external_AS': 'INTEGER_VALUE:1-255', + 'distance_internal_AS': 'INTEGER_VALUE:1-255', + 'distance_local_routes': 'INTEGER_VALUE:1-255', + 'maxpath_option': 'TEXT_OPTIONS:ebgp,ibgp', + 'maxpath_numbers': 'INTEGER_VALUE:2-32', + 'network_ip_prefix_with_mask': 'IPV4AddressWithMask:', + 'network_ip_prefix_value': 'IPV4Address:', + 'network_ip_prefix_mask': 'IPV4Address:', + 'nexthop_crtitical_delay': 'NO_VALIDATION:1-4294967295', + 'nexthop_noncrtitical_delay': 'NO_VALIDATION:1-4294967295', + 'addrfamily_redistribute_option': 'TEXT_OPTIONS:direct,ospf,\ + static', + 'bgp_neighbor_af_occurances': 'INTEGER_VALUE:1-10', + 'bgp_neighbor_af_filtername': 'TEXT:', + 'bgp_neighbor_af_maxprefix': 'INTEGER_VALUE:1-15870', + 'bgp_neighbor_af_prefixname': 'TEXT:', + 'bgp_neighbor_af_routemap': 'TEXT:', + 'bgp_neighbor_address_family': 'TEXT_OPTIONS:ipv4,ipv6', + 'bgp_neighbor_connection_retrytime': 'INTEGER_VALUE:1-65535', + 'bgp_neighbor_description': 'TEXT:', + 'bgp_neighbor_maxhopcount': 'INTEGER_VALUE:1-255', + 'bgp_neighbor_local_as': 'NO_VALIDATION:1-4294967295', + 'bgp_neighbor_maxpeers': 'INTEGER_VALUE:1-96', + 'bgp_neighbor_password': 'TEXT:', + 'bgp_neighbor_timers_Keepalive': 'INTEGER_VALUE:0-3600', + 'bgp_neighbor_timers_holdtime': 'INTEGER_VALUE:0-3600', + 'bgp_neighbor_ttl_hops': 'INTEGER_VALUE:1-254', + 'bgp_neighbor_update_options': 'TEXT_OPTIONS:ethernet,loopback,\ + vlan', + 'bgp_neighbor_update_ethernet': 'TEXT:', + 'bgp_neighbor_update_loopback': 'INTEGER_VALUE:0-7', + 
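+ # BGP update-source choices: Ethernet is free text, loopback 0-7,
+ # VLAN 1-4094.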
'bgp_neighbor_update_vlan': 'INTEGER_VALUE:1-4094', + 'bgp_neighbor_weight': 'INTEGER_VALUE:0-65535', + 'ethernet_interface_value': 'INTEGER_VALUE:1-32', + 'ethernet_interface_range': 'INTEGER_VALUE_RANGE:1-32', + 'ethernet_interface_string': 'TEXT:', + 'loopback_interface_value': 'INTEGER_VALUE:0-7', + 'mgmt_interface_value': 'INTEGER_VALUE:0-0', + 'vlan_interface_value': 'INTEGER_VALUE:1-4094', + 'portchannel_interface_value': 'INTEGER_VALUE:1-4096', + 'portchannel_interface_range': 'INTEGER_VALUE_RANGE:1-4096', + 'portchannel_interface_string': 'TEXT:', + 'aggregation_group_no': 'INTEGER_VALUE:1-4096', + 'aggregation_group_mode': 'TEXT_OPTIONS:active,on,passive', + 'bfd_options': 'TEXT_OPTIONS:authentication,echo,interval,ipv4,\ + ipv6,neighbor', + 'bfd_interval': 'INTEGER_VALUE:50-999', + 'bfd_minrx': 'INTEGER_VALUE:50-999', + 'bfd_ multiplier': 'INTEGER_VALUE:3-50', + 'bfd_ipv4_options': 'TEXT_OPTIONS:authentication,echo,interval', + 'bfd_auth_options': 'TEXT_OPTIONS:keyed-md5,keyed-sha1,\ + meticulous-keyed-md5,meticulous-keyed-sha1,simple', + 'bfd_key_options': 'TEXT_OPTIONS:key-chain,key-id', + 'bfd_key_chain': 'TEXT:', + 'bfd_key_id': 'INTEGER_VALUE:0-255', + 'bfd_key_name': 'TEXT:', + 'bfd_neighbor_ip': 'TEXT:', + 'bfd_neighbor_options': 'TEXT_OPTIONS:admin-down,multihop,\ + non-persistent', + 'bfd_access_vlan': 'INTEGER_VALUE:1-3999', + 'bfd_bridgeport_mode': 'TEXT_OPTIONS:access,dot1q-tunnel,trunk', + 'trunk_options': 'TEXT_OPTIONS:allowed,native', + 'trunk_vlanid': 'INTEGER_VALUE:1-3999', + 'portCh_description': 'TEXT:', + 'duplex_option': 'TEXT_OPTIONS:auto,full,half', + 'flowcontrol_options': 'TEXT_OPTIONS:receive,send', + 'portchannel_ip_options': 'TEXT_OPTIONS:access-group,address,\ + arp,dhcp,ospf,port,port-unreachable,redirects,router,\ + unreachables', + 'accessgroup_name': 'TEXT:', + 'portchannel_ipv4': 'IPV4Address:', + 'portchannel_ipv4_mask': 'TEXT:', + 'arp_ipaddress': 'IPV4Address:', + 'arp_macaddress': 'TEXT:', + 'arp_timeout_value': 'INTEGER_VALUE:60-28800', + 'relay_ipaddress': 'IPV4Address:', + 'ip_ospf_options': 'TEXT_OPTIONS:authentication,\ + authentication-key,bfd,cost,database-filter,dead-interval,\ + hello-interval,message-digest-key,mtu,mtu-ignore,network,\ + passive-interface,priority,retransmit-interval,shutdown,\ + transmit-delay', + 'ospf_id_decimal_value': 'NO_VALIDATION:1-4294967295', + 'ospf_id_ipaddres_value': 'IPV4Address:', + 'lacp_options': 'TEXT_OPTIONS:port-priority,suspend-individual,\ + timeout', + 'port_priority': 'INTEGER_VALUE:1-65535', + 'lldp_options': 'TEXT_OPTIONS:receive,tlv-select,transmit,\ + trap-notification', + 'lldp_tlv_options': 'TEXT_OPTIONS:link-aggregation,\ + mac-phy-status,management-address,max-frame-size,\ + port-description,port-protocol-vlan,port-vlan,power-mdi,\ + protocol-identity,system-capabilities,system-description,\ + system-name,vid-management,vlan-name', + 'load_interval_delay': 'INTEGER_VALUE:30-300', + 'load_interval_counter': 'INTEGER_VALUE:1-3', + 'mac_accessgroup_name': 'TEXT:', + 'mac_address': 'TEXT:', + 'microburst_threshold': 'NO_VALIDATION:1-4294967295', + 'mtu_value': 'INTEGER_VALUE:64-9216', + 'service_instance': 'NO_VALIDATION:1-4294967295', + 'service_policy_options': 'TEXT_OPTIONS:copp-system-policy,input,\ + output,type', + 'service_policy_name': 'TEXT:', + 'spanning_tree_options': 'TEXT_OPTIONS:bpdufilter,bpduguard,\ + cost,disable,enable,guard,link-type,mst,port,port-priority,vlan', + 'spanning_tree_cost': 'NO_VALIDATION:1-200000000', + 'spanning_tree_interfacerange': 
'INTEGER_VALUE_RANGE:1-3999', + 'spanning_tree_portpriority': 'TEXT_OPTIONS:0,32,64,96,128,160,\ + 192,224', + 'portchannel_ipv6_neighbor_mac': 'TEXT:', + 'portchannel_ipv6_neighbor_address': 'IPV6Address:', + 'portchannel_ipv6_linklocal': 'IPV6Address:', + 'portchannel_ipv6_dhcp_vlan': 'INTEGER_VALUE:1-4094', + 'portchannel_ipv6_dhcp_ethernet': 'TEXT:', + 'portchannel_ipv6_dhcp': 'IPV6Address:', + 'portchannel_ipv6_address': 'IPV6Address:', + 'portchannel_ipv6_options': 'TEXT_OPTIONS:address,dhcp,\ + link-local,nd,neighbor', + 'interface_speed': 'TEXT_OPTIONS:1000,10000,100000,25000,40000,50000,auto', + 'stormcontrol_options': 'TEXT_OPTIONS:broadcast,multicast,\ + unicast', + 'stormcontrol_level': 'FLOAT:', + 'portchannel_dot1q_tag': 'TEXT_OPTIONS:disable,enable,\ + egress-only', + 'vrrp_id': 'INTEGER_VALUE:1-255', +} +NE1032 = { + 'vlan_id': 'INTEGER_VALUE:1-3999', + 'vlan_id_range': 'INTEGER_VALUE_RANGE:1-3999', + 'vlan_name': 'TEXT:', + 'vlan_flood': 'TEXT_OPTIONS:ipv4,ipv6', + 'vlan_state': 'TEXT_OPTIONS:active,suspend', + 'vlan_last_member_query_interval': 'INTEGER_VALUE:1-25', + 'vlan_querier': 'IPV4Address:', + 'vlan_querier_timeout': 'INTEGER_VALUE:1-65535', + 'vlan_query_interval': 'INTEGER_VALUE:1-18000', + 'vlan_query_max_response_time': 'INTEGER_VALUE:1-25', + 'vlan_report_suppression': 'INTEGER_VALUE:1-25', + 'vlan_robustness_variable': 'INTEGER_VALUE:1-7', + 'vlan_startup_query_count': 'INTEGER_VALUE:1-10', + 'vlan_startup_query_interval': 'INTEGER_VALUE:1-18000', + 'vlan_snooping_version': 'INTEGER_VALUE:2-3', + 'vlan_access_map_name': 'TEXT: ', + 'vlan_ethernet_interface': 'TEXT:', + 'vlan_portagg_number': 'INTEGER_VALUE:1-4096', + 'vlan_accessmap_action': 'TEXT_OPTIONS:drop,forward,redirect', + 'vlan_dot1q_tag': 'MATCH_TEXT_OR_EMPTY:egress-only', + 'vlan_filter_name': 'TEXT:', + 'vlag_auto_recovery': 'INTEGER_VALUE:240-3600', + 'vlag_config_consistency': 'TEXT_OPTIONS:disable,strict', + 'vlag_instance': 'INTEGER_VALUE:1-64', + 'vlag_port_aggregation': 'INTEGER_VALUE:1-4096', + 'vlag_priority': 'INTEGER_VALUE:0-65535', + 'vlag_startup_delay': 'INTEGER_VALUE:0-3600', + 'vlag_tier_id': 'INTEGER_VALUE:1-512', + 'vlag_hlthchk_options': 'TEXT_OPTIONS:keepalive-attempts,\ + keepalive-interval,peer-ip,retry-interval', + 'vlag_keepalive_attempts': 'INTEGER_VALUE:1-24', + 'vlag_keepalive_interval': 'INTEGER_VALUE:2-300', + 'vlag_retry_interval': 'INTEGER_VALUE:1-300', + 'vlag_peerip': 'IPV4Address:', + 'vlag_peerip_vrf': 'TEXT_OPTIONS:default,management', + 'bgp_as_number': 'NO_VALIDATION:1-4294967295', + 'bgp_address_family': 'TEXT_OPTIONS:ipv4,ipv6', + 'bgp_bgp_local_count': 'INTEGER_VALUE:2-64', + 'cluster_id_as_ip': 'IPV4Address:', + 'cluster_id_as_number': 'NO_VALIDATION:1-4294967295', + 'confederation_identifier': 'INTEGER_VALUE:1-65535', + 'condeferation_peers_as': 'INTEGER_VALUE:1-65535', + 'stalepath_delay_value': 'INTEGER_VALUE:1-3600', + 'maxas_limit_as': 'INTEGER_VALUE:1-2000', + 'neighbor_ipaddress': 'IPV4Address:', + 'neighbor_as': 'NO_VALIDATION:1-4294967295', + 'router_id': 'IPV4Address:', + 'bgp_keepalive_interval': 'INTEGER_VALUE:0-3600', + 'bgp_holdtime': 'INTEGER_VALUE:0-3600', + 'bgp_aggregate_prefix': 'IPV4AddressWithMask:', + 'addrfamily_routemap_name': 'TEXT:', + 'reachability_half_life': 'INTEGER_VALUE:1-45', + 'start_reuse_route_value': 'INTEGER_VALUE:1-20000', + 'start_suppress_route_value': 'INTEGER_VALUE:1-20000', + 'max_duration_to_suppress_route': 'INTEGER_VALUE:1-255', + 'unreachability_halftime_for_penalty': 'INTEGER_VALUE:1-45', + 
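+ # Route-dampening timers above and administrative distances below are
+ # common to all models.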
'distance_external_AS': 'INTEGER_VALUE:1-255', + 'distance_internal_AS': 'INTEGER_VALUE:1-255', + 'distance_local_routes': 'INTEGER_VALUE:1-255', + 'maxpath_option': 'TEXT_OPTIONS:ebgp,ibgp', + 'maxpath_numbers': 'INTEGER_VALUE:2-32', + 'network_ip_prefix_with_mask': 'IPV4AddressWithMask:', + 'network_ip_prefix_value': 'IPV4Address:', + 'network_ip_prefix_mask': 'IPV4Address:', + 'nexthop_crtitical_delay': 'NO_VALIDATION:1-4294967295', + 'nexthop_noncrtitical_delay': 'NO_VALIDATION:1-4294967295', + 'addrfamily_redistribute_option': 'TEXT_OPTIONS:direct,ospf,\ + static', + 'bgp_neighbor_af_occurances': 'INTEGER_VALUE:1-10', + 'bgp_neighbor_af_filtername': 'TEXT:', + 'bgp_neighbor_af_maxprefix': 'INTEGER_VALUE:1-15870', + 'bgp_neighbor_af_prefixname': 'TEXT:', + 'bgp_neighbor_af_routemap': 'TEXT:', + 'bgp_neighbor_address_family': 'TEXT_OPTIONS:ipv4,ipv6', + 'bgp_neighbor_connection_retrytime': 'INTEGER_VALUE:1-65535', + 'bgp_neighbor_description': 'TEXT:', + 'bgp_neighbor_maxhopcount': 'INTEGER_VALUE:1-255', + 'bgp_neighbor_local_as': 'NO_VALIDATION:1-4294967295', + 'bgp_neighbor_maxpeers': 'INTEGER_VALUE:1-96', + 'bgp_neighbor_password': 'TEXT:', + 'bgp_neighbor_timers_Keepalive': 'INTEGER_VALUE:0-3600', + 'bgp_neighbor_timers_holdtime': 'INTEGER_VALUE:0-3600', + 'bgp_neighbor_ttl_hops': 'INTEGER_VALUE:1-254', + 'bgp_neighbor_update_options': 'TEXT_OPTIONS:ethernet,loopback,\ + vlan', + 'bgp_neighbor_update_ethernet': 'TEXT:', + 'bgp_neighbor_update_loopback': 'INTEGER_VALUE:0-7', + 'bgp_neighbor_update_vlan': 'INTEGER_VALUE:1-4094', + 'bgp_neighbor_weight': 'INTEGER_VALUE:0-65535', + 'ethernet_interface_value': 'INTEGER_VALUE:1-32', + 'ethernet_interface_range': 'INTEGER_VALUE_RANGE:1-32', + 'ethernet_interface_string': 'TEXT:', + 'loopback_interface_value': 'INTEGER_VALUE:0-7', + 'mgmt_interface_value': 'INTEGER_VALUE:0-0', + 'vlan_interface_value': 'INTEGER_VALUE:1-4094', + 'portchannel_interface_value': 'INTEGER_VALUE:1-4096', + 'portchannel_interface_range': 'INTEGER_VALUE_RANGE:1-4096', + 'portchannel_interface_string': 'TEXT:', + 'aggregation_group_no': 'INTEGER_VALUE:1-4096', + 'aggregation_group_mode': 'TEXT_OPTIONS:active,on,passive', + 'bfd_options': 'TEXT_OPTIONS:authentication,echo,interval,ipv4,\ + ipv6,neighbor', + 'bfd_interval': 'INTEGER_VALUE:50-999', + 'bfd_minrx': 'INTEGER_VALUE:50-999', + 'bfd_ multiplier': 'INTEGER_VALUE:3-50', + 'bfd_ipv4_options': 'TEXT_OPTIONS:authentication,echo,interval', + 'bfd_auth_options': 'TEXT_OPTIONS:keyed-md5,keyed-sha1,\ + meticulous-keyed-md5,meticulous-keyed-sha1,simple', + 'bfd_key_options': 'TEXT_OPTIONS:key-chain,key-id', + 'bfd_key_chain': 'TEXT:', + 'bfd_key_id': 'INTEGER_VALUE:0-255', + 'bfd_key_name': 'TEXT:', + 'bfd_neighbor_ip': 'TEXT:', + 'bfd_neighbor_options': 'TEXT_OPTIONS:admin-down,multihop,\ + non-persistent', + 'bfd_access_vlan': 'INTEGER_VALUE:1-3999', + 'bfd_bridgeport_mode': 'TEXT_OPTIONS:access,dot1q-tunnel,trunk', + 'trunk_options': 'TEXT_OPTIONS:allowed,native', + 'trunk_vlanid': 'INTEGER_VALUE:1-3999', + 'portCh_description': 'TEXT:', + 'duplex_option': 'TEXT_OPTIONS:auto,full,half', + 'flowcontrol_options': 'TEXT_OPTIONS:receive,send', + 'portchannel_ip_options': 'TEXT_OPTIONS:access-group,address,\ + arp,dhcp,ospf,port,port-unreachable,redirects,router,\ + unreachables', + 'accessgroup_name': 'TEXT:', + 'portchannel_ipv4': 'IPV4Address:', + 'portchannel_ipv4_mask': 'TEXT:', + 'arp_ipaddress': 'IPV4Address:', + 'arp_macaddress': 'TEXT:', + 'arp_timeout_value': 'INTEGER_VALUE:60-28800', + 'relay_ipaddress': 
'IPV4Address:', + 'ip_ospf_options': 'TEXT_OPTIONS:authentication,\ + authentication-key,bfd,cost,database-filter,dead-interval,\ + hello-interval,message-digest-key,mtu,mtu-ignore,network,\ + passive-interface,priority,retransmit-interval,shutdown,\ + transmit-delay', + 'ospf_id_decimal_value': 'NO_VALIDATION:1-4294967295', + 'ospf_id_ipaddres_value': 'IPV4Address:', + 'lacp_options': 'TEXT_OPTIONS:port-priority,suspend-individual,\ + timeout', + 'port_priority': 'INTEGER_VALUE:1-65535', + 'lldp_options': 'TEXT_OPTIONS:receive,tlv-select,transmit,\ + trap-notification', + 'lldp_tlv_options': 'TEXT_OPTIONS:link-aggregation,\ + mac-phy-status,management-address,max-frame-size,\ + port-description,port-protocol-vlan,port-vlan,power-mdi,\ + protocol-identity,system-capabilities,system-description,\ + system-name,vid-management,vlan-name', + 'load_interval_delay': 'INTEGER_VALUE:30-300', + 'load_interval_counter': 'INTEGER_VALUE:1-3', + 'mac_accessgroup_name': 'TEXT:', + 'mac_address': 'TEXT:', + 'microburst_threshold': 'NO_VALIDATION:1-4294967295', + 'mtu_value': 'INTEGER_VALUE:64-9216', + 'service_instance': 'NO_VALIDATION:1-4294967295', + 'service_policy_options': 'TEXT_OPTIONS:copp-system-policy,input,\ + output,type', + 'service_policy_name': 'TEXT:', + 'spanning_tree_options': 'TEXT_OPTIONS:bpdufilter,bpduguard,\ + cost,disable,enable,guard,link-type,mst,port,port-priority,vlan', + 'spanning_tree_cost': 'NO_VALIDATION:1-200000000', + 'spanning_tree_interfacerange': 'INTEGER_VALUE_RANGE:1-3999', + 'spanning_tree_portpriority': 'TEXT_OPTIONS:0,32,64,96,128,160,\ + 192,224', + 'portchannel_ipv6_neighbor_mac': 'TEXT:', + 'portchannel_ipv6_neighbor_address': 'IPV6Address:', + 'portchannel_ipv6_linklocal': 'IPV6Address:', + 'portchannel_ipv6_dhcp_vlan': 'INTEGER_VALUE:1-4094', + 'portchannel_ipv6_dhcp_ethernet': 'TEXT:', + 'portchannel_ipv6_dhcp': 'IPV6Address:', + 'portchannel_ipv6_address': 'IPV6Address:', + 'portchannel_ipv6_options': 'TEXT_OPTIONS:address,dhcp,\ + link-local,nd,neighbor', + 'interface_speed': 'TEXT_OPTIONS:1000,10000,100000,25000,40000,50000,auto', + 'stormcontrol_options': 'TEXT_OPTIONS:broadcast,multicast,\ + unicast', + 'stormcontrol_level': 'FLOAT:', + 'portchannel_dot1q_tag': 'TEXT_OPTIONS:disable,enable,\ + egress-only', + 'vrrp_id': 'INTEGER_VALUE:1-255', +} +NE1072T = { + 'vlan_id': 'INTEGER_VALUE:1-3999', + 'vlan_id_range': 'INTEGER_VALUE_RANGE:1-3999', + 'vlan_name': 'TEXT:', + 'vlan_flood': 'TEXT_OPTIONS:ipv4,ipv6', + 'vlan_state': 'TEXT_OPTIONS:active,suspend', + 'vlan_last_member_query_interval': 'INTEGER_VALUE:1-25', + 'vlan_querier': 'IPV4Address:', + 'vlan_querier_timeout': 'INTEGER_VALUE:1-65535', + 'vlan_query_interval': 'INTEGER_VALUE:1-18000', + 'vlan_query_max_response_time': 'INTEGER_VALUE:1-25', + 'vlan_report_suppression': 'INTEGER_VALUE:1-25', + 'vlan_robustness_variable': 'INTEGER_VALUE:1-7', + 'vlan_startup_query_count': 'INTEGER_VALUE:1-10', + 'vlan_startup_query_interval': 'INTEGER_VALUE:1-18000', + 'vlan_snooping_version': 'INTEGER_VALUE:2-3', + 'vlan_access_map_name': 'TEXT: ', + 'vlan_ethernet_interface': 'TEXT:', + 'vlan_portagg_number': 'INTEGER_VALUE:1-4096', + 'vlan_accessmap_action': 'TEXT_OPTIONS:drop,forward,redirect', + 'vlan_dot1q_tag': 'MATCH_TEXT_OR_EMPTY:egress-only', + 'vlan_filter_name': 'TEXT:', + 'vlag_auto_recovery': 'INTEGER_VALUE:240-3600', + 'vlag_config_consistency': 'TEXT_OPTIONS:disable,strict', + 'vlag_instance': 'INTEGER_VALUE:1-64', + 'vlag_port_aggregation': 'INTEGER_VALUE:1-4096', + 'vlag_priority': 
'INTEGER_VALUE:0-65535', + 'vlag_startup_delay': 'INTEGER_VALUE:0-3600', + 'vlag_tier_id': 'INTEGER_VALUE:1-512', + 'vlag_hlthchk_options': 'TEXT_OPTIONS:keepalive-attempts,\ + keepalive-interval,peer-ip,retry-interval', + 'vlag_keepalive_attempts': 'INTEGER_VALUE:1-24', + 'vlag_keepalive_interval': 'INTEGER_VALUE:2-300', + 'vlag_retry_interval': 'INTEGER_VALUE:1-300', + 'vlag_peerip': 'IPV4Address:', + 'vlag_peerip_vrf': 'TEXT_OPTIONS:default,management', + 'bgp_as_number': 'NO_VALIDATION:1-4294967295', + 'bgp_address_family': 'TEXT_OPTIONS:ipv4,ipv6', + 'bgp_bgp_local_count': 'INTEGER_VALUE:2-64', + 'cluster_id_as_ip': 'IPV4Address:', + 'cluster_id_as_number': 'NO_VALIDATION:1-4294967295', + 'confederation_identifier': 'INTEGER_VALUE:1-65535', + 'condeferation_peers_as': 'INTEGER_VALUE:1-65535', + 'stalepath_delay_value': 'INTEGER_VALUE:1-3600', + 'maxas_limit_as': 'INTEGER_VALUE:1-2000', + 'neighbor_ipaddress': 'IPV4Address:', + 'neighbor_as': 'NO_VALIDATION:1-4294967295', + 'router_id': 'IPV4Address:', + 'bgp_keepalive_interval': 'INTEGER_VALUE:0-3600', + 'bgp_holdtime': 'INTEGER_VALUE:0-3600', + 'bgp_aggregate_prefix': 'IPV4AddressWithMask:', + 'addrfamily_routemap_name': 'TEXT:', + 'reachability_half_life': 'INTEGER_VALUE:1-45', + 'start_reuse_route_value': 'INTEGER_VALUE:1-20000', + 'start_suppress_route_value': 'INTEGER_VALUE:1-20000', + 'max_duration_to_suppress_route': 'INTEGER_VALUE:1-255', + 'unreachability_halftime_for_penalty': 'INTEGER_VALUE:1-45', + 'distance_external_AS': 'INTEGER_VALUE:1-255', + 'distance_internal_AS': 'INTEGER_VALUE:1-255', + 'distance_local_routes': 'INTEGER_VALUE:1-255', + 'maxpath_option': 'TEXT_OPTIONS:ebgp,ibgp', + 'maxpath_numbers': 'INTEGER_VALUE:2-32', + 'network_ip_prefix_with_mask': 'IPV4AddressWithMask:', + 'network_ip_prefix_value': 'IPV4Address:', + 'network_ip_prefix_mask': 'IPV4Address:', + 'nexthop_crtitical_delay': 'NO_VALIDATION:1-4294967295', + 'nexthop_noncrtitical_delay': 'NO_VALIDATION:1-4294967295', + 'addrfamily_redistribute_option': 'TEXT_OPTIONS:direct,ospf,\ + static', + 'bgp_neighbor_af_occurances': 'INTEGER_VALUE:1-10', + 'bgp_neighbor_af_filtername': 'TEXT:', + 'bgp_neighbor_af_maxprefix': 'INTEGER_VALUE:1-15870', + 'bgp_neighbor_af_prefixname': 'TEXT:', + 'bgp_neighbor_af_routemap': 'TEXT:', + 'bgp_neighbor_address_family': 'TEXT_OPTIONS:ipv4,ipv6', + 'bgp_neighbor_connection_retrytime': 'INTEGER_VALUE:1-65535', + 'bgp_neighbor_description': 'TEXT:', + 'bgp_neighbor_maxhopcount': 'INTEGER_VALUE:1-255', + 'bgp_neighbor_local_as': 'NO_VALIDATION:1-4294967295', + 'bgp_neighbor_maxpeers': 'INTEGER_VALUE:1-96', + 'bgp_neighbor_password': 'TEXT:', + 'bgp_neighbor_timers_Keepalive': 'INTEGER_VALUE:0-3600', + 'bgp_neighbor_timers_holdtime': 'INTEGER_VALUE:0-3600', + 'bgp_neighbor_ttl_hops': 'INTEGER_VALUE:1-254', + 'bgp_neighbor_update_options': 'TEXT_OPTIONS:ethernet,loopback,\ + vlan', + 'bgp_neighbor_update_ethernet': 'TEXT:', + 'bgp_neighbor_update_loopback': 'INTEGER_VALUE:0-7', + 'bgp_neighbor_update_vlan': 'INTEGER_VALUE:1-4094', + 'bgp_neighbor_weight': 'INTEGER_VALUE:0-65535', + 'ethernet_interface_value': 'INTEGER_VALUE:1-54', + 'ethernet_interface_range': 'INTEGER_VALUE_RANGE:1-54', + 'ethernet_interface_string': 'TEXT:', + 'loopback_interface_value': 'INTEGER_VALUE:0-7', + 'mgmt_interface_value': 'INTEGER_VALUE:0-0', + 'vlan_interface_value': 'INTEGER_VALUE:1-4094', + 'portchannel_interface_value': 'INTEGER_VALUE:1-4096', + 'portchannel_interface_range': 'INTEGER_VALUE_RANGE:1-4096', + 'portchannel_interface_string': 
'TEXT:', + 'aggregation_group_no': 'INTEGER_VALUE:1-4096', + 'aggregation_group_mode': 'TEXT_OPTIONS:active,on,passive', + 'bfd_options': 'TEXT_OPTIONS:authentication,echo,interval,ipv4,\ + ipv6,neighbor', + 'bfd_interval': 'INTEGER_VALUE:50-999', + 'bfd_minrx': 'INTEGER_VALUE:50-999', + 'bfd_ multiplier': 'INTEGER_VALUE:3-50', + 'bfd_ipv4_options': 'TEXT_OPTIONS:authentication,echo,interval', + 'bfd_auth_options': 'TEXT_OPTIONS:keyed-md5,keyed-sha1,\ + meticulous-keyed-md5,meticulous-keyed-sha1,simple', + 'bfd_key_options': 'TEXT_OPTIONS:key-chain,key-id', + 'bfd_key_chain': 'TEXT:', + 'bfd_key_id': 'INTEGER_VALUE:0-255', + 'bfd_key_name': 'TEXT:', + 'bfd_neighbor_ip': 'TEXT:', + 'bfd_neighbor_options': 'TEXT_OPTIONS:admin-down,multihop,\ + non-persistent', + 'bfd_access_vlan': 'INTEGER_VALUE:1-3999', + 'bfd_bridgeport_mode': 'TEXT_OPTIONS:access,dot1q-tunnel,trunk', + 'trunk_options': 'TEXT_OPTIONS:allowed,native', + 'trunk_vlanid': 'INTEGER_VALUE:1-3999', + 'portCh_description': 'TEXT:', + 'duplex_option': 'TEXT_OPTIONS:auto,full,half', + 'flowcontrol_options': 'TEXT_OPTIONS:receive,send', + 'portchannel_ip_options': 'TEXT_OPTIONS:access-group,address,\ + arp,dhcp,ospf,port,port-unreachable,redirects,router,\ + unreachables', + 'accessgroup_name': 'TEXT:', + 'portchannel_ipv4': 'IPV4Address:', + 'portchannel_ipv4_mask': 'TEXT:', + 'arp_ipaddress': 'IPV4Address:', + 'arp_macaddress': 'TEXT:', + 'arp_timeout_value': 'INTEGER_VALUE:60-28800', + 'relay_ipaddress': 'IPV4Address:', + 'ip_ospf_options': 'TEXT_OPTIONS:authentication,\ + authentication-key,bfd,cost,database-filter,dead-interval,\ + hello-interval,message-digest-key,mtu,mtu-ignore,network,\ + passive-interface,priority,retransmit-interval,shutdown,\ + transmit-delay', + 'ospf_id_decimal_value': 'NO_VALIDATION:1-4294967295', + 'ospf_id_ipaddres_value': 'IPV4Address:', + 'lacp_options': 'TEXT_OPTIONS:port-priority,suspend-individual,\ + timeout', + 'port_priority': 'INTEGER_VALUE:1-65535', + 'lldp_options': 'TEXT_OPTIONS:receive,tlv-select,transmit,\ + trap-notification', + 'lldp_tlv_options': 'TEXT_OPTIONS:link-aggregation,\ + mac-phy-status,management-address,max-frame-size,\ + port-description,port-protocol-vlan,port-vlan,power-mdi,\ + protocol-identity,system-capabilities,system-description,\ + system-name,vid-management,vlan-name', + 'load_interval_delay': 'INTEGER_VALUE:30-300', + 'load_interval_counter': 'INTEGER_VALUE:1-3', + 'mac_accessgroup_name': 'TEXT:', + 'mac_address': 'TEXT:', + 'microburst_threshold': 'NO_VALIDATION:1-4294967295', + 'mtu_value': 'INTEGER_VALUE:64-9216', + 'service_instance': 'NO_VALIDATION:1-4294967295', + 'service_policy_options': 'TEXT_OPTIONS:copp-system-policy,input,\ + output,type', + 'service_policy_name': 'TEXT:', + 'spanning_tree_options': 'TEXT_OPTIONS:bpdufilter,bpduguard,\ + cost,disable,enable,guard,link-type,mst,port,port-priority,vlan', + 'spanning_tree_cost': 'NO_VALIDATION:1-200000000', + 'spanning_tree_interfacerange': 'INTEGER_VALUE_RANGE:1-3999', + 'spanning_tree_portpriority': 'TEXT_OPTIONS:0,32,64,96,128,160,\ + 192,224', + 'portchannel_ipv6_neighbor_mac': 'TEXT:', + 'portchannel_ipv6_neighbor_address': 'IPV6Address:', + 'portchannel_ipv6_linklocal': 'IPV6Address:', + 'portchannel_ipv6_dhcp_vlan': 'INTEGER_VALUE:1-4094', + 'portchannel_ipv6_dhcp_ethernet': 'TEXT:', + 'portchannel_ipv6_dhcp': 'IPV6Address:', + 'portchannel_ipv6_address': 'IPV6Address:', + 'portchannel_ipv6_options': 'TEXT_OPTIONS:address,dhcp,\ + link-local,nd,neighbor', + 'interface_speed': 
'TEXT_OPTIONS:1000,10000,100000,25000,40000,50000,auto', + 'stormcontrol_options': 'TEXT_OPTIONS:broadcast,multicast,\ + unicast', + 'stormcontrol_level': 'FLOAT:', + 'portchannel_dot1q_tag': 'TEXT_OPTIONS:disable,enable,\ + egress-only', + 'vrrp_id': 'INTEGER_VALUE:1-255', +} +NE10032 = { + 'vlan_id': 'INTEGER_VALUE:1-3999', + 'vlan_id_range': 'INTEGER_VALUE_RANGE:1-3999', + 'vlan_name': 'TEXT:', + 'vlan_flood': 'TEXT_OPTIONS:ipv4,ipv6', + 'vlan_state': 'TEXT_OPTIONS:active,suspend', + 'vlan_last_member_query_interval': 'INTEGER_VALUE:1-25', + 'vlan_querier': 'IPV4Address:', + 'vlan_querier_timeout': 'INTEGER_VALUE:1-65535', + 'vlan_query_interval': 'INTEGER_VALUE:1-18000', + 'vlan_query_max_response_time': 'INTEGER_VALUE:1-25', + 'vlan_report_suppression': 'INTEGER_VALUE:1-25', + 'vlan_robustness_variable': 'INTEGER_VALUE:1-7', + 'vlan_startup_query_count': 'INTEGER_VALUE:1-10', + 'vlan_startup_query_interval': 'INTEGER_VALUE:1-18000', + 'vlan_snooping_version': 'INTEGER_VALUE:2-3', + 'vlan_access_map_name': 'TEXT: ', + 'vlan_ethernet_interface': 'TEXT:', + 'vlan_portagg_number': 'INTEGER_VALUE:1-4096', + 'vlan_accessmap_action': 'TEXT_OPTIONS:drop,forward,redirect', + 'vlan_dot1q_tag': 'MATCH_TEXT_OR_EMPTY:egress-only', + 'vlan_filter_name': 'TEXT:', + 'vlag_auto_recovery': 'INTEGER_VALUE:240-3600', + 'vlag_config_consistency': 'TEXT_OPTIONS:disable,strict', + 'vlag_instance': 'INTEGER_VALUE:1-64', + 'vlag_port_aggregation': 'INTEGER_VALUE:1-4096', + 'vlag_priority': 'INTEGER_VALUE:0-65535', + 'vlag_startup_delay': 'INTEGER_VALUE:0-3600', + 'vlag_tier_id': 'INTEGER_VALUE:1-512', + 'vlag_hlthchk_options': 'TEXT_OPTIONS:keepalive-attempts,\ + keepalive-interval,peer-ip,retry-interval', + 'vlag_keepalive_attempts': 'INTEGER_VALUE:1-24', + 'vlag_keepalive_interval': 'INTEGER_VALUE:2-300', + 'vlag_retry_interval': 'INTEGER_VALUE:1-300', + 'vlag_peerip': 'IPV4Address:', + 'vlag_peerip_vrf': 'TEXT_OPTIONS:default,management', + 'bgp_as_number': 'NO_VALIDATION:1-4294967295', + 'bgp_address_family': 'TEXT_OPTIONS:ipv4,ipv6', + 'bgp_bgp_local_count': 'INTEGER_VALUE:2-64', + 'cluster_id_as_ip': 'IPV4Address:', + 'cluster_id_as_number': 'NO_VALIDATION:1-4294967295', + 'confederation_identifier': 'INTEGER_VALUE:1-65535', + 'condeferation_peers_as': 'INTEGER_VALUE:1-65535', + 'stalepath_delay_value': 'INTEGER_VALUE:1-3600', + 'maxas_limit_as': 'INTEGER_VALUE:1-2000', + 'neighbor_ipaddress': 'IPV4Address:', + 'neighbor_as': 'NO_VALIDATION:1-4294967295', + 'router_id': 'IPV4Address:', + 'bgp_keepalive_interval': 'INTEGER_VALUE:0-3600', + 'bgp_holdtime': 'INTEGER_VALUE:0-3600', + 'bgp_aggregate_prefix': 'IPV4AddressWithMask:', + 'addrfamily_routemap_name': 'TEXT:', + 'reachability_half_life': 'INTEGER_VALUE:1-45', + 'start_reuse_route_value': 'INTEGER_VALUE:1-20000', + 'start_suppress_route_value': 'INTEGER_VALUE:1-20000', + 'max_duration_to_suppress_route': 'INTEGER_VALUE:1-255', + 'unreachability_halftime_for_penalty': 'INTEGER_VALUE:1-45', + 'distance_external_AS': 'INTEGER_VALUE:1-255', + 'distance_internal_AS': 'INTEGER_VALUE:1-255', + 'distance_local_routes': 'INTEGER_VALUE:1-255', + 'maxpath_option': 'TEXT_OPTIONS:ebgp,ibgp', + 'maxpath_numbers': 'INTEGER_VALUE:2-32', + 'network_ip_prefix_with_mask': 'IPV4AddressWithMask:', + 'network_ip_prefix_value': 'IPV4Address:', + 'network_ip_prefix_mask': 'IPV4Address:', + 'nexthop_crtitical_delay': 'NO_VALIDATION:1-4294967295', + 'nexthop_noncrtitical_delay': 'NO_VALIDATION:1-4294967295', + 'addrfamily_redistribute_option': 'TEXT_OPTIONS:direct,ospf,\ + 
static', + 'bgp_neighbor_af_occurances': 'INTEGER_VALUE:1-10', + 'bgp_neighbor_af_filtername': 'TEXT:', + 'bgp_neighbor_af_maxprefix': 'INTEGER_VALUE:1-15870', + 'bgp_neighbor_af_prefixname': 'TEXT:', + 'bgp_neighbor_af_routemap': 'TEXT:', + 'bgp_neighbor_address_family': 'TEXT_OPTIONS:ipv4,ipv6', + 'bgp_neighbor_connection_retrytime': 'INTEGER_VALUE:1-65535', + 'bgp_neighbor_description': 'TEXT:', + 'bgp_neighbor_maxhopcount': 'INTEGER_VALUE:1-255', + 'bgp_neighbor_local_as': 'NO_VALIDATION:1-4294967295', + 'bgp_neighbor_maxpeers': 'INTEGER_VALUE:1-96', + 'bgp_neighbor_password': 'TEXT:', + 'bgp_neighbor_timers_Keepalive': 'INTEGER_VALUE:0-3600', + 'bgp_neighbor_timers_holdtime': 'INTEGER_VALUE:0-3600', + 'bgp_neighbor_ttl_hops': 'INTEGER_VALUE:1-254', + 'bgp_neighbor_update_options': 'TEXT_OPTIONS:ethernet,loopback,\ + vlan', + 'bgp_neighbor_update_ethernet': 'TEXT:', + 'bgp_neighbor_update_loopback': 'INTEGER_VALUE:0-7', + 'bgp_neighbor_update_vlan': 'INTEGER_VALUE:1-4094', + 'bgp_neighbor_weight': 'INTEGER_VALUE:0-65535', + 'ethernet_interface_value': 'INTEGER_VALUE:1-32', + 'ethernet_interface_range': 'INTEGER_VALUE_RANGE:1-32', + 'ethernet_interface_string': 'TEXT:', + 'loopback_interface_value': 'INTEGER_VALUE:0-7', + 'mgmt_interface_value': 'INTEGER_VALUE:0-0', + 'vlan_interface_value': 'INTEGER_VALUE:1-4094', + 'portchannel_interface_value': 'INTEGER_VALUE:1-4096', + 'portchannel_interface_range': 'INTEGER_VALUE_RANGE:1-4096', + 'portchannel_interface_string': 'TEXT:', + 'aggregation_group_no': 'INTEGER_VALUE:1-4096', + 'aggregation_group_mode': 'TEXT_OPTIONS:active,on,passive', + 'bfd_options': 'TEXT_OPTIONS:authentication,echo,interval,ipv4,\ + ipv6,neighbor', + 'bfd_interval': 'INTEGER_VALUE:50-999', + 'bfd_minrx': 'INTEGER_VALUE:50-999', + 'bfd_ multiplier': 'INTEGER_VALUE:3-50', + 'bfd_ipv4_options': 'TEXT_OPTIONS:authentication,echo,interval', + 'bfd_auth_options': 'TEXT_OPTIONS:keyed-md5,keyed-sha1,\ + meticulous-keyed-md5,meticulous-keyed-sha1,simple', + 'bfd_key_options': 'TEXT_OPTIONS:key-chain,key-id', + 'bfd_key_chain': 'TEXT:', + 'bfd_key_id': 'INTEGER_VALUE:0-255', + 'bfd_key_name': 'TEXT:', + 'bfd_neighbor_ip': 'TEXT:', + 'bfd_neighbor_options': 'TEXT_OPTIONS:admin-down,multihop,\ + non-persistent', + 'bfd_access_vlan': 'INTEGER_VALUE:1-3999', + 'bfd_bridgeport_mode': 'TEXT_OPTIONS:access,dot1q-tunnel,trunk', + 'trunk_options': 'TEXT_OPTIONS:allowed,native', + 'trunk_vlanid': 'INTEGER_VALUE:1-3999', + 'portCh_description': 'TEXT:', + 'duplex_option': 'TEXT_OPTIONS:auto,full,half', + 'flowcontrol_options': 'TEXT_OPTIONS:receive,send', + 'portchannel_ip_options': 'TEXT_OPTIONS:access-group,address,\ + arp,dhcp,ospf,port,port-unreachable,redirects,router,\ + unreachables', + 'accessgroup_name': 'TEXT:', + 'portchannel_ipv4': 'IPV4Address:', + 'portchannel_ipv4_mask': 'TEXT:', + 'arp_ipaddress': 'IPV4Address:', + 'arp_macaddress': 'TEXT:', + 'arp_timeout_value': 'INTEGER_VALUE:60-28800', + 'relay_ipaddress': 'IPV4Address:', + 'ip_ospf_options': 'TEXT_OPTIONS:authentication,\ + authentication-key,bfd,cost,database-filter,dead-interval,\ + hello-interval,message-digest-key,mtu,mtu-ignore,network,\ + passive-interface,priority,retransmit-interval,shutdown,\ + transmit-delay', + 'ospf_id_decimal_value': 'NO_VALIDATION:1-4294967295', + 'ospf_id_ipaddres_value': 'IPV4Address:', + 'lacp_options': 'TEXT_OPTIONS:port-priority,suspend-individual,\ + timeout', + 'port_priority': 'INTEGER_VALUE:1-65535', + 'lldp_options': 'TEXT_OPTIONS:receive,tlv-select,transmit,\ + 
trap-notification', + 'lldp_tlv_options': 'TEXT_OPTIONS:link-aggregation,\ + mac-phy-status,management-address,max-frame-size,\ + port-description,port-protocol-vlan,port-vlan,power-mdi,\ + protocol-identity,system-capabilities,system-description,\ + system-name,vid-management,vlan-name', + 'load_interval_delay': 'INTEGER_VALUE:30-300', + 'load_interval_counter': 'INTEGER_VALUE:1-3', + 'mac_accessgroup_name': 'TEXT:', + 'mac_address': 'TEXT:', + 'microburst_threshold': 'NO_VALIDATION:1-4294967295', + 'mtu_value': 'INTEGER_VALUE:64-9216', + 'service_instance': 'NO_VALIDATION:1-4294967295', + 'service_policy_options': 'TEXT_OPTIONS:copp-system-policy,input,\ + output,type', + 'service_policy_name': 'TEXT:', + 'spanning_tree_options': 'TEXT_OPTIONS:bpdufilter,bpduguard,\ + cost,disable,enable,guard,link-type,mst,port,port-priority,vlan', + 'spanning_tree_cost': 'NO_VALIDATION:1-200000000', + 'spanning_tree_interfacerange': 'INTEGER_VALUE_RANGE:1-3999', + 'spanning_tree_portpriority': 'TEXT_OPTIONS:0,32,64,96,128,160,\ + 192,224', + 'portchannel_ipv6_neighbor_mac': 'TEXT:', + 'portchannel_ipv6_neighbor_address': 'IPV6Address:', + 'portchannel_ipv6_linklocal': 'IPV6Address:', + 'portchannel_ipv6_dhcp_vlan': 'INTEGER_VALUE:1-4094', + 'portchannel_ipv6_dhcp_ethernet': 'TEXT:', + 'portchannel_ipv6_dhcp': 'IPV6Address:', + 'portchannel_ipv6_address': 'IPV6Address:', + 'portchannel_ipv6_options': 'TEXT_OPTIONS:address,dhcp,\ + link-local,nd,neighbor', + 'interface_speed': 'TEXT_OPTIONS:10000,100000,25000,40000,50000,auto', + 'stormcontrol_options': 'TEXT_OPTIONS:broadcast,multicast,\ + unicast', + 'stormcontrol_level': 'FLOAT:', + 'portchannel_dot1q_tag': 'TEXT_OPTIONS:disable,enable,\ + egress-only', + 'vrrp_id': 'INTEGER_VALUE:1-255', +} +g8272_cnos = {'vlan_id': 'INTEGER_VALUE:1-3999', + 'vlan_id_range': 'INTEGER_VALUE_RANGE:1-3999', + 'vlan_name': 'TEXT:', + 'vlan_flood': 'TEXT_OPTIONS:ipv4,ipv6', + 'vlan_state': 'TEXT_OPTIONS:active,suspend', + 'vlan_last_member_query_interval': 'INTEGER_VALUE:1-25', + 'vlan_querier': 'IPV4Address:', + 'vlan_querier_timeout': 'INTEGER_VALUE:1-65535', + 'vlan_query_interval': 'INTEGER_VALUE:1-18000', + 'vlan_query_max_response_time': 'INTEGER_VALUE:1-25', + 'vlan_report_suppression': 'INTEGER_VALUE:1-25', + 'vlan_robustness_variable': 'INTEGER_VALUE:1-7', + 'vlan_startup_query_count': 'INTEGER_VALUE:1-10', + 'vlan_startup_query_interval': 'INTEGER_VALUE:1-18000', + 'vlan_snooping_version': 'INTEGER_VALUE:2-3', + 'vlan_access_map_name': 'TEXT: ', + 'vlan_ethernet_interface': 'TEXT:', + 'vlan_portagg_number': 'INTEGER_VALUE:1-4096', + 'vlan_accessmap_action': 'TEXT_OPTIONS:drop,forward,redirect', + 'vlan_dot1q_tag': 'MATCH_TEXT_OR_EMPTY:egress-only', + 'vlan_filter_name': 'TEXT:', + 'vlag_auto_recovery': 'INTEGER_VALUE:240-3600', + 'vlag_config_consistency': 'TEXT_OPTIONS:disable,strict', + 'vlag_instance': 'INTEGER_VALUE:1-64', + 'vlag_port_aggregation': 'INTEGER_VALUE:1-4096', + 'vlag_priority': 'INTEGER_VALUE:0-65535', + 'vlag_startup_delay': 'INTEGER_VALUE:0-3600', + 'vlag_tier_id': 'INTEGER_VALUE:1-512', + 'vlag_hlthchk_options': 'TEXT_OPTIONS:keepalive-attempts,\ + keepalive-interval,peer-ip,retry-interval', + 'vlag_keepalive_attempts': 'INTEGER_VALUE:1-24', + 'vlag_keepalive_interval': 'INTEGER_VALUE:2-300', + 'vlag_retry_interval': 'INTEGER_VALUE:1-300', + 'vlag_peerip': 'IPV4Address:', + 'vlag_peerip_vrf': 'TEXT_OPTIONS:default,management', + 'bgp_as_number': 'NO_VALIDATION:1-4294967295', + 'bgp_address_family': 'TEXT_OPTIONS:ipv4,ipv6', + 
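+ # g8272_cnos and g8296_cnos follow the same layout as the NE tables;
+ # the differences are port counts and supported link speeds.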
'bgp_bgp_local_count': 'INTEGER_VALUE:2-64', + 'cluster_id_as_ip': 'IPV4Address:', + 'cluster_id_as_number': 'NO_VALIDATION:1-4294967295', + 'confederation_identifier': 'INTEGER_VALUE:1-65535', + 'condeferation_peers_as': 'INTEGER_VALUE:1-65535', + 'stalepath_delay_value': 'INTEGER_VALUE:1-3600', + 'maxas_limit_as': 'INTEGER_VALUE:1-2000', + 'neighbor_ipaddress': 'IPV4Address:', + 'neighbor_as': 'NO_VALIDATION:1-4294967295', + 'router_id': 'IPV4Address:', + 'bgp_keepalive_interval': 'INTEGER_VALUE:0-3600', + 'bgp_holdtime': 'INTEGER_VALUE:0-3600', + 'bgp_aggregate_prefix': 'IPV4AddressWithMask:', + 'addrfamily_routemap_name': 'TEXT:', + 'reachability_half_life': 'INTEGER_VALUE:1-45', + 'start_reuse_route_value': 'INTEGER_VALUE:1-20000', + 'start_suppress_route_value': 'INTEGER_VALUE:1-20000', + 'max_duration_to_suppress_route': 'INTEGER_VALUE:1-255', + 'unreachability_halftime_for_penalty': 'INTEGER_VALUE:1-45', + 'distance_external_AS': 'INTEGER_VALUE:1-255', + 'distance_internal_AS': 'INTEGER_VALUE:1-255', + 'distance_local_routes': 'INTEGER_VALUE:1-255', + 'maxpath_option': 'TEXT_OPTIONS:ebgp,ibgp', + 'maxpath_numbers': 'INTEGER_VALUE:2-32', + 'network_ip_prefix_with_mask': 'IPV4AddressWithMask:', + 'network_ip_prefix_value': 'IPV4Address:', + 'network_ip_prefix_mask': 'IPV4Address:', + 'nexthop_crtitical_delay': 'NO_VALIDATION:1-4294967295', + 'nexthop_noncrtitical_delay': 'NO_VALIDATION:1-4294967295', + 'addrfamily_redistribute_option': 'TEXT_OPTIONS:direct,ospf,\ + static', + 'bgp_neighbor_af_occurances': 'INTEGER_VALUE:1-10', + 'bgp_neighbor_af_filtername': 'TEXT:', + 'bgp_neighbor_af_maxprefix': 'INTEGER_VALUE:1-15870', + 'bgp_neighbor_af_prefixname': 'TEXT:', + 'bgp_neighbor_af_routemap': 'TEXT:', + 'bgp_neighbor_address_family': 'TEXT_OPTIONS:ipv4,ipv6', + 'bgp_neighbor_connection_retrytime': 'INTEGER_VALUE:1-65535', + 'bgp_neighbor_description': 'TEXT:', + 'bgp_neighbor_maxhopcount': 'INTEGER_VALUE:1-255', + 'bgp_neighbor_local_as': 'NO_VALIDATION:1-4294967295', + 'bgp_neighbor_maxpeers': 'INTEGER_VALUE:1-96', + 'bgp_neighbor_password': 'TEXT:', + 'bgp_neighbor_timers_Keepalive': 'INTEGER_VALUE:0-3600', + 'bgp_neighbor_timers_holdtime': 'INTEGER_VALUE:0-3600', + 'bgp_neighbor_ttl_hops': 'INTEGER_VALUE:1-254', + 'bgp_neighbor_update_options': 'TEXT_OPTIONS:ethernet,loopback,\ + vlan', + 'bgp_neighbor_update_ethernet': 'TEXT:', + 'bgp_neighbor_update_loopback': 'INTEGER_VALUE:0-7', + 'bgp_neighbor_update_vlan': 'INTEGER_VALUE:1-4094', + 'bgp_neighbor_weight': 'INTEGER_VALUE:0-65535', + 'ethernet_interface_value': 'INTEGER_VALUE:1-54', + 'ethernet_interface_range': 'INTEGER_VALUE_RANGE:1-54', + 'ethernet_interface_string': 'TEXT:', + 'loopback_interface_value': 'INTEGER_VALUE:0-7', + 'mgmt_interface_value': 'INTEGER_VALUE:0-0', + 'vlan_interface_value': 'INTEGER_VALUE:1-4094', + 'portchannel_interface_value': 'INTEGER_VALUE:1-4096', + 'portchannel_interface_range': 'INTEGER_VALUE_RANGE:1-4096', + 'portchannel_interface_string': 'TEXT:', + 'aggregation_group_no': 'INTEGER_VALUE:1-4096', + 'aggregation_group_mode': 'TEXT_OPTIONS:active,on,passive', + 'bfd_options': 'TEXT_OPTIONS:authentication,echo,interval,ipv4,\ + ipv6,neighbor', + 'bfd_interval': 'INTEGER_VALUE:50-999', + 'bfd_minrx': 'INTEGER_VALUE:50-999', + 'bfd_ multiplier': 'INTEGER_VALUE:3-50', + 'bfd_ipv4_options': 'TEXT_OPTIONS:authentication,echo,interval', + 'bfd_auth_options': 'TEXT_OPTIONS:keyed-md5,keyed-sha1,\ + meticulous-keyed-md5,meticulous-keyed-sha1,simple', + 'bfd_key_options': 'TEXT_OPTIONS:key-chain,key-id', 
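+ # The BFD timer bounds above (interval and min-rx 50-999, multiplier
+ # 3-50) are identical on every model.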
+ 'bfd_key_chain': 'TEXT:', + 'bfd_key_id': 'INTEGER_VALUE:0-255', + 'bfd_key_name': 'TEXT:', + 'bfd_neighbor_ip': 'TEXT:', + 'bfd_neighbor_options': 'TEXT_OPTIONS:admin-down,multihop,\ + non-persistent', + 'bfd_access_vlan': 'INTEGER_VALUE:1-3999', + 'bfd_bridgeport_mode': 'TEXT_OPTIONS:access,dot1q-tunnel,trunk', + 'trunk_options': 'TEXT_OPTIONS:allowed,native', + 'trunk_vlanid': 'INTEGER_VALUE:1-3999', + 'portCh_description': 'TEXT:', + 'duplex_option': 'TEXT_OPTIONS:auto,full,half', + 'flowcontrol_options': 'TEXT_OPTIONS:receive,send', + 'portchannel_ip_options': 'TEXT_OPTIONS:access-group,address,\ + arp,dhcp,ospf,port,port-unreachable,redirects,router,\ + unreachables', + 'accessgroup_name': 'TEXT:', + 'portchannel_ipv4': 'IPV4Address:', + 'portchannel_ipv4_mask': 'TEXT:', + 'arp_ipaddress': 'IPV4Address:', + 'arp_macaddress': 'TEXT:', + 'arp_timeout_value': 'INTEGER_VALUE:60-28800', + 'relay_ipaddress': 'IPV4Address:', + 'ip_ospf_options': 'TEXT_OPTIONS:authentication,\ + authentication-key,bfd,cost,database-filter,dead-interval,\ + hello-interval,message-digest-key,mtu,mtu-ignore,network,\ + passive-interface,priority,retransmit-interval,shutdown,\ + transmit-delay', + 'ospf_id_decimal_value': 'NO_VALIDATION:1-4294967295', + 'ospf_id_ipaddres_value': 'IPV4Address:', + 'lacp_options': 'TEXT_OPTIONS:port-priority,suspend-individual,\ + timeout', + 'port_priority': 'INTEGER_VALUE:1-65535', + 'lldp_options': 'TEXT_OPTIONS:receive,tlv-select,transmit,\ + trap-notification', + 'lldp_tlv_options': 'TEXT_OPTIONS:link-aggregation,\ + mac-phy-status,management-address,max-frame-size,\ + port-description,port-protocol-vlan,port-vlan,power-mdi,\ + protocol-identity,system-capabilities,system-description,\ + system-name,vid-management,vlan-name', + 'load_interval_delay': 'INTEGER_VALUE:30-300', + 'load_interval_counter': 'INTEGER_VALUE:1-3', + 'mac_accessgroup_name': 'TEXT:', + 'mac_address': 'TEXT:', + 'microburst_threshold': 'NO_VALIDATION:1-4294967295', + 'mtu_value': 'INTEGER_VALUE:64-9216', + 'service_instance': 'NO_VALIDATION:1-4294967295', + 'service_policy_options': 'TEXT_OPTIONS:copp-system-policy,input,\ + output,type', + 'service_policy_name': 'TEXT:', + 'spanning_tree_options': 'TEXT_OPTIONS:bpdufilter,bpduguard,\ + cost,disable,enable,guard,link-type,mst,port,port-priority,vlan', + 'spanning_tree_cost': 'NO_VALIDATION:1-200000000', + 'spanning_tree_interfacerange': 'INTEGER_VALUE_RANGE:1-3999', + 'spanning_tree_portpriority': 'TEXT_OPTIONS:0,32,64,96,128,160,\ + 192,224', + 'portchannel_ipv6_neighbor_mac': 'TEXT:', + 'portchannel_ipv6_neighbor_address': 'IPV6Address:', + 'portchannel_ipv6_linklocal': 'IPV6Address:', + 'portchannel_ipv6_dhcp_vlan': 'INTEGER_VALUE:1-4094', + 'portchannel_ipv6_dhcp_ethernet': 'TEXT:', + 'portchannel_ipv6_dhcp': 'IPV6Address:', + 'portchannel_ipv6_address': 'IPV6Address:', + 'portchannel_ipv6_options': 'TEXT_OPTIONS:address,dhcp,\ + link-local,nd,neighbor', + 'interface_speed': 'TEXT_OPTIONS:1000,10000,40000', + 'stormcontrol_options': 'TEXT_OPTIONS:broadcast,multicast,\ + unicast', + 'stormcontrol_level': 'FLOAT:', + 'portchannel_dot1q_tag': 'TEXT_OPTIONS:disable,enable,\ + egress-only', + 'vrrp_id': 'INTEGER_VALUE:1-255', + } +g8296_cnos = {'vlan_id': 'INTEGER_VALUE:1-3999', + 'vlan_id_range': 'INTEGER_VALUE_RANGE:1-3999', + 'vlan_name': 'TEXT:', + 'vlan_flood': 'TEXT_OPTIONS:ipv4,ipv6', + 'vlan_state': 'TEXT_OPTIONS:active,suspend', + 'vlan_last_member_query_interval': 'INTEGER_VALUE:1-25', + 'vlan_querier': 'IPV4Address:', + 'vlan_querier_timeout': 
'INTEGER_VALUE:1-65535', + 'vlan_query_interval': 'INTEGER_VALUE:1-18000', + 'vlan_query_max_response_time': 'INTEGER_VALUE:1-25', + 'vlan_report_suppression': 'INTEGER_VALUE:1-25', + 'vlan_robustness_variable': 'INTEGER_VALUE:1-7', + 'vlan_startup_query_count': 'INTEGER_VALUE:1-10', + 'vlan_startup_query_interval': 'INTEGER_VALUE:1-18000', + 'vlan_snooping_version': 'INTEGER_VALUE:2-3', + 'vlan_access_map_name': 'TEXT: ', + 'vlan_ethernet_interface': 'TEXT:', + 'vlan_portagg_number': 'INTEGER_VALUE:1-4096', + 'vlan_accessmap_action': 'TEXT_OPTIONS:drop,forward,redirect', + 'vlan_dot1q_tag': 'MATCH_TEXT_OR_EMPTY:egress-only', + 'vlan_filter_name': 'TEXT:', + 'vlag_auto_recovery': 'INTEGER_VALUE:240-3600', + 'vlag_config_consistency': 'TEXT_OPTIONS:disable,strict', + 'vlag_instance': 'INTEGER_VALUE:1-128', + 'vlag_port_aggregation': 'INTEGER_VALUE:1-4096', + 'vlag_priority': 'INTEGER_VALUE:0-65535', + 'vlag_startup_delay': 'INTEGER_VALUE:0-3600', + 'vlag_tier_id': 'INTEGER_VALUE:1-512', + 'vlag_hlthchk_options': 'TEXT_OPTIONS:keepalive-attempts,\ + keepalive-interval,peer-ip,retry-interval', + 'vlag_keepalive_attempts': 'INTEGER_VALUE:1-24', + 'vlag_keepalive_interval': 'INTEGER_VALUE:2-300', + 'vlag_retry_interval': 'INTEGER_VALUE:1-300', + 'vlag_peerip': 'IPV4Address:', + 'vlag_peerip_vrf': 'TEXT_OPTIONS:default,management', + 'bgp_as_number': 'NO_VALIDATION:1-4294967295', + 'bgp_address_family': 'TEXT_OPTIONS:ipv4,ipv6', + 'bgp_bgp_local_count': 'INTEGER_VALUE:2-64', + 'cluster_id_as_ip': 'IPV4Address:', + 'cluster_id_as_number': 'NO_VALIDATION:1-4294967295', + 'confederation_identifier': 'INTEGER_VALUE:1-65535', + 'condeferation_peers_as': 'INTEGER_VALUE:1-65535', + 'stalepath_delay_value': 'INTEGER_VALUE:1-3600', + 'maxas_limit_as': 'INTEGER_VALUE:1-2000', + 'neighbor_ipaddress': 'IPV4Address:', + 'neighbor_as': 'NO_VALIDATION:1-4294967295', + 'router_id': 'IPV4Address:', + 'bgp_keepalive_interval': 'INTEGER_VALUE:0-3600', + 'bgp_holdtime': 'INTEGER_VALUE:0-3600', + 'bgp_aggregate_prefix': 'IPV4AddressWithMask:', + 'addrfamily_routemap_name': 'TEXT:', + 'reachability_half_life': 'INTEGER_VALUE:1-45', + 'start_reuse_route_value': 'INTEGER_VALUE:1-20000', + 'start_suppress_route_value': 'INTEGER_VALUE:1-20000', + 'max_duration_to_suppress_route': 'INTEGER_VALUE:1-255', + 'unreachability_halftime_for_penalty': 'INTEGER_VALUE:1-45', + 'distance_external_AS': 'INTEGER_VALUE:1-255', + 'distance_internal_AS': 'INTEGER_VALUE:1-255', + 'distance_local_routes': 'INTEGER_VALUE:1-255', + 'maxpath_option': 'TEXT_OPTIONS:ebgp,ibgp', + 'maxpath_numbers': 'INTEGER_VALUE:2-32', + 'network_ip_prefix_with_mask': 'IPV4AddressWithMask:', + 'network_ip_prefix_value': 'IPV4Address:', + 'network_ip_prefix_mask': 'IPV4Address:', + 'nexthop_crtitical_delay': 'NO_VALIDATION:1-4294967295', + 'nexthop_noncrtitical_delay': 'NO_VALIDATION:1-4294967295', + 'addrfamily_redistribute_option': 'TEXT_OPTIONS:direct,ospf,\ + static', + 'bgp_neighbor_af_occurances': 'INTEGER_VALUE:1-10', + 'bgp_neighbor_af_filtername': 'TEXT:', + 'bgp_neighbor_af_maxprefix': 'INTEGER_VALUE:1-15870', + 'bgp_neighbor_af_prefixname': 'TEXT:', + 'bgp_neighbor_af_routemap': 'TEXT:', + 'bgp_neighbor_address_family': 'TEXT_OPTIONS:ipv4,ipv6', + 'bgp_neighbor_connection_retrytime': 'INTEGER_VALUE:1-65535', + 'bgp_neighbor_description': 'TEXT:', + 'bgp_neighbor_maxhopcount': 'INTEGER_VALUE:1-255', + 'bgp_neighbor_local_as': 'NO_VALIDATION:1-4294967295', + 'bgp_neighbor_maxpeers': 'INTEGER_VALUE:1-96', + 'bgp_neighbor_password': 'TEXT:', + 
'bgp_neighbor_timers_Keepalive': 'INTEGER_VALUE:0-3600', + 'bgp_neighbor_timers_holdtime': 'INTEGER_VALUE:0-3600', + 'bgp_neighbor_ttl_hops': 'INTEGER_VALUE:1-254', + 'bgp_neighbor_update_options': 'TEXT_OPTIONS:ethernet,loopback,\ + vlan', + 'bgp_neighbor_update_ethernet': 'TEXT:', + 'bgp_neighbor_update_loopback': 'INTEGER_VALUE:0-7', + 'bgp_neighbor_update_vlan': 'INTEGER_VALUE:1-4094', + 'bgp_neighbor_weight': 'INTEGER_VALUE:0-65535', + 'ethernet_interface_value': 'INTEGER_VALUE:1-96', + 'ethernet_interface_range': 'INTEGER_VALUE_RANGE:1-96', + 'ethernet_interface_string': 'TEXT:', + 'loopback_interface_value': 'INTEGER_VALUE:0-7', + 'mgmt_interface_value': 'INTEGER_VALUE:0-0', + 'vlan_interface_value': 'INTEGER_VALUE:1-4094', + 'portchannel_interface_value': 'INTEGER_VALUE:1-4096', + 'portchannel_interface_range': 'INTEGER_VALUE_RANGE:1-4096', + 'portchannel_interface_string': 'TEXT:', + 'aggregation_group_no': 'INTEGER_VALUE:1-4096', + 'aggregation_group_mode': 'TEXT_OPTIONS:active,on,passive', + 'bfd_options': 'TEXT_OPTIONS:authentication,echo,interval,ipv4,\ + ipv6,neighbor', + 'bfd_interval': 'INTEGER_VALUE:50-999', + 'bfd_minrx': 'INTEGER_VALUE:50-999', + 'bfd_multiplier': 'INTEGER_VALUE:3-50', + 'bfd_ipv4_options': 'TEXT_OPTIONS:authentication,echo,interval', + 'bfd_auth_options': 'TEXT_OPTIONS:keyed-md5,keyed-sha1,\ + meticulous-keyed-md5,meticulous-keyed-sha1,simple', + 'bfd_key_options': 'TEXT_OPTIONS:key-chain,key-id', + 'bfd_key_chain': 'TEXT:', + 'bfd_key_id': 'INTEGER_VALUE:0-255', + 'bfd_key_name': 'TEXT:', + 'bfd_neighbor_ip': 'TEXT:', + 'bfd_neighbor_options': 'TEXT_OPTIONS:admin-down,multihop,\ + non-persistent', + 'bfd_access_vlan': 'INTEGER_VALUE:1-3999', + 'bfd_bridgeport_mode': 'TEXT_OPTIONS:access,dot1q-tunnel,trunk', + 'trunk_options': 'TEXT_OPTIONS:allowed,native', + 'trunk_vlanid': 'INTEGER_VALUE:1-3999', + 'portCh_description': 'TEXT:', + 'duplex_option': 'TEXT_OPTIONS:auto,full,half', + 'flowcontrol_options': 'TEXT_OPTIONS:receive,send', + 'portchannel_ip_options': 'TEXT_OPTIONS:access-group,address,\ + arp,dhcp,ospf,port,port-unreachable,redirects,router,\ + unreachables', + 'accessgroup_name': 'TEXT:', + 'portchannel_ipv4': 'IPV4Address:', + 'portchannel_ipv4_mask': 'TEXT:', + 'arp_ipaddress': 'IPV4Address:', + 'arp_macaddress': 'TEXT:', + 'arp_timeout_value': 'INTEGER_VALUE:60-28800', + 'relay_ipaddress': 'IPV4Address:', + 'ip_ospf_options': 'TEXT_OPTIONS:authentication,\ + authentication-key,bfd,cost,database-filter,dead-interval,\ + hello-interval,message-digest-key,mtu,mtu-ignore,network,\ + passive-interface,priority,retransmit-interval,shutdown,\ + transmit-delay', + 'ospf_id_decimal_value': 'NO_VALIDATION:1-4294967295', + 'ospf_id_ipaddres_value': 'IPV4Address:', + 'lacp_options': 'TEXT_OPTIONS:port-priority,suspend-individual,\ + timeout', + 'port_priority': 'INTEGER_VALUE:1-65535', + 'lldp_options': 'TEXT_OPTIONS:receive,tlv-select,transmit,\ + trap-notification', + 'lldp_tlv_options': 'TEXT_OPTIONS:link-aggregation,\ + mac-phy-status,management-address,max-frame-size,\ + port-description,port-protocol-vlan,port-vlan,power-mdi,\ + protocol-identity,system-capabilities,system-description,\ + system-name,vid-management,vlan-name', + 'load_interval_delay': 'INTEGER_VALUE:30-300', + 'load_interval_counter': 'INTEGER_VALUE:1-3', + 'mac_accessgroup_name': 'TEXT:', + 'mac_address': 'TEXT:', + 'microburst_threshold': 'NO_VALIDATION:1-4294967295', + 'mtu_value': 'INTEGER_VALUE:64-9216', + 'service_instance': 'NO_VALIDATION:1-4294967295', +
'service_policy_options': 'TEXT_OPTIONS:copp-system-policy,\ + input,output,type', + 'service_policy_name': 'TEXT:', + 'spanning_tree_options': 'TEXT_OPTIONS:bpdufilter,bpduguard,\ + cost,disable,enable,guard,link-type,mst,port,port-priority,vlan', + 'spanning_tree_cost': 'NO_VALIDATION:1-200000000', + 'spanning_tree_interfacerange': 'INTEGER_VALUE_RANGE:1-3999', + 'spanning_tree_portpriority': 'TEXT_OPTIONS:0,32,64,96,128,160,\ + 192,224', + 'portchannel_ipv6_neighbor_mac': 'TEXT:', + 'portchannel_ipv6_neighbor_address': 'IPV6Address:', + 'portchannel_ipv6_linklocal': 'IPV6Address:', + 'portchannel_ipv6_dhcp_vlan': 'INTEGER_VALUE:1-4094', + 'portchannel_ipv6_dhcp_ethernet': 'TEXT:', + 'portchannel_ipv6_dhcp': 'IPV6Address:', + 'portchannel_ipv6_address': 'IPV6Address:', + 'portchannel_ipv6_options': 'TEXT_OPTIONS:address,dhcp,\ + link-local,nd,neighbor', + 'interface_speed': 'TEXT_OPTIONS:1000,10000,40000,auto', + 'stormcontrol_options': 'TEXT_OPTIONS:broadcast,multicast,\ + unicast', + 'stormcontrol_level': 'FLOAT:', + 'portchannel_dot1q_tag': 'TEXT_OPTIONS:disable,enable,\ + egress-only', + 'vrrp_id': 'INTEGER_VALUE:1-255', + } +g8332_cnos = {'vlan_id': 'INTEGER_VALUE:1-3999', + 'vlan_id_range': 'INTEGER_VALUE_RANGE:1-3999', + 'vlan_name': 'TEXT:', + 'vlan_flood': 'TEXT_OPTIONS:ipv4,ipv6', + 'vlan_state': 'TEXT_OPTIONS:active,suspend', + 'vlan_last_member_query_interval': 'INTEGER_VALUE:1-25', + 'vlan_querier': 'IPV4Address:', + 'vlan_querier_timeout': 'INTEGER_VALUE:1-65535', + 'vlan_query_interval': 'INTEGER_VALUE:1-18000', + 'vlan_query_max_response_time': 'INTEGER_VALUE:1-25', + 'vlan_report_suppression': 'INTEGER_VALUE:1-25', + 'vlan_robustness_variable': 'INTEGER_VALUE:1-7', + 'vlan_startup_query_count': 'INTEGER_VALUE:1-10', + 'vlan_startup_query_interval': 'INTEGER_VALUE:1-18000', + 'vlan_snooping_version': 'INTEGER_VALUE:2-3', + 'vlan_access_map_name': 'TEXT: ', + 'vlan_ethernet_interface': 'TEXT:', + 'vlan_portagg_number': 'INTEGER_VALUE:1-4096', + 'vlan_accessmap_action': 'TEXT_OPTIONS:drop,forward,redirect', + 'vlan_dot1q_tag': 'MATCH_TEXT_OR_EMPTY:egress-only', + 'vlan_filter_name': 'TEXT:', + 'vlag_auto_recovery': 'INTEGER_VALUE:240-3600', + 'vlag_config_consistency': 'TEXT_OPTIONS:disable,strict', + 'vlag_instance': 'INTEGER_VALUE:1-128', + 'vlag_port_aggregation': 'INTEGER_VALUE:1-4096', + 'vlag_priority': 'INTEGER_VALUE:0-65535', + 'vlag_startup_delay': 'INTEGER_VALUE:0-3600', + 'vlag_tier_id': 'INTEGER_VALUE:1-512', + 'vlag_hlthchk_options': 'TEXT_OPTIONS:keepalive-attempts,\ + keepalive-interval,peer-ip,retry-interval', + 'vlag_keepalive_attempts': 'INTEGER_VALUE:1-24', + 'vlag_keepalive_interval': 'INTEGER_VALUE:2-300', + 'vlag_retry_interval': 'INTEGER_VALUE:1-300', + 'vlag_peerip': 'IPV4Address:', + 'vlag_peerip_vrf': 'TEXT_OPTIONS:default,management', + 'bgp_as_number': 'NO_VALIDATION:1-4294967295', + 'bgp_address_family': 'TEXT_OPTIONS:ipv4,ipv6', + 'bgp_bgp_local_count': 'INTEGER_VALUE:2-64', + 'cluster_id_as_ip': 'IPV4Address:', + 'cluster_id_as_number': 'NO_VALIDATION:1-4294967295', + 'confederation_identifier': 'INTEGER_VALUE:1-65535', + 'condeferation_peers_as': 'INTEGER_VALUE:1-65535', + 'stalepath_delay_value': 'INTEGER_VALUE:1-3600', + 'maxas_limit_as': 'INTEGER_VALUE:1-2000', + 'neighbor_ipaddress': 'IPV4Address:', + 'neighbor_as': 'NO_VALIDATION:1-4294967295', + 'router_id': 'IPV4Address:', + 'bgp_keepalive_interval': 'INTEGER_VALUE:0-3600', + 'bgp_holdtime': 'INTEGER_VALUE:0-3600', + 'bgp_aggregate_prefix': 'IPV4AddressWithMask:', + 
'addrfamily_routemap_name': 'TEXT:', + 'reachability_half_life': 'INTEGER_VALUE:1-45', + 'start_reuse_route_value': 'INTEGER_VALUE:1-20000', + 'start_suppress_route_value': 'INTEGER_VALUE:1-20000', + 'max_duration_to_suppress_route': 'INTEGER_VALUE:1-255', + 'unreachability_halftime_for_penalty': 'INTEGER_VALUE:1-45', + 'distance_external_AS': 'INTEGER_VALUE:1-255', + 'distance_internal_AS': 'INTEGER_VALUE:1-255', + 'distance_local_routes': 'INTEGER_VALUE:1-255', + 'maxpath_option': 'TEXT_OPTIONS:ebgp,ibgp', + 'maxpath_numbers': 'INTEGER_VALUE:2-32', + 'network_ip_prefix_with_mask': 'IPV4AddressWithMask:', + 'network_ip_prefix_value': 'IPV4Address:', + 'network_ip_prefix_mask': 'IPV4Address:', + 'nexthop_crtitical_delay': 'NO_VALIDATION:1-4294967295', + 'nexthop_noncrtitical_delay': 'NO_VALIDATION:1-4294967295', + 'addrfamily_redistribute_option': 'TEXT_OPTIONS:direct,ospf,\ + static', + 'bgp_neighbor_af_occurances': 'INTEGER_VALUE:1-10', + 'bgp_neighbor_af_filtername': 'TEXT:', + 'bgp_neighbor_af_maxprefix': 'INTEGER_VALUE:1-15870', + 'bgp_neighbor_af_prefixname': 'TEXT:', + 'bgp_neighbor_af_routemap': 'TEXT:', + 'bgp_neighbor_address_family': 'TEXT_OPTIONS:ipv4,ipv6', + 'bgp_neighbor_connection_retrytime': 'INTEGER_VALUE:1-65535', + 'bgp_neighbor_description': 'TEXT:', + 'bgp_neighbor_maxhopcount': 'INTEGER_VALUE:1-255', + 'bgp_neighbor_local_as': 'NO_VALIDATION:1-4294967295', + 'bgp_neighbor_maxpeers': 'INTEGER_VALUE:1-96', + 'bgp_neighbor_password': 'TEXT:', + 'bgp_neighbor_timers_Keepalive': 'INTEGER_VALUE:0-3600', + 'bgp_neighbor_timers_holdtime': 'INTEGER_VALUE:0-3600', + 'bgp_neighbor_ttl_hops': 'INTEGER_VALUE:1-254', + 'bgp_neighbor_update_options': 'TEXT_OPTIONS:ethernet,loopback,\ + vlan', + 'bgp_neighbor_update_ethernet': 'TEXT:', + 'bgp_neighbor_update_loopback': 'INTEGER_VALUE:0-7', + 'bgp_neighbor_update_vlan': 'INTEGER_VALUE:1-4094', + 'bgp_neighbor_weight': 'INTEGER_VALUE:0-65535', + 'ethernet_interface_value': 'INTEGER_VALUE:1-32', + 'ethernet_interface_range': 'INTEGER_VALUE_RANGE:1-32', + 'ethernet_interface_string': 'TEXT:', + 'loopback_interface_value': 'INTEGER_VALUE:0-7', + 'mgmt_interface_value': 'INTEGER_VALUE:0-0', + 'vlan_interface_value': 'INTEGER_VALUE:1-4094', + 'portchannel_interface_value': 'INTEGER_VALUE:1-4096', + 'portchannel_interface_range': 'INTEGER_VALUE_RANGE:1-4096', + 'portchannel_interface_string': 'TEXT:', + 'aggregation_group_no': 'INTEGER_VALUE:1-4096', + 'aggregation_group_mode': 'TEXT_OPTIONS:active,on,passive', + 'bfd_options': 'TEXT_OPTIONS:authentication,echo,interval,ipv4,\ + ipv6,neighbor', + 'bfd_interval': 'INTEGER_VALUE:50-999', + 'bfd_minrx': 'INTEGER_VALUE:50-999', + 'bfd_multiplier': 'INTEGER_VALUE:3-50', + 'bfd_ipv4_options': 'TEXT_OPTIONS:authentication,echo,interval', + 'bfd_auth_options': 'TEXT_OPTIONS:keyed-md5,keyed-sha1,\ + meticulous-keyed-md5,meticulous-keyed-sha1,simple', + 'bfd_key_options': 'TEXT_OPTIONS:key-chain,key-id', + 'bfd_key_chain': 'TEXT:', + 'bfd_key_id': 'INTEGER_VALUE:0-255', + 'bfd_key_name': 'TEXT:', + 'bfd_neighbor_ip': 'TEXT:', + 'bfd_neighbor_options': 'TEXT_OPTIONS:admin-down,multihop,\ + non-persistent', + 'bfd_access_vlan': 'INTEGER_VALUE:1-3999', + 'bfd_bridgeport_mode': 'TEXT_OPTIONS:access,dot1q-tunnel,trunk', + 'trunk_options': 'TEXT_OPTIONS:allowed,native', + 'trunk_vlanid': 'INTEGER_VALUE:1-3999', + 'portCh_description': 'TEXT:', + 'duplex_option': 'TEXT_OPTIONS:auto,full,half', + 'flowcontrol_options': 'TEXT_OPTIONS:receive,send', + 'portchannel_ip_options':
'TEXT_OPTIONS:access-group,address,arp,\ + dhcp,ospf,port,port-unreachable,redirects,router,unreachables', + 'accessgroup_name': 'TEXT:', + 'portchannel_ipv4': 'IPV4Address:', + 'portchannel_ipv4_mask': 'TEXT:', + 'arp_ipaddress': 'IPV4Address:', + 'arp_macaddress': 'TEXT:', + 'arp_timeout_value': 'INTEGER_VALUE:60-28800', + 'relay_ipaddress': 'IPV4Address:', + 'ip_ospf_options': 'TEXT_OPTIONS:authentication,\ + authentication-key,bfd,cost,database-filter,dead-interval,\ + hello-interval,message-digest-key,mtu,mtu-ignore,network,\ + passive-interface,priority,retransmit-interval,shutdown,\ + transmit-delay', + 'ospf_id_decimal_value': 'NO_VALIDATION:1-4294967295', + 'ospf_id_ipaddres_value': 'IPV4Address:', + 'lacp_options': 'TEXT_OPTIONS:port-priority,suspend-individual,\ + timeout', + 'port_priority': 'INTEGER_VALUE:1-65535', + 'lldp_options': 'TEXT_OPTIONS:receive,tlv-select,transmit,\ + trap-notification', + 'lldp_tlv_options': 'TEXT_OPTIONS:link-aggregation,\ + mac-phy-status,management-address,max-frame-size,\ + port-description,port-protocol-vlan,port-vlan,power-mdi,\ + protocol-identity,system-capabilities,system-description,\ + system-name,vid-management,vlan-name', + 'load_interval_delay': 'INTEGER_VALUE:30-300', + 'load_interval_counter': 'INTEGER_VALUE:1-3', + 'mac_accessgroup_name': 'TEXT:', + 'mac_address': 'TEXT:', + 'microburst_threshold': 'NO_VALIDATION:1-4294967295', + 'mtu_value': 'INTEGER_VALUE:64-9216', + 'service_instance': 'NO_VALIDATION:1-4294967295', + 'service_policy_options': 'TEXT_OPTIONS:copp-system-policy,\ + input,output,type', + 'service_policy_name': 'TEXT:', + 'spanning_tree_options': 'TEXT_OPTIONS:bpdufilter,bpduguard,\ + cost,disable,enable,guard,link-type,mst,port,port-priority,vlan', + 'spanning_tree_cost': 'NO_VALIDATION:1-200000000', + 'spanning_tree_interfacerange': 'INTEGER_VALUE_RANGE:1-3999', + 'spanning_tree_portpriority': 'TEXT_OPTIONS:0,32,64,96,128,160,\ + 192,224', + 'portchannel_ipv6_neighbor_mac': 'TEXT:', + 'portchannel_ipv6_neighbor_address': 'IPV6Address:', + 'portchannel_ipv6_linklocal': 'IPV6Address:', + 'portchannel_ipv6_dhcp_vlan': 'INTEGER_VALUE:1-4094', + 'portchannel_ipv6_dhcp_ethernet': 'TEXT:', + 'portchannel_ipv6_dhcp': 'IPV6Address:', + 'portchannel_ipv6_address': 'IPV6Address:', + 'portchannel_ipv6_options': 'TEXT_OPTIONS:address,dhcp,\ + link-local,nd,neighbor', + 'interface_speed': 'TEXT_OPTIONS:1000,10000,40000,50000,auto', + 'stormcontrol_options': 'TEXT_OPTIONS:broadcast,multicast,\ + unicast', + 'stormcontrol_level': 'FLOAT:', + 'portchannel_dot1q_tag': 'TEXT_OPTIONS:disable,enable,\ + egress-only', + 'vrrp_id': 'INTEGER_VALUE:1-255', + } diff --git a/plugins/module_utils/network/cnos/cnos_errorcodes.py b/plugins/module_utils/network/cnos/cnos_errorcodes.py new file mode 100644 index 0000000000..3a83af00fc --- /dev/null +++ b/plugins/module_utils/network/cnos/cnos_errorcodes.py @@ -0,0 +1,256 @@ +# This code is part of Ansible, but is an independent component. +# This particular file snippet, and this file snippet only, is BSD licensed. +# Modules you write using this snippet, which is embedded dynamically by +# Ansible still belong to the author of the module, and may assign their own +# license to the complete work. +# +# Copyright (C) 2017 Lenovo, Inc. +# All rights reserved. 
+# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. +# +# Contains error codes and methods +# Lenovo Networking + +errorDict = {0: 'Success', + 1: 'NOK', + 101: 'Device Response Timed out', + 102: 'Command Not supported - Use CLI command', + 103: 'Invalid Context', + 104: 'Command Value Not Supported as of Now. Use vlan Id only', + 105: 'Invalid interface Range', + 106: 'Please provide Enable Password.', + 108: '', + 109: '', + 110: 'Invalid protocol option', + 111: 'The Value is not Integer', + 112: 'The Value is not Float', + 113: 'Value is not in Range', + 114: 'Range value is not Integer', + 115: 'Value is not in Options', + 116: 'The Value is not Long', + 117: 'Range value is not Long', + 118: 'The Value cannot be empty', + 119: 'The Value is not String', + 120: 'The Value is not Matching', + 121: 'The Value is not IPV4 Address', + 122: 'The Value is not IPV6 Address', + 123: '', + 124: '', + 125: '', + 126: '', + 127: '', + 128: '', + 129: '', + 130: 'Invalid Access Map Name', + 131: 'Invalid Vlan Dot1q Tag', + 132: 'Invalid Vlan filter value', + 133: 'Invalid Vlan Range Value', + 134: 'Invalid Vlan Id', + 135: 'Invalid Vlan Access Map Action', + 136: 'Invalid Vlan Access Map Name', + 137: 'Invalid Access List', + 138: 'Invalid Vlan Access Map parameter', + 139: 'Invalid Vlan Name', + 140: 'Invalid Vlan Flood value', + 141: 'Invalid Vlan State Value', + 142: 'Invalid Vlan Last Member query Interval', + 143: 'Invalid Querier IP address', + 144: 'Invalid Querier Time out', + 145: 'Invalid Query Interval', + 146: 'Invalid Vlan query max response time', + 147: 'Invalid vlan robustness variable', + 148: 'Invalid Vlan Startup Query count', + 149: 'Invalid vlan Startup Query Interval', + 150: 'Invalid Vlan snooping version', + 151: 'Invalid Vlan Ethernet Interface', + 152: 'Invalid Vlan Port Tag Number', + 153: 'Invalid mrouter option', + 154: 'Invalid Vlan Option', + 155: '', + 156: '', + 157: '', + 158: '', + 159: '', + 160: 'Invalid Vlag Auto Recovery Value', + 161: 'Invalid Vlag Config Consistency Value', + 162: 'Invalid Vlag Port Aggregation Number', + 163: 'Invalid Vlag Priority Value', + 164: 'Invalid Vlag Startup delay value', + 165: 'Invalid Vlag Tier Id', + 166: 'Invalid Vlag Instance Option', + 167: 'Invalid Vlag Keep Alive Attempts', + 168: 'Invalid Vlag Keep
Alive Interval', + 169: 'Invalid Vlag Retry Interval', + 170: 'Invalid Vlag Peer Ip VRF Value', + 171: 'Invalid Vlag Health Check Options', + 172: 'Invalid Vlag Option', + 173: '', + 174: '', + 175: '', + 176: 'Invalid BGP As Number', + 177: 'Invalid Routing protocol option', + 178: 'Invalid BGP Address Family', + 179: 'Invalid AS Path options', + 180: 'Invalid BGP med options', + 181: 'Invalid Best Path option', + 182: 'Invalid BGP Local count number', + 183: 'Cluster Id has to be either an IP or an AS Number', + 184: 'Invalid confederation identifier', + 185: 'Invalid Confederation Peer AS Value', + 186: 'Invalid Confederation Option', + 187: 'Invalid stale path delay value', + 188: 'Invalid Maxas Limit AS Value', + 189: 'Invalid Neighbor IP Address or Neighbor AS Number', + 190: 'Invalid Router Id', + 191: 'Invalid BGP Keep Alive Interval', + 192: 'Invalid BGP Hold time', + 193: 'Invalid BGP Option', + 194: 'Invalid BGP Address Family option', + 195: 'Invalid BGP Address Family Redistribution option.', + 196: 'Invalid BGP Address Family Route Map Name', + 197: 'Invalid Next Hop Critical Delay', + 198: 'Invalid Next Hop Non Critical Delay', + 199: 'Invalid Multipath Number Value', + 200: 'Invalid Aggregation Group Mode', + 201: 'Invalid Aggregation Group No', + 202: 'Invalid BFD Access Vlan', + 203: 'Invalid BFD Bridgeport Mode', + 204: 'Invalid Trunk Option', + 205: 'Invalid BFD Option', + 206: 'Invalid Portchannel description', + 207: 'Invalid Portchannel duplex option', + 208: 'Invalid Flow control option state', + 209: 'Invalid Flow control option', + 210: 'Invalid LACP Port priority', + 211: 'Invalid LACP Time out options', + 212: 'Invalid LACP Command options', + 213: 'Invalid LLDP TLV Option', + 214: 'Invalid LLDP Option', + 215: 'Invalid Load interval delay', + 216: 'Invalid Load interval Counter Number', + 217: 'Invalid Load Interval option', + 218: 'Invalid Mac Access Group Name', + 219: 'Invalid Mac Address', + 220: 'Invalid Microburst threshold value', + 221: 'Invalid MTU Value', + 222: 'Invalid Service instance value', + 223: 'Invalid service policy name', + 224: 'Invalid service policy options', + 225: 'Invalid Interface speed value', + 226: 'Invalid Storm control level value', + 227: 'Invalid Storm control option', + 228: 'Invalid Portchannel dot1q tag', + 229: 'Invalid VRRP Id Value', + 230: 'Invalid VRRP Options', + 231: 'Invalid portchannel source interface option', + 232: 'Invalid portchannel load balance options', + 233: 'Invalid Portchannel configuration attribute', + 234: 'Invalid BFD Interval Value', + 235: 'Invalid BFD minrx Value', + 236: 'Invalid BFD multiplier Value', + 237: 'Invalid Key Chain Value', + 238: 'Invalid key name option', + 239: 'Invalid key id value', + 240: 'Invalid Key Option', + 241: 'Invalid authentication option', + 242: 'Invalid destination Ip', + 243: 'Invalid source Ip', + 244: 'Invalid IP Option', + 245: 'Invalid Access group option', + 246: 'Invalid Access group name', + 247: 'Invalid ARP MacAddress Value', + 248: 'Invalid ARP timeout value', + 249: 'Invalid ARP Option', + 250: 'Invalid dhcp request option', + 251: 'Invalid dhcp Client option', + 252: 'Invalid relay Ip Address', + 253: 'Invalid dhcp Option', + 254: 'Invalid OSPF Option', + 255: 'Invalid OSPF Id IP Address Value', + 256: 'Invalid Ip Router Option', + 257: 'Invalid Spanning tree bpdufilter Options', + 258: 'Invalid Spanning tree bpduguard Options', + 259: 'Invalid Spanning tree cost Options', + 260: 'Invalid Spanning tree guard Options', + 261: 'Invalid Spanning tree link-type
Options', + 262: 'Invalid Spanning tree link-type Options', + 263: 'Invalid Spanning tree options', + 264: 'Port-priority in increments of 32 is required', + 265: 'Invalid Spanning tree vlan options', + 266: 'Invalid IPv6 option', + 267: 'Invalid IPV6 neighbor IP Address', + 268: 'Invalid IPV6 neighbor mac address', + 269: 'Invalid IPV6 dhcp option', + 270: 'Invalid IPV6 relay address option', + 271: 'Invalid IPV6 Ethernet option', + 272: 'Invalid IPV6 Vlan option', + 273: 'Invalid IPV6 Link Local option', + 274: 'Invalid IPV6 dhcp option', + 275: 'Invalid IPV6 Address', + 276: 'Invalid IPV6 Address option', + 277: 'Invalid BFD neighbor options', + 278: 'Invalid Secondary option', + 289: 'Invalid PortChannel IPV4 address', + 290: 'Invalid Max Path Options', + 291: 'Invalid Distance Local Route value', + 292: 'Invalid Distance Internal AS value', + 293: 'Invalid Distance External AS value', + 294: 'Invalid BGP Reachability Half Life', + 295: 'Invalid BGP Dampening parameter', + 296: 'Invalid BGP Aggregate Prefix value', + 297: 'Invalid BGP Aggregate Prefix Option', + 298: 'Invalid BGP Address Family Route Map Name', + 299: 'Invalid BGP Net IP Mask Value', + 300: 'Invalid BGP Net IP Prefix Value', + 301: 'Invalid BGP Neighbor configuration option', + 302: 'Invalid BGP Neighbor Weight Value', + 303: 'Invalid Neighbor update source option', + 304: 'Invalid Ethernet slot/chassis number', + 305: 'Invalid Loopback Interface number', + 306: 'Invalid vlan id', + 307: 'Invalid Number of hops', + 308: 'Invalid Neighbor Keepalive interval', + 309: 'Invalid Neighbor timer hold time', + 310: 'Invalid neighbor password', + 311: 'Invalid Max peer limit', + 312: 'Invalid Local AS Number', + 313: 'Invalid maximum hop count', + 314: 'Invalid neighbor description', + 315: 'Invalid Neighbor connect timer value', + 316: 'Invalid Neighbor address family option', + 317: 'Invalid neighbor address family option', + 318: 'Invalid route-map name', + 319: 'Invalid route-map', + 320: 'Invalid Name of a prefix list', + 321: 'Invalid Filter incoming option', + 322: 'Invalid AS path access-list name', + 323: 'Invalid Filter route option', + 324: 'Invalid route-map name', + 325: 'Invalid Number of occurrences of AS number', + 326: 'Invalid Prefix Limit'} + + +def getErrorString(errorCode): + retVal = errorDict[int(errorCode)] + return retVal +# EOM diff --git a/plugins/module_utils/network/edgeos/__init__.py b/plugins/module_utils/network/edgeos/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/plugins/module_utils/network/edgeos/edgeos.py b/plugins/module_utils/network/edgeos/edgeos.py new file mode 100644 index 0000000000..c7eef62179 --- /dev/null +++ b/plugins/module_utils/network/edgeos/edgeos.py @@ -0,0 +1,132 @@ +# This code is part of Ansible, but is an independent component. +# This particular file snippet, and this file snippet only, is BSD licensed. +# Modules you write using this snippet, which is embedded dynamically by Ansible +# still belong to the author of the module, and may assign their own license +# to the complete work. +# +# (c) 2018 Red Hat Inc. +# +# Redistribution and use in source and binary forms, with or without modification, +# are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# +import json +from ansible.module_utils._text import to_text +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import to_list +from ansible.module_utils.connection import Connection, ConnectionError + +_DEVICE_CONFIGS = None + + +def get_connection(module): + if hasattr(module, '_edgeos_connection'): + return module._edgeos_connection + + capabilities = get_capabilities(module) + network_api = capabilities.get('network_api') + if network_api == 'cliconf': + module._edgeos_connection = Connection(module._socket_path) + else: + module.fail_json(msg='Invalid connection type %s' % network_api) + + return module._edgeos_connection + + +def get_capabilities(module): + if hasattr(module, '_edgeos_capabilities'): + return module._edgeos_capabilities + + capabilities = Connection(module._socket_path).get_capabilities() + module._edgeos_capabilities = json.loads(capabilities) + return module._edgeos_capabilities + + +def get_config(module): + global _DEVICE_CONFIGS + + if _DEVICE_CONFIGS is not None: + return _DEVICE_CONFIGS + else: + connection = get_connection(module) + out = connection.get_config() + cfg = to_text(out, errors='surrogate_then_replace').strip() + _DEVICE_CONFIGS = cfg + return cfg + + +def run_commands(module, commands, check_rc=True): + responses = list() + connection = get_connection(module) + + for cmd in to_list(commands): + if isinstance(cmd, dict): + command = cmd['command'] + prompt = cmd['prompt'] + answer = cmd['answer'] + else: + command = cmd + prompt = None + answer = None + + try: + out = connection.get(command, prompt, answer) + except ConnectionError as exc: + module.fail_json(msg=to_text(exc)) + + try: + out = to_text(out, errors='surrogate_or_strict') + except UnicodeError: + module.fail_json(msg=u'Failed to decode output from %s: %s' % + (cmd, to_text(out))) + + responses.append(out) + + return responses + + +def load_config(module, commands, commit=False, comment=None): + connection = get_connection(module) + + try: + out = connection.edit_config(commands) + except ConnectionError as exc: + module.fail_json(msg=to_text(exc)) + + diff = None + if module._diff: + out = connection.get('compare') + out = to_text(out, errors='surrogate_or_strict') + + if not out.startswith('No changes'): + out = connection.get('show') + diff = to_text(out, errors='surrogate_or_strict').strip() + + if commit: + try: + out = connection.commit(comment) + except ConnectionError: + connection.discard_changes() + module.fail_json(msg='commit failed: %s' % out) + + if not 
commit: + connection.discard_changes() + else: + connection.get('exit') + + if diff: + return diff diff --git a/plugins/module_utils/network/edgeswitch/__init__.py b/plugins/module_utils/network/edgeswitch/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/plugins/module_utils/network/edgeswitch/edgeswitch.py b/plugins/module_utils/network/edgeswitch/edgeswitch.py new file mode 100644 index 0000000000..78073c2158 --- /dev/null +++ b/plugins/module_utils/network/edgeswitch/edgeswitch.py @@ -0,0 +1,168 @@ +# This code is part of Ansible, but is an independent component. +# This particular file snippet, and this file snippet only, is BSD licensed. +# Modules you write using this snippet, which is embedded dynamically by Ansible +# still belong to the author of the module, and may assign their own license +# to the complete work. +# +# (c) 2018 Red Hat Inc. +# +# Redistribution and use in source and binary forms, with or without modification, +# are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+# +import json +import re + +from copy import deepcopy + +from ansible.module_utils._text import to_text +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import to_list, ComplexList +from ansible.module_utils.connection import Connection, ConnectionError +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import remove_default_spec + +_DEVICE_CONFIGS = {} + + +def build_aggregate_spec(element_spec, required, *extra_spec): + aggregate_spec = deepcopy(element_spec) + for elt in required: + aggregate_spec[elt] = dict(required=True) + remove_default_spec(aggregate_spec) + argument_spec = dict( + aggregate=dict(type='list', elements='dict', options=aggregate_spec) + ) + argument_spec.update(element_spec) + for elt in extra_spec: + argument_spec.update(elt) + return argument_spec + + +def map_params_to_obj(module): + obj = [] + aggregate = module.params.get('aggregate') + if aggregate: + for item in aggregate: + for key in item: + if item.get(key) is None: + item[key] = module.params[key] + + d = item.copy() + obj.append(d) + else: + obj.append(module.params) + + return obj + + +def get_connection(module): + if hasattr(module, '_edgeswitch_connection'): + return module._edgeswitch_connection + + capabilities = get_capabilities(module) + network_api = capabilities.get('network_api') + if network_api == 'cliconf': + module._edgeswitch_connection = Connection(module._socket_path) + else: + module.fail_json(msg='Invalid connection type %s' % network_api) + + return module._edgeswitch_connection + + +def get_capabilities(module): + if hasattr(module, '_edgeswitch_capabilities'): + return module._edgeswitch_capabilities + try: + capabilities = Connection(module._socket_path).get_capabilities() + except ConnectionError as exc: + module.fail_json(msg=to_text(exc, errors='surrogate_then_replace')) + module._edgeswitch_capabilities = json.loads(capabilities) + return module._edgeswitch_capabilities + + +def get_defaults_flag(module): + connection = get_connection(module) + try: + out = connection.get_defaults_flag() + except ConnectionError as exc: + module.fail_json(msg=to_text(exc, errors='surrogate_then_replace')) + return to_text(out, errors='surrogate_then_replace').strip() + + +def get_config(module, flags=None): + flag_str = ' '.join(to_list(flags)) + + try: + return _DEVICE_CONFIGS[flag_str] + except KeyError: + connection = get_connection(module) + try: + out = connection.get_config(flags=flags) + except ConnectionError as exc: + module.fail_json(msg=to_text(exc, errors='surrogate_then_replace')) + cfg = to_text(out, errors='surrogate_then_replace').strip() + _DEVICE_CONFIGS[flag_str] = cfg + return cfg + + +def get_interfaces_config(module): + config = get_config(module) + lines = config.split('\n') + interfaces = {} + interface = None + for line in lines: + if line == 'exit': + if interface: + interfaces[interface[0]] = interface + interface = None + elif interface: + interface.append(line) + else: + match = re.match(r'^interface (.*)$', line) + if match: + interface = list() + interface.append(line) + + return interfaces + + +def to_commands(module, commands): + spec = { + 'command': dict(key=True), + 'prompt': dict(), + 'answer': dict() + } + transform = ComplexList(spec, module) + return transform(commands) + + +def run_commands(module, commands, check_rc=True): + connection = get_connection(module) + try: + return connection.run_commands(commands=commands, check_rc=check_rc) + except ConnectionError as exc: + 
module.fail_json(msg=to_text(exc)) + + +def load_config(module, commands): + connection = get_connection(module) + + try: + resp = connection.edit_config(commands) + return resp.get('response') + except ConnectionError as exc: + module.fail_json(msg=to_text(exc)) diff --git a/plugins/module_utils/network/edgeswitch/edgeswitch_interface.py b/plugins/module_utils/network/edgeswitch/edgeswitch_interface.py new file mode 100644 index 0000000000..793d0e0831 --- /dev/null +++ b/plugins/module_utils/network/edgeswitch/edgeswitch_interface.py @@ -0,0 +1,91 @@ +# This code is part of Ansible, but is an independent component. +# This particular file snippet, and this file snippet only, is BSD licensed. +# Modules you write using this snippet, which is embedded dynamically by Ansible +# still belong to the author of the module, and may assign their own license +# to the complete work. +# +# (c) 2018 Red Hat Inc. +# +# Redistribution and use in source and binary forms, with or without modification, +# are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+# + +import re + + +class InterfaceConfiguration: + def __init__(self): + self.commands = [] + self.merged = False + + def has_same_commands(self, interface): + len1 = len(self.commands) + len2 = len(interface.commands) + return len1 == len2 and len1 == len(frozenset(self.commands).intersection(interface.commands)) + + +def merge_interfaces(interfaces): + """ to reduce commands generated by an edgeswitch module + we take interfaces one by one and we try to merge them with neighbors if everyone has same commands to run + """ + merged = {} + + for i, interface in interfaces.items(): + if interface.merged: + continue + interface.merged = True + + match = re.match(r'(\d+)\/(\d+)', i) + group = int(match.group(1)) + start = int(match.group(2)) + end = start + + while True: + try: + start = start - 1 + key = '{0}/{1}'.format(group, start) + neighbor = interfaces[key] + if not neighbor.merged and interface.has_same_commands(neighbor): + neighbor.merged = True + else: + break + except KeyError: + break + start = start + 1 + + while True: + try: + end = end + 1 + key = '{0}/{1}'.format(group, end) + neighbor = interfaces[key] + if not neighbor.merged and interface.has_same_commands(neighbor): + neighbor.merged = True + else: + break + except KeyError: + break + end = end - 1 + + if end == start: + key = '{0}/{1}'.format(group, start) + else: + key = '{0}/{1}-{2}/{3}'.format(group, start, group, end) + + merged[key] = interface + return merged diff --git a/plugins/module_utils/network/enos/__init__.py b/plugins/module_utils/network/enos/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/plugins/module_utils/network/enos/enos.py b/plugins/module_utils/network/enos/enos.py new file mode 100644 index 0000000000..9cb4ba0081 --- /dev/null +++ b/plugins/module_utils/network/enos/enos.py @@ -0,0 +1,172 @@ +# This code is part of Ansible, but is an independent component. +# This particular file snippet, and this file snippet only, is BSD licensed. +# Modules you write using this snippet, which is embedded dynamically by +# Ansible still belong to the author of the module, and may assign their own +# license to the complete work. +# +# Copyright (C) 2017 Lenovo. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. 
+# +# Contains utility methods +# Lenovo Networking + +from ansible.module_utils._text import to_text +from ansible.module_utils.basic import env_fallback +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import to_list, EntityCollection +from ansible.module_utils.connection import Connection, exec_command +from ansible.module_utils.connection import ConnectionError + +_DEVICE_CONFIGS = {} +_CONNECTION = None + +enos_provider_spec = { + 'host': dict(), + 'port': dict(type='int'), + 'username': dict(fallback=(env_fallback, ['ANSIBLE_NET_USERNAME'])), + 'password': dict(fallback=(env_fallback, ['ANSIBLE_NET_PASSWORD']), no_log=True), + 'ssh_keyfile': dict(fallback=(env_fallback, ['ANSIBLE_NET_SSH_KEYFILE']), type='path'), + 'authorize': dict(fallback=(env_fallback, ['ANSIBLE_NET_AUTHORIZE']), type='bool'), + 'auth_pass': dict(fallback=(env_fallback, ['ANSIBLE_NET_AUTH_PASS']), no_log=True), + 'timeout': dict(type='int'), + 'context': dict(), + 'passwords': dict() +} + +enos_argument_spec = { + 'provider': dict(type='dict', options=enos_provider_spec), +} + +command_spec = { + 'command': dict(key=True), + 'prompt': dict(), + 'answer': dict() +} + + +def get_provider_argspec(): + return enos_provider_spec + + +def check_args(module, warnings): + pass + + +def get_connection(module): + global _CONNECTION + if _CONNECTION: + return _CONNECTION + _CONNECTION = Connection(module._socket_path) + + context = None + try: + context = module.params['context'] + except KeyError: + context = None + + if context: + if context == 'system': + command = 'changeto system' + else: + command = 'changeto context %s' % context + _CONNECTION.get(command) + + return _CONNECTION + + +def get_config(module, flags=None): + flags = [] if flags is None else flags + + passwords = None + try: + passwords = module.params['passwords'] + except KeyError: + passwords = None + if passwords: + cmd = 'more system:running-config' + else: + cmd = 'show running-config ' + cmd += ' '.join(flags) + cmd = cmd.strip() + + try: + return _DEVICE_CONFIGS[cmd] + except KeyError: + conn = get_connection(module) + out = conn.get(cmd) + cfg = to_text(out, errors='surrogate_then_replace').strip() + _DEVICE_CONFIGS[cmd] = cfg + return cfg + + +def to_commands(module, commands): + if not isinstance(commands, list): + raise AssertionError('argument must be of type <list>') + + transform = EntityCollection(module, command_spec) + commands = transform(commands) + + for index, item in enumerate(commands): + if module.check_mode and not item['command'].startswith('show'): + module.warn('only show commands are supported when using check ' + 'mode, not executing `%s`' % item['command']) + + return commands + + +def run_commands(module, commands, check_rc=True): + connection = get_connection(module) + + commands = to_commands(module, to_list(commands)) + + responses = list() + + for cmd in commands: + out = connection.get(**cmd) + responses.append(to_text(out, errors='surrogate_then_replace')) + + return responses + + +def load_config(module, config): + try: + conn = get_connection(module) + conn.get('enable') + conn.edit_config(config) + except ConnectionError as exc: + module.fail_json(msg=to_text(exc)) + + +def get_defaults_flag(module): + rc, out, err = exec_command(module, 'show running-config ?') + out = to_text(out, errors='surrogate_then_replace') + + commands = set() + for line in out.splitlines(): + if line: + commands.add(line.strip().split()[0]) + + if 'all' in commands: + return 'all' + else: + return 'full'
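The edgeos, edgeswitch, and enos module_utils above all share one pattern: lazily open a persistent Connection over module._socket_path, cache it on the module (or in a module-level global), and funnel every command through run_commands() so that ConnectionError is translated into fail_json(). A minimal sketch of a module built on the enos helpers just added (the module itself is hypothetical; enos_argument_spec and run_commands are the names defined in the file above):

from ansible.module_utils.basic import AnsibleModule
from ansible_collections.community.general.plugins.module_utils.network.enos.enos import (
    enos_argument_spec, run_commands)


def main():
    # Hypothetical command-runner module built on the enos helpers above.
    argument_spec = dict(commands=dict(type='list', required=True))
    argument_spec.update(enos_argument_spec)
    module = AnsibleModule(argument_spec=argument_spec,
                           supports_check_mode=True)
    # run_commands() normalizes each entry via to_commands()/EntityCollection,
    # warns about non-show commands in check mode, and returns the decoded
    # output of connection.get() for every command.
    responses = run_commands(module, module.params['commands'])
    module.exit_json(changed=False, stdout=responses)


if __name__ == '__main__':
    main()

The same skeleton would work for the edgeos variant, whose run_commands() also loops over connection.get(); the edgeswitch and eric_eccli variants instead delegate the whole batch to connection.run_commands() on the cliconf plugin.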
diff --git a/plugins/module_utils/network/eric_eccli/__init__.py b/plugins/module_utils/network/eric_eccli/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/plugins/module_utils/network/eric_eccli/eric_eccli.py b/plugins/module_utils/network/eric_eccli/eric_eccli.py new file mode 100644 index 0000000000..19a526ec28 --- /dev/null +++ b/plugins/module_utils/network/eric_eccli/eric_eccli.py @@ -0,0 +1,49 @@ +# +# Copyright (c) 2019 Ericsson AB. +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import json + +from ansible.module_utils._text import to_text +from ansible.module_utils.basic import env_fallback +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import to_list, ComplexList +from ansible.module_utils.connection import Connection, ConnectionError + +_DEVICE_CONFIGS = {} + + +def get_connection(module): + if hasattr(module, '_eric_eccli_connection'): + return module._eric_eccli_connection + + capabilities = get_capabilities(module) + network_api = capabilities.get('network_api') + if network_api == 'cliconf': + module._eric_eccli_connection = Connection(module._socket_path) + else: + module.fail_json(msg='Invalid connection type %s' % network_api) + + return module._eric_eccli_connection + + +def get_capabilities(module): + if hasattr(module, '_eric_eccli_capabilities'): + return module._eric_eccli_capabilities + try: + capabilities = Connection(module._socket_path).get_capabilities() + except ConnectionError as exc: + module.fail_json(msg=to_text(exc, errors='surrogate_then_replace')) + module._eric_eccli_capabilities = json.loads(capabilities) + return module._eric_eccli_capabilities + + +def run_commands(module, commands, check_rc=True): + connection = get_connection(module) + try: + return connection.run_commands(commands=commands, check_rc=check_rc) + except ConnectionError as exc: + module.fail_json(msg=to_text(exc)) diff --git a/plugins/module_utils/network/exos/__init__.py b/plugins/module_utils/network/exos/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/plugins/module_utils/network/exos/argspec/__init__.py b/plugins/module_utils/network/exos/argspec/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/plugins/module_utils/network/exos/argspec/facts/__init__.py b/plugins/module_utils/network/exos/argspec/facts/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/plugins/module_utils/network/exos/argspec/facts/facts.py b/plugins/module_utils/network/exos/argspec/facts/facts.py new file mode 100644 index 0000000000..4ab2e934ea --- /dev/null +++ b/plugins/module_utils/network/exos/argspec/facts/facts.py @@ -0,0 +1,23 @@ +# +# -*- coding: utf-8 -*- +# Copyright 2019 Red Hat +# GNU General Public License v3.0+ +# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +""" +The arg spec for the exos facts module. 
+""" +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +class FactsArgs(object): # pylint: disable=R0903 + """ The arg spec for the exos facts module + """ + + def __init__(self, **kwargs): + pass + + argument_spec = { + 'gather_subset': dict(default=['!config'], type='list'), + 'gather_network_resources': dict(type='list'), + } diff --git a/plugins/module_utils/network/exos/argspec/l2_interfaces/__init__.py b/plugins/module_utils/network/exos/argspec/l2_interfaces/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/plugins/module_utils/network/exos/argspec/l2_interfaces/l2_interfaces.py b/plugins/module_utils/network/exos/argspec/l2_interfaces/l2_interfaces.py new file mode 100644 index 0000000000..3c6f250811 --- /dev/null +++ b/plugins/module_utils/network/exos/argspec/l2_interfaces/l2_interfaces.py @@ -0,0 +1,48 @@ +# +# -*- coding: utf-8 -*- +# Copyright 2019 Red Hat +# GNU General Public License v3.0+ +# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +############################################# +# WARNING # +############################################# +# +# This file is auto generated by the resource +# module builder playbook. +# +# Do not edit this file manually. +# +# Changes to this file will be over written +# by the resource module builder. +# +# Changes should be made in the model used to +# generate this file or in the resource module +# builder template. +# +############################################# +""" +The arg spec for the exos_l2_interfaces module +""" +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +class L2_interfacesArgs(object): # pylint: disable=R0903 + """The arg spec for the exos_l2_interfaces module + """ + def __init__(self, **kwargs): + pass + + argument_spec = { + 'config': { + 'elements': 'dict', + 'options': { + 'access': {'options': {'vlan': {'type': 'int'}}, + 'type': 'dict'}, + 'name': {'required': True, 'type': 'str'}, + 'trunk': {'options': {'native_vlan': {'type': 'int'}, 'trunk_allowed_vlans': {'type': 'list'}}, + 'type': 'dict'}}, + 'type': 'list'}, + 'state': {'choices': ['merged', 'replaced', 'overridden', 'deleted'], 'default': 'merged', 'type': 'str'} + } # pylint: disable=C0301 diff --git a/plugins/module_utils/network/exos/argspec/lldp_global/__init__.py b/plugins/module_utils/network/exos/argspec/lldp_global/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/plugins/module_utils/network/exos/argspec/lldp_global/lldp_global.py b/plugins/module_utils/network/exos/argspec/lldp_global/lldp_global.py new file mode 100644 index 0000000000..4106c53428 --- /dev/null +++ b/plugins/module_utils/network/exos/argspec/lldp_global/lldp_global.py @@ -0,0 +1,57 @@ +# +# -*- coding: utf-8 -*- +# Copyright 2019 Red Hat +# GNU General Public License v3.0+ +# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +############################################# +# WARNING # +############################################# +# +# This file is auto generated by the resource +# module builder playbook. +# +# Do not edit this file manually. +# +# Changes to this file will be over written +# by the resource module builder. +# +# Changes should be made in the model used to +# generate this file or in the resource module +# builder template. 
+# +############################################# + +""" +The arg spec for the exos_lldp_global module +""" +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +class Lldp_globalArgs(object): # pylint: disable=R0903 + """The arg spec for the exos_lldp_global module + """ + + def __init__(self, **kwargs): + pass + + argument_spec = { + 'config': { + 'options': { + 'interval': {'default': 30, 'type': 'int'}, + 'tlv_select': { + 'options': { + 'management_address': {'type': 'bool'}, + 'port_description': {'type': 'bool'}, + 'system_capabilities': {'type': 'bool'}, + 'system_description': { + 'default': True, + 'type': 'bool'}, + 'system_name': {'default': True, 'type': 'bool'}}, + 'type': 'dict'}}, + 'type': 'dict'}, + 'state': { + 'choices': ['merged', 'replaced', 'deleted'], + 'default': 'merged', + 'type': 'str'}} # pylint: disable=C0301 diff --git a/plugins/module_utils/network/exos/argspec/lldp_interfaces/__init__.py b/plugins/module_utils/network/exos/argspec/lldp_interfaces/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/plugins/module_utils/network/exos/argspec/lldp_interfaces/lldp_interfaces.py b/plugins/module_utils/network/exos/argspec/lldp_interfaces/lldp_interfaces.py new file mode 100644 index 0000000000..c2a981f919 --- /dev/null +++ b/plugins/module_utils/network/exos/argspec/lldp_interfaces/lldp_interfaces.py @@ -0,0 +1,49 @@ +# +# -*- coding: utf-8 -*- +# Copyright 2019 Red Hat +# GNU General Public License v3.0+ +# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +############################################# +# WARNING # +############################################# +# +# This file is auto generated by the resource +# module builder playbook. +# +# Do not edit this file manually. +# +# Changes to this file will be over written +# by the resource module builder. +# +# Changes should be made in the model used to +# generate this file or in the resource module +# builder template. +# +############################################# + +""" +The arg spec for the exos_lldp_interfaces module +""" +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +class Lldp_interfacesArgs(object): # pylint: disable=R0903 + """The arg spec for the exos_lldp_interfaces module + """ + + def __init__(self, **kwargs): + pass + + argument_spec = { + 'config': { + 'elements': 'dict', + 'options': { + 'enabled': {'type': 'bool'}, + 'name': {'required': True, 'type': 'str'}}, + 'type': 'list'}, + 'state': { + 'choices': ['merged', 'replaced', 'overridden', 'deleted'], + 'default': 'merged', + 'type': 'str'}} # pylint: disable=C0301 diff --git a/plugins/module_utils/network/exos/argspec/vlans/__init__.py b/plugins/module_utils/network/exos/argspec/vlans/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/plugins/module_utils/network/exos/argspec/vlans/vlans.py b/plugins/module_utils/network/exos/argspec/vlans/vlans.py new file mode 100644 index 0000000000..538a155a7d --- /dev/null +++ b/plugins/module_utils/network/exos/argspec/vlans/vlans.py @@ -0,0 +1,53 @@ +# +# -*- coding: utf-8 -*- +# Copyright 2019 Red Hat +# GNU General Public License v3.0+ +# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +############################################# +# WARNING # +############################################# +# +# This file is auto generated by the resource +# module builder playbook. +# +# Do not edit this file manually. 
+# +# Changes to this file will be over written +# by the resource module builder. +# +# Changes should be made in the model used to +# generate this file or in the resource module +# builder template. +# +############################################# + +""" +The arg spec for the exos_vlans module +""" +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +class VlansArgs(object):  # pylint: disable=R0903 + """The arg spec for the exos_vlans module + """ + + def __init__(self, **kwargs): + pass + + argument_spec = { + 'config': { + 'elements': 'dict', + 'options': { + 'name': {'type': 'str'}, + 'state': { + 'choices': ['active', 'suspend'], + 'default': 'active', + 'type': 'str'}, + 'vlan_id': {'required': True, 'type': 'int'}}, + 'type': 'list'}, + 'state': { + 'choices': ['merged', 'replaced', 'overridden', 'deleted'], + 'default': 'merged', + 'type': 'str'}}  # pylint: disable=C0301 diff --git a/plugins/module_utils/network/exos/config/__init__.py b/plugins/module_utils/network/exos/config/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/plugins/module_utils/network/exos/config/l2_interfaces/__init__.py b/plugins/module_utils/network/exos/config/l2_interfaces/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/plugins/module_utils/network/exos/config/l2_interfaces/l2_interfaces.py b/plugins/module_utils/network/exos/config/l2_interfaces/l2_interfaces.py new file mode 100644 index 0000000000..51f8951db2 --- /dev/null +++ b/plugins/module_utils/network/exos/config/l2_interfaces/l2_interfaces.py @@ -0,0 +1,294 @@ +# +# -*- coding: utf-8 -*- +# Copyright 2019 Red Hat +# GNU General Public License v3.0+ +# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +""" +The exos_l2_interfaces class +It is in this file where the current configuration (as dict) +is compared to the provided configuration (as dict) and the command set +necessary to bring the current configuration to its desired end-state is +created +""" +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +import json +from copy import deepcopy +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.cfg.base import ConfigBase +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import to_list, dict_diff +from ansible_collections.community.general.plugins.module_utils.network.exos.facts.facts import Facts +from ansible_collections.community.general.plugins.module_utils.network.exos.exos import send_requests + + +class L2_interfaces(ConfigBase): + """ + The exos_l2_interfaces class + """ + + gather_subset = [ + '!all', + '!min', + ] + + gather_network_resources = [ + 'l2_interfaces', + ] + + L2_INTERFACE_NATIVE = { + "data": { + "openconfig-vlan:config": { + "interface-mode": "TRUNK", + "native-vlan": None, + "trunk-vlans": [] + } + }, + "method": "PATCH", + "path": None + } + + L2_INTERFACE_TRUNK = { + "data": { + "openconfig-vlan:config": { + "interface-mode": "TRUNK", + "trunk-vlans": [] + } + }, + "method": "PATCH", + "path": None + } + + L2_INTERFACE_ACCESS = { + "data": { + "openconfig-vlan:config": { + "interface-mode": "ACCESS", + "access-vlan": None + } + }, + "method": "PATCH", + "path": None + } + + L2_PATH = "/rest/restconf/data/openconfig-interfaces:interfaces/interface=" + + def __init__(self, module): + super(L2_interfaces, self).__init__(module) + + def get_l2_interfaces_facts(self): + """ Get the 'facts' (the current configuration) +
:rtype: A dictionary + :returns: The current configuration as a dictionary + """ + facts, _warnings = Facts(self._module).get_facts( + self.gather_subset, self.gather_network_resources) + l2_interfaces_facts = facts['ansible_network_resources'].get( + 'l2_interfaces') + if not l2_interfaces_facts: + return [] + return l2_interfaces_facts + + def execute_module(self): + """ Execute the module + + :rtype: A dictionary + :returns: The result from module execution + """ + result = {'changed': False} + warnings = list() + requests = list() + + existing_l2_interfaces_facts = self.get_l2_interfaces_facts() + requests.extend(self.set_config(existing_l2_interfaces_facts)) + if requests: + if not self._module.check_mode: + send_requests(self._module, requests=requests) + result['changed'] = True + result['requests'] = requests + + changed_l2_interfaces_facts = self.get_l2_interfaces_facts() + + result['before'] = existing_l2_interfaces_facts + if result['changed']: + result['after'] = changed_l2_interfaces_facts + + result['warnings'] = warnings + return result + + def set_config(self, existing_l2_interfaces_facts): + """ Collect the configuration from the args passed to the module, + collect the current configuration (as a dict from facts) + + :rtype: A list + :returns: the requests necessary to migrate the current configuration + to the desired configuration + """ + want = self._module.params['config'] + have = existing_l2_interfaces_facts + resp = self.set_state(want, have) + return to_list(resp) + + def set_state(self, want, have): + """ Select the appropriate function based on the state provided + + :param want: the desired configuration as a dictionary + :param have: the current configuration as a dictionary + :rtype: A list + :returns: the requests necessary to migrate the current configuration + to the desired configuration + """ + state = self._module.params['state'] + if state == 'overridden': + requests = self._state_overridden(want, have) + elif state == 'deleted': + requests = self._state_deleted(want, have) + elif state == 'merged': + requests = self._state_merged(want, have) + elif state == 'replaced': + requests = self._state_replaced(want, have) + return requests + + def _state_replaced(self, want, have): + """ The request generator when state is replaced + + :rtype: A list + :returns: the requests necessary to migrate the current configuration + to the desired configuration + """ + requests = [] + for w in want: + for h in have: + if w["name"] == h["name"]: + if dict_diff(w, h): + l2_request = self._update_patch_request(w, h) + l2_request["data"] = json.dumps(l2_request["data"]) + requests.append(l2_request) + break + + return requests + + def _state_overridden(self, want, have): + """ The request generator when state is overridden + + :rtype: A list + :returns: the requests necessary to migrate the current configuration + to the desired configuration + """ + requests = [] + have_copy = [] + for w in want: + for h in have: + if w["name"] == h["name"]: + if dict_diff(w, h): + l2_request = self._update_patch_request(w, h) + l2_request["data"] = json.dumps(l2_request["data"]) + requests.append(l2_request) + have_copy.append(h) + break + + for h in have: + if h not in have_copy: + l2_delete = self._update_delete_request(h) + if l2_delete["path"]: + l2_delete["data"] = json.dumps(l2_delete["data"]) + requests.append(l2_delete) + + return requests + + def _state_merged(self, want, have): + """ The request generator when state is merged + + :rtype: A list + :returns: the requests 
necessary to merge the provided into + the current configuration + """ + requests = [] + for w in want: + for h in have: + if w["name"] == h["name"]: + if dict_diff(h, w): + l2_request = self._update_patch_request(w, h) + l2_request["data"] = json.dumps(l2_request["data"]) + requests.append(l2_request) + break + + return requests + + def _state_deleted(self, want, have): + """ The request generator when state is deleted + + :rtype: A list + :returns: the requests necessary to remove the current configuration + of the provided objects + """ + requests = [] + if want: + for w in want: + for h in have: + if w["name"] == h["name"]: + l2_delete = self._update_delete_request(h) + if l2_delete["path"]: + l2_delete["data"] = json.dumps(l2_delete["data"]) + requests.append(l2_delete) + break + + else: + for h in have: + l2_delete = self._update_delete_request(h) + if l2_delete["path"]: + l2_delete["data"] = json.dumps(l2_delete["data"]) + requests.append(l2_delete) + + return requests + + def _update_patch_request(self, want, have): + + facts, _warnings = Facts(self._module).get_facts( + self.gather_subset, ['vlans', ]) + vlans_facts = facts['ansible_network_resources'].get('vlans') + + vlan_id = [] + + for vlan in vlans_facts: + vlan_id.append(vlan['vlan_id']) + + if want.get("access"): + if want["access"]["vlan"] in vlan_id: + l2_request = deepcopy(self.L2_INTERFACE_ACCESS) + l2_request["data"]["openconfig-vlan:config"]["access-vlan"] = want["access"]["vlan"] + l2_request["path"] = self.L2_PATH + str(want["name"]) + "/openconfig-if-ethernet:ethernet/openconfig-vlan:switched-vlan/config" + else: + self._module.fail_json(msg="VLAN %s does not exist" % (want["access"]["vlan"])) + + elif want.get("trunk"): + if want["trunk"]["native_vlan"]: + if want["trunk"]["native_vlan"] in vlan_id: + l2_request = deepcopy(self.L2_INTERFACE_NATIVE) + l2_request["data"]["openconfig-vlan:config"]["native-vlan"] = want["trunk"]["native_vlan"] + l2_request["path"] = self.L2_PATH + str(want["name"]) + "/openconfig-if-ethernet:ethernet/openconfig-vlan:switched-vlan/config" + for vlan in want["trunk"]["trunk_allowed_vlans"]: + if int(vlan) in vlan_id: + l2_request["data"]["openconfig-vlan:config"]["trunk-vlans"].append(int(vlan)) + else: + self._module.fail_json(msg="VLAN %s does not exist" % (vlan)) + else: + self._module.fail_json(msg="VLAN %s does not exist" % (want["trunk"]["native_vlan"])) + else: + l2_request = deepcopy(self.L2_INTERFACE_TRUNK) + l2_request["path"] = self.L2_PATH + str(want["name"]) + "/openconfig-if-ethernet:ethernet/openconfig-vlan:switched-vlan/config" + for vlan in want["trunk"]["trunk_allowed_vlans"]: + if int(vlan) in vlan_id: + l2_request["data"]["openconfig-vlan:config"]["trunk-vlans"].append(int(vlan)) + else: + self._module.fail_json(msg="VLAN %s does not exist" % (vlan)) + return l2_request + + def _update_delete_request(self, have): + + l2_request = deepcopy(self.L2_INTERFACE_ACCESS) + + if have["access"] and have["access"]["vlan"] != 1 or have["trunk"] or not have["access"]: + l2_request["data"]["openconfig-vlan:config"]["access-vlan"] = 1 + l2_request["path"] = self.L2_PATH + str(have["name"]) + "/openconfig-if-ethernet:ethernet/openconfig-vlan:switched-vlan/config" + + return l2_request diff --git a/plugins/module_utils/network/exos/config/lldp_global/__init__.py b/plugins/module_utils/network/exos/config/lldp_global/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/plugins/module_utils/network/exos/config/lldp_global/lldp_global.py 
b/plugins/module_utils/network/exos/config/lldp_global/lldp_global.py new file mode 100644 index 0000000000..0bac6bf505 --- /dev/null +++ b/plugins/module_utils/network/exos/config/lldp_global/lldp_global.py @@ -0,0 +1,199 @@ +# +# -*- coding: utf-8 -*- +# Copyright 2019 Red Hat +# GNU General Public License v3.0+ +# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +""" +The exos_lldp_global class +It is in this file where the current configuration (as dict) +is compared to the provided configuration (as dict) and the command set +necessary to bring the current configuration to it's desired end-state is +created +""" +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.cfg.base import ConfigBase +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import to_list +from ansible_collections.community.general.plugins.module_utils.network.exos.facts.facts import Facts +from ansible_collections.community.general.plugins.module_utils.network.exos.exos import send_requests + +import json +from copy import deepcopy + + +class Lldp_global(ConfigBase): + """ + The exos_lldp_global class + """ + + gather_subset = [ + '!all', + '!min', + ] + + gather_network_resources = [ + 'lldp_global', + ] + + LLDP_DEFAULT_INTERVAL = 30 + LLDP_DEFAULT_TLV = { + 'system_name': True, + 'system_description': True, + 'system_capabilities': False, + 'port_description': False, + 'management_address': False + } + LLDP_REQUEST = { + "data": {"openconfig-lldp:config": {}}, + "method": "PUT", + "path": "/rest/restconf/data/openconfig-lldp:lldp/config" + } + + def __init__(self, module): + super(Lldp_global, self).__init__(module) + + def get_lldp_global_facts(self): + """ Get the 'facts' (the current configuration) + + :rtype: A dictionary + :returns: The current configuration as a dictionary + """ + facts, _warnings = Facts(self._module).get_facts( + self.gather_subset, self.gather_network_resources) + lldp_global_facts = facts['ansible_network_resources'].get('lldp_global') + if not lldp_global_facts: + return {} + return lldp_global_facts + + def execute_module(self): + """ Execute the module + + :rtype: A dictionary + :returns: The result from module execution + """ + result = {'changed': False} + warnings = list() + requests = list() + + existing_lldp_global_facts = self.get_lldp_global_facts() + requests.extend(self.set_config(existing_lldp_global_facts)) + if requests: + if not self._module.check_mode: + send_requests(self._module, requests) + result['changed'] = True + result['requests'] = requests + + changed_lldp_global_facts = self.get_lldp_global_facts() + + result['before'] = existing_lldp_global_facts + if result['changed']: + result['after'] = changed_lldp_global_facts + + result['warnings'] = warnings + return result + + def set_config(self, existing_lldp_global_facts): + """ Collect the configuration from the args passed to the module, + collect the current configuration (as a dict from facts) + + :rtype: A list + :returns: the requests necessary to migrate the current configuration + to the desired configuration + """ + want = self._module.params['config'] + have = existing_lldp_global_facts + resp = self.set_state(want, have) + return to_list(resp) + + def set_state(self, want, have): + """ Select the appropriate function based on the state provided + + :param want: the desired configuration as a dictionary + :param have: the current configuration as 
a dictionary + :rtype: A list + :returns: the requests necessary to migrate the current configuration + to the desired configuration + """ + state = self._module.params['state'] + + if state == 'deleted': + requests = self._state_deleted(want, have) + elif state == 'merged': + requests = self._state_merged(want, have) + elif state == 'replaced': + requests = self._state_replaced(want, have) + + return requests + + def _state_replaced(self, want, have): + """ The request generator when state is replaced + + :rtype: A list + :returns: the requests necessary to migrate the current configuration + to the desired configuration + """ + requests = [] + requests.extend(self._state_deleted(want, have)) + requests.extend(self._state_merged(want, have)) + return requests + + def _state_merged(self, want, have): + """ The request generator when state is merged + + :rtype: A list + :returns: the requests necessary to merge the provided into + the current configuration + """ + requests = [] + + request = deepcopy(self.LLDP_REQUEST) + self._update_lldp_config_body_if_diff(want, have, request) + + if len(request["data"]["openconfig-lldp:config"]): + request["data"] = json.dumps(request["data"]) + requests.append(request) + + return requests + + def _state_deleted(self, want, have): + """ The request generator when state is deleted + + :rtype: A list + :returns: the requests necessary to remove the current configuration + of the provided objects + """ + requests = [] + + request = deepcopy(self.LLDP_REQUEST) + if want: + self._update_lldp_config_body_if_diff(want, have, request) + else: + if self.LLDP_DEFAULT_INTERVAL != have['interval']: + request["data"]["openconfig-lldp:config"].update( + {"hello-timer": self.LLDP_DEFAULT_INTERVAL}) + + if have['tlv_select'] != self.LLDP_DEFAULT_TLV: + request["data"]["openconfig-lldp:config"].update( + {"suppress-tlv-advertisement": [key.upper() for key, value in self.LLDP_DEFAULT_TLV.items() if not value]}) + request["data"]["openconfig-lldp:config"]["suppress-tlv-advertisement"].sort() + if len(request["data"]["openconfig-lldp:config"]): + request["data"] = json.dumps(request["data"]) + requests.append(request) + + return requests + + def _update_lldp_config_body_if_diff(self, want, have, request): + if want.get('interval'): + if want['interval'] != have['interval']: + request["data"]["openconfig-lldp:config"].update( + {"hello-timer": want['interval']}) + if want.get('tlv_select'): + # Create list of TLVs to be suppressed which aren't already + want_suppress = [key.upper() for key, value in want["tlv_select"].items() if have["tlv_select"][key] != value and value is False] + if want_suppress: + # Add previously suppressed TLVs to the list as we are doing a PUT op + want_suppress.extend([key.upper() for key, value in have["tlv_select"].items() if value is False]) + request["data"]["openconfig-lldp:config"].update( + {"suppress-tlv-advertisement": want_suppress}) + request["data"]["openconfig-lldp:config"]["suppress-tlv-advertisement"].sort() diff --git a/plugins/module_utils/network/exos/config/lldp_interfaces/__init__.py b/plugins/module_utils/network/exos/config/lldp_interfaces/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/plugins/module_utils/network/exos/config/lldp_interfaces/lldp_interfaces.py b/plugins/module_utils/network/exos/config/lldp_interfaces/lldp_interfaces.py new file mode 100644 index 0000000000..6b81806b6a --- /dev/null +++ b/plugins/module_utils/network/exos/config/lldp_interfaces/lldp_interfaces.py @@ -0,0 +1,243 @@ +# 
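+# A sketch of the RESTCONF request shape this file builds (the
+# interface name is illustrative; "data" is JSON-serialized with
+# json.dumps() before being sent):
+#
+#   {"method": "PATCH",
+#    "path": "/rest/restconf/data/openconfig-lldp:lldp/interfaces/interface=1/config",
+#    "data": '{"openconfig-lldp:config": {"name": "1", "enabled": true}}'}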
+# -*- coding: utf-8 -*- +# Copyright 2019 Red Hat +# GNU General Public License v3.0+ +# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +""" +The exos_lldp_interfaces class +It is in this file where the current configuration (as dict) +is compared to the provided configuration (as dict) and the command set +necessary to bring the current configuration to it's desired end-state is +created +""" +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +import json +from copy import deepcopy +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.cfg.base import ConfigBase +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import to_list, dict_diff +from ansible_collections.community.general.plugins.module_utils.network.exos.facts.facts import Facts +from ansible_collections.community.general.plugins.module_utils.network.exos.exos import send_requests + + +class Lldp_interfaces(ConfigBase): + """ + The exos_lldp_interfaces class + """ + + gather_subset = [ + '!all', + '!min', + ] + + gather_network_resources = [ + 'lldp_interfaces', + ] + + LLDP_INTERFACE = { + "data": { + "openconfig-lldp:config": { + "name": None, + "enabled": True + } + }, + "method": "PATCH", + "path": None + } + + LLDP_PATH = "/rest/restconf/data/openconfig-lldp:lldp/interfaces/interface=" + + def __init__(self, module): + super(Lldp_interfaces, self).__init__(module) + + def get_lldp_interfaces_facts(self): + """ Get the 'facts' (the current configuration) + + :rtype: A dictionary + :returns: The current configuration as a dictionary + """ + facts, _warnings = Facts(self._module).get_facts( + self.gather_subset, self.gather_network_resources) + lldp_interfaces_facts = facts['ansible_network_resources'].get( + 'lldp_interfaces') + if not lldp_interfaces_facts: + return [] + return lldp_interfaces_facts + + def execute_module(self): + """ Execute the module + + :rtype: A dictionary + :returns: The result from module execution + """ + result = {'changed': False} + warnings = list() + requests = list() + + existing_lldp_interfaces_facts = self.get_lldp_interfaces_facts() + requests.extend(self.set_config(existing_lldp_interfaces_facts)) + if requests: + if not self._module.check_mode: + send_requests(self._module, requests=requests) + result['changed'] = True + result['requests'] = requests + + changed_lldp_interfaces_facts = self.get_lldp_interfaces_facts() + + result['before'] = existing_lldp_interfaces_facts + if result['changed']: + result['after'] = changed_lldp_interfaces_facts + + result['warnings'] = warnings + return result + + def set_config(self, existing_lldp_interfaces_facts): + """ Collect the configuration from the args passed to the module, + collect the current configuration (as a dict from facts) + + :rtype: A list + :returns: the requests necessary to migrate the current configuration + to the desired configuration + """ + want = self._module.params['config'] + have = existing_lldp_interfaces_facts + resp = self.set_state(want, have) + return to_list(resp) + + def set_state(self, want, have): + """ Select the appropriate function based on the state provided + + :param want: the desired configuration as a dictionary + :param have: the current configuration as a dictionary + :rtype: A list + :returns: the requests necessary to migrate the current configuration + to the desired configuration + """ + state = self._module.params['state'] + if state == 'overridden': + requests = self._state_overridden(want, 
have) + elif state == 'deleted': + requests = self._state_deleted(want, have) + elif state == 'merged': + requests = self._state_merged(want, have) + elif state == 'replaced': + requests = self._state_replaced(want, have) + return requests + + def _state_replaced(self, want, have): + """ The request generator when state is replaced + + :rtype: A list + :returns: the requests necessary to migrate the current configuration + to the desired configuration + """ + requests = [] + + for w in want: + for h in have: + if w['name'] == h['name']: + lldp_request = self._update_patch_request(w, h) + if lldp_request["path"]: + lldp_request["data"] = json.dumps(lldp_request["data"]) + requests.append(lldp_request) + + return requests + + def _state_overridden(self, want, have): + """ The request generator when state is overridden + + :rtype: A list + :returns: the requests necessary to migrate the current configuration + to the desired configuration + """ + requests = [] + have_copy = [] + for w in want: + for h in have: + if w['name'] == h['name']: + lldp_request = self._update_patch_request(w, h) + if lldp_request["path"]: + lldp_request["data"] = json.dumps(lldp_request["data"]) + requests.append(lldp_request) + have_copy.append(h) + + for h in have: + if h not in have_copy: + if not h['enabled']: + lldp_delete = self._update_delete_request(h) + if lldp_delete["path"]: + lldp_delete["data"] = json.dumps(lldp_delete["data"]) + requests.append(lldp_delete) + + return requests + + def _state_merged(self, want, have): + """ The request generator when state is merged + + :rtype: A list + :returns: the requests necessary to merge the provided into + the current configuration + """ + requests = [] + for w in want: + for h in have: + if w['name'] == h['name']: + lldp_request = self._update_patch_request(w, h) + if lldp_request["path"]: + lldp_request["data"] = json.dumps(lldp_request["data"]) + requests.append(lldp_request) + + return requests + + def _state_deleted(self, want, have): + """ The request generator when state is deleted + + :rtype: A list + :returns: the requests necessary to remove the current configuration + of the provided objects + """ + requests = [] + if want: + for w in want: + for h in have: + if w['name'] == h['name']: + if not h['enabled']: + lldp_delete = self._update_delete_request(h) + if lldp_delete["path"]: + lldp_delete["data"] = json.dumps( + lldp_delete["data"]) + requests.append(lldp_delete) + else: + for h in have: + if not h['enabled']: + lldp_delete = self._update_delete_request(h) + if lldp_delete["path"]: + lldp_delete["data"] = json.dumps(lldp_delete["data"]) + requests.append(lldp_delete) + + return requests + + def _update_patch_request(self, want, have): + + lldp_request = deepcopy(self.LLDP_INTERFACE) + + if have['enabled'] != want['enabled']: + lldp_request["data"]["openconfig-lldp:config"]["name"] = want[ + 'name'] + lldp_request["data"]["openconfig-lldp:config"]["enabled"] = want[ + 'enabled'] + lldp_request["path"] = self.LLDP_PATH + str( + want['name']) + "/config" + + return lldp_request + + def _update_delete_request(self, have): + + lldp_delete = deepcopy(self.LLDP_INTERFACE) + + lldp_delete["data"]["openconfig-lldp:config"]["name"] = have['name'] + lldp_delete["data"]["openconfig-lldp:config"]["enabled"] = True + lldp_delete["path"] = self.LLDP_PATH + str(have['name']) + "/config" + + return lldp_delete diff --git a/plugins/module_utils/network/exos/config/vlans/__init__.py b/plugins/module_utils/network/exos/config/vlans/__init__.py new file mode 100644 
index 0000000000..e69de29bb2 diff --git a/plugins/module_utils/network/exos/config/vlans/vlans.py b/plugins/module_utils/network/exos/config/vlans/vlans.py new file mode 100644 index 0000000000..bd4c102025 --- /dev/null +++ b/plugins/module_utils/network/exos/config/vlans/vlans.py @@ -0,0 +1,277 @@ +# +# -*- coding: utf-8 -*- +# Copyright 2019 Red Hat +# GNU General Public License v3.0+ +# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +""" +The exos_vlans class +It is in this file where the current configuration (as dict) +is compared to the provided configuration (as dict) and the command set +necessary to bring the current configuration to it's desired end-state is +created +""" +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +import json +from copy import deepcopy +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.cfg.base import ConfigBase +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import to_list, dict_diff +from ansible_collections.community.general.plugins.module_utils.network.exos.facts.facts import Facts +from ansible_collections.community.general.plugins.module_utils.network.exos.exos import send_requests +from ansible_collections.community.general.plugins.module_utils.network.exos.utils.utils import search_obj_in_list + + +class Vlans(ConfigBase): + """ + The exos_vlans class + """ + + gather_subset = [ + '!all', + '!min', + ] + + gather_network_resources = [ + 'vlans', + ] + + VLAN_POST = { + "data": {"openconfig-vlan:vlans": []}, + "method": "POST", + "path": "/rest/restconf/data/openconfig-vlan:vlans/" + } + + VLAN_PATCH = { + "data": {"openconfig-vlan:vlans": {"vlan": []}}, + "method": "PATCH", + "path": "/rest/restconf/data/openconfig-vlan:vlans/" + } + + VLAN_DELETE = { + "method": "DELETE", + "path": None + } + + DEL_PATH = "/rest/restconf/data/openconfig-vlan:vlans/vlan=" + + REQUEST_BODY = { + "config": {"name": None, "status": "ACTIVE", "tpid": "oc-vlan-types:TPID_0x8100", "vlan-id": None} + } + + def __init__(self, module): + super(Vlans, self).__init__(module) + + def get_vlans_facts(self): + """ Get the 'facts' (the current configuration) + + :rtype: A dictionary + :returns: The current configuration as a dictionary + """ + facts, _warnings = Facts(self._module).get_facts( + self.gather_subset, self.gather_network_resources) + vlans_facts = facts['ansible_network_resources'].get('vlans') + if not vlans_facts: + return [] + return vlans_facts + + def execute_module(self): + """ Execute the module + + :rtype: A dictionary + :returns: The result from module execution + """ + result = {'changed': False} + warnings = list() + requests = list() + + existing_vlans_facts = self.get_vlans_facts() + requests.extend(self.set_config(existing_vlans_facts)) + if requests: + if not self._module.check_mode: + send_requests(self._module, requests=requests) + result['changed'] = True + result['requests'] = requests + + changed_vlans_facts = self.get_vlans_facts() + + result['before'] = existing_vlans_facts + if result['changed']: + result['after'] = changed_vlans_facts + + result['warnings'] = warnings + return result + + def set_config(self, existing_vlans_facts): + """ Collect the configuration from the args passed to the module, + collect the current configuration (as a dict from facts) + + :rtype: A list + :returns: the requests necessary to migrate the current configuration + to the desired configuration + """ + want = self._module.params['config'] + have 
= existing_vlans_facts + resp = self.set_state(want, have) + return to_list(resp) + + def set_state(self, want, have): + """ Select the appropriate function based on the state provided + + :param want: the desired configuration as a dictionary + :param have: the current configuration as a dictionary + :rtype: A list + :returns: the requests necessary to migrate the current configuration + to the desired configuration + """ + state = self._module.params['state'] + if state == 'overridden': + requests = self._state_overridden(want, have) + elif state == 'deleted': + requests = self._state_deleted(want, have) + elif state == 'merged': + requests = self._state_merged(want, have) + elif state == 'replaced': + requests = self._state_replaced(want, have) + return requests + + def _state_replaced(self, want, have): + """ The request generator when state is replaced + + :rtype: A list + :returns: the requests necessary to migrate the current configuration + to the desired configuration + """ + requests = [] + request_patch = deepcopy(self.VLAN_PATCH) + + for w in want: + if w.get('vlan_id'): + h = search_obj_in_list(w['vlan_id'], have, 'vlan_id') + if h: + if dict_diff(w, h): + request_body = self._update_patch_request(w) + request_patch["data"]["openconfig-vlan:vlans"]["vlan"].append(request_body) + else: + request_post = self._update_post_request(w) + requests.append(request_post) + + if len(request_patch["data"]["openconfig-vlan:vlans"]["vlan"]): + request_patch["data"] = json.dumps(request_patch["data"]) + requests.append(request_patch) + + return requests + + def _state_overridden(self, want, have): + """ The request generator when state is overridden + + :rtype: A list + :returns: the requests necessary to migrate the current configuration + to the desired configuration + """ + requests = [] + request_patch = deepcopy(self.VLAN_PATCH) + + have_copy = [] + for w in want: + if w.get('vlan_id'): + h = search_obj_in_list(w['vlan_id'], have, 'vlan_id') + if h: + if dict_diff(w, h): + request_body = self._update_patch_request(w) + request_patch["data"]["openconfig-vlan:vlans"]["vlan"].append(request_body) + have_copy.append(h) + else: + request_post = self._update_post_request(w) + requests.append(request_post) + + for h in have: + if h not in have_copy and h['vlan_id'] != 1: + request_delete = self._update_delete_request(h) + requests.append(request_delete) + + if len(request_patch["data"]["openconfig-vlan:vlans"]["vlan"]): + request_patch["data"] = json.dumps(request_patch["data"]) + requests.append(request_patch) + + return requests + + def _state_merged(self, want, have): + """ The requests generator when state is merged + + :rtype: A list + :returns: the requests necessary to merge the provided into + the current configuration + """ + requests = [] + + request_patch = deepcopy(self.VLAN_PATCH) + + for w in want: + if w.get('vlan_id'): + h = search_obj_in_list(w['vlan_id'], have, 'vlan_id') + if h: + if dict_diff(w, h): + request_body = self._update_patch_request(w) + request_patch["data"]["openconfig-vlan:vlans"]["vlan"].append(request_body) + else: + request_post = self._update_post_request(w) + requests.append(request_post) + + if len(request_patch["data"]["openconfig-vlan:vlans"]["vlan"]): + request_patch["data"] = json.dumps(request_patch["data"]) + requests.append(request_patch) + return requests + + def _state_deleted(self, want, have): + """ The requests generator when state is deleted + + :rtype: A list + :returns: the requests necessary to remove the current configuration + of the 
provided objects + """ + requests = [] + + if want: + for w in want: + if w.get('vlan_id'): + h = search_obj_in_list(w['vlan_id'], have, 'vlan_id') + if h: + request_delete = self._update_delete_request(h) + requests.append(request_delete) + + else: + if not have: + return requests + for h in have: + if h['vlan_id'] == 1: + continue + else: + request_delete = self._update_delete_request(h) + requests.append(request_delete) + + return requests + + def _update_vlan_config_body(self, want, request): + request["config"]["name"] = want["name"] + request["config"]["status"] = "SUSPENDED" if want["state"] == "suspend" else want["state"].upper() + request["config"]["vlan-id"] = want["vlan_id"] + return request + + def _update_patch_request(self, want): + request_body = deepcopy(self.REQUEST_BODY) + request_body = self._update_vlan_config_body(want, request_body) + return request_body + + def _update_post_request(self, want): + request_post = deepcopy(self.VLAN_POST) + request_body = deepcopy(self.REQUEST_BODY) + request_body = self._update_vlan_config_body(want, request_body) + request_post["data"]["openconfig-vlan:vlans"].append(request_body) + request_post["data"] = json.dumps(request_post["data"]) + return request_post + + def _update_delete_request(self, have): + request_delete = deepcopy(self.VLAN_DELETE) + request_delete["path"] = self.DEL_PATH + str(have['vlan_id']) + return request_delete diff --git a/plugins/module_utils/network/exos/exos.py b/plugins/module_utils/network/exos/exos.py new file mode 100644 index 0000000000..f7f70ae3e3 --- /dev/null +++ b/plugins/module_utils/network/exos/exos.py @@ -0,0 +1,219 @@ +# This code is part of Ansible, but is an independent component. +# This particular file snippet, and this file snippet only, is BSD licensed. +# Modules you write using this snippet, which is embedded dynamically by Ansible +# still belong to the author of the module, and may assign their own license +# to the complete work. +# +# (c) 2016 Red Hat Inc. +# +# Redistribution and use in source and binary forms, with or without modification, +# are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
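+#
+# A minimal sketch of how resource modules in this collection call
+# into these helpers (the request below is illustrative):
+#
+#   from ansible_collections.community.general.plugins.module_utils.network.exos.exos import send_requests
+#
+#   requests = [{"path": "/rest/restconf/data/openconfig-lldp:lldp/config",
+#                "method": "GET"}]
+#   responses = send_requests(module, requests)
+#
+# get_connection() below returns a Cli or HttpApi wrapper depending
+# on the connection's reported 'network_api' capability.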
+# +import json +from ansible.module_utils._text import to_text +from ansible.module_utils.basic import env_fallback +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import to_list, ComplexList +from ansible.module_utils.common._collections_compat import Mapping +from ansible.module_utils.connection import Connection, ConnectionError + +_DEVICE_CONNECTION = None + + +class Cli: + def __init__(self, module): + self._module = module + self._device_configs = {} + self._connection = None + + def get_capabilities(self): + """Returns platform info of the remove device + """ + connection = self._get_connection() + return json.loads(connection.get_capabilities()) + + def _get_connection(self): + if not self._connection: + self._connection = Connection(self._module._socket_path) + return self._connection + + def get_config(self, flags=None): + """Retrieves the current config from the device or cache + """ + flags = [] if flags is None else flags + if self._device_configs == {}: + connection = self._get_connection() + try: + out = connection.get_config(flags=flags) + except ConnectionError as exc: + self._module.fail_json(msg=to_text(exc, errors='surrogate_then_replace')) + self._device_configs = to_text(out, errors='surrogate_then_replace').strip() + return self._device_configs + + def run_commands(self, commands, check_rc=True): + """Runs list of commands on remote device and returns results + """ + connection = self._get_connection() + try: + response = connection.run_commands(commands=commands, check_rc=check_rc) + except ConnectionError as exc: + self._module.fail_json(msg=to_text(exc, errors='surrogate_then_replace')) + return response + + def get_diff(self, candidate=None, running=None, diff_match='line', diff_ignore_lines=None, path=None, diff_replace='line'): + conn = self._get_connection() + try: + diff = conn.get_diff(candidate=candidate, running=running, diff_match=diff_match, + diff_ignore_lines=diff_ignore_lines, path=path, diff_replace=diff_replace) + except ConnectionError as exc: + self._module.fail_json(msg=to_text(exc, errors='surrogate_then_replace')) + return diff + + +class HttpApi: + def __init__(self, module): + self._module = module + self._device_configs = {} + self._connection_obj = None + + def get_capabilities(self): + """Returns platform info of the remove device + """ + try: + capabilities = self._connection.get_capabilities() + except ConnectionError as exc: + self._module.fail_json(msg=to_text(exc, errors='surrogate_then_replace')) + + return json.loads(capabilities) + + @property + def _connection(self): + if not self._connection_obj: + self._connection_obj = Connection(self._module._socket_path) + return self._connection_obj + + def get_config(self, flags=None): + """Retrieves the current config from the device or cache + """ + flags = [] if flags is None else flags + if self._device_configs == {}: + try: + out = self._connection.get_config(flags=flags) + except ConnectionError as exc: + self._module.fail_json(msg=to_text(exc, errors='surrogate_then_replace')) + self._device_configs = to_text(out, errors='surrogate_then_replace').strip() + return self._device_configs + + def run_commands(self, commands, check_rc=True): + """Runs list of commands on remote device and returns results + """ + try: + response = self._connection.run_commands(commands=commands, check_rc=check_rc) + except ConnectionError as exc: + self._module.fail_json(msg=to_text(exc, errors='surrogate_then_replace')) + return response + + def send_requests(self, 
requests): + """Send a list of http requests to remote device and return results + """ + if requests is None: + raise ValueError("'requests' value is required") + + responses = list() + for req in to_list(requests): + try: + response = self._connection.send_request(**req) + except ConnectionError as exc: + self._module.fail_json(msg=to_text(exc, errors='surrogate_then_replace')) + responses.append(response) + return responses + + def get_diff(self, candidate=None, running=None, diff_match='line', diff_ignore_lines=None, path=None, diff_replace='line'): + try: + diff = self._connection.get_diff(candidate=candidate, running=running, diff_match=diff_match, + diff_ignore_lines=diff_ignore_lines, path=path, diff_replace=diff_replace) + except ConnectionError as exc: + self._module.fail_json(msg=to_text(exc, errors='surrogate_then_replace')) + return diff + + +def get_capabilities(module): + conn = get_connection(module) + return conn.get_capabilities() + + +def get_connection(module): + global _DEVICE_CONNECTION + if not _DEVICE_CONNECTION: + connection_proxy = Connection(module._socket_path) + cap = json.loads(connection_proxy.get_capabilities()) + if cap['network_api'] == 'cliconf': + conn = Cli(module) + elif cap['network_api'] == 'exosapi': + conn = HttpApi(module) + else: + module.fail_json(msg='Invalid connection type %s' % cap['network_api']) + _DEVICE_CONNECTION = conn + return _DEVICE_CONNECTION + + +def get_config(module, flags=None): + flags = None if flags is None else flags + conn = get_connection(module) + return conn.get_config(flags) + + +def load_config(module, commands): + conn = get_connection(module) + return conn.run_commands(to_command(module, commands)) + + +def run_commands(module, commands, check_rc=True): + conn = get_connection(module) + return conn.run_commands(to_command(module, commands), check_rc=check_rc) + + +def to_command(module, commands): + transform = ComplexList(dict( + command=dict(key=True), + output=dict(default='text'), + prompt=dict(type='list'), + answer=dict(type='list'), + sendonly=dict(type='bool', default=False), + check_all=dict(type='bool', default=False), + ), module) + return transform(to_list(commands)) + + +def send_requests(module, requests): + conn = get_connection(module) + return conn.send_requests(to_request(module, requests)) + + +def to_request(module, requests): + transform = ComplexList(dict( + path=dict(key=True), + method=dict(), + data=dict(type='dict'), + ), module) + return transform(to_list(requests)) + + +def get_diff(module, candidate=None, running=None, diff_match='line', diff_ignore_lines=None, path=None, diff_replace='line'): + conn = get_connection(module) + return conn.get_diff(candidate=candidate, running=running, diff_match=diff_match, diff_ignore_lines=diff_ignore_lines, path=path, diff_replace=diff_replace) diff --git a/plugins/module_utils/network/exos/facts/__init__.py b/plugins/module_utils/network/exos/facts/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/plugins/module_utils/network/exos/facts/facts.py b/plugins/module_utils/network/exos/facts/facts.py new file mode 100644 index 0000000000..b9b058304f --- /dev/null +++ b/plugins/module_utils/network/exos/facts/facts.py @@ -0,0 +1,61 @@ +# +# -*- coding: utf-8 -*- +# Copyright 2019 Red Hat +# GNU General Public License v3.0+ +# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +""" +The facts class for exos +this file validates each subset of facts and selectively +calls the appropriate facts gathering function +""" +from 
__future__ import absolute_import, division, print_function +__metaclass__ = type + +from ansible_collections.community.general.plugins.module_utils.network.exos.argspec.facts.facts import FactsArgs +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.facts.facts import FactsBase +from ansible_collections.community.general.plugins.module_utils.network.exos.facts.lldp_global.lldp_global import Lldp_globalFacts +from ansible_collections.community.general.plugins.module_utils.network.exos.facts.vlans.vlans import VlansFacts +from ansible_collections.community.general.plugins.module_utils.network.exos.facts.legacy.base import Default, Hardware, Interfaces, Config +from ansible_collections.community.general.plugins.module_utils.network.exos.facts.lldp_interfaces.lldp_interfaces import Lldp_interfacesFacts +from ansible_collections.community.general.plugins.module_utils.network.exos.facts.l2_interfaces.l2_interfaces import L2_interfacesFacts + +FACT_LEGACY_SUBSETS = dict( + default=Default, + hardware=Hardware, + interfaces=Interfaces, + config=Config) + +FACT_RESOURCE_SUBSETS = dict( + lldp_global=Lldp_globalFacts, + vlans=VlansFacts, + lldp_interfaces=Lldp_interfacesFacts, + l2_interfaces=L2_interfacesFacts, +) + + +class Facts(FactsBase): + """ The fact class for exos + """ + + VALID_LEGACY_GATHER_SUBSETS = frozenset(FACT_LEGACY_SUBSETS.keys()) + VALID_RESOURCE_SUBSETS = frozenset(FACT_RESOURCE_SUBSETS.keys()) + + def __init__(self, module): + super(Facts, self).__init__(module) + + def get_facts(self, legacy_facts_type=None, resource_facts_type=None, data=None): + """ Collect the facts for exos + + :param legacy_facts_type: List of legacy facts types + :param resource_facts_type: List of resource fact types + :param data: previously collected conf + :rtype: dict + :return: the facts gathered + """ + if self.VALID_RESOURCE_SUBSETS: + self.get_network_resources_facts(FACT_RESOURCE_SUBSETS, resource_facts_type, data) + + if self.VALID_LEGACY_GATHER_SUBSETS: + self.get_network_legacy_facts(FACT_LEGACY_SUBSETS, legacy_facts_type) + + return self.ansible_facts, self._warnings diff --git a/plugins/module_utils/network/exos/facts/l2_interfaces/__init__.py b/plugins/module_utils/network/exos/facts/l2_interfaces/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/plugins/module_utils/network/exos/facts/l2_interfaces/l2_interfaces.py b/plugins/module_utils/network/exos/facts/l2_interfaces/l2_interfaces.py new file mode 100644 index 0000000000..bbe9ab4402 --- /dev/null +++ b/plugins/module_utils/network/exos/facts/l2_interfaces/l2_interfaces.py @@ -0,0 +1,92 @@ +# +# -*- coding: utf-8 -*- +# Copyright 2019 Red Hat +# GNU General Public License v3.0+ +# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +""" +The exos l2_interfaces fact class +It is in this file the configuration is collected from the device +for a given resource, parsed, and the facts tree is populated +based on the configuration. 
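+
+A sketch of the flow (module, connection and ansible_facts are
+assumed to be set up by the caller):
+
+    facts = L2_interfacesFacts(module)
+    ansible_facts = facts.populate_facts(connection, ansible_facts)
+    ansible_facts['ansible_network_resources']['l2_interfaces']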
+""" +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +import re +from copy import deepcopy + +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common import utils +from ansible_collections.community.general.plugins.module_utils.network.exos.argspec.l2_interfaces.l2_interfaces import L2_interfacesArgs +from ansible_collections.community.general.plugins.module_utils.network.exos.exos import send_requests + + +class L2_interfacesFacts(object): + """ The exos l2_interfaces fact class + """ + def __init__(self, module, subspec='config', options='options'): + self._module = module + self.argument_spec = L2_interfacesArgs.argument_spec + spec = deepcopy(self.argument_spec) + if subspec: + if options: + facts_argument_spec = spec[subspec][options] + else: + facts_argument_spec = spec[subspec] + else: + facts_argument_spec = spec + + self.generated_spec = utils.generate_dict(facts_argument_spec) + + def populate_facts(self, connection, ansible_facts, data=None): + """ Populate the facts for l2_interfaces + :param connection: the device connection + :param ansible_facts: Facts dictionary + :param data: previously collected conf + :rtype: dictionary + :returns: facts + """ + + if not data: + request = [{ + "path": "/rest/restconf/data/openconfig-interfaces:interfaces", + "method": "GET" + }] + data = send_requests(self._module, requests=request) + + objs = [] + if data: + for d in data[0]["openconfig-interfaces:interfaces"]["interface"]: + obj = self.render_config(self.generated_spec, d) + if obj: + objs.append(obj) + + ansible_facts['ansible_network_resources'].pop('l2_interfaces', None) + facts = {} + if objs: + params = utils.validate_config(self.argument_spec, {'config': objs}) + facts['l2_interfaces'] = params['config'] + + ansible_facts['ansible_network_resources'].update(facts) + return ansible_facts + + def render_config(self, spec, conf): + """ + Render config as dictionary structure and delete keys + from spec for null values + + :param spec: The facts tree, generated from the argspec + :param conf: The configuration + :rtype: dictionary + :returns: The generated config + """ + config = deepcopy(spec) + if conf["config"]["type"] == "ethernetCsmacd": + conf_dict = conf["openconfig-if-ethernet:ethernet"]["openconfig-vlan:switched-vlan"]["config"] + config["name"] = conf["name"] + if conf_dict["interface-mode"] == "ACCESS": + config["access"]["vlan"] = conf_dict.get("access-vlan") + else: + if 'native-vlan' in conf_dict: + config["trunk"]["native_vlan"] = conf_dict.get("native-vlan") + config["trunk"]["trunk_allowed_vlans"] = conf_dict.get("trunk-vlans") + return utils.remove_empties(config) diff --git a/plugins/module_utils/network/exos/facts/legacy/__init__.py b/plugins/module_utils/network/exos/facts/legacy/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/plugins/module_utils/network/exos/facts/legacy/base.py b/plugins/module_utils/network/exos/facts/legacy/base.py new file mode 100644 index 0000000000..c913350973 --- /dev/null +++ b/plugins/module_utils/network/exos/facts/legacy/base.py @@ -0,0 +1,263 @@ +# -*- coding: utf-8 -*- +# Copyright 2019 Red Hat +# GNU General Public License v3.0+ +# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +""" +The exos legacy fact class +It is in this file the configuration is collected from the device +for a given resource, parsed, and the facts tree is populated +based on the configuration. 
+""" + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +import re +import json + +from ansible_collections.community.general.plugins.module_utils.network.exos.exos import run_commands +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.six import iteritems + + +class FactsBase(object): + + COMMANDS = list() + + def __init__(self, module): + self.module = module + self.facts = dict() + self.warnings = list() + self.responses = None + + def populate(self): + self.responses = run_commands(self.module, self.COMMANDS) + + def run(self, cmd): + return run_commands(self.module, cmd) + + +class Default(FactsBase): + + COMMANDS = [ + 'show version', + 'show switch' + ] + + def populate(self): + super(Default, self).populate() + data = self.responses[0] + if data: + self.facts['version'] = self.parse_version(data) + self.facts['serialnum'] = self.parse_serialnum(data) + + data = self.responses[1] + if data: + self.facts['model'] = self.parse_model(data) + self.facts['hostname'] = self.parse_hostname(data) + + def parse_version(self, data): + match = re.search(r'Image\s+: ExtremeXOS version (\S+)', data) + if match: + return match.group(1) + + def parse_model(self, data): + match = re.search(r'System Type:\s+(.*$)', data, re.M) + if match: + return match.group(1) + + def parse_hostname(self, data): + match = re.search(r'SysName:\s+(\S+)', data, re.M) + if match: + return match.group(1) + + def parse_serialnum(self, data): + match = re.search(r'Switch\s+: \S+ (\S+)', data, re.M) + if match: + return match.group(1) + # For stack, return serial number of the first switch in the stack. + match = re.search(r'Slot-\d+\s+: \S+ (\S+)', data, re.M) + if match: + return match.group(1) + # Handle unique formatting for VM + match = re.search(r'Switch\s+: PN:\S+\s+SN:(\S+)', data, re.M) + if match: + return match.group(1) + + +class Hardware(FactsBase): + + COMMANDS = [ + 'show memory' + ] + + def populate(self): + super(Hardware, self).populate() + data = self.responses[0] + if data: + self.facts['memtotal_mb'] = int(round(int(self.parse_memtotal(data)) / 1024, 0)) + self.facts['memfree_mb'] = int(round(int(self.parse_memfree(data)) / 1024, 0)) + + def parse_memtotal(self, data): + match = re.search(r' Total DRAM \(KB\): (\d+)', data, re.M) + if match: + return match.group(1) + # Handle unique formatting for VM + match = re.search(r' Total \s+\(KB\): (\d+)', data, re.M) + if match: + return match.group(1) + + def parse_memfree(self, data): + match = re.search(r' Free\s+\(KB\): (\d+)', data, re.M) + if match: + return match.group(1) + + +class Config(FactsBase): + + COMMANDS = ['show configuration detail'] + + def populate(self): + super(Config, self).populate() + data = self.responses[0] + if data: + self.facts['config'] = data + + +class Interfaces(FactsBase): + + COMMANDS = [ + 'show switch', + {'command': 'show port config', 'output': 'json'}, + {'command': 'show port description', 'output': 'json'}, + {'command': 'show vlan detail', 'output': 'json'}, + {'command': 'show lldp neighbors', 'output': 'json'} + ] + + def populate(self): + super(Interfaces, self).populate() + + self.facts['all_ipv4_addresses'] = list() + self.facts['all_ipv6_addresses'] = list() + + data = self.responses[0] + if data: + sysmac = self.parse_sysmac(data) + + data = self.responses[1] + if data: + self.facts['interfaces'] = self.populate_interfaces(data, sysmac) + + data = self.responses[2] + if data: + self.populate_interface_descriptions(data) + + data = 
self.responses[3] + if data: + self.populate_vlan_interfaces(data, sysmac) + + data = self.responses[4] + if data: + self.facts['neighbors'] = self.parse_neighbors(data) + + def parse_sysmac(self, data): + match = re.search(r'System MAC:\s+(\S+)', data, re.M) + if match: + return match.group(1) + + def populate_interfaces(self, interfaces, sysmac): + facts = dict() + for elem in interfaces: + intf = dict() + + if 'show_ports_config' not in elem: + continue + + key = str(elem['show_ports_config']['port']) + + if elem['show_ports_config']['linkState'] == 2: + # Link state is "not present", don't include + continue + + intf['type'] = 'Ethernet' + intf['macaddress'] = sysmac + intf['bandwidth_configured'] = str(elem['show_ports_config']['speedCfg']) + intf['bandwidth'] = str(elem['show_ports_config']['speedActual']) + intf['duplex_configured'] = elem['show_ports_config']['duplexCfg'] + intf['duplex'] = elem['show_ports_config']['duplexActual'] + if elem['show_ports_config']['linkState'] == 1: + intf['lineprotocol'] = 'up' + else: + intf['lineprotocol'] = 'down' + if elem['show_ports_config']['portState'] == 1: + intf['operstatus'] = 'up' + else: + intf['operstatus'] = 'admin down' + + facts[key] = intf + return facts + + def populate_interface_descriptions(self, data): + for elem in data: + if 'show_ports_description' not in elem: + continue + key = str(elem['show_ports_description']['port']) + + if 'descriptionString' in elem['show_ports_description']: + desc = elem['show_ports_description']['descriptionString'] + self.facts['interfaces'][key]['description'] = desc + + def populate_vlan_interfaces(self, data, sysmac): + for elem in data: + if 'vlanProc' in elem: + key = elem['vlanProc']['name1'] + if key not in self.facts['interfaces']: + intf = dict() + intf['type'] = 'VLAN' + intf['macaddress'] = sysmac + self.facts['interfaces'][key] = intf + + if elem['vlanProc']['ipAddress'] != '0.0.0.0': + self.facts['interfaces'][key]['ipv4'] = list() + addr = elem['vlanProc']['ipAddress'] + subnet = elem['vlanProc']['maskForDisplay'] + ipv4 = dict(address=addr, subnet=subnet) + self.add_ip_address(addr, 'ipv4') + self.facts['interfaces'][key]['ipv4'].append(ipv4) + + if 'rtifIpv6Address' in elem: + key = elem['rtifIpv6Address']['rtifName'] + if key not in self.facts['interfaces']: + intf = dict() + intf['type'] = 'VLAN' + intf['macaddress'] = sysmac + self.facts['interfaces'][key] = intf + self.facts['interfaces'][key]['ipv6'] = list() + addr, subnet = elem['rtifIpv6Address']['ipv6_address_mask'].split('/') + ipv6 = dict(address=addr, subnet=subnet) + self.add_ip_address(addr, 'ipv6') + self.facts['interfaces'][key]['ipv6'].append(ipv6) + + def add_ip_address(self, address, family): + if family == 'ipv4': + if address not in self.facts['all_ipv4_addresses']: + self.facts['all_ipv4_addresses'].append(address) + else: + if address not in self.facts['all_ipv6_addresses']: + self.facts['all_ipv6_addresses'].append(address) + + def parse_neighbors(self, data): + facts = dict() + for elem in data: + if 'lldpPortNbrInfoShort' not in elem: + continue + intf = str(elem['lldpPortNbrInfoShort']['port']) + if intf not in facts: + facts[intf] = list() + fact = dict() + fact['host'] = elem['lldpPortNbrInfoShort']['nbrSysName'] + fact['port'] = str(elem['lldpPortNbrInfoShort']['nbrPortID']) + facts[intf].append(fact) + return facts diff --git a/plugins/module_utils/network/exos/facts/lldp_global/__init__.py b/plugins/module_utils/network/exos/facts/lldp_global/__init__.py new file mode 100644 index 
0000000000..e69de29bb2 diff --git a/plugins/module_utils/network/exos/facts/lldp_global/lldp_global.py b/plugins/module_utils/network/exos/facts/lldp_global/lldp_global.py new file mode 100644 index 0000000000..f01893da81 --- /dev/null +++ b/plugins/module_utils/network/exos/facts/lldp_global/lldp_global.py @@ -0,0 +1,97 @@ +# +# -*- coding: utf-8 -*- +# Copyright 2019 Red Hat +# GNU General Public License v3.0+ +# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +""" +The exos lldp_global fact class +It is in this file the configuration is collected from the device +for a given resource, parsed, and the facts tree is populated +based on the configuration. +""" +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +import re +from copy import deepcopy + +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common import utils +from ansible_collections.community.general.plugins.module_utils.network.exos.argspec.lldp_global.lldp_global \ + import Lldp_globalArgs +from ansible_collections.community.general.plugins.module_utils.network.exos.exos import send_requests + + +class Lldp_globalFacts(object): + """ The exos lldp_global fact class + """ + + TLV_SELECT_OPTIONS = [ + "SYSTEM_NAME", + "SYSTEM_DESCRIPTION", + "SYSTEM_CAPABILITIES", + "MANAGEMENT_ADDRESS", + "PORT_DESCRIPTION"] + + def __init__(self, module, subspec='config', options='options'): + self._module = module + self.argument_spec = Lldp_globalArgs.argument_spec + spec = deepcopy(self.argument_spec) + if subspec: + if options: + facts_argument_spec = spec[subspec][options] + else: + facts_argument_spec = spec[subspec] + else: + facts_argument_spec = spec + + self.generated_spec = utils.generate_dict(facts_argument_spec) + + def populate_facts(self, connection, ansible_facts, data=None): + """ Populate the facts for lldp_global + :param connection: the device connection + :param ansible_facts: Facts dictionary + :param data: previously collected conf + :rtype: dictionary + :returns: facts + """ + if not data: + request = { + "path": "/rest/restconf/data/openconfig-lldp:lldp/config/", + "method": "GET", + } + data = send_requests(self._module, request) + + obj = {} + if data: + lldp_obj = self.render_config(self.generated_spec, data[0]) + if lldp_obj: + obj = lldp_obj + + ansible_facts['ansible_network_resources'].pop('lldp_global', None) + facts = {} + + params = utils.validate_config(self.argument_spec, {'config': obj}) + facts['lldp_global'] = params['config'] + + ansible_facts['ansible_network_resources'].update(facts) + return ansible_facts + + def render_config(self, spec, conf): + """ + Render config as dictionary structure and delete keys + from spec for null values + + :param spec: The facts tree, generated from the argspec + :param conf: The configuration + :rtype: dictionary + :returns: The generated config + """ + config = deepcopy(spec) + config['interval'] = conf["openconfig-lldp:config"]["hello-timer"] + + for item in self.TLV_SELECT_OPTIONS: + config["tlv_select"][item.lower()] = ( + False if (item in conf["openconfig-lldp:config"]["suppress-tlv-advertisement"]) + else True) + + return utils.remove_empties(config) diff --git a/plugins/module_utils/network/exos/facts/lldp_interfaces/__init__.py b/plugins/module_utils/network/exos/facts/lldp_interfaces/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/plugins/module_utils/network/exos/facts/lldp_interfaces/lldp_interfaces.py 
b/plugins/module_utils/network/exos/facts/lldp_interfaces/lldp_interfaces.py new file mode 100644 index 0000000000..444dee443e --- /dev/null +++ b/plugins/module_utils/network/exos/facts/lldp_interfaces/lldp_interfaces.py @@ -0,0 +1,88 @@ +# +# -*- coding: utf-8 -*- +# Copyright 2019 Red Hat +# GNU General Public License v3.0+ +# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +""" +The exos lldp_interfaces fact class +It is in this file the configuration is collected from the device +for a given resource, parsed, and the facts tree is populated +based on the configuration. +""" +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +import re +from copy import deepcopy + +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common import utils +from ansible_collections.community.general.plugins.module_utils.network.exos.argspec.lldp_interfaces.lldp_interfaces import Lldp_interfacesArgs +from ansible_collections.community.general.plugins.module_utils.network.exos.exos import send_requests + + +class Lldp_interfacesFacts(object): + """ The exos lldp_interfaces fact class + """ + + def __init__(self, module, subspec='config', options='options'): + self._module = module + self.argument_spec = Lldp_interfacesArgs.argument_spec + spec = deepcopy(self.argument_spec) + if subspec: + if options: + facts_argument_spec = spec[subspec][options] + else: + facts_argument_spec = spec[subspec] + else: + facts_argument_spec = spec + + self.generated_spec = utils.generate_dict(facts_argument_spec) + + def populate_facts(self, connection, ansible_facts, data=None): + """ Populate the facts for lldp_interfaces + :param connection: the device connection + :param ansible_facts: Facts dictionary + :param data: previously collected conf + :rtype: dictionary + :returns: facts + """ + + if not data: + request = [{ + "path": "/rest/restconf/data/openconfig-lldp:lldp/interfaces?depth=4", + "method": "GET" + }] + data = send_requests(self._module, requests=request) + + objs = [] + if data: + for d in data[0]["openconfig-lldp:interfaces"]["interface"]: + obj = self.render_config(self.generated_spec, d["config"]) + if obj: + objs.append(obj) + + ansible_facts['ansible_network_resources'].pop('lldp_interfaces', None) + facts = {} + if objs: + params = utils.validate_config(self.argument_spec, {'config': objs}) + facts['lldp_interfaces'] = params['config'] + + ansible_facts['ansible_network_resources'].update(facts) + return ansible_facts + + def render_config(self, spec, conf): + """ + Render config as dictionary structure and delete keys + from spec for null values + + :param spec: The facts tree, generated from the argspec + :param conf: The configuration + :rtype: dictionary + :returns: The generated config + """ + config = deepcopy(spec) + + config["name"] = conf["name"] + config["enabled"] = bool(conf["enabled"]) + + return utils.remove_empties(config) diff --git a/plugins/module_utils/network/exos/facts/vlans/__init__.py b/plugins/module_utils/network/exos/facts/vlans/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/plugins/module_utils/network/exos/facts/vlans/vlans.py b/plugins/module_utils/network/exos/facts/vlans/vlans.py new file mode 100644 index 0000000000..4ba7284747 --- /dev/null +++ b/plugins/module_utils/network/exos/facts/vlans/vlans.py @@ -0,0 +1,89 @@ +# +# -*- coding: utf-8 -*- +# Copyright 2019 Red Hat +# GNU General Public License v3.0+ +# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +""" +The 
exos vlans fact class +It is in this file the configuration is collected from the device +for a given resource, parsed, and the facts tree is populated +based on the configuration. +""" +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +import re +from copy import deepcopy + +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common import utils +from ansible_collections.community.general.plugins.module_utils.network.exos.argspec.vlans.vlans import VlansArgs +from ansible_collections.community.general.plugins.module_utils.network.exos.exos import send_requests + + +class VlansFacts(object): + """ The exos vlans fact class + """ + + def __init__(self, module, subspec='config', options='options'): + self._module = module + self.argument_spec = VlansArgs.argument_spec + spec = deepcopy(self.argument_spec) + if subspec: + if options: + facts_argument_spec = spec[subspec][options] + else: + facts_argument_spec = spec[subspec] + else: + facts_argument_spec = spec + + self.generated_spec = utils.generate_dict(facts_argument_spec) + + def populate_facts(self, connection, ansible_facts, data=None): + """ Populate the facts for vlans + :param connection: the device connection + :param ansible_facts: Facts dictionary + :param data: previously collected conf + :rtype: dictionary + :returns: facts + """ + + if not data: + request = [{ + "path": "/rest/restconf/data/openconfig-vlan:vlans?depth=5", + "method": "GET" + }] + data = send_requests(self._module, requests=request) + + objs = [] + if data: + for d in data[0]["openconfig-vlan:vlans"]["vlan"]: + obj = self.render_config(self.generated_spec, d["config"]) + if obj: + objs.append(obj) + + ansible_facts['ansible_network_resources'].pop('vlans', None) + facts = {} + if objs: + params = utils.validate_config(self.argument_spec, {'config': objs}) + facts['vlans'] = params['config'] + + ansible_facts['ansible_network_resources'].update(facts) + return ansible_facts + + def render_config(self, spec, conf): + """ + Render config as dictionary structure and delete keys + from spec for null values + + :param spec: The facts tree, generated from the argspec + :param conf: The configuration + :rtype: dictionary + :returns: The generated config + """ + config = deepcopy(spec) + + config["name"] = conf["name"] + config["state"] = "suspend" if conf["status"] == "SUSPENDED" else conf["status"].lower() + config["vlan_id"] = conf["vlan-id"] + + return utils.remove_empties(config) diff --git a/plugins/module_utils/network/exos/utils/__init__.py b/plugins/module_utils/network/exos/utils/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/plugins/module_utils/network/exos/utils/utils.py b/plugins/module_utils/network/exos/utils/utils.py new file mode 100644 index 0000000000..d40f81714c --- /dev/null +++ b/plugins/module_utils/network/exos/utils/utils.py @@ -0,0 +1,9 @@ +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +def search_obj_in_list(item, lst, key): + for o in lst: + if o[key] == item: + return o + return None diff --git a/plugins/module_utils/network/f5/__init__.py b/plugins/module_utils/network/f5/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/plugins/module_utils/network/f5/iworkflow.py b/plugins/module_utils/network/f5/iworkflow.py new file mode 100644 index 0000000000..d6dda32f6a --- /dev/null +++ b/plugins/module_utils/network/f5/iworkflow.py @@ -0,0 +1,57 @@ +# -*- coding: utf-8 -*- +# +# Copyright (c) 
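The vlans render_config above normalises the device's status field into the module's state value, and search_obj_in_list from utils.py is the matching lookup helper; a short sketch over hand-built data:

# Illustrative only: the vlans state mapping plus the search_obj_in_list helper.
def search_obj_in_list(item, lst, key):
    for o in lst:
        if o[key] == item:
            return o
    return None

device_vlans = [
    {"name": "default", "status": "ACTIVE", "vlan-id": 1},
    {"name": "voice", "status": "SUSPENDED", "vlan-id": 20},
]

rendered = [
    {"name": v["name"],
     "vlan_id": v["vlan-id"],
     # the REST API reports SUSPENDED; the argspec expects 'suspend'
     "state": "suspend" if v["status"] == "SUSPENDED" else v["status"].lower()}
    for v in device_vlans
]

print(search_obj_in_list("voice", rendered, "name"))
# {'name': 'voice', 'vlan_id': 20, 'state': 'suspend'}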
2017 F5 Networks Inc. +# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +import time + +try: + from f5.iworkflow import ManagementRoot + from icontrol.exceptions import iControlUnexpectedHTTPError + HAS_F5SDK = True +except ImportError: + HAS_F5SDK = False + +try: + from library.module_utils.network.f5.common import F5BaseClient + from library.module_utils.network.f5.common import F5ModuleError +except ImportError: + from ansible_collections.f5networks.f5_modules.plugins.module_utils.network.f5.common import F5BaseClient + from ansible_collections.f5networks.f5_modules.plugins.module_utils.network.f5.common import F5ModuleError + + +class F5Client(F5BaseClient): + @property + def api(self): + exc = None + if self._client: + return self._client + for x in range(0, 3): + try: + server = self.params['provider']['server'] or self.params['server'] + user = self.params['provider']['user'] or self.params['user'] + password = self.params['provider']['password'] or self.params['password'] + server_port = self.params['provider']['server_port'] or self.params['server_port'] or 443 + validate_certs = self.params['provider']['validate_certs'] or self.params['validate_certs'] + + result = ManagementRoot( + server, + user, + password, + port=server_port, + verify=validate_certs, + token='local' + ) + self._client = result + return self._client + except Exception as ex: + exc = ex + time.sleep(3) + error = 'Unable to connect to {0} on port {1}.'.format(self.params['server'], self.params['server_port']) + if exc is not None: + error += ' The reported error was "{0}".'.format(str(exc)) + raise F5ModuleError(error) diff --git a/plugins/module_utils/network/f5/legacy.py b/plugins/module_utils/network/f5/legacy.py new file mode 100644 index 0000000000..bb2189c2bb --- /dev/null +++ b/plugins/module_utils/network/f5/legacy.py @@ -0,0 +1,121 @@ +# -*- coding: utf-8 -*- +# +# Copyright (c) 2017 F5 Networks Inc. +# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +try: + import bigsuds + bigsuds_found = True +except ImportError: + bigsuds_found = False + + +from ansible.module_utils.basic import env_fallback + + +def f5_argument_spec(): + return dict( + server=dict( + type='str', + required=True, + fallback=(env_fallback, ['F5_SERVER']) + ), + user=dict( + type='str', + required=True, + fallback=(env_fallback, ['F5_USER']) + ), + password=dict( + type='str', + aliases=['pass', 'pwd'], + required=True, + no_log=True, + fallback=(env_fallback, ['F5_PASSWORD']) + ), + validate_certs=dict( + default='yes', + type='bool', + fallback=(env_fallback, ['F5_VALIDATE_CERTS']) + ), + server_port=dict( + type='int', + default=443, + fallback=(env_fallback, ['F5_SERVER_PORT']) + ), + state=dict( + type='str', + default='present', + choices=['present', 'absent'] + ), + partition=dict( + type='str', + default='Common', + fallback=(env_fallback, ['F5_PARTITION']) + ) + ) + + +def f5_parse_arguments(module): + if not bigsuds_found: + module.fail_json(msg="the python bigsuds module is required") + + if module.params['validate_certs']: + import ssl + if not hasattr(ssl, 'SSLContext'): + module.fail_json( + msg="bigsuds does not support verifying certificates with python < 2.7.9." 
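F5Client.api above resolves every connection setting from the provider dict first, then falls back to the top-level module parameter (and to 443 for the port). A standalone sketch of that precedence with a hypothetical params dict; note that because plain `or` is used, a provider value of False also falls through to the top-level value:

# Illustrative only: the provider-first fallback used in F5Client.api.
params = {
    "provider": {"server": "iwf.example.com", "user": None, "password": None,
                 "server_port": None, "validate_certs": None},
    "server": "ignored.example.com",
    "user": "admin",
    "password": "secret",
    "server_port": None,
    "validate_certs": True,
}

server = params["provider"]["server"] or params["server"]
user = params["provider"]["user"] or params["user"]
server_port = params["provider"]["server_port"] or params["server_port"] or 443
validate_certs = params["provider"]["validate_certs"] or params["validate_certs"]

print(server, user, server_port, validate_certs)
# iwf.example.com admin 443 True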
+ "Either update python or set validate_certs=False on the task'") + + return ( + module.params['server'], + module.params['user'], + module.params['password'], + module.params['state'], + module.params['partition'], + module.params['validate_certs'], + module.params['server_port'] + ) + + +def bigip_api(bigip, user, password, validate_certs, port=443): + try: + if bigsuds.__version__ >= '1.0.4': + api = bigsuds.BIGIP(hostname=bigip, username=user, password=password, verify=validate_certs, port=port) + elif bigsuds.__version__ == '1.0.3': + api = bigsuds.BIGIP(hostname=bigip, username=user, password=password, verify=validate_certs) + else: + api = bigsuds.BIGIP(hostname=bigip, username=user, password=password) + except TypeError: + # bigsuds < 1.0.3, no verify param + if validate_certs: + # Note: verified we have SSLContext when we parsed params + api = bigsuds.BIGIP(hostname=bigip, username=user, password=password) + else: + import ssl + if hasattr(ssl, 'SSLContext'): + # Really, you should never do this. It disables certificate + # verification *globally*. But since older bigip libraries + # don't give us a way to toggle verification we need to + # disable it at the global level. + # From https://www.python.org/dev/peps/pep-0476/#id29 + ssl._create_default_https_context = ssl._create_unverified_context + api = bigsuds.BIGIP(hostname=bigip, username=user, password=password) + + return api + + +# Fully Qualified name (with the partition) +def fq_name(partition, name): + if name is not None and not name.startswith('/'): + return '/%s/%s' % (partition, name) + return name + + +# Fully Qualified name (with partition) for a list +def fq_list_names(partition, list_names): + if list_names is None: + return None + return map(lambda x: fq_name(partition, x), list_names) diff --git a/plugins/module_utils/network/f5/urls.py b/plugins/module_utils/network/f5/urls.py new file mode 100644 index 0000000000..cc4ab57786 --- /dev/null +++ b/plugins/module_utils/network/f5/urls.py @@ -0,0 +1,122 @@ +# -*- coding: utf-8 -*- +# +# Copyright (c) 2017, F5 Networks Inc. +# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +import re + +try: + from library.module_utils.network.f5.common import F5ModuleError +except ImportError: + from ansible_collections.f5networks.f5_modules.plugins.module_utils.network.f5.common import F5ModuleError + +_CLEAN_HEADER_REGEX_BYTE = re.compile(b'^\\S[^\\r\\n]*$|^$') +_CLEAN_HEADER_REGEX_STR = re.compile(r'^\S[^\r\n]*$|^$') + + +def check_header_validity(header): + """Verifies that header value is a string which doesn't contain + leading whitespace or return characters. + + NOTE: This is a slightly modified version of the original function + taken from the requests library: + http://docs.python-requests.org/en/master/_modules/requests/utils/ + + :param header: string containing ':'. 
+ """ + try: + name, value = header.split(':') + except ValueError: + raise F5ModuleError('Invalid header format: {0}'.format(header)) + if name == '': + raise F5ModuleError('Invalid header format: {0}'.format(header)) + + if isinstance(value, bytes): + pat = _CLEAN_HEADER_REGEX_BYTE + else: + pat = _CLEAN_HEADER_REGEX_STR + try: + if not pat.match(value): + raise F5ModuleError("Invalid return character or leading space in header: %s" % name) + except TypeError: + raise F5ModuleError("Value for header {%s: %s} must be of type str or " + "bytes, not %s" % (name, value, type(value))) + + +def build_service_uri(base_uri, partition, name): + """Build the proper uri for a service resource. + This follows the scheme: + /~~<.app>~ + :param base_uri: str -- base uri of the REST endpoint + :param partition: str -- partition for the service + :param name: str -- name of the service + :returns: str -- uri to access the service + """ + name = name.replace('/', '~') + return '%s~%s~%s.app~%s' % (base_uri, partition, name, name) + + +def parseStats(entry): + if 'description' in entry: + return entry['description'] + elif 'value' in entry: + return entry['value'] + elif 'entries' in entry or 'nestedStats' in entry and 'entries' in entry['nestedStats']: + if 'entries' in entry: + entries = entry['entries'] + else: + entries = entry['nestedStats']['entries'] + result = None + + for name in entries: + entry = entries[name] + if 'https://localhost' in name: + name = name.split('/') + name = name[-1] + if result and isinstance(result, list): + result.append(parseStats(entry)) + elif result and isinstance(result, dict): + result[name] = parseStats(entry) + else: + try: + int(name) + result = list() + result.append(parseStats(entry)) + except ValueError: + result = dict() + result[name] = parseStats(entry) + else: + if '.' in name: + names = name.split('.') + key = names[0] + value = names[1] + if result is None: + # result can be None if this branch is reached first + # + # For example, the mgmt/tm/net/trunk/NAME/stats API + # returns counters.bitsIn before anything else. + result = dict() + result[key] = dict() + elif key not in result: + result[key] = dict() + elif result[key] is None: + result[key] = dict() + result[key][value] = parseStats(entry) + else: + if result and isinstance(result, list): + result.append(parseStats(entry)) + elif result and isinstance(result, dict): + result[name] = parseStats(entry) + else: + try: + int(name) + result = list() + result.append(parseStats(entry)) + except ValueError: + result = dict() + result[name] = parseStats(entry) + return result diff --git a/plugins/module_utils/network/fortianalyzer/__init__.py b/plugins/module_utils/network/fortianalyzer/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/plugins/module_utils/network/fortianalyzer/common.py b/plugins/module_utils/network/fortianalyzer/common.py new file mode 100644 index 0000000000..546f71aa12 --- /dev/null +++ b/plugins/module_utils/network/fortianalyzer/common.py @@ -0,0 +1,292 @@ +# This code is part of Ansible, but is an independent component. +# This particular file snippet, and this file snippet only, is BSD licensed. +# Modules you write using this snippet, which is embedded dynamically by Ansible +# still belong to the author of the module, and may assign their own license +# to the complete work. +# +# (c) 2017 Fortinet, Inc +# All rights reserved. 
+# +# Redistribution and use in source and binary forms, with or without modification, +# are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + + +# BEGIN STATIC DATA AND MESSAGES +class FAZMethods: + GET = "get" + SET = "set" + EXEC = "exec" + EXECUTE = "exec" + UPDATE = "update" + ADD = "add" + DELETE = "delete" + REPLACE = "replace" + CLONE = "clone" + MOVE = "move" + + +BASE_HEADERS = { + 'Content-Type': 'application/json', + 'Accept': 'application/json' +} + + +# FAZ RETURN CODES +FAZ_RC = { + "faz_return_codes": { + 0: { + "msg": "OK", + "changed": True, + "stop_on_success": True + }, + -100000: { + "msg": "Module returned without actually running anything. " + "Check parameters, and please contact the authors if needed.", + "failed": True + }, + -2: { + "msg": "Object already exists.", + "skipped": True, + "changed": False, + "good_codes": [0, -2] + }, + -6: { + "msg": "Invalid Url. Sometimes this can happen because the path is mapped to a hostname or object that" + " doesn't exist. Double check your input object parameters." + }, + -3: { + "msg": "Object doesn't exist.", + "skipped": True, + "changed": False, + "good_codes": [0, -3] + }, + -10131: { + "msg": "Object dependency failed. Do all named objects in parameters exist?", + "changed": False, + "skipped": True + }, + -9998: { + "msg": "Duplicate object. Try using mode='set', if using add. STOPPING. Use 'ignore_errors=yes' in playbook" + "to override and mark successful.", + }, + -20042: { + "msg": "Device Unreachable.", + "skipped": True + }, + -10033: { + "msg": "Duplicate object. Try using mode='set', if using add.", + "changed": False, + "skipped": True + }, + -10000: { + "msg": "Duplicate object. Try using mode='set', if using add.", + "changed": False, + "skipped": True + }, + -20010: { + "msg": "Device already added to FortiAnalyzer. Serial number already in use.", + "good_codes": [0, -20010], + "changed": False, + "stop_on_failure": False + }, + -20002: { + "msg": "Invalid Argument -- Does this Device exist on FortiAnalyzer?", + "changed": False, + "skipped": True, + } + } +} + +DEFAULT_RESULT_OBJ = (-100000, {"msg": "Nothing Happened. Check that handle_response is being called!"}) +FAIL_SOCKET_MSG = {"msg": "Socket Path Empty! The persistent connection manager is messed up. 
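The govern_response handler defined further below resolves these canned defaults with two plain dict lookups; for instance:

# Illustrative only: resolving canned defaults for a return code
# (mirrors the FAZ_RC lookup inside govern_response below).
from ansible_collections.community.general.plugins.module_utils.network.fortianalyzer.common import FAZ_RC

rc = -2  # "Object already exists."
rc_data = FAZ_RC.get('faz_return_codes').get(rc) or {}

print(rc_data.get('msg'))             # Object already exists.
print(rc_data.get('skipped', False))  # True
print(rc_data.get('good_codes'))      # [0, -2]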
" + "Try again in a few moments."} + + +# BEGIN ERROR EXCEPTIONS +class FAZBaseException(Exception): + """Wrapper to catch the unexpected""" + + def __init__(self, msg=None, *args, **kwargs): + if msg is None: + msg = "An exception occurred within the fortianalyzer.py httpapi connection plugin." + super(FAZBaseException, self).__init__(msg, *args) + +# END ERROR CLASSES + + +# BEGIN CLASSES +class FAZCommon(object): + + @staticmethod + def format_request(method, url, *args, **kwargs): + """ + Formats the payload from the module, into a payload the API handler can use. + + :param url: Connection URL to access + :type url: string + :param method: The preferred API Request method (GET, ADD, POST, etc....) + :type method: basestring + :param kwargs: The payload dictionary from the module to be converted. + + :return: Properly formatted dictionary payload for API Request via Connection Plugin. + :rtype: dict + """ + + params = [{"url": url}] + if args: + for arg in args: + params[0].update(arg) + if kwargs: + keylist = list(kwargs) + for k in keylist: + kwargs[k.replace("__", "-")] = kwargs.pop(k) + if method == "get" or method == "clone": + params[0].update(kwargs) + else: + if kwargs.get("data", False): + params[0]["data"] = kwargs["data"] + else: + params[0]["data"] = kwargs + return params + + @staticmethod + def split_comma_strings_into_lists(obj): + """ + Splits a CSV String into a list. Also takes a dictionary, and converts any CSV strings in any key, to a list. + + :param obj: object in CSV format to be parsed. + :type obj: str or dict + + :return: A list containing the CSV items. + :rtype: list + """ + return_obj = () + if isinstance(obj, dict): + if len(obj) > 0: + for k, v in obj.items(): + if isinstance(v, str): + new_list = list() + if "," in v: + new_items = v.split(",") + for item in new_items: + new_list.append(item.strip()) + obj[k] = new_list + return_obj = obj + elif isinstance(obj, str): + return_obj = obj.replace(" ", "").split(",") + + return return_obj + + @staticmethod + def cidr_to_netmask(cidr): + """ + Converts a CIDR Network string to full blown IP/Subnet format in decimal format. + Decided not use IP Address module to keep includes to a minimum. + + :param cidr: String object in CIDR format to be processed + :type cidr: str + + :return: A string object that looks like this "x.x.x.x/y.y.y.y" + :rtype: str + """ + if isinstance(cidr, str): + cidr = int(cidr) + mask = (0xffffffff >> (32 - cidr)) << (32 - cidr) + return (str((0xff000000 & mask) >> 24) + '.' + + str((0x00ff0000 & mask) >> 16) + '.' + + str((0x0000ff00 & mask) >> 8) + '.' + + str((0x000000ff & mask))) + + @staticmethod + def paramgram_child_list_override(list_overrides, paramgram, module): + """ + If a list of items was provided to a "parent" paramgram attribute, the paramgram needs to be rewritten. + The child keys of the desired attribute need to be deleted, and then that "parent" keys' contents is replaced + With the list of items that was provided. + + :param list_overrides: Contains the response from the FortiAnalyzer. + :type list_overrides: list + :param paramgram: Contains the paramgram passed to the modules' local modify function. + :type paramgram: dict + :param module: Contains the Ansible Module Object being used by the module. + :type module: classObject + + :return: A new "paramgram" refactored to allow for multiple entries being added. 
+ :rtype: dict + """ + if len(list_overrides) > 0: + for list_variable in list_overrides: + try: + list_variable = list_variable.replace("-", "_") + override_data = module.params[list_variable] + if override_data: + del paramgram[list_variable] + paramgram[list_variable] = override_data + except BaseException as e: + raise FAZBaseException("Error occurred merging custom lists for the paramgram parent: " + str(e)) + return paramgram + + @staticmethod + def syslog(module, msg): + try: + module.log(msg=msg) + except BaseException: + pass + + +# RECURSIVE FUNCTIONS START +def prepare_dict(obj): + """ + Removes any keys from a dictionary that are only specific to our use in the module. FortiAnalyzer will reject + requests with these empty/None keys in it. + + :param obj: Dictionary object to be processed. + :type obj: dict + + :return: Processed dictionary. + :rtype: dict + """ + + list_of_elems = ["mode", "adom", "host", "username", "password"] + + if isinstance(obj, dict): + obj = dict((key, prepare_dict(value)) for (key, value) in obj.items() if key not in list_of_elems) + return obj + + +def scrub_dict(obj): + """ + Removes any keys from a dictionary that are EMPTY -- this includes parent keys. FortiAnalyzer doesn't + like empty keys in dictionaries + + :param obj: Dictionary object to be processed. + :type obj: dict + + :return: Processed dictionary. + :rtype: dict + """ + + if isinstance(obj, dict): + return dict((k, scrub_dict(v)) for k, v in obj.items() if v and scrub_dict(v)) + else: + return obj diff --git a/plugins/module_utils/network/fortianalyzer/fortianalyzer.py b/plugins/module_utils/network/fortianalyzer/fortianalyzer.py new file mode 100644 index 0000000000..94d83cd7ef --- /dev/null +++ b/plugins/module_utils/network/fortianalyzer/fortianalyzer.py @@ -0,0 +1,477 @@ +# This code is part of Ansible, but is an independent component. +# This particular file snippet, and this file snippet only, is BSD licensed. +# Modules you write using this snippet, which is embedded dynamically by Ansible +# still belong to the author of the module, and may assign their own license +# to the complete work. +# +# (c) 2017 Fortinet, Inc +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without modification, +# are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
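prepare_dict and scrub_dict above can be chained before a payload is sent: the first strips the connection-only keys, the second drops empty values recursively. A short demonstration:

# Illustrative only: stripping connection-only keys, then empty values.
from ansible_collections.community.general.plugins.module_utils.network.fortianalyzer.common import (
    prepare_dict, scrub_dict)

payload = {"adom": "root", "username": "admin", "name": "dev1",
           "meta": {"notes": None}, "tags": []}

print(prepare_dict(payload))
# {'name': 'dev1', 'meta': {'notes': None}, 'tags': []}
print(scrub_dict(prepare_dict(payload)))
# {'name': 'dev1'}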
+# + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + + +from ansible_collections.community.general.plugins.module_utils.network.fortianalyzer.common import FAZ_RC +from ansible_collections.community.general.plugins.module_utils.network.fortianalyzer.common import FAZBaseException +from ansible_collections.community.general.plugins.module_utils.network.fortianalyzer.common import FAZCommon +from ansible_collections.community.general.plugins.module_utils.network.fortianalyzer.common import scrub_dict +from ansible_collections.community.general.plugins.module_utils.network.fortianalyzer.common import FAZMethods + + +# ACTIVE BUG WITH OUR DEBUG IMPORT CALL - BECAUSE IT'S UNDER MODULE_UTILITIES +# WHEN module_common.recursive_finder() runs under the module loader, it looks for this namespace debug import +# and because it's not there, it always fails, regardless of it being under a try/catch here. +# we're going to move it to a different namespace. +# # check for debug lib +# try: +# from ansible.module_utils.network.fortianalyzer.fortianalyzer_debug import debug_dump +# HAS_FAZ_DEBUG = True +# except: +# HAS_FAZ_DEBUG = False + + +# BEGIN HANDLER CLASSES +class FortiAnalyzerHandler(object): + def __init__(self, conn, module): + self._conn = conn + self._module = module + self._tools = FAZCommon + self._uses_workspace = None + self._uses_adoms = None + self._locked_adom_list = list() + self._lock_info = None + + self.workspace_check() + if self._uses_workspace: + self.get_lock_info(adom=self._module.paramgram["adom"]) + + def process_request(self, url, datagram, method): + """ + Formats and Runs the API Request via Connection Plugin. Streamlined for use from Modules. + + :param url: Connection URL to access + :type url: string + :param datagram: The prepared payload for the API Request in dictionary format + :type datagram: dict + :param method: The preferred API Request method (GET, ADD, POST, etc....) + :type method: basestring + + :return: Dictionary containing results of the API Request via Connection Plugin. + :rtype: dict + """ + try: + adom = self._module.paramgram["adom"] + if self.uses_workspace and adom not in self._locked_adom_list and method != FAZMethods.GET: + self.lock_adom(adom=adom) + except BaseException as err: + raise FAZBaseException(err) + + data = self._tools.format_request(method, url, **datagram) + response = self._conn.send_request(method, data) + + try: + adom = self._module.paramgram["adom"] + if self.uses_workspace and adom in self._locked_adom_list \ + and response[0] == 0 and method != FAZMethods.GET: + self.commit_changes(adom=adom) + except BaseException as err: + raise FAZBaseException(err) + + # if HAS_FAZ_DEBUG: + # try: + # debug_dump(response, datagram, self._module.paramgram, url, method) + # except BaseException: + # pass + + return response + + def workspace_check(self): + """ + Checks FortiAnalyzer for the use of Workspace mode. + """ + url = "/cli/global/system/global" + data = {"fields": ["workspace-mode", "adom-status"]} + resp_obj = self.process_request(url, data, FAZMethods.GET) + try: + if resp_obj[1]["workspace-mode"] in ["workflow", "normal"]: + self.uses_workspace = True + elif resp_obj[1]["workspace-mode"] == "disabled": + self.uses_workspace = False + except KeyError: + self.uses_workspace = False + except BaseException as err: + raise FAZBaseException(msg="Couldn't determine workspace-mode in the plugin. 
Error: " + str(err)) + try: + if resp_obj[1]["adom-status"] in [1, "enable"]: + self.uses_adoms = True + else: + self.uses_adoms = False + except KeyError: + self.uses_adoms = False + except BaseException as err: + raise FAZBaseException(msg="Couldn't determine adom-status in the plugin. Error: " + str(err)) + + def run_unlock(self): + """ + Checks for ADOM status, if locked, it will unlock + """ + for adom_locked in self._locked_adom_list: + self.unlock_adom(adom_locked) + + def lock_adom(self, adom=None): + """ + Locks an ADOM for changes + """ + if not adom or adom == "root": + url = "/dvmdb/adom/root/workspace/lock" + else: + if adom.lower() == "global": + url = "/dvmdb/global/workspace/lock/" + else: + url = "/dvmdb/adom/{adom}/workspace/lock/".format(adom=adom) + datagram = {} + data = self._tools.format_request(FAZMethods.EXEC, url, **datagram) + resp_obj = self._conn.send_request(FAZMethods.EXEC, data) + code = resp_obj[0] + if code == 0 and resp_obj[1]["status"]["message"].lower() == "ok": + self.add_adom_to_lock_list(adom) + else: + lockinfo = self.get_lock_info(adom=adom) + self._module.fail_json(msg=("An error occurred trying to lock the adom. Error: " + + str(resp_obj) + ", LOCK INFO: " + str(lockinfo))) + return resp_obj + + def unlock_adom(self, adom=None): + """ + Unlocks an ADOM after changes + """ + if not adom or adom == "root": + url = "/dvmdb/adom/root/workspace/unlock" + else: + if adom.lower() == "global": + url = "/dvmdb/global/workspace/unlock/" + else: + url = "/dvmdb/adom/{adom}/workspace/unlock/".format(adom=adom) + datagram = {} + data = self._tools.format_request(FAZMethods.EXEC, url, **datagram) + resp_obj = self._conn.send_request(FAZMethods.EXEC, data) + code = resp_obj[0] + if code == 0 and resp_obj[1]["status"]["message"].lower() == "ok": + self.remove_adom_from_lock_list(adom) + else: + self._module.fail_json(msg=("An error occurred trying to unlock the adom. Error: " + str(resp_obj))) + return resp_obj + + def get_lock_info(self, adom=None): + """ + Gets ADOM lock info so it can be displayed with the error messages. Or if determined to be locked by ansible + for some reason, then unlock it. + """ + if not adom or adom == "root": + url = "/dvmdb/adom/root/workspace/lockinfo" + else: + if adom.lower() == "global": + url = "/dvmdb/global/workspace/lockinfo/" + else: + url = "/dvmdb/adom/{adom}/workspace/lockinfo/".format(adom=adom) + datagram = {} + data = self._tools.format_request(FAZMethods.GET, url, **datagram) + resp_obj = self._conn.send_request(FAZMethods.GET, data) + code = resp_obj[0] + if code != 0: + self._module.fail_json(msg=("An error occurred trying to get the ADOM Lock Info. Error: " + str(resp_obj))) + elif code == 0: + self._lock_info = resp_obj[1] + return resp_obj + + def commit_changes(self, adom=None, aux=False): + """ + Commits changes to an ADOM + """ + if not adom or adom == "root": + url = "/dvmdb/adom/root/workspace/commit" + else: + if aux: + url = "/pm/config/adom/{adom}/workspace/commit".format(adom=adom) + else: + if adom.lower() == "global": + url = "/dvmdb/global/workspace/commit/" + else: + url = "/dvmdb/adom/{adom}/workspace/commit".format(adom=adom) + datagram = {} + data = self._tools.format_request(FAZMethods.EXEC, url, **datagram) + resp_obj = self._conn.send_request(FAZMethods.EXEC, data) + code = resp_obj[0] + if code != 0: + self._module.fail_json(msg=("An error occurred trying to commit changes to the adom. 
Error: " + + str(resp_obj))) + + def govern_response(self, module, results, msg=None, good_codes=None, + stop_on_fail=None, stop_on_success=None, skipped=None, + changed=None, unreachable=None, failed=None, success=None, changed_if_success=None, + ansible_facts=None): + """ + This function will attempt to apply default values to canned responses from FortiAnalyzer we know of. + This saves time, and turns the response in the module into a "one-liner", while still giving us... + the flexibility to directly use return_response in modules if we have too. This function saves repeated code. + + :param module: The Ansible Module CLASS object, used to run fail/exit json + :type module: object + :param msg: An overridable custom message from the module that called this. + :type msg: string + :param results: A dictionary object containing an API call results + :type results: dict + :param good_codes: A list of exit codes considered successful from FortiAnalyzer + :type good_codes: list + :param stop_on_fail: If true, stops playbook run when return code is NOT IN good codes (default: true) + :type stop_on_fail: boolean + :param stop_on_success: If true, stops playbook run when return code is IN good codes (default: false) + :type stop_on_success: boolean + :param changed: If True, tells Ansible that object was changed (default: false) + :type skipped: boolean + :param skipped: If True, tells Ansible that object was skipped (default: false) + :type skipped: boolean + :param unreachable: If True, tells Ansible that object was unreachable (default: false) + :type unreachable: boolean + :param failed: If True, tells Ansible that execution was a failure. Overrides good_codes. (default: false) + :type unreachable: boolean + :param success: If True, tells Ansible that execution was a success. Overrides good_codes. (default: false) + :type unreachable: boolean + :param changed_if_success: If True, defaults to changed if successful if you specify or not" + :type changed_if_success: boolean + :param ansible_facts: A prepared dictionary of ansible facts from the execution. + :type ansible_facts: dict + """ + if module is None and results is None: + raise FAZBaseException("govern_response() was called without a module and/or results tuple! Fix!") + # Get the Return code from results + try: + rc = results[0] + except BaseException: + raise FAZBaseException("govern_response() was called without the return code at results[0]") + + # init a few items + rc_data = None + + # Get the default values for the said return code. + try: + rc_codes = FAZ_RC.get('faz_return_codes') + rc_data = rc_codes.get(rc) + except BaseException: + pass + + if not rc_data: + rc_data = {} + # ONLY add to overrides if not none -- This is very important that the keys aren't added at this stage + # if they are empty. And there aren't that many, so let's just do a few if then statements. 
+ if good_codes is not None: + rc_data["good_codes"] = good_codes + if stop_on_fail is not None: + rc_data["stop_on_fail"] = stop_on_fail + if stop_on_success is not None: + rc_data["stop_on_success"] = stop_on_success + if skipped is not None: + rc_data["skipped"] = skipped + if changed is not None: + rc_data["changed"] = changed + if unreachable is not None: + rc_data["unreachable"] = unreachable + if failed is not None: + rc_data["failed"] = failed + if success is not None: + rc_data["success"] = success + if changed_if_success is not None: + rc_data["changed_if_success"] = changed_if_success + if results is not None: + rc_data["results"] = results + if msg is not None: + rc_data["msg"] = msg + if ansible_facts is None: + rc_data["ansible_facts"] = {} + else: + rc_data["ansible_facts"] = ansible_facts + + return self.return_response(module=module, + results=results, + msg=rc_data.get("msg", "NULL"), + good_codes=rc_data.get("good_codes", (0,)), + stop_on_fail=rc_data.get("stop_on_fail", True), + stop_on_success=rc_data.get("stop_on_success", False), + skipped=rc_data.get("skipped", False), + changed=rc_data.get("changed", False), + changed_if_success=rc_data.get("changed_if_success", False), + unreachable=rc_data.get("unreachable", False), + failed=rc_data.get("failed", False), + success=rc_data.get("success", False), + ansible_facts=rc_data.get("ansible_facts", dict())) + + def return_response(self, module, results, msg="NULL", good_codes=(0,), + stop_on_fail=True, stop_on_success=False, skipped=False, + changed=False, unreachable=False, failed=False, success=False, changed_if_success=True, + ansible_facts=()): + """ + This function controls the logout and error reporting after a method or function runs. The exit_json for + ansible comes from logic within this function. If this function returns just the msg, it means playbook + execution should continue. It is called from the ansible module, or from the self.govern_response function. + + :param module: The Ansible Module CLASS object, used to run fail/exit json + :type module: object + :param msg: An overridable custom message from the module that called this. + :type msg: string + :param results: A dictionary object containing an API call results + :type results: dict + :param good_codes: A list of exit codes considered successful from FortiAnalyzer + :type good_codes: list + :param stop_on_fail: If true, stops playbook run when return code is NOT IN good codes (default: true) + :type stop_on_fail: boolean + :param stop_on_success: If true, stops playbook run when return code is IN good codes (default: false) + :type stop_on_success: boolean + :param changed: If True, tells Ansible that object was changed (default: false) + :type changed: boolean + :param skipped: If True, tells Ansible that object was skipped (default: false) + :type skipped: boolean + :param unreachable: If True, tells Ansible that object was unreachable (default: false) + :type unreachable: boolean + :param failed: If True, tells Ansible that execution was a failure. Overrides good_codes. (default: false) + :type failed: boolean + :param success: If True, tells Ansible that execution was a success. Overrides good_codes. (default: false) + :type success: boolean + :param changed_if_success: If True, the result defaults to changed on success, whether or not changed was specified. + :type changed_if_success: boolean + :param ansible_facts: A prepared dictionary of ansible facts from the execution.
+ :type ansible_facts: dict + + :return: A string object that contains an error message + :rtype: str + """ + + # VALIDATION ERROR + if (len(results) == 0) or (failed and success) or (changed and unreachable): + module.exit_json(msg="Handle_response was called with no results, or conflicting failed/success or " + "changed/unreachable parameters. Fix the exit code on module. " + "Generic Failure", failed=True) + + # IDENTIFY SUCCESS/FAIL IF NOT DEFINED + if not failed and not success: + if len(results) > 0: + if results[0] not in good_codes: + failed = True + elif results[0] in good_codes: + success = True + + if len(results) > 0: + # IF NO MESSAGE WAS SUPPLIED, GET IT FROM THE RESULTS, IF THAT DOESN'T WORK, THEN WRITE AN ERROR MESSAGE + if msg == "NULL": + try: + msg = results[1]['status']['message'] + except BaseException: + msg = "No status message returned at results[1][status][message], " \ + "and none supplied to msg parameter for handle_response." + + if failed: + # BECAUSE SKIPPED/FAILED WILL OFTEN OCCUR ON CODES THAT DON'T GET INCLUDED, THEY ARE CONSIDERED FAILURES + # HOWEVER, THEY ARE MUTUALLY EXCLUSIVE, SO IF IT IS MARKED SKIPPED OR UNREACHABLE BY THE MODULE LOGIC + # THEN REMOVE THE FAILED FLAG SO IT DOESN'T OVERRIDE THE DESIRED STATUS OF SKIPPED OR UNREACHABLE. + if failed and skipped: + failed = False + if failed and unreachable: + failed = False + if stop_on_fail: + if self._uses_workspace: + try: + self.run_unlock() + except BaseException as err: + raise FAZBaseException(msg=("Couldn't unlock ADOM! Error: " + str(err))) + module.exit_json(msg=msg, failed=failed, changed=changed, unreachable=unreachable, skipped=skipped, + results=results[1], ansible_facts=ansible_facts, rc=results[0], + invocation={"module_args": ansible_facts["ansible_params"]}) + elif success: + if changed_if_success: + changed = True + success = False + if stop_on_success: + if self._uses_workspace: + try: + self.run_unlock() + except BaseException as err: + raise FAZBaseException(msg=("Couldn't unlock ADOM! Error: " + str(err))) + module.exit_json(msg=msg, success=success, changed=changed, unreachable=unreachable, + skipped=skipped, results=results[1], ansible_facts=ansible_facts, rc=results[0], + invocation={"module_args": ansible_facts["ansible_params"]}) + return msg + + @staticmethod + def construct_ansible_facts(response, ansible_params, paramgram, *args, **kwargs): + """ + Constructs a dictionary to return to ansible facts, containing various information about the execution. + + :param response: Contains the response from the FortiAnalyzer. + :type response: dict + :param ansible_params: Contains the parameters Ansible was called with. + :type ansible_params: dict + :param paramgram: Contains the paramgram passed to the modules' local modify function. + :type paramgram: dict + :param args: Free-form arguments that could be added. + :param kwargs: Free-form keyword arguments that could be added. + + :return: A dictionary containing lots of information to append to Ansible Facts. 
+ :rtype: dict + """ + + facts = { + "response": response, + "ansible_params": scrub_dict(ansible_params), + "paramgram": scrub_dict(paramgram), + } + + if args: + facts["custom_args"] = args + if kwargs: + facts.update(kwargs) + + return facts + + @property + def uses_workspace(self): + return self._uses_workspace + + @uses_workspace.setter + def uses_workspace(self, val): + self._uses_workspace = val + + @property + def uses_adoms(self): + return self._uses_adoms + + @uses_adoms.setter + def uses_adoms(self, val): + self._uses_adoms = val + + def add_adom_to_lock_list(self, adom): + if adom not in self._locked_adom_list: + self._locked_adom_list.append(adom) + + def remove_adom_from_lock_list(self, adom): + if adom in self._locked_adom_list: + self._locked_adom_list.remove(adom) diff --git a/plugins/module_utils/network/ftd/__init__.py b/plugins/module_utils/network/ftd/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/plugins/module_utils/network/ftd/common.py b/plugins/module_utils/network/ftd/common.py new file mode 100644 index 0000000000..de3f459d5b --- /dev/null +++ b/plugins/module_utils/network/ftd/common.py @@ -0,0 +1,238 @@ +# Copyright (c) 2018 Cisco and/or its affiliates. +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+# +import re + +from ansible.module_utils._text import to_text +from ansible.module_utils.common.collections import is_string +from ansible.module_utils.six import iteritems + +INVALID_IDENTIFIER_SYMBOLS = r'[^a-zA-Z0-9_]' + +IDENTITY_PROPERTIES = ['id', 'version', 'ruleId'] +NON_COMPARABLE_PROPERTIES = IDENTITY_PROPERTIES + ['isSystemDefined', 'links', 'token', 'rulePosition'] + + +class HTTPMethod: + GET = 'get' + POST = 'post' + PUT = 'put' + DELETE = 'delete' + + +class ResponseParams: + SUCCESS = 'success' + STATUS_CODE = 'status_code' + RESPONSE = 'response' + + +class FtdConfigurationError(Exception): + def __init__(self, msg, obj=None): + super(FtdConfigurationError, self).__init__(msg) + self.msg = msg + self.obj = obj + + +class FtdServerError(Exception): + def __init__(self, response, code): + super(FtdServerError, self).__init__(response) + self.response = response + self.code = code + + +class FtdUnexpectedResponse(Exception): + """The exception to be raised in case of unexpected responses from 3d parties.""" + pass + + +def construct_ansible_facts(response, params): + facts = dict() + if response: + response_body = response['items'] if 'items' in response else response + if params.get('register_as'): + facts[params['register_as']] = response_body + elif type(response_body) is dict and response_body.get('name') and response_body.get('type'): + object_name = re.sub(INVALID_IDENTIFIER_SYMBOLS, '_', response_body['name'].lower()) + fact_name = '%s_%s' % (response_body['type'], object_name) + facts[fact_name] = response_body + return facts + + +def copy_identity_properties(source_obj, dest_obj): + for property_name in IDENTITY_PROPERTIES: + if property_name in source_obj: + dest_obj[property_name] = source_obj[property_name] + return dest_obj + + +def is_object_ref(d): + """ + Checks if a dictionary is a reference object. The dictionary is considered to be a + reference object when it contains non-empty 'id' and 'type' fields. + + :type d: dict + :return: True if passed dictionary is a reference object, otherwise False + """ + has_id = 'id' in d.keys() and d['id'] + has_type = 'type' in d.keys() and d['type'] + return has_id and has_type + + +def equal_object_refs(d1, d2): + """ + Checks whether two references point to the same object. + + :type d1: dict + :type d2: dict + :return: True if passed references point to the same object, otherwise False + """ + have_equal_ids = d1['id'] == d2['id'] + have_equal_types = d1['type'] == d2['type'] + return have_equal_ids and have_equal_types + + +def equal_lists(l1, l2): + """ + Checks whether two lists are equal. The order of elements in the arrays is important. + + :type l1: list + :type l2: list + :return: True if passed lists, their elements and order of elements are equal. Otherwise, returns False. + """ + if len(l1) != len(l2): + return False + + for v1, v2 in zip(l1, l2): + if not equal_values(v1, v2): + return False + + return True + + +def equal_dicts(d1, d2, compare_by_reference=True): + """ + Checks whether two dictionaries are equal. If `compare_by_reference` is set to True, dictionaries referencing + objects are compared using `equal_object_refs` method. Otherwise, every key and value is checked. + + :type d1: dict + :type d2: dict + :param compare_by_reference: if True, dictionaries referencing objects are compared using `equal_object_refs` method + :return: True if passed dicts are equal. Otherwise, returns False. 
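The identity and reference helpers above are easiest to see on hand-built data; a short sketch, assuming the collection tree from this commit is importable:

# Illustrative only: FTD reference-object helpers on sample data.
from ansible_collections.community.general.plugins.module_utils.network.ftd.common import (
    copy_identity_properties, is_object_ref)

ref = {'id': 'abc-123', 'type': 'networkobject', 'name': 'any-ipv4'}
print(is_object_ref(ref))                      # True: non-empty id and type
print(is_object_ref({'id': '', 'type': 'x'}))  # False: empty id

existing = {'id': 'abc-123', 'version': 'v7', 'ruleId': 9}
desired = {'name': 'any-ipv4'}
print(copy_identity_properties(existing, desired))
# {'name': 'any-ipv4', 'id': 'abc-123', 'version': 'v7', 'ruleId': 9}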
+ """ + if compare_by_reference and is_object_ref(d1) and is_object_ref(d2): + return equal_object_refs(d1, d2) + + if len(d1) != len(d2): + return False + + for key, v1 in d1.items(): + if key not in d2: + return False + + v2 = d2[key] + if not equal_values(v1, v2): + return False + + return True + + +def equal_values(v1, v2): + """ + Checks whether types and content of two values are the same. In case of complex objects, the method might be + called recursively. + + :param v1: first value + :param v2: second value + :return: True if types and content of passed values are equal. Otherwise, returns False. + :rtype: bool + """ + + # string-like values might have same text but different types, so checking them separately + if is_string(v1) and is_string(v2): + return to_text(v1) == to_text(v2) + + if type(v1) != type(v2): + return False + value_type = type(v1) + + if value_type == list: + return equal_lists(v1, v2) + elif value_type == dict: + return equal_dicts(v1, v2) + else: + return v1 == v2 + + +def equal_objects(d1, d2): + """ + Checks whether two objects are equal. Ignores special object properties (e.g. 'id', 'version') and + properties with None and empty values. In case properties contains a reference to the other object, + only object identities (ids and types) are checked. Also, if an array field contains multiple references + to the same object, duplicates are ignored when comparing objects. + + :type d1: dict + :type d2: dict + :return: True if passed objects and their properties are equal. Otherwise, returns False. + """ + + def prepare_data_for_comparison(d): + d = dict((k, d[k]) for k in d.keys() if k not in NON_COMPARABLE_PROPERTIES and d[k]) + d = delete_ref_duplicates(d) + return d + + d1 = prepare_data_for_comparison(d1) + d2 = prepare_data_for_comparison(d2) + return equal_dicts(d1, d2, compare_by_reference=False) + + +def delete_ref_duplicates(d): + """ + Removes reference duplicates from array fields: if an array contains multiple items and some of + them refer to the same object, only unique references are preserved (duplicates are removed). + + :param d: dict with data + :type d: dict + :return: dict without reference duplicates + """ + + def delete_ref_duplicates_from_list(refs): + if all(type(i) == dict and is_object_ref(i) for i in refs): + unique_refs = set() + unique_list = list() + for i in refs: + key = (i['id'], i['type']) + if key not in unique_refs: + unique_refs.add(key) + unique_list.append(i) + + return list(unique_list) + + else: + return refs + + if not d: + return d + + modified_d = {} + for k, v in iteritems(d): + if type(v) == list: + modified_d[k] = delete_ref_duplicates_from_list(v) + elif type(v) == dict: + modified_d[k] = delete_ref_duplicates(v) + else: + modified_d[k] = v + return modified_d diff --git a/plugins/module_utils/network/ftd/configuration.py b/plugins/module_utils/network/ftd/configuration.py new file mode 100644 index 0000000000..d8c92758f5 --- /dev/null +++ b/plugins/module_utils/network/ftd/configuration.py @@ -0,0 +1,565 @@ +# Copyright (c) 2018 Cisco and/or its affiliates. +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. 
+# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# +import copy +from functools import partial + +from ansible_collections.community.general.plugins.module_utils.network.ftd.common import HTTPMethod, equal_objects, FtdConfigurationError, \ + FtdServerError, ResponseParams, copy_identity_properties, FtdUnexpectedResponse +from ansible_collections.community.general.plugins.module_utils.network.ftd.fdm_swagger_client import OperationField, ValidationError +from ansible.module_utils.six import iteritems + +DEFAULT_PAGE_SIZE = 10 +DEFAULT_OFFSET = 0 + +UNPROCESSABLE_ENTITY_STATUS = 422 +INVALID_UUID_ERROR_MESSAGE = "Validation failed due to an invalid UUID" +DUPLICATE_NAME_ERROR_MESSAGE = "Validation failed due to a duplicate name" + +MULTIPLE_DUPLICATES_FOUND_ERROR = ( + "Multiple objects matching specified filters are found. " + "Please, define filters more precisely to match one object exactly." +) +DUPLICATE_ERROR = ( + "Cannot add a new object. " + "An object with the same name but different parameters already exists." +) +ADD_OPERATION_NOT_SUPPORTED_ERROR = ( + "Cannot add a new object while executing an upsert request. " + "Creation of objects with this type is not supported." +) + +PATH_PARAMS_FOR_DEFAULT_OBJ = {'objId': 'default'} + + +class OperationNamePrefix: + ADD = 'add' + EDIT = 'edit' + GET = 'get' + DELETE = 'delete' + UPSERT = 'upsert' + + +class QueryParams: + FILTER = 'filter' + + +class ParamName: + QUERY_PARAMS = 'query_params' + PATH_PARAMS = 'path_params' + DATA = 'data' + FILTERS = 'filters' + + +class CheckModeException(Exception): + pass + + +class FtdInvalidOperationNameError(Exception): + def __init__(self, operation_name): + super(FtdInvalidOperationNameError, self).__init__(operation_name) + self.operation_name = operation_name + + +class OperationChecker(object): + + @classmethod + def is_add_operation(cls, operation_name, operation_spec): + """ + Check if operation defined with 'operation_name' is add object operation according to 'operation_spec'. + + :param operation_name: name of the operation being called by the user + :type operation_name: str + :param operation_spec: specification of the operation being called by the user + :type operation_spec: dict + :return: True if the called operation is add object operation, otherwise False + :rtype: bool + """ + # Some endpoints have non-CRUD operations, so checking operation name is required in addition to the HTTP method + return operation_name.startswith(OperationNamePrefix.ADD) and is_post_request(operation_spec) + + @classmethod + def is_edit_operation(cls, operation_name, operation_spec): + """ + Check if operation defined with 'operation_name' is edit object operation according to 'operation_spec'. 
+ + :param operation_name: name of the operation being called by the user + :type operation_name: str + :param operation_spec: specification of the operation being called by the user + :type operation_spec: dict + :return: True if the called operation is edit object operation, otherwise False + :rtype: bool + """ + # Some endpoints have non-CRUD operations, so checking operation name is required in addition to the HTTP method + return operation_name.startswith(OperationNamePrefix.EDIT) and is_put_request(operation_spec) + + @classmethod + def is_delete_operation(cls, operation_name, operation_spec): + """ + Check if operation defined with 'operation_name' is delete object operation according to 'operation_spec'. + + :param operation_name: name of the operation being called by the user + :type operation_name: str + :param operation_spec: specification of the operation being called by the user + :type operation_spec: dict + :return: True if the called operation is delete object operation, otherwise False + :rtype: bool + """ + # Some endpoints have non-CRUD operations, so checking operation name is required in addition to the HTTP method + return operation_name.startswith(OperationNamePrefix.DELETE) \ + and operation_spec[OperationField.METHOD] == HTTPMethod.DELETE + + @classmethod + def is_get_list_operation(cls, operation_name, operation_spec): + """ + Check if operation defined with 'operation_name' is get list of objects operation according to 'operation_spec'. + + :param operation_name: name of the operation being called by the user + :type operation_name: str + :param operation_spec: specification of the operation being called by the user + :type operation_spec: dict + :return: True if the called operation is get a list of objects operation, otherwise False + :rtype: bool + """ + return operation_spec[OperationField.METHOD] == HTTPMethod.GET \ + and operation_spec[OperationField.RETURN_MULTIPLE_ITEMS] + + @classmethod + def is_get_operation(cls, operation_name, operation_spec): + """ + Check if operation defined with 'operation_name' is get objects operation according to 'operation_spec'. + + :param operation_name: name of the operation being called by the user + :type operation_name: str + :param operation_spec: specification of the operation being called by the user + :type operation_spec: dict + :return: True if the called operation is get object operation, otherwise False + :rtype: bool + """ + return operation_spec[OperationField.METHOD] == HTTPMethod.GET \ + and not operation_spec[OperationField.RETURN_MULTIPLE_ITEMS] + + @classmethod + def is_upsert_operation(cls, operation_name): + """ + Check if operation defined with 'operation_name' is upsert objects operation according to 'operation_name'. + + :param operation_name: name of the operation being called by the user + :type operation_name: str + :return: True if the called operation is upsert object operation, otherwise False + :rtype: bool + """ + return operation_name.startswith(OperationNamePrefix.UPSERT) + + @classmethod + def is_find_by_filter_operation(cls, operation_name, params, operation_spec): + """ + Checks whether the called operation is 'find by filter'. This operation fetches all objects and finds + the matching ones by the given filter. As filtering is done on the client side, this operation should be used + only when selected filters are not implemented on the server side. 
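Of the checks above, only is_upsert_operation decides purely on the operation name; the add/edit/delete/get checks also consult the Swagger operation spec. A minimal call, assuming the collection tree from this commit is importable:

# Illustrative only: the name-prefix check needs no operation spec.
from ansible_collections.community.general.plugins.module_utils.network.ftd.configuration import OperationChecker

print(OperationChecker.is_upsert_operation('upsertNetworkObject'))  # True
print(OperationChecker.is_upsert_operation('addNetworkObject'))     # False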
+ + :param operation_name: name of the operation being called by the user + :type operation_name: str + :param operation_spec: specification of the operation being called by the user + :type operation_spec: dict + :param params: params - params should contain 'filters' + :return: True if the called operation is find by filter, otherwise False + :rtype: bool + """ + is_get_list = cls.is_get_list_operation(operation_name, operation_spec) + return is_get_list and ParamName.FILTERS in params and params[ParamName.FILTERS] + + @classmethod + def is_upsert_operation_supported(cls, operations): + """ + Checks if all operations required for upsert object operation are defined in 'operations'. + + :param operations: specification of the operations supported by model + :type operations: dict + :return: True if all criteria required to provide requested called operation are satisfied, otherwise False + :rtype: bool + """ + has_edit_op = next((name for name, spec in iteritems(operations) if cls.is_edit_operation(name, spec)), None) + has_get_list_op = next((name for name, spec in iteritems(operations) + if cls.is_get_list_operation(name, spec)), None) + return has_edit_op and has_get_list_op + + +class BaseConfigurationResource(object): + + def __init__(self, conn, check_mode=False): + self._conn = conn + self.config_changed = False + self._operation_spec_cache = {} + self._models_operations_specs_cache = {} + self._check_mode = check_mode + self._operation_checker = OperationChecker + self._system_info = None + + def execute_operation(self, op_name, params): + """ + Allow user request execution of simple operations(natively supported by API provider) as well as complex + operations(operations that are implemented as a set of simple operations). + + :param op_name: name of the operation being called by the user + :type op_name: str + :param params: definition of the params that operation should be executed with + :type params: dict + :return: Result of the operation being executed + :rtype: dict + """ + if self._operation_checker.is_upsert_operation(op_name): + return self.upsert_object(op_name, params) + else: + return self.crud_operation(op_name, params) + + def crud_operation(self, op_name, params): + """ + Allow user request execution of simple operations(natively supported by API provider) only. 
+ + :param op_name: name of the operation being called by the user + :type op_name: str + :param params: definition of the params that operation should be executed with + :type params: dict + :return: Result of the operation being executed + :rtype: dict + """ + op_spec = self.get_operation_spec(op_name) + if op_spec is None: + raise FtdInvalidOperationNameError(op_name) + + if self._operation_checker.is_add_operation(op_name, op_spec): + resp = self.add_object(op_name, params) + elif self._operation_checker.is_edit_operation(op_name, op_spec): + resp = self.edit_object(op_name, params) + elif self._operation_checker.is_delete_operation(op_name, op_spec): + resp = self.delete_object(op_name, params) + elif self._operation_checker.is_find_by_filter_operation(op_name, params, op_spec): + resp = list(self.get_objects_by_filter(op_name, params)) + else: + resp = self.send_general_request(op_name, params) + return resp + + def get_operation_spec(self, operation_name): + if operation_name not in self._operation_spec_cache: + self._operation_spec_cache[operation_name] = self._conn.get_operation_spec(operation_name) + return self._operation_spec_cache[operation_name] + + def get_operation_specs_by_model_name(self, model_name): + if model_name not in self._models_operations_specs_cache: + model_op_specs = self._conn.get_operation_specs_by_model_name(model_name) + self._models_operations_specs_cache[model_name] = model_op_specs + for op_name, op_spec in iteritems(model_op_specs): + self._operation_spec_cache.setdefault(op_name, op_spec) + return self._models_operations_specs_cache[model_name] + + def get_objects_by_filter(self, operation_name, params): + + def match_filters(filter_params, obj): + for k, v in iteritems(filter_params): + if k not in obj or obj[k] != v: + return False + return True + + dummy, query_params, path_params = _get_user_params(params) + # copy required params to avoid mutation of passed `params` dict + url_params = {ParamName.QUERY_PARAMS: dict(query_params), ParamName.PATH_PARAMS: dict(path_params)} + + filters = params.get(ParamName.FILTERS) or {} + if QueryParams.FILTER not in url_params[ParamName.QUERY_PARAMS] and 'name' in filters: + # most endpoints only support filtering by name, so remaining `filters` are applied on returned objects + url_params[ParamName.QUERY_PARAMS][QueryParams.FILTER] = self._stringify_name_filter(filters) + + item_generator = iterate_over_pageable_resource( + partial(self.send_general_request, operation_name=operation_name), url_params + ) + return (i for i in item_generator if match_filters(filters, i)) + + def _stringify_name_filter(self, filters): + build_version = self.get_build_version() + if build_version >= '6.4.0': + return "fts~%s" % filters['name'] + return "name:%s" % filters['name'] + + def _fetch_system_info(self): + if not self._system_info: + params = {ParamName.PATH_PARAMS: PATH_PARAMS_FOR_DEFAULT_OBJ} + self._system_info = self.send_general_request('getSystemInformation', params) + + return self._system_info + + def get_build_version(self): + system_info = self._fetch_system_info() + return system_info['databaseInfo']['buildVersion'] + + def add_object(self, operation_name, params): + def is_duplicate_name_error(err): + return err.code == UNPROCESSABLE_ENTITY_STATUS and DUPLICATE_NAME_ERROR_MESSAGE in str(err) + + try: + return self.send_general_request(operation_name, params) + except FtdServerError as e: + if is_duplicate_name_error(e): + return self._check_equality_with_existing_object(operation_name, params, e) + else: + 
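raise e
+
+ # Illustrative flow (hypothetical operation and model names): calling add_object for an object
+ # that already exists with identical attributes returns the existing object instead of failing:
+ #
+ # resource = BaseConfigurationResource(conn)
+ # resource.add_object('addNetworkObject',
+ # {'data': {'name': 'any-ipv4', 'subType': 'NETWORK',
+ # 'value': '0.0.0.0/0', 'type': 'networkobject'}})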
+
+ def _check_equality_with_existing_object(self, operation_name, params, e):
+ """
+ Looks for an existing object that caused "object duplicate" error and
+ checks whether it corresponds to the one specified in `params`.
+
+ In case a single object is found and it is equal to one we are trying
+ to create, the existing object is returned.
+
+ When the existing object is not equal to the object being created or
+ several objects are returned, an exception is raised.
+ """
+ model_name = self.get_operation_spec(operation_name)[OperationField.MODEL_NAME]
+ existing_obj = self._find_object_matching_params(model_name, params)
+
+ if existing_obj is not None:
+ if equal_objects(existing_obj, params[ParamName.DATA]):
+ return existing_obj
+ else:
+ raise FtdConfigurationError(DUPLICATE_ERROR, existing_obj)
+
+ raise e
+
+ def _find_object_matching_params(self, model_name, params):
+ get_list_operation = self._find_get_list_operation(model_name)
+ if not get_list_operation:
+ return None
+
+ data = params[ParamName.DATA]
+ if not params.get(ParamName.FILTERS):
+ params[ParamName.FILTERS] = {'name': data['name']}
+
+ obj = None
+ filtered_objs = self.get_objects_by_filter(get_list_operation, params)
+
+ # the generator is consumed here to ensure that at most one object matches the filter
+ for i, obj in enumerate(filtered_objs):
+ if i > 0:
+ raise FtdConfigurationError(MULTIPLE_DUPLICATES_FOUND_ERROR)
+
+ return obj
+
+ def _find_get_list_operation(self, model_name):
+ operations = self.get_operation_specs_by_model_name(model_name) or {}
+ return next((
+ op for op, op_spec in operations.items()
+ if self._operation_checker.is_get_list_operation(op, op_spec)), None)
+
+ def _find_get_operation(self, model_name):
+ operations = self.get_operation_specs_by_model_name(model_name) or {}
+ return next((
+ op for op, op_spec in operations.items()
+ if self._operation_checker.is_get_operation(op, op_spec)), None)
+
+ def delete_object(self, operation_name, params):
+ def is_invalid_uuid_error(err):
+ return err.code == UNPROCESSABLE_ENTITY_STATUS and INVALID_UUID_ERROR_MESSAGE in str(err)
+
+ try:
+ return self.send_general_request(operation_name, params)
+ except FtdServerError as e:
+ if is_invalid_uuid_error(e):
+ return {'status': 'Referenced object does not exist'}
+ else:
+ raise e
+
+ def edit_object(self, operation_name, params):
+ data, dummy, path_params = _get_user_params(params)
+
+ model_name = self.get_operation_spec(operation_name)[OperationField.MODEL_NAME]
+ get_operation = self._find_get_operation(model_name)
+
+ if get_operation:
+ existing_object = self.send_general_request(get_operation, {ParamName.PATH_PARAMS: path_params})
+ if not existing_object:
+ raise FtdConfigurationError('Referenced object does not exist')
+ elif equal_objects(existing_object, data):
+ return existing_object
+
+ return self.send_general_request(operation_name, params)
+
+ def send_general_request(self, operation_name, params):
+ def stop_if_check_mode():
+ if self._check_mode:
+ raise CheckModeException()
+
+ self.validate_params(operation_name, params)
+ stop_if_check_mode()
+
+ data, query_params, path_params = _get_user_params(params)
+ op_spec = self.get_operation_spec(operation_name)
+ url, method = op_spec[OperationField.URL], op_spec[OperationField.METHOD]
+
+ return self._send_request(url, method, data, path_params, query_params)
+
+ def _send_request(self, url_path, http_method, body_params=None, path_params=None, query_params=None):
+ def raise_for_failure(resp):
+ if not resp[ResponseParams.SUCCESS]:
+ raise FtdServerError(resp[ResponseParams.RESPONSE],
resp[ResponseParams.STATUS_CODE]) + + response = self._conn.send_request(url_path=url_path, http_method=http_method, body_params=body_params, + path_params=path_params, query_params=query_params) + raise_for_failure(response) + if http_method != HTTPMethod.GET: + self.config_changed = True + return response[ResponseParams.RESPONSE] + + def validate_params(self, operation_name, params): + report = {} + op_spec = self.get_operation_spec(operation_name) + data, query_params, path_params = _get_user_params(params) + + def validate(validation_method, field_name, user_params): + key = 'Invalid %s provided' % field_name + try: + is_valid, validation_report = validation_method(operation_name, user_params) + if not is_valid: + report[key] = validation_report + except Exception as e: + report[key] = str(e) + return report + + validate(self._conn.validate_query_params, ParamName.QUERY_PARAMS, query_params) + validate(self._conn.validate_path_params, ParamName.PATH_PARAMS, path_params) + if is_post_request(op_spec) or is_put_request(op_spec): + validate(self._conn.validate_data, ParamName.DATA, data) + + if report: + raise ValidationError(report) + + @staticmethod + def _get_operation_name(checker, operations): + return next((op_name for op_name, op_spec in iteritems(operations) if checker(op_name, op_spec)), None) + + def _add_upserted_object(self, model_operations, params): + add_op_name = self._get_operation_name(self._operation_checker.is_add_operation, model_operations) + if not add_op_name: + raise FtdConfigurationError(ADD_OPERATION_NOT_SUPPORTED_ERROR) + return self.add_object(add_op_name, params) + + def _edit_upserted_object(self, model_operations, existing_object, params): + edit_op_name = self._get_operation_name(self._operation_checker.is_edit_operation, model_operations) + _set_default(params, 'path_params', {}) + _set_default(params, 'data', {}) + + params['path_params']['objId'] = existing_object['id'] + copy_identity_properties(existing_object, params['data']) + return self.edit_object(edit_op_name, params) + + def upsert_object(self, op_name, params): + """ + Updates an object if it already exists, or tries to create a new one if there is no + such object. If multiple objects match filter criteria, or add operation is not supported, + the exception is raised. 
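+
+ Ex. (illustrative; assumes a hypothetical 'NetworkObject' model that supports upsert):
+ resource.upsert_object('upsertNetworkObject', {
+ 'data': {'name': 'intranet', 'subType': 'NETWORK', 'value': '10.0.0.0/8', 'type': 'networkobject'}
+ })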
+
+ :param op_name: upsert operation name
+ :type op_name: str
+ :param params: params that the upsert operation should be executed with
+ :type params: dict
+ :return: upserted object representation
+ :rtype: dict
+ """
+
+ def extract_and_validate_model():
+ model = op_name[len(OperationNamePrefix.UPSERT):]
+ if not self._conn.get_model_spec(model):
+ raise FtdInvalidOperationNameError(op_name)
+ return model
+
+ model_name = extract_and_validate_model()
+ model_operations = self.get_operation_specs_by_model_name(model_name)
+
+ if not self._operation_checker.is_upsert_operation_supported(model_operations):
+ raise FtdInvalidOperationNameError(op_name)
+
+ existing_obj = self._find_object_matching_params(model_name, params)
+ if existing_obj:
+ equal_to_existing_obj = equal_objects(existing_obj, params[ParamName.DATA])
+ return existing_obj if equal_to_existing_obj \
+ else self._edit_upserted_object(model_operations, existing_obj, params)
+ else:
+ return self._add_upserted_object(model_operations, params)
+
+
+def _set_default(params, field_name, value):
+ if field_name not in params or params[field_name] is None:
+ params[field_name] = value
+
+
+def is_post_request(operation_spec):
+ return operation_spec[OperationField.METHOD] == HTTPMethod.POST
+
+
+def is_put_request(operation_spec):
+ return operation_spec[OperationField.METHOD] == HTTPMethod.PUT
+
+
+def _get_user_params(params):
+ return params.get(ParamName.DATA) or {}, params.get(ParamName.QUERY_PARAMS) or {}, params.get(
+ ParamName.PATH_PARAMS) or {}
+
+
+def iterate_over_pageable_resource(resource_func, params):
+ """
+ A generator function that iterates over a resource that supports pagination and lazily yields the items
+ one by one.
+
+ :param resource_func: function that receives `params` argument and returns a page of objects
+ :type resource_func: callable
+ :param params: initial dictionary of parameters that will be passed to the resource_func.
+ Should contain `query_params` inside.
+ :type params: dict
+ :return: an iterator containing returned items
+ :rtype: iterator of dict
+ """
+ # creating a copy not to mutate passed dict
+ params = copy.deepcopy(params)
+ params[ParamName.QUERY_PARAMS].setdefault('limit', DEFAULT_PAGE_SIZE)
+ params[ParamName.QUERY_PARAMS].setdefault('offset', DEFAULT_OFFSET)
+ limit = int(params[ParamName.QUERY_PARAMS]['limit'])
+
+ def received_less_items_than_requested(items_in_response, items_expected):
+ if items_in_response == items_expected:
+ return False
+ elif items_in_response < items_expected:
+ return True
+
+ raise FtdUnexpectedResponse(
+ "Get List of Objects Response from the server contains more objects than requested. "
+ "There are {0} item(s) in the response while {1} item(s) were requested".format(
+ items_in_response, items_expected)
+ )
+
+ while True:
+ result = resource_func(params=params)
+
+ for item in result['items']:
+ yield item
+
+ if received_less_items_than_requested(len(result['items']), limit):
+ break
+
+ # creating a copy not to mutate existing dict
+ params = copy.deepcopy(params)
+ query_params = params[ParamName.QUERY_PARAMS]
+ query_params['offset'] = int(query_params['offset']) + limit
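+
+
+# Usage sketch (illustrative; 'getNetworkObjectList' is a hypothetical operation name):
+# pages are fetched lazily, `limit` items at a time, until the server returns a short page.
+#
+# resource = BaseConfigurationResource(conn)
+# fetch_page = partial(resource.send_general_request, operation_name='getNetworkObjectList')
+# url_params = {ParamName.QUERY_PARAMS: {}, ParamName.PATH_PARAMS: {}}
+# names = [obj['name'] for obj in iterate_over_pageable_resource(fetch_page, url_params)]
diff --git a/plugins/module_utils/network/ftd/device.py b/plugins/module_utils/network/ftd/device.py
new file mode 100644
index 0000000000..47b0eb3a43
--- /dev/null
+++ b/plugins/module_utils/network/ftd/device.py
@@ -0,0 +1,138 @@
+# Copyright (c) 2019 Cisco and/or its affiliates.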
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <https://www.gnu.org/licenses/>.
+#
+
+from ansible.module_utils.six.moves.urllib.parse import urlparse
+
+try:
+ from kick.device2.ftd5500x.actions.ftd5500x import Ftd5500x
+ from kick.device2.kp.actions import Kp
+
+ HAS_KICK = True
+except ImportError:
+ HAS_KICK = False
+
+
+def assert_kick_is_installed(module):
+ if not HAS_KICK:
+ module.fail_json(msg='Firepower-kickstart library is required to run this module. '
+ 'Please install the library with the `pip install firepower-kickstart` '
+ 'command and run the playbook again.')
+
+
+class FtdModel:
+ FTD_ASA5506_X = 'Cisco ASA5506-X Threat Defense'
+ FTD_ASA5508_X = 'Cisco ASA5508-X Threat Defense'
+ FTD_ASA5516_X = 'Cisco ASA5516-X Threat Defense'
+
+ FTD_2110 = 'Cisco Firepower 2110 Threat Defense'
+ FTD_2120 = 'Cisco Firepower 2120 Threat Defense'
+ FTD_2130 = 'Cisco Firepower 2130 Threat Defense'
+ FTD_2140 = 'Cisco Firepower 2140 Threat Defense'
+
+ @classmethod
+ def supported_models(cls):
+ return [getattr(cls, item) for item in dir(cls) if item.startswith('FTD_')]
+
+
+class FtdPlatformFactory(object):
+
+ @staticmethod
+ def create(model, module_params):
+ for cls in AbstractFtdPlatform.__subclasses__():
+ if cls.supports_ftd_model(model):
+ return cls(module_params)
+ raise ValueError("FTD model '%s' is not supported by this module."
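% model)
+
+
+# Usage sketch (a hypothetical flow; parameter values come from the module's arguments):
+#
+# platform = FtdPlatformFactory.create(FtdModel.FTD_2110, module.params)
+# platform.install_ftd_image(module.params)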
+
+
+class AbstractFtdPlatform(object):
+ PLATFORM_MODELS = []
+
+ def install_ftd_image(self, params):
+ raise NotImplementedError('The method should be overridden in a subclass')
+
+ @classmethod
+ def supports_ftd_model(cls, model):
+ return model in cls.PLATFORM_MODELS
+
+ @staticmethod
+ def parse_rommon_file_location(rommon_file_location):
+ rommon_url = urlparse(rommon_file_location)
+ if rommon_url.scheme != 'tftp':
+ raise ValueError('The ROMMON image must be downloaded from a TFTP server, other protocols are not supported.')
+ return rommon_url.netloc, rommon_url.path
+
+
+class Ftd2100Platform(AbstractFtdPlatform):
+ PLATFORM_MODELS = [FtdModel.FTD_2110, FtdModel.FTD_2120, FtdModel.FTD_2130, FtdModel.FTD_2140]
+
+ def __init__(self, params):
+ self._ftd = Kp(hostname=params["device_hostname"],
+ login_username=params["device_username"],
+ login_password=params["device_password"],
+ sudo_password=params.get("device_sudo_password") or params["device_password"])
+
+ def install_ftd_image(self, params):
+ line = self._ftd.ssh_console(ip=params["console_ip"],
+ port=params["console_port"],
+ username=params["console_username"],
+ password=params["console_password"])
+
+ try:
+ rommon_server, rommon_path = self.parse_rommon_file_location(params["rommon_file_location"])
+ line.baseline_fp2k_ftd(tftp_server=rommon_server,
+ rommon_file=rommon_path,
+ uut_hostname=params["device_hostname"],
+ uut_username=params["device_username"],
+ uut_password=params.get("device_new_password") or params["device_password"],
+ uut_ip=params["device_ip"],
+ uut_netmask=params["device_netmask"],
+ uut_gateway=params["device_gateway"],
+ dns_servers=params["dns_server"],
+ search_domains=params["search_domains"],
+ fxos_url=params["image_file_location"],
+ ftd_version=params["image_version"])
+ finally:
+ line.disconnect()
+
+
+class FtdAsa5500xPlatform(AbstractFtdPlatform):
+ PLATFORM_MODELS = [FtdModel.FTD_ASA5506_X, FtdModel.FTD_ASA5508_X, FtdModel.FTD_ASA5516_X]
+
+ def __init__(self, params):
+ self._ftd = Ftd5500x(hostname=params["device_hostname"],
+ login_password=params["device_password"],
+ sudo_password=params.get("device_sudo_password") or params["device_password"])
+
+ def install_ftd_image(self, params):
+ line = self._ftd.ssh_console(ip=params["console_ip"],
+ port=params["console_port"],
+ username=params["console_username"],
+ password=params["console_password"])
+ try:
+ rommon_server, rommon_path = self.parse_rommon_file_location(params["rommon_file_location"])
+ line.rommon_to_new_image(rommon_tftp_server=rommon_server,
+ rommon_image=rommon_path,
+ pkg_image=params["image_file_location"],
+ uut_ip=params["device_ip"],
+ uut_netmask=params["device_netmask"],
+ uut_gateway=params["device_gateway"],
+ dns_server=params["dns_server"],
+ search_domains=params["search_domains"],
+ hostname=params["device_hostname"])
+ finally:
+ line.disconnect()
diff --git a/plugins/module_utils/network/ftd/fdm_swagger_client.py b/plugins/module_utils/network/ftd/fdm_swagger_client.py
new file mode 100644
index 0000000000..f7d4114be2
--- /dev/null
+++ b/plugins/module_utils/network/ftd/fdm_swagger_client.py
@@ -0,0 +1,638 @@
+# Copyright (c) 2018 Cisco and/or its affiliates.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <https://www.gnu.org/licenses/>.
+#
+
+from ansible_collections.community.general.plugins.module_utils.network.ftd.common import HTTPMethod
+from ansible.module_utils.six import integer_types, string_types, iteritems
+
+FILE_MODEL_NAME = '_File'
+SUCCESS_RESPONSE_CODE = '200'
+DELETE_PREFIX = 'delete'
+
+
+class OperationField:
+ URL = 'url'
+ METHOD = 'method'
+ PARAMETERS = 'parameters'
+ MODEL_NAME = 'modelName'
+ DESCRIPTION = 'description'
+ RETURN_MULTIPLE_ITEMS = 'returnMultipleItems'
+ TAGS = "tags"
+
+
+class SpecProp:
+ DEFINITIONS = 'definitions'
+ OPERATIONS = 'operations'
+ MODELS = 'models'
+ MODEL_OPERATIONS = 'model_operations'
+
+
+class PropName:
+ ENUM = 'enum'
+ TYPE = 'type'
+ REQUIRED = 'required'
+ INVALID_TYPE = 'invalid_type'
+ REF = '$ref'
+ ALL_OF = 'allOf'
+ BASE_PATH = 'basePath'
+ PATHS = 'paths'
+ OPERATION_ID = 'operationId'
+ SCHEMA = 'schema'
+ ITEMS = 'items'
+ PROPERTIES = 'properties'
+ RESPONSES = 'responses'
+ NAME = 'name'
+ DESCRIPTION = 'description'
+
+
+class PropType:
+ STRING = 'string'
+ BOOLEAN = 'boolean'
+ INTEGER = 'integer'
+ NUMBER = 'number'
+ OBJECT = 'object'
+ ARRAY = 'array'
+ FILE = 'file'
+
+
+class OperationParams:
+ PATH = 'path'
+ QUERY = 'query'
+
+
+class QueryParams:
+ FILTER = 'filter'
+
+
+class PathParams:
+ OBJ_ID = 'objId'
+
+
+def _get_model_name_from_url(schema_ref):
+ path = schema_ref.split('/')
+ return path[len(path) - 1]
+
+
+class IllegalArgumentException(ValueError):
+ """
+ Exception raised when function parameters are missing, empty, or of the wrong type.
+ """
+ pass
+
+
+class ValidationError(ValueError):
+ pass
+
+
+class FdmSwaggerParser:
+ _definitions = None
+ _base_path = None
+
+ def parse_spec(self, spec, docs=None):
+ """
+ This method simplifies a swagger format, resolves a model name for each operation, and adds documentation for
+ each operation and model if it is provided.
+
+ :param spec: An API specification in the swagger format, see
+ <https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md>
+ :type spec: dict
+ :param docs: A documentation map containing descriptions for models, operations and operation parameters.
+ :type docs: dict
+ :rtype: dict
+ :return:
+ Ex.
+ The 'models' field contains model definitions from the swagger spec, see
+ <https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#definitions>
+ {
+ 'models':{
+ 'model_name':{...},
+ ...
+ },
+ 'operations':{
+ 'operation_name':{
+ 'method': 'get', # or post, put, delete
+ 'url': '/api/fdm/v2/object/networks', # url already contains a value from `basePath`
+ 'modelName': 'NetworkObject', # it is a link to the model from 'models'
+ # None - for a delete operation or when we don't have information
+ # '_File' - if an endpoint works with files
+ 'returnMultipleItems': False, # shows if the operation returns a single item or an item list
+ 'parameters': {
+ 'path':{
+ 'param_name':{
+ 'type': 'string', # or integer, boolean, number
+ 'required': True # or False
+ }
+ ...
+ },
+ 'query':{
+ 'param_name':{
+ 'type': 'string', # or integer, boolean, number
+ 'required': True # or False
+ }
+ ...
+ }
+ }
+ },
+ ...
+ },
+ 'model_operations':{
+ 'model_name':{ # a list of operations available for the current model
+ 'operation_name':{
+ ... # the same as in the operations section
+ },
+ ...
+ }, + ... + } + } + """ + self._definitions = spec[SpecProp.DEFINITIONS] + self._base_path = spec[PropName.BASE_PATH] + operations = self._get_operations(spec) + + if docs: + operations = self._enrich_operations_with_docs(operations, docs) + self._definitions = self._enrich_definitions_with_docs(self._definitions, docs) + + return { + SpecProp.MODELS: self._definitions, + SpecProp.OPERATIONS: operations, + SpecProp.MODEL_OPERATIONS: self._get_model_operations(operations) + } + + @property + def base_path(self): + return self._base_path + + def _get_model_operations(self, operations): + model_operations = {} + for operations_name, params in iteritems(operations): + model_name = params[OperationField.MODEL_NAME] + model_operations.setdefault(model_name, {})[operations_name] = params + return model_operations + + def _get_operations(self, spec): + paths_dict = spec[PropName.PATHS] + operations_dict = {} + for url, operation_params in iteritems(paths_dict): + for method, params in iteritems(operation_params): + operation = { + OperationField.METHOD: method, + OperationField.URL: self._base_path + url, + OperationField.MODEL_NAME: self._get_model_name(method, params), + OperationField.RETURN_MULTIPLE_ITEMS: self._return_multiple_items(params), + OperationField.TAGS: params.get(OperationField.TAGS, []) + } + if OperationField.PARAMETERS in params: + operation[OperationField.PARAMETERS] = self._get_rest_params(params[OperationField.PARAMETERS]) + + operation_id = params[PropName.OPERATION_ID] + operations_dict[operation_id] = operation + return operations_dict + + def _enrich_operations_with_docs(self, operations, docs): + def get_operation_docs(op): + op_url = op[OperationField.URL][len(self._base_path):] + return docs[PropName.PATHS].get(op_url, {}).get(op[OperationField.METHOD], {}) + + for operation in operations.values(): + operation_docs = get_operation_docs(operation) + operation[OperationField.DESCRIPTION] = operation_docs.get(PropName.DESCRIPTION, '') + + if OperationField.PARAMETERS in operation: + param_descriptions = dict(( + (p[PropName.NAME], p[PropName.DESCRIPTION]) + for p in operation_docs.get(OperationField.PARAMETERS, {}) + )) + + for param_name, params_spec in operation[OperationField.PARAMETERS][OperationParams.PATH].items(): + params_spec[OperationField.DESCRIPTION] = param_descriptions.get(param_name, '') + + for param_name, params_spec in operation[OperationField.PARAMETERS][OperationParams.QUERY].items(): + params_spec[OperationField.DESCRIPTION] = param_descriptions.get(param_name, '') + + return operations + + def _enrich_definitions_with_docs(self, definitions, docs): + for model_name, model_def in definitions.items(): + model_docs = docs[SpecProp.DEFINITIONS].get(model_name, {}) + model_def[PropName.DESCRIPTION] = model_docs.get(PropName.DESCRIPTION, '') + for prop_name, prop_spec in model_def.get(PropName.PROPERTIES, {}).items(): + prop_spec[PropName.DESCRIPTION] = model_docs.get(PropName.PROPERTIES, {}).get(prop_name, '') + prop_spec[PropName.REQUIRED] = prop_name in model_def.get(PropName.REQUIRED, []) + return definitions + + def _get_model_name(self, method, params): + if method == HTTPMethod.GET: + return self._get_model_name_from_responses(params) + elif method == HTTPMethod.POST or method == HTTPMethod.PUT: + return self._get_model_name_for_post_put_requests(params) + elif method == HTTPMethod.DELETE: + return self._get_model_name_from_delete_operation(params) + else: + return None + + @staticmethod + def _return_multiple_items(op_params): + """ + Defines if 
the operation returns one item or a list of items. + + :param op_params: operation specification + :return: True if the operation returns a list of items, otherwise False + """ + try: + schema = op_params[PropName.RESPONSES][SUCCESS_RESPONSE_CODE][PropName.SCHEMA] + return PropName.ITEMS in schema[PropName.PROPERTIES] + except KeyError: + return False + + def _get_model_name_from_delete_operation(self, params): + operation_id = params[PropName.OPERATION_ID] + if operation_id.startswith(DELETE_PREFIX): + model_name = operation_id[len(DELETE_PREFIX):] + if model_name in self._definitions: + return model_name + return None + + def _get_model_name_for_post_put_requests(self, params): + model_name = None + if OperationField.PARAMETERS in params: + body_param_dict = self._get_body_param_from_parameters(params[OperationField.PARAMETERS]) + if body_param_dict: + schema_ref = body_param_dict[PropName.SCHEMA][PropName.REF] + model_name = self._get_model_name_byschema_ref(schema_ref) + if model_name is None: + model_name = self._get_model_name_from_responses(params) + return model_name + + @staticmethod + def _get_body_param_from_parameters(params): + return next((param for param in params if param['in'] == 'body'), None) + + def _get_model_name_from_responses(self, params): + responses = params[PropName.RESPONSES] + if SUCCESS_RESPONSE_CODE in responses: + response = responses[SUCCESS_RESPONSE_CODE][PropName.SCHEMA] + if PropName.REF in response: + return self._get_model_name_byschema_ref(response[PropName.REF]) + elif PropName.PROPERTIES in response: + ref = response[PropName.PROPERTIES][PropName.ITEMS][PropName.ITEMS][PropName.REF] + return self._get_model_name_byschema_ref(ref) + elif (PropName.TYPE in response) and response[PropName.TYPE] == PropType.FILE: + return FILE_MODEL_NAME + else: + return None + + def _get_rest_params(self, params): + path = {} + query = {} + operation_param = { + OperationParams.PATH: path, + OperationParams.QUERY: query + } + for param in params: + in_param = param['in'] + if in_param == OperationParams.QUERY: + query[param[PropName.NAME]] = self._simplify_param_def(param) + elif in_param == OperationParams.PATH: + path[param[PropName.NAME]] = self._simplify_param_def(param) + return operation_param + + @staticmethod + def _simplify_param_def(param): + return { + PropName.TYPE: param[PropName.TYPE], + PropName.REQUIRED: param[PropName.REQUIRED] + } + + def _get_model_name_byschema_ref(self, schema_ref): + model_name = _get_model_name_from_url(schema_ref) + model_def = self._definitions[model_name] + if PropName.ALL_OF in model_def: + return self._get_model_name_byschema_ref(model_def[PropName.ALL_OF][0][PropName.REF]) + else: + return model_name + + +class FdmSwaggerValidator: + def __init__(self, spec): + """ + :param spec: dict + data from FdmSwaggerParser().parse_spec() + """ + self._operations = spec[SpecProp.OPERATIONS] + self._models = spec[SpecProp.MODELS] + + def validate_data(self, operation_name, data=None): + """ + Validate data for the post|put requests + :param operation_name: string + The value must be non empty string. 
+
+ The operation name is used to get a model specification
+ :param data: dict
+ The value must be in the format that the model (from the operation) expects
+ :rtype: (bool, string|dict)
+ :return:
+ (True, None) - if the data is valid
+ Invalid:
+ (False, {
+ 'required': [ # list of the fields that are required but were not present in the data
+ 'field_name',
+ 'parent.field_name', # when a nested field is omitted
+ 'parent.list[2].field_name' # if data is an array and one of the fields is omitted
+ ],
+ 'invalid_type':[ # list of the fields with invalid data
+ {
+ 'path': 'objId', # field name or path to the field. Ex. objects[3].id, parent.name
+ 'expected_type': 'string', # expected type. Ex. 'object', 'array', 'string', 'integer',
+ # 'boolean', 'number'
+ 'actually_value': 1 # the value that the user passed
+ }
+ ]
+ })
+ :raises IllegalArgumentException
+ 'The operation_name parameter must be a non-empty string' if operation_name is not valid
+ 'The data parameter must be a dict' if data is neither a dict nor None
+ '{operation_name} operation does not support' if the spec does not contain the operation
+ """
+ if data is None:
+ data = {}
+
+ self._check_validate_data_params(data, operation_name)
+
+ operation = self._operations[operation_name]
+ model = self._models[operation[OperationField.MODEL_NAME]]
+ status = self._init_report()
+
+ self._validate_object(status, model, data, '')
+
+ if len(status[PropName.REQUIRED]) > 0 or len(status[PropName.INVALID_TYPE]) > 0:
+ return False, self._delete_empty_field_from_report(status)
+ return True, None
+
+ def _check_validate_data_params(self, data, operation_name):
+ if not operation_name or not isinstance(operation_name, string_types):
+ raise IllegalArgumentException("The operation_name parameter must be a non-empty string")
+ if not isinstance(data, dict):
+ raise IllegalArgumentException("The data parameter must be a dict")
+ if operation_name not in self._operations:
+ raise IllegalArgumentException("{0} operation does not support".format(operation_name))
+
+ def validate_query_params(self, operation_name, params):
+ """
+ Validate params for the get requests. Use this method for validating the query part of the URL.
+ :param operation_name: string
+ The value must be a non-empty string.
+ The operation name is used to get a params specification
+ :param params: dict
+ should be in the format that the specification (from the operation) expects
+ Ex.
+ {
+ 'objId': "string_value",
+ 'p_integer': 1,
+ 'p_boolean': True,
+ 'p_number': 2.3
+ }
+ :rtype: (bool, string|dict)
+ :return:
+ (True, None) - if the params are valid
+ Invalid:
+ (False, {
+ 'required': [ # list of the fields that are required but are not present in the params
+ 'field_name'
+ ],
+ 'invalid_type':[ # list of the fields with invalid data and expected type of the params
+ {
+ 'path': 'objId', # field name
+ 'expected_type': 'string', # expected type. Ex. 'string', 'integer', 'boolean', 'number'
+ 'actually_value': 1 # the value that the user passed
+ }
+ ]
+ })
+ :raises IllegalArgumentException
+ 'The operation_name parameter must be a non-empty string' if operation_name is not valid
+ 'The params parameter must be a dict' if params is neither a dict nor None
+ '{operation_name} operation does not support' if the spec does not contain the operation
+ """
+ return self._validate_url_params(operation_name, params, resource=OperationParams.QUERY)
+
+ def validate_path_params(self, operation_name, params):
+ """
+ Validate params for the get requests. Use this method for validating the path part of the URL.
+ :param operation_name: string
+ The value must be a non-empty string.
+ The operation name is used to get a params specification
+ :param params: dict
+ should be in the format that the specification (from the operation) expects
+
+ Ex.
+ {
+ 'objId': "string_value",
+ 'p_integer': 1,
+ 'p_boolean': True,
+ 'p_number': 2.3
+ }
+ :rtype: (bool, string|dict)
+ :return:
+ (True, None) - if the params are valid
+ Invalid:
+ (False, {
+ 'required': [ # list of the fields that are required but are not present in the params
+ 'field_name'
+ ],
+ 'invalid_type':[ # list of the fields with invalid data and expected type of the params
+ {
+ 'path': 'objId', # field name
+ 'expected_type': 'string', # expected type. Ex. 'string', 'integer', 'boolean', 'number'
+ 'actually_value': 1 # the value that the user passed
+ }
+ ]
+ })
+ :raises IllegalArgumentException
+ 'The operation_name parameter must be a non-empty string' if operation_name is not valid
+ 'The params parameter must be a dict' if params is neither a dict nor None
+ '{operation_name} operation does not support' if the spec does not contain the operation
+ """
+ return self._validate_url_params(operation_name, params, resource=OperationParams.PATH)
+
+ def _validate_url_params(self, operation, params, resource):
+ if params is None:
+ params = {}
+
+ self._check_validate_url_params(operation, params)
+
+ operation = self._operations[operation]
+ if OperationField.PARAMETERS in operation and resource in operation[OperationField.PARAMETERS]:
+ spec = operation[OperationField.PARAMETERS][resource]
+ status = self._init_report()
+ self._check_url_params(status, spec, params)
+
+ if len(status[PropName.REQUIRED]) > 0 or len(status[PropName.INVALID_TYPE]) > 0:
+ return False, self._delete_empty_field_from_report(status)
+ return True, None
+ else:
+ return True, None
+
+ def _check_validate_url_params(self, operation, params):
+ if not operation or not isinstance(operation, string_types):
+ raise IllegalArgumentException("The operation_name parameter must be a non-empty string")
+ if not isinstance(params, dict):
+ raise IllegalArgumentException("The params parameter must be a dict")
+ if operation not in self._operations:
+ raise IllegalArgumentException("{0} operation does not support".format(operation))
+
+ def _check_url_params(self, status, spec, params):
+ for prop_name in spec.keys():
+ prop = spec[prop_name]
+ if prop[PropName.REQUIRED] and prop_name not in params:
+ status[PropName.REQUIRED].append(prop_name)
+ continue
+ if prop_name in params:
+ expected_type = prop[PropName.TYPE]
+ value = params[prop_name]
+ if not self._is_correct_simple_types(expected_type, value, allow_null=False):
+ self._add_invalid_type_report(status, '', prop_name, expected_type, value)
+
+ def _validate_object(self, status, model, data, path):
+ if self._is_enum(model):
+ self._check_enum(status, model, data, path)
+ elif self._is_object(model):
+ self._check_object(status, model, data, path)
+
+ def _is_enum(self, model):
+ return self._is_string_type(model) and PropName.ENUM in model
+
+ def _check_enum(self, status, model, value, path):
+ if value is not None and value not in model[PropName.ENUM]:
+ self._add_invalid_type_report(status, path, '', PropName.ENUM, value)
+
+ def _add_invalid_type_report(self, status, path, prop_name, expected_type, actually_value):
+ status[PropName.INVALID_TYPE].append({
+ 'path': self._create_path_to_field(path, prop_name),
+ 'expected_type': expected_type,
+ 'actually_value': actually_value
+ })
+
+ def _check_object(self, status, model, data, path):
+ if data is None:
+ return
+
+ if not isinstance(data, dict):
+ self._add_invalid_type_report(status, path, '', PropType.OBJECT, data)
+ return None
+
+ if PropName.REQUIRED in model:
+ self._check_required_fields(status, model[PropName.REQUIRED], data, path)
+
+ model_properties = model[PropName.PROPERTIES]
+ for prop in model_properties.keys():
+ if prop in data:
+ model_prop_val = model_properties[prop]
+ expected_type = model_prop_val[PropName.TYPE]
+ actually_value = data[prop]
+ self._check_types(status, actually_value, expected_type, model_prop_val, path, prop)
+
+ def _check_types(self, status, actually_value, expected_type, model, path, prop_name):
+ if expected_type == PropType.OBJECT:
+ ref_model = self._get_model_by_ref(model)
+
+ self._validate_object(status, ref_model, actually_value,
+ path=self._create_path_to_field(path, prop_name))
+ elif expected_type == PropType.ARRAY:
+ self._check_array(status, model, actually_value,
+ path=self._create_path_to_field(path, prop_name))
+ elif not self._is_correct_simple_types(expected_type, actually_value):
+ self._add_invalid_type_report(status, path, prop_name, expected_type, actually_value)
+
+ def _get_model_by_ref(self, model_prop_val):
+ model = _get_model_name_from_url(model_prop_val[PropName.REF])
+ return self._models[model]
+
+ def _check_required_fields(self, status, required_fields, data, path):
+ missed_required_fields = [self._create_path_to_field(path, field) for field in
+ required_fields if field not in data.keys() or data[field] is None]
+ if len(missed_required_fields) > 0:
+ status[PropName.REQUIRED] += missed_required_fields
+
+ def _check_array(self, status, model, data, path):
+ if data is None:
+ return
+ elif not isinstance(data, list):
+ self._add_invalid_type_report(status, path, '', PropType.ARRAY, data)
+ else:
+ item_model = model[PropName.ITEMS]
+ for i, item_data in enumerate(data):
+ self._check_types(status, item_data, item_model[PropName.TYPE], item_model, "{0}[{1}]".format(path, i),
+ '')
+
+ @staticmethod
+ def _is_correct_simple_types(expected_type, value, allow_null=True):
+ def is_numeric_string(s):
+ try:
+ float(s)
+ return True
+ except ValueError:
+ return False
+
+ if value is None and allow_null:
+ return True
+ elif expected_type == PropType.STRING:
+ return isinstance(value, string_types)
+ elif expected_type == PropType.BOOLEAN:
+ return isinstance(value, bool)
+ elif expected_type == PropType.INTEGER:
+ is_integer = isinstance(value, integer_types) and not isinstance(value, bool)
+ is_digit_string = isinstance(value, string_types) and value.isdigit()
+ return is_integer or is_digit_string
+ elif expected_type == PropType.NUMBER:
+ is_number = isinstance(value, (integer_types, float)) and not isinstance(value, bool)
+ is_numeric_str = isinstance(value, string_types) and is_numeric_string(value)
+ return is_number or is_numeric_str
+ return False
+
+ @staticmethod
+ def _is_string_type(model):
+ return PropName.TYPE in model and model[PropName.TYPE] == PropType.STRING
+
+ @staticmethod
+ def _init_report():
+ return {
+ PropName.REQUIRED: [],
+ PropName.INVALID_TYPE: []
+ }
+
+ @staticmethod
+ def _delete_empty_field_from_report(status):
+ if not status[PropName.REQUIRED]:
+ del status[PropName.REQUIRED]
+ if not status[PropName.INVALID_TYPE]:
+ del status[PropName.INVALID_TYPE]
+ return status
+
+ @staticmethod
+ def _create_path_to_field(path='', field=''):
+ separator = ''
+ if path and field:
+ separator = '.'
+ return "{0}{1}{2}".format(path, separator, field) + + @staticmethod + def _is_object(model): + return PropName.TYPE in model and model[PropName.TYPE] == PropType.OBJECT diff --git a/plugins/module_utils/network/ftd/operation.py b/plugins/module_utils/network/ftd/operation.py new file mode 100644 index 0000000000..ecba70e6b5 --- /dev/null +++ b/plugins/module_utils/network/ftd/operation.py @@ -0,0 +1,41 @@ +# Copyright (c) 2018 Cisco and/or its affiliates. +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + +from ansible_collections.community.general.plugins.module_utils.network.ftd.configuration import ParamName, PATH_PARAMS_FOR_DEFAULT_OBJ + + +class FtdOperations: + """ + Utility class for common operation names + """ + GET_SYSTEM_INFO = 'getSystemInformation' + GET_MANAGEMENT_IP_LIST = 'getManagementIPList' + GET_DNS_SETTING_LIST = 'getDeviceDNSSettingsList' + GET_DNS_SERVER_GROUP = 'getDNSServerGroup' + + +def get_system_info(resource): + """ + Executes `getSystemInformation` operation and returns information about the system. + + :param resource: a BaseConfigurationResource object to connect to the device + :return: a dictionary with system information about the device and its software + """ + path_params = {ParamName.PATH_PARAMS: PATH_PARAMS_FOR_DEFAULT_OBJ} + system_info = resource.execute_operation(FtdOperations.GET_SYSTEM_INFO, path_params) + return system_info diff --git a/plugins/module_utils/network/icx/__init__.py b/plugins/module_utils/network/icx/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/plugins/module_utils/network/icx/icx.py b/plugins/module_utils/network/icx/icx.py new file mode 100644 index 0000000000..cabece78d4 --- /dev/null +++ b/plugins/module_utils/network/icx/icx.py @@ -0,0 +1,69 @@ +# -*- coding: utf-8 -*- +# Copyright: (c) 2019, Ansible Project +# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +import json +from ansible.module_utils._text import to_text +from ansible.module_utils.basic import env_fallback +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import to_list +from ansible.module_utils.connection import Connection, ConnectionError + +_DEVICE_CONFIGS = {} + + +def get_connection(module): + return Connection(module._socket_path) + + +def load_config(module, commands): + connection = get_connection(module) + + try: + resp = connection.edit_config(candidate=commands) + return resp.get('response') + except ConnectionError as exc: + module.fail_json(msg=to_text(exc)) + + +def run_commands(module, commands, check_rc=True): + connection = get_connection(module) + try: + return connection.run_commands(commands=commands, check_rc=check_rc) + except ConnectionError as exc: + module.fail_json(msg=to_text(exc)) + + +def exec_scp(module, command): + connection = 
Connection(module._socket_path) + return connection.scp(**command) + + +def get_config(module, flags=None, compare=None): + flag_str = ' '.join(to_list(flags)) + try: + return _DEVICE_CONFIGS[flag_str] + except KeyError: + connection = get_connection(module) + try: + out = connection.get_config(flags=flags, compare=compare) + except ConnectionError as exc: + module.fail_json(msg=to_text(exc, errors='surrogate_then_replace')) + cfg = to_text(out, errors='surrogate_then_replace').strip() + _DEVICE_CONFIGS[flag_str] = cfg + return cfg + + +def check_args(module, warnings): + pass + + +def get_defaults_flag(module): + connection = get_connection(module) + try: + out = connection.get_defaults_flag() + except ConnectionError as exc: + module.fail_json(msg=to_text(exc, errors='surrogate_then_replace')) + return to_text(out, errors='surrogate_then_replace').strip() diff --git a/plugins/module_utils/network/ingate/__init__.py b/plugins/module_utils/network/ingate/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/plugins/module_utils/network/ingate/common.py b/plugins/module_utils/network/ingate/common.py new file mode 100644 index 0000000000..ff632520b0 --- /dev/null +++ b/plugins/module_utils/network/ingate/common.py @@ -0,0 +1,69 @@ +# -*- coding: utf-8 -*- + +# Copyright: (c) 2018, Ingate Systems AB +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + + +try: + from ingate import ingatesdk + HAS_INGATESDK = True +except ImportError: + HAS_INGATESDK = False + + +def ingate_argument_spec(**kwargs): + client_options = dict( + version=dict(choices=['v1'], default='v1'), + scheme=dict(choices=['http', 'https'], required=True), + address=dict(type='str', required=True), + username=dict(type='str', required=True), + password=dict(type='str', required=True, no_log=True), + port=dict(type='int'), + timeout=dict(type='int'), + validate_certs=dict(default=True, type='bool', aliases=['verify_ssl']), + ) + argument_spec = dict( + client=dict(type='dict', required=True, + options=client_options), + ) + argument_spec.update(kwargs) + return argument_spec + + +def ingate_create_client(**kwargs): + api_client = ingate_create_client_noauth(**kwargs) + + # Authenticate and get hold of a security token. + api_client.authenticate() + + # Return the client. + return api_client + + +def ingate_create_client_noauth(**kwargs): + client_params = kwargs['client'] + + # Create API client. + api_client = ingatesdk.Client(client_params['version'], + client_params['scheme'], + client_params['address'], + client_params['username'], + client_params['password'], + port=client_params['port'], + timeout=client_params['timeout']) + + # Check if we should skip SSL Certificate verification. + verify_ssl = client_params.get('validate_certs') + if not verify_ssl: + api_client.skip_verify_certificate() + + # Return the client. 
+ return api_client
+
+
+def is_ingatesdk_installed(module):
+ if not HAS_INGATESDK:
+ module.fail_json(msg="The Ingate Python SDK module is required for this module.")
diff --git a/plugins/module_utils/network/ironware/__init__.py b/plugins/module_utils/network/ironware/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/plugins/module_utils/network/ironware/ironware.py b/plugins/module_utils/network/ironware/ironware.py
new file mode 100644
index 0000000000..f09338de16
--- /dev/null
+++ b/plugins/module_utils/network/ironware/ironware.py
@@ -0,0 +1,113 @@
+#
+# Copyright (c) 2017, Paul Baker
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <https://www.gnu.org/licenses/>.
+#
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+from ansible.module_utils._text import to_text
+from ansible.module_utils.basic import env_fallback
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import to_list, EntityCollection
+from ansible.module_utils.connection import Connection, exec_command
+
+_DEVICE_CONFIG = None
+_CONNECTION = None
+
+ironware_provider_spec = {
+ 'host': dict(),
+ 'port': dict(type='int'),
+ 'username': dict(fallback=(env_fallback, ['ANSIBLE_NET_USERNAME'])),
+ 'password': dict(fallback=(env_fallback, ['ANSIBLE_NET_PASSWORD']), no_log=True),
+ 'ssh_keyfile': dict(fallback=(env_fallback, ['ANSIBLE_NET_SSH_KEYFILE']), type='path'),
+ 'authorize': dict(fallback=(env_fallback, ['ANSIBLE_NET_AUTHORIZE']), type='bool'),
+ 'auth_pass': dict(fallback=(env_fallback, ['ANSIBLE_NET_AUTH_PASS']), no_log=True),
+ 'timeout': dict(type='int'),
+}
+
+ironware_argument_spec = {
+ 'provider': dict(type='dict', options=ironware_provider_spec)
+}
+
+command_spec = {
+ 'command': dict(key=True),
+ 'prompt': dict(),
+ 'answer': dict()
+}
+
+
+def get_provider_argspec():
+ return ironware_provider_spec
+
+
+def check_args(module):
+ pass
+
+
+def get_connection(module):
+ global _CONNECTION
+ if _CONNECTION:
+ return _CONNECTION
+ _CONNECTION = Connection(module._socket_path)
+
+ return _CONNECTION
+
+
+def to_commands(module, commands):
+ if not isinstance(commands, list):
+ raise AssertionError('argument must be of type <list>')
+
+ transform = EntityCollection(module, command_spec)
+ commands = transform(commands)
+
+ for index, item in enumerate(commands):
+ if module.check_mode and not item['command'].startswith('show'):
+ module.warn('only show commands are supported when using check '
+ 'mode, not executing `%s`' % item['command'])
+
+ return commands
+
+
+def run_commands(module, commands, check_rc=True):
+ connection = get_connection(module)
+
+ commands = to_commands(module, to_list(commands))
+
+ responses = list()
+
+ for cmd in commands:
+ out = connection.get(**cmd)
+ responses.append(to_text(out, errors='surrogate_then_replace'))
+
+ return responses
+
+
+def get_config(module, source='running', flags=None):
+ global _DEVICE_CONFIG
+ if source == 'running'
and flags is None and _DEVICE_CONFIG is not None: + return _DEVICE_CONFIG + else: + conn = get_connection(module) + out = conn.get_config(source=source, flags=flags) + cfg = to_text(out, errors='surrogate_then_replace').strip() + if source == 'running' and flags is None: + _DEVICE_CONFIG = cfg + return cfg + + +def load_config(module, config): + conn = get_connection(module) + conn.edit_config(config) diff --git a/plugins/module_utils/network/netscaler/__init__.py b/plugins/module_utils/network/netscaler/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/plugins/module_utils/network/netscaler/netscaler.py b/plugins/module_utils/network/netscaler/netscaler.py new file mode 100644 index 0000000000..ccf0dbff8f --- /dev/null +++ b/plugins/module_utils/network/netscaler/netscaler.py @@ -0,0 +1,322 @@ +# -*- coding: utf-8 -*- + +# Copyright (c) 2017 Citrix Systems +# +# This code is part of Ansible, but is an independent component. +# This particular file snippet, and this file snippet only, is BSD licensed. +# Modules you write using this snippet, which is embedded dynamically by Ansible +# still belong to the author of the module, and may assign their own license +# to the complete work. +# +# Redistribution and use in source and binary forms, with or without modification, +# are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+# + +import json +import re +import sys + +from ansible.module_utils.basic import env_fallback +from ansible.module_utils.six import binary_type, text_type +from ansible.module_utils._text import to_native + + +class ConfigProxy(object): + + def __init__(self, actual, client, attribute_values_dict, readwrite_attrs, transforms=None, readonly_attrs=None, immutable_attrs=None, json_encodes=None): + transforms = {} if transforms is None else transforms + readonly_attrs = [] if readonly_attrs is None else readonly_attrs + immutable_attrs = [] if immutable_attrs is None else immutable_attrs + json_encodes = [] if json_encodes is None else json_encodes + + # Actual config object from nitro sdk + self.actual = actual + + # nitro client + self.client = client + + # ansible attribute_values_dict + self.attribute_values_dict = attribute_values_dict + + self.readwrite_attrs = readwrite_attrs + self.readonly_attrs = readonly_attrs + self.immutable_attrs = immutable_attrs + self.json_encodes = json_encodes + self.transforms = transforms + + self.attribute_values_processed = {} + for attribute, value in self.attribute_values_dict.items(): + if value is None: + continue + if attribute in transforms: + for transform in self.transforms[attribute]: + if transform == 'bool_yes_no': + if value is True: + value = 'YES' + elif value is False: + value = 'NO' + elif transform == 'bool_on_off': + if value is True: + value = 'ON' + elif value is False: + value = 'OFF' + elif callable(transform): + value = transform(value) + else: + raise Exception('Invalid transform %s' % transform) + self.attribute_values_processed[attribute] = value + + self._copy_attributes_to_actual() + + def _copy_attributes_to_actual(self): + for attribute in self.readwrite_attrs: + if attribute in self.attribute_values_processed: + attribute_value = self.attribute_values_processed[attribute] + + if attribute_value is None: + continue + + # Fallthrough + if attribute in self.json_encodes: + attribute_value = json.JSONEncoder().encode(attribute_value).strip('"') + setattr(self.actual, attribute, attribute_value) + + def __getattr__(self, name): + if name in self.attribute_values_dict: + return self.attribute_values_dict[name] + else: + raise AttributeError('No attribute %s found' % name) + + def add(self): + self.actual.__class__.add(self.client, self.actual) + + def update(self): + return self.actual.__class__.update(self.client, self.actual) + + def delete(self): + self.actual.__class__.delete(self.client, self.actual) + + def get(self, *args, **kwargs): + result = self.actual.__class__.get(self.client, *args, **kwargs) + + return result + + def has_equal_attributes(self, other): + if self.diff_object(other) == {}: + return True + else: + return False + + def diff_object(self, other): + diff_dict = {} + for attribute in self.attribute_values_processed: + # Skip readonly attributes + if attribute not in self.readwrite_attrs: + continue + + # Skip attributes not present in module arguments + if self.attribute_values_processed[attribute] is None: + continue + + # Check existence + if hasattr(other, attribute): + attribute_value = getattr(other, attribute) + else: + diff_dict[attribute] = 'missing from other' + continue + + # Compare values + param_type = self.attribute_values_processed[attribute].__class__ + if attribute_value is None or param_type(attribute_value) != self.attribute_values_processed[attribute]: + str_tuple = ( + type(self.attribute_values_processed[attribute]), + self.attribute_values_processed[attribute], + 
type(attribute_value), + attribute_value, + ) + diff_dict[attribute] = 'difference. ours: (%s) %s other: (%s) %s' % str_tuple + return diff_dict + + def get_actual_rw_attributes(self, filter='name'): + if self.actual.__class__.count_filtered(self.client, '%s:%s' % (filter, self.attribute_values_dict[filter])) == 0: + return {} + server_list = self.actual.__class__.get_filtered(self.client, '%s:%s' % (filter, self.attribute_values_dict[filter])) + actual_instance = server_list[0] + ret_val = {} + for attribute in self.readwrite_attrs: + if not hasattr(actual_instance, attribute): + continue + ret_val[attribute] = getattr(actual_instance, attribute) + return ret_val + + def get_actual_ro_attributes(self, filter='name'): + if self.actual.__class__.count_filtered(self.client, '%s:%s' % (filter, self.attribute_values_dict[filter])) == 0: + return {} + server_list = self.actual.__class__.get_filtered(self.client, '%s:%s' % (filter, self.attribute_values_dict[filter])) + actual_instance = server_list[0] + ret_val = {} + for attribute in self.readonly_attrs: + if not hasattr(actual_instance, attribute): + continue + ret_val[attribute] = getattr(actual_instance, attribute) + return ret_val + + def get_missing_rw_attributes(self): + return list(set(self.readwrite_attrs) - set(self.get_actual_rw_attributes().keys())) + + def get_missing_ro_attributes(self): + return list(set(self.readonly_attrs) - set(self.get_actual_ro_attributes().keys())) + + +def get_immutables_intersection(config_proxy, keys): + immutables_set = set(config_proxy.immutable_attrs) + keys_set = set(keys) + # Return list of sets' intersection + return list(immutables_set & keys_set) + + +def ensure_feature_is_enabled(client, feature_str): + enabled_features = client.get_enabled_features() + + if enabled_features is None: + enabled_features = [] + + if feature_str not in enabled_features: + client.enable_features(feature_str) + client.save_config() + + +def get_nitro_client(module): + from nssrc.com.citrix.netscaler.nitro.service.nitro_service import nitro_service + + client = nitro_service(module.params['nsip'], module.params['nitro_protocol']) + client.set_credential(module.params['nitro_user'], module.params['nitro_pass']) + client.timeout = float(module.params['nitro_timeout']) + client.certvalidation = module.params['validate_certs'] + return client + + +netscaler_common_arguments = dict( + nsip=dict( + required=True, + fallback=(env_fallback, ['NETSCALER_NSIP']), + ), + nitro_user=dict( + required=True, + fallback=(env_fallback, ['NETSCALER_NITRO_USER']), + no_log=True + ), + nitro_pass=dict( + required=True, + fallback=(env_fallback, ['NETSCALER_NITRO_PASS']), + no_log=True + ), + nitro_protocol=dict( + choices=['http', 'https'], + fallback=(env_fallback, ['NETSCALER_NITRO_PROTOCOL']), + default='http' + ), + validate_certs=dict( + default=True, + type='bool' + ), + nitro_timeout=dict(default=310, type='float'), + state=dict( + choices=[ + 'present', + 'absent', + ], + default='present', + ), + save_config=dict( + type='bool', + default=True, + ), +) + + +loglines = [] + + +def complete_missing_attributes(actual, attrs_list, fill_value=None): + for attribute in attrs_list: + if not hasattr(actual, attribute): + setattr(actual, attribute, fill_value) + + +def log(msg): + loglines.append(msg) + + +def get_ns_version(client): + from nssrc.com.citrix.netscaler.nitro.resource.config.ns.nsversion import nsversion + result = nsversion.get(client) + m = re.match(r'^.*NS(\d+)\.(\d+).*$', result[0].version) + if m is None: + return None 
+ else: + return int(m.group(1)), int(m.group(2)) + + +def get_ns_hardware(client): + from nssrc.com.citrix.netscaler.nitro.resource.config.ns.nshardware import nshardware + result = nshardware.get(client) + return result + + +def monkey_patch_nitro_api(): + + from nssrc.com.citrix.netscaler.nitro.resource.base.Json import Json + + def new_resource_to_string_convert(self, resrc): + # Line below is the actual patch + dict_valid_values = dict((k.replace('_', '', 1), v) for k, v in resrc.__dict__.items() if v) + return json.dumps(dict_valid_values) + Json.resource_to_string_convert = new_resource_to_string_convert + + from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util + + @classmethod + def object_to_string_new(cls, obj): + output = [] + flds = obj.__dict__ + for k, v in ((k.replace('_', '', 1), v) for k, v in flds.items() if v): + if isinstance(v, bool): + output.append('"%s":%s' % (k, v)) + elif isinstance(v, (binary_type, text_type)): + v = to_native(v, errors='surrogate_or_strict') + output.append('"%s":"%s"' % (k, v)) + elif isinstance(v, int): + output.append('"%s":"%s"' % (k, v)) + return ','.join(output) + + @classmethod + def object_to_string_withoutquotes_new(cls, obj): + output = [] + flds = obj.__dict__ + for k, v in ((k.replace('_', '', 1), v) for k, v in flds.items() if v): + if isinstance(v, (int, bool)): + output.append('%s:%s' % (k, v)) + elif isinstance(v, (binary_type, text_type)): + v = to_native(v, errors='surrogate_or_strict') + output.append('%s:%s' % (k, cls.encode(v))) + return ','.join(output) + + nitro_util.object_to_string = object_to_string_new + nitro_util.object_to_string_withoutquotes = object_to_string_withoutquotes_new diff --git a/plugins/module_utils/network/netvisor/__init__.py b/plugins/module_utils/network/netvisor/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/plugins/module_utils/network/netvisor/netvisor.py b/plugins/module_utils/network/netvisor/netvisor.py new file mode 100644 index 0000000000..0be1af2e3d --- /dev/null +++ b/plugins/module_utils/network/netvisor/netvisor.py @@ -0,0 +1,59 @@ +# Copyright: (c) 2018, Pluribus Networks +# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause) +# + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +import json +from ansible.module_utils._text import to_text +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import to_list, ComplexList +from ansible.module_utils.connection import Connection, ConnectionError +from ansible.module_utils.connection import exec_command + + +def get_connection(module): + if hasattr(module, '_nvos_connection'): + return module._nvos_connection + + capabilities = get_capabilities(module) + network_api = capabilities.get('network_api') + if network_api == 'cliconf': + module._nvos_connection = Connection(module._socket_path) + else: + module.fail_json(msg='Invalid connection type %s' % network_api) + + return module._nvos_connection + + +def get_capabilities(module): + if hasattr(module, '_nvos_capabilities'): + return module._nvos_capabilities + try: + capabilities = Connection(module._socket_path).get_capabilities() + except ConnectionError as exc: + module.fail_json(msg=to_text(exc, errors='surrogate_then_replace')) + module._nvos_capabilities = json.loads(capabilities) + return module._nvos_capabilities + + +def to_commands(module, commands): + spec = { + 'command': dict(key=True), + 'prompt': dict(), + 
'answer': dict()
+    }
+    transform = ComplexList(spec, module)
+    return transform(commands)
+
+
+def run_commands(module, commands, check_rc=True):
+    commands = to_commands(module, to_list(commands))
+    for cmd in commands:
+        cmd = module.jsonify(cmd)
+        rc, out, err = exec_command(module, cmd)
+        if check_rc and rc != 0:
+            module.fail_json(msg=to_text(err, errors='surrogate_or_strict'), rc=rc)
+        # decode the raw output so callers get text back
+        out = to_text(out, errors='surrogate_or_strict')
+
+    return rc, out, err
diff --git a/plugins/module_utils/network/netvisor/pn_nvos.py b/plugins/module_utils/network/netvisor/pn_nvos.py
new file mode 100644
index 0000000000..cd5cdc598f
--- /dev/null
+++ b/plugins/module_utils/network/netvisor/pn_nvos.py
@@ -0,0 +1,66 @@
+# Copyright: (c) 2018, Pluribus Networks
+# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
+#
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+from ansible_collections.community.general.plugins.module_utils.network.netvisor.netvisor import run_commands
+
+
+def pn_cli(module, switch=None, username=None, password=None, switch_local=None):
+    """
+    Method to generate the cli portion to launch the Netvisor cli.
+    :param module: The Ansible module to fetch username and password.
+    :return: The cli string for further processing.
+    """
+
+    cli = ''
+
+    if username and password:
+        cli += '--user "%s":"%s" ' % (username, password)
+    if switch:
+        cli += ' switch ' + switch
+    if switch_local:
+        cli += ' switch-local '
+
+    return cli
+
+
+def booleanArgs(arg, trueString, falseString):
+    if arg is True:
+        return " %s " % trueString
+    elif arg is False:
+        return " %s " % falseString
+    else:
+        return ""
+
+
+def run_cli(module, cli, state_map):
+    """
+    This method executes the cli command on the target node(s) and returns the
+    output. The module then exits based on the output.
+    :param cli: the complete cli string to be executed on the target node(s).
+    :param state_map: Provides state of the command.
+    :param module: The Ansible module to fetch command
+    """
+    state = module.params['state']
+    command = state_map[state]
+
+    result, out, err = run_commands(module, cli)
+
+    results = dict(
+        command=cli,
+        msg="%s operation completed" % cli,
+        changed=True
+    )
+    # Response in JSON format
+    if result != 0:
+        module.exit_json(
+            command=cli,
+            msg="%s operation failed" % cli,
+            changed=False
+        )
+
+    module.exit_json(**results)
diff --git a/plugins/module_utils/network/nos/__init__.py b/plugins/module_utils/network/nos/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/plugins/module_utils/network/nos/nos.py b/plugins/module_utils/network/nos/nos.py
new file mode 100644
index 0000000000..c87b0644f6
--- /dev/null
+++ b/plugins/module_utils/network/nos/nos.py
@@ -0,0 +1,160 @@
+#
+# (c) 2018 Extreme Networks Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
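+#
+# Shared helpers for Extreme Networks NOS modules: a cached cliconf
+# connection plus command execution, config retrieval and config load
+# wrappers.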
+# +import json +from ansible.module_utils._text import to_text +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import to_list +from ansible.module_utils.connection import Connection, ConnectionError + + +def get_connection(module): + """Get switch connection + + Creates reusable SSH connection to the switch described in a given module. + + Args: + module: A valid AnsibleModule instance. + + Returns: + An instance of `ansible.module_utils.connection.Connection` with a + connection to the switch described in the provided module. + + Raises: + AnsibleConnectionFailure: An error occurred connecting to the device + """ + if hasattr(module, 'nos_connection'): + return module.nos_connection + + capabilities = get_capabilities(module) + network_api = capabilities.get('network_api') + if network_api == 'cliconf': + module.nos_connection = Connection(module._socket_path) + else: + module.fail_json(msg='Invalid connection type %s' % network_api) + + return module.nos_connection + + +def get_capabilities(module): + """Get switch capabilities + + Collects and returns a python object with the switch capabilities. + + Args: + module: A valid AnsibleModule instance. + + Returns: + A dictionary containing the switch capabilities. + """ + if hasattr(module, 'nos_capabilities'): + return module.nos_capabilities + + try: + capabilities = Connection(module._socket_path).get_capabilities() + except ConnectionError as exc: + module.fail_json(msg=to_text(exc, errors='surrogate_then_replace')) + module.nos_capabilities = json.loads(capabilities) + return module.nos_capabilities + + +def run_commands(module, commands): + """Run command list against connection. + + Get new or previously used connection and send commands to it one at a time, + collecting response. + + Args: + module: A valid AnsibleModule instance. + commands: Iterable of command strings. + + Returns: + A list of output strings. + """ + responses = list() + connection = get_connection(module) + + for cmd in to_list(commands): + if isinstance(cmd, dict): + command = cmd['command'] + prompt = cmd['prompt'] + answer = cmd['answer'] + else: + command = cmd + prompt = None + answer = None + + try: + out = connection.get(command, prompt, answer) + out = to_text(out, errors='surrogate_or_strict') + except ConnectionError as exc: + module.fail_json(msg=to_text(exc)) + except UnicodeError: + module.fail_json(msg=u'Failed to decode output from %s: %s' % (cmd, to_text(out))) + + responses.append(out) + + return responses + + +def get_config(module): + """Get switch configuration + + Gets the described device's current configuration. If a configuration has + already been retrieved it will return the previously obtained configuration. + + Args: + module: A valid AnsibleModule instance. + + Returns: + A string containing the configuration. + """ + if not hasattr(module, 'device_configs'): + module.device_configs = {} + elif module.device_configs != {}: + return module.device_configs + + connection = get_connection(module) + try: + out = connection.get_config() + except ConnectionError as exc: + module.fail_json(msg=to_text(exc, errors='surrogate_then_replace')) + cfg = to_text(out, errors='surrogate_then_replace').strip() + module.device_configs = cfg + return cfg + + +def load_config(module, commands): + """Apply a list of commands to a device. + + Given a list of commands apply them to the device to modify the + configuration in bulk. + + Args: + module: A valid AnsibleModule instance. 
+ commands: Iterable of command strings. + + Returns: + None + """ + connection = get_connection(module) + + try: + resp = connection.edit_config(commands) + return resp.get('response') + except ConnectionError as exc: + module.fail_json(msg=to_text(exc)) diff --git a/plugins/module_utils/network/nso/__init__.py b/plugins/module_utils/network/nso/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/plugins/module_utils/network/nso/nso.py b/plugins/module_utils/network/nso/nso.py new file mode 100644 index 0000000000..217ac5dd8b --- /dev/null +++ b/plugins/module_utils/network/nso/nso.py @@ -0,0 +1,822 @@ +# -*- coding: utf-8 -*- + +# Copyright: (c) 2017, Cisco and/or its affiliates. +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from ansible.module_utils.basic import env_fallback +from ansible.module_utils.urls import open_url +from ansible.module_utils._text import to_text + +import json +import re +import socket + +try: + unicode + HAVE_UNICODE = True +except NameError: + unicode = str + HAVE_UNICODE = False + + +nso_argument_spec = dict( + url=dict(type='str', required=True), + username=dict(type='str', required=True, fallback=(env_fallback, ['ANSIBLE_NET_USERNAME'])), + password=dict(type='str', required=True, no_log=True, fallback=(env_fallback, ['ANSIBLE_NET_PASSWORD'])), + timeout=dict(type='int', default=300), + validate_certs=dict(type='bool', default=False) +) + + +class State(object): + SET = 'set' + PRESENT = 'present' + ABSENT = 'absent' + CHECK_SYNC = 'check-sync' + DEEP_CHECK_SYNC = 'deep-check-sync' + IN_SYNC = 'in-sync' + DEEP_IN_SYNC = 'deep-in-sync' + + SYNC_STATES = ('check-sync', 'deep-check-sync', 'in-sync', 'deep-in-sync') + + +class ModuleFailException(Exception): + def __init__(self, message): + super(ModuleFailException, self).__init__(message) + self.message = message + + +class NsoException(Exception): + def __init__(self, message, error): + super(NsoException, self).__init__(message) + self.message = message + self.error = error + + +class JsonRpc(object): + def __init__(self, url, timeout, validate_certs): + self._url = url + self._timeout = timeout + self._validate_certs = validate_certs + self._id = 0 + self._trans = {} + self._headers = {'Content-Type': 'application/json'} + self._conn = None + self._system_settings = {} + + def login(self, user, passwd): + payload = { + 'method': 'login', + 'params': {'user': user, 'passwd': passwd} + } + resp, resp_json = self._call(payload) + self._headers['Cookie'] = resp.headers['set-cookie'] + + def logout(self): + payload = {'method': 'logout', 'params': {}} + self._call(payload) + + def get_system_setting(self, setting): + if setting not in self._system_settings: + payload = {'method': 'get_system_setting', 'params': {'operation': setting}} + resp, resp_json = self._call(payload) + self._system_settings[setting] = resp_json['result'] + return self._system_settings[setting] + + def new_trans(self, **kwargs): + payload = {'method': 'new_trans', 'params': kwargs} + resp, resp_json = self._call(payload) + return resp_json['result']['th'] + + def get_trans(self, mode): + if mode not in self._trans: + th = self.new_trans(mode=mode) + self._trans[mode] = th + return self._trans[mode] + + def delete_trans(self, th): + payload = {'method': 'delete_trans', 'params': {'th': th}} + resp, resp_json = self._call(payload) + self._maybe_delete_trans(th) + + def validate_trans(self, th): + payload = {'method': 'validate_trans', 'params': {'th': th}} + resp, 
resp_json = self._write_call(payload)
+        return resp_json['result']
+
+    def get_trans_changes(self, th):
+        payload = {'method': 'get_trans_changes', 'params': {'th': th}}
+        resp, resp_json = self._write_call(payload)
+        return resp_json['result']['changes']
+
+    def validate_commit(self, th):
+        payload = {'method': 'validate_commit', 'params': {'th': th}}
+        resp, resp_json = self._write_call(payload)
+        return resp_json['result'].get('warnings', [])
+
+    def commit(self, th):
+        payload = {'method': 'commit', 'params': {'th': th}}
+        resp, resp_json = self._write_call(payload)
+        if len(resp_json['result']) == 0:
+            self._maybe_delete_trans(th)
+        return resp_json['result']
+
+    def get_schema(self, **kwargs):
+        payload = {'method': 'get_schema', 'params': kwargs}
+        resp, resp_json = self._maybe_write_call(payload)
+        return resp_json['result']
+
+    def get_module_prefix_map(self, path=None):
+        if path is None:
+            payload = {'method': 'get_module_prefix_map', 'params': {}}
+            resp, resp_json = self._call(payload)
+        else:
+            payload = {'method': 'get_module_prefix_map', 'params': {'path': path}}
+            resp, resp_json = self._maybe_write_call(payload)
+        return resp_json['result']
+
+    def get_value(self, path):
+        payload = {
+            'method': 'get_value',
+            'params': {'path': path}
+        }
+        resp, resp_json = self._read_call(payload)
+        return resp_json['result']
+
+    def exists(self, path):
+        payload = {'method': 'exists', 'params': {'path': path}}
+        try:
+            resp, resp_json = self._read_call(payload)
+            return resp_json['result']['exists']
+        except NsoException as ex:
+            # calling exists on a sub-list when the parent list does
+            # not exist will cause data.not_found errors on recent
+            # NSO
+            if 'type' in ex.error and ex.error['type'] == 'data.not_found':
+                return False
+            raise
+
+    def create(self, th, path):
+        payload = {'method': 'create', 'params': {'th': th, 'path': path}}
+        self._write_call(payload)
+
+    def delete(self, th, path):
+        payload = {'method': 'delete', 'params': {'th': th, 'path': path}}
+        self._write_call(payload)
+
+    def set_value(self, th, path, value):
+        payload = {
+            'method': 'set_value',
+            'params': {'th': th, 'path': path, 'value': value}
+        }
+        resp, resp_json = self._write_call(payload)
+        return resp_json['result']
+
+    def show_config(self, path, operational=False):
+        payload = {
+            'method': 'show_config',
+            'params': {
+                'path': path,
+                'result_as': 'json',
+                'with_oper': operational}
+        }
+        resp, resp_json = self._read_call(payload)
+        return resp_json['result']
+
+    def query(self, xpath, fields):
+        payload = {
+            'method': 'query',
+            'params': {
+                'xpath_expr': xpath,
+                'selection': fields
+            }
+        }
+        resp, resp_json = self._read_call(payload)
+        return resp_json['result']['results']
+
+    def run_action(self, th, path, params=None):
+        if params is None:
+            params = {}
+
+        if is_version(self, [(4, 5), (4, 4, 3)]):
+            result_format = 'json'
+        else:
+            result_format = 'normal'
+
+        payload = {
+            'method': 'run_action',
+            'params': {
+                'format': result_format,
+                'path': path,
+                'params': params
+            }
+        }
+        if th is None:
+            resp, resp_json = self._read_call(payload)
+        else:
+            payload['params']['th'] = th
+            resp, resp_json = self._call(payload)
+
+        if result_format == 'normal':
+            # this only works for one-level results, list entries,
+            # containers etc will have / in their name.
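+            # e.g. [{'name': 'id', 'value': '42'}] becomes {'id': '42'}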
+            result = {}
+            for info in resp_json['result']:
+                result[info['name']] = info['value']
+        else:
+            result = resp_json['result']
+
+        return result
+
+    def _call(self, payload):
+        self._id += 1
+        if 'id' not in payload:
+            payload['id'] = self._id
+
+        if 'jsonrpc' not in payload:
+            payload['jsonrpc'] = '2.0'
+
+        data = json.dumps(payload)
+        try:
+            resp = open_url(
+                self._url, timeout=self._timeout,
+                method='POST', data=data, headers=self._headers,
+                validate_certs=self._validate_certs)
+            if resp.code != 200:
+                raise NsoException(
+                    'NSO returned HTTP code {0}, expected 200'.format(resp.code), {})
+        except socket.timeout:
+            raise NsoException('request timed out against NSO at {0}'.format(self._url), {})
+
+        resp_body = resp.read()
+        resp_json = json.loads(resp_body)
+
+        if 'error' in resp_json:
+            self._handle_call_error(payload, resp_json)
+        return resp, resp_json
+
+    def _handle_call_error(self, payload, resp_json):
+        method = payload['method']
+
+        error = resp_json['error']
+        error_type = error['type'][len('rpc.method.'):]
+        if error_type in ('unexpected_params',
+                          'unknown_params_value',
+                          'invalid_params',
+                          'invalid_params_type',
+                          'data_not_found'):
+            key = error['data']['param']
+            error_type_s = error_type.replace('_', ' ')
+            if key == 'path':
+                msg = 'NSO {0} {1}. path = {2}'.format(
+                    method, error_type_s, payload['params']['path'])
+            else:
+                path = payload['params'].get('path', 'unknown')
+                msg = 'NSO {0} {1}. path = {2}. {3} = {4}'.format(
+                    method, error_type_s, path, key, payload['params'][key])
+        else:
+            msg = 'NSO {0} returned JSON-RPC error: {1}'.format(method, error)
+
+        raise NsoException(msg, error)
+
+    def _read_call(self, payload):
+        if 'th' not in payload['params']:
+            payload['params']['th'] = self.get_trans(mode='read')
+        return self._call(payload)
+
+    def _write_call(self, payload):
+        if 'th' not in payload['params']:
+            payload['params']['th'] = self.get_trans(mode='read_write')
+        return self._call(payload)
+
+    def _maybe_write_call(self, payload):
+        if 'read_write' in self._trans:
+            return self._write_call(payload)
+        else:
+            return self._read_call(payload)
+
+    def _maybe_delete_trans(self, th):
+        for mode in ('read', 'read_write'):
+            if th == self._trans.get(mode, None):
+                del self._trans[mode]
+
+
+class ValueBuilder(object):
+    PATH_RE = re.compile('{[^}]*}')
+    PATH_RE_50 = re.compile('{[^}]*}$')
+
+    class Value(object):
+        __slots__ = ['path', 'tag_path', 'state', 'value', 'deps']
+
+        def __init__(self, path, state, value, deps):
+            self.path = path
+            self.tag_path = ValueBuilder.PATH_RE.sub('', path)
+            self.state = state
+            self.value = value
+            self.deps = deps
+
+            # nodes can depend on themselves
+            if self.tag_path in self.deps:
+                self.deps.remove(self.tag_path)
+
+        def __lt__(self, rhs):
+            l_len = len(self.path.split('/'))
+            r_len = len(rhs.path.split('/'))
+            if l_len == r_len:
+                return self.path.__lt__(rhs.path)
+            return l_len < r_len
+
+        def __str__(self):
+            return 'Value<path={0}, state={1}, value={2}>'.format(
+                self.path, self.state, self.value)
+
+    class ValueIterator(object):
+        def __init__(self, client, values, delayed_values):
+            self._client = client
+            self._values = values
+            self._delayed_values = delayed_values
+            self._pos = 0
+
+        def __iter__(self):
+            return self
+
+        def __next__(self):
+            return self.next()
+
+        def next(self):
+            if self._pos >= len(self._values):
+                if len(self._delayed_values) == 0:
+                    raise StopIteration()
+
+                builder = ValueBuilder(self._client, delay=False)
+                for (parent, maybe_qname, value) in self._delayed_values:
+                    builder.build(parent, maybe_qname, value)
+                del self._delayed_values[:]
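+                # the delayed values can now be converted with full schema
+                # information; fold them in and resume iteration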
+                self._values.extend(builder.values)
+
+                return self.next()
+
+            value = self._values[self._pos]
+            self._pos += 1
+            return value
+
+    def __init__(self, client, mode='config', delay=None):
+        self._client = client
+        self._mode = mode
+        self._schema_cache = {}
+        self._module_prefix_map_cache = {}
+        self._values = []
+        self._values_dirty = False
+        self._delay = delay is None and mode == 'config' and is_version(self._client, [(5, 0)])
+        self._delayed_values = []
+
+    def build(self, parent, maybe_qname, value, schema=None):
+        qname, name = self.get_prefix_name(parent, maybe_qname)
+        if name is None:
+            path = parent
+        else:
+            path = '{0}/{1}'.format(parent, qname)
+
+        if schema is None:
+            schema = self._get_schema(path)
+
+        if self._delay and schema.get('is_mount_point', False):
+            # delay conversion of mounted values, required to get
+            # schema information on 5.0 and later.
+            self._delayed_values.append((parent, maybe_qname, value))
+        elif self._is_leaf_list(schema) and is_version(self._client, [(4, 5)]):
+            self._build_leaf_list(path, schema, value)
+        elif self._is_leaf(schema):
+            deps = schema.get('deps', [])
+            if self._is_empty_leaf(schema):
+                exists = self._client.exists(path)
+                if exists and value != [None]:
+                    self._add_value(path, State.ABSENT, None, deps)
+                elif not exists and value == [None]:
+                    self._add_value(path, State.PRESENT, None, deps)
+            else:
+                if maybe_qname is None:
+                    value_type = self.get_type(path)
+                else:
+                    value_type = self._get_child_type(parent, qname)
+
+                if 'identityref' in value_type:
+                    if isinstance(value, list):
+                        value = [ll_v for ll_v, t_ll_v
+                                 in [self.get_prefix_name(parent, v) for v in value]]
+                    else:
+                        value, t_value = self.get_prefix_name(parent, value)
+                self._add_value(path, State.SET, value, deps)
+        elif isinstance(value, dict):
+            self._build_dict(path, schema, value)
+        elif isinstance(value, list):
+            self._build_list(path, schema, value)
+        else:
+            raise ModuleFailException(
+                'unsupported schema {0} at {1}'.format(
+                    schema['kind'], path))
+
+    @property
+    def values(self):
+        if self._values_dirty:
+            self._values = ValueBuilder.sort_values(self._values)
+            self._values_dirty = False
+
+        return ValueBuilder.ValueIterator(self._client, self._values, self._delayed_values)
+
+    @staticmethod
+    def sort_values(values):
+        class N(object):
+            def __init__(self, v):
+                self.tmp_mark = False
+                self.mark = False
+                self.v = v
+
+        sorted_values = []
+        nodes = [N(v) for v in sorted(values)]
+
+        def get_node(tag_path):
+            return next((m for m in nodes
+                         if m.v.tag_path == tag_path), None)
+
+        def is_cycle(n, dep, visited):
+            visited.add(n.v.tag_path)
+            if dep in visited:
+                return True
+
+            dep_n = get_node(dep)
+            if dep_n is not None:
+                for sub_dep in dep_n.v.deps:
+                    if is_cycle(dep_n, sub_dep, visited):
+                        return True
+
+            return False
+
+        # check for dependency cycles, remove if detected. sort will
+        # not be 100% but allows for a best-effort to work around
+        # issue in NSO.
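+        # e.g. if value a depends on b and b depends on a, the a -> b
+        # edge is dropped so the topological sort below can still finish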
+ for n in nodes: + for dep in n.v.deps: + if is_cycle(n, dep, set()): + n.v.deps.remove(dep) + + def visit(n): + if n.tmp_mark: + return False + if not n.mark: + n.tmp_mark = True + for m in nodes: + if m.v.tag_path in n.v.deps: + if not visit(m): + return False + + n.tmp_mark = False + n.mark = True + + sorted_values.insert(0, n.v) + + return True + + n = next((n for n in nodes if not n.mark), None) + while n is not None: + visit(n) + n = next((n for n in nodes if not n.mark), None) + + return sorted_values[::-1] + + def _build_dict(self, path, schema, value): + keys = schema.get('key', []) + for dict_key, dict_value in value.items(): + qname, name = self.get_prefix_name(path, dict_key) + if dict_key in ('__state', ) or name in keys: + continue + + child_schema = self._find_child(path, schema, qname) + self.build(path, dict_key, dict_value, child_schema) + + def _build_leaf_list(self, path, schema, value): + deps = schema.get('deps', []) + entry_type = self.get_type(path, schema) + + if self._mode == 'verify': + for entry in value: + if 'identityref' in entry_type: + entry, t_entry = self.get_prefix_name(path, entry) + entry_path = '{0}{{{1}}}'.format(path, entry) + if not self._client.exists(entry_path): + self._add_value(entry_path, State.ABSENT, None, deps) + else: + # remove leaf list if treated as a list and then re-create the + # expected list entries. + self._add_value(path, State.ABSENT, None, deps) + + for entry in value: + if 'identityref' in entry_type: + entry, t_entry = self.get_prefix_name(path, entry) + entry_path = '{0}{{{1}}}'.format(path, entry) + self._add_value(entry_path, State.PRESENT, None, deps) + + def _build_list(self, path, schema, value): + deps = schema.get('deps', []) + for entry in value: + entry_key = self._build_key(path, entry, schema['key']) + entry_path = '{0}{{{1}}}'.format(path, entry_key) + entry_state = entry.get('__state', 'present') + entry_exists = self._client.exists(entry_path) + + if entry_state == 'absent': + if entry_exists: + self._add_value(entry_path, State.ABSENT, None, deps) + else: + if not entry_exists: + self._add_value(entry_path, State.PRESENT, None, deps) + if entry_state in State.SYNC_STATES: + self._add_value(entry_path, entry_state, None, deps) + + self.build(entry_path, None, entry) + + def _build_key(self, path, entry, schema_keys): + key_parts = [] + for key in schema_keys: + value = entry.get(key, None) + if value is None: + raise ModuleFailException( + 'required leaf {0} in {1} not set in data'.format( + key, path)) + + value_type = self._get_child_type(path, key) + if 'identityref' in value_type: + value, t_value = self.get_prefix_name(path, value) + key_parts.append(self._quote_key(value)) + return ' '.join(key_parts) + + def _quote_key(self, key): + if isinstance(key, bool): + return key and 'true' or 'false' + + q_key = [] + for c in str(key): + if c in ('{', '}', "'", '\\'): + q_key.append('\\') + q_key.append(c) + q_key = ''.join(q_key) + if ' ' in q_key: + return '"{0}"'.format(q_key) + return q_key + + def _find_child(self, path, schema, qname): + if 'children' not in schema: + schema = self._get_schema(path) + + # look for the qualified name if : is in the name + child_schema = self._get_child(schema, qname) + if child_schema is not None: + return child_schema + + # no child was found, look for a choice with a child matching + for child_schema in schema['children']: + if child_schema['kind'] != 'choice': + continue + choice_child_schema = self._get_choice_child(child_schema, qname) + if choice_child_schema is not 
None: + return choice_child_schema + + raise ModuleFailException( + 'no child in {0} with name {1}. children {2}'.format( + path, qname, ','.join((c.get('qname', c.get('name', None)) for c in schema['children'])))) + + def _add_value(self, path, state, value, deps): + self._values.append(ValueBuilder.Value(path, state, value, deps)) + self._values_dirty = True + + def get_prefix_name(self, path, qname): + if not isinstance(qname, (str, unicode)): + return qname, None + if ':' not in qname: + return qname, qname + + module_prefix_map = self._get_module_prefix_map(path) + module, name = qname.split(':', 1) + if module not in module_prefix_map: + raise ModuleFailException( + 'no module mapping for module {0}. loaded modules {1}'.format( + module, ','.join(sorted(module_prefix_map.keys())))) + + return '{0}:{1}'.format(module_prefix_map[module], name), name + + def _get_schema(self, path): + return self._ensure_schema_cached(path)['data'] + + def _get_child_type(self, parent_path, key): + all_schema = self._ensure_schema_cached(parent_path) + parent_schema = all_schema['data'] + meta = all_schema['meta'] + schema = self._find_child(parent_path, parent_schema, key) + return self.get_type(parent_path, schema, meta) + + def get_type(self, path, schema=None, meta=None): + if schema is None or meta is None: + all_schema = self._ensure_schema_cached(path) + schema = all_schema['data'] + meta = all_schema['meta'] + + if self._is_leaf(schema): + def get_type(meta, curr_type): + if curr_type.get('primitive', False): + return [curr_type['name']] + if 'namespace' in curr_type: + curr_type_key = '{0}:{1}'.format( + curr_type['namespace'], curr_type['name']) + type_info = meta['types'][curr_type_key][-1] + return get_type(meta, type_info) + if 'leaf_type' in curr_type: + return get_type(meta, curr_type['leaf_type'][-1]) + if 'union' in curr_type: + union_types = [] + for union_type in curr_type['union']: + union_types.extend(get_type(meta, union_type[-1])) + return union_types + return [curr_type.get('name', 'unknown')] + + return get_type(meta, schema['type']) + return None + + def _ensure_schema_cached(self, path): + if not self._delay and is_version(self._client, [(5, 0)]): + # newer versions of NSO support multiple different schemas + # for different devices, thus the device is required to + # look up the schema. Remove the key entry to get schema + # logic working ok. + path = ValueBuilder.PATH_RE_50.sub('', path) + else: + path = ValueBuilder.PATH_RE.sub('', path) + + if path not in self._schema_cache: + schema = self._client.get_schema(path=path, levels=1) + self._schema_cache[path] = schema + return self._schema_cache[path] + + def _get_module_prefix_map(self, path): + # newer versions of NSO support multiple mappings from module + # to prefix depending on which device is used. 
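+        # NSO >= 5.0 caches one prefix map per path; older versions use
+        # a single global map stored under the empty path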
+ if path != '' and is_version(self._client, [(5, 0)]): + if path not in self._module_prefix_map_cache: + self._module_prefix_map_cache[path] = self._client.get_module_prefix_map(path) + return self._module_prefix_map_cache[path] + + if '' not in self._module_prefix_map_cache: + self._module_prefix_map_cache[''] = self._client.get_module_prefix_map() + return self._module_prefix_map_cache[''] + + def _get_child(self, schema, qname): + # no child specified, return parent + if qname is None: + return schema + + name_key = ':' in qname and 'qname' or 'name' + return next((c for c in schema['children'] + if c.get(name_key, None) == qname), None) + + def _get_choice_child(self, schema, qname): + name_key = ':' in qname and 'qname' or 'name' + for child_case in schema['cases']: + # look for direct child + choice_child_schema = next( + (c for c in child_case['children'] + if c.get(name_key, None) == qname), None) + if choice_child_schema is not None: + return choice_child_schema + + # look for nested choice + for child_schema in child_case['children']: + if child_schema['kind'] != 'choice': + continue + choice_child_schema = self._get_choice_child(child_schema, qname) + if choice_child_schema is not None: + return choice_child_schema + return None + + def _is_leaf_list(self, schema): + return schema.get('kind', None) == 'leaf-list' + + def _is_leaf(self, schema): + # still checking for leaf-list here to be compatible with pre + # 4.5 versions of NSO. + return schema.get('kind', None) in ('key', 'leaf', 'leaf-list') + + def _is_empty_leaf(self, schema): + return (schema.get('kind', None) == 'leaf' and + schema['type'].get('primitive', False) and + schema['type'].get('name', '') == 'empty') + + +def connect(params): + client = JsonRpc(params['url'], + params['timeout'], + params['validate_certs']) + client.login(params['username'], params['password']) + return client + + +def verify_version(client, required_versions): + version_str = client.get_system_setting('version') + if not verify_version_str(version_str, required_versions): + supported_versions = ', '.join( + ['.'.join([str(p) for p in required_version]) + for required_version in required_versions]) + raise ModuleFailException( + 'unsupported NSO version {0}. 
{1} or later supported'.format(
+                version_str, supported_versions))
+
+
+def is_version(client, required_versions):
+    version_str = client.get_system_setting('version')
+    return verify_version_str(version_str, required_versions)
+
+
+def verify_version_str(version_str, required_versions):
+    version_str = re.sub('_.*', '', version_str)
+
+    version = [int(p) for p in version_str.split('.')]
+    if len(version) < 2:
+        raise ModuleFailException(
+            'unsupported NSO version format {0}'.format(version_str))
+
+    def check_version(required_version, version):
+        for pos in range(len(required_version)):
+            if pos >= len(version):
+                return False
+            if version[pos] > required_version[pos]:
+                return True
+            if version[pos] < required_version[pos]:
+                return False
+        return True
+
+    for required_version in required_versions:
+        if check_version(required_version, version):
+            return True
+    return False
+
+
+def normalize_value(expected_value, value, key):
+    if value is None:
+        return None
+    if (isinstance(expected_value, bool) and
+            isinstance(value, (str, unicode))):
+        return value == 'true'
+    if isinstance(expected_value, int):
+        try:
+            return int(value)
+        except (TypeError, ValueError):
+            raise ModuleFailException(
+                'returned value {0} for {1} is not a valid integer'.format(
+                    value, key))
+    if isinstance(expected_value, float):
+        try:
+            return float(value)
+        except (TypeError, ValueError):
+            raise ModuleFailException(
+                'returned value {0} for {1} is not a valid float'.format(
+                    value, key))
+    if isinstance(expected_value, (list, tuple)):
+        if not isinstance(value, (list, tuple)):
+            raise ModuleFailException(
+                'returned value {0} for {1} is not a list'.format(value, key))
+        if len(expected_value) != len(value):
+            raise ModuleFailException(
+                'list length mismatch for {0}'.format(key))
+
+        normalized_value = []
+        for i in range(len(expected_value)):
+            normalized_value.append(
+                normalize_value(expected_value[i], value[i], '{0}[{1}]'.format(key, i)))
+        return normalized_value
+
+    if isinstance(expected_value, dict):
+        if not isinstance(value, dict):
+            raise ModuleFailException(
+                'returned value {0} for {1} is not a dict'.format(value, key))
+        if len(expected_value) != len(value):
+            raise ModuleFailException(
+                'dict length mismatch for {0}'.format(key))
+
+        normalized_value = {}
+        for k in expected_value.keys():
+            n_k = normalize_value(k, k, '{0}[{1}]'.format(key, k))
+            if n_k not in value:
+                raise ModuleFailException('missing {0} in value'.format(n_k))
+            normalized_value[n_k] = normalize_value(expected_value[k], value[k], '{0}[{1}]'.format(key, k))
+        return normalized_value
+
+    if HAVE_UNICODE:
+        if isinstance(expected_value, unicode) and isinstance(value, str):
+            return value.decode('utf-8')
+        if isinstance(expected_value, str) and isinstance(value, unicode):
+            return value.encode('utf-8')
+    else:
+        if hasattr(expected_value, 'encode') and hasattr(value, 'decode'):
+            return value.decode('utf-8')
+        if hasattr(expected_value, 'decode') and hasattr(value, 'encode'):
+            return value.encode('utf-8')
+
+    return value
diff --git a/plugins/module_utils/network/onyx/__init__.py b/plugins/module_utils/network/onyx/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/plugins/module_utils/network/onyx/onyx.py b/plugins/module_utils/network/onyx/onyx.py
new file mode 100644
index 0000000000..d537e048b9
--- /dev/null
+++ b/plugins/module_utils/network/onyx/onyx.py
@@ -0,0 +1,261 @@
+# -*- coding: utf-8 -*-
+#
+# (c) 2017, Ansible by Red Hat, inc
+#
+# This file is part of Ansible by Red Hat
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+import json
+
+from ansible.module_utils._text import to_text
+from ansible.module_utils.connection import Connection, ConnectionError
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import to_list, EntityCollection
+
+_DEVICE_CONFIGS = {}
+_CONNECTION = None
+
+_COMMAND_SPEC = {
+    'command': dict(key=True),
+    'prompt': dict(),
+    'answer': dict()
+}
+
+
+def get_connection(module):
+    global _CONNECTION
+    if _CONNECTION:
+        return _CONNECTION
+    _CONNECTION = Connection(module._socket_path)
+    return _CONNECTION
+
+
+def to_commands(module, commands):
+    if not isinstance(commands, list):
+        raise AssertionError('argument must be of type <list>')
+
+    transform = EntityCollection(module, _COMMAND_SPEC)
+    commands = transform(commands)
+    return commands
+
+
+def run_commands(module, commands, check_rc=True):
+    connection = get_connection(module)
+
+    commands = to_commands(module, to_list(commands))
+
+    responses = list()
+
+    for cmd in commands:
+        out = connection.get(**cmd)
+        responses.append(to_text(out, errors='surrogate_then_replace'))
+
+    return responses
+
+
+def get_config(module, source='running'):
+    conn = get_connection(module)
+    out = conn.get_config(source)
+    cfg = to_text(out, errors='surrogate_then_replace').strip()
+    return cfg
+
+
+def load_config(module, config):
+    try:
+        conn = get_connection(module)
+        conn.edit_config(config)
+    except ConnectionError as exc:
+        module.fail_json(msg=to_text(exc))
+
+
+def _parse_json_output(out):
+    out_list = out.split('\n')
+    first_index = 0
+    opening_char = None
+    lines_count = len(out_list)
+    while first_index < lines_count:
+        first_line = out_list[first_index].strip()
+        if not first_line or first_line[0] not in ("[", "{"):
+            first_index += 1
+            continue
+        opening_char = first_line[0]
+        break
+    if not opening_char:
+        return "null"
+    closing_char = ']' if opening_char == '[' else '}'
+    last_index = lines_count - 1
+    found = False
+    while last_index > first_index:
+        last_line = out_list[last_index].strip()
+        if not last_line or last_line[0] != closing_char:
+            last_index -= 1
+            continue
+        found = True
+        break
+    if not found:
+        return opening_char + closing_char
+    return "".join(out_list[first_index:last_index + 1])
+
+
+def show_cmd(module, cmd, json_fmt=True, fail_on_error=True):
+    if json_fmt:
+        cmd += " | json-print"
+    conn = get_connection(module)
+    command_obj = to_commands(module, to_list(cmd))[0]
+    try:
+        out = conn.get(**command_obj)
+    except ConnectionError:
+        if fail_on_error:
+            raise
+        return None
+    if json_fmt:
+        out = _parse_json_output(out)
+        try:
+            cfg = json.loads(out)
+        except ValueError:
+            module.fail_json(
+                msg="got invalid json",
+                stderr=to_text(out, errors='surrogate_then_replace'))
+    else:
+        cfg = to_text(out, errors='surrogate_then_replace').strip()
+    return cfg
+
+
+def get_interfaces_config(module, interface_type, flags=None, json_fmt=True):
+    cmd = "show interfaces %s" % interface_type
+    if flags:
+        cmd += " %s" % flags
+    return show_cmd(module, cmd, json_fmt)
+
+
+def get_bgp_summary(module):
+    cmd = "show running-config protocol bgp"
+    return show_cmd(module, cmd, json_fmt=False, fail_on_error=False)
+
+
+def get_capabilities(module):
+    """Returns platform info of the remote device
+    """
+    if hasattr(module, '_capabilities'):
+        return module._capabilities
+
+    connection = get_connection(module)
+    try:
+        capabilities = connection.get_capabilities()
+    except ConnectionError as exc:
+        module.fail_json(msg=to_text(exc, errors='surrogate_then_replace'))
+
+    module._capabilities = json.loads(capabilities)
+    return module._capabilities
+
+
+class BaseOnyxModule(object):
+    ONYX_API_VERSION = "3.6.6000"
+
+    def __init__(self):
+        self._module = None
+        self._commands = list()
+        self._current_config = None
+        self._required_config = None
+        self._os_version = None
+
+    def init_module(self):
+        pass
+
+    def load_current_config(self):
+        pass
+
+    def get_required_config(self):
+        pass
+
+    def _get_os_version(self):
+        capabilities = get_capabilities(self._module)
+        device_info = capabilities['device_info']
+        return device_info['network_os_version']
+
+    # pylint: disable=unused-argument
+    def check_declarative_intent_params(self, result):
+        return None
+
+    def _validate_key(self, param, key):
+        validator = getattr(self, 'validate_%s' % key)
+        if callable(validator):
+            validator(param.get(key))
+
+    def validate_param_values(self, obj, param=None):
+        if param is None:
+            param = self._module.params
+        for key in obj:
+            # validate the param value (if validator func exists)
+            try:
+                self._validate_key(param, key)
+            except AttributeError:
+                pass
+
+    @classmethod
+    def get_config_attr(cls, item, arg):
+        return item.get(arg)
+
+    @classmethod
+    def get_mtu(cls, item):
+        mtu = cls.get_config_attr(item, "MTU")
+        mtu_parts = mtu.split()
+        try:
+            return int(mtu_parts[0])
+        except ValueError:
+            return None
+
+    def _validate_range(self, attr_name, min_val, max_val, value):
+        if value is None:
+            return True
+        if not min_val <= int(value) <= max_val:
+            msg = '%s must be between %s and %s' % (
+                attr_name, min_val, max_val)
+            self._module.fail_json(msg=msg)
+
+    def validate_mtu(self, value):
+        self._validate_range('mtu', 1500, 9612, value)
+
+    def generate_commands(self):
+        pass
+
+    def run(self):
+        self.init_module()
+
+        result = {'changed': False}
+
+        self.get_required_config()
+        self.load_current_config()
+
+        self.generate_commands()
+        result['commands'] = self._commands
+
+        if self._commands:
+            if not self._module.check_mode:
+                load_config(self._module, self._commands)
+            result['changed'] = True
+
+        failed_conditions = self.check_declarative_intent_params(result)
+
+        if failed_conditions:
+            msg = 'One or more conditional statements have not been satisfied'
+            self._module.fail_json(msg=msg,
+                                   failed_conditions=failed_conditions)
+
+        self._module.exit_json(**result)
+
+    @classmethod
+    def main(cls):
+        app = cls()
+        app.run()
diff --git a/plugins/module_utils/network/ordnance/__init__.py b/plugins/module_utils/network/ordnance/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/plugins/module_utils/network/ordnance/ordnance.py b/plugins/module_utils/network/ordnance/ordnance.py
new file mode 100644
index 0000000000..070a86d3e1
--- /dev/null
+++ b/plugins/module_utils/network/ordnance/ordnance.py
@@ -0,0 +1,19 @@
+_DEVICE_CONFIGS = {}
+
+
+def get_config(module, flags=None):
+    flags = [] if flags is None else flags
+
+    cmd = 'show running-config '
+    cmd += ' '.join(flags)
+    cmd = cmd.strip()
+
+    try:
+        return
_DEVICE_CONFIGS[cmd] + except KeyError: + rc, out, err = module.exec_command(cmd) + if rc != 0: + module.fail_json(msg='unable to retrieve current config', stderr=err) + cfg = str(out).strip() + _DEVICE_CONFIGS[cmd] = cfg + return cfg diff --git a/plugins/module_utils/network/panos/__init__.py b/plugins/module_utils/network/panos/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/plugins/module_utils/network/panos/panos.py b/plugins/module_utils/network/panos/panos.py new file mode 100644 index 0000000000..f50257dcf2 --- /dev/null +++ b/plugins/module_utils/network/panos/panos.py @@ -0,0 +1,418 @@ +# This code is part of Ansible, but is an independent component. +# This particular file snippet, and this file snippet only, is BSD licensed. +# Modules you write using this snippet, which is embedded dynamically by Ansible +# still belong to the author of the module, and may assign their own license +# to the complete work. +# +# Copyright (c) 2018 Palo Alto Networks techbizdev, +# +# Redistribution and use in source and binary forms, with or without modification, +# are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +_MIN_VERSION_ERROR = '{0} version ({1}) < minimum version ({2})' +HAS_PANDEVICE = True +try: + import pandevice + from pandevice.base import PanDevice + from pandevice.firewall import Firewall + from pandevice.panorama import DeviceGroup, Template, TemplateStack + from pandevice.policies import PreRulebase, PostRulebase, Rulebase + from pandevice.device import Vsys + from pandevice.errors import PanDeviceError +except ImportError: + HAS_PANDEVICE = False + + +def _vstr(val): + return '{0}.{1}.{2}'.format(*val) + + +class ConnectionHelper(object): + def __init__(self, min_pandevice_version, min_panos_version, + panorama_error, firewall_error): + """Performs connection initialization and determines params.""" + # Params for AnsibleModule. + self.argument_spec = {} + self.required_one_of = [] + + # Params for pandevice tree construction. 
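+        # each attribute below stores the *param name* to read from the
+        # module (or None when that part of the tree is not used)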
+ self.vsys = None + self.device_group = None + self.vsys_dg = None + self.rulebase = None + self.template = None + self.template_stack = None + self.vsys_importable = None + self.min_pandevice_version = min_pandevice_version + self.min_panos_version = min_panos_version + self.panorama_error = panorama_error + self.firewall_error = firewall_error + + # The PAN-OS device. + self.device = None + + def get_pandevice_parent(self, module): + """Builds the pandevice object tree, returning the parent object. + + If pandevice is not installed, then module.fail_json() will be + invoked. + + Arguments: + * module(AnsibleModule): the ansible module. + + Returns: + * The parent pandevice object based on the spec given to + get_connection(). + """ + # Sanity check. + if not HAS_PANDEVICE: + module.fail_json(msg='Missing required library "pandevice".') + + # Verify pandevice minimum version. + if self.min_pandevice_version is not None: + pdv = tuple(int(x) for x in pandevice.__version__.split('.')) + if pdv < self.min_pandevice_version: + module.fail_json(msg=_MIN_VERSION_ERROR.format( + 'pandevice', pandevice.__version__, + _vstr(self.min_pandevice_version))) + + pan_device_auth, serial_number = None, None + if module.params['provider'] and module.params['provider']['ip_address']: + pan_device_auth = ( + module.params['provider']['ip_address'], + module.params['provider']['username'], + module.params['provider']['password'], + module.params['provider']['api_key'], + module.params['provider']['port'], + ) + serial_number = module.params['provider']['serial_number'] + elif module.params.get('ip_address', None) is not None: + pan_device_auth = ( + module.params['ip_address'], + module.params['username'], + module.params['password'], + module.params['api_key'], + module.params['port'], + ) + msg = 'Classic provider params are deprecated; use "provider" instead' + module.deprecate(msg, '2.12') + else: + module.fail_json(msg='Provider params are required.') + + # Create the connection object. + try: + self.device = PanDevice.create_from_device(*pan_device_auth) + except PanDeviceError as e: + module.fail_json(msg='Failed connection: {0}'.format(e)) + + # Verify PAN-OS minimum version. + if self.min_panos_version is not None: + if self.device._version_info < self.min_panos_version: + module.fail_json(msg=_MIN_VERSION_ERROR.format( + 'PAN-OS', _vstr(self.device._version_info), + _vstr(self.min_panos_version))) + + # Optional: Firewall via Panorama connectivity specified. + if hasattr(self.device, 'refresh_devices') and serial_number: + fw = Firewall(serial=serial_number) + self.device.add(fw) + self.device = fw + + parent = self.device + not_found = '{0} "{1}" is not present.' + pano_mia_param = 'Param "{0}" is required for Panorama but not specified.' + ts_error = 'Specify either the template or the template stack{0}.' + if hasattr(self.device, 'refresh_devices'): + # Panorama connection. + # Error if Panorama is not supported. + if self.panorama_error is not None: + module.fail_json(msg=self.panorama_error) + + # Spec: template stack. 
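+            # resolve the named stack first; giving both a stack and a
+            # template for the same module is rejected further down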
+ tmpl_required = False + added_template = False + if self.template_stack is not None: + name = module.params[self.template_stack] + if name is not None: + stacks = TemplateStack.refreshall(parent, name_only=True) + for ts in stacks: + if ts.name == name: + parent = ts + added_template = True + break + else: + module.fail_json(msg=not_found.format( + 'Template stack', name, + )) + elif self.template is not None: + tmpl_required = True + else: + module.fail_json(msg=pano_mia_param.format(self.template_stack)) + + # Spec: template. + if self.template is not None: + name = module.params[self.template] + if name is not None: + if added_template: + module.fail_json(msg=ts_error.format(', not both')) + templates = Template.refreshall(parent, name_only=True) + for t in templates: + if t.name == name: + parent = t + break + else: + module.fail_json(msg=not_found.format( + 'Template', name, + )) + elif tmpl_required: + module.fail_json(msg=ts_error.format('')) + else: + module.fail_json(msg=pano_mia_param.format(self.template)) + + # Spec: vsys importable. + vsys_name = self.vsys_importable or self.vsys + if vsys_name is not None: + name = module.params[vsys_name] + if name not in (None, 'shared'): + vo = Vsys(name) + parent.add(vo) + parent = vo + + # Spec: vsys_dg or device_group. + dg_name = self.vsys_dg or self.device_group + if dg_name is not None: + name = module.params[dg_name] + if name not in (None, 'shared'): + groups = DeviceGroup.refreshall(parent, name_only=True) + for dg in groups: + if dg.name == name: + parent = dg + break + else: + module.fail_json(msg=not_found.format( + 'Device group', name, + )) + + # Spec: rulebase. + if self.rulebase is not None: + if module.params[self.rulebase] in (None, 'pre-rulebase'): + rb = PreRulebase() + parent.add(rb) + parent = rb + elif module.params[self.rulebase] == 'rulebase': + rb = Rulebase() + parent.add(rb) + parent = rb + elif module.params[self.rulebase] == 'post-rulebase': + rb = PostRulebase() + parent.add(rb) + parent = rb + else: + module.fail_json(msg=not_found.format( + 'Rulebase', module.params[self.rulebase])) + else: + # Firewall connection. + # Error if firewalls are not supported. + if self.firewall_error is not None: + module.fail_json(msg=self.firewall_error) + + # Spec: vsys or vsys_dg or vsys_importable. + vsys_name = self.vsys_dg or self.vsys or self.vsys_importable + if vsys_name is not None: + parent.vsys = module.params[vsys_name] + + # Spec: rulebase. + if self.rulebase is not None: + rb = Rulebase() + parent.add(rb) + parent = rb + + # Done. + return parent + + +def get_connection(vsys=None, device_group=None, + vsys_dg=None, vsys_importable=None, + rulebase=None, template=None, template_stack=None, + with_classic_provider_spec=False, with_state=True, + argument_spec=None, required_one_of=None, + min_pandevice_version=None, min_panos_version=None, + panorama_error=None, firewall_error=None): + """Returns a helper object that handles pandevice object tree init. + + The `vsys`, `device_group`, `vsys_dg`, `vsys_importable`, `rulebase`, + `template`, and `template_stack` params can be any of the following types: + + * None - do not include this in the spec + * True - use the default param name + * string - use this string for the param name + + The `min_pandevice_version` and `min_panos_version` args expect a 3 element + tuple of ints. For example, `(0, 6, 0)` or `(8, 1, 0)`. 
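+
+    For example (an illustrative sketch, not from the original code), a
+    Panorama-aware module might be wired up as:
+
+        helper = get_connection(vsys=True, device_group=True,
+                                min_panos_version=(8, 0, 0))
+        module = AnsibleModule(argument_spec=helper.argument_spec,
+                               required_one_of=helper.required_one_of,
+                               supports_check_mode=True)
+        parent = helper.get_pandevice_parent(module)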
+ + If you are including template support (by defining either `template` and/or + `template_stack`), and the thing the module is enabling the management of is + an "importable", you should define either `vsys_importable` (whose default + value is None) or `vsys` (whose default value is 'vsys1'). + + Arguments: + vsys: The vsys (default: 'vsys1'). + device_group: Panorama only - The device group (default: 'shared'). + vsys_dg: The param name if vsys and device_group are a shared param. + vsys_importable: Either this or `vsys` should be specified. For: + - Interfaces + - VLANs + - Virtual Wires + - Virtual Routers + rulebase: This is a policy of some sort. + template: Panorama - The template name. + template_stack: Panorama - The template stack name. + with_classic_provider_spec(bool): Include the ip_address, username, + password, api_key, and port params in the base spec, and make the + "provider" param optional. + with_state(bool): Include the standard 'state' param. + argument_spec(dict): The argument spec to mixin with the + generated spec based on the given parameters. + required_one_of(list): List of lists to extend into required_one_of. + min_pandevice_version(tuple): Minimum pandevice version allowed. + min_panos_version(tuple): Minimum PAN-OS version allowed. + panorama_error(str): The error message if the device is Panorama. + firewall_error(str): The error message if the device is a firewall. + + Returns: + ConnectionHelper + """ + helper = ConnectionHelper( + min_pandevice_version, min_panos_version, + panorama_error, firewall_error) + req = [] + spec = { + 'provider': { + 'required': True, + 'type': 'dict', + 'required_one_of': [['password', 'api_key'], ], + 'options': { + 'ip_address': {'required': True}, + 'username': {'default': 'admin'}, + 'password': {'no_log': True}, + 'api_key': {'no_log': True}, + 'port': {'default': 443, 'type': 'int'}, + 'serial_number': {'no_log': True}, + }, + }, + } + + if with_classic_provider_spec: + spec['provider']['required'] = False + spec['provider']['options']['ip_address']['required'] = False + del(spec['provider']['required_one_of']) + spec.update({ + 'ip_address': {'required': False}, + 'username': {'default': 'admin'}, + 'password': {'no_log': True}, + 'api_key': {'no_log': True}, + 'port': {'default': 443, 'type': 'int'}, + }) + req.extend([ + ['provider', 'ip_address'], + ['provider', 'password', 'api_key'], + ]) + + if with_state: + spec['state'] = { + 'default': 'present', + 'choices': ['present', 'absent'], + } + + if vsys_dg is not None: + if isinstance(vsys_dg, bool): + param = 'vsys_dg' + else: + param = vsys_dg + spec[param] = {} + helper.vsys_dg = param + else: + if vsys is not None: + if isinstance(vsys, bool): + param = 'vsys' + else: + param = vsys + spec[param] = {'default': 'vsys1'} + helper.vsys = param + if device_group is not None: + if isinstance(device_group, bool): + param = 'device_group' + else: + param = device_group + spec[param] = {'default': 'shared'} + helper.device_group = param + if vsys_importable is not None: + if vsys is not None: + raise KeyError('Define "vsys" or "vsys_importable", not both.') + if isinstance(vsys_importable, bool): + param = 'vsys' + else: + param = vsys_importable + spec[param] = {} + helper.vsys_importable = param + + if rulebase is not None: + if isinstance(rulebase, bool): + param = 'rulebase' + else: + param = rulebase + spec[param] = { + 'default': None, + 'choices': ['pre-rulebase', 'rulebase', 'post-rulebase'], + } + helper.rulebase = param + + if template is not None: + if 
isinstance(template, bool): + param = 'template' + else: + param = template + spec[param] = {} + helper.template = param + + if template_stack is not None: + if isinstance(template_stack, bool): + param = 'template_stack' + else: + param = template_stack + spec[param] = {} + helper.template_stack = param + + if argument_spec is not None: + for k in argument_spec.keys(): + if k in spec: + raise KeyError('{0}: key used by connection helper.'.format(k)) + spec[k] = argument_spec[k] + + if required_one_of is not None: + req.extend(required_one_of) + + # Done. + helper.argument_spec = spec + helper.required_one_of = req + return helper diff --git a/plugins/module_utils/network/routeros/__init__.py b/plugins/module_utils/network/routeros/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/plugins/module_utils/network/routeros/routeros.py b/plugins/module_utils/network/routeros/routeros.py new file mode 100644 index 0000000000..63eb3c0470 --- /dev/null +++ b/plugins/module_utils/network/routeros/routeros.py @@ -0,0 +1,156 @@ +# This code is part of Ansible, but is an independent component. +# This particular file snippet, and this file snippet only, is BSD licensed. +# Modules you write using this snippet, which is embedded dynamically by Ansible +# still belong to the author of the module, and may assign their own license +# to the complete work. +# +# (c) 2016 Red Hat Inc. +# +# Redistribution and use in source and binary forms, with or without modification, +# are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
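+#
+# Shared helpers for MikroTik RouterOS modules: a cached cliconf
+# connection plus config retrieval and command execution wrappers.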
+# +import json +from ansible.module_utils._text import to_text +from ansible.module_utils.basic import env_fallback +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import to_list, ComplexList +from ansible.module_utils.connection import Connection, ConnectionError + +_DEVICE_CONFIGS = {} + +routeros_provider_spec = { + 'host': dict(), + 'port': dict(type='int'), + 'username': dict(fallback=(env_fallback, ['ANSIBLE_NET_USERNAME'])), + 'password': dict(fallback=(env_fallback, ['ANSIBLE_NET_PASSWORD']), no_log=True), + 'ssh_keyfile': dict(fallback=(env_fallback, ['ANSIBLE_NET_SSH_KEYFILE']), type='path'), + 'timeout': dict(type='int') +} +routeros_argument_spec = {} + + +def get_provider_argspec(): + return routeros_provider_spec + + +def get_connection(module): + if hasattr(module, '_routeros_connection'): + return module._routeros_connection + + capabilities = get_capabilities(module) + network_api = capabilities.get('network_api') + if network_api == 'cliconf': + module._routeros_connection = Connection(module._socket_path) + else: + module.fail_json(msg='Invalid connection type %s' % network_api) + + return module._routeros_connection + + +def get_capabilities(module): + if hasattr(module, '_routeros_capabilities'): + return module._routeros_capabilities + + capabilities = Connection(module._socket_path).get_capabilities() + module._routeros_capabilities = json.loads(capabilities) + return module._routeros_capabilities + + +def get_defaults_flag(module): + connection = get_connection(module) + + try: + out = connection.get('/system default-configuration print') + except ConnectionError as exc: + module.fail_json(msg=to_text(exc, errors='surrogate_then_replace')) + + out = to_text(out, errors='surrogate_then_replace') + + commands = set() + for line in out.splitlines(): + if line.strip(): + commands.add(line.strip().split()[0]) + + if 'all' in commands: + return ['all'] + else: + return ['full'] + + +def get_config(module, flags=None): + flag_str = ' '.join(to_list(flags)) + + try: + return _DEVICE_CONFIGS[flag_str] + except KeyError: + connection = get_connection(module) + + try: + out = connection.get_config(flags=flags) + except ConnectionError as exc: + module.fail_json(msg=to_text(exc, errors='surrogate_then_replace')) + + cfg = to_text(out, errors='surrogate_then_replace').strip() + _DEVICE_CONFIGS[flag_str] = cfg + return cfg + + +def to_commands(module, commands): + spec = { + 'command': dict(key=True), + 'prompt': dict(), + 'answer': dict() + } + transform = ComplexList(spec, module) + return transform(commands) + + +def run_commands(module, commands, check_rc=True): + responses = list() + connection = get_connection(module) + + for cmd in to_list(commands): + if isinstance(cmd, dict): + command = cmd['command'] + prompt = cmd['prompt'] + answer = cmd['answer'] + else: + command = cmd + prompt = None + answer = None + + try: + out = connection.get(command, prompt, answer) + except ConnectionError as exc: + module.fail_json(msg=to_text(exc, errors='surrogate_then_replace')) + + try: + out = to_text(out, errors='surrogate_or_strict') + except UnicodeError: + module.fail_json( + msg=u'Failed to decode output from %s: %s' % (cmd, to_text(out))) + + responses.append(out) + + return responses + + +def load_config(module, commands): + connection = get_connection(module) + + out = connection.edit_config(commands) diff --git a/plugins/module_utils/network/slxos/__init__.py b/plugins/module_utils/network/slxos/__init__.py new file mode 100644 index 
0000000000..e69de29bb2 diff --git a/plugins/module_utils/network/slxos/slxos.py b/plugins/module_utils/network/slxos/slxos.py new file mode 100644 index 0000000000..d9971840de --- /dev/null +++ b/plugins/module_utils/network/slxos/slxos.py @@ -0,0 +1,148 @@ +# +# (c) 2018 Extreme Networks Inc. +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# +import json +from ansible.module_utils._text import to_text +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import to_list, ComplexList +from ansible.module_utils.connection import Connection + + +def get_connection(module): + """Get switch connection + + Creates reusable SSH connection to the switch described in a given module. + + Args: + module: A valid AnsibleModule instance. + + Returns: + An instance of `ansible.module_utils.connection.Connection` with a + connection to the switch described in the provided module. + + Raises: + AnsibleConnectionFailure: An error occurred connecting to the device + """ + if hasattr(module, 'slxos_connection'): + return module.slxos_connection + + capabilities = get_capabilities(module) + network_api = capabilities.get('network_api') + if network_api == 'cliconf': + module.slxos_connection = Connection(module._socket_path) + else: + module.fail_json(msg='Invalid connection type %s' % network_api) + + return module.slxos_connection + + +def get_capabilities(module): + """Get switch capabilities + + Collects and returns a python object with the switch capabilities. + + Args: + module: A valid AnsibleModule instance. + + Returns: + A dictionary containing the switch capabilities. + """ + if hasattr(module, 'slxos_capabilities'): + return module.slxos_capabilities + + capabilities = Connection(module._socket_path).get_capabilities() + module.slxos_capabilities = json.loads(capabilities) + return module.slxos_capabilities + + +def run_commands(module, commands): + """Run command list against connection. + + Get new or previously used connection and send commands to it one at a time, + collecting response. + + Args: + module: A valid AnsibleModule instance. + commands: Iterable of command strings. + + Returns: + A list of output strings. + """ + responses = list() + connection = get_connection(module) + + for cmd in to_list(commands): + if isinstance(cmd, dict): + command = cmd['command'] + prompt = cmd['prompt'] + answer = cmd['answer'] + else: + command = cmd + prompt = None + answer = None + + out = connection.get(command, prompt, answer) + + try: + out = to_text(out, errors='surrogate_or_strict') + except UnicodeError: + module.fail_json(msg=u'Failed to decode output from %s: %s' % (cmd, to_text(out))) + + responses.append(out) + + return responses + + +def get_config(module): + """Get switch configuration + + Gets the described device's current configuration. If a configuration has + already been retrieved it will return the previously obtained configuration. 
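+
+    Illustrative call (variable names are hypothetical)::
+
+        cfg = get_config(module)
+        intf_lines = [line for line in cfg.splitlines() if line.startswith('interface')]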
+ + Args: + module: A valid AnsibleModule instance. + + Returns: + A string containing the configuration. + """ + if not hasattr(module, 'device_configs'): + module.device_configs = {} + elif module.device_configs != {}: + return module.device_configs + + connection = get_connection(module) + out = connection.get_config() + cfg = to_text(out, errors='surrogate_then_replace').strip() + module.device_configs = cfg + return cfg + + +def load_config(module, commands): + """Apply a list of commands to a device. + + Given a list of commands apply them to the device to modify the + configuration in bulk. + + Args: + module: A valid AnsibleModule instance. + commands: Iterable of command strings. + + Returns: + None + """ + connection = get_connection(module) + connection.edit_config(commands) diff --git a/plugins/module_utils/network/sros/__init__.py b/plugins/module_utils/network/sros/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/plugins/module_utils/network/sros/sros.py b/plugins/module_utils/network/sros/sros.py new file mode 100644 index 0000000000..4bbce73903 --- /dev/null +++ b/plugins/module_utils/network/sros/sros.py @@ -0,0 +1,111 @@ +# This code is part of Ansible, but is an independent component. +# This particular file snippet, and this file snippet only, is BSD licensed. +# Modules you write using this snippet, which is embedded dynamically by Ansible +# still belong to the author of the module, and may assign their own license +# to the complete work. +# +# Copyright (c) 2016 Peter Sprygada, +# +# Redistribution and use in source and binary forms, with or without +# modification, +# are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, +# this list of conditions and the following disclaimer in the +# documentation +# and/or other materials provided with the distribution. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
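+#
+# Editorial usage sketch (not part of the original upstream file): modules
+# built on these helpers typically fetch and push configuration roughly as
+# follows; the command strings are illustrative only.
+#
+#     config = get_config(module)
+#     responses = run_commands(module, ['show version'])
+#     load_config(module, ['configure system name "sros-1" exit all'])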
+# +import re + +from ansible.module_utils._text import to_text +from ansible.module_utils.basic import env_fallback +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import to_list, ComplexList +from ansible.module_utils.connection import exec_command + +_DEVICE_CONFIGS = {} + +sros_provider_spec = { + 'host': dict(), + 'port': dict(type='int'), + 'username': dict(fallback=(env_fallback, ['ANSIBLE_NET_USERNAME'])), + 'password': dict(fallback=(env_fallback, ['ANSIBLE_NET_PASSWORD']), no_log=True), + 'ssh_keyfile': dict(fallback=(env_fallback, ['ANSIBLE_NET_SSH_KEYFILE']), type='path'), + 'timeout': dict(type='int'), +} +sros_argument_spec = { + 'provider': dict(type='dict', options=sros_provider_spec), +} +sros_top_spec = { + 'host': dict(removed_in_version=2.9), + 'port': dict(removed_in_version=2.9, type='int'), + 'username': dict(removed_in_version=2.9), + 'password': dict(removed_in_version=2.9, no_log=True), + 'ssh_keyfile': dict(removed_in_version=2.9, type='path'), + 'timeout': dict(removed_in_version=2.9, type='int'), +} +sros_argument_spec.update(sros_top_spec) + + +def check_args(module, warnings): + pass + + +def get_config(module, flags=None): + flags = [] if flags is None else flags + + cmd = 'admin display-config ' + cmd += ' '.join(flags) + cmd = cmd.strip() + + try: + return _DEVICE_CONFIGS[cmd] + except KeyError: + rc, out, err = exec_command(module, cmd) + if rc != 0: + module.fail_json(msg='unable to retrieve current config', stderr=to_text(err, errors='surrogate_or_strict')) + cfg = to_text(out, errors='surrogate_or_strict').strip() + _DEVICE_CONFIGS[cmd] = cfg + return cfg + + +def to_commands(module, commands): + spec = { + 'command': dict(key=True), + 'prompt': dict(), + 'answer': dict() + } + transform = ComplexList(spec, module) + return transform(commands) + + +def run_commands(module, commands, check_rc=True): + responses = list() + commands = to_commands(module, to_list(commands)) + for cmd in commands: + cmd = module.jsonify(cmd) + rc, out, err = exec_command(module, cmd) + if check_rc and rc != 0: + module.fail_json(msg=to_text(err, errors='surrogate_or_strict'), rc=rc) + responses.append(to_text(out, errors='surrogate_or_strict')) + return responses + + +def load_config(module, commands): + for command in to_list(commands): + rc, out, err = exec_command(module, command) + if rc != 0: + module.fail_json(msg=to_text(err, errors='surrogate_or_strict'), command=command, rc=rc) + exec_command(module, 'exit all') diff --git a/plugins/module_utils/network/voss/__init__.py b/plugins/module_utils/network/voss/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/plugins/module_utils/network/voss/voss.py b/plugins/module_utils/network/voss/voss.py new file mode 100644 index 0000000000..0de269e440 --- /dev/null +++ b/plugins/module_utils/network/voss/voss.py @@ -0,0 +1,219 @@ +# This code is part of Ansible, but is an independent component. +# This particular file snippet, and this file snippet only, is BSD licensed. +# Modules you write using this snippet, which is embedded dynamically by Ansible +# still belong to the author of the module, and may assign their own license +# to the complete work. +# +# (c) 2018 Extreme Networks Inc. 
+# +# Redistribution and use in source and binary forms, with or without modification, +# are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# +import json +import re + +from ansible.module_utils._text import to_native, to_text +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import to_list, ComplexList +from ansible.module_utils.connection import Connection, ConnectionError +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.config import NetworkConfig, ConfigLine + +_DEVICE_CONFIGS = {} + +DEFAULT_COMMENT_TOKENS = ['#', '!', '/*', '*/', 'echo'] + +DEFAULT_IGNORE_LINES_RE = set([ + re.compile(r"Preparing to Display Configuration\.\.\.") +]) + + +def get_connection(module): + if hasattr(module, '_voss_connection'): + return module._voss_connection + + capabilities = get_capabilities(module) + network_api = capabilities.get('network_api') + if network_api == 'cliconf': + module._voss_connection = Connection(module._socket_path) + else: + module.fail_json(msg='Invalid connection type %s' % network_api) + + return module._voss_connection + + +def get_capabilities(module): + if hasattr(module, '_voss_capabilities'): + return module._voss_capabilities + try: + capabilities = Connection(module._socket_path).get_capabilities() + except ConnectionError as exc: + module.fail_json(msg=to_text(exc, errors='surrogate_then_replace')) + module._voss_capabilities = json.loads(capabilities) + return module._voss_capabilities + + +def get_defaults_flag(module): + connection = get_connection(module) + try: + out = connection.get_defaults_flag() + except ConnectionError as exc: + module.fail_json(msg=to_text(exc, errors='surrogate_then_replace')) + return to_text(out, errors='surrogate_then_replace').strip() + + +def get_config(module, source='running', flags=None): + flag_str = ' '.join(to_list(flags)) + + try: + return _DEVICE_CONFIGS[flag_str] + except KeyError: + connection = get_connection(module) + try: + out = connection.get_config(source=source, flags=flags) + except ConnectionError as exc: + module.fail_json(msg=to_text(exc, errors='surrogate_then_replace')) + cfg = to_text(out, errors='surrogate_then_replace').strip() + _DEVICE_CONFIGS[flag_str] = cfg + return cfg + + +def to_commands(module, commands): + spec = { + 'command': dict(key=True), + 'prompt': dict(), + 'answer': dict() + } + transform = 
ComplexList(spec, module) + return transform(commands) + + +def run_commands(module, commands, check_rc=True): + connection = get_connection(module) + try: + out = connection.run_commands(commands=commands, check_rc=check_rc) + return out + except ConnectionError as exc: + module.fail_json(msg=to_text(exc)) + + +def load_config(module, commands): + connection = get_connection(module) + + try: + resp = connection.edit_config(commands) + return resp.get('response') + except ConnectionError as exc: + module.fail_json(msg=to_text(exc)) + + +def get_sublevel_config(running_config, module): + contents = list() + current_config_contents = list() + sublevel_config = VossNetworkConfig(indent=0) + obj = running_config.get_object(module.params['parents']) + if obj: + contents = obj._children + for c in contents: + if isinstance(c, ConfigLine): + current_config_contents.append(c.raw) + sublevel_config.add(current_config_contents, module.params['parents']) + return sublevel_config + + +def ignore_line(text, tokens=None): + for item in (tokens or DEFAULT_COMMENT_TOKENS): + if text.startswith(item): + return True + for regex in DEFAULT_IGNORE_LINES_RE: + if regex.match(text): + return True + + +def voss_parse(lines, indent=None, comment_tokens=None): + toplevel = re.compile(r'(^interface.*$)|(^router \w+$)|(^router vrf \w+$)') + exitline = re.compile(r'^exit$') + entry_reg = re.compile(r'([{};])') + + ancestors = list() + config = list() + dup_parent_index = None + + for line in to_native(lines, errors='surrogate_or_strict').split('\n'): + text = entry_reg.sub('', line).strip() + + cfg = ConfigLine(text) + + if not text or ignore_line(text, comment_tokens): + continue + + # Handle top level commands + if toplevel.match(text): + # Looking to see if we have existing parent + for index, item in enumerate(config): + if item.text == text: + # This means we have an existing parent with same label + dup_parent_index = index + break + ancestors = [cfg] + config.append(cfg) + + # Handle 'exit' line + elif exitline.match(text): + ancestors = list() + + if dup_parent_index is not None: + # We're working with a duplicate parent + # Don't need to store exit, just go to next line in config + dup_parent_index = None + else: + cfg._parents = ancestors[:1] + config.append(cfg) + + # Handle sub-level commands. Only have single sub-level + elif ancestors: + cfg._parents = ancestors[:1] + if dup_parent_index is not None: + # Update existing entry, since this already exists in config + config[int(dup_parent_index)].add_child(cfg) + new_index = dup_parent_index + 1 + config.insert(new_index, cfg) + else: + ancestors[0].add_child(cfg) + config.append(cfg) + + else: + # Global command, no further special handling needed + config.append(cfg) + return config + + +class VossNetworkConfig(NetworkConfig): + + def load(self, s): + self._config_text = s + self._items = voss_parse(s, self._indent) + + def _diff_line(self, other): + updates = list() + for item in self.items: + if str(item) == "exit": + if updates and updates[-1]._parents: + updates.append(item) + elif item not in other: + updates.append(item) + return updates diff --git a/plugins/module_utils/oneandone.py b/plugins/module_utils/oneandone.py new file mode 100644 index 0000000000..75cfbae695 --- /dev/null +++ b/plugins/module_utils/oneandone.py @@ -0,0 +1,277 @@ +# This code is part of Ansible, but is an independent component. +# This particular file snippet, and this file snippet only, is BSD licensed. 
+# Modules you write using this snippet, which is embedded dynamically by Ansible +# still belong to the author of the module, and may assign their own license +# to the complete work. +# +# Redistribution and use in source and binary forms, with or without modification, +# are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# +import time + + +class OneAndOneResources: + firewall_policy = 'firewall_policy' + load_balancer = 'load_balancer' + monitoring_policy = 'monitoring_policy' + private_network = 'private_network' + public_ip = 'public_ip' + role = 'role' + server = 'server' + user = 'user' + vpn = 'vpn' + + +def get_resource(oneandone_conn, resource_type, resource_id): + switcher = { + 'firewall_policy': oneandone_conn.get_firewall, + 'load_balancer': oneandone_conn.get_load_balancer, + 'monitoring_policy': oneandone_conn.get_monitoring_policy, + 'private_network': oneandone_conn.get_private_network, + 'public_ip': oneandone_conn.get_public_ip, + 'role': oneandone_conn.get_role, + 'server': oneandone_conn.get_server, + 'user': oneandone_conn.get_user, + 'vpn': oneandone_conn.get_vpn, + } + + return switcher.get(resource_type, None)(resource_id) + + +def get_datacenter(oneandone_conn, datacenter, full_object=False): + """ + Validates the datacenter exists by ID or country code. + Returns the datacenter ID. + """ + for _datacenter in oneandone_conn.list_datacenters(): + if datacenter in (_datacenter['id'], _datacenter['country_code']): + if full_object: + return _datacenter + return _datacenter['id'] + + +def get_fixed_instance_size(oneandone_conn, fixed_instance_size, full_object=False): + """ + Validates the fixed instance size exists by ID or name. + Return the instance size ID. + """ + for _fixed_instance_size in oneandone_conn.fixed_server_flavors(): + if fixed_instance_size in (_fixed_instance_size['id'], + _fixed_instance_size['name']): + if full_object: + return _fixed_instance_size + return _fixed_instance_size['id'] + + +def get_appliance(oneandone_conn, appliance, full_object=False): + """ + Validates the appliance exists by ID or name. + Return the appliance ID. 
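+
+    Example (the connection object and appliance name are illustrative)::
+
+        appliance_id = get_appliance(oneandone_conn, 'ubuntu1604-64std')
+        appliance = get_appliance(oneandone_conn, 'ubuntu1604-64std', full_object=True)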
+ """ + for _appliance in oneandone_conn.list_appliances(q='IMAGE'): + if appliance in (_appliance['id'], _appliance['name']): + if full_object: + return _appliance + return _appliance['id'] + + +def get_private_network(oneandone_conn, private_network, full_object=False): + """ + Validates the private network exists by ID or name. + Return the private network ID. + """ + for _private_network in oneandone_conn.list_private_networks(): + if private_network in (_private_network['name'], + _private_network['id']): + if full_object: + return _private_network + return _private_network['id'] + + +def get_monitoring_policy(oneandone_conn, monitoring_policy, full_object=False): + """ + Validates the monitoring policy exists by ID or name. + Return the monitoring policy ID. + """ + for _monitoring_policy in oneandone_conn.list_monitoring_policies(): + if monitoring_policy in (_monitoring_policy['name'], + _monitoring_policy['id']): + if full_object: + return _monitoring_policy + return _monitoring_policy['id'] + + +def get_firewall_policy(oneandone_conn, firewall_policy, full_object=False): + """ + Validates the firewall policy exists by ID or name. + Return the firewall policy ID. + """ + for _firewall_policy in oneandone_conn.list_firewall_policies(): + if firewall_policy in (_firewall_policy['name'], + _firewall_policy['id']): + if full_object: + return _firewall_policy + return _firewall_policy['id'] + + +def get_load_balancer(oneandone_conn, load_balancer, full_object=False): + """ + Validates the load balancer exists by ID or name. + Return the load balancer ID. + """ + for _load_balancer in oneandone_conn.list_load_balancers(): + if load_balancer in (_load_balancer['name'], + _load_balancer['id']): + if full_object: + return _load_balancer + return _load_balancer['id'] + + +def get_server(oneandone_conn, instance, full_object=False): + """ + Validates that the server exists whether by ID or name. + Returns the server if one was found. + """ + for server in oneandone_conn.list_servers(per_page=1000): + if instance in (server['id'], server['name']): + if full_object: + return server + return server['id'] + + +def get_user(oneandone_conn, user, full_object=False): + """ + Validates that the user exists by ID or a name. + Returns the user if one was found. + """ + for _user in oneandone_conn.list_users(per_page=1000): + if user in (_user['id'], _user['name']): + if full_object: + return _user + return _user['id'] + + +def get_role(oneandone_conn, role, full_object=False): + """ + Given a name, validates that the role exists + whether it is a proper ID or a name. + Returns the role if one was found, else None. + """ + for _role in oneandone_conn.list_roles(per_page=1000): + if role in (_role['id'], _role['name']): + if full_object: + return _role + return _role['id'] + + +def get_vpn(oneandone_conn, vpn, full_object=False): + """ + Validates that the vpn exists by ID or a name. + Returns the vpn if one was found. + """ + for _vpn in oneandone_conn.list_vpns(per_page=1000): + if vpn in (_vpn['id'], _vpn['name']): + if full_object: + return _vpn + return _vpn['id'] + + +def get_public_ip(oneandone_conn, public_ip, full_object=False): + """ + Validates that the public ip exists by ID or a name. + Returns the public ip if one was found. 
+ """ + for _public_ip in oneandone_conn.list_public_ips(per_page=1000): + if public_ip in (_public_ip['id'], _public_ip['ip']): + if full_object: + return _public_ip + return _public_ip['id'] + + +def wait_for_resource_creation_completion(oneandone_conn, + resource_type, + resource_id, + wait_timeout, + wait_interval): + """ + Waits for the resource create operation to complete based on the timeout period. + """ + wait_timeout = time.time() + wait_timeout + while wait_timeout > time.time(): + time.sleep(wait_interval) + + # Refresh the resource info + resource = get_resource(oneandone_conn, resource_type, resource_id) + + if resource_type == OneAndOneResources.server: + resource_state = resource['status']['state'] + else: + resource_state = resource['state'] + + if ((resource_type == OneAndOneResources.server and resource_state.lower() == 'powered_on') or + (resource_type != OneAndOneResources.server and resource_state.lower() == 'active')): + return + elif resource_state.lower() == 'failed': + raise Exception('%s creation failed for %s' % (resource_type, resource_id)) + elif resource_state.lower() in ('active', + 'enabled', + 'deploying', + 'configuring'): + continue + else: + raise Exception( + 'Unknown %s state %s' % (resource_type, resource_state)) + + raise Exception( + 'Timed out waiting for %s completion for %s' % (resource_type, resource_id)) + + +def wait_for_resource_deletion_completion(oneandone_conn, + resource_type, + resource_id, + wait_timeout, + wait_interval): + """ + Waits for the resource delete operation to complete based on the timeout period. + """ + wait_timeout = time.time() + wait_timeout + while wait_timeout > time.time(): + time.sleep(wait_interval) + + # Refresh the operation info + logs = oneandone_conn.list_logs(q='DELETE', + period='LAST_HOUR', + sort='-start_date') + + if resource_type == OneAndOneResources.server: + _type = 'VM' + elif resource_type == OneAndOneResources.private_network: + _type = 'PRIVATENETWORK' + else: + raise Exception( + 'Unsupported wait_for delete operation for %s resource' % resource_type) + + for log in logs: + if (log['resource']['id'] == resource_id and + log['action'] == 'DELETE' and + log['type'] == _type and + log['status']['state'] == 'OK'): + return + raise Exception( + 'Timed out waiting for %s deletion for %s' % (resource_type, resource_id)) diff --git a/plugins/module_utils/oneview.py b/plugins/module_utils/oneview.py new file mode 100644 index 0000000000..0d3116c8e6 --- /dev/null +++ b/plugins/module_utils/oneview.py @@ -0,0 +1,502 @@ +# This code is part of Ansible, but is an independent component. +# This particular file snippet, and this file snippet only, is BSD licensed. +# Modules you write using this snippet, which is embedded dynamically by Ansible +# still belong to the author of the module, and may assign their own license +# to the complete work. +# +# Copyright (2016-2017) Hewlett Packard Enterprise Development LP +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without modification, +# are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. 
+# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +from __future__ import (absolute_import, division, print_function) + +import abc +import collections +import json +import os +import traceback + +HPE_ONEVIEW_IMP_ERR = None +try: + from hpOneView.oneview_client import OneViewClient + HAS_HPE_ONEVIEW = True +except ImportError: + HPE_ONEVIEW_IMP_ERR = traceback.format_exc() + HAS_HPE_ONEVIEW = False + +from ansible.module_utils import six +from ansible.module_utils.basic import AnsibleModule, missing_required_lib +from ansible.module_utils._text import to_native +from ansible.module_utils.common._collections_compat import Mapping + + +def transform_list_to_dict(list_): + """ + Transforms a list into a dictionary, putting values as keys. + + :arg list list_: List of values + :return: dict: dictionary built + """ + + ret = {} + + if not list_: + return ret + + for value in list_: + if isinstance(value, Mapping): + ret.update(value) + else: + ret[to_native(value, errors='surrogate_or_strict')] = True + + return ret + + +def merge_list_by_key(original_list, updated_list, key, ignore_when_null=None): + """ + Merge two lists by the key. It basically: + + 1. Adds the items that are present on updated_list and are absent on original_list. + + 2. Removes items that are absent on updated_list and are present on original_list. + + 3. For all items that are in both lists, overwrites the values from the original item by the updated item. + + :arg list original_list: original list. + :arg list updated_list: list with changes. + :arg str key: unique identifier. + :arg list ignore_when_null: list with the keys from the updated items that should be ignored in the merge, + if its values are null. + :return: list: Lists merged. + """ + ignore_when_null = [] if ignore_when_null is None else ignore_when_null + + if not original_list: + return updated_list + + items_map = collections.OrderedDict([(i[key], i.copy()) for i in original_list]) + + merged_items = collections.OrderedDict() + + for item in updated_list: + item_key = item[key] + if item_key in items_map: + for ignored_key in ignore_when_null: + if ignored_key in item and item[ignored_key] is None: + item.pop(ignored_key) + merged_items[item_key] = items_map[item_key] + merged_items[item_key].update(item) + else: + merged_items[item_key] = item + + return list(merged_items.values()) + + +def _str_sorted(obj): + if isinstance(obj, Mapping): + return json.dumps(obj, sort_keys=True) + else: + return str(obj) + + +def _standardize_value(value): + """ + Convert value to string to enhance the comparison. + + :arg value: Any object type. + + :return: str: Converted value. 
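+
+    Example (illustrative)::
+
+        _standardize_value(2.0)  # -> '2'
+        _standardize_value(2)    # -> '2'
+        _standardize_value('2')  # -> '2'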
+ """ + if isinstance(value, float) and value.is_integer(): + # Workaround to avoid erroneous comparison between int and float + # Removes zero from integer floats + value = int(value) + + return str(value) + + +class OneViewModuleException(Exception): + """ + OneView base Exception. + + Attributes: + msg (str): Exception message. + oneview_response (dict): OneView rest response. + """ + + def __init__(self, data): + self.msg = None + self.oneview_response = None + + if isinstance(data, six.string_types): + self.msg = data + else: + self.oneview_response = data + + if data and isinstance(data, dict): + self.msg = data.get('message') + + if self.oneview_response: + Exception.__init__(self, self.msg, self.oneview_response) + else: + Exception.__init__(self, self.msg) + + +class OneViewModuleTaskError(OneViewModuleException): + """ + OneView Task Error Exception. + + Attributes: + msg (str): Exception message. + error_code (str): A code which uniquely identifies the specific error. + """ + + def __init__(self, msg, error_code=None): + super(OneViewModuleTaskError, self).__init__(msg) + self.error_code = error_code + + +class OneViewModuleValueError(OneViewModuleException): + """ + OneView Value Error. + The exception is raised when the data contains an inappropriate value. + + Attributes: + msg (str): Exception message. + """ + pass + + +class OneViewModuleResourceNotFound(OneViewModuleException): + """ + OneView Resource Not Found Exception. + The exception is raised when an associated resource was not found. + + Attributes: + msg (str): Exception message. + """ + pass + + +@six.add_metaclass(abc.ABCMeta) +class OneViewModuleBase(object): + MSG_CREATED = 'Resource created successfully.' + MSG_UPDATED = 'Resource updated successfully.' + MSG_DELETED = 'Resource deleted successfully.' + MSG_ALREADY_PRESENT = 'Resource is already present.' + MSG_ALREADY_ABSENT = 'Resource is already absent.' + MSG_DIFF_AT_KEY = 'Difference found at key \'{0}\'. ' + + ONEVIEW_COMMON_ARGS = dict( + config=dict(type='path'), + hostname=dict(type='str'), + username=dict(type='str'), + password=dict(type='str', no_log=True), + api_version=dict(type='int'), + image_streamer_hostname=dict(type='str') + ) + + ONEVIEW_VALIDATE_ETAG_ARGS = dict(validate_etag=dict(type='bool', default=True)) + + resource_client = None + + def __init__(self, additional_arg_spec=None, validate_etag_support=False): + """ + OneViewModuleBase constructor. + + :arg dict additional_arg_spec: Additional argument spec definition. + :arg bool validate_etag_support: Enables support to eTag validation. 
+ """ + argument_spec = self._build_argument_spec(additional_arg_spec, validate_etag_support) + + self.module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False) + + self._check_hpe_oneview_sdk() + self._create_oneview_client() + + self.state = self.module.params.get('state') + self.data = self.module.params.get('data') + + # Preload params for get_all - used by facts + self.facts_params = self.module.params.get('params') or {} + + # Preload options as dict - used by facts + self.options = transform_list_to_dict(self.module.params.get('options')) + + self.validate_etag_support = validate_etag_support + + def _build_argument_spec(self, additional_arg_spec, validate_etag_support): + + merged_arg_spec = dict() + merged_arg_spec.update(self.ONEVIEW_COMMON_ARGS) + + if validate_etag_support: + merged_arg_spec.update(self.ONEVIEW_VALIDATE_ETAG_ARGS) + + if additional_arg_spec: + merged_arg_spec.update(additional_arg_spec) + + return merged_arg_spec + + def _check_hpe_oneview_sdk(self): + if not HAS_HPE_ONEVIEW: + self.module.fail_json(msg=missing_required_lib('hpOneView'), exception=HPE_ONEVIEW_IMP_ERR) + + def _create_oneview_client(self): + if self.module.params.get('hostname'): + config = dict(ip=self.module.params['hostname'], + credentials=dict(userName=self.module.params['username'], password=self.module.params['password']), + api_version=self.module.params['api_version'], + image_streamer_ip=self.module.params['image_streamer_hostname']) + self.oneview_client = OneViewClient(config) + elif not self.module.params['config']: + self.oneview_client = OneViewClient.from_environment_variables() + else: + self.oneview_client = OneViewClient.from_json_file(self.module.params['config']) + + @abc.abstractmethod + def execute_module(self): + """ + Abstract method, must be implemented by the inheritor. + + This method is called from the run method. It should contains the module logic + + :return: dict: It must return a dictionary with the attributes for the module result, + such as ansible_facts, msg and changed. + """ + pass + + def run(self): + """ + Common implementation of the OneView run modules. + + It calls the inheritor 'execute_module' function and sends the return to the Ansible. + + It handles any OneViewModuleException in order to signal a failure to Ansible, with a descriptive error message. + + """ + try: + if self.validate_etag_support: + if not self.module.params.get('validate_etag'): + self.oneview_client.connection.disable_etag_validation() + + result = self.execute_module() + + if "changed" not in result: + result['changed'] = False + + self.module.exit_json(**result) + + except OneViewModuleException as exception: + error_msg = '; '.join(to_native(e) for e in exception.args) + self.module.fail_json(msg=error_msg, exception=traceback.format_exc()) + + def resource_absent(self, resource, method='delete'): + """ + Generic implementation of the absent state for the OneView resources. + + It checks if the resource needs to be removed. + + :arg dict resource: Resource to delete. + :arg str method: Function of the OneView client that will be called for resource deletion. + Usually delete or remove. + :return: A dictionary with the expected arguments for the AnsibleModule.exit_json + """ + if resource: + getattr(self.resource_client, method)(resource) + + return {"changed": True, "msg": self.MSG_DELETED} + else: + return {"changed": False, "msg": self.MSG_ALREADY_ABSENT} + + def get_by_name(self, name): + """ + Generic get by name implementation. 
+ + :arg str name: Resource name to search for. + + :return: The resource found or None. + """ + result = self.resource_client.get_by('name', name) + return result[0] if result else None + + def resource_present(self, resource, fact_name, create_method='create'): + """ + Generic implementation of the present state for the OneView resources. + + It checks if the resource needs to be created or updated. + + :arg dict resource: Resource to create or update. + :arg str fact_name: Name of the fact returned to the Ansible. + :arg str create_method: Function of the OneView client that will be called for resource creation. + Usually create or add. + :return: A dictionary with the expected arguments for the AnsibleModule.exit_json + """ + + changed = False + if "newName" in self.data: + self.data["name"] = self.data.pop("newName") + + if not resource: + resource = getattr(self.resource_client, create_method)(self.data) + msg = self.MSG_CREATED + changed = True + + else: + merged_data = resource.copy() + merged_data.update(self.data) + + if self.compare(resource, merged_data): + msg = self.MSG_ALREADY_PRESENT + else: + resource = self.resource_client.update(merged_data) + changed = True + msg = self.MSG_UPDATED + + return dict( + msg=msg, + changed=changed, + ansible_facts={fact_name: resource} + ) + + def resource_scopes_set(self, state, fact_name, scope_uris): + """ + Generic implementation of the scopes update PATCH for the OneView resources. + It checks if the resource needs to be updated with the current scopes. + This method is meant to be run after ensuring the present state. + :arg dict state: Dict containing the data from the last state results in the resource. + It needs to have the 'msg', 'changed', and 'ansible_facts' entries. + :arg str fact_name: Name of the fact returned to the Ansible. + :arg list scope_uris: List with all the scope URIs to be added to the resource. + :return: A dictionary with the expected arguments for the AnsibleModule.exit_json + """ + if scope_uris is None: + scope_uris = [] + resource = state['ansible_facts'][fact_name] + operation_data = dict(operation='replace', path='/scopeUris', value=scope_uris) + + if resource['scopeUris'] is None or set(resource['scopeUris']) != set(scope_uris): + state['ansible_facts'][fact_name] = self.resource_client.patch(resource['uri'], **operation_data) + state['changed'] = True + state['msg'] = self.MSG_UPDATED + + return state + + def compare(self, first_resource, second_resource): + """ + Recursively compares dictionary contents equivalence, ignoring types and elements order. + Particularities of the comparison: + - Inexistent key = None + - These values are considered equal: None, empty, False + - Lists are compared value by value after a sort, if they have same size. + - Each element is converted to str before the comparison. + :arg dict first_resource: first dictionary + :arg dict second_resource: second dictionary + :return: bool: True when equal, False when different. + """ + resource1 = first_resource + resource2 = second_resource + + debug_resources = "resource1 = {0}, resource2 = {1}".format(resource1, resource2) + + # The first resource is True / Not Null and the second resource is False / Null + if resource1 and not resource2: + self.module.log("resource1 and not resource2. 
" + debug_resources) + return False + + # Checks all keys in first dict against the second dict + for key in resource1: + if key not in resource2: + if resource1[key] is not None: + # Inexistent key is equivalent to exist with value None + self.module.log(self.MSG_DIFF_AT_KEY.format(key) + debug_resources) + return False + # If both values are null, empty or False it will be considered equal. + elif not resource1[key] and not resource2[key]: + continue + elif isinstance(resource1[key], Mapping): + # recursive call + if not self.compare(resource1[key], resource2[key]): + self.module.log(self.MSG_DIFF_AT_KEY.format(key) + debug_resources) + return False + elif isinstance(resource1[key], list): + # change comparison function to compare_list + if not self.compare_list(resource1[key], resource2[key]): + self.module.log(self.MSG_DIFF_AT_KEY.format(key) + debug_resources) + return False + elif _standardize_value(resource1[key]) != _standardize_value(resource2[key]): + self.module.log(self.MSG_DIFF_AT_KEY.format(key) + debug_resources) + return False + + # Checks all keys in the second dict, looking for missing elements + for key in resource2.keys(): + if key not in resource1: + if resource2[key] is not None: + # Inexistent key is equivalent to exist with value None + self.module.log(self.MSG_DIFF_AT_KEY.format(key) + debug_resources) + return False + + return True + + def compare_list(self, first_resource, second_resource): + """ + Recursively compares lists contents equivalence, ignoring types and element orders. + Lists with same size are compared value by value after a sort, + each element is converted to str before the comparison. + :arg list first_resource: first list + :arg list second_resource: second list + :return: True when equal; False when different. + """ + + resource1 = first_resource + resource2 = second_resource + + debug_resources = "resource1 = {0}, resource2 = {1}".format(resource1, resource2) + + # The second list is null / empty / False + if not resource2: + self.module.log("resource 2 is null. " + debug_resources) + return False + + if len(resource1) != len(resource2): + self.module.log("resources have different length. " + debug_resources) + return False + + resource1 = sorted(resource1, key=_str_sorted) + resource2 = sorted(resource2, key=_str_sorted) + + for i, val in enumerate(resource1): + if isinstance(val, Mapping): + # change comparison function to compare dictionaries + if not self.compare(val, resource2[i]): + self.module.log("resources are different. " + debug_resources) + return False + elif isinstance(val, list): + # recursive call + if not self.compare_list(val, resource2[i]): + self.module.log("lists are different. " + debug_resources) + return False + elif _standardize_value(val) != _standardize_value(resource2[i]): + self.module.log("values are different. 
" + debug_resources) + return False + + # no differences found + return True diff --git a/plugins/module_utils/online.py b/plugins/module_utils/online.py new file mode 100644 index 0000000000..464e454288 --- /dev/null +++ b/plugins/module_utils/online.py @@ -0,0 +1,121 @@ +# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import json +import sys + +from ansible.module_utils.basic import env_fallback +from ansible.module_utils.urls import fetch_url + + +def online_argument_spec(): + return dict( + api_token=dict(required=True, fallback=(env_fallback, ['ONLINE_TOKEN', 'ONLINE_API_KEY', 'ONLINE_OAUTH_TOKEN', 'ONLINE_API_TOKEN']), + no_log=True, aliases=['oauth_token']), + api_url=dict(fallback=(env_fallback, ['ONLINE_API_URL']), default='https://api.online.net', aliases=['base_url']), + api_timeout=dict(type='int', default=30, aliases=['timeout']), + validate_certs=dict(default=True, type='bool'), + ) + + +class OnlineException(Exception): + + def __init__(self, message): + self.message = message + + +class Response(object): + + def __init__(self, resp, info): + self.body = None + if resp: + self.body = resp.read() + self.info = info + + @property + def json(self): + if not self.body: + if "body" in self.info: + return json.loads(self.info["body"]) + return None + try: + return json.loads(self.body) + except ValueError: + return None + + @property + def status_code(self): + return self.info["status"] + + @property + def ok(self): + return self.status_code in (200, 201, 202, 204) + + +class Online(object): + + def __init__(self, module): + self.module = module + self.headers = { + 'Authorization': "Bearer %s" % self.module.params.get('api_token'), + 'User-Agent': self.get_user_agent_string(module), + 'Content-type': 'application/json', + } + self.name = None + + def get_resources(self): + results = self.get('/%s' % self.name) + if not results.ok: + raise OnlineException('Error fetching {0} ({1}) [{2}: {3}]'.format( + self.name, '%s/%s' % (self.module.params.get('api_url'), self.name), + results.status_code, results.json['message'] + )) + + return results.json + + def _url_builder(self, path): + if path[0] == '/': + path = path[1:] + return '%s/%s' % (self.module.params.get('api_url'), path) + + def send(self, method, path, data=None, headers=None): + url = self._url_builder(path) + data = self.module.jsonify(data) + + if headers is not None: + self.headers.update(headers) + + resp, info = fetch_url( + self.module, url, data=data, headers=self.headers, method=method, + timeout=self.module.params.get('api_timeout') + ) + + # Exceptions in fetch_url may result in a status -1, the ensures a proper error to the user in all cases + if info['status'] == -1: + self.module.fail_json(msg=info['msg']) + + return Response(resp, info) + + @staticmethod + def get_user_agent_string(module): + return "ansible %s Python %s" % (module.ansible_version, sys.version.split(' ')[0]) + + def get(self, path, data=None, headers=None): + return self.send('GET', path, data, headers) + + def put(self, path, data=None, headers=None): + return self.send('PUT', path, data, headers) + + def post(self, path, data=None, headers=None): + return self.send('POST', path, data, headers) + + def delete(self, path, data=None, headers=None): + return self.send('DELETE', path, data, headers) + + def patch(self, path, data=None, headers=None): + return self.send("PATCH", path, data, 
headers)
+
+    def update(self, path, data=None, headers=None):
+        return self.send("UPDATE", path, data, headers)
diff --git a/plugins/module_utils/opennebula.py b/plugins/module_utils/opennebula.py
new file mode 100644
index 0000000000..a520e32187
--- /dev/null
+++ b/plugins/module_utils/opennebula.py
@@ -0,0 +1,306 @@
+#
+# Copyright 2018 www.privaz.io Valletech AB
+# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
+
+
+import time
+import ssl
+from os import environ
+from ansible.module_utils.six import string_types
+from ansible.module_utils.basic import AnsibleModule
+
+
+HAS_PYONE = True
+
+try:
+    from pyone import OneException
+    from pyone.server import OneServer
+except ImportError:
+    OneException = Exception
+    HAS_PYONE = False
+
+
+class OpenNebulaModule:
+    """
+    Base class for all OpenNebula Ansible Modules.
+    This is basically a wrapper of the common arguments, the pyone client and
+    some utility methods.
+    """
+
+    common_args = dict(
+        api_url=dict(type='str', aliases=['api_endpoint'], default=environ.get("ONE_URL")),
+        api_username=dict(type='str', default=environ.get("ONE_USERNAME")),
+        api_password=dict(type='str', no_log=True, aliases=['api_token'], default=environ.get("ONE_PASSWORD")),
+        validate_certs=dict(default=True, type='bool'),
+        wait_timeout=dict(type='int', default=300),
+    )
+
+    def __init__(self, argument_spec, supports_check_mode=False, mutually_exclusive=None):
+
+        # Copy the shared class-level dict so that updating it with this
+        # module's argument_spec does not mutate common_args for other modules.
+        module_args = OpenNebulaModule.common_args.copy()
+        module_args.update(argument_spec)
+
+        self.module = AnsibleModule(argument_spec=module_args,
+                                    supports_check_mode=supports_check_mode,
+                                    mutually_exclusive=mutually_exclusive)
+        self.result = dict(changed=False,
+                           original_message='',
+                           message='')
+        self.one = self.create_one_client()
+
+        self.resolved_parameters = self.resolve_parameters()
+
+    def create_one_client(self):
+        """
+        Creates an XML-RPC client to OpenNebula.
+
+        Returns: the new xmlrpc client.
+
+        """
+
+        # context required for not validating SSL, old python versions won't validate anyway.
+        if hasattr(ssl, '_create_unverified_context'):
+            no_ssl_validation_context = ssl._create_unverified_context()
+        else:
+            no_ssl_validation_context = None
+
+        # Check if the module can run
+        if not HAS_PYONE:
+            self.fail("pyone is required for this module")
+
+        if self.module.params.get("api_url"):
+            url = self.module.params.get("api_url")
+        else:
+            self.fail("Either api_url or the environment variable ONE_URL must be provided")
+
+        if self.module.params.get("api_username"):
+            username = self.module.params.get("api_username")
+        else:
+            self.fail("Either api_username or the environment variable ONE_USERNAME must be provided")
+
+        if self.module.params.get("api_password"):
+            password = self.module.params.get("api_password")
+        else:
+            self.fail("Either api_password or the environment variable ONE_PASSWORD must be provided")
+
+        session = "%s:%s" % (username, password)
+
+        if not self.module.params.get("validate_certs") and "PYTHONHTTPSVERIFY" not in environ:
+            return OneServer(url, session=session, context=no_ssl_validation_context)
+        else:
+            return OneServer(url, session)
+
+    def close_one_client(self):
+        """
+        Close the pyone session.
+        """
+        self.one.server_close()
+
+    def fail(self, msg):
+        """
+        Utility failure method, will ensure pyone is properly closed before failing.
+        Args:
+            msg: human readable failure reason.
+ """ + if hasattr(self, 'one'): + self.close_one_client() + self.module.fail_json(msg=msg) + + def exit(self): + """ + Utility exit method, will ensure pyone is properly closed before exiting. + + """ + if hasattr(self, 'one'): + self.close_one_client() + self.module.exit_json(**self.result) + + def resolve_parameters(self): + """ + This method resolves parameters provided by a secondary ID to the primary ID. + For example if cluster_name is present, cluster_id will be introduced by performing + the required resolution + + Returns: a copy of the parameters that includes the resolved parameters. + + """ + + resolved_params = dict(self.module.params) + + if 'cluster_name' in self.module.params: + clusters = self.one.clusterpool.info() + for cluster in clusters.CLUSTER: + if cluster.NAME == self.module.params.get('cluster_name'): + resolved_params['cluster_id'] = cluster.ID + + return resolved_params + + def is_parameter(self, name): + """ + Utility method to check if a parameter was provided or is resolved + Args: + name: the parameter to check + """ + if name in self.resolved_parameters: + return self.get_parameter(name) is not None + else: + return False + + def get_parameter(self, name): + """ + Utility method for accessing parameters that includes resolved ID + parameters from provided Name parameters. + """ + return self.resolved_parameters.get(name) + + def get_host_by_name(self, name): + ''' + Returns a host given its name. + Args: + name: the name of the host + + Returns: the host object or None if the host is absent. + + ''' + hosts = self.one.hostpool.info() + for h in hosts.HOST: + if h.NAME == name: + return h + return None + + def get_cluster_by_name(self, name): + """ + Returns a cluster given its name. + Args: + name: the name of the cluster + + Returns: the cluster object or None if the host is absent. + """ + + clusters = self.one.clusterpool.info() + for c in clusters.CLUSTER: + if c.NAME == name: + return c + return None + + def get_template_by_name(self, name): + ''' + Returns a template given its name. + Args: + name: the name of the template + + Returns: the template object or None if the host is absent. + + ''' + templates = self.one.templatepool.info() + for t in templates.TEMPLATE: + if t.NAME == name: + return t + return None + + def cast_template(self, template): + """ + OpenNebula handles all template elements as strings + At some point there is a cast being performed on types provided by the user + This function mimics that transformation so that required template updates are detected properly + additionally an array will be converted to a comma separated list, + which works for labels and hopefully for something more. + + Args: + template: the template to transform + + Returns: the transformed template with data casts applied. 
+ """ + + # TODO: check formally available data types in templates + # TODO: some arrays might be converted to space separated + + for key in template: + value = template[key] + if isinstance(value, dict): + self.cast_template(template[key]) + elif isinstance(value, list): + template[key] = ', '.join(value) + elif not isinstance(value, string_types): + template[key] = str(value) + + def requires_template_update(self, current, desired): + """ + This function will help decide if a template update is required or not + If a desired key is missing from the current dictionary an update is required + If the intersection of both dictionaries is not deep equal, an update is required + Args: + current: current template as a dictionary + desired: desired template as a dictionary + + Returns: True if a template update is required + """ + + if not desired: + return False + + self.cast_template(desired) + intersection = dict() + for dkey in desired.keys(): + if dkey in current.keys(): + intersection[dkey] = current[dkey] + else: + return True + return not (desired == intersection) + + def wait_for_state(self, element_name, state, state_name, target_states, + invalid_states=None, transition_states=None, + wait_timeout=None): + """ + Args: + element_name: the name of the object we are waiting for: HOST, VM, etc. + state: lambda that returns the current state, will be queried until target state is reached + state_name: lambda that returns the readable form of a given state + target_states: states expected to be reached + invalid_states: if any of this states is reached, fail + transition_states: when used, these are the valid states during the transition. + wait_timeout: timeout period in seconds. Defaults to the provided parameter. + """ + + if not wait_timeout: + wait_timeout = self.module.params.get("wait_timeout") + + start_time = time.time() + + while (time.time() - start_time) < wait_timeout: + current_state = state() + + if current_state in invalid_states: + self.fail('invalid %s state %s' % (element_name, state_name(current_state))) + + if transition_states: + if current_state not in transition_states: + self.fail('invalid %s transition state %s' % (element_name, state_name(current_state))) + + if current_state in target_states: + return True + + time.sleep(self.one.server_retry_interval()) + + self.fail(msg="Wait timeout has expired!") + + def run_module(self): + """ + trigger the start of the execution of the module. + Returns: + + """ + try: + self.run(self.one, self.module, self.result) + except OneException as e: + self.fail(msg="OpenNebula Exception: %s" % e) + + def run(self, one, module, result): + """ + to be implemented by subclass with the actual module actions. + Args: + one: the OpenNebula XMLRPC client + module: the Ansible Module object + result: the Ansible result + """ + raise NotImplementedError("Method requires implementation") diff --git a/plugins/module_utils/oracle/__init__.py b/plugins/module_utils/oracle/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/plugins/module_utils/oracle/oci_utils.py b/plugins/module_utils/oracle/oci_utils.py new file mode 100644 index 0000000000..368337a496 --- /dev/null +++ b/plugins/module_utils/oracle/oci_utils.py @@ -0,0 +1,1961 @@ +# Copyright (c) 2017, 2018, 2019 Oracle and/or its affiliates. 
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import + +import logging +import logging.config +import os +import tempfile +from datetime import datetime +from operator import eq + +import time + +try: + import yaml + + import oci + from oci.constants import HEADER_NEXT_PAGE + + from oci.exceptions import ( + InvalidConfig, + InvalidPrivateKey, + MissingPrivateKeyPassphrase, + ConfigFileNotFound, + ServiceError, + MaximumWaitTimeExceeded, + ) + from oci.identity.identity_client import IdentityClient + from oci.object_storage.models import CreateBucketDetails + from oci.object_storage.models import UpdateBucketDetails + from oci.retry import RetryStrategyBuilder + from oci.util import to_dict, Sentinel + + HAS_OCI_PY_SDK = True +except ImportError: + HAS_OCI_PY_SDK = False + + +from ansible.module_utils._text import to_bytes +from ansible.module_utils.six import iteritems + +__version__ = "1.6.0-dev" + +MAX_WAIT_TIMEOUT_IN_SECONDS = 1200 + +# If a resource is in one of these states it would be considered inactive +DEAD_STATES = [ + "TERMINATING", + "TERMINATED", + "FAULTY", + "FAILED", + "DELETING", + "DELETED", + "UNKNOWN_ENUM_VALUE", + "DETACHING", + "DETACHED", +] + +# If a resource is in one of these states it would be considered available +DEFAULT_READY_STATES = [ + "AVAILABLE", + "ACTIVE", + "RUNNING", + "PROVISIONED", + "ATTACHED", + "ASSIGNED", + "SUCCEEDED", + "PENDING_PROVIDER", +] + +# If a resource is in one of these states, it would be considered deleted +DEFAULT_TERMINATED_STATES = ["TERMINATED", "DETACHED", "DELETED"] + + +def get_common_arg_spec(supports_create=False, supports_wait=False): + """ + Return the common set of module arguments for all OCI cloud modules. + :param supports_create: Variable to decide whether to add options related to idempotency of create operation. + :param supports_wait: Variable to decide whether to add options related to waiting for completion. + :return: A dict with applicable module options. + """ + # Note: This method is used by most OCI ansible resource modules during initialization. When making changes to this + # method, ensure that no `oci` python sdk dependencies are introduced in this method. This ensures that the modules + # can check for absence of OCI Python SDK and fail with an appropriate message. Introducing an OCI dependency in + # this method would break that error handling logic. + common_args = dict( + config_file_location=dict(type="str"), + config_profile_name=dict(type="str", default="DEFAULT"), + api_user=dict(type="str"), + api_user_fingerprint=dict(type="str", no_log=True), + api_user_key_file=dict(type="str"), + api_user_key_pass_phrase=dict(type="str", no_log=True), + auth_type=dict( + type="str", + required=False, + choices=["api_key", "instance_principal"], + default="api_key", + ), + tenancy=dict(type="str"), + region=dict(type="str"), + ) + + if supports_create: + common_args.update( + key_by=dict(type="list"), + force_create=dict(type="bool", default=False), + ) + + if supports_wait: + common_args.update( + wait=dict(type="bool", default=True), + wait_timeout=dict( + type="int", default=MAX_WAIT_TIMEOUT_IN_SECONDS + ), + wait_until=dict(type="str"), + ) + + return common_args + + +def get_facts_module_arg_spec(filter_by_name=False): + # Note: This method is used by most OCI ansible fact modules during initialization. When making changes to this + # method, ensure that no `oci` python sdk dependencies are introduced in this method. 
This ensures that the modules + # can check for absence of OCI Python SDK and fail with an appropriate message. Introducing an OCI dependency in + # this method would break that error handling logic. + facts_module_arg_spec = get_common_arg_spec() + if filter_by_name: + facts_module_arg_spec.update(name=dict(type="str")) + else: + facts_module_arg_spec.update(display_name=dict(type="str")) + return facts_module_arg_spec + + +def get_oci_config(module, service_client_class=None): + """Return the OCI configuration to use for all OCI API calls. The effective OCI configuration is derived by merging + any overrides specified for configuration attributes through Ansible module options or environment variables. The + order of precedence for deriving the effective configuration dict is: + 1. If a config file is provided, use that to setup the initial config dict. + 2. If a config profile is specified, use that config profile to setup the config dict. + 3. For each authentication attribute, check if an override is provided either through + a. Ansible Module option + b. Environment variable + and override the value in the config dict in that order.""" + config = {} + + config_file = module.params.get("config_file_location") + _debug("Config file through module options - {0} ".format(config_file)) + if not config_file: + if "OCI_CONFIG_FILE" in os.environ: + config_file = os.environ["OCI_CONFIG_FILE"] + _debug( + "Config file through OCI_CONFIG_FILE environment variable - {0}".format( + config_file + ) + ) + else: + config_file = "~/.oci/config" + _debug("Config file (fallback) - {0} ".format(config_file)) + + config_profile = module.params.get("config_profile_name") + if not config_profile: + if "OCI_CONFIG_PROFILE" in os.environ: + config_profile = os.environ["OCI_CONFIG_PROFILE"] + else: + config_profile = "DEFAULT" + try: + config = oci.config.from_file( + file_location=config_file, profile_name=config_profile + ) + except ( + ConfigFileNotFound, + InvalidConfig, + InvalidPrivateKey, + MissingPrivateKeyPassphrase, + ) as ex: + if not _is_instance_principal_auth(module): + # When auth_type is not instance_principal, config file is required + module.fail_json(msg=str(ex)) + else: + _debug( + "Ignore {0} as the auth_type is set to instance_principal".format( + str(ex) + ) + ) + # if instance_principal auth is used, an empty 'config' map is used below. + + config["additional_user_agent"] = "Oracle-Ansible/{0}".format(__version__) + # Merge any overrides through other IAM options + _merge_auth_option( + config, + module, + module_option_name="api_user", + env_var_name="OCI_USER_ID", + config_attr_name="user", + ) + _merge_auth_option( + config, + module, + module_option_name="api_user_fingerprint", + env_var_name="OCI_USER_FINGERPRINT", + config_attr_name="fingerprint", + ) + _merge_auth_option( + config, + module, + module_option_name="api_user_key_file", + env_var_name="OCI_USER_KEY_FILE", + config_attr_name="key_file", + ) + _merge_auth_option( + config, + module, + module_option_name="api_user_key_pass_phrase", + env_var_name="OCI_USER_KEY_PASS_PHRASE", + config_attr_name="pass_phrase", + ) + _merge_auth_option( + config, + module, + module_option_name="tenancy", + env_var_name="OCI_TENANCY", + config_attr_name="tenancy", + ) + _merge_auth_option( + config, + module, + module_option_name="region", + env_var_name="OCI_REGION", + config_attr_name="region", + ) + + # Redirect calls to home region for IAM service. 
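+    # (IAM writes are only accepted in the tenancy's home region, so the code
+    # below looks up the tenancy's region subscriptions and rewrites
+    # config["region"] to the subscription flagged with is_home_region.)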
+    do_not_redirect = module.params.get(
+        "do_not_redirect_to_home_region", False
+    ) or os.environ.get("OCI_IDENTITY_DO_NOT_REDIRECT_TO_HOME_REGION")
+    if service_client_class == IdentityClient and not do_not_redirect:
+        _debug("Region passed for module invocation - {0} ".format(config["region"]))
+        identity_client = IdentityClient(config)
+        region_subscriptions = identity_client.list_region_subscriptions(
+            config["tenancy"]
+        ).data
+        # Replace the region in the config with the home region.
+        [config["region"]] = [
+            rs.region_name for rs in region_subscriptions if rs.is_home_region is True
+        ]
+        _debug(
+            "Setting region in the config to home region - {0} ".format(
+                config["region"]
+            )
+        )
+
+    return config
+
+
+def create_service_client(module, service_client_class):
+    """
+    Creates a service client using the common module options provided by the user.
+    :param module: An AnsibleModule that represents user provided options for a Task
+    :param service_client_class: A class that represents a client to an OCI Service
+    :return: A fully configured client
+    """
+    config = get_oci_config(module, service_client_class)
+    kwargs = {}
+
+    if _is_instance_principal_auth(module):
+        try:
+            signer = oci.auth.signers.InstancePrincipalsSecurityTokenSigner()
+        except Exception as ex:
+            message = (
+                "Failed retrieving certificates from localhost. Instance principal based authentication is only "
+                "possible from within OCI compute instances. Exception: {0}".format(
+                    str(ex)
+                )
+            )
+            module.fail_json(msg=message)
+
+        kwargs["signer"] = signer
+
+    # XXX: Validate configuration -- this may be redundant, as all Client constructors perform a validation
+    try:
+        oci.config.validate_config(config, **kwargs)
+    except oci.exceptions.InvalidConfig as ic:
+        module.fail_json(
+            msg="Invalid OCI configuration. Exception: {0}".format(str(ic))
+        )
+
+    # Create service client class with the signer
+    client = service_client_class(config, **kwargs)
+
+    return client
+
+
+def _is_instance_principal_auth(module):
+    # check if auth type is overridden via module params
+    instance_principal_auth = (
+        "auth_type" in module.params
+        and module.params["auth_type"] == "instance_principal"
+    )
+    if not instance_principal_auth:
+        instance_principal_auth = (
+            "OCI_ANSIBLE_AUTH_TYPE" in os.environ
+            and os.environ["OCI_ANSIBLE_AUTH_TYPE"] == "instance_principal"
+        )
+    return instance_principal_auth
+
+
+def _merge_auth_option(
+    config, module, module_option_name, env_var_name, config_attr_name
+):
+    """Merge the values for an authentication attribute from ansible module options and
+    environment variables with the values specified in a configuration file"""
+    _debug("Merging {0}".format(module_option_name))
+
+    auth_attribute = module.params.get(module_option_name)
+    _debug(
+        "\t Ansible module option {0} = {1}".format(module_option_name, auth_attribute)
+    )
+    if not auth_attribute:
+        if env_var_name in os.environ:
+            auth_attribute = os.environ[env_var_name]
+            _debug(
+                "\t Environment variable {0} = {1}".format(env_var_name, auth_attribute)
+            )
+
+    # An authentication attribute has been provided through an env-variable or an ansible
+    # option and must override the corresponding attribute's value specified in the
+    # config file [profile].
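+    # e.g. for the user OCID: the api_user module option wins over the
+    # OCI_USER_ID environment variable, which wins over the 'user' key read
+    # from the selected config-file profile.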
+    if auth_attribute:
+        _debug(
+            "Updating config attribute {0} -> {1} ".format(
+                config_attr_name, auth_attribute
+            )
+        )
+        config.update({config_attr_name: auth_attribute})
+
+
+def bucket_details_factory(bucket_details_type, module):
+    bucket_details = None
+    if bucket_details_type == "create":
+        bucket_details = CreateBucketDetails()
+    elif bucket_details_type == "update":
+        bucket_details = UpdateBucketDetails()
+
+    bucket_details.compartment_id = module.params["compartment_id"]
+    bucket_details.name = module.params["name"]
+    bucket_details.public_access_type = module.params["public_access_type"]
+    bucket_details.metadata = module.params["metadata"]
+
+    return bucket_details
+
+
+def filter_resources(all_resources, filter_params):
+    if not filter_params:
+        return all_resources
+    filtered_resources = []
+    filtered_resources.extend(
+        [
+            resource
+            for resource in all_resources
+            for key, value in filter_params.items()
+            if getattr(resource, key) == value
+        ]
+    )
+    return filtered_resources
+
+
+def list_all_resources(target_fn, **kwargs):
+    """
+    Return all resources after paging through all results returned by target_fn. If a `display_name` or `name` is
+    provided as a kwarg, then only resources matching the specified name are returned.
+    :param target_fn: The target OCI SDK paged function to call
+    :param kwargs: All arguments that the OCI SDK paged function expects
+    :return: List of all objects returned by target_fn
+    :raises ServiceError: When the Service returned an Error response
+    :raises MaximumWaitTimeExceededError: When maximum wait time is exceeded while invoking target_fn
+    """
+    filter_params = None
+    try:
+        response = call_with_backoff(target_fn, **kwargs)
+    except ValueError as ex:
+        if "unknown kwargs" in str(ex):
+            if "display_name" in kwargs:
+                if kwargs["display_name"]:
+                    filter_params = {"display_name": kwargs["display_name"]}
+                del kwargs["display_name"]
+            elif "name" in kwargs:
+                if kwargs["name"]:
+                    filter_params = {"name": kwargs["name"]}
+                del kwargs["name"]
+        response = call_with_backoff(target_fn, **kwargs)
+
+    existing_resources = response.data
+    while response.has_next_page:
+        kwargs.update(page=response.headers.get(HEADER_NEXT_PAGE))
+        response = call_with_backoff(target_fn, **kwargs)
+        existing_resources += response.data
+
+    # If the underlying SDK Service list* method doesn't support filtering by name or display_name, filter the
+    # resources and return the matching list of resources
+    return filter_resources(existing_resources, filter_params)
+
+
+def _debug(s):
+    get_logger("oci_utils").debug(s)
+
+
+def get_logger(module_name):
+    oci_logging = setup_logging()
+    return oci_logging.getLogger(module_name)
+
+
+def setup_logging(
+    default_level="INFO",
+):
+    """Setup logging configuration"""
+    env_log_path = "LOG_PATH"
+    env_log_level = "LOG_LEVEL"
+
+    default_log_path = tempfile.gettempdir()
+    log_path = os.getenv(env_log_path, default_log_path)
+    log_level_str = os.getenv(env_log_level, default_level)
+    log_level = logging.getLevelName(log_level_str)
+    log_file_path = os.path.join(log_path, "oci_ansible_module.log")
+    logging.basicConfig(filename=log_file_path, filemode="a", level=log_level)
+    return logging
+
+
+def check_and_update_attributes(
+    target_instance, attr_name, input_value, existing_value, changed
+):
+    """
+    This function checks the difference between two resource attributes of literal types and sets the attribute
+    value in the target instance type holding the attribute.
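+
+    A hedged usage sketch (names are illustrative, not part of the SDK):
+        changed = check_and_update_attributes(
+            update_details, "display_name",
+            module.params.get("display_name"), resource.display_name, changed
+        )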
+ :param target_instance: The instance which contains the attribute whose values to be compared + :param attr_name: Name of the attribute whose value required to be compared + :param input_value: The value of the attribute provided by user + :param existing_value: The value of the attribute in the existing resource + :param changed: Flag to indicate whether there is any difference between the values + :return: Returns a boolean value indicating whether there is any difference between the values + """ + if input_value is not None and not eq(input_value, existing_value): + changed = True + target_instance.__setattr__(attr_name, input_value) + else: + target_instance.__setattr__(attr_name, existing_value) + return changed + + +def check_and_update_resource( + resource_type, + get_fn, + kwargs_get, + update_fn, + primitive_params_update, + kwargs_non_primitive_update, + module, + update_attributes, + client=None, + sub_attributes_of_update_model=None, + wait_applicable=True, + states=None, +): + + """ + This function handles update operation on a resource. It checks whether update is required and accordingly returns + the resource and the changed status. + :param wait_applicable: Indicates if the resource support wait + :param client: The resource Client class to use to perform the wait checks. This param must be specified if + wait_applicable is True + :param resource_type: The type of the resource. e.g. "private_ip" + :param get_fn: Function used to get the resource. e.g. virtual_network_client.get_private_ip + :param kwargs_get: Dictionary containing the arguments to be used to call get function. + e.g. {"private_ip_id": module.params["private_ip_id"]} + :param update_fn: Function used to update the resource. e.g virtual_network_client.update_private_ip + :param primitive_params_update: List of primitive parameters used for update function. e.g. ['private_ip_id'] + :param kwargs_non_primitive_update: Dictionary containing the non-primitive arguments to be used to call get + function with key as the non-primitive argument type & value as the name of the non-primitive argument to be passed + to the update function. e.g. {UpdatePrivateIpDetails: "update_private_ip_details"} + :param module: Instance of AnsibleModule + :param update_attributes: Attributes in update model. + :param states: List of lifecycle states to watch for while waiting after create_fn is called. + e.g. [module.params['wait_until'], "FAULTY"] + :param sub_attributes_of_update_model: Dictionary of non-primitive sub-attributes of update model. for example, + {'services': [ServiceIdRequestDetails()]} as in UpdateServiceGatewayDetails. + :return: Returns a dictionary containing the "changed" status and the resource. + """ + try: + result = dict(changed=False) + attributes_to_update, resource = get_attr_to_update( + get_fn, kwargs_get, module, update_attributes + ) + + if attributes_to_update: + kwargs_update = get_kwargs_update( + attributes_to_update, + kwargs_non_primitive_update, + module, + primitive_params_update, + sub_attributes_of_update_model, + ) + resource = call_with_backoff(update_fn, **kwargs_update).data + if wait_applicable: + if client is None: + module.fail_json( + msg="wait_applicable is True, but client is not specified." 
+ ) + resource = wait_for_resource_lifecycle_state( + client, module, True, kwargs_get, get_fn, None, resource, states + ) + result["changed"] = True + result[resource_type] = to_dict(resource) + return result + except ServiceError as ex: + module.fail_json(msg=ex.message) + + +def get_kwargs_update( + attributes_to_update, + kwargs_non_primitive_update, + module, + primitive_params_update, + sub_attributes_of_update_model=None, +): + kwargs_update = dict() + for param in primitive_params_update: + kwargs_update[param] = module.params[param] + for param in kwargs_non_primitive_update: + update_object = param() + for key in update_object.attribute_map: + if key in attributes_to_update: + if ( + sub_attributes_of_update_model + and key in sub_attributes_of_update_model + ): + setattr(update_object, key, sub_attributes_of_update_model[key]) + else: + setattr(update_object, key, module.params[key]) + kwargs_update[kwargs_non_primitive_update[param]] = update_object + return kwargs_update + + +def is_dictionary_subset(sub, super_dict): + """ + This function checks if `sub` dictionary is a subset of `super` dictionary. + :param sub: subset dictionary, for example user_provided_attr_value. + :param super_dict: super dictionary, for example resources_attr_value. + :return: True if sub is contained in super. + """ + for key in sub: + if sub[key] != super_dict[key]: + return False + return True + + +def are_lists_equal(s, t): + if s is None and t is None: + return True + + if (s is None and len(t) >= 0) or (t is None and len(s) >= 0) or (len(s) != len(t)): + return False + + if len(s) == 0: + return True + + s = to_dict(s) + t = to_dict(t) + + if type(s[0]) == dict: + # Handle list of dicts. Dictionary returned by the API may have additional keys. For example, a get call on + # service gateway has an attribute `services` which is a list of `ServiceIdResponseDetails`. This has a key + # `service_name` which is not provided in the list of `services` by a user while making an update call; only + # `service_id` is provided by the user in the update call. + sorted_s = sort_list_of_dictionary(s) + sorted_t = sort_list_of_dictionary(t) + for index, d in enumerate(sorted_s): + if not is_dictionary_subset(d, sorted_t[index]): + return False + return True + else: + # Handle lists of primitive types. 
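+        # For example, s=[1, 2, 2] and t=[2, 1, 2] compare equal here: each
+        # element of s is removed from a copy of t, and an empty remainder
+        # means the lists match as multisets, so ordering is not significant.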
+ try: + for elem in s: + t.remove(elem) + except ValueError: + return False + return not t + + +def get_attr_to_update(get_fn, kwargs_get, module, update_attributes): + try: + resource = call_with_backoff(get_fn, **kwargs_get).data + except ServiceError as ex: + module.fail_json(msg=ex.message) + + attributes_to_update = [] + + for attr in update_attributes: + resources_attr_value = getattr(resource, attr, None) + user_provided_attr_value = module.params.get(attr, None) + + unequal_list_attr = ( + type(resources_attr_value) == list or type(user_provided_attr_value) == list + ) and not are_lists_equal(user_provided_attr_value, resources_attr_value) + unequal_attr = type(resources_attr_value) != list and to_dict( + resources_attr_value + ) != to_dict(user_provided_attr_value) + if unequal_list_attr or unequal_attr: + # only update if the user has explicitly provided a value for this attribute + # otherwise, no update is necessary because the user hasn't expressed a particular + # value for that attribute + if module.params.get(attr, None): + attributes_to_update.append(attr) + + return attributes_to_update, resource + + +def get_taggable_arg_spec(supports_create=False, supports_wait=False): + """ + Returns an arg_spec that is valid for taggable OCI resources. + :return: A dict that represents an ansible arg spec that builds over the common_arg_spec and adds free-form and + defined tags. + """ + tag_arg_spec = get_common_arg_spec(supports_create, supports_wait) + tag_arg_spec.update( + dict(freeform_tags=dict(type="dict"), defined_tags=dict(type="dict")) + ) + return tag_arg_spec + + +def add_tags_to_model_from_module(model, module): + """ + Adds free-form and defined tags from an ansible module to a resource model + :param model: A resource model instance that supports 'freeform_tags' and 'defined_tags' as attributes + :param module: An AnsibleModule representing the options provided by the user + :return: The updated model class with the tags specified by the user. + """ + freeform_tags = module.params.get("freeform_tags", None) + defined_tags = module.params.get("defined_tags", None) + return add_tags_to_model_class(model, freeform_tags, defined_tags) + + +def add_tags_to_model_class(model, freeform_tags, defined_tags): + """ + Add free-form and defined tags to a resource model. + :param model: A resource model instance that supports 'freeform_tags' and 'defined_tags' as attributes + :param freeform_tags: A dict representing the freeform_tags to be applied to the model + :param defined_tags: A dict representing the defined_tags to be applied to the model + :return: The updated model class with the tags specified by the user + """ + try: + if freeform_tags is not None: + _debug("Model {0} set freeform tags to {1}".format(model, freeform_tags)) + model.__setattr__("freeform_tags", freeform_tags) + + if defined_tags is not None: + _debug("Model {0} set defined tags to {1}".format(model, defined_tags)) + model.__setattr__("defined_tags", defined_tags) + except AttributeError as ae: + _debug("Model {0} doesn't support tags. Error {1}".format(model, ae)) + + return model + + +def check_and_create_resource( + resource_type, + create_fn, + kwargs_create, + list_fn, + kwargs_list, + module, + model, + existing_resources=None, + exclude_attributes=None, + dead_states=None, + default_attribute_values=None, + supports_sort_by_time_created=True, +): + """ + This function checks whether there is a resource with same attributes as specified in the module options. 
If not,
+    it creates and returns the resource.
+    :param resource_type: Type of the resource to be created.
+    :param create_fn: Function used in the module to handle create operation. The function should return a dict with
+    keys as resource & changed.
+    :param kwargs_create: Dictionary of parameters for create operation.
+    :param list_fn: List function in the SDK to list all the resources of type resource_type.
+    :param kwargs_list: Dictionary of parameters for list operation.
+    :param module: Instance of AnsibleModule
+    :param model: Model used to create a resource.
+    :param existing_resources: Optional list of existing resources to match against; fetched via list_fn when omitted.
+    :param exclude_attributes: The attributes which should not be used to distinguish the resource. e.g. display_name,
+    dns_label.
+    :param dead_states: List of states which can't transition to any of the usable states of the resource. This defaults
+    to ["TERMINATING", "TERMINATED", "FAULTY", "FAILED", "DELETING", "DELETED", "UNKNOWN_ENUM_VALUE"]
+    :param default_attribute_values: A dictionary containing default values for attributes.
+    :param supports_sort_by_time_created: Whether list_fn accepts a sort_by="TIMECREATED" argument.
+    :return: A dictionary containing the resource & the "changed" status. e.g. {"vcn":{x:y}, "changed":True}
+    """
+
+    if module.params.get("force_create", None):
+        _debug("Force creating {0}".format(resource_type))
+        result = call_with_backoff(create_fn, **kwargs_create)
+        return result
+
+    # Get the existing resources list sorted by creation time in descending order. Return the latest matching resource
+    # in case of multiple resource matches.
+    if exclude_attributes is None:
+        exclude_attributes = {}
+    if default_attribute_values is None:
+        default_attribute_values = {}
+    try:
+        if existing_resources is None:
+            if supports_sort_by_time_created:
+                kwargs_list["sort_by"] = "TIMECREATED"
+            existing_resources = list_all_resources(list_fn, **kwargs_list)
+    except ValueError:
+        # list_fn doesn't support sort_by, so remove the sort_by key in kwargs_list and retry
+        kwargs_list.pop("sort_by", None)
+        try:
+            existing_resources = list_all_resources(list_fn, **kwargs_list)
+        # Handle errors like 404 due to bad arguments to the list_all_resources call.
+        except ServiceError as ex:
+            module.fail_json(msg=ex.message)
+    except ServiceError as ex:
+        module.fail_json(msg=ex.message)
+
+    result = dict()
+
+    attributes_to_consider = _get_attributes_to_consider(
+        exclude_attributes, model, module
+    )
+    if "defined_tags" not in default_attribute_values:
+        default_attribute_values["defined_tags"] = {}
+    resource_matched = None
+    _debug(
+        "Trying to find a match within {0} existing resources".format(
+            len(existing_resources)
+        )
+    )
+
+    for resource in existing_resources:
+        if _is_resource_active(resource, dead_states):
+            _debug(
+                "Comparing user specified values {0} against an existing resource's "
+                "values {1}".format(module.params, to_dict(resource))
+            )
+            if does_existing_resource_match_user_inputs(
+                to_dict(resource),
+                module,
+                attributes_to_consider,
+                exclude_attributes,
+                default_attribute_values,
+            ):
+                resource_matched = to_dict(resource)
+                break
+
+    if resource_matched:
+        _debug("Resource with same attributes found: {0}.".format(resource_matched))
+        result[resource_type] = resource_matched
+        result["changed"] = False
+    else:
+        _debug("No matching resource found.
Attempting to create a new resource.")
+        result = call_with_backoff(create_fn, **kwargs_create)
+
+    return result
+
+
+def _get_attributes_to_consider(exclude_attributes, model, module):
+    """
+    Determine the attributes to detect if an existing resource already matches the requested resource state
+    :param exclude_attributes: Attributes to not consider for matching
+    :param model: The model class used to create the Resource
+    :param module: An instance of AnsibleModule that contains user's desires around a resource's state
+    :return: A list of attributes that need to be matched
+    """
+
+    # If a user explicitly requests us to match only against a set of resources (using 'key_by'), use that as the list
+    # of attributes to consider for matching.
+    if "key_by" in module.params and module.params["key_by"] is not None:
+        attributes_to_consider = module.params["key_by"]
+    else:
+        # Consider all attributes except freeform_tags as freeform tags do not distinguish a resource.
+        attributes_to_consider = list(model.attribute_map)
+        if "freeform_tags" in attributes_to_consider:
+            attributes_to_consider.remove("freeform_tags")
+        # Temporarily removing node_count as the existing resource does not reflect it
+        if "node_count" in attributes_to_consider:
+            attributes_to_consider.remove("node_count")
+    _debug("attributes to consider: {0}".format(attributes_to_consider))
+    return attributes_to_consider
+
+
+def _is_resource_active(resource, dead_states):
+    if dead_states is None:
+        dead_states = DEAD_STATES
+
+    if "lifecycle_state" not in resource.attribute_map:
+        return True
+    return resource.lifecycle_state not in dead_states
+
+
+def is_attr_assigned_default(default_attribute_values, attr, assigned_value):
+    if not default_attribute_values:
+        return False
+
+    if attr in default_attribute_values:
+        default_val_for_attr = default_attribute_values.get(attr, None)
+        if isinstance(default_val_for_attr, dict):
+            # When the default value for a resource's attribute is an empty dictionary, check if the corresponding
+            # value of the existing resource's attribute is also empty.
+            if not default_val_for_attr:
+                return not assigned_value
+            # only compare keys that are in default_attribute_values[attr]
+            # this is to ensure forward compatibility when the API returns new keys that are not known during
+            # the time when the module author provided default values for the attribute
+            keys = {}
+            for k, v in iteritems(assigned_value):
+                if k in default_val_for_attr:
+                    keys[k] = v
+
+            return default_val_for_attr == keys
+        # non-dict, normal comparison
+        return default_val_for_attr == assigned_value
+    else:
+        # module author has not provided a default value for attr
+        return True
+
+
+def create_resource(resource_type, create_fn, kwargs_create, module):
+    """
+    Create an OCI resource
+    :param resource_type: Type of the resource to be created. e.g.: "vcn"
+    :param create_fn: Function in the SDK to create the resource. e.g.
virtual_network_client.create_vcn
+    :param kwargs_create: Dictionary containing arguments to be used to call the create function create_fn
+    :param module: Instance of AnsibleModule
+    """
+    result = dict(changed=False)
+    try:
+        resource = to_dict(call_with_backoff(create_fn, **kwargs_create).data)
+        _debug("Created {0}, {1}".format(resource_type, resource))
+        result["changed"] = True
+        result[resource_type] = resource
+        return result
+    except (ServiceError, TypeError) as ex:
+        module.fail_json(msg=str(ex))
+
+
+def does_existing_resource_match_user_inputs(
+    existing_resource,
+    module,
+    attributes_to_compare,
+    exclude_attributes,
+    default_attribute_values=None,
+):
+    """
+    Check if 'attributes_to_compare' in an existing_resource match the desired state provided by a user in 'module'.
+    :param existing_resource: A dictionary representing an existing resource's values.
+    :param module: The AnsibleModule representing the options provided by the user.
+    :param attributes_to_compare: A list of attributes of a resource that are used to compare if an existing resource
+    matches the desired state of the resource expressed by the user in 'module'.
+    :param exclude_attributes: The attributes, that a module author provides, which should not be used to match the
+    resource. This dictionary typically includes: (a) attributes which are initialized with dynamic default values
+    like 'display_name', 'security_list_ids' for subnets and (b) attributes that don't have any defaults like
+    'dns_label' in VCNs. The attributes are part of keys and 'True' is the value for all existing keys.
+    :param default_attribute_values: A dictionary containing default values for attributes.
+    :return: True if the values for the list of attributes are the same in the existing_resource and module instances.
+    """
+    if not default_attribute_values:
+        default_attribute_values = {}
+    for attr in attributes_to_compare:
+        attribute_with_default_metadata = None
+        if attr in existing_resource:
+            resources_value_for_attr = existing_resource[attr]
+            # Check if the user has explicitly provided the value for attr.
+            user_provided_value_for_attr = _get_user_provided_value(module, attr)
+            if user_provided_value_for_attr is not None:
+                res = [True]
+                check_if_user_value_matches_resources_attr(
+                    attr,
+                    resources_value_for_attr,
+                    user_provided_value_for_attr,
+                    exclude_attributes,
+                    default_attribute_values,
+                    res,
+                )
+                if not res[0]:
+                    _debug(
+                        "Mismatch on attribute '{0}'. User provided value is {1} & existing resource's value "
+                        "is {2}.".format(
+                            attr, user_provided_value_for_attr, resources_value_for_attr
+                        )
+                    )
+                    return False
+            else:
+                # If the user has not explicitly provided the value for attr and attr is in exclude_list, we can
+                # consider this as a 'pass'. For example, if an attribute 'display_name' is not specified by the user
+                # and that attribute is in the 'exclude_list' according to the module author (not the user), then exclude
+                if (
+                    exclude_attributes.get(attr) is None
+                    and resources_value_for_attr is not None
+                ):
+                    if module.argument_spec.get(attr):
+                        attribute_with_default_metadata = module.argument_spec.get(attr)
+                        default_attribute_value = attribute_with_default_metadata.get(
+                            "default", None
+                        )
+                        if default_attribute_value is not None:
+                            if existing_resource[attr] != default_attribute_value:
+                                return False
+                    # Check if attr has a value that is not default. For example, a custom `security_list_id`
+                    # is assigned to the subnet's attribute `security_list_ids`.
If the attribute is assigned a + # value that is not the default, then it must be considered a mismatch and false returned. + elif not is_attr_assigned_default( + default_attribute_values, attr, existing_resource[attr] + ): + return False + + else: + _debug( + "Attribute {0} is in the create model of resource {1}" + "but doesn't exist in the get model of the resource".format( + attr, existing_resource.__class__ + ) + ) + return True + + +def tuplize(d): + """ + This function takes a dictionary and converts it to a list of tuples recursively. + :param d: A dictionary. + :return: List of tuples. + """ + list_of_tuples = [] + key_list = sorted(list(d.keys())) + for key in key_list: + if type(d[key]) == list: + # Convert a value which is itself a list of dict to a list of tuples. + if d[key] and type(d[key][0]) == dict: + sub_tuples = [] + for sub_dict in d[key]: + sub_tuples.append(tuplize(sub_dict)) + # To handle comparing two None values, while creating a tuple for a {key: value}, make the first element + # in the tuple a boolean `True` if value is None so that attributes with None value are put at last + # in the sorted list. + list_of_tuples.append((sub_tuples is None, key, sub_tuples)) + else: + list_of_tuples.append((d[key] is None, key, d[key])) + elif type(d[key]) == dict: + tupled_value = tuplize(d[key]) + list_of_tuples.append((tupled_value is None, key, tupled_value)) + else: + list_of_tuples.append((d[key] is None, key, d[key])) + return list_of_tuples + + +def get_key_for_comparing_dict(d): + tuple_form_of_d = tuplize(d) + return tuple_form_of_d + + +def sort_dictionary(d): + """ + This function sorts values of a dictionary recursively. + :param d: A dictionary. + :return: Dictionary with sorted elements. + """ + sorted_d = {} + for key in d: + if type(d[key]) == list: + if d[key] and type(d[key][0]) == dict: + sorted_value = sort_list_of_dictionary(d[key]) + sorted_d[key] = sorted_value + else: + sorted_d[key] = sorted(d[key]) + elif type(d[key]) == dict: + sorted_d[key] = sort_dictionary(d[key]) + else: + sorted_d[key] = d[key] + return sorted_d + + +def sort_list_of_dictionary(list_of_dict): + """ + This functions sorts a list of dictionaries. It first sorts each value of the dictionary and then sorts the list of + individually sorted dictionaries. For sorting, each dictionary's tuple equivalent is used. + :param list_of_dict: List of dictionaries. + :return: A sorted dictionary. 
+ """ + list_with_sorted_dict = [] + for d in list_of_dict: + sorted_d = sort_dictionary(d) + list_with_sorted_dict.append(sorted_d) + return sorted(list_with_sorted_dict, key=get_key_for_comparing_dict) + + +def check_if_user_value_matches_resources_attr( + attribute_name, + resources_value_for_attr, + user_provided_value_for_attr, + exclude_attributes, + default_attribute_values, + res, +): + if isinstance(default_attribute_values.get(attribute_name), dict): + default_attribute_values = default_attribute_values.get(attribute_name) + + if isinstance(exclude_attributes.get(attribute_name), dict): + exclude_attributes = exclude_attributes.get(attribute_name) + + if isinstance(resources_value_for_attr, list) or isinstance( + user_provided_value_for_attr, list + ): + # Perform a deep equivalence check for a List attribute + if exclude_attributes.get(attribute_name): + return + if ( + user_provided_value_for_attr is None + and default_attribute_values.get(attribute_name) is not None + ): + user_provided_value_for_attr = default_attribute_values.get(attribute_name) + + if resources_value_for_attr is None and user_provided_value_for_attr is None: + return + + if ( + resources_value_for_attr is None + and len(user_provided_value_for_attr) >= 0 + or user_provided_value_for_attr is None + and len(resources_value_for_attr) >= 0 + ): + res[0] = False + return + + if ( + resources_value_for_attr is not None + and user_provided_value_for_attr is not None + and len(resources_value_for_attr) != len(user_provided_value_for_attr) + ): + res[0] = False + return + + if ( + user_provided_value_for_attr + and type(user_provided_value_for_attr[0]) == dict + ): + # Process a list of dict + sorted_user_provided_value_for_attr = sort_list_of_dictionary( + user_provided_value_for_attr + ) + sorted_resources_value_for_attr = sort_list_of_dictionary( + resources_value_for_attr + ) + + else: + sorted_user_provided_value_for_attr = sorted(user_provided_value_for_attr) + sorted_resources_value_for_attr = sorted(resources_value_for_attr) + + # Walk through the sorted list values of the resource's value for this attribute, and compare against user + # provided values. 
+ for index, resources_value_for_attr_part in enumerate( + sorted_resources_value_for_attr + ): + check_if_user_value_matches_resources_attr( + attribute_name, + resources_value_for_attr_part, + sorted_user_provided_value_for_attr[index], + exclude_attributes, + default_attribute_values, + res, + ) + + elif isinstance(resources_value_for_attr, dict): + # Perform a deep equivalence check for dict typed attributes + + if not resources_value_for_attr and user_provided_value_for_attr: + res[0] = False + for key in resources_value_for_attr: + if ( + user_provided_value_for_attr is not None + and user_provided_value_for_attr + ): + check_if_user_value_matches_resources_attr( + key, + resources_value_for_attr.get(key), + user_provided_value_for_attr.get(key), + exclude_attributes, + default_attribute_values, + res, + ) + else: + if exclude_attributes.get(key) is None: + if default_attribute_values.get(key) is not None: + user_provided_value_for_attr = default_attribute_values.get(key) + check_if_user_value_matches_resources_attr( + key, + resources_value_for_attr.get(key), + user_provided_value_for_attr, + exclude_attributes, + default_attribute_values, + res, + ) + else: + res[0] = is_attr_assigned_default( + default_attribute_values, + attribute_name, + resources_value_for_attr.get(key), + ) + + elif resources_value_for_attr != user_provided_value_for_attr: + if ( + exclude_attributes.get(attribute_name) is None + and default_attribute_values.get(attribute_name) is not None + ): + # As the user has not specified a value for an optional attribute, if the existing resource's + # current state has a DEFAULT value for that attribute, we must not consider this incongruence + # an issue and continue with other checks. If the existing resource's value for the attribute + # is not the default value, then the existing resource is not a match. + if not is_attr_assigned_default( + default_attribute_values, attribute_name, resources_value_for_attr + ): + res[0] = False + elif user_provided_value_for_attr is not None: + res[0] = False + + +def are_dicts_equal( + option_name, + existing_resource_dict, + user_provided_dict, + exclude_list, + default_attribute_values, +): + if not user_provided_dict: + # User has not provided a value for the map option. In this case, the user hasn't expressed an intent around + # this optional attribute. Check if existing_resource_dict matches default. + # For example, source_details attribute in volume is optional and does not have any defaults. + return is_attr_assigned_default( + default_attribute_values, option_name, existing_resource_dict + ) + + # If the existing resource has an empty dict, while the user has provided entries, dicts are not equal + if not existing_resource_dict and user_provided_dict: + return False + + # check if all keys of an existing resource's dict attribute matches user-provided dict's entries + for sub_attr in existing_resource_dict: + # If user has provided value for sub-attribute, then compare it with corresponding key in existing resource. + if sub_attr in user_provided_dict: + if existing_resource_dict[sub_attr] != user_provided_dict[sub_attr]: + _debug( + "Failed to match: Existing resource's attr {0} sub-attr {1} value is {2}, while user " + "provided value is {3}".format( + option_name, + sub_attr, + existing_resource_dict[sub_attr], + user_provided_dict.get(sub_attr, None), + ) + ) + return False + + # If sub_attr not provided by user, check if the sub-attribute value of existing resource matches default value. 
+        else:
+            if not should_dict_attr_be_excluded(option_name, sub_attr, exclude_list):
+                default_value_for_dict_attr = default_attribute_values.get(
+                    option_name, None
+                )
+                if default_value_for_dict_attr:
+                    # if a default value for the sub-attr was provided by the module author, fail if the existing
+                    # resource's value for the sub-attr is not the default
+                    if not is_attr_assigned_default(
+                        default_value_for_dict_attr,
+                        sub_attr,
+                        existing_resource_dict[sub_attr],
+                    ):
+                        return False
+                else:
+                    # No default value specified by module author for sub_attr
+                    _debug(
+                        "Consider as match: Existing resource's attr {0} sub-attr {1} value is {2}, while user did "
+                        "not provide a value for it. The module author also has not provided a default value for it "
+                        "or marked it for exclusion. So ignoring this attribute during matching and continuing with "
+                        "other checks".format(
+                            option_name, sub_attr, existing_resource_dict[sub_attr]
+                        )
+                    )
+
+    return True
+
+
+def should_dict_attr_be_excluded(map_option_name, option_key, exclude_list):
+    """An entry in the exclude list for excluding a map's key is specified as a dict with the map option name as the
+    key, and the value as a list of keys to be excluded within that map. For example, if the keys "k1" and "k2" of a
+    map option named "m1" need to be excluded, the exclude list must have an entry {'m1': ['k1','k2']} """
+    for exclude_item in exclude_list:
+        if isinstance(exclude_item, dict):
+            if map_option_name in exclude_item:
+                if option_key in exclude_item[map_option_name]:
+                    return True
+    return False
+
+
+def create_and_wait(
+    resource_type,
+    client,
+    create_fn,
+    kwargs_create,
+    get_fn,
+    get_param,
+    module,
+    states=None,
+    wait_applicable=True,
+    kwargs_get=None,
+):
+    """
+    A utility function to create a resource and wait for the resource to get into the state as specified in the module
+    options.
+    :param wait_applicable: Specifies if wait for create is applicable for this resource
+    :param resource_type: Type of the resource to be created. e.g. "vcn"
+    :param client: OCI service client instance to call the service periodically to retrieve data.
+    e.g. VirtualNetworkClient()
+    :param create_fn: Function in the SDK to create the resource. e.g. virtual_network_client.create_vcn
+    :param kwargs_create: Dictionary containing arguments to be used to call the create function create_fn.
+    :param get_fn: Function in the SDK to get the resource. e.g. virtual_network_client.get_vcn
+    :param get_param: Name of the argument in the SDK get function. e.g. "vcn_id"
+    :param module: Instance of AnsibleModule.
+    :param states: List of lifecycle states to watch for while waiting after create_fn is called.
+    e.g. [module.params['wait_until'], "FAULTY"]
+    :param kwargs_get: Dictionary containing arguments to be used to call a multi-argument `get` function
+    :return: A dictionary containing the resource & the "changed" status. e.g.
{"vcn":{x:y}, "changed":True} + """ + try: + return create_or_update_resource_and_wait( + resource_type, + create_fn, + kwargs_create, + module, + wait_applicable, + get_fn, + get_param, + states, + client, + kwargs_get, + ) + except MaximumWaitTimeExceeded as ex: + module.fail_json(msg=str(ex)) + except ServiceError as ex: + module.fail_json(msg=ex.message) + + +def update_and_wait( + resource_type, + client, + update_fn, + kwargs_update, + get_fn, + get_param, + module, + states=None, + wait_applicable=True, + kwargs_get=None, +): + """ + A utility function to update a resource and wait for the resource to get into the state as specified in the module + options. It wraps the create_and_wait method as apart from the method and arguments, everything else is similar. + :param wait_applicable: Specifies if wait for create is applicable for this resource + :param resource_type: Type of the resource to be created. e.g. "vcn" + :param client: OCI service client instance to call the service periodically to retrieve data. + e.g. VirtualNetworkClient() + :param update_fn: Function in the SDK to update the resource. e.g. virtual_network_client.update_vcn + :param kwargs_update: Dictionary containing arguments to be used to call the update function update_fn. + :param get_fn: Function in the SDK to get the resource. e.g. virtual_network_client.get_vcn + :param get_param: Name of the argument in the SDK get function. e.g. "vcn_id" + :param module: Instance of AnsibleModule. + :param kwargs_get: Dictionary containing arguments to be used to call the get function which requires multiple arguments. + :param states: List of lifecycle states to watch for while waiting after update_fn is called. + e.g. [module.params['wait_until'], "FAULTY"] + :return: A dictionary containing the resource & the "changed" status. e.g. {"vcn":{x:y}, "changed":True} + """ + try: + return create_or_update_resource_and_wait( + resource_type, + update_fn, + kwargs_update, + module, + wait_applicable, + get_fn, + get_param, + states, + client, + kwargs_get=kwargs_get, + ) + except MaximumWaitTimeExceeded as ex: + module.fail_json(msg=str(ex)) + except ServiceError as ex: + module.fail_json(msg=ex.message) + + +def create_or_update_resource_and_wait( + resource_type, + function, + kwargs_function, + module, + wait_applicable, + get_fn, + get_param, + states, + client, + update_target_resource_id_in_get_param=False, + kwargs_get=None, +): + """ + A utility function to create or update a resource and wait for the resource to get into the state as specified in + the module options. + :param resource_type: Type of the resource to be created. e.g. "vcn" + :param function: Function in the SDK to create or update the resource. + :param kwargs_function: Dictionary containing arguments to be used to call the create or update function + :param module: Instance of AnsibleModule. + :param wait_applicable: Specifies if wait for create is applicable for this resource + :param get_fn: Function in the SDK to get the resource. e.g. virtual_network_client.get_vcn + :param get_param: Name of the argument in the SDK get function. e.g. "vcn_id" + :param states: List of lifecycle states to watch for while waiting after create_fn is called. + e.g. [module.params['wait_until'], "FAULTY"] + :param client: OCI service client instance to call the service periodically to retrieve data. + e.g. VirtualNetworkClient() + :param kwargs_get: Dictionary containing arguments to be used to call the get function which requires multiple arguments. 
+ :return: A dictionary containing the resource & the "changed" status. e.g. {"vcn":{x:y}, "changed":True} + """ + result = create_resource(resource_type, function, kwargs_function, module) + resource = result[resource_type] + result[resource_type] = wait_for_resource_lifecycle_state( + client, + module, + wait_applicable, + kwargs_get, + get_fn, + get_param, + resource, + states, + resource_type, + ) + return result + + +def wait_for_resource_lifecycle_state( + client, + module, + wait_applicable, + kwargs_get, + get_fn, + get_param, + resource, + states, + resource_type=None, +): + """ + A utility function to wait for the resource to get into the state as specified in + the module options. + :param client: OCI service client instance to call the service periodically to retrieve data. + e.g. VirtualNetworkClient + :param module: Instance of AnsibleModule. + :param wait_applicable: Specifies if wait for create is applicable for this resource + :param kwargs_get: Dictionary containing arguments to be used to call the get function which requires multiple arguments. + :param get_fn: Function in the SDK to get the resource. e.g. virtual_network_client.get_vcn + :param get_param: Name of the argument in the SDK get function. e.g. "vcn_id" + :param resource_type: Type of the resource to be created. e.g. "vcn" + :param states: List of lifecycle states to watch for while waiting after create_fn is called. + e.g. [module.params['wait_until'], "FAULTY"] + :return: A dictionary containing the resource & the "changed" status. e.g. {"vcn":{x:y}, "changed":True} + """ + if wait_applicable and module.params.get("wait", None): + if resource_type == "compartment": + # An immediate attempt to retrieve a compartment after a compartment is created fails with + # 'Authorization failed or requested resource not found', 'status': 404}. + # This is because it takes few seconds for the permissions on a compartment to be ready. + # Wait for few seconds before attempting a get call on compartment. + _debug( + "Pausing execution for permission on the newly created compartment to be ready." + ) + time.sleep(15) + if kwargs_get: + _debug( + "Waiting for resource to reach READY state. 
get_args: {0}".format( + kwargs_get + ) + ) + response_get = call_with_backoff(get_fn, **kwargs_get) + else: + _debug( + "Waiting for resource with id {0} to reach READY state.".format( + resource["id"] + ) + ) + response_get = call_with_backoff(get_fn, **{get_param: resource["id"]}) + if states is None: + states = module.params.get("wait_until") or DEFAULT_READY_STATES + resource = to_dict( + oci.wait_until( + client, + response_get, + evaluate_response=lambda r: r.data.lifecycle_state in states, + max_wait_seconds=module.params.get( + "wait_timeout", MAX_WAIT_TIMEOUT_IN_SECONDS + ), + ).data + ) + return resource + + +def wait_on_work_request(client, response, module): + try: + if module.params.get("wait", None): + _debug( + "Waiting for work request with id {0} to reach SUCCEEDED state.".format( + response.data.id + ) + ) + wait_response = oci.wait_until( + client, + response, + evaluate_response=lambda r: r.data.status == "SUCCEEDED", + max_wait_seconds=module.params.get( + "wait_timeout", MAX_WAIT_TIMEOUT_IN_SECONDS + ), + ) + else: + _debug( + "Waiting for work request with id {0} to reach ACCEPTED state.".format( + response.data.id + ) + ) + wait_response = oci.wait_until( + client, + response, + evaluate_response=lambda r: r.data.status == "ACCEPTED", + max_wait_seconds=module.params.get( + "wait_timeout", MAX_WAIT_TIMEOUT_IN_SECONDS + ), + ) + except MaximumWaitTimeExceeded as ex: + _debug(str(ex)) + module.fail_json(msg=str(ex)) + except ServiceError as ex: + _debug(str(ex)) + module.fail_json(msg=str(ex)) + return wait_response.data + + +def delete_and_wait( + resource_type, + client, + get_fn, + kwargs_get, + delete_fn, + kwargs_delete, + module, + states=None, + wait_applicable=True, + process_work_request=False, +): + """A utility function to delete a resource and wait for the resource to get into the state as specified in the + module options. + :param wait_applicable: Specifies if wait for delete is applicable for this resource + :param resource_type: Type of the resource to be deleted. e.g. "vcn" + :param client: OCI service client instance to call the service periodically to retrieve data. + e.g. VirtualNetworkClient() + :param get_fn: Function in the SDK to get the resource. e.g. virtual_network_client.get_vcn + :param kwargs_get: Dictionary of arguments for get function get_fn. e.g. {"vcn_id": module.params["id"]} + :param delete_fn: Function in the SDK to delete the resource. e.g. virtual_network_client.delete_vcn + :param kwargs_delete: Dictionary of arguments for delete function delete_fn. e.g. {"vcn_id": module.params["id"]} + :param module: Instance of AnsibleModule. + :param states: List of lifecycle states to watch for while waiting after delete_fn is called. If nothing is passed, + defaults to ["TERMINATED", "DETACHED", "DELETED"]. + :param process_work_request: Whether a work request is generated on an API call and if it needs to be handled. + :return: A dictionary containing the resource & the "changed" status. e.g. 
{"vcn":{x:y}, "changed":True} + """ + + states_set = set(["DETACHING", "DETACHED", "DELETING", "DELETED", "TERMINATING", "TERMINATED"]) + result = dict(changed=False) + result[resource_type] = dict() + try: + resource = to_dict(call_with_backoff(get_fn, **kwargs_get).data) + if resource: + if "lifecycle_state" not in resource or resource["lifecycle_state"] not in states_set: + response = call_with_backoff(delete_fn, **kwargs_delete) + if process_work_request: + wr_id = response.headers.get("opc-work-request-id") + get_wr_response = call_with_backoff( + client.get_work_request, work_request_id=wr_id + ) + result["work_request"] = to_dict( + wait_on_work_request(client, get_wr_response, module) + ) + # Set changed to True as work request has been created to delete the resource. + result["changed"] = True + resource = to_dict(call_with_backoff(get_fn, **kwargs_get).data) + else: + _debug("Deleted {0}, {1}".format(resource_type, resource)) + result["changed"] = True + + if wait_applicable and module.params.get("wait", None): + if states is None: + states = ( + module.params.get("wait_until") + or DEFAULT_TERMINATED_STATES + ) + try: + wait_response = oci.wait_until( + client, + get_fn(**kwargs_get), + evaluate_response=lambda r: r.data.lifecycle_state + in states, + max_wait_seconds=module.params.get( + "wait_timeout", MAX_WAIT_TIMEOUT_IN_SECONDS + ), + succeed_on_not_found=True, + ) + except MaximumWaitTimeExceeded as ex: + module.fail_json(msg=str(ex)) + except ServiceError as ex: + if ex.status != 404: + module.fail_json(msg=ex.message) + else: + # While waiting for resource to get into terminated state, if the resource is not found. + _debug( + "API returned Status:404(Not Found) while waiting for resource to get into" + " terminated state." + ) + resource["lifecycle_state"] = "DELETED" + result[resource_type] = resource + return result + # oci.wait_until() returns an instance of oci.util.Sentinel in case the resource is not found. + if type(wait_response) is not Sentinel: + resource = to_dict(wait_response.data) + else: + resource["lifecycle_state"] = "DELETED" + + result[resource_type] = resource + else: + _debug( + "Resource {0} with {1} already deleted. So returning changed=False".format( + resource_type, kwargs_get + ) + ) + except ServiceError as ex: + # DNS API throws a 400 InvalidParameter when a zone id is provided for zone_name_or_id and if the zone + # resource is not available, instead of the expected 404. So working around this for now. + if type(client) == oci.dns.DnsClient: + if ex.status == 400 and ex.code == "InvalidParameter": + _debug( + "Resource {0} with {1} already deleted. So returning changed=False".format( + resource_type, kwargs_get + ) + ) + elif ex.status != 404: + module.fail_json(msg=ex.message) + result[resource_type] = dict() + return result + + +def are_attrs_equal(current_resource, module, attributes): + """ + Check if the specified attributes are equal in the specified 'model' and 'module'. This is used to check if an OCI + Model instance already has the values specified by an Ansible user while invoking an OCI Ansible module and if a + resource needs to be updated. + :param current_resource: A resource model instance + :param module: The AnsibleModule representing the options provided by the user + :param attributes: A list of attributes that would need to be compared in the model and the module instances. 
+ :return: True if the values for the list of attributes is the same in the model and module instances + """ + for attr in attributes: + curr_value = getattr(current_resource, attr, None) + user_provided_value = _get_user_provided_value(module, attribute_name=attr) + + if user_provided_value is not None: + if curr_value != user_provided_value: + _debug( + "are_attrs_equal - current resource's attribute " + + attr + + " value is " + + str(curr_value) + + " and this doesn't match user provided value of " + + str(user_provided_value) + ) + return False + return True + + +def _get_user_provided_value(module, attribute_name): + """ + Returns the user provided value for "attribute_name". We consider aliases in the module. + """ + user_provided_value = module.params.get(attribute_name, None) + if user_provided_value is None: + # If the attribute_name is set as an alias for some option X and user has provided value in the playbook using + # option X, then user provided value for attribute_name is equal to value for X. + # Get option name for attribute_name from module.aliases. + # module.aliases is a dictionary with key as alias name and its value as option name. + option_alias_for_attribute = module.aliases.get(attribute_name, None) + if option_alias_for_attribute is not None: + user_provided_value = module.params.get(option_alias_for_attribute, None) + return user_provided_value + + +def update_model_with_user_options(curr_model, update_model, module): + """ + Update the 'update_model' with user provided values in 'module' for the specified 'attributes' if they are different + from the values in the 'curr_model'. + :param curr_model: A resource model instance representing the state of the current resource + :param update_model: An instance of the update resource model for the current resource's type + :param module: An AnsibleModule representing the options provided by the user + :return: An updated 'update_model' instance filled with values that would need to be updated in the current resource + state to satisfy the user's requested state. + """ + attributes = update_model.attribute_map.keys() + for attr in attributes: + curr_value_for_attr = getattr(curr_model, attr, None) + user_provided_value = _get_user_provided_value(module, attribute_name=attr) + + if curr_value_for_attr != user_provided_value: + if user_provided_value is not None: + # Only update if a user has specified a value for an option + _debug( + "User requested {0} for attribute {1}, whereas the current value is {2}. 
So adding it " + "to the update model".format( + user_provided_value, attr, curr_value_for_attr + ) + ) + setattr(update_model, attr, user_provided_value) + else: + # Always set current values of the resource in the update model if there is no request for change in + # values + setattr(update_model, attr, curr_value_for_attr) + return update_model + + +def _get_retry_strategy(): + retry_strategy_builder = RetryStrategyBuilder( + max_attempts_check=True, + max_attempts=10, + retry_max_wait_between_calls_seconds=30, + retry_base_sleep_time_seconds=3, + backoff_type=oci.retry.BACKOFF_FULL_JITTER_EQUAL_ON_THROTTLE_VALUE, + ) + retry_strategy_builder.add_service_error_check( + service_error_retry_config={ + 429: [], + 400: ["QuotaExceeded", "LimitExceeded"], + 409: ["Conflict"], + }, + service_error_retry_on_any_5xx=True, + ) + return retry_strategy_builder.get_retry_strategy() + + +def call_with_backoff(fn, **kwargs): + if "retry_strategy" not in kwargs: + kwargs["retry_strategy"] = _get_retry_strategy() + try: + return fn(**kwargs) + except TypeError as te: + if "unexpected keyword argument" in str(te): + # to handle older SDKs that did not support retry_strategy + del kwargs["retry_strategy"] + return fn(**kwargs) + else: + # A validation error raised by the SDK, throw it back + raise + + +def generic_hash(obj): + """ + Compute a hash of all the fields in the object + :param obj: Object whose hash needs to be computed + :return: a hash value for the object + """ + sum = 0 + for field in obj.attribute_map.keys(): + field_value = getattr(obj, field) + if isinstance(field_value, list): + for value in field_value: + sum = sum + hash(value) + elif isinstance(field_value, dict): + for k, v in field_value.items(): + sum = sum + hash(hash(k) + hash(":") + hash(v)) + else: + sum = sum + hash(getattr(obj, field)) + return sum + + +def generic_eq(s, other): + if other is None: + return False + return s.__dict__ == other.__dict__ + + +def generate_subclass(parent_class): + """Make a class hash-able by generating a subclass with a __hash__ method that returns the sum of all fields within + the parent class""" + dict_of_method_in_subclass = { + "__init__": parent_class.__init__, + "__hash__": generic_hash, + "__eq__": generic_eq, + } + subclass_name = "GeneratedSub" + parent_class.__name__ + generated_sub_class = type( + subclass_name, (parent_class,), dict_of_method_in_subclass + ) + return generated_sub_class + + +def create_hashed_instance(class_type): + hashed_class = generate_subclass(class_type) + return hashed_class() + + +def get_hashed_object_list(class_type, object_with_values, attributes_class_type=None): + if object_with_values is None: + return None + hashed_class_instances = [] + for object_with_value in object_with_values: + hashed_class_instances.append( + get_hashed_object(class_type, object_with_value, attributes_class_type) + ) + return hashed_class_instances + + +def get_hashed_object( + class_type, object_with_value, attributes_class_type=None, supported_attributes=None +): + """ + Convert any class instance into hashable so that the + instances are eligible for various comparison + operation available under set() object. 
+    :param class_type: Any class type whose instances need to be hashable
+    :param object_with_value: Instance of the class type with values which
+     would be set in the resulting instance
+    :param attributes_class_type: A list of class types of attributes, if an attribute is a custom class instance
+    :param supported_attributes: A list of attributes which should be considered while populating the instance
+     with the values in the object. This helps in avoiding new attributes of the class_type which are still not
+     supported by the current implementation.
+    :return: A hashable instance with the same state as the provided object_with_value
+    """
+    if object_with_value is None:
+        return None
+
+    HashedClass = generate_subclass(class_type)
+    hashed_class_instance = HashedClass()
+
+    if supported_attributes:
+        class_attributes = list(
+            set(hashed_class_instance.attribute_map) & set(supported_attributes)
+        )
+    else:
+        class_attributes = hashed_class_instance.attribute_map
+
+    for attribute in class_attributes:
+        attribute_value = getattr(object_with_value, attribute)
+        if attributes_class_type:
+            for attribute_class_type in attributes_class_type:
+                if isinstance(attribute_value, attribute_class_type):
+                    attribute_value = get_hashed_object(
+                        attribute_class_type, attribute_value
+                    )
+        hashed_class_instance.__setattr__(attribute, attribute_value)
+
+    return hashed_class_instance
+
+
+def update_class_type_attr_difference(
+    update_class_details, existing_instance, attr_name, attr_class, input_attr_value
+):
+    """
+    Checks the difference and updates an attribute which is represented by a class
+    instance. Not applicable if the attribute type is a primitive value.
+    For example, if a class A has an attribute x whose value is an instance of class
+    X (A.x = X()), then this method applies.
+    :param update_class_details The instance which should be updated if there is a change in
+                                the attribute value
+    :param existing_instance The instance whose attribute value is compared with the input
+                             attribute value
+    :param attr_name Name of the attribute whose value should be compared
+    :param attr_class Class type of the attribute
+    :param input_attr_value The value of the input attribute which should replace the current
+                            value in case of a mismatch
+    :return: A boolean value indicating whether the attribute value has been replaced
+    """
+    changed = False
+    # Here the existing attribute value is an instance
+    existing_attr_value = get_hashed_object(
+        attr_class, getattr(existing_instance, attr_name)
+    )
+    if input_attr_value is None:
+        update_class_details.__setattr__(attr_name, existing_attr_value)
+    else:
+        changed = not input_attr_value.__eq__(existing_attr_value)
+        if changed:
+            update_class_details.__setattr__(attr_name, input_attr_value)
+        else:
+            update_class_details.__setattr__(attr_name, existing_attr_value)
+
+    return changed
+
+
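A quick editorial sketch of the two helpers above (not part of the diff): Listener and Sni are hypothetical stand-ins for OCI SDK model classes, since only the attribute_map contract matters here.

    class Sni(object):
        attribute_map = {"hostname": "hostname"}

        def __init__(self, hostname=None):
            self.hostname = hostname


    class Listener(object):
        attribute_map = {"sni": "sni"}

        def __init__(self, sni=None):
            self.sni = sni


    existing = Listener(sni=Sni(hostname="a.example.com"))
    # Wrap the requested value so generic_eq()/generic_hash() are available on it.
    requested = get_hashed_object(Sni, Sni(hostname="b.example.com"))

    update = Listener()
    # The hostnames differ, so this returns True and copies `requested` into update.sni.
    changed = update_class_type_attr_difference(update, existing, "sni", Sni, requested)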
+def get_existing_resource(target_fn, module, **kwargs):
+    """
+    Returns the requested resource if it exists based on the input arguments.
+    :param target_fn The function which should be used to find the requested resource
+    :param module Instance of AnsibleModule
+    :param kwargs A map of arguments consisting of values based on which the requested resource should be searched
+    :return: Instance of the requested resource
+    """
+    existing_resource = None
+    try:
+        response = call_with_backoff(target_fn, **kwargs)
+        existing_resource = response.data
+    except ServiceError as ex:
+        if ex.status != 404:
+            module.fail_json(msg=ex.message)
+
+    return existing_resource
+
+
+def get_attached_instance_info(
+    module, lookup_attached_instance, list_attachments_fn, list_attachments_args
+):
+    config = get_oci_config(module)
+    identity_client = create_service_client(module, IdentityClient)
+
+    volume_attachments = []
+
+    if lookup_attached_instance:
+        # Get all the compartments in the tenancy
+        compartments = to_dict(
+            identity_client.list_compartments(
+                config.get("tenancy"), compartment_id_in_subtree=True
+            ).data
+        )
+        # For each compartment, get the volume attachments for the compartment_id with the other args in
+        # list_attachments_args.
+        for compartment in compartments:
+            list_attachments_args["compartment_id"] = compartment["id"]
+            try:
+                volume_attachments += list_all_resources(
+                    list_attachments_fn, **list_attachments_args
+                )
+
+            # Ignore a 404 ServiceError, which is returned when the user is not authorized to access the
+            # volume attachments of a compartment
+            except ServiceError as ex:
+                if ex.status == 404:
+                    pass
+
+    else:
+        volume_attachments = list_all_resources(
+            list_attachments_fn, **list_attachments_args
+        )
+
+    volume_attachments = to_dict(volume_attachments)
+    # volume_attachments may also contain attachments in the DETACHING or DETACHED state. Return only an
+    # attachment in the ATTACHING or ATTACHED state.
+
+    return next(
+        (
+            volume_attachment
+            for volume_attachment in volume_attachments
+            if volume_attachment["lifecycle_state"] in ["ATTACHING", "ATTACHED"]
+        ),
+        None,
+    )
+
+
+def check_mode(fn):
+    def wrapper(*args, **kwargs):
+        if os.environ.get("OCI_ANSIBLE_EXPERIMENTAL", None):
+            return fn(*args, **kwargs)
+        return None
+
+    return wrapper
+
+
+def check_and_return_component_list_difference(
+    input_component_list, existing_components, purge_components, delete_components=False
+):
+    if input_component_list:
+        existing_components, changed = get_component_list_difference(
+            input_component_list,
+            existing_components,
+            purge_components,
+            delete_components,
+        )
+    else:
+        existing_components = []
+        changed = True
+    return existing_components, changed
+
+
+def get_component_list_difference(
+    input_component_list, existing_components, purge_components, delete_components=False
+):
+    if delete_components:
+        if existing_components is None:
+            return None, False
+        component_differences = set(existing_components).intersection(
+            set(input_component_list)
+        )
+        if component_differences:
+            return list(set(existing_components) - component_differences), True
+        else:
+            return None, False
+    if existing_components is None:
+        return input_component_list, True
+    if purge_components:
+        components_differences = set(input_component_list).symmetric_difference(
+            set(existing_components)
+        )
+
+        if components_differences:
+            return input_component_list, True
+
+    components_differences = set(input_component_list).difference(
+        set(existing_components)
+    )
+    if components_differences:
+        return list(components_differences) + existing_components, True
+    return None, False
+
+
+def write_to_file(path, content):
+    with open(to_bytes(path), "wb") as dest_file:
+        dest_file.write(content)
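The three modes of get_component_list_difference() are easiest to read off a worked example (editorial sketch; the addresses are illustrative, and the ordering of set-derived lists may vary):

    existing = ["10.0.0.1", "10.0.0.2"]
    requested = ["10.0.0.2", "10.0.0.3"]

    # Append mode: entries missing from `existing` are prepended to it.
    merged, changed = get_component_list_difference(requested, existing, False)
    # merged == ["10.0.0.3", "10.0.0.1", "10.0.0.2"], changed is True

    # Purge mode: any difference makes the requested list replace the existing one.
    replaced, changed = get_component_list_difference(requested, existing, True)
    # replaced == ["10.0.0.2", "10.0.0.3"], changed is True

    # Delete mode: entries present in both lists are removed from `existing`.
    remaining, changed = get_component_list_difference(
        requested, existing, False, delete_components=True
    )
    # remaining == ["10.0.0.1"], changed is True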
+
+
+def get_target_resource_from_list(
+    module, list_resource_fn, target_resource_id=None, **kwargs
+):
+    """
+    Returns a resource filtered by identifier from a list of resources. This method should be
+    used as an alternative to a 'get resource' method when one is not provided by the
+    resource API. The returned value is only a partial wrapper of a response object and so
+    must not be used as an input to the 'wait_until' utility.
+    :param module The AnsibleModule representing the options provided by the user
+    :param list_resource_fn The function which lists all the resources
+    :param target_resource_id The identifier of the resource which should be filtered from the list
+    :param kwargs A map of arguments consisting of values based on which the requested resource should be searched
+    :return: A custom wrapper which partially wraps a response object, where the data field contains the target
+             resource, if found.
+    """
+
+    class ResponseWrapper:
+        def __init__(self, data):
+            self.data = data
+
+    try:
+        resources = list_all_resources(list_resource_fn, **kwargs)
+        if resources is not None:
+            for resource in resources:
+                if resource.id == target_resource_id:
+                    # Return an object that mimics an OCI response, as oci_utils methods assume a
+                    # Response-like object
+                    return ResponseWrapper(data=resource)
+        return ResponseWrapper(data=None)
+    except ServiceError as ex:
+        module.fail_json(msg=ex.message)
diff --git a/plugins/module_utils/postgres.py b/plugins/module_utils/postgres.py
new file mode 100644
index 0000000000..63811c3055
--- /dev/null
+++ b/plugins/module_utils/postgres.py
@@ -0,0 +1,330 @@
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by Ansible
+# still belong to the author of the module, and may assign their own license
+# to the complete work.
+#
+# Copyright (c), Ted Timmons , 2017.
+# Most of this was originally added by other creators in the postgresql_user module.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without modification,
+# are permitted provided that the following conditions are met:
+#
+#    * Redistributions of source code must retain the above copyright
+#      notice, this list of conditions and the following disclaimer.
+#    * Redistributions in binary form must reproduce the above copyright notice,
+#      this list of conditions and the following disclaimer in the documentation
+#      and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
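A minimal editorial sketch of how the helpers defined below are typically combined inside a module (not part of the diff; QueryRunner is a hypothetical holder class, see the exec_sql() contract below):

    from ansible.module_utils.basic import AnsibleModule

    module = AnsibleModule(argument_spec=postgres_common_argument_spec())
    conn_params = get_conn_params(module, module.params)
    db_connection = connect_to_db(module, conn_params, autocommit=True)


    class QueryRunner(object):
        # Hypothetical object satisfying exec_sql()'s expectations: it must
        # expose module, cursor and executed_queries attributes.
        def __init__(self, module, cursor):
            self.module = module
            self.cursor = cursor
            self.executed_queries = []


    rows = exec_sql(QueryRunner(module, db_connection.cursor()), "SELECT version()")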
+
+psycopg2 = None  # This line is needed for unit tests
+try:
+    import psycopg2
+    import psycopg2.extras  # explicit import: connect_to_db() below uses psycopg2.extras.DictCursor
+    HAS_PSYCOPG2 = True
+except ImportError:
+    HAS_PSYCOPG2 = False
+
+from ansible.module_utils.basic import missing_required_lib
+from ansible.module_utils._text import to_native
+from ansible.module_utils.six import iteritems
+from distutils.version import LooseVersion
+
+
+def postgres_common_argument_spec():
+    """
+    Return a dictionary with connection options.
+
+    The options are commonly used by most of the PostgreSQL modules.
+    """
+    return dict(
+        login_user=dict(default='postgres'),
+        login_password=dict(default='', no_log=True),
+        login_host=dict(default=''),
+        login_unix_socket=dict(default=''),
+        port=dict(type='int', default=5432, aliases=['login_port']),
+        ssl_mode=dict(default='prefer', choices=['allow', 'disable', 'prefer', 'require', 'verify-ca', 'verify-full']),
+        ca_cert=dict(aliases=['ssl_rootcert']),
+    )
+
+
+def ensure_required_libs(module):
+    """Check required libraries."""
+    if not HAS_PSYCOPG2:
+        module.fail_json(msg=missing_required_lib('psycopg2'))
+
+    if module.params.get('ca_cert') and LooseVersion(psycopg2.__version__) < LooseVersion('2.4.3'):
+        module.fail_json(msg='psycopg2 must be at least 2.4.3 in order to use the ca_cert parameter')
+
+
+def connect_to_db(module, conn_params, autocommit=False, fail_on_conn=True):
+    """Connect to a PostgreSQL database.
+
+    Return a psycopg2 connection object.
+
+    Args:
+        module (AnsibleModule) -- object of ansible.module_utils.basic.AnsibleModule class
+        conn_params (dict) -- dictionary with connection parameters
+
+    Kwargs:
+        autocommit (bool) -- commit automatically (default False)
+        fail_on_conn (bool) -- fail if the connection failed, or just warn and return None (default True)
+    """
+    ensure_required_libs(module)
+
+    db_connection = None
+    try:
+        db_connection = psycopg2.connect(**conn_params)
+        if autocommit:
+            if LooseVersion(psycopg2.__version__) >= LooseVersion('2.4.2'):
+                db_connection.set_session(autocommit=True)
+            else:
+                db_connection.set_isolation_level(psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)
+
+        # Switch role, if specified:
+        if module.params.get('session_role'):
+            cursor = db_connection.cursor(cursor_factory=psycopg2.extras.DictCursor)
+
+            try:
+                cursor.execute('SET ROLE "%s"' % module.params['session_role'])
+            except Exception as e:
+                module.fail_json(msg="Could not switch role: %s" % to_native(e))
+            finally:
+                cursor.close()
+
+    except TypeError as e:
+        if 'sslrootcert' in e.args[0]:
+            module.fail_json(msg='Postgresql server must be at least '
+                                 'version 8.4 to support sslrootcert')
+
+        if fail_on_conn:
+            module.fail_json(msg="unable to connect to database: %s" % to_native(e))
+        else:
+            module.warn("PostgreSQL server is unavailable: %s" % to_native(e))
+            db_connection = None
+
+    except Exception as e:
+        if fail_on_conn:
+            module.fail_json(msg="unable to connect to database: %s" % to_native(e))
+        else:
+            module.warn("PostgreSQL server is unavailable: %s" % to_native(e))
+            db_connection = None
+
+    return db_connection
+
+
+def exec_sql(obj, query, query_params=None, ddl=False, add_to_executed=True, dont_exec=False):
+    """Execute SQL.
+
+    Auxiliary function for PostgreSQL user classes.
+
+    Returns a query result if possible, or True/False if the ddl=True arg was passed.
+    It is necessary for statements that don't return any result (like DDL queries).
+
+    Args:
+        obj (obj) -- must be an object of a user class.
+            The object must have module (AnsibleModule class object) and
+            cursor (psycopg cursor object) attributes
+        query (str) -- SQL query to execute
+
+    Kwargs:
+        query_params (dict or tuple) -- Query parameters to prevent SQL injections,
+            could be a dict or tuple
+        ddl (bool) -- must return True or False instead of rows (typical for DDL queries)
+            (default False)
+        add_to_executed (bool) -- append the query to the obj.executed_queries attribute
+        dont_exec (bool) -- used with add_to_executed=True to generate a query, add it
+            to the obj.executed_queries list and return True (default False)
+    """
+
+    if dont_exec:
+        # This is usually needed to return queries in check_mode
+        # without execution
+        query = obj.cursor.mogrify(query, query_params)
+        if add_to_executed:
+            obj.executed_queries.append(query)
+
+        return True
+
+    try:
+        if query_params is not None:
+            obj.cursor.execute(query, query_params)
+        else:
+            obj.cursor.execute(query)
+
+        if add_to_executed:
+            if query_params is not None:
+                obj.executed_queries.append(obj.cursor.mogrify(query, query_params))
+            else:
+                obj.executed_queries.append(query)
+
+        if not ddl:
+            res = obj.cursor.fetchall()
+            return res
+        return True
+    except Exception as e:
+        obj.module.fail_json(msg="Cannot execute SQL '%s': %s" % (query, to_native(e)))
+    return False
+
+
+def get_conn_params(module, params_dict, warn_db_default=True):
+    """Get connection parameters from the passed dictionary.
+
+    Return a dictionary with parameters to connect to a PostgreSQL server.
+
+    Args:
+        module (AnsibleModule) -- object of ansible.module_utils.basic.AnsibleModule class
+        params_dict (dict) -- dictionary with variables
+
+    Kwargs:
+        warn_db_default (bool) -- warn that the default DB is used (default True)
+    """
+    # To use default values, keyword arguments must be absent, so
+    # check which values are empty and don't include them in the return dictionary
+    params_map = {
+        "login_host": "host",
+        "login_user": "user",
+        "login_password": "password",
+        "port": "port",
+        "ssl_mode": "sslmode",
+        "ca_cert": "sslrootcert"
+    }
+
+    # Might be different in the modules:
+    if params_dict.get('db'):
+        params_map['db'] = 'database'
+    elif params_dict.get('database'):
+        params_map['database'] = 'database'
+    elif params_dict.get('login_db'):
+        params_map['login_db'] = 'database'
+    else:
+        if warn_db_default:
+            module.warn('Database name has not been passed; '
+                        'the default database will be used to connect to.')
+
+    kw = dict((params_map[k], v) for (k, v) in iteritems(params_dict)
+              if k in params_map and v != '' and v is not None)
+
+    # If a login_unix_socket is specified, incorporate it here.
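+    # Illustrative walk-through (editorial note, assumed values): with the spec
+    # defaults above, login_host='' and login_password='' are dropped, leaving
+    # kw == {'user': 'postgres', 'sslmode': 'prefer', 'port': 5432}; passing
+    # login_unix_socket='/var/run/postgresql' then sets kw['host'] to that path,
+    # which psycopg2 treats as a Unix-socket directory.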
+ is_localhost = "host" not in kw or kw["host"] is None or kw["host"] == "localhost" + if is_localhost and params_dict["login_unix_socket"] != "": + kw["host"] = params_dict["login_unix_socket"] + + return kw + + +class PgMembership(object): + def __init__(self, module, cursor, groups, target_roles, fail_on_role=True): + self.module = module + self.cursor = cursor + self.target_roles = [r.strip() for r in target_roles] + self.groups = [r.strip() for r in groups] + self.executed_queries = [] + self.granted = {} + self.revoked = {} + self.fail_on_role = fail_on_role + self.non_existent_roles = [] + self.changed = False + self.__check_roles_exist() + + def grant(self): + for group in self.groups: + self.granted[group] = [] + + for role in self.target_roles: + # If role is in a group now, pass: + if self.__check_membership(group, role): + continue + + query = 'GRANT "%s" TO "%s"' % (group, role) + self.changed = exec_sql(self, query, ddl=True) + + if self.changed: + self.granted[group].append(role) + + return self.changed + + def revoke(self): + for group in self.groups: + self.revoked[group] = [] + + for role in self.target_roles: + # If role is not in a group now, pass: + if not self.__check_membership(group, role): + continue + + query = 'REVOKE "%s" FROM "%s"' % (group, role) + self.changed = exec_sql(self, query, ddl=True) + + if self.changed: + self.revoked[group].append(role) + + return self.changed + + def __check_membership(self, src_role, dst_role): + query = ("SELECT ARRAY(SELECT b.rolname FROM " + "pg_catalog.pg_auth_members m " + "JOIN pg_catalog.pg_roles b ON (m.roleid = b.oid) " + "WHERE m.member = r.oid) " + "FROM pg_catalog.pg_roles r " + "WHERE r.rolname = %(dst_role)s") + + res = exec_sql(self, query, query_params={'dst_role': dst_role}, add_to_executed=False) + membership = [] + if res: + membership = res[0][0] + + if not membership: + return False + + if src_role in membership: + return True + + return False + + def __check_roles_exist(self): + existent_groups = self.__roles_exist(self.groups) + existent_roles = self.__roles_exist(self.target_roles) + + for group in self.groups: + if group not in existent_groups: + if self.fail_on_role: + self.module.fail_json(msg="Role %s does not exist" % group) + else: + self.module.warn("Role %s does not exist, pass" % group) + self.non_existent_roles.append(group) + + for role in self.target_roles: + if role not in existent_roles: + if self.fail_on_role: + self.module.fail_json(msg="Role %s does not exist" % role) + else: + self.module.warn("Role %s does not exist, pass" % role) + + if role not in self.groups: + self.non_existent_roles.append(role) + + else: + if self.fail_on_role: + self.module.exit_json(msg="Role role '%s' is a member of role '%s'" % (role, role)) + else: + self.module.warn("Role role '%s' is a member of role '%s', pass" % (role, role)) + + # Update role lists, excluding non existent roles: + self.groups = [g for g in self.groups if g not in self.non_existent_roles] + + self.target_roles = [r for r in self.target_roles if r not in self.non_existent_roles] + + def __roles_exist(self, roles): + tmp = ["'" + x + "'" for x in roles] + query = "SELECT rolname FROM pg_roles WHERE rolname IN (%s)" % ','.join(tmp) + return [x[0] for x in exec_sql(self, query, add_to_executed=False)] diff --git a/plugins/module_utils/pure.py b/plugins/module_utils/pure.py new file mode 100644 index 0000000000..019c11add2 --- /dev/null +++ b/plugins/module_utils/pure.py @@ -0,0 +1,128 @@ +# -*- coding: utf-8 -*- + +# This code is part of 
Ansible, but is an independent component. +# This particular file snippet, and this file snippet only, is BSD licensed. +# Modules you write using this snippet, which is embedded dynamically by Ansible +# still belong to the author of the module, and may assign their own license +# to the complete work. +# +# Copyright (c), Simon Dodsley ,2017 +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without modification, +# are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +HAS_PURESTORAGE = True +try: + from purestorage import purestorage +except ImportError: + HAS_PURESTORAGE = False + +HAS_PURITY_FB = True +try: + from purity_fb import PurityFb, FileSystem, FileSystemSnapshot, SnapshotSuffix, rest +except ImportError: + HAS_PURITY_FB = False + +from functools import wraps +from os import environ +from os import path +import platform + +VERSION = 1.2 +USER_AGENT_BASE = 'Ansible' +API_AGENT_VERSION = 1.5 + + +def get_system(module): + """Return System Object or Fail""" + user_agent = '%(base)s %(class)s/%(version)s (%(platform)s)' % { + 'base': USER_AGENT_BASE, + 'class': __name__, + 'version': VERSION, + 'platform': platform.platform() + } + array_name = module.params['fa_url'] + api = module.params['api_token'] + + if array_name and api: + system = purestorage.FlashArray(array_name, api_token=api, user_agent=user_agent) + elif environ.get('PUREFA_URL') and environ.get('PUREFA_API'): + system = purestorage.FlashArray(environ.get('PUREFA_URL'), api_token=(environ.get('PUREFA_API')), user_agent=user_agent) + else: + module.fail_json(msg="You must set PUREFA_URL and PUREFA_API environment variables or the fa_url and api_token module arguments") + try: + system.get() + except Exception: + module.fail_json(msg="Pure Storage FlashArray authentication failed. 
Check your credentials") + return system + + +def get_blade(module): + """Return System Object or Fail""" + user_agent = '%(base)s %(class)s/%(version)s (%(platform)s)' % { + 'base': USER_AGENT_BASE, + 'class': __name__, + 'version': VERSION, + 'platform': platform.platform() + } + blade_name = module.params['fb_url'] + api = module.params['api_token'] + + if blade_name and api: + blade = PurityFb(blade_name) + blade.disable_verify_ssl() + try: + blade.login(api) + versions = blade.api_version.list_versions().versions + if API_AGENT_VERSION in versions: + blade._api_client.user_agent = user_agent + except rest.ApiException as e: + module.fail_json(msg="Pure Storage FlashBlade authentication failed. Check your credentials") + elif environ.get('PUREFB_URL') and environ.get('PUREFB_API'): + blade = PurityFb(environ.get('PUREFB_URL')) + blade.disable_verify_ssl() + try: + blade.login(environ.get('PUREFB_API')) + versions = blade.api_version.list_versions().versions + if API_AGENT_VERSION in versions: + blade._api_client.user_agent = user_agent + except rest.ApiException as e: + module.fail_json(msg="Pure Storage FlashBlade authentication failed. Check your credentials") + else: + module.fail_json(msg="You must set PUREFB_URL and PUREFB_API environment variables or the fb_url and api_token module arguments") + return blade + + +def purefa_argument_spec(): + """Return standard base dictionary used for the argument_spec argument in AnsibleModule""" + + return dict( + fa_url=dict(), + api_token=dict(no_log=True), + ) + + +def purefb_argument_spec(): + """Return standard base dictionary used for the argument_spec argument in AnsibleModule""" + + return dict( + fb_url=dict(), + api_token=dict(no_log=True), + ) diff --git a/plugins/module_utils/rabbitmq.py b/plugins/module_utils/rabbitmq.py new file mode 100644 index 0000000000..cf76400644 --- /dev/null +++ b/plugins/module_utils/rabbitmq.py @@ -0,0 +1,220 @@ +# -*- coding: utf-8 -*- +# +# Copyright: (c) 2016, Jorge Rodriguez +# Copyright: (c) 2018, John Imison +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +from ansible.module_utils._text import to_native +from ansible.module_utils.basic import missing_required_lib +from ansible.module_utils.six.moves.urllib import parse as urllib_parse +from mimetypes import MimeTypes + +import os +import json +import traceback + +PIKA_IMP_ERR = None +try: + import pika + import pika.exceptions + from pika import spec + HAS_PIKA = True +except ImportError: + PIKA_IMP_ERR = traceback.format_exc() + HAS_PIKA = False + + +def rabbitmq_argument_spec(): + return dict( + login_user=dict(type='str', default='guest'), + login_password=dict(type='str', default='guest', no_log=True), + login_host=dict(type='str', default='localhost'), + login_port=dict(type='str', default='15672'), + login_protocol=dict(type='str', default='http', choices=['http', 'https']), + ca_cert=dict(type='path', aliases=['cacert']), + client_cert=dict(type='path', aliases=['cert']), + client_key=dict(type='path', aliases=['key']), + vhost=dict(type='str', default='/'), + ) + + +# notification/rabbitmq_basic_publish.py +class RabbitClient(): + def __init__(self, module): + self.module = module + self.params = module.params + self.check_required_library() + self.check_host_params() + self.url = self.params['url'] + self.proto = self.params['proto'] + self.username = self.params['username'] + self.password = 
self.params['password'] + self.host = self.params['host'] + self.port = self.params['port'] + self.vhost = self.params['vhost'] + self.queue = self.params['queue'] + self.headers = self.params['headers'] + self.cafile = self.params['cafile'] + self.certfile = self.params['certfile'] + self.keyfile = self.params['keyfile'] + + if self.host is not None: + self.build_url() + + if self.cafile is not None: + self.append_ssl_certs() + + self.connect_to_rabbitmq() + + def check_required_library(self): + if not HAS_PIKA: + self.module.fail_json(msg=missing_required_lib("pika"), exception=PIKA_IMP_ERR) + + def check_host_params(self): + # Fail if url is specified and other conflicting parameters have been specified + if self.params['url'] is not None and any(self.params[k] is not None for k in ['proto', 'host', 'port', 'password', 'username', 'vhost']): + self.module.fail_json(msg="url and proto, host, port, vhost, username or password cannot be specified at the same time.") + + # Fail if url not specified and there is a missing parameter to build the url + if self.params['url'] is None and any(self.params[k] is None for k in ['proto', 'host', 'port', 'password', 'username', 'vhost']): + self.module.fail_json(msg="Connection parameters must be passed via url, or, proto, host, port, vhost, username or password.") + + def append_ssl_certs(self): + ssl_options = {} + if self.cafile: + ssl_options['cafile'] = self.cafile + if self.certfile: + ssl_options['certfile'] = self.certfile + if self.keyfile: + ssl_options['keyfile'] = self.keyfile + + self.url = self.url + '?ssl_options=' + urllib_parse.quote(json.dumps(ssl_options)) + + @staticmethod + def rabbitmq_argument_spec(): + return dict( + url=dict(type='str'), + proto=dict(type='str', choices=['amqp', 'amqps']), + host=dict(type='str'), + port=dict(type='int'), + username=dict(type='str'), + password=dict(type='str', no_log=True), + vhost=dict(type='str'), + queue=dict(type='str') + ) + + ''' Consider some file size limits here ''' + def _read_file(self, path): + try: + with open(path, "rb") as file_handle: + return file_handle.read() + except IOError as e: + self.module.fail_json(msg="Unable to open file %s: %s" % (path, to_native(e))) + + @staticmethod + def _check_file_mime_type(path): + mime = MimeTypes() + return mime.guess_type(path) + + def build_url(self): + self.url = '{0}://{1}:{2}@{3}:{4}/{5}'.format(self.proto, + self.username, + self.password, + self.host, + self.port, + self.vhost) + + def connect_to_rabbitmq(self): + """ + Function to connect to rabbitmq using username and password + """ + try: + parameters = pika.URLParameters(self.url) + except Exception as e: + self.module.fail_json(msg="URL malformed: %s" % to_native(e)) + + try: + self.connection = pika.BlockingConnection(parameters) + except Exception as e: + self.module.fail_json(msg="Connection issue: %s" % to_native(e)) + + try: + self.conn_channel = self.connection.channel() + except pika.exceptions.AMQPChannelError as e: + self.close_connection() + self.module.fail_json(msg="Channel issue: %s" % to_native(e)) + + def close_connection(self): + try: + self.connection.close() + except pika.exceptions.AMQPConnectionError: + pass + + def basic_publish(self): + self.content_type = self.params.get("content_type") + + if self.params.get("body") is not None: + args = dict( + body=self.params.get("body"), + exchange=self.params.get("exchange"), + routing_key=self.params.get("routing_key"), + properties=pika.BasicProperties(content_type=self.content_type, delivery_mode=1, 
headers=self.headers)) + + # If src (file) is defined and content_type is left as default, do a mime lookup on the file + if self.params.get("src") is not None and self.content_type == 'text/plain': + self.content_type = RabbitClient._check_file_mime_type(self.params.get("src"))[0] + self.headers.update( + filename=os.path.basename(self.params.get("src")) + ) + + args = dict( + body=self._read_file(self.params.get("src")), + exchange=self.params.get("exchange"), + routing_key=self.params.get("routing_key"), + properties=pika.BasicProperties(content_type=self.content_type, + delivery_mode=1, + headers=self.headers + )) + elif self.params.get("src") is not None: + args = dict( + body=self._read_file(self.params.get("src")), + exchange=self.params.get("exchange"), + routing_key=self.params.get("routing_key"), + properties=pika.BasicProperties(content_type=self.content_type, + delivery_mode=1, + headers=self.headers + )) + + try: + # If queue is not defined, RabbitMQ will return the queue name of the automatically generated queue. + if self.queue is None: + result = self.conn_channel.queue_declare(durable=self.params.get("durable"), + exclusive=self.params.get("exclusive"), + auto_delete=self.params.get("auto_delete")) + self.conn_channel.confirm_delivery() + self.queue = result.method.queue + else: + self.conn_channel.queue_declare(queue=self.queue, + durable=self.params.get("durable"), + exclusive=self.params.get("exclusive"), + auto_delete=self.params.get("auto_delete")) + self.conn_channel.confirm_delivery() + except Exception as e: + self.module.fail_json(msg="Queue declare issue: %s" % to_native(e)) + + # https://github.com/ansible/ansible/blob/devel/lib/ansible/module_utils/cloudstack.py#L150 + if args['routing_key'] is None: + args['routing_key'] = self.queue + + if args['exchange'] is None: + args['exchange'] = '' + + try: + self.conn_channel.basic_publish(**args) + return True + except pika.exceptions.UnroutableError: + return False diff --git a/plugins/module_utils/rax.py b/plugins/module_utils/rax.py new file mode 100644 index 0000000000..d8607541f2 --- /dev/null +++ b/plugins/module_utils/rax.py @@ -0,0 +1,331 @@ +# This code is part of Ansible, but is an independent component. +# This particular file snippet, and this file snippet only, is BSD licensed. +# Modules you write using this snippet, which is embedded dynamically by +# Ansible still belong to the author of the module, and may assign their own +# license to the complete work. +# +# Copyright (c), Michael DeHaan , 2012-2013 +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. + +import os +import re +from uuid import UUID + +from ansible.module_utils.six import text_type, binary_type + +FINAL_STATUSES = ('ACTIVE', 'ERROR') +VOLUME_STATUS = ('available', 'attaching', 'creating', 'deleting', 'in-use', + 'error', 'error_deleting') + +CLB_ALGORITHMS = ['RANDOM', 'LEAST_CONNECTIONS', 'ROUND_ROBIN', + 'WEIGHTED_LEAST_CONNECTIONS', 'WEIGHTED_ROUND_ROBIN'] +CLB_PROTOCOLS = ['DNS_TCP', 'DNS_UDP', 'FTP', 'HTTP', 'HTTPS', 'IMAPS', + 'IMAPv4', 'LDAP', 'LDAPS', 'MYSQL', 'POP3', 'POP3S', 'SMTP', + 'TCP', 'TCP_CLIENT_FIRST', 'UDP', 'UDP_STREAM', 'SFTP'] + +NON_CALLABLES = (text_type, binary_type, bool, dict, int, list, type(None)) +PUBLIC_NET_ID = "00000000-0000-0000-0000-000000000000" +SERVICE_NET_ID = "11111111-1111-1111-1111-111111111111" + + +def rax_slugify(value): + """Prepend a key with rax_ and normalize the key name""" + return 'rax_%s' % (re.sub(r'[^\w-]', '_', value).lower().lstrip('_')) + + +def rax_clb_node_to_dict(obj): + """Function to convert a CLB Node object to a dict""" + if not obj: + return {} + node = obj.to_dict() + node['id'] = obj.id + node['weight'] = obj.weight + return node + + +def rax_to_dict(obj, obj_type='standard'): + """Generic function to convert a pyrax object to a dict + + obj_type values: + standard + clb + server + + """ + instance = {} + for key in dir(obj): + value = getattr(obj, key) + if obj_type == 'clb' and key == 'nodes': + instance[key] = [] + for node in value: + instance[key].append(rax_clb_node_to_dict(node)) + elif (isinstance(value, list) and len(value) > 0 and + not isinstance(value[0], NON_CALLABLES)): + instance[key] = [] + for item in value: + instance[key].append(rax_to_dict(item)) + elif (isinstance(value, NON_CALLABLES) and not key.startswith('_')): + if obj_type == 'server': + if key == 'image': + if not value: + instance['rax_boot_source'] = 'volume' + else: + instance['rax_boot_source'] = 'local' + key = rax_slugify(key) + instance[key] = value + + if obj_type == 'server': + for attr in ['id', 'accessIPv4', 'name', 'status']: + instance[attr] = instance.get(rax_slugify(attr)) + + return instance + + +def rax_find_bootable_volume(module, rax_module, server, exit=True): + """Find a servers bootable volume""" + cs = rax_module.cloudservers + cbs = rax_module.cloud_blockstorage + server_id = rax_module.utils.get_id(server) + volumes = cs.volumes.get_server_volumes(server_id) + bootable_volumes = [] + for volume in volumes: + vol = cbs.get(volume) + if module.boolean(vol.bootable): + bootable_volumes.append(vol) + if not bootable_volumes: + if exit: + module.fail_json(msg='No bootable volumes could be found for ' + 'server %s' % server_id) + else: + return False + elif len(bootable_volumes) > 1: + if exit: + module.fail_json(msg='Multiple bootable volumes found for server ' + '%s' % server_id) + else: + return False + + return bootable_volumes[0] + + +def rax_find_image(module, rax_module, image, exit=True): + """Find a server image by ID or Name""" + cs = rax_module.cloudservers + try: + 
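+        # Editorial note: UUID() raises ValueError for anything that is not a
+        # well-formed UUID string, so an image ID is returned as-is below, while
+        # a human-readable name (e.g. the hypothetical 'Ubuntu 18.04 LTS') falls
+        # into the except branch and is resolved via the image lookups that follow.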
UUID(image) + except ValueError: + try: + image = cs.images.find(human_id=image) + except(cs.exceptions.NotFound, + cs.exceptions.NoUniqueMatch): + try: + image = cs.images.find(name=image) + except (cs.exceptions.NotFound, + cs.exceptions.NoUniqueMatch): + if exit: + module.fail_json(msg='No matching image found (%s)' % + image) + else: + return False + + return rax_module.utils.get_id(image) + + +def rax_find_volume(module, rax_module, name): + """Find a Block storage volume by ID or name""" + cbs = rax_module.cloud_blockstorage + try: + UUID(name) + volume = cbs.get(name) + except ValueError: + try: + volume = cbs.find(name=name) + except rax_module.exc.NotFound: + volume = None + except Exception as e: + module.fail_json(msg='%s' % e) + return volume + + +def rax_find_network(module, rax_module, network): + """Find a cloud network by ID or name""" + cnw = rax_module.cloud_networks + try: + UUID(network) + except ValueError: + if network.lower() == 'public': + return cnw.get_server_networks(PUBLIC_NET_ID) + elif network.lower() == 'private': + return cnw.get_server_networks(SERVICE_NET_ID) + else: + try: + network_obj = cnw.find_network_by_label(network) + except (rax_module.exceptions.NetworkNotFound, + rax_module.exceptions.NetworkLabelNotUnique): + module.fail_json(msg='No matching network found (%s)' % + network) + else: + return cnw.get_server_networks(network_obj) + else: + return cnw.get_server_networks(network) + + +def rax_find_server(module, rax_module, server): + """Find a Cloud Server by ID or name""" + cs = rax_module.cloudservers + try: + UUID(server) + server = cs.servers.get(server) + except ValueError: + servers = cs.servers.list(search_opts=dict(name='^%s$' % server)) + if not servers: + module.fail_json(msg='No Server was matched by name, ' + 'try using the Server ID instead') + if len(servers) > 1: + module.fail_json(msg='Multiple servers matched by name, ' + 'try using the Server ID instead') + + # We made it this far, grab the first and hopefully only server + # in the list + server = servers[0] + return server + + +def rax_find_loadbalancer(module, rax_module, loadbalancer): + """Find a Cloud Load Balancer by ID or name""" + clb = rax_module.cloud_loadbalancers + try: + found = clb.get(loadbalancer) + except Exception: + found = [] + for lb in clb.list(): + if loadbalancer == lb.name: + found.append(lb) + + if not found: + module.fail_json(msg='No loadbalancer was matched') + + if len(found) > 1: + module.fail_json(msg='Multiple loadbalancers matched') + + # We made it this far, grab the first and hopefully only item + # in the list + found = found[0] + + return found + + +def rax_argument_spec(): + """Return standard base dictionary used for the argument_spec + argument in AnsibleModule + + """ + return dict( + api_key=dict(type='str', aliases=['password'], no_log=True), + auth_endpoint=dict(type='str'), + credentials=dict(type='path', aliases=['creds_file']), + env=dict(type='str'), + identity_type=dict(type='str', default='rackspace'), + region=dict(type='str'), + tenant_id=dict(type='str'), + tenant_name=dict(type='str'), + username=dict(type='str'), + validate_certs=dict(type='bool', aliases=['verify_ssl']), + ) + + +def rax_required_together(): + """Return the default list used for the required_together argument to + AnsibleModule""" + return [['api_key', 'username']] + + +def setup_rax_module(module, rax_module, region_required=True): + """Set up pyrax in a standard way for all modules""" + rax_module.USER_AGENT = 'ansible/%s %s' % (module.ansible_version, 
+ rax_module.USER_AGENT) + + api_key = module.params.get('api_key') + auth_endpoint = module.params.get('auth_endpoint') + credentials = module.params.get('credentials') + env = module.params.get('env') + identity_type = module.params.get('identity_type') + region = module.params.get('region') + tenant_id = module.params.get('tenant_id') + tenant_name = module.params.get('tenant_name') + username = module.params.get('username') + verify_ssl = module.params.get('validate_certs') + + if env is not None: + rax_module.set_environment(env) + + rax_module.set_setting('identity_type', identity_type) + if verify_ssl is not None: + rax_module.set_setting('verify_ssl', verify_ssl) + if auth_endpoint is not None: + rax_module.set_setting('auth_endpoint', auth_endpoint) + if tenant_id is not None: + rax_module.set_setting('tenant_id', tenant_id) + if tenant_name is not None: + rax_module.set_setting('tenant_name', tenant_name) + + try: + username = username or os.environ.get('RAX_USERNAME') + if not username: + username = rax_module.get_setting('keyring_username') + if username: + api_key = 'USE_KEYRING' + if not api_key: + api_key = os.environ.get('RAX_API_KEY') + credentials = (credentials or os.environ.get('RAX_CREDENTIALS') or + os.environ.get('RAX_CREDS_FILE')) + region = (region or os.environ.get('RAX_REGION') or + rax_module.get_setting('region')) + except KeyError as e: + module.fail_json(msg='Unable to load %s' % e.message) + + try: + if api_key and username: + if api_key == 'USE_KEYRING': + rax_module.keyring_auth(username, region=region) + else: + rax_module.set_credentials(username, api_key=api_key, + region=region) + elif credentials: + credentials = os.path.expanduser(credentials) + rax_module.set_credential_file(credentials, region=region) + else: + raise Exception('No credentials supplied!') + except Exception as e: + if e.message: + msg = str(e.message) + else: + msg = repr(e) + module.fail_json(msg=msg) + + if region_required and region not in rax_module.regions: + module.fail_json(msg='%s is not a valid region, must be one of: %s' % + (region, ','.join(rax_module.regions))) + + return rax_module diff --git a/plugins/module_utils/redfish_utils.py b/plugins/module_utils/redfish_utils.py new file mode 100644 index 0000000000..8fc6b42e4d --- /dev/null +++ b/plugins/module_utils/redfish_utils.py @@ -0,0 +1,2458 @@ +# Copyright (c) 2017-2018 Dell EMC Inc. +# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +import json +from ansible.module_utils.urls import open_url +from ansible.module_utils._text import to_text +from ansible.module_utils.six.moves import http_client +from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError + +GET_HEADERS = {'accept': 'application/json', 'OData-Version': '4.0'} +POST_HEADERS = {'content-type': 'application/json', 'accept': 'application/json', + 'OData-Version': '4.0'} +PATCH_HEADERS = {'content-type': 'application/json', 'accept': 'application/json', + 'OData-Version': '4.0'} +DELETE_HEADERS = {'accept': 'application/json', 'OData-Version': '4.0'} + +DEPRECATE_MSG = 'Issuing a data modification command without specifying the '\ + 'ID of the target %(resource)s resource when there is more '\ + 'than one %(resource)s will use the first one in the '\ + 'collection. 
Use the `resource_id` option to specify the '\ + 'target %(resource)s ID' + + +class RedfishUtils(object): + + def __init__(self, creds, root_uri, timeout, module, resource_id=None, + data_modification=False): + self.root_uri = root_uri + self.creds = creds + self.timeout = timeout + self.module = module + self.service_root = '/redfish/v1/' + self.resource_id = resource_id + self.data_modification = data_modification + self._init_session() + + # The following functions are to send GET/POST/PATCH/DELETE requests + def get_request(self, uri): + try: + resp = open_url(uri, method="GET", headers=GET_HEADERS, + url_username=self.creds['user'], + url_password=self.creds['pswd'], + force_basic_auth=True, validate_certs=False, + follow_redirects='all', + use_proxy=True, timeout=self.timeout) + data = json.loads(resp.read()) + headers = dict((k.lower(), v) for (k, v) in resp.info().items()) + except HTTPError as e: + msg = self._get_extended_message(e) + return {'ret': False, + 'msg': "HTTP Error %s on GET request to '%s', extended message: '%s'" + % (e.code, uri, msg), + 'status': e.code} + except URLError as e: + return {'ret': False, 'msg': "URL Error on GET request to '%s': '%s'" + % (uri, e.reason)} + # Almost all errors should be caught above, but just in case + except Exception as e: + return {'ret': False, + 'msg': "Failed GET request to '%s': '%s'" % (uri, to_text(e))} + return {'ret': True, 'data': data, 'headers': headers} + + def post_request(self, uri, pyld): + try: + resp = open_url(uri, data=json.dumps(pyld), + headers=POST_HEADERS, method="POST", + url_username=self.creds['user'], + url_password=self.creds['pswd'], + force_basic_auth=True, validate_certs=False, + follow_redirects='all', + use_proxy=True, timeout=self.timeout) + except HTTPError as e: + msg = self._get_extended_message(e) + return {'ret': False, + 'msg': "HTTP Error %s on POST request to '%s', extended message: '%s'" + % (e.code, uri, msg), + 'status': e.code} + except URLError as e: + return {'ret': False, 'msg': "URL Error on POST request to '%s': '%s'" + % (uri, e.reason)} + # Almost all errors should be caught above, but just in case + except Exception as e: + return {'ret': False, + 'msg': "Failed POST request to '%s': '%s'" % (uri, to_text(e))} + return {'ret': True, 'resp': resp} + + def patch_request(self, uri, pyld): + headers = PATCH_HEADERS + r = self.get_request(uri) + if r['ret']: + # Get etag from etag header or @odata.etag property + etag = r['headers'].get('etag') + if not etag: + etag = r['data'].get('@odata.etag') + if etag: + # Make copy of headers and add If-Match header + headers = dict(headers) + headers['If-Match'] = etag + try: + resp = open_url(uri, data=json.dumps(pyld), + headers=headers, method="PATCH", + url_username=self.creds['user'], + url_password=self.creds['pswd'], + force_basic_auth=True, validate_certs=False, + follow_redirects='all', + use_proxy=True, timeout=self.timeout) + except HTTPError as e: + msg = self._get_extended_message(e) + return {'ret': False, + 'msg': "HTTP Error %s on PATCH request to '%s', extended message: '%s'" + % (e.code, uri, msg), + 'status': e.code} + except URLError as e: + return {'ret': False, 'msg': "URL Error on PATCH request to '%s': '%s'" + % (uri, e.reason)} + # Almost all errors should be caught above, but just in case + except Exception as e: + return {'ret': False, + 'msg': "Failed PATCH request to '%s': '%s'" % (uri, to_text(e))} + return {'ret': True, 'resp': resp} + + def delete_request(self, uri, pyld=None): + try: + data = json.dumps(pyld) 
if pyld else None + resp = open_url(uri, data=data, + headers=DELETE_HEADERS, method="DELETE", + url_username=self.creds['user'], + url_password=self.creds['pswd'], + force_basic_auth=True, validate_certs=False, + follow_redirects='all', + use_proxy=True, timeout=self.timeout) + except HTTPError as e: + msg = self._get_extended_message(e) + return {'ret': False, + 'msg': "HTTP Error %s on DELETE request to '%s', extended message: '%s'" + % (e.code, uri, msg), + 'status': e.code} + except URLError as e: + return {'ret': False, 'msg': "URL Error on DELETE request to '%s': '%s'" + % (uri, e.reason)} + # Almost all errors should be caught above, but just in case + except Exception as e: + return {'ret': False, + 'msg': "Failed DELETE request to '%s': '%s'" % (uri, to_text(e))} + return {'ret': True, 'resp': resp} + + @staticmethod + def _get_extended_message(error): + """ + Get Redfish ExtendedInfo message from response payload if present + :param error: an HTTPError exception + :type error: HTTPError + :return: the ExtendedInfo message if present, else standard HTTP error + """ + msg = http_client.responses.get(error.code, '') + if error.code >= 400: + try: + body = error.read().decode('utf-8') + data = json.loads(body) + ext_info = data['error']['@Message.ExtendedInfo'] + msg = ext_info[0]['Message'] + except Exception: + pass + return msg + + def _init_session(self): + pass + + def _find_accountservice_resource(self): + response = self.get_request(self.root_uri + self.service_root) + if response['ret'] is False: + return response + data = response['data'] + if 'AccountService' not in data: + return {'ret': False, 'msg': "AccountService resource not found"} + else: + account_service = data["AccountService"]["@odata.id"] + response = self.get_request(self.root_uri + account_service) + if response['ret'] is False: + return response + data = response['data'] + accounts = data['Accounts']['@odata.id'] + if accounts[-1:] == '/': + accounts = accounts[:-1] + self.accounts_uri = accounts + return {'ret': True} + + def _find_sessionservice_resource(self): + response = self.get_request(self.root_uri + self.service_root) + if response['ret'] is False: + return response + data = response['data'] + if 'SessionService' not in data: + return {'ret': False, 'msg': "SessionService resource not found"} + else: + session_service = data["SessionService"]["@odata.id"] + response = self.get_request(self.root_uri + session_service) + if response['ret'] is False: + return response + data = response['data'] + sessions = data['Sessions']['@odata.id'] + if sessions[-1:] == '/': + sessions = sessions[:-1] + self.sessions_uri = sessions + return {'ret': True} + + def _get_resource_uri_by_id(self, uris, id_prop): + for uri in uris: + response = self.get_request(self.root_uri + uri) + if response['ret'] is False: + continue + data = response['data'] + if id_prop == data.get('Id'): + return uri + return None + + def _find_systems_resource(self): + response = self.get_request(self.root_uri + self.service_root) + if response['ret'] is False: + return response + data = response['data'] + if 'Systems' not in data: + return {'ret': False, 'msg': "Systems resource not found"} + response = self.get_request(self.root_uri + data['Systems']['@odata.id']) + if response['ret'] is False: + return response + self.systems_uris = [ + i['@odata.id'] for i in response['data'].get('Members', [])] + if not self.systems_uris: + return { + 'ret': False, + 'msg': "ComputerSystem's Members array is either empty or missing"} + self.systems_uri = 
self.systems_uris[0] + if self.data_modification: + if self.resource_id: + self.systems_uri = self._get_resource_uri_by_id(self.systems_uris, + self.resource_id) + if not self.systems_uri: + return { + 'ret': False, + 'msg': "System resource %s not found" % self.resource_id} + elif len(self.systems_uris) > 1: + self.module.deprecate(DEPRECATE_MSG % {'resource': 'System'}, + version='2.14') + return {'ret': True} + + def _find_updateservice_resource(self): + response = self.get_request(self.root_uri + self.service_root) + if response['ret'] is False: + return response + data = response['data'] + if 'UpdateService' not in data: + return {'ret': False, 'msg': "UpdateService resource not found"} + else: + update = data["UpdateService"]["@odata.id"] + self.update_uri = update + response = self.get_request(self.root_uri + update) + if response['ret'] is False: + return response + data = response['data'] + self.firmware_uri = self.software_uri = None + if 'FirmwareInventory' in data: + self.firmware_uri = data['FirmwareInventory'][u'@odata.id'] + if 'SoftwareInventory' in data: + self.software_uri = data['SoftwareInventory'][u'@odata.id'] + return {'ret': True} + + def _find_chassis_resource(self): + response = self.get_request(self.root_uri + self.service_root) + if response['ret'] is False: + return response + data = response['data'] + if 'Chassis' not in data: + return {'ret': False, 'msg': "Chassis resource not found"} + chassis = data["Chassis"]["@odata.id"] + response = self.get_request(self.root_uri + chassis) + if response['ret'] is False: + return response + self.chassis_uris = [ + i['@odata.id'] for i in response['data'].get('Members', [])] + if not self.chassis_uris: + return {'ret': False, + 'msg': "Chassis Members array is either empty or missing"} + self.chassis_uri = self.chassis_uris[0] + if self.data_modification: + if self.resource_id: + self.chassis_uri = self._get_resource_uri_by_id(self.chassis_uris, + self.resource_id) + if not self.chassis_uri: + return { + 'ret': False, + 'msg': "Chassis resource %s not found" % self.resource_id} + elif len(self.chassis_uris) > 1: + self.module.deprecate(DEPRECATE_MSG % {'resource': 'Chassis'}, + version='2.14') + return {'ret': True} + + def _find_managers_resource(self): + response = self.get_request(self.root_uri + self.service_root) + if response['ret'] is False: + return response + data = response['data'] + if 'Managers' not in data: + return {'ret': False, 'msg': "Manager resource not found"} + manager = data["Managers"]["@odata.id"] + response = self.get_request(self.root_uri + manager) + if response['ret'] is False: + return response + self.manager_uris = [ + i['@odata.id'] for i in response['data'].get('Members', [])] + if not self.manager_uris: + return {'ret': False, + 'msg': "Managers Members array is either empty or missing"} + self.manager_uri = self.manager_uris[0] + if self.data_modification: + if self.resource_id: + self.manager_uri = self._get_resource_uri_by_id(self.manager_uris, + self.resource_id) + if not self.manager_uri: + return { + 'ret': False, + 'msg': "Manager resource %s not found" % self.resource_id} + elif len(self.manager_uris) > 1: + self.module.deprecate(DEPRECATE_MSG % {'resource': 'Manager'}, + version='2.14') + return {'ret': True} + + def get_logs(self): + log_svcs_uri_list = [] + list_of_logs = [] + properties = ['Severity', 'Created', 'EntryType', 'OemRecordFormat', + 'Message', 'MessageId', 'MessageArgs'] + + # Find LogService + response = self.get_request(self.root_uri + self.manager_uri) + if 
response['ret'] is False: + return response + data = response['data'] + if 'LogServices' not in data: + return {'ret': False, 'msg': "LogServices resource not found"} + + # Find all entries in LogServices + logs_uri = data["LogServices"]["@odata.id"] + response = self.get_request(self.root_uri + logs_uri) + if response['ret'] is False: + return response + data = response['data'] + for log_svcs_entry in data.get('Members', []): + response = self.get_request(self.root_uri + log_svcs_entry[u'@odata.id']) + if response['ret'] is False: + return response + _data = response['data'] + if 'Entries' in _data: + log_svcs_uri_list.append(_data['Entries'][u'@odata.id']) + + # For each entry in LogServices, get log name and all log entries + for log_svcs_uri in log_svcs_uri_list: + logs = {} + list_of_log_entries = [] + response = self.get_request(self.root_uri + log_svcs_uri) + if response['ret'] is False: + return response + data = response['data'] + logs['Description'] = data.get('Description', + 'Collection of log entries') + # Get all log entries for each type of log found + for logEntry in data.get('Members', []): + entry = {} + for prop in properties: + if prop in logEntry: + entry[prop] = logEntry.get(prop) + if entry: + list_of_log_entries.append(entry) + log_name = log_svcs_uri.split('/')[-1] + logs[log_name] = list_of_log_entries + list_of_logs.append(logs) + + # list_of_logs is a list of dicts, one per log service, each mapping the log name to its list of entry dicts + return {'ret': True, 'entries': list_of_logs} + + def clear_logs(self): + # Find LogService + response = self.get_request(self.root_uri + self.manager_uri) + if response['ret'] is False: + return response + data = response['data'] + if 'LogServices' not in data: + return {'ret': False, 'msg': "LogServices resource not found"} + + # Find all entries in LogServices + logs_uri = data["LogServices"]["@odata.id"] + response = self.get_request(self.root_uri + logs_uri) + if response['ret'] is False: + return response + data = response['data'] + + for log_svcs_entry in data[u'Members']: + response = self.get_request(self.root_uri + log_svcs_entry["@odata.id"]) + if response['ret'] is False: + return response + _data = response['data'] + # Check to make sure option is available, otherwise error is ugly + if "Actions" in _data: + if "#LogService.ClearLog" in _data[u"Actions"]: + response = self.post_request(self.root_uri + _data[u"Actions"]["#LogService.ClearLog"]["target"], {}) + if response['ret'] is False: + return response + return {'ret': True} + + def aggregate(self, func, uri_list, uri_name): + ret = True + entries = [] + for uri in uri_list: + inventory = func(uri) + ret = inventory.pop('ret') and ret + if 'entries' in inventory: + entries.append(({uri_name: uri}, + inventory['entries'])) + return dict(ret=ret, entries=entries) + + def aggregate_chassis(self, func): + return self.aggregate(func, self.chassis_uris, 'chassis_uri') + + def aggregate_managers(self, func): + return self.aggregate(func, self.manager_uris, 'manager_uri') + + def aggregate_systems(self, func): + return self.aggregate(func, self.systems_uris, 'system_uri') + + def get_storage_controller_inventory(self, systems_uri): + result = {} + controller_list = [] + controller_results = [] + # Get these entries, but does not fail if not found + properties = ['CacheSummary', 'FirmwareVersion', 'Identifiers', + 'Location', 'Manufacturer', 'Model', 'Name', + 'PartNumber', 'SerialNumber', 'SpeedGbps', 'Status'] + key = "StorageControllers" + + # Find Storage service + response = self.get_request(self.root_uri + systems_uri) + if
response['ret'] is False: + return response + data = response['data'] + + if 'Storage' not in data: + return {'ret': False, 'msg': "Storage resource not found"} + + # Get a list of all storage controllers and build respective URIs + storage_uri = data['Storage']["@odata.id"] + response = self.get_request(self.root_uri + storage_uri) + if response['ret'] is False: + return response + result['ret'] = True + data = response['data'] + + # Loop through Members and their StorageControllers + # and gather properties from each StorageController + if data[u'Members']: + for storage_member in data[u'Members']: + storage_member_uri = storage_member[u'@odata.id'] + response = self.get_request(self.root_uri + storage_member_uri) + data = response['data'] + + if key in data: + controller_list = data[key] + for controller in controller_list: + controller_result = {} + for property in properties: + if property in controller: + controller_result[property] = controller[property] + controller_results.append(controller_result) + result['entries'] = controller_results + return result + else: + return {'ret': False, 'msg': "Storage resource not found"} + + def get_multi_storage_controller_inventory(self): + return self.aggregate_systems(self.get_storage_controller_inventory) + + def get_disk_inventory(self, systems_uri): + result = {'entries': []} + controller_list = [] + # Get these entries, but does not fail if not found + properties = ['BlockSizeBytes', 'CapableSpeedGbs', 'CapacityBytes', + 'EncryptionAbility', 'EncryptionStatus', + 'FailurePredicted', 'HotspareType', 'Id', 'Identifiers', + 'Manufacturer', 'MediaType', 'Model', 'Name', + 'PartNumber', 'PhysicalLocation', 'Protocol', 'Revision', + 'RotationSpeedRPM', 'SerialNumber', 'Status'] + + # Find Storage service + response = self.get_request(self.root_uri + systems_uri) + if response['ret'] is False: + return response + data = response['data'] + + if 'SimpleStorage' not in data and 'Storage' not in data: + return {'ret': False, 'msg': "SimpleStorage and Storage resource \ + not found"} + + if 'Storage' in data: + # Get a list of all storage controllers and build respective URIs + storage_uri = data[u'Storage'][u'@odata.id'] + response = self.get_request(self.root_uri + storage_uri) + if response['ret'] is False: + return response + result['ret'] = True + data = response['data'] + + if data[u'Members']: + for controller in data[u'Members']: + controller_list.append(controller[u'@odata.id']) + for c in controller_list: + uri = self.root_uri + c + response = self.get_request(uri) + if response['ret'] is False: + return response + data = response['data'] + controller_name = 'Controller 1' + if 'StorageControllers' in data: + sc = data['StorageControllers'] + if sc: + if 'Name' in sc[0]: + controller_name = sc[0]['Name'] + else: + sc_id = sc[0].get('Id', '1') + controller_name = 'Controller %s' % sc_id + drive_results = [] + if 'Drives' in data: + for device in data[u'Drives']: + disk_uri = self.root_uri + device[u'@odata.id'] + response = self.get_request(disk_uri) + data = response['data'] + + drive_result = {} + for property in properties: + if property in data: + if data[property] is not None: + drive_result[property] = data[property] + drive_results.append(drive_result) + drives = {'Controller': controller_name, + 'Drives': drive_results} + result["entries"].append(drives) + + if 'SimpleStorage' in data: + # Get a list of all storage controllers and build respective URIs + storage_uri = data["SimpleStorage"]["@odata.id"] + response = 
self.get_request(self.root_uri + storage_uri) + if response['ret'] is False: + return response + result['ret'] = True + data = response['data'] + + for controller in data[u'Members']: + controller_list.append(controller[u'@odata.id']) + + for c in controller_list: + uri = self.root_uri + c + response = self.get_request(uri) + if response['ret'] is False: + return response + data = response['data'] + if 'Name' in data: + controller_name = data['Name'] + else: + sc_id = data.get('Id', '1') + controller_name = 'Controller %s' % sc_id + drive_results = [] + for device in data[u'Devices']: + drive_result = {} + for property in properties: + if property in device: + drive_result[property] = device[property] + drive_results.append(drive_result) + drives = {'Controller': controller_name, + 'Drives': drive_results} + result["entries"].append(drives) + + return result + + def get_multi_disk_inventory(self): + return self.aggregate_systems(self.get_disk_inventory) + + def get_volume_inventory(self, systems_uri): + result = {'entries': []} + controller_list = [] + volume_list = [] + # Get these entries, but does not fail if not found + properties = ['Id', 'Name', 'RAIDType', 'VolumeType', 'BlockSizeBytes', + 'Capacity', 'CapacityBytes', 'CapacitySources', + 'Encrypted', 'EncryptionTypes', 'Identifiers', + 'Operations', 'OptimumIOSizeBytes', 'AccessCapabilities', + 'AllocatedPools', 'Status'] + + # Find Storage service + response = self.get_request(self.root_uri + systems_uri) + if response['ret'] is False: + return response + data = response['data'] + + if 'SimpleStorage' not in data and 'Storage' not in data: + return {'ret': False, 'msg': "SimpleStorage and Storage resource \ + not found"} + + if 'Storage' in data: + # Get a list of all storage controllers and build respective URIs + storage_uri = data[u'Storage'][u'@odata.id'] + response = self.get_request(self.root_uri + storage_uri) + if response['ret'] is False: + return response + result['ret'] = True + data = response['data'] + + if data.get('Members'): + for controller in data[u'Members']: + controller_list.append(controller[u'@odata.id']) + for c in controller_list: + uri = self.root_uri + c + response = self.get_request(uri) + if response['ret'] is False: + return response + data = response['data'] + controller_name = 'Controller 1' + if 'StorageControllers' in data: + sc = data['StorageControllers'] + if sc: + if 'Name' in sc[0]: + controller_name = sc[0]['Name'] + else: + sc_id = sc[0].get('Id', '1') + controller_name = 'Controller %s' % sc_id + volume_results = [] + if 'Volumes' in data: + # Get a list of all volumes and build respective URIs + volumes_uri = data[u'Volumes'][u'@odata.id'] + response = self.get_request(self.root_uri + volumes_uri) + data = response['data'] + + if data.get('Members'): + for volume in data[u'Members']: + volume_list.append(volume[u'@odata.id']) + for v in volume_list: + uri = self.root_uri + v + response = self.get_request(uri) + if response['ret'] is False: + return response + data = response['data'] + + volume_result = {} + for property in properties: + if property in data: + if data[property] is not None: + volume_result[property] = data[property] + + # Get related Drives Id + drive_id_list = [] + if 'Links' in data: + if 'Drives' in data[u'Links']: + for link in data[u'Links'][u'Drives']: + drive_id_link = link[u'@odata.id'] + drive_id = drive_id_link.split("/")[-1] + drive_id_list.append({'Id': drive_id}) + volume_result['Linked_drives'] = drive_id_list + volume_results.append(volume_result) + volumes 
= {'Controller': controller_name, + 'Volumes': volume_results} + result["entries"].append(volumes) + else: + return {'ret': False, 'msg': "Storage resource not found"} + + return result + + def get_multi_volume_inventory(self): + return self.aggregate_systems(self.get_volume_inventory) + + def restart_manager_gracefully(self): + result = {} + key = "Actions" + + # Search for 'key' entry and extract URI from it + response = self.get_request(self.root_uri + self.manager_uri) + if response['ret'] is False: + return response + result['ret'] = True + data = response['data'] + action_uri = data[key]["#Manager.Reset"]["target"] + + payload = {'ResetType': 'GracefulRestart'} + response = self.post_request(self.root_uri + action_uri, payload) + if response['ret'] is False: + return response + return {'ret': True} + + def manage_indicator_led(self, command): + result = {} + key = 'IndicatorLED' + + payloads = {'IndicatorLedOn': 'Lit', 'IndicatorLedOff': 'Off', "IndicatorLedBlink": 'Blinking'} + + result = {} + response = self.get_request(self.root_uri + self.chassis_uri) + if response['ret'] is False: + return response + result['ret'] = True + data = response['data'] + if key not in data: + return {'ret': False, 'msg': "Key %s not found" % key} + + if command in payloads.keys(): + payload = {'IndicatorLED': payloads[command]} + response = self.patch_request(self.root_uri + self.chassis_uri, payload) + if response['ret'] is False: + return response + else: + return {'ret': False, 'msg': 'Invalid command'} + + return result + + def _map_reset_type(self, reset_type, allowable_values): + equiv_types = { + 'On': 'ForceOn', + 'ForceOn': 'On', + 'ForceOff': 'GracefulShutdown', + 'GracefulShutdown': 'ForceOff', + 'GracefulRestart': 'ForceRestart', + 'ForceRestart': 'GracefulRestart' + } + + if reset_type in allowable_values: + return reset_type + if reset_type not in equiv_types: + return reset_type + mapped_type = equiv_types[reset_type] + if mapped_type in allowable_values: + return mapped_type + return reset_type + + def manage_system_power(self, command): + key = "Actions" + reset_type_values = ['On', 'ForceOff', 'GracefulShutdown', + 'GracefulRestart', 'ForceRestart', 'Nmi', + 'ForceOn', 'PushPowerButton', 'PowerCycle'] + + # command should be PowerOn, PowerForceOff, etc. 
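+ # For example (values illustrative): command 'PowerGracefulRestart'
+ # yields ResetType 'GracefulRestart', 'PowerForceOff' yields 'ForceOff',
+ # and the convenience command 'PowerReboot' is mapped to
+ # 'GracefulRestart' just below.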
+ if not command.startswith('Power'): + return {'ret': False, 'msg': 'Invalid Command (%s)' % command} + reset_type = command[5:] + + # map Reboot to a ResetType that does a reboot + if reset_type == 'Reboot': + reset_type = 'GracefulRestart' + + if reset_type not in reset_type_values: + return {'ret': False, 'msg': 'Invalid Command (%s)' % command} + + # read the system resource and get the current power state + response = self.get_request(self.root_uri + self.systems_uri) + if response['ret'] is False: + return response + data = response['data'] + power_state = data.get('PowerState') + + # if power is already in target state, nothing to do + if power_state == "On" and reset_type in ['On', 'ForceOn']: + return {'ret': True, 'changed': False} + if power_state == "Off" and reset_type in ['GracefulShutdown', 'ForceOff']: + return {'ret': True, 'changed': False} + + # get the #ComputerSystem.Reset Action and target URI + if key not in data or '#ComputerSystem.Reset' not in data[key]: + return {'ret': False, 'msg': 'Action #ComputerSystem.Reset not found'} + reset_action = data[key]['#ComputerSystem.Reset'] + if 'target' not in reset_action: + return {'ret': False, + 'msg': 'target URI missing from Action #ComputerSystem.Reset'} + action_uri = reset_action['target'] + + # get AllowableValues from ActionInfo + allowable_values = None + if '@Redfish.ActionInfo' in reset_action: + action_info_uri = reset_action.get('@Redfish.ActionInfo') + response = self.get_request(self.root_uri + action_info_uri) + if response['ret'] is True: + data = response['data'] + if 'Parameters' in data: + params = data['Parameters'] + for param in params: + if param.get('Name') == 'ResetType': + allowable_values = param.get('AllowableValues') + break + + # fallback to @Redfish.AllowableValues annotation + if allowable_values is None: + allowable_values = reset_action.get('ResetType@Redfish.AllowableValues', []) + + # map ResetType to an allowable value if needed + if reset_type not in allowable_values: + reset_type = self._map_reset_type(reset_type, allowable_values) + + # define payload + payload = {'ResetType': reset_type} + + # POST to Action URI + response = self.post_request(self.root_uri + action_uri, payload) + if response['ret'] is False: + return response + return {'ret': True, 'changed': True} + + def _find_account_uri(self, username=None, acct_id=None): + if not any((username, acct_id)): + return {'ret': False, 'msg': + 'Must provide either account_id or account_username'} + + response = self.get_request(self.root_uri + self.accounts_uri) + if response['ret'] is False: + return response + data = response['data'] + + uris = [a.get('@odata.id') for a in data.get('Members', []) if + a.get('@odata.id')] + for uri in uris: + response = self.get_request(self.root_uri + uri) + if response['ret'] is False: + continue + data = response['data'] + headers = response['headers'] + if username: + if username == data.get('UserName'): + return {'ret': True, 'data': data, + 'headers': headers, 'uri': uri} + if acct_id: + if acct_id == data.get('Id'): + return {'ret': True, 'data': data, + 'headers': headers, 'uri': uri} + + return {'ret': False, 'no_match': True, 'msg': + 'No account with the given account_id or account_username found'} + + def _find_empty_account_slot(self): + response = self.get_request(self.root_uri + self.accounts_uri) + if response['ret'] is False: + return response + data = response['data'] + + uris = [a.get('@odata.id') for a in data.get('Members', []) if + a.get('@odata.id')] + if uris: + # first 
slot may be reserved, so move to end of list + uris += [uris.pop(0)] + for uri in uris: + response = self.get_request(self.root_uri + uri) + if response['ret'] is False: + continue + data = response['data'] + headers = response['headers'] + if data.get('UserName') == "" and not data.get('Enabled', True): + return {'ret': True, 'data': data, + 'headers': headers, 'uri': uri} + + return {'ret': False, 'no_match': True, 'msg': + 'No empty account slot found'} + + def list_users(self): + result = {} + # listing all users has always been slower than other operations, why? + user_list = [] + users_results = [] + # Get these entries, but does not fail if not found + properties = ['Id', 'Name', 'UserName', 'RoleId', 'Locked', 'Enabled'] + + response = self.get_request(self.root_uri + self.accounts_uri) + if response['ret'] is False: + return response + result['ret'] = True + data = response['data'] + + for users in data.get('Members', []): + user_list.append(users[u'@odata.id']) # user_list[] are URIs + + # for each user, get details + for uri in user_list: + user = {} + response = self.get_request(self.root_uri + uri) + if response['ret'] is False: + return response + data = response['data'] + + for property in properties: + if property in data: + user[property] = data[property] + + users_results.append(user) + result["entries"] = users_results + return result + + def add_user_via_patch(self, user): + if user.get('account_id'): + # If Id slot specified, use it + response = self._find_account_uri(acct_id=user.get('account_id')) + else: + # Otherwise find first empty slot + response = self._find_empty_account_slot() + + if not response['ret']: + return response + uri = response['uri'] + payload = {} + if user.get('account_username'): + payload['UserName'] = user.get('account_username') + if user.get('account_password'): + payload['Password'] = user.get('account_password') + if user.get('account_roleid'): + payload['RoleId'] = user.get('account_roleid') + response = self.patch_request(self.root_uri + uri, payload) + if response['ret'] is False: + return response + return {'ret': True} + + def add_user(self, user): + if not user.get('account_username'): + return {'ret': False, 'msg': + 'Must provide account_username for AddUser command'} + + response = self._find_account_uri(username=user.get('account_username')) + if response['ret']: + # account_username already exists, nothing to do + return {'ret': True, 'changed': False} + + response = self.get_request(self.root_uri + self.accounts_uri) + if not response['ret']: + return response + headers = response['headers'] + + if 'allow' in headers: + methods = [m.strip() for m in headers.get('allow').split(',')] + if 'POST' not in methods: + # if Allow header present and POST not listed, add via PATCH + return self.add_user_via_patch(user) + + payload = {} + if user.get('account_username'): + payload['UserName'] = user.get('account_username') + if user.get('account_password'): + payload['Password'] = user.get('account_password') + if user.get('account_roleid'): + payload['RoleId'] = user.get('account_roleid') + + response = self.post_request(self.root_uri + self.accounts_uri, payload) + if not response['ret']: + if response.get('status') == 405: + # if POST returned a 405, try to add via PATCH + return self.add_user_via_patch(user) + else: + return response + return {'ret': True} + + def enable_user(self, user): + response = self._find_account_uri(username=user.get('account_username'), + acct_id=user.get('account_id')) + if not response['ret']: + return 
response + uri = response['uri'] + data = response['data'] + + if data.get('Enabled', True): + # account already enabled, nothing to do + return {'ret': True, 'changed': False} + + payload = {'Enabled': True} + response = self.patch_request(self.root_uri + uri, payload) + if response['ret'] is False: + return response + return {'ret': True} + + def delete_user_via_patch(self, user, uri=None, data=None): + if not uri: + response = self._find_account_uri(username=user.get('account_username'), + acct_id=user.get('account_id')) + if not response['ret']: + return response + uri = response['uri'] + data = response['data'] + + if data and data.get('UserName') == '' and not data.get('Enabled', False): + # account UserName already cleared, nothing to do + return {'ret': True, 'changed': False} + + payload = {'UserName': ''} + if data.get('Enabled', False): + payload['Enabled'] = False + response = self.patch_request(self.root_uri + uri, payload) + if response['ret'] is False: + return response + return {'ret': True} + + def delete_user(self, user): + response = self._find_account_uri(username=user.get('account_username'), + acct_id=user.get('account_id')) + if not response['ret']: + if response.get('no_match'): + # account does not exist, nothing to do + return {'ret': True, 'changed': False} + else: + # some error encountered + return response + + uri = response['uri'] + headers = response['headers'] + data = response['data'] + + if 'allow' in headers: + methods = [m.strip() for m in headers.get('allow').split(',')] + if 'DELETE' not in methods: + # if Allow header present and DELETE not listed, del via PATCH + return self.delete_user_via_patch(user, uri=uri, data=data) + + response = self.delete_request(self.root_uri + uri) + if not response['ret']: + if response.get('status') == 405: + # if DELETE returned a 405, try to delete via PATCH + return self.delete_user_via_patch(user, uri=uri, data=data) + else: + return response + return {'ret': True} + + def disable_user(self, user): + response = self._find_account_uri(username=user.get('account_username'), + acct_id=user.get('account_id')) + if not response['ret']: + return response + uri = response['uri'] + data = response['data'] + + if not data.get('Enabled'): + # account already disabled, nothing to do + return {'ret': True, 'changed': False} + + payload = {'Enabled': False} + response = self.patch_request(self.root_uri + uri, payload) + if response['ret'] is False: + return response + return {'ret': True} + + def update_user_role(self, user): + if not user.get('account_roleid'): + return {'ret': False, 'msg': + 'Must provide account_roleid for UpdateUserRole command'} + + response = self._find_account_uri(username=user.get('account_username'), + acct_id=user.get('account_id')) + if not response['ret']: + return response + uri = response['uri'] + data = response['data'] + + if data.get('RoleId') == user.get('account_roleid'): + # account already has RoleId , nothing to do + return {'ret': True, 'changed': False} + + payload = {'RoleId': user.get('account_roleid')} + response = self.patch_request(self.root_uri + uri, payload) + if response['ret'] is False: + return response + return {'ret': True} + + def update_user_password(self, user): + response = self._find_account_uri(username=user.get('account_username'), + acct_id=user.get('account_id')) + if not response['ret']: + return response + uri = response['uri'] + payload = {'Password': user['account_password']} + response = self.patch_request(self.root_uri + uri, payload) + if response['ret'] is 
False: + return response + return {'ret': True} + + def update_user_name(self, user): + if not user.get('account_updatename'): + return {'ret': False, 'msg': + 'Must provide account_updatename for UpdateUserName command'} + + response = self._find_account_uri(username=user.get('account_username'), + acct_id=user.get('account_id')) + if not response['ret']: + return response + uri = response['uri'] + payload = {'UserName': user['account_updatename']} + response = self.patch_request(self.root_uri + uri, payload) + if response['ret'] is False: + return response + return {'ret': True} + + def update_accountservice_properties(self, user): + if user.get('account_properties') is None: + return {'ret': False, 'msg': + 'Must provide account_properties for UpdateAccountServiceProperties command'} + account_properties = user.get('account_properties') + + # Find AccountService + response = self.get_request(self.root_uri + self.service_root) + if response['ret'] is False: + return response + data = response['data'] + if 'AccountService' not in data: + return {'ret': False, 'msg': "AccountService resource not found"} + accountservice_uri = data["AccountService"]["@odata.id"] + + # Check that each requested property is supported + response = self.get_request(self.root_uri + accountservice_uri) + if response['ret'] is False: + return response + data = response['data'] + for property_name in account_properties.keys(): + if property_name not in data: + return {'ret': False, 'msg': + 'property %s not supported' % property_name} + + # if the properties already match, nothing to do + need_change = False + for property_name in account_properties.keys(): + if account_properties[property_name] != data[property_name]: + need_change = True + break + + if not need_change: + return {'ret': True, 'changed': False, 'msg': "AccountService properties already set"} + + payload = account_properties + response = self.patch_request(self.root_uri + accountservice_uri, payload) + if response['ret'] is False: + return response + return {'ret': True, 'changed': True, 'msg': "Modified AccountService properties"} + + def get_sessions(self): + result = {} + # list all active sessions and their details
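+ # A minimal sketch of the structure this method builds and returns,
+ # assuming a single active session (all values illustrative):
+ # {'ret': True, 'entries': [{'Id': '1', 'Name': 'User Session',
+ #                            'UserName': 'admin'}]}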
+ session_list = [] + sessions_results = [] + # Get these entries, but does not fail if not found + properties = ['Description', 'Id', 'Name', 'UserName'] + + response = self.get_request(self.root_uri + self.sessions_uri) + if response['ret'] is False: + return response + result['ret'] = True + data = response['data'] + + for sessions in data[u'Members']: + session_list.append(sessions[u'@odata.id'])   # session_list[] are URIs + + # for each session, get details + for uri in session_list: + session = {} + response = self.get_request(self.root_uri + uri) + if response['ret'] is False: + return response + data = response['data'] + + for property in properties: + if property in data: + session[property] = data[property] + + sessions_results.append(session) + result["entries"] = sessions_results + return result + + def clear_sessions(self): + response = self.get_request(self.root_uri + self.sessions_uri) + if response['ret'] is False: + return response + data = response['data'] + + # if no active sessions, return as success + if data['Members@odata.count'] == 0: + return {'ret': True, 'changed': False, 'msg': "There are no active sessions"} + + # loop to delete every active session + for session in data[u'Members']: + response = self.delete_request(self.root_uri + session[u'@odata.id']) + if response['ret'] is False: + return response + + return {'ret': True, 'changed': True, 'msg': "Cleared all sessions successfully"} + + def get_firmware_update_capabilities(self): + result = {} + response = self.get_request(self.root_uri + self.update_uri) + if response['ret'] is False: + return response + + result['ret'] = True + + result['entries'] = {} + + data = response['data'] + + if "Actions" in data: + actions = data['Actions'] + if len(actions) > 0: + for key in actions.keys(): + action = actions.get(key) + if 'title' in action: + title = action['title'] + else: + title = key + result['entries'][title] = action.get('TransferProtocol@Redfish.AllowableValues', + ["Key TransferProtocol@Redfish.AllowableValues not found"]) + else: + return {'ret': False, 'msg': "Actions list is empty."} + else: + return {'ret': False, 'msg': "Key Actions not found."} + return result + + def _software_inventory(self, uri): + result = {} + response = self.get_request(self.root_uri + uri) + if response['ret'] is False: + return response + result['ret'] = True + data = response['data'] + + result['entries'] = [] + for member in data[u'Members']: + uri = self.root_uri + member[u'@odata.id'] + # Get details for each software or firmware member + response = self.get_request(uri) + if response['ret'] is False: + return response + result['ret'] = True + data = response['data'] + software = {} + # Get these standard properties if present + for key in ['Name', 'Id', 'Status', 'Version', 'Updateable', + 'SoftwareId', 'LowestSupportedVersion', 'Manufacturer', + 'ReleaseDate']: + if key in data: + software[key] = data.get(key) + result['entries'].append(software) + return result + + def get_firmware_inventory(self): + if self.firmware_uri is None: + return {'ret': False, 'msg': 'No FirmwareInventory resource found'} + else: + return self._software_inventory(self.firmware_uri) + + def get_software_inventory(self): + if self.software_uri is None: + return {'ret': False, 'msg': 'No SoftwareInventory resource found'} + else: + return self._software_inventory(self.software_uri) + + def _get_allowable_values(self, action, name, default_values=None): + if default_values is None: + default_values = [] + allowable_values = None
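+ # A minimal sketch of an ActionInfo payload as consulted below, assuming
+ # the service publishes one (names and values illustrative):
+ # {'Parameters': [{'Name': 'TransferProtocol',
+ #                  'AllowableValues': ['HTTP', 'HTTPS', 'NFS']}]}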
# get Allowable values from ActionInfo + if '@Redfish.ActionInfo' in action: + action_info_uri = action.get('@Redfish.ActionInfo') + response = self.get_request(self.root_uri + action_info_uri) + if response['ret'] is True: + data = response['data'] + if 'Parameters' in data: + params = data['Parameters'] + for param in params: + if param.get('Name') == name: + allowable_values = param.get('AllowableValues') + break + # fallback to @Redfish.AllowableValues annotation + if allowable_values is None: + prop = '%s@Redfish.AllowableValues' % name + if prop in action: + allowable_values = action[prop] + # fallback to default values + if allowable_values is None: + allowable_values = default_values + return allowable_values + + def simple_update(self, update_opts): + image_uri = update_opts.get('update_image_uri') + protocol = update_opts.get('update_protocol') + targets = update_opts.get('update_targets') + creds = update_opts.get('update_creds') + + if not image_uri: + return {'ret': False, 'msg': + 'Must specify update_image_uri for the SimpleUpdate command'} + + response = self.get_request(self.root_uri + self.update_uri) + if response['ret'] is False: + return response + data = response['data'] + if 'Actions' not in data: + return {'ret': False, 'msg': 'Service does not support SimpleUpdate'} + if '#UpdateService.SimpleUpdate' not in data['Actions']: + return {'ret': False, 'msg': 'Service does not support SimpleUpdate'} + action = data['Actions']['#UpdateService.SimpleUpdate'] + if 'target' not in action: + return {'ret': False, 'msg': 'Service does not support SimpleUpdate'} + update_uri = action['target'] + if protocol: + default_values = ['CIFS', 'FTP', 'SFTP', 'HTTP', 'HTTPS', + 'SCP', 'TFTP', 'OEM', 'NFS'] + allowable_values = self._get_allowable_values(action, + 'TransferProtocol', + default_values) + if protocol not in allowable_values: + return {'ret': False, + 'msg': 'Specified update_protocol (%s) not supported ' + 'by service. Supported protocols: %s' % + (protocol, allowable_values)} + if targets: + allowable_values = self._get_allowable_values(action, 'Targets') + if allowable_values: + for target in targets: + if target not in allowable_values: + return {'ret': False, + 'msg': 'Specified target (%s) not supported ' + 'by service.
Supported targets: %s' % + (target, allowable_values)} + + payload = { + 'ImageURI': image_uri + } + if protocol: + payload["TransferProtocol"] = protocol + if targets: + payload["Targets"] = targets + if creds: + if creds.get('username'): + payload["Username"] = creds.get('username') + if creds.get('password'): + payload["Password"] = creds.get('password') + response = self.post_request(self.root_uri + update_uri, payload) + if response['ret'] is False: + return response + return {'ret': True, 'changed': True, + 'msg': "SimpleUpdate requested"} + + def get_bios_attributes(self, systems_uri): + result = {} + bios_attributes = {} + key = "Bios" + + # Search for 'key' entry and extract URI from it + response = self.get_request(self.root_uri + systems_uri) + if response['ret'] is False: + return response + result['ret'] = True + data = response['data'] + + if key not in data: + return {'ret': False, 'msg': "Key %s not found" % key} + + bios_uri = data[key]["@odata.id"] + + response = self.get_request(self.root_uri + bios_uri) + if response['ret'] is False: + return response + result['ret'] = True + data = response['data'] + for attribute in data[u'Attributes'].items(): + bios_attributes[attribute[0]] = attribute[1] + result["entries"] = bios_attributes + return result + + def get_multi_bios_attributes(self): + return self.aggregate_systems(self.get_bios_attributes) + + def _get_boot_options_dict(self, boot): + # Get these entries from BootOption, if present + properties = ['DisplayName', 'BootOptionReference'] + + # Retrieve BootOptions if present + if 'BootOptions' in boot and '@odata.id' in boot['BootOptions']: + boot_options_uri = boot['BootOptions']["@odata.id"] + # Get BootOptions resource + response = self.get_request(self.root_uri + boot_options_uri) + if response['ret'] is False: + return {} + data = response['data'] + + # Retrieve Members array + if 'Members' not in data: + return {} + members = data['Members'] + else: + members = [] + + # Build dict of BootOptions keyed by BootOptionReference + boot_options_dict = {} + for member in members: + if '@odata.id' not in member: + return {} + boot_option_uri = member['@odata.id'] + response = self.get_request(self.root_uri + boot_option_uri) + if response['ret'] is False: + return {} + data = response['data'] + if 'BootOptionReference' not in data: + return {} + boot_option_ref = data['BootOptionReference'] + + # fetch the props to display for this boot device + boot_props = {} + for prop in properties: + if prop in data: + boot_props[prop] = data[prop] + + boot_options_dict[boot_option_ref] = boot_props + + return boot_options_dict + + def get_boot_order(self, systems_uri): + result = {} + + # Retrieve System resource + response = self.get_request(self.root_uri + systems_uri) + if response['ret'] is False: + return response + result['ret'] = True + data = response['data'] + + # Confirm needed Boot properties are present + if 'Boot' not in data or 'BootOrder' not in data['Boot']: + return {'ret': False, 'msg': "Key BootOrder not found"} + + boot = data['Boot'] + boot_order = boot['BootOrder'] + boot_options_dict = self._get_boot_options_dict(boot) + + # Build boot device list + boot_device_list = [] + for ref in boot_order: + boot_device_list.append( + boot_options_dict.get(ref, {'BootOptionReference': ref})) + + result["entries"] = boot_device_list + return result + + def get_multi_boot_order(self): + return self.aggregate_systems(self.get_boot_order) + + def get_boot_override(self, systems_uri): + result = {} + + properties = 
["BootSourceOverrideEnabled", "BootSourceOverrideTarget", + "BootSourceOverrideMode", "UefiTargetBootSourceOverride", "BootSourceOverrideTarget@Redfish.AllowableValues"] + + response = self.get_request(self.root_uri + systems_uri) + if response['ret'] is False: + return response + result['ret'] = True + data = response['data'] + + if 'Boot' not in data: + return {'ret': False, 'msg': "Key Boot not found"} + + boot = data['Boot'] + + boot_overrides = {} + if "BootSourceOverrideEnabled" in boot: + if boot["BootSourceOverrideEnabled"] is not False: + for property in properties: + if property in boot: + if boot[property] is not None: + boot_overrides[property] = boot[property] + else: + return {'ret': False, 'msg': "No boot override is enabled."} + + result['entries'] = boot_overrides + return result + + def get_multi_boot_override(self): + return self.aggregate_systems(self.get_boot_override) + + def set_bios_default_settings(self): + result = {} + key = "Bios" + + # Search for 'key' entry and extract URI from it + response = self.get_request(self.root_uri + self.systems_uri) + if response['ret'] is False: + return response + result['ret'] = True + data = response['data'] + + if key not in data: + return {'ret': False, 'msg': "Key %s not found" % key} + + bios_uri = data[key]["@odata.id"] + + # Extract proper URI + response = self.get_request(self.root_uri + bios_uri) + if response['ret'] is False: + return response + result['ret'] = True + data = response['data'] + reset_bios_settings_uri = data["Actions"]["#Bios.ResetBios"]["target"] + + response = self.post_request(self.root_uri + reset_bios_settings_uri, {}) + if response['ret'] is False: + return response + return {'ret': True, 'changed': True, 'msg': "Set BIOS to default settings"} + + def set_one_time_boot_device(self, bootdevice, uefi_target, boot_next): + result = {} + key = "Boot" + + if not bootdevice: + return {'ret': False, + 'msg': "bootdevice option required for SetOneTimeBoot"} + + # Search for 'key' entry and extract URI from it + response = self.get_request(self.root_uri + self.systems_uri) + if response['ret'] is False: + return response + result['ret'] = True + data = response['data'] + + if key not in data: + return {'ret': False, 'msg': "Key %s not found" % key} + + boot = data[key] + + annotation = 'BootSourceOverrideTarget@Redfish.AllowableValues' + if annotation in boot: + allowable_values = boot[annotation] + if isinstance(allowable_values, list) and bootdevice not in allowable_values: + return {'ret': False, + 'msg': "Boot device %s not in list of allowable values (%s)" % + (bootdevice, allowable_values)} + + # read existing values + enabled = boot.get('BootSourceOverrideEnabled') + target = boot.get('BootSourceOverrideTarget') + cur_uefi_target = boot.get('UefiTargetBootSourceOverride') + cur_boot_next = boot.get('BootNext') + + if bootdevice == 'UefiTarget': + if not uefi_target: + return {'ret': False, + 'msg': "uefi_target option required to SetOneTimeBoot for UefiTarget"} + if enabled == 'Once' and target == bootdevice and uefi_target == cur_uefi_target: + # If properties are already set, no changes needed + return {'ret': True, 'changed': False} + payload = { + 'Boot': { + 'BootSourceOverrideEnabled': 'Once', + 'BootSourceOverrideTarget': bootdevice, + 'UefiTargetBootSourceOverride': uefi_target + } + } + elif bootdevice == 'UefiBootNext': + if not boot_next: + return {'ret': False, + 'msg': "boot_next option required to SetOneTimeBoot for UefiBootNext"} + if enabled == 'Once' and target == bootdevice and 
boot_next == cur_boot_next: + # If properties are already set, no changes needed + return {'ret': True, 'changed': False} + payload = { + 'Boot': { + 'BootSourceOverrideEnabled': 'Once', + 'BootSourceOverrideTarget': bootdevice, + 'BootNext': boot_next + } + } + else: + if enabled == 'Once' and target == bootdevice: + # If properties are already set, no changes needed + return {'ret': True, 'changed': False} + payload = { + 'Boot': { + 'BootSourceOverrideEnabled': 'Once', + 'BootSourceOverrideTarget': bootdevice + } + } + + response = self.patch_request(self.root_uri + self.systems_uri, payload) + if response['ret'] is False: + return response + return {'ret': True, 'changed': True} + + def set_bios_attributes(self, attributes): + result = {} + key = "Bios" + + # Search for 'key' entry and extract URI from it + response = self.get_request(self.root_uri + self.systems_uri) + if response['ret'] is False: + return response + result['ret'] = True + data = response['data'] + + if key not in data: + return {'ret': False, 'msg': "Key %s not found" % key} + + bios_uri = data[key]["@odata.id"] + + # Extract proper URI + response = self.get_request(self.root_uri + bios_uri) + if response['ret'] is False: + return response + result['ret'] = True + data = response['data'] + + # Make a copy of the attributes dict + attrs_to_patch = dict(attributes) + + # Check the attributes + for attr in attributes: + if attr not in data[u'Attributes']: + return {'ret': False, 'msg': "BIOS attribute %s not found" % attr} + # If already set to requested value, remove it from PATCH payload + if data[u'Attributes'][attr] == attributes[attr]: + del attrs_to_patch[attr] + + # Return success w/ changed=False if no attrs need to be changed + if not attrs_to_patch: + return {'ret': True, 'changed': False, + 'msg': "BIOS attributes already set"} + + # Get the SettingsObject URI + set_bios_attr_uri = data["@Redfish.Settings"]["SettingsObject"]["@odata.id"] + + # Construct payload and issue PATCH command + payload = {"Attributes": attrs_to_patch} + response = self.patch_request(self.root_uri + set_bios_attr_uri, payload) + if response['ret'] is False: + return response + return {'ret': True, 'changed': True, 'msg': "Modified BIOS attribute"} + + def set_boot_order(self, boot_list): + if not boot_list: + return {'ret': False, + 'msg': "boot_order list required for SetBootOrder command"} + + systems_uri = self.systems_uri + response = self.get_request(self.root_uri + systems_uri) + if response['ret'] is False: + return response + data = response['data'] + + # Confirm needed Boot properties are present + if 'Boot' not in data or 'BootOrder' not in data['Boot']: + return {'ret': False, 'msg': "Key BootOrder not found"} + + boot = data['Boot'] + boot_order = boot['BootOrder'] + boot_options_dict = self._get_boot_options_dict(boot) + + # validate boot_list against BootOptionReferences if available + if boot_options_dict: + boot_option_references = boot_options_dict.keys() + for ref in boot_list: + if ref not in boot_option_references: + return {'ret': False, + 'msg': "BootOptionReference %s not found in BootOptions" % ref} + + # If requested BootOrder is already set, nothing to do + if boot_order == boot_list: + return {'ret': True, 'changed': False, + 'msg': "BootOrder already set to %s" % boot_list} + + payload = { + 'Boot': { + 'BootOrder': boot_list + } + } + response = self.patch_request(self.root_uri + systems_uri, payload) + if response['ret'] is False: + return response + return {'ret': True, 'changed': True, 'msg': "BootOrder 
set"} + + def set_default_boot_order(self): + systems_uri = self.systems_uri + response = self.get_request(self.root_uri + systems_uri) + if response['ret'] is False: + return response + data = response['data'] + + # get the #ComputerSystem.SetDefaultBootOrder Action and target URI + action = '#ComputerSystem.SetDefaultBootOrder' + if 'Actions' not in data or action not in data['Actions']: + return {'ret': False, 'msg': 'Action %s not found' % action} + if 'target' not in data['Actions'][action]: + return {'ret': False, + 'msg': 'target URI missing from Action %s' % action} + action_uri = data['Actions'][action]['target'] + + # POST to Action URI + payload = {} + response = self.post_request(self.root_uri + action_uri, payload) + if response['ret'] is False: + return response + return {'ret': True, 'changed': True, + 'msg': "BootOrder set to default"} + + def get_chassis_inventory(self): + result = {} + chassis_results = [] + + # Get these entries, but does not fail if not found + properties = ['ChassisType', 'PartNumber', 'AssetTag', + 'Manufacturer', 'IndicatorLED', 'SerialNumber', 'Model'] + + # Go through list + for chassis_uri in self.chassis_uris: + response = self.get_request(self.root_uri + chassis_uri) + if response['ret'] is False: + return response + result['ret'] = True + data = response['data'] + chassis_result = {} + for property in properties: + if property in data: + chassis_result[property] = data[property] + chassis_results.append(chassis_result) + + result["entries"] = chassis_results + return result + + def get_fan_inventory(self): + result = {} + fan_results = [] + key = "Thermal" + # Get these entries, but does not fail if not found + properties = ['FanName', 'Reading', 'ReadingUnits', 'Status'] + + # Go through list + for chassis_uri in self.chassis_uris: + response = self.get_request(self.root_uri + chassis_uri) + if response['ret'] is False: + return response + result['ret'] = True + data = response['data'] + if key in data: + # match: found an entry for "Thermal" information = fans + thermal_uri = data[key]["@odata.id"] + response = self.get_request(self.root_uri + thermal_uri) + if response['ret'] is False: + return response + result['ret'] = True + data = response['data'] + + for device in data[u'Fans']: + fan = {} + for property in properties: + if property in device: + fan[property] = device[property] + fan_results.append(fan) + result["entries"] = fan_results + return result + + def get_chassis_power(self): + result = {} + key = "Power" + + # Get these entries, but does not fail if not found + properties = ['Name', 'PowerAllocatedWatts', + 'PowerAvailableWatts', 'PowerCapacityWatts', + 'PowerConsumedWatts', 'PowerMetrics', + 'PowerRequestedWatts', 'RelatedItem', 'Status'] + + chassis_power_results = [] + # Go through list + for chassis_uri in self.chassis_uris: + chassis_power_result = {} + response = self.get_request(self.root_uri + chassis_uri) + if response['ret'] is False: + return response + result['ret'] = True + data = response['data'] + if key in data: + response = self.get_request(self.root_uri + data[key]['@odata.id']) + data = response['data'] + if 'PowerControl' in data: + if len(data['PowerControl']) > 0: + data = data['PowerControl'][0] + for property in properties: + if property in data: + chassis_power_result[property] = data[property] + else: + return {'ret': False, 'msg': 'Key PowerControl not found.'} + chassis_power_results.append(chassis_power_result) + else: + return {'ret': False, 'msg': 'Key Power not found.'} + + result['entries'] = 
chassis_power_results + return result + + def get_chassis_thermals(self): + result = {} + sensors = [] + key = "Thermal" + + # Get these entries, but does not fail if not found + properties = ['Name', 'PhysicalContext', 'UpperThresholdCritical', + 'UpperThresholdFatal', 'UpperThresholdNonCritical', + 'LowerThresholdCritical', 'LowerThresholdFatal', + 'LowerThresholdNonCritical', 'MaxReadingRangeTemp', + 'MinReadingRangeTemp', 'ReadingCelsius', 'RelatedItem', + 'SensorNumber'] + + # Go through list + for chassis_uri in self.chassis_uris: + response = self.get_request(self.root_uri + chassis_uri) + if response['ret'] is False: + return response + result['ret'] = True + data = response['data'] + if key in data: + thermal_uri = data[key]["@odata.id"] + response = self.get_request(self.root_uri + thermal_uri) + if response['ret'] is False: + return response + result['ret'] = True + data = response['data'] + if "Temperatures" in data: + for sensor in data[u'Temperatures']: + sensor_result = {} + for property in properties: + if property in sensor: + if sensor[property] is not None: + sensor_result[property] = sensor[property] + sensors.append(sensor_result) + + if not sensors: + return {'ret': False, 'msg': 'Key Temperatures was not found.'} + + result['entries'] = sensors + return result + + def get_cpu_inventory(self, systems_uri): + result = {} + cpu_list = [] + cpu_results = [] + key = "Processors" + # Get these entries, but does not fail if not found + properties = ['Id', 'Manufacturer', 'Model', 'MaxSpeedMHz', 'TotalCores', + 'TotalThreads', 'Status'] + + # Search for 'key' entry and extract URI from it + response = self.get_request(self.root_uri + systems_uri) + if response['ret'] is False: + return response + result['ret'] = True + data = response['data'] + + if key not in data: + return {'ret': False, 'msg': "Key %s not found" % key} + + processors_uri = data[key]["@odata.id"] + + # Get a list of all CPUs and build respective URIs + response = self.get_request(self.root_uri + processors_uri) + if response['ret'] is False: + return response + result['ret'] = True + data = response['data'] + + for cpu in data[u'Members']: + cpu_list.append(cpu[u'@odata.id']) + + for c in cpu_list: + cpu = {} + uri = self.root_uri + c + response = self.get_request(uri) + if response['ret'] is False: + return response + data = response['data'] + + for property in properties: + if property in data: + cpu[property] = data[property] + + cpu_results.append(cpu) + result["entries"] = cpu_results + return result + + def get_multi_cpu_inventory(self): + return self.aggregate_systems(self.get_cpu_inventory) + + def get_memory_inventory(self, systems_uri): + result = {} + memory_list = [] + memory_results = [] + key = "Memory" + # Get these entries, but does not fail if not found + properties = ['SerialNumber', 'MemoryDeviceType', 'PartNumber', + 'MemoryLocation', 'RankCount', 'CapacityMiB', 'OperatingMemoryModes', 'Status', 'Manufacturer', 'Name'] + + # Search for 'key' entry and extract URI from it + response = self.get_request(self.root_uri + systems_uri) + if response['ret'] is False: + return response + result['ret'] = True + data = response['data'] + + if key not in data: + return {'ret': False, 'msg': "Key %s not found" % key} + + memory_uri = data[key]["@odata.id"] + + # Get a list of all DIMMs and build respective URIs + response = self.get_request(self.root_uri + memory_uri) + if response['ret'] is False: + return response + result['ret'] = True + data = response['data'] + + for dimm in
data[u'Members']: + memory_list.append(dimm[u'@odata.id']) + + for m in memory_list: + dimm = {} + uri = self.root_uri + m + response = self.get_request(uri) + if response['ret'] is False: + return response + data = response['data'] + + if "Status" in data: + if "State" in data["Status"]: + if data["Status"]["State"] == "Absent": + continue + else: + continue + + for property in properties: + if property in data: + dimm[property] = data[property] + + memory_results.append(dimm) + result["entries"] = memory_results + return result + + def get_multi_memory_inventory(self): + return self.aggregate_systems(self.get_memory_inventory) + + def get_nic_inventory(self, resource_uri): + result = {} + nic_list = [] + nic_results = [] + key = "EthernetInterfaces" + # Get these entries, but does not fail if not found + properties = ['Description', 'FQDN', 'IPv4Addresses', 'IPv6Addresses', + 'NameServers', 'MACAddress', 'PermanentMACAddress', + 'SpeedMbps', 'MTUSize', 'AutoNeg', 'Status'] + + response = self.get_request(self.root_uri + resource_uri) + if response['ret'] is False: + return response + result['ret'] = True + data = response['data'] + + if key not in data: + return {'ret': False, 'msg': "Key %s not found" % key} + + ethernetinterfaces_uri = data[key]["@odata.id"] + + # Get a list of all network controllers and build respective URIs + response = self.get_request(self.root_uri + ethernetinterfaces_uri) + if response['ret'] is False: + return response + result['ret'] = True + data = response['data'] + + for nic in data[u'Members']: + nic_list.append(nic[u'@odata.id']) + + for n in nic_list: + nic = {} + uri = self.root_uri + n + response = self.get_request(uri) + if response['ret'] is False: + return response + data = response['data'] + + for property in properties: + if property in data: + nic[property] = data[property] + + nic_results.append(nic) + result["entries"] = nic_results + return result + + def get_multi_nic_inventory(self, resource_type): + ret = True + entries = [] + + # Given resource_type, use the proper URI + if resource_type == 'Systems': + resource_uris = self.systems_uris + elif resource_type == 'Manager': + resource_uris = self.manager_uris + + for resource_uri in resource_uris: + inventory = self.get_nic_inventory(resource_uri) + ret = inventory.pop('ret') and ret + if 'entries' in inventory: + entries.append(({'resource_uri': resource_uri}, + inventory['entries'])) + return dict(ret=ret, entries=entries) + + def get_virtualmedia(self, resource_uri): + result = {} + virtualmedia_list = [] + virtualmedia_results = [] + key = "VirtualMedia" + # Get these entries, but does not fail if not found + properties = ['Description', 'ConnectedVia', 'Id', 'MediaTypes', + 'Image', 'ImageName', 'Name', 'WriteProtected', + 'TransferMethod', 'TransferProtocolType'] + + response = self.get_request(self.root_uri + resource_uri) + if response['ret'] is False: + return response + result['ret'] = True + data = response['data'] + + if key not in data: + return {'ret': False, 'msg': "Key %s not found" % key} + + virtualmedia_uri = data[key]["@odata.id"] + + # Get a list of all virtual media and build respective URIs + response = self.get_request(self.root_uri + virtualmedia_uri) + if response['ret'] is False: + return response + result['ret'] = True + data = response['data'] + + for virtualmedia in data[u'Members']: + virtualmedia_list.append(virtualmedia[u'@odata.id']) + + for n in virtualmedia_list: + virtualmedia = {} + uri = self.root_uri + n + response = self.get_request(uri) + if 
response['ret'] is False: + return response + data = response['data'] + + for property in properties: + if property in data: + virtualmedia[property] = data[property] + + virtualmedia_results.append(virtualmedia) + result["entries"] = virtualmedia_results + return result + + def get_multi_virtualmedia(self): + ret = True + entries = [] + + resource_uris = self.manager_uris + + for resource_uri in resource_uris: + virtualmedia = self.get_virtualmedia(resource_uri) + ret = virtualmedia.pop('ret') and ret + if 'entries' in virtualmedia: + entries.append(({'resource_uri': resource_uri}, + virtualmedia['entries'])) + return dict(ret=ret, entries=entries) + + def get_psu_inventory(self): + result = {} + psu_list = [] + psu_results = [] + key = "PowerSupplies" + # Get these entries, but does not fail if not found + properties = ['Name', 'Model', 'SerialNumber', 'PartNumber', 'Manufacturer', + 'FirmwareVersion', 'PowerCapacityWatts', 'PowerSupplyType', + 'Status'] + + # Get a list of all Chassis and build URIs, then get all PowerSupplies + # from each Power entry in the Chassis + chassis_uri_list = self.chassis_uris + for chassis_uri in chassis_uri_list: + response = self.get_request(self.root_uri + chassis_uri) + if response['ret'] is False: + return response + + result['ret'] = True + data = response['data'] + + if 'Power' in data: + power_uri = data[u'Power'][u'@odata.id'] + else: + continue + + response = self.get_request(self.root_uri + power_uri) + data = response['data'] + + if key not in data: + return {'ret': False, 'msg': "Key %s not found" % key} + + psu_list = data[key] + for psu in psu_list: + psu_not_present = False + psu_data = {} + for property in properties: + if property in psu: + if psu[property] is not None: + if property == 'Status': + if 'State' in psu[property]: + if psu[property]['State'] == 'Absent': + psu_not_present = True + psu_data[property] = psu[property] + if psu_not_present: + continue + psu_results.append(psu_data) + + result["entries"] = psu_results + if not result["entries"]: + return {'ret': False, 'msg': "No PowerSupply objects found"} + return result + + def get_multi_psu_inventory(self): + return self.aggregate_systems(self.get_psu_inventory) + + def get_system_inventory(self, systems_uri): + result = {} + inventory = {} + # Get these entries, but does not fail if not found + properties = ['Status', 'HostName', 'PowerState', 'Model', 'Manufacturer', + 'PartNumber', 'SystemType', 'AssetTag', 'ServiceTag', + 'SerialNumber', 'SKU', 'BiosVersion', 'MemorySummary', + 'ProcessorSummary', 'TrustedModules'] + + response = self.get_request(self.root_uri + systems_uri) + if response['ret'] is False: + return response + result['ret'] = True + data = response['data'] + + for property in properties: + if property in data: + inventory[property] = data[property] + + result["entries"] = inventory + return result + + def get_multi_system_inventory(self): + return self.aggregate_systems(self.get_system_inventory) + + def get_network_protocols(self): + result = {} + service_result = {} + # Find NetworkProtocol + response = self.get_request(self.root_uri + self.manager_uri) + if response['ret'] is False: + return response + data = response['data'] + if 'NetworkProtocol' not in data: + return {'ret': False, 'msg': "NetworkProtocol resource not found"} + networkprotocol_uri = data["NetworkProtocol"]["@odata.id"] + + response = self.get_request(self.root_uri + networkprotocol_uri) + if response['ret'] is False: + return response + data = response['data'] + protocol_services = 
['SNMP', 'VirtualMedia', 'Telnet', 'SSDP', 'IPMI', 'SSH', + 'KVMIP', 'NTP', 'HTTP', 'HTTPS', 'DHCP', 'DHCPv6', 'RDP', + 'RFB'] + for protocol_service in protocol_services: + if protocol_service in data.keys(): + service_result[protocol_service] = data[protocol_service] + + result['ret'] = True + result["entries"] = service_result + return result + + def set_network_protocols(self, manager_services): + # Check input data validity + protocol_services = ['SNMP', 'VirtualMedia', 'Telnet', 'SSDP', 'IPMI', 'SSH', + 'KVMIP', 'NTP', 'HTTP', 'HTTPS', 'DHCP', 'DHCPv6', 'RDP', + 'RFB'] + protocol_state_onlist = ['true', 'True', True, 'on', 1] + protocol_state_offlist = ['false', 'False', False, 'off', 0] + payload = {} + for service_name in manager_services.keys(): + if service_name not in protocol_services: + return {'ret': False, 'msg': "Service name %s is invalid" % service_name} + payload[service_name] = {} + for service_property in manager_services[service_name].keys(): + value = manager_services[service_name][service_property] + if service_property in ['ProtocolEnabled', 'protocolenabled']: + if value in protocol_state_onlist: + payload[service_name]['ProtocolEnabled'] = True + elif value in protocol_state_offlist: + payload[service_name]['ProtocolEnabled'] = False + else: + return {'ret': False, 'msg': "Value of property %s is invalid" % service_property} + elif service_property in ['port', 'Port']: + if isinstance(value, int): + payload[service_name]['Port'] = value + elif isinstance(value, str) and value.isdigit(): + payload[service_name]['Port'] = int(value) + else: + return {'ret': False, 'msg': "Value of property %s is invalid" % service_property} + else: + payload[service_name][service_property] = value + + # Find NetworkProtocol + response = self.get_request(self.root_uri + self.manager_uri) + if response['ret'] is False: + return response + data = response['data'] + if 'NetworkProtocol' not in data: + return {'ret': False, 'msg': "NetworkProtocol resource not found"} + networkprotocol_uri = data["NetworkProtocol"]["@odata.id"] + + # Check service property support or not + response = self.get_request(self.root_uri + networkprotocol_uri) + if response['ret'] is False: + return response + data = response['data'] + for service_name in payload.keys(): + if service_name not in data: + return {'ret': False, 'msg': "%s service not supported" % service_name} + for service_property in payload[service_name].keys(): + if service_property not in data[service_name]: + return {'ret': False, 'msg': "%s property for %s service not supported" % (service_property, service_name)} + + # if the protocol is already set, nothing to do + need_change = False + for service_name in payload.keys(): + for service_property in payload[service_name].keys(): + value = payload[service_name][service_property] + if value != data[service_name][service_property]: + need_change = True + break + + if not need_change: + return {'ret': True, 'changed': False, 'msg': "Manager NetworkProtocol services already set"} + + response = self.patch_request(self.root_uri + networkprotocol_uri, payload) + if response['ret'] is False: + return response + return {'ret': True, 'changed': True, 'msg': "Modified Manager NetworkProtocol services"} + + @staticmethod + def to_singular(resource_name): + if resource_name.endswith('ies'): + resource_name = resource_name[:-3] + 'y' + elif resource_name.endswith('s'): + resource_name = resource_name[:-1] + return resource_name + + def get_health_resource(self, subsystem, uri, health, expanded): + 
status = 'Status' + + if expanded: + d = expanded + else: + r = self.get_request(self.root_uri + uri) + if r.get('ret'): + d = r.get('data') + else: + return + + if 'Members' in d: # collections case + for m in d.get('Members'): + u = m.get('@odata.id') + r = self.get_request(self.root_uri + u) + if r.get('ret'): + p = r.get('data') + if p: + e = {self.to_singular(subsystem.lower()) + '_uri': u, + status: p.get(status, + "Status not available")} + health[subsystem].append(e) + else: # non-collections case + e = {self.to_singular(subsystem.lower()) + '_uri': uri, + status: d.get(status, + "Status not available")} + health[subsystem].append(e) + + def get_health_subsystem(self, subsystem, data, health): + if subsystem in data: + sub = data.get(subsystem) + if isinstance(sub, list): + for r in sub: + if '@odata.id' in r: + uri = r.get('@odata.id') + expanded = None + if '#' in uri and len(r) > 1: + expanded = r + self.get_health_resource(subsystem, uri, health, expanded) + elif isinstance(sub, dict): + if '@odata.id' in sub: + uri = sub.get('@odata.id') + self.get_health_resource(subsystem, uri, health, None) + elif 'Members' in data: + for m in data.get('Members'): + u = m.get('@odata.id') + r = self.get_request(self.root_uri + u) + if r.get('ret'): + d = r.get('data') + self.get_health_subsystem(subsystem, d, health) + + def get_health_report(self, category, uri, subsystems): + result = {} + health = {} + status = 'Status' + + # Get health status of top level resource + response = self.get_request(self.root_uri + uri) + if response['ret'] is False: + return response + result['ret'] = True + data = response['data'] + health[category] = {status: data.get(status, "Status not available")} + + # Get health status of subsystems + for sub in subsystems: + d = None + if sub.startswith('Links.'): # ex: Links.PCIeDevices + sub = sub[len('Links.'):] + d = data.get('Links', {}) + elif '.' 
in sub: # ex: Thermal.Fans + p, sub = sub.split('.') + u = data.get(p, {}).get('@odata.id') + if u: + r = self.get_request(self.root_uri + u) + if r['ret']: + d = r['data'] + if not d: + continue + else: # ex: Memory + d = data + health[sub] = [] + self.get_health_subsystem(sub, d, health) + if not health[sub]: + del health[sub] + + result["entries"] = health + return result + + def get_system_health_report(self, systems_uri): + subsystems = ['Processors', 'Memory', 'SimpleStorage', 'Storage', + 'EthernetInterfaces', 'NetworkInterfaces.NetworkPorts', + 'NetworkInterfaces.NetworkDeviceFunctions'] + return self.get_health_report('System', systems_uri, subsystems) + + def get_multi_system_health_report(self): + return self.aggregate_systems(self.get_system_health_report) + + def get_chassis_health_report(self, chassis_uri): + subsystems = ['Power.PowerSupplies', 'Thermal.Fans', + 'Links.PCIeDevices'] + return self.get_health_report('Chassis', chassis_uri, subsystems) + + def get_multi_chassis_health_report(self): + return self.aggregate_chassis(self.get_chassis_health_report) + + def get_manager_health_report(self, manager_uri): + subsystems = [] + return self.get_health_report('Manager', manager_uri, subsystems) + + def get_multi_manager_health_report(self): + return self.aggregate_managers(self.get_manager_health_report) + + def set_manager_nic(self, nic_addr, nic_config): + # Get EthernetInterface collection + response = self.get_request(self.root_uri + self.manager_uri) + if response['ret'] is False: + return response + data = response['data'] + if 'EthernetInterfaces' not in data: + return {'ret': False, 'msg': "EthernetInterfaces resource not found"} + ethernetinterfaces_uri = data["EthernetInterfaces"]["@odata.id"] + response = self.get_request(self.root_uri + ethernetinterfaces_uri) + if response['ret'] is False: + return response + data = response['data'] + uris = [a.get('@odata.id') for a in data.get('Members', []) if + a.get('@odata.id')] + + # Find target EthernetInterface + target_ethernet_uri = None + target_ethernet_current_setting = None + if nic_addr == 'null': + # Find root_uri matched EthernetInterface when nic_addr is not specified + nic_addr = (self.root_uri).split('/')[-1] + nic_addr = nic_addr.split(':')[0] # split port if existing + for uri in uris: + response = self.get_request(self.root_uri + uri) + if response['ret'] is False: + return response + data = response['data'] + if '"' + nic_addr + '"' in str(data) or "'" + nic_addr + "'" in str(data): + target_ethernet_uri = uri + target_ethernet_current_setting = data + break + if target_ethernet_uri is None: + return {'ret': False, 'msg': "No matched EthernetInterface found under Manager"} + + # Convert input to payload and check validity + payload = {} + for property in nic_config.keys(): + value = nic_config[property] + if property not in target_ethernet_current_setting: + return {'ret': False, 'msg': "Property %s in nic_config is invalid" % property} + if isinstance(value, dict): + if isinstance(target_ethernet_current_setting[property], dict): + payload[property] = value + elif isinstance(target_ethernet_current_setting[property], list): + payload[property] = list() + payload[property].append(value) + else: + return {'ret': False, 'msg': "Value of property %s in nic_config is invalid" % property} + else: + payload[property] = value + + # If no need change, nothing to do. 
If error detected, report it + need_change = False + for property in payload.keys(): + set_value = payload[property] + cur_value = target_ethernet_current_setting[property] + # type is simple(not dict/list) + if not isinstance(set_value, dict) and not isinstance(set_value, list): + if set_value != cur_value: + need_change = True + # type is dict + if isinstance(set_value, dict): + for subprop in payload[property].keys(): + if subprop not in target_ethernet_current_setting[property]: + return {'ret': False, 'msg': "Sub-property %s in nic_config is invalid" % subprop} + sub_set_value = payload[property][subprop] + sub_cur_value = target_ethernet_current_setting[property][subprop] + if sub_set_value != sub_cur_value: + need_change = True + # type is list + if isinstance(set_value, list): + for i in range(len(set_value)): + for subprop in payload[property][i].keys(): + if subprop not in target_ethernet_current_setting[property][i]: + return {'ret': False, 'msg': "Sub-property %s in nic_config is invalid" % subprop} + sub_set_value = payload[property][i][subprop] + sub_cur_value = target_ethernet_current_setting[property][i][subprop] + if sub_set_value != sub_cur_value: + need_change = True + + if not need_change: + return {'ret': True, 'changed': False, 'msg': "Manager NIC already set"} + + response = self.patch_request(self.root_uri + target_ethernet_uri, payload) + if response['ret'] is False: + return response + return {'ret': True, 'changed': True, 'msg': "Modified Manager NIC"} diff --git a/plugins/module_utils/redhat.py b/plugins/module_utils/redhat.py new file mode 100644 index 0000000000..358a2bd7b1 --- /dev/null +++ b/plugins/module_utils/redhat.py @@ -0,0 +1,284 @@ +# This code is part of Ansible, but is an independent component. +# This particular file snippet, and this file snippet only, is BSD licensed. +# Modules you write using this snippet, which is embedded dynamically by Ansible +# still belong to the author of the module, and may assign their own license +# to the complete work. +# +# Copyright (c), James Laska +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without modification, +# are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
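For orientation, a minimal sketch of how a module might drive the Rhsm helper defined below; the AnsibleModule wiring and parameter names here are illustrative assumptions, not part of this file:

    from ansible.module_utils.basic import AnsibleModule

    # hypothetical module body built on the Rhsm helper from this snippet
    module = AnsibleModule(argument_spec=dict(
        username=dict(type='str'),
        password=dict(type='str', no_log=True),
    ))
    rhsm = Rhsm(module, username=module.params['username'],
                password=module.params['password'])
    if not rhsm.is_registered:
        # register() shells out to subscription-manager via module.run_command()
        rhsm.register(module.params['username'], module.params['password'],
                      autosubscribe=True, activationkey=None)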
+
+import os
+import re
+import shutil
+import tempfile
+import types
+
+from ansible.module_utils.six.moves import configparser
+
+
+class RegistrationBase(object):
+    def __init__(self, module, username=None, password=None):
+        self.module = module
+        self.username = username
+        self.password = password
+
+    def configure(self):
+        raise NotImplementedError("Must be implemented by a sub-class")
+
+    def enable(self):
+        # Remove any existing redhat.repo
+        redhat_repo = '/etc/yum.repos.d/redhat.repo'
+        if os.path.isfile(redhat_repo):
+            os.unlink(redhat_repo)
+
+    def register(self):
+        raise NotImplementedError("Must be implemented by a sub-class")
+
+    def unregister(self):
+        raise NotImplementedError("Must be implemented by a sub-class")
+
+    def unsubscribe(self):
+        raise NotImplementedError("Must be implemented by a sub-class")
+
+    def update_plugin_conf(self, plugin, enabled=True):
+        plugin_conf = '/etc/yum/pluginconf.d/%s.conf' % plugin
+
+        if os.path.isfile(plugin_conf):
+            tmpfd, tmpfile = tempfile.mkstemp()
+            shutil.copy2(plugin_conf, tmpfile)
+            cfg = configparser.ConfigParser()
+            cfg.read([tmpfile])
+
+            # ConfigParser requires string values; writing ints breaks on Python 3
+            if enabled:
+                cfg.set('main', 'enabled', '1')
+            else:
+                cfg.set('main', 'enabled', '0')
+
+            fd = open(tmpfile, 'w+')
+            cfg.write(fd)
+            fd.close()
+            self.module.atomic_move(tmpfile, plugin_conf)
+
+    def subscribe(self, **kwargs):
+        raise NotImplementedError("Must be implemented by a sub-class")
+
+
+class Rhsm(RegistrationBase):
+    def __init__(self, module, username=None, password=None):
+        RegistrationBase.__init__(self, module, username, password)
+        self.config = self._read_config()
+        self.module = module
+
+    def _read_config(self, rhsm_conf='/etc/rhsm/rhsm.conf'):
+        '''
+            Load RHSM configuration from /etc/rhsm/rhsm.conf.
+            Returns:
+             * ConfigParser object
+        '''
+
+        # Read RHSM defaults ...
+        cp = configparser.ConfigParser()
+        cp.read(rhsm_conf)
+
+        # Add support for specifying a default value without having to stand up
+        # extra configuration. Ideally this would be done by subclassing
+        # ConfigParser rather than monkey-patching an instance.
+        def get_option_default(self, key, default=''):
+            sect, opt = key.split('.', 1)
+            if self.has_section(sect) and self.has_option(sect, opt):
+                return self.get(sect, opt)
+            else:
+                return default
+
+        # The two-argument form of types.MethodType works on both Python 2 and
+        # Python 3; the three-argument form was removed in Python 3.
+        cp.get_option = types.MethodType(get_option_default, cp)
+
+        return cp
+
+    def enable(self):
+        '''
+            Enable the system to receive updates from subscription-manager.
+            This involves updating affected yum plugins and removing any
+            conflicting yum repositories.
+        '''
+        RegistrationBase.enable(self)
+        self.update_plugin_conf('rhnplugin', False)
+        self.update_plugin_conf('subscription-manager', True)
+
+    def configure(self, **kwargs):
+        '''
+            Configure the system as directed for registration with RHN
+            Raises:
+              * Exception - if error occurs while running command
+        '''
+        args = ['subscription-manager', 'config']
+
+        # Pass supplied **kwargs as parameters to subscription-manager. Ignore
+        # non-configuration parameters and replace '_' with '.'. For example,
+        # 'rhsm_baseurl' becomes '--rhsm.baseurl'.
+        for k, v in kwargs.items():
+            if re.search(r'^(system|rhsm)_', k):
+                args.append('--%s=%s' % (k.replace('_', '.'), v))
+
+        self.module.run_command(args, check_rc=True)
+
+    @property
+    def is_registered(self):
+        '''
+            Determine whether the current system is registered.
+            Returns:
+              * Boolean - whether the current system is currently registered to
+                          RHN.
+        '''
+        args = ['subscription-manager', 'identity']
+        rc, stdout, stderr = self.module.run_command(args, check_rc=False)
+        return rc == 0
+
+    def register(self, username, password, autosubscribe, activationkey):
+        '''
+            Register the current system to the provided RHN server
+            Raises:
+              * Exception - if error occurs while running command
+        '''
+        args = ['subscription-manager', 'register']
+
+        # Generate command arguments
+        if activationkey:
+            # Pass the key as a separate argv element; embedding quotes in a
+            # single string would send literal quote characters to the command.
+            args.extend(['--activationkey', activationkey])
+        else:
+            if autosubscribe:
+                args.append('--autosubscribe')
+            if username:
+                args.extend(['--username', username])
+            if password:
+                args.extend(['--password', password])
+
+        # Run the registration command. run_command returns (rc, stdout, stderr).
+        rc, stdout, stderr = self.module.run_command(args, check_rc=True)
+
+    def unsubscribe(self):
+        '''
+            Unsubscribe a system from all subscribed channels
+            Raises:
+              * Exception - if error occurs while running command
+        '''
+        args = ['subscription-manager', 'unsubscribe', '--all']
+        rc, stdout, stderr = self.module.run_command(args, check_rc=True)
+
+    def unregister(self):
+        '''
+            Unregister a currently registered system
+            Raises:
+              * Exception - if error occurs while running command
+        '''
+        args = ['subscription-manager', 'unregister']
+        rc, stdout, stderr = self.module.run_command(args, check_rc=True)
+        self.update_plugin_conf('rhnplugin', False)
+        self.update_plugin_conf('subscription-manager', False)
+
+    def subscribe(self, regexp):
+        '''
+            Subscribe current system to available pools matching the specified
+            regular expression
+            Raises:
+              * Exception - if error occurs while running command
+        '''
+
+        # Available pools ready for subscription
+        available_pools = RhsmPools(self.module)
+
+        for pool in available_pools.filter(regexp):
+            pool.subscribe()
+
+
+class RhsmPool(object):
+    '''
+        Convenience class for housing subscription information
+    '''
+
+    def __init__(self, module, **kwargs):
+        self.module = module
+        for k, v in kwargs.items():
+            setattr(self, k, v)
+
+    def __str__(self):
+        return str(self.__getattribute__('_name'))
+
+    def subscribe(self):
+        args = "subscription-manager subscribe --pool %s" % self.PoolId
+        rc, stdout, stderr = self.module.run_command(args, check_rc=True)
+        return rc == 0
+
+
+class RhsmPools(object):
+    """
+    This class is used for manipulating pools subscriptions with RHSM
+    """
+    def __init__(self, module):
+        self.module = module
+        self.products = self._load_product_list()
+
+    def __iter__(self):
+        return self.products.__iter__()
+
+    def _load_product_list(self):
+        """
+        Load the list of all pools available to the system into a data structure
+        """
+        args = "subscription-manager list --available"
+        rc, stdout, stderr = self.module.run_command(args, check_rc=True)
+
+        products = []
+        for line in stdout.split('\n'):
+            # Remove leading+trailing whitespace
+            line = line.strip()
+            # An empty line implies the end of an output group
+            if len(line) == 0:
+                continue
+            # If a colon ':' is found, parse
+            elif ':' in line:
+                (key, value) = line.split(':', 1)
+                key = key.strip().replace(" ", "")  # normalize key names
+                value = value.strip()
+                if key in ['ProductName', 'SubscriptionName']:
+                    # Remember the name for later processing
+                    products.append(RhsmPool(self.module, _name=value, key=value))
+                elif products:
+                    # Associate value with most recently recorded product
+                    products[-1].__setattr__(key, value)
+                # FIXME - log some warning?
+ # else: + # warnings.warn("Unhandled subscription key/value: %s/%s" % (key,value)) + return products + + def filter(self, regexp='^$'): + ''' + Return a list of RhsmPools whose name matches the provided regular expression + ''' + r = re.compile(regexp) + for product in self.products: + if r.search(product._name): + yield product diff --git a/plugins/module_utils/remote_management/__init__.py b/plugins/module_utils/remote_management/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/plugins/module_utils/remote_management/dellemc/__init__.py b/plugins/module_utils/remote_management/dellemc/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/plugins/module_utils/remote_management/dellemc/dellemc_idrac.py b/plugins/module_utils/remote_management/dellemc/dellemc_idrac.py new file mode 100644 index 0000000000..b633b93ebf --- /dev/null +++ b/plugins/module_utils/remote_management/dellemc/dellemc_idrac.py @@ -0,0 +1,57 @@ +# -*- coding: utf-8 -*- + +# +# Dell EMC OpenManage Ansible Modules +# Version 1.0 +# Copyright (C) 2018 Dell Inc. + +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# All rights reserved. Dell, EMC, and other trademarks are trademarks of Dell Inc. or its subsidiaries. +# Other trademarks may be trademarks of their respective owners. +# + +from __future__ import (absolute_import, division, + print_function) +__metaclass__ = type + +try: + from omsdk.sdkinfra import sdkinfra + from omsdk.sdkcreds import UserCredentials + from omsdk.sdkfile import FileOnShare, file_share_manager + from omsdk.sdkprotopref import ProtoPreference, ProtocolEnum + from omsdk.http.sdkwsmanbase import WsManOptions + HAS_OMSDK = True +except ImportError: + HAS_OMSDK = False + + +class iDRACConnection: + + def __init__(self, module_params): + if not HAS_OMSDK: + raise ImportError("Dell EMC OMSDK library is required for this module") + self.idrac_ip = module_params['idrac_ip'] + self.idrac_user = module_params['idrac_user'] + self.idrac_pwd = module_params['idrac_password'] + self.idrac_port = module_params['idrac_port'] + if not all((self.idrac_ip, self.idrac_user, self.idrac_pwd)): + raise ValueError("hostname, username and password required") + self.handle = None + self.creds = UserCredentials(self.idrac_user, self.idrac_pwd) + self.pOp = WsManOptions(port=self.idrac_port) + self.sdk = sdkinfra() + if self.sdk is None: + msg = "Could not initialize iDRAC drivers." + raise RuntimeError(msg) + + def __enter__(self): + self.sdk.importPath() + self.handle = self.sdk.get_driver(self.sdk.driver_enum.iDRAC, self.idrac_ip, self.creds, pOptions=self.pOp) + if self.handle is None: + msg = "Could not find device driver for iDRAC with IP Address: {0}".format(self.idrac_ip) + raise RuntimeError(msg) + return self.handle + + def __exit__(self, exc_type, exc_val, exc_tb): + self.handle.disconnect() + return False diff --git a/plugins/module_utils/remote_management/dellemc/ome.py b/plugins/module_utils/remote_management/dellemc/ome.py new file mode 100644 index 0000000000..c387e2a164 --- /dev/null +++ b/plugins/module_utils/remote_management/dellemc/ome.py @@ -0,0 +1,181 @@ +# -*- coding: utf-8 -*- + +# Dell EMC OpenManage Ansible Modules +# Version 1.3 +# Copyright (C) 2019 Dell Inc. or its subsidiaries. All Rights Reserved. 
+ +# Redistribution and use in source and binary forms, with or without modification, +# are permitted provided that the following conditions are met: + +# * Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. + +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. + +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import json +from ansible.module_utils.urls import open_url, ConnectionError, SSLValidationError +from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError +from ansible.module_utils.six.moves.urllib.parse import urlencode + +SESSION_RESOURCE_COLLECTION = { + "SESSION": "SessionService/Sessions", + "SESSION_ID": "SessionService/Sessions('{Id}')", +} + + +class OpenURLResponse(object): + """Handles HTTPResponse""" + + def __init__(self, resp): + self.body = None + self.resp = resp + if self.resp: + self.body = self.resp.read() + + @property + def json_data(self): + try: + return json.loads(self.body) + except ValueError: + raise ValueError("Unable to parse json") + + @property + def status_code(self): + return self.resp.getcode() + + @property + def success(self): + return self.status_code in (200, 201, 202, 204) + + @property + def token_header(self): + return self.resp.headers.get('X-Auth-Token') + + +class RestOME(object): + """Handles OME API requests""" + + def __init__(self, module_params=None, req_session=False): + self.module_params = module_params + self.hostname = self.module_params["hostname"] + self.username = self.module_params["username"] + self.password = self.module_params["password"] + self.port = self.module_params["port"] + self.req_session = req_session + self.session_id = None + self.protocol = 'https' + self._headers = {'Content-Type': 'application/json', 'Accept': 'application/json'} + + def _get_base_url(self): + """builds base url""" + return '{0}://{1}:{2}/api'.format(self.protocol, self.hostname, self.port) + + def _build_url(self, path, query_param=None): + """builds complete url""" + url = path + base_uri = self._get_base_url() + if path: + url = '{0}/{1}'.format(base_uri, path) + if query_param: + url += "?{0}".format(urlencode(query_param)) + return url + + def _url_common_args_spec(self, method, api_timeout, headers=None): + """Creates an argument common spec""" + req_header = self._headers + if headers: + req_header.update(headers) + url_kwargs = { + "method": method, + "validate_certs": False, + "use_proxy": True, + "headers": req_header, + "timeout": api_timeout, + 
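+ # 'all' tells open_url to follow every redirect the appliance returns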
"follow_redirects": 'all', + } + return url_kwargs + + def _args_without_session(self, method, api_timeout=30, headers=None): + """Creates an argument spec in case of basic authentication""" + req_header = self._headers + if headers: + req_header.update(headers) + url_kwargs = self._url_common_args_spec(method, api_timeout, headers=headers) + url_kwargs["url_username"] = self.username + url_kwargs["url_password"] = self.password + url_kwargs["force_basic_auth"] = True + return url_kwargs + + def _args_with_session(self, method, api_timeout=30, headers=None): + """Creates an argument spec, in case of authentication with session""" + url_kwargs = self._url_common_args_spec(method, api_timeout, headers=headers) + url_kwargs["force_basic_auth"] = False + return url_kwargs + + def invoke_request(self, method, path, data=None, query_param=None, headers=None, + api_timeout=30, dump=True): + """ + Sends a request via open_url + Returns :class:`OpenURLResponse` object. + :arg method: HTTP verb to use for the request + :arg path: path to request without query parameter + :arg data: (optional) Payload to send with the request + :arg query_param: (optional) Dictionary of query parameter to send with request + :arg headers: (optional) Dictionary of HTTP Headers to send with the + request + :arg api_timeout: (optional) How long to wait for the server to send + data before giving up + :arg dump: (Optional) boolean value for dumping payload data. + :returns: OpenURLResponse + """ + try: + if 'X-Auth-Token' in self._headers: + url_kwargs = self._args_with_session(method, api_timeout, headers=headers) + else: + url_kwargs = self._args_without_session(method, api_timeout, headers=headers) + if data and dump: + data = json.dumps(data) + url = self._build_url(path, query_param=query_param) + resp = open_url(url, data=data, **url_kwargs) + resp_data = OpenURLResponse(resp) + except (HTTPError, URLError, SSLValidationError, ConnectionError) as err: + raise err + return resp_data + + def __enter__(self): + """Creates sessions by passing it to header""" + if self.req_session: + payload = {'UserName': self.username, + 'Password': self.password, + 'SessionType': 'API', } + path = SESSION_RESOURCE_COLLECTION["SESSION"] + resp = self.invoke_request('POST', path, data=payload) + if resp and resp.success: + self.session_id = resp.json_data.get("Id") + self._headers["X-Auth-Token"] = resp.token_header + else: + msg = "Could not create the session" + raise ConnectionError(msg) + return self + + def __exit__(self, exc_type, exc_value, traceback): + """Deletes a session id, which is in use for request""" + if self.session_id: + path = SESSION_RESOURCE_COLLECTION["SESSION_ID"].format(Id=self.session_id) + self.invoke_request('DELETE', path) + return False diff --git a/plugins/module_utils/remote_management/lxca/__init__.py b/plugins/module_utils/remote_management/lxca/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/plugins/module_utils/remote_management/lxca/common.py b/plugins/module_utils/remote_management/lxca/common.py new file mode 100644 index 0000000000..50080ccb4b --- /dev/null +++ b/plugins/module_utils/remote_management/lxca/common.py @@ -0,0 +1,95 @@ +# This code is part of Ansible, but is an independent component. +# This particular file snippet, and this file snippet only, is BSD licensed. +# Modules you write using this snippet, which is embedded dynamically by +# Ansible still belong to the author of the module, and may assign their +# own license to the complete work. 
+#
+# Copyright (C) 2017 Lenovo, Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+#
+# Contains LXCA common class
+# Lenovo xClarity Administrator (LXCA)
+
+import traceback
+try:
+    from pylxca import connect, disconnect
+    HAS_PYLXCA = True
+except ImportError:
+    HAS_PYLXCA = False
+
+
+PYLXCA_REQUIRED = "Lenovo xClarity Administrator Python Client (Python package 'pylxca') is required for this module."
+
+
+def has_pylxca(module):
+    """
+    Fail the module if the pylxca client library is not installed.
+    :param module: AnsibleModule instance
+    """
+    if not HAS_PYLXCA:
+        module.fail_json(msg=PYLXCA_REQUIRED)
+
+
+LXCA_COMMON_ARGS = dict(
+    login_user=dict(required=True),
+    login_password=dict(required=True, no_log=True),
+    auth_url=dict(required=True),
+)
+
+
+class connection_object:
+    def __init__(self, module):
+        self.module = module
+
+    def __enter__(self):
+        return setup_conn(self.module)
+
+    def __exit__(self, type, value, traceback):
+        close_conn()
+
+
+def setup_conn(module):
+    """
+    Create a connection to the LXCA instance.
+    :param module: AnsibleModule instance
+    :return: LXCA connection
+    """
+    lxca_con = None
+    try:
+        lxca_con = connect(module.params['auth_url'],
+                           module.params['login_user'],
+                           module.params['login_password'],
+                           "True")
+    except Exception as exception:
+        error_msg = '; '.join(exception.args)
+        module.fail_json(msg=error_msg, exception=traceback.format_exc())
+    return lxca_con
+
+
+def close_conn():
+    """
+    Close the connection to the LXCA instance.
+    :return: None
+    """
+    disconnect()
diff --git a/plugins/module_utils/scaleway.py b/plugins/module_utils/scaleway.py
new file mode 100644
index 0000000000..50041eff52
--- /dev/null
+++ b/plugins/module_utils/scaleway.py
@@ -0,0 +1,183 @@
+import json
+import re
+import sys
+
+from ansible.module_utils.basic import env_fallback
+from ansible.module_utils.urls import fetch_url
+from ansible.module_utils.six.moves.urllib.parse import urlencode
+
+
+def scaleway_argument_spec():
+    return dict(
+        api_token=dict(required=True, fallback=(env_fallback, ['SCW_TOKEN', 'SCW_API_KEY', 'SCW_OAUTH_TOKEN', 'SCW_API_TOKEN']),
+                       no_log=True, aliases=['oauth_token']),
+        api_url=dict(fallback=(env_fallback, ['SCW_API_URL']), default='https://api.scaleway.com', aliases=['base_url']),
+        api_timeout=dict(type='int', default=30, aliases=['timeout']),
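+        # extra query parameters merged into every request URL by _url_builder()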
+        query_parameters=dict(type='dict', default={}),
+        validate_certs=dict(default=True, type='bool'),
+    )
+
+
+def payload_from_object(scw_object):
+    return dict(
+        (k, v)
+        for k, v in scw_object.items()
+        if k != 'id' and v is not None
+    )
+
+
+class ScalewayException(Exception):
+
+    def __init__(self, message):
+        self.message = message
+
+
+# Specify a complete Link header, for validation purposes
+R_LINK_HEADER = r'''<[^>]+>;\srel="(first|previous|next|last)"
+    (,<[^>]+>;\srel="(first|previous|next|last)")*'''
+# Specify a single relation, for iteration and string extraction purposes
+R_RELATION = r'<(?P<target_IRI>[^>]+)>; rel="(?P<relation>first|previous|next|last)"'
+
+
+def parse_pagination_link(header):
+    if not re.match(R_LINK_HEADER, header, re.VERBOSE):
+        raise ScalewayException('Scaleway API answered with an invalid Link pagination header')
+    else:
+        relations = header.split(',')
+        parsed_relations = {}
+        rc_relation = re.compile(R_RELATION)
+        for relation in relations:
+            match = rc_relation.match(relation)
+            if not match:
+                raise ScalewayException('Scaleway API answered with an invalid relation in the Link pagination header')
+            data = match.groupdict()
+            parsed_relations[data['relation']] = data['target_IRI']
+        return parsed_relations
+
+
+class Response(object):
+
+    def __init__(self, resp, info):
+        self.body = None
+        if resp:
+            self.body = resp.read()
+        self.info = info
+
+    @property
+    def json(self):
+        if not self.body:
+            if "body" in self.info:
+                return json.loads(self.info["body"])
+            return None
+        try:
+            return json.loads(self.body)
+        except ValueError:
+            return None
+
+    @property
+    def status_code(self):
+        return self.info["status"]
+
+    @property
+    def ok(self):
+        return self.status_code in (200, 201, 202, 204)
+
+
+class Scaleway(object):
+
+    def __init__(self, module):
+        self.module = module
+        self.headers = {
+            'X-Auth-Token': self.module.params.get('api_token'),
+            'User-Agent': self.get_user_agent_string(module),
+            # the key casing must match the lookup in send() below
+            'Content-Type': 'application/json',
+        }
+        self.name = None
+
+    def get_resources(self):
+        results = self.get('/%s' % self.name)
+
+        if not results.ok:
+            raise ScalewayException('Error fetching {0} ({1}) [{2}: {3}]'.format(
+                self.name, '%s/%s' % (self.module.params.get('api_url'), self.name),
+                results.status_code, results.json['message']
+            ))
+
+        return results.json.get(self.name)
+
+    def _url_builder(self, path, params):
+        d = self.module.params.get('query_parameters')
+        if params is not None:
+            d.update(params)
+        query_string = urlencode(d, doseq=True)
+
+        if path[0] == '/':
+            path = path[1:]
+        return '%s/%s?%s' % (self.module.params.get('api_url'), path, query_string)
+
+    def send(self, method, path, data=None, headers=None, params=None):
+        url = self._url_builder(path=path, params=params)
+        self.warn(url)
+
+        if headers is not None:
+            self.headers.update(headers)
+
+        if self.headers['Content-Type'] == "application/json":
+            data = self.module.jsonify(data)
+
+        resp, info = fetch_url(
+            self.module, url, data=data, headers=self.headers, method=method,
+            timeout=self.module.params.get('api_timeout')
+        )
+
+        # Exceptions in fetch_url may result in a status -1; this ensures a proper error to the user in all cases
+        if info['status'] == -1:
+            self.module.fail_json(msg=info['msg'])
+
+        return Response(resp, info)
+
+    @staticmethod
+    def get_user_agent_string(module):
+        return "ansible %s Python %s" % (module.ansible_version, sys.version.split(' ')[0])
+
+    def get(self, path, data=None, headers=None, params=None):
+        return self.send(method='GET', path=path, data=data, headers=headers,
params=params) + + def put(self, path, data=None, headers=None, params=None): + return self.send(method='PUT', path=path, data=data, headers=headers, params=params) + + def post(self, path, data=None, headers=None, params=None): + return self.send(method='POST', path=path, data=data, headers=headers, params=params) + + def delete(self, path, data=None, headers=None, params=None): + return self.send(method='DELETE', path=path, data=data, headers=headers, params=params) + + def patch(self, path, data=None, headers=None, params=None): + return self.send(method="PATCH", path=path, data=data, headers=headers, params=params) + + def update(self, path, data=None, headers=None, params=None): + return self.send(method="UPDATE", path=path, data=data, headers=headers, params=params) + + def warn(self, x): + self.module.warn(str(x)) + + +SCALEWAY_LOCATION = { + 'par1': {'name': 'Paris 1', 'country': 'FR', "api_endpoint": 'https://api.scaleway.com/instance/v1/zones/fr-par-1'}, + 'EMEA-FR-PAR1': {'name': 'Paris 1', 'country': 'FR', "api_endpoint": 'https://api.scaleway.com/instance/v1/zones/fr-par-1'}, + + 'ams1': {'name': 'Amsterdam 1', 'country': 'NL', "api_endpoint": 'https://api.scaleway.com/instance/v1/zones/nl-ams-1'}, + 'EMEA-NL-EVS': {'name': 'Amsterdam 1', 'country': 'NL', "api_endpoint": 'https://api.scaleway.com/instance/v1/zones/nl-ams-1'} +} + +SCALEWAY_ENDPOINT = "https://api.scaleway.com" + +SCALEWAY_REGIONS = [ + "fr-par", + "nl-ams", +] + +SCALEWAY_ZONES = [ + "fr-par-1", + "nl-ams-1", +] diff --git a/plugins/module_utils/source_control/__init__.py b/plugins/module_utils/source_control/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/plugins/module_utils/source_control/bitbucket.py b/plugins/module_utils/source_control/bitbucket.py new file mode 100644 index 0000000000..8359eec11f --- /dev/null +++ b/plugins/module_utils/source_control/bitbucket.py @@ -0,0 +1,95 @@ +# -*- coding: utf-8 -*- + +# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause) + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) + +import json + +from ansible.module_utils._text import to_text +from ansible.module_utils.basic import env_fallback +from ansible.module_utils.urls import fetch_url, basic_auth_header + +# Makes all classes defined in the file into new-style classes without explicitly inheriting from object +__metaclass__ = type + + +class BitbucketHelper: + BITBUCKET_API_URL = 'https://api.bitbucket.org' + + error_messages = { + 'required_client_id': '`client_id` must be specified as a parameter or ' + 'BITBUCKET_CLIENT_ID environment variable', + 'required_client_secret': '`client_secret` must be specified as a parameter or ' + 'BITBUCKET_CLIENT_SECRET environment variable', + } + + def __init__(self, module): + self.module = module + self.access_token = None + + @staticmethod + def bitbucket_argument_spec(): + return dict( + client_id=dict(type='str', no_log=True, fallback=(env_fallback, ['BITBUCKET_CLIENT_ID'])), + client_secret=dict(type='str', no_log=True, fallback=(env_fallback, ['BITBUCKET_CLIENT_SECRET'])), + ) + + def check_arguments(self): + if self.module.params['client_id'] is None: + self.module.fail_json(msg=self.error_messages['required_client_id']) + + if self.module.params['client_secret'] is None: + self.module.fail_json(msg=self.error_messages['required_client_secret']) + + def fetch_access_token(self): + self.check_arguments() + + headers = { + 'Authorization': 
basic_auth_header(self.module.params['client_id'], self.module.params['client_secret']) + } + + info, content = self.request( + api_url='https://bitbucket.org/site/oauth2/access_token', + method='POST', + data='grant_type=client_credentials', + headers=headers, + ) + + if info['status'] == 200: + self.access_token = content['access_token'] + else: + self.module.fail_json(msg='Failed to retrieve access token: {0}'.format(info)) + + def request(self, api_url, method, data=None, headers=None): + headers = headers or {} + + if self.access_token: + headers.update({ + 'Authorization': 'Bearer {0}'.format(self.access_token), + }) + + if isinstance(data, dict): + data = self.module.jsonify(data) + headers.update({ + 'Content-type': 'application/json', + }) + + response, info = fetch_url( + module=self.module, + url=api_url, + method=method, + headers=headers, + data=data, + force=True, + ) + + content = {} + + if response is not None: + body = to_text(response.read()) + if body: + content = json.loads(body) + + return info, content diff --git a/plugins/module_utils/storage/__init__.py b/plugins/module_utils/storage/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/plugins/module_utils/storage/emc/__init__.py b/plugins/module_utils/storage/emc/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/plugins/module_utils/storage/emc/emc_vnx.py b/plugins/module_utils/storage/emc/emc_vnx.py new file mode 100644 index 0000000000..c6177e5367 --- /dev/null +++ b/plugins/module_utils/storage/emc/emc_vnx.py @@ -0,0 +1,34 @@ +# This code is part of Ansible, but is an independent component. +# This particular file snippet, and this file snippet only, is BSD licensed. +# Modules you write using this snippet, which is embedded dynamically by Ansible +# still belong to the author of the module, and may assign their own license +# to the complete work. +# +# (c) 2018 Luca 'remix_tj' Lorenzetto +# +# Redistribution and use in source and binary forms, with or without modification, +# are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
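A hedged sketch of the intended consumption of the emc_vnx_argument_spec defined just below; the module-specific 'name' option and the surrounding module body are invented for illustration:

    from ansible.module_utils.basic import AnsibleModule

    # hypothetical: merge the shared VNX storage-processor options into a module spec
    argument_spec = dict(name=dict(type='str', required=True))
    argument_spec.update(emc_vnx_argument_spec)
    module = AnsibleModule(argument_spec=argument_spec)
    sp_address = module.params['sp_address']  # storage processor to contact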
+# + +emc_vnx_argument_spec = { + 'sp_address': dict(type='str', required=True), + 'sp_user': dict(type='str', required=False, default='sysadmin'), + 'sp_password': dict(type='str', required=False, default='sysadmin', + no_log=True), +} diff --git a/plugins/module_utils/storage/hpe3par/__init__.py b/plugins/module_utils/storage/hpe3par/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/plugins/module_utils/storage/hpe3par/hpe3par.py b/plugins/module_utils/storage/hpe3par/hpe3par.py new file mode 100644 index 0000000000..2495f9be1b --- /dev/null +++ b/plugins/module_utils/storage/hpe3par/hpe3par.py @@ -0,0 +1,90 @@ +# Copyright: (c) 2018, Hewlett Packard Enterprise Development LP +# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause) + +from ansible.module_utils import basic + + +def convert_to_binary_multiple(size_with_unit): + if size_with_unit is None: + return -1 + valid_units = ['MiB', 'GiB', 'TiB'] + valid_unit = False + for unit in valid_units: + if size_with_unit.strip().endswith(unit): + valid_unit = True + size = size_with_unit.split(unit)[0] + if float(size) < 0: + return -1 + if not valid_unit: + raise ValueError("%s does not have a valid unit. The unit must be one of %s" % (size_with_unit, valid_units)) + + size = size_with_unit.replace(" ", "").split('iB')[0] + size_kib = basic.human_to_bytes(size) + return int(size_kib / (1024 * 1024)) + + +storage_system_spec = { + "storage_system_ip": { + "required": True, + "type": "str" + }, + "storage_system_username": { + "required": True, + "type": "str", + "no_log": True + }, + "storage_system_password": { + "required": True, + "type": "str", + "no_log": True + }, + "secure": { + "type": "bool", + "default": False + } +} + + +def cpg_argument_spec(): + spec = { + "state": { + "required": True, + "choices": ['present', 'absent'], + "type": 'str' + }, + "cpg_name": { + "required": True, + "type": "str" + }, + "domain": { + "type": "str" + }, + "growth_increment": { + "type": "str", + }, + "growth_limit": { + "type": "str", + }, + "growth_warning": { + "type": "str", + }, + "raid_type": { + "required": False, + "type": "str", + "choices": ['R0', 'R1', 'R5', 'R6'] + }, + "set_size": { + "required": False, + "type": "int" + }, + "high_availability": { + "type": "str", + "choices": ['PORT', 'CAGE', 'MAG'] + }, + "disk_type": { + "type": "str", + "choices": ['FC', 'NL', 'SSD'] + } + } + spec.update(storage_system_spec) + return spec diff --git a/plugins/module_utils/univention_umc.py b/plugins/module_utils/univention_umc.py new file mode 100644 index 0000000000..9c84930cf5 --- /dev/null +++ b/plugins/module_utils/univention_umc.py @@ -0,0 +1,293 @@ +# -*- coding: UTF-8 -*- + +# This code is part of Ansible, but is an independent component. +# This particular file snippet, and this file snippet only, is BSD licensed. +# Modules you write using this snippet, which is embedded dynamically by Ansible +# still belong to the author of the module, and may assign their own license +# to the complete work. +# +# Copyright (c) 2016, Adfinis SyGroup AG +# Tobias Rueetschi +# +# Redistribution and use in source and binary forms, with or without modification, +# are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. 
+# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# + + +"""Univention Corporate Server (UCS) access module. + +Provides the following functions for working with an UCS server. + + - ldap_search(filter, base=None, attr=None) + Search the LDAP via Univention's LDAP wrapper (ULDAP) + + - config_registry() + Return the UCR registry object + + - base_dn() + Return the configured Base DN according to the UCR + + - uldap() + Return a handle to the ULDAP LDAP wrapper + + - umc_module_for_add(module, container_dn, superordinate=None) + Return a UMC module for creating a new object of the given type + + - umc_module_for_edit(module, object_dn, superordinate=None) + Return a UMC module for editing an existing object of the given type + + +Any other module is not part of the "official" API and may change at any time. +""" + +import re + + +__all__ = [ + 'ldap_search', + 'config_registry', + 'base_dn', + 'uldap', + 'umc_module_for_add', + 'umc_module_for_edit', +] + + +_singletons = {} + + +def ldap_module(): + import ldap as orig_ldap + return orig_ldap + + +def _singleton(name, constructor): + if name in _singletons: + return _singletons[name] + _singletons[name] = constructor() + return _singletons[name] + + +def config_registry(): + + def construct(): + import univention.config_registry + ucr = univention.config_registry.ConfigRegistry() + ucr.load() + return ucr + + return _singleton('config_registry', construct) + + +def base_dn(): + return config_registry()['ldap/base'] + + +def uldap(): + "Return a configured univention uldap object" + + def construct(): + try: + secret_file = open('/etc/ldap.secret', 'r') + bind_dn = 'cn=admin,{0}'.format(base_dn()) + except IOError: # pragma: no cover + secret_file = open('/etc/machine.secret', 'r') + bind_dn = config_registry()["ldap/hostdn"] + pwd_line = secret_file.readline() + pwd = re.sub('\n', '', pwd_line) + + import univention.admin.uldap + return univention.admin.uldap.access( + host=config_registry()['ldap/master'], + base=base_dn(), + binddn=bind_dn, + bindpw=pwd, + start_tls=1, + ) + + return _singleton('uldap', construct) + + +def config(): + def construct(): + import univention.admin.config + return univention.admin.config.config() + return _singleton('config', construct) + + +def init_modules(): + def construct(): + import univention.admin.modules + univention.admin.modules.update() + return True + return _singleton('modules_initialized', construct) + + +def position_base_dn(): + def construct(): + import univention.admin.uldap + return univention.admin.uldap.position(base_dn()) + return 
_singleton('position_base_dn', construct) + + +def ldap_dn_tree_parent(dn, count=1): + dn_array = dn.split(',') + dn_array[0:count] = [] + return ','.join(dn_array) + + +def ldap_search(filter, base=None, attr=None): + """Replaces uldaps search and uses a generator. + !! Arguments are not the same.""" + + if base is None: + base = base_dn() + msgid = uldap().lo.lo.search( + base, + ldap_module().SCOPE_SUBTREE, + filterstr=filter, + attrlist=attr + ) + # I used to have a try: finally: here but there seems to be a bug in python + # which swallows the KeyboardInterrupt + # The abandon now doesn't make too much sense + while True: + result_type, result_data = uldap().lo.lo.result(msgid, all=0) + if not result_data: + break + if result_type is ldap_module().RES_SEARCH_RESULT: # pragma: no cover + break + else: + if result_type is ldap_module().RES_SEARCH_ENTRY: + for res in result_data: + yield res + uldap().lo.lo.abandon(msgid) + + +def module_by_name(module_name_): + """Returns an initialized UMC module, identified by the given name. + + The module is a module specification according to the udm commandline. + Example values are: + * users/user + * shares/share + * groups/group + + If the module does not exist, a KeyError is raised. + + The modules are cached, so they won't be re-initialized + in subsequent calls. + """ + + def construct(): + import univention.admin.modules + init_modules() + module = univention.admin.modules.get(module_name_) + univention.admin.modules.init(uldap(), position_base_dn(), module) + return module + + return _singleton('module/%s' % module_name_, construct) + + +def get_umc_admin_objects(): + """Convenience accessor for getting univention.admin.objects. + + This implements delayed importing, so the univention.* modules + are not loaded until this function is called. + """ + import univention.admin + return univention.admin.objects + + +def umc_module_for_add(module, container_dn, superordinate=None): + """Returns an UMC module object prepared for creating a new entry. + + The module is a module specification according to the udm commandline. + Example values are: + * users/user + * shares/share + * groups/group + + The container_dn MUST be the dn of the container (not of the object to + be created itself!). + """ + mod = module_by_name(module) + + position = position_base_dn() + position.setDn(container_dn) + + # config, ldap objects from common module + obj = mod.object(config(), uldap(), position, superordinate=superordinate) + obj.open() + + return obj + + +def umc_module_for_edit(module, object_dn, superordinate=None): + """Returns an UMC module object prepared for editing an existing entry. + + The module is a module specification according to the udm commandline. + Example values are: + * users/user + * shares/share + * groups/group + + The object_dn MUST be the dn of the object itself, not the container! 
+ """ + mod = module_by_name(module) + + objects = get_umc_admin_objects() + + position = position_base_dn() + position.setDn(ldap_dn_tree_parent(object_dn)) + + obj = objects.get( + mod, + config(), + uldap(), + position=position, + superordinate=superordinate, + dn=object_dn + ) + obj.open() + + return obj + + +def create_containers_and_parents(container_dn): + """Create a container and if needed the parents containers""" + import univention.admin.uexceptions as uexcp + if not container_dn.startswith("cn="): + raise AssertionError() + try: + parent = ldap_dn_tree_parent(container_dn) + obj = umc_module_for_add( + 'container/cn', + parent + ) + obj['name'] = container_dn.split(',')[0].split('=')[1] + obj['description'] = "container created by import" + except uexcp.ldapError: + create_containers_and_parents(parent) + obj = umc_module_for_add( + 'container/cn', + parent + ) + obj['name'] = container_dn.split(',')[0].split('=')[1] + obj['description'] = "container created by import" diff --git a/plugins/module_utils/utm_utils.py b/plugins/module_utils/utm_utils.py new file mode 100644 index 0000000000..ba193713f4 --- /dev/null +++ b/plugins/module_utils/utm_utils.py @@ -0,0 +1,234 @@ +# This code is part of Ansible, but is an independent component. +# This particular file snippet, and this file snippet only, is BSD licensed. +# Modules you write using this snippet, which is embedded dynamically by Ansible +# still belong to the author of the module, and may assign their own license +# to the complete work. +# +# Copyright: (c) 2018, Johannes Brunswicker +# +# Redistribution and use in source and binary forms, with or without modification, +# are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +import json + +from ansible.module_utils._text import to_native +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.urls import fetch_url + + +class UTMModuleConfigurationError(Exception): + + def __init__(self, msg, **args): + super(UTMModuleConfigurationError, self).__init__(self, msg) + self.msg = msg + self.module_fail_args = args + + def do_fail(self, module): + module.fail_json(msg=self.msg, other=self.module_fail_args) + + +class UTMModule(AnsibleModule): + """ + This is a helper class to construct any UTM Module. 
This will automatically add the utm host, port, token, + protocol, validate_certs and state field to the module. If you want to implement your own sophos utm module + just initialize this UTMModule class and define the Payload fields that are needed for your module. + See the other modules like utm_aaa_group for example. + """ + + def __init__(self, argument_spec, bypass_checks=False, no_log=False, + mutually_exclusive=None, required_together=None, required_one_of=None, add_file_common_args=False, + supports_check_mode=False, required_if=None): + default_specs = dict( + headers=dict(type='dict', required=False, default={}), + utm_host=dict(type='str', required=True), + utm_port=dict(type='int', default=4444), + utm_token=dict(type='str', required=True, no_log=True), + utm_protocol=dict(type='str', required=False, default="https", choices=["https", "http"]), + validate_certs=dict(type='bool', required=False, default=True), + state=dict(default='present', choices=['present', 'absent']) + ) + super(UTMModule, self).__init__(self._merge_specs(default_specs, argument_spec), bypass_checks, no_log, + mutually_exclusive, required_together, required_one_of, + add_file_common_args, supports_check_mode, required_if) + + def _merge_specs(self, default_specs, custom_specs): + result = default_specs.copy() + result.update(custom_specs) + return result + + +class UTM: + + def __init__(self, module, endpoint, change_relevant_keys, info_only=False): + """ + Initialize UTM Class + :param module: The Ansible module + :param endpoint: The corresponding endpoint to the module + :param change_relevant_keys: The keys of the object to check for changes + :param info_only: When implementing an info module, set this to true. Will allow access to the info method only + """ + self.info_only = info_only + self.module = module + self.request_url = module.params.get('utm_protocol') + "://" + module.params.get('utm_host') + ":" + to_native( + module.params.get('utm_port')) + "/api/objects/" + endpoint + "/" + + """ + The change_relevant_keys will be checked for changes to determine whether the object needs to be updated + """ + self.change_relevant_keys = change_relevant_keys + self.module.params['url_username'] = 'token' + self.module.params['url_password'] = module.params.get('utm_token') + if all(elem in self.change_relevant_keys for elem in module.params.keys()): + raise UTMModuleConfigurationError( + "The keys " + to_native( + self.change_relevant_keys) + " to check are not in the modules keys:\n" + to_native( + module.params.keys())) + + def execute(self): + try: + if not self.info_only: + if self.module.params.get('state') == 'present': + self._add() + elif self.module.params.get('state') == 'absent': + self._remove() + else: + self._info() + except Exception as e: + self.module.fail_json(msg=to_native(e)) + + def _info(self): + """ + returns the info for an object in utm + """ + info, result = self._lookup_entry(self.module, self.request_url) + if info["status"] >= 400: + self.module.fail_json(result=json.loads(info)) + else: + if result is None: + self.module.exit_json(changed=False) + else: + self.module.exit_json(result=result, changed=False) + + def _add(self): + """ + adds or updates a host object on utm + """ + + combined_headers = self._combine_headers() + + is_changed = False + info, result = self._lookup_entry(self.module, self.request_url) + if info["status"] >= 400: + self.module.fail_json(result=json.loads(info)) + else: + data_as_json_string = self.module.jsonify(self.module.params) + if result is 
None: + response, info = fetch_url(self.module, self.request_url, method="POST", + headers=combined_headers, + data=data_as_json_string) + if info["status"] >= 400: + self.module.fail_json(msg=json.loads(info["body"])) + is_changed = True + result = self._clean_result(json.loads(response.read())) + else: + if self._is_object_changed(self.change_relevant_keys, self.module, result): + response, info = fetch_url(self.module, self.request_url + result['_ref'], method="PUT", + headers=combined_headers, + data=data_as_json_string) + if info['status'] >= 400: + self.module.fail_json(msg=json.loads(info["body"])) + is_changed = True + result = self._clean_result(json.loads(response.read())) + self.module.exit_json(result=result, changed=is_changed) + + def _combine_headers(self): + """ + This will combine a header default with headers that come from the module declaration + :return: A combined headers dict + """ + default_headers = {"Accept": "application/json", "Content-type": "application/json"} + if self.module.params.get('headers') is not None: + result = default_headers.copy() + result.update(self.module.params.get('headers')) + else: + result = default_headers + return result + + def _remove(self): + """ + removes an object from utm + """ + is_changed = False + info, result = self._lookup_entry(self.module, self.request_url) + if result is not None: + response, info = fetch_url(self.module, self.request_url + result['_ref'], method="DELETE", + headers={"Accept": "application/json", "X-Restd-Err-Ack": "all"}, + data=self.module.jsonify(self.module.params)) + if info["status"] >= 400: + self.module.fail_json(msg=json.loads(info["body"])) + else: + is_changed = True + self.module.exit_json(changed=is_changed) + + def _lookup_entry(self, module, request_url): + """ + Lookup for existing entry + :param module: + :param request_url: + :return: + """ + response, info = fetch_url(module, request_url, method="GET", headers={"Accept": "application/json"}) + result = None + if response is not None: + results = json.loads(response.read()) + result = next(iter(filter(lambda d: d['name'] == module.params.get('name'), results)), None) + return info, result + + def _clean_result(self, result): + """ + Will clean the result from irrelevant fields + :param result: The result from the query + :return: The modified result + """ + del result['utm_host'] + del result['utm_port'] + del result['utm_token'] + del result['utm_protocol'] + del result['validate_certs'] + del result['url_username'] + del result['url_password'] + del result['state'] + return result + + def _is_object_changed(self, keys, module, result): + """ + Check if my object is changed + :param keys: The keys that will determine if an object is changed + :param module: The module + :param result: The result from the query + :return: + """ + for key in keys: + if module.params.get(key) != result[key]: + return True + return False diff --git a/plugins/module_utils/vexata.py b/plugins/module_utils/vexata.py new file mode 100644 index 0000000000..072def8744 --- /dev/null +++ b/plugins/module_utils/vexata.py @@ -0,0 +1,94 @@ +# -*- coding: utf-8 -*- +# +# Copyright: (c) 2019, Sandeep Kasargod +# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause) + + +HAS_VEXATAPI = True +try: + from vexatapi.vexata_api_proxy import VexataAPIProxy +except ImportError: + HAS_VEXATAPI = False + +from ansible.module_utils._text import to_native +from ansible.module_utils.basic import env_fallback + +VXOS_VERSION = None + 
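+# Cached vXOS version tuple for the array; populated by get_array() below
+# via get_version().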
+ +def get_version(iocs_json): + if not iocs_json: + raise Exception('Invalid IOC json') + # list() is needed because filter() returns an iterator on Python 3, + # so the emptiness check and indexing below would otherwise fail. + active = list(filter(lambda x: x['mgmtRole'], iocs_json)) + if not active: + raise Exception('Unable to detect active IOC') + active = active[0] + ver = active['swVersion'] + if ver[0] != 'v': + raise Exception('Illegal version string') + ver = ver[1:ver.find('-')] + ver = map(int, ver.split('.')) + return tuple(ver) + + +def get_array(module): + """Return storage array object or fail""" + global VXOS_VERSION + array = module.params['array'] + user = module.params.get('user', None) + password = module.params.get('password', None) + validate = module.params.get('validate_certs') + + if not HAS_VEXATAPI: + module.fail_json(msg='vexatapi library is required for this module. ' + 'To install, use `pip install vexatapi`') + + if user and password: + system = VexataAPIProxy(array, user, password, verify_cert=validate) + else: + module.fail_json(msg='The user/password are required to be passed in to ' + 'the module as arguments or by setting the ' + 'VEXATA_USER and VEXATA_PASSWORD environment variables.') + try: + if system.test_connection(): + VXOS_VERSION = get_version(system.iocs()) + return system + else: + module.fail_json(msg='Test connection to array failed.') + except Exception as e: + module.fail_json(msg='Vexata API access failed: {0}'.format(to_native(e))) + + +def argument_spec(): + """Return standard base dictionary used for the argument_spec argument in AnsibleModule""" + return dict( + array=dict(type='str', + required=True), + user=dict(type='str', + fallback=(env_fallback, ['VEXATA_USER'])), + password=dict(type='str', + no_log=True, + fallback=(env_fallback, ['VEXATA_PASSWORD'])), + validate_certs=dict(type='bool', + required=False, + default=False), + ) + + +def required_together(): + """Return the default list used for the required_together argument to AnsibleModule""" + return [['user', 'password']] + + +def size_to_MiB(size): + """Convert a size string with an M, G or T suffix (e.g. '10G') to MiB, return -1 on error.""" + quant = size[:-1] + exponent = size[-1] + if not quant.isdigit() or exponent not in 'MGT': + return -1 + quant = int(quant) + if exponent == 'G': + quant <<= 10 + elif exponent == 'T': + quant <<= 20 + return quant diff --git a/plugins/module_utils/vultr.py b/plugins/module_utils/vultr.py new file mode 100644 index 0000000000..e5d23ede8a --- /dev/null +++ b/plugins/module_utils/vultr.py @@ -0,0 +1,333 @@ +# -*- coding: utf-8 -*- +# (c) 2017, René Moser +# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +import os +import time +import random +import urllib +from ansible.module_utils.six.moves import configparser +from ansible.module_utils._text import to_text, to_native +from ansible.module_utils.urls import fetch_url + + +VULTR_API_ENDPOINT = "https://api.vultr.com" +VULTR_USER_AGENT = 'Ansible Vultr' + + +def vultr_argument_spec(): + return dict( + api_key=dict(type='str', default=os.environ.get('VULTR_API_KEY'), no_log=True), + api_timeout=dict(type='int', default=os.environ.get('VULTR_API_TIMEOUT')), + api_retries=dict(type='int', default=os.environ.get('VULTR_API_RETRIES')), + api_retry_max_delay=dict(type='int', default=os.environ.get('VULTR_API_RETRY_MAX_DELAY')), + api_account=dict(type='str', default=os.environ.get('VULTR_API_ACCOUNT') or 'default'), + api_endpoint=dict(type='str', default=os.environ.get('VULTR_API_ENDPOINT')),
validate_certs=dict(type='bool', default=True), + ) + + +class Vultr: + + def __init__(self, module, namespace): + + if module._name.startswith('vr_'): + module.deprecate("The Vultr modules were renamed. The prefix of the modules changed from vr_ to vultr_", version='2.11') + + self.module = module + + # Namespace used for returns + self.namespace = namespace + self.result = { + 'changed': False, + namespace: dict(), + 'diff': dict(before=dict(), after=dict()) + } + + # For caching HTTP API responses + self.api_cache = dict() + + try: + config = self.read_env_variables() + config.update(Vultr.read_ini_config(self.module.params.get('api_account'))) + except KeyError: + config = {} + + try: + self.api_config = { + 'api_key': self.module.params.get('api_key') or config.get('key'), + 'api_timeout': self.module.params.get('api_timeout') or int(config.get('timeout') or 60), + 'api_retries': self.module.params.get('api_retries') or int(config.get('retries') or 5), + 'api_retry_max_delay': self.module.params.get('api_retry_max_delay') or int(config.get('retry_max_delay') or 12), + 'api_endpoint': self.module.params.get('api_endpoint') or config.get('endpoint') or VULTR_API_ENDPOINT, + } + except ValueError as e: + self.fail_json(msg="One of the following settings " + "in section '%s' of the ini config file does not have an integer value: timeout, retries. " + "Error was %s" % (self.module.params.get('api_account'), to_native(e))) + + if not self.api_config.get('api_key'): + self.module.fail_json(msg="The API key is not specified. Please refer to the documentation.") + + # Common vultr returns + self.result['vultr_api'] = { + 'api_account': self.module.params.get('api_account'), + 'api_timeout': self.api_config['api_timeout'], + 'api_retries': self.api_config['api_retries'], + 'api_retry_max_delay': self.api_config['api_retry_max_delay'], + 'api_endpoint': self.api_config['api_endpoint'], + } + + # Headers to be passed to the API + self.headers = { + 'API-Key': "%s" % self.api_config['api_key'], + 'User-Agent': VULTR_USER_AGENT, + 'Accept': 'application/json', + } + + def read_env_variables(self): + keys = ['key', 'timeout', 'retries', 'retry_max_delay', 'endpoint'] + env_conf = {} + for key in keys: + if 'VULTR_API_%s' % key.upper() not in os.environ: + continue + env_conf[key] = os.environ['VULTR_API_%s' % key.upper()] + + return env_conf + + @staticmethod + def read_ini_config(ini_group): + paths = ( + os.path.join(os.path.expanduser('~'), '.vultr.ini'), + os.path.join(os.getcwd(), 'vultr.ini'), + ) + if 'VULTR_API_CONFIG' in os.environ: + paths += (os.path.expanduser(os.environ['VULTR_API_CONFIG']),) + + conf = configparser.ConfigParser() + conf.read(paths) + + if not conf._sections.get(ini_group): + return dict() + + return dict(conf.items(ini_group)) + + def fail_json(self, **kwargs): + self.result.update(kwargs) + self.module.fail_json(**self.result) + + def get_yes_or_no(self, key): + if self.module.params.get(key) is not None: + return 'yes' if self.module.params.get(key) is True else 'no' + + def switch_enable_disable(self, resource, param_key, resource_key=None): + if resource_key is None: + resource_key = param_key + + param = self.module.params.get(param_key) + if param is None: + return + + r_value = resource.get(resource_key) + if r_value in ['yes', 'no']: + if param and r_value != 'yes': + return "enable" + elif not param and r_value != 'no': + return "disable" + else: + if param and not r_value: + return "enable" + elif not param and r_value: + return "disable" + + def api_query(self,
path="/", method="GET", data=None): + url = self.api_config['api_endpoint'] + path + + if data: + data_encoded = dict() + data_list = "" + for k, v in data.items(): + if isinstance(v, list): + for s in v: + try: + data_list += '&%s[]=%s' % (k, urllib.quote(s)) + except AttributeError: + data_list += '&%s[]=%s' % (k, urllib.parse.quote(s)) + elif v is not None: + data_encoded[k] = v + try: + data = urllib.urlencode(data_encoded) + data_list + except AttributeError: + data = urllib.parse.urlencode(data_encoded) + data_list + + retry_max_delay = self.api_config['api_retry_max_delay'] + randomness = random.randint(0, 1000) / 1000.0 + + for retry in range(0, self.api_config['api_retries']): + response, info = fetch_url( + module=self.module, + url=url, + data=data, + method=method, + headers=self.headers, + timeout=self.api_config['api_timeout'], + ) + + if info.get('status') == 200: + break + + # Vultr has a rate limiting requests per second, try to be polite + # Use exponential backoff plus a little bit of randomness + delay = 2 ** retry + randomness + if delay > retry_max_delay: + delay = retry_max_delay + randomness + time.sleep(delay) + + else: + self.fail_json(msg="Reached API retries limit %s for URL %s, method %s with data %s. Returned %s, with body: %s %s" % ( + self.api_config['api_retries'], + url, + method, + data, + info['status'], + info['msg'], + info.get('body') + )) + + if info.get('status') != 200: + self.fail_json(msg="URL %s, method %s with data %s. Returned %s, with body: %s %s" % ( + url, + method, + data, + info['status'], + info['msg'], + info.get('body') + )) + + res = response.read() + if not res: + return {} + + try: + return self.module.from_json(to_native(res)) or {} + except ValueError as e: + self.module.fail_json(msg="Could not process response into json: %s" % e) + + def query_resource_by_key(self, key, value, resource='regions', query_by='list', params=None, use_cache=False, id_key=None, optional=False): + if not value: + return {} + + r_list = None + if use_cache: + r_list = self.api_cache.get(resource) + + if not r_list: + r_list = self.api_query(path="/v1/%s/%s" % (resource, query_by), data=params) + if use_cache: + self.api_cache.update({ + resource: r_list + }) + + if not r_list: + return {} + + elif isinstance(r_list, list): + for r_data in r_list: + if str(r_data[key]) == str(value): + return r_data + if id_key is not None and to_text(r_data[id_key]) == to_text(value): + return r_data + elif isinstance(r_list, dict): + for r_id, r_data in r_list.items(): + if str(r_data[key]) == str(value): + return r_data + if id_key is not None and to_text(r_data[id_key]) == to_text(value): + return r_data + if not optional: + if id_key: + msg = "Could not find %s with ID or %s: %s" % (resource, key, value) + else: + msg = "Could not find %s with %s: %s" % (resource, key, value) + self.module.fail_json(msg=msg) + return {} + + @staticmethod + def normalize_result(resource, schema, remove_missing_keys=True): + if remove_missing_keys: + fields_to_remove = set(resource.keys()) - set(schema.keys()) + for field in fields_to_remove: + resource.pop(field) + + for search_key, config in schema.items(): + if search_key in resource: + if 'convert_to' in config: + if config['convert_to'] == 'int': + resource[search_key] = int(resource[search_key]) + elif config['convert_to'] == 'float': + resource[search_key] = float(resource[search_key]) + elif config['convert_to'] == 'bool': + resource[search_key] = True if resource[search_key] == 'yes' else False + + if 'transform' in config: + 
resource[search_key] = config['transform'](resource[search_key]) + + if 'key' in config: + resource[config['key']] = resource[search_key] + del resource[search_key] + + return resource + + def get_result(self, resource): + if resource: + if isinstance(resource, list): + self.result[self.namespace] = [Vultr.normalize_result(item, self.returns) for item in resource] + else: + self.result[self.namespace] = Vultr.normalize_result(resource, self.returns) + + return self.result + + def get_plan(self, plan=None, key='name', optional=False): + value = plan or self.module.params.get('plan') + + return self.query_resource_by_key( + key=key, + value=value, + resource='plans', + use_cache=True, + id_key='VPSPLANID', + optional=optional, + ) + + def get_firewallgroup(self, firewallgroup=None, key='description'): + value = firewallgroup or self.module.params.get('firewallgroup') + + return self.query_resource_by_key( + key=key, + value=value, + resource='firewall', + query_by='group_list', + use_cache=True + ) + + def get_application(self, application=None, key='name'): + value = application or self.module.params.get('application') + + return self.query_resource_by_key( + key=key, + value=value, + resource='app', + use_cache=True + ) + + def get_region(self, region=None, key='name'): + value = region or self.module.params.get('region') + + return self.query_resource_by_key( + key=key, + value=value, + resource='regions', + use_cache=True + ) diff --git a/plugins/module_utils/xenserver.py b/plugins/module_utils/xenserver.py new file mode 100644 index 0000000000..dbc6a0adbe --- /dev/null +++ b/plugins/module_utils/xenserver.py @@ -0,0 +1,862 @@ +# -*- coding: utf-8 -*- +# +# Copyright: (c) 2018, Bojan Vitnik +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +import atexit +import time +import re +import traceback + +XENAPI_IMP_ERR = None +try: + import XenAPI + HAS_XENAPI = True +except ImportError: + HAS_XENAPI = False + XENAPI_IMP_ERR = traceback.format_exc() + +from ansible.module_utils.basic import env_fallback, missing_required_lib +from ansible.module_utils.common.network import is_mac +from ansible.module_utils.ansible_release import __version__ as ANSIBLE_VERSION + + +def xenserver_common_argument_spec(): + return dict( + hostname=dict(type='str', + aliases=['host', 'pool'], + required=False, + default='localhost', + fallback=(env_fallback, ['XENSERVER_HOST']), + ), + username=dict(type='str', + aliases=['user', 'admin'], + required=False, + default='root', + fallback=(env_fallback, ['XENSERVER_USER'])), + password=dict(type='str', + aliases=['pass', 'pwd'], + required=False, + no_log=True, + fallback=(env_fallback, ['XENSERVER_PASSWORD'])), + validate_certs=dict(type='bool', + required=False, + default=True, + fallback=(env_fallback, ['XENSERVER_VALIDATE_CERTS'])), + ) + + +def xapi_to_module_vm_power_state(power_state): + """Maps XAPI VM power states to module VM power states.""" + module_power_state_map = { + "running": "poweredon", + "halted": "poweredoff", + "suspended": "suspended", + "paused": "paused" + } + + return module_power_state_map.get(power_state) + + +def module_to_xapi_vm_power_state(power_state): + """Maps module VM power states to XAPI VM power states.""" + vm_power_state_map = { + "poweredon": "running", + "poweredoff": "halted", + "restarted": "running", + "suspended": "suspended", + "shutdownguest": "halted", + "rebootguest": "running", + } 
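Since module_to_xapi_vm_power_state() above is intentionally many-to-one, round-tripping through the two maps does not always return the original module state. A small illustration, with copies of the two mappings inlined so the snippet runs standalone:

# Copies of the two mappings above, inlined for a runnable illustration.
xapi_to_module = {"running": "poweredon", "halted": "poweredoff",
                  "suspended": "suspended", "paused": "paused"}
module_to_xapi = {"poweredon": "running", "poweredoff": "halted", "restarted": "running",
                  "suspended": "suspended", "shutdownguest": "halted", "rebootguest": "running"}

for state in ("poweredon", "restarted", "rebootguest"):
    # All three map to XAPI "running", which reads back as "poweredon",
    # so callers must track transient requests like "restarted" themselves.
    print(state, "->", module_to_xapi[state], "->", xapi_to_module[module_to_xapi[state]])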
+ + return vm_power_state_map.get(power_state) + + +def is_valid_ip_addr(ip_addr): + """Validates given string as IPv4 address for given string. + + Args: + ip_addr (str): string to validate as IPv4 address. + + Returns: + bool: True if string is valid IPv4 address, else False. + """ + ip_addr_split = ip_addr.split('.') + + if len(ip_addr_split) != 4: + return False + + for ip_addr_octet in ip_addr_split: + if not ip_addr_octet.isdigit(): + return False + + ip_addr_octet_int = int(ip_addr_octet) + + if ip_addr_octet_int < 0 or ip_addr_octet_int > 255: + return False + + return True + + +def is_valid_ip_netmask(ip_netmask): + """Validates given string as IPv4 netmask. + + Args: + ip_netmask (str): string to validate as IPv4 netmask. + + Returns: + bool: True if string is valid IPv4 netmask, else False. + """ + ip_netmask_split = ip_netmask.split('.') + + if len(ip_netmask_split) != 4: + return False + + valid_octet_values = ['0', '128', '192', '224', '240', '248', '252', '254', '255'] + + for ip_netmask_octet in ip_netmask_split: + if ip_netmask_octet not in valid_octet_values: + return False + + if ip_netmask_split[0] != '255' and (ip_netmask_split[1] != '0' or ip_netmask_split[2] != '0' or ip_netmask_split[3] != '0'): + return False + elif ip_netmask_split[1] != '255' and (ip_netmask_split[2] != '0' or ip_netmask_split[3] != '0'): + return False + elif ip_netmask_split[2] != '255' and ip_netmask_split[3] != '0': + return False + + return True + + +def is_valid_ip_prefix(ip_prefix): + """Validates given string as IPv4 prefix. + + Args: + ip_prefix (str): string to validate as IPv4 prefix. + + Returns: + bool: True if string is valid IPv4 prefix, else False. + """ + if not ip_prefix.isdigit(): + return False + + ip_prefix_int = int(ip_prefix) + + if ip_prefix_int < 0 or ip_prefix_int > 32: + return False + + return True + + +def ip_prefix_to_netmask(ip_prefix, skip_check=False): + """Converts IPv4 prefix to netmask. + + Args: + ip_prefix (str): IPv4 prefix to convert. + skip_check (bool): Skip validation of IPv4 prefix + (default: False). Use if you are sure IPv4 prefix is valid. + + Returns: + str: IPv4 netmask equivalent to given IPv4 prefix if + IPv4 prefix is valid, else an empty string. + """ + if skip_check: + ip_prefix_valid = True + else: + ip_prefix_valid = is_valid_ip_prefix(ip_prefix) + + if ip_prefix_valid: + return '.'.join([str((0xffffffff << (32 - int(ip_prefix)) >> i) & 0xff) for i in [24, 16, 8, 0]]) + else: + return "" + + +def ip_netmask_to_prefix(ip_netmask, skip_check=False): + """Converts IPv4 netmask to prefix. + + Args: + ip_netmask (str): IPv4 netmask to convert. + skip_check (bool): Skip validation of IPv4 netmask + (default: False). Use if you are sure IPv4 netmask is valid. + + Returns: + str: IPv4 prefix equivalent to given IPv4 netmask if + IPv4 netmask is valid, else an empty string. + """ + if skip_check: + ip_netmask_valid = True + else: + ip_netmask_valid = is_valid_ip_netmask(ip_netmask) + + if ip_netmask_valid: + return str(sum([bin(int(i)).count("1") for i in ip_netmask.split(".")])) + else: + return "" + + +def is_valid_ip6_addr(ip6_addr): + """Validates given string as IPv6 address. + + Args: + ip6_addr (str): string to validate as IPv6 address. + + Returns: + bool: True if string is valid IPv6 address, else False. 
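A worked example of the two IPv4 conversions above, with the same arithmetic inlined so the snippet runs standalone (these are illustrative copies, not new API):

def prefix_to_netmask(prefix):
    # Same arithmetic as ip_prefix_to_netmask(): build the 32-bit mask,
    # then slice it into four octets.
    return '.'.join([str((0xffffffff << (32 - int(prefix)) >> i) & 0xff) for i in [24, 16, 8, 0]])

def netmask_to_prefix(netmask):
    # Same as ip_netmask_to_prefix(): count the set bits across all four octets.
    return str(sum(bin(int(i)).count('1') for i in netmask.split('.')))

print(prefix_to_netmask('24'))             # 255.255.255.0
print(prefix_to_netmask('20'))             # 255.255.240.0
print(netmask_to_prefix('255.255.240.0'))  # 20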
+ """ + ip6_addr = ip6_addr.lower() + ip6_addr_split = ip6_addr.split(':') + + if ip6_addr_split[0] == "": + ip6_addr_split.pop(0) + + if ip6_addr_split[-1] == "": + ip6_addr_split.pop(-1) + + if len(ip6_addr_split) > 8: + return False + + if ip6_addr_split.count("") > 1: + return False + elif ip6_addr_split.count("") == 1: + ip6_addr_split.remove("") + else: + if len(ip6_addr_split) != 8: + return False + + ip6_addr_hextet_regex = re.compile('^[0-9a-f]{1,4}$') + + for ip6_addr_hextet in ip6_addr_split: + if not bool(ip6_addr_hextet_regex.match(ip6_addr_hextet)): + return False + + return True + + +def is_valid_ip6_prefix(ip6_prefix): + """Validates given string as IPv6 prefix. + + Args: + ip6_prefix (str): string to validate as IPv6 prefix. + + Returns: + bool: True if string is valid IPv6 prefix, else False. + """ + if not ip6_prefix.isdigit(): + return False + + ip6_prefix_int = int(ip6_prefix) + + if ip6_prefix_int < 0 or ip6_prefix_int > 128: + return False + + return True + + +def get_object_ref(module, name, uuid=None, obj_type="VM", fail=True, msg_prefix=""): + """Finds and returns a reference to arbitrary XAPI object. + + An object is searched by using either name (name_label) or UUID + with UUID taken precedence over name. + + Args: + module: Reference to Ansible module object. + name (str): Name (name_label) of an object to search for. + uuid (str): UUID of an object to search for. + obj_type (str): Any valid XAPI object type. See XAPI docs. + fail (bool): Should function fail with error message if object + is not found or exit silently (default: True). The function + always fails if multiple objects with same name are found. + msg_prefix (str): A string error messages should be prefixed + with (default: ""). + + Returns: + XAPI reference to found object or None if object is not found + and fail=False. + """ + xapi_session = XAPI.connect(module) + + if obj_type in ["template", "snapshot"]: + real_obj_type = "VM" + elif obj_type == "home server": + real_obj_type = "host" + elif obj_type == "ISO image": + real_obj_type = "VDI" + else: + real_obj_type = obj_type + + obj_ref = None + + # UUID has precedence over name. + if uuid: + try: + # Find object by UUID. If no object is found using given UUID, + # an exception will be generated. + obj_ref = xapi_session.xenapi_request("%s.get_by_uuid" % real_obj_type, (uuid,)) + except XenAPI.Failure as f: + if fail: + module.fail_json(msg="%s%s with UUID '%s' not found!" % (msg_prefix, obj_type, uuid)) + elif name: + try: + # Find object by name (name_label). + obj_ref_list = xapi_session.xenapi_request("%s.get_by_name_label" % real_obj_type, (name,)) + except XenAPI.Failure as f: + module.fail_json(msg="XAPI ERROR: %s" % f.details) + + # If obj_ref_list is empty. + if not obj_ref_list: + if fail: + module.fail_json(msg="%s%s with name '%s' not found!" % (msg_prefix, obj_type, name)) + # If obj_ref_list contains multiple object references. + elif len(obj_ref_list) > 1: + module.fail_json(msg="%smultiple %ss with name '%s' found! Please use UUID." % (msg_prefix, obj_type, name)) + # The obj_ref_list contains only one object reference. + else: + obj_ref = obj_ref_list[0] + else: + module.fail_json(msg="%sno valid name or UUID supplied for %s!" % (msg_prefix, obj_type)) + + return obj_ref + + +def gather_vm_params(module, vm_ref): + """Gathers all VM parameters available in XAPI database. + + Args: + module: Reference to Ansible module object. + vm_ref (str): XAPI reference to VM. + + Returns: + dict: VM parameters. 
+ """ + # We silently return empty vm_params if bad vm_ref was supplied. + if not vm_ref or vm_ref == "OpaqueRef:NULL": + return {} + + xapi_session = XAPI.connect(module) + + try: + vm_params = xapi_session.xenapi.VM.get_record(vm_ref) + + # We need some params like affinity, VBDs, VIFs, VDIs etc. dereferenced. + + # Affinity. + if vm_params['affinity'] != "OpaqueRef:NULL": + vm_affinity = xapi_session.xenapi.host.get_record(vm_params['affinity']) + vm_params['affinity'] = vm_affinity + else: + vm_params['affinity'] = {} + + # VBDs. + vm_vbd_params_list = [xapi_session.xenapi.VBD.get_record(vm_vbd_ref) for vm_vbd_ref in vm_params['VBDs']] + + # List of VBDs is usually sorted by userdevice but we sort just + # in case. We need this list sorted by userdevice so that we can + # make positional pairing with module.params['disks']. + vm_vbd_params_list = sorted(vm_vbd_params_list, key=lambda vm_vbd_params: int(vm_vbd_params['userdevice'])) + vm_params['VBDs'] = vm_vbd_params_list + + # VDIs. + for vm_vbd_params in vm_params['VBDs']: + if vm_vbd_params['VDI'] != "OpaqueRef:NULL": + vm_vdi_params = xapi_session.xenapi.VDI.get_record(vm_vbd_params['VDI']) + else: + vm_vdi_params = {} + + vm_vbd_params['VDI'] = vm_vdi_params + + # VIFs. + vm_vif_params_list = [xapi_session.xenapi.VIF.get_record(vm_vif_ref) for vm_vif_ref in vm_params['VIFs']] + + # List of VIFs is usually sorted by device but we sort just + # in case. We need this list sorted by device so that we can + # make positional pairing with module.params['networks']. + vm_vif_params_list = sorted(vm_vif_params_list, key=lambda vm_vif_params: int(vm_vif_params['device'])) + vm_params['VIFs'] = vm_vif_params_list + + # Networks. + for vm_vif_params in vm_params['VIFs']: + if vm_vif_params['network'] != "OpaqueRef:NULL": + vm_network_params = xapi_session.xenapi.network.get_record(vm_vif_params['network']) + else: + vm_network_params = {} + + vm_vif_params['network'] = vm_network_params + + # Guest metrics. + if vm_params['guest_metrics'] != "OpaqueRef:NULL": + vm_guest_metrics = xapi_session.xenapi.VM_guest_metrics.get_record(vm_params['guest_metrics']) + vm_params['guest_metrics'] = vm_guest_metrics + else: + vm_params['guest_metrics'] = {} + + # Detect customization agent. + xenserver_version = get_xenserver_version(module) + + if (xenserver_version[0] >= 7 and xenserver_version[1] >= 0 and vm_params.get('guest_metrics') and + "feature-static-ip-setting" in vm_params['guest_metrics']['other']): + vm_params['customization_agent'] = "native" + else: + vm_params['customization_agent'] = "custom" + + except XenAPI.Failure as f: + module.fail_json(msg="XAPI ERROR: %s" % f.details) + + return vm_params + + +def gather_vm_facts(module, vm_params): + """Gathers VM facts. + + Args: + module: Reference to Ansible module object. + vm_params (dict): A dictionary with VM parameters as returned + by gather_vm_params() function. + + Returns: + dict: VM facts. + """ + # We silently return empty vm_facts if no vm_params are available. + if not vm_params: + return {} + + xapi_session = XAPI.connect(module) + + # Gather facts. 
+ vm_facts = { + "state": xapi_to_module_vm_power_state(vm_params['power_state'].lower()), + "name": vm_params['name_label'], + "name_desc": vm_params['name_description'], + "uuid": vm_params['uuid'], + "is_template": vm_params['is_a_template'], + "folder": vm_params['other_config'].get('folder', ''), + "hardware": { + "num_cpus": int(vm_params['VCPUs_max']), + "num_cpu_cores_per_socket": int(vm_params['platform'].get('cores-per-socket', '1')), + "memory_mb": int(int(vm_params['memory_dynamic_max']) / 1048576), + }, + "disks": [], + "cdrom": {}, + "networks": [], + "home_server": vm_params['affinity'].get('name_label', ''), + "domid": vm_params['domid'], + "platform": vm_params['platform'], + "other_config": vm_params['other_config'], + "xenstore_data": vm_params['xenstore_data'], + "customization_agent": vm_params['customization_agent'], + } + + for vm_vbd_params in vm_params['VBDs']: + if vm_vbd_params['type'] == "Disk": + vm_disk_sr_params = xapi_session.xenapi.SR.get_record(vm_vbd_params['VDI']['SR']) + + vm_disk_params = { + "size": int(vm_vbd_params['VDI']['virtual_size']), + "name": vm_vbd_params['VDI']['name_label'], + "name_desc": vm_vbd_params['VDI']['name_description'], + "sr": vm_disk_sr_params['name_label'], + "sr_uuid": vm_disk_sr_params['uuid'], + "os_device": vm_vbd_params['device'], + "vbd_userdevice": vm_vbd_params['userdevice'], + } + + vm_facts['disks'].append(vm_disk_params) + elif vm_vbd_params['type'] == "CD": + if vm_vbd_params['empty']: + vm_facts['cdrom'].update(type="none") + else: + vm_facts['cdrom'].update(type="iso") + vm_facts['cdrom'].update(iso_name=vm_vbd_params['VDI']['name_label']) + + for vm_vif_params in vm_params['VIFs']: + vm_guest_metrics_networks = vm_params['guest_metrics'].get('networks', {}) + + vm_network_params = { + "name": vm_vif_params['network']['name_label'], + "mac": vm_vif_params['MAC'], + "vif_device": vm_vif_params['device'], + "mtu": vm_vif_params['MTU'], + "ip": vm_guest_metrics_networks.get("%s/ip" % vm_vif_params['device'], ''), + "prefix": "", + "netmask": "", + "gateway": "", + "ip6": [vm_guest_metrics_networks[ipv6] for ipv6 in sorted(vm_guest_metrics_networks.keys()) if ipv6.startswith("%s/ipv6/" % + vm_vif_params['device'])], + "prefix6": "", + "gateway6": "", + } + + if vm_params['customization_agent'] == "native": + if vm_vif_params['ipv4_addresses'] and vm_vif_params['ipv4_addresses'][0]: + vm_network_params['prefix'] = vm_vif_params['ipv4_addresses'][0].split('/')[1] + vm_network_params['netmask'] = ip_prefix_to_netmask(vm_network_params['prefix']) + + vm_network_params['gateway'] = vm_vif_params['ipv4_gateway'] + + if vm_vif_params['ipv6_addresses'] and vm_vif_params['ipv6_addresses'][0]: + vm_network_params['prefix6'] = vm_vif_params['ipv6_addresses'][0].split('/')[1] + + vm_network_params['gateway6'] = vm_vif_params['ipv6_gateway'] + + elif vm_params['customization_agent'] == "custom": + vm_xenstore_data = vm_params['xenstore_data'] + + for f in ['prefix', 'netmask', 'gateway', 'prefix6', 'gateway6']: + vm_network_params[f] = vm_xenstore_data.get("vm-data/networks/%s/%s" % (vm_vif_params['device'], f), "") + + vm_facts['networks'].append(vm_network_params) + + return vm_facts + + +def set_vm_power_state(module, vm_ref, power_state, timeout=300): + """Controls VM power state. + + Args: + module: Reference to Ansible module object. + vm_ref (str): XAPI reference to VM. + power_state (str): Power state to put VM into. 
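The per-device IP lookups in gather_vm_facts() above key into the guest-metrics 'networks' dict by naming convention ('<device>/ip', '<device>/ipv6/<n>'). A self-contained sketch with hypothetical sample data; real data comes from VM_guest_metrics.get_record()['networks']:

# Hypothetical guest-metrics payload for two VIFs (devices '0' and '1').
networks = {
    '0/ip': '10.0.0.5',
    '0/ipv6/0': 'fe80::1',
    '0/ipv6/1': '2001:db8::5',
    '1/ip': '192.168.1.9',
}

device = '0'
ip = networks.get('%s/ip' % device, '')
# Sorting the keys keeps the ipv6 addresses in their '/ipv6/<n>' index order.
ip6 = [networks[key] for key in sorted(networks.keys())
       if key.startswith('%s/ipv6/' % device)]
print(ip, ip6)  # 10.0.0.5 ['fe80::1', '2001:db8::5']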
Accepted values: + + - poweredon + - poweredoff + - restarted + - suspended + - shutdownguest + - rebootguest + + timeout (int): timeout in seconds (default: 300). + + Returns: + tuple (bool, str): Bool element is True if VM power state has + changed by calling this function, else False. Str element carries + a value of resulting power state as defined by XAPI - 'running', + 'halted' or 'suspended'. + """ + # Fail if we don't have a valid VM reference. + if not vm_ref or vm_ref == "OpaqueRef:NULL": + module.fail_json(msg="Cannot set VM power state. Invalid VM reference supplied!") + + xapi_session = XAPI.connect(module) + + power_state = power_state.replace('_', '').replace('-', '').lower() + vm_power_state_resulting = module_to_xapi_vm_power_state(power_state) + + state_changed = False + + try: + # Get current state of the VM. + vm_power_state_current = xapi_to_module_vm_power_state(xapi_session.xenapi.VM.get_power_state(vm_ref).lower()) + + if vm_power_state_current != power_state: + if power_state == "poweredon": + if not module.check_mode: + # VM can be in either halted, suspended, paused or running state. + # For VM to be in running state, start has to be called on halted, + # resume on suspended and unpause on paused VM. + if vm_power_state_current == "poweredoff": + xapi_session.xenapi.VM.start(vm_ref, False, False) + elif vm_power_state_current == "suspended": + xapi_session.xenapi.VM.resume(vm_ref, False, False) + elif vm_power_state_current == "paused": + xapi_session.xenapi.VM.unpause(vm_ref) + elif power_state == "poweredoff": + if not module.check_mode: + # hard_shutdown will halt VM regardless of current state. + xapi_session.xenapi.VM.hard_shutdown(vm_ref) + elif power_state == "restarted": + # hard_reboot will restart VM only if VM is in paused or running state. + if vm_power_state_current in ["paused", "poweredon"]: + if not module.check_mode: + xapi_session.xenapi.VM.hard_reboot(vm_ref) + else: + module.fail_json(msg="Cannot restart VM in state '%s'!" % vm_power_state_current) + elif power_state == "suspended": + # running state is required for suspend. + if vm_power_state_current == "poweredon": + if not module.check_mode: + xapi_session.xenapi.VM.suspend(vm_ref) + else: + module.fail_json(msg="Cannot suspend VM in state '%s'!" % vm_power_state_current) + elif power_state == "shutdownguest": + # running state is required for guest shutdown. + if vm_power_state_current == "poweredon": + if not module.check_mode: + if timeout == 0: + xapi_session.xenapi.VM.clean_shutdown(vm_ref) + else: + task_ref = xapi_session.xenapi.Async.VM.clean_shutdown(vm_ref) + task_result = wait_for_task(module, task_ref, timeout) + + if task_result: + module.fail_json(msg="Guest shutdown task failed: '%s'!" % task_result) + else: + module.fail_json(msg="Cannot shutdown guest when VM is in state '%s'!" % vm_power_state_current) + elif power_state == "rebootguest": + # running state is required for guest reboot. + if vm_power_state_current == "poweredon": + if not module.check_mode: + if timeout == 0: + xapi_session.xenapi.VM.clean_reboot(vm_ref) + else: + task_ref = xapi_session.xenapi.Async.VM.clean_reboot(vm_ref) + task_result = wait_for_task(module, task_ref, timeout) + + if task_result: + module.fail_json(msg="Guest reboot task failed: '%s'!" % task_result) + else: + module.fail_json(msg="Cannot reboot guest when VM is in state '%s'!" % vm_power_state_current) + else: + module.fail_json(msg="Requested VM power state '%s' is unsupported!" 
% power_state) + + state_changed = True + except XenAPI.Failure as f: + module.fail_json(msg="XAPI ERROR: %s" % f.details) + + return (state_changed, vm_power_state_resulting) + + +def wait_for_task(module, task_ref, timeout=300): + """Waits for async XAPI task to finish. + + Args: + module: Reference to Ansible module object. + task_ref (str): XAPI reference to task. + timeout (int): timeout in seconds (default: 300). + + Returns: + str: failure message on failure, else an empty string. + """ + # Fail if we don't have a valid task reference. + if not task_ref or task_ref == "OpaqueRef:NULL": + module.fail_json(msg="Cannot wait for task. Invalid task reference supplied!") + + xapi_session = XAPI.connect(module) + + interval = 2 + + result = "" + + # If we have to wait indefinitely, make time_left larger than 0 so we can + # enter while loop. + if timeout == 0: + time_left = 1 + else: + time_left = timeout + + try: + while time_left > 0: + task_status = xapi_session.xenapi.task.get_status(task_ref).lower() + + if task_status == "pending": + # Task is still running. + time.sleep(interval) + + # We decrease time_left only if we don't wait indefinitely. + if timeout != 0: + time_left -= interval + + continue + elif task_status == "success": + # Task is done. + break + else: + # Task failed. + result = task_status + break + else: + # We timed out. + result = "timeout" + + xapi_session.xenapi.task.destroy(task_ref) + except XenAPI.Failure as f: + module.fail_json(msg="XAPI ERROR: %s" % f.details) + + return result + + +def wait_for_vm_ip_address(module, vm_ref, timeout=300): + """Waits for VM to acquire an IP address. + + Args: + module: Reference to Ansible module object. + vm_ref (str): XAPI reference to VM. + timeout (int): timeout in seconds (default: 300). + + Returns: + dict: VM guest metrics as retrieved by + VM_guest_metrics.get_record() XAPI method with info + on IP address acquired. + """ + # Fail if we don't have a valid VM reference. + if not vm_ref or vm_ref == "OpaqueRef:NULL": + module.fail_json(msg="Cannot wait for VM IP address. Invalid VM reference supplied!") + + xapi_session = XAPI.connect(module) + + vm_guest_metrics = {} + + try: + # We translate VM power state string so that error message can be + # consistent with module VM power states. + vm_power_state = xapi_to_module_vm_power_state(xapi_session.xenapi.VM.get_power_state(vm_ref).lower()) + + if vm_power_state != 'poweredon': + module.fail_json(msg="Cannot wait for VM IP address when VM is in state '%s'!" % vm_power_state) + + interval = 2 + + # If we have to wait indefinitely, make time_left larger than 0 so we can + # enter while loop. + if timeout == 0: + time_left = 1 + else: + time_left = timeout + + while time_left > 0: + vm_guest_metrics_ref = xapi_session.xenapi.VM.get_guest_metrics(vm_ref) + + if vm_guest_metrics_ref != "OpaqueRef:NULL": + vm_guest_metrics = xapi_session.xenapi.VM_guest_metrics.get_record(vm_guest_metrics_ref) + vm_ips = vm_guest_metrics['networks'] + + if "0/ip" in vm_ips: + break + + time.sleep(interval) + + # We decrease time_left only if we don't wait indefinitely. + if timeout != 0: + time_left -= interval + else: + # We timed out. + module.fail_json(msg="Timed out waiting for VM IP address!") + + except XenAPI.Failure as f: + module.fail_json(msg="XAPI ERROR: %s" % f.details) + + return vm_guest_metrics + + +def get_xenserver_version(module): + """Returns XenServer version. + + Args: + module: Reference to Ansible module object. + + Returns: + list: Element [0] is major version. 
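Both wait_for_task() and wait_for_vm_ip_address() above share the same polling shape, including the convention that timeout=0 means "wait indefinitely". A generic, standalone reduction of that pattern (poll() and bump() are illustrative names, not part of this module_utils):

import time

def poll(check, timeout=300, interval=2):
    # timeout == 0 means wait indefinitely, mirroring the helpers above:
    # time_left starts at 1 and is never decremented in that case.
    time_left = 1 if timeout == 0 else timeout
    while time_left > 0:
        if check():
            return True
        time.sleep(interval)
        if timeout != 0:
            time_left -= interval
    return False  # timed out

# Example: poll a counter until it reaches 3 (stands in for task.get_status()).
state = {'n': 0}
def bump():
    state['n'] += 1
    return state['n'] >= 3
print(poll(bump, timeout=1, interval=0.1))  # True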
Element [1] is minor version. + Element [2] is update number. + """ + xapi_session = XAPI.connect(module) + + host_ref = xapi_session.xenapi.session.get_this_host(xapi_session._session) + + try: + xenserver_version = [int(version_number) for version_number in xapi_session.xenapi.host.get_software_version(host_ref)['product_version'].split('.')] + except ValueError: + xenserver_version = [0, 0, 0] + + return xenserver_version + + +class XAPI(object): + """Class for XAPI session management.""" + _xapi_session = None + + @classmethod + def connect(cls, module, disconnect_atexit=True): + """Establishes XAPI connection and returns session reference. + + If no existing session is available, establishes a new one + and returns it, else returns existing one. + + Args: + module: Reference to Ansible module object. + disconnect_atexit (bool): Controls if method should + register atexit handler to disconnect from XenServer + on module exit (default: True). + + Returns: + XAPI session reference. + """ + if cls._xapi_session is not None: + return cls._xapi_session + + hostname = module.params['hostname'] + username = module.params['username'] + password = module.params['password'] + ignore_ssl = not module.params['validate_certs'] + + if hostname == 'localhost': + cls._xapi_session = XenAPI.xapi_local() + username = '' + password = '' + else: + # If scheme is not specified we default to http:// because https:// + # is problematic in most setups. + if not hostname.startswith("http://") and not hostname.startswith("https://"): + hostname = "http://%s" % hostname + + try: + # ignore_ssl is supported in XenAPI library from XenServer 7.2 + # SDK onward but there is no way to tell which version we + # are using. TypeError will be raised if ignore_ssl is not + # supported. Additionally, ignore_ssl requires Python 2.7.9 + # or newer. + cls._xapi_session = XenAPI.Session(hostname, ignore_ssl=ignore_ssl) + except TypeError: + # Try without ignore_ssl. + cls._xapi_session = XenAPI.Session(hostname) + + if not password: + password = '' + + try: + cls._xapi_session.login_with_password(username, password, ANSIBLE_VERSION, 'Ansible') + except XenAPI.Failure as f: + module.fail_json(msg="Unable to log on to XenServer at %s as %s: %s" % (hostname, username, f.details)) + + # Disabling atexit should be used in special cases only. + if disconnect_atexit: + atexit.register(cls._xapi_session.logout) + + return cls._xapi_session + + +class XenServerObject(object): + """Base class for all XenServer objects. + + This class contains active XAPI session reference and common + attributes with useful info about XenServer host/pool. + + Attributes: + module: Reference to Ansible module object. + xapi_session: Reference to XAPI session. + pool_ref (str): XAPI reference to a pool currently connected to. + default_sr_ref (str): XAPI reference to a pool default + Storage Repository. + host_ref (str): XAPI reference to a host currently connected to. + xenserver_version (list of int): Contains XenServer major, + minor and update version. + """ + + def __init__(self, module): + """Inits XenServerObject using common module parameters. + + Args: + module: Reference to Ansible module object.
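XAPI.connect() above caches the session on the class so every helper in a single module run shares one login. A stripped-down sketch of that memoization pattern, with the connection details faked so it runs anywhere (Connection and the dict session are stand-ins for XenAPI.Session):

class Connection(object):
    _session = None

    @classmethod
    def connect(cls, url):
        # The first caller pays the login cost; later callers get the cached object.
        if cls._session is None:
            cls._session = {'url': url, 'logged_in': True}  # stand-in for XenAPI.Session
        return cls._session

a = Connection.connect('http://xenserver1')
b = Connection.connect('http://xenserver1')
print(a is b)  # True - one shared session per process

One consequence, visible in the real helper too: the first set of connection parameters wins, and a later connect() with different credentials silently returns the existing session.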
+ """ + if not HAS_XENAPI: + module.fail_json(changed=False, msg=missing_required_lib("XenAPI"), exception=XENAPI_IMP_ERR) + + self.module = module + self.xapi_session = XAPI.connect(module) + + try: + self.pool_ref = self.xapi_session.xenapi.pool.get_all()[0] + self.default_sr_ref = self.xapi_session.xenapi.pool.get_default_SR(self.pool_ref) + self.xenserver_version = get_xenserver_version(module) + except XenAPI.Failure as f: + self.module.fail_json(msg="XAPI ERROR: %s" % f.details) diff --git a/plugins/modules/__init__.py b/plugins/modules/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/plugins/modules/cloud/alicloud/ali_instance.py b/plugins/modules/cloud/alicloud/ali_instance.py new file mode 100644 index 0000000000..419ae45729 --- /dev/null +++ b/plugins/modules/cloud/alicloud/ali_instance.py @@ -0,0 +1,817 @@ +#!/usr/bin/python +# Copyright (c) 2017 Alibaba Group Holding Limited. He Guimin +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see http://www.gnu.org/licenses/. + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = { + 'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community' +} + +DOCUMENTATION = r''' +--- +module: ali_instance +short_description: Create, start, stop, restart or terminate an instance in ECS, add or remove an instance to/from a security group +description: +- Create, start, stop, restart, modify or terminate ecs instances. +- Add or remove ecs instances to/from security group. +options: + state: + description: + - The state of the instance after operating. + type: str + default: 'present' + choices: ['absent', 'present', 'restarted', 'running', 'stopped'] + availability_zone: + description: + - Aliyun availability zone ID in which to launch the instance. + - If it is not specified, it will be allocated by system automatically. + aliases: ['alicloud_zone'] + type: str + image_id: + description: + - Image ID used to launch instances. + - Required when I(state=present) and creating new ECS instances. + aliases: ['image'] + type: str + instance_type: + description: + - Instance type used to launch instances. + - Required when I(state=present) and creating new ECS instances. + aliases: ['type'] + type: str + security_groups: + description: + - A list of security group IDs. + type: list + vswitch_id: + description: + - The subnet ID in which to launch the instances (VPC). + aliases: ['subnet_id'] + type: str + instance_name: + description: + - The name of ECS instance, which is a string of 2 to 128 Chinese or English characters. + - It must begin with an uppercase/lowercase letter or a Chinese character and + can contain numerals, ".", "_" or "-". It cannot begin with http:// or https://. 
+ aliases: ['name'] + type: str + description: + description: + - The description of ECS instance, which is a string of 2 to 256 characters. + - It cannot begin with http:// or https://. + type: str + internet_charge_type: + description: + - Internet charge type of ECS instance. + type: str + default: 'PayByBandwidth' + choices: ['PayByBandwidth', 'PayByTraffic'] + max_bandwidth_in: + description: + - Maximum incoming bandwidth from the public network, + measured in Mbps (Megabits per second). + default: 200 + type: int + max_bandwidth_out: + description: + - Maximum outgoing bandwidth to the public network, measured in Mbps (Megabits per second). + type: int + default: 0 + host_name: + description: + - Instance host name. + type: str + password: + description: + - The password used to log in to the instance. + - A modified password will take effect after the instances are rebooted. + type: str + system_disk_category: + description: + - Category of the system disk. + type: str + default: 'cloud_efficiency' + choices: ['cloud_efficiency', 'cloud_ssd'] + system_disk_size: + description: + - Size of the system disk, in GB. The valid values are 40 to 500. + type: int + default: 40 + system_disk_name: + description: + - Name of the system disk. + type: str + system_disk_description: + description: + - Description of the system disk. + type: str + count: + description: + - The number of new instances. + - Indicates how many instances that match I(count_tag) should be running. + - Instances are either created or terminated based on this value. + type: int + default: 1 + count_tag: + description: + - Determines how many instances based on a specific tag criteria should be present. + - This can be expressed in multiple ways and is shown in the EXAMPLES section. + - The specified count_tag must already exist or be passed in as the I(instance_tags) option. + - If it is not specified, it will be replaced by I(instance_name). + type: str + allocate_public_ip: + description: + - Whether to allocate a public IP address for the new instance. + default: False + aliases: ['assign_public_ip'] + type: bool + instance_charge_type: + description: + - The charge type of the instance. + type: str + choices: ['PrePaid', 'PostPaid'] + default: 'PostPaid' + period: + description: + - The charge duration of the instance, in months. + - Required when I(instance_charge_type=PrePaid). + - The valid values are [1-9, 12, 24, 36]. + type: int + default: 1 + auto_renew: + description: + - Whether to automatically renew the charge of the instance. + type: bool + default: False + auto_renew_period: + description: + - The duration of the automatic renewal of the instance charge. + - Required when I(auto_renew=True). + type: int + choices: [1, 2, 3, 6, 12] + instance_ids: + description: + - A list of instance IDs. It is required when you need to operate on existing instances. + - If it is specified, I(count) is ignored. + type: list + force: + description: + - Whether the current operation needs to be executed forcibly. + default: False + type: bool + instance_tags: + description: + - A hash/dictionaries of instance tags, to add to the new instance or + for starting/stopping instance by tag (C({"key":"value"})). + aliases: ['tags'] + type: dict + key_name: + description: + - The name of the key pair which is used to access the ECS instance via SSH. + type: str + required: false + aliases: ['keypair'] + user_data: + description: + - User-defined data to customize the startup behaviors of an ECS instance and to pass data into an ECS instance.
It only takes effect when launching new ECS instances. + required: false + type: str +author: +- "He Guimin (@xiaozhu36)" +requirements: +- "python >= 2.6" +- "footmark >= 1.1.16" +extends_documentation_fragment: +- community.general.alicloud + +''' + +EXAMPLES = r''' +# Basic provisioning example in a VPC network +- name: basic provisioning example + hosts: localhost + vars: + alicloud_access_key: + alicloud_secret_key: + alicloud_region: cn-beijing + image: ubuntu1404_64_40G_cloudinit_20160727.raw + instance_type: ecs.n4.small + vswitch_id: vsw-abcd1234 + assign_public_ip: True + max_bandwidth_out: 10 + host_name: myhost + password: mypassword + system_disk_category: cloud_efficiency + system_disk_size: 100 + internet_charge_type: PayByBandwidth + security_groups: ["sg-f2rwnfh23r"] + + instance_ids: ["i-abcd12346", "i-abcd12345"] + force: True + + tasks: + - name: launch ECS instance in VPC network + ali_instance: + alicloud_access_key: '{{ alicloud_access_key }}' + alicloud_secret_key: '{{ alicloud_secret_key }}' + alicloud_region: '{{ alicloud_region }}' + image: '{{ image }}' + system_disk_category: '{{ system_disk_category }}' + system_disk_size: '{{ system_disk_size }}' + instance_type: '{{ instance_type }}' + vswitch_id: '{{ vswitch_id }}' + assign_public_ip: '{{ assign_public_ip }}' + internet_charge_type: '{{ internet_charge_type }}' + max_bandwidth_out: '{{ max_bandwidth_out }}' + instance_tags: + Name: created_one + host_name: '{{ host_name }}' + password: '{{ password }}' + + - name: with count and count_tag to create a number of instances + ali_instance: + alicloud_access_key: '{{ alicloud_access_key }}' + alicloud_secret_key: '{{ alicloud_secret_key }}' + alicloud_region: '{{ alicloud_region }}' + image: '{{ image }}' + system_disk_category: '{{ system_disk_category }}' + system_disk_size: '{{ system_disk_size }}' + instance_type: '{{ instance_type }}' + assign_public_ip: '{{ assign_public_ip }}' + security_groups: '{{ security_groups }}' + internet_charge_type: '{{ internet_charge_type }}' + max_bandwidth_out: '{{ max_bandwidth_out }}' + instance_tags: + Name: created_one + Version: 0.1 + count: 2 + count_tag: + Name: created_one + host_name: '{{ host_name }}' + password: '{{ password }}' + + - name: start instance + ali_instance: + alicloud_access_key: '{{ alicloud_access_key }}' + alicloud_secret_key: '{{ alicloud_secret_key }}' + alicloud_region: '{{ alicloud_region }}' + instance_ids: '{{ instance_ids }}' + state: 'running' + + - name: reboot instance forcibly + ali_instance: + alicloud_access_key: '{{ alicloud_access_key }}' + alicloud_secret_key: '{{ alicloud_secret_key }}' + alicloud_region: '{{ alicloud_region }}' + instance_ids: '{{ instance_ids }}' + state: 'restarted' + force: '{{ force }}' + + - name: Add instances to a security group + ali_instance: + alicloud_access_key: '{{ alicloud_access_key }}' + alicloud_secret_key: '{{ alicloud_secret_key }}' + alicloud_region: '{{ alicloud_region }}' + instance_ids: '{{ instance_ids }}' + security_groups: '{{ security_groups }}' +''' + +RETURN = r''' +instances: + description: List of ECS instances. + returned: always + type: complex + contains: + availability_zone: + description: The availability zone the instance is in. + returned: always + type: str + sample: cn-beijing-a + block_device_mappings: + description: Any block device mapping entries for the instance. + returned: always + type: complex + contains: + device_name: + description: The device name exposed to the instance (for example, /dev/xvda).
+ returned: always + type: str + sample: /dev/xvda + attach_time: + description: The time stamp when the attachment was initiated. + returned: always + type: str + sample: "2018-06-25T04:08:26Z" + delete_on_termination: + description: Indicates whether the volume is deleted on instance termination. + returned: always + type: bool + sample: true + status: + description: The attachment state. + returned: always + type: str + sample: in_use + volume_id: + description: The ID of the cloud disk. + returned: always + type: str + sample: d-2zei53pjsi117y6gf9t6 + cpu: + description: The CPU core count of the instance. + returned: always + type: int + sample: 4 + creation_time: + description: The time the instance was created. + returned: always + type: str + sample: "2018-06-25T04:08Z" + description: + description: The instance description. + returned: always + type: str + sample: "my ansible instance" + eip: + description: The attributes of the EIP associated with the instance. + returned: always + type: complex + contains: + allocation_id: + description: The ID of the EIP. + returned: always + type: str + sample: eip-12345 + internet_charge_type: + description: The internet charge type of the EIP. + returned: always + type: str + sample: "paybybandwidth" + ip_address: + description: EIP address. + returned: always + type: str + sample: 42.10.2.2 + expired_time: + description: The time the instance will expire. + returned: always + type: str + sample: "2099-12-31T15:59Z" + gpu: + description: The attributes of the instance GPU. + returned: always + type: complex + contains: + amount: + description: The count of the GPU. + returned: always + type: int + sample: 0 + spec: + description: The specification of the GPU. + returned: always + type: str + sample: "" + host_name: + description: The host name of the instance. + returned: always + type: str + sample: iZ2zewaoZ + id: + description: Alias of instance_id. + returned: always + type: str + sample: i-abc12345 + instance_id: + description: ECS instance resource ID. + returned: always + type: str + sample: i-abc12345 + image_id: + description: The ID of the image used to launch the instance. + returned: always + type: str + sample: m-0011223344 + inner_ip_address: + description: The inner IPv4 address of the classic instance. + returned: always + type: str + sample: 10.0.0.2 + instance_charge_type: + description: The instance charge type. + returned: always + type: str + sample: PostPaid + instance_name: + description: The name of the instance. + returned: always + type: str + sample: my-ecs + instance_type: + description: The instance type of the running instance. + returned: always + type: str + sample: ecs.sn1ne.xlarge + internet_charge_type: + description: The billing method of the network bandwidth. + returned: always + type: str + sample: PayByBandwidth + internet_max_bandwidth_in: + description: Maximum incoming bandwidth from the internet. + returned: always + type: int + sample: 200 + internet_max_bandwidth_out: + description: Maximum outgoing bandwidth to the internet. + returned: always + type: int + sample: 20 + io_optimized: + description: Indicates whether the instance is optimized for EBS I/O. + returned: always + type: bool + sample: false + memory: + description: Memory size of the instance. + returned: always + type: int + sample: 8192 + network_interfaces: + description: One or more network interfaces for the instance. + returned: always + type: complex + contains: + mac_address: + description: The MAC address.
+ returned: always + type: str + sample: "00:11:22:33:44:55" + network_interface_id: + description: The ID of the network interface. + returned: always + type: str + sample: eni-01234567 + primary_ip_address: + description: The primary IPv4 address of the network interface within the vswitch. + returned: always + type: str + sample: 10.0.0.1 + osname: + description: The operating system name of the instance. + returned: always + type: str + sample: CentOS + ostype: + description: The operating system type of the instance. + returned: always + type: str + sample: linux + private_ip_address: + description: The IPv4 address of the network interface within the subnet. + returned: always + type: str + sample: 10.0.0.1 + public_ip_address: + description: The public IPv4 address assigned to the instance. + returned: always + type: str + sample: 43.0.0.1 + resource_group_id: + description: The ID of the resource group to which the instance belongs. + returned: always + type: str + sample: my-ecs-group + security_groups: + description: One or more security groups for the instance. + returned: always + type: list + elements: dict + contains: + group_id: + description: The ID of the security group. + returned: always + type: str + sample: sg-0123456 + group_name: + description: The name of the security group. + returned: always + type: str + sample: my-security-group + status: + description: The current status of the instance. + returned: always + type: str + sample: running + tags: + description: Any tags assigned to the instance. + returned: always + type: dict + sample: + vswitch_id: + description: The ID of the vswitch in which the instance is running. + returned: always + type: str + sample: vsw-dew00abcdef + vpc_id: + description: The ID of the VPC the instance is in. + returned: always + type: str + sample: vpc-0011223344 +ids: + description: List of ECS instance IDs.
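The state=present branch of main() further below reconciles the actual instance count against the requested one: extra instances are terminated from the tail of the list, missing ones are created. A compressed sketch of that decision (the function name and return shape are illustrative only):

def reconcile(existing, desired_count):
    # Mirrors the state=present logic in main(): terminate from the tail when
    # there are too many instances, create the difference when there are too few.
    if len(existing) > desired_count:
        return ('terminate', existing[desired_count:])
    return ('create', desired_count - len(existing))

print(reconcile(['i-a', 'i-b', 'i-c'], 2))  # ('terminate', ['i-c'])
print(reconcile(['i-a'], 3))                # ('create', 2)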
+ returned: always + type: list + sample: [i-12345er, i-3245fs] +''' + +import time +import traceback +from ansible.module_utils.basic import AnsibleModule, missing_required_lib +from ansible_collections.community.general.plugins.module_utils.alicloud_ecs import ecs_argument_spec, ecs_connect + +HAS_FOOTMARK = False +FOOTMARK_IMP_ERR = None +try: + from footmark.exception import ECSResponseError + HAS_FOOTMARK = True +except ImportError: + FOOTMARK_IMP_ERR = traceback.format_exc() + HAS_FOOTMARK = False + + +def get_instances_info(connection, ids): + result = [] + instances = connection.get_all_instances(instance_ids=ids) + if len(instances) > 0: + for inst in instances: + result.append(inst.read()) + return result + + +def create_instance(module, ecs, exact_count): + if exact_count <= 0: + return None + zone_id = module.params['availability_zone'] + image_id = module.params['image_id'] + instance_type = module.params['instance_type'] + security_groups = module.params['security_groups'] + vswitch_id = module.params['vswitch_id'] + instance_name = module.params['instance_name'] + description = module.params['description'] + internet_charge_type = module.params['internet_charge_type'] + max_bandwidth_out = module.params['max_bandwidth_out'] + max_bandwidth_in = module.params['max_bandwidth_out'] + host_name = module.params['host_name'] + password = module.params['password'] + system_disk_category = module.params['system_disk_category'] + system_disk_size = module.params['system_disk_size'] + system_disk_name = module.params['system_disk_name'] + system_disk_description = module.params['system_disk_description'] + allocate_public_ip = module.params['allocate_public_ip'] + instance_tags = module.params['instance_tags'] + period = module.params['period'] + auto_renew = module.params['auto_renew'] + instance_charge_type = module.params['instance_charge_type'] + auto_renew_period = module.params['auto_renew_period'] + user_data = module.params['user_data'] + key_name = module.params['key_name'] + + # check whether the required parameter passed or not + if not image_id: + module.fail_json(msg='image_id is required for new instance') + if not instance_type: + module.fail_json(msg='instance_type is required for new instance') + if not isinstance(security_groups, list): + module.fail_json(msg='The parameter security_groups should be a list, aborting') + if len(security_groups) <= 0: + module.fail_json(msg='Expected the parameter security_groups is non-empty when create new ECS instances, aborting') + + client_token = "Ansible-Alicloud-{0}-{1}".format(hash(str(module.params)), str(time.time())) + + try: + # call to create_instance method from footmark + instances = ecs.create_instance(image_id=image_id, instance_type=instance_type, security_group_id=security_groups[0], + zone_id=zone_id, instance_name=instance_name, description=description, + internet_charge_type=internet_charge_type, max_bandwidth_out=max_bandwidth_out, + max_bandwidth_in=max_bandwidth_in, host_name=host_name, password=password, + io_optimized='optimized', system_disk_category=system_disk_category, + system_disk_size=system_disk_size, system_disk_name=system_disk_name, + system_disk_description=system_disk_description, + vswitch_id=vswitch_id, count=exact_count, allocate_public_ip=allocate_public_ip, + instance_charge_type=instance_charge_type, period=period, auto_renew=auto_renew, + auto_renew_period=auto_renew_period, instance_tags=instance_tags, + key_pair_name=key_name, user_data=user_data, client_token=client_token) + + except 
+        module.fail_json(msg='Unable to create instance, error: {0}'.format(e))
+
+    return instances
+
+
+def main():
+    argument_spec = ecs_argument_spec()
+    argument_spec.update(dict(
+        security_groups=dict(type='list'),
+        availability_zone=dict(type='str', aliases=['alicloud_zone']),
+        instance_type=dict(type='str', aliases=['type']),
+        image_id=dict(type='str', aliases=['image']),
+        count=dict(type='int', default=1),
+        count_tag=dict(type='str'),
+        vswitch_id=dict(type='str', aliases=['subnet_id']),
+        instance_name=dict(type='str', aliases=['name']),
+        host_name=dict(type='str'),
+        password=dict(type='str', no_log=True),
+        internet_charge_type=dict(type='str', default='PayByBandwidth', choices=['PayByBandwidth', 'PayByTraffic']),
+        max_bandwidth_in=dict(type='int', default=200),
+        max_bandwidth_out=dict(type='int', default=0),
+        system_disk_category=dict(type='str', default='cloud_efficiency', choices=['cloud_efficiency', 'cloud_ssd']),
+        system_disk_size=dict(type='int', default=40),
+        system_disk_name=dict(type='str'),
+        system_disk_description=dict(type='str'),
+        force=dict(type='bool', default=False),
+        instance_tags=dict(type='dict', aliases=['tags']),
+        state=dict(default='present', choices=['present', 'running', 'stopped', 'restarted', 'absent']),
+        description=dict(type='str'),
+        allocate_public_ip=dict(type='bool', aliases=['assign_public_ip'], default=False),
+        instance_charge_type=dict(type='str', default='PostPaid', choices=['PrePaid', 'PostPaid']),
+        period=dict(type='int', default=1),
+        auto_renew=dict(type='bool', default=False),
+        instance_ids=dict(type='list'),
+        auto_renew_period=dict(type='int', choices=[1, 2, 3, 6, 12]),
+        key_name=dict(type='str', aliases=['keypair']),
+        user_data=dict(type='str')
+    )
+    )
+    module = AnsibleModule(argument_spec=argument_spec)
+
+    if HAS_FOOTMARK is False:
+        module.fail_json(msg=missing_required_lib('footmark'), exception=FOOTMARK_IMP_ERR)
+
+    ecs = ecs_connect(module)
+    state = module.params['state']
+    instance_ids = module.params['instance_ids']
+    count_tag = module.params['count_tag']
+    count = module.params['count']
+    instance_name = module.params['instance_name']
+    force = module.params['force']
+    zone_id = module.params['availability_zone']
+    key_name = module.params['key_name']
+    changed = False
+
+    instances = []
+    if instance_ids:
+        if not isinstance(instance_ids, list):
+            module.fail_json(msg='The parameter instance_ids should be a list, aborting')
+        instances = ecs.get_all_instances(zone_id=zone_id, instance_ids=instance_ids)
+        if not instances:
+            module.fail_json(msg="There are no instances in our record based on instance_ids {0}. "
+                                 "Please check it and try again.".format(instance_ids))
+    elif count_tag:
+        # count_tag is a string representation of a tag dict; note that eval()
+        # evaluates arbitrary expressions, so only trusted input should be passed here
+        instances = ecs.get_all_instances(zone_id=zone_id, instance_tags=eval(count_tag))
+    elif instance_name:
+        instances = ecs.get_all_instances(zone_id=zone_id, instance_name=instance_name)
+
+    ids = []
+    if state == 'present':
+        if not instance_ids:
+            if len(instances) > count:
+                for i in range(0, len(instances) - count):
+                    inst = instances[len(instances) - 1]
+                    if inst.status != 'stopped' and not force:
+                        module.fail_json(msg="Failed to delete instance {0} because it is running. "
+                                             "Please stop it first or set 'force' to True.".format(inst.id))
+                    try:
+                        changed = inst.terminate(force=force)
+                    except Exception as e:
+                        module.fail_json(msg="Delete instance {0} got an error: {1}".format(inst.id, e))
+                    instances.pop(len(instances) - 1)
+            else:
+                try:
+                    new_instances = create_instance(module, ecs, count - len(instances))
+                    if new_instances:
+                        changed = True
+                        instances.extend(new_instances)
+                except Exception as e:
+                    module.fail_json(msg="Create new instances got an error: {0}".format(e))
+
+        # Security Group join/leave begin
+        security_groups = module.params['security_groups']
+        if not isinstance(security_groups, list):
+            module.fail_json(msg='The parameter security_groups should be a list, aborting')
+        if len(security_groups) > 0:
+            for inst in instances:
+                existing = inst.security_group_ids['security_group_id']
+                remove = list(set(existing).difference(set(security_groups)))
+                add = list(set(security_groups).difference(set(existing)))
+                for sg in remove:
+                    if inst.leave_security_group(sg):
+                        changed = True
+                for sg in add:
+                    if inst.join_security_group(sg):
+                        changed = True
+        # Security Group join/leave ends here
+
+        # Attach/Detach key pair
+        inst_ids = []
+        for inst in instances:
+            if key_name is not None and key_name != inst.key_name:
+                if key_name == "":
+                    changed = inst.detach_key_pair()
+                else:
+                    inst_ids.append(inst.id)
+        if inst_ids:
+            changed = ecs.attach_key_pair(instance_ids=inst_ids, key_pair_name=key_name)
+
+        # Modify instance attribute
+        description = module.params['description']
+        host_name = module.params['host_name']
+        password = module.params['password']
+        for inst in instances:
+            if not instance_name:
+                instance_name = inst.name
+            if not description:
+                description = inst.description
+            if not host_name:
+                host_name = inst.host_name
+            try:
+                if inst.modify(name=instance_name, description=description, host_name=host_name, password=password):
+                    changed = True
+            except Exception as e:
+                module.fail_json(msg="Modify instance attribute {0} got an error: {1}".format(inst.id, e))
+
+            if inst.id not in ids:
+                ids.append(inst.id)
+
+        module.exit_json(changed=changed, ids=ids, instances=get_instances_info(ecs, ids))
+
+    else:
+        if len(instances) < 1:
+            module.fail_json(msg='Please specify the ECS instances that you want to operate on by using '
+                                 'the parameters instance_ids, instance_tags or instance_name, aborting')
+        force = module.params['force']
+        if state == 'running':
+            try:
+                for inst in instances:
+                    if inst.start():
+                        changed = True
+                    ids.append(inst.id)
+
+                module.exit_json(changed=changed, ids=ids, instances=get_instances_info(ecs, ids))
+            except Exception as e:
+                module.fail_json(msg='Start instances got an error: {0}'.format(e))
+        elif state == 'stopped':
+            try:
+                for inst in instances:
+                    if inst.stop(force=force):
+                        changed = True
+                    ids.append(inst.id)
+
+                module.exit_json(changed=changed, ids=ids, instances=get_instances_info(ecs, ids))
+            except Exception as e:
+                module.fail_json(msg='Stop instances got an error: {0}'.format(e))
+        elif state == 'restarted':
+            try:
+                for inst in instances:
+                    if inst.reboot(force=module.params['force']):
+                        changed = True
+                    ids.append(inst.id)
+
+                module.exit_json(changed=changed, ids=ids, instances=get_instances_info(ecs, ids))
+            except Exception as e:
+                module.fail_json(msg='Reboot instances got an error: {0}'.format(e))
+        else:
+            try:
+                for inst in instances:
+                    if inst.status != 'stopped' and not force:
+                        module.fail_json(msg="Instance is running. Please stop it first or set 'force' to True.")
+                    if inst.terminate(force=module.params['force']):
+                        changed = True
+
+                module.exit_json(changed=changed, ids=[], instances=[])
+            except Exception as e:
+                module.fail_json(msg='Delete instance got an error: {0}'.format(e))
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/cloud/alicloud/ali_instance_facts.py b/plugins/modules/cloud/alicloud/ali_instance_facts.py
new file mode 120000
index 0000000000..5202c55448
--- /dev/null
+++ b/plugins/modules/cloud/alicloud/ali_instance_facts.py
@@ -0,0 +1 @@
+ali_instance_info.py
\ No newline at end of file
diff --git a/plugins/modules/cloud/alicloud/ali_instance_info.py b/plugins/modules/cloud/alicloud/ali_instance_info.py
new file mode 100644
index 0000000000..487834513d
--- /dev/null
+++ b/plugins/modules/cloud/alicloud/ali_instance_info.py
@@ -0,0 +1,411 @@
+#!/usr/bin/python
+# Copyright (c) 2017 Alibaba Group Holding Limited. He Guimin
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see http://www.gnu.org/licenses/.
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+DOCUMENTATION = '''
+---
+module: ali_instance_info
+short_description: Gather information on instances of Alibaba Cloud ECS.
+description:
+     - This module fetches data from the Open API in Alicloud.
+       The module must be called from within the ECS instance itself.
+     - This module was called C(ali_instance_facts) before Ansible 2.9. The usage did not change.
+
+options:
+    availability_zone:
+      description:
+        - Aliyun availability zone ID in which the instances are located.
+      aliases: ['alicloud_zone']
+    instance_names:
+      description:
+        - A list of ECS instance names.
+      aliases: ["names"]
+    instance_ids:
+      description:
+        - A list of ECS instance ids.
+      aliases: ["ids"]
+    instance_tags:
+      description:
+        - A hash/dictionary of instance tags, e.g. C({"key":"value"}).
+      aliases: ["tags"]
+author:
+    - "He Guimin (@xiaozhu36)"
+requirements:
+    - "python >= 2.6"
+    - "footmark >= 1.1.16"
+extends_documentation_fragment:
+- community.general.alicloud
+
+'''
+
+EXAMPLES = '''
+# Fetch instance details according to different filters
+- name: fetch instances details example
+  hosts: localhost
+  vars:
+    alicloud_access_key:
+    alicloud_secret_key:
+    alicloud_region: cn-beijing
+    availability_zone: cn-beijing-a
+
+  tasks:
+    - name: Find all instances in the specified region
+      ali_instance_info:
+        alicloud_access_key: '{{ alicloud_access_key }}'
+        alicloud_secret_key: '{{ alicloud_secret_key }}'
+        alicloud_region: '{{ alicloud_region }}'
+      register: all_instances
+
+    - name: Find all instances based on the specified ids
+      ali_instance_info:
+        alicloud_access_key: '{{ alicloud_access_key }}'
+        alicloud_secret_key: '{{ alicloud_secret_key }}'
+        alicloud_region: '{{ alicloud_region }}'
+        instance_ids:
+          - "i-35b333d9"
+          - "i-ddav43kd"
+      register: instances_by_ids
+
+    - name: Find all instances based on the specified names/name-prefixes
+      ali_instance_info:
+        alicloud_access_key: '{{ alicloud_access_key }}'
+        alicloud_secret_key: '{{ alicloud_secret_key }}'
+        alicloud_region: '{{ alicloud_region }}'
+        instance_names:
+          - "ecs_instance-1"
+          - "ecs_instance_2"
+      register: instances_by_names
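+
+    # An additional illustrative task (not in the original examples): the
+    # availability_zone option can be combined with the filters above.
+    - name: Find all instances in a single availability zone
+      ali_instance_info:
+        alicloud_access_key: '{{ alicloud_access_key }}'
+        alicloud_secret_key: '{{ alicloud_secret_key }}'
+        alicloud_region: '{{ alicloud_region }}'
+        availability_zone: '{{ availability_zone }}'
+      register: instances_by_zone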
+
+'''
+
+RETURN = '''
+instances:
+    description: List of ECS instances
+    returned: always
+    type: complex
+    contains:
+        availability_zone:
+            description: The availability zone the instance is in.
+            returned: always
+            type: str
+            sample: cn-beijing-a
+        block_device_mappings:
+            description: Any block device mapping entries for the instance.
+            returned: always
+            type: complex
+            contains:
+                device_name:
+                    description: The device name exposed to the instance (for example, /dev/xvda).
+                    returned: always
+                    type: str
+                    sample: /dev/xvda
+                attach_time:
+                    description: The time stamp when the attachment initiated.
+                    returned: always
+                    type: str
+                    sample: "2018-06-25T04:08:26Z"
+                delete_on_termination:
+                    description: Indicates whether the volume is deleted on instance termination.
+                    returned: always
+                    type: bool
+                    sample: true
+                status:
+                    description: The attachment state.
+                    returned: always
+                    type: str
+                    sample: in_use
+                volume_id:
+                    description: The ID of the cloud disk.
+                    returned: always
+                    type: str
+                    sample: d-2zei53pjsi117y6gf9t6
+        cpu:
+            description: The CPU core count of the instance.
+            returned: always
+            type: int
+            sample: 4
+        creation_time:
+            description: The time the instance was created.
+            returned: always
+            type: str
+            sample: "2018-06-25T04:08Z"
+        description:
+            description: The instance description.
+            returned: always
+            type: str
+            sample: "my ansible instance"
+        eip:
+            description: The attributes of the EIP associated with the instance.
+            returned: always
+            type: complex
+            contains:
+                allocation_id:
+                    description: The ID of the EIP.
+                    returned: always
+                    type: str
+                    sample: eip-12345
+                internet_charge_type:
+                    description: The internet charge type of the EIP.
+                    returned: always
+                    type: str
+                    sample: "paybybandwidth"
+                ip_address:
+                    description: EIP address.
+                    returned: always
+                    type: str
+                    sample: 42.10.2.2
+        expired_time:
+            description: The time the instance will expire.
+            returned: always
+            type: str
+            sample: "2099-12-31T15:59Z"
+        gpu:
+            description: The attributes of the instance GPU.
+            returned: always
+            type: complex
+            contains:
+                amount:
+                    description: The count of the GPU.
+                    returned: always
+                    type: int
+                    sample: 0
+                spec:
+                    description: The specification of the GPU.
+                    returned: always
+                    type: str
+                    sample: ""
+        host_name:
+            description: The host name of the instance.
+            returned: always
+            type: str
+            sample: iZ2zewaoZ
+        id:
+            description: Alias of instance_id.
+            returned: always
+            type: str
+            sample: i-abc12345
+        instance_id:
+            description: ECS instance resource ID.
+            returned: always
+            type: str
+            sample: i-abc12345
+        image_id:
+            description: The ID of the image used to launch the instance.
+            returned: always
+            type: str
+            sample: m-0011223344
+        inner_ip_address:
+            description: The inner IPv4 address of the classic instance.
+            returned: always
+            type: str
+            sample: 10.0.0.2
+        instance_charge_type:
+            description: The instance charge type.
+            returned: always
+            type: str
+            sample: PostPaid
+        instance_name:
+            description: The name of the instance.
+            returned: always
+            type: str
+            sample: my-ecs
+        instance_type:
+            description: The instance type of the running instance.
+            returned: always
+            type: str
+            sample: ecs.sn1ne.xlarge
+        internet_charge_type:
+            description: The billing method of the network bandwidth.
+            returned: always
+            type: str
+            sample: PayByBandwidth
+        internet_max_bandwidth_in:
+            description: Maximum incoming bandwidth from the internet network.
+            returned: always
+            type: int
+            sample: 200
+        internet_max_bandwidth_out:
+            description: Maximum outgoing bandwidth to the internet network.
+            returned: always
+            type: int
+            sample: 20
+        io_optimized:
+            description: Indicates whether the instance is optimized for EBS I/O.
+            returned: always
+            type: bool
+            sample: false
+        memory:
+            description: Memory size of the instance.
+            returned: always
+            type: int
+            sample: 8192
+        network_interfaces:
+            description: One or more network interfaces for the instance.
+            returned: always
+            type: complex
+            contains:
+                mac_address:
+                    description: The MAC address.
+                    returned: always
+                    type: str
+                    sample: "00:11:22:33:44:55"
+                network_interface_id:
+                    description: The ID of the network interface.
+                    returned: always
+                    type: str
+                    sample: eni-01234567
+                primary_ip_address:
+                    description: The primary IPv4 address of the network interface within the vswitch.
+                    returned: always
+                    type: str
+                    sample: 10.0.0.1
+        osname:
+            description: The operating system name of the instance.
+            returned: always
+            type: str
+            sample: CentOS
+        ostype:
+            description: The operating system type of the instance.
+            returned: always
+            type: str
+            sample: linux
+        private_ip_address:
+            description: The IPv4 address of the network interface within the subnet.
+            returned: always
+            type: str
+            sample: 10.0.0.1
+        public_ip_address:
+            description: The public IPv4 address assigned to the instance.
+            returned: always
+            type: str
+            sample: 43.0.0.1
+        resource_group_id:
+            description: The ID of the resource group to which the instance belongs.
+            returned: always
+            type: str
+            sample: my-ecs-group
+        security_groups:
+            description: One or more security groups for the instance.
+            returned: always
+            type: list
+            elements: dict
+            contains:
+                group_id:
+                    description: The ID of the security group.
+                    returned: always
+                    type: str
+                    sample: sg-0123456
+                group_name:
+                    description: The name of the security group.
+                    returned: always
+                    type: str
+                    sample: my-security-group
+        status:
+            description: The current status of the instance.
+            returned: always
+            type: str
+            sample: running
+        tags:
+            description: Any tags assigned to the instance.
+            returned: always
+            type: dict
+            sample:
+        vswitch_id:
+            description: The ID of the vswitch in which the instance is running.
+            returned: always
+            type: str
+            sample: vsw-dew00abcdef
+        vpc_id:
+            description: The ID of the VPC the instance is in.
+            returned: always
+            type: str
+            sample: vpc-0011223344
+ids:
+    description: List of ECS instance IDs.
+    returned: always
+    type: list
+    sample: [i-12345er, i-3245fs]
+'''
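+
+# A minimal, illustrative usage sketch (not part of the module): register the
+# result and post-process the documented return values with standard Jinja2 filters.
+#
+#   - ali_instance_info:
+#       instance_names:
+#         - "ecs_instance-1"
+#     register: info
+#
+#   - debug:
+#       msg: "{{ info.instances | map(attribute='instance_id') | list }}"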
+
+import traceback
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible_collections.community.general.plugins.module_utils.alicloud_ecs import ecs_argument_spec, ecs_connect
+
+HAS_FOOTMARK = False
+FOOTMARK_IMP_ERR = None
+try:
+    from footmark.exception import ECSResponseError
+    HAS_FOOTMARK = True
+except ImportError:
+    FOOTMARK_IMP_ERR = traceback.format_exc()
+    HAS_FOOTMARK = False
+
+
+def main():
+    argument_spec = ecs_argument_spec()
+    argument_spec.update(dict(
+        availability_zone=dict(aliases=['alicloud_zone']),
+        instance_ids=dict(type='list', aliases=['ids']),
+        instance_names=dict(type='list', aliases=['names']),
+        instance_tags=dict(type='list', aliases=['tags']),
+    )
+    )
+    module = AnsibleModule(argument_spec=argument_spec)
+    if module._name == 'ali_instance_facts':
+        module.deprecate("The 'ali_instance_facts' module has been renamed to 'ali_instance_info'", version='2.13')
+
+    if HAS_FOOTMARK is False:
+        module.fail_json(msg=missing_required_lib('footmark'), exception=FOOTMARK_IMP_ERR)
+
+    ecs = ecs_connect(module)
+
+    instances = []
+    instance_ids = []
+    ids = module.params['instance_ids']
+    names = module.params['instance_names']
+    zone_id = module.params['availability_zone']
+    if ids and (not isinstance(ids, list) or len(ids) < 1):
+        module.fail_json(msg='instance_ids should be a list of instance ids, aborting')
+
+    if names and (not isinstance(names, list) or len(names) < 1):
+        module.fail_json(msg='instance_names should be a list of instance names, aborting')
+
+    if names:
+        for name in names:
+            for inst in ecs.get_all_instances(zone_id=zone_id, instance_ids=ids, instance_name=name):
+                instances.append(inst.read())
+                instance_ids.append(inst.id)
+    else:
+        for inst in ecs.get_all_instances(zone_id=zone_id, instance_ids=ids):
+            instances.append(inst.read())
+            instance_ids.append(inst.id)
+
+    module.exit_json(changed=False, ids=instance_ids, instances=instances)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/cloud/atomic/atomic_container.py b/plugins/modules/cloud/atomic/atomic_container.py
new file mode 100644
index 0000000000..a6d43f7979
--- /dev/null
+++ b/plugins/modules/cloud/atomic/atomic_container.py
@@ -0,0 +1,206 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: atomic_container
+short_description: Manage the containers on the atomic host platform
+description:
+    - Manage the containers on the atomic host platform.
+    - Allows managing the lifecycle of a container on the atomic host platform.
+author: "Giuseppe Scrivano (@giuseppe)"
+notes:
+    - Host should support C(atomic) command
+requirements:
+  - atomic
+  - "python >= 2.6"
+options:
+    backend:
+        description:
+          - Define the backend to use for the container.
+        required: True
+        choices: ["docker", "ostree"]
+    name:
+        description:
+          - Name of the container.
+        required: True
+    image:
+        description:
+          - The image to use to install the container.
+        required: True
+    rootfs:
+        description:
+          - Define the rootfs of the image.
+    state:
+        description:
+          - State of the container.
+        required: True
+        choices: ["latest", "present", "absent", "rollback"]
+        default: "latest"
+    mode:
+        description:
+          - Define if it is a user or a system container.
+        required: True
+        choices: ["user", "system"]
+    values:
+        description:
+            - Values for the installation of the container. This option is permitted only with mode 'user' or 'system'.
+              The values specified here will be used at installation time as --set arguments for atomic install.
+'''
+
+EXAMPLES = '''
+
+# Install the etcd system container
+- atomic_container:
+    name: etcd
+    image: rhel/etcd
+    backend: ostree
+    state: latest
+    mode: system
+    values:
+        - ETCD_NAME=etcd.server
+
+# Uninstall the etcd system container
+- atomic_container:
+    name: etcd
+    image: rhel/etcd
+    backend: ostree
+    state: absent
+    mode: system
+'''
+
+RETURN = '''
+msg:
+    description: The command standard output
+    returned: always
+    type: str
+    sample: [u'Using default tag: latest ...']
+'''
+
+# import module snippets
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+
+
+def do_install(module, mode, rootfs, container, image, values_list, backend):
+    system_list = ["--system"] if mode == 'system' else []
+    user_list = ["--user"] if mode == 'user' else []
+    rootfs_list = ["--rootfs=%s" % rootfs] if rootfs else []
+    args = ['atomic', 'install', "--storage=%s" % backend, '--name=%s' % container] + system_list + user_list + rootfs_list + values_list + [image]
+    rc, out, err = module.run_command(args, check_rc=False)
+    if rc != 0:
+        module.fail_json(rc=rc, msg=err)
+    else:
+        changed = "Extracting" in out or "Copying blob" in out
+        module.exit_json(msg=out, changed=changed)
+
+
+def do_update(module, container, image, values_list):
+    args = ['atomic', 'containers', 'update', "--rebase=%s" % image] + values_list + [container]
+    rc, out, err = module.run_command(args, check_rc=False)
+    if rc != 0:
+        module.fail_json(rc=rc, msg=err)
+    else:
+        changed = "Extracting" in out or "Copying blob" in out
+        module.exit_json(msg=out, changed=changed)
+
+
+def do_uninstall(module, name, backend):
+    args = ['atomic', 'uninstall', "--storage=%s" % backend, name]
+    rc, out, err = module.run_command(args, check_rc=False)
+    if rc != 0:
+        module.fail_json(rc=rc, msg=err)
+    module.exit_json(msg=out, changed=True)
+
+
+def do_rollback(module, name):
+    args = ['atomic', 'containers', 'rollback', name]
+    rc, out, err = module.run_command(args, check_rc=False)
+    if rc != 0:
+        module.fail_json(rc=rc, msg=err)
+    else:
+        changed = "Rolling back" in out
+        module.exit_json(msg=out, changed=changed)
+
+
+def core(module):
+    mode = module.params['mode']
+    name = module.params['name']
+    image = module.params['image']
+    rootfs = module.params['rootfs']
+    values = module.params['values']
+    backend = module.params['backend']
+    state = module.params['state']
+
+    module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C')
+    out = {}
+    err = {}
+    rc = 0
+
+    values_list = ["--set=%s" % x for x in values] if values else []
+
+    args = ['atomic', 'containers', 'list', '--no-trunc', '-n', '--all', '-f', 'backend=%s' % backend, '-f', 'container=%s' % name]
+    rc, out, err = module.run_command(args, check_rc=False)
+    if rc != 0:
+        module.fail_json(rc=rc, msg=err)
+        return
+    present = name in out
+
+    if state == 'present' and present:
+        module.exit_json(msg=out, changed=False)
+    elif (state in ['latest', 'present']) and not present:
+        do_install(module, mode, rootfs, name, image, values_list, backend)
+    elif state == 'latest':
+        do_update(module, name, image, values_list)
+    elif state == 'absent':
+        if not present:
+            module.exit_json(msg="The container is not present", changed=False)
+        else:
+            do_uninstall(module, name, backend)
+    elif state == 'rollback':
+        do_rollback(module, name)
+
+
+def main():
+    module = AnsibleModule(
+        argument_spec=dict(
+            mode=dict(default=None, choices=['user', 'system']),
+            name=dict(default=None, required=True),
+            image=dict(default=None, required=True),
+            rootfs=dict(default=None),
+            state=dict(default='latest', choices=['present', 'absent', 'latest', 'rollback']),
+            backend=dict(default=None, required=True, choices=['docker', 'ostree']),
+            values=dict(type='list', default=[]),
+        ),
+    )
+
+    # --set values are only valid for user or system containers, so an explicit
+    # mode must accompany them (mode has no default)
+    if module.params['values'] and module.params['mode'] is None:
+        module.fail_json(msg="values is supported only with user or system mode")
+
+    # Verify that the platform supports atomic command
+    rc, out, err = module.run_command('atomic -v', check_rc=False)
+    if rc != 0:
+        module.fail_json(msg="Error in running atomic command", err=err)
+
+    try:
+        core(module)
+    except Exception as e:
+        module.fail_json(msg='Unanticipated error running atomic: %s' % to_native(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/cloud/atomic/atomic_host.py b/plugins/modules/cloud/atomic/atomic_host.py
new file mode 100644
index 0000000000..b01071a905
--- /dev/null
+++ b/plugins/modules/cloud/atomic/atomic_host.py
@@ -0,0 +1,103 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+DOCUMENTATION = '''
+---
+module: atomic_host
+short_description: Manage the atomic host platform
+description:
+    - Manage the atomic host platform.
+    - Rebooting of Atomic host platform should be done outside this module.
+author:
+- Saravanan KR (@krsacme)
+notes:
+    - Host should be an atomic platform (verified by existence of '/run/ostree-booted' file).
+requirements:
+  - atomic
+  - python >= 2.6
+options:
+    revision:
+        description:
+          - The version number of the atomic host to be deployed.
+          - Providing C(latest) will upgrade to the latest available version.
+        default: latest
+        aliases: [ version ]
+'''
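+
+# For reference (illustrative): the module maps its single option onto the
+# atomic CLI as follows:
+#
+#   revision: latest   ->  atomic host upgrade
+#   revision: '23.130' ->  atomic host deploy 23.130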
+
+EXAMPLES = '''
+- name: Upgrade the atomic host platform to the latest version (atomic host upgrade)
+  atomic_host:
+    revision: latest
+
+- name: Deploy a specific revision as the atomic host (atomic host deploy 23.130)
+  atomic_host:
+    revision: '23.130'
+'''
+
+RETURN = '''
+msg:
+    description: The command standard output
+    returned: always
+    type: str
+    sample: 'Already on latest'
+'''
+import os
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+
+
+def core(module):
+    revision = module.params['revision']
+    args = []
+
+    module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C')
+
+    if revision == 'latest':
+        args = ['atomic', 'host', 'upgrade']
+    else:
+        args = ['atomic', 'host', 'deploy', revision]
+
+    out = {}
+    err = {}
+    rc = 0
+
+    rc, out, err = module.run_command(args, check_rc=False)
+
+    # exit code 77 from `atomic host upgrade` is treated as "nothing to upgrade"
+    if rc == 77 and revision == 'latest':
+        module.exit_json(msg="Already on latest", changed=False)
+    elif rc != 0:
+        module.fail_json(rc=rc, msg=err)
+    else:
+        module.exit_json(msg=out, changed=True)
+
+
+def main():
+    module = AnsibleModule(
+        argument_spec=dict(
+            revision=dict(type='str', default='latest', aliases=["version"]),
+        ),
+    )
+
+    # Verify that the platform is atomic host
+    if not os.path.exists("/run/ostree-booted"):
+        module.fail_json(msg="Module atomic_host is applicable for Atomic Host Platforms only")
+
+    try:
+        core(module)
+    except Exception as e:
+        module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/cloud/atomic/atomic_image.py b/plugins/modules/cloud/atomic/atomic_image.py
new file mode 100644
index 0000000000..50fe76b99d
--- /dev/null
+++ b/plugins/modules/cloud/atomic/atomic_image.py
@@ -0,0 +1,170 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+DOCUMENTATION = '''
+---
+module: atomic_image
+short_description: Manage the container images on the atomic host platform
+description:
+    - Manage the container images on the atomic host platform.
+    - Allows executing the commands specified by the RUN label in the container image when present.
+author:
+- Saravanan KR (@krsacme)
+notes:
+    - Host should support C(atomic) command.
+requirements:
+  - atomic
+  - python >= 2.6
+options:
+    backend:
+        description:
+          - Define the backend where the image is pulled.
+        choices: [ docker, ostree ]
+    name:
+        description:
+          - Name of the container image.
+        required: True
+    state:
+        description:
+          - The state of the container image.
+          - The state C(latest) will ensure the container image is upgraded to the latest version and forcefully restart the container, if running.
+        choices: [ absent, latest, present ]
+        default: latest
+    started:
+        description:
+          - Start or stop the container.
+        type: bool
+        default: 'yes'
+'''
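+
+# For reference (illustrative): depending on backend/state/started, the module
+# runs atomic commands along the lines of:
+#
+#   atomic pull --storage=ostree busybox
+#   atomic run rhel7/rsyslog
+#   atomic install rhel7/rsyslog
+#   atomic uninstall rhel7/rsyslog
+#   atomic images delete --storage=ostree busybox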
+
+EXAMPLES = '''
+- name: Execute the run command on rsyslog container image (atomic run rhel7/rsyslog)
+  atomic_image:
+    name: rhel7/rsyslog
+    state: latest
+
+- name: Pull busybox to the OSTree backend
+  atomic_image:
+    name: busybox
+    state: latest
+    backend: ostree
+'''
+
+RETURN = '''
+msg:
+    description: The command standard output
+    returned: always
+    type: str
+    sample: [u'Using default tag: latest ...']
+'''
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+
+
+def do_upgrade(module, image):
+    args = ['atomic', 'update', '--force', image]
+    rc, out, err = module.run_command(args, check_rc=False)
+    if rc != 0:  # something went wrong, emit the msg
+        module.fail_json(rc=rc, msg=err)
+    elif 'Image is up to date' in out:
+        return False
+
+    return True
+
+
+def core(module):
+    image = module.params['name']
+    state = module.params['state']
+    started = module.params['started']
+    backend = module.params['backend']
+    is_upgraded = False
+
+    module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C')
+    out = {}
+    err = {}
+    rc = 0
+
+    if backend:
+        if state == 'present' or state == 'latest':
+            args = ['atomic', 'pull', "--storage=%s" % backend, image]
+            rc, out, err = module.run_command(args, check_rc=False)
+            if rc < 0:
+                module.fail_json(rc=rc, msg=err)
+            else:
+                out_run = ""
+                if started:
+                    args = ['atomic', 'run', "--storage=%s" % backend, image]
+                    rc, out_run, err = module.run_command(args, check_rc=False)
+                    if rc < 0:
+                        module.fail_json(rc=rc, msg=err)
+
+                changed = "Extracting" in out or "Copying blob" in out
+                module.exit_json(msg=(out + out_run), changed=changed)
+        elif state == 'absent':
+            args = ['atomic', 'images', 'delete', "--storage=%s" % backend, image]
+            rc, out, err = module.run_command(args, check_rc=False)
+            if rc < 0:
+                module.fail_json(rc=rc, msg=err)
+            else:
+                changed = "Unable to find" not in out
+                module.exit_json(msg=out, changed=changed)
+        return
+
+    if state == 'present' or state == 'latest':
+        if state == 'latest':
+            is_upgraded = do_upgrade(module, image)
+
+        if started:
+            args = ['atomic', 'run', image]
+        else:
+            args = ['atomic', 'install', image]
+    elif state == 'absent':
+        args = ['atomic', 'uninstall', image]
+
+    rc, out, err = module.run_command(args, check_rc=False)
+
+    if rc < 0:
+        module.fail_json(rc=rc, msg=err)
+    elif rc == 1 and 'already present' in err:
+        module.exit_json(result=err, changed=is_upgraded)
+    elif started and 'Container is running' in out:
+        module.exit_json(result=out, changed=is_upgraded)
+    else:
+        module.exit_json(msg=out, changed=True)
+
+
+def main():
+    module = AnsibleModule(
+        argument_spec=dict(
+            backend=dict(type='str', choices=['docker', 'ostree']),
+            name=dict(type='str', required=True),
+            state=dict(type='str', default='latest', choices=['absent', 'latest', 'present']),
+            started=dict(type='bool', default=True),
+        ),
+    )
+
+    # Verify that the platform supports atomic command
+    rc, out, err = module.run_command('atomic -v', check_rc=False)
+    if rc != 0:
+        module.fail_json(msg="Error in running atomic command", err=err)
+
+    try:
+        core(module)
+    except Exception as e:
+        module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/cloud/centurylink/clc_aa_policy.py b/plugins/modules/cloud/centurylink/clc_aa_policy.py
new file mode 100644
index 0000000000..d7d4877199
--- /dev/null
+++ 
b/plugins/modules/cloud/centurylink/clc_aa_policy.py @@ -0,0 +1,352 @@ +#!/usr/bin/python +# +# Copyright (c) 2015 CenturyLink +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +module: clc_aa_policy +short_description: Create or Delete Anti Affinity Policies at CenturyLink Cloud. +description: + - An Ansible module to Create or Delete Anti Affinity Policies at CenturyLink Cloud. +options: + name: + description: + - The name of the Anti Affinity Policy. + required: True + location: + description: + - Datacenter in which the policy lives/should live. + required: True + state: + description: + - Whether to create or delete the policy. + required: False + default: present + choices: ['present','absent'] + wait: + description: + - This option does nothing and will be removed in Ansible 2.14. + type: bool +requirements: + - python = 2.7 + - requests >= 2.5.0 + - clc-sdk +author: "CLC Runner (@clc-runner)" +notes: + - To use this module, it is required to set the below environment variables which enables access to the + Centurylink Cloud + - CLC_V2_API_USERNAME, the account login id for the centurylink cloud + - CLC_V2_API_PASSWORD, the account password for the centurylink cloud + - Alternatively, the module accepts the API token and account alias. The API token can be generated using the + CLC account login and password via the HTTP api call @ https://api.ctl.io/v2/authentication/login + - CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login + - CLC_ACCT_ALIAS, the account alias associated with the centurylink cloud + - Users can set CLC_V2_API_URL to specify an endpoint for pointing to a different CLC environment. 
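+    - For example (illustrative shell setup before running a playbook),
+      C(export CLC_V2_API_USERNAME=myuser) and C(export CLC_V2_API_PASSWD=mypass),
+      or C(export CLC_V2_API_TOKEN=<token>) together with C(export CLC_ACCT_ALIAS=<alias>) for token-based authentication.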
+''' + +EXAMPLES = ''' +# Note - You must set the CLC_V2_API_USERNAME And CLC_V2_API_PASSWD Environment variables before running these examples + +--- +- name: Create AA Policy + hosts: localhost + gather_facts: False + connection: local + tasks: + - name: Create an Anti Affinity Policy + clc_aa_policy: + name: Hammer Time + location: UK3 + state: present + register: policy + + - name: debug + debug: + var: policy + +--- +- name: Delete AA Policy + hosts: localhost + gather_facts: False + connection: local + tasks: + - name: Delete an Anti Affinity Policy + clc_aa_policy: + name: Hammer Time + location: UK3 + state: absent + register: policy + + - name: debug + debug: + var: policy +''' + +RETURN = ''' +policy: + description: The anti affinity policy information + returned: success + type: dict + sample: + { + "id":"1a28dd0988984d87b9cd61fa8da15424", + "name":"test_aa_policy", + "location":"UC1", + "links":[ + { + "rel":"self", + "href":"/v2/antiAffinityPolicies/wfad/1a28dd0988984d87b9cd61fa8da15424", + "verbs":[ + "GET", + "DELETE", + "PUT" + ] + }, + { + "rel":"location", + "href":"/v2/datacenters/wfad/UC1", + "id":"uc1", + "name":"UC1 - US West (Santa Clara)" + } + ] + } +''' + +__version__ = '${version}' + +import os +import traceback + +from distutils.version import LooseVersion + +REQUESTS_IMP_ERR = None +try: + import requests +except ImportError: + REQUESTS_IMP_ERR = traceback.format_exc() + REQUESTS_FOUND = False +else: + REQUESTS_FOUND = True + +# +# Requires the clc-python-sdk: +# sudo pip install clc-sdk +# +CLC_IMP_ERR = None +try: + import clc as clc_sdk + from clc import CLCException +except ImportError: + CLC_IMP_ERR = traceback.format_exc() + CLC_FOUND = False + clc_sdk = None +else: + CLC_FOUND = True + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib + + +class ClcAntiAffinityPolicy: + + clc = clc_sdk + module = None + + def __init__(self, module): + """ + Construct module + """ + self.module = module + self.policy_dict = {} + + if not CLC_FOUND: + self.module.fail_json(msg=missing_required_lib('clc-sdk'), + exception=CLC_IMP_ERR) + if not REQUESTS_FOUND: + self.module.fail_json(msg=missing_required_lib('requests'), + exception=REQUESTS_IMP_ERR) + if requests.__version__ and LooseVersion(requests.__version__) < LooseVersion('2.5.0'): + self.module.fail_json( + msg='requests library version should be >= 2.5.0') + + self._set_user_agent(self.clc) + + @staticmethod + def _define_module_argument_spec(): + """ + Define the argument spec for the ansible module + :return: argument spec dictionary + """ + argument_spec = dict( + name=dict(required=True), + location=dict(required=True), + wait=dict(type='bool', removed_in_version='2.14'), + state=dict(default='present', choices=['present', 'absent']), + ) + return argument_spec + + # Module Behavior Goodness + def process_request(self): + """ + Process the request - Main Code Path + :return: Returns with either an exit_json or fail_json + """ + p = self.module.params + + self._set_clc_credentials_from_env() + self.policy_dict = self._get_policies_for_datacenter(p) + + if p['state'] == "absent": + changed, policy = self._ensure_policy_is_absent(p) + else: + changed, policy = self._ensure_policy_is_present(p) + + if hasattr(policy, 'data'): + policy = policy.data + elif hasattr(policy, '__dict__'): + policy = policy.__dict__ + + self.module.exit_json(changed=changed, policy=policy) + + def _set_clc_credentials_from_env(self): + """ + Set the CLC Credentials on the sdk by reading environment variables + 
+        :return: none
+        """
+        env = os.environ
+        v2_api_token = env.get('CLC_V2_API_TOKEN', False)
+        v2_api_username = env.get('CLC_V2_API_USERNAME', False)
+        v2_api_passwd = env.get('CLC_V2_API_PASSWD', False)
+        clc_alias = env.get('CLC_ACCT_ALIAS', False)
+        api_url = env.get('CLC_V2_API_URL', False)
+
+        if api_url:
+            self.clc.defaults.ENDPOINT_URL_V2 = api_url
+
+        if v2_api_token and clc_alias:
+            self.clc._LOGIN_TOKEN_V2 = v2_api_token
+            self.clc._V2_ENABLED = True
+            self.clc.ALIAS = clc_alias
+        elif v2_api_username and v2_api_passwd:
+            self.clc.v2.SetCredentials(
+                api_username=v2_api_username,
+                api_passwd=v2_api_passwd)
+        else:
+            return self.module.fail_json(
+                msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD "
+                    "environment variables")
+
+    def _get_policies_for_datacenter(self, p):
+        """
+        Get the Policies for a datacenter by calling the CLC API.
+        :param p: datacenter to get policies from
+        :return: policies in the datacenter
+        """
+        response = {}
+
+        policies = self.clc.v2.AntiAffinity.GetAll(location=p['location'])
+
+        for policy in policies:
+            response[policy.name] = policy
+        return response
+
+    def _create_policy(self, p):
+        """
+        Create an Anti Affinity Policy using the CLC API.
+        :param p: datacenter to create policy in
+        :return: response dictionary from the CLC API.
+        """
+        try:
+            return self.clc.v2.AntiAffinity.Create(
+                name=p['name'],
+                location=p['location'])
+        except CLCException as ex:
+            self.module.fail_json(msg='Failed to create anti affinity policy : {0}. {1}'.format(
+                p['name'], ex.response_text
+            ))
+
+    def _delete_policy(self, p):
+        """
+        Delete an Anti Affinity Policy using the CLC API.
+        :param p: datacenter to delete a policy from
+        :return: none
+        """
+        try:
+            policy = self.policy_dict[p['name']]
+            policy.Delete()
+        except CLCException as ex:
+            self.module.fail_json(msg='Failed to delete anti affinity policy : {0}. {1}'.format(
+                p['name'], ex.response_text
+            ))
+
+    def _policy_exists(self, policy_name):
+        """
+        Check to see if an Anti Affinity Policy exists
+        :param policy_name: name of the policy
+        :return: the policy object if it exists, otherwise False
+        """
+        if policy_name in self.policy_dict:
+            return self.policy_dict.get(policy_name)
+
+        return False
+
+    def _ensure_policy_is_absent(self, p):
+        """
+        Makes sure that a policy is absent
+        :param p: dictionary of policy name
+        :return: tuple of if a deletion occurred and the name of the policy that was deleted
+        """
+        changed = False
+        if self._policy_exists(policy_name=p['name']):
+            changed = True
+            if not self.module.check_mode:
+                self._delete_policy(p)
+        return changed, None
+
+    def _ensure_policy_is_present(self, p):
+        """
+        Ensures that a policy is present
+        :param p: dictionary of a policy name
+        :return: tuple of if an addition occurred and the policy that was added
+        """
+        changed = False
+        policy = self._policy_exists(policy_name=p['name'])
+        if not policy:
+            changed = True
+            policy = None
+            if not self.module.check_mode:
+                policy = self._create_policy(p)
+        return changed, policy
+
+    @staticmethod
+    def _set_user_agent(clc):
+        if hasattr(clc, 'SetRequestsSession'):
+            agent_string = "ClcAnsibleModule/" + __version__
+            ses = requests.Session()
+            ses.headers.update({"Api-Client": agent_string})
+            ses.headers['User-Agent'] += " " + agent_string
+            clc.SetRequestsSession(ses)
+
+
+def main():
+    """
+    The main function. Instantiates the module and calls process_request.
+    :return: none
+    """
+    module = AnsibleModule(
+        argument_spec=ClcAntiAffinityPolicy._define_module_argument_spec(),
+        supports_check_mode=True)
+    clc_aa_policy = ClcAntiAffinityPolicy(module)
+    clc_aa_policy.process_request()
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/cloud/centurylink/clc_alert_policy.py b/plugins/modules/cloud/centurylink/clc_alert_policy.py
new file mode 100644
index 0000000000..6710b8819f
--- /dev/null
+++ b/plugins/modules/cloud/centurylink/clc_alert_policy.py
@@ -0,0 +1,524 @@
+#!/usr/bin/python
+
+#
+# Copyright (c) 2015 CenturyLink
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+module: clc_alert_policy
+short_description: Create or Delete Alert Policies at CenturyLink Cloud.
+description:
+  - An Ansible module to Create or Delete Alert Policies at CenturyLink Cloud.
+options:
+  alias:
+    description:
+      - The alias of your CLC Account.
+    required: True
+  name:
+    description:
+      - The name of the alert policy. This is mutually exclusive with id.
+  id:
+    description:
+      - The alert policy id. This is mutually exclusive with name.
+  alert_recipients:
+    description:
+      - A list of recipient email addresses to notify for the alert.
+        This is required for state 'present'.
+  metric:
+    description:
+      - The metric on which to measure the condition that will trigger the alert.
+        This is required for state 'present'.
+    choices: ['cpu','memory','disk']
+  duration:
+    description:
+      - The length of time in minutes that the condition must exceed the threshold.
+        This is required for state 'present'.
+  threshold:
+    description:
+      - The threshold that will trigger the alert when the metric equals or exceeds it.
+        This is required for state 'present'.
+      - This number represents a percentage and must be a value between 5.0 and 95.0 that is a multiple of 5.0.
+  state:
+    description:
+      - Whether to create or delete the policy.
+    default: present
+    choices: ['present','absent']
+requirements:
+    - python = 2.7
+    - requests >= 2.5.0
+    - clc-sdk
+author: "CLC Runner (@clc-runner)"
+notes:
+    - To use this module, it is required to set the below environment variables which enables access to the
+      Centurylink Cloud
+    - CLC_V2_API_USERNAME, the account login id for the centurylink cloud
+    - CLC_V2_API_PASSWORD, the account password for the centurylink cloud
+    - Alternatively, the module accepts the API token and account alias. The API token can be generated using the
+      CLC account login and password via the HTTP api call @ https://api.ctl.io/v2/authentication/login
+    - CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login
+    - CLC_ACCT_ALIAS, the account alias associated with the centurylink cloud
+    - Users can set CLC_V2_API_URL to specify an endpoint for pointing to a different CLC environment.
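+    - For example (illustrative), run C(export CLC_V2_API_USERNAME=myuser) and
+      C(export CLC_V2_API_PASSWD=mypass) in the shell before invoking a playbook that uses this module.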
+''' + +EXAMPLES = ''' +# Note - You must set the CLC_V2_API_USERNAME And CLC_V2_API_PASSWD Environment variables before running these examples + +--- +- name: Create Alert Policy Example + hosts: localhost + gather_facts: False + connection: local + tasks: + - name: Create an Alert Policy for disk above 80% for 5 minutes + clc_alert_policy: + alias: wfad + name: 'alert for disk > 80%' + alert_recipients: + - test1@centurylink.com + - test2@centurylink.com + metric: 'disk' + duration: '00:05:00' + threshold: 80 + state: present + register: policy + + - name: debug + debug: var=policy + +--- +- name: Delete Alert Policy Example + hosts: localhost + gather_facts: False + connection: local + tasks: + - name: Delete an Alert Policy + clc_alert_policy: + alias: wfad + name: 'alert for disk > 80%' + state: absent + register: policy + + - name: debug + debug: var=policy +''' + +RETURN = ''' +policy: + description: The alert policy information + returned: success + type: dict + sample: + { + "actions": [ + { + "action": "email", + "settings": { + "recipients": [ + "user1@domain.com", + "user1@domain.com" + ] + } + } + ], + "id": "ba54ac54a60d4a4f1ed6d48c1ce240a7", + "links": [ + { + "href": "/v2/alertPolicies/alias/ba54ac54a60d4a4fb1d6d48c1ce240a7", + "rel": "self", + "verbs": [ + "GET", + "DELETE", + "PUT" + ] + } + ], + "name": "test_alert", + "triggers": [ + { + "duration": "00:05:00", + "metric": "disk", + "threshold": 80.0 + } + ] + } +''' + +__version__ = '${version}' + +import json +import os +import traceback +from distutils.version import LooseVersion + +REQUESTS_IMP_ERR = None +try: + import requests +except ImportError: + REQUESTS_IMP_ERR = traceback.format_exc() + REQUESTS_FOUND = False +else: + REQUESTS_FOUND = True + +# +# Requires the clc-python-sdk. 
+# sudo pip install clc-sdk
+#
+CLC_IMP_ERR = None
+try:
+    import clc as clc_sdk
+    from clc import APIFailedResponse
+except ImportError:
+    CLC_IMP_ERR = traceback.format_exc()
+    CLC_FOUND = False
+    clc_sdk = None
+else:
+    CLC_FOUND = True
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+
+
+class ClcAlertPolicy:
+
+    clc = clc_sdk
+    module = None
+
+    def __init__(self, module):
+        """
+        Construct module
+        """
+        self.module = module
+        self.policy_dict = {}
+
+        if not CLC_FOUND:
+            self.module.fail_json(msg=missing_required_lib('clc-sdk'), exception=CLC_IMP_ERR)
+        if not REQUESTS_FOUND:
+            self.module.fail_json(msg=missing_required_lib('requests'), exception=REQUESTS_IMP_ERR)
+        if requests.__version__ and LooseVersion(requests.__version__) < LooseVersion('2.5.0'):
+            self.module.fail_json(
+                msg='requests library version should be >= 2.5.0')
+
+        self._set_user_agent(self.clc)
+
+    @staticmethod
+    def _define_module_argument_spec():
+        """
+        Define the argument spec for the ansible module
+        :return: argument spec dictionary
+        """
+        argument_spec = dict(
+            name=dict(default=None),
+            id=dict(default=None),
+            alias=dict(required=True, default=None),
+            alert_recipients=dict(type='list', default=None),
+            metric=dict(
+                choices=[
+                    'cpu',
+                    'memory',
+                    'disk'],
+                default=None),
+            duration=dict(type='str', default=None),
+            threshold=dict(type='int', default=None),
+            state=dict(default='present', choices=['present', 'absent'])
+        )
+        mutually_exclusive = [
+            ['name', 'id']
+        ]
+        return {'argument_spec': argument_spec,
+                'mutually_exclusive': mutually_exclusive}
+
+    # Module Behavior Goodness
+    def process_request(self):
+        """
+        Process the request - Main Code Path
+        :return: Returns with either an exit_json or fail_json
+        """
+        p = self.module.params
+
+        self._set_clc_credentials_from_env()
+        self.policy_dict = self._get_alert_policies(p['alias'])
+
+        if p['state'] == 'present':
+            changed, policy = self._ensure_alert_policy_is_present()
+        else:
+            changed, policy = self._ensure_alert_policy_is_absent()
+
+        self.module.exit_json(changed=changed, policy=policy)
+
+    def _set_clc_credentials_from_env(self):
+        """
+        Set the CLC Credentials on the sdk by reading environment variables
+        :return: none
+        """
+        env = os.environ
+        v2_api_token = env.get('CLC_V2_API_TOKEN', False)
+        v2_api_username = env.get('CLC_V2_API_USERNAME', False)
+        v2_api_passwd = env.get('CLC_V2_API_PASSWD', False)
+        clc_alias = env.get('CLC_ACCT_ALIAS', False)
+        api_url = env.get('CLC_V2_API_URL', False)
+
+        if api_url:
+            self.clc.defaults.ENDPOINT_URL_V2 = api_url
+
+        if v2_api_token and clc_alias:
+            self.clc._LOGIN_TOKEN_V2 = v2_api_token
+            self.clc._V2_ENABLED = True
+            self.clc.ALIAS = clc_alias
+        elif v2_api_username and v2_api_passwd:
+            self.clc.v2.SetCredentials(
+                api_username=v2_api_username,
+                api_passwd=v2_api_passwd)
+        else:
+            return self.module.fail_json(
+                msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD "
+                    "environment variables")
+
+    def _ensure_alert_policy_is_present(self):
+        """
+        Ensures that the alert policy is present
+        :return: (changed, policy)
+                 changed: A flag representing if anything is modified
+                 policy: the created/updated alert policy
+        """
+        changed = False
+        p = self.module.params
+        policy_name = p.get('name')
+
+        if not policy_name:
+            self.module.fail_json(msg='Policy name is required')
+        policy = self._alert_policy_exists(policy_name)
+        if not policy:
+            changed = True
+            policy = None
+            if not self.module.check_mode:
+                policy = self._create_alert_policy()
+        else:
+            changed_u, policy = self._ensure_alert_policy_is_updated(policy)
+            if changed_u:
+                changed = True
+        return changed, policy
+
+    def _ensure_alert_policy_is_absent(self):
+        """
+        Ensures that the alert policy is absent
+        :return: (changed, None)
+                 changed: A flag representing if anything is modified
+        """
+        changed = False
+        p = self.module.params
+        alert_policy_id = p.get('id')
+        alert_policy_name = p.get('name')
+        alias = p.get('alias')
+        if not alert_policy_id and not alert_policy_name:
+            self.module.fail_json(
+                msg='Either alert policy id or policy name is required')
+        if not alert_policy_id and alert_policy_name:
+            alert_policy_id = self._get_alert_policy_id(
+                self.module,
+                alert_policy_name)
+        if alert_policy_id and alert_policy_id in self.policy_dict:
+            changed = True
+            if not self.module.check_mode:
+                self._delete_alert_policy(alias, alert_policy_id)
+        return changed, None
+
+    def _ensure_alert_policy_is_updated(self, alert_policy):
+        """
+        Ensures the alert policy is updated if anything is changed in the alert policy configuration
+        :param alert_policy: the target alert policy
+        :return: (changed, policy)
+                 changed: A flag representing if anything is modified
+                 policy: the updated alert policy
+        """
+        changed = False
+        p = self.module.params
+        alert_policy_id = alert_policy.get('id')
+        email_list = p.get('alert_recipients')
+        metric = p.get('metric')
+        duration = p.get('duration')
+        threshold = p.get('threshold')
+        policy = alert_policy
+        if (metric and metric != str(alert_policy.get('triggers')[0].get('metric'))) or \
+            (duration and duration != str(alert_policy.get('triggers')[0].get('duration'))) or \
+                (threshold and float(threshold) != float(alert_policy.get('triggers')[0].get('threshold'))):
+            changed = True
+        elif email_list:
+            t_email_list = list(
+                alert_policy.get('actions')[0].get('settings').get('recipients'))
+            if set(email_list) != set(t_email_list):
+                changed = True
+        if changed and not self.module.check_mode:
+            policy = self._update_alert_policy(alert_policy_id)
+        return changed, policy
+
+    def _get_alert_policies(self, alias):
+        """
+        Get the alert policies for account alias by calling the CLC API.
+        :param alias: the account alias
+        :return: the alert policies for the account alias
+        """
+        response = {}
+
+        policies = self.clc.v2.API.Call('GET',
+                                        '/v2/alertPolicies/%s'
+                                        % alias)
+
+        for policy in policies.get('items'):
+            response[policy.get('id')] = policy
+        return response
+
+    def _create_alert_policy(self):
+        """
+        Create an alert Policy using the CLC API.
+        :return: response dictionary from the CLC API.
+        """
+        p = self.module.params
+        alias = p['alias']
+        email_list = p['alert_recipients']
+        metric = p['metric']
+        duration = p['duration']
+        threshold = p['threshold']
+        policy_name = p['name']
+        arguments = json.dumps(
+            {
+                'name': policy_name,
+                'actions': [{
+                    'action': 'email',
+                    'settings': {
+                        'recipients': email_list
+                    }
+                }],
+                'triggers': [{
+                    'metric': metric,
+                    'duration': duration,
+                    'threshold': threshold
+                }]
+            }
+        )
+        try:
+            result = self.clc.v2.API.Call(
+                'POST',
+                '/v2/alertPolicies/%s' % alias,
+                arguments)
+        except APIFailedResponse as e:
+            return self.module.fail_json(
+                msg='Unable to create alert policy "{0}". {1}'.format(
+                    policy_name, str(e.response_text)))
+        return result
+
+    def _update_alert_policy(self, alert_policy_id):
+        """
+        Update alert policy using the CLC API.
+        :param alert_policy_id: The clc alert policy id
+        :return: response dictionary from the CLC API.
+        """
+        p = self.module.params
+        alias = p['alias']
+        email_list = p['alert_recipients']
+        metric = p['metric']
+        duration = p['duration']
+        threshold = p['threshold']
+        policy_name = p['name']
+        arguments = json.dumps(
+            {
+                'name': policy_name,
+                'actions': [{
+                    'action': 'email',
+                    'settings': {
+                        'recipients': email_list
+                    }
+                }],
+                'triggers': [{
+                    'metric': metric,
+                    'duration': duration,
+                    'threshold': threshold
+                }]
+            }
+        )
+        try:
+            result = self.clc.v2.API.Call(
+                'PUT', '/v2/alertPolicies/%s/%s' %
+                (alias, alert_policy_id), arguments)
+        except APIFailedResponse as e:
+            return self.module.fail_json(
+                msg='Unable to update alert policy "{0}". {1}'.format(
+                    policy_name, str(e.response_text)))
+        return result
+
+    def _delete_alert_policy(self, alias, policy_id):
+        """
+        Delete an alert policy using the CLC API.
+        :param alias: the account alias
+        :param policy_id: the alert policy id
+        :return: response dictionary from the CLC API.
+        """
+        try:
+            result = self.clc.v2.API.Call(
+                'DELETE', '/v2/alertPolicies/%s/%s' %
+                (alias, policy_id), None)
+        except APIFailedResponse as e:
+            return self.module.fail_json(
+                msg='Unable to delete alert policy id "{0}". {1}'.format(
+                    policy_id, str(e.response_text)))
+        return result
+
+    def _alert_policy_exists(self, policy_name):
+        """
+        Check to see if an alert policy exists
+        :param policy_name: name of the alert policy
+        :return: the policy dictionary if it exists, otherwise False
+        """
+        result = False
+        for policy_id in self.policy_dict:
+            if self.policy_dict.get(policy_id).get('name') == policy_name:
+                result = self.policy_dict.get(policy_id)
+        return result
+
+    def _get_alert_policy_id(self, module, alert_policy_name):
+        """
+        Retrieves the alert policy id of the account based on the name of the policy
+        :param module: the AnsibleModule object
+        :param alert_policy_name: the alert policy name
+        :return: alert_policy_id: The alert policy id
+        """
+        alert_policy_id = None
+        for policy_id in self.policy_dict:
+            if self.policy_dict.get(policy_id).get('name') == alert_policy_name:
+                if not alert_policy_id:
+                    alert_policy_id = policy_id
+                else:
+                    return module.fail_json(
+                        msg='multiple alert policies were found with policy name : %s' % alert_policy_name)
+        return alert_policy_id
+
+    @staticmethod
+    def _set_user_agent(clc):
+        if hasattr(clc, 'SetRequestsSession'):
+            agent_string = "ClcAnsibleModule/" + __version__
+            ses = requests.Session()
+            ses.headers.update({"Api-Client": agent_string})
+            ses.headers['User-Agent'] += " " + agent_string
+            clc.SetRequestsSession(ses)
+
+
+def main():
+    """
+    The main function. Instantiates the module and calls process_request.
+    :return: none
+    """
+    argument_dict = ClcAlertPolicy._define_module_argument_spec()
+    module = AnsibleModule(supports_check_mode=True, **argument_dict)
+    clc_alert_policy = ClcAlertPolicy(module)
+    clc_alert_policy.process_request()
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/cloud/centurylink/clc_blueprint_package.py b/plugins/modules/cloud/centurylink/clc_blueprint_package.py
new file mode 100644
index 0000000000..5572b9fcd6
--- /dev/null
+++ b/plugins/modules/cloud/centurylink/clc_blueprint_package.py
@@ -0,0 +1,300 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2015 CenturyLink
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+module: clc_blueprint_package
+short_description: Deploys a blueprint package on a set of servers in CenturyLink Cloud.
+description:
+  - An Ansible module to deploy a blueprint package on a set of servers in CenturyLink Cloud.
+options:
+  server_ids:
+    description:
+      - A list of server Ids to deploy the blueprint package on.
+    required: True
+  package_id:
+    description:
+      - The package id of the blueprint.
+    required: True
+  package_params:
+    description:
+      - The dictionary of arguments required to deploy the blueprint.
+    default: {}
+    required: False
+  state:
+    description:
+      - Whether to install or uninstall the package. Currently it supports only "present" for install action.
+    required: False
+    default: present
+    choices: ['present']
+  wait:
+    description:
+      - Whether to wait for the tasks to finish before returning.
+    type: bool
+    default: True
+    required: False
+requirements:
+    - python = 2.7
+    - requests >= 2.5.0
+    - clc-sdk
+author: "CLC Runner (@clc-runner)"
+notes:
+    - To use this module, it is required to set the below environment variables which enables access to the
+      Centurylink Cloud
+    - CLC_V2_API_USERNAME, the account login id for the centurylink cloud
+    - CLC_V2_API_PASSWORD, the account password for the centurylink cloud
+    - Alternatively, the module accepts the API token and account alias. The API token can be generated using the
+      CLC account login and password via the HTTP api call @ https://api.ctl.io/v2/authentication/login
+    - CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login
+    - CLC_ACCT_ALIAS, the account alias associated with the centurylink cloud
+    - Users can set CLC_V2_API_URL to specify an endpoint for pointing to a different CLC environment.
+'''
+
+EXAMPLES = '''
+# Note - You must set the CLC_V2_API_USERNAME And CLC_V2_API_PASSWD Environment variables before running these examples
+
+- name: Deploy package
+  clc_blueprint_package:
+    server_ids:
+      - UC1TEST-SERVER1
+      - UC1TEST-SERVER2
+    package_id: 77abb844-579d-478d-3955-c69ab4a7ba1a
+    package_params: {}
+'''
+
+RETURN = '''
+server_ids:
+    description: The list of server ids that are changed
+    returned: success
+    type: list
+    sample:
+        [
+            "UC1TEST-SERVER1",
+            "UC1TEST-SERVER2"
+        ]
+'''
+
+__version__ = '${version}'
+
+import os
+import traceback
+from distutils.version import LooseVersion
+
+REQUESTS_IMP_ERR = None
+try:
+    import requests
+except ImportError:
+    REQUESTS_IMP_ERR = traceback.format_exc()
+    REQUESTS_FOUND = False
+else:
+    REQUESTS_FOUND = True
+
+#
+# Requires the clc-python-sdk.
+# sudo pip install clc-sdk
+#
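+# Illustrative shell setup (not executed by the module): install the SDK and
+# export the credentials that the module reads from the environment:
+#
+#   pip install clc-sdk 'requests>=2.5.0'
+#   export CLC_V2_API_USERNAME=myuser
+#   export CLC_V2_API_PASSWD=mypass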
+CLC_IMP_ERR = None
+try:
+    import clc as clc_sdk
+    from clc import CLCException
+except ImportError:
+    CLC_IMP_ERR = traceback.format_exc()
+    CLC_FOUND = False
+    clc_sdk = None
+else:
+    CLC_FOUND = True
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+
+
+class ClcBlueprintPackage:
+
+    clc = clc_sdk
+    module = None
+
+    def __init__(self, module):
+        """
+        Construct module
+        """
+        self.module = module
+        if not CLC_FOUND:
+            self.module.fail_json(msg=missing_required_lib('clc-sdk'), exception=CLC_IMP_ERR)
+        if not REQUESTS_FOUND:
+            self.module.fail_json(msg=missing_required_lib('requests'), exception=REQUESTS_IMP_ERR)
+        if requests.__version__ and LooseVersion(
+                requests.__version__) < LooseVersion('2.5.0'):
+            self.module.fail_json(
+                msg='requests library version should be >= 2.5.0')
+
+        self._set_user_agent(self.clc)
+
+    def process_request(self):
+        """
+        Process the request - Main Code Path
+        :return: Returns with either an exit_json or fail_json
+        """
+        p = self.module.params
+        changed = False
+        changed_server_ids = []
+        self._set_clc_credentials_from_env()
+        server_ids = p['server_ids']
+        package_id = p['package_id']
+        package_params = p['package_params']
+        state = p['state']
+        if state == 'present':
+            changed, changed_server_ids, request_list = self.ensure_package_installed(
+                server_ids, package_id, package_params)
+            self._wait_for_requests_to_complete(request_list)
+        self.module.exit_json(changed=changed, server_ids=changed_server_ids)
+
+    @staticmethod
+    def define_argument_spec():
+        """
+        Define the argument spec for the ansible module
+        :return: the argument spec dictionary
+        """
+        argument_spec = dict(
+            server_ids=dict(type='list', required=True),
+            package_id=dict(required=True),
+            package_params=dict(type='dict', default={}),
+            wait=dict(type='bool', default=True),
+            state=dict(default='present', choices=['present'])
+        )
+        return argument_spec
+
+    def ensure_package_installed(self, server_ids, package_id, package_params):
+        """
+        Ensure the package is installed in the given list of servers
+        :param server_ids: the server list where the package needs to be installed
+        :param package_id: the blueprint package id
+        :param package_params: the package arguments
+        :return: (changed, server_ids, request_list)
+                    changed: A flag indicating if a change was made
+                    server_ids: The list of servers modified
+                    request_list: The list of request objects from clc-sdk
+        """
+        changed = False
+        request_list = []
+        servers = self._get_servers_from_clc(
+            server_ids,
+            'Failed to get servers from CLC')
+        for server in servers:
+            if not self.module.check_mode:
+                request = self.clc_install_package(
+                    server,
+                    package_id,
+                    package_params)
+                request_list.append(request)
+            changed = True
+        return changed, server_ids, request_list
+
+    def clc_install_package(self, server, package_id, package_params):
+        """
+        Install the package to a given clc server
+        :param server: The server object where the package needs to be installed
+        :param package_id: The blueprint package id
+        :param package_params: the required argument dict for the package installation
+        :return: The result object from the CLC API call
+        """
+        result = None
+        try:
+            result = server.ExecutePackage(
+                package_id=package_id,
+                parameters=package_params)
+        except CLCException as ex:
+            self.module.fail_json(msg='Failed to install package {0} on server {1}. 
{2}'.format( + package_id, server.id, ex.message + )) + return result + + def _wait_for_requests_to_complete(self, request_lst): + """ + Waits until the CLC requests are complete if the wait argument is True + :param request_lst: The list of CLC request objects + :return: none + """ + if not self.module.params['wait']: + return + for request in request_lst: + request.WaitUntilComplete() + for request_details in request.requests: + if request_details.Status() != 'succeeded': + self.module.fail_json( + msg='Unable to process package install request') + + def _get_servers_from_clc(self, server_list, message): + """ + Internal function to fetch list of CLC server objects from a list of server ids + :param server_list: the list of server ids + :param message: the error message to raise if there is any error + :return the list of CLC server objects + """ + try: + return self.clc.v2.Servers(server_list).servers + except CLCException as ex: + self.module.fail_json(msg=message + ': %s' % ex) + + def _set_clc_credentials_from_env(self): + """ + Set the CLC Credentials on the sdk by reading environment variables + :return: none + """ + env = os.environ + v2_api_token = env.get('CLC_V2_API_TOKEN', False) + v2_api_username = env.get('CLC_V2_API_USERNAME', False) + v2_api_passwd = env.get('CLC_V2_API_PASSWD', False) + clc_alias = env.get('CLC_ACCT_ALIAS', False) + api_url = env.get('CLC_V2_API_URL', False) + + if api_url: + self.clc.defaults.ENDPOINT_URL_V2 = api_url + + if v2_api_token and clc_alias: + self.clc._LOGIN_TOKEN_V2 = v2_api_token + self.clc._V2_ENABLED = True + self.clc.ALIAS = clc_alias + elif v2_api_username and v2_api_passwd: + self.clc.v2.SetCredentials( + api_username=v2_api_username, + api_passwd=v2_api_passwd) + else: + return self.module.fail_json( + msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD " + "environment variables") + + @staticmethod + def _set_user_agent(clc): + if hasattr(clc, 'SetRequestsSession'): + agent_string = "ClcAnsibleModule/" + __version__ + ses = requests.Session() + ses.headers.update({"Api-Client": agent_string}) + ses.headers['User-Agent'] += " " + agent_string + clc.SetRequestsSession(ses) + + +def main(): + """ + Main function + :return: None + """ + module = AnsibleModule( + argument_spec=ClcBlueprintPackage.define_argument_spec(), + supports_check_mode=True + ) + clc_blueprint_package = ClcBlueprintPackage(module) + clc_blueprint_package.process_request() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/centurylink/clc_firewall_policy.py b/plugins/modules/cloud/centurylink/clc_firewall_policy.py new file mode 100644 index 0000000000..789d2dd3f7 --- /dev/null +++ b/plugins/modules/cloud/centurylink/clc_firewall_policy.py @@ -0,0 +1,581 @@ +#!/usr/bin/python +# +# Copyright (c) 2015 CenturyLink +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +module: clc_firewall_policy +short_description: Create/delete/update firewall policies +description: + - Create or delete or update firewall policies on Centurylink Cloud +options: + location: + description: + - Target datacenter for the firewall policy + required: True + state: + description: + - Whether to create or delete the firewall policy + default: present + choices: ['present', 'absent'] + source: + 
description: + - The list of source addresses for traffic on the originating firewall. + This is required when state is 'present' + destination: + description: + - The list of destination addresses for traffic on the terminating firewall. + This is required when state is 'present' + ports: + description: + - The list of ports associated with the policy. + TCP and UDP can take in single ports or port ranges. + choices: ['any', 'icmp', 'TCP/123', 'UDP/123', 'TCP/123-456', 'UDP/123-456'] + firewall_policy_id: + description: + - Id of the firewall policy. This is required to update or delete an existing firewall policy + source_account_alias: + description: + - CLC alias for the source account + required: True + destination_account_alias: + description: + - CLC alias for the destination account + wait: + description: + - Whether to wait for the provisioning tasks to finish before returning. + type: bool + default: 'yes' + enabled: + description: + - Whether the firewall policy is enabled or disabled + choices: [True, False] + default: 'yes' +requirements: + - python = 2.7 + - requests >= 2.5.0 + - clc-sdk +author: "CLC Runner (@clc-runner)" +notes: + - To use this module, it is required to set the below environment variables which enables access to the + Centurylink Cloud + - CLC_V2_API_USERNAME, the account login id for the centurylink cloud + - CLC_V2_API_PASSWORD, the account password for the centurylink cloud + - Alternatively, the module accepts the API token and account alias. The API token can be generated using the + CLC account login and password via the HTTP api call @ https://api.ctl.io/v2/authentication/login + - CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login + - CLC_ACCT_ALIAS, the account alias associated with the centurylink cloud + - Users can set CLC_V2_API_URL to specify an endpoint for pointing to a different CLC environment. 
+'''
+
+EXAMPLES = '''
+---
+- name: Create Firewall Policy
+  hosts: localhost
+  gather_facts: False
+  connection: local
+  tasks:
+    - name: Create / Verify a Firewall Policy at CenturyLink Cloud
+      clc_firewall:
+        source_account_alias: WFAD
+        location: VA1
+        state: present
+        source: 10.128.216.0/24
+        destination: 10.128.216.0/24
+        ports: Any
+        destination_account_alias: WFAD
+
+---
+- name: Delete Firewall Policy
+  hosts: localhost
+  gather_facts: False
+  connection: local
+  tasks:
+    - name: Delete a Firewall Policy at CenturyLink Cloud
+      clc_firewall:
+        source_account_alias: WFAD
+        location: VA1
+        state: absent
+        firewall_policy_id: c62105233d7a4231bd2e91b9c791e43e1
+'''
+
+RETURN = '''
+firewall_policy_id:
+    description: The firewall policy id
+    returned: success
+    type: str
+    sample: fc36f1bfd47242e488a9c44346438c05
+firewall_policy:
+    description: The firewall policy information
+    returned: success
+    type: dict
+    sample:
+        {
+           "destination":[
+              "10.1.1.0/24",
+              "10.2.2.0/24"
+           ],
+           "destinationAccount":"wfad",
+           "enabled":true,
+           "id":"fc36f1bfd47242e488a9c44346438c05",
+           "links":[
+              {
+                 "href":"http://api.ctl.io/v2-experimental/firewallPolicies/wfad/uc1/fc36f1bfd47242e488a9c44346438c05",
+                 "rel":"self",
+                 "verbs":[
+                    "GET",
+                    "PUT",
+                    "DELETE"
+                 ]
+              }
+           ],
+           "ports":[
+              "any"
+           ],
+           "source":[
+              "10.1.1.0/24",
+              "10.2.2.0/24"
+           ],
+           "status":"active"
+        }
+'''
+
+__version__ = '${version}'
+
+import os
+import traceback
+from ansible.module_utils.six.moves.urllib.parse import urlparse
+from time import sleep
+from distutils.version import LooseVersion
+
+REQUESTS_IMP_ERR = None
+try:
+    import requests
+except ImportError:
+    REQUESTS_IMP_ERR = traceback.format_exc()
+    REQUESTS_FOUND = False
+else:
+    REQUESTS_FOUND = True
+
+CLC_IMP_ERR = None
+try:
+    import clc as clc_sdk
+    from clc import APIFailedResponse
+except ImportError:
+    CLC_IMP_ERR = traceback.format_exc()
+    CLC_FOUND = False
+    clc_sdk = None
+else:
+    CLC_FOUND = True
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+
+
+class ClcFirewallPolicy:
+
+    clc = None
+
+    def __init__(self, module):
+        """
+        Construct module
+        """
+        self.clc = clc_sdk
+        self.module = module
+        self.firewall_dict = {}
+
+        if not CLC_FOUND:
+            self.module.fail_json(msg=missing_required_lib('clc-sdk'), exception=CLC_IMP_ERR)
+        if not REQUESTS_FOUND:
+            self.module.fail_json(msg=missing_required_lib('requests'), exception=REQUESTS_IMP_ERR)
+        if requests.__version__ and LooseVersion(
+                requests.__version__) < LooseVersion('2.5.0'):
+            self.module.fail_json(
+                msg='requests library version should be >= 2.5.0')
+
+        self._set_user_agent(self.clc)
+
+    @staticmethod
+    def _define_module_argument_spec():
+        """
+        Define the argument spec for the ansible module
+        :return: argument spec dictionary
+        """
+        argument_spec = dict(
+            location=dict(required=True),
+            source_account_alias=dict(required=True),
+            destination_account_alias=dict(default=None),
+            firewall_policy_id=dict(default=None),
+            ports=dict(default=None, type='list'),
+            source=dict(default=None, type='list'),
+            destination=dict(default=None, type='list'),
+            wait=dict(type='bool', default=True),
+            state=dict(default='present', choices=['present', 'absent']),
+            enabled=dict(type='bool', default=True)
+        )
+        return argument_spec
+
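+    # Editor's note: an illustrative sketch, not part of the original module.
+    # It demonstrates, with a made-up creation response, how the new policy id
+    # is recovered from the 'self' link (see _get_policy_id_from_response
+    # below for the real implementation).
+    @staticmethod
+    def _example_policy_id_parse():
+        response = {'links': [{
+            'rel': 'self',
+            'href': ('http://api.ctl.io/v2-experimental/firewallPolicies'
+                     '/wfad/uc1/fc36f1bfd47242e488a9c44346438c05'),
+        }]}
+        path = urlparse(response['links'][0]['href']).path
+        return os.path.split(path)[-1]  # -> 'fc36f1bfd47242e488a9c44346438c05'
+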
+    def process_request(self):
+        """
+        Execute the main code path, and handle the request
+        :return: none
+        """
+        changed = False
+        firewall_policy = None
+        location = self.module.params.get('location')
+        source_account_alias = self.module.params.get('source_account_alias')
+        destination_account_alias = self.module.params.get(
+            'destination_account_alias')
+        firewall_policy_id = self.module.params.get('firewall_policy_id')
+        ports = self.module.params.get('ports')
+        source = self.module.params.get('source')
+        destination = self.module.params.get('destination')
+        wait = self.module.params.get('wait')
+        state = self.module.params.get('state')
+        enabled = self.module.params.get('enabled')
+
+        self.firewall_dict = {
+            'location': location,
+            'source_account_alias': source_account_alias,
+            'destination_account_alias': destination_account_alias,
+            'firewall_policy_id': firewall_policy_id,
+            'ports': ports,
+            'source': source,
+            'destination': destination,
+            'wait': wait,
+            'state': state,
+            'enabled': enabled}
+
+        self._set_clc_credentials_from_env()
+
+        if state == 'absent':
+            changed, firewall_policy_id, firewall_policy = self._ensure_firewall_policy_is_absent(
+                source_account_alias, location, self.firewall_dict)
+
+        elif state == 'present':
+            changed, firewall_policy_id, firewall_policy = self._ensure_firewall_policy_is_present(
+                source_account_alias, location, self.firewall_dict)
+
+        return self.module.exit_json(
+            changed=changed,
+            firewall_policy_id=firewall_policy_id,
+            firewall_policy=firewall_policy)
+
+    @staticmethod
+    def _get_policy_id_from_response(response):
+        """
+        Parse the policy id out of the firewall creation response
+        :param response: response from the firewall creation API call
+        :return: policy_id: firewall policy id from the creation call
+        """
+        url = response.get('links')[0]['href']
+        path = urlparse(url).path
+        path_list = os.path.split(path)
+        policy_id = path_list[-1]
+        return policy_id
+
+    def _set_clc_credentials_from_env(self):
+        """
+        Set the CLC Credentials on the sdk by reading environment variables
+        :return: none
+        """
+        env = os.environ
+        v2_api_token = env.get('CLC_V2_API_TOKEN', False)
+        v2_api_username = env.get('CLC_V2_API_USERNAME', False)
+        v2_api_passwd = env.get('CLC_V2_API_PASSWD', False)
+        clc_alias = env.get('CLC_ACCT_ALIAS', False)
+        api_url = env.get('CLC_V2_API_URL', False)
+
+        if api_url:
+            self.clc.defaults.ENDPOINT_URL_V2 = api_url
+
+        if v2_api_token and clc_alias:
+            self.clc._LOGIN_TOKEN_V2 = v2_api_token
+            self.clc._V2_ENABLED = True
+            self.clc.ALIAS = clc_alias
+        elif v2_api_username and v2_api_passwd:
+            self.clc.v2.SetCredentials(
+                api_username=v2_api_username,
+                api_passwd=v2_api_passwd)
+        else:
+            return self.module.fail_json(
+                msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD "
+                    "environment variables")
+
+    def _ensure_firewall_policy_is_present(
+            self,
+            source_account_alias,
+            location,
+            firewall_dict):
+        """
+        Ensures that a given firewall policy is present
+        :param source_account_alias: the source account alias for the firewall policy
+        :param location: datacenter of the firewall policy
+        :param firewall_dict: dictionary of request parameters for the firewall policy
+        :return: (changed, firewall_policy_id, firewall_policy)
+            changed: flag for if a change occurred
+            firewall_policy_id: the firewall policy id that was created/updated
+            firewall_policy: The firewall_policy object
+        """
+        firewall_policy = None
+        firewall_policy_id = firewall_dict.get('firewall_policy_id')
+
+        if firewall_policy_id is None:
+            if not self.module.check_mode:
+                response = self._create_firewall_policy(
+                    source_account_alias,
+                    location,
+                    firewall_dict)
+                firewall_policy_id = self._get_policy_id_from_response(
+                    response)
+            changed = True
+        else:
+            firewall_policy = 
self._get_firewall_policy( + source_account_alias, location, firewall_policy_id) + if not firewall_policy: + return self.module.fail_json( + msg='Unable to find the firewall policy id : {0}'.format( + firewall_policy_id)) + changed = self._compare_get_request_with_dict( + firewall_policy, + firewall_dict) + if not self.module.check_mode and changed: + self._update_firewall_policy( + source_account_alias, + location, + firewall_policy_id, + firewall_dict) + if changed and firewall_policy_id: + firewall_policy = self._wait_for_requests_to_complete( + source_account_alias, + location, + firewall_policy_id) + return changed, firewall_policy_id, firewall_policy + + def _ensure_firewall_policy_is_absent( + self, + source_account_alias, + location, + firewall_dict): + """ + Ensures that a given firewall policy is removed if present + :param source_account_alias: the source account alias for the firewall policy + :param location: datacenter of the firewall policy + :param firewall_dict: firewall policy to delete + :return: (changed, firewall_policy_id, response) + changed: flag for if a change occurred + firewall_policy_id: the firewall policy id that was deleted + response: response from CLC API call + """ + changed = False + response = [] + firewall_policy_id = firewall_dict.get('firewall_policy_id') + result = self._get_firewall_policy( + source_account_alias, location, firewall_policy_id) + if result: + if not self.module.check_mode: + response = self._delete_firewall_policy( + source_account_alias, + location, + firewall_policy_id) + changed = True + return changed, firewall_policy_id, response + + def _create_firewall_policy( + self, + source_account_alias, + location, + firewall_dict): + """ + Creates the firewall policy for the given account alias + :param source_account_alias: the source account alias for the firewall policy + :param location: datacenter of the firewall policy + :param firewall_dict: dictionary of request parameters for firewall policy + :return: response from CLC API call + """ + payload = { + 'destinationAccount': firewall_dict.get('destination_account_alias'), + 'source': firewall_dict.get('source'), + 'destination': firewall_dict.get('destination'), + 'ports': firewall_dict.get('ports')} + try: + response = self.clc.v2.API.Call( + 'POST', '/v2-experimental/firewallPolicies/%s/%s' % + (source_account_alias, location), payload) + except APIFailedResponse as e: + return self.module.fail_json( + msg="Unable to create firewall policy. %s" % + str(e.response_text)) + return response + + def _delete_firewall_policy( + self, + source_account_alias, + location, + firewall_policy_id): + """ + Deletes a given firewall policy for an account alias in a datacenter + :param source_account_alias: the source account alias for the firewall policy + :param location: datacenter of the firewall policy + :param firewall_policy_id: firewall policy id to delete + :return: response: response from CLC API call + """ + try: + response = self.clc.v2.API.Call( + 'DELETE', '/v2-experimental/firewallPolicies/%s/%s/%s' % + (source_account_alias, location, firewall_policy_id)) + except APIFailedResponse as e: + return self.module.fail_json( + msg="Unable to delete the firewall policy id : {0}. 
{1}".format( + firewall_policy_id, str(e.response_text))) + return response + + def _update_firewall_policy( + self, + source_account_alias, + location, + firewall_policy_id, + firewall_dict): + """ + Updates a firewall policy for a given datacenter and account alias + :param source_account_alias: the source account alias for the firewall policy + :param location: datacenter of the firewall policy + :param firewall_policy_id: firewall policy id to update + :param firewall_dict: dictionary of request parameters for firewall policy + :return: response: response from CLC API call + """ + try: + response = self.clc.v2.API.Call( + 'PUT', + '/v2-experimental/firewallPolicies/%s/%s/%s' % + (source_account_alias, + location, + firewall_policy_id), + firewall_dict) + except APIFailedResponse as e: + return self.module.fail_json( + msg="Unable to update the firewall policy id : {0}. {1}".format( + firewall_policy_id, str(e.response_text))) + return response + + @staticmethod + def _compare_get_request_with_dict(response, firewall_dict): + """ + Helper method to compare the json response for getting the firewall policy with the request parameters + :param response: response from the get method + :param firewall_dict: dictionary of request parameters for firewall policy + :return: changed: Boolean that returns true if there are differences between + the response parameters and the playbook parameters + """ + + changed = False + + response_dest_account_alias = response.get('destinationAccount') + response_enabled = response.get('enabled') + response_source = response.get('source') + response_dest = response.get('destination') + response_ports = response.get('ports') + request_dest_account_alias = firewall_dict.get( + 'destination_account_alias') + request_enabled = firewall_dict.get('enabled') + if request_enabled is None: + request_enabled = True + request_source = firewall_dict.get('source') + request_dest = firewall_dict.get('destination') + request_ports = firewall_dict.get('ports') + + if ( + response_dest_account_alias and str(response_dest_account_alias) != str(request_dest_account_alias)) or ( + response_enabled != request_enabled) or ( + response_source and response_source != request_source) or ( + response_dest and response_dest != request_dest) or ( + response_ports and response_ports != request_ports): + changed = True + return changed + + def _get_firewall_policy( + self, + source_account_alias, + location, + firewall_policy_id): + """ + Get back details for a particular firewall policy + :param source_account_alias: the source account alias for the firewall policy + :param location: datacenter of the firewall policy + :param firewall_policy_id: id of the firewall policy to get + :return: response - The response from CLC API call + """ + response = None + try: + response = self.clc.v2.API.Call( + 'GET', '/v2-experimental/firewallPolicies/%s/%s/%s' % + (source_account_alias, location, firewall_policy_id)) + except APIFailedResponse as e: + if e.response_status_code != 404: + self.module.fail_json( + msg="Unable to fetch the firewall policy with id : {0}. 
{1}".format( + firewall_policy_id, str(e.response_text))) + return response + + def _wait_for_requests_to_complete( + self, + source_account_alias, + location, + firewall_policy_id, + wait_limit=50): + """ + Waits until the CLC requests are complete if the wait argument is True + :param source_account_alias: The source account alias for the firewall policy + :param location: datacenter of the firewall policy + :param firewall_policy_id: The firewall policy id + :param wait_limit: The number of times to check the status for completion + :return: the firewall_policy object + """ + wait = self.module.params.get('wait') + count = 0 + firewall_policy = None + while wait: + count += 1 + firewall_policy = self._get_firewall_policy( + source_account_alias, location, firewall_policy_id) + status = firewall_policy.get('status') + if status == 'active' or count > wait_limit: + wait = False + else: + # wait for 2 seconds + sleep(2) + return firewall_policy + + @staticmethod + def _set_user_agent(clc): + if hasattr(clc, 'SetRequestsSession'): + agent_string = "ClcAnsibleModule/" + __version__ + ses = requests.Session() + ses.headers.update({"Api-Client": agent_string}) + ses.headers['User-Agent'] += " " + agent_string + clc.SetRequestsSession(ses) + + +def main(): + """ + The main function. Instantiates the module and calls process_request. + :return: none + """ + module = AnsibleModule( + argument_spec=ClcFirewallPolicy._define_module_argument_spec(), + supports_check_mode=True) + + clc_firewall = ClcFirewallPolicy(module) + clc_firewall.process_request() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/centurylink/clc_group.py b/plugins/modules/cloud/centurylink/clc_group.py new file mode 100644 index 0000000000..cc7a5aef7d --- /dev/null +++ b/plugins/modules/cloud/centurylink/clc_group.py @@ -0,0 +1,514 @@ +#!/usr/bin/python + +# +# Copyright (c) 2015 CenturyLink +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +module: clc_group +short_description: Create/delete Server Groups at Centurylink Cloud +description: + - Create or delete Server Groups at Centurylink Centurylink Cloud +options: + name: + description: + - The name of the Server Group + required: True + description: + description: + - A description of the Server Group + required: False + parent: + description: + - The parent group of the server group. If parent is not provided, it creates the group at top level. + required: False + location: + description: + - Datacenter to create the group in. If location is not provided, the group gets created in the default datacenter + associated with the account + required: False + state: + description: + - Whether to create or delete the group + default: present + choices: ['present', 'absent'] + wait: + description: + - Whether to wait for the tasks to finish before returning. 
+ type: bool + default: True + required: False +requirements: + - python = 2.7 + - requests >= 2.5.0 + - clc-sdk +author: "CLC Runner (@clc-runner)" +notes: + - To use this module, it is required to set the below environment variables which enables access to the + Centurylink Cloud + - CLC_V2_API_USERNAME, the account login id for the centurylink cloud + - CLC_V2_API_PASSWORD, the account password for the centurylink cloud + - Alternatively, the module accepts the API token and account alias. The API token can be generated using the + CLC account login and password via the HTTP api call @ https://api.ctl.io/v2/authentication/login + - CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login + - CLC_ACCT_ALIAS, the account alias associated with the centurylink cloud + - Users can set CLC_V2_API_URL to specify an endpoint for pointing to a different CLC environment. +''' + +EXAMPLES = ''' + +# Create a Server Group + +--- +- name: Create Server Group + hosts: localhost + gather_facts: False + connection: local + tasks: + - name: Create / Verify a Server Group at CenturyLink Cloud + clc_group: + name: My Cool Server Group + parent: Default Group + state: present + register: clc + + - name: debug + debug: + var: clc + +# Delete a Server Group + +--- +- name: Delete Server Group + hosts: localhost + gather_facts: False + connection: local + tasks: + - name: Delete / Verify Absent a Server Group at CenturyLink Cloud + clc_group: + name: My Cool Server Group + parent: Default Group + state: absent + register: clc + + - name: debug + debug: + var: clc +''' + +RETURN = ''' +group: + description: The group information + returned: success + type: dict + sample: + { + "changeInfo":{ + "createdBy":"service.wfad", + "createdDate":"2015-07-29T18:52:47Z", + "modifiedBy":"service.wfad", + "modifiedDate":"2015-07-29T18:52:47Z" + }, + "customFields":[ + + ], + "description":"test group", + "groups":[ + + ], + "id":"bb5f12a3c6044ae4ad0a03e73ae12cd1", + "links":[ + { + "href":"/v2/groups/wfad", + "rel":"createGroup", + "verbs":[ + "POST" + ] + }, + { + "href":"/v2/servers/wfad", + "rel":"createServer", + "verbs":[ + "POST" + ] + }, + { + "href":"/v2/groups/wfad/bb5f12a3c6044ae4ad0a03e73ae12cd1", + "rel":"self", + "verbs":[ + "GET", + "PATCH", + "DELETE" + ] + }, + { + "href":"/v2/groups/wfad/086ac1dfe0b6411989e8d1b77c4065f0", + "id":"086ac1dfe0b6411989e8d1b77c4065f0", + "rel":"parentGroup" + }, + { + "href":"/v2/groups/wfad/bb5f12a3c6044ae4ad0a03e73ae12cd1/defaults", + "rel":"defaults", + "verbs":[ + "GET", + "POST" + ] + }, + { + "href":"/v2/groups/wfad/bb5f12a3c6044ae4ad0a03e73ae12cd1/billing", + "rel":"billing" + }, + { + "href":"/v2/groups/wfad/bb5f12a3c6044ae4ad0a03e73ae12cd1/archive", + "rel":"archiveGroupAction" + }, + { + "href":"/v2/groups/wfad/bb5f12a3c6044ae4ad0a03e73ae12cd1/statistics", + "rel":"statistics" + }, + { + "href":"/v2/groups/wfad/bb5f12a3c6044ae4ad0a03e73ae12cd1/upcomingScheduledActivities", + "rel":"upcomingScheduledActivities" + }, + { + "href":"/v2/groups/wfad/bb5f12a3c6044ae4ad0a03e73ae12cd1/horizontalAutoscalePolicy", + "rel":"horizontalAutoscalePolicyMapping", + "verbs":[ + "GET", + "PUT", + "DELETE" + ] + }, + { + "href":"/v2/groups/wfad/bb5f12a3c6044ae4ad0a03e73ae12cd1/scheduledActivities", + "rel":"scheduledActivities", + "verbs":[ + "GET", + "POST" + ] + } + ], + "locationId":"UC1", + "name":"test group", + "status":"active", + "type":"default" + } +''' + +__version__ = '${version}' + +import os +import traceback +from distutils.version import 
LooseVersion + +REQUESTS_IMP_ERR = None +try: + import requests +except ImportError: + REQUESTS_IMP_ERR = traceback.format_exc() + REQUESTS_FOUND = False +else: + REQUESTS_FOUND = True + +# +# Requires the clc-python-sdk. +# sudo pip install clc-sdk +# +CLC_IMP_ERR = None +try: + import clc as clc_sdk + from clc import CLCException +except ImportError: + CLC_IMP_ERR = traceback.format_exc() + CLC_FOUND = False + clc_sdk = None +else: + CLC_FOUND = True + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib + + +class ClcGroup(object): + + clc = None + root_group = None + + def __init__(self, module): + """ + Construct module + """ + self.clc = clc_sdk + self.module = module + self.group_dict = {} + + if not CLC_FOUND: + self.module.fail_json(msg=missing_required_lib('clc-sdk'), exception=CLC_IMP_ERR) + if not REQUESTS_FOUND: + self.module.fail_json(msg=missing_required_lib('requests'), exception=REQUESTS_IMP_ERR) + if requests.__version__ and LooseVersion(requests.__version__) < LooseVersion('2.5.0'): + self.module.fail_json( + msg='requests library version should be >= 2.5.0') + + self._set_user_agent(self.clc) + + def process_request(self): + """ + Execute the main code path, and handle the request + :return: none + """ + location = self.module.params.get('location') + group_name = self.module.params.get('name') + parent_name = self.module.params.get('parent') + group_description = self.module.params.get('description') + state = self.module.params.get('state') + + self._set_clc_credentials_from_env() + self.group_dict = self._get_group_tree_for_datacenter( + datacenter=location) + + if state == "absent": + changed, group, requests = self._ensure_group_is_absent( + group_name=group_name, parent_name=parent_name) + if requests: + self._wait_for_requests_to_complete(requests) + else: + changed, group = self._ensure_group_is_present( + group_name=group_name, parent_name=parent_name, group_description=group_description) + try: + group = group.data + except AttributeError: + group = group_name + self.module.exit_json(changed=changed, group=group) + + @staticmethod + def _define_module_argument_spec(): + """ + Define the argument spec for the ansible module + :return: argument spec dictionary + """ + argument_spec = dict( + name=dict(required=True), + description=dict(default=None), + parent=dict(default=None), + location=dict(default=None), + state=dict(default='present', choices=['present', 'absent']), + wait=dict(type='bool', default=True)) + + return argument_spec + + def _set_clc_credentials_from_env(self): + """ + Set the CLC Credentials on the sdk by reading environment variables + :return: none + """ + env = os.environ + v2_api_token = env.get('CLC_V2_API_TOKEN', False) + v2_api_username = env.get('CLC_V2_API_USERNAME', False) + v2_api_passwd = env.get('CLC_V2_API_PASSWD', False) + clc_alias = env.get('CLC_ACCT_ALIAS', False) + api_url = env.get('CLC_V2_API_URL', False) + + if api_url: + self.clc.defaults.ENDPOINT_URL_V2 = api_url + + if v2_api_token and clc_alias: + self.clc._LOGIN_TOKEN_V2 = v2_api_token + self.clc._V2_ENABLED = True + self.clc.ALIAS = clc_alias + elif v2_api_username and v2_api_passwd: + self.clc.v2.SetCredentials( + api_username=v2_api_username, + api_passwd=v2_api_passwd) + else: + return self.module.fail_json( + msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD " + "environment variables") + + def _ensure_group_is_absent(self, group_name, parent_name): + """ + Ensure that group_name is absent by deleting it if necessary + 
:param group_name: string - the name of the clc server group to delete + :param parent_name: string - the name of the parent group for group_name + :return: changed, group + """ + changed = False + group = [] + results = [] + + if self._group_exists(group_name=group_name, parent_name=parent_name): + if not self.module.check_mode: + group.append(group_name) + result = self._delete_group(group_name) + results.append(result) + changed = True + return changed, group, results + + def _delete_group(self, group_name): + """ + Delete the provided server group + :param group_name: string - the server group to delete + :return: none + """ + response = None + group, parent = self.group_dict.get(group_name) + try: + response = group.Delete() + except CLCException as ex: + self.module.fail_json(msg='Failed to delete group :{0}. {1}'.format( + group_name, ex.response_text + )) + return response + + def _ensure_group_is_present( + self, + group_name, + parent_name, + group_description): + """ + Checks to see if a server group exists, creates it if it doesn't. + :param group_name: the name of the group to validate/create + :param parent_name: the name of the parent group for group_name + :param group_description: a short description of the server group (used when creating) + :return: (changed, group) - + changed: Boolean- whether a change was made, + group: A clc group object for the group + """ + if not self.root_group: + raise AssertionError("Implementation Error: Root Group not set") + parent = parent_name if parent_name is not None else self.root_group.name + description = group_description + changed = False + group = group_name + + parent_exists = self._group_exists(group_name=parent, parent_name=None) + child_exists = self._group_exists( + group_name=group_name, + parent_name=parent) + + if parent_exists and child_exists: + group, parent = self.group_dict[group_name] + changed = False + elif parent_exists and not child_exists: + if not self.module.check_mode: + group = self._create_group( + group=group, + parent=parent, + description=description) + changed = True + else: + self.module.fail_json( + msg="parent group: " + + parent + + " does not exist") + + return changed, group + + def _create_group(self, group, parent, description): + """ + Create the provided server group + :param group: clc_sdk.Group - the group to create + :param parent: clc_sdk.Parent - the parent group for {group} + :param description: string - a text description of the group + :return: clc_sdk.Group - the created group + """ + response = None + (parent, grandparent) = self.group_dict[parent] + try: + response = parent.Create(name=group, description=description) + except CLCException as ex: + self.module.fail_json(msg='Failed to create group :{0}. 
{1}'.format(
+                group, ex.response_text))
+        return response
+
+    def _group_exists(self, group_name, parent_name):
+        """
+        Check to see if a group exists
+        :param group_name: string - the group to check
+        :param parent_name: string - the parent of group_name
+        :return: boolean - whether the group exists
+        """
+        result = False
+        if group_name in self.group_dict:
+            (group, parent) = self.group_dict[group_name]
+            if parent_name is None or parent_name == parent.name:
+                result = True
+        return result
+
+    def _get_group_tree_for_datacenter(self, datacenter=None):
+        """
+        Walk the tree of groups for a datacenter
+        :param datacenter: string - the datacenter to walk (ex: 'UC1')
+        :return: a dictionary of groups and parents
+        """
+        self.root_group = self.clc.v2.Datacenter(
+            location=datacenter).RootGroup()
+        return self._walk_groups_recursive(
+            parent_group=None,
+            child_group=self.root_group)
+
+    def _walk_groups_recursive(self, parent_group, child_group):
+        """
+        Walk a parent-child tree of groups, starting with the provided child group
+        :param parent_group: clc_sdk.Group - the parent group to start the walk
+        :param child_group: clc_sdk.Group - the child group to start the walk
+        :return: a dictionary of groups and parents
+        """
+        result = {str(child_group): (child_group, parent_group)}
+        groups = child_group.Subgroups().groups
+        if len(groups) > 0:
+            for group in groups:
+                if group.type != 'default':
+                    continue
+
+                result.update(self._walk_groups_recursive(child_group, group))
+        return result
+
+    def _wait_for_requests_to_complete(self, requests_lst):
+        """
+        Waits until the CLC requests are complete if the wait argument is True
+        :param requests_lst: The list of CLC request objects
+        :return: none
+        """
+        if not self.module.params['wait']:
+            return
+        for request in requests_lst:
+            request.WaitUntilComplete()
+            for request_details in request.requests:
+                if request_details.Status() != 'succeeded':
+                    self.module.fail_json(
+                        msg='Unable to process group request')
+
+    @staticmethod
+    def _set_user_agent(clc):
+        if hasattr(clc, 'SetRequestsSession'):
+            agent_string = "ClcAnsibleModule/" + __version__
+            ses = requests.Session()
+            ses.headers.update({"Api-Client": agent_string})
+            ses.headers['User-Agent'] += " " + agent_string
+            clc.SetRequestsSession(ses)
+
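+
+# Editor's note: an illustrative, self-contained sketch, not part of the
+# original module. It mirrors how _walk_groups_recursive flattens the group
+# tree into {name: (group, parent)} pairs, using nested tuples in place of
+# clc_sdk group objects.
+def _example_walk_tree():
+    tree = ('Default Group', [('web', [('web-prod', [])]), ('db', [])])
+
+    def walk(parent, node):
+        name, children = node
+        result = {name: (node, parent)}
+        for child in children:
+            result.update(walk(node, child))
+        return result
+
+    return walk(None, tree)  # maps all four group names to (node, parent)
+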
+
+def main():
+    """
+    The main function. Instantiates the module and calls process_request.
+    :return: none
+    """
+    module = AnsibleModule(
+        argument_spec=ClcGroup._define_module_argument_spec(),
+        supports_check_mode=True)
+
+    clc_group = ClcGroup(module)
+    clc_group.process_request()
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/cloud/centurylink/clc_loadbalancer.py b/plugins/modules/cloud/centurylink/clc_loadbalancer.py
new file mode 100644
index 0000000000..50232509c3
--- /dev/null
+++ b/plugins/modules/cloud/centurylink/clc_loadbalancer.py
@@ -0,0 +1,930 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2015 CenturyLink
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+module: clc_loadbalancer
+short_description: Create or delete shared load balancers in CenturyLink Cloud.
+description:
+  - An Ansible module to create or delete shared load balancers in CenturyLink Cloud.
+options:
+  name:
+    description:
+      - The name of the loadbalancer
+    required: True
+  description:
+    description:
+      - A description for the loadbalancer
+  alias:
+    description:
+      - The alias of your CLC Account
+    required: True
+  location:
+    description:
+      - The location of the datacenter where the load balancer resides
+    required: True
+  method:
+    description:
+      - The balancing method for the load balancer pool
+    choices: ['leastConnection', 'roundRobin']
+  persistence:
+    description:
+      - The persistence method for the load balancer
+    choices: ['standard', 'sticky']
+  port:
+    description:
+      - Port to configure on the public-facing side of the load balancer pool
+    choices: [80, 443]
+  nodes:
+    description:
+      - A list of nodes that need to be added to the load balancer pool
+    default: []
+  status:
+    description:
+      - The status of the loadbalancer
+    default: enabled
+    choices: ['enabled', 'disabled']
+  state:
+    description:
+      - Whether to create or delete the load balancer pool
+    default: present
+    choices: ['present', 'absent', 'port_absent', 'nodes_present', 'nodes_absent']
+requirements:
+    - python = 2.7
+    - requests >= 2.5.0
+    - clc-sdk
+author: "CLC Runner (@clc-runner)"
+notes:
+    - To use this module, it is required to set the below environment variables which enable access to the
+      Centurylink Cloud
+          - CLC_V2_API_USERNAME, the account login id for the centurylink cloud
+          - CLC_V2_API_PASSWORD, the account password for the centurylink cloud
+    - Alternatively, the module accepts the API token and account alias. The API token can be generated using the
+      CLC account login and password via the HTTP api call @ https://api.ctl.io/v2/authentication/login
+          - CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login
+          - CLC_ACCT_ALIAS, the account alias associated with the centurylink cloud
+    - Users can set CLC_V2_API_URL to specify an endpoint for pointing to a different CLC environment.
+''' + +EXAMPLES = ''' +# Note - You must set the CLC_V2_API_USERNAME And CLC_V2_API_PASSWD Environment variables before running these examples +- name: Create Loadbalancer + hosts: localhost + connection: local + tasks: + - name: Actually Create things + clc_loadbalancer: + name: test + description: test + alias: TEST + location: WA1 + port: 443 + nodes: + - ipAddress: 10.11.22.123 + privatePort: 80 + state: present + +- name: Add node to an existing loadbalancer pool + hosts: localhost + connection: local + tasks: + - name: Actually Create things + clc_loadbalancer: + name: test + description: test + alias: TEST + location: WA1 + port: 443 + nodes: + - ipAddress: 10.11.22.234 + privatePort: 80 + state: nodes_present + +- name: Remove node from an existing loadbalancer pool + hosts: localhost + connection: local + tasks: + - name: Actually Create things + clc_loadbalancer: + name: test + description: test + alias: TEST + location: WA1 + port: 443 + nodes: + - ipAddress: 10.11.22.234 + privatePort: 80 + state: nodes_absent + +- name: Delete LoadbalancerPool + hosts: localhost + connection: local + tasks: + - name: Actually Delete things + clc_loadbalancer: + name: test + description: test + alias: TEST + location: WA1 + port: 443 + nodes: + - ipAddress: 10.11.22.123 + privatePort: 80 + state: port_absent + +- name: Delete Loadbalancer + hosts: localhost + connection: local + tasks: + - name: Actually Delete things + clc_loadbalancer: + name: test + description: test + alias: TEST + location: WA1 + port: 443 + nodes: + - ipAddress: 10.11.22.123 + privatePort: 80 + state: absent +''' + +RETURN = ''' +loadbalancer: + description: The load balancer result object from CLC + returned: success + type: dict + sample: + { + "description":"test-lb", + "id":"ab5b18cb81e94ab9925b61d1ca043fb5", + "ipAddress":"66.150.174.197", + "links":[ + { + "href":"/v2/sharedLoadBalancers/wfad/wa1/ab5b18cb81e94ab9925b61d1ca043fb5", + "rel":"self", + "verbs":[ + "GET", + "PUT", + "DELETE" + ] + }, + { + "href":"/v2/sharedLoadBalancers/wfad/wa1/ab5b18cb81e94ab9925b61d1ca043fb5/pools", + "rel":"pools", + "verbs":[ + "GET", + "POST" + ] + } + ], + "name":"test-lb", + "pools":[ + + ], + "status":"enabled" + } +''' + +__version__ = '${version}' + +import json +import os +import traceback +from time import sleep +from distutils.version import LooseVersion + +REQUESTS_IMP_ERR = None +try: + import requests +except ImportError: + REQUESTS_IMP_ERR = traceback.format_exc() + REQUESTS_FOUND = False +else: + REQUESTS_FOUND = True + +# +# Requires the clc-python-sdk. 
+# sudo pip install clc-sdk +# +CLC_IMP_ERR = None +try: + import clc as clc_sdk + from clc import APIFailedResponse +except ImportError: + CLC_IMP_ERR = traceback.format_exc() + CLC_FOUND = False + clc_sdk = None +else: + CLC_FOUND = True + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib + + +class ClcLoadBalancer: + + clc = None + + def __init__(self, module): + """ + Construct module + """ + self.clc = clc_sdk + self.module = module + self.lb_dict = {} + + if not CLC_FOUND: + self.module.fail_json(msg=missing_required_lib('clc-sdk'), exception=CLC_IMP_ERR) + if not REQUESTS_FOUND: + self.module.fail_json(msg=missing_required_lib('requests'), exception=REQUESTS_IMP_ERR) + if requests.__version__ and LooseVersion( + requests.__version__) < LooseVersion('2.5.0'): + self.module.fail_json( + msg='requests library version should be >= 2.5.0') + + self._set_user_agent(self.clc) + + def process_request(self): + """ + Execute the main code path, and handle the request + :return: none + """ + changed = False + result_lb = None + loadbalancer_name = self.module.params.get('name') + loadbalancer_alias = self.module.params.get('alias') + loadbalancer_location = self.module.params.get('location') + loadbalancer_description = self.module.params.get('description') + loadbalancer_port = self.module.params.get('port') + loadbalancer_method = self.module.params.get('method') + loadbalancer_persistence = self.module.params.get('persistence') + loadbalancer_nodes = self.module.params.get('nodes') + loadbalancer_status = self.module.params.get('status') + state = self.module.params.get('state') + + if loadbalancer_description is None: + loadbalancer_description = loadbalancer_name + + self._set_clc_credentials_from_env() + + self.lb_dict = self._get_loadbalancer_list( + alias=loadbalancer_alias, + location=loadbalancer_location) + + if state == 'present': + changed, result_lb, lb_id = self.ensure_loadbalancer_present( + name=loadbalancer_name, + alias=loadbalancer_alias, + location=loadbalancer_location, + description=loadbalancer_description, + status=loadbalancer_status) + if loadbalancer_port: + changed, result_pool, pool_id = self.ensure_loadbalancerpool_present( + lb_id=lb_id, + alias=loadbalancer_alias, + location=loadbalancer_location, + method=loadbalancer_method, + persistence=loadbalancer_persistence, + port=loadbalancer_port) + + if loadbalancer_nodes: + changed, result_nodes = self.ensure_lbpool_nodes_set( + alias=loadbalancer_alias, + location=loadbalancer_location, + name=loadbalancer_name, + port=loadbalancer_port, + nodes=loadbalancer_nodes) + elif state == 'absent': + changed, result_lb = self.ensure_loadbalancer_absent( + name=loadbalancer_name, + alias=loadbalancer_alias, + location=loadbalancer_location) + + elif state == 'port_absent': + changed, result_lb = self.ensure_loadbalancerpool_absent( + alias=loadbalancer_alias, + location=loadbalancer_location, + name=loadbalancer_name, + port=loadbalancer_port) + + elif state == 'nodes_present': + changed, result_lb = self.ensure_lbpool_nodes_present( + alias=loadbalancer_alias, + location=loadbalancer_location, + name=loadbalancer_name, + port=loadbalancer_port, + nodes=loadbalancer_nodes) + + elif state == 'nodes_absent': + changed, result_lb = self.ensure_lbpool_nodes_absent( + alias=loadbalancer_alias, + location=loadbalancer_location, + name=loadbalancer_name, + port=loadbalancer_port, + nodes=loadbalancer_nodes) + + self.module.exit_json(changed=changed, loadbalancer=result_lb) + + def 
ensure_loadbalancer_present( + self, name, alias, location, description, status): + """ + Checks to see if a load balancer exists and creates one if it does not. + :param name: Name of loadbalancer + :param alias: Alias of account + :param location: Datacenter + :param description: Description of loadbalancer + :param status: Enabled / Disabled + :return: (changed, result, lb_id) + changed: Boolean whether a change was made + result: The result object from the CLC load balancer request + lb_id: The load balancer id + """ + changed = False + result = name + lb_id = self._loadbalancer_exists(name=name) + if not lb_id: + if not self.module.check_mode: + result = self.create_loadbalancer(name=name, + alias=alias, + location=location, + description=description, + status=status) + lb_id = result.get('id') + changed = True + + return changed, result, lb_id + + def ensure_loadbalancerpool_present( + self, lb_id, alias, location, method, persistence, port): + """ + Checks to see if a load balancer pool exists and creates one if it does not. + :param lb_id: The loadbalancer id + :param alias: The account alias + :param location: the datacenter the load balancer resides in + :param method: the load balancing method + :param persistence: the load balancing persistence type + :param port: the port that the load balancer will listen on + :return: (changed, group, pool_id) - + changed: Boolean whether a change was made + result: The result from the CLC API call + pool_id: The string id of the load balancer pool + """ + changed = False + result = port + if not lb_id: + return changed, None, None + pool_id = self._loadbalancerpool_exists( + alias=alias, + location=location, + port=port, + lb_id=lb_id) + if not pool_id: + if not self.module.check_mode: + result = self.create_loadbalancerpool( + alias=alias, + location=location, + lb_id=lb_id, + method=method, + persistence=persistence, + port=port) + pool_id = result.get('id') + changed = True + + return changed, result, pool_id + + def ensure_loadbalancer_absent(self, name, alias, location): + """ + Checks to see if a load balancer exists and deletes it if it does + :param name: Name of the load balancer + :param alias: Alias of account + :param location: Datacenter + :return: (changed, result) + changed: Boolean whether a change was made + result: The result from the CLC API Call + """ + changed = False + result = name + lb_exists = self._loadbalancer_exists(name=name) + if lb_exists: + if not self.module.check_mode: + result = self.delete_loadbalancer(alias=alias, + location=location, + name=name) + changed = True + return changed, result + + def ensure_loadbalancerpool_absent(self, alias, location, name, port): + """ + Checks to see if a load balancer pool exists and deletes it if it does + :param alias: The account alias + :param location: the datacenter the load balancer resides in + :param name: the name of the load balancer + :param port: the port that the load balancer listens on + :return: (changed, result) - + changed: Boolean whether a change was made + result: The result from the CLC API call + """ + changed = False + result = None + lb_exists = self._loadbalancer_exists(name=name) + if lb_exists: + lb_id = self._get_loadbalancer_id(name=name) + pool_id = self._loadbalancerpool_exists( + alias=alias, + location=location, + port=port, + lb_id=lb_id) + if pool_id: + changed = True + if not self.module.check_mode: + result = self.delete_loadbalancerpool( + alias=alias, + location=location, + lb_id=lb_id, + pool_id=pool_id) + else: + result = "Pool 
doesn't exist" + else: + result = "LB Doesn't Exist" + return changed, result + + def ensure_lbpool_nodes_set(self, alias, location, name, port, nodes): + """ + Checks to see if the provided list of nodes exist for the pool + and set the nodes if any in the list those doesn't exist + :param alias: The account alias + :param location: the datacenter the load balancer resides in + :param name: the name of the load balancer + :param port: the port that the load balancer will listen on + :param nodes: The list of nodes to be updated to the pool + :return: (changed, result) - + changed: Boolean whether a change was made + result: The result from the CLC API call + """ + result = {} + changed = False + lb_exists = self._loadbalancer_exists(name=name) + if lb_exists: + lb_id = self._get_loadbalancer_id(name=name) + pool_id = self._loadbalancerpool_exists( + alias=alias, + location=location, + port=port, + lb_id=lb_id) + if pool_id: + nodes_exist = self._loadbalancerpool_nodes_exists(alias=alias, + location=location, + lb_id=lb_id, + pool_id=pool_id, + nodes_to_check=nodes) + if not nodes_exist: + changed = True + result = self.set_loadbalancernodes(alias=alias, + location=location, + lb_id=lb_id, + pool_id=pool_id, + nodes=nodes) + else: + result = "Pool doesn't exist" + else: + result = "Load balancer doesn't Exist" + return changed, result + + def ensure_lbpool_nodes_present(self, alias, location, name, port, nodes): + """ + Checks to see if the provided list of nodes exist for the pool and add the missing nodes to the pool + :param alias: The account alias + :param location: the datacenter the load balancer resides in + :param name: the name of the load balancer + :param port: the port that the load balancer will listen on + :param nodes: the list of nodes to be added + :return: (changed, result) - + changed: Boolean whether a change was made + result: The result from the CLC API call + """ + changed = False + lb_exists = self._loadbalancer_exists(name=name) + if lb_exists: + lb_id = self._get_loadbalancer_id(name=name) + pool_id = self._loadbalancerpool_exists( + alias=alias, + location=location, + port=port, + lb_id=lb_id) + if pool_id: + changed, result = self.add_lbpool_nodes(alias=alias, + location=location, + lb_id=lb_id, + pool_id=pool_id, + nodes_to_add=nodes) + else: + result = "Pool doesn't exist" + else: + result = "Load balancer doesn't Exist" + return changed, result + + def ensure_lbpool_nodes_absent(self, alias, location, name, port, nodes): + """ + Checks to see if the provided list of nodes exist for the pool and removes them if found any + :param alias: The account alias + :param location: the datacenter the load balancer resides in + :param name: the name of the load balancer + :param port: the port that the load balancer will listen on + :param nodes: the list of nodes to be removed + :return: (changed, result) - + changed: Boolean whether a change was made + result: The result from the CLC API call + """ + changed = False + lb_exists = self._loadbalancer_exists(name=name) + if lb_exists: + lb_id = self._get_loadbalancer_id(name=name) + pool_id = self._loadbalancerpool_exists( + alias=alias, + location=location, + port=port, + lb_id=lb_id) + if pool_id: + changed, result = self.remove_lbpool_nodes(alias=alias, + location=location, + lb_id=lb_id, + pool_id=pool_id, + nodes_to_remove=nodes) + else: + result = "Pool doesn't exist" + else: + result = "Load balancer doesn't Exist" + return changed, result + + def create_loadbalancer(self, name, alias, location, description, 
status): + """ + Create a loadbalancer w/ params + :param name: Name of loadbalancer + :param alias: Alias of account + :param location: Datacenter + :param description: Description for loadbalancer to be created + :param status: Enabled / Disabled + :return: result: The result from the CLC API call + """ + result = None + try: + result = self.clc.v2.API.Call('POST', + '/v2/sharedLoadBalancers/%s/%s' % (alias, + location), + json.dumps({"name": name, + "description": description, + "status": status})) + sleep(1) + except APIFailedResponse as e: + self.module.fail_json( + msg='Unable to create load balancer "{0}". {1}'.format( + name, str(e.response_text))) + return result + + def create_loadbalancerpool( + self, alias, location, lb_id, method, persistence, port): + """ + Creates a pool on the provided load balancer + :param alias: the account alias + :param location: the datacenter the load balancer resides in + :param lb_id: the id string of the load balancer + :param method: the load balancing method + :param persistence: the load balancing persistence type + :param port: the port that the load balancer will listen on + :return: result: The result from the create API call + """ + result = None + try: + result = self.clc.v2.API.Call( + 'POST', '/v2/sharedLoadBalancers/%s/%s/%s/pools' % + (alias, location, lb_id), json.dumps( + { + "port": port, "method": method, "persistence": persistence + })) + except APIFailedResponse as e: + self.module.fail_json( + msg='Unable to create pool for load balancer id "{0}". {1}'.format( + lb_id, str(e.response_text))) + return result + + def delete_loadbalancer(self, alias, location, name): + """ + Delete CLC loadbalancer + :param alias: Alias for account + :param location: Datacenter + :param name: Name of the loadbalancer to delete + :return: result: The result from the CLC API call + """ + result = None + lb_id = self._get_loadbalancer_id(name=name) + try: + result = self.clc.v2.API.Call( + 'DELETE', '/v2/sharedLoadBalancers/%s/%s/%s' % + (alias, location, lb_id)) + except APIFailedResponse as e: + self.module.fail_json( + msg='Unable to delete load balancer "{0}". {1}'.format( + name, str(e.response_text))) + return result + + def delete_loadbalancerpool(self, alias, location, lb_id, pool_id): + """ + Delete the pool on the provided load balancer + :param alias: The account alias + :param location: the datacenter the load balancer resides in + :param lb_id: the id string of the load balancer + :param pool_id: the id string of the load balancer pool + :return: result: The result from the delete API call + """ + result = None + try: + result = self.clc.v2.API.Call( + 'DELETE', '/v2/sharedLoadBalancers/%s/%s/%s/pools/%s' % + (alias, location, lb_id, pool_id)) + except APIFailedResponse as e: + self.module.fail_json( + msg='Unable to delete pool for load balancer id "{0}". 
{1}'.format(
+                    lb_id, str(e.response_text)))
+        return result
+
+    def _get_loadbalancer_id(self, name):
+        """
+        Retrieve the unique ID of a loadbalancer
+        :param name: Name of loadbalancer
+        :return: Unique ID of the loadbalancer
+        """
+        lb_id = None
+        for lb in self.lb_dict:
+            if lb.get('name') == name:
+                lb_id = lb.get('id')
+        return lb_id
+
+    def _get_loadbalancer_list(self, alias, location):
+        """
+        Retrieve a list of loadbalancers
+        :param alias: Alias for account
+        :param location: Datacenter
+        :return: JSON data for all loadbalancers at datacenter
+        """
+        result = None
+        try:
+            result = self.clc.v2.API.Call(
+                'GET', '/v2/sharedLoadBalancers/%s/%s' % (alias, location))
+        except APIFailedResponse as e:
+            self.module.fail_json(
+                msg='Unable to fetch load balancers for account: {0}. {1}'.format(
+                    alias, str(e.response_text)))
+        return result
+
+    def _loadbalancer_exists(self, name):
+        """
+        Verify a loadbalancer exists
+        :param name: Name of loadbalancer
+        :return: False or the ID of the existing loadbalancer
+        """
+        result = False
+
+        for lb in self.lb_dict:
+            if lb.get('name') == name:
+                result = lb.get('id')
+        return result
+
+    def _loadbalancerpool_exists(self, alias, location, port, lb_id):
+        """
+        Checks to see if a pool exists on the specified port on the provided load balancer
+        :param alias: the account alias
+        :param location: the datacenter the load balancer resides in
+        :param port: the port to check and see if it exists
+        :param lb_id: the id string of the provided load balancer
+        :return: result: The id string of the pool or False
+        """
+        result = False
+        try:
+            pool_list = self.clc.v2.API.Call(
+                'GET', '/v2/sharedLoadBalancers/%s/%s/%s/pools' %
+                (alias, location, lb_id))
+        except APIFailedResponse as e:
+            return self.module.fail_json(
+                msg='Unable to fetch the load balancer pools for load balancer id: {0}. {1}'.format(
+                    lb_id, str(e.response_text)))
+        for pool in pool_list:
+            if int(pool.get('port')) == int(port):
+                result = pool.get('id')
+        return result
+
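+    # Editor's note: an illustrative sketch, not part of the original module.
+    # The membership checks below compare whole node dicts, so a node missing
+    # its 'status' key is first defaulted to 'enabled'; see
+    # _loadbalancerpool_nodes_exists and add_lbpool_nodes for the real logic.
+    @staticmethod
+    def _example_node_membership():
+        pool_nodes = [{'ipAddress': '10.11.22.123', 'privatePort': 80,
+                       'status': 'enabled'}]
+        candidate = {'ipAddress': '10.11.22.123', 'privatePort': 80}
+        candidate.setdefault('status', 'enabled')
+        return candidate in pool_nodes  # -> True
+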
{1}'.format( + lb_id, str(e.response_text))) + for pool in pool_list: + if int(pool.get('port')) == int(port): + result = pool.get('id') + return result + + def _loadbalancerpool_nodes_exists( + self, alias, location, lb_id, pool_id, nodes_to_check): + """ + Checks to see if a set of nodes exists on the specified port on the provided load balancer + :param alias: the account alias + :param location: the datacenter the load balancer resides in + :param lb_id: the id string of the provided load balancer + :param pool_id: the id string of the load balancer pool + :param nodes_to_check: the list of nodes to check for + :return: result: True / False indicating if the given nodes exist + """ + result = False + nodes = self._get_lbpool_nodes(alias, location, lb_id, pool_id) + for node in nodes_to_check: + if not node.get('status'): + node['status'] = 'enabled' + if node in nodes: + result = True + else: + result = False + return result + + def set_loadbalancernodes(self, alias, location, lb_id, pool_id, nodes): + """ + Updates nodes to the provided pool + :param alias: the account alias + :param location: the datacenter the load balancer resides in + :param lb_id: the id string of the load balancer + :param pool_id: the id string of the pool + :param nodes: a list of dictionaries containing the nodes to set + :return: result: The result from the CLC API call + """ + result = None + if not lb_id: + return result + if not self.module.check_mode: + try: + result = self.clc.v2.API.Call('PUT', + '/v2/sharedLoadBalancers/%s/%s/%s/pools/%s/nodes' + % (alias, location, lb_id, pool_id), json.dumps(nodes)) + except APIFailedResponse as e: + self.module.fail_json( + msg='Unable to set nodes for the load balancer pool id "{0}". {1}'.format( + pool_id, str(e.response_text))) + return result + + def add_lbpool_nodes(self, alias, location, lb_id, pool_id, nodes_to_add): + """ + Add nodes to the provided pool + :param alias: the account alias + :param location: the datacenter the load balancer resides in + :param lb_id: the id string of the load balancer + :param pool_id: the id string of the pool + :param nodes_to_add: a list of dictionaries containing the nodes to add + :return: (changed, result) - + changed: Boolean whether a change was made + result: The result from the CLC API call + """ + changed = False + result = {} + nodes = self._get_lbpool_nodes(alias, location, lb_id, pool_id) + for node in nodes_to_add: + if not node.get('status'): + node['status'] = 'enabled' + if node not in nodes: + changed = True + nodes.append(node) + if changed is True and not self.module.check_mode: + result = self.set_loadbalancernodes( + alias, + location, + lb_id, + pool_id, + nodes) + return changed, result + + def remove_lbpool_nodes( + self, alias, location, lb_id, pool_id, nodes_to_remove): + """ + Removes nodes from the provided pool + :param alias: the account alias + :param location: the datacenter the load balancer resides in + :param lb_id: the id string of the load balancer + :param pool_id: the id string of the pool + :param nodes_to_remove: a list of dictionaries containing the nodes to remove + :return: (changed, result) - + changed: Boolean whether a change was made + result: The result from the CLC API call + """ + changed = False + result = {} + nodes = self._get_lbpool_nodes(alias, location, lb_id, pool_id) + for node in nodes_to_remove: + if not node.get('status'): + node['status'] = 'enabled' + if node in nodes: + changed = True + nodes.remove(node) + if changed is True and not self.module.check_mode: + 
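+            # The node list was already pruned in memory by the loop above; the
+            # single PUT issued by set_loadbalancernodes() replaces the pool's
+            # whole node set, and the check_mode guard prevents any API write
+            # during a dry run.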
result = self.set_loadbalancernodes( + alias, + location, + lb_id, + pool_id, + nodes) + return changed, result + + def _get_lbpool_nodes(self, alias, location, lb_id, pool_id): + """ + Return the list of nodes available to the provided load balancer pool + :param alias: the account alias + :param location: the datacenter the load balancer resides in + :param lb_id: the id string of the load balancer + :param pool_id: the id string of the pool + :return: result: The list of nodes + """ + result = None + try: + result = self.clc.v2.API.Call('GET', + '/v2/sharedLoadBalancers/%s/%s/%s/pools/%s/nodes' + % (alias, location, lb_id, pool_id)) + except APIFailedResponse as e: + self.module.fail_json( + msg='Unable to fetch list of available nodes for load balancer pool id: {0}. {1}'.format( + pool_id, str(e.response_text))) + return result + + @staticmethod + def define_argument_spec(): + """ + Define the argument spec for the ansible module + :return: argument spec dictionary + """ + argument_spec = dict( + name=dict(required=True), + description=dict(default=None), + location=dict(required=True), + alias=dict(required=True), + port=dict(choices=[80, 443]), + method=dict(choices=['leastConnection', 'roundRobin']), + persistence=dict(choices=['standard', 'sticky']), + nodes=dict(type='list', default=[]), + status=dict(default='enabled', choices=['enabled', 'disabled']), + state=dict( + default='present', + choices=[ + 'present', + 'absent', + 'port_absent', + 'nodes_present', + 'nodes_absent']) + ) + return argument_spec + + def _set_clc_credentials_from_env(self): + """ + Set the CLC Credentials on the sdk by reading environment variables + :return: none + """ + env = os.environ + v2_api_token = env.get('CLC_V2_API_TOKEN', False) + v2_api_username = env.get('CLC_V2_API_USERNAME', False) + v2_api_passwd = env.get('CLC_V2_API_PASSWD', False) + clc_alias = env.get('CLC_ACCT_ALIAS', False) + api_url = env.get('CLC_V2_API_URL', False) + + if api_url: + self.clc.defaults.ENDPOINT_URL_V2 = api_url + + if v2_api_token and clc_alias: + self.clc._LOGIN_TOKEN_V2 = v2_api_token + self.clc._V2_ENABLED = True + self.clc.ALIAS = clc_alias + elif v2_api_username and v2_api_passwd: + self.clc.v2.SetCredentials( + api_username=v2_api_username, + api_passwd=v2_api_passwd) + else: + return self.module.fail_json( + msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD " + "environment variables") + + @staticmethod + def _set_user_agent(clc): + if hasattr(clc, 'SetRequestsSession'): + agent_string = "ClcAnsibleModule/" + __version__ + ses = requests.Session() + ses.headers.update({"Api-Client": agent_string}) + ses.headers['User-Agent'] += " " + agent_string + clc.SetRequestsSession(ses) + + +def main(): + """ + The main function. Instantiates the module and calls process_request. 
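+    Example ad-hoc invocation (illustrative only; assumes CLC credentials
+    are already exported in the environment):
+        ansible localhost -m clc_loadbalancer -a "name=lb1 alias=WFAD location=UC1 state=present"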
+ :return: none + """ + module = AnsibleModule(argument_spec=ClcLoadBalancer.define_argument_spec(), + supports_check_mode=True) + clc_loadbalancer = ClcLoadBalancer(module) + clc_loadbalancer.process_request() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/centurylink/clc_modify_server.py b/plugins/modules/cloud/centurylink/clc_modify_server.py new file mode 100644 index 0000000000..4cd47a110d --- /dev/null +++ b/plugins/modules/cloud/centurylink/clc_modify_server.py @@ -0,0 +1,962 @@ +#!/usr/bin/python +# +# Copyright (c) 2015 CenturyLink +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +module: clc_modify_server +short_description: modify servers in CenturyLink Cloud. +description: + - An Ansible module to modify servers in CenturyLink Cloud. +options: + server_ids: + description: + - A list of server Ids to modify. + required: True + cpu: + description: + - How many CPUs to update on the server + memory: + description: + - Memory (in GB) to set to the server. + anti_affinity_policy_id: + description: + - The anti affinity policy id to be set for a hyper scale server. + This is mutually exclusive with 'anti_affinity_policy_name' + anti_affinity_policy_name: + description: + - The anti affinity policy name to be set for a hyper scale server. + This is mutually exclusive with 'anti_affinity_policy_id' + alert_policy_id: + description: + - The alert policy id to be associated to the server. + This is mutually exclusive with 'alert_policy_name' + alert_policy_name: + description: + - The alert policy name to be associated to the server. + This is mutually exclusive with 'alert_policy_id' + state: + description: + - The state to insure that the provided resources are in. + default: 'present' + choices: ['present', 'absent'] + wait: + description: + - Whether to wait for the provisioning tasks to finish before returning. + type: bool + default: 'yes' +requirements: + - python = 2.7 + - requests >= 2.5.0 + - clc-sdk +author: "CLC Runner (@clc-runner)" +notes: + - To use this module, it is required to set the below environment variables which enables access to the + Centurylink Cloud + - CLC_V2_API_USERNAME, the account login id for the centurylink cloud + - CLC_V2_API_PASSWORD, the account password for the centurylink cloud + - Alternatively, the module accepts the API token and account alias. The API token can be generated using the + CLC account login and password via the HTTP api call @ https://api.ctl.io/v2/authentication/login + - CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login + - CLC_ACCT_ALIAS, the account alias associated with the centurylink cloud + - Users can set CLC_V2_API_URL to specify an endpoint for pointing to a different CLC environment. 
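+    - The module code reads the account password from the CLC_V2_API_PASSWD environment variable, matching the examples below.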
+''' + +EXAMPLES = ''' +# Note - You must set the CLC_V2_API_USERNAME And CLC_V2_API_PASSWD Environment variables before running these examples + +- name: set the cpu count to 4 on a server + clc_modify_server: + server_ids: + - UC1TESTSVR01 + - UC1TESTSVR02 + cpu: 4 + state: present + +- name: set the memory to 8GB on a server + clc_modify_server: + server_ids: + - UC1TESTSVR01 + - UC1TESTSVR02 + memory: 8 + state: present + +- name: set the anti affinity policy on a server + clc_modify_server: + server_ids: + - UC1TESTSVR01 + - UC1TESTSVR02 + anti_affinity_policy_name: 'aa_policy' + state: present + +- name: remove the anti affinity policy on a server + clc_modify_server: + server_ids: + - UC1TESTSVR01 + - UC1TESTSVR02 + anti_affinity_policy_name: 'aa_policy' + state: absent + +- name: add the alert policy on a server + clc_modify_server: + server_ids: + - UC1TESTSVR01 + - UC1TESTSVR02 + alert_policy_name: 'alert_policy' + state: present + +- name: remove the alert policy on a server + clc_modify_server: + server_ids: + - UC1TESTSVR01 + - UC1TESTSVR02 + alert_policy_name: 'alert_policy' + state: absent + +- name: set the memory to 16GB and cpu to 8 core on a lust if servers + clc_modify_server: + server_ids: + - UC1TESTSVR01 + - UC1TESTSVR02 + cpu: 8 + memory: 16 + state: present +''' + +RETURN = ''' +server_ids: + description: The list of server ids that are changed + returned: success + type: list + sample: + [ + "UC1TEST-SVR01", + "UC1TEST-SVR02" + ] +servers: + description: The list of server objects that are changed + returned: success + type: list + sample: + [ + { + "changeInfo":{ + "createdBy":"service.wfad", + "createdDate":1438196820, + "modifiedBy":"service.wfad", + "modifiedDate":1438196820 + }, + "description":"test-server", + "details":{ + "alertPolicies":[ + + ], + "cpu":1, + "customFields":[ + + ], + "diskCount":3, + "disks":[ + { + "id":"0:0", + "partitionPaths":[ + + ], + "sizeGB":1 + }, + { + "id":"0:1", + "partitionPaths":[ + + ], + "sizeGB":2 + }, + { + "id":"0:2", + "partitionPaths":[ + + ], + "sizeGB":14 + } + ], + "hostName":"", + "inMaintenanceMode":false, + "ipAddresses":[ + { + "internal":"10.1.1.1" + } + ], + "memoryGB":1, + "memoryMB":1024, + "partitions":[ + + ], + "powerState":"started", + "snapshots":[ + + ], + "storageGB":17 + }, + "groupId":"086ac1dfe0b6411989e8d1b77c4065f0", + "id":"test-server", + "ipaddress":"10.120.45.23", + "isTemplate":false, + "links":[ + { + "href":"/v2/servers/wfad/test-server", + "id":"test-server", + "rel":"self", + "verbs":[ + "GET", + "PATCH", + "DELETE" + ] + }, + { + "href":"/v2/groups/wfad/086ac1dfe0b6411989e8d1b77c4065f0", + "id":"086ac1dfe0b6411989e8d1b77c4065f0", + "rel":"group" + }, + { + "href":"/v2/accounts/wfad", + "id":"wfad", + "rel":"account" + }, + { + "href":"/v2/billing/wfad/serverPricing/test-server", + "rel":"billing" + }, + { + "href":"/v2/servers/wfad/test-server/publicIPAddresses", + "rel":"publicIPAddresses", + "verbs":[ + "POST" + ] + }, + { + "href":"/v2/servers/wfad/test-server/credentials", + "rel":"credentials" + }, + { + "href":"/v2/servers/wfad/test-server/statistics", + "rel":"statistics" + }, + { + "href":"/v2/servers/wfad/510ec21ae82d4dc89d28479753bf736a/upcomingScheduledActivities", + "rel":"upcomingScheduledActivities" + }, + { + "href":"/v2/servers/wfad/510ec21ae82d4dc89d28479753bf736a/scheduledActivities", + "rel":"scheduledActivities", + "verbs":[ + "GET", + "POST" + ] + }, + { + "href":"/v2/servers/wfad/test-server/capabilities", + "rel":"capabilities" + }, + { + 
"href":"/v2/servers/wfad/test-server/alertPolicies", + "rel":"alertPolicyMappings", + "verbs":[ + "POST" + ] + }, + { + "href":"/v2/servers/wfad/test-server/antiAffinityPolicy", + "rel":"antiAffinityPolicyMapping", + "verbs":[ + "PUT", + "DELETE" + ] + }, + { + "href":"/v2/servers/wfad/test-server/cpuAutoscalePolicy", + "rel":"cpuAutoscalePolicyMapping", + "verbs":[ + "PUT", + "DELETE" + ] + } + ], + "locationId":"UC1", + "name":"test-server", + "os":"ubuntu14_64Bit", + "osType":"Ubuntu 14 64-bit", + "status":"active", + "storageType":"standard", + "type":"standard" + } + ] +''' + +__version__ = '${version}' + +import json +import os +import traceback +from distutils.version import LooseVersion + +REQUESTS_IMP_ERR = None +try: + import requests +except ImportError: + REQUESTS_IMP_ERR = traceback.format_exc() + REQUESTS_FOUND = False +else: + REQUESTS_FOUND = True + +# +# Requires the clc-python-sdk. +# sudo pip install clc-sdk +# +CLC_IMP_ERR = None +try: + import clc as clc_sdk + from clc import CLCException + from clc import APIFailedResponse +except ImportError: + CLC_IMP_ERR = traceback.format_exc() + CLC_FOUND = False + clc_sdk = None +else: + CLC_FOUND = True + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib + + +class ClcModifyServer: + clc = clc_sdk + + def __init__(self, module): + """ + Construct module + """ + self.clc = clc_sdk + self.module = module + + if not CLC_FOUND: + self.module.fail_json(msg=missing_required_lib('clc-sdk'), exception=CLC_IMP_ERR) + if not REQUESTS_FOUND: + self.module.fail_json(msg=missing_required_lib('requests'), exception=REQUESTS_IMP_ERR) + if requests.__version__ and LooseVersion( + requests.__version__) < LooseVersion('2.5.0'): + self.module.fail_json( + msg='requests library version should be >= 2.5.0') + + self._set_user_agent(self.clc) + + def process_request(self): + """ + Process the request - Main Code Path + :return: Returns with either an exit_json or fail_json + """ + self._set_clc_credentials_from_env() + + p = self.module.params + cpu = p.get('cpu') + memory = p.get('memory') + state = p.get('state') + if state == 'absent' and (cpu or memory): + return self.module.fail_json( + msg='\'absent\' state is not supported for \'cpu\' and \'memory\' arguments') + + server_ids = p['server_ids'] + if not isinstance(server_ids, list): + return self.module.fail_json( + msg='server_ids needs to be a list of instances to modify: %s' % + server_ids) + + (changed, server_dict_array, changed_server_ids) = self._modify_servers( + server_ids=server_ids) + + self.module.exit_json( + changed=changed, + server_ids=changed_server_ids, + servers=server_dict_array) + + @staticmethod + def _define_module_argument_spec(): + """ + Define the argument spec for the ansible module + :return: argument spec dictionary + """ + argument_spec = dict( + server_ids=dict(type='list', required=True), + state=dict(default='present', choices=['present', 'absent']), + cpu=dict(), + memory=dict(), + anti_affinity_policy_id=dict(), + anti_affinity_policy_name=dict(), + alert_policy_id=dict(), + alert_policy_name=dict(), + wait=dict(type='bool', default=True) + ) + mutually_exclusive = [ + ['anti_affinity_policy_id', 'anti_affinity_policy_name'], + ['alert_policy_id', 'alert_policy_name'] + ] + return {"argument_spec": argument_spec, + "mutually_exclusive": mutually_exclusive} + + def _set_clc_credentials_from_env(self): + """ + Set the CLC Credentials on the sdk by reading environment variables + :return: none + """ + env = os.environ + v2_api_token = 
env.get('CLC_V2_API_TOKEN', False) + v2_api_username = env.get('CLC_V2_API_USERNAME', False) + v2_api_passwd = env.get('CLC_V2_API_PASSWD', False) + clc_alias = env.get('CLC_ACCT_ALIAS', False) + api_url = env.get('CLC_V2_API_URL', False) + + if api_url: + self.clc.defaults.ENDPOINT_URL_V2 = api_url + + if v2_api_token and clc_alias: + self.clc._LOGIN_TOKEN_V2 = v2_api_token + self.clc._V2_ENABLED = True + self.clc.ALIAS = clc_alias + elif v2_api_username and v2_api_passwd: + self.clc.v2.SetCredentials( + api_username=v2_api_username, + api_passwd=v2_api_passwd) + else: + return self.module.fail_json( + msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD " + "environment variables") + + def _get_servers_from_clc(self, server_list, message): + """ + Internal function to fetch list of CLC server objects from a list of server ids + :param server_list: The list of server ids + :param message: the error message to throw in case of any error + :return the list of CLC server objects + """ + try: + return self.clc.v2.Servers(server_list).servers + except CLCException as ex: + return self.module.fail_json(msg=message + ': %s' % ex.message) + + def _modify_servers(self, server_ids): + """ + modify the servers configuration on the provided list + :param server_ids: list of servers to modify + :return: a list of dictionaries with server information about the servers that were modified + """ + p = self.module.params + state = p.get('state') + server_params = { + 'cpu': p.get('cpu'), + 'memory': p.get('memory'), + 'anti_affinity_policy_id': p.get('anti_affinity_policy_id'), + 'anti_affinity_policy_name': p.get('anti_affinity_policy_name'), + 'alert_policy_id': p.get('alert_policy_id'), + 'alert_policy_name': p.get('alert_policy_name'), + } + changed = False + server_changed = False + aa_changed = False + ap_changed = False + server_dict_array = [] + result_server_ids = [] + request_list = [] + changed_servers = [] + + if not isinstance(server_ids, list) or len(server_ids) < 1: + return self.module.fail_json( + msg='server_ids should be a list of servers, aborting') + + servers = self._get_servers_from_clc( + server_ids, + 'Failed to obtain server list from the CLC API') + for server in servers: + if state == 'present': + server_changed, server_result = self._ensure_server_config( + server, server_params) + if server_result: + request_list.append(server_result) + aa_changed = self._ensure_aa_policy_present( + server, + server_params) + ap_changed = self._ensure_alert_policy_present( + server, + server_params) + elif state == 'absent': + aa_changed = self._ensure_aa_policy_absent( + server, + server_params) + ap_changed = self._ensure_alert_policy_absent( + server, + server_params) + if server_changed or aa_changed or ap_changed: + changed_servers.append(server) + changed = True + + self._wait_for_requests(self.module, request_list) + self._refresh_servers(self.module, changed_servers) + + for server in changed_servers: + server_dict_array.append(server.data) + result_server_ids.append(server.id) + + return changed, server_dict_array, result_server_ids + + def _ensure_server_config( + self, server, server_params): + """ + ensures the server is updated with the provided cpu and memory + :param server: the CLC server object + :param server_params: the dictionary of server parameters + :return: (changed, group) - + changed: Boolean whether a change was made + result: The result from the CLC API call + """ + cpu = server_params.get('cpu') + memory = server_params.get('memory') + changed = False + 
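+        # Fall back to the server's current cpu/memory when a value was not
+        # supplied, so the PATCH below is only issued for a real difference.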
result = None + + if not cpu: + cpu = server.cpu + if not memory: + memory = server.memory + if memory != server.memory or cpu != server.cpu: + if not self.module.check_mode: + result = self._modify_clc_server( + self.clc, + self.module, + server.id, + cpu, + memory) + changed = True + return changed, result + + @staticmethod + def _modify_clc_server(clc, module, server_id, cpu, memory): + """ + Modify the memory or CPU of a clc server. + :param clc: the clc-sdk instance to use + :param module: the AnsibleModule object + :param server_id: id of the server to modify + :param cpu: the new cpu value + :param memory: the new memory value + :return: the result of CLC API call + """ + result = None + acct_alias = clc.v2.Account.GetAlias() + try: + # Update the server configuration + job_obj = clc.v2.API.Call('PATCH', + 'servers/%s/%s' % (acct_alias, + server_id), + json.dumps([{"op": "set", + "member": "memory", + "value": memory}, + {"op": "set", + "member": "cpu", + "value": cpu}])) + result = clc.v2.Requests(job_obj) + except APIFailedResponse as ex: + module.fail_json( + msg='Unable to update the server configuration for server : "{0}". {1}'.format( + server_id, str(ex.response_text))) + return result + + @staticmethod + def _wait_for_requests(module, request_list): + """ + Block until server provisioning requests are completed. + :param module: the AnsibleModule object + :param request_list: a list of clc-sdk.Request instances + :return: none + """ + wait = module.params.get('wait') + if wait: + # Requests.WaitUntilComplete() returns the count of failed requests + failed_requests_count = sum( + [request.WaitUntilComplete() for request in request_list]) + + if failed_requests_count > 0: + module.fail_json( + msg='Unable to process modify server request') + + @staticmethod + def _refresh_servers(module, servers): + """ + Loop through a list of servers and refresh them. + :param module: the AnsibleModule object + :param servers: list of clc-sdk.Server instances to refresh + :return: none + """ + for server in servers: + try: + server.Refresh() + except CLCException as ex: + module.fail_json(msg='Unable to refresh the server {0}. 
{1}'.format( + server.id, ex.message + )) + + def _ensure_aa_policy_present( + self, server, server_params): + """ + ensures the server is updated with the provided anti affinity policy + :param server: the CLC server object + :param server_params: the dictionary of server parameters + :return: (changed, group) - + changed: Boolean whether a change was made + result: The result from the CLC API call + """ + changed = False + acct_alias = self.clc.v2.Account.GetAlias() + + aa_policy_id = server_params.get('anti_affinity_policy_id') + aa_policy_name = server_params.get('anti_affinity_policy_name') + if not aa_policy_id and aa_policy_name: + aa_policy_id = self._get_aa_policy_id_by_name( + self.clc, + self.module, + acct_alias, + aa_policy_name) + current_aa_policy_id = self._get_aa_policy_id_of_server( + self.clc, + self.module, + acct_alias, + server.id) + + if aa_policy_id and aa_policy_id != current_aa_policy_id: + self._modify_aa_policy( + self.clc, + self.module, + acct_alias, + server.id, + aa_policy_id) + changed = True + return changed + + def _ensure_aa_policy_absent( + self, server, server_params): + """ + ensures the provided anti affinity policy is removed from the server + :param server: the CLC server object + :param server_params: the dictionary of server parameters + :return: (changed, group) - + changed: Boolean whether a change was made + result: The result from the CLC API call + """ + changed = False + acct_alias = self.clc.v2.Account.GetAlias() + aa_policy_id = server_params.get('anti_affinity_policy_id') + aa_policy_name = server_params.get('anti_affinity_policy_name') + if not aa_policy_id and aa_policy_name: + aa_policy_id = self._get_aa_policy_id_by_name( + self.clc, + self.module, + acct_alias, + aa_policy_name) + current_aa_policy_id = self._get_aa_policy_id_of_server( + self.clc, + self.module, + acct_alias, + server.id) + + if aa_policy_id and aa_policy_id == current_aa_policy_id: + self._delete_aa_policy( + self.clc, + self.module, + acct_alias, + server.id) + changed = True + return changed + + @staticmethod + def _modify_aa_policy(clc, module, acct_alias, server_id, aa_policy_id): + """ + modifies the anti affinity policy of the CLC server + :param clc: the clc-sdk instance to use + :param module: the AnsibleModule object + :param acct_alias: the CLC account alias + :param server_id: the CLC server id + :param aa_policy_id: the anti affinity policy id + :return: result: The result from the CLC API call + """ + result = None + if not module.check_mode: + try: + result = clc.v2.API.Call('PUT', + 'servers/%s/%s/antiAffinityPolicy' % ( + acct_alias, + server_id), + json.dumps({"id": aa_policy_id})) + except APIFailedResponse as ex: + module.fail_json( + msg='Unable to modify anti affinity policy to server : "{0}". {1}'.format( + server_id, str(ex.response_text))) + return result + + @staticmethod + def _delete_aa_policy(clc, module, acct_alias, server_id): + """ + Delete the anti affinity policy of the CLC server + :param clc: the clc-sdk instance to use + :param module: the AnsibleModule object + :param acct_alias: the CLC account alias + :param server_id: the CLC server id + :return: result: The result from the CLC API call + """ + result = None + if not module.check_mode: + try: + result = clc.v2.API.Call('DELETE', + 'servers/%s/%s/antiAffinityPolicy' % ( + acct_alias, + server_id), + json.dumps({})) + except APIFailedResponse as ex: + module.fail_json( + msg='Unable to delete anti affinity policy to server : "{0}". 
{1}'.format( + server_id, str(ex.response_text))) + return result + + @staticmethod + def _get_aa_policy_id_by_name(clc, module, alias, aa_policy_name): + """ + retrieves the anti affinity policy id of the server based on the name of the policy + :param clc: the clc-sdk instance to use + :param module: the AnsibleModule object + :param alias: the CLC account alias + :param aa_policy_name: the anti affinity policy name + :return: aa_policy_id: The anti affinity policy id + """ + aa_policy_id = None + try: + aa_policies = clc.v2.API.Call(method='GET', + url='antiAffinityPolicies/%s' % alias) + except APIFailedResponse as ex: + return module.fail_json( + msg='Unable to fetch anti affinity policies from account alias : "{0}". {1}'.format( + alias, str(ex.response_text))) + for aa_policy in aa_policies.get('items'): + if aa_policy.get('name') == aa_policy_name: + if not aa_policy_id: + aa_policy_id = aa_policy.get('id') + else: + return module.fail_json( + msg='multiple anti affinity policies were found with policy name : %s' % aa_policy_name) + if not aa_policy_id: + module.fail_json( + msg='No anti affinity policy was found with policy name : %s' % aa_policy_name) + return aa_policy_id + + @staticmethod + def _get_aa_policy_id_of_server(clc, module, alias, server_id): + """ + retrieves the anti affinity policy id of the server based on the CLC server id + :param clc: the clc-sdk instance to use + :param module: the AnsibleModule object + :param alias: the CLC account alias + :param server_id: the CLC server id + :return: aa_policy_id: The anti affinity policy id + """ + aa_policy_id = None + try: + result = clc.v2.API.Call( + method='GET', url='servers/%s/%s/antiAffinityPolicy' % + (alias, server_id)) + aa_policy_id = result.get('id') + except APIFailedResponse as ex: + if ex.response_status_code != 404: + module.fail_json(msg='Unable to fetch anti affinity policy for server "{0}". 
{1}'.format( + server_id, str(ex.response_text))) + return aa_policy_id + + def _ensure_alert_policy_present( + self, server, server_params): + """ + ensures the server is updated with the provided alert policy + :param server: the CLC server object + :param server_params: the dictionary of server parameters + :return: (changed, group) - + changed: Boolean whether a change was made + result: The result from the CLC API call + """ + changed = False + acct_alias = self.clc.v2.Account.GetAlias() + alert_policy_id = server_params.get('alert_policy_id') + alert_policy_name = server_params.get('alert_policy_name') + if not alert_policy_id and alert_policy_name: + alert_policy_id = self._get_alert_policy_id_by_name( + self.clc, + self.module, + acct_alias, + alert_policy_name) + if alert_policy_id and not self._alert_policy_exists( + server, alert_policy_id): + self._add_alert_policy_to_server( + self.clc, + self.module, + acct_alias, + server.id, + alert_policy_id) + changed = True + return changed + + def _ensure_alert_policy_absent( + self, server, server_params): + """ + ensures the alert policy is removed from the server + :param server: the CLC server object + :param server_params: the dictionary of server parameters + :return: (changed, group) - + changed: Boolean whether a change was made + result: The result from the CLC API call + """ + changed = False + + acct_alias = self.clc.v2.Account.GetAlias() + alert_policy_id = server_params.get('alert_policy_id') + alert_policy_name = server_params.get('alert_policy_name') + if not alert_policy_id and alert_policy_name: + alert_policy_id = self._get_alert_policy_id_by_name( + self.clc, + self.module, + acct_alias, + alert_policy_name) + + if alert_policy_id and self._alert_policy_exists( + server, alert_policy_id): + self._remove_alert_policy_to_server( + self.clc, + self.module, + acct_alias, + server.id, + alert_policy_id) + changed = True + return changed + + @staticmethod + def _add_alert_policy_to_server( + clc, module, acct_alias, server_id, alert_policy_id): + """ + add the alert policy to CLC server + :param clc: the clc-sdk instance to use + :param module: the AnsibleModule object + :param acct_alias: the CLC account alias + :param server_id: the CLC server id + :param alert_policy_id: the alert policy id + :return: result: The result from the CLC API call + """ + result = None + if not module.check_mode: + try: + result = clc.v2.API.Call('POST', + 'servers/%s/%s/alertPolicies' % ( + acct_alias, + server_id), + json.dumps({"id": alert_policy_id})) + except APIFailedResponse as ex: + module.fail_json(msg='Unable to set alert policy to the server : "{0}". {1}'.format( + server_id, str(ex.response_text))) + return result + + @staticmethod + def _remove_alert_policy_to_server( + clc, module, acct_alias, server_id, alert_policy_id): + """ + remove the alert policy to the CLC server + :param clc: the clc-sdk instance to use + :param module: the AnsibleModule object + :param acct_alias: the CLC account alias + :param server_id: the CLC server id + :param alert_policy_id: the alert policy id + :return: result: The result from the CLC API call + """ + result = None + if not module.check_mode: + try: + result = clc.v2.API.Call('DELETE', + 'servers/%s/%s/alertPolicies/%s' + % (acct_alias, server_id, alert_policy_id)) + except APIFailedResponse as ex: + module.fail_json(msg='Unable to remove alert policy from the server : "{0}". 
{1}'.format( + server_id, str(ex.response_text))) + return result + + @staticmethod + def _get_alert_policy_id_by_name(clc, module, alias, alert_policy_name): + """ + retrieves the alert policy id of the server based on the name of the policy + :param clc: the clc-sdk instance to use + :param module: the AnsibleModule object + :param alias: the CLC account alias + :param alert_policy_name: the alert policy name + :return: alert_policy_id: The alert policy id + """ + alert_policy_id = None + try: + alert_policies = clc.v2.API.Call(method='GET', + url='alertPolicies/%s' % alias) + except APIFailedResponse as ex: + return module.fail_json(msg='Unable to fetch alert policies for account : "{0}". {1}'.format( + alias, str(ex.response_text))) + for alert_policy in alert_policies.get('items'): + if alert_policy.get('name') == alert_policy_name: + if not alert_policy_id: + alert_policy_id = alert_policy.get('id') + else: + return module.fail_json( + msg='multiple alert policies were found with policy name : %s' % alert_policy_name) + return alert_policy_id + + @staticmethod + def _alert_policy_exists(server, alert_policy_id): + """ + Checks if the alert policy exists for the server + :param server: the clc server object + :param alert_policy_id: the alert policy + :return: True: if the given alert policy id associated to the server, False otherwise + """ + result = False + alert_policies = server.alertPolicies + if alert_policies: + for alert_policy in alert_policies: + if alert_policy.get('id') == alert_policy_id: + result = True + return result + + @staticmethod + def _set_user_agent(clc): + if hasattr(clc, 'SetRequestsSession'): + agent_string = "ClcAnsibleModule/" + __version__ + ses = requests.Session() + ses.headers.update({"Api-Client": agent_string}) + ses.headers['User-Agent'] += " " + agent_string + clc.SetRequestsSession(ses) + + +def main(): + """ + The main function. Instantiates the module and calls process_request. + :return: none + """ + + argument_dict = ClcModifyServer._define_module_argument_spec() + module = AnsibleModule(supports_check_mode=True, **argument_dict) + clc_modify_server = ClcModifyServer(module) + clc_modify_server.process_request() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/centurylink/clc_publicip.py b/plugins/modules/cloud/centurylink/clc_publicip.py new file mode 100644 index 0000000000..98d1dd0ab1 --- /dev/null +++ b/plugins/modules/cloud/centurylink/clc_publicip.py @@ -0,0 +1,358 @@ +#!/usr/bin/python +# +# Copyright (c) 2015 CenturyLink +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +module: clc_publicip +short_description: Add and Delete public ips on servers in CenturyLink Cloud. +description: + - An Ansible module to add or delete public ip addresses on an existing server or servers in CenturyLink Cloud. +options: + protocol: + description: + - The protocol that the public IP will listen for. + default: TCP + choices: ['TCP', 'UDP', 'ICMP'] + ports: + description: + - A list of ports to expose. This is required when state is 'present' + server_ids: + description: + - A list of servers to create public ips on. + required: True + state: + description: + - Determine whether to create or delete public IPs. 
If present module will not create a second public ip if one + already exists. + default: present + choices: ['present', 'absent'] + wait: + description: + - Whether to wait for the tasks to finish before returning. + type: bool + default: 'yes' +requirements: + - python = 2.7 + - requests >= 2.5.0 + - clc-sdk +author: "CLC Runner (@clc-runner)" +notes: + - To use this module, it is required to set the below environment variables which enables access to the + Centurylink Cloud + - CLC_V2_API_USERNAME, the account login id for the centurylink cloud + - CLC_V2_API_PASSWORD, the account password for the centurylink cloud + - Alternatively, the module accepts the API token and account alias. The API token can be generated using the + CLC account login and password via the HTTP api call @ https://api.ctl.io/v2/authentication/login + - CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login + - CLC_ACCT_ALIAS, the account alias associated with the centurylink cloud + - Users can set CLC_V2_API_URL to specify an endpoint for pointing to a different CLC environment. +''' + +EXAMPLES = ''' +# Note - You must set the CLC_V2_API_USERNAME And CLC_V2_API_PASSWD Environment variables before running these examples + +- name: Add Public IP to Server + hosts: localhost + gather_facts: False + connection: local + tasks: + - name: Create Public IP For Servers + clc_publicip: + protocol: TCP + ports: + - 80 + server_ids: + - UC1TEST-SVR01 + - UC1TEST-SVR02 + state: present + register: clc + + - name: debug + debug: + var: clc + +- name: Delete Public IP from Server + hosts: localhost + gather_facts: False + connection: local + tasks: + - name: Create Public IP For Servers + clc_publicip: + server_ids: + - UC1TEST-SVR01 + - UC1TEST-SVR02 + state: absent + register: clc + + - name: debug + debug: + var: clc +''' + +RETURN = ''' +server_ids: + description: The list of server ids that are changed + returned: success + type: list + sample: + [ + "UC1TEST-SVR01", + "UC1TEST-SVR02" + ] +''' + +__version__ = '${version}' + +import os +import traceback +from distutils.version import LooseVersion + +REQUESTS_IMP_ERR = None +try: + import requests +except ImportError: + REQUESTS_IMP_ERR = traceback.format_exc() + REQUESTS_FOUND = False +else: + REQUESTS_FOUND = True + +# +# Requires the clc-python-sdk. 
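+# The import below is wrapped in try/except so that a missing SDK is reported
+# through missing_required_lib() instead of an unhandled ImportError.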
+# sudo pip install clc-sdk +# +CLC_IMP_ERR = None +try: + import clc as clc_sdk + from clc import CLCException +except ImportError: + CLC_IMP_ERR = traceback.format_exc() + CLC_FOUND = False + clc_sdk = None +else: + CLC_FOUND = True + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib + + +class ClcPublicIp(object): + clc = clc_sdk + module = None + + def __init__(self, module): + """ + Construct module + """ + self.module = module + if not CLC_FOUND: + self.module.fail_json(msg=missing_required_lib('clc-sdk'), exception=CLC_IMP_ERR) + if not REQUESTS_FOUND: + self.module.fail_json(msg=missing_required_lib('requests'), exception=REQUESTS_IMP_ERR) + if requests.__version__ and LooseVersion(requests.__version__) < LooseVersion('2.5.0'): + self.module.fail_json( + msg='requests library version should be >= 2.5.0') + + self._set_user_agent(self.clc) + + def process_request(self): + """ + Process the request - Main Code Path + :return: Returns with either an exit_json or fail_json + """ + self._set_clc_credentials_from_env() + params = self.module.params + server_ids = params['server_ids'] + ports = params['ports'] + protocol = params['protocol'] + state = params['state'] + + if state == 'present': + changed, changed_server_ids, requests = self.ensure_public_ip_present( + server_ids=server_ids, protocol=protocol, ports=ports) + elif state == 'absent': + changed, changed_server_ids, requests = self.ensure_public_ip_absent( + server_ids=server_ids) + else: + return self.module.fail_json(msg="Unknown State: " + state) + self._wait_for_requests_to_complete(requests) + return self.module.exit_json(changed=changed, + server_ids=changed_server_ids) + + @staticmethod + def _define_module_argument_spec(): + """ + Define the argument spec for the ansible module + :return: argument spec dictionary + """ + argument_spec = dict( + server_ids=dict(type='list', required=True), + protocol=dict(default='TCP', choices=['TCP', 'UDP', 'ICMP']), + ports=dict(type='list'), + wait=dict(type='bool', default=True), + state=dict(default='present', choices=['present', 'absent']), + ) + return argument_spec + + def ensure_public_ip_present(self, server_ids, protocol, ports): + """ + Ensures the given server ids having the public ip available + :param server_ids: the list of server ids + :param protocol: the ip protocol + :param ports: the list of ports to expose + :return: (changed, changed_server_ids, results) + changed: A flag indicating if there is any change + changed_server_ids : the list of server ids that are changed + results: The result list from clc public ip call + """ + changed = False + results = [] + changed_server_ids = [] + servers = self._get_servers_from_clc( + server_ids, + 'Failed to obtain server list from the CLC API') + servers_to_change = [ + server for server in servers if len( + server.PublicIPs().public_ips) == 0] + ports_to_expose = [{'protocol': protocol, 'port': port} + for port in ports] + for server in servers_to_change: + if not self.module.check_mode: + result = self._add_publicip_to_server(server, ports_to_expose) + results.append(result) + changed_server_ids.append(server.id) + changed = True + return changed, changed_server_ids, results + + def _add_publicip_to_server(self, server, ports_to_expose): + result = None + try: + result = server.PublicIPs().Add(ports_to_expose) + except CLCException as ex: + self.module.fail_json(msg='Failed to add public ip to the server : {0}. 
{1}'.format( + server.id, ex.response_text + )) + return result + + def ensure_public_ip_absent(self, server_ids): + """ + Ensures the given server ids having the public ip removed if there is any + :param server_ids: the list of server ids + :return: (changed, changed_server_ids, results) + changed: A flag indicating if there is any change + changed_server_ids : the list of server ids that are changed + results: The result list from clc public ip call + """ + changed = False + results = [] + changed_server_ids = [] + servers = self._get_servers_from_clc( + server_ids, + 'Failed to obtain server list from the CLC API') + servers_to_change = [ + server for server in servers if len( + server.PublicIPs().public_ips) > 0] + for server in servers_to_change: + if not self.module.check_mode: + result = self._remove_publicip_from_server(server) + results.append(result) + changed_server_ids.append(server.id) + changed = True + return changed, changed_server_ids, results + + def _remove_publicip_from_server(self, server): + result = None + try: + for ip_address in server.PublicIPs().public_ips: + result = ip_address.Delete() + except CLCException as ex: + self.module.fail_json(msg='Failed to remove public ip from the server : {0}. {1}'.format( + server.id, ex.response_text + )) + return result + + def _wait_for_requests_to_complete(self, requests_lst): + """ + Waits until the CLC requests are complete if the wait argument is True + :param requests_lst: The list of CLC request objects + :return: none + """ + if not self.module.params['wait']: + return + for request in requests_lst: + request.WaitUntilComplete() + for request_details in request.requests: + if request_details.Status() != 'succeeded': + self.module.fail_json( + msg='Unable to process public ip request') + + def _set_clc_credentials_from_env(self): + """ + Set the CLC Credentials on the sdk by reading environment variables + :return: none + """ + env = os.environ + v2_api_token = env.get('CLC_V2_API_TOKEN', False) + v2_api_username = env.get('CLC_V2_API_USERNAME', False) + v2_api_passwd = env.get('CLC_V2_API_PASSWD', False) + clc_alias = env.get('CLC_ACCT_ALIAS', False) + api_url = env.get('CLC_V2_API_URL', False) + + if api_url: + self.clc.defaults.ENDPOINT_URL_V2 = api_url + + if v2_api_token and clc_alias: + self.clc._LOGIN_TOKEN_V2 = v2_api_token + self.clc._V2_ENABLED = True + self.clc.ALIAS = clc_alias + elif v2_api_username and v2_api_passwd: + self.clc.v2.SetCredentials( + api_username=v2_api_username, + api_passwd=v2_api_passwd) + else: + return self.module.fail_json( + msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD " + "environment variables") + + def _get_servers_from_clc(self, server_ids, message): + """ + Gets list of servers form CLC api + """ + try: + return self.clc.v2.Servers(server_ids).servers + except CLCException as exception: + self.module.fail_json(msg=message + ': %s' % exception) + + @staticmethod + def _set_user_agent(clc): + if hasattr(clc, 'SetRequestsSession'): + agent_string = "ClcAnsibleModule/" + __version__ + ses = requests.Session() + ses.headers.update({"Api-Client": agent_string}) + ses.headers['User-Agent'] += " " + agent_string + clc.SetRequestsSession(ses) + + +def main(): + """ + The main function. Instantiates the module and calls process_request. 
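+    Example ad-hoc invocation (illustrative only; assumes CLC credentials
+    are already exported in the environment):
+        ansible localhost -m clc_publicip -a "server_ids=UC1TEST-SVR01 ports=80 state=present"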
+ :return: none + """ + module = AnsibleModule( + argument_spec=ClcPublicIp._define_module_argument_spec(), + supports_check_mode=True + ) + clc_public_ip = ClcPublicIp(module) + clc_public_ip.process_request() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/centurylink/clc_server.py b/plugins/modules/cloud/centurylink/clc_server.py new file mode 100644 index 0000000000..84b0f110a7 --- /dev/null +++ b/plugins/modules/cloud/centurylink/clc_server.py @@ -0,0 +1,1528 @@ +#!/usr/bin/python +# +# Copyright (c) 2015 CenturyLink +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +module: clc_server +short_description: Create, Delete, Start and Stop servers in CenturyLink Cloud. +description: + - An Ansible module to Create, Delete, Start and Stop servers in CenturyLink Cloud. +options: + additional_disks: + description: + - The list of additional disks for the server + default: [] + add_public_ip: + description: + - Whether to add a public ip to the server + type: bool + default: 'no' + alias: + description: + - The account alias to provision the servers under. + anti_affinity_policy_id: + description: + - The anti-affinity policy to assign to the server. This is mutually exclusive with 'anti_affinity_policy_name'. + anti_affinity_policy_name: + description: + - The anti-affinity policy to assign to the server. This is mutually exclusive with 'anti_affinity_policy_id'. + alert_policy_id: + description: + - The alert policy to assign to the server. This is mutually exclusive with 'alert_policy_name'. + alert_policy_name: + description: + - The alert policy to assign to the server. This is mutually exclusive with 'alert_policy_id'. + count: + description: + - The number of servers to build (mutually exclusive with exact_count) + default: 1 + count_group: + description: + - Required when exact_count is specified. The Server Group use to determine how many servers to deploy. + cpu: + description: + - How many CPUs to provision on the server + default: 1 + cpu_autoscale_policy_id: + description: + - The autoscale policy to assign to the server. + custom_fields: + description: + - The list of custom fields to set on the server. + default: [] + description: + description: + - The description to set for the server. + exact_count: + description: + - Run in idempotent mode. Will insure that this exact number of servers are running in the provided group, + creating and deleting them to reach that count. Requires count_group to be set. + group: + description: + - The Server Group to create servers under. + default: 'Default Group' + ip_address: + description: + - The IP Address for the server. One is assigned if not provided. + location: + description: + - The Datacenter to create servers in. + managed_os: + description: + - Whether to create the server as 'Managed' or not. + type: bool + default: 'no' + required: False + memory: + description: + - Memory in GB. + default: 1 + name: + description: + - A 1 to 6 character identifier to use for the server. This is required when state is 'present' + network_id: + description: + - The network UUID on which to create servers. + packages: + description: + - The list of blue print packages to run on the server after its created. 
+ default: [] + password: + description: + - Password for the administrator / root user + primary_dns: + description: + - Primary DNS used by the server. + public_ip_protocol: + description: + - The protocol to use for the public ip if add_public_ip is set to True. + default: 'TCP' + choices: ['TCP', 'UDP', 'ICMP'] + public_ip_ports: + description: + - A list of ports to allow on the firewall to the servers public ip, if add_public_ip is set to True. + default: [] + secondary_dns: + description: + - Secondary DNS used by the server. + server_ids: + description: + - Required for started, stopped, and absent states. + A list of server Ids to insure are started, stopped, or absent. + default: [] + source_server_password: + description: + - The password for the source server if a clone is specified. + state: + description: + - The state to insure that the provided resources are in. + default: 'present' + choices: ['present', 'absent', 'started', 'stopped'] + storage_type: + description: + - The type of storage to attach to the server. + default: 'standard' + choices: ['standard', 'hyperscale'] + template: + description: + - The template to use for server creation. Will search for a template if a partial string is provided. + This is required when state is 'present' + ttl: + description: + - The time to live for the server in seconds. The server will be deleted when this time expires. + type: + description: + - The type of server to create. + default: 'standard' + choices: ['standard', 'hyperscale', 'bareMetal'] + configuration_id: + description: + - Only required for bare metal servers. + Specifies the identifier for the specific configuration type of bare metal server to deploy. + os_type: + description: + - Only required for bare metal servers. + Specifies the OS to provision with the bare metal server. + choices: ['redHat6_64Bit', 'centOS6_64Bit', 'windows2012R2Standard_64Bit', 'ubuntu14_64Bit'] + wait: + description: + - Whether to wait for the provisioning tasks to finish before returning. + type: bool + default: 'yes' +requirements: + - python = 2.7 + - requests >= 2.5.0 + - clc-sdk +author: "CLC Runner (@clc-runner)" +notes: + - To use this module, it is required to set the below environment variables which enables access to the + Centurylink Cloud + - CLC_V2_API_USERNAME, the account login id for the centurylink cloud + - CLC_V2_API_PASSWORD, the account password for the centurylink cloud + - Alternatively, the module accepts the API token and account alias. The API token can be generated using the + CLC account login and password via the HTTP api call @ https://api.ctl.io/v2/authentication/login + - CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login + - CLC_ACCT_ALIAS, the account alias associated with the centurylink cloud + - Users can set CLC_V2_API_URL to specify an endpoint for pointing to a different CLC environment. 
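+    - The module code reads the account password from the CLC_V2_API_PASSWD environment variable, matching the examples below.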
+''' + +EXAMPLES = ''' +# Note - You must set the CLC_V2_API_USERNAME And CLC_V2_API_PASSWD Environment variables before running these examples + +- name: Provision a single Ubuntu Server + clc_server: + name: test + template: ubuntu-14-64 + count: 1 + group: Default Group + state: present + +- name: Ensure 'Default Group' has exactly 5 servers + clc_server: + name: test + template: ubuntu-14-64 + exact_count: 5 + count_group: Default Group + group: Default Group + +- name: Stop a Server + clc_server: + server_ids: + - UC1ACCT-TEST01 + state: stopped + +- name: Start a Server + clc_server: + server_ids: + - UC1ACCT-TEST01 + state: started + +- name: Delete a Server + clc_server: + server_ids: + - UC1ACCT-TEST01 + state: absent +''' + +RETURN = ''' +server_ids: + description: The list of server ids that are created + returned: success + type: list + sample: + [ + "UC1TEST-SVR01", + "UC1TEST-SVR02" + ] +partially_created_server_ids: + description: The list of server ids that are partially created + returned: success + type: list + sample: + [ + "UC1TEST-SVR01", + "UC1TEST-SVR02" + ] +servers: + description: The list of server objects returned from CLC + returned: success + type: list + sample: + [ + { + "changeInfo":{ + "createdBy":"service.wfad", + "createdDate":1438196820, + "modifiedBy":"service.wfad", + "modifiedDate":1438196820 + }, + "description":"test-server", + "details":{ + "alertPolicies":[ + + ], + "cpu":1, + "customFields":[ + + ], + "diskCount":3, + "disks":[ + { + "id":"0:0", + "partitionPaths":[ + + ], + "sizeGB":1 + }, + { + "id":"0:1", + "partitionPaths":[ + + ], + "sizeGB":2 + }, + { + "id":"0:2", + "partitionPaths":[ + + ], + "sizeGB":14 + } + ], + "hostName":"", + "inMaintenanceMode":false, + "ipAddresses":[ + { + "internal":"10.1.1.1" + } + ], + "memoryGB":1, + "memoryMB":1024, + "partitions":[ + + ], + "powerState":"started", + "snapshots":[ + + ], + "storageGB":17 + }, + "groupId":"086ac1dfe0b6411989e8d1b77c4065f0", + "id":"test-server", + "ipaddress":"10.120.45.23", + "isTemplate":false, + "links":[ + { + "href":"/v2/servers/wfad/test-server", + "id":"test-server", + "rel":"self", + "verbs":[ + "GET", + "PATCH", + "DELETE" + ] + }, + { + "href":"/v2/groups/wfad/086ac1dfe0b6411989e8d1b77c4065f0", + "id":"086ac1dfe0b6411989e8d1b77c4065f0", + "rel":"group" + }, + { + "href":"/v2/accounts/wfad", + "id":"wfad", + "rel":"account" + }, + { + "href":"/v2/billing/wfad/serverPricing/test-server", + "rel":"billing" + }, + { + "href":"/v2/servers/wfad/test-server/publicIPAddresses", + "rel":"publicIPAddresses", + "verbs":[ + "POST" + ] + }, + { + "href":"/v2/servers/wfad/test-server/credentials", + "rel":"credentials" + }, + { + "href":"/v2/servers/wfad/test-server/statistics", + "rel":"statistics" + }, + { + "href":"/v2/servers/wfad/510ec21ae82d4dc89d28479753bf736a/upcomingScheduledActivities", + "rel":"upcomingScheduledActivities" + }, + { + "href":"/v2/servers/wfad/510ec21ae82d4dc89d28479753bf736a/scheduledActivities", + "rel":"scheduledActivities", + "verbs":[ + "GET", + "POST" + ] + }, + { + "href":"/v2/servers/wfad/test-server/capabilities", + "rel":"capabilities" + }, + { + "href":"/v2/servers/wfad/test-server/alertPolicies", + "rel":"alertPolicyMappings", + "verbs":[ + "POST" + ] + }, + { + "href":"/v2/servers/wfad/test-server/antiAffinityPolicy", + "rel":"antiAffinityPolicyMapping", + "verbs":[ + "PUT", + "DELETE" + ] + }, + { + "href":"/v2/servers/wfad/test-server/cpuAutoscalePolicy", + "rel":"cpuAutoscalePolicyMapping", + "verbs":[ + "PUT", + "DELETE" + ] + } + ], + 
"locationId":"UC1", + "name":"test-server", + "os":"ubuntu14_64Bit", + "osType":"Ubuntu 14 64-bit", + "status":"active", + "storageType":"standard", + "type":"standard" + } + ] +''' + +__version__ = '${version}' + +import json +import os +import time +import traceback +from distutils.version import LooseVersion + +REQUESTS_IMP_ERR = None +try: + import requests +except ImportError: + REQUESTS_IMP_ERR = traceback.format_exc() + REQUESTS_FOUND = False +else: + REQUESTS_FOUND = True + +# +# Requires the clc-python-sdk. +# sudo pip install clc-sdk +# +CLC_IMP_ERR = None +try: + import clc as clc_sdk + from clc import CLCException + from clc import APIFailedResponse +except ImportError: + CLC_IMP_ERR = traceback.format_exc() + CLC_FOUND = False + clc_sdk = None +else: + CLC_FOUND = True + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib + + +class ClcServer: + clc = clc_sdk + + def __init__(self, module): + """ + Construct module + """ + self.clc = clc_sdk + self.module = module + self.group_dict = {} + + if not CLC_FOUND: + self.module.fail_json(msg=missing_required_lib('clc-sdk'), exception=CLC_IMP_ERR) + if not REQUESTS_FOUND: + self.module.fail_json(msg=missing_required_lib('requests'), exception=REQUESTS_IMP_ERR) + if requests.__version__ and LooseVersion( + requests.__version__) < LooseVersion('2.5.0'): + self.module.fail_json( + msg='requests library version should be >= 2.5.0') + + self._set_user_agent(self.clc) + + def process_request(self): + """ + Process the request - Main Code Path + :return: Returns with either an exit_json or fail_json + """ + changed = False + new_server_ids = [] + server_dict_array = [] + + self._set_clc_credentials_from_env() + self.module.params = self._validate_module_params( + self.clc, + self.module) + p = self.module.params + state = p.get('state') + + # + # Handle each state + # + partial_servers_ids = [] + if state == 'absent': + server_ids = p['server_ids'] + if not isinstance(server_ids, list): + return self.module.fail_json( + msg='server_ids needs to be a list of instances to delete: %s' % + server_ids) + + (changed, + server_dict_array, + new_server_ids) = self._delete_servers(module=self.module, + clc=self.clc, + server_ids=server_ids) + + elif state in ('started', 'stopped'): + server_ids = p.get('server_ids') + if not isinstance(server_ids, list): + return self.module.fail_json( + msg='server_ids needs to be a list of servers to run: %s' % + server_ids) + + (changed, + server_dict_array, + new_server_ids) = self._start_stop_servers(self.module, + self.clc, + server_ids) + + elif state == 'present': + # Changed is always set to true when provisioning new instances + if not p.get('template') and p.get('type') != 'bareMetal': + return self.module.fail_json( + msg='template parameter is required for new instance') + + if p.get('exact_count') is None: + (server_dict_array, + new_server_ids, + partial_servers_ids, + changed) = self._create_servers(self.module, + self.clc) + else: + (server_dict_array, + new_server_ids, + partial_servers_ids, + changed) = self._enforce_count(self.module, + self.clc) + + self.module.exit_json( + changed=changed, + server_ids=new_server_ids, + partially_created_server_ids=partial_servers_ids, + servers=server_dict_array) + + @staticmethod + def _define_module_argument_spec(): + """ + Define the argument spec for the ansible module + :return: argument spec dictionary + """ + argument_spec = dict( + name=dict(), + template=dict(), + group=dict(default='Default Group'), + network_id=dict(), + 
location=dict(default=None), + cpu=dict(default=1, type='int'), + memory=dict(default=1, type='int'), + alias=dict(default=None), + password=dict(default=None, no_log=True), + ip_address=dict(default=None), + storage_type=dict( + default='standard', + choices=[ + 'standard', + 'hyperscale']), + type=dict(default='standard', choices=['standard', 'hyperscale', 'bareMetal']), + primary_dns=dict(default=None), + secondary_dns=dict(default=None), + additional_disks=dict(type='list', default=[]), + custom_fields=dict(type='list', default=[]), + ttl=dict(default=None), + managed_os=dict(type='bool', default=False), + description=dict(default=None), + source_server_password=dict(default=None, no_log=True), + cpu_autoscale_policy_id=dict(default=None), + anti_affinity_policy_id=dict(default=None), + anti_affinity_policy_name=dict(default=None), + alert_policy_id=dict(default=None), + alert_policy_name=dict(default=None), + packages=dict(type='list', default=[]), + state=dict( + default='present', + choices=[ + 'present', + 'absent', + 'started', + 'stopped']), + count=dict(type='int', default=1), + exact_count=dict(type='int', default=None), + count_group=dict(), + server_ids=dict(type='list', default=[]), + add_public_ip=dict(type='bool', default=False), + public_ip_protocol=dict( + default='TCP', + choices=[ + 'TCP', + 'UDP', + 'ICMP']), + public_ip_ports=dict(type='list', default=[]), + configuration_id=dict(default=None), + os_type=dict(default=None, + choices=[ + 'redHat6_64Bit', + 'centOS6_64Bit', + 'windows2012R2Standard_64Bit', + 'ubuntu14_64Bit' + ]), + wait=dict(type='bool', default=True)) + + mutually_exclusive = [ + ['exact_count', 'count'], + ['exact_count', 'state'], + ['anti_affinity_policy_id', 'anti_affinity_policy_name'], + ['alert_policy_id', 'alert_policy_name'], + ] + return {"argument_spec": argument_spec, + "mutually_exclusive": mutually_exclusive} + + def _set_clc_credentials_from_env(self): + """ + Set the CLC Credentials on the sdk by reading environment variables + :return: none + """ + env = os.environ + v2_api_token = env.get('CLC_V2_API_TOKEN', False) + v2_api_username = env.get('CLC_V2_API_USERNAME', False) + v2_api_passwd = env.get('CLC_V2_API_PASSWD', False) + clc_alias = env.get('CLC_ACCT_ALIAS', False) + api_url = env.get('CLC_V2_API_URL', False) + if api_url: + self.clc.defaults.ENDPOINT_URL_V2 = api_url + + if v2_api_token and clc_alias: + self.clc._LOGIN_TOKEN_V2 = v2_api_token + self.clc._V2_ENABLED = True + self.clc.ALIAS = clc_alias + elif v2_api_username and v2_api_passwd: + self.clc.v2.SetCredentials( + api_username=v2_api_username, + api_passwd=v2_api_passwd) + else: + return self.module.fail_json( + msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD " + "environment variables") + + @staticmethod + def _validate_module_params(clc, module): + """ + Validate the module params, and lookup default values. 
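+        Values the user did not supply are filled in (alias, cpu, memory,
+        description, ttl), and template, group and policy names are resolved
+        to their API ids.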
+ :param clc: clc-sdk instance to use + :param module: module to validate + :return: dictionary of validated params + """ + params = module.params + datacenter = ClcServer._find_datacenter(clc, module) + + ClcServer._validate_types(module) + ClcServer._validate_name(module) + + params['alias'] = ClcServer._find_alias(clc, module) + params['cpu'] = ClcServer._find_cpu(clc, module) + params['memory'] = ClcServer._find_memory(clc, module) + params['description'] = ClcServer._find_description(module) + params['ttl'] = ClcServer._find_ttl(clc, module) + params['template'] = ClcServer._find_template_id(module, datacenter) + params['group'] = ClcServer._find_group(module, datacenter).id + params['network_id'] = ClcServer._find_network_id(module, datacenter) + params['anti_affinity_policy_id'] = ClcServer._find_aa_policy_id( + clc, + module) + params['alert_policy_id'] = ClcServer._find_alert_policy_id( + clc, + module) + + return params + + @staticmethod + def _find_datacenter(clc, module): + """ + Find the datacenter by calling the CLC API. + :param clc: clc-sdk instance to use + :param module: module to validate + :return: clc-sdk.Datacenter instance + """ + location = module.params.get('location') + try: + if not location: + account = clc.v2.Account() + location = account.data.get('primaryDataCenter') + data_center = clc.v2.Datacenter(location) + return data_center + except CLCException: + module.fail_json(msg="Unable to find location: {0}".format(location)) + + @staticmethod + def _find_alias(clc, module): + """ + Find or Validate the Account Alias by calling the CLC API + :param clc: clc-sdk instance to use + :param module: module to validate + :return: clc-sdk.Account instance + """ + alias = module.params.get('alias') + if not alias: + try: + alias = clc.v2.Account.GetAlias() + except CLCException as ex: + module.fail_json(msg='Unable to find account alias. {0}'.format( + ex.message + )) + return alias + + @staticmethod + def _find_cpu(clc, module): + """ + Find or validate the CPU value by calling the CLC API + :param clc: clc-sdk instance to use + :param module: module to validate + :return: Int value for CPU + """ + cpu = module.params.get('cpu') + group_id = module.params.get('group_id') + alias = module.params.get('alias') + state = module.params.get('state') + + if not cpu and state == 'present': + group = clc.v2.Group(id=group_id, + alias=alias) + if group.Defaults("cpu"): + cpu = group.Defaults("cpu") + else: + module.fail_json( + msg=str("Can\'t determine a default cpu value. Please provide a value for cpu.")) + return cpu + + @staticmethod + def _find_memory(clc, module): + """ + Find or validate the Memory value by calling the CLC API + :param clc: clc-sdk instance to use + :param module: module to validate + :return: Int value for Memory + """ + memory = module.params.get('memory') + group_id = module.params.get('group_id') + alias = module.params.get('alias') + state = module.params.get('state') + + if not memory and state == 'present': + group = clc.v2.Group(id=group_id, + alias=alias) + if group.Defaults("memory"): + memory = group.Defaults("memory") + else: + module.fail_json(msg=str( + "Can\'t determine a default memory value. 
Please provide a value for memory.")) + return memory + + @staticmethod + def _find_description(module): + """ + Set the description module param to name if description is blank + :param module: the module to validate + :return: string description + """ + description = module.params.get('description') + if not description: + description = module.params.get('name') + return description + + @staticmethod + def _validate_types(module): + """ + Validate that type and storage_type are set appropriately, and fail if not + :param module: the module to validate + :return: none + """ + state = module.params.get('state') + server_type = module.params.get( + 'type').lower() if module.params.get('type') else None + storage_type = module.params.get( + 'storage_type').lower() if module.params.get('storage_type') else None + + if state == "present": + if server_type == "standard" and storage_type not in ( + "standard", "premium"): + module.fail_json( + msg=str("Standard VMs must have storage_type = 'standard' or 'premium'")) + + if server_type == "hyperscale" and storage_type != "hyperscale": + module.fail_json( + msg=str("Hyperscale VMs must have storage_type = 'hyperscale'")) + + @staticmethod + def _validate_name(module): + """ + Validate that name is the correct length if provided, fail if it's not + :param module: the module to validate + :return: none + """ + server_name = module.params.get('name') + state = module.params.get('state') + + if state == 'present' and ( + len(server_name) < 1 or len(server_name) > 6): + module.fail_json(msg=str( + "When state = 'present', name must be a string with a minimum length of 1 and a maximum length of 6")) + + @staticmethod + def _find_ttl(clc, module): + """ + Validate that TTL is > 3600 if set, and fail if not + :param clc: clc-sdk instance to use + :param module: module to validate + :return: validated ttl + """ + ttl = module.params.get('ttl') + + if ttl: + if ttl <= 3600: + return module.fail_json(msg=str("Ttl cannot be <= 3600")) + else: + ttl = clc.v2.time_utils.SecondsToZuluTS(int(time.time()) + ttl) + return ttl + + @staticmethod + def _find_template_id(module, datacenter): + """ + Find the template id by calling the CLC API. + :param module: the module to validate + :param datacenter: the datacenter to search for the template + :return: a valid clc template id + """ + lookup_template = module.params.get('template') + state = module.params.get('state') + type = module.params.get('type') + result = None + + if state == 'present' and type != 'bareMetal': + try: + result = datacenter.Templates().Search(lookup_template)[0].id + except CLCException: + module.fail_json( + msg=str( + "Unable to find a template: " + + lookup_template + + " in location: " + + datacenter.id)) + return result + + @staticmethod + def _find_network_id(module, datacenter): + """ + Validate the provided network id or return a default. 
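+        When no network_id is given, the first network of the datacenter is used.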
+ :param module: the module to validate + :param datacenter: the datacenter to search for a network id + :return: a valid network id + """ + network_id = module.params.get('network_id') + + if not network_id: + try: + network_id = datacenter.Networks().networks[0].id + # -- added for clc-sdk 2.23 compatibility + # datacenter_networks = clc_sdk.v2.Networks( + # networks_lst=datacenter._DeploymentCapabilities()['deployableNetworks']) + # network_id = datacenter_networks.networks[0].id + # -- end + except CLCException: + module.fail_json( + msg=str( + "Unable to find a network in location: " + + datacenter.id)) + + return network_id + + @staticmethod + def _find_aa_policy_id(clc, module): + """ + Validate if the anti affinity policy exist for the given name and throw error if not + :param clc: the clc-sdk instance + :param module: the module to validate + :return: aa_policy_id: the anti affinity policy id of the given name. + """ + aa_policy_id = module.params.get('anti_affinity_policy_id') + aa_policy_name = module.params.get('anti_affinity_policy_name') + if not aa_policy_id and aa_policy_name: + alias = module.params.get('alias') + aa_policy_id = ClcServer._get_anti_affinity_policy_id( + clc, + module, + alias, + aa_policy_name) + if not aa_policy_id: + module.fail_json( + msg='No anti affinity policy was found with policy name : %s' % aa_policy_name) + return aa_policy_id + + @staticmethod + def _find_alert_policy_id(clc, module): + """ + Validate if the alert policy exist for the given name and throw error if not + :param clc: the clc-sdk instance + :param module: the module to validate + :return: alert_policy_id: the alert policy id of the given name. + """ + alert_policy_id = module.params.get('alert_policy_id') + alert_policy_name = module.params.get('alert_policy_name') + if not alert_policy_id and alert_policy_name: + alias = module.params.get('alias') + alert_policy_id = ClcServer._get_alert_policy_id_by_name( + clc=clc, + module=module, + alias=alias, + alert_policy_name=alert_policy_name + ) + if not alert_policy_id: + module.fail_json( + msg='No alert policy exist with name : %s' % alert_policy_name) + return alert_policy_id + + def _create_servers(self, module, clc, override_count=None): + """ + Create New Servers in CLC cloud + :param module: the AnsibleModule object + :param clc: the clc-sdk instance to use + :return: a list of dictionaries with server information about the servers that were created + """ + p = module.params + request_list = [] + servers = [] + server_dict_array = [] + created_server_ids = [] + partial_created_servers_ids = [] + + add_public_ip = p.get('add_public_ip') + public_ip_protocol = p.get('public_ip_protocol') + public_ip_ports = p.get('public_ip_ports') + + params = { + 'name': p.get('name'), + 'template': p.get('template'), + 'group_id': p.get('group'), + 'network_id': p.get('network_id'), + 'cpu': p.get('cpu'), + 'memory': p.get('memory'), + 'alias': p.get('alias'), + 'password': p.get('password'), + 'ip_address': p.get('ip_address'), + 'storage_type': p.get('storage_type'), + 'type': p.get('type'), + 'primary_dns': p.get('primary_dns'), + 'secondary_dns': p.get('secondary_dns'), + 'additional_disks': p.get('additional_disks'), + 'custom_fields': p.get('custom_fields'), + 'ttl': p.get('ttl'), + 'managed_os': p.get('managed_os'), + 'description': p.get('description'), + 'source_server_password': p.get('source_server_password'), + 'cpu_autoscale_policy_id': p.get('cpu_autoscale_policy_id'), + 'anti_affinity_policy_id': 
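+            # already resolved to an id by _validate_module_params(), whether the
+            # user passed anti_affinity_policy_id or anti_affinity_policy_name: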
p.get('anti_affinity_policy_id'), + 'packages': p.get('packages'), + 'configuration_id': p.get('configuration_id'), + 'os_type': p.get('os_type') + } + + count = override_count if override_count else p.get('count') + + changed = False if count == 0 else True + + if not changed: + return server_dict_array, created_server_ids, partial_created_servers_ids, changed + for i in range(0, count): + if not module.check_mode: + req = self._create_clc_server(clc=clc, + module=module, + server_params=params) + server = req.requests[0].Server() + request_list.append(req) + servers.append(server) + + self._wait_for_requests(module, request_list) + self._refresh_servers(module, servers) + + ip_failed_servers = self._add_public_ip_to_servers( + module=module, + should_add_public_ip=add_public_ip, + servers=servers, + public_ip_protocol=public_ip_protocol, + public_ip_ports=public_ip_ports) + ap_failed_servers = self._add_alert_policy_to_servers(clc=clc, + module=module, + servers=servers) + + for server in servers: + if server in ip_failed_servers or server in ap_failed_servers: + partial_created_servers_ids.append(server.id) + else: + # reload server details + server = clc.v2.Server(server.id) + server.data['ipaddress'] = server.details[ + 'ipAddresses'][0]['internal'] + + if add_public_ip and len(server.PublicIPs().public_ips) > 0: + server.data['publicip'] = str( + server.PublicIPs().public_ips[0]) + created_server_ids.append(server.id) + server_dict_array.append(server.data) + + return server_dict_array, created_server_ids, partial_created_servers_ids, changed + + def _enforce_count(self, module, clc): + """ + Enforce that there is the right number of servers in the provided group. + Starts or stops servers as necessary. + :param module: the AnsibleModule object + :param clc: the clc-sdk instance to use + :return: a list of dictionaries with server information about the servers that were created or deleted + """ + p = module.params + changed = False + count_group = p.get('count_group') + datacenter = ClcServer._find_datacenter(clc, module) + exact_count = p.get('exact_count') + server_dict_array = [] + partial_servers_ids = [] + changed_server_ids = [] + + # fail here if the exact count was specified without filtering + # on a group, as this may lead to a undesired removal of instances + if exact_count and count_group is None: + return module.fail_json( + msg="you must use the 'count_group' option with exact_count") + + servers, running_servers = ClcServer._find_running_servers_by_group( + module, datacenter, count_group) + + if len(running_servers) == exact_count: + changed = False + + elif len(running_servers) < exact_count: + to_create = exact_count - len(running_servers) + server_dict_array, changed_server_ids, partial_servers_ids, changed \ + = self._create_servers(module, clc, override_count=to_create) + + for server in server_dict_array: + running_servers.append(server) + + elif len(running_servers) > exact_count: + to_remove = len(running_servers) - exact_count + all_server_ids = sorted([x.id for x in running_servers]) + remove_ids = all_server_ids[0:to_remove] + + (changed, server_dict_array, changed_server_ids) \ + = ClcServer._delete_servers(module, clc, remove_ids) + + return server_dict_array, changed_server_ids, partial_servers_ids, changed + + @staticmethod + def _wait_for_requests(module, request_list): + """ + Block until server provisioning requests are completed. 
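+        This is a no-op when the 'wait' module parameter is false.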
+ :param module: the AnsibleModule object + :param request_list: a list of clc-sdk.Request instances + :return: none + """ + wait = module.params.get('wait') + if wait: + # Requests.WaitUntilComplete() returns the count of failed requests + failed_requests_count = sum( + [request.WaitUntilComplete() for request in request_list]) + + if failed_requests_count > 0: + module.fail_json( + msg='Unable to process server request') + + @staticmethod + def _refresh_servers(module, servers): + """ + Loop through a list of servers and refresh them. + :param module: the AnsibleModule object + :param servers: list of clc-sdk.Server instances to refresh + :return: none + """ + for server in servers: + try: + server.Refresh() + except CLCException as ex: + module.fail_json(msg='Unable to refresh the server {0}. {1}'.format( + server.id, ex.message + )) + + @staticmethod + def _add_public_ip_to_servers( + module, + should_add_public_ip, + servers, + public_ip_protocol, + public_ip_ports): + """ + Create a public IP for servers + :param module: the AnsibleModule object + :param should_add_public_ip: boolean - whether or not to provision a public ip for servers. Skipped if False + :param servers: List of servers to add public ips to + :param public_ip_protocol: a protocol to allow for the public ips + :param public_ip_ports: list of ports to allow for the public ips + :return: none + """ + failed_servers = [] + if not should_add_public_ip: + return failed_servers + + ports_lst = [] + request_list = [] + server = None + + for port in public_ip_ports: + ports_lst.append( + {'protocol': public_ip_protocol, 'port': port}) + try: + if not module.check_mode: + for server in servers: + request = server.PublicIPs().Add(ports_lst) + request_list.append(request) + except APIFailedResponse: + failed_servers.append(server) + ClcServer._wait_for_requests(module, request_list) + return failed_servers + + @staticmethod + def _add_alert_policy_to_servers(clc, module, servers): + """ + Associate the alert policy to servers + :param clc: the clc-sdk instance to use + :param module: the AnsibleModule object + :param servers: List of servers to add alert policy to + :return: failed_servers: the list of servers which failed while associating alert policy + """ + failed_servers = [] + p = module.params + alert_policy_id = p.get('alert_policy_id') + alias = p.get('alias') + + if alert_policy_id and not module.check_mode: + for server in servers: + try: + ClcServer._add_alert_policy_to_server( + clc=clc, + alias=alias, + server_id=server.id, + alert_policy_id=alert_policy_id) + except CLCException: + failed_servers.append(server) + return failed_servers + + @staticmethod + def _add_alert_policy_to_server( + clc, alias, server_id, alert_policy_id): + """ + Associate an alert policy to a clc server + :param clc: the clc-sdk instance to use + :param alias: the clc account alias + :param server_id: The clc server id + :param alert_policy_id: the alert policy id to be associated to the server + :return: none + """ + try: + clc.v2.API.Call( + method='POST', + url='servers/%s/%s/alertPolicies' % (alias, server_id), + payload=json.dumps( + { + 'id': alert_policy_id + })) + except APIFailedResponse as e: + raise CLCException( + 'Failed to associate alert policy to the server : {0} with Error {1}'.format( + server_id, str(e.response_text))) + + @staticmethod + def _get_alert_policy_id_by_name(clc, module, alias, alert_policy_name): + """ + Returns the alert policy id for the given alert policy name + :param clc: the clc-sdk instance to use + 
:param module: the AnsibleModule object + :param alias: the clc account alias + :param alert_policy_name: the name of the alert policy + :return: alert_policy_id: the alert policy id + """ + alert_policy_id = None + policies = clc.v2.API.Call('GET', '/v2/alertPolicies/%s' % alias) + if not policies: + return alert_policy_id + for policy in policies.get('items'): + if policy.get('name') == alert_policy_name: + if not alert_policy_id: + alert_policy_id = policy.get('id') + else: + return module.fail_json( + msg='multiple alert policies were found with policy name : %s' % alert_policy_name) + return alert_policy_id + + @staticmethod + def _delete_servers(module, clc, server_ids): + """ + Delete the servers on the provided list + :param module: the AnsibleModule object + :param clc: the clc-sdk instance to use + :param server_ids: list of servers to delete + :return: a list of dictionaries with server information about the servers that were deleted + """ + terminated_server_ids = [] + server_dict_array = [] + request_list = [] + + if not isinstance(server_ids, list) or len(server_ids) < 1: + return module.fail_json( + msg='server_ids should be a list of servers, aborting') + + servers = clc.v2.Servers(server_ids).Servers() + for server in servers: + if not module.check_mode: + request_list.append(server.Delete()) + ClcServer._wait_for_requests(module, request_list) + + for server in servers: + terminated_server_ids.append(server.id) + + return True, server_dict_array, terminated_server_ids + + @staticmethod + def _start_stop_servers(module, clc, server_ids): + """ + Start or Stop the servers on the provided list + :param module: the AnsibleModule object + :param clc: the clc-sdk instance to use + :param server_ids: list of servers to start or stop + :return: a list of dictionaries with server information about the servers that were started or stopped + """ + p = module.params + state = p.get('state') + changed = False + changed_servers = [] + server_dict_array = [] + result_server_ids = [] + request_list = [] + + if not isinstance(server_ids, list) or len(server_ids) < 1: + return module.fail_json( + msg='server_ids should be a list of servers, aborting') + + servers = clc.v2.Servers(server_ids).Servers() + for server in servers: + if server.powerState != state: + changed_servers.append(server) + if not module.check_mode: + request_list.append( + ClcServer._change_server_power_state( + module, + server, + state)) + changed = True + + ClcServer._wait_for_requests(module, request_list) + ClcServer._refresh_servers(module, changed_servers) + + for server in set(changed_servers + servers): + try: + server.data['ipaddress'] = server.details[ + 'ipAddresses'][0]['internal'] + server.data['publicip'] = str( + server.PublicIPs().public_ips[0]) + except (KeyError, IndexError): + pass + + server_dict_array.append(server.data) + result_server_ids.append(server.id) + + return changed, server_dict_array, result_server_ids + + @staticmethod + def _change_server_power_state(module, server, state): + """ + Change the server powerState + :param module: the module to check for intended state + :param server: the server to start or stop + :param state: the intended powerState for the server + :return: the request object from clc-sdk call + """ + result = None + try: + if state == 'started': + result = server.PowerOn() + else: + # Try to shut down the server and fall back to power off when unable to shut down. 
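+            # ShutDown() asks the guest OS for a clean shutdown; if the API does
+            # not return a queued request for it, PowerOff() is used instead to
+            # force the power state.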
+ result = server.ShutDown() + if result and hasattr(result, 'requests') and result.requests[0]: + return result + else: + result = server.PowerOff() + except CLCException: + module.fail_json( + msg='Unable to change power state for server {0}'.format( + server.id)) + return result + + @staticmethod + def _find_running_servers_by_group(module, datacenter, count_group): + """ + Find a list of running servers in the provided group + :param module: the AnsibleModule object + :param datacenter: the clc-sdk.Datacenter instance to use to lookup the group + :param count_group: the group to count the servers + :return: list of servers, and list of running servers + """ + group = ClcServer._find_group( + module=module, + datacenter=datacenter, + lookup_group=count_group) + + servers = group.Servers().Servers() + running_servers = [] + + for server in servers: + if server.status == 'active' and server.powerState == 'started': + running_servers.append(server) + + return servers, running_servers + + @staticmethod + def _find_group(module, datacenter, lookup_group=None): + """ + Find a server group in a datacenter by calling the CLC API + :param module: the AnsibleModule instance + :param datacenter: clc-sdk.Datacenter instance to search for the group + :param lookup_group: string name of the group to search for + :return: clc-sdk.Group instance + """ + if not lookup_group: + lookup_group = module.params.get('group') + try: + return datacenter.Groups().Get(lookup_group) + except CLCException: + pass + + # The search above only acts on the main + result = ClcServer._find_group_recursive( + module, + datacenter.Groups(), + lookup_group) + + if result is None: + module.fail_json( + msg=str( + "Unable to find group: " + + lookup_group + + " in location: " + + datacenter.id)) + + return result + + @staticmethod + def _find_group_recursive(module, group_list, lookup_group): + """ + Find a server group by recursively walking the tree + :param module: the AnsibleModule instance to use + :param group_list: a list of groups to search + :param lookup_group: the group to look for + :return: list of groups + """ + result = None + for group in group_list.groups: + subgroups = group.Subgroups() + try: + return subgroups.Get(lookup_group) + except CLCException: + result = ClcServer._find_group_recursive( + module, + subgroups, + lookup_group) + + if result is not None: + break + + return result + + @staticmethod + def _create_clc_server( + clc, + module, + server_params): + """ + Call the CLC Rest API to Create a Server + :param clc: the clc-python-sdk instance to use + :param module: the AnsibleModule instance to use + :param server_params: a dictionary of params to use to create the servers + :return: clc-sdk.Request object linked to the queued server request + """ + + try: + res = clc.v2.API.Call( + method='POST', + url='servers/%s' % + (server_params.get('alias')), + payload=json.dumps( + { + 'name': server_params.get('name'), + 'description': server_params.get('description'), + 'groupId': server_params.get('group_id'), + 'sourceServerId': server_params.get('template'), + 'isManagedOS': server_params.get('managed_os'), + 'primaryDNS': server_params.get('primary_dns'), + 'secondaryDNS': server_params.get('secondary_dns'), + 'networkId': server_params.get('network_id'), + 'ipAddress': server_params.get('ip_address'), + 'password': server_params.get('password'), + 'sourceServerPassword': server_params.get('source_server_password'), + 'cpu': server_params.get('cpu'), + 'cpuAutoscalePolicyId': 
server_params.get('cpu_autoscale_policy_id'), + 'memoryGB': server_params.get('memory'), + 'type': server_params.get('type'), + 'storageType': server_params.get('storage_type'), + 'antiAffinityPolicyId': server_params.get('anti_affinity_policy_id'), + 'customFields': server_params.get('custom_fields'), + 'additionalDisks': server_params.get('additional_disks'), + 'ttl': server_params.get('ttl'), + 'packages': server_params.get('packages'), + 'configurationId': server_params.get('configuration_id'), + 'osType': server_params.get('os_type')})) + + result = clc.v2.Requests(res) + except APIFailedResponse as ex: + return module.fail_json(msg='Unable to create the server: {0}. {1}'.format( + server_params.get('name'), + ex.response_text + )) + + # + # Patch the Request object so that it returns a valid server + + # Find the server's UUID from the API response + server_uuid = [obj['id'] + for obj in res['links'] if obj['rel'] == 'self'][0] + + # Change the request server method to a _find_server_by_uuid closure so + # that it will work + result.requests[0].Server = lambda: ClcServer._find_server_by_uuid_w_retry( + clc, + module, + server_uuid, + server_params.get('alias')) + + return result + + @staticmethod + def _get_anti_affinity_policy_id(clc, module, alias, aa_policy_name): + """ + retrieves the anti affinity policy id of the server based on the name of the policy + :param clc: the clc-sdk instance to use + :param module: the AnsibleModule object + :param alias: the CLC account alias + :param aa_policy_name: the anti affinity policy name + :return: aa_policy_id: The anti affinity policy id + """ + aa_policy_id = None + try: + aa_policies = clc.v2.API.Call(method='GET', + url='antiAffinityPolicies/%s' % alias) + except APIFailedResponse as ex: + return module.fail_json(msg='Unable to fetch anti affinity policies for account: {0}. {1}'.format( + alias, ex.response_text)) + for aa_policy in aa_policies.get('items'): + if aa_policy.get('name') == aa_policy_name: + if not aa_policy_id: + aa_policy_id = aa_policy.get('id') + else: + return module.fail_json( + msg='multiple anti affinity policies were found with policy name : %s' % aa_policy_name) + return aa_policy_id + + # + # This is the function that gets patched to the Request.server object using a lamda closure + # + + @staticmethod + def _find_server_by_uuid_w_retry( + clc, module, svr_uuid, alias=None, retries=5, back_out=2): + """ + Find the clc server by the UUID returned from the provisioning request. Retry the request if a 404 is returned. + :param clc: the clc-sdk instance to use + :param module: the AnsibleModule object + :param svr_uuid: UUID of the server + :param retries: the number of retry attempts to make prior to fail. 
default is 5 + :param alias: the Account Alias to search + :return: a clc-sdk.Server instance + """ + if not alias: + alias = clc.v2.Account.GetAlias() + + # Wait and retry if the api returns a 404 + while True: + retries -= 1 + try: + server_obj = clc.v2.API.Call( + method='GET', url='servers/%s/%s?uuid=true' % + (alias, svr_uuid)) + server_id = server_obj['id'] + server = clc.v2.Server( + id=server_id, + alias=alias, + server_obj=server_obj) + return server + + except APIFailedResponse as e: + if e.response_status_code != 404: + return module.fail_json( + msg='A failure response was received from CLC API when ' + 'attempting to get details for a server: UUID=%s, Code=%i, Message=%s' % + (svr_uuid, e.response_status_code, e.message)) + if retries == 0: + return module.fail_json( + msg='Unable to reach the CLC API after 5 attempts') + time.sleep(back_out) + back_out *= 2 + + @staticmethod + def _set_user_agent(clc): + if hasattr(clc, 'SetRequestsSession'): + agent_string = "ClcAnsibleModule/" + __version__ + ses = requests.Session() + ses.headers.update({"Api-Client": agent_string}) + ses.headers['User-Agent'] += " " + agent_string + clc.SetRequestsSession(ses) + + +def main(): + """ + The main function. Instantiates the module and calls process_request. + :return: none + """ + argument_dict = ClcServer._define_module_argument_spec() + module = AnsibleModule(supports_check_mode=True, **argument_dict) + clc_server = ClcServer(module) + clc_server.process_request() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/centurylink/clc_server_snapshot.py b/plugins/modules/cloud/centurylink/clc_server_snapshot.py new file mode 100644 index 0000000000..bce4619371 --- /dev/null +++ b/plugins/modules/cloud/centurylink/clc_server_snapshot.py @@ -0,0 +1,411 @@ +#!/usr/bin/python +# +# Copyright (c) 2015 CenturyLink +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +module: clc_server_snapshot +short_description: Create, Delete and Restore server snapshots in CenturyLink Cloud. +description: + - An Ansible module to Create, Delete and Restore server snapshots in CenturyLink Cloud. +options: + server_ids: + description: + - The list of CLC server Ids. + required: True + expiration_days: + description: + - The number of days to keep the server snapshot before it expires. + default: 7 + required: False + state: + description: + - The state to insure that the provided resources are in. + default: 'present' + required: False + choices: ['present', 'absent', 'restore'] + wait: + description: + - Whether to wait for the provisioning tasks to finish before returning. + default: True + required: False + type: bool +requirements: + - python = 2.7 + - requests >= 2.5.0 + - clc-sdk +author: "CLC Runner (@clc-runner)" +notes: + - To use this module, it is required to set the below environment variables which enables access to the + Centurylink Cloud + - CLC_V2_API_USERNAME, the account login id for the centurylink cloud + - CLC_V2_API_PASSWORD, the account password for the centurylink cloud + - Alternatively, the module accepts the API token and account alias. 
The API token can be generated using the + CLC account login and password via the HTTP api call @ https://api.ctl.io/v2/authentication/login + - CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login + - CLC_ACCT_ALIAS, the account alias associated with the centurylink cloud + - Users can set CLC_V2_API_URL to specify an endpoint for pointing to a different CLC environment. +''' + +EXAMPLES = ''' +# Note - You must set the CLC_V2_API_USERNAME And CLC_V2_API_PASSWD Environment variables before running these examples + +- name: Create server snapshot + clc_server_snapshot: + server_ids: + - UC1TEST-SVR01 + - UC1TEST-SVR02 + expiration_days: 10 + wait: True + state: present + +- name: Restore server snapshot + clc_server_snapshot: + server_ids: + - UC1TEST-SVR01 + - UC1TEST-SVR02 + wait: True + state: restore + +- name: Delete server snapshot + clc_server_snapshot: + server_ids: + - UC1TEST-SVR01 + - UC1TEST-SVR02 + wait: True + state: absent +''' + +RETURN = ''' +server_ids: + description: The list of server ids that are changed + returned: success + type: list + sample: + [ + "UC1TEST-SVR01", + "UC1TEST-SVR02" + ] +''' + +__version__ = '${version}' + +import os +import traceback +from distutils.version import LooseVersion + +REQUESTS_IMP_ERR = None +try: + import requests +except ImportError: + REQUESTS_IMP_ERR = traceback.format_exc() + REQUESTS_FOUND = False +else: + REQUESTS_FOUND = True + +# +# Requires the clc-python-sdk. +# sudo pip install clc-sdk +# +CLC_IMP_ERR = None +try: + import clc as clc_sdk + from clc import CLCException +except ImportError: + CLC_IMP_ERR = traceback.format_exc() + CLC_FOUND = False + clc_sdk = None +else: + CLC_FOUND = True + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib + + +class ClcSnapshot: + + clc = clc_sdk + module = None + + def __init__(self, module): + """ + Construct module + """ + self.module = module + + if not CLC_FOUND: + self.module.fail_json(msg=missing_required_lib('clc-sdk'), exception=CLC_IMP_ERR) + if not REQUESTS_FOUND: + self.module.fail_json(msg=missing_required_lib('requests'), exception=REQUESTS_IMP_ERR) + if requests.__version__ and LooseVersion( + requests.__version__) < LooseVersion('2.5.0'): + self.module.fail_json( + msg='requests library version should be >= 2.5.0') + + self._set_user_agent(self.clc) + + def process_request(self): + """ + Process the request - Main Code Path + :return: Returns with either an exit_json or fail_json + """ + p = self.module.params + server_ids = p['server_ids'] + expiration_days = p['expiration_days'] + state = p['state'] + request_list = [] + changed = False + changed_servers = [] + + self._set_clc_credentials_from_env() + if state == 'present': + changed, request_list, changed_servers = self.ensure_server_snapshot_present( + server_ids=server_ids, + expiration_days=expiration_days) + elif state == 'absent': + changed, request_list, changed_servers = self.ensure_server_snapshot_absent( + server_ids=server_ids) + elif state == 'restore': + changed, request_list, changed_servers = self.ensure_server_snapshot_restore( + server_ids=server_ids) + + self._wait_for_requests_to_complete(request_list) + return self.module.exit_json( + changed=changed, + server_ids=changed_servers) + + def ensure_server_snapshot_present(self, server_ids, expiration_days): + """ + Ensures the given set of server_ids have the snapshots created + :param server_ids: The list of server_ids to create the snapshot + :param expiration_days: The number of days to 
keep the snapshot + :return: (changed, request_list, changed_servers) + changed: A flag indicating whether any change was made + request_list: the list of clc request objects from CLC API call + changed_servers: The list of servers ids that are modified + """ + request_list = [] + changed = False + servers = self._get_servers_from_clc( + server_ids, + 'Failed to obtain server list from the CLC API') + servers_to_change = [ + server for server in servers if len( + server.GetSnapshots()) == 0] + for server in servers_to_change: + changed = True + if not self.module.check_mode: + request = self._create_server_snapshot(server, expiration_days) + request_list.append(request) + changed_servers = [ + server.id for server in servers_to_change if server.id] + return changed, request_list, changed_servers + + def _create_server_snapshot(self, server, expiration_days): + """ + Create the snapshot for the CLC server + :param server: the CLC server object + :param expiration_days: The number of days to keep the snapshot + :return: the create request object from CLC API Call + """ + result = None + try: + result = server.CreateSnapshot( + delete_existing=True, + expiration_days=expiration_days) + except CLCException as ex: + self.module.fail_json(msg='Failed to create snapshot for server : {0}. {1}'.format( + server.id, ex.response_text + )) + return result + + def ensure_server_snapshot_absent(self, server_ids): + """ + Ensures the given set of server_ids have the snapshots removed + :param server_ids: The list of server_ids to delete the snapshot + :return: (changed, request_list, changed_servers) + changed: A flag indicating whether any change was made + request_list: the list of clc request objects from CLC API call + changed_servers: The list of servers ids that are modified + """ + request_list = [] + changed = False + servers = self._get_servers_from_clc( + server_ids, + 'Failed to obtain server list from the CLC API') + servers_to_change = [ + server for server in servers if len( + server.GetSnapshots()) > 0] + for server in servers_to_change: + changed = True + if not self.module.check_mode: + request = self._delete_server_snapshot(server) + request_list.append(request) + changed_servers = [ + server.id for server in servers_to_change if server.id] + return changed, request_list, changed_servers + + def _delete_server_snapshot(self, server): + """ + Delete snapshot for the CLC server + :param server: the CLC server object + :return: the delete snapshot request object from CLC API + """ + result = None + try: + result = server.DeleteSnapshot() + except CLCException as ex: + self.module.fail_json(msg='Failed to delete snapshot for server : {0}. 
{1}'.format( + server.id, ex.response_text + )) + return result + + def ensure_server_snapshot_restore(self, server_ids): + """ + Ensures the given set of server_ids have the snapshots restored + :param server_ids: The list of server_ids to delete the snapshot + :return: (changed, request_list, changed_servers) + changed: A flag indicating whether any change was made + request_list: the list of clc request objects from CLC API call + changed_servers: The list of servers ids that are modified + """ + request_list = [] + changed = False + servers = self._get_servers_from_clc( + server_ids, + 'Failed to obtain server list from the CLC API') + servers_to_change = [ + server for server in servers if len( + server.GetSnapshots()) > 0] + for server in servers_to_change: + changed = True + if not self.module.check_mode: + request = self._restore_server_snapshot(server) + request_list.append(request) + changed_servers = [ + server.id for server in servers_to_change if server.id] + return changed, request_list, changed_servers + + def _restore_server_snapshot(self, server): + """ + Restore snapshot for the CLC server + :param server: the CLC server object + :return: the restore snapshot request object from CLC API + """ + result = None + try: + result = server.RestoreSnapshot() + except CLCException as ex: + self.module.fail_json(msg='Failed to restore snapshot for server : {0}. {1}'.format( + server.id, ex.response_text + )) + return result + + def _wait_for_requests_to_complete(self, requests_lst): + """ + Waits until the CLC requests are complete if the wait argument is True + :param requests_lst: The list of CLC request objects + :return: none + """ + if not self.module.params['wait']: + return + for request in requests_lst: + request.WaitUntilComplete() + for request_details in request.requests: + if request_details.Status() != 'succeeded': + self.module.fail_json( + msg='Unable to process server snapshot request') + + @staticmethod + def define_argument_spec(): + """ + This function defines the dictionary object required for + package module + :return: the package dictionary object + """ + argument_spec = dict( + server_ids=dict(type='list', required=True), + expiration_days=dict(default=7, type='int'), + wait=dict(default=True), + state=dict( + default='present', + choices=[ + 'present', + 'absent', + 'restore']), + ) + return argument_spec + + def _get_servers_from_clc(self, server_list, message): + """ + Internal function to fetch list of CLC server objects from a list of server ids + :param server_list: The list of server ids + :param message: The error message to throw in case of any error + :return the list of CLC server objects + """ + try: + return self.clc.v2.Servers(server_list).servers + except CLCException as ex: + return self.module.fail_json(msg=message + ': %s' % ex) + + def _set_clc_credentials_from_env(self): + """ + Set the CLC Credentials on the sdk by reading environment variables + :return: none + """ + env = os.environ + v2_api_token = env.get('CLC_V2_API_TOKEN', False) + v2_api_username = env.get('CLC_V2_API_USERNAME', False) + v2_api_passwd = env.get('CLC_V2_API_PASSWD', False) + clc_alias = env.get('CLC_ACCT_ALIAS', False) + api_url = env.get('CLC_V2_API_URL', False) + + if api_url: + self.clc.defaults.ENDPOINT_URL_V2 = api_url + + if v2_api_token and clc_alias: + self.clc._LOGIN_TOKEN_V2 = v2_api_token + self.clc._V2_ENABLED = True + self.clc.ALIAS = clc_alias + elif v2_api_username and v2_api_passwd: + self.clc.v2.SetCredentials( + api_username=v2_api_username, + 
api_passwd=v2_api_passwd) + else: + return self.module.fail_json( + msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD " + "environment variables") + + @staticmethod + def _set_user_agent(clc): + if hasattr(clc, 'SetRequestsSession'): + agent_string = "ClcAnsibleModule/" + __version__ + ses = requests.Session() + ses.headers.update({"Api-Client": agent_string}) + ses.headers['User-Agent'] += " " + agent_string + clc.SetRequestsSession(ses) + + +def main(): + """ + Main function + :return: None + """ + module = AnsibleModule( + argument_spec=ClcSnapshot.define_argument_spec(), + supports_check_mode=True + ) + clc_snapshot = ClcSnapshot(module) + clc_snapshot.process_request() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/cloudscale/cloudscale_floating_ip.py b/plugins/modules/cloud/cloudscale/cloudscale_floating_ip.py new file mode 100644 index 0000000000..1b82d7d9a6 --- /dev/null +++ b/plugins/modules/cloud/cloudscale/cloudscale_floating_ip.py @@ -0,0 +1,300 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (c) 2017, Gaudenz Steinlin +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: cloudscale_floating_ip +short_description: Manages floating IPs on the cloudscale.ch IaaS service +description: + - Create, assign and delete floating IPs on the cloudscale.ch IaaS service. +notes: + - To create a new floating IP at least the C(ip_version) and C(server) options are required. + - Once a floating_ip is created all parameters except C(server) are read-only. + - It's not possible to request a floating IP without associating it with a server at the same time. + - This module requires the ipaddress python library. This library is included in Python since version 3.3. It is available as a + module on PyPI for earlier versions. +author: + - Gaudenz Steinlin (@gaudenz) + - Denis Krienbühl (@href) +options: + state: + description: + - State of the floating IP. + default: present + choices: [ present, absent ] + type: str + ip: + description: + - Floating IP address to change. + - Required to assign the IP to a different server or if I(state) is absent. + aliases: [ network ] + type: str + ip_version: + description: + - IP protocol version of the floating IP. + choices: [ 4, 6 ] + type: int + server: + description: + - UUID of the server assigned to this floating IP. + - Required unless I(state) is absent. + type: str + type: + description: + - The type of the floating IP. + choices: [ regional, global ] + type: str + default: regional + region: + description: + - Region in which the floating IP resides (e.g. C(lgp) or C(rma)). + If omitted, the region of the project default zone is used. + This parameter must be omitted if I(type) is set to C(global). + type: str + prefix_length: + description: + - Only valid if I(ip_version) is 6. + - Prefix length for the IPv6 network. Currently only a prefix of /56 can be requested. If no I(prefix_length) is present, a + single address is created. + choices: [ 56 ] + type: int + reverse_ptr: + description: + - Reverse PTR entry for this address. + - You cannot set a reverse PTR entry for IPv6 floating networks. Reverse PTR entries are only allowed for single addresses. 
+ type: str +extends_documentation_fragment: +- community.general.cloudscale + +''' + +EXAMPLES = ''' +# Request a new floating IP +- name: Request a floating IP + cloudscale_floating_ip: + ip_version: 4 + server: 47cec963-fcd2-482f-bdb6-24461b2d47b1 + reverse_ptr: my-server.example.com + api_token: xxxxxx + register: floating_ip + +# Assign an existing floating IP to a different server +- name: Move floating IP to backup server + cloudscale_floating_ip: + ip: 192.0.2.123 + server: ea3b39a3-77a8-4d0b-881d-0bb00a1e7f48 + api_token: xxxxxx + +# Request a new floating IPv6 network +- name: Request a floating IP + cloudscale_floating_ip: + ip_version: 6 + prefix_length: 56 + server: 47cec963-fcd2-482f-bdb6-24461b2d47b1 + api_token: xxxxxx + region: lpg1 + register: floating_ip + +# Assign an existing floating network to a different server +- name: Move floating IP to backup server + cloudscale_floating_ip: + ip: '{{ floating_ip.network | ip }}' + server: ea3b39a3-77a8-4d0b-881d-0bb00a1e7f48 + api_token: xxxxxx + +# Release a floating IP +- name: Release floating IP + cloudscale_floating_ip: + ip: 192.0.2.123 + state: absent + api_token: xxxxxx +''' + +RETURN = ''' +href: + description: The API URL to get details about this floating IP. + returned: success when state == present + type: str + sample: https://api.cloudscale.ch/v1/floating-ips/2001:db8::cafe +network: + description: The CIDR notation of the network that is routed to your server. + returned: success when state == present + type: str + sample: 2001:db8::cafe/128 +next_hop: + description: Your floating IP is routed to this IP address. + returned: success when state == present + type: str + sample: 2001:db8:dead:beef::42 +reverse_ptr: + description: The reverse pointer for this floating IP address. + returned: success when state == present + type: str + sample: 185-98-122-176.cust.cloudscale.ch +server: + description: The floating IP is routed to this server. + returned: success when state == present + type: str + sample: 47cec963-fcd2-482f-bdb6-24461b2d47b1 +ip: + description: The floating IP address or network. This is always present and used to identify floating IPs after creation. + returned: success + type: str + sample: 185.98.122.176 +region: + description: The region of the floating IP. + returned: success when state == present + type: dict + sample: {'slug': 'lpg'} + version_added: '2.10' +state: + description: The current status of the floating IP. + returned: success + type: str + sample: present +''' + +import traceback + +IPADDRESS_IMP_ERR = None +try: + from ipaddress import ip_network + HAS_IPADDRESS = True +except ImportError: + IPADDRESS_IMP_ERR = traceback.format_exc() + HAS_IPADDRESS = False + +from ansible.module_utils.basic import AnsibleModule, env_fallback, missing_required_lib +from ansible_collections.community.general.plugins.module_utils.cloudscale import AnsibleCloudscaleBase, cloudscale_argument_spec + + +class AnsibleCloudscaleFloatingIP(AnsibleCloudscaleBase): + + def __init__(self, module): + super(AnsibleCloudscaleFloatingIP, self).__init__(module) + + # Initialize info dict + # Set state to absent, will be updated by self.update_info() + self.info = {'state': 'absent'} + + if self._module.params['ip']: + self.update_info() + + @staticmethod + def _resp2info(resp): + # If the API response has some content, the floating IP must exist + resp['state'] = 'present' + + # Add the IP address to the response, otherwise handling get's to complicated as this + # has to be converted from the network all the time. 
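+        # e.g. '192.0.2.123/32' -> '192.0.2.123' and '2001:db8::/56' -> '2001:db8::'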
+ resp['ip'] = str(ip_network(resp['network']).network_address) + + # Replace the server with just the UUID, the href to the server is useless and just makes + # things more complicated + if resp['server'] is not None: + resp['server'] = resp['server']['uuid'] + + return resp + + def update_info(self): + resp = self._get('floating-ips/' + self._module.params['ip']) + if resp: + self.info = self._resp2info(resp) + else: + self.info = {'ip': self._module.params['ip'], + 'state': 'absent'} + + def request_floating_ip(self): + params = self._module.params + + # check for required parameters to request a floating IP + missing_parameters = [] + for p in ('ip_version', 'server'): + if p not in params or not params[p]: + missing_parameters.append(p) + + if len(missing_parameters) > 0: + self._module.fail_json(msg='Missing required parameter(s) to request a floating IP: %s.' % + ' '.join(missing_parameters)) + + data = {'ip_version': params['ip_version'], + 'server': params['server']} + + for p in ('prefix_length', 'reverse_ptr', 'type', 'region'): + if params[p]: + data[p] = params[p] + + self.info = self._resp2info(self._post('floating-ips', data)) + + def release_floating_ip(self): + self._delete('floating-ips/%s' % self._module.params['ip']) + self.info = {'ip': self.info['ip'], 'state': 'absent'} + + def update_floating_ip(self): + params = self._module.params + if 'server' not in params or not params['server']: + self._module.fail_json(msg='Missing required parameter to update a floating IP: server.') + self.info = self._resp2info(self._post('floating-ips/%s' % params['ip'], {'server': params['server']})) + + +def main(): + argument_spec = cloudscale_argument_spec() + argument_spec.update(dict( + state=dict(default='present', choices=('present', 'absent'), type='str'), + ip=dict(aliases=('network', ), type='str'), + ip_version=dict(choices=(4, 6), type='int'), + server=dict(type='str'), + type=dict(type='str', choices=('regional', 'global'), default='regional'), + region=dict(type='str'), + prefix_length=dict(choices=(56,), type='int'), + reverse_ptr=dict(type='str'), + )) + + module = AnsibleModule( + argument_spec=argument_spec, + required_one_of=(('ip', 'ip_version'),), + supports_check_mode=True, + ) + + if not HAS_IPADDRESS: + module.fail_json(msg=missing_required_lib('ipaddress'), exception=IPADDRESS_IMP_ERR) + + target_state = module.params['state'] + target_server = module.params['server'] + floating_ip = AnsibleCloudscaleFloatingIP(module) + current_state = floating_ip.info['state'] + current_server = floating_ip.info['server'] if 'server' in floating_ip.info else None + + if module.check_mode: + module.exit_json(changed=not target_state == current_state or + (current_state == 'present' and current_server != target_server), + **floating_ip.info) + + changed = False + if current_state == 'absent' and target_state == 'present': + floating_ip.request_floating_ip() + changed = True + elif current_state == 'present' and target_state == 'absent': + floating_ip.release_floating_ip() + changed = True + elif current_state == 'present' and current_server != target_server: + floating_ip.update_floating_ip() + changed = True + + module.exit_json(changed=changed, **floating_ip.info) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/cloudscale/cloudscale_server.py b/plugins/modules/cloud/cloudscale/cloudscale_server.py new file mode 100644 index 0000000000..c6cdea09a6 --- /dev/null +++ b/plugins/modules/cloud/cloudscale/cloudscale_server.py @@ -0,0 +1,555 @@ 
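The state handling at the end of cloudscale_floating_ip's main() above reduces to a small decision table over the current and target state plus the assigned server. The sketch below is editorial, not part of the commit; the name decide_action is invented for illustration and simply replays the branch logic of main():

    def decide_action(current_state, current_server, target_state, target_server):
        """Return 'request', 'release', 'reassign' or None (no change)."""
        if current_state == 'absent' and target_state == 'present':
            return 'request'    # POST floating-ips
        if current_state == 'present' and target_state == 'absent':
            return 'release'    # DELETE floating-ips/<ip>
        if current_state == 'present' and current_server != target_server:
            return 'reassign'   # POST floating-ips/<ip> with the new server
        return None             # changed=False

    # changed=True is reported exactly when an action is taken:
    assert decide_action('absent', None, 'present', 'uuid-a') == 'request'
    assert decide_action('present', 'uuid-a', 'absent', None) == 'release'
    assert decide_action('present', 'uuid-a', 'present', 'uuid-b') == 'reassign'
    assert decide_action('present', 'uuid-a', 'present', 'uuid-a') is None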
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright: (c) 2017, Gaudenz Steinlin
+# Copyright: (c) 2019, René Moser
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: cloudscale_server
+short_description: Manages servers on the cloudscale.ch IaaS service
+description:
+  - Create, update, start, stop and delete servers on the cloudscale.ch IaaS service.
+notes:
+  - Since version 2.8, I(uuid) and I(name) are no longer mutually exclusive.
+  - If the I(uuid) option is provided, it takes precedence over I(name) for server selection. This allows the server's name to be updated.
+  - If no I(uuid) option is provided, I(name) is used for server selection. If more than one server with this name exists, execution is aborted.
+  - Only I(name) and I(flavor) are evaluated for the update.
+  - The option I(force=true) must be given to allow the reboot of existing running servers for applying the changes.
+author:
+  - Gaudenz Steinlin (@gaudenz)
+  - René Moser (@resmo)
+  - Denis Krienbühl (@href)
+options:
+  state:
+    description:
+      - State of the server.
+    choices: [ running, stopped, absent ]
+    default: running
+    type: str
+  name:
+    description:
+      - Name of the Server.
+      - Either I(name) or I(uuid) is required.
+    type: str
+  uuid:
+    description:
+      - UUID of the server.
+      - Either I(name) or I(uuid) is required.
+    type: str
+  flavor:
+    description:
+      - Flavor of the server.
+    type: str
+  image:
+    description:
+      - Image used to create the server.
+    type: str
+  zone:
+    description:
+      - Zone in which the server resides (e.g. C(lpg1) or C(rma1)).
+    type: str
+  volume_size_gb:
+    description:
+      - Size of the root volume in GB.
+    default: 10
+    type: int
+  bulk_volume_size_gb:
+    description:
+      - Size of the bulk storage volume in GB.
+      - No bulk storage volume if not set.
+    type: int
+  ssh_keys:
+    description:
+      - List of SSH public keys.
+      - Use the full content of your .pub file here.
+    type: list
+  password:
+    description:
+      - Password for the server.
+    type: str
+  use_public_network:
+    description:
+      - Attach a public network interface to the server.
+    default: yes
+    type: bool
+  use_private_network:
+    description:
+      - Attach a private network interface to the server.
+    default: no
+    type: bool
+  use_ipv6:
+    description:
+      - Enable IPv6 on the public network interface.
+    default: yes
+    type: bool
+  anti_affinity_with:
+    description:
+      - UUID of another server to create an anti-affinity group with.
+      - Mutually exclusive with I(server_groups).
+      - Deprecated, removed in version 2.11.
+    type: str
+  server_groups:
+    description:
+      - List of UUID or names of server groups.
+      - Mutually exclusive with I(anti_affinity_with).
+    type: list
+  user_data:
+    description:
+      - Cloud-init configuration (cloud-config) data to use for the server.
+    type: str
+  force:
+    description:
+      - Allow the running server to be stopped for updating if necessary.
+    default: no
+    type: bool
+  tags:
+    description:
+      - Tags associated with the server. Set this to C({}) to clear any tags.
+ type: dict +extends_documentation_fragment: +- community.general.cloudscale + +''' + +EXAMPLES = ''' +# Create and start a server with an existing server group (shiny-group) +- name: Start cloudscale.ch server + cloudscale_server: + name: my-shiny-cloudscale-server + image: debian-8 + flavor: flex-4 + ssh_keys: ssh-rsa XXXXXXXXXX...XXXX ansible@cloudscale + server_groups: shiny-group + zone: lpg1 + use_private_network: True + bulk_volume_size_gb: 100 + api_token: xxxxxx + +# Start another server in anti-affinity (server group shiny-group) +- name: Start second cloudscale.ch server + cloudscale_server: + name: my-other-shiny-server + image: ubuntu-16.04 + flavor: flex-8 + ssh_keys: ssh-rsa XXXXXXXXXXX ansible@cloudscale + server_groups: shiny-group + zone: lpg1 + api_token: xxxxxx + + +# Force to update the flavor of a running server +- name: Start cloudscale.ch server + cloudscale_server: + name: my-shiny-cloudscale-server + image: debian-8 + flavor: flex-8 + force: yes + ssh_keys: ssh-rsa XXXXXXXXXX...XXXX ansible@cloudscale + use_private_network: True + bulk_volume_size_gb: 100 + api_token: xxxxxx + register: server1 + +# Stop the first server +- name: Stop my first server + cloudscale_server: + uuid: '{{ server1.uuid }}' + state: stopped + api_token: xxxxxx + +# Delete my second server +- name: Delete my second server + cloudscale_server: + name: my-other-shiny-server + state: absent + api_token: xxxxxx + +# Start a server and wait for the SSH host keys to be generated +- name: Start server and wait for SSH host keys + cloudscale_server: + name: my-cloudscale-server-with-ssh-key + image: debian-8 + flavor: flex-4 + ssh_keys: ssh-rsa XXXXXXXXXXX ansible@cloudscale + api_token: xxxxxx + register: server + until: server.ssh_fingerprints is defined and server.ssh_fingerprints + retries: 60 + delay: 2 +''' + +RETURN = ''' +href: + description: API URL to get details about this server + returned: success when not state == absent + type: str + sample: https://api.cloudscale.ch/v1/servers/cfde831a-4e87-4a75-960f-89b0148aa2cc +uuid: + description: The unique identifier for this server + returned: success + type: str + sample: cfde831a-4e87-4a75-960f-89b0148aa2cc +name: + description: The display name of the server + returned: success + type: str + sample: its-a-me-mario.cloudscale.ch +state: + description: The current status of the server + returned: success + type: str + sample: running +flavor: + description: The flavor that has been used for this server + returned: success when not state == absent + type: dict + sample: { "slug": "flex-4", "name": "Flex-4", "vcpu_count": 2, "memory_gb": 4 } +image: + description: The image used for booting this server + returned: success when not state == absent + type: dict + sample: { "default_username": "ubuntu", "name": "Ubuntu 18.04 LTS", "operating_system": "Ubuntu", "slug": "ubuntu-18.04" } +zone: + description: The zone used for booting this server + returned: success when not state == absent + type: dict + sample: { 'slug': 'lpg1' } + version_added: '2.10' +volumes: + description: List of volumes attached to the server + returned: success when not state == absent + type: list + sample: [ {"type": "ssd", "device": "/dev/vda", "size_gb": "50"} ] +interfaces: + description: List of network ports attached to the server + returned: success when not state == absent + type: list + sample: [ { "type": "public", "addresses": [ ... ] } ] +ssh_fingerprints: + description: A list of SSH host key fingerprints. 
Will be null until the host keys could be retrieved from the server.
+  returned: success when not state == absent
+  type: list
+  sample: ["ecdsa-sha2-nistp256 SHA256:XXXX", ... ]
+ssh_host_keys:
+  description: A list of SSH host keys. Will be null until the host keys could be retrieved from the server.
+  returned: success when not state == absent
+  type: list
+  sample: ["ecdsa-sha2-nistp256 XXXXX", ... ]
+anti_affinity_with:
+  description:
+    - List of servers in the same anti-affinity group.
+    - Deprecated, will be removed in version 2.11.
+  returned: success when not state == absent
+  type: list
+  sample: []
+server_groups:
+  description: List of server groups.
+  returned: success when not state == absent
+  type: list
+  sample: [ {"href": "https://api.cloudscale.ch/v1/server-groups/...", "uuid": "...", "name": "db-group"} ]
+  version_added: '2.8'
+tags:
+  description: Tags associated with the server.
+  returned: success
+  type: dict
+  sample: { 'project': 'my project' }
+  version_added: '2.9'
+'''
+
+from datetime import datetime, timedelta
+from time import sleep
+from copy import deepcopy
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.cloudscale import AnsibleCloudscaleBase, cloudscale_argument_spec
+
+ALLOWED_STATES = ('running',
+                  'stopped',
+                  'absent',
+                  )
+
+
+class AnsibleCloudscaleServer(AnsibleCloudscaleBase):
+
+    def __init__(self, module):
+        super(AnsibleCloudscaleServer, self).__init__(module)
+
+        # Initialize server dictionary
+        self._info = {}
+
+    def _init_server_container(self):
+        return {
+            'uuid': self._module.params.get('uuid') or self._info.get('uuid'),
+            'name': self._module.params.get('name') or self._info.get('name'),
+            'state': 'absent',
+        }
+
+    def _get_server_info(self, refresh=False):
+        if self._info and not refresh:
+            return self._info
+
+        self._info = self._init_server_container()
+
+        uuid = self._info.get('uuid')
+        if uuid is not None:
+            server_info = self._get('servers/%s' % uuid)
+            if server_info:
+                self._info = self._transform_state(server_info)
+
+        else:
+            name = self._info.get('name')
+            if name is not None:
+                servers = self._get('servers') or []
+                matching_server = []
+                for server in servers:
+                    if server['name'] == name:
+                        matching_server.append(server)
+
+                if len(matching_server) == 1:
+                    self._info = self._transform_state(matching_server[0])
+                elif len(matching_server) > 1:
+                    self._module.fail_json(msg="More than one server with name '%s' exists. "
+                                               "Use the 'uuid' parameter to identify the server." % name)
+
+        return self._info
+
+    @staticmethod
+    def _transform_state(server):
+        if 'status' in server:
+            server['state'] = server['status']
+            del server['status']
+        else:
+            server['state'] = 'absent'
+        return server
+
+    def _wait_for_state(self, states):
+        start = datetime.now()
+        timeout = self._module.params['api_timeout'] * 2
+        while datetime.now() - start < timedelta(seconds=timeout):
+            server_info = self._get_server_info(refresh=True)
+            if server_info.get('state') in states:
+                return server_info
+            sleep(1)
+
+        # Timeout reached without hitting one of the target states
+        if server_info.get('name') is not None:
+            msg = "Timeout while waiting for a state change on server %s to states %s. " \
+                  "Current state is %s."
% (server_info.get('name'), states, server_info.get('state')) + else: + name_uuid = self._module.params.get('name') or self._module.params.get('uuid') + msg = 'Timeout while waiting to find the server %s' % name_uuid + + self._module.fail_json(msg=msg) + + def _start_stop_server(self, server_info, target_state="running", ignore_diff=False): + actions = { + 'stopped': 'stop', + 'running': 'start', + } + + server_state = server_info.get('state') + if server_state != target_state: + self._result['changed'] = True + + if not ignore_diff: + self._result['diff']['before'].update({ + 'state': server_info.get('state'), + }) + self._result['diff']['after'].update({ + 'state': target_state, + }) + if not self._module.check_mode: + self._post('servers/%s/%s' % (server_info['uuid'], actions[target_state])) + server_info = self._wait_for_state((target_state, )) + + return server_info + + def _update_param(self, param_key, server_info, requires_stop=False): + param_value = self._module.params.get(param_key) + if param_value is None: + return server_info + + if 'slug' in server_info[param_key]: + server_v = server_info[param_key]['slug'] + else: + server_v = server_info[param_key] + + if server_v != param_value: + # Set the diff output + self._result['diff']['before'].update({param_key: server_v}) + self._result['diff']['after'].update({param_key: param_value}) + + if server_info.get('state') == "running": + if requires_stop and not self._module.params.get('force'): + self._module.warn("Some changes won't be applied to running servers. " + "Use force=yes to allow the server '%s' to be stopped/started." % server_info['name']) + return server_info + + # Either the server is stopped or change is forced + self._result['changed'] = True + if not self._module.check_mode: + + if requires_stop: + self._start_stop_server(server_info, target_state="stopped", ignore_diff=True) + + patch_data = { + param_key: param_value, + } + + # Response is 204: No Content + self._patch('servers/%s' % server_info['uuid'], patch_data) + + # State changes to "changing" after update, waiting for stopped/running + server_info = self._wait_for_state(('stopped', 'running')) + + return server_info + + def _get_server_group_ids(self): + server_group_params = self._module.params['server_groups'] + if not server_group_params: + return None + + matching_group_names = [] + results = [] + server_groups = self._get('server-groups') + for server_group in server_groups: + if server_group['uuid'] in server_group_params: + results.append(server_group['uuid']) + server_group_params.remove(server_group['uuid']) + + elif server_group['name'] in server_group_params: + results.append(server_group['uuid']) + server_group_params.remove(server_group['name']) + # Remember the names found + matching_group_names.append(server_group['name']) + + # Names are not unique, verify if name already found in previous iterations + elif server_group['name'] in matching_group_names: + self._module.fail_json(msg="More than one server group with name exists: '%s'. " + "Use the 'uuid' parameter to identify the server group." 
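+                                   # Editor's note: server group names are not unique on
+                                   # cloudscale.ch. matching_group_names remembers names that were
+                                   # already consumed from server_group_params, so a second group
+                                   # carrying the same name still triggers this failure.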
% server_group['name'])
+
+        if server_group_params:
+            self._module.fail_json(msg="Server group name or UUID not found: %s" % ', '.join(server_group_params))
+
+        return results
+
+    def _create_server(self, server_info):
+        self._result['changed'] = True
+
+        data = deepcopy(self._module.params)
+        for i in ('uuid', 'state', 'force', 'api_timeout', 'api_token'):
+            del data[i]
+        data['server_groups'] = self._get_server_group_ids()
+
+        self._result['diff']['before'] = self._init_server_container()
+        self._result['diff']['after'] = deepcopy(data)
+        if not self._module.check_mode:
+            self._post('servers', data)
+            server_info = self._wait_for_state(('running', ))
+        return server_info
+
+    def _update_server(self, server_info):
+
+        previous_state = server_info.get('state')
+
+        # The API does not support updating server groups.
+        # Show a warning to the user if the desired state does not match.
+        desired_server_group_ids = self._get_server_group_ids()
+        if desired_server_group_ids is not None:
+            current_server_group_ids = [grp['uuid'] for grp in server_info['server_groups']]
+            if desired_server_group_ids != current_server_group_ids:
+                self._module.warn("Server groups cannot be mutated, server needs redeployment to change groups.")
+
+        server_info = self._update_param('flavor', server_info, requires_stop=True)
+        server_info = self._update_param('name', server_info)
+        server_info = self._update_param('tags', server_info)
+
+        if previous_state == "running":
+            server_info = self._start_stop_server(server_info, target_state="running", ignore_diff=True)
+
+        return server_info
+
+    def present_server(self):
+        server_info = self._get_server_info()
+
+        if server_info.get('state') != "absent":
+
+            # If the target state is stopped, stop before a potential update, so force would not be required
+            if self._module.params.get('state') == "stopped":
+                server_info = self._start_stop_server(server_info, target_state="stopped")
+
+            server_info = self._update_server(server_info)
+
+            if self._module.params.get('state') == "running":
+                server_info = self._start_stop_server(server_info, target_state="running")
+        else:
+            server_info = self._create_server(server_info)
+            server_info = self._start_stop_server(server_info, target_state=self._module.params.get('state'))
+
+        return server_info
+
+    def absent_server(self):
+        server_info = self._get_server_info()
+        if server_info.get('state') != "absent":
+            self._result['changed'] = True
+            self._result['diff']['before'] = deepcopy(server_info)
+            self._result['diff']['after'] = self._init_server_container()
+            if not self._module.check_mode:
+                self._delete('servers/%s' % server_info['uuid'])
+                server_info = self._wait_for_state(('absent', ))
+        return server_info
+
+
+def main():
+    argument_spec = cloudscale_argument_spec()
+    argument_spec.update(dict(
+        state=dict(default='running', choices=ALLOWED_STATES),
+        name=dict(),
+        uuid=dict(),
+        flavor=dict(),
+        image=dict(),
+        zone=dict(),
+        volume_size_gb=dict(type='int', default=10),
+        bulk_volume_size_gb=dict(type='int'),
+        ssh_keys=dict(type='list'),
+        password=dict(no_log=True),
+        use_public_network=dict(type='bool', default=True),
+        use_private_network=dict(type='bool', default=False),
+        use_ipv6=dict(type='bool', default=True),
+        anti_affinity_with=dict(removed_in_version='2.11'),
+        server_groups=dict(type='list'),
+        user_data=dict(),
+        force=dict(type='bool', default=False),
+        tags=dict(type='dict'),
+    ))
+
+    module = AnsibleModule(
+        argument_spec=argument_spec,
+        required_one_of=(('name', 'uuid'),),
+
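+        # Editor's note: required_one_of makes Ansible fail early unless at
+        # least one of 'name'/'uuid' is supplied; the mutually_exclusive rule
+        # below rejects tasks that combine the deprecated 'anti_affinity_with'
+        # with 'server_groups'.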
mutually_exclusive=(('anti_affinity_with', 'server_groups'),),
+        supports_check_mode=True,
+    )
+
+    cloudscale_server = AnsibleCloudscaleServer(module)
+    if module.params['state'] == "absent":
+        server = cloudscale_server.absent_server()
+    else:
+        server = cloudscale_server.present_server()
+
+    result = cloudscale_server.get_result(server)
+    module.exit_json(**result)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/cloud/cloudscale/cloudscale_server_group.py b/plugins/modules/cloud/cloudscale/cloudscale_server_group.py
new file mode 100644
index 0000000000..136f0c35a8
--- /dev/null
+++ b/plugins/modules/cloud/cloudscale/cloudscale_server_group.py
@@ -0,0 +1,237 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2019, René Moser
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: cloudscale_server_group
+short_description: Manages server groups on the cloudscale.ch IaaS service
+description:
+  - Create, update and remove server groups.
+author:
+  - René Moser (@resmo)
+  - Denis Krienbühl (@href)
+options:
+  name:
+    description:
+      - Name of the server group.
+      - Either I(name) or I(uuid) is required. These options are mutually exclusive.
+    type: str
+  uuid:
+    description:
+      - UUID of the server group.
+      - Either I(name) or I(uuid) is required. These options are mutually exclusive.
+    type: str
+  type:
+    description:
+      - Type of the server group.
+    default: anti-affinity
+    type: str
+  zone:
+    description:
+      - Zone slug of the server group (e.g. C(lpg1) or C(rma1)).
+    type: str
+  state:
+    description:
+      - State of the server group.
+    choices: [ present, absent ]
+    default: present
+    type: str
+  tags:
+    description:
+      - Tags associated with the server groups. Set this to C({}) to clear any tags.
+    type: dict
+extends_documentation_fragment:
+- community.general.cloudscale
+
+'''
+
+EXAMPLES = '''
+---
+- name: Ensure server group exists
+  cloudscale_server_group:
+    name: my-name
+    type: anti-affinity
+    api_token: xxxxxx
+
+- name: Ensure server group in a specific zone
+  cloudscale_server_group:
+    name: my-rma-group
+    type: anti-affinity
+    zone: rma1
+    api_token: xxxxxx
+
+- name: Ensure a server group is absent
+  cloudscale_server_group:
+    name: my-name
+    state: absent
+    api_token: xxxxxx
+'''
+
+RETURN = '''
+---
+href:
+  description: API URL to get details about this server group
+  returned: if available
+  type: str
+  sample: https://api.cloudscale.ch/v1/server-groups/cfde831a-4e87-4a75-960f-89b0148aa2cc
+uuid:
+  description: The unique identifier for this server group
+  returned: always
+  type: str
+  sample: cfde831a-4e87-4a75-960f-89b0148aa2cc
+name:
+  description: The display name of the server group
+  returned: always
+  type: str
+  sample: load balancers
+type:
+  description: The type of the server group
+  returned: if available
+  type: str
+  sample: anti-affinity
+zone:
+  description: The zone of the server group
+  returned: success
+  type: dict
+  sample: { 'slug': 'rma1' }
+  version_added: '2.10'
+servers:
+  description: A list of servers that are part of the server group.
+  returned: if available
+  type: list
+  sample: []
+state:
+  description: State of the server group.
+  returned: always
+  type: str
+  sample: present
+tags:
+  description: Tags associated with the server group.
+ returned: success + type: dict + sample: { 'project': 'my project' } + version_added: '2.9' +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.cloudscale import AnsibleCloudscaleBase, cloudscale_argument_spec + + +class AnsibleCloudscaleServerGroup(AnsibleCloudscaleBase): + + def __init__(self, module, namespace): + super(AnsibleCloudscaleServerGroup, self).__init__(module) + self._info = {} + + def _init_container(self): + return { + 'uuid': self._module.params.get('uuid') or self._info.get('uuid'), + 'name': self._module.params.get('name') or self._info.get('name'), + 'state': 'absent', + } + + def _create_server_group(self, server_group): + self._module.fail_on_missing_params(['name']) + self._result['changed'] = True + data = { + 'name': self._module.params.get('name'), + 'type': self._module.params.get('type'), + 'zone': self._module.params.get('zone'), + 'tags': self._module.params.get('tags'), + } + if not self._module.check_mode: + server_group = self._post('server-groups', data) + return server_group + + def _update_server_group(self, server_group): + updated = self._param_updated('name', server_group) + updated = self._param_updated('tags', server_group) or updated + + # Refresh if resource was updated in live mode + if updated and not self._module.check_mode: + server_group = self.get_server_group() + return server_group + + def get_server_group(self): + self._info = self._init_container() + + uuid = self._info.get('uuid') + if uuid is not None: + server_group = self._get('server-groups/%s' % uuid) + if server_group: + self._info.update(server_group) + self._info.update(dict(state='present')) + + else: + name = self._info.get('name') + matching_server_groups = [] + for server_group in self._get('server-groups'): + if server_group['name'] == name: + matching_server_groups.append(server_group) + + if len(matching_server_groups) > 1: + self._module.fail_json(msg="More than one server group with name exists: '%s'. " + "Use the 'uuid' parameter to identify the server group." 
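+                                   # Editor's note: same lookup strategy as in cloudscale_server
+                                   # above: a supplied UUID short-circuits to a direct GET, while a
+                                   # name falls back to scanning the full listing and must be
+                                   # unique for the module to proceed.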
% name) + elif len(matching_server_groups) == 1: + self._info.update(matching_server_groups[0]) + self._info.update(dict(state='present')) + return self._info + + def present_group(self): + server_group = self.get_server_group() + if server_group.get('state') == 'absent': + server_group = self._create_server_group(server_group) + else: + server_group = self._update_server_group(server_group) + return server_group + + def absent_group(self): + server_group = self.get_server_group() + if server_group.get('state') != 'absent': + self._result['changed'] = True + if not self._module.check_mode: + self._delete('server-groups/%s' % server_group['uuid']) + return server_group + + +def main(): + argument_spec = cloudscale_argument_spec() + argument_spec.update(dict( + name=dict(), + uuid=dict(), + type=dict(default='anti-affinity'), + zone=dict(), + tags=dict(type='dict'), + state=dict(default='present', choices=['absent', 'present']), + )) + + module = AnsibleModule( + argument_spec=argument_spec, + required_one_of=(('name', 'uuid'),), + supports_check_mode=True, + ) + cloudscale_server_group = AnsibleCloudscaleServerGroup(module, 'cloudscale_server_group') + + if module.params['state'] == 'absent': + server_group = cloudscale_server_group.absent_group() + else: + server_group = cloudscale_server_group.present_group() + + result = cloudscale_server_group.get_result(server_group) + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/cloudscale/cloudscale_volume.py b/plugins/modules/cloud/cloudscale/cloudscale_volume.py new file mode 100644 index 0000000000..22680ffab7 --- /dev/null +++ b/plugins/modules/cloud/cloudscale/cloudscale_volume.py @@ -0,0 +1,305 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (c) 2018, Gaudenz Steinlin +# Copyright (c) 2019, René Moser + +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: cloudscale_volume +short_description: Manages volumes on the cloudscale.ch IaaS service. +description: + - Create, attach/detach, update and delete volumes on the cloudscale.ch IaaS service. +notes: + - To create a new volume at least the I(name) and I(size_gb) options + are required. + - A volume can be created and attached to a server in the same task. +author: + - Gaudenz Steinlin (@gaudenz) + - René Moser (@resmo) + - Denis Krienbühl (@href) +options: + state: + description: + - State of the volume. + default: present + choices: [ present, absent ] + type: str + name: + description: + - Name of the volume. Either name or UUID must be present to change an + existing volume. + type: str + uuid: + description: + - UUID of the volume. Either name or UUID must be present to change an + existing volume. + type: str + size_gb: + description: + - Size of the volume in GB. + type: int + type: + description: + - Type of the volume. Cannot be changed after creating the volume. + Defaults to C(ssd) on volume creation. + choices: [ ssd, bulk ] + type: str + zone: + description: + - Zone in which the volume resides (e.g. C(lgp1) or C(rma1)). Cannot be + changed after creating the volume. Defaults to the project default zone. + type: str + server_uuids: + description: + - UUIDs of the servers this volume is attached to. 
Set this to C([]) to + detach the volume. Currently a volume can only be attached to a + single server. + aliases: [ server_uuid ] + type: list + tags: + description: + - Tags associated with the volume. Set this to C({}) to clear any tags. + type: dict +extends_documentation_fragment: +- community.general.cloudscale + +''' + +EXAMPLES = ''' +# Create a new SSD volume +- name: Create an SSD volume + cloudscale_volume: + name: my_ssd_volume + zone: 'lpg1' + size_gb: 50 + api_token: xxxxxx + register: my_ssd_volume + +# Attach an existing volume to a server +- name: Attach volume to server + cloudscale_volume: + uuid: my_ssd_volume.uuid + server_uuids: + - ea3b39a3-77a8-4d0b-881d-0bb00a1e7f48 + api_token: xxxxxx + +# Create and attach a volume to a server +- name: Create and attach volume to server + cloudscale_volume: + name: my_ssd_volume + zone: 'lpg1' + size_gb: 50 + server_uuids: + - ea3b39a3-77a8-4d0b-881d-0bb00a1e7f48 + api_token: xxxxxx + +# Detach volume from server +- name: Detach volume from server + cloudscale_volume: + uuid: my_ssd_volume.uuid + server_uuids: [] + api_token: xxxxxx + +# Delete a volume +- name: Delete volume + cloudscale_volume: + name: my_ssd_volume + state: absent + api_token: xxxxxx +''' + +RETURN = ''' +href: + description: The API URL to get details about this volume. + returned: state == present + type: str + sample: https://api.cloudscale.ch/v1/volumes/2db69ba3-1864-4608-853a-0771b6885a3a +uuid: + description: The unique identifier for this volume. + returned: state == present + type: str + sample: 2db69ba3-1864-4608-853a-0771b6885a3a +name: + description: The display name of the volume. + returned: state == present + type: str + sample: my_ssd_volume +size_gb: + description: The size of the volume in GB. + returned: state == present + type: str + sample: 50 +type: + description: The type of the volume. + returned: state == present + type: str + sample: bulk +zone: + description: The zone of the volume. + returned: state == present + type: dict + sample: {'slug': 'lpg1'} + version_added: '2.10' +server_uuids: + description: The UUIDs of the servers this volume is attached to. + returned: state == present + type: list + sample: ['47cec963-fcd2-482f-bdb6-24461b2d47b1'] +state: + description: The current status of the volume. + returned: success + type: str + sample: present +tags: + description: Tags associated with the volume. + returned: state == present + type: dict + sample: { 'project': 'my project' } + version_added: '2.9' +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.cloudscale import (AnsibleCloudscaleBase, + cloudscale_argument_spec, + ) + + +class AnsibleCloudscaleVolume(AnsibleCloudscaleBase): + + def __init__(self, module): + super(AnsibleCloudscaleVolume, self).__init__(module) + self._info = {} + + def _init_container(self): + return { + 'uuid': self._module.params.get('uuid') or self._info.get('uuid'), + 'name': self._module.params.get('name') or self._info.get('name'), + 'state': 'absent', + } + + def _create(self, volume): + # Fail when missing params for creation + self._module.fail_on_missing_params(['name', 'size_gb']) + + # Fail if a user uses a UUID and state=present but the volume was not found. + if self._module.params.get('uuid'): + self._module.fail_json(msg="The volume with UUID '%s' was not found " + "and we would create a new one with different UUID, " + "this is probably not want you have asked for." 
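+                                   # Editor's note: UUIDs are assigned by the API on creation, so
+                                   # "creating" a volume for a user-supplied UUID would yield a
+                                   # resource with a different UUID than the one requested; failing
+                                   # fast is the safer behaviour here.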
% self._module.params.get('uuid')) + + self._result['changed'] = True + data = { + 'name': self._module.params.get('name'), + 'type': self._module.params.get('type'), + 'zone': self._module.params.get('zone'), + 'size_gb': self._module.params.get('size_gb') or 'ssd', + 'server_uuids': self._module.params.get('server_uuids') or [], + 'tags': self._module.params.get('tags'), + } + if not self._module.check_mode: + volume = self._post('volumes', data) + return volume + + def _update(self, volume): + update_params = ( + 'name', + 'size_gb', + 'server_uuids', + 'tags', + ) + updated = False + for param in update_params: + updated = self._param_updated(param, volume) or updated + + # Refresh if resource was updated in live mode + if updated and not self._module.check_mode: + volume = self.get_volume() + return volume + + def get_volume(self): + self._info = self._init_container() + + uuid = self._info.get('uuid') + if uuid is not None: + volume = self._get('volumes/%s' % uuid) + if volume: + self._info.update(volume) + self._info['state'] = 'present' + + else: + name = self._info.get('name') + matching_volumes = [] + for volume in self._get('volumes'): + if volume['name'] == name: + matching_volumes.append(volume) + + if len(matching_volumes) > 1: + self._module.fail_json(msg="More than one volume with name exists: '%s'. " + "Use the 'uuid' parameter to identify the volume." % name) + elif len(matching_volumes) == 1: + self._info.update(matching_volumes[0]) + self._info['state'] = 'present' + return self._info + + def present(self): + volume = self.get_volume() + if volume.get('state') == 'absent': + volume = self._create(volume) + else: + volume = self._update(volume) + return volume + + def absent(self): + volume = self.get_volume() + if volume.get('state') != 'absent': + self._result['changed'] = True + if not self._module.check_mode: + volume['state'] = "absent" + self._delete('volumes/%s' % volume['uuid']) + return volume + + +def main(): + argument_spec = cloudscale_argument_spec() + argument_spec.update(dict( + state=dict(default='present', choices=('present', 'absent')), + name=dict(), + uuid=dict(), + zone=dict(), + size_gb=dict(type='int'), + type=dict(choices=('ssd', 'bulk')), + server_uuids=dict(type='list', aliases=['server_uuid']), + tags=dict(type='dict'), + )) + + module = AnsibleModule( + argument_spec=argument_spec, + required_one_of=(('name', 'uuid'),), + supports_check_mode=True, + ) + + cloudscale_volume = AnsibleCloudscaleVolume(module) + + if module.params['state'] == 'absent': + server_group = cloudscale_volume.absent() + else: + server_group = cloudscale_volume.present() + + result = cloudscale_volume.get_result(server_group) + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/cloudstack/cs_account.py b/plugins/modules/cloud/cloudstack/cs_account.py new file mode 100644 index 0000000000..1cf4becb6c --- /dev/null +++ b/plugins/modules/cloud/cloudstack/cs_account.py @@ -0,0 +1,460 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# (c) 2015, René Moser +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['stableinterface'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: cs_account +short_description: Manages accounts on Apache CloudStack based clouds. 
+description: + - Create, disable, lock, enable and remove accounts. +author: René Moser (@resmo) +options: + name: + description: + - Name of account. + type: str + required: true + username: + description: + - Username of the user to be created if account did not exist. + - Required on I(state=present). + type: str + password: + description: + - Password of the user to be created if account did not exist. + - Required on I(state=present) if I(ldap_domain) is not set. + type: str + first_name: + description: + - First name of the user to be created if account did not exist. + - Required on I(state=present) if I(ldap_domain) is not set. + type: str + last_name: + description: + - Last name of the user to be created if account did not exist. + - Required on I(state=present) if I(ldap_domain) is not set. + type: str + email: + description: + - Email of the user to be created if account did not exist. + - Required on I(state=present) if I(ldap_domain) is not set. + type: str + timezone: + description: + - Timezone of the user to be created if account did not exist. + type: str + network_domain: + description: + - Network domain of the account. + type: str + account_type: + description: + - Type of the account. + type: str + choices: [ user, root_admin, domain_admin ] + default: user + domain: + description: + - Domain the account is related to. + type: str + default: ROOT + role: + description: + - Creates the account under the specified role name or id. + type: str + ldap_domain: + description: + - Name of the LDAP group or OU to bind. + - If set, account will be linked to LDAP. + type: str + ldap_type: + description: + - Type of the ldap name. GROUP or OU, defaults to GROUP. + type: str + choices: [ GROUP, OU ] + default: GROUP + state: + description: + - State of the account. + - C(unlocked) is an alias for C(enabled). + type: str + choices: [ present, absent, enabled, disabled, locked, unlocked ] + default: present + poll_async: + description: + - Poll async jobs until job has finished. 
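+      # Illustrative note (editor's, assumed from the module code): account
+      # removal is an asynchronous CloudStack job; with I(poll_async) enabled
+      # the module blocks until the job has finished, otherwise it returns as
+      # soon as the job is queued.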
+ type: bool + default: yes +extends_documentation_fragment: +- community.general.cloudstack + +''' + +EXAMPLES = ''' +- name: create an account in domain 'CUSTOMERS' + cs_account: + name: customer_xy + username: customer_xy + password: S3Cur3 + last_name: Doe + first_name: John + email: john.doe@example.com + domain: CUSTOMERS + role: Domain Admin + delegate_to: localhost + +- name: Lock an existing account in domain 'CUSTOMERS' + cs_account: + name: customer_xy + domain: CUSTOMERS + state: locked + delegate_to: localhost + +- name: Disable an existing account in domain 'CUSTOMERS' + cs_account: + name: customer_xy + domain: CUSTOMERS + state: disabled + delegate_to: localhost + +- name: Enable an existing account in domain 'CUSTOMERS' + cs_account: + name: customer_xy + domain: CUSTOMERS + state: enabled + delegate_to: localhost + +- name: Remove an account in domain 'CUSTOMERS' + cs_account: + name: customer_xy + domain: CUSTOMERS + state: absent + delegate_to: localhost + +- name: Create a single user LDAP account in domain 'CUSTOMERS' + cs_account: + name: customer_xy + username: customer_xy + domain: CUSTOMERS + ldap_domain: cn=customer_xy,cn=team_xy,ou=People,dc=domain,dc=local + delegate_to: localhost + +- name: Create a LDAP account in domain 'CUSTOMERS' and bind it to a LDAP group + cs_account: + name: team_xy + username: customer_xy + domain: CUSTOMERS + ldap_domain: cn=team_xy,ou=People,dc=domain,dc=local + delegate_to: localhost +''' + +RETURN = ''' +--- +id: + description: UUID of the account. + returned: success + type: str + sample: 87b1e0ce-4e01-11e4-bb66-0050569e64b8 +name: + description: Name of the account. + returned: success + type: str + sample: linus@example.com +account_type: + description: Type of the account. + returned: success + type: str + sample: user +state: + description: State of the account. + returned: success + type: str + sample: enabled +network_domain: + description: Network domain of the account. + returned: success + type: str + sample: example.local +domain: + description: Domain the account is related. 
+ returned: success + type: str + sample: ROOT +role: + description: The role name of the account + returned: success + type: str + sample: Domain Admin +''' + +# import cloudstack common +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.cloudstack import ( + AnsibleCloudStack, + cs_argument_spec, + cs_required_together +) + + +class AnsibleCloudStackAccount(AnsibleCloudStack): + + def __init__(self, module): + super(AnsibleCloudStackAccount, self).__init__(module) + self.returns = { + 'networkdomain': 'network_domain', + 'rolename': 'role', + } + self.account = None + self.account_types = { + 'user': 0, + 'root_admin': 1, + 'domain_admin': 2, + } + + def get_role_id(self): + role_param = self.module.params.get('role') + role_id = None + + if role_param: + role_list = self.query_api('listRoles') + for role in role_list['role']: + if role_param in [role['name'], role['id']]: + role_id = role['id'] + + if not role_id: + self.module.fail_json(msg="Role not found: %s" % role_param) + + return role_id + + def get_account_type(self): + account_type = self.module.params.get('account_type') + return self.account_types[account_type] + + def get_account(self): + if not self.account: + args = { + 'listall': True, + 'domainid': self.get_domain(key='id'), + 'fetch_list': True, + } + accounts = self.query_api('listAccounts', **args) + if accounts: + account_name = self.module.params.get('name') + for a in accounts: + if account_name == a['name']: + self.account = a + break + + return self.account + + def enable_account(self): + account = self.get_account() + if not account: + account = self.present_account() + + if account['state'].lower() != 'enabled': + self.result['changed'] = True + args = { + 'id': account['id'], + 'account': self.module.params.get('name'), + 'domainid': self.get_domain(key='id') + } + if not self.module.check_mode: + res = self.query_api('enableAccount', **args) + account = res['account'] + return account + + def lock_account(self): + return self.lock_or_disable_account(lock=True) + + def disable_account(self): + return self.lock_or_disable_account() + + def lock_or_disable_account(self, lock=False): + account = self.get_account() + if not account: + account = self.present_account() + + # we need to enable the account to lock it. 
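+        # Editor's sketch of the transitions handled below:
+        #   enabled  -> locked    disableAccount(lock=true)
+        #   enabled  -> disabled  disableAccount(lock=false)
+        #   disabled -> locked    enableAccount() first, then lock
+        # i.e. a disabled account cannot be locked directly.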
+ if lock and account['state'].lower() == 'disabled': + account = self.enable_account() + + if (lock and account['state'].lower() != 'locked' or + not lock and account['state'].lower() != 'disabled'): + self.result['changed'] = True + args = { + 'id': account['id'], + 'account': self.module.params.get('name'), + 'domainid': self.get_domain(key='id'), + 'lock': lock, + } + if not self.module.check_mode: + account = self.query_api('disableAccount', **args) + + poll_async = self.module.params.get('poll_async') + if poll_async: + account = self.poll_job(account, 'account') + return account + + def present_account(self): + account = self.get_account() + + if not account: + self.result['changed'] = True + + if self.module.params.get('ldap_domain'): + required_params = [ + 'domain', + 'username', + ] + self.module.fail_on_missing_params(required_params=required_params) + + account = self.create_ldap_account(account) + + else: + required_params = [ + 'email', + 'username', + 'password', + 'first_name', + 'last_name', + ] + self.module.fail_on_missing_params(required_params=required_params) + + account = self.create_account(account) + + return account + + def create_ldap_account(self, account): + args = { + 'account': self.module.params.get('name'), + 'domainid': self.get_domain(key='id'), + 'accounttype': self.get_account_type(), + 'networkdomain': self.module.params.get('network_domain'), + 'username': self.module.params.get('username'), + 'timezone': self.module.params.get('timezone'), + 'roleid': self.get_role_id() + } + if not self.module.check_mode: + res = self.query_api('ldapCreateAccount', **args) + account = res['account'] + + args = { + 'account': self.module.params.get('name'), + 'domainid': self.get_domain(key='id'), + 'accounttype': self.get_account_type(), + 'ldapdomain': self.module.params.get('ldap_domain'), + 'type': self.module.params.get('ldap_type') + } + + self.query_api('linkAccountToLdap', **args) + + return account + + def create_account(self, account): + args = { + 'account': self.module.params.get('name'), + 'domainid': self.get_domain(key='id'), + 'accounttype': self.get_account_type(), + 'networkdomain': self.module.params.get('network_domain'), + 'username': self.module.params.get('username'), + 'password': self.module.params.get('password'), + 'firstname': self.module.params.get('first_name'), + 'lastname': self.module.params.get('last_name'), + 'email': self.module.params.get('email'), + 'timezone': self.module.params.get('timezone'), + 'roleid': self.get_role_id() + } + if not self.module.check_mode: + res = self.query_api('createAccount', **args) + account = res['account'] + + return account + + def absent_account(self): + account = self.get_account() + if account: + self.result['changed'] = True + + if not self.module.check_mode: + res = self.query_api('deleteAccount', id=account['id']) + + poll_async = self.module.params.get('poll_async') + if poll_async: + self.poll_job(res, 'account') + return account + + def get_result(self, account): + super(AnsibleCloudStackAccount, self).get_result(account) + if account: + if 'accounttype' in account: + for key, value in self.account_types.items(): + if value == account['accounttype']: + self.result['account_type'] = key + break + return self.result + + +def main(): + argument_spec = cs_argument_spec() + argument_spec.update(dict( + name=dict(required=True), + state=dict(choices=['present', 'absent', 'enabled', 'disabled', 'locked', 'unlocked'], default='present'), + account_type=dict(choices=['user', 'root_admin', 
'domain_admin'], default='user'), + network_domain=dict(), + domain=dict(default='ROOT'), + email=dict(), + first_name=dict(), + last_name=dict(), + username=dict(), + password=dict(no_log=True), + timezone=dict(), + role=dict(), + ldap_domain=dict(), + ldap_type=dict(choices=['GROUP', 'OU'], default='GROUP'), + poll_async=dict(type='bool', default=True), + )) + + module = AnsibleModule( + argument_spec=argument_spec, + required_together=cs_required_together(), + supports_check_mode=True + ) + + acs_acc = AnsibleCloudStackAccount(module) + + state = module.params.get('state') + + if state in ['absent']: + account = acs_acc.absent_account() + + elif state in ['enabled', 'unlocked']: + account = acs_acc.enable_account() + + elif state in ['disabled']: + account = acs_acc.disable_account() + + elif state in ['locked']: + account = acs_acc.lock_account() + + else: + account = acs_acc.present_account() + + result = acs_acc.get_result(account) + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/cloudstack/cs_affinitygroup.py b/plugins/modules/cloud/cloudstack/cs_affinitygroup.py new file mode 100644 index 0000000000..7860941cbe --- /dev/null +++ b/plugins/modules/cloud/cloudstack/cs_affinitygroup.py @@ -0,0 +1,235 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# (c) 2015, René Moser +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['stableinterface'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: cs_affinitygroup +short_description: Manages affinity groups on Apache CloudStack based clouds. +description: + - Create and remove affinity groups. +author: René Moser (@resmo) +options: + name: + description: + - Name of the affinity group. + type: str + required: true + affinity_type: + description: + - Type of the affinity group. If not specified, first found affinity type is used. + type: str + description: + description: + - Description of the affinity group. + type: str + state: + description: + - State of the affinity group. + type: str + choices: [ present, absent ] + default: present + domain: + description: + - Domain the affinity group is related to. + type: str + account: + description: + - Account the affinity group is related to. + type: str + project: + description: + - Name of the project the affinity group is related to. + type: str + poll_async: + description: + - Poll async jobs until job has finished. + type: bool + default: yes +extends_documentation_fragment: +- community.general.cloudstack + +''' + +EXAMPLES = ''' +- name: Create a affinity group + cs_affinitygroup: + name: haproxy + affinity_type: host anti-affinity + delegate_to: localhost + +- name: Remove a affinity group + cs_affinitygroup: + name: haproxy + state: absent + delegate_to: localhost +''' + +RETURN = ''' +--- +id: + description: UUID of the affinity group. + returned: success + type: str + sample: 87b1e0ce-4e01-11e4-bb66-0050569e64b8 +name: + description: Name of affinity group. + returned: success + type: str + sample: app +description: + description: Description of affinity group. + returned: success + type: str + sample: application affinity group +affinity_type: + description: Type of affinity group. + returned: success + type: str + sample: host anti-affinity +project: + description: Name of project the affinity group is related to. 
+ returned: success + type: str + sample: Production +domain: + description: Domain the affinity group is related to. + returned: success + type: str + sample: example domain +account: + description: Account the affinity group is related to. + returned: success + type: str + sample: example account +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.cloudstack import ( + AnsibleCloudStack, + cs_argument_spec, + cs_required_together +) + + +class AnsibleCloudStackAffinityGroup(AnsibleCloudStack): + + def __init__(self, module): + super(AnsibleCloudStackAffinityGroup, self).__init__(module) + self.returns = { + 'type': 'affinity_type', + } + self.affinity_group = None + + def get_affinity_group(self): + if not self.affinity_group: + + args = { + 'projectid': self.get_project(key='id'), + 'account': self.get_account(key='name'), + 'domainid': self.get_domain(key='id'), + 'name': self.module.params.get('name'), + } + affinity_groups = self.query_api('listAffinityGroups', **args) + if affinity_groups: + self.affinity_group = affinity_groups['affinitygroup'][0] + return self.affinity_group + + def get_affinity_type(self): + affinity_type = self.module.params.get('affinity_type') + + affinity_types = self.query_api('listAffinityGroupTypes', ) + if affinity_types: + if not affinity_type: + return affinity_types['affinityGroupType'][0]['type'] + + for a in affinity_types['affinityGroupType']: + if a['type'] == affinity_type: + return a['type'] + self.module.fail_json(msg="affinity group type not found: %s" % affinity_type) + + def create_affinity_group(self): + affinity_group = self.get_affinity_group() + if not affinity_group: + self.result['changed'] = True + + args = { + 'name': self.module.params.get('name'), + 'type': self.get_affinity_type(), + 'description': self.module.params.get('description'), + 'projectid': self.get_project(key='id'), + 'account': self.get_account(key='name'), + 'domainid': self.get_domain(key='id'), + } + if not self.module.check_mode: + res = self.query_api('createAffinityGroup', **args) + + poll_async = self.module.params.get('poll_async') + if res and poll_async: + affinity_group = self.poll_job(res, 'affinitygroup') + return affinity_group + + def remove_affinity_group(self): + affinity_group = self.get_affinity_group() + if affinity_group: + self.result['changed'] = True + + args = { + 'name': self.module.params.get('name'), + 'projectid': self.get_project(key='id'), + 'account': self.get_account(key='name'), + 'domainid': self.get_domain(key='id'), + } + if not self.module.check_mode: + res = self.query_api('deleteAffinityGroup', **args) + + poll_async = self.module.params.get('poll_async') + if res and poll_async: + self.poll_job(res, 'affinitygroup') + return affinity_group + + +def main(): + argument_spec = cs_argument_spec() + argument_spec.update(dict( + name=dict(required=True), + affinity_type=dict(), + description=dict(), + state=dict(choices=['present', 'absent'], default='present'), + domain=dict(), + account=dict(), + project=dict(), + poll_async=dict(type='bool', default=True), + )) + + module = AnsibleModule( + argument_spec=argument_spec, + required_together=cs_required_together(), + supports_check_mode=True + ) + + acs_ag = AnsibleCloudStackAffinityGroup(module) + + state = module.params.get('state') + if state in ['absent']: + affinity_group = acs_ag.remove_affinity_group() + else: + affinity_group = acs_ag.create_affinity_group() + + result = 
acs_ag.get_result(affinity_group) + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/cloudstack/cs_cluster.py b/plugins/modules/cloud/cloudstack/cs_cluster.py new file mode 100644 index 0000000000..852cc8c7c4 --- /dev/null +++ b/plugins/modules/cloud/cloudstack/cs_cluster.py @@ -0,0 +1,393 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# (c) 2016, René Moser +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['stableinterface'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: cs_cluster +short_description: Manages host clusters on Apache CloudStack based clouds. +description: + - Create, update and remove clusters. +author: René Moser (@resmo) +options: + name: + description: + - name of the cluster. + type: str + required: true + zone: + description: + - Name of the zone in which the cluster belongs to. + - If not set, default zone is used. + type: str + pod: + description: + - Name of the pod in which the cluster belongs to. + type: str + cluster_type: + description: + - Type of the cluster. + - Required if I(state=present) + type: str + choices: [ CloudManaged, ExternalManaged ] + hypervisor: + description: + - Name the hypervisor to be used. + - Required if I(state=present). + - Possible values are C(KVM), C(VMware), C(BareMetal), C(XenServer), C(LXC), C(HyperV), C(UCS), C(OVM), C(Simulator). + type: str + url: + description: + - URL for the cluster + type: str + username: + description: + - Username for the cluster. + type: str + password: + description: + - Password for the cluster. + type: str + guest_vswitch_name: + description: + - Name of virtual switch used for guest traffic in the cluster. + - This would override zone wide traffic label setting. + type: str + guest_vswitch_type: + description: + - Type of virtual switch used for guest traffic in the cluster. + - Allowed values are, vmwaresvs (for VMware standard vSwitch) and vmwaredvs (for VMware distributed vSwitch) + type: str + choices: [ vmwaresvs, vmwaredvs ] + public_vswitch_name: + description: + - Name of virtual switch used for public traffic in the cluster. + - This would override zone wide traffic label setting. + type: str + public_vswitch_type: + description: + - Type of virtual switch used for public traffic in the cluster. + - Allowed values are, vmwaresvs (for VMware standard vSwitch) and vmwaredvs (for VMware distributed vSwitch) + type: str + choices: [ vmwaresvs, vmwaredvs ] + vms_ip_address: + description: + - IP address of the VSM associated with this cluster. + type: str + vms_username: + description: + - Username for the VSM associated with this cluster. + type: str + vms_password: + description: + - Password for the VSM associated with this cluster. + type: str + ovm3_cluster: + description: + - Ovm3 native OCFS2 clustering enabled for cluster. + type: str + ovm3_pool: + description: + - Ovm3 native pooling enabled for cluster. + type: str + ovm3_vip: + description: + - Ovm3 vip to use for pool (and cluster). + type: str + state: + description: + - State of the cluster. 
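+      # Illustrative note (editor's): C(enabled) and C(disabled) imply
+      # C(present) and additionally set the cluster's allocation state; see
+      # _get_common_cluster_args() in the module code below.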
+ type: str + choices: [ present, absent, disabled, enabled ] + default: present +extends_documentation_fragment: +- community.general.cloudstack + +''' + +EXAMPLES = ''' +- name: Ensure a cluster is present + cs_cluster: + name: kvm-cluster-01 + zone: ch-zrh-ix-01 + hypervisor: KVM + cluster_type: CloudManaged + delegate_to: localhost + +- name: Ensure a cluster is disabled + cs_cluster: + name: kvm-cluster-01 + zone: ch-zrh-ix-01 + state: disabled + delegate_to: localhost + +- name: Ensure a cluster is enabled + cs_cluster: + name: kvm-cluster-01 + zone: ch-zrh-ix-01 + state: enabled + delegate_to: localhost + +- name: Ensure a cluster is absent + cs_cluster: + name: kvm-cluster-01 + zone: ch-zrh-ix-01 + state: absent + delegate_to: localhost +''' + +RETURN = ''' +--- +id: + description: UUID of the cluster. + returned: success + type: str + sample: 04589590-ac63-4ffc-93f5-b698b8ac38b6 +name: + description: Name of the cluster. + returned: success + type: str + sample: cluster01 +allocation_state: + description: State of the cluster. + returned: success + type: str + sample: Enabled +cluster_type: + description: Type of the cluster. + returned: success + type: str + sample: ExternalManaged +cpu_overcommit_ratio: + description: The CPU overcommit ratio of the cluster. + returned: success + type: str + sample: 1.0 +memory_overcommit_ratio: + description: The memory overcommit ratio of the cluster. + returned: success + type: str + sample: 1.0 +managed_state: + description: Whether this cluster is managed by CloudStack. + returned: success + type: str + sample: Managed +ovm3_vip: + description: Ovm3 VIP to use for pooling and/or clustering + returned: success + type: str + sample: 10.10.10.101 +hypervisor: + description: Hypervisor of the cluster + returned: success + type: str + sample: VMware +zone: + description: Name of zone the cluster is in. + returned: success + type: str + sample: ch-gva-2 +pod: + description: Name of pod the cluster is in. 
+  returned: success
+  type: str
+  sample: pod01
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.cloudstack import (
+    AnsibleCloudStack,
+    cs_argument_spec,
+    cs_required_together,
+)
+
+
+class AnsibleCloudStackCluster(AnsibleCloudStack):
+
+    def __init__(self, module):
+        super(AnsibleCloudStackCluster, self).__init__(module)
+        self.returns = {
+            'allocationstate': 'allocation_state',
+            'hypervisortype': 'hypervisor',
+            'clustertype': 'cluster_type',
+            'podname': 'pod',
+            'managedstate': 'managed_state',
+            'memoryovercommitratio': 'memory_overcommit_ratio',
+            'cpuovercommitratio': 'cpu_overcommit_ratio',
+            'ovm3vip': 'ovm3_vip',
+        }
+        self.cluster = None
+
+    def _get_common_cluster_args(self):
+        args = {
+            'clustername': self.module.params.get('name'),
+            'hypervisor': self.module.params.get('hypervisor'),
+            'clustertype': self.module.params.get('cluster_type'),
+        }
+        state = self.module.params.get('state')
+        if state in ['enabled', 'disabled']:
+            args['allocationstate'] = state.capitalize()
+        return args
+
+    def get_pod(self, key=None):
+        args = {
+            'name': self.module.params.get('pod'),
+            'zoneid': self.get_zone(key='id'),
+        }
+        pods = self.query_api('listPods', **args)
+        if pods:
+            return self._get_by_key(key, pods['pod'][0])
+        self.module.fail_json(msg="Pod %s not found in zone %s" % (self.module.params.get('pod'), self.get_zone(key='name')))
+
+    def get_cluster(self):
+        if not self.cluster:
+            args = {}
+
+            uuid = self.module.params.get('id')
+            if uuid:
+                args['id'] = uuid
+                clusters = self.query_api('listClusters', **args)
+                if clusters:
+                    self.cluster = clusters['cluster'][0]
+                    return self.cluster
+
+            args['name'] = self.module.params.get('name')
+            clusters = self.query_api('listClusters', **args)
+            if clusters:
+                self.cluster = clusters['cluster'][0]
+                # Fix differing return values from the API compared to the request arguments given
+                self.cluster['hypervisor'] = self.cluster['hypervisortype']
+                self.cluster['clustername'] = self.cluster['name']
+        return self.cluster
+
+    def present_cluster(self):
+        cluster = self.get_cluster()
+        if cluster:
+            cluster = self._update_cluster()
+        else:
+            cluster = self._create_cluster()
+        return cluster
+
+    def _create_cluster(self):
+        required_params = [
+            'cluster_type',
+            'hypervisor',
+        ]
+        self.module.fail_on_missing_params(required_params=required_params)
+
+        args = self._get_common_cluster_args()
+        args['zoneid'] = self.get_zone(key='id')
+        args['podid'] = self.get_pod(key='id')
+        args['url'] = self.module.params.get('url')
+        args['username'] = self.module.params.get('username')
+        args['password'] = self.module.params.get('password')
+        args['guestvswitchname'] = self.module.params.get('guest_vswitch_name')
+        args['guestvswitchtype'] = self.module.params.get('guest_vswitch_type')
+        args['publicvswitchname'] = self.module.params.get('public_vswitch_name')
+        args['publicvswitchtype'] = self.module.params.get('public_vswitch_type')
+        args['vsmipaddress'] = self.module.params.get('vms_ip_address')
+        args['vsmusername'] = self.module.params.get('vms_username')
+        args['vsmpassword'] = self.module.params.get('vms_password')
+        args['ovm3cluster'] = self.module.params.get('ovm3_cluster')
+        args['ovm3pool'] = self.module.params.get('ovm3_pool')
+        args['ovm3vip'] = self.module.params.get('ovm3_vip')
+
+        self.result['changed'] = True
+
+        cluster = None
+        if not self.module.check_mode:
+            res = self.query_api('addCluster', **args)
+
+            # API returns a list as result CLOUDSTACK-9205
+            if
isinstance(res['cluster'], list): + cluster = res['cluster'][0] + else: + cluster = res['cluster'] + return cluster + + def _update_cluster(self): + cluster = self.get_cluster() + + args = self._get_common_cluster_args() + args['id'] = cluster['id'] + + if self.has_changed(args, cluster): + self.result['changed'] = True + + if not self.module.check_mode: + res = self.query_api('updateCluster', **args) + cluster = res['cluster'] + + return cluster + + def absent_cluster(self): + cluster = self.get_cluster() + if cluster: + self.result['changed'] = True + + args = { + 'id': cluster['id'], + } + + if not self.module.check_mode: + self.query_api('deleteCluster', **args) + + return cluster + + +def main(): + argument_spec = cs_argument_spec() + argument_spec.update(dict( + name=dict(required=True), + zone=dict(), + pod=dict(), + cluster_type=dict(choices=['CloudManaged', 'ExternalManaged']), + hypervisor=dict(), + state=dict(choices=['present', 'enabled', 'disabled', 'absent'], default='present'), + url=dict(), + username=dict(), + password=dict(no_log=True), + guest_vswitch_name=dict(), + guest_vswitch_type=dict(choices=['vmwaresvs', 'vmwaredvs']), + public_vswitch_name=dict(), + public_vswitch_type=dict(choices=['vmwaresvs', 'vmwaredvs']), + vms_ip_address=dict(), + vms_username=dict(), + vms_password=dict(no_log=True), + ovm3_cluster=dict(), + ovm3_pool=dict(), + ovm3_vip=dict(), + )) + + module = AnsibleModule( + argument_spec=argument_spec, + required_together=cs_required_together(), + supports_check_mode=True + ) + + acs_cluster = AnsibleCloudStackCluster(module) + + state = module.params.get('state') + if state in ['absent']: + cluster = acs_cluster.absent_cluster() + else: + cluster = acs_cluster.present_cluster() + + result = acs_cluster.get_result(cluster) + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/cloudstack/cs_configuration.py b/plugins/modules/cloud/cloudstack/cs_configuration.py new file mode 100644 index 0000000000..d716676958 --- /dev/null +++ b/plugins/modules/cloud/cloudstack/cs_configuration.py @@ -0,0 +1,277 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# (c) 2016, René Moser +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['stableinterface'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: cs_configuration +short_description: Manages configuration on Apache CloudStack based clouds. +description: + - Manages global, zone, account, storage and cluster configurations. +author: René Moser (@resmo) +options: + name: + description: + - Name of the configuration. + type: str + required: true + value: + description: + - Value of the configuration. + type: str + required: true + account: + description: + - Ensure the value for corresponding account. + type: str + domain: + description: + - Domain the account is related to. + - Only considered if I(account) is used. + type: str + default: ROOT + zone: + description: + - Ensure the value for corresponding zone. + type: str + storage: + description: + - Ensure the value for corresponding storage pool. + type: str + cluster: + description: + - Ensure the value for corresponding cluster. 
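+      # Illustrative note (editor's): I(zone), I(storage), I(cluster) and
+      # I(account) select the scope of the configuration; when none of them
+      # is set, the global value is managed.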
+ type: str +extends_documentation_fragment: +- community.general.cloudstack + +''' + +EXAMPLES = ''' +- name: Ensure global configuration + cs_configuration: + name: router.reboot.when.outofband.migrated + value: false + delegate_to: localhost + +- name: Ensure zone configuration + cs_configuration: + name: router.reboot.when.outofband.migrated + zone: ch-gva-01 + value: true + delegate_to: localhost + +- name: Ensure storage configuration + cs_configuration: + name: storage.overprovisioning.factor + storage: storage01 + value: 2.0 + delegate_to: localhost + +- name: Ensure account configuration + cs_configuration: + name: allow.public.user.templates + value: false + account: acme inc + domain: customers + delegate_to: localhost +''' + +RETURN = ''' +--- +category: + description: Category of the configuration. + returned: success + type: str + sample: Advanced +scope: + description: Scope (zone/cluster/storagepool/account) of the parameter that needs to be updated. + returned: success + type: str + sample: storagepool +description: + description: Description of the configuration. + returned: success + type: str + sample: Setup the host to do multipath +name: + description: Name of the configuration. + returned: success + type: str + sample: zone.vlan.capacity.notificationthreshold +value: + description: Value of the configuration. + returned: success + type: str + sample: "0.75" +account: + description: Account of the configuration. + returned: success + type: str + sample: admin +Domain: + description: Domain of account of the configuration. + returned: success + type: str + sample: ROOT +zone: + description: Zone of the configuration. + returned: success + type: str + sample: ch-gva-01 +cluster: + description: Cluster of the configuration. + returned: success + type: str + sample: cluster01 +storage: + description: Storage of the configuration. + returned: success + type: str + sample: storage01 +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.cloudstack import ( + AnsibleCloudStack, + cs_argument_spec, + cs_required_together +) + + +class AnsibleCloudStackConfiguration(AnsibleCloudStack): + + def __init__(self, module): + super(AnsibleCloudStackConfiguration, self).__init__(module) + self.returns = { + 'category': 'category', + 'scope': 'scope', + 'value': 'value', + } + self.storage = None + self.account = None + self.cluster = None + + def _get_common_configuration_args(self): + args = { + 'name': self.module.params.get('name'), + 'accountid': self.get_account(key='id'), + 'storageid': self.get_storage(key='id'), + 'zoneid': self.get_zone(key='id'), + 'clusterid': self.get_cluster(key='id'), + } + return args + + def get_zone(self, key=None): + # make sure we do net use the default zone + zone = self.module.params.get('zone') + if zone: + return super(AnsibleCloudStackConfiguration, self).get_zone(key=key) + + def get_cluster(self, key=None): + if not self.cluster: + cluster_name = self.module.params.get('cluster') + if not cluster_name: + return None + args = { + 'name': cluster_name, + } + clusters = self.query_api('listClusters', **args) + if clusters: + self.cluster = clusters['cluster'][0] + self.result['cluster'] = self.cluster['name'] + else: + self.module.fail_json(msg="Cluster %s not found." 
% cluster_name) + return self._get_by_key(key=key, my_dict=self.cluster) + + def get_storage(self, key=None): + if not self.storage: + storage_pool_name = self.module.params.get('storage') + if not storage_pool_name: + return None + args = { + 'name': storage_pool_name, + } + storage_pools = self.query_api('listStoragePools', **args) + if storage_pools: + self.storage = storage_pools['storagepool'][0] + self.result['storage'] = self.storage['name'] + else: + self.module.fail_json(msg="Storage pool %s not found." % storage_pool_name) + return self._get_by_key(key=key, my_dict=self.storage) + + def get_configuration(self): + configuration = None + args = self._get_common_configuration_args() + args['fetch_list'] = True + configurations = self.query_api('listConfigurations', **args) + if not configurations: + self.module.fail_json(msg="Configuration %s not found." % args['name']) + for config in configurations: + if args['name'] == config['name']: + configuration = config + return configuration + + def get_value(self): + value = str(self.module.params.get('value')) + if value in ('True', 'False'): + value = value.lower() + return value + + def present_configuration(self): + configuration = self.get_configuration() + args = self._get_common_configuration_args() + args['value'] = self.get_value() + if self.has_changed(args, configuration, ['value']): + self.result['changed'] = True + if not self.module.check_mode: + res = self.query_api('updateConfiguration', **args) + configuration = res['configuration'] + return configuration + + def get_result(self, configuration): + self.result = super(AnsibleCloudStackConfiguration, self).get_result(configuration) + if self.account: + self.result['account'] = self.account['name'] + self.result['domain'] = self.domain['path'] + elif self.zone: + self.result['zone'] = self.zone['name'] + return self.result + + +def main(): + argument_spec = cs_argument_spec() + argument_spec.update(dict( + name=dict(required=True), + value=dict(type='str', required=True), + zone=dict(), + storage=dict(), + cluster=dict(), + account=dict(), + domain=dict(default='ROOT') + )) + + module = AnsibleModule( + argument_spec=argument_spec, + required_together=cs_required_together(), + supports_check_mode=True + ) + + acs_configuration = AnsibleCloudStackConfiguration(module) + configuration = acs_configuration.present_configuration() + result = acs_configuration.get_result(configuration) + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/cloudstack/cs_disk_offering.py b/plugins/modules/cloud/cloudstack/cs_disk_offering.py new file mode 100644 index 0000000000..441563840c --- /dev/null +++ b/plugins/modules/cloud/cloudstack/cs_disk_offering.py @@ -0,0 +1,381 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# (c) 2018, David Passante <@dpassante> +# (c) 2017, René Moser +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: cs_disk_offering +description: + - Create and delete disk offerings for guest VMs. + - Update display_text or display_offering of existing disk offering. +short_description: Manages disk offerings on Apache CloudStack based clouds. 
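The get_value() helper in the cs_configuration module above normalizes values before comparison, because Ansible may pass Python booleans while the CloudStack API stores and returns configuration values as strings such as "true". A standalone illustration of the same normalization (hypothetical function name):

def normalize(value):
    value = str(value)
    if value in ('True', 'False'):
        value = value.lower()
    return value

assert normalize(True) == 'true'     # bool becomes an API-style string
assert normalize('0.75') == '0.75'   # other values pass through as strings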
+author: + - David Passante (@dpassante) + - René Moser (@resmo) +options: + disk_size: + description: + - Size of the disk offering in GB (1GB = 1,073,741,824 bytes). + type: int + bytes_read_rate: + description: + - Bytes read rate of the disk offering. + type: int + bytes_write_rate: + description: + - Bytes write rate of the disk offering. + type: int + display_text: + description: + - Display text of the disk offering. + - If not set, C(name) will be used as C(display_text) while creating. + type: str + domain: + description: + - Domain the disk offering is related to. + - Public for all domains and subdomains if not set. + type: str + hypervisor_snapshot_reserve: + description: + - Hypervisor snapshot reserve space as a percent of a volume. + - Only for managed storage using Xen or VMware. + type: int + customized: + description: + - Whether disk offering iops is custom or not. + type: bool + default: no + iops_read_rate: + description: + - IO requests read rate of the disk offering. + type: int + iops_write_rate: + description: + - IO requests write rate of the disk offering. + type: int + iops_max: + description: + - Max. iops of the disk offering. + type: int + iops_min: + description: + - Min. iops of the disk offering. + type: int + name: + description: + - Name of the disk offering. + type: str + required: true + provisioning_type: + description: + - Provisioning type used to create volumes. + type: str + choices: [ thin, sparse, fat ] + state: + description: + - State of the disk offering. + type: str + choices: [ present, absent ] + default: present + storage_type: + description: + - The storage type of the disk offering. + type: str + choices: [ local, shared ] + storage_tags: + description: + - The storage tags for this disk offering. + type: list + aliases: [ storage_tag ] + display_offering: + description: + - An optional field, whether to display the offering to the end user or not. 
+ type: bool +extends_documentation_fragment: +- community.general.cloudstack + +''' + +EXAMPLES = ''' +- name: Create a disk offering with local storage + cs_disk_offering: + name: small + display_text: Small 10GB + disk_size: 10 + storage_type: local + delegate_to: localhost + +- name: Create or update a disk offering with shared storage + cs_disk_offering: + name: small + display_text: Small 10GB + disk_size: 10 + storage_type: shared + storage_tags: SAN01 + delegate_to: localhost + +- name: Remove a disk offering + cs_disk_offering: + name: small + state: absent + delegate_to: localhost +''' + +RETURN = ''' +--- +id: + description: UUID of the disk offering + returned: success + type: str + sample: a6f7a5fc-43f8-11e5-a151-feff819cdc9f +disk_size: + description: Size of the disk offering in GB + returned: success + type: int + sample: 10 +iops_max: + description: Max iops of the disk offering + returned: success + type: int + sample: 1000 +iops_min: + description: Min iops of the disk offering + returned: success + type: int + sample: 500 +bytes_read_rate: + description: Bytes read rate of the disk offering + returned: success + type: int + sample: 1000 +bytes_write_rate: + description: Bytes write rate of the disk offering + returned: success + type: int + sample: 1000 +iops_read_rate: + description: IO requests per second read rate of the disk offering + returned: success + type: int + sample: 1000 +iops_write_rate: + description: IO requests per second write rate of the disk offering + returned: success + type: int + sample: 1000 +created: + description: Date the offering was created + returned: success + type: str + sample: 2017-11-19T10:48:59+0000 +display_text: + description: Display text of the offering + returned: success + type: str + sample: Small 10GB +domain: + description: Domain the offering is into + returned: success + type: str + sample: ROOT +storage_tags: + description: List of storage tags + returned: success + type: list + sample: [ 'eco' ] +customized: + description: Whether the offering uses custom IOPS or not + returned: success + type: bool + sample: false +name: + description: Name of the system offering + returned: success + type: str + sample: Micro +provisioning_type: + description: Provisioning type used to create volumes + returned: success + type: str + sample: thin +storage_type: + description: Storage type used to create volumes + returned: success + type: str + sample: shared +display_offering: + description: Whether to display the offering to the end user or not. 
+ returned: success + type: bool + sample: false +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.cloudstack import ( + AnsibleCloudStack, + cs_argument_spec, + cs_required_together, +) + + +class AnsibleCloudStackDiskOffering(AnsibleCloudStack): + + def __init__(self, module): + super(AnsibleCloudStackDiskOffering, self).__init__(module) + self.returns = { + 'disksize': 'disk_size', + 'diskBytesReadRate': 'bytes_read_rate', + 'diskBytesWriteRate': 'bytes_write_rate', + 'diskIopsReadRate': 'iops_read_rate', + 'diskIopsWriteRate': 'iops_write_rate', + 'maxiops': 'iops_max', + 'miniops': 'iops_min', + 'hypervisorsnapshotreserve': 'hypervisor_snapshot_reserve', + 'customized': 'customized', + 'provisioningtype': 'provisioning_type', + 'storagetype': 'storage_type', + 'tags': 'storage_tags', + 'displayoffering': 'display_offering', + } + + self.disk_offering = None + + def get_disk_offering(self): + args = { + 'name': self.module.params.get('name'), + 'domainid': self.get_domain(key='id'), + } + disk_offerings = self.query_api('listDiskOfferings', **args) + if disk_offerings: + for disk_offer in disk_offerings['diskoffering']: + if args['name'] == disk_offer['name']: + self.disk_offering = disk_offer + + return self.disk_offering + + def present_disk_offering(self): + disk_offering = self.get_disk_offering() + if not disk_offering: + disk_offering = self._create_offering(disk_offering) + else: + disk_offering = self._update_offering(disk_offering) + + return disk_offering + + def absent_disk_offering(self): + disk_offering = self.get_disk_offering() + if disk_offering: + self.result['changed'] = True + if not self.module.check_mode: + args = { + 'id': disk_offering['id'], + } + self.query_api('deleteDiskOffering', **args) + return disk_offering + + def _create_offering(self, disk_offering): + self.result['changed'] = True + + args = { + 'name': self.module.params.get('name'), + 'displaytext': self.get_or_fallback('display_text', 'name'), + 'disksize': self.module.params.get('disk_size'), + 'bytesreadrate': self.module.params.get('bytes_read_rate'), + 'byteswriterate': self.module.params.get('bytes_write_rate'), + 'customized': self.module.params.get('customized'), + 'domainid': self.get_domain(key='id'), + 'hypervisorsnapshotreserve': self.module.params.get('hypervisor_snapshot_reserve'), + 'iopsreadrate': self.module.params.get('iops_read_rate'), + 'iopswriterate': self.module.params.get('iops_write_rate'), + 'maxiops': self.module.params.get('iops_max'), + 'miniops': self.module.params.get('iops_min'), + 'provisioningtype': self.module.params.get('provisioning_type'), + 'diskofferingdetails': self.module.params.get('disk_offering_details'), + 'storagetype': self.module.params.get('storage_type'), + 'tags': self.module.params.get('storage_tags'), + 'displayoffering': self.module.params.get('display_offering'), + } + if not self.module.check_mode: + res = self.query_api('createDiskOffering', **args) + disk_offering = res['diskoffering'] + return disk_offering + + def _update_offering(self, disk_offering): + args = { + 'id': disk_offering['id'], + 'name': self.module.params.get('name'), + 'displaytext': self.get_or_fallback('display_text', 'name'), + 'displayoffering': self.module.params.get('display_offering'), + } + if self.has_changed(args, disk_offering): + self.result['changed'] = True + + if not self.module.check_mode: + res = self.query_api('updateDiskOffering', **args) + disk_offering = res['diskoffering'] + 
return disk_offering + + def get_result(self, disk_offering): + super(AnsibleCloudStackDiskOffering, self).get_result(disk_offering) + if disk_offering: + # Prevent confusion, the api returns a tags key for storage tags. + if 'tags' in disk_offering: + self.result['storage_tags'] = disk_offering['tags'].split(',') or [disk_offering['tags']] + if 'tags' in self.result: + del self.result['tags'] + + return self.result + + +def main(): + argument_spec = cs_argument_spec() + argument_spec.update(dict( + name=dict(required=True), + display_text=dict(), + domain=dict(), + disk_size=dict(type='int'), + display_offering=dict(type='bool'), + hypervisor_snapshot_reserve=dict(type='int'), + bytes_read_rate=dict(type='int'), + bytes_write_rate=dict(type='int'), + customized=dict(type='bool'), + iops_read_rate=dict(type='int'), + iops_write_rate=dict(type='int'), + iops_max=dict(type='int'), + iops_min=dict(type='int'), + provisioning_type=dict(choices=['thin', 'sparse', 'fat']), + storage_type=dict(choices=['local', 'shared']), + storage_tags=dict(type='list', aliases=['storage_tag']), + state=dict(choices=['present', 'absent'], default='present'), + )) + + module = AnsibleModule( + argument_spec=argument_spec, + required_together=cs_required_together(), + supports_check_mode=True + ) + + acs_do = AnsibleCloudStackDiskOffering(module) + + state = module.params.get('state') + if state == "absent": + disk_offering = acs_do.absent_disk_offering() + else: + disk_offering = acs_do.present_disk_offering() + + result = acs_do.get_result(disk_offering) + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/cloudstack/cs_domain.py b/plugins/modules/cloud/cloudstack/cs_domain.py new file mode 100644 index 0000000000..7d4644ae90 --- /dev/null +++ b/plugins/modules/cloud/cloudstack/cs_domain.py @@ -0,0 +1,251 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# (c) 2015, René Moser +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['stableinterface'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: cs_domain +short_description: Manages domains on Apache CloudStack based clouds. +description: + - Create, update and remove domains. +author: René Moser (@resmo) +options: + path: + description: + - Path of the domain. + - Prefix C(ROOT/) or C(/ROOT/) in path is optional. + type: str + required: true + network_domain: + description: + - Network domain for networks in the domain. + type: str + clean_up: + description: + - Clean up all domain resources like child domains and accounts. + - Considered on I(state=absent). + type: bool + default: no + state: + description: + - State of the domain. + type: str + choices: [ present, absent ] + default: present + poll_async: + description: + - Poll async jobs until job has finished. 
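The get_result() override in the cs_disk_offering module above converts the API's comma-separated 'tags' string into the module's 'storage_tags' list. A small sketch of that mapping, with an invented function name:

def storage_tags_from_api(offering):
    # CloudStack returns storage tags as one comma-separated string.
    tags = offering.get('tags')
    return tags.split(',') if tags else []

assert storage_tags_from_api({'tags': 'SAN01,eco'}) == ['SAN01', 'eco']
assert storage_tags_from_api({}) == []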
+ type: bool + default: yes +extends_documentation_fragment: +- community.general.cloudstack + +''' + +EXAMPLES = ''' +- name: Create a domain + cs_domain: + path: ROOT/customers + network_domain: customers.example.com + delegate_to: localhost + +- name: Create another subdomain + cs_domain: + path: ROOT/customers/xy + network_domain: xy.customers.example.com + delegate_to: localhost + +- name: Remove a domain + cs_domain: + path: ROOT/customers/xy + state: absent + delegate_to: localhost +''' + +RETURN = ''' +--- +id: + description: UUID of the domain. + returned: success + type: str + sample: 87b1e0ce-4e01-11e4-bb66-0050569e64b8 +name: + description: Name of the domain. + returned: success + type: str + sample: customers +path: + description: Domain path. + returned: success + type: str + sample: /ROOT/customers +parent_domain: + description: Parent domain of the domain. + returned: success + type: str + sample: ROOT +network_domain: + description: Network domain of the domain. + returned: success + type: str + sample: example.local +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.cloudstack import ( + AnsibleCloudStack, + cs_argument_spec, + cs_required_together +) + + +class AnsibleCloudStackDomain(AnsibleCloudStack): + + def __init__(self, module): + super(AnsibleCloudStackDomain, self).__init__(module) + self.returns = { + 'path': 'path', + 'networkdomain': 'network_domain', + 'parentdomainname': 'parent_domain', + } + self.domain = None + + def _get_domain_internal(self, path=None): + if not path: + path = self.module.params.get('path') + + if path.endswith('/'): + self.module.fail_json(msg="Path '%s' must not end with /" % path) + + path = path.lower() + + if path.startswith('/') and not path.startswith('/root/'): + path = "root" + path + elif not path.startswith('root/'): + path = "root/" + path + + args = { + 'listall': True, + 'fetch_list': True, + } + + domains = self.query_api('listDomains', **args) + if domains: + for d in domains: + if path == d['path'].lower(): + return d + return None + + def get_name(self): + # last part of the path is the name + name = self.module.params.get('path').split('/')[-1:] + return name + + def get_domain(self, key=None): + if not self.domain: + self.domain = self._get_domain_internal() + return self._get_by_key(key, self.domain) + + def get_parent_domain(self, key=None): + path = self.module.params.get('path') + # cut off last /* + path = '/'.join(path.split('/')[:-1]) + if not path: + return None + parent_domain = self._get_domain_internal(path=path) + if not parent_domain: + self.module.fail_json(msg="Parent domain path %s does not exist" % path) + return self._get_by_key(key, parent_domain) + + def present_domain(self): + domain = self.get_domain() + if not domain: + domain = self.create_domain(domain) + else: + domain = self.update_domain(domain) + return domain + + def create_domain(self, domain): + self.result['changed'] = True + + args = { + 'name': self.get_name(), + 'parentdomainid': self.get_parent_domain(key='id'), + 'networkdomain': self.module.params.get('network_domain') + } + if not self.module.check_mode: + res = self.query_api('createDomain', **args) + domain = res['domain'] + return domain + + def update_domain(self, domain): + args = { + 'id': domain['id'], + 'networkdomain': self.module.params.get('network_domain') + } + if self.has_changed(args, domain): + self.result['changed'] = True + if not self.module.check_mode: + res = 
self.query_api('updateDomain', **args) + domain = res['domain'] + return domain + + def absent_domain(self): + domain = self.get_domain() + if domain: + self.result['changed'] = True + + if not self.module.check_mode: + args = { + 'id': domain['id'], + 'cleanup': self.module.params.get('clean_up') + } + res = self.query_api('deleteDomain', **args) + + poll_async = self.module.params.get('poll_async') + if poll_async: + res = self.poll_job(res, 'domain') + return domain + + +def main(): + argument_spec = cs_argument_spec() + argument_spec.update(dict( + path=dict(required=True), + state=dict(choices=['present', 'absent'], default='present'), + network_domain=dict(), + clean_up=dict(type='bool', default=False), + poll_async=dict(type='bool', default=True), + )) + + module = AnsibleModule( + argument_spec=argument_spec, + required_together=cs_required_together(), + supports_check_mode=True + ) + + acs_dom = AnsibleCloudStackDomain(module) + + state = module.params.get('state') + if state in ['absent']: + domain = acs_dom.absent_domain() + else: + domain = acs_dom.present_domain() + + result = acs_dom.get_result(domain) + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/cloudstack/cs_facts.py b/plugins/modules/cloud/cloudstack/cs_facts.py new file mode 100644 index 0000000000..d93fec4764 --- /dev/null +++ b/plugins/modules/cloud/cloudstack/cs_facts.py @@ -0,0 +1,234 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# (c) 2015, René Moser +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['stableinterface'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: cs_facts +short_description: Gather facts on instances of Apache CloudStack based clouds. +description: + - This module fetches data from the metadata API in CloudStack. The module must be called from within the instance itself. +author: René Moser (@resmo) +options: + filter: + description: + - Filter for a specific fact. + type: str + choices: + - cloudstack_service_offering + - cloudstack_availability_zone + - cloudstack_public_hostname + - cloudstack_public_ipv4 + - cloudstack_local_hostname + - cloudstack_local_ipv4 + - cloudstack_instance_id + - cloudstack_user_data + meta_data_host: + description: + - Host or IP of the meta data API service. + - If not set, determination by parsing the dhcp lease file. + type: str +requirements: [ yaml ] +''' + +EXAMPLES = ''' +# Gather all facts on instances +- name: Gather cloudstack facts + cs_facts: + +# Gather specific fact on instances +- name: Gather cloudstack facts + cs_facts: filter=cloudstack_instance_id + +# Gather specific fact on instances with a given meta_data_host +- name: Gather cloudstack facts + cs_facts: + filter: cloudstack_instance_id + meta_data_host: 169.254.169.254 +''' + +RETURN = ''' +--- +cloudstack_availability_zone: + description: zone the instance is deployed in. + returned: success + type: str + sample: ch-gva-2 +cloudstack_instance_id: + description: UUID of the instance. + returned: success + type: str + sample: ab4e80b0-3e7e-4936-bdc5-e334ba5b0139 +cloudstack_local_hostname: + description: local hostname of the instance. + returned: success + type: str + sample: VM-ab4e80b0-3e7e-4936-bdc5-e334ba5b0139 +cloudstack_local_ipv4: + description: local IPv4 of the instance. 
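The _get_domain_internal() method of cs_domain above canonicalizes user-supplied domain paths before comparing them against listDomains output. A sketch mirroring that normalization (standalone, illustrative name):

def canonicalize(path):
    # Mirrors cs_domain: reject trailing slash, lowercase, ensure root prefix.
    if path.endswith('/'):
        raise ValueError("path must not end with /")
    path = path.lower()
    if path.startswith('/') and not path.startswith('/root/'):
        path = 'root' + path
    elif not path.startswith('root/'):
        path = 'root/' + path
    return path

assert canonicalize('ROOT/customers') == 'root/customers'
assert canonicalize('/customers/xy') == 'root/customers/xy'
assert canonicalize('customers') == 'root/customers'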
+ returned: success + type: str + sample: 185.19.28.35 +cloudstack_public_hostname: + description: public IPv4 of the router. Same as I(cloudstack_public_ipv4). + returned: success + type: str + sample: VM-ab4e80b0-3e7e-4936-bdc5-e334ba5b0139 +cloudstack_public_ipv4: + description: public IPv4 of the router. + returned: success + type: str + sample: 185.19.28.35 +cloudstack_service_offering: + description: service offering of the instance. + returned: success + type: str + sample: Micro 512mb 1cpu +cloudstack_user_data: + description: data of the instance provided by users. + returned: success + type: dict + sample: { "bla": "foo" } +''' + +import os +import traceback +from ansible.module_utils.basic import AnsibleModule, missing_required_lib +from ansible.module_utils.urls import fetch_url +from ansible.module_utils.facts import ansible_collector, default_collectors + +YAML_IMP_ERR = None +try: + import yaml + HAS_LIB_YAML = True +except ImportError: + YAML_IMP_ERR = traceback.format_exc() + HAS_LIB_YAML = False + +CS_METADATA_BASE_URL = "http://%s/latest/meta-data" +CS_USERDATA_BASE_URL = "http://%s/latest/user-data" + + +class CloudStackFacts(object): + + def __init__(self): + collector = ansible_collector.get_ansible_collector(all_collector_classes=default_collectors.collectors, + filter_spec='default_ipv4', + gather_subset=['!all', 'network'], + gather_timeout=10) + self.facts = collector.collect(module) + + self.api_ip = None + self.fact_paths = { + 'cloudstack_service_offering': 'service-offering', + 'cloudstack_availability_zone': 'availability-zone', + 'cloudstack_public_hostname': 'public-hostname', + 'cloudstack_public_ipv4': 'public-ipv4', + 'cloudstack_local_hostname': 'local-hostname', + 'cloudstack_local_ipv4': 'local-ipv4', + 'cloudstack_instance_id': 'instance-id' + } + + def run(self): + result = {} + filter = module.params.get('filter') + if not filter: + for key, path in self.fact_paths.items(): + result[key] = self._fetch(CS_METADATA_BASE_URL + "/" + path) + result['cloudstack_user_data'] = self._get_user_data_json() + else: + if filter == 'cloudstack_user_data': + result['cloudstack_user_data'] = self._get_user_data_json() + elif filter in self.fact_paths: + result[filter] = self._fetch(CS_METADATA_BASE_URL + "/" + self.fact_paths[filter]) + return result + + def _get_user_data_json(self): + try: + # this data comes from users, we try what we can to parse it...
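+ # JSON is a subset of YAML, so yaml.safe_load below also parses user data that was supplied as JSON.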
+ return yaml.safe_load(self._fetch(CS_USERDATA_BASE_URL)) + except Exception: + return None + + def _fetch(self, path): + api_ip = self._get_api_ip() + if not api_ip: + return None + api_url = path % api_ip + (response, info) = fetch_url(module, api_url, force=True) + if response: + data = response.read() + else: + data = None + return data + + def _get_dhcp_lease_file(self): + """Return the path of the lease file.""" + default_iface = self.facts['default_ipv4']['interface'] + dhcp_lease_file_locations = [ + '/var/lib/dhcp/dhclient.%s.leases' % default_iface, # debian / ubuntu + '/var/lib/dhclient/dhclient-%s.leases' % default_iface, # centos 6 + '/var/lib/dhclient/dhclient--%s.lease' % default_iface, # centos 7 + '/var/db/dhclient.leases.%s' % default_iface, # openbsd + ] + for file_path in dhcp_lease_file_locations: + if os.path.exists(file_path): + return file_path + module.fail_json(msg="Could not find dhclient leases file.") + + def _get_api_ip(self): + """Return the IP of the DHCP server.""" + if module.params.get('meta_data_host'): + return module.params.get('meta_data_host') + elif not self.api_ip: + dhcp_lease_file = self._get_dhcp_lease_file() + for line in open(dhcp_lease_file): + if 'dhcp-server-identifier' in line: + # get IP of string "option dhcp-server-identifier 185.19.28.176;" + line = line.translate(None, ';') + self.api_ip = line.split()[2] + break + if not self.api_ip: + module.fail_json(msg="No dhcp-server-identifier found in leases file.") + return self.api_ip + + +def main(): + global module + module = AnsibleModule( + argument_spec=dict( + filter=dict(default=None, choices=[ + 'cloudstack_service_offering', + 'cloudstack_availability_zone', + 'cloudstack_public_hostname', + 'cloudstack_public_ipv4', + 'cloudstack_local_hostname', + 'cloudstack_local_ipv4', + 'cloudstack_instance_id', + 'cloudstack_user_data', + ]), + meta_data_host=dict(), + ), + supports_check_mode=True + ) + + if not HAS_LIB_YAML: + module.fail_json(msg=missing_required_lib("PyYAML"), exception=YAML_IMP_ERR) + + cs_facts = CloudStackFacts().run() + cs_facts_result = dict(changed=False, ansible_facts=cs_facts) + module.exit_json(**cs_facts_result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/cloudstack/cs_firewall.py b/plugins/modules/cloud/cloudstack/cs_firewall.py new file mode 100644 index 0000000000..ba9bd7673b --- /dev/null +++ b/plugins/modules/cloud/cloudstack/cs_firewall.py @@ -0,0 +1,448 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright: (c) 2015, René Moser +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['stableinterface'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: cs_firewall +short_description: Manages firewall rules on Apache CloudStack based clouds. +description: + - Creates and removes firewall rules. +author: René Moser (@resmo) +options: + ip_address: + description: + - Public IP address the ingress rule is assigned to. + - Required if I(type=ingress). + type: str + network: + description: + - Network the egress rule is related to. + - Required if I(type=egress). + type: str + state: + description: + - State of the firewall rule. + type: str + default: present + choices: [ present, absent ] + type: + description: + - Type of the firewall rule. 
+ type: str + default: ingress + choices: [ ingress, egress ] + protocol: + description: + - Protocol of the firewall rule. + - C(all) is only available if I(type=egress). + type: str + default: tcp + choices: [ tcp, udp, icmp, all ] + cidrs: + description: + - List of CIDRs (full notation) to be used for firewall rule. + - Since version 2.5, it is a list of CIDR. + type: list + default: 0.0.0.0/0 + aliases: [ cidr ] + start_port: + description: + - Start port for this rule. + - Considered if I(protocol=tcp) or I(protocol=udp). + type: int + aliases: [ port ] + end_port: + description: + - End port for this rule. Considered if I(protocol=tcp) or I(protocol=udp). + - If not specified, equal I(start_port). + type: int + icmp_type: + description: + - Type of the icmp message being sent. + - Considered if I(protocol=icmp). + type: int + icmp_code: + description: + - Error code for this icmp message. + - Considered if I(protocol=icmp). + type: int + domain: + description: + - Domain the firewall rule is related to. + type: str + account: + description: + - Account the firewall rule is related to. + type: str + project: + description: + - Name of the project the firewall rule is related to. + type: str + zone: + description: + - Name of the zone in which the virtual machine is in. + - If not set, default zone is used. + type: str + poll_async: + description: + - Poll async jobs until job has finished. + type: bool + default: yes + tags: + description: + - List of tags. Tags are a list of dictionaries having keys I(key) and I(value). + - "To delete all tags, set an empty list e.g. I(tags: [])." + type: list + aliases: [ tag ] +extends_documentation_fragment: +- community.general.cloudstack + +''' + +EXAMPLES = ''' +- name: Allow inbound port 80/tcp from 1.2.3.4 to 4.3.2.1 + cs_firewall: + ip_address: 4.3.2.1 + port: 80 + cidr: 1.2.3.4/32 + delegate_to: localhost + +- name: Allow inbound tcp/udp port 53 to 4.3.2.1 + cs_firewall: + ip_address: 4.3.2.1 + port: 53 + protocol: '{{ item }}' + with_items: + - tcp + - udp + delegate_to: localhost + +- name: Ensure firewall rule is removed + cs_firewall: + ip_address: 4.3.2.1 + start_port: 8000 + end_port: 8888 + cidr: 17.0.0.0/8 + state: absent + delegate_to: localhost + +- name: Allow all outbound traffic + cs_firewall: + network: my_network + type: egress + protocol: all + delegate_to: localhost + +- name: Allow only HTTP outbound traffic for an IP + cs_firewall: + network: my_network + type: egress + port: 80 + cidr: 10.101.1.20 + delegate_to: localhost +''' + +RETURN = ''' +--- +id: + description: UUID of the rule. + returned: success + type: str + sample: 04589590-ac63-4ffc-93f5-b698b8ac38b6 +ip_address: + description: IP address of the rule if C(type=ingress) + returned: success + type: str + sample: 10.100.212.10 +type: + description: Type of the rule. + returned: success + type: str + sample: ingress +cidr: + description: CIDR string of the rule. + returned: success + type: str + sample: 0.0.0.0/0 +cidrs: + description: CIDR list of the rule. + returned: success + type: list + sample: [ '0.0.0.0/0' ] + version_added: '2.5' +protocol: + description: Protocol of the rule. + returned: success + type: str + sample: tcp +start_port: + description: Start port of the rule. + returned: success + type: int + sample: 80 +end_port: + description: End port of the rule. + returned: success + type: int + sample: 80 +icmp_code: + description: ICMP code of the rule. + returned: success + type: int + sample: 1 +icmp_type: + description: ICMP type of the rule. 
+ returned: success + type: int + sample: 1 +network: + description: Name of the network if C(type=egress) + returned: success + type: str + sample: my_network +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.cloudstack import ( + AnsibleCloudStack, + cs_argument_spec, + cs_required_together +) + + +class AnsibleCloudStackFirewall(AnsibleCloudStack): + + def __init__(self, module): + super(AnsibleCloudStackFirewall, self).__init__(module) + self.returns = { + 'cidrlist': 'cidr', + 'startport': 'start_port', + 'endport': 'end_port', + 'protocol': 'protocol', + 'ipaddress': 'ip_address', + 'icmpcode': 'icmp_code', + 'icmptype': 'icmp_type', + } + self.firewall_rule = None + self.network = None + + def get_firewall_rule(self): + if not self.firewall_rule: + cidrs = self.module.params.get('cidrs') + protocol = self.module.params.get('protocol') + start_port = self.module.params.get('start_port') + end_port = self.get_or_fallback('end_port', 'start_port') + icmp_code = self.module.params.get('icmp_code') + icmp_type = self.module.params.get('icmp_type') + fw_type = self.module.params.get('type') + + if protocol in ['tcp', 'udp'] and not (start_port and end_port): + self.module.fail_json(msg="missing required argument for protocol '%s': start_port or end_port" % protocol) + + if protocol == 'icmp' and not icmp_type: + self.module.fail_json(msg="missing required argument for protocol 'icmp': icmp_type") + + if protocol == 'all' and fw_type != 'egress': + self.module.fail_json(msg="protocol 'all' could only be used for type 'egress'") + + args = { + 'account': self.get_account('name'), + 'domainid': self.get_domain('id'), + 'projectid': self.get_project('id'), + 'fetch_list': True, + } + if fw_type == 'egress': + args['networkid'] = self.get_network(key='id') + if not args['networkid']: + self.module.fail_json(msg="missing required argument for type egress: network") + + # CloudStack 4.11 use the network cidr for 0.0.0.0/0 in egress + # That is why we need to replace it. 
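+ # For example, with a network CIDR of 10.101.1.0/24, a requested list of ['0.0.0.0/0', '10.0.0.0/8'] is compared as ['10.101.1.0/24', '10.0.0.0/8'].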
+ network_cidr = self.get_network(key='cidr') + egress_cidrs = [network_cidr if cidr == '0.0.0.0/0' else cidr for cidr in cidrs] + + firewall_rules = self.query_api('listEgressFirewallRules', **args) + else: + args['ipaddressid'] = self.get_ip_address('id') + if not args['ipaddressid']: + self.module.fail_json(msg="missing required argument for type ingress: ip_address") + egress_cidrs = None + + firewall_rules = self.query_api('listFirewallRules', **args) + + if firewall_rules: + for rule in firewall_rules: + type_match = self._type_cidrs_match(rule, cidrs, egress_cidrs) + + protocol_match = ( + self._tcp_udp_match(rule, protocol, start_port, end_port) or + self._icmp_match(rule, protocol, icmp_code, icmp_type) or + self._egress_all_match(rule, protocol, fw_type) + ) + + if type_match and protocol_match: + self.firewall_rule = rule + break + return self.firewall_rule + + def _tcp_udp_match(self, rule, protocol, start_port, end_port): + return ( + protocol in ['tcp', 'udp'] and + protocol == rule['protocol'] and + start_port == int(rule['startport']) and + end_port == int(rule['endport']) + ) + + def _egress_all_match(self, rule, protocol, fw_type): + return ( + protocol in ['all'] and + protocol == rule['protocol'] and + fw_type == 'egress' + ) + + def _icmp_match(self, rule, protocol, icmp_code, icmp_type): + return ( + protocol == 'icmp' and + protocol == rule['protocol'] and + icmp_code == rule['icmpcode'] and + icmp_type == rule['icmptype'] + ) + + def _type_cidrs_match(self, rule, cidrs, egress_cidrs): + if egress_cidrs is not None: + return ",".join(egress_cidrs) == rule['cidrlist'] or ",".join(cidrs) == rule['cidrlist'] + else: + return ",".join(cidrs) == rule['cidrlist'] + + def create_firewall_rule(self): + firewall_rule = self.get_firewall_rule() + if not firewall_rule: + self.result['changed'] = True + + args = { + 'cidrlist': self.module.params.get('cidrs'), + 'protocol': self.module.params.get('protocol'), + 'startport': self.module.params.get('start_port'), + 'endport': self.get_or_fallback('end_port', 'start_port'), + 'icmptype': self.module.params.get('icmp_type'), + 'icmpcode': self.module.params.get('icmp_code') + } + + fw_type = self.module.params.get('type') + if not self.module.check_mode: + if fw_type == 'egress': + args['networkid'] = self.get_network(key='id') + res = self.query_api('createEgressFirewallRule', **args) + else: + args['ipaddressid'] = self.get_ip_address('id') + res = self.query_api('createFirewallRule', **args) + + poll_async = self.module.params.get('poll_async') + if poll_async: + firewall_rule = self.poll_job(res, 'firewallrule') + + if firewall_rule: + firewall_rule = self.ensure_tags(resource=firewall_rule, resource_type='Firewallrule') + self.firewall_rule = firewall_rule + + return firewall_rule + + def remove_firewall_rule(self): + firewall_rule = self.get_firewall_rule() + if firewall_rule: + self.result['changed'] = True + + args = { + 'id': firewall_rule['id'] + } + + fw_type = self.module.params.get('type') + if not self.module.check_mode: + if fw_type == 'egress': + res = self.query_api('deleteEgressFirewallRule', **args) + else: + res = self.query_api('deleteFirewallRule', **args) + + poll_async = self.module.params.get('poll_async') + if poll_async: + self.poll_job(res, 'firewallrule') + return firewall_rule + + def get_result(self, firewall_rule): + super(AnsibleCloudStackFirewall, self).get_result(firewall_rule) + if firewall_rule: + self.result['type'] = self.module.params.get('type') + if self.result['type'] == 'egress': + 
self.result['network'] = self.get_network(key='displaytext') + if 'cidrlist' in firewall_rule: + self.result['cidrs'] = firewall_rule['cidrlist'].split(',') or [firewall_rule['cidrlist']] + return self.result + + +def main(): + argument_spec = cs_argument_spec() + argument_spec.update(dict( + ip_address=dict(), + network=dict(), + cidrs=dict(type='list', default='0.0.0.0/0', aliases=['cidr']), + protocol=dict(choices=['tcp', 'udp', 'icmp', 'all'], default='tcp'), + type=dict(choices=['ingress', 'egress'], default='ingress'), + icmp_type=dict(type='int'), + icmp_code=dict(type='int'), + start_port=dict(type='int', aliases=['port']), + end_port=dict(type='int'), + state=dict(choices=['present', 'absent'], default='present'), + zone=dict(), + domain=dict(), + account=dict(), + project=dict(), + poll_async=dict(type='bool', default=True), + tags=dict(type='list', aliases=['tag'], default=None), + )) + + required_together = cs_required_together() + required_together.extend([ + ['icmp_type', 'icmp_code'], + ]) + + module = AnsibleModule( + argument_spec=argument_spec, + required_together=required_together, + required_one_of=( + ['ip_address', 'network'], + ), + mutually_exclusive=( + ['icmp_type', 'start_port'], + ['icmp_type', 'end_port'], + ['ip_address', 'network'], + ), + supports_check_mode=True + ) + + acs_fw = AnsibleCloudStackFirewall(module) + + state = module.params.get('state') + if state in ['absent']: + fw_rule = acs_fw.remove_firewall_rule() + else: + fw_rule = acs_fw.create_firewall_rule() + + result = acs_fw.get_result(fw_rule) + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/cloudstack/cs_host.py b/plugins/modules/cloud/cloudstack/cs_host.py new file mode 100644 index 0000000000..a55c1a1bf6 --- /dev/null +++ b/plugins/modules/cloud/cloudstack/cs_host.py @@ -0,0 +1,627 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# (c) 2016, René Moser +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: cs_host +short_description: Manages hosts on Apache CloudStack based clouds. +description: + - Create, update and remove hosts. +author: René Moser (@resmo) +options: + name: + description: + - Name of the host. + type: str + required: true + aliases: [ ip_address ] + url: + description: + - URL of the host used to create a host. + - If not provided, C(http://) and param I(name) is used as URL. + - Only considered if I(state=present) and host does not yet exist. + type: str + username: + description: + - Username for the host. + - Required if I(state=present) and host does not yet exist. + type: str + password: + description: + - Password for the host. + - Required if I(state=present) and host does not yet exist. + type: str + pod: + description: + - Name of the pod. + - Required if I(state=present) and host does not yet exist. + type: str + cluster: + description: + - Name of the cluster. + type: str + hypervisor: + description: + - Name of the hypervisor. + - Required if I(state=present) and host does not yet exist. + - Possible values are C(KVM), C(VMware), C(BareMetal), C(XenServer), C(LXC), C(HyperV), C(UCS), C(OVM), C(Simulator). + type: str + allocation_state: + description: + - Allocation state of the host.
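The firewall rule lookup in the cs_firewall module above compares CIDRs the way CloudStack returns them: 'cidrlist' is a single comma-separated string, so the requested list is joined before the comparison. A distilled, hypothetical version of that _type_cidrs_match logic:

def cidrs_match(rule, cidrs, egress_cidrs=None):
    # For egress rules, either the substituted or the original list may match.
    if egress_cidrs is not None:
        return ','.join(egress_cidrs) == rule['cidrlist'] or ','.join(cidrs) == rule['cidrlist']
    return ','.join(cidrs) == rule['cidrlist']

rule = {'cidrlist': '10.101.1.0/24,10.0.0.0/8'}
assert cidrs_match(rule, ['10.101.1.0/24', '10.0.0.0/8'])
assert not cidrs_match(rule, ['0.0.0.0/0'])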
+ type: str + choices: [ enabled, disabled, maintenance ] + host_tags: + description: + - Tags of the host. + type: list + aliases: [ host_tag ] + state: + description: + - State of the host. + type: str + default: present + choices: [ present, absent ] + zone: + description: + - Name of the zone in which the host should be deployed. + - If not set, default zone is used. + type: str +extends_documentation_fragment: +- community.general.cloudstack + +''' + +EXAMPLES = ''' +- name: Ensure a host is present but disabled + cs_host: + name: pod01.zone01.example.com + cluster: vcenter.example.com/zone01/cluster01 + pod: pod01 + zone: zone01 + hypervisor: VMware + allocation_state: disabled + host_tags: + - perf + - gpu + delegate_to: localhost + +- name: Ensure an existing host is disabled + cs_host: + name: pod01.zone01.example.com + zone: zone01 + allocation_state: disabled + delegate_to: localhost + +- name: Ensure an existing host is enabled + cs_host: + name: pod01.zone01.example.com + zone: zone01 + allocation_state: enabled + delegate_to: localhost + +- name: Ensure a host is absent + cs_host: + name: pod01.zone01.example.com + zone: zone01 + state: absent + delegate_to: localhost +''' + +RETURN = ''' +--- +capabilities: + description: Capabilities of the host. + returned: success + type: str + sample: hvm +cluster: + description: Cluster of the host. + returned: success + type: str + sample: vcenter.example.com/zone/cluster01 +cluster_type: + description: Type of the cluster of the host. + returned: success + type: str + sample: ExternalManaged +cpu_allocated: + description: Amount in percent of the host's CPU currently allocated. + returned: success + type: str + sample: 166.25% +cpu_number: + description: Number of CPUs of the host. + returned: success + type: str + sample: 24 +cpu_sockets: + description: Number of CPU sockets of the host. + returned: success + type: int + sample: 2 +cpu_speed: + description: CPU speed in Mhz + returned: success + type: int + sample: 1999 +cpu_used: + description: Amount of the host's CPU currently used. + returned: success + type: str + sample: 33.6% +cpu_with_overprovisioning: + description: Amount of the host's CPU after applying the cpu.overprovisioning.factor. + returned: success + type: str + sample: 959520.0 +created: + description: Date when the host was created. + returned: success + type: str + sample: 2015-05-03T15:05:51+0200 +disconnected: + description: Date when the host was disconnected. + returned: success + type: str + sample: 2015-05-03T15:05:51+0200 +disk_size_allocated: + description: Host's currently allocated disk size. + returned: success + type: int + sample: 2593 +disk_size_total: + description: Total disk size of the host + returned: success + type: int + sample: 259300 +events: + description: Events available for the host + returned: success + type: str + sample: "Ping; HostDown; AgentConnected; AgentDisconnected; PingTimeout; ShutdownRequested; Remove; StartAgentRebalance; ManagementServerDown" +ha_host: + description: Whether the host is a HA host. + returned: success + type: bool + sample: false +has_enough_capacity: + description: Whether the host has enough CPU and RAM capacity to migrate a VM to it. + returned: success + type: bool + sample: true +host_tags: + description: Comma-separated list of tags for the host. + returned: success + type: str + sample: "perf" +hypervisor: + description: Host's hypervisor. + returned: success + type: str + sample: VMware +hypervisor_version: + description: Hypervisor version. 
+ returned: success + type: str + sample: 5.1 +ip_address: + description: IP address of the host. + returned: success + type: str + sample: 10.10.10.1 +is_local_storage_active: + description: Whether the local storage is available or not. + returned: success + type: bool + sample: false +last_pinged: + description: Date and time the host was last pinged. + returned: success + type: str + sample: "1970-01-17T17:27:32+0100" +management_server_id: + description: Management server ID of the host. + returned: success + type: int + sample: 345050593418 +memory_allocated: + description: Amount of the host's memory currently allocated. + returned: success + type: int + sample: 69793218560 +memory_total: + description: Total memory of the host. + returned: success + type: int + sample: 206085263360 +memory_used: + description: Amount of the host's memory currently used. + returned: success + type: int + sample: 65504776192 +name: + description: Name of the host. + returned: success + type: str + sample: esx32.example.com +network_kbs_read: + description: Incoming network traffic on the host. + returned: success + type: int + sample: 0 +network_kbs_write: + description: Outgoing network traffic on the host. + returned: success + type: int + sample: 0 +os_category: + description: OS category name of the host. + returned: success + type: str + sample: ... +out_of_band_management: + description: Host out-of-band management information. + returned: success + type: str + sample: ... +pod: + description: Pod name of the host. + returned: success + type: str + sample: Pod01 +removed: + description: Date and time the host was removed. + returned: success + type: str + sample: "1970-01-17T17:27:32+0100" +resource_state: + description: Resource state of the host. + returned: success + type: str + sample: Enabled +allocation_state: + description: Allocation state of the host. + returned: success + type: str + sample: enabled +state: + description: State of the host. + returned: success + type: str + sample: Up +suitable_for_migration: + description: Whether this host is suitable (has enough capacity and satisfies all conditions like hosttags, max guests VM limit, etc) to migrate a VM + to it or not. + returned: success + type: str + sample: true +host_type: + description: Type of the host. + returned: success + type: str + sample: Routing +host_version: + description: Version of the host. + returned: success + type: str + sample: 4.5.2 +gpu_group: + description: GPU cards present in the host. + returned: success + type: list + sample: [] +zone: + description: Zone of the host.
+ returned: success + type: str + sample: zone01 +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.cloudstack import ( + AnsibleCloudStack, + cs_argument_spec, + cs_required_together, +) +import time + + +class AnsibleCloudStackHost(AnsibleCloudStack): + + def __init__(self, module): + super(AnsibleCloudStackHost, self).__init__(module) + self.returns = { + 'averageload': 'average_load', + 'capabilities': 'capabilities', + 'clustername': 'cluster', + 'clustertype': 'cluster_type', + 'cpuallocated': 'cpu_allocated', + 'cpunumber': 'cpu_number', + 'cpusockets': 'cpu_sockets', + 'cpuspeed': 'cpu_speed', + 'cpuused': 'cpu_used', + 'cpuwithoverprovisioning': 'cpu_with_overprovisioning', + 'disconnected': 'disconnected', + 'details': 'details', + 'disksizeallocated': 'disk_size_allocated', + 'disksizetotal': 'disk_size_total', + 'events': 'events', + 'hahost': 'ha_host', + 'hasenoughcapacity': 'has_enough_capacity', + 'hypervisor': 'hypervisor', + 'hypervisorversion': 'hypervisor_version', + 'ipaddress': 'ip_address', + 'islocalstorageactive': 'is_local_storage_active', + 'lastpinged': 'last_pinged', + 'managementserverid': 'management_server_id', + 'memoryallocated': 'memory_allocated', + 'memorytotal': 'memory_total', + 'memoryused': 'memory_used', + 'networkkbsread': 'network_kbs_read', + 'networkkbswrite': 'network_kbs_write', + 'oscategoryname': 'os_category', + 'outofbandmanagement': 'out_of_band_management', + 'podname': 'pod', + 'removed': 'removed', + 'resourcestate': 'resource_state', + 'suitableformigration': 'suitable_for_migration', + 'type': 'host_type', + 'version': 'host_version', + 'gpugroup': 'gpu_group', + } + # States only usable by the updateHost API + self.allocation_states_for_update = { + 'enabled': 'Enable', + 'disabled': 'Disable', + } + self.host = None + + def get_pod(self, key=None): + pod_name = self.module.params.get('pod') + if not pod_name: + return None + args = { + 'name': pod_name, + 'zoneid': self.get_zone(key='id'), + } + pods = self.query_api('listPods', **args) + if pods: + return self._get_by_key(key, pods['pod'][0]) + self.module.fail_json(msg="Pod %s not found" % pod_name) + + def get_cluster(self, key=None): + cluster_name = self.module.params.get('cluster') + if not cluster_name: + return None + args = { + 'name': cluster_name, + 'zoneid': self.get_zone(key='id'), + } + clusters = self.query_api('listClusters', **args) + if clusters: + return self._get_by_key(key, clusters['cluster'][0]) + self.module.fail_json(msg="Cluster %s not found" % cluster_name) + + def get_host_tags(self): + host_tags = self.module.params.get('host_tags') + if host_tags is None: + return None + return ','.join(host_tags) + + def get_host(self, refresh=False): + if self.host is not None and not refresh: + return self.host + + name = self.module.params.get('name') + args = { + 'zoneid': self.get_zone(key='id'), + 'fetch_list': True, + } + res = self.query_api('listHosts', **args) + if res: + for h in res: + if name in [h['ipaddress'], h['name']]: + self.host = h + return self.host + + def _handle_allocation_state(self, host): + allocation_state = self.module.params.get('allocation_state') + if not allocation_state: + return host + + host = self._set_host_allocation_state(host) + + # In case host in maintenance and target is maintenance + if host['allocationstate'].lower() == allocation_state and allocation_state == 'maintenance': + return host + + # Cancel maintenance if target state is enabled/disabled 
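+ # 'enabled'/'disabled' cancel any maintenance and then go through the updateHost API (allocationstate Enable/Disable); 'maintenance' first enables the host via updateHost and then calls prepareHostForMaintenance.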
+ elif allocation_state in list(self.allocation_states_for_update.keys()): + host = self.disable_maintenance(host) + host = self._update_host(host, self.allocation_states_for_update[allocation_state]) + + # Only an enabled host can put in maintenance + elif allocation_state == 'maintenance': + host = self._update_host(host, 'Enable') + host = self.enable_maintenance(host) + + return host + + def _set_host_allocation_state(self, host): + if host is None: + host['allocationstate'] = 'Enable' + + # Set host allocationstate to be disabled/enabled + elif host['resourcestate'].lower() in list(self.allocation_states_for_update.keys()): + host['allocationstate'] = self.allocation_states_for_update[host['resourcestate'].lower()] + + else: + host['allocationstate'] = host['resourcestate'] + + return host + + def present_host(self): + host = self.get_host() + + if not host: + host = self._create_host(host) + else: + host = self._update_host(host) + + if host: + host = self._handle_allocation_state(host) + + return host + + def _get_url(self): + url = self.module.params.get('url') + if url: + return url + else: + return "http://%s" % self.module.params.get('name') + + def _create_host(self, host): + required_params = [ + 'password', + 'username', + 'hypervisor', + 'pod', + ] + self.module.fail_on_missing_params(required_params=required_params) + self.result['changed'] = True + args = { + 'hypervisor': self.module.params.get('hypervisor'), + 'url': self._get_url(), + 'username': self.module.params.get('username'), + 'password': self.module.params.get('password'), + 'podid': self.get_pod(key='id'), + 'zoneid': self.get_zone(key='id'), + 'clusterid': self.get_cluster(key='id'), + 'hosttags': self.get_host_tags(), + } + if not self.module.check_mode: + host = self.query_api('addHost', **args) + host = host['host'][0] + return host + + def _update_host(self, host, allocation_state=None): + args = { + 'id': host['id'], + 'hosttags': self.get_host_tags(), + 'allocationstate': allocation_state, + } + + if allocation_state is not None: + host = self._set_host_allocation_state(host) + + if self.has_changed(args, host): + self.result['changed'] = True + if not self.module.check_mode: + host = self.query_api('updateHost', **args) + host = host['host'] + + return host + + def absent_host(self): + host = self.get_host() + if host: + self.result['changed'] = True + args = { + 'id': host['id'], + } + if not self.module.check_mode: + res = self.enable_maintenance(host) + if res: + res = self.query_api('deleteHost', **args) + return host + + def enable_maintenance(self, host): + if host['resourcestate'] not in ['PrepareForMaintenance', 'Maintenance']: + self.result['changed'] = True + args = { + 'id': host['id'], + } + if not self.module.check_mode: + res = self.query_api('prepareHostForMaintenance', **args) + self.poll_job(res, 'host') + host = self._poll_for_maintenance() + return host + + def disable_maintenance(self, host): + if host['resourcestate'] in ['PrepareForMaintenance', 'Maintenance']: + self.result['changed'] = True + args = { + 'id': host['id'], + } + if not self.module.check_mode: + res = self.query_api('cancelHostMaintenance', **args) + host = self.poll_job(res, 'host') + return host + + def _poll_for_maintenance(self): + for i in range(0, 300): + time.sleep(2) + host = self.get_host(refresh=True) + if not host: + return None + elif host['resourcestate'] != 'PrepareForMaintenance': + return host + self.fail_json(msg="Polling for maintenance timed out") + + def get_result(self, host): + 
super(AnsibleCloudStackHost, self).get_result(host) + if host: + self.result['allocation_state'] = host['resourcestate'].lower() + self.result['host_tags'] = host['hosttags'].split(',') if host.get('hosttags') else [] + return self.result + + +def main(): + argument_spec = cs_argument_spec() + argument_spec.update(dict( + name=dict(required=True, aliases=['ip_address']), + url=dict(), + password=dict(no_log=True), + username=dict(), + hypervisor=dict(), + allocation_state=dict(choices=['enabled', 'disabled', 'maintenance']), + pod=dict(), + cluster=dict(), + host_tags=dict(type='list', aliases=['host_tag']), + zone=dict(), + state=dict(choices=['present', 'absent'], default='present'), + )) + + module = AnsibleModule( + argument_spec=argument_spec, + required_together=cs_required_together(), + supports_check_mode=True + ) + + acs_host = AnsibleCloudStackHost(module) + + state = module.params.get('state') + if state == 'absent': + host = acs_host.absent_host() + else: + host = acs_host.present_host() + + result = acs_host.get_result(host) + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/cloudstack/cs_image_store.py b/plugins/modules/cloud/cloudstack/cs_image_store.py new file mode 100644 index 0000000000..40615563e4 --- /dev/null +++ b/plugins/modules/cloud/cloudstack/cs_image_store.py @@ -0,0 +1,252 @@ +#!/usr/bin/python + +# Copyright: (c) 2019, Patryk Cichy @PatTheSilent +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = { + 'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community' +} + +DOCUMENTATION = ''' +--- +module: cs_image_store + +short_description: Manages CloudStack Image Stores. + + +description: + - Deploy, remove, recreate CloudStack Image Stores. + +options: + url: + description: + - The URL for the Image Store. + - Required when I(state=present). + type: str + name: + description: + - The ID of the Image Store. Required when deleting an Image Store. + required: true + type: str + zone: + description: + - The Zone name for the Image Store. + required: true + type: str + state: + description: + - State of the Image Store. + choices: [present, absent] + default: present + type: str + provider: + description: + - The image store provider name. Required when creating a new Image Store. + type: str + force_recreate: + description: + - Set to C(yes) if you're changing an existing Image Store. + - This will force the recreation of the Image Store. + - Recreation might fail if there are snapshots present on the Image Store. Delete them before running the recreation.
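The force_recreate behavior described here is implemented below by deleting and re-adding the store when its settings have changed. A condensed sketch of that decision flow, using invented helper names:

def ensure_image_store(existing, desired, force_recreate, delete, add):
    if existing is None:
        return add(desired)
    if any(existing.get(k) != v for k, v in desired.items()):
        if not force_recreate:
            # Without force_recreate the change is refused; the module
            # only emits a warning in this case.
            return existing
        delete(existing)
        return add(desired)
    return existing

store = ensure_image_store(
    existing={'name': 'nfs-01', 'url': 'nfs://192.168.21.16/exports/secondary'},
    desired={'name': 'nfs-01', 'url': 'nfs://192.168.21.10/shares/secondary'},
    force_recreate=True,
    delete=lambda s: None,
    add=lambda d: dict(d),
)
assert store['url'].endswith('/shares/secondary')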
+    type: bool
+    default: no
+
+extends_documentation_fragment:
+- community.general.cloudstack
+
+
+author:
+  - Patryk Cichy (@PatTheSilent)
+'''
+
+EXAMPLES = '''
+- name: Add an Image Store (NFS)
+  cs_image_store:
+    zone: zone-01
+    name: nfs-01
+    provider: NFS
+    url: nfs://192.168.21.16/exports/secondary
+  delegate_to: localhost
+
+# Change the NFS share URL and force an Image Store recreation
+- name: Change the NFS url
+  cs_image_store:
+    zone: zone-01
+    name: nfs-01
+    provider: NFS
+    force_recreate: yes
+    url: nfs://192.168.21.10/shares/secondary
+  delegate_to: localhost
+
+- name: Delete the image store
+  cs_image_store:
+    name: nfs-01
+    zone: zone-01
+    state: absent
+  delegate_to: localhost
+
+'''
+
+RETURN = '''
+id:
+  description: the ID of the image store
+  type: str
+  returned: success
+  sample: feb11a84-a093-45eb-b84d-7f680313c40b
+name:
+  description: the name of the image store
+  type: str
+  returned: success
+  sample: nfs-01
+protocol:
+  description: the protocol of the image store
+  type: str
+  returned: success
+  sample: nfs
+provider_name:
+  description: the provider name of the image store
+  type: str
+  returned: success
+  sample: NFS
+scope:
+  description: the scope of the image store
+  type: str
+  returned: success
+  sample: ZONE
+url:
+  description: the url of the image store
+  type: str
+  sample: nfs://192.168.21.16/exports/secondary
+  returned: success
+zone:
+  description: the Zone name of the image store
+  type: str
+  returned: success
+  sample: zone-01
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.cloudstack import AnsibleCloudStack, cs_argument_spec, cs_required_together
+
+
+class AnsibleCloudstackImageStore(AnsibleCloudStack):
+
+    def __init__(self, module):
+        super(AnsibleCloudstackImageStore, self).__init__(module)
+        self.returns = {
+            'protocol': 'protocol',
+            'providername': 'provider_name',
+            'scope': 'scope',
+            'url': 'url'
+        }
+        self.image_store = None
+
+    def get_storage_providers(self, storage_type="image"):
+        args = {
+            'type': storage_type
+        }
+        storage_providers = self.query_api('listStorageProviders', **args)
+        return [provider.get('name') for provider in storage_providers.get('dataStoreProvider')]
+
+    def get_image_store(self):
+        if self.image_store:
+            return self.image_store
+        image_store_name = self.module.params.get('name')
+        args = {
+            'name': self.module.params.get('name'),
+            'zoneid': self.get_zone(key='id')
+        }
+
+        image_stores = self.query_api('listImageStores', **args)
+        if image_stores:
+            for img_s in image_stores.get('imagestore'):
+                if image_store_name.lower() in [img_s['name'].lower(), img_s['id']]:
+                    self.image_store = img_s
+                    break
+
+        return self.image_store
+
+    def present_image_store(self):
+        provider_list = self.get_storage_providers()
+        image_store = self.get_image_store()
+
+        if self.module.params.get('provider') not in provider_list:
+            self.module.fail_json(
+                msg='Provider %s is not in the provider list (%s). Please specify a correct provider' % (
+                    self.module.params.get('provider'), provider_list))
+        args = {
+            'name': self.module.params.get('name'),
+            'url': self.module.params.get('url'),
+            'zoneid': self.get_zone(key='id'),
+            'provider': self.module.params.get('provider')
+        }
+        if not image_store:
+            self.result['changed'] = True
+            if not self.module.check_mode:
+                res = self.query_api('addImageStore', **args)
+                self.image_store = res.get('imagestore')
+        else:
+            # Cloudstack API expects 'provider' but returns 'providername'
+            args['providername'] = args.pop('provider')
+            if self.has_changed(args, image_store):
+                if self.module.params.get('force_recreate'):
+                    self.absent_image_store()
+                    self.image_store = None
+                    self.image_store = self.present_image_store()
+                else:
+                    self.module.warn("Changes to the Image Store won't be applied. "
+                                     "Use force_recreate=yes to allow the store to be recreated.")
+
+        return self.image_store
+
+    def absent_image_store(self):
+        image_store = self.get_image_store()
+        if image_store:
+            self.result['changed'] = True
+            if not self.module.check_mode:
+                args = {
+                    'id': image_store.get('id')
+                }
+                self.query_api('deleteImageStore', **args)
+        return image_store
+
+
+def main():
+    argument_spec = cs_argument_spec()
+    argument_spec.update(dict(
+        url=dict(),
+        name=dict(required=True),
+        zone=dict(required=True),
+        provider=dict(),
+        force_recreate=dict(type='bool', default=False),
+        state=dict(choices=['present', 'absent'], default='present'),
+    ))
+
+    module = AnsibleModule(
+        argument_spec=argument_spec,
+        required_together=cs_required_together(),
+        required_if=[
+            ('state', 'present', ['url', 'provider']),
+        ],
+        supports_check_mode=True
+    )
+
+    acis_do = AnsibleCloudstackImageStore(module)
+
+    state = module.params.get('state')
+    if state == "absent":
+        image_store = acis_do.absent_image_store()
+    else:
+        image_store = acis_do.present_image_store()
+
+    result = acis_do.get_result(image_store)
+    module.exit_json(**result)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/cloud/cloudstack/cs_instance.py b/plugins/modules/cloud/cloudstack/cs_instance.py
new file mode 100644
index 0000000000..fd1e85d34f
--- /dev/null
+++ b/plugins/modules/cloud/cloudstack/cs_instance.py
@@ -0,0 +1,1111 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (c) 2015, René Moser
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['stableinterface'],
+                    'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: cs_instance
+short_description: Manages instances and virtual machines on Apache CloudStack based clouds.
+description:
+  - Deploy, start, update, scale, restart, restore, stop and destroy instances.
+author: René Moser (@resmo)
+options:
+  name:
+    description:
+      - Host name of the instance. C(name) can only contain ASCII letters.
+      - Name will be generated (UUID) by CloudStack if not specified and cannot be changed afterwards.
+      - Either C(name) or C(display_name) is required.
+    type: str
+  display_name:
+    description:
+      - Custom display name of the instance.
+      - Display name will be set to I(name) if not specified.
+      - Either I(name) or I(display_name) is required.
+    type: str
+  group:
+    description:
+      - Group the new instance should be in.
+    type: str
+  state:
+    description:
+      - State of the instance.
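+      - C(present) and C(deployed) behave identically, as do C(absent) and C(destroyed).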
+    type: str
+    default: present
+    choices: [ deployed, started, stopped, restarted, restored, destroyed, expunged, present, absent ]
+  service_offering:
+    description:
+      - Name or id of the service offering of the new instance.
+      - If not set, first found service offering is used.
+    type: str
+  cpu:
+    description:
+      - The number of CPUs to allocate to the instance, used with custom service offerings.
+    type: int
+  cpu_speed:
+    description:
+      - The clock speed/shares allocated to the instance, used with custom service offerings.
+    type: int
+  memory:
+    description:
+      - The memory allocated to the instance, used with custom service offerings.
+    type: int
+  template:
+    description:
+      - Name, display text or id of the template to be used for creating the new instance.
+      - Required when using I(state=present).
+      - Mutually exclusive with I(iso) option.
+    type: str
+  iso:
+    description:
+      - Name or id of the ISO to be used for creating the new instance.
+      - Required when using I(state=present).
+      - Mutually exclusive with I(template) option.
+    type: str
+  template_filter:
+    description:
+      - Name of the filter used to search for the template or iso.
+      - Used for params I(iso) or I(template) on I(state=present).
+      - The filter C(all) was added in 2.6.
+    type: str
+    default: executable
+    choices: [ all, featured, self, selfexecutable, sharedexecutable, executable, community ]
+    aliases: [ iso_filter ]
+  hypervisor:
+    description:
+      - Name of the hypervisor to be used for creating the new instance.
+      - Relevant when using I(state=present), but only considered if not set on ISO/template.
+      - If not set or found on ISO/template, first found hypervisor will be used.
+      - Possible values are C(KVM), C(VMware), C(BareMetal), C(XenServer), C(LXC), C(HyperV), C(UCS), C(OVM), C(Simulator).
+    type: str
+  keyboard:
+    description:
+      - Keyboard device type for the instance.
+    type: str
+    choices: [ 'de', 'de-ch', 'es', 'fi', 'fr', 'fr-be', 'fr-ch', 'is', 'it', 'jp', 'nl-be', 'no', 'pt', 'uk', 'us' ]
+  networks:
+    description:
+      - List of networks to use for the new instance.
+    type: list
+    aliases: [ network ]
+  ip_address:
+    description:
+      - IPv4 address for default instance's network during creation.
+    type: str
+  ip6_address:
+    description:
+      - IPv6 address for default instance's network.
+    type: str
+  ip_to_networks:
+    description:
+      - "List of mappings in the form I({'network': NetworkName, 'ip': 1.2.3.4})"
+      - Mutually exclusive with I(networks) option.
+    type: list
+    aliases: [ ip_to_network ]
+  disk_offering:
+    description:
+      - Name of the disk offering to be used.
+    type: str
+  disk_size:
+    description:
+      - Disk size in GByte, required if deploying the instance from an ISO.
+    type: int
+  root_disk_size:
+    description:
+      - Root disk size in GByte, required if deploying the instance with the KVM hypervisor and you want to resize the root disk at startup
+        (needs CloudStack >= 4.4, cloud-initramfs-growroot installed and enabled in the template).
+    type: int
+  security_groups:
+    description:
+      - List of security groups to be applied to the new instance.
+    type: list
+    aliases: [ security_group ]
+  host:
+    description:
+      - Host on which an instance should be deployed or started.
+      - Only considered when I(state=started) or instance is running.
+      - Requires root admin privileges.
+    type: str
+  domain:
+    description:
+      - Domain the instance is related to.
+    type: str
+  account:
+    description:
+      - Account the instance is related to.
+    type: str
+  project:
+    description:
+      - Name of the project the instance should be deployed in.
+    type: str
+  zone:
+    description:
+      - Name of the zone in which the instance should be deployed.
+      - If not set, default zone is used.
+    type: str
+  ssh_key:
+    description:
+      - Name of the SSH key to be deployed on the new instance.
+    type: str
+  affinity_groups:
+    description:
+      - Affinity group names to be applied to the new instance.
+    type: list
+    aliases: [ affinity_group ]
+  user_data:
+    description:
+      - Optional data (ASCII) that can be sent to the instance upon a successful deployment.
+      - The data will be automatically base64 encoded.
+      - Consider switching to HTTP_POST by using I(CLOUDSTACK_METHOD=post) to increase the HTTP_GET size limit from 2 KB to 32 KB.
+    type: str
+  force:
+    description:
+      - Force stop/start the instance if required to apply changes, otherwise a running instance will not be changed.
+    type: bool
+    default: no
+  allow_root_disk_shrink:
+    description:
+      - Enables a volume shrinkage when the new size is smaller than the old one.
+    type: bool
+    default: no
+  tags:
+    description:
+      - List of tags. Tags are a list of dictionaries having keys C(key) and C(value).
+      - "If you want to delete all tags, set an empty list e.g. I(tags: [])."
+    type: list
+    aliases: [ tag ]
+  poll_async:
+    description:
+      - Poll async jobs until job has finished.
+    type: bool
+    default: yes
+  details:
+    description:
+      - Map to specify custom parameters.
+    type: dict
+extends_documentation_fragment:
+- community.general.cloudstack
+
+'''
+
+EXAMPLES = '''
+# NOTE: Names of offerings and ISOs depend on the CloudStack configuration.
+- name: create an instance from an ISO
+  cs_instance:
+    name: web-vm-1
+    iso: Linux Debian 7 64-bit
+    hypervisor: VMware
+    project: Integration
+    zone: ch-zrh-ix-01
+    service_offering: 1cpu_1gb
+    disk_offering: PerfPlus Storage
+    disk_size: 20
+    networks:
+      - Server Integration
+      - Sync Integration
+      - Storage Integration
+  delegate_to: localhost
+
+- name: for changing a running instance, use the 'force' parameter
+  cs_instance:
+    name: web-vm-1
+    display_name: web-vm-01.example.com
+    iso: Linux Debian 7 64-bit
+    service_offering: 2cpu_2gb
+    force: yes
+  delegate_to: localhost
+
+# NOTE: user_data can be used to kickstart the instance using cloud-init yaml config.
+- name: create or update an instance on Exoscale's public cloud using display_name.
+  cs_instance:
+    display_name: web-vm-1
+    template: Linux Debian 7 64-bit
+    service_offering: Tiny
+    ssh_key: john@example.com
+    tags:
+      - key: admin
+        value: john
+      - key: foo
+        value: bar
+    user_data: |
+        #cloud-config
+        packages:
+          - nginx
+  delegate_to: localhost
+
+- name: create an instance with multiple interfaces specifying the IP addresses
+  cs_instance:
+    name: web-vm-1
+    template: Linux Debian 7 64-bit
+    service_offering: Tiny
+    ip_to_networks:
+      - network: NetworkA
+        ip: 10.1.1.1
+      - network: NetworkB
+        ip: 192.0.2.1
+  delegate_to: localhost
+
+- name: ensure an instance is stopped
+  cs_instance:
+    name: web-vm-1
+    state: stopped
+  delegate_to: localhost
+
+- name: ensure an instance is running
+  cs_instance:
+    name: web-vm-1
+    state: started
+  delegate_to: localhost
+
+- name: remove an instance
+  cs_instance:
+    name: web-vm-1
+    state: absent
+  delegate_to: localhost
+'''
+
+RETURN = '''
+---
+id:
+  description: UUID of the instance.
+  returned: success
+  type: str
+  sample: 04589590-ac63-4ffc-93f5-b698b8ac38b6
+name:
+  description: Name of the instance.
+  returned: success
+  type: str
+  sample: web-01
+display_name:
+  description: Display name of the instance.
+  returned: success
+  type: str
+  sample: web-01
+group:
+  description: Group name the instance is related to.
+  returned: success
+  type: str
+  sample: web
+created:
+  description: Date the instance was created.
+  returned: success
+  type: str
+  sample: 2014-12-01T14:57:57+0100
+password_enabled:
+  description: True if password setting is enabled.
+  returned: success
+  type: bool
+  sample: true
+password:
+  description: The password of the instance, if it exists.
+  returned: if available
+  type: str
+  sample: Ge2oe7Do
+ssh_key:
+  description: Name of SSH key deployed to instance.
+  returned: if available
+  type: str
+  sample: key@work
+domain:
+  description: Domain the instance is related to.
+  returned: success
+  type: str
+  sample: example domain
+account:
+  description: Account the instance is related to.
+  returned: success
+  type: str
+  sample: example account
+project:
+  description: Name of project the instance is related to.
+  returned: success
+  type: str
+  sample: Production
+default_ip:
+  description: Default IP address of the instance.
+  returned: success
+  type: str
+  sample: 10.23.37.42
+default_ip6:
+  description: Default IPv6 address of the instance.
+  returned: if available
+  type: str
+  sample: 2a04:c43:c00:a07:4b4:beff:fe00:74
+  version_added: '2.6'
+public_ip:
+  description: Public IP address associated with the instance via static NAT rule.
+  returned: if available
+  type: str
+  sample: 1.2.3.4
+iso:
+  description: Name of ISO the instance was deployed with.
+  returned: if available
+  type: str
+  sample: Debian-8-64bit
+template:
+  description: Name of template the instance was deployed with.
+  returned: success
+  type: str
+  sample: Linux Debian 9 64-bit
+template_display_text:
+  description: Display text of template the instance was deployed with.
+  returned: success
+  type: str
+  sample: Linux Debian 9 64-bit 200G Disk (2017-10-08-622866)
+  version_added: '2.6'
+service_offering:
+  description: Name of the service offering the instance has.
+  returned: success
+  type: str
+  sample: 2cpu_2gb
+zone:
+  description: Name of zone the instance is in.
+  returned: success
+  type: str
+  sample: ch-gva-2
+state:
+  description: State of the instance.
+  returned: success
+  type: str
+  sample: Running
+security_groups:
+  description: Security groups the instance is in.
+  returned: success
+  type: list
+  sample: '[ "default" ]'
+affinity_groups:
+  description: Affinity groups the instance is in.
+  returned: success
+  type: list
+  sample: '[ "webservers" ]'
+tags:
+  description: List of resource tags associated with the instance.
+  returned: success
+  type: list
+  sample: '[ { "key": "foo", "value": "bar" } ]'
+hypervisor:
+  description: Hypervisor related to this instance.
+  returned: success
+  type: str
+  sample: KVM
+host:
+  description: Hostname of the hypervisor the instance is running on.
+  returned: success and instance is running
+  type: str
+  sample: host-01.example.com
+  version_added: '2.6'
+instance_name:
+  description: Internal name of the instance (ROOT admin only).
+  returned: success
+  type: str
+  sample: i-44-3992-VM
+user_data:
+  description: Optional data sent to the instance.
+ returned: success + type: str + sample: VXNlciBkYXRhIGV4YW1wbGUK +''' + +import base64 +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_bytes, to_text +from ansible_collections.community.general.plugins.module_utils.cloudstack import ( + AnsibleCloudStack, + cs_argument_spec, + cs_required_together +) + + +class AnsibleCloudStackInstance(AnsibleCloudStack): + + def __init__(self, module): + super(AnsibleCloudStackInstance, self).__init__(module) + self.returns = { + 'group': 'group', + 'hypervisor': 'hypervisor', + 'instancename': 'instance_name', + 'publicip': 'public_ip', + 'passwordenabled': 'password_enabled', + 'password': 'password', + 'serviceofferingname': 'service_offering', + 'isoname': 'iso', + 'templatename': 'template', + 'templatedisplaytext': 'template_display_text', + 'keypair': 'ssh_key', + 'hostname': 'host', + } + self.instance = None + self.template = None + self.iso = None + + def get_service_offering_id(self): + service_offering = self.module.params.get('service_offering') + + service_offerings = self.query_api('listServiceOfferings') + if service_offerings: + if not service_offering: + return service_offerings['serviceoffering'][0]['id'] + + for s in service_offerings['serviceoffering']: + if service_offering in [s['name'], s['id']]: + return s['id'] + self.fail_json(msg="Service offering '%s' not found" % service_offering) + + def get_host_id(self): + host_name = self.module.params.get('host') + if not host_name: + return None + + args = { + 'type': 'routing', + 'zoneid': self.get_zone(key='id'), + } + hosts = self.query_api('listHosts', **args) + if hosts: + for h in hosts['host']: + if h['name'] == host_name: + return h['id'] + + self.fail_json(msg="Host '%s' not found" % host_name) + + def get_template_or_iso(self, key=None): + template = self.module.params.get('template') + iso = self.module.params.get('iso') + + if not template and not iso: + return None + + args = { + 'account': self.get_account(key='name'), + 'domainid': self.get_domain(key='id'), + 'projectid': self.get_project(key='id'), + 'zoneid': self.get_zone(key='id'), + 'isrecursive': True, + 'fetch_list': True, + } + + if template: + if self.template: + return self._get_by_key(key, self.template) + + rootdisksize = self.module.params.get('root_disk_size') + args['templatefilter'] = self.module.params.get('template_filter') + args['fetch_list'] = True + templates = self.query_api('listTemplates', **args) + if templates: + for t in templates: + if template in [t['displaytext'], t['name'], t['id']]: + if rootdisksize and t['size'] > rootdisksize * 1024 ** 3: + continue + self.template = t + return self._get_by_key(key, self.template) + + if rootdisksize: + more_info = " (with size <= %s)" % rootdisksize + else: + more_info = "" + + self.module.fail_json(msg="Template '%s' not found%s" % (template, more_info)) + + elif iso: + if self.iso: + return self._get_by_key(key, self.iso) + + args['isofilter'] = self.module.params.get('template_filter') + args['fetch_list'] = True + isos = self.query_api('listIsos', **args) + if isos: + for i in isos: + if iso in [i['displaytext'], i['name'], i['id']]: + self.iso = i + return self._get_by_key(key, self.iso) + + self.module.fail_json(msg="ISO '%s' not found" % iso) + + def get_instance(self): + instance = self.instance + if not instance: + instance_name = self.get_or_fallback('name', 'display_name') + args = { + 'account': self.get_account(key='name'), + 'domainid': self.get_domain(key='id'), + 'projectid': 
self.get_project(key='id'), + 'fetch_list': True, + } + # Do not pass zoneid, as the instance name must be unique across zones. + instances = self.query_api('listVirtualMachines', **args) + if instances: + for v in instances: + if instance_name.lower() in [v['name'].lower(), v['displayname'].lower(), v['id']]: + self.instance = v + break + return self.instance + + def _get_instance_user_data(self, instance): + # Query the user data if we need to + if 'userdata' in instance: + return instance['userdata'] + + user_data = "" + if self.get_user_data() is not None and instance.get('id'): + res = self.query_api('getVirtualMachineUserData', virtualmachineid=instance['id']) + user_data = res['virtualmachineuserdata'].get('userdata', "") + return user_data + + def get_iptonetwork_mappings(self): + network_mappings = self.module.params.get('ip_to_networks') + if network_mappings is None: + return + + if network_mappings and self.module.params.get('networks'): + self.module.fail_json(msg="networks and ip_to_networks are mutually exclusive.") + + network_names = [n['network'] for n in network_mappings] + ids = self.get_network_ids(network_names) + res = [] + for i, data in enumerate(network_mappings): + res.append({'networkid': ids[i], 'ip': data['ip']}) + return res + + def get_ssh_keypair(self, key=None, name=None, fail_on_missing=True): + ssh_key_name = name or self.module.params.get('ssh_key') + if ssh_key_name is None: + return + + args = { + 'domainid': self.get_domain('id'), + 'account': self.get_account('name'), + 'projectid': self.get_project('id'), + 'name': ssh_key_name, + } + ssh_key_pairs = self.query_api('listSSHKeyPairs', **args) + if 'sshkeypair' in ssh_key_pairs: + return self._get_by_key(key=key, my_dict=ssh_key_pairs['sshkeypair'][0]) + + elif fail_on_missing: + self.module.fail_json(msg="SSH key not found: %s" % ssh_key_name) + + def ssh_key_has_changed(self): + ssh_key_name = self.module.params.get('ssh_key') + if ssh_key_name is None: + return False + + # Fails if keypair for param is inexistent + param_ssh_key_fp = self.get_ssh_keypair(key='fingerprint') + + # CloudStack 4.5 does return keypair on instance for a non existent key. + instance_ssh_key_name = self.instance.get('keypair') + if instance_ssh_key_name is None: + return True + + # Get fingerprint for keypair of instance but do not fail if inexistent. 
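+        # For example (hypothetical fingerprints), a change is detected when
+        # the parameter key 'a1:b2:...:0c' differs from the instance key
+        # '9e:4b:...:2d'.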
+        instance_ssh_key_fp = self.get_ssh_keypair(key='fingerprint', name=instance_ssh_key_name, fail_on_missing=False)
+        if not instance_ssh_key_fp:
+            return True
+
+        # Compare fingerprints to ensure the keypair changed
+        if instance_ssh_key_fp != param_ssh_key_fp:
+            return True
+        return False
+
+    def security_groups_has_changed(self):
+        security_groups = self.module.params.get('security_groups')
+        if security_groups is None:
+            return False
+
+        security_groups = [s.lower() for s in security_groups]
+        instance_security_groups = self.instance.get('securitygroup') or []
+
+        instance_security_group_names = []
+        for instance_security_group in instance_security_groups:
+            if instance_security_group['name'].lower() not in security_groups:
+                return True
+            else:
+                instance_security_group_names.append(instance_security_group['name'].lower())
+
+        for security_group in security_groups:
+            if security_group not in instance_security_group_names:
+                return True
+        return False
+
+    def get_network_ids(self, network_names=None):
+        if network_names is None:
+            network_names = self.module.params.get('networks')
+
+        if not network_names:
+            return None
+
+        args = {
+            'account': self.get_account(key='name'),
+            'domainid': self.get_domain(key='id'),
+            'projectid': self.get_project(key='id'),
+            'zoneid': self.get_zone(key='id'),
+            'fetch_list': True,
+        }
+        networks = self.query_api('listNetworks', **args)
+        if not networks:
+            self.module.fail_json(msg="No networks available")
+
+        network_ids = []
+        network_displaytexts = []
+        for network_name in network_names:
+            for n in networks:
+                if network_name in [n['displaytext'], n['name'], n['id']]:
+                    network_ids.append(n['id'])
+                    network_displaytexts.append(n['name'])
+                    break
+
+        if len(network_ids) != len(network_names):
+            self.module.fail_json(msg="Could not find all networks, networks list found: %s" % network_displaytexts)
+
+        return network_ids
+
+    def present_instance(self, start_vm=True):
+        instance = self.get_instance()
+
+        if not instance:
+            instance = self.deploy_instance(start_vm=start_vm)
+        else:
+            instance = self.recover_instance(instance=instance)
+            instance = self.update_instance(instance=instance, start_vm=start_vm)
+
+        # In check mode, we do not necessarily have an instance
+        if instance:
+            instance = self.ensure_tags(resource=instance, resource_type='UserVm')
+            # refresh instance data
+            self.instance = instance
+
+        return instance
+
+    def get_user_data(self):
+        user_data = self.module.params.get('user_data')
+        if user_data is not None:
+            user_data = to_text(base64.b64encode(to_bytes(user_data)))
+        return user_data
+
+    def get_details(self):
+        details = self.module.params.get('details')
+        cpu = self.module.params.get('cpu')
+        cpu_speed = self.module.params.get('cpu_speed')
+        memory = self.module.params.get('memory')
+        if all([cpu, cpu_speed, memory]):
+            # dict has no extends(); use update() and tolerate an unset details param
+            details = details or {}
+            details.update({
+                'cpuNumber': cpu,
+                'cpuSpeed': cpu_speed,
+                'memory': memory,
+            })
+
+        return details
+
+    def deploy_instance(self, start_vm=True):
+        self.result['changed'] = True
+        networkids = self.get_network_ids()
+        if networkids is not None:
+            networkids = ','.join(networkids)
+
+        args = {}
+        args['templateid'] = self.get_template_or_iso(key='id')
+        if not args['templateid']:
+            self.module.fail_json(msg="Template or ISO is required.")
+
+        args['zoneid'] = self.get_zone(key='id')
+        args['serviceofferingid'] = self.get_service_offering_id()
+        # Optional scoping arguments below resolve to None when the
+        # corresponding module parameters are unset.
+        args['account'] = self.get_account(key='name')
+        args['domainid'] = self.get_domain(key='id')
+        args['projectid'] = self.get_project(key='id')
+
args['diskofferingid'] = self.get_disk_offering(key='id') + args['networkids'] = networkids + args['iptonetworklist'] = self.get_iptonetwork_mappings() + args['userdata'] = self.get_user_data() + args['keyboard'] = self.module.params.get('keyboard') + args['ipaddress'] = self.module.params.get('ip_address') + args['ip6address'] = self.module.params.get('ip6_address') + args['name'] = self.module.params.get('name') + args['displayname'] = self.get_or_fallback('display_name', 'name') + args['group'] = self.module.params.get('group') + args['keypair'] = self.get_ssh_keypair(key='name') + args['size'] = self.module.params.get('disk_size') + args['startvm'] = start_vm + args['rootdisksize'] = self.module.params.get('root_disk_size') + args['affinitygroupnames'] = self.module.params.get('affinity_groups') + args['details'] = self.get_details() + args['securitygroupnames'] = self.module.params.get('security_groups') + args['hostid'] = self.get_host_id() + + template_iso = self.get_template_or_iso() + if 'hypervisor' not in template_iso: + args['hypervisor'] = self.get_hypervisor() + + instance = None + if not self.module.check_mode: + instance = self.query_api('deployVirtualMachine', **args) + + poll_async = self.module.params.get('poll_async') + if poll_async: + instance = self.poll_job(instance, 'virtualmachine') + return instance + + def update_instance(self, instance, start_vm=True): + # Service offering data + args_service_offering = { + 'id': instance['id'], + } + if self.module.params.get('service_offering'): + args_service_offering['serviceofferingid'] = self.get_service_offering_id() + service_offering_changed = self.has_changed(args_service_offering, instance) + + # Instance data + args_instance_update = { + 'id': instance['id'], + 'userdata': self.get_user_data(), + } + instance['userdata'] = self._get_instance_user_data(instance) + args_instance_update['ostypeid'] = self.get_os_type(key='id') + if self.module.params.get('group'): + args_instance_update['group'] = self.module.params.get('group') + if self.module.params.get('display_name'): + args_instance_update['displayname'] = self.module.params.get('display_name') + instance_changed = self.has_changed(args_instance_update, instance) + + ssh_key_changed = self.ssh_key_has_changed() + + security_groups_changed = self.security_groups_has_changed() + + # Volume data + args_volume_update = {} + root_disk_size = self.module.params.get('root_disk_size') + root_disk_size_changed = False + + if root_disk_size is not None: + res = self.query_api('listVolumes', type='ROOT', virtualmachineid=instance['id']) + [volume] = res['volume'] + + size = volume['size'] >> 30 + + args_volume_update['id'] = volume['id'] + args_volume_update['size'] = root_disk_size + + shrinkok = self.module.params.get('allow_root_disk_shrink') + if shrinkok: + args_volume_update['shrinkok'] = shrinkok + + root_disk_size_changed = root_disk_size != size + + changed = [ + service_offering_changed, + instance_changed, + security_groups_changed, + ssh_key_changed, + root_disk_size_changed, + ] + + if any(changed): + force = self.module.params.get('force') + instance_state = instance['state'].lower() + if instance_state == 'stopped' or force: + self.result['changed'] = True + if not self.module.check_mode: + + # Ensure VM has stopped + instance = self.stop_instance() + instance = self.poll_job(instance, 'virtualmachine') + self.instance = instance + + # Change service offering + if service_offering_changed: + res = self.query_api('changeServiceForVirtualMachine', 
**args_service_offering) + instance = res['virtualmachine'] + self.instance = instance + + # Update VM + if instance_changed or security_groups_changed: + if security_groups_changed: + args_instance_update['securitygroupnames'] = ','.join(self.module.params.get('security_groups')) + res = self.query_api('updateVirtualMachine', **args_instance_update) + instance = res['virtualmachine'] + self.instance = instance + + # Reset SSH key + if ssh_key_changed: + # SSH key data + args_ssh_key = {} + args_ssh_key['id'] = instance['id'] + args_ssh_key['projectid'] = self.get_project(key='id') + args_ssh_key['keypair'] = self.module.params.get('ssh_key') + instance = self.query_api('resetSSHKeyForVirtualMachine', **args_ssh_key) + instance = self.poll_job(instance, 'virtualmachine') + self.instance = instance + + # Root disk size + if root_disk_size_changed: + async_result = self.query_api('resizeVolume', **args_volume_update) + self.poll_job(async_result, 'volume') + + # Start VM again if it was running before + if instance_state == 'running' and start_vm: + instance = self.start_instance() + else: + self.module.warn("Changes won't be applied to running instances. " + "Use force=true to allow the instance %s to be stopped/started." % instance['name']) + + # migrate to other host + host_changed = all([ + instance['state'].lower() in ['starting', 'running'], + instance.get('hostname') is not None, + self.module.params.get('host') is not None, + self.module.params.get('host') != instance.get('hostname') + ]) + if host_changed: + self.result['changed'] = True + args_host = { + 'virtualmachineid': instance['id'], + 'hostid': self.get_host_id(), + } + if not self.module.check_mode: + res = self.query_api('migrateVirtualMachine', **args_host) + instance = self.poll_job(res, 'virtualmachine') + + return instance + + def recover_instance(self, instance): + if instance['state'].lower() in ['destroying', 'destroyed']: + self.result['changed'] = True + if not self.module.check_mode: + res = self.query_api('recoverVirtualMachine', id=instance['id']) + instance = res['virtualmachine'] + return instance + + def absent_instance(self): + instance = self.get_instance() + if instance: + if instance['state'].lower() not in ['expunging', 'destroying', 'destroyed']: + self.result['changed'] = True + if not self.module.check_mode: + res = self.query_api('destroyVirtualMachine', id=instance['id']) + + poll_async = self.module.params.get('poll_async') + if poll_async: + instance = self.poll_job(res, 'virtualmachine') + return instance + + def expunge_instance(self): + instance = self.get_instance() + if instance: + res = {} + if instance['state'].lower() in ['destroying', 'destroyed']: + self.result['changed'] = True + if not self.module.check_mode: + res = self.query_api('destroyVirtualMachine', id=instance['id'], expunge=True) + + elif instance['state'].lower() not in ['expunging']: + self.result['changed'] = True + if not self.module.check_mode: + res = self.query_api('destroyVirtualMachine', id=instance['id'], expunge=True) + + poll_async = self.module.params.get('poll_async') + if poll_async: + res = self.poll_job(res, 'virtualmachine') + return instance + + def stop_instance(self): + instance = self.get_instance() + # in check mode instance may not be instantiated + if instance: + if instance['state'].lower() in ['stopping', 'stopped']: + return instance + + if instance['state'].lower() in ['starting', 'running']: + self.result['changed'] = True + if not self.module.check_mode: + instance = 
self.query_api('stopVirtualMachine', id=instance['id']) + + poll_async = self.module.params.get('poll_async') + if poll_async: + instance = self.poll_job(instance, 'virtualmachine') + return instance + + def start_instance(self): + instance = self.get_instance() + # in check mode instance may not be instantiated + if instance: + if instance['state'].lower() in ['starting', 'running']: + return instance + + if instance['state'].lower() in ['stopped', 'stopping']: + self.result['changed'] = True + if not self.module.check_mode: + args = { + 'id': instance['id'], + 'hostid': self.get_host_id(), + } + instance = self.query_api('startVirtualMachine', **args) + + poll_async = self.module.params.get('poll_async') + if poll_async: + instance = self.poll_job(instance, 'virtualmachine') + return instance + + def restart_instance(self): + instance = self.get_instance() + # in check mode instance may not be instantiated + if instance: + if instance['state'].lower() in ['running', 'starting']: + self.result['changed'] = True + if not self.module.check_mode: + instance = self.query_api('rebootVirtualMachine', id=instance['id']) + + poll_async = self.module.params.get('poll_async') + if poll_async: + instance = self.poll_job(instance, 'virtualmachine') + + elif instance['state'].lower() in ['stopping', 'stopped']: + instance = self.start_instance() + return instance + + def restore_instance(self): + instance = self.get_instance() + self.result['changed'] = True + # in check mode instance may not be instantiated + if instance: + args = {} + args['templateid'] = self.get_template_or_iso(key='id') + args['virtualmachineid'] = instance['id'] + res = self.query_api('restoreVirtualMachine', **args) + + poll_async = self.module.params.get('poll_async') + if poll_async: + instance = self.poll_job(res, 'virtualmachine') + return instance + + def get_result(self, instance): + super(AnsibleCloudStackInstance, self).get_result(instance) + if instance: + self.result['user_data'] = self._get_instance_user_data(instance) + if 'securitygroup' in instance: + security_groups = [] + for securitygroup in instance['securitygroup']: + security_groups.append(securitygroup['name']) + self.result['security_groups'] = security_groups + if 'affinitygroup' in instance: + affinity_groups = [] + for affinitygroup in instance['affinitygroup']: + affinity_groups.append(affinitygroup['name']) + self.result['affinity_groups'] = affinity_groups + if 'nic' in instance: + for nic in instance['nic']: + if nic['isdefault']: + if 'ipaddress' in nic: + self.result['default_ip'] = nic['ipaddress'] + if 'ip6address' in nic: + self.result['default_ip6'] = nic['ip6address'] + return self.result + + +def main(): + argument_spec = cs_argument_spec() + argument_spec.update(dict( + name=dict(), + display_name=dict(), + group=dict(), + state=dict(choices=['present', 'deployed', 'started', 'stopped', 'restarted', 'restored', 'absent', 'destroyed', 'expunged'], default='present'), + service_offering=dict(), + cpu=dict(type='int'), + cpu_speed=dict(type='int'), + memory=dict(type='int'), + template=dict(), + iso=dict(), + template_filter=dict( + default="executable", + aliases=['iso_filter'], + choices=['all', 'featured', 'self', 'selfexecutable', 'sharedexecutable', 'executable', 'community'] + ), + networks=dict(type='list', aliases=['network']), + ip_to_networks=dict(type='list', aliases=['ip_to_network']), + ip_address=dict(), + ip6_address=dict(), + disk_offering=dict(), + disk_size=dict(type='int'), + root_disk_size=dict(type='int'), + 
keyboard=dict(type='str', choices=['de', 'de-ch', 'es', 'fi', 'fr', 'fr-be', 'fr-ch', 'is', 'it', 'jp', 'nl-be', 'no', 'pt', 'uk', 'us']), + hypervisor=dict(), + host=dict(), + security_groups=dict(type='list', aliases=['security_group']), + affinity_groups=dict(type='list', aliases=['affinity_group']), + domain=dict(), + account=dict(), + project=dict(), + user_data=dict(), + zone=dict(), + ssh_key=dict(), + force=dict(type='bool', default=False), + tags=dict(type='list', aliases=['tag']), + details=dict(type='dict'), + poll_async=dict(type='bool', default=True), + allow_root_disk_shrink=dict(type='bool', default=False), + )) + + required_together = cs_required_together() + required_together.extend([ + ['cpu', 'cpu_speed', 'memory'], + ]) + + module = AnsibleModule( + argument_spec=argument_spec, + required_together=required_together, + required_one_of=( + ['display_name', 'name'], + ), + mutually_exclusive=( + ['template', 'iso'], + ), + supports_check_mode=True + ) + + acs_instance = AnsibleCloudStackInstance(module) + + state = module.params.get('state') + + if state in ['absent', 'destroyed']: + instance = acs_instance.absent_instance() + + elif state in ['expunged']: + instance = acs_instance.expunge_instance() + + elif state in ['restored']: + acs_instance.present_instance() + instance = acs_instance.restore_instance() + + elif state in ['present', 'deployed']: + instance = acs_instance.present_instance() + + elif state in ['stopped']: + acs_instance.present_instance(start_vm=False) + instance = acs_instance.stop_instance() + + elif state in ['started']: + acs_instance.present_instance() + instance = acs_instance.start_instance() + + elif state in ['restarted']: + acs_instance.present_instance() + instance = acs_instance.restart_instance() + + if instance and 'state' in instance and instance['state'].lower() == 'error': + module.fail_json(msg="Instance named '%s' in error state." % module.params.get('name')) + + result = acs_instance.get_result(instance) + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/cloudstack/cs_instance_facts.py b/plugins/modules/cloud/cloudstack/cs_instance_facts.py new file mode 100644 index 0000000000..4ad1ad77b7 --- /dev/null +++ b/plugins/modules/cloud/cloudstack/cs_instance_facts.py @@ -0,0 +1,375 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# (c) 2016, René Moser +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['deprecated'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: cs_instance_facts +short_description: Gathering facts from the API of instances from Apache CloudStack based clouds. +description: + - Gathering facts from the API of an instance. +deprecated: + removed_in: "2.13" + why: Transformed into an info module. + alternative: Use M(cs_instance_info) instead. +author: René Moser (@resmo) +options: + name: + description: + - Name or display name of the instance. + type: str + required: true + domain: + description: + - Domain the instance is related to. + type: str + account: + description: + - Account the instance is related to. + type: str + project: + description: + - Project the instance is related to. 
+    type: str
+extends_documentation_fragment:
+- community.general.cloudstack
+
+'''
+
+EXAMPLES = '''
+- name: gather instance facts
+  cs_instance_facts:
+    name: web-vm-1
+  delegate_to: localhost
+  register: vm
+
+- debug:
+    var: cloudstack_instance
+
+- debug:
+    var: vm
+'''
+
+RETURN = '''
+---
+id:
+  description: UUID of the instance.
+  returned: success
+  type: str
+  sample: 04589590-ac63-4ffc-93f5-b698b8ac38b6
+name:
+  description: Name of the instance.
+  returned: success
+  type: str
+  sample: web-01
+display_name:
+  description: Display name of the instance.
+  returned: success
+  type: str
+  sample: web-01
+group:
+  description: Group name the instance is related to.
+  returned: success
+  type: str
+  sample: web
+created:
+  description: Date the instance was created.
+  returned: success
+  type: str
+  sample: 2014-12-01T14:57:57+0100
+password_enabled:
+  description: True if password setting is enabled.
+  returned: success
+  type: bool
+  sample: true
+password:
+  description: The password of the instance, if it exists.
+  returned: success
+  type: str
+  sample: Ge2oe7Do
+ssh_key:
+  description: Name of SSH key deployed to instance.
+  returned: success
+  type: str
+  sample: key@work
+domain:
+  description: Domain the instance is related to.
+  returned: success
+  type: str
+  sample: example domain
+account:
+  description: Account the instance is related to.
+  returned: success
+  type: str
+  sample: example account
+project:
+  description: Name of project the instance is related to.
+  returned: success
+  type: str
+  sample: Production
+default_ip:
+  description: Default IP address of the instance.
+  returned: success
+  type: str
+  sample: 10.23.37.42
+public_ip:
+  description: Public IP address associated with the instance via static NAT rule.
+  returned: success
+  type: str
+  sample: 1.2.3.4
+iso:
+  description: Name of ISO the instance was deployed with.
+  returned: success
+  type: str
+  sample: Debian-8-64bit
+template:
+  description: Name of template the instance was deployed with.
+  returned: success
+  type: str
+  sample: Debian-8-64bit
+service_offering:
+  description: Name of the service offering the instance has.
+  returned: success
+  type: str
+  sample: 2cpu_2gb
+zone:
+  description: Name of zone the instance is in.
+  returned: success
+  type: str
+  sample: ch-gva-2
+state:
+  description: State of the instance.
+  returned: success
+  type: str
+  sample: Running
+security_groups:
+  description: Security groups the instance is in.
+  returned: success
+  type: list
+  sample: '[ "default" ]'
+affinity_groups:
+  description: Affinity groups the instance is in.
+  returned: success
+  type: list
+  sample: '[ "webservers" ]'
+tags:
+  description: List of resource tags associated with the instance.
+  returned: success
+  type: list
+  sample: '[ { "key": "foo", "value": "bar" } ]'
+hypervisor:
+  description: Hypervisor related to this instance.
+  returned: success
+  type: str
+  sample: KVM
+host:
+  description: Host the instance is running on.
+  returned: success and instance is running
+  type: str
+  sample: host01.example.com
+  version_added: '2.6'
+instance_name:
+  description: Internal name of the instance (ROOT admin only).
+  returned: success
+  type: str
+  sample: i-44-3992-VM
+volumes:
+  description: List of dictionaries of the volumes attached to the instance.
+  returned: success
+  type: list
+  sample: '[ { name: "ROOT-1369", type: "ROOT", size: 10737418240 }, { name: "data01", type: "DATADISK", size: 10737418240 } ]'
+nic:
+  description: List of dictionaries of the instance nics.
+ returned: success + type: complex + version_added: '2.8' + contains: + broadcasturi: + description: The broadcast uri of the nic. + returned: success + type: str + sample: vlan://2250 + gateway: + description: The gateway of the nic. + returned: success + type: str + sample: 10.1.2.1 + id: + description: The ID of the nic. + returned: success + type: str + sample: 5dc74fa3-2ec3-48a0-9e0d-6f43365336a9 + ipaddress: + description: The ip address of the nic. + returned: success + type: str + sample: 10.1.2.3 + isdefault: + description: True if nic is default, false otherwise. + returned: success + type: bool + sample: true + isolationuri: + description: The isolation uri of the nic. + returned: success + type: str + sample: vlan://2250 + macaddress: + description: The mac address of the nic. + returned: success + type: str + sample: 06:a2:03:00:08:12 + netmask: + description: The netmask of the nic. + returned: success + type: str + sample: 255.255.255.0 + networkid: + description: The ID of the corresponding network. + returned: success + type: str + sample: 432ce27b-c2bb-4e12-a88c-a919cd3a3017 + networkname: + description: The name of the corresponding network. + returned: success + type: str + sample: network1 + traffictype: + description: The traffic type of the nic. + returned: success + type: str + sample: Guest + type: + description: The type of the network. + returned: success + type: str + sample: Shared +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.cloudstack import AnsibleCloudStack, cs_argument_spec + + +class AnsibleCloudStackInstanceFacts(AnsibleCloudStack): + + def __init__(self, module): + super(AnsibleCloudStackInstanceFacts, self).__init__(module) + self.instance = None + self.returns = { + 'group': 'group', + 'hypervisor': 'hypervisor', + 'instancename': 'instance_name', + 'publicip': 'public_ip', + 'passwordenabled': 'password_enabled', + 'password': 'password', + 'serviceofferingname': 'service_offering', + 'isoname': 'iso', + 'templatename': 'template', + 'keypair': 'ssh_key', + 'hostname': 'host', + } + self.facts = { + 'cloudstack_instance': None, + } + + def get_instance(self): + instance = self.instance + if not instance: + instance_name = self.module.params.get('name') + + args = { + 'account': self.get_account(key='name'), + 'domainid': self.get_domain(key='id'), + 'projectid': self.get_project(key='id'), + 'fetch_list': True, + } + # Do not pass zoneid, as the instance name must be unique across zones. 
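+            # Matching below is case-insensitive on name and display name and
+            # also accepts the instance UUID.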
+ instances = self.query_api('listVirtualMachines', **args) + if instances: + for v in instances: + if instance_name.lower() in [v['name'].lower(), v['displayname'].lower(), v['id']]: + self.instance = v + break + return self.instance + + def get_volumes(self, instance): + volume_details = [] + if instance: + args = { + 'account': self.get_account(key='name'), + 'domainid': self.get_domain(key='id'), + 'projectid': self.get_project(key='id'), + 'virtualmachineid': instance['id'], + 'fetch_list': True, + } + + volumes = self.query_api('listVolumes', **args) + if volumes: + for vol in volumes: + volume_details.append({'size': vol['size'], 'type': vol['type'], 'name': vol['name']}) + return volume_details + + def run(self): + instance = self.get_instance() + if not instance: + self.module.fail_json(msg="Instance not found: %s" % self.module.params.get('name')) + return instance + + def get_result(self, instance): + super(AnsibleCloudStackInstanceFacts, self).get_result(instance) + if instance: + if 'securitygroup' in instance: + security_groups = [] + for securitygroup in instance['securitygroup']: + security_groups.append(securitygroup['name']) + self.result['security_groups'] = security_groups + if 'affinitygroup' in instance: + affinity_groups = [] + for affinitygroup in instance['affinitygroup']: + affinity_groups.append(affinitygroup['name']) + self.result['affinity_groups'] = affinity_groups + if 'nic' in instance: + for nic in instance['nic']: + if nic['isdefault'] and 'ipaddress' in nic: + self.result['default_ip'] = nic['ipaddress'] + self.result['nic'] = instance['nic'] + volumes = self.get_volumes(instance) + if volumes: + self.result['volumes'] = volumes + return self.result + + +def main(): + argument_spec = cs_argument_spec() + argument_spec.update(dict( + name=dict(required=True), + domain=dict(), + account=dict(), + project=dict(), + )) + + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + acs_instance_facts = AnsibleCloudStackInstanceFacts(module=module) + cs_instance_facts = acs_instance_facts.get_result_and_facts( + facts_name='cloudstack_instance', + resource=acs_instance_facts.run() + ) + module.exit_json(**cs_instance_facts) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/cloudstack/cs_instance_info.py b/plugins/modules/cloud/cloudstack/cs_instance_info.py new file mode 100644 index 0000000000..e3a1211099 --- /dev/null +++ b/plugins/modules/cloud/cloudstack/cs_instance_info.py @@ -0,0 +1,379 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# (c) 2016, René Moser +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['stableinterface'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: cs_instance_info +short_description: Gathering information from the API of instances from Apache CloudStack based clouds. +description: + - Gathering information from the API of an instance. +author: René Moser (@resmo) +options: + name: + description: + - Name or display name of the instance. + - If not specified, all instances are returned + type: str + required: false + domain: + description: + - Domain the instance is related to. + type: str + account: + description: + - Account the instance is related to. + type: str + project: + description: + - Project the instance is related to. 
+ type: str +extends_documentation_fragment: +- community.general.cloudstack + +''' + +EXAMPLES = ''' +- name: Gather instance information + cs_instance_info: + name: web-vm-1 + delegate_to: localhost + register: vm + +- name: Show the returned results of the registered variable + debug: + msg: "{{ vm }}" + +- name: Gather information from all instances + cs_instance_info: + delegate_to: localhost + register: vms + +- name: Show information on all instances + debug: + msg: "{{ vms }}" +''' + +RETURN = ''' +--- +instances: + description: A list of matching instances. + type: list + returned: success + contains: + id: + description: UUID of the instance. + returned: success + type: str + sample: 04589590-ac63-4ffc-93f5-b698b8ac38b6 + name: + description: Name of the instance. + returned: success + type: str + sample: web-01 + display_name: + description: Display name of the instance. + returned: success + type: str + sample: web-01 + group: + description: Group name of the instance is related. + returned: success + type: str + sample: web + created: + description: Date of the instance was created. + returned: success + type: str + sample: 2014-12-01T14:57:57+0100 + password_enabled: + description: True if password setting is enabled. + returned: success + type: bool + sample: true + password: + description: The password of the instance if exists. + returned: success + type: str + sample: Ge2oe7Do + ssh_key: + description: Name of SSH key deployed to instance. + returned: success + type: str + sample: key@work + domain: + description: Domain the instance is related to. + returned: success + type: str + sample: example domain + account: + description: Account the instance is related to. + returned: success + type: str + sample: example account + project: + description: Name of project the instance is related to. + returned: success + type: str + sample: Production + default_ip: + description: Default IP address of the instance. + returned: success + type: str + sample: 10.23.37.42 + public_ip: + description: Public IP address with instance via static NAT rule. + returned: success + type: str + sample: 1.2.3.4 + iso: + description: Name of ISO the instance was deployed with. + returned: success + type: str + sample: Debian-8-64bit + template: + description: Name of template the instance was deployed with. + returned: success + type: str + sample: Debian-8-64bit + service_offering: + description: Name of the service offering the instance has. + returned: success + type: str + sample: 2cpu_2gb + zone: + description: Name of zone the instance is in. + returned: success + type: str + sample: ch-gva-2 + state: + description: State of the instance. + returned: success + type: str + sample: Running + security_groups: + description: Security groups the instance is in. + returned: success + type: list + sample: '[ "default" ]' + affinity_groups: + description: Affinity groups the instance is in. + returned: success + type: list + sample: '[ "webservers" ]' + tags: + description: List of resource tags associated with the instance. + returned: success + type: list + sample: '[ { "key": "foo", "value": "bar" } ]' + hypervisor: + description: Hypervisor related to this instance. + returned: success + type: str + sample: KVM + host: + description: Host the instance is running on. + returned: success and instance is running + type: str + sample: host01.example.com + version_added: '2.6' + instance_name: + description: Internal name of the instance (ROOT admin only). 
+      returned: success
+      type: str
+      sample: i-44-3992-VM
+    volumes:
+      description: List of dictionaries of the volumes attached to the instance.
+      returned: success
+      type: list
+      sample: '[ { name: "ROOT-1369", type: "ROOT", size: 10737418240 }, { name: "data01", type: "DATADISK", size: 10737418240 } ]'
+    nic:
+      description: List of dictionaries of the instance nics.
+      returned: success
+      type: complex
+      version_added: '2.8'
+      contains:
+        broadcasturi:
+          description: The broadcast uri of the nic.
+          returned: success
+          type: str
+          sample: vlan://2250
+        gateway:
+          description: The gateway of the nic.
+          returned: success
+          type: str
+          sample: 10.1.2.1
+        id:
+          description: The ID of the nic.
+          returned: success
+          type: str
+          sample: 5dc74fa3-2ec3-48a0-9e0d-6f43365336a9
+        ipaddress:
+          description: The ip address of the nic.
+          returned: success
+          type: str
+          sample: 10.1.2.3
+        isdefault:
+          description: True if nic is default, false otherwise.
+          returned: success
+          type: bool
+          sample: true
+        isolationuri:
+          description: The isolation uri of the nic.
+          returned: success
+          type: str
+          sample: vlan://2250
+        macaddress:
+          description: The mac address of the nic.
+          returned: success
+          type: str
+          sample: 06:a2:03:00:08:12
+        netmask:
+          description: The netmask of the nic.
+          returned: success
+          type: str
+          sample: 255.255.255.0
+        networkid:
+          description: The ID of the corresponding network.
+          returned: success
+          type: str
+          sample: 432ce27b-c2bb-4e12-a88c-a919cd3a3017
+        networkname:
+          description: The name of the corresponding network.
+          returned: success
+          type: str
+          sample: network1
+        traffictype:
+          description: The traffic type of the nic.
+          returned: success
+          type: str
+          sample: Guest
+        type:
+          description: The type of the network.
+          returned: success
+          type: str
+          sample: Shared
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.cloudstack import AnsibleCloudStack, cs_argument_spec
+
+
+class AnsibleCloudStackInstanceInfo(AnsibleCloudStack):
+
+    def __init__(self, module):
+        super(AnsibleCloudStackInstanceInfo, self).__init__(module)
+        self.returns = {
+            'group': 'group',
+            'hypervisor': 'hypervisor',
+            'instancename': 'instance_name',
+            'publicip': 'public_ip',
+            'passwordenabled': 'password_enabled',
+            'password': 'password',
+            'serviceofferingname': 'service_offering',
+            'isoname': 'iso',
+            'templatename': 'template',
+            'keypair': 'ssh_key',
+            'hostname': 'host',
+        }
+
+    def get_instances(self):
+        instance_name = self.module.params.get('name')
+
+        args = {
+            'account': self.get_account(key='name'),
+            'domainid': self.get_domain(key='id'),
+            'projectid': self.get_project(key='id'),
+            'fetch_list': True,
+        }
+        # Do not pass zoneid, as the instance name must be unique across zones.
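+        # Without a name, every instance in the account/domain/project scope
+        # is returned; otherwise matching is case-insensitive on name and
+        # display name, or by UUID.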
+ instances = self.query_api('listVirtualMachines', **args) + if not instance_name: + return instances or [] + if instances: + for v in instances: + if instance_name.lower() in [v['name'].lower(), v['displayname'].lower(), v['id']]: + return [v] + return [] + + def get_volumes(self, instance): + volume_details = [] + if instance: + args = { + 'account': self.get_account(key='name'), + 'domainid': self.get_domain(key='id'), + 'projectid': self.get_project(key='id'), + 'virtualmachineid': instance['id'], + 'fetch_list': True, + } + + volumes = self.query_api('listVolumes', **args) + if volumes: + for vol in volumes: + volume_details.append({'size': vol['size'], 'type': vol['type'], 'name': vol['name']}) + return volume_details + + def run(self): + instances = self.get_instances() + if self.module.params.get('name') and not instances: + self.module.fail_json(msg="Instance not found: %s" % self.module.params.get('name')) + return { + 'instances': [self.update_result(resource) for resource in instances] + } + + def update_result(self, instance, result=None): + result = super(AnsibleCloudStackInstanceInfo, self).update_result(instance, result) + if instance: + if 'securitygroup' in instance: + security_groups = [] + for securitygroup in instance['securitygroup']: + security_groups.append(securitygroup['name']) + result['security_groups'] = security_groups + if 'affinitygroup' in instance: + affinity_groups = [] + for affinitygroup in instance['affinitygroup']: + affinity_groups.append(affinitygroup['name']) + result['affinity_groups'] = affinity_groups + if 'nic' in instance: + for nic in instance['nic']: + if nic['isdefault'] and 'ipaddress' in nic: + result['default_ip'] = nic['ipaddress'] + result['nic'] = instance['nic'] + volumes = self.get_volumes(instance) + if volumes: + result['volumes'] = volumes + return result + + +def main(): + argument_spec = cs_argument_spec() + argument_spec.update(dict( + name=dict(), + domain=dict(), + account=dict(), + project=dict(), + )) + + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + + acs_instance_info = AnsibleCloudStackInstanceInfo(module=module) + cs_instance_info = acs_instance_info.run() + module.exit_json(**cs_instance_info) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/cloudstack/cs_instance_nic.py b/plugins/modules/cloud/cloudstack/cs_instance_nic.py new file mode 100644 index 0000000000..c2a0498d89 --- /dev/null +++ b/plugins/modules/cloud/cloudstack/cs_instance_nic.py @@ -0,0 +1,290 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# (c) 2017, Marc-Aurèle Brothier @marcaurele +# (c) 2017, René Moser +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: cs_instance_nic +short_description: Manages NICs of an instance on Apache CloudStack based clouds. +description: + - Add and remove nic to and from network +author: + - Marc-Aurèle Brothier (@marcaurele) + - René Moser (@resmo) +options: + vm: + description: + - Name of instance. + required: true + type: str + aliases: [ name ] + network: + description: + - Name of the network. + type: str + required: true + ip_address: + description: + - IP address to be used for the nic. + type: str + vpc: + description: + - Name of the VPC the I(vm) is related to. 
+ type: str + domain: + description: + - Domain the instance is related to. + type: str + account: + description: + - Account the instance is related to. + type: str + project: + description: + - Name of the project the instance is deployed in. + type: str + zone: + description: + - Name of the zone in which the instance is deployed in. + - If not set, default zone is used. + type: str + state: + description: + - State of the nic. + type: str + default: present + choices: [ present, absent ] + poll_async: + description: + - Poll async jobs until job has finished. + type: bool + default: yes +extends_documentation_fragment: +- community.general.cloudstack + +''' + +EXAMPLES = ''' +- name: Add a nic on another network + cs_instance_nic: + vm: privnet + network: privNetForBasicZone + delegate_to: localhost + +- name: Ensure IP address on a nic + cs_instance_nic: + vm: privnet + ip_address: 10.10.11.32 + network: privNetForBasicZone + delegate_to: localhost + +- name: Remove a secondary nic + cs_instance_nic: + vm: privnet + state: absent + network: privNetForBasicZone + delegate_to: localhost +''' + +RETURN = ''' +--- +id: + description: UUID of the nic. + returned: success + type: str + sample: 87b1e0ce-4e01-11e4-bb66-0050569e64b8 +vm: + description: Name of the VM. + returned: success + type: str + sample: web-01 +ip_address: + description: Primary IP of the NIC. + returned: success + type: str + sample: 10.10.10.10 +netmask: + description: Netmask of the NIC. + returned: success + type: str + sample: 255.255.255.0 +mac_address: + description: MAC address of the NIC. + returned: success + type: str + sample: 02:00:33:31:00:e4 +network: + description: Name of the network if not default. + returned: success + type: str + sample: sync network +domain: + description: Domain the VM is related to. + returned: success + type: str + sample: example domain +account: + description: Account the VM is related to. + returned: success + type: str + sample: example account +project: + description: Name of project the VM is related to. 
+ returned: success + type: str + sample: Production +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.cloudstack import (AnsibleCloudStack, + cs_argument_spec, + cs_required_together) + + +class AnsibleCloudStackInstanceNic(AnsibleCloudStack): + + def __init__(self, module): + super(AnsibleCloudStackInstanceNic, self).__init__(module) + self.nic = None + self.returns = { + 'ipaddress': 'ip_address', + 'macaddress': 'mac_address', + 'netmask': 'netmask', + } + + def get_nic(self): + if self.nic: + return self.nic + args = { + 'virtualmachineid': self.get_vm(key='id'), + 'networkid': self.get_network(key='id'), + } + nics = self.query_api('listNics', **args) + if nics: + self.nic = nics['nic'][0] + return self.nic + return None + + def get_nic_from_result(self, result): + for nic in result.get('nic') or []: + if nic['networkid'] == self.get_network(key='id'): + return nic + + def add_nic(self): + self.result['changed'] = True + args = { + 'virtualmachineid': self.get_vm(key='id'), + 'networkid': self.get_network(key='id'), + 'ipaddress': self.module.params.get('ip_address'), + } + if not self.module.check_mode: + res = self.query_api('addNicToVirtualMachine', **args) + + if self.module.params.get('poll_async'): + vm = self.poll_job(res, 'virtualmachine') + self.nic = self.get_nic_from_result(result=vm) + return self.nic + + def update_nic(self, nic): + # Do not try to update if no IP address is given + ip_address = self.module.params.get('ip_address') + if not ip_address: + return nic + + args = { + 'nicid': nic['id'], + 'ipaddress': ip_address, + } + if self.has_changed(args, nic, ['ipaddress']): + self.result['changed'] = True + if not self.module.check_mode: + res = self.query_api('updateVmNicIp', **args) + + if self.module.params.get('poll_async'): + vm = self.poll_job(res, 'virtualmachine') + self.nic = self.get_nic_from_result(result=vm) + return self.nic + + def remove_nic(self, nic): + self.result['changed'] = True + args = { + 'virtualmachineid': self.get_vm(key='id'), + 'nicid': nic['id'], + } + if not self.module.check_mode: + res = self.query_api('removeNicFromVirtualMachine', **args) + + if self.module.params.get('poll_async'): + self.poll_job(res, 'virtualmachine') + return nic + + def present_nic(self): + nic = self.get_nic() + if not nic: + nic = self.add_nic() + else: + nic = self.update_nic(nic) + return nic + + def absent_nic(self): + nic = self.get_nic() + if nic: + return self.remove_nic(nic) + return nic + + def get_result(self, nic): + super(AnsibleCloudStackInstanceNic, self).get_result(nic) + if nic and not self.module.params.get('network'): + self.module.params['network'] = nic.get('networkid') + self.result['network'] = self.get_network(key='name') + self.result['vm'] = self.get_vm(key='name') + return self.result + + +def main(): + argument_spec = cs_argument_spec() + argument_spec.update(dict( + vm=dict(required=True, aliases=['name']), + network=dict(required=True), + vpc=dict(), + ip_address=dict(), + state=dict(choices=['present', 'absent'], default='present'), + domain=dict(), + account=dict(), + project=dict(), + zone=dict(), + poll_async=dict(type='bool', default=True), + )) + + module = AnsibleModule( + argument_spec=argument_spec, + required_together=cs_required_together(), + supports_check_mode=True, + ) + + acs_nic = AnsibleCloudStackInstanceNic(module) + + state = module.params.get('state') + if state == 'absent': + nic = acs_nic.absent_nic() + else: + nic = 
acs_nic.present_nic() + + result = acs_nic.get_result(nic) + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/cloudstack/cs_instance_nic_secondaryip.py b/plugins/modules/cloud/cloudstack/cs_instance_nic_secondaryip.py new file mode 100644 index 0000000000..ef0f9a8c37 --- /dev/null +++ b/plugins/modules/cloud/cloudstack/cs_instance_nic_secondaryip.py @@ -0,0 +1,273 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# (c) 2017, René Moser +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: cs_instance_nic_secondaryip +short_description: Manages secondary IPs of an instance on Apache CloudStack based clouds. +description: + - Add and remove secondary IPs to and from a NIC of an instance. +author: René Moser (@resmo) +options: + vm: + description: + - Name of instance. + type: str + required: true + aliases: [ name ] + network: + description: + - Name of the network. + - Required to find the NIC if instance has multiple networks assigned. + type: str + vm_guest_ip: + description: + - Secondary IP address to be added to the instance nic. + - If not set, the API always returns a new IP address and idempotency is not given. + type: str + aliases: [ secondary_ip ] + vpc: + description: + - Name of the VPC the I(vm) is related to. + type: str + domain: + description: + - Domain the instance is related to. + type: str + account: + description: + - Account the instance is related to. + type: str + project: + description: + - Name of the project the instance is deployed in. + type: str + zone: + description: + - Name of the zone in which the instance is deployed in. + - If not set, default zone is used. + type: str + state: + description: + - State of the ipaddress. + type: str + default: present + choices: [ present, absent ] + poll_async: + description: + - Poll async jobs until job has finished. + type: bool + default: yes +extends_documentation_fragment: +- community.general.cloudstack + +''' + +EXAMPLES = ''' +- name: Assign a specific IP to the default NIC of the VM + cs_instance_nic_secondaryip: + vm: customer_xy + vm_guest_ip: 10.10.10.10 + delegate_to: localhost + +# Note: If vm_guest_ip is not set, you will get a new IP address on every run. +- name: Assign an IP to the default NIC of the VM + cs_instance_nic_secondaryip: + vm: customer_xy + delegate_to: localhost + +- name: Remove a specific IP from the default NIC + cs_instance_nic_secondaryip: + vm: customer_xy + vm_guest_ip: 10.10.10.10 + state: absent + delegate_to: localhost +''' + +RETURN = ''' +--- +id: + description: UUID of the NIC. + returned: success + type: str + sample: 87b1e0ce-4e01-11e4-bb66-0050569e64b8 +vm: + description: Name of the VM. + returned: success + type: str + sample: web-01 +ip_address: + description: Primary IP of the NIC. + returned: success + type: str + sample: 10.10.10.10 +netmask: + description: Netmask of the NIC. + returned: success + type: str + sample: 255.255.255.0 +mac_address: + description: MAC address of the NIC. + returned: success + type: str + sample: 02:00:33:31:00:e4 +vm_guest_ip: + description: Secondary IP of the NIC. + returned: success + type: str + sample: 10.10.10.10 +network: + description: Name of the network if not default. 
+ returned: success + type: str + sample: sync network +domain: + description: Domain the VM is related to. + returned: success + type: str + sample: example domain +account: + description: Account the VM is related to. + returned: success + type: str + sample: example account +project: + description: Name of project the VM is related to. + returned: success + type: str + sample: Production +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.cloudstack import ( + AnsibleCloudStack, + cs_argument_spec, + cs_required_together +) + + +class AnsibleCloudStackInstanceNicSecondaryIp(AnsibleCloudStack): + + def __init__(self, module): + super(AnsibleCloudStackInstanceNicSecondaryIp, self).__init__(module) + self.vm_guest_ip = self.module.params.get('vm_guest_ip') + self.nic = None + self.returns = { + 'ipaddress': 'ip_address', + 'macaddress': 'mac_address', + 'netmask': 'netmask', + } + + def get_nic(self): + if self.nic: + return self.nic + args = { + 'virtualmachineid': self.get_vm(key='id'), + 'networkid': self.get_network(key='id'), + } + nics = self.query_api('listNics', **args) + if nics: + self.nic = nics['nic'][0] + return self.nic + self.fail_json(msg="NIC for VM %s in network %s not found" % (self.get_vm(key='name'), self.get_network(key='name'))) + + def get_secondary_ip(self): + nic = self.get_nic() + if self.vm_guest_ip: + secondary_ips = nic.get('secondaryip') or [] + for secondary_ip in secondary_ips: + if secondary_ip['ipaddress'] == self.vm_guest_ip: + return secondary_ip + return None + + def present_nic_ip(self): + nic = self.get_nic() + if not self.get_secondary_ip(): + self.result['changed'] = True + args = { + 'nicid': nic['id'], + 'ipaddress': self.vm_guest_ip, + } + + if not self.module.check_mode: + res = self.query_api('addIpToNic', **args) + + poll_async = self.module.params.get('poll_async') + if poll_async: + nic = self.poll_job(res, 'nicsecondaryip') + # Save result for RETURNS + self.vm_guest_ip = nic['ipaddress'] + return nic + + def absent_nic_ip(self): + nic = self.get_nic() + secondary_ip = self.get_secondary_ip() + if secondary_ip: + self.result['changed'] = True + if not self.module.check_mode: + res = self.query_api('removeIpFromNic', id=secondary_ip['id']) + + poll_async = self.module.params.get('poll_async') + if poll_async: + self.poll_job(res, 'nicsecondaryip') + return nic + + def get_result(self, nic): + super(AnsibleCloudStackInstanceNicSecondaryIp, self).get_result(nic) + if nic and not self.module.params.get('network'): + self.module.params['network'] = nic.get('networkid') + self.result['network'] = self.get_network(key='name') + self.result['vm'] = self.get_vm(key='name') + self.result['vm_guest_ip'] = self.vm_guest_ip + return self.result + + +def main(): + argument_spec = cs_argument_spec() + argument_spec.update(dict( + vm=dict(required=True, aliases=['name']), + vm_guest_ip=dict(aliases=['secondary_ip']), + network=dict(), + vpc=dict(), + state=dict(choices=['present', 'absent'], default='present'), + domain=dict(), + account=dict(), + project=dict(), + zone=dict(), + poll_async=dict(type='bool', default=True), + )) + + module = AnsibleModule( + argument_spec=argument_spec, + required_together=cs_required_together(), + supports_check_mode=True, + required_if=([ + ('state', 'absent', ['vm_guest_ip']) + ]) + ) + + acs_instance_nic_secondaryip = AnsibleCloudStackInstanceNicSecondaryIp(module) + state = module.params.get('state') + + if state == 'absent': + nic = 
acs_instance_nic_secondaryip.absent_nic_ip()
+    else:
+        nic = acs_instance_nic_secondaryip.present_nic_ip()
+
+    result = acs_instance_nic_secondaryip.get_result(nic)
+    module.exit_json(**result)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/cloud/cloudstack/cs_instance_password_reset.py b/plugins/modules/cloud/cloudstack/cs_instance_password_reset.py
new file mode 100644
index 0000000000..bd6d0874bc
--- /dev/null
+++ b/plugins/modules/cloud/cloudstack/cs_instance_password_reset.py
@@ -0,0 +1,158 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (c) 2018, Gregor Riepl
+# based on cs_sshkeypair (c) 2015, René Moser
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: cs_instance_password_reset
+short_description: Allows resetting the default password of instances on Apache CloudStack based clouds.
+description:
+  - Resets the default user account's password on an instance.
+  - Requires cloud-init to be installed in the virtual machine.
+  - The passwordenabled flag must be set on the template associated with the VM.
+author: Gregor Riepl (@onitake)
+options:
+  vm:
+    description:
+      - Name of the virtual machine to reset the password on.
+    type: str
+    required: true
+  domain:
+    description:
+      - Name of the domain the virtual machine belongs to.
+    type: str
+  account:
+    description:
+      - Account the virtual machine belongs to.
+    type: str
+  project:
+    description:
+      - Name of the project the virtual machine belongs to.
+    type: str
+  zone:
+    description:
+      - Name of the zone in which the instance is deployed.
+      - If not set, the default zone is used.
+    type: str
+  poll_async:
+    description:
+      - Poll async jobs until job has finished.
+    type: bool
+    default: yes
+extends_documentation_fragment:
+- community.general.cloudstack

+'''
+
+EXAMPLES = '''
+- name: Stop the virtual machine before resetting the password
+  cs_instance:
+    name: myvirtualmachine
+    state: stopped
+  delegate_to: localhost
+
+- name: Reset and get the new default password
+  cs_instance_password_reset:
+    vm: myvirtualmachine
+  register: root
+  delegate_to: localhost
+- debug:
+    msg: "new default password is {{ root.password }}"
+
+- name: Boot the virtual machine to activate the new password
+  cs_instance:
+    name: myvirtualmachine
+    state: started
+  delegate_to: localhost
+  when: root is changed
+'''
+
+RETURN = '''
+---
+id:
+  description: ID of the virtual machine.
+  returned: success
+  type: str
+  sample: a6f7a5fc-43f8-11e5-a151-feff819cdc9f
+password:
+  description: The new default password.
+ returned: success + type: str + sample: ahQu5nuNge3keesh +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.cloudstack import ( + AnsibleCloudStack, + cs_required_together, + cs_argument_spec +) + + +class AnsibleCloudStackPasswordReset(AnsibleCloudStack): + + def __init__(self, module): + super(AnsibleCloudStackPasswordReset, self).__init__(module) + self.returns = { + 'password': 'password', + } + self.password = None + + def reset_password(self): + args = { + 'id': self.get_vm(key='id'), + } + + res = None + self.result['changed'] = True + if not self.module.check_mode: + res = self.query_api('resetPasswordForVirtualMachine', **args) + + poll_async = self.module.params.get('poll_async') + if res and poll_async: + res = self.poll_job(res, 'virtualmachine') + + if res and 'password' in res: + self.password = res['password'] + + return self.password + + +def main(): + argument_spec = cs_argument_spec() + argument_spec.update(dict( + vm=dict(required=True), + domain=dict(), + account=dict(), + project=dict(), + zone=dict(), + poll_async=dict(type='bool', default=True), + )) + + module = AnsibleModule( + argument_spec=argument_spec, + required_together=cs_required_together(), + supports_check_mode=True + ) + + acs_password = AnsibleCloudStackPasswordReset(module) + password = acs_password.reset_password() + result = acs_password.get_result({'password': password}) + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/cloudstack/cs_instancegroup.py b/plugins/modules/cloud/cloudstack/cs_instancegroup.py new file mode 100644 index 0000000000..aee3544c50 --- /dev/null +++ b/plugins/modules/cloud/cloudstack/cs_instancegroup.py @@ -0,0 +1,187 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# (c) 2015, René Moser +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['stableinterface'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: cs_instancegroup +short_description: Manages instance groups on Apache CloudStack based clouds. +description: + - Create and remove instance groups. +author: René Moser (@resmo) +options: + name: + description: + - Name of the instance group. + type: str + required: true + domain: + description: + - Domain the instance group is related to. + type: str + account: + description: + - Account the instance group is related to. + type: str + project: + description: + - Project the instance group is related to. + type: str + state: + description: + - State of the instance group. + type: str + default: present + choices: [ present, absent ] +extends_documentation_fragment: +- community.general.cloudstack + +''' + +EXAMPLES = ''' +- name: Create an instance group + cs_instancegroup: + name: loadbalancers + delegate_to: localhost + +- name: Remove an instance group + cs_instancegroup: + name: loadbalancers + state: absent + delegate_to: localhost +''' + +RETURN = ''' +--- +id: + description: UUID of the instance group. + returned: success + type: str + sample: 04589590-ac63-4ffc-93f5-b698b8ac38b6 +name: + description: Name of the instance group. + returned: success + type: str + sample: webservers +created: + description: Date when the instance group was created. 
+ returned: success + type: str + sample: 2015-05-03T15:05:51+0200 +domain: + description: Domain the instance group is related to. + returned: success + type: str + sample: example domain +account: + description: Account the instance group is related to. + returned: success + type: str + sample: example account +project: + description: Project the instance group is related to. + returned: success + type: str + sample: example project +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.cloudstack import ( + AnsibleCloudStack, + cs_argument_spec, + cs_required_together +) + + +class AnsibleCloudStackInstanceGroup(AnsibleCloudStack): + + def __init__(self, module): + super(AnsibleCloudStackInstanceGroup, self).__init__(module) + self.instance_group = None + + def get_instance_group(self): + if self.instance_group: + return self.instance_group + + name = self.module.params.get('name') + + args = { + 'account': self.get_account('name'), + 'domainid': self.get_domain('id'), + 'projectid': self.get_project('id'), + 'fetch_list': True, + } + instance_groups = self.query_api('listInstanceGroups', **args) + if instance_groups: + for g in instance_groups: + if name in [g['name'], g['id']]: + self.instance_group = g + break + return self.instance_group + + def present_instance_group(self): + instance_group = self.get_instance_group() + if not instance_group: + self.result['changed'] = True + + args = { + 'name': self.module.params.get('name'), + 'account': self.get_account('name'), + 'domainid': self.get_domain('id'), + 'projectid': self.get_project('id'), + } + if not self.module.check_mode: + res = self.query_api('createInstanceGroup', **args) + instance_group = res['instancegroup'] + return instance_group + + def absent_instance_group(self): + instance_group = self.get_instance_group() + if instance_group: + self.result['changed'] = True + if not self.module.check_mode: + self.query_api('deleteInstanceGroup', id=instance_group['id']) + return instance_group + + +def main(): + argument_spec = cs_argument_spec() + argument_spec.update(dict( + name=dict(required=True), + state=dict(default='present', choices=['present', 'absent']), + domain=dict(), + account=dict(), + project=dict(), + )) + + module = AnsibleModule( + argument_spec=argument_spec, + required_together=cs_required_together(), + supports_check_mode=True + ) + + acs_ig = AnsibleCloudStackInstanceGroup(module) + + state = module.params.get('state') + if state in ['absent']: + instance_group = acs_ig.absent_instance_group() + else: + instance_group = acs_ig.present_instance_group() + + result = acs_ig.get_result(instance_group) + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/cloudstack/cs_ip_address.py b/plugins/modules/cloud/cloudstack/cs_ip_address.py new file mode 100644 index 0000000000..c740d70f97 --- /dev/null +++ b/plugins/modules/cloud/cloudstack/cs_ip_address.py @@ -0,0 +1,285 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (c) 2015, Darren Worrall +# Copyright (c) 2015, René Moser +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['stableinterface'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: cs_ip_address +short_description: Manages public IP address associations on 
Apache CloudStack based clouds.
+description:
+  - Acquires and associates a public IP to an account or project.
+  - Due to API limitations this is not an idempotent call, so be sure to only
+    conditionally call this when I(state=present).
+  - Tagging the IP address can also make the call idempotent.
+author:
+  - Darren Worrall (@dazworrall)
+  - René Moser (@resmo)
+options:
+  ip_address:
+    description:
+      - Public IP address.
+      - Required if I(state=absent) and I(tags) is not set.
+    type: str
+  domain:
+    description:
+      - Domain the IP address is related to.
+    type: str
+  network:
+    description:
+      - Network the IP address is related to.
+      - Mutually exclusive with I(vpc).
+    type: str
+  vpc:
+    description:
+      - VPC the IP address is related to.
+      - Mutually exclusive with I(network).
+    type: str
+  account:
+    description:
+      - Account the IP address is related to.
+    type: str
+  project:
+    description:
+      - Name of the project the IP address is related to.
+    type: str
+  zone:
+    description:
+      - Name of the zone the IP address is in.
+      - If not set, default zone is used.
+    type: str
+  state:
+    description:
+      - State of the IP address.
+    type: str
+    default: present
+    choices: [ present, absent ]
+  tags:
+    description:
+      - List of tags. Tags are a list of dictionaries having keys I(key) and I(value).
+      - Tags can be used as a unique identifier for the IP addresses.
+      - In this case, at least one of them must be unique to ensure idempotency.
+    type: list
+    aliases: [ tag ]
+  poll_async:
+    description:
+      - Poll async jobs until job has finished.
+    type: bool
+    default: yes
+extends_documentation_fragment:
+- community.general.cloudstack
+
+'''
+
+EXAMPLES = '''
+- name: Associate an IP address conditionally
+  cs_ip_address:
+    network: My Network
+  register: ip_address
+  when: instance.public_ip is undefined
+  delegate_to: localhost
+
+- name: Disassociate an IP address
+  cs_ip_address:
+    ip_address: 1.2.3.4
+    state: absent
+  delegate_to: localhost
+
+- name: Associate an IP address with tags
+  cs_ip_address:
+    network: My Network
+    tags:
+      - key: myCustomID
+        value: 5510c31a-416e-11e8-9013-02000a6b00bf
+  register: ip_address
+  delegate_to: localhost
+
+- name: Disassociate an IP address with tags
+  cs_ip_address:
+    state: absent
+    tags:
+      - key: myCustomID
+        value: 5510c31a-416e-11e8-9013-02000a6b00bf
+  delegate_to: localhost
+'''
+
+RETURN = '''
+---
+id:
+  description: UUID of the Public IP address.
+  returned: success
+  type: str
+  sample: a6f7a5fc-43f8-11e5-a151-feff819cdc9f
+ip_address:
+  description: Public IP address.
+  returned: success
+  type: str
+  sample: 1.2.3.4
+zone:
+  description: Name of zone the IP address is related to.
+  returned: success
+  type: str
+  sample: ch-gva-2
+project:
+  description: Name of project the IP address is related to.
+  returned: success
+  type: str
+  sample: Production
+account:
+  description: Account the IP address is related to.
+  returned: success
+  type: str
+  sample: example account
+domain:
+  description: Domain the IP address is related to.
+  returned: success
+  type: str
+  sample: example domain
+tags:
+  description: List of resource tags associated with the IP address.
+ returned: success + type: dict + sample: '[ { "key": "myCustomID", "value": "5510c31a-416e-11e8-9013-02000a6b00bf" } ]' + version_added: '2.6' +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.cloudstack import ( + AnsibleCloudStack, + cs_argument_spec, + cs_required_together, +) + + +class AnsibleCloudStackIPAddress(AnsibleCloudStack): + + def __init__(self, module): + super(AnsibleCloudStackIPAddress, self).__init__(module) + self.returns = { + 'ipaddress': 'ip_address', + } + + def get_ip_address(self, key=None): + if self.ip_address: + return self._get_by_key(key, self.ip_address) + args = { + 'ipaddress': self.module.params.get('ip_address'), + 'account': self.get_account(key='name'), + 'domainid': self.get_domain(key='id'), + 'projectid': self.get_project(key='id'), + 'vpcid': self.get_vpc(key='id'), + } + ip_addresses = self.query_api('listPublicIpAddresses', **args) + + if ip_addresses: + tags = self.module.params.get('tags') + for ip_addr in ip_addresses['publicipaddress']: + if ip_addr['ipaddress'] == args['ipaddress'] != '': + self.ip_address = ip_addresses['publicipaddress'][0] + elif tags: + if sorted([tag for tag in tags if tag in ip_addr['tags']]) == sorted(tags): + self.ip_address = ip_addr + return self._get_by_key(key, self.ip_address) + + def present_ip_address(self): + ip_address = self.get_ip_address() + + if not ip_address: + ip_address = self.associate_ip_address(ip_address) + + if ip_address: + ip_address = self.ensure_tags(resource=ip_address, resource_type='publicipaddress') + + return ip_address + + def associate_ip_address(self, ip_address): + self.result['changed'] = True + args = { + 'account': self.get_account(key='name'), + 'domainid': self.get_domain(key='id'), + 'projectid': self.get_project(key='id'), + # For the VPC case networkid is irrelevant, special case and we have to ignore it here. 
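+            # i.e. networkid is set to None whenever a VPC is given, so the API
+            # never receives both networkid and vpcid at the same time.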
+ 'networkid': self.get_network(key='id') if not self.module.params.get('vpc') else None, + 'zoneid': self.get_zone(key='id'), + 'vpcid': self.get_vpc(key='id'), + } + ip_address = None + if not self.module.check_mode: + res = self.query_api('associateIpAddress', **args) + + poll_async = self.module.params.get('poll_async') + if poll_async: + ip_address = self.poll_job(res, 'ipaddress') + return ip_address + + def disassociate_ip_address(self): + ip_address = self.get_ip_address() + if not ip_address: + return None + if ip_address['isstaticnat']: + self.module.fail_json(msg="IP address is allocated via static nat") + + self.result['changed'] = True + if not self.module.check_mode: + self.module.params['tags'] = [] + ip_address = self.ensure_tags(resource=ip_address, resource_type='publicipaddress') + + res = self.query_api('disassociateIpAddress', id=ip_address['id']) + + poll_async = self.module.params.get('poll_async') + if poll_async: + self.poll_job(res, 'ipaddress') + return ip_address + + +def main(): + argument_spec = cs_argument_spec() + argument_spec.update(dict( + ip_address=dict(required=False), + state=dict(choices=['present', 'absent'], default='present'), + vpc=dict(), + network=dict(), + zone=dict(), + domain=dict(), + account=dict(), + project=dict(), + tags=dict(type='list', aliases=['tag']), + poll_async=dict(type='bool', default=True), + )) + + module = AnsibleModule( + argument_spec=argument_spec, + required_together=cs_required_together(), + required_if=[ + ('state', 'absent', ['ip_address', 'tags'], True), + ], + mutually_exclusive=( + ['vpc', 'network'], + ), + supports_check_mode=True + ) + + acs_ip_address = AnsibleCloudStackIPAddress(module) + + state = module.params.get('state') + if state in ['absent']: + ip_address = acs_ip_address.disassociate_ip_address() + else: + ip_address = acs_ip_address.present_ip_address() + + result = acs_ip_address.get_result(ip_address) + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/cloudstack/cs_iso.py b/plugins/modules/cloud/cloudstack/cs_iso.py new file mode 100644 index 0000000000..0d9f6a94ac --- /dev/null +++ b/plugins/modules/cloud/cloudstack/cs_iso.py @@ -0,0 +1,450 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# (c) 2015, René Moser +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['stableinterface'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: cs_iso +short_description: Manages ISO images on Apache CloudStack based clouds. +description: + - Register and remove ISO images. +author: René Moser (@resmo) +options: + name: + description: + - Name of the ISO. + type: str + required: true + display_text: + description: + - Display text of the ISO. + - If not specified, I(name) will be used. + type: str + url: + description: + - URL where the ISO can be downloaded from. Required if I(state) is present. + type: str + os_type: + description: + - Name of the OS that best represents the OS of this ISO. If the iso is bootable this parameter needs to be passed. Required if I(state) is present. + type: str + is_ready: + description: + - This flag is used for searching existing ISOs. If set to C(yes), it will only list ISO ready for deployment e.g. + successfully downloaded and installed. Recommended to set it to C(no). 
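+      - This flag only affects how an existing ISO is looked up, it is not sent when registering a new ISO.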
+    type: bool
+    default: no
+  is_public:
+    description:
+      - Register the ISO to be publicly available to all users. Only used if I(state) is present.
+    type: bool
+  is_featured:
+    description:
+      - Register the ISO to be featured. Only used if I(state) is present.
+    type: bool
+  is_dynamically_scalable:
+    description:
+      - Register the ISO having XS/VMware tools installed in order to support dynamic scaling of VM CPU/memory. Only used if I(state) is present.
+    type: bool
+  checksum:
+    description:
+      - The MD5 checksum value of this ISO. If set, we search by checksum instead of name.
+    type: str
+  bootable:
+    description:
+      - Register the ISO to be bootable. Only used if I(state) is present.
+    type: bool
+  domain:
+    description:
+      - Domain the ISO is related to.
+    type: str
+  account:
+    description:
+      - Account the ISO is related to.
+    type: str
+  project:
+    description:
+      - Name of the project the ISO is to be registered in.
+    type: str
+  zone:
+    description:
+      - Name of the zone you wish the ISO to be registered or deleted from.
+      - If not specified, first zone found will be used.
+    type: str
+  cross_zones:
+    description:
+      - Whether the ISO should be synced or removed across zones.
+      - Mutually exclusive with I(zone).
+    type: bool
+    default: no
+  iso_filter:
+    description:
+      - Name of the filter used to search for the ISO.
+    type: str
+    default: self
+    choices: [ featured, self, selfexecutable, sharedexecutable, executable, community ]
+  state:
+    description:
+      - State of the ISO.
+    type: str
+    default: present
+    choices: [ present, absent ]
+  poll_async:
+    description:
+      - Poll async jobs until job has finished.
+    type: bool
+    default: yes
+  tags:
+    description:
+      - List of tags. Tags are a list of dictionaries having keys I(key) and I(value).
+      - "To delete all tags, set an empty list, e.g. I(tags: [])."
+    type: list
+    aliases: [ tag ]
+extends_documentation_fragment:
+- community.general.cloudstack
+
+'''
+
+EXAMPLES = '''
+- name: Register an ISO if ISO name does not already exist
+  cs_iso:
+    name: Debian 7 64-bit
+    url: http://mirror.switch.ch/ftp/mirror/debian-cd/current/amd64/iso-cd/debian-7.7.0-amd64-netinst.iso
+    os_type: Debian GNU/Linux 7(64-bit)
+  delegate_to: localhost
+
+- name: Register an ISO with given name if ISO md5 checksum does not already exist
+  cs_iso:
+    name: Debian 7 64-bit
+    url: http://mirror.switch.ch/ftp/mirror/debian-cd/current/amd64/iso-cd/debian-7.7.0-amd64-netinst.iso
+    os_type: Debian GNU/Linux 7(64-bit)
+    checksum: 0b31bccccb048d20b551f70830bb7ad0
+  delegate_to: localhost
+
+- name: Remove an ISO by name
+  cs_iso:
+    name: Debian 7 64-bit
+    state: absent
+  delegate_to: localhost
+
+- name: Remove an ISO by checksum
+  cs_iso:
+    name: Debian 7 64-bit
+    checksum: 0b31bccccb048d20b551f70830bb7ad0
+    state: absent
+  delegate_to: localhost
+'''
+
+RETURN = '''
+---
+id:
+  description: UUID of the ISO.
+  returned: success
+  type: str
+  sample: a6f7a5fc-43f8-11e5-a151-feff819cdc9f
+name:
+  description: Name of the ISO.
+  returned: success
+  type: str
+  sample: Debian 7 64-bit
+display_text:
+  description: Display text of the ISO.
+  returned: success
+  type: str
+  sample: Debian 7.7 64-bit minimal 2015-03-19
+zone:
+  description: Name of zone the ISO is registered in.
+  returned: success
+  type: str
+  sample: zuerich
+status:
+  description: Status of the ISO.
+  returned: success
+  type: str
+  sample: Successfully Installed
+is_ready:
+  description: True if the ISO is ready to be deployed from.
+  returned: success
+  type: bool
+  sample: true
+is_public:
+  description: True if the ISO is public.
+  returned: success
+  type: bool
+  sample: true
+  version_added: '2.4'
+bootable:
+  description: True if the ISO is bootable.
+  returned: success
+  type: bool
+  sample: true
+  version_added: '2.4'
+is_featured:
+  description: True if the ISO is featured.
+  returned: success
+  type: bool
+  sample: true
+  version_added: '2.4'
+format:
+  description: Format of the ISO.
+  returned: success
+  type: str
+  sample: ISO
+  version_added: '2.4'
+os_type:
+  description: Type of the OS.
+  returned: success
+  type: str
+  sample: CentOS 6.5 (64-bit)
+  version_added: '2.4'
+checksum:
+  description: MD5 checksum of the ISO.
+  returned: success
+  type: str
+  sample: 0b31bccccb048d20b551f70830bb7ad0
+created:
+  description: Date of registering.
+  returned: success
+  type: str
+  sample: 2015-03-29T14:57:06+0200
+cross_zones:
+  description: True if the ISO is managed across all zones, false otherwise.
+  returned: success
+  type: bool
+  sample: false
+  version_added: '2.4'
+domain:
+  description: Domain the ISO is related to.
+  returned: success
+  type: str
+  sample: example domain
+account:
+  description: Account the ISO is related to.
+  returned: success
+  type: str
+  sample: example account
+project:
+  description: Project the ISO is related to.
+  returned: success
+  type: str
+  sample: example project
+tags:
+  description: List of resource tags associated with the ISO.
+  returned: success
+  type: dict
+  sample: '[ { "key": "foo", "value": "bar" } ]'
+  version_added: '2.4'
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.cloudstack import (
+    AnsibleCloudStack,
+    cs_argument_spec,
+    cs_required_together
+)
+
+
+class AnsibleCloudStackIso(AnsibleCloudStack):
+
+    def __init__(self, module):
+        super(AnsibleCloudStackIso, self).__init__(module)
+        self.returns = {
+            'checksum': 'checksum',
+            'status': 'status',
+            'isready': 'is_ready',
+            'crossZones': 'cross_zones',
+            'format': 'format',
+            'ostypename': 'os_type',
+            'isfeatured': 'is_featured',
+            'bootable': 'bootable',
+            'ispublic': 'is_public',
+        }
+        self.iso = None
+
+    def _get_common_args(self):
+        return {
+            'name': self.module.params.get('name'),
+            'displaytext': self.get_or_fallback('display_text', 'name'),
+            'isdynamicallyscalable': self.module.params.get('is_dynamically_scalable'),
+            'ostypeid': self.get_os_type('id'),
+            'bootable': self.module.params.get('bootable'),
+        }
+
+    def register_iso(self):
+        args = self._get_common_args()
+        args.update({
+            'domainid': self.get_domain('id'),
+            'account': self.get_account('name'),
+            'projectid': self.get_project('id'),
+            'checksum': self.module.params.get('checksum'),
+            'isfeatured': self.module.params.get('is_featured'),
+            'ispublic': self.module.params.get('is_public'),
+        })
+
+        if not self.module.params.get('cross_zones'):
+            args['zoneid'] = self.get_zone(key='id')
+        else:
+            args['zoneid'] = -1
+
+        if args['bootable'] and not args['ostypeid']:
+            self.module.fail_json(msg="OS type 'os_type' is required if 'bootable=true'.")
+
+        args['url'] = self.module.params.get('url')
+        if not args['url']:
+            self.module.fail_json(msg="URL is required.")
+
+        self.result['changed'] = True
+        if not self.module.check_mode:
+            res = self.query_api('registerIso', **args)
+            self.iso = res['iso'][0]
+        return self.iso
+
+    def present_iso(self):
+        iso = self.get_iso()
+        if not iso:
+            iso = self.register_iso()
+        else:
+            iso = self.update_iso(iso)
+
+        if iso:
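+            # ensure_tags() reconciles the ISO's tags with the I(tags) parameter,
+            # creating missing tags and deleting ones that are no longer listed.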
iso = self.ensure_tags(resource=iso, resource_type='ISO') + self.iso = iso + return iso + + def update_iso(self, iso): + args = self._get_common_args() + args.update({ + 'id': iso['id'], + }) + if self.has_changed(args, iso): + self.result['changed'] = True + + if not self.module.params.get('cross_zones'): + args['zoneid'] = self.get_zone(key='id') + else: + # Workaround API does not return cross_zones=true + self.result['cross_zones'] = True + args['zoneid'] = -1 + + if not self.module.check_mode: + res = self.query_api('updateIso', **args) + self.iso = res['iso'] + return self.iso + + def get_iso(self): + if not self.iso: + args = { + 'isready': self.module.params.get('is_ready'), + 'isofilter': self.module.params.get('iso_filter'), + 'domainid': self.get_domain('id'), + 'account': self.get_account('name'), + 'projectid': self.get_project('id'), + } + + if not self.module.params.get('cross_zones'): + args['zoneid'] = self.get_zone(key='id') + + # if checksum is set, we only look on that. + checksum = self.module.params.get('checksum') + if not checksum: + args['name'] = self.module.params.get('name') + + isos = self.query_api('listIsos', **args) + if isos: + if not checksum: + self.iso = isos['iso'][0] + else: + for i in isos['iso']: + if i['checksum'] == checksum: + self.iso = i + break + return self.iso + + def absent_iso(self): + iso = self.get_iso() + if iso: + self.result['changed'] = True + + args = { + 'id': iso['id'], + 'projectid': self.get_project('id'), + } + + if not self.module.params.get('cross_zones'): + args['zoneid'] = self.get_zone(key='id') + + if not self.module.check_mode: + res = self.query_api('deleteIso', **args) + poll_async = self.module.params.get('poll_async') + if poll_async: + self.poll_job(res, 'iso') + return iso + + def get_result(self, iso): + super(AnsibleCloudStackIso, self).get_result(iso) + # Workaround API does not return cross_zones=true + if self.module.params.get('cross_zones'): + self.result['cross_zones'] = True + if 'zone' in self.result: + del self.result['zone'] + return self.result + + +def main(): + argument_spec = cs_argument_spec() + argument_spec.update(dict( + name=dict(required=True), + display_text=dict(), + url=dict(), + os_type=dict(), + zone=dict(), + cross_zones=dict(type='bool', default=False), + iso_filter=dict(default='self', choices=['featured', 'self', 'selfexecutable', 'sharedexecutable', 'executable', 'community']), + domain=dict(), + account=dict(), + project=dict(), + checksum=dict(), + is_ready=dict(type='bool', default=False), + bootable=dict(type='bool'), + is_featured=dict(type='bool'), + is_public=dict(type='bool'), + is_dynamically_scalable=dict(type='bool'), + state=dict(choices=['present', 'absent'], default='present'), + poll_async=dict(type='bool', default=True), + tags=dict(type='list', aliases=['tag']), + )) + + module = AnsibleModule( + argument_spec=argument_spec, + required_together=cs_required_together(), + mutually_exclusive=( + ['zone', 'cross_zones'], + ), + supports_check_mode=True + ) + + acs_iso = AnsibleCloudStackIso(module) + + state = module.params.get('state') + if state in ['absent']: + iso = acs_iso.absent_iso() + else: + iso = acs_iso.present_iso() + + result = acs_iso.get_result(iso) + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/cloudstack/cs_loadbalancer_rule.py b/plugins/modules/cloud/cloudstack/cs_loadbalancer_rule.py new file mode 100644 index 0000000000..ea329e5b51 --- /dev/null +++ 
b/plugins/modules/cloud/cloudstack/cs_loadbalancer_rule.py
@@ -0,0 +1,378 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (c) 2015, Darren Worrall
+# (c) 2015, René Moser
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['stableinterface'],
+                    'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: cs_loadbalancer_rule
+short_description: Manages load balancer rules on Apache CloudStack based clouds.
+description:
+  - Add, update and remove load balancer rules.
+author:
+  - Darren Worrall (@dazworrall)
+  - René Moser (@resmo)
+options:
+  name:
+    description:
+      - The name of the load balancer rule.
+    type: str
+    required: true
+  description:
+    description:
+      - The description of the load balancer rule.
+    type: str
+  algorithm:
+    description:
+      - Load balancer algorithm.
+      - Required when using I(state=present).
+    type: str
+    choices: [ source, roundrobin, leastconn ]
+    default: source
+  private_port:
+    description:
+      - The private port of the private IP address/virtual machine where the network traffic will be load balanced to.
+      - Required when using I(state=present).
+      - Can not be changed once the rule exists due to an API limitation.
+    type: int
+  public_port:
+    description:
+      - The public port from where the network traffic will be load balanced from.
+      - Required when using I(state=present).
+      - Can not be changed once the rule exists due to an API limitation.
+    type: int
+    required: true
+  ip_address:
+    description:
+      - Public IP address from where the network traffic will be load balanced from.
+    type: str
+    required: true
+    aliases: [ public_ip ]
+  open_firewall:
+    description:
+      - Whether the firewall rule for the public port should be created while creating the new rule.
+      - Use M(cs_firewall) for managing firewall rules.
+    type: bool
+    default: no
+  cidr:
+    description:
+      - CIDR (full notation) to be used for firewall rule if required.
+    type: str
+  protocol:
+    description:
+      - The protocol to be used on the load balancer.
+    type: str
+  project:
+    description:
+      - Name of the project the load balancer IP address is related to.
+    type: str
+  state:
+    description:
+      - State of the rule.
+    type: str
+    default: present
+    choices: [ present, absent ]
+  domain:
+    description:
+      - Domain the rule is related to.
+    type: str
+  account:
+    description:
+      - Account the rule is related to.
+    type: str
+  zone:
+    description:
+      - Name of the zone in which the rule should be created.
+      - If not set, default zone is used.
+    type: str
+  poll_async:
+    description:
+      - Poll async jobs until job has finished.
+    type: bool
+    default: yes
+  tags:
+    description:
+      - List of tags. Tags are a list of dictionaries having keys I(key) and I(value).
+      - "To delete all tags, set an empty list, e.g. I(tags: [])."
+    type: list
+    aliases: [ tag ]
+  network:
+    description:
+      - Name of the network.
+    type: str
+  vpc:
+    description:
+      - Name of the VPC.
+    type: str
+extends_documentation_fragment:
+- community.general.cloudstack
+
+'''
+
+EXAMPLES = '''
+- name: Create a load balancer rule
+  cs_loadbalancer_rule:
+    name: balance_http
+    public_ip: 1.2.3.4
+    algorithm: leastconn
+    public_port: 80
+    private_port: 8080
+  delegate_to: localhost
+
+- name: Update algorithm of an existing load balancer rule
+  cs_loadbalancer_rule:
+    name: balance_http
+    public_ip: 1.2.3.4
+    algorithm: roundrobin
+    public_port: 80
+    private_port: 8080
+  delegate_to: localhost
+
+- name: Delete a load balancer rule
+  cs_loadbalancer_rule:
+    name: balance_http
+    public_ip: 1.2.3.4
+    state: absent
+  delegate_to: localhost
+'''
+
+RETURN = '''
+---
+id:
+  description: UUID of the rule.
+  returned: success
+  type: str
+  sample: a6f7a5fc-43f8-11e5-a151-feff819cdc9f
+zone:
+  description: Name of zone the rule is related to.
+  returned: success
+  type: str
+  sample: ch-gva-2
+project:
+  description: Name of project the rule is related to.
+  returned: success
+  type: str
+  sample: Production
+account:
+  description: Account the rule is related to.
+  returned: success
+  type: str
+  sample: example account
+domain:
+  description: Domain the rule is related to.
+  returned: success
+  type: str
+  sample: example domain
+algorithm:
+  description: Load balancer algorithm used.
+  returned: success
+  type: str
+  sample: source
+cidr:
+  description: CIDR to forward traffic from.
+  returned: success
+  type: str
+  sample: 0.0.0.0/0
+name:
+  description: Name of the rule.
+  returned: success
+  type: str
+  sample: http-lb
+description:
+  description: Description of the rule.
+  returned: success
+  type: str
+  sample: http load balancer rule
+protocol:
+  description: Protocol of the rule.
+  returned: success
+  type: str
+  sample: tcp
+public_port:
+  description: Public port.
+  returned: success
+  type: int
+  sample: 80
+private_port:
+  description: Private port.
+  returned: success
+  type: int
+  sample: 80
+public_ip:
+  description: Public IP address.
+  returned: success
+  type: str
+  sample: 1.2.3.4
+tags:
+  description: List of resource tags associated with the rule.
+  returned: success
+  type: list
+  sample: '[ { "key": "foo", "value": "bar" } ]'
+state:
+  description: State of the rule.
+ returned: success + type: str + sample: Add +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.cloudstack import ( + AnsibleCloudStack, + cs_argument_spec, + cs_required_together, +) + + +class AnsibleCloudStackLBRule(AnsibleCloudStack): + + def __init__(self, module): + super(AnsibleCloudStackLBRule, self).__init__(module) + self.returns = { + 'publicip': 'public_ip', + 'algorithm': 'algorithm', + 'cidrlist': 'cidr', + 'protocol': 'protocol', + } + # these values will be casted to int + self.returns_to_int = { + 'publicport': 'public_port', + 'privateport': 'private_port', + } + + def get_rule(self, **kwargs): + rules = self.query_api('listLoadBalancerRules', **kwargs) + if rules: + return rules['loadbalancerrule'][0] + + def _get_common_args(self): + return { + 'account': self.get_account(key='name'), + 'domainid': self.get_domain(key='id'), + 'projectid': self.get_project(key='id'), + 'zoneid': self.get_zone(key='id') if self.module.params.get('zone') else None, + 'publicipid': self.get_ip_address(key='id'), + 'name': self.module.params.get('name'), + } + + def present_lb_rule(self): + required_params = [ + 'algorithm', + 'private_port', + 'public_port', + ] + self.module.fail_on_missing_params(required_params=required_params) + + args = self._get_common_args() + rule = self.get_rule(**args) + if rule: + rule = self._update_lb_rule(rule) + else: + rule = self._create_lb_rule(rule) + + if rule: + rule = self.ensure_tags(resource=rule, resource_type='LoadBalancer') + return rule + + def _create_lb_rule(self, rule): + self.result['changed'] = True + if not self.module.check_mode: + args = self._get_common_args() + args.update({ + 'algorithm': self.module.params.get('algorithm'), + 'privateport': self.module.params.get('private_port'), + 'publicport': self.module.params.get('public_port'), + 'cidrlist': self.module.params.get('cidr'), + 'description': self.module.params.get('description'), + 'protocol': self.module.params.get('protocol'), + 'networkid': self.get_network(key='id'), + }) + res = self.query_api('createLoadBalancerRule', **args) + + poll_async = self.module.params.get('poll_async') + if poll_async: + rule = self.poll_job(res, 'loadbalancer') + return rule + + def _update_lb_rule(self, rule): + args = { + 'id': rule['id'], + 'algorithm': self.module.params.get('algorithm'), + 'description': self.module.params.get('description'), + } + if self.has_changed(args, rule): + self.result['changed'] = True + if not self.module.check_mode: + res = self.query_api('updateLoadBalancerRule', **args) + + poll_async = self.module.params.get('poll_async') + if poll_async: + rule = self.poll_job(res, 'loadbalancer') + return rule + + def absent_lb_rule(self): + args = self._get_common_args() + rule = self.get_rule(**args) + if rule: + self.result['changed'] = True + if rule and not self.module.check_mode: + res = self.query_api('deleteLoadBalancerRule', id=rule['id']) + + poll_async = self.module.params.get('poll_async') + if poll_async: + self.poll_job(res, 'loadbalancer') + return rule + + +def main(): + argument_spec = cs_argument_spec() + argument_spec.update(dict( + name=dict(required=True), + description=dict(), + algorithm=dict(choices=['source', 'roundrobin', 'leastconn'], default='source'), + private_port=dict(type='int'), + public_port=dict(type='int'), + protocol=dict(), + state=dict(choices=['present', 'absent'], default='present'), + ip_address=dict(required=True, aliases=['public_ip']), + cidr=dict(), + 
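+        # Note: 'open_firewall' is declared below but is not passed to any API
+        # call in this module; firewall rules are managed with cs_firewall instead.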
+        project=dict(),
+        open_firewall=dict(type='bool', default=False),
+        tags=dict(type='list', aliases=['tag']),
+        zone=dict(),
+        domain=dict(),
+        account=dict(),
+        vpc=dict(),
+        network=dict(),
+        poll_async=dict(type='bool', default=True),
+    ))
+
+    module = AnsibleModule(
+        argument_spec=argument_spec,
+        required_together=cs_required_together(),
+        supports_check_mode=True
+    )
+
+    acs_lb_rule = AnsibleCloudStackLBRule(module)
+
+    state = module.params.get('state')
+    if state in ['absent']:
+        rule = acs_lb_rule.absent_lb_rule()
+    else:
+        rule = acs_lb_rule.present_lb_rule()
+
+    result = acs_lb_rule.get_result(rule)
+    module.exit_json(**result)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/cloud/cloudstack/cs_loadbalancer_rule_member.py b/plugins/modules/cloud/cloudstack/cs_loadbalancer_rule_member.py
new file mode 100644
index 0000000000..269844160a
--- /dev/null
+++ b/plugins/modules/cloud/cloudstack/cs_loadbalancer_rule_member.py
@@ -0,0 +1,350 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2015, Darren Worrall
+# Copyright (c) 2015, René Moser
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['stableinterface'],
+                    'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: cs_loadbalancer_rule_member
+short_description: Manages load balancer rule members on Apache CloudStack based clouds.
+description:
+  - Add and remove load balancer rule members.
+author:
+  - Darren Worrall (@dazworrall)
+  - René Moser (@resmo)
+options:
+  name:
+    description:
+      - The name of the load balancer rule.
+    type: str
+    required: true
+  ip_address:
+    description:
+      - Public IP address from where the network traffic will be load balanced from.
+      - Only needed to find the rule if I(name) is not unique.
+    type: str
+    aliases: [ public_ip ]
+  vms:
+    description:
+      - List of VMs to assign to or remove from the rule.
+    type: list
+    required: true
+    aliases: [ vm ]
+  state:
+    description:
+      - Should the VMs be present or absent from the rule.
+    type: str
+    default: present
+    choices: [ present, absent ]
+  project:
+    description:
+      - Name of the project the load balancer rule is related to.
+    type: str
+  domain:
+    description:
+      - Domain the rule is related to.
+    type: str
+  account:
+    description:
+      - Account the rule is related to.
+    type: str
+  zone:
+    description:
+      - Name of the zone in which the rule should be located.
+      - If not set, default zone is used.
+    type: str
+  poll_async:
+    description:
+      - Poll async jobs until job has finished.
+    type: bool
+    default: yes
+extends_documentation_fragment:
+- community.general.cloudstack
+
+'''
+
+EXAMPLES = '''
+- name: Add VMs to an existing load balancer
+  cs_loadbalancer_rule_member:
+    name: balance_http
+    vms:
+      - web01
+      - web02
+  delegate_to: localhost
+
+- name: Remove a VM from an existing load balancer
+  cs_loadbalancer_rule_member:
+    name: balance_http
+    vms:
+      - web01
+      - web02
+    state: absent
+  delegate_to: localhost
+
+# Rolling upgrade of hosts
+- hosts: webservers
+  serial: 1
+  pre_tasks:
+    - name: Remove from load balancer
+      cs_loadbalancer_rule_member:
+        name: balance_http
+        vm: "{{ ansible_hostname }}"
+        state: absent
+      delegate_to: localhost
+  tasks:
+    # Perform update
+  post_tasks:
+    - name: Add to load balancer
+      cs_loadbalancer_rule_member:
+        name: balance_http
+        vm: "{{ ansible_hostname }}"
+        state: present
+      delegate_to: localhost
+'''
+
+RETURN = '''
+---
+id:
+  description: UUID of the rule.
+  returned: success
+  type: str
+  sample: a6f7a5fc-43f8-11e5-a151-feff819cdc9f
+zone:
+  description: Name of zone the rule is related to.
+  returned: success
+  type: str
+  sample: ch-gva-2
+project:
+  description: Name of project the rule is related to.
+  returned: success
+  type: str
+  sample: Production
+account:
+  description: Account the rule is related to.
+  returned: success
+  type: str
+  sample: example account
+domain:
+  description: Domain the rule is related to.
+  returned: success
+  type: str
+  sample: example domain
+algorithm:
+  description: Load balancer algorithm used.
+  returned: success
+  type: str
+  sample: source
+cidr:
+  description: CIDR to forward traffic from.
+  returned: success
+  type: str
+  sample: 0.0.0.0/0
+name:
+  description: Name of the rule.
+  returned: success
+  type: str
+  sample: http-lb
+description:
+  description: Description of the rule.
+  returned: success
+  type: str
+  sample: http load balancer rule
+protocol:
+  description: Protocol of the rule.
+  returned: success
+  type: str
+  sample: tcp
+public_port:
+  description: Public port.
+  returned: success
+  type: int
+  sample: 80
+private_port:
+  description: Private port.
+  returned: success
+  type: int
+  sample: 80
+public_ip:
+  description: Public IP address.
+  returned: success
+  type: str
+  sample: 1.2.3.4
+vms:
+  description: Rule members.
+  returned: success
+  type: list
+  sample: '[ "web01", "web02" ]'
+tags:
+  description: List of resource tags associated with the rule.
+  returned: success
+  type: list
+  sample: '[ { "key": "foo", "value": "bar" } ]'
+state:
+  description: State of the rule.
+ returned: success + type: str + sample: Add +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.cloudstack import ( + AnsibleCloudStack, + cs_argument_spec, + cs_required_together, +) + + +class AnsibleCloudStackLBRuleMember(AnsibleCloudStack): + + def __init__(self, module): + super(AnsibleCloudStackLBRuleMember, self).__init__(module) + self.returns = { + 'publicip': 'public_ip', + 'algorithm': 'algorithm', + 'cidrlist': 'cidr', + 'protocol': 'protocol', + } + # these values will be casted to int + self.returns_to_int = { + 'publicport': 'public_port', + 'privateport': 'private_port', + } + + def get_rule(self): + args = self._get_common_args() + args.update({ + 'name': self.module.params.get('name'), + 'zoneid': self.get_zone(key='id') if self.module.params.get('zone') else None, + }) + if self.module.params.get('ip_address'): + args['publicipid'] = self.get_ip_address(key='id') + + rules = self.query_api('listLoadBalancerRules', **args) + if rules: + if len(rules['loadbalancerrule']) > 1: + self.module.fail_json(msg="More than one rule having name %s. Please pass 'ip_address' as well." % args['name']) + return rules['loadbalancerrule'][0] + return None + + def _get_common_args(self): + return { + 'account': self.get_account(key='name'), + 'domainid': self.get_domain(key='id'), + 'projectid': self.get_project(key='id'), + } + + def _get_members_of_rule(self, rule): + res = self.query_api('listLoadBalancerRuleInstances', id=rule['id']) + return res.get('loadbalancerruleinstance', []) + + def _ensure_members(self, operation): + if operation not in ['add', 'remove']: + self.module.fail_json(msg="Bad operation: %s" % operation) + + rule = self.get_rule() + if not rule: + self.module.fail_json(msg="Unknown rule: %s" % self.module.params.get('name')) + + existing = {} + for vm in self._get_members_of_rule(rule=rule): + existing[vm['name']] = vm['id'] + + wanted_names = self.module.params.get('vms') + + if operation == 'add': + cs_func = 'assignToLoadBalancerRule' + to_change = set(wanted_names) - set(existing.keys()) + else: + cs_func = 'removeFromLoadBalancerRule' + to_change = set(wanted_names) & set(existing.keys()) + + if not to_change: + return rule + + args = self._get_common_args() + args['fetch_list'] = True + vms = self.query_api('listVirtualMachines', **args) + to_change_ids = [] + for name in to_change: + for vm in vms: + if vm['name'] == name: + to_change_ids.append(vm['id']) + break + else: + self.module.fail_json(msg="Unknown VM: %s" % name) + + if to_change_ids: + self.result['changed'] = True + + if to_change_ids and not self.module.check_mode: + res = self.query_api( + cs_func, + id=rule['id'], + virtualmachineids=to_change_ids, + ) + + poll_async = self.module.params.get('poll_async') + if poll_async: + self.poll_job(res) + rule = self.get_rule() + return rule + + def add_members(self): + return self._ensure_members('add') + + def remove_members(self): + return self._ensure_members('remove') + + def get_result(self, rule): + super(AnsibleCloudStackLBRuleMember, self).get_result(rule) + if rule: + self.result['vms'] = [] + for vm in self._get_members_of_rule(rule=rule): + self.result['vms'].append(vm['name']) + return self.result + + +def main(): + argument_spec = cs_argument_spec() + argument_spec.update(dict( + name=dict(required=True), + ip_address=dict(aliases=['public_ip']), + vms=dict(required=True, aliases=['vm'], type='list'), + state=dict(choices=['present', 'absent'], default='present'), + 
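+        # optional ownership and location filters, all defaulting to None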
zone=dict(), + domain=dict(), + project=dict(), + account=dict(), + poll_async=dict(type='bool', default=True), + )) + + module = AnsibleModule( + argument_spec=argument_spec, + required_together=cs_required_together(), + supports_check_mode=True + ) + + acs_lb_rule_member = AnsibleCloudStackLBRuleMember(module) + + state = module.params.get('state') + if state in ['absent']: + rule = acs_lb_rule_member.remove_members() + else: + rule = acs_lb_rule_member.add_members() + + result = acs_lb_rule_member.get_result(rule) + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/cloudstack/cs_network.py b/plugins/modules/cloud/cloudstack/cs_network.py new file mode 100644 index 0000000000..1443291df6 --- /dev/null +++ b/plugins/modules/cloud/cloudstack/cs_network.py @@ -0,0 +1,642 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (c) 2017, René Moser +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['stableinterface'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: cs_network +short_description: Manages networks on Apache CloudStack based clouds. +description: + - Create, update, restart and delete networks. +author: René Moser (@resmo) +options: + name: + description: + - Name (case sensitive) of the network. + type: str + required: true + display_text: + description: + - Display text of the network. + - If not specified, I(name) will be used as I(display_text). + type: str + network_offering: + description: + - Name of the offering for the network. + - Required if I(state=present). + type: str + start_ip: + description: + - The beginning IPv4 address of the network belongs to. + - Only considered on create. + type: str + end_ip: + description: + - The ending IPv4 address of the network belongs to. + - If not specified, value of I(start_ip) is used. + - Only considered on create. + type: str + gateway: + description: + - The gateway of the network. + - Required for shared networks and isolated networks when it belongs to a VPC. + - Only considered on create. + type: str + netmask: + description: + - The netmask of the network. + - Required for shared networks and isolated networks when it belongs to a VPC. + - Only considered on create. + type: str + start_ipv6: + description: + - The beginning IPv6 address of the network belongs to. + - Only considered on create. + type: str + end_ipv6: + description: + - The ending IPv6 address of the network belongs to. + - If not specified, value of I(start_ipv6) is used. + - Only considered on create. + type: str + cidr_ipv6: + description: + - CIDR of IPv6 network, must be at least /64. + - Only considered on create. + type: str + gateway_ipv6: + description: + - The gateway of the IPv6 network. + - Required for shared networks. + - Only considered on create. + type: str + vlan: + description: + - The ID or VID of the network. + type: str + vpc: + description: + - Name of the VPC of the network. + type: str + isolated_pvlan: + description: + - The isolated private VLAN for this network. + type: str + clean_up: + description: + - Cleanup old network elements. + - Only considered on I(state=restarted). + default: no + type: bool + acl_type: + description: + - Access control type for the network. 
+      - If not specified, CloudStack will default to C(account) for isolated networks
+        and C(domain) for shared networks.
+      - Only considered on create.
+    type: str
+    choices: [ account, domain ]
+  acl:
+    description:
+      - The name of the access control list for the VPC network tier.
+    type: str
+  subdomain_access:
+    description:
+      - Defines whether to allow subdomains to use networks dedicated to their parent domain(s).
+      - Should be used with I(acl_type=domain).
+      - Only considered on create.
+    type: bool
+  network_domain:
+    description:
+      - The network domain.
+    type: str
+  state:
+    description:
+      - State of the network.
+    type: str
+    default: present
+    choices: [ present, absent, restarted ]
+  zone:
+    description:
+      - Name of the zone in which the network should be deployed.
+      - If not set, default zone is used.
+    type: str
+  project:
+    description:
+      - Name of the project the network is to be deployed in.
+    type: str
+  domain:
+    description:
+      - Domain the network is related to.
+    type: str
+  account:
+    description:
+      - Account the network is related to.
+    type: str
+  poll_async:
+    description:
+      - Poll async jobs until job has finished.
+    default: yes
+    type: bool
+  tags:
+    description:
+      - List of tags. Tags are a list of dictionaries having keys I(key) and I(value).
+      - "To delete all tags, set an empty list, e.g. I(tags: [])."
+    type: list
+    aliases: [ tag ]
+extends_documentation_fragment:
+- community.general.cloudstack
+
+'''
+
+EXAMPLES = '''
+- name: Create a network
+  cs_network:
+    name: my network
+    zone: gva-01
+    network_offering: DefaultIsolatedNetworkOfferingWithSourceNatService
+    network_domain: example.com
+  delegate_to: localhost
+
+- name: Create a VPC tier
+  cs_network:
+    name: my VPC tier 1
+    zone: gva-01
+    vpc: my VPC
+    network_offering: DefaultIsolatedNetworkOfferingForVpcNetworks
+    gateway: 10.43.0.1
+    netmask: 255.255.255.0
+    acl: my web acl
+  delegate_to: localhost
+
+- name: Update a network
+  cs_network:
+    name: my network
+    display_text: network of domain example.local
+    network_domain: example.local
+  delegate_to: localhost
+
+- name: Restart a network with clean up
+  cs_network:
+    name: my network
+    clean_up: yes
+    state: restarted
+  delegate_to: localhost
+
+- name: Remove a network
+  cs_network:
+    name: my network
+    state: absent
+  delegate_to: localhost
+'''
+
+RETURN = '''
+---
+id:
+  description: UUID of the network.
+  returned: success
+  type: str
+  sample: 04589590-ac63-4ffc-93f5-b698b8ac38b6
+name:
+  description: Name of the network.
+  returned: success
+  type: str
+  sample: web project
+display_text:
+  description: Display text of the network.
+  returned: success
+  type: str
+  sample: web project
+dns1:
+  description: IP address of the 1st nameserver.
+  returned: success
+  type: str
+  sample: 1.2.3.4
+dns2:
+  description: IP address of the 2nd nameserver.
+  returned: success
+  type: str
+  sample: 1.2.3.4
+cidr:
+  description: IPv4 network CIDR.
+  returned: success
+  type: str
+  sample: 10.101.64.0/24
+gateway:
+  description: IPv4 gateway.
+  returned: success
+  type: str
+  sample: 10.101.64.1
+netmask:
+  description: IPv4 netmask.
+  returned: success
+  type: str
+  sample: 255.255.255.0
+cidr_ipv6:
+  description: IPv6 network CIDR.
+  returned: if available
+  type: str
+  sample: 2001:db8::/64
+gateway_ipv6:
+  description: IPv6 gateway.
+  returned: if available
+  type: str
+  sample: 2001:db8::1
+zone:
+  description: Name of zone.
+  returned: success
+  type: str
+  sample: ch-gva-2
+domain:
+  description: Domain the network is related to.
+ returned: success + type: str + sample: ROOT +account: + description: Account the network is related to. + returned: success + type: str + sample: example account +project: + description: Name of project. + returned: success + type: str + sample: Production +tags: + description: List of resource tags associated with the network. + returned: success + type: list + sample: '[ { "key": "foo", "value": "bar" } ]' +acl_type: + description: Access type of the network (Domain, Account). + returned: success + type: str + sample: Account +acl: + description: Name of the access control list for the VPC network tier. + returned: success + type: str + sample: My ACL + version_added: '2.5' +acl_id: + description: ID of the access control list for the VPC network tier. + returned: success + type: str + sample: dfafcd55-0510-4b8c-b6c5-b8cedb4cfd88 + version_added: '2.5' +broadcast_domain_type: + description: Broadcast domain type of the network. + returned: success + type: str + sample: Vlan +type: + description: Type of the network. + returned: success + type: str + sample: Isolated +traffic_type: + description: Traffic type of the network. + returned: success + type: str + sample: Guest +state: + description: State of the network (Allocated, Implemented, Setup). + returned: success + type: str + sample: Allocated +is_persistent: + description: Whether the network is persistent or not. + returned: success + type: bool + sample: false +network_domain: + description: The network domain + returned: success + type: str + sample: example.local +network_offering: + description: The network offering name. + returned: success + type: str + sample: DefaultIsolatedNetworkOfferingWithSourceNatService +network_offering_display_text: + description: The network offering display text. + returned: success + type: str + sample: Offering for Isolated Vpc networks with Source Nat service enabled + version_added: '2.5' +network_offering_conserve_mode: + description: Whether the network offering has IP conserve mode enabled or not. + returned: success + type: bool + sample: false + version_added: '2.5' +network_offering_availability: + description: The availability of the network offering the network is created from + returned: success + type: str + sample: Optional + version_added: '2.5' +is_system: + description: Whether the network is system related or not. + returned: success + type: bool + sample: false + version_added: '2.5' +vpc: + description: Name of the VPC. 
+ returned: if available + type: str + sample: My VPC +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.cloudstack import ( + AnsibleCloudStack, + cs_argument_spec, + cs_required_together, +) + + +class AnsibleCloudStackNetwork(AnsibleCloudStack): + + def __init__(self, module): + super(AnsibleCloudStackNetwork, self).__init__(module) + self.returns = { + 'networkdomain': 'network_domain', + 'networkofferingname': 'network_offering', + 'networkofferingdisplaytext': 'network_offering_display_text', + 'networkofferingconservemode': 'network_offering_conserve_mode', + 'networkofferingavailability': 'network_offering_availability', + 'aclid': 'acl_id', + 'issystem': 'is_system', + 'ispersistent': 'is_persistent', + 'acltype': 'acl_type', + 'type': 'type', + 'traffictype': 'traffic_type', + 'ip6gateway': 'gateway_ipv6', + 'ip6cidr': 'cidr_ipv6', + 'gateway': 'gateway', + 'cidr': 'cidr', + 'netmask': 'netmask', + 'broadcastdomaintype': 'broadcast_domain_type', + 'dns1': 'dns1', + 'dns2': 'dns2', + } + self.network = None + + def get_network_acl(self, key=None, acl_id=None): + if acl_id is not None: + args = { + 'id': acl_id, + 'vpcid': self.get_vpc(key='id'), + } + else: + acl_name = self.module.params.get('acl') + if not acl_name: + return + + args = { + 'name': acl_name, + 'vpcid': self.get_vpc(key='id'), + } + network_acls = self.query_api('listNetworkACLLists', **args) + if network_acls: + acl = network_acls['networkacllist'][0] + return self._get_by_key(key, acl) + + def get_network_offering(self, key=None): + network_offering = self.module.params.get('network_offering') + if not network_offering: + self.module.fail_json(msg="missing required arguments: network_offering") + + args = { + 'zoneid': self.get_zone(key='id'), + 'fetch_list': True, + } + + network_offerings = self.query_api('listNetworkOfferings', **args) + if network_offerings: + for no in network_offerings: + if network_offering in [no['name'], no['displaytext'], no['id']]: + return self._get_by_key(key, no) + self.module.fail_json(msg="Network offering '%s' not found" % network_offering) + + def _get_args(self): + args = { + 'name': self.module.params.get('name'), + 'displaytext': self.get_or_fallback('display_text', 'name'), + 'networkdomain': self.module.params.get('network_domain'), + 'networkofferingid': self.get_network_offering(key='id') + } + return args + + def get_network(self, refresh=False): + if not self.network or refresh: + network = self.module.params.get('name') + args = { + 'zoneid': self.get_zone(key='id'), + 'projectid': self.get_project(key='id'), + 'account': self.get_account(key='name'), + 'domainid': self.get_domain(key='id'), + 'vpcid': self.get_vpc(key='id'), + 'fetch_list': True, + } + networks = self.query_api('listNetworks', **args) + if networks: + for n in networks: + if network in [n['name'], n['displaytext'], n['id']]: + self.network = n + self.network['acl'] = self.get_network_acl(key='name', acl_id=n.get('aclid')) + break + return self.network + + def present_network(self): + if self.module.params.get('acl') is not None and self.module.params.get('vpc') is None: + self.module.fail_json(msg="Missing required params: vpc") + + network = self.get_network() + if not network: + network = self.create_network(network) + else: + network = self.update_network(network) + + if network: + network = self.ensure_tags(resource=network, resource_type='Network') + + return network + + def update_network(self, network): + args = 
self._get_args() + args['id'] = network['id'] + + if self.has_changed(args, network): + self.result['changed'] = True + if not self.module.check_mode: + network = self.query_api('updateNetwork', **args) + + poll_async = self.module.params.get('poll_async') + if network and poll_async: + network = self.poll_job(network, 'network') + + # Skip ACL check if the network is not a VPC tier + if network.get('aclid') != self.get_network_acl(key='id'): + self.result['changed'] = True + if not self.module.check_mode: + args = { + 'aclid': self.get_network_acl(key='id'), + 'networkid': network['id'], + } + network = self.query_api('replaceNetworkACLList', **args) + if self.module.params.get('poll_async'): + self.poll_job(network, 'networkacllist') + network = self.get_network(refresh=True) + return network + + def create_network(self, network): + self.result['changed'] = True + + args = self._get_args() + args.update({ + 'acltype': self.module.params.get('acl_type'), + 'aclid': self.get_network_acl(key='id'), + 'zoneid': self.get_zone(key='id'), + 'projectid': self.get_project(key='id'), + 'account': self.get_account(key='name'), + 'domainid': self.get_domain(key='id'), + 'startip': self.module.params.get('start_ip'), + 'endip': self.get_or_fallback('end_ip', 'start_ip'), + 'netmask': self.module.params.get('netmask'), + 'gateway': self.module.params.get('gateway'), + 'startipv6': self.module.params.get('start_ipv6'), + 'endipv6': self.get_or_fallback('end_ipv6', 'start_ipv6'), + 'ip6cidr': self.module.params.get('cidr_ipv6'), + 'ip6gateway': self.module.params.get('gateway_ipv6'), + 'vlan': self.module.params.get('vlan'), + 'isolatedpvlan': self.module.params.get('isolated_pvlan'), + 'subdomainaccess': self.module.params.get('subdomain_access'), + 'vpcid': self.get_vpc(key='id') + }) + + if not self.module.check_mode: + res = self.query_api('createNetwork', **args) + + network = res['network'] + return network + + def restart_network(self): + network = self.get_network() + + if not network: + self.module.fail_json(msg="No network named '%s' found." 
% self.module.params.get('name'))
+
+        # Restarting is only available in these states
+        if network['state'].lower() in ['implemented', 'setup']:
+            self.result['changed'] = True
+
+            args = {
+                'id': network['id'],
+                'cleanup': self.module.params.get('clean_up')
+            }
+
+            if not self.module.check_mode:
+                network = self.query_api('restartNetwork', **args)
+
+                poll_async = self.module.params.get('poll_async')
+                if network and poll_async:
+                    network = self.poll_job(network, 'network')
+        return network
+
+    def absent_network(self):
+        network = self.get_network()
+        if network:
+            self.result['changed'] = True
+
+            args = {
+                'id': network['id']
+            }
+
+            if not self.module.check_mode:
+                res = self.query_api('deleteNetwork', **args)
+
+                poll_async = self.module.params.get('poll_async')
+                if res and poll_async:
+                    self.poll_job(res, 'network')
+        return network
+
+    def get_result(self, network):
+        super(AnsibleCloudStackNetwork, self).get_result(network)
+        if network:
+            self.result['acl'] = self.get_network_acl(key='name', acl_id=network.get('aclid'))
+        return self.result
+
+
+def main():
+    argument_spec = cs_argument_spec()
+    argument_spec.update(dict(
+        name=dict(required=True),
+        display_text=dict(),
+        network_offering=dict(),
+        zone=dict(),
+        start_ip=dict(),
+        end_ip=dict(),
+        gateway=dict(),
+        netmask=dict(),
+        start_ipv6=dict(),
+        end_ipv6=dict(),
+        cidr_ipv6=dict(),
+        gateway_ipv6=dict(),
+        vlan=dict(),
+        vpc=dict(),
+        isolated_pvlan=dict(),
+        clean_up=dict(type='bool', default=False),
+        network_domain=dict(),
+        subdomain_access=dict(type='bool'),
+        state=dict(choices=['present', 'absent', 'restarted'], default='present'),
+        acl=dict(),
+        acl_type=dict(choices=['account', 'domain']),
+        project=dict(),
+        domain=dict(),
+        account=dict(),
+        poll_async=dict(type='bool', default=True),
+        tags=dict(type='list', aliases=['tag']),
+    ))
+    required_together = cs_required_together()
+    required_together.extend([
+        ['netmask', 'gateway'],
+    ])
+
+    module = AnsibleModule(
+        argument_spec=argument_spec,
+        required_together=required_together,
+        supports_check_mode=True
+    )
+
+    acs_network = AnsibleCloudStackNetwork(module)
+
+    state = module.params.get('state')
+    if state == 'absent':
+        network = acs_network.absent_network()
+
+    elif state == 'restarted':
+        network = acs_network.restart_network()
+
+    else:
+        network = acs_network.present_network()
+
+    result = acs_network.get_result(network)
+    module.exit_json(**result)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/cloud/cloudstack/cs_network_acl.py b/plugins/modules/cloud/cloudstack/cs_network_acl.py
new file mode 100644
index 0000000000..1c7020f304
--- /dev/null
+++ b/plugins/modules/cloud/cloudstack/cs_network_acl.py
@@ -0,0 +1,202 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (c) 2017, René Moser
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+DOCUMENTATION = '''
+---
+module: cs_network_acl
+short_description: Manages network access control lists (ACL) on Apache CloudStack based clouds.
+description:
+  - Create and remove network ACLs.
+author: René Moser (@resmo)
+options:
+  name:
+    description:
+      - Name of the network ACL.
+    type: str
+    required: true
+  description:
+    description:
+      - Description of the network ACL.
+      - If not set, identical to I(name).
+ type: str + vpc: + description: + - VPC the network ACL is related to. + type: str + required: true + state: + description: + - State of the network ACL. + type: str + default: present + choices: [ present, absent ] + domain: + description: + - Domain the network ACL rule is related to. + type: str + account: + description: + - Account the network ACL rule is related to. + type: str + project: + description: + - Name of the project the network ACL is related to. + type: str + zone: + description: + - Name of the zone the VPC is related to. + - If not set, default zone is used. + type: str + poll_async: + description: + - Poll async jobs until job has finished. + type: bool + default: yes +extends_documentation_fragment: +- community.general.cloudstack + +''' + +EXAMPLES = ''' +- name: create a network ACL + cs_network_acl: + name: Webserver ACL + description: a more detailed description of the ACL + vpc: customers + delegate_to: localhost + +- name: remove a network ACL + cs_network_acl: + name: Webserver ACL + vpc: customers + state: absent + delegate_to: localhost +''' + +RETURN = ''' +--- +name: + description: Name of the network ACL. + returned: success + type: str + sample: customer acl +description: + description: Description of the network ACL. + returned: success + type: str + sample: Example description of a network ACL +vpc: + description: VPC of the network ACL. + returned: success + type: str + sample: customer vpc +zone: + description: Zone the VPC is related to. + returned: success + type: str + sample: ch-gva-2 +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.cloudstack import ( + AnsibleCloudStack, + cs_argument_spec, + cs_required_together +) + + +class AnsibleCloudStackNetworkAcl(AnsibleCloudStack): + + def __init__(self, module): + super(AnsibleCloudStackNetworkAcl, self).__init__(module) + + def get_network_acl(self): + args = { + 'name': self.module.params.get('name'), + 'vpcid': self.get_vpc(key='id'), + } + network_acls = self.query_api('listNetworkACLLists', **args) + if network_acls: + return network_acls['networkacllist'][0] + return None + + def present_network_acl(self): + network_acl = self.get_network_acl() + if not network_acl: + self.result['changed'] = True + args = { + 'name': self.module.params.get('name'), + 'description': self.get_or_fallback('description', 'name'), + 'vpcid': self.get_vpc(key='id') + } + if not self.module.check_mode: + res = self.query_api('createNetworkACLList', **args) + + poll_async = self.module.params.get('poll_async') + if poll_async: + network_acl = self.poll_job(res, 'networkacllist') + + return network_acl + + def absent_network_acl(self): + network_acl = self.get_network_acl() + if network_acl: + self.result['changed'] = True + args = { + 'id': network_acl['id'], + } + if not self.module.check_mode: + res = self.query_api('deleteNetworkACLList', **args) + + poll_async = self.module.params.get('poll_async') + if poll_async: + self.poll_job(res, 'networkacllist') + + return network_acl + + +def main(): + argument_spec = cs_argument_spec() + argument_spec.update(dict( + name=dict(required=True), + description=dict(), + vpc=dict(required=True), + state=dict(choices=['present', 'absent'], default='present'), + zone=dict(), + domain=dict(), + account=dict(), + project=dict(), + poll_async=dict(type='bool', default=True), + )) + + module = AnsibleModule( + argument_spec=argument_spec, + required_together=cs_required_together(), + supports_check_mode=True 
+ ) + + acs_network_acl = AnsibleCloudStackNetworkAcl(module) + + state = module.params.get('state') + if state == 'absent': + network_acl = acs_network_acl.absent_network_acl() + else: + network_acl = acs_network_acl.present_network_acl() + + result = acs_network_acl.get_result(network_acl) + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/cloudstack/cs_network_acl_rule.py b/plugins/modules/cloud/cloudstack/cs_network_acl_rule.py new file mode 100644 index 0000000000..31776d795a --- /dev/null +++ b/plugins/modules/cloud/cloudstack/cs_network_acl_rule.py @@ -0,0 +1,462 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (c) 2017, René Moser +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: cs_network_acl_rule +short_description: Manages network access control list (ACL) rules on Apache CloudStack based clouds. +description: + - Add, update and remove network ACL rules. +author: René Moser (@resmo) +options: + network_acl: + description: + - Name of the network ACL. + type: str + required: true + aliases: [ acl ] + cidrs: + description: + - CIDRs of the rule. + type: list + default: [ 0.0.0.0/0 ] + aliases: [ cidr ] + rule_position: + description: + - The position of the network ACL rule. + type: int + required: true + aliases: [ number ] + protocol: + description: + - Protocol of the rule + choices: [ tcp, udp, icmp, all, by_number ] + type: str + default: tcp + protocol_number: + description: + - Protocol number from 1 to 256 required if I(protocol=by_number). + type: int + start_port: + description: + - Start port for this rule. + - Considered if I(protocol=tcp) or I(protocol=udp). + type: int + aliases: [ port ] + end_port: + description: + - End port for this rule. + - Considered if I(protocol=tcp) or I(protocol=udp). + - If not specified, equal I(start_port). + type: int + icmp_type: + description: + - Type of the icmp message being sent. + - Considered if I(protocol=icmp). + type: int + icmp_code: + description: + - Error code for this icmp message. + - Considered if I(protocol=icmp). + type: int + vpc: + description: + - VPC the network ACL is related to. + type: str + required: true + traffic_type: + description: + - Traffic type of the rule. + type: str + choices: [ ingress, egress ] + default: ingress + aliases: [ type ] + action_policy: + description: + - Action policy of the rule. + type: str + choices: [ allow, deny ] + default: allow + aliases: [ action ] + tags: + description: + - List of tags. Tags are a list of dictionaries having keys I(key) and I(value). + - "If you want to delete all tags, set a empty list e.g. I(tags: [])." + type: list + aliases: [ tag ] + domain: + description: + - Domain the VPC is related to. + type: str + account: + description: + - Account the VPC is related to. + type: str + project: + description: + - Name of the project the VPC is related to. + type: str + zone: + description: + - Name of the zone the VPC related to. + - If not set, default zone is used. + type: str + state: + description: + - State of the network ACL rule. + type: str + default: present + choices: [ present, absent ] + poll_async: + description: + - Poll async jobs until job has finished. 
+ type: bool + default: yes +extends_documentation_fragment: +- community.general.cloudstack + +''' + +EXAMPLES = ''' +- name: create a network ACL rule, allow port 80 ingress + cs_network_acl_rule: + network_acl: web + rule_position: 1 + vpc: my vpc + traffic_type: ingress + action_policy: allow + port: 80 + cidr: 0.0.0.0/0 + delegate_to: localhost + +- name: create a network ACL rule, deny port range 8000-9000 ingress for 10.20.0.0/16 and 10.22.0.0/16 + cs_network_acl_rule: + network_acl: web + rule_position: 1 + vpc: my vpc + traffic_type: ingress + action_policy: deny + start_port: 8000 + end_port: 9000 + cidrs: + - 10.20.0.0/16 + - 10.22.0.0/16 + delegate_to: localhost + +- name: remove a network ACL rule + cs_network_acl_rule: + network_acl: web + rule_position: 1 + vpc: my vpc + state: absent + delegate_to: localhost +''' + +RETURN = ''' +--- +network_acl: + description: Name of the network ACL. + returned: success + type: str + sample: customer acl +cidr: + description: CIDR of the network ACL rule. + returned: success + type: str + sample: 0.0.0.0/0 +cidrs: + description: CIDRs of the network ACL rule. + returned: success + type: list + sample: [ 0.0.0.0/0 ] + version_added: '2.9' +rule_position: + description: Position of the network ACL rule. + returned: success + type: int + sample: 1 +action_policy: + description: Action policy of the network ACL rule. + returned: success + type: str + sample: deny +traffic_type: + description: Traffic type of the network ACL rule. + returned: success + type: str + sample: ingress +protocol: + description: Protocol of the network ACL rule. + returned: success + type: str + sample: tcp +protocol_number: + description: Protocol number in case protocol is by number. + returned: success + type: int + sample: 8 +start_port: + description: Start port of the network ACL rule. + returned: success + type: int + sample: 80 +end_port: + description: End port of the network ACL rule. + returned: success + type: int + sample: 80 +icmp_code: + description: ICMP code of the network ACL rule. + returned: success + type: int + sample: 8 +icmp_type: + description: ICMP type of the network ACL rule. + returned: success + type: int + sample: 0 +state: + description: State of the network ACL rule. + returned: success + type: str + sample: Active +vpc: + description: VPC of the network ACL. + returned: success + type: str + sample: customer vpc +tags: + description: List of resource tags associated with the network ACL rule. + returned: success + type: list + sample: '[ { "key": "foo", "value": "bar" } ]' +domain: + description: Domain the network ACL rule is related to. + returned: success + type: str + sample: example domain +account: + description: Account the network ACL rule is related to. + returned: success + type: str + sample: example account +project: + description: Name of project the network ACL rule is related to. + returned: success + type: str + sample: Production +zone: + description: Zone the VPC is related to. 
+ returned: success + type: str + sample: ch-gva-2 +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.cloudstack import ( + AnsibleCloudStack, + cs_argument_spec, + cs_required_together +) + + +class AnsibleCloudStackNetworkAclRule(AnsibleCloudStack): + + def __init__(self, module): + super(AnsibleCloudStackNetworkAclRule, self).__init__(module) + self.returns = { + 'cidrlist': 'cidr', + 'action': 'action_policy', + 'protocol': 'protocol', + 'icmpcode': 'icmp_code', + 'icmptype': 'icmp_type', + 'number': 'rule_position', + 'traffictype': 'traffic_type', + } + # these values will be casted to int + self.returns_to_int = { + 'startport': 'start_port', + 'endport': 'end_port', + } + + def get_network_acl_rule(self): + args = { + 'aclid': self.get_network_acl(key='id'), + 'account': self.get_account(key='name'), + 'domainid': self.get_domain(key='id'), + 'projectid': self.get_project(key='id'), + } + network_acl_rules = self.query_api('listNetworkACLs', **args) + for acl_rule in network_acl_rules.get('networkacl', []): + if acl_rule['number'] == self.module.params.get('rule_position'): + return acl_rule + return None + + def present_network_acl_rule(self): + network_acl_rule = self.get_network_acl_rule() + + protocol = self.module.params.get('protocol') + start_port = self.module.params.get('start_port') + end_port = self.get_or_fallback('end_port', 'start_port') + icmp_type = self.module.params.get('icmp_type') + icmp_code = self.module.params.get('icmp_code') + + if protocol in ['tcp', 'udp'] and (start_port is None or end_port is None): + self.module.fail_json(msg="protocol is %s but the following are missing: start_port, end_port" % protocol) + + elif protocol == 'icmp' and (icmp_type is None or icmp_code is None): + self.module.fail_json(msg="protocol is icmp but the following are missing: icmp_type, icmp_code") + + elif protocol == 'by_number' and self.module.params.get('protocol_number') is None: + self.module.fail_json(msg="protocol is by_number but the following are missing: protocol_number") + + if not network_acl_rule: + network_acl_rule = self._create_network_acl_rule(network_acl_rule) + else: + network_acl_rule = self._update_network_acl_rule(network_acl_rule) + + if network_acl_rule: + network_acl_rule = self.ensure_tags(resource=network_acl_rule, resource_type='NetworkACL') + return network_acl_rule + + def absent_network_acl_rule(self): + network_acl_rule = self.get_network_acl_rule() + if network_acl_rule: + self.result['changed'] = True + args = { + 'id': network_acl_rule['id'], + } + if not self.module.check_mode: + res = self.query_api('deleteNetworkACL', **args) + + poll_async = self.module.params.get('poll_async') + if poll_async: + self.poll_job(res, 'networkacl') + + return network_acl_rule + + def _create_network_acl_rule(self, network_acl_rule): + self.result['changed'] = True + protocol = self.module.params.get('protocol') + args = { + 'aclid': self.get_network_acl(key='id'), + 'action': self.module.params.get('action_policy'), + 'protocol': protocol if protocol != 'by_number' else self.module.params.get('protocol_number'), + 'startport': self.module.params.get('start_port'), + 'endport': self.get_or_fallback('end_port', 'start_port'), + 'number': self.module.params.get('rule_position'), + 'icmpcode': self.module.params.get('icmp_code'), + 'icmptype': self.module.params.get('icmp_type'), + 'traffictype': self.module.params.get('traffic_type'), + 'cidrlist': self.module.params.get('cidrs'), + } 
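+        # Note (editorial addition, not part of the original patch): with
+        # protocol=by_number the numeric protocol is sent in the 'protocol'
+        # field itself (e.g. protocol_number=47 yields args['protocol'] = 47);
+        # get_result() below maps such rules back to protocol=by_number plus
+        # protocol_number.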
+        if not self.module.check_mode:
+            res = self.query_api('createNetworkACL', **args)
+
+            poll_async = self.module.params.get('poll_async')
+            if poll_async:
+                network_acl_rule = self.poll_job(res, 'networkacl')
+
+        return network_acl_rule
+
+    def _update_network_acl_rule(self, network_acl_rule):
+        protocol = self.module.params.get('protocol')
+        args = {
+            'id': network_acl_rule['id'],
+            'action': self.module.params.get('action_policy'),
+            'protocol': protocol if protocol != 'by_number' else str(self.module.params.get('protocol_number')),
+            'startport': self.module.params.get('start_port'),
+            'endport': self.get_or_fallback('end_port', 'start_port'),
+            'icmpcode': self.module.params.get('icmp_code'),
+            'icmptype': self.module.params.get('icmp_type'),
+            'traffictype': self.module.params.get('traffic_type'),
+            'cidrlist': ",".join(self.module.params.get('cidrs')),
+        }
+        if self.has_changed(args, network_acl_rule):
+            self.result['changed'] = True
+            if not self.module.check_mode:
+                res = self.query_api('updateNetworkACLItem', **args)
+
+                poll_async = self.module.params.get('poll_async')
+                if poll_async:
+                    network_acl_rule = self.poll_job(res, 'networkacl')
+
+        return network_acl_rule
+
+    def get_result(self, network_acl_rule):
+        super(AnsibleCloudStackNetworkAclRule, self).get_result(network_acl_rule)
+        if network_acl_rule:
+            if 'cidrlist' in network_acl_rule:
+                self.result['cidrs'] = network_acl_rule['cidrlist'].split(',') or [network_acl_rule['cidrlist']]
+            if network_acl_rule['protocol'] not in ['tcp', 'udp', 'icmp', 'all']:
+                self.result['protocol_number'] = int(network_acl_rule['protocol'])
+                self.result['protocol'] = 'by_number'
+            self.result['action_policy'] = self.result['action_policy'].lower()
+            self.result['traffic_type'] = self.result['traffic_type'].lower()
+        return self.result
+
+
+def main():
+    argument_spec = cs_argument_spec()
+    argument_spec.update(dict(
+        network_acl=dict(required=True, aliases=['acl']),
+        rule_position=dict(required=True, type='int', aliases=['number']),
+        vpc=dict(required=True),
+        cidrs=dict(type='list', default=['0.0.0.0/0'], aliases=['cidr']),
+        protocol=dict(choices=['tcp', 'udp', 'icmp', 'all', 'by_number'], default='tcp'),
+        protocol_number=dict(type='int'),
+        traffic_type=dict(choices=['ingress', 'egress'], aliases=['type'], default='ingress'),
+        action_policy=dict(choices=['allow', 'deny'], aliases=['action'], default='allow'),
+        icmp_type=dict(type='int'),
+        icmp_code=dict(type='int'),
+        start_port=dict(type='int', aliases=['port']),
+        end_port=dict(type='int'),
+        state=dict(choices=['present', 'absent'], default='present'),
+        zone=dict(),
+        domain=dict(),
+        account=dict(),
+        project=dict(),
+        tags=dict(type='list', aliases=['tag']),
+        poll_async=dict(type='bool', default=True),
+    ))
+
+    required_together = cs_required_together()
+    required_together.extend([
+        ['icmp_type', 'icmp_code'],
+    ])
+
+    module = AnsibleModule(
+        argument_spec=argument_spec,
+        required_together=required_together,
+        mutually_exclusive=(
+            ['icmp_type', 'start_port'],
+            ['icmp_type', 'end_port'],
+        ),
+        supports_check_mode=True
+    )
+
+    acs_network_acl_rule = AnsibleCloudStackNetworkAclRule(module)
+
+    state = module.params.get('state')
+    if state == 'absent':
+        network_acl_rule = acs_network_acl_rule.absent_network_acl_rule()
+    else:
+        network_acl_rule = acs_network_acl_rule.present_network_acl_rule()
+
+    result = acs_network_acl_rule.get_result(network_acl_rule)
+
+    module.exit_json(**result)
+
+
+if __name__ == '__main__':
+    main()
diff --git
a/plugins/modules/cloud/cloudstack/cs_network_offering.py b/plugins/modules/cloud/cloudstack/cs_network_offering.py new file mode 100644 index 0000000000..b4384bbf19 --- /dev/null +++ b/plugins/modules/cloud/cloudstack/cs_network_offering.py @@ -0,0 +1,425 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (c) 2017, David Passante (@dpassante) +# Copyright (c) 2017, René Moser +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: cs_network_offering +short_description: Manages network offerings on Apache CloudStack based clouds. +description: + - Create, update, enable, disable and remove network offerings. +author: David Passante (@dpassante) +options: + state: + description: + - State of the network offering. + type: str + choices: [ enabled, present, disabled, absent] + default: present + display_text: + description: + - Display text of the network offerings. + type: str + guest_ip_type: + description: + - Guest type of the network offering. + type: str + choices: [ Shared, Isolated ] + name: + description: + - The name of the network offering. + type: str + required: true + supported_services: + description: + - Services supported by the network offering. + - A list of one or more items from the choice list. + type: list + choices: [ Dns, PortForwarding, Dhcp, SourceNat, UserData, Firewall, StaticNat, Vpn, Lb ] + aliases: [ supported_service ] + traffic_type: + description: + - The traffic type for the network offering. + type: str + default: Guest + availability: + description: + - The availability of network offering. Default value is Optional + type: str + conserve_mode: + description: + - Whether the network offering has IP conserve mode enabled. + type: bool + details: + description: + - Network offering details in key/value pairs. + - with service provider as a value + type: list + egress_default_policy: + description: + - Whether the default egress policy is allow or to deny. + type: str + choices: [ allow, deny ] + persistent: + description: + - True if network offering supports persistent networks + - defaulted to false if not specified + type: bool + keepalive_enabled: + description: + - If true keepalive will be turned on in the loadbalancer. + - At the time of writing this has only an effect on haproxy. + - the mode http and httpclose options are unset in the haproxy conf file. + type: bool + max_connections: + description: + - Maximum number of concurrent connections supported by the network offering. + type: int + network_rate: + description: + - Data transfer rate in megabits per second allowed. + type: int + service_capabilities: + description: + - Desired service capabilities as part of network offering. + type: list + aliases: [ service_capability ] + service_offering: + description: + - The service offering name or ID used by virtual router provider. + type: str + service_providers: + description: + - Provider to service mapping. + - If not specified, the provider for the service will be mapped to the default provider on the physical network. + type: list + aliases: [ service_provider ] + specify_ip_ranges: + description: + - Whether the network offering supports specifying IP ranges. + - Defaulted to C(no) by the API if not specified. 
+    type: bool
+  specify_vlan:
+    description:
+      - Whether the network offering supports VLANs or not.
+    type: bool
+  for_vpc:
+    description:
+      - Whether the offering is meant to be used for VPC or not.
+    type: bool
+extends_documentation_fragment:
+- community.general.cloudstack
+
+'''
+
+EXAMPLES = '''
+- name: Create a network offering and enable it
+  cs_network_offering:
+    name: my_network_offering
+    display_text: network offering description
+    state: enabled
+    guest_ip_type: Isolated
+    supported_services: [ Dns, PortForwarding, Dhcp, SourceNat, UserData, Firewall, StaticNat, Vpn, Lb ]
+    service_providers:
+      - { service: 'dns', provider: 'virtualrouter' }
+      - { service: 'dhcp', provider: 'virtualrouter' }
+  delegate_to: localhost
+
+
+- name: Remove a network offering
+  cs_network_offering:
+    name: my_network_offering
+    state: absent
+  delegate_to: localhost
+'''
+
+RETURN = '''
+---
+id:
+  description: UUID of the network offering.
+  returned: success
+  type: str
+  sample: a6f7a5fc-43f8-11e5-a151-feff819cdc9f
+name:
+  description: The name of the network offering.
+  returned: success
+  type: str
+  sample: MyCustomNetworkOffering
+display_text:
+  description: The display text of the network offering.
+  returned: success
+  type: str
+  sample: My network offering
+state:
+  description: The state of the network offering.
+  returned: success
+  type: str
+  sample: Enabled
+guest_ip_type:
+  description: Guest type of the network offering.
+  returned: success
+  type: str
+  sample: Isolated
+availability:
+  description: The availability of network offering.
+  returned: success
+  type: str
+  sample: Optional
+service_offering_id:
+  description: The service offering ID.
+  returned: success
+  type: str
+  sample: c5f7a5fc-43f8-11e5-a151-feff819cdc9f
+max_connections:
+  description: The maximum number of concurrent connections to be handled by LB.
+  returned: success
+  type: int
+  sample: 300
+network_rate:
+  description: The network traffic transfer rate in Mbit/s.
+  returned: success
+  type: int
+  sample: 200
+traffic_type:
+  description: The traffic type.
+  returned: success
+  type: str
+  sample: Guest
+egress_default_policy:
+  description: Default egress policy.
+  returned: success
+  type: str
+  sample: allow
+is_persistent:
+  description: Whether persistent networks are supported or not.
+  returned: success
+  type: bool
+  sample: false
+is_default:
+  description: Whether network offering is the default offering or not.
+  returned: success
+  type: bool
+  sample: false
+for_vpc:
+  description: Whether the offering is meant to be used for VPC or not.
+  returned: success
+  type: bool
+  sample: false
+  version_added: '2.8'
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.cloudstack import (
+    AnsibleCloudStack,
+    cs_argument_spec,
+    cs_required_together,
+)
+
+
+class AnsibleCloudStackNetworkOffering(AnsibleCloudStack):
+
+    def __init__(self, module):
+        super(AnsibleCloudStackNetworkOffering, self).__init__(module)
+        self.returns = {
+            'guestiptype': 'guest_ip_type',
+            'availability': 'availability',
+            'serviceofferingid': 'service_offering_id',
+            'networkrate': 'network_rate',
+            'maxconnections': 'max_connections',
+            'traffictype': 'traffic_type',
+            'isdefault': 'is_default',
+            'ispersistent': 'is_persistent',
+            'forvpc': 'for_vpc'
+        }
+        self.network_offering = None
+
+    def get_service_offering_id(self):
+        service_offering = self.module.params.get('service_offering')
+        if not service_offering:
+            return None
+
+        args = {
+            'issystem': True
+        }
+
+        service_offerings = self.query_api('listServiceOfferings', **args)
+        if service_offerings:
+            for s in service_offerings['serviceoffering']:
+                if service_offering in [s['name'], s['id']]:
+                    return s['id']
+        self.module.fail_json(msg="Service offering '%s' not found" % service_offering)
+
+    def get_network_offering(self):
+        if self.network_offering:
+            return self.network_offering
+
+        args = {
+            'name': self.module.params.get('name'),
+            'guestiptype': self.module.params.get('guest_ip_type'),
+        }
+        no = self.query_api('listNetworkOfferings', **args)
+        if no:
+            self.network_offering = no['networkoffering'][0]
+
+        return self.network_offering
+
+    def create_or_update(self):
+        network_offering = self.get_network_offering()
+
+        if not network_offering:
+            network_offering = self.create_network_offering()
+
+        return self.update_network_offering(network_offering=network_offering)
+
+    def create_network_offering(self):
+        network_offering = None
+        self.result['changed'] = True
+
+        args = {
+            'state': self.module.params.get('state'),
+            'displaytext': self.module.params.get('display_text'),
+            'guestiptype': self.module.params.get('guest_ip_type'),
+            'name': self.module.params.get('name'),
+            'supportedservices': self.module.params.get('supported_services'),
+            'traffictype': self.module.params.get('traffic_type'),
+            'availability': self.module.params.get('availability'),
+            'conservemode': self.module.params.get('conserve_mode'),
+            'details': self.module.params.get('details'),
+            'egressdefaultpolicy': self.module.params.get('egress_default_policy') == 'allow',
+            'ispersistent': self.module.params.get('persistent'),
+            'keepaliveenabled': self.module.params.get('keepalive_enabled'),
+            'maxconnections': self.module.params.get('max_connections'),
+            'networkrate': self.module.params.get('network_rate'),
+            'servicecapabilitylist': self.module.params.get('service_capabilities'),
+            'serviceofferingid': self.get_service_offering_id(),
+            'serviceproviderlist': self.module.params.get('service_providers'),
+            'specifyipranges': self.module.params.get('specify_ip_ranges'),
+            'specifyvlan': self.module.params.get('specify_vlan'),
+            'forvpc': self.module.params.get('for_vpc'),
+        }
+
+        required_params = [
+            'display_text',
+            'guest_ip_type',
+            'supported_services',
+            'service_providers',
+        ]
+
+        self.module.fail_on_missing_params(required_params=required_params)
+
+        if not self.module.check_mode:
+            res = self.query_api('createNetworkOffering', **args)
+            network_offering = res['networkoffering']
+
+        return network_offering
+
+    def delete_network_offering(self):
+
network_offering = self.get_network_offering() + + if network_offering: + self.result['changed'] = True + if not self.module.check_mode: + self.query_api('deleteNetworkOffering', id=network_offering['id']) + + return network_offering + + def update_network_offering(self, network_offering): + if not network_offering: + return network_offering + + args = { + 'id': network_offering['id'], + 'state': self.module.params.get('state'), + 'displaytext': self.module.params.get('display_text'), + 'name': self.module.params.get('name'), + 'availability': self.module.params.get('availability'), + 'maxconnections': self.module.params.get('max_connections'), + } + + if args['state'] in ['enabled', 'disabled']: + args['state'] = args['state'].title() + else: + del args['state'] + + if self.has_changed(args, network_offering): + self.result['changed'] = True + + if not self.module.check_mode: + res = self.query_api('updateNetworkOffering', **args) + network_offering = res['networkoffering'] + + return network_offering + + def get_result(self, network_offering): + super(AnsibleCloudStackNetworkOffering, self).get_result(network_offering) + if network_offering: + self.result['egress_default_policy'] = 'allow' if network_offering.get('egressdefaultpolicy') else 'deny' + return self.result + + +def main(): + argument_spec = cs_argument_spec() + argument_spec.update(dict( + state=dict(choices=['enabled', 'present', 'disabled', 'absent'], default='present'), + display_text=dict(), + guest_ip_type=dict(choices=['Shared', 'Isolated']), + name=dict(required=True), + supported_services=dict(type='list', aliases=['supported_service'], choices=[ + 'Dns', + 'PortForwarding', + 'Dhcp', + 'SourceNat', + 'UserData', + 'Firewall', + 'StaticNat', + 'Vpn', + 'Lb', + ]), + traffic_type=dict(default='Guest'), + availability=dict(), + conserve_mode=dict(type='bool'), + details=dict(type='list'), + egress_default_policy=dict(choices=['allow', 'deny']), + persistent=dict(type='bool'), + keepalive_enabled=dict(type='bool'), + max_connections=dict(type='int'), + network_rate=dict(type='int'), + service_capabilities=dict(type='list', aliases=['service_capability']), + service_offering=dict(), + service_providers=dict(type='list', aliases=['service_provider']), + specify_ip_ranges=dict(type='bool'), + specify_vlan=dict(type='bool'), + for_vpc=dict(type='bool'), + )) + + module = AnsibleModule( + argument_spec=argument_spec, + required_together=cs_required_together(), + supports_check_mode=True + ) + + acs_network_offering = AnsibleCloudStackNetworkOffering(module) + + state = module.params.get('state') + if state in ['absent']: + network_offering = acs_network_offering.delete_network_offering() + else: + network_offering = acs_network_offering.create_or_update() + + result = acs_network_offering.get_result(network_offering) + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/cloudstack/cs_physical_network.py b/plugins/modules/cloud/cloudstack/cs_physical_network.py new file mode 100644 index 0000000000..5a1e5b08d7 --- /dev/null +++ b/plugins/modules/cloud/cloudstack/cs_physical_network.py @@ -0,0 +1,483 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# (c) 2017, Netservers Ltd. 
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: cs_physical_network +short_description: Manages physical networks on Apache CloudStack based clouds. +description: + - Create, update and remove networks. + - Enabled and disabled Network Service Providers + - Enables Internal LoadBalancer and VPC/VirtualRouter elements as required +author: + - Netservers Ltd. (@netservers) + - Patryk Cichy (@PatTheSilent) +options: + name: + description: + - Name of the physical network. + required: true + aliases: + - physical_network + type: str + zone: + description: + - Name of the zone in which the network belongs. + - If not set, default zone is used. + type: str + broadcast_domain_range: + description: + - broadcast domain range for the physical network[Pod or Zone]. + choices: [ POD, ZONE ] + type: str + domain: + description: + - Domain the network is owned by. + type: str + isolation_method: + description: + - Isolation method for the physical network. + choices: [ VLAN, GRE, L3 ] + type: str + network_speed: + description: + - The speed for the physical network. + choices: [1G, 10G] + type: str + tags: + description: + - A tag to identify this network. + - Physical networks support only one tag. + - To remove an existing tag pass an empty string. + aliases: + - tag + type: str + vlan: + description: + - The VLAN/VNI Ranges of the physical network. + type: str + nsps_enabled: + description: + - List of Network Service Providers to enable. + type: list + nsps_disabled: + description: + - List of Network Service Providers to disable. + type: list + state: + description: + - State of the physical network. + default: present + type: str + choices: [ present, absent, disabled, enabled ] + poll_async: + description: + - Poll async jobs until job has finished. + default: yes + type: bool +extends_documentation_fragment: +- community.general.cloudstack + +''' + +EXAMPLES = ''' +- name: Ensure a network is present + cs_physical_network: + name: net01 + zone: zone01 + isolation_method: VLAN + broadcast_domain_range: ZONE + delegate_to: localhost + +- name: Set a tag on a network + cs_physical_network: + name: net01 + tag: overlay + delegate_to: localhost + +- name: Remove tag on a network + cs_physical_network: + name: net01 + tag: "" + delegate_to: localhost + +- name: Ensure a network is enabled with specific nsps enabled + cs_physical_network: + name: net01 + zone: zone01 + isolation_method: VLAN + vlan: 100-200,300-400 + broadcast_domain_range: ZONE + state: enabled + nsps_enabled: + - virtualrouter + - internallbvm + - vpcvirtualrouter + delegate_to: localhost + +- name: Ensure a network is disabled + cs_physical_network: + name: net01 + zone: zone01 + state: disabled + delegate_to: localhost + +- name: Ensure a network is enabled + cs_physical_network: + name: net01 + zone: zone01 + state: enabled + delegate_to: localhost + +- name: Ensure a network is absent + cs_physical_network: + name: net01 + zone: zone01 + state: absent + delegate_to: localhost +''' + +RETURN = ''' +--- +id: + description: UUID of the network. + returned: success + type: str + sample: 3f8f25cd-c498-443f-9058-438cfbcbff50 +name: + description: Name of the network. 
+ returned: success + type: str + sample: net01 +state: + description: State of the network [Enabled/Disabled]. + returned: success + type: str + sample: Enabled +broadcast_domain_range: + description: broadcastdomainrange of the network [POD / ZONE]. + returned: success + type: str + sample: ZONE +isolation_method: + description: isolationmethod of the network [VLAN/GRE/L3]. + returned: success + type: str + sample: VLAN +network_speed: + description: networkspeed of the network [1G/10G]. + returned: success + type: str + sample: 1G +zone: + description: Name of zone the physical network is in. + returned: success + type: str + sample: ch-gva-2 +domain: + description: Name of domain the network is in. + returned: success + type: str + sample: domain1 +nsps: + description: list of enabled or disabled Network Service Providers + type: complex + returned: on enabling/disabling of Network Service Providers + contains: + enabled: + description: list of Network Service Providers that were enabled + returned: on Network Service Provider enabling + type: list + sample: + - virtualrouter + disabled: + description: list of Network Service Providers that were disabled + returned: on Network Service Provider disabling + type: list + sample: + - internallbvm + +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.cloudstack import ( + AnsibleCloudStack, + cs_argument_spec, + cs_required_together, +) + + +class AnsibleCloudStackPhysicalNetwork(AnsibleCloudStack): + + def __init__(self, module): + super(AnsibleCloudStackPhysicalNetwork, self).__init__(module) + self.returns = { + 'isolationmethods': 'isolation_method', + 'broadcastdomainrange': 'broadcast_domain_range', + 'networkspeed': 'network_speed', + 'vlan': 'vlan', + 'tags': 'tags', + } + self.nsps = [] + self.vrouters = None + self.loadbalancers = None + + def _get_common_args(self): + args = { + 'name': self.module.params.get('name'), + 'isolationmethods': self.module.params.get('isolation_method'), + 'broadcastdomainrange': self.module.params.get('broadcast_domain_range'), + 'networkspeed': self.module.params.get('network_speed'), + 'tags': self.module.params.get('tags'), + 'vlan': self.module.params.get('vlan'), + } + + state = self.module.params.get('state') + if state in ['enabled', 'disabled']: + args['state'] = state.capitalize() + return args + + def get_physical_network(self, key=None): + physical_network = self.module.params.get('name') + if self.physical_network: + return self._get_by_key(key, self.physical_network) + + args = { + 'zoneid': self.get_zone(key='id') + } + physical_networks = self.query_api('listPhysicalNetworks', **args) + if physical_networks: + for net in physical_networks['physicalnetwork']: + if physical_network.lower() in [net['name'].lower(), net['id']]: + self.physical_network = net + self.result['physical_network'] = net['name'] + break + + return self._get_by_key(key, self.physical_network) + + def get_nsp(self, name=None): + if not self.nsps: + args = { + 'physicalnetworkid': self.get_physical_network(key='id') + } + res = self.query_api('listNetworkServiceProviders', **args) + + self.nsps = res['networkserviceprovider'] + + names = [] + for nsp in self.nsps: + names.append(nsp['name']) + if nsp['name'].lower() == name.lower(): + return nsp + + self.module.fail_json(msg="Failed: '{0}' not in network service providers list '[{1}]'".format(name, names)) + + def update_nsp(self, name=None, state=None, service_list=None): + nsp = 
self.get_nsp(name) + if not service_list and nsp['state'] == state: + return nsp + + args = { + 'id': nsp['id'], + 'servicelist': service_list, + 'state': state + } + if not self.module.check_mode: + res = self.query_api('updateNetworkServiceProvider', **args) + + poll_async = self.module.params.get('poll_async') + if poll_async: + nsp = self.poll_job(res, 'networkserviceprovider') + + self.result['changed'] = True + return nsp + + def get_vrouter_element(self, nsp_name='virtualrouter'): + nsp = self.get_nsp(nsp_name) + nspid = nsp['id'] + if self.vrouters is None: + self.vrouters = dict() + res = self.query_api('listVirtualRouterElements', ) + for vrouter in res['virtualrouterelement']: + self.vrouters[vrouter['nspid']] = vrouter + + if nspid not in self.vrouters: + self.module.fail_json(msg="Failed: No VirtualRouterElement found for nsp '%s'" % nsp_name) + + return self.vrouters[nspid] + + def get_loadbalancer_element(self, nsp_name='internallbvm'): + nsp = self.get_nsp(nsp_name) + nspid = nsp['id'] + if self.loadbalancers is None: + self.loadbalancers = dict() + res = self.query_api('listInternalLoadBalancerElements', ) + for loadbalancer in res['internalloadbalancerelement']: + self.loadbalancers[loadbalancer['nspid']] = loadbalancer + + if nspid not in self.loadbalancers: + self.module.fail_json(msg="Failed: No Loadbalancer found for nsp '%s'" % nsp_name) + + return self.loadbalancers[nspid] + + def set_vrouter_element_state(self, enabled, nsp_name='virtualrouter'): + vrouter = self.get_vrouter_element(nsp_name) + if vrouter['enabled'] == enabled: + return vrouter + + args = { + 'id': vrouter['id'], + 'enabled': enabled + } + if not self.module.check_mode: + res = self.query_api('configureVirtualRouterElement', **args) + poll_async = self.module.params.get('poll_async') + if poll_async: + vrouter = self.poll_job(res, 'virtualrouterelement') + + self.result['changed'] = True + return vrouter + + def set_loadbalancer_element_state(self, enabled, nsp_name='internallbvm'): + loadbalancer = self.get_loadbalancer_element(nsp_name=nsp_name) + if loadbalancer['enabled'] == enabled: + return loadbalancer + + args = { + 'id': loadbalancer['id'], + 'enabled': enabled + } + if not self.module.check_mode: + res = self.query_api('configureInternalLoadBalancerElement', **args) + poll_async = self.module.params.get('poll_async') + if poll_async: + loadbalancer = self.poll_job(res, 'internalloadbalancerelement') + + self.result['changed'] = True + return loadbalancer + + def present_network(self): + network = self.get_physical_network() + if network: + network = self._update_network() + else: + network = self._create_network() + return network + + def _create_network(self): + self.result['changed'] = True + args = dict(zoneid=self.get_zone(key='id')) + args.update(self._get_common_args()) + if self.get_domain(key='id'): + args['domainid'] = self.get_domain(key='id') + + if not self.module.check_mode: + resource = self.query_api('createPhysicalNetwork', **args) + + poll_async = self.module.params.get('poll_async') + if poll_async: + self.network = self.poll_job(resource, 'physicalnetwork') + + return self.network + + def _update_network(self): + network = self.get_physical_network() + + args = dict(id=network['id']) + args.update(self._get_common_args()) + + if self.has_changed(args, network): + self.result['changed'] = True + + if not self.module.check_mode: + resource = self.query_api('updatePhysicalNetwork', **args) + + poll_async = self.module.params.get('poll_async') + if poll_async: + 
self.physical_network = self.poll_job(resource, 'physicalnetwork') + return self.physical_network + + def absent_network(self): + physical_network = self.get_physical_network() + if physical_network: + self.result['changed'] = True + args = { + 'id': physical_network['id'], + } + if not self.module.check_mode: + resource = self.query_api('deletePhysicalNetwork', **args) + poll_async = self.module.params.get('poll_async') + if poll_async: + self.poll_job(resource, 'success') + + return physical_network + + +def main(): + argument_spec = cs_argument_spec() + argument_spec.update(dict( + name=dict(required=True, aliases=['physical_network']), + zone=dict(), + domain=dict(), + vlan=dict(), + nsps_disabled=dict(type='list'), + nsps_enabled=dict(type='list'), + network_speed=dict(choices=['1G', '10G']), + broadcast_domain_range=dict(choices=['POD', 'ZONE']), + isolation_method=dict(choices=['VLAN', 'GRE', 'L3']), + state=dict(choices=['present', 'enabled', 'disabled', 'absent'], default='present'), + tags=dict(aliases=['tag']), + poll_async=dict(type='bool', default=True), + )) + + module = AnsibleModule( + argument_spec=argument_spec, + required_together=cs_required_together(), + supports_check_mode=True + ) + + acs_network = AnsibleCloudStackPhysicalNetwork(module) + state = module.params.get('state') + nsps_disabled = module.params.get('nsps_disabled', []) + nsps_enabled = module.params.get('nsps_enabled', []) + + if state in ['absent']: + network = acs_network.absent_network() + else: + network = acs_network.present_network() + if nsps_disabled is not None: + for name in nsps_disabled: + acs_network.update_nsp(name=name, state='Disabled') + + if nsps_enabled is not None: + for nsp_name in nsps_enabled: + if nsp_name.lower() in ['virtualrouter', 'vpcvirtualrouter']: + acs_network.set_vrouter_element_state(enabled=True, nsp_name=nsp_name) + elif nsp_name.lower() == 'internallbvm': + acs_network.set_loadbalancer_element_state(enabled=True, nsp_name=nsp_name) + + acs_network.update_nsp(name=nsp_name, state='Enabled') + + result = acs_network.get_result(network) + + if nsps_enabled: + result['nsps_enabled'] = nsps_enabled + if nsps_disabled: + result['nsps_disabled'] = nsps_disabled + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/cloudstack/cs_pod.py b/plugins/modules/cloud/cloudstack/cs_pod.py new file mode 100644 index 0000000000..47b7b952f4 --- /dev/null +++ b/plugins/modules/cloud/cloudstack/cs_pod.py @@ -0,0 +1,297 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (c) 2016, René Moser +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['stableinterface'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: cs_pod +short_description: Manages pods on Apache CloudStack based clouds. +description: + - Create, update, delete pods. +author: René Moser (@resmo) +options: + name: + description: + - Name of the pod. + type: str + required: true + id: + description: + - uuid of the existing pod. + type: str + start_ip: + description: + - Starting IP address for the Pod. + - Required on I(state=present) + type: str + end_ip: + description: + - Ending IP address for the Pod. + type: str + netmask: + description: + - Netmask for the Pod. 
+ - Required on I(state=present) + type: str + gateway: + description: + - Gateway for the Pod. + - Required on I(state=present) + type: str + zone: + description: + - Name of the zone in which the pod belongs to. + - If not set, default zone is used. + type: str + state: + description: + - State of the pod. + type: str + default: present + choices: [ present, enabled, disabled, absent ] +extends_documentation_fragment: +- community.general.cloudstack + +''' + +EXAMPLES = ''' +- name: Ensure a pod is present + cs_pod: + name: pod1 + zone: ch-zrh-ix-01 + start_ip: 10.100.10.101 + gateway: 10.100.10.1 + netmask: 255.255.255.0 + delegate_to: localhost + +- name: Ensure a pod is disabled + cs_pod: + name: pod1 + zone: ch-zrh-ix-01 + state: disabled + delegate_to: localhost + +- name: Ensure a pod is enabled + cs_pod: + name: pod1 + zone: ch-zrh-ix-01 + state: enabled + delegate_to: localhost + +- name: Ensure a pod is absent + cs_pod: + name: pod1 + zone: ch-zrh-ix-01 + state: absent + delegate_to: localhost +''' + +RETURN = ''' +--- +id: + description: UUID of the pod. + returned: success + type: str + sample: 04589590-ac63-4ffc-93f5-b698b8ac38b6 +name: + description: Name of the pod. + returned: success + type: str + sample: pod01 +start_ip: + description: Starting IP of the pod. + returned: success + type: str + sample: 10.100.1.101 +end_ip: + description: Ending IP of the pod. + returned: success + type: str + sample: 10.100.1.254 +netmask: + description: Netmask of the pod. + returned: success + type: str + sample: 255.255.255.0 +gateway: + description: Gateway of the pod. + returned: success + type: str + sample: 10.100.1.1 +allocation_state: + description: State of the pod. + returned: success + type: str + sample: Enabled +zone: + description: Name of zone the pod is in. 
+  returned: success
+  type: str
+  sample: ch-gva-2
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.cloudstack import (
+    AnsibleCloudStack,
+    cs_argument_spec,
+    cs_required_together
+)
+
+
+class AnsibleCloudStackPod(AnsibleCloudStack):
+
+    def __init__(self, module):
+        super(AnsibleCloudStackPod, self).__init__(module)
+        self.returns = {
+            'endip': 'end_ip',
+            'startip': 'start_ip',
+            'gateway': 'gateway',
+            'netmask': 'netmask',
+            'allocationstate': 'allocation_state',
+        }
+        self.pod = None
+
+    def _get_common_pod_args(self):
+        args = {
+            'name': self.module.params.get('name'),
+            'zoneid': self.get_zone(key='id'),
+            'startip': self.module.params.get('start_ip'),
+            'endip': self.module.params.get('end_ip'),
+            'netmask': self.module.params.get('netmask'),
+            'gateway': self.module.params.get('gateway')
+        }
+        state = self.module.params.get('state')
+        if state in ['enabled', 'disabled']:
+            args['allocationstate'] = state.capitalize()
+        return args
+
+    def get_pod(self):
+        if not self.pod:
+            args = {
+                'zoneid': self.get_zone(key='id')
+            }
+
+            uuid = self.module.params.get('id')
+            if uuid:
+                args['id'] = uuid
+            else:
+                args['name'] = self.module.params.get('name')
+
+            pods = self.query_api('listPods', **args)
+            if pods:
+                for pod in pods['pod']:
+                    # Use .get() since 'name' is not set when looking up by id.
+                    if not args.get('name'):
+                        self.pod = self._transform_ip_list(pod)
+                        break
+                    elif args['name'] == pod['name']:
+                        self.pod = self._transform_ip_list(pod)
+                        break
+        return self.pod
+
+    def present_pod(self):
+        pod = self.get_pod()
+        if pod:
+            pod = self._update_pod()
+        else:
+            pod = self._create_pod()
+        return pod
+
+    def _create_pod(self):
+        required_params = [
+            'start_ip',
+            'netmask',
+            'gateway',
+        ]
+        self.module.fail_on_missing_params(required_params=required_params)
+
+        pod = None
+        self.result['changed'] = True
+        args = self._get_common_pod_args()
+        if not self.module.check_mode:
+            res = self.query_api('createPod', **args)
+            pod = res['pod']
+        return pod
+
+    def _update_pod(self):
+        pod = self.get_pod()
+        args = self._get_common_pod_args()
+        args['id'] = pod['id']
+
+        if self.has_changed(args, pod):
+            self.result['changed'] = True
+
+            if not self.module.check_mode:
+                res = self.query_api('updatePod', **args)
+                pod = res['pod']
+        return pod
+
+    def absent_pod(self):
+        pod = self.get_pod()
+        if pod:
+            self.result['changed'] = True
+
+            args = {
+                'id': pod['id']
+            }
+            if not self.module.check_mode:
+                self.query_api('deletePod', **args)
+        return pod
+
+    def _transform_ip_list(self, resource):
+        """ Workaround for 4.11 return API break """
+        keys = ['endip', 'startip']
+        if resource:
+            for key in keys:
+                if key in resource and isinstance(resource[key], list):
+                    resource[key] = resource[key][0]
+        return resource
+
+    def get_result(self, pod):
+        pod = self._transform_ip_list(pod)
+        super(AnsibleCloudStackPod, self).get_result(pod)
+        return self.result
+
+
+def main():
+    argument_spec = cs_argument_spec()
+    argument_spec.update(dict(
+        id=dict(),
+        name=dict(required=True),
+        gateway=dict(),
+        netmask=dict(),
+        start_ip=dict(),
+        end_ip=dict(),
+        zone=dict(),
+        state=dict(choices=['present', 'enabled', 'disabled', 'absent'], default='present'),
+    ))
+
+    module = AnsibleModule(
+        argument_spec=argument_spec,
+        required_together=cs_required_together(),
+        supports_check_mode=True
+    )
+
+    acs_pod = AnsibleCloudStackPod(module)
+    state = module.params.get('state')
+    if state in ['absent']:
+        pod = acs_pod.absent_pod()
+    else:
+        pod = acs_pod.present_pod()
+
+    result = 
acs_pod.get_result(pod)
+
+    module.exit_json(**result)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/cloud/cloudstack/cs_portforward.py b/plugins/modules/cloud/cloudstack/cs_portforward.py
new file mode 100644
index 0000000000..bb78f43c58
--- /dev/null
+++ b/plugins/modules/cloud/cloudstack/cs_portforward.py
@@ -0,0 +1,396 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (c) 2015, René Moser
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['stableinterface'],
+                    'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: cs_portforward
+short_description: Manages port forwarding rules on Apache CloudStack based clouds.
+description:
+  - Create, update and remove port forwarding rules.
+author: René Moser (@resmo)
+options:
+  ip_address:
+    description:
+      - Public IP address the rule is assigned to.
+    type: str
+    required: true
+  vm:
+    description:
+      - Name of the virtual machine to create the port forwarding rule for.
+      - Required if I(state=present).
+    type: str
+  state:
+    description:
+      - State of the port forwarding rule.
+    type: str
+    default: present
+    choices: [ present, absent ]
+  protocol:
+    description:
+      - Protocol of the port forwarding rule.
+    type: str
+    default: tcp
+    choices: [ tcp, udp ]
+  public_port:
+    description:
+      - Start public port for this rule.
+    type: int
+    required: true
+  public_end_port:
+    description:
+      - End public port for this rule.
+      - If not specified, equal to I(public_port).
+    type: int
+  private_port:
+    description:
+      - Start private port for this rule.
+    type: int
+    required: true
+  private_end_port:
+    description:
+      - End private port for this rule.
+      - If not specified, equal to I(private_port).
+    type: int
+  open_firewall:
+    description:
+      - Whether the firewall rule for the public port should be created while creating the new rule.
+      - Use M(cs_firewall) for managing firewall rules.
+    default: no
+    type: bool
+  vm_guest_ip:
+    description:
+      - VM guest NIC secondary IP address for the port forwarding rule.
+    type: str
+  network:
+    description:
+      - Name of the network.
+    type: str
+  vpc:
+    description:
+      - Name of the VPC.
+    type: str
+  domain:
+    description:
+      - Domain the I(vm) is related to.
+    type: str
+  account:
+    description:
+      - Account the I(vm) is related to.
+    type: str
+  project:
+    description:
+      - Name of the project the I(vm) is located in.
+    type: str
+  zone:
+    description:
+      - Name of the zone the virtual machine is in.
+      - If not set, default zone is used.
+    type: str
+  poll_async:
+    description:
+      - Poll async jobs until job has finished.
+    default: yes
+    type: bool
+  tags:
+    description:
+      - List of tags. Tags are a list of dictionaries having keys I(key) and I(value).
+      - "To delete all tags, set an empty list e.g. I(tags: [])."
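+      - "For example: I(tags: [ { key: foo, value: bar } ])."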
+ type: list + aliases: [ tag ] +extends_documentation_fragment: +- community.general.cloudstack + +''' + +EXAMPLES = ''' +- name: 1.2.3.4:80 -> web01:8080 + cs_portforward: + ip_address: 1.2.3.4 + vm: web01 + public_port: 80 + private_port: 8080 + delegate_to: localhost + +- name: forward SSH and open firewall + cs_portforward: + ip_address: '{{ public_ip }}' + vm: '{{ inventory_hostname }}' + public_port: '{{ ansible_ssh_port }}' + private_port: 22 + open_firewall: true + delegate_to: localhost + +- name: forward DNS traffic, but do not open firewall + cs_portforward: + ip_address: 1.2.3.4 + vm: '{{ inventory_hostname }}' + public_port: 53 + private_port: 53 + protocol: udp + delegate_to: localhost + +- name: remove ssh port forwarding + cs_portforward: + ip_address: 1.2.3.4 + public_port: 22 + private_port: 22 + state: absent + delegate_to: localhost +''' + +RETURN = ''' +--- +id: + description: UUID of the public IP address. + returned: success + type: str + sample: a6f7a5fc-43f8-11e5-a151-feff819cdc9f +ip_address: + description: Public IP address. + returned: success + type: str + sample: 1.2.3.4 +protocol: + description: Protocol. + returned: success + type: str + sample: tcp +private_port: + description: Start port on the virtual machine's IP address. + returned: success + type: int + sample: 80 +private_end_port: + description: End port on the virtual machine's IP address. + returned: success + type: int + sample: 80 +public_port: + description: Start port on the public IP address. + returned: success + type: int + sample: 80 +public_end_port: + description: End port on the public IP address. + returned: success + type: int + sample: 80 +tags: + description: Tags related to the port forwarding. + returned: success + type: list + sample: [] +vm_name: + description: Name of the virtual machine. + returned: success + type: str + sample: web-01 +vm_display_name: + description: Display name of the virtual machine. + returned: success + type: str + sample: web-01 +vm_guest_ip: + description: IP of the virtual machine. + returned: success + type: str + sample: 10.101.65.152 +vpc: + description: Name of the VPC. + returned: success + type: str + sample: my_vpc +network: + description: Name of the network. 
+ returned: success + type: str + sample: dmz +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.cloudstack import AnsibleCloudStack, cs_argument_spec, cs_required_together + + +class AnsibleCloudStackPortforwarding(AnsibleCloudStack): + + def __init__(self, module): + super(AnsibleCloudStackPortforwarding, self).__init__(module) + self.returns = { + 'virtualmachinedisplayname': 'vm_display_name', + 'virtualmachinename': 'vm_name', + 'ipaddress': 'ip_address', + 'vmguestip': 'vm_guest_ip', + 'publicip': 'public_ip', + 'protocol': 'protocol', + } + # these values will be casted to int + self.returns_to_int = { + 'publicport': 'public_port', + 'publicendport': 'public_end_port', + 'privateport': 'private_port', + 'privateendport': 'private_end_port', + } + self.portforwarding_rule = None + + def get_portforwarding_rule(self): + if not self.portforwarding_rule: + protocol = self.module.params.get('protocol') + public_port = self.module.params.get('public_port') + + args = { + 'ipaddressid': self.get_ip_address(key='id'), + 'account': self.get_account(key='name'), + 'domainid': self.get_domain(key='id'), + 'projectid': self.get_project(key='id'), + } + portforwarding_rules = self.query_api('listPortForwardingRules', **args) + + if portforwarding_rules and 'portforwardingrule' in portforwarding_rules: + for rule in portforwarding_rules['portforwardingrule']: + if (protocol == rule['protocol'] and + public_port == int(rule['publicport'])): + self.portforwarding_rule = rule + break + return self.portforwarding_rule + + def present_portforwarding_rule(self): + portforwarding_rule = self.get_portforwarding_rule() + if portforwarding_rule: + portforwarding_rule = self.update_portforwarding_rule(portforwarding_rule) + else: + portforwarding_rule = self.create_portforwarding_rule() + + if portforwarding_rule: + portforwarding_rule = self.ensure_tags(resource=portforwarding_rule, resource_type='PortForwardingRule') + self.portforwarding_rule = portforwarding_rule + + return portforwarding_rule + + def create_portforwarding_rule(self): + args = { + 'protocol': self.module.params.get('protocol'), + 'publicport': self.module.params.get('public_port'), + 'publicendport': self.get_or_fallback('public_end_port', 'public_port'), + 'privateport': self.module.params.get('private_port'), + 'privateendport': self.get_or_fallback('private_end_port', 'private_port'), + 'openfirewall': self.module.params.get('open_firewall'), + 'vmguestip': self.get_vm_guest_ip(), + 'ipaddressid': self.get_ip_address(key='id'), + 'virtualmachineid': self.get_vm(key='id'), + 'account': self.get_account(key='name'), + 'domainid': self.get_domain(key='id'), + 'networkid': self.get_network(key='id'), + } + + portforwarding_rule = None + self.result['changed'] = True + if not self.module.check_mode: + portforwarding_rule = self.query_api('createPortForwardingRule', **args) + poll_async = self.module.params.get('poll_async') + if poll_async: + portforwarding_rule = self.poll_job(portforwarding_rule, 'portforwardingrule') + return portforwarding_rule + + def update_portforwarding_rule(self, portforwarding_rule): + args = { + 'protocol': self.module.params.get('protocol'), + 'publicport': self.module.params.get('public_port'), + 'publicendport': self.get_or_fallback('public_end_port', 'public_port'), + 'privateport': self.module.params.get('private_port'), + 'privateendport': self.get_or_fallback('private_end_port', 'private_port'), + 'vmguestip': 
self.get_vm_guest_ip(), + 'ipaddressid': self.get_ip_address(key='id'), + 'virtualmachineid': self.get_vm(key='id'), + 'networkid': self.get_network(key='id'), + } + + if self.has_changed(args, portforwarding_rule): + self.result['changed'] = True + if not self.module.check_mode: + # API broken in 4.2.1?, workaround using remove/create instead of update + # portforwarding_rule = self.query_api('updatePortForwardingRule', **args) + self.absent_portforwarding_rule() + portforwarding_rule = self.query_api('createPortForwardingRule', **args) + poll_async = self.module.params.get('poll_async') + if poll_async: + portforwarding_rule = self.poll_job(portforwarding_rule, 'portforwardingrule') + return portforwarding_rule + + def absent_portforwarding_rule(self): + portforwarding_rule = self.get_portforwarding_rule() + + if portforwarding_rule: + self.result['changed'] = True + args = { + 'id': portforwarding_rule['id'], + } + if not self.module.check_mode: + res = self.query_api('deletePortForwardingRule', **args) + poll_async = self.module.params.get('poll_async') + if poll_async: + self.poll_job(res, 'portforwardingrule') + return portforwarding_rule + + def get_result(self, portforwarding_rule): + super(AnsibleCloudStackPortforwarding, self).get_result(portforwarding_rule) + if portforwarding_rule: + for search_key, return_key in self.returns_to_int.items(): + if search_key in portforwarding_rule: + self.result[return_key] = int(portforwarding_rule[search_key]) + return self.result + + +def main(): + argument_spec = cs_argument_spec() + argument_spec.update(dict( + ip_address=dict(required=True), + protocol=dict(choices=['tcp', 'udp'], default='tcp'), + public_port=dict(type='int', required=True), + public_end_port=dict(type='int'), + private_port=dict(type='int', required=True), + private_end_port=dict(type='int'), + state=dict(choices=['present', 'absent'], default='present'), + open_firewall=dict(type='bool', default=False), + vm_guest_ip=dict(), + vm=dict(), + vpc=dict(), + network=dict(), + zone=dict(), + domain=dict(), + account=dict(), + project=dict(), + poll_async=dict(type='bool', default=True), + tags=dict(type='list', aliases=['tag']), + )) + + module = AnsibleModule( + argument_spec=argument_spec, + required_together=cs_required_together(), + supports_check_mode=True + ) + + acs_pf = AnsibleCloudStackPortforwarding(module) + state = module.params.get('state') + if state in ['absent']: + pf_rule = acs_pf.absent_portforwarding_rule() + else: + pf_rule = acs_pf.present_portforwarding_rule() + + result = acs_pf.get_result(pf_rule) + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/cloudstack/cs_project.py b/plugins/modules/cloud/cloudstack/cs_project.py new file mode 100644 index 0000000000..4edb4b0993 --- /dev/null +++ b/plugins/modules/cloud/cloudstack/cs_project.py @@ -0,0 +1,279 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# (c) 2015, René Moser +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['stableinterface'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: cs_project +short_description: Manages projects on Apache CloudStack based clouds. +description: + - Create, update, suspend, activate and remove projects. +author: René Moser (@resmo) +options: + name: + description: + - Name of the project. 
+ type: str + required: true + display_text: + description: + - Display text of the project. + - If not specified, I(name) will be used as I(display_text). + type: str + state: + description: + - State of the project. + type: str + default: present + choices: [ present, absent, active, suspended ] + domain: + description: + - Domain the project is related to. + type: str + account: + description: + - Account the project is related to. + type: str + tags: + description: + - List of tags. Tags are a list of dictionaries having keys I(key) and I(value). + - "If you want to delete all tags, set a empty list e.g. I(tags: [])." + type: list + aliases: [ tag ] + poll_async: + description: + - Poll async jobs until job has finished. + type: bool + default: yes +extends_documentation_fragment: +- community.general.cloudstack + +''' + +EXAMPLES = ''' +- name: Create a project + cs_project: + name: web + tags: + - { key: admin, value: john } + - { key: foo, value: bar } + delegate_to: localhost + +- name: Rename a project + cs_project: + name: web + display_text: my web project + delegate_to: localhost + +- name: Suspend an existing project + cs_project: + name: web + state: suspended + delegate_to: localhost + +- name: Activate an existing project + cs_project: + name: web + state: active + delegate_to: localhost + +- name: Remove a project + cs_project: + name: web + state: absent + delegate_to: localhost +''' + +RETURN = ''' +--- +id: + description: UUID of the project. + returned: success + type: str + sample: 04589590-ac63-4ffc-93f5-b698b8ac38b6 +name: + description: Name of the project. + returned: success + type: str + sample: web project +display_text: + description: Display text of the project. + returned: success + type: str + sample: web project +state: + description: State of the project. + returned: success + type: str + sample: Active +domain: + description: Domain the project is related to. + returned: success + type: str + sample: example domain +account: + description: Account the project is related to. + returned: success + type: str + sample: example account +tags: + description: List of resource tags associated with the project. 
+ returned: success + type: list + sample: '[ { "key": "foo", "value": "bar" } ]' +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.cloudstack import ( + AnsibleCloudStack, + cs_argument_spec, + cs_required_together +) + + +class AnsibleCloudStackProject(AnsibleCloudStack): + + def get_project(self): + if not self.project: + project = self.module.params.get('name') + + args = { + 'account': self.get_account(key='name'), + 'domainid': self.get_domain(key='id'), + 'fetch_list': True, + } + projects = self.query_api('listProjects', **args) + if projects: + for p in projects: + if project.lower() in [p['name'].lower(), p['id']]: + self.project = p + break + return self.project + + def present_project(self): + project = self.get_project() + if not project: + project = self.create_project(project) + else: + project = self.update_project(project) + if project: + project = self.ensure_tags(resource=project, resource_type='project') + # refresh resource + self.project = project + return project + + def update_project(self, project): + args = { + 'id': project['id'], + 'displaytext': self.get_or_fallback('display_text', 'name') + } + if self.has_changed(args, project): + self.result['changed'] = True + if not self.module.check_mode: + project = self.query_api('updateProject', **args) + + poll_async = self.module.params.get('poll_async') + if project and poll_async: + project = self.poll_job(project, 'project') + return project + + def create_project(self, project): + self.result['changed'] = True + + args = { + 'name': self.module.params.get('name'), + 'displaytext': self.get_or_fallback('display_text', 'name'), + 'account': self.get_account('name'), + 'domainid': self.get_domain('id') + } + if not self.module.check_mode: + project = self.query_api('createProject', **args) + + poll_async = self.module.params.get('poll_async') + if project and poll_async: + project = self.poll_job(project, 'project') + return project + + def state_project(self, state='active'): + project = self.present_project() + + if project['state'].lower() != state: + self.result['changed'] = True + + args = { + 'id': project['id'] + } + if not self.module.check_mode: + if state == 'suspended': + project = self.query_api('suspendProject', **args) + else: + project = self.query_api('activateProject', **args) + + poll_async = self.module.params.get('poll_async') + if project and poll_async: + project = self.poll_job(project, 'project') + return project + + def absent_project(self): + project = self.get_project() + if project: + self.result['changed'] = True + + args = { + 'id': project['id'] + } + if not self.module.check_mode: + res = self.query_api('deleteProject', **args) + + poll_async = self.module.params.get('poll_async') + if res and poll_async: + res = self.poll_job(res, 'project') + return project + + +def main(): + argument_spec = cs_argument_spec() + argument_spec.update(dict( + name=dict(required=True), + display_text=dict(), + state=dict(choices=['present', 'absent', 'active', 'suspended'], default='present'), + domain=dict(), + account=dict(), + poll_async=dict(type='bool', default=True), + tags=dict(type='list', aliases=['tag']), + )) + + module = AnsibleModule( + argument_spec=argument_spec, + required_together=cs_required_together(), + supports_check_mode=True + ) + + acs_project = AnsibleCloudStackProject(module) + + state = module.params.get('state') + if state in ['absent']: + project = acs_project.absent_project() + + elif state in 
['active', 'suspended']:
+        project = acs_project.state_project(state=state)
+
+    else:
+        project = acs_project.present_project()
+
+    result = acs_project.get_result(project)
+    module.exit_json(**result)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/cloud/cloudstack/cs_region.py b/plugins/modules/cloud/cloudstack/cs_region.py
new file mode 100644
index 0000000000..12eae04e26
--- /dev/null
+++ b/plugins/modules/cloud/cloudstack/cs_region.py
@@ -0,0 +1,193 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (c) 2016, René Moser
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: cs_region
+short_description: Manages regions on Apache CloudStack based clouds.
+description:
+  - Add, update and remove regions.
+author: René Moser (@resmo)
+options:
+  id:
+    description:
+      - ID of the region.
+      - Must be a number (int).
+    type: int
+    required: true
+  name:
+    description:
+      - Name of the region.
+      - Required if I(state=present).
+    type: str
+  endpoint:
+    description:
+      - Endpoint URL of the region.
+      - Required if I(state=present).
+    type: str
+  state:
+    description:
+      - State of the region.
+    type: str
+    default: present
+    choices: [ present, absent ]
+extends_documentation_fragment:
+- community.general.cloudstack

+'''
+
+EXAMPLES = '''
+- name: create a region
+  cs_region:
+    id: 2
+    name: geneva
+    endpoint: https://cloud.gva.example.com
+  delegate_to: localhost
+
+- name: remove a region with ID 2
+  cs_region:
+    id: 2
+    state: absent
+  delegate_to: localhost
+'''
+
+RETURN = '''
+---
+id:
+  description: ID of the region.
+  returned: success
+  type: int
+  sample: 1
+name:
+  description: Name of the region.
+  returned: success
+  type: str
+  sample: local
+endpoint:
+  description: Endpoint of the region.
+  returned: success
+  type: str
+  sample: http://cloud.example.com
+gslb_service_enabled:
+  description: Whether the GSLB service is enabled or not.
+  returned: success
+  type: bool
+  sample: true
+portable_ip_service_enabled:
+  description: Whether the portable IP service is enabled or not.
+ returned: success + type: bool + sample: true +''' + + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.cloudstack import ( + AnsibleCloudStack, + cs_argument_spec, + cs_required_together +) + + +class AnsibleCloudStackRegion(AnsibleCloudStack): + + def __init__(self, module): + super(AnsibleCloudStackRegion, self).__init__(module) + self.returns = { + 'endpoint': 'endpoint', + 'gslbserviceenabled': 'gslb_service_enabled', + 'portableipserviceenabled': 'portable_ip_service_enabled', + } + + def get_region(self): + id = self.module.params.get('id') + regions = self.query_api('listRegions', id=id) + if regions: + return regions['region'][0] + return None + + def present_region(self): + region = self.get_region() + if not region: + region = self._create_region(region=region) + else: + region = self._update_region(region=region) + return region + + def _create_region(self, region): + self.result['changed'] = True + args = { + 'id': self.module.params.get('id'), + 'name': self.module.params.get('name'), + 'endpoint': self.module.params.get('endpoint') + } + if not self.module.check_mode: + res = self.query_api('addRegion', **args) + region = res['region'] + return region + + def _update_region(self, region): + args = { + 'id': self.module.params.get('id'), + 'name': self.module.params.get('name'), + 'endpoint': self.module.params.get('endpoint') + } + if self.has_changed(args, region): + self.result['changed'] = True + if not self.module.check_mode: + res = self.query_api('updateRegion', **args) + region = res['region'] + return region + + def absent_region(self): + region = self.get_region() + if region: + self.result['changed'] = True + if not self.module.check_mode: + self.query_api('removeRegion', id=region['id']) + return region + + +def main(): + argument_spec = cs_argument_spec() + argument_spec.update(dict( + id=dict(required=True, type='int'), + name=dict(), + endpoint=dict(), + state=dict(choices=['present', 'absent'], default='present'), + )) + + module = AnsibleModule( + argument_spec=argument_spec, + required_together=cs_required_together(), + required_if=[ + ('state', 'present', ['name', 'endpoint']), + ], + supports_check_mode=True + ) + + acs_region = AnsibleCloudStackRegion(module) + + state = module.params.get('state') + if state == 'absent': + region = acs_region.absent_region() + else: + region = acs_region.present_region() + + result = acs_region.get_result(region) + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/cloudstack/cs_resourcelimit.py b/plugins/modules/cloud/cloudstack/cs_resourcelimit.py new file mode 100644 index 0000000000..0f183fe2e2 --- /dev/null +++ b/plugins/modules/cloud/cloudstack/cs_resourcelimit.py @@ -0,0 +1,208 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (c) 2016, René Moser +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['stableinterface'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: cs_resourcelimit +short_description: Manages resource limits on Apache CloudStack based clouds. +description: + - Manage limits of resources for domains, accounts and projects. +author: René Moser (@resmo) +options: + resource_type: + description: + - Type of the resource. 
+    type: str
+    required: true
+    choices:
+      - instance
+      - ip_address
+      - volume
+      - snapshot
+      - template
+      - network
+      - vpc
+      - cpu
+      - memory
+      - primary_storage
+      - secondary_storage
+    aliases: [ type ]
+  limit:
+    description:
+      - Maximum number of the resource.
+      - Default is unlimited C(-1).
+    type: int
+    default: -1
+    aliases: [ max ]
+  domain:
+    description:
+      - Domain the resource is related to.
+    type: str
+  account:
+    description:
+      - Account the resource is related to.
+    type: str
+  project:
+    description:
+      - Name of the project the resource is related to.
+    type: str
+extends_documentation_fragment:
+- community.general.cloudstack

+'''
+
+EXAMPLES = '''
+- name: Update a resource limit for instances of a domain
+  cs_resourcelimit:
+    type: instance
+    limit: 10
+    domain: customers
+  delegate_to: localhost
+
+- name: Update a resource limit for instances of an account
+  cs_resourcelimit:
+    type: instance
+    limit: 12
+    account: moserre
+    domain: customers
+  delegate_to: localhost
+'''
+
+RETURN = '''
+---
+resource_type:
+  description: Type of the resource.
+  returned: success
+  type: str
+  sample: instance
+limit:
+  description: Maximum number of the resource.
+  returned: success
+  type: int
+  sample: -1
+domain:
+  description: Domain the resource is related to.
+  returned: success
+  type: str
+  sample: example domain
+account:
+  description: Account the resource is related to.
+  returned: success
+  type: str
+  sample: example account
+project:
+  description: Project the resource is related to.
+  returned: success
+  type: str
+  sample: example project
+'''
+
+# import cloudstack common
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.cloudstack import (
+    AnsibleCloudStack,
+    cs_required_together,
+    cs_argument_spec
+)
+
+
+RESOURCE_TYPES = {
+    'instance': 0,
+    'ip_address': 1,
+    'volume': 2,
+    'snapshot': 3,
+    'template': 4,
+    'network': 6,
+    'vpc': 7,
+    'cpu': 8,
+    'memory': 9,
+    'primary_storage': 10,
+    'secondary_storage': 11,
+}
+
+
+class AnsibleCloudStackResourceLimit(AnsibleCloudStack):
+
+    def __init__(self, module):
+        super(AnsibleCloudStackResourceLimit, self).__init__(module)
+        self.returns = {
+            'max': 'limit',
+        }
+
+    def get_resource_type(self):
+        resource_type = self.module.params.get('resource_type')
+        return RESOURCE_TYPES.get(resource_type)
+
+    def get_resource_limit(self):
+        args = {
+            'account': self.get_account(key='name'),
+            'domainid': self.get_domain(key='id'),
+            'projectid': self.get_project(key='id'),
+            'resourcetype': self.get_resource_type()
+        }
+        resource_limit = self.query_api('listResourceLimits', **args)
+        if resource_limit:
+            if 'limit' in resource_limit['resourcelimit'][0]:
+                # Cast the 'limit' value to int, not the whole resource dict.
+                resource_limit['resourcelimit'][0]['limit'] = int(resource_limit['resourcelimit'][0]['limit'])
+            return resource_limit['resourcelimit'][0]
+        self.module.fail_json(msg="Resource limit type '%s' not found." 
% self.module.params.get('resource_type')) + + def update_resource_limit(self): + resource_limit = self.get_resource_limit() + + args = { + 'account': self.get_account(key='name'), + 'domainid': self.get_domain(key='id'), + 'projectid': self.get_project(key='id'), + 'resourcetype': self.get_resource_type(), + 'max': self.module.params.get('limit', -1) + } + + if self.has_changed(args, resource_limit): + self.result['changed'] = True + if not self.module.check_mode: + res = self.query_api('updateResourceLimit', **args) + resource_limit = res['resourcelimit'] + return resource_limit + + def get_result(self, resource_limit): + self.result = super(AnsibleCloudStackResourceLimit, self).get_result(resource_limit) + self.result['resource_type'] = self.module.params.get('resource_type') + return self.result + + +def main(): + argument_spec = cs_argument_spec() + argument_spec.update(dict( + resource_type=dict(required=True, choices=RESOURCE_TYPES.keys(), aliases=['type']), + limit=dict(default=-1, aliases=['max'], type='int'), + domain=dict(), + account=dict(), + project=dict(), + )) + + module = AnsibleModule( + argument_spec=argument_spec, + required_together=cs_required_together(), + supports_check_mode=True + ) + + acs_resource_limit = AnsibleCloudStackResourceLimit(module) + resource_limit = acs_resource_limit.update_resource_limit() + result = acs_resource_limit.get_result(resource_limit) + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/cloudstack/cs_role.py b/plugins/modules/cloud/cloudstack/cs_role.py new file mode 100644 index 0000000000..ab3a3950e6 --- /dev/null +++ b/plugins/modules/cloud/cloudstack/cs_role.py @@ -0,0 +1,212 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# (c) 2016, René Moser +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: cs_role +short_description: Manages user roles on Apache CloudStack based clouds. +description: + - Create, update, delete user roles. +author: René Moser (@resmo) +options: + name: + description: + - Name of the role. + type: str + required: true + uuid: + description: + - ID of the role. + - If provided, I(uuid) is used as key. + type: str + aliases: [ id ] + role_type: + description: + - Type of the role. + - Only considered for creation. + type: str + default: User + choices: [ User, DomainAdmin, ResourceAdmin, Admin ] + description: + description: + - Description of the role. + type: str + state: + description: + - State of the role. + type: str + default: present + choices: [ present, absent ] +extends_documentation_fragment: +- community.general.cloudstack + +''' + +EXAMPLES = ''' +- name: Ensure an user role is present + cs_role: + name: myrole_user + delegate_to: localhost + +- name: Ensure a role having particular ID is named as myrole_user + cs_role: + name: myrole_user + id: 04589590-ac63-4ffc-93f5-b698b8ac38b6 + delegate_to: localhost + +- name: Ensure a role is absent + cs_role: + name: myrole_user + state: absent + delegate_to: localhost +''' + +RETURN = ''' +--- +id: + description: UUID of the role. + returned: success + type: str + sample: 04589590-ac63-4ffc-93f5-b698b8ac38b6 +name: + description: Name of the role. 
+ returned: success + type: str + sample: myrole +description: + description: Description of the role. + returned: success + type: str + sample: "This is my role description" +role_type: + description: Type of the role. + returned: success + type: str + sample: User +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.cloudstack import ( + AnsibleCloudStack, + cs_argument_spec, + cs_required_together, +) + + +class AnsibleCloudStackRole(AnsibleCloudStack): + + def __init__(self, module): + super(AnsibleCloudStackRole, self).__init__(module) + self.returns = { + 'type': 'role_type', + } + + def get_role(self): + uuid = self.module.params.get('uuid') + if uuid: + args = { + 'id': uuid, + } + roles = self.query_api('listRoles', **args) + if roles: + return roles['role'][0] + else: + args = { + 'name': self.module.params.get('name'), + } + roles = self.query_api('listRoles', **args) + if roles: + return roles['role'][0] + return None + + def present_role(self): + role = self.get_role() + if role: + role = self._update_role(role) + else: + role = self._create_role(role) + return role + + def _create_role(self, role): + self.result['changed'] = True + args = { + 'name': self.module.params.get('name'), + 'type': self.module.params.get('role_type'), + 'description': self.module.params.get('description'), + } + if not self.module.check_mode: + res = self.query_api('createRole', **args) + role = res['role'] + return role + + def _update_role(self, role): + args = { + 'id': role['id'], + 'name': self.module.params.get('name'), + 'description': self.module.params.get('description'), + } + if self.has_changed(args, role): + self.result['changed'] = True + if not self.module.check_mode: + res = self.query_api('updateRole', **args) + + # The API as in 4.9 does not return an updated role yet + if 'role' not in res: + role = self.get_role() + else: + role = res['role'] + return role + + def absent_role(self): + role = self.get_role() + if role: + self.result['changed'] = True + args = { + 'id': role['id'], + } + if not self.module.check_mode: + self.query_api('deleteRole', **args) + return role + + +def main(): + argument_spec = cs_argument_spec() + argument_spec.update(dict( + uuid=dict(aliases=['id']), + name=dict(required=True), + description=dict(), + role_type=dict(choices=['User', 'DomainAdmin', 'ResourceAdmin', 'Admin'], default='User'), + state=dict(choices=['present', 'absent'], default='present'), + )) + + module = AnsibleModule( + argument_spec=argument_spec, + required_together=cs_required_together(), + supports_check_mode=True + ) + + acs_role = AnsibleCloudStackRole(module) + state = module.params.get('state') + if state == 'absent': + role = acs_role.absent_role() + else: + role = acs_role.present_role() + + result = acs_role.get_result(role) + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/cloudstack/cs_role_permission.py b/plugins/modules/cloud/cloudstack/cs_role_permission.py new file mode 100644 index 0000000000..98e83c7024 --- /dev/null +++ b/plugins/modules/cloud/cloudstack/cs_role_permission.py @@ -0,0 +1,352 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (c) 2017, David Passante (@dpassante) +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': 
['preview'],
+                    'supported_by': 'community'}
+
+DOCUMENTATION = '''
+---
+module: cs_role_permission
+short_description: Manages role permissions on Apache CloudStack based clouds.
+description:
+  - Create, update and remove CloudStack role permissions.
+  - Managing role permissions is only supported in CloudStack >= 4.9.
+author: David Passante (@dpassante)
+options:
+  name:
+    description:
+      - The API name of the permission.
+    type: str
+    required: true
+  role:
+    description:
+      - Name or ID of the role.
+    type: str
+    required: true
+  permission:
+    description:
+      - The rule permission, either C(allow) or C(deny). Defaults to C(deny).
+    type: str
+    choices: [ allow, deny ]
+    default: deny
+  state:
+    description:
+      - State of the role permission.
+    type: str
+    choices: [ present, absent ]
+    default: present
+  description:
+    description:
+      - The description of the role permission.
+    type: str
+  parent:
+    description:
+      - The parent role permission UUID. Use C(0) to move this rule to the top of the list.
+    type: str
+extends_documentation_fragment:
+- community.general.cloudstack

+'''
+
+EXAMPLES = '''
+- name: Create a role permission
+  cs_role_permission:
+    role: My_Custom_role
+    name: createVPC
+    permission: allow
+    description: My comments
+  delegate_to: localhost
+
+- name: Remove a role permission
+  cs_role_permission:
+    state: absent
+    role: My_Custom_role
+    name: createVPC
+  delegate_to: localhost
+
+- name: Update a system role permission
+  cs_role_permission:
+    role: Domain Admin
+    name: createVPC
+    permission: deny
+  delegate_to: localhost
+
+- name: Update rules order. Move the rule to the top of the list
+  cs_role_permission:
+    role: Domain Admin
+    name: createVPC
+    parent: 0
+  delegate_to: localhost
+'''
+
+RETURN = '''
+---
+id:
+  description: The ID of the role permission.
+  returned: success
+  type: str
+  sample: a6f7a5fc-43f8-11e5-a151-feff819cdc9f
+name:
+  description: The API name of the permission.
+  returned: success
+  type: str
+  sample: createVPC
+permission:
+  description: The permission type of the API name.
+  returned: success
+  type: str
+  sample: allow
+role_id:
+  description: The ID of the role to which the role permission belongs.
+  returned: success
+  type: str
+  sample: c6f7a5fc-43f8-11e5-a151-feff819cdc7f
+description:
+  description: The description of the role permission.
+  returned: success
+  type: str
+  sample: Deny createVPC for users
+'''
+
+from distutils.version import LooseVersion
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.cloudstack import (
+    AnsibleCloudStack,
+    cs_argument_spec,
+    cs_required_together,
+)
+
+
+class AnsibleCloudStackRolePermission(AnsibleCloudStack):
+
+    def __init__(self, module):
+        super(AnsibleCloudStackRolePermission, self).__init__(module)
+        cloudstack_min_version = LooseVersion('4.9.2')
+
+        self.returns = {
+            'id': 'id',
+            'roleid': 'role_id',
+            'rule': 'name',
+            'permission': 'permission',
+            'description': 'description',
+        }
+        self.role_permission = None
+
+        self.cloudstack_version = self._cloudstack_ver()
+
+        if self.cloudstack_version < cloudstack_min_version:
+            self.fail_json(msg="This module requires CloudStack >= %s." 
% cloudstack_min_version) + + def _cloudstack_ver(self): + capabilities = self.get_capabilities() + return LooseVersion(capabilities['cloudstackversion']) + + def _get_role_id(self): + role = self.module.params.get('role') + if not role: + return None + + res = self.query_api('listRoles') + roles = res['role'] + if roles: + for r in roles: + if role in [r['name'], r['id']]: + return r['id'] + self.fail_json(msg="Role '%s' not found" % role) + + def _get_role_perm(self): + role_permission = self.role_permission + + args = { + 'roleid': self._get_role_id(), + } + + rp = self.query_api('listRolePermissions', **args) + + if rp: + role_permission = rp['rolepermission'] + + return role_permission + + def _get_rule(self, rule=None): + if not rule: + rule = self.module.params.get('name') + + if self._get_role_perm(): + for _rule in self._get_role_perm(): + if rule == _rule['rule'] or rule == _rule['id']: + return _rule + + return None + + def _get_rule_order(self): + perms = self._get_role_perm() + rules = [] + + if perms: + for i, rule in enumerate(perms): + rules.append(rule['id']) + + return rules + + def replace_rule(self): + old_rule = self._get_rule() + + if old_rule: + rules_order = self._get_rule_order() + old_pos = rules_order.index(old_rule['id']) + + self.remove_role_perm() + + new_rule = self.create_role_perm() + + if new_rule: + perm_order = self.order_permissions(int(old_pos - 1), new_rule['id']) + + return perm_order + + return None + + def order_permissions(self, parent, rule_id): + rules = self._get_rule_order() + + if isinstance(parent, int): + parent_pos = parent + elif parent == '0': + parent_pos = -1 + else: + parent_rule = self._get_rule(parent) + if not parent_rule: + self.fail_json(msg="Parent rule '%s' not found" % parent) + + parent_pos = rules.index(parent_rule['id']) + + r_id = rules.pop(rules.index(rule_id)) + + rules.insert((parent_pos + 1), r_id) + rules = ','.join(map(str, rules)) + + return rules + + def create_or_update_role_perm(self): + role_permission = self._get_rule() + + if not role_permission: + role_permission = self.create_role_perm() + else: + role_permission = self.update_role_perm(role_permission) + + return role_permission + + def create_role_perm(self): + role_permission = None + + self.result['changed'] = True + + args = { + 'rule': self.module.params.get('name'), + 'description': self.module.params.get('description'), + 'roleid': self._get_role_id(), + 'permission': self.module.params.get('permission'), + } + + if not self.module.check_mode: + res = self.query_api('createRolePermission', **args) + role_permission = res['rolepermission'] + + return role_permission + + def update_role_perm(self, role_perm): + perm_order = None + + if not self.module.params.get('parent'): + args = { + 'ruleid': role_perm['id'], + 'roleid': role_perm['roleid'], + 'permission': self.module.params.get('permission'), + } + + if self.has_changed(args, role_perm, only_keys=['permission']): + self.result['changed'] = True + + if not self.module.check_mode: + if self.cloudstack_version >= LooseVersion('4.11.0'): + self.query_api('updateRolePermission', **args) + role_perm = self._get_rule() + else: + perm_order = self.replace_rule() + else: + perm_order = self.order_permissions(self.module.params.get('parent'), role_perm['id']) + + if perm_order: + args = { + 'roleid': role_perm['roleid'], + 'ruleorder': perm_order, + } + + self.result['changed'] = True + + if not self.module.check_mode: + self.query_api('updateRolePermission', **args) + role_perm = self._get_rule() + + 
return role_perm + + def remove_role_perm(self): + role_permission = self._get_rule() + + if role_permission: + self.result['changed'] = True + + args = { + 'id': role_permission['id'], + } + + if not self.module.check_mode: + self.query_api('deleteRolePermission', **args) + + return role_permission + + +def main(): + argument_spec = cs_argument_spec() + argument_spec.update(dict( + role=dict(required=True), + name=dict(required=True), + permission=dict(choices=['allow', 'deny'], default='deny'), + description=dict(), + state=dict(choices=['present', 'absent'], default='present'), + parent=dict(), + )) + + module = AnsibleModule( + argument_spec=argument_spec, + required_together=cs_required_together(), + mutually_exclusive=( + ['permission', 'parent'], + ), + supports_check_mode=True + ) + + acs_role_perm = AnsibleCloudStackRolePermission(module) + + state = module.params.get('state') + if state in ['absent']: + role_permission = acs_role_perm.remove_role_perm() + else: + role_permission = acs_role_perm.create_or_update_role_perm() + + result = acs_role_perm.get_result(role_permission) + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/cloudstack/cs_router.py b/plugins/modules/cloud/cloudstack/cs_router.py new file mode 100644 index 0000000000..af621542bf --- /dev/null +++ b/plugins/modules/cloud/cloudstack/cs_router.py @@ -0,0 +1,377 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# (c) 2016, René Moser +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['stableinterface'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: cs_router +short_description: Manages routers on Apache CloudStack based clouds. +description: + - Start, restart, stop and destroy routers. + - I(state=present) is not able to create routers, use M(cs_network) instead. +author: René Moser (@resmo) +options: + name: + description: + - Name of the router. + type: str + required: true + service_offering: + description: + - Name or id of the service offering of the router. + type: str + domain: + description: + - Domain the router is related to. + type: str + account: + description: + - Account the router is related to. + type: str + project: + description: + - Name of the project the router is related to. + type: str + zone: + description: + - Name of the zone the router is deployed in. + - If not set, all zones are used. + type: str + state: + description: + - State of the router. + type: str + default: present + choices: [ present, absent, started, stopped, restarted ] + poll_async: + description: + - Poll async jobs until job has finished. + default: yes + type: bool +extends_documentation_fragment: +- community.general.cloudstack + +''' + +EXAMPLES = ''' +# Ensure the router has the desired service offering, no matter if +# the router is running or not. +- name: Present router + cs_router: + name: r-40-VM + service_offering: System Offering for Software Router + delegate_to: localhost + +- name: Ensure started + cs_router: + name: r-40-VM + state: started + delegate_to: localhost + +# Ensure started with desired service offering. +# If the service offerings changes, router will be rebooted. 
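+# Under the hood this is a stop/changeServiceForRouter/start cycle, so a
+# short interruption of the router is to be expected.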
+- name: Ensure started with desired service offering + cs_router: + name: r-40-VM + service_offering: System Offering for Software Router + state: started + delegate_to: localhost + +- name: Ensure stopped + cs_router: + name: r-40-VM + state: stopped + delegate_to: localhost + +- name: Remove a router + cs_router: + name: r-40-VM + state: absent + delegate_to: localhost +''' + +RETURN = ''' +--- +id: + description: UUID of the router. + returned: success + type: str + sample: 04589590-ac63-4ffc-93f5-b698b8ac38b6 +name: + description: Name of the router. + returned: success + type: str + sample: r-40-VM +created: + description: Date of the router was created. + returned: success + type: str + sample: 2014-12-01T14:57:57+0100 +template_version: + description: Version of the system VM template. + returned: success + type: str + sample: 4.5.1 +requires_upgrade: + description: Whether the router needs to be upgraded to the new template. + returned: success + type: bool + sample: false +redundant_state: + description: Redundant state of the router. + returned: success + type: str + sample: UNKNOWN +role: + description: Role of the router. + returned: success + type: str + sample: VIRTUAL_ROUTER +zone: + description: Name of zone the router is in. + returned: success + type: str + sample: ch-gva-2 +service_offering: + description: Name of the service offering the router has. + returned: success + type: str + sample: System Offering For Software Router +state: + description: State of the router. + returned: success + type: str + sample: Active +domain: + description: Domain the router is related to. + returned: success + type: str + sample: ROOT +account: + description: Account the router is related to. + returned: success + type: str + sample: admin +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.cloudstack import ( + AnsibleCloudStack, + cs_argument_spec, + cs_required_together, +) + + +class AnsibleCloudStackRouter(AnsibleCloudStack): + + def __init__(self, module): + super(AnsibleCloudStackRouter, self).__init__(module) + self.returns = { + 'serviceofferingname': 'service_offering', + 'version': 'template_version', + 'requiresupgrade': 'requires_upgrade', + 'redundantstate': 'redundant_state', + 'role': 'role' + } + self.router = None + + def get_service_offering_id(self): + service_offering = self.module.params.get('service_offering') + if not service_offering: + return None + + args = { + 'issystem': True + } + + service_offerings = self.query_api('listServiceOfferings', **args) + if service_offerings: + for s in service_offerings['serviceoffering']: + if service_offering in [s['name'], s['id']]: + return s['id'] + self.module.fail_json(msg="Service offering '%s' not found" % service_offering) + + def get_router(self): + if not self.router: + router = self.module.params.get('name') + + args = { + 'projectid': self.get_project(key='id'), + 'account': self.get_account(key='name'), + 'domainid': self.get_domain(key='id'), + 'listall': True, + 'fetch_list': True, + } + + if self.module.params.get('zone'): + args['zoneid'] = self.get_zone(key='id') + + routers = self.query_api('listRouters', **args) + if routers: + for r in routers: + if router.lower() in [r['name'].lower(), r['id']]: + self.router = r + break + return self.router + + def start_router(self): + router = self.get_router() + if not router: + self.module.fail_json(msg="Router not found") + + if router['state'].lower() != "running": + 
self.result['changed'] = True + + args = { + 'id': router['id'], + } + + if not self.module.check_mode: + res = self.query_api('startRouter', **args) + + poll_async = self.module.params.get('poll_async') + if poll_async: + router = self.poll_job(res, 'router') + return router + + def stop_router(self): + router = self.get_router() + if not router: + self.module.fail_json(msg="Router not found") + + if router['state'].lower() != "stopped": + self.result['changed'] = True + + args = { + 'id': router['id'], + } + + if not self.module.check_mode: + res = self.query_api('stopRouter', **args) + + poll_async = self.module.params.get('poll_async') + if poll_async: + router = self.poll_job(res, 'router') + return router + + def reboot_router(self): + router = self.get_router() + if not router: + self.module.fail_json(msg="Router not found") + + self.result['changed'] = True + + args = { + 'id': router['id'], + } + + if not self.module.check_mode: + res = self.query_api('rebootRouter', **args) + + poll_async = self.module.params.get('poll_async') + if poll_async: + router = self.poll_job(res, 'router') + return router + + def absent_router(self): + router = self.get_router() + if router: + self.result['changed'] = True + + args = { + 'id': router['id'], + } + + if not self.module.check_mode: + res = self.query_api('destroyRouter', **args) + + poll_async = self.module.params.get('poll_async') + if poll_async: + self.poll_job(res, 'router') + return router + + def present_router(self): + router = self.get_router() + if not router: + self.module.fail_json(msg="Router can not be created using the API, see cs_network.") + + args = { + 'id': router['id'], + 'serviceofferingid': self.get_service_offering_id(), + } + + state = self.module.params.get('state') + + if self.has_changed(args, router): + self.result['changed'] = True + + if not self.module.check_mode: + current_state = router['state'].lower() + + self.stop_router() + router = self.query_api('changeServiceForRouter', **args) + + if state in ['restarted', 'started']: + router = self.start_router() + + # if state=present we get to the state before the service + # offering change. 
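+                # E.g. a router that was running before the offering change
+                # is started again, while a previously stopped router stays
+                # stopped.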
+ elif state == "present" and current_state == "running": + router = self.start_router() + + elif state == "started": + router = self.start_router() + + elif state == "stopped": + router = self.stop_router() + + elif state == "restarted": + router = self.reboot_router() + + return router + + +def main(): + argument_spec = cs_argument_spec() + argument_spec.update(dict( + name=dict(required=True), + service_offering=dict(), + state=dict(choices=['present', 'started', 'stopped', 'restarted', 'absent'], default="present"), + domain=dict(), + account=dict(), + project=dict(), + zone=dict(), + poll_async=dict(type='bool', default=True), + )) + + module = AnsibleModule( + argument_spec=argument_spec, + required_together=cs_required_together(), + supports_check_mode=True + ) + + acs_router = AnsibleCloudStackRouter(module) + + state = module.params.get('state') + if state in ['absent']: + router = acs_router.absent_router() + else: + router = acs_router.present_router() + + result = acs_router.get_result(router) + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/cloudstack/cs_securitygroup.py b/plugins/modules/cloud/cloudstack/cs_securitygroup.py new file mode 100644 index 0000000000..fa029d1406 --- /dev/null +++ b/plugins/modules/cloud/cloudstack/cs_securitygroup.py @@ -0,0 +1,200 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# (c) 2015, René Moser +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['stableinterface'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: cs_securitygroup +short_description: Manages security groups on Apache CloudStack based clouds. +description: + - Create and remove security groups. +author: René Moser (@resmo) +options: + name: + description: + - Name of the security group. + type: str + required: true + description: + description: + - Description of the security group. + type: str + state: + description: + - State of the security group. + type: str + default: present + choices: [ present, absent ] + domain: + description: + - Domain the security group is related to. + type: str + account: + description: + - Account the security group is related to. + type: str + project: + description: + - Name of the project the security group to be created in. + type: str +extends_documentation_fragment: +- community.general.cloudstack + +''' + +EXAMPLES = ''' +- name: create a security group + cs_securitygroup: + name: default + description: default security group + delegate_to: localhost + +- name: remove a security group + cs_securitygroup: + name: default + state: absent + delegate_to: localhost +''' + +RETURN = ''' +--- +id: + description: UUID of the security group. + returned: success + type: str + sample: a6f7a5fc-43f8-11e5-a151-feff819cdc9f +name: + description: Name of security group. + returned: success + type: str + sample: app +description: + description: Description of security group. + returned: success + type: str + sample: application security group +tags: + description: List of resource tags associated with the security group. + returned: success + type: list + sample: '[ { "key": "foo", "value": "bar" } ]' +project: + description: Name of project the security group is related to. 
+ returned: success + type: str + sample: Production +domain: + description: Domain the security group is related to. + returned: success + type: str + sample: example domain +account: + description: Account the security group is related to. + returned: success + type: str + sample: example account +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.cloudstack import AnsibleCloudStack, cs_argument_spec, cs_required_together + + +class AnsibleCloudStackSecurityGroup(AnsibleCloudStack): + + def __init__(self, module): + super(AnsibleCloudStackSecurityGroup, self).__init__(module) + self.security_group = None + + def get_security_group(self): + if not self.security_group: + + args = { + 'projectid': self.get_project(key='id'), + 'account': self.get_account(key='name'), + 'domainid': self.get_domain(key='id'), + 'securitygroupname': self.module.params.get('name'), + } + sgs = self.query_api('listSecurityGroups', **args) + if sgs: + self.security_group = sgs['securitygroup'][0] + return self.security_group + + def create_security_group(self): + security_group = self.get_security_group() + if not security_group: + self.result['changed'] = True + + args = { + 'name': self.module.params.get('name'), + 'projectid': self.get_project(key='id'), + 'account': self.get_account(key='name'), + 'domainid': self.get_domain(key='id'), + 'description': self.module.params.get('description'), + } + + if not self.module.check_mode: + res = self.query_api('createSecurityGroup', **args) + security_group = res['securitygroup'] + + return security_group + + def remove_security_group(self): + security_group = self.get_security_group() + if security_group: + self.result['changed'] = True + + args = { + 'name': self.module.params.get('name'), + 'projectid': self.get_project(key='id'), + 'account': self.get_account(key='name'), + 'domainid': self.get_domain(key='id'), + } + + if not self.module.check_mode: + self.query_api('deleteSecurityGroup', **args) + + return security_group + + +def main(): + argument_spec = cs_argument_spec() + argument_spec.update(dict( + name=dict(required=True), + description=dict(), + state=dict(choices=['present', 'absent'], default='present'), + project=dict(), + account=dict(), + domain=dict(), + )) + + module = AnsibleModule( + argument_spec=argument_spec, + required_together=cs_required_together(), + supports_check_mode=True + ) + + acs_sg = AnsibleCloudStackSecurityGroup(module) + + state = module.params.get('state') + if state in ['absent']: + sg = acs_sg.remove_security_group() + else: + sg = acs_sg.create_security_group() + + result = acs_sg.get_result(sg) + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/cloudstack/cs_securitygroup_rule.py b/plugins/modules/cloud/cloudstack/cs_securitygroup_rule.py new file mode 100644 index 0000000000..4a6bd2ee90 --- /dev/null +++ b/plugins/modules/cloud/cloudstack/cs_securitygroup_rule.py @@ -0,0 +1,389 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# (c) 2015, René Moser +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['stableinterface'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: cs_securitygroup_rule +short_description: Manages security group rules on Apache CloudStack based clouds. 
+description:
+  - Add and remove security group rules.
+author: René Moser (@resmo)
+options:
+  security_group:
+    description:
+      - Name of the security group the rule is related to. The security group must exist.
+    type: str
+    required: true
+  state:
+    description:
+      - State of the security group rule.
+    type: str
+    default: present
+    choices: [ present, absent ]
+  protocol:
+    description:
+      - Protocol of the security group rule.
+    type: str
+    default: tcp
+    choices: [ tcp, udp, icmp, ah, esp, gre ]
+  type:
+    description:
+      - Ingress or egress security group rule.
+    type: str
+    default: ingress
+    choices: [ ingress, egress ]
+  cidr:
+    description:
+      - CIDR (full notation) to be used for security group rule.
+    type: str
+    default: 0.0.0.0/0
+  user_security_group:
+    description:
+      - Security group this rule is based on.
+    type: str
+  start_port:
+    description:
+      - Start port for this rule. Required if I(protocol=tcp) or I(protocol=udp).
+    type: int
+    aliases: [ port ]
+  end_port:
+    description:
+      - End port for this rule. Required if I(protocol=tcp) or I(protocol=udp), but I(start_port) will be used if not set.
+    type: int
+  icmp_type:
+    description:
+      - Type of the icmp message being sent. Required if I(protocol=icmp).
+    type: int
+  icmp_code:
+    description:
+      - Error code for this icmp message. Required if I(protocol=icmp).
+    type: int
+  project:
+    description:
+      - Name of the project the security group is related to.
+    type: str
+  poll_async:
+    description:
+      - Poll async jobs until job has finished.
+    default: yes
+    type: bool
+extends_documentation_fragment:
+- community.general.cloudstack
+
+'''
+
+EXAMPLES = '''
+---
+- name: allow inbound port 80/tcp from 1.2.3.4 added to security group 'default'
+  cs_securitygroup_rule:
+    security_group: default
+    port: 80
+    cidr: 1.2.3.4/32
+  delegate_to: localhost
+
+- name: allow tcp/udp outbound added to security group 'default'
+  cs_securitygroup_rule:
+    security_group: default
+    type: egress
+    start_port: 1
+    end_port: 65535
+    protocol: '{{ item }}'
+  with_items:
+    - tcp
+    - udp
+  delegate_to: localhost
+
+- name: allow inbound icmp from 0.0.0.0/0 added to security group 'default'
+  cs_securitygroup_rule:
+    security_group: default
+    protocol: icmp
+    icmp_code: -1
+    icmp_type: -1
+  delegate_to: localhost
+
+- name: remove rule inbound port 80/tcp from 0.0.0.0/0 from security group 'default'
+  cs_securitygroup_rule:
+    security_group: default
+    port: 80
+    state: absent
+  delegate_to: localhost
+
+- name: allow inbound port 80/tcp from security group web added to security group 'default'
+  cs_securitygroup_rule:
+    security_group: default
+    port: 80
+    user_security_group: web
+  delegate_to: localhost
+'''
+
+RETURN = '''
+---
+id:
+  description: UUID of the rule.
+  returned: success
+  type: str
+  sample: a6f7a5fc-43f8-11e5-a151-feff819cdc9f
+security_group:
+  description: security group of the rule.
+  returned: success
+  type: str
+  sample: default
+type:
+  description: type of the rule.
+  returned: success
+  type: str
+  sample: ingress
+cidr:
+  description: CIDR of the rule.
+  returned: success and cidr is defined
+  type: str
+  sample: 0.0.0.0/0
+user_security_group:
+  description: user security group of the rule.
+  returned: success and user_security_group is defined
+  type: str
+  sample: default
+protocol:
+  description: protocol of the rule.
+  returned: success
+  type: str
+  sample: tcp
+start_port:
+  description: start port of the rule.
+  returned: success
+  type: int
+  sample: 80
+end_port:
+  description: end port of the rule.
+ returned: success + type: int + sample: 80 +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.cloudstack import AnsibleCloudStack, cs_argument_spec, cs_required_together + + +class AnsibleCloudStackSecurityGroupRule(AnsibleCloudStack): + + def __init__(self, module): + super(AnsibleCloudStackSecurityGroupRule, self).__init__(module) + self.returns = { + 'icmptype': 'icmp_type', + 'icmpcode': 'icmp_code', + 'endport': 'end_port', + 'startport': 'start_port', + 'protocol': 'protocol', + 'cidr': 'cidr', + 'securitygroupname': 'user_security_group', + } + + def _tcp_udp_match(self, rule, protocol, start_port, end_port): + return (protocol in ['tcp', 'udp'] and + protocol == rule['protocol'] and + start_port == int(rule['startport']) and + end_port == int(rule['endport'])) + + def _icmp_match(self, rule, protocol, icmp_code, icmp_type): + return (protocol == 'icmp' and + protocol == rule['protocol'] and + icmp_code == int(rule['icmpcode']) and + icmp_type == int(rule['icmptype'])) + + def _ah_esp_gre_match(self, rule, protocol): + return (protocol in ['ah', 'esp', 'gre'] and + protocol == rule['protocol']) + + def _type_security_group_match(self, rule, security_group_name): + return (security_group_name and + 'securitygroupname' in rule and + security_group_name == rule['securitygroupname']) + + def _type_cidr_match(self, rule, cidr): + return ('cidr' in rule and + cidr == rule['cidr']) + + def _get_rule(self, rules): + user_security_group_name = self.module.params.get('user_security_group') + cidr = self.module.params.get('cidr') + protocol = self.module.params.get('protocol') + start_port = self.module.params.get('start_port') + end_port = self.get_or_fallback('end_port', 'start_port') + icmp_code = self.module.params.get('icmp_code') + icmp_type = self.module.params.get('icmp_type') + + if protocol in ['tcp', 'udp'] and (start_port is None or end_port is None): + self.module.fail_json(msg="no start_port or end_port set for protocol '%s'" % protocol) + + if protocol == 'icmp' and (icmp_type is None or icmp_code is None): + self.module.fail_json(msg="no icmp_type or icmp_code set for protocol '%s'" % protocol) + + for rule in rules: + if user_security_group_name: + type_match = self._type_security_group_match(rule, user_security_group_name) + else: + type_match = self._type_cidr_match(rule, cidr) + + protocol_match = (self._tcp_udp_match(rule, protocol, start_port, end_port) or + self._icmp_match(rule, protocol, icmp_code, icmp_type) or + self._ah_esp_gre_match(rule, protocol)) + + if type_match and protocol_match: + return rule + return None + + def get_security_group(self, security_group_name=None): + if not security_group_name: + security_group_name = self.module.params.get('security_group') + args = { + 'securitygroupname': security_group_name, + 'projectid': self.get_project('id'), + } + sgs = self.query_api('listSecurityGroups', **args) + if not sgs or 'securitygroup' not in sgs: + self.module.fail_json(msg="security group '%s' not found" % security_group_name) + return sgs['securitygroup'][0] + + def add_rule(self): + security_group = self.get_security_group() + + args = {} + user_security_group_name = self.module.params.get('user_security_group') + + # the user_security_group and cidr are mutually_exclusive, but cidr is defaulted to 0.0.0.0/0. + # that is why we ignore if we have a user_security_group. 
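+        # "Ignore" here means the cidr parameter is simply not sent to the
+        # API: when user_security_group is set, the rule is matched and
+        # authorized against that source group (usersecuritygrouplist)
+        # instead of a CIDR.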
+ if user_security_group_name: + args['usersecuritygrouplist'] = [] + user_security_group = self.get_security_group(user_security_group_name) + args['usersecuritygrouplist'].append({ + 'group': user_security_group['name'], + 'account': user_security_group['account'], + }) + else: + args['cidrlist'] = self.module.params.get('cidr') + + args['protocol'] = self.module.params.get('protocol') + args['startport'] = self.module.params.get('start_port') + args['endport'] = self.get_or_fallback('end_port', 'start_port') + args['icmptype'] = self.module.params.get('icmp_type') + args['icmpcode'] = self.module.params.get('icmp_code') + args['projectid'] = self.get_project('id') + args['securitygroupid'] = security_group['id'] + + rule = None + res = None + sg_type = self.module.params.get('type') + if sg_type == 'ingress': + if 'ingressrule' in security_group: + rule = self._get_rule(security_group['ingressrule']) + if not rule: + self.result['changed'] = True + if not self.module.check_mode: + res = self.query_api('authorizeSecurityGroupIngress', **args) + + elif sg_type == 'egress': + if 'egressrule' in security_group: + rule = self._get_rule(security_group['egressrule']) + if not rule: + self.result['changed'] = True + if not self.module.check_mode: + res = self.query_api('authorizeSecurityGroupEgress', **args) + + poll_async = self.module.params.get('poll_async') + if res and poll_async: + security_group = self.poll_job(res, 'securitygroup') + key = sg_type + "rule" # ingressrule / egressrule + if key in security_group: + rule = security_group[key][0] + return rule + + def remove_rule(self): + security_group = self.get_security_group() + rule = None + res = None + sg_type = self.module.params.get('type') + if sg_type == 'ingress': + rule = self._get_rule(security_group['ingressrule']) + if rule: + self.result['changed'] = True + if not self.module.check_mode: + res = self.query_api('revokeSecurityGroupIngress', id=rule['ruleid']) + + elif sg_type == 'egress': + rule = self._get_rule(security_group['egressrule']) + if rule: + self.result['changed'] = True + if not self.module.check_mode: + res = self.query_api('revokeSecurityGroupEgress', id=rule['ruleid']) + + poll_async = self.module.params.get('poll_async') + if res and poll_async: + res = self.poll_job(res, 'securitygroup') + return rule + + def get_result(self, security_group_rule): + super(AnsibleCloudStackSecurityGroupRule, self).get_result(security_group_rule) + self.result['type'] = self.module.params.get('type') + self.result['security_group'] = self.module.params.get('security_group') + return self.result + + +def main(): + argument_spec = cs_argument_spec() + argument_spec.update(dict( + security_group=dict(required=True), + type=dict(choices=['ingress', 'egress'], default='ingress'), + cidr=dict(default='0.0.0.0/0'), + user_security_group=dict(), + protocol=dict(choices=['tcp', 'udp', 'icmp', 'ah', 'esp', 'gre'], default='tcp'), + icmp_type=dict(type='int'), + icmp_code=dict(type='int'), + start_port=dict(type='int', aliases=['port']), + end_port=dict(type='int'), + state=dict(choices=['present', 'absent'], default='present'), + project=dict(), + poll_async=dict(type='bool', default=True), + )) + required_together = cs_required_together() + required_together.extend([ + ['icmp_type', 'icmp_code'], + ]) + + module = AnsibleModule( + argument_spec=argument_spec, + required_together=required_together, + mutually_exclusive=( + ['icmp_type', 'start_port'], + ['icmp_type', 'end_port'], + ['icmp_code', 'start_port'], + ['icmp_code', 
'end_port'], + ), + supports_check_mode=True + ) + + acs_sg_rule = AnsibleCloudStackSecurityGroupRule(module) + + state = module.params.get('state') + if state in ['absent']: + sg_rule = acs_sg_rule.remove_rule() + else: + sg_rule = acs_sg_rule.add_rule() + + result = acs_sg_rule.get_result(sg_rule) + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/cloudstack/cs_service_offering.py b/plugins/modules/cloud/cloudstack/cs_service_offering.py new file mode 100644 index 0000000000..486011703f --- /dev/null +++ b/plugins/modules/cloud/cloudstack/cs_service_offering.py @@ -0,0 +1,583 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# (c) 2017, René Moser +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: cs_service_offering +description: + - Create and delete service offerings for guest and system VMs. + - Update display_text of existing service offering. +short_description: Manages service offerings on Apache CloudStack based clouds. +author: René Moser (@resmo) +options: + disk_bytes_read_rate: + description: + - Bytes read rate of the disk offering. + type: int + aliases: [ bytes_read_rate ] + disk_bytes_write_rate: + description: + - Bytes write rate of the disk offering. + type: int + aliases: [ bytes_write_rate ] + cpu_number: + description: + - The number of CPUs of the service offering. + type: int + cpu_speed: + description: + - The CPU speed of the service offering in MHz. + type: int + limit_cpu_usage: + description: + - Restrict the CPU usage to committed service offering. + type: bool + deployment_planner: + description: + - The deployment planner heuristics used to deploy a VM of this offering. + - If not set, the value of global config I(vm.deployment.planner) is used. + type: str + display_text: + description: + - Display text of the service offering. + - If not set, I(name) will be used as I(display_text) while creating. + type: str + domain: + description: + - Domain the service offering is related to. + - Public for all domains and subdomains if not set. + type: str + host_tags: + description: + - The host tags for this service offering. + type: list + aliases: + - host_tag + hypervisor_snapshot_reserve: + description: + - Hypervisor snapshot reserve space as a percent of a volume. + - Only for managed storage using Xen or VMware. + type: int + is_iops_customized: + description: + - Whether compute offering iops is custom or not. + type: bool + aliases: [ disk_iops_customized ] + disk_iops_read_rate: + description: + - IO requests read rate of the disk offering. + type: int + disk_iops_write_rate: + description: + - IO requests write rate of the disk offering. + type: int + disk_iops_max: + description: + - Max. iops of the compute offering. + type: int + disk_iops_min: + description: + - Min. iops of the compute offering. + type: int + is_system: + description: + - Whether it is a system VM offering or not. + type: bool + default: no + is_volatile: + description: + - Whether the virtual machine needs to be volatile or not. + - Every reboot of VM the root disk is detached then destroyed and a fresh root disk is created and attached to VM. + type: bool + memory: + description: + - The total memory of the service offering in MB. 
+ type: int + name: + description: + - Name of the service offering. + type: str + required: true + network_rate: + description: + - Data transfer rate in Mb/s allowed. + - Supported only for non-system offering and system offerings having I(system_vm_type=domainrouter). + type: int + offer_ha: + description: + - Whether HA is set for the service offering. + type: bool + default: no + provisioning_type: + description: + - Provisioning type used to create volumes. + type: str + choices: + - thin + - sparse + - fat + service_offering_details: + description: + - Details for planner, used to store specific parameters. + - A list of dictionaries having keys C(key) and C(value). + type: list + state: + description: + - State of the service offering. + type: str + choices: + - present + - absent + default: present + storage_type: + description: + - The storage type of the service offering. + type: str + choices: + - local + - shared + system_vm_type: + description: + - The system VM type. + - Required if I(is_system=yes). + type: str + choices: + - domainrouter + - consoleproxy + - secondarystoragevm + storage_tags: + description: + - The storage tags for this service offering. + type: list + aliases: + - storage_tag + is_customized: + description: + - Whether the offering is customizable or not. + type: bool +extends_documentation_fragment: +- community.general.cloudstack + +''' + +EXAMPLES = ''' +- name: Create a non-volatile compute service offering with local storage + cs_service_offering: + name: Micro + display_text: Micro 512mb 1cpu + cpu_number: 1 + cpu_speed: 2198 + memory: 512 + host_tags: eco + storage_type: local + delegate_to: localhost + +- name: Create a volatile compute service offering with shared storage + cs_service_offering: + name: Tiny + display_text: Tiny 1gb 1cpu + cpu_number: 1 + cpu_speed: 2198 + memory: 1024 + storage_type: shared + is_volatile: yes + host_tags: eco + storage_tags: eco + delegate_to: localhost + +- name: Create or update a volatile compute service offering with shared storage + cs_service_offering: + name: Tiny + display_text: Tiny 1gb 1cpu + cpu_number: 1 + cpu_speed: 2198 + memory: 1024 + storage_type: shared + is_volatile: yes + host_tags: eco + storage_tags: eco + delegate_to: localhost + +- name: Create or update a custom compute service offering + cs_service_offering: + name: custom + display_text: custom compute offer + is_customized: yes + storage_type: shared + host_tags: eco + storage_tags: eco + delegate_to: localhost + +- name: Remove a compute service offering + cs_service_offering: + name: Tiny + state: absent + delegate_to: localhost + +- name: Create or update a system offering for the console proxy + cs_service_offering: + name: System Offering for Console Proxy 2GB + display_text: System Offering for Console Proxy 2GB RAM + is_system: yes + system_vm_type: consoleproxy + cpu_number: 1 + cpu_speed: 2198 + memory: 2048 + storage_type: shared + storage_tags: perf + delegate_to: localhost + +- name: Remove a system offering + cs_service_offering: + name: System Offering for Console Proxy 2GB + is_system: yes + state: absent + delegate_to: localhost +''' + +RETURN = ''' +--- +id: + description: UUID of the service offering + returned: success + type: str + sample: a6f7a5fc-43f8-11e5-a151-feff819cdc9f +cpu_number: + description: Number of CPUs in the service offering + returned: success + type: int + sample: 4 +cpu_speed: + description: Speed of CPUs in MHz in the service offering + returned: success + type: int + sample: 2198 
+disk_iops_max:
+  description: Max iops of the disk offering
+  returned: success
+  type: int
+  sample: 1000
+disk_iops_min:
+  description: Min iops of the disk offering
+  returned: success
+  type: int
+  sample: 500
+disk_bytes_read_rate:
+  description: Bytes read rate of the service offering
+  returned: success
+  type: int
+  sample: 1000
+disk_bytes_write_rate:
+  description: Bytes write rate of the service offering
+  returned: success
+  type: int
+  sample: 1000
+disk_iops_read_rate:
+  description: IO requests per second read rate of the service offering
+  returned: success
+  type: int
+  sample: 1000
+disk_iops_write_rate:
+  description: IO requests per second write rate of the service offering
+  returned: success
+  type: int
+  sample: 1000
+created:
+  description: Date the offering was created
+  returned: success
+  type: str
+  sample: 2017-11-19T10:48:59+0000
+display_text:
+  description: Display text of the offering
+  returned: success
+  type: str
+  sample: Micro 512mb 1cpu
+domain:
+  description: Domain the offering is in
+  returned: success
+  type: str
+  sample: ROOT
+host_tags:
+  description: List of host tags
+  returned: success
+  type: list
+  sample: [ 'eco' ]
+storage_tags:
+  description: List of storage tags
+  returned: success
+  type: list
+  sample: [ 'eco' ]
+is_system:
+  description: Whether the offering is for system VMs or not
+  returned: success
+  type: bool
+  sample: false
+is_iops_customized:
+  description: Whether the offering uses custom IOPS or not
+  returned: success
+  type: bool
+  sample: false
+is_volatile:
+  description: Whether the offering is volatile or not
+  returned: success
+  type: bool
+  sample: false
+limit_cpu_usage:
+  description: Whether the CPU usage is restricted to committed service offering
+  returned: success
+  type: bool
+  sample: false
+memory:
+  description: Memory of the service offering
+  returned: success
+  type: int
+  sample: 512
+name:
+  description: Name of the service offering
+  returned: success
+  type: str
+  sample: Micro
+offer_ha:
+  description: Whether HA support is enabled in the offering or not
+  returned: success
+  type: bool
+  sample: false
+provisioning_type:
+  description: Provisioning type used to create volumes
+  returned: success
+  type: str
+  sample: thin
+storage_type:
+  description: Storage type used to create volumes
+  returned: success
+  type: str
+  sample: shared
+system_vm_type:
+  description: System VM type of this offering
+  returned: success
+  type: str
+  sample: consoleproxy
+service_offering_details:
+  description: Additional service offering details
+  returned: success
+  type: dict
+  sample: "{'vgpuType': 'GRID K180Q','pciDevice':'Group of NVIDIA Corporation GK107GL [GRID K1] GPUs'}"
+network_rate:
+  description: Data transfer rate in megabits per second allowed
+  returned: success
+  type: int
+  sample: 1000
+is_customized:
+  description: Whether the offering is customizable or not
+  returned: success
+  type: bool
+  sample: false
+  version_added: '2.8'
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.cloudstack import (
+    AnsibleCloudStack,
+    cs_argument_spec,
+    cs_required_together,
+)
+
+
+class AnsibleCloudStackServiceOffering(AnsibleCloudStack):
+
+    def __init__(self, module):
+        super(AnsibleCloudStackServiceOffering, self).__init__(module)
+        self.returns = {
+            'cpunumber': 'cpu_number',
+            'cpuspeed': 'cpu_speed',
+            'deploymentplanner': 'deployment_planner',
+            'diskBytesReadRate': 'disk_bytes_read_rate',
+            'diskBytesWriteRate':
'disk_bytes_write_rate', + 'diskIopsReadRate': 'disk_iops_read_rate', + 'diskIopsWriteRate': 'disk_iops_write_rate', + 'maxiops': 'disk_iops_max', + 'miniops': 'disk_iops_min', + 'hypervisorsnapshotreserve': 'hypervisor_snapshot_reserve', + 'iscustomized': 'is_customized', + 'iscustomizediops': 'is_iops_customized', + 'issystem': 'is_system', + 'isvolatile': 'is_volatile', + 'limitcpuuse': 'limit_cpu_usage', + 'memory': 'memory', + 'networkrate': 'network_rate', + 'offerha': 'offer_ha', + 'provisioningtype': 'provisioning_type', + 'serviceofferingdetails': 'service_offering_details', + 'storagetype': 'storage_type', + 'systemvmtype': 'system_vm_type', + 'tags': 'storage_tags', + } + + def get_service_offering(self): + args = { + 'name': self.module.params.get('name'), + 'domainid': self.get_domain(key='id'), + 'issystem': self.module.params.get('is_system'), + 'systemvmtype': self.module.params.get('system_vm_type'), + } + service_offerings = self.query_api('listServiceOfferings', **args) + if service_offerings: + return service_offerings['serviceoffering'][0] + + def present_service_offering(self): + service_offering = self.get_service_offering() + if not service_offering: + service_offering = self._create_offering(service_offering) + else: + service_offering = self._update_offering(service_offering) + + return service_offering + + def absent_service_offering(self): + service_offering = self.get_service_offering() + if service_offering: + self.result['changed'] = True + if not self.module.check_mode: + args = { + 'id': service_offering['id'], + } + self.query_api('deleteServiceOffering', **args) + return service_offering + + def _create_offering(self, service_offering): + self.result['changed'] = True + + system_vm_type = self.module.params.get('system_vm_type') + is_system = self.module.params.get('is_system') + + required_params = [] + if is_system and not system_vm_type: + required_params.append('system_vm_type') + self.module.fail_on_missing_params(required_params=required_params) + + args = { + 'name': self.module.params.get('name'), + 'displaytext': self.get_or_fallback('display_text', 'name'), + 'bytesreadrate': self.module.params.get('disk_bytes_read_rate'), + 'byteswriterate': self.module.params.get('disk_bytes_write_rate'), + 'cpunumber': self.module.params.get('cpu_number'), + 'cpuspeed': self.module.params.get('cpu_speed'), + 'customizediops': self.module.params.get('is_iops_customized'), + 'deploymentplanner': self.module.params.get('deployment_planner'), + 'domainid': self.get_domain(key='id'), + 'hosttags': self.module.params.get('host_tags'), + 'hypervisorsnapshotreserve': self.module.params.get('hypervisor_snapshot_reserve'), + 'iopsreadrate': self.module.params.get('disk_iops_read_rate'), + 'iopswriterate': self.module.params.get('disk_iops_write_rate'), + 'maxiops': self.module.params.get('disk_iops_max'), + 'miniops': self.module.params.get('disk_iops_min'), + 'issystem': is_system, + 'isvolatile': self.module.params.get('is_volatile'), + 'memory': self.module.params.get('memory'), + 'networkrate': self.module.params.get('network_rate'), + 'offerha': self.module.params.get('offer_ha'), + 'provisioningtype': self.module.params.get('provisioning_type'), + 'serviceofferingdetails': self.module.params.get('service_offering_details'), + 'storagetype': self.module.params.get('storage_type'), + 'systemvmtype': system_vm_type, + 'tags': self.module.params.get('storage_tags'), + 'limitcpuuse': self.module.params.get('limit_cpu_usage'), + 'customized': 
self.module.params.get('is_customized') + } + if not self.module.check_mode: + res = self.query_api('createServiceOffering', **args) + service_offering = res['serviceoffering'] + return service_offering + + def _update_offering(self, service_offering): + args = { + 'id': service_offering['id'], + 'name': self.module.params.get('name'), + 'displaytext': self.get_or_fallback('display_text', 'name'), + } + if self.has_changed(args, service_offering): + self.result['changed'] = True + + if not self.module.check_mode: + res = self.query_api('updateServiceOffering', **args) + service_offering = res['serviceoffering'] + return service_offering + + def get_result(self, service_offering): + super(AnsibleCloudStackServiceOffering, self).get_result(service_offering) + if service_offering: + if 'hosttags' in service_offering: + self.result['host_tags'] = service_offering['hosttags'].split(',') or [service_offering['hosttags']] + + # Prevent confusion, the api returns a tags key for storage tags. + if 'tags' in service_offering: + self.result['storage_tags'] = service_offering['tags'].split(',') or [service_offering['tags']] + if 'tags' in self.result: + del self.result['tags'] + + return self.result + + +def main(): + argument_spec = cs_argument_spec() + argument_spec.update(dict( + name=dict(required=True), + display_text=dict(), + cpu_number=dict(type='int'), + cpu_speed=dict(type='int'), + limit_cpu_usage=dict(type='bool'), + deployment_planner=dict(), + domain=dict(), + host_tags=dict(type='list', aliases=['host_tag']), + hypervisor_snapshot_reserve=dict(type='int'), + disk_bytes_read_rate=dict(type='int', aliases=['bytes_read_rate']), + disk_bytes_write_rate=dict(type='int', aliases=['bytes_write_rate']), + disk_iops_read_rate=dict(type='int'), + disk_iops_write_rate=dict(type='int'), + disk_iops_max=dict(type='int'), + disk_iops_min=dict(type='int'), + is_system=dict(type='bool', default=False), + is_volatile=dict(type='bool'), + is_iops_customized=dict(type='bool', aliases=['disk_iops_customized']), + memory=dict(type='int'), + network_rate=dict(type='int'), + offer_ha=dict(type='bool'), + provisioning_type=dict(choices=['thin', 'sparse', 'fat']), + service_offering_details=dict(type='list'), + storage_type=dict(choices=['local', 'shared']), + system_vm_type=dict(choices=['domainrouter', 'consoleproxy', 'secondarystoragevm']), + storage_tags=dict(type='list', aliases=['storage_tag']), + state=dict(choices=['present', 'absent'], default='present'), + is_customized=dict(type='bool'), + )) + + module = AnsibleModule( + argument_spec=argument_spec, + required_together=cs_required_together(), + supports_check_mode=True + ) + + acs_so = AnsibleCloudStackServiceOffering(module) + + state = module.params.get('state') + if state == "absent": + service_offering = acs_so.absent_service_offering() + else: + service_offering = acs_so.present_service_offering() + + result = acs_so.get_result(service_offering) + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/cloudstack/cs_snapshot_policy.py b/plugins/modules/cloud/cloudstack/cs_snapshot_policy.py new file mode 100644 index 0000000000..943facb99f --- /dev/null +++ b/plugins/modules/cloud/cloudstack/cs_snapshot_policy.py @@ -0,0 +1,358 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# (c) 2016, René Moser +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + 
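+# This module treats the pair (volume, interval type) as the identity of a
+# snapshot policy: get_snapshot_policy() below looks up an existing policy by
+# volume and interval type before deciding whether to create, update or
+# delete one.
+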
+ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['stableinterface'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: cs_snapshot_policy +short_description: Manages volume snapshot policies on Apache CloudStack based clouds. +description: + - Create, update and delete volume snapshot policies. +author: René Moser (@resmo) +options: + volume: + description: + - Name of the volume. + - Either I(volume) or I(vm) is required. + type: str + volume_type: + description: + - Type of the volume. + type: str + choices: + - DATADISK + - ROOT + vm: + description: + - Name of the instance to select the volume from. + - Use I(volume_type) if VM has a DATADISK and ROOT volume. + - In case of I(volume_type=DATADISK), additionally use I(device_id) if VM has more than one DATADISK volume. + - Either I(volume) or I(vm) is required. + type: str + device_id: + description: + - ID of the device on a VM the volume is attached to. + - This will only be considered if VM has multiple DATADISK volumes. + type: int + vpc: + description: + - Name of the vpc the instance is deployed in. + type: str + interval_type: + description: + - Interval of the snapshot. + type: str + default: daily + choices: [ hourly, daily, weekly, monthly ] + aliases: [ interval ] + max_snaps: + description: + - Max number of snapshots. + type: int + default: 8 + aliases: [ max ] + schedule: + description: + - Time the snapshot is scheduled. Required if I(state=present). + - 'Format for I(interval_type=HOURLY): C(MM)' + - 'Format for I(interval_type=DAILY): C(MM:HH)' + - 'Format for I(interval_type=WEEKLY): C(MM:HH:DD (1-7))' + - 'Format for I(interval_type=MONTHLY): C(MM:HH:DD (1-28))' + type: str + time_zone: + description: + - Specifies a timezone for this command. + type: str + default: UTC + aliases: [ timezone ] + state: + description: + - State of the snapshot policy. + type: str + default: present + choices: [ present, absent ] + domain: + description: + - Domain the volume is related to. + type: str + account: + description: + - Account the volume is related to. + type: str + project: + description: + - Name of the project the volume is related to. + type: str +extends_documentation_fragment: +- community.general.cloudstack + +''' + +EXAMPLES = ''' +- name: ensure a snapshot policy daily at 1h00 UTC + cs_snapshot_policy: + volume: ROOT-478 + schedule: '00:1' + max_snaps: 3 + delegate_to: localhost + +- name: ensure a snapshot policy daily at 1h00 UTC on the second DATADISK of VM web-01 + cs_snapshot_policy: + vm: web-01 + volume_type: DATADISK + device_id: 2 + schedule: '00:1' + max_snaps: 3 + delegate_to: localhost + +- name: ensure a snapshot policy hourly at minute 5 UTC + cs_snapshot_policy: + volume: ROOT-478 + schedule: '5' + interval_type: hourly + max_snaps: 1 + delegate_to: localhost + +- name: ensure a snapshot policy weekly on Sunday at 05h00, TZ Europe/Zurich + cs_snapshot_policy: + volume: ROOT-478 + schedule: '00:5:1' + interval_type: weekly + max_snaps: 1 + time_zone: 'Europe/Zurich' + delegate_to: localhost + +- name: ensure a snapshot policy is absent + cs_snapshot_policy: + volume: ROOT-478 + interval_type: hourly + state: absent + delegate_to: localhost +''' + +RETURN = ''' +--- +id: + description: UUID of the snapshot policy. + returned: success + type: str + sample: a6f7a5fc-43f8-11e5-a151-feff819cdc9f +interval_type: + description: interval type of the snapshot policy. + returned: success + type: str + sample: daily +schedule: + description: schedule of the snapshot policy. 
+  returned: success
+  type: str
+  sample:
+max_snaps:
+  description: maximum number of snapshots retained.
+  returned: success
+  type: int
+  sample: 10
+time_zone:
+  description: the time zone of the snapshot policy.
+  returned: success
+  type: str
+  sample: Etc/UTC
+volume:
+  description: the volume of the snapshot policy.
+  returned: success
+  type: str
+  sample: ROOT-478
+zone:
+  description: Name of zone the volume is related to.
+  returned: success
+  type: str
+  sample: ch-gva-2
+project:
+  description: Name of project the volume is related to.
+  returned: success
+  type: str
+  sample: Production
+account:
+  description: Account the volume is related to.
+  returned: success
+  type: str
+  sample: example account
+domain:
+  description: Domain the volume is related to.
+  returned: success
+  type: str
+  sample: example domain
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.cloudstack import (
+    AnsibleCloudStack,
+    cs_argument_spec,
+    cs_required_together
+)
+
+
+class AnsibleCloudStackSnapshotPolicy(AnsibleCloudStack):
+
+    def __init__(self, module):
+        super(AnsibleCloudStackSnapshotPolicy, self).__init__(module)
+        self.returns = {
+            'schedule': 'schedule',
+            'timezone': 'time_zone',
+            'maxsnaps': 'max_snaps',
+        }
+        self.interval_types = {
+            'hourly': 0,
+            'daily': 1,
+            'weekly': 2,
+            'monthly': 3,
+        }
+        self.volume = None
+
+    def get_interval_type(self):
+        interval_type = self.module.params.get('interval_type')
+        return self.interval_types[interval_type]
+
+    def get_volume(self, key=None):
+        if self.volume:
+            return self._get_by_key(key, self.volume)
+
+        args = {
+            'name': self.module.params.get('volume'),
+            'account': self.get_account(key='name'),
+            'domainid': self.get_domain(key='id'),
+            'projectid': self.get_project(key='id'),
+            'virtualmachineid': self.get_vm(key='id', filter_zone=False),
+            'type': self.module.params.get('volume_type'),
+        }
+        volumes = self.query_api('listVolumes', **args)
+        if volumes:
+            if volumes['count'] > 1:
+                device_id = self.module.params.get('device_id')
+                if not device_id:
+                    self.module.fail_json(msg="Found more than one volume: combine params 'vm', 'volume_type', 'device_id' and/or 'volume' to select the volume")
+                else:
+                    for v in volumes['volume']:
+                        if v.get('deviceid') == device_id:
+                            self.volume = v
+                            return self._get_by_key(key, self.volume)
+                    self.module.fail_json(msg="No volume found with device id %s" % device_id)
+            self.volume = volumes['volume'][0]
+            return self._get_by_key(key, self.volume)
+        return None
+
+    def get_snapshot_policy(self):
+        args = {
+            'volumeid': self.get_volume(key='id')
+        }
+        policies = self.query_api('listSnapshotPolicies', **args)
+        if policies:
+            for policy in policies['snapshotpolicy']:
+                if policy['intervaltype'] == self.get_interval_type():
+                    return policy
+        return None
+
+    def present_snapshot_policy(self):
+        required_params = [
+            'schedule',
+        ]
+        self.module.fail_on_missing_params(required_params=required_params)
+
+        policy = self.get_snapshot_policy()
+        args = {
+            'id': policy.get('id') if policy else None,
+            'intervaltype': self.module.params.get('interval_type'),
+            'schedule': self.module.params.get('schedule'),
+            'maxsnaps': self.module.params.get('max_snaps'),
+            'timezone': self.module.params.get('time_zone'),
+            'volumeid': self.get_volume(key='id')
+        }
+        if not policy or (policy and self.has_changed(policy, args, only_keys=['schedule', 'maxsnaps', 'timezone'])):
+            self.result['changed'] = True
+            if not self.module.check_mode:
+                res =
self.query_api('createSnapshotPolicy', **args) + policy = res['snapshotpolicy'] + return policy + + def absent_snapshot_policy(self): + policy = self.get_snapshot_policy() + if policy: + self.result['changed'] = True + args = { + 'id': policy['id'] + } + if not self.module.check_mode: + self.query_api('deleteSnapshotPolicies', **args) + return policy + + def get_result(self, policy): + super(AnsibleCloudStackSnapshotPolicy, self).get_result(policy) + if policy and 'intervaltype' in policy: + for key, value in self.interval_types.items(): + if value == policy['intervaltype']: + self.result['interval_type'] = key + break + volume = self.get_volume() + if volume: + volume_results = { + 'volume': volume.get('name'), + 'zone': volume.get('zonename'), + 'project': volume.get('project'), + 'account': volume.get('account'), + 'domain': volume.get('domain'), + } + self.result.update(volume_results) + return self.result + + +def main(): + argument_spec = cs_argument_spec() + argument_spec.update(dict( + volume=dict(), + volume_type=dict(choices=['DATADISK', 'ROOT']), + vm=dict(), + device_id=dict(type='int'), + vpc=dict(), + interval_type=dict(default='daily', choices=['hourly', 'daily', 'weekly', 'monthly'], aliases=['interval']), + schedule=dict(), + time_zone=dict(default='UTC', aliases=['timezone']), + max_snaps=dict(type='int', default=8, aliases=['max']), + state=dict(choices=['present', 'absent'], default='present'), + domain=dict(), + account=dict(), + project=dict(), + )) + + module = AnsibleModule( + argument_spec=argument_spec, + required_together=cs_required_together(), + required_one_of=( + ['vm', 'volume'], + ), + supports_check_mode=True + ) + + acs_snapshot_policy = AnsibleCloudStackSnapshotPolicy(module) + + state = module.params.get('state') + if state in ['absent']: + policy = acs_snapshot_policy.absent_snapshot_policy() + else: + policy = acs_snapshot_policy.present_snapshot_policy() + + result = acs_snapshot_policy.get_result(policy) + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/cloudstack/cs_sshkeypair.py b/plugins/modules/cloud/cloudstack/cs_sshkeypair.py new file mode 100644 index 0000000000..06a2263594 --- /dev/null +++ b/plugins/modules/cloud/cloudstack/cs_sshkeypair.py @@ -0,0 +1,267 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# (c) 2015, René Moser +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['stableinterface'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: cs_sshkeypair +short_description: Manages SSH keys on Apache CloudStack based clouds. +description: + - Create, register and remove SSH keys. + - If no key was found and no public key was provided and a new SSH + private/public key pair will be created and the private key will be returned. +author: René Moser (@resmo) +options: + name: + description: + - Name of public key. + type: str + required: true + domain: + description: + - Domain the public key is related to. + type: str + account: + description: + - Account the public key is related to. + type: str + project: + description: + - Name of the project the public key to be registered in. + type: str + state: + description: + - State of the public key. + type: str + default: present + choices: [ present, absent ] + public_key: + description: + - String of the public key. 
+ type: str +extends_documentation_fragment: +- community.general.cloudstack + +''' + +EXAMPLES = ''' +- name: create a new private / public key pair + cs_sshkeypair: + name: linus@example.com + delegate_to: localhost + register: key +- debug: + msg: 'Private key is {{ key.private_key }}' + +- name: remove a public key by its name + cs_sshkeypair: + name: linus@example.com + state: absent + delegate_to: localhost + +- name: register your existing local public key + cs_sshkeypair: + name: linus@example.com + public_key: "{{ lookup('file', '~/.ssh/id_rsa.pub') }}" + delegate_to: localhost +''' + +RETURN = ''' +--- +id: + description: UUID of the SSH public key. + returned: success + type: str + sample: a6f7a5fc-43f8-11e5-a151-feff819cdc9f +name: + description: Name of the SSH public key. + returned: success + type: str + sample: linus@example.com +fingerprint: + description: Fingerprint of the SSH public key. + returned: success + type: str + sample: "86:5e:a3:e8:bd:95:7b:07:7c:c2:5c:f7:ad:8b:09:28" +private_key: + description: Private key of generated SSH keypair. + returned: changed + type: str + sample: "-----BEGIN RSA PRIVATE KEY-----\nMII...8tO\n-----END RSA PRIVATE KEY-----\n" +''' + +import traceback + +SSHPUBKEYS_IMP_ERR = None +try: + import sshpubkeys + HAS_LIB_SSHPUBKEYS = True +except ImportError: + SSHPUBKEYS_IMP_ERR = traceback.format_exc() + HAS_LIB_SSHPUBKEYS = False + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib +from ansible.module_utils._text import to_native +from ansible_collections.community.general.plugins.module_utils.cloudstack import ( + AnsibleCloudStack, + cs_required_together, + cs_argument_spec +) + + +class AnsibleCloudStackSshKey(AnsibleCloudStack): + + def __init__(self, module): + super(AnsibleCloudStackSshKey, self).__init__(module) + self.returns = { + 'privatekey': 'private_key', + 'fingerprint': 'fingerprint', + } + self.ssh_key = None + + def register_ssh_key(self, public_key): + ssh_key = self.get_ssh_key() + args = self._get_common_args() + name = self.module.params.get('name') + + res = None + if not ssh_key: + self.result['changed'] = True + args['publickey'] = public_key + if not self.module.check_mode: + args['name'] = name + res = self.query_api('registerSSHKeyPair', **args) + else: + fingerprint = self._get_ssh_fingerprint(public_key) + if ssh_key['fingerprint'] != fingerprint: + self.result['changed'] = True + if not self.module.check_mode: + # delete the ssh key with matching name but wrong fingerprint + args['name'] = name + self.query_api('deleteSSHKeyPair', **args) + + elif ssh_key['name'].lower() != name.lower(): + self.result['changed'] = True + if not self.module.check_mode: + # delete the ssh key with matching fingerprint but wrong name + args['name'] = ssh_key['name'] + self.query_api('deleteSSHKeyPair', **args) + # First match for key retrievement will be the fingerprint. + # We need to make another lookup if there is a key with identical name. 
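+                # Resetting self.ssh_key forces get_ssh_key() to query the
+                # API again; if that second lookup finds a key with the same
+                # name but a different fingerprint, it is deleted as well
+                # before the new key is registered below.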
+ self.ssh_key = None + ssh_key = self.get_ssh_key() + if ssh_key and ssh_key['fingerprint'] != fingerprint: + args['name'] = name + self.query_api('deleteSSHKeyPair', **args) + + if not self.module.check_mode and self.result['changed']: + args['publickey'] = public_key + args['name'] = name + res = self.query_api('registerSSHKeyPair', **args) + + if res and 'keypair' in res: + ssh_key = res['keypair'] + + return ssh_key + + def create_ssh_key(self): + ssh_key = self.get_ssh_key() + if not ssh_key: + self.result['changed'] = True + args = self._get_common_args() + args['name'] = self.module.params.get('name') + if not self.module.check_mode: + res = self.query_api('createSSHKeyPair', **args) + ssh_key = res['keypair'] + return ssh_key + + def remove_ssh_key(self, name=None): + ssh_key = self.get_ssh_key() + if ssh_key: + self.result['changed'] = True + args = self._get_common_args() + args['name'] = name or self.module.params.get('name') + if not self.module.check_mode: + self.query_api('deleteSSHKeyPair', **args) + return ssh_key + + def _get_common_args(self): + return { + 'domainid': self.get_domain('id'), + 'account': self.get_account('name'), + 'projectid': self.get_project('id') + } + + def get_ssh_key(self): + if not self.ssh_key: + public_key = self.module.params.get('public_key') + if public_key: + # Query by fingerprint of the public key + args_fingerprint = self._get_common_args() + args_fingerprint['fingerprint'] = self._get_ssh_fingerprint(public_key) + ssh_keys = self.query_api('listSSHKeyPairs', **args_fingerprint) + if ssh_keys and 'sshkeypair' in ssh_keys: + self.ssh_key = ssh_keys['sshkeypair'][0] + # When key has not been found by fingerprint, use the name + if not self.ssh_key: + args_name = self._get_common_args() + args_name['name'] = self.module.params.get('name') + ssh_keys = self.query_api('listSSHKeyPairs', **args_name) + if ssh_keys and 'sshkeypair' in ssh_keys: + self.ssh_key = ssh_keys['sshkeypair'][0] + return self.ssh_key + + def _get_ssh_fingerprint(self, public_key): + key = sshpubkeys.SSHKey(public_key) + if hasattr(key, 'hash_md5'): + return key.hash_md5().replace(to_native('MD5:'), to_native('')) + return key.hash() + + +def main(): + argument_spec = cs_argument_spec() + argument_spec.update(dict( + name=dict(required=True), + public_key=dict(), + domain=dict(), + account=dict(), + project=dict(), + state=dict(choices=['present', 'absent'], default='present'), + )) + + module = AnsibleModule( + argument_spec=argument_spec, + required_together=cs_required_together(), + supports_check_mode=True + ) + + if not HAS_LIB_SSHPUBKEYS: + module.fail_json(msg=missing_required_lib("sshpubkeys"), exception=SSHPUBKEYS_IMP_ERR) + + acs_sshkey = AnsibleCloudStackSshKey(module) + state = module.params.get('state') + if state in ['absent']: + ssh_key = acs_sshkey.remove_ssh_key() + else: + public_key = module.params.get('public_key') + if public_key: + ssh_key = acs_sshkey.register_ssh_key(public_key) + else: + ssh_key = acs_sshkey.create_ssh_key() + + result = acs_sshkey.get_result(ssh_key) + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/cloudstack/cs_staticnat.py b/plugins/modules/cloud/cloudstack/cs_staticnat.py new file mode 100644 index 0000000000..936cc6e518 --- /dev/null +++ b/plugins/modules/cloud/cloudstack/cs_staticnat.py @@ -0,0 +1,255 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (c) 2015, René Moser +# GNU General Public License v3.0+ (see COPYING or 
https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['stableinterface'],
+                    'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: cs_staticnat
+short_description: Manages static NATs on Apache CloudStack based clouds.
+description:
+  - Create, update and remove static NATs.
+author: René Moser (@resmo)
+options:
+  ip_address:
+    description:
+      - Public IP address the static NAT is assigned to.
+    type: str
+    required: true
+  vm:
+    description:
+      - Name of the virtual machine to create the static NAT for.
+      - Required if I(state=present).
+    type: str
+  vm_guest_ip:
+    description:
+      - VM guest NIC secondary IP address for the static NAT.
+    type: str
+  network:
+    description:
+      - Network the IP address is related to.
+    type: str
+  vpc:
+    description:
+      - VPC the network is related to.
+    type: str
+  state:
+    description:
+      - State of the static NAT.
+    type: str
+    default: present
+    choices: [ present, absent ]
+  domain:
+    description:
+      - Domain the static NAT is related to.
+    type: str
+  account:
+    description:
+      - Account the static NAT is related to.
+    type: str
+  project:
+    description:
+      - Name of the project the static NAT is related to.
+    type: str
+  zone:
+    description:
+      - Name of the zone the virtual machine is in.
+      - If not set, default zone is used.
+    type: str
+  poll_async:
+    description:
+      - Poll async jobs until job has finished.
+    type: bool
+    default: yes
+extends_documentation_fragment:
+- community.general.cloudstack
+
+'''
+
+EXAMPLES = '''
+- name: Create a static NAT for IP 1.2.3.4 to web01
+  cs_staticnat:
+    ip_address: 1.2.3.4
+    vm: web01
+  delegate_to: localhost
+
+- name: Remove a static NAT
+  cs_staticnat:
+    ip_address: 1.2.3.4
+    state: absent
+  delegate_to: localhost
+'''
+
+RETURN = '''
+---
+id:
+  description: UUID of the ip_address.
+  returned: success
+  type: str
+  sample: a6f7a5fc-43f8-11e5-a151-feff819cdc9f
+ip_address:
+  description: Public IP address.
+  returned: success
+  type: str
+  sample: 1.2.3.4
+vm_name:
+  description: Name of the virtual machine.
+  returned: success
+  type: str
+  sample: web-01
+vm_display_name:
+  description: Display name of the virtual machine.
+  returned: success
+  type: str
+  sample: web-01
+vm_guest_ip:
+  description: IP of the virtual machine.
+  returned: success
+  type: str
+  sample: 10.101.65.152
+zone:
+  description: Name of zone the static NAT is related to.
+  returned: success
+  type: str
+  sample: ch-gva-2
+project:
+  description: Name of project the static NAT is related to.
+  returned: success
+  type: str
+  sample: Production
+account:
+  description: Account the static NAT is related to.
+  returned: success
+  type: str
+  sample: example account
+domain:
+  description: Domain the static NAT is related to.
+ returned: success + type: str + sample: example domain +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.cloudstack import ( + AnsibleCloudStack, + cs_argument_spec, + cs_required_together, +) + + +class AnsibleCloudStackStaticNat(AnsibleCloudStack): + + def __init__(self, module): + super(AnsibleCloudStackStaticNat, self).__init__(module) + self.returns = { + 'virtualmachinedisplayname': 'vm_display_name', + 'virtualmachinename': 'vm_name', + 'ipaddress': 'ip_address', + 'vmipaddress': 'vm_guest_ip', + } + + def create_static_nat(self, ip_address): + self.result['changed'] = True + args = { + 'virtualmachineid': self.get_vm(key='id'), + 'ipaddressid': ip_address['id'], + 'vmguestip': self.get_vm_guest_ip(), + 'networkid': self.get_network(key='id') + } + if not self.module.check_mode: + self.query_api('enableStaticNat', **args) + + # reset ip address and query new values + self.ip_address = None + ip_address = self.get_ip_address() + return ip_address + + def update_static_nat(self, ip_address): + args = { + 'virtualmachineid': self.get_vm(key='id'), + 'ipaddressid': ip_address['id'], + 'vmguestip': self.get_vm_guest_ip(), + 'networkid': self.get_network(key='id') + } + # make an alias, so we can use has_changed() + ip_address['vmguestip'] = ip_address['vmipaddress'] + if self.has_changed(args, ip_address, ['vmguestip', 'virtualmachineid']): + self.result['changed'] = True + if not self.module.check_mode: + res = self.query_api('disableStaticNat', ipaddressid=ip_address['id']) + self.poll_job(res, 'staticnat') + + self.query_api('enableStaticNat', **args) + + # reset ip address and query new values + self.ip_address = None + ip_address = self.get_ip_address() + return ip_address + + def present_static_nat(self): + ip_address = self.get_ip_address() + if not ip_address['isstaticnat']: + ip_address = self.create_static_nat(ip_address) + else: + ip_address = self.update_static_nat(ip_address) + return ip_address + + def absent_static_nat(self): + ip_address = self.get_ip_address() + if ip_address['isstaticnat']: + self.result['changed'] = True + if not self.module.check_mode: + res = self.query_api('disableStaticNat', ipaddressid=ip_address['id']) + + poll_async = self.module.params.get('poll_async') + if poll_async: + self.poll_job(res, 'staticnat') + return ip_address + + +def main(): + argument_spec = cs_argument_spec() + argument_spec.update(dict( + ip_address=dict(required=True), + vm=dict(), + vm_guest_ip=dict(), + network=dict(), + vpc=dict(), + state=dict(choices=['present', 'absent'], default='present'), + zone=dict(), + domain=dict(), + account=dict(), + project=dict(), + poll_async=dict(type='bool', default=True), + )) + + module = AnsibleModule( + argument_spec=argument_spec, + required_together=cs_required_together(), + supports_check_mode=True + ) + + acs_static_nat = AnsibleCloudStackStaticNat(module) + + state = module.params.get('state') + if state in ['absent']: + ip_address = acs_static_nat.absent_static_nat() + else: + ip_address = acs_static_nat.present_static_nat() + + result = acs_static_nat.get_result(ip_address) + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/cloudstack/cs_storage_pool.py b/plugins/modules/cloud/cloudstack/cs_storage_pool.py new file mode 100644 index 0000000000..9eb854c77a --- /dev/null +++ b/plugins/modules/cloud/cloudstack/cs_storage_pool.py @@ -0,0 +1,510 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# (c) 
2017, Netservers Ltd.
+# (c) 2017, René Moser
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: cs_storage_pool
+short_description: Manages Primary Storage Pools on Apache CloudStack based clouds.
+description:
+ - Create, update, put into maintenance, disable, enable and remove storage pools.
+author:
+ - Netservers Ltd. (@netservers)
+ - René Moser (@resmo)
+options:
+ name:
+ description:
+ - Name of the storage pool.
+ type: str
+ required: true
+ zone:
+ description:
+ - Name of the zone in which the storage pool should be deployed.
+ - If not set, default zone is used.
+ type: str
+ storage_url:
+ description:
+ - URL of the storage pool.
+ - Required if I(state=present).
+ type: str
+ pod:
+ description:
+ - Name of the pod.
+ type: str
+ cluster:
+ description:
+ - Name of the cluster.
+ type: str
+ scope:
+ description:
+ - The scope of the storage pool.
+ - Defaults to cluster when C(cluster) is provided, otherwise zone.
+ type: str
+ choices: [ cluster, zone ]
+ managed:
+ description:
+ - Whether the storage pool should be managed by CloudStack.
+ - Only considered on creation.
+ type: bool
+ hypervisor:
+ description:
+ - Required when creating a zone scoped pool.
+ - Possible values are C(KVM), C(VMware), C(BareMetal), C(XenServer), C(LXC), C(HyperV), C(UCS), C(OVM), C(Simulator).
+ type: str
+ storage_tags:
+ description:
+ - Tags associated with this storage pool.
+ type: list
+ aliases: [ storage_tag ]
+ provider:
+ description:
+ - Name of the storage provider e.g. SolidFire, SolidFireShared, DefaultPrimary, CloudByte.
+ type: str
+ default: DefaultPrimary
+ capacity_bytes:
+ description:
+ - Bytes CloudStack can provision from this storage pool.
+ type: int
+ capacity_iops:
+ description:
+ - IOPS CloudStack can provision from this storage pool.
+ type: int
+ allocation_state:
+ description:
+ - Allocation state of the storage pool.
+ type: str
+ choices: [ enabled, disabled, maintenance ]
+ state:
+ description:
+ - State of the storage pool.
+ type: str
+ default: present
+ choices: [ present, absent ]
+extends_documentation_fragment:
+- community.general.cloudstack
+
+'''
+
+EXAMPLES = '''
+- name: ensure a zone scoped storage_pool is present
+ cs_storage_pool:
+ zone: zone01
+ storage_url: rbd://admin:SECRET@ceph-mons.domain/poolname
+ provider: DefaultPrimary
+ name: Ceph RBD
+ scope: zone
+ hypervisor: KVM
+ delegate_to: localhost
+
+- name: ensure a cluster scoped storage_pool is disabled
+ cs_storage_pool:
+ name: Ceph RBD
+ zone: zone01
+ cluster: cluster01
+ pod: pod01
+ storage_url: rbd://admin:SECRET@ceph-the-mons.domain/poolname
+ provider: DefaultPrimary
+ scope: cluster
+ allocation_state: disabled
+ delegate_to: localhost
+
+- name: ensure a cluster scoped storage_pool is in maintenance
+ cs_storage_pool:
+ name: Ceph RBD
+ zone: zone01
+ cluster: cluster01
+ pod: pod01
+ storage_url: rbd://admin:SECRET@ceph-the-mons.domain/poolname
+ provider: DefaultPrimary
+ scope: cluster
+ allocation_state: maintenance
+ delegate_to: localhost
+
+- name: ensure a storage_pool is absent
+ cs_storage_pool:
+ name: Ceph RBD
+ state: absent
+ delegate_to: localhost
+'''
+
+RETURN = '''
+---
+id:
+ description: UUID of the pool.
+ returned: success
+ type: str
+ sample: a3fca65a-7db1-4891-b97c-48806a978a96
+created:
+ description: Date the pool was created.
+ returned: success
+ type: str
+ sample: 2014-12-01T14:57:57+0100
+capacity_iops:
+ description: IOPS CloudStack can provision from this storage pool.
+ returned: when available
+ type: int
+ sample: 60000
+zone:
+ description: The name of the zone.
+ returned: success
+ type: str
+ sample: Zone01
+cluster:
+ description: The name of the cluster.
+ returned: when scope is cluster
+ type: str
+ sample: Cluster01
+pod:
+ description: The name of the pod.
+ returned: when scope is cluster
+ type: str
+ sample: Pod01
+disk_size_allocated:
+ description: The pool's currently allocated disk space.
+ returned: success
+ type: int
+ sample: 2443517624320
+disk_size_total:
+ description: The total size of the pool.
+ returned: success
+ type: int
+ sample: 3915055693824
+disk_size_used:
+ description: The pool's currently used disk size.
+ returned: success
+ type: int
+ sample: 1040862622180
+scope:
+ description: The scope of the storage pool.
+ returned: success
+ type: str
+ sample: cluster
+hypervisor:
+ description: Hypervisor related to this storage pool.
+ returned: when available
+ type: str
+ sample: KVM
+state:
+ description: The state of the storage pool as returned by the API.
+ returned: success
+ type: str
+ sample: Up
+allocation_state:
+ description: The state of the storage pool.
+ returned: success
+ type: str
+ sample: enabled
+path:
+ description: The storage pool path used in the storage_url.
+ returned: success
+ type: str
+ sample: poolname
+overprovision_factor:
+ description: The overprovision factor of the storage pool.
+ returned: success
+ type: str
+ sample: 2.0
+suitable_for_migration:
+ description: Whether the storage pool is suitable to migrate a volume or not.
+ returned: success
+ type: bool
+ sample: false
+storage_capabilities:
+ description: Capabilities of the storage pool.
+ returned: success
+ type: dict
+ sample: {"VOLUME_SNAPSHOT_QUIESCEVM": "false"}
+storage_tags:
+ description: The tags for the storage pool.
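+ # Returned as a list, split from the comma separated string the API returns
+ # (see get_result() below).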
+ returned: success
+ type: list
+ sample: ["perf", "ssd"]
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.cloudstack import (
+ AnsibleCloudStack,
+ cs_argument_spec,
+ cs_required_together,
+)
+
+
+class AnsibleCloudStackStoragePool(AnsibleCloudStack):
+
+ def __init__(self, module):
+ super(AnsibleCloudStackStoragePool, self).__init__(module)
+ # Mapping of API field names to the module's return keys
+ self.returns = {
+ 'capacityiops': 'capacity_iops',
+ 'podname': 'pod',
+ 'clustername': 'cluster',
+ 'disksizeallocated': 'disk_size_allocated',
+ 'disksizetotal': 'disk_size_total',
+ 'disksizeused': 'disk_size_used',
+ 'scope': 'scope',
+ 'hypervisor': 'hypervisor',
+ 'type': 'type',
+ 'ipaddress': 'ip_address',
+ 'path': 'path',
+ 'overprovisionfactor': 'overprovision_factor',
+ 'storagecapabilities': 'storage_capabilities',
+ 'suitableformigration': 'suitable_for_migration',
+ }
+ self.allocation_states = {
+ # Pool state: param state
+ 'Up': 'enabled',
+ 'Disabled': 'disabled',
+ 'Maintenance': 'maintenance',
+ }
+ self.storage_pool = None
+
+ def _get_common_args(self):
+ return {
+ 'name': self.module.params.get('name'),
+ 'url': self.module.params.get('storage_url'),
+ 'zoneid': self.get_zone(key='id'),
+ 'provider': self.get_storage_provider(),
+ 'scope': self.module.params.get('scope'),
+ 'hypervisor': self.module.params.get('hypervisor'),
+ 'capacitybytes': self.module.params.get('capacity_bytes'),
+ 'capacityiops': self.module.params.get('capacity_iops'),
+ }
+
+ def _allocation_state_enabled_disabled_changed(self, pool, allocation_state):
+ if allocation_state in ['enabled', 'disabled']:
+ for pool_state, param_state in self.allocation_states.items():
+ if pool_state == pool['state'] and allocation_state != param_state:
+ return True
+ return False
+
+ def _handle_allocation_state(self, pool, state=None):
+ allocation_state = state or self.module.params.get('allocation_state')
+ if not allocation_state:
+ return pool
+
+ if self.allocation_states.get(pool['state']) == allocation_state:
+ return pool
+
+ # Cancel maintenance if target state is enabled/disabled
+ elif allocation_state in ['enabled', 'disabled']:
+ pool = self._cancel_maintenance(pool)
+ pool = self._update_storage_pool(pool=pool, allocation_state=allocation_state)
+
+ # Only an enabled pool can be put into maintenance
+ elif allocation_state == 'maintenance':
+ pool = self._update_storage_pool(pool=pool, allocation_state='enabled')
+ pool = self._enable_maintenance(pool=pool)
+
+ return pool
+
+ def _create_storage_pool(self):
+ args = self._get_common_args()
+ args.update({
+ 'clusterid': self.get_cluster(key='id'),
+ 'podid': self.get_pod(key='id'),
+ 'managed': self.module.params.get('managed'),
+ })
+
+ scope = self.module.params.get('scope')
+ if scope is None:
+ args['scope'] = 'cluster' if args['clusterid'] else 'zone'
+
+ self.result['changed'] = True
+
+ if not self.module.check_mode:
+ res = self.query_api('createStoragePool', **args)
+ return res['storagepool']
+
+ def _update_storage_pool(self, pool, allocation_state=None):
+ args = {
+ 'id': pool['id'],
+ 'capacitybytes': self.module.params.get('capacity_bytes'),
+ 'capacityiops': self.module.params.get('capacity_iops'),
+ 'tags': self.get_storage_tags(),
+ }
+
+ if self.has_changed(args, pool) or self._allocation_state_enabled_disabled_changed(pool, allocation_state):
+ self.result['changed'] = True
+ args['enabled'] = allocation_state == 'enabled' if allocation_state in ['enabled', 'disabled'] else None
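+ # 'enabled' only toggles the enable/disable transition on updateStoragePool;
+ # for other target states it stays None, which is assumed to be dropped
+ # from the request arguments before the API call is made.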
+ if not self.module.check_mode: + res = self.query_api('updateStoragePool', **args) + pool = res['storagepool'] + return pool + + def _enable_maintenance(self, pool): + if pool['state'].lower() != "maintenance": + self.result['changed'] = True + if not self.module.check_mode: + res = self.query_api('enableStorageMaintenance', id=pool['id']) + pool = self.poll_job(res, 'storagepool') + return pool + + def _cancel_maintenance(self, pool): + if pool['state'].lower() == "maintenance": + self.result['changed'] = True + if not self.module.check_mode: + res = self.query_api('cancelStorageMaintenance', id=pool['id']) + pool = self.poll_job(res, 'storagepool') + return pool + + def get_storage_tags(self): + storage_tags = self.module.params.get('storage_tags') + if storage_tags is None: + return None + return ','.join(storage_tags) + + def get_storage_pool(self, key=None): + if self.storage_pool is None: + zoneid = self.get_zone(key='id') + clusterid = self.get_cluster(key='id') + podid = self.get_pod(key='id') + + args = { + 'zoneid': zoneid, + 'podid': podid, + 'clusterid': clusterid, + 'name': self.module.params.get('name'), + } + + res = self.query_api('listStoragePools', **args) + if 'storagepool' not in res: + return None + + self.storage_pool = res['storagepool'][0] + + return self.storage_pool + + def present_storage_pool(self): + pool = self.get_storage_pool() + if pool: + pool = self._update_storage_pool(pool=pool) + else: + pool = self._create_storage_pool() + + if pool: + pool = self._handle_allocation_state(pool=pool) + + return pool + + def absent_storage_pool(self): + pool = self.get_storage_pool() + if pool: + self.result['changed'] = True + + args = { + 'id': pool['id'], + } + if not self.module.check_mode: + # Only a pool in maintenance can be deleted + self._handle_allocation_state(pool=pool, state='maintenance') + self.query_api('deleteStoragePool', **args) + return pool + + def get_storage_provider(self, type="primary"): + args = { + 'type': type, + } + provider = self.module.params.get('provider') + storage_providers = self.query_api('listStorageProviders', **args) + for sp in storage_providers.get('dataStoreProvider') or []: + if sp['name'].lower() == provider.lower(): + return provider + self.fail_json(msg="Storage provider %s not found" % provider) + + def get_pod(self, key=None): + pod = self.module.params.get('pod') + if not pod: + return None + args = { + 'name': pod, + 'zoneid': self.get_zone(key='id'), + } + pods = self.query_api('listPods', **args) + if pods: + return self._get_by_key(key, pods['pod'][0]) + + self.fail_json(msg="Pod %s not found" % self.module.params.get('pod')) + + def get_cluster(self, key=None): + cluster = self.module.params.get('cluster') + if not cluster: + return None + + args = { + 'name': cluster, + 'zoneid': self.get_zone(key='id'), + } + + clusters = self.query_api('listClusters', **args) + if clusters: + return self._get_by_key(key, clusters['cluster'][0]) + + self.fail_json(msg="Cluster %s not found" % cluster) + + def get_result(self, pool): + super(AnsibleCloudStackStoragePool, self).get_result(pool) + if pool: + self.result['storage_url'] = "%s://%s/%s" % (pool['type'], pool['ipaddress'], pool['path']) + self.result['scope'] = pool['scope'].lower() + self.result['storage_tags'] = pool['tags'].split(',') if pool.get('tags') else [] + self.result['allocation_state'] = self.allocation_states.get(pool['state']) + return self.result + + +def main(): + argument_spec = cs_argument_spec() + argument_spec.update(dict( + 
name=dict(required=True),
+ storage_url=dict(),
+ zone=dict(),
+ pod=dict(),
+ cluster=dict(),
+ scope=dict(choices=['zone', 'cluster']),
+ hypervisor=dict(),
+ provider=dict(default='DefaultPrimary'),
+ capacity_bytes=dict(type='int'),
+ capacity_iops=dict(type='int'),
+ managed=dict(type='bool'),
+ storage_tags=dict(type='list', aliases=['storage_tag']),
+ allocation_state=dict(choices=['enabled', 'disabled', 'maintenance']),
+ state=dict(choices=['present', 'absent'], default='present'),
+ ))
+
+ required_together = cs_required_together()
+ required_together.extend([
+ ['pod', 'cluster'],
+ ])
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_together=required_together,
+ required_if=[
+ ('state', 'present', ['storage_url']),
+ ],
+ supports_check_mode=True
+ )
+
+ acs_storage_pool = AnsibleCloudStackStoragePool(module)
+
+ state = module.params.get('state')
+ if state in ['absent']:
+ pool = acs_storage_pool.absent_storage_pool()
+ else:
+ pool = acs_storage_pool.present_storage_pool()
+
+ result = acs_storage_pool.get_result(pool)
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/plugins/modules/cloud/cloudstack/cs_template.py b/plugins/modules/cloud/cloudstack/cs_template.py
new file mode 100644
index 0000000000..e3f29404ea
--- /dev/null
+++ b/plugins/modules/cloud/cloudstack/cs_template.py
@@ -0,0 +1,744 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2015, René Moser
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['stableinterface'],
+ 'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: cs_template
+short_description: Manages templates on Apache CloudStack based clouds.
+description:
+ - Register templates from a URL.
+ - Create templates from a ROOT volume of a stopped VM or its snapshot.
+ - Update (since version 2.7), extract and delete templates.
+author: René Moser (@resmo)
+options:
+ name:
+ description:
+ - Name of the template.
+ type: str
+ required: true
+ url:
+ description:
+ - URL where the template is hosted on I(state=present).
+ - URL to which the template would be extracted on I(state=extracted).
+ - Mutually exclusive with I(vm).
+ type: str
+ vm:
+ description:
+ - Name of the VM the template will be created from, either from its volume or alternatively from a snapshot of it.
+ - VM must be in stopped state if created from its volume.
+ - Mutually exclusive with I(url).
+ type: str
+ snapshot:
+ description:
+ - Name of the snapshot, created from the VM ROOT volume, the template will be created from.
+ - I(vm) is required together with this argument.
+ type: str
+ os_type:
+ description:
+ - OS type that best represents the OS of this template.
+ type: str
+ checksum:
+ description:
+ - The MD5 checksum value of this template.
+ - If set, we search by checksum instead of name.
+ type: str
+ is_ready:
+ description:
+ - "Note: this flag was not implemented and therefore marked as deprecated."
+ - Deprecated, will be removed in version 2.11.
+ type: bool
+ is_public:
+ description:
+ - Register the template to be publicly available to all users.
+ - Only used if I(state) is C(present).
+ type: bool
+ is_featured:
+ description:
+ - Register the template to be featured.
+ - Only used if I(state) is C(present).
+ type: bool
+ is_dynamically_scalable:
+ description:
+ - Register the template having XS/VMware tools installed in order to support dynamic scaling of VM CPU/memory.
+ - Only used if I(state) is C(present).
+ type: bool
+ cross_zones:
+ description:
+ - Whether the template should be synced or removed across zones.
+ - Only used if I(state) is C(present) or C(absent).
+ default: no
+ type: bool
+ mode:
+ description:
+ - Mode for the template extraction.
+ - Only used if I(state=extracted).
+ type: str
+ default: http_download
+ choices: [ http_download, ftp_upload ]
+ domain:
+ description:
+ - Domain the template, snapshot or VM is related to.
+ type: str
+ account:
+ description:
+ - Account the template, snapshot or VM is related to.
+ type: str
+ project:
+ description:
+ - Name of the project the template is to be registered in.
+ type: str
+ zone:
+ description:
+ - Name of the zone you wish the template to be registered or deleted from.
+ - If not specified, first found zone will be used.
+ type: str
+ template_filter:
+ description:
+ - Name of the filter used to search for the template.
+ - The filter C(all) was added in 2.7.
+ type: str
+ default: self
+ choices: [ all, featured, self, selfexecutable, sharedexecutable, executable, community ]
+ template_find_options:
+ description:
+ - Options to find a template uniquely.
+ - More than one allowed.
+ type: list
+ choices: [ display_text, checksum, cross_zones ]
+ aliases: [ template_find_option ]
+ default: []
+ hypervisor:
+ description:
+ - Name of the hypervisor to be used for creating the new template.
+ - Relevant when using I(state=present).
+ - Possible values are C(KVM), C(VMware), C(BareMetal), C(XenServer), C(LXC), C(HyperV), C(UCS), C(OVM), C(Simulator).
+ type: str
+ requires_hvm:
+ description:
+ - Whether the template requires HVM or not.
+ - Only considered while creating the template.
+ type: bool
+ password_enabled:
+ description:
+ - Enable template password reset support.
+ type: bool
+ template_tag:
+ description:
+ - The tag for this template.
+ type: str
+ sshkey_enabled:
+ description:
+ - True if the template supports the sshkey upload feature.
+ - Only considered if I(url) is used (API limitation).
+ type: bool
+ is_routing:
+ description:
+ - Sets the template type to routing, i.e. if template is used to deploy routers.
+ - Only considered if I(url) is used.
+ type: bool
+ format:
+ description:
+ - The format for the template.
+ - Only considered if I(state=present).
+ type: str
+ choices: [ QCOW2, RAW, VHD, OVA ]
+ is_extractable:
+ description:
+ - Allows the template or its derivatives to be extractable.
+ type: bool
+ details:
+ description:
+ - Template details in key/value pairs.
+ type: str
+ bits:
+ description:
+ - 32 or 64 bits support.
+ type: int
+ default: 64
+ choices: [ 32, 64 ]
+ display_text:
+ description:
+ - Display text of the template.
+ type: str
+ state:
+ description:
+ - State of the template.
+ type: str
+ default: present
+ choices: [ present, absent, extracted ]
+ poll_async:
+ description:
+ - Poll async jobs until job has finished.
+ default: yes
+ type: bool
+ tags:
+ description:
+ - List of tags. Tags are a list of dictionaries having keys I(key) and I(value).
+ - "To delete all tags, set an empty list e.g. I(tags: [])."
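+ # For example, I(tags: [ { key: env, value: prod } ]) would set a single tag.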
+ type: list + aliases: [ tag ] +extends_documentation_fragment: +- community.general.cloudstack + +''' + +EXAMPLES = ''' +- name: register a systemvm template + cs_template: + name: systemvm-vmware-4.5 + url: "http://packages.shapeblue.com/systemvmtemplate/4.5/systemvm64template-4.5-vmware.ova" + hypervisor: VMware + format: OVA + cross_zones: yes + os_type: Debian GNU/Linux 7(64-bit) + delegate_to: localhost + +- name: Create a template from a stopped virtual machine's volume + cs_template: + name: Debian 9 (64-bit) 20GB ({{ ansible_date_time.date }}) + vm: debian-9-base-vm + os_type: Debian GNU/Linux 9 (64-bit) + zone: tokio-ix + password_enabled: yes + is_public: yes + delegate_to: localhost + +# Note: Use template_find_option(s) when a template name is not unique +- name: Create a template from a stopped virtual machine's volume + cs_template: + name: Debian 9 (64-bit) + display_text: Debian 9 (64-bit) 20GB ({{ ansible_date_time.date }}) + template_find_option: display_text + vm: debian-9-base-vm + os_type: Debian GNU/Linux 9 (64-bit) + zone: tokio-ix + password_enabled: yes + is_public: yes + delegate_to: localhost + +- name: create a template from a virtual machine's root volume snapshot + cs_template: + name: Debian 9 (64-bit) Snapshot ROOT-233_2015061509114 + snapshot: ROOT-233_2015061509114 + os_type: Debian GNU/Linux 9 (64-bit) + zone: tokio-ix + password_enabled: yes + is_public: yes + delegate_to: localhost + +- name: Remove a template + cs_template: + name: systemvm-4.2 + cross_zones: yes + state: absent + delegate_to: localhost +''' + +RETURN = ''' +--- +id: + description: UUID of the template or extracted object. + returned: success + type: str + sample: a6f7a5fc-43f8-11e5-a151-feff819cdc9f +name: + description: Name of the template or extracted object. + returned: success + type: str + sample: Debian 7 64-bit +display_text: + description: Display text of the template. + returned: if available + type: str + sample: Debian 7.7 64-bit minimal 2015-03-19 +checksum: + description: MD5 checksum of the template. + returned: if available + type: str + sample: 0b31bccccb048d20b551f70830bb7ad0 +status: + description: Status of the template or extracted object. + returned: success + type: str + sample: Download Complete +is_ready: + description: True if the template is ready to be deployed from. + returned: if available + type: bool + sample: true +is_public: + description: True if the template is public. + returned: if available + type: bool + sample: true +is_featured: + description: True if the template is featured. + returned: if available + type: bool + sample: true +is_extractable: + description: True if the template is extractable. + returned: if available + type: bool + sample: true +format: + description: Format of the template. + returned: if available + type: str + sample: OVA +os_type: + description: Type of the OS. + returned: if available + type: str + sample: CentOS 6.5 (64-bit) +password_enabled: + description: True if the reset password feature is enabled, false otherwise. + returned: if available + type: bool + sample: false +sshkey_enabled: + description: true if template is sshkey enabled, false otherwise. + returned: if available + type: bool + sample: false +cross_zones: + description: true if the template is managed across all zones, false otherwise. + returned: if available + type: bool + sample: false +template_type: + description: Type of the template. + returned: if available + type: str + sample: USER +created: + description: Date of registering. 
+ returned: success
+ type: str
+ sample: 2015-03-29T14:57:06+0200
+template_tag:
+ description: Template tag related to this template.
+ returned: if available
+ type: str
+ sample: special
+hypervisor:
+ description: Hypervisor related to this template.
+ returned: if available
+ type: str
+ sample: VMware
+mode:
+ description: Mode of extraction.
+ returned: on state=extracted
+ type: str
+ sample: http_download
+state:
+ description: State of the extracted template.
+ returned: on state=extracted
+ type: str
+ sample: DOWNLOAD_URL_CREATED
+url:
+ description: URL to which the template is extracted.
+ returned: on state=extracted
+ type: str
+ sample: "http://1.2.3.4/userdata/eb307f13-4aca-45e8-b157-a414a14e6b04.ova"
+tags:
+ description: List of resource tags associated with the template.
+ returned: if available
+ type: list
+ sample: '[ { "key": "foo", "value": "bar" } ]'
+zone:
+ description: Name of zone the template is registered in.
+ returned: success
+ type: str
+ sample: zuerich
+domain:
+ description: Domain the template is related to.
+ returned: success
+ type: str
+ sample: example domain
+account:
+ description: Account the template is related to.
+ returned: success
+ type: str
+ sample: example account
+project:
+ description: Name of project the template is related to.
+ returned: success
+ type: str
+ sample: Production
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.cloudstack import (
+ AnsibleCloudStack,
+ cs_argument_spec,
+ cs_required_together,
+)
+
+
+class AnsibleCloudStackTemplate(AnsibleCloudStack):
+
+ def __init__(self, module):
+ super(AnsibleCloudStackTemplate, self).__init__(module)
+ self.returns = {
+ 'checksum': 'checksum',
+ 'status': 'status',
+ 'isready': 'is_ready',
+ 'templatetag': 'template_tag',
+ 'sshkeyenabled': 'sshkey_enabled',
+ 'passwordenabled': 'password_enabled',
+ 'templatetype': 'template_type',
+ 'ostypename': 'os_type',
+ 'crossZones': 'cross_zones',
+ 'format': 'format',
+ 'hypervisor': 'hypervisor',
+ 'url': 'url',
+ 'extractMode': 'mode',
+ 'state': 'state',
+ }
+
+ def _get_args(self):
+ args = {
+ 'name': self.module.params.get('name'),
+ 'displaytext': self.get_or_fallback('display_text', 'name'),
+ 'bits': self.module.params.get('bits'),
+ 'isdynamicallyscalable': self.module.params.get('is_dynamically_scalable'),
+ 'isextractable': self.module.params.get('is_extractable'),
+ 'isfeatured': self.module.params.get('is_featured'),
+ 'ispublic': self.module.params.get('is_public'),
+ 'passwordenabled': self.module.params.get('password_enabled'),
+ 'requireshvm': self.module.params.get('requires_hvm'),
+ 'templatetag': self.module.params.get('template_tag'),
+ 'ostypeid': self.get_os_type(key='id'),
+ }
+
+ if not args['ostypeid']:
+ self.module.fail_json(msg="Missing required arguments: os_type")
+
+ return args
+
+ def get_root_volume(self, key=None):
+ args = {
+ 'account': self.get_account(key='name'),
+ 'domainid': self.get_domain(key='id'),
+ 'projectid': self.get_project(key='id'),
+ 'virtualmachineid': self.get_vm(key='id'),
+ 'type': "ROOT"
+ }
+ volumes = self.query_api('listVolumes', **args)
+ if volumes:
+ return self._get_by_key(key, volumes['volume'][0])
+ self.module.fail_json(msg="Root volume for '%s' not found" % self.get_vm('name'))
+
+ def get_snapshot(self, key=None):
+ snapshot = self.module.params.get('snapshot')
+ if not snapshot:
+ return None
+
+ args = {
+ 'account': self.get_account(key='name'),
+ 'domainid':
self.get_domain(key='id'), + 'projectid': self.get_project(key='id'), + 'volumeid': self.get_root_volume('id'), + 'fetch_list': True, + } + snapshots = self.query_api('listSnapshots', **args) + if snapshots: + for s in snapshots: + if snapshot in [s['name'], s['id']]: + return self._get_by_key(key, s) + self.module.fail_json(msg="Snapshot '%s' not found" % snapshot) + + def present_template(self): + template = self.get_template() + if template: + template = self.update_template(template) + elif self.module.params.get('url'): + template = self.register_template() + elif self.module.params.get('vm'): + template = self.create_template() + else: + self.fail_json(msg="one of the following is required on state=present: url, vm") + return template + + def create_template(self): + template = None + self.result['changed'] = True + + args = self._get_args() + snapshot_id = self.get_snapshot(key='id') + if snapshot_id: + args['snapshotid'] = snapshot_id + else: + args['volumeid'] = self.get_root_volume('id') + + if not self.module.check_mode: + template = self.query_api('createTemplate', **args) + + poll_async = self.module.params.get('poll_async') + if poll_async: + template = self.poll_job(template, 'template') + + if template: + template = self.ensure_tags(resource=template, resource_type='Template') + + return template + + def register_template(self): + required_params = [ + 'format', + 'url', + 'hypervisor', + ] + self.module.fail_on_missing_params(required_params=required_params) + template = None + self.result['changed'] = True + args = self._get_args() + args.update({ + 'url': self.module.params.get('url'), + 'format': self.module.params.get('format'), + 'checksum': self.module.params.get('checksum'), + 'isextractable': self.module.params.get('is_extractable'), + 'isrouting': self.module.params.get('is_routing'), + 'sshkeyenabled': self.module.params.get('sshkey_enabled'), + 'hypervisor': self.get_hypervisor(), + 'domainid': self.get_domain(key='id'), + 'account': self.get_account(key='name'), + 'projectid': self.get_project(key='id'), + }) + + if not self.module.params.get('cross_zones'): + args['zoneid'] = self.get_zone(key='id') + else: + args['zoneid'] = -1 + + if not self.module.check_mode: + self.query_api('registerTemplate', **args) + template = self.get_template() + return template + + def update_template(self, template): + args = { + 'id': template['id'], + 'displaytext': self.get_or_fallback('display_text', 'name'), + 'format': self.module.params.get('format'), + 'isdynamicallyscalable': self.module.params.get('is_dynamically_scalable'), + 'isrouting': self.module.params.get('is_routing'), + 'ostypeid': self.get_os_type(key='id'), + 'passwordenabled': self.module.params.get('password_enabled'), + } + if self.has_changed(args, template): + self.result['changed'] = True + if not self.module.check_mode: + self.query_api('updateTemplate', **args) + template = self.get_template() + + args = { + 'id': template['id'], + 'isextractable': self.module.params.get('is_extractable'), + 'isfeatured': self.module.params.get('is_featured'), + 'ispublic': self.module.params.get('is_public'), + } + if self.has_changed(args, template): + self.result['changed'] = True + if not self.module.check_mode: + self.query_api('updateTemplatePermissions', **args) + # Refresh + template = self.get_template() + + if template: + template = self.ensure_tags(resource=template, resource_type='Template') + + return template + + def _is_find_option(self, param_name): + return param_name in 
self.module.params.get('template_find_options') + + def _find_option_match(self, template, param_name, internal_name=None): + if not internal_name: + internal_name = param_name + + if param_name in self.module.params.get('template_find_options'): + param_value = self.module.params.get(param_name) + + if not param_value: + self.fail_json(msg="The param template_find_options has %s but param was not provided." % param_name) + + if template[internal_name] == param_value: + return True + return False + + def get_template(self): + args = { + 'name': self.module.params.get('name'), + 'templatefilter': self.module.params.get('template_filter'), + 'domainid': self.get_domain(key='id'), + 'account': self.get_account(key='name'), + 'projectid': self.get_project(key='id') + } + + cross_zones = self.module.params.get('cross_zones') + if not cross_zones: + args['zoneid'] = self.get_zone(key='id') + + template_found = None + + templates = self.query_api('listTemplates', **args) + if templates: + for tmpl in templates['template']: + + if self._is_find_option('cross_zones') and not self._find_option_match( + template=tmpl, + param_name='cross_zones', + internal_name='crossZones'): + continue + + if self._is_find_option('checksum') and not self._find_option_match( + template=tmpl, + param_name='checksum'): + continue + + if self._is_find_option('display_text') and not self._find_option_match( + template=tmpl, + param_name='display_text', + internal_name='displaytext'): + continue + + if not template_found: + template_found = tmpl + # A cross zones template has one entry per zone but the same id + elif tmpl['id'] == template_found['id']: + continue + else: + self.fail_json(msg="Multiple templates found matching provided params. Please use template_find_options.") + + return template_found + + def extract_template(self): + template = self.get_template() + if not template: + self.module.fail_json(msg="Failed: template not found") + + args = { + 'id': template['id'], + 'url': self.module.params.get('url'), + 'mode': self.module.params.get('mode'), + 'zoneid': self.get_zone(key='id') + } + self.result['changed'] = True + + if not self.module.check_mode: + template = self.query_api('extractTemplate', **args) + + poll_async = self.module.params.get('poll_async') + if poll_async: + template = self.poll_job(template, 'template') + return template + + def remove_template(self): + template = self.get_template() + if template: + self.result['changed'] = True + + args = { + 'id': template['id'] + } + if not self.module.params.get('cross_zones'): + args['zoneid'] = self.get_zone(key='id') + + if not self.module.check_mode: + res = self.query_api('deleteTemplate', **args) + + poll_async = self.module.params.get('poll_async') + if poll_async: + res = self.poll_job(res, 'template') + return template + + def get_result(self, template): + super(AnsibleCloudStackTemplate, self).get_result(template) + if template: + if 'isextractable' in template: + self.result['is_extractable'] = True if template['isextractable'] else False + if 'isfeatured' in template: + self.result['is_featured'] = True if template['isfeatured'] else False + if 'ispublic' in template: + self.result['is_public'] = True if template['ispublic'] else False + return self.result + + +def main(): + argument_spec = cs_argument_spec() + argument_spec.update(dict( + name=dict(required=True), + display_text=dict(), + url=dict(), + vm=dict(), + snapshot=dict(), + os_type=dict(), + is_ready=dict(type='bool', removed_in_version='2.11'), + is_public=dict(type='bool'), + 
is_featured=dict(type='bool'), + is_dynamically_scalable=dict(type='bool'), + is_extractable=dict(type='bool'), + is_routing=dict(type='bool'), + checksum=dict(), + template_filter=dict(default='self', choices=['all', 'featured', 'self', 'selfexecutable', 'sharedexecutable', 'executable', 'community']), + template_find_options=dict(type='list', choices=['display_text', 'checksum', 'cross_zones'], aliases=['template_find_option'], default=[]), + hypervisor=dict(), + requires_hvm=dict(type='bool'), + password_enabled=dict(type='bool'), + template_tag=dict(), + sshkey_enabled=dict(type='bool'), + format=dict(choices=['QCOW2', 'RAW', 'VHD', 'OVA']), + details=dict(), + bits=dict(type='int', choices=[32, 64], default=64), + state=dict(choices=['present', 'absent', 'extracted'], default='present'), + cross_zones=dict(type='bool', default=False), + mode=dict(choices=['http_download', 'ftp_upload'], default='http_download'), + zone=dict(), + domain=dict(), + account=dict(), + project=dict(), + poll_async=dict(type='bool', default=True), + tags=dict(type='list', aliases=['tag']), + )) + + module = AnsibleModule( + argument_spec=argument_spec, + required_together=cs_required_together(), + mutually_exclusive=( + ['url', 'vm'], + ['zone', 'cross_zones'], + ), + supports_check_mode=True + ) + + acs_tpl = AnsibleCloudStackTemplate(module) + + state = module.params.get('state') + if state == 'absent': + tpl = acs_tpl.remove_template() + + elif state == 'extracted': + tpl = acs_tpl.extract_template() + else: + tpl = acs_tpl.present_template() + + result = acs_tpl.get_result(tpl) + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/cloudstack/cs_traffic_type.py b/plugins/modules/cloud/cloudstack/cs_traffic_type.py new file mode 100644 index 0000000000..ac6d565489 --- /dev/null +++ b/plugins/modules/cloud/cloudstack/cs_traffic_type.py @@ -0,0 +1,327 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# (c) 2019, Patryk D. Cichy +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: cs_traffic_type +short_description: Manages traffic types on CloudStack Physical Networks +description: + - Add, remove, update Traffic Types associated with CloudStack Physical Networks. +extends_documentation_fragment: +- community.general.cloudstack + +author: + - Patryk Cichy (@PatTheSilent) +options: + physical_network: + description: + - the name of the Physical Network + required: true + type: str + zone: + description: + - Name of the zone with the physical network. + - Default zone will be used if this is empty. + type: str + traffic_type: + description: + - the trafficType to be added to the physical network. + required: true + choices: [Management, Guest, Public, Storage] + type: str + state: + description: + - State of the traffic type + choices: [present, absent] + default: present + type: str + hyperv_networklabel: + description: + - The network name label of the physical device dedicated to this traffic on a HyperV host. + type: str + isolation_method: + description: + - Use if the physical network has multiple isolation types and traffic type is public. 
+ choices: [vlan, vxlan] + type: str + kvm_networklabel: + description: + - The network name label of the physical device dedicated to this traffic on a KVM host. + type: str + ovm3_networklabel: + description: + - The network name of the physical device dedicated to this traffic on an OVM3 host. + type: str + vlan: + description: + - The VLAN id to be used for Management traffic by VMware host. + type: str + vmware_networklabel: + description: + - The network name label of the physical device dedicated to this traffic on a VMware host. + type: str + xen_networklabel: + description: + - The network name label of the physical device dedicated to this traffic on a XenServer host. + type: str + poll_async: + description: + - Poll async jobs until job has finished. + default: yes + type: bool +''' + +EXAMPLES = ''' +- name: add a traffic type + cs_traffic_type: + physical_network: public-network + traffic_type: Guest + zone: test-zone + delegate_to: localhost + +- name: update traffic type + cs_traffic_type: + physical_network: public-network + traffic_type: Guest + kvm_networklabel: cloudbr0 + zone: test-zone + delegate_to: localhost + +- name: remove traffic type + cs_traffic_type: + physical_network: public-network + traffic_type: Public + state: absent + zone: test-zone + delegate_to: localhost +''' + +RETURN = ''' +--- +id: + description: ID of the network provider + returned: success + type: str + sample: 659c1840-9374-440d-a412-55ca360c9d3c +traffic_type: + description: the trafficType that was added to the physical network + returned: success + type: str + sample: Public +hyperv_networklabel: + description: The network name label of the physical device dedicated to this traffic on a HyperV host + returned: success + type: str + sample: HyperV Internal Switch +kvm_networklabel: + description: The network name label of the physical device dedicated to this traffic on a KVM host + returned: success + type: str + sample: cloudbr0 +ovm3_networklabel: + description: The network name of the physical device dedicated to this traffic on an OVM3 host + returned: success + type: str + sample: cloudbr0 +physical_network: + description: the physical network this belongs to + returned: success + type: str + sample: 28ed70b7-9a1f-41bf-94c3-53a9f22da8b6 +vmware_networklabel: + description: The network name label of the physical device dedicated to this traffic on a VMware host + returned: success + type: str + sample: Management Network +xen_networklabel: + description: The network name label of the physical device dedicated to this traffic on a XenServer host + returned: success + type: str + sample: xenbr0 +zone: + description: Name of zone the physical network is in. 
+ returned: success + type: str + sample: ch-gva-2 +''' + +from ansible_collections.community.general.plugins.module_utils.cloudstack import AnsibleCloudStack, cs_argument_spec, cs_required_together +from ansible.module_utils.basic import AnsibleModule + + +class AnsibleCloudStackTrafficType(AnsibleCloudStack): + + def __init__(self, module): + super(AnsibleCloudStackTrafficType, self).__init__(module) + self.returns = { + 'traffictype': 'traffic_type', + 'hypervnetworklabel': 'hyperv_networklabel', + 'kvmnetworklabel': 'kvm_networklabel', + 'ovm3networklabel': 'ovm3_networklabel', + 'physicalnetworkid': 'physical_network', + 'vmwarenetworklabel': 'vmware_networklabel', + 'xennetworklabel': 'xen_networklabel' + } + + self.traffic_type = None + + def _get_label_args(self): + label_args = dict() + if self.module.params.get('hyperv_networklabel'): + label_args.update(dict(hypervnetworklabel=self.module.params.get('hyperv_networklabel'))) + if self.module.params.get('kvm_networklabel'): + label_args.update(dict(kvmnetworklabel=self.module.params.get('kvm_networklabel'))) + if self.module.params.get('ovm3_networklabel'): + label_args.update(dict(ovm3networklabel=self.module.params.get('ovm3_networklabel'))) + if self.module.params.get('vmware_networklabel'): + label_args.update(dict(vmwarenetworklabel=self.module.params.get('vmware_networklabel'))) + return label_args + + def _get_additional_args(self): + additional_args = dict() + + if self.module.params.get('isolation_method'): + additional_args.update(dict(isolationmethod=self.module.params.get('isolation_method'))) + + if self.module.params.get('vlan'): + additional_args.update(dict(vlan=self.module.params.get('vlan'))) + + additional_args.update(self._get_label_args()) + + return additional_args + + def get_traffic_types(self): + args = { + 'physicalnetworkid': self.get_physical_network(key='id') + } + traffic_types = self.query_api('listTrafficTypes', **args) + return traffic_types + + def get_traffic_type(self): + if self.traffic_type: + return self.traffic_type + + traffic_type = self.module.params.get('traffic_type') + + traffic_types = self.get_traffic_types() + + if traffic_types: + for t_type in traffic_types['traffictype']: + if traffic_type.lower() in [t_type['traffictype'].lower(), t_type['id']]: + self.traffic_type = t_type + break + return self.traffic_type + + def present_traffic_type(self): + traffic_type = self.get_traffic_type() + if traffic_type: + self.traffic_type = self.update_traffic_type() + else: + self.result['changed'] = True + self.traffic_type = self.add_traffic_type() + + return self.traffic_type + + def add_traffic_type(self): + traffic_type = self.module.params.get('traffic_type') + args = { + 'physicalnetworkid': self.get_physical_network(key='id'), + 'traffictype': traffic_type + } + args.update(self._get_additional_args()) + if not self.module.check_mode: + resource = self.query_api('addTrafficType', **args) + poll_async = self.module.params.get('poll_async') + if poll_async: + self.traffic_type = self.poll_job(resource, 'traffictype') + return self.traffic_type + + def absent_traffic_type(self): + traffic_type = self.get_traffic_type() + if traffic_type: + + args = { + 'id': traffic_type['id'] + } + self.result['changed'] = True + if not self.module.check_mode: + resource = self.query_api('deleteTrafficType', **args) + poll_async = self.module.params.get('poll_async') + if poll_async: + self.poll_job(resource, 'traffictype') + + return traffic_type + + def update_traffic_type(self): + + traffic_type = 
self.get_traffic_type() + args = { + 'id': traffic_type['id'] + } + args.update(self._get_label_args()) + if self.has_changed(args, traffic_type): + self.result['changed'] = True + if not self.module.check_mode: + resource = self.query_api('updateTrafficType', **args) + poll_async = self.module.params.get('poll_async') + if poll_async: + self.traffic_type = self.poll_job(resource, 'traffictype') + + return self.traffic_type + + +def setup_module_object(): + argument_spec = cs_argument_spec() + argument_spec.update(dict( + physical_network=dict(required=True), + zone=dict(), + state=dict(choices=['present', 'absent'], default='present'), + traffic_type=dict(required=True, choices=['Management', 'Guest', 'Public', 'Storage']), + hyperv_networklabel=dict(), + isolation_method=dict(choices=['vlan', 'vxlan']), + kvm_networklabel=dict(), + ovm3_networklabel=dict(), + vlan=dict(), + vmware_networklabel=dict(), + xen_networklabel=dict(), + poll_async=dict(type='bool', default=True) + )) + + module = AnsibleModule( + argument_spec=argument_spec, + required_together=cs_required_together(), + supports_check_mode=True + ) + return module + + +def execute_module(module): + actt = AnsibleCloudStackTrafficType(module) + state = module.params.get('state') + + if state in ['present']: + result = actt.present_traffic_type() + else: + result = actt.absent_traffic_type() + + return actt.get_result(result) + + +def main(): + module = setup_module_object() + result = execute_module(module) + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/cloudstack/cs_user.py b/plugins/modules/cloud/cloudstack/cs_user.py new file mode 100644 index 0000000000..abbf2788d3 --- /dev/null +++ b/plugins/modules/cloud/cloudstack/cs_user.py @@ -0,0 +1,445 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (c) 2015, René Moser +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['stableinterface'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: cs_user +short_description: Manages users on Apache CloudStack based clouds. +description: + - Create, update, disable, lock, enable and remove users. +author: René Moser (@resmo) +options: + username: + description: + - Username of the user. + type: str + required: true + account: + description: + - Account the user will be created under. + - Required on I(state=present). + type: str + password: + description: + - Password of the user to be created. + - Required on I(state=present). + - Only considered on creation and will not be updated if user exists. + type: str + first_name: + description: + - First name of the user. + - Required on I(state=present). + type: str + last_name: + description: + - Last name of the user. + - Required on I(state=present). + type: str + email: + description: + - Email of the user. + - Required on I(state=present). + type: str + timezone: + description: + - Timezone of the user. + type: str + keys_registered: + description: + - If API keys of the user should be generated. + - "Note: Keys can not be removed by the API again." + type: bool + default: no + domain: + description: + - Domain the user is related to. + type: str + default: ROOT + state: + description: + - State of the user. + - C(unlocked) is an alias for C(enabled). 
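+ # enabled/unlocked map to enableUser, disabled to disableUser and locked
+ # to lockUser in the state handling of main() below.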
+ type: str
+ default: present
+ choices: [ present, absent, enabled, disabled, locked, unlocked ]
+ poll_async:
+ description:
+ - Poll async jobs until job has finished.
+ type: bool
+ default: yes
+extends_documentation_fragment:
+- community.general.cloudstack
+
+'''
+
+EXAMPLES = '''
+- name: Create a user in domain 'CUSTOMERS'
+ cs_user:
+ account: developers
+ username: johndoe
+ password: S3Cur3
+ last_name: Doe
+ first_name: John
+ email: john.doe@example.com
+ domain: CUSTOMERS
+ delegate_to: localhost
+
+- name: Lock an existing user in domain 'CUSTOMERS'
+ cs_user:
+ username: johndoe
+ domain: CUSTOMERS
+ state: locked
+ delegate_to: localhost
+
+- name: Disable an existing user in domain 'CUSTOMERS'
+ cs_user:
+ username: johndoe
+ domain: CUSTOMERS
+ state: disabled
+ delegate_to: localhost
+
+- name: Enable/unlock an existing user in domain 'CUSTOMERS'
+ cs_user:
+ username: johndoe
+ domain: CUSTOMERS
+ state: enabled
+ delegate_to: localhost
+
+- name: Remove a user in domain 'CUSTOMERS'
+ cs_user:
+ username: customer_xy
+ domain: CUSTOMERS
+ state: absent
+ delegate_to: localhost
+'''
+
+RETURN = '''
+---
+id:
+ description: UUID of the user.
+ returned: success
+ type: str
+ sample: 87b1e0ce-4e01-11e4-bb66-0050569e64b8
+username:
+ description: Username of the user.
+ returned: success
+ type: str
+ sample: johndoe
+first_name:
+ description: First name of the user.
+ returned: success
+ type: str
+ sample: John
+last_name:
+ description: Last name of the user.
+ returned: success
+ type: str
+ sample: Doe
+email:
+ description: Email of the user.
+ returned: success
+ type: str
+ sample: john.doe@example.com
+user_api_key:
+ description: API key of the user.
+ returned: success
+ type: str
+ sample: JLhcg8VWi8DoFqL2sSLZMXmGojcLnFrOBTipvBHJjySODcV4mCOo29W2duzPv5cALaZnXj5QxDx3xQfaQt3DKg
+user_api_secret:
+ description: API secret of the user.
+ returned: success
+ type: str
+ sample: FUELo3LB9fa1UopjTLPdqLv_6OXQMJZv9g9N4B_Ao3HFz8d6IGFCV9MbPFNM8mwz00wbMevja1DoUNDvI8C9-g
+account:
+ description: Account name of the user.
+ returned: success
+ type: str
+ sample: developers
+account_type:
+ description: Type of the account.
+ returned: success
+ type: str
+ sample: user
+timezone:
+ description: Timezone of the user.
+ returned: success
+ type: str
+ sample: America/New_York
+created:
+ description: Date the user was created.
+ returned: success
+ type: str
+ sample: 2015-05-03T15:05:51+0200
+state:
+ description: State of the user.
+ returned: success
+ type: str
+ sample: enabled
+domain:
+ description: Domain the user is related to.
+ returned: success + type: str + sample: ROOT +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.cloudstack import ( + AnsibleCloudStack, + cs_argument_spec, + cs_required_together, +) + + +class AnsibleCloudStackUser(AnsibleCloudStack): + + def __init__(self, module): + super(AnsibleCloudStackUser, self).__init__(module) + self.returns = { + 'username': 'username', + 'firstname': 'first_name', + 'lastname': 'last_name', + 'email': 'email', + 'secretkey': 'user_api_secret', + 'apikey': 'user_api_key', + 'timezone': 'timezone', + } + self.account_types = { + 'user': 0, + 'root_admin': 1, + 'domain_admin': 2, + } + self.user = None + + def get_account_type(self): + account_type = self.module.params.get('account_type') + return self.account_types[account_type] + + def get_user(self): + if not self.user: + args = { + 'domainid': self.get_domain('id'), + 'fetch_list': True, + } + + users = self.query_api('listUsers', **args) + + if users: + user_name = self.module.params.get('username') + for u in users: + if user_name.lower() == u['username'].lower(): + self.user = u + break + return self.user + + def enable_user(self): + user = self.get_user() + if not user: + user = self.present_user() + + if user['state'].lower() != 'enabled': + self.result['changed'] = True + args = { + 'id': user['id'], + } + if not self.module.check_mode: + res = self.query_api('enableUser', **args) + user = res['user'] + return user + + def lock_user(self): + user = self.get_user() + if not user: + user = self.present_user() + + # we need to enable the user to lock it. + if user['state'].lower() == 'disabled': + user = self.enable_user() + + if user['state'].lower() != 'locked': + self.result['changed'] = True + + args = { + 'id': user['id'], + } + + if not self.module.check_mode: + res = self.query_api('lockUser', **args) + user = res['user'] + + return user + + def disable_user(self): + user = self.get_user() + if not user: + user = self.present_user() + + if user['state'].lower() != 'disabled': + self.result['changed'] = True + args = { + 'id': user['id'], + } + if not self.module.check_mode: + user = self.query_api('disableUser', **args) + + poll_async = self.module.params.get('poll_async') + if poll_async: + user = self.poll_job(user, 'user') + return user + + def present_user(self): + required_params = [ + 'account', + 'email', + 'password', + 'first_name', + 'last_name', + ] + self.module.fail_on_missing_params(required_params=required_params) + + user = self.get_user() + if user: + user = self._update_user(user) + else: + user = self._create_user(user) + return user + + def _get_common_args(self): + return { + 'firstname': self.module.params.get('first_name'), + 'lastname': self.module.params.get('last_name'), + 'email': self.module.params.get('email'), + 'timezone': self.module.params.get('timezone'), + } + + def _create_user(self, user): + self.result['changed'] = True + + args = self._get_common_args() + args.update({ + 'account': self.get_account(key='name'), + 'domainid': self.get_domain('id'), + 'username': self.module.params.get('username'), + 'password': self.module.params.get('password'), + }) + + if not self.module.check_mode: + res = self.query_api('createUser', **args) + user = res['user'] + + # register user api keys + if self.module.params.get('keys_registered'): + res = self.query_api('registerUserKeys', id=user['id']) + user.update(res['userkeys']) + + return user + + def _update_user(self, user): + args = 
self._get_common_args() + args.update({ + 'id': user['id'], + }) + + if self.has_changed(args, user): + self.result['changed'] = True + + if not self.module.check_mode: + res = self.query_api('updateUser', **args) + + user = res['user'] + + # register user api keys + if 'apikey' not in user and self.module.params.get('keys_registered'): + self.result['changed'] = True + + if not self.module.check_mode: + res = self.query_api('registerUserKeys', id=user['id']) + user.update(res['userkeys']) + return user + + def absent_user(self): + user = self.get_user() + if user: + self.result['changed'] = True + + if not self.module.check_mode: + self.query_api('deleteUser', id=user['id']) + + return user + + def get_result(self, user): + super(AnsibleCloudStackUser, self).get_result(user) + if user: + if 'accounttype' in user: + for key, value in self.account_types.items(): + if value == user['accounttype']: + self.result['account_type'] = key + break + + # secretkey has been removed since CloudStack 4.10 from listUsers API + if self.module.params.get('keys_registered') and 'apikey' in user and 'secretkey' not in user: + user_keys = self.query_api('getUserKeys', id=user['id']) + if user_keys: + self.result['user_api_secret'] = user_keys['userkeys'].get('secretkey') + + return self.result + + +def main(): + argument_spec = cs_argument_spec() + argument_spec.update(dict( + username=dict(required=True), + account=dict(), + state=dict(choices=['present', 'absent', 'enabled', 'disabled', 'locked', 'unlocked'], default='present'), + domain=dict(default='ROOT'), + email=dict(), + first_name=dict(), + last_name=dict(), + password=dict(no_log=True), + timezone=dict(), + keys_registered=dict(type='bool', default=False), + poll_async=dict(type='bool', default=True), + )) + + module = AnsibleModule( + argument_spec=argument_spec, + required_together=cs_required_together(), + supports_check_mode=True + ) + + acs_acc = AnsibleCloudStackUser(module) + + state = module.params.get('state') + + if state == 'absent': + user = acs_acc.absent_user() + + elif state in ['enabled', 'unlocked']: + user = acs_acc.enable_user() + + elif state == 'disabled': + user = acs_acc.disable_user() + + elif state == 'locked': + user = acs_acc.lock_user() + + else: + user = acs_acc.present_user() + + result = acs_acc.get_result(user) + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/cloudstack/cs_vlan_ip_range.py b/plugins/modules/cloud/cloudstack/cs_vlan_ip_range.py new file mode 100644 index 0000000000..cd672e6b24 --- /dev/null +++ b/plugins/modules/cloud/cloudstack/cs_vlan_ip_range.py @@ -0,0 +1,388 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (c) 2018, David Passante <@dpassante> +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: cs_vlan_ip_range +short_description: Manages VLAN IP ranges on Apache CloudStack based clouds. +description: + - Create and delete VLAN IP range. +author: David Passante (@dpassante) +options: + network: + description: + - The network name or id. + - Required if I(for_virtual_network) and I(physical_network) are not set. + type: str + physical_network: + description: + - The physical network name or id. 
+ type: str
+ start_ip:
+ description:
+ - The beginning IPv4 address in the VLAN IP range.
+ - Only considered on create.
+ type: str
+ required: true
+ end_ip:
+ description:
+ - The ending IPv4 address in the VLAN IP range.
+ - If not specified, value of I(start_ip) is used.
+ - Only considered on create.
+ type: str
+ gateway:
+ description:
+ - The gateway of the VLAN IP range.
+ - Required if I(state=present).
+ type: str
+ netmask:
+ description:
+ - The netmask of the VLAN IP range.
+ - Required if I(state=present).
+ type: str
+ start_ipv6:
+ description:
+ - The beginning IPv6 address in the IPv6 network range.
+ - Only considered on create.
+ type: str
+ end_ipv6:
+ description:
+ - The ending IPv6 address in the IPv6 network range.
+ - If not specified, value of I(start_ipv6) is used.
+ - Only considered on create.
+ type: str
+ gateway_ipv6:
+ description:
+ - The gateway of the IPv6 network.
+ - Only considered on create.
+ type: str
+ cidr_ipv6:
+ description:
+ - The CIDR of IPv6 network, must be at least /64.
+ type: str
+ vlan:
+ description:
+ - The ID or VID of the VLAN.
+ - If not specified, will be defaulted to the VLAN of the network.
+ type: str
+ state:
+ description:
+ - State of the VLAN IP range.
+ type: str
+ default: present
+ choices: [ present, absent ]
+ zone:
+ description:
+ - The Zone ID of the VLAN IP range.
+ - If not set, default zone is used.
+ type: str
+ domain:
+ description:
+ - Domain of the account owning the VLAN.
+ type: str
+ account:
+ description:
+ - Account who owns the VLAN.
+ - Mutually exclusive with I(project).
+ type: str
+ project:
+ description:
+ - Project who owns the VLAN.
+ - Mutually exclusive with I(account).
+ type: str
+ for_virtual_network:
+ description:
+ - C(yes) if VLAN is of Virtual type, C(no) if Direct.
+ - If set to C(yes) but neither I(physical_network) nor I(network) is set, CloudStack will try to add the
+ VLAN range to the Physical Network with a Public traffic type.
+ type: bool
+ default: no
+extends_documentation_fragment:
+- community.general.cloudstack
+
+'''
+
+EXAMPLES = '''
+- name: create a VLAN IP range for network test
+ cs_vlan_ip_range:
+ network: test
+ vlan: 98
+ start_ip: 10.2.4.10
+ end_ip: 10.2.4.100
+ gateway: 10.2.4.1
+ netmask: 255.255.255.0
+ zone: zone-02
+ delegate_to: localhost
+
+- name: remove a VLAN IP range for network test
+ cs_vlan_ip_range:
+ state: absent
+ network: test
+ start_ip: 10.2.4.10
+ end_ip: 10.2.4.100
+ zone: zone-02
+ delegate_to: localhost
+'''
+
+RETURN = '''
+---
+id:
+ description: UUID of the VLAN IP range.
+ returned: success
+ type: str
+ sample: 04589590-ac63-4ffc-93f5-b698b8ac38b6
+network:
+ description: The network of the VLAN IP range.
+ returned: if available
+ type: str
+ sample: test
+vlan:
+ description: The ID or VID of the VLAN.
+ returned: success
+ type: str
+ sample: vlan://98
+gateway:
+ description: IPv4 gateway.
+ returned: success
+ type: str
+ sample: 10.2.4.1
+netmask:
+ description: IPv4 netmask.
+ returned: success
+ type: str
+ sample: 255.255.255.0
+gateway_ipv6:
+ description: IPv6 gateway.
+ returned: if available
+ type: str
+ sample: 2001:db8::1
+cidr_ipv6:
+ description: The CIDR of IPv6 network.
+ returned: if available
+ type: str
+ sample: 2001:db8::/64
+zone:
+ description: Name of zone.
+ returned: success
+ type: str
+ sample: zone-02
+domain:
+ description: Domain name of the VLAN IP range.
+ returned: success
+ type: str
+ sample: ROOT
+account:
+ description: Account who owns the network.
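+ # account and project are mutually exclusive, mirroring the module options.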
+ returned: if available + type: str + sample: example account +project: + description: Project who owns the network. + returned: if available + type: str + sample: example project +for_systemvms: + description: Whether VLAN IP range is dedicated to system vms or not. + returned: success + type: bool + sample: false +for_virtual_network: + description: Whether VLAN IP range is of Virtual type or not. + returned: success + type: bool + sample: false +physical_network: + description: The physical network VLAN IP range belongs to. + returned: success + type: str + sample: 04589590-ac63-4ffc-93f5-b698b8ac38b6 +start_ip: + description: The start ip of the VLAN IP range. + returned: success + type: str + sample: 10.2.4.10 +end_ip: + description: The end ip of the VLAN IP range. + returned: success + type: str + sample: 10.2.4.100 +start_ipv6: + description: The start ipv6 of the VLAN IP range. + returned: if available + type: str + sample: 2001:db8::10 +end_ipv6: + description: The end ipv6 of the VLAN IP range. + returned: if available + type: str + sample: 2001:db8::50 +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.cloudstack import ( + AnsibleCloudStack, + cs_argument_spec, + cs_required_together, +) + + +class AnsibleCloudStackVlanIpRange(AnsibleCloudStack): + + def __init__(self, module): + super(AnsibleCloudStackVlanIpRange, self).__init__(module) + self.returns = { + 'startip': 'start_ip', + 'endip': 'end_ip', + 'physicalnetworkid': 'physical_network', + 'vlan': 'vlan', + 'forsystemvms': 'for_systemvms', + 'forvirtualnetwork': 'for_virtual_network', + 'gateway': 'gateway', + 'netmask': 'netmask', + 'ip6gateway': 'gateway_ipv6', + 'ip6cidr': 'cidr_ipv6', + 'startipv6': 'start_ipv6', + 'endipv6': 'end_ipv6', + } + self.ip_range = None + + def get_vlan_ip_range(self): + if not self.ip_range: + args = { + 'zoneid': self.get_zone(key='id'), + 'projectid': self.get_project(key='id'), + 'account': self.get_account(key='name'), + 'domainid': self.get_domain(key='id'), + 'networkid': self.get_network(key='id'), + } + + res = self.query_api('listVlanIpRanges', **args) + if res: + ip_range_list = res['vlaniprange'] + + params = { + 'startip': self.module.params.get('start_ip'), + 'endip': self.get_or_fallback('end_ip', 'start_ip'), + } + + for ipr in ip_range_list: + if params['startip'] == ipr['startip'] and params['endip'] == ipr['endip']: + self.ip_range = ipr + break + + return self.ip_range + + def present_vlan_ip_range(self): + ip_range = self.get_vlan_ip_range() + + if not ip_range: + ip_range = self.create_vlan_ip_range() + + return ip_range + + def create_vlan_ip_range(self): + self.result['changed'] = True + + vlan = self.module.params.get('vlan') + + args = { + 'zoneid': self.get_zone(key='id'), + 'projectid': self.get_project(key='id'), + 'account': self.get_account(key='name'), + 'domainid': self.get_domain(key='id'), + 'startip': self.module.params.get('start_ip'), + 'endip': self.get_or_fallback('end_ip', 'start_ip'), + 'netmask': self.module.params.get('netmask'), + 'gateway': self.module.params.get('gateway'), + 'startipv6': self.module.params.get('start_ipv6'), + 'endipv6': self.get_or_fallback('end_ipv6', 'start_ipv6'), + 'ip6gateway': self.module.params.get('gateway_ipv6'), + 'ip6cidr': self.module.params.get('cidr_ipv6'), + 'vlan': self.get_network(key='vlan') if not vlan else vlan, + 'networkid': self.get_network(key='id'), + 'forvirtualnetwork': self.module.params.get('for_virtual_network'), + } + 
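+        # Note on the args built above: get_or_fallback() reuses the start
+        # address when no end address is given (a one-address range), and the
+        # `vlan` argument falls back to the VLAN of the resolved network.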
if self.module.params.get('physical_network'): + args['physicalnetworkid'] = self.get_physical_network(key='id') + + if not self.module.check_mode: + res = self.query_api('createVlanIpRange', **args) + + self.ip_range = res['vlan'] + + return self.ip_range + + def absent_vlan_ip_range(self): + ip_range = self.get_vlan_ip_range() + + if ip_range: + self.result['changed'] = True + + args = { + 'id': ip_range['id'], + } + + if not self.module.check_mode: + self.query_api('deleteVlanIpRange', **args) + + return ip_range + + +def main(): + argument_spec = cs_argument_spec() + argument_spec.update(dict( + network=dict(type='str'), + physical_network=dict(type='str'), + zone=dict(type='str'), + start_ip=dict(type='str', required=True), + end_ip=dict(type='str'), + gateway=dict(type='str'), + netmask=dict(type='str'), + start_ipv6=dict(type='str'), + end_ipv6=dict(type='str'), + gateway_ipv6=dict(type='str'), + cidr_ipv6=dict(type='str'), + vlan=dict(type='str'), + state=dict(choices=['present', 'absent'], default='present'), + domain=dict(type='str'), + account=dict(type='str'), + project=dict(type='str'), + for_virtual_network=dict(type='bool', default=False), + )) + + module = AnsibleModule( + argument_spec=argument_spec, + required_together=cs_required_together(), + mutually_exclusive=( + ['account', 'project'], + ), + required_if=(("state", "present", ("gateway", "netmask")),), + supports_check_mode=True, + ) + + acs_vlan_ip_range = AnsibleCloudStackVlanIpRange(module) + + state = module.params.get('state') + if state == 'absent': + ipr = acs_vlan_ip_range.absent_vlan_ip_range() + + else: + ipr = acs_vlan_ip_range.present_vlan_ip_range() + + result = acs_vlan_ip_range.get_result(ipr) + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/cloudstack/cs_vmsnapshot.py b/plugins/modules/cloud/cloudstack/cs_vmsnapshot.py new file mode 100644 index 0000000000..11d3634e4c --- /dev/null +++ b/plugins/modules/cloud/cloudstack/cs_vmsnapshot.py @@ -0,0 +1,284 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# (c) 2015, René Moser +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['stableinterface'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: cs_vmsnapshot +short_description: Manages VM snapshots on Apache CloudStack based clouds. +description: + - Create, remove and revert VM from snapshots. +author: René Moser (@resmo) +options: + name: + description: + - Unique Name of the snapshot. In CloudStack terms display name. + type: str + required: true + aliases: [ display_name ] + vm: + description: + - Name of the virtual machine. + type: str + required: true + description: + description: + - Description of the snapshot. + type: str + snapshot_memory: + description: + - Snapshot memory if set to true. + default: no + type: bool + zone: + description: + - Name of the zone in which the VM is in. If not set, default zone is used. + type: str + project: + description: + - Name of the project the VM is assigned to. + type: str + state: + description: + - State of the snapshot. + type: str + default: present + choices: [ present, absent, revert ] + domain: + description: + - Domain the VM snapshot is related to. + type: str + account: + description: + - Account the VM snapshot is related to. 
+ type: str + poll_async: + description: + - Poll async jobs until job has finished. + default: yes + type: bool + tags: + description: + - List of tags. Tags are a list of dictionaries having keys I(key) and I(value). + - "To delete all tags, set a empty list e.g. I(tags: [])." + type: list + aliases: [ tag ] +extends_documentation_fragment: +- community.general.cloudstack + +''' + +EXAMPLES = ''' +- name: Create a VM snapshot of disk and memory before an upgrade + cs_vmsnapshot: + name: Snapshot before upgrade + vm: web-01 + snapshot_memory: yes + delegate_to: localhost + +- name: Revert a VM to a snapshot after a failed upgrade + cs_vmsnapshot: + name: Snapshot before upgrade + vm: web-01 + state: revert + delegate_to: localhost + +- name: Remove a VM snapshot after successful upgrade + cs_vmsnapshot: + name: Snapshot before upgrade + vm: web-01 + state: absent + delegate_to: localhost +''' + +RETURN = ''' +--- +id: + description: UUID of the snapshot. + returned: success + type: str + sample: a6f7a5fc-43f8-11e5-a151-feff819cdc9f +name: + description: Name of the snapshot. + returned: success + type: str + sample: snapshot before update +display_name: + description: Display name of the snapshot. + returned: success + type: str + sample: snapshot before update +created: + description: date of the snapshot. + returned: success + type: str + sample: 2015-03-29T14:57:06+0200 +current: + description: true if the snapshot is current + returned: success + type: bool + sample: True +state: + description: state of the vm snapshot + returned: success + type: str + sample: Allocated +type: + description: type of vm snapshot + returned: success + type: str + sample: DiskAndMemory +description: + description: description of vm snapshot + returned: success + type: str + sample: snapshot brought to you by Ansible +domain: + description: Domain the vm snapshot is related to. + returned: success + type: str + sample: example domain +account: + description: Account the vm snapshot is related to. + returned: success + type: str + sample: example account +project: + description: Name of project the vm snapshot is related to. 
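+# Illustrative note on the I(tags) option documented above (values are
+# examples only): tags are passed as key/value dictionaries,
+#
+#   tags:
+#     - key: env
+#       value: prod
+#
+# and I(tags: []) removes all tags from the snapshot.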
+ returned: success + type: str + sample: Production +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.cloudstack import ( + AnsibleCloudStack, + cs_argument_spec, + cs_required_together +) + + +class AnsibleCloudStackVmSnapshot(AnsibleCloudStack): + + def __init__(self, module): + super(AnsibleCloudStackVmSnapshot, self).__init__(module) + self.returns = { + 'type': 'type', + 'current': 'current', + } + + def get_snapshot(self): + args = { + 'virtualmachineid': self.get_vm('id'), + 'account': self.get_account('name'), + 'domainid': self.get_domain('id'), + 'projectid': self.get_project('id'), + 'name': self.module.params.get('name'), + } + snapshots = self.query_api('listVMSnapshot', **args) + if snapshots: + return snapshots['vmSnapshot'][0] + return None + + def create_snapshot(self): + snapshot = self.get_snapshot() + if not snapshot: + self.result['changed'] = True + + args = { + 'virtualmachineid': self.get_vm('id'), + 'name': self.module.params.get('name'), + 'description': self.module.params.get('description'), + 'snapshotmemory': self.module.params.get('snapshot_memory'), + } + if not self.module.check_mode: + res = self.query_api('createVMSnapshot', **args) + + poll_async = self.module.params.get('poll_async') + if res and poll_async: + snapshot = self.poll_job(res, 'vmsnapshot') + + if snapshot: + snapshot = self.ensure_tags(resource=snapshot, resource_type='Snapshot') + + return snapshot + + def remove_snapshot(self): + snapshot = self.get_snapshot() + if snapshot: + self.result['changed'] = True + if not self.module.check_mode: + res = self.query_api('deleteVMSnapshot', vmsnapshotid=snapshot['id']) + + poll_async = self.module.params.get('poll_async') + if res and poll_async: + res = self.poll_job(res, 'vmsnapshot') + return snapshot + + def revert_vm_to_snapshot(self): + snapshot = self.get_snapshot() + if snapshot: + self.result['changed'] = True + + if snapshot['state'] != "Ready": + self.module.fail_json(msg="snapshot state is '%s', not ready, could not revert VM" % snapshot['state']) + + if not self.module.check_mode: + res = self.query_api('revertToVMSnapshot', vmsnapshotid=snapshot['id']) + + poll_async = self.module.params.get('poll_async') + if res and poll_async: + res = self.poll_job(res, 'vmsnapshot') + return snapshot + + self.module.fail_json(msg="snapshot not found, could not revert VM") + + +def main(): + argument_spec = cs_argument_spec() + argument_spec.update(dict( + name=dict(required=True, aliases=['display_name']), + vm=dict(required=True), + description=dict(), + zone=dict(), + snapshot_memory=dict(type='bool', default=False), + state=dict(choices=['present', 'absent', 'revert'], default='present'), + domain=dict(), + account=dict(), + project=dict(), + poll_async=dict(type='bool', default=True), + tags=dict(type='list', aliases=['tag']), + )) + + module = AnsibleModule( + argument_spec=argument_spec, + required_together=cs_required_together(), + supports_check_mode=True + ) + + acs_vmsnapshot = AnsibleCloudStackVmSnapshot(module) + + state = module.params.get('state') + if state in ['revert']: + snapshot = acs_vmsnapshot.revert_vm_to_snapshot() + elif state in ['absent']: + snapshot = acs_vmsnapshot.remove_snapshot() + else: + snapshot = acs_vmsnapshot.create_snapshot() + + result = acs_vmsnapshot.get_result(snapshot) + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/cloudstack/cs_volume.py 
b/plugins/modules/cloud/cloudstack/cs_volume.py new file mode 100644 index 0000000000..c280155f5d --- /dev/null +++ b/plugins/modules/cloud/cloudstack/cs_volume.py @@ -0,0 +1,573 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# (c) 2015, Jefferson Girão +# (c) 2015, René Moser +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['stableinterface'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: cs_volume +short_description: Manages volumes on Apache CloudStack based clouds. +description: + - Create, destroy, attach, detach, extract or upload volumes. +author: + - Jefferson Girão (@jeffersongirao) + - René Moser (@resmo) +options: + name: + description: + - Name of the volume. + - I(name) can only contain ASCII letters. + type: str + required: true + account: + description: + - Account the volume is related to. + type: str + device_id: + description: + - ID of the device on a VM the volume is attached to. + - Only considered if I(state) is C(attached). + type: int + custom_id: + description: + - Custom id to the resource. + - Allowed to Root Admins only. + type: str + disk_offering: + description: + - Name of the disk offering to be used. + - Required one of I(disk_offering), I(snapshot) if volume is not already I(state=present). + type: str + display_volume: + description: + - Whether to display the volume to the end user or not. + - Allowed to Root Admins only. + type: bool + domain: + description: + - Name of the domain the volume to be deployed in. + type: str + max_iops: + description: + - Max iops + type: int + min_iops: + description: + - Min iops + type: int + project: + description: + - Name of the project the volume to be deployed in. + type: str + size: + description: + - Size of disk in GB + type: int + snapshot: + description: + - The snapshot name for the disk volume. + - Required one of I(disk_offering), I(snapshot) if volume is not already I(state=present). + type: str + force: + description: + - Force removal of volume even it is attached to a VM. + - Considered on I(state=absent) only. + default: no + type: bool + shrink_ok: + description: + - Whether to allow to shrink the volume. + default: no + type: bool + vm: + description: + - Name of the virtual machine to attach the volume to. + type: str + zone: + description: + - Name of the zone in which the volume should be deployed. + - If not set, default zone is used. + type: str + state: + description: + - State of the volume. + - The choices C(extracted) and C(uploaded) were added in version 2.8. + type: str + default: present + choices: [ present, absent, attached, detached, extracted, uploaded ] + poll_async: + description: + - Poll async jobs until job has finished. + default: yes + type: bool + tags: + description: + - List of tags. Tags are a list of dictionaries having keys I(key) and I(value). + - "To delete all tags, set a empty list e.g. I(tags: [])." + type: list + aliases: [ tag ] + url: + description: + - URL to which the volume would be extracted on I(state=extracted) + - or the URL where to download the volume on I(state=uploaded). + - Only considered if I(state) is C(extracted) or C(uploaded). + type: str + mode: + description: + - Mode for the volume extraction. + - Only considered if I(state=extracted). 
+    type: str
+    choices: [ http_download, ftp_upload ]
+    default: http_download
+  format:
+    description:
+      - The format for the volume.
+      - Only considered if I(state=uploaded).
+    type: str
+    choices: [ QCOW2, RAW, VHD, VHDX, OVA ]
+extends_documentation_fragment:
+- community.general.cloudstack
+
+'''
+
+EXAMPLES = '''
+- name: create volume within project and zone with specified storage options
+  cs_volume:
+    name: web-vm-1-volume
+    project: Integration
+    zone: ch-zrh-ix-01
+    disk_offering: PerfPlus Storage
+    size: 20
+  delegate_to: localhost
+
+- name: create/attach volume to instance
+  cs_volume:
+    name: web-vm-1-volume
+    disk_offering: PerfPlus Storage
+    size: 20
+    vm: web-vm-1
+    state: attached
+  delegate_to: localhost
+
+- name: detach volume
+  cs_volume:
+    name: web-vm-1-volume
+    state: detached
+  delegate_to: localhost
+
+- name: remove volume
+  cs_volume:
+    name: web-vm-1-volume
+    state: absent
+  delegate_to: localhost
+
+# New in version 2.8
+- name: Extract DATA volume to make it downloadable
+  cs_volume:
+    state: extracted
+    name: web-vm-1-volume
+  register: data_vol_out
+  delegate_to: localhost
+
+- name: Create new volume by downloading source volume
+  cs_volume:
+    state: uploaded
+    name: web-vm-1-volume-2
+    format: VHD
+    url: "{{ data_vol_out.url }}"
+  delegate_to: localhost
+'''
+
+RETURN = '''
+id:
+  description: ID of the volume.
+  returned: success
+  type: str
+  sample:
+name:
+  description: Name of the volume.
+  returned: success
+  type: str
+  sample: web-volume-01
+display_name:
+  description: Display name of the volume.
+  returned: success
+  type: str
+  sample: web-volume-01
+group:
+  description: Group the volume belongs to.
+  returned: success
+  type: str
+  sample: web
+domain:
+  description: Domain the volume belongs to.
+  returned: success
+  type: str
+  sample: example domain
+project:
+  description: Project the volume belongs to.
+  returned: success
+  type: str
+  sample: Production
+zone:
+  description: Name of zone the volume is in.
+  returned: success
+  type: str
+  sample: ch-gva-2
+created:
+  description: Date the volume was created.
+  returned: success
+  type: str
+  sample: 2014-12-01T14:57:57+0100
+attached:
+  description: Date the volume was attached.
+  returned: success
+  type: str
+  sample: 2014-12-01T14:57:57+0100
+type:
+  description: Disk volume type.
+  returned: success
+  type: str
+  sample: DATADISK
+size:
+  description: Size of disk volume.
+  returned: success
+  type: int
+  sample: 20
+vm:
+  description: Name of the vm the volume is attached to (not returned when detached).
+  returned: success
+  type: str
+  sample: web-01
+state:
+  description: State of the volume.
+  returned: success
+  type: str
+  sample: Attached
+device_id:
+  description: Id of the device on user vm the volume is attached to (not returned when detached).
+  returned: success
+  type: int
+  sample: 1
+url:
+  description: The URL of the uploaded volume or the download URL, depending on the extraction mode.
+ returned: success when I(state=extracted) + type: str + sample: http://1.12.3.4/userdata/387e2c7c-7c42-4ecc-b4ed-84e8367a1965.vhd + version_added: '2.8' +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.cloudstack import ( + AnsibleCloudStack, + cs_required_together, + cs_argument_spec +) + + +class AnsibleCloudStackVolume(AnsibleCloudStack): + + def __init__(self, module): + super(AnsibleCloudStackVolume, self).__init__(module) + self.returns = { + 'group': 'group', + 'attached': 'attached', + 'vmname': 'vm', + 'deviceid': 'device_id', + 'type': 'type', + 'size': 'size', + 'url': 'url', + } + self.volume = None + + def get_volume(self): + if not self.volume: + args = { + 'account': self.get_account(key='name'), + 'domainid': self.get_domain(key='id'), + 'projectid': self.get_project(key='id'), + 'zoneid': self.get_zone(key='id'), + 'displayvolume': self.module.params.get('display_volume'), + 'type': 'DATADISK', + 'fetch_list': True, + } + # Do not filter on DATADISK when state=extracted + if self.module.params.get('state') == 'extracted': + del args['type'] + + volumes = self.query_api('listVolumes', **args) + if volumes: + volume_name = self.module.params.get('name') + for v in volumes: + if volume_name.lower() == v['name'].lower(): + self.volume = v + break + return self.volume + + def get_snapshot(self, key=None): + snapshot = self.module.params.get('snapshot') + if not snapshot: + return None + + args = { + 'name': snapshot, + 'account': self.get_account('name'), + 'domainid': self.get_domain('id'), + 'projectid': self.get_project('id'), + } + snapshots = self.query_api('listSnapshots', **args) + if snapshots: + return self._get_by_key(key, snapshots['snapshot'][0]) + self.module.fail_json(msg="Snapshot with name %s not found" % snapshot) + + def present_volume(self): + volume = self.get_volume() + if volume: + volume = self.update_volume(volume) + else: + disk_offering_id = self.get_disk_offering(key='id') + snapshot_id = self.get_snapshot(key='id') + + if not disk_offering_id and not snapshot_id: + self.module.fail_json(msg="Required one of: disk_offering,snapshot") + + self.result['changed'] = True + + args = { + 'name': self.module.params.get('name'), + 'account': self.get_account(key='name'), + 'domainid': self.get_domain(key='id'), + 'diskofferingid': disk_offering_id, + 'displayvolume': self.module.params.get('display_volume'), + 'maxiops': self.module.params.get('max_iops'), + 'miniops': self.module.params.get('min_iops'), + 'projectid': self.get_project(key='id'), + 'size': self.module.params.get('size'), + 'snapshotid': snapshot_id, + 'zoneid': self.get_zone(key='id') + } + if not self.module.check_mode: + res = self.query_api('createVolume', **args) + poll_async = self.module.params.get('poll_async') + if poll_async: + volume = self.poll_job(res, 'volume') + if volume: + volume = self.ensure_tags(resource=volume, resource_type='Volume') + self.volume = volume + + return volume + + def attached_volume(self): + volume = self.present_volume() + + if volume: + if volume.get('virtualmachineid') != self.get_vm(key='id'): + self.result['changed'] = True + + if not self.module.check_mode: + volume = self.detached_volume() + + if 'attached' not in volume: + self.result['changed'] = True + + args = { + 'id': volume['id'], + 'virtualmachineid': self.get_vm(key='id'), + 'deviceid': self.module.params.get('device_id'), + } + if not self.module.check_mode: + res = self.query_api('attachVolume', **args) + 
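+                    # attachVolume is an asynchronous CloudStack job; with
+                    # poll_async enabled the job result is polled below so the
+                    # returned volume reflects its post-attach state.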
poll_async = self.module.params.get('poll_async') + if poll_async: + volume = self.poll_job(res, 'volume') + return volume + + def detached_volume(self): + volume = self.present_volume() + + if volume: + if 'attached' not in volume: + return volume + + self.result['changed'] = True + + if not self.module.check_mode: + res = self.query_api('detachVolume', id=volume['id']) + poll_async = self.module.params.get('poll_async') + if poll_async: + volume = self.poll_job(res, 'volume') + return volume + + def absent_volume(self): + volume = self.get_volume() + + if volume: + if 'attached' in volume and not self.module.params.get('force'): + self.module.fail_json(msg="Volume '%s' is attached, use force=true for detaching and removing the volume." % volume.get('name')) + + self.result['changed'] = True + if not self.module.check_mode: + volume = self.detached_volume() + res = self.query_api('deleteVolume', id=volume['id']) + poll_async = self.module.params.get('poll_async') + if poll_async: + self.poll_job(res, 'volume') + + return volume + + def update_volume(self, volume): + args_resize = { + 'id': volume['id'], + 'diskofferingid': self.get_disk_offering(key='id'), + 'maxiops': self.module.params.get('max_iops'), + 'miniops': self.module.params.get('min_iops'), + 'size': self.module.params.get('size') + } + # change unit from bytes to giga bytes to compare with args + volume_copy = volume.copy() + volume_copy['size'] = volume_copy['size'] / (2**30) + + if self.has_changed(args_resize, volume_copy): + + self.result['changed'] = True + if not self.module.check_mode: + args_resize['shrinkok'] = self.module.params.get('shrink_ok') + res = self.query_api('resizeVolume', **args_resize) + poll_async = self.module.params.get('poll_async') + if poll_async: + volume = self.poll_job(res, 'volume') + self.volume = volume + + return volume + + def extract_volume(self): + volume = self.get_volume() + if not volume: + self.module.fail_json(msg="Failed: volume not found") + + args = { + 'id': volume['id'], + 'url': self.module.params.get('url'), + 'mode': self.module.params.get('mode').upper(), + 'zoneid': self.get_zone(key='id') + } + self.result['changed'] = True + + if not self.module.check_mode: + res = self.query_api('extractVolume', **args) + poll_async = self.module.params.get('poll_async') + if poll_async: + volume = self.poll_job(res, 'volume') + self.volume = volume + + return volume + + def upload_volume(self): + volume = self.get_volume() + if not volume: + disk_offering_id = self.get_disk_offering(key='id') + + self.result['changed'] = True + + args = { + 'name': self.module.params.get('name'), + 'account': self.get_account(key='name'), + 'domainid': self.get_domain(key='id'), + 'projectid': self.get_project(key='id'), + 'zoneid': self.get_zone(key='id'), + 'format': self.module.params.get('format'), + 'url': self.module.params.get('url'), + 'diskofferingid': disk_offering_id, + } + if not self.module.check_mode: + res = self.query_api('uploadVolume', **args) + poll_async = self.module.params.get('poll_async') + if poll_async: + volume = self.poll_job(res, 'volume') + if volume: + volume = self.ensure_tags(resource=volume, resource_type='Volume') + self.volume = volume + + return volume + + +def main(): + argument_spec = cs_argument_spec() + argument_spec.update(dict( + name=dict(required=True), + disk_offering=dict(), + display_volume=dict(type='bool'), + max_iops=dict(type='int'), + min_iops=dict(type='int'), + size=dict(type='int'), + snapshot=dict(), + vm=dict(), + device_id=dict(type='int'), + 
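+        # Reminder for the flags below: `force` lets state=absent detach an
+        # attached volume before deleting it, and `shrink_ok` must be set for
+        # resizeVolume to reduce the size (see update_volume above).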
custom_id=dict(), + force=dict(type='bool', default=False), + shrink_ok=dict(type='bool', default=False), + state=dict(default='present', choices=[ + 'present', + 'absent', + 'attached', + 'detached', + 'extracted', + 'uploaded', + ]), + zone=dict(), + domain=dict(), + account=dict(), + project=dict(), + poll_async=dict(type='bool', default=True), + tags=dict(type='list', aliases=['tag']), + url=dict(), + mode=dict(choices=['http_download', 'ftp_upload'], default='http_download'), + format=dict(choices=['QCOW2', 'RAW', 'VHD', 'VHDX', 'OVA']), + )) + + module = AnsibleModule( + argument_spec=argument_spec, + required_together=cs_required_together(), + mutually_exclusive=( + ['snapshot', 'disk_offering'], + ), + required_if=[ + ('state', 'uploaded', ['url', 'format']), + ], + supports_check_mode=True + ) + + acs_vol = AnsibleCloudStackVolume(module) + + state = module.params.get('state') + + if state in ['absent']: + volume = acs_vol.absent_volume() + elif state in ['attached']: + volume = acs_vol.attached_volume() + elif state in ['detached']: + volume = acs_vol.detached_volume() + elif state == 'extracted': + volume = acs_vol.extract_volume() + elif state == 'uploaded': + volume = acs_vol.upload_volume() + else: + volume = acs_vol.present_volume() + + result = acs_vol.get_result(volume) + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/cloudstack/cs_vpc.py b/plugins/modules/cloud/cloudstack/cs_vpc.py new file mode 100644 index 0000000000..dc2301e74a --- /dev/null +++ b/plugins/modules/cloud/cloudstack/cs_vpc.py @@ -0,0 +1,398 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (c) 2016, René Moser +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['stableinterface'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: cs_vpc +short_description: "Manages VPCs on Apache CloudStack based clouds." +description: + - Create, update and delete VPCs. +author: René Moser (@resmo) +options: + name: + description: + - Name of the VPC. + type: str + required: true + display_text: + description: + - Display text of the VPC. + - If not set, I(name) will be used for creating. + type: str + cidr: + description: + - CIDR of the VPC, e.g. 10.1.0.0/16 + - All VPC guest networks' CIDRs must be within this CIDR. + - Required on I(state=present). + type: str + network_domain: + description: + - Network domain for the VPC. + - All networks inside the VPC will belong to this domain. + - Only considered while creating the VPC, can not be changed. + type: str + vpc_offering: + description: + - Name of the VPC offering. + - If not set, default VPC offering is used. + type: str + clean_up: + description: + - Whether to redeploy a VPC router or not when I(state=restarted) + type: bool + state: + description: + - State of the VPC. + - The state C(present) creates a started VPC. + - The state C(stopped) is only considered while creating the VPC, added in version 2.6. + type: str + default: present + choices: + - present + - absent + - stopped + - restarted + domain: + description: + - Domain the VPC is related to. + type: str + account: + description: + - Account the VPC is related to. + type: str + project: + description: + - Name of the project the VPC is related to. + type: str + zone: + description: + - Name of the zone. 
+ - If not set, default zone is used. + type: str + tags: + description: + - List of tags. Tags are a list of dictionaries having keys I(key) and I(value). + - "For deleting all tags, set an empty list e.g. I(tags: [])." + type: list + aliases: [ tag ] + poll_async: + description: + - Poll async jobs until job has finished. + default: yes + type: bool +extends_documentation_fragment: +- community.general.cloudstack + +''' + +EXAMPLES = ''' +- name: Ensure a VPC is present but not started after creating + cs_vpc: + name: my_vpc + display_text: My example VPC + cidr: 10.10.0.0/16 + state: stopped + delegate_to: localhost + +- name: Ensure a VPC is present and started after creating + cs_vpc: + name: my_vpc + display_text: My example VPC + cidr: 10.10.0.0/16 + delegate_to: localhost + +- name: Ensure a VPC is absent + cs_vpc: + name: my_vpc + state: absent + delegate_to: localhost + +- name: Ensure a VPC is restarted with clean up + cs_vpc: + name: my_vpc + clean_up: yes + state: restarted + delegate_to: localhost +''' + +RETURN = ''' +--- +id: + description: "UUID of the VPC." + returned: success + type: str + sample: 04589590-ac63-4ffc-93f5-b698b8ac38b6 +name: + description: "Name of the VPC." + returned: success + type: str + sample: my_vpc +display_text: + description: "Display text of the VPC." + returned: success + type: str + sample: My example VPC +cidr: + description: "CIDR of the VPC." + returned: success + type: str + sample: 10.10.0.0/16 +network_domain: + description: "Network domain of the VPC." + returned: success + type: str + sample: example.com +region_level_vpc: + description: "Whether the VPC is region level or not." + returned: success + type: bool + sample: true +restart_required: + description: "Whether the VPC router needs a restart or not." + returned: success + type: bool + sample: true +distributed_vpc_router: + description: "Whether the VPC uses distributed router or not." + returned: success + type: bool + sample: true +redundant_vpc_router: + description: "Whether the VPC has redundant routers or not." + returned: success + type: bool + sample: true +domain: + description: "Domain the VPC is related to." + returned: success + type: str + sample: example domain +account: + description: "Account the VPC is related to." + returned: success + type: str + sample: example account +project: + description: "Name of project the VPC is related to." + returned: success + type: str + sample: Production +zone: + description: "Name of zone the VPC is in." + returned: success + type: str + sample: ch-gva-2 +state: + description: "State of the VPC." + returned: success + type: str + sample: Enabled +tags: + description: "List of resource tags associated with the VPC." 
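+# Illustrative follow-up (task layout is an assumption): the returned
+# `restart_required` flag can gate a restart, e.g.
+#
+#   - cs_vpc:
+#       name: my_vpc
+#       cidr: 10.10.0.0/16
+#     register: vpc_out
+#     delegate_to: localhost
+#
+#   - cs_vpc:
+#       name: my_vpc
+#       state: restarted
+#     when: vpc_out.restart_required
+#     delegate_to: localhost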
+  returned: success
+  type: list
+  sample: '[ { "key": "foo", "value": "bar" } ]'
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.cloudstack import (
+    AnsibleCloudStack,
+    cs_argument_spec,
+    cs_required_together,
+)
+
+
+class AnsibleCloudStackVpc(AnsibleCloudStack):
+
+    def __init__(self, module):
+        super(AnsibleCloudStackVpc, self).__init__(module)
+        self.returns = {
+            'cidr': 'cidr',
+            'networkdomain': 'network_domain',
+            'redundantvpcrouter': 'redundant_vpc_router',
+            'distributedvpcrouter': 'distributed_vpc_router',
+            'regionlevelvpc': 'region_level_vpc',
+            'restartrequired': 'restart_required',
+        }
+        self.vpc = None
+
+    def get_vpc_offering(self, key=None):
+        vpc_offering = self.module.params.get('vpc_offering')
+        args = {
+            'state': 'Enabled',
+        }
+        if vpc_offering:
+            args['name'] = vpc_offering
+            fail_msg = "VPC offering not found or not enabled: %s" % vpc_offering
+        else:
+            args['isdefault'] = True
+            fail_msg = "No enabled default VPC offering found"
+
+        vpc_offerings = self.query_api('listVPCOfferings', **args)
+        if vpc_offerings:
+            # The API name argument filter also matches substrings, we have to
+            # iterate over the results to get an exact match
+            for vo in vpc_offerings['vpcoffering']:
+                if 'name' in args:
+                    if args['name'] == vo['name']:
+                        return self._get_by_key(key, vo)
+                # Return the first offering found, if not queried for the name
+                else:
+                    return self._get_by_key(key, vo)
+        self.module.fail_json(msg=fail_msg)
+
+    def get_vpc(self):
+        if self.vpc:
+            return self.vpc
+        args = {
+            'account': self.get_account(key='name'),
+            'domainid': self.get_domain(key='id'),
+            'projectid': self.get_project(key='id'),
+            'zoneid': self.get_zone(key='id'),
+            'fetch_list': True,
+        }
+        vpcs = self.query_api('listVPCs', **args)
+        if vpcs:
+            vpc_name = self.module.params.get('name')
+            for v in vpcs:
+                if vpc_name in [v['name'], v['displaytext'], v['id']]:
+                    # Fail if the identifier matches more than one VPC
+                    if self.vpc:
+                        self.module.fail_json(msg="More than one VPC found with the provided identifier: %s" % vpc_name)
+                    else:
+                        self.vpc = v
+        return self.vpc
+
+    def restart_vpc(self):
+        self.result['changed'] = True
+        vpc = self.get_vpc()
+        if vpc and not self.module.check_mode:
+            args = {
+                'id': vpc['id'],
+                'cleanup': self.module.params.get('clean_up'),
+            }
+            res = self.query_api('restartVPC', **args)
+
+            poll_async = self.module.params.get('poll_async')
+            if poll_async:
+                self.poll_job(res, 'vpc')
+        return vpc
+
+    def present_vpc(self):
+        vpc = self.get_vpc()
+        if not vpc:
+            vpc = self._create_vpc(vpc)
+        else:
+            vpc = self._update_vpc(vpc)
+
+        if vpc:
+            vpc = self.ensure_tags(resource=vpc, resource_type='Vpc')
+        return vpc
+
+    def _create_vpc(self, vpc):
+        self.result['changed'] = True
+        args = {
+            'name': self.module.params.get('name'),
+            'displaytext': self.get_or_fallback('display_text', 'name'),
+            'networkdomain': self.module.params.get('network_domain'),
+            'vpcofferingid': self.get_vpc_offering(key='id'),
+            'cidr': self.module.params.get('cidr'),
+            'account': self.get_account(key='name'),
+            'domainid': self.get_domain(key='id'),
+            'projectid': self.get_project(key='id'),
+            'zoneid': self.get_zone(key='id'),
+            'start': self.module.params.get('state') != 'stopped'
+        }
+        self.result['diff']['after'] = args
+        if not self.module.check_mode:
+            res = self.query_api('createVPC', **args)
+
+            poll_async = self.module.params.get('poll_async')
+            if poll_async:
+                vpc = self.poll_job(res, 'vpc')
+        return vpc
+
+    def
_update_vpc(self, vpc): + args = { + 'id': vpc['id'], + 'displaytext': self.module.params.get('display_text'), + } + if self.has_changed(args, vpc): + self.result['changed'] = True + if not self.module.check_mode: + res = self.query_api('updateVPC', **args) + + poll_async = self.module.params.get('poll_async') + if poll_async: + vpc = self.poll_job(res, 'vpc') + return vpc + + def absent_vpc(self): + vpc = self.get_vpc() + if vpc: + self.result['changed'] = True + self.result['diff']['before'] = vpc + if not self.module.check_mode: + res = self.query_api('deleteVPC', id=vpc['id']) + + poll_async = self.module.params.get('poll_async') + if poll_async: + self.poll_job(res, 'vpc') + return vpc + + +def main(): + argument_spec = cs_argument_spec() + argument_spec.update(dict( + name=dict(required=True), + cidr=dict(), + display_text=dict(), + vpc_offering=dict(), + network_domain=dict(), + clean_up=dict(type='bool'), + state=dict(choices=['present', 'absent', 'stopped', 'restarted'], default='present'), + domain=dict(), + account=dict(), + project=dict(), + zone=dict(), + tags=dict(type='list', aliases=['tag']), + poll_async=dict(type='bool', default=True), + )) + + module = AnsibleModule( + argument_spec=argument_spec, + required_together=cs_required_together(), + required_if=[ + ('state', 'present', ['cidr']), + ], + supports_check_mode=True, + ) + + acs_vpc = AnsibleCloudStackVpc(module) + + state = module.params.get('state') + if state == 'absent': + vpc = acs_vpc.absent_vpc() + elif state == 'restarted': + vpc = acs_vpc.restart_vpc() + else: + vpc = acs_vpc.present_vpc() + + result = acs_vpc.get_result(vpc) + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/cloudstack/cs_vpc_offering.py b/plugins/modules/cloud/cloudstack/cs_vpc_offering.py new file mode 100644 index 0000000000..4f26182d07 --- /dev/null +++ b/plugins/modules/cloud/cloudstack/cs_vpc_offering.py @@ -0,0 +1,323 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (c) 2017, David Passante (@dpassante) +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: cs_vpc_offering +short_description: Manages vpc offerings on Apache CloudStack based clouds. +description: + - Create, update, enable, disable and remove CloudStack VPC offerings. +author: David Passante (@dpassante) +options: + name: + description: + - The name of the vpc offering + type: str + required: true + state: + description: + - State of the vpc offering. + type: str + choices: [ enabled, present, disabled, absent ] + default: present + display_text: + description: + - Display text of the vpc offerings + type: str + service_capabilities: + description: + - Desired service capabilities as part of vpc offering. + type: list + aliases: [ service_capability ] + service_offering: + description: + - The name or ID of the service offering for the VPC router appliance. + type: str + supported_services: + description: + - Services supported by the vpc offering + type: list + aliases: [ supported_service ] + service_providers: + description: + - provider to service mapping. 
If not specified, the provider for the service will be mapped to the default provider on the physical network + type: list + aliases: [ service_provider ] + poll_async: + description: + - Poll async jobs until job has finished. + default: yes + type: bool +extends_documentation_fragment: +- community.general.cloudstack + +''' + +EXAMPLES = ''' +- name: Create a vpc offering and enable it + cs_vpc_offering: + name: my_vpc_offering + display_text: vpc offering description + state: enabled + supported_services: [ Dns, Dhcp ] + service_providers: + - {service: 'dns', provider: 'VpcVirtualRouter'} + - {service: 'dhcp', provider: 'VpcVirtualRouter'} + delegate_to: localhost + +- name: Create a vpc offering with redundant router + cs_vpc_offering: + name: my_vpc_offering + display_text: vpc offering description + supported_services: [ Dns, Dhcp, SourceNat ] + service_providers: + - {service: 'dns', provider: 'VpcVirtualRouter'} + - {service: 'dhcp', provider: 'VpcVirtualRouter'} + - {service: 'SourceNat', provider: 'VpcVirtualRouter'} + service_capabilities: + - {service: 'SourceNat', capabilitytype: 'RedundantRouter', capabilityvalue: true} + delegate_to: localhost + +- name: Create a region level vpc offering with distributed router + cs_vpc_offering: + name: my_vpc_offering + display_text: vpc offering description + state: present + supported_services: [ Dns, Dhcp, SourceNat ] + service_providers: + - {service: 'dns', provider: 'VpcVirtualRouter'} + - {service: 'dhcp', provider: 'VpcVirtualRouter'} + - {service: 'SourceNat', provider: 'VpcVirtualRouter'} + service_capabilities: + - {service: 'Connectivity', capabilitytype: 'DistributedRouter', capabilityvalue: true} + - {service: 'Connectivity', capabilitytype: 'RegionLevelVPC', capabilityvalue: true} + delegate_to: localhost + +- name: Remove a vpc offering + cs_vpc_offering: + name: my_vpc_offering + state: absent + delegate_to: localhost +''' + +RETURN = ''' +--- +id: + description: UUID of the vpc offering. + returned: success + type: str + sample: a6f7a5fc-43f8-11e5-a151-feff819cdc9f +name: + description: The name of the vpc offering + returned: success + type: str + sample: MyCustomVPCOffering +display_text: + description: The display text of the vpc offering + returned: success + type: str + sample: My vpc offering +state: + description: The state of the vpc offering + returned: success + type: str + sample: Enabled +service_offering_id: + description: The service offering ID. + returned: success + type: str + sample: c5f7a5fc-43f8-11e5-a151-feff819cdc9f +is_default: + description: Whether VPC offering is the default offering or not. + returned: success + type: bool + sample: false +region_level: + description: Indicated if the offering can support region level vpc. + returned: success + type: bool + sample: false +distributed: + description: Indicates if the vpc offering supports distributed router for one-hop forwarding. 
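+# Illustrative note: `state: enabled` / `state: disabled` are applied through
+# the update path, so an existing offering can be toggled in place, e.g.
+#
+#   - cs_vpc_offering:
+#       name: my_vpc_offering
+#       state: disabled
+#     delegate_to: localhost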
+ returned: success + type: bool + sample: false +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.cloudstack import ( + AnsibleCloudStack, + cs_argument_spec, + cs_required_together, +) + + +class AnsibleCloudStackVPCOffering(AnsibleCloudStack): + + def __init__(self, module): + super(AnsibleCloudStackVPCOffering, self).__init__(module) + self.returns = { + 'serviceofferingid': 'service_offering_id', + 'isdefault': 'is_default', + 'distributedvpcrouter': 'distributed', + 'supportsregionLevelvpc': 'region_level', + } + self.vpc_offering = None + + def get_vpc_offering(self): + if self.vpc_offering: + return self.vpc_offering + + args = { + 'name': self.module.params.get('name'), + } + vo = self.query_api('listVPCOfferings', **args) + + if vo: + for vpc_offer in vo['vpcoffering']: + if args['name'] == vpc_offer['name']: + self.vpc_offering = vpc_offer + + return self.vpc_offering + + def get_service_offering_id(self): + service_offering = self.module.params.get('service_offering') + if not service_offering: + return None + + args = { + 'issystem': True + } + + service_offerings = self.query_api('listServiceOfferings', **args) + if service_offerings: + for s in service_offerings['serviceoffering']: + if service_offering in [s['name'], s['id']]: + return s['id'] + self.fail_json(msg="Service offering '%s' not found" % service_offering) + + def create_or_update(self): + vpc_offering = self.get_vpc_offering() + + if not vpc_offering: + vpc_offering = self.create_vpc_offering() + + return self.update_vpc_offering(vpc_offering) + + def create_vpc_offering(self): + vpc_offering = None + self.result['changed'] = True + args = { + 'name': self.module.params.get('name'), + 'state': self.module.params.get('state'), + 'displaytext': self.module.params.get('display_text'), + 'supportedservices': self.module.params.get('supported_services'), + 'serviceproviderlist': self.module.params.get('service_providers'), + 'serviceofferingid': self.get_service_offering_id(), + 'servicecapabilitylist': self.module.params.get('service_capabilities'), + } + + required_params = [ + 'display_text', + 'supported_services', + ] + self.module.fail_on_missing_params(required_params=required_params) + + if not self.module.check_mode: + res = self.query_api('createVPCOffering', **args) + poll_async = self.module.params.get('poll_async') + if poll_async: + vpc_offering = self.poll_job(res, 'vpcoffering') + + return vpc_offering + + def delete_vpc_offering(self): + vpc_offering = self.get_vpc_offering() + + if vpc_offering: + self.result['changed'] = True + + args = { + 'id': vpc_offering['id'], + } + + if not self.module.check_mode: + res = self.query_api('deleteVPCOffering', **args) + poll_async = self.module.params.get('poll_async') + if poll_async: + vpc_offering = self.poll_job(res, 'vpcoffering') + + return vpc_offering + + def update_vpc_offering(self, vpc_offering): + if not vpc_offering: + return vpc_offering + + args = { + 'id': vpc_offering['id'], + 'state': self.module.params.get('state'), + 'name': self.module.params.get('name'), + 'displaytext': self.module.params.get('display_text'), + } + + if args['state'] in ['enabled', 'disabled']: + args['state'] = args['state'].title() + else: + del args['state'] + + if self.has_changed(args, vpc_offering): + self.result['changed'] = True + + if not self.module.check_mode: + res = self.query_api('updateVPCOffering', **args) + poll_async = self.module.params.get('poll_async') + if poll_async: + 
                    vpc_offering = self.poll_job(res, 'vpcoffering')
+
+        return vpc_offering
+
+
+def main():
+    argument_spec = cs_argument_spec()
+    argument_spec.update(dict(
+        name=dict(required=True),
+        display_text=dict(),
+        state=dict(choices=['enabled', 'present', 'disabled', 'absent'], default='present'),
+        service_capabilities=dict(type='list', aliases=['service_capability']),
+        service_offering=dict(),
+        supported_services=dict(type='list', aliases=['supported_service']),
+        service_providers=dict(type='list', aliases=['service_provider']),
+        poll_async=dict(type='bool', default=True),
+    ))
+
+    module = AnsibleModule(
+        argument_spec=argument_spec,
+        required_together=cs_required_together(),
+        supports_check_mode=True
+    )
+
+    acs_vpc_offering = AnsibleCloudStackVPCOffering(module)
+
+    state = module.params.get('state')
+    if state in ['absent']:
+        vpc_offering = acs_vpc_offering.delete_vpc_offering()
+    else:
+        vpc_offering = acs_vpc_offering.create_or_update()
+
+    result = acs_vpc_offering.get_result(vpc_offering)
+
+    module.exit_json(**result)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/cloud/cloudstack/cs_vpn_connection.py b/plugins/modules/cloud/cloudstack/cs_vpn_connection.py
new file mode 100644
index 0000000000..a6b67d7474
--- /dev/null
+++ b/plugins/modules/cloud/cloudstack/cs_vpn_connection.py
@@ -0,0 +1,355 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2017, René Moser
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+DOCUMENTATION = r'''
+---
+module: cs_vpn_connection
+short_description: Manages site-to-site VPN connections on Apache CloudStack based clouds.
+description:
+  - Create and remove VPN connections.
+author: René Moser (@resmo)
+options:
+  vpc:
+    description:
+      - Name of the VPC the VPN connection is related to.
+    type: str
+    required: true
+  vpn_customer_gateway:
+    description:
+      - Name of the VPN customer gateway.
+    type: str
+    required: true
+  passive:
+    description:
+      - Whether the VPN connection is in passive mode or not.
+      - Only considered when I(state=present).
+    default: no
+    type: bool
+  force:
+    description:
+      - Activate the VPN gateway if not already activated on I(state=present).
+      - Also see M(cs_vpn_gateway).
+    default: no
+    type: bool
+  state:
+    description:
+      - State of the VPN connection.
+    type: str
+    default: present
+    choices: [ present, absent ]
+  zone:
+    description:
+      - Name of the zone the VPC is related to.
+      - If not set, default zone is used.
+    type: str
+  domain:
+    description:
+      - Domain the VPN connection is related to.
+    type: str
+  account:
+    description:
+      - Account the VPN connection is related to.
+    type: str
+  project:
+    description:
+      - Name of the project the VPN connection is related to.
+    type: str
+  poll_async:
+    description:
+      - Poll async jobs until job has finished.
+    default: yes
+    type: bool
+extends_documentation_fragment:
+- community.general.cloudstack
+
+'''
+
+EXAMPLES = r'''
+- name: Create a VPN connection with activated VPN gateway
+  cs_vpn_connection:
+    vpn_customer_gateway: my vpn connection
+    vpc: my vpc
+  delegate_to: localhost
+
+- name: Create a VPN connection and force VPN gateway activation
+  cs_vpn_connection:
+    vpn_customer_gateway: my vpn connection
+    vpc: my vpc
+    force: yes
+  delegate_to: localhost
+
+- name: Remove a vpn connection
+  cs_vpn_connection:
+    vpn_customer_gateway: my vpn connection
+    vpc: my vpc
+    state: absent
+  delegate_to: localhost
+'''
+
+RETURN = r'''
+---
+id:
+  description: UUID of the VPN connection.
+  returned: success
+  type: str
+  sample: 04589590-ac63-4ffc-93f5-b698b8ac38b6
+vpn_gateway_id:
+  description: UUID of the VPN gateway.
+  returned: success
+  type: str
+  sample: 04589590-ac63-93f5-4ffc-b698b8ac38b6
+domain:
+  description: Domain the VPN connection is related to.
+  returned: success
+  type: str
+  sample: example domain
+account:
+  description: Account the VPN connection is related to.
+  returned: success
+  type: str
+  sample: example account
+project:
+  description: Name of project the VPN connection is related to.
+  returned: success
+  type: str
+  sample: Production
+created:
+  description: Date the connection was created.
+  returned: success
+  type: str
+  sample: 2014-12-01T14:57:57+0100
+dpd:
+  description: Whether dead peer detection is enabled or not.
+  returned: success
+  type: bool
+  sample: true
+esp_lifetime:
+  description: Lifetime in seconds of phase 2 VPN connection.
+  returned: success
+  type: int
+  sample: 86400
+esp_policy:
+  description: ESP policy of the VPN connection.
+  returned: success
+  type: str
+  sample: aes256-sha1;modp1536
+force_encap:
+  description: Whether encapsulation for NAT traversal is enforced or not.
+  returned: success
+  type: bool
+  sample: true
+ike_lifetime:
+  description: Lifetime in seconds of phase 1 VPN connection.
+  returned: success
+  type: int
+  sample: 86400
+ike_policy:
+  description: IKE policy of the VPN connection.
+  returned: success
+  type: str
+  sample: aes256-sha1;modp1536
+cidrs:
+  description: List of CIDRs of the customer gateway.
+  returned: success
+  type: list
+  sample: [ 10.10.10.0/24 ]
+passive:
+  description: Whether the connection is passive or not.
+  returned: success
+  type: bool
+  sample: false
+public_ip:
+  description: IP address of the VPN gateway.
+  returned: success
+  type: str
+  sample: 10.100.212.10
+gateway:
+  description: IP address of the VPN customer gateway.
+  returned: success
+  type: str
+  sample: 10.101.214.10
+state:
+  description: State of the VPN connection.
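+# Illustrative check (variable name is an assumption): the returned `state`
+# can be verified after creating the connection, e.g.
+#
+#   - cs_vpn_connection:
+#       vpn_customer_gateway: my vpn connection
+#       vpc: my vpc
+#     register: vpn_out
+#     delegate_to: localhost
+#
+#   - assert:
+#       that: vpn_out.state == 'Connected'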
+ returned: success + type: str + sample: Connected +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.cloudstack import ( + AnsibleCloudStack, + cs_argument_spec, + cs_required_together +) + + +class AnsibleCloudStackVpnConnection(AnsibleCloudStack): + + def __init__(self, module): + super(AnsibleCloudStackVpnConnection, self).__init__(module) + self.returns = { + 'dpd': 'dpd', + 'esplifetime': 'esp_lifetime', + 'esppolicy': 'esp_policy', + 'gateway': 'gateway', + 'ikepolicy': 'ike_policy', + 'ikelifetime': 'ike_lifetime', + 'publicip': 'public_ip', + 'passive': 'passive', + 's2svpngatewayid': 'vpn_gateway_id', + } + self.vpn_customer_gateway = None + + def get_vpn_customer_gateway(self, key=None, identifier=None, refresh=False): + if not refresh and self.vpn_customer_gateway: + return self._get_by_key(key, self.vpn_customer_gateway) + + args = { + 'account': self.get_account(key='name'), + 'domainid': self.get_domain(key='id'), + 'projectid': self.get_project(key='id'), + 'fetch_list': True, + } + + vpn_customer_gateway = identifier or self.module.params.get('vpn_customer_gateway') + vcgws = self.query_api('listVpnCustomerGateways', **args) + if vcgws: + for vcgw in vcgws: + if vpn_customer_gateway.lower() in [vcgw['id'], vcgw['name'].lower()]: + self.vpn_customer_gateway = vcgw + return self._get_by_key(key, self.vpn_customer_gateway) + self.fail_json(msg="VPN customer gateway not found: %s" % vpn_customer_gateway) + + def get_vpn_gateway(self, key=None): + args = { + 'vpcid': self.get_vpc(key='id'), + 'account': self.get_account(key='name'), + 'domainid': self.get_domain(key='id'), + 'projectid': self.get_project(key='id'), + } + vpn_gateways = self.query_api('listVpnGateways', **args) + if vpn_gateways: + return self._get_by_key(key, vpn_gateways['vpngateway'][0]) + + elif self.module.params.get('force'): + if self.module.check_mode: + return {} + res = self.query_api('createVpnGateway', **args) + vpn_gateway = self.poll_job(res, 'vpngateway') + return self._get_by_key(key, vpn_gateway) + + self.fail_json(msg="VPN gateway not found and not forced to create one") + + def get_vpn_connection(self): + args = { + 'vpcid': self.get_vpc(key='id'), + 'account': self.get_account(key='name'), + 'domainid': self.get_domain(key='id'), + 'projectid': self.get_project(key='id'), + } + + vpn_conns = self.query_api('listVpnConnections', **args) + if vpn_conns: + for vpn_conn in vpn_conns['vpnconnection']: + if self.get_vpn_customer_gateway(key='id') == vpn_conn['s2scustomergatewayid']: + return vpn_conn + + def present_vpn_connection(self): + vpn_conn = self.get_vpn_connection() + + args = { + 's2scustomergatewayid': self.get_vpn_customer_gateway(key='id'), + 's2svpngatewayid': self.get_vpn_gateway(key='id'), + 'passive': self.module.params.get('passive'), + } + + if not vpn_conn: + self.result['changed'] = True + + if not self.module.check_mode: + res = self.query_api('createVpnConnection', **args) + poll_async = self.module.params.get('poll_async') + if poll_async: + vpn_conn = self.poll_job(res, 'vpnconnection') + + return vpn_conn + + def absent_vpn_connection(self): + vpn_conn = self.get_vpn_connection() + + if vpn_conn: + self.result['changed'] = True + + args = { + 'id': vpn_conn['id'] + } + + if not self.module.check_mode: + res = self.query_api('deleteVpnConnection', **args) + poll_async = self.module.params.get('poll_async') + if poll_async: + self.poll_job(res, 'vpnconnection') + + return vpn_conn + + def 
get_result(self, vpn_conn): + super(AnsibleCloudStackVpnConnection, self).get_result(vpn_conn) + if vpn_conn: + if 'cidrlist' in vpn_conn: + self.result['cidrs'] = vpn_conn['cidrlist'].split(',') or [vpn_conn['cidrlist']] + # Ensure we return a bool + self.result['force_encap'] = True if vpn_conn.get('forceencap') else False + args = { + 'key': 'name', + 'identifier': vpn_conn['s2scustomergatewayid'], + 'refresh': True, + } + self.result['vpn_customer_gateway'] = self.get_vpn_customer_gateway(**args) + return self.result + + +def main(): + argument_spec = cs_argument_spec() + argument_spec.update(dict( + vpn_customer_gateway=dict(required=True), + vpc=dict(required=True), + domain=dict(), + account=dict(), + project=dict(), + zone=dict(), + passive=dict(type='bool', default=False), + force=dict(type='bool', default=False), + state=dict(choices=['present', 'absent'], default='present'), + poll_async=dict(type='bool', default=True), + )) + + module = AnsibleModule( + argument_spec=argument_spec, + required_together=cs_required_together(), + supports_check_mode=True + ) + + acs_vpn_conn = AnsibleCloudStackVpnConnection(module) + + state = module.params.get('state') + if state == "absent": + vpn_conn = acs_vpn_conn.absent_vpn_connection() + else: + vpn_conn = acs_vpn_conn.present_vpn_connection() + + result = acs_vpn_conn.get_result(vpn_conn) + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/cloudstack/cs_vpn_customer_gateway.py b/plugins/modules/cloud/cloudstack/cs_vpn_customer_gateway.py new file mode 100644 index 0000000000..1cce8dbcdd --- /dev/null +++ b/plugins/modules/cloud/cloudstack/cs_vpn_customer_gateway.py @@ -0,0 +1,348 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# (c) 2017, René Moser +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = r''' +--- +module: cs_vpn_customer_gateway +short_description: Manages site-to-site VPN customer gateway configurations on Apache CloudStack based clouds. +description: + - Create, update and remove VPN customer gateways. +author: René Moser (@resmo) +options: + name: + description: + - Name of the gateway. + type: str + required: true + cidrs: + description: + - List of guest CIDRs behind the gateway. + - Required if I(state=present). + type: list + aliases: [ cidr ] + gateway: + description: + - Public IP address of the gateway. + - Required if I(state=present). + type: str + esp_policy: + description: + - ESP policy in the format e.g. C(aes256-sha1;modp1536). + - Required if I(state=present). + type: str + ike_policy: + description: + - IKE policy in the format e.g. C(aes256-sha1;modp1536). + - Required if I(state=present). + type: str + ipsec_psk: + description: + - IPsec Preshared-Key. + - Cannot contain newline or double quotes. + - Required if I(state=present). + type: str + ike_lifetime: + description: + - Lifetime in seconds of phase 1 VPN connection. + - Defaulted to 86400 by the API on creation if not set. + type: int + esp_lifetime: + description: + - Lifetime in seconds of phase 2 VPN connection. + - Defaulted to 3600 by the API on creation if not set. + type: int + dpd: + description: + - Enable Dead Peer Detection. + - Disabled per default by the API on creation if not set. 
+    type: bool
+  force_encap:
+    description:
+      - Force encapsulation for NAT traversal.
+      - Disabled per default by the API on creation if not set.
+    type: bool
+  state:
+    description:
+      - State of the VPN customer gateway.
+    type: str
+    default: present
+    choices: [ present, absent ]
+  domain:
+    description:
+      - Domain the VPN customer gateway is related to.
+    type: str
+  account:
+    description:
+      - Account the VPN customer gateway is related to.
+    type: str
+  project:
+    description:
+      - Name of the project the VPN gateway is related to.
+    type: str
+  poll_async:
+    description:
+      - Poll async jobs until job has finished.
+    default: yes
+    type: bool
+extends_documentation_fragment:
+- community.general.cloudstack
+
+'''
+
+EXAMPLES = r'''
+- name: Create a vpn customer gateway
+  cs_vpn_customer_gateway:
+    name: my vpn customer gateway
+    cidrs:
+    - 192.168.123.0/24
+    - 192.168.124.0/24
+    esp_policy: aes256-sha1;modp1536
+    gateway: 10.10.1.1
+    ike_policy: aes256-sha1;modp1536
+    ipsec_psk: "S3cr3Tk3Y"
+  delegate_to: localhost
+
+- name: Remove a vpn customer gateway
+  cs_vpn_customer_gateway:
+    name: my vpn customer gateway
+    state: absent
+  delegate_to: localhost
+'''
+
+RETURN = r'''
+---
+id:
+  description: UUID of the VPN customer gateway.
+  returned: success
+  type: str
+  sample: 04589590-ac63-4ffc-93f5-b698b8ac38b6
+gateway:
+  description: IP address of the VPN customer gateway.
+  returned: success
+  type: str
+  sample: 10.100.212.10
+domain:
+  description: Domain the VPN customer gateway is related to.
+  returned: success
+  type: str
+  sample: example domain
+account:
+  description: Account the VPN customer gateway is related to.
+  returned: success
+  type: str
+  sample: example account
+project:
+  description: Name of project the VPN customer gateway is related to.
+  returned: success
+  type: str
+  sample: Production
+dpd:
+  description: Whether dead peer detection is enabled or not.
+  returned: success
+  type: bool
+  sample: true
+esp_lifetime:
+  description: Lifetime in seconds of phase 2 VPN connection.
+  returned: success
+  type: int
+  sample: 86400
+esp_policy:
+  description: ESP policy of the VPN customer gateway.
+  returned: success
+  type: str
+  sample: aes256-sha1;modp1536
+force_encap:
+  description: Whether encapsulation for NAT traversal is enforced or not.
+  returned: success
+  type: bool
+  sample: true
+ike_lifetime:
+  description: Lifetime in seconds of phase 1 VPN connection.
+  returned: success
+  type: int
+  sample: 86400
+ike_policy:
+  description: IKE policy of the VPN customer gateway.
+  returned: success
+  type: str
+  sample: aes256-sha1;modp1536
+name:
+  description: Name of this customer gateway.
+  returned: success
+  type: str
+  sample: my vpn customer gateway
+cidrs:
+  description: List of CIDRs of this customer gateway.
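The cidrs return value (documentation continues just below) is derived from the API's comma-separated cidrlist field, which get_result splits into a proper list. A quick illustration of that transformation:

    # How the "cidrs" list is derived from the API's "cidrlist" string.
    cidrlist = '192.168.123.0/24,192.168.124.0/24'
    assert cidrlist.split(',') == ['192.168.123.0/24', '192.168.124.0/24']
    # str.split always yields at least one element, so a single CIDR
    # comes back as a one-element list as well.
    assert '10.10.10.0/24'.split(',') == ['10.10.10.0/24']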
+ returned: success + type: list + sample: [ 10.10.10.0/24 ] +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.cloudstack import ( + AnsibleCloudStack, + cs_argument_spec, + cs_required_together +) + + +class AnsibleCloudStackVpnCustomerGateway(AnsibleCloudStack): + + def __init__(self, module): + super(AnsibleCloudStackVpnCustomerGateway, self).__init__(module) + self.returns = { + 'dpd': 'dpd', + 'esplifetime': 'esp_lifetime', + 'esppolicy': 'esp_policy', + 'gateway': 'gateway', + 'ikepolicy': 'ike_policy', + 'ikelifetime': 'ike_lifetime', + 'ipaddress': 'ip_address', + } + + def _common_args(self): + return { + 'name': self.module.params.get('name'), + 'account': self.get_account(key='name'), + 'domainid': self.get_domain(key='id'), + 'projectid': self.get_project(key='id'), + 'cidrlist': ','.join(self.module.params.get('cidrs')) if self.module.params.get('cidrs') is not None else None, + 'esppolicy': self.module.params.get('esp_policy'), + 'esplifetime': self.module.params.get('esp_lifetime'), + 'ikepolicy': self.module.params.get('ike_policy'), + 'ikelifetime': self.module.params.get('ike_lifetime'), + 'ipsecpsk': self.module.params.get('ipsec_psk'), + 'dpd': self.module.params.get('dpd'), + 'forceencap': self.module.params.get('force_encap'), + 'gateway': self.module.params.get('gateway'), + } + + def get_vpn_customer_gateway(self): + args = { + 'account': self.get_account(key='name'), + 'domainid': self.get_domain(key='id'), + 'projectid': self.get_project(key='id'), + 'fetch_list': True, + } + vpn_customer_gateway = self.module.params.get('name') + vpn_customer_gateways = self.query_api('listVpnCustomerGateways', **args) + if vpn_customer_gateways: + for vgw in vpn_customer_gateways: + if vpn_customer_gateway.lower() in [vgw['id'], vgw['name'].lower()]: + return vgw + + def present_vpn_customer_gateway(self): + vpn_customer_gateway = self.get_vpn_customer_gateway() + required_params = [ + 'cidrs', + 'esp_policy', + 'gateway', + 'ike_policy', + 'ipsec_psk', + ] + self.module.fail_on_missing_params(required_params=required_params) + + if not vpn_customer_gateway: + vpn_customer_gateway = self._create_vpn_customer_gateway(vpn_customer_gateway) + else: + vpn_customer_gateway = self._update_vpn_customer_gateway(vpn_customer_gateway) + + return vpn_customer_gateway + + def _create_vpn_customer_gateway(self, vpn_customer_gateway): + self.result['changed'] = True + args = self._common_args() + if not self.module.check_mode: + res = self.query_api('createVpnCustomerGateway', **args) + poll_async = self.module.params.get('poll_async') + if poll_async: + vpn_customer_gateway = self.poll_job(res, 'vpncustomergateway') + return vpn_customer_gateway + + def _update_vpn_customer_gateway(self, vpn_customer_gateway): + args = self._common_args() + args.update({'id': vpn_customer_gateway['id']}) + if self.has_changed(args, vpn_customer_gateway, skip_diff_for_keys=['ipsecpsk']): + self.result['changed'] = True + if not self.module.check_mode: + res = self.query_api('updateVpnCustomerGateway', **args) + poll_async = self.module.params.get('poll_async') + if poll_async: + vpn_customer_gateway = self.poll_job(res, 'vpncustomergateway') + return vpn_customer_gateway + + def absent_vpn_customer_gateway(self): + vpn_customer_gateway = self.get_vpn_customer_gateway() + if vpn_customer_gateway: + self.result['changed'] = True + args = { + 'id': vpn_customer_gateway['id'] + } + if not self.module.check_mode: + res = 
self.query_api('deleteVpnCustomerGateway', **args) + poll_async = self.module.params.get('poll_async') + if poll_async: + self.poll_job(res, 'vpncustomergateway') + + return vpn_customer_gateway + + def get_result(self, vpn_customer_gateway): + super(AnsibleCloudStackVpnCustomerGateway, self).get_result(vpn_customer_gateway) + if vpn_customer_gateway: + if 'cidrlist' in vpn_customer_gateway: + self.result['cidrs'] = vpn_customer_gateway['cidrlist'].split(',') or [vpn_customer_gateway['cidrlist']] + # Ensure we return a bool + self.result['force_encap'] = True if vpn_customer_gateway.get('forceencap') else False + return self.result + + +def main(): + argument_spec = cs_argument_spec() + argument_spec.update(dict( + name=dict(required=True), + state=dict(choices=['present', 'absent'], default='present'), + domain=dict(), + account=dict(), + project=dict(), + cidrs=dict(type='list', aliases=['cidr']), + esp_policy=dict(), + esp_lifetime=dict(type='int'), + gateway=dict(), + ike_policy=dict(), + ike_lifetime=dict(type='int'), + ipsec_psk=dict(no_log=True), + dpd=dict(type='bool'), + force_encap=dict(type='bool'), + poll_async=dict(type='bool', default=True), + )) + + module = AnsibleModule( + argument_spec=argument_spec, + required_together=cs_required_together(), + supports_check_mode=True + ) + + acs_vpn_cgw = AnsibleCloudStackVpnCustomerGateway(module) + + state = module.params.get('state') + if state == "absent": + vpn_customer_gateway = acs_vpn_cgw.absent_vpn_customer_gateway() + else: + vpn_customer_gateway = acs_vpn_cgw.present_vpn_customer_gateway() + + result = acs_vpn_cgw.get_result(vpn_customer_gateway) + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/cloudstack/cs_vpn_gateway.py b/plugins/modules/cloud/cloudstack/cs_vpn_gateway.py new file mode 100644 index 0000000000..91d40e8e72 --- /dev/null +++ b/plugins/modules/cloud/cloudstack/cs_vpn_gateway.py @@ -0,0 +1,210 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# (c) 2017, René Moser +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: cs_vpn_gateway +short_description: Manages site-to-site VPN gateways on Apache CloudStack based clouds. +description: + - Creates and removes VPN site-to-site gateways. +author: René Moser (@resmo) +options: + vpc: + description: + - Name of the VPC. + type: str + required: true + state: + description: + - State of the VPN gateway. + type: str + default: present + choices: [ present, absent ] + domain: + description: + - Domain the VPN gateway is related to. + type: str + account: + description: + - Account the VPN gateway is related to. + type: str + project: + description: + - Name of the project the VPN gateway is related to. + type: str + zone: + description: + - Name of the zone the VPC is related to. + - If not set, default zone is used. + type: str + poll_async: + description: + - Poll async jobs until job has finished. 
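When poll_async is enabled, the module blocks on CloudStack's asynchronous job API until the gateway operation finishes. The real loop is AnsibleCloudStack.poll_job in module_utils.cloudstack; the sketch below only shows the general shape, and its names and defaults are assumptions:

    import time

    def poll_job_sketch(query_api, job_id, key, timeout=300, interval=2):
        # Sketch only; CloudStack async jobs report jobstatus 0 while
        # pending and 1 on success.
        end = time.time() + timeout
        while time.time() < end:
            res = query_api('queryAsyncJobResult', jobid=job_id)
            if res.get('jobstatus') == 1:
                return res['jobresult'][key]
            time.sleep(interval)
        raise RuntimeError('async job %s timed out' % job_id)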
+ type: bool + default: yes +extends_documentation_fragment: +- community.general.cloudstack + +''' + +EXAMPLES = ''' +- name: Ensure a vpn gateway is present + cs_vpn_gateway: + vpc: my VPC + delegate_to: localhost + +- name: Ensure a vpn gateway is absent + cs_vpn_gateway: + vpc: my VPC + state: absent + delegate_to: localhost +''' + +RETURN = ''' +--- +id: + description: UUID of the VPN site-to-site gateway. + returned: success + type: str + sample: 04589590-ac63-4ffc-93f5-b698b8ac38b6 +public_ip: + description: IP address of the VPN site-to-site gateway. + returned: success + type: str + sample: 10.100.212.10 +vpc: + description: Name of the VPC. + returned: success + type: str + sample: My VPC +domain: + description: Domain the VPN site-to-site gateway is related to. + returned: success + type: str + sample: example domain +account: + description: Account the VPN site-to-site gateway is related to. + returned: success + type: str + sample: example account +project: + description: Name of project the VPN site-to-site gateway is related to. + returned: success + type: str + sample: Production +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.cloudstack import ( + AnsibleCloudStack, + cs_argument_spec, + cs_required_together +) + + +class AnsibleCloudStackVpnGateway(AnsibleCloudStack): + + def __init__(self, module): + super(AnsibleCloudStackVpnGateway, self).__init__(module) + self.returns = { + 'publicip': 'public_ip' + } + + def get_vpn_gateway(self): + args = { + 'vpcid': self.get_vpc(key='id'), + 'account': self.get_account(key='name'), + 'domainid': self.get_domain(key='id'), + 'projectid': self.get_project(key='id') + } + vpn_gateways = self.query_api('listVpnGateways', **args) + if vpn_gateways: + return vpn_gateways['vpngateway'][0] + return None + + def present_vpn_gateway(self): + vpn_gateway = self.get_vpn_gateway() + if not vpn_gateway: + self.result['changed'] = True + args = { + 'vpcid': self.get_vpc(key='id'), + 'account': self.get_account(key='name'), + 'domainid': self.get_domain(key='id'), + 'projectid': self.get_project(key='id') + } + if not self.module.check_mode: + res = self.query_api('createVpnGateway', **args) + + poll_async = self.module.params.get('poll_async') + if poll_async: + vpn_gateway = self.poll_job(res, 'vpngateway') + + return vpn_gateway + + def absent_vpn_gateway(self): + vpn_gateway = self.get_vpn_gateway() + if vpn_gateway: + self.result['changed'] = True + args = { + 'id': vpn_gateway['id'] + } + if not self.module.check_mode: + res = self.query_api('deleteVpnGateway', **args) + + poll_async = self.module.params.get('poll_async') + if poll_async: + self.poll_job(res, 'vpngateway') + + return vpn_gateway + + def get_result(self, vpn_gateway): + super(AnsibleCloudStackVpnGateway, self).get_result(vpn_gateway) + if vpn_gateway: + self.result['vpc'] = self.get_vpc(key='name') + return self.result + + +def main(): + argument_spec = cs_argument_spec() + argument_spec.update(dict( + vpc=dict(required=True), + state=dict(choices=['present', 'absent'], default='present'), + domain=dict(), + account=dict(), + project=dict(), + zone=dict(), + poll_async=dict(type='bool', default=True), + )) + + module = AnsibleModule( + argument_spec=argument_spec, + required_together=cs_required_together(), + supports_check_mode=True + ) + + acs_vpn_gw = AnsibleCloudStackVpnGateway(module) + + state = module.params.get('state') + if state == "absent": + vpn_gateway = 
acs_vpn_gw.absent_vpn_gateway() + else: + vpn_gateway = acs_vpn_gw.present_vpn_gateway() + + result = acs_vpn_gw.get_result(vpn_gateway) + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/cloudstack/cs_zone.py b/plugins/modules/cloud/cloudstack/cs_zone.py new file mode 100644 index 0000000000..30e26d1fe9 --- /dev/null +++ b/plugins/modules/cloud/cloudstack/cs_zone.py @@ -0,0 +1,385 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# (c) 2016, René Moser +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['stableinterface'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: cs_zone +short_description: Manages zones on Apache CloudStack based clouds. +description: + - Create, update and remove zones. +author: René Moser (@resmo) +options: + name: + description: + - Name of the zone. + type: str + required: true + id: + description: + - uuid of the existing zone. + type: str + state: + description: + - State of the zone. + type: str + default: present + choices: [ present, enabled, disabled, absent ] + domain: + description: + - Domain the zone is related to. + - Zone is a public zone if not set. + type: str + network_domain: + description: + - Network domain for the zone. + type: str + network_type: + description: + - Network type of the zone. + type: str + default: Basic + choices: [ Basic, Advanced ] + dns1: + description: + - First DNS for the zone. + - Required if I(state=present) + type: str + dns2: + description: + - Second DNS for the zone. + type: str + internal_dns1: + description: + - First internal DNS for the zone. + - If not set I(dns1) will be used on I(state=present). + type: str + internal_dns2: + description: + - Second internal DNS for the zone. + type: str + dns1_ipv6: + description: + - First DNS for IPv6 for the zone. + type: str + dns2_ipv6: + description: + - Second DNS for IPv6 for the zone. + type: str + guest_cidr_address: + description: + - Guest CIDR address for the zone. + type: str + dhcp_provider: + description: + - DHCP provider for the Zone. + type: str + local_storage_enabled: + description: + - Whether to enable local storage for the zone or not.. + type: bool + securitygroups_enabled: + description: + - Whether the zone is security group enabled or not. + type: bool +extends_documentation_fragment: +- community.general.cloudstack + +''' + +EXAMPLES = ''' +- name: Ensure a zone is present + cs_zone: + name: ch-zrh-ix-01 + dns1: 8.8.8.8 + dns2: 8.8.4.4 + network_type: basic + delegate_to: localhost + +- name: Ensure a zone is disabled + cs_zone: + name: ch-zrh-ix-01 + state: disabled + delegate_to: localhost + +- name: Ensure a zone is enabled + cs_zone: + name: ch-zrh-ix-01 + state: enabled + delegate_to: localhost + +- name: Ensure a zone is absent + cs_zone: + name: ch-zrh-ix-01 + state: absent + delegate_to: localhost +''' + +RETURN = ''' +--- +id: + description: UUID of the zone. + returned: success + type: str + sample: 04589590-ac63-4ffc-93f5-b698b8ac38b6 +name: + description: Name of the zone. + returned: success + type: str + sample: zone01 +dns1: + description: First DNS for the zone. + returned: success + type: str + sample: 8.8.8.8 +dns2: + description: Second DNS for the zone. 
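The internal DNS values returned below fall back to the public DNS options when internal_dns1/internal_dns2 are not set; the module uses get_or_fallback from module_utils.cloudstack for this. A simplified sketch of that fallback:

    # Simplified sketch of get_or_fallback: internal DNS defaults to
    # the public DNS when not provided.
    def get_or_fallback_sketch(params, key, fallback_key):
        value = params.get(key)
        return value if value is not None else params.get(fallback_key)

    params = {'dns1': '8.8.8.8', 'internal_dns1': None}
    assert get_or_fallback_sketch(params, 'internal_dns1', 'dns1') == '8.8.8.8'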
+ returned: success + type: str + sample: 8.8.4.4 +internal_dns1: + description: First internal DNS for the zone. + returned: success + type: str + sample: 8.8.8.8 +internal_dns2: + description: Second internal DNS for the zone. + returned: success + type: str + sample: 8.8.4.4 +dns1_ipv6: + description: First IPv6 DNS for the zone. + returned: success + type: str + sample: "2001:4860:4860::8888" +dns2_ipv6: + description: Second IPv6 DNS for the zone. + returned: success + type: str + sample: "2001:4860:4860::8844" +allocation_state: + description: State of the zone. + returned: success + type: str + sample: Enabled +domain: + description: Domain the zone is related to. + returned: success + type: str + sample: ROOT +network_domain: + description: Network domain for the zone. + returned: success + type: str + sample: example.com +network_type: + description: Network type for the zone. + returned: success + type: str + sample: basic +local_storage_enabled: + description: Local storage offering enabled. + returned: success + type: bool + sample: false +securitygroups_enabled: + description: Security groups support is enabled. + returned: success + type: bool + sample: false +guest_cidr_address: + description: Guest CIDR address for the zone + returned: success + type: str + sample: 10.1.1.0/24 +dhcp_provider: + description: DHCP provider for the zone + returned: success + type: str + sample: VirtualRouter +zone_token: + description: Zone token + returned: success + type: str + sample: ccb0a60c-79c8-3230-ab8b-8bdbe8c45bb7 +tags: + description: List of resource tags associated with the zone. + returned: success + type: dict + sample: [ { "key": "foo", "value": "bar" } ] +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.cloudstack import ( + AnsibleCloudStack, + cs_argument_spec, + cs_required_together, +) + + +class AnsibleCloudStackZone(AnsibleCloudStack): + + def __init__(self, module): + super(AnsibleCloudStackZone, self).__init__(module) + self.returns = { + 'dns1': 'dns1', + 'dns2': 'dns2', + 'internaldns1': 'internal_dns1', + 'internaldns2': 'internal_dns2', + 'ipv6dns1': 'dns1_ipv6', + 'ipv6dns2': 'dns2_ipv6', + 'domain': 'network_domain', + 'networktype': 'network_type', + 'securitygroupsenabled': 'securitygroups_enabled', + 'localstorageenabled': 'local_storage_enabled', + 'guestcidraddress': 'guest_cidr_address', + 'dhcpprovider': 'dhcp_provider', + 'allocationstate': 'allocation_state', + 'zonetoken': 'zone_token', + } + self.zone = None + + def _get_common_zone_args(self): + args = { + 'name': self.module.params.get('name'), + 'dns1': self.module.params.get('dns1'), + 'dns2': self.module.params.get('dns2'), + 'internaldns1': self.get_or_fallback('internal_dns1', 'dns1'), + 'internaldns2': self.get_or_fallback('internal_dns2', 'dns2'), + 'ipv6dns1': self.module.params.get('dns1_ipv6'), + 'ipv6dns2': self.module.params.get('dns2_ipv6'), + 'networktype': self.module.params.get('network_type'), + 'domain': self.module.params.get('network_domain'), + 'localstorageenabled': self.module.params.get('local_storage_enabled'), + 'guestcidraddress': self.module.params.get('guest_cidr_address'), + 'dhcpprovider': self.module.params.get('dhcp_provider'), + } + state = self.module.params.get('state') + if state in ['enabled', 'disabled']: + args['allocationstate'] = state.capitalize() + return args + + def get_zone(self): + if not self.zone: + args = {} + + uuid = self.module.params.get('id') + if uuid: + args['id'] = 
uuid + zones = self.query_api('listZones', **args) + if zones: + self.zone = zones['zone'][0] + return self.zone + + args['name'] = self.module.params.get('name') + zones = self.query_api('listZones', **args) + if zones: + self.zone = zones['zone'][0] + return self.zone + + def present_zone(self): + zone = self.get_zone() + if zone: + zone = self._update_zone() + else: + zone = self._create_zone() + return zone + + def _create_zone(self): + required_params = [ + 'dns1', + ] + self.module.fail_on_missing_params(required_params=required_params) + + self.result['changed'] = True + + args = self._get_common_zone_args() + args['domainid'] = self.get_domain(key='id') + args['securitygroupenabled'] = self.module.params.get('securitygroups_enabled') + + zone = None + if not self.module.check_mode: + res = self.query_api('createZone', **args) + zone = res['zone'] + return zone + + def _update_zone(self): + zone = self.get_zone() + + args = self._get_common_zone_args() + args['id'] = zone['id'] + + if self.has_changed(args, zone): + self.result['changed'] = True + + if not self.module.check_mode: + res = self.query_api('updateZone', **args) + zone = res['zone'] + return zone + + def absent_zone(self): + zone = self.get_zone() + if zone: + self.result['changed'] = True + + args = { + 'id': zone['id'] + } + if not self.module.check_mode: + self.query_api('deleteZone', **args) + + return zone + + +def main(): + argument_spec = cs_argument_spec() + argument_spec.update(dict( + id=dict(), + name=dict(required=True), + dns1=dict(), + dns2=dict(), + internal_dns1=dict(), + internal_dns2=dict(), + dns1_ipv6=dict(), + dns2_ipv6=dict(), + network_type=dict(default='Basic', choices=['Basic', 'Advanced']), + network_domain=dict(), + guest_cidr_address=dict(), + dhcp_provider=dict(), + local_storage_enabled=dict(type='bool'), + securitygroups_enabled=dict(type='bool'), + state=dict(choices=['present', 'enabled', 'disabled', 'absent'], default='present'), + domain=dict(), + )) + + module = AnsibleModule( + argument_spec=argument_spec, + required_together=cs_required_together(), + supports_check_mode=True + ) + + acs_zone = AnsibleCloudStackZone(module) + + state = module.params.get('state') + if state in ['absent']: + zone = acs_zone.absent_zone() + else: + zone = acs_zone.present_zone() + + result = acs_zone.get_result(zone) + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/cloudstack/cs_zone_facts.py b/plugins/modules/cloud/cloudstack/cs_zone_facts.py new file mode 100644 index 0000000000..8b87b36b76 --- /dev/null +++ b/plugins/modules/cloud/cloudstack/cs_zone_facts.py @@ -0,0 +1,201 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (c) 2016, René Moser +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['deprecated'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: cs_zone_facts +short_description: Gathering facts of zones from Apache CloudStack based clouds. +description: + - Gathering facts from the API of a zone. + - Sets Ansible facts accessible by the key C(cloudstack_zone) and since version 2.6 also returns results. +deprecated: + removed_in: "2.13" + why: Transformed into an info module. + alternative: Use M(cs_zone_info) instead. +author: René Moser (@resmo) +options: + zone: + description: + - Name of the zone. 
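Like the other CloudStack modules in this commit, cs_zone_facts renames camel-case API keys to snake_case result keys through the returns dict declared in the class further down. The mapping idea, simplified (the actual copying happens in AnsibleCloudStack.get_result):

    # Simplified: API keys on the left become snake_case result keys.
    returns = {'internaldns1': 'internal_dns1', 'zonetoken': 'zone_token'}
    api_resource = {'internaldns1': '8.8.8.8', 'zonetoken': 'ccb0a60c'}
    result = {new: api_resource[old] for old, new in returns.items() if old in api_resource}
    assert result == {'internal_dns1': '8.8.8.8', 'zone_token': 'ccb0a60c'}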
+ type: str + required: true + aliases: [ name ] +extends_documentation_fragment: +- community.general.cloudstack + +''' + +EXAMPLES = ''' +- name: Gather facts from a zone + cs_zone_facts: + name: ch-gva-1 + register: zone + delegate_to: localhost + +- name: Show the returned results of the registered variable + debug: + var: zone + +- name: Show the facts by the ansible_facts key cloudstack_zone + debug: + var: cloudstack_zone +''' + +RETURN = ''' +--- +id: + description: UUID of the zone. + returned: success + type: str + sample: 04589590-ac63-4ffc-93f5-b698b8ac38b6 +name: + description: Name of the zone. + returned: success + type: str + sample: zone01 +dns1: + description: First DNS for the zone. + returned: success + type: str + sample: 8.8.8.8 +dns2: + description: Second DNS for the zone. + returned: success + type: str + sample: 8.8.4.4 +internal_dns1: + description: First internal DNS for the zone. + returned: success + type: str + sample: 8.8.8.8 +internal_dns2: + description: Second internal DNS for the zone. + returned: success + type: str + sample: 8.8.4.4 +dns1_ipv6: + description: First IPv6 DNS for the zone. + returned: success + type: str + sample: "2001:4860:4860::8888" +dns2_ipv6: + description: Second IPv6 DNS for the zone. + returned: success + type: str + sample: "2001:4860:4860::8844" +allocation_state: + description: State of the zone. + returned: success + type: str + sample: Enabled +domain: + description: Domain the zone is related to. + returned: success + type: str + sample: ROOT +network_domain: + description: Network domain for the zone. + returned: success + type: str + sample: example.com +network_type: + description: Network type for the zone. + returned: success + type: str + sample: basic +local_storage_enabled: + description: Local storage offering enabled. + returned: success + type: bool + sample: false +securitygroups_enabled: + description: Security groups support is enabled. + returned: success + type: bool + sample: false +guest_cidr_address: + description: Guest CIDR address for the zone + returned: success + type: str + sample: 10.1.1.0/24 +dhcp_provider: + description: DHCP provider for the zone + returned: success + type: str + sample: VirtualRouter +zone_token: + description: Zone token + returned: success + type: str + sample: ccb0a60c-79c8-3230-ab8b-8bdbe8c45bb7 +tags: + description: List of resource tags associated with the zone. 
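Tags come back as a list of key/value pairs, as the sample just below shows. If a plain mapping is more convenient on the consuming side, the conversion is a one-liner; this is something a playbook or filter would do, not the module:

    # Turning the API's tag list into a plain dict (illustration only).
    tags = [{'key': 'foo', 'value': 'bar'}, {'key': 'env', 'value': 'prod'}]
    as_dict = dict((tag['key'], tag['value']) for tag in tags)
    assert as_dict == {'foo': 'bar', 'env': 'prod'}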
+ returned: success + type: dict + sample: [ { "key": "foo", "value": "bar" } ] +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.cloudstack import ( + AnsibleCloudStack, + cs_argument_spec, +) + + +class AnsibleCloudStackZoneFacts(AnsibleCloudStack): + + def __init__(self, module): + super(AnsibleCloudStackZoneFacts, self).__init__(module) + self.returns = { + 'dns1': 'dns1', + 'dns2': 'dns2', + 'internaldns1': 'internal_dns1', + 'internaldns2': 'internal_dns2', + 'ipv6dns1': 'dns1_ipv6', + 'ipv6dns2': 'dns2_ipv6', + 'domain': 'network_domain', + 'networktype': 'network_type', + 'securitygroupsenabled': 'securitygroups_enabled', + 'localstorageenabled': 'local_storage_enabled', + 'guestcidraddress': 'guest_cidr_address', + 'dhcpprovider': 'dhcp_provider', + 'allocationstate': 'allocation_state', + 'zonetoken': 'zone_token', + } + + def get_zone(self): + return super(AnsibleCloudStackZoneFacts, self).get_zone() + + +def main(): + argument_spec = cs_argument_spec() + argument_spec.update(dict( + zone=dict(required=True, aliases=['name']), + )) + + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + + acs_zone_facts = AnsibleCloudStackZoneFacts(module=module) + result = acs_zone_facts.get_result_and_facts( + facts_name='cloudstack_zone', + resource=acs_zone_facts.get_zone() + ) + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/cloudstack/cs_zone_info.py b/plugins/modules/cloud/cloudstack/cs_zone_info.py new file mode 100644 index 0000000000..5d97c1d43b --- /dev/null +++ b/plugins/modules/cloud/cloudstack/cs_zone_info.py @@ -0,0 +1,213 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (c) 2016, René Moser +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['stableinterface'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: cs_zone_info +short_description: Gathering information about zones from Apache CloudStack based clouds. +description: + - Gathering information from the API of a zone. +author: René Moser (@resmo) +options: + zone: + description: + - Name of the zone. + - If not specified, all zones are returned + type: str + aliases: [ name ] +extends_documentation_fragment: +- community.general.cloudstack + +''' + +EXAMPLES = ''' +- name: Gather information from a zone + cs_zone_info: + zone: ch-gva-1 + register: zone + delegate_to: localhost + +- name: Show the returned results of the registered variable + debug: + msg: "{{ zone }}" + +- name: Gather information from all zones + cs_zone_info: + register: zones + delegate_to: localhost + +- name: Show information on all zones + debug: + msg: "{{ zones }}" +''' + +RETURN = ''' +--- +zones: + description: A list of matching zones. + type: list + returned: success + contains: + id: + description: UUID of the zone. + returned: success + type: str + sample: 04589590-ac63-4ffc-93f5-b698b8ac38b6 + name: + description: Name of the zone. + returned: success + type: str + sample: zone01 + dns1: + description: First DNS for the zone. + returned: success + type: str + sample: 8.8.8.8 + dns2: + description: Second DNS for the zone. + returned: success + type: str + sample: 8.8.4.4 + internal_dns1: + description: First internal DNS for the zone. 
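Unlike the facts module, cs_zone_info nests everything under a top-level zones list, so consumers iterate over matches instead of reading single keys. The result shape, illustrated with assumed sample values:

    # Shape of the cs_zone_info result: a "zones" list whose entries
    # carry the snake_case keys documented here (sample data).
    result = {
        'zones': [
            {'name': 'zone01', 'dns1': '8.8.8.8', 'network_type': 'basic'},
            {'name': 'zone02', 'dns1': '9.9.9.9', 'network_type': 'advanced'},
        ]
    }
    assert [zone['name'] for zone in result['zones']] == ['zone01', 'zone02']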
+ returned: success + type: str + sample: 8.8.8.8 + internal_dns2: + description: Second internal DNS for the zone. + returned: success + type: str + sample: 8.8.4.4 + dns1_ipv6: + description: First IPv6 DNS for the zone. + returned: success + type: str + sample: "2001:4860:4860::8888" + dns2_ipv6: + description: Second IPv6 DNS for the zone. + returned: success + type: str + sample: "2001:4860:4860::8844" + allocation_state: + description: State of the zone. + returned: success + type: str + sample: Enabled + domain: + description: Domain the zone is related to. + returned: success + type: str + sample: ROOT + network_domain: + description: Network domain for the zone. + returned: success + type: str + sample: example.com + network_type: + description: Network type for the zone. + returned: success + type: str + sample: basic + local_storage_enabled: + description: Local storage offering enabled. + returned: success + type: bool + sample: false + securitygroups_enabled: + description: Security groups support is enabled. + returned: success + type: bool + sample: false + guest_cidr_address: + description: Guest CIDR address for the zone + returned: success + type: str + sample: 10.1.1.0/24 + dhcp_provider: + description: DHCP provider for the zone + returned: success + type: str + sample: VirtualRouter + zone_token: + description: Zone token + returned: success + type: str + sample: ccb0a60c-79c8-3230-ab8b-8bdbe8c45bb7 + tags: + description: List of resource tags associated with the zone. + returned: success + type: dict + sample: [ { "key": "foo", "value": "bar" } ] +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.cloudstack import ( + AnsibleCloudStack, + cs_argument_spec, +) + + +class AnsibleCloudStackZoneInfo(AnsibleCloudStack): + + def __init__(self, module): + super(AnsibleCloudStackZoneInfo, self).__init__(module) + self.returns = { + 'dns1': 'dns1', + 'dns2': 'dns2', + 'internaldns1': 'internal_dns1', + 'internaldns2': 'internal_dns2', + 'ipv6dns1': 'dns1_ipv6', + 'ipv6dns2': 'dns2_ipv6', + 'domain': 'network_domain', + 'networktype': 'network_type', + 'securitygroupsenabled': 'securitygroups_enabled', + 'localstorageenabled': 'local_storage_enabled', + 'guestcidraddress': 'guest_cidr_address', + 'dhcpprovider': 'dhcp_provider', + 'allocationstate': 'allocation_state', + 'zonetoken': 'zone_token', + } + + def get_zone(self): + if self.module.params['zone']: + zones = [super(AnsibleCloudStackZoneInfo, self).get_zone()] + else: + zones = self.query_api('listZones') + if zones: + zones = zones['zone'] + else: + zones = [] + return { + 'zones': [self.update_result(resource) for resource in zones] + } + + +def main(): + argument_spec = cs_argument_spec() + argument_spec.update(dict( + zone=dict(type='str', aliases=['name']), + )) + + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + + acs_zone_info = AnsibleCloudStackZoneInfo(module=module) + result = acs_zone_info.get_zone() + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/digital_ocean/digital_ocean.py b/plugins/modules/cloud/digital_ocean/digital_ocean.py new file mode 100644 index 0000000000..fd2a0b6a4c --- /dev/null +++ b/plugins/modules/cloud/digital_ocean/digital_ocean.py @@ -0,0 +1,479 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + 
+from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['deprecated'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: digital_ocean +short_description: Create/delete a droplet/SSH_key in DigitalOcean +deprecated: + removed_in: '2.12' + why: Updated module to remove external dependency with increased functionality. + alternative: Use M(digital_ocean_droplet) instead. +description: + - Create/delete a droplet in DigitalOcean and optionally wait for it to be 'running', or deploy an SSH key. +author: "Vincent Viallet (@zbal)" +options: + command: + description: + - Which target you want to operate on. + default: droplet + choices: ['droplet', 'ssh'] + state: + description: + - Indicate desired state of the target. + default: present + choices: ['present', 'active', 'absent', 'deleted'] + api_token: + description: + - DigitalOcean api token. + id: + description: + - Numeric, the droplet id you want to operate on. + aliases: ['droplet_id'] + name: + description: + - String, this is the name of the droplet - must be formatted by hostname rules, or the name of a SSH key. + unique_name: + description: + - Bool, require unique hostnames. By default, DigitalOcean allows multiple hosts with the same name. Setting this to "yes" allows only one host + per name. Useful for idempotence. + type: bool + default: 'no' + size_id: + description: + - This is the slug of the size you would like the droplet created with. + image_id: + description: + - This is the slug of the image you would like the droplet created with. + region_id: + description: + - This is the slug of the region you would like your server to be created in. + ssh_key_ids: + description: + - Optional, array of SSH key (numeric) ID that you would like to be added to the server. + virtio: + description: + - "Bool, turn on virtio driver in droplet for improved network and storage I/O." + type: bool + default: 'yes' + private_networking: + description: + - "Bool, add an additional, private network interface to droplet for inter-droplet communication." + type: bool + default: 'no' + backups_enabled: + description: + - Optional, Boolean, enables backups for your droplet. + type: bool + default: 'no' + user_data: + description: + - opaque blob of data which is made available to the droplet + ipv6: + description: + - Optional, Boolean, enable IPv6 for your droplet. + type: bool + default: 'no' + wait: + description: + - Wait for the droplet to be in state 'running' before returning. If wait is "no" an ip_address may not be returned. + type: bool + default: 'yes' + wait_timeout: + description: + - How long before wait gives up, in seconds. + default: 300 + ssh_pub_key: + description: + - The public SSH key you want to add to your account. + +notes: + - Two environment variables can be used, DO_API_KEY and DO_API_TOKEN. They both refer to the v2 token. + - As of Ansible 1.9.5 and 2.0, Version 2 of the DigitalOcean API is used, this removes C(client_id) and C(api_key) options in favor of C(api_token). + - If you are running Ansible 1.9.4 or earlier you might not be able to use the included version of this module as the API version used has been retired. + Upgrade Ansible or, if unable to, try downloading the latest version of this module from github and putting it into a 'library' directory. 
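The dopy requirement noted above is enforced with a version gate (see the HAS_DOPY check further down): anything older than 0.3.2 is rejected. LooseVersion compares release segments numerically, which is what makes the gate reliable; a standalone illustration:

    from distutils.version import LooseVersion

    # Segment-wise numeric comparison, as used by the HAS_DOPY gate.
    assert LooseVersion('0.3.2') >= LooseVersion('0.3.2')
    assert LooseVersion('0.3.10') >= LooseVersion('0.3.2')
    assert LooseVersion('0.2.9') < LooseVersion('0.3.2')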
+requirements: + - "python >= 2.6" + - dopy +''' + + +EXAMPLES = ''' +# Ensure a SSH key is present +# If a key matches this name, will return the ssh key id and changed = False +# If no existing key matches this name, a new key is created, the ssh key id is returned and changed = False + +- digital_ocean: + state: present + command: ssh + name: my_ssh_key + ssh_pub_key: 'ssh-rsa AAAA...' + api_token: XXX + +# Create a new Droplet +# Will return the droplet details including the droplet id (used for idempotence) + +- digital_ocean: + state: present + command: droplet + name: mydroplet + api_token: XXX + size_id: 2gb + region_id: ams2 + image_id: fedora-19-x64 + wait_timeout: 500 + register: my_droplet + +- debug: + msg: "ID is {{ my_droplet.droplet.id }}" + +- debug: + msg: "IP is {{ my_droplet.droplet.ip_address }}" + +# Ensure a droplet is present +# If droplet id already exist, will return the droplet details and changed = False +# If no droplet matches the id, a new droplet will be created and the droplet details (including the new id) are returned, changed = True. + +- digital_ocean: + state: present + command: droplet + id: 123 + name: mydroplet + api_token: XXX + size_id: 2gb + region_id: ams2 + image_id: fedora-19-x64 + wait_timeout: 500 + +# Create a droplet with ssh key +# The ssh key id can be passed as argument at the creation of a droplet (see ssh_key_ids). +# Several keys can be added to ssh_key_ids as id1,id2,id3 +# The keys are used to connect as root to the droplet. + +- digital_ocean: + state: present + ssh_key_ids: 123,456 + name: mydroplet + api_token: XXX + size_id: 2gb + region_id: ams2 + image_id: fedora-19-x64 + +''' + +import os +import time +import traceback + +from distutils.version import LooseVersion + +try: + # Imported as a dependency for dopy + import ansible.module_utils.six + HAS_SIX = True +except ImportError: + HAS_SIX = False + +HAS_DOPY = False +try: + import dopy + from dopy.manager import DoError, DoManager + if LooseVersion(dopy.__version__) >= LooseVersion('0.3.2'): + HAS_DOPY = True +except ImportError: + pass + +from ansible.module_utils.basic import AnsibleModule, env_fallback + + +class TimeoutError(Exception): + + def __init__(self, msg, id_): + super(TimeoutError, self).__init__(msg) + self.id = id_ + + +class JsonfyMixIn(object): + + def to_json(self): + return self.__dict__ + + +class Droplet(JsonfyMixIn): + manager = None + + def __init__(self, droplet_json): + self.status = 'new' + self.__dict__.update(droplet_json) + + def is_powered_on(self): + return self.status == 'active' + + def update_attr(self, attrs=None): + if attrs: + for k, v in attrs.items(): + setattr(self, k, v) + networks = attrs.get('networks', {}) + for network in networks.get('v6', []): + if network['type'] == 'public': + setattr(self, 'public_ipv6_address', network['ip_address']) + else: + setattr(self, 'private_ipv6_address', network['ip_address']) + else: + json = self.manager.show_droplet(self.id) + if json['ip_address']: + self.update_attr(json) + + def power_on(self): + if self.status != 'off': + raise AssertionError('Can only power on a closed one.') + json = self.manager.power_on_droplet(self.id) + self.update_attr(json) + + def ensure_powered_on(self, wait=True, wait_timeout=300): + if self.is_powered_on(): + return + if self.status == 'off': # powered off + self.power_on() + + if wait: + end_time = time.time() + wait_timeout + while time.time() < end_time: + time.sleep(min(20, end_time - time.time())) + self.update_attr() + if self.is_powered_on(): + if not 
self.ip_address: + raise TimeoutError('No ip is found.', self.id) + return + raise TimeoutError('Wait for droplet running timeout', self.id) + + def destroy(self): + return self.manager.destroy_droplet(self.id, scrub_data=True) + + @classmethod + def setup(cls, api_token): + cls.manager = DoManager(None, api_token, api_version=2) + + @classmethod + def add(cls, name, size_id, image_id, region_id, ssh_key_ids=None, virtio=True, private_networking=False, backups_enabled=False, user_data=None, + ipv6=False): + private_networking_lower = str(private_networking).lower() + backups_enabled_lower = str(backups_enabled).lower() + ipv6_lower = str(ipv6).lower() + json = cls.manager.new_droplet(name, size_id, image_id, region_id, + ssh_key_ids=ssh_key_ids, virtio=virtio, private_networking=private_networking_lower, + backups_enabled=backups_enabled_lower, user_data=user_data, ipv6=ipv6_lower) + droplet = cls(json) + return droplet + + @classmethod + def find(cls, id=None, name=None): + if not id and not name: + return False + + droplets = cls.list_all() + + # Check first by id. digital ocean requires that it be unique + for droplet in droplets: + if droplet.id == id: + return droplet + + # Failing that, check by hostname. + for droplet in droplets: + if droplet.name == name: + return droplet + + return False + + @classmethod + def list_all(cls): + json = cls.manager.all_active_droplets() + return list(map(cls, json)) + + +class SSH(JsonfyMixIn): + manager = None + + def __init__(self, ssh_key_json): + self.__dict__.update(ssh_key_json) + update_attr = __init__ + + def destroy(self): + self.manager.destroy_ssh_key(self.id) + return True + + @classmethod + def setup(cls, api_token): + cls.manager = DoManager(None, api_token, api_version=2) + + @classmethod + def find(cls, name): + if not name: + return False + keys = cls.list_all() + for key in keys: + if key.name == name: + return key + return False + + @classmethod + def list_all(cls): + json = cls.manager.all_ssh_keys() + return list(map(cls, json)) + + @classmethod + def add(cls, name, key_pub): + json = cls.manager.new_ssh_key(name, key_pub) + return cls(json) + + +def core(module): + def getkeyordie(k): + v = module.params[k] + if v is None: + module.fail_json(msg='Unable to load %s' % k) + return v + + api_token = module.params['api_token'] + changed = True + command = module.params['command'] + state = module.params['state'] + + if command == 'droplet': + Droplet.setup(api_token) + if state in ('active', 'present'): + + # First, try to find a droplet by id. + droplet = Droplet.find(id=module.params['id']) + + # If we couldn't find the droplet and the user is allowing unique + # hostnames, then check to see if a droplet with the specified + # hostname already exists. + if not droplet and module.params['unique_name']: + droplet = Droplet.find(name=getkeyordie('name')) + + # If both of those attempts failed, then create a new droplet. 
+ if not droplet: + droplet = Droplet.add( + name=getkeyordie('name'), + size_id=getkeyordie('size_id'), + image_id=getkeyordie('image_id'), + region_id=getkeyordie('region_id'), + ssh_key_ids=module.params['ssh_key_ids'], + virtio=module.params['virtio'], + private_networking=module.params['private_networking'], + backups_enabled=module.params['backups_enabled'], + user_data=module.params.get('user_data'), + ipv6=module.params['ipv6'], + ) + + if droplet.is_powered_on(): + changed = False + + droplet.ensure_powered_on( + wait=getkeyordie('wait'), + wait_timeout=getkeyordie('wait_timeout') + ) + + module.exit_json(changed=changed, droplet=droplet.to_json()) + + elif state in ('absent', 'deleted'): + # First, try to find a droplet by id. + droplet = Droplet.find(module.params['id']) + + # If we couldn't find the droplet and the user is allowing unique + # hostnames, then check to see if a droplet with the specified + # hostname already exists. + if not droplet and module.params['unique_name']: + droplet = Droplet.find(name=getkeyordie('name')) + + if not droplet: + module.exit_json(changed=False, msg='The droplet is not found.') + + droplet.destroy() + module.exit_json(changed=True) + + elif command == 'ssh': + SSH.setup(api_token) + name = getkeyordie('name') + if state in ('active', 'present'): + key = SSH.find(name) + if key: + module.exit_json(changed=False, ssh_key=key.to_json()) + key = SSH.add(name, getkeyordie('ssh_pub_key')) + module.exit_json(changed=True, ssh_key=key.to_json()) + + elif state in ('absent', 'deleted'): + key = SSH.find(name) + if not key: + module.exit_json(changed=False, msg='SSH key with the name of %s is not found.' % name) + key.destroy() + module.exit_json(changed=True) + + +def main(): + module = AnsibleModule( + argument_spec=dict( + command=dict(choices=['droplet', 'ssh'], default='droplet'), + state=dict(choices=['active', 'present', 'absent', 'deleted'], default='present'), + api_token=dict( + aliases=['API_TOKEN'], + no_log=True, + fallback=(env_fallback, ['DO_API_TOKEN', 'DO_API_KEY']) + ), + name=dict(type='str'), + size_id=dict(), + image_id=dict(), + region_id=dict(), + ssh_key_ids=dict(type='list'), + virtio=dict(type='bool', default='yes'), + private_networking=dict(type='bool', default='no'), + backups_enabled=dict(type='bool', default='no'), + id=dict(aliases=['droplet_id'], type='int'), + unique_name=dict(type='bool', default='no'), + user_data=dict(default=None), + ipv6=dict(type='bool', default='no'), + wait=dict(type='bool', default=True), + wait_timeout=dict(default=300, type='int'), + ssh_pub_key=dict(type='str'), + ), + required_together=( + ['size_id', 'image_id', 'region_id'], + ), + mutually_exclusive=( + ['size_id', 'ssh_pub_key'], + ['image_id', 'ssh_pub_key'], + ['region_id', 'ssh_pub_key'], + ), + required_one_of=( + ['id', 'name'], + ), + ) + if not HAS_DOPY and not HAS_SIX: + module.fail_json(msg='dopy >= 0.3.2 is required for this module. dopy requires six but six is not installed. 
' + 'Make sure both dopy and six are installed.') + if not HAS_DOPY: + module.fail_json(msg='dopy >= 0.3.2 required for this module') + + try: + core(module) + except TimeoutError as e: + module.fail_json(msg=str(e), id=e.id) + except (DoError, Exception) as e: + module.fail_json(msg=str(e), exception=traceback.format_exc()) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/digital_ocean/digital_ocean_account_facts.py b/plugins/modules/cloud/digital_ocean/digital_ocean_account_facts.py new file mode 120000 index 0000000000..6eff816538 --- /dev/null +++ b/plugins/modules/cloud/digital_ocean/digital_ocean_account_facts.py @@ -0,0 +1 @@ +digital_ocean_account_info.py \ No newline at end of file diff --git a/plugins/modules/cloud/digital_ocean/digital_ocean_account_info.py b/plugins/modules/cloud/digital_ocean/digital_ocean_account_info.py new file mode 100644 index 0000000000..b7e28dbfc9 --- /dev/null +++ b/plugins/modules/cloud/digital_ocean/digital_ocean_account_info.py @@ -0,0 +1,87 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright: (c) 2018, Ansible Project +# Copyright: (c) 2018, Abhijeet Kasurde +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = { + 'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community' +} + + +DOCUMENTATION = ''' +--- +module: digital_ocean_account_info +short_description: Gather information about DigitalOcean User account +description: + - This module can be used to gather information about User account. + - This module was called C(digital_ocean_account_facts) before Ansible 2.9. The usage did not change. +author: "Abhijeet Kasurde (@Akasurde)" + +requirements: + - "python >= 2.6" + +extends_documentation_fragment: +- community.general.digital_ocean.documentation + +''' + + +EXAMPLES = ''' +- name: Gather information about user account + digital_ocean_account_info: + oauth_token: "{{ oauth_token }}" +''' + + +RETURN = ''' +data: + description: DigitalOcean account information + returned: success + type: dict + sample: { + "droplet_limit": 10, + "email": "testuser1@gmail.com", + "email_verified": true, + "floating_ip_limit": 3, + "status": "active", + "status_message": "", + "uuid": "aaaaaaaaaaaaaa" + } +''' + +from traceback import format_exc +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.digital_ocean import DigitalOceanHelper +from ansible.module_utils._text import to_native + + +def core(module): + rest = DigitalOceanHelper(module) + + response = rest.get("account") + if response.status_code != 200: + module.fail_json(msg="Failed to fetch 'account' information due to error : %s" % response.json['message']) + + module.exit_json(changed=False, data=response.json["account"]) + + +def main(): + argument_spec = DigitalOceanHelper.digital_ocean_argument_spec() + module = AnsibleModule(argument_spec=argument_spec) + if module._name == 'digital_ocean_account_facts': + module.deprecate("The 'digital_ocean_account_facts' module has been renamed to 'digital_ocean_account_info'", version='2.13') + try: + core(module) + except Exception as e: + module.fail_json(msg=to_native(e), exception=format_exc()) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/digital_ocean/digital_ocean_block_storage.py b/plugins/modules/cloud/digital_ocean/digital_ocean_block_storage.py 
new file mode 100644 index 0000000000..d2702eb2d2 --- /dev/null +++ b/plugins/modules/cloud/digital_ocean/digital_ocean_block_storage.py @@ -0,0 +1,285 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: digital_ocean_block_storage +short_description: Create/destroy or attach/detach Block Storage volumes in DigitalOcean +description: + - Create/destroy Block Storage volume in DigitalOcean, or attach/detach Block Storage volume to a droplet. +options: + command: + description: + - Which operation do you want to perform. + choices: ['create', 'attach'] + required: true + state: + description: + - Indicate desired state of the target. + choices: ['present', 'absent'] + required: true + block_size: + description: + - The size of the Block Storage volume in gigabytes. Required when command=create and state=present. If snapshot_id is included, this will be ignored. + volume_name: + description: + - The name of the Block Storage volume. + required: true + description: + description: + - Description of the Block Storage volume. + region: + description: + - The slug of the region where your Block Storage volume should be located in. If snapshot_id is included, this will be ignored. + required: true + snapshot_id: + description: + - The snapshot id you would like the Block Storage volume created with. If included, region and block_size will be ignored and changed to null. + droplet_id: + description: + - The droplet id you want to operate on. Required when command=attach. +extends_documentation_fragment: +- community.general.digital_ocean.documentation + +notes: + - Two environment variables can be used, DO_API_KEY and DO_API_TOKEN. + They both refer to the v2 token. + - If snapshot_id is used, region and block_size will be ignored and changed to null. + +author: + - "Harnek Sidhu (@harneksidhu)" +''' + +EXAMPLES = ''' +# Create new Block Storage +- digital_ocean_block_storage: + state: present + command: create + api_token: + region: nyc1 + block_size: 10 + volume_name: nyc1-block-storage +# Delete Block Storage +- digital_ocean_block_storage: + state: absent + command: create + api_token: + region: nyc1 + volume_name: nyc1-block-storage +# Attach Block Storage to a Droplet +- digital_ocean_block_storage: + state: present + command: attach + api_token: + volume_name: nyc1-block-storage + region: nyc1 + droplet_id: +# Detach Block Storage from a Droplet +- digital_ocean_block_storage: + state: absent + command: attach + api_token: + volume_name: nyc1-block-storage + region: nyc1 + droplet_id: +''' + +RETURN = ''' +id: + description: Unique identifier of a Block Storage volume returned during creation. 
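Attach and detach requests are answered with HTTP 202 plus an action object, after which the module polls the actions endpoint until the action completes (poll_action_for_complete_status in the class below). An illustrative, self-contained rendering of that loop, with the REST call passed in as a plain callable:

    import time

    def wait_for_action_sketch(rest_get, action_id, timeout=300):
        # Sketch of the polling loop: DigitalOcean v2 actions move from
        # "in-progress" to "completed" or "errored".
        end = time.time() + timeout
        while time.time() < end:
            time.sleep(2)
            action = rest_get('actions/{0}'.format(action_id))['action']
            if action['status'] == 'completed':
                return True
            if action['status'] == 'errored':
                raise RuntimeError('action {0} errored'.format(action_id))
        raise RuntimeError('timed out waiting for action {0}'.format(action_id))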
+  returned: changed
+  type: str
+  sample: "69b25d9a-494c-12e6-a5af-001f53126b44"
+'''
+
+import time
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.digital_ocean import DigitalOceanHelper
+
+
+class DOBlockStorageException(Exception):
+    pass
+
+
+class DOBlockStorage(object):
+    def __init__(self, module):
+        self.module = module
+        self.rest = DigitalOceanHelper(module)
+
+    def get_key_or_fail(self, k):
+        v = self.module.params[k]
+        if v is None:
+            self.module.fail_json(msg='Unable to load %s' % k)
+        return v
+
+    def poll_action_for_complete_status(self, action_id):
+        url = 'actions/{0}'.format(action_id)
+        end_time = time.time() + self.module.params['timeout']
+        while time.time() < end_time:
+            time.sleep(2)
+            response = self.rest.get(url)
+            status = response.status_code
+            json = response.json
+            if status == 200:
+                if json['action']['status'] == 'completed':
+                    return True
+                elif json['action']['status'] == 'errored':
+                    raise DOBlockStorageException(json['message'])
+        raise DOBlockStorageException('Unable to reach api.digitalocean.com')
+
+    def get_attached_droplet_ID(self, volume_name, region):
+        url = 'volumes?name={0}&region={1}'.format(volume_name, region)
+        response = self.rest.get(url)
+        status = response.status_code
+        json = response.json
+        if status == 200:
+            volumes = json['volumes']
+            if len(volumes) > 0:
+                droplet_ids = volumes[0]['droplet_ids']
+                if len(droplet_ids) > 0:
+                    return droplet_ids[0]
+            return None
+        else:
+            raise DOBlockStorageException(json['message'])
+
+    def attach_detach_block_storage(self, method, volume_name, region, droplet_id):
+        data = {
+            'type': method,
+            'volume_name': volume_name,
+            'region': region,
+            'droplet_id': droplet_id
+        }
+        response = self.rest.post('volumes/actions', data=data)
+        status = response.status_code
+        json = response.json
+        if status == 202:
+            return self.poll_action_for_complete_status(json['action']['id'])
+        elif status == 200:
+            return True
+        elif status == 422:
+            return False
+        else:
+            raise DOBlockStorageException(json['message'])
+
+    def create_block_storage(self):
+        volume_name = self.get_key_or_fail('volume_name')
+        snapshot_id = self.module.params['snapshot_id']
+        if snapshot_id:
+            self.module.params['block_size'] = None
+            self.module.params['region'] = None
+            block_size = None
+            region = None
+        else:
+            block_size = self.get_key_or_fail('block_size')
+            region = self.get_key_or_fail('region')
+        description = self.module.params['description']
+        data = {
+            'size_gigabytes': block_size,
+            'name': volume_name,
+            'description': description,
+            'region': region,
+            'snapshot_id': snapshot_id,
+        }
+        response = self.rest.post("volumes", data=data)
+        status = response.status_code
+        json = response.json
+        if status == 201:
+            self.module.exit_json(changed=True, id=json['volume']['id'])
+        elif status == 409 and json['id'] == 'conflict':
+            self.module.exit_json(changed=False)
+        else:
+            raise DOBlockStorageException(json['message'])
+
+    def delete_block_storage(self):
+        volume_name = self.get_key_or_fail('volume_name')
+        region = self.get_key_or_fail('region')
+        url = 'volumes?name={0}&region={1}'.format(volume_name, region)
+        attached_droplet_id = self.get_attached_droplet_ID(volume_name, region)
+        if attached_droplet_id is not None:
+            self.attach_detach_block_storage('detach', volume_name, region, attached_droplet_id)
+        response = self.rest.delete(url)
+        status = response.status_code
+        json = response.json
+        if status == 204:
+
self.module.exit_json(changed=True) + elif status == 404: + self.module.exit_json(changed=False) + else: + raise DOBlockStorageException(json['message']) + + def attach_block_storage(self): + volume_name = self.get_key_or_fail('volume_name') + region = self.get_key_or_fail('region') + droplet_id = self.get_key_or_fail('droplet_id') + attached_droplet_id = self.get_attached_droplet_ID(volume_name, region) + if attached_droplet_id is not None: + if attached_droplet_id == droplet_id: + self.module.exit_json(changed=False) + else: + self.attach_detach_block_storage('detach', volume_name, region, attached_droplet_id) + changed_status = self.attach_detach_block_storage('attach', volume_name, region, droplet_id) + self.module.exit_json(changed=changed_status) + + def detach_block_storage(self): + volume_name = self.get_key_or_fail('volume_name') + region = self.get_key_or_fail('region') + droplet_id = self.get_key_or_fail('droplet_id') + changed_status = self.attach_detach_block_storage('detach', volume_name, region, droplet_id) + self.module.exit_json(changed=changed_status) + + +def handle_request(module): + block_storage = DOBlockStorage(module) + command = module.params['command'] + state = module.params['state'] + if command == 'create': + if state == 'present': + block_storage.create_block_storage() + elif state == 'absent': + block_storage.delete_block_storage() + elif command == 'attach': + if state == 'present': + block_storage.attach_block_storage() + elif state == 'absent': + block_storage.detach_block_storage() + + +def main(): + argument_spec = DigitalOceanHelper.digital_ocean_argument_spec() + argument_spec.update( + state=dict(choices=['present', 'absent'], required=True), + command=dict(choices=['create', 'attach'], required=True), + block_size=dict(type='int', required=False), + volume_name=dict(type='str', required=True), + description=dict(type='str'), + region=dict(type='str', required=False), + snapshot_id=dict(type='str', required=False), + droplet_id=dict(type='int') + ) + + module = AnsibleModule(argument_spec=argument_spec) + + try: + handle_request(module) + except DOBlockStorageException as e: + module.fail_json(msg=e.message, exception=traceback.format_exc()) + except KeyError as e: + module.fail_json(msg='Unable to load %s' % e.message, exception=traceback.format_exc()) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/digital_ocean/digital_ocean_certificate.py b/plugins/modules/cloud/digital_ocean/digital_ocean_certificate.py new file mode 100644 index 0000000000..792ba2cf6f --- /dev/null +++ b/plugins/modules/cloud/digital_ocean/digital_ocean_certificate.py @@ -0,0 +1,174 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright (c) 2017, Abhijeet Kasurde +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: digital_ocean_certificate +short_description: Manage certificates in DigitalOcean. +description: + - Create, Retrieve and remove certificates DigitalOcean. +author: "Abhijeet Kasurde (@Akasurde)" +options: + name: + description: + - The name of the certificate. + required: true + private_key: + description: + - A PEM-formatted private key content of SSL Certificate. + leaf_certificate: + description: + - A PEM-formatted public SSL Certificate. 
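For state=present the module first lists existing certificates and matches on name, failing if the name is already taken (see core() below). The name lookup itself, in isolation and with assumed sample data:

    # Name lookup over the "certificates" list returned by the API
    # (sample data; the module iterates the live response the same way).
    def find_certificate_id(certificates, name):
        for cert in certificates:
            if cert['name'] == name:
                return cert['id']
        return None

    existing = [{'id': 'abc123', 'name': 'production'}]
    assert find_certificate_id(existing, 'production') == 'abc123'
    assert find_certificate_id(existing, 'staging') is None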
+  certificate_chain:
+    description:
+    - The full PEM-formatted trust chain between the certificate authority's certificate and your domain's SSL certificate.
+  state:
+    description:
+     - Whether the certificate should be present or absent.
+    default: present
+    choices: ['present', 'absent']
+extends_documentation_fragment:
+- community.general.digital_ocean.documentation
+
+notes:
+  - Three environment variables can be used, C(DO_API_KEY), C(DO_OAUTH_TOKEN) and C(DO_API_TOKEN).
+    They all refer to the v2 token.
+'''
+
+
+EXAMPLES = '''
+- name: create a certificate
+  digital_ocean_certificate:
+    name: production
+    state: present
+    private_key: "-----BEGIN PRIVATE KEY-----\nMIIEvgIBADANBgkqhkM8OI7pRpgyj1I\n-----END PRIVATE KEY-----"
+    leaf_certificate: "-----BEGIN CERTIFICATE-----\nMIIFDmg2Iaw==\n-----END CERTIFICATE-----"
+    oauth_token: b7d03a6947b217efb6f3ec3bd365652
+
+- name: create a certificate using file lookup plugin
+  digital_ocean_certificate:
+    name: production
+    state: present
+    private_key: "{{ lookup('file', 'test.key') }}"
+    leaf_certificate: "{{ lookup('file', 'test.cert') }}"
+    oauth_token: "{{ oauth_token }}"
+
+- name: create a certificate with trust chain
+  digital_ocean_certificate:
+    name: production
+    state: present
+    private_key: "{{ lookup('file', 'test.key') }}"
+    leaf_certificate: "{{ lookup('file', 'test.cert') }}"
+    certificate_chain: "{{ lookup('file', 'chain.cert') }}"
+    oauth_token: "{{ oauth_token }}"
+
+- name: remove a certificate
+  digital_ocean_certificate:
+    name: production
+    state: absent
+    oauth_token: "{{ oauth_token }}"
+
+'''
+
+
+RETURN = ''' # '''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.digital_ocean import DigitalOceanHelper
+from ansible.module_utils._text import to_native
+
+
+def core(module):
+    state = module.params['state']
+    name = module.params['name']
+
+    rest = DigitalOceanHelper(module)
+
+    results = dict(changed=False)
+
+    response = rest.get('certificates')
+    status_code = response.status_code
+    resp_json = response.json
+
+    if status_code != 200:
+        module.fail_json(msg="Failed to retrieve certificates for DigitalOcean")
+
+    if state == 'present':
+        for cert in resp_json['certificates']:
+            if cert['name'] == name:
+                module.fail_json(msg="Certificate name %s already exists" % name)
+
+        # Certificate does not exist, let us create it
+        cert_data = dict(name=name,
+                         private_key=module.params['private_key'],
+                         leaf_certificate=module.params['leaf_certificate'])
+
+        if module.params['certificate_chain'] is not None:
+            cert_data.update(certificate_chain=module.params['certificate_chain'])
+
+        response = rest.post("certificates", data=cert_data)
+        status_code = response.status_code
+        if status_code == 500:
+            module.fail_json(msg="Failed to upload certificates as the certificates are malformed.")
+
+        resp_json = response.json
+        if status_code == 201:
+            results.update(changed=True, response=resp_json)
+        elif status_code == 422:
+            results.update(changed=False, response=resp_json)
+
+    elif state == 'absent':
+        cert_id_del = None
+        for cert in resp_json['certificates']:
+            if cert['name'] == name:
+                cert_id_del = cert['id']
+
+        if cert_id_del is not None:
+            url = "certificates/{0}".format(cert_id_del)
+            response = rest.delete(url)
+            if response.status_code == 204:
+                results.update(changed=True)
+            else:
+                results.update(changed=False)
+        else:
+            module.fail_json(msg="Failed to find certificate %s" % name)
+
+    module.exit_json(**results)
+
+
+def main():
+    argument_spec =
DigitalOceanHelper.digital_ocean_argument_spec() + argument_spec.update( + name=dict(type='str'), + leaf_certificate=dict(type='str'), + private_key=dict(type='str', no_log=True), + state=dict(choices=['present', 'absent'], default='present'), + certificate_chain=dict(type='str') + ) + + module = AnsibleModule( + argument_spec=argument_spec, + required_if=[('state', 'present', ['name', 'leaf_certificate', 'private_key']), + ('state', 'absent', ['name']) + ], + ) + + try: + core(module) + except Exception as e: + module.fail_json(msg=to_native(e)) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/digital_ocean/digital_ocean_certificate_facts.py b/plugins/modules/cloud/digital_ocean/digital_ocean_certificate_facts.py new file mode 120000 index 0000000000..adbb7c1ccf --- /dev/null +++ b/plugins/modules/cloud/digital_ocean/digital_ocean_certificate_facts.py @@ -0,0 +1 @@ +digital_ocean_certificate_info.py \ No newline at end of file diff --git a/plugins/modules/cloud/digital_ocean/digital_ocean_certificate_info.py b/plugins/modules/cloud/digital_ocean/digital_ocean_certificate_info.py new file mode 100644 index 0000000000..afa900f49a --- /dev/null +++ b/plugins/modules/cloud/digital_ocean/digital_ocean_certificate_info.py @@ -0,0 +1,119 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright: (c) 2018, Ansible Project +# Copyright: (c) 2018, Abhijeet Kasurde +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = { + 'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community' +} + + +DOCUMENTATION = ''' +--- +module: digital_ocean_certificate_info +short_description: Gather information about DigitalOcean certificates +description: + - This module can be used to gather information about DigitalOcean provided certificates. + - This module was called C(digital_ocean_certificate_facts) before Ansible 2.9. The usage did not change. +author: "Abhijeet Kasurde (@Akasurde)" +options: + certificate_id: + description: + - Certificate ID that can be used to identify and reference a certificate. 
+    required: false
+requirements:
+  - "python >= 2.6"
+extends_documentation_fragment:
+- community.general.digital_ocean.documentation
+
+'''
+
+
+EXAMPLES = '''
+- name: Gather information about all certificates
+  digital_ocean_certificate_info:
+    oauth_token: "{{ oauth_token }}"
+
+- name: Gather information about certificate with given id
+  digital_ocean_certificate_info:
+    oauth_token: "{{ oauth_token }}"
+    certificate_id: "892071a0-bb95-49bc-8021-3afd67a210bf"
+
+- name: Get not_after information about a certificate
+  digital_ocean_certificate_info:
+  register: resp_out
+- set_fact:
+    not_after_date: "{{ item.not_after }}"
+  loop: "{{ resp_out.data|json_query(name) }}"
+  vars:
+    name: "[?name=='web-cert-01']"
+- debug: var=not_after_date
+'''
+
+
+RETURN = '''
+data:
+    description: DigitalOcean certificate information
+    returned: success
+    type: list
+    sample: [
+        {
+          "id": "892071a0-bb95-49bc-8021-3afd67a210bf",
+          "name": "web-cert-01",
+          "not_after": "2017-02-22T00:23:00Z",
+          "sha1_fingerprint": "dfcc9f57d86bf58e321c2c6c31c7a971be244ac7",
+          "created_at": "2017-02-08T16:02:37Z"
+        },
+    ]
+'''
+
+from traceback import format_exc
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.digital_ocean import DigitalOceanHelper
+from ansible.module_utils._text import to_native
+
+
+def core(module):
+    certificate_id = module.params.get('certificate_id', None)
+    rest = DigitalOceanHelper(module)
+
+    base_url = 'certificates?'
+    if certificate_id is not None:
+        response = rest.get("certificates/%s" % certificate_id)
+        status_code = response.status_code
+
+        if status_code != 200:
+            module.fail_json(msg="Failed to retrieve certificates for DigitalOcean")
+
+        resp_json = response.json
+        certificate = resp_json['certificate']
+    else:
+        certificate = rest.get_paginated_data(base_url=base_url, data_key_name='certificates')
+
+    module.exit_json(changed=False, data=certificate)
+
+
+def main():
+    argument_spec = DigitalOceanHelper.digital_ocean_argument_spec()
+    argument_spec.update(
+        certificate_id=dict(type='str', required=False),
+    )
+    module = AnsibleModule(argument_spec=argument_spec)
+    if module._name == 'digital_ocean_certificate_facts':
+        module.deprecate("The 'digital_ocean_certificate_facts' module has been renamed to 'digital_ocean_certificate_info'", version='2.13')
+
+    try:
+        core(module)
+    except Exception as e:
+        module.fail_json(msg=to_native(e), exception=format_exc())
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/cloud/digital_ocean/digital_ocean_domain.py b/plugins/modules/cloud/digital_ocean/digital_ocean_domain.py
new file mode 100644
index 0000000000..4236af8244
--- /dev/null
+++ b/plugins/modules/cloud/digital_ocean/digital_ocean_domain.py
@@ -0,0 +1,221 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['stableinterface'],
+                    'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: digital_ocean_domain
+short_description: Create/delete a DNS domain in DigitalOcean
+description:
+     - Create/delete a DNS domain in DigitalOcean.
+author: "Michael Gregson (@mgregson)"
+options:
+  state:
+    description:
+     - Indicate desired state of the target.
+    default: present
+    choices: ['present', 'absent']
+  id:
+    description:
+     - Numeric, the droplet id you want to operate on.
+    aliases: ['droplet_id']
+  name:
+    description:
+     - String, this is the name of the domain - it must be formatted by hostname rules.
+  ip:
+    description:
+     - An 'A' record for '@' ($ORIGIN) will be created with the value 'ip'. 'ip' is an IP version 4 address.
+extends_documentation_fragment:
+- community.general.digital_ocean.documentation
+
+notes:
+  - The environment variable C(DO_OAUTH_TOKEN) can be used for the oauth_token.
+  - As of Ansible 1.9.5 and 2.0, Version 2 of the DigitalOcean API is used; this removes C(client_id) and C(api_key) options in favor of C(oauth_token).
+  - If you are running Ansible 1.9.4 or earlier you might not be able to use the included version of this module as the API version used has been retired.
+
+requirements:
+  - "python >= 2.6"
+'''
+
+
+EXAMPLES = '''
+# Create a domain
+
+- digital_ocean_domain:
+    state: present
+    name: my.digitalocean.domain
+    ip: 127.0.0.1
+
+# Create a droplet and a corresponding domain
+
+- digital_ocean:
+    state: present
+    name: test_droplet
+    size_id: 1gb
+    region_id: sgp1
+    image_id: ubuntu-14-04-x64
+  register: test_droplet
+
+- digital_ocean_domain:
+    state: present
+    name: "{{ test_droplet.droplet.name }}.my.domain"
+    ip: "{{ test_droplet.droplet.ip_address }}"
+
+'''
+
+import traceback
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.digital_ocean import DigitalOceanHelper
+from ansible.module_utils._text import to_native
+
+
+class DoManager(DigitalOceanHelper, object):
+    def __init__(self, module):
+        super(DoManager, self).__init__(module)
+        self.domain_name = module.params.get('name', None)
+        self.domain_ip = module.params.get('ip', None)
+        self.domain_id = module.params.get('id', None)
+
+    @staticmethod
+    def jsonify(response):
+        return response.status_code, response.json
+
+    def all_domains(self):
+        resp = self.get('domains/')
+        return resp
+
+    def find(self):
+        if self.domain_name is None and self.domain_id is None:
+            return False
+
+        domains = self.all_domains()
+        status, json = self.jsonify(domains)
+        for domain in json['domains']:
+            if domain['name'] == self.domain_name:
+                return True
+        return False
+
+    def add(self):
+        params = {'name': self.domain_name, 'ip_address': self.domain_ip}
+        resp = self.post('domains/', data=params)
+        status = resp.status_code
+        json = resp.json
+        if status == 201:
+            return json['domain']
+        else:
+            return json
+
+    def all_domain_records(self):
+        resp = self.get('domains/%s/records/' % self.domain_name)
+        return resp.json
+
+    def domain_record(self):
+        resp = self.get('domains/%s' % self.domain_name)
+        status, json = self.jsonify(resp)
+        return json
+
+    def destroy_domain(self):
+        resp = self.delete('domains/%s' % self.domain_name)
+        status, json = self.jsonify(resp)
+        if status == 204:
+            return True
+        else:
+            return json
+
+    def edit_domain_record(self, record):
+        params = {'name': '@',
+                  'data': self.module.params.get('ip')}
+        resp = self.put('domains/%s/records/%s' % (self.domain_name, record['id']), data=params)
+        status, json = self.jsonify(resp)
+
+        return json['domain_record']
+
+    def create_domain_record(self):
+        params = {'name': '@',
+                  'type': 'A',
+                  'data': self.module.params.get('ip')}
+
+        resp = self.post('domains/%s/records' % (self.domain_name), data=params)
+        status, json = self.jsonify(resp)
+
+        return json['domain_record']
+
+
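+# Overview of the control flow in core() below (descriptive comment only):
+# find() checks whether the domain already exists by name, add() creates it
+# via POST domains/, and for an existing domain the '@' A record is looked
+# up and then created or updated so it always matches the requested C(ip).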
+def core(module):
+    do_manager = DoManager(module)
+    state = module.params.get('state')
+
+    domain = do_manager.find()
+    if state == 'present':
+        if not domain:
+            domain = do_manager.add()
+            if 'message' in domain:
+                module.fail_json(changed=False, msg=domain['message'])
+            else:
+                module.exit_json(changed=True, domain=domain)
+        else:
+            records = do_manager.all_domain_records()
+            at_record = None
+            for record in records['domain_records']:
+                if record['name'] == "@" and record['type'] == 'A':
+                    at_record = record
+
+            if not at_record:
+                do_manager.create_domain_record()
+                module.exit_json(changed=True, domain=do_manager.find())
+            elif at_record['data'] != module.params.get('ip'):
+                do_manager.edit_domain_record(at_record)
+                module.exit_json(changed=True, domain=do_manager.find())
+            else:
+                module.exit_json(changed=False, domain=do_manager.domain_record())
+
+    elif state == 'absent':
+        if not domain:
+            module.exit_json(changed=False, msg="Domain not found")
+        else:
+            # destroy_domain() returns True on success or the error payload
+            # on failure, so compare against True explicitly.
+            delete_event = do_manager.destroy_domain()
+            if delete_event is not True:
+                module.fail_json(changed=False, msg=delete_event['message'])
+            else:
+                module.exit_json(changed=True, event=None)
+
+
+def main():
+    argument_spec = DigitalOceanHelper.digital_ocean_argument_spec()
+    argument_spec.update(
+        state=dict(choices=['present', 'absent'], default='present'),
+        name=dict(type='str'),
+        id=dict(aliases=['droplet_id'], type='int'),
+        ip=dict(type='str')
+    )
+
+    module = AnsibleModule(
+        argument_spec=argument_spec,
+        required_one_of=(
+            ['id', 'name'],
+        ),
+    )
+
+    try:
+        core(module)
+    except Exception as e:
+        module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/cloud/digital_ocean/digital_ocean_domain_facts.py b/plugins/modules/cloud/digital_ocean/digital_ocean_domain_facts.py
new file mode 120000
index 0000000000..f226660018
--- /dev/null
+++ b/plugins/modules/cloud/digital_ocean/digital_ocean_domain_facts.py
@@ -0,0 +1 @@
+digital_ocean_domain_info.py
\ No newline at end of file
diff --git a/plugins/modules/cloud/digital_ocean/digital_ocean_domain_info.py b/plugins/modules/cloud/digital_ocean/digital_ocean_domain_info.py
new file mode 100644
index 0000000000..25eac2d172
--- /dev/null
+++ b/plugins/modules/cloud/digital_ocean/digital_ocean_domain_info.py
@@ -0,0 +1,144 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright: (c) 2018, Ansible Project
+# Copyright: (c) 2018, Abhijeet Kasurde
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {
+    'metadata_version': '1.1',
+    'status': ['preview'],
+    'supported_by': 'community'
+}
+
+
+DOCUMENTATION = '''
+---
+module: digital_ocean_domain_info
+short_description: Gather information about DigitalOcean Domains
+description:
+    - This module can be used to gather information about DigitalOcean provided Domains.
+    - This module was called C(digital_ocean_domain_facts) before Ansible 2.9. The usage did not change.
+author: "Abhijeet Kasurde (@Akasurde)"
+options:
+  domain_name:
+    description:
+     - Name of the domain to gather information for.
+ required: false +requirements: + - "python >= 2.6" +extends_documentation_fragment: +- community.general.digital_ocean.documentation + +''' + + +EXAMPLES = ''' +- name: Gather information about all domains + digital_ocean_domain_info: + oauth_token: "{{ oauth_token }}" + +- name: Gather information about domain with given name + digital_ocean_domain_info: + oauth_token: "{{ oauth_token }}" + domain_name: "example.com" + +- name: Get ttl from domain + digital_ocean_domain_info: + register: resp_out +- set_fact: + domain_ttl: "{{ item.ttl }}" + loop: "{{ resp_out.data|json_query(name) }}" + vars: + name: "[?name=='example.com']" +- debug: var=domain_ttl +''' + + +RETURN = ''' +data: + description: DigitalOcean Domain information + returned: success + type: list + sample: [ + { + "domain_records": [ + { + "data": "ns1.digitalocean.com", + "flags": null, + "id": 37826823, + "name": "@", + "port": null, + "priority": null, + "tag": null, + "ttl": 1800, + "type": "NS", + "weight": null + }, + ], + "name": "myexample123.com", + "ttl": 1800, + "zone_file": "myexample123.com. IN SOA ns1.digitalocean.com. hostmaster.myexample123.com. 1520702984 10800 3600 604800 1800\n", + }, + ] +''' + +from traceback import format_exc +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.digital_ocean import DigitalOceanHelper +from ansible.module_utils._text import to_native + + +def core(module): + domain_name = module.params.get('domain_name', None) + rest = DigitalOceanHelper(module) + domain_results = [] + + if domain_name is not None: + response = rest.get("domains/%s" % domain_name) + status_code = response.status_code + + if status_code != 200: + module.fail_json(msg="Failed to retrieve domain for DigitalOcean") + + resp_json = response.json + domains = [resp_json['domain']] + else: + domains = rest.get_paginated_data(base_url="domains?", data_key_name='domains') + + for temp_domain in domains: + temp_domain_dict = { + "name": temp_domain['name'], + "ttl": temp_domain['ttl'], + "zone_file": temp_domain['zone_file'], + "domain_records": list(), + } + + base_url = "domains/%s/records?" 
% temp_domain['name'] + + temp_domain_dict["domain_records"] = rest.get_paginated_data(base_url=base_url, data_key_name='domain_records') + domain_results.append(temp_domain_dict) + + module.exit_json(changed=False, data=domain_results) + + +def main(): + argument_spec = DigitalOceanHelper.digital_ocean_argument_spec() + argument_spec.update( + domain_name=dict(type='str', required=False), + ) + module = AnsibleModule(argument_spec=argument_spec) + if module._name == 'digital_ocean_domain_facts': + module.deprecate("The 'digital_ocean_domain_facts' module has been renamed to 'digital_ocean_domain_info'", version='2.13') + + try: + core(module) + except Exception as e: + module.fail_json(msg=to_native(e), exception=format_exc()) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/digital_ocean/digital_ocean_droplet.py b/plugins/modules/cloud/digital_ocean/digital_ocean_droplet.py new file mode 100644 index 0000000000..6427a2a001 --- /dev/null +++ b/plugins/modules/cloud/digital_ocean/digital_ocean_droplet.py @@ -0,0 +1,349 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: digital_ocean_droplet +short_description: Create and delete a DigitalOcean droplet +description: + - Create and delete a droplet in DigitalOcean and optionally wait for it to be active. +author: "Gurchet Rai (@gurch101)" +options: + state: + description: + - Indicate desired state of the target. + default: present + choices: ['present', 'absent'] + id: + description: + - Numeric, the droplet id you want to operate on. + aliases: ['droplet_id'] + name: + description: + - String, this is the name of the droplet - must be formatted by hostname rules. + unique_name: + description: + - require unique hostnames. By default, DigitalOcean allows multiple hosts with the same name. Setting this to "yes" allows only one host + per name. Useful for idempotence. + default: False + type: bool + size: + description: + - This is the slug of the size you would like the droplet created with. + aliases: ['size_id'] + image: + description: + - This is the slug of the image you would like the droplet created with. + aliases: ['image_id'] + region: + description: + - This is the slug of the region you would like your server to be created in. + aliases: ['region_id'] + ssh_keys: + description: + - array of SSH key Fingerprint that you would like to be added to the server. + required: False + private_networking: + description: + - add an additional, private network interface to droplet for inter-droplet communication. + default: False + type: bool + user_data: + description: + - opaque blob of data which is made available to the droplet + required: False + ipv6: + description: + - enable IPv6 for your droplet. + required: False + default: False + type: bool + wait: + description: + - Wait for the droplet to be active before returning. If wait is "no" an ip_address may not be returned. + required: False + default: True + type: bool + wait_timeout: + description: + - How long before wait gives up, in seconds, when creating a droplet. + default: 120 + backups: + description: + - indicates whether automated backups should be enabled. 
+ required: False + default: False + type: bool + monitoring: + description: + - indicates whether to install the DigitalOcean agent for monitoring. + required: False + default: False + type: bool + tags: + description: + - List, A list of tag names as strings to apply to the Droplet after it is created. Tag names can either be existing or new tags. + required: False + volumes: + description: + - List, A list including the unique string identifier for each Block Storage volume to be attached to the Droplet. + required: False + oauth_token: + description: + - DigitalOcean OAuth token. Can be specified in C(DO_API_KEY), C(DO_API_TOKEN), or C(DO_OAUTH_TOKEN) environment variables + aliases: ['API_TOKEN'] + required: True +requirements: + - "python >= 2.6" +''' + + +EXAMPLES = ''' +- name: create a new droplet + digital_ocean_droplet: + state: present + name: mydroplet + oauth_token: XXX + size: 2gb + region: sfo1 + image: ubuntu-16-04-x64 + wait_timeout: 500 + ssh_keys: [ .... ] + register: my_droplet + +- debug: + msg: "ID is {{ my_droplet.data.droplet.id }}, IP is {{ my_droplet.data.ip_address }}" + +- name: ensure a droplet is present + digital_ocean_droplet: + state: present + id: 123 + name: mydroplet + oauth_token: XXX + size: 2gb + region: sfo1 + image: ubuntu-16-04-x64 + wait_timeout: 500 + +- name: ensure a droplet is present with SSH keys installed + digital_ocean_droplet: + state: present + id: 123 + name: mydroplet + oauth_token: XXX + size: 2gb + region: sfo1 + ssh_keys: ['1534404', '1784768'] + image: ubuntu-16-04-x64 + wait_timeout: 500 +''' + +RETURN = ''' +# Digital Ocean API info https://developers.digitalocean.com/documentation/v2/#droplets +data: + description: a DigitalOcean Droplet + returned: changed + type: dict + sample: { + "ip_address": "104.248.118.172", + "ipv6_address": "2604:a880:400:d1::90a:6001", + "private_ipv4_address": "10.136.122.141", + "droplet": { + "id": 3164494, + "name": "example.com", + "memory": 512, + "vcpus": 1, + "disk": 20, + "locked": true, + "status": "new", + "kernel": { + "id": 2233, + "name": "Ubuntu 14.04 x64 vmlinuz-3.13.0-37-generic", + "version": "3.13.0-37-generic" + }, + "created_at": "2014-11-14T16:36:31Z", + "features": ["virtio"], + "backup_ids": [], + "snapshot_ids": [], + "image": {}, + "volume_ids": [], + "size": {}, + "size_slug": "512mb", + "networks": {}, + "region": {}, + "tags": ["web"] + } + } +''' + +import time +import json +from ansible.module_utils.basic import AnsibleModule, env_fallback +from ansible_collections.community.general.plugins.module_utils.digital_ocean import DigitalOceanHelper + + +class DODroplet(object): + def __init__(self, module): + self.rest = DigitalOceanHelper(module) + self.module = module + self.wait = self.module.params.pop('wait', True) + self.wait_timeout = self.module.params.pop('wait_timeout', 120) + self.unique_name = self.module.params.pop('unique_name', False) + # pop the oauth token so we don't include it in the POST data + self.module.params.pop('oauth_token') + + def get_by_id(self, droplet_id): + if not droplet_id: + return None + response = self.rest.get('droplets/{0}'.format(droplet_id)) + json_data = response.json + if response.status_code == 200: + return json_data + return None + + def get_by_name(self, droplet_name): + if not droplet_name: + return None + page = 1 + while page is not None: + response = self.rest.get('droplets?page={0}'.format(page)) + json_data = response.json + if response.status_code == 200: + for droplet in json_data['droplets']: + if droplet['name'] == 
droplet_name:
+                    return {'droplet': droplet}
+            if 'links' in json_data and 'pages' in json_data['links'] and 'next' in json_data['links']['pages']:
+                page += 1
+            else:
+                page = None
+        return None
+
+    def get_addresses(self, data):
+        """
+        Expose IP addresses as their own properties so users can reference them in later tasks
+        """
+        _data = data
+        for k, v in data.items():
+            setattr(self, k, v)
+        networks = _data['droplet']['networks']
+        for network in networks.get('v4', []):
+            if network['type'] == 'public':
+                _data['ip_address'] = network['ip_address']
+            else:
+                _data['private_ipv4_address'] = network['ip_address']
+        for network in networks.get('v6', []):
+            if network['type'] == 'public':
+                _data['ipv6_address'] = network['ip_address']
+            else:
+                _data['private_ipv6_address'] = network['ip_address']
+        return _data
+
+    def get_droplet(self):
+        json_data = self.get_by_id(self.module.params['id'])
+        if not json_data and self.unique_name:
+            json_data = self.get_by_name(self.module.params['name'])
+        return json_data
+
+    def create(self):
+        json_data = self.get_droplet()
+        droplet_data = None
+        if json_data:
+            droplet_data = self.get_addresses(json_data)
+            self.module.exit_json(changed=False, data=droplet_data)
+        if self.module.check_mode:
+            self.module.exit_json(changed=True)
+        request_params = dict(self.module.params)
+        del request_params['id']
+        response = self.rest.post('droplets', data=request_params)
+        json_data = response.json
+        if response.status_code >= 400:
+            self.module.fail_json(changed=False, msg=json_data['message'])
+        if self.wait:
+            json_data = self.ensure_power_on(json_data['droplet']['id'])
+        droplet_data = self.get_addresses(json_data)
+        self.module.exit_json(changed=True, data=droplet_data)
+
+    def delete(self):
+        json_data = self.get_droplet()
+        if json_data:
+            if self.module.check_mode:
+                self.module.exit_json(changed=True)
+            response = self.rest.delete('droplets/{0}'.format(json_data['droplet']['id']))
+            json_data = response.json
+            if response.status_code == 204:
+                self.module.exit_json(changed=True, msg='Droplet deleted')
+            self.module.fail_json(changed=False, msg='Failed to delete droplet')
+        else:
+            self.module.exit_json(changed=False, msg='Droplet not found')
+
+    def ensure_power_on(self, droplet_id):
+        end_time = time.time() + self.wait_timeout
+        while time.time() < end_time:
+            response = self.rest.get('droplets/{0}'.format(droplet_id))
+            json_data = response.json
+            if json_data['droplet']['status'] == 'active':
+                return json_data
+            time.sleep(min(2, end_time - time.time()))
+        self.module.fail_json(msg='Wait for droplet powering on timeout')
+
+
+def core(module):
+    state = module.params.pop('state')
+    droplet = DODroplet(module)
+    if state == 'present':
+        droplet.create()
+    elif state == 'absent':
+        droplet.delete()
+
+
+def main():
+    module = AnsibleModule(
+        argument_spec=dict(
+            state=dict(choices=['present', 'absent'], default='present'),
+            oauth_token=dict(
+                aliases=['API_TOKEN'],
+                no_log=True,
+                fallback=(env_fallback, ['DO_API_TOKEN', 'DO_API_KEY', 'DO_OAUTH_TOKEN'])
+            ),
+            name=dict(type='str'),
+            size=dict(aliases=['size_id']),
+            image=dict(aliases=['image_id']),
+            region=dict(aliases=['region_id']),
+            ssh_keys=dict(type='list'),
+            private_networking=dict(type='bool', default=False),
+            backups=dict(type='bool', default=False),
+            monitoring=dict(type='bool', default=False),
+            id=dict(aliases=['droplet_id'], type='int'),
+            user_data=dict(default=None),
+            ipv6=dict(type='bool', default=False),
+            volumes=dict(type='list'),
+            tags=dict(type='list'),
+
wait=dict(type='bool', default=True), + wait_timeout=dict(default=120, type='int'), + unique_name=dict(type='bool', default=False), + ), + required_one_of=( + ['id', 'name'], + ), + required_if=([ + ('state', 'present', ['name', 'size', 'image', 'region']), + ]), + supports_check_mode=True, + ) + + core(module) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/digital_ocean/digital_ocean_firewall_facts.py b/plugins/modules/cloud/digital_ocean/digital_ocean_firewall_facts.py new file mode 120000 index 0000000000..847f422b67 --- /dev/null +++ b/plugins/modules/cloud/digital_ocean/digital_ocean_firewall_facts.py @@ -0,0 +1 @@ +digital_ocean_firewall_info.py \ No newline at end of file diff --git a/plugins/modules/cloud/digital_ocean/digital_ocean_firewall_info.py b/plugins/modules/cloud/digital_ocean/digital_ocean_firewall_info.py new file mode 100644 index 0000000000..313e649284 --- /dev/null +++ b/plugins/modules/cloud/digital_ocean/digital_ocean_firewall_info.py @@ -0,0 +1,137 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright: (c) 2018, Ansible Project +# Copyright: (c) 2018, Anthony Bond +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = { + 'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community' +} + + +DOCUMENTATION = ''' +--- +module: digital_ocean_firewall_info +short_description: Gather information about DigitalOcean firewalls +description: + - This module can be used to gather information about DigitalOcean firewalls. + - This module was called C(digital_ocean_firewall_facts) before Ansible 2.9. The usage did not change. +author: "Anthony Bond (@BondAnthony)" +options: + name: + description: + - Firewall rule name that can be used to identify and reference a specific firewall rule. + required: false +requirements: + - "python >= 2.6" +extends_documentation_fragment: +- community.general.digital_ocean.documentation + +''' + + +EXAMPLES = ''' +- name: Gather information about all firewalls + digital_ocean_firewall_info: + oauth_token: "{{ oauth_token }}" + +- name: Gather information about a specific firewall by name + digital_ocean_firewall_info: + oauth_token: "{{ oauth_token }}" + name: "firewall_name" + +- name: Gather information from a firewall rule + digital_ocean_firewall_info: + name: SSH + register: resp_out + +- set_fact: + firewall_id: "{{ resp_out.data.id }}" + +- debug: + msg: "{{ firewall_id }}" +''' + + +RETURN = ''' +data: + description: DigitalOcean firewall information + returned: success + type: list + sample: [ + { + "id": "435tbg678-1db53-32b6-t543-28322569t252", + "name": "metrics", + "status": "succeeded", + "inbound_rules": [ + { + "protocol": "tcp", + "ports": "9100", + "sources": { + "addresses": [ + "1.1.1.1" + ] + } + } + ], + "outbound_rules": [], + "created_at": "2018-01-15T07:04:25Z", + "droplet_ids": [ + 87426985 + ], + "tags": [], + "pending_changes": [] + }, + ] +''' + +from traceback import format_exc +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.digital_ocean import DigitalOceanHelper +from ansible.module_utils._text import to_native + + +def core(module): + firewall_name = module.params.get('name', None) + rest = DigitalOceanHelper(module) + base_url = 'firewalls?' 
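+    # The trailing '?' lets the shared helper's get_paginated_data() append
+    # its page/per_page query parameters directly; the plain GET below
+    # appears to serve only as an up-front availability and authentication
+    # check before paginating through all firewalls.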
+ + response = rest.get("%s" % base_url) + status_code = response.status_code + if status_code != 200: + module.fail_json(msg="Failed to retrieve firewalls from Digital Ocean") + firewalls = rest.get_paginated_data(base_url=base_url, data_key_name='firewalls') + + if firewall_name is not None: + rule = {} + for firewall in firewalls: + if firewall['name'] == firewall_name: + rule.update(firewall) + module.exit_json(changed=False, data=rule) + else: + module.exit_json(changed=False, data=firewalls) + + +def main(): + argument_spec = DigitalOceanHelper.digital_ocean_argument_spec() + argument_spec.update( + name=dict(type='str', required=False), + ) + module = AnsibleModule(argument_spec=argument_spec) + if module._name == 'digital_ocean_firewall_facts': + module.deprecate("The 'digital_ocean_firewall_facts' module has been renamed to 'digital_ocean_firewall_info'", version='2.13') + + try: + core(module) + except Exception as e: + module.fail_json(msg=to_native(e), exception=format_exc()) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/digital_ocean/digital_ocean_floating_ip.py b/plugins/modules/cloud/digital_ocean/digital_ocean_floating_ip.py new file mode 100644 index 0000000000..f1e156e9c3 --- /dev/null +++ b/plugins/modules/cloud/digital_ocean/digital_ocean_floating_ip.py @@ -0,0 +1,315 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# (c) 2015, Patrick F. Marques +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: digital_ocean_floating_ip +short_description: Manage DigitalOcean Floating IPs +description: + - Create/delete/assign a floating IP. +author: "Patrick Marques (@pmarques)" +options: + state: + description: + - Indicate desired state of the target. + default: present + choices: ['present', 'absent'] + ip: + description: + - Public IP address of the Floating IP. Used to remove an IP + region: + description: + - The region that the Floating IP is reserved to. + droplet_id: + description: + - The Droplet that the Floating IP has been assigned to. + oauth_token: + description: + - DigitalOcean OAuth token. + required: true +notes: + - Version 2 of DigitalOcean API is used. 
+requirements:
+  - "python >= 2.6"
+'''
+
+
+EXAMPLES = '''
+- name: "Create a Floating IP in region lon1"
+  digital_ocean_floating_ip:
+    state: present
+    region: lon1
+
+- name: "Create a Floating IP assigned to Droplet ID 123456"
+  digital_ocean_floating_ip:
+    state: present
+    droplet_id: 123456
+
+- name: "Delete a Floating IP with ip 1.2.3.4"
+  digital_ocean_floating_ip:
+    state: absent
+    ip: "1.2.3.4"
+
+'''
+
+
+RETURN = '''
+# Digital Ocean API info https://developers.digitalocean.com/documentation/v2/#floating-ips
+data:
+    description: a DigitalOcean Floating IP resource
+    returned: success and no resource constraint
+    type: dict
+    sample: {
+        "action": {
+            "id": 68212728,
+            "status": "in-progress",
+            "type": "assign_ip",
+            "started_at": "2015-10-15T17:45:44Z",
+            "completed_at": null,
+            "resource_id": 758603823,
+            "resource_type": "floating_ip",
+            "region": {
+                "name": "New York 3",
+                "slug": "nyc3",
+                "sizes": [
+                    "512mb",
+                    "1gb",
+                    "2gb",
+                    "4gb",
+                    "8gb",
+                    "16gb",
+                    "32gb",
+                    "48gb",
+                    "64gb"
+                ],
+                "features": [
+                    "private_networking",
+                    "backups",
+                    "ipv6",
+                    "metadata"
+                ],
+                "available": true
+            },
+            "region_slug": "nyc3"
+        }
+    }
+'''
+
+import json
+import time
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.basic import env_fallback
+from ansible.module_utils.urls import fetch_url
+
+
+class Response(object):
+
+    def __init__(self, resp, info):
+        self.body = None
+        if resp:
+            self.body = resp.read()
+        self.info = info
+
+    @property
+    def json(self):
+        if not self.body:
+            if "body" in self.info:
+                return json.loads(self.info["body"])
+            return None
+        try:
+            return json.loads(self.body)
+        except ValueError:
+            return None
+
+    @property
+    def status_code(self):
+        return self.info["status"]
+
+
+class Rest(object):
+
+    def __init__(self, module, headers):
+        self.module = module
+        self.headers = headers
+        self.baseurl = 'https://api.digitalocean.com/v2'
+
+    def _url_builder(self, path):
+        if path[0] == '/':
+            path = path[1:]
+        return '%s/%s' % (self.baseurl, path)
+
+    def send(self, method, path, data=None, headers=None):
+        url = self._url_builder(path)
+        data = self.module.jsonify(data)
+        timeout = self.module.params['timeout']
+
+        resp, info = fetch_url(self.module, url, data=data, headers=self.headers, method=method, timeout=timeout)
+
+        # Exceptions in fetch_url may result in a status -1; this ensures a
+        # proper error is reported to the user in that case.
+        if info['status'] == -1:
+            self.module.fail_json(msg=info['msg'])
+
+        return Response(resp, info)
+
+    def get(self, path, data=None, headers=None):
+        return self.send('GET', path, data, headers)
+
+    def put(self, path, data=None, headers=None):
+        return self.send('PUT', path, data, headers)
+
+    def post(self, path, data=None, headers=None):
+        return self.send('POST', path, data, headers)
+
+    def delete(self, path, data=None, headers=None):
+        return self.send('DELETE', path, data, headers)
+
+
+def wait_action(module, rest, ip, action_id, timeout=10):
+    end_time = time.time() + timeout
+    while time.time() < end_time:
+        response = rest.get('floating_ips/{0}/actions/{1}'.format(ip, action_id))
+        status_code = response.status_code
+        status = response.json['action']['status']
+        # TODO: check status_code == 200?
+        if status == 'completed':
+            return True
+        elif status == 'errored':
+            module.fail_json(msg='Floating ip action error [ip: {0}: action: {1}]'.format(
+                ip, action_id), data=response.json)
+
+    module.fail_json(msg='Floating ip action timeout [ip: {0}: action: {1}]'.format(
+        ip, action_id))
+
+
+def core(module):
+    api_token = module.params['oauth_token']
+    state = module.params['state']
+    ip = module.params['ip']
+    droplet_id = module.params['droplet_id']
+
+    rest = Rest(module, {'Authorization': 'Bearer {0}'.format(api_token),
+                         'Content-type': 'application/json'})
+
+    if state == 'present':
+        if droplet_id is not None and module.params['ip'] is not None:
+            # Lets try to associate the ip to the specified droplet
+            associate_floating_ips(module, rest)
+        else:
+            create_floating_ips(module, rest)
+
+    elif state == 'absent':
+        response = rest.delete("floating_ips/{0}".format(ip))
+        status_code = response.status_code
+        json_data = response.json
+        if status_code == 204:
+            module.exit_json(changed=True)
+        elif status_code == 404:
+            module.exit_json(changed=False)
+        else:
+            module.exit_json(changed=False, data=json_data)
+
+
+def get_floating_ip_details(module, rest):
+    ip = module.params['ip']
+
+    response = rest.get("floating_ips/{0}".format(ip))
+    status_code = response.status_code
+    json_data = response.json
+    if status_code == 200:
+        return json_data['floating_ip']
+    else:
+        module.fail_json(msg="Error assigning floating ip [{0}: {1}]".format(
+            status_code, json_data["message"]), region=module.params['region'])
+
+
+def assign_floating_id_to_droplet(module, rest):
+    ip = module.params['ip']
+
+    payload = {
+        "type": "assign",
+        "droplet_id": module.params['droplet_id'],
+    }
+
+    response = rest.post("floating_ips/{0}/actions".format(ip), data=payload)
+    status_code = response.status_code
+    json_data = response.json
+    if status_code == 201:
+        wait_action(module, rest, ip, json_data['action']['id'])
+
+        module.exit_json(changed=True, data=json_data)
+    else:
+        module.fail_json(msg="Error creating floating ip [{0}: {1}]".format(
+            status_code, json_data["message"]), region=module.params['region'])
+
+
+def associate_floating_ips(module, rest):
+    floating_ip = get_floating_ip_details(module, rest)
+    droplet = floating_ip['droplet']
+
+    # TODO: If already assigned to a droplet verify if is one of the specified as valid
+    if droplet is not None and str(droplet['id']) in [module.params['droplet_id']]:
+        module.exit_json(changed=False)
+    else:
+        assign_floating_id_to_droplet(module, rest)
+
+
+def create_floating_ips(module, rest):
+    payload = {
+    }
+
+    if module.params['region'] is not None:
+        payload["region"] = module.params['region']
+    if module.params['droplet_id'] is not None:
+        payload["droplet_id"] = module.params['droplet_id']
+
+    response = rest.post("floating_ips", data=payload)
+    status_code = response.status_code
+    json_data = response.json
+    if status_code == 202:
+        module.exit_json(changed=True, data=json_data)
+    else:
+        module.fail_json(msg="Error creating floating ip [{0}: {1}]".format(
+            status_code, json_data["message"]), region=module.params['region'])
+
+
+def main():
+    module = AnsibleModule(
+        argument_spec=dict(
+            state=dict(choices=['present', 'absent'], default='present'),
+            ip=dict(aliases=['id'], required=False),
+            region=dict(required=False),
+            droplet_id=dict(required=False),
+            oauth_token=dict(
+                no_log=True,
+                # Support environment variable for DigitalOcean OAuth Token
+                fallback=(env_fallback, ['DO_API_TOKEN', 'DO_API_KEY', 'DO_OAUTH_TOKEN']),
+                required=True,
+            ),
+            validate_certs=dict(type='bool', default=True),
+            timeout=dict(type='int', default=30),
+        ),
+        required_if=[
+            ('state', 'absent', ['ip'])
+        ],
+        mutually_exclusive=[
+            ['region', 'droplet_id']
+        ],
+    )
+
+    core(module)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/cloud/digital_ocean/digital_ocean_floating_ip_facts.py b/plugins/modules/cloud/digital_ocean/digital_ocean_floating_ip_facts.py
new file mode 120000
index 0000000000..bd71456669
--- /dev/null
+++ b/plugins/modules/cloud/digital_ocean/digital_ocean_floating_ip_facts.py
@@ -0,0 +1 @@
+digital_ocean_floating_ip_info.py
\ No newline at end of file
diff --git a/plugins/modules/cloud/digital_ocean/digital_ocean_floating_ip_info.py b/plugins/modules/cloud/digital_ocean/digital_ocean_floating_ip_info.py
new file mode 100644
index 0000000000..3a253cd61d
--- /dev/null
+++ b/plugins/modules/cloud/digital_ocean/digital_ocean_floating_ip_info.py
@@ -0,0 +1,124 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright: (C) 2017-18, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {
+    'status': ['preview'],
+    'supported_by': 'community',
+    'metadata_version': '1.1'
+}
+
+DOCUMENTATION = '''
+---
+module: digital_ocean_floating_ip_info
+short_description: DigitalOcean Floating IPs information
+description:
+     - This module can be used to fetch DigitalOcean Floating IPs information.
+     - This module was called C(digital_ocean_floating_ip_facts) before Ansible 2.9. The usage did not change.
+author: "Patrick Marques (@pmarques)"
+extends_documentation_fragment:
+- community.general.digital_ocean.documentation
+
+notes:
+  - Version 2 of DigitalOcean API is used.
+requirements: + - "python >= 2.6" +''' + + +EXAMPLES = ''' +- name: "Gather information about all Floating IPs" + digital_ocean_floating_ip_info: + register: result + +- name: "List of current floating ips" + debug: var=result.floating_ips +''' + + +RETURN = ''' +# Digital Ocean API info https://developers.digitalocean.com/documentation/v2/#floating-ips +floating_ips: + description: a DigitalOcean Floating IP resource + returned: success and no resource constraint + type: list + sample: [ + { + "ip": "45.55.96.47", + "droplet": null, + "region": { + "name": "New York 3", + "slug": "nyc3", + "sizes": [ + "512mb", + "1gb", + "2gb", + "4gb", + "8gb", + "16gb", + "32gb", + "48gb", + "64gb" + ], + "features": [ + "private_networking", + "backups", + "ipv6", + "metadata" + ], + "available": true + }, + "locked": false + } + ] +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.digital_ocean import DigitalOceanHelper +from ansible.module_utils._text import to_native + + +def core(module): + rest = DigitalOceanHelper(module) + + page = 1 + has_next = True + floating_ips = [] + status_code = None + while has_next or status_code != 200: + response = rest.get("floating_ips?page={0}&per_page=20".format(page)) + status_code = response.status_code + # stop if any error during pagination + if status_code != 200: + break + page += 1 + floating_ips.extend(response.json["floating_ips"]) + has_next = "pages" in response.json["links"] and "next" in response.json["links"]["pages"] + + if status_code == 200: + module.exit_json(changed=False, floating_ips=floating_ips) + else: + module.fail_json(msg="Error fetching information [{0}: {1}]".format( + status_code, response.json["message"])) + + +def main(): + module = AnsibleModule( + argument_spec=DigitalOceanHelper.digital_ocean_argument_spec() + ) + if module._name == 'digital_ocean_floating_ip_facts': + module.deprecate("The 'digital_ocean_floating_ip_facts' module has been renamed to 'digital_ocean_floating_ip_info'", version='2.13') + + try: + core(module) + except Exception as e: + module.fail_json(msg=to_native(e)) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/digital_ocean/digital_ocean_image_facts.py b/plugins/modules/cloud/digital_ocean/digital_ocean_image_facts.py new file mode 120000 index 0000000000..e25fb47543 --- /dev/null +++ b/plugins/modules/cloud/digital_ocean/digital_ocean_image_facts.py @@ -0,0 +1 @@ +digital_ocean_image_info.py \ No newline at end of file diff --git a/plugins/modules/cloud/digital_ocean/digital_ocean_image_info.py b/plugins/modules/cloud/digital_ocean/digital_ocean_image_info.py new file mode 100644 index 0000000000..62a4e59f33 --- /dev/null +++ b/plugins/modules/cloud/digital_ocean/digital_ocean_image_info.py @@ -0,0 +1,154 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright: (c) 2018, Ansible Project +# Copyright: (c) 2018, Abhijeet Kasurde +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = { + 'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community' +} + + +DOCUMENTATION = ''' +--- +module: digital_ocean_image_info +short_description: Gather information about DigitalOcean images +description: + - This module can be used to gather information about DigitalOcean provided images. 
+    - These images can be of type C(distribution), C(application), or C(private).
+    - This module was called C(digital_ocean_image_facts) before Ansible 2.9. The usage did not change.
+author: "Abhijeet Kasurde (@Akasurde)"
+options:
+  image_type:
+    description:
+     - Specifies the type of image information to be retrieved.
+     - If set to C(application), then information is gathered about all application images.
+     - If set to C(distribution), then information is gathered about all distribution images.
+     - If set to C(private), then information is gathered about all private images.
+     - If not set to any of the above, then information is gathered about all images.
+    default: 'all'
+    choices: [ 'all', 'application', 'distribution', 'private' ]
+    required: false
+requirements:
+  - "python >= 2.6"
+extends_documentation_fragment:
+- community.general.digital_ocean.documentation

+'''
+
+
+EXAMPLES = '''
+- name: Gather information about all images
+  digital_ocean_image_info:
+    image_type: all
+    oauth_token: "{{ oauth_token }}"
+
+- name: Gather information about application images
+  digital_ocean_image_info:
+    image_type: application
+    oauth_token: "{{ oauth_token }}"
+
+- name: Gather information about distribution images
+  digital_ocean_image_info:
+    image_type: distribution
+    oauth_token: "{{ oauth_token }}"
+
+- name: Get distribution about image with slug coreos-beta
+  digital_ocean_image_info:
+  register: resp_out
+- set_fact:
+    distribution_name: "{{ item.distribution }}"
+  loop: "{{ resp_out.data|json_query(name) }}"
+  vars:
+    name: "[?slug=='coreos-beta']"
+- debug: var=distribution_name
+
+'''
+
+
+RETURN = '''
+data:
+    description: DigitalOcean image information
+    returned: success
+    type: list
+    sample: [
+        {
+            "created_at": "2018-02-02T07:11:43Z",
+            "distribution": "CoreOS",
+            "id": 31434061,
+            "min_disk_size": 20,
+            "name": "1662.1.0 (beta)",
+            "public": true,
+            "regions": [
+                "nyc1",
+                "sfo1",
+                "nyc2",
+                "ams2",
+                "sgp1",
+                "lon1",
+                "nyc3",
+                "ams3",
+                "fra1",
+                "tor1",
+                "sfo2",
+                "blr1"
+            ],
+            "size_gigabytes": 0.42,
+            "slug": "coreos-beta",
+            "type": "snapshot"
+        },
+    ]
+'''
+
+from traceback import format_exc
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.digital_ocean import DigitalOceanHelper
+from ansible.module_utils._text import to_native
+
+
+def core(module):
+    image_type = module.params['image_type']
+
+    rest = DigitalOceanHelper(module)
+
+    base_url = 'images?'
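+    # Build the query string incrementally: each branch below appends its
+    # filter (terminated by '&') so the pagination helper can append its
+    # own page/per_page parameters after it.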
+ if image_type == 'distribution': + base_url += "type=distribution&" + elif image_type == 'application': + base_url += "type=application&" + elif image_type == 'private': + base_url += "private=true&" + + images = rest.get_paginated_data(base_url=base_url, data_key_name='images') + + module.exit_json(changed=False, data=images) + + +def main(): + argument_spec = DigitalOceanHelper.digital_ocean_argument_spec() + argument_spec.update( + image_type=dict(type='str', + required=False, + choices=['all', 'application', 'distribution', 'private'], + default='all' + ) + ) + + module = AnsibleModule(argument_spec=argument_spec) + if module._name == 'digital_ocean_image_facts': + module.deprecate("The 'digital_ocean_image_facts' module has been renamed to 'digital_ocean_image_info'", version='2.13') + + try: + core(module) + except Exception as e: + module.fail_json(msg=to_native(e), exception=format_exc()) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/digital_ocean/digital_ocean_load_balancer_facts.py b/plugins/modules/cloud/digital_ocean/digital_ocean_load_balancer_facts.py new file mode 120000 index 0000000000..2883d53ecd --- /dev/null +++ b/plugins/modules/cloud/digital_ocean/digital_ocean_load_balancer_facts.py @@ -0,0 +1 @@ +digital_ocean_load_balancer_info.py \ No newline at end of file diff --git a/plugins/modules/cloud/digital_ocean/digital_ocean_load_balancer_info.py b/plugins/modules/cloud/digital_ocean/digital_ocean_load_balancer_info.py new file mode 100644 index 0000000000..35c7520936 --- /dev/null +++ b/plugins/modules/cloud/digital_ocean/digital_ocean_load_balancer_info.py @@ -0,0 +1,121 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright: (c) 2018, Ansible Project +# Copyright: (c) 2018, Abhijeet Kasurde +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = { + 'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community' +} + + +DOCUMENTATION = ''' +--- +module: digital_ocean_load_balancer_info +short_description: Gather information about DigitalOcean load balancers +description: + - This module can be used to gather information about DigitalOcean provided load balancers. + - This module was called C(digital_ocean_load_balancer_facts) before Ansible 2.9. The usage did not change. +author: "Abhijeet Kasurde (@Akasurde)" +options: + load_balancer_id: + description: + - Load balancer ID that can be used to identify and reference a load_balancer. 
+    required: false
+requirements:
+  - "python >= 2.6"
+extends_documentation_fragment:
+- community.general.digital_ocean.documentation

+'''
+
+
+EXAMPLES = '''
+- name: Gather information about all load balancers
+  digital_ocean_load_balancer_info:
+    oauth_token: "{{ oauth_token }}"
+
+- name: Gather information about load balancer with given id
+  digital_ocean_load_balancer_info:
+    oauth_token: "{{ oauth_token }}"
+    load_balancer_id: "4de7ac8b-495b-4884-9a69-1050c6793cd6"
+
+- name: Get name from load balancer id
+  digital_ocean_load_balancer_info:
+  register: resp_out
+- set_fact:
+    load_balancer_name: "{{ item.name }}"
+  loop: "{{ resp_out.data|json_query(name) }}"
+  vars:
+    name: "[?id=='4de7ac8b-495b-4884-9a69-1050c6793cd6']"
+- debug: var=load_balancer_name
+'''
+
+
+RETURN = '''
+data:
+    description: DigitalOcean Load balancer information
+    returned: success
+    type: list
+    sample: [
+        {
+          "id": "4de7ac8b-495b-4884-9a69-1050c6793cd6",
+          "name": "example-lb-01",
+          "ip": "104.131.186.241",
+          "algorithm": "round_robin",
+          "status": "new",
+          "created_at": "2017-02-01T22:22:58Z",
+          ...
+        },
+    ]
+'''
+
+from traceback import format_exc
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.digital_ocean import DigitalOceanHelper
+from ansible.module_utils._text import to_native
+
+
+def core(module):
+    load_balancer_id = module.params.get('load_balancer_id', None)
+    rest = DigitalOceanHelper(module)
+
+    base_url = 'load_balancers?'
+    if load_balancer_id is not None:
+        response = rest.get("load_balancers/%s" % load_balancer_id)
+        status_code = response.status_code
+
+        if status_code != 200:
+            module.fail_json(msg="Failed to retrieve load balancers for DigitalOcean")
+
+        resp_json = response.json
+        load_balancer = resp_json['load_balancer']
+    else:
+        load_balancer = rest.get_paginated_data(base_url=base_url, data_key_name='load_balancers')
+
+    module.exit_json(changed=False, data=load_balancer)
+
+
+def main():
+    argument_spec = DigitalOceanHelper.digital_ocean_argument_spec()
+    argument_spec.update(
+        load_balancer_id=dict(type='str', required=False),
+    )
+    module = AnsibleModule(argument_spec=argument_spec)
+    if module._name == 'digital_ocean_load_balancer_facts':
+        module.deprecate("The 'digital_ocean_load_balancer_facts' module has been renamed to 'digital_ocean_load_balancer_info'", version='2.13')
+
+    try:
+        core(module)
+    except Exception as e:
+        module.fail_json(msg=to_native(e), exception=format_exc())
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/cloud/digital_ocean/digital_ocean_region_facts.py b/plugins/modules/cloud/digital_ocean/digital_ocean_region_facts.py
new file mode 120000
index 0000000000..c4ade0405c
--- /dev/null
+++ b/plugins/modules/cloud/digital_ocean/digital_ocean_region_facts.py
@@ -0,0 +1 @@
+digital_ocean_region_info.py
\ No newline at end of file
diff --git a/plugins/modules/cloud/digital_ocean/digital_ocean_region_info.py b/plugins/modules/cloud/digital_ocean/digital_ocean_region_info.py
new file mode 100644
index 0000000000..8cfd5ad2ed
--- /dev/null
+++ b/plugins/modules/cloud/digital_ocean/digital_ocean_region_info.py
@@ -0,0 +1,121 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright: (c) 2018, Ansible Project
+# Copyright: (c) 2018, Abhijeet Kasurde
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {
'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community' +} + + +DOCUMENTATION = ''' +--- +module: digital_ocean_region_info +short_description: Gather information about DigitalOcean regions +description: + - This module can be used to gather information about regions. + - This module was called C(digital_ocean_region_facts) before Ansible 2.9. The usage did not change. +author: "Abhijeet Kasurde (@Akasurde)" +extends_documentation_fragment: +- community.general.digital_ocean.documentation + +requirements: + - "python >= 2.6" +''' + + +EXAMPLES = ''' +- name: Gather information about all regions + digital_ocean_region_info: + oauth_token: "{{ oauth_token }}" + +- name: Get Name of region where slug is known + digital_ocean_region_info: + oauth_token: "{{ oauth_token }}" + register: resp_out +- debug: var=resp_out +- set_fact: + region_slug: "{{ item.name }}" + loop: "{{ resp_out.data|json_query(name) }}" + vars: + name: "[?slug==`nyc1`]" +- debug: var=region_slug +''' + + +RETURN = ''' +data: + description: DigitalOcean regions information + returned: success + type: list + sample: [ + { + "available": true, + "features": [ + "private_networking", + "backups", + "ipv6", + "metadata", + "install_agent", + "storage" + ], + "name": "New York 1", + "sizes": [ + "512mb", + "s-1vcpu-1gb", + "1gb", + "s-3vcpu-1gb", + "s-1vcpu-2gb", + "s-2vcpu-2gb", + "2gb", + "s-1vcpu-3gb", + "s-2vcpu-4gb", + "4gb", + "c-2", + "m-1vcpu-8gb", + "8gb", + "s-4vcpu-8gb", + "s-6vcpu-16gb", + "16gb" + ], + "slug": "nyc1" + }, + ] +''' + +from traceback import format_exc +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.digital_ocean import DigitalOceanHelper +from ansible.module_utils._text import to_native + + +def core(module): + rest = DigitalOceanHelper(module) + + base_url = 'regions?' 
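+    # All regions are fetched through the shared pagination helper; the
+    # result is the flat list of region dicts shown in RETURN above.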
+ regions = rest.get_paginated_data(base_url=base_url, data_key_name='regions') + + module.exit_json(changed=False, data=regions) + + +def main(): + argument_spec = DigitalOceanHelper.digital_ocean_argument_spec() + module = AnsibleModule(argument_spec=argument_spec) + if module._name == 'digital_ocean_region_facts': + module.deprecate("The 'digital_ocean_region_facts' module has been renamed to 'digital_ocean_region_info'", version='2.13') + + try: + core(module) + except Exception as e: + module.fail_json(msg=to_native(e), exception=format_exc()) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/digital_ocean/digital_ocean_size_facts.py b/plugins/modules/cloud/digital_ocean/digital_ocean_size_facts.py new file mode 120000 index 0000000000..01aabd5750 --- /dev/null +++ b/plugins/modules/cloud/digital_ocean/digital_ocean_size_facts.py @@ -0,0 +1 @@ +digital_ocean_size_info.py \ No newline at end of file diff --git a/plugins/modules/cloud/digital_ocean/digital_ocean_size_info.py b/plugins/modules/cloud/digital_ocean/digital_ocean_size_info.py new file mode 100644 index 0000000000..a10ad5aa0b --- /dev/null +++ b/plugins/modules/cloud/digital_ocean/digital_ocean_size_info.py @@ -0,0 +1,119 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright: (c) 2018, Ansible Project +# Copyright: (c) 2018, Abhijeet Kasurde +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = { + 'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community' +} + + +DOCUMENTATION = ''' +--- +module: digital_ocean_size_info +short_description: Gather information about DigitalOcean Droplet sizes +description: + - This module can be used to gather information about droplet sizes. + - This module was called C(digital_ocean_size_facts) before Ansible 2.9. The usage did not change. 
+author: "Abhijeet Kasurde (@Akasurde)" +requirements: + - "python >= 2.6" +extends_documentation_fragment: +- community.general.digital_ocean.documentation + +''' + + +EXAMPLES = ''' +- name: Gather information about all droplet sizes + digital_ocean_size_info: + oauth_token: "{{ oauth_token }}" + +- name: Get droplet Size Slug where vcpus is 1 + digital_ocean_size_info: + oauth_token: "{{ oauth_token }}" + register: resp_out +- debug: var=resp_out +- set_fact: + size_slug: "{{ item.slug }}" + loop: "{{ resp_out.data|json_query(name) }}" + vars: + name: "[?vcpus==`1`]" +- debug: var=size_slug + + +''' + + +RETURN = ''' +data: + description: DigitalOcean droplet size information + returned: success + type: list + sample: [ + { + "available": true, + "disk": 20, + "memory": 512, + "price_hourly": 0.00744, + "price_monthly": 5.0, + "regions": [ + "ams2", + "ams3", + "blr1", + "fra1", + "lon1", + "nyc1", + "nyc2", + "nyc3", + "sfo1", + "sfo2", + "sgp1", + "tor1" + ], + "slug": "512mb", + "transfer": 1.0, + "vcpus": 1 + }, + ] +''' + +from traceback import format_exc +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.digital_ocean import DigitalOceanHelper +from ansible.module_utils._text import to_native + + +def core(module): + rest = DigitalOceanHelper(module) + + response = rest.get('sizes') + if response.status_code != 200: + module.fail_json(msg="Failed to fetch 'sizes' information due to error : %s" % response.json['message']) + + module.exit_json(changed=False, data=response.json['sizes']) + + +def main(): + argument_spec = DigitalOceanHelper.digital_ocean_argument_spec() + module = AnsibleModule( + argument_spec=argument_spec, + ) + if module._name == 'digital_ocean_size_facts': + module.deprecate("The 'digital_ocean_size_facts' module has been renamed to 'digital_ocean_size_info'", version='2.13') + + try: + core(module) + except Exception as e: + module.fail_json(msg=to_native(e), exception=format_exc()) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/digital_ocean/digital_ocean_snapshot_facts.py b/plugins/modules/cloud/digital_ocean/digital_ocean_snapshot_facts.py new file mode 120000 index 0000000000..c902df906e --- /dev/null +++ b/plugins/modules/cloud/digital_ocean/digital_ocean_snapshot_facts.py @@ -0,0 +1 @@ +digital_ocean_snapshot_info.py \ No newline at end of file diff --git a/plugins/modules/cloud/digital_ocean/digital_ocean_snapshot_info.py b/plugins/modules/cloud/digital_ocean/digital_ocean_snapshot_info.py new file mode 100644 index 0000000000..1af701b9e9 --- /dev/null +++ b/plugins/modules/cloud/digital_ocean/digital_ocean_snapshot_info.py @@ -0,0 +1,166 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright: (c) 2018, Ansible Project +# Copyright: (c) 2018, Abhijeet Kasurde +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = { + 'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community' +} + + +DOCUMENTATION = ''' +--- +module: digital_ocean_snapshot_info +short_description: Gather information about DigitalOcean Snapshot +description: + - This module can be used to gather information about snapshot information based upon provided values such as droplet, volume and snapshot id. + - This module was called C(digital_ocean_snapshot_facts) before Ansible 2.9. The usage did not change. 
+author: "Abhijeet Kasurde (@Akasurde)" +options: + snapshot_type: + description: + - Specifies the type of snapshot information to be retrieved. + - If set to C(droplet), then information are gathered related to snapshots based on Droplets only. + - If set to C(volume), then information are gathered related to snapshots based on volumes only. + - If set to C(by_id), then information are gathered related to snapshots based on snapshot id only. + - If not set to any of the above, then information are gathered related to all snapshots. + default: 'all' + choices: [ 'all', 'droplet', 'volume', 'by_id'] + required: false + snapshot_id: + description: + - To retrieve information about a snapshot, please specify this as a snapshot id. + - If set to actual snapshot id, then information are gathered related to that particular snapshot only. + - This is required parameter, if C(snapshot_type) is set to C(by_id). + required: false +requirements: + - "python >= 2.6" +extends_documentation_fragment: +- community.general.digital_ocean.documentation + +''' + + +EXAMPLES = ''' +- name: Gather information about all snapshots + digital_ocean_snapshot_info: + snapshot_type: all + oauth_token: "{{ oauth_token }}" + +- name: Gather information about droplet snapshots + digital_ocean_snapshot_info: + snapshot_type: droplet + oauth_token: "{{ oauth_token }}" + +- name: Gather information about volume snapshots + digital_ocean_snapshot_info: + snapshot_type: volume + oauth_token: "{{ oauth_token }}" + +- name: Gather information about snapshot by snapshot id + digital_ocean_snapshot_info: + snapshot_type: by_id + snapshot_id: 123123123 + oauth_token: "{{ oauth_token }}" + +- name: Get information about snapshot named big-data-snapshot1 + digital_ocean_snapshot_info: + register: resp_out +- set_fact: + snapshot_id: "{{ item.id }}" + loop: "{{ resp_out.data|json_query(name) }}" + vars: + name: "[?name=='big-data-snapshot1']" +- debug: var=snapshot_id + +''' + + +RETURN = ''' +data: + description: DigitalOcean snapshot information + returned: success + type: list + sample: [ + { + "id": "4f60fc64-85d1-11e6-a004-000f53315871", + "name": "big-data-snapshot1", + "regions": [ + "nyc1" + ], + "created_at": "2016-09-28T23:14:30Z", + "resource_id": "89bcc42f-85cf-11e6-a004-000f53315871", + "resource_type": "volume", + "min_disk_size": 10, + "size_gigabytes": 0 + }, + ] +''' + +from traceback import format_exc +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.digital_ocean import DigitalOceanHelper +from ansible.module_utils._text import to_native + + +def core(module): + snapshot_type = module.params['snapshot_type'] + + rest = DigitalOceanHelper(module) + + base_url = 'snapshots?' 
+    snapshot = []
+
+    if snapshot_type == 'by_id':
+        response = rest.get("snapshots/{0}".format(module.params.get('snapshot_id')))
+        status_code = response.status_code
+
+        if status_code != 200:
+            module.fail_json(msg="Failed to fetch snapshot information due to error: %s" % response.json['message'])
+
+        # The API returns a single snapshot object here; append it as one
+        # element (extend() would iterate over the dict's keys instead).
+        snapshot.append(response.json["snapshot"])
+    else:
+        if snapshot_type == 'droplet':
+            base_url += "resource_type=droplet&"
+        elif snapshot_type == 'volume':
+            base_url += "resource_type=volume&"
+
+        snapshot = rest.get_paginated_data(base_url=base_url, data_key_name='snapshots')
+    module.exit_json(changed=False, data=snapshot)
+
+
+def main():
+    argument_spec = DigitalOceanHelper.digital_ocean_argument_spec()
+    argument_spec.update(
+        snapshot_type=dict(type='str',
+                           required=False,
+                           choices=['all', 'droplet', 'volume', 'by_id'],
+                           default='all'),
+        snapshot_id=dict(type='str',
+                         required=False),
+    )
+    module = AnsibleModule(
+        argument_spec=argument_spec,
+        required_if=[
+            ['snapshot_type', 'by_id', ['snapshot_id']],
+        ],
+    )
+    if module._name == 'digital_ocean_snapshot_facts':
+        module.deprecate("The 'digital_ocean_snapshot_facts' module has been renamed to 'digital_ocean_snapshot_info'", version='2.13')
+
+    try:
+        core(module)
+    except Exception as e:
+        module.fail_json(msg=to_native(e), exception=format_exc())
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/cloud/digital_ocean/digital_ocean_sshkey.py b/plugins/modules/cloud/digital_ocean/digital_ocean_sshkey.py
new file mode 100644
index 0000000000..7732fca8e7
--- /dev/null
+++ b/plugins/modules/cloud/digital_ocean/digital_ocean_sshkey.py
@@ -0,0 +1,262 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: digital_ocean_sshkey
+short_description: Manage DigitalOcean SSH keys
+description:
+     - Create/delete DigitalOcean SSH keys.
+author: "Patrick Marques (@pmarques)"
+options:
+  state:
+    description:
+     - Indicate desired state of the target.
+    default: present
+    choices: ['present', 'absent']
+  fingerprint:
+    description:
+     - This is a unique identifier for the SSH key used to delete a key.
+    aliases: ['id']
+  name:
+    description:
+     - The name for the SSH key.
+  ssh_pub_key:
+    description:
+     - The Public SSH key to add.
+  oauth_token:
+    description:
+     - DigitalOcean OAuth token.
+    required: true
+notes:
+  - Version 2 of DigitalOcean API is used.
+requirements:
+  - "python >= 2.6"
+'''
+
+
+EXAMPLES = '''
+- name: "Create ssh key"
+  digital_ocean_sshkey:
+    oauth_token: "{{ oauth_token }}"
+    name: "My SSH Public Key"
+    ssh_pub_key: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAQQDDHr/jh2Jy4yALcK4JyWbVkPRaWmhck3IgCoeOO3z1e2dBowLh64QAM+Qb72pxekALga2oi4GvT+TlWNhzPH4V example"
+    state: present
+  register: result
+
+- name: "Delete ssh key"
+  digital_ocean_sshkey:
+    oauth_token: "{{ oauth_token }}"
+    state: "absent"
+    fingerprint: "3b:16:bf:e4:8b:00:8b:b8:59:8c:a9:d3:f0:19:45:fa"
+'''
+
+
+RETURN = '''
+# Digital Ocean API info https://developers.digitalocean.com/documentation/v2/#list-all-keys
+data:
+    description: This is only present when C(state=present)
+    returned: when C(state=present)
+    type: dict
+    sample: {
+        "ssh_key": {
+            "id": 512189,
+            "fingerprint": "3b:16:bf:e4:8b:00:8b:b8:59:8c:a9:d3:f0:19:45:fa",
+            "name": "My SSH Public Key",
+            "public_key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAQQDDHr/jh2Jy4yALcK4JyWbVkPRaWmhck3IgCoeOO3z1e2dBowLh64QAM+Qb72pxekALga2oi4GvT+TlWNhzPH4V example"
+        }
+    }
+'''
+
+import json
+import hashlib
+import base64
+
+from ansible.module_utils.basic import env_fallback
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import fetch_url
+
+
+class Response(object):
+
+    def __init__(self, resp, info):
+        self.body = None
+        if resp:
+            self.body = resp.read()
+        self.info = info
+
+    @property
+    def json(self):
+        if not self.body:
+            if "body" in self.info:
+                return json.loads(self.info["body"])
+            return None
+        try:
+            return json.loads(self.body)
+        except ValueError:
+            return None
+
+    @property
+    def status_code(self):
+        return self.info["status"]
+
+
+class Rest(object):
+
+    def __init__(self, module, headers):
+        self.module = module
+        self.headers = headers
+        self.baseurl = 'https://api.digitalocean.com/v2'
+
+    def _url_builder(self, path):
+        if path[0] == '/':
+            path = path[1:]
+        return '%s/%s' % (self.baseurl, path)
+
+    def send(self, method, path, data=None, headers=None):
+        url = self._url_builder(path)
+        data = self.module.jsonify(data)
+        timeout = self.module.params['timeout']
+
+        resp, info = fetch_url(self.module, url, data=data, headers=self.headers, method=method, timeout=timeout)
+
+        # Exceptions in fetch_url may result in a status of -1; this ensures a
+        # proper error message is reported to the user in all cases.
+        if info['status'] == -1:
+            self.module.fail_json(msg=info['msg'])
+
+        return Response(resp, info)
+
+    def get(self, path, data=None, headers=None):
+        return self.send('GET', path, data, headers)
+
+    def put(self, path, data=None, headers=None):
+        return self.send('PUT', path, data, headers)
+
+    def post(self, path, data=None, headers=None):
+        return self.send('POST', path, data, headers)
+
+    def delete(self, path, data=None, headers=None):
+        return self.send('DELETE', path, data, headers)
+
+
+def core(module):
+    api_token = module.params['oauth_token']
+    state = module.params['state']
+    fingerprint = module.params['fingerprint']
+    name = module.params['name']
+    ssh_pub_key = module.params['ssh_pub_key']
+
+    rest = Rest(module, {'Authorization': 'Bearer {0}'.format(api_token),
+                         'Content-type': 'application/json'})
+
+    fingerprint = fingerprint or ssh_key_fingerprint(ssh_pub_key)
+    response = rest.get('account/keys/{0}'.format(fingerprint))
+    status_code = response.status_code
+    json = response.json
+
+    if status_code not in (200, 404):
+        module.fail_json(msg='Error getting ssh key [{0}: {1}]'.format(
+            status_code, response.json['message']), fingerprint=fingerprint)
+
+    if state == 'present':
+        if status_code == 404:
+            # If the key was not found, create it.
+
+            if module.check_mode:
+                module.exit_json(changed=True)
+
+            payload = {
+                'name': name,
+                'public_key': ssh_pub_key
+            }
+            response = rest.post('account/keys', data=payload)
+            status_code = response.status_code
+            json = response.json
+            if status_code == 201:
+                module.exit_json(changed=True, data=json)
+
+            module.fail_json(msg='Error creating ssh key [{0}: {1}]'.format(
+                status_code, response.json['message']))
+
+        elif status_code == 200:
+            # If the key was found, check whether its name needs to be updated.
+            if name is None or json['ssh_key']['name'] == name:
+                module.exit_json(changed=False, data=json)
+
+            if module.check_mode:
+                module.exit_json(changed=True)
+
+            payload = {
+                'name': name,
+            }
+            response = rest.put('account/keys/{0}'.format(fingerprint), data=payload)
+            status_code = response.status_code
+            json = response.json
+            if status_code == 200:
+                module.exit_json(changed=True, data=json)
+
+            module.fail_json(msg='Error updating ssh key name [{0}: {1}]'.format(
+                status_code, response.json['message']), fingerprint=fingerprint)
+
+    elif state == 'absent':
+        if status_code == 404:
+            module.exit_json(changed=False)
+
+        if module.check_mode:
+            module.exit_json(changed=True)
+
+        response = rest.delete('account/keys/{0}'.format(fingerprint))
+        status_code = response.status_code
+        json = response.json
+        if status_code == 204:
+            module.exit_json(changed=True)
+
+        module.fail_json(msg='Error deleting ssh key [{0}: {1}]'.format(
+            status_code, response.json['message']))
+
+
+def ssh_key_fingerprint(ssh_pub_key):
+    # The fingerprint is the MD5 digest of the base64-decoded public key body,
+    # rendered as colon-separated hex pairs (the format the API expects).
+    key = ssh_pub_key.split(None, 2)[1]
+    fingerprint = hashlib.md5(base64.b64decode(key)).hexdigest()
+    return ':'.join(a + b for a, b in zip(fingerprint[::2], fingerprint[1::2]))
+
+
+def main():
+    module = AnsibleModule(
+        argument_spec=dict(
+            state=dict(choices=['present', 'absent'], default='present'),
+            fingerprint=dict(aliases=['id'], required=False),
+            name=dict(required=False),
+            ssh_pub_key=dict(required=False),
+            oauth_token=dict(
+                no_log=True,
+                # Support environment variable for DigitalOcean OAuth Token
+                fallback=(env_fallback, ['DO_API_TOKEN', 'DO_API_KEY', 'DO_OAUTH_TOKEN']),
+                required=True,
+            ),
+            validate_certs=dict(type='bool', default=True),
+            timeout=dict(type='int', default=30),
+        ),
+        required_one_of=(
+            ('fingerprint', 'ssh_pub_key'),
+        ),
+        supports_check_mode=True,
+    )
+
+    core(module)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/cloud/digital_ocean/digital_ocean_sshkey_facts.py b/plugins/modules/cloud/digital_ocean/digital_ocean_sshkey_facts.py
new file mode 100644
index 0000000000..22cbbd1328
--- /dev/null
+++ b/plugins/modules/cloud/digital_ocean/digital_ocean_sshkey_facts.py
@@ -0,0 +1,105 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'status': ['deprecated'],
+                    'supported_by': 'community',
+                    'metadata_version': '1.1'}
+
+
+DOCUMENTATION = '''
+---
+module: digital_ocean_sshkey_facts
+deprecated:
+  removed_in: '2.13'
+  why: Deprecated in favour of C(_info) module.
+  alternative: Use M(digital_ocean_sshkey_info) instead.
+short_description: DigitalOcean SSH keys facts
+description:
+     - Fetch DigitalOcean SSH keys facts.
+author: "Patrick Marques (@pmarques)" +extends_documentation_fragment: +- community.general.digital_ocean.documentation + +notes: + - Version 2 of DigitalOcean API is used. +requirements: + - "python >= 2.6" +''' + + +EXAMPLES = ''' +- digital_ocean_sshkey_facts: + oauth_token: "{{ my_do_key }}" + +- set_fact: + pubkey: "{{ item.public_key }}" + loop: "{{ ssh_keys|json_query(ssh_pubkey) }}" + vars: + ssh_pubkey: "[?name=='ansible_ctrl']" + +- debug: + msg: "{{ pubkey }}" +''' + + +RETURN = ''' +# Digital Ocean API info https://developers.digitalocean.com/documentation/v2/#list-all-keys +data: + description: List of SSH keys on DigitalOcean + returned: success and no resource constraint + type: dict + sample: { + "ssh_keys": [ + { + "id": 512189, + "fingerprint": "3b:16:bf:e4:8b:00:8b:b8:59:8c:a9:d3:f0:19:45:fa", + "public_key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAQQDDHr/jh2Jy4yALcK4JyWbVkPRaWmhck3IgCoeOO3z1e2dBowLh64QAM+Qb72pxekALga2oi4GvT+TlWNhzPH4V example", + "name": "My SSH Public Key" + } + ], + "links": { + }, + "meta": { + "total": 1 + } + } +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.digital_ocean import DigitalOceanHelper + + +def core(module): + rest = DigitalOceanHelper(module) + + response = rest.get("account/keys") + status_code = response.status_code + json = response.json + if status_code == 200: + module.exit_json(changed=False, ansible_facts=json) + else: + module.fail_json(msg='Error fetching facts [{0}: {1}]'.format( + status_code, response.json['message'])) + + +def main(): + module = AnsibleModule( + argument_spec=DigitalOceanHelper.digital_ocean_argument_spec(), + supports_check_mode=False, + ) + + module.deprecate("The 'digital_ocean_sshkey_facts' module has been deprecated, use the new 'digital_ocean_sshkey_info' module", version='2.13') + + core(module) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/digital_ocean/digital_ocean_sshkey_info.py b/plugins/modules/cloud/digital_ocean/digital_ocean_sshkey_info.py new file mode 100644 index 0000000000..51ee20b58a --- /dev/null +++ b/plugins/modules/cloud/digital_ocean/digital_ocean_sshkey_info.py @@ -0,0 +1,94 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'metadata_version': '1.1'} + + +DOCUMENTATION = ''' +--- +module: digital_ocean_sshkey_info +short_description: Gather information about DigitalOcean SSH keys +description: + - This module can be used to gather information about DigitalOcean SSH keys. + - This module replaces the C(digital_ocean_sshkey_facts) module. +author: "Patrick Marques (@pmarques)" +extends_documentation_fragment: +- community.general.digital_ocean.documentation + +notes: + - Version 2 of DigitalOcean API is used. 
+requirements:
+  - "python >= 2.6"
+'''
+
+
+EXAMPLES = '''
+- digital_ocean_sshkey_info:
+    oauth_token: "{{ my_do_key }}"
+  register: ssh_keys
+
+- set_fact:
+    pubkey: "{{ item.public_key }}"
+  loop: "{{ ssh_keys.data|json_query(ssh_pubkey) }}"
+  vars:
+    ssh_pubkey: "[?name=='ansible_ctrl']"
+
+- debug:
+    msg: "{{ pubkey }}"
+'''
+
+
+RETURN = '''
+# Digital Ocean API info https://developers.digitalocean.com/documentation/v2/#list-all-keys
+data:
+    description: List of SSH keys on DigitalOcean
+    returned: success and no resource constraint
+    type: list
+    sample: [
+      {
+        "id": 512189,
+        "fingerprint": "3b:16:bf:e4:8b:00:8b:b8:59:8c:a9:d3:f0:19:45:fa",
+        "public_key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAQQDDHr/jh2Jy4yALcK4JyWbVkPRaWmhck3IgCoeOO3z1e2dBowLh64QAM+Qb72pxekALga2oi4GvT+TlWNhzPH4V example",
+        "name": "My SSH Public Key"
+      }
+    ]
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.digital_ocean import DigitalOceanHelper
+
+
+def core(module):
+    rest = DigitalOceanHelper(module)
+
+    response = rest.get("account/keys")
+    status_code = response.status_code
+    json = response.json
+    if status_code == 200:
+        module.exit_json(changed=False, data=json['ssh_keys'])
+    else:
+        module.fail_json(msg='Error fetching SSH Key information [{0}: {1}]'.format(
+            status_code, response.json['message']))
+
+
+def main():
+    module = AnsibleModule(
+        argument_spec=DigitalOceanHelper.digital_ocean_argument_spec(),
+        supports_check_mode=True,
+    )
+
+    core(module)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/cloud/digital_ocean/digital_ocean_tag.py b/plugins/modules/cloud/digital_ocean/digital_ocean_tag.py
new file mode 100644
index 0000000000..50bb692396
--- /dev/null
+++ b/plugins/modules/cloud/digital_ocean/digital_ocean_tag.py
@@ -0,0 +1,210 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: digital_ocean_tag
+short_description: Create and remove tags on DigitalOcean resources.
+description:
+    - Create and remove tags on DigitalOcean resources.
+author: "Victor Volle (@kontrafiktion)"
+options:
+  name:
+    description:
+     - The name of the tag. The supported characters for names include
+       alphanumeric characters, dashes, and underscores.
+    required: true
+  resource_id:
+    description:
+    - The ID of the resource to operate on.
+    - The data type of resource_id was changed from integer to string in version 2.5.
+    aliases: ['droplet_id']
+  resource_type:
+    description:
+    - The type of resource to operate on. Currently, only tagging of
+      droplets is supported.
+    default: droplet
+    choices: ['droplet']
+  state:
+    description:
+     - Whether the tag should be present or absent on the resource.
+    default: present
+    choices: ['present', 'absent']
+extends_documentation_fragment:
+- community.general.digital_ocean.documentation
+
+notes:
+  - Two environment variables can be used, DO_API_KEY and DO_API_TOKEN.
+    They both refer to the v2 token.
+  - As of Ansible 2.0, Version 2 of the DigitalOcean API is used.
+ +requirements: + - "python >= 2.6" +''' + + +EXAMPLES = ''' +- name: create a tag + digital_ocean_tag: + name: production + state: present + +- name: tag a resource; creating the tag if it does not exist + digital_ocean_tag: + name: "{{ item }}" + resource_id: "73333005" + state: present + loop: + - staging + - dbserver + +- name: untag a resource + digital_ocean_tag: + name: staging + resource_id: "73333005" + state: absent + +# Deleting a tag also untags all the resources that have previously been +# tagged with it +- name: remove a tag + digital_ocean_tag: + name: dbserver + state: absent +''' + + +RETURN = ''' +data: + description: a DigitalOcean Tag resource + returned: success and no resource constraint + type: dict + sample: { + "tag": { + "name": "awesome", + "resources": { + "droplets": { + "count": 0, + "last_tagged": null + } + } + } + } +''' + +from traceback import format_exc +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.digital_ocean import DigitalOceanHelper +from ansible.module_utils._text import to_native + + +def core(module): + state = module.params['state'] + name = module.params['name'] + resource_id = module.params['resource_id'] + resource_type = module.params['resource_type'] + + rest = DigitalOceanHelper(module) + + if state == 'present': + response = rest.get('tags/{0}'.format(name)) + status_code = response.status_code + resp_json = response.json + changed = False + if status_code == 200 and resp_json['tag']['name'] == name: + changed = False + else: + # Ensure Tag exists + response = rest.post("tags", data={'name': name}) + status_code = response.status_code + resp_json = response.json + if status_code == 201: + changed = True + elif status_code == 422: + changed = False + else: + module.exit_json(changed=False, data=resp_json) + + if resource_id is None: + # No resource defined, we're done. 
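+            # Added commentary: 'changed' reflects whether the tag itself had
+            # to be created above (HTTP 201), as opposed to already existing
+            # (200 on the GET, or 422 on the POST).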
+ module.exit_json(changed=changed, data=resp_json) + else: + # Check if resource is already tagged or not + found = False + url = "{0}?tag_name={1}".format(resource_type, name) + if resource_type == 'droplet': + url = "droplets?tag_name={0}".format(name) + response = rest.get(url) + status_code = response.status_code + resp_json = response.json + if status_code == 200: + for resource in resp_json['droplets']: + if not found and resource['id'] == int(resource_id): + found = True + break + if not found: + # If resource is not tagged, tag a resource + url = "tags/{0}/resources".format(name) + payload = { + 'resources': [{ + 'resource_id': resource_id, + 'resource_type': resource_type}]} + response = rest.post(url, data=payload) + if response.status_code == 204: + module.exit_json(changed=True) + else: + module.fail_json(msg="error tagging resource '{0}': {1}".format(resource_id, response.json["message"])) + else: + # Already tagged resource + module.exit_json(changed=False) + else: + # Unable to find resource specified by user + module.fail_json(msg=resp_json['message']) + + elif state == 'absent': + if resource_id: + url = "tags/{0}/resources".format(name) + payload = { + 'resources': [{ + 'resource_id': resource_id, + 'resource_type': resource_type}]} + response = rest.delete(url, data=payload) + else: + url = "tags/{0}".format(name) + response = rest.delete(url) + if response.status_code == 204: + module.exit_json(changed=True) + else: + module.exit_json(changed=False, data=response.json) + + +def main(): + argument_spec = DigitalOceanHelper.digital_ocean_argument_spec() + argument_spec.update( + name=dict(type='str', required=True), + resource_id=dict(aliases=['droplet_id'], type='str'), + resource_type=dict(choices=['droplet'], default='droplet'), + state=dict(choices=['present', 'absent'], default='present'), + ) + + module = AnsibleModule(argument_spec=argument_spec) + + try: + core(module) + except Exception as e: + module.fail_json(msg=to_native(e), exception=format_exc()) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/digital_ocean/digital_ocean_tag_facts.py b/plugins/modules/cloud/digital_ocean/digital_ocean_tag_facts.py new file mode 120000 index 0000000000..a29f869b96 --- /dev/null +++ b/plugins/modules/cloud/digital_ocean/digital_ocean_tag_facts.py @@ -0,0 +1 @@ +digital_ocean_tag_info.py \ No newline at end of file diff --git a/plugins/modules/cloud/digital_ocean/digital_ocean_tag_info.py b/plugins/modules/cloud/digital_ocean/digital_ocean_tag_info.py new file mode 100644 index 0000000000..d5c4c4bf4f --- /dev/null +++ b/plugins/modules/cloud/digital_ocean/digital_ocean_tag_info.py @@ -0,0 +1,121 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright: (c) 2018, Ansible Project +# Copyright: (c) 2018, Abhijeet Kasurde +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = { + 'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community' +} + + +DOCUMENTATION = ''' +--- +module: digital_ocean_tag_info +short_description: Gather information about DigitalOcean tags +description: + - This module can be used to gather information about DigitalOcean provided tags. + - This module was called C(digital_ocean_tag_facts) before Ansible 2.9. The usage did not change. 
+author: "Abhijeet Kasurde (@Akasurde)" +options: + tag_name: + description: + - Tag name that can be used to identify and reference a tag. + required: false +requirements: + - "python >= 2.6" +extends_documentation_fragment: +- community.general.digital_ocean.documentation + +''' + + +EXAMPLES = ''' +- name: Gather information about all tags + digital_ocean_tag_info: + oauth_token: "{{ oauth_token }}" + +- name: Gather information about tag with given name + digital_ocean_tag_info: + oauth_token: "{{ oauth_token }}" + tag_name: "extra_awesome_tag" + +- name: Get resources from tag name + digital_ocean_tag_info: + register: resp_out +- set_fact: + resources: "{{ item.resources }}" + loop: "{{ resp_out.data|json_query(name) }}" + vars: + name: "[?name=='extra_awesome_tag']" +- debug: var=resources +''' + + +RETURN = ''' +data: + description: DigitalOcean tag information + returned: success + type: list + sample: [ + { + "name": "extra-awesome", + "resources": { + "droplets": { + "count": 1, + ... + } + } + }, + ] +''' + +from traceback import format_exc +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.digital_ocean import DigitalOceanHelper +from ansible.module_utils._text import to_native + + +def core(module): + tag_name = module.params.get('tag_name', None) + rest = DigitalOceanHelper(module) + + base_url = 'tags?' + if tag_name is not None: + response = rest.get("%s/%s" % (base_url, tag_name)) + status_code = response.status_code + + if status_code != 200: + module.fail_json(msg="Failed to retrieve tags for DigitalOcean") + + resp_json = response.json + tag = resp_json['tag'] + else: + tag = rest.get_paginated_data(base_url=base_url, data_key_name='tags') + + module.exit_json(changed=False, data=tag) + + +def main(): + argument_spec = DigitalOceanHelper.digital_ocean_argument_spec() + argument_spec.update( + tag_name=dict(type='str', required=False), + ) + module = AnsibleModule(argument_spec=argument_spec) + if module._name == 'digital_ocean_tag_facts': + module.deprecate("The 'digital_ocean_tag_facts' module has been renamed to 'digital_ocean_tag_info'", version='2.13') + + try: + core(module) + except Exception as e: + module.fail_json(msg=to_native(e), exception=format_exc()) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/digital_ocean/digital_ocean_volume_facts.py b/plugins/modules/cloud/digital_ocean/digital_ocean_volume_facts.py new file mode 120000 index 0000000000..b6491ad79e --- /dev/null +++ b/plugins/modules/cloud/digital_ocean/digital_ocean_volume_facts.py @@ -0,0 +1 @@ +digital_ocean_volume_info.py \ No newline at end of file diff --git a/plugins/modules/cloud/digital_ocean/digital_ocean_volume_info.py b/plugins/modules/cloud/digital_ocean/digital_ocean_volume_info.py new file mode 100644 index 0000000000..52c5176b70 --- /dev/null +++ b/plugins/modules/cloud/digital_ocean/digital_ocean_volume_info.py @@ -0,0 +1,146 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright: (c) 2018, Ansible Project +# Copyright: (c) 2018, Abhijeet Kasurde +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = { + 'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community' +} + + +DOCUMENTATION = ''' +--- +module: digital_ocean_volume_info +short_description: Gather information about DigitalOcean volumes +description: + 
+     - This module can be used to gather information about DigitalOcean provided volumes.
+     - This module was called C(digital_ocean_volume_facts) before Ansible 2.9. The usage did not change.
+author: "Abhijeet Kasurde (@Akasurde)"
+options:
+  region_name:
+    description:
+     - Name of region to restrict results to volumes available in a specific region.
+     - Please use M(digital_ocean_region_info) to get valid values for regions.
+    required: false
+
+requirements:
+  - "python >= 2.6"
+
+extends_documentation_fragment:
+- community.general.digital_ocean.documentation
+
+'''
+
+
+EXAMPLES = '''
+- name: Gather information about all volumes
+  digital_ocean_volume_info:
+    oauth_token: "{{ oauth_token }}"
+
+- name: Gather information about volumes in a given region
+  digital_ocean_volume_info:
+    region_name: nyc1
+    oauth_token: "{{ oauth_token }}"
+
+- name: Get information about volume named nyc3-test-volume
+  digital_ocean_volume_info:
+  register: resp_out
+- set_fact:
+    volume_id: "{{ item.id }}"
+  loop: "{{ resp_out.data|json_query(name) }}"
+  vars:
+    name: "[?name=='nyc3-test-volume']"
+- debug: var=volume_id
+'''
+
+
+RETURN = '''
+data:
+    description: DigitalOcean volume information
+    returned: success
+    type: list
+    sample: [
+        {
+          "id": "506f78a4-e098-11e5-ad9f-000f53306ae1",
+          "region": {
+            "name": "New York 1",
+            "slug": "nyc1",
+            "sizes": [
+              "s-1vcpu-1gb",
+              "s-1vcpu-2gb",
+              "s-1vcpu-3gb",
+              "s-2vcpu-2gb",
+              "s-3vcpu-1gb",
+              "s-2vcpu-4gb",
+              "s-4vcpu-8gb",
+              "s-6vcpu-16gb",
+              "s-8vcpu-32gb",
+              "s-12vcpu-48gb",
+              "s-16vcpu-64gb",
+              "s-20vcpu-96gb",
+              "s-24vcpu-128gb",
+              "s-32vcpu-192gb"
+            ],
+            "features": [
+              "private_networking",
+              "backups",
+              "ipv6",
+              "metadata"
+            ],
+            "available": true
+          },
+          "droplet_ids": [
+
+          ],
+          "name": "example",
+          "description": "Block store for examples",
+          "size_gigabytes": 10,
+          "created_at": "2016-03-02T17:00:49Z"
+        }
+    ]
+'''
+
+from traceback import format_exc
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.digital_ocean import DigitalOceanHelper
+from ansible.module_utils._text import to_native
+
+
+def core(module):
+    region_name = module.params.get('region_name', None)
+
+    rest = DigitalOceanHelper(module)
+
+    base_url = 'volumes?'
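+    # Added commentary: optional filters are appended as 'key=value&' pairs so
+    # that base_url always ends in '?' or '&', ready for the paging parameters
+    # that get_paginated_data() appends.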
+ if region_name is not None: + base_url += "region=%s&" % region_name + + volumes = rest.get_paginated_data(base_url=base_url, data_key_name='volumes') + + module.exit_json(changed=False, data=volumes) + + +def main(): + argument_spec = DigitalOceanHelper.digital_ocean_argument_spec() + argument_spec.update( + region_name=dict(type='str', required=False), + ) + module = AnsibleModule(argument_spec=argument_spec) + if module._name == 'digital_ocean_volume_facts': + module.deprecate("The 'digital_ocean_volume_facts' module has been renamed to 'digital_ocean_volume_info'", version='2.13') + + try: + core(module) + except Exception as e: + module.fail_json(msg=to_native(e), exception=format_exc()) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/dimensiondata/dimensiondata_network.py b/plugins/modules/cloud/dimensiondata/dimensiondata_network.py new file mode 100644 index 0000000000..dcb1ad9e66 --- /dev/null +++ b/plugins/modules/cloud/dimensiondata/dimensiondata_network.py @@ -0,0 +1,295 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (c) 2016 Dimension Data +# Authors: +# - Aimon Bustardo +# - Bert Diwa +# - Adam Friedman +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: dimensiondata_network +short_description: Create, update, and delete MCP 1.0 & 2.0 networks +extends_documentation_fragment: +- community.general.dimensiondata +- community.general.dimensiondata_wait + +description: + - Create, update, and delete MCP 1.0 & 2.0 networks +author: 'Aimon Bustardo (@aimonb)' +options: + name: + description: + - The name of the network domain to create. + required: true + description: + description: + - Additional description of the network domain. + required: false + service_plan: + description: + - The service plan, either "ESSENTIALS" or "ADVANCED". + - MCP 2.0 Only. + choices: [ESSENTIALS, ADVANCED] + default: ESSENTIALS + state: + description: + - Should the resource be present or absent. + choices: [present, absent] + default: present +''' + +EXAMPLES = ''' +# Create an MCP 1.0 network +- dimensiondata_network: + region: na + location: NA5 + name: mynet +# Create an MCP 2.0 network +- dimensiondata_network: + region: na + mcp_user: my_user + mcp_password: my_password + location: NA9 + name: mynet + service_plan: ADVANCED +# Delete a network +- dimensiondata_network: + region: na + location: NA1 + name: mynet + state: absent +''' + +RETURN = ''' +network: + description: Dictionary describing the network. + returned: On success when I(state=present). + type: complex + contains: + id: + description: Network ID. + type: str + sample: "8c787000-a000-4050-a215-280893411a7d" + name: + description: Network name. + type: str + sample: "My network" + description: + description: Network description. + type: str + sample: "My network description" + location: + description: Datacenter location. + type: str + sample: NA3 + status: + description: Network status. (MCP 2.0 only) + type: str + sample: NORMAL + private_net: + description: Private network subnet. (MCP 1.0 only) + type: str + sample: "10.2.3.0" + multicast: + description: Multicast enabled? 
(MCP 1.0 only) + type: bool + sample: false +''' +import traceback + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.dimensiondata import HAS_LIBCLOUD, DimensionDataModule +from ansible.module_utils._text import to_native + +if HAS_LIBCLOUD: + from libcloud.compute.base import NodeLocation + from libcloud.common.dimensiondata import DimensionDataAPIException + + +class DimensionDataNetworkModule(DimensionDataModule): + """ + The dimensiondata_network module for Ansible. + """ + + def __init__(self): + """ + Create a new Dimension Data network module. + """ + + super(DimensionDataNetworkModule, self).__init__( + module=AnsibleModule( + argument_spec=DimensionDataModule.argument_spec_with_wait( + name=dict(type='str', required=True), + description=dict(type='str', required=False), + service_plan=dict(default='ESSENTIALS', choices=['ADVANCED', 'ESSENTIALS']), + state=dict(default='present', choices=['present', 'absent']) + ), + required_together=DimensionDataModule.required_together() + ) + ) + + self.name = self.module.params['name'] + self.description = self.module.params['description'] + self.service_plan = self.module.params['service_plan'] + self.state = self.module.params['state'] + + def state_present(self): + network = self._get_network() + + if network: + self.module.exit_json( + changed=False, + msg='Network already exists', + network=self._network_to_dict(network) + ) + + network = self._create_network() + + self.module.exit_json( + changed=True, + msg='Created network "%s" in datacenter "%s".' % (self.name, self.location), + network=self._network_to_dict(network) + ) + + def state_absent(self): + network = self._get_network() + + if not network: + self.module.exit_json( + changed=False, + msg='Network "%s" does not exist' % self.name, + network=self._network_to_dict(network) + ) + + self._delete_network(network) + + def _get_network(self): + if self.mcp_version == '1.0': + networks = self.driver.list_networks(location=self.location) + else: + networks = self.driver.ex_list_network_domains(location=self.location) + + matched_network = [network for network in networks if network.name == self.name] + if matched_network: + return matched_network[0] + + return None + + def _network_to_dict(self, network): + network_dict = dict( + id=network.id, + name=network.name, + description=network.description + ) + + if isinstance(network.location, NodeLocation): + network_dict['location'] = network.location.id + else: + network_dict['location'] = network.location + + if self.mcp_version == '1.0': + network_dict['private_net'] = network.private_net + network_dict['multicast'] = network.multicast + network_dict['status'] = None + else: + network_dict['private_net'] = None + network_dict['multicast'] = None + network_dict['status'] = network.status + + return network_dict + + def _create_network(self): + + # Make sure service_plan argument is defined + if self.mcp_version == '2.0' and 'service_plan' not in self.module.params: + self.module.fail_json( + msg='service_plan required when creating network and location is MCP 2.0' + ) + + # Create network + try: + if self.mcp_version == '1.0': + network = self.driver.ex_create_network( + self.location, + self.name, + description=self.description + ) + else: + network = self.driver.ex_create_network_domain( + self.location, + self.name, + self.module.params['service_plan'], + description=self.description + ) + except DimensionDataAPIException as e: + + self.module.fail_json( + 
msg="Failed to create new network: %s" % to_native(e), exception=traceback.format_exc() + ) + + if self.module.params['wait'] is True: + network = self._wait_for_network_state(network.id, 'NORMAL') + + return network + + def _delete_network(self, network): + try: + if self.mcp_version == '1.0': + deleted = self.driver.ex_delete_network(network) + else: + deleted = self.driver.ex_delete_network_domain(network) + + if deleted: + self.module.exit_json( + changed=True, + msg="Deleted network with id %s" % network.id + ) + + self.module.fail_json( + "Unexpected failure deleting network with id %s", network.id + ) + + except DimensionDataAPIException as e: + self.module.fail_json( + msg="Failed to delete network: %s" % to_native(e), exception=traceback.format_exc() + ) + + def _wait_for_network_state(self, net_id, state_to_wait_for): + try: + return self.driver.connection.wait_for_state( + state_to_wait_for, + self.driver.ex_get_network_domain, + self.module.params['wait_poll_interval'], + self.module.params['wait_time'], + net_id + ) + except DimensionDataAPIException as e: + self.module.fail_json( + msg='Network did not reach % state in time: %s' % (state_to_wait_for, to_native(e)), + exception=traceback.format_exc() + ) + + +def main(): + module = DimensionDataNetworkModule() + if module.state == 'present': + module.state_present() + elif module.state == 'absent': + module.state_absent() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/dimensiondata/dimensiondata_vlan.py b/plugins/modules/cloud/dimensiondata/dimensiondata_vlan.py new file mode 100644 index 0000000000..08143974e4 --- /dev/null +++ b/plugins/modules/cloud/dimensiondata/dimensiondata_vlan.py @@ -0,0 +1,566 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (c) 2016 Dimension Data +# +# This module is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This software is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this software. If not, see . +# +# Authors: +# - Adam Friedman +# + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +ANSIBLE_METADATA = { + 'status': ['preview'], + 'supported_by': 'community', + 'metadata_version': '1.1' +} + +DOCUMENTATION = ''' +--- +module: dimensiondata_vlan +short_description: Manage a VLAN in a Cloud Control network domain. +extends_documentation_fragment: +- community.general.dimensiondata +- community.general.dimensiondata_wait + +description: + - Manage VLANs in Cloud Control network domains. +author: 'Adam Friedman (@tintoy)' +options: + name: + description: + - The name of the target VLAN. + - Required if C(state) is C(present). + description: + description: + - A description of the VLAN. + network_domain: + description: + - The Id or name of the target network domain. + required: true + private_ipv4_base_address: + description: + - The base address for the VLAN's IPv4 network (e.g. 192.168.1.0). + private_ipv4_prefix_size: + description: + - The size of the IPv4 address space, e.g 24. + - Required, if C(private_ipv4_base_address) is specified. 
+ state: + description: + - The desired state for the target VLAN. + - C(readonly) ensures that the state is only ever read, not modified (the module will fail if the resource does not exist). + choices: [present, absent, readonly] + default: present + allow_expand: + description: + - Permit expansion of the target VLAN's network if the module parameters specify a larger network than the VLAN currently possesses. + - If C(False), the module will fail under these conditions. + - This is intended to prevent accidental expansion of a VLAN's network (since this operation is not reversible). + type: bool + default: 'no' +''' + +EXAMPLES = ''' +# Add or update VLAN +- dimensiondata_vlan: + region: na + location: NA5 + network_domain: test_network + name: my_vlan1 + description: A test VLAN + private_ipv4_base_address: 192.168.23.0 + private_ipv4_prefix_size: 24 + state: present + wait: yes +# Read / get VLAN details +- dimensiondata_vlan: + region: na + location: NA5 + network_domain: test_network + name: my_vlan1 + state: readonly + wait: yes +# Delete a VLAN +- dimensiondata_vlan: + region: na + location: NA5 + network_domain: test_network + name: my_vlan_1 + state: absent + wait: yes +''' + +RETURN = ''' +vlan: + description: Dictionary describing the VLAN. + returned: On success when I(state) is 'present' + type: complex + contains: + id: + description: VLAN ID. + type: str + sample: "aaaaa000-a000-4050-a215-2808934ccccc" + name: + description: VLAN name. + type: str + sample: "My VLAN" + description: + description: VLAN description. + type: str + sample: "My VLAN description" + location: + description: Datacenter location. + type: str + sample: NA3 + private_ipv4_base_address: + description: The base address for the VLAN's private IPV4 network. + type: str + sample: 192.168.23.0 + private_ipv4_prefix_size: + description: The prefix size for the VLAN's private IPV4 network. + type: int + sample: 24 + private_ipv4_gateway_address: + description: The gateway address for the VLAN's private IPV4 network. + type: str + sample: 192.168.23.1 + private_ipv6_base_address: + description: The base address for the VLAN's IPV6 network. + type: str + sample: 2402:9900:111:1195:0:0:0:0 + private_ipv6_prefix_size: + description: The prefix size for the VLAN's IPV6 network. + type: int + sample: 64 + private_ipv6_gateway_address: + description: The gateway address for the VLAN's IPV6 network. + type: str + sample: 2402:9900:111:1195:0:0:0:1 + status: + description: VLAN status. + type: str + sample: NORMAL +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.dimensiondata import DimensionDataModule, UnknownNetworkError + +try: + from libcloud.common.dimensiondata import DimensionDataVlan, DimensionDataAPIException + + HAS_LIBCLOUD = True + +except ImportError: + DimensionDataVlan = None + + HAS_LIBCLOUD = False + + +class DimensionDataVlanModule(DimensionDataModule): + """ + The dimensiondata_vlan module for Ansible. + """ + + def __init__(self): + """ + Create a new Dimension Data VLAN module. 
+ """ + + super(DimensionDataVlanModule, self).__init__( + module=AnsibleModule( + argument_spec=DimensionDataModule.argument_spec_with_wait( + name=dict(required=True, type='str'), + description=dict(default='', type='str'), + network_domain=dict(required=True, type='str'), + private_ipv4_base_address=dict(default='', type='str'), + private_ipv4_prefix_size=dict(default=0, type='int'), + allow_expand=dict(required=False, default=False, type='bool'), + state=dict(default='present', choices=['present', 'absent', 'readonly']) + ), + required_together=DimensionDataModule.required_together() + ) + ) + + self.name = self.module.params['name'] + self.description = self.module.params['description'] + self.network_domain_selector = self.module.params['network_domain'] + self.private_ipv4_base_address = self.module.params['private_ipv4_base_address'] + self.private_ipv4_prefix_size = self.module.params['private_ipv4_prefix_size'] + self.state = self.module.params['state'] + self.allow_expand = self.module.params['allow_expand'] + + if self.wait and self.state != 'present': + self.module.fail_json( + msg='The wait parameter is only supported when state is "present".' + ) + + def state_present(self): + """ + Ensure that the target VLAN is present. + """ + + network_domain = self._get_network_domain() + + vlan = self._get_vlan(network_domain) + if not vlan: + if self.module.check_mode: + self.module.exit_json( + msg='VLAN "{0}" is absent from network domain "{1}" (should be present).'.format( + self.name, self.network_domain_selector + ), + changed=True + ) + + vlan = self._create_vlan(network_domain) + self.module.exit_json( + msg='Created VLAN "{0}" in network domain "{1}".'.format( + self.name, self.network_domain_selector + ), + vlan=vlan_to_dict(vlan), + changed=True + ) + else: + diff = VlanDiff(vlan, self.module.params) + if not diff.has_changes(): + self.module.exit_json( + msg='VLAN "{0}" is present in network domain "{1}" (no changes detected).'.format( + self.name, self.network_domain_selector + ), + vlan=vlan_to_dict(vlan), + changed=False + ) + + return + + try: + diff.ensure_legal_change() + except InvalidVlanChangeError as invalid_vlan_change: + self.module.fail_json( + msg='Unable to update VLAN "{0}" in network domain "{1}": {2}'.format( + self.name, self.network_domain_selector, invalid_vlan_change + ) + ) + + if diff.needs_expand() and not self.allow_expand: + self.module.fail_json( + msg='The configured private IPv4 network size ({0}-bit prefix) for '.format( + self.private_ipv4_prefix_size + ) + 'the VLAN differs from its current network size ({0}-bit prefix) '.format( + vlan.private_ipv4_range_size + ) + 'and needs to be expanded. Use allow_expand=true if this is what you want.' + ) + + if self.module.check_mode: + self.module.exit_json( + msg='VLAN "{0}" is present in network domain "{1}" (changes detected).'.format( + self.name, self.network_domain_selector + ), + vlan=vlan_to_dict(vlan), + changed=True + ) + + if diff.needs_edit(): + vlan.name = self.name + vlan.description = self.description + + self.driver.ex_update_vlan(vlan) + + if diff.needs_expand(): + vlan.private_ipv4_range_size = self.private_ipv4_prefix_size + self.driver.ex_expand_vlan(vlan) + + self.module.exit_json( + msg='Updated VLAN "{0}" in network domain "{1}".'.format( + self.name, self.network_domain_selector + ), + vlan=vlan_to_dict(vlan), + changed=True + ) + + def state_readonly(self): + """ + Read the target VLAN's state. 
+ """ + + network_domain = self._get_network_domain() + + vlan = self._get_vlan(network_domain) + if vlan: + self.module.exit_json( + vlan=vlan_to_dict(vlan), + changed=False + ) + else: + self.module.fail_json( + msg='VLAN "{0}" does not exist in network domain "{1}".'.format( + self.name, self.network_domain_selector + ) + ) + + def state_absent(self): + """ + Ensure that the target VLAN is not present. + """ + + network_domain = self._get_network_domain() + + vlan = self._get_vlan(network_domain) + if not vlan: + self.module.exit_json( + msg='VLAN "{0}" is absent from network domain "{1}".'.format( + self.name, self.network_domain_selector + ), + changed=False + ) + + return + + if self.module.check_mode: + self.module.exit_json( + msg='VLAN "{0}" is present in network domain "{1}" (should be absent).'.format( + self.name, self.network_domain_selector + ), + vlan=vlan_to_dict(vlan), + changed=True + ) + + self._delete_vlan(vlan) + + self.module.exit_json( + msg='Deleted VLAN "{0}" from network domain "{1}".'.format( + self.name, self.network_domain_selector + ), + changed=True + ) + + def _get_vlan(self, network_domain): + """ + Retrieve the target VLAN details from CloudControl. + + :param network_domain: The target network domain. + :return: The VLAN, or None if the target VLAN was not found. + :rtype: DimensionDataVlan + """ + + vlans = self.driver.ex_list_vlans( + location=self.location, + network_domain=network_domain + ) + matching_vlans = [vlan for vlan in vlans if vlan.name == self.name] + if matching_vlans: + return matching_vlans[0] + + return None + + def _create_vlan(self, network_domain): + vlan = self.driver.ex_create_vlan( + network_domain, + self.name, + self.private_ipv4_base_address, + self.description, + self.private_ipv4_prefix_size + ) + + if self.wait: + vlan = self._wait_for_vlan_state(vlan.id, 'NORMAL') + + return vlan + + def _delete_vlan(self, vlan): + try: + self.driver.ex_delete_vlan(vlan) + + # Not currently supported for deletes due to a bug in libcloud (module will error out if "wait" is specified when "state" is not "present"). + if self.wait: + self._wait_for_vlan_state(vlan, 'NOT_FOUND') + + except DimensionDataAPIException as api_exception: + self.module.fail_json( + msg='Failed to delete VLAN "{0}" due to unexpected error from the CloudControl API: {1}'.format( + vlan.id, api_exception.msg + ) + ) + + def _wait_for_vlan_state(self, vlan, state_to_wait_for): + network_domain = self._get_network_domain() + + wait_poll_interval = self.module.params['wait_poll_interval'] + wait_time = self.module.params['wait_time'] + + # Bizarre bug in libcloud when checking status after delete; socket.error is too generic to catch in this context so for now we don't even try. + + try: + return self.driver.connection.wait_for_state( + state_to_wait_for, + self.driver.ex_get_vlan, + wait_poll_interval, + wait_time, + vlan + ) + + except DimensionDataAPIException as api_exception: + if api_exception.code != 'RESOURCE_NOT_FOUND': + raise + + return DimensionDataVlan( + id=vlan.id, + status='NOT_FOUND', + name='', + description='', + private_ipv4_range_address='', + private_ipv4_range_size=0, + ipv4_gateway='', + ipv6_range_address='', + ipv6_range_size=0, + ipv6_gateway='', + location=self.location, + network_domain=network_domain + ) + + def _get_network_domain(self): + """ + Retrieve the target network domain from the Cloud Control API. + + :return: The network domain. 
+ """ + + try: + return self.get_network_domain( + self.network_domain_selector, self.location + ) + except UnknownNetworkError: + self.module.fail_json( + msg='Cannot find network domain "{0}" in datacenter "{1}".'.format( + self.network_domain_selector, self.location + ) + ) + + return None + + +class InvalidVlanChangeError(Exception): + """ + Error raised when an illegal change to VLAN state is attempted. + """ + + pass + + +class VlanDiff(object): + """ + Represents differences between VLAN information (from CloudControl) and module parameters. + """ + + def __init__(self, vlan, module_params): + """ + + :param vlan: The VLAN information from CloudControl. + :type vlan: DimensionDataVlan + :param module_params: The module parameters. + :type module_params: dict + """ + + self.vlan = vlan + self.module_params = module_params + + self.name_changed = module_params['name'] != vlan.name + self.description_changed = module_params['description'] != vlan.description + self.private_ipv4_base_address_changed = module_params['private_ipv4_base_address'] != vlan.private_ipv4_range_address + self.private_ipv4_prefix_size_changed = module_params['private_ipv4_prefix_size'] != vlan.private_ipv4_range_size + + # Is configured prefix size greater than or less than the actual prefix size? + private_ipv4_prefix_size_difference = module_params['private_ipv4_prefix_size'] - vlan.private_ipv4_range_size + self.private_ipv4_prefix_size_increased = private_ipv4_prefix_size_difference > 0 + self.private_ipv4_prefix_size_decreased = private_ipv4_prefix_size_difference < 0 + + def has_changes(self): + """ + Does the VlanDiff represent any changes between the VLAN and module configuration? + + :return: True, if there are change changes; otherwise, False. + """ + + return self.needs_edit() or self.needs_expand() + + def ensure_legal_change(self): + """ + Ensure the change (if any) represented by the VlanDiff represents a legal change to VLAN state. + + - private_ipv4_base_address cannot be changed + - private_ipv4_prefix_size must be greater than or equal to the VLAN's existing private_ipv4_range_size + + :raise InvalidVlanChangeError: The VlanDiff does not represent a legal change to VLAN state. + """ + + # Cannot change base address for private IPv4 network. + if self.private_ipv4_base_address_changed: + raise InvalidVlanChangeError('Cannot change the private IPV4 base address for an existing VLAN.') + + # Cannot shrink private IPv4 network (by increasing prefix size). + if self.private_ipv4_prefix_size_increased: + raise InvalidVlanChangeError('Cannot shrink the private IPV4 network for an existing VLAN (only expand is supported).') + + def needs_edit(self): + """ + Is an Edit operation required to resolve the differences between the VLAN information and the module parameters? + + :return: True, if an Edit operation is required; otherwise, False. + """ + + return self.name_changed or self.description_changed + + def needs_expand(self): + """ + Is an Expand operation required to resolve the differences between the VLAN information and the module parameters? + + The VLAN's network is expanded by reducing the size of its network prefix. + + :return: True, if an Expand operation is required; otherwise, False. 
+ """ + + return self.private_ipv4_prefix_size_decreased + + +def vlan_to_dict(vlan): + return { + 'id': vlan.id, + 'name': vlan.name, + 'description': vlan.description, + 'location': vlan.location.id, + 'private_ipv4_base_address': vlan.private_ipv4_range_address, + 'private_ipv4_prefix_size': vlan.private_ipv4_range_size, + 'private_ipv4_gateway_address': vlan.ipv4_gateway, + 'ipv6_base_address': vlan.ipv6_range_address, + 'ipv6_prefix_size': vlan.ipv6_range_size, + 'ipv6_gateway_address': vlan.ipv6_gateway, + 'status': vlan.status + } + + +def main(): + module = DimensionDataVlanModule() + + if module.state == 'present': + module.state_present() + elif module.state == 'readonly': + module.state_readonly() + elif module.state == 'absent': + module.state_absent() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/docker/docker_compose.py b/plugins/modules/cloud/docker/docker_compose.py new file mode 100644 index 0000000000..aa7382b70a --- /dev/null +++ b/plugins/modules/cloud/docker/docker_compose.py @@ -0,0 +1,1156 @@ +#!/usr/bin/python +# +# Copyright 2016 Red Hat | Ansible +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' + +module: docker_compose + +short_description: Manage multi-container Docker applications with Docker Compose. + + +author: "Chris Houseknecht (@chouseknecht)" + +description: + - Uses Docker Compose to start, shutdown and scale services. + - Works with compose versions 1 and 2. + - Configuration can be read from a C(docker-compose.yml) or C(docker-compose.yaml) file or inline using the I(definition) option. + - See the examples for more details. + - Supports check mode. + - This module was called C(docker_service) before Ansible 2.8. The usage did not change. + +options: + project_src: + description: + - Path to a directory containing a C(docker-compose.yml) or C(docker-compose.yaml) file. + - Mutually exclusive with I(definition). + - Required when no I(definition) is provided. + type: path + project_name: + description: + - Provide a project name. If not provided, the project name is taken from the basename of I(project_src). + - Required when I(definition) is provided. + type: str + files: + description: + - List of Compose file names relative to I(project_src). Overrides C(docker-compose.yml) or C(docker-compose.yaml). + - Files are loaded and merged in the order given. + type: list + elements: path + state: + description: + - Desired state of the project. + - Specifying C(present) is the same as running C(docker-compose up) resp. C(docker-compose stop) (with I(stopped)) resp. C(docker-compose restart) + (with I(restarted)). + - Specifying C(absent) is the same as running C(docker-compose down). + type: str + default: present + choices: + - absent + - present + services: + description: + - When I(state) is C(present) run C(docker-compose up) resp. C(docker-compose stop) (with I(stopped)) resp. C(docker-compose restart) (with I(restarted)) + on a subset of services. + - If empty, which is the default, the operation will be performed on all services defined in the Compose file (or inline I(definition)). + type: list + elements: str + scale: + description: + - When I(state) is C(present) scale services. 
Provide a dictionary of key/value pairs where the key + is the name of the service and the value is an integer count for the number of containers. + type: dict + dependencies: + description: + - When I(state) is C(present) specify whether or not to include linked services. + type: bool + default: yes + definition: + description: + - Compose file describing one or more services, networks and volumes. + - Mutually exclusive with I(project_src) and I(files). + type: dict + hostname_check: + description: + - Whether or not to check the Docker daemon's hostname against the name provided in the client certificate. + type: bool + default: no + recreate: + description: + - By default containers will be recreated when their configuration differs from the service definition. + - Setting to C(never) ignores configuration differences and leaves existing containers unchanged. + - Setting to C(always) forces recreation of all existing containers. + type: str + default: smart + choices: + - always + - never + - smart + build: + description: + - Use with I(state) C(present) to always build images prior to starting the application. + - Same as running C(docker-compose build) with the pull option. + - Images will only be rebuilt if Docker detects a change in the Dockerfile or build directory contents. + - Use the I(nocache) option to ignore the image cache when performing the build. + - If an existing image is replaced, services using the image will be recreated unless I(recreate) is C(never). + type: bool + default: no + pull: + description: + - Use with I(state) C(present) to always pull images prior to starting the application. + - Same as running C(docker-compose pull). + - When a new image is pulled, services using the image will be recreated unless I(recreate) is C(never). + type: bool + default: no + nocache: + description: + - Use with the I(build) option to ignore the cache during the image build process. + type: bool + default: no + remove_images: + description: + - Use with I(state) C(absent) to remove all images or only local images. + type: str + choices: + - 'all' + - 'local' + remove_volumes: + description: + - Use with I(state) C(absent) to remove data volumes. + type: bool + default: no + stopped: + description: + - Use with I(state) C(present) to stop all containers defined in the Compose file. + - If I(services) is defined, only the containers listed there will be stopped. + type: bool + default: no + restarted: + description: + - Use with I(state) C(present) to restart all containers defined in the Compose file. + - If I(services) is defined, only the containers listed there will be restarted. + type: bool + default: no + remove_orphans: + description: + - Remove containers for services not defined in the Compose file. + type: bool + default: no + timeout: + description: + - timeout in seconds for container shutdown when attached or when containers are already running. + type: int + default: 10 + +extends_documentation_fragment: +- community.general.docker +- community.general.docker.docker_py_1_documentation + + +requirements: + - "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 1.8.0 (use L(docker-py,https://pypi.org/project/docker-py/) for Python 2.6)" + - "docker-compose >= 1.7.0" + - "Docker API >= 1.20" + - "PyYAML >= 3.11" +''' + +EXAMPLES = ''' +# Examples use the django example at https://docs.docker.com/compose/django. 
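Editor's note: for reference, the C(recreate) choices documented above translate directly into the compose "up" options that cmd_up() builds later in this module; a minimal sketch of that mapping:

def recreate_options(recreate):
    """Map the module's recreate choice onto docker-compose up options."""
    return {
        u'--no-recreate': recreate == 'never',
        u'--force-recreate': recreate == 'always',
    }

assert recreate_options('smart') == {u'--no-recreate': False, u'--force-recreate': False}
assert recreate_options('always')[u'--force-recreate'] is True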
Follow it to create the +# flask directory + +- name: Run using a project directory + hosts: localhost + gather_facts: no + tasks: + - name: Tear down existing services + docker_compose: + project_src: flask + state: absent + + - name: Create and start services + docker_compose: + project_src: flask + register: output + + - debug: + var: output + + - name: Run `docker-compose up` again + docker_compose: + project_src: flask + build: no + register: output + + - debug: + var: output + + - assert: + that: "not output.changed " + + - name: Stop all services + docker_compose: + project_src: flask + build: no + stopped: yes + register: output + + - debug: + var: output + + - assert: + that: + - "not web.flask_web_1.state.running" + - "not db.flask_db_1.state.running" + + - name: Restart services + docker_compose: + project_src: flask + build: no + restarted: yes + register: output + + - debug: + var: output + + - assert: + that: + - "web.flask_web_1.state.running" + - "db.flask_db_1.state.running" + +- name: Scale the web service to 2 + hosts: localhost + gather_facts: no + tasks: + - docker_compose: + project_src: flask + scale: + web: 2 + register: output + + - debug: + var: output + +- name: Run with inline v2 compose + hosts: localhost + gather_facts: no + tasks: + - docker_compose: + project_src: flask + state: absent + + - docker_compose: + project_name: flask + definition: + version: '2' + services: + db: + image: postgres + web: + build: "{{ playbook_dir }}/flask" + command: "python manage.py runserver 0.0.0.0:8000" + volumes: + - "{{ playbook_dir }}/flask:/code" + ports: + - "8000:8000" + depends_on: + - db + register: output + + - debug: + var: output + + - assert: + that: + - "web.flask_web_1.state.running" + - "db.flask_db_1.state.running" + +- name: Run with inline v1 compose + hosts: localhost + gather_facts: no + tasks: + - docker_compose: + project_src: flask + state: absent + + - docker_compose: + project_name: flask + definition: + db: + image: postgres + web: + build: "{{ playbook_dir }}/flask" + command: "python manage.py runserver 0.0.0.0:8000" + volumes: + - "{{ playbook_dir }}/flask:/code" + ports: + - "8000:8000" + links: + - db + register: output + + - debug: + var: output + + - assert: + that: + - "web.flask_web_1.state.running" + - "db.flask_db_1.state.running" +''' + +RETURN = ''' +services: + description: + - A dictionary mapping the service's name to a dictionary of containers. + - Note that facts are part of the registered vars since Ansible 2.8. For compatibility reasons, the facts + are also accessible directly. The service's name is the variable with which the container dictionary + can be accessed. Note that the returned facts will be removed in Ansible 2.12. + returned: success + type: complex + contains: + container_name: + description: Name of the container. Format is C(project_service_#). + returned: success + type: complex + contains: + cmd: + description: One or more commands to be executed in the container. + returned: success + type: list + elements: str + example: ["postgres"] + image: + description: Name of the image from which the container was built. + returned: success + type: str + example: postgres + labels: + description: Meta data assigned to the container. + returned: success + type: dict + example: {...} + networks: + description: Contains a dictionary for each network to which the container is a member. + returned: success + type: list + elements: dict + contains: + IPAddress: + description: The IP address assigned to the container. 
+ returned: success + type: str + example: 172.17.0.2 + IPPrefixLen: + description: Number of bits used by the subnet. + returned: success + type: int + example: 16 + aliases: + description: Aliases assigned to the container by the network. + returned: success + type: list + elements: str + example: ['db'] + globalIPv6: + description: IPv6 address assigned to the container. + returned: success + type: str + example: '' + globalIPv6PrefixLen: + description: IPv6 subnet length. + returned: success + type: int + example: 0 + links: + description: List of container names to which this container is linked. + returned: success + type: list + elements: str + example: null + macAddress: + description: Mac Address assigned to the virtual NIC. + returned: success + type: str + example: "02:42:ac:11:00:02" + state: + description: Information regarding the current disposition of the container. + returned: success + type: dict + contains: + running: + description: Whether or not the container is up with a running process. + returned: success + type: bool + example: true + status: + description: Description of the running state. + returned: success + type: str + example: running + +actions: + description: Provides the actions to be taken on each service as determined by compose. + returned: when in check mode or I(debug) is C(yes) + type: complex + contains: + service_name: + description: Name of the service. + returned: always + type: complex + contains: + pulled_image: + description: Provides image details when a new image is pulled for the service. + returned: on image pull + type: complex + contains: + name: + description: name of the image + returned: always + type: str + id: + description: image hash + returned: always + type: str + built_image: + description: Provides image details when a new image is built for the service. + returned: on image build + type: complex + contains: + name: + description: name of the image + returned: always + type: str + id: + description: image hash + returned: always + type: str + + action: + description: A descriptive name of the action to be performed on the service's containers. 
+ returned: always + type: list + elements: str + contains: + id: + description: the container's long ID + returned: always + type: str + name: + description: the container's name + returned: always + type: str + short_id: + description: the container's short ID + returned: always + type: str +''' + +import os +import re +import sys +import tempfile +import traceback +from contextlib import contextmanager +from distutils.version import LooseVersion + +try: + import yaml + HAS_YAML = True + HAS_YAML_EXC = None +except ImportError as dummy: + HAS_YAML = False + HAS_YAML_EXC = traceback.format_exc() + +try: + from docker.errors import DockerException +except ImportError: + # missing Docker SDK for Python handled in ansible.module_utils.docker.common + pass + +try: + from compose import __version__ as compose_version + from compose.cli.command import project_from_options + from compose.service import NoSuchImageError + from compose.cli.main import convergence_strategy_from_opts, build_action_from_opts, image_type_from_opt + from compose.const import DEFAULT_TIMEOUT, LABEL_SERVICE, LABEL_PROJECT, LABEL_ONE_OFF + HAS_COMPOSE = True + HAS_COMPOSE_EXC = None + MINIMUM_COMPOSE_VERSION = '1.7.0' +except ImportError as dummy: + HAS_COMPOSE = False + HAS_COMPOSE_EXC = traceback.format_exc() + DEFAULT_TIMEOUT = 10 + +from ansible_collections.community.general.plugins.module_utils.docker.common import ( + AnsibleDockerClient, + DockerBaseClass, + RequestException, +) + + +AUTH_PARAM_MAPPING = { + u'docker_host': u'--host', + u'tls': u'--tls', + u'cacert_path': u'--tlscacert', + u'cert_path': u'--tlscert', + u'key_path': u'--tlskey', + u'tls_verify': u'--tlsverify' +} + + +@contextmanager +def stdout_redirector(path_name): + old_stdout = sys.stdout + fd = open(path_name, 'w') + sys.stdout = fd + try: + yield + finally: + sys.stdout = old_stdout + + +@contextmanager +def stderr_redirector(path_name): + old_fh = sys.stderr + fd = open(path_name, 'w') + sys.stderr = fd + try: + yield + finally: + sys.stderr = old_fh + + +def make_redirection_tempfiles(): + dummy, out_redir_name = tempfile.mkstemp(prefix="ansible") + dummy, err_redir_name = tempfile.mkstemp(prefix="ansible") + return (out_redir_name, err_redir_name) + + +def cleanup_redirection_tempfiles(out_name, err_name): + for i in [out_name, err_name]: + os.remove(i) + + +def get_redirected_output(path_name): + output = [] + with open(path_name, 'r') as fd: + for line in fd: + # strip terminal format/color chars + new_line = re.sub(r'\x1b\[.+m', '', line) + output.append(new_line) + os.remove(path_name) + return output + + +def attempt_extract_errors(exc_str, stdout, stderr): + errors = [l.strip() for l in stderr if l.strip().startswith('ERROR:')] + errors.extend([l.strip() for l in stdout if l.strip().startswith('ERROR:')]) + + warnings = [l.strip() for l in stderr if l.strip().startswith('WARNING:')] + warnings.extend([l.strip() for l in stdout if l.strip().startswith('WARNING:')]) + + # assume either the exception body (if present) or the last warning was the 'most' + # fatal. 
+ + if exc_str.strip(): + msg = exc_str.strip() + elif errors: + msg = errors[-1].encode('utf-8') + else: + msg = 'unknown cause' + + return { + 'warnings': [w.encode('utf-8') for w in warnings], + 'errors': [e.encode('utf-8') for e in errors], + 'msg': msg, + 'module_stderr': ''.join(stderr), + 'module_stdout': ''.join(stdout) + } + + +def get_failure_info(exc, out_name, err_name=None, msg_format='%s'): + if err_name is None: + stderr = [] + else: + stderr = get_redirected_output(err_name) + stdout = get_redirected_output(out_name) + + reason = attempt_extract_errors(str(exc), stdout, stderr) + reason['msg'] = msg_format % reason['msg'] + return reason + + +class ContainerManager(DockerBaseClass): + + def __init__(self, client): + + super(ContainerManager, self).__init__() + + self.client = client + self.project_src = None + self.files = None + self.project_name = None + self.state = None + self.definition = None + self.hostname_check = None + self.timeout = None + self.remove_images = None + self.remove_orphans = None + self.remove_volumes = None + self.stopped = None + self.restarted = None + self.recreate = None + self.build = None + self.dependencies = None + self.services = None + self.scale = None + self.debug = None + self.pull = None + self.nocache = None + + for key, value in client.module.params.items(): + setattr(self, key, value) + + self.check_mode = client.check_mode + + if not self.debug: + self.debug = client.module._debug + + self.options = dict() + self.options.update(self._get_auth_options()) + self.options[u'--skip-hostname-check'] = (not self.hostname_check) + + if self.project_name: + self.options[u'--project-name'] = self.project_name + + if self.files: + self.options[u'--file'] = self.files + + if not HAS_COMPOSE: + self.client.fail("Unable to load docker-compose. Try `pip install docker-compose`. Error: %s" % + HAS_COMPOSE_EXC) + + if LooseVersion(compose_version) < LooseVersion(MINIMUM_COMPOSE_VERSION): + self.client.fail("Found docker-compose version %s. Minimum required version is %s. " + "Upgrade docker-compose to a min version of %s." % + (compose_version, MINIMUM_COMPOSE_VERSION, MINIMUM_COMPOSE_VERSION)) + + self.log("options: ") + self.log(self.options, pretty_print=True) + + if self.definition: + if not HAS_YAML: + self.client.fail("Unable to load yaml. Try `pip install PyYAML`. 
Error: %s" % HAS_YAML_EXC) + + if not self.project_name: + self.client.fail("Parameter error - project_name required when providing definition.") + + self.project_src = tempfile.mkdtemp(prefix="ansible") + compose_file = os.path.join(self.project_src, "docker-compose.yml") + try: + self.log('writing: ') + self.log(yaml.dump(self.definition, default_flow_style=False)) + with open(compose_file, 'w') as f: + f.write(yaml.dump(self.definition, default_flow_style=False)) + except Exception as exc: + self.client.fail("Error writing to %s - %s" % (compose_file, str(exc))) + else: + if not self.project_src: + self.client.fail("Parameter error - project_src required.") + + try: + self.log("project_src: %s" % self.project_src) + self.project = project_from_options(self.project_src, self.options) + except Exception as exc: + self.client.fail("Configuration error - %s" % str(exc)) + + def exec_module(self): + result = dict() + + if self.state == 'present': + result = self.cmd_up() + elif self.state == 'absent': + result = self.cmd_down() + + if self.definition: + compose_file = os.path.join(self.project_src, "docker-compose.yml") + self.log("removing %s" % compose_file) + os.remove(compose_file) + self.log("removing %s" % self.project_src) + os.rmdir(self.project_src) + + if not self.check_mode and not self.debug and result.get('actions'): + result.pop('actions') + + return result + + def _get_auth_options(self): + options = dict() + for key, value in self.client.auth_params.items(): + if value is not None: + option = AUTH_PARAM_MAPPING.get(key) + if option: + options[option] = value + return options + + def cmd_up(self): + + start_deps = self.dependencies + service_names = self.services + detached = True + result = dict(changed=False, actions=[], ansible_facts=dict(), services=dict()) + + up_options = { + u'--no-recreate': False, + u'--build': False, + u'--no-build': False, + u'--no-deps': False, + u'--force-recreate': False, + } + + if self.recreate == 'never': + up_options[u'--no-recreate'] = True + elif self.recreate == 'always': + up_options[u'--force-recreate'] = True + + if self.remove_orphans: + up_options[u'--remove-orphans'] = True + + converge = convergence_strategy_from_opts(up_options) + self.log("convergence strategy: %s" % converge) + + if self.pull: + pull_output = self.cmd_pull() + result['changed'] = pull_output['changed'] + result['actions'] += pull_output['actions'] + + if self.build: + build_output = self.cmd_build() + result['changed'] = build_output['changed'] + result['actions'] += build_output['actions'] + + if self.remove_orphans: + containers = self.client.containers( + filters={ + 'label': [ + '{0}={1}'.format(LABEL_PROJECT, self.project.name), + '{0}={1}'.format(LABEL_ONE_OFF, "False") + ], + } + ) + + orphans = [] + for container in containers: + service_name = container.get('Labels', {}).get(LABEL_SERVICE) + if service_name not in self.project.service_names: + orphans.append(service_name) + + if orphans: + result['changed'] = True + + for service in self.project.services: + if not service_names or service.name in service_names: + plan = service.convergence_plan(strategy=converge) + if plan.action != 'noop': + result['changed'] = True + result_action = dict(service=service.name) + result_action[plan.action] = [] + for container in plan.containers: + result_action[plan.action].append(dict( + id=container.id, + name=container.name, + short_id=container.short_id, + )) + result['actions'].append(result_action) + + if not self.check_mode and result['changed']: + 
out_redir_name, err_redir_name = make_redirection_tempfiles() + try: + with stdout_redirector(out_redir_name): + with stderr_redirector(err_redir_name): + do_build = build_action_from_opts(up_options) + self.log('Setting do_build to %s' % do_build) + self.project.up( + service_names=service_names, + start_deps=start_deps, + strategy=converge, + do_build=do_build, + detached=detached, + remove_orphans=self.remove_orphans, + timeout=self.timeout) + except Exception as exc: + fail_reason = get_failure_info(exc, out_redir_name, err_redir_name, + msg_format="Error starting project %s") + self.client.fail(**fail_reason) + else: + cleanup_redirection_tempfiles(out_redir_name, err_redir_name) + + if self.stopped: + stop_output = self.cmd_stop(service_names) + result['changed'] = stop_output['changed'] + result['actions'] += stop_output['actions'] + + if self.restarted: + restart_output = self.cmd_restart(service_names) + result['changed'] = restart_output['changed'] + result['actions'] += restart_output['actions'] + + if self.scale: + scale_output = self.cmd_scale() + result['changed'] = scale_output['changed'] + result['actions'] += scale_output['actions'] + + for service in self.project.services: + service_facts = dict() + result['ansible_facts'][service.name] = service_facts + result['services'][service.name] = service_facts + for container in service.containers(stopped=True): + inspection = container.inspect() + # pare down the inspection data to the most useful bits + facts = dict( + cmd=[], + labels=dict(), + image=None, + state=dict( + running=None, + status=None + ), + networks=dict() + ) + if inspection['Config'].get('Cmd', None) is not None: + facts['cmd'] = inspection['Config']['Cmd'] + if inspection['Config'].get('Labels', None) is not None: + facts['labels'] = inspection['Config']['Labels'] + if inspection['Config'].get('Image', None) is not None: + facts['image'] = inspection['Config']['Image'] + if inspection['State'].get('Running', None) is not None: + facts['state']['running'] = inspection['State']['Running'] + if inspection['State'].get('Status', None) is not None: + facts['state']['status'] = inspection['State']['Status'] + + if inspection.get('NetworkSettings') and inspection['NetworkSettings'].get('Networks'): + networks = inspection['NetworkSettings']['Networks'] + for key in networks: + facts['networks'][key] = dict( + aliases=[], + globalIPv6=None, + globalIPv6PrefixLen=0, + IPAddress=None, + IPPrefixLen=0, + links=None, + macAddress=None, + ) + if networks[key].get('Aliases', None) is not None: + facts['networks'][key]['aliases'] = networks[key]['Aliases'] + if networks[key].get('GlobalIPv6Address', None) is not None: + facts['networks'][key]['globalIPv6'] = networks[key]['GlobalIPv6Address'] + if networks[key].get('GlobalIPv6PrefixLen', None) is not None: + facts['networks'][key]['globalIPv6PrefixLen'] = networks[key]['GlobalIPv6PrefixLen'] + if networks[key].get('IPAddress', None) is not None: + facts['networks'][key]['IPAddress'] = networks[key]['IPAddress'] + if networks[key].get('IPPrefixLen', None) is not None: + facts['networks'][key]['IPPrefixLen'] = networks[key]['IPPrefixLen'] + if networks[key].get('Links', None) is not None: + facts['networks'][key]['links'] = networks[key]['Links'] + if networks[key].get('MacAddress', None) is not None: + facts['networks'][key]['macAddress'] = networks[key]['MacAddress'] + + service_facts[container.name] = facts + + return result + + def cmd_pull(self): + result = dict( + changed=False, + actions=[], + ) + + if not 
self.check_mode: + for service in self.project.get_services(self.services, include_deps=False): + if 'image' not in service.options: + continue + + self.log('Pulling image for service %s' % service.name) + # store the existing image ID + old_image_id = '' + try: + image = service.image() + if image and image.get('Id'): + old_image_id = image['Id'] + except NoSuchImageError: + pass + except Exception as exc: + self.client.fail("Error: service image lookup failed - %s" % str(exc)) + + out_redir_name, err_redir_name = make_redirection_tempfiles() + # pull the image + try: + with stdout_redirector(out_redir_name): + with stderr_redirector(err_redir_name): + service.pull(ignore_pull_failures=False) + except Exception as exc: + fail_reason = get_failure_info(exc, out_redir_name, err_redir_name, + msg_format="Error: pull failed with %s") + self.client.fail(**fail_reason) + else: + cleanup_redirection_tempfiles(out_redir_name, err_redir_name) + + # store the new image ID + new_image_id = '' + try: + image = service.image() + if image and image.get('Id'): + new_image_id = image['Id'] + except NoSuchImageError as exc: + self.client.fail("Error: service image lookup failed after pull - %s" % str(exc)) + + if new_image_id != old_image_id: + # if a new image was pulled + result['changed'] = True + result['actions'].append(dict( + service=service.name, + pulled_image=dict( + name=service.image_name, + id=new_image_id + ) + )) + return result + + def cmd_build(self): + result = dict( + changed=False, + actions=[] + ) + if not self.check_mode: + for service in self.project.get_services(self.services, include_deps=False): + if service.can_be_built(): + self.log('Building image for service %s' % service.name) + # store the existing image ID + old_image_id = '' + try: + image = service.image() + if image and image.get('Id'): + old_image_id = image['Id'] + except NoSuchImageError: + pass + except Exception as exc: + self.client.fail("Error: service image lookup failed - %s" % str(exc)) + + out_redir_name, err_redir_name = make_redirection_tempfiles() + # build the image + try: + with stdout_redirector(out_redir_name): + with stderr_redirector(err_redir_name): + new_image_id = service.build(pull=self.pull, no_cache=self.nocache) + except Exception as exc: + fail_reason = get_failure_info(exc, out_redir_name, err_redir_name, + msg_format="Error: build failed with %s") + self.client.fail(**fail_reason) + else: + cleanup_redirection_tempfiles(out_redir_name, err_redir_name) + + if new_image_id not in old_image_id: + # if a new image was built + result['changed'] = True + result['actions'].append(dict( + service=service.name, + built_image=dict( + name=service.image_name, + id=new_image_id + ) + )) + return result + + def cmd_down(self): + result = dict( + changed=False, + actions=[] + ) + for service in self.project.services: + containers = service.containers(stopped=True) + if len(containers): + result['changed'] = True + result['actions'].append(dict( + service=service.name, + deleted=[container.name for container in containers] + )) + if not self.check_mode and result['changed']: + image_type = image_type_from_opt('--rmi', self.remove_images) + out_redir_name, err_redir_name = make_redirection_tempfiles() + try: + with stdout_redirector(out_redir_name): + with stderr_redirector(err_redir_name): + self.project.down(image_type, self.remove_volumes, self.remove_orphans) + except Exception as exc: + fail_reason = get_failure_info(exc, out_redir_name, err_redir_name, + msg_format="Error stopping project - %s") + 
self.client.fail(**fail_reason) + else: + cleanup_redirection_tempfiles(out_redir_name, err_redir_name) + return result + + def cmd_stop(self, service_names): + result = dict( + changed=False, + actions=[] + ) + for service in self.project.services: + if not service_names or service.name in service_names: + service_res = dict( + service=service.name, + stop=[] + ) + for container in service.containers(stopped=False): + result['changed'] = True + service_res['stop'].append(dict( + id=container.id, + name=container.name, + short_id=container.short_id + )) + result['actions'].append(service_res) + if not self.check_mode and result['changed']: + out_redir_name, err_redir_name = make_redirection_tempfiles() + try: + with stdout_redirector(out_redir_name): + with stderr_redirector(err_redir_name): + self.project.stop(service_names=service_names, timeout=self.timeout) + except Exception as exc: + fail_reason = get_failure_info(exc, out_redir_name, err_redir_name, + msg_format="Error stopping project %s") + self.client.fail(**fail_reason) + else: + cleanup_redirection_tempfiles(out_redir_name, err_redir_name) + return result + + def cmd_restart(self, service_names): + result = dict( + changed=False, + actions=[] + ) + + for service in self.project.services: + if not service_names or service.name in service_names: + service_res = dict( + service=service.name, + restart=[] + ) + for container in service.containers(stopped=True): + result['changed'] = True + service_res['restart'].append(dict( + id=container.id, + name=container.name, + short_id=container.short_id + )) + result['actions'].append(service_res) + + if not self.check_mode and result['changed']: + out_redir_name, err_redir_name = make_redirection_tempfiles() + try: + with stdout_redirector(out_redir_name): + with stderr_redirector(err_redir_name): + self.project.restart(service_names=service_names, timeout=self.timeout) + except Exception as exc: + fail_reason = get_failure_info(exc, out_redir_name, err_redir_name, + msg_format="Error restarting project %s") + self.client.fail(**fail_reason) + else: + cleanup_redirection_tempfiles(out_redir_name, err_redir_name) + return result + + def cmd_scale(self): + result = dict( + changed=False, + actions=[] + ) + for service in self.project.services: + if service.name in self.scale: + service_res = dict( + service=service.name, + scale=0 + ) + containers = service.containers(stopped=True) + scale = self.parse_scale(service.name) + if len(containers) != scale: + result['changed'] = True + service_res['scale'] = scale - len(containers) + if not self.check_mode: + out_redir_name, err_redir_name = make_redirection_tempfiles() + try: + with stdout_redirector(out_redir_name): + with stderr_redirector(err_redir_name): + service.scale(scale) + except Exception as exc: + fail_reason = get_failure_info(exc, out_redir_name, err_redir_name, + msg_format="Error scaling {0} - %s".format(service.name)) + self.client.fail(**fail_reason) + else: + cleanup_redirection_tempfiles(out_redir_name, err_redir_name) + result['actions'].append(service_res) + return result + + def parse_scale(self, service_name): + try: + return int(self.scale[service_name]) + except ValueError: + self.client.fail("Error scaling %s - expected int, got %s", + service_name, str(type(self.scale[service_name]))) + + +def main(): + argument_spec = dict( + project_src=dict(type='path'), + project_name=dict(type='str',), + files=dict(type='list', elements='path'), + state=dict(type='str', default='present', choices=['absent', 'present']), + 
definition=dict(type='dict'), + hostname_check=dict(type='bool', default=False), + recreate=dict(type='str', default='smart', choices=['always', 'never', 'smart']), + build=dict(type='bool', default=False), + remove_images=dict(type='str', choices=['all', 'local']), + remove_volumes=dict(type='bool', default=False), + remove_orphans=dict(type='bool', default=False), + stopped=dict(type='bool', default=False), + restarted=dict(type='bool', default=False), + scale=dict(type='dict'), + services=dict(type='list', elements='str'), + dependencies=dict(type='bool', default=True), + pull=dict(type='bool', default=False), + nocache=dict(type='bool', default=False), + debug=dict(type='bool', default=False), + timeout=dict(type='int', default=DEFAULT_TIMEOUT) + ) + + mutually_exclusive = [ + ('definition', 'project_src'), + ('definition', 'files') + ] + + client = AnsibleDockerClient( + argument_spec=argument_spec, + mutually_exclusive=mutually_exclusive, + supports_check_mode=True, + min_docker_api_version='1.20', + ) + if client.module._name == 'docker_service': + client.module.deprecate("The 'docker_service' module has been renamed to 'docker_compose'.", version='2.12') + + try: + result = ContainerManager(client).exec_module() + client.module.exit_json(**result) + except DockerException as e: + client.fail('An unexpected docker error occurred: {0}'.format(e), exception=traceback.format_exc()) + except RequestException as e: + client.fail('An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(e), exception=traceback.format_exc()) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/docker/docker_config.py b/plugins/modules/cloud/docker/docker_config.py new file mode 100644 index 0000000000..540bbaa8f6 --- /dev/null +++ b/plugins/modules/cloud/docker/docker_config.py @@ -0,0 +1,304 @@ +#!/usr/bin/python +# +# Copyright 2016 Red Hat | Ansible +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: docker_config + +short_description: Manage docker configs. + + +description: + - Create and remove Docker configs in a Swarm environment. Similar to C(docker config create) and C(docker config rm). + - Adds to the metadata of new configs 'ansible_key', an encrypted hash representation of the data, which is then used + in future runs to test if a config has changed. If 'ansible_key' is not present, then a config will not be updated + unless the I(force) option is set. + - Updates to configs are performed by removing the config and creating it again. +options: + data: + description: + - The value of the config. Required when state is C(present). + type: str + data_is_b64: + description: + - If set to C(true), the data is assumed to be Base64 encoded and will be + decoded before being used. + - To use binary I(data), it is better to keep it Base64 encoded and let it + be decoded by this option. + type: bool + default: no + labels: + description: + - "A map of key:value meta data, where both the I(key) and I(value) are expected to be a string." + - If new meta data is provided, or existing meta data is modified, the config will be updated by removing it and creating it again. 
+ type: dict + force: + description: + - Use with state C(present) to always remove and recreate an existing config. + - If C(true), an existing config will be replaced, even if it has not been changed. + type: bool + default: no + name: + description: + - The name of the config. + type: str + required: yes + state: + description: + - Set to C(present), if the config should exist, and C(absent), if it should not. + type: str + default: present + choices: + - absent + - present + +extends_documentation_fragment: +- community.general.docker +- community.general.docker.docker_py_2_documentation + + +requirements: + - "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 2.6.0" + - "Docker API >= 1.30" + +author: + - Chris Houseknecht (@chouseknecht) + - John Hu (@ushuz) +''' + +EXAMPLES = ''' + +- name: Create config foo (from a file on the control machine) + docker_config: + name: foo + # If the file is JSON or binary, Ansible might modify it (because + # it is first decoded and later re-encoded). Base64-encoding the + # file directly after reading it prevents this to happen. + data: "{{ lookup('file', '/path/to/config/file') | b64encode }}" + data_is_b64: true + state: present + +- name: Change the config data + docker_config: + name: foo + data: Goodnight everyone! + labels: + bar: baz + one: '1' + state: present + +- name: Add a new label + docker_config: + name: foo + data: Goodnight everyone! + labels: + bar: baz + one: '1' + # Adding a new label will cause a remove/create of the config + two: '2' + state: present + +- name: No change + docker_config: + name: foo + data: Goodnight everyone! + labels: + bar: baz + one: '1' + # Even though 'two' is missing, there is no change to the existing config + state: present + +- name: Update an existing label + docker_config: + name: foo + data: Goodnight everyone! + labels: + bar: monkey # Changing a label will cause a remove/create of the config + one: '1' + state: present + +- name: Force the (re-)creation of the config + docker_config: + name: foo + data: Goodnight everyone! + force: yes + state: present + +- name: Remove config foo + docker_config: + name: foo + state: absent +''' + +RETURN = ''' +config_id: + description: + - The ID assigned by Docker to the config object. 
+ returned: success and I(state) is C(present) + type: str + sample: 'hzehrmyjigmcp2gb6nlhmjqcv' +''' + +import base64 +import hashlib +import traceback + +try: + from docker.errors import DockerException, APIError +except ImportError: + # missing Docker SDK for Python handled in ansible.module_utils.docker.common + pass + +from ansible_collections.community.general.plugins.module_utils.docker.common import ( + AnsibleDockerClient, + DockerBaseClass, + compare_generic, + RequestException, +) +from ansible.module_utils._text import to_native, to_bytes + + +class ConfigManager(DockerBaseClass): + + def __init__(self, client, results): + + super(ConfigManager, self).__init__() + + self.client = client + self.results = results + self.check_mode = self.client.check_mode + + parameters = self.client.module.params + self.name = parameters.get('name') + self.state = parameters.get('state') + self.data = parameters.get('data') + if self.data is not None: + if parameters.get('data_is_b64'): + self.data = base64.b64decode(self.data) + else: + self.data = to_bytes(self.data) + self.labels = parameters.get('labels') + self.force = parameters.get('force') + self.data_key = None + + def __call__(self): + if self.state == 'present': + self.data_key = hashlib.sha224(self.data).hexdigest() + self.present() + elif self.state == 'absent': + self.absent() + + def get_config(self): + ''' Find an existing config. ''' + try: + configs = self.client.configs(filters={'name': self.name}) + except APIError as exc: + self.client.fail("Error accessing config %s: %s" % (self.name, to_native(exc))) + + for config in configs: + if config['Spec']['Name'] == self.name: + return config + return None + + def create_config(self): + ''' Create a new config ''' + config_id = None + # We can't see the data after creation, so adding a label we can use for idempotency check + labels = { + 'ansible_key': self.data_key + } + if self.labels: + labels.update(self.labels) + + try: + if not self.check_mode: + config_id = self.client.create_config(self.name, self.data, labels=labels) + except APIError as exc: + self.client.fail("Error creating config: %s" % to_native(exc)) + + if isinstance(config_id, dict): + config_id = config_id['ID'] + + return config_id + + def present(self): + ''' Handles state == 'present', creating or updating the config ''' + config = self.get_config() + if config: + self.results['config_id'] = config['ID'] + data_changed = False + attrs = config.get('Spec', {}) + if attrs.get('Labels', {}).get('ansible_key'): + if attrs['Labels']['ansible_key'] != self.data_key: + data_changed = True + labels_changed = not compare_generic(self.labels, attrs.get('Labels'), 'allow_more_present', 'dict') + if data_changed or labels_changed or self.force: + # if something changed or force, delete and re-create the config + self.absent() + config_id = self.create_config() + self.results['changed'] = True + self.results['config_id'] = config_id + else: + self.results['changed'] = True + self.results['config_id'] = self.create_config() + + def absent(self): + ''' Handles state == 'absent', removing the config ''' + config = self.get_config() + if config: + try: + if not self.check_mode: + self.client.remove_config(config['ID']) + except APIError as exc: + self.client.fail("Error removing config %s: %s" % (self.name, to_native(exc))) + self.results['changed'] = True + + +def main(): + argument_spec = dict( + name=dict(type='str', required=True), + state=dict(type='str', default='present', choices=['absent', 'present']), + 
data=dict(type='str'), + data_is_b64=dict(type='bool', default=False), + labels=dict(type='dict'), + force=dict(type='bool', default=False) + ) + + required_if = [ + ('state', 'present', ['data']) + ] + + client = AnsibleDockerClient( + argument_spec=argument_spec, + supports_check_mode=True, + required_if=required_if, + min_docker_version='2.6.0', + min_docker_api_version='1.30', + ) + + try: + results = dict( + changed=False, + ) + + ConfigManager(client, results)() + client.module.exit_json(**results) + except DockerException as e: + client.fail('An unexpected docker error occurred: {0}'.format(e), exception=traceback.format_exc()) + except RequestException as e: + client.fail('An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(e), exception=traceback.format_exc()) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/docker/docker_container.py b/plugins/modules/cloud/docker/docker_container.py new file mode 100644 index 0000000000..950ede03ed --- /dev/null +++ b/plugins/modules/cloud/docker/docker_container.py @@ -0,0 +1,3460 @@ +#!/usr/bin/python +# +# Copyright 2016 Red Hat | Ansible +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: docker_container + +short_description: manage docker containers + +description: + - Manage the life cycle of docker containers. + - Supports check mode. Run with C(--check) and C(--diff) to view config difference and list of actions to be taken. + + +notes: + - For most config changes, the container needs to be recreated, i.e. the existing container has to be destroyed and + a new one created. This can cause unexpected data loss and downtime. You can use the I(comparisons) option to + prevent this. + - If the module needs to recreate the container, it will only use the options provided to the module to create the + new container (except I(image)). Therefore, always specify *all* options relevant to the container. + - When I(restart) is set to C(true), the module will only restart the container if no config changes are detected. + Please note that several options have default values; if the container to be restarted uses different values for + these options, it will be recreated instead. The options with default values which can cause this are I(auto_remove), + I(detach), I(init), I(interactive), I(memory), I(paused), I(privileged), I(read_only) and I(tty). This behavior + can be changed by setting I(container_default_behavior) to C(no_defaults), which will be the default value from + Ansible 2.14 on. + +options: + auto_remove: + description: + - Enable auto-removal of the container on daemon side when the container's process exits. + - If I(container_default_behavior) is set to C(compatiblity) (the default value), this + option has a default of C(no). + type: bool + blkio_weight: + description: + - Block IO (relative weight), between 10 and 1000. + type: int + capabilities: + description: + - List of capabilities to add to the container. + type: list + elements: str + cap_drop: + description: + - List of capabilities to drop from the container. + type: list + elements: str + cleanup: + description: + - Use with I(detach=false) to remove the container after successful execution. 
+ type: bool + default: no + command: + description: + - Command to execute when the container starts. A command may be either a string or a list. + - Prior to version 2.4, strings were split on commas. + type: raw + comparisons: + description: + - Allows to specify how properties of existing containers are compared with + module options to decide whether the container should be recreated / updated + or not. + - Only options which correspond to the state of a container as handled by the + Docker daemon can be specified, as well as C(networks). + - Must be a dictionary specifying for an option one of the keys C(strict), C(ignore) + and C(allow_more_present). + - If C(strict) is specified, values are tested for equality, and changes always + result in updating or restarting. If C(ignore) is specified, changes are ignored. + - C(allow_more_present) is allowed only for lists, sets and dicts. If it is + specified for lists or sets, the container will only be updated or restarted if + the module option contains a value which is not present in the container's + options. If the option is specified for a dict, the container will only be updated + or restarted if the module option contains a key which isn't present in the + container's option, or if the value of a key present differs. + - The wildcard option C(*) can be used to set one of the default values C(strict) + or C(ignore) to *all* comparisons which are not explicitly set to other values. + - See the examples for details. + type: dict + container_default_behavior: + description: + - Various module options used to have default values. This causes problems with + containers which use different values for these options. + - The default value is C(compatibility), which will ensure that the default values + are used when the values are not explicitly specified by the user. + - From Ansible 2.14 on, the default value will switch to C(no_defaults). To avoid + deprecation warnings, please set I(container_default_behavior) to an explicit + value. + - This affects the I(auto_remove), I(detach), I(init), I(interactive), I(memory), + I(paused), I(privileged), I(read_only) and I(tty) options. + type: str + choices: + - compatibility + - no_defaults + cpu_period: + description: + - Limit CPU CFS (Completely Fair Scheduler) period. + - See I(cpus) for an easier to use alternative. + type: int + cpu_quota: + description: + - Limit CPU CFS (Completely Fair Scheduler) quota. + - See I(cpus) for an easier to use alternative. + type: int + cpus: + description: + - Specify how much of the available CPU resources a container can use. + - A value of C(1.5) means that at most one and a half CPU (core) will be used. + type: float + cpuset_cpus: + description: + - CPUs in which to allow execution C(1,3) or C(1-3). + type: str + cpuset_mems: + description: + - Memory nodes (MEMs) in which to allow execution C(0-3) or C(0,1). + type: str + cpu_shares: + description: + - CPU shares (relative weight). + type: int + detach: + description: + - Enable detached mode to leave the container running in background. + - If disabled, the task will reflect the status of the container run (failed if the command failed). + - If I(container_default_behavior) is set to C(compatiblity) (the default value), this + option has a default of C(yes). + type: bool + devices: + description: + - List of host device bindings to add to the container. + - "Each binding is a mapping expressed in the format C(::)." 
+ type: list + elements: str + device_read_bps: + description: + - "List of device path and read rate (bytes per second) from device." + type: list + elements: dict + suboptions: + path: + description: + - Device path in the container. + type: str + required: yes + rate: + description: + - "Device read limit in format C([])." + - "Number is a positive integer. Unit can be one of C(B) (byte), C(K) (kibibyte, 1024B), C(M) (mebibyte), C(G) (gibibyte), + C(T) (tebibyte), or C(P) (pebibyte)." + - "Omitting the unit defaults to bytes." + type: str + required: yes + device_write_bps: + description: + - "List of device and write rate (bytes per second) to device." + type: list + elements: dict + suboptions: + path: + description: + - Device path in the container. + type: str + required: yes + rate: + description: + - "Device read limit in format C([])." + - "Number is a positive integer. Unit can be one of C(B) (byte), C(K) (kibibyte, 1024B), C(M) (mebibyte), C(G) (gibibyte), + C(T) (tebibyte), or C(P) (pebibyte)." + - "Omitting the unit defaults to bytes." + type: str + required: yes + device_read_iops: + description: + - "List of device and read rate (IO per second) from device." + type: list + elements: dict + suboptions: + path: + description: + - Device path in the container. + type: str + required: yes + rate: + description: + - "Device read limit." + - "Must be a positive integer." + type: int + required: yes + device_write_iops: + description: + - "List of device and write rate (IO per second) to device." + type: list + elements: dict + suboptions: + path: + description: + - Device path in the container. + type: str + required: yes + rate: + description: + - "Device read limit." + - "Must be a positive integer." + type: int + required: yes + dns_opts: + description: + - List of DNS options. + type: list + elements: str + dns_servers: + description: + - List of custom DNS servers. + type: list + elements: str + dns_search_domains: + description: + - List of custom DNS search domains. + type: list + elements: str + domainname: + description: + - Container domainname. + type: str + env: + description: + - Dictionary of key,value pairs. + - Values which might be parsed as numbers, booleans or other types by the YAML parser must be quoted (e.g. C("true")) in order to avoid data loss. + type: dict + env_file: + description: + - Path to a file, present on the target, containing environment variables I(FOO=BAR). + - If variable also present in I(env), then the I(env) value will override. + type: path + entrypoint: + description: + - Command that overwrites the default C(ENTRYPOINT) of the image. + type: list + elements: str + etc_hosts: + description: + - Dict of host-to-IP mappings, where each host name is a key in the dictionary. + Each host name will be added to the container's C(/etc/hosts) file. + type: dict + exposed_ports: + description: + - List of additional container ports which informs Docker that the container + listens on the specified network ports at runtime. + - If the port is already exposed using C(EXPOSE) in a Dockerfile, it does not + need to be exposed again. + type: list + elements: str + aliases: + - exposed + - expose + force_kill: + description: + - Use the kill command when stopping a running container. + type: bool + default: no + aliases: + - forcekill + groups: + description: + - List of additional group names and/or IDs that the container process will run as. 
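Editor's note: the rate strings accepted by device_read_bps/device_write_bps above follow a number-plus-optional-unit convention with binary units; a small illustrative parser under that assumption:

import re

UNITS = {'B': 1, 'K': 1024, 'M': 1024 ** 2, 'G': 1024 ** 3, 'T': 1024 ** 4, 'P': 1024 ** 5}

def parse_rate(value):
    match = re.match(r'^(\d+)([BKMGTP]?)$', value)
    if not match:
        raise ValueError('invalid rate: %r' % value)
    return int(match.group(1)) * UNITS[match.group(2) or 'B']

assert parse_rate('10M') == 10 * 1024 ** 2
assert parse_rate('512') == 512   # omitting the unit defaults to bytes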
+ type: list + elements: str + healthcheck: + description: + - Configure a check that is run to determine whether or not containers for this service are "healthy". + - "See the docs for the L(HEALTHCHECK Dockerfile instruction,https://docs.docker.com/engine/reference/builder/#healthcheck) + for details on how healthchecks work." + - "I(interval), I(timeout) and I(start_period) are specified as durations. They accept duration as a string in a format + that look like: C(5h34m56s), C(1m30s) etc. The supported units are C(us), C(ms), C(s), C(m) and C(h)." + type: dict + suboptions: + test: + description: + - Command to run to check health. + - Must be either a string or a list. If it is a list, the first item must be one of C(NONE), C(CMD) or C(CMD-SHELL). + type: raw + interval: + description: + - Time between running the check. + - The default used by the Docker daemon is C(30s). + type: str + timeout: + description: + - Maximum time to allow one check to run. + - The default used by the Docker daemon is C(30s). + type: str + retries: + description: + - Consecutive number of failures needed to report unhealthy. + - The default used by the Docker daemon is C(3). + type: int + start_period: + description: + - Start period for the container to initialize before starting health-retries countdown. + - The default used by the Docker daemon is C(0s). + type: str + hostname: + description: + - The container's hostname. + type: str + ignore_image: + description: + - When I(state) is C(present) or C(started), the module compares the configuration of an existing + container to requested configuration. The evaluation includes the image version. If the image + version in the registry does not match the container, the container will be recreated. You can + stop this behavior by setting I(ignore_image) to C(True). + - "*Warning:* This option is ignored if C(image: ignore) or C(*: ignore) is specified in the + I(comparisons) option." + type: bool + default: no + image: + description: + - Repository path and tag used to create the container. If an image is not found or pull is true, the image + will be pulled from the registry. If no tag is included, C(latest) will be used. + - Can also be an image ID. If this is the case, the image is assumed to be available locally. + The I(pull) option is ignored for this case. + type: str + init: + description: + - Run an init inside the container that forwards signals and reaps processes. + - This option requires Docker API >= 1.25. + - If I(container_default_behavior) is set to C(compatiblity) (the default value), this + option has a default of C(no). + type: bool + interactive: + description: + - Keep stdin open after a container is launched, even if not attached. + - If I(container_default_behavior) is set to C(compatiblity) (the default value), this + option has a default of C(no). + type: bool + ipc_mode: + description: + - Set the IPC mode for the container. + - Can be one of C(container:) to reuse another container's IPC namespace or C(host) to use + the host's IPC namespace within the container. + type: str + keep_volumes: + description: + - Retain volumes associated with a removed container. + type: bool + default: yes + kill_signal: + description: + - Override default signal used to kill a running container. + type: str + kernel_memory: + description: + - "Kernel memory limit in format C([]). Number is a positive integer. + Unit can be C(B) (byte), C(K) (kibibyte, 1024B), C(M) (mebibyte), C(G) (gibibyte), + C(T) (tebibyte), or C(P) (pebibyte). 
Minimum is C(4M)."
+      - Omitting the unit defaults to bytes.
+    type: str
+  labels:
+    description:
+      - Dictionary of key value pairs.
+    type: dict
+  links:
+    description:
+      - List of name aliases for linked containers in the format C(container_name:alias).
+      - Setting this will force container to be restarted.
+    type: list
+    elements: str
+  log_driver:
+    description:
+      - Specify the logging driver. Docker uses C(json-file) by default.
+      - See L(here,https://docs.docker.com/config/containers/logging/configure/) for possible choices.
+    type: str
+  log_options:
+    description:
+      - Dictionary of options specific to the chosen I(log_driver).
+      - See U(https://docs.docker.com/engine/admin/logging/overview/) for details.
+    type: dict
+    aliases:
+      - log_opt
+  mac_address:
+    description:
+      - Container MAC address (e.g. 92:d0:c6:0a:29:33).
+    type: str
+  memory:
+    description:
+      - "Memory limit in format C(<number>[<unit>]). Number is a positive integer.
+        Unit can be C(B) (byte), C(K) (kibibyte, 1024B), C(M) (mebibyte), C(G) (gibibyte),
+        C(T) (tebibyte), or C(P) (pebibyte)."
+      - Omitting the unit defaults to bytes.
+      - If I(container_default_behavior) is set to C(compatibility) (the default value), this
+        option has a default of C("0").
+    type: str
+  memory_reservation:
+    description:
+      - "Memory soft limit in format C(<number>[<unit>]). Number is a positive integer.
+        Unit can be C(B) (byte), C(K) (kibibyte, 1024B), C(M) (mebibyte), C(G) (gibibyte),
+        C(T) (tebibyte), or C(P) (pebibyte)."
+      - Omitting the unit defaults to bytes.
+    type: str
+  memory_swap:
+    description:
+      - "Total memory limit (memory + swap) in format C(<number>[<unit>]).
+        Number is a positive integer. Unit can be C(B) (byte), C(K) (kibibyte, 1024B),
+        C(M) (mebibyte), C(G) (gibibyte), C(T) (tebibyte), or C(P) (pebibyte)."
+      - Omitting the unit defaults to bytes.
+    type: str
+  memory_swappiness:
+    description:
+      - Tune a container's memory swappiness behavior. Accepts an integer between 0 and 100.
+      - If not set, the value will remain the same if the container exists and will be inherited
+        from the host machine if it is (re-)created.
+    type: int
+  mounts:
+    type: list
+    elements: dict
+    description:
+      - Specification for mounts to be added to the container. More powerful alternative to I(volumes).
+    suboptions:
+      target:
+        description:
+          - Path inside the container.
+        type: str
+        required: true
+      source:
+        description:
+          - Mount source (e.g. a volume name or a host path).
+        type: str
+      type:
+        description:
+          - The mount type.
+          - Note that C(npipe) is only supported by Docker for Windows.
+        type: str
+        choices:
+          - bind
+          - npipe
+          - tmpfs
+          - volume
+        default: volume
+      read_only:
+        description:
+          - Whether the mount should be read-only.
+        type: bool
+      consistency:
+        description:
+          - The consistency requirement for the mount.
+        type: str
+        choices:
+          - cached
+          - consistent
+          - default
+          - delegated
+      propagation:
+        description:
+          - Propagation mode. Only valid for the C(bind) type.
+        type: str
+        choices:
+          - private
+          - rprivate
+          - shared
+          - rshared
+          - slave
+          - rslave
+      no_copy:
+        description:
+          - False if the volume should be populated with the data from the target. Only valid for the C(volume) type.
+          - The default value is C(false).
+        type: bool
+      labels:
+        description:
+          - User-defined name and labels for the volume. Only valid for the C(volume) type.
+        type: dict
+      volume_driver:
+        description:
+          - Specify the volume driver. Only valid for the C(volume) type.
+          - See L(here,https://docs.docker.com/storage/volumes/#use-a-volume-driver) for details.
+ volume_options:
+ description:
+ - Dictionary of options specific to the chosen I(volume_driver). See
+ L(here,https://docs.docker.com/storage/volumes/#use-a-volume-driver) for details.
+ type: dict
+ tmpfs_size:
+ description:
+ - "The size for the tmpfs mount in bytes in format C(<number>[<unit>])."
+ - "Number is a positive integer. Unit can be one of C(B) (byte), C(K) (kibibyte, 1024B), C(M) (mebibyte), C(G) (gibibyte),
+ C(T) (tebibyte), or C(P) (pebibyte)."
+ - "Omitting the unit defaults to bytes."
+ type: str
+ tmpfs_mode:
+ description:
+ - The permission mode for the tmpfs mount.
+ type: str
+ name:
+ description:
+ - Assign a name to a new container or match an existing container.
+ - When identifying an existing container, I(name) may be a name or a long or short container ID.
+ type: str
+ required: yes
+ network_mode:
+ description:
+ - Connect the container to a network. Choices are C(bridge), C(host), C(none), C(container:<name|id>), C(<network_name>) or C(default).
+ - "*Note* that from Ansible 2.14 on, if I(networks_cli_compatible) is C(true) and I(networks) contains at least one network,
+ the default value for I(network_mode) will be the name of the first network in the I(networks) list. You can prevent this
+ by explicitly specifying a value for I(network_mode), like the default value C(default) which will be used by Docker if
+ I(network_mode) is not specified."
+ type: str
+ userns_mode:
+ description:
+ - Set the user namespace mode for the container. Currently, the only valid values are C(host) and the empty string.
+ type: str
+ networks:
+ description:
+ - List of networks the container belongs to.
+ - For examples of the data structure and usage see EXAMPLES below.
+ - To remove a container from one or more networks, use the I(purge_networks) option.
+ - Note that as opposed to C(docker run ...), M(docker_container) does not remove the default
+ network if I(networks) is specified. You need to explicitly use I(purge_networks) to enforce
+ the removal of the default network (and all other networks not explicitly mentioned in I(networks)).
+ Alternatively, use the I(networks_cli_compatible) option, which will be enabled by default from Ansible 2.12 on.
+ type: list
+ elements: dict
+ suboptions:
+ name:
+ description:
+ - The network's name.
+ type: str
+ required: yes
+ ipv4_address:
+ description:
+ - The container's IPv4 address in this network.
+ type: str
+ ipv6_address:
+ description:
+ - The container's IPv6 address in this network.
+ type: str
+ links:
+ description:
+ - A list of containers to link to.
+ type: list
+ elements: str
+ aliases:
+ description:
+ - List of aliases for this container in this network. These names
+ can be used in the network to reach this container.
+ type: list
+ elements: str
+ networks_cli_compatible:
+ description:
+ - "When networks are provided to the module via the I(networks) option, the module
+ behaves differently than C(docker run --network): C(docker run --network other)
+ will create a container with network C(other) attached, but the default network
+ not attached. This module with I(networks: {name: other}) will create a container
+ with both C(default) and C(other) attached. If I(purge_networks) is set to C(yes),
+ the C(default) network will be removed afterwards."
+ - "If I(networks_cli_compatible) is set to C(yes), this module will behave as
+ C(docker run --network) and will *not* add the default network if I(networks) is
+ specified. If I(networks) is not specified, the default network will be attached."
+ - "*Note* that docker CLI also sets I(network_mode) to the name of the first network
+ added if C(--network) is specified. For more compatibility with docker CLI, you
+ explicitly have to set I(network_mode) to the name of the first network you're
+ adding. This behavior will change for Ansible 2.14: then I(network_mode) will
+ automatically be set to the first network name in I(networks) if I(network_mode)
+ is not specified, I(networks) has at least one entry and I(networks_cli_compatible)
+ is C(true)."
+ - The current default value is C(no). A new default of C(yes) will be set in Ansible 2.12.
+ type: bool
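+ # A sketch of the I(networks_cli_compatible) difference (the network name
+ # C(other) is a placeholder): with the settings below, only C(other) is
+ # attached, as with C(docker run --network other); without
+ # I(networks_cli_compatible), the C(default) network would be attached as well.
+ #   networks:
+ #     - name: other
+ #   networks_cli_compatible: yes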
+ - "*Note* that docker CLI also sets I(network_mode) to the name of the first network + added if C(--network) is specified. For more compatibility with docker CLI, you + explicitly have to set I(network_mode) to the name of the first network you're + adding. This behavior will change for Ansible 2.14: then I(network_mode) will + automatically be set to the first network name in I(networks) if I(network_mode) + is not specified, I(networks) has at least one entry and I(networks_cli_compatible) + is C(true)." + - Current value is C(no). A new default of C(yes) will be set in Ansible 2.12. + type: bool + oom_killer: + description: + - Whether or not to disable OOM Killer for the container. + type: bool + oom_score_adj: + description: + - An integer value containing the score given to the container in order to tune + OOM killer preferences. + type: int + output_logs: + description: + - If set to true, output of the container command will be printed. + - Only effective when I(log_driver) is set to C(json-file) or C(journald). + type: bool + default: no + paused: + description: + - Use with the started state to pause running processes inside the container. + - If I(container_default_behavior) is set to C(compatiblity) (the default value), this + option has a default of C(no). + type: bool + pid_mode: + description: + - Set the PID namespace mode for the container. + - Note that Docker SDK for Python < 2.0 only supports C(host). Newer versions of the + Docker SDK for Python (docker) allow all values supported by the Docker daemon. + type: str + pids_limit: + description: + - Set PIDs limit for the container. It accepts an integer value. + - Set C(-1) for unlimited PIDs. + type: int + privileged: + description: + - Give extended privileges to the container. + - If I(container_default_behavior) is set to C(compatiblity) (the default value), this + option has a default of C(no). + type: bool + published_ports: + description: + - List of ports to publish from the container to the host. + - "Use docker CLI syntax: C(8000), C(9000:8000), or C(0.0.0.0:9000:8000), where 8000 is a + container port, 9000 is a host port, and 0.0.0.0 is a host interface." + - Port ranges can be used for source and destination ports. If two ranges with + different lengths are specified, the shorter range will be used. + Since Ansible 2.10, if the source port range has length 1, the port will not be assigned + to the first port of the destination range, but to a free port in that range. This is the + same behavior as for C(docker) command line utility. + - "Bind addresses must be either IPv4 or IPv6 addresses. Hostnames are *not* allowed. This + is different from the C(docker) command line utility. Use the L(dig lookup,../lookup/dig.html) + to resolve hostnames." + - A value of C(all) will publish all exposed container ports to random host ports, ignoring + any other mappings. + - If I(networks) parameter is provided, will inspect each network to see if there exists + a bridge network with optional parameter C(com.docker.network.bridge.host_binding_ipv4). + If such a network is found, then published ports where no host IP address is specified + will be bound to the host IP pointed to by C(com.docker.network.bridge.host_binding_ipv4). + Note that the first bridge network with a C(com.docker.network.bridge.host_binding_ipv4) + value encountered in the list of I(networks) is the one that will be used. + type: list + elements: str + aliases: + - ports + pull: + description: + - If true, always pull the latest version of an image. 
+ purge_networks:
+ description:
+ - Remove the container from ALL networks not included in the I(networks) parameter.
+ - Any default networks such as C(bridge), if not found in I(networks), will be removed as well.
+ type: bool
+ default: no
+ read_only:
+ description:
+ - Mount the container's root file system as read-only.
+ - If I(container_default_behavior) is set to C(compatibility) (the default value), this
+ option has a default of C(no).
+ type: bool
+ recreate:
+ description:
+ - Use with present and started states to force the re-creation of an existing container.
+ type: bool
+ default: no
+ removal_wait_timeout:
+ description:
+ - When removing an existing container, the docker daemon API call returns after the container
+ is scheduled for removal. Removal usually is very fast, but it can happen that during high I/O
+ load, removal can take longer. By default, the module will wait until the container has been
+ removed before trying to (re-)create it, however long this takes.
+ - By setting this option, the module will wait at most this many seconds for the container to be
+ removed. If the container is still in the removal phase after this many seconds, the module will
+ fail.
+ type: float
+ restart:
+ description:
+ - Use with started state to force a matching container to be stopped and restarted.
+ type: bool
+ default: no
+ restart_policy:
+ description:
+ - Container restart policy.
+ - Place quotes around C(no) option.
+ type: str
+ choices:
+ - 'no'
+ - 'on-failure'
+ - 'always'
+ - 'unless-stopped'
+ restart_retries:
+ description:
+ - Use with restart policy to control maximum number of restart attempts.
+ type: int
+ runtime:
+ description:
+ - Runtime to use for the container.
+ type: str
+ shm_size:
+ description:
+ - "Size of C(/dev/shm) in format C(<number>[<unit>]). Number is a positive integer.
+ Unit can be C(B) (byte), C(K) (kibibyte, 1024B), C(M) (mebibyte), C(G) (gibibyte),
+ C(T) (tebibyte), or C(P) (pebibyte)."
+ - Omitting the unit defaults to bytes. If you omit the size entirely, the Docker daemon uses C(64M).
+ type: str
+ security_opts:
+ description:
+ - List of security options in the form of C("label:user:User").
+ type: list
+ elements: str
+ state:
+ description:
+ - 'C(absent) - A container matching the specified name will be stopped and removed. Use I(force_kill) to kill the container
+ rather than stopping it. Use I(keep_volumes) to retain volumes associated with the removed container.'
+ - 'C(present) - Asserts the existence of a container matching the name and any provided configuration parameters. If no
+ container matches the name, a container will be created. If a container matches the name but the provided configuration
+ does not match, the container will be updated, if it can be. If it cannot be updated, it will be removed and re-created
+ with the requested config.'
+ - 'C(started) - Asserts that the container is first C(present), and then if the container is not running moves it to a running
+ state. Use I(restart) to force a matching container to be stopped and restarted.'
+ - 'C(stopped) - Asserts that the container is first C(present), and then if the container is running moves it to a stopped
+ state.'
+ - To control what will be taken into account when comparing configuration, see the I(comparisons) option. To prevent the
+ image version from being taken into account, you can also use the I(ignore_image) option.
+ - Use the I(recreate) option to always force re-creation of a matching container, even if it is running.
+ - If the container should be killed instead of stopped in case it needs to be stopped for recreation, or because I(state) is
+ C(stopped), please use the I(force_kill) option. Use I(keep_volumes) to retain volumes associated with a removed container.
+ type: str
+ default: started
+ choices:
+ - absent
+ - present
+ - stopped
+ - started
+ stop_signal:
+ description:
+ - Override default signal used to stop the container.
+ type: str
+ stop_timeout:
+ description:
+ - Number of seconds to wait for the container to stop before sending C(SIGKILL).
+ When the container is created by this module, its C(StopTimeout) configuration
+ will be set to this value.
+ - When the container is stopped, will be used as a timeout for stopping the
+ container. In case the container has a custom C(StopTimeout) configuration,
+ the behavior depends on the version of the docker daemon. New versions of
+ the docker daemon will always use the container's configured C(StopTimeout)
+ value if it has been configured.
+ type: int
+ trust_image_content:
+ description:
+ - If C(yes), skip image verification.
+ - The option has never been used by the module. It will be removed in Ansible 2.14.
+ type: bool
+ default: no
+ tmpfs:
+ description:
+ - Mount a tmpfs directory.
+ type: list
+ elements: str
+ tty:
+ description:
+ - Allocate a pseudo-TTY.
+ - If I(container_default_behavior) is set to C(compatibility) (the default value), this
+ option has a default of C(no).
+ type: bool
+ ulimits:
+ description:
+ - "List of ulimit options. A ulimit is specified as C(nofile:262144:262144)."
+ type: list
+ elements: str
+ sysctls:
+ description:
+ - Dictionary of key-value pairs.
+ type: dict
+ user:
+ description:
+ - Sets the username or UID used and optionally the groupname or GID for the specified command.
+ - "Can be of the forms C(user), C(user:group), C(uid), C(uid:gid), C(user:gid) or C(uid:group)."
+ type: str
+ uts:
+ description:
+ - Set the UTS namespace mode for the container.
+ type: str
+ volumes:
+ description:
+ - List of volumes to mount within the container.
+ - "Use docker CLI-style syntax: C(/host:/container[:mode])"
+ - "Mount modes can be a comma-separated list of various modes such as C(ro), C(rw), C(consistent),
+ C(delegated), C(cached), C(rprivate), C(private), C(rshared), C(shared), C(rslave), C(slave), and
+ C(nocopy). Note that the docker daemon might not support all modes and combinations of such modes."
+ - SELinux hosts can additionally use C(z) or C(Z) to use a shared or private label for the volume.
+ - "Note that Ansible 2.7 and earlier only supported one mode, which had to be one of C(ro), C(rw),
+ C(z), and C(Z)."
+ type: list
+ elements: str
+ volume_driver:
+ description:
+ - The container volume driver.
+ type: str
+ volumes_from:
+ description:
+ - List of container names or IDs to get volumes from.
+ type: list
+ elements: str
+ working_dir:
+ description:
+ - Path to the working directory.
+ type: str
+extends_documentation_fragment:
+- community.general.docker
+- community.general.docker.docker_py_1_documentation
+
+
+author:
+ - "Cove Schneider (@cove)"
+ - "Joshua Conner (@joshuaconner)"
+ - "Pavel Antonov (@softzilla)"
+ - "Thomas Steinbach (@ThomasSteinbach)"
+ - "Philippe Jandot (@zfil)"
+ - "Daan Oosterveld (@dusdanig)"
+ - "Chris Houseknecht (@chouseknecht)"
+ - "Kassian Sun (@kassiansun)"
+ - "Felix Fontein (@felixfontein)"
+
+requirements:
+ - "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 1.8.0 (use L(docker-py,https://pypi.org/project/docker-py/) for Python 2.6)"
+ - "Docker API >= 1.20"
+'''
+
+EXAMPLES = '''
+- name: Create a data container
+ docker_container:
+ name: mydata
+ image: busybox
+ volumes:
+ - /data
+
+- name: Re-create a redis container
+ docker_container:
+ name: myredis
+ image: redis
+ command: redis-server --appendonly yes
+ state: present
+ recreate: yes
+ exposed_ports:
+ - 6379
+ volumes_from:
+ - mydata
+
+- name: Restart a container
+ docker_container:
+ name: myapplication
+ image: someuser/appimage
+ state: started
+ restart: yes
+ links:
+ - "myredis:aliasedredis"
+ devices:
+ - "/dev/sda:/dev/xvda:rwm"
+ ports:
+ # Publish container port 9000 as host port 8080
+ - "8080:9000"
+ # Publish container UDP port 9001 as host port 8081 on interface 127.0.0.1
+ - "127.0.0.1:8081:9001/udp"
+ # Publish container port 9002 as a random host port
+ - "9002"
+ # Publish container port 9003 as a free host port in range 8000-8100
+ # (the host port will be selected by the Docker daemon)
+ - "8000-8100:9003"
+ # Publish container ports 9010-9020 to host ports 7000-7010
+ - "7000-7010:9010-9020"
+ env:
+ SECRET_KEY: "ssssh"
+ # Values which might be parsed as numbers, booleans or other types by the YAML parser need to be quoted
+ BOOLEAN_KEY: "yes"
+
+- name: Container present
+ docker_container:
+ name: mycontainer
+ state: present
+ image: ubuntu:14.04
+ command: sleep infinity
+
+- name: Stop a container
+ docker_container:
+ name: mycontainer
+ state: stopped
+
+- name: Start 4 load-balanced containers
+ docker_container:
+ name: "container{{ item }}"
+ recreate: yes
+ image: someuser/anotherappimage
+ command: sleep 1d
+ with_sequence: count=4
+
+- name: Remove container
+ docker_container:
+ name: ohno
+ state: absent
+
+- name: Syslogging output
+ docker_container:
+ name: myservice
+ image: busybox
+ log_driver: syslog
+ log_options:
+ syslog-address: tcp://my-syslog-server:514
+ syslog-facility: daemon
+ # NOTE: in Docker 1.13+ the "syslog-tag" option was renamed to "tag".
+ # For older docker installs, use "syslog-tag" instead.
+ tag: myservice
+
+- name: Create db container and connect to network
+ docker_container:
+ name: db_test
+ image: "postgres:latest"
+ networks:
+ - name: "{{ docker_network_name }}"
+
+- name: Start container, connect to network and link
+ docker_container:
+ name: sleeper
+ image: ubuntu:14.04
+ networks:
+ - name: TestingNet
+ ipv4_address: "172.1.1.100"
+ aliases:
+ - sleepyzz
+ links:
+ - db_test:db
+ - name: TestingNet2
+
+- name: Start a container with a command
+ docker_container:
+ name: sleepy
+ image: ubuntu:14.04
+ command: ["sleep", "infinity"]
+
+- name: Add container to networks
+ docker_container:
+ name: sleepy
+ networks:
+ - name: TestingNet
+ ipv4_address: 172.1.1.18
+ links:
+ - sleeper
+ - name: TestingNet2
+ ipv4_address: 172.1.10.20
+
+- name: Update network with aliases
+ docker_container:
+ name: sleepy
+ networks:
+ - name: TestingNet
+ aliases:
+ - sleepyz
+ - zzzz
+
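+# The following task illustrates the I(mounts) option documented above; the
+# volume name "myvolume" and the paths are placeholder examples.
+- name: Start a container with a tmpfs mount and a named volume mount
+ docker_container:
+ name: mounttest
+ image: ubuntu:18.04
+ command: sleep infinity
+ mounts:
+ - target: /tmp/scratch
+ type: tmpfs
+ tmpfs_size: 64M
+ - target: /data
+ source: myvolume
+ type: volume
+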
+- name: Remove container from one network
+ docker_container:
+ name: sleepy
+ networks:
+ - name: TestingNet2
+ purge_networks: yes
+
+- name: Remove container from all networks
+ docker_container:
+ name: sleepy
+ purge_networks: yes
+
+- name: Start a container and use an env file
+ docker_container:
+ name: agent
+ image: jenkinsci/ssh-slave
+ env_file: /var/tmp/jenkins/agent.env
+
+- name: Create a container with limited capabilities
+ docker_container:
+ name: sleepy
+ image: ubuntu:16.04
+ command: sleep infinity
+ capabilities:
+ - sys_time
+ cap_drop:
+ - all
+
+- name: Finer container restart/update control
+ docker_container:
+ name: test
+ image: ubuntu:18.04
+ env:
+ arg1: "true"
+ arg2: "whatever"
+ volumes:
+ - /tmp:/tmp
+ comparisons:
+ image: ignore # don't restart containers with older versions of the image
+ env: strict # we want precisely this environment
+ volumes: allow_more_present # if there are more volumes, that's ok, as long as `/tmp:/tmp` is there
+
+- name: Finer container restart/update control II
+ docker_container:
+ name: test
+ image: ubuntu:18.04
+ env:
+ arg1: "true"
+ arg2: "whatever"
+ comparisons:
+ '*': ignore # by default, ignore *all* options (including image)
+ env: strict # except for environment variables; there, we want to be strict
+
+- name: Start container with healthstatus
+ docker_container:
+ name: nginx-proxy
+ image: nginx:1.13
+ state: started
+ healthcheck:
+ # Check if nginx server is healthy by curl'ing the server.
+ # If this fails or times out, the healthcheck fails.
+ test: ["CMD", "curl", "--fail", "http://nginx.host.com"]
+ interval: 1m30s
+ timeout: 10s
+ retries: 3
+ start_period: 30s
+
+- name: Remove healthcheck from container
+ docker_container:
+ name: nginx-proxy
+ image: nginx:1.13
+ state: started
+ healthcheck:
+ # The "NONE" check needs to be specified
+ test: ["NONE"]
+
+- name: Start container with block device read limit
+ docker_container:
+ name: test
+ image: ubuntu:18.04
+ state: started
+ device_read_bps:
+ # Limit read rate for /dev/sda to 20 mebibytes per second
+ - path: /dev/sda
+ rate: 20M
+ device_read_iops:
+ # Limit read rate for /dev/sdb to 300 IO operations per second
+ - path: /dev/sdb
+ rate: 300
+'''
+
+RETURN = '''
+container:
+ description:
+ - Facts representing the current state of the container. Matches the docker inspection output.
+ - Note that facts are part of the registered vars since Ansible 2.8. For compatibility reasons, the facts
+ are also accessible directly as C(docker_container). Note that the returned fact will be removed in Ansible 2.12.
+ - Before 2.3 this was C(ansible_docker_container) but was renamed in 2.3 to C(docker_container) due to
+ conflicts with the connection plugin.
+ - Empty if I(state) is C(absent).
+ - If I(detached) is C(false), will include C(Output) attribute containing any output from container run.
+ returned: always
+ type: dict
+ sample: '{
+ "AppArmorProfile": "",
+ "Args": [],
+ "Config": {
+ "AttachStderr": false,
+ "AttachStdin": false,
+ "AttachStdout": false,
+ "Cmd": [
+ "/usr/bin/supervisord"
+ ],
+ "Domainname": "",
+ "Entrypoint": null,
+ "Env": [
+ "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
+ ],
+ "ExposedPorts": {
+ "443/tcp": {},
+ "80/tcp": {}
+ },
+ "Hostname": "8e47bf643eb9",
+ "Image": "lnmp_nginx:v1",
+ "Labels": {},
+ "OnBuild": null,
+ "OpenStdin": false,
+ "StdinOnce": false,
+ "Tty": false,
+ "User": "",
+ "Volumes": {
+ "/tmp/lnmp/nginx-sites/logs/": {}
+ },
+ ...
+ }' +''' + +import os +import re +import shlex +import traceback +from distutils.version import LooseVersion +from time import sleep + +from ansible.module_utils.common.text.formatters import human_to_bytes +from ansible_collections.community.general.plugins.module_utils.docker.common import ( + AnsibleDockerClient, + DifferenceTracker, + DockerBaseClass, + compare_generic, + is_image_name_id, + sanitize_result, + clean_dict_booleans_for_docker_api, + omit_none_from_dict, + parse_healthcheck, + DOCKER_COMMON_ARGS, + RequestException, +) +from ansible.module_utils.six import string_types + +try: + from docker import utils + from ansible_collections.community.general.plugins.module_utils.docker.common import docker_version + if LooseVersion(docker_version) >= LooseVersion('1.10.0'): + from docker.types import Ulimit, LogConfig + from docker import types as docker_types + else: + from docker.utils.types import Ulimit, LogConfig + from docker.errors import DockerException, APIError, NotFound +except Exception: + # missing Docker SDK for Python handled in ansible.module_utils.docker.common + pass + + +REQUIRES_CONVERSION_TO_BYTES = [ + 'kernel_memory', + 'memory', + 'memory_reservation', + 'memory_swap', + 'shm_size' +] + + +def is_volume_permissions(mode): + for part in mode.split(','): + if part not in ('rw', 'ro', 'z', 'Z', 'consistent', 'delegated', 'cached', 'rprivate', 'private', 'rshared', 'shared', 'rslave', 'slave', 'nocopy'): + return False + return True + + +def parse_port_range(range_or_port, client): + ''' + Parses a string containing either a single port or a range of ports. + + Returns a list of integers for each port in the list. + ''' + if '-' in range_or_port: + try: + start, end = [int(port) for port in range_or_port.split('-')] + except Exception: + client.fail('Invalid port range: "{0}"'.format(range_or_port)) + if end < start: + client.fail('Invalid port range: "{0}"'.format(range_or_port)) + return list(range(start, end + 1)) + else: + try: + return [int(range_or_port)] + except Exception: + client.fail('Invalid port: "{0}"'.format(range_or_port)) + + +def split_colon_ipv6(text, client): + ''' + Split string by ':', while keeping IPv6 addresses in square brackets in one component. 
+ ''' + if '[' not in text: + return text.split(':') + start = 0 + result = [] + while start < len(text): + i = text.find('[', start) + if i < 0: + result.extend(text[start:].split(':')) + break + j = text.find(']', i) + if j < 0: + client.fail('Cannot find closing "]" in input "{0}" for opening "[" at index {1}!'.format(text, i + 1)) + result.extend(text[start:i].split(':')) + k = text.find(':', j) + if k < 0: + result[-1] += text[i:] + start = len(text) + else: + result[-1] += text[i:k] + if k == len(text): + result.append('') + break + start = k + 1 + return result + + +class TaskParameters(DockerBaseClass): + ''' + Access and parse module parameters + ''' + + def __init__(self, client): + super(TaskParameters, self).__init__() + self.client = client + + self.auto_remove = None + self.blkio_weight = None + self.capabilities = None + self.cap_drop = None + self.cleanup = None + self.command = None + self.cpu_period = None + self.cpu_quota = None + self.cpus = None + self.cpuset_cpus = None + self.cpuset_mems = None + self.cpu_shares = None + self.detach = None + self.debug = None + self.devices = None + self.device_read_bps = None + self.device_write_bps = None + self.device_read_iops = None + self.device_write_iops = None + self.dns_servers = None + self.dns_opts = None + self.dns_search_domains = None + self.domainname = None + self.env = None + self.env_file = None + self.entrypoint = None + self.etc_hosts = None + self.exposed_ports = None + self.force_kill = None + self.groups = None + self.healthcheck = None + self.hostname = None + self.ignore_image = None + self.image = None + self.init = None + self.interactive = None + self.ipc_mode = None + self.keep_volumes = None + self.kernel_memory = None + self.kill_signal = None + self.labels = None + self.links = None + self.log_driver = None + self.output_logs = None + self.log_options = None + self.mac_address = None + self.memory = None + self.memory_reservation = None + self.memory_swap = None + self.memory_swappiness = None + self.mounts = None + self.name = None + self.network_mode = None + self.userns_mode = None + self.networks = None + self.networks_cli_compatible = None + self.oom_killer = None + self.oom_score_adj = None + self.paused = None + self.pid_mode = None + self.pids_limit = None + self.privileged = None + self.purge_networks = None + self.pull = None + self.read_only = None + self.recreate = None + self.removal_wait_timeout = None + self.restart = None + self.restart_retries = None + self.restart_policy = None + self.runtime = None + self.shm_size = None + self.security_opts = None + self.state = None + self.stop_signal = None + self.stop_timeout = None + self.tmpfs = None + self.trust_image_content = None + self.tty = None + self.user = None + self.uts = None + self.volumes = None + self.volume_binds = dict() + self.volumes_from = None + self.volume_driver = None + self.working_dir = None + + for key, value in client.module.params.items(): + setattr(self, key, value) + self.comparisons = client.comparisons + + # If state is 'absent', parameters do not have to be parsed or interpreted. + # Only the container's name is needed. + if self.state == 'absent': + return + + if self.cpus is not None: + self.cpus = int(round(self.cpus * 1E9)) + + if self.groups: + # In case integers are passed as groups, we need to convert them to + # strings as docker internally treats them as strings. 
+ self.groups = [str(g) for g in self.groups] + + for param_name in REQUIRES_CONVERSION_TO_BYTES: + if client.module.params.get(param_name): + try: + setattr(self, param_name, human_to_bytes(client.module.params.get(param_name))) + except ValueError as exc: + self.fail("Failed to convert %s to bytes: %s" % (param_name, exc)) + + self.publish_all_ports = False + self.published_ports = self._parse_publish_ports() + if self.published_ports in ('all', 'ALL'): + self.publish_all_ports = True + self.published_ports = None + + self.ports = self._parse_exposed_ports(self.published_ports) + self.log("expose ports:") + self.log(self.ports, pretty_print=True) + + self.links = self._parse_links(self.links) + + if self.volumes: + self.volumes = self._expand_host_paths() + + self.tmpfs = self._parse_tmpfs() + self.env = self._get_environment() + self.ulimits = self._parse_ulimits() + self.sysctls = self._parse_sysctls() + self.log_config = self._parse_log_config() + try: + self.healthcheck, self.disable_healthcheck = parse_healthcheck(self.healthcheck) + except ValueError as e: + self.fail(str(e)) + + self.exp_links = None + self.volume_binds = self._get_volume_binds(self.volumes) + self.pid_mode = self._replace_container_names(self.pid_mode) + self.ipc_mode = self._replace_container_names(self.ipc_mode) + self.network_mode = self._replace_container_names(self.network_mode) + + self.log("volumes:") + self.log(self.volumes, pretty_print=True) + self.log("volume binds:") + self.log(self.volume_binds, pretty_print=True) + + if self.networks: + for network in self.networks: + network['id'] = self._get_network_id(network['name']) + if not network['id']: + self.fail("Parameter error: network named %s could not be found. Does it exist?" % network['name']) + if network.get('links'): + network['links'] = self._parse_links(network['links']) + + if self.mac_address: + # Ensure the MAC address uses colons instead of hyphens for later comparison + self.mac_address = self.mac_address.replace('-', ':') + + if self.entrypoint: + # convert from list to str. 
+ self.entrypoint = ' '.join([str(x) for x in self.entrypoint]) + + if self.command: + # convert from list to str + if isinstance(self.command, list): + self.command = ' '.join([str(x) for x in self.command]) + + self.mounts_opt, self.expected_mounts = self._process_mounts() + + self._check_mount_target_collisions() + + for param_name in ["device_read_bps", "device_write_bps"]: + if client.module.params.get(param_name): + self._process_rate_bps(option=param_name) + + for param_name in ["device_read_iops", "device_write_iops"]: + if client.module.params.get(param_name): + self._process_rate_iops(option=param_name) + + def fail(self, msg): + self.client.fail(msg) + + @property + def update_parameters(self): + ''' + Returns parameters used to update a container + ''' + + update_parameters = dict( + blkio_weight='blkio_weight', + cpu_period='cpu_period', + cpu_quota='cpu_quota', + cpu_shares='cpu_shares', + cpuset_cpus='cpuset_cpus', + cpuset_mems='cpuset_mems', + mem_limit='memory', + mem_reservation='memory_reservation', + memswap_limit='memory_swap', + kernel_memory='kernel_memory', + restart_policy='restart_policy', + ) + + result = dict() + for key, value in update_parameters.items(): + if getattr(self, value, None) is not None: + if key == 'restart_policy' and self.client.option_minimal_versions[value]['supported']: + restart_policy = dict(Name=self.restart_policy, + MaximumRetryCount=self.restart_retries) + result[key] = restart_policy + elif self.client.option_minimal_versions[value]['supported']: + result[key] = getattr(self, value) + return result + + @property + def create_parameters(self): + ''' + Returns parameters used to create a container + ''' + create_params = dict( + command='command', + domainname='domainname', + hostname='hostname', + user='user', + detach='detach', + stdin_open='interactive', + tty='tty', + ports='ports', + environment='env', + name='name', + entrypoint='entrypoint', + mac_address='mac_address', + labels='labels', + stop_signal='stop_signal', + working_dir='working_dir', + stop_timeout='stop_timeout', + healthcheck='healthcheck', + ) + + if self.client.docker_py_version < LooseVersion('3.0'): + # cpu_shares and volume_driver moved to create_host_config in > 3 + create_params['cpu_shares'] = 'cpu_shares' + create_params['volume_driver'] = 'volume_driver' + + result = dict( + host_config=self._host_config(), + volumes=self._get_mounts(), + ) + + for key, value in create_params.items(): + if getattr(self, value, None) is not None: + if self.client.option_minimal_versions[value]['supported']: + result[key] = getattr(self, value) + + if self.disable_healthcheck: + # Make sure image's health check is overridden + result['healthcheck'] = {'test': ['NONE']} + + if self.networks_cli_compatible and self.networks: + network = self.networks[0] + params = dict() + for para in ('ipv4_address', 'ipv6_address', 'links', 'aliases'): + if network.get(para): + params[para] = network[para] + network_config = dict() + network_config[network['name']] = self.client.create_endpoint_config(**params) + result['networking_config'] = self.client.create_networking_config(network_config) + return result + + def _expand_host_paths(self): + new_vols = [] + for vol in self.volumes: + if ':' in vol: + parts = vol.split(':') + if len(parts) == 3: + host, container, mode = parts + if not is_volume_permissions(mode): + self.fail('Found invalid volumes mode: {0}'.format(mode)) + if re.match(r'[.~]', host): + host = os.path.abspath(os.path.expanduser(host)) + new_vols.append("%s:%s:%s" % 
(host, container, mode)) + continue + elif len(parts) == 2: + if not is_volume_permissions(parts[1]) and re.match(r'[.~]', parts[0]): + host = os.path.abspath(os.path.expanduser(parts[0])) + new_vols.append("%s:%s:rw" % (host, parts[1])) + continue + new_vols.append(vol) + return new_vols + + def _get_mounts(self): + ''' + Return a list of container mounts. + :return: + ''' + result = [] + if self.volumes: + for vol in self.volumes: + # Only pass anonymous volumes to create container + if ':' in vol: + parts = vol.split(':') + if len(parts) == 3: + continue + if len(parts) == 2: + if not is_volume_permissions(parts[1]): + continue + result.append(vol) + self.log("mounts:") + self.log(result, pretty_print=True) + return result + + def _host_config(self): + ''' + Returns parameters used to create a HostConfig object + ''' + + host_config_params = dict( + port_bindings='published_ports', + publish_all_ports='publish_all_ports', + links='links', + privileged='privileged', + dns='dns_servers', + dns_opt='dns_opts', + dns_search='dns_search_domains', + binds='volume_binds', + volumes_from='volumes_from', + network_mode='network_mode', + userns_mode='userns_mode', + cap_add='capabilities', + cap_drop='cap_drop', + extra_hosts='etc_hosts', + read_only='read_only', + ipc_mode='ipc_mode', + security_opt='security_opts', + ulimits='ulimits', + sysctls='sysctls', + log_config='log_config', + mem_limit='memory', + memswap_limit='memory_swap', + mem_swappiness='memory_swappiness', + oom_score_adj='oom_score_adj', + oom_kill_disable='oom_killer', + shm_size='shm_size', + group_add='groups', + devices='devices', + pid_mode='pid_mode', + tmpfs='tmpfs', + init='init', + uts_mode='uts', + runtime='runtime', + auto_remove='auto_remove', + device_read_bps='device_read_bps', + device_write_bps='device_write_bps', + device_read_iops='device_read_iops', + device_write_iops='device_write_iops', + pids_limit='pids_limit', + mounts='mounts', + nano_cpus='cpus', + ) + + if self.client.docker_py_version >= LooseVersion('1.9') and self.client.docker_api_version >= LooseVersion('1.22'): + # blkio_weight can always be updated, but can only be set on creation + # when Docker SDK for Python and Docker API are new enough + host_config_params['blkio_weight'] = 'blkio_weight' + + if self.client.docker_py_version >= LooseVersion('3.0'): + # cpu_shares and volume_driver moved to create_host_config in > 3 + host_config_params['cpu_shares'] = 'cpu_shares' + host_config_params['volume_driver'] = 'volume_driver' + + params = dict() + for key, value in host_config_params.items(): + if getattr(self, value, None) is not None: + if self.client.option_minimal_versions[value]['supported']: + params[key] = getattr(self, value) + + if self.restart_policy: + params['restart_policy'] = dict(Name=self.restart_policy, + MaximumRetryCount=self.restart_retries) + + if 'mounts' in params: + params['mounts'] = self.mounts_opt + + return self.client.create_host_config(**params) + + @property + def default_host_ip(self): + ip = '0.0.0.0' + if not self.networks: + return ip + for net in self.networks: + if net.get('name'): + try: + network = self.client.inspect_network(net['name']) + if network.get('Driver') == 'bridge' and \ + network.get('Options', {}).get('com.docker.network.bridge.host_binding_ipv4'): + ip = network['Options']['com.docker.network.bridge.host_binding_ipv4'] + break + except NotFound as nfe: + self.client.fail( + "Cannot inspect the network '{0}' to determine the default IP: {1}".format(net['name'], nfe), + 
exception=traceback.format_exc() + ) + return ip + + def _parse_publish_ports(self): + ''' + Parse ports from docker CLI syntax + ''' + if self.published_ports is None: + return None + + if 'all' in self.published_ports: + return 'all' + + default_ip = self.default_host_ip + + binds = {} + for port in self.published_ports: + parts = split_colon_ipv6(str(port), self.client) + container_port = parts[-1] + protocol = '' + if '/' in container_port: + container_port, protocol = parts[-1].split('/') + container_ports = parse_port_range(container_port, self.client) + + p_len = len(parts) + if p_len == 1: + port_binds = len(container_ports) * [(default_ip,)] + elif p_len == 2: + if len(container_ports) == 1: + port_binds = [(default_ip, parts[0])] + else: + port_binds = [(default_ip, port) for port in parse_port_range(parts[0], self.client)] + elif p_len == 3: + # We only allow IPv4 and IPv6 addresses for the bind address + ipaddr = parts[0] + if not re.match(r'^[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+$', parts[0]) and not re.match(r'^\[[0-9a-fA-F:]+\]$', ipaddr): + self.fail(('Bind addresses for published ports must be IPv4 or IPv6 addresses, not hostnames. ' + 'Use the dig lookup to resolve hostnames. (Found hostname: {0})').format(ipaddr)) + if re.match(r'^\[[0-9a-fA-F:]+\]$', ipaddr): + ipaddr = ipaddr[1:-1] + if parts[1]: + if len(container_ports) == 1: + port_binds = [(ipaddr, parts[1])] + else: + port_binds = [(ipaddr, port) for port in parse_port_range(parts[1], self.client)] + else: + port_binds = len(container_ports) * [(ipaddr,)] + + for bind, container_port in zip(port_binds, container_ports): + idx = '{0}/{1}'.format(container_port, protocol) if protocol else container_port + if idx in binds: + old_bind = binds[idx] + if isinstance(old_bind, list): + old_bind.append(bind) + else: + binds[idx] = [old_bind, bind] + else: + binds[idx] = bind + return binds + + def _get_volume_binds(self, volumes): + ''' + Extract host bindings, if any, from list of volume mapping strings. + + :return: dictionary of bind mappings + ''' + result = dict() + if volumes: + for vol in volumes: + host = None + if ':' in vol: + parts = vol.split(':') + if len(parts) == 3: + host, container, mode = parts + if not is_volume_permissions(mode): + self.fail('Found invalid volumes mode: {0}'.format(mode)) + elif len(parts) == 2: + if not is_volume_permissions(parts[1]): + host, container, mode = (parts + ['rw']) + if host is not None: + result[host] = dict( + bind=container, + mode=mode + ) + return result + + def _parse_exposed_ports(self, published_ports): + ''' + Parse exposed ports from docker CLI-style ports syntax. 
+ ''' + exposed = [] + if self.exposed_ports: + for port in self.exposed_ports: + port = str(port).strip() + protocol = 'tcp' + match = re.search(r'(/.+$)', port) + if match: + protocol = match.group(1).replace('/', '') + port = re.sub(r'/.+$', '', port) + exposed.append((port, protocol)) + if published_ports: + # Any published port should also be exposed + for publish_port in published_ports: + match = False + if isinstance(publish_port, string_types) and '/' in publish_port: + port, protocol = publish_port.split('/') + port = int(port) + else: + protocol = 'tcp' + port = int(publish_port) + for exposed_port in exposed: + if exposed_port[1] != protocol: + continue + if isinstance(exposed_port[0], string_types) and '-' in exposed_port[0]: + start_port, end_port = exposed_port[0].split('-') + if int(start_port) <= port <= int(end_port): + match = True + elif exposed_port[0] == port: + match = True + if not match: + exposed.append((port, protocol)) + return exposed + + @staticmethod + def _parse_links(links): + ''' + Turn links into a dictionary + ''' + if links is None: + return None + + result = [] + for link in links: + parsed_link = link.split(':', 1) + if len(parsed_link) == 2: + result.append((parsed_link[0], parsed_link[1])) + else: + result.append((parsed_link[0], parsed_link[0])) + return result + + def _parse_ulimits(self): + ''' + Turn ulimits into an array of Ulimit objects + ''' + if self.ulimits is None: + return None + + results = [] + for limit in self.ulimits: + limits = dict() + pieces = limit.split(':') + if len(pieces) >= 2: + limits['name'] = pieces[0] + limits['soft'] = int(pieces[1]) + limits['hard'] = int(pieces[1]) + if len(pieces) == 3: + limits['hard'] = int(pieces[2]) + try: + results.append(Ulimit(**limits)) + except ValueError as exc: + self.fail("Error parsing ulimits value %s - %s" % (limit, exc)) + return results + + def _parse_sysctls(self): + ''' + Turn sysctls into an hash of Sysctl objects + ''' + return self.sysctls + + def _parse_log_config(self): + ''' + Create a LogConfig object + ''' + if self.log_driver is None: + return None + + options = dict( + Type=self.log_driver, + Config=dict() + ) + + if self.log_options is not None: + options['Config'] = dict() + for k, v in self.log_options.items(): + if not isinstance(v, string_types): + self.client.module.warn( + "Non-string value found for log_options option '%s'. The value is automatically converted to '%s'. " + "If this is not correct, or you want to avoid such warnings, please quote the value." % (k, str(v)) + ) + v = str(v) + self.log_options[k] = v + options['Config'][k] = v + + try: + return LogConfig(**options) + except ValueError as exc: + self.fail('Error parsing logging options - %s' % (exc)) + + def _parse_tmpfs(self): + ''' + Turn tmpfs into a hash of Tmpfs objects + ''' + result = dict() + if self.tmpfs is None: + return result + + for tmpfs_spec in self.tmpfs: + split_spec = tmpfs_spec.split(":", 1) + if len(split_spec) > 1: + result[split_spec[0]] = split_spec[1] + else: + result[split_spec[0]] = "" + return result + + def _get_environment(self): + """ + If environment file is combined with explicit environment variables, the explicit environment variables + take precedence. 
+ """ + final_env = {} + if self.env_file: + parsed_env_file = utils.parse_env_file(self.env_file) + for name, value in parsed_env_file.items(): + final_env[name] = str(value) + if self.env: + for name, value in self.env.items(): + if not isinstance(value, string_types): + self.fail("Non-string value found for env option. Ambiguous env options must be " + "wrapped in quotes to avoid them being interpreted. Key: %s" % (name, )) + final_env[name] = str(value) + return final_env + + def _get_network_id(self, network_name): + network_id = None + try: + for network in self.client.networks(names=[network_name]): + if network['Name'] == network_name: + network_id = network['Id'] + break + except Exception as exc: + self.fail("Error getting network id for %s - %s" % (network_name, str(exc))) + return network_id + + def _process_mounts(self): + if self.mounts is None: + return None, None + mounts_list = [] + mounts_expected = [] + for mount in self.mounts: + target = mount['target'] + datatype = mount['type'] + mount_dict = dict(mount) + # Sanity checks (so we don't wait for docker-py to barf on input) + if mount_dict.get('source') is None and datatype != 'tmpfs': + self.client.fail('source must be specified for mount "{0}" of type "{1}"'.format(target, datatype)) + mount_option_types = dict( + volume_driver='volume', + volume_options='volume', + propagation='bind', + no_copy='volume', + labels='volume', + tmpfs_size='tmpfs', + tmpfs_mode='tmpfs', + ) + for option, req_datatype in mount_option_types.items(): + if mount_dict.get(option) is not None and datatype != req_datatype: + self.client.fail('{0} cannot be specified for mount "{1}" of type "{2}" (needs type "{3}")'.format(option, target, datatype, req_datatype)) + # Handle volume_driver and volume_options + volume_driver = mount_dict.pop('volume_driver') + volume_options = mount_dict.pop('volume_options') + if volume_driver: + if volume_options: + volume_options = clean_dict_booleans_for_docker_api(volume_options) + mount_dict['driver_config'] = docker_types.DriverConfig(name=volume_driver, options=volume_options) + if mount_dict['labels']: + mount_dict['labels'] = clean_dict_booleans_for_docker_api(mount_dict['labels']) + if mount_dict.get('tmpfs_size') is not None: + try: + mount_dict['tmpfs_size'] = human_to_bytes(mount_dict['tmpfs_size']) + except ValueError as exc: + self.fail('Failed to convert tmpfs_size of mount "{0}" to bytes: {1}'.format(target, exc)) + if mount_dict.get('tmpfs_mode') is not None: + try: + mount_dict['tmpfs_mode'] = int(mount_dict['tmpfs_mode'], 8) + except Exception as dummy: + self.client.fail('tmp_fs mode of mount "{0}" is not an octal string!'.format(target)) + # Fill expected mount dict + mount_expected = dict(mount) + mount_expected['tmpfs_size'] = mount_dict['tmpfs_size'] + mount_expected['tmpfs_mode'] = mount_dict['tmpfs_mode'] + # Add result to lists + mounts_list.append(docker_types.Mount(**mount_dict)) + mounts_expected.append(omit_none_from_dict(mount_expected)) + return mounts_list, mounts_expected + + def _process_rate_bps(self, option): + """ + Format device_read_bps and device_write_bps option + """ + devices_list = [] + for v in getattr(self, option): + device_dict = dict((x.title(), y) for x, y in v.items()) + device_dict['Rate'] = human_to_bytes(device_dict['Rate']) + devices_list.append(device_dict) + + setattr(self, option, devices_list) + + def _process_rate_iops(self, option): + """ + Format device_read_iops and device_write_iops option + """ + devices_list = [] + for v in getattr(self, option): 
+ device_dict = dict((x.title(), y) for x, y in v.items())
+ devices_list.append(device_dict)
+
+ setattr(self, option, devices_list)
+
+ def _replace_container_names(self, mode):
+ """
+ Parse IPC and PID modes. If they contain a container name, replace
+ with the container's ID.
+ """
+ if mode is None or not mode.startswith('container:'):
+ return mode
+ container_name = mode[len('container:'):]
+ # Try to inspect container to see whether this is an ID or a
+ # name (and in the latter case, retrieve its ID)
+ container = self.client.get_container(container_name)
+ if container is None:
+ # If we can't find the container, issue a warning and continue with
+ # what the user specified.
+ self.client.module.warn('Cannot find a container with name or ID "{0}"'.format(container_name))
+ return mode
+ return 'container:{0}'.format(container['Id'])
+
+ def _check_mount_target_collisions(self):
+ last = dict()
+
+ # Helper: records which option (mounts or volumes) each mount target came
+ # from, and fails when a target appears more than once.
+ def f(t, name):
+ if t in last:
+ if name == last[t]:
+ self.client.fail('The mount point "{0}" appears twice in the {1} option'.format(t, name))
+ else:
+ self.client.fail('The mount point "{0}" appears both in the {1} and {2} option'.format(t, name, last[t]))
+ last[t] = name
+
+ if self.expected_mounts:
+ for t in [m['target'] for m in self.expected_mounts]:
+ f(t, 'mounts')
+ if self.volumes:
+ for v in self.volumes:
+ vs = v.split(':')
+ f(vs[0 if len(vs) == 1 else 1], 'volumes')
+
+
+class Container(DockerBaseClass):
+
+ def __init__(self, container, parameters):
+ super(Container, self).__init__()
+ self.raw = container
+ self.Id = None
+ self.container = container
+ if container:
+ self.Id = container['Id']
+ self.Image = container['Image']
+ self.log(self.container, pretty_print=True)
+ self.parameters = parameters
+ self.parameters.expected_links = None
+ self.parameters.expected_ports = None
+ self.parameters.expected_exposed = None
+ self.parameters.expected_volumes = None
+ self.parameters.expected_ulimits = None
+ self.parameters.expected_sysctls = None
+ self.parameters.expected_etc_hosts = None
+ self.parameters.expected_env = None
+ # Maps the computed 'expected_*' attributes to the module option whose
+ # comparison rule should be applied when diffing against the container.
+ self.parameters_map = dict()
+ self.parameters_map['expected_links'] = 'links'
+ self.parameters_map['expected_ports'] = 'expected_ports'
+ self.parameters_map['expected_exposed'] = 'exposed_ports'
+ self.parameters_map['expected_volumes'] = 'volumes'
+ self.parameters_map['expected_ulimits'] = 'ulimits'
+ self.parameters_map['expected_sysctls'] = 'sysctls'
+ self.parameters_map['expected_etc_hosts'] = 'etc_hosts'
+ self.parameters_map['expected_env'] = 'env'
+ self.parameters_map['expected_entrypoint'] = 'entrypoint'
+ self.parameters_map['expected_binds'] = 'volumes'
+ self.parameters_map['expected_cmd'] = 'command'
+ self.parameters_map['expected_devices'] = 'devices'
+ self.parameters_map['expected_healthcheck'] = 'healthcheck'
+ self.parameters_map['expected_mounts'] = 'mounts'
+
+ def fail(self, msg):
+ self.parameters.client.fail(msg)
+
+ @property
+ def exists(self):
+ return True if self.container else False
+
+ @property
+ def removing(self):
+ if self.container and self.container.get('State'):
+ return self.container['State'].get('Status') == 'removing'
+ return False
+
+ @property
+ def running(self):
+ if self.container and self.container.get('State'):
+ if self.container['State'].get('Running') and not self.container['State'].get('Ghost', False):
+ return True
+ return False
+
+ @property
+ def paused(self):
+ if self.container and self.container.get('State'):
+ return self.container['State'].get('Paused', False)
+ return
False + + def _compare(self, a, b, compare): + ''' + Compare values a and b as described in compare. + ''' + return compare_generic(a, b, compare['comparison'], compare['type']) + + def _decode_mounts(self, mounts): + if not mounts: + return mounts + result = [] + empty_dict = dict() + for mount in mounts: + res = dict() + res['type'] = mount.get('Type') + res['source'] = mount.get('Source') + res['target'] = mount.get('Target') + res['read_only'] = mount.get('ReadOnly', False) # golang's omitempty for bool returns None for False + res['consistency'] = mount.get('Consistency') + res['propagation'] = mount.get('BindOptions', empty_dict).get('Propagation') + res['no_copy'] = mount.get('VolumeOptions', empty_dict).get('NoCopy', False) + res['labels'] = mount.get('VolumeOptions', empty_dict).get('Labels', empty_dict) + res['volume_driver'] = mount.get('VolumeOptions', empty_dict).get('DriverConfig', empty_dict).get('Name') + res['volume_options'] = mount.get('VolumeOptions', empty_dict).get('DriverConfig', empty_dict).get('Options', empty_dict) + res['tmpfs_size'] = mount.get('TmpfsOptions', empty_dict).get('SizeBytes') + res['tmpfs_mode'] = mount.get('TmpfsOptions', empty_dict).get('Mode') + result.append(res) + return result + + def has_different_configuration(self, image): + ''' + Diff parameters vs existing container config. Returns tuple: (True | False, List of differences) + ''' + self.log('Starting has_different_configuration') + self.parameters.expected_entrypoint = self._get_expected_entrypoint() + self.parameters.expected_links = self._get_expected_links() + self.parameters.expected_ports = self._get_expected_ports() + self.parameters.expected_exposed = self._get_expected_exposed(image) + self.parameters.expected_volumes = self._get_expected_volumes(image) + self.parameters.expected_binds = self._get_expected_binds(image) + self.parameters.expected_ulimits = self._get_expected_ulimits(self.parameters.ulimits) + self.parameters.expected_sysctls = self._get_expected_sysctls(self.parameters.sysctls) + self.parameters.expected_etc_hosts = self._convert_simple_dict_to_list('etc_hosts') + self.parameters.expected_env = self._get_expected_env(image) + self.parameters.expected_cmd = self._get_expected_cmd() + self.parameters.expected_devices = self._get_expected_devices() + self.parameters.expected_healthcheck = self._get_expected_healthcheck() + + if not self.container.get('HostConfig'): + self.fail("has_config_diff: Error parsing container properties. HostConfig missing.") + if not self.container.get('Config'): + self.fail("has_config_diff: Error parsing container properties. Config missing.") + if not self.container.get('NetworkSettings'): + self.fail("has_config_diff: Error parsing container properties. NetworkSettings missing.") + + host_config = self.container['HostConfig'] + log_config = host_config.get('LogConfig', dict()) + config = self.container['Config'] + network = self.container['NetworkSettings'] + + # The previous version of the docker module ignored the detach state by + # assuming if the container was running, it must have been detached. 
+ detach = not (config.get('AttachStderr') and config.get('AttachStdout')) + + # "ExposedPorts": null returns None type & causes AttributeError - PR #5517 + if config.get('ExposedPorts') is not None: + expected_exposed = [self._normalize_port(p) for p in config.get('ExposedPorts', dict()).keys()] + else: + expected_exposed = [] + + # Map parameters to container inspect results + config_mapping = dict( + expected_cmd=config.get('Cmd'), + domainname=config.get('Domainname'), + hostname=config.get('Hostname'), + user=config.get('User'), + detach=detach, + init=host_config.get('Init'), + interactive=config.get('OpenStdin'), + capabilities=host_config.get('CapAdd'), + cap_drop=host_config.get('CapDrop'), + expected_devices=host_config.get('Devices'), + dns_servers=host_config.get('Dns'), + dns_opts=host_config.get('DnsOptions'), + dns_search_domains=host_config.get('DnsSearch'), + expected_env=(config.get('Env') or []), + expected_entrypoint=config.get('Entrypoint'), + expected_etc_hosts=host_config['ExtraHosts'], + expected_exposed=expected_exposed, + groups=host_config.get('GroupAdd'), + ipc_mode=host_config.get("IpcMode"), + labels=config.get('Labels'), + expected_links=host_config.get('Links'), + mac_address=network.get('MacAddress'), + memory_swappiness=host_config.get('MemorySwappiness'), + network_mode=host_config.get('NetworkMode'), + userns_mode=host_config.get('UsernsMode'), + oom_killer=host_config.get('OomKillDisable'), + oom_score_adj=host_config.get('OomScoreAdj'), + pid_mode=host_config.get('PidMode'), + privileged=host_config.get('Privileged'), + expected_ports=host_config.get('PortBindings'), + read_only=host_config.get('ReadonlyRootfs'), + runtime=host_config.get('Runtime'), + shm_size=host_config.get('ShmSize'), + security_opts=host_config.get("SecurityOpt"), + stop_signal=config.get("StopSignal"), + tmpfs=host_config.get('Tmpfs'), + tty=config.get('Tty'), + expected_ulimits=host_config.get('Ulimits'), + expected_sysctls=host_config.get('Sysctls'), + uts=host_config.get('UTSMode'), + expected_volumes=config.get('Volumes'), + expected_binds=host_config.get('Binds'), + volume_driver=host_config.get('VolumeDriver'), + volumes_from=host_config.get('VolumesFrom'), + working_dir=config.get('WorkingDir'), + publish_all_ports=host_config.get('PublishAllPorts'), + expected_healthcheck=config.get('Healthcheck'), + disable_healthcheck=(not config.get('Healthcheck') or config.get('Healthcheck').get('Test') == ['NONE']), + device_read_bps=host_config.get('BlkioDeviceReadBps'), + device_write_bps=host_config.get('BlkioDeviceWriteBps'), + device_read_iops=host_config.get('BlkioDeviceReadIOps'), + device_write_iops=host_config.get('BlkioDeviceWriteIOps'), + pids_limit=host_config.get('PidsLimit'), + # According to https://github.com/moby/moby/, support for HostConfig.Mounts + # has been included at least since v17.03.0-ce, which has API version 1.26. + # The previous tag, v1.9.1, has API version 1.21 and does not have + # HostConfig.Mounts. I have no idea what about API 1.25... 
+ expected_mounts=self._decode_mounts(host_config.get('Mounts')), + cpus=host_config.get('NanoCpus'), + ) + # Options which don't make sense without their accompanying option + if self.parameters.log_driver: + config_mapping['log_driver'] = log_config.get('Type') + config_mapping['log_options'] = log_config.get('Config') + + if self.parameters.client.option_minimal_versions['auto_remove']['supported']: + # auto_remove is only supported in Docker SDK for Python >= 2.0.0; unfortunately + # it has a default value, that's why we have to jump through the hoops here + config_mapping['auto_remove'] = host_config.get('AutoRemove') + + if self.parameters.client.option_minimal_versions['stop_timeout']['supported']: + # stop_timeout is only supported in Docker SDK for Python >= 2.1. Note that + # stop_timeout has a hybrid role, in that it used to be something only used + # for stopping containers, and is now also used as a container property. + # That's why it needs special handling here. + config_mapping['stop_timeout'] = config.get('StopTimeout') + + if self.parameters.client.docker_api_version < LooseVersion('1.22'): + # For docker API < 1.22, update_container() is not supported. Thus + # we need to handle all limits which are usually handled by + # update_container() as configuration changes which require a container + # restart. + restart_policy = host_config.get('RestartPolicy', dict()) + + # Options which don't make sense without their accompanying option + if self.parameters.restart_policy: + config_mapping['restart_retries'] = restart_policy.get('MaximumRetryCount') + + config_mapping.update(dict( + blkio_weight=host_config.get('BlkioWeight'), + cpu_period=host_config.get('CpuPeriod'), + cpu_quota=host_config.get('CpuQuota'), + cpu_shares=host_config.get('CpuShares'), + cpuset_cpus=host_config.get('CpusetCpus'), + cpuset_mems=host_config.get('CpusetMems'), + kernel_memory=host_config.get("KernelMemory"), + memory=host_config.get('Memory'), + memory_reservation=host_config.get('MemoryReservation'), + memory_swap=host_config.get('MemorySwap'), + restart_policy=restart_policy.get('Name') + )) + + differences = DifferenceTracker() + for key, value in config_mapping.items(): + minimal_version = self.parameters.client.option_minimal_versions.get(key, {}) + if not minimal_version.get('supported', True): + continue + compare = self.parameters.client.comparisons[self.parameters_map.get(key, key)] + self.log('check differences %s %s vs %s (%s)' % (key, getattr(self.parameters, key), str(value), compare)) + if getattr(self.parameters, key, None) is not None: + match = self._compare(getattr(self.parameters, key), value, compare) + + if not match: + # no match. record the differences + p = getattr(self.parameters, key) + c = value + if compare['type'] == 'set': + # Since the order does not matter, sort so that the diff output is better. + if p is not None: + p = sorted(p) + if c is not None: + c = sorted(c) + elif compare['type'] == 'set(dict)': + # Since the order does not matter, sort so that the diff output is better. + if key == 'expected_mounts': + # For selected values, use one entry as key + def sort_key_fn(x): + return x['target'] + else: + # We sort the list of dictionaries by using the sorted items of a dict as its key. 
+ def sort_key_fn(x): + return sorted((a, str(b)) for a, b in x.items()) + if p is not None: + p = sorted(p, key=sort_key_fn) + if c is not None: + c = sorted(c, key=sort_key_fn) + differences.add(key, parameter=p, active=c) + + has_differences = not differences.empty + return has_differences, differences + + def has_different_resource_limits(self): + ''' + Diff parameters and container resource limits + ''' + if not self.container.get('HostConfig'): + self.fail("limits_differ_from_container: Error parsing container properties. HostConfig missing.") + if self.parameters.client.docker_api_version < LooseVersion('1.22'): + # update_container() call not supported + return False, [] + + host_config = self.container['HostConfig'] + + restart_policy = host_config.get('RestartPolicy') or dict() + + config_mapping = dict( + blkio_weight=host_config.get('BlkioWeight'), + cpu_period=host_config.get('CpuPeriod'), + cpu_quota=host_config.get('CpuQuota'), + cpu_shares=host_config.get('CpuShares'), + cpuset_cpus=host_config.get('CpusetCpus'), + cpuset_mems=host_config.get('CpusetMems'), + kernel_memory=host_config.get("KernelMemory"), + memory=host_config.get('Memory'), + memory_reservation=host_config.get('MemoryReservation'), + memory_swap=host_config.get('MemorySwap'), + restart_policy=restart_policy.get('Name') + ) + + # Options which don't make sense without their accompanying option + if self.parameters.restart_policy: + config_mapping['restart_retries'] = restart_policy.get('MaximumRetryCount') + + differences = DifferenceTracker() + for key, value in config_mapping.items(): + if getattr(self.parameters, key, None): + compare = self.parameters.client.comparisons[self.parameters_map.get(key, key)] + match = self._compare(getattr(self.parameters, key), value, compare) + + if not match: + # no match. record the differences + differences.add(key, parameter=getattr(self.parameters, key), active=value) + different = not differences.empty + return different, differences + + def has_network_differences(self): + ''' + Check if the container is connected to requested networks with expected options: links, aliases, ipv4, ipv6 + ''' + different = False + differences = [] + + if not self.parameters.networks: + return different, differences + + if not self.container.get('NetworkSettings'): + self.fail("has_missing_networks: Error parsing container properties. 
NetworkSettings missing.") + + connected_networks = self.container['NetworkSettings']['Networks'] + for network in self.parameters.networks: + network_info = connected_networks.get(network['name']) + if network_info is None: + different = True + differences.append(dict( + parameter=network, + container=None + )) + else: + diff = False + network_info_ipam = network_info.get('IPAMConfig') or {} + if network.get('ipv4_address') and network['ipv4_address'] != network_info_ipam.get('IPv4Address'): + diff = True + if network.get('ipv6_address') and network['ipv6_address'] != network_info_ipam.get('IPv6Address'): + diff = True + if network.get('aliases'): + if not compare_generic(network['aliases'], network_info.get('Aliases'), 'allow_more_present', 'set'): + diff = True + if network.get('links'): + expected_links = [] + for link, alias in network['links']: + expected_links.append("%s:%s" % (link, alias)) + if not compare_generic(expected_links, network_info.get('Links'), 'allow_more_present', 'set'): + diff = True + if diff: + different = True + differences.append(dict( + parameter=network, + container=dict( + name=network['name'], + ipv4_address=network_info_ipam.get('IPv4Address'), + ipv6_address=network_info_ipam.get('IPv6Address'), + aliases=network_info.get('Aliases'), + links=network_info.get('Links') + ) + )) + return different, differences + + def has_extra_networks(self): + ''' + Check if the container is connected to non-requested networks + ''' + extra_networks = [] + extra = False + + if not self.container.get('NetworkSettings'): + self.fail("has_extra_networks: Error parsing container properties. NetworkSettings missing.") + + connected_networks = self.container['NetworkSettings'].get('Networks') + if connected_networks: + for network, network_config in connected_networks.items(): + keep = False + if self.parameters.networks: + for expected_network in self.parameters.networks: + if expected_network['name'] == network: + keep = True + if not keep: + extra = True + extra_networks.append(dict(name=network, id=network_config['NetworkID'])) + return extra, extra_networks + + def _get_expected_devices(self): + if not self.parameters.devices: + return None + expected_devices = [] + for device in self.parameters.devices: + parts = device.split(':') + if len(parts) == 1: + expected_devices.append( + dict( + CgroupPermissions='rwm', + PathInContainer=parts[0], + PathOnHost=parts[0] + )) + elif len(parts) == 2: + expected_devices.append( + dict( + CgroupPermissions='rwm', + PathInContainer=parts[1], + PathOnHost=parts[0] + ) + ) + else: + expected_devices.append( + dict( + CgroupPermissions=parts[2], + PathInContainer=parts[1], + PathOnHost=parts[0] + )) + return expected_devices + + def _get_expected_entrypoint(self): + if not self.parameters.entrypoint: + return None + return shlex.split(self.parameters.entrypoint) + + def _get_expected_ports(self): + if not self.parameters.published_ports: + return None + expected_bound_ports = {} + for container_port, config in self.parameters.published_ports.items(): + if isinstance(container_port, int): + container_port = "%s/tcp" % container_port + if len(config) == 1: + if isinstance(config[0], int): + expected_bound_ports[container_port] = [{'HostIp': "0.0.0.0", 'HostPort': config[0]}] + else: + expected_bound_ports[container_port] = [{'HostIp': config[0], 'HostPort': ""}] + elif isinstance(config[0], tuple): + expected_bound_ports[container_port] = [] + for host_ip, host_port in config: +
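# Each (host_ip, host_port) tuple becomes its own binding entry, for example + # ('127.0.0.1', 8080) -> {'HostIp': '127.0.0.1', 'HostPort': '8080'}. +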
expected_bound_ports[container_port].append({'HostIp': host_ip, 'HostPort': str(host_port)}) + else: + expected_bound_ports[container_port] = [{'HostIp': config[0], 'HostPort': str(config[1])}] + return expected_bound_ports + + def _get_expected_links(self): + if self.parameters.links is None: + return None + self.log('parameter links:') + self.log(self.parameters.links, pretty_print=True) + exp_links = [] + for link, alias in self.parameters.links: + exp_links.append("/%s:%s/%s" % (link, ('/' + self.parameters.name), alias)) + return exp_links + + def _get_expected_binds(self, image): + self.log('_get_expected_binds') + image_vols = [] + if image: + image_vols = self._get_image_binds(image[self.parameters.client.image_inspect_source].get('Volumes')) + param_vols = [] + if self.parameters.volumes: + for vol in self.parameters.volumes: + host = None + if ':' in vol: + parts = vol.split(':') + if len(parts) == 3: + host, container, mode = parts + if not is_volume_permissions(mode): + self.fail('Found invalid volumes mode: {0}'.format(mode)) + if len(parts) == 2: + if not is_volume_permissions(parts[1]): + host, container, mode = parts + ['rw'] + if host: + param_vols.append("%s:%s:%s" % (host, container, mode)) + result = list(set(image_vols + param_vols)) + self.log("expected_binds:") + self.log(result, pretty_print=True) + return result + + def _get_image_binds(self, volumes): + ''' + Convert array of binds to array of strings with format host_path:container_path:mode + + :param volumes: array of bind dicts + :return: array of strings + ''' + results = [] + if isinstance(volumes, dict): + results += self._get_bind_from_dict(volumes) + elif isinstance(volumes, list): + for vol in volumes: + results += self._get_bind_from_dict(vol) + return results + + @staticmethod + def _get_bind_from_dict(volume_dict): + results = [] + if volume_dict: + for host_path, config in volume_dict.items(): + if isinstance(config, dict) and config.get('bind'): + container_path = config.get('bind') + mode = config.get('mode', 'rw') + results.append("%s:%s:%s" % (host_path, container_path, mode)) + return results + + def _get_expected_volumes(self, image): + self.log('_get_expected_volumes') + expected_vols = dict() + if image and image[self.parameters.client.image_inspect_source].get('Volumes'): + expected_vols.update(image[self.parameters.client.image_inspect_source].get('Volumes')) + + if self.parameters.volumes: + for vol in self.parameters.volumes: + # We only expect anonymous volumes to show up in the list + if ':' in vol: + parts = vol.split(':') + if len(parts) == 3: + continue + if len(parts) == 2: + if not is_volume_permissions(parts[1]): + continue + expected_vols[vol] = dict() + + if not expected_vols: + expected_vols = None + self.log("expected_volumes:") + self.log(expected_vols, pretty_print=True) + return expected_vols + + def _get_expected_env(self, image): + self.log('_get_expected_env') + expected_env = dict() + if image and image[self.parameters.client.image_inspect_source].get('Env'): + for env_var in image[self.parameters.client.image_inspect_source]['Env']: + parts = env_var.split('=', 1) + expected_env[parts[0]] = parts[1] + if self.parameters.env: + expected_env.update(self.parameters.env) + param_env = [] + for key, value in expected_env.items(): + param_env.append("%s=%s" % (key, value)) + return param_env + + def _get_expected_exposed(self, image): + self.log('_get_expected_exposed') + image_ports = [] + if image: + image_exposed_ports = 
image[self.parameters.client.image_inspect_source].get('ExposedPorts') or {} + image_ports = [self._normalize_port(p) for p in image_exposed_ports.keys()] + param_ports = [] + if self.parameters.ports: + param_ports = [str(p[0]) + '/' + p[1] for p in self.parameters.ports] + result = list(set(image_ports + param_ports)) + self.log(result, pretty_print=True) + return result + + def _get_expected_ulimits(self, config_ulimits): + self.log('_get_expected_ulimits') + if config_ulimits is None: + return None + results = [] + for limit in config_ulimits: + results.append(dict( + Name=limit.name, + Soft=limit.soft, + Hard=limit.hard + )) + return results + + def _get_expected_sysctls(self, config_sysctls): + self.log('_get_expected_sysctls') + if config_sysctls is None: + return None + result = dict() + for key, value in config_sysctls.items(): + result[key] = str(value) + return result + + def _get_expected_cmd(self): + self.log('_get_expected_cmd') + if not self.parameters.command: + return None + return shlex.split(self.parameters.command) + + def _convert_simple_dict_to_list(self, param_name, join_with=':'): + if getattr(self.parameters, param_name, None) is None: + return None + results = [] + for key, value in getattr(self.parameters, param_name).items(): + results.append("%s%s%s" % (key, join_with, value)) + return results + + def _normalize_port(self, port): + if '/' not in port: + return port + '/tcp' + return port + + def _get_expected_healthcheck(self): + self.log('_get_expected_healthcheck') + expected_healthcheck = dict() + + if self.parameters.healthcheck: + expected_healthcheck.update([(k.title().replace("_", ""), v) + for k, v in self.parameters.healthcheck.items()]) + + return expected_healthcheck + + +class ContainerManager(DockerBaseClass): + ''' + Perform container management tasks + ''' + + def __init__(self, client): + + super(ContainerManager, self).__init__() + + if client.module.params.get('log_options') and not client.module.params.get('log_driver'): + client.module.warn('log_options is ignored when log_driver is not specified') + if client.module.params.get('healthcheck') and not client.module.params.get('healthcheck').get('test'): + client.module.warn('healthcheck is ignored when test is not specified') + if client.module.params.get('restart_retries') is not None and not client.module.params.get('restart_policy'): + client.module.warn('restart_retries is ignored when restart_policy is not specified') + + self.client = client + self.parameters = TaskParameters(client) + self.check_mode = self.client.check_mode + self.results = {'changed': False, 'actions': []} + self.diff = {} + self.diff_tracker = DifferenceTracker() + self.facts = {} + + state = self.parameters.state + if state in ('stopped', 'started', 'present'): + self.present(state) + elif state == 'absent': + self.absent() + + if not self.check_mode and not self.parameters.debug: + self.results.pop('actions') + + if self.client.module._diff or self.parameters.debug: + self.diff['before'], self.diff['after'] = self.diff_tracker.get_before_after() + self.results['diff'] = self.diff + + if self.facts: + self.results['ansible_facts'] = {'docker_container': self.facts} + self.results['container'] = self.facts + + def wait_for_state(self, container_id, complete_states=None, wait_states=None, accept_removal=False, max_wait=None): + delay = 1.0 + total_wait = 0 + while True: + # Inspect container + result = self.client.get_container_by_id(container_id) + if result is None: + if accept_removal: + return + msg = 'Encountered
vanished container while waiting for container "{0}"' + self.fail(msg.format(container_id)) + # Check container state + state = result.get('State', {}).get('Status') + if complete_states is not None and state in complete_states: + return + if wait_states is not None and state not in wait_states: + msg = 'Encountered unexpected state "{1}" while waiting for container "{0}"' + self.fail(msg.format(container_id, state)) + # Wait + if max_wait is not None: + if total_wait > max_wait: + msg = 'Timeout of {1} seconds exceeded while waiting for container "{0}"' + self.fail(msg.format(container_id, max_wait)) + if total_wait + delay > max_wait: + delay = max_wait - total_wait + sleep(delay) + total_wait += delay + # Exponential backoff, but never wait longer than 10 seconds + # (1.1**24 < 10, 1.1**25 > 10, so it will take 25 iterations + # until the maximal 10 seconds delay is reached. By then, the + # code will have slept for ~1.5 minutes.) + delay = min(delay * 1.1, 10) + + def present(self, state): + container = self._get_container(self.parameters.name) + was_running = container.running + was_paused = container.paused + container_created = False + + # If the image parameter was passed then we need to deal with the image + # version comparison. Otherwise we handle this depending on whether + # the container already runs or not; in the former case, if the + # container needs to be restarted, we use the existing container's + # image ID. + image = self._get_image() + self.log(image, pretty_print=True) + if not container.exists or container.removing: + # New container + if container.removing: + self.log('Found container in removal phase') + else: + self.log('No container found') + if not self.parameters.image: + self.fail('Cannot create container when image is not specified!') + self.diff_tracker.add('exists', parameter=True, active=False) + if container.removing and not self.check_mode: + # Wait for container to be removed before trying to create it + self.wait_for_state( + container.Id, wait_states=['removing'], accept_removal=True, max_wait=self.parameters.removal_wait_timeout) + new_container = self.container_create(self.parameters.image, self.parameters.create_parameters) + if new_container: + container = new_container + container_created = True + else: + # Existing container + different, differences = container.has_different_configuration(image) + image_different = False + if self.parameters.comparisons['image']['comparison'] == 'strict': + image_different = self._image_is_different(image, container) + if image_different or different or self.parameters.recreate: + self.diff_tracker.merge(differences) + self.diff['differences'] = differences.get_legacy_docker_container_diffs() + if image_different: + self.diff['image_different'] = True + self.log("differences") + self.log(differences.get_legacy_docker_container_diffs(), pretty_print=True) + image_to_use = self.parameters.image + if not image_to_use and container and container.Image: + image_to_use = container.Image + if not image_to_use: + self.fail('Cannot recreate container when image is not specified or cannot be extracted from current container!') + if container.running: + self.container_stop(container.Id) + self.container_remove(container.Id) + if not self.check_mode: + self.wait_for_state( + container.Id, wait_states=['removing'], accept_removal=True, max_wait=self.parameters.removal_wait_timeout) + new_container = self.container_create(image_to_use, self.parameters.create_parameters) + if new_container: + container = new_container +
container_created = True + + if container and container.exists: + container = self.update_limits(container) + container = self.update_networks(container, container_created) + + if state == 'started' and not container.running: + self.diff_tracker.add('running', parameter=True, active=was_running) + container = self.container_start(container.Id) + elif state == 'started' and self.parameters.restart: + self.diff_tracker.add('running', parameter=True, active=was_running) + self.diff_tracker.add('restarted', parameter=True, active=False) + container = self.container_restart(container.Id) + elif state == 'stopped' and container.running: + self.diff_tracker.add('running', parameter=False, active=was_running) + self.container_stop(container.Id) + container = self._get_container(container.Id) + + if state == 'started' and self.parameters.paused is not None and container.paused != self.parameters.paused: + self.diff_tracker.add('paused', parameter=self.parameters.paused, active=was_paused) + if not self.check_mode: + try: + if self.parameters.paused: + self.client.pause(container=container.Id) + else: + self.client.unpause(container=container.Id) + except Exception as exc: + self.fail("Error %s container %s: %s" % ( + "pausing" if self.parameters.paused else "unpausing", container.Id, str(exc) + )) + container = self._get_container(container.Id) + self.results['changed'] = True + self.results['actions'].append(dict(set_paused=self.parameters.paused)) + + self.facts = container.raw + + def absent(self): + container = self._get_container(self.parameters.name) + if container.exists: + if container.running: + self.diff_tracker.add('running', parameter=False, active=True) + self.container_stop(container.Id) + self.diff_tracker.add('exists', parameter=False, active=True) + self.container_remove(container.Id) + + def fail(self, msg, **kwargs): + self.client.fail(msg, **kwargs) + + def _output_logs(self, msg): + self.client.module.log(msg=msg) + + def _get_container(self, container): + ''' + Expects container ID or Name. Returns a container object + ''' + return Container(self.client.get_container(container), self.parameters) + + def _get_image(self): + if not self.parameters.image: + self.log('No image specified') + return None + if is_image_name_id(self.parameters.image): + image = self.client.find_image_by_id(self.parameters.image) + else: + repository, tag = utils.parse_repository_tag(self.parameters.image) + if not tag: + tag = "latest" + image = self.client.find_image(repository, tag) + if not image or self.parameters.pull: + if not self.check_mode: + self.log("Pull the image.") + image, alreadyToLatest = self.client.pull_image(repository, tag) + if alreadyToLatest: + self.results['changed'] = False + else: + self.results['changed'] = True + self.results['actions'].append(dict(pulled_image="%s:%s" % (repository, tag))) + elif not image: + # If the image isn't there, claim we'll pull. + # (Implicitly: if the image is there, claim it already was latest.) 
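+ # Example: in check mode, when the image is missing locally, this branch reports + # changed=true and records a pulled_image action without contacting the registry.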
+ self.results['changed'] = True + self.results['actions'].append(dict(pulled_image="%s:%s" % (repository, tag))) + + self.log("image") + self.log(image, pretty_print=True) + return image + + def _image_is_different(self, image, container): + if image and image.get('Id'): + if container and container.Image: + if image.get('Id') != container.Image: + self.diff_tracker.add('image', parameter=image.get('Id'), active=container.Image) + return True + return False + + def update_limits(self, container): + limits_differ, different_limits = container.has_different_resource_limits() + if limits_differ: + self.log("limit differences:") + self.log(different_limits.get_legacy_docker_container_diffs(), pretty_print=True) + self.diff_tracker.merge(different_limits) + if limits_differ and not self.check_mode: + self.container_update(container.Id, self.parameters.update_parameters) + return self._get_container(container.Id) + return container + + def update_networks(self, container, container_created): + updated_container = container + if self.parameters.comparisons['networks']['comparison'] != 'ignore' or container_created: + has_network_differences, network_differences = container.has_network_differences() + if has_network_differences: + if self.diff.get('differences'): + self.diff['differences'].append(dict(network_differences=network_differences)) + else: + self.diff['differences'] = [dict(network_differences=network_differences)] + for netdiff in network_differences: + self.diff_tracker.add( + 'network.{0}'.format(netdiff['parameter']['name']), + parameter=netdiff['parameter'], + active=netdiff['container'] + ) + self.results['changed'] = True + updated_container = self._add_networks(container, network_differences) + + if (self.parameters.comparisons['networks']['comparison'] == 'strict' and self.parameters.networks is not None) or self.parameters.purge_networks: + has_extra_networks, extra_networks = container.has_extra_networks() + if has_extra_networks: + if self.diff.get('differences'): + self.diff['differences'].append(dict(purge_networks=extra_networks)) + else: + self.diff['differences'] = [dict(purge_networks=extra_networks)] + for extra_network in extra_networks: + self.diff_tracker.add( + 'network.{0}'.format(extra_network['name']), + active=extra_network + ) + self.results['changed'] = True + updated_container = self._purge_networks(container, extra_networks) + return updated_container + + def _add_networks(self, container, differences): + for diff in differences: + # remove the container from the network, if connected + if diff.get('container'): + self.results['actions'].append(dict(removed_from_network=diff['parameter']['name'])) + if not self.check_mode: + try: + self.client.disconnect_container_from_network(container.Id, diff['parameter']['id']) + except Exception as exc: + self.fail("Error disconnecting container from network %s - %s" % (diff['parameter']['name'], + str(exc))) + # connect to the network + params = dict() + for para in ('ipv4_address', 'ipv6_address', 'links', 'aliases'): + if diff['parameter'].get(para): + params[para] = diff['parameter'][para] + self.results['actions'].append(dict(added_to_network=diff['parameter']['name'], network_parameters=params)) + if not self.check_mode: + try: + self.log("Connecting container to network %s" % diff['parameter']['id']) + self.log(params, pretty_print=True) + self.client.connect_container_to_network(container.Id, diff['parameter']['id'], **params) + except Exception as exc: + self.fail("Error connecting container to network %s - 
%s" % (diff['parameter']['name'], str(exc))) + return self._get_container(container.Id) + + def _purge_networks(self, container, networks): + for network in networks: + self.results['actions'].append(dict(removed_from_network=network['name'])) + if not self.check_mode: + try: + self.client.disconnect_container_from_network(container.Id, network['name']) + except Exception as exc: + self.fail("Error disconnecting container from network %s - %s" % (network['name'], + str(exc))) + return self._get_container(container.Id) + + def container_create(self, image, create_parameters): + self.log("create container") + self.log("image: %s parameters:" % image) + self.log(create_parameters, pretty_print=True) + self.results['actions'].append(dict(created="Created container", create_parameters=create_parameters)) + self.results['changed'] = True + new_container = None + if not self.check_mode: + try: + new_container = self.client.create_container(image, **create_parameters) + self.client.report_warnings(new_container) + except Exception as exc: + self.fail("Error creating container: %s" % str(exc)) + return self._get_container(new_container['Id']) + return new_container + + def container_start(self, container_id): + self.log("start container %s" % (container_id)) + self.results['actions'].append(dict(started=container_id)) + self.results['changed'] = True + if not self.check_mode: + try: + self.client.start(container=container_id) + except Exception as exc: + self.fail("Error starting container %s: %s" % (container_id, str(exc))) + + if self.parameters.detach is False: + if self.client.docker_py_version >= LooseVersion('3.0'): + status = self.client.wait(container_id)['StatusCode'] + else: + status = self.client.wait(container_id) + if self.parameters.auto_remove: + output = "Cannot retrieve result as auto_remove is enabled" + if self.parameters.output_logs: + self.client.module.warn('Cannot output_logs if auto_remove is enabled!') + else: + config = self.client.inspect_container(container_id) + logging_driver = config['HostConfig']['LogConfig']['Type'] + + if logging_driver in ('json-file', 'journald'): + output = self.client.logs(container_id, stdout=True, stderr=True, stream=False, timestamps=False) + if self.parameters.output_logs: + self._output_logs(msg=output) + else: + output = "Result logged using `%s` driver" % logging_driver + + if status != 0: + self.fail(output, status=status) + if self.parameters.cleanup: + self.container_remove(container_id, force=True) + insp = self._get_container(container_id) + if insp.raw: + insp.raw['Output'] = output + else: + insp.raw = dict(Output=output) + return insp + return self._get_container(container_id) + + def container_remove(self, container_id, link=False, force=False): + volume_state = (not self.parameters.keep_volumes) + self.log("remove container container:%s v:%s link:%s force%s" % (container_id, volume_state, link, force)) + self.results['actions'].append(dict(removed=container_id, volume_state=volume_state, link=link, force=force)) + self.results['changed'] = True + response = None + if not self.check_mode: + count = 0 + while True: + try: + response = self.client.remove_container(container_id, v=volume_state, link=link, force=force) + except NotFound as dummy: + pass + except APIError as exc: + if 'Unpause the container before stopping or killing' in exc.explanation: + # New docker daemon versions do not allow containers to be removed + # if they are paused. Make sure we don't end up in an infinite loop. 
+ if count == 3: + self.fail("Error removing container %s (tried to unpause three times): %s" % (container_id, str(exc))) + count += 1 + # Unpause + try: + self.client.unpause(container=container_id) + except Exception as exc2: + self.fail("Error unpausing container %s for removal: %s" % (container_id, str(exc2))) + # Now try again + continue + if 'removal of container ' in exc.explanation and ' is already in progress' in exc.explanation: + pass + else: + self.fail("Error removing container %s: %s" % (container_id, str(exc))) + except Exception as exc: + self.fail("Error removing container %s: %s" % (container_id, str(exc))) + # We only loop when explicitly requested by 'continue' + break + return response + + def container_update(self, container_id, update_parameters): + if update_parameters: + self.log("update container %s" % (container_id)) + self.log(update_parameters, pretty_print=True) + self.results['actions'].append(dict(updated=container_id, update_parameters=update_parameters)) + self.results['changed'] = True + if not self.check_mode and callable(getattr(self.client, 'update_container')): + try: + result = self.client.update_container(container_id, **update_parameters) + self.client.report_warnings(result) + except Exception as exc: + self.fail("Error updating container %s: %s" % (container_id, str(exc))) + return self._get_container(container_id) + + def container_kill(self, container_id): + self.results['actions'].append(dict(killed=container_id, signal=self.parameters.kill_signal)) + self.results['changed'] = True + response = None + if not self.check_mode: + try: + if self.parameters.kill_signal: + response = self.client.kill(container_id, signal=self.parameters.kill_signal) + else: + response = self.client.kill(container_id) + except Exception as exc: + self.fail("Error killing container %s: %s" % (container_id, exc)) + return response + + def container_restart(self, container_id): + self.results['actions'].append(dict(restarted=container_id, timeout=self.parameters.stop_timeout)) + self.results['changed'] = True + if not self.check_mode: + try: + if self.parameters.stop_timeout: + dummy = self.client.restart(container_id, timeout=self.parameters.stop_timeout) + else: + dummy = self.client.restart(container_id) + except Exception as exc: + self.fail("Error restarting container %s: %s" % (container_id, str(exc))) + return self._get_container(container_id) + + def container_stop(self, container_id): + if self.parameters.force_kill: + self.container_kill(container_id) + return + self.results['actions'].append(dict(stopped=container_id, timeout=self.parameters.stop_timeout)) + self.results['changed'] = True + response = None + if not self.check_mode: + count = 0 + while True: + try: + if self.parameters.stop_timeout: + response = self.client.stop(container_id, timeout=self.parameters.stop_timeout) + else: + response = self.client.stop(container_id) + except APIError as exc: + if 'Unpause the container before stopping or killing' in exc.explanation: + # New docker daemon versions do not allow containers to be stopped + # if they are paused. Make sure we don't end up in an infinite loop.
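+ # Same unpause-and-retry pattern as in container_remove() above: at most three + # unpause attempts before giving up.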
+ if count == 3: + self.fail("Error stopping container %s (tried to unpause three times): %s" % (container_id, str(exc))) + count += 1 + # Unpause + try: + self.client.unpause(container=container_id) + except Exception as exc2: + self.fail("Error unpausing container %s for stopping: %s" % (container_id, str(exc2))) + # Now try again + continue + self.fail("Error stopping container %s: %s" % (container_id, str(exc))) + except Exception as exc: + self.fail("Error stopping container %s: %s" % (container_id, str(exc))) + # We only loop when explicitly requested by 'continue' + break + return response + + +def detect_ipvX_address_usage(client): + ''' + Helper function to detect whether any specified network uses ipv4_address or ipv6_address + ''' + for network in client.module.params.get("networks") or []: + if network.get('ipv4_address') is not None or network.get('ipv6_address') is not None: + return True + return False + + +class AnsibleDockerClientContainer(AnsibleDockerClient): + # A list of module options which are not docker container properties + __NON_CONTAINER_PROPERTY_OPTIONS = tuple([ + 'env_file', 'force_kill', 'keep_volumes', 'ignore_image', 'name', 'pull', 'purge_networks', + 'recreate', 'restart', 'state', 'trust_image_content', 'networks', 'cleanup', 'kill_signal', + 'output_logs', 'paused', 'removal_wait_timeout' + ] + list(DOCKER_COMMON_ARGS.keys())) + + def _parse_comparisons(self): + comparisons = {} + comp_aliases = {} + # Put in defaults + explicit_types = dict( + command='list', + devices='set(dict)', + dns_search_domains='list', + dns_servers='list', + env='set', + entrypoint='list', + etc_hosts='set', + mounts='set(dict)', + networks='set(dict)', + ulimits='set(dict)', + device_read_bps='set(dict)', + device_write_bps='set(dict)', + device_read_iops='set(dict)', + device_write_iops='set(dict)', + ) + all_options = set() # this is for improving user feedback when a wrong option was specified for comparison + default_values = dict( + stop_timeout='ignore', + ) + for option, data in self.module.argument_spec.items(): + all_options.add(option) + for alias in data.get('aliases', []): + all_options.add(alias) + # Ignore options which aren't used as container properties + if option in self.__NON_CONTAINER_PROPERTY_OPTIONS and option != 'networks': + continue + # Determine option type + if option in explicit_types: + datatype = explicit_types[option] + elif data['type'] == 'list': + datatype = 'set' + elif data['type'] == 'dict': + datatype = 'dict' + else: + datatype = 'value' + # Determine comparison type + if option in default_values: + comparison = default_values[option] + elif datatype in ('list', 'value'): + comparison = 'strict' + else: + comparison = 'allow_more_present' + comparisons[option] = dict(type=datatype, comparison=comparison, name=option) + # Keep track of aliases + comp_aliases[option] = option + for alias in data.get('aliases', []): + comp_aliases[alias] = option + # Process legacy ignore options + if self.module.params['ignore_image']: + comparisons['image']['comparison'] = 'ignore' + if self.module.params['purge_networks']: + comparisons['networks']['comparison'] = 'strict' + # Process options + if self.module.params.get('comparisons'): + # If '*' appears in comparisons, process it first + if '*' in self.module.params['comparisons']: + value = self.module.params['comparisons']['*'] + if value not in ('strict', 'ignore'): + self.fail("The wildcard can only be used with comparison modes 'strict' and 'ignore'!") + for option, v in comparisons.items(): + if
option == 'networks': + # `networks` is special: only update if + # some value is actually specified + if self.module.params['networks'] is None: + continue + v['comparison'] = value + # Now process all other comparisons. + comp_aliases_used = {} + for key, value in self.module.params['comparisons'].items(): + if key == '*': + continue + # Find main key + key_main = comp_aliases.get(key) + if key_main is None: + if key in all_options: + self.fail("The module option '%s' cannot be specified in the comparisons dict, " + "since it does not correspond to the container's state!" % key) + self.fail("Unknown module option '%s' in comparisons dict!" % key) + if key_main in comp_aliases_used: + self.fail("Both '%s' and '%s' (aliases of %s) are specified in comparisons dict!" % (key, comp_aliases_used[key_main], key_main)) + comp_aliases_used[key_main] = key + # Check value and update accordingly + if value in ('strict', 'ignore'): + comparisons[key_main]['comparison'] = value + elif value == 'allow_more_present': + if comparisons[key_main]['type'] == 'value': + self.fail("Option '%s' is a value and not a set/list/dict, so its comparison cannot be %s" % (key, value)) + comparisons[key_main]['comparison'] = value + else: + self.fail("Unknown comparison mode '%s'!" % value) + # Add implicit options + comparisons['publish_all_ports'] = dict(type='value', comparison='strict', name='published_ports') + comparisons['expected_ports'] = dict(type='dict', comparison=comparisons['published_ports']['comparison'], name='expected_ports') + comparisons['disable_healthcheck'] = dict(type='value', + comparison='ignore' if comparisons['healthcheck']['comparison'] == 'ignore' else 'strict', + name='disable_healthcheck') + # Check legacy values + if self.module.params['ignore_image'] and comparisons['image']['comparison'] != 'ignore': + self.module.warn('The ignore_image option has been overridden by the comparisons option!') + if self.module.params['purge_networks'] and comparisons['networks']['comparison'] != 'strict': + self.module.warn('The purge_networks option has been overridden by the comparisons option!') + self.comparisons = comparisons + + def _get_additional_minimal_versions(self): + stop_timeout_supported = self.docker_api_version >= LooseVersion('1.25') + stop_timeout_needed_for_update = self.module.params.get("stop_timeout") is not None and self.module.params.get('state') != 'absent' + if stop_timeout_supported: + stop_timeout_supported = self.docker_py_version >= LooseVersion('2.1') + if stop_timeout_needed_for_update and not stop_timeout_supported: + # We warn (instead of fail) since in older versions, stop_timeout was not used + # to update the container's configuration, but only when stopping a container. + self.module.warn("Docker SDK for Python's version is %s. Minimum version required is 2.1 to update " + "the container's stop_timeout configuration. " + "If you use the 'docker-py' module, you have to switch to the 'docker' Python package." % (docker_version,)) + else: + if stop_timeout_needed_for_update and not stop_timeout_supported: + # We warn (instead of fail) since in older versions, stop_timeout was not used + # to update the container's configuration, but only when stopping a container. + self.module.warn("Docker API version is %s. Minimum version required is 1.25 to set or " + "update the container's stop_timeout configuration."
% (self.docker_api_version_str,)) + self.option_minimal_versions['stop_timeout']['supported'] = stop_timeout_supported + + def __init__(self, **kwargs): + option_minimal_versions = dict( + # internal options + log_config=dict(), + publish_all_ports=dict(), + ports=dict(), + volume_binds=dict(), + name=dict(), + # normal options + device_read_bps=dict(docker_py_version='1.9.0', docker_api_version='1.22'), + device_read_iops=dict(docker_py_version='1.9.0', docker_api_version='1.22'), + device_write_bps=dict(docker_py_version='1.9.0', docker_api_version='1.22'), + device_write_iops=dict(docker_py_version='1.9.0', docker_api_version='1.22'), + dns_opts=dict(docker_api_version='1.21', docker_py_version='1.10.0'), + ipc_mode=dict(docker_api_version='1.25'), + mac_address=dict(docker_api_version='1.25'), + oom_score_adj=dict(docker_api_version='1.22'), + shm_size=dict(docker_api_version='1.22'), + stop_signal=dict(docker_api_version='1.21'), + tmpfs=dict(docker_api_version='1.22'), + volume_driver=dict(docker_api_version='1.21'), + memory_reservation=dict(docker_api_version='1.21'), + kernel_memory=dict(docker_api_version='1.21'), + auto_remove=dict(docker_py_version='2.1.0', docker_api_version='1.25'), + healthcheck=dict(docker_py_version='2.0.0', docker_api_version='1.24'), + init=dict(docker_py_version='2.2.0', docker_api_version='1.25'), + runtime=dict(docker_py_version='2.4.0', docker_api_version='1.25'), + sysctls=dict(docker_py_version='1.10.0', docker_api_version='1.24'), + userns_mode=dict(docker_py_version='1.10.0', docker_api_version='1.23'), + uts=dict(docker_py_version='3.5.0', docker_api_version='1.25'), + pids_limit=dict(docker_py_version='1.10.0', docker_api_version='1.23'), + mounts=dict(docker_py_version='2.6.0', docker_api_version='1.25'), + cpus=dict(docker_py_version='2.3.0', docker_api_version='1.25'), + # specials + ipvX_address_supported=dict(docker_py_version='1.9.0', docker_api_version='1.22', + detect_usage=detect_ipvX_address_usage, + usage_msg='ipv4_address or ipv6_address in networks'), + stop_timeout=dict(), # see _get_additional_minimal_versions() + ) + + super(AnsibleDockerClientContainer, self).__init__( + option_minimal_versions=option_minimal_versions, + option_minimal_versions_ignore_params=self.__NON_CONTAINER_PROPERTY_OPTIONS, + **kwargs + ) + + self.image_inspect_source = 'Config' + if self.docker_api_version < LooseVersion('1.21'): + self.image_inspect_source = 'ContainerConfig' + + self._get_additional_minimal_versions() + self._parse_comparisons() + + if self.module.params['container_default_behavior'] is None: + self.module.params['container_default_behavior'] = 'compatibility' + self.module.deprecate( + 'The container_default_behavior option will change its default value from "compatibility" to ' + '"no_defaults" in Ansible 2.14. 
To remove this warning, please specify an explicit value for it now', + version='2.14' + ) + if self.module.params['container_default_behavior'] == 'compatibility': + old_default_values = dict( + auto_remove=False, + detach=True, + init=False, + interactive=False, + memory="0", + paused=False, + privileged=False, + read_only=False, + tty=False, + ) + for param, value in old_default_values.items(): + if self.module.params[param] is None: + self.module.params[param] = value + + +def main(): + argument_spec = dict( + auto_remove=dict(type='bool'), + blkio_weight=dict(type='int'), + capabilities=dict(type='list', elements='str'), + cap_drop=dict(type='list', elements='str'), + cleanup=dict(type='bool', default=False), + command=dict(type='raw'), + comparisons=dict(type='dict'), + container_default_behavior=dict(type='str', choices=['compatibility', 'no_defaults']), + cpu_period=dict(type='int'), + cpu_quota=dict(type='int'), + cpus=dict(type='float'), + cpuset_cpus=dict(type='str'), + cpuset_mems=dict(type='str'), + cpu_shares=dict(type='int'), + detach=dict(type='bool'), + devices=dict(type='list', elements='str'), + device_read_bps=dict(type='list', elements='dict', options=dict( + path=dict(required=True, type='str'), + rate=dict(required=True, type='str'), + )), + device_write_bps=dict(type='list', elements='dict', options=dict( + path=dict(required=True, type='str'), + rate=dict(required=True, type='str'), + )), + device_read_iops=dict(type='list', elements='dict', options=dict( + path=dict(required=True, type='str'), + rate=dict(required=True, type='int'), + )), + device_write_iops=dict(type='list', elements='dict', options=dict( + path=dict(required=True, type='str'), + rate=dict(required=True, type='int'), + )), + dns_servers=dict(type='list', elements='str'), + dns_opts=dict(type='list', elements='str'), + dns_search_domains=dict(type='list', elements='str'), + domainname=dict(type='str'), + entrypoint=dict(type='list', elements='str'), + env=dict(type='dict'), + env_file=dict(type='path'), + etc_hosts=dict(type='dict'), + exposed_ports=dict(type='list', elements='str', aliases=['exposed', 'expose']), + force_kill=dict(type='bool', default=False, aliases=['forcekill']), + groups=dict(type='list', elements='str'), + healthcheck=dict(type='dict', options=dict( + test=dict(type='raw'), + interval=dict(type='str'), + timeout=dict(type='str'), + start_period=dict(type='str'), + retries=dict(type='int'), + )), + hostname=dict(type='str'), + ignore_image=dict(type='bool', default=False), + image=dict(type='str'), + init=dict(type='bool'), + interactive=dict(type='bool'), + ipc_mode=dict(type='str'), + keep_volumes=dict(type='bool', default=True), + kernel_memory=dict(type='str'), + kill_signal=dict(type='str'), + labels=dict(type='dict'), + links=dict(type='list', elements='str'), + log_driver=dict(type='str'), + log_options=dict(type='dict', aliases=['log_opt']), + mac_address=dict(type='str'), + memory=dict(type='str'), + memory_reservation=dict(type='str'), + memory_swap=dict(type='str'), + memory_swappiness=dict(type='int'), + mounts=dict(type='list', elements='dict', options=dict( + target=dict(type='str', required=True), + source=dict(type='str'), + type=dict(type='str', choices=['bind', 'volume', 'tmpfs', 'npipe'], default='volume'), + read_only=dict(type='bool'), + consistency=dict(type='str', choices=['default', 'consistent', 'cached', 'delegated']), + propagation=dict(type='str', choices=['private', 'rprivate', 'shared', 'rshared', 'slave', 'rslave']), + no_copy=dict(type='bool'), 
+ labels=dict(type='dict'), + volume_driver=dict(type='str'), + volume_options=dict(type='dict'), + tmpfs_size=dict(type='str'), + tmpfs_mode=dict(type='str'), + )), + name=dict(type='str', required=True), + network_mode=dict(type='str'), + networks=dict(type='list', elements='dict', options=dict( + name=dict(type='str', required=True), + ipv4_address=dict(type='str'), + ipv6_address=dict(type='str'), + aliases=dict(type='list', elements='str'), + links=dict(type='list', elements='str'), + )), + networks_cli_compatible=dict(type='bool'), + oom_killer=dict(type='bool'), + oom_score_adj=dict(type='int'), + output_logs=dict(type='bool', default=False), + paused=dict(type='bool'), + pid_mode=dict(type='str'), + pids_limit=dict(type='int'), + privileged=dict(type='bool'), + published_ports=dict(type='list', elements='str', aliases=['ports']), + pull=dict(type='bool', default=False), + purge_networks=dict(type='bool', default=False), + read_only=dict(type='bool'), + recreate=dict(type='bool', default=False), + removal_wait_timeout=dict(type='float'), + restart=dict(type='bool', default=False), + restart_policy=dict(type='str', choices=['no', 'on-failure', 'always', 'unless-stopped']), + restart_retries=dict(type='int'), + runtime=dict(type='str'), + security_opts=dict(type='list', elements='str'), + shm_size=dict(type='str'), + state=dict(type='str', default='started', choices=['absent', 'present', 'started', 'stopped']), + stop_signal=dict(type='str'), + stop_timeout=dict(type='int'), + sysctls=dict(type='dict'), + tmpfs=dict(type='list', elements='str'), + trust_image_content=dict(type='bool', default=False, removed_in_version='2.14'), + tty=dict(type='bool'), + ulimits=dict(type='list', elements='str'), + user=dict(type='str'), + userns_mode=dict(type='str'), + uts=dict(type='str'), + volume_driver=dict(type='str'), + volumes=dict(type='list', elements='str'), + volumes_from=dict(type='list', elements='str'), + working_dir=dict(type='str'), + ) + + required_if = [ + ('state', 'present', ['image']) + ] + + client = AnsibleDockerClientContainer( + argument_spec=argument_spec, + required_if=required_if, + supports_check_mode=True, + min_docker_api_version='1.20', + ) + if client.module.params['networks_cli_compatible'] is None and client.module.params['networks']: + client.module.deprecate( + 'Please note that docker_container handles networks slightly different than docker CLI. ' + 'If you specify networks, the default network will still be attached as the first network. ' + '(You can specify purge_networks to remove all networks not explicitly listed.) ' + 'This behavior will change in Ansible 2.12. You can change the behavior now by setting ' + 'the new `networks_cli_compatible` option to `yes`, and remove this warning by setting ' + 'it to `no`', + version='2.12' + ) + if client.module.params['networks_cli_compatible'] is True and client.module.params['networks'] and client.module.params['network_mode'] is None: + client.module.deprecate( + 'Please note that the default value for `network_mode` will change from not specified ' + '(which is equal to `default`) to the name of the first network in `networks` if ' + '`networks` has at least one entry and `networks_cli_compatible` is `true`. You can ' + 'change the behavior now by explicitly setting `network_mode` to the name of the first ' + 'network in `networks`, and remove this warning by setting `network_mode` to `default`. 
' + 'Please make sure that the value you set to `network_mode` equals the inspection result ' + 'for existing containers, otherwise the module will recreate them. You can find out the ' + 'correct value by running "docker inspect --format \'{{.HostConfig.NetworkMode}}\' <container_name>"', + version='2.14' + ) + + try: + cm = ContainerManager(client) + client.module.exit_json(**sanitize_result(cm.results)) + except DockerException as e: + client.fail('An unexpected docker error occurred: {0}'.format(e), exception=traceback.format_exc()) + except RequestException as e: + client.fail('An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(e), exception=traceback.format_exc()) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/docker/docker_container_info.py b/plugins/modules/cloud/docker/docker_container_info.py new file mode 100644 index 0000000000..44ed22e7b3 --- /dev/null +++ b/plugins/modules/cloud/docker/docker_container_info.py @@ -0,0 +1,150 @@ +#!/usr/bin/python +# +# Copyright 2016 Red Hat | Ansible +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: docker_container_info + +short_description: Retrieves facts about a docker container + +description: + - Retrieves facts about a docker container. + - Essentially returns the output of C(docker inspect <name>), similar to what M(docker_container) + returns for a non-absent container. + + +options: + name: + description: + - The name of the container to inspect. + - When identifying an existing container, I(name) may be a name or a long or short container ID. + type: str + required: yes +extends_documentation_fragment: +- community.general.docker +- community.general.docker.docker_py_1_documentation + + +author: + - "Felix Fontein (@felixfontein)" + +requirements: + - "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 1.8.0 (use L(docker-py,https://pypi.org/project/docker-py/) for Python 2.6)" + - "Docker API >= 1.20" +''' + +EXAMPLES = ''' +- name: Get info on container + docker_container_info: + name: mydata + register: result + +- name: Does container exist? + debug: + msg: "The container {{ 'exists' if result.exists else 'does not exist' }}" + +- name: Print information about container + debug: + var: result.container + when: result.exists +''' + +RETURN = ''' +exists: + description: + - Returns whether the container exists. + type: bool + returned: always + sample: true +container: + description: + - Facts representing the current state of the container. Matches the docker inspection output. + - Will be C(none) if the container does not exist. + returned: always + type: dict + sample: '{ + "AppArmorProfile": "", + "Args": [], + "Config": { + "AttachStderr": false, + "AttachStdin": false, + "AttachStdout": false, + "Cmd": [ + "/usr/bin/supervisord" + ], + "Domainname": "", + "Entrypoint": null, + "Env": [ + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + ], + "ExposedPorts": { + "443/tcp": {}, + "80/tcp": {} + }, + "Hostname": "8e47bf643eb9", + "Image": "lnmp_nginx:v1", + "Labels": {}, + "OnBuild": null, + "OpenStdin": false, + "StdinOnce": false, + "Tty": false, + "User": "", + "Volumes": { + "/tmp/lnmp/nginx-sites/logs/": {} + }, + ...
+ }' +''' + +import traceback + +try: + from docker.errors import DockerException +except ImportError: + # missing Docker SDK for Python handled in ansible.module_utils.docker.common + pass + +from ansible_collections.community.general.plugins.module_utils.docker.common import ( + AnsibleDockerClient, + RequestException, +) + + +def main(): + argument_spec = dict( + name=dict(type='str', required=True), + ) + + client = AnsibleDockerClient( + argument_spec=argument_spec, + supports_check_mode=True, + min_docker_api_version='1.20', + ) + + try: + container = client.get_container(client.module.params['name']) + + client.module.exit_json( + changed=False, + exists=(True if container else False), + container=container, + ) + except DockerException as e: + client.fail('An unexpected docker error occurred: {0}'.format(e), exception=traceback.format_exc()) + except RequestException as e: + client.fail('An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(e), exception=traceback.format_exc()) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/docker/docker_host_info.py b/plugins/modules/cloud/docker/docker_host_info.py new file mode 100644 index 0000000000..20471ddae1 --- /dev/null +++ b/plugins/modules/cloud/docker/docker_host_info.py @@ -0,0 +1,348 @@ +#!/usr/bin/python +# +# (c) 2019 Piotr Wojciechowski +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: docker_host_info + +short_description: Retrieves facts about the docker host and lists of objects of its services + +description: + - Retrieves facts about a docker host. + - Essentially returns the output of C(docker system info). + - The module also allows listing object names for containers, images, networks and volumes. + It also allows querying information on disk usage. + - The output differs depending on the API version of the docker daemon. + - If the docker daemon cannot be contacted or does not meet the API version requirements, + the module will fail. + + +options: + containers: + description: + - Whether to list containers. + type: bool + default: no + containers_filters: + description: + - A dictionary of filter values used for selecting containers to list. + - "For example, C(until: 24h)." + - See L(the docker documentation,https://docs.docker.com/engine/reference/commandline/container_prune/#filtering) + for more information on possible filters. + type: dict + images: + description: + - Whether to list images. + type: bool + default: no + images_filters: + description: + - A dictionary of filter values used for selecting images to list. + - "For example, C(dangling: true)." + - See L(the docker documentation,https://docs.docker.com/engine/reference/commandline/image_prune/#filtering) + for more information on possible filters. + type: dict + networks: + description: + - Whether to list networks. + type: bool + default: no + networks_filters: + description: + - A dictionary of filter values used for selecting networks to list. + - See L(the docker documentation,https://docs.docker.com/engine/reference/commandline/network_prune/#filtering) + for more information on possible filters. + type: dict + volumes: + description: + - Whether to list volumes.
+ type: bool + default: no + volumes_filters: + description: + - A dictionary of filter values used for selecting volumes to list. + - See L(the docker documentation,https://docs.docker.com/engine/reference/commandline/volume_prune/#filtering) + for more information on possible filters. + type: dict + disk_usage: + description: + - Whether to return summary information on disk space used by all Docker layers. + - The output is a sum of images, volumes, containers and build cache. + type: bool + default: no + verbose_output: + description: + - When set to C(yes) and I(networks), I(volumes), I(images), I(containers) or I(disk_usage) is set to C(yes) + then the output will contain verbose information about the objects, matching the full output of the API method. + For details see the documentation of your version of the Docker API at L(https://docs.docker.com/engine/api/). + - The verbose output in this module contains only a subset of the information returned by the I(_info) module + for each type of the objects. + type: bool + default: no +extends_documentation_fragment: +- community.general.docker +- community.general.docker.docker_py_1_documentation + + +author: + - Piotr Wojciechowski (@WojciechowskiPiotr) + +requirements: + - "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 1.10.0 (use L(docker-py,https://pypi.org/project/docker-py/) for Python 2.6)" + - "Docker API >= 1.21" +''' + +EXAMPLES = ''' +- name: Get info on docker host + docker_host_info: + register: result + +- name: Get info on docker host and list images + docker_host_info: + images: yes + register: result + +- name: Get info on docker host and list images matching the filter + docker_host_info: + images: yes + images_filters: + label: "mylabel" + register: result + +- name: Get info on docker host and verbose list images + docker_host_info: + images: yes + verbose_output: yes + register: result + +- name: Get info on docker host and used disk space + docker_host_info: + disk_usage: yes + register: result + +- debug: + var: result.host_info + +''' + +RETURN = ''' +can_talk_to_docker: + description: + - Will be C(true) if the module can talk to the docker daemon. + returned: both on success and on error + type: bool + +host_info: + description: + - Facts representing the basic state of the docker host. Matches the C(docker system info) output. + returned: always + type: dict +volumes: + description: + - List of dict objects containing the basic information about each volume. + Keys match the C(docker volume ls) output unless I(verbose_output=yes). + See description for I(verbose_output). + returned: When I(volumes) is C(yes) + type: list + elements: dict +networks: + description: + - List of dict objects containing the basic information about each network. + Keys match the C(docker network ls) output unless I(verbose_output=yes). + See description for I(verbose_output). + returned: When I(networks) is C(yes) + type: list + elements: dict +containers: + description: + - List of dict objects containing the basic information about each container. + Keys match the C(docker container ls) output unless I(verbose_output=yes). + See description for I(verbose_output). + returned: When I(containers) is C(yes) + type: list + elements: dict +images: + description: + - List of dict objects containing the basic information about each image. + Keys match the C(docker image ls) output unless I(verbose_output=yes). + See description for I(verbose_output).
+ returned: When I(images) is C(yes) + type: list + elements: dict +disk_usage: + description: + - Information on summary disk usage by images, containers and volumes on docker host + unless I(verbose_output=yes). See description for I(verbose_output). + returned: When I(disk_usage) is C(yes) + type: dict + +''' + +import traceback + +from ansible_collections.community.general.plugins.module_utils.docker.common import ( + AnsibleDockerClient, + DockerBaseClass, + RequestException, +) +from ansible.module_utils._text import to_native + +try: + from docker.errors import DockerException, APIError +except ImportError: + # Missing Docker SDK for Python handled in ansible.module_utils.docker.common + pass + +from ansible_collections.community.general.plugins.module_utils.docker.common import clean_dict_booleans_for_docker_api + + +class DockerHostManager(DockerBaseClass): + + def __init__(self, client, results): + + super(DockerHostManager, self).__init__() + + self.client = client + self.results = results + self.verbose_output = self.client.module.params['verbose_output'] + + listed_objects = ['volumes', 'networks', 'containers', 'images'] + + self.results['host_info'] = self.get_docker_host_info() + + if self.client.module.params['disk_usage']: + self.results['disk_usage'] = self.get_docker_disk_usage_facts() + + for docker_object in listed_objects: + if self.client.module.params[docker_object]: + returned_name = docker_object + filter_name = docker_object + "_filters" + filters = clean_dict_booleans_for_docker_api(client.module.params.get(filter_name)) + self.results[returned_name] = self.get_docker_items_list(docker_object, filters) + + def get_docker_host_info(self): + try: + return self.client.info() + except APIError as exc: + self.client.fail("Error inspecting docker host: %s" % to_native(exc)) + + def get_docker_disk_usage_facts(self): + try: + if self.verbose_output: + return self.client.df() + else: + return dict(LayersSize=self.client.df()['LayersSize']) + except APIError as exc: + self.client.fail("Error inspecting docker host: %s" % to_native(exc)) + + def get_docker_items_list(self, docker_object=None, filters=None, verbose=False): + items = None + items_list = [] + + header_containers = ['Id', 'Image', 'Command', 'Created', 'Status', 'Ports', 'Names'] + header_volumes = ['Driver', 'Name'] + header_images = ['Id', 'RepoTags', 'Created', 'Size'] + header_networks = ['Id', 'Driver', 'Name', 'Scope'] + + filter_arg = dict() + if filters: + filter_arg['filters'] = filters + try: + if docker_object == 'containers': + items = self.client.containers(**filter_arg) + elif docker_object == 'networks': + items = self.client.networks(**filter_arg) + elif docker_object == 'images': + items = self.client.images(**filter_arg) + elif docker_object == 'volumes': + items = self.client.volumes(**filter_arg) + except APIError as exc: + self.client.fail("Error inspecting docker host for object '%s': %s" % + (docker_object, to_native(exc))) + + if self.verbose_output: + if docker_object != 'volumes': + return items + else: + return items['Volumes'] + + if docker_object == 'volumes': + items = items['Volumes'] + + for item in items: + item_record = dict() + + if docker_object == 'containers': + for key in header_containers: + item_record[key] = item.get(key) + elif docker_object == 'networks': + for key in header_networks: + item_record[key] = item.get(key) + elif docker_object == 'images': + for key in header_images: + item_record[key] = item.get(key) + elif docker_object == 'volumes': + for key in 
header_volumes:
+                    item_record[key] = item.get(key)
+            items_list.append(item_record)
+
+        return items_list
+
+
+def main():
+    argument_spec = dict(
+        containers=dict(type='bool', default=False),
+        containers_filters=dict(type='dict'),
+        images=dict(type='bool', default=False),
+        images_filters=dict(type='dict'),
+        networks=dict(type='bool', default=False),
+        networks_filters=dict(type='dict'),
+        volumes=dict(type='bool', default=False),
+        volumes_filters=dict(type='dict'),
+        disk_usage=dict(type='bool', default=False),
+        verbose_output=dict(type='bool', default=False),
+    )
+
+    option_minimal_versions = dict(
+        # The key must match the option name in argument_spec ('networks_filters',
+        # not 'network_filters'), otherwise this version check never triggers.
+        networks_filters=dict(docker_py_version='2.0.2'),
+        disk_usage=dict(docker_py_version='2.2.0'),
+    )
+
+    client = AnsibleDockerClient(
+        argument_spec=argument_spec,
+        supports_check_mode=True,
+        min_docker_version='1.10.0',
+        min_docker_api_version='1.21',
+        option_minimal_versions=option_minimal_versions,
+        fail_results=dict(
+            can_talk_to_docker=False,
+        ),
+    )
+    client.fail_results['can_talk_to_docker'] = True
+
+    try:
+        results = dict(
+            changed=False,
+        )
+
+        DockerHostManager(client, results)
+        client.module.exit_json(**results)
+    except DockerException as e:
+        client.fail('An unexpected docker error occurred: {0}'.format(e), exception=traceback.format_exc())
+    except RequestException as e:
+        client.fail('An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+    main()
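(Editorial aside, not part of the diff.) For readers tracing `get_docker_items_list` above: the low-level SDK calls it wraps accept the same filter dictionaries the `*_filters` options take. A rough standalone equivalent, assuming a local daemon; `APIClient`, `containers()` and the `filters` argument are real docker-py APIs, and the `status` filter is just an example:

```python
import docker

client = docker.APIClient()  # the same low-level client layer the module uses

# Roughly: docker_host_info with containers=yes, containers_filters={status: running}
running = client.containers(filters={'status': 'running'})

# Non-verbose output keeps only the `docker container ls`-style columns:
headers = ['Id', 'Image', 'Command', 'Created', 'Status', 'Ports', 'Names']
print([{key: item.get(key) for key in headers} for item in running])
```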
diff --git a/plugins/modules/cloud/docker/docker_image.py b/plugins/modules/cloud/docker/docker_image.py
new file mode 100644
index 0000000000..78b706b94f
--- /dev/null
+++ b/plugins/modules/cloud/docker/docker_image.py
@@ -0,0 +1,953 @@
+#!/usr/bin/python
+#
+# Copyright 2016 Red Hat | Ansible
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: docker_image
+
+short_description: Manage docker images.
+
+
+description:
+  - Build, load or pull an image, making the image available for creating containers. Also supports tagging an
+    image into a repository and archiving an image to a .tar file.
+  - Since Ansible 2.8, it is recommended to explicitly specify the image's source (I(source) can be C(build),
+    C(load), C(pull) or C(local)). This will be required from Ansible 2.12 on.
+
+options:
+  source:
+    description:
+      - "Determines where the module will try to retrieve the image from."
+      - "Use C(build) to build the image from a C(Dockerfile). I(build.path) must
+         be specified when this value is used."
+      - "Use C(load) to load the image from a C(.tar) file. I(load_path) must
+         be specified when this value is used."
+      - "Use C(pull) to pull the image from a registry."
+      - "Use C(local) to make sure that the image is already available on the local
+         docker daemon, i.e. do not try to build, pull or load the image."
+      - "Before Ansible 2.12, the value of this option will be auto-detected
+         to be backwards compatible, but a warning will be issued if it is not
+         explicitly specified. From Ansible 2.12 on, auto-detection will be disabled
+         and this option will be made mandatory."
+    type: str
+    choices:
+      - build
+      - load
+      - pull
+      - local
+  build:
+    description:
+      - "Specifies options used for building images."
+    type: dict
+    suboptions:
+      cache_from:
+        description:
+          - List of image names to consider as cache source.
+        type: list
+        elements: str
+      dockerfile:
+        description:
+          - Use with state C(present) and source C(build) to provide an alternate name for the Dockerfile to use when building an image.
+          - This can also include a relative path (relative to I(path)).
+        type: str
+      http_timeout:
+        description:
+          - Timeout for HTTP requests during the image build operation. Provide a positive integer value for the number of
+            seconds.
+        type: int
+      path:
+        description:
+          - Use with state C(present) to build an image. Will be the path to a directory containing the context and
+            Dockerfile for building an image.
+        type: path
+        required: yes
+      pull:
+        description:
+          - When building an image, download any updates to the C(FROM) image in the Dockerfile.
+          - The default is currently C(yes). This will change to C(no) in Ansible 2.12.
+        type: bool
+      rm:
+        description:
+          - Remove intermediate containers after build.
+        type: bool
+        default: yes
+      network:
+        description:
+          - The network to use for C(RUN) build instructions.
+        type: str
+      nocache:
+        description:
+          - Do not use cache when building an image.
+        type: bool
+        default: no
+      etc_hosts:
+        description:
+          - Extra hosts to add to C(/etc/hosts) in building containers, as a mapping of hostname to IP address.
+        type: dict
+      args:
+        description:
+          - Provide a dictionary of C(key:value) build arguments that map to Dockerfile ARG directive.
+          - Docker expects the value to be a string. For convenience any non-string values will be converted to strings.
+          - Requires Docker API >= 1.21.
+        type: dict
+      container_limits:
+        description:
+          - A dictionary of limits applied to each container created by the build process.
+        type: dict
+        suboptions:
+          memory:
+            description:
+              - Set memory limit for build.
+            type: int
+          memswap:
+            description:
+              - Total memory (memory + swap), -1 to disable swap.
+            type: int
+          cpushares:
+            description:
+              - CPU shares (relative weight).
+            type: int
+          cpusetcpus:
+            description:
+              - CPUs in which to allow execution, e.g., "0-3", "0,1".
+            type: str
+      use_config_proxy:
+        description:
+          - If set to C(yes) and a proxy configuration is specified in the docker client configuration
+            (by default C($HOME/.docker/config.json)), the corresponding environment variables will
+            be set in the container being built.
+          - Needs Docker SDK for Python >= 3.7.0.
+        type: bool
+      target:
+        description:
+          - When building an image specifies an intermediate build stage by
+            name as a final stage for the resulting image.
+        type: str
+  archive_path:
+    description:
+      - Use with state C(present) to archive an image to a .tar file.
+    type: path
+  load_path:
+    description:
+      - Use with state C(present) to load an image from a .tar file.
+      - Set I(source) to C(load) if you want to load the image. The option will
+        be set automatically before Ansible 2.12 if this option is used (except
+        if I(path) is specified as well, in which case building will take precedence).
+        From Ansible 2.12 on, you have to set I(source) to C(load).
+    type: path
+  dockerfile:
+    description:
+      - Use with state C(present) and source C(build) to provide an alternate name for the Dockerfile to use when building an image.
+      - This can also include a relative path (relative to I(path)).
+      - Please use I(build.dockerfile) instead. This option will be removed in Ansible 2.12.
+    type: str
+  force:
+    description:
+      - Use with state I(absent) to un-tag and remove all images matching the specified name.
Use with state + C(present) to build, load or pull an image when the image already exists. Also use with state C(present) + to force tagging an image. + - Please stop using this option, and use the more specialized force options + I(force_source), I(force_absent) and I(force_tag) instead. + - This option will be removed in Ansible 2.12. + type: bool + force_source: + description: + - Use with state C(present) to build, load or pull an image (depending on the + value of the I(source) option) when the image already exists. + type: bool + default: false + force_absent: + description: + - Use with state I(absent) to un-tag and remove all images matching the specified name. + type: bool + default: false + force_tag: + description: + - Use with state C(present) to force tagging an image. + type: bool + default: false + http_timeout: + description: + - Timeout for HTTP requests during the image build operation. Provide a positive integer value for the number of + seconds. + - Please use I(build.http_timeout) instead. This option will be removed in Ansible 2.12. + type: int + name: + description: + - "Image name. Name format will be one of: name, repository/name, registry_server:port/name. + When pushing or pulling an image the name can optionally include the tag by appending ':tag_name'." + - Note that image IDs (hashes) are not supported. + type: str + required: yes + path: + description: + - Use with state 'present' to build an image. Will be the path to a directory containing the context and + Dockerfile for building an image. + - Set I(source) to C(build) if you want to build the image. The option will + be set automatically before Ansible 2.12 if this option is used. From Ansible 2.12 + on, you have to set I(source) to C(build). + - Please use I(build.path) instead. This option will be removed in Ansible 2.12. + type: path + aliases: + - build_path + pull: + description: + - When building an image downloads any updates to the FROM image in Dockerfile. + - Please use I(build.pull) instead. This option will be removed in Ansible 2.12. + - The default is currently C(yes). This will change to C(no) in Ansible 2.12. + type: bool + push: + description: + - Push the image to the registry. Specify the registry as part of the I(name) or I(repository) parameter. + type: bool + default: no + rm: + description: + - Remove intermediate containers after build. + - Please use I(build.rm) instead. This option will be removed in Ansible 2.12. + type: bool + default: yes + nocache: + description: + - Do not use cache when building an image. + - Please use I(build.nocache) instead. This option will be removed in Ansible 2.12. + type: bool + default: no + repository: + description: + - Full path to a repository. Use with state C(present) to tag the image into the repository. Expects + format I(repository:tag). If no tag is provided, will use the value of the C(tag) parameter or I(latest). + type: str + state: + description: + - Make assertions about the state of an image. + - When C(absent) an image will be removed. Use the force option to un-tag and remove all images + matching the provided name. + - When C(present) check if an image exists using the provided name and tag. If the image is not found or the + force option is used, the image will either be pulled, built or loaded, depending on the I(source) option. + - By default the image will be pulled from Docker Hub, or the registry specified in the image's name. 
Note that + this will change in Ansible 2.12, so to make sure that you are pulling, set I(source) to C(pull). To build + the image, provide a I(path) value set to a directory containing a context and Dockerfile, and set I(source) + to C(build). To load an image, specify I(load_path) to provide a path to an archive file. To tag an image to + a repository, provide a I(repository) path. If the name contains a repository path, it will be pushed. + - "*Note:* C(state=build) is DEPRECATED and will be removed in Ansible 2.11. Specifying C(build) will behave the + same as C(present)." + type: str + default: present + choices: + - absent + - present + - build + tag: + description: + - Used to select an image when pulling. Will be added to the image when pushing, tagging or building. Defaults to + I(latest). + - If I(name) parameter format is I(name:tag), then tag value from I(name) will take precedence. + type: str + default: latest + buildargs: + description: + - Provide a dictionary of C(key:value) build arguments that map to Dockerfile ARG directive. + - Docker expects the value to be a string. For convenience any non-string values will be converted to strings. + - Requires Docker API >= 1.21. + - Please use I(build.args) instead. This option will be removed in Ansible 2.12. + type: dict + container_limits: + description: + - A dictionary of limits applied to each container created by the build process. + - Please use I(build.container_limits) instead. This option will be removed in Ansible 2.12. + type: dict + suboptions: + memory: + description: + - Set memory limit for build. + type: int + memswap: + description: + - Total memory (memory + swap), -1 to disable swap. + type: int + cpushares: + description: + - CPU shares (relative weight). + type: int + cpusetcpus: + description: + - CPUs in which to allow execution, e.g., "0-3", "0,1". + type: str + use_tls: + description: + - "DEPRECATED. Whether to use tls to connect to the docker daemon. Set to + C(encrypt) to use TLS. And set to C(verify) to use TLS and verify that + the server's certificate is valid for the server." + - "*Note:* If you specify this option, it will set the value of the I(tls) or + I(validate_certs) parameters if not set to C(no)." + - Will be removed in Ansible 2.11. 
+ type: str + choices: + - 'no' + - 'encrypt' + - 'verify' + +extends_documentation_fragment: +- community.general.docker +- community.general.docker.docker_py_1_documentation + + +requirements: + - "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 1.8.0 (use L(docker-py,https://pypi.org/project/docker-py/) for Python 2.6)" + - "Docker API >= 1.20" + +author: + - Pavel Antonov (@softzilla) + - Chris Houseknecht (@chouseknecht) + - Sorin Sbarnea (@ssbarnea) + +''' + +EXAMPLES = ''' + +- name: pull an image + docker_image: + name: pacur/centos-7 + source: pull + +- name: Tag and push to docker hub + docker_image: + name: pacur/centos-7:56 + repository: dcoppenhagan/myimage:7.56 + push: yes + source: local + +- name: Tag and push to local registry + docker_image: + # Image will be centos:7 + name: centos + # Will be pushed to localhost:5000/centos:7 + repository: localhost:5000/centos + tag: 7 + push: yes + source: local + +- name: Add tag latest to image + docker_image: + name: myimage:7.1.2 + repository: myimage:latest + # As 'latest' usually already is present, we need to enable overwriting of existing tags: + force_tag: yes + source: local + +- name: Remove image + docker_image: + state: absent + name: registry.ansible.com/chouseknecht/sinatra + tag: v1 + +- name: Build an image and push it to a private repo + docker_image: + build: + path: ./sinatra + name: registry.ansible.com/chouseknecht/sinatra + tag: v1 + push: yes + source: build + +- name: Archive image + docker_image: + name: registry.ansible.com/chouseknecht/sinatra + tag: v1 + archive_path: my_sinatra.tar + source: local + +- name: Load image from archive and push to a private registry + docker_image: + name: localhost:5000/myimages/sinatra + tag: v1 + push: yes + load_path: my_sinatra.tar + source: load + +- name: Build image and with build args + docker_image: + name: myimage + build: + path: /path/to/build/dir + args: + log_volume: /var/log/myapp + listen_port: 8080 + source: build + +- name: Build image using cache source + docker_image: + name: myimage:latest + build: + path: /path/to/build/dir + # Use as cache source for building myimage + cache_from: + - nginx:latest + - alpine:3.8 + source: build +''' + +RETURN = ''' +image: + description: Image inspection results for the affected image. 
+ returned: success + type: dict + sample: {} +''' + +import errno +import os +import re +import traceback + +from distutils.version import LooseVersion + +from ansible_collections.community.general.plugins.module_utils.docker.common import ( + clean_dict_booleans_for_docker_api, + docker_version, + AnsibleDockerClient, + DockerBaseClass, + is_image_name_id, + is_valid_tag, + RequestException, +) +from ansible.module_utils._text import to_native + +if docker_version is not None: + try: + if LooseVersion(docker_version) >= LooseVersion('2.0.0'): + from docker.auth import resolve_repository_name + else: + from docker.auth.auth import resolve_repository_name + from docker.utils.utils import parse_repository_tag + from docker.errors import DockerException + except ImportError: + # missing Docker SDK for Python handled in module_utils.docker.common + pass + + +class ImageManager(DockerBaseClass): + + def __init__(self, client, results): + + super(ImageManager, self).__init__() + + self.client = client + self.results = results + parameters = self.client.module.params + self.check_mode = self.client.check_mode + + self.source = parameters['source'] + build = parameters['build'] or dict() + self.archive_path = parameters.get('archive_path') + self.cache_from = build.get('cache_from') + self.container_limits = build.get('container_limits') + self.dockerfile = build.get('dockerfile') + self.force_source = parameters.get('force_source') + self.force_absent = parameters.get('force_absent') + self.force_tag = parameters.get('force_tag') + self.load_path = parameters.get('load_path') + self.name = parameters.get('name') + self.network = build.get('network') + self.extra_hosts = clean_dict_booleans_for_docker_api(build.get('etc_hosts')) + self.nocache = build.get('nocache', False) + self.build_path = build.get('path') + self.pull = build.get('pull') + self.target = build.get('target') + self.repository = parameters.get('repository') + self.rm = build.get('rm', True) + self.state = parameters.get('state') + self.tag = parameters.get('tag') + self.http_timeout = build.get('http_timeout') + self.push = parameters.get('push') + self.buildargs = build.get('args') + self.use_config_proxy = build.get('use_config_proxy') + + # If name contains a tag, it takes precedence over tag parameter. + if not is_image_name_id(self.name): + repo, repo_tag = parse_repository_tag(self.name) + if repo_tag: + self.name = repo + self.tag = repo_tag + + if self.state == 'present': + self.present() + elif self.state == 'absent': + self.absent() + + def fail(self, msg): + self.client.fail(msg) + + def present(self): + ''' + Handles state = 'present', which includes building, loading or pulling an image, + depending on user provided parameters. + + :returns None + ''' + image = self.client.find_image(name=self.name, tag=self.tag) + + if not image or self.force_source: + if self.source == 'build': + # Build the image + if not os.path.isdir(self.build_path): + self.fail("Requested build path %s could not be found or you do not have access." % self.build_path) + image_name = self.name + if self.tag: + image_name = "%s:%s" % (self.name, self.tag) + self.log("Building image %s" % image_name) + self.results['actions'].append("Built image %s from %s" % (image_name, self.build_path)) + self.results['changed'] = True + if not self.check_mode: + self.results['image'] = self.build_image() + elif self.source == 'load': + # Load the image from an archive + if not os.path.isfile(self.load_path): + self.fail("Error loading image %s. 
Specified path %s does not exist." % (self.name, + self.load_path)) + image_name = self.name + if self.tag: + image_name = "%s:%s" % (self.name, self.tag) + self.results['actions'].append("Loaded image %s from %s" % (image_name, self.load_path)) + self.results['changed'] = True + if not self.check_mode: + self.results['image'] = self.load_image() + elif self.source == 'pull': + # pull the image + self.results['actions'].append('Pulled image %s:%s' % (self.name, self.tag)) + self.results['changed'] = True + if not self.check_mode: + self.results['image'], dummy = self.client.pull_image(self.name, tag=self.tag) + elif self.source == 'local': + if image is None: + name = self.name + if self.tag: + name = "%s:%s" % (self.name, self.tag) + self.client.fail('Cannot find the image %s locally.' % name) + if not self.check_mode and image and image['Id'] == self.results['image']['Id']: + self.results['changed'] = False + + if self.archive_path: + self.archive_image(self.name, self.tag) + + if self.push and not self.repository: + self.push_image(self.name, self.tag) + elif self.repository: + self.tag_image(self.name, self.tag, self.repository, push=self.push) + + def absent(self): + ''' + Handles state = 'absent', which removes an image. + + :return None + ''' + name = self.name + if is_image_name_id(name): + image = self.client.find_image_by_id(name) + else: + image = self.client.find_image(name, self.tag) + if self.tag: + name = "%s:%s" % (self.name, self.tag) + if image: + if not self.check_mode: + try: + self.client.remove_image(name, force=self.force_absent) + except Exception as exc: + self.fail("Error removing image %s - %s" % (name, str(exc))) + + self.results['changed'] = True + self.results['actions'].append("Removed image %s" % (name)) + self.results['image']['state'] = 'Deleted' + + def archive_image(self, name, tag): + ''' + Archive an image to a .tar file. Called when archive_path is passed. + + :param name - name of the image. Type: str + :return None + ''' + + if not tag: + tag = "latest" + + image = self.client.find_image(name=name, tag=tag) + if not image: + self.log("archive image: image %s:%s not found" % (name, tag)) + return + + image_name = "%s:%s" % (name, tag) + self.results['actions'].append('Archived image %s to %s' % (image_name, self.archive_path)) + self.results['changed'] = True + if not self.check_mode: + self.log("Getting archive of image %s" % image_name) + try: + image = self.client.get_image(image_name) + except Exception as exc: + self.fail("Error getting image %s - %s" % (image_name, str(exc))) + + try: + with open(self.archive_path, 'wb') as fd: + if self.client.docker_py_version >= LooseVersion('3.0.0'): + for chunk in image: + fd.write(chunk) + else: + for chunk in image.stream(2048, decode_content=False): + fd.write(chunk) + except Exception as exc: + self.fail("Error writing image archive %s - %s" % (self.archive_path, str(exc))) + + image = self.client.find_image(name=name, tag=tag) + if image: + self.results['image'] = image + + def push_image(self, name, tag=None): + ''' + If the name of the image contains a repository path, then push the image. + + :param name Name of the image to push. + :param tag Use a specific tag. 
+ :return: None + ''' + + repository = name + if not tag: + repository, tag = parse_repository_tag(name) + registry, repo_name = resolve_repository_name(repository) + + self.log("push %s to %s/%s:%s" % (self.name, registry, repo_name, tag)) + + if registry: + self.results['actions'].append("Pushed image %s to %s/%s:%s" % (self.name, registry, repo_name, tag)) + self.results['changed'] = True + if not self.check_mode: + status = None + try: + changed = False + for line in self.client.push(repository, tag=tag, stream=True, decode=True): + self.log(line, pretty_print=True) + if line.get('errorDetail'): + raise Exception(line['errorDetail']['message']) + status = line.get('status') + if status == 'Pushing': + changed = True + self.results['changed'] = changed + except Exception as exc: + if re.search('unauthorized', str(exc)): + if re.search('authentication required', str(exc)): + self.fail("Error pushing image %s/%s:%s - %s. Try logging into %s first." % + (registry, repo_name, tag, str(exc), registry)) + else: + self.fail("Error pushing image %s/%s:%s - %s. Does the repository exist?" % + (registry, repo_name, tag, str(exc))) + self.fail("Error pushing image %s: %s" % (repository, str(exc))) + self.results['image'] = self.client.find_image(name=repository, tag=tag) + if not self.results['image']: + self.results['image'] = dict() + self.results['image']['push_status'] = status + + def tag_image(self, name, tag, repository, push=False): + ''' + Tag an image into a repository. + + :param name: name of the image. required. + :param tag: image tag. + :param repository: path to the repository. required. + :param push: bool. push the image once it's tagged. + :return: None + ''' + repo, repo_tag = parse_repository_tag(repository) + if not repo_tag: + repo_tag = "latest" + if tag: + repo_tag = tag + image = self.client.find_image(name=repo, tag=repo_tag) + found = 'found' if image else 'not found' + self.log("image %s was %s" % (repo, found)) + + if not image or self.force_tag: + self.log("tagging %s:%s to %s:%s" % (name, tag, repo, repo_tag)) + self.results['changed'] = True + self.results['actions'].append("Tagged image %s:%s to %s:%s" % (name, tag, repo, repo_tag)) + if not self.check_mode: + try: + # Finding the image does not always work, especially running a localhost registry. In those + # cases, if we don't set force=True, it errors. 
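+                    # Editorial note (an assumption about intent): re.search() treats `tag`
+                    # as a regular expression below, so a tag containing regex metacharacters
+                    # could match `name` unexpectedly; an exact "name.endswith(':' + tag)"
+                    # check would be stricter.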
+ image_name = name + if tag and not re.search(tag, name): + image_name = "%s:%s" % (name, tag) + tag_status = self.client.tag(image_name, repo, tag=repo_tag, force=True) + if not tag_status: + raise Exception("Tag operation failed.") + except Exception as exc: + self.fail("Error: failed to tag image - %s" % str(exc)) + self.results['image'] = self.client.find_image(name=repo, tag=repo_tag) + if image and image['Id'] == self.results['image']['Id']: + self.results['changed'] = False + + if push: + self.push_image(repo, repo_tag) + + def build_image(self): + ''' + Build an image + + :return: image dict + ''' + params = dict( + path=self.build_path, + tag=self.name, + rm=self.rm, + nocache=self.nocache, + timeout=self.http_timeout, + pull=self.pull, + forcerm=self.rm, + dockerfile=self.dockerfile, + decode=True, + ) + if self.client.docker_py_version < LooseVersion('3.0.0'): + params['stream'] = True + build_output = [] + if self.tag: + params['tag'] = "%s:%s" % (self.name, self.tag) + if self.container_limits: + params['container_limits'] = self.container_limits + if self.buildargs: + for key, value in self.buildargs.items(): + self.buildargs[key] = to_native(value) + params['buildargs'] = self.buildargs + if self.cache_from: + params['cache_from'] = self.cache_from + if self.network: + params['network_mode'] = self.network + if self.extra_hosts: + params['extra_hosts'] = self.extra_hosts + if self.use_config_proxy: + params['use_config_proxy'] = self.use_config_proxy + # Due to a bug in docker-py, it will crash if + # use_config_proxy is True and buildargs is None + if 'buildargs' not in params: + params['buildargs'] = {} + if self.target: + params['target'] = self.target + + for line in self.client.build(**params): + # line = json.loads(line) + self.log(line, pretty_print=True) + if "stream" in line: + build_output.append(line["stream"]) + if line.get('error'): + if line.get('errorDetail'): + errorDetail = line.get('errorDetail') + self.fail( + "Error building %s - code: %s, message: %s, logs: %s" % ( + self.name, + errorDetail.get('code'), + errorDetail.get('message'), + build_output)) + else: + self.fail("Error building %s - message: %s, logs: %s" % ( + self.name, line.get('error'), build_output)) + return self.client.find_image(name=self.name, tag=self.tag) + + def load_image(self): + ''' + Load an image from a .tar archive + + :return: image dict + ''' + try: + self.log("Opening image %s" % self.load_path) + with open(self.load_path, 'rb') as image_tar: + self.log("Loading image from %s" % self.load_path) + self.client.load_image(image_tar) + except EnvironmentError as exc: + if exc.errno == errno.ENOENT: + self.fail("Error opening image %s - %s" % (self.load_path, str(exc))) + self.fail("Error loading image %s - %s" % (self.name, str(exc))) + except Exception as exc: + self.fail("Error loading image %s - %s" % (self.name, str(exc))) + + return self.client.find_image(self.name, self.tag) + + +def main(): + argument_spec = dict( + source=dict(type='str', choices=['build', 'load', 'pull', 'local']), + build=dict(type='dict', options=dict( + cache_from=dict(type='list', elements='str'), + container_limits=dict(type='dict', options=dict( + memory=dict(type='int'), + memswap=dict(type='int'), + cpushares=dict(type='int'), + cpusetcpus=dict(type='str'), + )), + dockerfile=dict(type='str'), + http_timeout=dict(type='int'), + network=dict(type='str'), + nocache=dict(type='bool', default=False), + path=dict(type='path', required=True), + pull=dict(type='bool'), + rm=dict(type='bool', 
default=True), + args=dict(type='dict'), + use_config_proxy=dict(type='bool'), + target=dict(type='str'), + etc_hosts=dict(type='dict'), + )), + archive_path=dict(type='path'), + container_limits=dict(type='dict', options=dict( + memory=dict(type='int'), + memswap=dict(type='int'), + cpushares=dict(type='int'), + cpusetcpus=dict(type='str'), + ), removed_in_version='2.12'), + dockerfile=dict(type='str', removed_in_version='2.12'), + force=dict(type='bool', removed_in_version='2.12'), + force_source=dict(type='bool', default=False), + force_absent=dict(type='bool', default=False), + force_tag=dict(type='bool', default=False), + http_timeout=dict(type='int', removed_in_version='2.12'), + load_path=dict(type='path'), + name=dict(type='str', required=True), + nocache=dict(type='bool', default=False, removed_in_version='2.12'), + path=dict(type='path', aliases=['build_path'], removed_in_version='2.12'), + pull=dict(type='bool', removed_in_version='2.12'), + push=dict(type='bool', default=False), + repository=dict(type='str'), + rm=dict(type='bool', default=True, removed_in_version='2.12'), + state=dict(type='str', default='present', choices=['absent', 'present', 'build']), + tag=dict(type='str', default='latest'), + use_tls=dict(type='str', choices=['no', 'encrypt', 'verify'], removed_in_version='2.11'), + buildargs=dict(type='dict', removed_in_version='2.12'), + ) + + required_if = [ + # ('state', 'present', ['source']), -- enable in Ansible 2.12. + # ('source', 'build', ['build']), -- enable in Ansible 2.12. + ('source', 'load', ['load_path']), + ] + + def detect_build_cache_from(client): + return client.module.params['build'] and client.module.params['build'].get('cache_from') is not None + + def detect_build_network(client): + return client.module.params['build'] and client.module.params['build'].get('network') is not None + + def detect_build_target(client): + return client.module.params['build'] and client.module.params['build'].get('target') is not None + + def detect_use_config_proxy(client): + return client.module.params['build'] and client.module.params['build'].get('use_config_proxy') is not None + + def detect_etc_hosts(client): + return client.module.params['build'] and bool(client.module.params['build'].get('etc_hosts')) + + option_minimal_versions = dict() + option_minimal_versions["build.cache_from"] = dict(docker_py_version='2.1.0', docker_api_version='1.25', detect_usage=detect_build_cache_from) + option_minimal_versions["build.network"] = dict(docker_py_version='2.4.0', docker_api_version='1.25', detect_usage=detect_build_network) + option_minimal_versions["build.target"] = dict(docker_py_version='2.4.0', detect_usage=detect_build_target) + option_minimal_versions["build.use_config_proxy"] = dict(docker_py_version='3.7.0', detect_usage=detect_use_config_proxy) + option_minimal_versions["build.etc_hosts"] = dict(docker_py_version='2.6.0', docker_api_version='1.27', detect_usage=detect_etc_hosts) + + client = AnsibleDockerClient( + argument_spec=argument_spec, + required_if=required_if, + supports_check_mode=True, + min_docker_version='1.8.0', + min_docker_api_version='1.20', + option_minimal_versions=option_minimal_versions, + ) + + if client.module.params['state'] == 'build': + client.module.warn('The "build" state has been deprecated for a long time ' + 'and will be removed in Ansible 2.11. 
Please use '
+                           '"present", which has the same meaning as "build".')
+        client.module.params['state'] = 'present'
+    if client.module.params['use_tls']:
+        client.module.warn('The "use_tls" option has been deprecated for a long time '
+                           'and will be removed in Ansible 2.11. Please use the '
+                           '"tls" and "validate_certs" options instead.')
+
+    if not is_valid_tag(client.module.params['tag'], allow_empty=True):
+        client.fail('"{0}" is not a valid docker tag!'.format(client.module.params['tag']))
+
+    build_options = dict(
+        container_limits='container_limits',
+        dockerfile='dockerfile',
+        http_timeout='http_timeout',
+        nocache='nocache',
+        path='path',
+        pull='pull',
+        rm='rm',
+        buildargs='args',
+    )
+    for option, build_option in build_options.items():
+        default_value = None
+        if option in ('rm', ):
+            default_value = True
+        elif option in ('nocache', ):
+            default_value = False
+        if client.module.params[option] != default_value:
+            if client.module.params['build'] is None:
+                client.module.params['build'] = dict()
+            if client.module.params['build'].get(build_option, default_value) != default_value:
+                client.fail('Cannot specify both %s and build.%s!' % (option, build_option))
+            client.module.params['build'][build_option] = client.module.params[option]
+            client.module.warn('Please specify build.%s instead of %s. The %s option '
+                               'has been renamed and will be removed in Ansible 2.12.' % (build_option, option, option))
+    if client.module.params['source'] == 'build':
+        if (not client.module.params['build'] or not client.module.params['build'].get('path')):
+            client.fail('If "source" is set to "build", the "build.path" option must be specified.')
+        if client.module.params['build'].get('pull') is None:
+            client.module.warn("The default for build.pull is currently 'yes', but will be changed to 'no' in Ansible 2.12. "
+                               "Please set build.pull explicitly to the value you need.")
+            client.module.params['build']['pull'] = True  # TODO: change to False in Ansible 2.12
+
+    if client.module.params['state'] == 'present' and client.module.params['source'] is None:
+        # Autodetection. To be removed in Ansible 2.12.
+        if (client.module.params['build'] or dict()).get('path'):
+            client.module.params['source'] = 'build'
+        elif client.module.params['load_path']:
+            client.module.params['source'] = 'load'
+        else:
+            client.module.params['source'] = 'pull'
+        client.module.warn('The value of the "source" option was determined to be "%s". '
+                           'Please set the "source" option explicitly. Autodetection will '
+                           'be removed in Ansible 2.12.' % client.module.params['source'])
+
+    if client.module.params['force']:
+        client.module.params['force_source'] = True
+        client.module.params['force_absent'] = True
+        client.module.params['force_tag'] = True
+        client.module.warn('The "force" option will be removed in Ansible 2.12.
Please ' + 'use the "force_source", "force_absent" or "force_tag" option ' + 'instead, depending on what you want to force.') + + try: + results = dict( + changed=False, + actions=[], + image={} + ) + + ImageManager(client, results) + client.module.exit_json(**results) + except DockerException as e: + client.fail('An unexpected docker error occurred: {0}'.format(e), exception=traceback.format_exc()) + except RequestException as e: + client.fail('An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(e), exception=traceback.format_exc()) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/docker/docker_image_facts.py b/plugins/modules/cloud/docker/docker_image_facts.py new file mode 120000 index 0000000000..f9db45b3c7 --- /dev/null +++ b/plugins/modules/cloud/docker/docker_image_facts.py @@ -0,0 +1 @@ +docker_image_info.py \ No newline at end of file diff --git a/plugins/modules/cloud/docker/docker_image_info.py b/plugins/modules/cloud/docker/docker_image_info.py new file mode 100644 index 0000000000..0affde3169 --- /dev/null +++ b/plugins/modules/cloud/docker/docker_image_info.py @@ -0,0 +1,274 @@ +#!/usr/bin/python +# +# Copyright 2016 Red Hat | Ansible +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: docker_image_info + +short_description: Inspect docker images + + +description: + - Provide one or more image names, and the module will inspect each, returning an array of inspection results. + - If an image does not exist locally, it will not appear in the results. If you want to check whether an image exists + locally, you can call the module with the image name, then check whether the result list is empty (image does not + exist) or has one element (the image exists locally). + - The module will not attempt to pull images from registries. Use M(docker_image) with I(source) set to C(pull) + to ensure an image is pulled. + +notes: + - This module was called C(docker_image_facts) before Ansible 2.8. The usage did not change. + +options: + name: + description: + - An image name or a list of image names. Name format will be C(name[:tag]) or C(repository/name[:tag]), + where C(tag) is optional. If a tag is not provided, C(latest) will be used. Instead of image names, also + image IDs can be used. + - If no name is provided, a list of all images will be returned. + type: list + elements: str + +extends_documentation_fragment: +- community.general.docker +- community.general.docker.docker_py_1_documentation + + +requirements: + - "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 1.8.0 (use L(docker-py,https://pypi.org/project/docker-py/) for Python 2.6)" + - "Docker API >= 1.20" + +author: + - Chris Houseknecht (@chouseknecht) + +''' + +EXAMPLES = ''' +- name: Inspect a single image + docker_image_info: + name: pacur/centos-7 + +- name: Inspect multiple images + docker_image_info: + name: + - pacur/centos-7 + - sinatra + register: result + +- name: Make sure that both images pacur/centos-7 and sinatra exist locally + assert: + that: + - result.images | length == 2 +''' + +RETURN = ''' +images: + description: + - Inspection results for the selected images. + - The list only contains inspection results of images existing locally. 
+ returned: always + type: list + elements: dict + sample: [ + { + "Architecture": "amd64", + "Author": "", + "Comment": "", + "Config": { + "AttachStderr": false, + "AttachStdin": false, + "AttachStdout": false, + "Cmd": [ + "/etc/docker/registry/config.yml" + ], + "Domainname": "", + "Entrypoint": [ + "/bin/registry" + ], + "Env": [ + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + ], + "ExposedPorts": { + "5000/tcp": {} + }, + "Hostname": "e5c68db50333", + "Image": "c72dce2618dc8f7b794d2b2c2b1e64e0205ead5befc294f8111da23bd6a2c799", + "Labels": {}, + "OnBuild": [], + "OpenStdin": false, + "StdinOnce": false, + "Tty": false, + "User": "", + "Volumes": { + "/var/lib/registry": {} + }, + "WorkingDir": "" + }, + "Container": "e83a452b8fb89d78a25a6739457050131ca5c863629a47639530d9ad2008d610", + "ContainerConfig": { + "AttachStderr": false, + "AttachStdin": false, + "AttachStdout": false, + "Cmd": [ + "/bin/sh", + "-c", + '#(nop) CMD ["/etc/docker/registry/config.yml"]' + ], + "Domainname": "", + "Entrypoint": [ + "/bin/registry" + ], + "Env": [ + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + ], + "ExposedPorts": { + "5000/tcp": {} + }, + "Hostname": "e5c68db50333", + "Image": "c72dce2618dc8f7b794d2b2c2b1e64e0205ead5befc294f8111da23bd6a2c799", + "Labels": {}, + "OnBuild": [], + "OpenStdin": false, + "StdinOnce": false, + "Tty": false, + "User": "", + "Volumes": { + "/var/lib/registry": {} + }, + "WorkingDir": "" + }, + "Created": "2016-03-08T21:08:15.399680378Z", + "DockerVersion": "1.9.1", + "GraphDriver": { + "Data": null, + "Name": "aufs" + }, + "Id": "53773d8552f07b730f3e19979e32499519807d67b344141d965463a950a66e08", + "Name": "registry:2", + "Os": "linux", + "Parent": "f0b1f729f784b755e7bf9c8c2e65d8a0a35a533769c2588f02895f6781ac0805", + "RepoDigests": [], + "RepoTags": [ + "registry:2" + ], + "Size": 0, + "VirtualSize": 165808884 + } + ] +''' + +import traceback + +try: + from docker import utils + from docker.errors import DockerException +except ImportError: + # missing Docker SDK for Python handled in ansible.module_utils.docker.common + pass + +from ansible_collections.community.general.plugins.module_utils.docker.common import ( + AnsibleDockerClient, + DockerBaseClass, + is_image_name_id, + RequestException, +) + + +class ImageManager(DockerBaseClass): + + def __init__(self, client, results): + + super(ImageManager, self).__init__() + + self.client = client + self.results = results + self.name = self.client.module.params.get('name') + self.log("Gathering facts for images: %s" % (str(self.name))) + + if self.name: + self.results['images'] = self.get_facts() + else: + self.results['images'] = self.get_all_images() + + def fail(self, msg): + self.client.fail(msg) + + def get_facts(self): + ''' + Lookup and inspect each image name found in the names parameter. 
+
+        :return: list of image inspection dicts
+        '''
+
+        results = []
+
+        names = self.name
+        if not isinstance(names, list):
+            names = [names]
+
+        for name in names:
+            if is_image_name_id(name):
+                self.log('Fetching image %s (ID)' % (name))
+                image = self.client.find_image_by_id(name)
+            else:
+                repository, tag = utils.parse_repository_tag(name)
+                if not tag:
+                    tag = 'latest'
+                self.log('Fetching image %s:%s' % (repository, tag))
+                image = self.client.find_image(name=repository, tag=tag)
+            if image:
+                results.append(image)
+        return results
+
+    def get_all_images(self):
+        results = []
+        images = self.client.images()
+        for image in images:
+            try:
+                inspection = self.client.inspect_image(image['Id'])
+            except Exception as exc:
+                self.fail("Error inspecting image %s - %s" % (image['Id'], str(exc)))
+            results.append(inspection)
+        return results
+
+
+def main():
+    argument_spec = dict(
+        name=dict(type='list', elements='str'),
+    )
+
+    client = AnsibleDockerClient(
+        argument_spec=argument_spec,
+        supports_check_mode=True,
+        min_docker_api_version='1.20',
+    )
+    if client.module._name == 'docker_image_facts':
+        client.module.deprecate("The 'docker_image_facts' module has been renamed to 'docker_image_info'", version='2.12')
+
+    try:
+        results = dict(
+            changed=False,
+            images=[]
+        )
+
+        ImageManager(client, results)
+        client.module.exit_json(**results)
+    except DockerException as e:
+        client.fail('An unexpected docker error occurred: {0}'.format(e), exception=traceback.format_exc())
+    except RequestException as e:
+        client.fail('An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+    main()
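(Editorial aside, not part of the diff.) `get_facts` above defaults the tag via docker-py's `parse_repository_tag`; a quick illustration of its behavior (`parse_repository_tag` is a real helper in `docker.utils`; the image names are examples):

```python
from docker.utils import parse_repository_tag

# The module fills in 'latest' whenever no explicit tag is present.
for name in ('sinatra', 'pacur/centos-7:7', 'registry.example.com:5000/app'):
    repository, tag = parse_repository_tag(name)
    print(repository, tag or 'latest')
# -> sinatra latest
# -> pacur/centos-7 7
# -> registry.example.com:5000/app latest   (port colon is not mistaken for a tag)
```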
diff --git a/plugins/modules/cloud/docker/docker_login.py b/plugins/modules/cloud/docker/docker_login.py
new file mode 100644
index 0000000000..e89f8e2ea2
--- /dev/null
+++ b/plugins/modules/cloud/docker/docker_login.py
@@ -0,0 +1,490 @@
+#!/usr/bin/python
+#
+# (c) 2016 Olaf Kilian
+#     Chris Houseknecht,
+#     James Tanner,
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: docker_login
+short_description: Log into a Docker registry.
+description:
+  - Provides functionality similar to the "docker login" command.
+  - Authenticate with a docker registry and add the credentials to your local Docker config file, or to the
+    credential store associated with the registry. Storing the credentials allows future connections to the
+    registry using tools such as Ansible's Docker modules, the Docker CLI and the Docker SDK for Python without
+    needing to provide credentials again.
+  - Running in check mode will perform the authentication without updating the config file.
+options:
+  registry_url:
+    description:
+      - The registry URL.
+    type: str
+    default: "https://index.docker.io/v1/"
+    aliases:
+      - registry
+      - url
+  username:
+    description:
+      - The username for the registry account.
+      - Required when I(state) is C(present).
+    type: str
+  password:
+    description:
+      - The plaintext password for the registry account.
+      - Required when I(state) is C(present).
+    type: str
+  email:
+    description:
+      - Does nothing, do not use.
+      - Will be removed in Ansible 2.14.
+    type: str
+  reauthorize:
+    description:
+      - Refresh existing authentication found in the configuration file.
+    type: bool
+    default: no
+    aliases:
+      - reauth
+  config_path:
+    description:
+      - Custom path to the Docker CLI configuration file.
+    type: path
+    default: ~/.docker/config.json
+    aliases:
+      - dockercfg_path
+  state:
+    description:
+      - This controls the current state of the user. C(present) will log a user in, C(absent) will log them out.
+      - To log out you only need the registry server, which defaults to DockerHub.
+      - Before 2.1 you could ONLY log in.
+      - Docker does not support 'logout' with a custom config file.
+    type: str
+    default: 'present'
+    choices: ['present', 'absent']
+
+extends_documentation_fragment:
+- community.general.docker
+- community.general.docker.docker_py_1_documentation
+
+requirements:
+  - "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 1.8.0 (use L(docker-py,https://pypi.org/project/docker-py/) for Python 2.6)"
+  - "Python bindings for the docker credentials store API >= 0.2.1
+    (use L(docker-pycreds,https://pypi.org/project/docker-pycreds/) when using Docker SDK for Python < 4.0.0)"
+  - "Docker API >= 1.20"
+author:
+  - Olaf Kilian (@olsaki)
+  - Chris Houseknecht (@chouseknecht)
+'''
+
+EXAMPLES = '''
+
+- name: Log into DockerHub
+  docker_login:
+    username: docker
+    password: rekcod
+
+- name: Log into private registry and force re-authorization
+  docker_login:
+    registry_url: your.private.registry.io
+    username: yourself
+    password: secrets3
+    reauthorize: yes
+
+- name: Log into DockerHub using a custom config file
+  docker_login:
+    username: docker
+    password: rekcod
+    config_path: /tmp/.mydockercfg
+
+- name: Log out of DockerHub
+  docker_login:
+    state: absent
+'''
+
+RETURN = '''
+login_result:
+  description: Results from the login.
+  returned: when state='present'
+  type: dict
+  sample: {
+    "serveraddress": "localhost:5000",
+    "username": "testuser"
+  }
+'''
+
+import base64
+import json
+import os
+import re
+import traceback
+from ansible.module_utils._text import to_bytes, to_text
+
+try:
+    from docker.errors import DockerException
+    from docker import auth
+
+    # Earlier versions of docker/docker-py put decode_auth
+    # in docker.auth.auth instead of docker.auth
+    if hasattr(auth, 'decode_auth'):
+        from docker.auth import decode_auth
+    else:
+        from docker.auth.auth import decode_auth
+
+except ImportError:
+    # missing Docker SDK for Python handled in ansible.module_utils.docker.common
+    pass
+
+from ansible_collections.community.general.plugins.module_utils.docker.common import (
+    AnsibleDockerClient,
+    HAS_DOCKER_PY,
+    DEFAULT_DOCKER_REGISTRY,
+    DockerBaseClass,
+    EMAIL_REGEX,
+    RequestException,
+)
+
+NEEDS_DOCKER_PYCREDS = False
+
+# Early versions of docker/docker-py rely on docker-pycreds for
+# the credential store api.
+if HAS_DOCKER_PY:
+    try:
+        from docker.credentials.errors import StoreError, CredentialsNotFound
+        from docker.credentials import Store
+    except ImportError:
+        try:
+            from dockerpycreds.errors import StoreError, CredentialsNotFound
+            from dockerpycreds.store import Store
+        except ImportError as exc:
+            HAS_DOCKER_ERROR = str(exc)
+            NEEDS_DOCKER_PYCREDS = True
+
+
+if NEEDS_DOCKER_PYCREDS:
+    # docker-pycreds missing, so we need to create some placeholder classes
+    # to allow instantiation.
+
+    class StoreError(Exception):
+        pass
+
+    class CredentialsNotFound(Exception):
+        pass
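(Editorial aside, not part of the diff.) `DockerFileStore` below writes the same `config.json` layout the Docker CLI uses: base64 of `"username:password"` under an `auths` key. A minimal sketch of just that encoding, standard library only; the server and credentials are placeholders:

```python
import base64
import json

def render_docker_config(server, username, password):
    # config.json stores base64("username:password") per registry under "auths".
    auth = base64.b64encode(('%s:%s' % (username, password)).encode('utf-8')).decode('ascii')
    return json.dumps({'auths': {server: {'auth': auth}}}, indent=4, sort_keys=True)

print(render_docker_config('https://index.docker.io/v1/', 'testuser', 's3cret'))
```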
+
+
+class DockerFileStore(object):
+    '''
+    A custom credential store class that implements only the functionality we need to
+    update the docker config file when no credential helper is provided.
+    '''
+
+    program = ""
+
+    def __init__(self, config_path):
+        self._config_path = config_path
+
+        # Make sure we have a minimal config if none is available.
+        self._config = dict(
+            auths=dict()
+        )
+
+        try:
+            # Attempt to read the existing config.
+            with open(self._config_path, "r") as f:
+                config = json.load(f)
+        except (ValueError, IOError):
+            # No config found or an invalid config found so we'll ignore it.
+            config = dict()
+
+        # Update our internal config with whatever was loaded.
+        self._config.update(config)
+
+    @property
+    def config_path(self):
+        '''
+        Return the config path configured in this DockerFileStore instance.
+        '''
+
+        return self._config_path
+
+    def get(self, server):
+        '''
+        Retrieve credentials for `server` if there are any in the config file.
+        Otherwise raise a `StoreError`
+        '''
+
+        server_creds = self._config['auths'].get(server)
+        if not server_creds:
+            raise CredentialsNotFound('No matching credentials')
+
+        (username, password) = decode_auth(server_creds['auth'])
+
+        return dict(
+            Username=username,
+            Secret=password
+        )
+
+    def _write(self):
+        '''
+        Write config back out to disk.
+        '''
+        # Make sure the directory exists
+        config_dir = os.path.dirname(self._config_path)
+        if not os.path.exists(config_dir):
+            os.makedirs(config_dir)
+        # Write config; make sure it has permissions 0o600
+        content = json.dumps(self._config, indent=4, sort_keys=True).encode('utf-8')
+        f = os.open(self._config_path, os.O_WRONLY | os.O_CREAT | os.O_TRUNC, 0o600)
+        try:
+            os.write(f, content)
+        finally:
+            os.close(f)
+
+    def store(self, server, username, password):
+        '''
+        Add credentials for `server` to the current configuration.
+        '''
+
+        b64auth = base64.b64encode(
+            to_bytes(username) + b':' + to_bytes(password)
+        )
+        auth = to_text(b64auth)
+
+        # build up the auth structure
+        new_auth = dict(
+            auths=dict()
+        )
+        new_auth['auths'][server] = dict(
+            auth=auth
+        )
+
+        # Merge into the existing auths rather than replacing them wholesale,
+        # so credentials for other registries are preserved.
+        self._config['auths'].update(new_auth['auths'])
+        self._write()
+
+    def erase(self, server):
+        '''
+        Remove credentials for the given server from the configuration.
+        '''
+
+        self._config['auths'].pop(server)
+        self._write()
+
+
+class LoginManager(DockerBaseClass):
+
+    def __init__(self, client, results):
+
+        super(LoginManager, self).__init__()
+
+        self.client = client
+        self.results = results
+        parameters = self.client.module.params
+        self.check_mode = self.client.check_mode
+
+        self.registry_url = parameters.get('registry_url')
+        self.username = parameters.get('username')
+        self.password = parameters.get('password')
+        self.email = parameters.get('email')
+        self.reauthorize = parameters.get('reauthorize')
+        self.config_path = parameters.get('config_path')
+        self.state = parameters.get('state')
+
+    def run(self):
+        '''
+        Do the actual work of this task here. This allows instantiation for partial
+        testing.
+        '''
+
+        if self.state == 'present':
+            self.login()
+        else:
+            self.logout()
+
+    def fail(self, msg):
+        self.client.fail(msg)
+
+    def login(self):
+        '''
+        Log into the registry with provided username/password. On success update the config
+        file with the new authorization.
+
+        :return: None
+        '''
+
+        if self.email and not re.match(EMAIL_REGEX, self.email):
+            self.fail("Parameter error: the email address appears to be incorrect.
Expecting it to match " + "/%s/" % (EMAIL_REGEX)) + + self.results['actions'].append("Logged into %s" % (self.registry_url)) + self.log("Log into %s with username %s" % (self.registry_url, self.username)) + try: + response = self.client.login( + self.username, + password=self.password, + email=self.email, + registry=self.registry_url, + reauth=self.reauthorize, + dockercfg_path=self.config_path + ) + except Exception as exc: + self.fail("Logging into %s for user %s failed - %s" % (self.registry_url, self.username, str(exc))) + + # If user is already logged in, then response contains password for user + if 'password' in response: + # This returns correct password if user is logged in and wrong password is given. + # So if it returns another password as we passed, and the user didn't request to + # reauthorize, still do it. + if not self.reauthorize and response['password'] != self.password: + try: + response = self.client.login( + self.username, + password=self.password, + email=self.email, + registry=self.registry_url, + reauth=True, + dockercfg_path=self.config_path + ) + except Exception as exc: + self.fail("Logging into %s for user %s failed - %s" % (self.registry_url, self.username, str(exc))) + response.pop('password', None) + self.results['login_result'] = response + + self.update_credentials() + + def logout(self): + ''' + Log out of the registry. On success update the config file. + + :return: None + ''' + + # Get the configuration store. + store = self.get_credential_store_instance(self.registry_url, self.config_path) + + try: + current = store.get(self.registry_url) + except CredentialsNotFound: + # get raises an exception on not found. + self.log("Credentials for %s not present, doing nothing." % (self.registry_url)) + self.results['changed'] = False + return + + if not self.check_mode: + store.erase(self.registry_url) + self.results['changed'] = True + + def update_credentials(self): + ''' + If the authorization is not stored attempt to store authorization values via + the appropriate credential helper or to the config file. + + :return: None + ''' + + # Check to see if credentials already exist. + store = self.get_credential_store_instance(self.registry_url, self.config_path) + + try: + current = store.get(self.registry_url) + except CredentialsNotFound: + # get raises an exception on not found. + current = dict( + Username='', + Secret='' + ) + + if current['Username'] != self.username or current['Secret'] != self.password or self.reauthorize: + if not self.check_mode: + store.store(self.registry_url, self.username, self.password) + self.log("Writing credentials to configured helper %s for %s" % (store.program, self.registry_url)) + self.results['actions'].append("Wrote credentials to configured helper %s for %s" % ( + store.program, self.registry_url)) + self.results['changed'] = True + + def get_credential_store_instance(self, registry, dockercfg_path): + ''' + Return an instance of docker.credentials.Store used by the given registry. + + :return: A Store or None + :rtype: Union[docker.credentials.Store, NoneType] + ''' + + # Older versions of docker-py don't have this feature. 
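+        # Editorial note: credstore_env, when present, carries environment
+        # overrides for the credential-helper process; on clients without the
+        # attribute we simply fall back to None below.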
+ try: + credstore_env = self.client.credstore_env + except AttributeError: + credstore_env = None + + config = auth.load_config(config_path=dockercfg_path) + + if hasattr(auth, 'get_credential_store'): + store_name = auth.get_credential_store(config, registry) + elif 'credsStore' in config: + store_name = config['credsStore'] + else: + store_name = None + + # Make sure that there is a credential helper before trying to instantiate a + # Store object. + if store_name: + self.log("Found credential store %s" % store_name) + return Store(store_name, environment=credstore_env) + + return DockerFileStore(dockercfg_path) + + +def main(): + + argument_spec = dict( + registry_url=dict(type='str', default=DEFAULT_DOCKER_REGISTRY, aliases=['registry', 'url']), + username=dict(type='str'), + password=dict(type='str', no_log=True), + email=dict(type='str', removed_in_version='2.14'), + reauthorize=dict(type='bool', default=False, aliases=['reauth']), + state=dict(type='str', default='present', choices=['present', 'absent']), + config_path=dict(type='path', default='~/.docker/config.json', aliases=['dockercfg_path']), + ) + + required_if = [ + ('state', 'present', ['username', 'password']), + ] + + client = AnsibleDockerClient( + argument_spec=argument_spec, + supports_check_mode=True, + required_if=required_if, + min_docker_api_version='1.20', + ) + + try: + results = dict( + changed=False, + actions=[], + login_result={} + ) + + manager = LoginManager(client, results) + manager.run() + + if 'actions' in results: + del results['actions'] + client.module.exit_json(**results) + except DockerException as e: + client.fail('An unexpected docker error occurred: {0}'.format(e), exception=traceback.format_exc()) + except RequestException as e: + client.fail('An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(e), exception=traceback.format_exc()) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/docker/docker_network.py b/plugins/modules/cloud/docker/docker_network.py new file mode 100644 index 0000000000..f3424b6705 --- /dev/null +++ b/plugins/modules/cloud/docker/docker_network.py @@ -0,0 +1,722 @@ +#!/usr/bin/python +# +# Copyright 2016 Red Hat | Ansible +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +module: docker_network +short_description: Manage Docker networks +description: + - Create/remove Docker networks and connect containers to them. + - Performs largely the same function as the "docker network" CLI subcommand. +options: + name: + description: + - Name of the network to operate on. + type: str + required: yes + aliases: + - network_name + + connected: + description: + - List of container names or container IDs to connect to a network. + - Please note that the module only makes sure that these containers are connected to the network, + but does not care about connection options. If you rely on specific IP addresses etc., use the + M(docker_container) module to ensure your containers are correctly connected to this network. + type: list + elements: str + aliases: + - containers + + driver: + description: + - Specify the type of network. Docker provides bridge and overlay drivers, but 3rd party drivers can also be used. 
+ type: str + default: bridge + + driver_options: + description: + - Dictionary of network settings. Consult docker docs for valid options and values. + type: dict + + force: + description: + - With state C(absent) forces disconnecting all containers from the + network prior to deleting the network. With state C(present) will + disconnect all containers, delete the network and re-create the + network. + - This option is required if you have changed the IPAM or driver options + and want an existing network to be updated to use the new options. + type: bool + default: no + + appends: + description: + - By default the connected list is canonical, meaning containers not on the list are removed from the network. + - Use I(appends) to leave existing containers connected. + type: bool + default: no + aliases: + - incremental + + enable_ipv6: + description: + - Enable IPv6 networking. + type: bool + + ipam_driver: + description: + - Specify an IPAM driver. + type: str + + ipam_driver_options: + description: + - Dictionary of IPAM driver options. + type: dict + + ipam_options: + description: + - Dictionary of IPAM options. + - Deprecated in 2.8, will be removed in 2.12. Use parameter I(ipam_config) instead. In Docker 1.10.0, IPAM + options were introduced (see L(here,https://github.com/moby/moby/pull/17316)). This module parameter addresses + the IPAM config not the newly introduced IPAM options. For the IPAM options, see the I(ipam_driver_options) + parameter. + type: dict + suboptions: + subnet: + description: + - IP subset in CIDR notation. + type: str + iprange: + description: + - IP address range in CIDR notation. + type: str + gateway: + description: + - IP gateway address. + type: str + aux_addresses: + description: + - Auxiliary IP addresses used by Network driver, as a mapping from hostname to IP. + type: dict + + ipam_config: + description: + - List of IPAM config blocks. Consult + L(Docker docs,https://docs.docker.com/compose/compose-file/compose-file-v2/#ipam) for valid options and values. + Note that I(iprange) is spelled differently here (we use the notation from the Docker SDK for Python). + type: list + elements: dict + suboptions: + subnet: + description: + - IP subset in CIDR notation. + type: str + iprange: + description: + - IP address range in CIDR notation. + type: str + gateway: + description: + - IP gateway address. + type: str + aux_addresses: + description: + - Auxiliary IP addresses used by Network driver, as a mapping from hostname to IP. + type: dict + + state: + description: + - C(absent) deletes the network. If a network has connected containers, it + cannot be deleted. Use the I(force) option to disconnect all containers + and delete the network. + - C(present) creates the network, if it does not already exist with the + specified parameters, and connects the list of containers provided via + the connected parameter. Containers not on the list will be disconnected. + An empty list will leave no containers connected to the network. Use the + I(appends) option to leave existing containers connected. Use the I(force) + options to force re-creation of the network. + type: str + default: present + choices: + - absent + - present + + internal: + description: + - Restrict external access to the network. + type: bool + + labels: + description: + - Dictionary of labels. + type: dict + + scope: + description: + - Specify the network's scope. 
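
The connected/appends semantics documented above reduce to simple set arithmetic; this is an illustration with invented container names, not the module's actual implementation:

    # Currently attached containers versus the 'connected' parameter.
    current = {'container_a', 'container_b'}
    desired = {'container_b', 'container_c'}
    appends = False  # the appends/incremental option

    to_connect = desired - current
    to_disconnect = set() if appends else current - desired
    print(sorted(to_connect))     # ['container_c']
    print(sorted(to_disconnect))  # ['container_a']
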
+ type: str + choices: + - local + - global + - swarm + + attachable: + description: + - If enabled, and the network is in the global scope, non-service containers on worker nodes will be able to connect to the network. + type: bool + +extends_documentation_fragment: +- community.general.docker +- community.general.docker.docker_py_1_documentation + + +notes: + - When network options are changed, the module disconnects all containers from the network, deletes the network, and re-creates the network. + It does not try to reconnect containers, except the ones listed in (I(connected), and even for these, it does not consider specific + connection options like fixed IP addresses or MAC addresses. If you need more control over how the containers are connected to the + network, loop the M(docker_container) module to loop over your containers to make sure they are connected properly. + - The module does not support Docker Swarm, i.e. it will not try to disconnect or reconnect services. If services are connected to the + network, deleting the network will fail. When network options are changed, the network has to be deleted and recreated, so this will + fail as well. + +author: + - "Ben Keith (@keitwb)" + - "Chris Houseknecht (@chouseknecht)" + - "Dave Bendit (@DBendit)" + +requirements: + - "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 1.10.0 (use L(docker-py,https://pypi.org/project/docker-py/) for Python 2.6)" + - "The docker server >= 1.10.0" +''' + +EXAMPLES = ''' +- name: Create a network + docker_network: + name: network_one + +- name: Remove all but selected list of containers + docker_network: + name: network_one + connected: + - container_a + - container_b + - container_c + +- name: Remove a single container + docker_network: + name: network_one + connected: "{{ fulllist|difference(['container_a']) }}" + +- name: Add a container to a network, leaving existing containers connected + docker_network: + name: network_one + connected: + - container_a + appends: yes + +- name: Create a network with driver options + docker_network: + name: network_two + driver_options: + com.docker.network.bridge.name: net2 + +- name: Create a network with custom IPAM config + docker_network: + name: network_three + ipam_config: + - subnet: 172.3.27.0/24 + gateway: 172.3.27.2 + iprange: 172.3.27.0/26 + aux_addresses: + host1: 172.3.27.3 + host2: 172.3.27.4 + +- name: Create a network with labels + docker_network: + name: network_four + labels: + key1: value1 + key2: value2 + +- name: Create a network with IPv6 IPAM config + docker_network: + name: network_ipv6_one + enable_ipv6: yes + ipam_config: + - subnet: fdd1:ac8c:0557:7ce1::/64 + +- name: Create a network with IPv6 and custom IPv4 IPAM config + docker_network: + name: network_ipv6_two + enable_ipv6: yes + ipam_config: + - subnet: 172.4.27.0/24 + - subnet: fdd1:ac8c:0557:7ce2::/64 + +- name: Delete a network, disconnecting all containers + docker_network: + name: network_one + state: absent + force: yes +''' + +RETURN = ''' +network: + description: + - Network inspection results for the affected network. + - Note that facts are part of the registered vars since Ansible 2.8. For compatibility reasons, the facts + are also accessible directly as C(docker_network). Note that the returned fact will be removed in Ansible 2.12. 
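
How the module decides that a requested ipam_config block matches an existing one is easiest to see in isolation. The helper below is reproduced verbatim from further down in this file; the sample values are invented:

    def dicts_are_essentially_equal(a, b):
        """Make sure that a is a subset of b, where None entries of a are ignored."""
        for k, v in a.items():
            if v is None:
                continue
            if b.get(k) != v:
                return False
        return True

    desired = {'subnet': '172.3.27.0/24', 'iprange': None, 'gateway': None, 'aux_addresses': None}
    active = {'subnet': '172.3.27.0/24', 'gateway': '172.3.27.2'}
    print(dicts_are_essentially_equal(desired, active))  # True: unset (None) keys are ignored
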
+ returned: success + type: dict + sample: {} +''' + +import re +import traceback + +from distutils.version import LooseVersion + +from ansible_collections.community.general.plugins.module_utils.docker.common import ( + AnsibleDockerClient, + DockerBaseClass, + docker_version, + DifferenceTracker, + clean_dict_booleans_for_docker_api, + RequestException, +) + +try: + from docker import utils + from docker.errors import DockerException + if LooseVersion(docker_version) >= LooseVersion('2.0.0'): + from docker.types import IPAMPool, IPAMConfig +except Exception: + # missing Docker SDK for Python handled in ansible.module_utils.docker.common + pass + + +class TaskParameters(DockerBaseClass): + def __init__(self, client): + super(TaskParameters, self).__init__() + self.client = client + + self.name = None + self.connected = None + self.driver = None + self.driver_options = None + self.ipam_driver = None + self.ipam_driver_options = None + self.ipam_options = None + self.ipam_config = None + self.appends = None + self.force = None + self.internal = None + self.labels = None + self.debug = None + self.enable_ipv6 = None + self.scope = None + self.attachable = None + + for key, value in client.module.params.items(): + setattr(self, key, value) + + +def container_names_in_network(network): + return [c['Name'] for c in network['Containers'].values()] if network['Containers'] else [] + + +CIDR_IPV4 = re.compile(r'^([0-9]{1,3}\.){3}[0-9]{1,3}/([0-9]|[1-2][0-9]|3[0-2])$') +CIDR_IPV6 = re.compile(r'^[0-9a-fA-F:]+/([0-9]|[1-9][0-9]|1[0-2][0-9])$') + + +def validate_cidr(cidr): + """Validate CIDR. Return IP version of a CIDR string on success. + + :param cidr: Valid CIDR + :type cidr: str + :return: ``ipv4`` or ``ipv6`` + :rtype: str + :raises ValueError: If ``cidr`` is not a valid CIDR + """ + if CIDR_IPV4.match(cidr): + return 'ipv4' + elif CIDR_IPV6.match(cidr): + return 'ipv6' + raise ValueError('"{0}" is not a valid CIDR'.format(cidr)) + + +def normalize_ipam_config_key(key): + """Normalizes IPAM config keys returned by Docker API to match Ansible keys. 
+
+    :param key: Docker API key
+    :type key: str
+    :return: Ansible module key
+    :rtype: str
+    """
+    special_cases = {
+        'AuxiliaryAddresses': 'aux_addresses'
+    }
+    return special_cases.get(key, key.lower())
+
+
+def dicts_are_essentially_equal(a, b):
+    """Make sure that a is a subset of b, where None entries of a are ignored."""
+    for k, v in a.items():
+        if v is None:
+            continue
+        if b.get(k) != v:
+            return False
+    return True
+
+
+class DockerNetworkManager(object):
+
+    def __init__(self, client):
+        self.client = client
+        self.parameters = TaskParameters(client)
+        self.check_mode = self.client.check_mode
+        self.results = {
+            u'changed': False,
+            u'actions': []
+        }
+        self.diff = self.client.module._diff
+        self.diff_tracker = DifferenceTracker()
+        self.diff_result = dict()
+
+        self.existing_network = self.get_existing_network()
+
+        if not self.parameters.connected and self.existing_network:
+            self.parameters.connected = container_names_in_network(self.existing_network)
+
+        if (self.parameters.ipam_options['subnet'] or self.parameters.ipam_options['iprange'] or
+                self.parameters.ipam_options['gateway'] or self.parameters.ipam_options['aux_addresses']):
+            self.parameters.ipam_config = [self.parameters.ipam_options]
+
+        if self.parameters.ipam_config:
+            try:
+                for ipam_config in self.parameters.ipam_config:
+                    validate_cidr(ipam_config['subnet'])
+            except ValueError as e:
+                self.client.fail(str(e))
+
+        if self.parameters.driver_options:
+            self.parameters.driver_options = clean_dict_booleans_for_docker_api(self.parameters.driver_options)
+
+        state = self.parameters.state
+        if state == 'present':
+            self.present()
+        elif state == 'absent':
+            self.absent()
+
+        if self.diff or self.check_mode or self.parameters.debug:
+            if self.diff:
+                self.diff_result['before'], self.diff_result['after'] = self.diff_tracker.get_before_after()
+            self.results['diff'] = self.diff_result
+
+    def get_existing_network(self):
+        return self.client.get_network(name=self.parameters.name)
+
+    def has_different_config(self, net):
+        '''
+        Evaluates an existing network and returns a tuple containing a boolean
+        indicating if the configuration is different and a list of differences.
+ + :param net: the inspection output for an existing network + :return: (bool, list) + ''' + differences = DifferenceTracker() + if self.parameters.driver and self.parameters.driver != net['Driver']: + differences.add('driver', + parameter=self.parameters.driver, + active=net['Driver']) + if self.parameters.driver_options: + if not net.get('Options'): + differences.add('driver_options', + parameter=self.parameters.driver_options, + active=net.get('Options')) + else: + for key, value in self.parameters.driver_options.items(): + if not (key in net['Options']) or value != net['Options'][key]: + differences.add('driver_options.%s' % key, + parameter=value, + active=net['Options'].get(key)) + + if self.parameters.ipam_driver: + if not net.get('IPAM') or net['IPAM']['Driver'] != self.parameters.ipam_driver: + differences.add('ipam_driver', + parameter=self.parameters.ipam_driver, + active=net.get('IPAM')) + + if self.parameters.ipam_driver_options is not None: + ipam_driver_options = net['IPAM'].get('Options') or {} + if ipam_driver_options != self.parameters.ipam_driver_options: + differences.add('ipam_driver_options', + parameter=self.parameters.ipam_driver_options, + active=ipam_driver_options) + + if self.parameters.ipam_config is not None and self.parameters.ipam_config: + if not net.get('IPAM') or not net['IPAM']['Config']: + differences.add('ipam_config', + parameter=self.parameters.ipam_config, + active=net.get('IPAM', {}).get('Config')) + else: + # Put network's IPAM config into the same format as module's IPAM config + net_ipam_configs = [] + for net_ipam_config in net['IPAM']['Config']: + config = dict() + for k, v in net_ipam_config.items(): + config[normalize_ipam_config_key(k)] = v + net_ipam_configs.append(config) + # Compare lists of dicts as sets of dicts + for idx, ipam_config in enumerate(self.parameters.ipam_config): + net_config = dict() + for net_ipam_config in net_ipam_configs: + if dicts_are_essentially_equal(ipam_config, net_ipam_config): + net_config = net_ipam_config + break + for key, value in ipam_config.items(): + if value is None: + # due to recursive argument_spec, all keys are always present + # (but have default value None if not specified) + continue + if value != net_config.get(key): + differences.add('ipam_config[%s].%s' % (idx, key), + parameter=value, + active=net_config.get(key)) + + if self.parameters.enable_ipv6 is not None and self.parameters.enable_ipv6 != net.get('EnableIPv6', False): + differences.add('enable_ipv6', + parameter=self.parameters.enable_ipv6, + active=net.get('EnableIPv6', False)) + + if self.parameters.internal is not None and self.parameters.internal != net.get('Internal', False): + differences.add('internal', + parameter=self.parameters.internal, + active=net.get('Internal')) + + if self.parameters.scope is not None and self.parameters.scope != net.get('Scope'): + differences.add('scope', + parameter=self.parameters.scope, + active=net.get('Scope')) + + if self.parameters.attachable is not None and self.parameters.attachable != net.get('Attachable', False): + differences.add('attachable', + parameter=self.parameters.attachable, + active=net.get('Attachable')) + if self.parameters.labels: + if not net.get('Labels'): + differences.add('labels', + parameter=self.parameters.labels, + active=net.get('Labels')) + else: + for key, value in self.parameters.labels.items(): + if not (key in net['Labels']) or value != net['Labels'][key]: + differences.add('labels.%s' % key, + parameter=value, + active=net['Labels'].get(key)) + + return not 
differences.empty, differences + + def create_network(self): + if not self.existing_network: + params = dict( + driver=self.parameters.driver, + options=self.parameters.driver_options, + ) + + ipam_pools = [] + if self.parameters.ipam_config: + for ipam_pool in self.parameters.ipam_config: + if LooseVersion(docker_version) >= LooseVersion('2.0.0'): + ipam_pools.append(IPAMPool(**ipam_pool)) + else: + ipam_pools.append(utils.create_ipam_pool(**ipam_pool)) + + if self.parameters.ipam_driver or self.parameters.ipam_driver_options or ipam_pools: + # Only add ipam parameter if a driver was specified or if IPAM parameters + # were specified. Leaving this parameter away can significantly speed up + # creation; on my machine creation with this option needs ~15 seconds, + # and without just a few seconds. + if LooseVersion(docker_version) >= LooseVersion('2.0.0'): + params['ipam'] = IPAMConfig(driver=self.parameters.ipam_driver, + pool_configs=ipam_pools, + options=self.parameters.ipam_driver_options) + else: + params['ipam'] = utils.create_ipam_config(driver=self.parameters.ipam_driver, + pool_configs=ipam_pools) + + if self.parameters.enable_ipv6 is not None: + params['enable_ipv6'] = self.parameters.enable_ipv6 + if self.parameters.internal is not None: + params['internal'] = self.parameters.internal + if self.parameters.scope is not None: + params['scope'] = self.parameters.scope + if self.parameters.attachable is not None: + params['attachable'] = self.parameters.attachable + if self.parameters.labels: + params['labels'] = self.parameters.labels + + if not self.check_mode: + resp = self.client.create_network(self.parameters.name, **params) + self.client.report_warnings(resp, ['Warning']) + self.existing_network = self.client.get_network(network_id=resp['Id']) + self.results['actions'].append("Created network %s with driver %s" % (self.parameters.name, self.parameters.driver)) + self.results['changed'] = True + + def remove_network(self): + if self.existing_network: + self.disconnect_all_containers() + if not self.check_mode: + self.client.remove_network(self.parameters.name) + self.results['actions'].append("Removed network %s" % (self.parameters.name,)) + self.results['changed'] = True + + def is_container_connected(self, container_name): + if not self.existing_network: + return False + return container_name in container_names_in_network(self.existing_network) + + def connect_containers(self): + for name in self.parameters.connected: + if not self.is_container_connected(name): + if not self.check_mode: + self.client.connect_container_to_network(name, self.parameters.name) + self.results['actions'].append("Connected container %s" % (name,)) + self.results['changed'] = True + self.diff_tracker.add('connected.{0}'.format(name), + parameter=True, + active=False) + + def disconnect_missing(self): + if not self.existing_network: + return + containers = self.existing_network['Containers'] + if not containers: + return + for c in containers.values(): + name = c['Name'] + if name not in self.parameters.connected: + self.disconnect_container(name) + + def disconnect_all_containers(self): + containers = self.client.get_network(name=self.parameters.name)['Containers'] + if not containers: + return + for cont in containers.values(): + self.disconnect_container(cont['Name']) + + def disconnect_container(self, container_name): + if not self.check_mode: + self.client.disconnect_container_from_network(container_name, self.parameters.name) + self.results['actions'].append("Disconnected container %s" % 
(container_name,)) + self.results['changed'] = True + self.diff_tracker.add('connected.{0}'.format(container_name), + parameter=False, + active=True) + + def present(self): + different = False + differences = DifferenceTracker() + if self.existing_network: + different, differences = self.has_different_config(self.existing_network) + + self.diff_tracker.add('exists', parameter=True, active=self.existing_network is not None) + if self.parameters.force or different: + self.remove_network() + self.existing_network = None + + self.create_network() + self.connect_containers() + if not self.parameters.appends: + self.disconnect_missing() + + if self.diff or self.check_mode or self.parameters.debug: + self.diff_result['differences'] = differences.get_legacy_docker_diffs() + self.diff_tracker.merge(differences) + + if not self.check_mode and not self.parameters.debug: + self.results.pop('actions') + + network_facts = self.get_existing_network() + self.results['ansible_facts'] = {u'docker_network': network_facts} + self.results['network'] = network_facts + + def absent(self): + self.diff_tracker.add('exists', parameter=False, active=self.existing_network is not None) + self.remove_network() + + +def main(): + argument_spec = dict( + name=dict(type='str', required=True, aliases=['network_name']), + connected=dict(type='list', default=[], elements='str', aliases=['containers']), + state=dict(type='str', default='present', choices=['present', 'absent']), + driver=dict(type='str', default='bridge'), + driver_options=dict(type='dict', default={}), + force=dict(type='bool', default=False), + appends=dict(type='bool', default=False, aliases=['incremental']), + ipam_driver=dict(type='str'), + ipam_driver_options=dict(type='dict'), + ipam_options=dict(type='dict', default={}, options=dict( + subnet=dict(type='str'), + iprange=dict(type='str'), + gateway=dict(type='str'), + aux_addresses=dict(type='dict'), + ), removed_in_version='2.12'), + ipam_config=dict(type='list', elements='dict', options=dict( + subnet=dict(type='str'), + iprange=dict(type='str'), + gateway=dict(type='str'), + aux_addresses=dict(type='dict'), + )), + enable_ipv6=dict(type='bool'), + internal=dict(type='bool'), + labels=dict(type='dict', default={}), + debug=dict(type='bool', default=False), + scope=dict(type='str', choices=['local', 'global', 'swarm']), + attachable=dict(type='bool'), + ) + + mutually_exclusive = [ + ('ipam_config', 'ipam_options') + ] + + option_minimal_versions = dict( + scope=dict(docker_py_version='2.6.0', docker_api_version='1.30'), + attachable=dict(docker_py_version='2.0.0', docker_api_version='1.26'), + labels=dict(docker_api_version='1.23'), + ipam_driver_options=dict(docker_py_version='2.0.0'), + ) + + client = AnsibleDockerClient( + argument_spec=argument_spec, + mutually_exclusive=mutually_exclusive, + supports_check_mode=True, + min_docker_version='1.10.0', + min_docker_api_version='1.22', + # "The docker server >= 1.10.0" + option_minimal_versions=option_minimal_versions, + ) + + try: + cm = DockerNetworkManager(client) + client.module.exit_json(**cm.results) + except DockerException as e: + client.fail('An unexpected docker error occurred: {0}'.format(e), exception=traceback.format_exc()) + except RequestException as e: + client.fail('An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(e), exception=traceback.format_exc()) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/docker/docker_network_info.py 
b/plugins/modules/cloud/docker/docker_network_info.py new file mode 100644 index 0000000000..6e9f19eeb1 --- /dev/null +++ b/plugins/modules/cloud/docker/docker_network_info.py @@ -0,0 +1,146 @@ +#!/usr/bin/python +# +# Copyright 2016 Red Hat | Ansible +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: docker_network_info + +short_description: Retrieves facts about docker network + +description: + - Retrieves facts about a docker network. + - Essentially returns the output of C(docker network inspect ), similar to what M(docker_network) + returns for a non-absent network. + + +options: + name: + description: + - The name of the network to inspect. + - When identifying an existing network name may be a name or a long or short network ID. + type: str + required: yes +extends_documentation_fragment: +- community.general.docker +- community.general.docker.docker_py_1_documentation + + +author: + - "Dave Bendit (@DBendit)" + +requirements: + - "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 1.8.0 (use L(docker-py,https://pypi.org/project/docker-py/) for Python 2.6)" + - "Docker API >= 1.21" +''' + +EXAMPLES = ''' +- name: Get infos on network + docker_network_info: + name: mydata + register: result + +- name: Does network exist? + debug: + msg: "The network {{ 'exists' if result.exists else 'does not exist' }}" + +- name: Print information about network + debug: + var: result.network + when: result.exists +''' + +RETURN = ''' +exists: + description: + - Returns whether the network exists. + type: bool + returned: always + sample: true +network: + description: + - Facts representing the current state of the network. Matches the docker inspection output. + - Will be C(none) if network does not exist. 
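
Outside of Ansible, the same lookup can be approximated with the Docker SDK for Python directly; this sketch skips the module's AnsibleDockerClient plumbing, and 'mydata' is the invented network name from the examples:

    import docker

    client = docker.from_env()
    networks = client.networks.list(names=['mydata'])
    network = networks[0].attrs if networks else None
    result = {'exists': network is not None, 'network': network}
    print(result['exists'])
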
+ returned: always + type: dict + sample: '{ + "Attachable": false, + "ConfigFrom": { + "Network": "" + }, + "ConfigOnly": false, + "Containers": {}, + "Created": "2018-12-07T01:47:51.250835114-06:00", + "Driver": "bridge", + "EnableIPv6": false, + "IPAM": { + "Config": [ + { + "Gateway": "192.168.96.1", + "Subnet": "192.168.96.0/20" + } + ], + "Driver": "default", + "Options": null + }, + "Id": "0856968545f22026c41c2c7c3d448319d3b4a6a03a40b148b3ac4031696d1c0a", + "Ingress": false, + "Internal": false, + "Labels": {}, + "Name": "ansible-test-f2700bba", + "Options": {}, + "Scope": "local" + }' +''' + +import traceback + +try: + from docker.errors import DockerException +except ImportError: + # missing Docker SDK for Python handled in ansible.module_utils.docker.common + pass + +from ansible_collections.community.general.plugins.module_utils.docker.common import ( + AnsibleDockerClient, + RequestException, +) + + +def main(): + argument_spec = dict( + name=dict(type='str', required=True), + ) + + client = AnsibleDockerClient( + argument_spec=argument_spec, + supports_check_mode=True, + min_docker_api_version='1.21', + ) + + try: + network = client.get_network(client.module.params['name']) + + client.module.exit_json( + changed=False, + exists=(True if network else False), + network=network, + ) + except DockerException as e: + client.fail('An unexpected docker error occurred: {0}'.format(e), exception=traceback.format_exc()) + except RequestException as e: + client.fail('An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(e), exception=traceback.format_exc()) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/docker/docker_node.py b/plugins/modules/cloud/docker/docker_node.py new file mode 100644 index 0000000000..a3b472b5db --- /dev/null +++ b/plugins/modules/cloud/docker/docker_node.py @@ -0,0 +1,298 @@ +#!/usr/bin/python +# +# (c) 2019 Piotr Wojciechowski +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: docker_node +short_description: Manage Docker Swarm node +description: + - Manages the Docker nodes via Swarm Manager. + - This module allows to change the node's role, its availability, and to modify, add or remove node labels. +options: + hostname: + description: + - The hostname or ID of node as registered in Swarm. + - If more than one node is registered using the same hostname the ID must be used, + otherwise module will fail. + type: str + required: yes + labels: + description: + - User-defined key/value metadata that will be assigned as node attribute. + - Label operations in this module apply to the docker swarm node specified by I(hostname). + Use M(docker_swarm) module to add/modify/remove swarm cluster labels. + - The actual state of labels assigned to the node when module completes its work depends on + I(labels_state) and I(labels_to_remove) parameters values. See description below. + type: dict + labels_state: + description: + - It defines the operation on the labels assigned to node and labels specified in I(labels) option. + - Set to C(merge) to combine labels provided in I(labels) with those already assigned to the node. + If no labels are assigned then it will add listed labels. 
For labels that are already assigned + to the node, it will update their values. The labels not specified in I(labels) will remain unchanged. + If I(labels) is empty then no changes will be made. + - Set to C(replace) to replace all assigned labels with provided ones. If I(labels) is empty then + all labels assigned to the node will be removed. + type: str + default: 'merge' + choices: + - merge + - replace + labels_to_remove: + description: + - List of labels that will be removed from the node configuration. The list has to contain only label + names, not their values. + - If the label provided on the list is not assigned to the node, the entry is ignored. + - If the label is both on the I(labels_to_remove) and I(labels), then value provided in I(labels) remains + assigned to the node. + - If I(labels_state) is C(replace) and I(labels) is not provided or empty then all labels assigned to + node are removed and I(labels_to_remove) is ignored. + type: list + elements: str + availability: + description: Node availability to assign. If not provided then node availability remains unchanged. + choices: + - active + - pause + - drain + type: str + role: + description: Node role to assign. If not provided then node role remains unchanged. + choices: + - manager + - worker + type: str +extends_documentation_fragment: +- community.general.docker +- community.general.docker.docker_py_1_documentation + +requirements: + - "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 2.4.0" + - Docker API >= 1.25 +author: + - Piotr Wojciechowski (@WojciechowskiPiotr) + - Thierry Bouvet (@tbouvet) + +''' + +EXAMPLES = ''' +- name: Set node role + docker_node: + hostname: mynode + role: manager + +- name: Set node availability + docker_node: + hostname: mynode + availability: drain + +- name: Replace node labels with new labels + docker_node: + hostname: mynode + labels: + key: value + labels_state: replace + +- name: Merge node labels and new labels + docker_node: + hostname: mynode + labels: + key: value + +- name: Remove all labels assigned to node + docker_node: + hostname: mynode + labels_state: replace + +- name: Remove selected labels from the node + docker_node: + hostname: mynode + labels_to_remove: + - key1 + - key2 +''' + +RETURN = ''' +node: + description: Information about node after 'update' operation + returned: success + type: dict + +''' + +import traceback + +try: + from docker.errors import DockerException, APIError +except ImportError: + # missing Docker SDK for Python handled in ansible.module_utils.docker.common + pass + +from ansible_collections.community.general.plugins.module_utils.docker.common import ( + DockerBaseClass, + RequestException, +) + +from ansible.module_utils._text import to_native + +from ansible_collections.community.general.plugins.module_utils.docker.swarm import AnsibleDockerSwarmClient + + +class TaskParameters(DockerBaseClass): + def __init__(self, client): + super(TaskParameters, self).__init__() + + # Spec + self.name = None + self.labels = None + self.labels_state = None + self.labels_to_remove = None + + # Node + self.availability = None + self.role = None + + for key, value in client.module.params.items(): + setattr(self, key, value) + + +class SwarmNodeManager(DockerBaseClass): + + def __init__(self, client, results): + + super(SwarmNodeManager, self).__init__() + + self.client = client + self.results = results + self.check_mode = self.client.check_mode + + self.client.fail_task_if_not_swarm_manager() + + self.parameters = 
TaskParameters(client)
+
+        self.node_update()
+
+    def node_update(self):
+        if not self.client.check_if_swarm_node(node_id=self.parameters.hostname):
+            self.client.fail("This node is not part of a swarm.")
+            return
+
+        if self.client.check_if_swarm_node_is_down():
+            self.client.fail("Cannot update the node. The node is down.")
+
+        try:
+            node_info = self.client.inspect_node(node_id=self.parameters.hostname)
+        except APIError as exc:
+            self.client.fail("Failed to get node information: %s" % to_native(exc))
+
+        changed = False
+        node_spec = dict(
+            Availability=self.parameters.availability,
+            Role=self.parameters.role,
+            Labels=self.parameters.labels,
+        )
+
+        if self.parameters.role is None:
+            node_spec['Role'] = node_info['Spec']['Role']
+        else:
+            if node_info['Spec']['Role'] != self.parameters.role:
+                node_spec['Role'] = self.parameters.role
+                changed = True
+
+        if self.parameters.availability is None:
+            node_spec['Availability'] = node_info['Spec']['Availability']
+        else:
+            if node_info['Spec']['Availability'] != self.parameters.availability:
+                node_spec['Availability'] = self.parameters.availability
+                changed = True
+
+        if self.parameters.labels_state == 'replace':
+            if self.parameters.labels is None:
+                node_spec['Labels'] = {}
+                if node_info['Spec']['Labels']:
+                    changed = True
+            else:
+                if (node_info['Spec']['Labels'] or {}) != self.parameters.labels:
+                    node_spec['Labels'] = self.parameters.labels
+                    changed = True
+        elif self.parameters.labels_state == 'merge':
+            node_spec['Labels'] = dict(node_info['Spec']['Labels'] or {})
+            if self.parameters.labels is not None:
+                for key, value in self.parameters.labels.items():
+                    if node_spec['Labels'].get(key) != value:
+                        node_spec['Labels'][key] = value
+                        changed = True
+
+            if self.parameters.labels_to_remove is not None:
+                for key in self.parameters.labels_to_remove:
+                    if self.parameters.labels is not None:
+                        if not self.parameters.labels.get(key):
+                            if node_spec['Labels'].get(key):
+                                node_spec['Labels'].pop(key)
+                                changed = True
+                        else:
+                            self.client.module.warn(
+                                "Label '%s' listed both in 'labels' and 'labels_to_remove'. "
+                                "Keeping the assigned label value." 
+ % to_native(key)) + else: + if node_spec['Labels'].get(key): + node_spec['Labels'].pop(key) + changed = True + + if changed is True: + if not self.check_mode: + try: + self.client.update_node(node_id=node_info['ID'], version=node_info['Version']['Index'], + node_spec=node_spec) + except APIError as exc: + self.client.fail("Failed to update node : %s" % to_native(exc)) + self.results['node'] = self.client.get_node_inspect(node_id=node_info['ID']) + self.results['changed'] = changed + else: + self.results['node'] = node_info + self.results['changed'] = changed + + +def main(): + argument_spec = dict( + hostname=dict(type='str', required=True), + labels=dict(type='dict'), + labels_state=dict(type='str', default='merge', choices=['merge', 'replace']), + labels_to_remove=dict(type='list', elements='str'), + availability=dict(type='str', choices=['active', 'pause', 'drain']), + role=dict(type='str', choices=['worker', 'manager']), + ) + + client = AnsibleDockerSwarmClient( + argument_spec=argument_spec, + supports_check_mode=True, + min_docker_version='2.4.0', + min_docker_api_version='1.25', + ) + + try: + results = dict( + changed=False, + ) + + SwarmNodeManager(client, results) + client.module.exit_json(**results) + except DockerException as e: + client.fail('An unexpected docker error occurred: {0}'.format(e), exception=traceback.format_exc()) + except RequestException as e: + client.fail('An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(e), exception=traceback.format_exc()) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/docker/docker_node_info.py b/plugins/modules/cloud/docker/docker_node_info.py new file mode 100644 index 0000000000..5bb027fbaa --- /dev/null +++ b/plugins/modules/cloud/docker/docker_node_info.py @@ -0,0 +1,160 @@ +#!/usr/bin/python +# +# (c) 2019 Piotr Wojciechowski +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: docker_node_info + +short_description: Retrieves facts about docker swarm node from Swarm Manager + +description: + - Retrieves facts about a docker node. + - Essentially returns the output of C(docker node inspect ). + - Must be executed on a host running as Swarm Manager, otherwise the module will fail. + + +options: + name: + description: + - The name of the node to inspect. + - The list of nodes names to inspect. + - If empty then return information of all nodes in Swarm cluster. + - When identifying the node use either the hostname of the node (as registered in Swarm) or node ID. + - If I(self) is C(true) then this parameter is ignored. + type: list + elements: str + self: + description: + - If C(true), queries the node (i.e. the docker daemon) the module communicates with. + - If C(true) then I(name) is ignored. + - If C(false) then query depends on I(name) presence and value. 
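
The precedence rules spelled out above (I(self) wins over I(name); a missing I(name) means all nodes) amount to a small dispatch; the values below are stand-ins, not the module's own code:

    def resolve_query(params):
        # 'self' overrides 'name'; no 'name' means "inspect every node".
        if params.get('self'):
            return 'self'
        if params.get('name') is None:
            return 'all'
        names = params['name']
        return names if isinstance(names, list) else [names]

    print(resolve_query({'self': True, 'name': ['mynode1']}))   # 'self'
    print(resolve_query({'self': False, 'name': None}))         # 'all'
    print(resolve_query({'self': False, 'name': ['mynode1']}))  # ['mynode1']
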
+ type: bool + default: no +extends_documentation_fragment: +- community.general.docker +- community.general.docker.docker_py_1_documentation + + +author: + - Piotr Wojciechowski (@WojciechowskiPiotr) + +requirements: + - "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 2.4.0" + - "Docker API >= 1.24" +''' + +EXAMPLES = ''' +- name: Get info on all nodes + docker_node_info: + register: result + +- name: Get info on node + docker_node_info: + name: mynode + register: result + +- name: Get info on list of nodes + docker_node_info: + name: + - mynode1 + - mynode2 + register: result + +- name: Get info on host if it is Swarm Manager + docker_node_info: + self: true + register: result +''' + +RETURN = ''' +nodes: + description: + - Facts representing the current state of the nodes. Matches the C(docker node inspect) output. + - Can contain multiple entries if more than one node provided in I(name), or I(name) is not provided. + - If I(name) contains a list of nodes, the output will provide information on all nodes registered + at the swarm, including nodes that left the swarm but haven't been removed from the cluster on swarm + managers and nodes that are unreachable. + returned: always + type: list + elements: dict +''' + +import traceback + +from ansible_collections.community.general.plugins.module_utils.docker.common import ( + RequestException, +) +from ansible_collections.community.general.plugins.module_utils.docker.swarm import AnsibleDockerSwarmClient + +try: + from docker.errors import DockerException +except ImportError: + # missing Docker SDK for Python handled in ansible.module_utils.docker.common + pass + + +def get_node_facts(client): + + results = [] + + if client.module.params['self'] is True: + self_node_id = client.get_swarm_node_id() + node_info = client.get_node_inspect(node_id=self_node_id) + results.append(node_info) + return results + + if client.module.params['name'] is None: + node_info = client.get_all_nodes_inspect() + return node_info + + nodes = client.module.params['name'] + if not isinstance(nodes, list): + nodes = [nodes] + + for next_node_name in nodes: + next_node_info = client.get_node_inspect(node_id=next_node_name, skip_missing=True) + if next_node_info: + results.append(next_node_info) + return results + + +def main(): + argument_spec = dict( + name=dict(type='list', elements='str'), + self=dict(type='bool', default=False), + ) + + client = AnsibleDockerSwarmClient( + argument_spec=argument_spec, + supports_check_mode=True, + min_docker_version='2.4.0', + min_docker_api_version='1.24', + ) + + client.fail_task_if_not_swarm_manager() + + try: + nodes = get_node_facts(client) + + client.module.exit_json( + changed=False, + nodes=nodes, + ) + except DockerException as e: + client.fail('An unexpected docker error occurred: {0}'.format(e), exception=traceback.format_exc()) + except RequestException as e: + client.fail('An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(e), exception=traceback.format_exc()) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/docker/docker_prune.py b/plugins/modules/cloud/docker/docker_prune.py new file mode 100644 index 0000000000..dfd2f37b69 --- /dev/null +++ b/plugins/modules/cloud/docker/docker_prune.py @@ -0,0 +1,270 @@ +#!/usr/bin/python +# +# Copyright 2016 Red Hat | Ansible +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, 
print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: docker_prune + +short_description: Allows to prune various docker objects + +description: + - Allows to run C(docker container prune), C(docker image prune), C(docker network prune) + and C(docker volume prune) via the Docker API. + + +options: + containers: + description: + - Whether to prune containers. + type: bool + default: no + containers_filters: + description: + - A dictionary of filter values used for selecting containers to delete. + - "For example, C(until: 24h)." + - See L(the docker documentation,https://docs.docker.com/engine/reference/commandline/container_prune/#filtering) + for more information on possible filters. + type: dict + images: + description: + - Whether to prune images. + type: bool + default: no + images_filters: + description: + - A dictionary of filter values used for selecting images to delete. + - "For example, C(dangling: true)." + - See L(the docker documentation,https://docs.docker.com/engine/reference/commandline/image_prune/#filtering) + for more information on possible filters. + type: dict + networks: + description: + - Whether to prune networks. + type: bool + default: no + networks_filters: + description: + - A dictionary of filter values used for selecting networks to delete. + - See L(the docker documentation,https://docs.docker.com/engine/reference/commandline/network_prune/#filtering) + for more information on possible filters. + type: dict + volumes: + description: + - Whether to prune volumes. + type: bool + default: no + volumes_filters: + description: + - A dictionary of filter values used for selecting volumes to delete. + - See L(the docker documentation,https://docs.docker.com/engine/reference/commandline/volume_prune/#filtering) + for more information on possible filters. + type: dict + builder_cache: + description: + - Whether to prune the builder cache. + - Requires version 3.3.0 of the Docker SDK for Python or newer. + type: bool + default: no + +extends_documentation_fragment: +- community.general.docker +- community.general.docker.docker_py_2_documentation + + +author: + - "Felix Fontein (@felixfontein)" + +requirements: + - "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 2.1.0" + - "Docker API >= 1.25" +''' + +EXAMPLES = ''' +- name: Prune containers older than 24h + docker_prune: + containers: yes + containers_filters: + # only consider containers created more than 24 hours ago + until: 24h + +- name: Prune everything + docker_prune: + containers: yes + images: yes + networks: yes + volumes: yes + builder_cache: yes + +- name: Prune everything (including non-dangling images) + docker_prune: + containers: yes + images: yes + images_filters: + dangling: false + networks: yes + volumes: yes + builder_cache: yes +''' + +RETURN = ''' +# containers +containers: + description: + - List of IDs of deleted containers. + returned: I(containers) is C(true) + type: list + elements: str + sample: '[]' +containers_space_reclaimed: + description: + - Amount of reclaimed disk space from container pruning in bytes. + returned: I(containers) is C(true) + type: int + sample: '0' + +# images +images: + description: + - List of IDs of deleted images. + returned: I(images) is C(true) + type: list + elements: str + sample: '[]' +images_space_reclaimed: + description: + - Amount of reclaimed disk space from image pruning in bytes. 
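
The *_filters options documented above are handed to the prune endpoints as plain dictionaries, with booleans flattened to the strings the API expects. A hypothetical normalizer in the spirit of clean_dict_booleans_for_docker_api (the real helper lives in the collection's module_utils):

    def normalize_filters(filters):
        # Booleans become 'true'/'false'; everything else is stringified.
        if not filters:
            return None
        return dict(
            (k, str(v).lower() if isinstance(v, bool) else str(v))
            for k, v in filters.items()
        )

    print(normalize_filters({'until': '24h'}))     # {'until': '24h'}
    print(normalize_filters({'dangling': False}))  # {'dangling': 'false'}
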
+ returned: I(images) is C(true) + type: int + sample: '0' + +# networks +networks: + description: + - List of IDs of deleted networks. + returned: I(networks) is C(true) + type: list + elements: str + sample: '[]' + +# volumes +volumes: + description: + - List of IDs of deleted volumes. + returned: I(volumes) is C(true) + type: list + elements: str + sample: '[]' +volumes_space_reclaimed: + description: + - Amount of reclaimed disk space from volumes pruning in bytes. + returned: I(volumes) is C(true) + type: int + sample: '0' + +# builder_cache +builder_cache_space_reclaimed: + description: + - Amount of reclaimed disk space from builder cache pruning in bytes. + returned: I(builder_cache) is C(true) + type: int + sample: '0' +''' + +import traceback + +try: + from docker.errors import DockerException +except ImportError: + # missing Docker SDK for Python handled in ansible.module_utils.docker.common + pass + +from distutils.version import LooseVersion + +from ansible_collections.community.general.plugins.module_utils.docker.common import ( + AnsibleDockerClient, + RequestException, +) + +try: + from ansible_collections.community.general.plugins.module_utils.docker.common import docker_version, clean_dict_booleans_for_docker_api +except Exception as dummy: + # missing Docker SDK for Python handled in ansible.module_utils.docker.common + pass + + +def main(): + argument_spec = dict( + containers=dict(type='bool', default=False), + containers_filters=dict(type='dict'), + images=dict(type='bool', default=False), + images_filters=dict(type='dict'), + networks=dict(type='bool', default=False), + networks_filters=dict(type='dict'), + volumes=dict(type='bool', default=False), + volumes_filters=dict(type='dict'), + builder_cache=dict(type='bool', default=False), + ) + + client = AnsibleDockerClient( + argument_spec=argument_spec, + # supports_check_mode=True, + min_docker_api_version='1.25', + min_docker_version='2.1.0', + ) + + # Version checks + cache_min_version = '3.3.0' + if client.module.params['builder_cache'] and client.docker_py_version < LooseVersion(cache_min_version): + msg = "Error: Docker SDK for Python's version is %s. Minimum version required for builds option is %s. Use `pip install --upgrade docker` to upgrade." 
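
The version gate above relies on LooseVersion comparing dotted version strings component-wise rather than lexically; a standalone illustration with made-up version numbers:

    from distutils.version import LooseVersion

    installed, required = '3.10.0', '3.3.0'
    print(installed >= required)                              # False: plain string comparison misleads
    print(LooseVersion(installed) >= LooseVersion(required))  # True: 10 > 3 component-wise
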
+ client.fail(msg % (docker_version, cache_min_version)) + + try: + result = dict() + + if client.module.params['containers']: + filters = clean_dict_booleans_for_docker_api(client.module.params.get('containers_filters')) + res = client.prune_containers(filters=filters) + result['containers'] = res.get('ContainersDeleted') or [] + result['containers_space_reclaimed'] = res['SpaceReclaimed'] + + if client.module.params['images']: + filters = clean_dict_booleans_for_docker_api(client.module.params.get('images_filters')) + res = client.prune_images(filters=filters) + result['images'] = res.get('ImagesDeleted') or [] + result['images_space_reclaimed'] = res['SpaceReclaimed'] + + if client.module.params['networks']: + filters = clean_dict_booleans_for_docker_api(client.module.params.get('networks_filters')) + res = client.prune_networks(filters=filters) + result['networks'] = res.get('NetworksDeleted') or [] + + if client.module.params['volumes']: + filters = clean_dict_booleans_for_docker_api(client.module.params.get('volumes_filters')) + res = client.prune_volumes(filters=filters) + result['volumes'] = res.get('VolumesDeleted') or [] + result['volumes_space_reclaimed'] = res['SpaceReclaimed'] + + if client.module.params['builder_cache']: + res = client.prune_builds() + result['builder_cache_space_reclaimed'] = res['SpaceReclaimed'] + + client.module.exit_json(**result) + except DockerException as e: + client.fail('An unexpected docker error occurred: {0}'.format(e), exception=traceback.format_exc()) + except RequestException as e: + client.fail('An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(e), exception=traceback.format_exc()) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/docker/docker_secret.py b/plugins/modules/cloud/docker/docker_secret.py new file mode 100644 index 0000000000..682be58941 --- /dev/null +++ b/plugins/modules/cloud/docker/docker_secret.py @@ -0,0 +1,304 @@ +#!/usr/bin/python +# +# Copyright 2016 Red Hat | Ansible +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: docker_secret + +short_description: Manage docker secrets. + + +description: + - Create and remove Docker secrets in a Swarm environment. Similar to C(docker secret create) and C(docker secret rm). + - Adds to the metadata of new secrets 'ansible_key', an encrypted hash representation of the data, which is then used + in future runs to test if a secret has changed. If 'ansible_key is not present, then a secret will not be updated + unless the I(force) option is set. + - Updates to secrets are performed by removing the secret and creating it again. +options: + data: + description: + - The value of the secret. Required when state is C(present). + type: str + data_is_b64: + description: + - If set to C(true), the data is assumed to be Base64 encoded and will be + decoded before being used. + - To use binary I(data), it is better to keep it Base64 encoded and let it + be decoded by this option. + type: bool + default: no + labels: + description: + - "A map of key:value meta data, where both key and value are expected to be strings." 
+ - If new meta data is provided, or existing meta data is modified, the secret will be updated by removing it and creating it again. + type: dict + force: + description: + - Use with state C(present) to always remove and recreate an existing secret. + - If C(true), an existing secret will be replaced, even if it has not changed. + type: bool + default: no + name: + description: + - The name of the secret. + type: str + required: yes + state: + description: + - Set to C(present), if the secret should exist, and C(absent), if it should not. + type: str + default: present + choices: + - absent + - present + +extends_documentation_fragment: +- community.general.docker +- community.general.docker.docker_py_2_documentation + + +requirements: + - "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 2.1.0" + - "Docker API >= 1.25" + +author: + - Chris Houseknecht (@chouseknecht) +''' + +EXAMPLES = ''' + +- name: Create secret foo (from a file on the control machine) + docker_secret: + name: foo + # If the file is JSON or binary, Ansible might modify it (because + # it is first decoded and later re-encoded). Base64-encoding the + # file directly after reading it prevents this to happen. + data: "{{ lookup('file', '/path/to/secret/file') | b64encode }}" + data_is_b64: true + state: present + +- name: Change the secret data + docker_secret: + name: foo + data: Goodnight everyone! + labels: + bar: baz + one: '1' + state: present + +- name: Add a new label + docker_secret: + name: foo + data: Goodnight everyone! + labels: + bar: baz + one: '1' + # Adding a new label will cause a remove/create of the secret + two: '2' + state: present + +- name: No change + docker_secret: + name: foo + data: Goodnight everyone! + labels: + bar: baz + one: '1' + # Even though 'two' is missing, there is no change to the existing secret + state: present + +- name: Update an existing label + docker_secret: + name: foo + data: Goodnight everyone! + labels: + bar: monkey # Changing a label will cause a remove/create of the secret + one: '1' + state: present + +- name: Force the removal/creation of the secret + docker_secret: + name: foo + data: Goodnight everyone! + force: yes + state: present + +- name: Remove secret foo + docker_secret: + name: foo + state: absent +''' + +RETURN = ''' +secret_id: + description: + - The ID assigned by Docker to the secret object. 
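
The 'ansible_key' mechanism described in the documentation boils down to comparing digests; a standalone sketch using the same hashlib.sha224 call the module uses, with invented secret values:

    import hashlib

    def ansible_key(data):
        # The module stores this digest as a label, since secret data
        # cannot be read back from the swarm for comparison.
        return hashlib.sha224(data).hexdigest()

    stored = ansible_key(b'Goodnight everyone!')
    print(ansible_key(b'Goodnight everyone!') == stored)  # True: no update needed
    print(ansible_key(b'New secret value') == stored)     # False: remove and re-create
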
+ returned: success and I(state) is C(present) + type: str + sample: 'hzehrmyjigmcp2gb6nlhmjqcv' +''' + +import base64 +import hashlib +import traceback + +try: + from docker.errors import DockerException, APIError +except ImportError: + # missing Docker SDK for Python handled in ansible.module_utils.docker.common + pass + +from ansible_collections.community.general.plugins.module_utils.docker.common import ( + AnsibleDockerClient, + DockerBaseClass, + compare_generic, + RequestException, +) +from ansible.module_utils._text import to_native, to_bytes + + +class SecretManager(DockerBaseClass): + + def __init__(self, client, results): + + super(SecretManager, self).__init__() + + self.client = client + self.results = results + self.check_mode = self.client.check_mode + + parameters = self.client.module.params + self.name = parameters.get('name') + self.state = parameters.get('state') + self.data = parameters.get('data') + if self.data is not None: + if parameters.get('data_is_b64'): + self.data = base64.b64decode(self.data) + else: + self.data = to_bytes(self.data) + self.labels = parameters.get('labels') + self.force = parameters.get('force') + self.data_key = None + + def __call__(self): + if self.state == 'present': + self.data_key = hashlib.sha224(self.data).hexdigest() + self.present() + elif self.state == 'absent': + self.absent() + + def get_secret(self): + ''' Find an existing secret. ''' + try: + secrets = self.client.secrets(filters={'name': self.name}) + except APIError as exc: + self.client.fail("Error accessing secret %s: %s" % (self.name, to_native(exc))) + + for secret in secrets: + if secret['Spec']['Name'] == self.name: + return secret + return None + + def create_secret(self): + ''' Create a new secret ''' + secret_id = None + # We can't see the data after creation, so adding a label we can use for idempotency check + labels = { + 'ansible_key': self.data_key + } + if self.labels: + labels.update(self.labels) + + try: + if not self.check_mode: + secret_id = self.client.create_secret(self.name, self.data, labels=labels) + except APIError as exc: + self.client.fail("Error creating secret: %s" % to_native(exc)) + + if isinstance(secret_id, dict): + secret_id = secret_id['ID'] + + return secret_id + + def present(self): + ''' Handles state == 'present', creating or updating the secret ''' + secret = self.get_secret() + if secret: + self.results['secret_id'] = secret['ID'] + data_changed = False + attrs = secret.get('Spec', {}) + if attrs.get('Labels', {}).get('ansible_key'): + if attrs['Labels']['ansible_key'] != self.data_key: + data_changed = True + labels_changed = not compare_generic(self.labels, attrs.get('Labels'), 'allow_more_present', 'dict') + if data_changed or labels_changed or self.force: + # if something changed or force, delete and re-create the secret + self.absent() + secret_id = self.create_secret() + self.results['changed'] = True + self.results['secret_id'] = secret_id + else: + self.results['changed'] = True + self.results['secret_id'] = self.create_secret() + + def absent(self): + ''' Handles state == 'absent', removing the secret ''' + secret = self.get_secret() + if secret: + try: + if not self.check_mode: + self.client.remove_secret(secret['ID']) + except APIError as exc: + self.client.fail("Error removing secret %s: %s" % (self.name, to_native(exc))) + self.results['changed'] = True + + +def main(): + argument_spec = dict( + name=dict(type='str', required=True), + state=dict(type='str', default='present', choices=['absent', 'present']), + 
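+        # 'data' below holds the secret value itself, so no_log=True keeps it out of module logs and output.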
+        data=dict(type='str', no_log=True),
+        data_is_b64=dict(type='bool', default=False),
+        labels=dict(type='dict'),
+        force=dict(type='bool', default=False)
+    )
+
+    required_if = [
+        ('state', 'present', ['data'])
+    ]
+
+    client = AnsibleDockerClient(
+        argument_spec=argument_spec,
+        supports_check_mode=True,
+        required_if=required_if,
+        min_docker_version='2.1.0',
+        min_docker_api_version='1.25',
+    )
+
+    try:
+        results = dict(
+            changed=False,
+            secret_id=''
+        )
+
+        SecretManager(client, results)()
+        client.module.exit_json(**results)
+    except DockerException as e:
+        client.fail('An unexpected docker error occurred: {0}'.format(e), exception=traceback.format_exc())
+    except RequestException as e:
+        client.fail('An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/cloud/docker/docker_service.py b/plugins/modules/cloud/docker/docker_service.py
new file mode 120000
index 0000000000..d8792e0ab5
--- /dev/null
+++ b/plugins/modules/cloud/docker/docker_service.py
@@ -0,0 +1 @@
+docker_compose.py
\ No newline at end of file
diff --git a/plugins/modules/cloud/docker/docker_stack.py b/plugins/modules/cloud/docker/docker_stack.py
new file mode 100644
index 0000000000..4a8c50ccd2
--- /dev/null
+++ b/plugins/modules/cloud/docker/docker_stack.py
@@ -0,0 +1,312 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2018 Dario Zanzico (git@dariozanzico.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'status': ['preview'],
+                    'supported_by': 'community',
+                    'metadata_version': '1.1'}
+
+DOCUMENTATION = '''
+---
+module: docker_stack
+author: "Dario Zanzico (@dariko)"
+short_description: docker stack module
+description:
+  - Manage docker stacks using the 'docker stack' command
+    on the target node (see examples).
+options:
+  name:
+    description:
+      - Stack name.
+    type: str
+    required: yes
+  state:
+    description:
+      - Service state.
+    type: str
+    default: "present"
+    choices:
+      - present
+      - absent
+  compose:
+    description:
+      - List of compose definitions. Any element may be a string
+        referring to the path of the compose file on the target host
+        or the YAML contents of a compose file nested as dictionary.
+    type: list
+    # elements: raw
+    default: []
+  prune:
+    description:
+      - If true, the module will add the C(--prune) option to the C(docker stack deploy) command.
+        This will have Docker remove the services not present in the
+        current stack definition.
+    type: bool
+    default: no
+  with_registry_auth:
+    description:
+      - If true, the module will add the C(--with-registry-auth) option to the C(docker stack deploy) command.
+        This will have Docker send registry authentication details to Swarm agents.
+    type: bool
+    default: no
+  resolve_image:
+    description:
+      - If set, the module will add the C(--resolve-image) option to the C(docker stack deploy) command.
+        This will have Docker query the registry to resolve image digest and
+        supported platforms. If not set, Docker uses "always" by default.
+    type: str
+    choices: ["always", "changed", "never"]
+  absent_retries:
+    description:
+      - If C(>0) and I(state) is C(absent) the module will retry up to
+        I(absent_retries) times to delete the stack until all the
+        resources have been effectively deleted.
+        If the last try still reports the stack as not completely
+        removed, the module will fail.
+    type: int
+    default: 0
+  absent_retries_interval:
+    description:
+      - Interval in seconds between consecutive I(absent_retries).
+    type: int
+    default: 1
+
+requirements:
+  - jsondiff
+  - pyyaml
+
+notes:
+  - Return values I(out) and I(err) have been deprecated and will be removed in Ansible 2.14. Use I(stdout) and I(stderr) instead.
+'''
+
+RETURN = '''
+stack_spec_diff:
+    description: |
+        Dictionary containing the differences between the 'Spec' field
+        of the stack services before and after applying the new stack
+        definition.
+    sample: >
+        "stack_spec_diff":
+        {'test_stack_test_service': {u'TaskTemplate': {u'ContainerSpec': {delete: [u'Env']}}}}
+    returned: on change
+    type: dict
+'''
+
+EXAMPLES = '''
+  - name: Deploy stack from a compose file
+    docker_stack:
+      state: present
+      name: mystack
+      compose:
+        - /opt/docker-compose.yml
+
+  - name: Deploy stack from base compose file and override the web service
+    docker_stack:
+      state: present
+      name: mystack
+      compose:
+        - /opt/docker-compose.yml
+        - version: '3'
+          services:
+            web:
+              image: nginx:latest
+              environment:
+                ENVVAR: envvar
+
+  - name: Remove stack
+    docker_stack:
+      name: mystack
+      state: absent
+'''
+
+
+import json
+import tempfile
+from ansible.module_utils.six import string_types
+from time import sleep
+
+try:
+    from jsondiff import diff as json_diff
+    HAS_JSONDIFF = True
+except ImportError:
+    HAS_JSONDIFF = False
+
+try:
+    from yaml import dump as yaml_dump
+    HAS_YAML = True
+except ImportError:
+    HAS_YAML = False
+
+from ansible.module_utils.basic import AnsibleModule, os
+
+
+def docker_stack_services(module, stack_name):
+    docker_bin = module.get_bin_path('docker', required=True)
+    rc, out, err = module.run_command([docker_bin,
+                                       "stack",
+                                       "services",
+                                       stack_name,
+                                       "--format",
+                                       "{{.Name}}"])
+    if err == "Nothing found in stack: %s\n" % stack_name:
+        return []
+    return out.strip().split('\n')
+
+
+def docker_service_inspect(module, service_name):
+    docker_bin = module.get_bin_path('docker', required=True)
+    rc, out, err = module.run_command([docker_bin,
+                                       "service",
+                                       "inspect",
+                                       service_name])
+    if rc != 0:
+        return None
+    else:
+        ret = json.loads(out)[0]['Spec']
+        return ret
+
+
+def docker_stack_deploy(module, stack_name, compose_files):
+    docker_bin = module.get_bin_path('docker', required=True)
+    command = [docker_bin, "stack", "deploy"]
+    if module.params["prune"]:
+        command += ["--prune"]
+    if module.params["with_registry_auth"]:
+        command += ["--with-registry-auth"]
+    if module.params["resolve_image"]:
+        command += ["--resolve-image",
+                    module.params["resolve_image"]]
+    for compose_file in compose_files:
+        command += ["--compose-file",
+                    compose_file]
+    command += [stack_name]
+    return module.run_command(command)
+
+
+def docker_stack_inspect(module, stack_name):
+    ret = {}
+    for service_name in docker_stack_services(module, stack_name):
+        ret[service_name] = docker_service_inspect(module, service_name)
+    return ret
+
+
+def docker_stack_rm(module, stack_name, retries, interval):
+    docker_bin = module.get_bin_path('docker', required=True)
+    command = [docker_bin, "stack", "rm", stack_name]
+
+    rc, out, err = module.run_command(command)
+
+    while err != "Nothing found in stack: %s\n" % stack_name and retries > 0:
+        sleep(interval)
+        retries = retries - 1
+        rc, out, err = module.run_command(command)
+    return rc, out, err
+
+
+def main():
+    module = AnsibleModule(
+        argument_spec={
+            'name': dict(type='str', required=True),
+            'compose': dict(type='list', elements='raw', default=[]),
+            'prune': dict(type='bool', default=False),
+            'with_registry_auth': dict(type='bool', default=False),
+            'resolve_image': dict(type='str', choices=['always', 'changed', 'never']),
+            'state': dict(type='str', default='present', choices=['present', 'absent']),
+            'absent_retries': dict(type='int', default=0),
+            'absent_retries_interval': dict(type='int', default=1)
+        },
+        supports_check_mode=False
+    )
+
+    if not HAS_JSONDIFF:
+        return module.fail_json(msg="jsondiff is not installed, try 'pip install jsondiff'")
+
+    if not HAS_YAML:
+        return module.fail_json(msg="yaml is not installed, try 'pip install pyyaml'")
+
+    state = module.params['state']
+    compose = module.params['compose']
+    name = module.params['name']
+    absent_retries = module.params['absent_retries']
+    absent_retries_interval = module.params['absent_retries_interval']
+
+    if state == 'present':
+        if not compose:
+            module.fail_json(msg=("compose parameter must be a list "
+                                  "containing at least one element"))
+
+        compose_files = []
+        for i, compose_def in enumerate(compose):
+            if isinstance(compose_def, dict):
+                compose_file_fd, compose_file = tempfile.mkstemp()
+                module.add_cleanup_file(compose_file)
+                with os.fdopen(compose_file_fd, 'w') as stack_file:
+                    compose_files.append(compose_file)
+                    stack_file.write(yaml_dump(compose_def))
+            elif isinstance(compose_def, string_types):
+                compose_files.append(compose_def)
+            else:
+                module.fail_json(msg="compose element '%s' must be a "
+                                     "string or a dictionary" % compose_def)
+
+        before_stack_services = docker_stack_inspect(module, name)
+
+        rc, out, err = docker_stack_deploy(module, name, compose_files)
+
+        after_stack_services = docker_stack_inspect(module, name)
+
+        if rc != 0:
+            module.fail_json(msg="'docker stack deploy' command failed",
+                             rc=rc,
+                             out=out, err=err,  # Deprecated
+                             stdout=out, stderr=err)
+
+        before_after_differences = json_diff(before_stack_services,
+                                             after_stack_services)
+        # Iterate over a copy of the keys: entries may be popped while iterating.
+        for k in list(before_after_differences.keys()):
+            if isinstance(before_after_differences[k], dict):
+                before_after_differences[k].pop('UpdatedAt', None)
+                before_after_differences[k].pop('Version', None)
+                if not list(before_after_differences[k].keys()):
+                    before_after_differences.pop(k)
+
+        if not before_after_differences:
+            module.exit_json(
+                changed=False,
+                rc=rc,
+                stdout=out,
+                stderr=err)
+        else:
+            module.exit_json(
+                changed=True,
+                rc=rc,
+                stdout=out,
+                stderr=err,
+                stack_spec_diff=json_diff(before_stack_services,
+                                          after_stack_services,
+                                          dump=True))
+
+    else:
+        if docker_stack_services(module, name):
+            rc, out, err = docker_stack_rm(module, name, absent_retries, absent_retries_interval)
+            if rc != 0:
+                module.fail_json(msg="'docker stack down' command failed",
+                                 rc=rc,
+                                 out=out, err=err,  # Deprecated
+                                 stdout=out, stderr=err)
+            else:
+                module.exit_json(changed=True,
+                                 msg=out, rc=rc,
+                                 err=err,  # Deprecated
+                                 stdout=out, stderr=err)
+        module.exit_json(changed=False)
+
+
+if __name__ == "__main__":
+    main()
diff --git a/plugins/modules/cloud/docker/docker_swarm.py b/plugins/modules/cloud/docker/docker_swarm.py
new file mode 100644
index 0000000000..71436b073f
--- /dev/null
+++ b/plugins/modules/cloud/docker/docker_swarm.py
@@ -0,0 +1,679 @@
+#!/usr/bin/python
+
+# Copyright 2016 Red Hat | Ansible
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+DOCUMENTATION = '''
+---
+module: docker_swarm
+short_description: Manage Swarm cluster
+description:
+  - Create a new Swarm cluster.
+  - Add nodes and managers to, or remove them from, an existing cluster.
+options:
+  advertise_addr:
+    description:
+      - Externally reachable address advertised to other nodes.
+      - This can either be an address/port combination
+        in the form C(192.168.1.1:4567), or an interface followed by a
+        port number, like C(eth0:4567).
+      - If the port number is omitted,
+        the port number from the listen address is used.
+      - If I(advertise_addr) is not specified, it will be automatically
+        detected when possible.
+      - Only used when swarm is initialised or joined. Because of this, it's not
+        considered for idempotency checking.
+    type: str
+  default_addr_pool:
+    description:
+      - Default address pool in CIDR format.
+      - Only used when swarm is initialised. Because of this, it's not considered
+        for idempotency checking.
+      - Requires API version >= 1.39.
+    type: list
+    elements: str
+  subnet_size:
+    description:
+      - Default address pool subnet mask length.
+      - Only used when swarm is initialised. Because of this, it's not considered
+        for idempotency checking.
+      - Requires API version >= 1.39.
+    type: int
+  listen_addr:
+    description:
+      - Listen address used for inter-manager communication.
+      - This can either be an address/port combination in the form
+        C(192.168.1.1:4567), or an interface followed by a port number,
+        like C(eth0:4567).
+      - If the port number is omitted, the default swarm listening port
+        is used.
+      - Only used when swarm is initialised or joined. Because of this, it's not
+        considered for idempotency checking.
+    type: str
+    default: 0.0.0.0:2377
+  force:
+    description:
+      - Use with state C(present) to force creating a new Swarm, even if already part of one.
+      - Use with state C(absent) to leave the swarm even if this node is a manager.
+    type: bool
+    default: no
+  state:
+    description:
+      - Set to C(present), to create/update a new cluster.
+      - Set to C(join), to join an existing cluster.
+      - Set to C(absent), to leave an existing cluster.
+      - Set to C(remove), to remove an absent node from the cluster.
+        Note that removing requires Docker SDK for Python >= 2.4.0.
+      - Set to C(inspect) to display swarm information.
+    type: str
+    default: present
+    choices:
+      - present
+      - join
+      - absent
+      - remove
+      - inspect
+  node_id:
+    description:
+      - Swarm id of the node to remove.
+      - Used with I(state=remove).
+    type: str
+  join_token:
+    description:
+      - Swarm token used to join a swarm cluster.
+      - Used with I(state=join).
+    type: str
+  remote_addrs:
+    description:
+      - Remote address of one or more manager nodes of an existing Swarm to connect to.
+      - Used with I(state=join).
+    type: list
+    elements: str
+  task_history_retention_limit:
+    description:
+      - Maximum number of task history entries stored.
+      - Docker default value is C(5).
+    type: int
+  snapshot_interval:
+    description:
+      - Number of log entries between snapshots.
+      - Docker default value is C(10000).
+    type: int
+  keep_old_snapshots:
+    description:
+      - Number of snapshots to keep beyond the current snapshot.
+      - Docker default value is C(0).
+    type: int
+  log_entries_for_slow_followers:
+    description:
+      - Number of log entries to keep around to sync up slow followers after a snapshot is created.
+    type: int
+  heartbeat_tick:
+    description:
+      - Number of ticks (in seconds) between each heartbeat.
+      - Docker default value is C(1s).
+    type: int
+  election_tick:
+    description:
+      - Number of ticks (in seconds) needed without a leader to trigger a new election.
+      - Docker default value is C(10s).
+    type: int
+  dispatcher_heartbeat_period:
+    description:
+      - The delay for an agent to send a heartbeat to the dispatcher.
+      - Docker default value is C(5s).
+    type: int
+  node_cert_expiry:
+    description:
+      - Automatic expiry for node certificates.
+      - Docker default value is C(3months).
+    type: int
+  name:
+    description:
+      - The name of the swarm.
+    type: str
+  labels:
+    description:
+      - User-defined key/value metadata.
+      - Label operations in this module apply to the docker swarm cluster.
+        Use the M(docker_node) module to add/modify/remove swarm node labels.
+      - Requires API version >= 1.32.
+    type: dict
+  signing_ca_cert:
+    description:
+      - The desired signing CA certificate for all swarm node TLS leaf certificates, in PEM format.
+      - This must not be a path to a certificate, but the contents of the certificate.
+      - Requires API version >= 1.30.
+    type: str
+  signing_ca_key:
+    description:
+      - The desired signing CA key for all swarm node TLS leaf certificates, in PEM format.
+      - This must not be a path to a key, but the contents of the key.
+      - Requires API version >= 1.30.
+    type: str
+  ca_force_rotate:
+    description:
+      - An integer whose purpose is to force swarm to generate a new signing CA certificate and key,
+        if none have been specified.
+      - Docker default value is C(0).
+      - Requires API version >= 1.30.
+    type: int
+  autolock_managers:
+    description:
+      - If set, generate a key and use it to lock data stored on the managers.
+      - Docker default value is C(no).
+      - M(docker_swarm_info) can be used to retrieve the unlock key.
+    type: bool
+  rotate_worker_token:
+    description: Rotate the worker join token.
+    type: bool
+    default: no
+  rotate_manager_token:
+    description: Rotate the manager join token.
+    type: bool
+    default: no
+extends_documentation_fragment:
+- community.general.docker
+- community.general.docker.docker_py_1_documentation
+
+requirements:
+  - "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 1.10.0 (use L(docker-py,https://pypi.org/project/docker-py/) for Python 2.6)"
+  - Docker API >= 1.25
+author:
+  - Thierry Bouvet (@tbouvet)
+  - Piotr Wojciechowski (@WojciechowskiPiotr)
+'''
+
+EXAMPLES = '''
+
+- name: Init a new swarm with default parameters
+  docker_swarm:
+    state: present
+
+- name: Update swarm configuration
+  docker_swarm:
+    state: present
+    election_tick: 5
+
+- name: Add nodes
+  docker_swarm:
+    state: join
+    advertise_addr: 192.168.1.2
+    join_token: SWMTKN-1--xxxxx
+    remote_addrs: [ '192.168.1.1:2377' ]
+
+- name: Leave swarm for a node
+  docker_swarm:
+    state: absent
+
+- name: Remove a swarm manager
+  docker_swarm:
+    state: absent
+    force: true
+
+- name: Remove node from swarm
+  docker_swarm:
+    state: remove
+    node_id: mynode
+
+- name: Inspect swarm
+  docker_swarm:
+    state: inspect
+  register: swarm_info
+'''
+
+RETURN = '''
+swarm_facts:
+  description: Information about the swarm.
+  returned: success
+  type: dict
+  contains:
+      JoinTokens:
+          description: Tokens to connect to the Swarm.
+          returned: success
+          type: dict
+          contains:
+              Worker:
+                  description: Token to create a new *worker* node.
+                  returned: success
+                  type: str
+                  example: SWMTKN-1--xxxxx
+              Manager:
+                  description: Token to create a new *manager* node.
+                  returned: success
+                  type: str
+                  example: SWMTKN-1--xxxxx
+      UnlockKey:
+          description: The swarm unlock-key if I(autolock_managers) is C(true).
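+          # Note: the unlock key can also be retrieved at any later time with the
+          # docker_swarm_info module (option unlock_key: yes).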
+ returned: on success if I(autolock_managers) is C(true) + and swarm is initialised, or if I(autolock_managers) has changed. + type: str + example: SWMKEY-1-xxx + +actions: + description: Provides the actions done on the swarm. + returned: when action failed. + type: list + elements: str + example: "['This cluster is already a swarm cluster']" + +''' + +import json +import traceback + +try: + from docker.errors import DockerException, APIError +except ImportError: + # missing Docker SDK for Python handled in ansible.module_utils.docker.common + pass + +from ansible_collections.community.general.plugins.module_utils.docker.common import ( + DockerBaseClass, + DifferenceTracker, + RequestException, +) + +from ansible_collections.community.general.plugins.module_utils.docker.swarm import AnsibleDockerSwarmClient + +from ansible.module_utils._text import to_native + + +class TaskParameters(DockerBaseClass): + def __init__(self): + super(TaskParameters, self).__init__() + + self.advertise_addr = None + self.listen_addr = None + self.remote_addrs = None + self.join_token = None + + # Spec + self.snapshot_interval = None + self.task_history_retention_limit = None + self.keep_old_snapshots = None + self.log_entries_for_slow_followers = None + self.heartbeat_tick = None + self.election_tick = None + self.dispatcher_heartbeat_period = None + self.node_cert_expiry = None + self.name = None + self.labels = None + self.log_driver = None + self.signing_ca_cert = None + self.signing_ca_key = None + self.ca_force_rotate = None + self.autolock_managers = None + self.rotate_worker_token = None + self.rotate_manager_token = None + self.default_addr_pool = None + self.subnet_size = None + + @staticmethod + def from_ansible_params(client): + result = TaskParameters() + for key, value in client.module.params.items(): + if key in result.__dict__: + setattr(result, key, value) + + result.update_parameters(client) + return result + + def update_from_swarm_info(self, swarm_info): + spec = swarm_info['Spec'] + + ca_config = spec.get('CAConfig') or dict() + if self.node_cert_expiry is None: + self.node_cert_expiry = ca_config.get('NodeCertExpiry') + if self.ca_force_rotate is None: + self.ca_force_rotate = ca_config.get('ForceRotate') + + dispatcher = spec.get('Dispatcher') or dict() + if self.dispatcher_heartbeat_period is None: + self.dispatcher_heartbeat_period = dispatcher.get('HeartbeatPeriod') + + raft = spec.get('Raft') or dict() + if self.snapshot_interval is None: + self.snapshot_interval = raft.get('SnapshotInterval') + if self.keep_old_snapshots is None: + self.keep_old_snapshots = raft.get('KeepOldSnapshots') + if self.heartbeat_tick is None: + self.heartbeat_tick = raft.get('HeartbeatTick') + if self.log_entries_for_slow_followers is None: + self.log_entries_for_slow_followers = raft.get('LogEntriesForSlowFollowers') + if self.election_tick is None: + self.election_tick = raft.get('ElectionTick') + + orchestration = spec.get('Orchestration') or dict() + if self.task_history_retention_limit is None: + self.task_history_retention_limit = orchestration.get('TaskHistoryRetentionLimit') + + encryption_config = spec.get('EncryptionConfig') or dict() + if self.autolock_managers is None: + self.autolock_managers = encryption_config.get('AutoLockManagers') + + if self.name is None: + self.name = spec['Name'] + + if self.labels is None: + self.labels = spec.get('Labels') or {} + + if 'LogDriver' in spec['TaskDefaults']: + self.log_driver = spec['TaskDefaults']['LogDriver'] + + def update_parameters(self, client): 
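+        # Translates the Ansible parameters (values of 'assign' below) into the
+        # keyword arguments of the Docker SDK's create_swarm_spec() (keys of
+        # 'assign'), skipping any option whose required SDK/API version is not
+        # available (see option_minimal_versions in main()), so older daemons
+        # are never sent parameters they cannot handle.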
+ assign = dict( + snapshot_interval='snapshot_interval', + task_history_retention_limit='task_history_retention_limit', + keep_old_snapshots='keep_old_snapshots', + log_entries_for_slow_followers='log_entries_for_slow_followers', + heartbeat_tick='heartbeat_tick', + election_tick='election_tick', + dispatcher_heartbeat_period='dispatcher_heartbeat_period', + node_cert_expiry='node_cert_expiry', + name='name', + labels='labels', + signing_ca_cert='signing_ca_cert', + signing_ca_key='signing_ca_key', + ca_force_rotate='ca_force_rotate', + autolock_managers='autolock_managers', + log_driver='log_driver', + ) + params = dict() + for dest, source in assign.items(): + if not client.option_minimal_versions[source]['supported']: + continue + value = getattr(self, source) + if value is not None: + params[dest] = value + self.spec = client.create_swarm_spec(**params) + + def compare_to_active(self, other, client, differences): + for k in self.__dict__: + if k in ('advertise_addr', 'listen_addr', 'remote_addrs', 'join_token', + 'rotate_worker_token', 'rotate_manager_token', 'spec', + 'default_addr_pool', 'subnet_size'): + continue + if not client.option_minimal_versions[k]['supported']: + continue + value = getattr(self, k) + if value is None: + continue + other_value = getattr(other, k) + if value != other_value: + differences.add(k, parameter=value, active=other_value) + if self.rotate_worker_token: + differences.add('rotate_worker_token', parameter=True, active=False) + if self.rotate_manager_token: + differences.add('rotate_manager_token', parameter=True, active=False) + return differences + + +class SwarmManager(DockerBaseClass): + + def __init__(self, client, results): + + super(SwarmManager, self).__init__() + + self.client = client + self.results = results + self.check_mode = self.client.check_mode + self.swarm_info = {} + + self.state = client.module.params['state'] + self.force = client.module.params['force'] + self.node_id = client.module.params['node_id'] + + self.differences = DifferenceTracker() + self.parameters = TaskParameters.from_ansible_params(client) + + self.created = False + + def __call__(self): + choice_map = { + "present": self.init_swarm, + "join": self.join, + "absent": self.leave, + "remove": self.remove, + "inspect": self.inspect_swarm + } + + if self.state == 'inspect': + self.client.module.deprecate( + "The 'inspect' state is deprecated, please use 'docker_swarm_info' to inspect swarm cluster", + version='2.12') + + choice_map.get(self.state)() + + if self.client.module._diff or self.parameters.debug: + diff = dict() + diff['before'], diff['after'] = self.differences.get_before_after() + self.results['diff'] = diff + + def inspect_swarm(self): + try: + data = self.client.inspect_swarm() + json_str = json.dumps(data, ensure_ascii=False) + self.swarm_info = json.loads(json_str) + + self.results['changed'] = False + self.results['swarm_facts'] = self.swarm_info + + unlock_key = self.get_unlock_key() + self.swarm_info.update(unlock_key) + except APIError: + return + + def get_unlock_key(self): + default = {'UnlockKey': None} + if not self.has_swarm_lock_changed(): + return default + try: + return self.client.get_unlock_key() or default + except APIError: + return default + + def has_swarm_lock_changed(self): + return self.parameters.autolock_managers and ( + self.created or self.differences.has_difference_for('autolock_managers') + ) + + def init_swarm(self): + if not self.force and self.client.check_if_swarm_manager(): + self.__update_swarm() + return + + if not 
self.check_mode: + init_arguments = { + 'advertise_addr': self.parameters.advertise_addr, + 'listen_addr': self.parameters.listen_addr, + 'force_new_cluster': self.force, + 'swarm_spec': self.parameters.spec, + } + if self.parameters.default_addr_pool is not None: + init_arguments['default_addr_pool'] = self.parameters.default_addr_pool + if self.parameters.subnet_size is not None: + init_arguments['subnet_size'] = self.parameters.subnet_size + try: + self.client.init_swarm(**init_arguments) + except APIError as exc: + self.client.fail("Can not create a new Swarm Cluster: %s" % to_native(exc)) + + if not self.client.check_if_swarm_manager(): + if not self.check_mode: + self.client.fail("Swarm not created or other error!") + + self.created = True + self.inspect_swarm() + self.results['actions'].append("New Swarm cluster created: %s" % (self.swarm_info.get('ID'))) + self.differences.add('state', parameter='present', active='absent') + self.results['changed'] = True + self.results['swarm_facts'] = { + 'JoinTokens': self.swarm_info.get('JoinTokens'), + 'UnlockKey': self.swarm_info.get('UnlockKey') + } + + def __update_swarm(self): + try: + self.inspect_swarm() + version = self.swarm_info['Version']['Index'] + self.parameters.update_from_swarm_info(self.swarm_info) + old_parameters = TaskParameters() + old_parameters.update_from_swarm_info(self.swarm_info) + self.parameters.compare_to_active(old_parameters, self.client, self.differences) + if self.differences.empty: + self.results['actions'].append("No modification") + self.results['changed'] = False + return + update_parameters = TaskParameters.from_ansible_params(self.client) + update_parameters.update_parameters(self.client) + if not self.check_mode: + self.client.update_swarm( + version=version, swarm_spec=update_parameters.spec, + rotate_worker_token=self.parameters.rotate_worker_token, + rotate_manager_token=self.parameters.rotate_manager_token) + except APIError as exc: + self.client.fail("Can not update a Swarm Cluster: %s" % to_native(exc)) + return + + self.inspect_swarm() + self.results['actions'].append("Swarm cluster updated") + self.results['changed'] = True + + def join(self): + if self.client.check_if_swarm_node(): + self.results['actions'].append("This node is already part of a swarm.") + return + if not self.check_mode: + try: + self.client.join_swarm( + remote_addrs=self.parameters.remote_addrs, join_token=self.parameters.join_token, + listen_addr=self.parameters.listen_addr, advertise_addr=self.parameters.advertise_addr) + except APIError as exc: + self.client.fail("Can not join the Swarm Cluster: %s" % to_native(exc)) + self.results['actions'].append("New node is added to swarm cluster") + self.differences.add('joined', parameter=True, active=False) + self.results['changed'] = True + + def leave(self): + if not self.client.check_if_swarm_node(): + self.results['actions'].append("This node is not part of a swarm.") + return + if not self.check_mode: + try: + self.client.leave_swarm(force=self.force) + except APIError as exc: + self.client.fail("This node can not leave the Swarm Cluster: %s" % to_native(exc)) + self.results['actions'].append("Node has left the swarm cluster") + self.differences.add('joined', parameter='absent', active='present') + self.results['changed'] = True + + def remove(self): + if not self.client.check_if_swarm_manager(): + self.client.fail("This node is not a manager.") + + try: + status_down = self.client.check_if_swarm_node_is_down(node_id=self.node_id, repeat_check=5) + except APIError: + return + 
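+        # check_if_swarm_node_is_down() above polls the node status (up to 5
+        # repeated checks here) because a node must report as 'down' before the
+        # swarm will allow a manager to remove it.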
+        if not status_down:
+            self.client.fail("Can not remove the node: node status is 'ready', not 'down'.")
+
+        if not self.check_mode:
+            try:
+                self.client.remove_node(node_id=self.node_id, force=self.force)
+            except APIError as exc:
+                self.client.fail("Can not remove the node from the Swarm Cluster: %s" % to_native(exc))
+        self.results['actions'].append("Node is removed from swarm cluster.")
+        self.differences.add('joined', parameter=False, active=True)
+        self.results['changed'] = True
+
+
+def _detect_remove_operation(client):
+    return client.module.params['state'] == 'remove'
+
+
+def main():
+    argument_spec = dict(
+        advertise_addr=dict(type='str'),
+        state=dict(type='str', default='present', choices=['present', 'join', 'absent', 'remove', 'inspect']),
+        force=dict(type='bool', default=False),
+        listen_addr=dict(type='str', default='0.0.0.0:2377'),
+        remote_addrs=dict(type='list', elements='str'),
+        join_token=dict(type='str'),
+        snapshot_interval=dict(type='int'),
+        task_history_retention_limit=dict(type='int'),
+        keep_old_snapshots=dict(type='int'),
+        log_entries_for_slow_followers=dict(type='int'),
+        heartbeat_tick=dict(type='int'),
+        election_tick=dict(type='int'),
+        dispatcher_heartbeat_period=dict(type='int'),
+        node_cert_expiry=dict(type='int'),
+        name=dict(type='str'),
+        labels=dict(type='dict'),
+        signing_ca_cert=dict(type='str'),
+        signing_ca_key=dict(type='str'),
+        ca_force_rotate=dict(type='int'),
+        autolock_managers=dict(type='bool'),
+        node_id=dict(type='str'),
+        rotate_worker_token=dict(type='bool', default=False),
+        rotate_manager_token=dict(type='bool', default=False),
+        default_addr_pool=dict(type='list', elements='str'),
+        subnet_size=dict(type='int'),
+    )
+
+    required_if = [
+        ('state', 'join', ['advertise_addr', 'remote_addrs', 'join_token']),
+        ('state', 'remove', ['node_id'])
+    ]
+
+    option_minimal_versions = dict(
+        labels=dict(docker_py_version='2.6.0', docker_api_version='1.32'),
+        signing_ca_cert=dict(docker_py_version='2.6.0', docker_api_version='1.30'),
+        signing_ca_key=dict(docker_py_version='2.6.0', docker_api_version='1.30'),
+        ca_force_rotate=dict(docker_py_version='2.6.0', docker_api_version='1.30'),
+        autolock_managers=dict(docker_py_version='2.6.0'),
+        log_driver=dict(docker_py_version='2.6.0'),
+        remove_operation=dict(
+            docker_py_version='2.4.0',
+            detect_usage=_detect_remove_operation,
+            usage_msg='remove swarm nodes'
+        ),
+        default_addr_pool=dict(docker_py_version='4.0.0', docker_api_version='1.39'),
+        subnet_size=dict(docker_py_version='4.0.0', docker_api_version='1.39'),
+    )
+
+    client = AnsibleDockerSwarmClient(
+        argument_spec=argument_spec,
+        supports_check_mode=True,
+        required_if=required_if,
+        min_docker_version='1.10.0',
+        min_docker_api_version='1.25',
+        option_minimal_versions=option_minimal_versions,
+    )
+
+    try:
+        results = dict(
+            changed=False,
+            result='',
+            actions=[]
+        )
+
+        SwarmManager(client, results)()
+        client.module.exit_json(**results)
+    except DockerException as e:
+        client.fail('An unexpected docker error occurred: {0}'.format(e), exception=traceback.format_exc())
+    except RequestException as e:
+        client.fail('An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/cloud/docker/docker_swarm_info.py b/plugins/modules/cloud/docker/docker_swarm_info.py
new file mode 100644
index 0000000000..f9ace62721
--- /dev/null
+++ b/plugins/modules/cloud/docker/docker_swarm_info.py
@@ -0,0 +1,388 @@
+#!/usr/bin/python
+#
+# (c) 2019 Piotr Wojciechowski
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+DOCUMENTATION = '''
+---
+module: docker_swarm_info
+
+short_description: Retrieves facts about a Docker Swarm cluster.
+
+description:
+  - Retrieves facts about a Docker Swarm.
+  - Returns lists of swarm object names - nodes, services, tasks.
+  - The output differs depending on the API version available on the docker host.
+  - Must be run on a Swarm Manager node; otherwise the module fails with an error message.
+    It does return boolean flags on both error and success which indicate whether
+    the docker daemon can be communicated with, whether it is in Swarm mode, and
+    whether it is a Swarm Manager node.
+
+
+author:
+  - Piotr Wojciechowski (@WojciechowskiPiotr)
+
+options:
+  nodes:
+    description:
+      - Whether to list swarm nodes.
+    type: bool
+    default: no
+  nodes_filters:
+    description:
+      - A dictionary of filter values used for selecting nodes to list.
+      - "For example, C(name: mynode)."
+      - See L(the docker documentation,https://docs.docker.com/engine/reference/commandline/node_ls/#filtering)
+        for more information on possible filters.
+    type: dict
+  services:
+    description:
+      - Whether to list swarm services.
+    type: bool
+    default: no
+  services_filters:
+    description:
+      - A dictionary of filter values used for selecting services to list.
+      - "For example, C(name: myservice)."
+      - See L(the docker documentation,https://docs.docker.com/engine/reference/commandline/service_ls/#filtering)
+        for more information on possible filters.
+    type: dict
+  tasks:
+    description:
+      - Whether to list swarm tasks.
+    type: bool
+    default: no
+  tasks_filters:
+    description:
+      - A dictionary of filter values used for selecting tasks to list.
+      - "For example, C(node: mynode-1)."
+      - See L(the docker documentation,https://docs.docker.com/engine/reference/commandline/service_ps/#filtering)
+        for more information on possible filters.
+    type: dict
+  unlock_key:
+    description:
+      - Whether to retrieve the swarm unlock key.
+    type: bool
+    default: no
+  verbose_output:
+    description:
+      - When set to C(yes) and I(nodes), I(services) or I(tasks) is set to C(yes), then the module output will
+        contain verbose information about objects matching the full output of API method.
+      - For details see the documentation of your version of Docker API at U(https://docs.docker.com/engine/api/).
+      - The verbose output in this module contains only a subset of information returned by the I(_info) module
+        for each type of object.
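+      # Note: with verbose_output the module returns the raw API objects
+      # unmodified (see get_docker_items_list() below), so the exact keys vary
+      # with the Docker API version in use.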
+    type: bool
+    default: no
+extends_documentation_fragment:
+- community.general.docker
+- community.general.docker.docker_py_1_documentation
+
+
+requirements:
+  - "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 1.10.0 (use L(docker-py,https://pypi.org/project/docker-py/) for Python 2.6)"
+  - "Docker API >= 1.24"
+'''
+
+EXAMPLES = '''
+- name: Get info on Docker Swarm
+  docker_swarm_info:
+  ignore_errors: yes
+  register: result
+
+- name: Inform about basic flags
+  debug:
+    msg: |
+      Was able to talk to docker daemon: {{ result.can_talk_to_docker }}
+      Docker in Swarm mode: {{ result.docker_swarm_active }}
+      This is a Manager node: {{ result.docker_swarm_manager }}
+
+- name: Get info on Docker Swarm and list of registered nodes
+  docker_swarm_info:
+    nodes: yes
+  register: result
+
+- name: Get info on Docker Swarm and extended list of registered nodes
+  docker_swarm_info:
+    nodes: yes
+    verbose_output: yes
+  register: result
+
+- name: Get info on Docker Swarm and filtered list of registered nodes
+  docker_swarm_info:
+    nodes: yes
+    nodes_filters:
+      name: mynode
+  register: result
+
+- debug:
+    var: result.swarm_facts
+
+- name: Get the swarm unlock key
+  docker_swarm_info:
+    unlock_key: yes
+  register: result
+
+- debug:
+    var: result.swarm_unlock_key
+
+'''
+
+RETURN = '''
+can_talk_to_docker:
+  description:
+    - Will be C(true) if the module can talk to the docker daemon.
+  returned: both on success and on error
+  type: bool
+docker_swarm_active:
+  description:
+    - Will be C(true) if the module can talk to the docker daemon,
+      and the docker daemon is in Swarm mode.
+  returned: both on success and on error
+  type: bool
+docker_swarm_manager:
+  description:
+    - Will be C(true) if the module can talk to the docker daemon,
+      the docker daemon is in Swarm mode, and the current node is
+      a manager node.
+    - Only if this one is C(true), the module will not fail.
+  returned: both on success and on error
+  type: bool
+swarm_facts:
+  description:
+    - Facts representing the basic state of the docker Swarm cluster.
+    - Contains tokens to connect to the Swarm.
+  returned: always
+  type: dict
+swarm_unlock_key:
+  description:
+    - Contains the key needed to unlock the swarm.
+  returned: When I(unlock_key) is C(true).
+  type: str
+nodes:
+  description:
+    - List of dict objects containing the basic information about each node.
+      Keys match the C(docker node ls) output unless I(verbose_output=yes).
+      See description for I(verbose_output).
+  returned: When I(nodes) is C(yes)
+  type: list
+  elements: dict
+services:
+  description:
+    - List of dict objects containing the basic information about each service.
+      Keys match the C(docker service ls) output unless I(verbose_output=yes).
+      See description for I(verbose_output).
+  returned: When I(services) is C(yes)
+  type: list
+  elements: dict
+tasks:
+  description:
+    - List of dict objects containing the basic information about each task.
+      Keys match the C(docker service ps) output unless I(verbose_output=yes).
+      See description for I(verbose_output).
+ returned: When I(tasks) is C(yes) + type: list + elements: dict + +''' + +import traceback + +try: + from docker.errors import DockerException, APIError +except ImportError: + # missing Docker SDK for Python handled in ansible.module_utils.docker_common + pass + +from ansible.module_utils._text import to_native + +from ansible_collections.community.general.plugins.module_utils.docker.swarm import AnsibleDockerSwarmClient +from ansible_collections.community.general.plugins.module_utils.docker.common import ( + DockerBaseClass, + clean_dict_booleans_for_docker_api, + RequestException, +) + + +class DockerSwarmManager(DockerBaseClass): + + def __init__(self, client, results): + + super(DockerSwarmManager, self).__init__() + + self.client = client + self.results = results + self.verbose_output = self.client.module.params['verbose_output'] + + listed_objects = ['tasks', 'services', 'nodes'] + + self.client.fail_task_if_not_swarm_manager() + + self.results['swarm_facts'] = self.get_docker_swarm_facts() + + for docker_object in listed_objects: + if self.client.module.params[docker_object]: + returned_name = docker_object + filter_name = docker_object + "_filters" + filters = clean_dict_booleans_for_docker_api(client.module.params.get(filter_name)) + self.results[returned_name] = self.get_docker_items_list(docker_object, filters) + if self.client.module.params['unlock_key']: + self.results['swarm_unlock_key'] = self.get_docker_swarm_unlock_key() + + def get_docker_swarm_facts(self): + try: + return self.client.inspect_swarm() + except APIError as exc: + self.client.fail("Error inspecting docker swarm: %s" % to_native(exc)) + + def get_docker_items_list(self, docker_object=None, filters=None): + items = None + items_list = [] + + try: + if docker_object == 'nodes': + items = self.client.nodes(filters=filters) + elif docker_object == 'tasks': + items = self.client.tasks(filters=filters) + elif docker_object == 'services': + items = self.client.services(filters=filters) + except APIError as exc: + self.client.fail("Error inspecting docker swarm for object '%s': %s" % + (docker_object, to_native(exc))) + + if self.verbose_output: + return items + + for item in items: + item_record = dict() + + if docker_object == 'nodes': + item_record = self.get_essential_facts_nodes(item) + elif docker_object == 'tasks': + item_record = self.get_essential_facts_tasks(item) + elif docker_object == 'services': + item_record = self.get_essential_facts_services(item) + if item_record['Mode'] == 'Global': + item_record['Replicas'] = len(items) + items_list.append(item_record) + + return items_list + + @staticmethod + def get_essential_facts_nodes(item): + object_essentials = dict() + + object_essentials['ID'] = item.get('ID') + object_essentials['Hostname'] = item['Description']['Hostname'] + object_essentials['Status'] = item['Status']['State'] + object_essentials['Availability'] = item['Spec']['Availability'] + if 'ManagerStatus' in item: + object_essentials['ManagerStatus'] = item['ManagerStatus']['Reachability'] + if 'Leader' in item['ManagerStatus'] and item['ManagerStatus']['Leader'] is True: + object_essentials['ManagerStatus'] = "Leader" + else: + object_essentials['ManagerStatus'] = None + object_essentials['EngineVersion'] = item['Description']['Engine']['EngineVersion'] + + return object_essentials + + def get_essential_facts_tasks(self, item): + object_essentials = dict() + + object_essentials['ID'] = item['ID'] + # Returning container ID to not trigger another connection to host + # Container ID is 
sufficient to get extended info in other tasks + object_essentials['ContainerID'] = item['Status']['ContainerStatus']['ContainerID'] + object_essentials['Image'] = item['Spec']['ContainerSpec']['Image'] + object_essentials['Node'] = self.client.get_node_name_by_id(item['NodeID']) + object_essentials['DesiredState'] = item['DesiredState'] + object_essentials['CurrentState'] = item['Status']['State'] + if 'Err' in item['Status']: + object_essentials['Error'] = item['Status']['Err'] + else: + object_essentials['Error'] = None + + return object_essentials + + @staticmethod + def get_essential_facts_services(item): + object_essentials = dict() + + object_essentials['ID'] = item['ID'] + object_essentials['Name'] = item['Spec']['Name'] + if 'Replicated' in item['Spec']['Mode']: + object_essentials['Mode'] = "Replicated" + object_essentials['Replicas'] = item['Spec']['Mode']['Replicated']['Replicas'] + elif 'Global' in item['Spec']['Mode']: + object_essentials['Mode'] = "Global" + # Number of replicas have to be updated in calling method or may be left as None + object_essentials['Replicas'] = None + object_essentials['Image'] = item['Spec']['TaskTemplate']['ContainerSpec']['Image'] + if 'Ports' in item['Spec']['EndpointSpec']: + object_essentials['Ports'] = item['Spec']['EndpointSpec']['Ports'] + else: + object_essentials['Ports'] = [] + + return object_essentials + + def get_docker_swarm_unlock_key(self): + unlock_key = self.client.get_unlock_key() or {} + return unlock_key.get('UnlockKey') or None + + +def main(): + argument_spec = dict( + nodes=dict(type='bool', default=False), + nodes_filters=dict(type='dict'), + tasks=dict(type='bool', default=False), + tasks_filters=dict(type='dict'), + services=dict(type='bool', default=False), + services_filters=dict(type='dict'), + unlock_key=dict(type='bool', default=False), + verbose_output=dict(type='bool', default=False), + ) + option_minimal_versions = dict( + unlock_key=dict(docker_py_version='2.7.0', docker_api_version='1.25'), + ) + + client = AnsibleDockerSwarmClient( + argument_spec=argument_spec, + supports_check_mode=True, + min_docker_version='1.10.0', + min_docker_api_version='1.24', + option_minimal_versions=option_minimal_versions, + fail_results=dict( + can_talk_to_docker=False, + docker_swarm_active=False, + docker_swarm_manager=False, + ), + ) + client.fail_results['can_talk_to_docker'] = True + client.fail_results['docker_swarm_active'] = client.check_if_swarm_node() + client.fail_results['docker_swarm_manager'] = client.check_if_swarm_manager() + + try: + results = dict( + changed=False, + ) + + DockerSwarmManager(client, results) + results.update(client.fail_results) + client.module.exit_json(**results) + except DockerException as e: + client.fail('An unexpected docker error occurred: {0}'.format(e), exception=traceback.format_exc()) + except RequestException as e: + client.fail('An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(e), exception=traceback.format_exc()) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/docker/docker_swarm_service.py b/plugins/modules/cloud/docker/docker_swarm_service.py new file mode 100644 index 0000000000..3bf40571c9 --- /dev/null +++ b/plugins/modules/cloud/docker/docker_swarm_service.py @@ -0,0 +1,2972 @@ +#!/usr/bin/python +# +# (c) 2017, Dario Zanzico (git@dariozanzico.com) +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, 
print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'status': ['preview'],
+                    'supported_by': 'community',
+                    'metadata_version': '1.1'}
+DOCUMENTATION = '''
+---
+module: docker_swarm_service
+author:
+  - "Dario Zanzico (@dariko)"
+  - "Jason Witkowski (@jwitko)"
+  - "Hannes Ljungberg (@hannseman)"
+short_description: docker swarm service
+description:
+  - Manages docker services via a swarm manager node.
+options:
+  args:
+    description:
+      - List of arguments to be passed to the container.
+      - Corresponds to the C(ARG) parameter of C(docker service create).
+    type: list
+    elements: str
+  command:
+    description:
+      - Command to execute when the container starts.
+      - A command may be either a string or a list of strings.
+      - Corresponds to the C(COMMAND) parameter of C(docker service create).
+    type: raw
+  configs:
+    description:
+      - List of dictionaries describing the service configs.
+      - Corresponds to the C(--config) option of C(docker service create).
+      - Requires API version >= 1.30.
+    type: list
+    elements: dict
+    suboptions:
+      config_id:
+        description:
+          - Config's ID.
+        type: str
+      config_name:
+        description:
+          - Config's name as defined at its creation.
+        type: str
+        required: yes
+      filename:
+        description:
+          - Name of the file containing the config. Defaults to the I(config_name) if not specified.
+        type: str
+      uid:
+        description:
+          - UID of the config file's owner.
+        type: str
+      gid:
+        description:
+          - GID of the config file's group.
+        type: str
+      mode:
+        description:
+          - File access mode inside the container. Must be an octal number (like C(0644) or C(0444)).
+        type: int
+  constraints:
+    description:
+      - List of the service constraints.
+      - Corresponds to the C(--constraint) option of C(docker service create).
+      - Deprecated in 2.8, will be removed in 2.12. Use parameter C(placement.constraints) instead.
+    type: list
+    elements: str
+  container_labels:
+    description:
+      - Dictionary of key value pairs.
+      - Corresponds to the C(--container-label) option of C(docker service create).
+    type: dict
+  dns:
+    description:
+      - List of custom DNS servers.
+      - Corresponds to the C(--dns) option of C(docker service create).
+      - Requires API version >= 1.25.
+    type: list
+    elements: str
+  dns_search:
+    description:
+      - List of custom DNS search domains.
+      - Corresponds to the C(--dns-search) option of C(docker service create).
+      - Requires API version >= 1.25.
+    type: list
+    elements: str
+  dns_options:
+    description:
+      - List of custom DNS options.
+      - Corresponds to the C(--dns-option) option of C(docker service create).
+      - Requires API version >= 1.25.
+    type: list
+    elements: str
+  endpoint_mode:
+    description:
+      - Service endpoint mode.
+      - Corresponds to the C(--endpoint-mode) option of C(docker service create).
+      - Requires API version >= 1.25.
+    type: str
+    choices:
+      - vip
+      - dnsrr
+  env:
+    description:
+      - List or dictionary of the service environment variables.
+      - If passed a list, each item needs to be in the format of C(KEY=VALUE).
+      - If passed a dictionary, values which might be parsed as numbers,
+        booleans or other types by the YAML parser must be quoted (e.g. C("true"))
+        in order to avoid data loss.
+      - Corresponds to the C(--env) option of C(docker service create).
+    type: raw
+  env_files:
+    description:
+      - List of paths to files, present on the target, containing environment variables C(FOO=BAR).
+      - The order of the list is significant in determining the value assigned to a
+        variable that shows up more than once.
+      - If a variable is also present in I(env), the I(env) value will override.
+    type: list
+    elements: path
+  force_update:
+    description:
+      - Force update even if no changes require it.
+      - Corresponds to the C(--force) option of C(docker service update).
+      - Requires API version >= 1.25.
+    type: bool
+    default: no
+  groups:
+    description:
+      - List of additional group names and/or IDs that the container process will run as.
+      - Corresponds to the C(--group) option of C(docker service update).
+      - Requires API version >= 1.25.
+    type: list
+    elements: str
+  healthcheck:
+    description:
+      - Configure a check that is run to determine whether or not containers for this service are "healthy".
+        See the docs for the L(HEALTHCHECK Dockerfile instruction,https://docs.docker.com/engine/reference/builder/#healthcheck)
+        for details on how healthchecks work.
+      - "I(interval), I(timeout) and I(start_period) are specified as durations. They accept a duration as a string in a format
+        that looks like: C(5h34m56s), C(1m30s) etc. The supported units are C(us), C(ms), C(s), C(m) and C(h)."
+      - Requires API version >= 1.25.
+    type: dict
+    suboptions:
+      test:
+        description:
+          - Command to run to check health.
+          - Must be either a string or a list. If it is a list, the first item must be one of C(NONE), C(CMD) or C(CMD-SHELL).
+        type: raw
+      interval:
+        description:
+          - Time between running the check.
+        type: str
+      timeout:
+        description:
+          - Maximum time to allow one check to run.
+        type: str
+      retries:
+        description:
+          - Consecutive failures needed to report unhealthy. It accepts an integer value.
+        type: int
+      start_period:
+        description:
+          - Start period for the container to initialize before starting health-retries countdown.
+        type: str
+  hostname:
+    description:
+      - Container hostname.
+      - Corresponds to the C(--hostname) option of C(docker service create).
+      - Requires API version >= 1.25.
+    type: str
+  hosts:
+    description:
+      - Dict of host-to-IP mappings, where each host name is a key in the dictionary.
+        Each host name will be added to the container's /etc/hosts file.
+      - Corresponds to the C(--host) option of C(docker service create).
+      - Requires API version >= 1.25.
+    type: dict
+  image:
+    description:
+      - Service image path and tag.
+      - Corresponds to the C(IMAGE) parameter of C(docker service create).
+    type: str
+  labels:
+    description:
+      - Dictionary of key value pairs.
+      - Corresponds to the C(--label) option of C(docker service create).
+    type: dict
+  limits:
+    description:
+      - Configures service resource limits.
+    suboptions:
+      cpus:
+        description:
+          - Service CPU limit. C(0) equals no limit.
+          - Corresponds to the C(--limit-cpu) option of C(docker service create).
+        type: float
+      memory:
+        description:
+          - "Service memory limit in format C(<number>[<unit>]). Number is a positive integer.
+            Unit can be C(B) (byte), C(K) (kibibyte, 1024B), C(M) (mebibyte), C(G) (gibibyte),
+            C(T) (tebibyte), or C(P) (pebibyte)."
+          - C(0) equals no limit.
+          - Omitting the unit defaults to bytes.
+          - Corresponds to the C(--limit-memory) option of C(docker service create).
+        type: str
+    type: dict
+  limit_cpu:
+    description:
+      - Service CPU limit. C(0) equals no limit.
+      - Corresponds to the C(--limit-cpu) option of C(docker service create).
+      - Deprecated in 2.8, will be removed in 2.12. Use parameter C(limits.cpus) instead.
+    type: float
+  limit_memory:
+    description:
+      - "Service memory limit in format C(<number>[<unit>]). Number is a positive integer.
+        Unit can be C(B) (byte), C(K) (kibibyte, 1024B), C(M) (mebibyte), C(G) (gibibyte),
+        C(T) (tebibyte), or C(P) (pebibyte)."
+      - C(0) equals no limit.
+      - Omitting the unit defaults to bytes.
+      - Corresponds to the C(--limit-memory) option of C(docker service create).
+      - Deprecated in 2.8, will be removed in 2.12. Use parameter C(limits.memory) instead.
+    type: str
+  logging:
+    description:
+      - "Logging configuration for the service."
+    suboptions:
+      driver:
+        description:
+          - Configure the logging driver for a service.
+          - Corresponds to the C(--log-driver) option of C(docker service create).
+        type: str
+      options:
+        description:
+          - Options for service logging driver.
+          - Corresponds to the C(--log-opt) option of C(docker service create).
+        type: dict
+    type: dict
+  log_driver:
+    description:
+      - Configure the logging driver for a service.
+      - Corresponds to the C(--log-driver) option of C(docker service create).
+      - Deprecated in 2.8, will be removed in 2.12. Use parameter C(logging.driver) instead.
+    type: str
+  log_driver_options:
+    description:
+      - Options for service logging driver.
+      - Corresponds to the C(--log-opt) option of C(docker service create).
+      - Deprecated in 2.8, will be removed in 2.12. Use parameter C(logging.options) instead.
+    type: dict
+  mode:
+    description:
+      - Service replication mode.
+      - Service will be removed and recreated when changed.
+      - Corresponds to the C(--mode) option of C(docker service create).
+    type: str
+    default: replicated
+    choices:
+      - replicated
+      - global
+  mounts:
+    description:
+      - List of dictionaries describing the service mounts.
+      - Corresponds to the C(--mount) option of C(docker service create).
+    type: list
+    elements: dict
+    suboptions:
+      source:
+        description:
+          - Mount source (e.g. a volume name or a host path).
+          - Must be specified if I(type) is not C(tmpfs).
+        type: str
+      target:
+        description:
+          - Container path.
+        type: str
+        required: yes
+      type:
+        description:
+          - The mount type.
+          - Note that C(npipe) is only supported by Docker for Windows. Also note that C(npipe) was added in Ansible 2.9.
+        type: str
+        default: bind
+        choices:
+          - bind
+          - volume
+          - tmpfs
+          - npipe
+      readonly:
+        description:
+          - Whether the mount should be read-only.
+        type: bool
+      labels:
+        description:
+          - Volume labels to apply.
+        type: dict
+      propagation:
+        description:
+          - The propagation mode to use.
+          - Can only be used when I(type) is C(bind).
+        type: str
+        choices:
+          - shared
+          - slave
+          - private
+          - rshared
+          - rslave
+          - rprivate
+      no_copy:
+        description:
+          - Disable copying of data from a container when a volume is created.
+          - Can only be used when I(type) is C(volume).
+        type: bool
+      driver_config:
+        description:
+          - Volume driver configuration.
+          - Can only be used when I(type) is C(volume).
+        suboptions:
+          name:
+            description:
+              - Name of the volume-driver plugin to use for the volume.
+            type: str
+          options:
+            description:
+              - Options as key-value pairs to pass to the driver for this volume.
+            type: dict
+        type: dict
+      tmpfs_size:
+        description:
+          - "Size of the tmpfs mount in format C(<number>[<unit>]). Number is a positive integer.
+            Unit can be C(B) (byte), C(K) (kibibyte, 1024B), C(M) (mebibyte), C(G) (gibibyte),
+            C(T) (tebibyte), or C(P) (pebibyte)."
+          - Can only be used when I(type) is C(tmpfs).
+        type: str
+      tmpfs_mode:
+        description:
+          - File mode of the tmpfs in octal.
+          - Can only be used when I(type) is C(tmpfs).
+        type: int
+  name:
+    description:
+      - Service name.
+      - Corresponds to the C(--name) option of C(docker service create).
+    type: str
+    required: yes
+  networks:
+    description:
+      - List of the service networks names or dictionaries.
+      - When passed dictionaries, valid sub-options are I(name), which is required, and
+        I(aliases) and I(options).
+      - Prior to API version 1.29, updating and removing networks is not supported.
+        If changes are made, the service will then be removed and recreated.
+      - Corresponds to the C(--network) option of C(docker service create).
+    type: list
+    elements: raw
+  placement:
+    description:
+      - Configures service placement preferences and constraints.
+    suboptions:
+      constraints:
+        description:
+          - List of the service constraints.
+          - Corresponds to the C(--constraint) option of C(docker service create).
+        type: list
+        elements: str
+      preferences:
+        description:
+          - List of the placement preferences as key value pairs.
+          - Corresponds to the C(--placement-pref) option of C(docker service create).
+          - Requires API version >= 1.27.
+        type: list
+        elements: dict
+    type: dict
+  publish:
+    description:
+      - List of dictionaries describing the service published ports.
+      - Corresponds to the C(--publish) option of C(docker service create).
+      - Requires API version >= 1.25.
+    type: list
+    elements: dict
+    suboptions:
+      published_port:
+        description:
+          - The port to make externally available.
+        type: int
+        required: yes
+      target_port:
+        description:
+          - The port inside the container to expose.
+        type: int
+        required: yes
+      protocol:
+        description:
+          - What protocol to use.
+        type: str
+        default: tcp
+        choices:
+          - tcp
+          - udp
+      mode:
+        description:
+          - What publish mode to use.
+          - Requires API version >= 1.32.
+        type: str
+        choices:
+          - ingress
+          - host
+  read_only:
+    description:
+      - Mount the container's root filesystem as read only.
+      - Corresponds to the C(--read-only) option of C(docker service create).
+    type: bool
+  replicas:
+    description:
+      - Number of containers instantiated in the service. Valid only if I(mode) is C(replicated).
+      - If set to C(-1), and service is not present, service replicas will be set to C(1).
+      - If set to C(-1), and service is present, service replicas will be unchanged.
+      - Corresponds to the C(--replicas) option of C(docker service create).
+    type: int
+    default: -1
+  reservations:
+    description:
+      - Configures service resource reservations.
+    suboptions:
+      cpus:
+        description:
+          - Service CPU reservation. C(0) equals no reservation.
+          - Corresponds to the C(--reserve-cpu) option of C(docker service create).
+        type: float
+      memory:
+        description:
+          - "Service memory reservation in format C(<number>[<unit>]). Number is a positive integer.
+            Unit can be C(B) (byte), C(K) (kibibyte, 1024B), C(M) (mebibyte), C(G) (gibibyte),
+            C(T) (tebibyte), or C(P) (pebibyte)."
+          - C(0) equals no reservation.
+          - Omitting the unit defaults to bytes.
+          - Corresponds to the C(--reserve-memory) option of C(docker service create).
+        type: str
+    type: dict
+  reserve_cpu:
+    description:
+      - Service CPU reservation. C(0) equals no reservation.
+      - Corresponds to the C(--reserve-cpu) option of C(docker service create).
+      - Deprecated in 2.8, will be removed in 2.12. Use parameter C(reservations.cpus) instead.
+    type: float
+  reserve_memory:
+    description:
+      - "Service memory reservation in format C(<number>[<unit>]). Number is a positive integer.
+        Unit can be C(B) (byte), C(K) (kibibyte, 1024B), C(M) (mebibyte), C(G) (gibibyte),
+        C(T) (tebibyte), or C(P) (pebibyte)."
+      - C(0) equals no reservation.
+      - Omitting the unit defaults to bytes.
+ - Corresponds to the C(--reserve-memory) option of C(docker service create). + - Deprecated in 2.8, will be removed in 2.12. Use parameter C(reservations.memory) instead. + type: str + resolve_image: + description: + - Whether the current image digest should be resolved from the registry and updated if changed. + - Requires API version >= 1.30. + type: bool + default: no + restart_config: + description: + - Configures whether and how to restart containers when they exit. + suboptions: + condition: + description: + - Restart condition of the service. + - Corresponds to the C(--restart-condition) option of C(docker service create). + type: str + choices: + - none + - on-failure + - any + delay: + description: + - Delay between restarts. + - "Accepts a string in a format that looks like: + C(5h34m56s), C(1m30s) etc. The supported units are C(us), C(ms), C(s), C(m) and C(h)." + - Corresponds to the C(--restart-delay) option of C(docker service create). + type: str + max_attempts: + description: + - Maximum number of service restarts. + - Corresponds to the C(--restart-max-attempts) option of C(docker service create). + type: int + window: + description: + - Restart policy evaluation window. + - "Accepts a string in a format that looks like: + C(5h34m56s), C(1m30s) etc. The supported units are C(us), C(ms), C(s), C(m) and C(h)." + - Corresponds to the C(--restart-window) option of C(docker service create). + type: str + type: dict + restart_policy: + description: + - Restart condition of the service. + - Corresponds to the C(--restart-condition) option of C(docker service create). + - Deprecated in 2.8, will be removed in 2.12. Use parameter C(restart_config.condition) instead. + type: str + choices: + - none + - on-failure + - any + restart_policy_attempts: + description: + - Maximum number of service restarts. + - Corresponds to the C(--restart-max-attempts) option of C(docker service create). + - Deprecated in 2.8, will be removed in 2.12. Use parameter C(restart_config.max_attempts) instead. + type: int + restart_policy_delay: + description: + - Delay between restarts. + - "Accepts a duration as an integer in nanoseconds or as a string in a format that looks like: + C(5h34m56s), C(1m30s) etc. The supported units are C(us), C(ms), C(s), C(m) and C(h)." + - Corresponds to the C(--restart-delay) option of C(docker service create). + - Deprecated in 2.8, will be removed in 2.12. Use parameter C(restart_config.delay) instead. + type: raw + restart_policy_window: + description: + - Restart policy evaluation window. + - "Accepts a duration as an integer in nanoseconds or as a string in a format that looks like: + C(5h34m56s), C(1m30s) etc. The supported units are C(us), C(ms), C(s), C(m) and C(h)." + - Corresponds to the C(--restart-window) option of C(docker service create). + - Deprecated in 2.8, will be removed in 2.12. Use parameter C(restart_config.window) instead. + type: raw + rollback_config: + description: + - Configures how the service should be rolled back in case of a failing update. + suboptions: + parallelism: + description: + - The number of containers to roll back at a time. If set to 0, all containers roll back simultaneously. + - Corresponds to the C(--rollback-parallelism) option of C(docker service create). + - Requires API version >= 1.28. + type: int + delay: + description: + - Delay between task rollbacks. + - "Accepts a string in a format that looks like: + C(5h34m56s), C(1m30s) etc.
+ - Corresponds to the C(--rollback-delay) option of C(docker service create). + - Requires API version >= 1.28. + type: str + failure_action: + description: + - Action to take in case of rollback failure. + - Corresponds to the C(--rollback-failure-action) option of C(docker service create). + - Requires API version >= 1.28. + type: str + choices: + - continue + - pause + monitor: + description: + - Duration after each task rollback to monitor for failure. + - "Accepts a string in a format that looks like: + C(5h34m56s), C(1m30s) etc. The supported units are C(us), C(ms), C(s), C(m) and C(h)." + - Corresponds to the C(--rollback-monitor) option of C(docker service create). + - Requires API version >= 1.28. + type: str + max_failure_ratio: + description: + - Fraction of tasks that may fail during a rollback. + - Corresponds to the C(--rollback-max-failure-ratio) option of C(docker service create). + - Requires API version >= 1.28. + type: float + order: + description: + - Specifies the order of operations during rollbacks. + - Corresponds to the C(--rollback-order) option of C(docker service create). + - Requires API version >= 1.29. + type: str + type: dict + secrets: + description: + - List of dictionaries describing the service secrets. + - Corresponds to the C(--secret) option of C(docker service create). + - Requires API version >= 1.25. + type: list + elements: dict + suboptions: + secret_id: + description: + - Secret's ID. + type: str + secret_name: + description: + - Secret's name as defined at its creation. + type: str + required: yes + filename: + description: + - Name of the file containing the secret. Defaults to the I(secret_name) if not specified. + - Corresponds to the C(target) key of C(docker service create --secret). + type: str + uid: + description: + - UID of the secret file's owner. + type: str + gid: + description: + - GID of the secret file's group. + type: str + mode: + description: + - File access mode inside the container. Must be an octal number (like C(0644) or C(0444)). + type: int + state: + description: + - C(absent) - A service matching the specified name will be removed and have its tasks stopped. + - C(present) - Asserts the existence of a service matching the name and provided configuration parameters. + Unspecified configuration parameters will be set to Docker defaults. + type: str + default: present + choices: + - present + - absent + stop_grace_period: + description: + - Time to wait before force killing a container. + - "Accepts a duration as a string in a format that looks like: + C(5h34m56s), C(1m30s) etc. The supported units are C(us), C(ms), C(s), C(m) and C(h)." + - Corresponds to the C(--stop-grace-period) option of C(docker service create). + type: str + stop_signal: + description: + - Override default signal used to stop the container. + - Corresponds to the C(--stop-signal) option of C(docker service create). + type: str + tty: + description: + - Allocate a pseudo-TTY. + - Corresponds to the C(--tty) option of C(docker service create). + - Requires API version >= 1.25. + type: bool + update_config: + description: + - Configures how the service should be updated. Useful for configuring rolling updates. + suboptions: + parallelism: + description: + - Rolling update parallelism. + - Corresponds to the C(--update-parallelism) option of C(docker service create). + type: int + delay: + description: + - Rolling update delay. + - "Accepts a string in a format that looks like: + C(5h34m56s), C(1m30s) etc.
The supported units are C(us), C(ms), C(s), C(m) and C(h)." + - Corresponds to the C(--update-delay) option of C(docker service create). + type: str + failure_action: + description: + - Action to take in case of container failure. + - Corresponds to the C(--update-failure-action) option of C(docker service create). + - Usage of I(rollback) requires API version >= 1.29. + type: str + choices: + - continue + - pause + - rollback + monitor: + description: + - Time to monitor updated tasks for failures. + - "Accepts a string in a format that looks like: + C(5h34m56s), C(1m30s) etc. The supported units are C(us), C(ms), C(s), C(m) and C(h)." + - Corresponds to the C(--update-monitor) option of C(docker service create). + - Requires API version >= 1.25. + type: str + max_failure_ratio: + description: + - Fraction of tasks that may fail during an update before the failure action is invoked. + - Corresponds to the C(--update-max-failure-ratio) option of C(docker service create). + - Requires API version >= 1.25. + type: float + order: + description: + - Specifies the order of operations when rolling out an updated task. + - Corresponds to the C(--update-order) option of C(docker service create). + - Requires API version >= 1.29. + type: str + type: dict + update_delay: + description: + - Rolling update delay. + - "Accepts a duration as an integer in nanoseconds or as a string in a format that looks like: + C(5h34m56s), C(1m30s) etc. The supported units are C(us), C(ms), C(s), C(m) and C(h)." + - Corresponds to the C(--update-delay) option of C(docker service create). + - Before Ansible 2.8, the default value for this option was C(10). + - Deprecated in 2.8, will be removed in 2.12. Use parameter C(update_config.delay) instead. + type: raw + update_parallelism: + description: + - Rolling update parallelism. + - Corresponds to the C(--update-parallelism) option of C(docker service create). + - Before Ansible 2.8, the default value for this option was C(1). + - Deprecated in 2.8, will be removed in 2.12. Use parameter C(update_config.parallelism) instead. + type: int + update_failure_action: + description: + - Action to take in case of container failure. + - Corresponds to the C(--update-failure-action) option of C(docker service create). + - Usage of I(rollback) requires API version >= 1.29. + - Deprecated in 2.8, will be removed in 2.12. Use parameter C(update_config.failure_action) instead. + type: str + choices: + - continue + - pause + - rollback + update_monitor: + description: + - Time to monitor updated tasks for failures. + - "Accepts a duration as an integer in nanoseconds or as a string in a format that looks like: + C(5h34m56s), C(1m30s) etc. The supported units are C(us), C(ms), C(s), C(m) and C(h)." + - Corresponds to the C(--update-monitor) option of C(docker service create). + - Requires API version >= 1.25. + - Deprecated in 2.8, will be removed in 2.12. Use parameter C(update_config.monitor) instead. + type: raw + update_max_failure_ratio: + description: + - Fraction of tasks that may fail during an update before the failure action is invoked. + - Corresponds to the C(--update-max-failure-ratio) option of C(docker service create). + - Requires API version >= 1.25. + - Deprecated in 2.8, will be removed in 2.12. Use parameter C(update_config.max_failure_ratio) instead. + type: float + update_order: + description: + - Specifies the order of operations when rolling out an updated task. + - Corresponds to the C(--update-order) option of C(docker service create).
+ - Requires API version >= 1.29. + - Deprecated in 2.8, will be removed in 2.12. Use parameter C(update_config.order) instead. + type: str + choices: + - stop-first + - start-first + user: + description: + - Sets the username or UID used for the specified command. + - Before Ansible 2.8, the default value for this option was C(root). + - The default has been removed so that the user defined in the image is used if no user is specified here. + - Corresponds to the C(--user) option of C(docker service create). + type: str + working_dir: + description: + - Path to the working directory. + - Corresponds to the C(--workdir) option of C(docker service create). + type: str +extends_documentation_fragment: +- community.general.docker +- community.general.docker.docker_py_2_documentation + +requirements: + - "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 2.0.2" + - "Docker API >= 1.24" +notes: + - "Images will only resolve to the latest digest when using Docker API >= 1.30 and Docker SDK for Python >= 3.2.0. + When using older versions use C(force_update: true) to trigger the swarm to resolve a new image." +''' + +RETURN = ''' +swarm_service: + returned: always + type: dict + description: + - Dictionary of variables representing the current state of the service. + Matches the module parameters format. + - Note that facts are not part of registered vars but accessible directly. + - Note that before Ansible 2.7.9, the return variable was documented as C(ansible_swarm_service), + while the module actually returned a variable called C(ansible_docker_service). The variable + was renamed to C(swarm_service) in both code and documentation for Ansible 2.7.9 and Ansible 2.8.0. + In Ansible 2.7.x, the old name C(ansible_docker_service) can still be used. 
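+ # Note: duration values in the sample below (healthcheck interval/timeout/start_period, restart and update delays) are expressed in nanoseconds, e.g. 90000000000 equals 1m30s.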
+ sample: '{ + "args": [ + "3600" + ], + "command": [ + "sleep" + ], + "configs": null, + "constraints": [ + "node.role == manager", + "engine.labels.operatingsystem == ubuntu 14.04" + ], + "container_labels": null, + "dns": null, + "dns_options": null, + "dns_search": null, + "endpoint_mode": null, + "env": [ + "ENVVAR1=envvar1", + "ENVVAR2=envvar2" + ], + "force_update": null, + "groups": null, + "healthcheck": { + "interval": 90000000000, + "retries": 3, + "start_period": 30000000000, + "test": [ + "CMD", + "curl", + "--fail", + "http://nginx.host.com" + ], + "timeout": 10000000000 + }, + "healthcheck_disabled": false, + "hostname": null, + "hosts": null, + "image": "alpine:latest@sha256:b3dbf31b77fd99d9c08f780ce6f5282aba076d70a513a8be859d8d3a4d0c92b8", + "labels": { + "com.example.department": "Finance", + "com.example.description": "Accounting webapp" + }, + "limit_cpu": 0.5, + "limit_memory": 52428800, + "log_driver": "fluentd", + "log_driver_options": { + "fluentd-address": "127.0.0.1:24224", + "fluentd-async-connect": "true", + "tag": "myservice" + }, + "mode": "replicated", + "mounts": [ + { + "readonly": false, + "source": "/tmp/", + "target": "/remote_tmp/", + "type": "bind", + "labels": null, + "propagation": null, + "no_copy": null, + "driver_config": null, + "tmpfs_size": null, + "tmpfs_mode": null + } + ], + "networks": null, + "placement_preferences": [ + { + "spread": "node.labels.mylabel" + } + ], + "publish": null, + "read_only": null, + "replicas": 1, + "reserve_cpu": 0.25, + "reserve_memory": 20971520, + "restart_policy": "on-failure", + "restart_policy_attempts": 3, + "restart_policy_delay": 5000000000, + "restart_policy_window": 120000000000, + "secrets": null, + "stop_grace_period": null, + "stop_signal": null, + "tty": null, + "update_delay": 10000000000, + "update_failure_action": null, + "update_max_failure_ratio": null, + "update_monitor": null, + "update_order": "stop-first", + "update_parallelism": 2, + "user": null, + "working_dir": null + }' +changes: + returned: always + description: + - List of changed service attributes if a service has been altered, [] otherwise. 
+ type: list + elements: str + sample: ['container_labels', 'replicas'] +rebuilt: + returned: always + description: + - True if the service has been recreated (removed and created). + type: bool + sample: True +''' + +EXAMPLES = ''' +- name: Set command and arguments + docker_swarm_service: + name: myservice + image: alpine + command: sleep + args: + - "3600" + +- name: Set a bind mount + docker_swarm_service: + name: myservice + image: alpine + mounts: + - source: /tmp/ + target: /remote_tmp/ + type: bind + +- name: Set service labels + docker_swarm_service: + name: myservice + image: alpine + labels: + com.example.description: "Accounting webapp" + com.example.department: "Finance" + +- name: Set environment variables + docker_swarm_service: + name: myservice + image: alpine + env: + ENVVAR1: envvar1 + ENVVAR2: envvar2 + env_files: + - envs/common.env + - envs/apps/web.env + +- name: Set fluentd logging + docker_swarm_service: + name: myservice + image: alpine + logging: + driver: fluentd + options: + fluentd-address: "127.0.0.1:24224" + fluentd-async-connect: "true" + tag: myservice + +- name: Set restart policies + docker_swarm_service: + name: myservice + image: alpine + restart_config: + condition: on-failure + delay: 5s + max_attempts: 3 + window: 120s + +- name: Set update config + docker_swarm_service: + name: myservice + image: alpine + update_config: + parallelism: 2 + delay: 10s + order: stop-first + +- name: Set rollback config + docker_swarm_service: + name: myservice + image: alpine + update_config: + failure_action: rollback + rollback_config: + parallelism: 2 + delay: 10s + order: stop-first + +- name: Set placement preferences + docker_swarm_service: + name: myservice + image: alpine:edge + placement: + preferences: + - spread: node.labels.mylabel + constraints: + - node.role == manager + - engine.labels.operatingsystem == ubuntu 14.04 + +- name: Set configs + docker_swarm_service: + name: myservice + image: alpine:edge + configs: + - config_name: myconfig_name + filename: "/tmp/config.txt" + +- name: Set networks + docker_swarm_service: + name: myservice + image: alpine:edge + networks: + - mynetwork + +- name: Set networks as a dictionary + docker_swarm_service: + name: myservice + image: alpine:edge + networks: + - name: "mynetwork" + aliases: + - "mynetwork_alias" + options: + foo: bar + +- name: Set secrets + docker_swarm_service: + name: myservice + image: alpine:edge + secrets: + - secret_name: mysecret_name + filename: "/run/secrets/secret.txt" + +- name: Start service with healthcheck + docker_swarm_service: + name: myservice + image: nginx:1.13 + healthcheck: + # Check if nginx server is healthy by curl'ing the server. + # If this fails or times out, the healthcheck fails.
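+ # The interval, timeout and start_period values accept duration strings such as 1m30s or 10s.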
+ test: ["CMD", "curl", "--fail", "http://nginx.host.com"] + interval: 1m30s + timeout: 10s + retries: 3 + start_period: 30s + +- name: Configure service resources + docker_swarm_service: + name: myservice + image: alpine:edge + reservations: + cpus: 0.25 + memory: 20M + limits: + cpus: 0.50 + memory: 50M + +- name: Remove service + docker_swarm_service: + name: myservice + state: absent +''' + +import shlex +import time +import operator +import traceback + +from distutils.version import LooseVersion + +from ansible_collections.community.general.plugins.module_utils.docker.common import ( + AnsibleDockerClient, + DifferenceTracker, + DockerBaseClass, + convert_duration_to_nanosecond, + parse_healthcheck, + clean_dict_booleans_for_docker_api, + RequestException, +) + +from ansible.module_utils.basic import human_to_bytes +from ansible.module_utils.six import string_types +from ansible.module_utils._text import to_text + +try: + from docker import types + from docker.utils import ( + parse_repository_tag, + parse_env_file, + format_environment, + ) + from docker.errors import ( + APIError, + DockerException, + NotFound, + ) +except ImportError: + # missing Docker SDK for Python handled in ansible.module_utils.docker.common + pass + + +def get_docker_environment(env, env_files): + """ + Will return a list of "KEY=VALUE" items. Supplied env variable can + be either a list or a dictionary. + + If environment files are combined with explicit environment variables, + the explicit environment variables take precedence. + """ + env_dict = {} + if env_files: + for env_file in env_files: + parsed_env_file = parse_env_file(env_file) + for name, value in parsed_env_file.items(): + env_dict[name] = str(value) + if env is not None and isinstance(env, string_types): + env = env.split(',') + if env is not None and isinstance(env, dict): + for name, value in env.items(): + if not isinstance(value, string_types): + raise ValueError( + 'Non-string value found for env option. ' + 'Ambiguous env options must be wrapped in quotes to avoid YAML parsing. Key: %s' % name + ) + env_dict[name] = str(value) + elif env is not None and isinstance(env, list): + for item in env: + try: + name, value = item.split('=', 1) + except ValueError: + raise ValueError('Invalid environment variable found in list, needs to be in format KEY=VALUE.') + env_dict[name] = value + elif env is not None: + raise ValueError( + 'Invalid type for env %s (%s). Only list or dict allowed.' % (env, type(env)) + ) + env_list = format_environment(env_dict) + if not env_list: + if env is not None or env_files is not None: + return [] + else: + return None + return sorted(env_list) + + +def get_docker_networks(networks, network_ids): + """ + Validate a list of network names or a list of network dictionaries. + Network names will be resolved to ids by using the network_ids mapping. + """ + if networks is None: + return None + parsed_networks = [] + for network in networks: + if isinstance(network, string_types): + parsed_network = {'name': network} + elif isinstance(network, dict): + if 'name' not in network: + raise TypeError( + '"name" is required when networks are passed as dictionaries.' 
+ ) + name = network.pop('name') + parsed_network = {'name': name} + aliases = network.pop('aliases', None) + if aliases is not None: + if not isinstance(aliases, list): + raise TypeError('"aliases" network option is only allowed as a list') + if not all( + isinstance(alias, string_types) for alias in aliases + ): + raise TypeError('Only strings are allowed as network aliases.') + parsed_network['aliases'] = aliases + options = network.pop('options', None) + if options is not None: + if not isinstance(options, dict): + raise TypeError('Only dict is allowed as network options.') + parsed_network['options'] = clean_dict_booleans_for_docker_api(options) + # Check if any invalid keys left + if network: + invalid_keys = ', '.join(network.keys()) + raise TypeError( + '%s are not valid keys for the networks option' % invalid_keys + ) + + else: + raise TypeError( + 'Only a list of strings or dictionaries are allowed to be passed as networks.' + ) + network_name = parsed_network.pop('name') + try: + parsed_network['id'] = network_ids[network_name] + except KeyError as e: + raise ValueError('Could not find a network named: %s.' % e) + parsed_networks.append(parsed_network) + return parsed_networks or [] + + +def get_nanoseconds_from_raw_option(name, value): + if value is None: + return None + elif isinstance(value, int): + return value + elif isinstance(value, string_types): + try: + return int(value) + except ValueError: + return convert_duration_to_nanosecond(value) + else: + raise ValueError( + 'Invalid type for %s %s (%s). Only string or int allowed.' + % (name, value, type(value)) + ) + + +def get_value(key, values, default=None): + value = values.get(key) + return value if value is not None else default + + +def has_dict_changed(new_dict, old_dict): + """ + Check if new_dict has differences compared to old_dict while + ignoring keys in old_dict which are None in new_dict. + """ + if new_dict is None: + return False + if not new_dict and old_dict: + return True + if not old_dict and new_dict: + return True + defined_options = dict( + (option, value) for option, value in new_dict.items() + if value is not None + ) + for option, value in defined_options.items(): + old_value = old_dict.get(option) + if not value and not old_value: + continue + if value != old_value: + return True + return False + + +def has_list_changed(new_list, old_list, sort_lists=True, sort_key=None): + """ + Check two lists have differences. Sort lists by default. + """ + + def sort_list(unsorted_list): + """ + Sort a given list. + The list may contain dictionaries, so use the sort key to handle them. + """ + + if unsorted_list and isinstance(unsorted_list[0], dict): + if not sort_key: + raise Exception( + 'A sort key was not specified when sorting list' + ) + else: + return sorted(unsorted_list, key=lambda k: k[sort_key]) + + # Either the list is empty or does not contain dictionaries + try: + return sorted(unsorted_list) + except TypeError: + return unsorted_list + + if new_list is None: + return False + old_list = old_list or [] + if len(new_list) != len(old_list): + return True + + if sort_lists: + zip_data = zip(sort_list(new_list), sort_list(old_list)) + else: + zip_data = zip(new_list, old_list) + for new_item, old_item in zip_data: + is_same_type = type(new_item) == type(old_item) + if not is_same_type: + if isinstance(new_item, string_types) and isinstance(old_item, string_types): + # Even though the types are different between these items, + # they are both strings. Try matching on the same string type. 
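+ # Cast the old item to the new item's string type before comparing, so that a unicode string and a native string with the same content are not reported as a change.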
+ try: + new_item_type = type(new_item) + old_item_casted = new_item_type(old_item) + if new_item != old_item_casted: + return True + else: + continue + except UnicodeEncodeError: + # Fallback to assuming the strings are different + return True + else: + return True + if isinstance(new_item, dict): + if has_dict_changed(new_item, old_item): + return True + elif new_item != old_item: + return True + + return False + + +def have_networks_changed(new_networks, old_networks): + """Special case list checking for networks to sort aliases""" + + if new_networks is None: + return False + old_networks = old_networks or [] + if len(new_networks) != len(old_networks): + return True + + zip_data = zip( + sorted(new_networks, key=lambda k: k['id']), + sorted(old_networks, key=lambda k: k['id']) + ) + + for new_item, old_item in zip_data: + new_item = dict(new_item) + old_item = dict(old_item) + # Sort the aliases + if 'aliases' in new_item: + new_item['aliases'] = sorted(new_item['aliases'] or []) + if 'aliases' in old_item: + old_item['aliases'] = sorted(old_item['aliases'] or []) + + if has_dict_changed(new_item, old_item): + return True + + return False + + +class DockerService(DockerBaseClass): + def __init__(self, docker_api_version, docker_py_version): + super(DockerService, self).__init__() + self.image = "" + self.command = None + self.args = None + self.endpoint_mode = None + self.dns = None + self.healthcheck = None + self.healthcheck_disabled = None + self.hostname = None + self.hosts = None + self.tty = None + self.dns_search = None + self.dns_options = None + self.env = None + self.force_update = None + self.groups = None + self.log_driver = None + self.log_driver_options = None + self.labels = None + self.container_labels = None + self.limit_cpu = None + self.limit_memory = None + self.reserve_cpu = None + self.reserve_memory = None + self.mode = "replicated" + self.user = None + self.mounts = None + self.configs = None + self.secrets = None + self.constraints = None + self.networks = None + self.stop_grace_period = None + self.stop_signal = None + self.publish = None + self.placement_preferences = None + self.replicas = -1 + self.service_id = False + self.service_version = False + self.read_only = None + self.restart_policy = None + self.restart_policy_attempts = None + self.restart_policy_delay = None + self.restart_policy_window = None + self.rollback_config = None + self.update_delay = None + self.update_parallelism = None + self.update_failure_action = None + self.update_monitor = None + self.update_max_failure_ratio = None + self.update_order = None + self.working_dir = None + + self.docker_api_version = docker_api_version + self.docker_py_version = docker_py_version + + def get_facts(self): + return { + 'image': self.image, + 'mounts': self.mounts, + 'configs': self.configs, + 'networks': self.networks, + 'command': self.command, + 'args': self.args, + 'tty': self.tty, + 'dns': self.dns, + 'dns_search': self.dns_search, + 'dns_options': self.dns_options, + 'healthcheck': self.healthcheck, + 'healthcheck_disabled': self.healthcheck_disabled, + 'hostname': self.hostname, + 'hosts': self.hosts, + 'env': self.env, + 'force_update': self.force_update, + 'groups': self.groups, + 'log_driver': self.log_driver, + 'log_driver_options': self.log_driver_options, + 'publish': self.publish, + 'constraints': self.constraints, + 'placement_preferences': self.placement_preferences, + 'labels': self.labels, + 'container_labels': self.container_labels, + 'mode': self.mode, + 'replicas': 
self.replicas, + 'endpoint_mode': self.endpoint_mode, + 'restart_policy': self.restart_policy, + 'secrets': self.secrets, + 'stop_grace_period': self.stop_grace_period, + 'stop_signal': self.stop_signal, + 'limit_cpu': self.limit_cpu, + 'limit_memory': self.limit_memory, + 'read_only': self.read_only, + 'reserve_cpu': self.reserve_cpu, + 'reserve_memory': self.reserve_memory, + 'restart_policy_delay': self.restart_policy_delay, + 'restart_policy_attempts': self.restart_policy_attempts, + 'restart_policy_window': self.restart_policy_window, + 'rollback_config': self.rollback_config, + 'update_delay': self.update_delay, + 'update_parallelism': self.update_parallelism, + 'update_failure_action': self.update_failure_action, + 'update_monitor': self.update_monitor, + 'update_max_failure_ratio': self.update_max_failure_ratio, + 'update_order': self.update_order, + 'user': self.user, + 'working_dir': self.working_dir, + } + + @property + def can_update_networks(self): + # Before Docker API 1.29 adding/removing networks was not supported + return ( + self.docker_api_version >= LooseVersion('1.29') and + self.docker_py_version >= LooseVersion('2.7') + ) + + @property + def can_use_task_template_networks(self): + # In Docker API 1.25 attaching networks to TaskTemplate is preferred over Spec + return ( + self.docker_api_version >= LooseVersion('1.25') and + self.docker_py_version >= LooseVersion('2.7') + ) + + @staticmethod + def get_restart_config_from_ansible_params(params): + restart_config = params['restart_config'] or {} + condition = get_value( + 'condition', + restart_config, + default=params['restart_policy'] + ) + delay = get_value( + 'delay', + restart_config, + default=params['restart_policy_delay'] + ) + delay = get_nanoseconds_from_raw_option( + 'restart_policy_delay', + delay + ) + max_attempts = get_value( + 'max_attempts', + restart_config, + default=params['restart_policy_attempts'] + ) + window = get_value( + 'window', + restart_config, + default=params['restart_policy_window'] + ) + window = get_nanoseconds_from_raw_option( + 'restart_policy_window', + window + ) + return { + 'restart_policy': condition, + 'restart_policy_delay': delay, + 'restart_policy_attempts': max_attempts, + 'restart_policy_window': window + } + + @staticmethod + def get_update_config_from_ansible_params(params): + update_config = params['update_config'] or {} + parallelism = get_value( + 'parallelism', + update_config, + default=params['update_parallelism'] + ) + delay = get_value( + 'delay', + update_config, + default=params['update_delay'] + ) + delay = get_nanoseconds_from_raw_option( + 'update_delay', + delay + ) + failure_action = get_value( + 'failure_action', + update_config, + default=params['update_failure_action'] + ) + monitor = get_value( + 'monitor', + update_config, + default=params['update_monitor'] + ) + monitor = get_nanoseconds_from_raw_option( + 'update_monitor', + monitor + ) + max_failure_ratio = get_value( + 'max_failure_ratio', + update_config, + default=params['update_max_failure_ratio'] + ) + order = get_value( + 'order', + update_config, + default=params['update_order'] + ) + return { + 'update_parallelism': parallelism, + 'update_delay': delay, + 'update_failure_action': failure_action, + 'update_monitor': monitor, + 'update_max_failure_ratio': max_failure_ratio, + 'update_order': order + } + + @staticmethod + def get_rollback_config_from_ansible_params(params): + if params['rollback_config'] is None: + return None + rollback_config = params['rollback_config'] or {} + delay = 
get_nanoseconds_from_raw_option( + 'rollback_config.delay', + rollback_config.get('delay') + ) + monitor = get_nanoseconds_from_raw_option( + 'rollback_config.monitor', + rollback_config.get('monitor') + ) + return { + 'parallelism': rollback_config.get('parallelism'), + 'delay': delay, + 'failure_action': rollback_config.get('failure_action'), + 'monitor': monitor, + 'max_failure_ratio': rollback_config.get('max_failure_ratio'), + 'order': rollback_config.get('order'), + + } + + @staticmethod + def get_logging_from_ansible_params(params): + logging_config = params['logging'] or {} + driver = get_value( + 'driver', + logging_config, + default=params['log_driver'] + ) + options = get_value( + 'options', + logging_config, + default=params['log_driver_options'] + ) + return { + 'log_driver': driver, + 'log_driver_options': options, + } + + @staticmethod + def get_limits_from_ansible_params(params): + limits = params['limits'] or {} + cpus = get_value( + 'cpus', + limits, + default=params['limit_cpu'] + ) + memory = get_value( + 'memory', + limits, + default=params['limit_memory'] + ) + if memory is not None: + try: + memory = human_to_bytes(memory) + except ValueError as exc: + raise Exception('Failed to convert limit_memory to bytes: %s' % exc) + return { + 'limit_cpu': cpus, + 'limit_memory': memory, + } + + @staticmethod + def get_reservations_from_ansible_params(params): + reservations = params['reservations'] or {} + cpus = get_value( + 'cpus', + reservations, + default=params['reserve_cpu'] + ) + memory = get_value( + 'memory', + reservations, + default=params['reserve_memory'] + ) + + if memory is not None: + try: + memory = human_to_bytes(memory) + except ValueError as exc: + raise Exception('Failed to convert reserve_memory to bytes: %s' % exc) + return { + 'reserve_cpu': cpus, + 'reserve_memory': memory, + } + + @staticmethod + def get_placement_from_ansible_params(params): + placement = params['placement'] or {} + constraints = get_value( + 'constraints', + placement, + default=params['constraints'] + ) + + preferences = placement.get('preferences') + return { + 'constraints': constraints, + 'placement_preferences': preferences, + } + + @classmethod + def from_ansible_params( + cls, + ap, + old_service, + image_digest, + secret_ids, + config_ids, + network_ids, + docker_api_version, + docker_py_version, + ): + s = DockerService(docker_api_version, docker_py_version) + s.image = image_digest + s.args = ap['args'] + s.endpoint_mode = ap['endpoint_mode'] + s.dns = ap['dns'] + s.dns_search = ap['dns_search'] + s.dns_options = ap['dns_options'] + s.healthcheck, s.healthcheck_disabled = parse_healthcheck(ap['healthcheck']) + s.hostname = ap['hostname'] + s.hosts = ap['hosts'] + s.tty = ap['tty'] + s.labels = ap['labels'] + s.container_labels = ap['container_labels'] + s.mode = ap['mode'] + s.stop_signal = ap['stop_signal'] + s.user = ap['user'] + s.working_dir = ap['working_dir'] + s.read_only = ap['read_only'] + + s.networks = get_docker_networks(ap['networks'], network_ids) + + s.command = ap['command'] + if isinstance(s.command, string_types): + s.command = shlex.split(s.command) + elif isinstance(s.command, list): + invalid_items = [ + (index, item) + for index, item in enumerate(s.command) + if not isinstance(item, string_types) + ] + if invalid_items: + errors = ', '.join( + [ + '%s (%s) at index %s' % (item, type(item), index) + for index, item in invalid_items + ] + ) + raise Exception( + 'All items in a command list need to be strings. ' + 'Check quoting. Invalid items: %s.' 
+ % errors + ) + s.command = ap['command'] + elif s.command is not None: + raise ValueError( + 'Invalid type for command %s (%s). ' + 'Only string or list allowed. Check quoting.' + % (s.command, type(s.command)) + ) + + s.env = get_docker_environment(ap['env'], ap['env_files']) + s.rollback_config = cls.get_rollback_config_from_ansible_params(ap) + + update_config = cls.get_update_config_from_ansible_params(ap) + for key, value in update_config.items(): + setattr(s, key, value) + + restart_config = cls.get_restart_config_from_ansible_params(ap) + for key, value in restart_config.items(): + setattr(s, key, value) + + logging_config = cls.get_logging_from_ansible_params(ap) + for key, value in logging_config.items(): + setattr(s, key, value) + + limits = cls.get_limits_from_ansible_params(ap) + for key, value in limits.items(): + setattr(s, key, value) + + reservations = cls.get_reservations_from_ansible_params(ap) + for key, value in reservations.items(): + setattr(s, key, value) + + placement = cls.get_placement_from_ansible_params(ap) + for key, value in placement.items(): + setattr(s, key, value) + + if ap['stop_grace_period'] is not None: + s.stop_grace_period = convert_duration_to_nanosecond(ap['stop_grace_period']) + + if ap['force_update']: + s.force_update = int(str(time.time()).replace('.', '')) + + if ap['groups'] is not None: + # In case integers are passed as groups, we need to convert them to + # strings as docker internally treats them as strings. + s.groups = [str(g) for g in ap['groups']] + + if ap['replicas'] == -1: + if old_service: + s.replicas = old_service.replicas + else: + s.replicas = 1 + else: + s.replicas = ap['replicas'] + + if ap['publish'] is not None: + s.publish = [] + for param_p in ap['publish']: + service_p = {} + service_p['protocol'] = param_p['protocol'] + service_p['mode'] = param_p['mode'] + service_p['published_port'] = param_p['published_port'] + service_p['target_port'] = param_p['target_port'] + s.publish.append(service_p) + + if ap['mounts'] is not None: + s.mounts = [] + for param_m in ap['mounts']: + service_m = {} + service_m['readonly'] = param_m['readonly'] + service_m['type'] = param_m['type'] + if param_m['source'] is None and param_m['type'] != 'tmpfs': + raise ValueError('Source must be specified for mounts which are not of type tmpfs') + service_m['source'] = param_m['source'] or '' + service_m['target'] = param_m['target'] + service_m['labels'] = param_m['labels'] + service_m['no_copy'] = param_m['no_copy'] + service_m['propagation'] = param_m['propagation'] + service_m['driver_config'] = param_m['driver_config'] + service_m['tmpfs_mode'] = param_m['tmpfs_mode'] + tmpfs_size = param_m['tmpfs_size'] + if tmpfs_size is not None: + try: + tmpfs_size = human_to_bytes(tmpfs_size) + except ValueError as exc: + raise ValueError( + 'Failed to convert tmpfs_size to bytes: %s' % exc + ) + + service_m['tmpfs_size'] = tmpfs_size + s.mounts.append(service_m) + + if ap['configs'] is not None: + s.configs = [] + for param_m in ap['configs']: + service_c = {} + config_name = param_m['config_name'] + service_c['config_id'] = param_m['config_id'] or config_ids[config_name] + service_c['config_name'] = config_name + service_c['filename'] = param_m['filename'] or config_name + service_c['uid'] = param_m['uid'] + service_c['gid'] = param_m['gid'] + service_c['mode'] = param_m['mode'] + s.configs.append(service_c) + + if ap['secrets'] is not None: + s.secrets = [] + for param_m in ap['secrets']: + service_s = {} + secret_name = param_m['secret_name'] + 
service_s['secret_id'] = param_m['secret_id'] or secret_ids[secret_name] + service_s['secret_name'] = secret_name + service_s['filename'] = param_m['filename'] or secret_name + service_s['uid'] = param_m['uid'] + service_s['gid'] = param_m['gid'] + service_s['mode'] = param_m['mode'] + s.secrets.append(service_s) + + return s + + def compare(self, os): + differences = DifferenceTracker() + needs_rebuild = False + force_update = False + if self.endpoint_mode is not None and self.endpoint_mode != os.endpoint_mode: + differences.add('endpoint_mode', parameter=self.endpoint_mode, active=os.endpoint_mode) + if has_list_changed(self.env, os.env): + differences.add('env', parameter=self.env, active=os.env) + if self.log_driver is not None and self.log_driver != os.log_driver: + differences.add('log_driver', parameter=self.log_driver, active=os.log_driver) + if self.log_driver_options is not None and self.log_driver_options != (os.log_driver_options or {}): + differences.add('log_opt', parameter=self.log_driver_options, active=os.log_driver_options) + if self.mode != os.mode: + needs_rebuild = True + differences.add('mode', parameter=self.mode, active=os.mode) + if has_list_changed(self.mounts, os.mounts, sort_key='target'): + differences.add('mounts', parameter=self.mounts, active=os.mounts) + if has_list_changed(self.configs, os.configs, sort_key='config_name'): + differences.add('configs', parameter=self.configs, active=os.configs) + if has_list_changed(self.secrets, os.secrets, sort_key='secret_name'): + differences.add('secrets', parameter=self.secrets, active=os.secrets) + if have_networks_changed(self.networks, os.networks): + differences.add('networks', parameter=self.networks, active=os.networks) + needs_rebuild = not self.can_update_networks + if self.replicas != os.replicas: + differences.add('replicas', parameter=self.replicas, active=os.replicas) + if has_list_changed(self.command, os.command, sort_lists=False): + differences.add('command', parameter=self.command, active=os.command) + if has_list_changed(self.args, os.args, sort_lists=False): + differences.add('args', parameter=self.args, active=os.args) + if has_list_changed(self.constraints, os.constraints): + differences.add('constraints', parameter=self.constraints, active=os.constraints) + if has_list_changed(self.placement_preferences, os.placement_preferences, sort_lists=False): + differences.add('placement_preferences', parameter=self.placement_preferences, active=os.placement_preferences) + if has_list_changed(self.groups, os.groups): + differences.add('groups', parameter=self.groups, active=os.groups) + if self.labels is not None and self.labels != (os.labels or {}): + differences.add('labels', parameter=self.labels, active=os.labels) + if self.limit_cpu is not None and self.limit_cpu != os.limit_cpu: + differences.add('limit_cpu', parameter=self.limit_cpu, active=os.limit_cpu) + if self.limit_memory is not None and self.limit_memory != os.limit_memory: + differences.add('limit_memory', parameter=self.limit_memory, active=os.limit_memory) + if self.reserve_cpu is not None and self.reserve_cpu != os.reserve_cpu: + differences.add('reserve_cpu', parameter=self.reserve_cpu, active=os.reserve_cpu) + if self.reserve_memory is not None and self.reserve_memory != os.reserve_memory: + differences.add('reserve_memory', parameter=self.reserve_memory, active=os.reserve_memory) + if self.container_labels is not None and self.container_labels != (os.container_labels or {}): + differences.add('container_labels', 
parameter=self.container_labels, active=os.container_labels) + if self.stop_signal is not None and self.stop_signal != os.stop_signal: + differences.add('stop_signal', parameter=self.stop_signal, active=os.stop_signal) + if self.stop_grace_period is not None and self.stop_grace_period != os.stop_grace_period: + differences.add('stop_grace_period', parameter=self.stop_grace_period, active=os.stop_grace_period) + if self.has_publish_changed(os.publish): + differences.add('publish', parameter=self.publish, active=os.publish) + if self.read_only is not None and self.read_only != os.read_only: + differences.add('read_only', parameter=self.read_only, active=os.read_only) + if self.restart_policy is not None and self.restart_policy != os.restart_policy: + differences.add('restart_policy', parameter=self.restart_policy, active=os.restart_policy) + if self.restart_policy_attempts is not None and self.restart_policy_attempts != os.restart_policy_attempts: + differences.add('restart_policy_attempts', parameter=self.restart_policy_attempts, active=os.restart_policy_attempts) + if self.restart_policy_delay is not None and self.restart_policy_delay != os.restart_policy_delay: + differences.add('restart_policy_delay', parameter=self.restart_policy_delay, active=os.restart_policy_delay) + if self.restart_policy_window is not None and self.restart_policy_window != os.restart_policy_window: + differences.add('restart_policy_window', parameter=self.restart_policy_window, active=os.restart_policy_window) + if has_dict_changed(self.rollback_config, os.rollback_config): + differences.add('rollback_config', parameter=self.rollback_config, active=os.rollback_config) + if self.update_delay is not None and self.update_delay != os.update_delay: + differences.add('update_delay', parameter=self.update_delay, active=os.update_delay) + if self.update_parallelism is not None and self.update_parallelism != os.update_parallelism: + differences.add('update_parallelism', parameter=self.update_parallelism, active=os.update_parallelism) + if self.update_failure_action is not None and self.update_failure_action != os.update_failure_action: + differences.add('update_failure_action', parameter=self.update_failure_action, active=os.update_failure_action) + if self.update_monitor is not None and self.update_monitor != os.update_monitor: + differences.add('update_monitor', parameter=self.update_monitor, active=os.update_monitor) + if self.update_max_failure_ratio is not None and self.update_max_failure_ratio != os.update_max_failure_ratio: + differences.add('update_max_failure_ratio', parameter=self.update_max_failure_ratio, active=os.update_max_failure_ratio) + if self.update_order is not None and self.update_order != os.update_order: + differences.add('update_order', parameter=self.update_order, active=os.update_order) + has_image_changed, change = self.has_image_changed(os.image) + if has_image_changed: + differences.add('image', parameter=self.image, active=change) + if self.user and self.user != os.user: + differences.add('user', parameter=self.user, active=os.user) + if has_list_changed(self.dns, os.dns, sort_lists=False): + differences.add('dns', parameter=self.dns, active=os.dns) + if has_list_changed(self.dns_search, os.dns_search, sort_lists=False): + differences.add('dns_search', parameter=self.dns_search, active=os.dns_search) + if has_list_changed(self.dns_options, os.dns_options): + differences.add('dns_options', parameter=self.dns_options, active=os.dns_options) + if self.has_healthcheck_changed(os): + 
differences.add('healthcheck', parameter=self.healthcheck, active=os.healthcheck) + if self.hostname is not None and self.hostname != os.hostname: + differences.add('hostname', parameter=self.hostname, active=os.hostname) + if self.hosts is not None and self.hosts != (os.hosts or {}): + differences.add('hosts', parameter=self.hosts, active=os.hosts) + if self.tty is not None and self.tty != os.tty: + differences.add('tty', parameter=self.tty, active=os.tty) + if self.working_dir is not None and self.working_dir != os.working_dir: + differences.add('working_dir', parameter=self.working_dir, active=os.working_dir) + if self.force_update: + force_update = True + return not differences.empty or force_update, differences, needs_rebuild, force_update + + def has_healthcheck_changed(self, old_publish): + if self.healthcheck_disabled is False and self.healthcheck is None: + return False + if self.healthcheck_disabled: + if old_publish.healthcheck is None: + return False + if old_publish.healthcheck.get('test') == ['NONE']: + return False + return self.healthcheck != old_publish.healthcheck + + def has_publish_changed(self, old_publish): + if self.publish is None: + return False + old_publish = old_publish or [] + if len(self.publish) != len(old_publish): + return True + publish_sorter = operator.itemgetter('published_port', 'target_port', 'protocol') + publish = sorted(self.publish, key=publish_sorter) + old_publish = sorted(old_publish, key=publish_sorter) + for publish_item, old_publish_item in zip(publish, old_publish): + ignored_keys = set() + if not publish_item.get('mode'): + ignored_keys.add('mode') + # Create copies of publish_item dicts where keys specified in ignored_keys are left out + filtered_old_publish_item = dict( + (k, v) for k, v in old_publish_item.items() if k not in ignored_keys + ) + filtered_publish_item = dict( + (k, v) for k, v in publish_item.items() if k not in ignored_keys + ) + if filtered_publish_item != filtered_old_publish_item: + return True + return False + + def has_image_changed(self, old_image): + if '@' not in self.image: + old_image = old_image.split('@')[0] + return self.image != old_image, old_image + + def build_container_spec(self): + mounts = None + if self.mounts is not None: + mounts = [] + for mount_config in self.mounts: + mount_options = { + 'target': 'target', + 'source': 'source', + 'type': 'type', + 'readonly': 'read_only', + 'propagation': 'propagation', + 'labels': 'labels', + 'no_copy': 'no_copy', + 'driver_config': 'driver_config', + 'tmpfs_size': 'tmpfs_size', + 'tmpfs_mode': 'tmpfs_mode' + } + mount_args = {} + for option, mount_arg in mount_options.items(): + value = mount_config.get(option) + if value is not None: + mount_args[mount_arg] = value + + mounts.append(types.Mount(**mount_args)) + + configs = None + if self.configs is not None: + configs = [] + for config_config in self.configs: + config_args = { + 'config_id': config_config['config_id'], + 'config_name': config_config['config_name'] + } + filename = config_config.get('filename') + if filename: + config_args['filename'] = filename + uid = config_config.get('uid') + if uid: + config_args['uid'] = uid + gid = config_config.get('gid') + if gid: + config_args['gid'] = gid + mode = config_config.get('mode') + if mode: + config_args['mode'] = mode + + configs.append(types.ConfigReference(**config_args)) + + secrets = None + if self.secrets is not None: + secrets = [] + for secret_config in self.secrets: + secret_args = { + 'secret_id': secret_config['secret_id'], + 'secret_name': 
secret_config['secret_name'] + } + filename = secret_config.get('filename') + if filename: + secret_args['filename'] = filename + uid = secret_config.get('uid') + if uid: + secret_args['uid'] = uid + gid = secret_config.get('gid') + if gid: + secret_args['gid'] = gid + mode = secret_config.get('mode') + if mode: + secret_args['mode'] = mode + + secrets.append(types.SecretReference(**secret_args)) + + dns_config_args = {} + if self.dns is not None: + dns_config_args['nameservers'] = self.dns + if self.dns_search is not None: + dns_config_args['search'] = self.dns_search + if self.dns_options is not None: + dns_config_args['options'] = self.dns_options + dns_config = types.DNSConfig(**dns_config_args) if dns_config_args else None + + container_spec_args = {} + if self.command is not None: + container_spec_args['command'] = self.command + if self.args is not None: + container_spec_args['args'] = self.args + if self.env is not None: + container_spec_args['env'] = self.env + if self.user is not None: + container_spec_args['user'] = self.user + if self.container_labels is not None: + container_spec_args['labels'] = self.container_labels + if self.healthcheck is not None: + container_spec_args['healthcheck'] = types.Healthcheck(**self.healthcheck) + elif self.healthcheck_disabled: + container_spec_args['healthcheck'] = types.Healthcheck(test=['NONE']) + if self.hostname is not None: + container_spec_args['hostname'] = self.hostname + if self.hosts is not None: + container_spec_args['hosts'] = self.hosts + if self.read_only is not None: + container_spec_args['read_only'] = self.read_only + if self.stop_grace_period is not None: + container_spec_args['stop_grace_period'] = self.stop_grace_period + if self.stop_signal is not None: + container_spec_args['stop_signal'] = self.stop_signal + if self.tty is not None: + container_spec_args['tty'] = self.tty + if self.groups is not None: + container_spec_args['groups'] = self.groups + if self.working_dir is not None: + container_spec_args['workdir'] = self.working_dir + if secrets is not None: + container_spec_args['secrets'] = secrets + if mounts is not None: + container_spec_args['mounts'] = mounts + if dns_config is not None: + container_spec_args['dns_config'] = dns_config + if configs is not None: + container_spec_args['configs'] = configs + + return types.ContainerSpec(self.image, **container_spec_args) + + def build_placement(self): + placement_args = {} + if self.constraints is not None: + placement_args['constraints'] = self.constraints + if self.placement_preferences is not None: + placement_args['preferences'] = [ + {key.title(): {'SpreadDescriptor': value}} + for preference in self.placement_preferences + for key, value in preference.items() + ] + return types.Placement(**placement_args) if placement_args else None + + def build_update_config(self): + update_config_args = {} + if self.update_parallelism is not None: + update_config_args['parallelism'] = self.update_parallelism + if self.update_delay is not None: + update_config_args['delay'] = self.update_delay + if self.update_failure_action is not None: + update_config_args['failure_action'] = self.update_failure_action + if self.update_monitor is not None: + update_config_args['monitor'] = self.update_monitor + if self.update_max_failure_ratio is not None: + update_config_args['max_failure_ratio'] = self.update_max_failure_ratio + if self.update_order is not None: + update_config_args['order'] = self.update_order + return types.UpdateConfig(**update_config_args) if update_config_args else 
None + + def build_log_driver(self): + log_driver_args = {} + if self.log_driver is not None: + log_driver_args['name'] = self.log_driver + if self.log_driver_options is not None: + log_driver_args['options'] = self.log_driver_options + return types.DriverConfig(**log_driver_args) if log_driver_args else None + + def build_restart_policy(self): + restart_policy_args = {} + if self.restart_policy is not None: + restart_policy_args['condition'] = self.restart_policy + if self.restart_policy_delay is not None: + restart_policy_args['delay'] = self.restart_policy_delay + if self.restart_policy_attempts is not None: + restart_policy_args['max_attempts'] = self.restart_policy_attempts + if self.restart_policy_window is not None: + restart_policy_args['window'] = self.restart_policy_window + return types.RestartPolicy(**restart_policy_args) if restart_policy_args else None + + def build_rollback_config(self): + if self.rollback_config is None: + return None + rollback_config_options = [ + 'parallelism', + 'delay', + 'failure_action', + 'monitor', + 'max_failure_ratio', + 'order', + ] + rollback_config_args = {} + for option in rollback_config_options: + value = self.rollback_config.get(option) + if value is not None: + rollback_config_args[option] = value + return types.RollbackConfig(**rollback_config_args) if rollback_config_args else None + + def build_resources(self): + resources_args = {} + if self.limit_cpu is not None: + resources_args['cpu_limit'] = int(self.limit_cpu * 1000000000.0) + if self.limit_memory is not None: + resources_args['mem_limit'] = self.limit_memory + if self.reserve_cpu is not None: + resources_args['cpu_reservation'] = int(self.reserve_cpu * 1000000000.0) + if self.reserve_memory is not None: + resources_args['mem_reservation'] = self.reserve_memory + return types.Resources(**resources_args) if resources_args else None + + def build_task_template(self, container_spec, placement=None): + log_driver = self.build_log_driver() + restart_policy = self.build_restart_policy() + resources = self.build_resources() + + task_template_args = {} + if placement is not None: + task_template_args['placement'] = placement + if log_driver is not None: + task_template_args['log_driver'] = log_driver + if restart_policy is not None: + task_template_args['restart_policy'] = restart_policy + if resources is not None: + task_template_args['resources'] = resources + if self.force_update: + task_template_args['force_update'] = self.force_update + if self.can_use_task_template_networks: + networks = self.build_networks() + if networks: + task_template_args['networks'] = networks + return types.TaskTemplate(container_spec=container_spec, **task_template_args) + + def build_service_mode(self): + if self.mode == 'global': + self.replicas = None + return types.ServiceMode(self.mode, replicas=self.replicas) + + def build_networks(self): + networks = None + if self.networks is not None: + networks = [] + for network in self.networks: + docker_network = {'Target': network['id']} + if 'aliases' in network: + docker_network['Aliases'] = network['aliases'] + if 'options' in network: + docker_network['DriverOpts'] = network['options'] + networks.append(docker_network) + return networks + + def build_endpoint_spec(self): + endpoint_spec_args = {} + if self.publish is not None: + ports = [] + for port in self.publish: + port_spec = { + 'Protocol': port['protocol'], + 'PublishedPort': port['published_port'], + 'TargetPort': port['target_port'] + } + if port.get('mode'): + port_spec['PublishMode'] = 
port['mode'] + ports.append(port_spec) + endpoint_spec_args['ports'] = ports + if self.endpoint_mode is not None: + endpoint_spec_args['mode'] = self.endpoint_mode + return types.EndpointSpec(**endpoint_spec_args) if endpoint_spec_args else None + + def build_docker_service(self): + container_spec = self.build_container_spec() + placement = self.build_placement() + task_template = self.build_task_template(container_spec, placement) + + update_config = self.build_update_config() + rollback_config = self.build_rollback_config() + service_mode = self.build_service_mode() + endpoint_spec = self.build_endpoint_spec() + + service = {'task_template': task_template, 'mode': service_mode} + if update_config: + service['update_config'] = update_config + if rollback_config: + service['rollback_config'] = rollback_config + if endpoint_spec: + service['endpoint_spec'] = endpoint_spec + if self.labels: + service['labels'] = self.labels + if not self.can_use_task_template_networks: + networks = self.build_networks() + if networks: + service['networks'] = networks + return service + + +class DockerServiceManager(object): + + def __init__(self, client): + self.client = client + self.retries = 2 + self.diff_tracker = None + + def get_service(self, name): + try: + raw_data = self.client.inspect_service(name) + except NotFound: + return None + ds = DockerService(self.client.docker_api_version, self.client.docker_py_version) + + task_template_data = raw_data['Spec']['TaskTemplate'] + ds.image = task_template_data['ContainerSpec']['Image'] + ds.user = task_template_data['ContainerSpec'].get('User') + ds.env = task_template_data['ContainerSpec'].get('Env') + ds.command = task_template_data['ContainerSpec'].get('Command') + ds.args = task_template_data['ContainerSpec'].get('Args') + ds.groups = task_template_data['ContainerSpec'].get('Groups') + ds.stop_grace_period = task_template_data['ContainerSpec'].get('StopGracePeriod') + ds.stop_signal = task_template_data['ContainerSpec'].get('StopSignal') + ds.working_dir = task_template_data['ContainerSpec'].get('Dir') + ds.read_only = task_template_data['ContainerSpec'].get('ReadOnly') + + healthcheck_data = task_template_data['ContainerSpec'].get('Healthcheck') + if healthcheck_data: + options = { + 'Test': 'test', + 'Interval': 'interval', + 'Timeout': 'timeout', + 'StartPeriod': 'start_period', + 'Retries': 'retries' + } + healthcheck = dict( + (options[key], value) for key, value in healthcheck_data.items() + if value is not None and key in options + ) + ds.healthcheck = healthcheck + + update_config_data = raw_data['Spec'].get('UpdateConfig') + if update_config_data: + ds.update_delay = update_config_data.get('Delay') + ds.update_parallelism = update_config_data.get('Parallelism') + ds.update_failure_action = update_config_data.get('FailureAction') + ds.update_monitor = update_config_data.get('Monitor') + ds.update_max_failure_ratio = update_config_data.get('MaxFailureRatio') + ds.update_order = update_config_data.get('Order') + + rollback_config_data = raw_data['Spec'].get('RollbackConfig') + if rollback_config_data: + ds.rollback_config = { + 'parallelism': rollback_config_data.get('Parallelism'), + 'delay': rollback_config_data.get('Delay'), + 'failure_action': rollback_config_data.get('FailureAction'), + 'monitor': rollback_config_data.get('Monitor'), + 'max_failure_ratio': rollback_config_data.get('MaxFailureRatio'), + 'order': rollback_config_data.get('Order'), + } + + dns_config = task_template_data['ContainerSpec'].get('DNSConfig') + if dns_config: + 
ds.dns = dns_config.get('Nameservers') + ds.dns_search = dns_config.get('Search') + ds.dns_options = dns_config.get('Options') + + ds.hostname = task_template_data['ContainerSpec'].get('Hostname') + + hosts = task_template_data['ContainerSpec'].get('Hosts') + if hosts: + hosts = [ + list(reversed(host.split(":", 1))) + if ":" in host + else host.split(" ", 1) + for host in hosts + ] + ds.hosts = dict((hostname, ip) for ip, hostname in hosts) + ds.tty = task_template_data['ContainerSpec'].get('TTY') + + placement = task_template_data.get('Placement') + if placement: + ds.constraints = placement.get('Constraints') + placement_preferences = [] + for preference in placement.get('Preferences', []): + placement_preferences.append( + dict( + (key.lower(), value['SpreadDescriptor']) + for key, value in preference.items() + ) + ) + ds.placement_preferences = placement_preferences or None + + restart_policy_data = task_template_data.get('RestartPolicy') + if restart_policy_data: + ds.restart_policy = restart_policy_data.get('Condition') + ds.restart_policy_delay = restart_policy_data.get('Delay') + ds.restart_policy_attempts = restart_policy_data.get('MaxAttempts') + ds.restart_policy_window = restart_policy_data.get('Window') + + raw_data_endpoint_spec = raw_data['Spec'].get('EndpointSpec') + if raw_data_endpoint_spec: + ds.endpoint_mode = raw_data_endpoint_spec.get('Mode') + raw_data_ports = raw_data_endpoint_spec.get('Ports') + if raw_data_ports: + ds.publish = [] + for port in raw_data_ports: + ds.publish.append({ + 'protocol': port['Protocol'], + 'mode': port.get('PublishMode', None), + 'published_port': int(port['PublishedPort']), + 'target_port': int(port['TargetPort']) + }) + + raw_data_limits = task_template_data.get('Resources', {}).get('Limits') + if raw_data_limits: + raw_cpu_limits = raw_data_limits.get('NanoCPUs') + if raw_cpu_limits: + ds.limit_cpu = float(raw_cpu_limits) / 1000000000 + + raw_memory_limits = raw_data_limits.get('MemoryBytes') + if raw_memory_limits: + ds.limit_memory = int(raw_memory_limits) + + raw_data_reservations = task_template_data.get('Resources', {}).get('Reservations') + if raw_data_reservations: + raw_cpu_reservations = raw_data_reservations.get('NanoCPUs') + if raw_cpu_reservations: + ds.reserve_cpu = float(raw_cpu_reservations) / 1000000000 + + raw_memory_reservations = raw_data_reservations.get('MemoryBytes') + if raw_memory_reservations: + ds.reserve_memory = int(raw_memory_reservations) + + ds.labels = raw_data['Spec'].get('Labels') + ds.log_driver = task_template_data.get('LogDriver', {}).get('Name') + ds.log_driver_options = task_template_data.get('LogDriver', {}).get('Options') + ds.container_labels = task_template_data['ContainerSpec'].get('Labels') + + mode = raw_data['Spec']['Mode'] + if 'Replicated' in mode.keys(): + ds.mode = to_text('replicated', encoding='utf-8') + ds.replicas = mode['Replicated']['Replicas'] + elif 'Global' in mode.keys(): + ds.mode = 'global' + else: + raise Exception('Unknown service mode: %s' % mode) + + raw_data_mounts = task_template_data['ContainerSpec'].get('Mounts') + if raw_data_mounts: + ds.mounts = [] + for mount_data in raw_data_mounts: + bind_options = mount_data.get('BindOptions', {}) + volume_options = mount_data.get('VolumeOptions', {}) + tmpfs_options = mount_data.get('TmpfsOptions', {}) + driver_config = volume_options.get('DriverConfig', {}) + driver_config = dict( + (key.lower(), value) for key, value in driver_config.items() + ) or None + ds.mounts.append({ + 'source': mount_data.get('Source', ''), + 
'type': mount_data['Type'], + 'target': mount_data['Target'], + 'readonly': mount_data.get('ReadOnly'), + 'propagation': bind_options.get('Propagation'), + 'no_copy': volume_options.get('NoCopy'), + 'labels': volume_options.get('Labels'), + 'driver_config': driver_config, + 'tmpfs_mode': tmpfs_options.get('Mode'), + 'tmpfs_size': tmpfs_options.get('SizeBytes'), + }) + + raw_data_configs = task_template_data['ContainerSpec'].get('Configs') + if raw_data_configs: + ds.configs = [] + for config_data in raw_data_configs: + ds.configs.append({ + 'config_id': config_data['ConfigID'], + 'config_name': config_data['ConfigName'], + 'filename': config_data['File'].get('Name'), + 'uid': config_data['File'].get('UID'), + 'gid': config_data['File'].get('GID'), + 'mode': config_data['File'].get('Mode') + }) + + raw_data_secrets = task_template_data['ContainerSpec'].get('Secrets') + if raw_data_secrets: + ds.secrets = [] + for secret_data in raw_data_secrets: + ds.secrets.append({ + 'secret_id': secret_data['SecretID'], + 'secret_name': secret_data['SecretName'], + 'filename': secret_data['File'].get('Name'), + 'uid': secret_data['File'].get('UID'), + 'gid': secret_data['File'].get('GID'), + 'mode': secret_data['File'].get('Mode') + }) + + raw_networks_data = task_template_data.get('Networks', raw_data['Spec'].get('Networks')) + if raw_networks_data: + ds.networks = [] + for network_data in raw_networks_data: + network = {'id': network_data['Target']} + if 'Aliases' in network_data: + network['aliases'] = network_data['Aliases'] + if 'DriverOpts' in network_data: + network['options'] = network_data['DriverOpts'] + ds.networks.append(network) + ds.service_version = raw_data['Version']['Index'] + ds.service_id = raw_data['ID'] + return ds + + def update_service(self, name, old_service, new_service): + service_data = new_service.build_docker_service() + result = self.client.update_service( + old_service.service_id, + old_service.service_version, + name=name, + **service_data + ) + # Prior to Docker SDK 4.0.0 no warnings were returned and will thus be ignored. 
+ # (see https://github.com/docker/docker-py/pull/2272) + self.client.report_warnings(result, ['Warning']) + + def create_service(self, name, service): + service_data = service.build_docker_service() + result = self.client.create_service(name=name, **service_data) + self.client.report_warnings(result, ['Warning']) + + def remove_service(self, name): + self.client.remove_service(name) + + def get_image_digest(self, name, resolve=False): + if ( + not name + or not resolve + ): + return name + repo, tag = parse_repository_tag(name) + if not tag: + tag = 'latest' + name = repo + ':' + tag + distribution_data = self.client.inspect_distribution(name) + digest = distribution_data['Descriptor']['digest'] + return '%s@%s' % (name, digest) + + def get_networks_names_ids(self): + return dict( + (network['Name'], network['Id']) for network in self.client.networks() + ) + + def get_missing_secret_ids(self): + """ + Resolve missing secret ids by looking them up by name + """ + secret_names = [ + secret['secret_name'] + for secret in self.client.module.params.get('secrets') or [] + if secret['secret_id'] is None + ] + if not secret_names: + return {} + secrets = self.client.secrets(filters={'name': secret_names}) + secrets = dict( + (secret['Spec']['Name'], secret['ID']) + for secret in secrets + if secret['Spec']['Name'] in secret_names + ) + for secret_name in secret_names: + if secret_name not in secrets: + self.client.fail( + 'Could not find a secret named "%s"' % secret_name + ) + return secrets + + def get_missing_config_ids(self): + """ + Resolve missing config ids by looking them up by name + """ + config_names = [ + config['config_name'] + for config in self.client.module.params.get('configs') or [] + if config['config_id'] is None + ] + if not config_names: + return {} + configs = self.client.configs(filters={'name': config_names}) + configs = dict( + (config['Spec']['Name'], config['ID']) + for config in configs + if config['Spec']['Name'] in config_names + ) + for config_name in config_names: + if config_name not in configs: + self.client.fail( + 'Could not find a config named "%s"' % config_name + ) + return configs + + def run(self): + self.diff_tracker = DifferenceTracker() + module = self.client.module + + image = module.params['image'] + try: + image_digest = self.get_image_digest( + name=image, + resolve=module.params['resolve_image'] + ) + except DockerException as e: + self.client.fail( + 'Error looking for an image named %s: %s' + % (image, e) + ) + + try: + current_service = self.get_service(module.params['name']) + except Exception as e: + self.client.fail( + 'Error looking for service named %s: %s' + % (module.params['name'], e) + ) + try: + secret_ids = self.get_missing_secret_ids() + config_ids = self.get_missing_config_ids() + network_ids = self.get_networks_names_ids() + new_service = DockerService.from_ansible_params( + module.params, + current_service, + image_digest, + secret_ids, + config_ids, + network_ids, + self.client.docker_api_version, + self.client.docker_py_version + ) + except Exception as e: + return self.client.fail( + 'Error parsing module parameters: %s' % e + ) + + changed = False + msg = 'noop' + rebuilt = False + differences = DifferenceTracker() + facts = {} + + if current_service: + if module.params['state'] == 'absent': + if not module.check_mode: + self.remove_service(module.params['name']) + msg = 'Service removed' + changed = True + else: + changed, differences, need_rebuild, force_update = new_service.compare( + current_service + ) + if changed: + 
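# Below: changes that compare() flags as needing a rebuild remove and recreate the service; other changes update it in place; with no differences at all, force_update still triggers an update. +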
self.diff_tracker.merge(differences) + if need_rebuild: + if not module.check_mode: + self.remove_service(module.params['name']) + self.create_service( + module.params['name'], + new_service + ) + msg = 'Service rebuilt' + rebuilt = True + else: + if not module.check_mode: + self.update_service( + module.params['name'], + current_service, + new_service + ) + msg = 'Service updated' + rebuilt = False + else: + if force_update: + if not module.check_mode: + self.update_service( + module.params['name'], + current_service, + new_service + ) + msg = 'Service forcefully updated' + rebuilt = False + changed = True + else: + msg = 'Service unchanged' + facts = new_service.get_facts() + else: + if module.params['state'] == 'absent': + msg = 'Service absent' + else: + if not module.check_mode: + self.create_service(module.params['name'], new_service) + msg = 'Service created' + changed = True + facts = new_service.get_facts() + + return msg, changed, rebuilt, differences.get_legacy_docker_diffs(), facts + + def run_safe(self): + while True: + try: + return self.run() + except APIError as e: + # Sometimes Version.Index will have changed between an inspect and + # update. If this is encountered we'll retry the update. + if self.retries > 0 and 'update out of sequence' in str(e.explanation): + self.retries -= 1 + time.sleep(1) + else: + raise + + +def _detect_publish_mode_usage(client): + for publish_def in client.module.params['publish'] or []: + if publish_def.get('mode'): + return True + return False + + +def _detect_healthcheck_start_period(client): + if client.module.params['healthcheck']: + return client.module.params['healthcheck']['start_period'] is not None + return False + + +def _detect_mount_tmpfs_usage(client): + for mount in client.module.params['mounts'] or []: + if mount.get('type') == 'tmpfs': + return True + if mount.get('tmpfs_size') is not None: + return True + if mount.get('tmpfs_mode') is not None: + return True + return False + + +def _detect_update_config_failure_action_rollback(client): + rollback_config_failure_action = ( + (client.module.params['update_config'] or {}).get('failure_action') + ) + update_failure_action = client.module.params['update_failure_action'] + failure_action = rollback_config_failure_action or update_failure_action + return failure_action == 'rollback' + + +def main(): + argument_spec = dict( + name=dict(type='str', required=True), + image=dict(type='str'), + state=dict(type='str', default='present', choices=['present', 'absent']), + mounts=dict(type='list', elements='dict', options=dict( + source=dict(type='str'), + target=dict(type='str', required=True), + type=dict( + type='str', + default='bind', + choices=['bind', 'volume', 'tmpfs', 'npipe'], + ), + readonly=dict(type='bool'), + labels=dict(type='dict'), + propagation=dict( + type='str', + choices=[ + 'shared', + 'slave', + 'private', + 'rshared', + 'rslave', + 'rprivate' + ] + ), + no_copy=dict(type='bool'), + driver_config=dict(type='dict', options=dict( + name=dict(type='str'), + options=dict(type='dict') + )), + tmpfs_size=dict(type='str'), + tmpfs_mode=dict(type='int') + )), + configs=dict(type='list', elements='dict', options=dict( + config_id=dict(type='str'), + config_name=dict(type='str', required=True), + filename=dict(type='str'), + uid=dict(type='str'), + gid=dict(type='str'), + mode=dict(type='int'), + )), + secrets=dict(type='list', elements='dict', options=dict( + secret_id=dict(type='str'), + secret_name=dict(type='str', required=True), + filename=dict(type='str'), + 
uid=dict(type='str'), + gid=dict(type='str'), + mode=dict(type='int'), + )), + networks=dict(type='list', elements='raw'), + command=dict(type='raw'), + args=dict(type='list', elements='str'), + env=dict(type='raw'), + env_files=dict(type='list', elements='path'), + force_update=dict(type='bool', default=False), + groups=dict(type='list', elements='str'), + logging=dict(type='dict', options=dict( + driver=dict(type='str'), + options=dict(type='dict'), + )), + log_driver=dict(type='str', removed_in_version='2.12'), + log_driver_options=dict(type='dict', removed_in_version='2.12'), + publish=dict(type='list', elements='dict', options=dict( + published_port=dict(type='int', required=True), + target_port=dict(type='int', required=True), + protocol=dict(type='str', default='tcp', choices=['tcp', 'udp']), + mode=dict(type='str', choices=['ingress', 'host']), + )), + placement=dict(type='dict', options=dict( + constraints=dict(type='list', elements='str'), + preferences=dict(type='list', elements='dict'), + )), + constraints=dict(type='list', elements='str', removed_in_version='2.12'), + tty=dict(type='bool'), + dns=dict(type='list', elements='str'), + dns_search=dict(type='list', elements='str'), + dns_options=dict(type='list', elements='str'), + healthcheck=dict(type='dict', options=dict( + test=dict(type='raw'), + interval=dict(type='str'), + timeout=dict(type='str'), + start_period=dict(type='str'), + retries=dict(type='int'), + )), + hostname=dict(type='str'), + hosts=dict(type='dict'), + labels=dict(type='dict'), + container_labels=dict(type='dict'), + mode=dict( + type='str', + default='replicated', + choices=['replicated', 'global'] + ), + replicas=dict(type='int', default=-1), + endpoint_mode=dict(type='str', choices=['vip', 'dnsrr']), + stop_grace_period=dict(type='str'), + stop_signal=dict(type='str'), + limits=dict(type='dict', options=dict( + cpus=dict(type='float'), + memory=dict(type='str'), + )), + limit_cpu=dict(type='float', removed_in_version='2.12'), + limit_memory=dict(type='str', removed_in_version='2.12'), + read_only=dict(type='bool'), + reservations=dict(type='dict', options=dict( + cpus=dict(type='float'), + memory=dict(type='str'), + )), + reserve_cpu=dict(type='float', removed_in_version='2.12'), + reserve_memory=dict(type='str', removed_in_version='2.12'), + resolve_image=dict(type='bool', default=False), + restart_config=dict(type='dict', options=dict( + condition=dict(type='str', choices=['none', 'on-failure', 'any']), + delay=dict(type='str'), + max_attempts=dict(type='int'), + window=dict(type='str'), + )), + restart_policy=dict( + type='str', + choices=['none', 'on-failure', 'any'], + removed_in_version='2.12' + ), + restart_policy_delay=dict(type='raw', removed_in_version='2.12'), + restart_policy_attempts=dict(type='int', removed_in_version='2.12'), + restart_policy_window=dict(type='raw', removed_in_version='2.12'), + rollback_config=dict(type='dict', options=dict( + parallelism=dict(type='int'), + delay=dict(type='str'), + failure_action=dict( + type='str', + choices=['continue', 'pause'] + ), + monitor=dict(type='str'), + max_failure_ratio=dict(type='float'), + order=dict(type='str'), + )), + update_config=dict(type='dict', options=dict( + parallelism=dict(type='int'), + delay=dict(type='str'), + failure_action=dict( + type='str', + choices=['continue', 'pause', 'rollback'] + ), + monitor=dict(type='str'), + max_failure_ratio=dict(type='float'), + order=dict(type='str'), + )), + update_delay=dict(type='raw', removed_in_version='2.12'), + 
update_parallelism=dict(type='int', removed_in_version='2.12'), + update_failure_action=dict( + type='str', + choices=['continue', 'pause', 'rollback'], + removed_in_version='2.12' + ), + update_monitor=dict(type='raw', removed_in_version='2.12'), + update_max_failure_ratio=dict(type='float', removed_in_version='2.12'), + update_order=dict( + type='str', + choices=['stop-first', 'start-first'], + removed_in_version='2.12' + ), + user=dict(type='str'), + working_dir=dict(type='str'), + ) + + option_minimal_versions = dict( + constraints=dict(docker_py_version='2.4.0'), + dns=dict(docker_py_version='2.6.0', docker_api_version='1.25'), + dns_options=dict(docker_py_version='2.6.0', docker_api_version='1.25'), + dns_search=dict(docker_py_version='2.6.0', docker_api_version='1.25'), + endpoint_mode=dict(docker_py_version='3.0.0', docker_api_version='1.25'), + force_update=dict(docker_py_version='2.1.0', docker_api_version='1.25'), + healthcheck=dict(docker_py_version='2.6.0', docker_api_version='1.25'), + hostname=dict(docker_py_version='2.2.0', docker_api_version='1.25'), + hosts=dict(docker_py_version='2.6.0', docker_api_version='1.25'), + groups=dict(docker_py_version='2.6.0', docker_api_version='1.25'), + tty=dict(docker_py_version='2.4.0', docker_api_version='1.25'), + secrets=dict(docker_py_version='2.4.0', docker_api_version='1.25'), + configs=dict(docker_py_version='2.6.0', docker_api_version='1.30'), + update_max_failure_ratio=dict(docker_py_version='2.1.0', docker_api_version='1.25'), + update_monitor=dict(docker_py_version='2.1.0', docker_api_version='1.25'), + update_order=dict(docker_py_version='2.7.0', docker_api_version='1.29'), + stop_signal=dict(docker_py_version='2.6.0', docker_api_version='1.28'), + publish=dict(docker_py_version='3.0.0', docker_api_version='1.25'), + read_only=dict(docker_py_version='2.6.0', docker_api_version='1.28'), + resolve_image=dict(docker_api_version='1.30', docker_py_version='3.2.0'), + rollback_config=dict(docker_py_version='3.5.0', docker_api_version='1.28'), + # specials + publish_mode=dict( + docker_py_version='3.0.0', + docker_api_version='1.25', + detect_usage=_detect_publish_mode_usage, + usage_msg='set publish.mode' + ), + healthcheck_start_period=dict( + docker_py_version='2.6.0', + docker_api_version='1.29', + detect_usage=_detect_healthcheck_start_period, + usage_msg='set healthcheck.start_period' + ), + update_config_max_failure_ratio=dict( + docker_py_version='2.1.0', + docker_api_version='1.25', + detect_usage=lambda c: (c.module.params['update_config'] or {}).get( + 'max_failure_ratio' + ) is not None, + usage_msg='set update_config.max_failure_ratio' + ), + update_config_failure_action=dict( + docker_py_version='3.5.0', + docker_api_version='1.28', + detect_usage=_detect_update_config_failure_action_rollback, + usage_msg='set update_config.failure_action.rollback' + ), + update_config_monitor=dict( + docker_py_version='2.1.0', + docker_api_version='1.25', + detect_usage=lambda c: (c.module.params['update_config'] or {}).get( + 'monitor' + ) is not None, + usage_msg='set update_config.monitor' + ), + update_config_order=dict( + docker_py_version='2.7.0', + docker_api_version='1.29', + detect_usage=lambda c: (c.module.params['update_config'] or {}).get( + 'order' + ) is not None, + usage_msg='set update_config.order' + ), + placement_config_preferences=dict( + docker_py_version='2.4.0', + docker_api_version='1.27', + detect_usage=lambda c: (c.module.params['placement'] or {}).get( + 'preferences' + ) is not None, + usage_msg='set 
placement.preferences' + ), + placement_config_constraints=dict( + docker_py_version='2.4.0', + detect_usage=lambda c: (c.module.params['placement'] or {}).get( + 'constraints' + ) is not None, + usage_msg='set placement.constraints' + ), + mounts_tmpfs=dict( + docker_py_version='2.6.0', + detect_usage=_detect_mount_tmpfs_usage, + usage_msg='set mounts.tmpfs' + ), + rollback_config_order=dict( + docker_api_version='1.29', + detect_usage=lambda c: (c.module.params['rollback_config'] or {}).get( + 'order' + ) is not None, + usage_msg='set rollback_config.order' + ), + ) + required_if = [ + ('state', 'present', ['image']) + ] + + client = AnsibleDockerClient( + argument_spec=argument_spec, + required_if=required_if, + supports_check_mode=True, + min_docker_version='2.0.2', + min_docker_api_version='1.24', + option_minimal_versions=option_minimal_versions, + ) + + try: + dsm = DockerServiceManager(client) + msg, changed, rebuilt, changes, facts = dsm.run_safe() + + results = dict( + msg=msg, + changed=changed, + rebuilt=rebuilt, + changes=changes, + swarm_service=facts, + ) + if client.module._diff: + before, after = dsm.diff_tracker.get_before_after() + results['diff'] = dict(before=before, after=after) + + client.module.exit_json(**results) + except DockerException as e: + client.fail('An unexpected docker error occurred: {0}'.format(e), exception=traceback.format_exc()) + except RequestException as e: + client.fail('An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(e), exception=traceback.format_exc()) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/docker/docker_swarm_service_info.py b/plugins/modules/cloud/docker/docker_swarm_service_info.py new file mode 100644 index 0000000000..5f21194388 --- /dev/null +++ b/plugins/modules/cloud/docker/docker_swarm_service_info.py @@ -0,0 +1,121 @@ +#!/usr/bin/python +# +# (c) 2019 Hannes Ljungberg +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = { + 'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community' +} + +DOCUMENTATION = ''' +--- +module: docker_swarm_service_info + +short_description: Retrieves information about docker services from a Swarm Manager + +description: + - Retrieves information about a docker service. + - Essentially returns the output of C(docker service inspect <name>). + - Must be executed on a host running as Swarm Manager, otherwise the module will fail. + + +options: + name: + description: + - The name of the service to inspect. + type: str + required: yes +extends_documentation_fragment: +- community.general.docker +- community.general.docker.docker_py_1_documentation + + +author: + - Hannes Ljungberg (@hannseman) + +requirements: + - "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 2.0.0" + - "Docker API >= 1.24" +''' + +EXAMPLES = ''' +- name: Get info from a service + docker_swarm_service_info: + name: myservice + register: result +''' + +RETURN = ''' +exists: + description: + - Returns whether the service exists. + type: bool + returned: always + sample: true +service: + description: + - A dictionary representing the current state of the service. Matches the C(docker service inspect) output. + - Will be C(none) if service does not exist.
+ returned: always + type: dict +''' + +import traceback + +try: + from docker.errors import DockerException +except ImportError: + # missing Docker SDK for Python handled in ansible.module_utils.docker.common + pass + +from ansible_collections.community.general.plugins.module_utils.docker.common import ( + RequestException, +) + +from ansible_collections.community.general.plugins.module_utils.docker.swarm import AnsibleDockerSwarmClient + + +def get_service_info(client): + service = client.module.params['name'] + return client.get_service_inspect( + service_id=service, + skip_missing=True + ) + + +def main(): + argument_spec = dict( + name=dict(type='str', required=True), + ) + + client = AnsibleDockerSwarmClient( + argument_spec=argument_spec, + supports_check_mode=True, + min_docker_version='2.0.0', + min_docker_api_version='1.24', + ) + + client.fail_task_if_not_swarm_manager() + + try: + service = get_service_info(client) + + client.module.exit_json( + changed=False, + service=service, + exists=bool(service) + ) + except DockerException as e: + client.fail('An unexpected docker error occurred: {0}'.format(e), exception=traceback.format_exc()) + except RequestException as e: + client.fail('An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(e), exception=traceback.format_exc()) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/docker/docker_volume.py b/plugins/modules/cloud/docker/docker_volume.py new file mode 100644 index 0000000000..cb81467641 --- /dev/null +++ b/plugins/modules/cloud/docker/docker_volume.py @@ -0,0 +1,338 @@ +#!/usr/bin/python +# coding: utf-8 +# +# Copyright 2017 Red Hat | Ansible, Alex Grönholm +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +module: docker_volume +short_description: Manage Docker volumes +description: + - Create/remove Docker volumes. + - Performs largely the same function as the "docker volume" CLI subcommand. +options: + volume_name: + description: + - Name of the volume to operate on. + type: str + required: yes + aliases: + - name + + driver: + description: + - Specify the type of volume. Docker provides the C(local) driver, but 3rd party drivers can also be used. + type: str + default: local + + driver_options: + description: + - "Dictionary of volume settings. Consult docker docs for valid options and values: + U(https://docs.docker.com/engine/reference/commandline/volume_create/#driver-specific-options)" + type: dict + + labels: + description: + - Dictionary of label key/values to set for the volume. + type: dict + + force: + description: + - With state C(present) causes the volume to be deleted and recreated if the volume already + exists and the driver, driver options or labels differ. This will cause any data in the existing + volume to be lost. + - Deprecated. Will be removed in Ansible 2.12. Set I(recreate) to C(options-changed) instead + for the same behavior of setting I(force) to C(yes). + type: bool + default: no + + recreate: + description: + - Controls when a volume will be recreated when I(state) is C(present).
Please + note that recreating an existing volume will cause **any data in the existing volume + to be lost!** The volume will be deleted and a new volume with the same name will be + created. + - The value C(always) forces the volume to always be recreated. + - The value C(never) makes sure the volume will not be recreated. + - The value C(options-changed) makes sure the volume will be recreated if the volume + already exists and the driver, driver options or labels differ. + type: str + default: never + choices: + - always + - never + - options-changed + + state: + description: + - C(absent) deletes the volume. + - C(present) creates the volume, if it does not already exist. + type: str + default: present + choices: + - absent + - present + +extends_documentation_fragment: +- community.general.docker +- community.general.docker.docker_py_1_documentation + + +author: + - Alex Grönholm (@agronholm) + +requirements: + - "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 1.10.0 (use L(docker-py,https://pypi.org/project/docker-py/) for Python 2.6)" + - "The docker server >= 1.9.0" +''' + +EXAMPLES = ''' +- name: Create a volume + docker_volume: + name: volume_one + +- name: Remove a volume + docker_volume: + name: volume_one + state: absent + +- name: Create a volume with options + docker_volume: + name: volume_two + driver_options: + type: btrfs + device: /dev/sda2 +''' + +RETURN = ''' +volume: + description: + - Volume inspection results for the affected volume. + - Note that facts are part of the registered vars since Ansible 2.8. For compatibility reasons, the facts + are also accessible directly as C(docker_volume). Note that the returned fact will be removed in Ansible 2.12. + returned: success + type: dict + sample: {} +''' + +import traceback + +try: + from docker.errors import DockerException, APIError +except ImportError: + # missing Docker SDK for Python handled in ansible.module_utils.docker.common + pass + +from ansible_collections.community.general.plugins.module_utils.docker.common import ( + DockerBaseClass, + AnsibleDockerClient, + DifferenceTracker, + RequestException, +) +from ansible.module_utils.six import iteritems, text_type + + +class TaskParameters(DockerBaseClass): + def __init__(self, client): + super(TaskParameters, self).__init__() + self.client = client + + self.volume_name = None + self.driver = None + self.driver_options = None + self.labels = None + self.force = None + self.recreate = None + self.debug = None + + for key, value in iteritems(client.module.params): + setattr(self, key, value) + + if self.force is not None: + if self.recreate != 'never': + client.fail('Cannot use the deprecated "force" ' + 'option when "recreate" is set. Please stop ' + 'using the force option.') + client.module.warn('The "force" option of docker_volume has been deprecated ' + 'in Ansible 2.8.
Please use the "recreate" ' + 'option, which provides the same functionality as "force".') + self.recreate = 'options-changed' if self.force else 'never' + + +class DockerVolumeManager(object): + + def __init__(self, client): + self.client = client + self.parameters = TaskParameters(client) + self.check_mode = self.client.check_mode + self.results = { + u'changed': False, + u'actions': [] + } + self.diff = self.client.module._diff + self.diff_tracker = DifferenceTracker() + self.diff_result = dict() + + self.existing_volume = self.get_existing_volume() + + state = self.parameters.state + if state == 'present': + self.present() + elif state == 'absent': + self.absent() + + if self.diff or self.check_mode or self.parameters.debug: + if self.diff: + self.diff_result['before'], self.diff_result['after'] = self.diff_tracker.get_before_after() + self.results['diff'] = self.diff_result + + def get_existing_volume(self): + try: + volumes = self.client.volumes() + except APIError as e: + self.client.fail(text_type(e)) + + if volumes[u'Volumes'] is None: + return None + + for volume in volumes[u'Volumes']: + if volume['Name'] == self.parameters.volume_name: + return volume + + return None + + def has_different_config(self): + """ + Return the list of differences between the current parameters and the existing volume. + + :return: list of options that differ + """ + differences = DifferenceTracker() + if self.parameters.driver and self.parameters.driver != self.existing_volume['Driver']: + differences.add('driver', parameter=self.parameters.driver, active=self.existing_volume['Driver']) + if self.parameters.driver_options: + if not self.existing_volume.get('Options'): + differences.add('driver_options', + parameter=self.parameters.driver_options, + active=self.existing_volume.get('Options')) + else: + for key, value in iteritems(self.parameters.driver_options): + if (not self.existing_volume['Options'].get(key) or + value != self.existing_volume['Options'][key]): + differences.add('driver_options.%s' % key, + parameter=value, + active=self.existing_volume['Options'].get(key)) + if self.parameters.labels: + existing_labels = self.existing_volume.get('Labels', {}) + for label in self.parameters.labels: + if existing_labels.get(label) != self.parameters.labels.get(label): + differences.add('labels.%s' % label, + parameter=self.parameters.labels.get(label), + active=existing_labels.get(label)) + + return differences + + def create_volume(self): + if not self.existing_volume: + if not self.check_mode: + try: + params = dict( + driver=self.parameters.driver, + driver_opts=self.parameters.driver_options, + ) + + if self.parameters.labels is not None: + params['labels'] = self.parameters.labels + + resp = self.client.create_volume(self.parameters.volume_name, **params) + self.existing_volume = self.client.inspect_volume(resp['Name']) + except APIError as e: + self.client.fail(text_type(e)) + + self.results['actions'].append("Created volume %s with driver %s" % (self.parameters.volume_name, self.parameters.driver)) + self.results['changed'] = True + + def remove_volume(self): + if self.existing_volume: + if not self.check_mode: + try: + self.client.remove_volume(self.parameters.volume_name) + except APIError as e: + self.client.fail(text_type(e)) + + self.results['actions'].append("Removed volume %s" % self.parameters.volume_name) + self.results['changed'] = True + + def present(self): + differences = DifferenceTracker() + if self.existing_volume: + differences = self.has_different_config() + + 
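# recreate='always' unconditionally removes and recreates the volume; 'options-changed' does so only when has_different_config() reports differences; 'never' (the default) keeps the existing volume. +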
self.diff_tracker.add('exists', parameter=True, active=self.existing_volume is not None) + if (not differences.empty and self.parameters.recreate == 'options-changed') or self.parameters.recreate == 'always': + self.remove_volume() + self.existing_volume = None + + self.create_volume() + + if self.diff or self.check_mode or self.parameters.debug: + self.diff_result['differences'] = differences.get_legacy_docker_diffs() + self.diff_tracker.merge(differences) + + if not self.check_mode and not self.parameters.debug: + self.results.pop('actions') + + volume_facts = self.get_existing_volume() + self.results['ansible_facts'] = {u'docker_volume': volume_facts} + self.results['volume'] = volume_facts + + def absent(self): + self.diff_tracker.add('exists', parameter=False, active=self.existing_volume is not None) + self.remove_volume() + + +def main(): + argument_spec = dict( + volume_name=dict(type='str', required=True, aliases=['name']), + state=dict(type='str', default='present', choices=['present', 'absent']), + driver=dict(type='str', default='local'), + driver_options=dict(type='dict', default={}), + labels=dict(type='dict'), + force=dict(type='bool', removed_in_version='2.12'), + recreate=dict(type='str', default='never', choices=['always', 'never', 'options-changed']), + debug=dict(type='bool', default=False) + ) + + option_minimal_versions = dict( + labels=dict(docker_py_version='1.10.0', docker_api_version='1.23'), + ) + + client = AnsibleDockerClient( + argument_spec=argument_spec, + supports_check_mode=True, + min_docker_version='1.10.0', + min_docker_api_version='1.21', + # "The docker server >= 1.9.0" + option_minimal_versions=option_minimal_versions, + ) + + try: + cm = DockerVolumeManager(client) + client.module.exit_json(**cm.results) + except DockerException as e: + client.fail('An unexpected docker error occurred: {0}'.format(e), exception=traceback.format_exc()) + except RequestException as e: + client.fail('An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(e), exception=traceback.format_exc()) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/docker/docker_volume_info.py b/plugins/modules/cloud/docker/docker_volume_info.py new file mode 100644 index 0000000000..3c7f1ec28f --- /dev/null +++ b/plugins/modules/cloud/docker/docker_volume_info.py @@ -0,0 +1,133 @@ +#!/usr/bin/python +# coding: utf-8 +# +# Copyright 2017 Red Hat | Ansible, Alex Grönholm +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +module: docker_volume_info +short_description: Retrieve facts about Docker volumes +description: + - Performs largely the same function as the "docker volume inspect" CLI subcommand. +options: + name: + description: + - Name of the volume to inspect. 
+ type: str + required: yes + aliases: + - volume_name + +extends_documentation_fragment: +- community.general.docker +- community.general.docker.docker_py_1_documentation + + +author: + - Felix Fontein (@felixfontein) + +requirements: + - "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 1.8.0 (use L(docker-py,https://pypi.org/project/docker-py/) for Python 2.6)" + - "Docker API >= 1.21" +''' + +EXAMPLES = ''' +- name: Get infos on volume + docker_volume_info: + name: mydata + register: result + +- name: Does volume exist? + debug: + msg: "The volume {{ 'exists' if result.exists else 'does not exist' }}" + +- name: Print information about volume + debug: + var: result.volume + when: result.exists +''' + +RETURN = ''' +exists: + description: + - Returns whether the volume exists. + type: bool + returned: always + sample: true +volume: + description: + - Volume inspection results for the affected volume. + - Will be C(none) if volume does not exist. + returned: success + type: dict + sample: '{ + "CreatedAt": "2018-12-09T17:43:44+01:00", + "Driver": "local", + "Labels": null, + "Mountpoint": "/var/lib/docker/volumes/ansible-test-bd3f6172/_data", + "Name": "ansible-test-bd3f6172", + "Options": {}, + "Scope": "local" + }' +''' + +import traceback + +try: + from docker.errors import DockerException, NotFound +except ImportError: + # missing Docker SDK for Python handled in ansible.module_utils.docker.common + pass + +from ansible_collections.community.general.plugins.module_utils.docker.common import ( + AnsibleDockerClient, + RequestException, +) + + +def get_existing_volume(client, volume_name): + try: + return client.inspect_volume(volume_name) + except NotFound as dummy: + return None + except Exception as exc: + client.fail("Error inspecting volume: %s" % exc) + + +def main(): + argument_spec = dict( + name=dict(type='str', required=True, aliases=['volume_name']), + ) + + client = AnsibleDockerClient( + argument_spec=argument_spec, + supports_check_mode=True, + min_docker_version='1.8.0', + min_docker_api_version='1.21', + ) + + try: + volume = get_existing_volume(client, client.module.params['name']) + + client.module.exit_json( + changed=False, + exists=(True if volume else False), + volume=volume, + ) + except DockerException as e: + client.fail('An unexpected docker error occurred: {0}'.format(e), exception=traceback.format_exc()) + except RequestException as e: + client.fail('An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(e), exception=traceback.format_exc()) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/google/gc_storage.py b/plugins/modules/cloud/google/gc_storage.py new file mode 100644 index 0000000000..09a286a8aa --- /dev/null +++ b/plugins/modules/cloud/google/gc_storage.py @@ -0,0 +1,487 @@ +#!/usr/bin/python +# +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: gc_storage +short_description: This module manages objects/buckets in Google Cloud Storage. +description: + - This module allows users to manage their objects/buckets in Google Cloud Storage. It allows upload and download operations and can set some + canned permissions. 
It also allows retrieval of URLs for objects for use in playbooks, and retrieval of string contents of objects. This module + requires setting the default project in GCS prior to playbook usage. See U(https://developers.google.com/storage/docs/reference/v1/apiversion1) for + information about setting the default project. + +options: + bucket: + description: + - Bucket name. + required: true + object: + description: + - Keyname of the object inside the bucket. Can also be used to create "virtual directories" (see examples). + src: + description: + - The source file path when performing a PUT operation. + dest: + description: + - The destination file path when downloading an object/key with a GET operation. + force: + description: + - Forces an overwrite either locally on the filesystem or remotely with the object/key. Used with PUT and GET operations. + type: bool + default: 'yes' + aliases: [ 'overwrite' ] + permission: + description: + - This option lets the user set the canned permissions on the object/bucket that are created. The permissions that can be set are 'private', + 'public-read', 'authenticated-read'. + default: private + headers: + description: + - Headers to attach to object. + default: {} + expiration: + description: + - Time limit (in seconds) for the URL generated and returned by GCS when performing a mode=put or mode=get_url operation. This URL is only + available when 'public-read' is the ACL for the object. + mode: + description: + - Switches the module behaviour between upload, download, get_url (return download URL), get_str (download object as string), create (bucket) and + delete (bucket). + required: true + choices: [ 'get', 'put', 'get_url', 'get_str', 'delete', 'create' ] + gs_secret_key: + description: + - GS secret key. If not set then the value of the GS_SECRET_ACCESS_KEY environment variable is used. + required: true + gs_access_key: + description: + - GS access key. If not set then the value of the GS_ACCESS_KEY_ID environment variable is used. + required: true + region: + description: + - The gs region to use. If not defined then the value 'US' will be used.
See U(https://cloud.google.com/storage/docs/bucket-locations) + default: 'US' + versioning: + description: + - Whether versioning is enabled or disabled (note that once versioning is enabled, it can only be suspended). + type: bool + +requirements: + - "python >= 2.6" + - "boto >= 2.9" + +author: +- Benno Joy (@bennojoy) +- Lukas Beumer (@Nitaco) + +''' + +EXAMPLES = ''' +- name: Upload some content + gc_storage: + bucket: mybucket + object: key.txt + src: /usr/local/myfile.txt + mode: put + permission: public-read + +- name: Upload some headers + gc_storage: + bucket: mybucket + object: key.txt + src: /usr/local/myfile.txt + headers: '{"Content-Encoding": "gzip"}' + +- name: Download some content + gc_storage: + bucket: mybucket + object: key.txt + dest: /usr/local/myfile.txt + mode: get + +- name: Download an object as a string to use elsewhere in your playbook + gc_storage: + bucket: mybucket + object: key.txt + mode: get_str + +- name: Create an empty bucket + gc_storage: + bucket: mybucket + mode: create + +- name: Create a bucket with key as directory + gc_storage: + bucket: mybucket + object: /my/directory/path + mode: create + +- name: Delete a bucket and all contents + gc_storage: + bucket: mybucket + mode: delete + +- name: Create a bucket with versioning enabled + gc_storage: + bucket: "mybucket" + versioning: yes + mode: create + +- name: Create a bucket located in the EU + gc_storage: + bucket: "mybucket" + region: "europe-west3" + mode: create + +''' + +import os + +try: + import boto + HAS_BOTO = True +except ImportError: + HAS_BOTO = False + +from ansible.module_utils.basic import AnsibleModule + + +def grant_check(module, gs, obj): + try: + acp = obj.get_acl() + if module.params.get('permission') == 'public-read': + grant = [x for x in acp.entries.entry_list if x.scope.type == 'AllUsers'] + if not grant: + obj.set_acl('public-read') + module.exit_json(changed=True, result="The object's permission has been set to public-read") + if module.params.get('permission') == 'authenticated-read': + grant = [x for x in acp.entries.entry_list if x.scope.type == 'AllAuthenticatedUsers'] + if not grant: + obj.set_acl('authenticated-read') + module.exit_json(changed=True, result="The object's permission has been set to authenticated-read") + except gs.provider.storage_response_error as e: + module.fail_json(msg=str(e)) + return True + + +def key_check(module, gs, bucket, obj): + try: + bucket = gs.lookup(bucket) + key_check = bucket.get_key(obj) + except gs.provider.storage_response_error as e: + module.fail_json(msg=str(e)) + if key_check: + grant_check(module, gs, key_check) + return True + else: + return False + + +def keysum(module, gs, bucket, obj): + bucket = gs.lookup(bucket) + key_check = bucket.get_key(obj) + if not key_check: + return None + md5_remote = key_check.etag[1:-1] + etag_multipart = '-' in md5_remote # Check for multipart, etag is not md5 + if etag_multipart is True: + module.fail_json(msg="Files uploaded with multipart to gs are not supported with checksum, unable to compute checksum.") + return md5_remote + + +def bucket_check(module, gs, bucket): + try: + result = gs.lookup(bucket) + except gs.provider.storage_response_error as e: + module.fail_json(msg=str(e)) + if result: + grant_check(module, gs, result) + return True + else: + return False + + +def create_bucket(module, gs, bucket): + try: + bucket = gs.create_bucket(bucket, transform_headers(module.params.get('headers')), module.params.get('region')) + bucket.set_acl(module.params.get('permission')) +
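# Per the versioning option docs above: once enabled, bucket versioning can only be suspended, never fully removed. +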
bucket.configure_versioning(module.params.get('versioning')) + except gs.provider.storage_response_error as e: + module.fail_json(msg=str(e)) + if bucket: + return True + + +def delete_bucket(module, gs, bucket): + try: + bucket = gs.lookup(bucket) + bucket_contents = bucket.list() + for key in bucket_contents: + bucket.delete_key(key.name) + bucket.delete() + return True + except gs.provider.storage_response_error as e: + module.fail_json(msg=str(e)) + + +def delete_key(module, gs, bucket, obj): + try: + bucket = gs.lookup(bucket) + bucket.delete_key(obj) + module.exit_json(msg="Object deleted from bucket ", changed=True) + except gs.provider.storage_response_error as e: + module.fail_json(msg=str(e)) + + +def create_dirkey(module, gs, bucket, obj): + try: + bucket = gs.lookup(bucket) + key = bucket.new_key(obj) + key.set_contents_from_string('') + module.exit_json(msg="Virtual directory %s created in bucket %s" % (obj, bucket.name), changed=True) + except gs.provider.storage_response_error as e: + module.fail_json(msg=str(e)) + + +def path_check(path): + if os.path.exists(path): + return True + else: + return False + + +def transform_headers(headers): + """ + Boto url-encodes values unless we convert the value to `str`, so doing + this prevents 'max-age=100000' from being converted to "max-age%3D100000". + + :param headers: Headers to convert + :type headers: dict + :rtype: dict + + """ + + for key, value in headers.items(): + headers[key] = str(value) + return headers + + +def upload_gsfile(module, gs, bucket, obj, src, expiry): + try: + bucket = gs.lookup(bucket) + key = bucket.new_key(obj) + key.set_contents_from_filename( + filename=src, + headers=transform_headers(module.params.get('headers')) + ) + key.set_acl(module.params.get('permission')) + url = key.generate_url(expiry) + module.exit_json(msg="PUT operation complete", url=url, changed=True) + except gs.provider.storage_copy_error as e: + module.fail_json(msg=str(e)) + + +def download_gsfile(module, gs, bucket, obj, dest): + try: + bucket = gs.lookup(bucket) + key = bucket.lookup(obj) + key.get_contents_to_filename(dest) + module.exit_json(msg="GET operation complete", changed=True) + except gs.provider.storage_copy_error as e: + module.fail_json(msg=str(e)) + + +def download_gsstr(module, gs, bucket, obj): + try: + bucket = gs.lookup(bucket) + key = bucket.lookup(obj) + contents = key.get_contents_as_string() + module.exit_json(msg="GET operation complete", contents=contents, changed=True) + except gs.provider.storage_copy_error as e: + module.fail_json(msg=str(e)) + + +def get_download_url(module, gs, bucket, obj, expiry): + try: + bucket = gs.lookup(bucket) + key = bucket.lookup(obj) + url = key.generate_url(expiry) + module.exit_json(msg="Download url:", url=url, expiration=expiry, changed=True) + except gs.provider.storage_response_error as e: + module.fail_json(msg=str(e)) + + +def handle_get(module, gs, bucket, obj, overwrite, dest): + md5_remote = keysum(module, gs, bucket, obj) + md5_local = module.md5(dest) + if md5_local == md5_remote: + module.exit_json(changed=False) + if md5_local != md5_remote and not overwrite: + module.exit_json(msg="WARNING: Checksums do not match. Use overwrite parameter to force download.", failed=True) + else: + download_gsfile(module, gs, bucket, obj, dest) + + +def handle_put(module, gs, bucket, obj, overwrite, src, expiration): + # Lets check to see if bucket exists to get ground truth. 
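+ # For non-multipart uploads the GCS etag is the object's MD5, so keysum()'s value can be compared with module.md5(src) to decide whether an upload is needed (multipart etags contain '-' and are rejected by keysum).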
+ bucket_rc = bucket_check(module, gs, bucket) + key_rc = key_check(module, gs, bucket, obj) + + # Lets check key state. Does it exist and if it does, compute the etag md5sum. + if bucket_rc and key_rc: + md5_remote = keysum(module, gs, bucket, obj) + md5_local = module.md5(src) + if md5_local == md5_remote: + module.exit_json(msg="Local and remote object are identical", changed=False) + if md5_local != md5_remote and not overwrite: + module.exit_json(msg="WARNING: Checksums do not match. Use overwrite parameter to force upload.", failed=True) + else: + upload_gsfile(module, gs, bucket, obj, src, expiration) + + if not bucket_rc: + create_bucket(module, gs, bucket) + upload_gsfile(module, gs, bucket, obj, src, expiration) + + # If bucket exists but key doesn't, just upload. + if bucket_rc and not key_rc: + upload_gsfile(module, gs, bucket, obj, src, expiration) + + +def handle_delete(module, gs, bucket, obj): + if bucket and not obj: + if bucket_check(module, gs, bucket): + module.exit_json(msg="Bucket %s and all keys have been deleted." % bucket, changed=delete_bucket(module, gs, bucket)) + else: + module.exit_json(msg="Bucket does not exist.", changed=False) + if bucket and obj: + if bucket_check(module, gs, bucket): + if key_check(module, gs, bucket, obj): + module.exit_json(msg="Object has been deleted.", changed=delete_key(module, gs, bucket, obj)) + else: + module.exit_json(msg="Object does not exist.", changed=False) + else: + module.exit_json(msg="Bucket does not exist.", changed=False) + else: + module.fail_json(msg="Bucket or Bucket & object parameter is required.", failed=True) + + +def handle_create(module, gs, bucket, obj): + if bucket and not obj: + if bucket_check(module, gs, bucket): + module.exit_json(msg="Bucket already exists.", changed=False) + else: + module.exit_json(msg="Bucket created successfully", changed=create_bucket(module, gs, bucket)) + if bucket and obj: + if obj.endswith('/'): + dirobj = obj + else: + dirobj = obj + "/" + + if bucket_check(module, gs, bucket): + if key_check(module, gs, bucket, dirobj): + module.exit_json(msg="Bucket %s and key %s already exists." % (bucket, obj), changed=False) + else: + create_dirkey(module, gs, bucket, dirobj) + else: + create_bucket(module, gs, bucket) + create_dirkey(module, gs, bucket, dirobj) + + +def main(): + module = AnsibleModule( + argument_spec=dict( + bucket=dict(required=True), + object=dict(default=None, type='path'), + src=dict(default=None), + dest=dict(default=None, type='path'), + expiration=dict(type='int', default=600, aliases=['expiry']), + mode=dict(choices=['get', 'put', 'delete', 'create', 'get_url', 'get_str'], required=True), + permission=dict(choices=['private', 'public-read', 'authenticated-read'], default='private'), + headers=dict(type='dict', default={}), + gs_secret_key=dict(no_log=True, required=True), + gs_access_key=dict(required=True), + overwrite=dict(default=True, type='bool', aliases=['force']), + region=dict(default='US', type='str'), + versioning=dict(default='no', type='bool') + ), + ) + + if not HAS_BOTO: + module.fail_json(msg='`boto` 2.9+ is required for this module. 
Try: pip install `boto` --upgrade') + + bucket = module.params.get('bucket') + obj = module.params.get('object') + src = module.params.get('src') + dest = module.params.get('dest') + mode = module.params.get('mode') + expiry = module.params.get('expiration') + gs_secret_key = module.params.get('gs_secret_key') + gs_access_key = module.params.get('gs_access_key') + overwrite = module.params.get('overwrite') + + if mode == 'put': + if not src or not obj: + module.fail_json(msg="When using PUT, src, bucket, object are mandatory parameters") + if mode == 'get': + if not dest or not obj: + module.fail_json(msg="When using GET, dest, bucket, object are mandatory parameters") + + try: + gs = boto.connect_gs(gs_access_key, gs_secret_key) + except boto.exception.NoAuthHandlerFound as e: + module.fail_json(msg=str(e)) + + if mode == 'get': + if not bucket_check(module, gs, bucket) or not key_check(module, gs, bucket, obj): + module.fail_json(msg="Target bucket/key cannot be found", failed=True) + if not path_check(dest): + download_gsfile(module, gs, bucket, obj, dest) + else: + handle_get(module, gs, bucket, obj, overwrite, dest) + + if mode == 'put': + if not path_check(src): + module.fail_json(msg="Local object for PUT does not exist", failed=True) + handle_put(module, gs, bucket, obj, overwrite, src, expiry) + + # Support for deleting an object if we have both params. + if mode == 'delete': + handle_delete(module, gs, bucket, obj) + + if mode == 'create': + handle_create(module, gs, bucket, obj) + + if mode == 'get_url': + if bucket and obj: + if bucket_check(module, gs, bucket) and key_check(module, gs, bucket, obj): + get_download_url(module, gs, bucket, obj, expiry) + else: + module.fail_json(msg="Key/Bucket does not exist", failed=True) + else: + module.fail_json(msg="Bucket and Object parameters must be set", failed=True) + + # --------------------------- Get the String contents of an Object ------------------------- + if mode == 'get_str': + if bucket and obj: + if bucket_check(module, gs, bucket) and key_check(module, gs, bucket, obj): + download_gsstr(module, gs, bucket, obj) + else: + module.fail_json(msg="Key/Bucket does not exist", failed=True) + else: + module.fail_json(msg="Bucket and Object parameters must be set", failed=True) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/google/gcdns_record.py b/plugins/modules/cloud/google/gcdns_record.py new file mode 100644 index 0000000000..90a4a97b51 --- /dev/null +++ b/plugins/modules/cloud/google/gcdns_record.py @@ -0,0 +1,774 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (C) 2015 CallFire Inc. +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +################################################################################ +# Documentation +################################################################################ + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['deprecated'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: gcdns_record +short_description: Creates or removes resource records in Google Cloud DNS +description: + - Creates or removes resource records in Google Cloud DNS.
+author: "William Albert (@walbert947)" +requirements: + - "python >= 2.6" + - "apache-libcloud >= 0.19.0" +deprecated: + removed_in: "2.12" + why: Updated modules released with increased functionality + alternative: Use M(gcp_dns_resource_record_set) instead. +options: + state: + description: + - Whether the given resource record should or should not be present. + choices: ["present", "absent"] + default: "present" + record: + description: + - The fully-qualified domain name of the resource record. + required: true + aliases: ['name'] + zone: + description: + - The DNS domain name of the zone (e.g., example.com). + - One of either I(zone) or I(zone_id) must be specified as an + option, or the module will fail. + - If both I(zone) and I(zone_id) are specified, I(zone_id) will be + used. + zone_id: + description: + - The Google Cloud ID of the zone (e.g., example-com). + - One of either I(zone) or I(zone_id) must be specified as an + option, or the module will fail. + - These usually take the form of domain names with the dots replaced + with dashes. A zone ID will never have any dots in it. + - I(zone_id) can be faster than I(zone) in projects with a large + number of zones. + - If both I(zone) and I(zone_id) are specified, I(zone_id) will be + used. + type: + description: + - The type of resource record to add. + required: true + choices: [ 'A', 'AAAA', 'CNAME', 'SRV', 'TXT', 'SOA', 'NS', 'MX', 'SPF', 'PTR' ] + record_data: + description: + - The record_data to use for the resource record. + - I(record_data) must be specified if I(state) is C(present) or + I(overwrite) is C(True), or the module will fail. + - Valid record_data vary based on the record's I(type). In addition, + resource records that contain a DNS domain name in the value + field (e.g., CNAME, PTR, SRV, .etc) MUST include a trailing dot + in the value. + - Individual string record_data for TXT records must be enclosed in + double quotes. + - For resource records that have the same name but different + record_data (e.g., multiple A records), they must be defined as + multiple list entries in a single record. + required: false + aliases: ['value'] + ttl: + description: + - The amount of time in seconds that a resource record will remain + cached by a caching resolver. + default: 300 + overwrite: + description: + - Whether an attempt to overwrite an existing record should succeed + or fail. The behavior of this option depends on I(state). + - If I(state) is C(present) and I(overwrite) is C(True), this + module will replace an existing resource record of the same name + with the provided I(record_data). If I(state) is C(present) and + I(overwrite) is C(False), this module will fail if there is an + existing resource record with the same name and type, but + different resource data. + - If I(state) is C(absent) and I(overwrite) is C(True), this + module will remove the given resource record unconditionally. + If I(state) is C(absent) and I(overwrite) is C(False), this + module will fail if the provided record_data do not match exactly + with the existing resource record's record_data. + type: bool + default: 'no' + service_account_email: + description: + - The e-mail address for a service account with access to Google + Cloud DNS. + pem_file: + description: + - The path to the PEM file associated with the service account + email. + - This option is deprecated and may be removed in a future release. + Use I(credentials_file) instead. 
+    credentials_file:
+        description:
+            - The path to the JSON file associated with the service account
+              email.
+    project_id:
+        description:
+            - The Google Cloud Platform project ID to use.
+notes:
+    - See also M(gcdns_zone).
+    - This module's underlying library does not support in-place updates for
+      DNS resource records. Instead, resource records are quickly deleted and
+      recreated.
+    - SOA records are technically supported, but their functionality is limited
+      to verifying that a zone's existing SOA record matches a pre-determined
+      value. The SOA record cannot be updated.
+    - Root NS records cannot be updated.
+    - NAPTR records are not supported.
+'''

+EXAMPLES = '''
+# Create an A record.
+- gcdns_record:
+    record: 'www1.example.com'
+    zone: 'example.com'
+    type: A
+    value: '1.2.3.4'
+
+# Update an existing record.
+- gcdns_record:
+    record: 'www1.example.com'
+    zone: 'example.com'
+    type: A
+    overwrite: true
+    value: '5.6.7.8'
+
+# Remove an A record.
+- gcdns_record:
+    record: 'www1.example.com'
+    zone_id: 'example-com'
+    state: absent
+    type: A
+    value: '5.6.7.8'
+
+# Create a CNAME record.
+- gcdns_record:
+    record: 'www.example.com'
+    zone_id: 'example-com'
+    type: CNAME
+    value: 'www.example.com.'    # Note the trailing dot
+
+# Create an MX record with a custom TTL.
+- gcdns_record:
+    record: 'example.com'
+    zone: 'example.com'
+    type: MX
+    ttl: 3600
+    value: '10 mail.example.com.'    # Note the trailing dot
+
+# Create multiple A records with the same name.
+- gcdns_record:
+    record: 'api.example.com'
+    zone_id: 'example-com'
+    type: A
+    record_data:
+      - '192.0.2.23'
+      - '10.4.5.6'
+      - '198.51.100.5'
+      - '203.0.113.10'
+
+# Change the value of an existing record with multiple record_data.
+- gcdns_record:
+    record: 'api.example.com'
+    zone: 'example.com'
+    type: A
+    overwrite: true
+    record_data:        # WARNING: All values in a record will be replaced
+      - '192.0.2.23'
+      - '192.0.2.42'    # The changed record
+      - '198.51.100.5'
+      - '203.0.113.10'
+
+# Safely remove a multi-line record.
+- gcdns_record:
+    record: 'api.example.com'
+    zone_id: 'example-com'
+    state: absent
+    type: A
+    record_data:        # NOTE: All of the values must match exactly
+      - '192.0.2.23'
+      - '192.0.2.42'
+      - '198.51.100.5'
+      - '203.0.113.10'
+
+# Unconditionally remove a record.
+- gcdns_record:
+    record: 'api.example.com'
+    zone_id: 'example-com'
+    state: absent
+    overwrite: true    # overwrite is true, so no values are needed
+    type: A
+
+# Create an AAAA record
+- gcdns_record:
+    record: 'www1.example.com'
+    zone: 'example.com'
+    type: AAAA
+    value: 'fd00:db8::1'
+
+# Create a PTR record
+- gcdns_record:
+    record: '10.5.168.192.in-addr.arpa'
+    zone: '5.168.192.in-addr.arpa'
+    type: PTR
+    value: 'api.example.com.'    # Note the trailing dot.
+
+# Create an NS record
+- gcdns_record:
+    record: 'subdomain.example.com'
+    zone: 'example.com'
+    type: NS
+    ttl: 21600
+    record_data:
+      - 'ns-cloud-d1.googledomains.com.'    # Note the trailing dots on values
+      - 'ns-cloud-d2.googledomains.com.'
+      - 'ns-cloud-d3.googledomains.com.'
+      - 'ns-cloud-d4.googledomains.com.'
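Several of these examples call out the trailing dot. A minimal standalone sketch of the normalization the module applies before talking to Cloud DNS; the helper name is illustrative, not from the module:

```python
def to_absolute(name):
    """Return a DNS name in the absolute form Google Cloud DNS expects."""
    # Cloud DNS treats every name as fully qualified, ending in a dot.
    return name if name.endswith('.') else name + '.'

assert to_absolute('www.example.com') == 'www.example.com.'
assert to_absolute('www.example.com.') == 'www.example.com.'
```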
+
+# Create a TXT record
+- gcdns_record:
+    record: 'example.com'
+    zone_id: 'example-com'
+    type: TXT
+    record_data:
+      - '"v=spf1 include:_spf.google.com -all"'    # A single-string TXT value
+      - '"hello " "world"'    # A multi-string TXT value
+'''

+RETURN = '''
+overwrite:
+    description: Whether the module was allowed to overwrite the record
+    returned: success
+    type: bool
+    sample: True
+record:
+    description: Fully-qualified domain name of the resource record
+    returned: success
+    type: str
+    sample: mail.example.com.
+state:
+    description: Whether the record is present or absent
+    returned: success
+    type: str
+    sample: present
+ttl:
+    description: The time-to-live of the resource record
+    returned: success
+    type: int
+    sample: 300
+type:
+    description: The type of the resource record
+    returned: success
+    type: str
+    sample: A
+record_data:
+    description: The resource record values
+    returned: success
+    type: list
+    sample: ['5.6.7.8', '9.10.11.12']
+zone:
+    description: The DNS name of the zone
+    returned: success
+    type: str
+    sample: example.com.
+zone_id:
+    description: The Google Cloud DNS ID of the zone
+    returned: success
+    type: str
+    sample: example-com
+'''
+
+
+################################################################################
+# Imports
+################################################################################
+
+import socket
+from distutils.version import LooseVersion
+
+try:
+    from libcloud import __version__ as LIBCLOUD_VERSION
+    from libcloud.common.google import InvalidRequestError
+    from libcloud.common.types import LibcloudError
+    from libcloud.dns.types import Provider
+    from libcloud.dns.types import RecordDoesNotExistError
+    from libcloud.dns.types import ZoneDoesNotExistError
+    HAS_LIBCLOUD = True
+    # The libcloud Google Cloud DNS provider.
+    PROVIDER = Provider.GOOGLE
+except ImportError:
+    HAS_LIBCLOUD = False
+    PROVIDER = None
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.gcdns import gcdns_connect
+
+
+################################################################################
+# Constants
+################################################################################
+
+# Apache libcloud 0.19.0 was the first to contain the non-beta Google Cloud DNS
+# v1 API. Earlier versions contained the beta v1 API, which has since been
+# deprecated and decommissioned.
+MINIMUM_LIBCLOUD_VERSION = '0.19.0'
+
+# The records that libcloud's Google Cloud DNS provider supports.
+#
+# Libcloud has a RECORD_TYPE_MAP dictionary in the provider that also contains
+# this information and is the authoritative source on which records are
+# supported, but accessing the dictionary requires creating a Google Cloud DNS
+# driver object, which is done in a helper module.
+#
+# I'm hard-coding the supported record types here, because they (hopefully!)
+# shouldn't change much, and it allows me to use it as a "choices" parameter
+# in an AnsibleModule argument_spec.
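For reference, a hedged sketch of the alternative the comment above describes: reading the authoritative map from a live libcloud driver rather than hard-coding it. The credentials and project below are placeholders, and the module itself obtains its driver through the gcdns_connect() helper rather than instantiating one directly:

```python
from libcloud.dns.providers import get_driver
from libcloud.dns.types import Provider

# Instantiate the Google Cloud DNS driver directly; arguments are
# placeholder service-account credentials.
cls = get_driver(Provider.GOOGLE)
driver = cls('sa@example-project.iam.gserviceaccount.com',
             '/path/to/key.json', project='example-project')

# RECORD_TYPE_MAP maps libcloud RecordType constants to the API's type strings.
print(sorted(driver.RECORD_TYPE_MAP.values()))
```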
+SUPPORTED_RECORD_TYPES = ['A', 'AAAA', 'CNAME', 'SRV', 'TXT', 'SOA', 'NS', 'MX', 'SPF', 'PTR']
+
+
+################################################################################
+# Functions
+################################################################################
+
+def create_record(module, gcdns, zone, record):
+    """Creates or overwrites a resource record."""
+
+    overwrite = module.boolean(module.params['overwrite'])
+    record_name = module.params['record']
+    record_type = module.params['type']
+    ttl = module.params['ttl']
+    record_data = module.params['record_data']
+    data = dict(ttl=ttl, rrdatas=record_data)
+
+    # Google Cloud DNS wants the trailing dot on all DNS names.
+    if record_name[-1] != '.':
+        record_name = record_name + '.'
+
+    # If we found a record, we need to check if the values match.
+    if record is not None:
+        # If the record matches, we obviously don't have to change anything.
+        if _records_match(record.data['ttl'], record.data['rrdatas'], ttl, record_data):
+            return False
+
+        # The record doesn't match, so we need to check if we can overwrite it.
+        if not overwrite:
+            module.fail_json(
+                msg='cannot overwrite existing record, overwrite protection enabled',
+                changed=False
+            )
+
+    # The record either doesn't exist, or it exists and we can overwrite it.
+    if record is None and not module.check_mode:
+        # There's no existing record, so we'll just create it.
+        try:
+            gcdns.create_record(record_name, zone, record_type, data)
+        except InvalidRequestError as error:
+            if error.code == 'invalid':
+                # The resource record name and type are valid by themselves, but
+                # not when combined (e.g., an 'A' record with "www.example.com"
+                # as its value).
+                module.fail_json(
+                    msg='value is invalid for the given type: ' +
+                        "%s, got value: %s" % (record_type, record_data),
+                    changed=False
+                )
+
+            elif error.code == 'cnameResourceRecordSetConflict':
+                # We're attempting to create a CNAME resource record when we
+                # already have another type of resource record with the same
+                # domain name.
+                module.fail_json(
+                    msg="non-CNAME resource record already exists: %s" % record_name,
+                    changed=False
+                )
+
+            else:
+                # The error is something else that we don't know how to handle,
+                # so we'll just re-raise the exception.
+                raise
+
+    elif record is not None and not module.check_mode:
+        # The Google provider in libcloud doesn't support updating a record in
+        # place, so if the record already exists, we need to delete it and
+        # recreate it using the new information.
+        gcdns.delete_record(record)
+
+        try:
+            gcdns.create_record(record_name, zone, record_type, data)
+        except InvalidRequestError:
+            # Something blew up when creating the record. This will usually be a
+            # result of invalid value data in the new record. Unfortunately, we
+            # already changed the state of the record by deleting the old one,
+            # so we'll try to roll back before failing out.
+            try:
+                gcdns.create_record(record.name, record.zone, record.type, record.data)
+                module.fail_json(
+                    msg='error updating record, the original record was restored',
+                    changed=False
+                )
+            except LibcloudError:
+                # We deleted the old record, couldn't create the new record, and
+                # couldn't roll back. That really sucks. We'll dump the original
+                # record to the failure output so the user can restore it if
+                # necessary.
+ module.fail_json( + msg='error updating record, and could not restore original record, ' + + "original name: %s " % record.name + + "original zone: %s " % record.zone + + "original type: %s " % record.type + + "original data: %s" % record.data, + changed=True) + + return True + + +def remove_record(module, gcdns, record): + """Remove a resource record.""" + + overwrite = module.boolean(module.params['overwrite']) + ttl = module.params['ttl'] + record_data = module.params['record_data'] + + # If there is no record, we're obviously done. + if record is None: + return False + + # If there is an existing record, do our values match the values of the + # existing record? + if not overwrite: + if not _records_match(record.data['ttl'], record.data['rrdatas'], ttl, record_data): + module.fail_json( + msg='cannot delete due to non-matching ttl or record_data: ' + + "ttl: %d, record_data: %s " % (ttl, record_data) + + "original ttl: %d, original record_data: %s" % (record.data['ttl'], record.data['rrdatas']), + changed=False + ) + + # If we got to this point, we're okay to delete the record. + if not module.check_mode: + gcdns.delete_record(record) + + return True + + +def _get_record(gcdns, zone, record_type, record_name): + """Gets the record object for a given FQDN.""" + + # The record ID is a combination of its type and FQDN. For example, the + # ID of an A record for www.example.com would be 'A:www.example.com.' + record_id = "%s:%s" % (record_type, record_name) + + try: + return gcdns.get_record(zone.id, record_id) + except RecordDoesNotExistError: + return None + + +def _get_zone(gcdns, zone_name, zone_id): + """Gets the zone object for a given domain name.""" + + if zone_id is not None: + try: + return gcdns.get_zone(zone_id) + except ZoneDoesNotExistError: + return None + + # To create a zone, we need to supply a domain name. However, to delete a + # zone, we need to supply a zone ID. Zone ID's are often based on domain + # names, but that's not guaranteed, so we'll iterate through the list of + # zones to see if we can find a matching domain name. + available_zones = gcdns.iterate_zones() + found_zone = None + + for zone in available_zones: + if zone.domain == zone_name: + found_zone = zone + break + + return found_zone + + +def _records_match(old_ttl, old_record_data, new_ttl, new_record_data): + """Checks to see if original and new TTL and values match.""" + + matches = True + + if old_ttl != new_ttl: + matches = False + if old_record_data != new_record_data: + matches = False + + return matches + + +def _sanity_check(module): + """Run sanity checks that don't depend on info from the zone/record.""" + + overwrite = module.params['overwrite'] + record_name = module.params['record'] + record_type = module.params['type'] + state = module.params['state'] + ttl = module.params['ttl'] + record_data = module.params['record_data'] + + # Apache libcloud needs to be installed and at least the minimum version. + if not HAS_LIBCLOUD: + module.fail_json( + msg='This module requires Apache libcloud %s or greater' % MINIMUM_LIBCLOUD_VERSION, + changed=False + ) + elif LooseVersion(LIBCLOUD_VERSION) < MINIMUM_LIBCLOUD_VERSION: + module.fail_json( + msg='This module requires Apache libcloud %s or greater' % MINIMUM_LIBCLOUD_VERSION, + changed=False + ) + + # A negative TTL is not permitted (how would they even work?!). + if ttl < 0: + module.fail_json( + msg='TTL cannot be less than zero, got: %d' % ttl, + changed=False + ) + + # Deleting SOA records is not permitted. 
+    if record_type == 'SOA' and state == 'absent':
+        module.fail_json(msg='cannot delete SOA records', changed=False)
+
+    # Updating SOA records is not permitted.
+    if record_type == 'SOA' and state == 'present' and overwrite:
+        module.fail_json(msg='cannot update SOA records', changed=False)
+
+    # Some sanity checks depend on what value was supplied.
+    if record_data is not None and (state == 'present' or not overwrite):
+        # A records must contain valid IPv4 addresses.
+        if record_type == 'A':
+            for value in record_data:
+                try:
+                    socket.inet_aton(value)
+                except socket.error:
+                    module.fail_json(
+                        msg='invalid A record value, got: %s' % value,
+                        changed=False
+                    )
+
+        # AAAA records must contain valid IPv6 addresses.
+        if record_type == 'AAAA':
+            for value in record_data:
+                try:
+                    socket.inet_pton(socket.AF_INET6, value)
+                except socket.error:
+                    module.fail_json(
+                        msg='invalid AAAA record value, got: %s' % value,
+                        changed=False
+                    )
+
+        # CNAME and SOA records can't have multiple values.
+        if record_type in ['CNAME', 'SOA'] and len(record_data) > 1:
+            module.fail_json(
+                msg='CNAME or SOA records cannot have more than one value, ' +
+                    "got: %s" % record_data,
+                changed=False
+            )
+
+        # Google Cloud DNS does not support wildcard NS records.
+        if record_type == 'NS' and record_name[0] == '*':
+            module.fail_json(
+                msg="wildcard NS records not allowed, got: %s" % record_name,
+                changed=False
+            )
+
+        # Values for TXT records must begin and end with a double quote.
+        if record_type == 'TXT':
+            for value in record_data:
+                if value[0] != '"' or value[-1] != '"':
+                    module.fail_json(
+                        msg='TXT record_data must be enclosed in double quotes, ' +
+                            'got: %s' % value,
+                        changed=False
+                    )
+
+
+def _additional_sanity_checks(module, zone):
+    """Run input sanity checks that depend on info from the zone/record."""
+
+    overwrite = module.params['overwrite']
+    record_name = module.params['record']
+    record_type = module.params['type']
+    state = module.params['state']
+
+    # CNAME records are not allowed to have the same name as the root domain.
+    if record_type == 'CNAME' and record_name == zone.domain:
+        module.fail_json(
+            msg='CNAME records cannot match the zone name',
+            changed=False
+        )
+
+    # The root domain must always have an NS record.
+    if record_type == 'NS' and record_name == zone.domain and state == 'absent':
+        module.fail_json(
+            msg='cannot delete root NS records',
+            changed=False
+        )
+
+    # Updating NS records with the name as the root domain is not allowed
+    # because libcloud does not support in-place updates and root domain NS
+    # records cannot be removed.
+    if record_type == 'NS' and record_name == zone.domain and overwrite:
+        module.fail_json(
+            msg='cannot update existing root NS records',
+            changed=False
+        )
+
+    # SOA records with names that don't match the root domain are not permitted
+    # (and wouldn't make sense anyway).
+ if record_type == 'SOA' and record_name != zone.domain: + module.fail_json( + msg='non-root SOA records are not permitted, got: %s' % record_name, + changed=False + ) + + +################################################################################ +# Main +################################################################################ + +def main(): + """Main function""" + + module = AnsibleModule( + argument_spec=dict( + state=dict(default='present', choices=['present', 'absent'], type='str'), + record=dict(required=True, aliases=['name'], type='str'), + zone=dict(type='str'), + zone_id=dict(type='str'), + type=dict(required=True, choices=SUPPORTED_RECORD_TYPES, type='str'), + record_data=dict(aliases=['value'], type='list'), + ttl=dict(default=300, type='int'), + overwrite=dict(default=False, type='bool'), + service_account_email=dict(type='str'), + pem_file=dict(type='path'), + credentials_file=dict(type='path'), + project_id=dict(type='str') + ), + required_if=[ + ('state', 'present', ['record_data']), + ('overwrite', False, ['record_data']) + ], + required_one_of=[['zone', 'zone_id']], + supports_check_mode=True + ) + + _sanity_check(module) + + record_name = module.params['record'] + record_type = module.params['type'] + state = module.params['state'] + ttl = module.params['ttl'] + zone_name = module.params['zone'] + zone_id = module.params['zone_id'] + + json_output = dict( + state=state, + record=record_name, + zone=zone_name, + zone_id=zone_id, + type=record_type, + record_data=module.params['record_data'], + ttl=ttl, + overwrite=module.boolean(module.params['overwrite']) + ) + + # Google Cloud DNS wants the trailing dot on all DNS names. + if zone_name is not None and zone_name[-1] != '.': + zone_name = zone_name + '.' + if record_name[-1] != '.': + record_name = record_name + '.' + + # Build a connection object that we can use to connect with Google Cloud + # DNS. + gcdns = gcdns_connect(module, provider=PROVIDER) + + # We need to check that the zone we're creating a record for actually + # exists. + zone = _get_zone(gcdns, zone_name, zone_id) + if zone is None and zone_name is not None: + module.fail_json( + msg='zone name was not found: %s' % zone_name, + changed=False + ) + elif zone is None and zone_id is not None: + module.fail_json( + msg='zone id was not found: %s' % zone_id, + changed=False + ) + + # Populate the returns with the actual zone information. + json_output['zone'] = zone.domain + json_output['zone_id'] = zone.id + + # We also need to check if the record we want to create or remove actually + # exists. + try: + record = _get_record(gcdns, zone, record_type, record_name) + except InvalidRequestError: + # We gave Google Cloud DNS an invalid DNS record name. + module.fail_json( + msg='record name is invalid: %s' % record_name, + changed=False + ) + + _additional_sanity_checks(module, zone) + + diff = dict() + + # Build the 'before' diff + if record is None: + diff['before'] = '' + diff['before_header'] = '' + else: + diff['before'] = dict( + record=record.data['name'], + type=record.data['type'], + record_data=record.data['rrdatas'], + ttl=record.data['ttl'] + ) + diff['before_header'] = "%s:%s" % (record_type, record_name) + + # Create, remove, or modify the record. 
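At this point the module has prepared the 'before' half of a diff and is about to fill in 'after' and apply the change. This is Ansible's check-mode diff idiom: report matching before/after structures so `ansible-playbook --diff` can render the pending change. A minimal standalone sketch of the idiom, with illustrative names rather than the module's own:

```python
def build_diff(before, after, header):
    """Shape a diff dict the way AnsibleModule.exit_json(diff=...) expects."""
    return {
        'before': before if before is not None else '',
        'before_header': header if before is not None else '',
        'after': after if after is not None else '',
        'after_header': header if after is not None else '',
    }

# e.g. module.exit_json(changed=True,
#                       diff=build_diff(None, {'record_data': ['1.2.3.4']},
#                                       'A:www.example.com.'))
```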
+ if state == 'present': + diff['after'] = dict( + record=record_name, + type=record_type, + record_data=module.params['record_data'], + ttl=ttl + ) + diff['after_header'] = "%s:%s" % (record_type, record_name) + + changed = create_record(module, gcdns, zone, record) + + elif state == 'absent': + diff['after'] = '' + diff['after_header'] = '' + + changed = remove_record(module, gcdns, record) + + module.exit_json(changed=changed, diff=diff, **json_output) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/google/gcdns_zone.py b/plugins/modules/cloud/google/gcdns_zone.py new file mode 100644 index 0000000000..928534cf17 --- /dev/null +++ b/plugins/modules/cloud/google/gcdns_zone.py @@ -0,0 +1,370 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (C) 2015 CallFire Inc. +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +################################################################################ +# Documentation +################################################################################ + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['deprecated'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: gcdns_zone +short_description: Creates or removes zones in Google Cloud DNS +description: + - Creates or removes managed zones in Google Cloud DNS. +author: "William Albert (@walbert947)" +requirements: + - "apache-libcloud >= 0.19.0" +deprecated: + removed_in: "2.12" + why: Updated modules released with increased functionality + alternative: Use M(gcp_dns_managed_zone) instead. +options: + state: + description: + - Whether the given zone should or should not be present. + choices: ["present", "absent"] + default: "present" + zone: + description: + - The DNS domain name of the zone. + - This is NOT the Google Cloud DNS zone ID (e.g., example-com). If + you attempt to specify a zone ID, this module will attempt to + create a TLD and will fail. + required: true + aliases: ['name'] + description: + description: + - An arbitrary text string to use for the zone description. + default: "" + service_account_email: + description: + - The e-mail address for a service account with access to Google + Cloud DNS. + pem_file: + description: + - The path to the PEM file associated with the service account + email. + - This option is deprecated and may be removed in a future release. + Use I(credentials_file) instead. + credentials_file: + description: + - The path to the JSON file associated with the service account + email. + project_id: + description: + - The Google Cloud Platform project ID to use. +notes: + - See also M(gcdns_record). + - Zones that are newly created must still be set up with a domain registrar + before they can be used. +''' + +EXAMPLES = ''' +# Basic zone creation example. +- name: Create a basic zone with the minimum number of parameters. + gcdns_zone: zone=example.com + +# Zone removal example. +- name: Remove a zone. 
+ gcdns_zone: zone=example.com state=absent + +# Zone creation with description +- name: Creating a zone with a description + gcdns_zone: zone=example.com description="This is an awesome zone" +''' + +RETURN = ''' +description: + description: The zone's description + returned: success + type: str + sample: This is an awesome zone +state: + description: Whether the zone is present or absent + returned: success + type: str + sample: present +zone: + description: The zone's DNS name + returned: success + type: str + sample: example.com. +''' + + +################################################################################ +# Imports +################################################################################ + +from distutils.version import LooseVersion + +try: + from libcloud import __version__ as LIBCLOUD_VERSION + from libcloud.common.google import InvalidRequestError + from libcloud.common.google import ResourceExistsError + from libcloud.common.google import ResourceNotFoundError + from libcloud.dns.types import Provider + # The libcloud Google Cloud DNS provider. + PROVIDER = Provider.GOOGLE + HAS_LIBCLOUD = True +except ImportError: + HAS_LIBCLOUD = False + PROVIDER = None + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.gcdns import gcdns_connect + + +################################################################################ +# Constants +################################################################################ + +# Apache libcloud 0.19.0 was the first to contain the non-beta Google Cloud DNS +# v1 API. Earlier versions contained the beta v1 API, which has since been +# deprecated and decommissioned. +MINIMUM_LIBCLOUD_VERSION = '0.19.0' + +# The URL used to verify ownership of a zone in Google Cloud DNS. +ZONE_VERIFICATION_URL = 'https://www.google.com/webmasters/verification/' + +################################################################################ +# Functions +################################################################################ + + +def create_zone(module, gcdns, zone): + """Creates a new Google Cloud DNS zone.""" + + description = module.params['description'] + extra = dict(description=description) + zone_name = module.params['zone'] + + # Google Cloud DNS wants the trailing dot on the domain name. + if zone_name[-1] != '.': + zone_name = zone_name + '.' + + # If we got a zone back, then the domain exists. + if zone is not None: + return False + + # The zone doesn't exist yet. + try: + if not module.check_mode: + gcdns.create_zone(domain=zone_name, extra=extra) + return True + + except ResourceExistsError: + # The zone already exists. We checked for this already, so either + # Google is lying, or someone was a ninja and created the zone + # within milliseconds of us checking for its existence. In any case, + # the zone has already been created, so we have nothing more to do. + return False + + except InvalidRequestError as error: + if error.code == 'invalid': + # The zone name or a parameter might be completely invalid. This is + # typically caused by an illegal DNS name (e.g. foo..com). + module.fail_json( + msg="zone name is not a valid DNS name: %s" % zone_name, + changed=False + ) + + elif error.code == 'managedZoneDnsNameNotAvailable': + # Google Cloud DNS will refuse to create zones with certain domain + # names, such as TLDs, ccTLDs, or special domain names such as + # example.com. 
+ module.fail_json( + msg="zone name is reserved or already in use: %s" % zone_name, + changed=False + ) + + elif error.code == 'verifyManagedZoneDnsNameOwnership': + # This domain name needs to be verified before Google will create + # it. This occurs when a user attempts to create a zone which shares + # a domain name with a zone hosted elsewhere in Google Cloud DNS. + module.fail_json( + msg="ownership of zone %s needs to be verified at %s" % (zone_name, ZONE_VERIFICATION_URL), + changed=False + ) + + else: + # The error is something else that we don't know how to handle, + # so we'll just re-raise the exception. + raise + + +def remove_zone(module, gcdns, zone): + """Removes an existing Google Cloud DNS zone.""" + + # If there's no zone, then we're obviously done. + if zone is None: + return False + + # An empty zone will have two resource records: + # 1. An NS record with a list of authoritative name servers + # 2. An SOA record + # If any additional resource records are present, Google Cloud DNS will + # refuse to remove the zone. + if len(zone.list_records()) > 2: + module.fail_json( + msg="zone is not empty and cannot be removed: %s" % zone.domain, + changed=False + ) + + try: + if not module.check_mode: + gcdns.delete_zone(zone) + return True + + except ResourceNotFoundError: + # When we performed our check, the zone existed. It may have been + # deleted by something else. It's gone, so whatever. + return False + + except InvalidRequestError as error: + if error.code == 'containerNotEmpty': + # When we performed our check, the zone existed and was empty. In + # the milliseconds between the check and the removal command, + # records were added to the zone. + module.fail_json( + msg="zone is not empty and cannot be removed: %s" % zone.domain, + changed=False + ) + + else: + # The error is something else that we don't know how to handle, + # so we'll just re-raise the exception. + raise + + +def _get_zone(gcdns, zone_name): + """Gets the zone object for a given domain name.""" + + # To create a zone, we need to supply a zone name. However, to delete a + # zone, we need to supply a zone ID. Zone ID's are often based on zone + # names, but that's not guaranteed, so we'll iterate through the list of + # zones to see if we can find a matching name. + available_zones = gcdns.iterate_zones() + found_zone = None + + for zone in available_zones: + if zone.domain == zone_name: + found_zone = zone + break + + return found_zone + + +def _sanity_check(module): + """Run module sanity checks.""" + + zone_name = module.params['zone'] + + # Apache libcloud needs to be installed and at least the minimum version. + if not HAS_LIBCLOUD: + module.fail_json( + msg='This module requires Apache libcloud %s or greater' % MINIMUM_LIBCLOUD_VERSION, + changed=False + ) + elif LooseVersion(LIBCLOUD_VERSION) < MINIMUM_LIBCLOUD_VERSION: + module.fail_json( + msg='This module requires Apache libcloud %s or greater' % MINIMUM_LIBCLOUD_VERSION, + changed=False + ) + + # Google Cloud DNS does not support the creation of TLDs. + if '.' 
not in zone_name or len([label for label in zone_name.split('.') if label]) == 1:
+        module.fail_json(
+            msg='cannot create top-level domain: %s' % zone_name,
+            changed=False
+        )
+
+################################################################################
+# Main
+################################################################################
+
+
+def main():
+    """Main function"""
+
+    module = AnsibleModule(
+        argument_spec=dict(
+            state=dict(default='present', choices=['present', 'absent'], type='str'),
+            zone=dict(required=True, aliases=['name'], type='str'),
+            description=dict(default='', type='str'),
+            service_account_email=dict(type='str'),
+            pem_file=dict(type='path'),
+            credentials_file=dict(type='path'),
+            project_id=dict(type='str')
+        ),
+        supports_check_mode=True
+    )
+
+    _sanity_check(module)
+
+    zone_name = module.params['zone']
+    state = module.params['state']
+
+    # Google Cloud DNS wants the trailing dot on the domain name.
+    if zone_name[-1] != '.':
+        zone_name = zone_name + '.'
+
+    json_output = dict(
+        state=state,
+        zone=zone_name,
+        description=module.params['description']
+    )
+
+    # Build a connection object that we can use to connect with Google
+    # Cloud DNS.
+    gcdns = gcdns_connect(module, provider=PROVIDER)
+
+    # We need to check if the zone we're attempting to create already exists.
+    zone = _get_zone(gcdns, zone_name)
+
+    diff = dict()
+
+    # Build the 'before' diff
+    if zone is None:
+        diff['before'] = ''
+        diff['before_header'] = ''
+    else:
+        diff['before'] = dict(
+            zone=zone.domain,
+            description=zone.extra['description']
+        )
+        diff['before_header'] = zone_name
+
+    # Create or remove the zone.
+    if state == 'present':
+        diff['after'] = dict(
+            zone=zone_name,
+            description=module.params['description']
+        )
+        diff['after_header'] = zone_name
+
+        changed = create_zone(module, gcdns, zone)
+
+    elif state == 'absent':
+        diff['after'] = ''
+        diff['after_header'] = ''
+
+        changed = remove_zone(module, gcdns, zone)
+
+    module.exit_json(changed=changed, diff=diff, **json_output)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/cloud/google/gce.py b/plugins/modules/cloud/google/gce.py
new file mode 100644
index 0000000000..9e46cfed9f
--- /dev/null
+++ b/plugins/modules/cloud/google/gce.py
@@ -0,0 +1,738 @@
+#!/usr/bin/python
+# Copyright 2013 Google Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['deprecated'],
+                    'supported_by': 'community'}
+
+DOCUMENTATION = '''
+---
+module: gce
+short_description: create or terminate GCE instances
+description:
+    - Creates or terminates Google Compute Engine (GCE) instances. See
+      U(https://cloud.google.com/compute) for an overview.
+      Full install/configuration instructions for the gce* modules can
+      be found in the comments of ansible/test/gce_tests.py.
+deprecated:
+    removed_in: "2.12"
+    why: Updated modules released with increased functionality
+    alternative: Use M(gcp_compute_instance) instead.
+options:
+  image:
+    description:
+      - image string to use for the instance (default will follow latest
+        stable debian image)
+    default: "debian-8"
+  image_family:
+    description:
+      - image family from which to select the image. The most recent
+        non-deprecated image in the family will be used.
+  external_projects:
+    description:
+      - A list of other projects (accessible with the provisioning credentials)
+        to be searched for the image.
+  instance_names:
+    description:
+      - a comma-separated list of instance names to create or destroy
+  machine_type:
+    description:
+      - machine type to use for the instance; 'n1-standard-1' is used by default
+    default: "n1-standard-1"
+  metadata:
+    description:
+      - a hash/dictionary of custom data for the instance;
+        '{"key":"value", ...}'
+  service_account_email:
+    description:
+      - service account email
+  service_account_permissions:
+    description:
+      - service account permissions (see
+        U(https://cloud.google.com/sdk/gcloud/reference/compute/instances/create),
+        --scopes section for detailed information)
+    choices: [
+      "bigquery", "cloud-platform", "compute-ro", "compute-rw",
+      "useraccounts-ro", "useraccounts-rw", "datastore", "logging-write",
+      "monitoring", "sql-admin", "storage-full", "storage-ro",
+      "storage-rw", "taskqueue", "userinfo-email"
+    ]
+  pem_file:
+    description:
+      - path to the pem file associated with the service account email.
+        This option is deprecated. Use 'credentials_file'.
+  credentials_file:
+    description:
+      - path to the JSON file associated with the service account email
+  project_id:
+    description:
+      - your GCE project ID
+  name:
+    description:
+      - either a name of a single instance or when used with 'num_instances',
+        the base name of a cluster of nodes
+    aliases: ['base_name']
+  num_instances:
+    description:
+      - can be used with 'name', specifies
+        the number of nodes to provision using 'name'
+        as a base name
+  network:
+    description:
+      - name of the network, 'default' will be used if not specified
+    default: "default"
+  subnetwork:
+    description:
+      - name of the subnetwork in which the instance should be created
+  persistent_boot_disk:
+    description:
+      - if set, create the instance with a persistent boot disk
+    type: bool
+    default: 'no'
+  disks:
+    description:
+      - a list of persistent disks to attach to the instance; a string value
+        gives the name of the disk; alternatively, a dictionary value can
+        define 'name' and 'mode' ('READ_ONLY' or 'READ_WRITE'). The first entry
+        will be the boot disk (which must be READ_WRITE).
+  state:
+    description:
+      - desired state of the resource
+    default: "present"
+    choices: ["active", "present", "absent", "deleted", "started", "stopped", "terminated"]
+  tags:
+    description:
+      - a comma-separated list of tags to associate with the instance
+  zone:
+    description:
+      - the GCE zone to use. The list of available zones is at U(https://cloud.google.com/compute/docs/regions-zones/regions-zones#available).
+    required: true
+    default: "us-central1-a"
+  ip_forward:
+    description:
+      - set to C(yes) if the instance can forward ip packets (useful for
+        gateways)
+    type: bool
+    default: 'no'
+  external_ip:
+    description:
+      - type of external ip, ephemeral by default; alternatively, a fixed gce ip or ip name can be given. Specify 'none' if no external ip is desired.
+    default: "ephemeral"
+  disk_auto_delete:
+    description:
+      - if set, the boot disk will be removed after instance destruction
+    type: bool
+    default: 'yes'
+  preemptible:
+    description:
+      - if set to C(yes), instances will be preemptible and time-limited.
+ (requires libcloud >= 0.20.0) + type: bool + default: 'no' + disk_size: + description: + - The size of the boot disk created for this instance (in GB) + default: 10 + +requirements: + - "python >= 2.6" + - "apache-libcloud >= 0.13.3, >= 0.17.0 if using JSON credentials, + >= 0.20.0 if using preemptible option" +notes: + - Either I(instance_names) or I(name) is required. + - JSON credentials strongly preferred. +author: + - Eric Johnson (@erjohnso) + - Tom Melendez (@supertom) +''' + +EXAMPLES = ''' +# Basic provisioning example. Create a single Debian 8 instance in the +# us-central1-a Zone of the n1-standard-1 machine type. +# Create multiple instances by specifying multiple names, separated by +# commas in the instance_names field +# (e.g. my-test-instance1,my-test-instance2) + - gce: + instance_names: my-test-instance1 + zone: us-central1-a + machine_type: n1-standard-1 + image: debian-8 + state: present + service_account_email: "your-sa@your-project-name.iam.gserviceaccount.com" + credentials_file: "/path/to/your-key.json" + project_id: "your-project-name" + disk_size: 32 + +# Create a single instance of an image from the "my-base-image" image family +# in the us-central1-a Zone of the n1-standard-1 machine type. +# This image family is in the "my-other-project" GCP project. + - gce: + instance_names: my-test-instance1 + zone: us-central1-a + machine_type: n1-standard-1 + image_family: my-base-image + external_projects: + - my-other-project + state: present + service_account_email: "your-sa@your-project-name.iam.gserviceaccount.com" + credentials_file: "/path/to/your-key.json" + project_id: "your-project-name" + disk_size: 32 + +# Create a single Debian 8 instance in the us-central1-a Zone +# Use existing disks, custom network/subnetwork, set service account permissions +# add tags and metadata. + - gce: + instance_names: my-test-instance + zone: us-central1-a + machine_type: n1-standard-1 + state: present + metadata: '{"db":"postgres", "group":"qa", "id":500}' + tags: + - http-server + - my-other-tag + disks: + - name: disk-2 + mode: READ_WRITE + - name: disk-3 + mode: READ_ONLY + disk_auto_delete: false + network: foobar-network + subnetwork: foobar-subnetwork-1 + preemptible: true + ip_forward: true + service_account_permissions: + - storage-full + - taskqueue + - bigquery + - https://www.googleapis.com/auth/ndev.clouddns.readwrite + service_account_email: "your-sa@your-project-name.iam.gserviceaccount.com" + credentials_file: "/path/to/your-key.json" + project_id: "your-project-name" + +--- +# Example Playbook +- name: Compute Engine Instance Examples + hosts: localhost + vars: + service_account_email: "your-sa@your-project-name.iam.gserviceaccount.com" + credentials_file: "/path/to/your-key.json" + project_id: "your-project-name" + tasks: + - name: create multiple instances + # Basic provisioning example. Create multiple Debian 8 instances in the + # us-central1-a Zone of n1-standard-1 machine type. 
+ gce: + instance_names: test1,test2,test3 + zone: us-central1-a + machine_type: n1-standard-1 + image: debian-8 + state: present + service_account_email: "{{ service_account_email }}" + credentials_file: "{{ credentials_file }}" + project_id: "{{ project_id }}" + metadata : '{ "startup-script" : "apt-get update" }' + register: gce + + - name: Save host data + add_host: + hostname: "{{ item.public_ip }}" + groupname: gce_instances_ips + with_items: "{{ gce.instance_data }}" + + - name: Wait for SSH for instances + wait_for: + delay: 1 + host: "{{ item.public_ip }}" + port: 22 + state: started + timeout: 30 + with_items: "{{ gce.instance_data }}" + + - name: Configure Hosts + hosts: gce_instances_ips + become: yes + become_method: sudo + roles: + - my-role-one + - my-role-two + tags: + - config + + - name: delete test-instances + # Basic termination of instance. + gce: + service_account_email: "{{ service_account_email }}" + credentials_file: "{{ credentials_file }}" + project_id: "{{ project_id }}" + instance_names: "{{ gce.instance_names }}" + zone: us-central1-a + state: absent + tags: + - delete +''' + +import socket +import logging + +try: + from ast import literal_eval + + HAS_PYTHON26 = True +except ImportError: + HAS_PYTHON26 = False + +try: + import libcloud + from libcloud.compute.types import Provider + from libcloud.compute.providers import get_driver + from libcloud.common.google import GoogleBaseError, QuotaExceededError, \ + ResourceExistsError, ResourceInUseError, ResourceNotFoundError + from libcloud.compute.drivers.gce import GCEAddress + + _ = Provider.GCE + HAS_LIBCLOUD = True +except ImportError: + HAS_LIBCLOUD = False + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.gce import gce_connect, unexpected_error_msg +from ansible_collections.community.general.plugins.module_utils.gcp import get_valid_location +from ansible.module_utils.six.moves import reduce + + +def get_instance_info(inst): + """Retrieves instance information from an instance object and returns it + as a dictionary. + + """ + metadata = {} + if 'metadata' in inst.extra and 'items' in inst.extra['metadata']: + for md in inst.extra['metadata']['items']: + metadata[md['key']] = md['value'] + + try: + netname = inst.extra['networkInterfaces'][0]['network'].split('/')[-1] + except Exception: + netname = None + try: + subnetname = inst.extra['networkInterfaces'][0]['subnetwork'].split('/')[-1] + except Exception: + subnetname = None + if 'disks' in inst.extra: + disk_names = [disk_info['source'].split('/')[-1] + for disk_info + in sorted(inst.extra['disks'], + key=lambda disk_info: disk_info['index'])] + else: + disk_names = [] + + if len(inst.public_ips) == 0: + public_ip = None + else: + public_ip = inst.public_ips[0] + + return ({ + 'image': inst.image is not None and inst.image.split('/')[-1] or None, + 'disks': disk_names, + 'machine_type': inst.size, + 'metadata': metadata, + 'name': inst.name, + 'network': netname, + 'subnetwork': subnetname, + 'private_ip': inst.private_ips[0], + 'public_ip': public_ip, + 'status': ('status' in inst.extra) and inst.extra['status'] or None, + 'tags': ('tags' in inst.extra) and inst.extra['tags'] or [], + 'zone': ('zone' in inst.extra) and inst.extra['zone'].name or None, + }) + + +def create_instances(module, gce, instance_names, number, lc_zone): + """Creates new instances. 
Attributes other than instance_names are picked + up from 'module' + + module : AnsibleModule object + gce: authenticated GCE libcloud driver + instance_names: python list of instance names to create + number: number of instances to create + lc_zone: GCEZone object + + Returns: + A list of dictionaries with instance information + about the instances that were launched. + + """ + image = module.params.get('image') + image_family = module.params.get('image_family') + external_projects = module.params.get('external_projects') + machine_type = module.params.get('machine_type') + metadata = module.params.get('metadata') + network = module.params.get('network') + subnetwork = module.params.get('subnetwork') + persistent_boot_disk = module.params.get('persistent_boot_disk') + disks = module.params.get('disks') + tags = module.params.get('tags') + ip_forward = module.params.get('ip_forward') + external_ip = module.params.get('external_ip') + disk_auto_delete = module.params.get('disk_auto_delete') + preemptible = module.params.get('preemptible') + disk_size = module.params.get('disk_size') + service_account_permissions = module.params.get('service_account_permissions') + + if external_ip == "none": + instance_external_ip = None + elif external_ip != "ephemeral": + instance_external_ip = external_ip + try: + # check if instance_external_ip is an ip or a name + try: + socket.inet_aton(instance_external_ip) + instance_external_ip = GCEAddress(id='unknown', name='unknown', address=instance_external_ip, region='unknown', driver=gce) + except socket.error: + instance_external_ip = gce.ex_get_address(instance_external_ip) + except GoogleBaseError as e: + module.fail_json(msg='Unexpected error attempting to get a static ip %s, error: %s' % (external_ip, e.value)) + else: + instance_external_ip = external_ip + + new_instances = [] + changed = False + + lc_disks = [] + disk_modes = [] + for i, disk in enumerate(disks or []): + if isinstance(disk, dict): + lc_disks.append(gce.ex_get_volume(disk['name'], lc_zone)) + disk_modes.append(disk['mode']) + else: + lc_disks.append(gce.ex_get_volume(disk, lc_zone)) + # boot disk is implicitly READ_WRITE + disk_modes.append('READ_ONLY' if i > 0 else 'READ_WRITE') + lc_network = gce.ex_get_network(network) + lc_machine_type = gce.ex_get_size(machine_type, lc_zone) + + # Try to convert the user's metadata value into the format expected + # by GCE. First try to ensure user has proper quoting of a + # dictionary-like syntax using 'literal_eval', then convert the python + # dict into a python list of 'key' / 'value' dicts. Should end up + # with: + # [ {'key': key1, 'value': value1}, {'key': key2, 'value': value2}, ...] 
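A standalone sketch of exactly that conversion, under the same assumption the module makes (the input is a dict, or a string literal_eval can parse into one); the helper name is illustrative:

```python
from ast import literal_eval

def to_gce_metadata(metadata):
    """Convert user-supplied metadata into GCE's list-of-items form."""
    md = metadata if isinstance(metadata, dict) else literal_eval(str(metadata))
    if not isinstance(md, dict):
        raise ValueError('metadata must be a dict')
    return {'items': [{'key': k, 'value': v} for k, v in md.items()]}

print(to_gce_metadata('{"db": "postgres", "group": "qa"}'))
# {'items': [{'key': 'db', 'value': 'postgres'}, {'key': 'group', 'value': 'qa'}]}
```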
+ if metadata: + if isinstance(metadata, dict): + md = metadata + else: + try: + md = literal_eval(str(metadata)) + if not isinstance(md, dict): + raise ValueError('metadata must be a dict') + except ValueError as e: + module.fail_json(msg='bad metadata: %s' % str(e)) + except SyntaxError as e: + module.fail_json(msg='bad metadata syntax') + + if hasattr(libcloud, '__version__') and libcloud.__version__ < '0.15': + items = [] + for k, v in md.items(): + items.append({"key": k, "value": v}) + metadata = {'items': items} + else: + metadata = md + + lc_image = LazyDiskImage(module, gce, image, lc_disks, family=image_family, projects=external_projects) + ex_sa_perms = [] + bad_perms = [] + if service_account_permissions: + for perm in service_account_permissions: + if perm not in gce.SA_SCOPES_MAP and not perm.startswith('https://www.googleapis.com/auth'): + bad_perms.append(perm) + if len(bad_perms) > 0: + module.fail_json(msg='bad permissions: %s' % str(bad_perms)) + ex_sa_perms.append({'email': "default"}) + ex_sa_perms[0]['scopes'] = service_account_permissions + + # These variables all have default values but check just in case + if not lc_network or not lc_machine_type or not lc_zone: + module.fail_json(msg='Missing required create instance variable', + changed=False) + + gce_args = dict( + location=lc_zone, + ex_network=network, ex_tags=tags, ex_metadata=metadata, + ex_can_ip_forward=ip_forward, + external_ip=instance_external_ip, ex_disk_auto_delete=disk_auto_delete, + ex_service_accounts=ex_sa_perms + ) + if preemptible is not None: + gce_args['ex_preemptible'] = preemptible + if subnetwork is not None: + gce_args['ex_subnetwork'] = subnetwork + + if isinstance(instance_names, str) and not number: + instance_names = [instance_names] + + if isinstance(instance_names, str) and number: + instance_responses = gce.ex_create_multiple_nodes(instance_names, lc_machine_type, + lc_image(), number, **gce_args) + for resp in instance_responses: + n = resp + if isinstance(resp, libcloud.compute.drivers.gce.GCEFailedNode): + try: + n = gce.ex_get_node(n.name, lc_zone) + except ResourceNotFoundError: + pass + else: + # Assure that at least one node has been created to set changed=True + changed = True + new_instances.append(n) + else: + for instance in instance_names: + pd = None + if lc_disks: + pd = lc_disks[0] + elif persistent_boot_disk: + try: + pd = gce.ex_get_volume("%s" % instance, lc_zone) + except ResourceNotFoundError: + pd = gce.create_volume(disk_size, "%s" % instance, image=lc_image()) + gce_args['ex_boot_disk'] = pd + + inst = None + try: + inst = gce.ex_get_node(instance, lc_zone) + except ResourceNotFoundError: + inst = gce.create_node( + instance, lc_machine_type, lc_image(), **gce_args + ) + changed = True + except GoogleBaseError as e: + module.fail_json(msg='Unexpected error attempting to create ' + + 'instance %s, error: %s' % (instance, e.value)) + if inst: + new_instances.append(inst) + + for inst in new_instances: + for i, lc_disk in enumerate(lc_disks): + # Check whether the disk is already attached + if (len(inst.extra['disks']) > i): + attached_disk = inst.extra['disks'][i] + if attached_disk['source'] != lc_disk.extra['selfLink']: + module.fail_json( + msg=("Disk at index %d does not match: requested=%s found=%s" % ( + i, lc_disk.extra['selfLink'], attached_disk['source']))) + elif attached_disk['mode'] != disk_modes[i]: + module.fail_json( + msg=("Disk at index %d is in the wrong mode: requested=%s found=%s" % ( + i, disk_modes[i], attached_disk['mode']))) + else: + 
continue + gce.attach_volume(inst, lc_disk, ex_mode=disk_modes[i]) + # Work around libcloud bug: attached volumes don't get added + # to the instance metadata. get_instance_info() only cares about + # source and index. + if len(inst.extra['disks']) != i + 1: + inst.extra['disks'].append( + {'source': lc_disk.extra['selfLink'], 'index': i}) + + instance_names = [] + instance_json_data = [] + for inst in new_instances: + d = get_instance_info(inst) + instance_names.append(d['name']) + instance_json_data.append(d) + + return (changed, instance_json_data, instance_names) + + +def change_instance_state(module, gce, instance_names, number, zone, state): + """Changes the state of a list of instances. For example, + change from started to stopped, or started to absent. + + module: Ansible module object + gce: authenticated GCE connection object + instance_names: a list of instance names to terminate + zone: GCEZone object where the instances reside prior to termination + state: 'state' parameter passed into module as argument + + Returns a dictionary of instance names that were changed. + + """ + changed = False + nodes = [] + state_instance_names = [] + + if isinstance(instance_names, str) and number: + node_names = ['%s-%03d' % (instance_names, i) for i in range(number)] + elif isinstance(instance_names, str) and not number: + node_names = [instance_names] + else: + node_names = instance_names + + for name in node_names: + inst = None + try: + inst = gce.ex_get_node(name, zone) + except ResourceNotFoundError: + state_instance_names.append(name) + except Exception as e: + module.fail_json(msg=unexpected_error_msg(e), changed=False) + else: + nodes.append(inst) + state_instance_names.append(name) + + if state in ['absent', 'deleted'] and number: + changed_nodes = gce.ex_destroy_multiple_nodes(nodes) or [False] + changed = reduce(lambda x, y: x or y, changed_nodes) + else: + for node in nodes: + if state in ['absent', 'deleted']: + gce.destroy_node(node) + changed = True + elif state == 'started' and node.state == libcloud.compute.types.NodeState.STOPPED: + gce.ex_start_node(node) + changed = True + elif state in ['stopped', 'terminated'] and node.state == libcloud.compute.types.NodeState.RUNNING: + gce.ex_stop_node(node) + changed = True + + return (changed, state_instance_names) + + +def main(): + module = AnsibleModule( + argument_spec=dict( + image=dict(default='debian-8'), + image_family=dict(), + external_projects=dict(type='list'), + instance_names=dict(), + machine_type=dict(default='n1-standard-1'), + metadata=dict(), + name=dict(aliases=['base_name']), + num_instances=dict(type='int'), + network=dict(default='default'), + subnetwork=dict(), + persistent_boot_disk=dict(type='bool', default=False), + disks=dict(type='list'), + state=dict(choices=['active', 'present', 'absent', 'deleted', + 'started', 'stopped', 'terminated'], + default='present'), + tags=dict(type='list'), + zone=dict(default='us-central1-a'), + service_account_email=dict(), + service_account_permissions=dict(type='list'), + pem_file=dict(type='path'), + credentials_file=dict(type='path'), + project_id=dict(), + ip_forward=dict(type='bool', default=False), + external_ip=dict(default='ephemeral'), + disk_auto_delete=dict(type='bool', default=True), + disk_size=dict(type='int', default=10), + preemptible=dict(type='bool', default=None), + ), + mutually_exclusive=[('instance_names', 'name')] + ) + + if not HAS_PYTHON26: + module.fail_json(msg="GCE module requires python's 'ast' module, python v2.6+") + if not HAS_LIBCLOUD: + 
module.fail_json(msg='libcloud with GCE support (0.17.0+) required for this module') + + gce = gce_connect(module) + + image = module.params.get('image') + image_family = module.params.get('image_family') + external_projects = module.params.get('external_projects') + instance_names = module.params.get('instance_names') + name = module.params.get('name') + number = module.params.get('num_instances') + subnetwork = module.params.get('subnetwork') + state = module.params.get('state') + zone = module.params.get('zone') + preemptible = module.params.get('preemptible') + changed = False + + inames = None + if isinstance(instance_names, list): + inames = instance_names + elif isinstance(instance_names, str): + inames = instance_names.split(',') + if name: + inames = name + if not inames: + module.fail_json(msg='Must specify a "name" or "instance_names"', + changed=False) + if not zone: + module.fail_json(msg='Must specify a "zone"', changed=False) + + lc_zone = get_valid_location(module, gce, zone) + if preemptible is not None and hasattr(libcloud, '__version__') and libcloud.__version__ < '0.20': + module.fail_json(msg="Apache Libcloud 0.20.0+ is required to use 'preemptible' option", + changed=False) + + if subnetwork is not None and not hasattr(gce, 'ex_get_subnetwork'): + module.fail_json(msg="Apache Libcloud 1.0.0+ is required to use 'subnetwork' option", + changed=False) + + json_output = {'zone': zone} + if state in ['absent', 'deleted', 'started', 'stopped', 'terminated']: + json_output['state'] = state + (changed, state_instance_names) = change_instance_state( + module, gce, inames, number, lc_zone, state) + + # based on what user specified, return the same variable, although + # value could be different if an instance could not be destroyed + if instance_names or name and number: + json_output['instance_names'] = state_instance_names + elif name: + json_output['name'] = name + + elif state in ['active', 'present']: + json_output['state'] = 'present' + (changed, instance_data, instance_name_list) = create_instances( + module, gce, inames, number, lc_zone) + json_output['instance_data'] = instance_data + if instance_names: + json_output['instance_names'] = instance_name_list + elif name: + json_output['name'] = name + + json_output['changed'] = changed + module.exit_json(**json_output) + + +class LazyDiskImage: + """ + Object for lazy instantiation of disk image + gce.ex_get_image is a very expensive call, so we want to avoid calling it as much as possible. + """ + + def __init__(self, module, gce, name, has_pd, family=None, projects=None): + self.image = None + self.was_called = False + self.gce = gce + self.name = name + self.has_pd = has_pd + self.module = module + self.family = family + self.projects = projects + + def __call__(self): + if not self.was_called: + self.was_called = True + if not self.has_pd: + if self.family: + self.image = self.gce.ex_get_image_from_family(self.family, ex_project_list=self.projects) + else: + self.image = self.gce.ex_get_image(self.name, ex_project_list=self.projects) + if not self.image: + self.module.fail_json(msg='image or disks missing for create instance', changed=False) + return self.image + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/google/gce_eip.py b/plugins/modules/cloud/google/gce_eip.py new file mode 100644 index 0000000000..a51b752ab3 --- /dev/null +++ b/plugins/modules/cloud/google/gce_eip.py @@ -0,0 +1,227 @@ +#!/usr/bin/python +# Copyright 2017 Google Inc. 
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+DOCUMENTATION = '''
+module: gce_eip
+short_description: Create or Destroy Global or Regional External IP addresses.
+description:
+    - Create (reserve) or Destroy (release) Regional or Global IP Addresses. See
+      U(https://cloud.google.com/compute/docs/configure-instance-ip-addresses#reserve_new_static) for more on reserving static addresses.
+requirements:
+    - "python >= 2.6"
+    - "apache-libcloud >= 0.19.0"
+notes:
+    - Global addresses can only be used with Global Forwarding Rules.
+author:
+    - "Tom Melendez (@supertom)"
+options:
+  name:
+    description:
+      - Name of Address.
+    required: true
+  region:
+    description:
+      - Region to create the address in. Set to 'global' to create a global address.
+    required: true
+  state:
+    description: The state the address should be in. C(present) or C(absent) are the only valid options.
+    default: present
+    required: false
+    choices: [present, absent]
+'''
+
+EXAMPLES = '''
+# Create a Global external IP address
+- gce_eip:
+    service_account_email: "{{ service_account_email }}"
+    credentials_file: "{{ credentials_file }}"
+    project_id: "{{ project_id }}"
+    name: my-global-ip
+    region: global
+    state: present
+
+# Create a Regional external IP address
+- gce_eip:
+    service_account_email: "{{ service_account_email }}"
+    credentials_file: "{{ credentials_file }}"
+    project_id: "{{ project_id }}"
+    name: my-regional-ip
+    region: us-east1
+    state: present
+'''
+
+RETURN = '''
+address:
+    description: IP address being operated on
+    returned: always
+    type: str
+    sample: "35.186.222.233"
+name:
+    description: name of the address being operated on
+    returned: always
+    type: str
+    sample: "my-address"
+region:
+    description: Region the address belongs to.
+    returned: always
+    type: str
+    sample: "global"
+'''
+
+USER_AGENT_VERSION = 'v1'
+USER_AGENT_PRODUCT = 'Ansible-gce_eip'
+
+try:
+    from ast import literal_eval
+    HAS_PYTHON26 = True
+except ImportError:
+    HAS_PYTHON26 = False
+
+try:
+    import libcloud
+    from libcloud.compute.types import Provider
+    from libcloud.compute.providers import get_driver
+    from libcloud.common.google import GoogleBaseError, QuotaExceededError, \
+        ResourceExistsError, ResourceInUseError, ResourceNotFoundError
+    from libcloud.compute.drivers.gce import GCEAddress
+    _ = Provider.GCE
+    HAS_LIBCLOUD = True
+except ImportError:
+    HAS_LIBCLOUD = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.gcp import gcp_connect
+
+
+def get_address(gce, name, region):
+    """
+    Get an Address from GCE.
+
+    :param gce: An initialized GCE driver object.
+    :type gce: :class: `GCENodeDriver`
+
+    :param name: Name of the Address.
+    :type name: ``str``
+
+    :return: A GCEAddress object or None.
+    :rtype: :class: `GCEAddress` or None
+    """
+    try:
+        return gce.ex_get_address(name=name, region=region)
+
+    except ResourceNotFoundError:
+        return None
+
+
+def create_address(gce, params):
+    """
+    Create a new Address.
+
+    :param gce: An initialized GCE driver object.
+    :type gce: :class: `GCENodeDriver`
+
+    :param params: Dictionary of parameters needed by the module.
+    :type params: ``dict``
+
+    :return: Tuple with changed status and address.
+    :rtype: tuple in the format of (bool, str)
+    """
+    changed = False
+    return_data = []
+
+    address = gce.ex_create_address(
+        name=params['name'], region=params['region'])
+
+    if address:
+        changed = True
+        return_data = address.address
+
+    return (changed, return_data)
+
+
+def delete_address(address):
+    """
+    Delete an Address.
+
+    :param address: The Address object to destroy.
+    :type address: :class: `GCEAddress`
+
+    :return: Tuple with changed status and address.
+    :rtype: tuple in the format of (bool, str)
+    """
+    changed = False
+    return_data = []
+    if address.destroy():
+        changed = True
+        return_data = address.address
+    return (changed, return_data)
+
+
+def main():
+    module = AnsibleModule(argument_spec=dict(
+        name=dict(required=True),
+        state=dict(choices=['absent', 'present'], default='present'),
+        region=dict(required=True),
+        service_account_email=dict(),
+        service_account_permissions=dict(type='list'),
+        pem_file=dict(type='path'),
+        credentials_file=dict(type='path'),
+        project_id=dict(), ), )
+
+    if not HAS_PYTHON26:
+        module.fail_json(
+            msg="GCE module requires python's 'ast' module, python v2.6+")
+    if not HAS_LIBCLOUD:
+        module.fail_json(
+            msg='libcloud with GCE support (0.19+) required for this module.')
+
+    gce = gcp_connect(module, Provider.GCE, get_driver,
+                      USER_AGENT_PRODUCT, USER_AGENT_VERSION)
+
+    params = {}
+    params['state'] = module.params.get('state')
+    params['name'] = module.params.get('name')
+    params['region'] = module.params.get('region')
+
+    changed = False
+    json_output = {'state': params['state']}
+    address = get_address(gce, params['name'], region=params['region'])
+
+    if params['state'] == 'absent':
+        if not address:
+            # Doesn't exist in GCE, and state==absent.
+            changed = False
+            module.fail_json(
+                msg="Cannot delete unknown address: %s" %
+                (params['name']))
+        else:
+            # Delete
+            (changed, json_output['address']) = delete_address(address)
+    else:
+        if not address:
+            # Create
+            (changed, json_output['address']) = create_address(gce,
+                                                               params)
+        else:
+            changed = False
+            json_output['address'] = address.address
+
+    json_output['changed'] = changed
+    json_output.update(params)
+    module.exit_json(**json_output)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/cloud/google/gce_img.py b/plugins/modules/cloud/google/gce_img.py
new file mode 100644
index 0000000000..f01a757ec3
--- /dev/null
+++ b/plugins/modules/cloud/google/gce_img.py
@@ -0,0 +1,206 @@
+#!/usr/bin/python
+# Copyright 2015 Google Inc. All Rights Reserved.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+"""An Ansible module to utilize GCE image resources."""
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: gce_img
+short_description: utilize GCE image resources
+description:
+    - This module can create and delete GCE private images from a gzipped
+      compressed tarball containing raw disk data, or from existing detached
+      disks in any zone.
U(https://cloud.google.com/compute/docs/images) +options: + name: + description: + - the name of the image to create or delete + required: true + description: + description: + - an optional description + family: + description: + - an optional family name + source: + description: + - the source disk or the Google Cloud Storage URI to create the image from + state: + description: + - desired state of the image + default: "present" + choices: ["present", "absent"] + zone: + description: + - the zone of the disk specified by source + default: "us-central1-a" + timeout: + description: + - timeout for the operation + default: 180 + service_account_email: + description: + - service account email + pem_file: + description: + - path to the pem file associated with the service account email + project_id: + description: + - your GCE project ID +requirements: + - "python >= 2.6" + - "apache-libcloud" +author: "Tom Melendez (@supertom)" +''' + +EXAMPLES = ''' +# Create an image named test-image from the disk 'test-disk' in zone us-central1-a. +- gce_img: + name: test-image + source: test-disk + zone: us-central1-a + state: present + +# Create an image named test-image from a tarball in Google Cloud Storage. +- gce_img: + name: test-image + source: https://storage.googleapis.com/bucket/path/to/image.tgz + +# Alternatively use the gs scheme +- gce_img: + name: test-image + source: gs://bucket/path/to/image.tgz + +# Delete an image named test-image. +- gce_img: + name: test-image + state: absent +''' + + +try: + import libcloud + from libcloud.compute.types import Provider + from libcloud.compute.providers import get_driver + from libcloud.common.google import GoogleBaseError + from libcloud.common.google import ResourceExistsError + from libcloud.common.google import ResourceNotFoundError + _ = Provider.GCE + has_libcloud = True +except ImportError: + has_libcloud = False + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.gce import gce_connect + + +GCS_URI = 'https://storage.googleapis.com/' + + +def create_image(gce, name, module): + """Create an image with the specified name.""" + source = module.params.get('source') + zone = module.params.get('zone') + desc = module.params.get('description') + timeout = module.params.get('timeout') + family = module.params.get('family') + + if not source: + module.fail_json(msg='Must supply a source', changed=False) + + if source.startswith(GCS_URI): + # source is a Google Cloud Storage URI + volume = source + elif source.startswith('gs://'): + # libcloud only accepts https URI. 
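+        # e.g. 'gs://bucket/path/to/image.tgz' becomes
+        # 'https://storage.googleapis.com/bucket/path/to/image.tgz'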
+ volume = source.replace('gs://', GCS_URI) + else: + try: + volume = gce.ex_get_volume(source, zone) + except ResourceNotFoundError: + module.fail_json(msg='Disk %s not found in zone %s' % (source, zone), + changed=False) + except GoogleBaseError as e: + module.fail_json(msg=str(e), changed=False) + + gce_extra_args = {} + if family is not None: + gce_extra_args['family'] = family + + old_timeout = gce.connection.timeout + try: + gce.connection.timeout = timeout + gce.ex_create_image(name, volume, desc, use_existing=False, **gce_extra_args) + return True + except ResourceExistsError: + return False + except GoogleBaseError as e: + module.fail_json(msg=str(e), changed=False) + finally: + gce.connection.timeout = old_timeout + + +def delete_image(gce, name, module): + """Delete a specific image resource by name.""" + try: + gce.ex_delete_image(name) + return True + except ResourceNotFoundError: + return False + except GoogleBaseError as e: + module.fail_json(msg=str(e), changed=False) + + +def main(): + module = AnsibleModule( + argument_spec=dict( + name=dict(required=True), + family=dict(), + description=dict(), + source=dict(), + state=dict(default='present', choices=['present', 'absent']), + zone=dict(default='us-central1-a'), + service_account_email=dict(), + pem_file=dict(type='path'), + project_id=dict(), + timeout=dict(type='int', default=180) + ) + ) + + if not has_libcloud: + module.fail_json(msg='libcloud with GCE support is required.') + + gce = gce_connect(module) + + name = module.params.get('name') + state = module.params.get('state') + family = module.params.get('family') + changed = False + + if family is not None and hasattr(libcloud, '__version__') and libcloud.__version__ <= '0.20.1': + module.fail_json(msg="Apache Libcloud 1.0.0+ is required to use 'family' option", + changed=False) + + # user wants to create an image. + if state == 'present': + changed = create_image(gce, name, module) + + # user wants to delete the image. + if state == 'absent': + changed = delete_image(gce, name, module) + + module.exit_json(changed=changed, name=name) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/google/gce_instance_template.py b/plugins/modules/cloud/google/gce_instance_template.py new file mode 100644 index 0000000000..84ab6a3746 --- /dev/null +++ b/plugins/modules/cloud/google/gce_instance_template.py @@ -0,0 +1,585 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: gce_instance_template +short_description: create or destroy instance templates of Compute Engine of GCP. +description: + - Creates or destroy Google instance templates + of Compute Engine of Google Cloud Platform. +options: + state: + description: + - The desired state for the instance template. + default: "present" + choices: ["present", "absent"] + name: + description: + - The name of the GCE instance template. + required: True + size: + description: + - The desired machine type for the instance template. + default: "f1-micro" + source: + description: + - A source disk to attach to the instance. + Cannot specify both I(image) and I(source). + image: + description: + - The image to use to create the instance. 
+        Cannot specify both I(image) and I(source).
+  image_family:
+    description:
+      - The image family to use to create the instance.
+        If I(image) has been used I(image_family) is ignored.
+        Cannot specify both I(image) and I(source).
+  disk_type:
+    description:
+      - Specify a C(pd-standard) disk or C(pd-ssd)
+        for an SSD disk.
+    default: pd-standard
+  disk_auto_delete:
+    description:
+      - Indicate that the boot disk should be
+        deleted when the Node is deleted.
+    default: true
+    type: bool
+  network:
+    description:
+      - The network to associate with the instance.
+    default: "default"
+  subnetwork:
+    description:
+      - The Subnetwork resource name for this instance.
+  can_ip_forward:
+    description:
+      - Set to C(yes) to allow instance to
+        send/receive non-matching src/dst packets.
+    type: bool
+    default: 'no'
+  external_ip:
+    description:
+      - The external IP address to use.
+        If C(ephemeral), a new non-static address will be
+        used. If C(None), then no external address will
+        be used. To use an existing static IP address
+        specify address name.
+    default: "ephemeral"
+  service_account_email:
+    description:
+      - service account email
+  service_account_permissions:
+    description:
+      - service account permissions (see
+        U(https://cloud.google.com/sdk/gcloud/reference/compute/instances/create),
+        --scopes section for detailed information)
+    choices: [
+      "bigquery", "cloud-platform", "compute-ro", "compute-rw",
+      "useraccounts-ro", "useraccounts-rw", "datastore", "logging-write",
+      "monitoring", "sql-admin", "storage-full", "storage-ro",
+      "storage-rw", "taskqueue", "userinfo-email"
+    ]
+  automatic_restart:
+    description:
+      - Defines whether the instance should be
+        automatically restarted when it is
+        terminated by Compute Engine.
+    type: bool
+  preemptible:
+    description:
+      - Defines whether the instance is preemptible.
+    type: bool
+  tags:
+    description:
+      - a comma-separated list of tags to associate with the instance
+  metadata:
+    description:
+      - a hash/dictionary of custom data for the instance;
+        '{"key":"value", ...}'
+  description:
+    description:
+      - description of instance template
+  disks:
+    description:
+      - a list of persistent disks to attach to the instance; a string value
+        gives the name of the disk; alternatively, a dictionary value can
+        define 'name' and 'mode' ('READ_ONLY' or 'READ_WRITE'). The first entry
+        will be the boot disk (which must be READ_WRITE).
+  nic_gce_struct:
+    description:
+      - Support passing in the GCE-specific
+        formatted networkInterfaces[] structure.
+  disks_gce_struct:
+    description:
+      - Support passing in the GCE-specific
+        formatted disks[] structure. Case sensitive.
+        See U(https://cloud.google.com/compute/docs/reference/latest/instanceTemplates#resource) for detailed information
+  project_id:
+    description:
+      - your GCE project ID
+  pem_file:
+    description:
+      - path to the pem file associated with the service account email.
+        This option is deprecated. Use 'credentials_file'.
+  credentials_file:
+    description:
+      - path to the JSON file associated with the service account email
+  subnetwork_region:
+    description:
+      - Region that subnetwork resides in. (Required when I(subnetwork) is specified.)
+requirements:
+  - "python >= 2.6"
+  - "apache-libcloud >= 0.13.3, >= 0.17.0 if using JSON credentials,
+    >= 0.20.0 if using preemptible option"
+notes:
+  - JSON credentials strongly preferred.
+author: "Gwenael Pellen (@GwenaelPellenArkeup) " +''' + +EXAMPLES = ''' +# Usage +- name: create instance template named foo + gce_instance_template: + name: foo + size: n1-standard-1 + image_family: ubuntu-1604-lts + state: present + project_id: "your-project-name" + credentials_file: "/path/to/your-key.json" + service_account_email: "your-sa@your-project-name.iam.gserviceaccount.com" + +# Example Playbook +- name: Compute Engine Instance Template Examples + hosts: localhost + vars: + service_account_email: "your-sa@your-project-name.iam.gserviceaccount.com" + credentials_file: "/path/to/your-key.json" + project_id: "your-project-name" + tasks: + - name: create instance template + gce_instance_template: + name: my-test-instance-template + size: n1-standard-1 + image_family: ubuntu-1604-lts + state: present + project_id: "{{ project_id }}" + credentials_file: "{{ credentials_file }}" + service_account_email: "{{ service_account_email }}" + - name: delete instance template + gce_instance_template: + name: my-test-instance-template + size: n1-standard-1 + image_family: ubuntu-1604-lts + state: absent + project_id: "{{ project_id }}" + credentials_file: "{{ credentials_file }}" + service_account_email: "{{ service_account_email }}" + +# Example playbook using disks_gce_struct +- name: Compute Engine Instance Template Examples + hosts: localhost + vars: + service_account_email: "your-sa@your-project-name.iam.gserviceaccount.com" + credentials_file: "/path/to/your-key.json" + project_id: "your-project-name" + tasks: + - name: create instance template + gce_instance_template: + name: foo + size: n1-standard-1 + state: present + project_id: "{{ project_id }}" + credentials_file: "{{ credentials_file }}" + service_account_email: "{{ service_account_email }}" + disks_gce_struct: + - device_name: /dev/sda + boot: true + autoDelete: true + initializeParams: + diskSizeGb: 30 + diskType: pd-ssd + sourceImage: projects/debian-cloud/global/images/family/debian-8 + +''' + +RETURN = ''' +''' + +import traceback +try: + from ast import literal_eval + HAS_PYTHON26 = True +except ImportError: + HAS_PYTHON26 = False + +try: + import libcloud + from libcloud.compute.types import Provider + from libcloud.compute.providers import get_driver + from libcloud.common.google import GoogleBaseError, QuotaExceededError, \ + ResourceExistsError, ResourceInUseError, ResourceNotFoundError + from libcloud.compute.drivers.gce import GCEAddress + _ = Provider.GCE + HAS_LIBCLOUD = True +except ImportError: + HAS_LIBCLOUD = False + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.gce import gce_connect +from ansible.module_utils._text import to_native + + +def get_info(inst): + """Retrieves instance template information + """ + return({ + 'name': inst.name, + 'extra': inst.extra, + }) + + +def create_instance_template(module, gce): + """Create an instance template + module : AnsibleModule object + gce: authenticated GCE libcloud driver + Returns: + instance template information + """ + # get info from module + name = module.params.get('name') + size = module.params.get('size') + source = module.params.get('source') + image = module.params.get('image') + image_family = module.params.get('image_family') + disk_type = module.params.get('disk_type') + disk_auto_delete = module.params.get('disk_auto_delete') + network = module.params.get('network') + subnetwork = module.params.get('subnetwork') + subnetwork_region = module.params.get('subnetwork_region') + 
can_ip_forward = module.params.get('can_ip_forward') + external_ip = module.params.get('external_ip') + service_account_permissions = module.params.get( + 'service_account_permissions') + service_account_email = module.params.get('service_account_email') + on_host_maintenance = module.params.get('on_host_maintenance') + automatic_restart = module.params.get('automatic_restart') + preemptible = module.params.get('preemptible') + tags = module.params.get('tags') + metadata = module.params.get('metadata') + description = module.params.get('description') + disks_gce_struct = module.params.get('disks_gce_struct') + changed = False + + # args of ex_create_instancetemplate + gce_args = dict( + name="instance", + size="f1-micro", + source=None, + image=None, + disk_type='pd-standard', + disk_auto_delete=True, + network='default', + subnetwork=None, + can_ip_forward=None, + external_ip='ephemeral', + service_accounts=None, + on_host_maintenance=None, + automatic_restart=None, + preemptible=None, + tags=None, + metadata=None, + description=None, + disks_gce_struct=None, + nic_gce_struct=None + ) + + gce_args['name'] = name + gce_args['size'] = size + + if source is not None: + gce_args['source'] = source + + if image: + gce_args['image'] = image + else: + if image_family: + image = gce.ex_get_image_from_family(image_family) + gce_args['image'] = image + else: + gce_args['image'] = "debian-8" + + gce_args['disk_type'] = disk_type + gce_args['disk_auto_delete'] = disk_auto_delete + + gce_network = gce.ex_get_network(network) + gce_args['network'] = gce_network + + if subnetwork is not None: + gce_args['subnetwork'] = gce.ex_get_subnetwork(subnetwork, region=subnetwork_region) + + if can_ip_forward is not None: + gce_args['can_ip_forward'] = can_ip_forward + + if external_ip == "ephemeral": + instance_external_ip = external_ip + elif external_ip == "none": + instance_external_ip = None + else: + try: + instance_external_ip = gce.ex_get_address(external_ip) + except GoogleBaseError as err: + # external_ip is name ? + instance_external_ip = external_ip + gce_args['external_ip'] = instance_external_ip + + ex_sa_perms = [] + bad_perms = [] + if service_account_permissions: + for perm in service_account_permissions: + if perm not in gce.SA_SCOPES_MAP: + bad_perms.append(perm) + if len(bad_perms) > 0: + module.fail_json(msg='bad permissions: %s' % str(bad_perms)) + if service_account_email is not None: + ex_sa_perms.append({'email': str(service_account_email)}) + else: + ex_sa_perms.append({'email': "default"}) + ex_sa_perms[0]['scopes'] = service_account_permissions + gce_args['service_accounts'] = ex_sa_perms + + if on_host_maintenance is not None: + gce_args['on_host_maintenance'] = on_host_maintenance + + if automatic_restart is not None: + gce_args['automatic_restart'] = automatic_restart + + if preemptible is not None: + gce_args['preemptible'] = preemptible + + if tags is not None: + gce_args['tags'] = tags + + if disks_gce_struct is not None: + gce_args['disks_gce_struct'] = disks_gce_struct + + # Try to convert the user's metadata value into the format expected + # by GCE. First try to ensure user has proper quoting of a + # dictionary-like syntax using 'literal_eval', then convert the python + # dict into a python list of 'key' / 'value' dicts. Should end up + # with: + # [ {'key': key1, 'value': value1}, {'key': key2, 'value': value2}, ...] 
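+    # e.g. metadata '{"env": "prod", "tier": "web"}' parses to
+    # {'env': 'prod', 'tier': 'web'} and, on libcloud < 0.15, is wrapped as
+    # {'items': [{'key': 'env', 'value': 'prod'}, {'key': 'tier', 'value': 'web'}]}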
+    if metadata:
+        if isinstance(metadata, dict):
+            md = metadata
+        else:
+            try:
+                md = literal_eval(str(metadata))
+                if not isinstance(md, dict):
+                    raise ValueError('metadata must be a dict')
+            except ValueError as e:
+                module.fail_json(msg='bad metadata: %s' % str(e))
+            except SyntaxError:
+                module.fail_json(msg='bad metadata syntax')
+
+        if hasattr(libcloud, '__version__') and libcloud.__version__ < '0.15':
+            items = []
+            for k, v in md.items():
+                items.append({"key": k, "value": v})
+            metadata = {'items': items}
+        else:
+            metadata = md
+    gce_args['metadata'] = metadata
+
+    if description is not None:
+        gce_args['description'] = description
+
+    instance = None
+    try:
+        instance = gce.ex_get_instancetemplate(name)
+    except ResourceNotFoundError:
+        try:
+            instance = gce.ex_create_instancetemplate(**gce_args)
+            changed = True
+        except GoogleBaseError as err:
+            module.fail_json(
+                msg='Unexpected error attempting to create instance template {0}, error: {1}'
+                .format(
+                    name,
+                    err.value
+                )
+            )
+
+    if instance:
+        json_data = get_info(instance)
+    else:
+        module.fail_json(msg='failed to create instance template %s' % name)
+
+    return (changed, json_data, name)
+
+
+def delete_instance_template(module, gce):
+    """ Delete instance template.
+    module : AnsibleModule object
+    gce: authenticated GCE libcloud driver
+    Returns:
+        instance template information
+    """
+    name = module.params.get('name')
+    current_state = "absent"
+    changed = False
+    json_data = {}
+
+    # get instance template
+    instance = None
+    try:
+        instance = gce.ex_get_instancetemplate(name)
+        current_state = "present"
+    except GoogleBaseError as e:
+        json_data = dict(msg='instance template does not exist: %s' % to_native(e),
+                         exception=traceback.format_exc())
+
+    if current_state == "present":
+        rc = instance.destroy()
+        if rc:
+            changed = True
+        else:
+            module.fail_json(
+                msg='instance template destroy failed'
+            )
+
+    return (changed, json_data, name)
+
+
+def module_controller(module, gce):
+    ''' Control module state parameter.
+    module : AnsibleModule object
+    gce: authenticated GCE libcloud driver
+    Returns:
+        nothing
+    Exit:
+        AnsibleModule object exit with json data.
+    '''
+    json_output = dict()
+    state = module.params.get("state")
+    if state == "present":
+        (changed, output, name) = create_instance_template(module, gce)
+        json_output['changed'] = changed
+        json_output['msg'] = output
+    elif state == "absent":
+        (changed, output, name) = delete_instance_template(module, gce)
+        json_output['changed'] = changed
+        json_output['msg'] = output
+
+    module.exit_json(**json_output)
+
+
+def check_if_system_state_would_be_changed(module, gce):
+    ''' Check whether the current system state differs from the requested state.
+    module : AnsibleModule object
+    gce: authenticated GCE libcloud driver
+    Returns:
+        system_state changed
+    '''
+    changed = False
+    current_state = "absent"
+
+    state = module.params.get("state")
+    name = module.params.get("name")
+
+    try:
+        gce.ex_get_instancetemplate(name)
+        current_state = "present"
+    except ResourceNotFoundError:
+        # Template does not exist; current_state stays "absent".
+        pass
+    except GoogleBaseError as e:
+        module.fail_json(msg='Error retrieving instance template: %s' % to_native(e),
+                         exception=traceback.format_exc())
+
+    if current_state != state:
+        changed = True
+
+    if current_state == "absent":
+        if changed:
+            output = 'instance template {0} will be created'.format(name)
+        else:
+            output = 'nothing to do for instance template {0}'.format(name)
+    if current_state == "present":
+        if changed:
+            output = 'instance template {0} will be destroyed'.format(name)
+        else:
+            output = 'nothing to do for instance template {0}'.format(name)
+
+    return (changed, output)
+
+
+def main():
+    module = AnsibleModule(
+        argument_spec=dict(
+            state=dict(choices=['present', 'absent'], default='present'),
+            name=dict(required=True, aliases=['base_name']),
+            size=dict(default='f1-micro'),
+            source=dict(),
+            image=dict(),
+            image_family=dict(default='debian-8'),
+            disk_type=dict(choices=['pd-standard', 'pd-ssd'], default='pd-standard', type='str'),
+            disk_auto_delete=dict(type='bool', default=True),
+            network=dict(default='default'),
+            subnetwork=dict(),
+            can_ip_forward=dict(type='bool', default=False),
+            external_ip=dict(default='ephemeral'),
+            service_account_email=dict(),
+            service_account_permissions=dict(type='list'),
+            automatic_restart=dict(type='bool', default=None),
+            preemptible=dict(type='bool', default=None),
+            tags=dict(type='list'),
+            metadata=dict(),
+            description=dict(),
+            disks=dict(type='list'),
+            nic_gce_struct=dict(type='list'),
+            project_id=dict(),
+            pem_file=dict(type='path'),
+            credentials_file=dict(type='path'),
+            subnetwork_region=dict(),
+            disks_gce_struct=dict(type='list')
+        ),
+        mutually_exclusive=[['source', 'image']],
+        required_one_of=[['image', 'image_family']],
+        supports_check_mode=True
+    )
+
+    if not HAS_PYTHON26:
+        module.fail_json(
+            msg="GCE module requires python's 'ast' module, python v2.6+")
+    if not HAS_LIBCLOUD:
+        module.fail_json(
+            msg='libcloud with GCE support (0.17.0+) required for this module')
+
+    try:
+        gce = gce_connect(module)
+    except GoogleBaseError as e:
+        module.fail_json(msg='GCE Connection failed: %s' % to_native(e), exception=traceback.format_exc())
+
+    if module.check_mode:
+        (changed, output) = check_if_system_state_would_be_changed(module, gce)
+        module.exit_json(
+            changed=changed,
+            msg=output
+        )
+    else:
+        module_controller(module, gce)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/cloud/google/gce_labels.py b/plugins/modules/cloud/google/gce_labels.py
new file mode 100644
index 0000000000..c790d7d6bf
--- /dev/null
+++ b/plugins/modules/cloud/google/gce_labels.py
@@ -0,0 +1,323 @@
+#!/usr/bin/python
+# Copyright 2017 Google Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: gce_labels
+short_description: Create, Update or Destroy GCE Labels.
+description:
+    - Create, Update or Destroy GCE Labels on instances, disks, snapshots, etc.
+ When specifying the GCE resource, users may specify the full URL for + the resource (its 'self_link'), or the individual parameters of the + resource (type, location, name). Examples for the two options can be + seen in the documentation. + See U(https://cloud.google.com/compute/docs/label-or-tag-resources) for + more information about GCE Labels. Labels are gradually being added to + more GCE resources, so this module will need to be updated as new + resources are added to the GCE (v1) API. +requirements: + - 'python >= 2.6' + - 'google-api-python-client >= 1.6.2' + - 'google-auth >= 1.0.0' + - 'google-auth-httplib2 >= 0.0.2' +notes: + - Labels support resources such as instances, disks, images, etc. See + U(https://cloud.google.com/compute/docs/labeling-resources) for the list + of resources available in the GCE v1 API (not alpha or beta). +author: + - 'Eric Johnson (@erjohnso) ' +options: + labels: + description: + - A list of labels (key/value pairs) to add or remove for the resource. + required: false + resource_url: + description: + - The 'self_link' for the resource (instance, disk, snapshot, etc) + required: false + resource_type: + description: + - The type of resource (instances, disks, snapshots, images) + required: false + resource_location: + description: + - The location of resource (global, us-central1-f, etc.) + required: false + resource_name: + description: + - The name of resource. + required: false +''' + +EXAMPLES = ''' +- name: Add labels on an existing instance (using resource_url) + gce_labels: + service_account_email: "{{ service_account_email }}" + credentials_file: "{{ credentials_file }}" + project_id: "{{ project_id }}" + labels: + webserver-frontend: homepage + environment: test + experiment-name: kennedy + resource_url: https://www.googleapis.com/compute/beta/projects/myproject/zones/us-central1-f/instances/example-instance + state: present +- name: Add labels on an image (using resource params) + gce_labels: + service_account_email: "{{ service_account_email }}" + credentials_file: "{{ credentials_file }}" + project_id: "{{ project_id }}" + labels: + webserver-frontend: homepage + environment: test + experiment-name: kennedy + resource_type: images + resource_location: global + resource_name: my-custom-image + state: present +- name: Remove specified labels from the GCE instance + gce_labels: + service_account_email: "{{ service_account_email }}" + credentials_file: "{{ credentials_file }}" + project_id: "{{ project_id }}" + labels: + environment: prod + experiment-name: kennedy + resource_url: https://www.googleapis.com/compute/beta/projects/myproject/zones/us-central1-f/instances/example-instance + state: absent +''' + +RETURN = ''' +labels: + description: List of labels that exist on the resource. + returned: Always. + type: dict + sample: [ { 'webserver-frontend': 'homepage', 'environment': 'test', 'environment-name': 'kennedy' } ] +resource_url: + description: The 'self_link' of the GCE resource. + returned: Always. + type: str + sample: 'https://www.googleapis.com/compute/beta/projects/myproject/zones/us-central1-f/instances/example-instance' +resource_type: + description: The type of the GCE resource. + returned: Always. + type: str + sample: instances +resource_location: + description: The location of the GCE resource. + returned: Always. + type: str + sample: us-central1-f +resource_name: + description: The name of the GCE resource. + returned: Always. 
+ type: str + sample: my-happy-little-instance +state: + description: state of the labels + returned: Always. + type: str + sample: present +''' + +try: + from ast import literal_eval + HAS_PYTHON26 = True +except ImportError: + HAS_PYTHON26 = False + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.gcp import check_params, get_google_api_client, GCPUtils + + +UA_PRODUCT = 'ansible-gce_labels' +UA_VERSION = '0.0.1' +GCE_API_VERSION = 'v1' + +# TODO(all): As Labels are added to more GCE resources, this list will need to +# be updated (along with some code changes below). The list can *only* include +# resources from the 'v1' GCE API and will *not* work with 'beta' or 'alpha'. +KNOWN_RESOURCES = ['instances', 'disks', 'snapshots', 'images'] + + +def _fetch_resource(client, module): + params = module.params + if params['resource_url']: + if not params['resource_url'].startswith('https://www.googleapis.com/compute'): + module.fail_json( + msg='Invalid self_link url: %s' % params['resource_url']) + else: + parts = params['resource_url'].split('/')[8:] + if len(parts) == 2: + resource_type, resource_name = parts + resource_location = 'global' + else: + resource_location, resource_type, resource_name = parts + else: + if not params['resource_type'] or not params['resource_location'] \ + or not params['resource_name']: + module.fail_json(msg='Missing required resource params.') + resource_type = params['resource_type'].lower() + resource_name = params['resource_name'].lower() + resource_location = params['resource_location'].lower() + + if resource_type not in KNOWN_RESOURCES: + module.fail_json(msg='Unsupported resource_type: %s' % resource_type) + + # TODO(all): See the comment above for KNOWN_RESOURCES. As labels are + # added to the v1 GCE API for more resources, some minor code work will + # need to be added here. + if resource_type == 'instances': + resource = client.instances().get(project=params['project_id'], + zone=resource_location, + instance=resource_name).execute() + elif resource_type == 'disks': + resource = client.disks().get(project=params['project_id'], + zone=resource_location, + disk=resource_name).execute() + elif resource_type == 'snapshots': + resource = client.snapshots().get(project=params['project_id'], + snapshot=resource_name).execute() + elif resource_type == 'images': + resource = client.images().get(project=params['project_id'], + image=resource_name).execute() + else: + module.fail_json(msg='Unsupported resource type: %s' % resource_type) + + return resource.get('labelFingerprint', ''), { + 'resource_name': resource.get('name'), + 'resource_url': resource.get('selfLink'), + 'resource_type': resource_type, + 'resource_location': resource_location, + 'labels': resource.get('labels', {}) + } + + +def _set_labels(client, new_labels, module, ri, fingerprint): + params = module.params + result = err = None + labels = { + 'labels': new_labels, + 'labelFingerprint': fingerprint + } + + # TODO(all): See the comment above for KNOWN_RESOURCES. As labels are + # added to the v1 GCE API for more resources, some minor code work will + # need to be added here. 
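+    # The labelFingerprint acts as an optimistic-locking token: the
+    # setLabels() call is rejected if the resource's labels changed after
+    # the fingerprint was read, which prevents lost updates.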
+ if ri['resource_type'] == 'instances': + req = client.instances().setLabels(project=params['project_id'], + instance=ri['resource_name'], + zone=ri['resource_location'], + body=labels) + elif ri['resource_type'] == 'disks': + req = client.disks().setLabels(project=params['project_id'], + zone=ri['resource_location'], + resource=ri['resource_name'], + body=labels) + elif ri['resource_type'] == 'snapshots': + req = client.snapshots().setLabels(project=params['project_id'], + resource=ri['resource_name'], + body=labels) + elif ri['resource_type'] == 'images': + req = client.images().setLabels(project=params['project_id'], + resource=ri['resource_name'], + body=labels) + else: + module.fail_json(msg='Unsupported resource type: %s' % ri['resource_type']) + + # TODO(erjohnso): Once Labels goes GA, we'll be able to use the GCPUtils + # method to poll for the async request/operation to complete before + # returning. However, during 'beta', we are in an odd state where + # API requests must be sent to the 'compute/beta' API, but the python + # client library only allows for *Operations.get() requests to be + # sent to 'compute/v1' API. The response operation is in the 'beta' + # API-scope, but the client library cannot find the operation (404). + # result = GCPUtils.execute_api_client_req(req, client=client, raw=False) + # return result, err + result = req.execute() + return True, err + + +def main(): + module = AnsibleModule( + argument_spec=dict( + state=dict(choices=['absent', 'present'], default='present'), + service_account_email=dict(), + service_account_permissions=dict(type='list'), + pem_file=dict(), + credentials_file=dict(), + labels=dict(required=False, type='dict', default={}), + resource_url=dict(required=False, type='str'), + resource_name=dict(required=False, type='str'), + resource_location=dict(required=False, type='str'), + resource_type=dict(required=False, type='str'), + project_id=dict() + ), + required_together=[ + ['resource_name', 'resource_location', 'resource_type'] + ], + mutually_exclusive=[ + ['resource_url', 'resource_name'], + ['resource_url', 'resource_location'], + ['resource_url', 'resource_type'] + ] + ) + + if not HAS_PYTHON26: + module.fail_json( + msg="GCE module requires python's 'ast' module, python v2.6+") + + client, cparams = get_google_api_client(module, 'compute', + user_agent_product=UA_PRODUCT, + user_agent_version=UA_VERSION, + api_version=GCE_API_VERSION) + + # Get current resource info including labelFingerprint + fingerprint, resource_info = _fetch_resource(client, module) + new_labels = resource_info['labels'].copy() + + update_needed = False + if module.params['state'] == 'absent': + for k, v in module.params['labels'].items(): + if k in new_labels: + if new_labels[k] == v: + update_needed = True + new_labels.pop(k, None) + else: + module.fail_json(msg="Could not remove unmatched label pair '%s':'%s'" % (k, v)) + else: + for k, v in module.params['labels'].items(): + if k not in new_labels: + update_needed = True + new_labels[k] = v + + changed = False + json_output = {'state': module.params['state']} + if update_needed: + changed, err = _set_labels(client, new_labels, module, resource_info, + fingerprint) + json_output['changed'] = changed + + # TODO(erjohnso): probably want to re-fetch the resource to return the + # new labelFingerprint, check that desired labels match updated labels. + # BUT! Will need to wait for setLabels() to hit v1 API so we can use the + # GCPUtils feature to poll for the operation to be complete. 
For now, + # we'll just update the output with what we have from the original + # state of the resource. + json_output.update(resource_info) + json_output.update(module.params) + + module.exit_json(**json_output) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/google/gce_lb.py b/plugins/modules/cloud/google/gce_lb.py new file mode 100644 index 0000000000..ac737c5ee4 --- /dev/null +++ b/plugins/modules/cloud/google/gce_lb.py @@ -0,0 +1,297 @@ +#!/usr/bin/python +# Copyright 2013 Google Inc. +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: gce_lb +short_description: create/destroy GCE load-balancer resources +description: + - This module can create and destroy Google Compute Engine C(loadbalancer) + and C(httphealthcheck) resources. The primary LB resource is the + C(load_balancer) resource and the health check parameters are all + prefixed with I(httphealthcheck). + The full documentation for Google Compute Engine load balancing is at + U(https://developers.google.com/compute/docs/load-balancing/). However, + the ansible module simplifies the configuration by following the + libcloud model. + Full install/configuration instructions for the gce* modules can + be found in the comments of ansible/test/gce_tests.py. +options: + httphealthcheck_name: + description: + - the name identifier for the HTTP health check + httphealthcheck_port: + description: + - the TCP port to use for HTTP health checking + default: 80 + httphealthcheck_path: + description: + - the url path to use for HTTP health checking + default: "/" + httphealthcheck_interval: + description: + - the duration in seconds between each health check request + default: 5 + httphealthcheck_timeout: + description: + - the timeout in seconds before a request is considered a failed check + default: 5 + httphealthcheck_unhealthy_count: + description: + - number of consecutive failed checks before marking a node unhealthy + default: 2 + httphealthcheck_healthy_count: + description: + - number of consecutive successful checks before marking a node healthy + default: 2 + httphealthcheck_host: + description: + - host header to pass through on HTTP check requests + name: + description: + - name of the load-balancer resource + protocol: + description: + - the protocol used for the load-balancer packet forwarding, tcp or udp + default: "tcp" + choices: ['tcp', 'udp'] + region: + description: + - the GCE region where the load-balancer is defined + external_ip: + description: + - the external static IPv4 (or auto-assigned) address for the LB + port_range: + description: + - the port (range) to forward, e.g. 80 or 8000-8888 defaults to all ports + members: + description: + - a list of zone/nodename pairs, e.g ['us-central1-a/www-a', ...] + aliases: ['nodes'] + state: + description: + - desired state of the LB + default: "present" + choices: ["active", "present", "absent", "deleted"] + service_account_email: + description: + - service account email + pem_file: + description: + - path to the pem file associated with the service account email + This option is deprecated. Use 'credentials_file'. 
+ credentials_file: + description: + - path to the JSON file associated with the service account email + project_id: + description: + - your GCE project ID + +requirements: + - "python >= 2.6" + - "apache-libcloud >= 0.13.3, >= 0.17.0 if using JSON credentials" +author: "Eric Johnson (@erjohnso) " +''' + +EXAMPLES = ''' +# Simple example of creating a new LB, adding members, and a health check +- local_action: + module: gce_lb + name: testlb + region: us-central1 + members: ["us-central1-a/www-a", "us-central1-b/www-b"] + httphealthcheck_name: hc + httphealthcheck_port: 80 + httphealthcheck_path: "/up" +''' + +try: + from libcloud.compute.types import Provider + from libcloud.compute.providers import get_driver + from libcloud.loadbalancer.types import Provider as Provider_lb + from libcloud.loadbalancer.providers import get_driver as get_driver_lb + from libcloud.common.google import GoogleBaseError, QuotaExceededError, ResourceExistsError, ResourceNotFoundError + + _ = Provider.GCE + HAS_LIBCLOUD = True +except ImportError: + HAS_LIBCLOUD = False + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.gce import USER_AGENT_PRODUCT, USER_AGENT_VERSION, gce_connect, unexpected_error_msg + + +def main(): + module = AnsibleModule( + argument_spec=dict( + httphealthcheck_name=dict(), + httphealthcheck_port=dict(default=80, type='int'), + httphealthcheck_path=dict(default='/'), + httphealthcheck_interval=dict(default=5, type='int'), + httphealthcheck_timeout=dict(default=5, type='int'), + httphealthcheck_unhealthy_count=dict(default=2, type='int'), + httphealthcheck_healthy_count=dict(default=2, type='int'), + httphealthcheck_host=dict(), + name=dict(), + protocol=dict(default='tcp'), + region=dict(), + external_ip=dict(), + port_range=dict(), + members=dict(type='list'), + state=dict(default='present'), + service_account_email=dict(), + pem_file=dict(type='path'), + credentials_file=dict(type='path'), + project_id=dict(), + ) + ) + + if not HAS_LIBCLOUD: + module.fail_json(msg='libcloud with GCE support (0.13.3+) required for this module.') + + gce = gce_connect(module) + + httphealthcheck_name = module.params.get('httphealthcheck_name') + httphealthcheck_port = module.params.get('httphealthcheck_port') + httphealthcheck_path = module.params.get('httphealthcheck_path') + httphealthcheck_interval = module.params.get('httphealthcheck_interval') + httphealthcheck_timeout = module.params.get('httphealthcheck_timeout') + httphealthcheck_unhealthy_count = module.params.get('httphealthcheck_unhealthy_count') + httphealthcheck_healthy_count = module.params.get('httphealthcheck_healthy_count') + httphealthcheck_host = module.params.get('httphealthcheck_host') + name = module.params.get('name') + protocol = module.params.get('protocol') + region = module.params.get('region') + external_ip = module.params.get('external_ip') + port_range = module.params.get('port_range') + members = module.params.get('members') + state = module.params.get('state') + + try: + gcelb = get_driver_lb(Provider_lb.GCE)(gce_driver=gce) + gcelb.connection.user_agent_append("%s/%s" % ( + USER_AGENT_PRODUCT, USER_AGENT_VERSION)) + except Exception as e: + module.fail_json(msg=unexpected_error_msg(e), changed=False) + + changed = False + json_output = {'name': name, 'state': state} + + if not name and not httphealthcheck_name: + module.fail_json(msg='Nothing to do, please specify a "name" ' + 'or "httphealthcheck_name" parameter', changed=False) + + if state in 
['active', 'present']: + # first, create the httphealthcheck if requested + hc = None + if httphealthcheck_name: + json_output['httphealthcheck_name'] = httphealthcheck_name + try: + hc = gcelb.ex_create_healthcheck(httphealthcheck_name, + host=httphealthcheck_host, path=httphealthcheck_path, + port=httphealthcheck_port, + interval=httphealthcheck_interval, + timeout=httphealthcheck_timeout, + unhealthy_threshold=httphealthcheck_unhealthy_count, + healthy_threshold=httphealthcheck_healthy_count) + changed = True + except ResourceExistsError: + hc = gce.ex_get_healthcheck(httphealthcheck_name) + except Exception as e: + module.fail_json(msg=unexpected_error_msg(e), changed=False) + + if hc is not None: + json_output['httphealthcheck_host'] = hc.extra['host'] + json_output['httphealthcheck_path'] = hc.path + json_output['httphealthcheck_port'] = hc.port + json_output['httphealthcheck_interval'] = hc.interval + json_output['httphealthcheck_timeout'] = hc.timeout + json_output['httphealthcheck_unhealthy_count'] = hc.unhealthy_threshold + json_output['httphealthcheck_healthy_count'] = hc.healthy_threshold + + # create the forwarding rule (and target pool under the hood) + lb = None + if name: + if not region: + module.fail_json(msg='Missing required region name', + changed=False) + nodes = [] + output_nodes = [] + json_output['name'] = name + # members is a python list of 'zone/inst' strings + if members: + for node in members: + try: + zone, node_name = node.split('/') + nodes.append(gce.ex_get_node(node_name, zone)) + output_nodes.append(node) + except Exception: + # skip nodes that are badly formatted or don't exist + pass + try: + if hc is not None: + lb = gcelb.create_balancer(name, port_range, protocol, + None, nodes, ex_region=region, ex_healthchecks=[hc], + ex_address=external_ip) + else: + lb = gcelb.create_balancer(name, port_range, protocol, + None, nodes, ex_region=region, ex_address=external_ip) + changed = True + except ResourceExistsError: + lb = gcelb.get_balancer(name) + except Exception as e: + module.fail_json(msg=unexpected_error_msg(e), changed=False) + + if lb is not None: + json_output['members'] = output_nodes + json_output['protocol'] = protocol + json_output['region'] = region + json_output['external_ip'] = lb.ip + json_output['port_range'] = lb.port + hc_names = [] + if 'healthchecks' in lb.extra: + for hc in lb.extra['healthchecks']: + hc_names.append(hc.name) + json_output['httphealthchecks'] = hc_names + + if state in ['absent', 'deleted']: + # first, delete the load balancer (forwarding rule and target pool) + # if specified. 
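+        # Delete the forwarding rule and target pool before the health
+        # check: a health check still referenced by a target pool cannot
+        # be destroyed.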
+ if name: + json_output['name'] = name + try: + lb = gcelb.get_balancer(name) + gcelb.destroy_balancer(lb) + changed = True + except ResourceNotFoundError: + pass + except Exception as e: + module.fail_json(msg=unexpected_error_msg(e), changed=False) + + # destroy the health check if specified + if httphealthcheck_name: + json_output['httphealthcheck_name'] = httphealthcheck_name + try: + hc = gce.ex_get_healthcheck(httphealthcheck_name) + gce.ex_destroy_healthcheck(hc) + changed = True + except ResourceNotFoundError: + pass + except Exception as e: + module.fail_json(msg=unexpected_error_msg(e), changed=False) + + json_output['changed'] = changed + module.exit_json(**json_output) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/google/gce_mig.py b/plugins/modules/cloud/google/gce_mig.py new file mode 100644 index 0000000000..2509d28ac1 --- /dev/null +++ b/plugins/modules/cloud/google/gce_mig.py @@ -0,0 +1,885 @@ +#!/usr/bin/python +# Copyright 2016 Google Inc. +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: gce_mig +short_description: Create, Update or Destroy a Managed Instance Group (MIG). +description: + - Create, Update or Destroy a Managed Instance Group (MIG). See + U(https://cloud.google.com/compute/docs/instance-groups) for an overview. + Full install/configuration instructions for the gce* modules can + be found in the comments of ansible/test/gce_tests.py. +requirements: + - "python >= 2.6" + - "apache-libcloud >= 1.2.0" +notes: + - Resizing and Recreating VM are also supported. + - An existing instance template is required in order to create a + Managed Instance Group. +author: + - "Tom Melendez (@supertom) " +options: + name: + description: + - Name of the Managed Instance Group. + required: true + template: + description: + - Instance Template to be used in creating the VMs. See + U(https://cloud.google.com/compute/docs/instance-templates) to learn more + about Instance Templates. Required for creating MIGs. + size: + description: + - Size of Managed Instance Group. If MIG already exists, it will be + resized to the number provided here. Required for creating MIGs. + service_account_email: + description: + - service account email + credentials_file: + description: + - Path to the JSON file associated with the service account email + project_id: + description: + - GCE project ID + state: + description: + - desired state of the resource + default: "present" + choices: ["absent", "present"] + zone: + description: + - The GCE zone to use for this Managed Instance Group. + required: true + autoscaling: + description: + - A dictionary of configuration for the autoscaler. 'enabled (bool)', 'name (str)' + and policy.max_instances (int) are required fields if autoscaling is used. See + U(https://cloud.google.com/compute/docs/reference/beta/autoscalers) for more information + on Autoscaling. + named_ports: + description: + - Define named ports that backend services can forward data to. Format is a a list of + name:port dictionaries. +''' + +EXAMPLES = ''' +# Following playbook creates, rebuilds instances, resizes and then deletes a MIG. +# Notes: +# - Two valid Instance Templates must exist in your GCE project in order to run +# this playbook. 
Change the fields to match the templates used in your +# project. +# - The use of the 'pause' module is not required, it is just for convenience. +- name: Managed Instance Group Example + hosts: localhost + gather_facts: False + tasks: + - name: Create MIG + gce_mig: + name: ansible-mig-example + zone: us-central1-c + state: present + size: 1 + template: my-instance-template-1 + named_ports: + - name: http + port: 80 + - name: foobar + port: 82 + + - name: Pause for 30 seconds + pause: + seconds: 30 + + - name: Recreate MIG Instances with Instance Template change. + gce_mig: + name: ansible-mig-example + zone: us-central1-c + state: present + template: my-instance-template-2-small + recreate_instances: yes + + - name: Pause for 30 seconds + pause: + seconds: 30 + + - name: Resize MIG + gce_mig: + name: ansible-mig-example + zone: us-central1-c + state: present + size: 3 + + - name: Update MIG with Autoscaler + gce_mig: + name: ansible-mig-example + zone: us-central1-c + state: present + size: 3 + template: my-instance-template-2-small + recreate_instances: yes + autoscaling: + enabled: yes + name: my-autoscaler + policy: + min_instances: 2 + max_instances: 5 + cool_down_period: 37 + cpu_utilization: + target: .39 + load_balancing_utilization: + target: 0.4 + + - name: Pause for 30 seconds + pause: + seconds: 30 + + - name: Delete MIG + gce_mig: + name: ansible-mig-example + zone: us-central1-c + state: absent + autoscaling: + enabled: no + name: my-autoscaler +''' +RETURN = ''' +zone: + description: Zone in which to launch MIG. + returned: always + type: str + sample: "us-central1-b" + +template: + description: Instance Template to use for VMs. Must exist prior to using with MIG. + returned: changed + type: str + sample: "my-instance-template" + +name: + description: Name of the Managed Instance Group. + returned: changed + type: str + sample: "my-managed-instance-group" + +named_ports: + description: list of named ports acted upon + returned: when named_ports are initially set or updated + type: list + sample: [{ "name": "http", "port": 80 }, { "name": "foo", "port": 82 }] + +size: + description: Number of VMs in Managed Instance Group. + returned: changed + type: int + sample: 4 + +created_instances: + description: Names of instances created. + returned: When instances are created. + type: list + sample: ["ansible-mig-new-0k4y", "ansible-mig-new-0zk5", "ansible-mig-new-kp68"] + +deleted_instances: + description: Names of instances deleted. + returned: When instances are deleted. + type: list + sample: ["ansible-mig-new-0k4y", "ansible-mig-new-0zk5", "ansible-mig-new-kp68"] + +resize_created_instances: + description: Names of instances created during resizing. + returned: When a resize results in the creation of instances. + type: list + sample: ["ansible-mig-new-0k4y", "ansible-mig-new-0zk5", "ansible-mig-new-kp68"] + +resize_deleted_instances: + description: Names of instances deleted during resizing. + returned: When a resize results in the deletion of instances. + type: list + sample: ["ansible-mig-new-0k4y", "ansible-mig-new-0zk5", "ansible-mig-new-kp68"] + +recreated_instances: + description: Names of instances recreated. + returned: When instances are recreated. + type: list + sample: ["ansible-mig-new-0k4y", "ansible-mig-new-0zk5", "ansible-mig-new-kp68"] + +created_autoscaler: + description: True if Autoscaler was attempted and created. False otherwise. + returned: When the creation of an Autoscaler was attempted. 
+ type: bool + sample: true + +updated_autoscaler: + description: True if an Autoscaler update was attempted and succeeded. + False returned if update failed. + returned: When the update of an Autoscaler was attempted. + type: bool + sample: true + +deleted_autoscaler: + description: True if an Autoscaler delete attempted and succeeded. + False returned if delete failed. + returned: When the delete of an Autoscaler was attempted. + type: bool + sample: true + +set_named_ports: + description: True if the named_ports have been set + returned: named_ports have been set + type: bool + sample: true + +updated_named_ports: + description: True if the named_ports have been updated + returned: named_ports have been updated + type: bool + sample: true +''' + +try: + from ast import literal_eval + HAS_PYTHON26 = True +except ImportError: + HAS_PYTHON26 = False + +try: + import libcloud + from libcloud.compute.types import Provider + from libcloud.compute.providers import get_driver + from libcloud.common.google import GoogleBaseError, QuotaExceededError, \ + ResourceExistsError, ResourceInUseError, ResourceNotFoundError + from libcloud.compute.drivers.gce import GCEAddress + _ = Provider.GCE + HAS_LIBCLOUD = True +except ImportError: + HAS_LIBCLOUD = False + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.gce import gce_connect + + +def _check_params(params, field_list): + """ + Helper to validate params. + + Use this in function definitions if they require specific fields + to be present. + + :param params: structure that contains the fields + :type params: ``dict`` + + :param field_list: list of dict representing the fields + [{'name': str, 'required': True/False', 'type': cls}] + :type field_list: ``list`` of ``dict`` + + :return True, exits otherwise + :rtype: ``bool`` + """ + for d in field_list: + if not d['name'] in params: + if d['required'] is True: + return (False, "%s is required and must be of type: %s" % + (d['name'], str(d['type']))) + else: + if not isinstance(params[d['name']], d['type']): + return (False, + "%s must be of type: %s" % (d['name'], str(d['type']))) + + return (True, '') + + +def _validate_autoscaling_params(params): + """ + Validate that the minimum configuration is present for autoscaling. + + :param params: Ansible dictionary containing autoscaling configuration + It is expected that autoscaling config will be found at the + key 'autoscaling'. + :type params: ``dict`` + + :return: Tuple containing a boolean and a string. True if autoscaler + is valid, False otherwise, plus str for message. + :rtype: ``(``bool``, ``str``)`` + """ + if not params['autoscaling']: + # It's optional, so if not set at all, it's valid. 
+ return (True, '') + if not isinstance(params['autoscaling'], dict): + return (False, + 'autoscaling: configuration expected to be a dictionary.') + + # check first-level required fields + as_req_fields = [ + {'name': 'name', 'required': True, 'type': str}, + {'name': 'enabled', 'required': True, 'type': bool}, + {'name': 'policy', 'required': True, 'type': dict} + ] # yapf: disable + + (as_req_valid, as_req_msg) = _check_params(params['autoscaling'], + as_req_fields) + if not as_req_valid: + return (False, as_req_msg) + + # check policy configuration + as_policy_fields = [ + {'name': 'max_instances', 'required': True, 'type': int}, + {'name': 'min_instances', 'required': False, 'type': int}, + {'name': 'cool_down_period', 'required': False, 'type': int} + ] # yapf: disable + + (as_policy_valid, as_policy_msg) = _check_params( + params['autoscaling']['policy'], as_policy_fields) + if not as_policy_valid: + return (False, as_policy_msg) + + # TODO(supertom): check utilization fields + + return (True, '') + + +def _validate_named_port_params(params): + """ + Validate the named ports parameters + + :param params: Ansible dictionary containing named_ports configuration + It is expected that autoscaling config will be found at the + key 'named_ports'. That key should contain a list of + {name : port} dictionaries. + :type params: ``dict`` + + :return: Tuple containing a boolean and a string. True if params + are valid, False otherwise, plus str for message. + :rtype: ``(``bool``, ``str``)`` + """ + if not params['named_ports']: + # It's optional, so if not set at all, it's valid. + return (True, '') + if not isinstance(params['named_ports'], list): + return (False, 'named_ports: expected list of name:port dictionaries.') + req_fields = [ + {'name': 'name', 'required': True, 'type': str}, + {'name': 'port', 'required': True, 'type': int} + ] # yapf: disable + + for np in params['named_ports']: + (valid_named_ports, np_msg) = _check_params(np, req_fields) + if not valid_named_ports: + return (False, np_msg) + + return (True, '') + + +def _get_instance_list(mig, field='name', filter_list=None): + """ + Helper to grab field from instances response. + + :param mig: Managed Instance Group Object from libcloud. + :type mig: :class: `GCEInstanceGroupManager` + + :param field: Field name in list_managed_instances response. Defaults + to 'name'. + :type field: ``str`` + + :param filter_list: list of 'currentAction' strings to filter on. Only + items that match a currentAction in this list will + be returned. Default is "['NONE']". + :type filter_list: ``list`` of ``str`` + + :return: List of strings from list_managed_instances response. + :rtype: ``list`` + """ + filter_list = ['NONE'] if filter_list is None else filter_list + + return [x[field] for x in mig.list_managed_instances() + if x['currentAction'] in filter_list] + + +def _gen_gce_as_policy(as_params): + """ + Take Autoscaler params and generate GCE-compatible policy. + + :param as_params: Dictionary in Ansible-playbook format + containing policy arguments. 
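+                      For example, {'max_instances': 5, 'min_instances': 2,
+                      'cool_down_period': 37} maps to {'maxNumReplicas': 5,
+                      'minNumReplicas': 2, 'coolDownPeriodSec': 37}.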
+    :type as_params: ``dict``
+
+    :return: GCE-compatible policy dictionary
+    :rtype: ``dict``
+    """
+    asp_data = {}
+    asp_data['maxNumReplicas'] = as_params['max_instances']
+    if 'min_instances' in as_params:
+        asp_data['minNumReplicas'] = as_params['min_instances']
+    if 'cool_down_period' in as_params:
+        asp_data['coolDownPeriodSec'] = as_params['cool_down_period']
+    if 'cpu_utilization' in as_params and 'target' in as_params[
+            'cpu_utilization']:
+        asp_data['cpuUtilization'] = {'utilizationTarget':
+                                      as_params['cpu_utilization']['target']}
+    if 'load_balancing_utilization' in as_params and 'target' in as_params[
+            'load_balancing_utilization']:
+        asp_data['loadBalancingUtilization'] = {
+            'utilizationTarget':
+            as_params['load_balancing_utilization']['target']
+        }
+
+    return asp_data
+
+
+def create_autoscaler(gce, mig, params):
+    """
+    Create a new Autoscaler for a MIG.
+
+    :param gce: An initialized GCE driver object.
+    :type gce: :class: `GCENodeDriver`
+
+    :param mig: An initialized GCEInstanceGroupManager.
+    :type mig: :class: `GCEInstanceGroupManager`
+
+    :param params: Dictionary of autoscaling parameters.
+    :type params: ``dict``
+
+    :return: True if an Autoscaler was created, False otherwise.
+    :rtype: ``bool``
+    """
+    changed = False
+    as_policy = _gen_gce_as_policy(params['policy'])
+    autoscaler = gce.ex_create_autoscaler(name=params['name'], zone=mig.zone,
+                                          instance_group=mig, policy=as_policy)
+    if autoscaler:
+        changed = True
+    return changed
+
+
+def update_autoscaler(gce, autoscaler, params):
+    """
+    Update an Autoscaler.
+
+    Takes an existing Autoscaler object, and updates it with
+    the supplied params before calling libcloud's update method.
+
+    :param gce: An initialized GCE driver object.
+    :type gce: :class: `GCENodeDriver`
+
+    :param autoscaler: An initialized GCEAutoscaler.
+    :type autoscaler: :class: `GCEAutoscaler`
+
+    :param params: Dictionary of autoscaling parameters.
+    :type params: ``dict``
+
+    :return: True if changes were made, False otherwise.
+    :rtype: ``bool``
+    """
+    as_policy = _gen_gce_as_policy(params['policy'])
+    if autoscaler.policy != as_policy:
+        autoscaler.policy = as_policy
+        autoscaler = gce.ex_update_autoscaler(autoscaler)
+        if autoscaler:
+            return True
+    return False
+
+
+def delete_autoscaler(autoscaler):
+    """
+    Delete an Autoscaler. Does not affect the MIG.
+
+    :param autoscaler: An initialized GCEAutoscaler.
+    :type autoscaler: :class: `GCEAutoscaler`
+
+    :return: True if the Autoscaler was deleted, False otherwise.
+    :rtype: ``bool``
+    """
+    changed = False
+    if autoscaler.destroy():
+        changed = True
+    return changed
+
+
+def get_autoscaler(gce, name, zone):
+    """
+    Get an Autoscaler from GCE.
+
+    If the Autoscaler is not found, None is returned.
+
+    :param gce: An initialized GCE driver object.
+    :type gce: :class: `GCENodeDriver`
+
+    :param name: Name of the Autoscaler.
+    :type name: ``str``
+
+    :param zone: Zone that the Autoscaler is located in.
+    :type zone: ``str``
+
+    :return: A GCEAutoscaler object or None.
+    :rtype: :class: `GCEAutoscaler` or None
+    """
+    try:
+        # Does the Autoscaler already exist?
+        return gce.ex_get_autoscaler(name, zone)
+
+    except ResourceNotFoundError:
+        return None
+
+
+def create_mig(gce, params):
+    """
+    Create a new Managed Instance Group.
+
+    :param gce: An initialized GCE driver object.
+    :type gce: :class: `GCENodeDriver`
+
+    :param params: Dictionary of parameters needed by the module.
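+                   (illustrative example, keys as used below:
+                   {'name': 'my-mig', 'size': 2, 'template': 'my-template',
+                   'zone': 'us-central1-a'})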
+    :type params: ``dict``
+
+    :return: Tuple with changed stats and a list of affected instances.
+    :rtype: tuple in the format of (bool, list)
+    """
+
+    changed = False
+    return_data = []
+    actions_filter = ['CREATING']
+
+    mig = gce.ex_create_instancegroupmanager(
+        name=params['name'], size=params['size'], template=params['template'],
+        zone=params['zone'])
+
+    if mig:
+        changed = True
+        return_data = _get_instance_list(mig, filter_list=actions_filter)
+
+    return (changed, return_data)
+
+
+def delete_mig(mig):
+    """
+    Delete a Managed Instance Group. All VMs in that MIG are also deleted.
+
+    :param mig: Managed Instance Group Object from Libcloud.
+    :type mig: :class: `GCEInstanceGroupManager`
+
+    :return: Tuple with changed stats and a list of affected instances.
+    :rtype: tuple in the format of (bool, list)
+    """
+    changed = False
+    return_data = []
+    actions_filter = ['NONE', 'CREATING', 'RECREATING', 'DELETING',
+                      'ABANDONING', 'RESTARTING', 'REFRESHING']
+    instance_names = _get_instance_list(mig, filter_list=actions_filter)
+    if mig.destroy():
+        changed = True
+        return_data = instance_names
+
+    return (changed, return_data)
+
+
+def recreate_instances_in_mig(mig):
+    """
+    Recreate the instances for a Managed Instance Group.
+
+    :param mig: Managed Instance Group Object from libcloud.
+    :type mig: :class: `GCEInstanceGroupManager`
+
+    :return: Tuple with changed stats and a list of affected instances.
+    :rtype: tuple in the format of (bool, list)
+    """
+    changed = False
+    return_data = []
+    actions_filter = ['RECREATING']
+
+    if mig.recreate_instances():
+        changed = True
+        return_data = _get_instance_list(mig, filter_list=actions_filter)
+
+    return (changed, return_data)
+
+
+def resize_mig(mig, size):
+    """
+    Resize a Managed Instance Group.
+
+    Based on the size provided, GCE will automatically create and delete
+    VMs as needed.
+
+    :param mig: Managed Instance Group Object from libcloud.
+    :type mig: :class: `GCEInstanceGroupManager`
+
+    :param size: Desired instance count for the MIG.
+    :type size: ``int``
+
+    :return: Tuple with changed stats and a list of affected instances.
+    :rtype: tuple in the format of (bool, list)
+    """
+    changed = False
+    return_data = []
+    actions_filter = ['CREATING', 'DELETING']
+
+    if mig.resize(size):
+        changed = True
+        return_data = _get_instance_list(mig, filter_list=actions_filter)
+
+    return (changed, return_data)
+
+
+def get_mig(gce, name, zone):
+    """
+    Get a Managed Instance Group from GCE.
+
+    If the MIG is not found, None is returned.
+
+    :param gce: An initialized GCE driver object.
+    :type gce: :class: `GCENodeDriver`
+
+    :param name: Name of the Managed Instance Group.
+    :type name: ``str``
+
+    :param zone: Zone that the Managed Instance Group is located in.
+    :type zone: ``str``
+
+    :return: A GCEInstanceGroupManager object or None.
+    :rtype: :class: `GCEInstanceGroupManager` or None
+    """
+    try:
+        # Does the MIG already exist?
+        return gce.ex_get_instancegroupmanager(name=name, zone=zone)
+
+    except ResourceNotFoundError:
+        return None
+
+
+def update_named_ports(mig, named_ports):
+    """
+    Set the named ports on a Managed Instance Group.
+
+    Sort the existing and new named ports. If they differ, update.
+    This also implicitly allows for the removal of named_ports.
+
+    :param mig: Managed Instance Group Object from libcloud.
+    :type mig: :class: `GCEInstanceGroupManager`
+
+    :param named_ports: list of dictionaries in the format of
+                        {'name': ..., 'port': ...}
+    :type named_ports: ``list`` of ``dict``
+
+    :return: True if the named ports were changed, False otherwise.
+    :rtype: ``bool``
+    """
+    changed = False
+    existing_ports = []
+    new_ports = []
+    if hasattr(mig.instance_group, 'named_ports'):
+        existing_ports = sorted(mig.instance_group.named_ports,
+                                key=lambda x: x['name'])
+    if named_ports is not None:
+        new_ports = sorted(named_ports, key=lambda x: x['name'])
+
+    if existing_ports != new_ports:
+        if mig.instance_group.set_named_ports(named_ports):
+            changed = True
+
+    return changed
+
+
+def main():
+    module = AnsibleModule(argument_spec=dict(
+        name=dict(required=True),
+        template=dict(),
+        recreate_instances=dict(type='bool', default=False),
+        # Do not set a default size here. For Create and some update
+        # operations, it is required and should be explicitly set.
+        # Below, we set it to the existing value if it has not been set.
+        size=dict(type='int'),
+        state=dict(choices=['absent', 'present'], default='present'),
+        zone=dict(required=True),
+        autoscaling=dict(type='dict', default=None),
+        named_ports=dict(type='list', default=None),
+        service_account_email=dict(),
+        service_account_permissions=dict(type='list'),
+        pem_file=dict(type='path'),
+        credentials_file=dict(type='path'),
+        project_id=dict(), ), )
+
+    if not HAS_PYTHON26:
+        module.fail_json(
+            msg="GCE module requires python's 'ast' module, python v2.6+")
+    if not HAS_LIBCLOUD:
+        module.fail_json(
+            msg='libcloud with GCE Managed Instance Group support (1.2+) required for this module.')
+
+    gce = gce_connect(module)
+    if not hasattr(gce, 'ex_create_instancegroupmanager'):
+        module.fail_json(
+            msg='libcloud with GCE Managed Instance Group support (1.2+) required for this module.',
+            changed=False)
+
+    params = {}
+    params['state'] = module.params.get('state')
+    params['zone'] = module.params.get('zone')
+    params['name'] = module.params.get('name')
+    params['size'] = module.params.get('size')
+    params['template'] = module.params.get('template')
+    params['recreate_instances'] = module.params.get('recreate_instances')
+    params['autoscaling'] = module.params.get('autoscaling', None)
+    params['named_ports'] = module.params.get('named_ports', None)
+
+    (valid_autoscaling, as_msg) = _validate_autoscaling_params(params)
+    if not valid_autoscaling:
+        module.fail_json(msg=as_msg, changed=False)
+
+    if params['named_ports'] is not None and not hasattr(
+            gce, 'ex_instancegroup_set_named_ports'):
+        module.fail_json(
+            msg="Apache Libcloud 1.3.0+ is required to use 'named_ports' option",
+            changed=False)
+
+    (valid_named_ports, np_msg) = _validate_named_port_params(params)
+    if not valid_named_ports:
+        module.fail_json(msg=np_msg, changed=False)
+
+    changed = False
+    json_output = {'state': params['state'], 'zone': params['zone']}
+    mig = get_mig(gce, params['name'], params['zone'])
+
+    if not mig:
+        if params['state'] == 'absent':
+            # Doesn't exist in GCE, and state==absent.
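+            # Note: deleting a MIG that does not exist is treated as an
+            # error here rather than as an idempotent no-op.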
+            changed = False
+            module.fail_json(
+                msg="Cannot delete unknown managed instance group: %s" %
+                (params['name']))
+        else:
+            # Create MIG
+            req_create_fields = [
+                {'name': 'template', 'required': True, 'type': str},
+                {'name': 'size', 'required': True, 'type': int}
+            ]  # yapf: disable
+
+            (valid_create_fields, valid_create_msg) = _check_params(
+                params, req_create_fields)
+            if not valid_create_fields:
+                module.fail_json(msg=valid_create_msg, changed=False)
+
+            (changed, json_output['created_instances']) = create_mig(gce,
+                                                                     params)
+            if params['autoscaling'] and params['autoscaling'][
+                    'enabled'] is True:
+                # Fetch newly-created MIG and create Autoscaler for it.
+                mig = get_mig(gce, params['name'], params['zone'])
+                if not mig:
+                    module.fail_json(
+                        msg='Unable to fetch created MIG %s to create \
+                        autoscaler in zone: %s' % (
+                            params['name'], params['zone']), changed=False)
+
+                if not create_autoscaler(gce, mig, params['autoscaling']):
+                    module.fail_json(
+                        msg='Unable to create autoscaler for MIG %s \
+                        in zone: %s' % (params['name'], params['zone']),
+                        changed=False)
+
+                json_output['created_autoscaler'] = True
+            # Add named ports if available
+            if params['named_ports']:
+                mig = get_mig(gce, params['name'], params['zone'])
+                if not mig:
+                    module.fail_json(
+                        msg='Unable to fetch created MIG %s to set \
+                        named ports in zone: %s' % (
+                            params['name'], params['zone']), changed=False)
+                json_output['set_named_ports'] = update_named_ports(
+                    mig, params['named_ports'])
+                if json_output['set_named_ports']:
+                    json_output['named_ports'] = params['named_ports']
+
+    elif params['state'] == 'absent':
+        # Delete MIG
+
+        # First, check and remove the autoscaler, if present.
+        # Note: multiple autoscalers can be associated to a single MIG. We
+        # only handle the one that is named, but we might want to think about this.
+        if params['autoscaling']:
+            autoscaler = get_autoscaler(gce, params['autoscaling']['name'],
+                                        params['zone'])
+            if not autoscaler:
+                module.fail_json(msg='Unable to fetch autoscaler %s to delete \
+                in zone: %s' % (params['autoscaling']['name'], params['zone']),
+                    changed=False)
+
+            changed = delete_autoscaler(autoscaler)
+            json_output['deleted_autoscaler'] = changed
+
+        # Now, delete the MIG.
+        (changed, json_output['deleted_instances']) = delete_mig(mig)
+
+    else:
+        # Update MIG
+
+        # If we're going to update a MIG, we need size and template values.
+        # If not specified, we use the values from the existing MIG.
+        if not params['size']:
+            params['size'] = mig.size
+
+        if not params['template']:
+            params['template'] = mig.template.name
+
+        if params['template'] != mig.template.name:
+            # Update Instance Template.
+            new_template = gce.ex_get_instancetemplate(params['template'])
+            mig.set_instancetemplate(new_template)
+            json_output['updated_instancetemplate'] = True
+            changed = True
+        if params['recreate_instances'] is True:
+            # Recreate Instances.
+            (changed, json_output['recreated_instances']
+             ) = recreate_instances_in_mig(mig)
+
+        if params['size'] != mig.size:
+            # Resize MIG.
+            keystr = 'created' if params['size'] > mig.size else 'deleted'
+            (changed, json_output['resize_%s_instances' %
+                                  (keystr)]) = resize_mig(mig, params['size'])
+
+        # Update Autoscaler
+        if params['autoscaling']:
+            autoscaler = get_autoscaler(gce, params['autoscaling']['name'],
+                                        params['zone'])
+            if not autoscaler:
+                # Try to create autoscaler.
+                # Note: this isn't perfect, if the autoscaler name has changed
+                # we wouldn't know that here.
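+                # A renamed autoscaler therefore falls through to creation
+                # below, which can leave the previously-named autoscaler
+                # attached to the MIG.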
+                if not create_autoscaler(gce, mig, params['autoscaling']):
+                    module.fail_json(
+                        msg='Unable to create autoscaler %s for existing MIG %s\
+                        in zone: %s' % (params['autoscaling']['name'],
+                                        params['name'], params['zone']),
+                        changed=False)
+                json_output['created_autoscaler'] = True
+                changed = True
+            else:
+                if params['autoscaling']['enabled'] is False:
+                    # Delete autoscaler
+                    changed = delete_autoscaler(autoscaler)
+                    json_output['deleted_autoscaler'] = changed
+                else:
+                    # Update policy, etc.
+                    changed = update_autoscaler(gce, autoscaler,
+                                                params['autoscaling'])
+                    json_output['updated_autoscaler'] = changed
+        named_ports = params['named_ports'] or []
+        json_output['updated_named_ports'] = update_named_ports(mig,
+                                                                named_ports)
+        if json_output['updated_named_ports']:
+            json_output['named_ports'] = named_ports
+
+    json_output['changed'] = changed
+    json_output.update(params)
+    module.exit_json(**json_output)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/cloud/google/gce_net.py b/plugins/modules/cloud/google/gce_net.py
new file mode 100644
index 0000000000..3e0a4fab42
--- /dev/null
+++ b/plugins/modules/cloud/google/gce_net.py
@@ -0,0 +1,503 @@
+#!/usr/bin/python
+# Copyright 2013 Google Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: gce_net
+short_description: create/destroy GCE networks and firewall rules
+description:
+    - This module can create and destroy Google Compute Engine networks and
+      firewall rules U(https://cloud.google.com/compute/docs/networking).
+      The I(name) parameter is reserved for referencing a network while the
+      I(fwname) parameter is used to reference firewall rules.
+      IPv4 Address ranges must be specified using the CIDR
+      U(http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) format.
+      Full install/configuration instructions for the gce* modules can
+      be found in the comments of ansible/test/gce_tests.py.
+options:
+  allowed:
+    description:
+      - the protocol:ports to allow (I(tcp:80) or I(tcp:80,443) or I(tcp:80-800;udp:1-25)).
+        This parameter is mandatory when creating or updating a firewall rule.
+  ipv4_range:
+    description:
+      - the IPv4 address range in CIDR notation for the network.
+        This parameter is not required when you specify an existing network
+        in the I(name) parameter, but is required when creating a new network.
+    aliases: ['cidr']
+  fwname:
+    description:
+      - name of the firewall rule
+    aliases: ['fwrule']
+  name:
+    description:
+      - name of the network
+  src_range:
+    description:
+      - the source IPv4 address range in CIDR notation
+    default: []
+    aliases: ['src_cidr']
+  src_tags:
+    description:
+      - the source instance tags for creating a firewall rule
+    default: []
+  target_tags:
+    description:
+      - the target instance tags for creating a firewall rule
+    default: []
+  state:
+    description:
+      - desired state of the network or firewall
+    default: "present"
+    choices: ["active", "present", "absent", "deleted"]
+  service_account_email:
+    description:
+      - service account email
+  pem_file:
+    description:
+      - path to the pem file associated with the service account email.
+        This option is deprecated. Use C(credentials_file).
+ credentials_file: + description: + - path to the JSON file associated with the service account email + project_id: + description: + - your GCE project ID + mode: + description: + - network mode for Google Cloud + C(legacy) indicates a network with an IP address range; + C(auto) automatically generates subnetworks in different regions; + C(custom) uses networks to group subnets of user specified IP address ranges + https://cloud.google.com/compute/docs/networking#network_types + default: "legacy" + choices: ["legacy", "auto", "custom"] + subnet_name: + description: + - name of subnet to create + subnet_region: + description: + - region of subnet to create + subnet_desc: + description: + - description of subnet to create + +requirements: + - "python >= 2.6" + - "apache-libcloud >= 0.13.3, >= 0.17.0 if using JSON credentials" +author: "Eric Johnson (@erjohnso) , Tom Melendez (@supertom) " +''' + +EXAMPLES = ''' +# Create a 'legacy' Network +- name: Create Legacy Network + gce_net: + name: legacynet + ipv4_range: '10.24.17.0/24' + mode: legacy + state: present + +# Create an 'auto' Network +- name: Create Auto Network + gce_net: + name: autonet + mode: auto + state: present + +# Create a 'custom' Network +- name: Create Custom Network + gce_net: + name: customnet + mode: custom + subnet_name: "customsubnet" + subnet_region: us-east1 + ipv4_range: '10.240.16.0/24' + state: "present" + +# Create Firewall Rule with Source Tags +- name: Create Firewall Rule w/Source Tags + gce_net: + name: default + fwname: "my-firewall-rule" + allowed: tcp:80 + state: "present" + src_tags: "foo,bar" + +# Create Firewall Rule with Source Range +- name: Create Firewall Rule w/Source Range + gce_net: + name: default + fwname: "my-firewall-rule" + allowed: tcp:80 + state: "present" + src_range: ['10.1.1.1/32'] + +# Create Custom Subnetwork +- name: Create Custom Subnetwork + gce_net: + name: privatenet + mode: custom + subnet_name: subnet_example + subnet_region: us-central1 + ipv4_range: '10.0.0.0/16' +''' + +RETURN = ''' +allowed: + description: Rules (ports and protocols) specified by this firewall rule. + returned: When specified + type: str + sample: "tcp:80;icmp" + +fwname: + description: Name of the firewall rule. + returned: When specified + type: str + sample: "my-fwname" + +ipv4_range: + description: IPv4 range of the specified network or subnetwork. + returned: when specified or when a subnetwork is created + type: str + sample: "10.0.0.0/16" + +name: + description: Name of the network. + returned: always + type: str + sample: "my-network" + +src_range: + description: IP address blocks a firewall rule applies to. + returned: when specified + type: list + sample: [ '10.1.1.12/8' ] + +src_tags: + description: Instance Tags firewall rule applies to. + returned: when specified while creating a firewall rule + type: list + sample: [ 'foo', 'bar' ] + +state: + description: State of the item operated on. + returned: always + type: str + sample: "present" + +subnet_name: + description: Name of the subnetwork. + returned: when specified or when a subnetwork is created + type: str + sample: "my-subnetwork" + +subnet_region: + description: Region of the specified subnet. + returned: when specified or when a subnetwork is created + type: str + sample: "us-east1" + +target_tags: + description: Instance Tags with these tags receive traffic allowed by firewall rule. 
+ returned: when specified while creating a firewall rule + type: list + sample: [ 'foo', 'bar' ] +''' +try: + from libcloud.compute.types import Provider + from libcloud.compute.providers import get_driver + from libcloud.common.google import GoogleBaseError, QuotaExceededError, ResourceExistsError, ResourceNotFoundError + _ = Provider.GCE + HAS_LIBCLOUD = True +except ImportError: + HAS_LIBCLOUD = False + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.gce import gce_connect, unexpected_error_msg + + +def format_allowed_section(allowed): + """Format each section of the allowed list""" + if allowed.count(":") == 0: + protocol = allowed + ports = [] + elif allowed.count(":") == 1: + protocol, ports = allowed.split(":") + else: + return [] + if ports.count(","): + ports = ports.split(",") + elif ports: + ports = [ports] + return_val = {"IPProtocol": protocol} + if ports: + return_val["ports"] = ports + return return_val + + +def format_allowed(allowed): + """Format the 'allowed' value so that it is GCE compatible.""" + return_value = [] + if allowed.count(";") == 0: + return [format_allowed_section(allowed)] + else: + sections = allowed.split(";") + for section in sections: + return_value.append(format_allowed_section(section)) + return return_value + + +def sorted_allowed_list(allowed_list): + """Sort allowed_list (output of format_allowed) by protocol and port.""" + # sort by protocol + allowed_by_protocol = sorted(allowed_list, key=lambda x: x['IPProtocol']) + # sort the ports list + return sorted(allowed_by_protocol, key=lambda y: sorted(y.get('ports', []))) + + +def main(): + module = AnsibleModule( + argument_spec=dict( + allowed=dict(), + ipv4_range=dict(), + fwname=dict(), + name=dict(), + src_range=dict(default=[], type='list'), + src_tags=dict(default=[], type='list'), + target_tags=dict(default=[], type='list'), + state=dict(default='present'), + service_account_email=dict(), + pem_file=dict(type='path'), + credentials_file=dict(type='path'), + project_id=dict(), + mode=dict(default='legacy', choices=['legacy', 'auto', 'custom']), + subnet_name=dict(), + subnet_region=dict(), + subnet_desc=dict(), + ) + ) + + if not HAS_LIBCLOUD: + module.fail_json(msg='libcloud with GCE support (0.17.0+) required for this module') + + gce = gce_connect(module) + + allowed = module.params.get('allowed') + ipv4_range = module.params.get('ipv4_range') + fwname = module.params.get('fwname') + name = module.params.get('name') + src_range = module.params.get('src_range') + src_tags = module.params.get('src_tags') + target_tags = module.params.get('target_tags') + state = module.params.get('state') + mode = module.params.get('mode') + subnet_name = module.params.get('subnet_name') + subnet_region = module.params.get('subnet_region') + subnet_desc = module.params.get('subnet_desc') + + changed = False + json_output = {'state': state} + + if state in ['active', 'present']: + network = None + subnet = None + try: + network = gce.ex_get_network(name) + json_output['name'] = name + if mode == 'legacy': + json_output['ipv4_range'] = network.cidr + if network and mode == 'custom' and subnet_name: + if not hasattr(gce, 'ex_get_subnetwork'): + module.fail_json(msg="Update libcloud to a more recent version (>1.0) that supports network 'mode' parameter", changed=False) + + subnet = gce.ex_get_subnetwork(subnet_name, region=subnet_region) + json_output['subnet_name'] = subnet_name + json_output['ipv4_range'] = subnet.cidr + except 
ResourceNotFoundError: + pass + except Exception as e: + module.fail_json(msg=unexpected_error_msg(e), changed=False) + + # user wants to create a new network that doesn't yet exist + if name and not network: + if not ipv4_range and mode != 'auto': + module.fail_json(msg="Network '" + name + "' is not found. To create network in legacy or custom mode, 'ipv4_range' parameter is required", + changed=False) + args = [ipv4_range if mode == 'legacy' else None] + kwargs = {} + if mode != 'legacy': + kwargs['mode'] = mode + + try: + network = gce.ex_create_network(name, *args, **kwargs) + json_output['name'] = name + json_output['ipv4_range'] = ipv4_range + changed = True + except TypeError: + module.fail_json(msg="Update libcloud to a more recent version (>1.0) that supports network 'mode' parameter", changed=False) + except Exception as e: + module.fail_json(msg=unexpected_error_msg(e), changed=False) + + if (subnet_name or ipv4_range) and not subnet and mode == 'custom': + if not hasattr(gce, 'ex_create_subnetwork'): + module.fail_json(msg='Update libcloud to a more recent version (>1.0) that supports subnetwork creation', changed=changed) + if not subnet_name or not ipv4_range or not subnet_region: + module.fail_json(msg="subnet_name, ipv4_range, and subnet_region required for custom mode", changed=changed) + + try: + subnet = gce.ex_create_subnetwork(subnet_name, cidr=ipv4_range, network=name, region=subnet_region, description=subnet_desc) + json_output['subnet_name'] = subnet_name + json_output['ipv4_range'] = ipv4_range + changed = True + except Exception as e: + module.fail_json(msg=unexpected_error_msg(e), changed=changed) + + if fwname: + # user creating a firewall rule + if not allowed and not src_range and not src_tags: + if changed and network: + module.fail_json( + msg="Network created, but missing required " + "firewall rule parameter(s)", changed=True) + module.fail_json( + msg="Missing required firewall rule parameter(s)", + changed=False) + + allowed_list = format_allowed(allowed) + + # Fetch existing rule and if it exists, compare attributes + # update if attributes changed. Create if doesn't exist. + try: + fw_changed = False + fw = gce.ex_get_firewall(fwname) + + # If old and new attributes are different, we update the firewall rule. + # This implicitly lets us clear out attributes as well. + # allowed_list is required and must not be None for firewall rules. + if allowed_list and (sorted_allowed_list(allowed_list) != sorted_allowed_list(fw.allowed)): + fw.allowed = allowed_list + fw_changed = True + + # source_ranges might not be set in the project; cast it to an empty list + fw.source_ranges = fw.source_ranges or [] + + # If these attributes are lists, we sort them first, then compare. + # Otherwise, we update if they differ. 
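+            # e.g. (illustrative): fw.source_ranges == ['10.0.0.0/8'] vs
+            # src_range == ['10.0.0.0/8', '192.168.0.0/16'] -> update.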
+ if fw.source_ranges != src_range: + if isinstance(src_range, list): + if sorted(fw.source_ranges) != sorted(src_range): + fw.source_ranges = src_range + fw_changed = True + else: + fw.source_ranges = src_range + fw_changed = True + + # source_tags might not be set in the project; cast it to an empty list + fw.source_tags = fw.source_tags or [] + + if fw.source_tags != src_tags: + if isinstance(src_tags, list): + if sorted(fw.source_tags) != sorted(src_tags): + fw.source_tags = src_tags + fw_changed = True + else: + fw.source_tags = src_tags + fw_changed = True + + # target_tags might not be set in the project; cast it to an empty list + fw.target_tags = fw.target_tags or [] + + if fw.target_tags != target_tags: + if isinstance(target_tags, list): + if sorted(fw.target_tags) != sorted(target_tags): + fw.target_tags = target_tags + fw_changed = True + else: + fw.target_tags = target_tags + fw_changed = True + + if fw_changed is True: + try: + gce.ex_update_firewall(fw) + changed = True + except Exception as e: + module.fail_json(msg=unexpected_error_msg(e), changed=False) + + # Firewall rule not found so we try to create it. + except ResourceNotFoundError: + try: + gce.ex_create_firewall(fwname, allowed_list, network=name, + source_ranges=src_range, source_tags=src_tags, target_tags=target_tags) + changed = True + + except Exception as e: + module.fail_json(msg=unexpected_error_msg(e), changed=False) + + except Exception as e: + module.fail_json(msg=unexpected_error_msg(e), changed=False) + + json_output['fwname'] = fwname + json_output['allowed'] = allowed + json_output['src_range'] = src_range + json_output['src_tags'] = src_tags + json_output['target_tags'] = target_tags + + if state in ['absent', 'deleted']: + if fwname: + json_output['fwname'] = fwname + fw = None + try: + fw = gce.ex_get_firewall(fwname) + except ResourceNotFoundError: + pass + except Exception as e: + module.fail_json(msg=unexpected_error_msg(e), changed=False) + if fw: + gce.ex_destroy_firewall(fw) + changed = True + elif subnet_name: + if not hasattr(gce, 'ex_get_subnetwork') or not hasattr(gce, 'ex_destroy_subnetwork'): + module.fail_json(msg='Update libcloud to a more recent version (>1.0) that supports subnetwork creation', changed=changed) + json_output['name'] = subnet_name + subnet = None + try: + subnet = gce.ex_get_subnetwork(subnet_name, region=subnet_region) + except ResourceNotFoundError: + pass + except Exception as e: + module.fail_json(msg=unexpected_error_msg(e), changed=False) + if subnet: + gce.ex_destroy_subnetwork(subnet) + changed = True + elif name: + json_output['name'] = name + network = None + try: + network = gce.ex_get_network(name) + + except ResourceNotFoundError: + pass + except Exception as e: + module.fail_json(msg=unexpected_error_msg(e), changed=False) + if network: + try: + gce.ex_destroy_network(network) + except Exception as e: + module.fail_json(msg=unexpected_error_msg(e), changed=False) + changed = True + + json_output['changed'] = changed + module.exit_json(**json_output) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/google/gce_pd.py b/plugins/modules/cloud/google/gce_pd.py new file mode 100644 index 0000000000..4ae9df6c36 --- /dev/null +++ b/plugins/modules/cloud/google/gce_pd.py @@ -0,0 +1,277 @@ +#!/usr/bin/python +# Copyright 2013 Google Inc. 
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: gce_pd +short_description: utilize GCE persistent disk resources +description: + - This module can create and destroy unformatted GCE persistent disks + U(https://developers.google.com/compute/docs/disks#persistentdisks). + It also supports attaching and detaching disks from running instances. + Full install/configuration instructions for the gce* modules can + be found in the comments of ansible/test/gce_tests.py. +options: + detach_only: + description: + - do not destroy the disk, merely detach it from an instance + type: bool + default: 'no' + instance_name: + description: + - instance name if you wish to attach or detach the disk + mode: + description: + - GCE mount mode of disk, READ_ONLY (default) or READ_WRITE + default: "READ_ONLY" + choices: ["READ_WRITE", "READ_ONLY"] + name: + description: + - name of the disk + required: true + size_gb: + description: + - whole integer size of disk (in GB) to create, default is 10 GB + default: 10 + image: + description: + - the source image to use for the disk + snapshot: + description: + - the source snapshot to use for the disk + state: + description: + - desired state of the persistent disk + default: "present" + choices: ["active", "present", "absent", "deleted"] + zone: + description: + - zone in which to create the disk + default: "us-central1-b" + service_account_email: + description: + - service account email + pem_file: + description: + - path to the pem file associated with the service account email + This option is deprecated. Use 'credentials_file'. 
+ credentials_file: + description: + - path to the JSON file associated with the service account email + project_id: + description: + - your GCE project ID + disk_type: + description: + - type of disk provisioned + default: "pd-standard" + choices: ["pd-standard", "pd-ssd"] + delete_on_termination: + description: + - If C(yes), deletes the volume when instance is terminated + type: bool + default: 'no' + +requirements: + - "python >= 2.6" + - "apache-libcloud >= 0.13.3, >= 0.17.0 if using JSON credentials" +author: "Eric Johnson (@erjohnso) " +''' + +EXAMPLES = ''' +# Simple attachment action to an existing instance +- local_action: + module: gce_pd + instance_name: notlocalhost + size_gb: 5 + name: pd +''' + +try: + from libcloud.compute.types import Provider + from libcloud.compute.providers import get_driver + from libcloud.common.google import GoogleBaseError, QuotaExceededError, ResourceExistsError, ResourceNotFoundError, ResourceInUseError + _ = Provider.GCE + HAS_LIBCLOUD = True +except ImportError: + HAS_LIBCLOUD = False + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.gce import gce_connect, unexpected_error_msg + + +def main(): + module = AnsibleModule( + argument_spec=dict( + delete_on_termination=dict(type='bool'), + detach_only=dict(type='bool'), + instance_name=dict(), + mode=dict(default='READ_ONLY', choices=['READ_WRITE', 'READ_ONLY']), + name=dict(required=True), + size_gb=dict(default=10), + disk_type=dict(default='pd-standard'), + image=dict(), + image_family=dict(), + external_projects=dict(type='list'), + snapshot=dict(), + state=dict(default='present'), + zone=dict(default='us-central1-b'), + service_account_email=dict(), + pem_file=dict(type='path'), + credentials_file=dict(type='path'), + project_id=dict(), + ) + ) + if not HAS_LIBCLOUD: + module.fail_json(msg='libcloud with GCE support (0.17.0+) is required for this module') + + gce = gce_connect(module) + + delete_on_termination = module.params.get('delete_on_termination') + detach_only = module.params.get('detach_only') + instance_name = module.params.get('instance_name') + mode = module.params.get('mode') + name = module.params.get('name') + size_gb = module.params.get('size_gb') + disk_type = module.params.get('disk_type') + image = module.params.get('image') + image_family = module.params.get('image_family') + external_projects = module.params.get('external_projects') + snapshot = module.params.get('snapshot') + state = module.params.get('state') + zone = module.params.get('zone') + + if delete_on_termination and not instance_name: + module.fail_json( + msg='Must specify an instance name when requesting delete on termination', + changed=False) + + if detach_only and not instance_name: + module.fail_json( + msg='Must specify an instance name when detaching a disk', + changed=False) + + disk = inst = None + changed = is_attached = False + + json_output = {'name': name, 'zone': zone, 'state': state, 'disk_type': disk_type} + if detach_only: + json_output['detach_only'] = True + json_output['detached_from_instance'] = instance_name + + if instance_name: + # user wants to attach/detach from an existing instance + try: + inst = gce.ex_get_node(instance_name, zone) + # is the disk attached? 
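+            # Each entry of inst.extra['disks'] is GCE attached-disk metadata;
+            # the keys used below look like (illustrative):
+            #   {'deviceName': 'pd', 'mode': 'READ_ONLY', ...}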
+ for d in inst.extra['disks']: + if d['deviceName'] == name: + is_attached = True + json_output['attached_mode'] = d['mode'] + json_output['attached_to_instance'] = inst.name + except Exception: + pass + + # find disk if it already exists + try: + disk = gce.ex_get_volume(name) + json_output['size_gb'] = int(disk.size) + except ResourceNotFoundError: + pass + except Exception as e: + module.fail_json(msg=unexpected_error_msg(e), changed=False) + + # user wants a disk to exist. If "instance_name" is supplied the user + # also wants it attached + if state in ['active', 'present']: + + if not size_gb: + module.fail_json(msg="Must supply a size_gb", changed=False) + try: + size_gb = int(round(float(size_gb))) + if size_gb < 1: + raise Exception + except Exception: + module.fail_json(msg="Must supply a size_gb larger than 1 GB", + changed=False) + + if instance_name and inst is None: + module.fail_json(msg='Instance %s does not exist in zone %s' % ( + instance_name, zone), changed=False) + + if not disk: + if image is not None and snapshot is not None: + module.fail_json( + msg='Cannot give both image (%s) and snapshot (%s)' % ( + image, snapshot), changed=False) + lc_image = None + lc_snapshot = None + if image_family is not None: + lc_image = gce.ex_get_image_from_family(image_family, ex_project_list=external_projects) + elif image is not None: + lc_image = gce.ex_get_image(image, ex_project_list=external_projects) + elif snapshot is not None: + lc_snapshot = gce.ex_get_snapshot(snapshot) + try: + disk = gce.create_volume( + size_gb, name, location=zone, image=lc_image, + snapshot=lc_snapshot, ex_disk_type=disk_type) + except ResourceExistsError: + pass + except QuotaExceededError: + module.fail_json(msg='Requested disk size exceeds quota', + changed=False) + except Exception as e: + module.fail_json(msg=unexpected_error_msg(e), changed=False) + json_output['size_gb'] = size_gb + if image is not None: + json_output['image'] = image + if snapshot is not None: + json_output['snapshot'] = snapshot + changed = True + if inst and not is_attached: + try: + gce.attach_volume(inst, disk, device=name, ex_mode=mode, + ex_auto_delete=delete_on_termination) + except Exception as e: + module.fail_json(msg=unexpected_error_msg(e), changed=False) + json_output['attached_to_instance'] = inst.name + json_output['attached_mode'] = mode + if delete_on_termination: + json_output['delete_on_termination'] = True + changed = True + + # user wants to delete a disk (or perhaps just detach it). 
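+    # With detach_only=True the disk is only detached from the instance;
+    # otherwise it is detached first (if attached) and then destroyed.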
+ if state in ['absent', 'deleted'] and disk: + + if inst and is_attached: + try: + gce.detach_volume(disk, ex_node=inst) + except Exception as e: + module.fail_json(msg=unexpected_error_msg(e), changed=False) + changed = True + if not detach_only: + try: + gce.destroy_volume(disk) + except ResourceInUseError as e: + module.fail_json(msg=str(e.value), changed=False) + except Exception as e: + module.fail_json(msg=unexpected_error_msg(e), changed=False) + changed = True + + json_output['changed'] = changed + module.exit_json(**json_output) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/google/gce_snapshot.py b/plugins/modules/cloud/google/gce_snapshot.py new file mode 100644 index 0000000000..1257167b17 --- /dev/null +++ b/plugins/modules/cloud/google/gce_snapshot.py @@ -0,0 +1,226 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: gce_snapshot +short_description: Create or destroy snapshots for GCE storage volumes +description: + - Manages snapshots for GCE instances. This module manages snapshots for + the storage volumes of a GCE compute instance. If there are multiple + volumes, each snapshot will be prepended with the disk name +options: + instance_name: + description: + - The GCE instance to snapshot + required: True + snapshot_name: + description: + - The name of the snapshot to manage + disks: + description: + - A list of disks to create snapshots for. If none is provided, + all of the volumes will be snapshotted + default: all + required: False + state: + description: + - Whether a snapshot should be C(present) or C(absent) + required: false + default: present + choices: [present, absent] + service_account_email: + description: + - GCP service account email for the project where the instance resides + required: true + credentials_file: + description: + - The path to the credentials file associated with the service account + required: true + project_id: + description: + - The GCP project ID to use + required: true +requirements: + - "python >= 2.6" + - "apache-libcloud >= 0.19.0" +author: Rob Wagner (@robwagner33) +''' + +EXAMPLES = ''' +- name: Create gce snapshot + gce_snapshot: + instance_name: example-instance + snapshot_name: example-snapshot + state: present + service_account_email: project_name@appspot.gserviceaccount.com + credentials_file: /path/to/credentials + project_id: project_name + delegate_to: localhost + +- name: Delete gce snapshot + gce_snapshot: + instance_name: example-instance + snapshot_name: example-snapshot + state: absent + service_account_email: project_name@appspot.gserviceaccount.com + credentials_file: /path/to/credentials + project_id: project_name + delegate_to: localhost + +# This example creates snapshots for only two of the available disks as +# disk0-example-snapshot and disk1-example-snapshot +- name: Create snapshots of specific disks + gce_snapshot: + instance_name: example-instance + snapshot_name: example-snapshot + state: present + disks: + - disk0 + - disk1 + service_account_email: project_name@appspot.gserviceaccount.com + credentials_file: /path/to/credentials + project_id: project_name + delegate_to: localhost +''' + +RETURN = ''' +snapshots_created: + 
description: List of newly created snapshots + returned: When snapshots are created + type: list + sample: "[disk0-example-snapshot, disk1-example-snapshot]" + +snapshots_deleted: + description: List of destroyed snapshots + returned: When snapshots are deleted + type: list + sample: "[disk0-example-snapshot, disk1-example-snapshot]" + +snapshots_existing: + description: List of snapshots that already existed (no-op) + returned: When snapshots were already present + type: list + sample: "[disk0-example-snapshot, disk1-example-snapshot]" + +snapshots_absent: + description: List of snapshots that were already absent (no-op) + returned: When snapshots were already absent + type: list + sample: "[disk0-example-snapshot, disk1-example-snapshot]" +''' + +try: + from libcloud.compute.types import Provider + _ = Provider.GCE + HAS_LIBCLOUD = True +except ImportError: + HAS_LIBCLOUD = False + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.gce import gce_connect + + +def find_snapshot(volume, name): + ''' + Check if there is a snapshot already created with the given name for + the passed in volume. + + Args: + volume: A gce StorageVolume object to manage + name: The name of the snapshot to look for + + Returns: + The VolumeSnapshot object if one is found + ''' + found_snapshot = None + snapshots = volume.list_snapshots() + for snapshot in snapshots: + if name == snapshot.name: + found_snapshot = snapshot + return found_snapshot + + +def main(): + module = AnsibleModule( + argument_spec=dict( + instance_name=dict(required=True), + snapshot_name=dict(required=True), + state=dict(choices=['present', 'absent'], default='present'), + disks=dict(default=None, type='list'), + service_account_email=dict(type='str'), + credentials_file=dict(type='path'), + project_id=dict(type='str') + ) + ) + + if not HAS_LIBCLOUD: + module.fail_json(msg='libcloud with GCE support (0.19.0+) is required for this module') + + gce = gce_connect(module) + + instance_name = module.params.get('instance_name') + snapshot_name = module.params.get('snapshot_name') + disks = module.params.get('disks') + state = module.params.get('state') + + json_output = dict( + changed=False, + snapshots_created=[], + snapshots_deleted=[], + snapshots_existing=[], + snapshots_absent=[] + ) + + snapshot = None + + instance = gce.ex_get_node(instance_name, 'all') + instance_disks = instance.extra['disks'] + + for instance_disk in instance_disks: + disk_snapshot_name = snapshot_name + disk_info = gce._get_components_from_path(instance_disk['source']) + device_name = disk_info['name'] + device_zone = disk_info['zone'] + if disks is None or device_name in disks: + volume_obj = gce.ex_get_volume(device_name, device_zone) + + # If we have more than one disk to snapshot, prepend the disk name + if len(instance_disks) > 1: + disk_snapshot_name = device_name + "-" + disk_snapshot_name + + snapshot = find_snapshot(volume_obj, disk_snapshot_name) + + if snapshot and state == 'present': + json_output['snapshots_existing'].append(disk_snapshot_name) + + elif snapshot and state == 'absent': + snapshot.destroy() + json_output['changed'] = True + json_output['snapshots_deleted'].append(disk_snapshot_name) + + elif not snapshot and state == 'present': + volume_obj.snapshot(disk_snapshot_name) + json_output['changed'] = True + json_output['snapshots_created'].append(disk_snapshot_name) + + elif not snapshot and state == 'absent': + json_output['snapshots_absent'].append(disk_snapshot_name) + + 
module.exit_json(**json_output)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/cloud/google/gce_tag.py b/plugins/modules/cloud/google/gce_tag.py
new file mode 100644
index 0000000000..0e90c407dd
--- /dev/null
+++ b/plugins/modules/cloud/google/gce_tag.py
@@ -0,0 +1,214 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+DOCUMENTATION = '''
+---
+module: gce_tag
+short_description: add or remove tag(s) to/from GCE instances
+description:
+    - This module can add or remove tags U(https://cloud.google.com/compute/docs/label-or-tag-resources#tags)
+      to/from GCE instances. Use C(instance_pattern) to update multiple instances in a specific zone.
+options:
+  instance_name:
+    description:
+      - The name of the GCE instance to add/remove tags.
+      - Required if C(instance_pattern) is not specified.
+  instance_pattern:
+    description:
+      - The pattern of GCE instance names to match for adding/removing tags. Full-Python regex is supported.
+        See U(https://docs.python.org/2/library/re.html) for details.
+      - If C(instance_name) is not specified, this field is required.
+  tags:
+    description:
+      - Comma-separated list of tags to add or remove.
+    required: yes
+  state:
+    description:
+      - Desired state of the tags.
+    choices: [ absent, present ]
+    default: present
+  zone:
+    description:
+      - The zone of the instance(s) to work with.
+    default: us-central1-a
+  service_account_email:
+    description:
+      - Service account email.
+  pem_file:
+    description:
+      - Path to the PEM file associated with the service account email.
+  project_id:
+    description:
+      - Your GCE project ID.
+requirements:
+  - python >= 2.6
+  - apache-libcloud >= 0.17.0
+notes:
+  - Either I(instance_name) or I(instance_pattern) is required.
+author:
+  - Do Hoang Khiem (@dohoangkhiem) <dohoangkhiem@gmail.com>
+  - Tom Melendez (@supertom)
+'''

+EXAMPLES = '''
+- name: Add tags to instance
+  gce_tag:
+    instance_name: staging-server
+    tags: http-server,https-server,staging
+    zone: us-central1-a
+    state: present
+
+- name: Remove tags from instance in default zone (us-central1-a)
+  gce_tag:
+    instance_name: test-server
+    tags: foo,bar
+    state: absent
+
+- name: Add tags to instances in zone that match pattern
+  gce_tag:
+    instance_pattern: test-server-*
+    tags: foo,bar
+    zone: us-central1-a
+    state: present
+'''
+
+import re
+import traceback
+
+try:
+    from libcloud.compute.types import Provider
+    from libcloud.compute.providers import get_driver
+    from libcloud.common.google import GoogleBaseError, QuotaExceededError, \
+        ResourceExistsError, ResourceNotFoundError, InvalidRequestError
+
+    _ = Provider.GCE
+    HAS_LIBCLOUD = True
+except ImportError:
+    HAS_LIBCLOUD = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.gce import gce_connect
+
+
+def _union_items(baselist, comparelist):
+    """Combine two lists, removing duplicates."""
+    return list(set(baselist) | set(comparelist))
+
+
+def _intersect_items(baselist, comparelist):
+    """Return matching items in both lists."""
+    return list(set(baselist) & set(comparelist))
+
+
+def _get_changed_items(baselist, comparelist):
+    """Return items in baselist that are not in comparelist."""
+    return list(set(baselist) & set(set(baselist) ^ set(comparelist)))
+
+
+def modify_tags(gce, module, node, tags, state='present'):
+    """Modify tags on an instance."""
+
+    existing_tags = node.extra['tags']
+    tags = [x.lower() for x in tags]
+    tags_changed = []
+
+    if state == 'absent':
+        # tags changed are any that intersect
+        tags_changed = _intersect_items(existing_tags, tags)
+        if not tags_changed:
+            return False, None
+        # update instance with tags in existing tags that weren't specified
+        node_tags = _get_changed_items(existing_tags, tags)
+    else:
+        # tags changed are any in the new list that weren't in existing
+        tags_changed = _get_changed_items(tags, existing_tags)
+        if not tags_changed:
+            return False, None
+        # update instance with the combined list
+        node_tags = _union_items(existing_tags, tags)
+
+    try:
+        gce.ex_set_node_tags(node, node_tags)
+        return True, tags_changed
+    except (GoogleBaseError, InvalidRequestError) as e:
+        module.fail_json(msg=str(e), changed=False)
+
+
+def main():
+    module = AnsibleModule(
+        argument_spec=dict(
+            instance_name=dict(type='str'),
+            instance_pattern=dict(type='str'),
+            tags=dict(type='list', required=True),
+            state=dict(type='str', default='present', choices=['absent', 'present']),
+            zone=dict(type='str', default='us-central1-a'),
+            service_account_email=dict(type='str'),
+            pem_file=dict(type='path'),
+            project_id=dict(type='str'),
+        ),
+        mutually_exclusive=[
+            ['instance_name', 'instance_pattern']
+        ],
+        required_one_of=[
+            ['instance_name', 'instance_pattern']
+        ],
+    )
+
+    instance_name = module.params.get('instance_name')
+    instance_pattern = module.params.get('instance_pattern')
+    state = module.params.get('state')
+    tags = module.params.get('tags')
+    zone = module.params.get('zone')
+    changed = False
+
+    if not HAS_LIBCLOUD:
+        module.fail_json(msg='libcloud with GCE support (0.17.0+) required for this module')
+
+    gce = gce_connect(module)
+
+    # Create list of nodes to operate on
+    matching_nodes = []
+    try:
+        if instance_pattern:
+            instances = gce.list_nodes(ex_zone=zone)
+            # 
no instances in zone + if not instances: + module.exit_json(changed=False, tags=tags, zone=zone, instances_updated=[]) + try: + # Python regex fully supported: https://docs.python.org/2/library/re.html + p = re.compile(instance_pattern) + matching_nodes = [i for i in instances if p.search(i.name) is not None] + except re.error as e: + module.fail_json(msg='Regex error for pattern %s: %s' % (instance_pattern, e), changed=False) + else: + matching_nodes = [gce.ex_get_node(instance_name, zone=zone)] + except ResourceNotFoundError: + module.fail_json(msg='Instance %s not found in zone %s' % (instance_name, zone), changed=False) + except GoogleBaseError as e: + module.fail_json(msg=str(e), changed=False, exception=traceback.format_exc()) + + # Tag nodes + instance_pattern_matches = [] + tags_changed = [] + for node in matching_nodes: + changed, tags_changed = modify_tags(gce, module, node, tags, state) + if changed: + instance_pattern_matches.append({'instance_name': node.name, 'tags_changed': tags_changed}) + if instance_pattern: + module.exit_json(changed=changed, instance_pattern=instance_pattern, tags=tags_changed, zone=zone, instances_updated=instance_pattern_matches) + else: + module.exit_json(changed=changed, instance_name=instance_name, tags=tags_changed, zone=zone) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/google/gcp_backend_service.py b/plugins/modules/cloud/google/gcp_backend_service.py new file mode 100644 index 0000000000..edbeee42b5 --- /dev/null +++ b/plugins/modules/cloud/google/gcp_backend_service.py @@ -0,0 +1,403 @@ +#!/usr/bin/python +# Copyright 2017 Google Inc. +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['deprecated'], + 'supported_by': 'community'} +DOCUMENTATION = ''' +module: gcp_backend_service +short_description: Create or Destroy a Backend Service. +description: + - Create or Destroy a Backend Service. See + U(https://cloud.google.com/compute/docs/load-balancing/http/backend-service) for an overview. + Full install/configuration instructions for the Google Cloud modules can + be found in the comments of ansible/test/gce_tests.py. +requirements: + - "python >= 2.6" + - "apache-libcloud >= 1.3.0" +notes: + - Update is not currently supported. + - Only global backend services are currently supported. Regional backends not currently supported. + - Internal load balancing not currently supported. +deprecated: + removed_in: "2.12" + why: Updated modules released with increased functionality + alternative: Use M(gcp_compute_backend_service) instead. +author: + - "Tom Melendez (@supertom) " +options: + backend_service_name: + description: + - Name of the Backend Service. + required: true + backends: + description: + - List of backends that make up the backend service. A backend is made up of + an instance group and optionally several other parameters. See + U(https://cloud.google.com/compute/docs/reference/latest/backendServices) + for details. + required: true + healthchecks: + description: + - List of healthchecks. Only one healthcheck is supported. + required: true + enable_cdn: + description: + - If true, enable Cloud CDN for this Backend Service. + type: bool + port_name: + description: + - Name of the port on the managed instance group (MIG) that backend + services can forward data to. Required for external load balancing. 
+ protocol: + description: + - The protocol this Backend Service uses to communicate with backends. + Possible values are HTTP, HTTPS, TCP, and SSL. The default is HTTP. + required: false + timeout: + description: + - How many seconds to wait for the backend before considering it a failed + request. Default is 30 seconds. Valid range is 1-86400. + required: false + service_account_email: + description: + - Service account email + credentials_file: + description: + - Path to the JSON file associated with the service account email. + project_id: + description: + - GCE project ID. + state: + description: + - Desired state of the resource + default: "present" + choices: ["absent", "present"] +''' + +EXAMPLES = ''' +- name: Create Minimum Backend Service + gcp_backend_service: + service_account_email: "{{ service_account_email }}" + credentials_file: "{{ credentials_file }}" + project_id: "{{ project_id }}" + backend_service_name: "{{ bes }}" + backends: + - instance_group: managed_instance_group_1 + healthchecks: + - healthcheck_name_for_backend_service + port_name: myhttpport + state: present + +- name: Create BES with extended backend parameters + gcp_backend_service: + service_account_email: "{{ service_account_email }}" + credentials_file: "{{ credentials_file }}" + project_id: "{{ project_id }}" + backend_service_name: "{{ bes }}" + backends: + - instance_group: managed_instance_group_1 + max_utilization: 0.6 + max_rate: 10 + - instance_group: managed_instance_group_2 + max_utilization: 0.5 + max_rate: 4 + healthchecks: + - healthcheck_name_for_backend_service + port_name: myhttpport + state: present + timeout: 60 +''' + +RETURN = ''' +backend_service_created: + description: Indicator Backend Service was created. + returned: When a Backend Service is created. + type: bool + sample: "True" +backend_service_deleted: + description: Indicator Backend Service was deleted. + returned: When a Backend Service is deleted. + type: bool + sample: "True" +backend_service_name: + description: Name of the Backend Service. + returned: Always. + type: str + sample: "my-backend-service" +backends: + description: List of backends (comprised of instance_group) that + make up a Backend Service. + returned: When a Backend Service exists. + type: list + sample: "[ { 'instance_group': 'mig_one', 'zone': 'us-central1-b'} ]" +enable_cdn: + description: If Cloud CDN is enabled. null if not set. + returned: When a backend service exists. + type: bool + sample: "True" +healthchecks: + description: List of healthchecks applied to the Backend Service. + returned: When a Backend Service exists. + type: list + sample: "[ 'my-healthcheck' ]" +protocol: + description: Protocol used to communicate with the Backends. + returned: When a Backend Service exists. + type: str + sample: "HTTP" +port_name: + description: Name of Backend Port. + returned: When a Backend Service exists. + type: str + sample: "myhttpport" +timeout: + description: In seconds, how long before a request sent to a backend is + considered failed. + returned: If specified. 
+  type: int
+  sample: 30
+'''
+
+try:
+    from ast import literal_eval
+    HAS_PYTHON26 = True
+except ImportError:
+    HAS_PYTHON26 = False
+
+try:
+    import libcloud
+    from libcloud.compute.types import Provider
+    from libcloud.compute.providers import get_driver
+    from libcloud.common.google import GoogleBaseError, QuotaExceededError, \
+        ResourceExistsError, ResourceInUseError, ResourceNotFoundError
+    from libcloud.compute.drivers.gce import GCEAddress
+    _ = Provider.GCE
+    HAS_LIBCLOUD = True
+except ImportError:
+    HAS_LIBCLOUD = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.gce import gce_connect
+from ansible_collections.community.general.plugins.module_utils.gcp import check_params
+
+
+def _validate_params(params):
+    """
+    Validate backend_service params.
+
+    This function calls _validate_backend_params to verify
+    the backend-specific parameters.
+
+    :param params: Ansible dictionary containing configuration.
+    :type params: ``dict``
+
+    :return: True or raises ValueError
+    :rtype: ``bool`` or `class:ValueError`
+    """
+    fields = [
+        {'name': 'timeout', 'type': int, 'min': 1, 'max': 86400},
+    ]
+    try:
+        check_params(params, fields)
+        _validate_backend_params(params['backends'])
+    except Exception:
+        raise
+
+    return (True, '')
+
+
+def _validate_backend_params(backends):
+    """
+    Validate configuration for backends.
+
+    :param backends: Ansible list containing backends configuration (only).
+    :type backends: ``list``
+
+    :return: True or raises ValueError
+    :rtype: ``bool`` or `class:ValueError`
+    """
+    fields = [
+        {'name': 'balancing_mode', 'type': str, 'values': ['UTILIZATION', 'RATE', 'CONNECTION']},
+        {'name': 'max_utilization', 'type': float},
+        {'name': 'max_connections', 'type': int},
+        {'name': 'max_rate', 'type': int},
+        {'name': 'max_rate_per_instance', 'type': float},
+    ]
+
+    if not backends:
+        raise ValueError('backends should be a non-empty list.')
+
+    for backend in backends:
+        try:
+            check_params(backend, fields)
+        except Exception:
+            raise
+
+        if 'max_rate' in backend and 'max_rate_per_instance' in backend:
+            raise ValueError('max_rate and max_rate_per_instance cannot both be set.')
+
+    return (True, '')
+
+
+def get_backend_service(gce, name):
+    """
+    Get a Backend Service from GCE.
+
+    :param gce: An initialized GCE driver object.
+    :type gce: :class: `GCENodeDriver`
+
+    :param name: Name of the Backend Service.
+    :type name: ``str``
+
+    :return: A GCEBackendService object or None.
+    :rtype: :class: `GCEBackendService` or None
+    """
+    try:
+        # Does the Backend Service already exist?
+        return gce.ex_get_backendservice(name=name)
+
+    except ResourceNotFoundError:
+        return None
+
+
+def get_healthcheck(gce, name):
+    return gce.ex_get_healthcheck(name)
+
+
+def get_instancegroup(gce, name, zone=None):
+    return gce.ex_get_instancegroup(name=name, zone=zone)
+
+
+def create_backend_service(gce, params):
+    """
+    Create a new Backend Service.
+
+    :param gce: An initialized GCE driver object.
+    :type gce: :class: `GCENodeDriver`
+
+    :param params: Dictionary of parameters needed by the module.
+    :type params: ``dict``
+
+    :return: Tuple of (changed, created) status flags.
+    :rtype: tuple in the format of (bool, bool)
+    """
+    from copy import deepcopy
+
+    changed = False
+    return_data = False
+    # only one healthcheck is currently supported
+    hc_name = params['healthchecks'][0]
+    hc = get_healthcheck(gce, hc_name)
+    backends = []
+    for backend in params['backends']:
+        ig = get_instancegroup(gce, backend['instance_group'],
+                               backend.get('zone', None))
+        kwargs = deepcopy(backend)
+        kwargs['instance_group'] = ig
+        backends.append(gce.ex_create_backend(
+            **kwargs))
+
+    bes = gce.ex_create_backendservice(
+        name=params['backend_service_name'], healthchecks=[hc], backends=backends,
+        enable_cdn=params['enable_cdn'], port_name=params['port_name'],
+        timeout_sec=params['timeout'], protocol=params['protocol'])
+
+    if bes:
+        changed = True
+        return_data = True
+
+    return (changed, return_data)
+
+
+def delete_backend_service(bes):
+    """
+    Delete a Backend Service. The Instance Groups are NOT destroyed.
+    """
+    changed = False
+    return_data = False
+    if bes.destroy():
+        changed = True
+        return_data = True
+    return (changed, return_data)
+
+
+def main():
+    module = AnsibleModule(argument_spec=dict(
+        backends=dict(type='list', required=True),
+        backend_service_name=dict(required=True),
+        healthchecks=dict(type='list', required=True),
+        service_account_email=dict(),
+        service_account_permissions=dict(type='list'),
+        enable_cdn=dict(type='bool'),
+        port_name=dict(type='str'),
+        protocol=dict(type='str', default='TCP',
+                      choices=['HTTP', 'HTTPS', 'SSL', 'TCP']),
+        timeout=dict(type='int'),
+        state=dict(choices=['absent', 'present'], default='present'),
+        pem_file=dict(),
+        credentials_file=dict(),
+        project_id=dict(), ), )
+
+    if not HAS_PYTHON26:
+        module.fail_json(
+            msg="GCE module requires python's 'ast' module, python v2.6+")
+    if not HAS_LIBCLOUD:
+        module.fail_json(
+            msg='libcloud with GCE Backend Service support (1.3+) required for this module.')
+
+    gce = gce_connect(module)
+    if not hasattr(gce, 'ex_create_instancegroupmanager'):
+        module.fail_json(
+            msg='libcloud with GCE Backend Service support (1.3+) required for this module.',
+            changed=False)
+
+    params = {}
+    params['state'] = module.params.get('state')
+    params['backend_service_name'] = module.params.get('backend_service_name')
+    params['backends'] = module.params.get('backends')
+    params['healthchecks'] = module.params.get('healthchecks')
+    params['enable_cdn'] = module.params.get('enable_cdn', None)
+    params['port_name'] = module.params.get('port_name', None)
+    params['protocol'] = module.params.get('protocol', None)
+    params['timeout'] = module.params.get('timeout', None)
+
+    try:
+        _validate_params(params)
+    except Exception as e:
+        module.fail_json(msg=str(e), changed=False)
+
+    changed = False
+    json_output = {'state': params['state']}
+    bes = get_backend_service(gce, params['backend_service_name'])
+
+    if not bes:
+        if params['state'] == 'absent':
+            # Doesn't exist and state==absent.
+            changed = False
+            module.fail_json(
+                msg="Cannot delete unknown backend service: %s" %
+                (params['backend_service_name']))
+        else:
+            # Create
+            (changed, json_output['backend_service_created']) = create_backend_service(gce,
+                                                                                       params)
+    elif params['state'] == 'absent':
+        # Delete
+        (changed, json_output['backend_service_deleted']) = delete_backend_service(bes)
+    else:
+        # TODO(supertom): Add update support when it is available in libcloud.
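+        # A rough sketch of what update support might look like once libcloud
+        # grows an update call; update_backend_service() and
+        # ex_update_backendservice() are hypothetical names, not existing APIs:
+        #
+        #     def update_backend_service(gce, bes, params):
+        #         bes.timeout_sec = params['timeout']
+        #         updated = gce.ex_update_backendservice(bes)  # hypothetical
+        #         return (True, updated is not None)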
+ changed = False + + json_output['changed'] = changed + json_output.update(params) + module.exit_json(**json_output) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/google/gcp_bigquery_dataset_facts.py b/plugins/modules/cloud/google/gcp_bigquery_dataset_facts.py new file mode 120000 index 0000000000..048b511508 --- /dev/null +++ b/plugins/modules/cloud/google/gcp_bigquery_dataset_facts.py @@ -0,0 +1 @@ +gcp_bigquery_dataset_info.py \ No newline at end of file diff --git a/plugins/modules/cloud/google/gcp_bigquery_dataset_info.py b/plugins/modules/cloud/google/gcp_bigquery_dataset_info.py new file mode 100644 index 0000000000..ba58001568 --- /dev/null +++ b/plugins/modules/cloud/google/gcp_bigquery_dataset_info.py @@ -0,0 +1,350 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (C) 2017 Google +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** AUTO GENERATED CODE *** +# +# ---------------------------------------------------------------------------- +# +# This file is automatically generated by Magic Modules and manual +# changes will be clobbered when the file is regenerated. +# +# Please read more about how to change this file at +# https://www.github.com/GoogleCloudPlatform/magic-modules +# +# ---------------------------------------------------------------------------- + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +################################################################################ +# Documentation +################################################################################ + +ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: gcp_bigquery_dataset_info +description: +- Gather info for GCP Dataset +short_description: Gather info for GCP Dataset +author: Google Inc. (@googlecloudplatform) +requirements: +- python >= 2.6 +- requests >= 2.18.4 +- google-auth >= 1.3.0 +options: + project: + description: + - The Google Cloud Platform project to use. + type: str + auth_kind: + description: + - The type of credential used. + type: str + required: true + choices: + - application + - machineaccount + - serviceaccount + service_account_contents: + description: + - The contents of a Service Account JSON file, either in a dictionary or as a + JSON string that represents it. + type: jsonarg + service_account_file: + description: + - The path of a Service Account JSON file if serviceaccount is selected as type. + type: path + service_account_email: + description: + - An optional service account email address if machineaccount is selected and + the user does not wish to use the default email. + type: str + scopes: + description: + - Array of scopes to be used + type: list + env_type: + description: + - Specifies which Ansible environment you're running this module within. + - This should not be set unless you know what you're doing. + - This only alters the User Agent string for any API requests. + type: str +notes: +- for authentication, you can set service_account_file using the C(gcp_service_account_file) + env variable. +- for authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS) + env variable. +- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL) + env variable. 
+- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable. +- For authentication, you can set scopes using the C(GCP_SCOPES) env variable. +- Environment variables values will only be used if the playbook values are not set. +- The I(service_account_email) and I(service_account_file) options are mutually exclusive. +''' + +EXAMPLES = ''' +- name: get info on a dataset + gcp_bigquery_dataset_info: + project: test_project + auth_kind: serviceaccount + service_account_file: "/tmp/auth.pem" +''' + +RETURN = ''' +resources: + description: List of resources + returned: always + type: complex + contains: + name: + description: + - Dataset name. + returned: success + type: str + access: + description: + - An array of objects that define dataset access for one or more entities. + returned: success + type: complex + contains: + domain: + description: + - A domain to grant access to. Any users signed in with the domain specified + will be granted the specified access . + returned: success + type: str + groupByEmail: + description: + - An email address of a Google Group to grant access to. + returned: success + type: str + role: + description: + - Describes the rights granted to the user specified by the other member + of the access object. Primitive, Predefined and custom roles are supported. + Predefined roles that have equivalent primitive roles are swapped by the + API to their Primitive counterparts, and will show a diff post-create. + See [official docs](U(https://cloud.google.com/bigquery/docs/access-control)). + returned: success + type: str + specialGroup: + description: + - A special group to grant access to. + - 'Possible values include: * `projectOwners`: Owners of the enclosing project.' + - "* `projectReaders`: Readers of the enclosing project." + - "* `projectWriters`: Writers of the enclosing project." + - "* `allAuthenticatedUsers`: All authenticated BigQuery users. ." + returned: success + type: str + userByEmail: + description: + - 'An email address of a user to grant access to. For example: fred@example.com + .' + returned: success + type: str + view: + description: + - A view from a different dataset to grant access to. Queries executed against + that view will have read access to tables in this dataset. The role field + is not required when this field is set. If that view is updated by any + user, access to the view needs to be granted again via an update operation. + returned: success + type: complex + contains: + datasetId: + description: + - The ID of the dataset containing this table. + returned: success + type: str + projectId: + description: + - The ID of the project containing this table. + returned: success + type: str + tableId: + description: + - The ID of the table. The ID must contain only letters (a-z, A-Z), + numbers (0-9), or underscores. The maximum length is 1,024 characters. + returned: success + type: str + creationTime: + description: + - The time when this dataset was created, in milliseconds since the epoch. + returned: success + type: int + datasetReference: + description: + - A reference that identifies the dataset. + returned: success + type: complex + contains: + datasetId: + description: + - A unique ID for this dataset, without the project name. The ID must contain + only letters (a-z, A-Z), numbers (0-9), or underscores. The maximum length + is 1,024 characters. + returned: success + type: str + projectId: + description: + - The ID of the project containing this dataset. 
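+      # For orientation, a complete datasetReference value looks like
+      # (illustrative ids): {"datasetId": "my_dataset", "projectId": "my-project"}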
+ returned: success + type: str + defaultTableExpirationMs: + description: + - The default lifetime of all tables in the dataset, in milliseconds. + - The minimum value is 3600000 milliseconds (one hour). + - Once this property is set, all newly-created tables in the dataset will have + an `expirationTime` property set to the creation time plus the value in this + property, and changing the value will only affect new tables, not existing + ones. When the `expirationTime` for a given table is reached, that table will + be deleted automatically. + - If a table's `expirationTime` is modified or removed before the table expires, + or if you provide an explicit `expirationTime` when creating a table, that + value takes precedence over the default expiration time indicated by this + property. + returned: success + type: int + defaultPartitionExpirationMs: + description: + - The default partition expiration for all partitioned tables in the dataset, + in milliseconds. + - Once this property is set, all newly-created partitioned tables in the dataset + will have an `expirationMs` property in the `timePartitioning` settings set + to this value, and changing the value will only affect new tables, not existing + ones. The storage in a partition will have an expiration time of its partition + time plus this value. + - 'Setting this property overrides the use of `defaultTableExpirationMs` for + partitioned tables: only one of `defaultTableExpirationMs` and `defaultPartitionExpirationMs` + will be used for any new partitioned table. If you provide an explicit `timePartitioning.expirationMs` + when creating or updating a partitioned table, that value takes precedence + over the default partition expiration time indicated by this property.' + returned: success + type: int + description: + description: + - A user-friendly description of the dataset. + returned: success + type: str + etag: + description: + - A hash of the resource. + returned: success + type: str + friendlyName: + description: + - A descriptive name for the dataset. + returned: success + type: str + id: + description: + - The fully-qualified unique name of the dataset in the format projectId:datasetId. + The dataset name without the project name is given in the datasetId field + . + returned: success + type: str + labels: + description: + - The labels associated with this dataset. You can use these to organize and + group your datasets . + returned: success + type: dict + lastModifiedTime: + description: + - The date when this dataset or any of its tables was last modified, in milliseconds + since the epoch. + returned: success + type: int + location: + description: + - The geographic location where the dataset should reside. + - See [official docs](U(https://cloud.google.com/bigquery/docs/dataset-locations)). + - There are two types of locations, regional or multi-regional. A regional location + is a specific geographic place, such as Tokyo, and a multi-regional location + is a large geographic area, such as the United States, that contains at least + two geographic places. + - 'Possible regional values include: `asia-east1`, `asia-northeast1`, `asia-southeast1`, + `australia-southeast1`, `europe-north1`, `europe-west2` and `us-east4`.' + - 'Possible multi-regional values: `EU` and `US`.' + - The default value is multi-regional location `US`. + - Changing this forces a new resource to be created. + returned: success + type: str + defaultEncryptionConfiguration: + description: + - The default encryption key for all tables in the dataset. 
Once this property + is set, all newly-created partitioned tables in the dataset will have encryption + key set to this value, unless table creation request (or query) overrides + the key. + returned: success + type: complex + contains: + kmsKeyName: + description: + - Describes the Cloud KMS encryption key that will be used to protect destination + BigQuery table. The BigQuery Service Account associated with your project + requires access to this encryption key. + returned: success + type: str +''' + +################################################################################ +# Imports +################################################################################ +from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest +import json + +################################################################################ +# Main +################################################################################ + + +def main(): + module = GcpModule(argument_spec=dict()) + + if not module.params['scopes']: + module.params['scopes'] = ['https://www.googleapis.com/auth/bigquery'] + + return_value = {'resources': fetch_list(module, collection(module))} + module.exit_json(**return_value) + + +def collection(module): + return "https://www.googleapis.com/bigquery/v2/projects/{project}/datasets".format(**module.params) + + +def fetch_list(module, link): + auth = GcpSession(module, 'bigquery') + return auth.list(link, return_if_object, array_name='datasets') + + +def return_if_object(module, response): + # If not found, return nothing. + if response.status_code == 404: + return None + + # If no content, return nothing. + if response.status_code == 204: + return None + + try: + module.raise_for_status(response) + result = response.json() + except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst: + module.fail_json(msg="Invalid JSON response with error: %s" % inst) + + if navigate_hash(result, ['error', 'errors']): + module.fail_json(msg=navigate_hash(result, ['error', 'errors'])) + + return result + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/cloud/google/gcp_bigquery_table_facts.py b/plugins/modules/cloud/google/gcp_bigquery_table_facts.py new file mode 120000 index 0000000000..289c79bcfe --- /dev/null +++ b/plugins/modules/cloud/google/gcp_bigquery_table_facts.py @@ -0,0 +1 @@ +gcp_bigquery_table_info.py \ No newline at end of file diff --git a/plugins/modules/cloud/google/gcp_bigquery_table_info.py b/plugins/modules/cloud/google/gcp_bigquery_table_info.py new file mode 100644 index 0000000000..6f3aebd05a --- /dev/null +++ b/plugins/modules/cloud/google/gcp_bigquery_table_info.py @@ -0,0 +1,625 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (C) 2017 Google +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** AUTO GENERATED CODE *** +# +# ---------------------------------------------------------------------------- +# +# This file is automatically generated by Magic Modules and manual +# changes will be clobbered when the file is regenerated. 
+# +# Please read more about how to change this file at +# https://www.github.com/GoogleCloudPlatform/magic-modules +# +# ---------------------------------------------------------------------------- + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +################################################################################ +# Documentation +################################################################################ + +ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: gcp_bigquery_table_info +description: +- Gather info for GCP Table +short_description: Gather info for GCP Table +author: Google Inc. (@googlecloudplatform) +requirements: +- python >= 2.6 +- requests >= 2.18.4 +- google-auth >= 1.3.0 +options: + dataset: + description: + - Name of the dataset. + required: false + type: str + project: + description: + - The Google Cloud Platform project to use. + type: str + auth_kind: + description: + - The type of credential used. + type: str + required: true + choices: + - application + - machineaccount + - serviceaccount + service_account_contents: + description: + - The contents of a Service Account JSON file, either in a dictionary or as a + JSON string that represents it. + type: jsonarg + service_account_file: + description: + - The path of a Service Account JSON file if serviceaccount is selected as type. + type: path + service_account_email: + description: + - An optional service account email address if machineaccount is selected and + the user does not wish to use the default email. + type: str + scopes: + description: + - Array of scopes to be used + type: list + env_type: + description: + - Specifies which Ansible environment you're running this module within. + - This should not be set unless you know what you're doing. + - This only alters the User Agent string for any API requests. + type: str +notes: +- for authentication, you can set service_account_file using the C(gcp_service_account_file) + env variable. +- for authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS) + env variable. +- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL) + env variable. +- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable. +- For authentication, you can set scopes using the C(GCP_SCOPES) env variable. +- Environment variables values will only be used if the playbook values are not set. +- The I(service_account_email) and I(service_account_file) options are mutually exclusive. +''' + +EXAMPLES = ''' +- name: get info on a table + gcp_bigquery_table_info: + dataset: example_dataset + project: test_project + auth_kind: serviceaccount + service_account_file: "/tmp/auth.pem" +''' + +RETURN = ''' +resources: + description: List of resources + returned: always + type: complex + contains: + tableReference: + description: + - Reference describing the ID of this table. + returned: success + type: complex + contains: + datasetId: + description: + - The ID of the dataset containing this table. + returned: success + type: str + projectId: + description: + - The ID of the project containing this table. + returned: success + type: str + tableId: + description: + - The ID of the table. + returned: success + type: str + clustering: + description: + - One or more fields on which data should be clustered. 
Only top-level, non-repeated, + simple-type fields are supported. When you cluster a table using multiple + columns, the order of columns you specify is important. The order of the specified + columns determines the sort order of the data. + returned: success + type: list + creationTime: + description: + - The time when this dataset was created, in milliseconds since the epoch. + returned: success + type: int + description: + description: + - A user-friendly description of the dataset. + returned: success + type: str + friendlyName: + description: + - A descriptive name for this table. + returned: success + type: str + id: + description: + - An opaque ID uniquely identifying the table. + returned: success + type: str + labels: + description: + - The labels associated with this dataset. You can use these to organize and + group your datasets . + returned: success + type: dict + lastModifiedTime: + description: + - The time when this table was last modified, in milliseconds since the epoch. + returned: success + type: int + location: + description: + - The geographic location where the table resides. This value is inherited from + the dataset. + returned: success + type: str + name: + description: + - Name of the table. + returned: success + type: str + numBytes: + description: + - The size of this table in bytes, excluding any data in the streaming buffer. + returned: success + type: int + numLongTermBytes: + description: + - The number of bytes in the table that are considered "long-term storage". + returned: success + type: int + numRows: + description: + - The number of rows of data in this table, excluding any data in the streaming + buffer. + returned: success + type: int + requirePartitionFilter: + description: + - If set to true, queries over this table require a partition filter that can + be used for partition elimination to be specified. + returned: success + type: bool + type: + description: + - Describes the table type. + returned: success + type: str + view: + description: + - The view definition. + returned: success + type: complex + contains: + useLegacySql: + description: + - Specifies whether to use BigQuery's legacy SQL for this view . + returned: success + type: bool + userDefinedFunctionResources: + description: + - Describes user-defined function resources used in the query. + returned: success + type: complex + contains: + inlineCode: + description: + - An inline resource that contains code for a user-defined function + (UDF). Providing a inline code resource is equivalent to providing + a URI for a file containing the same code. + returned: success + type: str + resourceUri: + description: + - A code resource to load from a Google Cloud Storage URI (gs://bucket/path). + returned: success + type: str + timePartitioning: + description: + - If specified, configures time-based partitioning for this table. + returned: success + type: complex + contains: + expirationMs: + description: + - Number of milliseconds for which to keep the storage for a partition. + returned: success + type: int + field: + description: + - If not set, the table is partitioned by pseudo column, referenced via + either '_PARTITIONTIME' as TIMESTAMP type, or '_PARTITIONDATE' as DATE + type. If field is specified, the table is instead partitioned by this + field. The field must be a top-level TIMESTAMP or DATE field. Its mode + must be NULLABLE or REQUIRED. + returned: success + type: str + type: + description: + - The only type supported is DAY, which will generate one partition per + day. 
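+          # Combining the three fields above, an illustrative timePartitioning
+          # value (made-up numbers; 7776000000 ms is 90 days):
+          #   {"type": "DAY", "field": "event_date", "expirationMs": 7776000000}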
+ returned: success + type: str + streamingBuffer: + description: + - Contains information regarding this table's streaming buffer, if one is present. + This field will be absent if the table is not being streamed to or if there + is no data in the streaming buffer. + returned: success + type: complex + contains: + estimatedBytes: + description: + - A lower-bound estimate of the number of bytes currently in the streaming + buffer. + returned: success + type: int + estimatedRows: + description: + - A lower-bound estimate of the number of rows currently in the streaming + buffer. + returned: success + type: int + oldestEntryTime: + description: + - Contains the timestamp of the oldest entry in the streaming buffer, in + milliseconds since the epoch, if the streaming buffer is available. + returned: success + type: int + schema: + description: + - Describes the schema of this table. + returned: success + type: complex + contains: + fields: + description: + - Describes the fields in a table. + returned: success + type: complex + contains: + description: + description: + - The field description. The maximum length is 1,024 characters. + returned: success + type: str + fields: + description: + - Describes the nested schema fields if the type property is set to + RECORD. + returned: success + type: list + mode: + description: + - The field mode. + returned: success + type: str + name: + description: + - The field name. + returned: success + type: str + type: + description: + - The field data type. + returned: success + type: str + encryptionConfiguration: + description: + - Custom encryption configuration. + returned: success + type: complex + contains: + kmsKeyName: + description: + - Describes the Cloud KMS encryption key that will be used to protect destination + BigQuery table. The BigQuery Service Account associated with your project + requires access to this encryption key. + returned: success + type: str + expirationTime: + description: + - The time when this table expires, in milliseconds since the epoch. If not + present, the table will persist indefinitely. + returned: success + type: int + externalDataConfiguration: + description: + - Describes the data format, location, and other properties of a table stored + outside of BigQuery. By defining these properties, the data source can then + be queried as if it were a standard BigQuery table. + returned: success + type: complex + contains: + autodetect: + description: + - Try to detect schema and format options automatically. Any option specified + explicitly will be honored. + returned: success + type: bool + compression: + description: + - The compression type of the data source. + returned: success + type: str + ignoreUnknownValues: + description: + - Indicates if BigQuery should allow extra values that are not represented + in the table schema . + returned: success + type: bool + maxBadRecords: + description: + - The maximum number of bad records that BigQuery can ignore when reading + data . + returned: success + type: int + sourceFormat: + description: + - The data format. + returned: success + type: str + sourceUris: + description: + - The fully-qualified URIs that point to your data in Google Cloud. + - 'For Google Cloud Storage URIs: Each URI can contain one ''*'' wildcard + character and it must come after the ''bucket'' name. Size limits related + to load jobs apply to external data sources. 
For Google Cloud Bigtable + URIs: Exactly one URI can be specified and it has be a fully specified + and valid HTTPS URL for a Google Cloud Bigtable table. For Google Cloud + Datastore backups, exactly one URI can be specified. Also, the ''*'' wildcard + character is not allowed.' + returned: success + type: list + schema: + description: + - The schema for the data. Schema is required for CSV and JSON formats. + returned: success + type: complex + contains: + fields: + description: + - Describes the fields in a table. + returned: success + type: complex + contains: + description: + description: + - The field description. + returned: success + type: str + fields: + description: + - Describes the nested schema fields if the type property is set + to RECORD . + returned: success + type: list + mode: + description: + - Field mode. + returned: success + type: str + name: + description: + - Field name. + returned: success + type: str + type: + description: + - Field data type. + returned: success + type: str + googleSheetsOptions: + description: + - Additional options if sourceFormat is set to GOOGLE_SHEETS. + returned: success + type: complex + contains: + skipLeadingRows: + description: + - The number of rows at the top of a Google Sheet that BigQuery will + skip when reading the data. + returned: success + type: int + csvOptions: + description: + - Additional properties to set if sourceFormat is set to CSV. + returned: success + type: complex + contains: + allowJaggedRows: + description: + - Indicates if BigQuery should accept rows that are missing trailing + optional columns . + returned: success + type: bool + allowQuotedNewlines: + description: + - Indicates if BigQuery should allow quoted data sections that contain + newline characters in a CSV file . + returned: success + type: bool + encoding: + description: + - The character encoding of the data. + returned: success + type: str + fieldDelimiter: + description: + - The separator for fields in a CSV file. + returned: success + type: str + quote: + description: + - The value that is used to quote data sections in a CSV file. + returned: success + type: str + skipLeadingRows: + description: + - The number of rows at the top of a CSV file that BigQuery will skip + when reading the data. + returned: success + type: int + bigtableOptions: + description: + - Additional options if sourceFormat is set to BIGTABLE. + returned: success + type: complex + contains: + ignoreUnspecifiedColumnFamilies: + description: + - If field is true, then the column families that are not specified + in columnFamilies list are not exposed in the table schema . + returned: success + type: bool + readRowkeyAsString: + description: + - If field is true, then the rowkey column families will be read and + converted to string. + returned: success + type: bool + columnFamilies: + description: + - List of column families to expose in the table schema along with their + types. + returned: success + type: complex + contains: + columns: + description: + - Lists of columns that should be exposed as individual fields as + opposed to a list of (column name, value) pairs. + returned: success + type: complex + contains: + encoding: + description: + - The encoding of the values when the type is not STRING. + returned: success + type: str + fieldName: + description: + - If the qualifier is not a valid BigQuery field identifier, + a valid identifier must be provided as the column field name + and is used as field name in queries. 
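+              # Illustrative mapping (made-up qualifier): a Bigtable qualifier
+              # such as "order-total" is not a valid identifier, so it could be
+              # exposed as fieldName: order_total.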
+ returned: success + type: str + onlyReadLatest: + description: + - If this is set, only the latest version of value in this column + are exposed . + returned: success + type: bool + qualifierString: + description: + - Qualifier of the column. + returned: success + type: str + type: + description: + - The type to convert the value in cells of this column. + returned: success + type: str + encoding: + description: + - The encoding of the values when the type is not STRING. + returned: success + type: str + familyId: + description: + - Identifier of the column family. + returned: success + type: str + onlyReadLatest: + description: + - If this is set only the latest version of value are exposed for + all columns in this column family . + returned: success + type: bool + type: + description: + - The type to convert the value in cells of this column family. + returned: success + type: str + dataset: + description: + - Name of the dataset. + returned: success + type: str +''' + +################################################################################ +# Imports +################################################################################ +from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest +import json + +################################################################################ +# Main +################################################################################ + + +def main(): + module = GcpModule(argument_spec=dict(dataset=dict(type='str'))) + + if not module.params['scopes']: + module.params['scopes'] = ['https://www.googleapis.com/auth/bigquery'] + + return_value = {'resources': fetch_list(module, collection(module))} + module.exit_json(**return_value) + + +def collection(module): + return "https://www.googleapis.com/bigquery/v2/projects/{project}/datasets/{dataset}/tables".format(**module.params) + + +def fetch_list(module, link): + auth = GcpSession(module, 'bigquery') + return auth.list(link, return_if_object, array_name='tables') + + +def return_if_object(module, response): + # If not found, return nothing. + if response.status_code == 404: + return None + + # If no content, return nothing. 
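+    # (A 204 "No Content" response has no body, so there is nothing to parse.)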
+ if response.status_code == 204: + return None + + try: + module.raise_for_status(response) + result = response.json() + except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst: + module.fail_json(msg="Invalid JSON response with error: %s" % inst) + + if navigate_hash(result, ['error', 'errors']): + module.fail_json(msg=navigate_hash(result, ['error', 'errors'])) + + return result + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/cloud/google/gcp_cloudbuild_trigger_facts.py b/plugins/modules/cloud/google/gcp_cloudbuild_trigger_facts.py new file mode 120000 index 0000000000..66c2c93f2c --- /dev/null +++ b/plugins/modules/cloud/google/gcp_cloudbuild_trigger_facts.py @@ -0,0 +1 @@ +gcp_cloudbuild_trigger_info.py \ No newline at end of file diff --git a/plugins/modules/cloud/google/gcp_cloudbuild_trigger_info.py b/plugins/modules/cloud/google/gcp_cloudbuild_trigger_info.py new file mode 100644 index 0000000000..f081595e96 --- /dev/null +++ b/plugins/modules/cloud/google/gcp_cloudbuild_trigger_info.py @@ -0,0 +1,410 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (C) 2017 Google +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** AUTO GENERATED CODE *** +# +# ---------------------------------------------------------------------------- +# +# This file is automatically generated by Magic Modules and manual +# changes will be clobbered when the file is regenerated. +# +# Please read more about how to change this file at +# https://www.github.com/GoogleCloudPlatform/magic-modules +# +# ---------------------------------------------------------------------------- + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +################################################################################ +# Documentation +################################################################################ + +ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: gcp_cloudbuild_trigger_info +description: +- Gather info for GCP Trigger +short_description: Gather info for GCP Trigger +author: Google Inc. (@googlecloudplatform) +requirements: +- python >= 2.6 +- requests >= 2.18.4 +- google-auth >= 1.3.0 +options: + project: + description: + - The Google Cloud Platform project to use. + type: str + auth_kind: + description: + - The type of credential used. + type: str + required: true + choices: + - application + - machineaccount + - serviceaccount + service_account_contents: + description: + - The contents of a Service Account JSON file, either in a dictionary or as a + JSON string that represents it. + type: jsonarg + service_account_file: + description: + - The path of a Service Account JSON file if serviceaccount is selected as type. + type: path + service_account_email: + description: + - An optional service account email address if machineaccount is selected and + the user does not wish to use the default email. + type: str + scopes: + description: + - Array of scopes to be used + type: list + env_type: + description: + - Specifies which Ansible environment you're running this module within. + - This should not be set unless you know what you're doing. + - This only alters the User Agent string for any API requests. 
+ type: str +notes: +- for authentication, you can set service_account_file using the C(gcp_service_account_file) + env variable. +- for authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS) + env variable. +- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL) + env variable. +- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable. +- For authentication, you can set scopes using the C(GCP_SCOPES) env variable. +- Environment variables values will only be used if the playbook values are not set. +- The I(service_account_email) and I(service_account_file) options are mutually exclusive. +''' + +EXAMPLES = ''' +- name: get info on a trigger + gcp_cloudbuild_trigger_info: + project: test_project + auth_kind: serviceaccount + service_account_file: "/tmp/auth.pem" +''' + +RETURN = ''' +resources: + description: List of resources + returned: always + type: complex + contains: + id: + description: + - The unique identifier for the trigger. + returned: success + type: str + name: + description: + - Name of the trigger. Must be unique within the project. + returned: success + type: str + description: + description: + - Human-readable description of the trigger. + returned: success + type: str + disabled: + description: + - Whether the trigger is disabled or not. If true, the trigger will never result + in a build. + returned: success + type: bool + createTime: + description: + - Time when the trigger was created. + returned: success + type: str + substitutions: + description: + - Substitutions data for Build resource. + returned: success + type: dict + filename: + description: + - Path, from the source root, to a file whose contents is used for the template. + Either a filename or build template must be provided. + returned: success + type: str + ignoredFiles: + description: + - ignoredFiles and includedFiles are file glob matches using http://godoc/pkg/path/filepath#Match + extended with support for `**`. + - If ignoredFiles and changed files are both empty, then they are not used to + determine whether or not to trigger a build. + - If ignoredFiles is not empty, then we ignore any files that match any of the + ignored_file globs. If the change has no files that are outside of the ignoredFiles + globs, then we do not trigger a build. + returned: success + type: list + includedFiles: + description: + - ignoredFiles and includedFiles are file glob matches using http://godoc/pkg/path/filepath#Match + extended with support for `**`. + - If any of the files altered in the commit pass the ignoredFiles filter and + includedFiles is empty, then as far as this filter is concerned, we should + trigger the build. + - If any of the files altered in the commit pass the ignoredFiles filter and + includedFiles is not empty, then we make sure that at least one of those files + matches a includedFiles glob. If not, then we do not trigger a build. + returned: success + type: list + triggerTemplate: + description: + - Template describing the types of source changes to trigger a build. + - Branch and tag names in trigger templates are interpreted as regular expressions. + Any branch or tag change that matches that regular expression will trigger + a build. + returned: success + type: complex + contains: + projectId: + description: + - ID of the project that owns the Cloud Source Repository. If omitted, the + project ID requesting the build is assumed. 
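+      # Putting the pieces together, an illustrative triggerTemplate value
+      # (made-up project and repo; branchName is a regular expression):
+      #   {"projectId": "my-project", "repoName": "my-repo", "branchName": ".*"}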
+ returned: success + type: str + repoName: + description: + - Name of the Cloud Source Repository. If omitted, the name "default" is + assumed. + returned: success + type: str + dir: + description: + - Directory, relative to the source root, in which to run the build. + - This must be a relative path. If a step's dir is specified and is an absolute + path, this value is ignored for that step's execution. + returned: success + type: str + branchName: + description: + - Name of the branch to build. Exactly one a of branch name, tag, or commit + SHA must be provided. + - This field is a regular expression. + returned: success + type: str + tagName: + description: + - Name of the tag to build. Exactly one of a branch name, tag, or commit + SHA must be provided. + - This field is a regular expression. + returned: success + type: str + commitSha: + description: + - Explicit commit SHA to build. Exactly one of a branch name, tag, or commit + SHA must be provided. + returned: success + type: str + build: + description: + - Contents of the build template. Either a filename or build template must be + provided. + returned: success + type: complex + contains: + tags: + description: + - Tags for annotation of a Build. These are not docker tags. + returned: success + type: list + images: + description: + - A list of images to be pushed upon the successful completion of all build + steps. + - The images are pushed using the builder service account's credentials. + - The digests of the pushed images will be stored in the Build resource's + results field. + - If any of the images fail to be pushed, the build status is marked FAILURE. + returned: success + type: list + steps: + description: + - The operations to be performed on the workspace. + returned: success + type: complex + contains: + name: + description: + - The name of the container image that will run this particular build + step. + - If the image is available in the host's Docker daemon's cache, it + will be run directly. If not, the host will attempt to pull the image + first, using the builder service account's credentials if necessary. + - The Docker daemon's cache will already have the latest versions of + all of the officially supported build steps (U(https://github.com/GoogleCloudPlatform/cloud-builders)). + - The Docker daemon will also have cached many of the layers for some + popular images, like "ubuntu", "debian", but they will be refreshed + at the time you attempt to use them. + - If you built an image in a previous build step, it will be stored + in the host's Docker daemon's cache and is available to use as the + name for a later build step. + returned: success + type: str + args: + description: + - A list of arguments that will be presented to the step when it is + started. + - If the image used to run the step's container has an entrypoint, the + args are used as arguments to that entrypoint. If the image does not + define an entrypoint, the first element in args is used as the entrypoint, + and the remainder will be used as arguments. + returned: success + type: list + env: + description: + - A list of environment variable definitions to be used when running + a step. + - The elements are of the form "KEY=VALUE" for the environment variable + "KEY" being given the value "VALUE". + returned: success + type: list + id: + description: + - Unique identifier for this build step, used in `wait_for` to reference + this build step as a dependency. 
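+          # Illustrative build config fragment (not an API response) showing a
+          # step id referenced from a later step's waitFor:
+          #   steps:
+          #   - name: gcr.io/cloud-builders/docker
+          #     id: build-image
+          #   - name: gcr.io/cloud-builders/gcloud
+          #     waitFor: ['build-image']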
+ returned: success + type: str + entrypoint: + description: + - Entrypoint to be used instead of the build step image's default entrypoint. + - If unset, the image's default entrypoint is used . + returned: success + type: str + dir: + description: + - Working directory to use when running this step's container. + - If this value is a relative path, it is relative to the build's working + directory. If this value is absolute, it may be outside the build's + working directory, in which case the contents of the path may not + be persisted across build step executions, unless a `volume` for that + path is specified. + - If the build specifies a `RepoSource` with `dir` and a step with a + `dir`, which specifies an absolute path, the `RepoSource` `dir` is + ignored for the step's execution. + returned: success + type: str + secretEnv: + description: + - A list of environment variables which are encrypted using a Cloud + Key Management Service crypto key. These values must be specified + in the build's `Secret`. + returned: success + type: list + timeout: + description: + - Time limit for executing this build step. If not defined, the step + has no time limit and will be allowed to continue to run until either + it completes or the build itself times out. + returned: success + type: str + timing: + description: + - Output only. Stores timing information for executing this build step. + returned: success + type: str + volumes: + description: + - List of volumes to mount into the build step. + - Each volume is created as an empty volume prior to execution of the + build step. Upon completion of the build, volumes and their contents + are discarded. + - Using a named volume in only one step is not valid as it is indicative + of a build request with an incorrect configuration. + returned: success + type: complex + contains: + name: + description: + - Name of the volume to mount. + - Volume names must be unique per build step and must be valid names + for Docker volumes. Each named volume must be used by at least + two build steps. + returned: success + type: str + path: + description: + - Path at which to mount the volume. + - Paths must be absolute and cannot conflict with other volume paths + on the same build step or with certain reserved volume paths. + returned: success + type: str + waitFor: + description: + - The ID(s) of the step(s) that this build step depends on. + - This build step will not start until all the build steps in `wait_for` + have completed successfully. If `wait_for` is empty, this build step + will start when all previous build steps in the `Build.Steps` list + have completed successfully. 
+ returned: success + type: list +''' + +################################################################################ +# Imports +################################################################################ +from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest +import json + +################################################################################ +# Main +################################################################################ + + +def main(): + module = GcpModule(argument_spec=dict()) + + if not module.params['scopes']: + module.params['scopes'] = ['https://www.googleapis.com/auth/cloud-platform'] + + return_value = {'resources': fetch_list(module, collection(module))} + module.exit_json(**return_value) + + +def collection(module): + return "https://cloudbuild.googleapis.com/v1/projects/{project}/triggers".format(**module.params) + + +def fetch_list(module, link): + auth = GcpSession(module, 'cloudbuild') + return auth.list(link, return_if_object, array_name='triggers') + + +def return_if_object(module, response): + # If not found, return nothing. + if response.status_code == 404: + return None + + # If no content, return nothing. + if response.status_code == 204: + return None + + try: + module.raise_for_status(response) + result = response.json() + except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst: + module.fail_json(msg="Invalid JSON response with error: %s" % inst) + + if navigate_hash(result, ['error', 'errors']): + module.fail_json(msg=navigate_hash(result, ['error', 'errors'])) + + return result + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/cloud/google/gcp_compute_address_facts.py b/plugins/modules/cloud/google/gcp_compute_address_facts.py new file mode 120000 index 0000000000..d12b7e4d49 --- /dev/null +++ b/plugins/modules/cloud/google/gcp_compute_address_facts.py @@ -0,0 +1 @@ +gcp_compute_address_info.py \ No newline at end of file diff --git a/plugins/modules/cloud/google/gcp_compute_address_info.py b/plugins/modules/cloud/google/gcp_compute_address_info.py new file mode 100644 index 0000000000..b6c9b47653 --- /dev/null +++ b/plugins/modules/cloud/google/gcp_compute_address_info.py @@ -0,0 +1,264 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (C) 2017 Google +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** AUTO GENERATED CODE *** +# +# ---------------------------------------------------------------------------- +# +# This file is automatically generated by Magic Modules and manual +# changes will be clobbered when the file is regenerated. 
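+#
+# Note on the filters option: multiple filter expressions are combined with
+# AND semantics. The query_options() helper below parenthesizes and joins
+# them, so the illustrative input ['name = test', 'status = RESERVED'] becomes
+# the single filter string '(name = test) (status = RESERVED)'.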
+#
+# Please read more about how to change this file at
+# https://www.github.com/GoogleCloudPlatform/magic-modules
+#
+# ----------------------------------------------------------------------------
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+################################################################################
+# Documentation
+################################################################################
+
+ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'}
+
+DOCUMENTATION = '''
+---
+module: gcp_compute_address_info
+description:
+- Gather info for GCP Address
+short_description: Gather info for GCP Address
+author: Google Inc. (@googlecloudplatform)
+requirements:
+- python >= 2.6
+- requests >= 2.18.4
+- google-auth >= 1.3.0
+options:
+  filters:
+    description:
+    - A list of filter value pairs. Available filters are listed here U(https://cloud.google.com/sdk/gcloud/reference/topic/filters).
+    - Each additional filter in the list will be added as an AND condition (filter1
+      and filter2).
+    type: list
+  region:
+    description:
+    - URL of the region where the regional address resides.
+    - This field is not applicable to global addresses.
+    required: true
+    type: str
+  project:
+    description:
+    - The Google Cloud Platform project to use.
+    type: str
+  auth_kind:
+    description:
+    - The type of credential used.
+    type: str
+    required: true
+    choices:
+    - application
+    - machineaccount
+    - serviceaccount
+  service_account_contents:
+    description:
+    - The contents of a Service Account JSON file, either in a dictionary or as a
+      JSON string that represents it.
+    type: jsonarg
+  service_account_file:
+    description:
+    - The path of a Service Account JSON file if serviceaccount is selected as type.
+    type: path
+  service_account_email:
+    description:
+    - An optional service account email address if machineaccount is selected and
+      the user does not wish to use the default email.
+    type: str
+  scopes:
+    description:
+    - Array of scopes to be used
+    type: list
+  env_type:
+    description:
+    - Specifies which Ansible environment you're running this module within.
+    - This should not be set unless you know what you're doing.
+    - This only alters the User Agent string for any API requests.
+    type: str
+notes:
+- For authentication, you can set service_account_file using the C(GCP_SERVICE_ACCOUNT_FILE)
+  env variable.
+- For authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS)
+  env variable.
+- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL)
+  env variable.
+- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable.
+- For authentication, you can set scopes using the C(GCP_SCOPES) env variable.
+- Environment variable values will only be used if the playbook values are not set.
+- The I(service_account_email) and I(service_account_file) options are mutually exclusive.
+'''
+
+EXAMPLES = '''
+- name: get info on an address
+  gcp_compute_address_info:
+    region: us-west1
+    filters:
+    - name = test_object
+    project: test_project
+    auth_kind: serviceaccount
+    service_account_file: "/tmp/auth.pem"
+'''
+
+RETURN = '''
+resources:
+  description: List of resources
+  returned: always
+  type: complex
+  contains:
+    address:
+      description:
+      - The static external IP address represented by this resource. Only IPv4 is
+        supported. An address may only be specified for INTERNAL address types.
The + IP address must be inside the specified subnetwork, if any. + returned: success + type: str + addressType: + description: + - The type of address to reserve, either INTERNAL or EXTERNAL. + - If unspecified, defaults to EXTERNAL. + returned: success + type: str + creationTimestamp: + description: + - Creation timestamp in RFC3339 text format. + returned: success + type: str + description: + description: + - An optional description of this resource. + returned: success + type: str + id: + description: + - The unique identifier for the resource. + returned: success + type: int + name: + description: + - Name of the resource. The name must be 1-63 characters long, and comply with + RFC1035. Specifically, the name must be 1-63 characters long and match the + regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character + must be a lowercase letter, and all following characters must be a dash, lowercase + letter, or digit, except the last character, which cannot be a dash. + returned: success + type: str + purpose: + description: + - 'The purpose of this resource, which can be one of the following values: - + GCE_ENDPOINT for addresses that are used by VM instances, alias IP ranges, + internal load balancers, and similar resources.' + - This should only be set when using an Internal address. + returned: success + type: str + networkTier: + description: + - 'The networking tier used for configuring this address. This field can take + the following values: PREMIUM or STANDARD. If this field is not specified, + it is assumed to be PREMIUM.' + returned: success + type: str + subnetwork: + description: + - The URL of the subnetwork in which to reserve the address. If an IP address + is specified, it must be within the subnetwork's IP range. + - This field can only be used with INTERNAL type with GCE_ENDPOINT/DNS_RESOLVER + purposes. + returned: success + type: dict + users: + description: + - The URLs of the resources that are using this address. + returned: success + type: list + region: + description: + - URL of the region where the regional address resides. + - This field is not applicable to global addresses. 
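+      # Illustrative region URL (made-up project):
+      #   https://www.googleapis.com/compute/v1/projects/my-project/regions/us-west1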
+ returned: success + type: str +''' + +################################################################################ +# Imports +################################################################################ +from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest +import json + +################################################################################ +# Main +################################################################################ + + +def main(): + module = GcpModule(argument_spec=dict(filters=dict(type='list', elements='str'), region=dict(required=True, type='str'))) + + if not module.params['scopes']: + module.params['scopes'] = ['https://www.googleapis.com/auth/compute'] + + return_value = {'resources': fetch_list(module, collection(module), query_options(module.params['filters']))} + module.exit_json(**return_value) + + +def collection(module): + return "https://www.googleapis.com/compute/v1/projects/{project}/regions/{region}/addresses".format(**module.params) + + +def fetch_list(module, link, query): + auth = GcpSession(module, 'compute') + return auth.list(link, return_if_object, array_name='items', params={'filter': query}) + + +def query_options(filters): + if not filters: + return '' + + if len(filters) == 1: + return filters[0] + else: + queries = [] + for f in filters: + # For multiple queries, all queries should have () + if f[0] != '(' and f[-1] != ')': + queries.append("(%s)" % ''.join(f)) + else: + queries.append(f) + + return ' '.join(queries) + + +def return_if_object(module, response): + # If not found, return nothing. + if response.status_code == 404: + return None + + # If no content, return nothing. + if response.status_code == 204: + return None + + try: + module.raise_for_status(response) + result = response.json() + except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst: + module.fail_json(msg="Invalid JSON response with error: %s" % inst) + + if navigate_hash(result, ['error', 'errors']): + module.fail_json(msg=navigate_hash(result, ['error', 'errors'])) + + return result + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/cloud/google/gcp_compute_backend_bucket_facts.py b/plugins/modules/cloud/google/gcp_compute_backend_bucket_facts.py new file mode 120000 index 0000000000..d80cf8b0ae --- /dev/null +++ b/plugins/modules/cloud/google/gcp_compute_backend_bucket_facts.py @@ -0,0 +1 @@ +gcp_compute_backend_bucket_info.py \ No newline at end of file diff --git a/plugins/modules/cloud/google/gcp_compute_backend_bucket_info.py b/plugins/modules/cloud/google/gcp_compute_backend_bucket_info.py new file mode 100644 index 0000000000..9da18d8041 --- /dev/null +++ b/plugins/modules/cloud/google/gcp_compute_backend_bucket_info.py @@ -0,0 +1,239 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (C) 2017 Google +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** AUTO GENERATED CODE *** +# +# ---------------------------------------------------------------------------- +# +# This file is automatically generated by Magic Modules and manual +# changes will be clobbered when the file is regenerated. 
+# +# Please read more about how to change this file at +# https://www.github.com/GoogleCloudPlatform/magic-modules +# +# ---------------------------------------------------------------------------- + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +################################################################################ +# Documentation +################################################################################ + +ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: gcp_compute_backend_bucket_info +description: +- Gather info for GCP BackendBucket +short_description: Gather info for GCP BackendBucket +author: Google Inc. (@googlecloudplatform) +requirements: +- python >= 2.6 +- requests >= 2.18.4 +- google-auth >= 1.3.0 +options: + filters: + description: + - A list of filter value pairs. Available filters are listed here U(https://cloud.google.com/sdk/gcloud/reference/topic/filters). + - Each additional filter in the list will be added as an AND condition (filter1 + and filter2). + type: list + project: + description: + - The Google Cloud Platform project to use. + type: str + auth_kind: + description: + - The type of credential used. + type: str + required: true + choices: + - application + - machineaccount + - serviceaccount + service_account_contents: + description: + - The contents of a Service Account JSON file, either in a dictionary or as a + JSON string that represents it. + type: jsonarg + service_account_file: + description: + - The path of a Service Account JSON file if serviceaccount is selected as type. + type: path + service_account_email: + description: + - An optional service account email address if machineaccount is selected and + the user does not wish to use the default email. + type: str + scopes: + description: + - Array of scopes to be used. + type: list + env_type: + description: + - Specifies which Ansible environment you're running this module within. + - This should not be set unless you know what you're doing. + - This only alters the User Agent string for any API requests. + type: str +notes: +- For authentication, you can set service_account_file using the C(GCP_SERVICE_ACCOUNT_FILE) + env variable. +- For authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS) + env variable. +- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL) + env variable. +- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable. +- For authentication, you can set scopes using the C(GCP_SCOPES) env variable. +- Environment variable values will only be used if the playbook values are not set. +- The I(service_account_email) and I(service_account_file) options are mutually exclusive. +''' + +EXAMPLES = ''' +- name: get info on a backend bucket + gcp_compute_backend_bucket_info: + filters: + - name = test_object + project: test_project + auth_kind: serviceaccount + service_account_file: "/tmp/auth.pem" +''' + +RETURN = ''' +resources: + description: List of resources + returned: always + type: complex + contains: + bucketName: + description: + - Cloud Storage bucket name. + returned: success + type: str + cdnPolicy: + description: + - Cloud CDN configuration for this Backend Bucket.
+ returned: success + type: complex + contains: + signedUrlCacheMaxAgeSec: + description: + - Maximum number of seconds the response to a signed URL request will be + considered fresh. Defaults to 1hr (3600s). After this time period, the + response will be revalidated before being served. + - 'When serving responses to signed URL requests, Cloud CDN will internally + behave as though all responses from this backend had a "Cache-Control: + public, max-age=[TTL]" header, regardless of any existing Cache-Control + header. The actual headers served in responses will not be altered.' + returned: success + type: int + creationTimestamp: + description: + - Creation timestamp in RFC3339 text format. + returned: success + type: str + description: + description: + - An optional textual description of the resource; provided by the client when + the resource is created. + returned: success + type: str + enableCdn: + description: + - If true, enable Cloud CDN for this BackendBucket. + returned: success + type: bool + id: + description: + - Unique identifier for the resource. + returned: success + type: int + name: + description: + - Name of the resource. Provided by the client when the resource is created. + The name must be 1-63 characters long, and comply with RFC1035. Specifically, + the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` + which means the first character must be a lowercase letter, and all following + characters must be a dash, lowercase letter, or digit, except the last character, + which cannot be a dash. + returned: success + type: str +''' + +################################################################################ +# Imports +################################################################################ +from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest +import json + +################################################################################ +# Main +################################################################################ + + +def main(): + module = GcpModule(argument_spec=dict(filters=dict(type='list', elements='str'))) + + if not module.params['scopes']: + module.params['scopes'] = ['https://www.googleapis.com/auth/compute'] + + return_value = {'resources': fetch_list(module, collection(module), query_options(module.params['filters']))} + module.exit_json(**return_value) + + +def collection(module): + return "https://www.googleapis.com/compute/v1/projects/{project}/global/backendBuckets".format(**module.params) + + +def fetch_list(module, link, query): + auth = GcpSession(module, 'compute') + return auth.list(link, return_if_object, array_name='items', params={'filter': query}) + + +def query_options(filters): + if not filters: + return '' + + if len(filters) == 1: + return filters[0] + else: + queries = [] + for f in filters: + # For multiple queries, all queries should have () + if f[0] != '(' and f[-1] != ')': + queries.append("(%s)" % ''.join(f)) + else: + queries.append(f) + + return ' '.join(queries) + + +def return_if_object(module, response): + # If not found, return nothing. + if response.status_code == 404: + return None + + # If no content, return nothing. 
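+ # (A 204 response carries no body, so calling response.json() on it would
+ # raise; returning None here mirrors the 404 case above.)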
+ if response.status_code == 204: + return None + + try: + module.raise_for_status(response) + result = response.json() + except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst: + module.fail_json(msg="Invalid JSON response with error: %s" % inst) + + if navigate_hash(result, ['error', 'errors']): + module.fail_json(msg=navigate_hash(result, ['error', 'errors'])) + + return result + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/cloud/google/gcp_compute_backend_service_facts.py b/plugins/modules/cloud/google/gcp_compute_backend_service_facts.py new file mode 120000 index 0000000000..def0ed0e6f --- /dev/null +++ b/plugins/modules/cloud/google/gcp_compute_backend_service_facts.py @@ -0,0 +1 @@ +gcp_compute_backend_service_info.py \ No newline at end of file diff --git a/plugins/modules/cloud/google/gcp_compute_backend_service_info.py b/plugins/modules/cloud/google/gcp_compute_backend_service_info.py new file mode 100644 index 0000000000..7fd35b0e8d --- /dev/null +++ b/plugins/modules/cloud/google/gcp_compute_backend_service_info.py @@ -0,0 +1,475 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (C) 2017 Google +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** AUTO GENERATED CODE *** +# +# ---------------------------------------------------------------------------- +# +# This file is automatically generated by Magic Modules and manual +# changes will be clobbered when the file is regenerated. +# +# Please read more about how to change this file at +# https://www.github.com/GoogleCloudPlatform/magic-modules +# +# ---------------------------------------------------------------------------- + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +################################################################################ +# Documentation +################################################################################ + +ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: gcp_compute_backend_service_info +description: +- Gather info for GCP BackendService +short_description: Gather info for GCP BackendService +author: Google Inc. (@googlecloudplatform) +requirements: +- python >= 2.6 +- requests >= 2.18.4 +- google-auth >= 1.3.0 +options: + filters: + description: + - A list of filter value pairs. Available filters are listed here U(https://cloud.google.com/sdk/gcloud/reference/topic/filters). + - Each additional filter in the list will be added as an AND condition (filter1 + and filter2). + type: list + project: + description: + - The Google Cloud Platform project to use. + type: str + auth_kind: + description: + - The type of credential used. + type: str + required: true + choices: + - application + - machineaccount + - serviceaccount + service_account_contents: + description: + - The contents of a Service Account JSON file, either in a dictionary or as a + JSON string that represents it. + type: jsonarg + service_account_file: + description: + - The path of a Service Account JSON file if serviceaccount is selected as type. + type: path + service_account_email: + description: + - An optional service account email address if machineaccount is selected and + the user does not wish to use the default email.
+ type: str + scopes: + description: + - Array of scopes to be used. + type: list + env_type: + description: + - Specifies which Ansible environment you're running this module within. + - This should not be set unless you know what you're doing. + - This only alters the User Agent string for any API requests. + type: str +notes: +- For authentication, you can set service_account_file using the C(GCP_SERVICE_ACCOUNT_FILE) + env variable. +- For authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS) + env variable. +- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL) + env variable. +- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable. +- For authentication, you can set scopes using the C(GCP_SCOPES) env variable. +- Environment variable values will only be used if the playbook values are not set. +- The I(service_account_email) and I(service_account_file) options are mutually exclusive. +''' + +EXAMPLES = ''' +- name: get info on a backend service + gcp_compute_backend_service_info: + filters: + - name = test_object + project: test_project + auth_kind: serviceaccount + service_account_file: "/tmp/auth.pem" +''' + +RETURN = ''' +resources: + description: List of resources + returned: always + type: complex + contains: + affinityCookieTtlSec: + description: + - Lifetime of cookies in seconds if session_affinity is GENERATED_COOKIE. If + set to 0, the cookie is non-persistent and lasts only until the end of the + browser session (or equivalent). The maximum allowed value for TTL is one + day. + - When the load balancing scheme is INTERNAL, this field is not used. + returned: success + type: int + backends: + description: + - The set of backends that serve this BackendService. + returned: success + type: complex + contains: + balancingMode: + description: + - Specifies the balancing mode for this backend. + - For global HTTP(S) or TCP/SSL load balancing, the default is UTILIZATION. + Valid values are UTILIZATION, RATE (for HTTP(S)) and CONNECTION (for TCP/SSL). + returned: success + type: str + capacityScaler: + description: + - A multiplier applied to the group's maximum servicing capacity (based + on UTILIZATION, RATE or CONNECTION). + - Default value is 1, which means the group will serve up to 100% of its + configured capacity (depending on balancingMode). A setting of 0 means + the group is completely drained, offering 0% of its available capacity. + Valid range is [0.0,1.0]. + returned: success + type: str + description: + description: + - An optional description of this resource. + - Provide this property when you create the resource. + returned: success + type: str + group: + description: + - The fully-qualified URL of an Instance Group or Network Endpoint Group + resource. In case of instance group this defines the list of instances + that serve traffic. Member virtual machine instances from each instance + group must live in the same zone as the instance group itself. No two + backends in a backend service are allowed to use the same Instance Group resource. + - For Network Endpoint Groups this defines the list of endpoints. All endpoints + of Network Endpoint Group must be hosted on instances located in the same + zone as the Network Endpoint Group. + - Backend services cannot mix Instance Group and Network Endpoint Group + backends. + - Note that you must specify an Instance Group or Network Endpoint Group + resource using the fully-qualified URL, rather than a partial URL.
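+          # Example of a fully-qualified Instance Group URL (illustrative):
+          # https://www.googleapis.com/compute/v1/projects/my-project/zones/us-central1-a/instanceGroups/my-group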
+ returned: success + type: str + maxConnections: + description: + - The max number of simultaneous connections for the group. Can be used + with either CONNECTION or UTILIZATION balancing modes. + - For CONNECTION mode, either maxConnections or one of maxConnectionsPerInstance + or maxConnectionsPerEndpoint, as appropriate for group type, must be set. + returned: success + type: int + maxConnectionsPerInstance: + description: + - The max number of simultaneous connections that a single backend instance + can handle. This is used to calculate the capacity of the group. Can be + used in either CONNECTION or UTILIZATION balancing modes. + - For CONNECTION mode, either maxConnections or maxConnectionsPerInstance + must be set. + returned: success + type: int + maxConnectionsPerEndpoint: + description: + - The max number of simultaneous connections that a single backend network + endpoint can handle. This is used to calculate the capacity of the group. + Can be used in either CONNECTION or UTILIZATION balancing modes. + - For CONNECTION mode, either maxConnections or maxConnectionsPerEndpoint + must be set. + returned: success + type: int + maxRate: + description: + - The max requests per second (RPS) of the group. + - Can be used with either RATE or UTILIZATION balancing modes, but required + if RATE mode. For RATE mode, either maxRate or one of maxRatePerInstance + or maxRatePerEndpoint, as appropriate for group type, must be set. + returned: success + type: int + maxRatePerInstance: + description: + - The max requests per second (RPS) that a single backend instance can handle. + This is used to calculate the capacity of the group. Can be used in either + balancing mode. For RATE mode, either maxRate or maxRatePerInstance must + be set. + returned: success + type: str + maxRatePerEndpoint: + description: + - The max requests per second (RPS) that a single backend network endpoint + can handle. This is used to calculate the capacity of the group. Can be + used in either balancing mode. For RATE mode, either maxRate or maxRatePerEndpoint + must be set. + returned: success + type: str + maxUtilization: + description: + - Used when balancingMode is UTILIZATION. This ratio defines the CPU utilization + target for the group. The default is 0.8. Valid range is [0.0, 1.0]. + returned: success + type: str + cdnPolicy: + description: + - Cloud CDN configuration for this BackendService. + returned: success + type: complex + contains: + cacheKeyPolicy: + description: + - The CacheKeyPolicy for this CdnPolicy. + returned: success + type: complex + contains: + includeHost: + description: + - If true requests to different hosts will be cached separately. + returned: success + type: bool + includeProtocol: + description: + - If true, http and https requests will be cached separately. + returned: success + type: bool + includeQueryString: + description: + - If true, include query string parameters in the cache key according + to query_string_whitelist and query_string_blacklist. If neither is + set, the entire query string will be included. + - If false, the query string will be excluded from the cache key entirely. + returned: success + type: bool + queryStringBlacklist: + description: + - Names of query string parameters to exclude in cache keys. + - All other parameters will be included. Either specify query_string_whitelist + or query_string_blacklist, not both. + - "'&' and '=' will be percent encoded and not treated as delimiters." 
+ returned: success + type: list + queryStringWhitelist: + description: + - Names of query string parameters to include in cache keys. + - All other parameters will be excluded. Either specify query_string_whitelist + or query_string_blacklist, not both. + - "'&' and '=' will be percent encoded and not treated as delimiters." + returned: success + type: list + signedUrlCacheMaxAgeSec: + description: + - Maximum number of seconds the response to a signed URL request will be + considered fresh, defaults to 1hr (3600s). After this time period, the + response will be revalidated before being served. + - 'When serving responses to signed URL requests, Cloud CDN will internally + behave as though all responses from this backend had a "Cache-Control: + public, max-age=[TTL]" header, regardless of any existing Cache-Control + header. The actual headers served in responses will not be altered.' + returned: success + type: int + connectionDraining: + description: + - Settings for connection draining . + returned: success + type: complex + contains: + drainingTimeoutSec: + description: + - Time for which instance will be drained (not accept new connections, but + still work to finish started). + returned: success + type: int + creationTimestamp: + description: + - Creation timestamp in RFC3339 text format. + returned: success + type: str + fingerprint: + description: + - Fingerprint of this resource. A hash of the contents stored in this object. + This field is used in optimistic locking. + returned: success + type: str + description: + description: + - An optional description of this resource. + returned: success + type: str + enableCDN: + description: + - If true, enable Cloud CDN for this BackendService. + returned: success + type: bool + healthChecks: + description: + - The set of URLs to the HttpHealthCheck or HttpsHealthCheck resource for health + checking this BackendService. Currently at most one health check can be specified, + and a health check is required. + - For internal load balancing, a URL to a HealthCheck resource must be specified + instead. + returned: success + type: list + id: + description: + - The unique identifier for the resource. + returned: success + type: int + iap: + description: + - Settings for enabling Cloud Identity Aware Proxy. + returned: success + type: complex + contains: + enabled: + description: + - Enables IAP. + returned: success + type: bool + oauth2ClientId: + description: + - OAuth2 Client ID for IAP . + returned: success + type: str + oauth2ClientSecret: + description: + - OAuth2 Client Secret for IAP . + returned: success + type: str + oauth2ClientSecretSha256: + description: + - OAuth2 Client Secret SHA-256 for IAP . + returned: success + type: str + loadBalancingScheme: + description: + - Indicates whether the backend service will be used with internal or external + load balancing. A backend service created for one type of load balancing cannot + be used with the other. Must be `EXTERNAL` or `INTERNAL_SELF_MANAGED` for + a global backend service. Defaults to `EXTERNAL`. + returned: success + type: str + name: + description: + - Name of the resource. Provided by the client when the resource is created. + The name must be 1-63 characters long, and comply with RFC1035. 
Specifically, + the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` + which means the first character must be a lowercase letter, and all following + characters must be a dash, lowercase letter, or digit, except the last character, + which cannot be a dash. + returned: success + type: str + portName: + description: + - Name of backend port. The same name should appear in the instance groups referenced + by this service. Required when the load balancing scheme is EXTERNAL. + returned: success + type: str + protocol: + description: + - The protocol this BackendService uses to communicate with backends. + - 'Possible values are HTTP, HTTPS, HTTP2, TCP, and SSL. The default is HTTP. + **NOTE**: HTTP2 is only valid for beta HTTP/2 load balancer types and may + result in errors if used with the GA API.' + returned: success + type: str + securityPolicy: + description: + - The security policy associated with this backend service. + returned: success + type: str + sessionAffinity: + description: + - Type of session affinity to use. The default is NONE. Session affinity is + not applicable if the protocol is UDP. + returned: success + type: str + timeoutSec: + description: + - How many seconds to wait for the backend before considering it a failed request. + Default is 30 seconds. Valid range is [1, 86400]. + returned: success + type: int +''' + +################################################################################ +# Imports +################################################################################ +from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest +import json + +################################################################################ +# Main +################################################################################ + + +def main(): + module = GcpModule(argument_spec=dict(filters=dict(type='list', elements='str'))) + + if not module.params['scopes']: + module.params['scopes'] = ['https://www.googleapis.com/auth/compute'] + + return_value = {'resources': fetch_list(module, collection(module), query_options(module.params['filters']))} + module.exit_json(**return_value) + + +def collection(module): + return "https://www.googleapis.com/compute/v1/projects/{project}/global/backendServices".format(**module.params) + + +def fetch_list(module, link, query): + auth = GcpSession(module, 'compute') + return auth.list(link, return_if_object, array_name='items', params={'filter': query}) + + +def query_options(filters): + if not filters: + return '' + + if len(filters) == 1: + return filters[0] + else: + queries = [] + for f in filters: + # For multiple queries, all queries should have () + if f[0] != '(' and f[-1] != ')': + queries.append("(%s)" % ''.join(f)) + else: + queries.append(f) + + return ' '.join(queries) + + +def return_if_object(module, response): + # If not found, return nothing. + if response.status_code == 404: + return None + + # If no content, return nothing. 
+ if response.status_code == 204: + return None + + try: + module.raise_for_status(response) + result = response.json() + except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst: + module.fail_json(msg="Invalid JSON response with error: %s" % inst) + + if navigate_hash(result, ['error', 'errors']): + module.fail_json(msg=navigate_hash(result, ['error', 'errors'])) + + return result + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/cloud/google/gcp_compute_disk_facts.py b/plugins/modules/cloud/google/gcp_compute_disk_facts.py new file mode 120000 index 0000000000..52aabea81a --- /dev/null +++ b/plugins/modules/cloud/google/gcp_compute_disk_facts.py @@ -0,0 +1 @@ +gcp_compute_disk_info.py \ No newline at end of file diff --git a/plugins/modules/cloud/google/gcp_compute_disk_info.py b/plugins/modules/cloud/google/gcp_compute_disk_info.py new file mode 100644 index 0000000000..3b0e31f4a4 --- /dev/null +++ b/plugins/modules/cloud/google/gcp_compute_disk_info.py @@ -0,0 +1,401 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (C) 2017 Google +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** AUTO GENERATED CODE *** +# +# ---------------------------------------------------------------------------- +# +# This file is automatically generated by Magic Modules and manual +# changes will be clobbered when the file is regenerated. +# +# Please read more about how to change this file at +# https://www.github.com/GoogleCloudPlatform/magic-modules +# +# ---------------------------------------------------------------------------- + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +################################################################################ +# Documentation +################################################################################ + +ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: gcp_compute_disk_info +description: +- Gather info for GCP Disk +short_description: Gather info for GCP Disk +author: Google Inc. (@googlecloudplatform) +requirements: +- python >= 2.6 +- requests >= 2.18.4 +- google-auth >= 1.3.0 +options: + filters: + description: + - A list of filter value pairs. Available filters are listed here U(https://cloud.google.com/sdk/gcloud/reference/topic/filters). + - Each additional filter in the list will be added as an AND condition (filter1 + and filter2). + type: list + zone: + description: + - A reference to the zone where the disk resides. + required: true + type: str + project: + description: + - The Google Cloud Platform project to use. + type: str + auth_kind: + description: + - The type of credential used. + type: str + required: true + choices: + - application + - machineaccount + - serviceaccount + service_account_contents: + description: + - The contents of a Service Account JSON file, either in a dictionary or as a + JSON string that represents it. + type: jsonarg + service_account_file: + description: + - The path of a Service Account JSON file if serviceaccount is selected as type. + type: path + service_account_email: + description: + - An optional service account email address if machineaccount is selected and + the user does not wish to use the default email.
+ type: str + scopes: + description: + - Array of scopes to be used. + type: list + env_type: + description: + - Specifies which Ansible environment you're running this module within. + - This should not be set unless you know what you're doing. + - This only alters the User Agent string for any API requests. + type: str +notes: +- For authentication, you can set service_account_file using the C(GCP_SERVICE_ACCOUNT_FILE) + env variable. +- For authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS) + env variable. +- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL) + env variable. +- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable. +- For authentication, you can set scopes using the C(GCP_SCOPES) env variable. +- Environment variable values will only be used if the playbook values are not set. +- The I(service_account_email) and I(service_account_file) options are mutually exclusive. +''' + +EXAMPLES = ''' +- name: get info on a disk + gcp_compute_disk_info: + zone: us-central1-a + filters: + - name = test_object + project: test_project + auth_kind: serviceaccount + service_account_file: "/tmp/auth.pem" +''' + +RETURN = ''' +resources: + description: List of resources + returned: always + type: complex + contains: + labelFingerprint: + description: + - The fingerprint used for optimistic locking of this resource. Used internally + during updates. + returned: success + type: str + creationTimestamp: + description: + - Creation timestamp in RFC3339 text format. + returned: success + type: str + description: + description: + - An optional description of this resource. Provide this property when you create + the resource. + returned: success + type: str + id: + description: + - The unique identifier for the resource. + returned: success + type: int + lastAttachTimestamp: + description: + - Last attach timestamp in RFC3339 text format. + returned: success + type: str + lastDetachTimestamp: + description: + - Last detach timestamp in RFC3339 text format. + returned: success + type: str + labels: + description: + - Labels to apply to this disk. A list of key->value pairs. + returned: success + type: dict + licenses: + description: + - Any applicable publicly visible licenses. + returned: success + type: list + name: + description: + - Name of the resource. Provided by the client when the resource is created. + The name must be 1-63 characters long, and comply with RFC1035. Specifically, + the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` + which means the first character must be a lowercase letter, and all following + characters must be a dash, lowercase letter, or digit, except the last character, + which cannot be a dash. + returned: success + type: str + sizeGb: + description: + - Size of the persistent disk, specified in GB. You can specify this field when + creating a persistent disk using the sourceImage or sourceSnapshot parameter, + or specify it alone to create an empty persistent disk. + - If you specify this field along with sourceImage or sourceSnapshot, the value + of sizeGb must not be less than the size of the sourceImage or the size of + the snapshot. + returned: success + type: int + users: + description: + - 'Links to the users of the disk (attached instances) in form: project/zones/zone/instances/instance + .'
+ returned: success + type: list + physicalBlockSizeBytes: + description: + - Physical block size of the persistent disk, in bytes. If not present in a + request, a default value is used. Currently supported sizes are 4096 and 16384, + other sizes may be added in the future. + - If an unsupported value is requested, the error message will list the supported + values for the caller's project. + returned: success + type: int + type: + description: + - URL of the disk type resource describing which disk type to use to create + the disk. Provide this when creating the disk. + returned: success + type: str + sourceImage: + description: + - The source image used to create this disk. If the source image is deleted, + this field will not be set. + - 'To create a disk with one of the public operating system images, specify + the image by its family name. For example, specify family/debian-8 to use + the latest Debian 8 image: projects/debian-cloud/global/images/family/debian-8 + Alternatively, use a specific version of a public operating system image: + projects/debian-cloud/global/images/debian-8-jessie-vYYYYMMDD To create a + disk with a private image that you created, specify the image name in the + following format: global/images/my-private-image You can also specify a private + image by its image family, which returns the latest version of the image in + that family. Replace the image name with family/family-name: global/images/family/my-private-family + .' + returned: success + type: str + zone: + description: + - A reference to the zone where the disk resides. + returned: success + type: str + sourceImageEncryptionKey: + description: + - The customer-supplied encryption key of the source image. Required if the + source image is protected by a customer-supplied encryption key. + returned: success + type: complex + contains: + rawKey: + description: + - Specifies a 256-bit customer-supplied encryption key, encoded in RFC 4648 + base64 to either encrypt or decrypt this resource. + returned: success + type: str + sha256: + description: + - The RFC 4648 base64 encoded SHA-256 hash of the customer-supplied encryption + key that protects this resource. + returned: success + type: str + kmsKeyName: + description: + - The name of the encryption key that is stored in Google Cloud KMS. + returned: success + type: str + sourceImageId: + description: + - The ID value of the image used to create this disk. This value identifies + the exact image that was used to create this persistent disk. For example, + if you created the persistent disk from an image that was later deleted and + recreated under the same name, the source image ID would identify the exact + version of the image that was used. + returned: success + type: str + diskEncryptionKey: + description: + - Encrypts the disk using a customer-supplied encryption key. + - After you encrypt a disk with a customer-supplied key, you must provide the + same key if you use the disk later (e.g. to create a disk snapshot or an image, + or to attach the disk to a virtual machine). + - Customer-supplied encryption keys do not protect access to metadata of the + disk. + - If you do not provide an encryption key when creating the disk, then the disk + will be encrypted using an automatically generated key and you do not need + to provide a key to use the disk later. 
+ returned: success + type: complex + contains: + rawKey: + description: + - Specifies a 256-bit customer-supplied encryption key, encoded in RFC 4648 + base64 to either encrypt or decrypt this resource. + returned: success + type: str + sha256: + description: + - The RFC 4648 base64 encoded SHA-256 hash of the customer-supplied encryption + key that protects this resource. + returned: success + type: str + kmsKeyName: + description: + - The name of the encryption key that is stored in Google Cloud KMS. + - Your project's Compute Engine System service account (`service-{{PROJECT_NUMBER}}@compute-system.iam.gserviceaccount.com`) + must have `roles/cloudkms.cryptoKeyEncrypterDecrypter` to use this feature. + returned: success + type: str + sourceSnapshot: + description: + - The source snapshot used to create this disk. You can provide this as a partial + or full URL to the resource. + returned: success + type: dict + sourceSnapshotEncryptionKey: + description: + - The customer-supplied encryption key of the source snapshot. Required if the + source snapshot is protected by a customer-supplied encryption key. + returned: success + type: complex + contains: + rawKey: + description: + - Specifies a 256-bit customer-supplied encryption key, encoded in RFC 4648 + base64 to either encrypt or decrypt this resource. + returned: success + type: str + kmsKeyName: + description: + - The name of the encryption key that is stored in Google Cloud KMS. + returned: success + type: str + sha256: + description: + - The RFC 4648 base64 encoded SHA-256 hash of the customer-supplied encryption + key that protects this resource. + returned: success + type: str + sourceSnapshotId: + description: + - The unique ID of the snapshot used to create this disk. This value identifies + the exact snapshot that was used to create this persistent disk. For example, + if you created the persistent disk from a snapshot that was later deleted + and recreated under the same name, the source snapshot ID would identify the + exact version of the snapshot that was used. 
+ returned: success + type: str +''' + +################################################################################ +# Imports +################################################################################ +from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest +import json + +################################################################################ +# Main +################################################################################ + + +def main(): + module = GcpModule(argument_spec=dict(filters=dict(type='list', elements='str'), zone=dict(required=True, type='str'))) + + if not module.params['scopes']: + module.params['scopes'] = ['https://www.googleapis.com/auth/compute'] + + return_value = {'resources': fetch_list(module, collection(module), query_options(module.params['filters']))} + module.exit_json(**return_value) + + +def collection(module): + return "https://www.googleapis.com/compute/v1/projects/{project}/zones/{zone}/disks".format(**module.params) + + +def fetch_list(module, link, query): + auth = GcpSession(module, 'compute') + return auth.list(link, return_if_object, array_name='items', params={'filter': query}) + + +def query_options(filters): + if not filters: + return '' + + if len(filters) == 1: + return filters[0] + else: + queries = [] + for f in filters: + # For multiple queries, all queries should have () + if f[0] != '(' and f[-1] != ')': + queries.append("(%s)" % ''.join(f)) + else: + queries.append(f) + + return ' '.join(queries) + + +def return_if_object(module, response): + # If not found, return nothing. + if response.status_code == 404: + return None + + # If no content, return nothing. + if response.status_code == 204: + return None + + try: + module.raise_for_status(response) + result = response.json() + except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst: + module.fail_json(msg="Invalid JSON response with error: %s" % inst) + + if navigate_hash(result, ['error', 'errors']): + module.fail_json(msg=navigate_hash(result, ['error', 'errors'])) + + return result + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/cloud/google/gcp_compute_firewall_facts.py b/plugins/modules/cloud/google/gcp_compute_firewall_facts.py new file mode 120000 index 0000000000..7a8ccaa415 --- /dev/null +++ b/plugins/modules/cloud/google/gcp_compute_firewall_facts.py @@ -0,0 +1 @@ +gcp_compute_firewall_info.py \ No newline at end of file diff --git a/plugins/modules/cloud/google/gcp_compute_firewall_info.py b/plugins/modules/cloud/google/gcp_compute_firewall_info.py new file mode 100644 index 0000000000..0657939c23 --- /dev/null +++ b/plugins/modules/cloud/google/gcp_compute_firewall_info.py @@ -0,0 +1,368 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (C) 2017 Google +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** AUTO GENERATED CODE *** +# +# ---------------------------------------------------------------------------- +# +# This file is automatically generated by Magic Modules and manual +# changes will be clobbered when the file is regenerated. 
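Each *_info module in this commit follows the same three-step pattern visible above: collection() expands the module parameters into a list URL, query_options() folds the filters into one expression, and fetch_list() hands both to GcpSession.list(), which is expected to walk the API's paged responses and accumulate the items arrays. A rough, hypothetical sketch of that listing loop, assuming an already-authorized requests.Session (the real implementation lives in module_utils/gcp_utils.py and is not part of this diff):

import requests  # assumed transport; GcpSession manages its own session internally

def list_all(session, link, query):
    """Hypothetical stand-in for GcpSession.list(); 'session' is assumed to be
    a requests.Session already carrying OAuth2 credentials."""
    items = []
    page_token = None
    while True:
        params = {'filter': query}
        if page_token:
            params['pageToken'] = page_token
        response = session.get(link, params=params)
        response.raise_for_status()
        result = response.json()
        items.extend(result.get('items', []))
        # The Compute API reports further pages via nextPageToken.
        page_token = result.get('nextPageToken')
        if not page_token:
            return items

# Illustrative usage, mirroring collection() and query_options() above:
# link = "https://www.googleapis.com/compute/v1/projects/{project}/zones/{zone}/disks".format(
#     project='my-project', zone='us-central1-a')
# disks = list_all(authorized_session, link, 'name = test_object')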
+# +# Please read more about how to change this file at +# https://www.github.com/GoogleCloudPlatform/magic-modules +# +# ---------------------------------------------------------------------------- + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +################################################################################ +# Documentation +################################################################################ + +ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: gcp_compute_firewall_info +description: +- Gather info for GCP Firewall +short_description: Gather info for GCP Firewall +author: Google Inc. (@googlecloudplatform) +requirements: +- python >= 2.6 +- requests >= 2.18.4 +- google-auth >= 1.3.0 +options: + filters: + description: + - A list of filter value pairs. Available filters are listed here U(https://cloud.google.com/sdk/gcloud/reference/topic/filters). + - Each additional filter in the list will be added as an AND condition (filter1 + and filter2). + type: list + project: + description: + - The Google Cloud Platform project to use. + type: str + auth_kind: + description: + - The type of credential used. + type: str + required: true + choices: + - application + - machineaccount + - serviceaccount + service_account_contents: + description: + - The contents of a Service Account JSON file, either in a dictionary or as a + JSON string that represents it. + type: jsonarg + service_account_file: + description: + - The path of a Service Account JSON file if serviceaccount is selected as type. + type: path + service_account_email: + description: + - An optional service account email address if machineaccount is selected and + the user does not wish to use the default email. + type: str + scopes: + description: + - Array of scopes to be used. + type: list + env_type: + description: + - Specifies which Ansible environment you're running this module within. + - This should not be set unless you know what you're doing. + - This only alters the User Agent string for any API requests. + type: str +notes: +- For authentication, you can set service_account_file using the C(GCP_SERVICE_ACCOUNT_FILE) + env variable. +- For authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS) + env variable. +- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL) + env variable. +- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable. +- For authentication, you can set scopes using the C(GCP_SCOPES) env variable. +- Environment variable values will only be used if the playbook values are not set. +- The I(service_account_email) and I(service_account_file) options are mutually exclusive. +''' + +EXAMPLES = ''' +- name: get info on a firewall + gcp_compute_firewall_info: + filters: + - name = test_object + project: test_project + auth_kind: serviceaccount + service_account_file: "/tmp/auth.pem" +''' + +RETURN = ''' +resources: + description: List of resources + returned: always + type: complex + contains: + allowed: + description: + - The list of ALLOW rules specified by this firewall. Each rule specifies a + protocol and port-range tuple that describes a permitted connection. + returned: success + type: complex + contains: + ip_protocol: + description: + - The IP protocol to which this rule applies.
The protocol type is required + when creating a firewall rule. This value can either be one of the following + well known protocol strings (tcp, udp, icmp, esp, ah, sctp), or the IP + protocol number. + returned: success + type: str + ports: + description: + - An optional list of ports to which this rule applies. This field is only + applicable for UDP or TCP protocol. Each entry must be either an integer + or a range. If not specified, this rule applies to connections through + any port. + - 'Example inputs include: ["22"], ["80","443"], and ["12345-12349"].' + returned: success + type: list + creationTimestamp: + description: + - Creation timestamp in RFC3339 text format. + returned: success + type: str + denied: + description: + - The list of DENY rules specified by this firewall. Each rule specifies a protocol + and port-range tuple that describes a denied connection. + returned: success + type: complex + contains: + ip_protocol: + description: + - The IP protocol to which this rule applies. The protocol type is required + when creating a firewall rule. This value can either be one of the following + well known protocol strings (tcp, udp, icmp, esp, ah, sctp), or the IP + protocol number. + returned: success + type: str + ports: + description: + - An optional list of ports to which this rule applies. This field is only + applicable for UDP or TCP protocol. Each entry must be either an integer + or a range. If not specified, this rule applies to connections through + any port. + - 'Example inputs include: ["22"], ["80","443"], and ["12345-12349"].' + returned: success + type: list + description: + description: + - An optional description of this resource. Provide this property when you create + the resource. + returned: success + type: str + destinationRanges: + description: + - If destination ranges are specified, the firewall will apply only to traffic + that has destination IP address in these ranges. These ranges must be expressed + in CIDR format. Only IPv4 is supported. + returned: success + type: list + direction: + description: + - 'Direction of traffic to which this firewall applies; default is INGRESS. + Note: For INGRESS traffic, it is NOT supported to specify destinationRanges; + For EGRESS traffic, it is NOT supported to specify sourceRanges OR sourceTags.' + returned: success + type: str + disabled: + description: + - Denotes whether the firewall rule is disabled, i.e not applied to the network + it is associated with. When set to true, the firewall rule is not enforced + and the network behaves as if it did not exist. If this is unspecified, the + firewall rule will be enabled. + returned: success + type: bool + logConfig: + description: + - This field denotes whether to enable logging for a particular firewall rule. + If logging is enabled, logs will be exported to Stackdriver. + returned: success + type: complex + contains: + enableLogging: + description: + - This field denotes whether to enable logging for a particular firewall + rule. If logging is enabled, logs will be exported to Stackdriver. + returned: success + type: bool + id: + description: + - The unique identifier for the resource. + returned: success + type: int + name: + description: + - Name of the resource. Provided by the client when the resource is created. + The name must be 1-63 characters long, and comply with RFC1035. 
Specifically, + the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` + which means the first character must be a lowercase letter, and all following + characters must be a dash, lowercase letter, or digit, except the last character, + which cannot be a dash. + returned: success + type: str + network: + description: + - 'URL of the network resource for this firewall rule. If not specified when + creating a firewall rule, the default network is used: global/networks/default + If you choose to specify this property, you can specify the network as a full + or partial URL. For example, the following are all valid URLs: https://www.googleapis.com/compute/v1/projects/myproject/global/ + networks/my-network projects/myproject/global/networks/my-network global/networks/default + .' + returned: success + type: dict + priority: + description: + - Priority for this rule. This is an integer between 0 and 65535, both inclusive. + When not specified, the value assumed is 1000. Relative priorities determine + precedence of conflicting rules. Lower value of priority implies higher precedence + (eg, a rule with priority 0 has higher precedence than a rule with priority + 1). DENY rules take precedence over ALLOW rules having equal priority. + returned: success + type: int + sourceRanges: + description: + - If source ranges are specified, the firewall will apply only to traffic that + has source IP address in these ranges. These ranges must be expressed in CIDR + format. One or both of sourceRanges and sourceTags may be set. If both properties + are set, the firewall will apply to traffic that has source IP address within + sourceRanges OR the source IP that belongs to a tag listed in the sourceTags + property. The connection does not need to match both properties for the firewall + to apply. Only IPv4 is supported. + returned: success + type: list + sourceServiceAccounts: + description: + - If source service accounts are specified, the firewall will apply only to + traffic originating from an instance with a service account in this list. + Source service accounts cannot be used to control traffic to an instance's + external IP address because service accounts are associated with an instance, + not an IP address. sourceRanges can be set at the same time as sourceServiceAccounts. + If both are set, the firewall will apply to traffic that has source IP address + within sourceRanges OR the source IP belongs to an instance with service account + listed in sourceServiceAccount. The connection does not need to match both + properties for the firewall to apply. sourceServiceAccounts cannot be used + at the same time as sourceTags or targetTags. + returned: success + type: list + sourceTags: + description: + - If source tags are specified, the firewall will apply only to traffic with + source IP that belongs to a tag listed in source tags. Source tags cannot + be used to control traffic to an instance's external IP address. Because tags + are associated with an instance, not an IP address. One or both of sourceRanges + and sourceTags may be set. If both properties are set, the firewall will apply + to traffic that has source IP address within sourceRanges OR the source IP + that belongs to a tag listed in the sourceTags property. The connection does + not need to match both properties for the firewall to apply. 
+ returned: success + type: list + targetServiceAccounts: + description: + - A list of service accounts indicating sets of instances located in the network + that may make network connections as specified in allowed[]. + - targetServiceAccounts cannot be used at the same time as targetTags or sourceTags. + If neither targetServiceAccounts nor targetTags are specified, the firewall + rule applies to all instances on the specified network. + returned: success + type: list + targetTags: + description: + - A list of instance tags indicating sets of instances located in the network + that may make network connections as specified in allowed[]. + - If no targetTags are specified, the firewall rule applies to all instances + on the specified network. + returned: success + type: list +''' + +################################################################################ +# Imports +################################################################################ +from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest +import json + +################################################################################ +# Main +################################################################################ + + +def main(): + module = GcpModule(argument_spec=dict(filters=dict(type='list', elements='str'))) + + if not module.params['scopes']: + module.params['scopes'] = ['https://www.googleapis.com/auth/compute'] + + return_value = {'resources': fetch_list(module, collection(module), query_options(module.params['filters']))} + module.exit_json(**return_value) + + +def collection(module): + return "https://www.googleapis.com/compute/v1/projects/{project}/global/firewalls".format(**module.params) + + +def fetch_list(module, link, query): + auth = GcpSession(module, 'compute') + return auth.list(link, return_if_object, array_name='items', params={'filter': query}) + + +def query_options(filters): + if not filters: + return '' + + if len(filters) == 1: + return filters[0] + else: + queries = [] + for f in filters: + # For multiple queries, all queries should have () + if f[0] != '(' and f[-1] != ')': + queries.append("(%s)" % ''.join(f)) + else: + queries.append(f) + + return ' '.join(queries) + + +def return_if_object(module, response): + # If not found, return nothing. + if response.status_code == 404: + return None + + # If no content, return nothing. 
+ if response.status_code == 204: + return None + + try: + module.raise_for_status(response) + result = response.json() + except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst: + module.fail_json(msg="Invalid JSON response with error: %s" % inst) + + if navigate_hash(result, ['error', 'errors']): + module.fail_json(msg=navigate_hash(result, ['error', 'errors'])) + + return result + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/cloud/google/gcp_compute_forwarding_rule_facts.py b/plugins/modules/cloud/google/gcp_compute_forwarding_rule_facts.py new file mode 120000 index 0000000000..4f09197451 --- /dev/null +++ b/plugins/modules/cloud/google/gcp_compute_forwarding_rule_facts.py @@ -0,0 +1 @@ +gcp_compute_forwarding_rule_info.py \ No newline at end of file diff --git a/plugins/modules/cloud/google/gcp_compute_forwarding_rule_info.py b/plugins/modules/cloud/google/gcp_compute_forwarding_rule_info.py new file mode 100644 index 0000000000..7f4112c5c5 --- /dev/null +++ b/plugins/modules/cloud/google/gcp_compute_forwarding_rule_info.py @@ -0,0 +1,354 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (C) 2017 Google +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** AUTO GENERATED CODE *** +# +# ---------------------------------------------------------------------------- +# +# This file is automatically generated by Magic Modules and manual +# changes will be clobbered when the file is regenerated. +# +# Please read more about how to change this file at +# https://www.github.com/GoogleCloudPlatform/magic-modules +# +# ---------------------------------------------------------------------------- + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +################################################################################ +# Documentation +################################################################################ + +ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: gcp_compute_forwarding_rule_info +description: +- Gather info for GCP ForwardingRule +short_description: Gather info for GCP ForwardingRule +author: Google Inc. (@googlecloudplatform) +requirements: +- python >= 2.6 +- requests >= 2.18.4 +- google-auth >= 1.3.0 +options: + filters: + description: + - A list of filter value pairs. Available filters are listed here U(https://cloud.google.com/sdk/gcloud/reference/topic/filters). + - Each additional filter in the list will be added as an AND condition (filter1 + and filter2). + type: list + region: + description: + - A reference to the region where the regional forwarding rule resides. + - This field is not applicable to global forwarding rules. + required: true + type: str + project: + description: + - The Google Cloud Platform project to use. + type: str + auth_kind: + description: + - The type of credential used. + type: str + required: true + choices: + - application + - machineaccount + - serviceaccount + service_account_contents: + description: + - The contents of a Service Account JSON file, either in a dictionary or as a + JSON string that represents it. + type: jsonarg + service_account_file: + description: + - The path of a Service Account JSON file if serviceaccount is selected as type.
+ type: path + service_account_email: + description: + - An optional service account email address if machineaccount is selected and + the user does not wish to use the default email. + type: str + scopes: + description: + - Array of scopes to be used. + type: list + env_type: + description: + - Specifies which Ansible environment you're running this module within. + - This should not be set unless you know what you're doing. + - This only alters the User Agent string for any API requests. + type: str +notes: +- For authentication, you can set service_account_file using the C(GCP_SERVICE_ACCOUNT_FILE) + env variable. +- For authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS) + env variable. +- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL) + env variable. +- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable. +- For authentication, you can set scopes using the C(GCP_SCOPES) env variable. +- Environment variable values will only be used if the playbook values are not set. +- The I(service_account_email) and I(service_account_file) options are mutually exclusive. +''' + +EXAMPLES = ''' +- name: get info on a forwarding rule + gcp_compute_forwarding_rule_info: + region: us-west1 + filters: + - name = test_object + project: test_project + auth_kind: serviceaccount + service_account_file: "/tmp/auth.pem" +''' + +RETURN = ''' +resources: + description: List of resources + returned: always + type: complex + contains: + creationTimestamp: + description: + - Creation timestamp in RFC3339 text format. + returned: success + type: str + description: + description: + - An optional description of this resource. Provide this property when you create + the resource. + returned: success + type: str + id: + description: + - The unique identifier for the resource. + returned: success + type: int + IPAddress: + description: + - The IP address that this forwarding rule is serving on behalf of. + - Addresses are restricted based on the forwarding rule's load balancing scheme + (EXTERNAL or INTERNAL) and scope (global or regional). + - When the load balancing scheme is EXTERNAL, for global forwarding rules, the + address must be a global IP, and for regional forwarding rules, the address + must live in the same region as the forwarding rule. If this field is empty, + an ephemeral IPv4 address from the same scope (global or regional) will be + assigned. A regional forwarding rule supports IPv4 only. A global forwarding + rule supports either IPv4 or IPv6. + - When the load balancing scheme is INTERNAL, this can only be an RFC 1918 IP + address belonging to the network/subnet configured for the forwarding rule. + By default, if this field is empty, an ephemeral internal IP address will + be automatically allocated from the IP range of the subnet or network configured + for this forwarding rule. + - 'An address can be specified either by a literal IP address or a URL reference + to an existing Address resource. The following examples are all valid: * 100.1.2.3 + * https://www.googleapis.com/compute/v1/projects/project/regions/region/addresses/address + * projects/project/regions/region/addresses/address * regions/region/addresses/address + * global/addresses/address * address .' + returned: success + type: str + IPProtocol: + description: + - The IP protocol to which this rule applies. Valid options are TCP, UDP, ESP, + AH, SCTP or ICMP.
+ - When the load balancing scheme is INTERNAL, only TCP and UDP are valid. + returned: success + type: str + backendService: + description: + - A BackendService to receive the matched traffic. This is used only for INTERNAL + load balancing. + returned: success + type: dict + loadBalancingScheme: + description: + - This signifies what the ForwardingRule will be used for and can be EXTERNAL, + INTERNAL, or INTERNAL_MANAGED. EXTERNAL is used for Classic Cloud VPN gateways, + protocol forwarding to VMs from an external IP address, and HTTP(S), SSL Proxy, + TCP Proxy, and Network TCP/UDP load balancers. + - INTERNAL is used for protocol forwarding to VMs from an internal IP address, + and internal TCP/UDP load balancers. + - INTERNAL_MANAGED is used for internal HTTP(S) load balancers. + returned: success + type: str + name: + description: + - Name of the resource; provided by the client when the resource is created. + The name must be 1-63 characters long, and comply with RFC1035. Specifically, + the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` + which means the first character must be a lowercase letter, and all following + characters must be a dash, lowercase letter, or digit, except the last character, + which cannot be a dash. + returned: success + type: str + network: + description: + - For internal load balancing, this field identifies the network that the load + balanced IP should belong to for this Forwarding Rule. If this field is not + specified, the default network will be used. + - This field is only used for INTERNAL load balancing. + returned: success + type: dict + portRange: + description: + - This field is used along with the target field for TargetHttpProxy, TargetHttpsProxy, + TargetSslProxy, TargetTcpProxy, TargetVpnGateway, TargetPool, TargetInstance. + - Applicable only when IPProtocol is TCP, UDP, or SCTP, only packets addressed + to ports in the specified range will be forwarded to target. + - Forwarding rules with the same [IPAddress, IPProtocol] pair must have disjoint + port ranges. + - 'Some types of forwarding target have constraints on the acceptable ports: + * TargetHttpProxy: 80, 8080 * TargetHttpsProxy: 443 * TargetTcpProxy: 25, + 43, 110, 143, 195, 443, 465, 587, 700, 993, 995, 1883, 5222 * TargetSslProxy: + 25, 43, 110, 143, 195, 443, 465, 587, 700, 993, 995, 1883, 5222 * TargetVpnGateway: + 500, 4500 .' + returned: success + type: str + ports: + description: + - This field is used along with the backend_service field for internal load + balancing. + - When the load balancing scheme is INTERNAL, a single port or a comma separated + list of ports can be configured. Only packets addressed to these ports will + be forwarded to the backends configured with this forwarding rule. + - You may specify a maximum of up to 5 ports. + returned: success + type: list + subnetwork: + description: + - The subnetwork that the load balanced IP should belong to for this Forwarding + Rule. This field is only used for INTERNAL load balancing. + - If the network specified is in auto subnet mode, this field is optional. However, + if the network is in custom subnet mode, a subnetwork must be specified. + returned: success + type: dict + target: + description: + - This field is only used for EXTERNAL load balancing. + - A reference to a TargetPool resource to receive the matched traffic. + - This target must live in the same region as the forwarding rule. 
+ - The forwarded traffic must be of a type appropriate to the target object. + returned: success + type: dict + allPorts: + description: + - For internal TCP/UDP load balancing (i.e. load balancing scheme is INTERNAL + and protocol is TCP/UDP), set this to true to allow packets addressed to any + ports to be forwarded to the backends configured with this forwarding rule. + Used with backend service. Cannot be set if port or portRange are set. + returned: success + type: bool + networkTier: + description: + - 'The networking tier used for configuring this address. This field can take + the following values: PREMIUM or STANDARD. If this field is not specified, + it is assumed to be PREMIUM.' + returned: success + type: str + serviceLabel: + description: + - An optional prefix to the service name for this Forwarding Rule. + - If specified, will be the first label of the fully qualified service name. + - The label must be 1-63 characters long, and comply with RFC1035. + - Specifically, the label must be 1-63 characters long and match the regular + expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must + be a lowercase letter, and all following characters must be a dash, lowercase + letter, or digit, except the last character, which cannot be a dash. + - This field is only used for INTERNAL load balancing. + returned: success + type: str + serviceName: + description: + - The internal fully qualified service name for this Forwarding Rule. + - This field is only used for INTERNAL load balancing. + returned: success + type: str + region: + description: + - A reference to the region where the regional forwarding rule resides. + - This field is not applicable to global forwarding rules. + returned: success + type: str +''' + +################################################################################ +# Imports +################################################################################ +from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest +import json + +################################################################################ +# Main +################################################################################ + + +def main(): + module = GcpModule(argument_spec=dict(filters=dict(type='list', elements='str'), region=dict(required=True, type='str'))) + + if not module.params['scopes']: + module.params['scopes'] = ['https://www.googleapis.com/auth/compute'] + + return_value = {'resources': fetch_list(module, collection(module), query_options(module.params['filters']))} + module.exit_json(**return_value) + + +def collection(module): + return "https://www.googleapis.com/compute/v1/projects/{project}/regions/{region}/forwardingRules".format(**module.params) + + +def fetch_list(module, link, query): + auth = GcpSession(module, 'compute') + return auth.list(link, return_if_object, array_name='items', params={'filter': query}) + + +def query_options(filters): + if not filters: + return '' + + if len(filters) == 1: + return filters[0] + else: + queries = [] + for f in filters: + # For multiple queries, all queries should have () + if f[0] != '(' and f[-1] != ')': + queries.append("(%s)" % ''.join(f)) + else: + queries.append(f) + + return ' '.join(queries) + + +def return_if_object(module, response): + # If not found, return nothing. + if response.status_code == 404: + return None + + # If no content, return nothing. 
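+    # Like the 404 branch above, a 204 (No Content) response is mapped to None so
+    # callers can treat it as "no data" rather than an error. The getattr() fallback
+    # in the except clause below keeps the JSON parsing portable: Python 3 raises
+    # json.decoder.JSONDecodeError, while Python 2's json module raises a plain
+    # ValueError (on Python 3, JSONDecodeError is itself a ValueError subclass).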
+ if response.status_code == 204: + return None + + try: + module.raise_for_status(response) + result = response.json() + except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst: + module.fail_json(msg="Invalid JSON response with error: %s" % inst) + + if navigate_hash(result, ['error', 'errors']): + module.fail_json(msg=navigate_hash(result, ['error', 'errors'])) + + return result + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/cloud/google/gcp_compute_global_address_facts.py b/plugins/modules/cloud/google/gcp_compute_global_address_facts.py new file mode 120000 index 0000000000..497372674a --- /dev/null +++ b/plugins/modules/cloud/google/gcp_compute_global_address_facts.py @@ -0,0 +1 @@ +gcp_compute_global_address_info.py \ No newline at end of file diff --git a/plugins/modules/cloud/google/gcp_compute_global_address_info.py b/plugins/modules/cloud/google/gcp_compute_global_address_info.py new file mode 100644 index 0000000000..ab8dc4d6b7 --- /dev/null +++ b/plugins/modules/cloud/google/gcp_compute_global_address_info.py @@ -0,0 +1,256 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (C) 2017 Google +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** AUTO GENERATED CODE *** +# +# ---------------------------------------------------------------------------- +# +# This file is automatically generated by Magic Modules and manual +# changes will be clobbered when the file is regenerated. +# +# Please read more about how to change this file at +# https://www.github.com/GoogleCloudPlatform/magic-modules +# +# ---------------------------------------------------------------------------- + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +################################################################################ +# Documentation +################################################################################ + +ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: gcp_compute_global_address_info +description: +- Gather info for GCP GlobalAddress +short_description: Gather info for GCP GlobalAddress +author: Google Inc. (@googlecloudplatform) +requirements: +- python >= 2.6 +- requests >= 2.18.4 +- google-auth >= 1.3.0 +options: + filters: + description: + - A list of filter value pairs. Available filters are listed here U(https://cloud.google.com/sdk/gcloud/reference/topic/filters). + - Each additional filter in the list will act be added as an AND condition (filter1 + and filter2) . + type: list + project: + description: + - The Google Cloud Platform project to use. + type: str + auth_kind: + description: + - The type of credential used. + type: str + required: true + choices: + - application + - machineaccount + - serviceaccount + service_account_contents: + description: + - The contents of a Service Account JSON file, either in a dictionary or as a + JSON string that represents it. + type: jsonarg + service_account_file: + description: + - The path of a Service Account JSON file if serviceaccount is selected as type. + type: path + service_account_email: + description: + - An optional service account email address if machineaccount is selected and + the user does not wish to use the default email. 
+ type: str + scopes: + description: + - Array of scopes to be used + type: list + env_type: + description: + - Specifies which Ansible environment you're running this module within. + - This should not be set unless you know what you're doing. + - This only alters the User Agent string for any API requests. + type: str +notes: +- for authentication, you can set service_account_file using the C(gcp_service_account_file) + env variable. +- for authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS) + env variable. +- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL) + env variable. +- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable. +- For authentication, you can set scopes using the C(GCP_SCOPES) env variable. +- Environment variables values will only be used if the playbook values are not set. +- The I(service_account_email) and I(service_account_file) options are mutually exclusive. +''' + +EXAMPLES = ''' +- name: get info on a global address + gcp_compute_global_address_info: + filters: + - name = test_object + project: test_project + auth_kind: serviceaccount + service_account_file: "/tmp/auth.pem" +''' + +RETURN = ''' +resources: + description: List of resources + returned: always + type: complex + contains: + address: + description: + - The static external IP address represented by this resource. + returned: success + type: str + creationTimestamp: + description: + - Creation timestamp in RFC3339 text format. + returned: success + type: str + description: + description: + - An optional description of this resource. + returned: success + type: str + id: + description: + - The unique identifier for the resource. This identifier is defined by the + server. + returned: success + type: int + name: + description: + - Name of the resource. Provided by the client when the resource is created. + The name must be 1-63 characters long, and comply with RFC1035. Specifically, + the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` + which means the first character must be a lowercase letter, and all following + characters must be a dash, lowercase letter, or digit, except the last character, + which cannot be a dash. + returned: success + type: str + ipVersion: + description: + - The IP Version that will be used by this address. Valid options are `IPV4` + or `IPV6`. The default value is `IPV4`. + returned: success + type: str + region: + description: + - A reference to the region where the regional address resides. + returned: success + type: str + prefixLength: + description: + - The prefix length of the IP range. If not present, it means the address field + is a single IP address. + - This field is not applicable to addresses with addressType=EXTERNAL. + returned: success + type: int + addressType: + description: + - The type of the address to reserve, default is EXTERNAL. + - "* EXTERNAL indicates public/external single IP address." + - "* INTERNAL indicates internal IP ranges belonging to some network." + returned: success + type: str + purpose: + description: + - The purpose of the resource. For global internal addresses it can be * VPC_PEERING + - for peer networks This should only be set when using an Internal address. + returned: success + type: str + network: + description: + - The URL of the network in which to reserve the IP range. The IP range must + be in RFC1918 space. 
The network cannot be deleted if there are any reserved + IP ranges referring to it. + - This should only be set when using an Internal address. + returned: success + type: dict +''' + +################################################################################ +# Imports +################################################################################ +from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest +import json + +################################################################################ +# Main +################################################################################ + + +def main(): + module = GcpModule(argument_spec=dict(filters=dict(type='list', elements='str'))) + + if not module.params['scopes']: + module.params['scopes'] = ['https://www.googleapis.com/auth/compute'] + + return_value = {'resources': fetch_list(module, collection(module), query_options(module.params['filters']))} + module.exit_json(**return_value) + + +def collection(module): + return "https://www.googleapis.com/compute/v1/projects/{project}/global/addresses".format(**module.params) + + +def fetch_list(module, link, query): + auth = GcpSession(module, 'compute') + return auth.list(link, return_if_object, array_name='items', params={'filter': query}) + + +def query_options(filters): + if not filters: + return '' + + if len(filters) == 1: + return filters[0] + else: + queries = [] + for f in filters: + # For multiple queries, all queries should have () + if f[0] != '(' and f[-1] != ')': + queries.append("(%s)" % ''.join(f)) + else: + queries.append(f) + + return ' '.join(queries) + + +def return_if_object(module, response): + # If not found, return nothing. + if response.status_code == 404: + return None + + # If no content, return nothing. + if response.status_code == 204: + return None + + try: + module.raise_for_status(response) + result = response.json() + except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst: + module.fail_json(msg="Invalid JSON response with error: %s" % inst) + + if navigate_hash(result, ['error', 'errors']): + module.fail_json(msg=navigate_hash(result, ['error', 'errors'])) + + return result + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/cloud/google/gcp_compute_global_forwarding_rule_facts.py b/plugins/modules/cloud/google/gcp_compute_global_forwarding_rule_facts.py new file mode 120000 index 0000000000..18d0b3b5db --- /dev/null +++ b/plugins/modules/cloud/google/gcp_compute_global_forwarding_rule_facts.py @@ -0,0 +1 @@ +gcp_compute_global_forwarding_rule_info.py \ No newline at end of file diff --git a/plugins/modules/cloud/google/gcp_compute_global_forwarding_rule_info.py b/plugins/modules/cloud/google/gcp_compute_global_forwarding_rule_info.py new file mode 100644 index 0000000000..c29fd899e7 --- /dev/null +++ b/plugins/modules/cloud/google/gcp_compute_global_forwarding_rule_info.py @@ -0,0 +1,336 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (C) 2017 Google +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** AUTO GENERATED CODE *** +# +# ---------------------------------------------------------------------------- +# +# This file is automatically generated by Magic Modules and manual +# changes will be clobbered when the file is regenerated. 
+# +# Please read more about how to change this file at +# https://www.github.com/GoogleCloudPlatform/magic-modules +# +# ---------------------------------------------------------------------------- + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +################################################################################ +# Documentation +################################################################################ + +ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: gcp_compute_global_forwarding_rule_info +description: +- Gather info for GCP GlobalForwardingRule +short_description: Gather info for GCP GlobalForwardingRule +author: Google Inc. (@googlecloudplatform) +requirements: +- python >= 2.6 +- requests >= 2.18.4 +- google-auth >= 1.3.0 +options: + filters: + description: + - A list of filter value pairs. Available filters are listed here U(https://cloud.google.com/sdk/gcloud/reference/topic/filters). + - Each additional filter in the list will act be added as an AND condition (filter1 + and filter2) . + type: list + project: + description: + - The Google Cloud Platform project to use. + type: str + auth_kind: + description: + - The type of credential used. + type: str + required: true + choices: + - application + - machineaccount + - serviceaccount + service_account_contents: + description: + - The contents of a Service Account JSON file, either in a dictionary or as a + JSON string that represents it. + type: jsonarg + service_account_file: + description: + - The path of a Service Account JSON file if serviceaccount is selected as type. + type: path + service_account_email: + description: + - An optional service account email address if machineaccount is selected and + the user does not wish to use the default email. + type: str + scopes: + description: + - Array of scopes to be used + type: list + env_type: + description: + - Specifies which Ansible environment you're running this module within. + - This should not be set unless you know what you're doing. + - This only alters the User Agent string for any API requests. + type: str +notes: +- for authentication, you can set service_account_file using the C(gcp_service_account_file) + env variable. +- for authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS) + env variable. +- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL) + env variable. +- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable. +- For authentication, you can set scopes using the C(GCP_SCOPES) env variable. +- Environment variables values will only be used if the playbook values are not set. +- The I(service_account_email) and I(service_account_file) options are mutually exclusive. +''' + +EXAMPLES = ''' +- name: get info on a global forwarding rule + gcp_compute_global_forwarding_rule_info: + filters: + - name = test_object + project: test_project + auth_kind: serviceaccount + service_account_file: "/tmp/auth.pem" +''' + +RETURN = ''' +resources: + description: List of resources + returned: always + type: complex + contains: + creationTimestamp: + description: + - Creation timestamp in RFC3339 text format. + returned: success + type: str + description: + description: + - An optional description of this resource. Provide this property when you create + the resource. 
+ returned: success + type: str + id: + description: + - The unique identifier for the resource. + returned: success + type: int + IPAddress: + description: + - The IP address that this forwarding rule is serving on behalf of. + - Addresses are restricted based on the forwarding rule's load balancing scheme + (EXTERNAL or INTERNAL) and scope (global or regional). + - When the load balancing scheme is EXTERNAL, for global forwarding rules, the + address must be a global IP, and for regional forwarding rules, the address + must live in the same region as the forwarding rule. If this field is empty, + an ephemeral IPv4 address from the same scope (global or regional) will be + assigned. A regional forwarding rule supports IPv4 only. A global forwarding + rule supports either IPv4 or IPv6. + - When the load balancing scheme is INTERNAL, this can only be an RFC 1918 IP + address belonging to the network/subnet configured for the forwarding rule. + By default, if this field is empty, an ephemeral internal IP address will + be automatically allocated from the IP range of the subnet or network configured + for this forwarding rule. + - 'An address can be specified either by a literal IP address or a URL reference + to an existing Address resource. The following examples are all valid: * 100.1.2.3 + * https://www.googleapis.com/compute/v1/projects/project/regions/region/addresses/address + * projects/project/regions/region/addresses/address * regions/region/addresses/address + * global/addresses/address * address .' + returned: success + type: str + IPProtocol: + description: + - The IP protocol to which this rule applies. Valid options are TCP, UDP, ESP, + AH, SCTP or ICMP. When the load balancing scheme is INTERNAL_SELF_MANAGED, + only TCP is valid. + returned: success + type: str + ipVersion: + description: + - The IP Version that will be used by this global forwarding rule. + - Valid options are IPV4 or IPV6. + returned: success + type: str + loadBalancingScheme: + description: + - This signifies what the GlobalForwardingRule will be used for. + - 'The value of INTERNAL_SELF_MANAGED means that this will be used for Internal + Global HTTP(S) LB. The value of EXTERNAL means that this will be used for + External Global Load Balancing (HTTP(S) LB, External TCP/UDP LB, SSL Proxy) + NOTE: Currently global forwarding rules cannot be used for INTERNAL load balancing.' + returned: success + type: str + metadataFilters: + description: + - Opaque filter criteria used by Loadbalancer to restrict routing configuration + to a limited set xDS compliant clients. In their xDS requests to Loadbalancer, + xDS clients present node metadata. If a match takes place, the relevant routing + configuration is made available to those proxies. + - For each metadataFilter in this list, if its filterMatchCriteria is set to + MATCH_ANY, at least one of the filterLabels must match the corresponding label + provided in the metadata. If its filterMatchCriteria is set to MATCH_ALL, + then all of its filterLabels must match with corresponding labels in the provided + metadata. + - metadataFilters specified here can be overridden by those specified in the + UrlMap that this ForwardingRule references. + - metadataFilters only applies to Loadbalancers that have their loadBalancingScheme + set to INTERNAL_SELF_MANAGED. 
+ returned: success + type: complex + contains: + filterMatchCriteria: + description: + - Specifies how individual filterLabel matches within the list of filterLabels + contribute towards the overall metadataFilter match. + - MATCH_ANY - At least one of the filterLabels must have a matching label + in the provided metadata. + - MATCH_ALL - All filterLabels must have matching labels in the provided + metadata. + returned: success + type: str + filterLabels: + description: + - The list of label value pairs that must match labels in the provided metadata + based on filterMatchCriteria This list must not be empty and can have + at the most 64 entries. + returned: success + type: complex + contains: + name: + description: + - Name of the metadata label. The length must be between 1 and 1024 + characters, inclusive. + returned: success + type: str + value: + description: + - The value that the label must match. The value has a maximum length + of 1024 characters. + returned: success + type: str + name: + description: + - Name of the resource; provided by the client when the resource is created. + The name must be 1-63 characters long, and comply with RFC1035. Specifically, + the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` + which means the first character must be a lowercase letter, and all following + characters must be a dash, lowercase letter, or digit, except the last character, + which cannot be a dash. + returned: success + type: str + network: + description: + - This field is not used for external load balancing. + - For INTERNAL_SELF_MANAGED load balancing, this field identifies the network + that the load balanced IP should belong to for this global forwarding rule. + If this field is not specified, the default network will be used. + returned: success + type: dict + portRange: + description: + - This field is used along with the target field for TargetHttpProxy, TargetHttpsProxy, + TargetSslProxy, TargetTcpProxy, TargetVpnGateway, TargetPool, TargetInstance. + - Applicable only when IPProtocol is TCP, UDP, or SCTP, only packets addressed + to ports in the specified range will be forwarded to target. + - Forwarding rules with the same [IPAddress, IPProtocol] pair must have disjoint + port ranges. + - 'Some types of forwarding target have constraints on the acceptable ports: + * TargetHttpProxy: 80, 8080 * TargetHttpsProxy: 443 * TargetTcpProxy: 25, + 43, 110, 143, 195, 443, 465, 587, 700, 993, 995, 1883, 5222 * TargetSslProxy: + 25, 43, 110, 143, 195, 443, 465, 587, 700, 993, 995, 1883, 5222 * TargetVpnGateway: + 500, 4500 .' + returned: success + type: str + target: + description: + - The URL of the target resource to receive the matched traffic. + - The forwarded traffic must be of a type appropriate to the target object. + - For INTERNAL_SELF_MANAGED load balancing, only HTTP and HTTPS targets are + valid. 
+ returned: success + type: str +''' + +################################################################################ +# Imports +################################################################################ +from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest +import json + +################################################################################ +# Main +################################################################################ + + +def main(): + module = GcpModule(argument_spec=dict(filters=dict(type='list', elements='str'))) + + if not module.params['scopes']: + module.params['scopes'] = ['https://www.googleapis.com/auth/compute'] + + return_value = {'resources': fetch_list(module, collection(module), query_options(module.params['filters']))} + module.exit_json(**return_value) + + +def collection(module): + return "https://www.googleapis.com/compute/v1/projects/{project}/global/forwardingRules".format(**module.params) + + +def fetch_list(module, link, query): + auth = GcpSession(module, 'compute') + return auth.list(link, return_if_object, array_name='items', params={'filter': query}) + + +def query_options(filters): + if not filters: + return '' + + if len(filters) == 1: + return filters[0] + else: + queries = [] + for f in filters: + # For multiple queries, all queries should have () + if f[0] != '(' and f[-1] != ')': + queries.append("(%s)" % ''.join(f)) + else: + queries.append(f) + + return ' '.join(queries) + + +def return_if_object(module, response): + # If not found, return nothing. + if response.status_code == 404: + return None + + # If no content, return nothing. + if response.status_code == 204: + return None + + try: + module.raise_for_status(response) + result = response.json() + except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst: + module.fail_json(msg="Invalid JSON response with error: %s" % inst) + + if navigate_hash(result, ['error', 'errors']): + module.fail_json(msg=navigate_hash(result, ['error', 'errors'])) + + return result + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/cloud/google/gcp_compute_health_check_facts.py b/plugins/modules/cloud/google/gcp_compute_health_check_facts.py new file mode 120000 index 0000000000..a2646a6c8d --- /dev/null +++ b/plugins/modules/cloud/google/gcp_compute_health_check_facts.py @@ -0,0 +1 @@ +gcp_compute_health_check_info.py \ No newline at end of file diff --git a/plugins/modules/cloud/google/gcp_compute_health_check_info.py b/plugins/modules/cloud/google/gcp_compute_health_check_info.py new file mode 100644 index 0000000000..a8e344211a --- /dev/null +++ b/plugins/modules/cloud/google/gcp_compute_health_check_info.py @@ -0,0 +1,524 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (C) 2017 Google +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** AUTO GENERATED CODE *** +# +# ---------------------------------------------------------------------------- +# +# This file is automatically generated by Magic Modules and manual +# changes will be clobbered when the file is regenerated. 
+# +# Please read more about how to change this file at +# https://www.github.com/GoogleCloudPlatform/magic-modules +# +# ---------------------------------------------------------------------------- + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +################################################################################ +# Documentation +################################################################################ + +ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: gcp_compute_health_check_info +description: +- Gather info for GCP HealthCheck +short_description: Gather info for GCP HealthCheck +author: Google Inc. (@googlecloudplatform) +requirements: +- python >= 2.6 +- requests >= 2.18.4 +- google-auth >= 1.3.0 +options: + filters: + description: + - A list of filter value pairs. Available filters are listed here U(https://cloud.google.com/sdk/gcloud/reference/topic/filters). + - Each additional filter in the list will act be added as an AND condition (filter1 + and filter2) . + type: list + project: + description: + - The Google Cloud Platform project to use. + type: str + auth_kind: + description: + - The type of credential used. + type: str + required: true + choices: + - application + - machineaccount + - serviceaccount + service_account_contents: + description: + - The contents of a Service Account JSON file, either in a dictionary or as a + JSON string that represents it. + type: jsonarg + service_account_file: + description: + - The path of a Service Account JSON file if serviceaccount is selected as type. + type: path + service_account_email: + description: + - An optional service account email address if machineaccount is selected and + the user does not wish to use the default email. + type: str + scopes: + description: + - Array of scopes to be used + type: list + env_type: + description: + - Specifies which Ansible environment you're running this module within. + - This should not be set unless you know what you're doing. + - This only alters the User Agent string for any API requests. + type: str +notes: +- for authentication, you can set service_account_file using the C(gcp_service_account_file) + env variable. +- for authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS) + env variable. +- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL) + env variable. +- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable. +- For authentication, you can set scopes using the C(GCP_SCOPES) env variable. +- Environment variables values will only be used if the playbook values are not set. +- The I(service_account_email) and I(service_account_file) options are mutually exclusive. +''' + +EXAMPLES = ''' +- name: get info on a health check + gcp_compute_health_check_info: + filters: + - name = test_object + project: test_project + auth_kind: serviceaccount + service_account_file: "/tmp/auth.pem" +''' + +RETURN = ''' +resources: + description: List of resources + returned: always + type: complex + contains: + checkIntervalSec: + description: + - How often (in seconds) to send a health check. The default value is 5 seconds. + returned: success + type: int + creationTimestamp: + description: + - Creation timestamp in RFC3339 text format. + returned: success + type: str + description: + description: + - An optional description of this resource. 
Provide this property when you create + the resource. + returned: success + type: str + healthyThreshold: + description: + - A so-far unhealthy instance will be marked healthy after this many consecutive + successes. The default value is 2. + returned: success + type: int + id: + description: + - The unique identifier for the resource. This identifier is defined by the + server. + returned: success + type: int + name: + description: + - Name of the resource. Provided by the client when the resource is created. + The name must be 1-63 characters long, and comply with RFC1035. Specifically, + the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` + which means the first character must be a lowercase letter, and all following + characters must be a dash, lowercase letter, or digit, except the last character, + which cannot be a dash. + returned: success + type: str + timeoutSec: + description: + - How long (in seconds) to wait before claiming failure. + - The default value is 5 seconds. It is invalid for timeoutSec to have greater + value than checkIntervalSec. + returned: success + type: int + unhealthyThreshold: + description: + - A so-far healthy instance will be marked unhealthy after this many consecutive + failures. The default value is 2. + returned: success + type: int + type: + description: + - Specifies the type of the healthCheck, either TCP, SSL, HTTP or HTTPS. If + not specified, the default is TCP. Exactly one of the protocol-specific health + check field must be specified, which must match type field. + returned: success + type: str + httpHealthCheck: + description: + - A nested object resource. + returned: success + type: complex + contains: + host: + description: + - The value of the host header in the HTTP health check request. + - If left empty (default value), the public IP on behalf of which this health + check is performed will be used. + returned: success + type: str + requestPath: + description: + - The request path of the HTTP health check request. + - The default value is /. + returned: success + type: str + response: + description: + - The bytes to match against the beginning of the response data. If left + empty (the default value), any response will indicate health. The response + data can only be ASCII. + returned: success + type: str + port: + description: + - The TCP port number for the HTTP health check request. + - The default value is 80. + returned: success + type: int + portName: + description: + - Port name as defined in InstanceGroup#NamedPort#name. If both port and + port_name are defined, port takes precedence. + returned: success + type: str + proxyHeader: + description: + - Specifies the type of proxy header to append before sending data to the + backend, either NONE or PROXY_V1. The default is NONE. + returned: success + type: str + portSpecification: + description: + - 'Specifies how port is selected for health checking, can be one of the + following values: * `USE_FIXED_PORT`: The port number in `port` is used + for health checking.' + - "* `USE_NAMED_PORT`: The `portName` is used for health checking." + - "* `USE_SERVING_PORT`: For NetworkEndpointGroup, the port specified for + each network endpoint is used for health checking. For other backends, + the port or named port specified in the Backend Service is used for health + checking." + - If not specified, HTTP health check follows behavior specified in `port` + and `portName` fields. 
+ returned: success + type: str + httpsHealthCheck: + description: + - A nested object resource. + returned: success + type: complex + contains: + host: + description: + - The value of the host header in the HTTPS health check request. + - If left empty (default value), the public IP on behalf of which this health + check is performed will be used. + returned: success + type: str + requestPath: + description: + - The request path of the HTTPS health check request. + - The default value is /. + returned: success + type: str + response: + description: + - The bytes to match against the beginning of the response data. If left + empty (the default value), any response will indicate health. The response + data can only be ASCII. + returned: success + type: str + port: + description: + - The TCP port number for the HTTPS health check request. + - The default value is 443. + returned: success + type: int + portName: + description: + - Port name as defined in InstanceGroup#NamedPort#name. If both port and + port_name are defined, port takes precedence. + returned: success + type: str + proxyHeader: + description: + - Specifies the type of proxy header to append before sending data to the + backend, either NONE or PROXY_V1. The default is NONE. + returned: success + type: str + portSpecification: + description: + - 'Specifies how port is selected for health checking, can be one of the + following values: * `USE_FIXED_PORT`: The port number in `port` is used + for health checking.' + - "* `USE_NAMED_PORT`: The `portName` is used for health checking." + - "* `USE_SERVING_PORT`: For NetworkEndpointGroup, the port specified for + each network endpoint is used for health checking. For other backends, + the port or named port specified in the Backend Service is used for health + checking." + - If not specified, HTTPS health check follows behavior specified in `port` + and `portName` fields. + returned: success + type: str + tcpHealthCheck: + description: + - A nested object resource. + returned: success + type: complex + contains: + request: + description: + - The application data to send once the TCP connection has been established + (default value is empty). If both request and response are empty, the + connection establishment alone will indicate health. The request data + can only be ASCII. + returned: success + type: str + response: + description: + - The bytes to match against the beginning of the response data. If left + empty (the default value), any response will indicate health. The response + data can only be ASCII. + returned: success + type: str + port: + description: + - The TCP port number for the TCP health check request. + - The default value is 443. + returned: success + type: int + portName: + description: + - Port name as defined in InstanceGroup#NamedPort#name. If both port and + port_name are defined, port takes precedence. + returned: success + type: str + proxyHeader: + description: + - Specifies the type of proxy header to append before sending data to the + backend, either NONE or PROXY_V1. The default is NONE. + returned: success + type: str + portSpecification: + description: + - 'Specifies how port is selected for health checking, can be one of the + following values: * `USE_FIXED_PORT`: The port number in `port` is used + for health checking.' + - "* `USE_NAMED_PORT`: The `portName` is used for health checking." + - "* `USE_SERVING_PORT`: For NetworkEndpointGroup, the port specified for + each network endpoint is used for health checking. 
For other backends, + the port or named port specified in the Backend Service is used for health + checking." + - If not specified, TCP health check follows behavior specified in `port` + and `portName` fields. + returned: success + type: str + sslHealthCheck: + description: + - A nested object resource. + returned: success + type: complex + contains: + request: + description: + - The application data to send once the SSL connection has been established + (default value is empty). If both request and response are empty, the + connection establishment alone will indicate health. The request data + can only be ASCII. + returned: success + type: str + response: + description: + - The bytes to match against the beginning of the response data. If left + empty (the default value), any response will indicate health. The response + data can only be ASCII. + returned: success + type: str + port: + description: + - The TCP port number for the SSL health check request. + - The default value is 443. + returned: success + type: int + portName: + description: + - Port name as defined in InstanceGroup#NamedPort#name. If both port and + port_name are defined, port takes precedence. + returned: success + type: str + proxyHeader: + description: + - Specifies the type of proxy header to append before sending data to the + backend, either NONE or PROXY_V1. The default is NONE. + returned: success + type: str + portSpecification: + description: + - 'Specifies how port is selected for health checking, can be one of the + following values: * `USE_FIXED_PORT`: The port number in `port` is used + for health checking.' + - "* `USE_NAMED_PORT`: The `portName` is used for health checking." + - "* `USE_SERVING_PORT`: For NetworkEndpointGroup, the port specified for + each network endpoint is used for health checking. For other backends, + the port or named port specified in the Backend Service is used for health + checking." + - If not specified, SSL health check follows behavior specified in `port` + and `portName` fields. + returned: success + type: str + http2HealthCheck: + description: + - A nested object resource. + returned: success + type: complex + contains: + host: + description: + - The value of the host header in the HTTP2 health check request. + - If left empty (default value), the public IP on behalf of which this health + check is performed will be used. + returned: success + type: str + requestPath: + description: + - The request path of the HTTP2 health check request. + - The default value is /. + returned: success + type: str + response: + description: + - The bytes to match against the beginning of the response data. If left + empty (the default value), any response will indicate health. The response + data can only be ASCII. + returned: success + type: str + port: + description: + - The TCP port number for the HTTP2 health check request. + - The default value is 443. + returned: success + type: int + portName: + description: + - Port name as defined in InstanceGroup#NamedPort#name. If both port and + port_name are defined, port takes precedence. + returned: success + type: str + proxyHeader: + description: + - Specifies the type of proxy header to append before sending data to the + backend, either NONE or PROXY_V1. The default is NONE. + returned: success + type: str + portSpecification: + description: + - 'Specifies how port is selected for health checking, can be one of the + following values: * `USE_FIXED_PORT`: The port number in `port` is used + for health checking.' 
+ - "* `USE_NAMED_PORT`: The `portName` is used for health checking." + - "* `USE_SERVING_PORT`: For NetworkEndpointGroup, the port specified for + each network endpoint is used for health checking. For other backends, + the port or named port specified in the Backend Service is used for health + checking." + - If not specified, HTTP2 health check follows behavior specified in `port` + and `portName` fields. + returned: success + type: str +''' + +################################################################################ +# Imports +################################################################################ +from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest +import json + +################################################################################ +# Main +################################################################################ + + +def main(): + module = GcpModule(argument_spec=dict(filters=dict(type='list', elements='str'))) + + if not module.params['scopes']: + module.params['scopes'] = ['https://www.googleapis.com/auth/compute'] + + return_value = {'resources': fetch_list(module, collection(module), query_options(module.params['filters']))} + module.exit_json(**return_value) + + +def collection(module): + return "https://www.googleapis.com/compute/v1/projects/{project}/global/healthChecks".format(**module.params) + + +def fetch_list(module, link, query): + auth = GcpSession(module, 'compute') + return auth.list(link, return_if_object, array_name='items', params={'filter': query}) + + +def query_options(filters): + if not filters: + return '' + + if len(filters) == 1: + return filters[0] + else: + queries = [] + for f in filters: + # For multiple queries, all queries should have () + if f[0] != '(' and f[-1] != ')': + queries.append("(%s)" % ''.join(f)) + else: + queries.append(f) + + return ' '.join(queries) + + +def return_if_object(module, response): + # If not found, return nothing. + if response.status_code == 404: + return None + + # If no content, return nothing. 
+ if response.status_code == 204: + return None + + try: + module.raise_for_status(response) + result = response.json() + except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst: + module.fail_json(msg="Invalid JSON response with error: %s" % inst) + + if navigate_hash(result, ['error', 'errors']): + module.fail_json(msg=navigate_hash(result, ['error', 'errors'])) + + return result + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/cloud/google/gcp_compute_http_health_check_facts.py b/plugins/modules/cloud/google/gcp_compute_http_health_check_facts.py new file mode 120000 index 0000000000..dbf679c115 --- /dev/null +++ b/plugins/modules/cloud/google/gcp_compute_http_health_check_facts.py @@ -0,0 +1 @@ +gcp_compute_http_health_check_info.py \ No newline at end of file diff --git a/plugins/modules/cloud/google/gcp_compute_http_health_check_info.py b/plugins/modules/cloud/google/gcp_compute_http_health_check_info.py new file mode 100644 index 0000000000..cf3c04dba8 --- /dev/null +++ b/plugins/modules/cloud/google/gcp_compute_http_health_check_info.py @@ -0,0 +1,256 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (C) 2017 Google +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** AUTO GENERATED CODE *** +# +# ---------------------------------------------------------------------------- +# +# This file is automatically generated by Magic Modules and manual +# changes will be clobbered when the file is regenerated. +# +# Please read more about how to change this file at +# https://www.github.com/GoogleCloudPlatform/magic-modules +# +# ---------------------------------------------------------------------------- + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +################################################################################ +# Documentation +################################################################################ + +ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: gcp_compute_http_health_check_info +description: +- Gather info for GCP HttpHealthCheck +short_description: Gather info for GCP HttpHealthCheck +author: Google Inc. (@googlecloudplatform) +requirements: +- python >= 2.6 +- requests >= 2.18.4 +- google-auth >= 1.3.0 +options: + filters: + description: + - A list of filter value pairs. Available filters are listed here U(https://cloud.google.com/sdk/gcloud/reference/topic/filters). + - Each additional filter in the list will act be added as an AND condition (filter1 + and filter2) . + type: list + project: + description: + - The Google Cloud Platform project to use. + type: str + auth_kind: + description: + - The type of credential used. + type: str + required: true + choices: + - application + - machineaccount + - serviceaccount + service_account_contents: + description: + - The contents of a Service Account JSON file, either in a dictionary or as a + JSON string that represents it. + type: jsonarg + service_account_file: + description: + - The path of a Service Account JSON file if serviceaccount is selected as type. + type: path + service_account_email: + description: + - An optional service account email address if machineaccount is selected and + the user does not wish to use the default email. 
+ type: str + scopes: + description: + - Array of scopes to be used + type: list + env_type: + description: + - Specifies which Ansible environment you're running this module within. + - This should not be set unless you know what you're doing. + - This only alters the User Agent string for any API requests. + type: str +notes: +- for authentication, you can set service_account_file using the C(gcp_service_account_file) + env variable. +- for authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS) + env variable. +- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL) + env variable. +- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable. +- For authentication, you can set scopes using the C(GCP_SCOPES) env variable. +- Environment variables values will only be used if the playbook values are not set. +- The I(service_account_email) and I(service_account_file) options are mutually exclusive. +''' + +EXAMPLES = ''' +- name: get info on a HTTP health check + gcp_compute_http_health_check_info: + filters: + - name = test_object + project: test_project + auth_kind: serviceaccount + service_account_file: "/tmp/auth.pem" +''' + +RETURN = ''' +resources: + description: List of resources + returned: always + type: complex + contains: + checkIntervalSec: + description: + - How often (in seconds) to send a health check. The default value is 5 seconds. + returned: success + type: int + creationTimestamp: + description: + - Creation timestamp in RFC3339 text format. + returned: success + type: str + description: + description: + - An optional description of this resource. Provide this property when you create + the resource. + returned: success + type: str + healthyThreshold: + description: + - A so-far unhealthy instance will be marked healthy after this many consecutive + successes. The default value is 2. + returned: success + type: int + host: + description: + - The value of the host header in the HTTP health check request. If left empty + (default value), the public IP on behalf of which this health check is performed + will be used. + returned: success + type: str + id: + description: + - The unique identifier for the resource. This identifier is defined by the + server. + returned: success + type: int + name: + description: + - Name of the resource. Provided by the client when the resource is created. + The name must be 1-63 characters long, and comply with RFC1035. Specifically, + the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` + which means the first character must be a lowercase letter, and all following + characters must be a dash, lowercase letter, or digit, except the last character, + which cannot be a dash. + returned: success + type: str + port: + description: + - The TCP port number for the HTTP health check request. + - The default value is 80. + returned: success + type: int + requestPath: + description: + - The request path of the HTTP health check request. + - The default value is /. + returned: success + type: str + timeoutSec: + description: + - How long (in seconds) to wait before claiming failure. + - The default value is 5 seconds. It is invalid for timeoutSec to have greater + value than checkIntervalSec. + returned: success + type: int + unhealthyThreshold: + description: + - A so-far healthy instance will be marked unhealthy after this many consecutive + failures. The default value is 2. 
+ returned: success + type: int +''' + +################################################################################ +# Imports +################################################################################ +from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest +import json + +################################################################################ +# Main +################################################################################ + + +def main(): + module = GcpModule(argument_spec=dict(filters=dict(type='list', elements='str'))) + + if not module.params['scopes']: + module.params['scopes'] = ['https://www.googleapis.com/auth/compute'] + + return_value = {'resources': fetch_list(module, collection(module), query_options(module.params['filters']))} + module.exit_json(**return_value) + + +def collection(module): + return "https://www.googleapis.com/compute/v1/projects/{project}/global/httpHealthChecks".format(**module.params) + + +def fetch_list(module, link, query): + auth = GcpSession(module, 'compute') + return auth.list(link, return_if_object, array_name='items', params={'filter': query}) + + +def query_options(filters): + if not filters: + return '' + + if len(filters) == 1: + return filters[0] + else: + queries = [] + for f in filters: + # For multiple queries, all queries should have () + if f[0] != '(' and f[-1] != ')': + queries.append("(%s)" % ''.join(f)) + else: + queries.append(f) + + return ' '.join(queries) + + +def return_if_object(module, response): + # If not found, return nothing. + if response.status_code == 404: + return None + + # If no content, return nothing. + if response.status_code == 204: + return None + + try: + module.raise_for_status(response) + result = response.json() + except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst: + module.fail_json(msg="Invalid JSON response with error: %s" % inst) + + if navigate_hash(result, ['error', 'errors']): + module.fail_json(msg=navigate_hash(result, ['error', 'errors'])) + + return result + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/cloud/google/gcp_compute_https_health_check_facts.py b/plugins/modules/cloud/google/gcp_compute_https_health_check_facts.py new file mode 120000 index 0000000000..887a5ffe68 --- /dev/null +++ b/plugins/modules/cloud/google/gcp_compute_https_health_check_facts.py @@ -0,0 +1 @@ +gcp_compute_https_health_check_info.py \ No newline at end of file diff --git a/plugins/modules/cloud/google/gcp_compute_https_health_check_info.py b/plugins/modules/cloud/google/gcp_compute_https_health_check_info.py new file mode 100644 index 0000000000..d99d35a399 --- /dev/null +++ b/plugins/modules/cloud/google/gcp_compute_https_health_check_info.py @@ -0,0 +1,256 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (C) 2017 Google +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** AUTO GENERATED CODE *** +# +# ---------------------------------------------------------------------------- +# +# This file is automatically generated by Magic Modules and manual +# changes will be clobbered when the file is regenerated. 
+#
+# Please read more about how to change this file at
+# https://www.github.com/GoogleCloudPlatform/magic-modules
+#
+# ----------------------------------------------------------------------------
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+################################################################################
+# Documentation
+################################################################################
+
+ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'}
+
+DOCUMENTATION = '''
+---
+module: gcp_compute_https_health_check_info
+description:
+- Gather info for GCP HttpsHealthCheck
+short_description: Gather info for GCP HttpsHealthCheck
+author: Google Inc. (@googlecloudplatform)
+requirements:
+- python >= 2.6
+- requests >= 2.18.4
+- google-auth >= 1.3.0
+options:
+  filters:
+    description:
+    - A list of filter value pairs. Available filters are listed here U(https://cloud.google.com/sdk/gcloud/reference/topic/filters).
+    - Each additional filter in the list will be added as an AND condition (filter1
+      and filter2).
+    type: list
+  project:
+    description:
+    - The Google Cloud Platform project to use.
+    type: str
+  auth_kind:
+    description:
+    - The type of credential used.
+    type: str
+    required: true
+    choices:
+    - application
+    - machineaccount
+    - serviceaccount
+  service_account_contents:
+    description:
+    - The contents of a Service Account JSON file, either in a dictionary or as a
+      JSON string that represents it.
+    type: jsonarg
+  service_account_file:
+    description:
+    - The path of a Service Account JSON file if serviceaccount is selected as type.
+    type: path
+  service_account_email:
+    description:
+    - An optional service account email address if machineaccount is selected and
+      the user does not wish to use the default email.
+    type: str
+  scopes:
+    description:
+    - Array of scopes to be used.
+    type: list
+  env_type:
+    description:
+    - Specifies which Ansible environment you're running this module within.
+    - This should not be set unless you know what you're doing.
+    - This only alters the User Agent string for any API requests.
+    type: str
+notes:
+- For authentication, you can set service_account_file using the C(GCP_SERVICE_ACCOUNT_FILE)
+  env variable.
+- For authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS)
+  env variable.
+- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL)
+  env variable.
+- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable.
+- For authentication, you can set scopes using the C(GCP_SCOPES) env variable.
+- Environment variable values will only be used if the playbook values are not set.
+- The I(service_account_email) and I(service_account_file) options are mutually exclusive.
+'''
+
+EXAMPLES = '''
+- name: get info on an HTTPS health check
+  gcp_compute_https_health_check_info:
+    filters:
+    - name = test_object
+    project: test_project
+    auth_kind: serviceaccount
+    service_account_file: "/tmp/auth.pem"
+'''
+
+RETURN = '''
+resources:
+  description: List of resources
+  returned: always
+  type: complex
+  contains:
+    checkIntervalSec:
+      description:
+      - How often (in seconds) to send a health check. The default value is 5 seconds.
+      returned: success
+      type: int
+    creationTimestamp:
+      description:
+      - Creation timestamp in RFC3339 text format.
+ returned: success + type: str + description: + description: + - An optional description of this resource. Provide this property when you create + the resource. + returned: success + type: str + healthyThreshold: + description: + - A so-far unhealthy instance will be marked healthy after this many consecutive + successes. The default value is 2. + returned: success + type: int + host: + description: + - The value of the host header in the HTTPS health check request. If left empty + (default value), the public IP on behalf of which this health check is performed + will be used. + returned: success + type: str + id: + description: + - The unique identifier for the resource. This identifier is defined by the + server. + returned: success + type: int + name: + description: + - Name of the resource. Provided by the client when the resource is created. + The name must be 1-63 characters long, and comply with RFC1035. Specifically, + the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` + which means the first character must be a lowercase letter, and all following + characters must be a dash, lowercase letter, or digit, except the last character, + which cannot be a dash. + returned: success + type: str + port: + description: + - The TCP port number for the HTTPS health check request. + - The default value is 80. + returned: success + type: int + requestPath: + description: + - The request path of the HTTPS health check request. + - The default value is /. + returned: success + type: str + timeoutSec: + description: + - How long (in seconds) to wait before claiming failure. + - The default value is 5 seconds. It is invalid for timeoutSec to have greater + value than checkIntervalSec. + returned: success + type: int + unhealthyThreshold: + description: + - A so-far healthy instance will be marked unhealthy after this many consecutive + failures. The default value is 2. 
+ returned: success + type: int +''' + +################################################################################ +# Imports +################################################################################ +from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest +import json + +################################################################################ +# Main +################################################################################ + + +def main(): + module = GcpModule(argument_spec=dict(filters=dict(type='list', elements='str'))) + + if not module.params['scopes']: + module.params['scopes'] = ['https://www.googleapis.com/auth/compute'] + + return_value = {'resources': fetch_list(module, collection(module), query_options(module.params['filters']))} + module.exit_json(**return_value) + + +def collection(module): + return "https://www.googleapis.com/compute/v1/projects/{project}/global/httpsHealthChecks".format(**module.params) + + +def fetch_list(module, link, query): + auth = GcpSession(module, 'compute') + return auth.list(link, return_if_object, array_name='items', params={'filter': query}) + + +def query_options(filters): + if not filters: + return '' + + if len(filters) == 1: + return filters[0] + else: + queries = [] + for f in filters: + # For multiple queries, all queries should have () + if f[0] != '(' and f[-1] != ')': + queries.append("(%s)" % ''.join(f)) + else: + queries.append(f) + + return ' '.join(queries) + + +def return_if_object(module, response): + # If not found, return nothing. + if response.status_code == 404: + return None + + # If no content, return nothing. + if response.status_code == 204: + return None + + try: + module.raise_for_status(response) + result = response.json() + except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst: + module.fail_json(msg="Invalid JSON response with error: %s" % inst) + + if navigate_hash(result, ['error', 'errors']): + module.fail_json(msg=navigate_hash(result, ['error', 'errors'])) + + return result + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/cloud/google/gcp_compute_image_facts.py b/plugins/modules/cloud/google/gcp_compute_image_facts.py new file mode 120000 index 0000000000..f4ca4647ec --- /dev/null +++ b/plugins/modules/cloud/google/gcp_compute_image_facts.py @@ -0,0 +1 @@ +gcp_compute_image_info.py \ No newline at end of file diff --git a/plugins/modules/cloud/google/gcp_compute_image_info.py b/plugins/modules/cloud/google/gcp_compute_image_info.py new file mode 100644 index 0000000000..987ba820d5 --- /dev/null +++ b/plugins/modules/cloud/google/gcp_compute_image_info.py @@ -0,0 +1,387 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (C) 2017 Google +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** AUTO GENERATED CODE *** +# +# ---------------------------------------------------------------------------- +# +# This file is automatically generated by Magic Modules and manual +# changes will be clobbered when the file is regenerated. 
+#
+# Please read more about how to change this file at
+# https://www.github.com/GoogleCloudPlatform/magic-modules
+#
+# ----------------------------------------------------------------------------
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+################################################################################
+# Documentation
+################################################################################
+
+ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'}
+
+DOCUMENTATION = '''
+---
+module: gcp_compute_image_info
+description:
+- Gather info for GCP Image
+short_description: Gather info for GCP Image
+author: Google Inc. (@googlecloudplatform)
+requirements:
+- python >= 2.6
+- requests >= 2.18.4
+- google-auth >= 1.3.0
+options:
+  filters:
+    description:
+    - A list of filter value pairs. Available filters are listed here U(https://cloud.google.com/sdk/gcloud/reference/topic/filters).
+    - Each additional filter in the list will be added as an AND condition (filter1
+      and filter2).
+    type: list
+  project:
+    description:
+    - The Google Cloud Platform project to use.
+    type: str
+  auth_kind:
+    description:
+    - The type of credential used.
+    type: str
+    required: true
+    choices:
+    - application
+    - machineaccount
+    - serviceaccount
+  service_account_contents:
+    description:
+    - The contents of a Service Account JSON file, either in a dictionary or as a
+      JSON string that represents it.
+    type: jsonarg
+  service_account_file:
+    description:
+    - The path of a Service Account JSON file if serviceaccount is selected as type.
+    type: path
+  service_account_email:
+    description:
+    - An optional service account email address if machineaccount is selected and
+      the user does not wish to use the default email.
+    type: str
+  scopes:
+    description:
+    - Array of scopes to be used.
+    type: list
+  env_type:
+    description:
+    - Specifies which Ansible environment you're running this module within.
+    - This should not be set unless you know what you're doing.
+    - This only alters the User Agent string for any API requests.
+    type: str
+notes:
+- For authentication, you can set service_account_file using the C(GCP_SERVICE_ACCOUNT_FILE)
+  env variable.
+- For authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS)
+  env variable.
+- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL)
+  env variable.
+- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable.
+- For authentication, you can set scopes using the C(GCP_SCOPES) env variable.
+- Environment variable values will only be used if the playbook values are not set.
+- The I(service_account_email) and I(service_account_file) options are mutually exclusive.
+'''
+
+EXAMPLES = '''
+- name: get info on an image
+  gcp_compute_image_info:
+    filters:
+    - name = test_object
+    project: test_project
+    auth_kind: serviceaccount
+    service_account_file: "/tmp/auth.pem"
+'''
+
+RETURN = '''
+resources:
+  description: List of resources
+  returned: always
+  type: complex
+  contains:
+    archiveSizeBytes:
+      description:
+      - Size of the image tar.gz archive stored in Google Cloud Storage (in bytes).
+      returned: success
+      type: int
+    creationTimestamp:
+      description:
+      - Creation timestamp in RFC3339 text format.
+      returned: success
+      type: str
+    deprecated:
+      description:
+      - The deprecation status associated with this image.
+ returned: success + type: complex + contains: + deleted: + description: + - An optional RFC3339 timestamp on or after which the state of this resource + is intended to change to DELETED. This is only informational and the status + will not change unless the client explicitly changes it. + returned: success + type: str + deprecated: + description: + - An optional RFC3339 timestamp on or after which the state of this resource + is intended to change to DEPRECATED. This is only informational and the + status will not change unless the client explicitly changes it. + returned: success + type: str + obsolete: + description: + - An optional RFC3339 timestamp on or after which the state of this resource + is intended to change to OBSOLETE. This is only informational and the + status will not change unless the client explicitly changes it. + returned: success + type: str + replacement: + description: + - The URL of the suggested replacement for a deprecated resource. + - The suggested replacement resource must be the same kind of resource as + the deprecated resource. + returned: success + type: str + state: + description: + - The deprecation state of this resource. This can be DEPRECATED, OBSOLETE, + or DELETED. Operations which create a new resource using a DEPRECATED + resource will return successfully, but with a warning indicating the deprecated + resource and recommending its replacement. Operations which use OBSOLETE + or DELETED resources will be rejected and result in an error. + returned: success + type: str + description: + description: + - An optional description of this resource. Provide this property when you create + the resource. + returned: success + type: str + diskSizeGb: + description: + - Size of the image when restored onto a persistent disk (in GB). + returned: success + type: int + family: + description: + - The name of the image family to which this image belongs. You can create disks + by specifying an image family instead of a specific image name. The image + family always returns its latest image that is not deprecated. The name of + the image family must comply with RFC1035. + returned: success + type: str + guestOsFeatures: + description: + - A list of features to enable on the guest operating system. + - Applicable only for bootable images. + returned: success + type: complex + contains: + type: + description: + - The type of supported feature. + returned: success + type: str + id: + description: + - The unique identifier for the resource. This identifier is defined by the + server. + returned: success + type: int + imageEncryptionKey: + description: + - Encrypts the image using a customer-supplied encryption key. + - After you encrypt an image with a customer-supplied key, you must provide + the same key if you use the image later (e.g. to create a disk from the image) + . + returned: success + type: complex + contains: + rawKey: + description: + - Specifies a 256-bit customer-supplied encryption key, encoded in RFC 4648 + base64 to either encrypt or decrypt this resource. + returned: success + type: str + sha256: + description: + - The RFC 4648 base64 encoded SHA-256 hash of the customer-supplied encryption + key that protects this resource. + returned: success + type: str + labels: + description: + - Labels to apply to this Image. + returned: success + type: dict + labelFingerprint: + description: + - The fingerprint used for optimistic locking of this resource. Used internally + during updates. 
+      returned: success
+      type: str
+    licenses:
+      description:
+      - Any applicable license URI.
+      returned: success
+      type: list
+    name:
+      description:
+      - Name of the resource; provided by the client when the resource is created.
+        The name must be 1-63 characters long, and comply with RFC1035. Specifically,
+        the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?`
+        which means the first character must be a lowercase letter, and all following
+        characters must be a dash, lowercase letter, or digit, except the last character,
+        which cannot be a dash.
+      returned: success
+      type: str
+    rawDisk:
+      description:
+      - The parameters of the raw disk image.
+      returned: success
+      type: complex
+      contains:
+        containerType:
+          description:
+          - The format used to encode and transmit the block device, which should
+            be TAR. This is just a container and transmission format and not a runtime
+            format. Provided by the client when the disk image is created.
+          returned: success
+          type: str
+        sha1Checksum:
+          description:
+          - An optional SHA1 checksum of the disk image before unpackaging.
+          - This is provided by the client when the disk image is created.
+          returned: success
+          type: str
+        source:
+          description:
+          - The full Google Cloud Storage URL where disk storage is stored. You must
+            provide either this property or the sourceDisk property, but not both.
+          returned: success
+          type: str
+    sourceDisk:
+      description:
+      - The source disk to create this image based on.
+      - You must provide either this property or the rawDisk.source property, but
+        not both, to create an image.
+      returned: success
+      type: dict
+    sourceDiskEncryptionKey:
+      description:
+      - The customer-supplied encryption key of the source disk. Required if the source
+        disk is protected by a customer-supplied encryption key.
+      returned: success
+      type: complex
+      contains:
+        rawKey:
+          description:
+          - Specifies a 256-bit customer-supplied encryption key, encoded in RFC 4648
+            base64 to either encrypt or decrypt this resource.
+          returned: success
+          type: str
+        sha256:
+          description:
+          - The RFC 4648 base64 encoded SHA-256 hash of the customer-supplied encryption
+            key that protects this resource.
+          returned: success
+          type: str
+    sourceDiskId:
+      description:
+      - The ID value of the disk used to create this image. This value may be used
+        to determine whether the image was taken from the current or a previous instance
+        of a given disk name.
+      returned: success
+      type: str
+    sourceType:
+      description:
+      - The type of the image used to create this disk. The default and only value
+        is RAW.
+ returned: success + type: str +''' + +################################################################################ +# Imports +################################################################################ +from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest +import json + +################################################################################ +# Main +################################################################################ + + +def main(): + module = GcpModule(argument_spec=dict(filters=dict(type='list', elements='str'))) + + if not module.params['scopes']: + module.params['scopes'] = ['https://www.googleapis.com/auth/compute'] + + return_value = {'resources': fetch_list(module, collection(module), query_options(module.params['filters']))} + module.exit_json(**return_value) + + +def collection(module): + return "https://www.googleapis.com/compute/v1/projects/{project}/global/images".format(**module.params) + + +def fetch_list(module, link, query): + auth = GcpSession(module, 'compute') + return auth.list(link, return_if_object, array_name='items', params={'filter': query}) + + +def query_options(filters): + if not filters: + return '' + + if len(filters) == 1: + return filters[0] + else: + queries = [] + for f in filters: + # For multiple queries, all queries should have () + if f[0] != '(' and f[-1] != ')': + queries.append("(%s)" % ''.join(f)) + else: + queries.append(f) + + return ' '.join(queries) + + +def return_if_object(module, response): + # If not found, return nothing. + if response.status_code == 404: + return None + + # If no content, return nothing. + if response.status_code == 204: + return None + + try: + module.raise_for_status(response) + result = response.json() + except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst: + module.fail_json(msg="Invalid JSON response with error: %s" % inst) + + if navigate_hash(result, ['error', 'errors']): + module.fail_json(msg=navigate_hash(result, ['error', 'errors'])) + + return result + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/cloud/google/gcp_compute_instance_facts.py b/plugins/modules/cloud/google/gcp_compute_instance_facts.py new file mode 120000 index 0000000000..b886b91c69 --- /dev/null +++ b/plugins/modules/cloud/google/gcp_compute_instance_facts.py @@ -0,0 +1 @@ +gcp_compute_instance_info.py \ No newline at end of file diff --git a/plugins/modules/cloud/google/gcp_compute_instance_group_facts.py b/plugins/modules/cloud/google/gcp_compute_instance_group_facts.py new file mode 120000 index 0000000000..8703aff9e8 --- /dev/null +++ b/plugins/modules/cloud/google/gcp_compute_instance_group_facts.py @@ -0,0 +1 @@ +gcp_compute_instance_group_info.py \ No newline at end of file diff --git a/plugins/modules/cloud/google/gcp_compute_instance_group_info.py b/plugins/modules/cloud/google/gcp_compute_instance_group_info.py new file mode 100644 index 0000000000..ee7fee1664 --- /dev/null +++ b/plugins/modules/cloud/google/gcp_compute_instance_group_info.py @@ -0,0 +1,265 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (C) 2017 Google +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** AUTO GENERATED CODE *** +# +# ---------------------------------------------------------------------------- +# +# This file is automatically generated by 
Magic Modules and manual
+# changes will be clobbered when the file is regenerated.
+#
+# Please read more about how to change this file at
+# https://www.github.com/GoogleCloudPlatform/magic-modules
+#
+# ----------------------------------------------------------------------------
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+################################################################################
+# Documentation
+################################################################################
+
+ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'}
+
+DOCUMENTATION = '''
+---
+module: gcp_compute_instance_group_info
+description:
+- Gather info for GCP InstanceGroup
+short_description: Gather info for GCP InstanceGroup
+author: Google Inc. (@googlecloudplatform)
+requirements:
+- python >= 2.6
+- requests >= 2.18.4
+- google-auth >= 1.3.0
+options:
+  filters:
+    description:
+    - A list of filter value pairs. Available filters are listed here U(https://cloud.google.com/sdk/gcloud/reference/topic/filters).
+    - Each additional filter in the list will be added as an AND condition (filter1
+      and filter2).
+    type: list
+  zone:
+    description:
+    - A reference to the zone where the instance group resides.
+    required: true
+    type: str
+  project:
+    description:
+    - The Google Cloud Platform project to use.
+    type: str
+  auth_kind:
+    description:
+    - The type of credential used.
+    type: str
+    required: true
+    choices:
+    - application
+    - machineaccount
+    - serviceaccount
+  service_account_contents:
+    description:
+    - The contents of a Service Account JSON file, either in a dictionary or as a
+      JSON string that represents it.
+    type: jsonarg
+  service_account_file:
+    description:
+    - The path of a Service Account JSON file if serviceaccount is selected as type.
+    type: path
+  service_account_email:
+    description:
+    - An optional service account email address if machineaccount is selected and
+      the user does not wish to use the default email.
+    type: str
+  scopes:
+    description:
+    - Array of scopes to be used.
+    type: list
+  env_type:
+    description:
+    - Specifies which Ansible environment you're running this module within.
+    - This should not be set unless you know what you're doing.
+    - This only alters the User Agent string for any API requests.
+    type: str
+notes:
+- For authentication, you can set service_account_file using the C(GCP_SERVICE_ACCOUNT_FILE)
+  env variable.
+- For authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS)
+  env variable.
+- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL)
+  env variable.
+- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable.
+- For authentication, you can set scopes using the C(GCP_SCOPES) env variable.
+- Environment variable values will only be used if the playbook values are not set.
+- The I(service_account_email) and I(service_account_file) options are mutually exclusive.
+'''
+
+EXAMPLES = '''
+- name: get info on an instance group
+  gcp_compute_instance_group_info:
+    zone: us-central1-a
+    filters:
+    - name = test_object
+    project: test_project
+    auth_kind: serviceaccount
+    service_account_file: "/tmp/auth.pem"
+'''
+
+RETURN = '''
+resources:
+  description: List of resources
+  returned: always
+  type: complex
+  contains:
+    creationTimestamp:
+      description:
+      - Creation timestamp in RFC3339 text format.
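+      - 'For example: 2020-03-09T09:11:07-07:00 (an illustrative RFC3339 value).'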
+ returned: success + type: str + description: + description: + - An optional description of this resource. Provide this property when you create + the resource. + returned: success + type: str + id: + description: + - A unique identifier for this instance group. + returned: success + type: int + name: + description: + - The name of the instance group. + - The name must be 1-63 characters long, and comply with RFC1035. + returned: success + type: str + namedPorts: + description: + - Assigns a name to a port number. + - 'For example: {name: "http", port: 80}.' + - This allows the system to reference ports by the assigned name instead of + a port number. Named ports can also contain multiple ports. + - 'For example: [{name: "http", port: 80},{name: "http", port: 8080}] Named + ports apply to all instances in this instance group.' + returned: success + type: complex + contains: + name: + description: + - The name for this named port. + - The name must be 1-63 characters long, and comply with RFC1035. + returned: success + type: str + port: + description: + - The port number, which can be a value between 1 and 65535. + returned: success + type: int + network: + description: + - The network to which all instances in the instance group belong. + returned: success + type: dict + region: + description: + - The region where the instance group is located (for regional resources). + returned: success + type: str + subnetwork: + description: + - The subnetwork to which all instances in the instance group belong. + returned: success + type: dict + zone: + description: + - A reference to the zone where the instance group resides. + returned: success + type: str + instances: + description: + - The list of instances associated with this InstanceGroup. + - All instances must be created before being added to an InstanceGroup. + - All instances not in this list will be removed from the InstanceGroup and + will not be deleted. + - Only the full identifier of the instance will be returned. 
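+      - 'For example: https://www.googleapis.com/compute/v1/projects/example-project/zones/us-central1-a/instances/example-instance
+        (illustrative project, zone, and instance names).'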
+ returned: success + type: list +''' + +################################################################################ +# Imports +################################################################################ +from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest +import json + +################################################################################ +# Main +################################################################################ + + +def main(): + module = GcpModule(argument_spec=dict(filters=dict(type='list', elements='str'), zone=dict(required=True, type='str'))) + + if not module.params['scopes']: + module.params['scopes'] = ['https://www.googleapis.com/auth/compute'] + + return_value = {'resources': fetch_list(module, collection(module), query_options(module.params['filters']))} + module.exit_json(**return_value) + + +def collection(module): + return "https://www.googleapis.com/compute/v1/projects/{project}/zones/{zone}/instanceGroups".format(**module.params) + + +def fetch_list(module, link, query): + auth = GcpSession(module, 'compute') + return auth.list(link, return_if_object, array_name='items', params={'filter': query}) + + +def query_options(filters): + if not filters: + return '' + + if len(filters) == 1: + return filters[0] + else: + queries = [] + for f in filters: + # For multiple queries, all queries should have () + if f[0] != '(' and f[-1] != ')': + queries.append("(%s)" % ''.join(f)) + else: + queries.append(f) + + return ' '.join(queries) + + +def return_if_object(module, response): + # If not found, return nothing. + if response.status_code == 404: + return None + + # If no content, return nothing. + if response.status_code == 204: + return None + + try: + module.raise_for_status(response) + result = response.json() + except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst: + module.fail_json(msg="Invalid JSON response with error: %s" % inst) + + if navigate_hash(result, ['error', 'errors']): + module.fail_json(msg=navigate_hash(result, ['error', 'errors'])) + + return result + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/cloud/google/gcp_compute_instance_group_manager_facts.py b/plugins/modules/cloud/google/gcp_compute_instance_group_manager_facts.py new file mode 120000 index 0000000000..2b9ec76192 --- /dev/null +++ b/plugins/modules/cloud/google/gcp_compute_instance_group_manager_facts.py @@ -0,0 +1 @@ +gcp_compute_instance_group_manager_info.py \ No newline at end of file diff --git a/plugins/modules/cloud/google/gcp_compute_instance_group_manager_info.py b/plugins/modules/cloud/google/gcp_compute_instance_group_manager_info.py new file mode 100644 index 0000000000..b9353dc39b --- /dev/null +++ b/plugins/modules/cloud/google/gcp_compute_instance_group_manager_info.py @@ -0,0 +1,340 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (C) 2017 Google +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** AUTO GENERATED CODE *** +# +# ---------------------------------------------------------------------------- +# +# This file is automatically generated by Magic Modules and manual +# changes will be clobbered when the file is regenerated. 
+#
+# Please read more about how to change this file at
+# https://www.github.com/GoogleCloudPlatform/magic-modules
+#
+# ----------------------------------------------------------------------------
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+################################################################################
+# Documentation
+################################################################################
+
+ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'}
+
+DOCUMENTATION = '''
+---
+module: gcp_compute_instance_group_manager_info
+description:
+- Gather info for GCP InstanceGroupManager
+short_description: Gather info for GCP InstanceGroupManager
+author: Google Inc. (@googlecloudplatform)
+requirements:
+- python >= 2.6
+- requests >= 2.18.4
+- google-auth >= 1.3.0
+options:
+  filters:
+    description:
+    - A list of filter value pairs. Available filters are listed here U(https://cloud.google.com/sdk/gcloud/reference/topic/filters).
+    - Each additional filter in the list will be added as an AND condition (filter1
+      and filter2).
+    type: list
+  zone:
+    description:
+    - The zone the managed instance group resides.
+    required: true
+    type: str
+  project:
+    description:
+    - The Google Cloud Platform project to use.
+    type: str
+  auth_kind:
+    description:
+    - The type of credential used.
+    type: str
+    required: true
+    choices:
+    - application
+    - machineaccount
+    - serviceaccount
+  service_account_contents:
+    description:
+    - The contents of a Service Account JSON file, either in a dictionary or as a
+      JSON string that represents it.
+    type: jsonarg
+  service_account_file:
+    description:
+    - The path of a Service Account JSON file if serviceaccount is selected as type.
+    type: path
+  service_account_email:
+    description:
+    - An optional service account email address if machineaccount is selected and
+      the user does not wish to use the default email.
+    type: str
+  scopes:
+    description:
+    - Array of scopes to be used.
+    type: list
+  env_type:
+    description:
+    - Specifies which Ansible environment you're running this module within.
+    - This should not be set unless you know what you're doing.
+    - This only alters the User Agent string for any API requests.
+    type: str
+notes:
+- For authentication, you can set service_account_file using the C(GCP_SERVICE_ACCOUNT_FILE)
+  env variable.
+- For authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS)
+  env variable.
+- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL)
+  env variable.
+- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable.
+- For authentication, you can set scopes using the C(GCP_SCOPES) env variable.
+- Environment variable values will only be used if the playbook values are not set.
+- The I(service_account_email) and I(service_account_file) options are mutually exclusive.
+'''
+
+EXAMPLES = '''
+- name: get info on an instance group manager
+  gcp_compute_instance_group_manager_info:
+    zone: us-west1-a
+    filters:
+    - name = test_object
+    project: test_project
+    auth_kind: serviceaccount
+    service_account_file: "/tmp/auth.pem"
+'''
+
+RETURN = '''
+resources:
+  description: List of resources
+  returned: always
+  type: complex
+  contains:
+    baseInstanceName:
+      description:
+      - The base instance name to use for instances in this group. The value must
+        be 1-58 characters long.
+        Instances are named by appending a hyphen and a random four-character string
+        to the base instance name.
+      - The base instance name must comply with RFC1035.
+      returned: success
+      type: str
+    creationTimestamp:
+      description:
+      - The creation timestamp for this managed instance group in RFC3339 text format.
+      returned: success
+      type: str
+    currentActions:
+      description:
+      - The list of instance actions and the number of instances in this managed instance
+        group that are scheduled for each of those actions.
+      returned: success
+      type: complex
+      contains:
+        abandoning:
+          description:
+          - The total number of instances in the managed instance group that are scheduled
+            to be abandoned. Abandoning an instance removes it from the managed instance
+            group without deleting it.
+          returned: success
+          type: int
+        creating:
+          description:
+          - The number of instances in the managed instance group that are scheduled
+            to be created or are currently being created. If the group fails to create
+            any of these instances, it tries again until it creates the instance successfully.
+          - If you have disabled creation retries, this field will not be populated;
+            instead, the creatingWithoutRetries field will be populated.
+          returned: success
+          type: int
+        creatingWithoutRetries:
+          description:
+          - The number of instances that the managed instance group will attempt to
+            create. The group attempts to create each instance only once. If the group
+            fails to create any of these instances, it decreases the group's targetSize
+            value accordingly.
+          returned: success
+          type: int
+        deleting:
+          description:
+          - The number of instances in the managed instance group that are scheduled
+            to be deleted or are currently being deleted.
+          returned: success
+          type: int
+        none:
+          description:
+          - The number of instances in the managed instance group that are running
+            and have no scheduled actions.
+          returned: success
+          type: int
+        recreating:
+          description:
+          - The number of instances in the managed instance group that are scheduled
+            to be recreated or are currently being recreated.
+          - Recreating an instance deletes the existing root persistent disk and creates
+            a new disk from the image that is defined in the instance template.
+          returned: success
+          type: int
+        refreshing:
+          description:
+          - The number of instances in the managed instance group that are being reconfigured
+            with properties that do not require a restart or a recreate action. For
+            example, setting or removing target pools for the instance.
+          returned: success
+          type: int
+        restarting:
+          description:
+          - The number of instances in the managed instance group that are scheduled
+            to be restarted or are currently being restarted.
+          returned: success
+          type: int
+    description:
+      description:
+      - An optional description of this resource. Provide this property when you create
+        the resource.
+      returned: success
+      type: str
+    id:
+      description:
+      - A unique identifier for this resource.
+      returned: success
+      type: int
+    instanceGroup:
+      description:
+      - The instance group being managed.
+      returned: success
+      type: dict
+    instanceTemplate:
+      description:
+      - The instance template that is specified for this managed instance group. The
+        group uses this template to create all new instances in the managed instance
+        group.
+      returned: success
+      type: dict
+    name:
+      description:
+      - The name of the managed instance group. The name must be 1-63 characters long,
+        and comply with RFC1035.
+ returned: success + type: str + namedPorts: + description: + - Named ports configured for the Instance Groups complementary to this Instance + Group Manager. + returned: success + type: complex + contains: + name: + description: + - The name for this named port. The name must be 1-63 characters long, and + comply with RFC1035. + returned: success + type: str + port: + description: + - The port number, which can be a value between 1 and 65535. + returned: success + type: int + region: + description: + - The region this managed instance group resides (for regional resources). + returned: success + type: str + targetPools: + description: + - TargetPool resources to which instances in the instanceGroup field are added. + The target pools automatically apply to all of the instances in the managed + instance group. + returned: success + type: list + targetSize: + description: + - The target number of running instances for this managed instance group. Deleting + or abandoning instances reduces this number. Resizing the group changes this + number. + returned: success + type: int + zone: + description: + - The zone the managed instance group resides. + returned: success + type: str +''' + +################################################################################ +# Imports +################################################################################ +from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest +import json + +################################################################################ +# Main +################################################################################ + + +def main(): + module = GcpModule(argument_spec=dict(filters=dict(type='list', elements='str'), zone=dict(required=True, type='str'))) + + if not module.params['scopes']: + module.params['scopes'] = ['https://www.googleapis.com/auth/compute'] + + return_value = {'resources': fetch_list(module, collection(module), query_options(module.params['filters']))} + module.exit_json(**return_value) + + +def collection(module): + return "https://www.googleapis.com/compute/v1/projects/{project}/zones/{zone}/instanceGroupManagers".format(**module.params) + + +def fetch_list(module, link, query): + auth = GcpSession(module, 'compute') + return auth.list(link, return_if_object, array_name='items', params={'filter': query}) + + +def query_options(filters): + if not filters: + return '' + + if len(filters) == 1: + return filters[0] + else: + queries = [] + for f in filters: + # For multiple queries, all queries should have () + if f[0] != '(' and f[-1] != ')': + queries.append("(%s)" % ''.join(f)) + else: + queries.append(f) + + return ' '.join(queries) + + +def return_if_object(module, response): + # If not found, return nothing. + if response.status_code == 404: + return None + + # If no content, return nothing. 
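+    # (A 204 response carries an empty body, so the response.json() call below
+    # would raise a decode error; returning early treats "no content" the same
+    # way as "not found".)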
+    if response.status_code == 204:
+        return None
+
+    try:
+        module.raise_for_status(response)
+        result = response.json()
+    except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst:
+        module.fail_json(msg="Invalid JSON response with error: %s" % inst)
+
+    if navigate_hash(result, ['error', 'errors']):
+        module.fail_json(msg=navigate_hash(result, ['error', 'errors']))
+
+    return result
+
+
+if __name__ == "__main__":
+    main()
diff --git a/plugins/modules/cloud/google/gcp_compute_instance_info.py b/plugins/modules/cloud/google/gcp_compute_instance_info.py
new file mode 100644
index 0000000000..c3cffd39e5
--- /dev/null
+++ b/plugins/modules/cloud/google/gcp_compute_instance_info.py
@@ -0,0 +1,649 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) 2017 Google
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+# ----------------------------------------------------------------------------
+#
+# *** AUTO GENERATED CODE *** AUTO GENERATED CODE ***
+#
+# ----------------------------------------------------------------------------
+#
+# This file is automatically generated by Magic Modules and manual
+# changes will be clobbered when the file is regenerated.
+#
+# Please read more about how to change this file at
+# https://www.github.com/GoogleCloudPlatform/magic-modules
+#
+# ----------------------------------------------------------------------------
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+################################################################################
+# Documentation
+################################################################################
+
+ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'}
+
+DOCUMENTATION = '''
+---
+module: gcp_compute_instance_info
+description:
+- Gather info for GCP Instance
+short_description: Gather info for GCP Instance
+author: Google Inc. (@googlecloudplatform)
+requirements:
+- python >= 2.6
+- requests >= 2.18.4
+- google-auth >= 1.3.0
+options:
+  filters:
+    description:
+    - A list of filter value pairs. Available filters are listed here U(https://cloud.google.com/sdk/gcloud/reference/topic/filters).
+    - Each additional filter in the list will be added as an AND condition (filter1
+      and filter2).
+    type: list
+  zone:
+    description:
+    - A reference to the zone where the machine resides.
+    required: true
+    type: str
+  project:
+    description:
+    - The Google Cloud Platform project to use.
+    type: str
+  auth_kind:
+    description:
+    - The type of credential used.
+    type: str
+    required: true
+    choices:
+    - application
+    - machineaccount
+    - serviceaccount
+  service_account_contents:
+    description:
+    - The contents of a Service Account JSON file, either in a dictionary or as a
+      JSON string that represents it.
+    type: jsonarg
+  service_account_file:
+    description:
+    - The path of a Service Account JSON file if serviceaccount is selected as type.
+    type: path
+  service_account_email:
+    description:
+    - An optional service account email address if machineaccount is selected and
+      the user does not wish to use the default email.
+    type: str
+  scopes:
+    description:
+    - Array of scopes to be used.
+    type: list
+  env_type:
+    description:
+    - Specifies which Ansible environment you're running this module within.
+    - This should not be set unless you know what you're doing.
+    - This only alters the User Agent string for any API requests.
+    type: str
+notes:
+- For authentication, you can set service_account_file using the C(GCP_SERVICE_ACCOUNT_FILE)
+  env variable.
+- For authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS)
+  env variable.
+- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL)
+  env variable.
+- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable.
+- For authentication, you can set scopes using the C(GCP_SCOPES) env variable.
+- Environment variable values will only be used if the playbook values are not set.
+- The I(service_account_email) and I(service_account_file) options are mutually exclusive.
+'''
+
+EXAMPLES = '''
+- name: get info on an instance
+  gcp_compute_instance_info:
+    zone: us-central1-a
+    filters:
+    - name = test_object
+    project: test_project
+    auth_kind: serviceaccount
+    service_account_file: "/tmp/auth.pem"
+'''
+
+RETURN = '''
+resources:
+  description: List of resources
+  returned: always
+  type: complex
+  contains:
+    canIpForward:
+      description:
+      - Allows this instance to send and receive packets with non-matching destination
+        or source IPs. This is required if you plan to use this instance to forward
+        routes.
+      returned: success
+      type: bool
+    cpuPlatform:
+      description:
+      - The CPU platform used by this instance.
+      returned: success
+      type: str
+    creationTimestamp:
+      description:
+      - Creation timestamp in RFC3339 text format.
+      returned: success
+      type: str
+    deletionProtection:
+      description:
+      - Whether the resource should be protected against deletion.
+      returned: success
+      type: bool
+    disks:
+      description:
+      - An array of disks that are associated with the instances that are created
+        from this template.
+      returned: success
+      type: complex
+      contains:
+        autoDelete:
+          description:
+          - Specifies whether the disk will be auto-deleted when the instance is deleted
+            (but not when the disk is detached from the instance).
+          - 'Tip: Disks should be set to autoDelete=true so that leftover disks are
+            not left behind on machine deletion.'
+          returned: success
+          type: bool
+        boot:
+          description:
+          - Indicates that this is a boot disk. The virtual machine will use the first
+            partition of the disk for its root filesystem.
+          returned: success
+          type: bool
+        deviceName:
+          description:
+          - Specifies a unique device name of your choice that is reflected into the
+            /dev/disk/by-id/google-* tree of a Linux operating system running within
+            the instance. This name can be used to reference the device for mounting,
+            resizing, and so on, from within the instance.
+          returned: success
+          type: str
+        diskEncryptionKey:
+          description:
+          - Encrypts or decrypts a disk using a customer-supplied encryption key.
+          returned: success
+          type: complex
+          contains:
+            rawKey:
+              description:
+              - Specifies a 256-bit customer-supplied encryption key, encoded in RFC
+                4648 base64 to either encrypt or decrypt this resource.
+              returned: success
+              type: str
+            rsaEncryptedKey:
+              description:
+              - Specifies an RFC 4648 base64 encoded, RSA-wrapped 2048-bit customer-supplied
+                encryption key to either encrypt or decrypt this resource.
+              returned: success
+              type: str
+            sha256:
+              description:
+              - The RFC 4648 base64 encoded SHA-256 hash of the customer-supplied
+                encryption key that protects this resource.
+              returned: success
+              type: str
+        index:
+          description:
+          - Assigns a zero-based index to this disk, where 0 is reserved for the boot disk.
For example, if you have many disks attached to an instance, each + disk would have a unique index number. If not specified, the server will + choose an appropriate value. + returned: success + type: int + initializeParams: + description: + - Specifies the parameters for a new disk that will be created alongside + the new instance. Use initialization parameters to create boot disks or + local SSDs attached to the new instance. + returned: success + type: complex + contains: + diskName: + description: + - Specifies the disk name. If not specified, the default is to use the + name of the instance. + returned: success + type: str + diskSizeGb: + description: + - Specifies the size of the disk in base-2 GB. + returned: success + type: int + diskType: + description: + - Reference to a disk type. + - Specifies the disk type to use to create the instance. + - If not specified, the default is pd-standard. + returned: success + type: str + sourceImage: + description: + - The source image to create this disk. When creating a new instance, + one of initializeParams.sourceImage or disks.source is required. To + create a disk with one of the public operating system images, specify + the image by its family name. + returned: success + type: str + sourceImageEncryptionKey: + description: + - The customer-supplied encryption key of the source image. Required + if the source image is protected by a customer-supplied encryption + key. + - Instance templates do not store customer-supplied encryption keys, + so you cannot create disks for instances in a managed instance group + if the source images are encrypted with your own keys. + returned: success + type: complex + contains: + rawKey: + description: + - Specifies a 256-bit customer-supplied encryption key, encoded + in RFC 4648 base64 to either encrypt or decrypt this resource. + returned: success + type: str + sha256: + description: + - The RFC 4648 base64 encoded SHA-256 hash of the customer-supplied + encryption key that protects this resource. + returned: success + type: str + interface: + description: + - Specifies the disk interface to use for attaching this disk, which is + either SCSI or NVME. The default is SCSI. + - Persistent disks must always use SCSI and the request will fail if you + attempt to attach a persistent disk in any other format than SCSI. + returned: success + type: str + mode: + description: + - The mode in which to attach this disk, either READ_WRITE or READ_ONLY. + If not specified, the default is to attach the disk in READ_WRITE mode. + returned: success + type: str + source: + description: + - Reference to a disk. When creating a new instance, one of initializeParams.sourceImage + or disks.source is required. + - If desired, you can also attach existing non-root persistent disks using + this property. This field is only applicable for persistent disks. + returned: success + type: dict + type: + description: + - Specifies the type of the disk, either SCRATCH or PERSISTENT. If not specified, + the default is PERSISTENT. + returned: success + type: str + guestAccelerators: + description: + - List of the type and count of accelerator cards attached to the instance . + returned: success + type: complex + contains: + acceleratorCount: + description: + - The number of the guest accelerator cards exposed to this instance. + returned: success + type: int + acceleratorType: + description: + - Full or partial URL of the accelerator type resource to expose to this + instance. 
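+          - 'For example: zones/us-central1-a/acceleratorTypes/nvidia-tesla-k80
+            (an illustrative partial URL).'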
+ returned: success + type: str + hostname: + description: + - The hostname of the instance to be created. The specified hostname must be + RFC1035 compliant. If hostname is not specified, the default hostname is [INSTANCE_NAME].c.[PROJECT_ID].internal + when using the global DNS, and [INSTANCE_NAME].[ZONE].c.[PROJECT_ID].internal + when using zonal DNS. + returned: success + type: str + id: + description: + - The unique identifier for the resource. This identifier is defined by the + server. + returned: success + type: int + labelFingerprint: + description: + - The fingerprint used for optimistic locking of this resource. Used internally + during updates. + returned: success + type: str + labels: + description: + - Labels to apply to this instance. A list of key->value pairs. + returned: success + type: dict + metadata: + description: + - The metadata key/value pairs to assign to instances that are created from + this template. These pairs can consist of custom metadata or predefined keys. + returned: success + type: dict + machineType: + description: + - A reference to a machine type which defines VM kind. + returned: success + type: str + minCpuPlatform: + description: + - Specifies a minimum CPU platform for the VM instance. Applicable values are + the friendly names of CPU platforms . + returned: success + type: str + name: + description: + - The name of the resource, provided by the client when initially creating the + resource. The resource name must be 1-63 characters long, and comply with + RFC1035. Specifically, the name must be 1-63 characters long and match the + regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character + must be a lowercase letter, and all following characters must be a dash, lowercase + letter, or digit, except the last character, which cannot be a dash. + returned: success + type: str + networkInterfaces: + description: + - An array of configurations for this interface. This specifies how this interface + is configured to interact with other network services, such as connecting + to the internet. Only one network interface is supported per instance. + returned: success + type: complex + contains: + accessConfigs: + description: + - An array of configurations for this interface. Currently, only one access + config, ONE_TO_ONE_NAT, is supported. If there are no accessConfigs specified, + then this instance will have no external internet access. + returned: success + type: complex + contains: + name: + description: + - The name of this access configuration. The default and recommended + name is External NAT but you can use any arbitrary string you would + like. For example, My external IP or Network Access. + returned: success + type: str + natIP: + description: + - Reference to an address. + - An external IP address associated with this instance. + - Specify an unused static external IP address available to the project + or leave this field undefined to use an IP from a shared ephemeral + IP address pool. If you specify a static external IP address, it must + live in the same region as the zone of the instance. + returned: success + type: dict + type: + description: + - The type of configuration. The default and only option is ONE_TO_ONE_NAT. + returned: success + type: str + setPublicPtr: + description: + - Specifies whether a public DNS PTR record should be created to map + the external IP address of the instance to a DNS domain name. 
+ returned: success + type: bool + publicPtrDomainName: + description: + - The DNS domain name for the public PTR record. You can set this field + only if the setPublicPtr field is enabled. + returned: success + type: str + networkTier: + description: + - This signifies the networking tier used for configuring this access + configuration. If an AccessConfig is specified without a valid external + IP address, an ephemeral IP will be created with this networkTier. + If an AccessConfig with a valid external IP address is specified, + it must match that of the networkTier associated with the Address + resource owning that IP. + returned: success + type: str + aliasIpRanges: + description: + - An array of alias IP ranges for this network interface. Can only be specified + for network interfaces on subnet-mode networks. + returned: success + type: complex + contains: + ipCidrRange: + description: + - The IP CIDR range represented by this alias IP range. + - This IP CIDR range must belong to the specified subnetwork and cannot + contain IP addresses reserved by system or used by other network interfaces. + This range may be a single IP address (e.g. 10.2.3.4), a netmask (e.g. + /24) or a CIDR format string (e.g. 10.1.2.0/24). + returned: success + type: str + subnetworkRangeName: + description: + - Optional subnetwork secondary range name specifying the secondary + range from which to allocate the IP CIDR range for this alias IP range. + If left unspecified, the primary range of the subnetwork will be used. + returned: success + type: str + name: + description: + - The name of the network interface, generated by the server. For network + devices, these are eth0, eth1, etc . + returned: success + type: str + network: + description: + - Specifies the title of an existing network. Not setting the network title + will select the default network interface, which could have SSH already + configured . + returned: success + type: dict + networkIP: + description: + - An IPv4 internal network address to assign to the instance for this network + interface. If not specified by the user, an unused internal IP is assigned + by the system. + returned: success + type: str + subnetwork: + description: + - Reference to a VPC network. + - If the network resource is in legacy mode, do not provide this property. + If the network is in auto subnet mode, providing the subnetwork is optional. + If the network is in custom subnet mode, then this field should be specified. + returned: success + type: dict + scheduling: + description: + - Sets the scheduling options for this instance. + returned: success + type: complex + contains: + automaticRestart: + description: + - Specifies whether the instance should be automatically restarted if it + is terminated by Compute Engine (not terminated by a user). + - You can only set the automatic restart option for standard instances. + Preemptible instances cannot be automatically restarted. + returned: success + type: bool + onHostMaintenance: + description: + - Defines the maintenance behavior for this instance. For standard instances, + the default behavior is MIGRATE. For preemptible instances, the default + and only possible behavior is TERMINATE. + - For more information, see Setting Instance Scheduling Options. + returned: success + type: str + preemptible: + description: + - Defines whether the instance is preemptible. This can only be set during + instance creation, it cannot be set or changed after the instance has + been created. 
+ returned: success + type: bool + serviceAccounts: + description: + - A list of service accounts, with their specified scopes, authorized for this + instance. Only one service account per VM instance is supported. + returned: success + type: complex + contains: + email: + description: + - Email address of the service account. + returned: success + type: str + scopes: + description: + - The list of scopes to be made available for this service account. + returned: success + type: list + shieldedInstanceConfig: + description: + - Configuration for various parameters related to shielded instances. + returned: success + type: complex + contains: + enableSecureBoot: + description: + - Defines whether the instance has Secure Boot enabled. + returned: success + type: bool + enableVtpm: + description: + - Defines whether the instance has the vTPM enabled. + returned: success + type: bool + enableIntegrityMonitoring: + description: + - Defines whether the instance has integrity monitoring enabled. + returned: success + type: bool + status: + description: + - 'The status of the instance. One of the following values: PROVISIONING, STAGING, + RUNNING, STOPPING, SUSPENDING, SUSPENDED, and TERMINATED.' + - As a user, use RUNNING to keep a machine "on" and TERMINATED to turn a machine + off . + returned: success + type: str + statusMessage: + description: + - An optional, human-readable explanation of the status. + returned: success + type: str + tags: + description: + - A list of tags to apply to this instance. Tags are used to identify valid + sources or targets for network firewalls and are specified by the client during + instance creation. The tags can be later modified by the setTags method. Each + tag within the list must comply with RFC1035. + returned: success + type: complex + contains: + fingerprint: + description: + - Specifies a fingerprint for this request, which is essentially a hash + of the metadata's contents and used for optimistic locking. + - The fingerprint is initially generated by Compute Engine and changes after + every request to modify or update metadata. You must always provide an + up-to-date fingerprint hash in order to update or change metadata. + returned: success + type: str + items: + description: + - An array of tags. Each tag must be 1-63 characters long, and comply with + RFC1035. + returned: success + type: list + zone: + description: + - A reference to the zone where the machine resides. 
+ returned: success + type: str +''' + +################################################################################ +# Imports +################################################################################ +from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest +import json + +################################################################################ +# Main +################################################################################ + + +def main(): + module = GcpModule(argument_spec=dict(filters=dict(type='list', elements='str'), zone=dict(required=True, type='str'))) + + if not module.params['scopes']: + module.params['scopes'] = ['https://www.googleapis.com/auth/compute'] + + return_value = {'resources': fetch_list(module, collection(module), query_options(module.params['filters']))} + module.exit_json(**return_value) + + +def collection(module): + return "https://www.googleapis.com/compute/v1/projects/{project}/zones/{zone}/instances".format(**module.params) + + +def fetch_list(module, link, query): + auth = GcpSession(module, 'compute') + return auth.list(link, return_if_object, array_name='items', params={'filter': query}) + + +def query_options(filters): + if not filters: + return '' + + if len(filters) == 1: + return filters[0] + else: + queries = [] + for f in filters: + # For multiple queries, all queries should have () + if f[0] != '(' and f[-1] != ')': + queries.append("(%s)" % ''.join(f)) + else: + queries.append(f) + + return ' '.join(queries) + + +def return_if_object(module, response): + # If not found, return nothing. + if response.status_code == 404: + return None + + # If no content, return nothing. + if response.status_code == 204: + return None + + try: + module.raise_for_status(response) + result = response.json() + except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst: + module.fail_json(msg="Invalid JSON response with error: %s" % inst) + + if navigate_hash(result, ['error', 'errors']): + module.fail_json(msg=navigate_hash(result, ['error', 'errors'])) + + return result + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/cloud/google/gcp_compute_instance_template_facts.py b/plugins/modules/cloud/google/gcp_compute_instance_template_facts.py new file mode 120000 index 0000000000..a9826ba8f1 --- /dev/null +++ b/plugins/modules/cloud/google/gcp_compute_instance_template_facts.py @@ -0,0 +1 @@ +gcp_compute_instance_template_info.py \ No newline at end of file diff --git a/plugins/modules/cloud/google/gcp_compute_instance_template_info.py b/plugins/modules/cloud/google/gcp_compute_instance_template_info.py new file mode 100644 index 0000000000..394977f18c --- /dev/null +++ b/plugins/modules/cloud/google/gcp_compute_instance_template_info.py @@ -0,0 +1,607 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (C) 2017 Google +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** AUTO GENERATED CODE *** +# +# ---------------------------------------------------------------------------- +# +# This file is automatically generated by Magic Modules and manual +# changes will be clobbered when the file is regenerated. 
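+#
+# A note on the shared query helper: every *_info module in this commit
+# builds its API filter string with a query_options() function that wraps
+# each filter in parentheses and joins them with spaces. A sketch of the
+# observable behavior (hypothetical inputs, not part of the generated code):
+#
+#   query_options([])                                   -> ''
+#   query_options(['name = test_object'])               -> 'name = test_object'
+#   query_options(['name = a', 'zone = us-central1-a']) -> '(name = a) (zone = us-central1-a)'
+#
+# Space-separated parenthesized terms appear to be treated as an implicit
+# AND by the Compute API filter syntax, which is why the option docs
+# describe additional filters as AND conditions.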
+#
+# Please read more about how to change this file at
+# https://www.github.com/GoogleCloudPlatform/magic-modules
+#
+# ----------------------------------------------------------------------------

+from __future__ import absolute_import, division, print_function

+__metaclass__ = type

+################################################################################
+# Documentation
+################################################################################

+ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'}

+DOCUMENTATION = '''
+---
+module: gcp_compute_instance_template_info
+description:
+- Gather info for GCP InstanceTemplate
+short_description: Gather info for GCP InstanceTemplate
+author: Google Inc. (@googlecloudplatform)
+requirements:
+- python >= 2.6
+- requests >= 2.18.4
+- google-auth >= 1.3.0
+options:
+  filters:
+    description:
+    - A list of filter value pairs. Available filters are listed here U(https://cloud.google.com/sdk/gcloud/reference/topic/filters).
+    - Each additional filter in the list will be added as an AND condition (filter1
+      and filter2).
+    type: list
+  project:
+    description:
+    - The Google Cloud Platform project to use.
+    type: str
+  auth_kind:
+    description:
+    - The type of credential used.
+    type: str
+    required: true
+    choices:
+    - application
+    - machineaccount
+    - serviceaccount
+  service_account_contents:
+    description:
+    - The contents of a Service Account JSON file, either in a dictionary or as a
+      JSON string that represents it.
+    type: jsonarg
+  service_account_file:
+    description:
+    - The path of a Service Account JSON file if serviceaccount is selected as type.
+    type: path
+  service_account_email:
+    description:
+    - An optional service account email address if machineaccount is selected and
+      the user does not wish to use the default email.
+    type: str
+  scopes:
+    description:
+    - Array of scopes to be used.
+    type: list
+  env_type:
+    description:
+    - Specifies which Ansible environment you're running this module within.
+    - This should not be set unless you know what you're doing.
+    - This only alters the User Agent string for any API requests.
+    type: str
+notes:
+- For authentication, you can set service_account_file using the C(GCP_SERVICE_ACCOUNT_FILE)
+  env variable.
+- For authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS)
+  env variable.
+- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL)
+  env variable.
+- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable.
+- For authentication, you can set scopes using the C(GCP_SCOPES) env variable.
+- Environment variable values will only be used if the playbook values are not set.
+- The I(service_account_email) and I(service_account_file) options are mutually exclusive.
+'''

+EXAMPLES = '''
+- name: get info on an instance template
+  gcp_compute_instance_template_info:
+    filters:
+    - name = test_object
+    project: test_project
+    auth_kind: serviceaccount
+    service_account_file: "/tmp/auth.pem"
+'''

+RETURN = '''
+resources:
+  description: List of resources
+  returned: always
+  type: complex
+  contains:
+    creationTimestamp:
+      description:
+      - Creation timestamp in RFC3339 text format.
+      returned: success
+      type: str
+    description:
+      description:
+      - An optional description of this resource. Provide this property when you create
+        the resource.
+ returned: success + type: str + id: + description: + - The unique identifier for the resource. This identifier is defined by the + server. + returned: success + type: int + name: + description: + - Name of the resource. The name is 1-63 characters long and complies with RFC1035. + returned: success + type: str + properties: + description: + - The instance properties for this instance template. + returned: success + type: complex + contains: + canIpForward: + description: + - Enables instances created based on this template to send packets with + source IP addresses other than their own and receive packets with destination + IP addresses other than their own. If these instances will be used as + an IP gateway or it will be set as the next-hop in a Route resource, specify + true. If unsure, leave this set to false. + returned: success + type: bool + description: + description: + - An optional text description for the instances that are created from this + instance template. + returned: success + type: str + disks: + description: + - An array of disks that are associated with the instances that are created + from this template. + returned: success + type: complex + contains: + autoDelete: + description: + - Specifies whether the disk will be auto-deleted when the instance + is deleted (but not when the disk is detached from the instance). + - 'Tip: Disks should be set to autoDelete=true so that leftover disks + are not left behind on machine deletion.' + returned: success + type: bool + boot: + description: + - Indicates that this is a boot disk. The virtual machine will use the + first partition of the disk for its root filesystem. + returned: success + type: bool + deviceName: + description: + - Specifies a unique device name of your choice that is reflected into + the /dev/disk/by-id/google-* tree of a Linux operating system running + within the instance. This name can be used to reference the device + for mounting, resizing, and so on, from within the instance. + returned: success + type: str + diskEncryptionKey: + description: + - Encrypts or decrypts a disk using a customer-supplied encryption key. + returned: success + type: complex + contains: + rawKey: + description: + - Specifies a 256-bit customer-supplied encryption key, encoded + in RFC 4648 base64 to either encrypt or decrypt this resource. + returned: success + type: str + rsaEncryptedKey: + description: + - Specifies an RFC 4648 base64 encoded, RSA-wrapped 2048-bit customer-supplied + encryption key to either encrypt or decrypt this resource. + returned: success + type: str + sha256: + description: + - The RFC 4648 base64 encoded SHA-256 hash of the customer-supplied + encryption key that protects this resource. + returned: success + type: str + index: + description: + - Assigns a zero-based index to this disk, where 0 is reserved for the + boot disk. For example, if you have many disks attached to an instance, + each disk would have a unique index number. If not specified, the + server will choose an appropriate value. + returned: success + type: int + initializeParams: + description: + - Specifies the parameters for a new disk that will be created alongside + the new instance. Use initialization parameters to create boot disks + or local SSDs attached to the new instance. + returned: success + type: complex + contains: + diskName: + description: + - Specifies the disk name. If not specified, the default is to use + the name of the instance. 
+ returned: success + type: str + diskSizeGb: + description: + - Specifies the size of the disk in base-2 GB. + returned: success + type: int + diskType: + description: + - Reference to a disk type. + - Specifies the disk type to use to create the instance. + - If not specified, the default is pd-standard. + returned: success + type: str + sourceImage: + description: + - The source image to create this disk. When creating a new instance, + one of initializeParams.sourceImage or disks.source is required. + To create a disk with one of the public operating system images, + specify the image by its family name. + returned: success + type: str + sourceImageEncryptionKey: + description: + - The customer-supplied encryption key of the source image. Required + if the source image is protected by a customer-supplied encryption + key. + - Instance templates do not store customer-supplied encryption keys, + so you cannot create disks for instances in a managed instance + group if the source images are encrypted with your own keys. + returned: success + type: complex + contains: + rawKey: + description: + - Specifies a 256-bit customer-supplied encryption key, encoded + in RFC 4648 base64 to either encrypt or decrypt this resource. + returned: success + type: str + sha256: + description: + - The RFC 4648 base64 encoded SHA-256 hash of the customer-supplied + encryption key that protects this resource. + returned: success + type: str + interface: + description: + - Specifies the disk interface to use for attaching this disk, which + is either SCSI or NVME. The default is SCSI. + - Persistent disks must always use SCSI and the request will fail if + you attempt to attach a persistent disk in any other format than SCSI. + returned: success + type: str + mode: + description: + - The mode in which to attach this disk, either READ_WRITE or READ_ONLY. + If not specified, the default is to attach the disk in READ_WRITE + mode. + returned: success + type: str + source: + description: + - Reference to a disk. When creating a new instance, one of initializeParams.sourceImage + or disks.source is required. + - If desired, you can also attach existing non-root persistent disks + using this property. This field is only applicable for persistent + disks. + - Note that for InstanceTemplate, specify the disk name, not the URL + for the disk. + returned: success + type: dict + type: + description: + - Specifies the type of the disk, either SCRATCH or PERSISTENT. If not + specified, the default is PERSISTENT. + returned: success + type: str + labels: + description: + - Labels to apply to this address. A list of key->value pairs. + returned: success + type: dict + machineType: + description: + - The machine type to use in the VM instance template. + returned: success + type: str + minCpuPlatform: + description: + - Specifies a minimum CPU platform for the VM instance. Applicable values + are the friendly names of CPU platforms . + returned: success + type: str + metadata: + description: + - The metadata key/value pairs to assign to instances that are created from + this template. These pairs can consist of custom metadata or predefined + keys. + returned: success + type: dict + guestAccelerators: + description: + - List of the type and count of accelerator cards attached to the instance + . + returned: success + type: complex + contains: + acceleratorCount: + description: + - The number of the guest accelerator cards exposed to this instance. 
+ returned: success + type: int + acceleratorType: + description: + - Full or partial URL of the accelerator type resource to expose to + this instance. + returned: success + type: str + networkInterfaces: + description: + - An array of configurations for this interface. This specifies how this + interface is configured to interact with other network services, such + as connecting to the internet. Only one network interface is supported + per instance. + returned: success + type: complex + contains: + accessConfigs: + description: + - An array of configurations for this interface. Currently, only one + access config, ONE_TO_ONE_NAT, is supported. If there are no accessConfigs + specified, then this instance will have no external internet access. + returned: success + type: complex + contains: + name: + description: + - The name of this access configuration. The default and recommended + name is External NAT but you can use any arbitrary string you + would like. For example, My external IP or Network Access. + returned: success + type: str + natIP: + description: + - Reference to an address. + - An external IP address associated with this instance. + - Specify an unused static external IP address available to the + project or leave this field undefined to use an IP from a shared + ephemeral IP address pool. If you specify a static external IP + address, it must live in the same region as the zone of the instance. + returned: success + type: dict + type: + description: + - The type of configuration. The default and only option is ONE_TO_ONE_NAT. + returned: success + type: str + setPublicPtr: + description: + - Specifies whether a public DNS PTR record should be created to + map the external IP address of the instance to a DNS domain name. + returned: success + type: bool + publicPtrDomainName: + description: + - The DNS domain name for the public PTR record. You can set this + field only if the setPublicPtr field is enabled. + returned: success + type: str + networkTier: + description: + - This signifies the networking tier used for configuring this access + configuration. If an AccessConfig is specified without a valid + external IP address, an ephemeral IP will be created with this + networkTier. If an AccessConfig with a valid external IP address + is specified, it must match that of the networkTier associated + with the Address resource owning that IP. + returned: success + type: str + aliasIpRanges: + description: + - An array of alias IP ranges for this network interface. Can only be + specified for network interfaces on subnet-mode networks. + returned: success + type: complex + contains: + ipCidrRange: + description: + - The IP CIDR range represented by this alias IP range. + - This IP CIDR range must belong to the specified subnetwork and + cannot contain IP addresses reserved by system or used by other + network interfaces. This range may be a single IP address (e.g. + 10.2.3.4), a netmask (e.g. /24) or a CIDR format string (e.g. + 10.1.2.0/24). + returned: success + type: str + subnetworkRangeName: + description: + - Optional subnetwork secondary range name specifying the secondary + range from which to allocate the IP CIDR range for this alias + IP range. If left unspecified, the primary range of the subnetwork + will be used. + returned: success + type: str + name: + description: + - The name of the network interface, generated by the server. For network + devices, these are eth0, eth1, etc . 
+ returned: success + type: str + network: + description: + - Specifies the title of an existing network. When creating an instance, + if neither the network nor the subnetwork is specified, the default + network global/networks/default is used; if the network is not specified + but the subnetwork is specified, the network is inferred. + returned: success + type: dict + networkIP: + description: + - An IPv4 internal network address to assign to the instance for this + network interface. If not specified by the user, an unused internal + IP is assigned by the system. + returned: success + type: str + subnetwork: + description: + - Reference to a VPC network. + - If the network resource is in legacy mode, do not provide this property. + If the network is in auto subnet mode, providing the subnetwork is + optional. If the network is in custom subnet mode, then this field + should be specified. + returned: success + type: dict + scheduling: + description: + - Sets the scheduling options for this instance. + returned: success + type: complex + contains: + automaticRestart: + description: + - Specifies whether the instance should be automatically restarted if + it is terminated by Compute Engine (not terminated by a user). + - You can only set the automatic restart option for standard instances. + Preemptible instances cannot be automatically restarted. + returned: success + type: bool + onHostMaintenance: + description: + - Defines the maintenance behavior for this instance. For standard instances, + the default behavior is MIGRATE. For preemptible instances, the default + and only possible behavior is TERMINATE. + - For more information, see Setting Instance Scheduling Options. + returned: success + type: str + preemptible: + description: + - Defines whether the instance is preemptible. This can only be set + during instance creation, it cannot be set or changed after the instance + has been created. + returned: success + type: bool + serviceAccounts: + description: + - A list of service accounts, with their specified scopes, authorized for + this instance. Only one service account per VM instance is supported. + returned: success + type: complex + contains: + email: + description: + - Email address of the service account. + returned: success + type: str + scopes: + description: + - The list of scopes to be made available for this service account. + returned: success + type: list + tags: + description: + - A list of tags to apply to this instance. Tags are used to identify valid + sources or targets for network firewalls and are specified by the client + during instance creation. The tags can be later modified by the setTags + method. Each tag within the list must comply with RFC1035. + returned: success + type: complex + contains: + fingerprint: + description: + - Specifies a fingerprint for this request, which is essentially a hash + of the metadata's contents and used for optimistic locking. + - The fingerprint is initially generated by Compute Engine and changes + after every request to modify or update metadata. You must always + provide an up-to-date fingerprint hash in order to update or change + metadata. + returned: success + type: str + items: + description: + - An array of tags. Each tag must be 1-63 characters long, and comply + with RFC1035. 
+ returned: success + type: list +''' + +################################################################################ +# Imports +################################################################################ +from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest +import json + +################################################################################ +# Main +################################################################################ + + +def main(): + module = GcpModule(argument_spec=dict(filters=dict(type='list', elements='str'))) + + if not module.params['scopes']: + module.params['scopes'] = ['https://www.googleapis.com/auth/compute'] + + return_value = {'resources': fetch_list(module, collection(module), query_options(module.params['filters']))} + module.exit_json(**return_value) + + +def collection(module): + return "https://www.googleapis.com/compute/v1/projects/{project}/global/instanceTemplates".format(**module.params) + + +def fetch_list(module, link, query): + auth = GcpSession(module, 'compute') + return auth.list(link, return_if_object, array_name='items', params={'filter': query}) + + +def query_options(filters): + if not filters: + return '' + + if len(filters) == 1: + return filters[0] + else: + queries = [] + for f in filters: + # For multiple queries, all queries should have () + if f[0] != '(' and f[-1] != ')': + queries.append("(%s)" % ''.join(f)) + else: + queries.append(f) + + return ' '.join(queries) + + +def return_if_object(module, response): + # If not found, return nothing. + if response.status_code == 404: + return None + + # If no content, return nothing. + if response.status_code == 204: + return None + + try: + module.raise_for_status(response) + result = response.json() + except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst: + module.fail_json(msg="Invalid JSON response with error: %s" % inst) + + if navigate_hash(result, ['error', 'errors']): + module.fail_json(msg=navigate_hash(result, ['error', 'errors'])) + + return result + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/cloud/google/gcp_compute_interconnect_attachment_facts.py b/plugins/modules/cloud/google/gcp_compute_interconnect_attachment_facts.py new file mode 120000 index 0000000000..479308d6e7 --- /dev/null +++ b/plugins/modules/cloud/google/gcp_compute_interconnect_attachment_facts.py @@ -0,0 +1 @@ +gcp_compute_interconnect_attachment_info.py \ No newline at end of file diff --git a/plugins/modules/cloud/google/gcp_compute_interconnect_attachment_info.py b/plugins/modules/cloud/google/gcp_compute_interconnect_attachment_info.py new file mode 100644 index 0000000000..6e80d287b3 --- /dev/null +++ b/plugins/modules/cloud/google/gcp_compute_interconnect_attachment_info.py @@ -0,0 +1,336 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (C) 2017 Google +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** AUTO GENERATED CODE *** +# +# ---------------------------------------------------------------------------- +# +# This file is automatically generated by Magic Modules and manual +# changes will be clobbered when the file is regenerated. 
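+#
+# Compatibility note: the *_facts.py files in this commit are symlinks
+# (git file mode 120000) to their *_info.py counterparts, so playbooks
+# that still use the old facts names run exactly the same module code.
+# An illustrative task using the legacy alias of the module below:
+#
+#   - name: old facts alias resolves to the info module
+#     gcp_compute_interconnect_attachment_facts:
+#       region: us-central1
+#       filters:
+#       - name = test_object
+#       project: test_project
+#       auth_kind: serviceaccount
+#       service_account_file: "/tmp/auth.pem"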
+#
+# Please read more about how to change this file at
+# https://www.github.com/GoogleCloudPlatform/magic-modules
+#
+# ----------------------------------------------------------------------------

+from __future__ import absolute_import, division, print_function

+__metaclass__ = type

+################################################################################
+# Documentation
+################################################################################

+ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'}

+DOCUMENTATION = '''
+---
+module: gcp_compute_interconnect_attachment_info
+description:
+- Gather info for GCP InterconnectAttachment
+short_description: Gather info for GCP InterconnectAttachment
+author: Google Inc. (@googlecloudplatform)
+requirements:
+- python >= 2.6
+- requests >= 2.18.4
+- google-auth >= 1.3.0
+options:
+  filters:
+    description:
+    - A list of filter value pairs. Available filters are listed here U(https://cloud.google.com/sdk/gcloud/reference/topic/filters).
+    - Each additional filter in the list will be added as an AND condition (filter1
+      and filter2).
+    type: list
+  region:
+    description:
+    - Region where the regional interconnect attachment resides.
+    required: true
+    type: str
+  project:
+    description:
+    - The Google Cloud Platform project to use.
+    type: str
+  auth_kind:
+    description:
+    - The type of credential used.
+    type: str
+    required: true
+    choices:
+    - application
+    - machineaccount
+    - serviceaccount
+  service_account_contents:
+    description:
+    - The contents of a Service Account JSON file, either in a dictionary or as a
+      JSON string that represents it.
+    type: jsonarg
+  service_account_file:
+    description:
+    - The path of a Service Account JSON file if serviceaccount is selected as type.
+    type: path
+  service_account_email:
+    description:
+    - An optional service account email address if machineaccount is selected and
+      the user does not wish to use the default email.
+    type: str
+  scopes:
+    description:
+    - Array of scopes to be used.
+    type: list
+  env_type:
+    description:
+    - Specifies which Ansible environment you're running this module within.
+    - This should not be set unless you know what you're doing.
+    - This only alters the User Agent string for any API requests.
+    type: str
+notes:
+- For authentication, you can set service_account_file using the C(GCP_SERVICE_ACCOUNT_FILE)
+  env variable.
+- For authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS)
+  env variable.
+- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL)
+  env variable.
+- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable.
+- For authentication, you can set scopes using the C(GCP_SCOPES) env variable.
+- Environment variable values will only be used if the playbook values are not set.
+- The I(service_account_email) and I(service_account_file) options are mutually exclusive.
+'''

+EXAMPLES = '''
+- name: get info on an interconnect attachment
+  gcp_compute_interconnect_attachment_info:
+    region: us-central1
+    filters:
+    - name = test_object
+    project: test_project
+    auth_kind: serviceaccount
+    service_account_file: "/tmp/auth.pem"
+'''

+RETURN = '''
+resources:
+  description: List of resources
+  returned: always
+  type: complex
+  contains:
+    adminEnabled:
+      description:
+      - Whether the VLAN attachment is enabled or disabled.
When using PARTNER type + this will Pre-Activate the interconnect attachment . + returned: success + type: bool + cloudRouterIpAddress: + description: + - IPv4 address + prefix length to be configured on Cloud Router Interface for + this interconnect attachment. + returned: success + type: str + customerRouterIpAddress: + description: + - IPv4 address + prefix length to be configured on the customer router subinterface + for this interconnect attachment. + returned: success + type: str + interconnect: + description: + - URL of the underlying Interconnect object that this attachment's traffic will + traverse through. Required if type is DEDICATED, must not be set if type is + PARTNER. + returned: success + type: str + description: + description: + - An optional description of this resource. + returned: success + type: str + bandwidth: + description: + - Provisioned bandwidth capacity for the interconnect attachment. + - For attachments of type DEDICATED, the user can set the bandwidth. + - For attachments of type PARTNER, the Google Partner that is operating the + interconnect must set the bandwidth. + - Output only for PARTNER type, mutable for PARTNER_PROVIDER and DEDICATED, + Defaults to BPS_10G . + returned: success + type: str + edgeAvailabilityDomain: + description: + - Desired availability domain for the attachment. Only available for type PARTNER, + at creation time. For improved reliability, customers should configure a pair + of attachments with one per availability domain. The selected availability + domain will be provided to the Partner via the pairing key so that the provisioned + circuit will lie in the specified domain. If not specified, the value will + default to AVAILABILITY_DOMAIN_ANY. + returned: success + type: str + pairingKey: + description: + - '[Output only for type PARTNER. Not present for DEDICATED]. The opaque identifier + of an PARTNER attachment used to initiate provisioning with a selected partner. + Of the form "XXXXX/region/domain" .' + returned: success + type: str + partnerAsn: + description: + - "[Output only for type PARTNER. Not present for DEDICATED]. Optional BGP ASN + for the router that should be supplied by a layer 3 Partner if they configured + BGP on behalf of the customer." + returned: success + type: str + privateInterconnectInfo: + description: + - Information specific to an InterconnectAttachment. This property is populated + if the interconnect that this is attached to is of type DEDICATED. + returned: success + type: complex + contains: + tag8021q: + description: + - 802.1q encapsulation tag to be used for traffic between Google and the + customer, going to and from this network and region. + returned: success + type: int + type: + description: + - The type of InterconnectAttachment you wish to create. Defaults to DEDICATED. + returned: success + type: str + state: + description: + - "[Output Only] The current state of this attachment's functionality." + returned: success + type: str + googleReferenceId: + description: + - Google reference ID, to be used when raising support tickets with Google or + otherwise to debug backend connectivity issues. + returned: success + type: str + router: + description: + - URL of the cloud router to be used for dynamic routing. This router must be + in the same region as this InterconnectAttachment. The InterconnectAttachment + will automatically connect the Interconnect to the network & region within + which the Cloud Router is configured. 
+ returned: success + type: dict + creationTimestamp: + description: + - Creation timestamp in RFC3339 text format. + returned: success + type: str + id: + description: + - The unique identifier for the resource. This identifier is defined by the + server. + returned: success + type: str + name: + description: + - Name of the resource. Provided by the client when the resource is created. + The name must be 1-63 characters long, and comply with RFC1035. Specifically, + the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` + which means the first character must be a lowercase letter, and all following + characters must be a dash, lowercase letter, or digit, except the last character, + which cannot be a dash. + returned: success + type: str + candidateSubnets: + description: + - Up to 16 candidate prefixes that can be used to restrict the allocation of + cloudRouterIpAddress and customerRouterIpAddress for this attachment. + - All prefixes must be within link-local address space (169.254.0.0/16) and + must be /29 or shorter (/28, /27, etc). Google will attempt to select an unused + /29 from the supplied candidate prefix(es). The request will fail if all possible + /29s are in use on Google's edge. If not supplied, Google will randomly select + an unused /29 from all of link-local space. + returned: success + type: list + vlanTag8021q: + description: + - The IEEE 802.1Q VLAN tag for this attachment, in the range 2-4094. When using + PARTNER type this will be managed upstream. + returned: success + type: int + region: + description: + - Region where the regional interconnect attachment resides. + returned: success + type: str +''' + +################################################################################ +# Imports +################################################################################ +from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest +import json + +################################################################################ +# Main +################################################################################ + + +def main(): + module = GcpModule(argument_spec=dict(filters=dict(type='list', elements='str'), region=dict(required=True, type='str'))) + + if not module.params['scopes']: + module.params['scopes'] = ['https://www.googleapis.com/auth/compute'] + + return_value = {'resources': fetch_list(module, collection(module), query_options(module.params['filters']))} + module.exit_json(**return_value) + + +def collection(module): + return "https://www.googleapis.com/compute/v1/projects/{project}/regions/{region}/interconnectAttachments".format(**module.params) + + +def fetch_list(module, link, query): + auth = GcpSession(module, 'compute') + return auth.list(link, return_if_object, array_name='items', params={'filter': query}) + + +def query_options(filters): + if not filters: + return '' + + if len(filters) == 1: + return filters[0] + else: + queries = [] + for f in filters: + # For multiple queries, all queries should have () + if f[0] != '(' and f[-1] != ')': + queries.append("(%s)" % ''.join(f)) + else: + queries.append(f) + + return ' '.join(queries) + + +def return_if_object(module, response): + # If not found, return nothing. + if response.status_code == 404: + return None + + # If no content, return nothing. 
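+    # (A 204 response carries an empty body, so calling response.json() on
+    # it would raise; returning None here lets the list helper treat the
+    # response as containing no items.)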
+    if response.status_code == 204:
+        return None

+    try:
+        module.raise_for_status(response)
+        result = response.json()
+    except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst:
+        module.fail_json(msg="Invalid JSON response with error: %s" % inst)

+    if navigate_hash(result, ['error', 'errors']):
+        module.fail_json(msg=navigate_hash(result, ['error', 'errors']))

+    return result


+if __name__ == "__main__":
+    main()
diff --git a/plugins/modules/cloud/google/gcp_compute_network_facts.py b/plugins/modules/cloud/google/gcp_compute_network_facts.py
new file mode 120000
index 0000000000..c2e964a21d
--- /dev/null
+++ b/plugins/modules/cloud/google/gcp_compute_network_facts.py
@@ -0,0 +1 @@
+gcp_compute_network_info.py
\ No newline at end of file
diff --git a/plugins/modules/cloud/google/gcp_compute_network_info.py b/plugins/modules/cloud/google/gcp_compute_network_info.py
new file mode 100644
index 0000000000..b12b2fe15a
--- /dev/null
+++ b/plugins/modules/cloud/google/gcp_compute_network_info.py
@@ -0,0 +1,248 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) 2017 Google
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+# ----------------------------------------------------------------------------
+#
+# *** AUTO GENERATED CODE *** AUTO GENERATED CODE ***
+#
+# ----------------------------------------------------------------------------
+#
+# This file is automatically generated by Magic Modules and manual
+# changes will be clobbered when the file is regenerated.
+#
+# Please read more about how to change this file at
+# https://www.github.com/GoogleCloudPlatform/magic-modules
+#
+# ----------------------------------------------------------------------------

+from __future__ import absolute_import, division, print_function

+__metaclass__ = type

+################################################################################
+# Documentation
+################################################################################

+ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'}

+DOCUMENTATION = '''
+---
+module: gcp_compute_network_info
+description:
+- Gather info for GCP Network
+short_description: Gather info for GCP Network
+author: Google Inc. (@googlecloudplatform)
+requirements:
+- python >= 2.6
+- requests >= 2.18.4
+- google-auth >= 1.3.0
+options:
+  filters:
+    description:
+    - A list of filter value pairs. Available filters are listed here U(https://cloud.google.com/sdk/gcloud/reference/topic/filters).
+    - Each additional filter in the list will be added as an AND condition (filter1
+      and filter2).
+    type: list
+  project:
+    description:
+    - The Google Cloud Platform project to use.
+    type: str
+  auth_kind:
+    description:
+    - The type of credential used.
+    type: str
+    required: true
+    choices:
+    - application
+    - machineaccount
+    - serviceaccount
+  service_account_contents:
+    description:
+    - The contents of a Service Account JSON file, either in a dictionary or as a
+      JSON string that represents it.
+    type: jsonarg
+  service_account_file:
+    description:
+    - The path of a Service Account JSON file if serviceaccount is selected as type.
+    type: path
+  service_account_email:
+    description:
+    - An optional service account email address if machineaccount is selected and
+      the user does not wish to use the default email.
+    type: str
+  scopes:
+    description:
+    - Array of scopes to be used.
+    type: list
+  env_type:
+    description:
+    - Specifies which Ansible environment you're running this module within.
+    - This should not be set unless you know what you're doing.
+    - This only alters the User Agent string for any API requests.
+    type: str
+notes:
+- For authentication, you can set service_account_file using the C(GCP_SERVICE_ACCOUNT_FILE)
+  env variable.
+- For authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS)
+  env variable.
+- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL)
+  env variable.
+- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable.
+- For authentication, you can set scopes using the C(GCP_SCOPES) env variable.
+- Environment variable values will only be used if the playbook values are not set.
+- The I(service_account_email) and I(service_account_file) options are mutually exclusive.
+'''

+EXAMPLES = '''
+- name: get info on a network
+  gcp_compute_network_info:
+    filters:
+    - name = test_object
+    project: test_project
+    auth_kind: serviceaccount
+    service_account_file: "/tmp/auth.pem"
+'''

+RETURN = '''
+resources:
+  description: List of resources
+  returned: always
+  type: complex
+  contains:
+    description:
+      description:
+      - An optional description of this resource. The resource must be recreated to
+        modify this field.
+      returned: success
+      type: str
+    gateway_ipv4:
+      description:
+      - The gateway address for default routing out of the network. This value is
+        selected by GCP.
+      returned: success
+      type: str
+    id:
+      description:
+      - The unique identifier for the resource.
+      returned: success
+      type: int
+    name:
+      description:
+      - Name of the resource. Provided by the client when the resource is created.
+        The name must be 1-63 characters long, and comply with RFC1035. Specifically,
+        the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?`
+        which means the first character must be a lowercase letter, and all following
+        characters must be a dash, lowercase letter, or digit, except the last character,
+        which cannot be a dash.
+      returned: success
+      type: str
+    subnetworks:
+      description:
+      - Server-defined fully-qualified URLs for all subnetworks in this network.
+      returned: success
+      type: list
+    autoCreateSubnetworks:
+      description:
+      - When set to `true`, the network is created in "auto subnet mode" and it will
+        create a subnet for each region automatically across the `10.128.0.0/9` address
+        range.
+      - When set to `false`, the network is created in "custom subnet mode" so the
+        user can explicitly connect subnetwork resources.
+      returned: success
+      type: bool
+    creationTimestamp:
+      description:
+      - Creation timestamp in RFC3339 text format.
+      returned: success
+      type: str
+    routingConfig:
+      description:
+      - The network-level routing configuration for this network. Used by Cloud Router
+        to determine what type of network-wide routing behavior to enforce.
+      returned: success
+      type: complex
+      contains:
+        routingMode:
+          description:
+          - The network-wide routing mode to use. If set to `REGIONAL`, this network's
+            cloud routers will only advertise routes with subnetworks of this network
+            in the same region as the router. If set to `GLOBAL`, this network's cloud
+            routers will advertise routes with all subnetworks of this network, across
+            regions.
+ returned: success + type: str +''' + +################################################################################ +# Imports +################################################################################ +from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest +import json + +################################################################################ +# Main +################################################################################ + + +def main(): + module = GcpModule(argument_spec=dict(filters=dict(type='list', elements='str'))) + + if not module.params['scopes']: + module.params['scopes'] = ['https://www.googleapis.com/auth/compute'] + + return_value = {'resources': fetch_list(module, collection(module), query_options(module.params['filters']))} + module.exit_json(**return_value) + + +def collection(module): + return "https://www.googleapis.com/compute/v1/projects/{project}/global/networks".format(**module.params) + + +def fetch_list(module, link, query): + auth = GcpSession(module, 'compute') + return auth.list(link, return_if_object, array_name='items', params={'filter': query}) + + +def query_options(filters): + if not filters: + return '' + + if len(filters) == 1: + return filters[0] + else: + queries = [] + for f in filters: + # For multiple queries, all queries should have () + if f[0] != '(' and f[-1] != ')': + queries.append("(%s)" % ''.join(f)) + else: + queries.append(f) + + return ' '.join(queries) + + +def return_if_object(module, response): + # If not found, return nothing. + if response.status_code == 404: + return None + + # If no content, return nothing. + if response.status_code == 204: + return None + + try: + module.raise_for_status(response) + result = response.json() + except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst: + module.fail_json(msg="Invalid JSON response with error: %s" % inst) + + if navigate_hash(result, ['error', 'errors']): + module.fail_json(msg=navigate_hash(result, ['error', 'errors'])) + + return result + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/cloud/google/gcp_compute_region_disk_facts.py b/plugins/modules/cloud/google/gcp_compute_region_disk_facts.py new file mode 120000 index 0000000000..1dbc112907 --- /dev/null +++ b/plugins/modules/cloud/google/gcp_compute_region_disk_facts.py @@ -0,0 +1 @@ +gcp_compute_region_disk_info.py \ No newline at end of file diff --git a/plugins/modules/cloud/google/gcp_compute_region_disk_info.py b/plugins/modules/cloud/google/gcp_compute_region_disk_info.py new file mode 100644 index 0000000000..4783ac1f0f --- /dev/null +++ b/plugins/modules/cloud/google/gcp_compute_region_disk_info.py @@ -0,0 +1,345 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (C) 2017 Google +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** AUTO GENERATED CODE *** +# +# ---------------------------------------------------------------------------- +# +# This file is automatically generated by Magic Modules and manual +# changes will be clobbered when the file is regenerated. 
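+#
+# A note on scopes: each *_info module falls back to the broad
+# https://www.googleapis.com/auth/compute scope when the playbook sets
+# none. A read-only playbook can narrow this explicitly, e.g.
+# (illustrative task for the module below):
+#
+#   - gcp_compute_region_disk_info:
+#       region: us-central1
+#       project: test_project
+#       auth_kind: serviceaccount
+#       service_account_file: "/tmp/auth.pem"
+#       scopes:
+#       - https://www.googleapis.com/auth/compute.readonly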
+#
+# Please read more about how to change this file at
+# https://www.github.com/GoogleCloudPlatform/magic-modules
+#
+# ----------------------------------------------------------------------------

+from __future__ import absolute_import, division, print_function

+__metaclass__ = type

+################################################################################
+# Documentation
+################################################################################

+ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'}

+DOCUMENTATION = '''
+---
+module: gcp_compute_region_disk_info
+description:
+- Gather info for GCP RegionDisk
+short_description: Gather info for GCP RegionDisk
+author: Google Inc. (@googlecloudplatform)
+requirements:
+- python >= 2.6
+- requests >= 2.18.4
+- google-auth >= 1.3.0
+options:
+  filters:
+    description:
+    - A list of filter value pairs. Available filters are listed here U(https://cloud.google.com/sdk/gcloud/reference/topic/filters).
+    - Each additional filter in the list will be added as an AND condition (filter1
+      and filter2).
+    type: list
+  region:
+    description:
+    - A reference to the region where the disk resides.
+    required: true
+    type: str
+  project:
+    description:
+    - The Google Cloud Platform project to use.
+    type: str
+  auth_kind:
+    description:
+    - The type of credential used.
+    type: str
+    required: true
+    choices:
+    - application
+    - machineaccount
+    - serviceaccount
+  service_account_contents:
+    description:
+    - The contents of a Service Account JSON file, either in a dictionary or as a
+      JSON string that represents it.
+    type: jsonarg
+  service_account_file:
+    description:
+    - The path of a Service Account JSON file if serviceaccount is selected as type.
+    type: path
+  service_account_email:
+    description:
+    - An optional service account email address if machineaccount is selected and
+      the user does not wish to use the default email.
+    type: str
+  scopes:
+    description:
+    - Array of scopes to be used.
+    type: list
+  env_type:
+    description:
+    - Specifies which Ansible environment you're running this module within.
+    - This should not be set unless you know what you're doing.
+    - This only alters the User Agent string for any API requests.
+    type: str
+notes:
+- For authentication, you can set service_account_file using the C(GCP_SERVICE_ACCOUNT_FILE)
+  env variable.
+- For authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS)
+  env variable.
+- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL)
+  env variable.
+- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable.
+- For authentication, you can set scopes using the C(GCP_SCOPES) env variable.
+- Environment variable values will only be used if the playbook values are not set.
+- The I(service_account_email) and I(service_account_file) options are mutually exclusive.
+'''

+EXAMPLES = '''
+- name: get info on a region disk
+  gcp_compute_region_disk_info:
+    region: us-central1
+    filters:
+    - name = test_object
+    project: test_project
+    auth_kind: serviceaccount
+    service_account_file: "/tmp/auth.pem"
+'''

+RETURN = '''
+resources:
+  description: List of resources
+  returned: always
+  type: complex
+  contains:
+    labelFingerprint:
+      description:
+      - The fingerprint used for optimistic locking of this resource. Used internally
+        during updates.
+ returned: success + type: str + creationTimestamp: + description: + - Creation timestamp in RFC3339 text format. + returned: success + type: str + description: + description: + - An optional description of this resource. Provide this property when you create + the resource. + returned: success + type: str + id: + description: + - The unique identifier for the resource. + returned: success + type: int + lastAttachTimestamp: + description: + - Last attach timestamp in RFC3339 text format. + returned: success + type: str + lastDetachTimestamp: + description: + - Last detach timestamp in RFC3339 text format. + returned: success + type: str + labels: + description: + - Labels to apply to this disk. A list of key->value pairs. + returned: success + type: dict + licenses: + description: + - Any applicable publicly visible licenses. + returned: success + type: list + name: + description: + - Name of the resource. Provided by the client when the resource is created. + The name must be 1-63 characters long, and comply with RFC1035. Specifically, + the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` + which means the first character must be a lowercase letter, and all following + characters must be a dash, lowercase letter, or digit, except the last character, + which cannot be a dash. + returned: success + type: str + sizeGb: + description: + - Size of the persistent disk, specified in GB. You can specify this field when + creating a persistent disk using the sourceImage or sourceSnapshot parameter, + or specify it alone to create an empty persistent disk. + - If you specify this field along with sourceImage or sourceSnapshot, the value + of sizeGb must not be less than the size of the sourceImage or the size of + the snapshot. + returned: success + type: int + users: + description: + - 'Links to the users of the disk (attached instances) in form: project/zones/zone/instances/instance + .' + returned: success + type: list + physicalBlockSizeBytes: + description: + - Physical block size of the persistent disk, in bytes. If not present in a + request, a default value is used. Currently supported sizes are 4096 and 16384, + other sizes may be added in the future. + - If an unsupported value is requested, the error message will list the supported + values for the caller's project. + returned: success + type: int + replicaZones: + description: + - URLs of the zones where the disk should be replicated to. + returned: success + type: list + type: + description: + - URL of the disk type resource describing which disk type to use to create + the disk. Provide this when creating the disk. + returned: success + type: str + region: + description: + - A reference to the region where the disk resides. + returned: success + type: str + diskEncryptionKey: + description: + - Encrypts the disk using a customer-supplied encryption key. + - After you encrypt a disk with a customer-supplied key, you must provide the + same key if you use the disk later (e.g. to create a disk snapshot or an image, + or to attach the disk to a virtual machine). + - Customer-supplied encryption keys do not protect access to metadata of the + disk. + - If you do not provide an encryption key when creating the disk, then the disk + will be encrypted using an automatically generated key and you do not need + to provide a key to use the disk later. 
+ returned: success + type: complex + contains: + rawKey: + description: + - Specifies a 256-bit customer-supplied encryption key, encoded in RFC 4648 + base64 to either encrypt or decrypt this resource. + returned: success + type: str + sha256: + description: + - The RFC 4648 base64 encoded SHA-256 hash of the customer-supplied encryption + key that protects this resource. + returned: success + type: str + sourceSnapshot: + description: + - The source snapshot used to create this disk. You can provide this as a partial + or full URL to the resource. + returned: success + type: dict + sourceSnapshotEncryptionKey: + description: + - The customer-supplied encryption key of the source snapshot. Required if the + source snapshot is protected by a customer-supplied encryption key. + returned: success + type: complex + contains: + rawKey: + description: + - Specifies a 256-bit customer-supplied encryption key, encoded in RFC 4648 + base64 to either encrypt or decrypt this resource. + returned: success + type: str + sha256: + description: + - The RFC 4648 base64 encoded SHA-256 hash of the customer-supplied encryption + key that protects this resource. + returned: success + type: str + sourceSnapshotId: + description: + - The unique ID of the snapshot used to create this disk. This value identifies + the exact snapshot that was used to create this persistent disk. For example, + if you created the persistent disk from a snapshot that was later deleted + and recreated under the same name, the source snapshot ID would identify the + exact version of the snapshot that was used. + returned: success + type: str +''' + +################################################################################ +# Imports +################################################################################ +from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest +import json + +################################################################################ +# Main +################################################################################ + + +def main(): + module = GcpModule(argument_spec=dict(filters=dict(type='list', elements='str'), region=dict(required=True, type='str'))) + + if not module.params['scopes']: + module.params['scopes'] = ['https://www.googleapis.com/auth/compute'] + + return_value = {'resources': fetch_list(module, collection(module), query_options(module.params['filters']))} + module.exit_json(**return_value) + + +def collection(module): + return "https://www.googleapis.com/compute/v1/projects/{project}/regions/{region}/disks".format(**module.params) + + +def fetch_list(module, link, query): + auth = GcpSession(module, 'compute') + return auth.list(link, return_if_object, array_name='items', params={'filter': query}) + + +def query_options(filters): + if not filters: + return '' + + if len(filters) == 1: + return filters[0] + else: + queries = [] + for f in filters: + # For multiple queries, all queries should have () + if f[0] != '(' and f[-1] != ')': + queries.append("(%s)" % ''.join(f)) + else: + queries.append(f) + + return ' '.join(queries) + + +def return_if_object(module, response): + # If not found, return nothing. + if response.status_code == 404: + return None + + # If no content, return nothing. 
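+    # (The except clause below uses getattr(json.decoder, 'JSONDecodeError',
+    # ValueError) so the same handler works on Python 2, where the json
+    # module raises a plain ValueError, and on Python 3.5+, where it raises
+    # json.decoder.JSONDecodeError, a ValueError subclass.)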
+    if response.status_code == 204:
+        return None
+
+    try:
+        module.raise_for_status(response)
+        result = response.json()
+    except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst:
+        module.fail_json(msg="Invalid JSON response with error: %s" % inst)
+
+    if navigate_hash(result, ['error', 'errors']):
+        module.fail_json(msg=navigate_hash(result, ['error', 'errors']))
+
+    return result
+
+
+if __name__ == "__main__":
+    main()
diff --git a/plugins/modules/cloud/google/gcp_compute_route_facts.py b/plugins/modules/cloud/google/gcp_compute_route_facts.py
new file mode 120000
index 0000000000..33dccb6671
--- /dev/null
+++ b/plugins/modules/cloud/google/gcp_compute_route_facts.py
@@ -0,0 +1 @@
+gcp_compute_route_info.py
\ No newline at end of file
diff --git a/plugins/modules/cloud/google/gcp_compute_route_info.py b/plugins/modules/cloud/google/gcp_compute_route_info.py
new file mode 100644
index 0000000000..821f87f055
--- /dev/null
+++ b/plugins/modules/cloud/google/gcp_compute_route_info.py
@@ -0,0 +1,270 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) 2017 Google
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+# ----------------------------------------------------------------------------
+#
+# *** AUTO GENERATED CODE *** AUTO GENERATED CODE ***
+#
+# ----------------------------------------------------------------------------
+#
+# This file is automatically generated by Magic Modules and manual
+# changes will be clobbered when the file is regenerated.
+#
+# Please read more about how to change this file at
+# https://www.github.com/GoogleCloudPlatform/magic-modules
+#
+# ----------------------------------------------------------------------------
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+################################################################################
+# Documentation
+################################################################################
+
+ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'}
+
+DOCUMENTATION = '''
+---
+module: gcp_compute_route_info
+description:
+- Gather info for GCP Route
+short_description: Gather info for GCP Route
+author: Google Inc. (@googlecloudplatform)
+requirements:
+- python >= 2.6
+- requests >= 2.18.4
+- google-auth >= 1.3.0
+options:
+  filters:
+    description:
+    - A list of filter value pairs. Available filters are listed here U(https://cloud.google.com/sdk/gcloud/reference/topic/filters).
+    - Each additional filter in the list will be added as an AND condition (filter1
+      and filter2).
+    type: list
+  project:
+    description:
+    - The Google Cloud Platform project to use.
+    type: str
+  auth_kind:
+    description:
+    - The type of credential used.
+    type: str
+    required: true
+    choices:
+    - application
+    - machineaccount
+    - serviceaccount
+  service_account_contents:
+    description:
+    - The contents of a Service Account JSON file, either in a dictionary or as a
+      JSON string that represents it.
+    type: jsonarg
+  service_account_file:
+    description:
+    - The path of a Service Account JSON file if serviceaccount is selected as type.
+    type: path
+  service_account_email:
+    description:
+    - An optional service account email address if machineaccount is selected and
+      the user does not wish to use the default email.
+    type: str
+  scopes:
+    description:
+    - Array of scopes to be used.
+    type: list
+  env_type:
+    description:
+    - Specifies which Ansible environment you're running this module within.
+    - This should not be set unless you know what you're doing.
+    - This only alters the User Agent string for any API requests.
+    type: str
+notes:
+- For authentication, you can set service_account_file using the C(GCP_SERVICE_ACCOUNT_FILE)
+  env variable.
+- For authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS)
+  env variable.
+- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL)
+  env variable.
+- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable.
+- For authentication, you can set scopes using the C(GCP_SCOPES) env variable.
+- Environment variable values will only be used if the playbook values are not set.
+- The I(service_account_email) and I(service_account_file) options are mutually exclusive.
+'''
+
+EXAMPLES = '''
+- name: get info on a route
+  gcp_compute_route_info:
+    filters:
+    - name = test_object
+    project: test_project
+    auth_kind: serviceaccount
+    service_account_file: "/tmp/auth.pem"
+'''
+
+RETURN = '''
+resources:
+  description: List of resources
+  returned: always
+  type: complex
+  contains:
+    destRange:
+      description:
+      - The destination range of outgoing packets that this route applies to.
+      - Only IPv4 is supported.
+      returned: success
+      type: str
+    description:
+      description:
+      - An optional description of this resource. Provide this property when you create
+        the resource.
+      returned: success
+      type: str
+    name:
+      description:
+      - Name of the resource. Provided by the client when the resource is created.
+        The name must be 1-63 characters long, and comply with RFC1035. Specifically,
+        the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?`
+        which means the first character must be a lowercase letter, and all following
+        characters must be a dash, lowercase letter, or digit, except the last character,
+        which cannot be a dash.
+      returned: success
+      type: str
+    network:
+      description:
+      - The network that this route applies to.
+      returned: success
+      type: dict
+    priority:
+      description:
+      - The priority of this route. Priority is used to break ties in cases where
+        there is more than one matching route of equal prefix length.
+      - In the case of two routes with equal prefix length, the one with the lowest-numbered
+        priority value wins.
+      - Default value is 1000. Valid range is 0 through 65535.
+      returned: success
+      type: int
+    tags:
+      description:
+      - A list of instance tags to which this route applies.
+      returned: success
+      type: list
+    nextHopGateway:
+      description:
+      - URL to a gateway that should handle matching packets.
+      - 'Currently, you can only specify the internet gateway, using a full or partial valid URL:'
+      - ' * https://www.googleapis.com/compute/v1/projects/project/global/gateways/default-internet-gateway'
+      - ' * projects/project/global/gateways/default-internet-gateway'
+      - ' * global/gateways/default-internet-gateway'
+      returned: success
+      type: str
+    nextHopInstance:
+      description:
+      - URL to an instance that should handle matching packets.
+      - 'You can specify this as a full or partial URL.
For example:' + - ' * https://www.googleapis.com/compute/v1/projects/project/zones/zone/instances/instance' + - ' * projects/project/zones/zone/instances/instance' + - ' * zones/zone/instances/instance' + returned: success + type: dict + nextHopIp: + description: + - Network IP address of an instance that should handle matching packets. + returned: success + type: str + nextHopVpnTunnel: + description: + - URL to a VpnTunnel that should handle matching packets. + returned: success + type: dict + nextHopNetwork: + description: + - URL to a Network that should handle matching packets. + returned: success + type: str + nextHopIlb: + description: + - The URL to a forwarding rule of type loadBalancingScheme=INTERNAL that should + handle matching packets. + - 'You can only specify the forwarding rule as a partial or full URL. For example, + the following are all valid URLs: U(https://www.googleapis.com/compute/v1/projects/project/regions/region/forwardingRules/forwardingRule) + regions/region/forwardingRules/forwardingRule Note that this can only be used + when the destinationRange is a public (non-RFC 1918) IP CIDR range.' + returned: success + type: dict +''' + +################################################################################ +# Imports +################################################################################ +from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest +import json + +################################################################################ +# Main +################################################################################ + + +def main(): + module = GcpModule(argument_spec=dict(filters=dict(type='list', elements='str'))) + + if not module.params['scopes']: + module.params['scopes'] = ['https://www.googleapis.com/auth/compute'] + + return_value = {'resources': fetch_list(module, collection(module), query_options(module.params['filters']))} + module.exit_json(**return_value) + + +def collection(module): + return "https://www.googleapis.com/compute/v1/projects/{project}/global/routes".format(**module.params) + + +def fetch_list(module, link, query): + auth = GcpSession(module, 'compute') + return auth.list(link, return_if_object, array_name='items', params={'filter': query}) + + +def query_options(filters): + if not filters: + return '' + + if len(filters) == 1: + return filters[0] + else: + queries = [] + for f in filters: + # For multiple queries, all queries should have () + if f[0] != '(' and f[-1] != ')': + queries.append("(%s)" % ''.join(f)) + else: + queries.append(f) + + return ' '.join(queries) + + +def return_if_object(module, response): + # If not found, return nothing. + if response.status_code == 404: + return None + + # If no content, return nothing. 
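return_if_object, which continues just below, maps 404 and 204 responses to None, fails the module when the body is not valid JSON or carries an error payload, and returns the parsed body otherwise. A rough harness with a hypothetical stand-in response object exercises those paths outside Ansible:

    class StubResponse:
        # Hypothetical stand-in for the requests.Response object the module receives.
        def __init__(self, status_code, payload=None):
            self.status_code = status_code
            self._payload = payload

        def json(self):
            return self._payload

    def classify(response):
        # Same decision tree as return_if_object, minus the module plumbing.
        if response.status_code in (404, 204):
            return None
        body = response.json()
        if body.get('error', {}).get('errors'):
            raise RuntimeError(body['error']['errors'])
        return body

    print(classify(StubResponse(404)))                 # None
    print(classify(StubResponse(200, {'items': []})))  # {'items': []}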
+    if response.status_code == 204:
+        return None
+
+    try:
+        module.raise_for_status(response)
+        result = response.json()
+    except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst:
+        module.fail_json(msg="Invalid JSON response with error: %s" % inst)
+
+    if navigate_hash(result, ['error', 'errors']):
+        module.fail_json(msg=navigate_hash(result, ['error', 'errors']))
+
+    return result
+
+
+if __name__ == "__main__":
+    main()
diff --git a/plugins/modules/cloud/google/gcp_compute_router_facts.py b/plugins/modules/cloud/google/gcp_compute_router_facts.py
new file mode 120000
index 0000000000..00498a1b87
--- /dev/null
+++ b/plugins/modules/cloud/google/gcp_compute_router_facts.py
@@ -0,0 +1 @@
+gcp_compute_router_info.py
\ No newline at end of file
diff --git a/plugins/modules/cloud/google/gcp_compute_router_info.py b/plugins/modules/cloud/google/gcp_compute_router_info.py
new file mode 100644
index 0000000000..889fbb2eda
--- /dev/null
+++ b/plugins/modules/cloud/google/gcp_compute_router_info.py
@@ -0,0 +1,275 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) 2017 Google
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+# ----------------------------------------------------------------------------
+#
+# *** AUTO GENERATED CODE *** AUTO GENERATED CODE ***
+#
+# ----------------------------------------------------------------------------
+#
+# This file is automatically generated by Magic Modules and manual
+# changes will be clobbered when the file is regenerated.
+#
+# Please read more about how to change this file at
+# https://www.github.com/GoogleCloudPlatform/magic-modules
+#
+# ----------------------------------------------------------------------------
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+################################################################################
+# Documentation
+################################################################################
+
+ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'}
+
+DOCUMENTATION = '''
+---
+module: gcp_compute_router_info
+description:
+- Gather info for GCP Router
+short_description: Gather info for GCP Router
+author: Google Inc. (@googlecloudplatform)
+requirements:
+- python >= 2.6
+- requests >= 2.18.4
+- google-auth >= 1.3.0
+options:
+  filters:
+    description:
+    - A list of filter value pairs. Available filters are listed here U(https://cloud.google.com/sdk/gcloud/reference/topic/filters).
+    - Each additional filter in the list will be added as an AND condition (filter1
+      and filter2).
+    type: list
+  region:
+    description:
+    - Region where the router resides.
+    required: true
+    type: str
+  project:
+    description:
+    - The Google Cloud Platform project to use.
+    type: str
+  auth_kind:
+    description:
+    - The type of credential used.
+    type: str
+    required: true
+    choices:
+    - application
+    - machineaccount
+    - serviceaccount
+  service_account_contents:
+    description:
+    - The contents of a Service Account JSON file, either in a dictionary or as a
+      JSON string that represents it.
+    type: jsonarg
+  service_account_file:
+    description:
+    - The path of a Service Account JSON file if serviceaccount is selected as type.
+    type: path
+  service_account_email:
+    description:
+    - An optional service account email address if machineaccount is selected and
+      the user does not wish to use the default email.
+    type: str
+  scopes:
+    description:
+    - Array of scopes to be used.
+    type: list
+  env_type:
+    description:
+    - Specifies which Ansible environment you're running this module within.
+    - This should not be set unless you know what you're doing.
+    - This only alters the User Agent string for any API requests.
+    type: str
+notes:
+- For authentication, you can set service_account_file using the C(GCP_SERVICE_ACCOUNT_FILE)
+  env variable.
+- For authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS)
+  env variable.
+- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL)
+  env variable.
+- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable.
+- For authentication, you can set scopes using the C(GCP_SCOPES) env variable.
+- Environment variable values will only be used if the playbook values are not set.
+- The I(service_account_email) and I(service_account_file) options are mutually exclusive.
+'''
+
+EXAMPLES = '''
+- name: get info on a router
+  gcp_compute_router_info:
+    region: us-central1
+    filters:
+    - name = test_object
+    project: test_project
+    auth_kind: serviceaccount
+    service_account_file: "/tmp/auth.pem"
+'''
+
+RETURN = '''
+resources:
+  description: List of resources
+  returned: always
+  type: complex
+  contains:
+    id:
+      description:
+      - The unique identifier for the resource.
+      returned: success
+      type: int
+    creationTimestamp:
+      description:
+      - Creation timestamp in RFC3339 text format.
+      returned: success
+      type: str
+    name:
+      description:
+      - Name of the resource. The name must be 1-63 characters long, and comply with
+        RFC1035. Specifically, the name must be 1-63 characters long and match the
+        regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character
+        must be a lowercase letter, and all following characters must be a dash, lowercase
+        letter, or digit, except the last character, which cannot be a dash.
+      returned: success
+      type: str
+    description:
+      description:
+      - An optional description of this resource.
+      returned: success
+      type: str
+    network:
+      description:
+      - A reference to the network to which this router belongs.
+      returned: success
+      type: dict
+    bgp:
+      description:
+      - BGP information specific to this router.
+      returned: success
+      type: complex
+      contains:
+        asn:
+          description:
+          - Local BGP Autonomous System Number (ASN). Must be an RFC6996 private ASN,
+            either 16-bit or 32-bit. The value will be fixed for this router resource.
+            All VPN tunnels that link to this router will have the same local ASN.
+          returned: success
+          type: int
+        advertiseMode:
+          description:
+          - User-specified flag to indicate which mode to use for advertisement.
+          - 'Valid values of this enum field are: DEFAULT, CUSTOM.'
+          returned: success
+          type: str
+        advertisedGroups:
+          description:
+          - User-specified list of prefix groups to advertise in custom mode.
+          - This field can only be populated if advertiseMode is CUSTOM and is advertised
+            to all peers of the router. These groups will be advertised in addition
+            to any specified prefixes. Leave this field blank to advertise no custom
+            groups.
+          - 'This enum field has the one valid value: ALL_SUBNETS.'
+          returned: success
+          type: list
+        advertisedIpRanges:
+          description:
+          - User-specified list of individual IP ranges to advertise in custom mode.
+            This field can only be populated if advertiseMode is CUSTOM and is advertised
+            to all peers of the router.
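The bgp description that continues below constrains custom advertisements to advertiseMode CUSTOM and requires an RFC6996 private ASN. A hypothetical client-side check of a returned bgp block, using the private ASN ranges defined by RFC6996 (64512-65534 and 4200000000-4294967294):

    PRIVATE_ASN_16 = range(64512, 65535)
    PRIVATE_ASN_32 = range(4200000000, 4294967295)

    def check_bgp(bgp):
        # Hypothetical validation mirroring the documented constraints.
        if bgp['asn'] not in PRIVATE_ASN_16 and bgp['asn'] not in PRIVATE_ASN_32:
            raise ValueError('asn must be an RFC6996 private ASN')
        if bgp.get('advertiseMode', 'DEFAULT') != 'CUSTOM' and (
                bgp.get('advertisedGroups') or bgp.get('advertisedIpRanges')):
            raise ValueError('custom advertisements require advertiseMode: CUSTOM')

    check_bgp({'asn': 64512, 'advertiseMode': 'DEFAULT'})  # passes silently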
These IP ranges will be advertised in addition + to any specified groups. + - Leave this field blank to advertise no custom IP ranges. + returned: success + type: complex + contains: + range: + description: + - The IP range to advertise. The value must be a CIDR-formatted string. + returned: success + type: str + description: + description: + - User-specified description for the IP range. + returned: success + type: str + region: + description: + - Region where the router resides. + returned: success + type: str +''' + +################################################################################ +# Imports +################################################################################ +from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest +import json + +################################################################################ +# Main +################################################################################ + + +def main(): + module = GcpModule(argument_spec=dict(filters=dict(type='list', elements='str'), region=dict(required=True, type='str'))) + + if not module.params['scopes']: + module.params['scopes'] = ['https://www.googleapis.com/auth/compute'] + + return_value = {'resources': fetch_list(module, collection(module), query_options(module.params['filters']))} + module.exit_json(**return_value) + + +def collection(module): + return "https://www.googleapis.com/compute/v1/projects/{project}/regions/{region}/routers".format(**module.params) + + +def fetch_list(module, link, query): + auth = GcpSession(module, 'compute') + return auth.list(link, return_if_object, array_name='items', params={'filter': query}) + + +def query_options(filters): + if not filters: + return '' + + if len(filters) == 1: + return filters[0] + else: + queries = [] + for f in filters: + # For multiple queries, all queries should have () + if f[0] != '(' and f[-1] != ')': + queries.append("(%s)" % ''.join(f)) + else: + queries.append(f) + + return ' '.join(queries) + + +def return_if_object(module, response): + # If not found, return nothing. + if response.status_code == 404: + return None + + # If no content, return nothing. 
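fetch_list above delegates the actual HTTP work to GcpSession.list, whose implementation is not part of this diff. Assuming it follows the usual Compute Engine list protocol (accumulate the named result array, follow nextPageToken until exhausted), a rough approximation would be:

    import requests  # requests >= 2.18.4 is already a stated requirement of these modules

    def naive_list(link, params, headers, array_name='items'):
        # Assumed behaviour of GcpSession.list: page through the collection
        # and accumulate the named result array.
        results, page_token = [], None
        while True:
            if page_token:
                params = dict(params, pageToken=page_token)
            body = requests.get(link, params=params, headers=headers).json()
            results.extend(body.get(array_name, []))
            page_token = body.get('nextPageToken')
            if not page_token:
                return results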
+    if response.status_code == 204:
+        return None
+
+    try:
+        module.raise_for_status(response)
+        result = response.json()
+    except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst:
+        module.fail_json(msg="Invalid JSON response with error: %s" % inst)
+
+    if navigate_hash(result, ['error', 'errors']):
+        module.fail_json(msg=navigate_hash(result, ['error', 'errors']))
+
+    return result
+
+
+if __name__ == "__main__":
+    main()
diff --git a/plugins/modules/cloud/google/gcp_compute_ssl_certificate_facts.py b/plugins/modules/cloud/google/gcp_compute_ssl_certificate_facts.py
new file mode 120000
index 0000000000..188f2878a9
--- /dev/null
+++ b/plugins/modules/cloud/google/gcp_compute_ssl_certificate_facts.py
@@ -0,0 +1 @@
+gcp_compute_ssl_certificate_info.py
\ No newline at end of file
diff --git a/plugins/modules/cloud/google/gcp_compute_ssl_certificate_info.py b/plugins/modules/cloud/google/gcp_compute_ssl_certificate_info.py
new file mode 100644
index 0000000000..33a832d117
--- /dev/null
+++ b/plugins/modules/cloud/google/gcp_compute_ssl_certificate_info.py
@@ -0,0 +1,223 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) 2017 Google
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+# ----------------------------------------------------------------------------
+#
+# *** AUTO GENERATED CODE *** AUTO GENERATED CODE ***
+#
+# ----------------------------------------------------------------------------
+#
+# This file is automatically generated by Magic Modules and manual
+# changes will be clobbered when the file is regenerated.
+#
+# Please read more about how to change this file at
+# https://www.github.com/GoogleCloudPlatform/magic-modules
+#
+# ----------------------------------------------------------------------------
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+################################################################################
+# Documentation
+################################################################################
+
+ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'}
+
+DOCUMENTATION = '''
+---
+module: gcp_compute_ssl_certificate_info
+description:
+- Gather info for GCP SslCertificate
+short_description: Gather info for GCP SslCertificate
+author: Google Inc. (@googlecloudplatform)
+requirements:
+- python >= 2.6
+- requests >= 2.18.4
+- google-auth >= 1.3.0
+options:
+  filters:
+    description:
+    - A list of filter value pairs. Available filters are listed here U(https://cloud.google.com/sdk/gcloud/reference/topic/filters).
+    - Each additional filter in the list will be added as an AND condition (filter1
+      and filter2).
+    type: list
+  project:
+    description:
+    - The Google Cloud Platform project to use.
+    type: str
+  auth_kind:
+    description:
+    - The type of credential used.
+    type: str
+    required: true
+    choices:
+    - application
+    - machineaccount
+    - serviceaccount
+  service_account_contents:
+    description:
+    - The contents of a Service Account JSON file, either in a dictionary or as a
+      JSON string that represents it.
+    type: jsonarg
+  service_account_file:
+    description:
+    - The path of a Service Account JSON file if serviceaccount is selected as type.
+    type: path
+  service_account_email:
+    description:
+    - An optional service account email address if machineaccount is selected and
+      the user does not wish to use the default email.
+    type: str
+  scopes:
+    description:
+    - Array of scopes to be used.
+    type: list
+  env_type:
+    description:
+    - Specifies which Ansible environment you're running this module within.
+    - This should not be set unless you know what you're doing.
+    - This only alters the User Agent string for any API requests.
+    type: str
+notes:
+- For authentication, you can set service_account_file using the C(GCP_SERVICE_ACCOUNT_FILE)
+  env variable.
+- For authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS)
+  env variable.
+- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL)
+  env variable.
+- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable.
+- For authentication, you can set scopes using the C(GCP_SCOPES) env variable.
+- Environment variable values will only be used if the playbook values are not set.
+- The I(service_account_email) and I(service_account_file) options are mutually exclusive.
+'''
+
+EXAMPLES = '''
+- name: get info on an SSL certificate
+  gcp_compute_ssl_certificate_info:
+    filters:
+    - name = test_object
+    project: test_project
+    auth_kind: serviceaccount
+    service_account_file: "/tmp/auth.pem"
+'''
+
+RETURN = '''
+resources:
+  description: List of resources
+  returned: always
+  type: complex
+  contains:
+    certificate:
+      description:
+      - The certificate in PEM format.
+      - The certificate chain must be no greater than 5 certs long.
+      - The chain must include at least one intermediate cert.
+      returned: success
+      type: str
+    creationTimestamp:
+      description:
+      - Creation timestamp in RFC3339 text format.
+      returned: success
+      type: str
+    description:
+      description:
+      - An optional description of this resource.
+      returned: success
+      type: str
+    id:
+      description:
+      - The unique identifier for the resource.
+      returned: success
+      type: int
+    name:
+      description:
+      - Name of the resource. Provided by the client when the resource is created.
+        The name must be 1-63 characters long, and comply with RFC1035. Specifically,
+        the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?`
+        which means the first character must be a lowercase letter, and all following
+        characters must be a dash, lowercase letter, or digit, except the last character,
+        which cannot be a dash.
+      returned: success
+      type: str
+    privateKey:
+      description:
+      - The write-only private key in PEM format.
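The certificate field documented above must hold a PEM chain of at most 5 certificates and at least one intermediate, so at minimum two PEM blocks counting the leaf. A hypothetical sanity check that just counts PEM blocks:

    def pem_chain_ok(pem_chain):
        # Counts BEGIN CERTIFICATE blocks; 2..5 satisfies the documented limits.
        n = pem_chain.count('-----BEGIN CERTIFICATE-----')
        return 2 <= n <= 5

    sample = '-----BEGIN CERTIFICATE-----\n...\n-----END CERTIFICATE-----\n' * 2
    print(pem_chain_ok(sample))  # True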
+ returned: success + type: str +''' + +################################################################################ +# Imports +################################################################################ +from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest +import json + +################################################################################ +# Main +################################################################################ + + +def main(): + module = GcpModule(argument_spec=dict(filters=dict(type='list', elements='str'))) + + if not module.params['scopes']: + module.params['scopes'] = ['https://www.googleapis.com/auth/compute'] + + return_value = {'resources': fetch_list(module, collection(module), query_options(module.params['filters']))} + module.exit_json(**return_value) + + +def collection(module): + return "https://www.googleapis.com/compute/v1/projects/{project}/global/sslCertificates".format(**module.params) + + +def fetch_list(module, link, query): + auth = GcpSession(module, 'compute') + return auth.list(link, return_if_object, array_name='items', params={'filter': query}) + + +def query_options(filters): + if not filters: + return '' + + if len(filters) == 1: + return filters[0] + else: + queries = [] + for f in filters: + # For multiple queries, all queries should have () + if f[0] != '(' and f[-1] != ')': + queries.append("(%s)" % ''.join(f)) + else: + queries.append(f) + + return ' '.join(queries) + + +def return_if_object(module, response): + # If not found, return nothing. + if response.status_code == 404: + return None + + # If no content, return nothing. + if response.status_code == 204: + return None + + try: + module.raise_for_status(response) + result = response.json() + except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst: + module.fail_json(msg="Invalid JSON response with error: %s" % inst) + + if navigate_hash(result, ['error', 'errors']): + module.fail_json(msg=navigate_hash(result, ['error', 'errors'])) + + return result + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/cloud/google/gcp_compute_ssl_policy_facts.py b/plugins/modules/cloud/google/gcp_compute_ssl_policy_facts.py new file mode 120000 index 0000000000..2e64eb7d47 --- /dev/null +++ b/plugins/modules/cloud/google/gcp_compute_ssl_policy_facts.py @@ -0,0 +1 @@ +gcp_compute_ssl_policy_info.py \ No newline at end of file diff --git a/plugins/modules/cloud/google/gcp_compute_ssl_policy_info.py b/plugins/modules/cloud/google/gcp_compute_ssl_policy_info.py new file mode 100644 index 0000000000..d069ca9acc --- /dev/null +++ b/plugins/modules/cloud/google/gcp_compute_ssl_policy_info.py @@ -0,0 +1,261 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (C) 2017 Google +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** AUTO GENERATED CODE *** +# +# ---------------------------------------------------------------------------- +# +# This file is automatically generated by Magic Modules and manual +# changes will be clobbered when the file is regenerated. 
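Each module's collection() helper, as in the SslCertificate code above, is a fixed REST endpoint template filled from module params; regional resources add a {region} segment. A quick illustration with hypothetical params:

    params = {'project': 'test_project', 'region': 'us-west1'}
    global_link = 'https://www.googleapis.com/compute/v1/projects/{project}/global/sslCertificates'.format(**params)
    regional_link = 'https://www.googleapis.com/compute/v1/projects/{project}/regions/{region}/disks'.format(**params)
    print(global_link)
    print(regional_link)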
+#
+# Please read more about how to change this file at
+# https://www.github.com/GoogleCloudPlatform/magic-modules
+#
+# ----------------------------------------------------------------------------
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+################################################################################
+# Documentation
+################################################################################
+
+ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'}
+
+DOCUMENTATION = '''
+---
+module: gcp_compute_ssl_policy_info
+description:
+- Gather info for GCP SslPolicy
+short_description: Gather info for GCP SslPolicy
+author: Google Inc. (@googlecloudplatform)
+requirements:
+- python >= 2.6
+- requests >= 2.18.4
+- google-auth >= 1.3.0
+options:
+  filters:
+    description:
+    - A list of filter value pairs. Available filters are listed here U(https://cloud.google.com/sdk/gcloud/reference/topic/filters).
+    - Each additional filter in the list will be added as an AND condition (filter1
+      and filter2).
+    type: list
+  project:
+    description:
+    - The Google Cloud Platform project to use.
+    type: str
+  auth_kind:
+    description:
+    - The type of credential used.
+    type: str
+    required: true
+    choices:
+    - application
+    - machineaccount
+    - serviceaccount
+  service_account_contents:
+    description:
+    - The contents of a Service Account JSON file, either in a dictionary or as a
+      JSON string that represents it.
+    type: jsonarg
+  service_account_file:
+    description:
+    - The path of a Service Account JSON file if serviceaccount is selected as type.
+    type: path
+  service_account_email:
+    description:
+    - An optional service account email address if machineaccount is selected and
+      the user does not wish to use the default email.
+    type: str
+  scopes:
+    description:
+    - Array of scopes to be used.
+    type: list
+  env_type:
+    description:
+    - Specifies which Ansible environment you're running this module within.
+    - This should not be set unless you know what you're doing.
+    - This only alters the User Agent string for any API requests.
+    type: str
+notes:
+- For authentication, you can set service_account_file using the C(GCP_SERVICE_ACCOUNT_FILE)
+  env variable.
+- For authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS)
+  env variable.
+- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL)
+  env variable.
+- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable.
+- For authentication, you can set scopes using the C(GCP_SCOPES) env variable.
+- Environment variable values will only be used if the playbook values are not set.
+- The I(service_account_email) and I(service_account_file) options are mutually exclusive.
+'''
+
+EXAMPLES = '''
+- name: get info on an SSL policy
+  gcp_compute_ssl_policy_info:
+    filters:
+    - name = test_object
+    project: test_project
+    auth_kind: serviceaccount
+    service_account_file: "/tmp/auth.pem"
+'''
+
+RETURN = '''
+resources:
+  description: List of resources
+  returned: always
+  type: complex
+  contains:
+    creationTimestamp:
+      description:
+      - Creation timestamp in RFC3339 text format.
+      returned: success
+      type: str
+    description:
+      description:
+      - An optional description of this resource.
+      returned: success
+      type: str
+    id:
+      description:
+      - The unique identifier for the resource.
+ returned: success + type: int + name: + description: + - Name of the resource. Provided by the client when the resource is created. + The name must be 1-63 characters long, and comply with RFC1035. Specifically, + the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` + which means the first character must be a lowercase letter, and all following + characters must be a dash, lowercase letter, or digit, except the last character, + which cannot be a dash. + returned: success + type: str + profile: + description: + - Profile specifies the set of SSL features that can be used by the load balancer + when negotiating SSL with clients. This can be one of `COMPATIBLE`, `MODERN`, + `RESTRICTED`, or `CUSTOM`. If using `CUSTOM`, the set of SSL features to enable + must be specified in the `customFeatures` field. + returned: success + type: str + minTlsVersion: + description: + - The minimum version of SSL protocol that can be used by the clients to establish + a connection with the load balancer. This can be one of `TLS_1_0`, `TLS_1_1`, + `TLS_1_2`. + returned: success + type: str + enabledFeatures: + description: + - The list of features enabled in the SSL policy. + returned: success + type: list + customFeatures: + description: + - A list of features enabled when the selected profile is CUSTOM. The method + returns the set of features that can be specified in this list. This field + must be empty if the profile is not CUSTOM. + returned: success + type: list + fingerprint: + description: + - Fingerprint of this resource. A hash of the contents stored in this object. + This field is used in optimistic locking. + returned: success + type: str + warnings: + description: + - If potential misconfigurations are detected for this SSL policy, this field + will be populated with warning messages. + returned: success + type: complex + contains: + code: + description: + - A warning code, if applicable. + returned: success + type: str + message: + description: + - A human-readable description of the warning code. 
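Per the profile and customFeatures descriptions above, customFeatures may only be populated when the profile is CUSTOM. A hypothetical client-side check over a returned policy dict:

    VALID_PROFILES = {'COMPATIBLE', 'MODERN', 'RESTRICTED', 'CUSTOM'}
    VALID_MIN_TLS = {'TLS_1_0', 'TLS_1_1', 'TLS_1_2'}

    def check_policy(policy):
        # Mirrors the documented constraints on SSL policy fields.
        if policy.get('profile') not in VALID_PROFILES:
            raise ValueError('unknown profile')
        if policy.get('minTlsVersion', 'TLS_1_0') not in VALID_MIN_TLS:
            raise ValueError('unknown minTlsVersion')
        if policy.get('customFeatures') and policy['profile'] != 'CUSTOM':
            raise ValueError('customFeatures requires profile CUSTOM')

    check_policy({'profile': 'RESTRICTED', 'minTlsVersion': 'TLS_1_2'})  # ok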
+ returned: success + type: str +''' + +################################################################################ +# Imports +################################################################################ +from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest +import json + +################################################################################ +# Main +################################################################################ + + +def main(): + module = GcpModule(argument_spec=dict(filters=dict(type='list', elements='str'))) + + if not module.params['scopes']: + module.params['scopes'] = ['https://www.googleapis.com/auth/compute'] + + return_value = {'resources': fetch_list(module, collection(module), query_options(module.params['filters']))} + module.exit_json(**return_value) + + +def collection(module): + return "https://www.googleapis.com/compute/v1/projects/{project}/global/sslPolicies".format(**module.params) + + +def fetch_list(module, link, query): + auth = GcpSession(module, 'compute') + return auth.list(link, return_if_object, array_name='items', params={'filter': query}) + + +def query_options(filters): + if not filters: + return '' + + if len(filters) == 1: + return filters[0] + else: + queries = [] + for f in filters: + # For multiple queries, all queries should have () + if f[0] != '(' and f[-1] != ')': + queries.append("(%s)" % ''.join(f)) + else: + queries.append(f) + + return ' '.join(queries) + + +def return_if_object(module, response): + # If not found, return nothing. + if response.status_code == 404: + return None + + # If no content, return nothing. + if response.status_code == 204: + return None + + try: + module.raise_for_status(response) + result = response.json() + except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst: + module.fail_json(msg="Invalid JSON response with error: %s" % inst) + + if navigate_hash(result, ['error', 'errors']): + module.fail_json(msg=navigate_hash(result, ['error', 'errors'])) + + return result + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/cloud/google/gcp_compute_subnetwork_facts.py b/plugins/modules/cloud/google/gcp_compute_subnetwork_facts.py new file mode 120000 index 0000000000..dc4a73ad54 --- /dev/null +++ b/plugins/modules/cloud/google/gcp_compute_subnetwork_facts.py @@ -0,0 +1 @@ +gcp_compute_subnetwork_info.py \ No newline at end of file diff --git a/plugins/modules/cloud/google/gcp_compute_subnetwork_info.py b/plugins/modules/cloud/google/gcp_compute_subnetwork_info.py new file mode 100644 index 0000000000..3904bbf2fc --- /dev/null +++ b/plugins/modules/cloud/google/gcp_compute_subnetwork_info.py @@ -0,0 +1,273 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (C) 2017 Google +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** AUTO GENERATED CODE *** +# +# ---------------------------------------------------------------------------- +# +# This file is automatically generated by Magic Modules and manual +# changes will be clobbered when the file is regenerated. 
+#
+# Please read more about how to change this file at
+# https://www.github.com/GoogleCloudPlatform/magic-modules
+#
+# ----------------------------------------------------------------------------
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+################################################################################
+# Documentation
+################################################################################
+
+ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'}
+
+DOCUMENTATION = '''
+---
+module: gcp_compute_subnetwork_info
+description:
+- Gather info for GCP Subnetwork
+short_description: Gather info for GCP Subnetwork
+author: Google Inc. (@googlecloudplatform)
+requirements:
+- python >= 2.6
+- requests >= 2.18.4
+- google-auth >= 1.3.0
+options:
+  filters:
+    description:
+    - A list of filter value pairs. Available filters are listed here U(https://cloud.google.com/sdk/gcloud/reference/topic/filters).
+    - Each additional filter in the list will be added as an AND condition (filter1
+      and filter2).
+    type: list
+  region:
+    description:
+    - URL of the GCP region for this subnetwork.
+    required: true
+    type: str
+  project:
+    description:
+    - The Google Cloud Platform project to use.
+    type: str
+  auth_kind:
+    description:
+    - The type of credential used.
+    type: str
+    required: true
+    choices:
+    - application
+    - machineaccount
+    - serviceaccount
+  service_account_contents:
+    description:
+    - The contents of a Service Account JSON file, either in a dictionary or as a
+      JSON string that represents it.
+    type: jsonarg
+  service_account_file:
+    description:
+    - The path of a Service Account JSON file if serviceaccount is selected as type.
+    type: path
+  service_account_email:
+    description:
+    - An optional service account email address if machineaccount is selected and
+      the user does not wish to use the default email.
+    type: str
+  scopes:
+    description:
+    - Array of scopes to be used.
+    type: list
+  env_type:
+    description:
+    - Specifies which Ansible environment you're running this module within.
+    - This should not be set unless you know what you're doing.
+    - This only alters the User Agent string for any API requests.
+    type: str
+notes:
+- For authentication, you can set service_account_file using the C(GCP_SERVICE_ACCOUNT_FILE)
+  env variable.
+- For authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS)
+  env variable.
+- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL)
+  env variable.
+- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable.
+- For authentication, you can set scopes using the C(GCP_SCOPES) env variable.
+- Environment variable values will only be used if the playbook values are not set.
+- The I(service_account_email) and I(service_account_file) options are mutually exclusive.
+'''
+
+EXAMPLES = '''
+- name: get info on a subnetwork
+  gcp_compute_subnetwork_info:
+    region: us-west1
+    filters:
+    - name = test_object
+    project: test_project
+    auth_kind: serviceaccount
+    service_account_file: "/tmp/auth.pem"
+'''
+
+RETURN = '''
+resources:
+  description: List of resources
+  returned: always
+  type: complex
+  contains:
+    creationTimestamp:
+      description:
+      - Creation timestamp in RFC3339 text format.
+      returned: success
+      type: str
+    description:
+      description:
+      - An optional description of this resource.
Provide this property when you create + the resource. This field can be set only at resource creation time. + returned: success + type: str + gatewayAddress: + description: + - The gateway address for default routes to reach destination addresses outside + this subnetwork. + returned: success + type: str + id: + description: + - The unique identifier for the resource. + returned: success + type: int + ipCidrRange: + description: + - The range of internal addresses that are owned by this subnetwork. + - Provide this property when you create the subnetwork. For example, 10.0.0.0/8 + or 192.168.0.0/16. Ranges must be unique and non-overlapping within a network. + Only IPv4 is supported. + returned: success + type: str + name: + description: + - The name of the resource, provided by the client when initially creating the + resource. The name must be 1-63 characters long, and comply with RFC1035. + Specifically, the name must be 1-63 characters long and match the regular + expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must + be a lowercase letter, and all following characters must be a dash, lowercase + letter, or digit, except the last character, which cannot be a dash. + returned: success + type: str + network: + description: + - The network this subnet belongs to. + - Only networks that are in the distributed mode can have subnetworks. + returned: success + type: dict + secondaryIpRanges: + description: + - An array of configurations for secondary IP ranges for VM instances contained + in this subnetwork. The primary IP of such VM must belong to the primary ipCidrRange + of the subnetwork. The alias IPs may belong to either primary or secondary + ranges. + returned: success + type: complex + contains: + rangeName: + description: + - The name associated with this subnetwork secondary range, used when adding + an alias IP range to a VM instance. The name must be 1-63 characters long, + and comply with RFC1035. The name must be unique within the subnetwork. + returned: success + type: str + ipCidrRange: + description: + - The range of IP addresses belonging to this subnetwork secondary range. + Provide this property when you create the subnetwork. + - Ranges must be unique and non-overlapping with all primary and secondary + IP ranges within a network. Only IPv4 is supported. + returned: success + type: str + privateIpGoogleAccess: + description: + - When enabled, VMs in this subnetwork without external IP addresses can access + Google APIs and services by using Private Google Access. + returned: success + type: bool + region: + description: + - URL of the GCP region for this subnetwork. 
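The ipCidrRange and secondaryIpRanges descriptions above require ranges to be unique and non-overlapping within a network. The stdlib ipaddress module can verify that client-side; the sample ranges here are hypothetical:

    import ipaddress

    def any_overlap(cidrs):
        # True if any pair of the given CIDR ranges overlaps.
        nets = [ipaddress.ip_network(c) for c in cidrs]
        return any(a.overlaps(b) for i, a in enumerate(nets) for b in nets[i + 1:])

    primary = '10.0.0.0/16'
    secondaries = ['10.1.0.0/20', '10.2.0.0/20']
    print(any_overlap([primary] + secondaries))  # False -> layout is acceptable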
+ returned: success + type: str +''' + +################################################################################ +# Imports +################################################################################ +from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest +import json + +################################################################################ +# Main +################################################################################ + + +def main(): + module = GcpModule(argument_spec=dict(filters=dict(type='list', elements='str'), region=dict(required=True, type='str'))) + + if not module.params['scopes']: + module.params['scopes'] = ['https://www.googleapis.com/auth/compute'] + + return_value = {'resources': fetch_list(module, collection(module), query_options(module.params['filters']))} + module.exit_json(**return_value) + + +def collection(module): + return "https://www.googleapis.com/compute/v1/projects/{project}/regions/{region}/subnetworks".format(**module.params) + + +def fetch_list(module, link, query): + auth = GcpSession(module, 'compute') + return auth.list(link, return_if_object, array_name='items', params={'filter': query}) + + +def query_options(filters): + if not filters: + return '' + + if len(filters) == 1: + return filters[0] + else: + queries = [] + for f in filters: + # For multiple queries, all queries should have () + if f[0] != '(' and f[-1] != ')': + queries.append("(%s)" % ''.join(f)) + else: + queries.append(f) + + return ' '.join(queries) + + +def return_if_object(module, response): + # If not found, return nothing. + if response.status_code == 404: + return None + + # If no content, return nothing. + if response.status_code == 204: + return None + + try: + module.raise_for_status(response) + result = response.json() + except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst: + module.fail_json(msg="Invalid JSON response with error: %s" % inst) + + if navigate_hash(result, ['error', 'errors']): + module.fail_json(msg=navigate_hash(result, ['error', 'errors'])) + + return result + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/cloud/google/gcp_compute_target_http_proxy_facts.py b/plugins/modules/cloud/google/gcp_compute_target_http_proxy_facts.py new file mode 120000 index 0000000000..628457e100 --- /dev/null +++ b/plugins/modules/cloud/google/gcp_compute_target_http_proxy_facts.py @@ -0,0 +1 @@ +gcp_compute_target_http_proxy_info.py \ No newline at end of file diff --git a/plugins/modules/cloud/google/gcp_compute_target_http_proxy_info.py b/plugins/modules/cloud/google/gcp_compute_target_http_proxy_info.py new file mode 100644 index 0000000000..44292ce727 --- /dev/null +++ b/plugins/modules/cloud/google/gcp_compute_target_http_proxy_info.py @@ -0,0 +1,217 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (C) 2017 Google +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** AUTO GENERATED CODE *** +# +# ---------------------------------------------------------------------------- +# +# This file is automatically generated by Magic Modules and manual +# changes will be clobbered when the file is regenerated. 
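All of these *_info modules exit with the shape documented in their RETURN blocks: a dict whose resources key lists the matching resource dicts. Post-processing a registered result therefore reduces to ordinary dict work; the values below are hypothetical:

    result = {'resources': [
        {'name': 'subnet-a', 'ipCidrRange': '10.0.0.0/16'},
        {'name': 'subnet-b', 'ipCidrRange': '10.1.0.0/16'},
    ]}

    # Index the returned resources by name for easy lookup.
    by_name = {r['name']: r for r in result['resources']}
    print(by_name['subnet-a']['ipCidrRange'])  # 10.0.0.0/16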
+#
+# Please read more about how to change this file at
+# https://www.github.com/GoogleCloudPlatform/magic-modules
+#
+# ----------------------------------------------------------------------------
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+################################################################################
+# Documentation
+################################################################################
+
+ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'}
+
+DOCUMENTATION = '''
+---
+module: gcp_compute_target_http_proxy_info
+description:
+- Gather info for GCP TargetHttpProxy
+short_description: Gather info for GCP TargetHttpProxy
+author: Google Inc. (@googlecloudplatform)
+requirements:
+- python >= 2.6
+- requests >= 2.18.4
+- google-auth >= 1.3.0
+options:
+  filters:
+    description:
+    - A list of filter value pairs. Available filters are listed here U(https://cloud.google.com/sdk/gcloud/reference/topic/filters).
+    - Each additional filter in the list will be added as an AND condition (filter1
+      and filter2).
+    type: list
+  project:
+    description:
+    - The Google Cloud Platform project to use.
+    type: str
+  auth_kind:
+    description:
+    - The type of credential used.
+    type: str
+    required: true
+    choices:
+    - application
+    - machineaccount
+    - serviceaccount
+  service_account_contents:
+    description:
+    - The contents of a Service Account JSON file, either in a dictionary or as a
+      JSON string that represents it.
+    type: jsonarg
+  service_account_file:
+    description:
+    - The path of a Service Account JSON file if serviceaccount is selected as type.
+    type: path
+  service_account_email:
+    description:
+    - An optional service account email address if machineaccount is selected and
+      the user does not wish to use the default email.
+    type: str
+  scopes:
+    description:
+    - Array of scopes to be used.
+    type: list
+  env_type:
+    description:
+    - Specifies which Ansible environment you're running this module within.
+    - This should not be set unless you know what you're doing.
+    - This only alters the User Agent string for any API requests.
+    type: str
+notes:
+- For authentication, you can set service_account_file using the C(GCP_SERVICE_ACCOUNT_FILE)
+  env variable.
+- For authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS)
+  env variable.
+- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL)
+  env variable.
+- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable.
+- For authentication, you can set scopes using the C(GCP_SCOPES) env variable.
+- Environment variable values will only be used if the playbook values are not set.
+- The I(service_account_email) and I(service_account_file) options are mutually exclusive.
+'''
+
+EXAMPLES = '''
+- name: get info on a target HTTP proxy
+  gcp_compute_target_http_proxy_info:
+    filters:
+    - name = test_object
+    project: test_project
+    auth_kind: serviceaccount
+    service_account_file: "/tmp/auth.pem"
+'''
+
+RETURN = '''
+resources:
+  description: List of resources
+  returned: always
+  type: complex
+  contains:
+    creationTimestamp:
+      description:
+      - Creation timestamp in RFC3339 text format.
+      returned: success
+      type: str
+    description:
+      description:
+      - An optional description of this resource.
+      returned: success
+      type: str
+    id:
+      description:
+      - The unique identifier for the resource.
+ returned: success + type: int + name: + description: + - Name of the resource. Provided by the client when the resource is created. + The name must be 1-63 characters long, and comply with RFC1035. Specifically, + the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` + which means the first character must be a lowercase letter, and all following + characters must be a dash, lowercase letter, or digit, except the last character, + which cannot be a dash. + returned: success + type: str + urlMap: + description: + - A reference to the UrlMap resource that defines the mapping from URL to the + BackendService. + returned: success + type: dict +''' + +################################################################################ +# Imports +################################################################################ +from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest +import json + +################################################################################ +# Main +################################################################################ + + +def main(): + module = GcpModule(argument_spec=dict(filters=dict(type='list', elements='str'))) + + if not module.params['scopes']: + module.params['scopes'] = ['https://www.googleapis.com/auth/compute'] + + return_value = {'resources': fetch_list(module, collection(module), query_options(module.params['filters']))} + module.exit_json(**return_value) + + +def collection(module): + return "https://www.googleapis.com/compute/v1/projects/{project}/global/targetHttpProxies".format(**module.params) + + +def fetch_list(module, link, query): + auth = GcpSession(module, 'compute') + return auth.list(link, return_if_object, array_name='items', params={'filter': query}) + + +def query_options(filters): + if not filters: + return '' + + if len(filters) == 1: + return filters[0] + else: + queries = [] + for f in filters: + # For multiple queries, all queries should have () + if f[0] != '(' and f[-1] != ')': + queries.append("(%s)" % ''.join(f)) + else: + queries.append(f) + + return ' '.join(queries) + + +def return_if_object(module, response): + # If not found, return nothing. + if response.status_code == 404: + return None + + # If no content, return nothing. 
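Outside Ansible, the listing that main() and fetch_list perform above amounts to a single authenticated GET against the documented endpoint. A hedged sketch using requests (a stated requirement of these modules); obtaining TOKEN, e.g. via google-auth, is left out, and the placeholder values are hypothetical:

    import requests

    TOKEN = '<oauth2-access-token-with-compute-scope>'  # placeholder, not a real token
    link = 'https://www.googleapis.com/compute/v1/projects/test_project/global/targetHttpProxies'
    resp = requests.get(link,
                        params={'filter': 'name = test_object'},
                        headers={'Authorization': 'Bearer %s' % TOKEN})
    print(resp.json().get('items', []))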
+    if response.status_code == 204:
+        return None
+
+    try:
+        module.raise_for_status(response)
+        result = response.json()
+    except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst:
+        module.fail_json(msg="Invalid JSON response with error: %s" % inst)
+
+    if navigate_hash(result, ['error', 'errors']):
+        module.fail_json(msg=navigate_hash(result, ['error', 'errors']))
+
+    return result
+
+
+if __name__ == "__main__":
+    main()
diff --git a/plugins/modules/cloud/google/gcp_compute_target_https_proxy_facts.py b/plugins/modules/cloud/google/gcp_compute_target_https_proxy_facts.py
new file mode 120000
index 0000000000..9b6beebab5
--- /dev/null
+++ b/plugins/modules/cloud/google/gcp_compute_target_https_proxy_facts.py
@@ -0,0 +1 @@
+gcp_compute_target_https_proxy_info.py
\ No newline at end of file
diff --git a/plugins/modules/cloud/google/gcp_compute_target_https_proxy_info.py b/plugins/modules/cloud/google/gcp_compute_target_https_proxy_info.py
new file mode 100644
index 0000000000..adcfd972fb
--- /dev/null
+++ b/plugins/modules/cloud/google/gcp_compute_target_https_proxy_info.py
@@ -0,0 +1,240 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) 2017 Google
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+# ----------------------------------------------------------------------------
+#
+# *** AUTO GENERATED CODE *** AUTO GENERATED CODE ***
+#
+# ----------------------------------------------------------------------------
+#
+# This file is automatically generated by Magic Modules and manual
+# changes will be clobbered when the file is regenerated.
+#
+# Please read more about how to change this file at
+# https://www.github.com/GoogleCloudPlatform/magic-modules
+#
+# ----------------------------------------------------------------------------
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+################################################################################
+# Documentation
+################################################################################
+
+ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'}
+
+DOCUMENTATION = '''
+---
+module: gcp_compute_target_https_proxy_info
+description:
+- Gather info for GCP TargetHttpsProxy
+short_description: Gather info for GCP TargetHttpsProxy
+author: Google Inc. (@googlecloudplatform)
+requirements:
+- python >= 2.6
+- requests >= 2.18.4
+- google-auth >= 1.3.0
+options:
+  filters:
+    description:
+    - A list of filter value pairs. Available filters are listed here U(https://cloud.google.com/sdk/gcloud/reference/topic/filters).
+    - Each additional filter in the list will be added as an AND condition (filter1
+      and filter2).
+    type: list
+  project:
+    description:
+    - The Google Cloud Platform project to use.
+    type: str
+  auth_kind:
+    description:
+    - The type of credential used.
+    type: str
+    required: true
+    choices:
+    - application
+    - machineaccount
+    - serviceaccount
+  service_account_contents:
+    description:
+    - The contents of a Service Account JSON file, either in a dictionary or as a
+      JSON string that represents it.
+    type: jsonarg
+  service_account_file:
+    description:
+    - The path of a Service Account JSON file if serviceaccount is selected as type.
+    type: path
+  service_account_email:
+    description:
+    - An optional service account email address if machineaccount is selected and
+      the user does not wish to use the default email.
+    type: str
+  scopes:
+    description:
+    - Array of scopes to be used.
+    type: list
+  env_type:
+    description:
+    - Specifies which Ansible environment you're running this module within.
+    - This should not be set unless you know what you're doing.
+    - This only alters the User Agent string for any API requests.
+    type: str
+notes:
+- For authentication, you can set service_account_file using the C(GCP_SERVICE_ACCOUNT_FILE)
+  env variable.
+- For authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS)
+  env variable.
+- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL)
+  env variable.
+- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable.
+- For authentication, you can set scopes using the C(GCP_SCOPES) env variable.
+- Environment variable values will only be used if the playbook values are not set.
+- The I(service_account_email) and I(service_account_file) options are mutually exclusive.
+'''
+
+EXAMPLES = '''
+- name: get info on a target HTTPS proxy
+  gcp_compute_target_https_proxy_info:
+    filters:
+    - name = test_object
+    project: test_project
+    auth_kind: serviceaccount
+    service_account_file: "/tmp/auth.pem"
+'''
+
+RETURN = '''
+resources:
+  description: List of resources
+  returned: always
+  type: complex
+  contains:
+    creationTimestamp:
+      description:
+      - Creation timestamp in RFC3339 text format.
+      returned: success
+      type: str
+    description:
+      description:
+      - An optional description of this resource.
+      returned: success
+      type: str
+    id:
+      description:
+      - The unique identifier for the resource.
+      returned: success
+      type: int
+    name:
+      description:
+      - Name of the resource. Provided by the client when the resource is created.
+        The name must be 1-63 characters long, and comply with RFC1035. Specifically,
+        the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?`
+        which means the first character must be a lowercase letter, and all following
+        characters must be a dash, lowercase letter, or digit, except the last character,
+        which cannot be a dash.
+      returned: success
+      type: str
+    quicOverride:
+      description:
+      - Specifies the QUIC override policy for this resource. This determines whether
+        the load balancer will attempt to negotiate QUIC with clients or not. Can
+        specify one of NONE, ENABLE, or DISABLE. If NONE is specified, uses the QUIC
+        policy with no user overrides, which is equivalent to DISABLE. Not specifying
+        this field is equivalent to specifying NONE.
+      returned: success
+      type: str
+    sslCertificates:
+      description:
+      - A list of SslCertificate resources that are used to authenticate connections
+        between users and the load balancer. At least one SSL certificate must be
+        specified.
+      returned: success
+      type: list
+    sslPolicy:
+      description:
+      - A reference to the SslPolicy resource that will be associated with the TargetHttpsProxy
+        resource. If not set, the TargetHttpsProxy resource will not have any SSL
+        policy configured.
+      returned: success
+      type: dict
+    urlMap:
+      description:
+      - A reference to the UrlMap resource that defines the mapping from URL to the
+        BackendService.
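The quicOverride description above pins down the effective behaviour: an unset field means NONE, and NONE means the default policy, which is equivalent to DISABLE. A hypothetical normalizer for a returned proxy dict:

    def effective_quic(proxy):
        # Apply the documented defaulting: unset -> NONE, NONE -> DISABLE.
        value = proxy.get('quicOverride') or 'NONE'
        return 'DISABLE' if value == 'NONE' else value

    print(effective_quic({}))                          # DISABLE
    print(effective_quic({'quicOverride': 'ENABLE'}))  # ENABLE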
+ returned: success + type: dict +''' + +################################################################################ +# Imports +################################################################################ +from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest +import json + +################################################################################ +# Main +################################################################################ + + +def main(): + module = GcpModule(argument_spec=dict(filters=dict(type='list', elements='str'))) + + if not module.params['scopes']: + module.params['scopes'] = ['https://www.googleapis.com/auth/compute'] + + return_value = {'resources': fetch_list(module, collection(module), query_options(module.params['filters']))} + module.exit_json(**return_value) + + +def collection(module): + return "https://www.googleapis.com/compute/v1/projects/{project}/global/targetHttpsProxies".format(**module.params) + + +def fetch_list(module, link, query): + auth = GcpSession(module, 'compute') + return auth.list(link, return_if_object, array_name='items', params={'filter': query}) + + +def query_options(filters): + if not filters: + return '' + + if len(filters) == 1: + return filters[0] + else: + queries = [] + for f in filters: + # For multiple queries, all queries should have () + if f[0] != '(' and f[-1] != ')': + queries.append("(%s)" % ''.join(f)) + else: + queries.append(f) + + return ' '.join(queries) + + +def return_if_object(module, response): + # If not found, return nothing. + if response.status_code == 404: + return None + + # If no content, return nothing. + if response.status_code == 204: + return None + + try: + module.raise_for_status(response) + result = response.json() + except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst: + module.fail_json(msg="Invalid JSON response with error: %s" % inst) + + if navigate_hash(result, ['error', 'errors']): + module.fail_json(msg=navigate_hash(result, ['error', 'errors'])) + + return result + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/cloud/google/gcp_compute_target_pool_facts.py b/plugins/modules/cloud/google/gcp_compute_target_pool_facts.py new file mode 120000 index 0000000000..e3583a7239 --- /dev/null +++ b/plugins/modules/cloud/google/gcp_compute_target_pool_facts.py @@ -0,0 +1 @@ +gcp_compute_target_pool_info.py \ No newline at end of file diff --git a/plugins/modules/cloud/google/gcp_compute_target_pool_info.py b/plugins/modules/cloud/google/gcp_compute_target_pool_info.py new file mode 100644 index 0000000000..7549558e45 --- /dev/null +++ b/plugins/modules/cloud/google/gcp_compute_target_pool_info.py @@ -0,0 +1,276 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (C) 2017 Google +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** AUTO GENERATED CODE *** +# +# ---------------------------------------------------------------------------- +# +# This file is automatically generated by Magic Modules and manual +# changes will be clobbered when the file is regenerated. 
+# +# Please read more about how to change this file at +# https://www.github.com/GoogleCloudPlatform/magic-modules +# +# ---------------------------------------------------------------------------- + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +################################################################################ +# Documentation +################################################################################ + +ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: gcp_compute_target_pool_info +description: +- Gather info for GCP TargetPool +short_description: Gather info for GCP TargetPool +author: Google Inc. (@googlecloudplatform) +requirements: +- python >= 2.6 +- requests >= 2.18.4 +- google-auth >= 1.3.0 +options: + filters: + description: + - A list of filter value pairs. Available filters are listed here U(https://cloud.google.com/sdk/gcloud/reference/topic/filters). + - Each additional filter in the list will act be added as an AND condition (filter1 + and filter2) . + type: list + region: + description: + - The region where the target pool resides. + required: true + type: str + project: + description: + - The Google Cloud Platform project to use. + type: str + auth_kind: + description: + - The type of credential used. + type: str + required: true + choices: + - application + - machineaccount + - serviceaccount + service_account_contents: + description: + - The contents of a Service Account JSON file, either in a dictionary or as a + JSON string that represents it. + type: jsonarg + service_account_file: + description: + - The path of a Service Account JSON file if serviceaccount is selected as type. + type: path + service_account_email: + description: + - An optional service account email address if machineaccount is selected and + the user does not wish to use the default email. + type: str + scopes: + description: + - Array of scopes to be used + type: list + env_type: + description: + - Specifies which Ansible environment you're running this module within. + - This should not be set unless you know what you're doing. + - This only alters the User Agent string for any API requests. + type: str +notes: +- for authentication, you can set service_account_file using the C(gcp_service_account_file) + env variable. +- for authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS) + env variable. +- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL) + env variable. +- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable. +- For authentication, you can set scopes using the C(GCP_SCOPES) env variable. +- Environment variables values will only be used if the playbook values are not set. +- The I(service_account_email) and I(service_account_file) options are mutually exclusive. +''' + +EXAMPLES = ''' +- name: get info on a target pool + gcp_compute_target_pool_info: + region: us-west1 + filters: + - name = test_object + project: test_project + auth_kind: serviceaccount + service_account_file: "/tmp/auth.pem" +''' + +RETURN = ''' +resources: + description: List of resources + returned: always + type: complex + contains: + backupPool: + description: + - This field is applicable only when the containing target pool is serving a + forwarding rule as the primary pool, and its failoverRatio field is properly + set to a value between [0, 1]. 
+ - 'backupPool and failoverRatio together define the fallback behavior of the + primary target pool: if the ratio of the healthy instances in the primary + pool is at or below failoverRatio, traffic arriving at the load-balanced IP + will be directed to the backup pool.' + - In case where failoverRatio and backupPool are not set, or all the instances + in the backup pool are unhealthy, the traffic will be directed back to the + primary pool in the "force" mode, where traffic will be spread to the healthy + instances with the best effort, or to all instances when no instance is healthy. + returned: success + type: dict + creationTimestamp: + description: + - Creation timestamp in RFC3339 text format. + returned: success + type: str + description: + description: + - An optional description of this resource. + returned: success + type: str + failoverRatio: + description: + - This field is applicable only when the containing target pool is serving a + forwarding rule as the primary pool (i.e., not as a backup pool to some other + target pool). The value of the field must be in [0, 1]. + - 'If set, backupPool must also be set. They together define the fallback behavior + of the primary target pool: if the ratio of the healthy instances in the primary + pool is at or below this number, traffic arriving at the load-balanced IP + will be directed to the backup pool.' + - In case where failoverRatio is not set or all the instances in the backup + pool are unhealthy, the traffic will be directed back to the primary pool + in the "force" mode, where traffic will be spread to the healthy instances + with the best effort, or to all instances when no instance is healthy. + returned: success + type: str + healthCheck: + description: + - A reference to a HttpHealthCheck resource. + - A member instance in this pool is considered healthy if and only if the health + checks pass. If not specified it means all member instances will be considered + healthy at all times. + returned: success + type: dict + id: + description: + - The unique identifier for the resource. + returned: success + type: int + instances: + description: + - A list of virtual machine instances serving this pool. + - They must live in zones contained in the same region as this pool. + returned: success + type: list + name: + description: + - Name of the resource. Provided by the client when the resource is created. + The name must be 1-63 characters long, and comply with RFC1035. Specifically, + the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` + which means the first character must be a lowercase letter, and all following + characters must be a dash, lowercase letter, or digit, except the last character, + which cannot be a dash. + returned: success + type: str + sessionAffinity: + description: + - 'Session affinity option. Must be one of these values: - NONE: Connections + from the same client IP may go to any instance in the pool.' + - "- CLIENT_IP: Connections from the same client IP will go to the same instance + in the pool while that instance remains healthy." + - "- CLIENT_IP_PROTO: Connections from the same client IP with the same IP protocol + will go to the same instance in the pool while that instance remains healthy." + returned: success + type: str + region: + description: + - The region where the target pool resides. 
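+# A worked illustration of the failover semantics documented above (numbers
+# are examples): with failoverRatio 0.5 and ten instances in the primary
+# pool, traffic shifts to backupPool once five or fewer instances are healthy
+# (healthy ratio at or below 0.5), and returns to the primary pool when six
+# or more are healthy again.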
+ returned: success + type: str +''' + +################################################################################ +# Imports +################################################################################ +from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest +import json + +################################################################################ +# Main +################################################################################ + + +def main(): + module = GcpModule(argument_spec=dict(filters=dict(type='list', elements='str'), region=dict(required=True, type='str'))) + + if not module.params['scopes']: + module.params['scopes'] = ['https://www.googleapis.com/auth/compute'] + + return_value = {'resources': fetch_list(module, collection(module), query_options(module.params['filters']))} + module.exit_json(**return_value) + + +def collection(module): + return "https://www.googleapis.com/compute/v1/projects/{project}/regions/{region}/targetPools".format(**module.params) + + +def fetch_list(module, link, query): + auth = GcpSession(module, 'compute') + return auth.list(link, return_if_object, array_name='items', params={'filter': query}) + + +def query_options(filters): + if not filters: + return '' + + if len(filters) == 1: + return filters[0] + else: + queries = [] + for f in filters: + # For multiple queries, all queries should have () + if f[0] != '(' and f[-1] != ')': + queries.append("(%s)" % ''.join(f)) + else: + queries.append(f) + + return ' '.join(queries) + + +def return_if_object(module, response): + # If not found, return nothing. + if response.status_code == 404: + return None + + # If no content, return nothing. + if response.status_code == 204: + return None + + try: + module.raise_for_status(response) + result = response.json() + except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst: + module.fail_json(msg="Invalid JSON response with error: %s" % inst) + + if navigate_hash(result, ['error', 'errors']): + module.fail_json(msg=navigate_hash(result, ['error', 'errors'])) + + return result + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/cloud/google/gcp_compute_target_ssl_proxy_facts.py b/plugins/modules/cloud/google/gcp_compute_target_ssl_proxy_facts.py new file mode 120000 index 0000000000..6f82c12ab5 --- /dev/null +++ b/plugins/modules/cloud/google/gcp_compute_target_ssl_proxy_facts.py @@ -0,0 +1 @@ +gcp_compute_target_ssl_proxy_info.py \ No newline at end of file diff --git a/plugins/modules/cloud/google/gcp_compute_target_ssl_proxy_info.py b/plugins/modules/cloud/google/gcp_compute_target_ssl_proxy_info.py new file mode 100644 index 0000000000..4814544551 --- /dev/null +++ b/plugins/modules/cloud/google/gcp_compute_target_ssl_proxy_info.py @@ -0,0 +1,236 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (C) 2017 Google +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** AUTO GENERATED CODE *** +# +# ---------------------------------------------------------------------------- +# +# This file is automatically generated by Magic Modules and manual +# changes will be clobbered when the file is regenerated. 
+# +# Please read more about how to change this file at +# https://www.github.com/GoogleCloudPlatform/magic-modules +# +# ---------------------------------------------------------------------------- + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +################################################################################ +# Documentation +################################################################################ + +ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: gcp_compute_target_ssl_proxy_info +description: +- Gather info for GCP TargetSslProxy +short_description: Gather info for GCP TargetSslProxy +author: Google Inc. (@googlecloudplatform) +requirements: +- python >= 2.6 +- requests >= 2.18.4 +- google-auth >= 1.3.0 +options: + filters: + description: + - A list of filter value pairs. Available filters are listed here U(https://cloud.google.com/sdk/gcloud/reference/topic/filters). + - Each additional filter in the list will act be added as an AND condition (filter1 + and filter2) . + type: list + project: + description: + - The Google Cloud Platform project to use. + type: str + auth_kind: + description: + - The type of credential used. + type: str + required: true + choices: + - application + - machineaccount + - serviceaccount + service_account_contents: + description: + - The contents of a Service Account JSON file, either in a dictionary or as a + JSON string that represents it. + type: jsonarg + service_account_file: + description: + - The path of a Service Account JSON file if serviceaccount is selected as type. + type: path + service_account_email: + description: + - An optional service account email address if machineaccount is selected and + the user does not wish to use the default email. + type: str + scopes: + description: + - Array of scopes to be used + type: list + env_type: + description: + - Specifies which Ansible environment you're running this module within. + - This should not be set unless you know what you're doing. + - This only alters the User Agent string for any API requests. + type: str +notes: +- for authentication, you can set service_account_file using the C(gcp_service_account_file) + env variable. +- for authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS) + env variable. +- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL) + env variable. +- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable. +- For authentication, you can set scopes using the C(GCP_SCOPES) env variable. +- Environment variables values will only be used if the playbook values are not set. +- The I(service_account_email) and I(service_account_file) options are mutually exclusive. +''' + +EXAMPLES = ''' +- name: get info on a target SSL proxy + gcp_compute_target_ssl_proxy_info: + filters: + - name = test_object + project: test_project + auth_kind: serviceaccount + service_account_file: "/tmp/auth.pem" +''' + +RETURN = ''' +resources: + description: List of resources + returned: always + type: complex + contains: + creationTimestamp: + description: + - Creation timestamp in RFC3339 text format. + returned: success + type: str + description: + description: + - An optional description of this resource. + returned: success + type: str + id: + description: + - The unique identifier for the resource. 
+ returned: success + type: int + name: + description: + - Name of the resource. Provided by the client when the resource is created. + The name must be 1-63 characters long, and comply with RFC1035. Specifically, + the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` + which means the first character must be a lowercase letter, and all following + characters must be a dash, lowercase letter, or digit, except the last character, + which cannot be a dash. + returned: success + type: str + proxyHeader: + description: + - Specifies the type of proxy header to append before sending data to the backend, + either NONE or PROXY_V1. The default is NONE. + returned: success + type: str + service: + description: + - A reference to the BackendService resource. + returned: success + type: dict + sslCertificates: + description: + - A list of SslCertificate resources that are used to authenticate connections + between users and the load balancer. Currently, exactly one SSL certificate + must be specified. + returned: success + type: list + sslPolicy: + description: + - A reference to the SslPolicy resource that will be associated with the TargetSslProxy + resource. If not set, the TargetSslProxy resource will not have any SSL policy + configured. + returned: success + type: dict +''' + +################################################################################ +# Imports +################################################################################ +from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest +import json + +################################################################################ +# Main +################################################################################ + + +def main(): + module = GcpModule(argument_spec=dict(filters=dict(type='list', elements='str'))) + + if not module.params['scopes']: + module.params['scopes'] = ['https://www.googleapis.com/auth/compute'] + + return_value = {'resources': fetch_list(module, collection(module), query_options(module.params['filters']))} + module.exit_json(**return_value) + + +def collection(module): + return "https://www.googleapis.com/compute/v1/projects/{project}/global/targetSslProxies".format(**module.params) + + +def fetch_list(module, link, query): + auth = GcpSession(module, 'compute') + return auth.list(link, return_if_object, array_name='items', params={'filter': query}) + + +def query_options(filters): + if not filters: + return '' + + if len(filters) == 1: + return filters[0] + else: + queries = [] + for f in filters: + # For multiple queries, all queries should have () + if f[0] != '(' and f[-1] != ')': + queries.append("(%s)" % ''.join(f)) + else: + queries.append(f) + + return ' '.join(queries) + + +def return_if_object(module, response): + # If not found, return nothing. + if response.status_code == 404: + return None + + # If no content, return nothing. 
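+    # A 404 means the requested resource does not exist and a 204 response
+    # carries no body to parse; returning None in both cases lets the caller
+    # treat the result as an empty page rather than an error.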
+ if response.status_code == 204: + return None + + try: + module.raise_for_status(response) + result = response.json() + except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst: + module.fail_json(msg="Invalid JSON response with error: %s" % inst) + + if navigate_hash(result, ['error', 'errors']): + module.fail_json(msg=navigate_hash(result, ['error', 'errors'])) + + return result + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/cloud/google/gcp_compute_target_tcp_proxy_facts.py b/plugins/modules/cloud/google/gcp_compute_target_tcp_proxy_facts.py new file mode 120000 index 0000000000..551871b74e --- /dev/null +++ b/plugins/modules/cloud/google/gcp_compute_target_tcp_proxy_facts.py @@ -0,0 +1 @@ +gcp_compute_target_tcp_proxy_info.py \ No newline at end of file diff --git a/plugins/modules/cloud/google/gcp_compute_target_tcp_proxy_info.py b/plugins/modules/cloud/google/gcp_compute_target_tcp_proxy_info.py new file mode 100644 index 0000000000..6e62aaf692 --- /dev/null +++ b/plugins/modules/cloud/google/gcp_compute_target_tcp_proxy_info.py @@ -0,0 +1,222 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (C) 2017 Google +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** AUTO GENERATED CODE *** +# +# ---------------------------------------------------------------------------- +# +# This file is automatically generated by Magic Modules and manual +# changes will be clobbered when the file is regenerated. +# +# Please read more about how to change this file at +# https://www.github.com/GoogleCloudPlatform/magic-modules +# +# ---------------------------------------------------------------------------- + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +################################################################################ +# Documentation +################################################################################ + +ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: gcp_compute_target_tcp_proxy_info +description: +- Gather info for GCP TargetTcpProxy +short_description: Gather info for GCP TargetTcpProxy +author: Google Inc. (@googlecloudplatform) +requirements: +- python >= 2.6 +- requests >= 2.18.4 +- google-auth >= 1.3.0 +options: + filters: + description: + - A list of filter value pairs. Available filters are listed here U(https://cloud.google.com/sdk/gcloud/reference/topic/filters). + - Each additional filter in the list will act be added as an AND condition (filter1 + and filter2) . + type: list + project: + description: + - The Google Cloud Platform project to use. + type: str + auth_kind: + description: + - The type of credential used. + type: str + required: true + choices: + - application + - machineaccount + - serviceaccount + service_account_contents: + description: + - The contents of a Service Account JSON file, either in a dictionary or as a + JSON string that represents it. + type: jsonarg + service_account_file: + description: + - The path of a Service Account JSON file if serviceaccount is selected as type. + type: path + service_account_email: + description: + - An optional service account email address if machineaccount is selected and + the user does not wish to use the default email. 
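+# A hedged usage sketch (the path is an example only): because
+# service_account_contents is declared as jsonarg above, the credential JSON
+# can be supplied inline instead of via service_account_file, e.g. with the
+# standard file lookup:
+#
+#   service_account_contents: "{{ lookup('file', '/tmp/auth.json') }}"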
+ type: str + scopes: + description: + - Array of scopes to be used + type: list + env_type: + description: + - Specifies which Ansible environment you're running this module within. + - This should not be set unless you know what you're doing. + - This only alters the User Agent string for any API requests. + type: str +notes: +- for authentication, you can set service_account_file using the C(gcp_service_account_file) + env variable. +- for authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS) + env variable. +- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL) + env variable. +- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable. +- For authentication, you can set scopes using the C(GCP_SCOPES) env variable. +- Environment variables values will only be used if the playbook values are not set. +- The I(service_account_email) and I(service_account_file) options are mutually exclusive. +''' + +EXAMPLES = ''' +- name: get info on a target TCP proxy + gcp_compute_target_tcp_proxy_info: + filters: + - name = test_object + project: test_project + auth_kind: serviceaccount + service_account_file: "/tmp/auth.pem" +''' + +RETURN = ''' +resources: + description: List of resources + returned: always + type: complex + contains: + creationTimestamp: + description: + - Creation timestamp in RFC3339 text format. + returned: success + type: str + description: + description: + - An optional description of this resource. + returned: success + type: str + id: + description: + - The unique identifier for the resource. + returned: success + type: int + name: + description: + - Name of the resource. Provided by the client when the resource is created. + The name must be 1-63 characters long, and comply with RFC1035. Specifically, + the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` + which means the first character must be a lowercase letter, and all following + characters must be a dash, lowercase letter, or digit, except the last character, + which cannot be a dash. + returned: success + type: str + proxyHeader: + description: + - Specifies the type of proxy header to append before sending data to the backend, + either NONE or PROXY_V1. The default is NONE. + returned: success + type: str + service: + description: + - A reference to the BackendService resource. 
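+# Background sketch on the proxyHeader values above (hedged, drawn from the
+# PROXY protocol v1 specification rather than from this module): PROXY_V1
+# prepends a single text line of the form
+#   PROXY TCP4 <client-ip> <backend-facing-ip> <client-port> <dest-port>\r\n
+# so the backend can recover the original client address, while NONE sends
+# the payload unchanged.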
+ returned: success + type: dict +''' + +################################################################################ +# Imports +################################################################################ +from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest +import json + +################################################################################ +# Main +################################################################################ + + +def main(): + module = GcpModule(argument_spec=dict(filters=dict(type='list', elements='str'))) + + if not module.params['scopes']: + module.params['scopes'] = ['https://www.googleapis.com/auth/compute'] + + return_value = {'resources': fetch_list(module, collection(module), query_options(module.params['filters']))} + module.exit_json(**return_value) + + +def collection(module): + return "https://www.googleapis.com/compute/v1/projects/{project}/global/targetTcpProxies".format(**module.params) + + +def fetch_list(module, link, query): + auth = GcpSession(module, 'compute') + return auth.list(link, return_if_object, array_name='items', params={'filter': query}) + + +def query_options(filters): + if not filters: + return '' + + if len(filters) == 1: + return filters[0] + else: + queries = [] + for f in filters: + # For multiple queries, all queries should have () + if f[0] != '(' and f[-1] != ')': + queries.append("(%s)" % ''.join(f)) + else: + queries.append(f) + + return ' '.join(queries) + + +def return_if_object(module, response): + # If not found, return nothing. + if response.status_code == 404: + return None + + # If no content, return nothing. + if response.status_code == 204: + return None + + try: + module.raise_for_status(response) + result = response.json() + except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst: + module.fail_json(msg="Invalid JSON response with error: %s" % inst) + + if navigate_hash(result, ['error', 'errors']): + module.fail_json(msg=navigate_hash(result, ['error', 'errors'])) + + return result + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/cloud/google/gcp_compute_target_vpn_gateway_facts.py b/plugins/modules/cloud/google/gcp_compute_target_vpn_gateway_facts.py new file mode 120000 index 0000000000..72c072765a --- /dev/null +++ b/plugins/modules/cloud/google/gcp_compute_target_vpn_gateway_facts.py @@ -0,0 +1 @@ +gcp_compute_target_vpn_gateway_info.py \ No newline at end of file diff --git a/plugins/modules/cloud/google/gcp_compute_target_vpn_gateway_info.py b/plugins/modules/cloud/google/gcp_compute_target_vpn_gateway_info.py new file mode 100644 index 0000000000..bbf05ced24 --- /dev/null +++ b/plugins/modules/cloud/google/gcp_compute_target_vpn_gateway_info.py @@ -0,0 +1,238 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (C) 2017 Google +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** AUTO GENERATED CODE *** +# +# ---------------------------------------------------------------------------- +# +# This file is automatically generated by Magic Modules and manual +# changes will be clobbered when the file is regenerated. 
+# +# Please read more about how to change this file at +# https://www.github.com/GoogleCloudPlatform/magic-modules +# +# ---------------------------------------------------------------------------- + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +################################################################################ +# Documentation +################################################################################ + +ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: gcp_compute_target_vpn_gateway_info +description: +- Gather info for GCP TargetVpnGateway +short_description: Gather info for GCP TargetVpnGateway +author: Google Inc. (@googlecloudplatform) +requirements: +- python >= 2.6 +- requests >= 2.18.4 +- google-auth >= 1.3.0 +options: + filters: + description: + - A list of filter value pairs. Available filters are listed here U(https://cloud.google.com/sdk/gcloud/reference/topic/filters). + - Each additional filter in the list will act be added as an AND condition (filter1 + and filter2) . + type: list + region: + description: + - The region this gateway should sit in. + required: true + type: str + project: + description: + - The Google Cloud Platform project to use. + type: str + auth_kind: + description: + - The type of credential used. + type: str + required: true + choices: + - application + - machineaccount + - serviceaccount + service_account_contents: + description: + - The contents of a Service Account JSON file, either in a dictionary or as a + JSON string that represents it. + type: jsonarg + service_account_file: + description: + - The path of a Service Account JSON file if serviceaccount is selected as type. + type: path + service_account_email: + description: + - An optional service account email address if machineaccount is selected and + the user does not wish to use the default email. + type: str + scopes: + description: + - Array of scopes to be used + type: list + env_type: + description: + - Specifies which Ansible environment you're running this module within. + - This should not be set unless you know what you're doing. + - This only alters the User Agent string for any API requests. + type: str +notes: +- for authentication, you can set service_account_file using the C(gcp_service_account_file) + env variable. +- for authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS) + env variable. +- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL) + env variable. +- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable. +- For authentication, you can set scopes using the C(GCP_SCOPES) env variable. +- Environment variables values will only be used if the playbook values are not set. +- The I(service_account_email) and I(service_account_file) options are mutually exclusive. +''' + +EXAMPLES = ''' +- name: get info on a target vpn gateway + gcp_compute_target_vpn_gateway_info: + region: us-west1 + filters: + - name = test_object + project: test_project + auth_kind: serviceaccount + service_account_file: "/tmp/auth.pem" +''' + +RETURN = ''' +resources: + description: List of resources + returned: always + type: complex + contains: + creationTimestamp: + description: + - Creation timestamp in RFC3339 text format. + returned: success + type: str + description: + description: + - An optional description of this resource. 
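+# A hedged illustration (values taken from the example above): with
+# project: test_project and region: us-west1, the collection() helper below
+# lists from
+#   https://www.googleapis.com/compute/v1/projects/test_project/regions/us-west1/targetVpnGateways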
+ returned: success + type: str + name: + description: + - Name of the resource. Provided by the client when the resource is created. + The name must be 1-63 characters long, and comply with RFC1035. Specifically, + the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` + which means the first character must be a lowercase letter, and all following + characters must be a dash, lowercase letter, or digit, except the last character, + which cannot be a dash. + returned: success + type: str + id: + description: + - The unique identifier for the resource. + returned: success + type: int + network: + description: + - The network this VPN gateway is accepting traffic for. + returned: success + type: dict + tunnels: + description: + - A list of references to VpnTunnel resources associated with this VPN gateway. + returned: success + type: list + forwardingRules: + description: + - A list of references to the ForwardingRule resources associated with this + VPN gateway. + returned: success + type: list + region: + description: + - The region this gateway should sit in. + returned: success + type: str +''' + +################################################################################ +# Imports +################################################################################ +from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest +import json + +################################################################################ +# Main +################################################################################ + + +def main(): + module = GcpModule(argument_spec=dict(filters=dict(type='list', elements='str'), region=dict(required=True, type='str'))) + + if not module.params['scopes']: + module.params['scopes'] = ['https://www.googleapis.com/auth/compute'] + + return_value = {'resources': fetch_list(module, collection(module), query_options(module.params['filters']))} + module.exit_json(**return_value) + + +def collection(module): + return "https://www.googleapis.com/compute/v1/projects/{project}/regions/{region}/targetVpnGateways".format(**module.params) + + +def fetch_list(module, link, query): + auth = GcpSession(module, 'compute') + return auth.list(link, return_if_object, array_name='items', params={'filter': query}) + + +def query_options(filters): + if not filters: + return '' + + if len(filters) == 1: + return filters[0] + else: + queries = [] + for f in filters: + # For multiple queries, all queries should have () + if f[0] != '(' and f[-1] != ')': + queries.append("(%s)" % ''.join(f)) + else: + queries.append(f) + + return ' '.join(queries) + + +def return_if_object(module, response): + # If not found, return nothing. + if response.status_code == 404: + return None + + # If no content, return nothing. 
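+    # Note on the except clause a few lines below: json.decoder.JSONDecodeError
+    # exists only on Python 3, so getattr() falls back to ValueError, which is
+    # what the json module raises on Python 2; one handler therefore covers
+    # both interpreter families this module supports (python >= 2.6).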
+ if response.status_code == 204: + return None + + try: + module.raise_for_status(response) + result = response.json() + except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst: + module.fail_json(msg="Invalid JSON response with error: %s" % inst) + + if navigate_hash(result, ['error', 'errors']): + module.fail_json(msg=navigate_hash(result, ['error', 'errors'])) + + return result + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/cloud/google/gcp_compute_url_map_facts.py b/plugins/modules/cloud/google/gcp_compute_url_map_facts.py new file mode 120000 index 0000000000..e8046ebc54 --- /dev/null +++ b/plugins/modules/cloud/google/gcp_compute_url_map_facts.py @@ -0,0 +1 @@ +gcp_compute_url_map_info.py \ No newline at end of file diff --git a/plugins/modules/cloud/google/gcp_compute_url_map_info.py b/plugins/modules/cloud/google/gcp_compute_url_map_info.py new file mode 100644 index 0000000000..879f4a974b --- /dev/null +++ b/plugins/modules/cloud/google/gcp_compute_url_map_info.py @@ -0,0 +1,1658 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (C) 2017 Google +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** AUTO GENERATED CODE *** +# +# ---------------------------------------------------------------------------- +# +# This file is automatically generated by Magic Modules and manual +# changes will be clobbered when the file is regenerated. +# +# Please read more about how to change this file at +# https://www.github.com/GoogleCloudPlatform/magic-modules +# +# ---------------------------------------------------------------------------- + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +################################################################################ +# Documentation +################################################################################ + +ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: gcp_compute_url_map_info +description: +- Gather info for GCP UrlMap +short_description: Gather info for GCP UrlMap +author: Google Inc. (@googlecloudplatform) +requirements: +- python >= 2.6 +- requests >= 2.18.4 +- google-auth >= 1.3.0 +options: + filters: + description: + - A list of filter value pairs. Available filters are listed here U(https://cloud.google.com/sdk/gcloud/reference/topic/filters). + - Each additional filter in the list will act be added as an AND condition (filter1 + and filter2) . + type: list + project: + description: + - The Google Cloud Platform project to use. + type: str + auth_kind: + description: + - The type of credential used. + type: str + required: true + choices: + - application + - machineaccount + - serviceaccount + service_account_contents: + description: + - The contents of a Service Account JSON file, either in a dictionary or as a + JSON string that represents it. + type: jsonarg + service_account_file: + description: + - The path of a Service Account JSON file if serviceaccount is selected as type. + type: path + service_account_email: + description: + - An optional service account email address if machineaccount is selected and + the user does not wish to use the default email. 
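+# A hedged usage sketch (values illustrative): per the notes below, each of
+# these auth options can also come from the environment, and playbook values
+# take precedence when both are set:
+#
+#   export GCP_AUTH_KIND=serviceaccount
+#   export GCP_SERVICE_ACCOUNT_FILE=/tmp/auth.pem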
+ type: str + scopes: + description: + - Array of scopes to be used + type: list + env_type: + description: + - Specifies which Ansible environment you're running this module within. + - This should not be set unless you know what you're doing. + - This only alters the User Agent string for any API requests. + type: str +notes: +- for authentication, you can set service_account_file using the C(gcp_service_account_file) + env variable. +- for authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS) + env variable. +- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL) + env variable. +- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable. +- For authentication, you can set scopes using the C(GCP_SCOPES) env variable. +- Environment variables values will only be used if the playbook values are not set. +- The I(service_account_email) and I(service_account_file) options are mutually exclusive. +''' + +EXAMPLES = ''' +- name: get info on an URL map + gcp_compute_url_map_info: + filters: + - name = test_object + project: test_project + auth_kind: serviceaccount + service_account_file: "/tmp/auth.pem" +''' + +RETURN = ''' +resources: + description: List of resources + returned: always + type: complex + contains: + creationTimestamp: + description: + - Creation timestamp in RFC3339 text format. + returned: success + type: str + defaultService: + description: + - The BackendService resource to which traffic is directed if none of the hostRules + match. If defaultRouteAction is additionally specified, advanced routing actions + like URL Rewrites, etc. take effect prior to sending the request to the backend. + However, if defaultService is specified, defaultRouteAction cannot contain + any weightedBackendServices. Conversely, if routeAction specifies any weightedBackendServices, + service must not be specified. Only one of defaultService, defaultUrlRedirect + or defaultRouteAction.weightedBackendService must be set. + returned: success + type: dict + description: + description: + - An optional description of this resource. Provide this property when you create + the resource. + returned: success + type: str + id: + description: + - The unique identifier for the resource. + returned: success + type: int + fingerprint: + description: + - Fingerprint of this resource. A hash of the contents stored in this object. + This field is used in optimistic locking. + returned: success + type: str + headerAction: + description: + - Specifies changes to request and response headers that need to take effect + for the selected backendService. The headerAction specified here take effect + after headerAction specified under pathMatcher. + returned: success + type: complex + contains: + requestHeadersToAdd: + description: + - Headers to add to a matching request prior to forwarding the request to + the backendService. + returned: success + type: complex + contains: + headerName: + description: + - The name of the header. + returned: success + type: str + headerValue: + description: + - The value of the header to add. + returned: success + type: str + replace: + description: + - If false, headerValue is appended to any values that already exist + for the header. If true, headerValue is set for the header, discarding + any values that were set for that header. 
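+# A worked illustration of the replace flag above (header name and values are
+# examples): if a request already carries "X-Env: staging" and a
+# requestHeadersToAdd entry sets headerName X-Env with headerValue prod, then
+# replace: false keeps the existing value and appends prod, while
+# replace: true forwards only "X-Env: prod".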
+ returned: success + type: bool + requestHeadersToRemove: + description: + - A list of header names for headers that need to be removed from the request + prior to forwarding the request to the backendService. + returned: success + type: list + responseHeadersToAdd: + description: + - Headers to add the response prior to sending the response back to the + client. + returned: success + type: complex + contains: + headerName: + description: + - The name of the header. + returned: success + type: str + headerValue: + description: + - The value of the header to add. + returned: success + type: str + replace: + description: + - If false, headerValue is appended to any values that already exist + for the header. If true, headerValue is set for the header, discarding + any values that were set for that header. + returned: success + type: bool + responseHeadersToRemove: + description: + - A list of header names for headers that need to be removed from the response + prior to sending the response back to the client. + returned: success + type: list + hostRules: + description: + - The list of HostRules to use against the URL. + returned: success + type: complex + contains: + description: + description: + - An optional description of this resource. Provide this property when you + create the resource. + returned: success + type: str + hosts: + description: + - The list of host patterns to match. They must be valid hostnames, except + * will match any string of ([a-z0-9-.]*). In that case, * must be the + first character and must be followed in the pattern by either - or . + returned: success + type: list + pathMatcher: + description: + - The name of the PathMatcher to use to match the path portion of the URL + if the hostRule matches the URL's host portion. + returned: success + type: str + name: + description: + - Name of the resource. Provided by the client when the resource is created. + The name must be 1-63 characters long, and comply with RFC1035. Specifically, + the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` + which means the first character must be a lowercase letter, and all following + characters must be a dash, lowercase letter, or digit, except the last character, + which cannot be a dash. + returned: success + type: str + pathMatchers: + description: + - The list of named PathMatchers to use against the URL. + returned: success + type: complex + contains: + defaultService: + description: + - 'The BackendService resource. This will be used if none of the pathRules + or routeRules defined by this PathMatcher are matched. For example, the + following are all valid URLs to a BackendService resource: - U(https://www.googleapis.com/compute/v1/projects/project/global/backendServices/backen) + dService - compute/v1/projects/project/global/backendServices/backendService + - global/backendServices/backendService If defaultRouteAction is additionally + specified, advanced routing actions like URL Rewrites, etc. take effect + prior to sending the request to the backend. However, if defaultService + is specified, defaultRouteAction cannot contain any weightedBackendServices. + Conversely, if defaultRouteAction specifies any weightedBackendServices, + defaultService must not be specified. Only one of defaultService, defaultUrlRedirect + or defaultRouteAction.weightedBackendService must be set. 
Authorization + requires one or more of the following Google IAM permissions on the specified + resource default_service: - compute.backendBuckets.use - compute.backendServices.use + .' + returned: success + type: dict + description: + description: + - An optional description of this resource. Provide this property when you + create the resource. + returned: success + type: str + headerAction: + description: + - Specifies changes to request and response headers that need to take effect + for the selected backendService. HeaderAction specified here are applied + after the matching HttpRouteRule HeaderAction and before the HeaderAction + in the UrlMap . + returned: success + type: complex + contains: + requestHeadersToAdd: + description: + - Headers to add to a matching request prior to forwarding the request + to the backendService. + returned: success + type: complex + contains: + headerName: + description: + - The name of the header. + returned: success + type: str + headerValue: + description: + - The value of the header to add. + returned: success + type: str + replace: + description: + - If false, headerValue is appended to any values that already exist + for the header. If true, headerValue is set for the header, discarding + any values that were set for that header. + returned: success + type: bool + requestHeadersToRemove: + description: + - A list of header names for headers that need to be removed from the + request prior to forwarding the request to the backendService. + returned: success + type: list + responseHeadersToAdd: + description: + - Headers to add the response prior to sending the response back to + the client. + returned: success + type: complex + contains: + headerName: + description: + - The name of the header. + returned: success + type: str + headerValue: + description: + - The value of the header to add. + returned: success + type: str + replace: + description: + - If false, headerValue is appended to any values that already exist + for the header. If true, headerValue is set for the header, discarding + any values that were set for that header. + returned: success + type: bool + responseHeadersToRemove: + description: + - A list of header names for headers that need to be removed from the + response prior to sending the response back to the client. + returned: success + type: list + name: + description: + - The name to which this PathMatcher is referred by the HostRule. + returned: success + type: str + pathRules: + description: + - 'The list of path rules. Use this list instead of routeRules when routing + based on simple path matching is all that''s required. The order by which + path rules are specified does not matter. Matches are always done on the + longest-path-first basis. For example: a pathRule with a path /a/b/c/* + will match before /a/b/* irrespective of the order in which those paths + appear in this list. Within a given pathMatcher, only one of pathRules + or routeRules must be set.' + returned: success + type: complex + contains: + service: + description: + - The backend service resource to which traffic is directed if this + rule is matched. If routeAction is additionally specified, advanced + routing actions like URL Rewrites, etc. take effect prior to sending + the request to the backend. However, if service is specified, routeAction + cannot contain any weightedBackendService s. Conversely, if routeAction + specifies any weightedBackendServices, service must not be specified. 
+ Only one of urlRedirect, service or routeAction.weightedBackendService + must be set. + returned: success + type: dict + paths: + description: + - 'The list of path patterns to match. Each must start with / and the + only place a * is allowed is at the end following a /. The string + fed to the path matcher does not include any text after the first + ? or #, and those chars are not allowed here.' + returned: success + type: list + routeAction: + description: + - In response to a matching path, the load balancer performs advanced + routing actions like URL rewrites, header transformations, etc. prior + to forwarding the request to the selected backend. If routeAction + specifies any weightedBackendServices, service must not be set. Conversely + if service is set, routeAction cannot contain any weightedBackendServices. + Only one of routeAction or urlRedirect must be set. + returned: success + type: complex + contains: + corsPolicy: + description: + - The specification for allowing client side cross-origin requests. + Please see W3C Recommendation for Cross Origin Resource Sharing. + returned: success + type: complex + contains: + allowCredentials: + description: + - In response to a preflight request, setting this to true indicates + that the actual request can include user credentials. This + translates to the Access-Control-Allow-Credentials header. + Defaults to false. + returned: success + type: bool + allowHeaders: + description: + - Specifies the content for the Access-Control-Allow-Headers + header. + returned: success + type: list + allowMethods: + description: + - Specifies the content for the Access-Control-Allow-Methods + header. + returned: success + type: list + allowOriginRegexes: + description: + - Specifies the regular expression patterns that match allowed + origins. For regular expression grammar please see en.cppreference.com/w/cpp/regex/ecmascript + An origin is allowed if it matches either allow_origins or + allow_origin_regex. + returned: success + type: list + allowOrigins: + description: + - Specifies the list of origins that will be allowed to do CORS + requests. An origin is allowed if it matches either allow_origins + or allow_origin_regex. + returned: success + type: list + disabled: + description: + - If true, specifies the CORS policy is disabled. + returned: success + type: bool + exposeHeaders: + description: + - Specifies the content for the Access-Control-Expose-Headers + header. + returned: success + type: list + maxAge: + description: + - Specifies how long the results of a preflight request can + be cached. This translates to the content for the Access-Control-Max-Age + header. + returned: success + type: int + faultInjectionPolicy: + description: + - The specification for fault injection introduced into traffic + to test the resiliency of clients to backend service failure. + As part of fault injection, when clients send requests to a backend + service, delays can be introduced by Loadbalancer on a percentage + of requests before sending those requests to the backend service. + Similarly requests from clients can be aborted by the Loadbalancer + for a percentage of requests. timeout and retry_policy will be + ignored by clients that are configured with a fault_injection_policy. + returned: success + type: complex + contains: + abort: + description: + - The specification for how client requests are aborted as part + of fault injection.
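+# A worked illustration (values are examples within the bounds documented
+# just below): an abort spec with httpStatus 503 and percentage '10.0' fails
+# roughly one in ten matching requests with an HTTP 503 before they reach
+# the backend service.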
+ returned: success + type: complex + contains: + httpStatus: + description: + - The HTTP status code used to abort the request. The value + must be between 200 and 599 inclusive. + returned: success + type: int + percentage: + description: + - The percentage of traffic (connections/operations/requests) + which will be aborted as part of fault injection. The + value must be between 0.0 and 100.0 inclusive. + returned: success + type: str + delay: + description: + - The specification for how client requests are delayed as part + of fault injection, before being sent to a backend service. + returned: success + type: complex + contains: + fixedDelay: + description: + - Specifies the value of the fixed delay interval. + returned: success + type: complex + contains: + nanos: + description: + - Span of time that's a fraction of a second at nanosecond + resolution. Durations less than one second are represented + with a 0 `seconds` field and a positive `nanos` field. + Must be from 0 to 999,999,999 inclusive. + returned: success + type: int + seconds: + description: + - Span of time at a resolution of a second. Must be + from 0 to 315,576,000,000 inclusive. + returned: success + type: str + percentage: + description: + - The percentage of traffic (connections/operations/requests) + on which delay will be introduced as part of fault injection. + The value must be between 0.0 and 100.0 inclusive. + returned: success + type: str + requestMirrorPolicy: + description: + - Specifies the policy on how requests intended for the route's + backends are shadowed to a separate mirrored backend service. + Loadbalancer does not wait for responses from the shadow service. + Prior to sending traffic to the shadow service, the host / authority + header is suffixed with -shadow. + returned: success + type: complex + contains: + backendService: + description: + - The BackendService resource being mirrored to. + returned: success + type: dict + retryPolicy: + description: + - Specifies the retry policy associated with this route. + returned: success + type: complex + contains: + numRetries: + description: + - Specifies the allowed number of retries. This number must be + > 0. + returned: success + type: int + perTryTimeout: + description: + - Specifies a non-zero timeout per retry attempt. + returned: success + type: complex + contains: + nanos: + description: + - Span of time that's a fraction of a second at nanosecond + resolution. Durations less than one second are represented + with a 0 `seconds` field and a positive `nanos` field. + Must be from 0 to 999,999,999 inclusive. + returned: success + type: int + seconds: + description: + - Span of time at a resolution of a second. Must be from + 0 to 315,576,000,000 inclusive. + returned: success + type: str + retryConditions: + description: + - 'Specifies one or more conditions when this retry rule applies. + Valid values are: - 5xx: Loadbalancer will attempt a retry + if the backend service responds with any 5xx response code, + or if the backend service does not respond at all, example: + disconnects, reset, read timeout, connection failure, and + refused streams.' + - "- gateway-error: Similar to 5xx, but only applies to response + codes 502, 503 or 504." + - "- connect-failure: Loadbalancer will retry on failures connecting + to backend services, for example due to connection timeouts." + - "- retriable-4xx: Loadbalancer will retry for retriable 4xx + response codes." + - Currently the only retriable error supported is 409.
+ - "- refused-stream: Loadbalancer will retry if the backend + service resets the stream with a REFUSED_STREAM error code. + This reset type indicates that it is safe to retry." + - "- cancelled: Loadbalancer will retry if the gRPC status code + in the response header is set to cancelled - deadline-exceeded: + Loadbalancer will retry if the gRPC status code in the response + header is set to deadline-exceeded - resource-exhausted: Loadbalancer + will retry if the gRPC status code in the response header + is set to resource-exhausted - unavailable: Loadbalancer will + retry if the gRPC status code in the response header is set + to unavailable ." + returned: success + type: list + timeout: + description: + - Specifies the timeout for the selected route. Timeout is computed + from the time the request is has been fully processed (i.e. end-of-stream) + up until the response has been completely processed. Timeout includes + all retries. If not specified, the default value is 15 seconds. + returned: success + type: complex + contains: + nanos: + description: + - Span of time that's a fraction of a second at nanosecond resolution. + Durations less than one second are represented with a 0 `seconds` + field and a positive `nanos` field. Must be from 0 to 999,999,999 + inclusive. + returned: success + type: int + seconds: + description: + - Span of time at a resolution of a second. Must be from 0 to + 315,576,000,000 inclusive. + returned: success + type: str + urlRewrite: + description: + - The spec to modify the URL of the request, prior to forwarding + the request to the matched service . + returned: success + type: complex + contains: + hostRewrite: + description: + - Prior to forwarding the request to the selected service, the + request's host header is replaced with contents of hostRewrite. + The value must be between 1 and 255 characters. + returned: success + type: str + pathPrefixRewrite: + description: + - Prior to forwarding the request to the selected backend service, + the matching portion of the request's path is replaced by + pathPrefixRewrite. The value must be between 1 and 1024 characters. + returned: success + type: str + weightedBackendServices: + description: + - A list of weighted backend services to send traffic to when a + route match occurs. The weights determine the fraction of traffic + that flows to their corresponding backend service. If all traffic + needs to go to a single backend service, there must be one weightedBackendService + with weight set to a non 0 number. Once a backendService is identified + and before forwarding the request to the backend service, advanced + routing actions like Url rewrites and header transformations are + applied depending on additional settings specified in this HttpRouteAction. + returned: success + type: complex + contains: + backendService: + description: + - The default BackendService resource. Before forwarding the + request to backendService, the loadbalancer applies any relevant + headerActions specified as part of this backendServiceWeight. + returned: success + type: dict + headerAction: + description: + - Specifies changes to request and response headers that need + to take effect for the selected backendService. headerAction + specified here take effect before headerAction in the enclosing + HttpRouteRule, PathMatcher and UrlMap. + returned: success + type: complex + contains: + requestHeadersToAdd: + description: + - Headers to add to a matching request prior to forwarding + the request to the backendService. 
+ returned: success + type: complex + contains: + headerName: + description: + - The name of the header. + returned: success + type: str + headerValue: + description: + - The value of the header to add. + returned: success + type: str + replace: + description: + - If false, headerValue is appended to any values that + already exist for the header. If true, headerValue + is set for the header, discarding any values that + were set for that header. + returned: success + type: bool + requestHeadersToRemove: + description: + - A list of header names for headers that need to be removed + from the request prior to forwarding the request to the + backendService. + returned: success + type: list + responseHeadersToAdd: + description: + - Headers to add the response prior to sending the response + back to the client. + returned: success + type: complex + contains: + headerName: + description: + - The name of the header. + returned: success + type: str + headerValue: + description: + - The value of the header to add. + returned: success + type: str + replace: + description: + - If false, headerValue is appended to any values that + already exist for the header. If true, headerValue + is set for the header, discarding any values that + were set for that header. + returned: success + type: bool + responseHeadersToRemove: + description: + - A list of header names for headers that need to be removed + from the response prior to sending the response back to + the client. + returned: success + type: list + weight: + description: + - Specifies the fraction of traffic sent to backendService, + computed as weight / (sum of all weightedBackendService weights + in routeAction) . The selection of a backend service is determined + only for new traffic. Once a user's request has been directed + to a backendService, subsequent requests will be sent to the + same backendService as determined by the BackendService's + session affinity policy. + - The value must be between 0 and 1000 . + returned: success + type: int + urlRedirect: + description: + - When a path pattern is matched, the request is redirected to a URL + specified by urlRedirect. If urlRedirect is specified, service or + routeAction must not be set. + returned: success + type: complex + contains: + hostRedirect: + description: + - The host that will be used in the redirect response instead of + the one that was supplied in the request. The value must be between + 1 and 255 characters. + returned: success + type: str + httpsRedirect: + description: + - If set to true, the URL scheme in the redirected request is set + to https. If set to false, the URL scheme of the redirected request + will remain the same as that of the request. This must only be + set for UrlMaps used in TargetHttpProxys. + - Setting this true for TargetHttpsProxy is not permitted. Defaults + to false. + returned: success + type: bool + pathRedirect: + description: + - The path that will be used in the redirect response instead of + the one that was supplied in the request. Only one of pathRedirect + or prefixRedirect must be specified. The value must be between + 1 and 1024 characters. + returned: success + type: str + prefixRedirect: + description: + - The prefix that replaces the prefixMatch specified in the HttpRouteRuleMatch, + retaining the remaining portion of the URL before redirecting + the request. + returned: success + type: str + redirectResponseCode: + description: + - 'The HTTP Status code to use for this RedirectAction. 
Supported + values are: - MOVED_PERMANENTLY_DEFAULT, which is the default + value and corresponds to 301.' + - "- FOUND, which corresponds to 302." + - "- SEE_OTHER, which corresponds to 303." + - "- TEMPORARY_REDIRECT, which corresponds to 307. In this case, + the request method will be retained." + - "- PERMANENT_REDIRECT, which corresponds to 308. In this case, + the request method will be retained." + returned: success + type: str + stripQuery: + description: + - If set to true, any accompanying query portion of the original + URL is removed prior to redirecting the request. If set to false, + the query portion of the original URL is retained. + returned: success + type: bool + routeRules: + description: + - 'The list of ordered HTTP route rules. Use this list instead of pathRules + when advanced route matching and routing actions are desired. The order + of specifying routeRules matters: the first rule that matches will cause + its specified routing action to take effect. Within a given pathMatcher, + only one of pathRules or routeRules must be set. routeRules are not supported + in UrlMaps intended for External load balancers.' + returned: success + type: complex + contains: + priority: + description: + - For routeRules within a given pathMatcher, priority determines the + order in which load balancer will interpret routeRules. RouteRules + are evaluated in order of priority, from the lowest to highest number. + The priority of a rule decreases as its number increases (1, 2, 3, + N+1). The first rule that matches the request is applied. + - You cannot configure two or more routeRules with the same priority. + - Priority for each rule must be set to a number between 0 and 2147483647 + inclusive. + - Priority numbers can have gaps, which enable you to add or remove + rules in the future without affecting the rest of the rules. For example, + 1, 2, 3, 4, 5, 9, 12, 16 is a valid series of priority numbers to + which you could add rules numbered from 6 to 8, 10 to 11, and 13 to + 15 in the future without any impact on existing rules. + returned: success + type: int + service: + description: + - The backend service resource to which traffic is directed if this + rule is matched. If routeAction is additionally specified, advanced + routing actions like URL Rewrites, etc. take effect prior to sending + the request to the backend. However, if service is specified, routeAction + cannot contain any weightedBackendServices. Conversely, if routeAction + specifies any weightedBackendServices, service must not be specified. + Only one of urlRedirect, service or routeAction.weightedBackendService + must be set. + returned: success + type: dict + headerAction: + description: + - Specifies changes to request and response headers that need to take + effect for the selected backendService. The headerAction specified + here is applied before the matching pathMatchers[].headerAction and + after pathMatchers[].routeRules[].routeAction.weightedBackendService.backendServiceWeightAction[].headerAction. + returned: success + type: complex + contains: + requestHeadersToAdd: + description: + - Headers to add to a matching request prior to forwarding the request + to the backendService. + returned: success + type: complex + contains: + headerName: + description: + - The name of the header. + returned: success + type: str + headerValue: + description: + - The value of the header to add.
+ returned: success + type: str + replace: + description: + - If false, headerValue is appended to any values that already + exist for the header. If true, headerValue is set for the + header, discarding any values that were set for that header. + returned: success + type: bool + requestHeadersToRemove: + description: + - A list of header names for headers that need to be removed from + the request prior to forwarding the request to the backendService. + returned: success + type: list + responseHeadersToAdd: + description: + - Headers to add to the response prior to sending the response back + to the client. + returned: success + type: complex + contains: + headerName: + description: + - The name of the header. + returned: success + type: str + headerValue: + description: + - The value of the header to add. + returned: success + type: str + replace: + description: + - If false, headerValue is appended to any values that already + exist for the header. If true, headerValue is set for the + header, discarding any values that were set for that header. + returned: success + type: bool + responseHeadersToRemove: + description: + - A list of header names for headers that need to be removed from + the response prior to sending the response back to the client. + returned: success + type: list + matchRules: + description: + - The rules for determining a match. + returned: success + type: complex + contains: + fullPathMatch: + description: + - For satisfying the matchRule condition, the path of the request + must exactly match the value specified in fullPathMatch after + removing any query parameters and anchor that may be part of the + original URL. FullPathMatch must be between 1 and 1024 characters. + Only one of prefixMatch, fullPathMatch or regexMatch must be specified. + returned: success + type: str + headerMatches: + description: + - Specifies a list of header match criteria, all of which must match + corresponding headers in the request. + returned: success + type: complex + contains: + exactMatch: + description: + - The value should exactly match contents of exactMatch. Only + one of exactMatch, prefixMatch, suffixMatch, regexMatch, presentMatch + or rangeMatch must be set. + returned: success + type: str + headerName: + description: + - The name of the HTTP header to match. For matching against + the HTTP request's authority, use a headerMatch with the header + name ":authority". For matching a request's method, use the + headerName ":method". + returned: success + type: str + invertMatch: + description: + - If set to false, the headerMatch is considered a match if + the match criteria above are met. If set to true, the headerMatch + is considered a match if the match criteria above are NOT + met. Defaults to false. + returned: success + type: bool + prefixMatch: + description: + - The value of the header must start with the contents of prefixMatch. + Only one of exactMatch, prefixMatch, suffixMatch, regexMatch, + presentMatch or rangeMatch must be set. + returned: success + type: str + presentMatch: + description: + - A header with the contents of headerName must exist. The match + takes place whether or not the request's header has a value. + Only one of exactMatch, prefixMatch, suffixMatch, + regexMatch, presentMatch or rangeMatch must be set. + returned: success + type: bool + rangeMatch: + description: + - The header value must be an integer and its value must be + in the range specified in rangeMatch.
If the header does not + contain an integer or is empty, the match fails. For + example, for a range [-5, 0]: -3 will match; 0 will not + match; 0.25 will not match; -3someString will not match. + Only one of exactMatch, prefixMatch, suffixMatch, regexMatch, + presentMatch or rangeMatch must be set. + returned: success + type: complex + contains: + rangeEnd: + description: + - The end of the range (exclusive). + returned: success + type: int + rangeStart: + description: + - The start of the range (inclusive). + returned: success + type: int + regexMatch: + description: + - 'The value of the header must match the regular expression + specified in regexMatch. For regular expression grammar, please + see: en.cppreference.com/w/cpp/regex/ecmascript For matching + against a port specified in the HTTP request, use a headerMatch + with headerName set to PORT and a regular expression that + satisfies the RFC2616 Host header''s port specifier.' + - Only one of exactMatch, prefixMatch, suffixMatch, regexMatch, + presentMatch or rangeMatch must be set. + returned: success + type: str + suffixMatch: + description: + - The value of the header must end with the contents of suffixMatch. + Only one of exactMatch, prefixMatch, suffixMatch, regexMatch, + presentMatch or rangeMatch must be set. + returned: success + type: str + ignoreCase: + description: + - Specifies that prefixMatch and fullPathMatch matches are case + sensitive. + - Defaults to false. + returned: success + type: bool + metadataFilters: + description: + - Opaque filter criteria used by Loadbalancer to restrict routing + configuration to a limited set of xDS compliant clients. In their + xDS requests to Loadbalancer, xDS clients present node metadata. + If a match takes place, the relevant routing configuration is + made available to those proxies. For each metadataFilter in this + list, if its filterMatchCriteria is set to MATCH_ANY, at least + one of the filterLabels must match the corresponding label provided + in the metadata. If its filterMatchCriteria is set to MATCH_ALL, + then all of its filterLabels must match with corresponding labels + in the provided metadata. metadataFilters specified here can override + those specified in the ForwardingRule that refers to this + UrlMap. metadataFilters only applies to Loadbalancers that have + their loadBalancingScheme set to INTERNAL_SELF_MANAGED. + returned: success + type: complex + contains: + filterLabels: + description: + - The list of label value pairs that must match labels in the + provided metadata based on filterMatchCriteria. This list must + not be empty and can have at most 64 entries. + returned: success + type: complex + contains: + name: + description: + - Name of metadata label. The name can have a maximum length + of 1024 characters and must be at least 1 character long. + returned: success + type: str + value: + description: + - The value of the label must match the specified value. + value can have a maximum length of 1024 characters. + returned: success + type: str + filterMatchCriteria: + description: + - 'Specifies how individual filterLabel matches within the list + of filterLabels contribute towards the overall metadataFilter + match. Supported values are: - MATCH_ANY: At least one of + the filterLabels must have a matching label in the provided + metadata.' + - "- MATCH_ALL: All filterLabels must have matching labels in + the provided metadata."
+ returned: success + type: str + prefixMatch: + description: + - For satisfying the matchRule condition, the request's path must + begin with the specified prefixMatch. prefixMatch must begin with + a /. The value must be between 1 and 1024 characters. Only one + of prefixMatch, fullPathMatch or regexMatch must be specified. + returned: success + type: str + queryParameterMatches: + description: + - Specifies a list of query parameter match criteria, all of which + must match corresponding query parameters in the request. + returned: success + type: complex + contains: + exactMatch: + description: + - The queryParameterMatch matches if the value of the parameter + exactly matches the contents of exactMatch. Only one of presentMatch, + exactMatch and regexMatch must be set. + returned: success + type: str + name: + description: + - The name of the query parameter to match. The query parameter + must exist in the request, in the absence of which the request + match fails. + returned: success + type: str + presentMatch: + description: + - Specifies that the queryParameterMatch matches if the request + contains the query parameter, irrespective of whether the + parameter has a value or not. Only one of presentMatch, exactMatch + and regexMatch must be set. + returned: success + type: bool + regexMatch: + description: + - The queryParameterMatch matches if the value of the parameter + matches the regular expression specified by regexMatch. For + the regular expression grammar, please see en.cppreference.com/w/cpp/regex/ecmascript + Only one of presentMatch, exactMatch and regexMatch must be + set. + returned: success + type: str + regexMatch: + description: + - For satisfying the matchRule condition, the path of the request + must satisfy the regular expression specified in regexMatch after + removing any query parameters and anchor supplied with the original + URL. For regular expression grammar please see en.cppreference.com/w/cpp/regex/ecmascript + Only one of prefixMatch, fullPathMatch or regexMatch must be specified. + returned: success + type: str + routeAction: + description: + - In response to a matching matchRule, the load balancer performs advanced + routing actions like URL rewrites, header transformations, etc. prior + to forwarding the request to the selected backend. If routeAction + specifies any weightedBackendServices, service must not be set. Conversely, + if service is set, routeAction cannot contain any weightedBackendServices. + Only one of routeAction or urlRedirect must be set. + returned: success + type: complex + contains: + corsPolicy: + description: + - The specification for allowing client side cross-origin requests. + Please see W3C Recommendation for Cross Origin Resource Sharing. + returned: success + type: complex + contains: + allowCredentials: + description: + - In response to a preflight request, setting this to true indicates + that the actual request can include user credentials. This + translates to the Access- Control-Allow-Credentials header. + Defaults to false. + returned: success + type: bool + allowHeaders: + description: + - Specifies the content for the Access-Control-Allow-Headers + header. + returned: success + type: list + allowMethods: + description: + - Specifies the content for the Access-Control-Allow-Methods + header. + returned: success + type: list + allowOriginRegexes: + description: + - Specifies the regular expression patterns that match allowed + origins.
For regular expression grammar please see en.cppreference.com/w/cpp/regex/ecmascript + An origin is allowed if it matches either allow_origins or + allow_origin_regex. + returned: success + type: list + allowOrigins: + description: + - Specifies the list of origins that will be allowed to do CORS + requests. An origin is allowed if it matches either allow_origins + or allow_origin_regex. + returned: success + type: list + disabled: + description: + - If true, specifies the CORS policy is disabled. + - The default value is false, which indicates that the CORS policy + is in effect. + returned: success + type: bool + exposeHeaders: + description: + - Specifies the content for the Access-Control-Expose-Headers + header. + returned: success + type: list + maxAge: + description: + - Specifies how long the results of a preflight request can + be cached. This translates to the content for the Access-Control-Max-Age + header. + returned: success + type: int + faultInjectionPolicy: + description: + - The specification for fault injection introduced into traffic + to test the resiliency of clients to backend service failure. + As part of fault injection, when clients send requests to a backend + service, delays can be introduced by Loadbalancer on a percentage + of requests before sending those requests to the backend service. + Similarly, requests from clients can be aborted by the Loadbalancer + for a percentage of requests. timeout and retry_policy will be + ignored by clients that are configured with a fault_injection_policy. + returned: success + type: complex + contains: + abort: + description: + - The specification for how client requests are aborted as part + of fault injection. + returned: success + type: complex + contains: + httpStatus: + description: + - The HTTP status code used to abort the request. The value + must be between 200 and 599 inclusive. + returned: success + type: int + percentage: + description: + - The percentage of traffic (connections/operations/requests) + which will be aborted as part of fault injection. The + value must be between 0.0 and 100.0 inclusive. + returned: success + type: str + delay: + description: + - The specification for how client requests are delayed as part + of fault injection, before being sent to a backend service. + returned: success + type: complex + contains: + fixedDelay: + description: + - Specifies the value of the fixed delay interval. + returned: success + type: complex + contains: + nanos: + description: + - Span of time that's a fraction of a second at nanosecond + resolution. Durations less than one second are represented + with a 0 `seconds` field and a positive `nanos` field. + Must be from 0 to 999,999,999 inclusive. + returned: success + type: int + seconds: + description: + - Span of time at a resolution of a second. Must be + from 0 to 315,576,000,000 inclusive. + returned: success + type: str + percentage: + description: + - The percentage of traffic (connections/operations/requests) + on which delay will be introduced as part of fault injection. + The value must be between 0.0 and 100.0 inclusive. + returned: success + type: str + requestMirrorPolicy: + description: + - Specifies the policy on how requests intended for the route's + backends are shadowed to a separate mirrored backend service. + Loadbalancer does not wait for responses from the shadow service. + Prior to sending traffic to the shadow service, the host / authority + header is suffixed with -shadow.
+ returned: success + type: complex + contains: + backendService: + description: + - The BackendService resource being mirrored to. + returned: success + type: dict + retryPolicy: + description: + - Specifies the retry policy associated with this route. + returned: success + type: complex + contains: + numRetries: + description: + - Specifies the allowed number of retries. This number must be + > 0. + returned: success + type: int + perTryTimeout: + description: + - Specifies a non-zero timeout per retry attempt. + - If not specified, will use the timeout set in HttpRouteAction. + If timeout in HttpRouteAction is not set, will use the largest + timeout among all backend services associated with the route. + returned: success + type: complex + contains: + nanos: + description: + - Span of time that's a fraction of a second at nanosecond + resolution. Durations less than one second are represented + with a 0 `seconds` field and a positive `nanos` field. + Must be from 0 to 999,999,999 inclusive. + returned: success + type: int + seconds: + description: + - Span of time at a resolution of a second. Must be from + 0 to 315,576,000,000 inclusive. + returned: success + type: str + retryConditions: + description: + - 'Specifies one or more conditions when this retry rule applies. + Valid values are: - 5xx: Loadbalancer will attempt a retry + if the backend service responds with any 5xx response code, + or if the backend service does not respond at all, example: + disconnects, reset, read timeout, connection failure, and + refused streams.' + - "- gateway-error: Similar to 5xx, but only applies to response + codes 502, 503 or 504." + - "- connect-failure: Loadbalancer will retry on failures connecting + to backend services, for example due to connection timeouts." + - "- retriable-4xx: Loadbalancer will retry for retriable 4xx + response codes." + - Currently the only retriable error supported is 409. + - "- refused-stream: Loadbalancer will retry if the backend + service resets the stream with a REFUSED_STREAM error code. + This reset type indicates that it is safe to retry." + - "- cancelled: Loadbalancer will retry if the gRPC status code + in the response header is set to cancelled - deadline-exceeded: + Loadbalancer will retry if the gRPC status code in the response + header is set to deadline-exceeded - resource-exhausted: Loadbalancer + will retry if the gRPC status code in the response header + is set to resource-exhausted - unavailable: Loadbalancer will + retry if the gRPC status code in the response header is set + to unavailable." + returned: success + type: list + timeout: + description: + - Specifies the timeout for the selected route. Timeout is computed + from the time the request has been fully processed (i.e. end-of-stream) + up until the response has been completely processed. Timeout includes + all retries. If not specified, the default value is 15 seconds. + returned: success + type: complex + contains: + nanos: + description: + - Span of time that's a fraction of a second at nanosecond resolution. + Durations less than one second are represented with a 0 `seconds` + field and a positive `nanos` field. Must be from 0 to 999,999,999 + inclusive. + returned: success + type: int + seconds: + description: + - Span of time at a resolution of a second. Must be from 0 to + 315,576,000,000 inclusive. + returned: success + type: str + urlRewrite: + description: + - The spec to modify the URL of the request, prior to forwarding + the request to the matched service.
+ returned: success + type: complex + contains: + hostRewrite: + description: + - Prior to forwarding the request to the selected service, the + request's host header is replaced with contents of hostRewrite. + The value must be between 1 and 255 characters. + returned: success + type: str + pathPrefixRewrite: + description: + - Prior to forwarding the request to the selected backend service, + the matching portion of the request's path is replaced by + pathPrefixRewrite. The value must be between 1 and 1024 characters. + returned: success + type: str + weightedBackendServices: + description: + - A list of weighted backend services to send traffic to when a + route match occurs. The weights determine the fraction of traffic + that flows to their corresponding backend service. If all traffic + needs to go to a single backend service, there must be one weightedBackendService + with weight set to a non 0 number. Once a backendService is identified + and before forwarding the request to the backend service, advanced + routing actions like Url rewrites and header transformations are + applied depending on additional settings specified in this HttpRouteAction. + returned: success + type: complex + contains: + backendService: + description: + - The default BackendService resource. Before forwarding the + request to backendService, the loadbalancer applies any relevant + headerActions specified as part of this backendServiceWeight. + returned: success + type: dict + headerAction: + description: + - Specifies changes to request and response headers that need + to take effect for the selected backendService. headerAction + specified here take effect before headerAction in the enclosing + HttpRouteRule, PathMatcher and UrlMap. + returned: success + type: complex + contains: + requestHeadersToAdd: + description: + - Headers to add to a matching request prior to forwarding + the request to the backendService. + returned: success + type: complex + contains: + headerName: + description: + - The name of the header. + returned: success + type: str + headerValue: + description: + - The value of the header to add. + returned: success + type: str + replace: + description: + - If false, headerValue is appended to any values that + already exist for the header. If true, headerValue + is set for the header, discarding any values that + were set for that header. + returned: success + type: bool + requestHeadersToRemove: + description: + - A list of header names for headers that need to be removed + from the request prior to forwarding the request to the + backendService. + returned: success + type: list + responseHeadersToAdd: + description: + - Headers to add the response prior to sending the response + back to the client. + returned: success + type: complex + contains: + headerName: + description: + - The name of the header. + returned: success + type: str + headerValue: + description: + - The value of the header to add. + returned: success + type: str + replace: + description: + - If false, headerValue is appended to any values that + already exist for the header. If true, headerValue + is set for the header, discarding any values that + were set for that header. + returned: success + type: bool + responseHeadersToRemove: + description: + - A list of header names for headers that need to be removed + from the response prior to sending the response back to + the client. 
+ returned: success + type: list + weight: + description: + - Specifies the fraction of traffic sent to backendService, + computed as weight / (sum of all weightedBackendService weights + in routeAction) . The selection of a backend service is determined + only for new traffic. Once a user's request has been directed + to a backendService, subsequent requests will be sent to the + same backendService as determined by the BackendService's + session affinity policy. + - The value must be between 0 and 1000 . + returned: success + type: int + urlRedirect: + description: + - When this rule is matched, the request is redirected to a URL specified + by urlRedirect. If urlRedirect is specified, service or routeAction + must not be set. + returned: success + type: complex + contains: + hostRedirect: + description: + - The host that will be used in the redirect response instead of + the one that was supplied in the request. The value must be between + 1 and 255 characters. + returned: success + type: str + httpsRedirect: + description: + - If set to true, the URL scheme in the redirected request is set + to https. If set to false, the URL scheme of the redirected request + will remain the same as that of the request. This must only be + set for UrlMaps used in TargetHttpProxys. + - Setting this true for TargetHttpsProxy is not permitted. Defaults + to false. + returned: success + type: bool + pathRedirect: + description: + - The path that will be used in the redirect response instead of + the one that was supplied in the request. Only one of pathRedirect + or prefixRedirect must be specified. The value must be between + 1 and 1024 characters. + returned: success + type: str + prefixRedirect: + description: + - The prefix that replaces the prefixMatch specified in the HttpRouteRuleMatch, + retaining the remaining portion of the URL before redirecting + the request. + returned: success + type: str + redirectResponseCode: + description: + - 'The HTTP Status code to use for this RedirectAction. Supported + values are: - MOVED_PERMANENTLY_DEFAULT, which is the default + value and corresponds to 301. - FOUND, which corresponds to 302. + - SEE_OTHER which corresponds to 303. - TEMPORARY_REDIRECT, which + corresponds to 307. In this case, the request method will be retained. + - PERMANENT_REDIRECT, which corresponds to 308. In this case, + the request method will be retained.' + returned: success + type: str + stripQuery: + description: + - If set to true, any accompanying query portion of the original + URL is removed prior to redirecting the request. If set to false, + the query portion of the original URL is retained. Defaults to + false. + returned: success + type: bool + tests: + description: + - The list of expected URL mapping tests. Request to update this UrlMap will + succeed only if all of the test cases pass. You can specify a maximum of 100 + tests per UrlMap. + returned: success + type: complex + contains: + description: + description: + - Description of this test case. + returned: success + type: str + host: + description: + - Host portion of the URL. + returned: success + type: str + path: + description: + - Path portion of the URL. + returned: success + type: str + service: + description: + - Expected BackendService resource the given URL should be mapped to. 
+ returned: success + type: dict +''' + +################################################################################ +# Imports +################################################################################ +from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest +import json + +################################################################################ +# Main +################################################################################ + + +def main(): + module = GcpModule(argument_spec=dict(filters=dict(type='list', elements='str'))) + + if not module.params['scopes']: + module.params['scopes'] = ['https://www.googleapis.com/auth/compute'] + + return_value = {'resources': fetch_list(module, collection(module), query_options(module.params['filters']))} + module.exit_json(**return_value) + + +def collection(module): + return "https://www.googleapis.com/compute/v1/projects/{project}/global/urlMaps".format(**module.params) + + +def fetch_list(module, link, query): + auth = GcpSession(module, 'compute') + return auth.list(link, return_if_object, array_name='items', params={'filter': query}) + + +def query_options(filters): + if not filters: + return '' + + if len(filters) == 1: + return filters[0] + else: + queries = [] + for f in filters: + # When combining multiple filters, each one must be wrapped in + # parentheses so the space-separated expressions act as AND conditions. + if f[0] != '(' or f[-1] != ')': + queries.append("(%s)" % f) + else: + queries.append(f) + + return ' '.join(queries) + + +def return_if_object(module, response): + # If not found, return nothing. + if response.status_code == 404: + return None + + # If no content, return nothing. + if response.status_code == 204: + return None + + try: + module.raise_for_status(response) + result = response.json() + except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst: + module.fail_json(msg="Invalid JSON response with error: %s" % inst) + + if navigate_hash(result, ['error', 'errors']): + module.fail_json(msg=navigate_hash(result, ['error', 'errors'])) + + return result + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/cloud/google/gcp_compute_vpn_tunnel_facts.py b/plugins/modules/cloud/google/gcp_compute_vpn_tunnel_facts.py new file mode 120000 index 0000000000..26de5596b8 --- /dev/null +++ b/plugins/modules/cloud/google/gcp_compute_vpn_tunnel_facts.py @@ -0,0 +1 @@ +gcp_compute_vpn_tunnel_info.py \ No newline at end of file diff --git a/plugins/modules/cloud/google/gcp_compute_vpn_tunnel_info.py b/plugins/modules/cloud/google/gcp_compute_vpn_tunnel_info.py new file mode 100644 index 0000000000..8486c42552 --- /dev/null +++ b/plugins/modules/cloud/google/gcp_compute_vpn_tunnel_info.py @@ -0,0 +1,271 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (C) 2017 Google +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** AUTO GENERATED CODE *** +# +# ---------------------------------------------------------------------------- +# +# This file is automatically generated by Magic Modules and manual +# changes will be clobbered when the file is regenerated.
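For illustration, here is a minimal, self-contained sketch of how the query_options helper above collapses the module's filters option into a single Compute Engine filter expression. combine_filters is a stand-in name and the filter values are hypothetical; the authoritative logic is the generated query_options function itself:

def combine_filters(filters):
    # An empty filter string means "match everything" for the list call.
    if not filters:
        return ''
    # A single filter is passed through unchanged.
    if len(filters) == 1:
        return filters[0]
    # Multiple filters: parenthesize each one so the space-separated
    # expressions combine as AND conditions, per the filters option docs.
    return ' '.join(f if f.startswith('(') and f.endswith(')') else '(%s)' % f for f in filters)

print(combine_filters(['name = test_object', 'status = RUNNING']))
# -> (name = test_object) (status = RUNNING)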
+# +# Please read more about how to change this file at +# https://www.github.com/GoogleCloudPlatform/magic-modules +# +# ---------------------------------------------------------------------------- + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +################################################################################ +# Documentation +################################################################################ + +ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: gcp_compute_vpn_tunnel_info +description: +- Gather info for GCP VpnTunnel +short_description: Gather info for GCP VpnTunnel +author: Google Inc. (@googlecloudplatform) +requirements: +- python >= 2.6 +- requests >= 2.18.4 +- google-auth >= 1.3.0 +options: + filters: + description: + - A list of filter value pairs. Available filters are listed here U(https://cloud.google.com/sdk/gcloud/reference/topic/filters). + - Each additional filter in the list will be added as an AND condition (filter1 + and filter2). + type: list + region: + description: + - The region where the tunnel is located. + required: true + type: str + project: + description: + - The Google Cloud Platform project to use. + type: str + auth_kind: + description: + - The type of credential used. + type: str + required: true + choices: + - application + - machineaccount + - serviceaccount + service_account_contents: + description: + - The contents of a Service Account JSON file, either in a dictionary or as a + JSON string that represents it. + type: jsonarg + service_account_file: + description: + - The path of a Service Account JSON file if serviceaccount is selected as type. + type: path + service_account_email: + description: + - An optional service account email address if machineaccount is selected and + the user does not wish to use the default email. + type: str + scopes: + description: + - Array of scopes to be used + type: list + env_type: + description: + - Specifies which Ansible environment you're running this module within. + - This should not be set unless you know what you're doing. + - This only alters the User Agent string for any API requests. + type: str +notes: +- For authentication, you can set service_account_file using the C(GCP_SERVICE_ACCOUNT_FILE) + env variable. +- For authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS) + env variable. +- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL) + env variable. +- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable. +- For authentication, you can set scopes using the C(GCP_SCOPES) env variable. +- Environment variable values will only be used if the playbook values are not set. +- The I(service_account_email) and I(service_account_file) options are mutually exclusive. +''' + +EXAMPLES = ''' +- name: get info on a vpn tunnel + gcp_compute_vpn_tunnel_info: + region: us-west1 + filters: + - name = test_object + project: test_project + auth_kind: serviceaccount + service_account_file: "/tmp/auth.pem" +''' + +RETURN = ''' +resources: + description: List of resources + returned: always + type: complex + contains: + id: + description: + - The unique identifier for the resource. This identifier is defined by the + server. + returned: success + type: str + creationTimestamp: + description: + - Creation timestamp in RFC3339 text format.
+ returned: success + type: str + name: + description: + - Name of the resource. The name must be 1-63 characters long, and comply with + RFC1035. Specifically, the name must be 1-63 characters long and match the + regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character + must be a lowercase letter, and all following characters must be a dash, lowercase + letter, or digit, except the last character, which cannot be a dash. + returned: success + type: str + description: + description: + - An optional description of this resource. + returned: success + type: str + targetVpnGateway: + description: + - URL of the Target VPN gateway with which this VPN tunnel is associated. + returned: success + type: dict + router: + description: + - URL of router resource to be used for dynamic routing. + returned: success + type: dict + peerIp: + description: + - IP address of the peer VPN gateway. Only IPv4 is supported. + returned: success + type: str + sharedSecret: + description: + - Shared secret used to set the secure session between the Cloud VPN gateway + and the peer VPN gateway. + returned: success + type: str + sharedSecretHash: + description: + - Hash of the shared secret. + returned: success + type: str + ikeVersion: + description: + - IKE protocol version to use when establishing the VPN tunnel with peer VPN + gateway. + - Acceptable IKE versions are 1 or 2. Default version is 2. + returned: success + type: int + localTrafficSelector: + description: + - Local traffic selector to use when establishing the VPN tunnel with peer VPN + gateway. The value should be a CIDR formatted string, for example `192.168.0.0/16`. + The ranges should be disjoint. + - Only IPv4 is supported. + returned: success + type: list + remoteTrafficSelector: + description: + - Remote traffic selector to use when establishing the VPN tunnel with peer + VPN gateway. The value should be a CIDR formatted string, for example `192.168.0.0/16`. + The ranges should be disjoint. + - Only IPv4 is supported. + returned: success + type: list + region: + description: + - The region where the tunnel is located. 
+ returned: success + type: str +''' + +################################################################################ +# Imports +################################################################################ +from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest +import json + +################################################################################ +# Main +################################################################################ + + +def main(): + module = GcpModule(argument_spec=dict(filters=dict(type='list', elements='str'), region=dict(required=True, type='str'))) + + if not module.params['scopes']: + module.params['scopes'] = ['https://www.googleapis.com/auth/compute'] + + return_value = {'resources': fetch_list(module, collection(module), query_options(module.params['filters']))} + module.exit_json(**return_value) + + +def collection(module): + return "https://www.googleapis.com/compute/v1/projects/{project}/regions/{region}/vpnTunnels".format(**module.params) + + +def fetch_list(module, link, query): + auth = GcpSession(module, 'compute') + return auth.list(link, return_if_object, array_name='items', params={'filter': query}) + + +def query_options(filters): + if not filters: + return '' + + if len(filters) == 1: + return filters[0] + else: + queries = [] + for f in filters: + # When combining multiple filters, each one must be wrapped in + # parentheses so the space-separated expressions act as AND conditions. + if f[0] != '(' or f[-1] != ')': + queries.append("(%s)" % f) + else: + queries.append(f) + + return ' '.join(queries) + + +def return_if_object(module, response): + # If not found, return nothing. + if response.status_code == 404: + return None + + # If no content, return nothing. + if response.status_code == 204: + return None + + try: + module.raise_for_status(response) + result = response.json() + except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst: + module.fail_json(msg="Invalid JSON response with error: %s" % inst) + + if navigate_hash(result, ['error', 'errors']): + module.fail_json(msg=navigate_hash(result, ['error', 'errors'])) + + return result + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/cloud/google/gcp_container_cluster_facts.py b/plugins/modules/cloud/google/gcp_container_cluster_facts.py new file mode 120000 index 0000000000..50b4ee8253 --- /dev/null +++ b/plugins/modules/cloud/google/gcp_container_cluster_facts.py @@ -0,0 +1 @@ +gcp_container_cluster_info.py \ No newline at end of file diff --git a/plugins/modules/cloud/google/gcp_container_cluster_info.py b/plugins/modules/cloud/google/gcp_container_cluster_info.py new file mode 100644 index 0000000000..ca03fbd4ef --- /dev/null +++ b/plugins/modules/cloud/google/gcp_container_cluster_info.py @@ -0,0 +1,765 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (C) 2017 Google +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** AUTO GENERATED CODE *** +# +# ---------------------------------------------------------------------------- +# +# This file is automatically generated by Magic Modules and manual +# changes will be clobbered when the file is regenerated.
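For reference, a minimal sketch of what the navigate_hash helper used by return_if_object above does: walk a nested dict along a key path, returning a default when any key is missing. This is a simplified re-implementation for illustration (with a hypothetical error payload), not the canonical gcp_utils code:

def navigate_hash_sketch(source, path, default=None):
    # Walk nested dictionaries key by key; bail out with the default
    # as soon as a key is absent or the current value is not a dict.
    for key in path:
        if not isinstance(source, dict) or key not in source:
            return default
        source = source[key]
    return source

# Mirrors the error check in return_if_object (hypothetical payload):
result = {'error': {'errors': [{'message': 'quota exceeded'}]}}
print(navigate_hash_sketch(result, ['error', 'errors']))
# -> [{'message': 'quota exceeded'}]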
+# +# Please read more about how to change this file at +# https://www.github.com/GoogleCloudPlatform/magic-modules +# +# ---------------------------------------------------------------------------- + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +################################################################################ +# Documentation +################################################################################ + +ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: gcp_container_cluster_info +description: +- Gather info for GCP Cluster +short_description: Gather info for GCP Cluster +author: Google Inc. (@googlecloudplatform) +requirements: +- python >= 2.6 +- requests >= 2.18.4 +- google-auth >= 1.3.0 +options: + location: + description: + - The location where the cluster is deployed. + required: true + type: str + aliases: + - region + - zone + project: + description: + - The Google Cloud Platform project to use. + type: str + auth_kind: + description: + - The type of credential used. + type: str + required: true + choices: + - application + - machineaccount + - serviceaccount + service_account_contents: + description: + - The contents of a Service Account JSON file, either in a dictionary or as a + JSON string that represents it. + type: jsonarg + service_account_file: + description: + - The path of a Service Account JSON file if serviceaccount is selected as type. + type: path + service_account_email: + description: + - An optional service account email address if machineaccount is selected and + the user does not wish to use the default email. + type: str + scopes: + description: + - Array of scopes to be used + type: list + env_type: + description: + - Specifies which Ansible environment you're running this module within. + - This should not be set unless you know what you're doing. + - This only alters the User Agent string for any API requests. + type: str +notes: +- For authentication, you can set service_account_file using the C(GCP_SERVICE_ACCOUNT_FILE) + env variable. +- For authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS) + env variable. +- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL) + env variable. +- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable. +- For authentication, you can set scopes using the C(GCP_SCOPES) env variable. +- Environment variable values will only be used if the playbook values are not set. +- The I(service_account_email) and I(service_account_file) options are mutually exclusive. +''' + +EXAMPLES = ''' +- name: get info on a cluster + gcp_container_cluster_info: + location: us-central1-a + project: test_project + auth_kind: serviceaccount + service_account_file: "/tmp/auth.pem" +''' + +RETURN = ''' +resources: + description: List of resources + returned: always + type: complex + contains: + name: + description: + - The name of this cluster. The name must be unique within this project and + location, and can be up to 40 characters. Must be lowercase letters, numbers, + and hyphens only. Must start with a letter. Must end with a number or a letter. + returned: success + type: str + description: + description: + - An optional description of this cluster. + returned: success + type: str + initialNodeCount: + description: + - The number of nodes to create in this cluster.
You must ensure that your Compute + Engine resource quota is sufficient for this number of instances. You must + also have available firewall and routes quota. For requests, this field should + only be used in lieu of a "nodePool" object, since this configuration (along + with the "nodeConfig") will be used to create a "NodePool" object with an + auto-generated name. Do not use this and a nodePool at the same time. + - This field has been deprecated. Please use nodePool.initial_node_count instead. + returned: success + type: int + nodeConfig: + description: + - Parameters used in creating the cluster's nodes. + - For requests, this field should only be used in lieu of a "nodePool" object, + since this configuration (along with the "initialNodeCount") will be used + to create a "NodePool" object with an auto-generated name. Do not use this + and a nodePool at the same time. For responses, this field will be populated + with the node configuration of the first node pool. If unspecified, the defaults + are used. + returned: success + type: complex + contains: + machineType: + description: + - The name of a Google Compute Engine machine type (e.g. + - n1-standard-1). If unspecified, the default machine type is n1-standard-1. + returned: success + type: str + diskSizeGb: + description: + - Size of the disk attached to each node, specified in GB. The smallest + allowed disk size is 10GB. If unspecified, the default disk size is 100GB. + returned: success + type: int + oauthScopes: + description: + - The set of Google API scopes to be made available on all of the node VMs + under the "default" service account. + - 'The following scopes are recommended, but not required, and by default + are not included: U(https://www.googleapis.com/auth/compute) is required + for mounting persistent storage on your nodes.' + - U(https://www.googleapis.com/auth/devstorage.read_only) is required for + communicating with gcr.io (the Google Container Registry). + - If unspecified, no scopes are added, unless Cloud Logging or Cloud Monitoring + are enabled, in which case their required scopes will be added. + returned: success + type: list + serviceAccount: + description: + - The Google Cloud Platform Service Account to be used by the node VMs. + If no Service Account is specified, the "default" service account is used. + returned: success + type: str + metadata: + description: + - The metadata key/value pairs assigned to instances in the cluster. + - 'Keys must conform to the regexp [a-zA-Z0-9-_]+ and be less than 128 bytes + in length. These are reflected as part of a URL in the metadata server. + Additionally, to avoid ambiguity, keys must not conflict with any other + metadata keys for the project or be one of the four reserved keys: "instance-template", + "kube-env", "startup-script", and "user-data" Values are free-form strings, + and only have meaning as interpreted by the image running in the instance. + The only restriction placed on them is that each value''s size must be + less than or equal to 32 KB.' + - The total size of all keys and values must be less than 512 KB. + - 'An object containing a list of "key": value pairs.' + - 'Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.' + returned: success + type: dict + imageType: + description: + - The image type to use for this node. Note that for a given image type, + the latest version of it will be used. + returned: success + type: str + labels: + description: + - 'The map of Kubernetes labels (key/value pairs) to be applied to each + node. 
These will be added in addition to any default label(s) that Kubernetes + may apply to the node. In case of conflict in label keys, the applied + set may differ depending on the Kubernetes version -- it''s best to assume + the behavior is undefined and conflicts should be avoided. For more information, + including usage and the valid values, see: U(http://kubernetes.io/v1.1/docs/user-guide/labels.html) + An object containing a list of "key": value pairs.' + - 'Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.' + returned: success + type: dict + localSsdCount: + description: + - The number of local SSD disks to be attached to the node. + - 'The limit for this value is dependent upon the maximum number of disks + available on a machine per zone. See: U(https://cloud.google.com/compute/docs/disks/local-ssd#local_ssd_limits) + for more information.' + returned: success + type: int + tags: + description: + - The list of instance tags applied to all nodes. Tags are used to identify + valid sources or targets for network firewalls and are specified by the + client during cluster or node pool creation. Each tag within the list + must comply with RFC1035. + returned: success + type: list + preemptible: + description: + - 'Whether the nodes are created as preemptible VM instances. See: U(https://cloud.google.com/compute/docs/instances/preemptible) + for more information about preemptible VM instances.' + returned: success + type: bool + accelerators: + description: + - A list of hardware accelerators to be attached to each node. See U(https://cloud.google.com/compute/docs/gpus) + for more information about support for GPUs. + returned: success + type: complex + contains: + acceleratorCount: + description: + - The number of accelerator cards exposed to an instance. + returned: success + type: str + acceleratorType: + description: + - The accelerator type resource name. + returned: success + type: str + diskType: + description: + - Type of the disk attached to each node (e.g. 'pd-standard' or 'pd-ssd') + If unspecified, the default disk type is 'pd-standard' . + returned: success + type: str + minCpuPlatform: + description: + - Minimum CPU platform to be used by this instance. The instance may be + scheduled on the specified or newer CPU platform. + returned: success + type: str + taints: + description: + - List of Kubernetes taints to be applied to each node. + - 'For more information, including usage and the valid values, see: U(https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/) + .' + returned: success + type: complex + contains: + key: + description: + - Key for taint. + returned: success + type: str + value: + description: + - Value for taint. + returned: success + type: str + effect: + description: + - Effect for taint. + returned: success + type: str + masterAuth: + description: + - The authentication information for accessing the master endpoint. + returned: success + type: complex + contains: + username: + description: + - The username to use for HTTP basic authentication to the master endpoint. + returned: success + type: str + password: + description: + - The password to use for HTTP basic authentication to the master endpoint. + Because the master endpoint is open to the Internet, you should create + a strong password with a minimum of 16 characters. + returned: success + type: str + clientCertificateConfig: + description: + - Configuration for client certificate authentication on the cluster.
For + clusters before v1.12, if no configuration is specified, a client certificate + is issued. + returned: success + type: complex + contains: + issueClientCertificate: + description: + - Issue a client certificate. + returned: success + type: bool + clusterCaCertificate: + description: + - Base64-encoded public certificate that is the root of trust for the cluster. + returned: success + type: str + clientCertificate: + description: + - Base64-encoded public certificate used by clients to authenticate to the + cluster endpoint. + returned: success + type: str + clientKey: + description: + - Base64-encoded private key used by clients to authenticate to the cluster + endpoint. + returned: success + type: str + loggingService: + description: + - 'The logging service the cluster should use to write logs. Currently available + options: logging.googleapis.com - the Google Cloud Logging service.' + - none - no logs will be exported from the cluster. + - if left as an empty string,logging.googleapis.com will be used. + returned: success + type: str + monitoringService: + description: + - The monitoring service the cluster should use to write metrics. + - 'Currently available options: monitoring.googleapis.com - the Google Cloud + Monitoring service.' + - none - no metrics will be exported from the cluster. + - if left as an empty string, monitoring.googleapis.com will be used. + returned: success + type: str + network: + description: + - The name of the Google Compute Engine network to which the cluster is connected. + If left unspecified, the default network will be used. + returned: success + type: str + privateClusterConfig: + description: + - Configuration for a private cluster. + returned: success + type: complex + contains: + enablePrivateNodes: + description: + - Whether nodes have internal IP addresses only. If enabled, all nodes are + given only RFC 1918 private addresses and communicate with the master + via private networking. + returned: success + type: bool + enablePrivateEndpoint: + description: + - Whether the master's internal IP address is used as the cluster endpoint. + returned: success + type: bool + masterIpv4CidrBlock: + description: + - The IP range in CIDR notation to use for the hosted master network. This + range will be used for assigning internal IP addresses to the master or + set of masters, as well as the ILB VIP. This range must not overlap with + any other ranges in use within the cluster's network. + returned: success + type: str + privateEndpoint: + description: + - The internal IP address of this cluster's master endpoint. + returned: success + type: str + publicEndpoint: + description: + - The external IP address of this cluster's master endpoint. + returned: success + type: str + clusterIpv4Cidr: + description: + - The IP address range of the container pods in this cluster, in CIDR notation + (e.g. 10.96.0.0/14). Leave blank to have one automatically chosen or specify + a /14 block in 10.0.0.0/8. + returned: success + type: str + addonsConfig: + description: + - Configurations for the various addons available to run in the cluster. + returned: success + type: complex + contains: + httpLoadBalancing: + description: + - Configuration for the HTTP (L7) load balancing controller addon, which + makes it easy to set up HTTP load balancers for services in a cluster. + returned: success + type: complex + contains: + disabled: + description: + - Whether the HTTP Load Balancing controller is enabled in the cluster. 
+ When enabled, it runs a small pod in the cluster that manages the + load balancers. + returned: success + type: bool + horizontalPodAutoscaling: + description: + - Configuration for the horizontal pod autoscaling feature, which increases + or decreases the number of replica pods a replication controller has based + on the resource usage of the existing pods. + returned: success + type: complex + contains: + disabled: + description: + - Whether the Horizontal Pod Autoscaling feature is enabled in the cluster. + When enabled, it ensures that a Heapster pod is running in the cluster, + which is also used by the Cloud Monitoring service. + returned: success + type: bool + networkPolicyConfig: + description: + - Configuration for NetworkPolicy. This only tracks whether the addon is + enabled or not on the Master, it does not track whether network policy + is enabled for the nodes. + returned: success + type: complex + contains: + disabled: + description: + - Whether NetworkPolicy is enabled for this cluster. + returned: success + type: bool + subnetwork: + description: + - The name of the Google Compute Engine subnetwork to which the cluster is connected. + returned: success + type: str + locations: + description: + - The list of Google Compute Engine zones in which the cluster's nodes should + be located. + returned: success + type: list + resourceLabels: + description: + - The resource labels for the cluster to use to annotate any related Google + Compute Engine resources. + returned: success + type: dict + labelFingerprint: + description: + - The fingerprint of the set of labels for this cluster. + returned: success + type: str + legacyAbac: + description: + - Configuration for the legacy ABAC authorization mode. + returned: success + type: complex + contains: + enabled: + description: + - Whether the ABAC authorizer is enabled for this cluster. When enabled, + identities in the system, including service accounts, nodes, and controllers, + will have statically granted permissions beyond those provided by the + RBAC configuration or IAM. + returned: success + type: bool + networkPolicy: + description: + - Configuration options for the NetworkPolicy feature. + returned: success + type: complex + contains: + provider: + description: + - The selected network policy provider. + returned: success + type: str + enabled: + description: + - Whether network policy is enabled on the cluster. + returned: success + type: bool + defaultMaxPodsConstraint: + description: + - The default constraint on the maximum number of pods that can be run simultaneously + on a node in the node pool of this cluster. + - Only honored if cluster created with IP Alias support. + returned: success + type: complex + contains: + maxPodsPerNode: + description: + - Constraint enforced on the max num of pods per node. + returned: success + type: str + ipAllocationPolicy: + description: + - Configuration for controlling how IPs are allocated in the cluster. + returned: success + type: complex + contains: + useIpAliases: + description: + - Whether alias IPs will be used for pod IPs in the cluster. + returned: success + type: bool + createSubnetwork: + description: + - Whether a new subnetwork will be created automatically for the cluster. + returned: success + type: bool + subnetworkName: + description: + - A custom subnetwork name to be used if createSubnetwork is true. + - If this field is empty, then an automatic name will be chosen for the + new subnetwork. 
+ returned: success + type: str + clusterSecondaryRangeName: + description: + - The name of the secondary range to be used for the cluster CIDR block. + The secondary range will be used for pod IP addresses. + - This must be an existing secondary range associated with the cluster subnetwork. + returned: success + type: str + servicesSecondaryRangeName: + description: + - The name of the secondary range to be used for the services CIDR block. + The secondary range will be used for service ClusterIPs. This must be + an existing secondary range associated with the cluster subnetwork. + returned: success + type: str + clusterIpv4CidrBlock: + description: + - The IP address range for the cluster pod IPs. If this field is set, then + cluster.cluster_ipv4_cidr must be left blank. + - This field is only applicable when useIpAliases is true. + - Set to blank to have a range chosen with the default size. + - Set to /netmask (e.g. /14) to have a range chosen with a specific netmask. + returned: success + type: str + nodeIpv4CidrBlock: + description: + - The IP address range of the instance IPs in this cluster. + - This is applicable only if createSubnetwork is true. + - Set to blank to have a range chosen with the default size. + - Set to /netmask (e.g. /14) to have a range chosen with a specific netmask. + returned: success + type: str + servicesIpv4CidrBlock: + description: + - The IP address range of the services IPs in this cluster. If blank, a + range will be automatically chosen with the default size. + - This field is only applicable when useIpAliases is true. + - Set to blank to have a range chosen with the default size. + - Set to /netmask (e.g. /14) to have a range chosen with a specific netmask. + returned: success + type: str + tpuIpv4CidrBlock: + description: + - The IP address range of the Cloud TPUs in this cluster. If unspecified, + a range will be automatically chosen with the default size. + - This field is only applicable when useIpAliases is true. + - If unspecified, the range will use the default size. + - Set to /netmask (e.g. /14) to have a range chosen with a specific netmask. + returned: success + type: str + endpoint: + description: + - The IP address of this cluster's master endpoint. + - The endpoint can be accessed from the internet at https://username:password@endpoint/. + See the masterAuth property of this resource for username and password information. + returned: success + type: str + initialClusterVersion: + description: + - The software version of the master endpoint and kubelets used in the cluster + when it was first created. The version can be upgraded over time. + returned: success + type: str + currentMasterVersion: + description: + - The current software version of the master endpoint. + returned: success + type: str + currentNodeVersion: + description: + - The current version of the node software components. If they are currently + at multiple versions because they're in the process of being upgraded, this + reflects the minimum version of all nodes. + returned: success + type: str + createTime: + description: + - The time the cluster was created, in RFC3339 text format. + returned: success + type: str + status: + description: + - The current status of this cluster. + returned: success + type: str + statusMessage: + description: + - Additional information about the current status of this cluster, if available. + returned: success + type: str + nodeIpv4CidrSize: + description: + - The size of the address space on each node for hosting containers.
+ - This is provisioned from within the container_ipv4_cidr range. + returned: success + type: int + servicesIpv4Cidr: + description: + - The IP address range of the Kubernetes services in this cluster, in CIDR notation + (e.g. 1.2.3.4/29). Service addresses are typically put in the last /16 from + the container CIDR. + returned: success + type: str + currentNodeCount: + description: + - The number of nodes currently in the cluster. + returned: success + type: int + expireTime: + description: + - The time the cluster will be automatically deleted in RFC3339 text format. + returned: success + type: str + enableTpu: + description: + - Enable the ability to use Cloud TPUs in this cluster. + returned: success + type: bool + tpuIpv4CidrBlock: + description: + - The IP address range of the Cloud TPUs in this cluster, in CIDR notation. + returned: success + type: str + conditions: + description: + - Which conditions caused the current cluster state. + returned: success + type: complex + contains: + code: + description: + - Machine-friendly representation of the condition. + returned: success + type: str + message: + description: + - Human-friendly representation of the condition. + returned: success + type: str + masterAuthorizedNetworksConfig: + description: + - Configuration for master authorized networks, which restrict HTTPS access + to the Kubernetes master to the set of allowed external networks. + returned: success + type: complex + contains: + enabled: + description: + - Whether or not master authorized networks is enabled. + returned: success + type: bool + cidrBlocks: + description: + - Define up to 50 external networks that could access Kubernetes master + through HTTPS. + returned: success + type: complex + contains: + displayName: + description: + - Optional field used to identify CIDR blocks. + returned: success + type: str + cidrBlock: + description: + - Block specified in CIDR notation. + returned: success + type: str + location: + description: + - The location where the cluster is deployed. + returned: success + type: str +''' + +################################################################################ +# Imports +################################################################################ +from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest +import json + +################################################################################ +# Main +################################################################################ + + +def main(): + module = GcpModule(argument_spec=dict(location=dict(required=True, type='str', aliases=['region', 'zone']))) + + if not module.params['scopes']: + module.params['scopes'] = ['https://www.googleapis.com/auth/cloud-platform'] + + return_value = {'resources': fetch_list(module, collection(module))} + module.exit_json(**return_value) + + +def collection(module): + return "https://container.googleapis.com/v1/projects/{project}/locations/{location}/clusters".format(**module.params) + + +def fetch_list(module, link): + auth = GcpSession(module, 'container') + return auth.list(link, return_if_object, array_name='clusters') + + +def return_if_object(module, response): + # If not found, return nothing. + if response.status_code == 404: + return None + + # If no content, return nothing.
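+ # (Assumption, based on this generated pattern: a 204 No Content reply means an + # empty result set, so it is handled the same way as the 404 case above.)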
+ if response.status_code == 204: + return None + + try: + module.raise_for_status(response) + result = response.json() + except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst: + module.fail_json(msg="Invalid JSON response with error: %s" % inst) + + if navigate_hash(result, ['error', 'errors']): + module.fail_json(msg=navigate_hash(result, ['error', 'errors'])) + + return result + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/cloud/google/gcp_container_node_pool_facts.py b/plugins/modules/cloud/google/gcp_container_node_pool_facts.py new file mode 120000 index 0000000000..2b73f3c477 --- /dev/null +++ b/plugins/modules/cloud/google/gcp_container_node_pool_facts.py @@ -0,0 +1 @@ +gcp_container_node_pool_info.py \ No newline at end of file diff --git a/plugins/modules/cloud/google/gcp_container_node_pool_info.py b/plugins/modules/cloud/google/gcp_container_node_pool_info.py new file mode 100644 index 0000000000..f0b2da8921 --- /dev/null +++ b/plugins/modules/cloud/google/gcp_container_node_pool_info.py @@ -0,0 +1,450 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (C) 2017 Google +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** AUTO GENERATED CODE *** +# +# ---------------------------------------------------------------------------- +# +# This file is automatically generated by Magic Modules and manual +# changes will be clobbered when the file is regenerated. +# +# Please read more about how to change this file at +# https://www.github.com/GoogleCloudPlatform/magic-modules +# +# ---------------------------------------------------------------------------- + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +################################################################################ +# Documentation +################################################################################ + +ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: gcp_container_node_pool_info +description: +- Gather info for GCP NodePool +short_description: Gather info for GCP NodePool +author: Google Inc. (@googlecloudplatform) +requirements: +- python >= 2.6 +- requests >= 2.18.4 +- google-auth >= 1.3.0 +options: + location: + description: + - The location where the node pool is deployed. + required: true + type: str + aliases: + - region + - zone + cluster: + description: + - The cluster this node pool belongs to. + - 'This field represents a link to a Cluster resource in GCP. It can be specified + in two ways. First, you can place a dictionary with key ''name'' and value of + your resource''s name Alternatively, you can add `register: name-of-resource` + to a gcp_container_cluster task and then set this cluster field to "{{ name-of-resource + }}"' + required: true + type: dict + project: + description: + - The Google Cloud Platform project to use. + type: str + auth_kind: + description: + - The type of credential used. + type: str + required: true + choices: + - application + - machineaccount + - serviceaccount + service_account_contents: + description: + - The contents of a Service Account JSON file, either in a dictionary or as a + JSON string that represents it. 
+ type: jsonarg + service_account_file: + description: + - The path of a Service Account JSON file if serviceaccount is selected as type. + type: path + service_account_email: + description: + - An optional service account email address if machineaccount is selected and + the user does not wish to use the default email. + type: str + scopes: + description: + - Array of scopes to be used + type: list + env_type: + description: + - Specifies which Ansible environment you're running this module within. + - This should not be set unless you know what you're doing. + - This only alters the User Agent string for any API requests. + type: str +notes: +- for authentication, you can set service_account_file using the C(gcp_service_account_file) + env variable. +- for authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS) + env variable. +- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL) + env variable. +- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable. +- For authentication, you can set scopes using the C(GCP_SCOPES) env variable. +- Environment variables values will only be used if the playbook values are not set. +- The I(service_account_email) and I(service_account_file) options are mutually exclusive. +''' + +EXAMPLES = ''' +- name: get info on a node pool + gcp_container_node_pool_info: + cluster: "{{ cluster }}" + location: us-central1-a + project: test_project + auth_kind: serviceaccount + service_account_file: "/tmp/auth.pem" +''' + +RETURN = ''' +resources: + description: List of resources + returned: always + type: complex + contains: + name: + description: + - The name of the node pool. + returned: success + type: str + config: + description: + - The node configuration of the pool. + returned: success + type: complex + contains: + machineType: + description: + - The name of a Google Compute Engine machine type (e.g. + - n1-standard-1). If unspecified, the default machine type is n1-standard-1. + returned: success + type: str + diskSizeGb: + description: + - Size of the disk attached to each node, specified in GB. The smallest + allowed disk size is 10GB. If unspecified, the default disk size is 100GB. + returned: success + type: int + oauthScopes: + description: + - The set of Google API scopes to be made available on all of the node VMs + under the "default" service account. + - 'The following scopes are recommended, but not required, and by default + are not included: U(https://www.googleapis.com/auth/compute) is required + for mounting persistent storage on your nodes.' + - U(https://www.googleapis.com/auth/devstorage.read_only) is required for + communicating with gcr.io (the Google Container Registry). + - If unspecified, no scopes are added, unless Cloud Logging or Cloud Monitoring + are enabled, in which case their required scopes will be added. + returned: success + type: list + serviceAccount: + description: + - The Google Cloud Platform Service Account to be used by the node VMs. + If no Service Account is specified, the "default" service account is used. + returned: success + type: str + metadata: + description: + - The metadata key/value pairs assigned to instances in the cluster. + - 'Keys must conform to the regexp [a-zA-Z0-9-_]+ and be less than 128 bytes + in length. These are reflected as part of a URL in the metadata server. 
+ Additionally, to avoid ambiguity, keys must not conflict with any other + metadata keys for the project or be one of the four reserved keys: "instance-template", + "kube-env", "startup-script", and "user-data". Values are free-form strings, + and only have meaning as interpreted by the image running in the instance. + The only restriction placed on them is that each value''s size must be + less than or equal to 32 KB.' + - The total size of all keys and values must be less than 512 KB. + - 'An object containing a list of "key": value pairs.' + - 'Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.' + returned: success + type: dict + imageType: + description: + - The image type to use for this node. Note that for a given image type, + the latest version of it will be used. + returned: success + type: str + labels: + description: + - 'The map of Kubernetes labels (key/value pairs) to be applied to each + node. These will be added in addition to any default label(s) that Kubernetes + may apply to the node. In case of conflict in label keys, the applied + set may differ depending on the Kubernetes version -- it''s best to assume + the behavior is undefined and conflicts should be avoided. For more information, + including usage and the valid values, see: U(http://kubernetes.io/v1.1/docs/user-guide/labels.html) + An object containing a list of "key": value pairs.' + - 'Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.' + returned: success + type: dict + localSsdCount: + description: + - The number of local SSD disks to be attached to the node. + - 'The limit for this value is dependent upon the maximum number of disks + available on a machine per zone. See: U(https://cloud.google.com/compute/docs/disks/local-ssd#local_ssd_limits) + for more information.' + returned: success + type: int + tags: + description: + - The list of instance tags applied to all nodes. Tags are used to identify + valid sources or targets for network firewalls and are specified by the + client during cluster or node pool creation. Each tag within the list + must comply with RFC1035. + returned: success + type: list + preemptible: + description: + - 'Whether the nodes are created as preemptible VM instances. See: U(https://cloud.google.com/compute/docs/instances/preemptible) + for more information about preemptible VM instances.' + returned: success + type: bool + accelerators: + description: + - A list of hardware accelerators to be attached to each node. + returned: success + type: complex + contains: + acceleratorCount: + description: + - The number of accelerator cards exposed to an instance. + returned: success + type: int + acceleratorType: + description: + - The accelerator type resource name. + returned: success + type: str + diskType: + description: + - Type of the disk attached to each node (e.g. 'pd-standard' or 'pd-ssd'). + If unspecified, the default disk type is 'pd-standard'. + returned: success + type: str + minCpuPlatform: + description: + - Minimum CPU platform to be used by this instance. The instance may be + scheduled on the specified or newer CPU platform. + returned: success + type: str + taints: + description: + - List of Kubernetes taints to be applied to each node. + returned: success + type: complex + contains: + key: + description: + - Key for taint. + returned: success + type: str + value: + description: + - Value for taint. + returned: success + type: str + effect: + description: + - Effect for taint.
+ returned: success + type: str + initialNodeCount: + description: + - The initial node count for the pool. You must ensure that your Compute Engine + resource quota is sufficient for this number of instances. You must also have + available firewall and routes quota. + returned: success + type: int + status: + description: + - Status of nodes in this pool instance. + returned: success + type: str + statusMessage: + description: + - Additional information about the current status of this node pool instance. + returned: success + type: str + version: + description: + - The Kubernetes version of this node. + returned: success + type: str + autoscaling: + description: + - Autoscaler configuration for this NodePool. Autoscaler is enabled only if + a valid configuration is present. + returned: success + type: complex + contains: + enabled: + description: + - Whether autoscaling is enabled for this node pool. + returned: success + type: bool + minNodeCount: + description: + - Minimum number of nodes in the NodePool. Must be >= 1 and <= maxNodeCount. + returned: success + type: int + maxNodeCount: + description: + - Maximum number of nodes in the NodePool. Must be >= minNodeCount. + - There has to be enough quota to scale up the cluster. + returned: success + type: int + management: + description: + - Management configuration for this NodePool. + returned: success + type: complex + contains: + autoUpgrade: + description: + - A flag that specifies whether node auto-upgrade is enabled for the node + pool. If enabled, node auto-upgrade helps keep the nodes in your node + pool up to date with the latest release version of Kubernetes. + returned: success + type: bool + autoRepair: + description: + - A flag that specifies whether the node auto-repair is enabled for the + node pool. If enabled, the nodes in this node pool will be monitored and, + if they fail health checks too many times, an automatic repair action + will be triggered. + returned: success + type: bool + upgradeOptions: + description: + - Specifies the Auto Upgrade knobs for the node pool. + returned: success + type: complex + contains: + autoUpgradeStartTime: + description: + - This field is set when upgrades are about to commence with the approximate + start time for the upgrades, in RFC3339 text format. + returned: success + type: str + description: + description: + - This field is set when upgrades are about to commence with the description + of the upgrade. + returned: success + type: str + maxPodsConstraint: + description: + - The constraint on the maximum number of pods that can be run simultaneously + on a node in the node pool. + returned: success + type: complex + contains: + maxPodsPerNode: + description: + - Constraint enforced on the maximum number of pods per node. + returned: success + type: int + conditions: + description: + - Which conditions caused the current node pool state. + returned: success + type: complex + contains: + code: + description: + - Machine-friendly representation of the condition. + returned: success + type: str + podIpv4CidrSize: + description: + - The pod CIDR block size per node in this node pool. + returned: success + type: int + cluster: + description: + - The cluster this node pool belongs to. + returned: success + type: dict + location: + description: + - The location where the node pool is deployed.
+ returned: success + type: str +''' + +################################################################################ +# Imports +################################################################################ +from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest, replace_resource_dict +import json + +################################################################################ +# Main +################################################################################ + + +def main(): + module = GcpModule(argument_spec=dict(location=dict(required=True, type='str', aliases=['region', 'zone']), cluster=dict(required=True, type='dict'))) + + if not module.params['scopes']: + module.params['scopes'] = ['https://www.googleapis.com/auth/cloud-platform'] + + return_value = {'resources': fetch_list(module, collection(module))} + module.exit_json(**return_value) + + +def collection(module): + res = {'project': module.params['project'], 'location': module.params['location'], 'cluster': replace_resource_dict(module.params['cluster'], 'name')} + return "https://container.googleapis.com/v1/projects/{project}/locations/{location}/clusters/{cluster}/nodePools".format(**res) + + +def fetch_list(module, link): + auth = GcpSession(module, 'container') + return auth.list(link, return_if_object, array_name='nodePools') + + +def return_if_object(module, response): + # If not found, return nothing. + if response.status_code == 404: + return None + + # If no content, return nothing. + if response.status_code == 204: + return None + + try: + module.raise_for_status(response) + result = response.json() + except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst: + module.fail_json(msg="Invalid JSON response with error: %s" % inst) + + if navigate_hash(result, ['error', 'errors']): + module.fail_json(msg=navigate_hash(result, ['error', 'errors'])) + + return result + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/cloud/google/gcp_dns_managed_zone_facts.py b/plugins/modules/cloud/google/gcp_dns_managed_zone_facts.py new file mode 120000 index 0000000000..08fc673ed2 --- /dev/null +++ b/plugins/modules/cloud/google/gcp_dns_managed_zone_facts.py @@ -0,0 +1 @@ +gcp_dns_managed_zone_info.py \ No newline at end of file diff --git a/plugins/modules/cloud/google/gcp_dns_managed_zone_info.py b/plugins/modules/cloud/google/gcp_dns_managed_zone_info.py new file mode 100644 index 0000000000..f374ff78ab --- /dev/null +++ b/plugins/modules/cloud/google/gcp_dns_managed_zone_info.py @@ -0,0 +1,293 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (C) 2017 Google +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** AUTO GENERATED CODE *** +# +# ---------------------------------------------------------------------------- +# +# This file is automatically generated by Magic Modules and manual +# changes will be clobbered when the file is regenerated. 
+# +# Please read more about how to change this file at +# https://www.github.com/GoogleCloudPlatform/magic-modules +# +# ---------------------------------------------------------------------------- + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +################################################################################ +# Documentation +################################################################################ + +ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: gcp_dns_managed_zone_info +description: +- Gather info for GCP ManagedZone +short_description: Gather info for GCP ManagedZone +author: Google Inc. (@googlecloudplatform) +requirements: +- python >= 2.6 +- requests >= 2.18.4 +- google-auth >= 1.3.0 +options: + dns_name: + description: + - Restricts the list to return only zones with this domain name. + type: list + project: + description: + - The Google Cloud Platform project to use. + type: str + auth_kind: + description: + - The type of credential used. + type: str + required: true + choices: + - application + - machineaccount + - serviceaccount + service_account_contents: + description: + - The contents of a Service Account JSON file, either in a dictionary or as a + JSON string that represents it. + type: jsonarg + service_account_file: + description: + - The path of a Service Account JSON file if serviceaccount is selected as type. + type: path + service_account_email: + description: + - An optional service account email address if machineaccount is selected and + the user does not wish to use the default email. + type: str + scopes: + description: + - Array of scopes to be used + type: list + env_type: + description: + - Specifies which Ansible environment you're running this module within. + - This should not be set unless you know what you're doing. + - This only alters the User Agent string for any API requests. + type: str +notes: +- for authentication, you can set service_account_file using the C(gcp_service_account_file) + env variable. +- for authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS) + env variable. +- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL) + env variable. +- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable. +- For authentication, you can set scopes using the C(GCP_SCOPES) env variable. +- Environment variables values will only be used if the playbook values are not set. +- The I(service_account_email) and I(service_account_file) options are mutually exclusive. +''' + +EXAMPLES = ''' +- name: get info on a managed zone + gcp_dns_managed_zone_info: + dns_name: test.somewild2.example.com. + project: test_project + auth_kind: serviceaccount + service_account_file: "/tmp/auth.pem" +''' + +RETURN = ''' +resources: + description: List of resources + returned: always + type: complex + contains: + description: + description: + - A mutable string of at most 1024 characters associated with this resource + for the user's convenience. Has no effect on the managed zone's function. + returned: success + type: str + dnsName: + description: + - The DNS name of this managed zone, for instance "example.com.". + returned: success + type: str + dnssecConfig: + description: + - DNSSEC configuration. 
+ returned: success + type: complex + contains: + kind: + description: + - Identifies what kind of resource this is. + returned: success + type: str + nonExistence: + description: + - Specifies the mechanism used to provide authenticated denial-of-existence + responses. + returned: success + type: str + state: + description: + - Specifies whether DNSSEC is enabled, and what mode it is in. + returned: success + type: str + defaultKeySpecs: + description: + - Specifies parameters that will be used for generating initial DnsKeys + for this ManagedZone. If you provide a spec for keySigning or zoneSigning, + you must also provide one for the other. + returned: success + type: complex + contains: + algorithm: + description: + - String mnemonic specifying the DNSSEC algorithm of this key. + returned: success + type: str + keyLength: + description: + - Length of the keys in bits. + returned: success + type: int + keyType: + description: + - Specifies whether this is a key signing key (KSK) or a zone signing + key (ZSK). Key signing keys have the Secure Entry Point flag set and, + when active, will only be used to sign resource record sets of type + DNSKEY. Zone signing keys do not have the Secure Entry Point flag + set and will be used to sign all other types of resource record sets. + . + returned: success + type: str + kind: + description: + - Identifies what kind of resource this is. + returned: success + type: str + id: + description: + - Unique identifier for the resource; defined by the server. + returned: success + type: int + name: + description: + - User assigned name for this resource. + - Must be unique within the project. + returned: success + type: str + nameServers: + description: + - Delegate your managed_zone to these virtual name servers; defined by the server + . + returned: success + type: list + nameServerSet: + description: + - Optionally specifies the NameServerSet for this ManagedZone. A NameServerSet + is a set of DNS name servers that all host the same ManagedZones. Most users + will leave this field unset. + returned: success + type: str + creationTime: + description: + - The time that this resource was created on the server. + - This is in RFC3339 text format. + returned: success + type: str + labels: + description: + - A set of key/value label pairs to assign to this ManagedZone. + returned: success + type: dict + visibility: + description: + - 'The zone''s visibility: public zones are exposed to the Internet, while private + zones are visible only to Virtual Private Cloud resources.' + - 'Must be one of: `public`, `private`.' + returned: success + type: str + privateVisibilityConfig: + description: + - For privately visible zones, the set of Virtual Private Cloud resources that + the zone is visible from. + returned: success + type: complex + contains: + networks: + description: + - The list of VPC networks that can see this zone. + returned: success + type: complex + contains: + networkUrl: + description: + - The fully qualified URL of the VPC network to bind to. + - This should be formatted like `U(https://www.googleapis.com/compute/v1/projects/{project}/global/networks/{network}`) + . 
+ returned: success + type: str +''' + +################################################################################ +# Imports +################################################################################ +from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest +import json + +################################################################################ +# Main +################################################################################ + + +def main(): + module = GcpModule(argument_spec=dict(dns_name=dict(type='list', elements='str'))) + + if not module.params['scopes']: + module.params['scopes'] = ['https://www.googleapis.com/auth/ndev.clouddns.readwrite'] + + return_value = {'resources': fetch_list(module, collection(module), module.params['dns_name'])} + module.exit_json(**return_value) + + +def collection(module): + return "https://www.googleapis.com/dns/v1/projects/{project}/managedZones".format(**module.params) + + +def fetch_list(module, link, query): + auth = GcpSession(module, 'dns') + return auth.list(link, return_if_object, array_name='managedZones', params={'dnsName': query}) + + +def return_if_object(module, response): + # If not found, return nothing. + if response.status_code == 404: + return None + + # If no content, return nothing. + if response.status_code == 204: + return None + + try: + module.raise_for_status(response) + result = response.json() + except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst: + module.fail_json(msg="Invalid JSON response with error: %s" % inst) + + if navigate_hash(result, ['error', 'errors']): + module.fail_json(msg=navigate_hash(result, ['error', 'errors'])) + + return result + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/cloud/google/gcp_dns_resource_record_set_facts.py b/plugins/modules/cloud/google/gcp_dns_resource_record_set_facts.py new file mode 120000 index 0000000000..879f4b6344 --- /dev/null +++ b/plugins/modules/cloud/google/gcp_dns_resource_record_set_facts.py @@ -0,0 +1 @@ +gcp_dns_resource_record_set_info.py \ No newline at end of file diff --git a/plugins/modules/cloud/google/gcp_dns_resource_record_set_info.py b/plugins/modules/cloud/google/gcp_dns_resource_record_set_info.py new file mode 100644 index 0000000000..a6f987f22c --- /dev/null +++ b/plugins/modules/cloud/google/gcp_dns_resource_record_set_info.py @@ -0,0 +1,196 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (C) 2017 Google +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** AUTO GENERATED CODE *** +# +# ---------------------------------------------------------------------------- +# +# This file is automatically generated by Magic Modules and manual +# changes will be clobbered when the file is regenerated. 
+# +# Please read more about how to change this file at +# https://www.github.com/GoogleCloudPlatform/magic-modules +# +# ---------------------------------------------------------------------------- + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +################################################################################ +# Documentation +################################################################################ + +ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: gcp_dns_resource_record_set_info +description: +- Gather info for GCP ResourceRecordSet +short_description: Gather info for GCP ResourceRecordSet +author: Google Inc. (@googlecloudplatform) +requirements: +- python >= 2.6 +- requests >= 2.18.4 +- google-auth >= 1.3.0 +options: + managed_zone: + description: + - Identifies the managed zone addressed by this request. This must be a dictionary + that contains both a 'name' key and a 'dnsName' key. You can pass in the results + of the gcp_dns_managed_zone module, which will contain both. + required: true + type: dict + project: + description: + - The Google Cloud Platform project to use. + type: str + auth_kind: + description: + - The type of credential used. + type: str + required: true + choices: + - application + - machineaccount + - serviceaccount + service_account_contents: + description: + - The contents of a Service Account JSON file, either in a dictionary or as a + JSON string that represents it. + type: jsonarg + service_account_file: + description: + - The path of a Service Account JSON file if serviceaccount is selected as type. + type: path + service_account_email: + description: + - An optional service account email address if machineaccount is selected and + the user does not wish to use the default email. + type: str + scopes: + description: + - Array of scopes to be used + type: list + env_type: + description: + - Specifies which Ansible environment you're running this module within. + - This should not be set unless you know what you're doing. + - This only alters the User Agent string for any API requests. + type: str +notes: +- for authentication, you can set service_account_file using the C(gcp_service_account_file) + env variable. +- for authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS) + env variable. +- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL) + env variable. +- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable. +- For authentication, you can set scopes using the C(GCP_SCOPES) env variable. +- Environment variables values will only be used if the playbook values are not set. +- The I(service_account_email) and I(service_account_file) options are mutually exclusive. +''' + +EXAMPLES = ''' +- name: get info on a resource record set + gcp_dns_resource_record_set_info: + managed_zone: "{{ managed_zone }}" + project: test_project + auth_kind: serviceaccount + service_account_file: "/tmp/auth.pem" +''' + +RETURN = ''' +resources: + description: List of resources + returned: always + type: complex + contains: + name: + description: + - For example, U(www.example.com). + returned: success + type: str + type: + description: + - One of valid DNS resource types. + returned: success + type: str + ttl: + description: + - Number of seconds that this ResourceRecordSet can be cached by resolvers. 
+ returned: success + type: int + target: + description: + - As defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) . + returned: success + type: list + managed_zone: + description: + - Identifies the managed zone addressed by this request. This must be a dictionary + that contains both a 'name' key and a 'dnsName' key. You can pass in the results + of the gcp_dns_managed_zone module, which will contain both. + returned: success + type: dict +''' + +################################################################################ +# Imports +################################################################################ +from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest, replace_resource_dict +import json + +################################################################################ +# Main +################################################################################ + + +def main(): + module = GcpModule(argument_spec=dict(managed_zone=dict(required=True, type='dict'))) + + if not module.params['scopes']: + module.params['scopes'] = ['https://www.googleapis.com/auth/ndev.clouddns.readwrite'] + + return_value = {'resources': fetch_list(module, collection(module))} + module.exit_json(**return_value) + + +def collection(module): + res = {'project': module.params['project'], 'managed_zone': replace_resource_dict(module.params['managed_zone'], 'name')} + return "https://www.googleapis.com/dns/v1/projects/{project}/managedZones/{managed_zone}/rrsets".format(**res) + + +def fetch_list(module, link): + auth = GcpSession(module, 'dns') + return auth.list(link, return_if_object, array_name='rrsets') + + +def return_if_object(module, response): + # If not found, return nothing. + if response.status_code == 404: + return None + + # If no content, return nothing. + if response.status_code == 204: + return None + + try: + module.raise_for_status(response) + result = response.json() + except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst: + module.fail_json(msg="Invalid JSON response with error: %s" % inst) + + if navigate_hash(result, ['error', 'errors']): + module.fail_json(msg=navigate_hash(result, ['error', 'errors'])) + + return result + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/cloud/google/gcp_forwarding_rule.py b/plugins/modules/cloud/google/gcp_forwarding_rule.py new file mode 100644 index 0000000000..4f88b2f424 --- /dev/null +++ b/plugins/modules/cloud/google/gcp_forwarding_rule.py @@ -0,0 +1,354 @@ +#!/usr/bin/python +# Copyright 2017 Google Inc. +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['deprecated'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: gcp_forwarding_rule +short_description: Create, Update or Destroy a Forwarding_Rule. +description: + - Create, Update or Destroy a Forwarding_Rule. See + U(https://cloud.google.com/compute/docs/load-balancing/http/target-proxies) for an overview. 
+ More details on the Global Forwarding_Rule API can be found at + U(https://cloud.google.com/compute/docs/reference/latest/globalForwardingRules) + More details on the Forwarding Rules API can be found at + U(https://cloud.google.com/compute/docs/reference/latest/forwardingRules) +requirements: + - "python >= 2.6" + - "google-api-python-client >= 1.6.2" + - "google-auth >= 0.9.0" + - "google-auth-httplib2 >= 0.0.2" +deprecated: + removed_in: "2.12" + why: Updated modules released with increased functionality + alternative: Use M(gcp_compute_forwarding_rule) or M(gcp_compute_global_forwarding_rule) instead. +notes: + - Currently only supports global forwarding rules. + As such, Load Balancing Scheme is always EXTERNAL. +author: + - "Tom Melendez (@supertom)" +options: + address: + description: + - IPv4 or named IP address. Must be of the same scope (regional, global). + Reserved addresses can (and probably should) be used for global + forwarding rules. You may reserve IPs from the console or + via the gce_eip module. + required: false + forwarding_rule_name: + description: + - Name of the Forwarding_Rule. + required: true + port_range: + description: + - For global forwarding rules, must be set to 80 or 8080 for TargetHttpProxy, and + 443 for TargetHttpsProxy or TargetSslProxy. + required: false + protocol: + description: + - For global forwarding rules, TCP, UDP, ESP, AH, SCTP or ICMP. Default is TCP. + required: false + region: + description: + - The region for this forwarding rule. Currently, only 'global' is supported. + required: false + state: + description: + - The state of the Forwarding Rule. 'present' or 'absent' + required: true + choices: ["present", "absent"] + target: + description: + - Target resource for forwarding rule. For global proxy, this is a Global + TargetProxy resource. Required for external load balancing (including Global load balancing) + required: false +''' + +EXAMPLES = ''' +- name: Create Minimum GLOBAL Forwarding_Rule + gcp_forwarding_rule: + service_account_email: "{{ service_account_email }}" + credentials_file: "{{ credentials_file }}" + project_id: "{{ project_id }}" + forwarding_rule_name: my-forwarding_rule + protocol: TCP + port_range: 80 + region: global + target: my-target-proxy + state: present + +- name: Create Forwarding_Rule w/reserved static address + gcp_forwarding_rule: + service_account_email: "{{ service_account_email }}" + credentials_file: "{{ credentials_file }}" + project_id: "{{ project_id }}" + forwarding_rule_name: my-forwarding_rule + protocol: TCP + port_range: 80 + address: my-reserved-static-address-name + region: global + target: my-target-proxy + state: present +''' + +RETURN = ''' +forwarding_rule_name: + description: Name of the Forwarding_Rule + returned: Always + type: str + sample: my-forwarding_rule +forwarding_rule: + description: GCP Forwarding_Rule dictionary + returned: Always. Refer to GCP documentation for detailed field descriptions. + type: dict + sample: { "name": "my-forwarding_rule", "target": "..." } +region: + description: Region for Forwarding Rule. + returned: Always + type: str + sample: global +state: + description: state of the Forwarding_Rule + returned: Always.
+ type: str + sample: present +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.gcp import get_google_api_client, GCPUtils + + +USER_AGENT_PRODUCT = 'ansible-forwarding_rule' +USER_AGENT_VERSION = '0.0.1' + + +def _build_global_forwarding_rule_dict(params, project_id=None): + """ + Reformat Ansible params into a forwarding rule dict suitable for the GCP API. + + :param params: Params from AnsibleModule object + :type params: ``dict`` + + :param project_id: The GCP project ID. + :type project_id: ``str`` + + :return: dictionary suitable for submission to GCP API. + :rtype: ``dict`` + """ + url = '' + if project_id: + url = GCPUtils.build_googleapi_url(project_id) + gcp_dict = GCPUtils.params_to_gcp_dict(params, 'forwarding_rule_name') + if 'target' in gcp_dict: + gcp_dict['target'] = '%s/global/targetHttpProxies/%s' % (url, + gcp_dict['target']) + if 'address' in gcp_dict: + gcp_dict['IPAddress'] = '%s/global/addresses/%s' % (url, + gcp_dict['address']) + del gcp_dict['address'] + if 'protocol' in gcp_dict: + gcp_dict['IPProtocol'] = gcp_dict['protocol'] + del gcp_dict['protocol'] + return gcp_dict + + +def get_global_forwarding_rule(client, name, project_id=None): + """ + Get a Global Forwarding Rule from GCP. + + :param client: An initialized GCE Compute Discovery resource. + :type client: :class: `googleapiclient.discovery.Resource` + + :param name: Name of the Global Forwarding Rule. + :type name: ``str`` + + :param project_id: The GCP project ID. + :type project_id: ``str`` + + :return: A dict resp from the respective GCP 'get' request. + :rtype: ``dict`` + """ + try: + req = client.globalForwardingRules().get( + project=project_id, forwardingRule=name) + return GCPUtils.execute_api_client_req(req, raise_404=False) + except Exception: + raise + + +def create_global_forwarding_rule(client, params, project_id): + """ + Create a new Global Forwarding Rule. + + :param client: An initialized GCE Compute Discovery resource. + :type client: :class: `googleapiclient.discovery.Resource` + + :param params: Dictionary of arguments from AnsibleModule. + :type params: ``dict`` + + :param project_id: The GCP project ID. + :type project_id: ``str`` + + :return: Tuple with changed status and response dict + :rtype: ``tuple`` in the format of (bool, dict) + """ + gcp_dict = _build_global_forwarding_rule_dict(params, project_id) + try: + req = client.globalForwardingRules().insert(project=project_id, body=gcp_dict) + return_data = GCPUtils.execute_api_client_req(req, client, raw=False) + if not return_data: + return_data = get_global_forwarding_rule(client, + name=params['forwarding_rule_name'], + project_id=project_id) + return (True, return_data) + except Exception: + raise + + +def delete_global_forwarding_rule(client, name, project_id): + """ + Delete a Global Forwarding Rule. + + :param client: An initialized GCE Compute Discovery resource. + :type client: :class: `googleapiclient.discovery.Resource` + + :param name: Name of the Global Forwarding Rule. + :type name: ``str`` + + :param project_id: The GCP project ID. + :type project_id: ``str`` + + :return: Tuple with changed status and response dict + :rtype: ``tuple`` in the format of (bool, dict) + """ + try: + req = client.globalForwardingRules().delete( + project=project_id, forwardingRule=name) + return_data = GCPUtils.execute_api_client_req(req, client) + return (True, return_data) + except Exception: + raise + + +def update_global_forwarding_rule(client, forwarding_rule, params, name, project_id): + """ + Update a Global Forwarding_Rule. Currently, only a target can be updated.
+ + If the forwarding_rule has not changed, the update will not occur. + + :param client: An initialized GCE Compute Discovery resource. + :type client: :class: `googleapiclient.discovery.Resource` + + :param forwarding_rule: The current forwarding rule resource, as returned by GCP. + :type forwarding_rule: ``dict`` + + :param params: Dictionary of arguments from AnsibleModule. + :type params: ``dict`` + + :param name: Name of the Global Forwarding Rule. + :type name: ``str`` + + :param project_id: The GCP project ID. + :type project_id: ``str`` + + :return: Tuple with changed status and response dict + :rtype: ``tuple`` in the format of (bool, dict) + """ + gcp_dict = _build_global_forwarding_rule_dict(params, project_id) + + # Only the target is updatable through this module, so compare that field directly. + if forwarding_rule['target'] == gcp_dict['target']: + return (False, 'no update necessary') + + try: + req = client.globalForwardingRules().setTarget(project=project_id, + forwardingRule=name, + body={'target': gcp_dict['target']}) + return_data = GCPUtils.execute_api_client_req( + req, client=client, raw=False) + return (True, return_data) + except Exception: + raise + + +def main(): + module = AnsibleModule(argument_spec=dict( + forwarding_rule_name=dict(required=True), + region=dict(required=True), + target=dict(required=False), + address=dict(type='str', required=False), + protocol=dict(required=False, default='TCP', choices=['TCP']), + port_range=dict(required=False), + load_balancing_scheme=dict( + required=False, default='EXTERNAL', choices=['EXTERNAL']), + state=dict(required=True, choices=['absent', 'present']), + service_account_email=dict(), + service_account_permissions=dict(type='list'), + pem_file=dict(), + credentials_file=dict(), + project_id=dict(), ), ) + + client, conn_params = get_google_api_client(module, 'compute', user_agent_product=USER_AGENT_PRODUCT, + user_agent_version=USER_AGENT_VERSION) + + params = {} + params['state'] = module.params.get('state') + params['forwarding_rule_name'] = module.params.get('forwarding_rule_name') + params['region'] = module.params.get('region') + params['target'] = module.params.get('target', None) + params['protocol'] = module.params.get('protocol', None) + params['port_range'] = module.params.get('port_range') + if module.params.get('address', None): + params['address'] = module.params.get('address', None) + + if params['region'] != 'global': + # This module currently doesn't support regional rules. + module.fail_json( + msg=("%s - Only global forwarding rules currently supported. " + "Be sure to specify 'global' for the region option.") % + (params['forwarding_rule_name'])) + + changed = False + json_output = {'state': params['state']} + forwarding_rule = None + if params['region'] == 'global': + forwarding_rule = get_global_forwarding_rule(client, + name=params['forwarding_rule_name'], + project_id=conn_params['project_id']) + if not forwarding_rule: + if params['state'] == 'absent': + # Doesn't exist in GCE, and state==absent.
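+ # Note that the module fails here rather than exiting ok/unchanged, so + # asking to delete a rule that never existed is treated as an error.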
+ changed = False + module.fail_json( + msg="Cannot delete unknown forwarding_rule: %s" % + (params['forwarding_rule_name'])) + else: + # Create + changed, json_output['forwarding_rule'] = create_global_forwarding_rule(client, + params=params, + project_id=conn_params['project_id']) + elif params['state'] == 'absent': + # Delete + changed, json_output['forwarding_rule'] = delete_global_forwarding_rule(client, + name=params['forwarding_rule_name'], + project_id=conn_params['project_id']) + else: + changed, json_output['forwarding_rule'] = update_global_forwarding_rule(client, + forwarding_rule=forwarding_rule, + params=params, + name=params['forwarding_rule_name'], + project_id=conn_params['project_id']) + + json_output['changed'] = changed + json_output.update(params) + module.exit_json(**json_output) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/google/gcp_healthcheck.py b/plugins/modules/cloud/google/gcp_healthcheck.py new file mode 100644 index 0000000000..ddde17f37b --- /dev/null +++ b/plugins/modules/cloud/google/gcp_healthcheck.py @@ -0,0 +1,449 @@ +#!/usr/bin/python +# Copyright 2017 Google Inc. +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['deprecated'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: gcp_healthcheck +short_description: Create, Update or Destroy a Healthcheck. +description: + - Create, Update or Destroy a Healthcheck. Currently only HTTP and + HTTPS Healthchecks are supported. Healthchecks are used to monitor + individual instances, managed instance groups and/or backend + services. Healthchecks are reusable. + - Visit + U(https://cloud.google.com/compute/docs/load-balancing/health-checks) + for an overview of Healthchecks on GCP. + - See + U(https://cloud.google.com/compute/docs/reference/latest/httpHealthChecks) for + API details on HTTP Healthchecks. + - See + U(https://cloud.google.com/compute/docs/reference/latest/httpsHealthChecks) + for more details on the HTTPS Healthcheck API. +requirements: + - "python >= 2.6" + - "google-api-python-client >= 1.6.2" + - "google-auth >= 0.9.0" + - "google-auth-httplib2 >= 0.0.2" +notes: + - Only supports HTTP and HTTPS Healthchecks currently. +deprecated: + removed_in: "2.12" + why: Updated modules released with increased functionality + alternative: > + Use M(gcp_compute_health_check), M(gcp_compute_http_health_check) or + M(gcp_compute_https_health_check) instead. +author: + - "Tom Melendez (@supertom)" +options: + check_interval: + description: + - How often (in seconds) to send a health check. + default: 5 + healthcheck_name: + description: + - Name of the Healthcheck. + required: true + healthcheck_type: + description: + - Type of Healthcheck. + required: true + choices: ["HTTP", "HTTPS"] + host_header: + description: + - The value of the host header in the health check request. If left + empty, the public IP on behalf of which this health + check is performed will be used. + required: false + default: "" + port: + description: + - The TCP port number for the health check request. The default value is + 443 for HTTPS and 80 for HTTP. + request_path: + description: + - The request path of the HTTPS health check request. + required: false + default: "/" + state: + description: State of the Healthcheck.
+ required: true + choices: ["present", "absent"] + timeout: + description: + - How long (in seconds) to wait for a response before claiming + failure. It is invalid for timeout + to have a greater value than check_interval. + default: 5 + unhealthy_threshold: + description: + - A so-far healthy instance will be marked unhealthy after this + many consecutive failures. + default: 2 + healthy_threshold: + description: + - A so-far unhealthy instance will be marked healthy after this + many consecutive successes. + default: 2 + service_account_email: + description: + - service account email + service_account_permissions: + description: + - service account permissions (see + U(https://cloud.google.com/sdk/gcloud/reference/compute/instances/create), + --scopes section for detailed information) + choices: [ + "bigquery", "cloud-platform", "compute-ro", "compute-rw", + "useraccounts-ro", "useraccounts-rw", "datastore", "logging-write", + "monitoring", "sql-admin", "storage-full", "storage-ro", + "storage-rw", "taskqueue", "userinfo-email" + ] + credentials_file: + description: + - Path to the JSON file associated with the service account email + project_id: + description: + - Your GCP project ID +''' + +EXAMPLES = ''' +- name: Create Minimum HealthCheck + gcp_healthcheck: + service_account_email: "{{ service_account_email }}" + credentials_file: "{{ credentials_file }}" + project_id: "{{ project_id }}" + healthcheck_name: my-healthcheck + healthcheck_type: HTTP + state: present +- name: Create HTTP HealthCheck + gcp_healthcheck: + service_account_email: "{{ service_account_email }}" + credentials_file: "{{ credentials_file }}" + project_id: "{{ project_id }}" + healthcheck_name: my-healthcheck + healthcheck_type: HTTP + host_header: my-host + request_path: /hc + check_interval: 10 + timeout: 10 + unhealthy_threshold: 2 + healthy_threshold: 1 + state: present +- name: Create HTTPS HealthCheck + gcp_healthcheck: + service_account_email: "{{ service_account_email }}" + credentials_file: "{{ credentials_file }}" + project_id: "{{ project_id }}" + healthcheck_name: "{{ https_healthcheck }}" + healthcheck_type: HTTPS + host_header: my-host + request_path: /hc + check_interval: 5 + timeout: 5 + unhealthy_threshold: 2 + healthy_threshold: 1 + state: present +''' + +RETURN = ''' +state: + description: state of the Healthcheck + returned: Always. + type: str + sample: present +healthcheck_name: + description: Name of the Healthcheck + returned: Always + type: str + sample: my-healthcheck +healthcheck_type: + description: Type of the Healthcheck + returned: Always + type: str + sample: HTTP +healthcheck: + description: GCP Healthcheck dictionary + returned: Always. Refer to GCP documentation for detailed field descriptions. + type: dict + sample: { "name": "my-hc", "port": 443, "requestPath": "/foo" } +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.gcp import get_google_api_client, GCPUtils + + +USER_AGENT_PRODUCT = 'ansible-healthcheck' +USER_AGENT_VERSION = '0.0.1' + + +def _validate_healthcheck_params(params): + """ + Validate healthcheck params. + + Simple validation has already been performed by AnsibleModule. + + :param params: Ansible dictionary containing configuration.
+
+
+def _build_healthcheck_dict(params):
+    """
+    Reformat services in Ansible Params for GCP.
+
+    :param params: Params from AnsibleModule object
+    :type params: ``dict``
+
+    :return: dictionary suitable for submission to GCP
+             HealthCheck (HTTP/HTTPS) API.
+    :rtype: ``dict``
+    """
+    gcp_dict = GCPUtils.params_to_gcp_dict(params, 'healthcheck_name')
+    if 'timeout' in gcp_dict:
+        gcp_dict['timeoutSec'] = gcp_dict['timeout']
+        del gcp_dict['timeout']
+
+    if 'checkInterval' in gcp_dict:
+        gcp_dict['checkIntervalSec'] = gcp_dict['checkInterval']
+        del gcp_dict['checkInterval']
+
+    if 'hostHeader' in gcp_dict:
+        gcp_dict['host'] = gcp_dict['hostHeader']
+        del gcp_dict['hostHeader']
+
+    if 'healthcheckType' in gcp_dict:
+        del gcp_dict['healthcheckType']
+    return gcp_dict
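+
+
+# Sketch of the mapping performed above, assuming GCPUtils.params_to_gcp_dict
+# camelCases keys and folds 'healthcheck_name' into 'name' (hypothetical input,
+# not an exhaustive field list):
+#
+#   {'healthcheck_name': 'my-hc', 'healthcheck_type': 'HTTP',
+#    'check_interval': 5, 'timeout': 5, 'host_header': 'my-host'}
+#   -> {'name': 'my-hc', 'checkIntervalSec': 5, 'timeoutSec': 5,
+#       'host': 'my-host'}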
+
+
+def _get_req_resource(client, resource_type):
+    if resource_type == 'HTTPS':
+        return (client.httpsHealthChecks(), 'httpsHealthCheck')
+    else:
+        return (client.httpHealthChecks(), 'httpHealthCheck')
+
+
+def get_healthcheck(client, name, project_id=None, resource_type='HTTP'):
+    """
+    Get a Healthcheck from GCP.
+
+    :param client: An initialized GCE Compute Discovery resource.
+    :type client: :class: `googleapiclient.discovery.Resource`
+
+    :param name: Name of the Healthcheck.
+    :type name: ``str``
+
+    :param project_id: The GCP project ID.
+    :type project_id: ``str``
+
+    :return: A dict resp from the respective GCP 'get' request.
+    :rtype: ``dict``
+    """
+    try:
+        resource, entity_name = _get_req_resource(client, resource_type)
+        args = {'project': project_id, entity_name: name}
+        req = resource.get(**args)
+        return GCPUtils.execute_api_client_req(req, raise_404=False)
+    except Exception:
+        raise
+
+
+def create_healthcheck(client, params, project_id, resource_type='HTTP'):
+    """
+    Create a new Healthcheck.
+
+    :param client: An initialized GCE Compute Discovery resource.
+    :type client: :class: `googleapiclient.discovery.Resource`
+
+    :param params: Dictionary of arguments from AnsibleModule.
+    :type params: ``dict``
+
+    :return: Tuple with changed status and response dict
+    :rtype: ``tuple`` in the format of (bool, dict)
+    """
+    gcp_dict = _build_healthcheck_dict(params)
+    try:
+        resource, _ = _get_req_resource(client, resource_type)
+        args = {'project': project_id, 'body': gcp_dict}
+        req = resource.insert(**args)
+        return_data = GCPUtils.execute_api_client_req(req, client, raw=False)
+        if not return_data:
+            return_data = get_healthcheck(client,
+                                          name=params['healthcheck_name'],
+                                          project_id=project_id)
+        return (True, return_data)
+    except Exception:
+        raise
+
+
+def delete_healthcheck(client, name, project_id, resource_type='HTTP'):
+    """
+    Delete a Healthcheck.
+
+    :param client: An initialized GCE Compute Discovery resource.
+    :type client: :class: `googleapiclient.discovery.Resource`
+
+    :param name: Name of the Healthcheck.
+    :type name: ``str``
+
+    :param project_id: The GCP project ID.
+    :type project_id: ``str``
+
+    :return: Tuple with changed status and response dict
+    :rtype: ``tuple`` in the format of (bool, dict)
+    """
+    try:
+        resource, entity_name = _get_req_resource(client, resource_type)
+        args = {'project': project_id, entity_name: name}
+        req = resource.delete(**args)
+        return_data = GCPUtils.execute_api_client_req(req, client)
+        return (True, return_data)
+    except Exception:
+        raise
+
+
+def update_healthcheck(client, healthcheck, params, name, project_id,
+                       resource_type='HTTP'):
+    """
+    Update a Healthcheck.
+
+    If the healthcheck has not changed, the update will not occur.
+
+    :param client: An initialized GCE Compute Discovery resource.
+    :type client: :class: `googleapiclient.discovery.Resource`
+
+    :param healthcheck: The existing Healthcheck as returned from GCP.
+    :type healthcheck: ``dict``
+
+    :param params: Dictionary of arguments from AnsibleModule.
+    :type params: ``dict``
+
+    :param name: Name of the Healthcheck.
+    :type name: ``str``
+
+    :param project_id: The GCP project ID.
+    :type project_id: ``str``
+
+    :return: Tuple with changed status and response dict
+    :rtype: ``tuple`` in the format of (bool, dict)
+    """
+    gcp_dict = _build_healthcheck_dict(params)
+    ans = GCPUtils.are_params_equal(healthcheck, gcp_dict)
+    if ans:
+        return (False, 'no update necessary')
+
+    try:
+        resource, entity_name = _get_req_resource(client, resource_type)
+        args = {'project': project_id, entity_name: name, 'body': gcp_dict}
+        req = resource.update(**args)
+        return_data = GCPUtils.execute_api_client_req(
+            req, client=client, raw=False)
+        return (True, return_data)
+    except Exception:
+        raise
+
+
+def main():
+    module = AnsibleModule(argument_spec=dict(
+        healthcheck_name=dict(required=True),
+        healthcheck_type=dict(required=True,
+                              choices=['HTTP', 'HTTPS']),
+        request_path=dict(required=False, default='/'),
+        check_interval=dict(required=False, type='int', default=5),
+        healthy_threshold=dict(required=False, type='int', default=2),
+        unhealthy_threshold=dict(required=False, type='int', default=2),
+        host_header=dict(required=False, type='str', default=''),
+        timeout=dict(required=False, type='int', default=5),
+        port=dict(required=False, type='int'),
+        state=dict(choices=['absent', 'present'], default='present'),
+        service_account_email=dict(),
+        service_account_permissions=dict(type='list'),
+        credentials_file=dict(),
+        project_id=dict(), ), )
+
+    client, conn_params = get_google_api_client(module, 'compute', user_agent_product=USER_AGENT_PRODUCT,
+                                                user_agent_version=USER_AGENT_VERSION)
+
+    params = {}
+
+    params['healthcheck_name'] = module.params.get('healthcheck_name')
+    params['healthcheck_type'] = module.params.get('healthcheck_type')
+    params['request_path'] = module.params.get('request_path')
+    params['check_interval'] = module.params.get('check_interval')
+    params['healthy_threshold'] = module.params.get('healthy_threshold')
+    params['unhealthy_threshold'] = module.params.get('unhealthy_threshold')
+    params['host_header'] = module.params.get('host_header')
+    params['timeout'] = module.params.get('timeout')
+    params['port'] = module.params.get('port', None)
+    params['state'] = module.params.get('state')
+
+    if not params['port']:
+        params['port'] = 80
+        if params['healthcheck_type'] == 'HTTPS':
+            params['port'] = 443
+    try:
+        _validate_healthcheck_params(params)
+    except Exception as e:
+        module.fail_json(msg=str(e), changed=False)
+
+    changed = False
+    json_output = {'state': params['state']}
+    healthcheck = get_healthcheck(client,
+                                  name=params['healthcheck_name'],
project_id=conn_params['project_id'], + resource_type=params['healthcheck_type']) + + if not healthcheck: + if params['state'] == 'absent': + # Doesn't exist in GCE, and state==absent. + changed = False + module.fail_json( + msg="Cannot delete unknown healthcheck: %s" % + (params['healthcheck_name'])) + else: + # Create + changed, json_output['healthcheck'] = create_healthcheck(client, + params=params, + project_id=conn_params['project_id'], + resource_type=params['healthcheck_type']) + elif params['state'] == 'absent': + # Delete + changed, json_output['healthcheck'] = delete_healthcheck(client, + name=params['healthcheck_name'], + project_id=conn_params['project_id'], + resource_type=params['healthcheck_type']) + else: + changed, json_output['healthcheck'] = update_healthcheck(client, + healthcheck=healthcheck, + params=params, + name=params['healthcheck_name'], + project_id=conn_params['project_id'], + resource_type=params['healthcheck_type']) + json_output['changed'] = changed + json_output.update(params) + module.exit_json(**json_output) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/google/gcp_iam_role_facts.py b/plugins/modules/cloud/google/gcp_iam_role_facts.py new file mode 120000 index 0000000000..f46a8e3ec7 --- /dev/null +++ b/plugins/modules/cloud/google/gcp_iam_role_facts.py @@ -0,0 +1 @@ +gcp_iam_role_info.py \ No newline at end of file diff --git a/plugins/modules/cloud/google/gcp_iam_role_info.py b/plugins/modules/cloud/google/gcp_iam_role_info.py new file mode 100644 index 0000000000..9e7682e6e0 --- /dev/null +++ b/plugins/modules/cloud/google/gcp_iam_role_info.py @@ -0,0 +1,191 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (C) 2017 Google +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** AUTO GENERATED CODE *** +# +# ---------------------------------------------------------------------------- +# +# This file is automatically generated by Magic Modules and manual +# changes will be clobbered when the file is regenerated. +# +# Please read more about how to change this file at +# https://www.github.com/GoogleCloudPlatform/magic-modules +# +# ---------------------------------------------------------------------------- + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +################################################################################ +# Documentation +################################################################################ + +ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: gcp_iam_role_info +description: +- Gather info for GCP Role +short_description: Gather info for GCP Role +author: Google Inc. (@googlecloudplatform) +requirements: +- python >= 2.6 +- requests >= 2.18.4 +- google-auth >= 1.3.0 +options: + project: + description: + - The Google Cloud Platform project to use. + type: str + auth_kind: + description: + - The type of credential used. + type: str + required: true + choices: + - application + - machineaccount + - serviceaccount + service_account_contents: + description: + - The contents of a Service Account JSON file, either in a dictionary or as a + JSON string that represents it. 
+ type: jsonarg + service_account_file: + description: + - The path of a Service Account JSON file if serviceaccount is selected as type. + type: path + service_account_email: + description: + - An optional service account email address if machineaccount is selected and + the user does not wish to use the default email. + type: str + scopes: + description: + - Array of scopes to be used + type: list + env_type: + description: + - Specifies which Ansible environment you're running this module within. + - This should not be set unless you know what you're doing. + - This only alters the User Agent string for any API requests. + type: str +notes: +- for authentication, you can set service_account_file using the C(gcp_service_account_file) + env variable. +- for authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS) + env variable. +- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL) + env variable. +- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable. +- For authentication, you can set scopes using the C(GCP_SCOPES) env variable. +- Environment variables values will only be used if the playbook values are not set. +- The I(service_account_email) and I(service_account_file) options are mutually exclusive. +''' + +EXAMPLES = ''' +- name: get info on a role + gcp_iam_role_info: + project: test_project + auth_kind: serviceaccount + service_account_file: "/tmp/auth.pem" +''' + +RETURN = ''' +resources: + description: List of resources + returned: always + type: complex + contains: + name: + description: + - The name of the role. + returned: success + type: str + title: + description: + - A human-readable title for the role. Typically this is limited to 100 UTF-8 + bytes. + returned: success + type: str + description: + description: + - Human-readable description for the role. + returned: success + type: str + includedPermissions: + description: + - Names of permissions this role grants when bound in an IAM policy. + returned: success + type: list + stage: + description: + - The current launch stage of the role. + returned: success + type: str + deleted: + description: + - The current deleted state of the role. + returned: success + type: bool +''' + +################################################################################ +# Imports +################################################################################ +from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest +import json + +################################################################################ +# Main +################################################################################ + + +def main(): + module = GcpModule(argument_spec=dict()) + + if not module.params['scopes']: + module.params['scopes'] = ['https://www.googleapis.com/auth/iam'] + + return_value = {'resources': fetch_list(module, collection(module))} + module.exit_json(**return_value) + + +def collection(module): + return "https://iam.googleapis.com/v1/projects/{project}/roles".format(**module.params) + + +def fetch_list(module, link): + auth = GcpSession(module, 'iam') + return auth.list(link, return_if_object, array_name='roles') + + +def return_if_object(module, response): + # If not found, return nothing. + if response.status_code == 404: + return None + + # If no content, return nothing. 
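+    # (A 204 response carries no body, so letting it fall through to the
+    # response.json() call below would raise a decode error.)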
+ if response.status_code == 204: + return None + + try: + module.raise_for_status(response) + result = response.json() + except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst: + module.fail_json(msg="Invalid JSON response with error: %s" % inst) + + if navigate_hash(result, ['error', 'errors']): + module.fail_json(msg=navigate_hash(result, ['error', 'errors'])) + + return result + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/cloud/google/gcp_iam_service_account_facts.py b/plugins/modules/cloud/google/gcp_iam_service_account_facts.py new file mode 120000 index 0000000000..3f03024049 --- /dev/null +++ b/plugins/modules/cloud/google/gcp_iam_service_account_facts.py @@ -0,0 +1 @@ +gcp_iam_service_account_info.py \ No newline at end of file diff --git a/plugins/modules/cloud/google/gcp_iam_service_account_info.py b/plugins/modules/cloud/google/gcp_iam_service_account_info.py new file mode 100644 index 0000000000..a1cc91623a --- /dev/null +++ b/plugins/modules/cloud/google/gcp_iam_service_account_info.py @@ -0,0 +1,190 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (C) 2017 Google +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** AUTO GENERATED CODE *** +# +# ---------------------------------------------------------------------------- +# +# This file is automatically generated by Magic Modules and manual +# changes will be clobbered when the file is regenerated. +# +# Please read more about how to change this file at +# https://www.github.com/GoogleCloudPlatform/magic-modules +# +# ---------------------------------------------------------------------------- + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +################################################################################ +# Documentation +################################################################################ + +ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: gcp_iam_service_account_info +description: +- Gather info for GCP ServiceAccount +short_description: Gather info for GCP ServiceAccount +author: Google Inc. (@googlecloudplatform) +requirements: +- python >= 2.6 +- requests >= 2.18.4 +- google-auth >= 1.3.0 +options: + project: + description: + - The Google Cloud Platform project to use. + type: str + auth_kind: + description: + - The type of credential used. + type: str + required: true + choices: + - application + - machineaccount + - serviceaccount + service_account_contents: + description: + - The contents of a Service Account JSON file, either in a dictionary or as a + JSON string that represents it. + type: jsonarg + service_account_file: + description: + - The path of a Service Account JSON file if serviceaccount is selected as type. + type: path + service_account_email: + description: + - An optional service account email address if machineaccount is selected and + the user does not wish to use the default email. + type: str + scopes: + description: + - Array of scopes to be used + type: list + env_type: + description: + - Specifies which Ansible environment you're running this module within. + - This should not be set unless you know what you're doing. + - This only alters the User Agent string for any API requests. 
+ type: str +notes: +- for authentication, you can set service_account_file using the C(gcp_service_account_file) + env variable. +- for authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS) + env variable. +- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL) + env variable. +- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable. +- For authentication, you can set scopes using the C(GCP_SCOPES) env variable. +- Environment variables values will only be used if the playbook values are not set. +- The I(service_account_email) and I(service_account_file) options are mutually exclusive. +''' + +EXAMPLES = ''' +- name: get info on a service account + gcp_iam_service_account_info: + project: test_project + auth_kind: serviceaccount + service_account_file: "/tmp/auth.pem" +''' + +RETURN = ''' +resources: + description: List of resources + returned: always + type: complex + contains: + name: + description: + - The name of the service account. + returned: success + type: str + projectId: + description: + - Id of the project that owns the service account. + returned: success + type: str + uniqueId: + description: + - Unique and stable id of the service account. + returned: success + type: str + email: + description: + - Email address of the service account. + returned: success + type: str + displayName: + description: + - User specified description of service account. + returned: success + type: str + oauth2ClientId: + description: + - OAuth2 client id for the service account. + returned: success + type: str +''' + +################################################################################ +# Imports +################################################################################ +from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest +import json + +################################################################################ +# Main +################################################################################ + + +def main(): + module = GcpModule(argument_spec=dict()) + + if not module.params['scopes']: + module.params['scopes'] = ['https://www.googleapis.com/auth/iam'] + + return_value = {'resources': fetch_list(module, collection(module))} + module.exit_json(**return_value) + + +def collection(module): + return "https://iam.googleapis.com/v1/projects/{project}/serviceAccounts".format(**module.params) + + +def fetch_list(module, link): + auth = GcpSession(module, 'iam') + return auth.list(link, return_if_object, array_name='accounts') + + +def return_if_object(module, response): + # If not found, return nothing. + if response.status_code == 404: + return None + + # If no content, return nothing. 
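+    # (Like the 404 branch above, this yields None, which fetch_list is
+    # expected to treat as an empty page; an assumption from the shared
+    # gcp_utils helpers rather than anything enforced here.)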
+ if response.status_code == 204: + return None + + try: + module.raise_for_status(response) + result = response.json() + except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst: + module.fail_json(msg="Invalid JSON response with error: %s" % inst) + + if navigate_hash(result, ['error', 'errors']): + module.fail_json(msg=navigate_hash(result, ['error', 'errors'])) + + return result + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/cloud/google/gcp_pubsub_subscription_facts.py b/plugins/modules/cloud/google/gcp_pubsub_subscription_facts.py new file mode 120000 index 0000000000..3196ae083b --- /dev/null +++ b/plugins/modules/cloud/google/gcp_pubsub_subscription_facts.py @@ -0,0 +1 @@ +gcp_pubsub_subscription_info.py \ No newline at end of file diff --git a/plugins/modules/cloud/google/gcp_pubsub_subscription_info.py b/plugins/modules/cloud/google/gcp_pubsub_subscription_info.py new file mode 100644 index 0000000000..a3355cd7ef --- /dev/null +++ b/plugins/modules/cloud/google/gcp_pubsub_subscription_info.py @@ -0,0 +1,292 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (C) 2017 Google +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** AUTO GENERATED CODE *** +# +# ---------------------------------------------------------------------------- +# +# This file is automatically generated by Magic Modules and manual +# changes will be clobbered when the file is regenerated. +# +# Please read more about how to change this file at +# https://www.github.com/GoogleCloudPlatform/magic-modules +# +# ---------------------------------------------------------------------------- + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +################################################################################ +# Documentation +################################################################################ + +ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: gcp_pubsub_subscription_info +description: +- Gather info for GCP Subscription +short_description: Gather info for GCP Subscription +author: Google Inc. (@googlecloudplatform) +requirements: +- python >= 2.6 +- requests >= 2.18.4 +- google-auth >= 1.3.0 +options: + project: + description: + - The Google Cloud Platform project to use. + type: str + auth_kind: + description: + - The type of credential used. + type: str + required: true + choices: + - application + - machineaccount + - serviceaccount + service_account_contents: + description: + - The contents of a Service Account JSON file, either in a dictionary or as a + JSON string that represents it. + type: jsonarg + service_account_file: + description: + - The path of a Service Account JSON file if serviceaccount is selected as type. + type: path + service_account_email: + description: + - An optional service account email address if machineaccount is selected and + the user does not wish to use the default email. + type: str + scopes: + description: + - Array of scopes to be used + type: list + env_type: + description: + - Specifies which Ansible environment you're running this module within. + - This should not be set unless you know what you're doing. + - This only alters the User Agent string for any API requests. 
+ type: str +notes: +- for authentication, you can set service_account_file using the C(gcp_service_account_file) + env variable. +- for authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS) + env variable. +- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL) + env variable. +- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable. +- For authentication, you can set scopes using the C(GCP_SCOPES) env variable. +- Environment variables values will only be used if the playbook values are not set. +- The I(service_account_email) and I(service_account_file) options are mutually exclusive. +''' + +EXAMPLES = ''' +- name: get info on a subscription + gcp_pubsub_subscription_info: + project: test_project + auth_kind: serviceaccount + service_account_file: "/tmp/auth.pem" +''' + +RETURN = ''' +resources: + description: List of resources + returned: always + type: complex + contains: + name: + description: + - Name of the subscription. + returned: success + type: str + topic: + description: + - A reference to a Topic resource. + returned: success + type: dict + labels: + description: + - A set of key/value label pairs to assign to this Subscription. + returned: success + type: dict + pushConfig: + description: + - If push delivery is used with this subscription, this field is used to configure + it. An empty pushConfig signifies that the subscriber will pull and ack messages + using API methods. + returned: success + type: complex + contains: + oidcToken: + description: + - If specified, Pub/Sub will generate and attach an OIDC JWT token as an + Authorization header in the HTTP request for every pushed message. + returned: success + type: complex + contains: + serviceAccountEmail: + description: + - Service account email to be used for generating the OIDC token. + - The caller (for subscriptions.create, subscriptions.patch, and subscriptions.modifyPushConfig + RPCs) must have the iam.serviceAccounts.actAs permission for the service + account. + returned: success + type: str + audience: + description: + - 'Audience to be used when generating OIDC token. The audience claim + identifies the recipients that the JWT is intended for. The audience + value is a single case-sensitive string. Having multiple values (array) + for the audience field is not supported. More info about the OIDC + JWT token audience here: U(https://tools.ietf.org/html/rfc7519#section-4.1.3) + Note: if not specified, the Push endpoint URL will be used.' + returned: success + type: str + pushEndpoint: + description: + - A URL locating the endpoint to which messages should be pushed. + - For example, a Webhook endpoint might use "U(https://example.com/push"). + returned: success + type: str + attributes: + description: + - Endpoint configuration attributes. + - Every endpoint has a set of API supported attributes that can be used + to control different aspects of the message delivery. + - The currently supported attribute is x-goog-version, which you can use + to change the format of the pushed message. This attribute indicates the + version of the data expected by the endpoint. This controls the shape + of the pushed message (i.e., its fields and metadata). The endpoint version + is based on the version of the Pub/Sub API. + - If not present during the subscriptions.create call, it will default to + the version of the API used to make such call. 
If not present during a + subscriptions.modifyPushConfig call, its value will not be changed. subscriptions.get + calls will always return a valid version, even if the subscription was + created without this attribute. + - 'The possible values for this attribute are: - v1beta1: uses the push + format defined in the v1beta1 Pub/Sub API.' + - "- v1 or v1beta2: uses the push format defined in the v1 Pub/Sub API." + returned: success + type: dict + ackDeadlineSeconds: + description: + - This value is the maximum time after a subscriber receives a message before + the subscriber should acknowledge the message. After message delivery but + before the ack deadline expires and before the message is acknowledged, it + is an outstanding message and will not be delivered again during that time + (on a best-effort basis). + - For pull subscriptions, this value is used as the initial value for the ack + deadline. To override this value for a given message, call subscriptions.modifyAckDeadline + with the corresponding ackId if using pull. The minimum custom deadline you + can specify is 10 seconds. The maximum custom deadline you can specify is + 600 seconds (10 minutes). + - If this parameter is 0, a default value of 10 seconds is used. + - For push delivery, this value is also used to set the request timeout for + the call to the push endpoint. + - If the subscriber never acknowledges the message, the Pub/Sub system will + eventually redeliver the message. + returned: success + type: int + messageRetentionDuration: + description: + - How long to retain unacknowledged messages in the subscription's backlog, + from the moment a message is published. If retainAckedMessages is true, then + this also configures the retention of acknowledged messages, and thus configures + how far back in time a subscriptions.seek can be done. Defaults to 7 days. + Cannot be more than 7 days (`"604800s"`) or less than 10 minutes (`"600s"`). + - 'A duration in seconds with up to nine fractional digits, terminated by ''s''. + Example: `"600.5s"`.' + returned: success + type: str + retainAckedMessages: + description: + - Indicates whether to retain acknowledged messages. If `true`, then messages + are not expunged from the subscription's backlog, even if they are acknowledged, + until they fall out of the messageRetentionDuration window. + returned: success + type: bool + expirationPolicy: + description: + - A policy that specifies the conditions for this subscription's expiration. + - A subscription is considered active as long as any connected subscriber is + successfully consuming messages from the subscription or is issuing operations + on the subscription. If expirationPolicy is not set, a default policy with + ttl of 31 days will be used. If it is set but ttl is "", the resource never + expires. The minimum allowed value for expirationPolicy.ttl is 1 day. + returned: success + type: complex + contains: + ttl: + description: + - Specifies the "time-to-live" duration for an associated resource. The + resource expires if it is not active for a period of ttl. + - If ttl is not set, the associated resource never expires. + - A duration in seconds with up to nine fractional digits, terminated by + 's'. + - Example - "3.5s". 
+ returned: success + type: str +''' + +################################################################################ +# Imports +################################################################################ +from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest +import json + +################################################################################ +# Main +################################################################################ + + +def main(): + module = GcpModule(argument_spec=dict()) + + if not module.params['scopes']: + module.params['scopes'] = ['https://www.googleapis.com/auth/pubsub'] + + return_value = {'resources': fetch_list(module, collection(module))} + module.exit_json(**return_value) + + +def collection(module): + return "https://pubsub.googleapis.com/v1/projects/{project}/subscriptions".format(**module.params) + + +def fetch_list(module, link): + auth = GcpSession(module, 'pubsub') + return auth.list(link, return_if_object, array_name='subscriptions') + + +def return_if_object(module, response): + # If not found, return nothing. + if response.status_code == 404: + return None + + # If no content, return nothing. + if response.status_code == 204: + return None + + try: + module.raise_for_status(response) + result = response.json() + except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst: + module.fail_json(msg="Invalid JSON response with error: %s" % inst) + + if navigate_hash(result, ['error', 'errors']): + module.fail_json(msg=navigate_hash(result, ['error', 'errors'])) + + return result + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/cloud/google/gcp_pubsub_topic_facts.py b/plugins/modules/cloud/google/gcp_pubsub_topic_facts.py new file mode 120000 index 0000000000..388ebfc1d1 --- /dev/null +++ b/plugins/modules/cloud/google/gcp_pubsub_topic_facts.py @@ -0,0 +1 @@ +gcp_pubsub_topic_info.py \ No newline at end of file diff --git a/plugins/modules/cloud/google/gcp_pubsub_topic_info.py b/plugins/modules/cloud/google/gcp_pubsub_topic_info.py new file mode 100644 index 0000000000..bc275da861 --- /dev/null +++ b/plugins/modules/cloud/google/gcp_pubsub_topic_info.py @@ -0,0 +1,196 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (C) 2017 Google +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** AUTO GENERATED CODE *** +# +# ---------------------------------------------------------------------------- +# +# This file is automatically generated by Magic Modules and manual +# changes will be clobbered when the file is regenerated. +# +# Please read more about how to change this file at +# https://www.github.com/GoogleCloudPlatform/magic-modules +# +# ---------------------------------------------------------------------------- + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +################################################################################ +# Documentation +################################################################################ + +ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: gcp_pubsub_topic_info +description: +- Gather info for GCP Topic +short_description: Gather info for GCP Topic +author: Google Inc. 
(@googlecloudplatform) +requirements: +- python >= 2.6 +- requests >= 2.18.4 +- google-auth >= 1.3.0 +options: + project: + description: + - The Google Cloud Platform project to use. + type: str + auth_kind: + description: + - The type of credential used. + type: str + required: true + choices: + - application + - machineaccount + - serviceaccount + service_account_contents: + description: + - The contents of a Service Account JSON file, either in a dictionary or as a + JSON string that represents it. + type: jsonarg + service_account_file: + description: + - The path of a Service Account JSON file if serviceaccount is selected as type. + type: path + service_account_email: + description: + - An optional service account email address if machineaccount is selected and + the user does not wish to use the default email. + type: str + scopes: + description: + - Array of scopes to be used + type: list + env_type: + description: + - Specifies which Ansible environment you're running this module within. + - This should not be set unless you know what you're doing. + - This only alters the User Agent string for any API requests. + type: str +notes: +- for authentication, you can set service_account_file using the C(gcp_service_account_file) + env variable. +- for authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS) + env variable. +- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL) + env variable. +- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable. +- For authentication, you can set scopes using the C(GCP_SCOPES) env variable. +- Environment variables values will only be used if the playbook values are not set. +- The I(service_account_email) and I(service_account_file) options are mutually exclusive. +''' + +EXAMPLES = ''' +- name: get info on a topic + gcp_pubsub_topic_info: + project: test_project + auth_kind: serviceaccount + service_account_file: "/tmp/auth.pem" +''' + +RETURN = ''' +resources: + description: List of resources + returned: always + type: complex + contains: + name: + description: + - Name of the topic. + returned: success + type: str + kmsKeyName: + description: + - The resource name of the Cloud KMS CryptoKey to be used to protect access + to messages published on this topic. Your project's PubSub service account + (`service-{{PROJECT_NUMBER}}@gcp-sa-pubsub.iam.gserviceaccount.com`) must + have `roles/cloudkms.cryptoKeyEncrypterDecrypter` to use this feature. + - The expected format is `projects/*/locations/*/keyRings/*/cryptoKeys/*` . + returned: success + type: str + labels: + description: + - A set of key/value label pairs to assign to this Topic. + returned: success + type: dict + messageStoragePolicy: + description: + - Policy constraining the set of Google Cloud Platform regions where messages + published to the topic may be stored. If not present, then no constraints + are in effect. + returned: success + type: complex + contains: + allowedPersistenceRegions: + description: + - A list of IDs of GCP regions where messages that are published to the + topic may be persisted in storage. Messages published by publishers running + in non-allowed GCP regions (or running outside of GCP altogether) will + be routed for storage in one of the allowed regions. An empty list means + that no regions are allowed, and is not a valid configuration. 
+ returned: success + type: list +''' + +################################################################################ +# Imports +################################################################################ +from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest +import json + +################################################################################ +# Main +################################################################################ + + +def main(): + module = GcpModule(argument_spec=dict()) + + if not module.params['scopes']: + module.params['scopes'] = ['https://www.googleapis.com/auth/pubsub'] + + return_value = {'resources': fetch_list(module, collection(module))} + module.exit_json(**return_value) + + +def collection(module): + return "https://pubsub.googleapis.com/v1/projects/{project}/topics".format(**module.params) + + +def fetch_list(module, link): + auth = GcpSession(module, 'pubsub') + return auth.list(link, return_if_object, array_name='topics') + + +def return_if_object(module, response): + # If not found, return nothing. + if response.status_code == 404: + return None + + # If no content, return nothing. + if response.status_code == 204: + return None + + try: + module.raise_for_status(response) + result = response.json() + except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst: + module.fail_json(msg="Invalid JSON response with error: %s" % inst) + + if navigate_hash(result, ['error', 'errors']): + module.fail_json(msg=navigate_hash(result, ['error', 'errors'])) + + return result + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/cloud/google/gcp_redis_instance_facts.py b/plugins/modules/cloud/google/gcp_redis_instance_facts.py new file mode 120000 index 0000000000..e6d6fa6db2 --- /dev/null +++ b/plugins/modules/cloud/google/gcp_redis_instance_facts.py @@ -0,0 +1 @@ +gcp_redis_instance_info.py \ No newline at end of file diff --git a/plugins/modules/cloud/google/gcp_redis_instance_info.py b/plugins/modules/cloud/google/gcp_redis_instance_info.py new file mode 100644 index 0000000000..9d09c6db92 --- /dev/null +++ b/plugins/modules/cloud/google/gcp_redis_instance_info.py @@ -0,0 +1,267 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (C) 2017 Google +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** AUTO GENERATED CODE *** +# +# ---------------------------------------------------------------------------- +# +# This file is automatically generated by Magic Modules and manual +# changes will be clobbered when the file is regenerated. +# +# Please read more about how to change this file at +# https://www.github.com/GoogleCloudPlatform/magic-modules +# +# ---------------------------------------------------------------------------- + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +################################################################################ +# Documentation +################################################################################ + +ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: gcp_redis_instance_info +description: +- Gather info for GCP Instance +short_description: Gather info for GCP Instance +author: Google Inc. 
(@googlecloudplatform) +requirements: +- python >= 2.6 +- requests >= 2.18.4 +- google-auth >= 1.3.0 +options: + region: + description: + - The name of the Redis region of the instance. + required: true + type: str + project: + description: + - The Google Cloud Platform project to use. + type: str + auth_kind: + description: + - The type of credential used. + type: str + required: true + choices: + - application + - machineaccount + - serviceaccount + service_account_contents: + description: + - The contents of a Service Account JSON file, either in a dictionary or as a + JSON string that represents it. + type: jsonarg + service_account_file: + description: + - The path of a Service Account JSON file if serviceaccount is selected as type. + type: path + service_account_email: + description: + - An optional service account email address if machineaccount is selected and + the user does not wish to use the default email. + type: str + scopes: + description: + - Array of scopes to be used + type: list + env_type: + description: + - Specifies which Ansible environment you're running this module within. + - This should not be set unless you know what you're doing. + - This only alters the User Agent string for any API requests. + type: str +notes: +- for authentication, you can set service_account_file using the C(gcp_service_account_file) + env variable. +- for authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS) + env variable. +- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL) + env variable. +- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable. +- For authentication, you can set scopes using the C(GCP_SCOPES) env variable. +- Environment variables values will only be used if the playbook values are not set. +- The I(service_account_email) and I(service_account_file) options are mutually exclusive. +''' + +EXAMPLES = ''' +- name: get info on an instance + gcp_redis_instance_info: + region: us-central1 + project: test_project + auth_kind: serviceaccount + service_account_file: "/tmp/auth.pem" +''' + +RETURN = ''' +resources: + description: List of resources + returned: always + type: complex + contains: + alternativeLocationId: + description: + - Only applicable to STANDARD_HA tier which protects the instance against zonal + failures by provisioning it across two zones. + - If provided, it must be a different zone from the one provided in [locationId]. + returned: success + type: str + authorizedNetwork: + description: + - The full name of the Google Compute Engine network to which the instance is + connected. If left unspecified, the default network will be used. + returned: success + type: str + createTime: + description: + - The time the instance was created in RFC3339 UTC "Zulu" format, accurate to + nanoseconds. + returned: success + type: str + currentLocationId: + description: + - The current zone where the Redis endpoint is placed. + - For Basic Tier instances, this will always be the same as the [locationId] + provided by the user at creation time. For Standard Tier instances, this can + be either [locationId] or [alternativeLocationId] and can change after a failover + event. + returned: success + type: str + displayName: + description: + - An arbitrary and optional user-provided name for the instance. + returned: success + type: str + host: + description: + - Hostname or IP address of the exposed Redis endpoint used by clients to connect + to the service. 
+ returned: success + type: str + labels: + description: + - Resource labels to represent user provided metadata. + returned: success + type: dict + redisConfigs: + description: + - Redis configuration parameters, according to U(http://redis.io/topics/config). + - 'Please check Memorystore documentation for the list of supported parameters: + U(https://cloud.google.com/memorystore/docs/redis/reference/rest/v1/projects.locations.instances#Instance.FIELDS.redis_configs) + .' + returned: success + type: dict + locationId: + description: + - The zone where the instance will be provisioned. If not provided, the service + will choose a zone for the instance. For STANDARD_HA tier, instances will + be created across two zones for protection against zonal failures. If [alternativeLocationId] + is also provided, it must be different from [locationId]. + returned: success + type: str + name: + description: + - The ID of the instance or a fully qualified identifier for the instance. + returned: success + type: str + memorySizeGb: + description: + - Redis memory size in GiB. + returned: success + type: int + port: + description: + - The port number of the exposed Redis endpoint. + returned: success + type: int + redisVersion: + description: + - 'The version of Redis software. If not provided, latest supported version + will be used. Currently, the supported values are: - REDIS_4_0 for Redis 4.0 + compatibility - REDIS_3_2 for Redis 3.2 compatibility .' + returned: success + type: str + reservedIpRange: + description: + - The CIDR range of internal addresses that are reserved for this instance. + If not provided, the service will choose an unused /29 block, for example, + 10.0.0.0/29 or 192.168.0.0/29. Ranges must be unique and non-overlapping with + existing subnets in an authorized network. + returned: success + type: str + tier: + description: + - 'The service tier of the instance. Must be one of these values: - BASIC: standalone + instance - STANDARD_HA: highly available primary/replica instances .' + returned: success + type: str + region: + description: + - The name of the Redis region of the instance. + returned: success + type: str +''' + +################################################################################ +# Imports +################################################################################ +from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest +import json + +################################################################################ +# Main +################################################################################ + + +def main(): + module = GcpModule(argument_spec=dict(region=dict(required=True, type='str'))) + + if not module.params['scopes']: + module.params['scopes'] = ['https://www.googleapis.com/auth/cloud-platform'] + + return_value = {'resources': fetch_list(module, collection(module))} + module.exit_json(**return_value) + + +def collection(module): + return "https://redis.googleapis.com/v1/projects/{project}/locations/{region}/instances".format(**module.params) + + +def fetch_list(module, link): + auth = GcpSession(module, 'redis') + return auth.list(link, return_if_object, array_name='instances') + + +def return_if_object(module, response): + # If not found, return nothing. + if response.status_code == 404: + return None + + # If no content, return nothing. 
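+    # (Note: GcpSession.list above is expected to invoke this callback once
+    # per page and merge the 'instances' arrays; an assumption based on the
+    # shared gcp_utils helpers, not verified here.)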
+ if response.status_code == 204: + return None + + try: + module.raise_for_status(response) + result = response.json() + except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst: + module.fail_json(msg="Invalid JSON response with error: %s" % inst) + + if navigate_hash(result, ['error', 'errors']): + module.fail_json(msg=navigate_hash(result, ['error', 'errors'])) + + return result + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/cloud/google/gcp_resourcemanager_project_facts.py b/plugins/modules/cloud/google/gcp_resourcemanager_project_facts.py new file mode 120000 index 0000000000..5766332a37 --- /dev/null +++ b/plugins/modules/cloud/google/gcp_resourcemanager_project_facts.py @@ -0,0 +1 @@ +gcp_resourcemanager_project_info.py \ No newline at end of file diff --git a/plugins/modules/cloud/google/gcp_resourcemanager_project_info.py b/plugins/modules/cloud/google/gcp_resourcemanager_project_info.py new file mode 100644 index 0000000000..8804bfa402 --- /dev/null +++ b/plugins/modules/cloud/google/gcp_resourcemanager_project_info.py @@ -0,0 +1,217 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (C) 2017 Google +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** AUTO GENERATED CODE *** +# +# ---------------------------------------------------------------------------- +# +# This file is automatically generated by Magic Modules and manual +# changes will be clobbered when the file is regenerated. +# +# Please read more about how to change this file at +# https://www.github.com/GoogleCloudPlatform/magic-modules +# +# ---------------------------------------------------------------------------- + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +################################################################################ +# Documentation +################################################################################ + +ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: gcp_resourcemanager_project_info +description: +- Gather info for GCP Project +short_description: Gather info for GCP Project +author: Google Inc. (@googlecloudplatform) +requirements: +- python >= 2.6 +- requests >= 2.18.4 +- google-auth >= 1.3.0 +options: + project: + description: + - The Google Cloud Platform project to use. + type: str + auth_kind: + description: + - The type of credential used. + type: str + required: true + choices: + - application + - machineaccount + - serviceaccount + service_account_contents: + description: + - The contents of a Service Account JSON file, either in a dictionary or as a + JSON string that represents it. + type: jsonarg + service_account_file: + description: + - The path of a Service Account JSON file if serviceaccount is selected as type. + type: path + service_account_email: + description: + - An optional service account email address if machineaccount is selected and + the user does not wish to use the default email. + type: str + scopes: + description: + - Array of scopes to be used + type: list + env_type: + description: + - Specifies which Ansible environment you're running this module within. + - This should not be set unless you know what you're doing. + - This only alters the User Agent string for any API requests. 
+ type: str +notes: +- for authentication, you can set service_account_file using the C(gcp_service_account_file) + env variable. +- for authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS) + env variable. +- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL) + env variable. +- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable. +- For authentication, you can set scopes using the C(GCP_SCOPES) env variable. +- Environment variables values will only be used if the playbook values are not set. +- The I(service_account_email) and I(service_account_file) options are mutually exclusive. +''' + +EXAMPLES = ''' +- name: get info on a project + gcp_resourcemanager_project_info: + project: test_project + auth_kind: serviceaccount + service_account_file: "/tmp/auth.pem" +''' + +RETURN = ''' +resources: + description: List of resources + returned: always + type: complex + contains: + number: + description: + - Number uniquely identifying the project. + returned: success + type: int + lifecycleState: + description: + - The Project lifecycle state. + returned: success + type: str + name: + description: + - 'The user-assigned display name of the Project. It must be 4 to 30 characters. + Allowed characters are: lowercase and uppercase letters, numbers, hyphen, + single-quote, double-quote, space, and exclamation point.' + returned: success + type: str + createTime: + description: + - Time of creation. + returned: success + type: str + labels: + description: + - The labels associated with this Project. + - 'Label keys must be between 1 and 63 characters long and must conform to the + following regular expression: `[a-z]([-a-z0-9]*[a-z0-9])?`.' + - Label values must be between 0 and 63 characters long and must conform to + the regular expression `([a-z]([-a-z0-9]*[a-z0-9])?)?`. + - No more than 256 labels can be associated with a given resource. + - Clients should store labels in a representation such as JSON that does not + depend on specific characters being disallowed . + returned: success + type: dict + parent: + description: + - A parent organization. + returned: success + type: complex + contains: + type: + description: + - Must be organization. + returned: success + type: str + id: + description: + - Id of the organization. + returned: success + type: str + id: + description: + - The unique, user-assigned ID of the Project. It must be 6 to 30 lowercase + letters, digits, or hyphens. It must start with a letter. + - Trailing hyphens are prohibited. 
+ returned: success + type: str +''' + +################################################################################ +# Imports +################################################################################ +from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest +import json + +################################################################################ +# Main +################################################################################ + + +def main(): + module = GcpModule(argument_spec=dict()) + + if not module.params['scopes']: + module.params['scopes'] = ['https://www.googleapis.com/auth/cloud-platform'] + + return_value = {'resources': fetch_list(module, collection(module))} + module.exit_json(**return_value) + + +def collection(module): + return "https://cloudresourcemanager.googleapis.com/v1/projects".format(**module.params) + + +def fetch_list(module, link): + auth = GcpSession(module, 'resourcemanager') + return auth.list(link, return_if_object, array_name='projects') + + +def return_if_object(module, response): + # If not found, return nothing. + if response.status_code == 404: + return None + + # If no content, return nothing. + if response.status_code == 204: + return None + + try: + module.raise_for_status(response) + result = response.json() + except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst: + module.fail_json(msg="Invalid JSON response with error: %s" % inst) + + if navigate_hash(result, ['error', 'errors']): + module.fail_json(msg=navigate_hash(result, ['error', 'errors'])) + + return result + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/cloud/google/gcp_sourcerepo_repository_facts.py b/plugins/modules/cloud/google/gcp_sourcerepo_repository_facts.py new file mode 120000 index 0000000000..b6dc6a8d75 --- /dev/null +++ b/plugins/modules/cloud/google/gcp_sourcerepo_repository_facts.py @@ -0,0 +1 @@ +gcp_sourcerepo_repository_info.py \ No newline at end of file diff --git a/plugins/modules/cloud/google/gcp_sourcerepo_repository_info.py b/plugins/modules/cloud/google/gcp_sourcerepo_repository_info.py new file mode 100644 index 0000000000..42d210f134 --- /dev/null +++ b/plugins/modules/cloud/google/gcp_sourcerepo_repository_info.py @@ -0,0 +1,177 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (C) 2017 Google +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** AUTO GENERATED CODE *** +# +# ---------------------------------------------------------------------------- +# +# This file is automatically generated by Magic Modules and manual +# changes will be clobbered when the file is regenerated. 
+# +# Please read more about how to change this file at +# https://www.github.com/GoogleCloudPlatform/magic-modules +# +# ---------------------------------------------------------------------------- + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +################################################################################ +# Documentation +################################################################################ + +ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: gcp_sourcerepo_repository_info +description: +- Gather info for GCP Repository +short_description: Gather info for GCP Repository +author: Google Inc. (@googlecloudplatform) +requirements: +- python >= 2.6 +- requests >= 2.18.4 +- google-auth >= 1.3.0 +options: + project: + description: + - The Google Cloud Platform project to use. + type: str + auth_kind: + description: + - The type of credential used. + type: str + required: true + choices: + - application + - machineaccount + - serviceaccount + service_account_contents: + description: + - The contents of a Service Account JSON file, either in a dictionary or as a + JSON string that represents it. + type: jsonarg + service_account_file: + description: + - The path of a Service Account JSON file if serviceaccount is selected as type. + type: path + service_account_email: + description: + - An optional service account email address if machineaccount is selected and + the user does not wish to use the default email. + type: str + scopes: + description: + - Array of scopes to be used + type: list + env_type: + description: + - Specifies which Ansible environment you're running this module within. + - This should not be set unless you know what you're doing. + - This only alters the User Agent string for any API requests. + type: str +notes: +- for authentication, you can set service_account_file using the C(gcp_service_account_file) + env variable. +- for authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS) + env variable. +- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL) + env variable. +- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable. +- For authentication, you can set scopes using the C(GCP_SCOPES) env variable. +- Environment variables values will only be used if the playbook values are not set. +- The I(service_account_email) and I(service_account_file) options are mutually exclusive. +''' + +EXAMPLES = ''' +- name: get info on a repository + gcp_sourcerepo_repository_info: + project: test_project + auth_kind: serviceaccount + service_account_file: "/tmp/auth.pem" +''' + +RETURN = ''' +resources: + description: List of resources + returned: always + type: complex + contains: + name: + description: + - Resource name of the repository, of the form projects/{{project}}/repos/{{repo}}. + - The repo name may contain slashes. eg, projects/myproject/repos/name/with/slash + . + returned: success + type: str + url: + description: + - URL to clone the repository from Google Cloud Source Repositories. + returned: success + type: str + size: + description: + - The disk usage of the repo, in bytes. 
+ returned: success + type: int +''' + +################################################################################ +# Imports +################################################################################ +from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest +import json + +################################################################################ +# Main +################################################################################ + + +def main(): + module = GcpModule(argument_spec=dict()) + + if not module.params['scopes']: + module.params['scopes'] = ['https://www.googleapis.com/auth/cloud-platform'] + + return_value = {'resources': fetch_list(module, collection(module))} + module.exit_json(**return_value) + + +def collection(module): + return "https://sourcerepo.googleapis.com/v1/projects/{project}/repos".format(**module.params) + + +def fetch_list(module, link): + auth = GcpSession(module, 'sourcerepo') + return auth.list(link, return_if_object, array_name='repos') + + +def return_if_object(module, response): + # If not found, return nothing. + if response.status_code == 404: + return None + + # If no content, return nothing. + if response.status_code == 204: + return None + + try: + module.raise_for_status(response) + result = response.json() + except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst: + module.fail_json(msg="Invalid JSON response with error: %s" % inst) + + if navigate_hash(result, ['error', 'errors']): + module.fail_json(msg=navigate_hash(result, ['error', 'errors'])) + + return result + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/cloud/google/gcp_spanner_database_facts.py b/plugins/modules/cloud/google/gcp_spanner_database_facts.py new file mode 120000 index 0000000000..abadc34b30 --- /dev/null +++ b/plugins/modules/cloud/google/gcp_spanner_database_facts.py @@ -0,0 +1 @@ +gcp_spanner_database_info.py \ No newline at end of file diff --git a/plugins/modules/cloud/google/gcp_spanner_database_info.py b/plugins/modules/cloud/google/gcp_spanner_database_info.py new file mode 100644 index 0000000000..f7fd9d53bb --- /dev/null +++ b/plugins/modules/cloud/google/gcp_spanner_database_info.py @@ -0,0 +1,191 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (C) 2017 Google +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** AUTO GENERATED CODE *** +# +# ---------------------------------------------------------------------------- +# +# This file is automatically generated by Magic Modules and manual +# changes will be clobbered when the file is regenerated. 
+# +# Please read more about how to change this file at +# https://www.github.com/GoogleCloudPlatform/magic-modules +# +# ---------------------------------------------------------------------------- + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +################################################################################ +# Documentation +################################################################################ + +ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: gcp_spanner_database_info +description: +- Gather info for GCP Database +short_description: Gather info for GCP Database +author: Google Inc. (@googlecloudplatform) +requirements: +- python >= 2.6 +- requests >= 2.18.4 +- google-auth >= 1.3.0 +options: + instance: + description: + - The instance to create the database on. + - 'This field represents a link to a Instance resource in GCP. It can be specified + in two ways. First, you can place a dictionary with key ''name'' and value of + your resource''s name Alternatively, you can add `register: name-of-resource` + to a gcp_spanner_instance task and then set this instance field to "{{ name-of-resource + }}"' + required: true + type: dict + project: + description: + - The Google Cloud Platform project to use. + type: str + auth_kind: + description: + - The type of credential used. + type: str + required: true + choices: + - application + - machineaccount + - serviceaccount + service_account_contents: + description: + - The contents of a Service Account JSON file, either in a dictionary or as a + JSON string that represents it. + type: jsonarg + service_account_file: + description: + - The path of a Service Account JSON file if serviceaccount is selected as type. + type: path + service_account_email: + description: + - An optional service account email address if machineaccount is selected and + the user does not wish to use the default email. + type: str + scopes: + description: + - Array of scopes to be used + type: list + env_type: + description: + - Specifies which Ansible environment you're running this module within. + - This should not be set unless you know what you're doing. + - This only alters the User Agent string for any API requests. + type: str +notes: +- for authentication, you can set service_account_file using the C(gcp_service_account_file) + env variable. +- for authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS) + env variable. +- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL) + env variable. +- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable. +- For authentication, you can set scopes using the C(GCP_SCOPES) env variable. +- Environment variables values will only be used if the playbook values are not set. +- The I(service_account_email) and I(service_account_file) options are mutually exclusive. +''' + +EXAMPLES = ''' +- name: get info on a database + gcp_spanner_database_info: + instance: "{{ instance }}" + project: test_project + auth_kind: serviceaccount + service_account_file: "/tmp/auth.pem" +''' + +RETURN = ''' +resources: + description: List of resources + returned: always + type: complex + contains: + name: + description: + - A unique identifier for the database, which cannot be changed after the instance + is created. Values are of the form [a-z][-a-z0-9]*[a-z0-9]. 
+ returned: success + type: str + extraStatements: + description: + - 'An optional list of DDL statements to run inside the newly created database. + Statements can create tables, indexes, etc. These statements execute atomically + with the creation of the database: if there is an error in any statement, + the database is not created.' + returned: success + type: list + instance: + description: + - The instance to create the database on. + returned: success + type: dict +''' + +################################################################################ +# Imports +################################################################################ +from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest, replace_resource_dict +import json + +################################################################################ +# Main +################################################################################ + + +def main(): + module = GcpModule(argument_spec=dict(instance=dict(required=True, type='dict'))) + + if not module.params['scopes']: + module.params['scopes'] = ['https://www.googleapis.com/auth/spanner.admin'] + + return_value = {'resources': fetch_list(module, collection(module))} + module.exit_json(**return_value) + + +def collection(module): + res = {'project': module.params['project'], 'instance': replace_resource_dict(module.params['instance'], 'name')} + return "https://spanner.googleapis.com/v1/projects/{project}/instances/{instance}/databases".format(**res) + + +def fetch_list(module, link): + auth = GcpSession(module, 'spanner') + return auth.list(link, return_if_object, array_name='databases') + + +def return_if_object(module, response): + # If not found, return nothing. + if response.status_code == 404: + return None + + # If no content, return nothing. + if response.status_code == 204: + return None + + try: + module.raise_for_status(response) + result = response.json() + except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst: + module.fail_json(msg="Invalid JSON response with error: %s" % inst) + + if navigate_hash(result, ['error', 'errors']): + module.fail_json(msg=navigate_hash(result, ['error', 'errors'])) + + return result + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/cloud/google/gcp_spanner_instance_facts.py b/plugins/modules/cloud/google/gcp_spanner_instance_facts.py new file mode 120000 index 0000000000..1c1e47220a --- /dev/null +++ b/plugins/modules/cloud/google/gcp_spanner_instance_facts.py @@ -0,0 +1 @@ +gcp_spanner_instance_info.py \ No newline at end of file diff --git a/plugins/modules/cloud/google/gcp_spanner_instance_info.py b/plugins/modules/cloud/google/gcp_spanner_instance_info.py new file mode 100644 index 0000000000..b2441bcf76 --- /dev/null +++ b/plugins/modules/cloud/google/gcp_spanner_instance_info.py @@ -0,0 +1,193 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (C) 2017 Google +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** AUTO GENERATED CODE *** +# +# ---------------------------------------------------------------------------- +# +# This file is automatically generated by Magic Modules and manual +# changes will be clobbered when the file is regenerated. 
+#
+# Please read more about how to change this file at
+# https://www.github.com/GoogleCloudPlatform/magic-modules
+#
+# ----------------------------------------------------------------------------
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+################################################################################
+# Documentation
+################################################################################
+
+ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'}
+
+DOCUMENTATION = '''
+---
+module: gcp_spanner_instance_info
+description:
+- Gather info for GCP Instance
+short_description: Gather info for GCP Instance
+author: Google Inc. (@googlecloudplatform)
+requirements:
+- python >= 2.6
+- requests >= 2.18.4
+- google-auth >= 1.3.0
+options:
+  project:
+    description:
+    - The Google Cloud Platform project to use.
+    type: str
+  auth_kind:
+    description:
+    - The type of credential used.
+    type: str
+    required: true
+    choices:
+    - application
+    - machineaccount
+    - serviceaccount
+  service_account_contents:
+    description:
+    - The contents of a Service Account JSON file, either in a dictionary or as a
+      JSON string that represents it.
+    type: jsonarg
+  service_account_file:
+    description:
+    - The path of a Service Account JSON file if serviceaccount is selected as type.
+    type: path
+  service_account_email:
+    description:
+    - An optional service account email address if machineaccount is selected and
+      the user does not wish to use the default email.
+    type: str
+  scopes:
+    description:
+    - Array of scopes to be used.
+    type: list
+  env_type:
+    description:
+    - Specifies which Ansible environment you're running this module within.
+    - This should not be set unless you know what you're doing.
+    - This only alters the User Agent string for any API requests.
+    type: str
+notes:
+- For authentication, you can set service_account_file using the C(GCP_SERVICE_ACCOUNT_FILE)
+  env variable.
+- For authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS)
+  env variable.
+- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL)
+  env variable.
+- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable.
+- For authentication, you can set scopes using the C(GCP_SCOPES) env variable.
+- Environment variable values will only be used if the playbook values are not set.
+- The I(service_account_email) and I(service_account_file) options are mutually exclusive.
+'''
+
+EXAMPLES = '''
+- name: get info on an instance
+  gcp_spanner_instance_info:
+    project: test_project
+    auth_kind: serviceaccount
+    service_account_file: "/tmp/auth.pem"
+'''
+
+RETURN = '''
+resources:
+  description: List of resources
+  returned: always
+  type: complex
+  contains:
+    name:
+      description:
+      - A unique identifier for the instance, which cannot be changed after the instance
+        is created. The name must be between 6 and 30 characters in length.
+      returned: success
+      type: str
+    config:
+      description:
+      - The name of the instance's configuration (similar but not quite the same as
+        a region) which defines the geographic placement and replication of your
+        databases in this instance. It determines where your data is stored.
+        Values are typically of the form `regional-europe-west1`, `us-central`, etc.
+ - In order to obtain a valid list please consult the [Configuration section + of the docs](U(https://cloud.google.com/spanner/docs/instances)). + returned: success + type: str + displayName: + description: + - The descriptive name for this instance as it appears in UIs. Must be unique + per project and between 4 and 30 characters in length. + returned: success + type: str + nodeCount: + description: + - The number of nodes allocated to this instance. + returned: success + type: int + labels: + description: + - 'An object containing a list of "key": value pairs.' + - 'Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.' + returned: success + type: dict +''' + +################################################################################ +# Imports +################################################################################ +from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest +import json + +################################################################################ +# Main +################################################################################ + + +def main(): + module = GcpModule(argument_spec=dict()) + + if not module.params['scopes']: + module.params['scopes'] = ['https://www.googleapis.com/auth/spanner.admin'] + + return_value = {'resources': fetch_list(module, collection(module))} + module.exit_json(**return_value) + + +def collection(module): + return "https://spanner.googleapis.com/v1/projects/{project}/instances".format(**module.params) + + +def fetch_list(module, link): + auth = GcpSession(module, 'spanner') + return auth.list(link, return_if_object, array_name='instances') + + +def return_if_object(module, response): + # If not found, return nothing. + if response.status_code == 404: + return None + + # If no content, return nothing. + if response.status_code == 204: + return None + + try: + module.raise_for_status(response) + result = response.json() + except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst: + module.fail_json(msg="Invalid JSON response with error: %s" % inst) + + if navigate_hash(result, ['error', 'errors']): + module.fail_json(msg=navigate_hash(result, ['error', 'errors'])) + + return result + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/cloud/google/gcp_sql_database_facts.py b/plugins/modules/cloud/google/gcp_sql_database_facts.py new file mode 120000 index 0000000000..de080a71eb --- /dev/null +++ b/plugins/modules/cloud/google/gcp_sql_database_facts.py @@ -0,0 +1 @@ +gcp_sql_database_info.py \ No newline at end of file diff --git a/plugins/modules/cloud/google/gcp_sql_database_info.py b/plugins/modules/cloud/google/gcp_sql_database_info.py new file mode 100644 index 0000000000..f1b212504a --- /dev/null +++ b/plugins/modules/cloud/google/gcp_sql_database_info.py @@ -0,0 +1,193 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (C) 2017 Google +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** AUTO GENERATED CODE *** +# +# ---------------------------------------------------------------------------- +# +# This file is automatically generated by Magic Modules and manual +# changes will be clobbered when the file is regenerated. 
+# +# Please read more about how to change this file at +# https://www.github.com/GoogleCloudPlatform/magic-modules +# +# ---------------------------------------------------------------------------- + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +################################################################################ +# Documentation +################################################################################ + +ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: gcp_sql_database_info +description: +- Gather info for GCP Database +short_description: Gather info for GCP Database +author: Google Inc. (@googlecloudplatform) +requirements: +- python >= 2.6 +- requests >= 2.18.4 +- google-auth >= 1.3.0 +options: + instance: + description: + - The name of the Cloud SQL instance. This does not include the project ID. + required: true + type: str + project: + description: + - The Google Cloud Platform project to use. + type: str + auth_kind: + description: + - The type of credential used. + type: str + required: true + choices: + - application + - machineaccount + - serviceaccount + service_account_contents: + description: + - The contents of a Service Account JSON file, either in a dictionary or as a + JSON string that represents it. + type: jsonarg + service_account_file: + description: + - The path of a Service Account JSON file if serviceaccount is selected as type. + type: path + service_account_email: + description: + - An optional service account email address if machineaccount is selected and + the user does not wish to use the default email. + type: str + scopes: + description: + - Array of scopes to be used + type: list + env_type: + description: + - Specifies which Ansible environment you're running this module within. + - This should not be set unless you know what you're doing. + - This only alters the User Agent string for any API requests. + type: str +notes: +- for authentication, you can set service_account_file using the C(gcp_service_account_file) + env variable. +- for authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS) + env variable. +- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL) + env variable. +- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable. +- For authentication, you can set scopes using the C(GCP_SCOPES) env variable. +- Environment variables values will only be used if the playbook values are not set. +- The I(service_account_email) and I(service_account_file) options are mutually exclusive. +''' + +EXAMPLES = ''' +- name: get info on a database + gcp_sql_database_info: + instance: "{{ instance.name }}" + project: test_project + auth_kind: serviceaccount + service_account_file: "/tmp/auth.pem" +''' + +RETURN = ''' +resources: + description: List of resources + returned: always + type: complex + contains: + charset: + description: + - The charset value. See MySQL's [Supported Character Sets and Collations](U(https://dev.mysql.com/doc/refman/5.7/en/charset-charsets.html)) + and Postgres' [Character Set Support](U(https://www.postgresql.org/docs/9.6/static/multibyte.html)) + for more details and supported values. Postgres databases only support a value + of `UTF8` at creation time. + returned: success + type: str + collation: + description: + - The collation value. 
See MySQL's [Supported Character Sets and Collations](U(https://dev.mysql.com/doc/refman/5.7/en/charset-charsets.html)) + and Postgres' [Collation Support](U(https://www.postgresql.org/docs/9.6/static/collation.html)) + for more details and supported values. Postgres databases only support a value + of `en_US.UTF8` at creation time. + returned: success + type: str + name: + description: + - The name of the database in the Cloud SQL instance. + - This does not include the project ID or instance name. + returned: success + type: str + instance: + description: + - The name of the Cloud SQL instance. This does not include the project ID. + returned: success + type: str +''' + +################################################################################ +# Imports +################################################################################ +from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest +import json + +################################################################################ +# Main +################################################################################ + + +def main(): + module = GcpModule(argument_spec=dict(instance=dict(required=True, type='str'))) + + if not module.params['scopes']: + module.params['scopes'] = ['https://www.googleapis.com/auth/sqlservice.admin'] + + return_value = {'resources': fetch_list(module, collection(module))} + module.exit_json(**return_value) + + +def collection(module): + return "https://www.googleapis.com/sql/v1beta4/projects/{project}/instances/{instance}/databases".format(**module.params) + + +def fetch_list(module, link): + auth = GcpSession(module, 'sql') + return auth.list(link, return_if_object, array_name='items') + + +def return_if_object(module, response): + # If not found, return nothing. + if response.status_code == 404: + return None + + # If no content, return nothing. + if response.status_code == 204: + return None + + try: + module.raise_for_status(response) + result = response.json() + except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst: + module.fail_json(msg="Invalid JSON response with error: %s" % inst) + + if navigate_hash(result, ['error', 'errors']): + module.fail_json(msg=navigate_hash(result, ['error', 'errors'])) + + return result + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/cloud/google/gcp_sql_instance_facts.py b/plugins/modules/cloud/google/gcp_sql_instance_facts.py new file mode 120000 index 0000000000..c6c2c5f386 --- /dev/null +++ b/plugins/modules/cloud/google/gcp_sql_instance_facts.py @@ -0,0 +1 @@ +gcp_sql_instance_info.py \ No newline at end of file diff --git a/plugins/modules/cloud/google/gcp_sql_instance_info.py b/plugins/modules/cloud/google/gcp_sql_instance_info.py new file mode 100644 index 0000000000..e3d125fabd --- /dev/null +++ b/plugins/modules/cloud/google/gcp_sql_instance_info.py @@ -0,0 +1,470 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (C) 2017 Google +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** AUTO GENERATED CODE *** +# +# ---------------------------------------------------------------------------- +# +# This file is automatically generated by Magic Modules and manual +# changes will be clobbered when the file is regenerated. 
+# +# Please read more about how to change this file at +# https://www.github.com/GoogleCloudPlatform/magic-modules +# +# ---------------------------------------------------------------------------- + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +################################################################################ +# Documentation +################################################################################ + +ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: gcp_sql_instance_info +description: +- Gather info for GCP Instance +short_description: Gather info for GCP Instance +author: Google Inc. (@googlecloudplatform) +requirements: +- python >= 2.6 +- requests >= 2.18.4 +- google-auth >= 1.3.0 +options: + project: + description: + - The Google Cloud Platform project to use. + type: str + auth_kind: + description: + - The type of credential used. + type: str + required: true + choices: + - application + - machineaccount + - serviceaccount + service_account_contents: + description: + - The contents of a Service Account JSON file, either in a dictionary or as a + JSON string that represents it. + type: jsonarg + service_account_file: + description: + - The path of a Service Account JSON file if serviceaccount is selected as type. + type: path + service_account_email: + description: + - An optional service account email address if machineaccount is selected and + the user does not wish to use the default email. + type: str + scopes: + description: + - Array of scopes to be used + type: list + env_type: + description: + - Specifies which Ansible environment you're running this module within. + - This should not be set unless you know what you're doing. + - This only alters the User Agent string for any API requests. + type: str +notes: +- for authentication, you can set service_account_file using the C(gcp_service_account_file) + env variable. +- for authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS) + env variable. +- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL) + env variable. +- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable. +- For authentication, you can set scopes using the C(GCP_SCOPES) env variable. +- Environment variables values will only be used if the playbook values are not set. +- The I(service_account_email) and I(service_account_file) options are mutually exclusive. +''' + +EXAMPLES = ''' +- name: get info on an instance + gcp_sql_instance_info: + project: test_project + auth_kind: serviceaccount + service_account_file: "/tmp/auth.pem" +''' + +RETURN = ''' +resources: + description: List of resources + returned: always + type: complex + contains: + backendType: + description: + - "* FIRST_GEN: First Generation instance. MySQL only." + - "* SECOND_GEN: Second Generation instance or PostgreSQL instance." + - "* EXTERNAL: A database server that is not managed by Google." + returned: success + type: str + connectionName: + description: + - Connection name of the Cloud SQL instance used in connection strings. + returned: success + type: str + databaseVersion: + description: + - The database engine type and version. For First Generation instances, can + be MYSQL_5_5, or MYSQL_5_6. For Second Generation instances, can be MYSQL_5_6 + or MYSQL_5_7. Defaults to MYSQL_5_6. 
+ - 'PostgreSQL instances: POSTGRES_9_6 The databaseVersion property can not be + changed after instance creation.' + returned: success + type: str + failoverReplica: + description: + - The name and status of the failover replica. This property is applicable only + to Second Generation instances. + returned: success + type: complex + contains: + available: + description: + - The availability status of the failover replica. A false status indicates + that the failover replica is out of sync. The master can only failover + to the failover replica when the status is true. + returned: success + type: bool + name: + description: + - The name of the failover replica. If specified at instance creation, a + failover replica is created for the instance. The name doesn't include + the project ID. This property is applicable only to Second Generation + instances. + returned: success + type: str + instanceType: + description: + - The instance type. This can be one of the following. + - "* CLOUD_SQL_INSTANCE: A Cloud SQL instance that is not replicating from a + master." + - "* ON_PREMISES_INSTANCE: An instance running on the customer's premises." + - "* READ_REPLICA_INSTANCE: A Cloud SQL instance configured as a read-replica." + returned: success + type: str + ipAddresses: + description: + - The assigned IP addresses for the instance. + returned: success + type: complex + contains: + ipAddress: + description: + - The IP address assigned. + returned: success + type: str + timeToRetire: + description: + - The due time for this IP to be retired in RFC 3339 format, for example + 2012-11-15T16:19:00.094Z. This field is only available when the IP is + scheduled to be retired. + returned: success + type: str + type: + description: + - The type of this IP address. A PRIMARY address is an address that can + accept incoming connections. An OUTGOING address is the source address + of connections originating from the instance, if supported. + returned: success + type: str + ipv6Address: + description: + - The IPv6 address assigned to the instance. This property is applicable only + to First Generation instances. + returned: success + type: str + masterInstanceName: + description: + - The name of the instance which will act as master in the replication setup. + returned: success + type: str + maxDiskSize: + description: + - The maximum disk size of the instance in bytes. + returned: success + type: int + name: + description: + - Name of the Cloud SQL instance. This does not include the project ID. + returned: success + type: str + region: + description: + - The geographical region. Defaults to us-central or us-central1 depending on + the instance type (First Generation or Second Generation/PostgreSQL). + returned: success + type: str + replicaConfiguration: + description: + - Configuration specific to failover replicas and read replicas. + returned: success + type: complex + contains: + failoverTarget: + description: + - Specifies if the replica is the failover target. If the field is set to + true the replica will be designated as a failover replica. + - In case the master instance fails, the replica instance will be promoted + as the new master instance. + - Only one replica can be specified as failover target, and the replica + has to be in different zone with the master instance. + returned: success + type: bool + mysqlReplicaConfiguration: + description: + - MySQL specific configuration when replicating from a MySQL on-premises + master. 
Replication configuration information such as the username, password, + certificates, and keys are not stored in the instance metadata. The configuration + information is used only to set up the replication connection and is stored + by MySQL in a file named master.info in the data directory. + returned: success + type: complex + contains: + caCertificate: + description: + - PEM representation of the trusted CA's x509 certificate. + returned: success + type: str + clientCertificate: + description: + - PEM representation of the slave's x509 certificate . + returned: success + type: str + clientKey: + description: + - PEM representation of the slave's private key. The corresponding public + key is encoded in the client's certificate. + returned: success + type: str + connectRetryInterval: + description: + - Seconds to wait between connect retries. MySQL's default is 60 seconds. + returned: success + type: int + dumpFilePath: + description: + - Path to a SQL dump file in Google Cloud Storage from which the slave + instance is to be created. The URI is in the form gs://bucketName/fileName. + Compressed gzip files (.gz) are also supported. Dumps should have + the binlog coordinates from which replication should begin. This can + be accomplished by setting --master-data to 1 when using mysqldump. + returned: success + type: str + masterHeartbeatPeriod: + description: + - Interval in milliseconds between replication heartbeats. + returned: success + type: int + password: + description: + - The password for the replication connection. + returned: success + type: str + sslCipher: + description: + - A list of permissible ciphers to use for SSL encryption. + returned: success + type: str + username: + description: + - The username for the replication connection. + returned: success + type: str + verifyServerCertificate: + description: + - Whether or not to check the master's Common Name value in the certificate + that it sends during the SSL handshake. + returned: success + type: bool + replicaNames: + description: + - The replicas of the instance. + returned: success + type: list + serviceAccountEmailAddress: + description: + - The service account email address assigned to the instance. This property + is applicable only to Second Generation instances. + returned: success + type: str + settings: + description: + - The user settings. + returned: success + type: complex + contains: + databaseFlags: + description: + - The database flags passed to the instance at startup. + returned: success + type: complex + contains: + name: + description: + - The name of the flag. These flags are passed at instance startup, + so include both server options and system variables for MySQL. Flags + should be specified with underscores, not hyphens. + returned: success + type: str + value: + description: + - The value of the flag. Booleans should be set to on for true and off + for false. This field must be omitted if the flag doesn't take a value. + returned: success + type: str + ipConfiguration: + description: + - The settings for IP Management. This allows to enable or disable the instance + IP and manage which external networks can connect to the instance. The + IPv4 address cannot be disabled for Second Generation instances. + returned: success + type: complex + contains: + ipv4Enabled: + description: + - Whether the instance should be assigned an IP address or not. 
+ returned: success + type: bool + authorizedNetworks: + description: + - The list of external networks that are allowed to connect to the instance + using the IP. In CIDR notation, also known as 'slash' notation (e.g. + 192.168.100.0/24). + returned: success + type: complex + contains: + expirationTime: + description: + - The time when this access control entry expires in RFC 3339 format, + for example 2012-11-15T16:19:00.094Z. + returned: success + type: str + name: + description: + - An optional label to identify this entry. + returned: success + type: str + value: + description: + - The whitelisted value for the access control list. For example, + to grant access to a client from an external IP (IPv4 or IPv6) + address or subnet, use that address or subnet here. + returned: success + type: str + requireSsl: + description: + - Whether the mysqld should default to 'REQUIRE X509' for users connecting + over IP. + returned: success + type: bool + tier: + description: + - The tier or machine type for this instance, for example db-n1-standard-1. + For MySQL instances, this field determines whether the instance is Second + Generation (recommended) or First Generation. + returned: success + type: str + availabilityType: + description: + - The availabilityType define if your postgres instance is run zonal or + regional. + returned: success + type: str + backupConfiguration: + description: + - The daily backup configuration for the instance. + returned: success + type: complex + contains: + enabled: + description: + - Enable Autobackup for your instance. + returned: success + type: bool + binaryLogEnabled: + description: + - Whether binary log is enabled. If backup configuration is disabled, + binary log must be disabled as well. MySQL only. + returned: success + type: bool + startTime: + description: + - Define the backup start time in UTC (HH:MM) . + returned: success + type: str + settingsVersion: + description: + - The version of instance settings. This is a required field for update + method to make sure concurrent updates are handled properly. During update, + use the most recent settingsVersion value for this instance and do not + try to update this value. + returned: success + type: int +''' + +################################################################################ +# Imports +################################################################################ +from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest +import json + +################################################################################ +# Main +################################################################################ + + +def main(): + module = GcpModule(argument_spec=dict()) + + if not module.params['scopes']: + module.params['scopes'] = ['https://www.googleapis.com/auth/sqlservice.admin'] + + return_value = {'resources': fetch_list(module, collection(module))} + module.exit_json(**return_value) + + +def collection(module): + return "https://www.googleapis.com/sql/v1beta4/projects/{project}/instances".format(**module.params) + + +def fetch_list(module, link): + auth = GcpSession(module, 'sql') + return auth.list(link, return_if_object, array_name='items') + + +def return_if_object(module, response): + # If not found, return nothing. + if response.status_code == 404: + return None + + # If no content, return nothing. 
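+    # (HTTP 204 means the request succeeded but the response body is empty,
+    # so there is no JSON document to parse.)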
+ if response.status_code == 204: + return None + + try: + module.raise_for_status(response) + result = response.json() + except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst: + module.fail_json(msg="Invalid JSON response with error: %s" % inst) + + if navigate_hash(result, ['error', 'errors']): + module.fail_json(msg=navigate_hash(result, ['error', 'errors'])) + + return result + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/cloud/google/gcp_sql_user_facts.py b/plugins/modules/cloud/google/gcp_sql_user_facts.py new file mode 120000 index 0000000000..44488004a4 --- /dev/null +++ b/plugins/modules/cloud/google/gcp_sql_user_facts.py @@ -0,0 +1 @@ +gcp_sql_user_info.py \ No newline at end of file diff --git a/plugins/modules/cloud/google/gcp_sql_user_info.py b/plugins/modules/cloud/google/gcp_sql_user_info.py new file mode 100644 index 0000000000..a1cacdfad8 --- /dev/null +++ b/plugins/modules/cloud/google/gcp_sql_user_info.py @@ -0,0 +1,194 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (C) 2017 Google +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** AUTO GENERATED CODE *** +# +# ---------------------------------------------------------------------------- +# +# This file is automatically generated by Magic Modules and manual +# changes will be clobbered when the file is regenerated. +# +# Please read more about how to change this file at +# https://www.github.com/GoogleCloudPlatform/magic-modules +# +# ---------------------------------------------------------------------------- + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +################################################################################ +# Documentation +################################################################################ + +ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: gcp_sql_user_info +description: +- Gather info for GCP User +short_description: Gather info for GCP User +author: Google Inc. (@googlecloudplatform) +requirements: +- python >= 2.6 +- requests >= 2.18.4 +- google-auth >= 1.3.0 +options: + instance: + description: + - The name of the Cloud SQL instance. This does not include the project ID. + - 'This field represents a link to a Instance resource in GCP. It can be specified + in two ways. First, you can place a dictionary with key ''name'' and value of + your resource''s name Alternatively, you can add `register: name-of-resource` + to a gcp_sql_instance task and then set this instance field to "{{ name-of-resource + }}"' + required: true + type: dict + project: + description: + - The Google Cloud Platform project to use. + type: str + auth_kind: + description: + - The type of credential used. + type: str + required: true + choices: + - application + - machineaccount + - serviceaccount + service_account_contents: + description: + - The contents of a Service Account JSON file, either in a dictionary or as a + JSON string that represents it. + type: jsonarg + service_account_file: + description: + - The path of a Service Account JSON file if serviceaccount is selected as type. + type: path + service_account_email: + description: + - An optional service account email address if machineaccount is selected and + the user does not wish to use the default email. 
+ type: str + scopes: + description: + - Array of scopes to be used + type: list + env_type: + description: + - Specifies which Ansible environment you're running this module within. + - This should not be set unless you know what you're doing. + - This only alters the User Agent string for any API requests. + type: str +notes: +- for authentication, you can set service_account_file using the C(gcp_service_account_file) + env variable. +- for authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS) + env variable. +- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL) + env variable. +- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable. +- For authentication, you can set scopes using the C(GCP_SCOPES) env variable. +- Environment variables values will only be used if the playbook values are not set. +- The I(service_account_email) and I(service_account_file) options are mutually exclusive. +''' + +EXAMPLES = ''' +- name: get info on a user + gcp_sql_user_info: + instance: "{{ instance }}" + project: test_project + auth_kind: serviceaccount + service_account_file: "/tmp/auth.pem" +''' + +RETURN = ''' +resources: + description: List of resources + returned: always + type: complex + contains: + host: + description: + - The host name from which the user can connect. For insert operations, host + defaults to an empty string. For update operations, host is specified as part + of the request URL. The host name cannot be updated after insertion. + returned: success + type: str + name: + description: + - The name of the user in the Cloud SQL instance. + returned: success + type: str + instance: + description: + - The name of the Cloud SQL instance. This does not include the project ID. + returned: success + type: dict + password: + description: + - The password for the user. + returned: success + type: str +''' + +################################################################################ +# Imports +################################################################################ +from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest, replace_resource_dict +import json + +################################################################################ +# Main +################################################################################ + + +def main(): + module = GcpModule(argument_spec=dict(instance=dict(required=True, type='dict'))) + + if not module.params['scopes']: + module.params['scopes'] = ['https://www.googleapis.com/auth/sqlservice.admin'] + + return_value = {'resources': fetch_list(module, collection(module))} + module.exit_json(**return_value) + + +def collection(module): + res = {'project': module.params['project'], 'instance': replace_resource_dict(module.params['instance'], 'name')} + return "https://www.googleapis.com/sql/v1beta4/projects/{project}/instances/{instance}/users".format(**res) + + +def fetch_list(module, link): + auth = GcpSession(module, 'sql') + return auth.list(link, return_if_object, array_name='items') + + +def return_if_object(module, response): + # If not found, return nothing. + if response.status_code == 404: + return None + + # If no content, return nothing. 
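+    # (A 204 No Content response carries no body; returning None here hands
+    # the caller an empty result rather than a JSON decode failure.)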
+ if response.status_code == 204: + return None + + try: + module.raise_for_status(response) + result = response.json() + except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst: + module.fail_json(msg="Invalid JSON response with error: %s" % inst) + + if navigate_hash(result, ['error', 'errors']): + module.fail_json(msg=navigate_hash(result, ['error', 'errors'])) + + return result + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/cloud/google/gcp_target_proxy.py b/plugins/modules/cloud/google/gcp_target_proxy.py new file mode 100644 index 0000000000..90a401bada --- /dev/null +++ b/plugins/modules/cloud/google/gcp_target_proxy.py @@ -0,0 +1,295 @@ +#!/usr/bin/python +# Copyright 2017 Google Inc. +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['deprecated'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: gcp_target_proxy +short_description: Create, Update or Destroy a Target_Proxy. +description: + - Create, Update or Destroy a Target_Proxy. See + U(https://cloud.google.com/compute/docs/load-balancing/http/target-proxies) for an overview. + More details on the Target_Proxy API can be found at + U(https://cloud.google.com/compute/docs/reference/latest/targetHttpProxies#resource-representations). +requirements: + - "python >= 2.6" + - "google-api-python-client >= 1.6.2" + - "google-auth >= 0.9.0" + - "google-auth-httplib2 >= 0.0.2" +deprecated: + removed_in: "2.12" + why: Updated modules released with increased functionality + alternative: Use M(gcp_compute_target_http_proxy) instead. +notes: + - Currently only supports global HTTP proxy. +author: + - "Tom Melendez (@supertom) " +options: + target_proxy_name: + description: + - Name of the Target_Proxy. + required: true + target_proxy_type: + description: + - Type of Target_Proxy. HTTP, HTTPS or SSL. Only HTTP is currently supported. + required: true + url_map_name: + description: + - Name of the Url Map. Required if type is HTTP or HTTPS proxy. + required: false +''' + +EXAMPLES = ''' +- name: Create Minimum HTTP Target_Proxy + gcp_target_proxy: + service_account_email: "{{ service_account_email }}" + credentials_file: "{{ credentials_file }}" + project_id: "{{ project_id }}" + target_proxy_name: my-target_proxy + target_proxy_type: HTTP + url_map_name: my-url-map + state: present +''' + +RETURN = ''' +state: + description: state of the Target_Proxy + returned: Always. + type: str + sample: present +updated_target_proxy: + description: True if the target_proxy has been updated. Will not appear on + initial target_proxy creation. + returned: if the target_proxy has been updated. + type: bool + sample: true +target_proxy_name: + description: Name of the Target_Proxy + returned: Always + type: str + sample: my-target-proxy +target_proxy_type: + description: Type of Target_Proxy. One of HTTP, HTTPS or SSL. + returned: Always + type: str + sample: HTTP +target_proxy: + description: GCP Target_Proxy dictionary + returned: Always. Refer to GCP documentation for detailed field descriptions. + type: dict + sample: { "name": "my-target-proxy", "urlMap": "..." 
}
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.gcp import get_google_api_client, GCPUtils
+
+
+USER_AGENT_PRODUCT = 'ansible-target_proxy'
+USER_AGENT_VERSION = '0.0.1'
+
+
+def _build_target_proxy_dict(params, project_id=None):
+    """
+    Reformat the target proxy parameters from the Ansible task into the
+    dictionary shape the GCP API expects.
+
+    :param params: Params from AnsibleModule object
+    :type params: ``dict``
+
+    :param project_id: The GCP project ID.
+    :type project_id: ``str``
+
+    :return: dictionary suitable for submission to the GCP targetHttpProxies API.
+    :rtype: ``dict``
+    """
+    url = ''
+    if project_id:
+        url = GCPUtils.build_googleapi_url(project_id)
+    gcp_dict = GCPUtils.params_to_gcp_dict(params, 'target_proxy_name')
+    if 'urlMap' in gcp_dict:
+        # The API expects a full resource URL for the URL map, not just its name.
+        gcp_dict['urlMap'] = '%s/global/urlMaps/%s' % (url,
+                                                       gcp_dict['urlMap'])
+    return gcp_dict
+
+
+def get_target_http_proxy(client, name, project_id=None):
+    """
+    Get a Target HTTP Proxy from GCP.
+
+    :param client: An initialized GCE Compute Discovery resource.
+    :type client: :class: `googleapiclient.discovery.Resource`
+
+    :param name: Name of the Target Proxy.
+    :type name: ``str``
+
+    :param project_id: The GCP project ID.
+    :type project_id: ``str``
+
+    :return: The response dict from the GCP 'get' request.
+    :rtype: ``dict``
+    """
+    req = client.targetHttpProxies().get(project=project_id,
+                                         targetHttpProxy=name)
+    return GCPUtils.execute_api_client_req(req, raise_404=False)
+
+
+def create_target_http_proxy(client, params, project_id):
+    """
+    Create a new Target_Proxy.
+
+    :param client: An initialized GCE Compute Discovery resource.
+    :type client: :class: `googleapiclient.discovery.Resource`
+
+    :param params: Dictionary of arguments from AnsibleModule.
+    :type params: ``dict``
+
+    :param project_id: The GCP project ID.
+    :type project_id: ``str``
+
+    :return: Tuple with changed status and response dict
+    :rtype: ``tuple`` in the format of (bool, dict)
+    """
+    gcp_dict = _build_target_proxy_dict(params, project_id)
+    try:
+        req = client.targetHttpProxies().insert(project=project_id,
+                                                body=gcp_dict)
+        return_data = GCPUtils.execute_api_client_req(req, client, raw=False)
+        if not return_data:
+            return_data = get_target_http_proxy(client,
+                                                name=params['target_proxy_name'],
+                                                project_id=project_id)
+        return (True, return_data)
+    except Exception:
+        raise
+
+
+def delete_target_http_proxy(client, name, project_id):
+    """
+    Delete a Target_Proxy.
+
+    :param client: An initialized GCE Compute Discovery resource.
+    :type client: :class: `googleapiclient.discovery.Resource`
+
+    :param name: Name of the Target Proxy.
+    :type name: ``str``
+
+    :param project_id: The GCP project ID.
+    :type project_id: ``str``
+
+    :return: Tuple with changed status and response dict
+    :rtype: ``tuple`` in the format of (bool, dict)
+    """
+    try:
+        req = client.targetHttpProxies().delete(
+            project=project_id, targetHttpProxy=name)
+        return_data = GCPUtils.execute_api_client_req(req, client)
+        return (True, return_data)
+    except Exception:
+        raise
+
+
+def update_target_http_proxy(client, target_proxy, params, name, project_id):
+    """
+    Update an HTTP Target_Proxy. Currently only the Url Map can be updated.
+
+    If the target_proxy has not changed, the update will not occur.
+
+    :param client: An initialized GCE Compute Discovery resource.
+    :type client: :class: `googleapiclient.discovery.Resource`
+
+    :param target_proxy: The existing Target Proxy resource, as returned by GCP.
+    :type target_proxy: ``dict``
+
+    :param params: Dictionary of arguments from AnsibleModule.
+    :type params: ``dict``
+
+    :param name: Name of the Target Proxy.
+    :type name: ``str``
+
+    :param project_id: The GCP project ID.
+    :type project_id: ``str``
+
+    :return: Tuple with changed status and response dict
+    :rtype: ``tuple`` in the format of (bool, dict)
+    """
+    gcp_dict = _build_target_proxy_dict(params, project_id)
+
+    # The URL map is the only field this module can update, so it is the only
+    # field compared when deciding whether an update is needed.
+    if target_proxy['urlMap'] == gcp_dict['urlMap']:
+        return (False, 'no update necessary')
+
+    try:
+        req = client.targetHttpProxies().setUrlMap(project=project_id,
+                                                   targetHttpProxy=name,
+                                                   body={"urlMap": gcp_dict['urlMap']})
+        return_data = GCPUtils.execute_api_client_req(
+            req, client=client, raw=False)
+        return (True, return_data)
+    except Exception:
+        raise
+
+
+def main():
+    module = AnsibleModule(argument_spec=dict(
+        target_proxy_name=dict(required=True),
+        target_proxy_type=dict(required=True, choices=['HTTP']),
+        url_map_name=dict(required=False),
+        state=dict(required=True, choices=['absent', 'present']),
+        service_account_email=dict(),
+        service_account_permissions=dict(type='list'),
+        pem_file=dict(),
+        credentials_file=dict(),
+        project_id=dict(), ), )
+
+    client, conn_params = get_google_api_client(module, 'compute', user_agent_product=USER_AGENT_PRODUCT,
+                                                user_agent_version=USER_AGENT_VERSION)
+
+    params = {}
+    params['state'] = module.params.get('state')
+    params['target_proxy_name'] = module.params.get('target_proxy_name')
+    params['target_proxy_type'] = module.params.get('target_proxy_type')
+    params['url_map'] = module.params.get('url_map_name', None)
+
+    changed = False
+    json_output = {'state': params['state']}
+    target_proxy = get_target_http_proxy(client,
+                                         name=params['target_proxy_name'],
+                                         project_id=conn_params['project_id'])
+
+    if not target_proxy:
+        if params['state'] == 'absent':
+            # Doesn't exist in GCE, and state==absent.
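+            # (Failing here, rather than reporting "ok", surfaces a probable
+            # typo in the requested target_proxy_name to the user.)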
+ changed = False + module.fail_json( + msg="Cannot delete unknown target_proxy: %s" % + (params['target_proxy_name'])) + else: + # Create + changed, json_output['target_proxy'] = create_target_http_proxy(client, + params=params, + project_id=conn_params['project_id']) + elif params['state'] == 'absent': + # Delete + changed, json_output['target_proxy'] = delete_target_http_proxy(client, + name=params['target_proxy_name'], + project_id=conn_params['project_id']) + else: + changed, json_output['target_proxy'] = update_target_http_proxy(client, + target_proxy=target_proxy, + params=params, + name=params['target_proxy_name'], + project_id=conn_params['project_id']) + json_output['updated_target_proxy'] = changed + + json_output['changed'] = changed + json_output.update(params) + module.exit_json(**json_output) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/google/gcp_tpu_node_facts.py b/plugins/modules/cloud/google/gcp_tpu_node_facts.py new file mode 120000 index 0000000000..5e0ba332eb --- /dev/null +++ b/plugins/modules/cloud/google/gcp_tpu_node_facts.py @@ -0,0 +1 @@ +gcp_tpu_node_info.py \ No newline at end of file diff --git a/plugins/modules/cloud/google/gcp_tpu_node_info.py b/plugins/modules/cloud/google/gcp_tpu_node_info.py new file mode 100644 index 0000000000..dbaf202029 --- /dev/null +++ b/plugins/modules/cloud/google/gcp_tpu_node_info.py @@ -0,0 +1,251 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (C) 2017 Google +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** AUTO GENERATED CODE *** +# +# ---------------------------------------------------------------------------- +# +# This file is automatically generated by Magic Modules and manual +# changes will be clobbered when the file is regenerated. +# +# Please read more about how to change this file at +# https://www.github.com/GoogleCloudPlatform/magic-modules +# +# ---------------------------------------------------------------------------- + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +################################################################################ +# Documentation +################################################################################ + +ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: gcp_tpu_node_info +description: +- Gather info for GCP Node +short_description: Gather info for GCP Node +author: Google Inc. (@googlecloudplatform) +requirements: +- python >= 2.6 +- requests >= 2.18.4 +- google-auth >= 1.3.0 +options: + zone: + description: + - The GCP location for the TPU. + required: true + type: str + project: + description: + - The Google Cloud Platform project to use. + type: str + auth_kind: + description: + - The type of credential used. + type: str + required: true + choices: + - application + - machineaccount + - serviceaccount + service_account_contents: + description: + - The contents of a Service Account JSON file, either in a dictionary or as a + JSON string that represents it. + type: jsonarg + service_account_file: + description: + - The path of a Service Account JSON file if serviceaccount is selected as type. 
+ type: path + service_account_email: + description: + - An optional service account email address if machineaccount is selected and + the user does not wish to use the default email. + type: str + scopes: + description: + - Array of scopes to be used + type: list + env_type: + description: + - Specifies which Ansible environment you're running this module within. + - This should not be set unless you know what you're doing. + - This only alters the User Agent string for any API requests. + type: str +notes: +- for authentication, you can set service_account_file using the C(gcp_service_account_file) + env variable. +- for authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS) + env variable. +- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL) + env variable. +- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable. +- For authentication, you can set scopes using the C(GCP_SCOPES) env variable. +- Environment variables values will only be used if the playbook values are not set. +- The I(service_account_email) and I(service_account_file) options are mutually exclusive. +''' + +EXAMPLES = ''' +- name: get info on a node + gcp_tpu_node_info: + zone: us-central1-b + project: test_project + auth_kind: serviceaccount + service_account_file: "/tmp/auth.pem" +''' + +RETURN = ''' +resources: + description: List of resources + returned: always + type: complex + contains: + name: + description: + - The immutable name of the TPU. + returned: success + type: str + description: + description: + - The user-supplied description of the TPU. Maximum of 512 characters. + returned: success + type: str + acceleratorType: + description: + - The type of hardware accelerators associated with this node. + returned: success + type: str + tensorflowVersion: + description: + - The version of Tensorflow running in the Node. + returned: success + type: str + network: + description: + - The name of a network to peer the TPU node to. It must be a preexisting Compute + Engine network inside of the project on which this API has been activated. + If none is provided, "default" will be used. + returned: success + type: str + cidrBlock: + description: + - The CIDR block that the TPU node will use when selecting an IP address. This + CIDR block must be a /29 block; the Compute Engine networks API forbids a + smaller block, and using a larger block would be wasteful (a node can only + consume one IP address). + - Errors will occur if the CIDR block has already been used for a currently + existing TPU node, the CIDR block conflicts with any subnetworks in the user's + provided network, or the provided network is peered with another network that + is using that CIDR block. + returned: success + type: str + serviceAccount: + description: + - The service account used to run the tensor flow services within the node. + To share resources, including Google Cloud Storage data, with the Tensorflow + job running in the Node, this account must have permissions to that data. + returned: success + type: str + schedulingConfig: + description: + - Sets the scheduling options for this TPU instance. + returned: success + type: complex + contains: + preemptible: + description: + - Defines whether the TPU instance is preemptible. + returned: success + type: bool + networkEndpoints: + description: + - The network endpoints where TPU workers can be accessed and sent work. 
+ - It is recommended that Tensorflow clients of the node first reach out to the + first (index 0) entry. + returned: success + type: complex + contains: + ipAddress: + description: + - The IP address of this network endpoint. + returned: success + type: str + port: + description: + - The port of this network endpoint. + returned: success + type: int + labels: + description: + - Resource labels to represent user provided metadata. + returned: success + type: dict + zone: + description: + - The GCP location for the TPU. + returned: success + type: str +''' + +################################################################################ +# Imports +################################################################################ +from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest +import json + +################################################################################ +# Main +################################################################################ + + +def main(): + module = GcpModule(argument_spec=dict(zone=dict(required=True, type='str'))) + + if not module.params['scopes']: + module.params['scopes'] = ['https://www.googleapis.com/auth/cloud-platform'] + + return_value = {'resources': fetch_list(module, collection(module))} + module.exit_json(**return_value) + + +def collection(module): + return "https://tpu.googleapis.com/v1/projects/{project}/locations/{zone}/nodes".format(**module.params) + + +def fetch_list(module, link): + auth = GcpSession(module, 'tpu') + return auth.list(link, return_if_object, array_name='nodes') + + +def return_if_object(module, response): + # If not found, return nothing. + if response.status_code == 404: + return None + + # If no content, return nothing. + if response.status_code == 204: + return None + + try: + module.raise_for_status(response) + result = response.json() + except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst: + module.fail_json(msg="Invalid JSON response with error: %s" % inst) + + if navigate_hash(result, ['error', 'errors']): + module.fail_json(msg=navigate_hash(result, ['error', 'errors'])) + + return result + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/cloud/google/gcp_url_map.py b/plugins/modules/cloud/google/gcp_url_map.py new file mode 100644 index 0000000000..c7508ee97a --- /dev/null +++ b/plugins/modules/cloud/google/gcp_url_map.py @@ -0,0 +1,509 @@ +#!/usr/bin/python +# Copyright 2017 Google Inc. +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['deprecated'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: gcp_url_map +short_description: Create, Update or Destroy a Url_Map. +description: + - Create, Update or Destroy a Url_Map. See + U(https://cloud.google.com/compute/docs/load-balancing/http/url-map) for an overview. + More details on the Url_Map API can be found at + U(https://cloud.google.com/compute/docs/reference/latest/urlMaps#resource). +requirements: + - "python >= 2.6" + - "google-api-python-client >= 1.6.2" + - "google-auth >= 0.9.0" + - "google-auth-httplib2 >= 0.0.2" +notes: + - Only supports global Backend Services. + - Url_Map tests are not currently supported. 
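+  - Wildcards in host_rules are validated client-side before any API call is made;
+    C(*) must be the first character of the host pattern and must be followed by
+    C(.) or C(-).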
+author: + - "Tom Melendez (@supertom) " +deprecated: + removed_in: "2.12" + why: Updated modules released with increased functionality + alternative: Use M(gcp_compute_url_map) instead. +options: + url_map_name: + description: + - Name of the Url_Map. + required: true + default_service: + description: + - Default Backend Service if no host rules match. + required: true + host_rules: + description: + - The list of HostRules to use against the URL. Contains + a list of hosts and an associated path_matcher. + - The 'hosts' parameter is a list of host patterns to match. They + must be valid hostnames, except * will match any string of + ([a-z0-9-.]*). In that case, * must be the first character + and must be followed in the pattern by either - or .. + - The 'path_matcher' parameter is name of the PathMatcher to use + to match the path portion of the URL if the hostRule matches the URL's + host portion. + required: false + path_matchers: + description: + - The list of named PathMatchers to use against the URL. Contains + path_rules, which is a list of paths and an associated service. A + default_service can also be specified for each path_matcher. + - The 'name' parameter to which this path_matcher is referred by the + host_rule. + - The 'default_service' parameter is the name of the + BackendService resource. This will be used if none of the path_rules + defined by this path_matcher is matched by the URL's path portion. + - The 'path_rules' parameter is a list of dictionaries containing a + list of paths and a service to direct traffic to. Each path item must + start with / and the only place a * is allowed is at the end following + a /. The string fed to the path matcher does not include any text after + the first ? or #, and those chars are not allowed here. + required: false +''' + +EXAMPLES = ''' +- name: Create Minimal Url_Map + gcp_url_map: + service_account_email: "{{ service_account_email }}" + credentials_file: "{{ credentials_file }}" + project_id: "{{ project_id }}" + url_map_name: my-url_map + default_service: my-backend-service + state: present +- name: Create UrlMap with pathmatcher + gcp_url_map: + service_account_email: "{{ service_account_email }}" + credentials_file: "{{ credentials_file }}" + project_id: "{{ project_id }}" + url_map_name: my-url-map-pm + default_service: default-backend-service + path_matchers: + - name: 'path-matcher-one' + description: 'path matcher one' + default_service: 'bes-pathmatcher-one-default' + path_rules: + - service: 'my-one-bes' + paths: + - '/data' + - '/aboutus' + host_rules: + - hosts: + - '*.' + path_matcher: 'path-matcher-one' + state: "present" +''' + +RETURN = ''' +host_rules: + description: List of HostRules. + returned: If specified. + type: dict + sample: [ { hosts: ["*."], "path_matcher": "my-pm" } ] +path_matchers: + description: The list of named PathMatchers to use against the URL. + returned: If specified. + type: dict + sample: [ { "name": "my-pm", "path_rules": [ { "paths": [ "/data" ] } ], "service": "my-service" } ] +state: + description: state of the Url_Map + returned: Always. + type: str + sample: present +updated_url_map: + description: True if the url_map has been updated. Will not appear on + initial url_map creation. + returned: if the url_map has been updated. + type: bool + sample: true +url_map_name: + description: Name of the Url_Map + returned: Always + type: str + sample: my-url-map +url_map: + description: GCP Url_Map dictionary + returned: Always. Refer to GCP documentation for detailed field descriptions. 
+  type: dict
+  sample: { "name": "my-url-map", "hostRules": [...], "pathMatchers": [...] }
+'''
+
+try:
+    from ast import literal_eval
+    HAS_PYTHON26 = True
+except ImportError:
+    HAS_PYTHON26 = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.gcp import check_params, get_google_api_client, GCPUtils
+from ansible.module_utils.six import string_types
+
+
+USER_AGENT_PRODUCT = 'ansible-url_map'
+USER_AGENT_VERSION = '0.0.1'
+
+
+def _validate_params(params):
+    """
+    Validate url_map params.
+
+    This function calls _validate_host_rules_params to verify
+    the host_rules-specific parameters.
+
+    This function calls _validate_path_matchers_params to verify
+    the path_matchers-specific parameters.
+
+    :param params: Ansible dictionary containing configuration.
+    :type params: ``dict``
+
+    :return: True or raises ValueError
+    :rtype: ``bool`` or `class:ValueError`
+    """
+    fields = [
+        {'name': 'default_service', 'type': str, 'required': True},
+        {'name': 'host_rules', 'type': list},
+        {'name': 'path_matchers', 'type': list},
+    ]
+    try:
+        check_params(params, fields)
+        if 'path_matchers' in params and params['path_matchers'] is not None:
+            _validate_path_matcher_params(params['path_matchers'])
+        if 'host_rules' in params and params['host_rules'] is not None:
+            _validate_host_rules_params(params['host_rules'])
+    except Exception:
+        raise
+
+    return (True, '')
+
+
+def _validate_path_matcher_params(path_matchers):
+    """
+    Validate configuration for path_matchers.
+
+    :param path_matchers: Ansible dictionary containing path_matchers
+                          configuration (only).
+    :type path_matchers: ``dict``
+
+    :return: True or raises ValueError
+    :rtype: ``bool`` or `class:ValueError`
+    """
+    fields = [
+        {'name': 'name', 'type': str, 'required': True},
+        {'name': 'default_service', 'type': str, 'required': True},
+        {'name': 'path_rules', 'type': list, 'required': True},
+        {'name': 'max_rate', 'type': int},
+        {'name': 'max_rate_per_instance', 'type': float},
+    ]
+    pr_fields = [
+        {'name': 'service', 'type': str, 'required': True},
+        {'name': 'paths', 'type': list, 'required': True},
+    ]
+
+    if not path_matchers:
+        raise ValueError(('path_matchers should be a list. %s (%s) provided'
+                          % (path_matchers, type(path_matchers))))
+
+    for pm in path_matchers:
+        try:
+            check_params(pm, fields)
+            for pr in pm['path_rules']:
+                check_params(pr, pr_fields)
+                for path in pr['paths']:
+                    if not path.startswith('/'):
+                        raise ValueError("path for %s must start with /" % (
+                            pm['name']))
+        except Exception:
+            raise
+
+    return (True, '')
+
+
+def _validate_host_rules_params(host_rules):
+    """
+    Validate configuration for host_rules.
+
+    :param host_rules: Ansible dictionary containing host_rules
+                       configuration (only).
+    :type host_rules: ``dict``
+
+    :return: True or raises ValueError
+    :rtype: ``bool`` or `class:ValueError`
+    """
+    fields = [
+        {'name': 'path_matcher', 'type': str, 'required': True},
+    ]
+
+    if not host_rules:
+        raise ValueError('host_rules should be a list.')
+
+    for hr in host_rules:
+        try:
+            check_params(hr, fields)
+            for host in hr['hosts']:
+                if not isinstance(host, string_types):
+                    raise ValueError("host in hostrules must be a string")
+                elif '*' in host:
+                    if host.index('*') != 0:
+                        raise ValueError("wildcard must be first char in host, %s" % (
+                            host))
+                    else:
+                        if host[1] not in ['.', '-', ]:
+                            raise ValueError("wildcard must be followed by a '.' or '-', %s" % (
+                                host))
+
+        except Exception:
+            raise
+
+    return (True, '')
+
+
+def _build_path_matchers(path_matcher_list, project_id):
+    """
+    Reformat services in path matchers list.
+
+    Specifically, builds out URLs.
+
+    :param path_matcher_list: List of path matchers.
+    :type path_matcher_list: ``list`` of ``dict``
+
+    :param project_id: The GCP project ID.
+    :type project_id: ``str``
+
+    :return: list suitable for submission to GCP
+             UrlMap API Path Matchers list.
+    :rtype: ``list`` of ``dict``
+    """
+    url = ''
+    if project_id:
+        url = GCPUtils.build_googleapi_url(project_id)
+    for pm in path_matcher_list:
+        if 'defaultService' in pm:
+            pm['defaultService'] = '%s/global/backendServices/%s' % (url,
+                                                                     pm['defaultService'])
+        if 'pathRules' in pm:
+            for rule in pm['pathRules']:
+                if 'service' in rule:
+                    rule['service'] = '%s/global/backendServices/%s' % (url,
+                                                                        rule['service'])
+    return path_matcher_list
+
+
+def _build_url_map_dict(params, project_id=None):
+    """
+    Reformat services in Ansible Params.
+
+    :param params: Params from AnsibleModule object
+    :type params: ``dict``
+
+    :param project_id: The GCP project ID.
+    :type project_id: ``str``
+
+    :return: dictionary suitable for submission to GCP UrlMap API.
+    :rtype: ``dict``
+    """
+    url = ''
+    if project_id:
+        url = GCPUtils.build_googleapi_url(project_id)
+    gcp_dict = GCPUtils.params_to_gcp_dict(params, 'url_map_name')
+    if 'defaultService' in gcp_dict:
+        gcp_dict['defaultService'] = '%s/global/backendServices/%s' % (url,
+                                                                       gcp_dict['defaultService'])
+    if 'pathMatchers' in gcp_dict:
+        gcp_dict['pathMatchers'] = _build_path_matchers(gcp_dict['pathMatchers'], project_id)
+
+    return gcp_dict
+
+
+def get_url_map(client, name, project_id=None):
+    """
+    Get a Url_Map from GCP.
+
+    :param client: An initialized GCE Compute Discovery resource.
+    :type client:  :class: `googleapiclient.discovery.Resource`
+
+    :param name: Name of the Url Map.
+    :type name:  ``str``
+
+    :param project_id: The GCP project ID.
+    :type project_id:  ``str``
+
+    :return: A dict resp from the respective GCP 'get' request.
+    :rtype: ``dict``
+    """
+    try:
+        req = client.urlMaps().get(project=project_id, urlMap=name)
+        return GCPUtils.execute_api_client_req(req, raise_404=False)
+    except Exception:
+        raise
+
+
+def create_url_map(client, params, project_id):
+    """
+    Create a new Url_Map.
+
+    :param client: An initialized GCE Compute Discovery resource.
+    :type client:  :class: `googleapiclient.discovery.Resource`
+
+    :param params: Dictionary of arguments from AnsibleModule.
+    :type params:  ``dict``
+
+    :return: Tuple with changed status and response dict
+    :rtype: ``tuple`` in the format of (bool, dict)
+    """
+    gcp_dict = _build_url_map_dict(params, project_id)
+    try:
+        req = client.urlMaps().insert(project=project_id, body=gcp_dict)
+        return_data = GCPUtils.execute_api_client_req(req, client, raw=False)
+        if not return_data:
+            return_data = get_url_map(client,
+                                      name=params['url_map_name'],
+                                      project_id=project_id)
+        return (True, return_data)
+    except Exception:
+        raise
+
+
+def delete_url_map(client, name, project_id):
+    """
+    Delete a Url_Map.
+
+    :param client: An initialized GCE Compute Discovery resource.
+    :type client:  :class: `googleapiclient.discovery.Resource`
+
+    :param name: Name of the Url Map.
+    :type name:  ``str``
+
+    :param project_id: The GCP project ID.
+ :type project_id: ``str`` + + :return: Tuple with changed status and response dict + :rtype: ``tuple`` in the format of (bool, dict) + """ + try: + req = client.urlMaps().delete(project=project_id, urlMap=name) + return_data = GCPUtils.execute_api_client_req(req, client) + return (True, return_data) + except Exception: + raise + + +def update_url_map(client, url_map, params, name, project_id): + """ + Update a Url_Map. + + If the url_map has not changed, the update will not occur. + + :param client: An initialized GCE Compute Discovery resource. + :type client: :class: `googleapiclient.discovery.Resource` + + :param url_map: Name of the Url Map. + :type url_map: ``dict`` + + :param params: Dictionary of arguments from AnsibleModule. + :type params: ``dict`` + + :param name: Name of the Url Map. + :type name: ``str`` + + :param project_id: The GCP project ID. + :type project_id: ``str`` + + :return: Tuple with changed status and response dict + :rtype: ``tuple`` in the format of (bool, dict) + """ + gcp_dict = _build_url_map_dict(params, project_id) + + ans = GCPUtils.are_params_equal(url_map, gcp_dict) + if ans: + return (False, 'no update necessary') + + gcp_dict['fingerprint'] = url_map['fingerprint'] + try: + req = client.urlMaps().update(project=project_id, + urlMap=name, body=gcp_dict) + return_data = GCPUtils.execute_api_client_req(req, client=client, raw=False) + return (True, return_data) + except Exception: + raise + + +def main(): + module = AnsibleModule(argument_spec=dict( + url_map_name=dict(required=True), + state=dict(choices=['absent', 'present'], default='present'), + default_service=dict(required=True), + path_matchers=dict(type='list', required=False), + host_rules=dict(type='list', required=False), + service_account_email=dict(), + service_account_permissions=dict(type='list'), + pem_file=dict(), + credentials_file=dict(), + project_id=dict(), ), required_together=[ + ['path_matchers', 'host_rules'], ]) + + client, conn_params = get_google_api_client(module, 'compute', user_agent_product=USER_AGENT_PRODUCT, + user_agent_version=USER_AGENT_VERSION) + + params = {} + params['state'] = module.params.get('state') + params['url_map_name'] = module.params.get('url_map_name') + params['default_service'] = module.params.get('default_service') + if module.params.get('path_matchers'): + params['path_matchers'] = module.params.get('path_matchers') + if module.params.get('host_rules'): + params['host_rules'] = module.params.get('host_rules') + + try: + _validate_params(params) + except Exception as e: + module.fail_json(msg=e.message, changed=False) + + changed = False + json_output = {'state': params['state']} + url_map = get_url_map(client, + name=params['url_map_name'], + project_id=conn_params['project_id']) + + if not url_map: + if params['state'] == 'absent': + # Doesn't exist in GCE, and state==absent. 
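+            # Deleting a Url_Map that was never created is treated as an
+            # error (fail_json) rather than a silent no-op, matching the
+            # behaviour of the target proxy module earlier in this patch.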
+ changed = False + module.fail_json( + msg="Cannot delete unknown url_map: %s" % + (params['url_map_name'])) + else: + # Create + changed, json_output['url_map'] = create_url_map(client, + params=params, + project_id=conn_params['project_id']) + elif params['state'] == 'absent': + # Delete + changed, json_output['url_map'] = delete_url_map(client, + name=params['url_map_name'], + project_id=conn_params['project_id']) + else: + changed, json_output['url_map'] = update_url_map(client, + url_map=url_map, + params=params, + name=params['url_map_name'], + project_id=conn_params['project_id']) + json_output['updated_url_map'] = changed + + json_output['changed'] = changed + json_output.update(params) + module.exit_json(**json_output) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/google/gcpubsub.py b/plugins/modules/cloud/google/gcpubsub.py new file mode 100644 index 0000000000..7da8de764c --- /dev/null +++ b/plugins/modules/cloud/google/gcpubsub.py @@ -0,0 +1,332 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2016, Google Inc. +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: gcpubsub +short_description: Create and Delete Topics/Subscriptions, Publish and pull messages on PubSub +description: + - Create and Delete Topics/Subscriptions, Publish and pull messages on PubSub. + See U(https://cloud.google.com/pubsub/docs) for an overview. +requirements: + - google-auth >= 0.5.0 + - google-cloud-pubsub >= 0.22.0 +notes: + - Subscription pull happens before publish. You cannot publish and pull in the same task. +author: + - Tom Melendez (@supertom) +options: + topic: + description: + - GCP pubsub topic name. + - Only the name, not the full path, is required. + required: yes + subscription: + description: + - Dictionary containing a subscription name associated with a topic (required), along with optional ack_deadline, push_endpoint and pull. + For pulling from a subscription, message_ack (bool), max_messages (int) and return_immediate are available as subfields. + See subfields name, push_endpoint and ack_deadline for more information. + name: + description: Subfield of subscription. Required if subscription is specified. See examples. + ack_deadline: + description: Subfield of subscription. Not required. Default deadline for subscriptions to ACK the message before it is resent. See examples. + pull: + description: + - Subfield of subscription. Not required. If specified, messages will be retrieved from topic via the provided subscription name. + max_messages (int; default None; max number of messages to pull), message_ack (bool; default False; acknowledge the message) and return_immediately + (bool; default True, don't wait for messages to appear). If the messages are acknowledged, changed is set to True, otherwise, changed is False. + push_endpoint: + description: + - Subfield of subscription. Not required. If specified, message will be sent to an endpoint. + See U(https://cloud.google.com/pubsub/docs/advanced#push_endpoints) for more information. + publish: + description: + - List of dictionaries describing messages and attributes to be published. Dictionary is in message(str):attributes(dict) format. + Only message is required. 
+  state:
+    description:
+      - State of the topic or queue.
+      - Applies to the most granular resource.
+      - If subscription is specified we remove it.
+      - If only topic is specified, that is what is removed.
+      - NOTE - A topic can be removed without first removing the subscription.
+    choices: [ absent, present ]
+    default: present
+'''
+
+EXAMPLES = '''
+# (Message will be pushed; there is no check to see if the message was pushed before)
+- name: Create a topic and publish a message to it
+  gcpubsub:
+    topic: ansible-topic-example
+    state: present
+
+# Subscriptions associated with topic are not deleted.
+- name: Delete Topic
+  gcpubsub:
+    topic: ansible-topic-example
+    state: absent
+
+# Setting absent will keep the messages from being sent
+- name: Publish multiple messages, with attributes (key:value available with the message)
+  gcpubsub:
+    topic: '{{ topic_name }}'
+    state: present
+    publish:
+      - message: this is message 1
+        attributes:
+          mykey1: myvalue
+          mykey2: myvalue2
+          mykey3: myvalue3
+      - message: this is message 2
+        attributes:
+          server: prod
+          sla: "99.9999"
+          owner: fred
+
+- name: Create Subscription (pull)
+  gcpubsub:
+    topic: ansible-topic-example
+    subscription:
+      - name: mysub
+    state: present
+
+# pull is default, ack_deadline is not required
+- name: Create Subscription with ack_deadline and push endpoint
+  gcpubsub:
+    topic: ansible-topic-example
+    subscription:
+      - name: mysub
+        ack_deadline: "60"
+        push_endpoint: http://pushendpoint.example.com
+    state: present
+
+# Setting push_endpoint to "None" converts subscription to pull.
+- name: Subscription change from push to pull
+  gcpubsub:
+    topic: ansible-topic-example
+    subscription:
+      name: mysub
+      push_endpoint: "None"
+
+# Topic will not be deleted
+- name: Delete subscription
+  gcpubsub:
+    topic: ansible-topic-example
+    subscription:
+      - name: mysub
+    state: absent
+
+# only pull keyword is required.
+- name: Pull messages from subscription
+  gcpubsub:
+    topic: ansible-topic-example
+    subscription:
+      name: ansible-topic-example-sub
+      pull:
+        message_ack: yes
+        max_messages: "100"
+'''
+
+RETURN = '''
+publish:
+  description: List of dictionaries describing messages and attributes to be published. Dictionary is in message(str):attributes(dict) format.
+               Only message is required.
+  returned: Only when specified
+  type: list
+  sample: "publish: ['message': 'my message', attributes: {'key1': 'value1'}]"
+
+pulled_messages:
+  description: list of dictionaries containing message info. Fields are ack_id, attributes, data, message_id.
+  returned: Only when subscription.pull is specified
+  type: list
+  sample: [{ "ack_id": "XkASTCcYREl...","attributes": {"key1": "val1",...}, "data": "this is message 1", "message_id": "49107464153705"},..]
+
+state:
+  description: The state of the topic or subscription. Value will be either 'absent' or 'present'.
+  returned: Always
+  type: str
+  sample: "present"
+
+subscription:
+  description: Name of subscription.
+  returned: When subscription fields are specified
+  type: str
+  sample: "mysubscription"
+
+topic:
+  description: Name of topic.
+ returned: Always + type: str + sample: "mytopic" +''' + +try: + from ast import literal_eval + HAS_PYTHON26 = True +except ImportError: + HAS_PYTHON26 = False + +try: + from google.cloud import pubsub + HAS_GOOGLE_CLOUD_PUBSUB = True +except ImportError as e: + HAS_GOOGLE_CLOUD_PUBSUB = False + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.gcp import check_min_pkg_version, get_google_cloud_credentials + + +CLOUD_CLIENT = 'google-cloud-pubsub' +CLOUD_CLIENT_MINIMUM_VERSION = '0.22.0' +CLOUD_CLIENT_USER_AGENT = 'ansible-pubsub-0.1' + + +def publish_messages(message_list, topic): + with topic.batch() as batch: + for message in message_list: + msg = message['message'] + attrs = {} + if 'attributes' in message: + attrs = message['attributes'] + batch.publish(bytes(msg), **attrs) + return True + + +def pull_messages(pull_params, sub): + """ + :rtype: tuple (output, changed) + """ + changed = False + max_messages = pull_params.get('max_messages', None) + message_ack = pull_params.get('message_ack', 'no') + return_immediately = pull_params.get('return_immediately', False) + + output = [] + pulled = sub.pull(return_immediately=return_immediately, max_messages=max_messages) + + for ack_id, msg in pulled: + msg_dict = {'message_id': msg.message_id, + 'attributes': msg.attributes, + 'data': msg.data, + 'ack_id': ack_id} + output.append(msg_dict) + + if message_ack: + ack_ids = [m['ack_id'] for m in output] + if ack_ids: + sub.acknowledge(ack_ids) + changed = True + return (output, changed) + + +def main(): + + module = AnsibleModule( + argument_spec=dict( + topic=dict(type='str', required=True), + state=dict(type='str', default='present', choices=['absent', 'present']), + publish=dict(type='list'), + subscription=dict(type='dict'), + service_account_email=dict(type='str'), + credentials_file=dict(type='str'), + project_id=dict(type='str'), + ), + ) + + if not HAS_PYTHON26: + module.fail_json( + msg="GCE module requires python's 'ast' module, python v2.6+") + + if not HAS_GOOGLE_CLOUD_PUBSUB: + module.fail_json(msg="Please install google-cloud-pubsub library.") + + if not check_min_pkg_version(CLOUD_CLIENT, CLOUD_CLIENT_MINIMUM_VERSION): + module.fail_json(msg="Please install %s client version %s" % (CLOUD_CLIENT, CLOUD_CLIENT_MINIMUM_VERSION)) + + mod_params = {} + mod_params['publish'] = module.params.get('publish') + mod_params['state'] = module.params.get('state') + mod_params['topic'] = module.params.get('topic') + mod_params['subscription'] = module.params.get('subscription') + + creds, params = get_google_cloud_credentials(module) + pubsub_client = pubsub.Client(project=params['project_id'], credentials=creds, use_gax=False) + pubsub_client.user_agent = CLOUD_CLIENT_USER_AGENT + + changed = False + json_output = {} + + t = None + if mod_params['topic']: + t = pubsub_client.topic(mod_params['topic']) + s = None + if mod_params['subscription']: + # Note: default ack deadline cannot be changed without deleting/recreating subscription + s = t.subscription(mod_params['subscription']['name'], + ack_deadline=mod_params['subscription'].get('ack_deadline', None), + push_endpoint=mod_params['subscription'].get('push_endpoint', None)) + + if mod_params['state'] == 'absent': + # Remove the most granular resource. If subscription is specified + # we remove it. If only topic is specified, that is what is removed. + # Note that a topic can be removed without first removing the subscription. 
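+        # Worked example of the precedence: with topic=ansible-topic-example
+        # and subscription={name: mysub}, state=absent deletes only 'mysub'
+        # and leaves the topic in place; omit 'subscription' entirely to
+        # delete the topic itself.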
+ # TODO(supertom): Enhancement: Provide an option to only delete a topic + # if there are no subscriptions associated with it (which the API does not support). + if s is not None: + if s.exists(): + s.delete() + changed = True + else: + if t.exists(): + t.delete() + changed = True + elif mod_params['state'] == 'present': + if not t.exists(): + t.create() + changed = True + if s: + if not s.exists(): + s.create() + s.reload() + changed = True + else: + # Subscription operations + # TODO(supertom): if more 'update' operations arise, turn this into a function. + s.reload() + push_endpoint = mod_params['subscription'].get('push_endpoint', None) + if push_endpoint is not None: + if push_endpoint != s.push_endpoint: + if push_endpoint == 'None': + push_endpoint = None + s.modify_push_configuration(push_endpoint=push_endpoint) + s.reload() + changed = push_endpoint == s.push_endpoint + + if 'pull' in mod_params['subscription']: + if s.push_endpoint is not None: + module.fail_json(msg="Cannot pull messages, push_endpoint is configured.") + (json_output['pulled_messages'], changed) = pull_messages( + mod_params['subscription']['pull'], s) + + # publish messages to the topic + if mod_params['publish'] and len(mod_params['publish']) > 0: + changed = publish_messages(mod_params['publish'], t) + + json_output['changed'] = changed + json_output.update(mod_params) + module.exit_json(**json_output) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/google/gcpubsub_facts.py b/plugins/modules/cloud/google/gcpubsub_facts.py new file mode 120000 index 0000000000..3feb35c3ee --- /dev/null +++ b/plugins/modules/cloud/google/gcpubsub_facts.py @@ -0,0 +1 @@ +gcpubsub_info.py \ No newline at end of file diff --git a/plugins/modules/cloud/google/gcpubsub_info.py b/plugins/modules/cloud/google/gcpubsub_info.py new file mode 100644 index 0000000000..30330f8f21 --- /dev/null +++ b/plugins/modules/cloud/google/gcpubsub_info.py @@ -0,0 +1,150 @@ +#!/usr/bin/python +# Copyright 2016 Google Inc. +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: gcpubsub_info +short_description: List Topics/Subscriptions and Messages from Google PubSub. +description: + - List Topics/Subscriptions from Google PubSub. Use the gcpubsub module for + topic/subscription management. + See U(https://cloud.google.com/pubsub/docs) for an overview. + - This module was called C(gcpubsub_facts) before Ansible 2.9. The usage did not change. +requirements: + - "python >= 2.6" + - "google-auth >= 0.5.0" + - "google-cloud-pubsub >= 0.22.0" +notes: + - list state enables user to list topics or subscriptions in the project. See examples for details. +author: + - "Tom Melendez (@supertom) " +options: + topic: + description: + - GCP pubsub topic name. Only the name, not the full path, is required. + required: False + view: + description: + - Choices are 'topics' or 'subscriptions' + required: True + state: + description: + - list is the only valid option. 
+ required: False +''' + +EXAMPLES = ''' +## List all Topics in a project +- gcpubsub_info: + view: topics + state: list + +## List all Subscriptions in a project +- gcpubsub_info: + view: subscriptions + state: list + +## List all Subscriptions for a Topic in a project +- gcpubsub_info: + view: subscriptions + topic: my-topic + state: list +''' + +RETURN = ''' +subscriptions: + description: List of subscriptions. + returned: When view is set to subscriptions. + type: list + sample: ["mysubscription", "mysubscription2"] +topic: + description: Name of topic. Used to filter subscriptions. + returned: Always + type: str + sample: "mytopic" +topics: + description: List of topics. + returned: When view is set to topics. + type: list + sample: ["mytopic", "mytopic2"] +''' + +try: + from ast import literal_eval + HAS_PYTHON26 = True +except ImportError: + HAS_PYTHON26 = False + +try: + from google.cloud import pubsub + HAS_GOOGLE_CLOUD_PUBSUB = True +except ImportError as e: + HAS_GOOGLE_CLOUD_PUBSUB = False + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.gcp import check_min_pkg_version, get_google_cloud_credentials + + +def list_func(data, member='name'): + """Used for state=list.""" + return [getattr(x, member) for x in data] + + +def main(): + module = AnsibleModule(argument_spec=dict( + view=dict(choices=['topics', 'subscriptions'], default='topics'), + topic=dict(required=False), + state=dict(choices=['list'], default='list'), + service_account_email=dict(), + credentials_file=dict(), + project_id=dict(), ),) + if module._name == 'gcpubsub_facts': + module.deprecate("The 'gcpubsub_facts' module has been renamed to 'gcpubsub_info'", version='2.13') + + if not HAS_PYTHON26: + module.fail_json( + msg="GCE module requires python's 'ast' module, python v2.6+") + + if not HAS_GOOGLE_CLOUD_PUBSUB: + module.fail_json(msg="Please install google-cloud-pubsub library.") + + CLIENT_MINIMUM_VERSION = '0.22.0' + if not check_min_pkg_version('google-cloud-pubsub', CLIENT_MINIMUM_VERSION): + module.fail_json(msg="Please install google-cloud-pubsub library version %s" % CLIENT_MINIMUM_VERSION) + + mod_params = {} + mod_params['state'] = module.params.get('state') + mod_params['topic'] = module.params.get('topic') + mod_params['view'] = module.params.get('view') + + creds, params = get_google_cloud_credentials(module) + pubsub_client = pubsub.Client(project=params['project_id'], credentials=creds, use_gax=False) + pubsub_client.user_agent = 'ansible-pubsub-0.1' + + json_output = {} + if mod_params['view'] == 'topics': + json_output['topics'] = list_func(pubsub_client.list_topics()) + elif mod_params['view'] == 'subscriptions': + if mod_params['topic']: + t = pubsub_client.topic(mod_params['topic']) + json_output['subscriptions'] = list_func(t.list_subscriptions()) + else: + json_output['subscriptions'] = list_func(pubsub_client.list_subscriptions()) + + json_output['changed'] = False + json_output.update(mod_params) + module.exit_json(**json_output) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/google/gcspanner.py b/plugins/modules/cloud/google/gcspanner.py new file mode 100644 index 0000000000..4d014b7dbc --- /dev/null +++ b/plugins/modules/cloud/google/gcspanner.py @@ -0,0 +1,290 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2017, Google Inc. 
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['deprecated'],
+                    'supported_by': 'community'}
+
+DOCUMENTATION = '''
+---
+module: gcspanner
+short_description: Create and Delete Instances/Databases on Spanner
+description:
+    - Create and Delete Instances/Databases on Spanner.
+      See U(https://cloud.google.com/spanner/docs) for an overview.
+requirements:
+  - python >= 2.6
+  - google-auth >= 0.5.0
+  - google-cloud-spanner >= 0.23.0
+notes:
+  - Changing the configuration on an existing instance is not supported.
+deprecated:
+    removed_in: "2.12"
+    why: Updated modules released with increased functionality
+    alternative: Use M(gcp_spanner_database) and/or M(gcp_spanner_instance) instead.
+author:
+  - Tom Melendez (@supertom)
+options:
+  configuration:
+    description:
+       - Configuration the instance should use.
+       - Examples are us-central1, asia-east1 and europe-west1.
+    required: yes
+  instance_id:
+    description:
+       - GCP spanner instance name.
+    required: yes
+  database_name:
+    description:
+       - Name of database contained on the instance.
+  force_instance_delete:
+    description:
+       - To delete an instance, this argument must exist and be true (along with state being equal to absent).
+    type: bool
+    default: 'no'
+  instance_display_name:
+    description:
+       - Name of Instance to display.
+       - If not specified, instance_id will be used instead.
+  node_count:
+    description:
+       - Number of nodes in the instance.
+    default: 1
+  state:
+    description:
+    - State of the instance or database. Applies to the most granular resource.
+    - If a C(database_name) is specified we remove it.
+    - If only C(instance_id) is specified, that is what is removed.
+    choices: [ absent, present ]
+    default: present
+'''
+
+EXAMPLES = '''
+- name: Create instance
+  gcspanner:
+    instance_id: '{{ instance_id }}'
+    configuration: '{{ configuration }}'
+    state: present
+    node_count: 1
+
+- name: Create database
+  gcspanner:
+    instance_id: '{{ instance_id }}'
+    configuration: '{{ configuration }}'
+    database_name: '{{ database_name }}'
+    state: present
+
+- name: Delete instance (and all databases)
+  gcspanner:
+    instance_id: '{{ instance_id }}'
+    configuration: '{{ configuration }}'
+    state: absent
+    force_instance_delete: yes
+'''
+
+RETURN = '''
+state:
+  description: The state of the instance or database. Value will be either 'absent' or 'present'.
+  returned: Always
+  type: str
+  sample: "present"
+
+database_name:
+  description: Name of database.
+  returned: When database name is specified
+  type: str
+  sample: "mydatabase"
+
+instance_id:
+  description: Name of instance.
+  returned: Always
+  type: str
+  sample: "myinstance"
+
+previous_values:
+  description: List of dictionaries containing previous values prior to update.
+  returned: When an instance update has occurred and a field has been modified.
+  type: dict
+  sample: "'previous_values': { 'instance': { 'instance_display_name': 'my-instance', 'node_count': 1 } }"
+
+updated:
+  description: Boolean field to denote an update has occurred.
+  returned: When an update has occurred.
+  type: bool
+  sample: True
+'''
+try:
+    from ast import literal_eval
+    HAS_PYTHON26 = True
+except ImportError:
+    HAS_PYTHON26 = False
+
+try:
+    from google.cloud import spanner
+    from google.gax.errors import GaxError
+    HAS_GOOGLE_CLOUD_SPANNER = True
+except ImportError as e:
+    HAS_GOOGLE_CLOUD_SPANNER = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.gcp import check_min_pkg_version, get_google_cloud_credentials
+from ansible.module_utils.six import string_types
+
+
+CLOUD_CLIENT = 'google-cloud-spanner'
+CLOUD_CLIENT_MINIMUM_VERSION = '0.23.0'
+CLOUD_CLIENT_USER_AGENT = 'ansible-spanner-0.1'
+
+
+def get_spanner_configuration_name(config_name, project_name):
+    config_name = 'projects/%s/instanceConfigs/regional-%s' % (project_name,
+                                                               config_name)
+    return config_name
+
+
+def instance_update(instance):
+    """
+    Call update method on spanner client.
+
+    Note: A ValueError exception is thrown despite the client succeeding.
+    So, we validate the node_count and instance_display_name parameters and then
+    ignore the ValueError exception.
+
+    :param instance: a Spanner instance object
+    :type instance: class `google.cloud.spanner.Instance`
+
+    :returns: True on success, raises ValueError on type error.
+    :rtype: ``bool``
+    """
+    errmsg = ''
+    if not isinstance(instance.node_count, int):
+        errmsg = 'node_count must be an integer %s (%s)' % (
+            instance.node_count, type(instance.node_count))
+    if instance.display_name and not isinstance(instance.display_name,
+                                                string_types):
+        errmsg = 'instance_display_name must be a string %s (%s)' % (
+            instance.display_name, type(instance.display_name))
+    if errmsg:
+        raise ValueError(errmsg)
+
+    try:
+        instance.update()
+    except ValueError:
+        # The ValueError here is the one we 'expect'.
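+        # The client reports success via this exception path, so after the
+        # explicit type checks above we deliberately ignore it; a genuinely
+        # invalid node_count or display_name has already raised before
+        # instance.update() was called.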
+ pass + + return True + + +def main(): + module = AnsibleModule( + argument_spec=dict( + instance_id=dict(type='str', required=True), + state=dict(type='str', default='present', choices=['absent', 'present']), + database_name=dict(type='str'), + configuration=dict(type='str', required=True), + node_count=dict(type='int', default=1), + instance_display_name=dict(type='str'), + force_instance_delete=dict(type='bool', default=False), + service_account_email=dict(type='str'), + credentials_file=dict(type='str'), + project_id=dict(type='str'), + ), + ) + + if not HAS_PYTHON26: + module.fail_json( + msg="GCE module requires python's 'ast' module, python v2.6+") + + if not HAS_GOOGLE_CLOUD_SPANNER: + module.fail_json(msg="Please install google-cloud-spanner.") + + if not check_min_pkg_version(CLOUD_CLIENT, CLOUD_CLIENT_MINIMUM_VERSION): + module.fail_json(msg="Please install %s client version %s" % + (CLOUD_CLIENT, CLOUD_CLIENT_MINIMUM_VERSION)) + + mod_params = {} + mod_params['state'] = module.params.get('state') + mod_params['instance_id'] = module.params.get('instance_id') + mod_params['database_name'] = module.params.get('database_name') + mod_params['configuration'] = module.params.get('configuration') + mod_params['node_count'] = module.params.get('node_count', None) + mod_params['instance_display_name'] = module.params.get('instance_display_name') + mod_params['force_instance_delete'] = module.params.get('force_instance_delete') + + creds, params = get_google_cloud_credentials(module) + spanner_client = spanner.Client(project=params['project_id'], + credentials=creds, + user_agent=CLOUD_CLIENT_USER_AGENT) + changed = False + json_output = {} + + i = None + if mod_params['instance_id']: + config_name = get_spanner_configuration_name( + mod_params['configuration'], params['project_id']) + i = spanner_client.instance(mod_params['instance_id'], + configuration_name=config_name) + d = None + if mod_params['database_name']: + # TODO(supertom): support DDL + ddl_statements = '' + d = i.database(mod_params['database_name'], ddl_statements) + + if mod_params['state'] == 'absent': + # Remove the most granular resource. If database is specified + # we remove it. If only instance is specified, that is what is removed. 
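+        # Worked example of the precedence: instance_id=myinstance with
+        # database_name=mydb drops only 'mydb'; without database_name the
+        # instance itself is deleted, and then only when
+        # force_instance_delete=yes (see the fail_json branch below).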
+        if d is not None and d.exists():
+            d.drop()
+            changed = True
+        else:
+            if i.exists():
+                if mod_params['force_instance_delete']:
+                    i.delete()
+                else:
+                    module.fail_json(
+                        msg=(("Cannot delete Spanner instance: "
+                              "'force_instance_delete' argument not specified")))
+                changed = True
+    elif mod_params['state'] == 'present':
+        if not i.exists():
+            i = spanner_client.instance(mod_params['instance_id'],
+                                        configuration_name=config_name,
+                                        display_name=mod_params['instance_display_name'],
+                                        node_count=mod_params['node_count'] or 1)
+            i.create()
+            changed = True
+        else:
+            # update instance
+            i.reload()
+            inst_prev_vals = {}
+            if i.display_name != mod_params['instance_display_name']:
+                inst_prev_vals['instance_display_name'] = i.display_name
+                i.display_name = mod_params['instance_display_name']
+            if mod_params['node_count']:
+                if i.node_count != mod_params['node_count']:
+                    inst_prev_vals['node_count'] = i.node_count
+                    i.node_count = mod_params['node_count']
+            if inst_prev_vals:
+                changed = instance_update(i)
+                json_output['updated'] = changed
+                json_output['previous_values'] = {'instance': inst_prev_vals}
+        if d:
+            if not d.exists():
+                d.create()
+                d.reload()
+                changed = True
+
+    json_output['changed'] = changed
+    json_output.update(mod_params)
+    module.exit_json(**json_output)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/cloud/heroku/heroku_collaborator.py b/plugins/modules/cloud/heroku/heroku_collaborator.py
new file mode 100644
index 0000000000..5546d9ac05
--- /dev/null
+++ b/plugins/modules/cloud/heroku/heroku_collaborator.py
@@ -0,0 +1,126 @@
+#!/usr/bin/python
+
+# Copyright: (c) 2018, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+DOCUMENTATION = '''
+---
+module: heroku_collaborator
+short_description: "Add or delete app collaborators on Heroku"
+description:
+  - Manages collaborators for Heroku apps.
+  - If set to C(present) and heroku user is already collaborator, then do nothing.
+  - If set to C(present) and heroku user is not collaborator, then add user to app.
+  - If set to C(absent) and heroku user is collaborator, then delete user from app.
+author:
+  - Marcel Arns (@marns93)
+requirements:
+  - heroku3
+options:
+  api_key:
+    description:
+      - Heroku API key
+  apps:
+    description:
+      - List of Heroku App names
+    required: true
+  suppress_invitation:
+    description:
+      - Suppress email invitation when creating collaborator
+    type: bool
+    default: "no"
+  user:
+    description:
+      - User ID or e-mail
+    required: true
+  state:
+    description:
+      - Create or remove the heroku collaborator
+    choices: ["present", "absent"]
+    default: "present"
+notes:
+  - C(HEROKU_API_KEY) and C(TF_VAR_HEROKU_API_KEY) env variable can be used instead of setting C(api_key).
+  - If you use I(--check), you can also pass the I(-v) flag to see affected apps in C(msg), e.g. ["heroku-example-app"].
+''' + +EXAMPLES = ''' +- heroku_collaborator: + api_key: YOUR_API_KEY + user: max.mustermann@example.com + apps: heroku-example-app + state: present + +- heroku_collaborator: + api_key: YOUR_API_KEY + user: '{{ item.user }}' + apps: '{{ item.apps | default(apps) }}' + suppress_invitation: '{{ item.suppress_invitation | default(suppress_invitation) }}' + state: '{{ item.state | default("present") }}' + with_items: + - { user: 'a.b@example.com' } + - { state: 'absent', user: 'b.c@example.com', suppress_invitation: false } + - { user: 'x.y@example.com', apps: ["heroku-example-app"] } +''' + +RETURN = ''' # ''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.heroku import HerokuHelper + + +def add_or_delete_heroku_collaborator(module, client): + user = module.params['user'] + state = module.params['state'] + affected_apps = [] + result_state = False + + for app in module.params['apps']: + if app not in client.apps(): + module.fail_json(msg='App {0} does not exist'.format(app)) + + heroku_app = client.apps()[app] + + heroku_collaborator_list = [collaborator.user.email for collaborator in heroku_app.collaborators()] + + if state == 'absent' and user in heroku_collaborator_list: + if not module.check_mode: + heroku_app.remove_collaborator(user) + affected_apps += [app] + result_state = True + elif state == 'present' and user not in heroku_collaborator_list: + if not module.check_mode: + heroku_app.add_collaborator(user_id_or_email=user, silent=module.params['suppress_invitation']) + affected_apps += [app] + result_state = True + + return result_state, affected_apps + + +def main(): + argument_spec = HerokuHelper.heroku_argument_spec() + argument_spec.update( + user=dict(required=True, type='str'), + apps=dict(required=True, type='list'), + suppress_invitation=dict(default=False, type='bool'), + state=dict(default='present', type='str', choices=['present', 'absent']), + ) + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True + ) + + client = HerokuHelper(module).get_heroku_client() + + has_changed, msg = add_or_delete_heroku_collaborator(module, client) + module.exit_json(changed=has_changed, msg=msg) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/huawei/hwc_ecs_instance.py b/plugins/modules/cloud/huawei/hwc_ecs_instance.py new file mode 100644 index 0000000000..8495fe4fc2 --- /dev/null +++ b/plugins/modules/cloud/huawei/hwc_ecs_instance.py @@ -0,0 +1,2135 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (C) 2019 Huawei +# GNU General Public License v3.0+ (see COPYING or +# https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +############################################################################### +# Documentation +############################################################################### + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ["preview"], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: hwc_ecs_instance +description: + - instance management. +short_description: Creates a resource of Ecs/Instance in Huawei Cloud +author: Huawei Inc. (@huaweicloud) +requirements: + - keystoneauth1 >= 3.6.0 +options: + state: + description: + - Whether the given object should exist in Huawei Cloud. + type: str + choices: ['present', 'absent'] + default: 'present' + timeouts: + description: + - The timeouts for each operations. 
+ type: dict + suboptions: + create: + description: + - The timeouts for create operation. + type: str + default: '30m' + update: + description: + - The timeouts for update operation. + type: str + default: '30m' + delete: + description: + - The timeouts for delete operation. + type: str + default: '30m' + availability_zone: + description: + - Specifies the name of the AZ where the ECS is located. + type: str + required: true + flavor_name: + description: + - Specifies the name of the system flavor. + type: str + required: true + image_id: + description: + - Specifies the ID of the system image. + type: str + required: true + name: + description: + - Specifies the ECS name. Value requirements consists of 1 to 64 + characters, including letters, digits, underscores C(_), hyphens + (-), periods (.). + type: str + required: true + nics: + description: + - Specifies the NIC information of the ECS. Constraints the + network of the NIC must belong to the VPC specified by vpc_id. A + maximum of 12 NICs can be attached to an ECS. + type: list + required: true + suboptions: + ip_address: + description: + - Specifies the IP address of the NIC. The value is an IPv4 + address. Its value must be an unused IP + address in the network segment of the subnet. + type: str + required: true + subnet_id: + description: + - Specifies the ID of subnet. + type: str + required: true + root_volume: + description: + - Specifies the configuration of the ECS's system disks. + type: dict + required: true + suboptions: + volume_type: + description: + - Specifies the ECS system disk type. + - SATA is common I/O disk type. + - SAS is high I/O disk type. + - SSD is ultra-high I/O disk type. + - co-p1 is high I/O (performance-optimized I) disk type. + - uh-l1 is ultra-high I/O (latency-optimized) disk type. + - NOTE is For HANA, HL1, and HL2 ECSs, use co-p1 and uh-l1 + disks. For other ECSs, do not use co-p1 or uh-l1 disks. + type: str + required: true + size: + description: + - Specifies the system disk size, in GB. The value range is + 1 to 1024. The system disk size must be + greater than or equal to the minimum system disk size + supported by the image (min_disk attribute of the image). + If this parameter is not specified or is set to 0, the + default system disk size is the minimum value of the + system disk in the image (min_disk attribute of the + image). + type: int + required: false + snapshot_id: + description: + - Specifies the snapshot ID or ID of the original data disk + contained in the full-ECS image. + type: str + required: false + vpc_id: + description: + - Specifies the ID of the VPC to which the ECS belongs. + type: str + required: true + admin_pass: + description: + - Specifies the initial login password of the administrator account + for logging in to an ECS using password authentication. The Linux + administrator is root, and the Windows administrator is + Administrator. Password complexity requirements, consists of 8 to + 26 characters. The password must contain at least three of the + following character types 'uppercase letters, lowercase letters, + digits, and special characters (!@$%^-_=+[{}]:,./?)'. The password + cannot contain the username or the username in reverse. The + Windows ECS password cannot contain the username, the username in + reverse, or more than two consecutive characters in the username. + type: str + required: false + data_volumes: + description: + - Specifies the data disks of ECS instance. 
+ type: list + required: false + suboptions: + volume_id: + description: + - Specifies the disk ID. + type: str + required: true + device: + description: + - Specifies the disk device name. + type: str + required: false + description: + description: + - Specifies the description of an ECS, which is a null string by + default. Can contain a maximum of 85 characters. Cannot contain + special characters, such as < and >. + type: str + required: false + eip_id: + description: + - Specifies the ID of the elastic IP address assigned to the ECS. + Only elastic IP addresses in the DOWN state can be + assigned. + type: str + required: false + enable_auto_recovery: + description: + - Specifies whether automatic recovery is enabled on the ECS. + type: bool + required: false + enterprise_project_id: + description: + - Specifies the ID of the enterprise project to which the ECS + belongs. + type: str + required: false + security_groups: + description: + - Specifies the security groups of the ECS. If this + parameter is left blank, the default security group is bound to + the ECS by default. + type: list + required: false + server_metadata: + description: + - Specifies the metadata of ECS to be created. + type: dict + required: false + server_tags: + description: + - Specifies the tags of an ECS. When you create ECSs, one ECS + supports up to 10 tags. + type: dict + required: false + ssh_key_name: + description: + - Specifies the name of the SSH key used for logging in to the ECS. + type: str + required: false + user_data: + description: + - Specifies the user data to be injected during the ECS creation + process. Text, text files, and gzip files can be injected. + The content to be injected must be encoded with + base64. The maximum size of the content to be injected (before + encoding) is 32 KB. For Linux ECSs, this parameter does not take + effect when adminPass is used. + type: str + required: false +extends_documentation_fragment: +- community.general.hwc + +''' + +EXAMPLES = ''' +# create an ecs instance +- name: create a vpc + hwc_network_vpc: + cidr: "192.168.100.0/24" + name: "ansible_network_vpc_test" + register: vpc +- name: create a subnet + hwc_vpc_subnet: + gateway_ip: "192.168.100.32" + name: "ansible_network_subnet_test" + dhcp_enable: true + vpc_id: "{{ vpc.id }}" + cidr: "192.168.100.0/26" + register: subnet +- name: create a eip + hwc_vpc_eip: + dedicated_bandwidth: + charge_mode: "traffic" + name: "ansible_test_dedicated_bandwidth" + size: 1 + type: "5_bgp" + register: eip +- name: create a disk + hwc_evs_disk: + availability_zone: "cn-north-1a" + name: "ansible_evs_disk_test" + volume_type: "SATA" + size: 10 + register: disk +- name: create an instance + hwc_ecs_instance: + data_volumes: + - volume_id: "{{ disk.id }}" + enable_auto_recovery: false + eip_id: "{{ eip.id }}" + name: "ansible_ecs_instance_test" + availability_zone: "cn-north-1a" + nics: + - subnet_id: "{{ subnet.id }}" + ip_address: "192.168.100.33" + - subnet_id: "{{ subnet.id }}" + ip_address: "192.168.100.34" + server_tags: + my_server: "my_server" + image_id: "8da46d6d-6079-4e31-ad6d-a7167efff892" + flavor_name: "s3.small.1" + vpc_id: "{{ vpc.id }}" + root_volume: + volume_type: "SAS" +''' + +RETURN = ''' + availability_zone: + description: + - Specifies the name of the AZ where the ECS is located. + type: str + returned: success + flavor_name: + description: + - Specifies the name of the system flavor. + type: str + returned: success + image_id: + description: + - Specifies the ID of the system image. 
+ type: str + returned: success + name: + description: + - Specifies the ECS name. Value requirements "Consists of 1 to 64 + characters, including letters, digits, underscores C(_), hyphens + (-), periods (.)". + type: str + returned: success + nics: + description: + - Specifies the NIC information of the ECS. The + network of the NIC must belong to the VPC specified by vpc_id. A + maximum of 12 NICs can be attached to an ECS. + type: list + returned: success + contains: + ip_address: + description: + - Specifies the IP address of the NIC. The value is an IPv4 + address. Its value must be an unused IP + address in the network segment of the subnet. + type: str + returned: success + subnet_id: + description: + - Specifies the ID of subnet. + type: str + returned: success + port_id: + description: + - Specifies the port ID corresponding to the IP address. + type: str + returned: success + root_volume: + description: + - Specifies the configuration of the ECS's system disks. + type: dict + returned: success + contains: + volume_type: + description: + - Specifies the ECS system disk type. + - SATA is common I/O disk type. + - SAS is high I/O disk type. + - SSD is ultra-high I/O disk type. + - co-p1 is high I/O (performance-optimized I) disk type. + - uh-l1 is ultra-high I/O (latency-optimized) disk type. + - NOTE is For HANA, HL1, and HL2 ECSs, use co-p1 and uh-l1 + disks. For other ECSs, do not use co-p1 or uh-l1 disks. + type: str + returned: success + size: + description: + - Specifies the system disk size, in GB. The value range is + 1 to 1024. The system disk size must be + greater than or equal to the minimum system disk size + supported by the image (min_disk attribute of the image). + If this parameter is not specified or is set to 0, the + default system disk size is the minimum value of the + system disk in the image (min_disk attribute of the + image). + type: int + returned: success + snapshot_id: + description: + - Specifies the snapshot ID or ID of the original data disk + contained in the full-ECS image. + type: str + returned: success + device: + description: + - Specifies the disk device name. + type: str + returned: success + volume_id: + description: + - Specifies the disk ID. + type: str + returned: success + vpc_id: + description: + - Specifies the ID of the VPC to which the ECS belongs. + type: str + returned: success + admin_pass: + description: + - Specifies the initial login password of the administrator account + for logging in to an ECS using password authentication. The Linux + administrator is root, and the Windows administrator is + Administrator. Password complexity requirements consists of 8 to + 26 characters. The password must contain at least three of the + following character types "uppercase letters, lowercase letters, + digits, and special characters (!@$%^-_=+[{}]:,./?)". The password + cannot contain the username or the username in reverse. The + Windows ECS password cannot contain the username, the username in + reverse, or more than two consecutive characters in the username. + type: str + returned: success + data_volumes: + description: + - Specifies the data disks of ECS instance. + type: list + returned: success + contains: + volume_id: + description: + - Specifies the disk ID. + type: str + returned: success + device: + description: + - Specifies the disk device name. + type: str + returned: success + description: + description: + - Specifies the description of an ECS, which is a null string by + default. Can contain a maximum of 85 characters. 
+              special characters, such as < and >.
+        type: str
+        returned: success
+    eip_id:
+        description:
+            - Specifies the ID of the elastic IP address assigned to the ECS.
+              Only elastic IP addresses in the DOWN state can be assigned.
+        type: str
+        returned: success
+    enable_auto_recovery:
+        description:
+            - Specifies whether automatic recovery is enabled on the ECS.
+        type: bool
+        returned: success
+    enterprise_project_id:
+        description:
+            - Specifies the ID of the enterprise project to which the ECS
+              belongs.
+        type: str
+        returned: success
+    security_groups:
+        description:
+            - Specifies the security groups of the ECS. If this parameter is
+              left blank, the default security group is bound to the ECS.
+        type: list
+        returned: success
+    server_metadata:
+        description:
+            - Specifies the metadata of the ECS to be created.
+        type: dict
+        returned: success
+    server_tags:
+        description:
+            - Specifies the tags of an ECS. When you create ECSs, one ECS
+              supports up to 10 tags.
+        type: dict
+        returned: success
+    ssh_key_name:
+        description:
+            - Specifies the name of the SSH key used for logging in to the ECS.
+        type: str
+        returned: success
+    user_data:
+        description:
+            - Specifies the user data to be injected during the ECS creation
+              process. Text, text files, and gzip files can be injected.
+              The content to be injected must be encoded with base64. The maximum
+              size of the content to be injected (before encoding) is 32 KB. For
+              Linux ECSs, this parameter does not take effect when adminPass is
+              used.
+        type: str
+        returned: success
+    config_drive:
+        description:
+            - Specifies the configuration driver.
+        type: str
+        returned: success
+    created:
+        description:
+            - Specifies the time when an ECS was created.
+        type: str
+        returned: success
+    disk_config_type:
+        description:
+            - Specifies the disk configuration type. MANUAL means the image
+              space is not expanded. AUTO means the image space of the system
+              disk is automatically expanded to the same size as the flavor.
+        type: str
+        returned: success
+    host_name:
+        description:
+            - Specifies the host name of the ECS.
+        type: str
+        returned: success
+    image_name:
+        description:
+            - Specifies the image name of the ECS.
+        type: str
+        returned: success
+    power_state:
+        description:
+            - Specifies the power status of the ECS.
+        type: int
+        returned: success
+    server_alias:
+        description:
+            - Specifies the ECS alias.
+        type: str
+        returned: success
+    status:
+        description:
+            - Specifies the ECS status. Options are ACTIVE, REBOOT, HARD_REBOOT,
+              REBUILD, MIGRATING, BUILD, SHUTOFF, RESIZE, VERIFY_RESIZE, ERROR,
+              and DELETED.
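+# Illustrative only, not part of the documented return spec: status and
+# power_state can back a simple health check, assuming the create task was
+# registered as "instance" (power_state 1 conventionally means running):
+#   - name: verify the ECS is running
+#     assert:
+#       that:
+#         - instance.status == 'ACTIVE'
+#         - instance.power_state == 1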
+ type: str + returned: success +''' + +from ansible_collections.community.general.plugins.module_utils.hwc_utils import ( + Config, HwcClientException, HwcModule, are_different_dicts, build_path, + get_region, is_empty_value, navigate_value, wait_to_finish) + + +def build_module(): + return HwcModule( + argument_spec=dict( + state=dict(default='present', choices=['present', 'absent'], + type='str'), + timeouts=dict(type='dict', options=dict( + create=dict(default='30m', type='str'), + update=dict(default='30m', type='str'), + delete=dict(default='30m', type='str'), + ), default=dict()), + availability_zone=dict(type='str', required=True), + flavor_name=dict(type='str', required=True), + image_id=dict(type='str', required=True), + name=dict(type='str', required=True), + nics=dict( + type='list', required=True, elements='dict', + options=dict( + ip_address=dict(type='str', required=True), + subnet_id=dict(type='str', required=True) + ), + ), + root_volume=dict(type='dict', required=True, options=dict( + volume_type=dict(type='str', required=True), + size=dict(type='int'), + snapshot_id=dict(type='str') + )), + vpc_id=dict(type='str', required=True), + admin_pass=dict(type='str'), + data_volumes=dict(type='list', elements='dict', options=dict( + volume_id=dict(type='str', required=True), + device=dict(type='str') + )), + description=dict(type='str'), + eip_id=dict(type='str'), + enable_auto_recovery=dict(type='bool'), + enterprise_project_id=dict(type='str'), + security_groups=dict(type='list', elements='str'), + server_metadata=dict(type='dict'), + server_tags=dict(type='dict'), + ssh_key_name=dict(type='str'), + user_data=dict(type='str') + ), + supports_check_mode=True, + ) + + +def main(): + """Main function""" + + module = build_module() + config = Config(module, "ecs") + + try: + _init(config) + is_exist = module.params['id'] + + result = None + changed = False + if module.params['state'] == 'present': + if not is_exist: + if not module.check_mode: + create(config) + changed = True + + inputv = user_input_parameters(module) + resp, array_index = read_resource(config) + result = build_state(inputv, resp, array_index) + set_readonly_options(inputv, result) + if are_different_dicts(inputv, result): + if not module.check_mode: + update(config, inputv, result) + + inputv = user_input_parameters(module) + resp, array_index = read_resource(config) + result = build_state(inputv, resp, array_index) + set_readonly_options(inputv, result) + if are_different_dicts(inputv, result): + raise Exception("Update resource failed, " + "some attributes are not updated") + + changed = True + + result['id'] = module.params.get('id') + else: + result = dict() + if is_exist: + if not module.check_mode: + delete(config) + changed = True + + except Exception as ex: + module.fail_json(msg=str(ex)) + + else: + result['changed'] = changed + module.exit_json(**result) + + +def _init(config): + module = config.module + if module.params['id']: + return + + v = search_resource(config) + n = len(v) + if n > 1: + raise Exception("Found more than one resource(%s)" % ", ".join([ + navigate_value(i, ["id"]) + for i in v + ])) + + if n == 1: + module.params['id'] = navigate_value(v[0], ["id"]) + + +def user_input_parameters(module): + return { + "admin_pass": module.params.get("admin_pass"), + "availability_zone": module.params.get("availability_zone"), + "data_volumes": module.params.get("data_volumes"), + "description": module.params.get("description"), + "eip_id": module.params.get("eip_id"), + "enable_auto_recovery": 
module.params.get("enable_auto_recovery"), + "enterprise_project_id": module.params.get("enterprise_project_id"), + "flavor_name": module.params.get("flavor_name"), + "image_id": module.params.get("image_id"), + "name": module.params.get("name"), + "nics": module.params.get("nics"), + "root_volume": module.params.get("root_volume"), + "security_groups": module.params.get("security_groups"), + "server_metadata": module.params.get("server_metadata"), + "server_tags": module.params.get("server_tags"), + "ssh_key_name": module.params.get("ssh_key_name"), + "user_data": module.params.get("user_data"), + "vpc_id": module.params.get("vpc_id"), + } + + +def create(config): + module = config.module + client = config.client(get_region(module), "ecs", "project") + timeout = 60 * int(module.params['timeouts']['create'].rstrip('m')) + opts = user_input_parameters(module) + opts["ansible_module"] = module + + params = build_create_parameters(opts) + r = send_create_request(module, params, client) + obj = async_wait(config, r, client, timeout) + + sub_job_identity = { + "job_type": "createSingleServer", + } + for item in navigate_value(obj, ["entities", "sub_jobs"]): + for k, v in sub_job_identity.items(): + if item[k] != v: + break + else: + obj = item + break + else: + raise Exception("Can't find the sub job") + module.params['id'] = navigate_value(obj, ["entities", "server_id"]) + + +def update(config, expect_state, current_state): + module = config.module + expect_state["current_state"] = current_state + current_state["current_state"] = current_state + timeout = 60 * int(module.params['timeouts']['update'].rstrip('m')) + client = config.client(get_region(module), "ecs", "project") + + params = build_delete_nics_parameters(expect_state) + params1 = build_delete_nics_parameters(current_state) + if params and are_different_dicts(params, params1): + r = send_delete_nics_request(module, params, client) + async_wait(config, r, client, timeout) + + params = build_set_auto_recovery_parameters(expect_state) + params1 = build_set_auto_recovery_parameters(current_state) + if params and are_different_dicts(params, params1): + send_set_auto_recovery_request(module, params, client) + + params = build_attach_nics_parameters(expect_state) + params1 = build_attach_nics_parameters(current_state) + if params and are_different_dicts(params, params1): + r = send_attach_nics_request(module, params, client) + async_wait(config, r, client, timeout) + + multi_invoke_delete_volume(config, expect_state, client, timeout) + + multi_invoke_attach_data_disk(config, expect_state, client, timeout) + + +def delete(config): + module = config.module + client = config.client(get_region(module), "ecs", "project") + timeout = 60 * int(module.params['timeouts']['delete'].rstrip('m')) + + opts = user_input_parameters(module) + opts["ansible_module"] = module + + params = build_delete_parameters(opts) + if params: + r = send_delete_request(module, params, client) + async_wait(config, r, client, timeout) + + +def read_resource(config): + module = config.module + client = config.client(get_region(module), "ecs", "project") + + res = {} + + r = send_read_request(module, client) + preprocess_read_response(r) + res["read"] = fill_read_resp_body(r) + + r = send_read_auto_recovery_request(module, client) + res["read_auto_recovery"] = fill_read_auto_recovery_resp_body(r) + + return res, None + + +def preprocess_read_response(resp): + v = resp.get("os-extended-volumes:volumes_attached") + if v and isinstance(v, list): + for i in range(len(v)): + if 
v[i].get("bootIndex") == "0": + root_volume = v[i] + + if (i + 1) != len(v): + v[i] = v[-1] + + v.pop() + + resp["root_volume"] = root_volume + break + + v = resp.get("addresses") + if v: + rv = {} + eips = [] + for val in v.values(): + for item in val: + if item["OS-EXT-IPS:type"] == "floating": + eips.append(item) + else: + rv[item["OS-EXT-IPS:port_id"]] = item + + for item in eips: + k = item["OS-EXT-IPS:port_id"] + if k in rv: + rv[k]["eip_address"] = item.get("addr", "") + else: + rv[k] = item + item["eip_address"] = item.get("addr", "") + item["addr"] = "" + + resp["address"] = rv.values() + + +def build_state(opts, response, array_index): + states = flatten_options(response, array_index) + set_unreadable_options(opts, states) + adjust_options(opts, states) + return states + + +def _build_query_link(opts): + query_params = [] + + v = navigate_value(opts, ["enterprise_project_id"]) + if v or v in [False, 0]: + query_params.append( + "enterprise_project_id=" + (str(v) if v else str(v).lower())) + + v = navigate_value(opts, ["name"]) + if v or v in [False, 0]: + query_params.append( + "name=" + (str(v) if v else str(v).lower())) + + query_link = "?limit=10&offset={offset}" + if query_params: + query_link += "&" + "&".join(query_params) + + return query_link + + +def search_resource(config): + module = config.module + client = config.client(get_region(module), "ecs", "project") + opts = user_input_parameters(module) + identity_obj = _build_identity_object(opts) + query_link = _build_query_link(opts) + link = "cloudservers/detail" + query_link + + result = [] + p = {'offset': 1} + while True: + url = link.format(**p) + r = send_list_request(module, client, url) + if not r: + break + + for item in r: + item = fill_list_resp_body(item) + adjust_list_resp(identity_obj, item) + if not are_different_dicts(identity_obj, item): + result.append(item) + + if len(result) > 1: + break + + p['offset'] += 1 + + return result + + +def build_delete_nics_parameters(opts): + params = dict() + + v = expand_delete_nics_nics(opts, None) + if not is_empty_value(v): + params["nics"] = v + + return params + + +def expand_delete_nics_nics(d, array_index): + cv = d["current_state"].get("nics") + if not cv: + return None + + val = cv + + ev = d.get("nics") + if ev: + m = [item.get("ip_address") for item in ev] + val = [item for item in cv if item.get("ip_address") not in m] + + r = [] + for item in val: + transformed = dict() + + v = item.get("port_id") + if not is_empty_value(v): + transformed["id"] = v + + if transformed: + r.append(transformed) + + return r + + +def send_delete_nics_request(module, params, client): + url = build_path(module, "cloudservers/{id}/nics/delete") + + try: + r = client.post(url, params) + except HwcClientException as ex: + msg = ("module(hwc_ecs_instance): error running " + "api(delete_nics), error: %s" % str(ex)) + module.fail_json(msg=msg) + + return r + + +def build_set_auto_recovery_parameters(opts): + params = dict() + + v = expand_set_auto_recovery_support_auto_recovery(opts, None) + if v is not None: + params["support_auto_recovery"] = v + + return params + + +def expand_set_auto_recovery_support_auto_recovery(d, array_index): + v = navigate_value(d, ["enable_auto_recovery"], None) + return None if v is None else str(v).lower() + + +def send_set_auto_recovery_request(module, params, client): + url = build_path(module, "cloudservers/{id}/autorecovery") + + try: + r = client.put(url, params) + except HwcClientException as ex: + msg = ("module(hwc_ecs_instance): error running " + 
"api(set_auto_recovery), error: %s" % str(ex)) + module.fail_json(msg=msg) + + return r + + +def build_create_parameters(opts): + params = dict() + + v = navigate_value(opts, ["admin_pass"], None) + if not is_empty_value(v): + params["adminPass"] = v + + v = navigate_value(opts, ["availability_zone"], None) + if not is_empty_value(v): + params["availability_zone"] = v + + v = navigate_value(opts, ["description"], None) + if not is_empty_value(v): + params["description"] = v + + v = expand_create_extendparam(opts, None) + if not is_empty_value(v): + params["extendparam"] = v + + v = navigate_value(opts, ["flavor_name"], None) + if not is_empty_value(v): + params["flavorRef"] = v + + v = navigate_value(opts, ["image_id"], None) + if not is_empty_value(v): + params["imageRef"] = v + + v = navigate_value(opts, ["ssh_key_name"], None) + if not is_empty_value(v): + params["key_name"] = v + + v = navigate_value(opts, ["server_metadata"], None) + if not is_empty_value(v): + params["metadata"] = v + + v = navigate_value(opts, ["name"], None) + if not is_empty_value(v): + params["name"] = v + + v = expand_create_nics(opts, None) + if not is_empty_value(v): + params["nics"] = v + + v = expand_create_publicip(opts, None) + if not is_empty_value(v): + params["publicip"] = v + + v = expand_create_root_volume(opts, None) + if not is_empty_value(v): + params["root_volume"] = v + + v = expand_create_security_groups(opts, None) + if not is_empty_value(v): + params["security_groups"] = v + + v = expand_create_server_tags(opts, None) + if not is_empty_value(v): + params["server_tags"] = v + + v = navigate_value(opts, ["user_data"], None) + if not is_empty_value(v): + params["user_data"] = v + + v = navigate_value(opts, ["vpc_id"], None) + if not is_empty_value(v): + params["vpcid"] = v + + if not params: + return params + + params = {"server": params} + + return params + + +def expand_create_extendparam(d, array_index): + r = dict() + + r["chargingMode"] = 0 + + v = navigate_value(d, ["enterprise_project_id"], array_index) + if not is_empty_value(v): + r["enterprise_project_id"] = v + + v = navigate_value(d, ["enable_auto_recovery"], array_index) + if not is_empty_value(v): + r["support_auto_recovery"] = v + + return r + + +def expand_create_nics(d, array_index): + new_ai = dict() + if array_index: + new_ai.update(array_index) + + req = [] + + v = navigate_value( + d, ["nics"], new_ai) + + if not v: + return req + n = len(v) + for i in range(n): + new_ai["nics"] = i + transformed = dict() + + v = navigate_value(d, ["nics", "ip_address"], new_ai) + if not is_empty_value(v): + transformed["ip_address"] = v + + v = navigate_value(d, ["nics", "subnet_id"], new_ai) + if not is_empty_value(v): + transformed["subnet_id"] = v + + if transformed: + req.append(transformed) + + return req + + +def expand_create_publicip(d, array_index): + r = dict() + + v = navigate_value(d, ["eip_id"], array_index) + if not is_empty_value(v): + r["id"] = v + + return r + + +def expand_create_root_volume(d, array_index): + r = dict() + + v = expand_create_root_volume_extendparam(d, array_index) + if not is_empty_value(v): + r["extendparam"] = v + + v = navigate_value(d, ["root_volume", "size"], array_index) + if not is_empty_value(v): + r["size"] = v + + v = navigate_value(d, ["root_volume", "volume_type"], array_index) + if not is_empty_value(v): + r["volumetype"] = v + + return r + + +def expand_create_root_volume_extendparam(d, array_index): + r = dict() + + v = navigate_value(d, ["root_volume", "snapshot_id"], array_index) + if not 
is_empty_value(v): + r["snapshotId"] = v + + return r + + +def expand_create_security_groups(d, array_index): + v = d.get("security_groups") + if not v: + return None + + return [{"id": i} for i in v] + + +def expand_create_server_tags(d, array_index): + v = d.get("server_tags") + if not v: + return None + + return [{"key": k, "value": v1} for k, v1 in v.items()] + + +def send_create_request(module, params, client): + url = "cloudservers" + try: + r = client.post(url, params) + except HwcClientException as ex: + msg = ("module(hwc_ecs_instance): error running " + "api(create), error: %s" % str(ex)) + module.fail_json(msg=msg) + + return r + + +def build_attach_nics_parameters(opts): + params = dict() + + v = expand_attach_nics_nics(opts, None) + if not is_empty_value(v): + params["nics"] = v + + return params + + +def expand_attach_nics_nics(d, array_index): + ev = d.get("nics") + if not ev: + return None + + val = ev + + cv = d["current_state"].get("nics") + if cv: + m = [item.get("ip_address") for item in cv] + val = [item for item in ev if item.get("ip_address") not in m] + + r = [] + for item in val: + transformed = dict() + + v = item.get("ip_address") + if not is_empty_value(v): + transformed["ip_address"] = v + + v = item.get("subnet_id") + if not is_empty_value(v): + transformed["subnet_id"] = v + + if transformed: + r.append(transformed) + + return r + + +def send_attach_nics_request(module, params, client): + url = build_path(module, "cloudservers/{id}/nics") + + try: + r = client.post(url, params) + except HwcClientException as ex: + msg = ("module(hwc_ecs_instance): error running " + "api(attach_nics), error: %s" % str(ex)) + module.fail_json(msg=msg) + + return r + + +def send_delete_volume_request(module, params, client, info): + path_parameters = { + "volume_id": ["volume_id"], + } + data = dict((key, navigate_value(info, path)) + for key, path in path_parameters.items()) + + url = build_path(module, "cloudservers/{id}/detachvolume/{volume_id}", data) + + try: + r = client.delete(url, params) + except HwcClientException as ex: + msg = ("module(hwc_ecs_instance): error running " + "api(delete_volume), error: %s" % str(ex)) + module.fail_json(msg=msg) + + return r + + +def build_attach_data_disk_parameters(opts, array_index): + params = dict() + + v = expand_attach_data_disk_volume_attachment(opts, array_index) + if not is_empty_value(v): + params["volumeAttachment"] = v + + return params + + +def expand_attach_data_disk_volume_attachment(d, array_index): + r = dict() + + v = navigate_value(d, ["data_volumes", "device"], array_index) + if not is_empty_value(v): + r["device"] = v + + v = navigate_value(d, ["data_volumes", "volume_id"], array_index) + if not is_empty_value(v): + r["volumeId"] = v + + return r + + +def send_attach_data_disk_request(module, params, client): + url = build_path(module, "cloudservers/{id}/attachvolume") + + try: + r = client.post(url, params) + except HwcClientException as ex: + msg = ("module(hwc_ecs_instance): error running " + "api(attach_data_disk), error: %s" % str(ex)) + module.fail_json(msg=msg) + + return r + + +def build_delete_parameters(opts): + params = dict() + + params["delete_publicip"] = False + + params["delete_volume"] = False + + v = expand_delete_servers(opts, None) + if not is_empty_value(v): + params["servers"] = v + + return params + + +def expand_delete_servers(d, array_index): + new_ai = dict() + if array_index: + new_ai.update(array_index) + + req = [] + + n = 1 + for i in range(n): + transformed = dict() + + v = 
expand_delete_servers_id(d, new_ai) + if not is_empty_value(v): + transformed["id"] = v + + if transformed: + req.append(transformed) + + return req + + +def expand_delete_servers_id(d, array_index): + return d["ansible_module"].params.get("id") + + +def send_delete_request(module, params, client): + url = "cloudservers/delete" + try: + r = client.post(url, params) + except HwcClientException as ex: + msg = ("module(hwc_ecs_instance): error running " + "api(delete), error: %s" % str(ex)) + module.fail_json(msg=msg) + + return r + + +def async_wait(config, result, client, timeout): + module = config.module + + url = build_path(module, "jobs/{job_id}", result) + + def _query_status(): + r = None + try: + r = client.get(url, timeout=timeout) + except HwcClientException: + return None, "" + + try: + s = navigate_value(r, ["status"]) + return r, s + except Exception: + return None, "" + + try: + return wait_to_finish( + ["SUCCESS"], + ["RUNNING", "INIT"], + _query_status, timeout) + except Exception as ex: + module.fail_json(msg="module(hwc_ecs_instance): error " + "waiting to be done, error= %s" % str(ex)) + + +def multi_invoke_delete_volume(config, opts, client, timeout): + module = config.module + + opts1 = None + expect = opts["data_volumes"] + current = opts["current_state"]["data_volumes"] + if expect and current: + v = [i["volume_id"] for i in expect] + opts1 = { + "data_volumes": [ + i for i in current if i["volume_id"] not in v + ] + } + + loop_val = navigate_value(opts1, ["data_volumes"]) + if not loop_val: + return + + for i in range(len(loop_val)): + r = send_delete_volume_request(module, None, client, loop_val[i]) + async_wait(config, r, client, timeout) + + +def multi_invoke_attach_data_disk(config, opts, client, timeout): + module = config.module + + opts1 = opts + expect = opts["data_volumes"] + current = opts["current_state"]["data_volumes"] + if expect and current: + v = [i["volume_id"] for i in current] + opts1 = { + "data_volumes": [ + i for i in expect if i["volume_id"] not in v + ] + } + + loop_val = navigate_value(opts1, ["data_volumes"]) + if not loop_val: + return + + for i in range(len(loop_val)): + params = build_attach_data_disk_parameters(opts1, {"data_volumes": i}) + r = send_attach_data_disk_request(module, params, client) + async_wait(config, r, client, timeout) + + +def send_read_request(module, client): + url = build_path(module, "cloudservers/{id}") + + r = None + try: + r = client.get(url) + except HwcClientException as ex: + msg = ("module(hwc_ecs_instance): error running " + "api(read), error: %s" % str(ex)) + module.fail_json(msg=msg) + + return navigate_value(r, ["server"], None) + + +def fill_read_resp_body(body): + result = dict() + + result["OS-DCF:diskConfig"] = body.get("OS-DCF:diskConfig") + + result["OS-EXT-AZ:availability_zone"] = body.get( + "OS-EXT-AZ:availability_zone") + + result["OS-EXT-SRV-ATTR:hostname"] = body.get("OS-EXT-SRV-ATTR:hostname") + + result["OS-EXT-SRV-ATTR:instance_name"] = body.get( + "OS-EXT-SRV-ATTR:instance_name") + + result["OS-EXT-SRV-ATTR:user_data"] = body.get("OS-EXT-SRV-ATTR:user_data") + + result["OS-EXT-STS:power_state"] = body.get("OS-EXT-STS:power_state") + + v = fill_read_resp_address(body.get("address")) + result["address"] = v + + result["config_drive"] = body.get("config_drive") + + result["created"] = body.get("created") + + result["description"] = body.get("description") + + result["enterprise_project_id"] = body.get("enterprise_project_id") + + v = fill_read_resp_flavor(body.get("flavor")) + 
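+    # The fill_read_resp_* helpers whitelist the fields this module compares,
+    # so extra keys in the raw API body never leak into the state diff done by
+    # are_different_dicts(). Illustrative sketch (not from the module itself):
+    #   fill_read_resp_flavor({"id": "s3.small.1", "links": []})
+    #   returns {"id": "s3.small.1"} - unknown keys such as "links" are dropped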
result["flavor"] = v + + result["id"] = body.get("id") + + v = fill_read_resp_image(body.get("image")) + result["image"] = v + + result["key_name"] = body.get("key_name") + + v = fill_read_resp_metadata(body.get("metadata")) + result["metadata"] = v + + result["name"] = body.get("name") + + v = fill_read_resp_os_extended_volumes_volumes_attached( + body.get("os-extended-volumes:volumes_attached")) + result["os-extended-volumes:volumes_attached"] = v + + v = fill_read_resp_root_volume(body.get("root_volume")) + result["root_volume"] = v + + result["status"] = body.get("status") + + result["tags"] = body.get("tags") + + return result + + +def fill_read_resp_address(value): + if not value: + return None + + result = [] + for item in value: + val = dict() + + val["OS-EXT-IPS:port_id"] = item.get("OS-EXT-IPS:port_id") + + val["OS-EXT-IPS:type"] = item.get("OS-EXT-IPS:type") + + val["addr"] = item.get("addr") + + result.append(val) + + return result + + +def fill_read_resp_flavor(value): + if not value: + return None + + result = dict() + + result["id"] = value.get("id") + + return result + + +def fill_read_resp_image(value): + if not value: + return None + + result = dict() + + result["id"] = value.get("id") + + return result + + +def fill_read_resp_metadata(value): + if not value: + return None + + result = dict() + + result["image_name"] = value.get("image_name") + + result["vpc_id"] = value.get("vpc_id") + + return result + + +def fill_read_resp_os_extended_volumes_volumes_attached(value): + if not value: + return None + + result = [] + for item in value: + val = dict() + + val["bootIndex"] = item.get("bootIndex") + + val["device"] = item.get("device") + + val["id"] = item.get("id") + + result.append(val) + + return result + + +def fill_read_resp_root_volume(value): + if not value: + return None + + result = dict() + + result["device"] = value.get("device") + + result["id"] = value.get("id") + + return result + + +def send_read_auto_recovery_request(module, client): + url = build_path(module, "cloudservers/{id}/autorecovery") + + r = None + try: + r = client.get(url) + except HwcClientException as ex: + msg = ("module(hwc_ecs_instance): error running " + "api(read_auto_recovery), error: %s" % str(ex)) + module.fail_json(msg=msg) + + return r + + +def fill_read_auto_recovery_resp_body(body): + result = dict() + + result["support_auto_recovery"] = body.get("support_auto_recovery") + + return result + + +def flatten_options(response, array_index): + r = dict() + + v = navigate_value( + response, ["read", "OS-EXT-AZ:availability_zone"], array_index) + r["availability_zone"] = v + + v = navigate_value(response, ["read", "config_drive"], array_index) + r["config_drive"] = v + + v = navigate_value(response, ["read", "created"], array_index) + r["created"] = v + + v = flatten_data_volumes(response, array_index) + r["data_volumes"] = v + + v = navigate_value(response, ["read", "description"], array_index) + r["description"] = v + + v = navigate_value(response, ["read", "OS-DCF:diskConfig"], array_index) + r["disk_config_type"] = v + + v = flatten_enable_auto_recovery(response, array_index) + r["enable_auto_recovery"] = v + + v = navigate_value( + response, ["read", "enterprise_project_id"], array_index) + r["enterprise_project_id"] = v + + v = navigate_value(response, ["read", "flavor", "id"], array_index) + r["flavor_name"] = v + + v = navigate_value( + response, ["read", "OS-EXT-SRV-ATTR:hostname"], array_index) + r["host_name"] = v + + v = navigate_value(response, ["read", "image", "id"], 
array_index) + r["image_id"] = v + + v = navigate_value( + response, ["read", "metadata", "image_name"], array_index) + r["image_name"] = v + + v = navigate_value(response, ["read", "name"], array_index) + r["name"] = v + + v = flatten_nics(response, array_index) + r["nics"] = v + + v = navigate_value( + response, ["read", "OS-EXT-STS:power_state"], array_index) + r["power_state"] = v + + v = flatten_root_volume(response, array_index) + r["root_volume"] = v + + v = navigate_value( + response, ["read", "OS-EXT-SRV-ATTR:instance_name"], array_index) + r["server_alias"] = v + + v = flatten_server_tags(response, array_index) + r["server_tags"] = v + + v = navigate_value(response, ["read", "key_name"], array_index) + r["ssh_key_name"] = v + + v = navigate_value(response, ["read", "status"], array_index) + r["status"] = v + + v = navigate_value( + response, ["read", "OS-EXT-SRV-ATTR:user_data"], array_index) + r["user_data"] = v + + v = navigate_value(response, ["read", "metadata", "vpc_id"], array_index) + r["vpc_id"] = v + + return r + + +def flatten_data_volumes(d, array_index): + v = navigate_value(d, ["read", "os-extended-volumes:volumes_attached"], + array_index) + if not v: + return None + n = len(v) + result = [] + + new_ai = dict() + if array_index: + new_ai.update(array_index) + + for i in range(n): + new_ai["read.os-extended-volumes:volumes_attached"] = i + + val = dict() + + v = navigate_value( + d, ["read", "os-extended-volumes:volumes_attached", "device"], new_ai) + val["device"] = v + + v = navigate_value( + d, ["read", "os-extended-volumes:volumes_attached", "id"], new_ai) + val["volume_id"] = v + + for v in val.values(): + if v is not None: + result.append(val) + break + + return result if result else None + + +def flatten_enable_auto_recovery(d, array_index): + v = navigate_value(d, ["read_auto_recovery", "support_auto_recovery"], + array_index) + return v == "true" + + +def flatten_nics(d, array_index): + v = navigate_value(d, ["read", "address"], + array_index) + if not v: + return None + n = len(v) + result = [] + + new_ai = dict() + if array_index: + new_ai.update(array_index) + + for i in range(n): + new_ai["read.address"] = i + + val = dict() + + v = navigate_value(d, ["read", "address", "addr"], new_ai) + val["ip_address"] = v + + v = navigate_value( + d, ["read", "address", "OS-EXT-IPS:port_id"], new_ai) + val["port_id"] = v + + for v in val.values(): + if v is not None: + result.append(val) + break + + return result if result else None + + +def flatten_root_volume(d, array_index): + result = dict() + + v = navigate_value(d, ["read", "root_volume", "device"], array_index) + result["device"] = v + + v = navigate_value(d, ["read", "root_volume", "id"], array_index) + result["volume_id"] = v + + for v in result.values(): + if v is not None: + return result + return None + + +def flatten_server_tags(d, array_index): + v = navigate_value(d, ["read", "tags"], array_index) + if not v: + return None + + r = dict() + for item in v: + v1 = item.split("=") + if v1: + r[v1[0]] = v1[1] + return r + + +def adjust_options(opts, states): + adjust_data_volumes(opts, states) + + adjust_nics(opts, states) + + +def adjust_data_volumes(parent_input, parent_cur): + iv = parent_input.get("data_volumes") + if not (iv and isinstance(iv, list)): + return + + cv = parent_cur.get("data_volumes") + if not (cv and isinstance(cv, list)): + return + + lcv = len(cv) + result = [] + q = [] + for iiv in iv: + if len(q) == lcv: + break + + icv = None + for j in range(lcv): + if j in q: + continue + + icv 
= cv[j] + + if iiv["volume_id"] != icv["volume_id"]: + continue + + result.append(icv) + q.append(j) + break + else: + break + + if len(q) != lcv: + for i in range(lcv): + if i not in q: + result.append(cv[i]) + + if len(result) != lcv: + raise Exception("adjust property(data_volumes) failed, " + "the array number is not equal") + + parent_cur["data_volumes"] = result + + +def adjust_nics(parent_input, parent_cur): + iv = parent_input.get("nics") + if not (iv and isinstance(iv, list)): + return + + cv = parent_cur.get("nics") + if not (cv and isinstance(cv, list)): + return + + lcv = len(cv) + result = [] + q = [] + for iiv in iv: + if len(q) == lcv: + break + + icv = None + for j in range(lcv): + if j in q: + continue + + icv = cv[j] + + if iiv["ip_address"] != icv["ip_address"]: + continue + + result.append(icv) + q.append(j) + break + else: + break + + if len(q) != lcv: + for i in range(lcv): + if i not in q: + result.append(cv[i]) + + if len(result) != lcv: + raise Exception("adjust property(nics) failed, " + "the array number is not equal") + + parent_cur["nics"] = result + + +def set_unreadable_options(opts, states): + states["admin_pass"] = opts.get("admin_pass") + + states["eip_id"] = opts.get("eip_id") + + set_unread_nics( + opts.get("nics"), states.get("nics")) + + set_unread_root_volume( + opts.get("root_volume"), states.get("root_volume")) + + states["security_groups"] = opts.get("security_groups") + + states["server_metadata"] = opts.get("server_metadata") + + +def set_unread_nics(inputv, curv): + if not (inputv and isinstance(inputv, list)): + return + + if not (curv and isinstance(curv, list)): + return + + lcv = len(curv) + q = [] + for iv in inputv: + if len(q) == lcv: + break + + cv = None + for j in range(lcv): + if j in q: + continue + + cv = curv[j] + + if iv["ip_address"] != cv["ip_address"]: + continue + + q.append(j) + break + else: + continue + + cv["subnet_id"] = iv.get("subnet_id") + + +def set_unread_root_volume(inputv, curv): + if not (inputv and isinstance(inputv, dict)): + return + + if not (curv and isinstance(curv, dict)): + return + + curv["size"] = inputv.get("size") + + curv["snapshot_id"] = inputv.get("snapshot_id") + + curv["volume_type"] = inputv.get("volume_type") + + +def set_readonly_options(opts, states): + opts["config_drive"] = states.get("config_drive") + + opts["created"] = states.get("created") + + opts["disk_config_type"] = states.get("disk_config_type") + + opts["host_name"] = states.get("host_name") + + opts["image_name"] = states.get("image_name") + + set_readonly_nics( + opts.get("nics"), states.get("nics")) + + opts["power_state"] = states.get("power_state") + + set_readonly_root_volume( + opts.get("root_volume"), states.get("root_volume")) + + opts["server_alias"] = states.get("server_alias") + + opts["status"] = states.get("status") + + +def set_readonly_nics(inputv, curv): + if not (curv and isinstance(curv, list)): + return + + if not (inputv and isinstance(inputv, list)): + return + + lcv = len(curv) + q = [] + for iv in inputv: + if len(q) == lcv: + break + + cv = None + for j in range(lcv): + if j in q: + continue + + cv = curv[j] + + if iv["ip_address"] != cv["ip_address"]: + continue + + q.append(j) + break + else: + continue + + iv["port_id"] = cv.get("port_id") + + +def set_readonly_root_volume(inputv, curv): + if not (inputv and isinstance(inputv, dict)): + return + + if not (curv and isinstance(curv, dict)): + return + + inputv["device"] = curv.get("device") + + inputv["volume_id"] = curv.get("volume_id") + + +def 
send_list_request(module, client, url): + + r = None + try: + r = client.get(url) + except HwcClientException as ex: + msg = ("module(hwc_ecs_instance): error running " + "api(list), error: %s" % str(ex)) + module.fail_json(msg=msg) + + return navigate_value(r, ["servers"], None) + + +def _build_identity_object(all_opts): + result = dict() + + result["OS-DCF:diskConfig"] = None + + v = navigate_value(all_opts, ["availability_zone"], None) + result["OS-EXT-AZ:availability_zone"] = v + + result["OS-EXT-SRV-ATTR:hostname"] = None + + result["OS-EXT-SRV-ATTR:instance_name"] = None + + v = navigate_value(all_opts, ["user_data"], None) + result["OS-EXT-SRV-ATTR:user_data"] = v + + result["OS-EXT-STS:power_state"] = None + + result["config_drive"] = None + + result["created"] = None + + v = navigate_value(all_opts, ["description"], None) + result["description"] = v + + v = navigate_value(all_opts, ["enterprise_project_id"], None) + result["enterprise_project_id"] = v + + v = expand_list_flavor(all_opts, None) + result["flavor"] = v + + result["id"] = None + + v = expand_list_image(all_opts, None) + result["image"] = v + + v = navigate_value(all_opts, ["ssh_key_name"], None) + result["key_name"] = v + + v = expand_list_metadata(all_opts, None) + result["metadata"] = v + + v = navigate_value(all_opts, ["name"], None) + result["name"] = v + + result["status"] = None + + v = expand_list_tags(all_opts, None) + result["tags"] = v + + return result + + +def expand_list_flavor(d, array_index): + r = dict() + + v = navigate_value(d, ["flavor_name"], array_index) + r["id"] = v + + for v in r.values(): + if v is not None: + return r + return None + + +def expand_list_image(d, array_index): + r = dict() + + v = navigate_value(d, ["image_id"], array_index) + r["id"] = v + + for v in r.values(): + if v is not None: + return r + return None + + +def expand_list_metadata(d, array_index): + r = dict() + + v = navigate_value(d, ["vpc_id"], array_index) + r["vpc_id"] = v + + for v in r.values(): + if v is not None: + return r + return None + + +def expand_list_tags(d, array_index): + v = d.get("server_tags") + if not v: + return None + + return [k + "=" + v1 for k, v1 in v.items()] + + +def fill_list_resp_body(body): + result = dict() + + result["OS-DCF:diskConfig"] = body.get("OS-DCF:diskConfig") + + result["OS-EXT-AZ:availability_zone"] = body.get( + "OS-EXT-AZ:availability_zone") + + result["OS-EXT-SRV-ATTR:hostname"] = body.get("OS-EXT-SRV-ATTR:hostname") + + result["OS-EXT-SRV-ATTR:instance_name"] = body.get( + "OS-EXT-SRV-ATTR:instance_name") + + result["OS-EXT-SRV-ATTR:user_data"] = body.get("OS-EXT-SRV-ATTR:user_data") + + result["OS-EXT-STS:power_state"] = body.get("OS-EXT-STS:power_state") + + result["config_drive"] = body.get("config_drive") + + result["created"] = body.get("created") + + result["description"] = body.get("description") + + result["enterprise_project_id"] = body.get("enterprise_project_id") + + v = fill_list_resp_flavor(body.get("flavor")) + result["flavor"] = v + + result["id"] = body.get("id") + + v = fill_list_resp_image(body.get("image")) + result["image"] = v + + result["key_name"] = body.get("key_name") + + v = fill_list_resp_metadata(body.get("metadata")) + result["metadata"] = v + + result["name"] = body.get("name") + + result["status"] = body.get("status") + + result["tags"] = body.get("tags") + + return result + + +def fill_list_resp_flavor(value): + if not value: + return None + + result = dict() + + result["id"] = value.get("id") + + return result + + +def 
fill_list_resp_image(value):
+    if not value:
+        return None
+
+    result = dict()
+
+    result["id"] = value.get("id")
+
+    return result
+
+
+def fill_list_resp_metadata(value):
+    if not value:
+        return None
+
+    result = dict()
+
+    result["vpc_id"] = value.get("vpc_id")
+
+    return result
+
+
+def adjust_list_resp(opts, resp):
+    adjust_list_api_tags(opts, resp)
+
+
+def adjust_list_api_tags(parent_input, parent_cur):
+    iv = parent_input.get("tags")
+    if not (iv and isinstance(iv, list)):
+        return
+
+    cv = parent_cur.get("tags")
+    if not (cv and isinstance(cv, list)):
+        return
+
+    result = []
+    for iiv in iv:
+        if iiv not in cv:
+            break
+
+        result.append(iiv)
+
+        j = cv.index(iiv)
+        cv[j] = cv[-1]
+        cv.pop()
+
+    if cv:
+        result.extend(cv)
+    parent_cur["tags"] = result
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/cloud/huawei/hwc_evs_disk.py b/plugins/modules/cloud/huawei/hwc_evs_disk.py
new file mode 100644
index 0000000000..f12668876e
--- /dev/null
+++ b/plugins/modules/cloud/huawei/hwc_evs_disk.py
@@ -0,0 +1,1213 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) 2019 Huawei
+# GNU General Public License v3.0+ (see COPYING or
+# https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+###############################################################################
+# Documentation
+###############################################################################
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ["preview"],
+                    'supported_by': 'community'}
+
+DOCUMENTATION = '''
+---
+module: hwc_evs_disk
+description:
+    - Block storage management.
+short_description: Creates a resource of Evs/Disk in Huawei Cloud
+author: Huawei Inc. (@huaweicloud)
+requirements:
+    - keystoneauth1 >= 3.6.0
+options:
+    state:
+        description:
+            - Whether the given object should exist in Huawei Cloud.
+        type: str
+        choices: ['present', 'absent']
+        default: 'present'
+    timeouts:
+        description:
+            - The timeouts for each operation.
+        type: dict
+        suboptions:
+            create:
+                description:
+                    - The timeout for the create operation.
+                type: str
+                default: '30m'
+            update:
+                description:
+                    - The timeout for the update operation.
+                type: str
+                default: '30m'
+            delete:
+                description:
+                    - The timeout for the delete operation.
+                type: str
+                default: '30m'
+    availability_zone:
+        description:
+            - Specifies the AZ where you want to create the disk.
+        type: str
+        required: true
+    name:
+        description:
+            - Specifies the disk name. The value can contain a maximum of 255
+              bytes.
+        type: str
+        required: true
+    volume_type:
+        description:
+            - Specifies the disk type. Currently, the value can be SSD, SAS, or
+              SATA.
+            - SSD specifies the ultra-high I/O disk type.
+            - SAS specifies the high I/O disk type.
+            - SATA specifies the common I/O disk type.
+            - If the specified disk type is not available in the AZ, the
+              disk fails to be created. If the EVS disk is created from a
+              snapshot, the volume_type field must be the same as that of the
+              snapshot's source disk.
+        type: str
+        required: true
+    backup_id:
+        description:
+            - Specifies the ID of the backup that can be used to create a disk.
+              This parameter is mandatory when you use a backup to create the
+              disk.
+        type: str
+        required: false
+    description:
+        description:
+            - Specifies the disk description. The value can contain a maximum
+              of 255 bytes.
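+# Illustrative only (IDs are placeholders): creating a disk from a snapshot,
+# with linked cloning requested via enable_full_clone as described below; note
+# that volume_type must match the snapshot's source disk:
+#   hwc_evs_disk:
+#     availability_zone: "cn-north-1a"
+#     name: "ansible_evs_disk_from_snapshot"
+#     volume_type: "SATA"
+#     snapshot_id: "{{ snapshot.id }}"
+#     enable_full_clone: true
+#     size: 10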
+        type: str
+        required: false
+    enable_full_clone:
+        description:
+            - If the disk is created from a snapshot and linked cloning needs
+              to be used, set this parameter to True.
+        type: bool
+        required: false
+    enable_scsi:
+        description:
+            - If this parameter is set to True, the disk device type will be
+              SCSI, which allows ECS OSs to directly access underlying storage
+              media. SCSI reservation command is supported. If this parameter
+              is set to False, the disk device type will be VBD, which supports
+              only simple SCSI read/write commands.
+            - If parameter enable_share is set to True and this parameter
+              is not specified, shared SCSI disks are created. SCSI EVS disks
+              cannot be created from backups, which means that this parameter
+              cannot be True if backup_id has been specified.
+        type: bool
+        required: false
+    enable_share:
+        description:
+            - Specifies whether the disk is shareable. The default value is
+              False.
+        type: bool
+        required: false
+    encryption_id:
+        description:
+            - Specifies the encryption ID. Its length is fixed at 36 bytes.
+        type: str
+        required: false
+    enterprise_project_id:
+        description:
+            - Specifies the enterprise project ID. This ID is associated with
+              the disk during the disk creation. If it is not specified, the
+              disk is bound to the default enterprise project.
+        type: str
+        required: false
+    image_id:
+        description:
+            - Specifies the image ID. If this parameter is specified, the disk
+              is created from an image. BMS system disks cannot be
+              created from BMS images.
+        type: str
+        required: false
+    size:
+        description:
+            - Specifies the disk size, in GB. Its values are as follows: system
+              disk 1 GB to 1024 GB, data disk 10 GB to 32768 GB. This
+              parameter is mandatory when you create an empty disk or use an
+              image or a snapshot to create a disk. If you use an image or a
+              snapshot to create a disk, the disk size must be greater than or
+              equal to the image or snapshot size. This parameter is optional
+              when you use a backup to create a disk. If this parameter is not
+              specified, the disk size is equal to the backup size.
+        type: int
+        required: false
+    snapshot_id:
+        description:
+            - Specifies the snapshot ID. If this parameter is specified, the
+              disk is created from a snapshot.
+        type: str
+        required: false
+extends_documentation_fragment:
+- community.general.hwc
+
+'''
+
+EXAMPLES = '''
+# test create disk
+- name: create a disk
+  hwc_evs_disk:
+    availability_zone: "cn-north-1a"
+    name: "ansible_evs_disk_test"
+    volume_type: "SATA"
+    size: 10
+'''
+
+RETURN = '''
+    availability_zone:
+        description:
+            - Specifies the AZ where you want to create the disk.
+        type: str
+        returned: success
+    name:
+        description:
+            - Specifies the disk name. The value can contain a maximum of 255
+              bytes.
+        type: str
+        returned: success
+    volume_type:
+        description:
+            - Specifies the disk type. Currently, the value can be SSD, SAS, or
+              SATA.
+            - SSD specifies the ultra-high I/O disk type.
+            - SAS specifies the high I/O disk type.
+            - SATA specifies the common I/O disk type.
+            - If the specified disk type is not available in the AZ, the
+              disk fails to be created. If the EVS disk is created from a
+              snapshot, the volume_type field must be the same as that of the
+              snapshot's source disk.
+        type: str
+        returned: success
+    backup_id:
+        description:
+            - Specifies the ID of the backup that can be used to create a disk.
+              This parameter is mandatory when you use a backup to create the
+              disk.
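+# Illustrative only (the ID is a placeholder): creating a disk from a backup;
+# per the size option above, size may be omitted and then defaults to the
+# backup size:
+#   hwc_evs_disk:
+#     availability_zone: "cn-north-1a"
+#     name: "ansible_evs_disk_from_backup"
+#     volume_type: "SAS"
+#     backup_id: "3c9eeb64-8eb6-4b13-aa94-d7f36db2c5b9"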
+        type: str
+        returned: success
+    description:
+        description:
+            - Specifies the disk description. The value can contain a maximum
+              of 255 bytes.
+        type: str
+        returned: success
+    enable_full_clone:
+        description:
+            - If the disk is created from a snapshot and linked cloning needs
+              to be used, set this parameter to True.
+        type: bool
+        returned: success
+    enable_scsi:
+        description:
+            - If this parameter is set to True, the disk device type will be
+              SCSI, which allows ECS OSs to directly access underlying storage
+              media. SCSI reservation command is supported. If this parameter
+              is set to False, the disk device type will be VBD, which supports
+              only simple SCSI read/write commands.
+            - If parameter enable_share is set to True and this parameter
+              is not specified, shared SCSI disks are created. SCSI EVS disks
+              cannot be created from backups, which means that this parameter
+              cannot be True if backup_id has been specified.
+        type: bool
+        returned: success
+    enable_share:
+        description:
+            - Specifies whether the disk is shareable. The default value is
+              False.
+        type: bool
+        returned: success
+    encryption_id:
+        description:
+            - Specifies the encryption ID. Its length is fixed at 36 bytes.
+        type: str
+        returned: success
+    enterprise_project_id:
+        description:
+            - Specifies the enterprise project ID. This ID is associated with
+              the disk during the disk creation. If it is not specified, the
+              disk is bound to the default enterprise project.
+        type: str
+        returned: success
+    image_id:
+        description:
+            - Specifies the image ID. If this parameter is specified, the disk
+              is created from an image. BMS system disks cannot be
+              created from BMS images.
+        type: str
+        returned: success
+    size:
+        description:
+            - Specifies the disk size, in GB. Its values are as follows: system
+              disk 1 GB to 1024 GB, data disk 10 GB to 32768 GB. This
+              parameter is mandatory when you create an empty disk or use an
+              image or a snapshot to create a disk. If you use an image or a
+              snapshot to create a disk, the disk size must be greater than or
+              equal to the image or snapshot size. This parameter is optional
+              when you use a backup to create a disk. If this parameter is not
+              specified, the disk size is equal to the backup size.
+        type: int
+        returned: success
+    snapshot_id:
+        description:
+            - Specifies the snapshot ID. If this parameter is specified, the
+              disk is created from a snapshot.
+        type: str
+        returned: success
+    attachments:
+        description:
+            - Specifies the disk attachment information.
+        type: complex
+        returned: success
+        contains:
+            attached_at:
+                description:
+                    - Specifies the time when the disk was attached. Time
+                      format is 'UTC YYYY-MM-DDTHH:MM:SS'.
+                type: str
+                returned: success
+            attachment_id:
+                description:
+                    - Specifies the ID of the attachment information.
+                type: str
+                returned: success
+            device:
+                description:
+                    - Specifies the device name.
+                type: str
+                returned: success
+            server_id:
+                description:
+                    - Specifies the ID of the server to which the disk is
+                      attached.
+                type: str
+                returned: success
+    backup_policy_id:
+        description:
+            - Specifies the backup policy ID.
+        type: str
+        returned: success
+    created_at:
+        description:
+            - Specifies the time when the disk was created. Time format is 'UTC
+              YYYY-MM-DDTHH:MM:SS'.
+        type: str
+        returned: success
+    is_bootable:
+        description:
+            - Specifies whether the disk is bootable.
+        type: bool
+        returned: success
+    is_readonly:
+        description:
+            - Specifies whether the disk is read-only or read/write.
True + indicates that the disk is read-only. False indicates that the + disk is read/write. + type: bool + returned: success + source_volume_id: + description: + - Specifies the source disk ID. This parameter has a value if the + disk is created from a source disk. + type: str + returned: success + status: + description: + - Specifies the disk status. + type: str + returned: success + tags: + description: + - Specifies the disk tags. + type: dict + returned: success +''' + +from ansible_collections.community.general.plugins.module_utils.hwc_utils import ( + Config, HwcClientException, HwcModule, are_different_dicts, build_path, + get_region, is_empty_value, navigate_value, wait_to_finish) + + +def build_module(): + return HwcModule( + argument_spec=dict( + state=dict(default='present', choices=['present', 'absent'], + type='str'), + timeouts=dict(type='dict', options=dict( + create=dict(default='30m', type='str'), + update=dict(default='30m', type='str'), + delete=dict(default='30m', type='str'), + ), default=dict()), + availability_zone=dict(type='str', required=True), + name=dict(type='str', required=True), + volume_type=dict(type='str', required=True), + backup_id=dict(type='str'), + description=dict(type='str'), + enable_full_clone=dict(type='bool'), + enable_scsi=dict(type='bool'), + enable_share=dict(type='bool'), + encryption_id=dict(type='str'), + enterprise_project_id=dict(type='str'), + image_id=dict(type='str'), + size=dict(type='int'), + snapshot_id=dict(type='str') + ), + supports_check_mode=True, + ) + + +def main(): + """Main function""" + + module = build_module() + config = Config(module, "evs") + + try: + _init(config) + is_exist = module.params.get('id') + + result = None + changed = False + if module.params['state'] == 'present': + if not is_exist: + if not module.check_mode: + create(config) + changed = True + + inputv = user_input_parameters(module) + resp, array_index = read_resource(config) + result = build_state(inputv, resp, array_index) + set_readonly_options(inputv, result) + if are_different_dicts(inputv, result): + if not module.check_mode: + update(config, inputv, result) + + inputv = user_input_parameters(module) + resp, array_index = read_resource(config) + result = build_state(inputv, resp, array_index) + set_readonly_options(inputv, result) + if are_different_dicts(inputv, result): + raise Exception("Update resource failed, " + "some attributes are not updated") + + changed = True + + result['id'] = module.params.get('id') + else: + result = dict() + if is_exist: + if not module.check_mode: + delete(config) + changed = True + + except Exception as ex: + module.fail_json(msg=str(ex)) + + else: + result['changed'] = changed + module.exit_json(**result) + + +def _init(config): + module = config.module + if module.params.get('id'): + return + + v = search_resource(config) + n = len(v) + if n > 1: + raise Exception("find more than one resources(%s)" % ", ".join([ + navigate_value(i, ["id"]) + for i in v + ])) + + if n == 1: + module.params['id'] = navigate_value(v[0], ["id"]) + + +def user_input_parameters(module): + return { + "availability_zone": module.params.get("availability_zone"), + "backup_id": module.params.get("backup_id"), + "description": module.params.get("description"), + "enable_full_clone": module.params.get("enable_full_clone"), + "enable_scsi": module.params.get("enable_scsi"), + "enable_share": module.params.get("enable_share"), + "encryption_id": module.params.get("encryption_id"), + "enterprise_project_id": 
module.params.get("enterprise_project_id"), + "image_id": module.params.get("image_id"), + "name": module.params.get("name"), + "size": module.params.get("size"), + "snapshot_id": module.params.get("snapshot_id"), + "volume_type": module.params.get("volume_type"), + } + + +def create(config): + module = config.module + client = config.client(get_region(module), "volumev3", "project") + timeout = 60 * int(module.params['timeouts']['create'].rstrip('m')) + opts = user_input_parameters(module) + opts["ansible_module"] = module + + params = build_create_parameters(opts) + r = send_create_request(module, params, client) + + client1 = config.client(get_region(module), "volume", "project") + client1.endpoint = client1.endpoint.replace("/v2/", "/v1/") + obj = async_wait(config, r, client1, timeout) + module.params['id'] = navigate_value(obj, ["entities", "volume_id"]) + + +def update(config, expect_state, current_state): + module = config.module + expect_state["current_state"] = current_state + current_state["current_state"] = current_state + client = config.client(get_region(module), "evs", "project") + timeout = 60 * int(module.params['timeouts']['update'].rstrip('m')) + + params = build_update_parameters(expect_state) + params1 = build_update_parameters(current_state) + if params and are_different_dicts(params, params1): + send_update_request(module, params, client) + + params = build_extend_disk_parameters(expect_state) + params1 = build_extend_disk_parameters(current_state) + if params and are_different_dicts(params, params1): + client1 = config.client(get_region(module), "evsv2.1", "project") + r = send_extend_disk_request(module, params, client1) + + client1 = config.client(get_region(module), "volume", "project") + client1.endpoint = client1.endpoint.replace("/v2/", "/v1/") + async_wait(config, r, client1, timeout) + + +def delete(config): + module = config.module + client = config.client(get_region(module), "evs", "project") + timeout = 60 * int(module.params['timeouts']['delete'].rstrip('m')) + + r = send_delete_request(module, None, client) + + client = config.client(get_region(module), "volume", "project") + client.endpoint = client.endpoint.replace("/v2/", "/v1/") + async_wait(config, r, client, timeout) + + +def read_resource(config): + module = config.module + client = config.client(get_region(module), "volumev3", "project") + + res = {} + + r = send_read_request(module, client) + res["read"] = fill_read_resp_body(r) + + return res, None + + +def build_state(opts, response, array_index): + states = flatten_options(response, array_index) + set_unreadable_options(opts, states) + return states + + +def _build_query_link(opts): + query_params = [] + + v = navigate_value(opts, ["enable_share"]) + if v or v in [False, 0]: + query_params.append( + "multiattach=" + (str(v) if v else str(v).lower())) + + v = navigate_value(opts, ["name"]) + if v or v in [False, 0]: + query_params.append( + "name=" + (str(v) if v else str(v).lower())) + + v = navigate_value(opts, ["availability_zone"]) + if v or v in [False, 0]: + query_params.append( + "availability_zone=" + (str(v) if v else str(v).lower())) + + query_link = "?limit=10&offset={start}" + if query_params: + query_link += "&" + "&".join(query_params) + + return query_link + + +def search_resource(config): + module = config.module + client = config.client(get_region(module), "volumev3", "project") + opts = user_input_parameters(module) + name = module.params.get("name") + query_link = _build_query_link(opts) + link = "os-vendor-volumes/detail" 
+ query_link + + result = [] + p = {'start': 0} + while True: + url = link.format(**p) + r = send_list_request(module, client, url) + if not r: + break + + for item in r: + if name == item.get("name"): + result.append(item) + + if len(result) > 1: + break + + p['start'] += len(r) + + return result + + +def build_create_parameters(opts): + params = dict() + + v = navigate_value(opts, ["availability_zone"], None) + if not is_empty_value(v): + params["availability_zone"] = v + + v = navigate_value(opts, ["backup_id"], None) + if not is_empty_value(v): + params["backup_id"] = v + + v = navigate_value(opts, ["description"], None) + if not is_empty_value(v): + params["description"] = v + + v = navigate_value(opts, ["enterprise_project_id"], None) + if not is_empty_value(v): + params["enterprise_project_id"] = v + + v = navigate_value(opts, ["image_id"], None) + if not is_empty_value(v): + params["imageRef"] = v + + v = expand_create_metadata(opts, None) + if not is_empty_value(v): + params["metadata"] = v + + v = navigate_value(opts, ["enable_share"], None) + if not is_empty_value(v): + params["multiattach"] = v + + v = navigate_value(opts, ["name"], None) + if not is_empty_value(v): + params["name"] = v + + v = navigate_value(opts, ["size"], None) + if not is_empty_value(v): + params["size"] = v + + v = navigate_value(opts, ["snapshot_id"], None) + if not is_empty_value(v): + params["snapshot_id"] = v + + v = navigate_value(opts, ["volume_type"], None) + if not is_empty_value(v): + params["volume_type"] = v + + if not params: + return params + + params = {"volume": params} + + return params + + +def expand_create_metadata(d, array_index): + r = dict() + + v = navigate_value(d, ["encryption_id"], array_index) + if not is_empty_value(v): + r["__system__cmkid"] = v + + v = expand_create_metadata_system_encrypted(d, array_index) + if not is_empty_value(v): + r["__system__encrypted"] = v + + v = expand_create_metadata_full_clone(d, array_index) + if not is_empty_value(v): + r["full_clone"] = v + + v = expand_create_metadata_hw_passthrough(d, array_index) + if not is_empty_value(v): + r["hw:passthrough"] = v + + return r + + +def expand_create_metadata_system_encrypted(d, array_index): + v = navigate_value(d, ["encryption_id"], array_index) + return "1" if v else "" + + +def expand_create_metadata_full_clone(d, array_index): + v = navigate_value(d, ["enable_full_clone"], array_index) + return "0" if v else "" + + +def expand_create_metadata_hw_passthrough(d, array_index): + v = navigate_value(d, ["enable_scsi"], array_index) + if v is None: + return v + return "true" if v else "false" + + +def send_create_request(module, params, client): + url = "cloudvolumes" + try: + r = client.post(url, params) + except HwcClientException as ex: + msg = ("module(hwc_evs_disk): error running " + "api(create), error: %s" % str(ex)) + module.fail_json(msg=msg) + + return r + + +def build_update_parameters(opts): + params = dict() + + v = navigate_value(opts, ["description"], None) + if v is not None: + params["description"] = v + + v = navigate_value(opts, ["name"], None) + if not is_empty_value(v): + params["name"] = v + + if not params: + return params + + params = {"volume": params} + + return params + + +def send_update_request(module, params, client): + url = build_path(module, "cloudvolumes/{id}") + + try: + r = client.put(url, params) + except HwcClientException as ex: + msg = ("module(hwc_evs_disk): error running " + "api(update), error: %s" % str(ex)) + module.fail_json(msg=msg) + + return r + + +def 
send_delete_request(module, params, client): + url = build_path(module, "cloudvolumes/{id}") + + try: + r = client.delete(url, params) + except HwcClientException as ex: + msg = ("module(hwc_evs_disk): error running " + "api(delete), error: %s" % str(ex)) + module.fail_json(msg=msg) + + return r + + +def build_extend_disk_parameters(opts): + params = dict() + + v = expand_extend_disk_os_extend(opts, None) + if not is_empty_value(v): + params["os-extend"] = v + + return params + + +def expand_extend_disk_os_extend(d, array_index): + r = dict() + + v = navigate_value(d, ["size"], array_index) + if not is_empty_value(v): + r["new_size"] = v + + return r + + +def send_extend_disk_request(module, params, client): + url = build_path(module, "cloudvolumes/{id}/action") + + try: + r = client.post(url, params) + except HwcClientException as ex: + msg = ("module(hwc_evs_disk): error running " + "api(extend_disk), error: %s" % str(ex)) + module.fail_json(msg=msg) + + return r + + +def async_wait(config, result, client, timeout): + module = config.module + + path_parameters = { + "job_id": ["job_id"], + } + data = dict((key, navigate_value(result, path)) + for key, path in path_parameters.items()) + + url = build_path(module, "jobs/{job_id}", data) + + def _query_status(): + r = None + try: + r = client.get(url, timeout=timeout) + except HwcClientException: + return None, "" + + try: + s = navigate_value(r, ["status"]) + return r, s + except Exception: + return None, "" + + try: + return wait_to_finish( + ["SUCCESS"], + ["RUNNING", "INIT"], + _query_status, timeout) + except Exception as ex: + module.fail_json(msg="module(hwc_evs_disk): error " + "waiting to be done, error= %s" % str(ex)) + + +def send_read_request(module, client): + url = build_path(module, "os-vendor-volumes/{id}") + + r = None + try: + r = client.get(url) + except HwcClientException as ex: + msg = ("module(hwc_evs_disk): error running " + "api(read), error: %s" % str(ex)) + module.fail_json(msg=msg) + + return navigate_value(r, ["volume"], None) + + +def fill_read_resp_body(body): + result = dict() + + v = fill_read_resp_attachments(body.get("attachments")) + result["attachments"] = v + + result["availability_zone"] = body.get("availability_zone") + + result["bootable"] = body.get("bootable") + + result["created_at"] = body.get("created_at") + + result["description"] = body.get("description") + + result["enterprise_project_id"] = body.get("enterprise_project_id") + + result["id"] = body.get("id") + + v = fill_read_resp_metadata(body.get("metadata")) + result["metadata"] = v + + result["multiattach"] = body.get("multiattach") + + result["name"] = body.get("name") + + result["size"] = body.get("size") + + result["snapshot_id"] = body.get("snapshot_id") + + result["source_volid"] = body.get("source_volid") + + result["status"] = body.get("status") + + result["tags"] = body.get("tags") + + v = fill_read_resp_volume_image_metadata(body.get("volume_image_metadata")) + result["volume_image_metadata"] = v + + result["volume_type"] = body.get("volume_type") + + return result + + +def fill_read_resp_attachments(value): + if not value: + return None + + result = [] + for item in value: + val = dict() + + val["attached_at"] = item.get("attached_at") + + val["attachment_id"] = item.get("attachment_id") + + val["device"] = item.get("device") + + val["server_id"] = item.get("server_id") + + result.append(val) + + return result + + +def fill_read_resp_metadata(value): + if not value: + return None + + result = dict() + + result["__system__cmkid"] 
= value.get("__system__cmkid") + + result["attached_mode"] = value.get("attached_mode") + + result["full_clone"] = value.get("full_clone") + + result["hw:passthrough"] = value.get("hw:passthrough") + + result["policy"] = value.get("policy") + + result["readonly"] = value.get("readonly") + + return result + + +def fill_read_resp_volume_image_metadata(value): + if not value: + return None + + result = dict() + + result["id"] = value.get("id") + + return result + + +def flatten_options(response, array_index): + r = dict() + + v = flatten_attachments(response, array_index) + r["attachments"] = v + + v = navigate_value(response, ["read", "availability_zone"], array_index) + r["availability_zone"] = v + + v = navigate_value(response, ["read", "metadata", "policy"], array_index) + r["backup_policy_id"] = v + + v = navigate_value(response, ["read", "created_at"], array_index) + r["created_at"] = v + + v = navigate_value(response, ["read", "description"], array_index) + r["description"] = v + + v = flatten_enable_full_clone(response, array_index) + r["enable_full_clone"] = v + + v = flatten_enable_scsi(response, array_index) + r["enable_scsi"] = v + + v = navigate_value(response, ["read", "multiattach"], array_index) + r["enable_share"] = v + + v = navigate_value( + response, ["read", "metadata", "__system__cmkid"], array_index) + r["encryption_id"] = v + + v = navigate_value( + response, ["read", "enterprise_project_id"], array_index) + r["enterprise_project_id"] = v + + v = navigate_value( + response, ["read", "volume_image_metadata", "id"], array_index) + r["image_id"] = v + + v = flatten_is_bootable(response, array_index) + r["is_bootable"] = v + + v = flatten_is_readonly(response, array_index) + r["is_readonly"] = v + + v = navigate_value(response, ["read", "name"], array_index) + r["name"] = v + + v = navigate_value(response, ["read", "size"], array_index) + r["size"] = v + + v = navigate_value(response, ["read", "snapshot_id"], array_index) + r["snapshot_id"] = v + + v = navigate_value(response, ["read", "source_volid"], array_index) + r["source_volume_id"] = v + + v = navigate_value(response, ["read", "status"], array_index) + r["status"] = v + + v = navigate_value(response, ["read", "tags"], array_index) + r["tags"] = v + + v = navigate_value(response, ["read", "volume_type"], array_index) + r["volume_type"] = v + + return r + + +def flatten_attachments(d, array_index): + v = navigate_value(d, ["read", "attachments"], + array_index) + if not v: + return None + n = len(v) + result = [] + + new_ai = dict() + if array_index: + new_ai.update(array_index) + + for i in range(n): + new_ai["read.attachments"] = i + + val = dict() + + v = navigate_value(d, ["read", "attachments", "attached_at"], new_ai) + val["attached_at"] = v + + v = navigate_value(d, ["read", "attachments", "attachment_id"], new_ai) + val["attachment_id"] = v + + v = navigate_value(d, ["read", "attachments", "device"], new_ai) + val["device"] = v + + v = navigate_value(d, ["read", "attachments", "server_id"], new_ai) + val["server_id"] = v + + for v in val.values(): + if v is not None: + result.append(val) + break + + return result if result else None + + +def flatten_enable_full_clone(d, array_index): + v = navigate_value(d, ["read", "metadata", "full_clone"], + array_index) + if v is None: + return v + return True if v == "0" else False + + +def flatten_enable_scsi(d, array_index): + v = navigate_value(d, ["read", "metadata", "hw:passthrough"], + array_index) + if v is None: + return v + return True if v in ["true", "True"] 
else False + + +def flatten_is_bootable(d, array_index): + v = navigate_value(d, ["read", "bootable"], array_index) + if v is None: + return v + return True if v in ["true", "True"] else False + + +def flatten_is_readonly(d, array_index): + v = navigate_value(d, ["read", "metadata", "readonly"], + array_index) + if v is None: + return v + return True if v in ["true", "True"] else False + + +def set_unreadable_options(opts, states): + states["backup_id"] = opts.get("backup_id") + + +def set_readonly_options(opts, states): + opts["attachments"] = states.get("attachments") + + opts["backup_policy_id"] = states.get("backup_policy_id") + + opts["created_at"] = states.get("created_at") + + opts["is_bootable"] = states.get("is_bootable") + + opts["is_readonly"] = states.get("is_readonly") + + opts["source_volume_id"] = states.get("source_volume_id") + + opts["status"] = states.get("status") + + opts["tags"] = states.get("tags") + + +def send_list_request(module, client, url): + + r = None + try: + r = client.get(url) + except HwcClientException as ex: + msg = ("module(hwc_evs_disk): error running " + "api(list), error: %s" % str(ex)) + module.fail_json(msg=msg) + + return navigate_value(r, ["volumes"], None) + + +def expand_list_metadata(d, array_index): + r = dict() + + v = navigate_value(d, ["encryption_id"], array_index) + r["__system__cmkid"] = v + + r["attached_mode"] = None + + v = navigate_value(d, ["enable_full_clone"], array_index) + r["full_clone"] = v + + v = navigate_value(d, ["enable_scsi"], array_index) + r["hw:passthrough"] = v + + r["policy"] = None + + r["readonly"] = None + + for v in r.values(): + if v is not None: + return r + return None + + +def expand_list_volume_image_metadata(d, array_index): + r = dict() + + v = navigate_value(d, ["image_id"], array_index) + r["id"] = v + + for v in r.values(): + if v is not None: + return r + return None + + +def fill_list_resp_body(body): + result = dict() + + v = fill_list_resp_attachments(body.get("attachments")) + result["attachments"] = v + + result["availability_zone"] = body.get("availability_zone") + + result["bootable"] = body.get("bootable") + + result["created_at"] = body.get("created_at") + + result["description"] = body.get("description") + + result["enterprise_project_id"] = body.get("enterprise_project_id") + + result["id"] = body.get("id") + + v = fill_list_resp_metadata(body.get("metadata")) + result["metadata"] = v + + result["multiattach"] = body.get("multiattach") + + result["name"] = body.get("name") + + result["size"] = body.get("size") + + result["snapshot_id"] = body.get("snapshot_id") + + result["source_volid"] = body.get("source_volid") + + result["status"] = body.get("status") + + result["tags"] = body.get("tags") + + v = fill_list_resp_volume_image_metadata(body.get("volume_image_metadata")) + result["volume_image_metadata"] = v + + result["volume_type"] = body.get("volume_type") + + return result + + +def fill_list_resp_attachments(value): + if not value: + return None + + result = [] + for item in value: + val = dict() + + val["attached_at"] = item.get("attached_at") + + val["attachment_id"] = item.get("attachment_id") + + val["device"] = item.get("device") + + val["server_id"] = item.get("server_id") + + result.append(val) + + return result + + +def fill_list_resp_metadata(value): + if not value: + return None + + result = dict() + + result["__system__cmkid"] = value.get("__system__cmkid") + + result["attached_mode"] = value.get("attached_mode") + + result["full_clone"] = value.get("full_clone") + + 
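# "hw:passthrough" stores the enable_scsi option as the strings "true" and
+    # "false" (see expand_create_metadata_hw_passthrough and flatten_enable_scsi).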
result["hw:passthrough"] = value.get("hw:passthrough") + + result["policy"] = value.get("policy") + + result["readonly"] = value.get("readonly") + + return result + + +def fill_list_resp_volume_image_metadata(value): + if not value: + return None + + result = dict() + + result["id"] = value.get("id") + + return result + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/huawei/hwc_network_vpc.py b/plugins/modules/cloud/huawei/hwc_network_vpc.py new file mode 100644 index 0000000000..f46c5fd062 --- /dev/null +++ b/plugins/modules/cloud/huawei/hwc_network_vpc.py @@ -0,0 +1,497 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (C) 2018 Huawei +# GNU General Public License v3.0+ (see COPYING or +# https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +############################################################################### +# Documentation +############################################################################### + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ["preview"], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: hwc_network_vpc +description: + - Represents an vpc resource. +short_description: Creates a Huawei Cloud VPC +author: Huawei Inc. (@huaweicloud) +requirements: + - requests >= 2.18.4 + - keystoneauth1 >= 3.6.0 +options: + state: + description: + - Whether the given object should exist in vpc. + type: str + choices: ['present', 'absent'] + default: 'present' + timeouts: + description: + - The timeouts for each operations. + type: dict + suboptions: + create: + description: + - The timeout for create operation. + type: str + default: '15m' + update: + description: + - The timeout for update operation. + type: str + default: '15m' + delete: + description: + - The timeout for delete operation. + type: str + default: '15m' + name: + description: + - The name of vpc. + type: str + required: true + cidr: + description: + - The range of available subnets in the vpc. + type: str + required: true +extends_documentation_fragment: +- community.general.hwc + +''' + +EXAMPLES = ''' +- name: create a vpc + hwc_network_vpc: + identity_endpoint: "{{ identity_endpoint }}" + user: "{{ user }}" + password: "{{ password }}" + domain: "{{ domain }}" + project: "{{ project }}" + region: "{{ region }}" + name: "vpc_1" + cidr: "192.168.100.0/24" + state: present +''' + +RETURN = ''' + id: + description: + - the id of vpc. + type: str + returned: success + name: + description: + - the name of vpc. + type: str + returned: success + cidr: + description: + - the range of available subnets in the vpc. + type: str + returned: success + status: + description: + - the status of vpc. + type: str + returned: success + routes: + description: + - the route information. + type: complex + returned: success + contains: + destination: + description: + - the destination network segment of a route. + type: str + returned: success + next_hop: + description: + - the next hop of a route. If the route type is peering, + it will provide VPC peering connection ID. + type: str + returned: success + enable_shared_snat: + description: + - show whether the shared snat is enabled. 
+ type: bool + returned: success +''' + +############################################################################### +# Imports +############################################################################### + +from ansible_collections.community.general.plugins.module_utils.hwc_utils import (Config, HwcClientException, + HwcClientException404, HwcModule, + are_different_dicts, is_empty_value, + wait_to_finish, get_region, + build_path, navigate_value) +import re + +############################################################################### +# Main +############################################################################### + + +def main(): + """Main function""" + + module = HwcModule( + argument_spec=dict( + state=dict( + default='present', choices=['present', 'absent'], type='str'), + timeouts=dict(type='dict', options=dict( + create=dict(default='15m', type='str'), + update=dict(default='15m', type='str'), + delete=dict(default='15m', type='str'), + ), default=dict()), + name=dict(required=True, type='str'), + cidr=dict(required=True, type='str') + ), + supports_check_mode=True, + ) + config = Config(module, 'vpc') + + state = module.params['state'] + + if (not module.params.get("id")) and module.params.get("name"): + module.params['id'] = get_id_by_name(config) + + fetch = None + link = self_link(module) + # the link will include Nones if required format parameters are missed + if not re.search('/None/|/None$', link): + client = config.client(get_region(module), "vpc", "project") + fetch = fetch_resource(module, client, link) + if fetch: + fetch = fetch.get('vpc') + changed = False + + if fetch: + if state == 'present': + expect = _get_editable_properties(module) + current_state = response_to_hash(module, fetch) + current = {"cidr": current_state["cidr"]} + if are_different_dicts(expect, current): + if not module.check_mode: + fetch = update(config, self_link(module)) + fetch = response_to_hash(module, fetch.get('vpc')) + changed = True + else: + fetch = current_state + else: + if not module.check_mode: + delete(config, self_link(module)) + fetch = {} + changed = True + else: + if state == 'present': + if not module.check_mode: + fetch = create(config, "vpcs") + fetch = response_to_hash(module, fetch.get('vpc')) + changed = True + else: + fetch = {} + + fetch.update({'changed': changed}) + + module.exit_json(**fetch) + + +def create(config, link): + module = config.module + client = config.client(get_region(module), "vpc", "project") + + r = None + try: + r = client.post(link, resource_to_create(module)) + except HwcClientException as ex: + msg = ("module(hwc_network_vpc): error creating " + "resource, error: %s" % str(ex)) + module.fail_json(msg=msg) + + wait_done = wait_for_operation(config, 'create', r) + v = "" + try: + v = navigate_value(wait_done, ['vpc', 'id']) + except Exception as ex: + module.fail_json(msg=str(ex)) + + url = build_path(module, 'vpcs/{op_id}', {'op_id': v}) + return fetch_resource(module, client, url) + + +def update(config, link): + module = config.module + client = config.client(get_region(module), "vpc", "project") + + r = None + try: + r = client.put(link, resource_to_update(module)) + except HwcClientException as ex: + msg = ("module(hwc_network_vpc): error updating " + "resource, error: %s" % str(ex)) + module.fail_json(msg=msg) + + wait_for_operation(config, 'update', r) + + return fetch_resource(module, client, link) + + +def delete(config, link): + module = config.module + client = config.client(get_region(module), "vpc", "project") + + 
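# Deletion is asynchronous on the VPC service side: issue the DELETE below,
+    # then wait_for_delete polls the resource until the API returns 404. +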
try: + client.delete(link) + except HwcClientException as ex: + msg = ("module(hwc_network_vpc): error deleting " + "resource, error: %s" % str(ex)) + module.fail_json(msg=msg) + + wait_for_delete(module, client, link) + + +def fetch_resource(module, client, link): + try: + return client.get(link) + except HwcClientException as ex: + msg = ("module(hwc_network_vpc): error fetching " + "resource, error: %s" % str(ex)) + module.fail_json(msg=msg) + + +def get_id_by_name(config): + module = config.module + client = config.client(get_region(module), "vpc", "project") + name = module.params.get("name") + link = "vpcs" + query_link = "?marker={marker}&limit=10" + link += query_link + not_format_keys = re.findall("={marker}", link) + none_values = re.findall("=None", link) + + if not (not_format_keys or none_values): + r = None + try: + r = client.get(link) + except Exception: + pass + if r is None: + return None + r = r.get('vpcs', []) + ids = [ + i.get('id') for i in r if i.get('name', '') == name + ] + if not ids: + return None + elif len(ids) == 1: + return ids[0] + else: + module.fail_json( + msg="Multiple resources with same name are found.") + elif none_values: + module.fail_json( + msg="Can not find id by name because url includes None.") + else: + p = {'marker': ''} + ids = set() + while True: + r = None + try: + r = client.get(link.format(**p)) + except Exception: + pass + if r is None: + break + r = r.get('vpcs', []) + if r == []: + break + for i in r: + if i.get('name') == name: + ids.add(i.get('id')) + if len(ids) >= 2: + module.fail_json( + msg="Multiple resources with same name are found.") + + p['marker'] = r[-1].get('id') + + return ids.pop() if ids else None + + +def self_link(module): + return build_path(module, "vpcs/{id}") + + +def resource_to_create(module): + params = dict() + + v = module.params.get('cidr') + if not is_empty_value(v): + params["cidr"] = v + + v = module.params.get('name') + if not is_empty_value(v): + params["name"] = v + + if not params: + return params + + params = {"vpc": params} + + return params + + +def resource_to_update(module): + params = dict() + + v = module.params.get('cidr') + if not is_empty_value(v): + params["cidr"] = v + + if not params: + return params + + params = {"vpc": params} + + return params + + +def _get_editable_properties(module): + return { + "cidr": module.params.get("cidr"), + } + + +def response_to_hash(module, response): + """ Remove unnecessary properties from the response. + This is for doing comparisons with Ansible's current parameters. 
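+    Routes are flattened through VpcRoutesArray.from_response so the result
+    matches the structure documented in the RETURN block.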
+ """ + return { + u'id': response.get(u'id'), + u'name': response.get(u'name'), + u'cidr': response.get(u'cidr'), + u'status': response.get(u'status'), + u'routes': VpcRoutesArray( + response.get(u'routes', []), module).from_response(), + u'enable_shared_snat': response.get(u'enable_shared_snat') + } + + +def wait_for_operation(config, op_type, op_result): + module = config.module + op_id = "" + try: + op_id = navigate_value(op_result, ['vpc', 'id']) + except Exception as ex: + module.fail_json(msg=str(ex)) + + url = build_path(module, "vpcs/{op_id}", {'op_id': op_id}) + timeout = 60 * int(module.params['timeouts'][op_type].rstrip('m')) + states = { + 'create': { + 'allowed': ['CREATING', 'DONW', 'OK'], + 'complete': ['OK'], + }, + 'update': { + 'allowed': ['PENDING_UPDATE', 'DONW', 'OK'], + 'complete': ['OK'], + } + } + + return wait_for_completion(url, timeout, states[op_type]['allowed'], + states[op_type]['complete'], config) + + +def wait_for_completion(op_uri, timeout, allowed_states, + complete_states, config): + module = config.module + client = config.client(get_region(module), "vpc", "project") + + def _refresh_status(): + r = None + try: + r = fetch_resource(module, client, op_uri) + except Exception: + return None, "" + + status = "" + try: + status = navigate_value(r, ['vpc', 'status']) + except Exception: + return None, "" + + return r, status + + try: + return wait_to_finish(complete_states, allowed_states, + _refresh_status, timeout) + except Exception as ex: + module.fail_json(msg=str(ex)) + + +def wait_for_delete(module, client, link): + + def _refresh_status(): + try: + client.get(link) + except HwcClientException404: + return True, "Done" + + except Exception: + return None, "" + + return True, "Pending" + + timeout = 60 * int(module.params['timeouts']['delete'].rstrip('m')) + try: + return wait_to_finish(["Done"], ["Pending"], _refresh_status, timeout) + except Exception as ex: + module.fail_json(msg=str(ex)) + + +class VpcRoutesArray(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = [] + + def to_request(self): + items = [] + for item in self.request: + items.append(self._request_for_item(item)) + return items + + def from_response(self): + items = [] + for item in self.request: + items.append(self._response_from_item(item)) + return items + + def _request_for_item(self, item): + return { + u'destination': item.get('destination'), + u'nexthop': item.get('next_hop') + } + + def _response_from_item(self, item): + return { + u'destination': item.get(u'destination'), + u'next_hop': item.get(u'nexthop') + } + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/huawei/hwc_smn_topic.py b/plugins/modules/cloud/huawei/hwc_smn_topic.py new file mode 100644 index 0000000000..952c939ab5 --- /dev/null +++ b/plugins/modules/cloud/huawei/hwc_smn_topic.py @@ -0,0 +1,342 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (C) 2019 Huawei +# GNU General Public License v3.0+ (see COPYING or +# https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +############################################################################### +# Documentation +############################################################################### + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ["preview"], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: hwc_smn_topic +description: + 
+    - Represents an SMN notification topic resource.
+short_description: Creates a resource of SMNTopic in Huawei Cloud
+author: Huawei Inc. (@huaweicloud)
+requirements:
+    - requests >= 2.18.4
+    - keystoneauth1 >= 3.6.0
+options:
+    state:
+        description:
+            - Whether the given object should exist in Huawei Cloud.
+        type: str
+        choices: ['present', 'absent']
+        default: 'present'
+    display_name:
+        description:
+            - Topic display name, which is presented as the name of the email
+              sender in an email message. The topic display name contains a
+              maximum of 192 bytes.
+        type: str
+        required: false
+    name:
+        description:
+            - Name of the topic to be created. The topic name is a string of 1
+              to 256 characters. It can contain upper- or lower-case letters,
+              digits, hyphens (-), and underscores C(_), and must start with a
+              letter or digit.
+        type: str
+        required: true
+extends_documentation_fragment:
+- community.general.hwc
+
+'''
+
+EXAMPLES = '''
+- name: create a smn topic
+  hwc_smn_topic:
+      identity_endpoint: "{{ identity_endpoint }}"
+      user_name: "{{ user_name }}"
+      password: "{{ password }}"
+      domain_name: "{{ domain_name }}"
+      project_name: "{{ project_name }}"
+      region: "{{ region }}"
+      name: "ansible_smn_topic_test"
+      state: present
+'''
+
+RETURN = '''
+create_time:
+    description:
+        - Time when the topic was created.
+    returned: success
+    type: str
+display_name:
+    description:
+        - Topic display name, which is presented as the name of the email
+          sender in an email message. The topic display name contains a
+          maximum of 192 bytes.
+    returned: success
+    type: str
+name:
+    description:
+        - Name of the topic to be created. The topic name is a string of 1
+          to 256 characters. It can contain upper- or lower-case letters,
+          digits, hyphens (-), and underscores C(_), and must start with a
+          letter or digit.
+    returned: success
+    type: str
+push_policy:
+    description:
+        - Message pushing policy. 0 indicates that the message sending
+          fails and the message is cached in the queue. 1 indicates that
+          the failed message is discarded.
+    returned: success
+    type: int
+topic_urn:
+    description:
+        - Resource identifier of a topic, which is unique.
+    returned: success
+    type: str
+update_time:
+    description:
+        - Time when the topic was updated.
+ returned: success + type: str +''' + +############################################################################### +# Imports +############################################################################### + +from ansible_collections.community.general.plugins.module_utils.hwc_utils import (Config, HwcClientException, + HwcModule, navigate_value, + are_different_dicts, is_empty_value, + build_path, get_region) +import re + +############################################################################### +# Main +############################################################################### + + +def main(): + """Main function""" + + module = HwcModule( + argument_spec=dict( + state=dict(default='present', choices=['present', 'absent'], + type='str'), + display_name=dict(type='str'), + name=dict(required=True, type='str') + ), + supports_check_mode=True, + ) + + config = Config(module, "smn") + + state = module.params['state'] + + if not module.params.get("id"): + module.params['id'] = get_resource_id(config) + + fetch = None + link = self_link(module) + # the link will include Nones if required format parameters are missed + if not re.search('/None/|/None$', link): + client = config.client(get_region(module), "smn", "project") + fetch = fetch_resource(module, client, link) + changed = False + + if fetch: + if state == 'present': + expect = _get_resource_editable_properties(module) + current_state = response_to_hash(module, fetch) + current = {'display_name': current_state['display_name']} + if are_different_dicts(expect, current): + if not module.check_mode: + fetch = update(config) + fetch = response_to_hash(module, fetch) + changed = True + else: + fetch = current_state + else: + if not module.check_mode: + delete(config) + fetch = {} + changed = True + else: + if state == 'present': + if not module.check_mode: + fetch = create(config) + fetch = response_to_hash(module, fetch) + changed = True + else: + fetch = {} + + fetch.update({'changed': changed}) + + module.exit_json(**fetch) + + +def create(config): + module = config.module + client = config.client(get_region(module), "smn", "project") + + link = "notifications/topics" + r = None + try: + r = client.post(link, create_resource_opts(module)) + except HwcClientException as ex: + msg = ("module(hwc_smn_topic): error creating " + "resource, error: %s" % str(ex)) + module.fail_json(msg=msg) + + return get_resource(config, r) + + +def update(config): + module = config.module + client = config.client(get_region(module), "smn", "project") + + link = self_link(module) + try: + client.put(link, update_resource_opts(module)) + except HwcClientException as ex: + msg = ("module(hwc_smn_topic): error updating " + "resource, error: %s" % str(ex)) + module.fail_json(msg=msg) + + return fetch_resource(module, client, link) + + +def delete(config): + module = config.module + client = config.client(get_region(module), "smn", "project") + + link = self_link(module) + try: + client.delete(link) + except HwcClientException as ex: + msg = ("module(hwc_smn_topic): error deleting " + "resource, error: %s" % str(ex)) + module.fail_json(msg=msg) + + +def fetch_resource(module, client, link): + try: + return client.get(link) + except HwcClientException as ex: + msg = ("module(hwc_smn_topic): error fetching " + "resource, error: %s" % str(ex)) + module.fail_json(msg=msg) + + +def get_resource(config, result): + module = config.module + client = config.client(get_region(module), "smn", "project") + + v = "" + try: + v = navigate_value(result, ['topic_urn']) 
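+        # navigate_value raises when topic_urn is missing from the create
+        # response; the except clause below turns that into a module failure.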
+ except Exception as ex: + module.fail_json(msg=str(ex)) + + d = {'topic_urn': v} + url = build_path(module, 'notifications/topics/{topic_urn}', d) + + return fetch_resource(module, client, url) + + +def get_resource_id(config): + module = config.module + client = config.client(get_region(module), "smn", "project") + + link = "notifications/topics" + query_link = "?offset={offset}&limit=10" + link += query_link + + p = {'offset': 0} + v = module.params.get('name') + ids = set() + while True: + r = None + try: + r = client.get(link.format(**p)) + except Exception: + pass + if r is None: + break + r = r.get('topics', []) + if r == []: + break + for i in r: + if i.get('name') == v: + ids.add(i.get('topic_urn')) + if len(ids) >= 2: + module.fail_json(msg="Multiple resources are found") + + p['offset'] += 1 + + return ids.pop() if ids else None + + +def self_link(module): + return build_path(module, "notifications/topics/{id}") + + +def create_resource_opts(module): + params = dict() + + v = module.params.get('display_name') + if not is_empty_value(v): + params["display_name"] = v + + v = module.params.get('name') + if not is_empty_value(v): + params["name"] = v + + return params + + +def update_resource_opts(module): + params = dict() + + v = module.params.get('display_name') + if not is_empty_value(v): + params["display_name"] = v + + return params + + +def _get_resource_editable_properties(module): + return { + "display_name": module.params.get("display_name"), + } + + +def response_to_hash(module, response): + """Remove unnecessary properties from the response. + This is for doing comparisons with Ansible's current parameters. + """ + return { + u'create_time': response.get(u'create_time'), + u'display_name': response.get(u'display_name'), + u'name': response.get(u'name'), + u'push_policy': _push_policy_convert_from_response( + response.get('push_policy')), + u'topic_urn': response.get(u'topic_urn'), + u'update_time': response.get(u'update_time') + } + + +def _push_policy_convert_from_response(value): + return { + 0: "the message sending fails and is cached in the queue", + 1: "the failed message is discarded", + }.get(int(value)) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/huawei/hwc_vpc_eip.py b/plugins/modules/cloud/huawei/hwc_vpc_eip.py new file mode 100644 index 0000000000..210f04e3e4 --- /dev/null +++ b/plugins/modules/cloud/huawei/hwc_vpc_eip.py @@ -0,0 +1,880 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (C) 2019 Huawei +# GNU General Public License v3.0+ (see COPYING or +# https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +############################################################################### +# Documentation +############################################################################### + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ["preview"], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: hwc_vpc_eip +description: + - elastic ip management. +short_description: Creates a resource of Vpc/EIP in Huawei Cloud +author: Huawei Inc. (@huaweicloud) +requirements: + - keystoneauth1 >= 3.6.0 +options: + state: + description: + - Whether the given object should exist in Huawei Cloud. + type: str + choices: ['present', 'absent'] + default: 'present' + timeouts: + description: + - The timeouts for each operations. + type: dict + suboptions: + create: + description: + - The timeouts for create operation. 
+ type: str + default: '5m' + update: + description: + - The timeouts for update operation. + type: str + default: '5m' + type: + description: + - Specifies the EIP type. + type: str + required: true + dedicated_bandwidth: + description: + - Specifies the dedicated bandwidth object. + type: dict + required: false + suboptions: + charge_mode: + description: + - Specifies whether the bandwidth is billed by traffic or + by bandwidth size. The value can be bandwidth or traffic. + If this parameter is left blank or is null character + string, default value bandwidth is used. For IPv6 + addresses, the default parameter value is bandwidth + outside China and is traffic in China. + type: str + required: true + name: + description: + - Specifies the bandwidth name. The value is a string of 1 + to 64 characters that can contain letters, digits, + underscores C(_), hyphens (-), and periods (.). + type: str + required: true + size: + description: + - Specifies the bandwidth size. The value ranges from 1 + Mbit/s to 2000 Mbit/s by default. (The specific range may + vary depending on the configuration in each region. You + can see the bandwidth range of each region on the + management console.) The minimum unit for bandwidth + adjustment varies depending on the bandwidth range. The + details are as follows. + - The minimum unit is 1 Mbit/s if the allowed bandwidth + size ranges from 0 to 300 Mbit/s (with 300 Mbit/s + included). + - The minimum unit is 50 Mbit/s if the allowed bandwidth + size ranges 300 Mbit/s to 1000 Mbit/s (with 1000 Mbit/s + included). + - The minimum unit is 500 Mbit/s if the allowed bandwidth + size is greater than 1000 Mbit/s. + type: int + required: true + enterprise_project_id: + description: + - Specifies the enterprise project ID. + type: str + required: false + ip_version: + description: + - The value can be 4 (IPv4 address) or 6 (IPv6 address). If this + parameter is left blank, an IPv4 address will be assigned. + type: int + required: false + ipv4_address: + description: + - Specifies the obtained IPv4 EIP. The system automatically assigns + an EIP if you do not specify it. + type: str + required: false + port_id: + description: + - Specifies the port ID. This parameter is returned only when a + private IP address is bound with the EIP. + type: str + required: false + shared_bandwidth_id: + description: + - Specifies the ID of shared bandwidth. + type: str + required: false +extends_documentation_fragment: +- community.general.hwc + +''' + +EXAMPLES = ''' +# create an eip and bind it to a port +- name: create vpc + hwc_network_vpc: + cidr: "192.168.100.0/24" + name: "ansible_network_vpc_test" + register: vpc +- name: create subnet + hwc_vpc_subnet: + gateway_ip: "192.168.100.32" + name: "ansible_network_subnet_test" + dhcp_enable: True + vpc_id: "{{ vpc.id }}" + cidr: "192.168.100.0/26" + register: subnet +- name: create a port + hwc_vpc_port: + subnet_id: "{{ subnet.id }}" + ip_address: "192.168.100.33" + register: port +- name: create an eip and bind it to a port + hwc_vpc_eip: + type: "5_bgp" + dedicated_bandwidth: + charge_mode: "traffic" + name: "ansible_test_dedicated_bandwidth" + size: 1 + port_id: "{{ port.id }}" +''' + +RETURN = ''' + type: + description: + - Specifies the EIP type. + type: str + returned: success + dedicated_bandwidth: + description: + - Specifies the dedicated bandwidth object. + type: dict + returned: success + contains: + charge_mode: + description: + - Specifies whether the bandwidth is billed by traffic or + by bandwidth size. 
The value can be bandwidth or traffic. + If this parameter is left blank or is null character + string, default value bandwidth is used. For IPv6 + addresses, the default parameter value is bandwidth + outside China and is traffic in China. + type: str + returned: success + name: + description: + - Specifies the bandwidth name. The value is a string of 1 + to 64 characters that can contain letters, digits, + underscores C(_), hyphens (-), and periods (.). + type: str + returned: success + size: + description: + - Specifies the bandwidth size. The value ranges from 1 + Mbit/s to 2000 Mbit/s by default. (The specific range may + vary depending on the configuration in each region. You + can see the bandwidth range of each region on the + management console.) The minimum unit for bandwidth + adjustment varies depending on the bandwidth range. The + details are as follows:. + - The minimum unit is 1 Mbit/s if the allowed bandwidth + size ranges from 0 to 300 Mbit/s (with 300 Mbit/s + included). + - The minimum unit is 50 Mbit/s if the allowed bandwidth + size ranges 300 Mbit/s to 1000 Mbit/s (with 1000 Mbit/s + included). + - The minimum unit is 500 Mbit/s if the allowed bandwidth + size is greater than 1000 Mbit/s. + type: int + returned: success + id: + description: + - Specifies the ID of dedicated bandwidth. + type: str + returned: success + enterprise_project_id: + description: + - Specifies the enterprise project ID. + type: str + returned: success + ip_version: + description: + - The value can be 4 (IPv4 address) or 6 (IPv6 address). If this + parameter is left blank, an IPv4 address will be assigned. + type: int + returned: success + ipv4_address: + description: + - Specifies the obtained IPv4 EIP. The system automatically assigns + an EIP if you do not specify it. + type: str + returned: success + port_id: + description: + - Specifies the port ID. This parameter is returned only when a + private IP address is bound with the EIP. + type: str + returned: success + shared_bandwidth_id: + description: + - Specifies the ID of shared bandwidth. + type: str + returned: success + create_time: + description: + - Specifies the time (UTC time) when the EIP was assigned. + type: str + returned: success + ipv6_address: + description: + - Specifies the obtained IPv6 EIP. + type: str + returned: success + private_ip_address: + description: + - Specifies the private IP address bound with the EIP. This + parameter is returned only when a private IP address is bound + with the EIP. 
+ type: str + returned: success +''' + +from ansible_collections.community.general.plugins.module_utils.hwc_utils import ( + Config, HwcClientException, HwcClientException404, HwcModule, + are_different_dicts, build_path, get_region, is_empty_value, + navigate_value, wait_to_finish) + + +def build_module(): + return HwcModule( + argument_spec=dict( + state=dict(default='present', choices=['present', 'absent'], + type='str'), + timeouts=dict(type='dict', options=dict( + create=dict(default='5m', type='str'), + update=dict(default='5m', type='str'), + ), default=dict()), + type=dict(type='str', required=True), + dedicated_bandwidth=dict(type='dict', options=dict( + charge_mode=dict(type='str', required=True), + name=dict(type='str', required=True), + size=dict(type='int', required=True) + )), + enterprise_project_id=dict(type='str'), + ip_version=dict(type='int'), + ipv4_address=dict(type='str'), + port_id=dict(type='str'), + shared_bandwidth_id=dict(type='str') + ), + supports_check_mode=True, + ) + + +def main(): + """Main function""" + + module = build_module() + config = Config(module, "vpc") + + try: + resource = None + if module.params['id']: + resource = True + else: + v = search_resource(config) + if len(v) > 1: + raise Exception("Found more than one resource(%s)" % ", ".join([ + navigate_value(i, ["id"]) for i in v])) + + if len(v) == 1: + resource = v[0] + module.params['id'] = navigate_value(resource, ["id"]) + + result = {} + changed = False + if module.params['state'] == 'present': + if resource is None: + if not module.check_mode: + create(config) + changed = True + + current = read_resource(config, exclude_output=True) + expect = user_input_parameters(module) + if are_different_dicts(expect, current): + if not module.check_mode: + update(config) + changed = True + + result = read_resource(config) + result['id'] = module.params.get('id') + else: + if resource: + if not module.check_mode: + delete(config) + changed = True + + except Exception as ex: + module.fail_json(msg=str(ex)) + + else: + result['changed'] = changed + module.exit_json(**result) + + +def user_input_parameters(module): + return { + "dedicated_bandwidth": module.params.get("dedicated_bandwidth"), + "enterprise_project_id": module.params.get("enterprise_project_id"), + "ip_version": module.params.get("ip_version"), + "ipv4_address": module.params.get("ipv4_address"), + "port_id": module.params.get("port_id"), + "shared_bandwidth_id": module.params.get("shared_bandwidth_id"), + "type": module.params.get("type"), + } + + +def create(config): + module = config.module + client = config.client(get_region(module), "vpc", "project") + timeout = 60 * int(module.params['timeouts']['create'].rstrip('m')) + opts = user_input_parameters(module) + + params = build_create_parameters(opts) + r = send_create_request(module, params, client) + obj = async_wait_create(config, r, client, timeout) + module.params['id'] = navigate_value(obj, ["publicip", "id"]) + + +def update(config): + module = config.module + client = config.client(get_region(module), "vpc", "project") + timeout = 60 * int(module.params['timeouts']['update'].rstrip('m')) + opts = user_input_parameters(module) + + params = build_update_parameters(opts) + if params: + r = send_update_request(module, params, client) + async_wait_update(config, r, client, timeout) + + +def delete(config): + module = config.module + client = config.client(get_region(module), "vpc", "project") + + if module.params["port_id"]: + module.params["port_id"] = "" + update(config) + + 
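# The EIP was unbound above (port_id cleared via update()); now release it
+    # and poll below until the API reports the resource as gone (404). +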
send_delete_request(module, None, client) + + url = build_path(module, "publicips/{id}") + + def _refresh_status(): + try: + client.get(url) + except HwcClientException404: + return True, "Done" + + except Exception: + return None, "" + + return True, "Pending" + + timeout = 60 * int(module.params['timeouts']['create'].rstrip('m')) + try: + wait_to_finish(["Done"], ["Pending"], _refresh_status, timeout) + except Exception as ex: + module.fail_json(msg="module(hwc_vpc_eip): error " + "waiting for api(delete) to " + "be done, error= %s" % str(ex)) + + +def read_resource(config, exclude_output=False): + module = config.module + client = config.client(get_region(module), "vpc", "project") + + res = {} + + r = send_read_request(module, client) + res["read"] = fill_read_resp_body(r) + + return update_properties(module, res, None, exclude_output) + + +def _build_query_link(opts): + query_params = [] + + v = navigate_value(opts, ["ip_version"]) + if v: + query_params.append("ip_version=" + str(v)) + + v = navigate_value(opts, ["enterprise_project_id"]) + if v: + query_params.append("enterprise_project_id=" + str(v)) + + query_link = "?marker={marker}&limit=10" + if query_params: + query_link += "&" + "&".join(query_params) + + return query_link + + +def search_resource(config): + module = config.module + client = config.client(get_region(module), "vpc", "project") + opts = user_input_parameters(module) + identity_obj = _build_identity_object(opts) + query_link = _build_query_link(opts) + link = "publicips" + query_link + + result = [] + p = {'marker': ''} + while True: + url = link.format(**p) + r = send_list_request(module, client, url) + if not r: + break + + for item in r: + item = fill_list_resp_body(item) + if not are_different_dicts(identity_obj, item): + result.append(item) + + if len(result) > 1: + break + + p['marker'] = r[-1].get('id') + + return result + + +def build_create_parameters(opts): + params = dict() + + v = expand_create_bandwidth(opts, None) + if not is_empty_value(v): + params["bandwidth"] = v + + v = navigate_value(opts, ["enterprise_project_id"], None) + if not is_empty_value(v): + params["enterprise_project_id"] = v + + v = expand_create_publicip(opts, None) + if not is_empty_value(v): + params["publicip"] = v + + return params + + +def expand_create_bandwidth(d, array_index): + v = navigate_value(d, ["dedicated_bandwidth"], array_index) + sbwid = navigate_value(d, ["shared_bandwidth_id"], array_index) + if v and sbwid: + raise Exception("don't input shared_bandwidth_id and " + "dedicated_bandwidth at same time") + + if not (v or sbwid): + raise Exception("must input shared_bandwidth_id or " + "dedicated_bandwidth") + + if sbwid: + return { + "id": sbwid, + "share_type": "WHOLE"} + + return { + "charge_mode": v["charge_mode"], + "name": v["name"], + "share_type": "PER", + "size": v["size"]} + + +def expand_create_publicip(d, array_index): + r = dict() + + v = navigate_value(d, ["ipv4_address"], array_index) + if not is_empty_value(v): + r["ip_address"] = v + + v = navigate_value(d, ["ip_version"], array_index) + if not is_empty_value(v): + r["ip_version"] = v + + v = navigate_value(d, ["type"], array_index) + if not is_empty_value(v): + r["type"] = v + + return r + + +def send_create_request(module, params, client): + url = "publicips" + try: + r = client.post(url, params) + except HwcClientException as ex: + msg = ("module(hwc_vpc_eip): error running " + "api(create), error: %s" % str(ex)) + module.fail_json(msg=msg) + + return r + + +def async_wait_create(config, 
result, client, timeout): + module = config.module + + path_parameters = { + "publicip_id": ["publicip", "id"], + } + data = dict((key, navigate_value(result, path)) + for key, path in path_parameters.items()) + + url = build_path(module, "publicips/{publicip_id}", data) + + def _query_status(): + r = None + try: + r = client.get(url, timeout=timeout) + except HwcClientException: + return None, "" + + try: + s = navigate_value(r, ["publicip", "status"]) + return r, s + except Exception: + return None, "" + + try: + return wait_to_finish( + ["ACTIVE", "DOWN"], + None, + _query_status, timeout) + except Exception as ex: + module.fail_json(msg="module(hwc_vpc_eip): error " + "waiting for api(create) to " + "be done, error= %s" % str(ex)) + + +def build_update_parameters(opts): + params = dict() + + v = navigate_value(opts, ["ip_version"], None) + if not is_empty_value(v): + params["ip_version"] = v + + v = navigate_value(opts, ["port_id"], None) + if v is not None: + params["port_id"] = v + + if not params: + return params + + params = {"publicip": params} + + return params + + +def send_update_request(module, params, client): + url = build_path(module, "publicips/{id}") + + try: + r = client.put(url, params) + except HwcClientException as ex: + msg = ("module(hwc_vpc_eip): error running " + "api(update), error: %s" % str(ex)) + module.fail_json(msg=msg) + + return r + + +def async_wait_update(config, result, client, timeout): + module = config.module + + url = build_path(module, "publicips/{id}") + + def _query_status(): + r = None + try: + r = client.get(url, timeout=timeout) + except HwcClientException: + return None, "" + + try: + s = navigate_value(r, ["publicip", "status"]) + return r, s + except Exception: + return None, "" + + try: + return wait_to_finish( + ["ACTIVE", "DOWN"], + None, + _query_status, timeout) + except Exception as ex: + module.fail_json(msg="module(hwc_vpc_eip): error " + "waiting for api(update) to " + "be done, error= %s" % str(ex)) + + +def send_delete_request(module, params, client): + url = build_path(module, "publicips/{id}") + + try: + r = client.delete(url, params) + except HwcClientException as ex: + msg = ("module(hwc_vpc_eip): error running " + "api(delete), error: %s" % str(ex)) + module.fail_json(msg=msg) + + return r + + +def send_read_request(module, client): + url = build_path(module, "publicips/{id}") + + r = None + try: + r = client.get(url) + except HwcClientException as ex: + msg = ("module(hwc_vpc_eip): error running " + "api(read), error: %s" % str(ex)) + module.fail_json(msg=msg) + + return navigate_value(r, ["publicip"], None) + + +def fill_read_resp_body(body): + result = dict() + + result["bandwidth_id"] = body.get("bandwidth_id") + + result["bandwidth_name"] = body.get("bandwidth_name") + + result["bandwidth_share_type"] = body.get("bandwidth_share_type") + + result["bandwidth_size"] = body.get("bandwidth_size") + + result["create_time"] = body.get("create_time") + + result["enterprise_project_id"] = body.get("enterprise_project_id") + + result["id"] = body.get("id") + + result["ip_version"] = body.get("ip_version") + + result["port_id"] = body.get("port_id") + + result["private_ip_address"] = body.get("private_ip_address") + + result["public_ip_address"] = body.get("public_ip_address") + + result["public_ipv6_address"] = body.get("public_ipv6_address") + + result["status"] = body.get("status") + + result["tenant_id"] = body.get("tenant_id") + + result["type"] = body.get("type") + + return result + + +def update_properties(module, 
response, array_index, exclude_output=False): + r = user_input_parameters(module) + + if not exclude_output: + v = navigate_value(response, ["read", "create_time"], array_index) + r["create_time"] = v + + v = r.get("dedicated_bandwidth") + v = flatten_dedicated_bandwidth(response, array_index, v, exclude_output) + r["dedicated_bandwidth"] = v + + v = navigate_value(response, ["read", "enterprise_project_id"], + array_index) + r["enterprise_project_id"] = v + + v = navigate_value(response, ["read", "ip_version"], array_index) + r["ip_version"] = v + + v = navigate_value(response, ["read", "public_ip_address"], array_index) + r["ipv4_address"] = v + + if not exclude_output: + v = navigate_value(response, ["read", "public_ipv6_address"], + array_index) + r["ipv6_address"] = v + + v = navigate_value(response, ["read", "port_id"], array_index) + r["port_id"] = v + + if not exclude_output: + v = navigate_value(response, ["read", "private_ip_address"], + array_index) + r["private_ip_address"] = v + + v = r.get("shared_bandwidth_id") + v = flatten_shared_bandwidth_id(response, array_index, v, exclude_output) + r["shared_bandwidth_id"] = v + + v = navigate_value(response, ["read", "type"], array_index) + r["type"] = v + + return r + + +def flatten_dedicated_bandwidth(d, array_index, current_value, exclude_output): + v = navigate_value(d, ["read", "bandwidth_share_type"], array_index) + if not (v and v == "PER"): + return current_value + + result = current_value + if not result: + result = dict() + + if not exclude_output: + v = navigate_value(d, ["read", "bandwidth_id"], array_index) + if v is not None: + result["id"] = v + + v = navigate_value(d, ["read", "bandwidth_name"], array_index) + if v is not None: + result["name"] = v + + v = navigate_value(d, ["read", "bandwidth_size"], array_index) + if v is not None: + result["size"] = v + + return result if result else current_value + + +def flatten_shared_bandwidth_id(d, array_index, current_value, exclude_output): + v = navigate_value(d, ["read", "bandwidth_id"], array_index) + + v1 = navigate_value(d, ["read", "bandwidth_share_type"], array_index) + + return v if (v1 and v1 == "WHOLE") else current_value + + +def send_list_request(module, client, url): + + r = None + try: + r = client.get(url) + except HwcClientException as ex: + msg = ("module(hwc_vpc_eip): error running " + "api(list), error: %s" % str(ex)) + module.fail_json(msg=msg) + + return navigate_value(r, ["publicips"], None) + + +def _build_identity_object(all_opts): + result = dict() + + v = expand_list_bandwidth_id(all_opts, None) + result["bandwidth_id"] = v + + v = navigate_value(all_opts, ["dedicated_bandwidth", "name"], None) + result["bandwidth_name"] = v + + result["bandwidth_share_type"] = None + + v = navigate_value(all_opts, ["dedicated_bandwidth", "size"], None) + result["bandwidth_size"] = v + + result["create_time"] = None + + v = navigate_value(all_opts, ["enterprise_project_id"], None) + result["enterprise_project_id"] = v + + result["id"] = None + + v = navigate_value(all_opts, ["ip_version"], None) + result["ip_version"] = v + + v = navigate_value(all_opts, ["port_id"], None) + result["port_id"] = v + + result["private_ip_address"] = None + + v = navigate_value(all_opts, ["ipv4_address"], None) + result["public_ip_address"] = v + + result["public_ipv6_address"] = None + + result["status"] = None + + result["tenant_id"] = None + + v = navigate_value(all_opts, ["type"], None) + result["type"] = v + + return result + + +def expand_list_bandwidth_id(d, array_index): + v = 
navigate_value(d, ["dedicated_bandwidth"], array_index) + sbwid = navigate_value(d, ["shared_bandwidth_id"], array_index) + if v and sbwid: + raise Exception("don't input shared_bandwidth_id and " + "dedicated_bandwidth at same time") + + return sbwid + + +def fill_list_resp_body(body): + result = dict() + + result["bandwidth_id"] = body.get("bandwidth_id") + + result["bandwidth_name"] = body.get("bandwidth_name") + + result["bandwidth_share_type"] = body.get("bandwidth_share_type") + + result["bandwidth_size"] = body.get("bandwidth_size") + + result["create_time"] = body.get("create_time") + + result["enterprise_project_id"] = body.get("enterprise_project_id") + + result["id"] = body.get("id") + + result["ip_version"] = body.get("ip_version") + + result["port_id"] = body.get("port_id") + + result["private_ip_address"] = body.get("private_ip_address") + + result["public_ip_address"] = body.get("public_ip_address") + + result["public_ipv6_address"] = body.get("public_ipv6_address") + + result["status"] = body.get("status") + + result["tenant_id"] = body.get("tenant_id") + + result["type"] = body.get("type") + + return result + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/huawei/hwc_vpc_peering_connect.py b/plugins/modules/cloud/huawei/hwc_vpc_peering_connect.py new file mode 100644 index 0000000000..842265688c --- /dev/null +++ b/plugins/modules/cloud/huawei/hwc_vpc_peering_connect.py @@ -0,0 +1,694 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (C) 2019 Huawei +# GNU General Public License v3.0+ (see COPYING or +# https://www.gnu.org/licenses/gpl-3.0.txt) + + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +############################################################################### +# Documentation +############################################################################### + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ["preview"], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: hwc_vpc_peering_connect +description: + - vpc peering management. +short_description: Creates a resource of Vpc/PeeringConnect in Huawei Cloud +author: Huawei Inc. (@huaweicloud) +requirements: + - keystoneauth1 >= 3.6.0 +options: + state: + description: + - Whether the given object should exist in Huawei Cloud. + type: str + choices: ['present', 'absent'] + default: 'present' + timeouts: + description: + - The timeouts for each operations. + type: dict + suboptions: + create: + description: + - The timeouts for create operation. + type: str + default: '15m' + local_vpc_id: + description: + - Specifies the ID of local VPC. + type: str + required: true + name: + description: + - Specifies the name of the VPC peering connection. The value can + contain 1 to 64 characters. + type: str + required: true + peering_vpc: + description: + - Specifies information about the peering VPC. + type: dict + required: true + suboptions: + vpc_id: + description: + - Specifies the ID of peering VPC. + type: str + required: true + project_id: + description: + - Specifies the ID of the project which the peering vpc + belongs to. + type: str + required: false + description: + description: + - The description of vpc peering connection. 
+ type: str + required: false +extends_documentation_fragment: +- community.general.hwc + +''' + +EXAMPLES = ''' +# create a peering connect +- name: create a local vpc + hwc_network_vpc: + cidr: "192.168.0.0/16" + name: "ansible_network_vpc_test_local" + register: vpc1 +- name: create a peering vpc + hwc_network_vpc: + cidr: "192.168.0.0/16" + name: "ansible_network_vpc_test_peering" + register: vpc2 +- name: create a peering connect + hwc_vpc_peering_connect: + local_vpc_id: "{{ vpc1.id }}" + name: "ansible_network_peering_test" + peering_vpc: + vpc_id: "{{ vpc2.id }}" +''' + +RETURN = ''' + local_vpc_id: + description: + - Specifies the ID of local VPC. + type: str + returned: success + name: + description: + - Specifies the name of the VPC peering connection. The value can + contain 1 to 64 characters. + type: str + returned: success + peering_vpc: + description: + - Specifies information about the peering VPC. + type: dict + returned: success + contains: + vpc_id: + description: + - Specifies the ID of peering VPC. + type: str + returned: success + project_id: + description: + - Specifies the ID of the project which the peering vpc + belongs to. + type: str + returned: success + description: + description: + - The description of vpc peering connection. + type: str + returned: success +''' + +from ansible_collections.community.general.plugins.module_utils.hwc_utils import ( + Config, HwcClientException, HwcClientException404, HwcModule, + are_different_dicts, build_path, get_region, is_empty_value, + navigate_value, wait_to_finish) + + +def build_module(): + return HwcModule( + argument_spec=dict( + state=dict(default='present', choices=['present', 'absent'], + type='str'), + timeouts=dict(type='dict', options=dict( + create=dict(default='15m', type='str'), + ), default=dict()), + local_vpc_id=dict(type='str', required=True), + name=dict(type='str', required=True), + peering_vpc=dict(type='dict', required=True, options=dict( + vpc_id=dict(type='str', required=True), + project_id=dict(type='str') + )), + description=dict(type='str') + ), + supports_check_mode=True, + ) + + +def main(): + """Main function""" + + module = build_module() + config = Config(module, "vpc") + + try: + resource = None + if module.params['id']: + resource = True + else: + v = search_resource(config) + if len(v) > 1: + raise Exception("Found more than one resource(%s)" % ", ".join([ + navigate_value(i, ["id"]) for i in v])) + + if len(v) == 1: + resource = v[0] + module.params['id'] = navigate_value(resource, ["id"]) + + result = {} + changed = False + if module.params['state'] == 'present': + if resource is None: + if not module.check_mode: + create(config) + changed = True + + current = read_resource(config, exclude_output=True) + expect = user_input_parameters(module) + if are_different_dicts(expect, current): + if not module.check_mode: + update(config) + changed = True + + result = read_resource(config) + result['id'] = module.params.get('id') + else: + if resource: + if not module.check_mode: + delete(config) + changed = True + + except Exception as ex: + module.fail_json(msg=str(ex)) + + else: + result['changed'] = changed + module.exit_json(**result) + + +def user_input_parameters(module): + return { + "description": module.params.get("description"), + "local_vpc_id": module.params.get("local_vpc_id"), + "name": module.params.get("name"), + "peering_vpc": module.params.get("peering_vpc"), + } + + +def create(config): + module = config.module + client = config.client(get_region(module), "network", 
"project") + timeout = 60 * int(module.params['timeouts']['create'].rstrip('m')) + opts = user_input_parameters(module) + + params = build_create_parameters(opts) + r = send_create_request(module, params, client) + obj = async_wait_create(config, r, client, timeout) + module.params['id'] = navigate_value(obj, ["peering", "id"]) + + +def update(config): + module = config.module + client = config.client(get_region(module), "network", "project") + opts = user_input_parameters(module) + + params = build_update_parameters(opts) + if params: + send_update_request(module, params, client) + + +def delete(config): + module = config.module + client = config.client(get_region(module), "network", "project") + + send_delete_request(module, None, client) + + url = build_path(module, "v2.0/vpc/peerings/{id}") + + def _refresh_status(): + try: + client.get(url) + except HwcClientException404: + return True, "Done" + + except Exception: + return None, "" + + return True, "Pending" + + timeout = 60 * int(module.params['timeouts']['create'].rstrip('m')) + try: + wait_to_finish(["Done"], ["Pending"], _refresh_status, timeout) + except Exception as ex: + module.fail_json(msg="module(hwc_vpc_peering_connect): error " + "waiting for api(delete) to " + "be done, error= %s" % str(ex)) + + +def read_resource(config, exclude_output=False): + module = config.module + client = config.client(get_region(module), "network", "project") + + res = {} + + r = send_read_request(module, client) + res["read"] = fill_read_resp_body(r) + + return update_properties(module, res, None, exclude_output) + + +def _build_query_link(opts): + query_params = [] + + v = navigate_value(opts, ["local_vpc_id"]) + if v: + query_params.append("vpc_id=" + str(v)) + + v = navigate_value(opts, ["name"]) + if v: + query_params.append("name=" + str(v)) + + query_link = "?marker={marker}&limit=10" + if query_params: + query_link += "&" + "&".join(query_params) + + return query_link + + +def search_resource(config): + module = config.module + client = config.client(get_region(module), "network", "project") + opts = user_input_parameters(module) + identity_obj = _build_identity_object(opts) + query_link = _build_query_link(opts) + link = "v2.0/vpc/peerings" + query_link + + result = [] + p = {'marker': ''} + while True: + url = link.format(**p) + r = send_list_request(module, client, url) + if not r: + break + + for item in r: + item = fill_list_resp_body(item) + if not are_different_dicts(identity_obj, item): + result.append(item) + + if len(result) > 1: + break + + p['marker'] = r[-1].get('id') + + return result + + +def build_create_parameters(opts): + params = dict() + + v = expand_create_accept_vpc_info(opts, None) + if not is_empty_value(v): + params["accept_vpc_info"] = v + + v = navigate_value(opts, ["description"], None) + if not is_empty_value(v): + params["description"] = v + + v = navigate_value(opts, ["name"], None) + if not is_empty_value(v): + params["name"] = v + + v = expand_create_request_vpc_info(opts, None) + if not is_empty_value(v): + params["request_vpc_info"] = v + + if not params: + return params + + params = {"peering": params} + + return params + + +def expand_create_accept_vpc_info(d, array_index): + r = dict() + + v = navigate_value(d, ["peering_vpc", "project_id"], array_index) + if not is_empty_value(v): + r["tenant_id"] = v + + v = navigate_value(d, ["peering_vpc", "vpc_id"], array_index) + if not is_empty_value(v): + r["vpc_id"] = v + + return r + + +def expand_create_request_vpc_info(d, array_index): + r = dict() + + 
r["tenant_id"] = "" + + v = navigate_value(d, ["local_vpc_id"], array_index) + if not is_empty_value(v): + r["vpc_id"] = v + + return r + + +def send_create_request(module, params, client): + url = "v2.0/vpc/peerings" + try: + r = client.post(url, params) + except HwcClientException as ex: + msg = ("module(hwc_vpc_peering_connect): error running " + "api(create), error: %s" % str(ex)) + module.fail_json(msg=msg) + + return r + + +def async_wait_create(config, result, client, timeout): + module = config.module + + path_parameters = { + "peering_id": ["peering", "id"], + } + data = dict((key, navigate_value(result, path)) + for key, path in path_parameters.items()) + + url = build_path(module, "v2.0/vpc/peerings/{peering_id}", data) + + def _query_status(): + r = None + try: + r = client.get(url, timeout=timeout) + except HwcClientException: + return None, "" + + try: + s = navigate_value(r, ["peering", "status"]) + return r, s + except Exception: + return None, "" + + try: + return wait_to_finish( + ["ACTIVE"], + ["PENDING_ACCEPTANCE"], + _query_status, timeout) + except Exception as ex: + module.fail_json(msg="module(hwc_vpc_peering_connect): error " + "waiting for api(create) to " + "be done, error= %s" % str(ex)) + + +def build_update_parameters(opts): + params = dict() + + v = navigate_value(opts, ["description"], None) + if not is_empty_value(v): + params["description"] = v + + v = navigate_value(opts, ["name"], None) + if not is_empty_value(v): + params["name"] = v + + if not params: + return params + + params = {"peering": params} + + return params + + +def send_update_request(module, params, client): + url = build_path(module, "v2.0/vpc/peerings/{id}") + + try: + r = client.put(url, params) + except HwcClientException as ex: + msg = ("module(hwc_vpc_peering_connect): error running " + "api(update), error: %s" % str(ex)) + module.fail_json(msg=msg) + + return r + + +def send_delete_request(module, params, client): + url = build_path(module, "v2.0/vpc/peerings/{id}") + + try: + r = client.delete(url, params) + except HwcClientException as ex: + msg = ("module(hwc_vpc_peering_connect): error running " + "api(delete), error: %s" % str(ex)) + module.fail_json(msg=msg) + + return r + + +def send_read_request(module, client): + url = build_path(module, "v2.0/vpc/peerings/{id}") + + r = None + try: + r = client.get(url) + except HwcClientException as ex: + msg = ("module(hwc_vpc_peering_connect): error running " + "api(read), error: %s" % str(ex)) + module.fail_json(msg=msg) + + return navigate_value(r, ["peering"], None) + + +def fill_read_resp_body(body): + result = dict() + + v = fill_read_resp_accept_vpc_info(body.get("accept_vpc_info")) + result["accept_vpc_info"] = v + + result["description"] = body.get("description") + + result["id"] = body.get("id") + + result["name"] = body.get("name") + + v = fill_read_resp_request_vpc_info(body.get("request_vpc_info")) + result["request_vpc_info"] = v + + result["status"] = body.get("status") + + return result + + +def fill_read_resp_accept_vpc_info(value): + if not value: + return None + + result = dict() + + result["tenant_id"] = value.get("tenant_id") + + result["vpc_id"] = value.get("vpc_id") + + return result + + +def fill_read_resp_request_vpc_info(value): + if not value: + return None + + result = dict() + + result["tenant_id"] = value.get("tenant_id") + + result["vpc_id"] = value.get("vpc_id") + + return result + + +def update_properties(module, response, array_index, exclude_output=False): + r = user_input_parameters(module) + + v = 
navigate_value(response, ["read", "description"], array_index) + r["description"] = v + + v = navigate_value(response, ["read", "request_vpc_info", "vpc_id"], + array_index) + r["local_vpc_id"] = v + + v = navigate_value(response, ["read", "name"], array_index) + r["name"] = v + + v = r.get("peering_vpc") + v = flatten_peering_vpc(response, array_index, v, exclude_output) + r["peering_vpc"] = v + + return r + + +def flatten_peering_vpc(d, array_index, current_value, exclude_output): + result = current_value + has_init_value = True + if not result: + result = dict() + has_init_value = False + + v = navigate_value(d, ["read", "accept_vpc_info", "tenant_id"], + array_index) + result["project_id"] = v + + v = navigate_value(d, ["read", "accept_vpc_info", "vpc_id"], array_index) + result["vpc_id"] = v + + if has_init_value: + return result + + for v in result.values(): + if v is not None: + return result + return current_value + + +def send_list_request(module, client, url): + + r = None + try: + r = client.get(url) + except HwcClientException as ex: + msg = ("module(hwc_vpc_peering_connect): error running " + "api(list), error: %s" % str(ex)) + module.fail_json(msg=msg) + + return navigate_value(r, ["peerings"], None) + + +def _build_identity_object(all_opts): + result = dict() + + v = expand_list_accept_vpc_info(all_opts, None) + result["accept_vpc_info"] = v + + v = navigate_value(all_opts, ["description"], None) + result["description"] = v + + result["id"] = None + + v = navigate_value(all_opts, ["name"], None) + result["name"] = v + + v = expand_list_request_vpc_info(all_opts, None) + result["request_vpc_info"] = v + + result["status"] = None + + return result + + +def expand_list_accept_vpc_info(d, array_index): + r = dict() + + v = navigate_value(d, ["peering_vpc", "project_id"], array_index) + r["tenant_id"] = v + + v = navigate_value(d, ["peering_vpc", "vpc_id"], array_index) + r["vpc_id"] = v + + for v in r.values(): + if v is not None: + return r + return None + + +def expand_list_request_vpc_info(d, array_index): + r = dict() + + r["tenant_id"] = None + + v = navigate_value(d, ["local_vpc_id"], array_index) + r["vpc_id"] = v + + for v in r.values(): + if v is not None: + return r + return None + + +def fill_list_resp_body(body): + result = dict() + + v = fill_list_resp_accept_vpc_info(body.get("accept_vpc_info")) + result["accept_vpc_info"] = v + + result["description"] = body.get("description") + + result["id"] = body.get("id") + + result["name"] = body.get("name") + + v = fill_list_resp_request_vpc_info(body.get("request_vpc_info")) + result["request_vpc_info"] = v + + result["status"] = body.get("status") + + return result + + +def fill_list_resp_accept_vpc_info(value): + if not value: + return None + + result = dict() + + result["tenant_id"] = value.get("tenant_id") + + result["vpc_id"] = value.get("vpc_id") + + return result + + +def fill_list_resp_request_vpc_info(value): + if not value: + return None + + result = dict() + + result["tenant_id"] = value.get("tenant_id") + + result["vpc_id"] = value.get("vpc_id") + + return result + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/huawei/hwc_vpc_port.py b/plugins/modules/cloud/huawei/hwc_vpc_port.py new file mode 100644 index 0000000000..bcd6b98d34 --- /dev/null +++ b/plugins/modules/cloud/huawei/hwc_vpc_port.py @@ -0,0 +1,1160 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (C) 2019 Huawei +# GNU General Public License v3.0+ (see COPYING or +# https://www.gnu.org/licenses/gpl-3.0.txt) + 
+from __future__ import absolute_import, division, print_function +__metaclass__ = type + +############################################################################### +# Documentation +############################################################################### + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ["preview"], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: hwc_vpc_port +description: + - vpc port management. +short_description: Creates a resource of Vpc/Port in Huawei Cloud +author: Huawei Inc. (@huaweicloud) +requirements: + - keystoneauth1 >= 3.6.0 +options: + state: + description: + - Whether the given object should exist in Huawei Cloud. + type: str + choices: ['present', 'absent'] + default: 'present' + timeouts: + description: + - The timeouts for each operations. + type: dict + suboptions: + create: + description: + - The timeouts for create operation. + type: str + default: '15m' + subnet_id: + description: + - Specifies the ID of the subnet to which the port belongs. + type: str + required: true + admin_state_up: + description: + - Specifies the administrative state of the port. + type: bool + required: false + allowed_address_pairs: + description: + - Specifies a set of zero or more allowed address pairs. + required: false + type: list + suboptions: + ip_address: + description: + - Specifies the IP address. It cannot set it to 0.0.0.0. + Configure an independent security group for the port if a + large CIDR block (subnet mask less than 24) is configured + for parameter allowed_address_pairs. + type: str + required: false + mac_address: + description: + - Specifies the MAC address. + type: str + required: false + extra_dhcp_opts: + description: + - Specifies the extended option of DHCP. + type: list + required: false + suboptions: + name: + description: + - Specifies the option name. + type: str + required: false + value: + description: + - Specifies the option value. + type: str + required: false + ip_address: + description: + - Specifies the port IP address. + type: str + required: false + name: + description: + - Specifies the port name. The value can contain no more than 255 + characters. + type: str + required: false + security_groups: + description: + - Specifies the ID of the security group. + type: list + required: false +extends_documentation_fragment: +- community.general.hwc + +''' + +EXAMPLES = ''' +# create a port +- name: create vpc + hwc_network_vpc: + cidr: "192.168.100.0/24" + name: "ansible_network_vpc_test" + register: vpc +- name: create subnet + hwc_vpc_subnet: + gateway_ip: "192.168.100.32" + name: "ansible_network_subnet_test" + dhcp_enable: True + vpc_id: "{{ vpc.id }}" + cidr: "192.168.100.0/26" + register: subnet +- name: create a port + hwc_vpc_port: + subnet_id: "{{ subnet.id }}" + ip_address: "192.168.100.33" +''' + +RETURN = ''' + subnet_id: + description: + - Specifies the ID of the subnet to which the port belongs. + type: str + returned: success + admin_state_up: + description: + - Specifies the administrative state of the port. + type: bool + returned: success + allowed_address_pairs: + description: + - Specifies a set of zero or more allowed address pairs. + type: list + returned: success + contains: + ip_address: + description: + - Specifies the IP address. It cannot set it to 0.0.0.0. + Configure an independent security group for the port if a + large CIDR block (subnet mask less than 24) is configured + for parameter allowed_address_pairs. 
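Since the API rejects 0.0.0.0 as an allowed-address-pair IP, callers can pre-validate their input before invoking the module; a small hypothetical check:

def validate_address_pair(pair):
    # Hypothetical pre-flight check for the documented constraint that
    # ip_address of an allowed address pair must not be 0.0.0.0.
    ip = (pair.get("ip_address") or "").split("/")[0]
    if ip == "0.0.0.0":
        raise ValueError("allowed_address_pairs.ip_address must not be 0.0.0.0")
    return pair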
+ type: str + returned: success + mac_address: + description: + - Specifies the MAC address. + type: str + returned: success + extra_dhcp_opts: + description: + - Specifies the extended option of DHCP. + type: list + returned: success + contains: + name: + description: + - Specifies the option name. + type: str + returned: success + value: + description: + - Specifies the option value. + type: str + returned: success + ip_address: + description: + - Specifies the port IP address. + type: str + returned: success + name: + description: + - Specifies the port name. The value can contain no more than 255 + characters. + type: str + returned: success + security_groups: + description: + - Specifies the ID of the security group. + type: list + returned: success + mac_address: + description: + - Specifies the port MAC address. + type: str + returned: success +''' + +from ansible_collections.community.general.plugins.module_utils.hwc_utils import ( + Config, HwcClientException, HwcClientException404, HwcModule, + are_different_dicts, build_path, get_region, is_empty_value, + navigate_value, wait_to_finish) + + +def build_module(): + return HwcModule( + argument_spec=dict( + state=dict(default='present', choices=['present', 'absent'], + type='str'), + timeouts=dict(type='dict', options=dict( + create=dict(default='15m', type='str'), + ), default=dict()), + subnet_id=dict(type='str', required=True), + admin_state_up=dict(type='bool'), + allowed_address_pairs=dict( + type='list', elements='dict', + options=dict( + ip_address=dict(type='str'), + mac_address=dict(type='str') + ), + ), + extra_dhcp_opts=dict(type='list', elements='dict', options=dict( + name=dict(type='str'), + value=dict(type='str') + )), + ip_address=dict(type='str'), + name=dict(type='str'), + security_groups=dict(type='list', elements='str') + ), + supports_check_mode=True, + ) + + +def main(): + """Main function""" + + module = build_module() + config = Config(module, "vpc") + + try: + resource = None + if module.params['id']: + resource = True + else: + v = search_resource(config) + if len(v) > 1: + raise Exception("Found more than one resource(%s)" % ", ".join([ + navigate_value(i, ["id"]) for i in v])) + + if len(v) == 1: + resource = v[0] + module.params['id'] = navigate_value(resource, ["id"]) + + result = {} + changed = False + if module.params['state'] == 'present': + if resource is None: + if not module.check_mode: + create(config) + changed = True + + current = read_resource(config, exclude_output=True) + expect = user_input_parameters(module) + if are_different_dicts(expect, current): + if not module.check_mode: + update(config) + changed = True + + result = read_resource(config) + result['id'] = module.params.get('id') + else: + if resource: + if not module.check_mode: + delete(config) + changed = True + + except Exception as ex: + module.fail_json(msg=str(ex)) + + else: + result['changed'] = changed + module.exit_json(**result) + + +def user_input_parameters(module): + return { + "admin_state_up": module.params.get("admin_state_up"), + "allowed_address_pairs": module.params.get("allowed_address_pairs"), + "extra_dhcp_opts": module.params.get("extra_dhcp_opts"), + "ip_address": module.params.get("ip_address"), + "name": module.params.get("name"), + "security_groups": module.params.get("security_groups"), + "subnet_id": module.params.get("subnet_id"), + } + + +def create(config): + module = config.module + client = config.client(get_region(module), "vpc", "project") + timeout = 60 * 
int(module.params['timeouts']['create'].rstrip('m')) + opts = user_input_parameters(module) + + params = build_create_parameters(opts) + r = send_create_request(module, params, client) + obj = async_wait_create(config, r, client, timeout) + module.params['id'] = navigate_value(obj, ["port", "id"]) + + +def update(config): + module = config.module + client = config.client(get_region(module), "vpc", "project") + opts = user_input_parameters(module) + + params = build_update_parameters(opts) + if params: + send_update_request(module, params, client) + + +def delete(config): + module = config.module + client = config.client(get_region(module), "vpc", "project") + + send_delete_request(module, None, client) + + url = build_path(module, "ports/{id}") + + def _refresh_status(): + try: + client.get(url) + except HwcClientException404: + return True, "Done" + + except Exception: + return None, "" + + return True, "Pending" + + timeout = 60 * int(module.params['timeouts']['create'].rstrip('m')) + try: + wait_to_finish(["Done"], ["Pending"], _refresh_status, timeout) + except Exception as ex: + module.fail_json(msg="module(hwc_vpc_port): error " + "waiting for api(delete) to " + "be done, error= %s" % str(ex)) + + +def read_resource(config, exclude_output=False): + module = config.module + client = config.client(get_region(module), "vpc", "project") + + res = {} + + r = send_read_request(module, client) + res["read"] = fill_read_resp_body(r) + + array_index = { + "read.fixed_ips": 0, + } + + return update_properties(module, res, array_index, exclude_output) + + +def _build_query_link(opts): + query_params = [] + + v = navigate_value(opts, ["subnet_id"]) + if v: + query_params.append("network_id=" + str(v)) + + v = navigate_value(opts, ["name"]) + if v: + query_params.append("name=" + str(v)) + + v = navigate_value(opts, ["admin_state_up"]) + if v: + query_params.append("admin_state_up=" + str(v)) + + query_link = "?marker={marker}&limit=10" + if query_params: + query_link += "&" + "&".join(query_params) + + return query_link + + +def search_resource(config): + module = config.module + client = config.client(get_region(module), "vpc", "project") + opts = user_input_parameters(module) + identity_obj = _build_identity_object(opts) + query_link = _build_query_link(opts) + link = "ports" + query_link + + result = [] + p = {'marker': ''} + while True: + url = link.format(**p) + r = send_list_request(module, client, url) + if not r: + break + + for item in r: + item = fill_list_resp_body(item) + if not are_different_dicts(identity_obj, item): + result.append(item) + + if len(result) > 1: + break + + p['marker'] = r[-1].get('id') + + return result + + +def build_create_parameters(opts): + params = dict() + + v = navigate_value(opts, ["admin_state_up"], None) + if not is_empty_value(v): + params["admin_state_up"] = v + + v = expand_create_allowed_address_pairs(opts, None) + if not is_empty_value(v): + params["allowed_address_pairs"] = v + + v = expand_create_extra_dhcp_opts(opts, None) + if not is_empty_value(v): + params["extra_dhcp_opts"] = v + + v = expand_create_fixed_ips(opts, None) + if not is_empty_value(v): + params["fixed_ips"] = v + + v = navigate_value(opts, ["name"], None) + if not is_empty_value(v): + params["name"] = v + + v = navigate_value(opts, ["subnet_id"], None) + if not is_empty_value(v): + params["network_id"] = v + + v = navigate_value(opts, ["security_groups"], None) + if not is_empty_value(v): + params["security_groups"] = v + + if not params: + return params + + params = {"port": 
params} + + return params + + +def expand_create_allowed_address_pairs(d, array_index): + new_array_index = dict() + if array_index: + new_array_index.update(array_index) + + req = [] + + v = navigate_value(d, ["allowed_address_pairs"], + new_array_index) + if not v: + return req + n = len(v) + for i in range(n): + new_array_index["allowed_address_pairs"] = i + transformed = dict() + + v = navigate_value(d, ["allowed_address_pairs", "ip_address"], + new_array_index) + if not is_empty_value(v): + transformed["ip_address"] = v + + v = navigate_value(d, ["allowed_address_pairs", "mac_address"], + new_array_index) + if not is_empty_value(v): + transformed["mac_address"] = v + + if transformed: + req.append(transformed) + + return req + + +def expand_create_extra_dhcp_opts(d, array_index): + new_array_index = dict() + if array_index: + new_array_index.update(array_index) + + req = [] + + v = navigate_value(d, ["extra_dhcp_opts"], + new_array_index) + if not v: + return req + n = len(v) + for i in range(n): + new_array_index["extra_dhcp_opts"] = i + transformed = dict() + + v = navigate_value(d, ["extra_dhcp_opts", "name"], new_array_index) + if not is_empty_value(v): + transformed["opt_name"] = v + + v = navigate_value(d, ["extra_dhcp_opts", "value"], new_array_index) + if not is_empty_value(v): + transformed["opt_value"] = v + + if transformed: + req.append(transformed) + + return req + + +def expand_create_fixed_ips(d, array_index): + new_array_index = dict() + if array_index: + new_array_index.update(array_index) + + req = [] + + n = 1 + for i in range(n): + transformed = dict() + + v = navigate_value(d, ["ip_address"], new_array_index) + if not is_empty_value(v): + transformed["ip_address"] = v + + if transformed: + req.append(transformed) + + return req + + +def send_create_request(module, params, client): + url = "ports" + try: + r = client.post(url, params) + except HwcClientException as ex: + msg = ("module(hwc_vpc_port): error running " + "api(create), error: %s" % str(ex)) + module.fail_json(msg=msg) + + return r + + +def async_wait_create(config, result, client, timeout): + module = config.module + + path_parameters = { + "port_id": ["port", "id"], + } + data = dict((key, navigate_value(result, path)) + for key, path in path_parameters.items()) + + url = build_path(module, "ports/{port_id}", data) + + def _query_status(): + r = None + try: + r = client.get(url, timeout=timeout) + except HwcClientException: + return None, "" + + try: + s = navigate_value(r, ["port", "status"]) + return r, s + except Exception: + return None, "" + + try: + return wait_to_finish( + ["ACTIVE", "DOWN"], + ["BUILD"], + _query_status, timeout) + except Exception as ex: + module.fail_json(msg="module(hwc_vpc_port): error " + "waiting for api(create) to " + "be done, error= %s" % str(ex)) + + +def build_update_parameters(opts): + params = dict() + + v = expand_update_allowed_address_pairs(opts, None) + if v is not None: + params["allowed_address_pairs"] = v + + v = expand_update_extra_dhcp_opts(opts, None) + if v is not None: + params["extra_dhcp_opts"] = v + + v = navigate_value(opts, ["name"], None) + if not is_empty_value(v): + params["name"] = v + + v = navigate_value(opts, ["security_groups"], None) + if not is_empty_value(v): + params["security_groups"] = v + + if not params: + return params + + params = {"port": params} + + return params + + +def expand_update_allowed_address_pairs(d, array_index): + new_array_index = dict() + if array_index: + new_array_index.update(array_index) + + req = [] + + v = 
navigate_value(d, ["allowed_address_pairs"], + new_array_index) + if not v: + return req + n = len(v) + for i in range(n): + new_array_index["allowed_address_pairs"] = i + transformed = dict() + + v = navigate_value(d, ["allowed_address_pairs", "ip_address"], + new_array_index) + if not is_empty_value(v): + transformed["ip_address"] = v + + v = navigate_value(d, ["allowed_address_pairs", "mac_address"], + new_array_index) + if not is_empty_value(v): + transformed["mac_address"] = v + + if transformed: + req.append(transformed) + + return req + + +def expand_update_extra_dhcp_opts(d, array_index): + new_array_index = dict() + if array_index: + new_array_index.update(array_index) + + req = [] + + v = navigate_value(d, ["extra_dhcp_opts"], + new_array_index) + if not v: + return req + n = len(v) + for i in range(n): + new_array_index["extra_dhcp_opts"] = i + transformed = dict() + + v = navigate_value(d, ["extra_dhcp_opts", "name"], new_array_index) + if not is_empty_value(v): + transformed["opt_name"] = v + + v = navigate_value(d, ["extra_dhcp_opts", "value"], new_array_index) + if not is_empty_value(v): + transformed["opt_value"] = v + + if transformed: + req.append(transformed) + + return req + + +def send_update_request(module, params, client): + url = build_path(module, "ports/{id}") + + try: + r = client.put(url, params) + except HwcClientException as ex: + msg = ("module(hwc_vpc_port): error running " + "api(update), error: %s" % str(ex)) + module.fail_json(msg=msg) + + return r + + +def send_delete_request(module, params, client): + url = build_path(module, "ports/{id}") + + try: + r = client.delete(url, params) + except HwcClientException as ex: + msg = ("module(hwc_vpc_port): error running " + "api(delete), error: %s" % str(ex)) + module.fail_json(msg=msg) + + return r + + +def send_read_request(module, client): + url = build_path(module, "ports/{id}") + + r = None + try: + r = client.get(url) + except HwcClientException as ex: + msg = ("module(hwc_vpc_port): error running " + "api(read), error: %s" % str(ex)) + module.fail_json(msg=msg) + + return navigate_value(r, ["port"], None) + + +def fill_read_resp_body(body): + result = dict() + + result["admin_state_up"] = body.get("admin_state_up") + + v = fill_read_resp_allowed_address_pairs(body.get("allowed_address_pairs")) + result["allowed_address_pairs"] = v + + result["binding_host_id"] = body.get("binding_host_id") + + result["binding_vnic_type"] = body.get("binding_vnic_type") + + result["device_id"] = body.get("device_id") + + result["device_owner"] = body.get("device_owner") + + result["dns_name"] = body.get("dns_name") + + v = fill_read_resp_extra_dhcp_opts(body.get("extra_dhcp_opts")) + result["extra_dhcp_opts"] = v + + v = fill_read_resp_fixed_ips(body.get("fixed_ips")) + result["fixed_ips"] = v + + result["id"] = body.get("id") + + result["mac_address"] = body.get("mac_address") + + result["name"] = body.get("name") + + result["network_id"] = body.get("network_id") + + result["security_groups"] = body.get("security_groups") + + result["status"] = body.get("status") + + result["tenant_id"] = body.get("tenant_id") + + return result + + +def fill_read_resp_allowed_address_pairs(value): + if not value: + return None + + result = [] + for item in value: + val = dict() + + val["ip_address"] = item.get("ip_address") + + val["mac_address"] = item.get("mac_address") + + result.append(val) + + return result + + +def fill_read_resp_extra_dhcp_opts(value): + if not value: + return None + + result = [] + for item in value: + val = dict() 
+ + val["opt_name"] = item.get("opt_name") + + val["opt_value"] = item.get("opt_value") + + result.append(val) + + return result + + +def fill_read_resp_fixed_ips(value): + if not value: + return None + + result = [] + for item in value: + val = dict() + + val["ip_address"] = item.get("ip_address") + + result.append(val) + + return result + + +def update_properties(module, response, array_index, exclude_output=False): + r = user_input_parameters(module) + + v = navigate_value(response, ["read", "admin_state_up"], array_index) + r["admin_state_up"] = v + + v = r.get("allowed_address_pairs") + v = flatten_allowed_address_pairs(response, array_index, v, exclude_output) + r["allowed_address_pairs"] = v + + v = r.get("extra_dhcp_opts") + v = flatten_extra_dhcp_opts(response, array_index, v, exclude_output) + r["extra_dhcp_opts"] = v + + v = navigate_value(response, ["read", "fixed_ips", "ip_address"], + array_index) + r["ip_address"] = v + + if not exclude_output: + v = navigate_value(response, ["read", "mac_address"], array_index) + r["mac_address"] = v + + v = navigate_value(response, ["read", "name"], array_index) + r["name"] = v + + v = navigate_value(response, ["read", "security_groups"], array_index) + r["security_groups"] = v + + v = navigate_value(response, ["read", "network_id"], array_index) + r["subnet_id"] = v + + return r + + +def flatten_allowed_address_pairs(d, array_index, + current_value, exclude_output): + n = 0 + result = current_value + has_init_value = True + if result: + n = len(result) + else: + has_init_value = False + result = [] + v = navigate_value(d, ["read", "allowed_address_pairs"], + array_index) + if not v: + return current_value + n = len(v) + + new_array_index = dict() + if array_index: + new_array_index.update(array_index) + + for i in range(n): + new_array_index["read.allowed_address_pairs"] = i + + val = dict() + if len(result) >= (i + 1) and result[i]: + val = result[i] + + v = navigate_value(d, ["read", "allowed_address_pairs", "ip_address"], + new_array_index) + val["ip_address"] = v + + v = navigate_value(d, ["read", "allowed_address_pairs", "mac_address"], + new_array_index) + val["mac_address"] = v + + if len(result) >= (i + 1): + result[i] = val + else: + for v in val.values(): + if v is not None: + result.append(val) + break + + return result if (has_init_value or result) else current_value + + +def flatten_extra_dhcp_opts(d, array_index, current_value, exclude_output): + n = 0 + result = current_value + has_init_value = True + if result: + n = len(result) + else: + has_init_value = False + result = [] + v = navigate_value(d, ["read", "extra_dhcp_opts"], + array_index) + if not v: + return current_value + n = len(v) + + new_array_index = dict() + if array_index: + new_array_index.update(array_index) + + for i in range(n): + new_array_index["read.extra_dhcp_opts"] = i + + val = dict() + if len(result) >= (i + 1) and result[i]: + val = result[i] + + v = navigate_value(d, ["read", "extra_dhcp_opts", "opt_name"], + new_array_index) + val["name"] = v + + v = navigate_value(d, ["read", "extra_dhcp_opts", "opt_value"], + new_array_index) + val["value"] = v + + if len(result) >= (i + 1): + result[i] = val + else: + for v in val.values(): + if v is not None: + result.append(val) + break + + return result if (has_init_value or result) else current_value + + +def send_list_request(module, client, url): + + r = None + try: + r = client.get(url) + except HwcClientException as ex: + msg = ("module(hwc_vpc_port): error running " + "api(list), error: %s" % str(ex)) 
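+        # fail_json() exits the module process, so the return below is
+        # only reached when the GET succeeded.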
+ module.fail_json(msg=msg) + + return navigate_value(r, ["ports"], None) + + +def _build_identity_object(all_opts): + result = dict() + + v = navigate_value(all_opts, ["admin_state_up"], None) + result["admin_state_up"] = v + + v = expand_list_allowed_address_pairs(all_opts, None) + result["allowed_address_pairs"] = v + + result["binding_host_id"] = None + + result["binding_vnic_type"] = None + + result["device_id"] = None + + result["device_owner"] = None + + result["dns_name"] = None + + v = expand_list_extra_dhcp_opts(all_opts, None) + result["extra_dhcp_opts"] = v + + v = expand_list_fixed_ips(all_opts, None) + result["fixed_ips"] = v + + result["id"] = None + + result["mac_address"] = None + + v = navigate_value(all_opts, ["name"], None) + result["name"] = v + + v = navigate_value(all_opts, ["subnet_id"], None) + result["network_id"] = v + + v = navigate_value(all_opts, ["security_groups"], None) + result["security_groups"] = v + + result["status"] = None + + result["tenant_id"] = None + + return result + + +def expand_list_allowed_address_pairs(d, array_index): + new_array_index = dict() + if array_index: + new_array_index.update(array_index) + + req = [] + + v = navigate_value(d, ["allowed_address_pairs"], + new_array_index) + + n = len(v) if v else 1 + for i in range(n): + new_array_index["allowed_address_pairs"] = i + transformed = dict() + + v = navigate_value(d, ["allowed_address_pairs", "ip_address"], + new_array_index) + transformed["ip_address"] = v + + v = navigate_value(d, ["allowed_address_pairs", "mac_address"], + new_array_index) + transformed["mac_address"] = v + + for v in transformed.values(): + if v is not None: + req.append(transformed) + break + + return req if req else None + + +def expand_list_extra_dhcp_opts(d, array_index): + new_array_index = dict() + if array_index: + new_array_index.update(array_index) + + req = [] + + v = navigate_value(d, ["extra_dhcp_opts"], + new_array_index) + + n = len(v) if v else 1 + for i in range(n): + new_array_index["extra_dhcp_opts"] = i + transformed = dict() + + v = navigate_value(d, ["extra_dhcp_opts", "name"], new_array_index) + transformed["opt_name"] = v + + v = navigate_value(d, ["extra_dhcp_opts", "value"], new_array_index) + transformed["opt_value"] = v + + for v in transformed.values(): + if v is not None: + req.append(transformed) + break + + return req if req else None + + +def expand_list_fixed_ips(d, array_index): + new_array_index = dict() + if array_index: + new_array_index.update(array_index) + + req = [] + + n = 1 + for i in range(n): + transformed = dict() + + v = navigate_value(d, ["ip_address"], new_array_index) + transformed["ip_address"] = v + + for v in transformed.values(): + if v is not None: + req.append(transformed) + break + + return req if req else None + + +def fill_list_resp_body(body): + result = dict() + + result["admin_state_up"] = body.get("admin_state_up") + + v = fill_list_resp_allowed_address_pairs(body.get("allowed_address_pairs")) + result["allowed_address_pairs"] = v + + result["binding_host_id"] = body.get("binding_host_id") + + result["binding_vnic_type"] = body.get("binding_vnic_type") + + result["device_id"] = body.get("device_id") + + result["device_owner"] = body.get("device_owner") + + result["dns_name"] = body.get("dns_name") + + v = fill_list_resp_extra_dhcp_opts(body.get("extra_dhcp_opts")) + result["extra_dhcp_opts"] = v + + v = fill_list_resp_fixed_ips(body.get("fixed_ips")) + result["fixed_ips"] = v + + result["id"] = body.get("id") + + result["mac_address"] = 
body.get("mac_address") + + result["name"] = body.get("name") + + result["network_id"] = body.get("network_id") + + result["security_groups"] = body.get("security_groups") + + result["status"] = body.get("status") + + result["tenant_id"] = body.get("tenant_id") + + return result + + +def fill_list_resp_allowed_address_pairs(value): + if not value: + return None + + result = [] + for item in value: + val = dict() + + val["ip_address"] = item.get("ip_address") + + val["mac_address"] = item.get("mac_address") + + result.append(val) + + return result + + +def fill_list_resp_extra_dhcp_opts(value): + if not value: + return None + + result = [] + for item in value: + val = dict() + + val["opt_name"] = item.get("opt_name") + + val["opt_value"] = item.get("opt_value") + + result.append(val) + + return result + + +def fill_list_resp_fixed_ips(value): + if not value: + return None + + result = [] + for item in value: + val = dict() + + val["ip_address"] = item.get("ip_address") + + result.append(val) + + return result + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/huawei/hwc_vpc_private_ip.py b/plugins/modules/cloud/huawei/hwc_vpc_private_ip.py new file mode 100644 index 0000000000..0bfb2e1c0c --- /dev/null +++ b/plugins/modules/cloud/huawei/hwc_vpc_private_ip.py @@ -0,0 +1,357 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (C) 2019 Huawei +# GNU General Public License v3.0+ (see COPYING or +# https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +############################################################################### +# Documentation +############################################################################### + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ["preview"], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: hwc_vpc_private_ip +description: + - vpc private ip management. +short_description: Creates a resource of Vpc/PrivateIP in Huawei Cloud +notes: + - If I(id) option is provided, it takes precedence over I(subnet_id), I(ip_address) for private ip selection. + - I(subnet_id), I(ip_address) are used for private ip selection. If more than one private ip with this options exists, execution is aborted. + - No parameter support updating. If one of option is changed, the module will create a new resource. +author: Huawei Inc. (@huaweicloud) +requirements: + - keystoneauth1 >= 3.6.0 +options: + state: + description: + - Whether the given object should exist in Huawei Cloud. + type: str + choices: ['present', 'absent'] + default: 'present' + subnet_id: + description: + - Specifies the ID of the subnet from which IP addresses are + assigned. Cannot be changed after creating the private ip. + type: str + required: true + ip_address: + description: + - Specifies the target IP address. The value can be an available IP + address in the subnet. If it is not specified, the system + automatically assigns an IP address. Cannot be changed after + creating the private ip. 
+ type: str + required: false +extends_documentation_fragment: +- community.general.hwc + +''' + +EXAMPLES = ''' +# create a private ip +- name: create vpc + hwc_network_vpc: + cidr: "192.168.100.0/24" + name: "ansible_network_vpc_test" + register: vpc +- name: create subnet + hwc_vpc_subnet: + gateway_ip: "192.168.100.32" + name: "ansible_network_subnet_test" + dhcp_enable: True + vpc_id: "{{ vpc.id }}" + cidr: "192.168.100.0/26" + register: subnet +- name: create a private ip + hwc_vpc_private_ip: + subnet_id: "{{ subnet.id }}" + ip_address: "192.168.100.33" +''' + +RETURN = ''' + subnet_id: + description: + - Specifies the ID of the subnet from which IP addresses are + assigned. + type: str + returned: success + ip_address: + description: + - Specifies the target IP address. The value can be an available IP + address in the subnet. If it is not specified, the system + automatically assigns an IP address. + type: str + returned: success +''' + +from ansible_collections.community.general.plugins.module_utils.hwc_utils import ( + Config, HwcClientException, HwcModule, are_different_dicts, build_path, + get_region, is_empty_value, navigate_value) + + +def build_module(): + return HwcModule( + argument_spec=dict( + state=dict(default='present', choices=['present', 'absent'], + type='str'), + subnet_id=dict(type='str', required=True), + ip_address=dict(type='str') + ), + supports_check_mode=True, + ) + + +def main(): + """Main function""" + + module = build_module() + config = Config(module, "vpc") + + try: + resource = None + if module.params['id']: + resource = True + else: + v = search_resource(config) + if len(v) > 1: + raise Exception("Found more than one resource(%s)" % ", ".join([ + navigate_value(i, ["id"]) for i in v])) + + if len(v) == 1: + resource = v[0] + module.params['id'] = navigate_value(resource, ["id"]) + + result = {} + changed = False + if module.params['state'] == 'present': + if resource is None: + if not module.check_mode: + create(config) + changed = True + + current = read_resource(config, exclude_output=True) + expect = user_input_parameters(module) + if are_different_dicts(expect, current): + raise Exception( + "Cannot change option from (%s) to (%s)of an" + " existing resource.(%s)" % (current, expect, module.params.get('id'))) + + result = read_resource(config) + result['id'] = module.params.get('id') + else: + if resource: + if not module.check_mode: + delete(config) + changed = True + + except Exception as ex: + module.fail_json(msg=str(ex)) + + else: + result['changed'] = changed + module.exit_json(**result) + + +def user_input_parameters(module): + return { + "ip_address": module.params.get("ip_address"), + "subnet_id": module.params.get("subnet_id"), + } + + +def create(config): + module = config.module + client = config.client(get_region(module), "vpc", "project") + opts = user_input_parameters(module) + + params = build_create_parameters(opts) + r = send_create_request(module, params, client) + module.params['id'] = navigate_value(r, ["privateips", "id"], + {"privateips": 0}) + + +def delete(config): + module = config.module + client = config.client(get_region(module), "vpc", "project") + + send_delete_request(module, None, client) + + +def read_resource(config, exclude_output=False): + module = config.module + client = config.client(get_region(module), "vpc", "project") + + res = {} + + r = send_read_request(module, client) + res["read"] = fill_read_resp_body(r) + + return update_properties(module, res, None, exclude_output) + + +def 
_build_query_link(opts): + query_link = "?marker={marker}&limit=10" + + return query_link + + +def search_resource(config): + module = config.module + client = config.client(get_region(module), "vpc", "project") + opts = user_input_parameters(module) + identity_obj = _build_identity_object(opts) + query_link = _build_query_link(opts) + link = build_path(module, "subnets/{subnet_id}/privateips") + query_link + + result = [] + p = {'marker': ''} + while True: + url = link.format(**p) + r = send_list_request(module, client, url) + if not r: + break + + for item in r: + item = fill_list_resp_body(item) + if not are_different_dicts(identity_obj, item): + result.append(item) + + if len(result) > 1: + break + + p['marker'] = r[-1].get('id') + + return result + + +def build_create_parameters(opts): + params = dict() + + v = navigate_value(opts, ["ip_address"], None) + if not is_empty_value(v): + params["ip_address"] = v + + v = navigate_value(opts, ["subnet_id"], None) + if not is_empty_value(v): + params["subnet_id"] = v + + if not params: + return params + + params = {"privateips": [params]} + + return params + + +def send_create_request(module, params, client): + url = "privateips" + try: + r = client.post(url, params) + except HwcClientException as ex: + msg = ("module(hwc_vpc_private_ip): error running " + "api(create), error: %s" % str(ex)) + module.fail_json(msg=msg) + + return r + + +def send_delete_request(module, params, client): + url = build_path(module, "privateips/{id}") + + try: + r = client.delete(url, params) + except HwcClientException as ex: + msg = ("module(hwc_vpc_private_ip): error running " + "api(delete), error: %s" % str(ex)) + module.fail_json(msg=msg) + + return r + + +def send_read_request(module, client): + url = build_path(module, "privateips/{id}") + + r = None + try: + r = client.get(url) + except HwcClientException as ex: + msg = ("module(hwc_vpc_private_ip): error running " + "api(read), error: %s" % str(ex)) + module.fail_json(msg=msg) + + return navigate_value(r, ["privateip"], None) + + +def fill_read_resp_body(body): + result = dict() + + result["id"] = body.get("id") + + result["ip_address"] = body.get("ip_address") + + result["subnet_id"] = body.get("subnet_id") + + return result + + +def update_properties(module, response, array_index, exclude_output=False): + r = user_input_parameters(module) + + v = navigate_value(response, ["read", "ip_address"], array_index) + r["ip_address"] = v + + v = navigate_value(response, ["read", "subnet_id"], array_index) + r["subnet_id"] = v + + return r + + +def send_list_request(module, client, url): + + r = None + try: + r = client.get(url) + except HwcClientException as ex: + msg = ("module(hwc_vpc_private_ip): error running " + "api(list), error: %s" % str(ex)) + module.fail_json(msg=msg) + + return navigate_value(r, ["privateips"], None) + + +def _build_identity_object(all_opts): + result = dict() + + result["id"] = None + + v = navigate_value(all_opts, ["ip_address"], None) + result["ip_address"] = v + + v = navigate_value(all_opts, ["subnet_id"], None) + result["subnet_id"] = v + + return result + + +def fill_list_resp_body(body): + result = dict() + + result["id"] = body.get("id") + + result["ip_address"] = body.get("ip_address") + + result["subnet_id"] = body.get("subnet_id") + + return result + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/huawei/hwc_vpc_route.py b/plugins/modules/cloud/huawei/hwc_vpc_route.py new file mode 100644 index 0000000000..9b4e2de484 --- /dev/null +++ 
b/plugins/modules/cloud/huawei/hwc_vpc_route.py @@ -0,0 +1,440 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (C) 2019 Huawei +# GNU General Public License v3.0+ (see COPYING or +# https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +############################################################################### +# Documentation +############################################################################### + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ["preview"], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: hwc_vpc_route +description: + - vpc route management. +short_description: Creates a resource of Vpc/Route in Huawei Cloud +notes: + - If I(id) option is provided, it takes precedence over I(destination), I(vpc_id), I(type) and I(next_hop) for route selection. + - I(destination), I(vpc_id), I(type) and I(next_hop) are used for route selection. If more than one route with this options exists, execution is aborted. + - No parameter support updating. If one of option is changed, the module will create a new resource. +author: Huawei Inc. (@huaweicloud) +requirements: + - keystoneauth1 >= 3.6.0 +options: + state: + description: + - Whether the given object should exist in Huawei Cloud. + type: str + choices: ['present', 'absent'] + default: 'present' + destination: + description: + - Specifies the destination IP address or CIDR block. + type: str + required: true + next_hop: + description: + - Specifies the next hop. The value is VPC peering connection ID. + type: str + required: true + vpc_id: + description: + - Specifies the VPC ID to which route is added. + type: str + required: true + type: + description: + - Specifies the type of route. + type: str + required: false + default: 'peering' +extends_documentation_fragment: +- community.general.hwc + +''' + +EXAMPLES = ''' +# create a peering connect +- name: create a local vpc + hwc_network_vpc: + cidr: "192.168.0.0/16" + name: "ansible_network_vpc_test_local" + register: vpc1 +- name: create a peering vpc + hwc_network_vpc: + cidr: "192.168.0.0/16" + name: "ansible_network_vpc_test_peering" + register: vpc2 +- name: create a peering connect + hwc_vpc_peering_connect: + local_vpc_id: "{{ vpc1.id }}" + name: "ansible_network_peering_test" + filters: + - "name" + peering_vpc: + vpc_id: "{{ vpc2.id }}" + register: connect +- name: create a route + hwc_vpc_route: + vpc_id: "{{ vpc1.id }}" + destination: "192.168.0.0/16" + next_hop: "{{ connect.id }}" +''' + +RETURN = ''' + id: + description: + - UUID of the route. + type: str + returned: success + destination: + description: + - Specifies the destination IP address or CIDR block. + type: str + returned: success + next_hop: + description: + - Specifies the next hop. The value is VPC peering connection ID. + type: str + returned: success + vpc_id: + description: + - Specifies the VPC ID to which route is added. + type: str + returned: success + type: + description: + - Specifies the type of route. 
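All of the search_resource implementations in this commit page through the list APIs with a marker cursor (the ?marker={marker}&limit=10 query links). A minimal sketch of that pagination loop, with get_page standing in for the per-module list call:

def list_all(get_page):
    # get_page(marker) is a hypothetical callable returning the next
    # page of items (each carrying an "id") or an empty list when
    # exhausted; the marker is the id of the last item seen.
    items, marker = [], ''
    while True:
        page = get_page(marker)
        if not page:
            return items
        items.extend(page)
        marker = page[-1].get('id')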
+ type: str + returned: success +''' + +from ansible_collections.community.general.plugins.module_utils.hwc_utils import ( + Config, HwcClientException, HwcModule, are_different_dicts, build_path, + get_region, is_empty_value, navigate_value) + + +def build_module(): + return HwcModule( + argument_spec=dict( + state=dict(default='present', choices=['present', 'absent'], + type='str'), + destination=dict(type='str', required=True), + next_hop=dict(type='str', required=True), + vpc_id=dict(type='str', required=True), + type=dict(type='str', default='peering'), + id=dict(type='str') + ), + supports_check_mode=True, + ) + + +def main(): + """Main function""" + + module = build_module() + config = Config(module, "vpc") + + try: + resource = None + if module.params.get("id"): + resource = get_resource_by_id(config) + if module.params['state'] == 'present': + opts = user_input_parameters(module) + if are_different_dicts(resource, opts): + raise Exception( + "Cannot change option from (%s) to (%s) for an" + " existing route.(%s)" % (resource, opts, + config.module.params.get( + 'id'))) + else: + v = search_resource(config) + if len(v) > 1: + raise Exception("Found more than one resource(%s)" % ", ".join([ + navigate_value(i, ["id"]) for i in v])) + + if len(v) == 1: + resource = update_properties(module, {"read": v[0]}, None) + module.params['id'] = navigate_value(resource, ["id"]) + + result = {} + changed = False + if module.params['state'] == 'present': + if resource is None: + if not module.check_mode: + resource = create(config) + changed = True + + result = resource + else: + if resource: + if not module.check_mode: + delete(config) + changed = True + + except Exception as ex: + module.fail_json(msg=str(ex)) + + else: + result['changed'] = changed + module.exit_json(**result) + + +def user_input_parameters(module): + return { + "destination": module.params.get("destination"), + "next_hop": module.params.get("next_hop"), + "type": module.params.get("type"), + "vpc_id": module.params.get("vpc_id"), + "id": module.params.get("id"), + } + + +def create(config): + module = config.module + client = config.client(get_region(module), "network", "project") + opts = user_input_parameters(module) + + params = build_create_parameters(opts) + r = send_create_request(module, params, client) + module.params['id'] = navigate_value(r, ["route", "id"]) + + result = update_properties(module, {"read": fill_resp_body(r)}, None) + return result + + +def delete(config): + module = config.module + client = config.client(get_region(module), "network", "project") + + send_delete_request(module, None, client) + + +def get_resource_by_id(config, exclude_output=False): + module = config.module + client = config.client(get_region(module), "network", "project") + + res = {} + + r = send_read_request(module, client) + res["read"] = fill_resp_body(r) + + result = update_properties(module, res, None, exclude_output) + return result + + +def _build_query_link(opts): + query_params = [] + + v = navigate_value(opts, ["type"]) + if v: + query_params.append("type=" + str(v)) + + v = navigate_value(opts, ["destination"]) + if v: + query_params.append("destination=" + str(v)) + + v = navigate_value(opts, ["vpc_id"]) + if v: + query_params.append("vpc_id=" + str(v)) + + query_link = "?marker={marker}&limit=10" + if query_params: + query_link += "&" + "&".join(query_params) + + return query_link + + +def search_resource(config): + module = config.module + client = config.client(get_region(module), "network", "project") + opts = 
user_input_parameters(module) + identity_obj = _build_identity_object(opts) + query_link = _build_query_link(opts) + link = "v2.0/vpc/routes" + query_link + + result = [] + p = {'marker': ''} + while True: + url = link.format(**p) + r = send_list_request(module, client, url) + if not r: + break + + for item in r: + item = fill_list_resp_body(item) + if not are_different_dicts(identity_obj, item): + result.append(item) + + if len(result) > 1: + break + + p['marker'] = r[-1].get('id') + + return result + + +def build_create_parameters(opts): + params = dict() + + v = navigate_value(opts, ["destination"], None) + if not is_empty_value(v): + params["destination"] = v + + v = navigate_value(opts, ["next_hop"], None) + if not is_empty_value(v): + params["nexthop"] = v + + v = navigate_value(opts, ["type"], None) + if not is_empty_value(v): + params["type"] = v + + v = navigate_value(opts, ["vpc_id"], None) + if not is_empty_value(v): + params["vpc_id"] = v + + if not params: + return params + + params = {"route": params} + + return params + + +def send_create_request(module, params, client): + url = "v2.0/vpc/routes" + try: + r = client.post(url, params) + except HwcClientException as ex: + msg = ("module(hwc_vpc_route): error running " + "api(create), error: %s" % str(ex)) + module.fail_json(msg=msg) + + return r + + +def send_delete_request(module, params, client): + url = build_path(module, "v2.0/vpc/routes/{id}") + + try: + r = client.delete(url, params) + except HwcClientException as ex: + msg = ("module(hwc_vpc_route): error running " + "api(delete), error: %s" % str(ex)) + module.fail_json(msg=msg) + + return r + + +def send_read_request(module, client): + url = build_path(module, "v2.0/vpc/routes/{id}") + + r = None + try: + r = client.get(url) + except HwcClientException as ex: + msg = ("module(hwc_vpc_route): error running " + "api(read), error: %s" % str(ex)) + module.fail_json(msg=msg) + + return navigate_value(r, ["route"], None) + + +def fill_resp_body(body): + result = dict() + + result["destination"] = body.get("destination") + + result["id"] = body.get("id") + + result["nexthop"] = body.get("nexthop") + + result["type"] = body.get("type") + + result["vpc_id"] = body.get("vpc_id") + + return result + + +def update_properties(module, response, array_index, exclude_output=False): + r = user_input_parameters(module) + + v = navigate_value(response, ["read", "destination"], array_index) + r["destination"] = v + + v = navigate_value(response, ["read", "nexthop"], array_index) + r["next_hop"] = v + + v = navigate_value(response, ["read", "type"], array_index) + r["type"] = v + + v = navigate_value(response, ["read", "vpc_id"], array_index) + r["vpc_id"] = v + + v = navigate_value(response, ["read", "id"], array_index) + r["id"] = v + + return r + + +def send_list_request(module, client, url): + + r = None + try: + r = client.get(url) + except HwcClientException as ex: + msg = ("module(hwc_vpc_route): error running " + "api(list), error: %s" % str(ex)) + module.fail_json(msg=msg) + + return navigate_value(r, ["routes"], None) + + +def _build_identity_object(all_opts): + result = dict() + + v = navigate_value(all_opts, ["destination"], None) + result["destination"] = v + + v = navigate_value(all_opts, ["id"], None) + result["id"] = v + + v = navigate_value(all_opts, ["next_hop"], None) + result["nexthop"] = v + + v = navigate_value(all_opts, ["type"], None) + result["type"] = v + + v = navigate_value(all_opts, ["vpc_id"], None) + result["vpc_id"] = v + + return result + + +def 
fill_list_resp_body(body):
+    result = dict()
+
+    result["destination"] = body.get("destination")
+
+    result["id"] = body.get("id")
+
+    result["nexthop"] = body.get("nexthop")
+
+    result["type"] = body.get("type")
+
+    result["vpc_id"] = body.get("vpc_id")
+
+    return result
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/cloud/huawei/hwc_vpc_security_group.py b/plugins/modules/cloud/huawei/hwc_vpc_security_group.py
new file mode 100644
index 0000000000..fd106f958f
--- /dev/null
+++ b/plugins/modules/cloud/huawei/hwc_vpc_security_group.py
@@ -0,0 +1,648 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) 2019 Huawei
+# GNU General Public License v3.0+ (see COPYING or
+# https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+###############################################################################
+# Documentation
+###############################################################################
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ["preview"],
+                    'supported_by': 'community'}
+
+DOCUMENTATION = '''
+---
+module: hwc_vpc_security_group
+description:
+    - VPC security group management.
+short_description: Creates a resource of Vpc/SecurityGroup in Huawei Cloud
+notes:
+    - If the I(id) option is provided, it takes precedence over I(name),
+      I(enterprise_project_id) and I(vpc_id) for security group selection.
+    - I(name), I(enterprise_project_id) and I(vpc_id) are used for security
+      group selection. If more than one security group with these options
+      exists, execution is aborted.
+    - No parameter supports updating. If one of the options is changed, the
+      module will create a new resource.
+author: Huawei Inc. (@huaweicloud)
+requirements:
+    - keystoneauth1 >= 3.6.0
+options:
+    state:
+        description:
+            - Whether the given object should exist in Huawei Cloud.
+        type: str
+        choices: ['present', 'absent']
+        default: 'present'
+    name:
+        description:
+            - Specifies the security group name. The value is a string of 1 to
+              64 characters that can contain letters, digits, underscores C(_),
+              hyphens (-), and periods (.).
+        type: str
+        required: true
+    enterprise_project_id:
+        description:
+            - Specifies the enterprise project ID. When creating a security
+              group, associate the enterprise project ID with the security
+              group.
+        type: str
+        required: false
+        default: 0
+    vpc_id:
+        description:
+            - Specifies the resource ID of the VPC to which the security group
+              belongs.
+        type: str
+        required: false
+extends_documentation_fragment:
+- community.general.hwc
+
+'''
+
+EXAMPLES = '''
+# create a security group
+- name: create a security group
+  hwc_vpc_security_group:
+    name: "ansible_network_security_group_test"
+'''
+
+RETURN = '''
+    name:
+        description:
+            - Specifies the security group name. The value is a string of 1 to
+              64 characters that can contain letters, digits, underscores C(_),
+              hyphens (-), and periods (.).
+        type: str
+        returned: success
+    enterprise_project_id:
+        description:
+            - Specifies the enterprise project ID. When creating a security
+              group, associate the enterprise project ID with the security
+              group.
+        type: str
+        returned: success
+    vpc_id:
+        description:
+            - Specifies the resource ID of the VPC to which the security group
+              belongs.
+        type: str
+        returned: success
+    rules:
+        description:
+            - Specifies the security group rule, which ensures that resources
+              in the security group can communicate with one another.
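search_resource filters each listed item against an identity object built from the user's options, comparing with are_different_dicts from hwc_utils. A loose, hypothetical approximation of that match:

def matches_identity(identity, item):
    # Fields the user did not constrain are None in the identity object
    # and act as wildcards here; everything else must equal the listed
    # item's value. (The generated code instead normalizes both dicts
    # and compares them whole.)
    return all(v is None or item.get(k) == v for k, v in identity.items())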
+ type: complex + returned: success + contains: + description: + description: + - Provides supplementary information about the security + group rule. + type: str + returned: success + direction: + description: + - Specifies the direction of access control. The value can + be egress or ingress. + type: str + returned: success + ethertype: + description: + - Specifies the IP protocol version. The value can be IPv4 + or IPv6. + type: str + returned: success + id: + description: + - Specifies the security group rule ID. + type: str + returned: success + port_range_max: + description: + - Specifies the end port number. The value ranges from 1 to + 65535. If the protocol is not icmp, the value cannot be + smaller than the port_range_min value. An empty value + indicates all ports. + type: int + returned: success + port_range_min: + description: + - Specifies the start port number. The value ranges from 1 + to 65535. The value cannot be greater than the + port_range_max value. An empty value indicates all ports. + type: int + returned: success + protocol: + description: + - Specifies the protocol type. The value can be icmp, tcp, + udp, or others. If the parameter is left blank, the + security group supports all protocols. + type: str + returned: success + remote_address_group_id: + description: + - Specifies the ID of remote IP address group. + type: str + returned: success + remote_group_id: + description: + - Specifies the ID of the peer security group. + type: str + returned: success + remote_ip_prefix: + description: + - Specifies the remote IP address. If the access control + direction is set to egress, the parameter specifies the + source IP address. If the access control direction is set + to ingress, the parameter specifies the destination IP + address. 
+ type: str + returned: success +''' + +from ansible_collections.community.general.plugins.module_utils.hwc_utils import ( + Config, HwcClientException, HwcModule, are_different_dicts, build_path, + get_region, is_empty_value, navigate_value) + + +def build_module(): + return HwcModule( + argument_spec=dict( + state=dict(default='present', choices=['present', 'absent'], + type='str'), + name=dict(type='str', required=True), + enterprise_project_id=dict(type='str'), + vpc_id=dict(type='str') + ), + supports_check_mode=True, + ) + + +def main(): + """Main function""" + + module = build_module() + config = Config(module, "vpc") + + try: + resource = None + if module.params.get("id"): + resource = read_resource(config) + if module.params['state'] == 'present': + check_resource_option(resource, module) + else: + v = search_resource(config) + if len(v) > 1: + raise Exception("Found more than one resource(%s)" % ", ".join([ + navigate_value(i, ["id"]) for i in v])) + + if len(v) == 1: + resource = update_properties(module, {"read": v[0]}, None) + module.params['id'] = navigate_value(resource, ["id"]) + + result = {} + changed = False + if module.params['state'] == 'present': + if resource is None: + if not module.check_mode: + resource = create(config) + changed = True + + result = resource + else: + if resource: + if not module.check_mode: + delete(config) + changed = True + + except Exception as ex: + module.fail_json(msg=str(ex)) + + else: + result['changed'] = changed + module.exit_json(**result) + + +def user_input_parameters(module): + return { + "enterprise_project_id": module.params.get("enterprise_project_id"), + "name": module.params.get("name"), + "vpc_id": module.params.get("vpc_id"), + "id": module.params.get("id"), + } + + +def check_resource_option(resource, module): + opts = user_input_parameters(module) + + resource = { + "enterprise_project_id": resource.get("enterprise_project_id"), + "name": resource.get("name"), + "vpc_id": resource.get("vpc_id"), + "id": resource.get("id"), + } + + if are_different_dicts(resource, opts): + raise Exception( + "Cannot change option from (%s) to (%s) for an" + " existing security group(%s)." 
% (resource, opts, + module.params.get('id'))) + + +def create(config): + module = config.module + client = config.client(get_region(module), "vpc", "project") + opts = user_input_parameters(module) + + params = build_create_parameters(opts) + r = send_create_request(module, params, client) + module.params['id'] = navigate_value(r, ["security_group", "id"]) + + result = update_properties(module, {"read": fill_read_resp_body(r)}, None) + return result + + +def delete(config): + module = config.module + client = config.client(get_region(module), "vpc", "project") + + send_delete_request(module, None, client) + + +def read_resource(config, exclude_output=False): + module = config.module + client = config.client(get_region(module), "vpc", "project") + + res = {} + + r = send_read_request(module, client) + res["read"] = fill_read_resp_body(r) + + return update_properties(module, res, None, exclude_output) + + +def _build_query_link(opts): + query_params = [] + + v = navigate_value(opts, ["enterprise_project_id"]) + if v: + query_params.append("enterprise_project_id=" + str(v)) + + v = navigate_value(opts, ["vpc_id"]) + if v: + query_params.append("vpc_id=" + str(v)) + + query_link = "?marker={marker}&limit=10" + if query_params: + query_link += "&" + "&".join(query_params) + + return query_link + + +def search_resource(config): + module = config.module + client = config.client(get_region(module), "vpc", "project") + opts = user_input_parameters(module) + identity_obj = _build_identity_object(opts) + query_link = _build_query_link(opts) + link = "security-groups" + query_link + + result = [] + p = {'marker': ''} + while True: + url = link.format(**p) + r = send_list_request(module, client, url) + if not r: + break + + for item in r: + item = fill_list_resp_body(item) + if not are_different_dicts(identity_obj, item): + result.append(item) + + if len(result) > 1: + break + + p['marker'] = r[-1].get('id') + + return result + + +def build_create_parameters(opts): + params = dict() + + v = navigate_value(opts, ["enterprise_project_id"], None) + if not is_empty_value(v): + params["enterprise_project_id"] = v + + v = navigate_value(opts, ["name"], None) + if not is_empty_value(v): + params["name"] = v + + v = navigate_value(opts, ["vpc_id"], None) + if not is_empty_value(v): + params["vpc_id"] = v + + if not params: + return params + + params = {"security_group": params} + + return params + + +def send_create_request(module, params, client): + url = "security-groups" + try: + r = client.post(url, params) + except HwcClientException as ex: + msg = ("module(hwc_vpc_security_group): error running " + "api(create), error: %s" % str(ex)) + module.fail_json(msg=msg) + + return r + + +def send_delete_request(module, params, client): + url = build_path(module, "security-groups/{id}") + + try: + r = client.delete(url, params) + except HwcClientException as ex: + msg = ("module(hwc_vpc_security_group): error running " + "api(delete), error: %s" % str(ex)) + module.fail_json(msg=msg) + + return r + + +def send_read_request(module, client): + url = build_path(module, "security-groups/{id}") + + r = None + try: + r = client.get(url) + except HwcClientException as ex: + msg = ("module(hwc_vpc_security_group): error running " + "api(read), error: %s" % str(ex)) + module.fail_json(msg=msg) + + return navigate_value(r, ["security_group"], None) + + +def fill_read_resp_body(body): + result = dict() + + result["enterprise_project_id"] = body.get("enterprise_project_id") + + result["id"] = body.get("id") + + 
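+    # Each field is copied with .get() so keys the API omits are recorded as
+    # None instead of raising KeyError; fill_list_resp_body() below produces
+    # the same shape for list results.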
result["name"] = body.get("name") + + v = fill_read_resp_security_group_rules(body.get("security_group_rules")) + result["security_group_rules"] = v + + result["vpc_id"] = body.get("vpc_id") + + return result + + +def fill_read_resp_security_group_rules(value): + if not value: + return None + + result = [] + for item in value: + val = dict() + + val["description"] = item.get("description") + + val["direction"] = item.get("direction") + + val["ethertype"] = item.get("ethertype") + + val["id"] = item.get("id") + + val["port_range_max"] = item.get("port_range_max") + + val["port_range_min"] = item.get("port_range_min") + + val["protocol"] = item.get("protocol") + + val["remote_address_group_id"] = item.get("remote_address_group_id") + + val["remote_group_id"] = item.get("remote_group_id") + + val["remote_ip_prefix"] = item.get("remote_ip_prefix") + + val["security_group_id"] = item.get("security_group_id") + + result.append(val) + + return result + + +def update_properties(module, response, array_index, exclude_output=False): + r = user_input_parameters(module) + + v = navigate_value(response, ["read", "enterprise_project_id"], + array_index) + r["enterprise_project_id"] = v + + v = navigate_value(response, ["read", "name"], array_index) + r["name"] = v + + if not exclude_output: + v = r.get("rules") + v = flatten_rules(response, array_index, v, exclude_output) + r["rules"] = v + + v = navigate_value(response, ["read", "vpc_id"], array_index) + r["vpc_id"] = v + + v = navigate_value(response, ["read", "id"], array_index) + r["id"] = v + + return r + + +def flatten_rules(d, array_index, current_value, exclude_output): + n = 0 + result = current_value + has_init_value = True + if result: + n = len(result) + else: + has_init_value = False + result = [] + v = navigate_value(d, ["read", "security_group_rules"], + array_index) + if not v: + return current_value + n = len(v) + + new_array_index = dict() + if array_index: + new_array_index.update(array_index) + + for i in range(n): + new_array_index["read.security_group_rules"] = i + + val = dict() + if len(result) >= (i + 1) and result[i]: + val = result[i] + + if not exclude_output: + v = navigate_value(d, ["read", "security_group_rules", "description"], + new_array_index) + val["description"] = v + + if not exclude_output: + v = navigate_value(d, ["read", "security_group_rules", "direction"], + new_array_index) + val["direction"] = v + + if not exclude_output: + v = navigate_value(d, ["read", "security_group_rules", "ethertype"], + new_array_index) + val["ethertype"] = v + + if not exclude_output: + v = navigate_value(d, ["read", "security_group_rules", "id"], + new_array_index) + val["id"] = v + + if not exclude_output: + v = navigate_value(d, ["read", "security_group_rules", "port_range_max"], + new_array_index) + val["port_range_max"] = v + + if not exclude_output: + v = navigate_value(d, ["read", "security_group_rules", "port_range_min"], + new_array_index) + val["port_range_min"] = v + + if not exclude_output: + v = navigate_value(d, ["read", "security_group_rules", "protocol"], + new_array_index) + val["protocol"] = v + + if not exclude_output: + v = navigate_value(d, ["read", "security_group_rules", "remote_address_group_id"], + new_array_index) + val["remote_address_group_id"] = v + + if not exclude_output: + v = navigate_value(d, ["read", "security_group_rules", "remote_group_id"], + new_array_index) + val["remote_group_id"] = v + + if not exclude_output: + v = navigate_value(d, ["read", "security_group_rules", "remote_ip_prefix"], + 
                               new_array_index)
+            val["remote_ip_prefix"] = v
+
+        if len(result) >= (i + 1):
+            result[i] = val
+        else:
+            for v in val.values():
+                if v is not None:
+                    result.append(val)
+                    break
+
+    return result if (has_init_value or result) else current_value
+
+
+def send_list_request(module, client, url):
+
+    r = None
+    try:
+        r = client.get(url)
+    except HwcClientException as ex:
+        msg = ("module(hwc_vpc_security_group): error running "
+               "api(list), error: %s" % str(ex))
+        module.fail_json(msg=msg)
+
+    return navigate_value(r, ["security_groups"], None)
+
+
+def _build_identity_object(all_opts):
+    result = dict()
+
+    v = navigate_value(all_opts, ["enterprise_project_id"], None)
+    result["enterprise_project_id"] = v
+
+    result["id"] = None
+
+    v = navigate_value(all_opts, ["name"], None)
+    result["name"] = v
+
+    result["security_group_rules"] = None
+
+    v = navigate_value(all_opts, ["vpc_id"], None)
+    result["vpc_id"] = v
+
+    return result
+
+
+def fill_list_resp_body(body):
+    result = dict()
+
+    result["enterprise_project_id"] = body.get("enterprise_project_id")
+
+    result["id"] = body.get("id")
+
+    result["name"] = body.get("name")
+
+    v = fill_list_resp_security_group_rules(body.get("security_group_rules"))
+    result["security_group_rules"] = v
+
+    result["vpc_id"] = body.get("vpc_id")
+
+    return result
+
+
+def fill_list_resp_security_group_rules(value):
+    if not value:
+        return None
+
+    result = []
+    for item in value:
+        val = dict()
+
+        val["description"] = item.get("description")
+
+        val["direction"] = item.get("direction")
+
+        val["ethertype"] = item.get("ethertype")
+
+        val["id"] = item.get("id")
+
+        val["port_range_max"] = item.get("port_range_max")
+
+        val["port_range_min"] = item.get("port_range_min")
+
+        val["protocol"] = item.get("protocol")
+
+        val["remote_address_group_id"] = item.get("remote_address_group_id")
+
+        val["remote_group_id"] = item.get("remote_group_id")
+
+        val["remote_ip_prefix"] = item.get("remote_ip_prefix")
+
+        val["security_group_id"] = item.get("security_group_id")
+
+        result.append(val)
+
+    return result
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/cloud/huawei/hwc_vpc_security_group_rule.py b/plugins/modules/cloud/huawei/hwc_vpc_security_group_rule.py
new file mode 100644
index 0000000000..8a6df7c4e3
--- /dev/null
+++ b/plugins/modules/cloud/huawei/hwc_vpc_security_group_rule.py
@@ -0,0 +1,573 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) 2019 Huawei
+# GNU General Public License v3.0+ (see COPYING or
+# https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+###############################################################################
+# Documentation
+###############################################################################
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ["preview"],
+                    'supported_by': 'community'}
+
+DOCUMENTATION = '''
+---
+module: hwc_vpc_security_group_rule
+description:
+    - VPC security group rule management.
+short_description: Creates a resource of Vpc/SecurityGroupRule in Huawei Cloud
+notes:
+    - If I(id) option is provided, it takes precedence over
+      I(security_group_id) for security group rule selection.
+    - I(security_group_id) is used for security group rule selection. If more
+      than one security group rule with this option exists, execution is
+      aborted.
+    - No parameter supports updating. If any option is changed, the module
+      will create a new resource.
+author: Huawei Inc. (@huaweicloud)
+requirements:
+    - keystoneauth1 >= 3.6.0
+options:
+    state:
+        description:
+            - Whether the given object should exist in Huawei Cloud.
+        type: str
+        choices: ['present', 'absent']
+        default: 'present'
+    direction:
+        description:
+            - Specifies the direction of access control. The value can be
+              egress or ingress.
+        type: str
+        required: true
+    security_group_id:
+        description:
+            - Specifies the security group ID, which uniquely identifies the
+              security group.
+        type: str
+        required: true
+    description:
+        description:
+            - Provides supplementary information about the security group rule.
+              The value is a string of no more than 255 characters that can
+              contain letters and digits.
+        type: str
+        required: false
+    ethertype:
+        description:
+            - Specifies the IP protocol version. The value can be IPv4 or IPv6.
+              If you do not set this parameter, IPv4 is used by default.
+        type: str
+        required: false
+    port_range_max:
+        description:
+            - Specifies the end port number. The value ranges from 1 to 65535.
+              If the protocol is not icmp, the value cannot be smaller than the
+              port_range_min value. An empty value indicates all ports.
+        type: int
+        required: false
+    port_range_min:
+        description:
+            - Specifies the start port number. The value ranges from 1 to
+              65535. The value cannot be greater than the port_range_max value.
+              An empty value indicates all ports.
+        type: int
+        required: false
+    protocol:
+        description:
+            - Specifies the protocol type. The value can be icmp, tcp, or udp.
+              If the parameter is left blank, the security group supports all
+              protocols.
+        type: str
+        required: false
+    remote_group_id:
+        description:
+            - Specifies the ID of the peer security group. The value is
+              exclusive with parameter remote_ip_prefix.
+        type: str
+        required: false
+    remote_ip_prefix:
+        description:
+            - Specifies the remote IP address. If the access control direction
+              is set to egress, the parameter specifies the source IP address.
+              If the access control direction is set to ingress, the parameter
+              specifies the destination IP address. The value can be in the
+              CIDR format or IP addresses. The parameter is exclusive with
+              parameter remote_group_id.
+        type: str
+        required: false
+extends_documentation_fragment:
+- community.general.hwc
+
+'''
+
+EXAMPLES = '''
+# create a security group rule
+- name: create a security group
+  hwc_vpc_security_group:
+    name: "ansible_network_security_group_test"
+  register: sg
+- name: create a security group rule
+  hwc_vpc_security_group_rule:
+    direction: "ingress"
+    protocol: "tcp"
+    ethertype: "IPv4"
+    port_range_max: 22
+    security_group_id: "{{ sg.id }}"
+    port_range_min: 22
+    remote_ip_prefix: "0.0.0.0/0"
+'''
+
+RETURN = '''
+    direction:
+        description:
+            - Specifies the direction of access control. The value can be
+              egress or ingress.
+        type: str
+        returned: success
+    security_group_id:
+        description:
+            - Specifies the security group ID, which uniquely identifies the
+              security group.
+        type: str
+        returned: success
+    description:
+        description:
+            - Provides supplementary information about the security group rule.
+              The value is a string of no more than 255 characters that can
+              contain letters and digits.
+        type: str
+        returned: success
+    ethertype:
+        description:
+            - Specifies the IP protocol version. The value can be IPv4 or IPv6.
+              If you do not set this parameter, IPv4 is used by default.
+        type: str
+        returned: success
+    port_range_max:
+        description:
+            - Specifies the end port number. The value ranges from 1 to 65535.
+              If the protocol is not icmp, the value cannot be smaller than the
+              port_range_min value. An empty value indicates all ports.
+        type: int
+        returned: success
+    port_range_min:
+        description:
+            - Specifies the start port number. The value ranges from 1 to
+              65535. The value cannot be greater than the port_range_max value.
+              An empty value indicates all ports.
+        type: int
+        returned: success
+    protocol:
+        description:
+            - Specifies the protocol type. The value can be icmp, tcp, or udp.
+              If the parameter is left blank, the security group supports all
+              protocols.
+        type: str
+        returned: success
+    remote_group_id:
+        description:
+            - Specifies the ID of the peer security group. The value is
+              exclusive with parameter remote_ip_prefix.
+        type: str
+        returned: success
+    remote_ip_prefix:
+        description:
+            - Specifies the remote IP address. If the access control direction
+              is set to egress, the parameter specifies the source IP address.
+              If the access control direction is set to ingress, the parameter
+              specifies the destination IP address. The value can be in the
+              CIDR format or IP addresses. The parameter is exclusive with
+              parameter remote_group_id.
+        type: str
+        returned: success
+'''
+
+from ansible_collections.community.general.plugins.module_utils.hwc_utils import (
+    Config, HwcClientException, HwcModule, are_different_dicts, build_path,
+    get_region, is_empty_value, navigate_value)
+
+
+def build_module():
+    return HwcModule(
+        argument_spec=dict(
+            state=dict(default='present', choices=['present', 'absent'],
+                       type='str'),
+            direction=dict(type='str', required=True),
+            security_group_id=dict(type='str', required=True),
+            description=dict(type='str'),
+            ethertype=dict(type='str'),
+            port_range_max=dict(type='int'),
+            port_range_min=dict(type='int'),
+            protocol=dict(type='str'),
+            remote_group_id=dict(type='str'),
+            remote_ip_prefix=dict(type='str')
+        ),
+        supports_check_mode=True,
+    )
+
+
+def main():
+    """Main function"""
+
+    module = build_module()
+    config = Config(module, "vpc")
+
+    try:
+        resource = None
+        if module.params['id']:
+            resource = True
+        else:
+            v = search_resource(config)
+            if len(v) > 1:
+                raise Exception("Found more than one resource(%s)" % ", ".join([
+                    navigate_value(i, ["id"]) for i in v]))
+
+            if len(v) == 1:
+                resource = v[0]
+                module.params['id'] = navigate_value(resource, ["id"])
+
+        result = {}
+        changed = False
+        if module.params['state'] == 'present':
+            if resource is None:
+                if not module.check_mode:
+                    create(config)
+                changed = True
+
+            current = read_resource(config, exclude_output=True)
+            expect = user_input_parameters(module)
+            if are_different_dicts(expect, current):
+                raise Exception(
+                    "Cannot change option from (%s) to (%s) for an"
+                    " existing security group rule(%s)."
% (current, expect, module.params.get('id'))) + result = read_resource(config) + result['id'] = module.params.get('id') + else: + if resource: + if not module.check_mode: + delete(config) + changed = True + + except Exception as ex: + module.fail_json(msg=str(ex)) + + else: + result['changed'] = changed + module.exit_json(**result) + + +def user_input_parameters(module): + return { + "description": module.params.get("description"), + "direction": module.params.get("direction"), + "ethertype": module.params.get("ethertype"), + "port_range_max": module.params.get("port_range_max"), + "port_range_min": module.params.get("port_range_min"), + "protocol": module.params.get("protocol"), + "remote_group_id": module.params.get("remote_group_id"), + "remote_ip_prefix": module.params.get("remote_ip_prefix"), + "security_group_id": module.params.get("security_group_id"), + } + + +def create(config): + module = config.module + client = config.client(get_region(module), "vpc", "project") + opts = user_input_parameters(module) + + params = build_create_parameters(opts) + r = send_create_request(module, params, client) + module.params['id'] = navigate_value(r, ["security_group_rule", "id"]) + + +def delete(config): + module = config.module + client = config.client(get_region(module), "vpc", "project") + + send_delete_request(module, None, client) + + +def read_resource(config, exclude_output=False): + module = config.module + client = config.client(get_region(module), "vpc", "project") + + res = {} + + r = send_read_request(module, client) + res["read"] = fill_read_resp_body(r) + + return update_properties(module, res, None, exclude_output) + + +def _build_query_link(opts): + query_link = "?marker={marker}&limit=10" + v = navigate_value(opts, ["security_group_id"]) + if v: + query_link += "&security_group_id=" + str(v) + + return query_link + + +def search_resource(config): + module = config.module + client = config.client(get_region(module), "vpc", "project") + opts = user_input_parameters(module) + identity_obj = _build_identity_object(opts) + query_link = _build_query_link(opts) + link = "security-group-rules" + query_link + + result = [] + p = {'marker': ''} + while True: + url = link.format(**p) + r = send_list_request(module, client, url) + if not r: + break + + for item in r: + item = fill_list_resp_body(item) + if not are_different_dicts(identity_obj, item): + result.append(item) + + if len(result) > 1: + break + + p['marker'] = r[-1].get('id') + + return result + + +def build_create_parameters(opts): + params = dict() + + v = navigate_value(opts, ["description"], None) + if not is_empty_value(v): + params["description"] = v + + v = navigate_value(opts, ["direction"], None) + if not is_empty_value(v): + params["direction"] = v + + v = navigate_value(opts, ["ethertype"], None) + if not is_empty_value(v): + params["ethertype"] = v + + v = navigate_value(opts, ["port_range_max"], None) + if not is_empty_value(v): + params["port_range_max"] = v + + v = navigate_value(opts, ["port_range_min"], None) + if not is_empty_value(v): + params["port_range_min"] = v + + v = navigate_value(opts, ["protocol"], None) + if not is_empty_value(v): + params["protocol"] = v + + v = navigate_value(opts, ["remote_group_id"], None) + if not is_empty_value(v): + params["remote_group_id"] = v + + v = navigate_value(opts, ["remote_ip_prefix"], None) + if not is_empty_value(v): + params["remote_ip_prefix"] = v + + v = navigate_value(opts, ["security_group_id"], None) + if not is_empty_value(v): + params["security_group_id"] = 
v + + if not params: + return params + + params = {"security_group_rule": params} + + return params + + +def send_create_request(module, params, client): + url = "security-group-rules" + try: + r = client.post(url, params) + except HwcClientException as ex: + msg = ("module(hwc_vpc_security_group_rule): error running " + "api(create), error: %s" % str(ex)) + module.fail_json(msg=msg) + + return r + + +def send_delete_request(module, params, client): + url = build_path(module, "security-group-rules/{id}") + + try: + r = client.delete(url, params) + except HwcClientException as ex: + msg = ("module(hwc_vpc_security_group_rule): error running " + "api(delete), error: %s" % str(ex)) + module.fail_json(msg=msg) + + return r + + +def send_read_request(module, client): + url = build_path(module, "security-group-rules/{id}") + + r = None + try: + r = client.get(url) + except HwcClientException as ex: + msg = ("module(hwc_vpc_security_group_rule): error running " + "api(read), error: %s" % str(ex)) + module.fail_json(msg=msg) + + return navigate_value(r, ["security_group_rule"], None) + + +def fill_read_resp_body(body): + result = dict() + + result["description"] = body.get("description") + + result["direction"] = body.get("direction") + + result["ethertype"] = body.get("ethertype") + + result["id"] = body.get("id") + + result["port_range_max"] = body.get("port_range_max") + + result["port_range_min"] = body.get("port_range_min") + + result["protocol"] = body.get("protocol") + + result["remote_address_group_id"] = body.get("remote_address_group_id") + + result["remote_group_id"] = body.get("remote_group_id") + + result["remote_ip_prefix"] = body.get("remote_ip_prefix") + + result["security_group_id"] = body.get("security_group_id") + + return result + + +def update_properties(module, response, array_index, exclude_output=False): + r = user_input_parameters(module) + + v = navigate_value(response, ["read", "description"], array_index) + r["description"] = v + + v = navigate_value(response, ["read", "direction"], array_index) + r["direction"] = v + + v = navigate_value(response, ["read", "ethertype"], array_index) + r["ethertype"] = v + + v = navigate_value(response, ["read", "port_range_max"], array_index) + r["port_range_max"] = v + + v = navigate_value(response, ["read", "port_range_min"], array_index) + r["port_range_min"] = v + + v = navigate_value(response, ["read", "protocol"], array_index) + r["protocol"] = v + + v = navigate_value(response, ["read", "remote_group_id"], array_index) + r["remote_group_id"] = v + + v = navigate_value(response, ["read", "remote_ip_prefix"], array_index) + r["remote_ip_prefix"] = v + + v = navigate_value(response, ["read", "security_group_id"], array_index) + r["security_group_id"] = v + + return r + + +def send_list_request(module, client, url): + + r = None + try: + r = client.get(url) + except HwcClientException as ex: + msg = ("module(hwc_vpc_security_group_rule): error running " + "api(list), error: %s" % str(ex)) + module.fail_json(msg=msg) + + return navigate_value(r, ["security_group_rules"], None) + + +def _build_identity_object(all_opts): + result = dict() + + v = navigate_value(all_opts, ["description"], None) + result["description"] = v + + v = navigate_value(all_opts, ["direction"], None) + result["direction"] = v + + v = navigate_value(all_opts, ["ethertype"], None) + result["ethertype"] = v + + result["id"] = None + + v = navigate_value(all_opts, ["port_range_max"], None) + result["port_range_max"] = v + + v = navigate_value(all_opts, 
["port_range_min"], None) + result["port_range_min"] = v + + v = navigate_value(all_opts, ["protocol"], None) + result["protocol"] = v + + result["remote_address_group_id"] = None + + v = navigate_value(all_opts, ["remote_group_id"], None) + result["remote_group_id"] = v + + v = navigate_value(all_opts, ["remote_ip_prefix"], None) + result["remote_ip_prefix"] = v + + v = navigate_value(all_opts, ["security_group_id"], None) + result["security_group_id"] = v + + return result + + +def fill_list_resp_body(body): + result = dict() + + result["description"] = body.get("description") + + result["direction"] = body.get("direction") + + result["ethertype"] = body.get("ethertype") + + result["id"] = body.get("id") + + result["port_range_max"] = body.get("port_range_max") + + result["port_range_min"] = body.get("port_range_min") + + result["protocol"] = body.get("protocol") + + result["remote_address_group_id"] = body.get("remote_address_group_id") + + result["remote_group_id"] = body.get("remote_group_id") + + result["remote_ip_prefix"] = body.get("remote_ip_prefix") + + result["security_group_id"] = body.get("security_group_id") + + return result + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/huawei/hwc_vpc_subnet.py b/plugins/modules/cloud/huawei/hwc_vpc_subnet.py new file mode 100644 index 0000000000..73724301a0 --- /dev/null +++ b/plugins/modules/cloud/huawei/hwc_vpc_subnet.py @@ -0,0 +1,736 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (C) 2019 Huawei +# GNU General Public License v3.0+ (see COPYING or +# https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +############################################################################### +# Documentation +############################################################################### + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ["preview"], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: hwc_vpc_subnet +description: + - subnet management. +short_description: Creates a resource of Vpc/Subnet in Huawei Cloud +author: Huawei Inc. (@huaweicloud) +requirements: + - keystoneauth1 >= 3.6.0 +options: + state: + description: + - Whether the given object should exist in Huawei Cloud. + type: str + choices: ['present', 'absent'] + default: 'present' + timeouts: + description: + - The timeouts for each operations. + type: dict + suboptions: + create: + description: + - The timeouts for create operation. + type: str + default: '15m' + update: + description: + - The timeouts for update operation. + type: str + default: '15m' + cidr: + description: + - Specifies the subnet CIDR block. The value must be within the VPC + CIDR block and be in CIDR format. The subnet mask cannot be + greater than 28. Cannot be changed after creating the subnet. + type: str + required: true + gateway_ip: + description: + - Specifies the gateway of the subnet. The value must be an IP + address in the subnet. Cannot be changed after creating the subnet. + type: str + required: true + name: + description: + - Specifies the subnet name. The value is a string of 1 to 64 + characters that can contain letters, digits, underscores C(_), + hyphens (-), and periods (.). + type: str + required: true + vpc_id: + description: + - Specifies the ID of the VPC to which the subnet belongs. Cannot + be changed after creating the subnet. 
+ type: str + required: true + availability_zone: + description: + - Specifies the AZ to which the subnet belongs. Cannot be changed + after creating the subnet. + type: str + required: false + dhcp_enable: + description: + - Specifies whether DHCP is enabled for the subnet. The value can + be true (enabled) or false(disabled), and default value is true. + If this parameter is set to false, newly created ECSs cannot + obtain IP addresses, and usernames and passwords cannot be + injected using Cloud-init. + type: bool + required: false + dns_address: + description: + - Specifies the DNS server addresses for subnet. The address + in the head will be used first. + type: list + required: false +extends_documentation_fragment: +- community.general.hwc + +''' + +EXAMPLES = ''' +# create subnet +- name: create vpc + hwc_network_vpc: + cidr: "192.168.100.0/24" + name: "ansible_network_vpc_test" + register: vpc +- name: create subnet + hwc_vpc_subnet: + vpc_id: "{{ vpc.id }}" + cidr: "192.168.100.0/26" + gateway_ip: "192.168.100.32" + name: "ansible_network_subnet_test" + dhcp_enable: True +''' + +RETURN = ''' + cidr: + description: + - Specifies the subnet CIDR block. The value must be within the VPC + CIDR block and be in CIDR format. The subnet mask cannot be + greater than 28. + type: str + returned: success + gateway_ip: + description: + - Specifies the gateway of the subnet. The value must be an IP + address in the subnet. + type: str + returned: success + name: + description: + - Specifies the subnet name. The value is a string of 1 to 64 + characters that can contain letters, digits, underscores C(_), + hyphens (-), and periods (.). + type: str + returned: success + vpc_id: + description: + - Specifies the ID of the VPC to which the subnet belongs. + type: str + returned: success + availability_zone: + description: + - Specifies the AZ to which the subnet belongs. + type: str + returned: success + dhcp_enable: + description: + - Specifies whether DHCP is enabled for the subnet. The value can + be true (enabled) or false(disabled), and default value is true. + If this parameter is set to false, newly created ECSs cannot + obtain IP addresses, and usernames and passwords cannot be + injected using Cloud-init. + type: bool + returned: success + dns_address: + description: + - Specifies the DNS server addresses for subnet. The address + in the head will be used first. 
+ type: list + returned: success +''' + +from ansible_collections.community.general.plugins.module_utils.hwc_utils import ( + Config, HwcClientException, HwcClientException404, HwcModule, + are_different_dicts, build_path, get_region, is_empty_value, + navigate_value, wait_to_finish) + + +def build_module(): + return HwcModule( + argument_spec=dict( + state=dict(default='present', choices=['present', 'absent'], + type='str'), + timeouts=dict(type='dict', options=dict( + create=dict(default='15m', type='str'), + update=dict(default='15m', type='str'), + ), default=dict()), + cidr=dict(type='str', required=True), + gateway_ip=dict(type='str', required=True), + name=dict(type='str', required=True), + vpc_id=dict(type='str', required=True), + availability_zone=dict(type='str'), + dhcp_enable=dict(type='bool'), + dns_address=dict(type='list', elements='str') + ), + supports_check_mode=True, + ) + + +def main(): + """Main function""" + + module = build_module() + config = Config(module, "vpc") + + try: + resource = None + if module.params.get('id'): + resource = True + else: + v = search_resource(config) + if len(v) > 1: + raise Exception("Found more than one resource(%s)" % ", ".join([ + navigate_value(i, ["id"]) for i in v])) + + if len(v) == 1: + resource = v[0] + module.params['id'] = navigate_value(resource, ["id"]) + + result = {} + changed = False + if module.params['state'] == 'present': + if resource is None: + if not module.check_mode: + create(config) + changed = True + + current = read_resource(config, exclude_output=True) + expect = user_input_parameters(module) + if are_different_dicts(expect, current): + if not module.check_mode: + update(config) + changed = True + + result = read_resource(config) + result['id'] = module.params.get('id') + else: + if resource: + if not module.check_mode: + delete(config) + changed = True + + except Exception as ex: + module.fail_json(msg=str(ex)) + + else: + result['changed'] = changed + module.exit_json(**result) + + +def user_input_parameters(module): + return { + "availability_zone": module.params.get("availability_zone"), + "cidr": module.params.get("cidr"), + "dhcp_enable": module.params.get("dhcp_enable"), + "dns_address": module.params.get("dns_address"), + "gateway_ip": module.params.get("gateway_ip"), + "name": module.params.get("name"), + "vpc_id": module.params.get("vpc_id"), + } + + +def create(config): + module = config.module + client = config.client(get_region(module), "vpc", "project") + timeout = 60 * int(module.params['timeouts']['create'].rstrip('m')) + opts = user_input_parameters(module) + + params = build_create_parameters(opts) + r = send_create_request(module, params, client) + obj = async_wait_create(config, r, client, timeout) + module.params['id'] = navigate_value(obj, ["subnet", "id"]) + + +def update(config): + module = config.module + client = config.client(get_region(module), "vpc", "project") + timeout = 60 * int(module.params['timeouts']['update'].rstrip('m')) + opts = user_input_parameters(module) + + params = build_update_parameters(opts) + if params: + r = send_update_request(module, params, client) + async_wait_update(config, r, client, timeout) + + +def delete(config): + module = config.module + client = config.client(get_region(module), "vpc", "project") + + send_delete_request(module, None, client) + + url = build_path(module, "subnets/{id}") + + def _refresh_status(): + try: + client.get(url) + except HwcClientException404: + return True, "Done" + + except Exception: + return None, "" + + return True, 
"Pending" + + timeout = 60 * int(module.params['timeouts']['create'].rstrip('m')) + try: + wait_to_finish(["Done"], ["Pending"], _refresh_status, timeout) + except Exception as ex: + module.fail_json(msg="module(hwc_vpc_subnet): error " + "waiting for api(delete) to " + "be done, error= %s" % str(ex)) + + +def read_resource(config, exclude_output=False): + module = config.module + client = config.client(get_region(module), "vpc", "project") + + res = {} + + r = send_read_request(module, client) + res["read"] = fill_read_resp_body(r) + + return update_properties(module, res, None, exclude_output) + + +def _build_query_link(opts): + query_link = "?marker={marker}&limit=10" + v = navigate_value(opts, ["vpc_id"]) + if v: + query_link += "&vpc_id=" + str(v) + + return query_link + + +def search_resource(config): + module = config.module + client = config.client(get_region(module), "vpc", "project") + opts = user_input_parameters(module) + identity_obj = _build_identity_object(opts) + query_link = _build_query_link(opts) + link = "subnets" + query_link + + result = [] + p = {'marker': ''} + while True: + url = link.format(**p) + r = send_list_request(module, client, url) + if not r: + break + + for item in r: + item = fill_list_resp_body(item) + if not are_different_dicts(identity_obj, item): + result.append(item) + + if len(result) > 1: + break + + p['marker'] = r[-1].get('id') + + return result + + +def build_create_parameters(opts): + params = dict() + + v = navigate_value(opts, ["availability_zone"], None) + if not is_empty_value(v): + params["availability_zone"] = v + + v = navigate_value(opts, ["cidr"], None) + if not is_empty_value(v): + params["cidr"] = v + + v = navigate_value(opts, ["dhcp_enable"], None) + if v is not None: + params["dhcp_enable"] = v + + v = expand_create_dns_list(opts, None) + if not is_empty_value(v): + params["dnsList"] = v + + v = navigate_value(opts, ["gateway_ip"], None) + if not is_empty_value(v): + params["gateway_ip"] = v + + v = navigate_value(opts, ["name"], None) + if not is_empty_value(v): + params["name"] = v + + v = expand_create_primary_dns(opts, None) + if not is_empty_value(v): + params["primary_dns"] = v + + v = expand_create_secondary_dns(opts, None) + if not is_empty_value(v): + params["secondary_dns"] = v + + v = navigate_value(opts, ["vpc_id"], None) + if not is_empty_value(v): + params["vpc_id"] = v + + if not params: + return params + + params = {"subnet": params} + + return params + + +def expand_create_dns_list(d, array_index): + v = navigate_value(d, ["dns_address"], array_index) + return v if (v and len(v) > 2) else [] + + +def expand_create_primary_dns(d, array_index): + v = navigate_value(d, ["dns_address"], array_index) + return v[0] if v else "" + + +def expand_create_secondary_dns(d, array_index): + v = navigate_value(d, ["dns_address"], array_index) + return v[1] if (v and len(v) > 1) else "" + + +def send_create_request(module, params, client): + url = "subnets" + try: + r = client.post(url, params) + except HwcClientException as ex: + msg = ("module(hwc_vpc_subnet): error running " + "api(create), error: %s" % str(ex)) + module.fail_json(msg=msg) + + return r + + +def async_wait_create(config, result, client, timeout): + module = config.module + + path_parameters = { + "subnet_id": ["subnet", "id"], + } + data = dict((key, navigate_value(result, path)) + for key, path in path_parameters.items()) + + url = build_path(module, "subnets/{subnet_id}", data) + + def _query_status(): + r = None + try: + r = client.get(url, 
timeout=timeout) + except HwcClientException: + return None, "" + + try: + s = navigate_value(r, ["subnet", "status"]) + return r, s + except Exception: + return None, "" + + try: + return wait_to_finish( + ["ACTIVE"], + ["UNKNOWN"], + _query_status, timeout) + except Exception as ex: + module.fail_json(msg="module(hwc_vpc_subnet): error " + "waiting for api(create) to " + "be done, error= %s" % str(ex)) + + +def build_update_parameters(opts): + params = dict() + + v = navigate_value(opts, ["dhcp_enable"], None) + if v is not None: + params["dhcp_enable"] = v + + v = expand_update_dns_list(opts, None) + if v is not None: + params["dnsList"] = v + + v = navigate_value(opts, ["name"], None) + if not is_empty_value(v): + params["name"] = v + + v = expand_update_primary_dns(opts, None) + if v is not None: + params["primary_dns"] = v + + v = expand_update_secondary_dns(opts, None) + if v is not None: + params["secondary_dns"] = v + + if not params: + return params + + params = {"subnet": params} + + return params + + +def expand_update_dns_list(d, array_index): + v = navigate_value(d, ["dns_address"], array_index) + if v: + if len(v) > 2: + return v + return None + return [] + + +def expand_update_primary_dns(d, array_index): + v = navigate_value(d, ["dns_address"], array_index) + return v[0] if v else "" + + +def expand_update_secondary_dns(d, array_index): + v = navigate_value(d, ["dns_address"], array_index) + return v[1] if (v and len(v) > 1) else "" + + +def send_update_request(module, params, client): + url = build_path(module, "vpcs/{vpc_id}/subnets/{id}") + + try: + r = client.put(url, params) + except HwcClientException as ex: + msg = ("module(hwc_vpc_subnet): error running " + "api(update), error: %s" % str(ex)) + module.fail_json(msg=msg) + + return r + + +def async_wait_update(config, result, client, timeout): + module = config.module + + path_parameters = { + "subnet_id": ["subnet", "id"], + } + data = dict((key, navigate_value(result, path)) + for key, path in path_parameters.items()) + + url = build_path(module, "subnets/{subnet_id}", data) + + def _query_status(): + r = None + try: + r = client.get(url, timeout=timeout) + except HwcClientException: + return None, "" + + try: + s = navigate_value(r, ["subnet", "status"]) + return r, s + except Exception: + return None, "" + + try: + return wait_to_finish( + ["ACTIVE"], + ["UNKNOWN"], + _query_status, timeout) + except Exception as ex: + module.fail_json(msg="module(hwc_vpc_subnet): error " + "waiting for api(update) to " + "be done, error= %s" % str(ex)) + + +def send_delete_request(module, params, client): + url = build_path(module, "vpcs/{vpc_id}/subnets/{id}") + + try: + r = client.delete(url, params) + except HwcClientException as ex: + msg = ("module(hwc_vpc_subnet): error running " + "api(delete), error: %s" % str(ex)) + module.fail_json(msg=msg) + + return r + + +def send_read_request(module, client): + url = build_path(module, "subnets/{id}") + + r = None + try: + r = client.get(url) + except HwcClientException as ex: + msg = ("module(hwc_vpc_subnet): error running " + "api(read), error: %s" % str(ex)) + module.fail_json(msg=msg) + + return navigate_value(r, ["subnet"], None) + + +def fill_read_resp_body(body): + result = dict() + + result["availability_zone"] = body.get("availability_zone") + + result["cidr"] = body.get("cidr") + + result["dhcp_enable"] = body.get("dhcp_enable") + + result["dnsList"] = body.get("dnsList") + + result["gateway_ip"] = body.get("gateway_ip") + + result["id"] = body.get("id") + + 
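+    # Only dnsList feeds back into the user-facing dns_address value (see
+    # update_properties()); the neutron_* and DNS bookkeeping fields below are
+    # carried so read and list responses share one shape.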
result["name"] = body.get("name") + + result["neutron_network_id"] = body.get("neutron_network_id") + + result["neutron_subnet_id"] = body.get("neutron_subnet_id") + + result["primary_dns"] = body.get("primary_dns") + + result["secondary_dns"] = body.get("secondary_dns") + + result["status"] = body.get("status") + + result["vpc_id"] = body.get("vpc_id") + + return result + + +def update_properties(module, response, array_index, exclude_output=False): + r = user_input_parameters(module) + + v = navigate_value(response, ["read", "availability_zone"], array_index) + r["availability_zone"] = v + + v = navigate_value(response, ["read", "cidr"], array_index) + r["cidr"] = v + + v = navigate_value(response, ["read", "dhcp_enable"], array_index) + r["dhcp_enable"] = v + + v = navigate_value(response, ["read", "dnsList"], array_index) + r["dns_address"] = v + + v = navigate_value(response, ["read", "gateway_ip"], array_index) + r["gateway_ip"] = v + + v = navigate_value(response, ["read", "name"], array_index) + r["name"] = v + + v = navigate_value(response, ["read", "vpc_id"], array_index) + r["vpc_id"] = v + + return r + + +def send_list_request(module, client, url): + + r = None + try: + r = client.get(url) + except HwcClientException as ex: + msg = ("module(hwc_vpc_subnet): error running " + "api(list), error: %s" % str(ex)) + module.fail_json(msg=msg) + + return navigate_value(r, ["subnets"], None) + + +def _build_identity_object(all_opts): + result = dict() + + v = navigate_value(all_opts, ["availability_zone"], None) + result["availability_zone"] = v + + v = navigate_value(all_opts, ["cidr"], None) + result["cidr"] = v + + v = navigate_value(all_opts, ["dhcp_enable"], None) + result["dhcp_enable"] = v + + v = navigate_value(all_opts, ["dns_address"], None) + result["dnsList"] = v + + v = navigate_value(all_opts, ["gateway_ip"], None) + result["gateway_ip"] = v + + result["id"] = None + + v = navigate_value(all_opts, ["name"], None) + result["name"] = v + + result["neutron_network_id"] = None + + result["neutron_subnet_id"] = None + + result["primary_dns"] = None + + result["secondary_dns"] = None + + result["status"] = None + + v = navigate_value(all_opts, ["vpc_id"], None) + result["vpc_id"] = v + + return result + + +def fill_list_resp_body(body): + result = dict() + + result["availability_zone"] = body.get("availability_zone") + + result["cidr"] = body.get("cidr") + + result["dhcp_enable"] = body.get("dhcp_enable") + + result["dnsList"] = body.get("dnsList") + + result["gateway_ip"] = body.get("gateway_ip") + + result["id"] = body.get("id") + + result["name"] = body.get("name") + + result["neutron_network_id"] = body.get("neutron_network_id") + + result["neutron_subnet_id"] = body.get("neutron_subnet_id") + + result["primary_dns"] = body.get("primary_dns") + + result["secondary_dns"] = body.get("secondary_dns") + + result["status"] = body.get("status") + + result["vpc_id"] = body.get("vpc_id") + + return result + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/kubevirt/kubevirt_cdi_upload.py b/plugins/modules/cloud/kubevirt/kubevirt_cdi_upload.py new file mode 100644 index 0000000000..e0032431f1 --- /dev/null +++ b/plugins/modules/cloud/kubevirt/kubevirt_cdi_upload.py @@ -0,0 +1,188 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2019, Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + + 
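+# In outline, the module below automates a two-step CDI upload: it first
+# POSTs an UploadTokenRequest naming the target PVC and reads the short-lived
+# bearer token from status.token, then streams the local image to the upload
+# proxy. A rough sketch of the second step, assuming a token is already in
+# hand (upload_image() is illustrative only, not part of this module):
+#
+#     import requests
+#
+#     def upload_image(upload_host, token, path, verify=True):
+#         # Stream the local image file to the CDI Upload Proxy endpoint.
+#         with open(path, 'rb') as imgfile:
+#             headers = {'Authorization': 'Bearer {0}'.format(token)}
+#             url = '{0}/v1alpha1/upload'.format(upload_host)
+#             return requests.post(url, data=imgfile, headers=headers,
+#                                  verify=verify)
+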
+ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' + +module: kubevirt_cdi_upload + +short_description: Upload local VM images to CDI Upload Proxy. + + +author: KubeVirt Team (@kubevirt) + + +description: + - Use Openshift Python SDK to create UploadTokenRequest objects. + - Transfer contents of local files to the CDI Upload Proxy. + +options: + pvc_name: + description: + - Use to specify the name of the target PersistentVolumeClaim. + required: true + pvc_namespace: + description: + - Use to specify the namespace of the target PersistentVolumeClaim. + required: true + upload_host: + description: + - URL containing the host and port on which the CDI Upload Proxy is available. + - "More info: U(https://github.com/kubevirt/containerized-data-importer/blob/master/doc/upload.md#expose-cdi-uploadproxy-service)" + upload_host_validate_certs: + description: + - Whether or not to verify the CDI Upload Proxy's SSL certificates against your system's CA trust store. + default: true + type: bool + aliases: [ upload_host_verify_ssl ] + path: + description: + - Path of local image file to transfer. + merge_type: + description: + - Whether to override the default patch merge approach with a specific type. By default, the strategic + merge will typically be used. + type: list + choices: [ json, merge, strategic-merge ] + +extends_documentation_fragment: +- community.kubernetes.k8s_auth_options + + +requirements: + - python >= 2.7 + - openshift >= 0.8.2 + - requests >= 2.0.0 +''' + +EXAMPLES = ''' +- name: Upload local image to pvc-vm1 + kubevirt_cdi_upload: + pvc_namespace: default + pvc_name: pvc-vm1 + upload_host: https://localhost:8443 + upload_host_validate_certs: false + path: /tmp/cirros-0.4.0-x86_64-disk.img +''' + +RETURN = '''# ''' + +import copy +import traceback + +from collections import defaultdict + +from ansible_collections.community.kubernetes.plugins.module_utils.k8s.common import AUTH_ARG_SPEC +from ansible_collections.community.kubernetes.plugins.module_utils.k8s.raw import KubernetesRawModule + +# 3rd party imports +try: + import requests + HAS_REQUESTS = True +except ImportError: + HAS_REQUESTS = False + + +SERVICE_ARG_SPEC = { + 'pvc_name': {'required': True}, + 'pvc_namespace': {'required': True}, + 'upload_host': {'required': True}, + 'upload_host_validate_certs': { + 'type': 'bool', + 'default': True, + 'aliases': ['upload_host_verify_ssl'] + }, + 'path': {'required': True}, + 'merge_type': { + 'type': 'list', + 'choices': ['json', 'merge', 'strategic-merge'] + }, +} + + +class KubeVirtCDIUpload(KubernetesRawModule): + def __init__(self, *args, **kwargs): + super(KubeVirtCDIUpload, self).__init__(*args, k8s_kind='UploadTokenRequest', **kwargs) + + if not HAS_REQUESTS: + self.fail("This module requires the python 'requests' package. 
Try `pip install requests`.") + + @property + def argspec(self): + """ argspec property builder """ + argument_spec = copy.deepcopy(AUTH_ARG_SPEC) + argument_spec.update(SERVICE_ARG_SPEC) + return argument_spec + + def execute_module(self): + """ Module execution """ + + API = 'v1alpha1' + KIND = 'UploadTokenRequest' + + self.client = self.get_api_client() + + api_version = 'upload.cdi.kubevirt.io/{0}'.format(API) + pvc_name = self.params.get('pvc_name') + pvc_namespace = self.params.get('pvc_namespace') + upload_host = self.params.get('upload_host') + upload_host_verify_ssl = self.params.get('upload_host_validate_certs') + path = self.params.get('path') + + definition = defaultdict(defaultdict) + + definition['kind'] = KIND + definition['apiVersion'] = api_version + + def_meta = definition['metadata'] + def_meta['name'] = pvc_name + def_meta['namespace'] = pvc_namespace + + def_spec = definition['spec'] + def_spec['pvcName'] = pvc_name + + # Let's check the file's there before we do anything else + imgfile = open(path, 'rb') + + resource = self.find_resource(KIND, api_version, fail=True) + definition = self.set_defaults(resource, definition) + result = self.perform_action(resource, definition) + + headers = {'Authorization': "Bearer {0}".format(result['result']['status']['token'])} + url = "{0}/{1}/upload".format(upload_host, API) + ret = requests.post(url, data=imgfile, headers=headers, verify=upload_host_verify_ssl) + + if ret.status_code != 200: + self.fail_request("Something went wrong while uploading data", method='POST', url=url, + reason=ret.reason, status_code=ret.status_code) + + self.exit_json(changed=True) + + def fail_request(self, msg, **kwargs): + req_info = {} + for k, v in kwargs.items(): + req_info['req_' + k] = v + self.fail_json(msg=msg, **req_info) + + +def main(): + module = KubeVirtCDIUpload() + try: + module.execute_module() + except Exception as e: + module.fail_json(msg=str(e), exception=traceback.format_exc()) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/kubevirt/kubevirt_preset.py b/plugins/modules/cloud/kubevirt/kubevirt_preset.py new file mode 100644 index 0000000000..c5032722f4 --- /dev/null +++ b/plugins/modules/cloud/kubevirt/kubevirt_preset.py @@ -0,0 +1,158 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2019, Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: kubevirt_preset + +short_description: Manage KubeVirt virtual machine presets + +description: + - Use Openshift Python SDK to manage the state of KubeVirt virtual machine presets. + + +author: KubeVirt Team (@kubevirt) + +options: + state: + description: + - Create or delete virtual machine presets. + default: "present" + choices: + - present + - absent + type: str + name: + description: + - Name of the virtual machine preset. + required: true + type: str + namespace: + description: + - Namespace where the virtual machine preset exists. + required: true + type: str + selector: + description: + - "Selector is a label query over a set of virtual machine preset." 
+ type: dict + +extends_documentation_fragment: +- community.kubernetes.k8s_auth_options +- community.general.kubevirt_vm_options +- community.general.kubevirt_common_options + + +requirements: + - python >= 2.7 + - openshift >= 0.8.2 +''' + +EXAMPLES = ''' +- name: Create virtual machine preset 'vmi-preset-small' + kubevirt_preset: + state: present + name: vmi-preset-small + namespace: vms + memory: 64M + selector: + matchLabels: + kubevirt.io/vmPreset: vmi-preset-small + +- name: Remove virtual machine preset 'vmi-preset-small' + kubevirt_preset: + state: absent + name: vmi-preset-small + namespace: vms +''' + +RETURN = ''' +kubevirt_preset: + description: + - The virtual machine preset managed by the user. + - "This dictionary contains all values returned by the KubeVirt API all options + are described here U(https://kubevirt.io/api-reference/master/definitions.html#_v1_virtualmachineinstancepreset)" + returned: success + type: complex + contains: {} +''' + +import copy +import traceback + + +from ansible_collections.community.kubernetes.plugins.module_utils.k8s.common import AUTH_ARG_SPEC + +from ansible_collections.community.general.plugins.module_utils.kubevirt import ( + virtdict, + KubeVirtRawModule, + VM_COMMON_ARG_SPEC +) + + +KIND = 'VirtualMachineInstancePreset' +VMP_ARG_SPEC = { + 'selector': {'type': 'dict'}, +} + + +class KubeVirtVMPreset(KubeVirtRawModule): + + @property + def argspec(self): + """ argspec property builder """ + argument_spec = copy.deepcopy(AUTH_ARG_SPEC) + argument_spec.update(VM_COMMON_ARG_SPEC) + argument_spec.update(VMP_ARG_SPEC) + return argument_spec + + def execute_module(self): + # Parse parameters specific for this module: + definition = virtdict() + selector = self.params.get('selector') + + if selector: + definition['spec']['selector'] = selector + + # FIXME: Devices must be set, but we don't yet support any + # attributes there, remove when we do: + definition['spec']['domain']['devices'] = dict() + + # defaults for template + defaults = {'disks': [], 'volumes': [], 'interfaces': [], 'networks': []} + + # Execute the CURD of VM: + dummy, definition = self.construct_vm_definition(KIND, definition, definition, defaults) + result_crud = self.execute_crud(KIND, definition) + changed = result_crud['changed'] + result = result_crud.pop('result') + + # Return from the module: + self.exit_json(**{ + 'changed': changed, + 'kubevirt_preset': result, + 'result': result_crud, + }) + + +def main(): + module = KubeVirtVMPreset() + try: + module.execute_module() + except Exception as e: + module.fail_json(msg=str(e), exception=traceback.format_exc()) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/kubevirt/kubevirt_pvc.py b/plugins/modules/cloud/kubevirt/kubevirt_pvc.py new file mode 100644 index 0000000000..68bb2621d6 --- /dev/null +++ b/plugins/modules/cloud/kubevirt/kubevirt_pvc.py @@ -0,0 +1,461 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2019, Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' + +module: kubevirt_pvc + +short_description: Manage PVCs on Kubernetes + + +author: KubeVirt Team (@kubevirt) + +description: + - Use Openshift Python SDK to manage PVCs on Kubernetes + - Support Containerized Data Importer out of the box 
+
+options:
+  resource_definition:
+    description:
+      - "A partial YAML definition of the PVC object being created/updated. Here you can define Kubernetes
+        PVC Resource parameters not covered by this module's parameters."
+      - "NOTE: I(resource_definition) has lower priority than module parameters. If you try to define e.g.
+        I(metadata.namespace) here, that value will be ignored and I(namespace) used instead."
+    aliases:
+      - definition
+      - inline
+    type: dict
+  state:
+    description:
+      - "Determines if an object should be created, patched, or deleted. When set to C(present), an object will be
+        created, if it does not already exist. If set to C(absent), an existing object will be deleted. If set to
+        C(present), an existing object will be patched, if its attributes differ from those specified using
+        module options and I(resource_definition)."
+    default: present
+    choices:
+      - present
+      - absent
+  force:
+    description:
+      - If set to C(True), and I(state) is C(present), an existing object will be replaced.
+    default: false
+    type: bool
+  merge_type:
+    description:
+      - Whether to override the default patch merge approach with a specific type.
+      - "This defaults to C(['strategic-merge', 'merge']), which is ideal for using the same parameters
+        on resource kinds that combine Custom Resources and built-in resources."
+      - See U(https://kubernetes.io/docs/tasks/run-application/update-api-object-kubectl-patch/#use-a-json-merge-patch-to-update-a-deployment)
+      - If more than one merge_type is given, the merge_types will be tried in order
+    choices:
+      - json
+      - merge
+      - strategic-merge
+    type: list
+  name:
+    description:
+      - Use to specify a PVC object name.
+    required: true
+    type: str
+  namespace:
+    description:
+      - Use to specify a PVC object namespace.
+    required: true
+    type: str
+  annotations:
+    description:
+      - Annotations attached to this object.
+      - U(https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/)
+    type: dict
+  labels:
+    description:
+      - Labels attached to this object.
+      - U(https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/)
+    type: dict
+  selector:
+    description:
+      - A label query over volumes to consider for binding.
+      - U(https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/)
+    type: dict
+  access_modes:
+    description:
+      - Contains the desired access modes the volume should have.
+      - "More info: U(https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes)"
+    type: list
+  size:
+    description:
+      - How much storage to allocate to the PVC.
+    type: str
+    aliases:
+      - storage
+  storage_class_name:
+    description:
+      - Name of the StorageClass required by the claim.
+      - "More info: U(https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1)"
+    type: str
+  volume_mode:
+    description:
+      - "This defines what type of volume is required by the claim. Value of Filesystem is implied when not
+        included in claim spec. This is an alpha feature of kubernetes and may change in the future."
+    type: str
+  volume_name:
+    description:
+      - This is the binding reference to the PersistentVolume backing this claim.
+    type: str
+  cdi_source:
+    description:
+      - "If data is to be copied onto the PVC using the Containerized Data Importer you can specify the source of
+        the data (along with any additional configuration) as well as its format."
+      - "Valid source types are: blank, http, s3, registry, pvc and upload. The last one requires using the
+        M(kubevirt_cdi_upload) module to actually perform an upload."
+ - "Source data format is specified using the optional I(content_type). Valid options are C(kubevirt) + (default; raw image) and C(archive) (tar.gz)." + - "This uses the DataVolume source syntax: + U(https://github.com/kubevirt/containerized-data-importer/blob/master/doc/datavolumes.md#https3registry-source)" + type: dict + wait: + description: + - "If set, this module will wait for the PVC to become bound and CDI (if enabled) to finish its operation + before returning." + - "Used only if I(state) set to C(present)." + - "Unless used in conjunction with I(cdi_source), this might result in a timeout, as clusters may be configured + to not bind PVCs until first usage." + default: false + type: bool + wait_timeout: + description: + - Specifies how much time in seconds to wait for PVC creation to complete if I(wait) option is enabled. + - Default value is reasonably high due to an expectation that CDI might take a while to finish its operation. + type: int + default: 300 + +extends_documentation_fragment: +- community.kubernetes.k8s_auth_options + + +requirements: + - python >= 2.7 + - openshift >= 0.8.2 +''' + +EXAMPLES = ''' +- name: Create a PVC and import data from an external source + kubevirt_pvc: + name: pvc1 + namespace: default + size: 100Mi + access_modes: + - ReadWriteOnce + cdi_source: + http: + url: https://www.source.example/path/of/data/vm.img + # If the URL points to a tar.gz containing the disk image, uncomment the line below: + #content_type: archive + +- name: Create a PVC as a clone from a different PVC + kubevirt_pvc: + name: pvc2 + namespace: default + size: 100Mi + access_modes: + - ReadWriteOnce + cdi_source: + pvc: + namespace: source-ns + name: source-pvc + +- name: Create a PVC ready for data upload + kubevirt_pvc: + name: pvc3 + namespace: default + size: 100Mi + access_modes: + - ReadWriteOnce + cdi_source: + upload: yes + # You need the kubevirt_cdi_upload module to actually upload something + +- name: Create a PVC with a blank raw image + kubevirt_pvc: + name: pvc4 + namespace: default + size: 100Mi + access_modes: + - ReadWriteOnce + cdi_source: + blank: yes + +- name: Create a PVC and fill it with data from a container + kubevirt_pvc: + name: pvc5 + namespace: default + size: 100Mi + access_modes: + - ReadWriteOnce + cdi_source: + registry: + url: "docker://kubevirt/fedora-cloud-registry-disk-demo" + +''' + +RETURN = ''' +result: + description: + - The created, patched, or otherwise present object. Will be empty in the case of a deletion. + returned: success + type: complex + contains: + api_version: + description: The versioned schema of this representation of an object. + returned: success + type: str + kind: + description: Represents the REST resource this object represents. + returned: success + type: str + metadata: + description: Standard object metadata. Includes name, namespace, annotations, labels, etc. + returned: success + type: complex + spec: + description: Specific attributes of the object. Will vary based on the I(api_version) and I(kind). + returned: success + type: complex + status: + description: Current status details for the object. 
+ returned: success + type: complex + items: + description: Returned only when multiple yaml documents are passed to src or resource_definition + returned: when resource_definition or src contains list of objects + type: list + duration: + description: elapsed time of task in seconds + returned: when C(wait) is true + type: int + sample: 48 +''' + + +import copy +import traceback + +from collections import defaultdict + +from ansible_collections.community.kubernetes.plugins.module_utils.k8s.common import AUTH_ARG_SPEC +from ansible_collections.community.kubernetes.plugins.module_utils.k8s.raw import KubernetesRawModule +from ansible_collections.community.general.plugins.module_utils.kubevirt import virtdict, KubeVirtRawModule + + +PVC_ARG_SPEC = { + 'name': {'required': True}, + 'namespace': {'required': True}, + 'state': { + 'type': 'str', + 'choices': [ + 'present', 'absent' + ], + 'default': 'present' + }, + 'force': { + 'type': 'bool', + 'default': False, + }, + 'merge_type': { + 'type': 'list', + 'choices': ['json', 'merge', 'strategic-merge'] + }, + 'resource_definition': { + 'type': 'dict', + 'aliases': ['definition', 'inline'] + }, + 'labels': {'type': 'dict'}, + 'annotations': {'type': 'dict'}, + 'selector': {'type': 'dict'}, + 'access_modes': {'type': 'list'}, + 'size': { + 'type': 'str', + 'aliases': ['storage'] + }, + 'storage_class_name': {'type': 'str'}, + 'volume_mode': {'type': 'str'}, + 'volume_name': {'type': 'str'}, + 'cdi_source': {'type': 'dict'}, + 'wait': { + 'type': 'bool', + 'default': False + }, + 'wait_timeout': { + 'type': 'int', + 'default': 300 + } +} + + +class CreatePVCFailed(Exception): + pass + + +class KubevirtPVC(KubernetesRawModule): + def __init__(self): + super(KubevirtPVC, self).__init__() + + @property + def argspec(self): + argument_spec = copy.deepcopy(AUTH_ARG_SPEC) + argument_spec.update(PVC_ARG_SPEC) + return argument_spec + + @staticmethod + def fix_serialization(obj): + if obj and hasattr(obj, 'to_dict'): + return obj.to_dict() + return obj + + def _parse_cdi_source(self, _cdi_src, metadata): + cdi_src = copy.deepcopy(_cdi_src) + annotations = metadata['annotations'] + labels = metadata['labels'] + + valid_content_types = ('kubevirt', 'archive') + valid_sources = ('http', 's3', 'pvc', 'upload', 'blank', 'registry') + + if 'content_type' in cdi_src: + content_type = cdi_src.pop('content_type') + if content_type not in valid_content_types: + raise ValueError("cdi_source.content_type must be one of {0}, not: '{1}'".format( + valid_content_types, content_type)) + annotations['cdi.kubevirt.io/storage.contentType'] = content_type + + if len(cdi_src) != 1: + raise ValueError("You must specify exactly one valid CDI source, not {0}: {1}".format(len(cdi_src), tuple(cdi_src.keys()))) + + src_type = tuple(cdi_src.keys())[0] + src_spec = cdi_src[src_type] + + if src_type not in valid_sources: + raise ValueError("Got an invalid CDI source type: '{0}', must be one of {1}".format(src_type, valid_sources)) + + # True for all cases save one + labels['app'] = 'containerized-data-importer' + + if src_type == 'upload': + annotations['cdi.kubevirt.io/storage.upload.target'] = '' + elif src_type == 'blank': + annotations['cdi.kubevirt.io/storage.import.source'] = 'none' + elif src_type == 'pvc': + if not isinstance(src_spec, dict) or sorted(src_spec.keys()) != ['name', 'namespace']: + raise ValueError("CDI Source 'pvc' requires specifying 'name' and 'namespace' (and nothing else)") + labels['app'] = 'host-assisted-cloning' + annotations['k8s.io/CloneRequest'] = 
'{0}/{1}'.format(src_spec['namespace'], src_spec['name'])
+        elif src_type in ('http', 's3', 'registry'):
+            if not isinstance(src_spec, dict) or 'url' not in src_spec:
+                raise ValueError("CDI Source '{0}' requires specifying 'url'".format(src_type))
+            unknown_params = set(src_spec.keys()).difference(set(('url', 'secretRef', 'certConfigMap')))
+            if unknown_params:
+                raise ValueError("CDI Source '{0}' does not recognize params: {1}".format(src_type, tuple(unknown_params)))
+            annotations['cdi.kubevirt.io/storage.import.source'] = src_type
+            annotations['cdi.kubevirt.io/storage.import.endpoint'] = src_spec['url']
+            if 'secretRef' in src_spec:
+                annotations['cdi.kubevirt.io/storage.import.secretName'] = src_spec['secretRef']
+            if 'certConfigMap' in src_spec:
+                annotations['cdi.kubevirt.io/storage.import.certConfigMap'] = src_spec['certConfigMap']
+
+    def _wait_for_creation(self, resource, uid):
+        return_obj = None
+        desired_cdi_status = 'Succeeded'
+        use_cdi = True if self.params.get('cdi_source') else False
+        if use_cdi and 'upload' in self.params['cdi_source']:
+            desired_cdi_status = 'Running'
+
+        for event in resource.watch(namespace=self.namespace, timeout=self.params.get('wait_timeout')):
+            entity = event['object']
+            metadata = entity.metadata
+            if not hasattr(metadata, 'uid') or metadata.uid != uid:
+                continue
+            if entity.status.phase == 'Bound':
+                if use_cdi and hasattr(metadata, 'annotations'):
+                    import_status = metadata.annotations.get('cdi.kubevirt.io/storage.pod.phase')
+                    if import_status == desired_cdi_status:
+                        return_obj = entity
+                        break
+                    elif import_status == 'Failed':
+                        raise CreatePVCFailed("PVC creation incomplete; importing data failed")
+                else:
+                    return_obj = entity
+                    break
+            elif entity.status.phase == 'Failed':
+                raise CreatePVCFailed("PVC creation failed")
+
+        if not return_obj:
+            raise CreatePVCFailed("PVC creation timed out")
+
+        return self.fix_serialization(return_obj)
+
+    def execute_module(self):
+        KIND = 'PersistentVolumeClaim'
+        API = 'v1'
+
+        definition = virtdict()
+        definition['kind'] = KIND
+        definition['apiVersion'] = API
+
+        metadata = definition['metadata']
+        metadata['name'] = self.params.get('name')
+        metadata['namespace'] = self.params.get('namespace')
+        if self.params.get('annotations'):
+            metadata['annotations'] = self.params.get('annotations')
+        if self.params.get('labels'):
+            metadata['labels'] = self.params.get('labels')
+        if self.params.get('cdi_source'):
+            self._parse_cdi_source(self.params.get('cdi_source'), metadata)
+
+        spec = definition['spec']
+        if self.params.get('access_modes'):
+            spec['accessModes'] = self.params.get('access_modes')
+        if self.params.get('size'):
+            spec['resources']['requests']['storage'] = self.params.get('size')
+        if self.params.get('storage_class_name'):
+            spec['storageClassName'] = self.params.get('storage_class_name')
+        if self.params.get('selector'):
+            spec['selector'] = self.params.get('selector')
+        if self.params.get('volume_mode'):
+            spec['volumeMode'] = self.params.get('volume_mode')
+        if self.params.get('volume_name'):
+            spec['volumeName'] = self.params.get('volume_name')
+
+        # 'resource_definition:' has lower priority than module parameters
+        definition = dict(KubeVirtRawModule.merge_dicts(definition, self.resource_definitions[0]))
+
+        self.client = self.get_api_client()
+        resource = self.find_resource(KIND, API, fail=True)
+        definition = self.set_defaults(resource, definition)
+        result = self.perform_action(resource, definition)
+        if self.params.get('wait') and self.params.get('state') == 'present':
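+            # A condensed, hedged sketch of what the wait below boils down to
+            # (illustrative only; the actual logic lives in _wait_for_creation() above):
+            #
+            #   for event in resource.watch(namespace=self.namespace, timeout=wait_timeout):
+            #       pvc = event['object']
+            #       if pvc.metadata.uid != uid:
+            #           continue                      # some other PVC, keep watching
+            #       if pvc.status.phase == 'Bound':
+            #           break                         # plain PVC: done
+            #       if pvc.status.phase == 'Failed':
+            #           raise CreatePVCFailed("PVC creation failed")
+            #
+            # With I(cdi_source), the 'cdi.kubevirt.io/storage.pod.phase' annotation must
+            # additionally reach 'Succeeded' ('Running' for upload sources) before returning.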
+            result['result'] = self._wait_for_creation(resource, result['result']['metadata']['uid'])
+
+        self.exit_json(**result)
+
+
+def main():
+    module = KubevirtPVC()
+    try:
+        module.execute_module()
+    except Exception as e:
+        module.fail_json(msg=str(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/cloud/kubevirt/kubevirt_rs.py b/plugins/modules/cloud/kubevirt/kubevirt_rs.py
new file mode 100644
index 0000000000..8bc413fbb2
--- /dev/null
+++ b/plugins/modules/cloud/kubevirt/kubevirt_rs.py
@@ -0,0 +1,215 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2019, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+DOCUMENTATION = '''
+---
+module: kubevirt_rs
+
+short_description: Manage KubeVirt virtual machine replica sets
+
+description:
+  - Use Openshift Python SDK to manage the state of KubeVirt virtual machine replica sets.
+
+
+author: KubeVirt Team (@kubevirt)
+
+options:
+  state:
+    description:
+      - Create or delete virtual machine replica sets.
+    default: "present"
+    choices:
+      - present
+      - absent
+    type: str
+  name:
+    description:
+      - Name of the virtual machine replica set.
+    required: true
+    type: str
+  namespace:
+    description:
+      - Namespace where the virtual machine replica set exists.
+    required: true
+    type: str
+  selector:
+    description:
+      - "Selector is a label query over a set of virtual machines."
+    required: true
+    type: dict
+  replicas:
+    description:
+      - Number of desired pods. This is a pointer to distinguish between explicit zero and not specified.
+      - Replicas defaults to 1 for a newly created replica set.
+    type: int
+
+extends_documentation_fragment:
+- community.kubernetes.k8s_auth_options
+- community.general.kubevirt_vm_options
+- community.general.kubevirt_common_options
+
+
+requirements:
+  - python >= 2.7
+  - openshift >= 0.8.2
+'''
+
+EXAMPLES = '''
+- name: Create virtual machine replica set 'myvmir'
+  kubevirt_rs:
+    state: present
+    name: myvmir
+    namespace: vms
+    wait: true
+    replicas: 3
+    memory: 64M
+    labels:
+      myvmi: myvmi
+    selector:
+      matchLabels:
+        myvmi: myvmi
+    disks:
+      - name: containerdisk
+        volume:
+          containerDisk:
+            image: kubevirt/cirros-container-disk-demo:latest
+            path: /custom-disk/cirros.img
+        disk:
+          bus: virtio
+
+- name: Remove virtual machine replica set 'myvmir'
+  kubevirt_rs:
+    state: absent
+    name: myvmir
+    namespace: vms
+    wait: true
+'''
+
+RETURN = '''
+kubevirt_rs:
+  description:
+    - The virtual machine replica set managed by the user.
+ - "This dictionary contains all values returned by the KubeVirt API all options + are described here U(https://kubevirt.io/api-reference/master/definitions.html#_v1_virtualmachineinstance)" + returned: success + type: complex + contains: {} +''' + +import copy +import traceback + + +from ansible_collections.community.kubernetes.plugins.module_utils.k8s.common import AUTH_ARG_SPEC + +from ansible_collections.community.general.plugins.module_utils.kubevirt import ( + virtdict, + KubeVirtRawModule, + VM_COMMON_ARG_SPEC, +) + + +KIND = 'VirtualMachineInstanceReplicaSet' +VMIR_ARG_SPEC = { + 'replicas': {'type': 'int'}, + 'selector': {'type': 'dict'}, +} + + +class KubeVirtVMIRS(KubeVirtRawModule): + + @property + def argspec(self): + """ argspec property builder """ + argument_spec = copy.deepcopy(AUTH_ARG_SPEC) + argument_spec.update(copy.deepcopy(VM_COMMON_ARG_SPEC)) + argument_spec.update(copy.deepcopy(VMIR_ARG_SPEC)) + return argument_spec + + def wait_for_replicas(self, replicas): + """ Wait for ready_replicas to equal the requested number of replicas. """ + resource = self.find_supported_resource(KIND) + return_obj = None + + for event in resource.watch(namespace=self.namespace, timeout=self.params.get('wait_timeout')): + entity = event['object'] + if entity.metadata.name != self.name: + continue + status = entity.get('status', {}) + readyReplicas = status.get('readyReplicas', 0) + if readyReplicas == replicas: + return_obj = entity + break + + if not return_obj: + self.fail_json(msg="Error fetching the patched object. Try a higher wait_timeout value.") + if replicas and return_obj.status.readyReplicas is None: + self.fail_json(msg="Failed to fetch the number of ready replicas. Try a higher wait_timeout value.") + if replicas and return_obj.status.readyReplicas != replicas: + self.fail_json(msg="Number of ready replicas is {0}. 
+        return return_obj.to_dict()
+
+    def execute_module(self):
+        # Parse parameters specific for this module:
+        definition = virtdict()
+        selector = self.params.get('selector')
+        replicas = self.params.get('replicas')
+
+        if selector:
+            definition['spec']['selector'] = selector
+
+        if replicas is not None:
+            definition['spec']['replicas'] = replicas
+
+        # defaults for template
+        defaults = {'disks': [], 'volumes': [], 'interfaces': [], 'networks': []}
+
+        # Execute the CRUD of VM:
+        template = definition['spec']['template']
+        dummy, definition = self.construct_vm_definition(KIND, definition, template, defaults)
+        result_crud = self.execute_crud(KIND, definition)
+        changed = result_crud['changed']
+        result = result_crud.pop('result')
+
+        # When creating a new VMIRS object without specifying `replicas`, assume it's '1' to make the
+        # wait logic work correctly
+        if changed and result_crud['method'] == 'create' and replicas is None:
+            replicas = 1
+
+        # Wait for the new number of ready replicas after a CRUD update
+        # Note1: doesn't work correctly when reducing the number of replicas due to how VMIRS works (as of kubevirt 1.5.0)
+        # Note2: not the place to wait for the VMIs to get deleted when deleting the VMIRS object; that *might* be
+        #        achievable in execute_crud(); keywords: orphanDependents, propagationPolicy, DeleteOptions
+        if self.params.get('wait') and replicas is not None and self.params.get('state') == 'present':
+            result = self.wait_for_replicas(replicas)
+
+        # Return from the module:
+        self.exit_json(**{
+            'changed': changed,
+            'kubevirt_rs': result,
+            'result': result_crud,
+        })
+
+
+def main():
+    module = KubeVirtVMIRS()
+    try:
+        module.execute_module()
+    except Exception as e:
+        module.fail_json(msg=str(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/cloud/kubevirt/kubevirt_template.py b/plugins/modules/cloud/kubevirt/kubevirt_template.py
new file mode 100644
index 0000000000..bc07fb2e3f
--- /dev/null
+++ b/plugins/modules/cloud/kubevirt/kubevirt_template.py
@@ -0,0 +1,389 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2019, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+DOCUMENTATION = '''
+---
+module: kubevirt_template
+
+short_description: Manage KubeVirt templates
+
+description:
+  - Use Openshift Python SDK to manage the state of KubeVirt templates.
+
+
+author: KubeVirt Team (@kubevirt)
+
+options:
+  name:
+    description:
+      - Name of the Template object.
+    required: true
+    type: str
+  namespace:
+    description:
+      - Namespace where the Template object exists.
+    required: true
+    type: str
+  objects:
+    description:
+      - List of any valid API objects, such as a I(DeploymentConfig), I(Service), etc. The object
+        will be created exactly as defined here, with any parameter values substituted in prior to creation.
+        The definition of these objects can reference parameters defined earlier.
+      - As part of the list, the user can also pass objects of the I(VirtualMachine) kind. When passing I(VirtualMachine),
+        the user must use the Ansible structure of the parameters, not the Kubernetes API structure.
+        For more information, please take a look at the M(kubevirt_vm) module and at the EXAMPLES section, where you can see an example.
+    type: list
+  merge_type:
+    description:
+      - Whether to override the default patch merge approach with a specific type. By default, the strategic
+        merge will typically be used.
+    type: list
+    choices: [ json, merge, strategic-merge ]
+  display_name:
+    description:
+      - "A brief, user-friendly name, which can be employed by user interfaces."
+    type: str
+  description:
+    description:
+      - A description of the template.
+      - Include enough detail that the user will understand what is being deployed
+        and any caveats they need to know before deploying. It should also provide links to additional information,
+        such as a README file.
+    type: str
+  long_description:
+    description:
+      - "Additional template description. This may be displayed by the service catalog, for example."
+    type: str
+  provider_display_name:
+    description:
+      - "The name of the person or organization providing the template."
+    type: str
+  documentation_url:
+    description:
+      - "A URL referencing further documentation for the template."
+    type: str
+  support_url:
+    description:
+      - "A URL where support can be obtained for the template."
+    type: str
+  editable:
+    description:
+      - "Extension for hinting at which elements should be considered editable.
+         List of jsonpath selectors. The jsonpath root is the objects: element of the template."
+      - This parameter can be used only when the KubeVirt add-on is installed on your OpenShift cluster.
+    type: list
+  default_disk:
+    description:
+      - "The goal of default disk is to define what kind of disk is supported by the OS mainly in
+         terms of bus (ide, scsi, sata, virtio, ...)"
+      - The C(default_disk) parameter defines a configuration overlay for disks that will be applied on top of disks
+        during virtual machine creation to provide the global compatibility and/or performance defaults defined here.
+      - This parameter can be used only when the KubeVirt add-on is installed on your OpenShift cluster.
+    type: dict
+  default_volume:
+    description:
+      - "The goal of default volume is to be able to configure mostly performance parameters like
+         caches if those are exposed by the underlying volume implementation."
+      - The C(default_volume) parameter defines a configuration overlay for volumes that will be applied on top of volumes
+        during virtual machine creation to provide the global compatibility and/or performance defaults defined here.
+      - This parameter can be used only when the KubeVirt add-on is installed on your OpenShift cluster.
+    type: dict
+  default_nic:
+    description:
+      - "The goal of default network is similar to I(default_disk) and should be used as a template
+         to ensure OS compatibility and performance."
+      - The C(default_nic) parameter defines a configuration overlay for NICs that will be applied on top of NICs
+        during virtual machine creation to provide the global compatibility and/or performance defaults defined here.
+      - This parameter can be used only when the KubeVirt add-on is installed on your OpenShift cluster.
+    type: dict
+  default_network:
+    description:
+      - "The goal of default network is similar to I(default_volume) and should be used as a template
+         that specifies performance and connection parameters (L2 bridge for example)"
+      - The C(default_network) parameter defines a configuration overlay for networks that will be applied on top of networks
+        during virtual machine creation to provide the global compatibility and/or performance defaults defined here.
+      - This parameter can be used only when the KubeVirt add-on is installed on your OpenShift cluster.
+    type: dict
+  icon_class:
+    description:
+      - "An icon to be displayed with your template in the web console. Choose from our existing logo
+         icons when possible. You can also use icons from FontAwesome. Alternatively, provide icons through
+         CSS customizations that can be added to an OpenShift Container Platform cluster that uses your template.
+         You must specify an icon class that exists, or it will prevent falling back to the generic icon."
+    type: str
+  parameters:
+    description:
+      - "Parameters allow a value to be supplied by the user or generated when the template is instantiated.
+         Then, that value is substituted wherever the parameter is referenced. References can be defined in any
+         field in the objects list field. This is useful for generating random passwords or allowing the user to
+         supply a host name or other user-specific value that is required to customize the template."
+      - "More information can be found at: U(https://docs.openshift.com/container-platform/3.6/dev_guide/templates.html#writing-parameters)"
+    type: list
+  version:
+    description:
+      - Template structure version.
+      - This parameter can be used only when the KubeVirt add-on is installed on your OpenShift cluster.
+    type: str
+
+extends_documentation_fragment:
+- community.kubernetes.k8s_auth_options
+- community.kubernetes.k8s_state_options
+
+
+requirements:
+  - python >= 2.7
+  - openshift >= 0.8.2
+'''
+
+EXAMPLES = '''
+- name: Create template 'mytemplate'
+  kubevirt_template:
+    state: present
+    name: myvmtemplate
+    namespace: templates
+    display_name: Generic cirros template
+    description: Basic cirros template
+    long_description: Verbose description of cirros template
+    provider_display_name: Just Be Cool, Inc.
+    documentation_url: http://theverycoolcompany.com
+    support_url: http://support.theverycoolcompany.com
+    icon_class: icon-linux
+    default_disk:
+      disk:
+        bus: virtio
+    default_nic:
+      model: virtio
+    default_network:
+      resource:
+        resourceName: bridge.network.kubevirt.io/cnvmgmt
+    default_volume:
+      containerDisk:
+        image: kubevirt/cirros-container-disk-demo:latest
+    objects:
+      - name: ${NAME}
+        kind: VirtualMachine
+        memory: ${MEMORY_SIZE}
+        state: present
+        namespace: vms
+    parameters:
+      - name: NAME
+        description: VM name
+        generate: expression
+        from: 'vm-[A-Za-z0-9]{8}'
+      - name: MEMORY_SIZE
+        description: Memory size
+        value: 1Gi
+
+- name: Remove template 'myvmtemplate'
+  kubevirt_template:
+    state: absent
+    name: myvmtemplate
+    namespace: templates
+'''
+
+RETURN = '''
+kubevirt_template:
+  description:
+    - The template dictionary specification returned by the API.
+  returned: success
+  type: complex
+  contains: {}
+'''
+
+
+import copy
+import traceback
+
+from ansible_collections.community.kubernetes.plugins.module_utils.k8s.common import AUTH_ARG_SPEC
+
+from ansible_collections.community.general.plugins.module_utils.kubevirt import (
+    virtdict,
+    KubeVirtRawModule,
+    API_GROUP,
+    MAX_SUPPORTED_API_VERSION
+)
+
+
+TEMPLATE_ARG_SPEC = {
+    'name': {'required': True},
+    'namespace': {'required': True},
+    'state': {
+        'default': 'present',
+        'choices': ['present', 'absent'],
+    },
+    'force': {
+        'type': 'bool',
+        'default': False,
+    },
+    'merge_type': {
+        'type': 'list',
+        'choices': ['json', 'merge', 'strategic-merge']
+    },
+    'objects': {
+        'type': 'list',
+    },
+    'display_name': {
+        'type': 'str',
+    },
+    'description': {
+        'type': 'str',
+    },
+    'long_description': {
+        'type': 'str',
+    },
+    'provider_display_name': {
+        'type': 'str',
+    },
+    'documentation_url': {
+        'type': 'str',
+    },
+    'support_url': {
+        'type': 'str',
+    },
+    'icon_class': {
+        'type': 'str',
+    },
+    'version': {
+        'type': 'str',
+    },
+    'editable': {
+        'type': 'list',
+    },
+    'default_disk': {
+        'type': 'dict',
+    },
+    'default_volume': {
+        'type': 'dict',
+    },
+    'default_network': {
+        'type': 'dict',
+    },
+    'default_nic': {
+        'type': 'dict',
+    },
+    'parameters': {
+        'type': 'list',
+    },
+}
+
+
+class KubeVirtVMTemplate(KubeVirtRawModule):
+
+    @property
+    def argspec(self):
+        """ argspec property builder """
+        argument_spec = copy.deepcopy(AUTH_ARG_SPEC)
+        argument_spec.update(TEMPLATE_ARG_SPEC)
+        return argument_spec
+
+    def execute_module(self):
+        # Parse parameters specific for this module:
+        definition = virtdict()
+
+        # Execute the CRUD of VM template:
+        kind = 'Template'
+        template_api_version = 'template.openshift.io/v1'
+
+        # Fill in template parameters:
+        definition['parameters'] = self.params.get('parameters')
+
+        # Fill in the default Label
+        labels = definition['metadata']['labels']
+        labels['template.cnv.io/type'] = 'vm'
+
+        # Fill in Openshift/Kubevirt template annotations:
+        annotations = definition['metadata']['annotations']
+        if self.params.get('display_name'):
+            annotations['openshift.io/display-name'] = self.params.get('display_name')
+        if self.params.get('description'):
+            annotations['description'] = self.params.get('description')
+        if self.params.get('long_description'):
+            annotations['openshift.io/long-description'] = self.params.get('long_description')
+        if self.params.get('provider_display_name'):
+            annotations['openshift.io/provider-display-name'] = self.params.get('provider_display_name')
+        if self.params.get('documentation_url'):
+            annotations['openshift.io/documentation-url'] = self.params.get('documentation_url')
+        if self.params.get('support_url'):
+            annotations['openshift.io/support-url'] = self.params.get('support_url')
+        if self.params.get('icon_class'):
+            annotations['iconClass'] = self.params.get('icon_class')
+        if self.params.get('version'):
+            annotations['template.cnv.io/version'] = self.params.get('version')
+
+        # TODO: Make it more Ansiblish, so the user doesn't have to specify an API JSON path, but rather Ansible params:
+        if self.params.get('editable'):
+            annotations['template.cnv.io/editable'] = self.params.get('editable')
+
+        # Set defaults annotations:
+        if self.params.get('default_disk'):
+            annotations['defaults.template.cnv.io/disk'] = self.params.get('default_disk').get('name')
+        if self.params.get('default_volume'):
+            annotations['defaults.template.cnv.io/volume'] = self.params.get('default_volume').get('name')
+        if self.params.get('default_nic'):
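+            # A hedged example of what this block accumulates: assuming a task passes
+            # e.g. default_disk: {name: default, disk: {bus: virtio}} (and similarly
+            # named volume/nic/network defaults), the resulting annotations are roughly:
+            #
+            #   definition['metadata']['annotations'].update({
+            #       'defaults.template.cnv.io/disk': 'default',
+            #       'defaults.template.cnv.io/volume': 'default',
+            #       'defaults.template.cnv.io/nic': 'default',
+            #       'defaults.template.cnv.io/network': 'default',
+            #   })
+            #
+            # Each annotation value is the C(name) key of the respective default; consumers
+            # of the template (see kubevirt_vm's _process_template_defaults) use it to pick
+            # the default object out of the processed template.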
annotations['defaults.template.cnv.io/nic'] = self.params.get('default_nic').get('name') + if self.params.get('default_network'): + annotations['defaults.template.cnv.io/network'] = self.params.get('default_network').get('name') + + # Process objects: + self.client = self.get_api_client() + definition['objects'] = [] + objects = self.params.get('objects') or [] + for obj in objects: + if obj['kind'] != 'VirtualMachine': + definition['objects'].append(obj) + else: + vm_definition = virtdict() + + # Set VM defaults: + if self.params.get('default_disk'): + vm_definition['spec']['template']['spec']['domain']['devices']['disks'] = [self.params.get('default_disk')] + if self.params.get('default_volume'): + vm_definition['spec']['template']['spec']['volumes'] = [self.params.get('default_volume')] + if self.params.get('default_nic'): + vm_definition['spec']['template']['spec']['domain']['devices']['interfaces'] = [self.params.get('default_nic')] + if self.params.get('default_network'): + vm_definition['spec']['template']['spec']['networks'] = [self.params.get('default_network')] + + # Set kubevirt API version: + vm_definition['apiVersion'] = '%s/%s' % (API_GROUP, MAX_SUPPORTED_API_VERSION) + + # Construct k8s vm API object: + vm_template = vm_definition['spec']['template'] + dummy, vm_def = self.construct_vm_template_definition('VirtualMachine', vm_definition, vm_template, obj) + + definition['objects'].append(vm_def) + + # Create template: + resource = self.client.resources.get(api_version=template_api_version, kind=kind, name='templates') + definition = self.set_defaults(resource, definition) + result = self.perform_action(resource, definition) + + # Return from the module: + self.exit_json(**{ + 'changed': result['changed'], + 'kubevirt_template': result.pop('result'), + 'result': result, + }) + + +def main(): + module = KubeVirtVMTemplate() + try: + module.execute_module() + except Exception as e: + module.fail_json(msg=str(e), exception=traceback.format_exc()) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/kubevirt/kubevirt_vm.py b/plugins/modules/cloud/kubevirt/kubevirt_vm.py new file mode 100644 index 0000000000..ddcab20e98 --- /dev/null +++ b/plugins/modules/cloud/kubevirt/kubevirt_vm.py @@ -0,0 +1,473 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2019, Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: kubevirt_vm + +short_description: Manage KubeVirt virtual machine + +description: + - Use Openshift Python SDK to manage the state of KubeVirt virtual machines. + + +author: KubeVirt Team (@kubevirt) + +options: + state: + description: + - Set the virtual machine to either I(present), I(absent), I(running) or I(stopped). + - "I(present) - Create or update a virtual machine. (And run it if it's ephemeral.)" + - "I(absent) - Remove a virtual machine." + - "I(running) - Create or update a virtual machine and run it." + - "I(stopped) - Stop a virtual machine. (This deletes ephemeral VMs.)" + default: "present" + choices: + - present + - absent + - running + - stopped + type: str + name: + description: + - Name of the virtual machine. + required: true + type: str + namespace: + description: + - Namespace where the virtual machine exists. 
+    required: true
+    type: str
+  ephemeral:
+    description:
+      - If I(true), an ephemeral virtual machine will be created. When destroyed, it won't be accessible again.
+      - Works only with C(state) I(present) and I(absent).
+    type: bool
+    default: false
+  datavolumes:
+    description:
+      - "DataVolumes are a way to automate importing virtual machine disks onto PVCs during the virtual machine's
+        launch flow. Without using a DataVolume, users have to prepare a PVC with a disk image before assigning
+        it to a VM or VMI manifest. With a DataVolume, both the PVC creation and the import are automated on behalf of the user."
+    type: list
+  template:
+    description:
+      - "Name of Template to be used in creation of a virtual machine."
+    type: str
+  template_parameters:
+    description:
+      - "New values of parameters from Template."
+    type: dict
+
+extends_documentation_fragment:
+- community.kubernetes.k8s_auth_options
+- community.general.kubevirt_vm_options
+- community.general.kubevirt_common_options
+
+
+requirements:
+  - python >= 2.7
+  - openshift >= 0.8.2
+'''
+
+EXAMPLES = '''
+- name: Start virtual machine 'myvm'
+  kubevirt_vm:
+    state: running
+    name: myvm
+    namespace: vms
+
+- name: Create virtual machine 'myvm' and start it
+  kubevirt_vm:
+    state: running
+    name: myvm
+    namespace: vms
+    memory: 64Mi
+    cpu_cores: 1
+    bootloader: efi
+    smbios_uuid: 5d307ca9-b3ef-428c-8861-06e72d69f223
+    cpu_model: Conroe
+    headless: true
+    hugepage_size: 2Mi
+    tablets:
+      - bus: virtio
+        name: tablet1
+    cpu_limit: 3
+    cpu_shares: 2
+    disks:
+      - name: containerdisk
+        volume:
+          containerDisk:
+            image: kubevirt/cirros-container-disk-demo:latest
+            path: /custom-disk/cirros.img
+        disk:
+          bus: virtio
+
+- name: Create virtual machine 'myvm' with multus network interface
+  kubevirt_vm:
+    name: myvm
+    namespace: vms
+    memory: 512M
+    interfaces:
+      - name: default
+        bridge: {}
+        network:
+          pod: {}
+      - name: mynet
+        bridge: {}
+        network:
+          multus:
+            networkName: mynetconf
+
+- name: Combine inline definition with Ansible parameters
+  kubevirt_vm:
+    # Kubernetes specification:
+    definition:
+      metadata:
+        labels:
+          app: galaxy
+          service: web
+          origin: vmware
+
+    # Ansible parameters:
+    state: running
+    name: myvm
+    namespace: vms
+    memory: 64M
+    disks:
+      - name: containerdisk
+        volume:
+          containerDisk:
+            image: kubevirt/cirros-container-disk-demo:latest
+            path: /custom-disk/cirros.img
+        disk:
+          bus: virtio
+
+- name: Start ephemeral virtual machine 'myvm' and wait for it to be running
+  kubevirt_vm:
+    ephemeral: true
+    state: running
+    wait: true
+    wait_timeout: 180
+    name: myvm
+    namespace: vms
+    memory: 64M
+    labels:
+      kubevirt.io/vm: myvm
+    disks:
+      - name: containerdisk
+        volume:
+          containerDisk:
+            image: kubevirt/cirros-container-disk-demo:latest
+            path: /custom-disk/cirros.img
+        disk:
+          bus: virtio
+
+- name: Start Fedora VM with cloud-init
+  kubevirt_vm:
+    state: running
+    wait: true
+    name: myvm
+    namespace: vms
+    memory: 1024M
+    cloud_init_nocloud:
+      userData: |-
+        #cloud-config
+        password: fedora
+        chpasswd: { expire: False }
+    disks:
+      - name: containerdisk
+        volume:
+          containerDisk:
+            image: kubevirt/fedora-cloud-container-disk-demo:latest
+            path: /disk/fedora.qcow2
+        disk:
+          bus: virtio
+    node_affinity:
+      soft:
+        - weight: 1
+          term:
+            match_expressions:
+              - key: security
+                operator: In
+                values:
+                  - S2
+
+- name: Create virtual machine with datavolume and specify node affinity
+  kubevirt_vm:
+    name: myvm
+    namespace: default
+    memory: 1024Mi
+    datavolumes:
+      - name: mydv
+        source:
+          http:
+            url: https://url/disk.qcow2
+        pvc:
accessModes: + - ReadWriteOnce + storage: 5Gi + node_affinity: + hard: + - term: + match_expressions: + - key: security + operator: In + values: + - S1 + +- name: Remove virtual machine 'myvm' + kubevirt_vm: + state: absent + name: myvm + namespace: vms +''' + +RETURN = ''' +kubevirt_vm: + description: + - The virtual machine dictionary specification returned by the API. + - "This dictionary contains all values returned by the KubeVirt API all options + are described here U(https://kubevirt.io/api-reference/master/definitions.html#_v1_virtualmachine)" + returned: success + type: complex + contains: {} +''' + + +import copy +import traceback + +from ansible_collections.community.kubernetes.plugins.module_utils.k8s.common import AUTH_ARG_SPEC +from ansible_collections.community.general.plugins.module_utils.kubevirt import ( + virtdict, + KubeVirtRawModule, + VM_COMMON_ARG_SPEC, + VM_SPEC_DEF_ARG_SPEC +) + +VM_ARG_SPEC = { + 'ephemeral': {'type': 'bool', 'default': False}, + 'state': { + 'type': 'str', + 'choices': [ + 'present', 'absent', 'running', 'stopped' + ], + 'default': 'present' + }, + 'datavolumes': {'type': 'list'}, + 'template': {'type': 'str'}, + 'template_parameters': {'type': 'dict'}, +} + +# Which params (can) modify 'spec:' contents of a VM: +VM_SPEC_PARAMS = list(VM_SPEC_DEF_ARG_SPEC.keys()) + ['datavolumes', 'template', 'template_parameters'] + + +class KubeVirtVM(KubeVirtRawModule): + + @property + def argspec(self): + """ argspec property builder """ + argument_spec = copy.deepcopy(AUTH_ARG_SPEC) + argument_spec.update(VM_COMMON_ARG_SPEC) + argument_spec.update(VM_ARG_SPEC) + return argument_spec + + @staticmethod + def fix_serialization(obj): + if obj and hasattr(obj, 'to_dict'): + return obj.to_dict() + return obj + + def _wait_for_vmi_running(self): + for event in self._kind_resource.watch(namespace=self.namespace, timeout=self.params.get('wait_timeout')): + entity = event['object'] + if entity.metadata.name != self.name: + continue + status = entity.get('status', {}) + phase = status.get('phase', None) + if phase == 'Running': + return entity + + self.fail("Timeout occurred while waiting for virtual machine to start. Maybe try a higher wait_timeout value?") + + def _wait_for_vm_state(self, new_state): + if new_state == 'running': + want_created = want_ready = True + else: + want_created = want_ready = False + + for event in self._kind_resource.watch(namespace=self.namespace, timeout=self.params.get('wait_timeout')): + entity = event['object'] + if entity.metadata.name != self.name: + continue + status = entity.get('status', {}) + created = status.get('created', False) + ready = status.get('ready', False) + if (created, ready) == (want_created, want_ready): + return entity + + self.fail("Timeout occurred while waiting for virtual machine to achieve '{0}' state. 
" + "Maybe try a higher wait_timeout value?".format(new_state)) + + def manage_vm_state(self, new_state, already_changed): + new_running = True if new_state == 'running' else False + changed = False + k8s_obj = {} + + if not already_changed: + k8s_obj = self.get_resource(self._kind_resource) + if not k8s_obj: + self.fail("VirtualMachine object disappeared during module operation, aborting.") + if k8s_obj.spec.get('running', False) == new_running: + return False, k8s_obj + + newdef = dict(metadata=dict(name=self.name, namespace=self.namespace), spec=dict(running=new_running)) + k8s_obj, err = self.patch_resource(self._kind_resource, newdef, k8s_obj, + self.name, self.namespace, merge_type='merge') + if err: + self.fail_json(**err) + else: + changed = True + + if self.params.get('wait'): + k8s_obj = self._wait_for_vm_state(new_state) + + return changed, k8s_obj + + def _process_template_defaults(self, proccess_template, processedtemplate, defaults): + def set_template_default(default_name, default_name_index, definition_spec): + default_value = proccess_template['metadata']['annotations'][default_name] + if default_value: + values = definition_spec[default_name_index] + default_values = [d for d in values if d.get('name') == default_value] + defaults[default_name_index] = default_values + if definition_spec[default_name_index] is None: + definition_spec[default_name_index] = [] + definition_spec[default_name_index].extend([d for d in values if d.get('name') != default_value]) + + devices = processedtemplate['spec']['template']['spec']['domain']['devices'] + spec = processedtemplate['spec']['template']['spec'] + + set_template_default('defaults.template.cnv.io/disk', 'disks', devices) + set_template_default('defaults.template.cnv.io/volume', 'volumes', spec) + set_template_default('defaults.template.cnv.io/nic', 'interfaces', devices) + set_template_default('defaults.template.cnv.io/network', 'networks', spec) + + def construct_definition(self, kind, our_state, ephemeral): + definition = virtdict() + processedtemplate = {} + + # Construct the API object definition: + defaults = {'disks': [], 'volumes': [], 'interfaces': [], 'networks': []} + vm_template = self.params.get('template') + if vm_template: + # Find the template the VM should be created from: + template_resource = self.client.resources.get(api_version='template.openshift.io/v1', kind='Template', name='templates') + proccess_template = template_resource.get(name=vm_template, namespace=self.params.get('namespace')) + + # Set proper template values taken from module option 'template_parameters': + for k, v in self.params.get('template_parameters', {}).items(): + for parameter in proccess_template.parameters: + if parameter.name == k: + parameter.value = v + + # Proccess the template: + processedtemplates_res = self.client.resources.get(api_version='template.openshift.io/v1', kind='Template', name='processedtemplates') + processedtemplate = processedtemplates_res.create(proccess_template.to_dict()).to_dict()['objects'][0] + + # Process defaults of the template: + self._process_template_defaults(proccess_template, processedtemplate, defaults) + + if not ephemeral: + definition['spec']['running'] = our_state == 'running' + template = definition if ephemeral else definition['spec']['template'] + template['metadata']['labels']['vm.cnv.io/name'] = self.params.get('name') + dummy, definition = self.construct_vm_definition(kind, definition, template, defaults) + + return self.merge_dicts(definition, processedtemplate) + + def 
execute_module(self): + # Parse parameters specific to this module: + ephemeral = self.params.get('ephemeral') + k8s_state = our_state = self.params.get('state') + kind = 'VirtualMachineInstance' if ephemeral else 'VirtualMachine' + _used_params = [name for name in self.params if self.params[name] is not None] + # Is 'spec:' getting changed? + vm_spec_change = True if set(VM_SPEC_PARAMS).intersection(_used_params) else False + changed = False + crud_executed = False + method = '' + + # Underlying module_utils/k8s/* code knows only of state == present/absent; let's make sure not to confuse it + if ephemeral: + # Ephemerals don't actually support running/stopped; we treat those as aliases for present/absent instead + if our_state == 'running': + self.params['state'] = k8s_state = 'present' + elif our_state == 'stopped': + self.params['state'] = k8s_state = 'absent' + else: + if our_state != 'absent': + self.params['state'] = k8s_state = 'present' + + # Start with fetching the current object to make sure it exists + # If it does, but we end up not performing any operations on it, at least we'll be able to return + # its current contents as part of the final json + self.client = self.get_api_client() + self._kind_resource = self.find_supported_resource(kind) + k8s_obj = self.get_resource(self._kind_resource) + if not self.check_mode and not vm_spec_change and k8s_state != 'absent' and not k8s_obj: + self.fail("It's impossible to create an empty VM or change state of a non-existent VM.") + + # If there are (potential) changes to `spec:` or we want to delete the object, that warrants a full CRUD + # Also check_mode always warrants a CRUD, as that'll produce a sane result + if vm_spec_change or k8s_state == 'absent' or self.check_mode: + definition = self.construct_definition(kind, our_state, ephemeral) + result = self.execute_crud(kind, definition) + changed = result['changed'] + k8s_obj = result['result'] + method = result['method'] + crud_executed = True + + if ephemeral and self.params.get('wait') and k8s_state == 'present' and not self.check_mode: + # Waiting for k8s_state==absent is handled inside execute_crud() + k8s_obj = self._wait_for_vmi_running() + + if not ephemeral and our_state in ['running', 'stopped'] and not self.check_mode: + # State==present/absent doesn't involve any additional VMI state management and is fully + # handled inside execute_crud() (including wait logic) + patched, k8s_obj = self.manage_vm_state(our_state, crud_executed) + changed = changed or patched + if changed: + method = method or 'patch' + + # Return from the module: + self.exit_json(**{ + 'changed': changed, + 'kubevirt_vm': self.fix_serialization(k8s_obj), + 'method': method + }) + + +def main(): + module = KubeVirtVM() + try: + module.execute_module() + except Exception as e: + module.fail_json(msg=str(e), exception=traceback.format_exc()) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/linode/linode.py b/plugins/modules/cloud/linode/linode.py new file mode 100644 index 0000000000..00a222e584 --- /dev/null +++ b/plugins/modules/cloud/linode/linode.py @@ -0,0 +1,673 @@ +#!/usr/bin/python + +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: linode +short_description: Manage 
instances on the Linode Public Cloud
+description:
+  - Manage Linode Public Cloud instances and optionally wait for them to be 'running'.
+options:
+  state:
+    description:
+      - Indicate desired state of the resource
+    choices: [ absent, active, deleted, present, restarted, started, stopped ]
+    default: present
+  api_key:
+    description:
+      - Linode API key
+  name:
+    description:
+      - Name to give the instance (alphanumeric, dashes, underscore).
+      - To keep sanity on the Linode Web Console, name is prepended with C(LinodeID-).
+    required: true
+  displaygroup:
+    description:
+      - Add the instance to a Display Group in Linode Manager.
+  linode_id:
+    description:
+      - Unique ID of a linode server. This value is read-only in the sense that
+        if you specify it on creation of a Linode it will not be used. The
+        Linode API generates these IDs and we can use those generated values here to
+        reference a Linode more specifically. This is useful for idempotence.
+    aliases: [ lid ]
+  additional_disks:
+    description:
+      - List of dictionaries for creating additional disks that are added to the Linode configuration settings.
+      - Dictionary takes Size, Label, Type. Size is in MB.
+  alert_bwin_enabled:
+    description:
+      - Set status of bandwidth in alerts.
+    type: bool
+  alert_bwin_threshold:
+    description:
+      - Set threshold in MB of bandwidth in alerts.
+  alert_bwout_enabled:
+    description:
+      - Set status of bandwidth out alerts.
+    type: bool
+  alert_bwout_threshold:
+    description:
+      - Set threshold in MB of bandwidth out alerts.
+  alert_bwquota_enabled:
+    description:
+      - Set status of bandwidth quota alerts as percentage of network transfer quota.
+    type: bool
+  alert_bwquota_threshold:
+    description:
+      - Set threshold in MB of bandwidth quota alerts.
+  alert_cpu_enabled:
+    description:
+      - Set status of receiving CPU usage alerts.
+    type: bool
+  alert_cpu_threshold:
+    description:
+      - Set percentage threshold for receiving CPU usage alerts. Each CPU core adds 100% to total.
+  alert_diskio_enabled:
+    description:
+      - Set status of receiving disk IO alerts.
+    type: bool
+  alert_diskio_threshold:
+    description:
+      - Set threshold for average IO ops/sec over 2 hour period.
+  backupweeklyday:
+    description:
+      - Integer value for what day of the week to store weekly backups.
+  plan:
+    description:
+      - plan to use for the instance (Linode plan)
+  payment_term:
+    description:
+      - payment term to use for the instance (payment term in months)
+    default: 1
+    choices: [ 1, 12, 24 ]
+  password:
+    description:
+      - root password to apply to a new server (auto generated if missing)
+  private_ip:
+    description:
+      - Add private IPv4 address when Linode is created.
+    type: bool
+    default: "no"
+  ssh_pub_key:
+    description:
+      - SSH public key applied to root user
+  swap:
+    description:
+      - swap size in MB
+    default: 512
+  distribution:
+    description:
+      - distribution to use for the instance (Linode Distribution)
+  datacenter:
+    description:
+      - datacenter to create an instance in (Linode Datacenter)
+  kernel_id:
+    description:
+      - kernel to use for the instance (Linode Kernel)
+  wait:
+    description:
+      - wait for the instance to be in state C(running) before returning
+    type: bool
+    default: "no"
+  wait_timeout:
+    description:
+      - how long before wait gives up, in seconds
+    default: 300
+  watchdog:
+    description:
+      - Set status of Lassie watchdog.
+    type: bool
+    default: "True"
+requirements:
+  - python >= 2.6
+  - linode-python
+author:
+- Vincent Viallet (@zbal)
+notes:
+  - Please note, linode-python does not have python 3 support.
+ - This module uses the now deprecated v3 of the Linode API. + - C(LINODE_API_KEY) env variable can be used instead. + - Please review U(https://www.linode.com/api/linode) for determining the required parameters. +''' + +EXAMPLES = ''' + +- name: Create a new Linode + linode: + name: linode-test1 + plan: 1 + datacenter: 7 + distribution: 129 + state: present + register: linode_creation + +- name: Create a server with a private IP Address + linode: + module: linode + api_key: 'longStringFromLinodeApi' + name: linode-test1 + plan: 1 + datacenter: 2 + distribution: 99 + password: 'superSecureRootPassword' + private_ip: yes + ssh_pub_key: 'ssh-rsa qwerty' + swap: 768 + wait: yes + wait_timeout: 600 + state: present + delegate_to: localhost + register: linode_creation + +- name: Fully configure new server + linode: + api_key: 'longStringFromLinodeApi' + name: linode-test1 + plan: 4 + datacenter: 2 + distribution: 99 + kernel_id: 138 + password: 'superSecureRootPassword' + private_ip: yes + ssh_pub_key: 'ssh-rsa qwerty' + swap: 768 + wait: yes + wait_timeout: 600 + state: present + alert_bwquota_enabled: True + alert_bwquota_threshold: 80 + alert_bwin_enabled: True + alert_bwin_threshold: 10 + alert_cpu_enabled: True + alert_cpu_threshold: 210 + alert_bwout_enabled: True + alert_bwout_threshold: 10 + alert_diskio_enabled: True + alert_diskio_threshold: 10000 + backupweeklyday: 1 + backupwindow: 2 + displaygroup: 'test' + additional_disks: + - {Label: 'disk1', Size: 2500, Type: 'raw'} + - {Label: 'newdisk', Size: 2000} + watchdog: True + delegate_to: localhost + register: linode_creation + +- name: Ensure a running server (create if missing) + linode: + api_key: 'longStringFromLinodeApi' + name: linode-test1 + plan: 1 + datacenter: 2 + distribution: 99 + password: 'superSecureRootPassword' + ssh_pub_key: 'ssh-rsa qwerty' + swap: 768 + wait: yes + wait_timeout: 600 + state: present + delegate_to: localhost + register: linode_creation + +- name: Delete a server + linode: + api_key: 'longStringFromLinodeApi' + name: linode-test1 + linode_id: "{{ linode_creation.instance.id }}" + state: absent + delegate_to: localhost + +- name: Stop a server + linode: + api_key: 'longStringFromLinodeApi' + name: linode-test1 + linode_id: "{{ linode_creation.instance.id }}" + state: stopped + delegate_to: localhost + +- name: Reboot a server + linode: + api_key: 'longStringFromLinodeApi' + name: linode-test1 + linode_id: "{{ linode_creation.instance.id }}" + state: restarted + delegate_to: localhost +''' + +import os +import time +import traceback + +LINODE_IMP_ERR = None +try: + from linode import api as linode_api + HAS_LINODE = True +except ImportError: + LINODE_IMP_ERR = traceback.format_exc() + HAS_LINODE = False + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib + + +def randompass(): + ''' + Generate a long random password that comply to Linode requirements + ''' + # Linode API currently requires the following: + # It must contain at least two of these four character classes: + # lower case letters - upper case letters - numbers - punctuation + # we play it safe :) + import random + import string + # as of python 2.4, this reseeds the PRNG from urandom + random.seed() + lower = ''.join(random.choice(string.ascii_lowercase) for x in range(6)) + upper = ''.join(random.choice(string.ascii_uppercase) for x in range(6)) + number = ''.join(random.choice(string.digits) for x in range(6)) + punct = ''.join(random.choice(string.punctuation) for x in range(6)) + p = lower + upper + number + 
punct
+    return ''.join(random.sample(p, len(p)))
+
+
+def getInstanceDetails(api, server):
+    '''
+    Return the details of an instance, populating IPs, etc.
+    '''
+    instance = {'id': server['LINODEID'],
+                'name': server['LABEL'],
+                'public': [],
+                'private': []}
+
+    # Populate with ips
+    for ip in api.linode_ip_list(LinodeId=server['LINODEID']):
+        if ip['ISPUBLIC'] and 'ipv4' not in instance:
+            instance['ipv4'] = ip['IPADDRESS']
+            instance['fqdn'] = ip['RDNS_NAME']
+        if ip['ISPUBLIC']:
+            instance['public'].append({'ipv4': ip['IPADDRESS'],
+                                       'fqdn': ip['RDNS_NAME'],
+                                       'ip_id': ip['IPADDRESSID']})
+        else:
+            instance['private'].append({'ipv4': ip['IPADDRESS'],
+                                        'fqdn': ip['RDNS_NAME'],
+                                        'ip_id': ip['IPADDRESSID']})
+    return instance
+
+
+def linodeServers(module, api, state, name,
+                  displaygroup, plan, additional_disks, distribution,
+                  datacenter, kernel_id, linode_id, payment_term, password,
+                  private_ip, ssh_pub_key, swap, wait, wait_timeout, watchdog, **kwargs):
+    instances = []
+    changed = False
+    new_server = False
+    servers = []
+    disks = []
+    configs = []
+    jobs = []
+
+    # See if we can match existing server details with the provided linode_id
+    if linode_id:
+        # For the moment we only consider linode_id as criteria for match
+        # Later we can use more (size, name, etc.) and update existing
+        servers = api.linode_list(LinodeId=linode_id)
+        # Attempt to fetch details about disks and configs only if servers are
+        # found with linode_id
+        if servers:
+            disks = api.linode_disk_list(LinodeId=linode_id)
+            configs = api.linode_config_list(LinodeId=linode_id)
+
+    # Act on the state
+    if state in ('active', 'present', 'started'):
+        # TODO: validate all the plan / distribution / datacenter are valid
+
+        # Multi step process/validation:
+        #  - need linode_id (entity)
+        #  - need disk_id for linode_id - create disk from distrib
+        #  - need config_id for linode_id - create config (need kernel)
+
+        # Any create step triggers a job that needs to be waited for.
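+        # A hedged summary of the happy-path call sequence implemented below
+        # (Linode API v3 via linode-python; arguments elided for brevity):
+        #
+        #   res = api.linode_create(DatacenterID=..., PlanID=..., PaymentTerm=...)
+        #   api.linode_update(LinodeId=res['LinodeID'], Label=...)        # name the box
+        #   api.linode_disk_createfromdistribution(LinodeId=..., ...)     # root disk
+        #   api.linode_disk_create(LinodeId=..., Type='swap', ...)        # swap disk
+        #   api.linode_config_create(LinodeId=..., KernelId=..., ...)     # boot profile
+        #   api.linode_boot(LinodeId=...)                                 # start it
+        #
+        # Disk and boot calls return a JobID (collected in the jobs list); the loop at
+        # the end of this branch then polls the server STATUS until it reports running.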
+        if not servers:
+            for arg_name, arg in (('name', name), ('plan', plan),
+                                  ('distribution', distribution), ('datacenter', datacenter)):
+                if not arg:
+                    module.fail_json(msg='%s is required for %s state' % (arg_name, state))
+            # Create linode entity
+            new_server = True
+
+            # Get size of all individually listed disks to subtract from Distribution disk
+            used_disk_space = 0 if additional_disks is None else sum(disk['Size'] for disk in additional_disks)
+
+            try:
+                res = api.linode_create(DatacenterID=datacenter, PlanID=plan,
+                                        PaymentTerm=payment_term)
+                linode_id = res['LinodeID']
+                # Update linode Label to match name
+                api.linode_update(LinodeId=linode_id, Label='%s-%s' % (linode_id, name))
+                # Update Linode with Ansible configuration options
+                api.linode_update(LinodeId=linode_id, LPM_DISPLAYGROUP=displaygroup, WATCHDOG=watchdog, **kwargs)
+                # Save server
+                servers = api.linode_list(LinodeId=linode_id)
+            except Exception as e:
+                module.fail_json(msg='%s' % e.value[0]['ERRORMESSAGE'])
+
+        # Add private IP to Linode
+        if private_ip:
+            try:
+                res = api.linode_ip_addprivate(LinodeID=linode_id)
+            except Exception as e:
+                module.fail_json(msg='%s' % e.value[0]['ERRORMESSAGE'])
+
+        if not disks:
+            for arg_name, arg in (('name', name), ('linode_id', linode_id),
+                                  ('distribution', distribution)):
+                if not arg:
+                    module.fail_json(msg='%s is required for %s state' % (arg_name, state))
+            # Create disks (1 from distrib, 1 for SWAP)
+            new_server = True
+            try:
+                if not password:
+                    # Password is required on creation, if not provided generate one
+                    password = randompass()
+                if not swap:
+                    swap = 512
+                # Create data disk
+                size = servers[0]['TOTALHD'] - used_disk_space - swap
+
+                if ssh_pub_key:
+                    res = api.linode_disk_createfromdistribution(
+                        LinodeId=linode_id, DistributionID=distribution,
+                        rootPass=password, rootSSHKey=ssh_pub_key,
+                        Label='%s data disk (lid: %s)' % (name, linode_id),
+                        Size=size)
+                else:
+                    res = api.linode_disk_createfromdistribution(
+                        LinodeId=linode_id, DistributionID=distribution,
+                        rootPass=password,
+                        Label='%s data disk (lid: %s)' % (name, linode_id),
+                        Size=size)
+                jobs.append(res['JobID'])
+                # Create SWAP disk
+                res = api.linode_disk_create(LinodeId=linode_id, Type='swap',
+                                             Label='%s swap disk (lid: %s)' % (name, linode_id),
+                                             Size=swap)
+                # Create individually listed disks at specified size
+                if additional_disks:
+                    for disk in additional_disks:
+                        # If a disk Type is not passed in, default to ext4
+                        if disk.get('Type') is None:
+                            disk['Type'] = 'ext4'
+                        res = api.linode_disk_create(LinodeID=linode_id, Label=disk['Label'], Size=disk['Size'], Type=disk['Type'])
+
+                jobs.append(res['JobID'])
+            except Exception as e:
+                # TODO: destroy linode ?
+                module.fail_json(msg='%s' % e.value[0]['ERRORMESSAGE'])
+
+        if not configs:
+            for arg_name, arg in (('name', name), ('linode_id', linode_id),
+                                  ('distribution', distribution)):
+                if not arg:
+                    module.fail_json(msg='%s is required for %s state' % (arg_name, state))
+
+            # Check architecture
+            for distrib in api.avail_distributions():
+                if distrib['DISTRIBUTIONID'] != distribution:
+                    continue
+                arch = '32'
+                if distrib['IS64BIT']:
+                    arch = '64'
+                break
+
+            # Get latest kernel matching arch if kernel_id is not specified
+            if not kernel_id:
+                for kernel in api.avail_kernels():
+                    if not kernel['LABEL'].startswith('Latest %s' % arch):
+                        continue
+                    kernel_id = kernel['KERNELID']
+                    break
+
+            # Get disk list
+            disks_id = []
+            for disk in api.linode_disk_list(LinodeId=linode_id):
+                if disk['TYPE'] == 'ext3':
+                    disks_id.insert(0, str(disk['DISKID']))
+                    continue
+                disks_id.append(str(disk['DISKID']))
+            # Pad the list to the nine entries expected by the Disklist
+            # argument below
+            while len(disks_id) < 9:
+                disks_id.append('')
+            disks_list = ','.join(disks_id)
+
+            # Create config
+            new_server = True
+            try:
+                api.linode_config_create(LinodeId=linode_id, KernelId=kernel_id,
+                                         Disklist=disks_list, Label='%s config' % name)
+                configs = api.linode_config_list(LinodeId=linode_id)
+            except Exception as e:
+                module.fail_json(msg='%s' % e.value[0]['ERRORMESSAGE'])
+
+        # Start / Ensure servers are running
+        for server in servers:
+            # Refresh server state
+            server = api.linode_list(LinodeId=server['LINODEID'])[0]
+            # Ensure existing servers are up and running, boot if necessary
+            if server['STATUS'] != 1:
+                res = api.linode_boot(LinodeId=linode_id)
+                jobs.append(res['JobID'])
+                changed = True
+
+            # wait here until the instances are up
+            wait_timeout = time.time() + wait_timeout
+            while wait and wait_timeout > time.time():
+                # refresh the server details
+                server = api.linode_list(LinodeId=server['LINODEID'])[0]
+                # status:
+                #  -2: Boot failed
+                #   1: Running
+                if server['STATUS'] in (-2, 1):
+                    break
+                time.sleep(5)
+            if wait and wait_timeout <= time.time():
+                # waiting took too long
+                module.fail_json(msg='Timeout waiting on %s (lid: %s)' % (server['LABEL'], server['LINODEID']))
+            # Get a fresh copy of the server details
+            server = api.linode_list(LinodeId=server['LINODEID'])[0]
+            if server['STATUS'] == -2:
+                module.fail_json(msg='%s (lid: %s) failed to boot' %
+                                 (server['LABEL'], server['LINODEID']))
+            # From now on we know the task is a success
+            # Build instance report
+            instance = getInstanceDetails(api, server)
+            # depending on wait flag select the status
+            if wait:
+                instance['status'] = 'Running'
+            else:
+                instance['status'] = 'Starting'
+
+            # Return the root password if this is a new box and no SSH key
+            # has been provided
+            if new_server and not ssh_pub_key:
+                instance['password'] = password
+            instances.append(instance)
+
+    elif state in ('stopped',):
+        if not linode_id:
+            module.fail_json(msg='linode_id is required for stopped state')
+
+        if not servers:
+            module.fail_json(msg='Server (lid: %s) not found' % (linode_id))
+
+        for server in servers:
+            instance = getInstanceDetails(api, server)
+            if server['STATUS'] != 2:
+                try:
+                    res = api.linode_shutdown(LinodeId=linode_id)
+                except Exception as e:
+                    module.fail_json(msg='%s' % e.value[0]['ERRORMESSAGE'])
+                instance['status'] = 'Stopping'
+                changed = True
+            else:
+                instance['status'] = 'Stopped'
+            instances.append(instance)
+
+    elif state in ('restarted',):
+        if not linode_id:
+            module.fail_json(msg='linode_id is required for restarted state')
+
+        if not servers:
+            module.fail_json(msg='Server (lid: %s) not found' % (linode_id))
+
+        for server in
servers: + instance = getInstanceDetails(api, server) + try: + res = api.linode_reboot(LinodeId=server['LINODEID']) + except Exception as e: + module.fail_json(msg='%s' % e.value[0]['ERRORMESSAGE']) + instance['status'] = 'Restarting' + changed = True + instances.append(instance) + + elif state in ('absent', 'deleted'): + for server in servers: + instance = getInstanceDetails(api, server) + try: + api.linode_delete(LinodeId=server['LINODEID'], skipChecks=True) + except Exception as e: + module.fail_json(msg='%s' % e.value[0]['ERRORMESSAGE']) + instance['status'] = 'Deleting' + changed = True + instances.append(instance) + + # Ease parsing if only 1 instance + if len(instances) == 1: + module.exit_json(changed=changed, instance=instances[0]) + + module.exit_json(changed=changed, instances=instances) + + +def main(): + module = AnsibleModule( + argument_spec=dict( + state=dict(type='str', default='present', + choices=['absent', 'active', 'deleted', 'present', 'restarted', 'started', 'stopped']), + api_key=dict(type='str', no_log=True), + name=dict(type='str', required=True), + alert_bwin_enabled=dict(type='bool'), + alert_bwin_threshold=dict(type='int'), + alert_bwout_enabled=dict(type='bool'), + alert_bwout_threshold=dict(type='int'), + alert_bwquota_enabled=dict(type='bool'), + alert_bwquota_threshold=dict(type='int'), + alert_cpu_enabled=dict(type='bool'), + alert_cpu_threshold=dict(type='int'), + alert_diskio_enabled=dict(type='bool'), + alert_diskio_threshold=dict(type='int'), + backupsenabled=dict(type='int'), + backupweeklyday=dict(type='int'), + backupwindow=dict(type='int'), + displaygroup=dict(type='str', default=''), + plan=dict(type='int'), + additional_disks=dict(type='list'), + distribution=dict(type='int'), + datacenter=dict(type='int'), + kernel_id=dict(type='int'), + linode_id=dict(type='int', aliases=['lid']), + payment_term=dict(type='int', default=1, choices=[1, 12, 24]), + password=dict(type='str', no_log=True), + private_ip=dict(type='bool'), + ssh_pub_key=dict(type='str'), + swap=dict(type='int', default=512), + wait=dict(type='bool', default=True), + wait_timeout=dict(type='int', default=300), + watchdog=dict(type='bool', default=True), + ), + ) + + if not HAS_LINODE: + module.fail_json(msg=missing_required_lib('linode-python'), exception=LINODE_IMP_ERR) + + state = module.params.get('state') + api_key = module.params.get('api_key') + name = module.params.get('name') + alert_bwin_enabled = module.params.get('alert_bwin_enabled') + alert_bwin_threshold = module.params.get('alert_bwin_threshold') + alert_bwout_enabled = module.params.get('alert_bwout_enabled') + alert_bwout_threshold = module.params.get('alert_bwout_threshold') + alert_bwquota_enabled = module.params.get('alert_bwquota_enabled') + alert_bwquota_threshold = module.params.get('alert_bwquota_threshold') + alert_cpu_enabled = module.params.get('alert_cpu_enabled') + alert_cpu_threshold = module.params.get('alert_cpu_threshold') + alert_diskio_enabled = module.params.get('alert_diskio_enabled') + alert_diskio_threshold = module.params.get('alert_diskio_threshold') + backupsenabled = module.params.get('backupsenabled') + backupweeklyday = module.params.get('backupweeklyday') + backupwindow = module.params.get('backupwindow') + displaygroup = module.params.get('displaygroup') + plan = module.params.get('plan') + additional_disks = module.params.get('additional_disks') + distribution = module.params.get('distribution') + datacenter = module.params.get('datacenter') + kernel_id = module.params.get('kernel_id') + 
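+    # The remaining parameters mirror the argument_spec one-to-one; the
+    # alert_* and backup* values are re-packed into kwargs below so that
+    # only options the user actually set are forwarded to linode_update().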
linode_id = module.params.get('linode_id') + payment_term = module.params.get('payment_term') + password = module.params.get('password') + private_ip = module.params.get('private_ip') + ssh_pub_key = module.params.get('ssh_pub_key') + swap = module.params.get('swap') + wait = module.params.get('wait') + wait_timeout = int(module.params.get('wait_timeout')) + watchdog = int(module.params.get('watchdog')) + + kwargs = dict() + check_items = dict( + alert_bwin_enabled=alert_bwin_enabled, + alert_bwin_threshold=alert_bwin_threshold, + alert_bwout_enabled=alert_bwout_enabled, + alert_bwout_threshold=alert_bwout_threshold, + alert_bwquota_enabled=alert_bwquota_enabled, + alert_bwquota_threshold=alert_bwquota_threshold, + alert_cpu_enabled=alert_cpu_enabled, + alert_cpu_threshold=alert_cpu_threshold, + alert_diskio_enabled=alert_diskio_enabled, + alert_diskio_threshold=alert_diskio_threshold, + backupweeklyday=backupweeklyday, + backupwindow=backupwindow, + ) + + for key, value in check_items.items(): + if value is not None: + kwargs[key] = value + + # Setup the api_key + if not api_key: + try: + api_key = os.environ['LINODE_API_KEY'] + except KeyError as e: + module.fail_json(msg='Unable to load %s' % e.message) + + # setup the auth + try: + api = linode_api.Api(api_key) + api.test_echo() + except Exception as e: + module.fail_json(msg='%s' % e.value[0]['ERRORMESSAGE']) + + linodeServers(module, api, state, name, + displaygroup, plan, + additional_disks, distribution, datacenter, kernel_id, linode_id, + payment_term, password, private_ip, ssh_pub_key, swap, wait, + wait_timeout, watchdog, **kwargs) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/linode/linode_v4.py b/plugins/modules/cloud/linode/linode_v4.py new file mode 100644 index 0000000000..758bc66f34 --- /dev/null +++ b/plugins/modules/cloud/linode/linode_v4.py @@ -0,0 +1,293 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2017 Ansible Project +# GNU General Public License v3.0+ +# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = { + 'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community', +} + +DOCUMENTATION = ''' +--- +module: linode_v4 +short_description: Manage instances on the Linode cloud. +description: Manage instances on the Linode cloud. +requirements: + - python >= 2.7 + - linode_api4 >= 2.0.0 +author: + - Luke Murphy (@decentral1se) +notes: + - No Linode resizing is currently implemented. This module will, in time, + replace the current Linode module which uses deprecated API bindings on the + Linode side. +options: + region: + description: + - The region of the instance. This is a required parameter only when + creating Linode instances. See + U(https://developers.linode.com/api/v4#tag/Regions). + required: false + type: str + image: + description: + - The image of the instance. This is a required parameter only when + creating Linode instances. See + U(https://developers.linode.com/api/v4#tag/Images). + type: str + required: false + type: + description: + - The type of the instance. This is a required parameter only when + creating Linode instances. See + U(https://developers.linode.com/api/v4#tag/Linode-Types). + type: str + required: false + label: + description: + - The instance label. This label is used as the main determiner for + idempotence for the module and is therefore mandatory. 
+ type: str + required: true + group: + description: + - The group that the instance should be marked under. Please note, that + group labelling is deprecated but still supported. The encouraged + method for marking instances is to use tags. + type: str + required: false + tags: + description: + - The tags that the instance should be marked under. See + U(https://developers.linode.com/api/v4#tag/Tags). + required: false + type: list + root_pass: + description: + - The password for the root user. If not specified, one will be + generated. This generated password will be available in the task + success JSON. + required: false + type: str + authorized_keys: + description: + - A list of SSH public key parts to deploy for the root user. + required: false + type: list + state: + description: + - The desired instance state. + type: str + choices: + - present + - absent + required: true + access_token: + description: + - The Linode API v4 access token. It may also be specified by exposing + the C(LINODE_ACCESS_TOKEN) environment variable. See + U(https://developers.linode.com/api/v4#section/Access-and-Authentication). + required: true +''' + +EXAMPLES = """ +- name: Create a new Linode. + linode_v4: + label: new-linode + type: g6-nanode-1 + region: eu-west + image: linode/debian9 + root_pass: passw0rd + authorized_keys: + - "ssh-rsa ..." + state: present + +- name: Delete that new Linode. + linode_v4: + label: new-linode + state: absent +""" + +RETURN = """ +instance: + description: The instance description in JSON serialized form. + returned: Always. + type: dict + sample: { + "root_pass": "foobar", # if auto-generated + "alerts": { + "cpu": 90, + "io": 10000, + "network_in": 10, + "network_out": 10, + "transfer_quota": 80 + }, + "backups": { + "enabled": false, + "schedule": { + "day": null, + "window": null + } + }, + "created": "2018-09-26T08:12:33", + "group": "Foobar Group", + "hypervisor": "kvm", + "id": 10480444, + "image": "linode/centos7", + "ipv4": [ + "130.132.285.233" + ], + "ipv6": "2a82:7e00::h03c:46ff:fe04:5cd2/64", + "label": "lin-foo", + "region": "eu-west", + "specs": { + "disk": 25600, + "memory": 1024, + "transfer": 1000, + "vcpus": 1 + }, + "status": "running", + "tags": [], + "type": "g6-nanode-1", + "updated": "2018-09-26T10:10:14", + "watchdog_enabled": true + } +""" + +import traceback + +from ansible.module_utils.basic import AnsibleModule, env_fallback, missing_required_lib +from ansible_collections.community.general.plugins.module_utils.linode import get_user_agent + +LINODE_IMP_ERR = None +try: + from linode_api4 import Instance, LinodeClient + HAS_LINODE_DEPENDENCY = True +except ImportError: + LINODE_IMP_ERR = traceback.format_exc() + HAS_LINODE_DEPENDENCY = False + + +def create_linode(module, client, **kwargs): + """Creates a Linode instance and handles return format.""" + if kwargs['root_pass'] is None: + kwargs.pop('root_pass') + + try: + response = client.linode.instance_create(**kwargs) + except Exception as exception: + module.fail_json(msg='Unable to query the Linode API. Saw: %s' % exception) + + try: + if isinstance(response, tuple): + instance, root_pass = response + instance_json = instance._raw_json + instance_json.update({'root_pass': root_pass}) + return instance_json + else: + return response._raw_json + except TypeError: + module.fail_json(msg='Unable to parse Linode instance creation' + ' response. 
Please raise a bug against this' + ' module on https://github.com/ansible/ansible/issues' + ) + + +def maybe_instance_from_label(module, client): + """Try to retrieve an instance based on a label.""" + try: + label = module.params['label'] + result = client.linode.instances(Instance.label == label) + return result[0] + except IndexError: + return None + except Exception as exception: + module.fail_json(msg='Unable to query the Linode API. Saw: %s' % exception) + + +def initialise_module(): + """Initialise the module parameter specification.""" + return AnsibleModule( + argument_spec=dict( + label=dict(type='str', required=True), + state=dict( + type='str', + required=True, + choices=['present', 'absent'] + ), + access_token=dict( + type='str', + required=True, + no_log=True, + fallback=(env_fallback, ['LINODE_ACCESS_TOKEN']), + ), + authorized_keys=dict(type='list', required=False), + group=dict(type='str', required=False), + image=dict(type='str', required=False), + region=dict(type='str', required=False), + root_pass=dict(type='str', required=False, no_log=True), + tags=dict(type='list', required=False), + type=dict(type='str', required=False), + ), + supports_check_mode=False, + required_one_of=( + ['state', 'label'], + ), + required_together=( + ['region', 'image', 'type'], + ) + ) + + +def build_client(module): + """Build a LinodeClient.""" + return LinodeClient( + module.params['access_token'], + user_agent=get_user_agent('linode_v4_module') + ) + + +def main(): + """Module entrypoint.""" + module = initialise_module() + + if not HAS_LINODE_DEPENDENCY: + module.fail_json(msg=missing_required_lib('linode-api4'), exception=LINODE_IMP_ERR) + + client = build_client(module) + instance = maybe_instance_from_label(module, client) + + if module.params['state'] == 'present' and instance is not None: + module.exit_json(changed=False, instance=instance._raw_json) + + elif module.params['state'] == 'present' and instance is None: + instance_json = create_linode( + module, client, + authorized_keys=module.params['authorized_keys'], + group=module.params['group'], + image=module.params['image'], + label=module.params['label'], + region=module.params['region'], + root_pass=module.params['root_pass'], + tags=module.params['tags'], + ltype=module.params['type'], + ) + module.exit_json(changed=True, instance=instance_json) + + elif module.params['state'] == 'absent' and instance is not None: + instance.delete() + module.exit_json(changed=True, instance=instance._raw_json) + + elif module.params['state'] == 'absent' and instance is None: + module.exit_json(changed=False, instance={}) + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/cloud/lxc/lxc_container.py b/plugins/modules/cloud/lxc/lxc_container.py new file mode 100644 index 0000000000..a7b86cee8e --- /dev/null +++ b/plugins/modules/cloud/lxc/lxc_container.py @@ -0,0 +1,1753 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2014, Kevin Carter +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: lxc_container +short_description: Manage LXC Containers +description: + - Management of LXC containers +author: "Kevin Carter (@cloudnull)" +options: + name: + description: + - Name of a container. 
+    required: true
+  backing_store:
+    choices:
+      - dir
+      - lvm
+      - loop
+      - btrfs
+      - overlayfs
+      - zfs
+    description:
+      - Backend storage type for the container.
+    default: dir
+  template:
+    description:
+      - Name of the template to use within an LXC create.
+    default: ubuntu
+  template_options:
+    description:
+      - Template options when building the container.
+  config:
+    description:
+      - Path to the LXC configuration file.
+  lv_name:
+    description:
+      - Name of the logical volume, defaults to the container name.
+    default: $CONTAINER_NAME
+  vg_name:
+    description:
+      - If backend store is lvm, specify the name of the volume group.
+    default: lxc
+  thinpool:
+    description:
+      - Use LVM thin pool called TP.
+  fs_type:
+    description:
+      - Create fstype TYPE.
+    default: ext4
+  fs_size:
+    description:
+      - File system size.
+    default: 5G
+  directory:
+    description:
+      - Place rootfs directory under DIR.
+  zfs_root:
+    description:
+      - Create zfs under given zfsroot.
+  container_command:
+    description:
+      - Run a command within a container.
+  lxc_path:
+    description:
+      - Place container under PATH.
+  container_log:
+    choices:
+      - true
+      - false
+    description:
+      - Enable a container log for host actions to the container.
+    type: bool
+    default: 'no'
+  container_log_level:
+    choices:
+      - INFO
+      - ERROR
+      - DEBUG
+    description:
+      - Set the log level for a container where *container_log* was set.
+    required: false
+    default: INFO
+  clone_name:
+    description:
+      - Name of the new cloned server. This is only used when state is
+        clone.
+    type: str
+  clone_snapshot:
+    choices:
+      - true
+      - false
+    description:
+      - Create a snapshot of a container when cloning. This is not supported
+        by all container storage backends. Enabling this may fail if the
+        backing store does not support snapshots.
+    type: bool
+    default: 'no'
+  archive:
+    choices:
+      - true
+      - false
+    description:
+      - Create an archive of a container. This will create a tarball of the
+        running container.
+    type: bool
+    default: 'no'
+  archive_path:
+    description:
+      - Path to save the archived container. If the path does not exist
+        the archive method will attempt to create it.
+  archive_compression:
+    choices:
+      - gzip
+      - bzip2
+      - none
+    description:
+      - Type of compression to use when creating an archive of a running
+        container.
+    default: gzip
+  state:
+    choices:
+      - started
+      - stopped
+      - restarted
+      - absent
+      - frozen
+    description:
+      - Define the state of a container. If you clone a container using
+        `clone_name` the newly cloned container is created in a stopped state.
+        The running container will be stopped while the clone operation is
+        happening and upon completion of the clone the original container
+        state will be restored.
+    default: started
+  container_config:
+    description:
+      - List of 'key=value' options to use when configuring a container.
+requirements:
+  - 'lxc >= 1.0 # OS package'
+  - 'python >= 2.6 # OS Package'
+  - 'lxc-python2 >= 0.1 # PIP Package from https://github.com/lxc/python2-lxc'
+notes:
+  - Containers must have a unique name. If you attempt to create a container
+    with a name that already exists in the user's namespace the module will
+    simply return as "unchanged".
+  - The "container_command" can be used with any state except "absent". If
+    used with state "stopped" the container will be "started", the command
+    executed, and then the container "stopped" again. Likewise if the state
+    is "stopped" and the container does not exist it will be first created,
+    "started", the command executed, and then "stopped".
+    If you use a "|" in the variable you can use common script formatting
+    within the variable itself. The "container_command" option will always
+    execute as BASH. When using "container_command" a log file is created
+    in the /tmp/ directory which contains both stdout and stderr of any
+    command executed.
+  - If "archive" is **true** the system will attempt to create a compressed
+    tarball of the running container. The "archive" option supports LVM backed
+    containers and will create a snapshot of the running container when
+    creating the archive.
+  - If your distro does not have a package for "python2-lxc", which is a
+    requirement for this module, it can be installed from source at
+    "https://github.com/lxc/python2-lxc" or installed via pip using the package
+    name lxc-python2.
+'''
+
+EXAMPLES = """
+- name: Create a started container
+  lxc_container:
+    name: test-container-started
+    container_log: true
+    template: ubuntu
+    state: started
+    template_options: --release trusty
+
+- name: Create a stopped container
+  lxc_container:
+    name: test-container-stopped
+    container_log: true
+    template: ubuntu
+    state: stopped
+    template_options: --release trusty
+
+- name: Create a frozen container
+  lxc_container:
+    name: test-container-frozen
+    container_log: true
+    template: ubuntu
+    state: frozen
+    template_options: --release trusty
+    container_command: |
+      echo 'hello world.' | tee /opt/started-frozen
+
+# Create filesystem container, configure it, archive it, and start it.
+- name: Create filesystem container
+  lxc_container:
+    name: test-container-config
+    backing_store: dir
+    container_log: true
+    template: ubuntu
+    state: started
+    archive: true
+    archive_compression: none
+    container_config:
+      - "lxc.aa_profile=unconfined"
+      - "lxc.cgroup.devices.allow=a *:* rmw"
+    template_options: --release trusty
+
+# Create an lvm container, run a complex command in it, add additional
+# configuration to it, create an archive of it, and finally leave the container
+# in a frozen state. The container archive will be compressed using bzip2.
+- name: Create a frozen lvm container
+  lxc_container:
+    name: test-container-lvm
+    container_log: true
+    template: ubuntu
+    state: frozen
+    backing_store: lvm
+    template_options: --release trusty
+    container_command: |
+      apt-get update
+      apt-get install -y vim lxc-dev
+      echo 'hello world.' | tee /opt/started
+      if [[ -f "/opt/started" ]]; then
+        echo 'hello world.' | tee /opt/found-started
+      fi
+    container_config:
+      - "lxc.aa_profile=unconfined"
+      - "lxc.cgroup.devices.allow=a *:* rmw"
+    archive: true
+    archive_compression: bzip2
+  register: lvm_container_info
+
+- name: Debug info on container "test-container-lvm"
+  debug:
+    var: lvm_container_info
+
+- name: Run a command in a container and ensure it is in a "stopped" state.
+  lxc_container:
+    name: test-container-started
+    state: stopped
+    container_command: |
+      echo 'hello world.' | tee /opt/stopped
+
+- name: Run a command in a container and ensure it is in a "frozen" state.
+  lxc_container:
+    name: test-container-stopped
+    state: frozen
+    container_command: |
+      echo 'hello world.' | tee /opt/frozen
+
+- name: Start a container
+  lxc_container:
+    name: test-container-stopped
+    state: started
+
+- name: Run a command in a container and then restart it
+  lxc_container:
+    name: test-container-started
+    state: restarted
+    container_command: |
+      echo 'hello world.'
| tee /opt/restarted + +- name: Run a complex command within a "running" container + lxc_container: + name: test-container-started + container_command: | + apt-get update + apt-get install -y curl wget vim apache2 + echo 'hello world.' | tee /opt/started + if [[ -f "/opt/started" ]]; then + echo 'hello world.' | tee /opt/found-started + fi + +# Create an archive of an existing container, save the archive to a defined +# path and then destroy it. +- name: Archive container + lxc_container: + name: test-container-started + state: absent + archive: true + archive_path: /opt/archives + +# Create a container using overlayfs, create an archive of it, create a +# snapshot clone of the container and and finally leave the container +# in a frozen state. The container archive will be compressed using gzip. +- name: Create an overlayfs container archive and clone it + lxc_container: + name: test-container-overlayfs + container_log: true + template: ubuntu + state: started + backing_store: overlayfs + template_options: --release trusty + clone_snapshot: true + clone_name: test-container-overlayfs-clone-snapshot + archive: true + archive_compression: gzip + register: clone_container_info + +- name: debug info on container "test-container" + debug: + var: clone_container_info + +- name: Clone a container using snapshot + lxc_container: + name: test-container-overlayfs-clone-snapshot + backing_store: overlayfs + clone_name: test-container-overlayfs-clone-snapshot2 + clone_snapshot: true + +- name: Create a new container and clone it + lxc_container: + name: test-container-new-archive + backing_store: dir + clone_name: test-container-new-archive-clone + +- name: Archive and clone a container then destroy it + lxc_container: + name: test-container-new-archive + state: absent + clone_name: test-container-new-archive-destroyed-clone + archive: true + archive_compression: gzip + +- name: Start a cloned container. 
+ lxc_container: + name: test-container-new-archive-destroyed-clone + state: started + +- name: Destroy a container + lxc_container: + name: '{{ item }}' + state: absent + with_items: + - test-container-stopped + - test-container-started + - test-container-frozen + - test-container-lvm + - test-container-config + - test-container-overlayfs + - test-container-overlayfs-clone + - test-container-overlayfs-clone-snapshot + - test-container-overlayfs-clone-snapshot2 + - test-container-new-archive + - test-container-new-archive-clone + - test-container-new-archive-destroyed-clone +""" + +RETURN = """ +lxc_container: + description: container information + returned: success + type: complex + contains: + name: + description: name of the lxc container + returned: success + type: str + sample: test_host + init_pid: + description: pid of the lxc init process + returned: success + type: int + sample: 19786 + interfaces: + description: list of the container's network interfaces + returned: success + type: list + sample: [ "eth0", "lo" ] + ips: + description: list of ips + returned: success + type: list + sample: [ "10.0.3.3" ] + state: + description: resulting state of the container + returned: success + type: str + sample: "running" + archive: + description: resulting state of the container + returned: success, when archive is true + type: str + sample: "/tmp/test-container-config.tar" + clone: + description: if the container was cloned + returned: success, when clone_name is specified + type: bool + sample: True +""" + +import os +import os.path +import re +import shutil +import subprocess +import tempfile +import time + +try: + import lxc +except ImportError: + HAS_LXC = False +else: + HAS_LXC = True + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.parsing.convert_bool import BOOLEANS_FALSE, BOOLEANS_TRUE +from ansible.module_utils.six.moves import xrange +from ansible.module_utils._text import to_text, to_bytes + + +# LXC_COMPRESSION_MAP is a map of available compression types when creating +# an archive of a container. +LXC_COMPRESSION_MAP = { + 'gzip': { + 'extension': 'tar.tgz', + 'argument': '-czf' + }, + 'bzip2': { + 'extension': 'tar.bz2', + 'argument': '-cjf' + }, + 'none': { + 'extension': 'tar', + 'argument': '-cf' + } +} + + +# LXC_COMMAND_MAP is a map of variables that are available to a method based +# on the state the container is in. +LXC_COMMAND_MAP = { + 'create': { + 'variables': { + 'config': '--config', + 'template': '--template', + 'backing_store': '--bdev', + 'lxc_path': '--lxcpath', + 'lv_name': '--lvname', + 'vg_name': '--vgname', + 'thinpool': '--thinpool', + 'fs_type': '--fstype', + 'fs_size': '--fssize', + 'directory': '--dir', + 'zfs_root': '--zfsroot' + } + }, + 'clone': { + 'variables-lxc-copy': { + 'backing_store': '--backingstorage', + 'lxc_path': '--lxcpath', + 'fs_size': '--fssize', + 'name': '--name', + 'clone_name': '--newname' + }, + # lxc-clone is deprecated in favor of lxc-copy + 'variables-lxc-clone': { + 'backing_store': '--backingstore', + 'lxc_path': '--lxcpath', + 'fs_size': '--fssize', + 'name': '--orig', + 'clone_name': '--new' + } + } +} + + +# LXC_BACKING_STORE is a map of available storage backends and options that +# are incompatible with the given storage backend. 
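+# For example, with backing_store=dir the module silently drops any
+# lv_name/vg_name/fs_type/fs_size/thinpool values before building the
+# lxc-create command line (see _get_vars() below).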
+LXC_BACKING_STORE = { + 'dir': [ + 'lv_name', 'vg_name', 'fs_type', 'fs_size', 'thinpool' + ], + 'lvm': [ + 'zfs_root' + ], + 'btrfs': [ + 'lv_name', 'vg_name', 'thinpool', 'zfs_root', 'fs_type', 'fs_size' + ], + 'loop': [ + 'lv_name', 'vg_name', 'thinpool', 'zfs_root' + ], + 'overlayfs': [ + 'lv_name', 'vg_name', 'fs_type', 'fs_size', 'thinpool', 'zfs_root' + ], + 'zfs': [ + 'lv_name', 'vg_name', 'fs_type', 'fs_size', 'thinpool' + ] +} + + +# LXC_LOGGING_LEVELS is a map of available log levels +LXC_LOGGING_LEVELS = { + 'INFO': ['info', 'INFO', 'Info'], + 'ERROR': ['error', 'ERROR', 'Error'], + 'DEBUG': ['debug', 'DEBUG', 'Debug'] +} + + +# LXC_ANSIBLE_STATES is a map of states that contain values of methods used +# when a particular state is evoked. +LXC_ANSIBLE_STATES = { + 'started': '_started', + 'stopped': '_stopped', + 'restarted': '_restarted', + 'absent': '_destroyed', + 'frozen': '_frozen', + 'clone': '_clone' +} + + +# This is used to attach to a running container and execute commands from +# within the container on the host. This will provide local access to a +# container without using SSH. The template will attempt to work within the +# home directory of the user that was attached to the container and source +# that users environment variables by default. +ATTACH_TEMPLATE = """#!/usr/bin/env bash +pushd "$(getent passwd $(whoami)|cut -f6 -d':')" + if [[ -f ".bashrc" ]];then + source .bashrc + unset HOSTNAME + fi +popd + +# User defined command +%(container_command)s +""" + + +def create_script(command): + """Write out a script onto a target. + + This method should be backward compatible with Python 2.4+ when executing + from within the container. + + :param command: command to run, this can be a script and can use spacing + with newlines as separation. + :type command: ``str`` + """ + + (fd, script_file) = tempfile.mkstemp(prefix='lxc-attach-script') + f = os.fdopen(fd, 'wb') + try: + f.write(to_bytes(ATTACH_TEMPLATE % {'container_command': command}, errors='surrogate_or_strict')) + f.flush() + finally: + f.close() + + # Ensure the script is executable. + os.chmod(script_file, int('0700', 8)) + + # Output log file. + stdout_file = os.fdopen(tempfile.mkstemp(prefix='lxc-attach-script-log')[0], 'ab') + + # Error log file. + stderr_file = os.fdopen(tempfile.mkstemp(prefix='lxc-attach-script-err')[0], 'ab') + + # Execute the script command. + try: + subprocess.Popen( + [script_file], + stdout=stdout_file, + stderr=stderr_file + ).communicate() + finally: + # Close the log files. + stderr_file.close() + stdout_file.close() + + # Remove the script file upon completion of execution. + os.remove(script_file) + + +class LxcContainerManagement(object): + def __init__(self, module): + """Management of LXC containers via Ansible. + + :param module: Processed Ansible Module. + :type module: ``object`` + """ + self.module = module + self.state = self.module.params.get('state', None) + self.state_change = False + self.lxc_vg = None + self.lxc_path = self.module.params.get('lxc_path', None) + self.container_name = self.module.params['name'] + self.container = self.get_container_bind() + self.archive_info = None + self.clone_info = None + + def get_container_bind(self): + return lxc.Container(name=self.container_name) + + @staticmethod + def _roundup(num): + """Return a rounded floating point number. + + :param num: Number to round up. + :type: ``float`` + :returns: Rounded up number. 
+ :rtype: ``int`` + """ + num, part = str(num).split('.') + num = int(num) + if int(part) != 0: + num += 1 + return num + + @staticmethod + def _container_exists(container_name, lxc_path=None): + """Check if a container exists. + + :param container_name: Name of the container. + :type: ``str`` + :returns: True or False if the container is found. + :rtype: ``bol`` + """ + if [i for i in lxc.list_containers(config_path=lxc_path) if i == container_name]: + return True + else: + return False + + @staticmethod + def _add_variables(variables_dict, build_command): + """Return a command list with all found options. + + :param variables_dict: Pre-parsed optional variables used from a + seed command. + :type variables_dict: ``dict`` + :param build_command: Command to run. + :type build_command: ``list`` + :returns: list of command options. + :rtype: ``list`` + """ + + for key, value in variables_dict.items(): + build_command.append( + '%s %s' % (key, value) + ) + return build_command + + def _get_vars(self, variables): + """Return a dict of all variables as found within the module. + + :param variables: Hash of all variables to find. + :type variables: ``dict`` + """ + + # Remove incompatible storage backend options. + variables = variables.copy() + for v in LXC_BACKING_STORE[self.module.params['backing_store']]: + variables.pop(v, None) + + return_dict = dict() + false_values = BOOLEANS_FALSE.union([None, '']) + for k, v in variables.items(): + _var = self.module.params.get(k) + if _var not in false_values: + return_dict[v] = _var + return return_dict + + def _run_command(self, build_command, unsafe_shell=False): + """Return information from running an Ansible Command. + + This will squash the build command list into a string and then + execute the command via Ansible. The output is returned to the method. + This output is returned as `return_code`, `stdout`, `stderr`. + + :param build_command: Used for the command and all options. + :type build_command: ``list`` + :param unsafe_shell: Enable or Disable unsafe sell commands. + :type unsafe_shell: ``bol`` + """ + + return self.module.run_command( + ' '.join(build_command), + use_unsafe_shell=unsafe_shell + ) + + def _config(self): + """Configure an LXC container. + + Write new configuration values to the lxc config file. This will + stop the container if it's running write the new options and then + restart the container upon completion. + """ + + _container_config = self.module.params.get('container_config') + if not _container_config: + return False + + container_config_file = self.container.config_file_name + with open(container_config_file, 'rb') as f: + container_config = to_text(f.read(), errors='surrogate_or_strict').splitlines(True) + + # Note used ast literal_eval because AnsibleModule does not provide for + # adequate dictionary parsing. + # Issue: https://github.com/ansible/ansible/issues/7679 + # TODO(cloudnull) adjust import when issue has been resolved. 
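+        # For instance, a container_config value of
+        #     "['lxc.aa_profile=unconfined', 'lxc.cgroup.devices.allow=a *:* rmw']"
+        # is literal_eval'd back into a list here, and each entry is then
+        # split once on '=' below.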
+ import ast + options_dict = ast.literal_eval(_container_config) + parsed_options = [i.split('=', 1) for i in options_dict] + + config_change = False + for key, value in parsed_options: + key = key.strip() + value = value.strip() + new_entry = '%s = %s\n' % (key, value) + keyre = re.compile(r'%s(\s+)?=' % key) + for option_line in container_config: + # Look for key in config + if keyre.match(option_line): + _, _value = option_line.split('=', 1) + config_value = ' '.join(_value.split()) + line_index = container_config.index(option_line) + # If the sanitized values don't match replace them + if value != config_value: + line_index += 1 + if new_entry not in container_config: + config_change = True + container_config.insert(line_index, new_entry) + # Break the flow as values are written or not at this point + break + else: + config_change = True + container_config.append(new_entry) + + # If the config changed restart the container. + if config_change: + container_state = self._get_state() + if container_state != 'stopped': + self.container.stop() + + with open(container_config_file, 'wb') as f: + f.writelines([to_bytes(line, errors='surrogate_or_strict') for line in container_config]) + + self.state_change = True + if container_state == 'running': + self._container_startup() + elif container_state == 'frozen': + self._container_startup() + self.container.freeze() + + def _container_create_clone(self): + """Clone a new LXC container from an existing container. + + This method will clone an existing container to a new container using + the `clone_name` variable as the new container name. The method will + create a container if the container `name` does not exist. + + Note that cloning a container will ensure that the original container + is "stopped" before the clone can be done. Because this operation can + require a state change the method will return the original container + to its prior state upon completion of the clone. + + Once the clone is complete the new container will be left in a stopped + state. + """ + + # Ensure that the state of the original container is stopped + container_state = self._get_state() + if container_state != 'stopped': + self.state_change = True + self.container.stop() + + # lxc-clone is deprecated in favor of lxc-copy + clone_vars = 'variables-lxc-copy' + clone_cmd = self.module.get_bin_path('lxc-copy') + if not clone_cmd: + clone_vars = 'variables-lxc-clone' + clone_cmd = self.module.get_bin_path('lxc-clone', True) + + build_command = [ + clone_cmd, + ] + + build_command = self._add_variables( + variables_dict=self._get_vars( + variables=LXC_COMMAND_MAP['clone'][clone_vars] + ), + build_command=build_command + ) + + # Load logging for the instance when creating it. + if self.module.params.get('clone_snapshot') in BOOLEANS_TRUE: + build_command.append('--snapshot') + # Check for backing_store == overlayfs if so force the use of snapshot + # If overlay fs is used and snapshot is unset the clone command will + # fail with an unsupported type. + elif self.module.params.get('backing_store') == 'overlayfs': + build_command.append('--snapshot') + + rc, return_data, err = self._run_command(build_command) + if rc != 0: + message = "Failed executing %s." % os.path.basename(clone_cmd) + self.failure( + err=err, rc=rc, msg=message, command=' '.join( + build_command + ) + ) + else: + self.state_change = True + # Restore the original state of the origin container if it was + # not in a stopped state. 
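+            # (a previously frozen container is started first and then
+            # re-frozen; a container that was running is simply started)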
+ if container_state == 'running': + self.container.start() + elif container_state == 'frozen': + self.container.start() + self.container.freeze() + + return True + + def _create(self): + """Create a new LXC container. + + This method will build and execute a shell command to build the + container. It would have been nice to simply use the lxc python library + however at the time this was written the python library, in both py2 + and py3 didn't support some of the more advanced container create + processes. These missing processes mainly revolve around backing + LXC containers with block devices. + """ + + build_command = [ + self.module.get_bin_path('lxc-create', True), + '--name %s' % self.container_name, + '--quiet' + ] + + build_command = self._add_variables( + variables_dict=self._get_vars( + variables=LXC_COMMAND_MAP['create']['variables'] + ), + build_command=build_command + ) + + # Load logging for the instance when creating it. + if self.module.params.get('container_log') in BOOLEANS_TRUE: + # Set the logging path to the /var/log/lxc if uid is root. else + # set it to the home folder of the user executing. + try: + if os.getuid() != 0: + log_path = os.getenv('HOME') + else: + if not os.path.isdir('/var/log/lxc/'): + os.makedirs('/var/log/lxc/') + log_path = '/var/log/lxc/' + except OSError: + log_path = os.getenv('HOME') + + build_command.extend([ + '--logfile %s' % os.path.join( + log_path, 'lxc-%s.log' % self.container_name + ), + '--logpriority %s' % self.module.params.get( + 'container_log_level' + ).upper() + ]) + + # Add the template commands to the end of the command if there are any + template_options = self.module.params.get('template_options', None) + if template_options: + build_command.append('-- %s' % template_options) + + rc, return_data, err = self._run_command(build_command) + if rc != 0: + message = "Failed executing lxc-create." + self.failure( + err=err, rc=rc, msg=message, command=' '.join(build_command) + ) + else: + self.state_change = True + + def _container_data(self): + """Returns a dict of container information. + + :returns: container data + :rtype: ``dict`` + """ + + return { + 'interfaces': self.container.get_interfaces(), + 'ips': self.container.get_ips(), + 'state': self._get_state(), + 'init_pid': int(self.container.init_pid), + 'name': self.container_name, + } + + def _unfreeze(self): + """Unfreeze a container. + + :returns: True or False based on if the container was unfrozen. + :rtype: ``bol`` + """ + + unfreeze = self.container.unfreeze() + if unfreeze: + self.state_change = True + return unfreeze + + def _get_state(self): + """Return the state of a container. + + If the container is not found the state returned is "absent" + + :returns: state of a container as a lower case string. + :rtype: ``str`` + """ + + if self._container_exists(container_name=self.container_name, lxc_path=self.lxc_path): + return str(self.container.state).lower() + else: + return str('absent') + + def _execute_command(self): + """Execute a shell command.""" + + container_command = self.module.params.get('container_command') + if container_command: + container_state = self._get_state() + if container_state == 'frozen': + self._unfreeze() + elif container_state == 'stopped': + self._container_startup() + + self.container.attach_wait(create_script, container_command) + self.state_change = True + + def _container_startup(self, timeout=60): + """Ensure a container is started. + + :param timeout: Time before the destroy operation is abandoned. 
+ :type timeout: ``int`` + """ + + self.container = self.get_container_bind() + for _ in xrange(timeout): + if self._get_state() != 'running': + self.container.start() + self.state_change = True + # post startup sleep for 1 second. + time.sleep(1) + else: + return True + self.failure( + lxc_container=self._container_data(), + error='Failed to start container' + ' [ %s ]' % self.container_name, + rc=1, + msg='The container [ %s ] failed to start. Check to lxc is' + ' available and that the container is in a functional' + ' state.' % self.container_name + ) + + def _check_archive(self): + """Create a compressed archive of a container. + + This will store archive_info in as self.archive_info + """ + + if self.module.params.get('archive') in BOOLEANS_TRUE: + self.archive_info = { + 'archive': self._container_create_tar() + } + + def _check_clone(self): + """Create a compressed archive of a container. + + This will store archive_info in as self.archive_info + """ + + clone_name = self.module.params.get('clone_name') + if clone_name: + if not self._container_exists(container_name=clone_name, lxc_path=self.lxc_path): + self.clone_info = { + 'cloned': self._container_create_clone() + } + else: + self.clone_info = { + 'cloned': False + } + + def _destroyed(self, timeout=60): + """Ensure a container is destroyed. + + :param timeout: Time before the destroy operation is abandoned. + :type timeout: ``int`` + """ + + for _ in xrange(timeout): + if not self._container_exists(container_name=self.container_name, lxc_path=self.lxc_path): + break + + # Check if the container needs to have an archive created. + self._check_archive() + + # Check if the container is to be cloned + self._check_clone() + + if self._get_state() != 'stopped': + self.state_change = True + self.container.stop() + + if self.container.destroy(): + self.state_change = True + + # post destroy attempt sleep for 1 second. + time.sleep(1) + else: + self.failure( + lxc_container=self._container_data(), + error='Failed to destroy container' + ' [ %s ]' % self.container_name, + rc=1, + msg='The container [ %s ] failed to be destroyed. Check' + ' that lxc is available and that the container is in a' + ' functional state.' % self.container_name + ) + + def _frozen(self, count=0): + """Ensure a container is frozen. + + If the container does not exist the container will be created. + + :param count: number of times this command has been called by itself. + :type count: ``int`` + """ + + self.check_count(count=count, method='frozen') + if self._container_exists(container_name=self.container_name, lxc_path=self.lxc_path): + self._execute_command() + + # Perform any configuration updates + self._config() + + container_state = self._get_state() + if container_state == 'frozen': + pass + elif container_state == 'running': + self.container.freeze() + self.state_change = True + else: + self._container_startup() + self.container.freeze() + self.state_change = True + + # Check if the container needs to have an archive created. + self._check_archive() + + # Check if the container is to be cloned + self._check_clone() + else: + self._create() + count += 1 + self._frozen(count) + + def _restarted(self, count=0): + """Ensure a container is restarted. + + If the container does not exist the container will be created. + + :param count: number of times this command has been called by itself. 
+ :type count: ``int`` + """ + + self.check_count(count=count, method='restart') + if self._container_exists(container_name=self.container_name, lxc_path=self.lxc_path): + self._execute_command() + + # Perform any configuration updates + self._config() + + if self._get_state() != 'stopped': + self.container.stop() + self.state_change = True + + # Run container startup + self._container_startup() + + # Check if the container needs to have an archive created. + self._check_archive() + + # Check if the container is to be cloned + self._check_clone() + else: + self._create() + count += 1 + self._restarted(count) + + def _stopped(self, count=0): + """Ensure a container is stopped. + + If the container does not exist the container will be created. + + :param count: number of times this command has been called by itself. + :type count: ``int`` + """ + + self.check_count(count=count, method='stop') + if self._container_exists(container_name=self.container_name, lxc_path=self.lxc_path): + self._execute_command() + + # Perform any configuration updates + self._config() + + if self._get_state() != 'stopped': + self.container.stop() + self.state_change = True + + # Check if the container needs to have an archive created. + self._check_archive() + + # Check if the container is to be cloned + self._check_clone() + else: + self._create() + count += 1 + self._stopped(count) + + def _started(self, count=0): + """Ensure a container is started. + + If the container does not exist the container will be created. + + :param count: number of times this command has been called by itself. + :type count: ``int`` + """ + + self.check_count(count=count, method='start') + if self._container_exists(container_name=self.container_name, lxc_path=self.lxc_path): + container_state = self._get_state() + if container_state == 'running': + pass + elif container_state == 'frozen': + self._unfreeze() + elif not self._container_startup(): + self.failure( + lxc_container=self._container_data(), + error='Failed to start container' + ' [ %s ]' % self.container_name, + rc=1, + msg='The container [ %s ] failed to start. Check to lxc is' + ' available and that the container is in a functional' + ' state.' % self.container_name + ) + + # Return data + self._execute_command() + + # Perform any configuration updates + self._config() + + # Check if the container needs to have an archive created. + self._check_archive() + + # Check if the container is to be cloned + self._check_clone() + else: + self._create() + count += 1 + self._started(count) + + def _get_lxc_vg(self): + """Return the name of the Volume Group used in LXC.""" + + build_command = [ + self.module.get_bin_path('lxc-config', True), + "lxc.bdev.lvm.vg" + ] + rc, vg, err = self._run_command(build_command) + if rc != 0: + self.failure( + err=err, + rc=rc, + msg='Failed to read LVM VG from LXC config', + command=' '.join(build_command) + ) + else: + return str(vg.strip()) + + def _lvm_lv_list(self): + """Return a list of all lv in a current vg.""" + + vg = self._get_lxc_vg() + build_command = [ + self.module.get_bin_path('lvs', True) + ] + rc, stdout, err = self._run_command(build_command) + if rc != 0: + self.failure( + err=err, + rc=rc, + msg='Failed to get list of LVs', + command=' '.join(build_command) + ) + + all_lvms = [i.split() for i in stdout.splitlines()][1:] + return [lv_entry[0] for lv_entry in all_lvms if lv_entry[1] == vg] + + def _get_vg_free_pe(self, vg_name): + """Return the available size of a given VG. + + :param vg_name: Name of volume. 
+        :type vg_name: ``str``
+        :returns: size and measurement of an LV
+        :rtype: ``tuple``
+        """
+
+        build_command = [
+            'vgdisplay',
+            vg_name,
+            '--units',
+            'g'
+        ]
+        rc, stdout, err = self._run_command(build_command)
+        if rc != 0:
+            self.failure(
+                err=err,
+                rc=rc,
+                msg='failed to read vg %s' % vg_name,
+                command=' '.join(build_command)
+            )
+
+        vg_info = [i.strip() for i in stdout.splitlines()][1:]
+        free_pe = [i for i in vg_info if i.startswith('Free')]
+        _free_pe = free_pe[0].split()
+        return float(_free_pe[-2]), _free_pe[-1]
+
+    def _get_lv_size(self, lv_name):
+        """Return the available size of a given LV.
+
+        :param lv_name: Name of volume.
+        :type lv_name: ``str``
+        :returns: size and measurement of an LV
+        :rtype: ``tuple``
+        """
+
+        vg = self._get_lxc_vg()
+        lv = os.path.join(vg, lv_name)
+        build_command = [
+            'lvdisplay',
+            lv,
+            '--units',
+            'g'
+        ]
+        rc, stdout, err = self._run_command(build_command)
+        if rc != 0:
+            self.failure(
+                err=err,
+                rc=rc,
+                msg='failed to read lv %s' % lv,
+                command=' '.join(build_command)
+            )
+
+        lv_info = [i.strip() for i in stdout.splitlines()][1:]
+        _free_pe = [i for i in lv_info if i.startswith('LV Size')]
+        free_pe = _free_pe[0].split()
+        return self._roundup(float(free_pe[-2])), free_pe[-1]
+
+    def _lvm_snapshot_create(self, source_lv, snapshot_name,
+                             snapshot_size_gb=5):
+        """Create an LVM snapshot.
+
+        :param source_lv: Name of lv to snapshot
+        :type source_lv: ``str``
+        :param snapshot_name: Name of lv snapshot
+        :type snapshot_name: ``str``
+        :param snapshot_size_gb: Size of snapshot to create
+        :type snapshot_size_gb: ``int``
+        """
+
+        vg = self._get_lxc_vg()
+        free_space, measurement = self._get_vg_free_pe(vg_name=vg)
+
+        if free_space < float(snapshot_size_gb):
+            message = (
+                'Snapshot size [ %s ] is greater than the free space [ %s ]'
+                ' on volume group [ %s ]' % (snapshot_size_gb, free_space, vg)
+            )
+            self.failure(
+                error='Not enough space to create snapshot',
+                rc=2,
+                msg=message
+            )
+
+        # Create LVM Snapshot
+        build_command = [
+            self.module.get_bin_path('lvcreate', True),
+            "-n",
+            snapshot_name,
+            "-s",
+            os.path.join(vg, source_lv),
+            "-L%sg" % snapshot_size_gb
+        ]
+        rc, stdout, err = self._run_command(build_command)
+        if rc != 0:
+            self.failure(
+                err=err,
+                rc=rc,
+                msg='Failed to create LVM snapshot %s/%s --> %s'
+                    % (vg, source_lv, snapshot_name)
+            )
+
+    def _lvm_lv_mount(self, lv_name, mount_point):
+        """Mount an LV.
+
+        :param lv_name: name of the logical volume to mount
+        :type lv_name: ``str``
+        :param mount_point: path on the file system that is mounted.
+        :type mount_point: ``str``
+        """
+
+        vg = self._get_lxc_vg()
+
+        build_command = [
+            self.module.get_bin_path('mount', True),
+            "/dev/%s/%s" % (vg, lv_name),
+            mount_point,
+        ]
+        rc, stdout, err = self._run_command(build_command)
+        if rc != 0:
+            self.failure(
+                err=err,
+                rc=rc,
+                msg='failed to mount LVM LV %s/%s to %s'
+                    % (vg, lv_name, mount_point)
+            )
+
+    def _create_tar(self, source_dir):
+        """Create an archive of a given ``source_dir`` to ``output_path``.
+
+        :param source_dir: Path to the directory to be archived.
+        :type source_dir: ``str``
+        """
+
+        old_umask = os.umask(int('0077', 8))
+
+        archive_path = self.module.params.get('archive_path')
+        if not os.path.isdir(archive_path):
+            os.makedirs(archive_path)
+
+        archive_compression = self.module.params.get('archive_compression')
+        compression_type = LXC_COMPRESSION_MAP[archive_compression]
+
+        # Build the archive name from the archive path, the container name
+        # and the compression type's extension.
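+        # e.g. archive_path=/opt/archives with container "web1" and gzip
+        # compression yields /opt/archives/web1.tar.tgz (note the historical
+        # "tar.tgz" double extension defined in LXC_COMPRESSION_MAP).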
+ archive_name = '%s.%s' % ( + os.path.join( + archive_path, + self.container_name + ), + compression_type['extension'] + ) + + build_command = [ + self.module.get_bin_path('tar', True), + '--directory=%s' % os.path.realpath( + os.path.expanduser(source_dir) + ), + compression_type['argument'], + archive_name, + '.' + ] + + rc, stdout, err = self._run_command( + build_command=build_command, + unsafe_shell=True + ) + + os.umask(old_umask) + + if rc != 0: + self.failure( + err=err, + rc=rc, + msg='failed to create tar archive', + command=' '.join(build_command) + ) + + return archive_name + + def _lvm_lv_remove(self, lv_name): + """Remove an LV. + + :param lv_name: The name of the logical volume + :type lv_name: ``str`` + """ + + vg = self._get_lxc_vg() + build_command = [ + self.module.get_bin_path('lvremove', True), + "-f", + "%s/%s" % (vg, lv_name), + ] + rc, stdout, err = self._run_command(build_command) + if rc != 0: + self.failure( + err=err, + rc=rc, + msg='Failed to remove LVM LV %s/%s' % (vg, lv_name), + command=' '.join(build_command) + ) + + def _rsync_data(self, container_path, temp_dir): + """Sync the container directory to the temp directory. + + :param container_path: path to the container container + :type container_path: ``str`` + :param temp_dir: path to the temporary local working directory + :type temp_dir: ``str`` + """ + # This loop is created to support overlayfs archives. This should + # squash all of the layers into a single archive. + fs_paths = container_path.split(':') + if 'overlayfs' in fs_paths: + fs_paths.pop(fs_paths.index('overlayfs')) + + for fs_path in fs_paths: + # Set the path to the container data + fs_path = os.path.dirname(fs_path) + + # Run the sync command + build_command = [ + self.module.get_bin_path('rsync', True), + '-aHAX', + fs_path, + temp_dir + ] + rc, stdout, err = self._run_command( + build_command, + unsafe_shell=True + ) + if rc != 0: + self.failure( + err=err, + rc=rc, + msg='failed to perform archive', + command=' '.join(build_command) + ) + + def _unmount(self, mount_point): + """Unmount a file system. + + :param mount_point: path on the file system that is mounted. + :type mount_point: ``str`` + """ + + build_command = [ + self.module.get_bin_path('umount', True), + mount_point, + ] + rc, stdout, err = self._run_command(build_command) + if rc != 0: + self.failure( + err=err, + rc=rc, + msg='failed to unmount [ %s ]' % mount_point, + command=' '.join(build_command) + ) + + def _overlayfs_mount(self, lowerdir, upperdir, mount_point): + """mount an lv. + + :param lowerdir: name/path of the lower directory + :type lowerdir: ``str`` + :param upperdir: name/path of the upper directory + :type upperdir: ``str`` + :param mount_point: path on the file system that is mounted. + :type mount_point: ``str`` + """ + + build_command = [ + self.module.get_bin_path('mount', True), + '-t overlayfs', + '-o lowerdir=%s,upperdir=%s' % (lowerdir, upperdir), + 'overlayfs', + mount_point, + ] + rc, stdout, err = self._run_command(build_command) + if rc != 0: + self.failure( + err=err, + rc=rc, + msg='failed to mount overlayfs:%s:%s to %s -- Command: %s' + % (lowerdir, upperdir, mount_point, build_command) + ) + + def _container_create_tar(self): + """Create a tar archive from an LXC container. 
+    def _container_create_tar(self):
+        """Create a tar archive from an LXC container.
+
+        The process is as follows:
+            * Stop or Freeze the container
+            * Create temporary dir
+            * Copy container and config to temporary directory
+            * If LVM backed:
+                * Create LVM snapshot of LV backing the container
+                * Mount the snapshot to tmpdir/rootfs
+            * Restore the state of the container
+            * Create tar of tmpdir
+            * Clean up
+        """
+
+        # Create a temp dir
+        temp_dir = tempfile.mkdtemp()
+
+        # Set the name of the working dir, temp + container_name
+        work_dir = os.path.join(temp_dir, self.container_name)
+
+        # LXC container rootfs
+        lxc_rootfs = self.container.get_config_item('lxc.rootfs')
+
+        # Test if the containers rootfs is a block device
+        block_backed = lxc_rootfs.startswith(os.path.join(os.sep, 'dev'))
+
+        # Test if the container is using overlayfs
+        overlayfs_backed = lxc_rootfs.startswith('overlayfs')
+
+        mount_point = os.path.join(work_dir, 'rootfs')
+
+        # Set the snapshot name if needed
+        snapshot_name = '%s_lxc_snapshot' % self.container_name
+
+        container_state = self._get_state()
+        try:
+            # Ensure the original container is stopped or frozen
+            if container_state not in ['stopped', 'frozen']:
+                if container_state == 'running':
+                    self.container.freeze()
+                else:
+                    self.container.stop()
+
+            # Sync the container data from the container_path to work_dir
+            self._rsync_data(lxc_rootfs, temp_dir)
+
+            if block_backed:
+                if snapshot_name not in self._lvm_lv_list():
+                    if not os.path.exists(mount_point):
+                        os.makedirs(mount_point)
+
+                    # Take snapshot
+                    size, measurement = self._get_lv_size(
+                        lv_name=self.container_name
+                    )
+                    self._lvm_snapshot_create(
+                        source_lv=self.container_name,
+                        snapshot_name=snapshot_name,
+                        snapshot_size_gb=size
+                    )
+
+                    # Mount snapshot
+                    self._lvm_lv_mount(
+                        lv_name=snapshot_name,
+                        mount_point=mount_point
+                    )
+                else:
+                    self.failure(
+                        err='snapshot [ %s ] already exists' % snapshot_name,
+                        rc=1,
+                        msg='The snapshot [ %s ] already exists. Please clean'
+                            ' up old snapshots of containers before continuing.'
+                            % snapshot_name
+                    )
+            elif overlayfs_backed:
+                lowerdir, upperdir = lxc_rootfs.split(':')[1:]
+                self._overlayfs_mount(
+                    lowerdir=lowerdir,
+                    upperdir=upperdir,
+                    mount_point=mount_point
+                )
+
+            # Set the state as changed and set a new fact
+            self.state_change = True
+            return self._create_tar(source_dir=work_dir)
+        finally:
+            if block_backed or overlayfs_backed:
+                # unmount snapshot
+                self._unmount(mount_point)
+
+            if block_backed:
+                # Remove snapshot
+                self._lvm_lv_remove(snapshot_name)
+
+            # Restore original state of container
+            if container_state == 'running':
+                if self._get_state() == 'frozen':
+                    self.container.unfreeze()
+                else:
+                    self.container.start()
+
+            # Remove tmpdir
+            shutil.rmtree(temp_dir)
+
+    def check_count(self, count, method):
+        if count > 1:
+            self.failure(
+                error='Failed to %s container' % method,
+                rc=1,
+                msg='The container [ %s ] failed to %s. Check that lxc is'
+                    ' available and that the container is in a functional'
+                    ' state.' % (self.container_name, method)
+            )
+
+    def failure(self, **kwargs):
+        """Return a Failure when running an Ansible command.
+
+        :param error: ``str`` Error that occurred.
+        :param rc: ``int`` Return code while executing an Ansible command.
+        :param msg: ``str`` Message to report.
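+        :param command: ``str`` Command that was executed (optional).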
+ """ + + self.module.fail_json(**kwargs) + + def run(self): + """Run the main method.""" + + action = getattr(self, LXC_ANSIBLE_STATES[self.state]) + action() + + outcome = self._container_data() + if self.archive_info: + outcome.update(self.archive_info) + + if self.clone_info: + outcome.update(self.clone_info) + + self.module.exit_json( + changed=self.state_change, + lxc_container=outcome + ) + + +def main(): + """Ansible Main module.""" + + module = AnsibleModule( + argument_spec=dict( + name=dict( + type='str', + required=True + ), + template=dict( + type='str', + default='ubuntu' + ), + backing_store=dict( + type='str', + choices=LXC_BACKING_STORE.keys(), + default='dir' + ), + template_options=dict( + type='str' + ), + config=dict( + type='path', + ), + vg_name=dict( + type='str', + default='lxc' + ), + thinpool=dict( + type='str' + ), + fs_type=dict( + type='str', + default='ext4' + ), + fs_size=dict( + type='str', + default='5G' + ), + directory=dict( + type='path' + ), + zfs_root=dict( + type='str' + ), + lv_name=dict( + type='str' + ), + lxc_path=dict( + type='path' + ), + state=dict( + choices=LXC_ANSIBLE_STATES.keys(), + default='started' + ), + container_command=dict( + type='str' + ), + container_config=dict( + type='str' + ), + container_log=dict( + type='bool', + default='false' + ), + container_log_level=dict( + choices=[n for i in LXC_LOGGING_LEVELS.values() for n in i], + default='INFO' + ), + clone_name=dict( + type='str', + required=False + ), + clone_snapshot=dict( + type='bool', + default='false' + ), + archive=dict( + type='bool', + default='false' + ), + archive_path=dict( + type='path', + ), + archive_compression=dict( + choices=LXC_COMPRESSION_MAP.keys(), + default='gzip' + ) + ), + supports_check_mode=False, + required_if=([ + ('archive', True, ['archive_path']) + ]), + ) + + if not HAS_LXC: + module.fail_json( + msg='The `lxc` module is not importable. Check the requirements.' + ) + + lv_name = module.params.get('lv_name') + if not lv_name: + module.params['lv_name'] = module.params.get('name') + + lxc_manage = LxcContainerManagement(module=module) + lxc_manage.run() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/lxd/lxd_container.py b/plugins/modules/cloud/lxd/lxd_container.py new file mode 100644 index 0000000000..c492493ebe --- /dev/null +++ b/plugins/modules/cloud/lxd/lxd_container.py @@ -0,0 +1,655 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2016, Hiroaki Nakamura +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: lxd_container +short_description: Manage LXD Containers +description: + - Management of LXD containers +author: "Hiroaki Nakamura (@hnakamur)" +options: + name: + description: + - Name of a container. + required: true + architecture: + description: + - The architecture for the container (e.g. "x86_64" or "i686"). + See U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#post-1) + required: false + config: + description: + - 'The config for the container (e.g. {"limits.cpu": "2"}). 
+        See U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#post-1)'
+      - If the container already exists and its "config" value in metadata
+        obtained from
+        GET /1.0/containers/
+        U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#10containersname)
+        is different, then this module tries to apply the configurations.
+      - Keys starting with 'volatile.' are ignored for this comparison.
+      - Not all config values can be applied to an existing container;
+        you may need to delete and recreate the container.
+    required: false
+  devices:
+    description:
+      - 'The devices for the container
+        (e.g. { "rootfs": { "path": "/dev/kvm", "type": "unix-char" }}).
+        See U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#post-1)'
+    required: false
+  ephemeral:
+    description:
+      - Whether or not the container is ephemeral (e.g. true or false).
+        See U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#post-1)
+    required: false
+    type: bool
+  source:
+    description:
+      - 'The source for the container
+        (e.g. { "type": "image",
+        "mode": "pull",
+        "server": "https://images.linuxcontainers.org",
+        "protocol": "lxd",
+        "alias": "ubuntu/xenial/amd64" }).'
+      - 'See U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#post-1) for complete API documentation.'
+      - 'Note that C(protocol) accepts two choices: C(lxd) or C(simplestreams)'
+    required: false
+  state:
+    choices:
+      - started
+      - stopped
+      - restarted
+      - absent
+      - frozen
+    description:
+      - Define the state of a container.
+    required: false
+    default: started
+  timeout:
+    description:
+      - A timeout for changing the state of the container.
+      - This is also used as a timeout for waiting until IPv4 addresses
+        are set on all the network interfaces in the container after
+        starting or restarting.
+    required: false
+    default: 30
+  wait_for_ipv4_addresses:
+    description:
+      - If this is true, the C(lxd_container) waits until IPv4 addresses
+        are set on all the network interfaces in the container after
+        starting or restarting.
+    required: false
+    default: false
+    type: bool
+  force_stop:
+    description:
+      - If this is true, the C(lxd_container) forces the container to stop
+        when it stops or restarts the container.
+    required: false
+    default: false
+    type: bool
+  url:
+    description:
+      - The unix domain socket path or the https URL for the LXD server.
+    required: false
+    default: unix:/var/lib/lxd/unix.socket
+  snap_url:
+    description:
+      - The unix domain socket path when LXD is installed by snap package manager.
+    required: false
+    default: unix:/var/snap/lxd/common/lxd/unix.socket
+  client_key:
+    description:
+      - The client certificate key file path.
+    required: false
+    default: '"{}/.config/lxc/client.key" .format(os.environ["HOME"])'
+    aliases: [ key_file ]
+  client_cert:
+    description:
+      - The client certificate file path.
+    required: false
+    default: '"{}/.config/lxc/client.crt" .format(os.environ["HOME"])'
+    aliases: [ cert_file ]
+  trust_password:
+    description:
+      - The client trusted password.
+      - You need to set this password on the LXD server before
+        running this module using the following command.
+        lxc config set core.trust_password
+        See U(https://www.stgraber.org/2016/04/18/lxd-api-direct-interaction/)
+      - If trust_password is set, this module sends a request for
+        authentication before sending any requests.
+    required: false
+notes:
+  - Containers must have a unique name. If you attempt to create a container
+    with a name that already exists in the user's namespace the module will
+    simply return as "unchanged".
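+  - This module does not support check mode (C(supports_check_mode) is set
+    to false in the module's argument handling).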
+  - There are two ways to run commands in containers: using the command
+    module or using the ansible lxd connection plugin bundled in Ansible >=
+    2.1; the latter requires Python to be installed in the container, which
+    can be done with the command module.
+  - You can copy a file from the host to the container
+    with the Ansible M(copy) and M(template) modules and the `lxd` connection plugin.
+    See the example below.
+  - You can copy a file in the created container to the localhost
+    with `command=lxc file pull container_name/dir/filename filename`.
+    See the first example below.
+'''
+
+EXAMPLES = '''
+# An example for creating an Ubuntu container and installing python
+- hosts: localhost
+  connection: local
+  tasks:
+    - name: Create a started container
+      lxd_container:
+        name: mycontainer
+        state: started
+        source:
+          type: image
+          mode: pull
+          server: https://images.linuxcontainers.org
+          protocol: lxd # if you get a 404, try setting protocol: simplestreams
+          alias: ubuntu/xenial/amd64
+        profiles: ["default"]
+        wait_for_ipv4_addresses: true
+        timeout: 600
+
+    - name: check python is installed in container
+      delegate_to: mycontainer
+      raw: dpkg -s python
+      register: python_install_check
+      failed_when: python_install_check.rc not in [0, 1]
+      changed_when: false
+
+    - name: install python in container
+      delegate_to: mycontainer
+      raw: apt-get install -y python
+      when: python_install_check.rc == 1
+
+# An example for creating an Ubuntu 14.04 container using an image fingerprint.
+# This requires changing 'server' and 'protocol' key values, replacing the
+# 'alias' key with 'fingerprint' and supplying an appropriate value that
+# matches the container image you wish to use.
+- hosts: localhost
+  connection: local
+  tasks:
+    - name: Create a started container
+      lxd_container:
+        name: mycontainer
+        state: started
+        source:
+          type: image
+          mode: pull
+          # Provides current (and older) Ubuntu images with listed fingerprints
+          server: https://cloud-images.ubuntu.com/releases
+          # Protocol used by 'ubuntu' remote (as shown by 'lxc remote list')
+          protocol: simplestreams
+          # This provides an Ubuntu 14.04 LTS amd64 image from 20150814.
+          fingerprint: e9a8bdfab6dc
+        profiles: ["default"]
+        wait_for_ipv4_addresses: true
+        timeout: 600
+
+# An example for deleting a container
+- hosts: localhost
+  connection: local
+  tasks:
+    - name: Delete a container
+      lxd_container:
+        name: mycontainer
+        state: absent
+
+# An example for restarting a container
+- hosts: localhost
+  connection: local
+  tasks:
+    - name: Restart a container
+      lxd_container:
+        name: mycontainer
+        state: restarted
+
+# An example for restarting a container using https to connect to the LXD server
+- hosts: localhost
+  connection: local
+  tasks:
+    - name: Restart a container
+      lxd_container:
+        url: https://127.0.0.1:8443
+        # These client_cert and client_key values are equal to the default values.
+        #client_cert: "{{ lookup('env', 'HOME') }}/.config/lxc/client.crt"
+        #client_key: "{{ lookup('env', 'HOME') }}/.config/lxc/client.key"
+        trust_password: mypassword
+        name: mycontainer
+        state: restarted
+
+# Note your container must be in the inventory for the below example.
+# +# [containers] +# mycontainer ansible_connection=lxd +# +- hosts: + - mycontainer + tasks: + - name: copy /etc/hosts in the created container to localhost with name "mycontainer-hosts" + fetch: + src: /etc/hosts + dest: /tmp/mycontainer-hosts + flat: true +''' + +RETURN = ''' +addresses: + description: Mapping from the network device name to a list of IPv4 addresses in the container + returned: when state is started or restarted + type: dict + sample: {"eth0": ["10.155.92.191"]} +old_state: + description: The old state of the container + returned: when state is started or restarted + type: str + sample: "stopped" +logs: + description: The logs of requests and responses. + returned: when ansible-playbook is invoked with -vvvv. + type: list + sample: "(too long to be placed here)" +actions: + description: List of actions performed for the container. + returned: success + type: list + sample: '["create", "start"]' +''' +import datetime +import os +import time + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.lxd import LXDClient, LXDClientException + + +# LXD_ANSIBLE_STATES is a map of states that contain values of methods used +# when a particular state is evoked. +LXD_ANSIBLE_STATES = { + 'started': '_started', + 'stopped': '_stopped', + 'restarted': '_restarted', + 'absent': '_destroyed', + 'frozen': '_frozen' +} + +# ANSIBLE_LXD_STATES is a map of states of lxd containers to the Ansible +# lxc_container module state parameter value. +ANSIBLE_LXD_STATES = { + 'Running': 'started', + 'Stopped': 'stopped', + 'Frozen': 'frozen', +} + +# ANSIBLE_LXD_DEFAULT_URL is a default value of the lxd endpoint +ANSIBLE_LXD_DEFAULT_URL = 'unix:/var/lib/lxd/unix.socket' + +# CONFIG_PARAMS is a list of config attribute names. +CONFIG_PARAMS = [ + 'architecture', 'config', 'devices', 'ephemeral', 'profiles', 'source' +] + + +class LXDContainerManagement(object): + def __init__(self, module): + """Management of LXC containers via Ansible. + + :param module: Processed Ansible Module. 
+        :type module: ``object``
+        """
+        self.module = module
+        self.name = self.module.params['name']
+        self._build_config()
+
+        self.state = self.module.params['state']
+
+        self.timeout = self.module.params['timeout']
+        self.wait_for_ipv4_addresses = self.module.params['wait_for_ipv4_addresses']
+        self.force_stop = self.module.params['force_stop']
+        self.addresses = None
+
+        self.key_file = self.module.params.get('client_key', None)
+        self.cert_file = self.module.params.get('client_cert', None)
+        self.debug = self.module._verbosity >= 4
+
+        try:
+            if self.module.params['url'] != ANSIBLE_LXD_DEFAULT_URL:
+                self.url = self.module.params['url']
+            elif os.path.exists(self.module.params['snap_url'].replace('unix:', '')):
+                self.url = self.module.params['snap_url']
+            else:
+                self.url = self.module.params['url']
+        except Exception as e:
+            self.module.fail_json(msg=e.msg)
+
+        try:
+            self.client = LXDClient(
+                self.url, key_file=self.key_file, cert_file=self.cert_file,
+                debug=self.debug
+            )
+        except LXDClientException as e:
+            self.module.fail_json(msg=e.msg)
+        self.trust_password = self.module.params.get('trust_password', None)
+        self.actions = []
+
+    def _build_config(self):
+        self.config = {}
+        for attr in CONFIG_PARAMS:
+            param_val = self.module.params.get(attr, None)
+            if param_val is not None:
+                self.config[attr] = param_val
+
+    def _get_container_json(self):
+        return self.client.do(
+            'GET', '/1.0/containers/{0}'.format(self.name),
+            ok_error_codes=[404]
+        )
+
+    def _get_container_state_json(self):
+        return self.client.do(
+            'GET', '/1.0/containers/{0}/state'.format(self.name),
+            ok_error_codes=[404]
+        )
+
+    @staticmethod
+    def _container_json_to_module_state(resp_json):
+        if resp_json['type'] == 'error':
+            return 'absent'
+        return ANSIBLE_LXD_STATES[resp_json['metadata']['status']]
+
+    def _change_state(self, action, force_stop=False):
+        body_json = {'action': action, 'timeout': self.timeout}
+        if force_stop:
+            body_json['force'] = True
+        return self.client.do('PUT', '/1.0/containers/{0}/state'.format(self.name), body_json=body_json)
+
+    def _create_container(self):
+        config = self.config.copy()
+        config['name'] = self.name
+        self.client.do('POST', '/1.0/containers', config)
+        self.actions.append('create')
+
+    def _start_container(self):
+        self._change_state('start')
+        self.actions.append('start')
+
+    def _stop_container(self):
+        self._change_state('stop', self.force_stop)
+        self.actions.append('stop')
+
+    def _restart_container(self):
+        self._change_state('restart', self.force_stop)
+        self.actions.append('restart')
+
+    def _delete_container(self):
+        self.client.do('DELETE', '/1.0/containers/{0}'.format(self.name))
+        self.actions.append('delete')
+
+    def _freeze_container(self):
+        self._change_state('freeze')
+        self.actions.append('freeze')
+
+    def _unfreeze_container(self):
+        self._change_state('unfreeze')
+        self.actions.append('unfreeze')
+
+    def _container_ipv4_addresses(self, ignore_devices=None):
+        ignore_devices = ['lo'] if ignore_devices is None else ignore_devices
+
+        resp_json = self._get_container_state_json()
+        network = resp_json['metadata']['network'] or {}
+        network = dict((k, v) for k, v in network.items() if k not in ignore_devices) or {}
+        addresses = dict((k, [a['address'] for a in v['addresses'] if a['family'] == 'inet']) for k, v in network.items()) or {}
+        return addresses
+
+    @staticmethod
+    def _has_all_ipv4_addresses(addresses):
+        return len(addresses) > 0 and all(len(v) > 0 for v in addresses.values())
+
+    def _get_addresses(self):
+        try:
+            due = datetime.datetime.now()
+ datetime.timedelta(seconds=self.timeout) + while datetime.datetime.now() < due: + time.sleep(1) + addresses = self._container_ipv4_addresses() + if self._has_all_ipv4_addresses(addresses): + self.addresses = addresses + return + except LXDClientException as e: + e.msg = 'timeout for getting IPv4 addresses' + raise + + def _started(self): + if self.old_state == 'absent': + self._create_container() + self._start_container() + else: + if self.old_state == 'frozen': + self._unfreeze_container() + elif self.old_state == 'stopped': + self._start_container() + if self._needs_to_apply_container_configs(): + self._apply_container_configs() + if self.wait_for_ipv4_addresses: + self._get_addresses() + + def _stopped(self): + if self.old_state == 'absent': + self._create_container() + else: + if self.old_state == 'stopped': + if self._needs_to_apply_container_configs(): + self._start_container() + self._apply_container_configs() + self._stop_container() + else: + if self.old_state == 'frozen': + self._unfreeze_container() + if self._needs_to_apply_container_configs(): + self._apply_container_configs() + self._stop_container() + + def _restarted(self): + if self.old_state == 'absent': + self._create_container() + self._start_container() + else: + if self.old_state == 'frozen': + self._unfreeze_container() + if self._needs_to_apply_container_configs(): + self._apply_container_configs() + self._restart_container() + if self.wait_for_ipv4_addresses: + self._get_addresses() + + def _destroyed(self): + if self.old_state != 'absent': + if self.old_state == 'frozen': + self._unfreeze_container() + if self.old_state != 'stopped': + self._stop_container() + self._delete_container() + + def _frozen(self): + if self.old_state == 'absent': + self._create_container() + self._start_container() + self._freeze_container() + else: + if self.old_state == 'stopped': + self._start_container() + if self._needs_to_apply_container_configs(): + self._apply_container_configs() + self._freeze_container() + + def _needs_to_change_container_config(self, key): + if key not in self.config: + return False + if key == 'config': + old_configs = dict((k, v) for k, v in self.old_container_json['metadata'][key].items() if not k.startswith('volatile.')) + for k, v in self.config['config'].items(): + if k not in old_configs: + return True + if old_configs[k] != v: + return True + return False + else: + old_configs = self.old_container_json['metadata'][key] + return self.config[key] != old_configs + + def _needs_to_apply_container_configs(self): + return ( + self._needs_to_change_container_config('architecture') or + self._needs_to_change_container_config('config') or + self._needs_to_change_container_config('ephemeral') or + self._needs_to_change_container_config('devices') or + self._needs_to_change_container_config('profiles') + ) + + def _apply_container_configs(self): + old_metadata = self.old_container_json['metadata'] + body_json = { + 'architecture': old_metadata['architecture'], + 'config': old_metadata['config'], + 'devices': old_metadata['devices'], + 'profiles': old_metadata['profiles'] + } + if self._needs_to_change_container_config('architecture'): + body_json['architecture'] = self.config['architecture'] + if self._needs_to_change_container_config('config'): + for k, v in self.config['config'].items(): + body_json['config'][k] = v + if self._needs_to_change_container_config('ephemeral'): + body_json['ephemeral'] = self.config['ephemeral'] + if self._needs_to_change_container_config('devices'): + body_json['devices'] = 
self.config['devices'] + if self._needs_to_change_container_config('profiles'): + body_json['profiles'] = self.config['profiles'] + self.client.do('PUT', '/1.0/containers/{0}'.format(self.name), body_json=body_json) + self.actions.append('apply_container_configs') + + def run(self): + """Run the main method.""" + + try: + if self.trust_password is not None: + self.client.authenticate(self.trust_password) + + self.old_container_json = self._get_container_json() + self.old_state = self._container_json_to_module_state(self.old_container_json) + action = getattr(self, LXD_ANSIBLE_STATES[self.state]) + action() + + state_changed = len(self.actions) > 0 + result_json = { + 'log_verbosity': self.module._verbosity, + 'changed': state_changed, + 'old_state': self.old_state, + 'actions': self.actions + } + if self.client.debug: + result_json['logs'] = self.client.logs + if self.addresses is not None: + result_json['addresses'] = self.addresses + self.module.exit_json(**result_json) + except LXDClientException as e: + state_changed = len(self.actions) > 0 + fail_params = { + 'msg': e.msg, + 'changed': state_changed, + 'actions': self.actions + } + if self.client.debug: + fail_params['logs'] = e.kwargs['logs'] + self.module.fail_json(**fail_params) + + +def main(): + """Ansible Main module.""" + + module = AnsibleModule( + argument_spec=dict( + name=dict( + type='str', + required=True + ), + architecture=dict( + type='str', + ), + config=dict( + type='dict', + ), + devices=dict( + type='dict', + ), + ephemeral=dict( + type='bool', + ), + profiles=dict( + type='list', + ), + source=dict( + type='dict', + ), + state=dict( + choices=LXD_ANSIBLE_STATES.keys(), + default='started' + ), + timeout=dict( + type='int', + default=30 + ), + wait_for_ipv4_addresses=dict( + type='bool', + default=False + ), + force_stop=dict( + type='bool', + default=False + ), + url=dict( + type='str', + default=ANSIBLE_LXD_DEFAULT_URL + ), + snap_url=dict( + type='str', + default='unix:/var/snap/lxd/common/lxd/unix.socket' + ), + client_key=dict( + type='str', + default='{0}/.config/lxc/client.key'.format(os.environ['HOME']), + aliases=['key_file'] + ), + client_cert=dict( + type='str', + default='{0}/.config/lxc/client.crt'.format(os.environ['HOME']), + aliases=['cert_file'] + ), + trust_password=dict(type='str', no_log=True) + ), + supports_check_mode=False, + ) + + lxd_manage = LXDContainerManagement(module=module) + lxd_manage.run() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/lxd/lxd_profile.py b/plugins/modules/cloud/lxd/lxd_profile.py new file mode 100644 index 0000000000..606388f6e6 --- /dev/null +++ b/plugins/modules/cloud/lxd/lxd_profile.py @@ -0,0 +1,396 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2016, Hiroaki Nakamura +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: lxd_profile +short_description: Manage LXD profiles +description: + - Management of LXD profiles +author: "Hiroaki Nakamura (@hnakamur)" +options: + name: + description: + - Name of a profile. + required: true + description: + description: + - Description of the profile. + config: + description: + - 'The config for the container (e.g. {"limits.memory": "4GB"}). 
+        See U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#patch-3)'
+      - If the profile already exists and its "config" value in metadata
+        obtained from
+        GET /1.0/profiles/
+        U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#get-19)
+        is different, then this module tries to apply the configurations.
+      - Not all config values can be applied to an existing profile;
+        you may need to delete and recreate the profile.
+    required: false
+  devices:
+    description:
+      - 'The devices for the profile
+        (e.g. {"rootfs": {"path": "/dev/kvm", "type": "unix-char"}}).
+        See U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#patch-3)'
+    required: false
+  new_name:
+    description:
+      - A new name for the profile.
+      - If this parameter is specified, the profile will be renamed to this name.
+        See U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#post-11)
+    required: false
+  state:
+    choices:
+      - present
+      - absent
+    description:
+      - Define the state of a profile.
+    required: false
+    default: present
+  url:
+    description:
+      - The unix domain socket path or the https URL for the LXD server.
+    required: false
+    default: unix:/var/lib/lxd/unix.socket
+  snap_url:
+    description:
+      - The unix domain socket path when LXD is installed by snap package manager.
+    required: false
+    default: unix:/var/snap/lxd/common/lxd/unix.socket
+  client_key:
+    description:
+      - The client certificate key file path.
+    required: false
+    default: '"{}/.config/lxc/client.key" .format(os.environ["HOME"])'
+    aliases: [ key_file ]
+  client_cert:
+    description:
+      - The client certificate file path.
+    required: false
+    default: '"{}/.config/lxc/client.crt" .format(os.environ["HOME"])'
+    aliases: [ cert_file ]
+  trust_password:
+    description:
+      - The client trusted password.
+      - You need to set this password on the LXD server before
+        running this module using the following command.
+        lxc config set core.trust_password
+        See U(https://www.stgraber.org/2016/04/18/lxd-api-direct-interaction/)
+      - If trust_password is set, this module sends a request for
+        authentication before sending any requests.
+    required: false
+notes:
+  - Profiles must have a unique name. If you attempt to create a profile
+    with a name that already exists in the user's namespace the module will
+    simply return as "unchanged".
+'''
+
+EXAMPLES = '''
+# An example for creating a profile
+- hosts: localhost
+  connection: local
+  tasks:
+    - name: Create a profile
+      lxd_profile:
+        name: macvlan
+        state: present
+        config: {}
+        description: my macvlan profile
+        devices:
+          eth0:
+            nictype: macvlan
+            parent: br0
+            type: nic
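+
+# An editorial, illustrative example of updating an existing profile's
+# config in place; the limits.memory key is just a sample setting taken
+# from the documentation above.
+- hosts: localhost
+  connection: local
+  tasks:
+    - name: Update the macvlan profile's memory limit
+      lxd_profile:
+        name: macvlan
+        state: present
+        config:
+          limits.memory: 4GB
+        description: my macvlan profile
+        devices:
+          eth0:
+            nictype: macvlan
+            parent: br0
+            type: nic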
+
+# An example for creating a profile via http connection
+- hosts: localhost
+  connection: local
+  tasks:
+    - name: create macvlan profile
+      lxd_profile:
+        url: https://127.0.0.1:8443
+        # These client_cert and client_key values are equal to the default values.
+        #client_cert: "{{ lookup('env', 'HOME') }}/.config/lxc/client.crt"
+        #client_key: "{{ lookup('env', 'HOME') }}/.config/lxc/client.key"
+        trust_password: mypassword
+        name: macvlan
+        state: present
+        config: {}
+        description: my macvlan profile
+        devices:
+          eth0:
+            nictype: macvlan
+            parent: br0
+            type: nic
+
+# An example for deleting a profile
+- hosts: localhost
+  connection: local
+  tasks:
+    - name: Delete a profile
+      lxd_profile:
+        name: macvlan
+        state: absent
+
+# An example for renaming a profile
+- hosts: localhost
+  connection: local
+  tasks:
+    - name: Rename a profile
+      lxd_profile:
+        name: macvlan
+        new_name: macvlan2
+        state: present
+'''
+
+RETURN = '''
+old_state:
+  description: The old state of the profile
+  returned: success
+  type: str
+  sample: "absent"
+logs:
+  description: The logs of requests and responses.
+  returned: when ansible-playbook is invoked with -vvvv.
+  type: list
+  sample: "(too long to be placed here)"
+actions:
+  description: List of actions performed for the profile.
+  returned: success
+  type: list
+  sample: '["create"]'
+'''
+
+import os
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.lxd import LXDClient, LXDClientException
+
+# ANSIBLE_LXD_DEFAULT_URL is a default value of the lxd endpoint
+ANSIBLE_LXD_DEFAULT_URL = 'unix:/var/lib/lxd/unix.socket'
+
+# PROFILES_STATES is a list of the states supported by this module.
+PROFILES_STATES = [
+    'present', 'absent'
+]
+
+# CONFIG_PARAMS is a list of config attribute names.
+CONFIG_PARAMS = [
+    'config', 'description', 'devices'
+]
+
+
+class LXDProfileManagement(object):
+    def __init__(self, module):
+        """Management of LXD profiles via Ansible.
+
+        :param module: Processed Ansible Module.
+        :type module: ``object``
+        """
+        self.module = module
+        self.name = self.module.params['name']
+        self._build_config()
+        self.state = self.module.params['state']
+        self.new_name = self.module.params.get('new_name', None)
+
+        self.key_file = self.module.params.get('client_key', None)
+        self.cert_file = self.module.params.get('client_cert', None)
+        self.debug = self.module._verbosity >= 4
+
+        try:
+            if self.module.params['url'] != ANSIBLE_LXD_DEFAULT_URL:
+                self.url = self.module.params['url']
+            elif os.path.exists(self.module.params['snap_url'].replace('unix:', '')):
+                self.url = self.module.params['snap_url']
+            else:
+                self.url = self.module.params['url']
+        except Exception as e:
+            self.module.fail_json(msg=e.msg)
+
+        try:
+            self.client = LXDClient(
+                self.url, key_file=self.key_file, cert_file=self.cert_file,
+                debug=self.debug
+            )
+        except LXDClientException as e:
+            self.module.fail_json(msg=e.msg)
+        self.trust_password = self.module.params.get('trust_password', None)
+        self.actions = []
+
+    def _build_config(self):
+        self.config = {}
+        for attr in CONFIG_PARAMS:
+            param_val = self.module.params.get(attr, None)
+            if param_val is not None:
+                self.config[attr] = param_val
+
+    def _get_profile_json(self):
+        return self.client.do(
+            'GET', '/1.0/profiles/{0}'.format(self.name),
+            ok_error_codes=[404]
+        )
+
+    @staticmethod
+    def _profile_json_to_module_state(resp_json):
+        if resp_json['type'] == 'error':
+            return 'absent'
+        return 'present'
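+    # Editorial note: a rename is a POST to /1.0/profiles/<name> with the
+    # body {"name": "<new_name>"}; all other changes go through a PUT of the
+    # merged profile data (see _apply_profile_configs below).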
+    def _update_profile(self):
+        if self.state == 'present':
+            if self.old_state == 'absent':
+                if self.new_name is None:
+                    self._create_profile()
+                else:
+                    self.module.fail_json(
+                        msg='new_name must not be set when the profile does not exist and the specified state is present',
+                        changed=False)
+            else:
+                if self.new_name is not None and self.new_name != self.name:
+                    self._rename_profile()
+                if self._needs_to_apply_profile_configs():
+                    self._apply_profile_configs()
+        elif self.state == 'absent':
+            if self.old_state == 'present':
+                if self.new_name is None:
+                    self._delete_profile()
+                else:
+                    self.module.fail_json(
+                        msg='new_name must not be set when the profile exists and the specified state is absent',
+                        changed=False)
+
+    def _create_profile(self):
+        config = self.config.copy()
+        config['name'] = self.name
+        self.client.do('POST', '/1.0/profiles', config)
+        self.actions.append('create')
+
+    def _rename_profile(self):
+        config = {'name': self.new_name}
+        self.client.do('POST', '/1.0/profiles/{0}'.format(self.name), config)
+        self.actions.append('rename')
+        self.name = self.new_name
+
+    def _needs_to_change_profile_config(self, key):
+        if key not in self.config:
+            return False
+        old_configs = self.old_profile_json['metadata'].get(key, None)
+        return self.config[key] != old_configs
+
+    def _needs_to_apply_profile_configs(self):
+        return (
+            self._needs_to_change_profile_config('config') or
+            self._needs_to_change_profile_config('description') or
+            self._needs_to_change_profile_config('devices')
+        )
+
+    def _apply_profile_configs(self):
+        # Start from the profile data in the response's 'metadata' key (the
+        # response envelope itself is not a valid profile body, and the
+        # comparison above also reads from 'metadata'), then overlay the
+        # user-supplied values.
+        config = self.old_profile_json['metadata'].copy()
+        for k, v in self.config.items():
+            config[k] = v
+        self.client.do('PUT', '/1.0/profiles/{0}'.format(self.name), config)
+        self.actions.append('apply_profile_configs')
+
+    def _delete_profile(self):
+        self.client.do('DELETE', '/1.0/profiles/{0}'.format(self.name))
+        self.actions.append('delete')
+
+    def run(self):
+        """Run the main method."""
+
+        try:
+            if self.trust_password is not None:
+                self.client.authenticate(self.trust_password)
+
+            self.old_profile_json = self._get_profile_json()
+            self.old_state = self._profile_json_to_module_state(self.old_profile_json)
+            self._update_profile()
+
+            state_changed = len(self.actions) > 0
+            result_json = {
+                'changed': state_changed,
+                'old_state': self.old_state,
+                'actions': self.actions
+            }
+            if self.client.debug:
+                result_json['logs'] = self.client.logs
+            self.module.exit_json(**result_json)
+        except LXDClientException as e:
+            state_changed = len(self.actions) > 0
+            fail_params = {
+                'msg': e.msg,
+                'changed': state_changed,
+                'actions': self.actions
+            }
+            if self.client.debug:
+                fail_params['logs'] = e.kwargs['logs']
+            self.module.fail_json(**fail_params)
+
+
+def main():
+    """Ansible Main module."""
+
+    module = AnsibleModule(
+        argument_spec=dict(
+            name=dict(
+                type='str',
+                required=True
+            ),
+            new_name=dict(
+                type='str',
+            ),
+            config=dict(
+                type='dict',
+            ),
+            description=dict(
+                type='str',
+            ),
+            devices=dict(
+                type='dict',
+            ),
+            state=dict(
+                choices=PROFILES_STATES,
+                default='present'
+            ),
+            url=dict(
+                type='str',
+                default=ANSIBLE_LXD_DEFAULT_URL
+            ),
+            snap_url=dict(
+                type='str',
+                default='unix:/var/snap/lxd/common/lxd/unix.socket'
+            ),
+            client_key=dict(
+                type='str',
+                default='{0}/.config/lxc/client.key'.format(os.environ['HOME']),
+                aliases=['key_file']
+            ),
+            client_cert=dict(
+                type='str',
+                default='{0}/.config/lxc/client.crt'.format(os.environ['HOME']),
+                aliases=['cert_file']
+            ),
+            trust_password=dict(type='str', no_log=True)
+        ),
+        supports_check_mode=False,
+    )
+
+    lxd_manage = LXDProfileManagement(module=module)
+    lxd_manage.run()
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/cloud/memset/memset_dns_reload.py b/plugins/modules/cloud/memset/memset_dns_reload.py
new file mode 100644
index 0000000000..efb36caefc
--- /dev/null
+++ 
b/plugins/modules/cloud/memset/memset_dns_reload.py
@@ -0,0 +1,188 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2018, Simon Weald
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+    'metadata_version': '1.1',
+    'status': ['preview'],
+    'supported_by': 'community'
+}
+
+DOCUMENTATION = '''
+---
+module: memset_dns_reload
+author: "Simon Weald (@glitchcrab)"
+short_description: Request reload of Memset's DNS infrastructure.
+notes:
+  - DNS reload requests are a best-effort service provided by Memset; these generally
+    happen every 15 minutes by default, however you can request an immediate reload if
+    later tasks rely on the records being created. An API key generated via the
+    Memset customer control panel is required with the following minimum scope -
+    I(dns.reload). If you wish to poll the job status to wait until the reload has
+    completed, then I(job.status) is also required.
+description:
+    - Request a reload of Memset's DNS infrastructure, and optionally poll until it finishes.
+options:
+    api_key:
+        required: true
+        description:
+            - The API key obtained from the Memset control panel.
+    poll:
+        default: false
+        type: bool
+        description:
+            - Boolean value, if set will poll the reload job's status and return
+              when the job has completed (unless the 30 second timeout is reached first).
+              If the timeout is reached then the task will not be marked as failed, but
+              stderr will indicate that the polling failed.
+'''

+EXAMPLES = '''
+- name: submit DNS reload and poll.
+  memset_dns_reload:
+    api_key: 5eb86c9196ab03919abcf03857163741
+    poll: True
+  delegate_to: localhost
+'''
+
+RETURN = '''
+---
+memset_api:
+  description: Raw response from the Memset API.
+  returned: always
+  type: complex
+  contains:
+    error:
+      description: Whether the job ended in error state.
+      returned: always
+      type: bool
+      sample: true
+    finished:
+      description: Whether the job completed before the result was returned.
+      returned: always
+      type: bool
+      sample: true
+    id:
+      description: Job ID.
+      returned: always
+      type: str
+      sample: "c9cc8ad2a3e3fb8c63ed83c424928ef8"
+    status:
+      description: Job status.
+      returned: always
+      type: str
+      sample: "DONE"
+    type:
+      description: Job type.
+      returned: always
+      type: str
+      sample: "dns"
+'''
+
+from time import sleep
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.memset import memset_api_call
+
+
+def poll_reload_status(api_key=None, job_id=None, payload=None):
+    '''
+    We poll the `job.status` endpoint every 5 seconds up to a
+    maximum of 6 times. This is a relatively arbitrary choice of
+    timeout, however requests rarely take longer than 15 seconds
+    to complete.
+    '''
+    memset_api, stderr, msg = None, None, None
+    payload['id'] = job_id
+
+    api_method = 'job.status'
+    _has_failed, _msg, response = memset_api_call(api_key=api_key, api_method=api_method, payload=payload)
+
+    # Poll at most 6 times with a 5 second sleep, so the wait is bounded
+    # rather than looping until the job finishes.
+    counter = 0
+    while not response.json()['finished'] and counter < 6:
+        sleep(5)
+        _has_failed, msg, response = memset_api_call(api_key=api_key, api_method=api_method, payload=payload)
+        counter += 1
+
+    if not response.json()['finished'] or response.json()['error']:
+        # the reload job was submitted but polling failed or timed out.
+        # Don't return this as an overall task failure.
+        stderr = "Reload submitted successfully, but the Memset API returned a job error or the job did not finish when polling the reload status."
+    else:
+        memset_api = response.json()
+        msg = None
+
+    return(memset_api, msg, stderr)
+
+
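+# Editorial note: with the bounded loop above, a full poll takes at most
+# 6 * 5 = 30 seconds, matching the 30 second timeout described in the
+# DOCUMENTATION block.
+
+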
+ stderr = "Reload submitted successfully, but the Memset API returned a job error when attempting to poll the reload status." + else: + memset_api = response.json() + msg = None + + return(memset_api, msg, stderr) + + +def reload_dns(args=None): + ''' + DNS reloads are a single API call and therefore there's not much + which can go wrong outside of auth errors. + ''' + retvals, payload = dict(), dict() + has_changed, has_failed = False, False + memset_api, msg, stderr = None, None, None + + api_method = 'dns.reload' + has_failed, msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method) + + if has_failed: + # this is the first time the API is called; incorrect credentials will + # manifest themselves at this point so we need to ensure the user is + # informed of the reason. + retvals['failed'] = has_failed + retvals['memset_api'] = response.json() + retvals['msg'] = msg + return(retvals) + + # set changed to true if the reload request was accepted. + has_changed = True + memset_api = msg + # empty msg var as we don't want to return the API's json response twice. + msg = None + + if args['poll']: + # hand off to the poll function. + job_id = response.json()['id'] + memset_api, msg, stderr = poll_reload_status(api_key=args['api_key'], job_id=job_id, payload=payload) + + # assemble return variables. + retvals['failed'] = has_failed + retvals['changed'] = has_changed + for val in ['msg', 'stderr', 'memset_api']: + if val is not None: + retvals[val] = eval(val) + + return(retvals) + + +def main(): + global module + module = AnsibleModule( + argument_spec=dict( + api_key=dict(required=True, type='str', no_log=True), + poll=dict(required=False, default=False, type='bool') + ), + supports_check_mode=False + ) + + # populate the dict with the user-provided vars. + args = dict() + for key, arg in module.params.items(): + args[key] = arg + + retvals = reload_dns(args) + + if retvals['failed']: + module.fail_json(**retvals) + else: + module.exit_json(**retvals) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/memset/memset_memstore_facts.py b/plugins/modules/cloud/memset/memset_memstore_facts.py new file mode 120000 index 0000000000..aa78805717 --- /dev/null +++ b/plugins/modules/cloud/memset/memset_memstore_facts.py @@ -0,0 +1 @@ +memset_memstore_info.py \ No newline at end of file diff --git a/plugins/modules/cloud/memset/memset_memstore_info.py b/plugins/modules/cloud/memset/memset_memstore_info.py new file mode 100644 index 0000000000..6a9419c82a --- /dev/null +++ b/plugins/modules/cloud/memset/memset_memstore_info.py @@ -0,0 +1,175 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (c) 2018, Simon Weald +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +ANSIBLE_METADATA = { + 'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community' +} + +DOCUMENTATION = ''' +--- +module: memset_memstore_info +author: "Simon Weald (@glitchcrab)" +short_description: Retrieve Memstore product usage information. +notes: + - An API key generated via the Memset customer control panel is needed with the + following minimum scope - I(memstore.usage). +description: + - Retrieve Memstore product usage information. + - This module was called C(memset_memstore_facts) before Ansible 2.9. The usage did not change. 
+options: + api_key: + required: true + description: + - The API key obtained from the Memset control panel. + name: + required: true + description: + - The Memstore product name (i.e. C(mstestyaa1)). +''' + +EXAMPLES = ''' +- name: get usage for mstestyaa1 + memset_memstore_info: + name: mstestyaa1 + api_key: 5eb86c9896ab03919abcf03857163741 + delegate_to: localhost +''' + +RETURN = ''' +--- +memset_api: + description: Info from the Memset API + returned: always + type: complex + contains: + cdn_bandwidth: + description: Dictionary of CDN bandwidth facts + returned: always + type: complex + contains: + bytes_out: + description: Outbound CDN bandwidth for the last 24 hours in bytes + returned: always + type: int + sample: 1000 + requests: + description: Number of requests in the last 24 hours + returned: always + type: int + sample: 10 + bytes_in: + description: Inbound CDN bandwidth for the last 24 hours in bytes + returned: always + type: int + sample: 1000 + containers: + description: Number of containers + returned: always + type: int + sample: 10 + bytes: + description: Space used in bytes + returned: always + type: int + sample: 3860997965 + objs: + description: Number of objects + returned: always + type: int + sample: 1000 + bandwidth: + description: Dictionary of CDN bandwidth facts + returned: always + type: complex + contains: + bytes_out: + description: Outbound bandwidth for the last 24 hours in bytes + returned: always + type: int + sample: 1000 + requests: + description: Number of requests in the last 24 hours + returned: always + type: int + sample: 10 + bytes_in: + description: Inbound bandwidth for the last 24 hours in bytes + returned: always + type: int + sample: 1000 +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.memset import memset_api_call + + +def get_facts(args=None): + ''' + Performs a simple API call and returns a JSON blob. + ''' + retvals, payload = dict(), dict() + has_changed, has_failed = False, False + msg, stderr, memset_api = None, None, None + + payload['name'] = args['name'] + + api_method = 'memstore.usage' + has_failed, msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method, payload=payload) + + if has_failed: + # this is the first time the API is called; incorrect credentials will + # manifest themselves at this point so we need to ensure the user is + # informed of the reason. + retvals['failed'] = has_failed + retvals['msg'] = msg + retvals['stderr'] = "API returned an error: {0}" . format(response.status_code) + return(retvals) + + # we don't want to return the same thing twice + msg = None + memset_api = response.json() + + retvals['changed'] = has_changed + retvals['failed'] = has_failed + for val in ['msg', 'memset_api']: + if val is not None: + retvals[val] = eval(val) + + return(retvals) + + +def main(): + global module + module = AnsibleModule( + argument_spec=dict( + api_key=dict(required=True, type='str', no_log=True), + name=dict(required=True, type='str') + ), + supports_check_mode=False + ) + if module._name == 'memset_memstore_facts': + module.deprecate("The 'memset_memstore_facts' module has been renamed to 'memset_memstore_info'", version='2.13') + + # populate the dict with the user-provided vars. 
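+    # (this is equivalent to args = module.params.copy(); the explicit loop
+    # mirrors the other memset modules.)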
+ args = dict() + for key, arg in module.params.items(): + args[key] = arg + + retvals = get_facts(args) + + if retvals['failed']: + module.fail_json(**retvals) + else: + module.exit_json(**retvals) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/memset/memset_server_facts.py b/plugins/modules/cloud/memset/memset_server_facts.py new file mode 120000 index 0000000000..0a5766aee4 --- /dev/null +++ b/plugins/modules/cloud/memset/memset_server_facts.py @@ -0,0 +1 @@ +memset_server_info.py \ No newline at end of file diff --git a/plugins/modules/cloud/memset/memset_server_info.py b/plugins/modules/cloud/memset/memset_server_info.py new file mode 100644 index 0000000000..29e1d8a084 --- /dev/null +++ b/plugins/modules/cloud/memset/memset_server_info.py @@ -0,0 +1,300 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (c) 2018, Simon Weald +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +ANSIBLE_METADATA = { + 'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community' +} + +DOCUMENTATION = ''' +--- +module: memset_server_info +author: "Simon Weald (@glitchcrab)" +short_description: Retrieve server information. +notes: + - An API key generated via the Memset customer control panel is needed with the + following minimum scope - I(server.info). +description: + - Retrieve server information. + - This module was called C(memset_server_facts) before Ansible 2.9. The usage did not change. +options: + api_key: + required: true + description: + - The API key obtained from the Memset control panel. + name: + required: true + description: + - The server product name (i.e. C(testyaa1)). +''' + +EXAMPLES = ''' +- name: get details for testyaa1 + memset_server_info: + name: testyaa1 + api_key: 5eb86c9896ab03919abcf03857163741 + delegate_to: localhost +''' + +RETURN = ''' +--- +memset_api: + description: Info from the Memset API + returned: always + type: complex + contains: + backups: + description: Whether this server has a backup service. + returned: always + type: bool + sample: true + control_panel: + description: Whether the server has a control panel (i.e. cPanel). + returned: always + type: str + sample: 'cpanel' + data_zone: + description: The data zone the server is in. + returned: always + type: str + sample: 'Memset Public Cloud' + expiry_date: + description: Current expiry date of the server. + returned: always + type: str + sample: '2018-08-10' + firewall_rule_group: + description: Details about the firewall group this server is in. + returned: always + type: dict + sample: { + "default_outbound_policy": "RETURN", + "name": "testyaa-fw1", + "nickname": "testyaa cPanel rules", + "notes": "", + "public": false, + "rules": { + "51d7db54d39c3544ef7c48baa0b9944f": { + "action": "ACCEPT", + "comment": "", + "dest_ip6s": "any", + "dest_ips": "any", + "dest_ports": "any", + "direction": "Inbound", + "ip_version": "any", + "ordering": 2, + "protocols": "icmp", + "rule_group_name": "testyaa-fw1", + "rule_id": "51d7db54d39c3544ef7c48baa0b9944f", + "source_ip6s": "any", + "source_ips": "any", + "source_ports": "any" + } + } + } + firewall_type: + description: The type of firewall the server has (i.e. self-managed, managed). + returned: always + type: str + sample: 'managed' + host_name: + description: The server's hostname. 
+ returned: always + type: str + sample: 'testyaa1.miniserver.com' + ignore_monitoring_off: + description: When true, Memset won't remind the customer that monitoring is disabled. + returned: always + type: bool + sample: true + ips: + description: List of dictionaries of all IP addresses assigned to the server. + returned: always + type: list + sample: [ + { + "address": "1.2.3.4", + "bytes_in_today": 1000.0, + "bytes_in_yesterday": 2000.0, + "bytes_out_today": 1000.0, + "bytes_out_yesterday": 2000.0 + } + ] + monitor: + description: Whether the server has monitoring enabled. + returned: always + type: bool + sample: true + monitoring_level: + description: The server's monitoring level (i.e. basic). + returned: always + type: str + sample: 'basic' + name: + description: Server name (same as the service name). + returned: always + type: str + sample: 'testyaa1' + network_zones: + description: The network zone(s) the server is in. + returned: always + type: list + sample: [ 'reading' ] + nickname: + description: Customer-set nickname for the server. + returned: always + type: str + sample: 'database server' + no_auto_reboot: + description: Whether or not to reboot the server if monitoring detects it down. + returned: always + type: bool + sample: true + no_nrpe: + description: Whether Memset should use NRPE to monitor this server. + returned: always + type: bool + sample: true + os: + description: The server's Operating System. + returned: always + type: str + sample: 'debian_stretch_64' + penetration_patrol: + description: Intrusion detection support level for this server. + returned: always + type: str + sample: 'managed' + penetration_patrol_alert_level: + description: The alert level at which notifications are sent. + returned: always + type: int + sample: 10 + primary_ip: + description: Server's primary IP. + returned: always + type: str + sample: '1.2.3.4' + renewal_price_amount: + description: Renewal cost for the server. + returned: always + type: str + sample: '30.00' + renewal_price_currency: + description: Currency for renewal payments. + returned: always + type: str + sample: 'GBP' + renewal_price_vat: + description: VAT rate for renewal payments + returned: always + type: str + sample: '20' + start_date: + description: Server's start date. + returned: always + type: str + sample: '2013-04-10' + status: + description: Current status of the server (i.e. live, onhold). + returned: always + type: str + sample: 'LIVE' + support_level: + description: Support level included with the server. + returned: always + type: str + sample: 'managed' + type: + description: What this server is (i.e. dedicated) + returned: always + type: str + sample: 'miniserver' + vlans: + description: Dictionary of tagged and untagged VLANs this server is in. + returned: always + type: dict + sample: { + tagged: [], + untagged: [ 'testyaa-vlan1', 'testyaa-vlan2' ] + } + vulnscan: + description: Vulnerability scanning level. + returned: always + type: str + sample: 'basic' +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.memset import memset_api_call + + +def get_facts(args=None): + ''' + Performs a simple API call and returns a JSON blob. 
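+    Uses the server.info API method; authentication errors will surface on
+    this first call.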
+ ''' + retvals, payload = dict(), dict() + has_changed, has_failed = False, False + msg, stderr, memset_api = None, None, None + + payload['name'] = args['name'] + + api_method = 'server.info' + has_failed, msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method, payload=payload) + + if has_failed: + # this is the first time the API is called; incorrect credentials will + # manifest themselves at this point so we need to ensure the user is + # informed of the reason. + retvals['failed'] = has_failed + retvals['msg'] = msg + retvals['stderr'] = "API returned an error: {0}" . format(response.status_code) + return(retvals) + + # we don't want to return the same thing twice + msg = None + memset_api = response.json() + + retvals['changed'] = has_changed + retvals['failed'] = has_failed + for val in ['msg', 'memset_api']: + if val is not None: + retvals[val] = eval(val) + + return(retvals) + + +def main(): + global module + module = AnsibleModule( + argument_spec=dict( + api_key=dict(required=True, type='str', no_log=True), + name=dict(required=True, type='str') + ), + supports_check_mode=False + ) + if module._name == 'memset_server_facts': + module.deprecate("The 'memset_server_facts' module has been renamed to 'memset_server_info'", version='2.13') + + # populate the dict with the user-provided vars. + args = dict() + for key, arg in module.params.items(): + args[key] = arg + + retvals = get_facts(args) + + if retvals['failed']: + module.fail_json(**retvals) + else: + module.exit_json(**retvals) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/memset/memset_zone.py b/plugins/modules/cloud/memset/memset_zone.py new file mode 100644 index 0000000000..ca42039af5 --- /dev/null +++ b/plugins/modules/cloud/memset/memset_zone.py @@ -0,0 +1,313 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (c) 2018, Simon Weald +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +ANSIBLE_METADATA = { + 'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community' +} + +DOCUMENTATION = ''' +--- +module: memset_zone +author: "Simon Weald (@glitchcrab)" +short_description: Creates and deletes Memset DNS zones. +notes: + - Zones can be thought of as a logical group of domains, all of which share the + same DNS records (i.e. they point to the same IP). An API key generated via the + Memset customer control panel is needed with the following minimum scope - + I(dns.zone_create), I(dns.zone_delete), I(dns.zone_list). +description: + - Manage DNS zones in a Memset account. +options: + state: + required: true + description: + - Indicates desired state of resource. + choices: [ absent, present ] + api_key: + required: true + description: + - The API key obtained from the Memset control panel. + name: + required: true + description: + - The zone nickname; usually the same as the main domain. Ensure this + value has at most 250 characters. + aliases: [ nickname ] + ttl: + description: + - The default TTL for all records created in the zone. This must be a + valid int from U(https://www.memset.com/apidocs/methods_dns.html#dns.zone_create). + choices: [ 0, 300, 600, 900, 1800, 3600, 7200, 10800, 21600, 43200, 86400 ] + force: + required: false + default: false + type: bool + description: + - Forces deletion of a zone and all zone domains/zone records it contains. 
+''' + +EXAMPLES = ''' +# Create the zone 'test' +- name: create zone + memset_zone: + name: test + state: present + api_key: 5eb86c9196ab03919abcf03857163741 + ttl: 300 + delegate_to: localhost + +# Force zone deletion +- name: force delete zone + memset_zone: + name: test + state: absent + api_key: 5eb86c9196ab03919abcf03857163741 + force: true + delegate_to: localhost +''' + +RETURN = ''' +memset_api: + description: Zone info from the Memset API + returned: when state == present + type: complex + contains: + domains: + description: List of domains in this zone + returned: always + type: list + sample: [] + id: + description: Zone id + returned: always + type: str + sample: "b0bb1ce851aeea6feeb2dc32fe83bf9c" + nickname: + description: Zone name + returned: always + type: str + sample: "example.com" + records: + description: List of DNS records for domains in this zone + returned: always + type: list + sample: [] + ttl: + description: Default TTL for domains in this zone + returned: always + type: int + sample: 300 +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.memset import check_zone +from ansible_collections.community.general.plugins.module_utils.memset import get_zone_id +from ansible_collections.community.general.plugins.module_utils.memset import memset_api_call + + +def api_validation(args=None): + ''' + Perform some validation which will be enforced by Memset's API (see: + https://www.memset.com/apidocs/methods_dns.html#dns.zone_record_create) + ''' + # zone domain length must be less than 250 chars. + if len(args['name']) > 250: + stderr = 'Zone name must be less than 250 characters in length.' + module.fail_json(failed=True, msg=stderr, stderr=stderr) + + +def check(args=None): + ''' + Support for running with check mode. + ''' + retvals = dict() + + api_method = 'dns.zone_list' + has_failed, _msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method) + + zone_exists, counter = check_zone(data=response, name=args['name']) + + # set changed to true if the operation would cause a change. + has_changed = ((zone_exists and args['state'] == 'absent') or (not zone_exists and args['state'] == 'present')) + + retvals['changed'] = has_changed + retvals['failed'] = has_failed + + return(retvals) + + +def create_zone(args=None, zone_exists=None, payload=None): + ''' + At this point we already know whether the zone exists, so we + just need to make the API reflect the desired state. + ''' + has_changed, has_failed = False, False + msg, memset_api = None, None + + if not zone_exists: + payload['ttl'] = args['ttl'] + payload['nickname'] = args['name'] + api_method = 'dns.zone_create' + has_failed, msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method, payload=payload) + if not has_failed: + has_changed = True + else: + api_method = 'dns.zone_list' + _has_failed, _msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method) + for zone in response.json(): + if zone['nickname'] == args['name']: + break + if zone['ttl'] != args['ttl']: + # update the zone if the desired TTL is different. + payload['id'] = zone['id'] + payload['ttl'] = args['ttl'] + api_method = 'dns.zone_update' + has_failed, msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method, payload=payload) + if not has_failed: + has_changed = True + + # populate return var with zone info. 
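+    # (the zone list is re-fetched here so the returned data reflects any
+    # create or TTL update performed above.)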
+ api_method = 'dns.zone_list' + _has_failed, _msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method) + + zone_exists, msg, counter, zone_id = get_zone_id(zone_name=args['name'], current_zones=response.json()) + + if zone_exists: + payload = dict() + payload['id'] = zone_id + api_method = 'dns.zone_info' + _has_failed, _msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method, payload=payload) + memset_api = response.json() + + return(has_failed, has_changed, memset_api, msg) + + +def delete_zone(args=None, zone_exists=None, payload=None): + ''' + Deletion requires extra sanity checking as the zone cannot be + deleted if it contains domains or records. Setting force=true + will override this behaviour. + ''' + has_changed, has_failed = False, False + msg, memset_api = None, None + + if zone_exists: + api_method = 'dns.zone_list' + _has_failed, _msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method, payload=payload) + counter = 0 + for zone in response.json(): + if zone['nickname'] == args['name']: + counter += 1 + if counter == 1: + for zone in response.json(): + if zone['nickname'] == args['name']: + zone_id = zone['id'] + domain_count = len(zone['domains']) + record_count = len(zone['records']) + if (domain_count > 0 or record_count > 0) and args['force'] is False: + # we need to fail out if force was not explicitly set. + stderr = 'Zone contains domains or records and force was not used.' + has_failed = True + has_changed = False + module.fail_json(failed=has_failed, changed=has_changed, msg=msg, stderr=stderr, rc=1) + api_method = 'dns.zone_delete' + payload['id'] = zone_id + has_failed, msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method, payload=payload) + if not has_failed: + has_changed = True + # return raw JSON from API in named var and then unset msg var so we aren't returning the same thing twice. + memset_api = msg + msg = None + else: + # zone names are not unique, so we cannot safely delete the requested + # zone at this time. + has_failed = True + has_changed = False + msg = 'Unable to delete zone as multiple zones with the same name exist.' + else: + has_failed, has_changed = False, False + + return(has_failed, has_changed, memset_api, msg) + + +def create_or_delete(args=None): + ''' + We need to perform some initial sanity checking and also look + up required info before handing it off to create or delete. + ''' + retvals, payload = dict(), dict() + has_failed, has_changed = False, False + msg, memset_api, stderr = None, None, None + + # get the zones and check if the relevant zone exists. + api_method = 'dns.zone_list' + _has_failed, _msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method) + if _has_failed: + # this is the first time the API is called; incorrect credentials will + # manifest themselves at this point so we need to ensure the user is + # informed of the reason. 
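+        # (without valid credentials nothing else can usefully run, so we
+        # return immediately rather than attempting further API calls.)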
+ retvals['failed'] = _has_failed + retvals['msg'] = _msg + + return(retvals) + + zone_exists, _msg, counter, _zone_id = get_zone_id(zone_name=args['name'], current_zones=response.json()) + + if args['state'] == 'present': + has_failed, has_changed, memset_api, msg = create_zone(args=args, zone_exists=zone_exists, payload=payload) + + elif args['state'] == 'absent': + has_failed, has_changed, memset_api, msg = delete_zone(args=args, zone_exists=zone_exists, payload=payload) + + retvals['failed'] = has_failed + retvals['changed'] = has_changed + for val in ['msg', 'stderr', 'memset_api']: + if val is not None: + retvals[val] = eval(val) + + return(retvals) + + +def main(): + global module + module = AnsibleModule( + argument_spec=dict( + state=dict(required=True, choices=['present', 'absent'], type='str'), + api_key=dict(required=True, type='str', no_log=True), + name=dict(required=True, aliases=['nickname'], type='str'), + ttl=dict(required=False, default=0, choices=[0, 300, 600, 900, 1800, 3600, 7200, 10800, 21600, 43200, 86400], type='int'), + force=dict(required=False, default=False, type='bool') + ), + supports_check_mode=True + ) + + # populate the dict with the user-provided vars. + args = dict() + for key, arg in module.params.items(): + args[key] = arg + args['check_mode'] = module.check_mode + + # validate some API-specific limitations. + api_validation(args=args) + + if module.check_mode: + retvals = check(args) + else: + retvals = create_or_delete(args) + + if retvals['failed']: + module.fail_json(**retvals) + else: + module.exit_json(**retvals) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/memset/memset_zone_domain.py b/plugins/modules/cloud/memset/memset_zone_domain.py new file mode 100644 index 0000000000..ed58742089 --- /dev/null +++ b/plugins/modules/cloud/memset/memset_zone_domain.py @@ -0,0 +1,268 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (c) 2018, Simon Weald +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +ANSIBLE_METADATA = { + 'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community' +} + +DOCUMENTATION = ''' +--- +module: memset_zone_domain +author: "Simon Weald (@glitchcrab)" +short_description: Create and delete domains in Memset DNS zones. +notes: + - Zone domains can be thought of as a collection of domains, all of which share the + same DNS records (i.e. they point to the same IP). An API key generated via the + Memset customer control panel is needed with the following minimum scope - + I(dns.zone_domain_create), I(dns.zone_domain_delete), I(dns.zone_domain_list). + - Currently this module can only create one domain at a time. Multiple domains should + be created using C(with_items). +description: + - Manage DNS zone domains in a Memset account. +options: + state: + default: present + description: + - Indicates desired state of resource. + choices: [ absent, present ] + api_key: + required: true + description: + - The API key obtained from the Memset control panel. + domain: + required: true + description: + - The zone domain name. Ensure this value has at most 250 characters. + aliases: ['name'] + zone: + required: true + description: + - The zone to add the domain to (this must already exist). 
+'''

+EXAMPLES = '''
+# Create the zone domain 'test.com'
+- name: create zone domain
+  memset_zone_domain:
+    domain: test.com
+    zone: testzone
+    state: present
+    api_key: 5eb86c9196ab03919abcf03857163741
+  delegate_to: localhost
+'''

+RETURN = '''
+memset_api:
+  description: Domain info from the Memset API
+  returned: when changed or state == present
+  type: complex
+  contains:
+    domain:
+      description: Domain name
+      returned: always
+      type: str
+      sample: "example.com"
+    id:
+      description: Domain ID
+      returned: always
+      type: str
+      sample: "b0bb1ce851aeea6feeb2dc32fe83bf9c"
+'''

+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.memset import get_zone_id
+from ansible_collections.community.general.plugins.module_utils.memset import check_zone_domain
+from ansible_collections.community.general.plugins.module_utils.memset import memset_api_call


+def api_validation(args=None):
+    '''
+    Perform some validation which will be enforced by Memset's API (see:
+    https://www.memset.com/apidocs/methods_dns.html#dns.zone_domain_create)
+    '''
+    # zone domain length must be less than 250 chars
+    if len(args['domain']) > 250:
+        stderr = 'Zone domain must be less than 250 characters in length.'
+        module.fail_json(failed=True, msg=stderr)


+def check(args=None):
+    '''
+    Support for running with check mode.
+    '''
+    retvals = dict()
+    has_changed = False

+    api_method = 'dns.zone_domain_list'
+    has_failed, msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method)

+    domain_exists = check_zone_domain(data=response, domain=args['domain'])

+    # set changed to true if the operation would cause a change.
+    has_changed = ((domain_exists and args['state'] == 'absent') or (not domain_exists and args['state'] == 'present'))

+    retvals['changed'] = has_changed
+    retvals['failed'] = has_failed

+    return(retvals)


+def create_zone_domain(args=None, zone_exists=None, zone_id=None, payload=None):
+    '''
+    At this point we already know whether the containing zone exists,
+    so we just need to create the domain (or exit if it already exists).
+    '''
+    has_changed, has_failed = False, False
+    msg = None

+    api_method = 'dns.zone_domain_list'
+    _has_failed, _msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method)

+    for zone_domain in response.json():
+        if zone_domain['domain'] == args['domain']:
+            # zone domain already exists, nothing to change.
+            has_changed = False
+            break
+    else:
+        # we need to create the domain
+        api_method = 'dns.zone_domain_create'
+        payload['domain'] = args['domain']
+        payload['zone_id'] = zone_id
+        has_failed, msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method, payload=payload)
+        if not has_failed:
+            has_changed = True

+    return(has_failed, has_changed, msg)


+def delete_zone_domain(args=None, payload=None):
+    '''
+    Deletion is pretty simple: domains are always unique, so we
+    don't need to do any sanity checking to avoid deleting the
+    wrong thing.
+ ''' + has_changed, has_failed = False, False + msg, memset_api = None, None + + api_method = 'dns.zone_domain_list' + _has_failed, _msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method) + + domain_exists = check_zone_domain(data=response, domain=args['domain']) + + if domain_exists: + api_method = 'dns.zone_domain_delete' + payload['domain'] = args['domain'] + has_failed, msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method, payload=payload) + if not has_failed: + has_changed = True + memset_api = response.json() + # unset msg as we don't want to return unnecessary info to the user. + msg = None + + return(has_failed, has_changed, memset_api, msg) + + +def create_or_delete_domain(args=None): + ''' + We need to perform some initial sanity checking and also look + up required info before handing it off to create or delete. + ''' + retvals, payload = dict(), dict() + has_changed, has_failed = False, False + msg, stderr, memset_api = None, None, None + + # get the zones and check if the relevant zone exists. + api_method = 'dns.zone_list' + has_failed, msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method) + + if has_failed: + # this is the first time the API is called; incorrect credentials will + # manifest themselves at this point so we need to ensure the user is + # informed of the reason. + retvals['failed'] = has_failed + retvals['msg'] = msg + retvals['stderr'] = "API returned an error: {0}" . format(response.status_code) + return(retvals) + + zone_exists, msg, counter, zone_id = get_zone_id(zone_name=args['zone'], current_zones=response.json()) + + if not zone_exists: + # the zone needs to be unique - this isn't a requirement of Memset's API but it + # makes sense in the context of this module. + has_failed = True + if counter == 0: + stderr = "DNS zone '{0}' does not exist, cannot create domain." . format(args['zone']) + elif counter > 1: + stderr = "{0} matches multiple zones, cannot create domain." . format(args['zone']) + + retvals['failed'] = has_failed + retvals['msg'] = stderr + return(retvals) + + if args['state'] == 'present': + has_failed, has_changed, msg = create_zone_domain(args=args, zone_exists=zone_exists, zone_id=zone_id, payload=payload) + + if args['state'] == 'absent': + has_failed, has_changed, memset_api, msg = delete_zone_domain(args=args, payload=payload) + + retvals['changed'] = has_changed + retvals['failed'] = has_failed + for val in ['msg', 'stderr', 'memset_api']: + if val is not None: + retvals[val] = eval(val) + + return(retvals) + + +def main(): + global module + module = AnsibleModule( + argument_spec=dict( + state=dict(default='present', choices=['present', 'absent'], type='str'), + api_key=dict(required=True, type='str', no_log=True), + domain=dict(required=True, aliases=['name'], type='str'), + zone=dict(required=True, type='str') + ), + supports_check_mode=True + ) + + # populate the dict with the user-provided vars. + args = dict() + for key, arg in module.params.items(): + args[key] = arg + args['check_mode'] = module.check_mode + + # validate some API-specific limitations. + api_validation(args=args) + + if module.check_mode: + retvals = check(args) + else: + retvals = create_or_delete_domain(args) + + # we would need to populate the return values with the API's response + # in several places so it's easier to do it at the end instead. 
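+    # a successful 'state: present' run is expected to exit with something
+    # like the following (illustrative shape only, the exact fields come
+    # from dns.zone_domain_info):
+    #   {'changed': True, 'failed': False,
+    #    'memset_api': {'domain': 'test.com', 'id': 'b0bb1ce851aeea6feeb2dc32fe83bf9c'}}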
+    if not retvals['failed']:
+        if args['state'] == 'present' and not module.check_mode:
+            payload = dict()
+            payload['domain'] = args['domain']
+            api_method = 'dns.zone_domain_info'
+            _has_failed, _msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method, payload=payload)
+            retvals['memset_api'] = response.json()

+    if retvals['failed']:
+        module.fail_json(**retvals)
+    else:
+        module.exit_json(**retvals)


+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/cloud/memset/memset_zone_record.py b/plugins/modules/cloud/memset/memset_zone_record.py
new file mode 100644
index 0000000000..1ec2452cb6
--- /dev/null
+++ b/plugins/modules/cloud/memset/memset_zone_record.py
@@ -0,0 +1,377 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2018, Simon Weald
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type

+ANSIBLE_METADATA = {
+    'metadata_version': '1.1',
+    'status': ['preview'],
+    'supported_by': 'community'
+}

+DOCUMENTATION = '''
+---
+module: memset_zone_record
+author: "Simon Weald (@glitchcrab)"
+short_description: Create and delete records in Memset DNS zones.
+notes:
+  - Zones can be thought of as a logical group of domains, all of which share the
+    same DNS records (i.e. they point to the same IP). An API key generated via the
+    Memset customer control panel is needed with the following minimum scope -
+    I(dns.zone_list), I(dns.zone_record_create), I(dns.zone_record_delete), I(dns.zone_record_list).
+  - Currently this module can only create one DNS record at a time. Multiple records
+    should be created using C(with_items).
+description:
+    - Manage DNS records in a Memset account.
+options:
+    state:
+        default: present
+        description:
+            - Indicates desired state of resource.
+        choices: [ absent, present ]
+    api_key:
+        required: true
+        description:
+            - The API key obtained from the Memset control panel.
+    address:
+        required: true
+        description:
+            - The address for this record (can be IP or text string depending on record type).
+        aliases: [ ip, data ]
+    priority:
+        description:
+            - C(SRV) and C(TXT) record priority, in the range 0 to 999 (inclusive).
+    record:
+        required: false
+        description:
+            - The subdomain to create.
+    type:
+        required: true
+        description:
+            - The type of DNS record to create.
+        choices: [ A, AAAA, CNAME, MX, NS, SRV, TXT ]
+    relative:
+        type: bool
+        description:
+            - If set then the current domain is added onto the address field for C(CNAME), C(MX), C(NS)
+              and C(SRV) record types.
+    ttl:
+        description:
+            - The record's TTL in seconds (will inherit zone's TTL if not explicitly set). This must be a
+              valid int from U(https://www.memset.com/apidocs/methods_dns.html#dns.zone_record_create).
+        choices: [ 0, 300, 600, 900, 1800, 3600, 7200, 10800, 21600, 43200, 86400 ]
+    zone:
+        required: true
+        description:
+            - The name of the zone to which to add the record.
+'''

+EXAMPLES = '''
+# Create DNS record for www.domain.com
+- name: create DNS record
+  memset_zone_record:
+    api_key: dcf089a2896940da9ffefb307ef49ccd
+    state: present
+    zone: domain.com
+    type: A
+    record: www
+    address: 1.2.3.4
+    ttl: 300
+    relative: false
+  delegate_to: localhost

+# create an SPF record for domain.com
+- name: create SPF record for domain.com
+  memset_zone_record:
+    api_key: dcf089a2896940da9ffefb307ef49ccd
+    state: present
+    zone: domain.com
+    type: TXT
+    address: "v=spf1 +a +mx +ip4:1.2.3.4 ?all"
+  delegate_to: localhost

+# create multiple DNS records
+- name: create multiple DNS records
+  memset_zone_record:
+    api_key: dcf089a2896940da9ffefb307ef49ccd
+    zone: "{{ item.zone }}"
+    type: "{{ item.type }}"
+    record: "{{ item.record }}"
+    address: "{{ item.address }}"
+  delegate_to: localhost
+  with_items:
+    - { 'zone': 'domain1.com', 'type': 'A', 'record': 'www', 'address': '1.2.3.4' }
+    - { 'zone': 'domain2.com', 'type': 'A', 'record': 'mail', 'address': '4.3.2.1' }
+'''

+RETURN = '''
+memset_api:
+  description: Record info from the Memset API.
+  returned: when state == present
+  type: complex
+  contains:
+    address:
+      description: Record content (may be an IP, string or blank depending on record type).
+      returned: always
+      type: str
+      sample: 1.1.1.1
+    id:
+      description: Record ID.
+      returned: always
+      type: str
+      sample: "b0bb1ce851aeea6feeb2dc32fe83bf9c"
+    priority:
+      description: Priority for C(MX) and C(SRV) records.
+      returned: always
+      type: int
+      sample: 10
+    record:
+      description: Name of record.
+      returned: always
+      type: str
+      sample: "www"
+    relative:
+      description: Adds the current domain onto the address field for C(CNAME), C(MX), C(NS) and C(SRV) types.
+      returned: always
+      type: bool
+      sample: False
+    ttl:
+      description: Record TTL.
+      returned: always
+      type: int
+      sample: 10
+    type:
+      description: Record type.
+      returned: always
+      type: str
+      sample: AAAA
+    zone_id:
+      description: Zone ID.
+      returned: always
+      type: str
+      sample: "b0bb1ce851aeea6feeb2dc32fe83bf9c"
+'''

+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.memset import get_zone_id
+from ansible_collections.community.general.plugins.module_utils.memset import memset_api_call


+def api_validation(args=None):
+    '''
+    Perform some validation which will be enforced by Memset's API (see:
+    https://www.memset.com/apidocs/methods_dns.html#dns.zone_record_create)
+    '''
+    failed_validation = False

+    # priority must be an integer between 0 and 999 (inclusive)
+    if not 0 <= args['priority'] <= 999:
+        failed_validation = True
+        error = 'Priority must be in the range 0 to 999 (inclusive).'
+    # address value must be max 250 chars
+    if len(args['address']) > 250:
+        failed_validation = True
+        error = "Address must be less than 250 characters in length."
+    # record value must be max 63 chars
+    if args['record']:
+        if len(args['record']) > 63:
+            failed_validation = True
+            error = "Record must be less than 63 characters in length."
+    # relative isn't used for all record types
+    if args['relative']:
+        if args['type'] not in ['CNAME', 'MX', 'NS', 'SRV']:
+            failed_validation = True
+            error = "Relative is only valid for CNAME, MX, NS and SRV record types."
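+    # (note: 'error' is overwritten by each failed check, so only the last
+    # failure encountered is reported back to the user.)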
+ # if any of the above failed then fail early + if failed_validation: + module.fail_json(failed=True, msg=error) + + +def create_zone_record(args=None, zone_id=None, records=None, payload=None): + ''' + Sanity checking has already occurred prior to this function being + called, so we can go ahead and either create or update the record. + As defaults are defined for all values in the argument_spec, this + may cause some changes to occur as the defaults are enforced (if + the user has only configured required variables). + ''' + has_changed, has_failed = False, False + msg, memset_api = None, None + + # assemble the new record. + new_record = dict() + new_record['zone_id'] = zone_id + for arg in ['priority', 'address', 'relative', 'record', 'ttl', 'type']: + new_record[arg] = args[arg] + + # if we have any matches, update them. + if records: + for zone_record in records: + # record exists, add ID to payload. + new_record['id'] = zone_record['id'] + if zone_record == new_record: + # nothing to do; record is already correct so we populate + # the return var with the existing record's details. + memset_api = zone_record + return(has_changed, has_failed, memset_api, msg) + else: + # merge dicts ensuring we change any updated values + payload = zone_record.copy() + payload.update(new_record) + api_method = 'dns.zone_record_update' + if args['check_mode']: + has_changed = True + # return the new record to the user in the returned var. + memset_api = new_record + return(has_changed, has_failed, memset_api, msg) + has_failed, msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method, payload=payload) + if not has_failed: + has_changed = True + memset_api = new_record + # empty msg as we don't want to return a boatload of json to the user. + msg = None + else: + # no record found, so we need to create it + api_method = 'dns.zone_record_create' + payload = new_record + if args['check_mode']: + has_changed = True + # populate the return var with the new record's details. + memset_api = new_record + return(has_changed, has_failed, memset_api, msg) + has_failed, msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method, payload=payload) + if not has_failed: + has_changed = True + memset_api = new_record + # empty msg as we don't want to return a boatload of json to the user. + msg = None + + return(has_changed, has_failed, memset_api, msg) + + +def delete_zone_record(args=None, records=None, payload=None): + ''' + Matching records can be cleanly deleted without affecting other + resource types, so this is pretty simple to achieve. + ''' + has_changed, has_failed = False, False + msg, memset_api = None, None + + # if we have any matches, delete them. + if records: + for zone_record in records: + if args['check_mode']: + has_changed = True + return(has_changed, has_failed, memset_api, msg) + payload['id'] = zone_record['id'] + api_method = 'dns.zone_record_delete' + has_failed, msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method, payload=payload) + if not has_failed: + has_changed = True + memset_api = zone_record + # empty msg as we don't want to return a boatload of json to the user. + msg = None + + return(has_changed, has_failed, memset_api, msg) + + +def create_or_delete(args=None): + ''' + We need to perform some initial sanity checking and also look + up required info before handing it off to create or delete functions. + Check mode is integrated into the create or delete functions. 
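+    Existing records are matched on zone ID, record name and record type.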
+ ''' + has_failed, has_changed = False, False + msg, memset_api, stderr = None, None, None + retvals, payload = dict(), dict() + + # get the zones and check if the relevant zone exists. + api_method = 'dns.zone_list' + _has_failed, msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method) + + if _has_failed: + # this is the first time the API is called; incorrect credentials will + # manifest themselves at this point so we need to ensure the user is + # informed of the reason. + retvals['failed'] = _has_failed + retvals['msg'] = msg + retvals['stderr'] = "API returned an error: {0}" . format(response.status_code) + return(retvals) + + zone_exists, _msg, counter, zone_id = get_zone_id(zone_name=args['zone'], current_zones=response.json()) + + if not zone_exists: + has_failed = True + if counter == 0: + stderr = "DNS zone {0} does not exist." . format(args['zone']) + elif counter > 1: + stderr = "{0} matches multiple zones." . format(args['zone']) + retvals['failed'] = has_failed + retvals['msg'] = stderr + retvals['stderr'] = stderr + return(retvals) + + # get a list of all records ( as we can't limit records by zone) + api_method = 'dns.zone_record_list' + _has_failed, _msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method) + + # find any matching records + records = [record for record in response.json() if record['zone_id'] == zone_id + and record['record'] == args['record'] and record['type'] == args['type']] + + if args['state'] == 'present': + has_changed, has_failed, memset_api, msg = create_zone_record(args=args, zone_id=zone_id, records=records, payload=payload) + + if args['state'] == 'absent': + has_changed, has_failed, memset_api, msg = delete_zone_record(args=args, records=records, payload=payload) + + retvals['changed'] = has_changed + retvals['failed'] = has_failed + for val in ['msg', 'stderr', 'memset_api']: + if val is not None: + retvals[val] = eval(val) + + return(retvals) + + +def main(): + global module + module = AnsibleModule( + argument_spec=dict( + state=dict(required=False, default='present', choices=['present', 'absent'], type='str'), + api_key=dict(required=True, type='str', no_log=True), + zone=dict(required=True, type='str'), + type=dict(required=True, choices=['A', 'AAAA', 'CNAME', 'MX', 'NS', 'SRV', 'TXT'], type='str'), + address=dict(required=True, aliases=['ip', 'data'], type='str'), + record=dict(required=False, default='', type='str'), + ttl=dict(required=False, default=0, choices=[0, 300, 600, 900, 1800, 3600, 7200, 10800, 21600, 43200, 86400], type='int'), + priority=dict(required=False, default=0, type='int'), + relative=dict(required=False, default=False, type='bool') + ), + supports_check_mode=True + ) + + # populate the dict with the user-provided vars. 
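+    # (module.params is copied to a plain dict so that check_mode can be
+    # carried alongside the user-supplied values in a single structure.)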
+ args = dict() + for key, arg in module.params.items(): + args[key] = arg + args['check_mode'] = module.check_mode + + # perform some Memset API-specific validation + api_validation(args=args) + + retvals = create_or_delete(args) + + if retvals['failed']: + module.fail_json(**retvals) + else: + module.exit_json(**retvals) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/misc/cloud_init_data_facts.py b/plugins/modules/cloud/misc/cloud_init_data_facts.py new file mode 100644 index 0000000000..3a278d5286 --- /dev/null +++ b/plugins/modules/cloud/misc/cloud_init_data_facts.py @@ -0,0 +1,133 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# (c) 2018, René Moser +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: cloud_init_data_facts +short_description: Retrieve facts of cloud-init. +description: + - Gathers facts by reading the status.json and result.json of cloud-init. +author: René Moser (@resmo) +options: + filter: + description: + - Filter facts + choices: [ status, result ] +notes: + - See http://cloudinit.readthedocs.io/ for more information about cloud-init. +''' + +EXAMPLES = ''' +- name: Gather all facts of cloud init + cloud_init_data_facts: + register: result + +- debug: + var: result + +- name: Wait for cloud init to finish + cloud_init_data_facts: + filter: status + register: res + until: "res.cloud_init_data_facts.status.v1.stage is defined and not res.cloud_init_data_facts.status.v1.stage" + retries: 50 + delay: 5 +''' + +RETURN = ''' +--- +cloud_init_data_facts: + description: Facts of result and status. 
+  returned: success
+  type: dict
+  sample: '{
+    "status": {
+        "v1": {
+            "datasource": "DataSourceCloudStack",
+            "errors": []
+        }
+    },
+    "result": {
+        "v1": {
+            "datasource": "DataSourceCloudStack",
+            "init": {
+                "errors": [],
+                "finished": 1522066377.0185432,
+                "start": 1522066375.2648022
+            },
+            "init-local": {
+                "errors": [],
+                "finished": 1522066373.70919,
+                "start": 1522066373.4726632
+            },
+            "modules-config": {
+                "errors": [],
+                "finished": 1522066380.9097016,
+                "start": 1522066379.0011985
+            },
+            "modules-final": {
+                "errors": [],
+                "finished": 1522066383.56594,
+                "start": 1522066382.3449218
+            },
+            "stage": null
+        }
+    }
+  }'
+'''

+import os

+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_text


+CLOUD_INIT_PATH = "/var/lib/cloud/data/"


+def gather_cloud_init_data_facts(module):
+    res = {
+        'cloud_init_data_facts': dict()
+    }

+    for i in ['result', 'status']:
+        filter = module.params.get('filter')
+        if filter is None or filter == i:
+            res['cloud_init_data_facts'][i] = dict()
+            json_file = CLOUD_INIT_PATH + i + '.json'

+            if os.path.exists(json_file):
+                f = open(json_file, 'rb')
+                contents = to_text(f.read(), errors='surrogate_or_strict')
+                f.close()

+                if contents:
+                    res['cloud_init_data_facts'][i] = module.from_json(contents)
+    return res


+def main():
+    module = AnsibleModule(
+        argument_spec=dict(
+            filter=dict(choices=['result', 'status']),
+        ),
+        supports_check_mode=True,
+    )

+    facts = gather_cloud_init_data_facts(module)
+    result = dict(changed=False, ansible_facts=facts, **facts)
+    module.exit_json(**result)


+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/cloud/misc/helm.py b/plugins/modules/cloud/misc/helm.py
new file mode 100644
index 0000000000..a46b79f263
--- /dev/null
+++ b/plugins/modules/cloud/misc/helm.py
@@ -0,0 +1,209 @@
+#!/usr/bin/python
+# (c) 2016, Flavio Percoco
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

+from __future__ import absolute_import, division, print_function
+__metaclass__ = type


+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}

+DOCUMENTATION = '''
+---
+module: helm
+short_description: Manages Kubernetes packages with the Helm package manager
+author: "Flavio Percoco (@flaper87)"
+description:
+  - Install, upgrade, delete and list packages with the Helm package manager.
+requirements:
+  - "pyhelm"
+  - "grpcio"
+options:
+  host:
+    description:
+      - Tiller's server host.
+    default: "localhost"
+  port:
+    description:
+      - Tiller's server port.
+    default: 44134
+  namespace:
+    description:
+      - Kubernetes namespace where the chart should be installed.
+    default: "default"
+  name:
+    description:
+      - Release name to manage.
+  state:
+    description:
+      - Whether to install C(present), remove C(absent), or purge C(purged) a package.
+    choices: ['absent', 'purged', 'present']
+    default: "present"
+  chart:
+    description: |
+      A map describing the chart to install. See examples for available options.
+    default: {}
+  values:
+    description:
+      - A map of value options for the chart.
+    default: {}
+  disable_hooks:
+    description:
+      - Whether to disable hooks during the uninstall process.
+    type: bool
+    default: 'no'
+'''

+RETURN = ''' # '''

+EXAMPLES = '''
+- name: Install helm chart
+  helm:
+    host: localhost
+    chart:
+      name: memcached
+      version: 0.4.0
+      source:
+        type: repo
+        location: https://kubernetes-charts.storage.googleapis.com
+    state: present
+    name: my-memcached
+    namespace: default

+- name: Uninstall helm chart
+  helm:
+    host: localhost
+    state: absent
+    name: my-memcached

+- name: Install helm chart from a git repo
+  helm:
+    host: localhost
+    chart:
+      source:
+        type: git
+        location: https://github.com/user/helm-chart.git
+    state: present
+    name: my-example
+    namespace: default
+    values:
+      foo: "bar"

+- name: Install helm chart from a git repo specifying path
+  helm:
+    host: localhost
+    chart:
+      source:
+        type: git
+        location: https://github.com/helm/charts.git
+      path: stable/memcached
+    state: present
+    name: my-memcached
+    namespace: default
+    values: "{{ lookup('file', '/path/to/file/values.yaml') | from_yaml }}"
+'''

+import traceback
+HELM_IMPORT_ERR = None
+try:
+    import grpc
+    from pyhelm import tiller
+    from pyhelm import chartbuilder
+except ImportError:
+    HELM_IMPORT_ERR = traceback.format_exc()

+from ansible.module_utils.basic import AnsibleModule, missing_required_lib


+def install(module, tserver):
+    changed = False
+    params = module.params
+    name = params['name']
+    values = params['values']
+    chart = module.params['chart']
+    namespace = module.params['namespace']

+    chartb = chartbuilder.ChartBuilder(chart)
+    r_matches = (x for x in tserver.list_releases()
+                 if x.name == name and x.namespace == namespace)
+    installed_release = next(r_matches, None)
+    if installed_release:
+        if installed_release.chart.metadata.version != chart['version']:
+            tserver.update_release(chartb.get_helm_chart(), False,
+                                   namespace, name=name, values=values)
+            changed = True
+    else:
+        tserver.install_release(chartb.get_helm_chart(), namespace,
+                                dry_run=False, name=name,
+                                values=values)
+        changed = True

+    return dict(changed=changed)


+def delete(module, tserver, purge=False):
+    changed = False
+    params = module.params

+    if not module.params['name']:
+        module.fail_json(msg='Missing required field name')

+    name = module.params['name']
+    disable_hooks = params['disable_hooks']

+    try:
+        tserver.uninstall_release(name, disable_hooks, purge)
+        changed = True
+    except grpc._channel._Rendezvous as exc:
+        if 'not found' not in str(exc):
+            raise exc

+    return dict(changed=changed)


+def main():
+    """The main function."""
+    module = AnsibleModule(
+        argument_spec=dict(
+            host=dict(type='str', default='localhost'),
+            port=dict(type='int', default=44134),
+            name=dict(type='str', default=''),
+            chart=dict(type='dict'),
+            state=dict(
+                choices=['absent', 'purged', 'present'],
+                default='present'
+            ),
+            # Install options
+            values=dict(type='dict'),
+            namespace=dict(type='str', default='default'),

+            # Uninstall options
+            disable_hooks=dict(type='bool', default=False),
+        ),
+        supports_check_mode=True)

+    if HELM_IMPORT_ERR:
+        module.fail_json(msg=missing_required_lib('pyhelm'), exception=HELM_IMPORT_ERR)

+    host = module.params['host']
+    port = module.params['port']
+    state = module.params['state']
+    tserver = tiller.Tiller(host, port)

+    if state == 'present':
+        rst = install(module, tserver)

+    if state == 'absent':
+        rst = delete(module, tserver)

+    if state == 'purged':
+        rst = delete(module, tserver, True)

+    module.exit_json(**rst)


+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/cloud/misc/ovirt.py
b/plugins/modules/cloud/misc/ovirt.py new file mode 100644 index 0000000000..25cdc25b40 --- /dev/null +++ b/plugins/modules/cloud/misc/ovirt.py @@ -0,0 +1,475 @@ +#!/usr/bin/python + +# Copyright: (c) 2013, Vincent Van der Kussen +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['deprecated'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: ovirt +author: +- Vincent Van der Kussen (@vincentvdk) +short_description: oVirt/RHEV platform management +deprecated: + removed_in: "2.10" + why: This module is for deprecated version of ovirt. + alternative: Use M(ovirt_vm) instead +description: + - This module only supports oVirt/RHEV version 3. A newer module M(ovirt_vm) supports oVirt/RHV version 4. + - Allows you to create new instances, either from scratch or an image, in addition to deleting or stopping instances on the oVirt/RHEV platform. +options: + user: + description: + - The user to authenticate with. + required: true + url: + description: + - The url of the oVirt instance. + required: true + instance_name: + description: + - The name of the instance to use. + required: true + aliases: [ vmname ] + password: + description: + - Password of the user to authenticate with. + required: true + image: + description: + - The template to use for the instance. + resource_type: + description: + - Whether you want to deploy an image or create an instance from scratch. + choices: [ new, template ] + zone: + description: + - Deploy the image to this oVirt cluster. + instance_disksize: + description: + - Size of the instance's disk in GB. + aliases: [ vm_disksize] + instance_cpus: + description: + - The instance's number of CPUs. + default: 1 + aliases: [ vmcpus ] + instance_nic: + description: + - The name of the network interface in oVirt/RHEV. + aliases: [ vmnic ] + instance_network: + description: + - The logical network the machine should belong to. + default: rhevm + aliases: [ vmnetwork ] + instance_mem: + description: + - The instance's amount of memory in MB. + aliases: [ vmmem ] + instance_type: + description: + - Define whether the instance is a server, desktop or high_performance. + - I(high_performance) is supported since Ansible 2.5 and oVirt/RHV 4.2. + choices: [ desktop, server, high_performance ] + default: server + aliases: [ vmtype ] + disk_alloc: + description: + - Define whether disk is thin or preallocated. + choices: [ preallocated, thin ] + default: thin + disk_int: + description: + - Interface type of the disk. + choices: [ ide, virtio ] + default: virtio + instance_os: + description: + - Type of Operating System. + aliases: [ vmos ] + instance_cores: + description: + - Define the instance's number of cores. + default: 1 + aliases: [ vmcores ] + sdomain: + description: + - The Storage Domain where you want to create the instance's disk on. + region: + description: + - The oVirt/RHEV datacenter where you want to deploy to. + instance_dns: + description: + - Define the instance's Primary DNS server. + aliases: [ dns ] + instance_domain: + description: + - Define the instance's Domain. + aliases: [ domain ] + instance_hostname: + description: + - Define the instance's Hostname. + aliases: [ hostname ] + instance_ip: + description: + - Define the instance's IP. + aliases: [ ip ] + instance_netmask: + description: + - Define the instance's Netmask. 
+ aliases: [ netmask ] + instance_rootpw: + description: + - Define the instance's Root password. + aliases: [ rootpw ] + instance_key: + description: + - Define the instance's Authorized key. + aliases: [ key ] + state: + description: + - Create, terminate or remove instances. + choices: [ absent, present, restarted, shutdown, started ] + default: present +requirements: + - ovirt-engine-sdk-python +''' + +EXAMPLES = ''' +- name: Basic example to provision from image + ovirt: + user: admin@internal + url: https://ovirt.example.com + instance_name: ansiblevm04 + password: secret + image: centos_64 + zone: cluster01 + resource_type: template + +- name: Full example to create new instance from scratch + ovirt: + instance_name: testansible + resource_type: new + instance_type: server + user: admin@internal + password: secret + url: https://ovirt.example.com + instance_disksize: 10 + zone: cluster01 + region: datacenter1 + instance_cpus: 1 + instance_nic: nic1 + instance_network: rhevm + instance_mem: 1000 + disk_alloc: thin + sdomain: FIBER01 + instance_cores: 1 + instance_os: rhel_6x64 + disk_int: virtio + +- name: Stopping an existing instance + ovirt: + instance_name: testansible + state: stopped + user: admin@internal + password: secret + url: https://ovirt.example.com + +- name: Start an existing instance + ovirt: + instance_name: testansible + state: started + user: admin@internal + password: secret + url: https://ovirt.example.com + +- name: Start an instance with cloud init information + ovirt: + instance_name: testansible + state: started + user: admin@internal + password: secret + url: https://ovirt.example.com + hostname: testansible + domain: ansible.local + ip: 192.0.2.100 + netmask: 255.255.255.0 + gateway: 192.0.2.1 + rootpw: bigsecret +''' + +import time + +try: + from ovirtsdk.api import API + from ovirtsdk.xml import params + HAS_OVIRTSDK = True +except ImportError: + HAS_OVIRTSDK = False + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.common.removed import removed_module + + +# ------------------------------------------------------------------- # +# create connection with API +# +def conn(url, user, password): + api = API(url=url, username=user, password=password, insecure=True) + try: + value = api.test() + except Exception: + raise Exception("error connecting to the oVirt API") + return api + + +# ------------------------------------------------------------------- # +# Create VM from scratch +def create_vm(conn, vmtype, vmname, zone, vmdisk_size, vmcpus, vmnic, vmnetwork, vmmem, vmdisk_alloc, sdomain, vmcores, vmos, vmdisk_int): + if vmdisk_alloc == 'thin': + # define VM params + vmparams = params.VM(name=vmname, cluster=conn.clusters.get(name=zone), os=params.OperatingSystem(type_=vmos), + template=conn.templates.get(name="Blank"), memory=1024 * 1024 * int(vmmem), + cpu=params.CPU(topology=params.CpuTopology(cores=int(vmcores), sockets=vmcpus)), type_=vmtype) + # define disk params + vmdisk = params.Disk(size=1024 * 1024 * 1024 * int(vmdisk_size), wipe_after_delete=True, sparse=True, interface=vmdisk_int, type_="System", + format='cow', + storage_domains=params.StorageDomains(storage_domain=[conn.storagedomains.get(name=sdomain)])) + # define network parameters + network_net = params.Network(name=vmnetwork) + nic_net1 = params.NIC(name='nic1', network=network_net, interface='virtio') + elif vmdisk_alloc == 'preallocated': + # define VM params + vmparams = params.VM(name=vmname, cluster=conn.clusters.get(name=zone), 
os=params.OperatingSystem(type_=vmos), + template=conn.templates.get(name="Blank"), memory=1024 * 1024 * int(vmmem), + cpu=params.CPU(topology=params.CpuTopology(cores=int(vmcores), sockets=vmcpus)), type_=vmtype) + # define disk params + vmdisk = params.Disk(size=1024 * 1024 * 1024 * int(vmdisk_size), wipe_after_delete=True, sparse=False, interface=vmdisk_int, type_="System", + format='raw', storage_domains=params.StorageDomains(storage_domain=[conn.storagedomains.get(name=sdomain)])) + # define network parameters + network_net = params.Network(name=vmnetwork) + nic_net1 = params.NIC(name=vmnic, network=network_net, interface='virtio') + + try: + conn.vms.add(vmparams) + except Exception: + raise Exception("Error creating VM with specified parameters") + vm = conn.vms.get(name=vmname) + try: + vm.disks.add(vmdisk) + except Exception: + raise Exception("Error attaching disk") + try: + vm.nics.add(nic_net1) + except Exception: + raise Exception("Error adding nic") + + +# create an instance from a template +def create_vm_template(conn, vmname, image, zone): + vmparams = params.VM(name=vmname, cluster=conn.clusters.get(name=zone), template=conn.templates.get(name=image), disks=params.Disks(clone=True)) + try: + conn.vms.add(vmparams) + except Exception: + raise Exception('error adding template %s' % image) + + +# start instance +def vm_start(conn, vmname, hostname=None, ip=None, netmask=None, gateway=None, + domain=None, dns=None, rootpw=None, key=None): + vm = conn.vms.get(name=vmname) + use_cloud_init = False + nics = None + nic = None + if hostname or ip or netmask or gateway or domain or dns or rootpw or key: + use_cloud_init = True + if ip and netmask and gateway: + ipinfo = params.IP(address=ip, netmask=netmask, gateway=gateway) + nic = params.GuestNicConfiguration(name='eth0', boot_protocol='STATIC', ip=ipinfo, on_boot=True) + nics = params.Nics() + nics = params.GuestNicsConfiguration(nic_configuration=[nic]) + initialization = params.Initialization(regenerate_ssh_keys=True, host_name=hostname, domain=domain, user_name='root', + root_password=rootpw, nic_configurations=nics, dns_servers=dns, + authorized_ssh_keys=key) + action = params.Action(use_cloud_init=use_cloud_init, vm=params.VM(initialization=initialization)) + vm.start(action=action) + + +# Stop instance +def vm_stop(conn, vmname): + vm = conn.vms.get(name=vmname) + vm.stop() + + +# restart instance +def vm_restart(conn, vmname): + state = vm_status(conn, vmname) + vm = conn.vms.get(name=vmname) + vm.stop() + while conn.vms.get(vmname).get_status().get_state() != 'down': + time.sleep(5) + vm.start() + + +# remove an instance +def vm_remove(conn, vmname): + vm = conn.vms.get(name=vmname) + vm.delete() + + +# ------------------------------------------------------------------- # +# VM statuses +# +# Get the VMs status +def vm_status(conn, vmname): + status = conn.vms.get(name=vmname).status.state + return status + + +# Get VM object and return it's name if object exists +def get_vm(conn, vmname): + vm = conn.vms.get(name=vmname) + if vm is None: + name = "empty" + else: + name = vm.get_name() + return name + +# ------------------------------------------------------------------- # +# Hypervisor operations +# +# not available yet +# ------------------------------------------------------------------- # +# Main + + +def main(): + module = AnsibleModule( + argument_spec=dict( + state=dict(type='str', default='present', choices=['absent', 'present', 'restart', 'shutdown', 'started']), + user=dict(type='str', required=True), + 
url=dict(type='str', required=True), + instance_name=dict(type='str', required=True, aliases=['vmname']), + password=dict(type='str', required=True, no_log=True), + image=dict(type='str'), + resource_type=dict(type='str', choices=['new', 'template']), + zone=dict(type='str'), + instance_disksize=dict(type='str', aliases=['vm_disksize']), + instance_cpus=dict(type='str', default=1, aliases=['vmcpus']), + instance_nic=dict(type='str', aliases=['vmnic']), + instance_network=dict(type='str', default='rhevm', aliases=['vmnetwork']), + instance_mem=dict(type='str', aliases=['vmmem']), + instance_type=dict(type='str', default='server', aliases=['vmtype'], choices=['desktop', 'server', 'high_performance']), + disk_alloc=dict(type='str', default='thin', choices=['preallocated', 'thin']), + disk_int=dict(type='str', default='virtio', choices=['ide', 'virtio']), + instance_os=dict(type='str', aliases=['vmos']), + instance_cores=dict(type='str', default=1, aliases=['vmcores']), + instance_hostname=dict(type='str', aliases=['hostname']), + instance_ip=dict(type='str', aliases=['ip']), + instance_netmask=dict(type='str', aliases=['netmask']), + instance_gateway=dict(type='str', aliases=['gateway']), + instance_domain=dict(type='str', aliases=['domain']), + instance_dns=dict(type='str', aliases=['dns']), + instance_rootpw=dict(type='str', aliases=['rootpw']), + instance_key=dict(type='str', aliases=['key']), + sdomain=dict(type='str'), + region=dict(type='str'), + ), + ) + + if not HAS_OVIRTSDK: + module.fail_json(msg='ovirtsdk required for this module') + + state = module.params['state'] + user = module.params['user'] + url = module.params['url'] + vmname = module.params['instance_name'] + password = module.params['password'] + image = module.params['image'] # name of the image to deploy + resource_type = module.params['resource_type'] # template or from scratch + zone = module.params['zone'] # oVirt cluster + vmdisk_size = module.params['instance_disksize'] # disksize + vmcpus = module.params['instance_cpus'] # number of cpu + vmnic = module.params['instance_nic'] # network interface + vmnetwork = module.params['instance_network'] # logical network + vmmem = module.params['instance_mem'] # mem size + vmdisk_alloc = module.params['disk_alloc'] # thin, preallocated + vmdisk_int = module.params['disk_int'] # disk interface virtio or ide + vmos = module.params['instance_os'] # Operating System + vmtype = module.params['instance_type'] # server, desktop or high_performance + vmcores = module.params['instance_cores'] # number of cores + sdomain = module.params['sdomain'] # storage domain to store disk on + region = module.params['region'] # oVirt Datacenter + hostname = module.params['instance_hostname'] + ip = module.params['instance_ip'] + netmask = module.params['instance_netmask'] + gateway = module.params['instance_gateway'] + domain = module.params['instance_domain'] + dns = module.params['instance_dns'] + rootpw = module.params['instance_rootpw'] + key = module.params['instance_key'] + # initialize connection + try: + c = conn(url + "/api", user, password) + except Exception as e: + module.fail_json(msg='%s' % e) + + if state == 'present': + if get_vm(c, vmname) == "empty": + if resource_type == 'template': + try: + create_vm_template(c, vmname, image, zone) + except Exception as e: + module.fail_json(msg='%s' % e) + module.exit_json(changed=True, msg="deployed VM %s from template %s" % (vmname, image)) + elif resource_type == 'new': + # FIXME: refactor, use keyword args. 
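+                # a keyword-argument call might look like (sketch only, untested):
+                #   create_vm(c, vmtype=vmtype, vmname=vmname, zone=zone,
+                #             vmdisk_size=vmdisk_size, vmcpus=vmcpus, vmnic=vmnic,
+                #             vmnetwork=vmnetwork, vmmem=vmmem, vmdisk_alloc=vmdisk_alloc,
+                #             sdomain=sdomain, vmcores=vmcores, vmos=vmos, vmdisk_int=vmdisk_int)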
+ try: + create_vm(c, vmtype, vmname, zone, vmdisk_size, vmcpus, vmnic, vmnetwork, vmmem, vmdisk_alloc, sdomain, vmcores, vmos, vmdisk_int) + except Exception as e: + module.fail_json(msg='%s' % e) + module.exit_json(changed=True, msg="deployed VM %s from scratch" % vmname) + else: + module.exit_json(changed=False, msg="You did not specify a resource type") + else: + module.exit_json(changed=False, msg="VM %s already exists" % vmname) + + if state == 'started': + if vm_status(c, vmname) == 'up': + module.exit_json(changed=False, msg="VM %s is already running" % vmname) + else: + # vm_start(c, vmname) + vm_start(c, vmname, hostname, ip, netmask, gateway, domain, dns, rootpw, key) + module.exit_json(changed=True, msg="VM %s started" % vmname) + + if state == 'shutdown': + if vm_status(c, vmname) == 'down': + module.exit_json(changed=False, msg="VM %s is already shutdown" % vmname) + else: + vm_stop(c, vmname) + module.exit_json(changed=True, msg="VM %s is shutting down" % vmname) + + if state == 'restart': + if vm_status(c, vmname) == 'up': + vm_restart(c, vmname) + module.exit_json(changed=True, msg="VM %s is restarted" % vmname) + else: + module.exit_json(changed=False, msg="VM %s is not running" % vmname) + + if state == 'absent': + if get_vm(c, vmname) == "empty": + module.exit_json(changed=False, msg="VM %s does not exist" % vmname) + else: + vm_remove(c, vmname) + module.exit_json(changed=True, msg="VM %s removed" % vmname) + + +if __name__ == '__main__': + removed_module("2.10") diff --git a/plugins/modules/cloud/misc/proxmox.py b/plugins/modules/cloud/misc/proxmox.py new file mode 100644 index 0000000000..108cf33886 --- /dev/null +++ b/plugins/modules/cloud/misc/proxmox.py @@ -0,0 +1,614 @@ +#!/usr/bin/python +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: proxmox +short_description: management of instances in Proxmox VE cluster +description: + - allows you to create/delete/stop instances in Proxmox VE cluster + - Starting in Ansible 2.1, it automatically detects containerization type (lxc for PVE 4, openvz for older) +options: + api_host: + description: + - the host of the Proxmox VE cluster + required: true + api_user: + description: + - the user to authenticate with + required: true + api_password: + description: + - the password to authenticate with + - you can use PROXMOX_PASSWORD environment variable + vmid: + description: + - the instance id + - if not set, the next available VM ID will be fetched from ProxmoxAPI. 
+      - if not set, it will be fetched from the ProxmoxAPI based on the hostname
+  validate_certs:
+    description:
+      - enable/disable HTTPS certificate verification
+    type: bool
+    default: 'no'
+  node:
+    description:
+      - Proxmox VE node on which the new VM will be created
+      - required only for C(state=present)
+      - for other states it will be autodiscovered
+  pool:
+    description:
+      - Proxmox VE resource pool
+  password:
+    description:
+      - the instance root password
+      - required only for C(state=present)
+  hostname:
+    description:
+      - the instance hostname
+      - required only for C(state=present)
+      - must be unique if vmid is not passed
+  ostemplate:
+    description:
+      - the template used for creating the VM
+      - required only for C(state=present)
+  disk:
+    description:
+      - hard disk size in GB for instance
+    default: 3
+  cores:
+    description:
+      - Specify number of cores per socket.
+    default: 1
+  cpus:
+    description:
+      - number of allocated cpus for instance
+    default: 1
+  memory:
+    description:
+      - memory size in MB for instance
+    default: 512
+  swap:
+    description:
+      - swap memory size in MB for instance
+    default: 0
+  netif:
+    description:
+      - specifies network interfaces for the container. As a hash/dictionary defining interfaces.
+  mounts:
+    description:
+      - specifies additional mounts (separate disks) for the container. As a hash/dictionary defining mount points
+  ip_address:
+    description:
+      - specifies the address the container will be assigned
+  onboot:
+    description:
+      - specifies whether a VM will be started during system bootup
+    type: bool
+    default: 'no'
+  storage:
+    description:
+      - target storage
+    default: 'local'
+  cpuunits:
+    description:
+      - CPU weight for a VM
+    default: 1000
+  nameserver:
+    description:
+      - sets DNS server IP address for a container
+  searchdomain:
+    description:
+      - sets DNS search domain for a container
+  timeout:
+    description:
+      - timeout for operations
+    default: 30
+  force:
+    description:
+      - force operations
+      - can be used only with states C(present), C(stopped), C(restarted)
+      - with C(state=present) the force option allows overwriting an existing container
+      - with states C(stopped), C(restarted) it allows forcing a stop of the instance
+    type: bool
+    default: 'no'
+  state:
+    description:
+      - Indicate desired state of the instance
+    choices: ['present', 'started', 'absent', 'stopped', 'restarted']
+    default: present
+  pubkey:
+    description:
+      - Public key to add to /root/.ssh/authorized_keys. This was added in Proxmox 4.2 and is ignored for earlier versions
+  unprivileged:
+    description:
+      - Indicate if the container should be unprivileged
+    type: bool
+    default: 'no'

+notes:
+  - Requires the proxmoxer and requests modules on the host. Both can be installed with pip.
+requirements: [ "proxmoxer", "python >= 2.7", "requests" ]
+author: Sergei Antipov (@UnderGreen)
+'''

+EXAMPLES = '''
+# Create new container with minimal options
+- proxmox:
+    vmid: 100
+    node: uk-mc02
+    api_user: root@pam
+    api_password: 1q2w3e
+    api_host: node1
+    password: 123456
+    hostname: example.org
+    ostemplate: 'local:vztmpl/ubuntu-14.04-x86_64.tar.gz'

+# Create new container automatically selecting the next available vmid.
+- proxmox:
+    node: 'uk-mc02'
+    api_user: 'root@pam'
+    api_password: '1q2w3e'
+    api_host: 'node1'
+    password: '123456'
+    hostname: 'example.org'
+    ostemplate: 'local:vztmpl/ubuntu-14.04-x86_64.tar.gz'

+# Create new container with minimal options with force (it will overwrite the existing container)
+- proxmox:
+    vmid: 100
+    node: uk-mc02
+    api_user: root@pam
+    api_password: 1q2w3e
+    api_host: node1
+    password: 123456
+    hostname: example.org
+    ostemplate: 'local:vztmpl/ubuntu-14.04-x86_64.tar.gz'
+    force: yes

+# Create new container with minimal options using the PROXMOX_PASSWORD environment variable (you should export it beforehand)
+- proxmox:
+    vmid: 100
+    node: uk-mc02
+    api_user: root@pam
+    api_host: node1
+    password: 123456
+    hostname: example.org
+    ostemplate: 'local:vztmpl/ubuntu-14.04-x86_64.tar.gz'

+# Create new container with minimal options defining network interface with dhcp
+- proxmox:
+    vmid: 100
+    node: uk-mc02
+    api_user: root@pam
+    api_password: 1q2w3e
+    api_host: node1
+    password: 123456
+    hostname: example.org
+    ostemplate: 'local:vztmpl/ubuntu-14.04-x86_64.tar.gz'
+    netif: '{"net0":"name=eth0,ip=dhcp,ip6=dhcp,bridge=vmbr0"}'

+# Create new container with minimal options defining network interface with static ip
+- proxmox:
+    vmid: 100
+    node: uk-mc02
+    api_user: root@pam
+    api_password: 1q2w3e
+    api_host: node1
+    password: 123456
+    hostname: example.org
+    ostemplate: 'local:vztmpl/ubuntu-14.04-x86_64.tar.gz'
+    netif: '{"net0":"name=eth0,gw=192.168.0.1,ip=192.168.0.2/24,bridge=vmbr0"}'

+# Create new container with minimal options defining a mount with 8GB
+- proxmox:
+    vmid: 100
+    node: uk-mc02
+    api_user: root@pam
+    api_password: 1q2w3e
+    api_host: node1
+    password: 123456
+    hostname: example.org
+    ostemplate: 'local:vztmpl/ubuntu-14.04-x86_64.tar.gz'
+    mounts: '{"mp0":"local:8,mp=/mnt/test/"}'

+# Create new container with minimal options defining a CPU core limit
+- proxmox:
+    vmid: 100
+    node: uk-mc02
+    api_user: root@pam
+    api_password: 1q2w3e
+    api_host: node1
+    password: 123456
+    hostname: example.org
+    ostemplate: 'local:vztmpl/ubuntu-14.04-x86_64.tar.gz'
+    cores: 2

+# Start container
+- proxmox:
+    vmid: 100
+    api_user: root@pam
+    api_password: 1q2w3e
+    api_host: node1
+    state: started

+# Start container with mount. Use a 90-second timeout because servers with additional disks take longer to boot.
+- proxmox: + vmid: 100 + api_user: root@pam + api_password: 1q2w3e + api_host: node1 + state: started + timeout: 90 + +# Stop container +- proxmox: + vmid: 100 + api_user: root@pam + api_password: 1q2w3e + api_host: node1 + state: stopped + +# Stop container with force +- proxmox: + vmid: 100 + api_user: root@pam + api_password: 1q2w3e + api_host: node1 + force: yes + state: stopped + +# Restart container(stopped or mounted container you can't restart) +- proxmox: + vmid: 100 + api_user: root@pam + api_password: 1q2w3e + api_host: node1 + state: restarted + +# Remove container +- proxmox: + vmid: 100 + api_user: root@pam + api_password: 1q2w3e + api_host: node1 + state: absent +''' + +import os +import time +import traceback +from distutils.version import LooseVersion + +try: + from proxmoxer import ProxmoxAPI + HAS_PROXMOXER = True +except ImportError: + HAS_PROXMOXER = False + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_native + + +VZ_TYPE = None + + +def get_nextvmid(module, proxmox): + try: + vmid = proxmox.cluster.nextid.get() + return vmid + except Exception as e: + module.fail_json(msg="Unable to get next vmid. Failed with exception: %s" % to_native(e), + exception=traceback.format_exc()) + + +def get_vmid(proxmox, hostname): + return [vm['vmid'] for vm in proxmox.cluster.resources.get(type='vm') if 'name' in vm and vm['name'] == hostname] + + +def get_instance(proxmox, vmid): + return [vm for vm in proxmox.cluster.resources.get(type='vm') if vm['vmid'] == int(vmid)] + + +def content_check(proxmox, node, ostemplate, template_store): + return [True for cnt in proxmox.nodes(node).storage(template_store).content.get() if cnt['volid'] == ostemplate] + + +def node_check(proxmox, node): + return [True for nd in proxmox.nodes.get() if nd['node'] == node] + + +def proxmox_version(proxmox): + apireturn = proxmox.version.get() + return LooseVersion(apireturn['version']) + + +def create_instance(module, proxmox, vmid, node, disk, storage, cpus, memory, swap, timeout, **kwargs): + proxmox_node = proxmox.nodes(node) + kwargs = dict((k, v) for k, v in kwargs.items() if v is not None) + + if VZ_TYPE == 'lxc': + kwargs['cpulimit'] = cpus + kwargs['rootfs'] = disk + if 'netif' in kwargs: + kwargs.update(kwargs['netif']) + del kwargs['netif'] + if 'mounts' in kwargs: + kwargs.update(kwargs['mounts']) + del kwargs['mounts'] + if 'pubkey' in kwargs: + if proxmox_version(proxmox) >= LooseVersion('4.2'): + kwargs['ssh-public-keys'] = kwargs['pubkey'] + del kwargs['pubkey'] + else: + kwargs['cpus'] = cpus + kwargs['disk'] = disk + + taskid = getattr(proxmox_node, VZ_TYPE).create(vmid=vmid, storage=storage, memory=memory, swap=swap, **kwargs) + + while timeout: + if (proxmox_node.tasks(taskid).status.get()['status'] == 'stopped' and + proxmox_node.tasks(taskid).status.get()['exitstatus'] == 'OK'): + return True + timeout -= 1 + if timeout == 0: + module.fail_json(msg='Reached timeout while waiting for creating VM. 
Last line in task before timeout: %s' % + proxmox_node.tasks(taskid).log.get()[:1]) + + time.sleep(1) + return False + + +def start_instance(module, proxmox, vm, vmid, timeout): + taskid = getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.start.post() + while timeout: + if (proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['status'] == 'stopped' and + proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['exitstatus'] == 'OK'): + return True + timeout -= 1 + if timeout == 0: + module.fail_json(msg='Reached timeout while waiting for starting VM. Last line in task before timeout: %s' % + proxmox.nodes(vm[0]['node']).tasks(taskid).log.get()[:1]) + + time.sleep(1) + return False + + +def stop_instance(module, proxmox, vm, vmid, timeout, force): + if force: + taskid = getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.shutdown.post(forceStop=1) + else: + taskid = getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.shutdown.post() + while timeout: + if (proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['status'] == 'stopped' and + proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['exitstatus'] == 'OK'): + return True + timeout -= 1 + if timeout == 0: + module.fail_json(msg='Reached timeout while waiting for stopping VM. Last line in task before timeout: %s' % + proxmox.nodes(vm[0]['node']).tasks(taskid).log.get()[:1]) + + time.sleep(1) + return False + + +def umount_instance(module, proxmox, vm, vmid, timeout): + taskid = getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.umount.post() + while timeout: + if (proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['status'] == 'stopped' and + proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['exitstatus'] == 'OK'): + return True + timeout -= 1 + if timeout == 0: + module.fail_json(msg='Reached timeout while waiting for unmounting VM. 
Last line in task before timeout: %s' % + proxmox.nodes(vm[0]['node']).tasks(taskid).log.get()[:1]) + + time.sleep(1) + return False + + +def main(): + module = AnsibleModule( + argument_spec=dict( + api_host=dict(required=True), + api_user=dict(required=True), + api_password=dict(no_log=True), + vmid=dict(required=False), + validate_certs=dict(type='bool', default='no'), + node=dict(), + pool=dict(), + password=dict(no_log=True), + hostname=dict(), + ostemplate=dict(), + disk=dict(type='str', default='3'), + cores=dict(type='int', default=1), + cpus=dict(type='int', default=1), + memory=dict(type='int', default=512), + swap=dict(type='int', default=0), + netif=dict(type='dict'), + mounts=dict(type='dict'), + ip_address=dict(), + onboot=dict(type='bool', default='no'), + storage=dict(default='local'), + cpuunits=dict(type='int', default=1000), + nameserver=dict(), + searchdomain=dict(), + timeout=dict(type='int', default=30), + force=dict(type='bool', default='no'), + state=dict(default='present', choices=['present', 'absent', 'stopped', 'started', 'restarted']), + pubkey=dict(type='str', default=None), + unprivileged=dict(type='bool', default='no') + ) + ) + + if not HAS_PROXMOXER: + module.fail_json(msg='proxmoxer required for this module') + + state = module.params['state'] + api_user = module.params['api_user'] + api_host = module.params['api_host'] + api_password = module.params['api_password'] + vmid = module.params['vmid'] + validate_certs = module.params['validate_certs'] + node = module.params['node'] + disk = module.params['disk'] + cpus = module.params['cpus'] + memory = module.params['memory'] + swap = module.params['swap'] + storage = module.params['storage'] + hostname = module.params['hostname'] + if module.params['ostemplate'] is not None: + template_store = module.params['ostemplate'].split(":")[0] + timeout = module.params['timeout'] + + # If password not set get it from PROXMOX_PASSWORD env + if not api_password: + try: + api_password = os.environ['PROXMOX_PASSWORD'] + except KeyError as e: + module.fail_json(msg='You should set api_password param or use PROXMOX_PASSWORD environment variable') + + try: + proxmox = ProxmoxAPI(api_host, user=api_user, password=api_password, verify_ssl=validate_certs) + global VZ_TYPE + VZ_TYPE = 'openvz' if proxmox_version(proxmox) < LooseVersion('4.0') else 'lxc' + + except Exception as e: + module.fail_json(msg='authorization on proxmox cluster failed with exception: %s' % e) + + # If vmid not set get the Next VM id from ProxmoxAPI + # If hostname is set get the VM id from ProxmoxAPI + if not vmid and state == 'present': + vmid = get_nextvmid(module, proxmox) + elif not vmid and hostname: + hosts = get_vmid(proxmox, hostname) + if len(hosts) == 0: + module.fail_json(msg="Vmid could not be fetched => Hostname doesn't exist (action: %s)" % state) + vmid = hosts[0] + elif not vmid: + module.exit_json(changed=False, msg="Vmid could not be fetched for the following action: %s" % state) + + if state == 'present': + try: + if get_instance(proxmox, vmid) and not module.params['force']: + module.exit_json(changed=False, msg="VM with vmid = %s is already exists" % vmid) + # If no vmid was passed, there cannot be another VM named 'hostname' + if not module.params['vmid'] and get_vmid(proxmox, hostname) and not module.params['force']: + module.exit_json(changed=False, msg="VM with hostname %s already exists and has ID number %s" % (hostname, get_vmid(proxmox, hostname)[0])) + elif not (node, module.params['hostname'] and module.params['password'] 
and module.params['ostemplate']): + module.fail_json(msg='node, hostname, password and ostemplate are mandatory for creating vm') + elif not node_check(proxmox, node): + module.fail_json(msg="node '%s' not exists in cluster" % node) + elif not content_check(proxmox, node, module.params['ostemplate'], template_store): + module.fail_json(msg="ostemplate '%s' not exists on node %s and storage %s" + % (module.params['ostemplate'], node, template_store)) + + create_instance(module, proxmox, vmid, node, disk, storage, cpus, memory, swap, timeout, + cores=module.params['cores'], + pool=module.params['pool'], + password=module.params['password'], + hostname=module.params['hostname'], + ostemplate=module.params['ostemplate'], + netif=module.params['netif'], + mounts=module.params['mounts'], + ip_address=module.params['ip_address'], + onboot=int(module.params['onboot']), + cpuunits=module.params['cpuunits'], + nameserver=module.params['nameserver'], + searchdomain=module.params['searchdomain'], + force=int(module.params['force']), + pubkey=module.params['pubkey'], + unprivileged=int(module.params['unprivileged'])) + + module.exit_json(changed=True, msg="deployed VM %s from template %s" % (vmid, module.params['ostemplate'])) + except Exception as e: + module.fail_json(msg="creation of %s VM %s failed with exception: %s" % (VZ_TYPE, vmid, e)) + + elif state == 'started': + try: + vm = get_instance(proxmox, vmid) + if not vm: + module.fail_json(msg='VM with vmid = %s not exists in cluster' % vmid) + if getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'running': + module.exit_json(changed=False, msg="VM %s is already running" % vmid) + + if start_instance(module, proxmox, vm, vmid, timeout): + module.exit_json(changed=True, msg="VM %s started" % vmid) + except Exception as e: + module.fail_json(msg="starting of VM %s failed with exception: %s" % (vmid, e)) + + elif state == 'stopped': + try: + vm = get_instance(proxmox, vmid) + if not vm: + module.fail_json(msg='VM with vmid = %s not exists in cluster' % vmid) + + if getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'mounted': + if module.params['force']: + if umount_instance(module, proxmox, vm, vmid, timeout): + module.exit_json(changed=True, msg="VM %s is shutting down" % vmid) + else: + module.exit_json(changed=False, msg=("VM %s is already shutdown, but mounted. 
" + "You can use force option to umount it.") % vmid) + + if getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'stopped': + module.exit_json(changed=False, msg="VM %s is already shutdown" % vmid) + + if stop_instance(module, proxmox, vm, vmid, timeout, force=module.params['force']): + module.exit_json(changed=True, msg="VM %s is shutting down" % vmid) + except Exception as e: + module.fail_json(msg="stopping of VM %s failed with exception: %s" % (vmid, e)) + + elif state == 'restarted': + try: + vm = get_instance(proxmox, vmid) + if not vm: + module.fail_json(msg='VM with vmid = %s not exists in cluster' % vmid) + if (getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'stopped' or + getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'mounted'): + module.exit_json(changed=False, msg="VM %s is not running" % vmid) + + if (stop_instance(module, proxmox, vm, vmid, timeout, force=module.params['force']) and + start_instance(module, proxmox, vm, vmid, timeout)): + module.exit_json(changed=True, msg="VM %s is restarted" % vmid) + except Exception as e: + module.fail_json(msg="restarting of VM %s failed with exception: %s" % (vmid, e)) + + elif state == 'absent': + try: + vm = get_instance(proxmox, vmid) + if not vm: + module.exit_json(changed=False, msg="VM %s does not exist" % vmid) + + if getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'running': + module.exit_json(changed=False, msg="VM %s is running. Stop it before deletion." % vmid) + + if getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'mounted': + module.exit_json(changed=False, msg="VM %s is mounted. Stop it with force option before deletion." % vmid) + + taskid = getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE).delete(vmid) + while timeout: + if (proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['status'] == 'stopped' and + proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['exitstatus'] == 'OK'): + module.exit_json(changed=True, msg="VM %s removed" % vmid) + timeout -= 1 + if timeout == 0: + module.fail_json(msg='Reached timeout while waiting for removing VM. Last line in task before timeout: %s' + % proxmox.nodes(vm[0]['node']).tasks(taskid).log.get()[:1]) + + time.sleep(1) + except Exception as e: + module.fail_json(msg="deletion of VM %s failed with exception: %s" % (vmid, to_native(e))) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/misc/proxmox_kvm.py b/plugins/modules/cloud/misc/proxmox_kvm.py new file mode 100644 index 0000000000..533ebca9da --- /dev/null +++ b/plugins/modules/cloud/misc/proxmox_kvm.py @@ -0,0 +1,1122 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2016, Abdoul Bah (@helldorado) +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = r''' +--- +module: proxmox_kvm +short_description: Management of Qemu(KVM) Virtual Machines in Proxmox VE cluster. +description: + - Allows you to create/delete/stop Qemu(KVM) Virtual Machines in Proxmox VE cluster. +author: "Abdoul Bah (@helldorado) " +options: + acpi: + description: + - Specify if ACPI should be enabled/disabled. 
+    type: bool
+    default: 'yes'
+  agent:
+    description:
+      - Specify if the QEMU Guest Agent should be enabled/disabled.
+    type: bool
+  args:
+    description:
+      - Pass arbitrary arguments to kvm.
+      - This option is for experts only!
+    default: "-serial unix:/var/run/qemu-server/VMID.serial,server,nowait"
+  api_host:
+    description:
+      - Specify the target host of the Proxmox VE cluster.
+    required: true
+  api_user:
+    description:
+      - Specify the user to authenticate with.
+    required: true
+  api_password:
+    description:
+      - Specify the password to authenticate with.
+      - You can use the C(PROXMOX_PASSWORD) environment variable.
+  autostart:
+    description:
+      - Specify if the VM should be automatically restarted after a crash (currently ignored by the PVE API).
+    type: bool
+    default: 'no'
+  balloon:
+    description:
+      - Specify the amount of RAM for the VM in MB.
+      - Using zero disables the balloon driver.
+    default: 0
+  bios:
+    description:
+      - Specify the BIOS implementation.
+    choices: ['seabios', 'ovmf']
+  boot:
+    description:
+      - Specify the boot order -> boot on floppy C(a), hard disk C(c), CD-ROM C(d), or network C(n).
+      - You can combine these to set the order.
+    default: cnd
+  bootdisk:
+    description:
+      - Enable booting from the specified disk. C((ide|sata|scsi|virtio)\d+)
+  clone:
+    description:
+      - Name of the VM to be cloned. If C(vmid) is set, C(clone) can take an arbitrary value, but it is still required to initiate the clone.
+  cores:
+    description:
+      - Specify the number of cores per socket.
+    default: 1
+  cpu:
+    description:
+      - Specify the emulated CPU type.
+    default: kvm64
+  cpulimit:
+    description:
+      - Specify if CPU usage will be limited. A value of 0 indicates no CPU limit.
+      - A machine with 2 CPUs has a total of 2 units of CPU time.
+  cpuunits:
+    description:
+      - Specify the CPU weight for a VM.
+      - You can disable the fair-scheduler configuration by setting this to 0.
+    default: 1000
+  delete:
+    description:
+      - Specify a list of settings you want to delete.
+  description:
+    description:
+      - Specify the description for the VM. Only used on the configuration web interface.
+      - This is saved as a comment inside the configuration file.
+  digest:
+    description:
+      - Prevent changes if the current configuration file has a different SHA1 digest.
+      - This can be used to prevent concurrent modifications.
+  force:
+    description:
+      - Allow forced stop of the VM.
+      - Can be used only with states C(stopped), C(restarted).
+    type: bool
+  format:
+    description:
+      - Target drive's backing file's data format.
+      - Used only with clone.
+    default: qcow2
+    choices: [ "cloop", "cow", "qcow", "qcow2", "qed", "raw", "vmdk" ]
+  freeze:
+    description:
+      - Specify if PVE should freeze the CPU at startup (use the 'c' monitor command to start execution).
+    type: bool
+  full:
+    description:
+      - Create a full copy of all disks. This is always done when you clone a normal VM.
+      - For VM templates, we try to create a linked clone by default.
+      - Used only with clone.
+    type: bool
+    default: 'yes'
+  hostpci:
+    description:
+      - Specify a hash/dictionary of host PCI devices to map into the guest. C(hostpci='{"key":"value", "key":"value"}').
+      - Keys allowed are - C(hostpci[n]) where 0 ≤ n ≤ N.
+      - Values allowed are - C("host="HOSTPCIID[;HOSTPCIID2...]",pcie="1|0",rombar="1|0",x-vga="1|0"").
+      - The C(host) parameter is Host PCI device pass through. HOSTPCIID syntax is C(bus:dev.func) (hexadecimal numbers).
+      - C(pcie=boolean) I(default=0) Choose the PCI-express bus (needs the q35 machine model).
+ - C(rombar=boolean) I(default=1) Specify whether or not the device's ROM will be visible in the guest's memory map. + - C(x-vga=boolean) I(default=0) Enable vfio-vga device support. + - /!\ This option allows direct access to host hardware. So it is no longer possible to migrate such machines - use with special care. + hotplug: + description: + - Selectively enable hotplug features. + - This is a comma separated list of hotplug features C('network', 'disk', 'cpu', 'memory' and 'usb'). + - Value 0 disables hotplug completely and value 1 is an alias for the default C('network,disk,usb'). + hugepages: + description: + - Enable/disable hugepages memory. + choices: ['any', '2', '1024'] + ide: + description: + - A hash/dictionary of volume used as IDE hard disk or CD-ROM. C(ide='{"key":"value", "key":"value"}'). + - Keys allowed are - C(ide[n]) where 0 ≤ n ≤ 3. + - Values allowed are - C("storage:size,format=value"). + - C(storage) is the storage identifier where to create the disk. + - C(size) is the size of the disk in GB. + - C(format) is the drive's backing file's data format. C(qcow2|raw|subvol). + keyboard: + description: + - Sets the keyboard layout for VNC server. + kvm: + description: + - Enable/disable KVM hardware virtualization. + type: bool + default: 'yes' + localtime: + description: + - Sets the real time clock to local time. + - This is enabled by default if ostype indicates a Microsoft OS. + type: bool + lock: + description: + - Lock/unlock the VM. + choices: ['migrate', 'backup', 'snapshot', 'rollback'] + machine: + description: + - Specifies the Qemu machine type. + - type => C((pc|pc(-i440fx)?-\d+\.\d+(\.pxe)?|q35|pc-q35-\d+\.\d+(\.pxe)?)) + memory: + description: + - Memory size in MB for instance. + default: 512 + migrate_downtime: + description: + - Sets maximum tolerated downtime (in seconds) for migrations. + migrate_speed: + description: + - Sets maximum speed (in MB/s) for migrations. + - A value of 0 is no limit. + name: + description: + - Specifies the VM name. Only used on the configuration web interface. + - Required only for C(state=present). + net: + description: + - A hash/dictionary of network interfaces for the VM. C(net='{"key":"value", "key":"value"}'). + - Keys allowed are - C(net[n]) where 0 ≤ n ≤ N. + - Values allowed are - C("model="XX:XX:XX:XX:XX:XX",bridge="value",rate="value",tag="value",firewall="1|0",trunks="vlanid""). + - Model is one of C(e1000 e1000-82540em e1000-82544gc e1000-82545em i82551 i82557b i82559er ne2k_isa ne2k_pci pcnet rtl8139 virtio vmxnet3). + - C(XX:XX:XX:XX:XX:XX) should be an unique MAC address. This is automatically generated if not specified. + - The C(bridge) parameter can be used to automatically add the interface to a bridge device. The Proxmox VE standard bridge is called 'vmbr0'. + - Option C(rate) is used to limit traffic bandwidth from and to this interface. It is specified as floating point number, unit is 'Megabytes per second'. + - If you specify no bridge, we create a kvm 'user' (NATed) network device, which provides DHCP and DNS services. + newid: + description: + - VMID for the clone. Used only with clone. + - If newid is not set, the next available VM ID will be fetched from ProxmoxAPI. + node: + description: + - Proxmox VE node, where the new VM will be created. + - Only required for C(state=present). + - For other states, it will be autodiscovered. + numa: + description: + - A hash/dictionaries of NUMA topology. C(numa='{"key":"value", "key":"value"}'). + - Keys allowed are - C(numa[n]) where 0 ≤ n ≤ N. 
+      - Values allowed are - C("cpu="",hostnodes="",memory="number",policy="(bind|interleave|preferred)"").
+      - C(cpus) CPUs accessing this NUMA node.
+      - C(hostnodes) Host NUMA nodes to use.
+      - C(memory) Amount of memory this NUMA node provides.
+      - C(policy) NUMA allocation policy.
+  onboot:
+    description:
+      - Specifies whether a VM will be started during system bootup.
+    type: bool
+    default: 'yes'
+  ostype:
+    description:
+      - Specifies the guest operating system. This is used to enable special optimizations/features for specific operating systems.
+      - C(l26) is the Linux 2.6/3.x kernel.
+    choices: ['other', 'wxp', 'w2k', 'w2k3', 'w2k8', 'wvista', 'win7', 'win8', 'win10', 'l24', 'l26', 'solaris']
+    default: l26
+  parallel:
+    description:
+      - A hash/dictionary of host parallel devices to map. C(parallel='{"key":"value", "key":"value"}').
+      - Keys allowed are - (parallel[n]) where 0 ≤ n ≤ 2.
+      - Values allowed are - C("/dev/parport\d+|/dev/usb/lp\d+").
+  pool:
+    description:
+      - Add the new VM to the specified pool.
+  protection:
+    description:
+      - Enable/disable the protection flag of the VM. This will enable/disable the remove VM and remove disk operations.
+    type: bool
+  reboot:
+    description:
+      - Allow reboot. If set to C(yes), the VM exits on reboot.
+    type: bool
+  revert:
+    description:
+      - Revert a pending change.
+  sata:
+    description:
+      - A hash/dictionary of volumes used as SATA hard disks or CD-ROMs. C(sata='{"key":"value", "key":"value"}').
+      - Keys allowed are - C(sata[n]) where 0 ≤ n ≤ 5.
+      - Values allowed are - C("storage:size,format=value").
+      - C(storage) is the storage identifier where to create the disk.
+      - C(size) is the size of the disk in GB.
+      - C(format) is the drive's backing file's data format. C(qcow2|raw|subvol).
+  scsi:
+    description:
+      - A hash/dictionary of volumes used as SCSI hard disks or CD-ROMs. C(scsi='{"key":"value", "key":"value"}').
+      - Keys allowed are - C(scsi[n]) where 0 ≤ n ≤ 13.
+      - Values allowed are - C("storage:size,format=value").
+      - C(storage) is the storage identifier where to create the disk.
+      - C(size) is the size of the disk in GB.
+      - C(format) is the drive's backing file's data format. C(qcow2|raw|subvol).
+  scsihw:
+    description:
+      - Specifies the SCSI controller model.
+    choices: ['lsi', 'lsi53c810', 'virtio-scsi-pci', 'virtio-scsi-single', 'megasas', 'pvscsi']
+  serial:
+    description:
+      - A hash/dictionary of serial devices to create inside the VM. C('{"key":"value", "key":"value"}').
+      - Keys allowed are - serial[n](str; required) where 0 ≤ n ≤ 3.
+      - Values allowed are - C((/dev/.+|socket)).
+      - /!\ If you pass through a host serial device, it is no longer possible to migrate such machines - use with special care.
+  shares:
+    description:
+      - Sets the amount of memory shares for auto-ballooning. (0 - 50000).
+      - The larger the number is, the more memory this VM gets.
+      - The number is relative to the weights of all other running VMs.
+      - Using 0 disables auto-ballooning, which means no limit.
+  skiplock:
+    description:
+      - Ignore locks.
+      - Only root is allowed to use this option.
+  smbios:
+    description:
+      - Specifies SMBIOS type 1 fields.
+  snapname:
+    description:
+      - The name of the snapshot. Used only with clone.
+  sockets:
+    description:
+      - Sets the number of CPU sockets. (1 - N).
+    default: 1
+  startdate:
+    description:
+      - Sets the initial date of the real time clock.
+      - Valid formats for the date are C('now') or C('2016-09-25T16:01:21') or C('2016-09-25').
+  startup:
+    description:
+      - Startup and shutdown behavior. C([[order=]\d+] [,up=\d+] [,down=\d+]).
+      - Order is a non-negative number defining the general startup order.
+      - Shutdown is done in reverse order.
+  state:
+    description:
+      - Indicates the desired state of the instance.
+      - If C(current), the current state of the VM will be fetched. You can access it with C(results.status).
+    choices: ['present', 'started', 'absent', 'stopped', 'restarted', 'current']
+    default: present
+  storage:
+    description:
+      - Target storage for full clone.
+  tablet:
+    description:
+      - Enables/disables the USB tablet device.
+    type: bool
+    default: 'no'
+  target:
+    description:
+      - Target node. Only allowed if the original VM is on shared storage.
+      - Used only with clone.
+  tdf:
+    description:
+      - Enables/disables time drift fix.
+    type: bool
+  template:
+    description:
+      - Enables/disables the template.
+    type: bool
+    default: 'no'
+  timeout:
+    description:
+      - Timeout for operations.
+    default: 30
+  update:
+    description:
+      - If C(yes), the VM will be updated with the new values.
+      - Because of API limitations and for safety reasons, updating the following parameters is disabled -
+      - C(net, virtio, ide, sata, scsi). For example, updating C(net) would change the MAC address and updating C(virtio) would always create a new disk.
+    type: bool
+    default: 'no'
+  validate_certs:
+    description:
+      - If C(no), SSL certificates will not be validated. This should only be used on personally controlled sites using self-signed certificates.
+    type: bool
+    default: 'no'
+  vcpus:
+    description:
+      - Sets the number of hotplugged vcpus.
+  vga:
+    description:
+      - Select the VGA type. If you want to use high resolution modes (>= 1280x1024x16) then you should use the 'std' or 'vmware' option.
+    choices: ['std', 'cirrus', 'vmware', 'qxl', 'serial0', 'serial1', 'serial2', 'serial3', 'qxl2', 'qxl3', 'qxl4']
+    default: std
+  virtio:
+    description:
+      - A hash/dictionary of volumes used as VIRTIO hard disks. C(virtio='{"key":"value", "key":"value"}').
+      - Keys allowed are - C(virtio[n]) where 0 ≤ n ≤ 15.
+      - Values allowed are - C("storage:size,format=value").
+      - C(storage) is the storage identifier where to create the disk.
+      - C(size) is the size of the disk in GB.
+      - C(format) is the drive's backing file's data format. C(qcow2|raw|subvol).
+  vmid:
+    description:
+      - Specifies the VM ID. Alternatively, use the I(name) parameter.
+      - If vmid is not set, the next available VM ID will be fetched from ProxmoxAPI.
+  watchdog:
+    description:
+      - Creates a virtual hardware watchdog device.
+requirements: [ "proxmoxer", "requests" ]
+'''
+
+EXAMPLES = '''
+# Create new VM with minimal options
+- proxmox_kvm:
+    api_user : root@pam
+    api_password: secret
+    api_host : helldorado
+    name : spynal
+    node : sabrewulf
+
+# Create new VM with minimal options and given vmid
+- proxmox_kvm:
+    api_user : root@pam
+    api_password: secret
+    api_host : helldorado
+    name : spynal
+    node : sabrewulf
+    vmid : 100
+
+# Create new VM with two network interface options.
+- proxmox_kvm:
+    api_user : root@pam
+    api_password: secret
+    api_host : helldorado
+    name : spynal
+    node : sabrewulf
+    net : '{"net0":"virtio,bridge=vmbr1,rate=200", "net1":"e1000,bridge=vmbr2,"}'
+
+# Create new VM with one network interface, three virtio hard disks, 4 cores, and 2 vcpus.
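+# The net and virtio hashes are flattened into top-level API parameters
+# (net0, virtio0, ...) before the create call - see the "Convert all dict in
+# kwargs to elements" loop in create_vm below. A rough sketch of that
+# transformation, with illustrative values:
+#
+#     kwargs = {'net': {'net0': 'virtio,bridge=vmbr1,rate=200'},
+#               'virtio': {'virtio0': 'VMs_LVM:10'}}
+#     for k in list(kwargs):
+#         if isinstance(kwargs[k], dict):
+#             kwargs.update(kwargs.pop(k))
+#     # kwargs == {'net0': 'virtio,bridge=vmbr1,rate=200', 'virtio0': 'VMs_LVM:10'}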
+- proxmox_kvm:
+    api_user : root@pam
+    api_password: secret
+    api_host : helldorado
+    name : spynal
+    node : sabrewulf
+    net : '{"net0":"virtio,bridge=vmbr1,rate=200"}'
+    virtio : '{"virtio0":"VMs_LVM:10", "virtio1":"VMs:2,format=qcow2", "virtio2":"VMs:5,format=raw"}'
+    cores : 4
+    vcpus : 2
+
+# Clone VM with only source VM name
+- proxmox_kvm:
+    api_user : root@pam
+    api_password: secret
+    api_host : helldorado
+    clone : spynal # The VM source
+    name : zavala # The target VM name
+    node : sabrewulf
+    storage : VMs
+    format : qcow2
+    timeout : 500 # Note: The task can take a while. Adapt the timeout accordingly.
+
+# Clone VM with source vmid and target newid and raw format
+- proxmox_kvm:
+    api_user : root@pam
+    api_password: secret
+    api_host : helldorado
+    clone : arbitrary_name
+    vmid : 108
+    newid : 152
+    name : zavala # The target VM name
+    node : sabrewulf
+    storage : LVM_STO
+    format : raw
+    timeout : 300 # Note: The task can take a while. Adapt the timeout accordingly.
+
+# Create new VM and lock it for snapshot.
+- proxmox_kvm:
+    api_user : root@pam
+    api_password: secret
+    api_host : helldorado
+    name : spynal
+    node : sabrewulf
+    lock : snapshot
+
+# Create new VM and set protection to disable the remove VM and remove disk operations
+- proxmox_kvm:
+    api_user : root@pam
+    api_password: secret
+    api_host : helldorado
+    name : spynal
+    node : sabrewulf
+    protection : yes
+
+# Start VM
+- proxmox_kvm:
+    api_user : root@pam
+    api_password: secret
+    api_host : helldorado
+    name : spynal
+    node : sabrewulf
+    state : started
+
+# Stop VM
+- proxmox_kvm:
+    api_user : root@pam
+    api_password: secret
+    api_host : helldorado
+    name : spynal
+    node : sabrewulf
+    state : stopped
+
+# Stop VM with force
+- proxmox_kvm:
+    api_user : root@pam
+    api_password: secret
+    api_host : helldorado
+    name : spynal
+    node : sabrewulf
+    state : stopped
+    force : yes
+
+# Restart VM
+- proxmox_kvm:
+    api_user : root@pam
+    api_password: secret
+    api_host : helldorado
+    name : spynal
+    node : sabrewulf
+    state : restarted
+
+# Remove VM
+- proxmox_kvm:
+    api_user : root@pam
+    api_password: secret
+    api_host : helldorado
+    name : spynal
+    node : sabrewulf
+    state : absent
+
+# Get VM current state
+- proxmox_kvm:
+    api_user : root@pam
+    api_password: secret
+    api_host : helldorado
+    name : spynal
+    node : sabrewulf
+    state : current
+
+# Update VM configuration
+- proxmox_kvm:
+    api_user : root@pam
+    api_password: secret
+    api_host : helldorado
+    name : spynal
+    node : sabrewulf
+    cores : 8
+    memory : 16384
+    update : yes
+
+# Delete QEMU parameters
+- proxmox_kvm:
+    api_user : root@pam
+    api_password: secret
+    api_host : helldorado
+    name : spynal
+    node : sabrewulf
+    delete : 'args,template,cpulimit'
+
+# Revert a pending change
+- proxmox_kvm:
+    api_user : root@pam
+    api_password: secret
+    api_host : helldorado
+    name : spynal
+    node : sabrewulf
+    revert : 'template,cpulimit'
+'''
+
+RETURN = '''
+devices:
+  description: The list of devices created or used.
+  returned: success
+  type: dict
+  sample: '
+    {
+      "ide0": "VMS_LVM:vm-115-disk-1",
+      "ide1": "VMs:115/vm-115-disk-3.raw",
+      "virtio0": "VMS_LVM:vm-115-disk-2",
+      "virtio1": "VMs:115/vm-115-disk-1.qcow2",
+      "virtio2": "VMs:115/vm-115-disk-2.raw"
+    }'
+mac:
+  description: List of MAC addresses created and attached to net[n]. Useful when you want to use provisioning systems like Foreman via PXE.
+  returned: success
+  type: dict
+  sample: '
+    {
+      "net0": "3E:6E:97:D2:31:9F",
+      "net1": "B6:A1:FC:EF:78:A4"
+    }'
+vmid:
+  description: The VM vmid.
+ returned: success + type: int + sample: 115 +status: + description: + - The current virtual machine status. + - Returned only when C(state=current) + returned: success + type: dict + sample: '{ + "changed": false, + "msg": "VM kropta with vmid = 110 is running", + "status": "running" + }' +''' + +import os +import re +import time +import traceback +from distutils.version import LooseVersion + +try: + from proxmoxer import ProxmoxAPI + HAS_PROXMOXER = True +except ImportError: + HAS_PROXMOXER = False + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_native + + +VZ_TYPE = 'qemu' + + +def get_nextvmid(module, proxmox): + try: + vmid = proxmox.cluster.nextid.get() + return vmid + except Exception as e: + module.fail_json(msg="Unable to get next vmid. Failed with exception: %s" % to_native(e), + exception=traceback.format_exc()) + + +def get_vmid(proxmox, name): + return [vm['vmid'] for vm in proxmox.cluster.resources.get(type='vm') if vm.get('name') == name] + + +def get_vm(proxmox, vmid): + return [vm for vm in proxmox.cluster.resources.get(type='vm') if vm['vmid'] == int(vmid)] + + +def node_check(proxmox, node): + return [True for nd in proxmox.nodes.get() if nd['node'] == node] + + +def get_vminfo(module, proxmox, node, vmid, **kwargs): + global results + results = {} + mac = {} + devices = {} + try: + vm = proxmox.nodes(node).qemu(vmid).config.get() + except Exception as e: + module.fail_json(msg='Getting information for VM with vmid = %s failed with exception: %s' % (vmid, e)) + + # Sanitize kwargs. Remove not defined args and ensure True and False converted to int. + kwargs = dict((k, v) for k, v in kwargs.items() if v is not None) + + # Convert all dict in kwargs to elements. For hostpci[n], ide[n], net[n], numa[n], parallel[n], sata[n], scsi[n], serial[n], virtio[n] + for k in list(kwargs.keys()): + if isinstance(kwargs[k], dict): + kwargs.update(kwargs[k]) + del kwargs[k] + + # Split information by type + for k, v in kwargs.items(): + if re.match(r'net[0-9]', k) is not None: + interface = k + k = vm[k] + k = re.search('=(.*?),', k).group(1) + mac[interface] = k + if (re.match(r'virtio[0-9]', k) is not None or + re.match(r'ide[0-9]', k) is not None or + re.match(r'scsi[0-9]', k) is not None or + re.match(r'sata[0-9]', k) is not None): + device = k + k = vm[k] + k = re.search('(.*?),', k).group(1) + devices[device] = k + + results['mac'] = mac + results['devices'] = devices + results['vmid'] = int(vmid) + + +def settings(module, proxmox, vmid, node, name, timeout, **kwargs): + proxmox_node = proxmox.nodes(node) + + # Sanitize kwargs. Remove not defined args and ensure True and False converted to int. + kwargs = dict((k, v) for k, v in kwargs.items() if v is not None) + + if getattr(proxmox_node, VZ_TYPE)(vmid).config.set(**kwargs) is None: + return True + else: + return False + + +def create_vm(module, proxmox, vmid, newid, node, name, memory, cpu, cores, sockets, timeout, update, **kwargs): + # Available only in PVE 4 + only_v4 = ['force', 'protection', 'skiplock'] + + # valide clone parameters + valid_clone_params = ['format', 'full', 'pool', 'snapname', 'storage', 'target'] + clone_params = {} + # Default args for vm. Note: -args option is for experts only. It allows you to pass arbitrary arguments to kvm. + vm_args = "-serial unix:/var/run/qemu-server/{0}.serial,server,nowait".format(vmid) + + proxmox_node = proxmox.nodes(node) + + # Sanitize kwargs. Remove not defined args and ensure True and False converted to int. 
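+    # For example (illustrative values): {'onboot': True, 'acpi': None, 'balloon': 0}
+    # becomes {'onboot': 1, 'balloon': 0} - None entries are dropped and booleans
+    # are cast to 0/1, since that is the form the PVE API expects.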
+ kwargs = dict((k, v) for k, v in kwargs.items() if v is not None) + kwargs.update(dict([k, int(v)] for k, v in kwargs.items() if isinstance(v, bool))) + + # The features work only on PVE 4 + if PVE_MAJOR_VERSION < 4: + for p in only_v4: + if p in kwargs: + del kwargs[p] + + # If update, don't update disk (virtio, ide, sata, scsi) and network interface + if update: + if 'virtio' in kwargs: + del kwargs['virtio'] + if 'sata' in kwargs: + del kwargs['sata'] + if 'scsi' in kwargs: + del kwargs['scsi'] + if 'ide' in kwargs: + del kwargs['ide'] + if 'net' in kwargs: + del kwargs['net'] + + # Convert all dict in kwargs to elements. For hostpci[n], ide[n], net[n], numa[n], parallel[n], sata[n], scsi[n], serial[n], virtio[n] + for k in list(kwargs.keys()): + if isinstance(kwargs[k], dict): + kwargs.update(kwargs[k]) + del kwargs[k] + + # Rename numa_enabled to numa. According the API documentation + if 'numa_enabled' in kwargs: + kwargs['numa'] = kwargs['numa_enabled'] + del kwargs['numa_enabled'] + + # -args and skiplock require root@pam user + if module.params['api_user'] == "root@pam" and module.params['args'] is None: + if not update: + kwargs['args'] = vm_args + elif module.params['api_user'] == "root@pam" and module.params['args'] is not None: + kwargs['args'] = module.params['args'] + elif module.params['api_user'] != "root@pam" and module.params['args'] is not None: + module.fail_json(msg='args parameter require root@pam user. ') + + if module.params['api_user'] != "root@pam" and module.params['skiplock'] is not None: + module.fail_json(msg='skiplock parameter require root@pam user. ') + + if update: + if getattr(proxmox_node, VZ_TYPE)(vmid).config.set(name=name, memory=memory, cpu=cpu, cores=cores, sockets=sockets, **kwargs) is None: + return True + else: + return False + elif module.params['clone'] is not None: + for param in valid_clone_params: + if module.params[param] is not None: + clone_params[param] = module.params[param] + clone_params.update(dict([k, int(v)] for k, v in clone_params.items() if isinstance(v, bool))) + taskid = proxmox_node.qemu(vmid).clone.post(newid=newid, name=name, **clone_params) + else: + taskid = getattr(proxmox_node, VZ_TYPE).create(vmid=vmid, name=name, memory=memory, cpu=cpu, cores=cores, sockets=sockets, **kwargs) + + while timeout: + if (proxmox_node.tasks(taskid).status.get()['status'] == 'stopped' and + proxmox_node.tasks(taskid).status.get()['exitstatus'] == 'OK'): + return True + timeout = timeout - 1 + if timeout == 0: + module.fail_json(msg='Reached timeout while waiting for creating VM. Last line in task before timeout: %s' % + proxmox_node.tasks(taskid).log.get()[:1]) + time.sleep(1) + return False + + +def start_vm(module, proxmox, vm, vmid, timeout): + taskid = getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.start.post() + while timeout: + if (proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['status'] == 'stopped' and + proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['exitstatus'] == 'OK'): + return True + timeout -= 1 + if timeout == 0: + module.fail_json(msg='Reached timeout while waiting for starting VM. 
Last line in task before timeout: %s' + % proxmox.nodes(vm[0]['node']).tasks(taskid).log.get()[:1]) + + time.sleep(1) + return False + + +def stop_vm(module, proxmox, vm, vmid, timeout, force): + if force: + taskid = getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.shutdown.post(forceStop=1) + else: + taskid = getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.shutdown.post() + while timeout: + if (proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['status'] == 'stopped' and + proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['exitstatus'] == 'OK'): + return True + timeout -= 1 + if timeout == 0: + module.fail_json(msg='Reached timeout while waiting for stopping VM. Last line in task before timeout: %s' + % proxmox.nodes(vm[0]['node']).tasks(taskid).log.get()[:1]) + time.sleep(1) + return False + + +def proxmox_version(proxmox): + apireturn = proxmox.version.get() + return LooseVersion(apireturn['version']) + + +def main(): + module = AnsibleModule( + argument_spec=dict( + acpi=dict(type='bool', default='yes'), + agent=dict(type='bool'), + args=dict(type='str', default=None), + api_host=dict(required=True), + api_user=dict(required=True), + api_password=dict(no_log=True), + autostart=dict(type='bool', default='no'), + balloon=dict(type='int', default=0), + bios=dict(choices=['seabios', 'ovmf']), + boot=dict(type='str', default='cnd'), + bootdisk=dict(type='str'), + clone=dict(type='str', default=None), + cores=dict(type='int', default=1), + cpu=dict(type='str', default='kvm64'), + cpulimit=dict(type='int'), + cpuunits=dict(type='int', default=1000), + delete=dict(type='str', default=None), + description=dict(type='str'), + digest=dict(type='str'), + force=dict(type='bool', default=None), + format=dict(type='str', default='qcow2', choices=['cloop', 'cow', 'qcow', 'qcow2', 'qed', 'raw', 'vmdk']), + freeze=dict(type='bool'), + full=dict(type='bool', default='yes'), + hostpci=dict(type='dict'), + hotplug=dict(type='str'), + hugepages=dict(choices=['any', '2', '1024']), + ide=dict(type='dict', default=None), + keyboard=dict(type='str'), + kvm=dict(type='bool', default='yes'), + localtime=dict(type='bool'), + lock=dict(choices=['migrate', 'backup', 'snapshot', 'rollback']), + machine=dict(type='str'), + memory=dict(type='int', default=512), + migrate_downtime=dict(type='int'), + migrate_speed=dict(type='int'), + name=dict(type='str'), + net=dict(type='dict'), + newid=dict(type='int', default=None), + node=dict(), + numa=dict(type='dict'), + numa_enabled=dict(type='bool'), + onboot=dict(type='bool', default='yes'), + ostype=dict(default='l26', choices=['other', 'wxp', 'w2k', 'w2k3', 'w2k8', 'wvista', 'win7', 'win8', 'win10', 'l24', 'l26', 'solaris']), + parallel=dict(type='dict'), + pool=dict(type='str'), + protection=dict(type='bool'), + reboot=dict(type='bool'), + revert=dict(type='str', default=None), + sata=dict(type='dict'), + scsi=dict(type='dict'), + scsihw=dict(choices=['lsi', 'lsi53c810', 'virtio-scsi-pci', 'virtio-scsi-single', 'megasas', 'pvscsi']), + serial=dict(type='dict'), + shares=dict(type='int'), + skiplock=dict(type='bool'), + smbios=dict(type='str'), + snapname=dict(type='str'), + sockets=dict(type='int', default=1), + startdate=dict(type='str'), + startup=dict(), + state=dict(default='present', choices=['present', 'absent', 'stopped', 'started', 'restarted', 'current']), + storage=dict(type='str'), + tablet=dict(type='bool', default='no'), + target=dict(type='str'), + tdf=dict(type='bool'), + template=dict(type='bool', default='no'), + 
timeout=dict(type='int', default=30), + update=dict(type='bool', default='no'), + validate_certs=dict(type='bool', default='no'), + vcpus=dict(type='int', default=None), + vga=dict(default='std', choices=['std', 'cirrus', 'vmware', 'qxl', 'serial0', 'serial1', 'serial2', 'serial3', 'qxl2', 'qxl3', 'qxl4']), + virtio=dict(type='dict', default=None), + vmid=dict(type='int', default=None), + watchdog=dict(), + ), + mutually_exclusive=[('delete', 'revert'), ('delete', 'update'), ('revert', 'update'), ('clone', 'update'), ('clone', 'delete'), ('clone', 'revert')], + required_one_of=[('name', 'vmid',)], + required_if=[('state', 'present', ['node'])] + ) + + if not HAS_PROXMOXER: + module.fail_json(msg='proxmoxer required for this module') + + api_user = module.params['api_user'] + api_host = module.params['api_host'] + api_password = module.params['api_password'] + clone = module.params['clone'] + cpu = module.params['cpu'] + cores = module.params['cores'] + delete = module.params['delete'] + memory = module.params['memory'] + name = module.params['name'] + newid = module.params['newid'] + node = module.params['node'] + revert = module.params['revert'] + sockets = module.params['sockets'] + state = module.params['state'] + timeout = module.params['timeout'] + update = bool(module.params['update']) + vmid = module.params['vmid'] + validate_certs = module.params['validate_certs'] + + # If password not set get it from PROXMOX_PASSWORD env + if not api_password: + try: + api_password = os.environ['PROXMOX_PASSWORD'] + except KeyError as e: + module.fail_json(msg='You should set api_password param or use PROXMOX_PASSWORD environment variable') + + try: + proxmox = ProxmoxAPI(api_host, user=api_user, password=api_password, verify_ssl=validate_certs) + global VZ_TYPE + global PVE_MAJOR_VERSION + PVE_MAJOR_VERSION = 3 if proxmox_version(proxmox) < LooseVersion('4.0') else 4 + except Exception as e: + module.fail_json(msg='authorization on proxmox cluster failed with exception: %s' % e) + + # If vmid not set get the Next VM id from ProxmoxAPI + # If vm name is set get the VM id from ProxmoxAPI + if not vmid: + if state == 'present' and (not update and not clone) and (not delete and not revert): + try: + vmid = get_nextvmid(module, proxmox) + except Exception as e: + module.fail_json(msg="Can't get the next vmid for VM {0} automatically. Ensure your cluster state is good".format(name)) + else: + try: + if not clone: + vmid = get_vmid(proxmox, name)[0] + else: + vmid = get_vmid(proxmox, clone)[0] + except Exception as e: + if not clone: + module.fail_json(msg="VM {0} does not exist in cluster.".format(name)) + else: + module.fail_json(msg="VM {0} does not exist in cluster.".format(clone)) + + if clone is not None: + if get_vmid(proxmox, name): + module.exit_json(changed=False, msg="VM with name <%s> already exists" % name) + if vmid is not None: + vm = get_vm(proxmox, vmid) + if not vm: + module.fail_json(msg='VM with vmid = %s does not exist in cluster' % vmid) + if not newid: + try: + newid = get_nextvmid(module, proxmox) + except Exception as e: + module.fail_json(msg="Can't get the next vmid for VM {0} automatically. 
Ensure your cluster state is good".format(name)) + else: + vm = get_vm(proxmox, newid) + if vm: + module.exit_json(changed=False, msg="vmid %s with VM name %s already exists" % (newid, name)) + + if delete is not None: + try: + settings(module, proxmox, vmid, node, name, timeout, delete=delete) + module.exit_json(changed=True, msg="Settings has deleted on VM {0} with vmid {1}".format(name, vmid)) + except Exception as e: + module.fail_json(msg='Unable to delete settings on VM {0} with vmid {1}: '.format(name, vmid) + str(e)) + elif revert is not None: + try: + settings(module, proxmox, vmid, node, name, timeout, revert=revert) + module.exit_json(changed=True, msg="Settings has reverted on VM {0} with vmid {1}".format(name, vmid)) + except Exception as e: + module.fail_json(msg='Unable to revert settings on VM {0} with vmid {1}: Maybe is not a pending task... '.format(name, vmid) + str(e)) + + if state == 'present': + try: + if get_vm(proxmox, vmid) and not (update or clone): + module.exit_json(changed=False, msg="VM with vmid <%s> already exists" % vmid) + elif get_vmid(proxmox, name) and not (update or clone): + module.exit_json(changed=False, msg="VM with name <%s> already exists" % name) + elif not (node, name): + module.fail_json(msg='node, name is mandatory for creating/updating vm') + elif not node_check(proxmox, node): + module.fail_json(msg="node '%s' does not exist in cluster" % node) + + create_vm(module, proxmox, vmid, newid, node, name, memory, cpu, cores, sockets, timeout, update, + acpi=module.params['acpi'], + agent=module.params['agent'], + autostart=module.params['autostart'], + balloon=module.params['balloon'], + bios=module.params['bios'], + boot=module.params['boot'], + bootdisk=module.params['bootdisk'], + cpulimit=module.params['cpulimit'], + cpuunits=module.params['cpuunits'], + description=module.params['description'], + digest=module.params['digest'], + force=module.params['force'], + freeze=module.params['freeze'], + hostpci=module.params['hostpci'], + hotplug=module.params['hotplug'], + hugepages=module.params['hugepages'], + ide=module.params['ide'], + keyboard=module.params['keyboard'], + kvm=module.params['kvm'], + localtime=module.params['localtime'], + lock=module.params['lock'], + machine=module.params['machine'], + migrate_downtime=module.params['migrate_downtime'], + migrate_speed=module.params['migrate_speed'], + net=module.params['net'], + numa=module.params['numa'], + numa_enabled=module.params['numa_enabled'], + onboot=module.params['onboot'], + ostype=module.params['ostype'], + parallel=module.params['parallel'], + pool=module.params['pool'], + protection=module.params['protection'], + reboot=module.params['reboot'], + sata=module.params['sata'], + scsi=module.params['scsi'], + scsihw=module.params['scsihw'], + serial=module.params['serial'], + shares=module.params['shares'], + skiplock=module.params['skiplock'], + smbios1=module.params['smbios'], + snapname=module.params['snapname'], + startdate=module.params['startdate'], + startup=module.params['startup'], + tablet=module.params['tablet'], + target=module.params['target'], + tdf=module.params['tdf'], + template=module.params['template'], + vcpus=module.params['vcpus'], + vga=module.params['vga'], + virtio=module.params['virtio'], + watchdog=module.params['watchdog']) + + if not clone: + get_vminfo(module, proxmox, node, vmid, + ide=module.params['ide'], + net=module.params['net'], + sata=module.params['sata'], + scsi=module.params['scsi'], + virtio=module.params['virtio']) + if update: + 
module.exit_json(changed=True, msg="VM %s with vmid %s updated" % (name, vmid)) + elif clone is not None: + module.exit_json(changed=True, msg="VM %s with newid %s cloned from vm with vmid %s" % (name, newid, vmid)) + else: + module.exit_json(changed=True, msg="VM %s with vmid %s deployed" % (name, vmid), **results) + except Exception as e: + if update: + module.fail_json(msg="Unable to update vm {0} with vmid {1}=".format(name, vmid) + str(e)) + elif clone is not None: + module.fail_json(msg="Unable to clone vm {0} from vmid {1}=".format(name, vmid) + str(e)) + else: + module.fail_json(msg="creation of %s VM %s with vmid %s failed with exception=%s" % (VZ_TYPE, name, vmid, e)) + + elif state == 'started': + try: + vm = get_vm(proxmox, vmid) + if not vm: + module.fail_json(msg='VM with vmid <%s> does not exist in cluster' % vmid) + if getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'running': + module.exit_json(changed=False, msg="VM %s is already running" % vmid) + + if start_vm(module, proxmox, vm, vmid, timeout): + module.exit_json(changed=True, msg="VM %s started" % vmid) + except Exception as e: + module.fail_json(msg="starting of VM %s failed with exception: %s" % (vmid, e)) + + elif state == 'stopped': + try: + vm = get_vm(proxmox, vmid) + if not vm: + module.fail_json(msg='VM with vmid = %s does not exist in cluster' % vmid) + + if getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'stopped': + module.exit_json(changed=False, msg="VM %s is already stopped" % vmid) + + if stop_vm(module, proxmox, vm, vmid, timeout, force=module.params['force']): + module.exit_json(changed=True, msg="VM %s is shutting down" % vmid) + except Exception as e: + module.fail_json(msg="stopping of VM %s failed with exception: %s" % (vmid, e)) + + elif state == 'restarted': + try: + vm = get_vm(proxmox, vmid) + if not vm: + module.fail_json(msg='VM with vmid = %s does not exist in cluster' % vmid) + if getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'stopped': + module.exit_json(changed=False, msg="VM %s is not running" % vmid) + + if stop_vm(module, proxmox, vm, vmid, timeout, force=module.params['force']) and start_vm(module, proxmox, vm, vmid, timeout): + module.exit_json(changed=True, msg="VM %s is restarted" % vmid) + except Exception as e: + module.fail_json(msg="restarting of VM %s failed with exception: %s" % (vmid, e)) + + elif state == 'absent': + try: + vm = get_vm(proxmox, vmid) + if not vm: + module.exit_json(changed=False, msg="VM %s does not exist" % vmid) + + if getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'running': + module.exit_json(changed=False, msg="VM %s is running. Stop it before deletion." % vmid) + + taskid = getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE).delete(vmid) + while timeout: + if (proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['status'] == 'stopped' and + proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['exitstatus'] == 'OK'): + module.exit_json(changed=True, msg="VM %s removed" % vmid) + timeout -= 1 + if timeout == 0: + module.fail_json(msg='Reached timeout while waiting for removing VM. 
Last line in task before timeout: %s'
+                                      % proxmox.nodes(vm[0]['node']).tasks(taskid).log.get()[:1])
+
+            time.sleep(1)
+        except Exception as e:
+            module.fail_json(msg="deletion of VM %s failed with exception: %s" % (vmid, e))
+
+    elif state == 'current':
+        status = {}
+        try:
+            vm = get_vm(proxmox, vmid)
+            if not vm:
+                module.fail_json(msg='VM with vmid = %s does not exist in cluster' % vmid)
+            current = getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.current.get()['status']
+            status['status'] = current
+            if status:
+                module.exit_json(changed=False, msg="VM %s with vmid = %s is %s" % (name, vmid, current), **status)
+        except Exception as e:
+            module.fail_json(msg="Unable to get vm {0} with vmid = {1} status: ".format(name, vmid) + str(e))
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/cloud/misc/proxmox_template.py b/plugins/modules/cloud/misc/proxmox_template.py
new file mode 100644
index 0000000000..1b080a48cc
--- /dev/null
+++ b/plugins/modules/cloud/misc/proxmox_template.py
@@ -0,0 +1,243 @@
+#!/usr/bin/python
+#
+# Copyright: Ansible Project
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: proxmox_template
+short_description: management of OS templates in Proxmox VE cluster
+description:
+  - allows you to upload/delete templates in a Proxmox VE cluster
+options:
+  api_host:
+    description:
+      - the host of the Proxmox VE cluster
+    required: true
+  api_user:
+    description:
+      - the user to authenticate with
+    required: true
+  api_password:
+    description:
+      - the password to authenticate with
+      - you can use the PROXMOX_PASSWORD environment variable
+  validate_certs:
+    description:
+      - enable/disable HTTPS certificate verification
+    default: 'no'
+    type: bool
+  node:
+    description:
+      - the Proxmox VE node on which to operate with the template
+    required: true
+  src:
+    description:
+      - path to the file to upload
+      - required only for C(state=present)
+    aliases: ['path']
+  template:
+    description:
+      - the template name
+      - required only for states C(absent), C(info)
+  content_type:
+    description:
+      - content type
+      - required only for C(state=present)
+    default: 'vztmpl'
+    choices: ['vztmpl', 'iso']
+  storage:
+    description:
+      - target storage
+    default: 'local'
+  timeout:
+    description:
+      - timeout for operations
+    default: 30
+  force:
+    description:
+      - can be used only with C(state=present); an existing template will be overwritten
+    type: bool
+    default: 'no'
+  state:
+    description:
+      - Indicate the desired state of the template
+    choices: ['present', 'absent']
+    default: present
+notes:
+  - Requires the proxmoxer and requests modules on the host. These modules can be installed with pip.
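+# As a quick sanity check outside Ansible, the underlying proxmoxer client that
+# this module wraps can be exercised directly (a minimal sketch with illustrative
+# values; verify_ssl corresponds to the validate_certs option):
+#
+#     from proxmoxer import ProxmoxAPI
+#     proxmox = ProxmoxAPI('node1', user='root@pam', password='1q2w3e', verify_ssl=False)
+#     print(proxmox.version.get())  # the same call the proxmox modules use for version checks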
+requirements: [ "proxmoxer", "requests" ] +author: Sergei Antipov (@UnderGreen) +''' + +EXAMPLES = ''' +# Upload new openvz template with minimal options +- proxmox_template: + node: uk-mc02 + api_user: root@pam + api_password: 1q2w3e + api_host: node1 + src: ~/ubuntu-14.04-x86_64.tar.gz + +# Upload new openvz template with minimal options use environment PROXMOX_PASSWORD variable(you should export it before) +- proxmox_template: + node: uk-mc02 + api_user: root@pam + api_host: node1 + src: ~/ubuntu-14.04-x86_64.tar.gz + +# Upload new openvz template with all options and force overwrite +- proxmox_template: + node: uk-mc02 + api_user: root@pam + api_password: 1q2w3e + api_host: node1 + storage: local + content_type: vztmpl + src: ~/ubuntu-14.04-x86_64.tar.gz + force: yes + +# Delete template with minimal options +- proxmox_template: + node: uk-mc02 + api_user: root@pam + api_password: 1q2w3e + api_host: node1 + template: ubuntu-14.04-x86_64.tar.gz + state: absent +''' + +import os +import time + +try: + from proxmoxer import ProxmoxAPI + HAS_PROXMOXER = True +except ImportError: + HAS_PROXMOXER = False + +from ansible.module_utils.basic import AnsibleModule + + +def get_template(proxmox, node, storage, content_type, template): + return [True for tmpl in proxmox.nodes(node).storage(storage).content.get() + if tmpl['volid'] == '%s:%s/%s' % (storage, content_type, template)] + + +def upload_template(module, proxmox, api_host, node, storage, content_type, realpath, timeout): + taskid = proxmox.nodes(node).storage(storage).upload.post(content=content_type, filename=open(realpath, 'rb')) + while timeout: + task_status = proxmox.nodes(api_host.split('.')[0]).tasks(taskid).status.get() + if task_status['status'] == 'stopped' and task_status['exitstatus'] == 'OK': + return True + timeout = timeout - 1 + if timeout == 0: + module.fail_json(msg='Reached timeout while waiting for uploading template. 
Last line in task before timeout: %s' + % proxmox.node(node).tasks(taskid).log.get()[:1]) + + time.sleep(1) + return False + + +def delete_template(module, proxmox, node, storage, content_type, template, timeout): + volid = '%s:%s/%s' % (storage, content_type, template) + proxmox.nodes(node).storage(storage).content.delete(volid) + while timeout: + if not get_template(proxmox, node, storage, content_type, template): + return True + timeout = timeout - 1 + if timeout == 0: + module.fail_json(msg='Reached timeout while waiting for deleting template.') + + time.sleep(1) + return False + + +def main(): + module = AnsibleModule( + argument_spec=dict( + api_host=dict(required=True), + api_user=dict(required=True), + api_password=dict(no_log=True), + validate_certs=dict(type='bool', default='no'), + node=dict(), + src=dict(type='path'), + template=dict(), + content_type=dict(default='vztmpl', choices=['vztmpl', 'iso']), + storage=dict(default='local'), + timeout=dict(type='int', default=30), + force=dict(type='bool', default='no'), + state=dict(default='present', choices=['present', 'absent']), + ) + ) + + if not HAS_PROXMOXER: + module.fail_json(msg='proxmoxer required for this module') + + state = module.params['state'] + api_user = module.params['api_user'] + api_host = module.params['api_host'] + api_password = module.params['api_password'] + validate_certs = module.params['validate_certs'] + node = module.params['node'] + storage = module.params['storage'] + timeout = module.params['timeout'] + + # If password not set get it from PROXMOX_PASSWORD env + if not api_password: + try: + api_password = os.environ['PROXMOX_PASSWORD'] + except KeyError as e: + module.fail_json(msg='You should set api_password param or use PROXMOX_PASSWORD environment variable') + + try: + proxmox = ProxmoxAPI(api_host, user=api_user, password=api_password, verify_ssl=validate_certs) + except Exception as e: + module.fail_json(msg='authorization on proxmox cluster failed with exception: %s' % e) + + if state == 'present': + try: + content_type = module.params['content_type'] + src = module.params['src'] + + template = os.path.basename(src) + if get_template(proxmox, node, storage, content_type, template) and not module.params['force']: + module.exit_json(changed=False, msg='template with volid=%s:%s/%s is already exists' % (storage, content_type, template)) + elif not src: + module.fail_json(msg='src param to uploading template file is mandatory') + elif not (os.path.exists(src) and os.path.isfile(src)): + module.fail_json(msg='template file on path %s not exists' % src) + + if upload_template(module, proxmox, api_host, node, storage, content_type, src, timeout): + module.exit_json(changed=True, msg='template with volid=%s:%s/%s uploaded' % (storage, content_type, template)) + except Exception as e: + module.fail_json(msg="uploading of template %s failed with exception: %s" % (template, e)) + + elif state == 'absent': + try: + content_type = module.params['content_type'] + template = module.params['template'] + + if not template: + module.fail_json(msg='template param is mandatory') + elif not get_template(proxmox, node, storage, content_type, template): + module.exit_json(changed=False, msg='template with volid=%s:%s/%s is already deleted' % (storage, content_type, template)) + + if delete_template(module, proxmox, node, storage, content_type, template, timeout): + module.exit_json(changed=True, msg='template with volid=%s:%s/%s deleted' % (storage, content_type, template)) + except Exception as e: + 
module.fail_json(msg="deleting of template %s failed with exception: %s" % (template, e)) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/misc/rhevm.py b/plugins/modules/cloud/misc/rhevm.py new file mode 100644 index 0000000000..b3d375c975 --- /dev/null +++ b/plugins/modules/cloud/misc/rhevm.py @@ -0,0 +1,1516 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2016, Timothy Vandenbrande +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = r''' +--- +module: rhevm +short_description: RHEV/oVirt automation +description: + - This module only supports oVirt/RHEV version 3. + - A newer module M(ovirt_vm) supports oVirt/RHV version 4. + - Allows you to create/remove/update or powermanage virtual machines on a RHEV/oVirt platform. +requirements: + - ovirtsdk +author: +- Timothy Vandenbrande (@TimothyVandenbrande) +options: + user: + description: + - The user to authenticate with. + type: str + default: admin@internal + password: + description: + - The password for user authentication. + type: str + server: + description: + - The name/IP of your RHEV-m/oVirt instance. + type: str + default: 127.0.0.1 + port: + description: + - The port on which the API is reachable. + type: int + default: 443 + insecure_api: + description: + - A boolean switch to make a secure or insecure connection to the server. + type: bool + default: no + name: + description: + - The name of the VM. + type: str + cluster: + description: + - The RHEV/oVirt cluster in which you want you VM to start. + type: str + datacenter: + description: + - The RHEV/oVirt datacenter in which you want you VM to start. + type: str + default: Default + state: + description: + - This serves to create/remove/update or powermanage your VM. + type: str + choices: [ absent, cd, down, info, ping, present, restarted, up ] + default: present + image: + description: + - The template to use for the VM. + type: str + type: + description: + - To define if the VM is a server or desktop. + type: str + choices: [ desktop, host, server ] + default: server + vmhost: + description: + - The host you wish your VM to run on. + type: str + vmcpu: + description: + - The number of CPUs you want in your VM. + type: int + default: 2 + cpu_share: + description: + - This parameter is used to configure the CPU share. + type: int + default: 0 + vmmem: + description: + - The amount of memory you want your VM to use (in GB). + type: int + default: 1 + osver: + description: + - The operating system option in RHEV/oVirt. + type: str + default: rhel_6x64 + mempol: + description: + - The minimum amount of memory you wish to reserve for this system. + type: int + default: 1 + vm_ha: + description: + - To make your VM High Available. + type: bool + default: yes + disks: + description: + - This option uses complex arguments and is a list of disks with the options name, size and domain. + type: list + ifaces: + description: + - This option uses complex arguments and is a list of interfaces with the options name and vlan. + type: list + aliases: [ interfaces, nics ] + boot_order: + description: + - This option uses complex arguments and is a list of items that specify the bootorder. 
+ type: list + default: [ hd, network ] + del_prot: + description: + - This option sets the delete protection checkbox. + type: bool + default: yes + cd_drive: + description: + - The CD you wish to have mounted on the VM when I(state = 'CD'). + type: str + timeout: + description: + - The timeout you wish to define for power actions. + - When I(state = 'up'). + - When I(state = 'down'). + - When I(state = 'restarted'). + type: int +''' + +RETURN = r''' +vm: + description: Returns all of the VMs variables and execution. + returned: always + type: dict + sample: '{ + "boot_order": [ + "hd", + "network" + ], + "changed": true, + "changes": [ + "Delete Protection" + ], + "cluster": "C1", + "cpu_share": "0", + "created": false, + "datacenter": "Default", + "del_prot": true, + "disks": [ + { + "domain": "ssd-san", + "name": "OS", + "size": 40 + } + ], + "eth0": "00:00:5E:00:53:00", + "eth1": "00:00:5E:00:53:01", + "eth2": "00:00:5E:00:53:02", + "exists": true, + "failed": false, + "ifaces": [ + { + "name": "eth0", + "vlan": "Management" + }, + { + "name": "eth1", + "vlan": "Internal" + }, + { + "name": "eth2", + "vlan": "External" + } + ], + "image": false, + "mempol": "0", + "msg": [ + "VM exists", + "cpu_share was already set to 0", + "VM high availability was already set to True", + "The boot order has already been set", + "VM delete protection has been set to True", + "Disk web2_Disk0_OS already exists", + "The VM starting host was already set to host416" + ], + "name": "web2", + "type": "server", + "uuid": "4ba5a1be-e60b-4368-9533-920f156c817b", + "vm_ha": true, + "vmcpu": "4", + "vmhost": "host416", + "vmmem": "16" + }' +''' + +EXAMPLES = r''' +- name: Basic get info from VM + rhevm: + server: rhevm01 + user: '{{ rhev.admin.name }}' + password: '{{ rhev.admin.pass }}' + name: demo + state: info + +- name: Basic create example from image + rhevm: + server: rhevm01 + user: '{{ rhev.admin.name }}' + password: '{{ rhev.admin.pass }}' + name: demo + cluster: centos + image: centos7_x64 + state: present + +- name: Power management + rhevm: + server: rhevm01 + user: '{{ rhev.admin.name }}' + password: '{{ rhev.admin.pass }}' + cluster: RH + name: uptime_server + image: centos7_x64 + state: down + +- name: Multi disk, multi nic create example + rhevm: + server: rhevm01 + user: '{{ rhev.admin.name }}' + password: '{{ rhev.admin.pass }}' + cluster: RH + name: server007 + type: server + vmcpu: 4 + vmmem: 2 + ifaces: + - name: eth0 + vlan: vlan2202 + - name: eth1 + vlan: vlan36 + - name: eth2 + vlan: vlan38 + - name: eth3 + vlan: vlan2202 + disks: + - name: root + size: 10 + domain: ssd-san + - name: swap + size: 10 + domain: 15kiscsi-san + - name: opt + size: 10 + domain: 15kiscsi-san + - name: var + size: 10 + domain: 10kiscsi-san + - name: home + size: 10 + domain: sata-san + boot_order: + - network + - hd + state: present + +- name: Add a CD to the disk cd_drive + rhevm: + user: '{{ rhev.admin.name }}' + password: '{{ rhev.admin.pass }}' + name: server007 + cd_drive: rhev-tools-setup.iso + state: cd + +- name: New host deployment + host network configuration + rhevm: + password: '{{ rhevm.admin.pass }}' + name: ovirt_node007 + type: host + cluster: rhevm01 + ifaces: + - name: em1 + - name: em2 + - name: p3p1 + ip: 172.31.224.200 + netmask: 255.255.254.0 + - name: p3p2 + ip: 172.31.225.200 + netmask: 255.255.254.0 + - name: bond0 + bond: + - em1 + - em2 + network: rhevm + ip: 172.31.222.200 + netmask: 255.255.255.0 + management: yes + - name: bond0.36 + network: vlan36 + ip: 10.2.36.200 + netmask: 
255.255.254.0 + gateway: 10.2.36.254 + - name: bond0.2202 + network: vlan2202 + - name: bond0.38 + network: vlan38 + state: present +''' + +import time + +try: + from ovirtsdk.api import API + from ovirtsdk.xml import params + HAS_SDK = True +except ImportError: + HAS_SDK = False + +from ansible.module_utils.basic import AnsibleModule + + +RHEV_FAILED = 1 +RHEV_SUCCESS = 0 +RHEV_UNAVAILABLE = 2 + +RHEV_TYPE_OPTS = ['desktop', 'host', 'server'] +STATE_OPTS = ['absent', 'cd', 'down', 'info', 'ping', 'present', 'restart', 'up'] + +msg = [] +changed = False +failed = False + + +class RHEVConn(object): + 'Connection to RHEV-M' + + def __init__(self, module): + self.module = module + + user = module.params.get('user') + password = module.params.get('password') + server = module.params.get('server') + port = module.params.get('port') + insecure_api = module.params.get('insecure_api') + + url = "https://%s:%s" % (server, port) + + try: + api = API(url=url, username=user, password=password, insecure=str(insecure_api)) + api.test() + self.conn = api + except Exception: + raise Exception("Failed to connect to RHEV-M.") + + def __del__(self): + self.conn.disconnect() + + def createVMimage(self, name, cluster, template): + try: + vmparams = params.VM( + name=name, + cluster=self.conn.clusters.get(name=cluster), + template=self.conn.templates.get(name=template), + disks=params.Disks(clone=True) + ) + self.conn.vms.add(vmparams) + setMsg("VM is created") + setChanged() + return True + except Exception as e: + setMsg("Failed to create VM") + setMsg(str(e)) + setFailed() + return False + + def createVM(self, name, cluster, os, actiontype): + try: + vmparams = params.VM( + name=name, + cluster=self.conn.clusters.get(name=cluster), + os=params.OperatingSystem(type_=os), + template=self.conn.templates.get(name="Blank"), + type_=actiontype + ) + self.conn.vms.add(vmparams) + setMsg("VM is created") + setChanged() + return True + except Exception as e: + setMsg("Failed to create VM") + setMsg(str(e)) + setFailed() + return False + + def createDisk(self, vmname, diskname, disksize, diskdomain, diskinterface, diskformat, diskallocationtype, diskboot): + VM = self.get_VM(vmname) + + newdisk = params.Disk( + name=diskname, + size=1024 * 1024 * 1024 * int(disksize), + wipe_after_delete=True, + sparse=diskallocationtype, + interface=diskinterface, + format=diskformat, + bootable=diskboot, + storage_domains=params.StorageDomains( + storage_domain=[self.get_domain(diskdomain)] + ) + ) + + try: + VM.disks.add(newdisk) + VM.update() + setMsg("Successfully added disk " + diskname) + setChanged() + except Exception as e: + setFailed() + setMsg("Error attaching " + diskname + "disk, please recheck and remove any leftover configuration.") + setMsg(str(e)) + return False + + try: + currentdisk = VM.disks.get(name=diskname) + attempt = 1 + while currentdisk.status.state != 'ok': + currentdisk = VM.disks.get(name=diskname) + if attempt == 100: + setMsg("Error, disk %s, state %s" % (diskname, str(currentdisk.status.state))) + raise Exception() + else: + attempt += 1 + time.sleep(2) + setMsg("The disk " + diskname + " is ready.") + except Exception as e: + setFailed() + setMsg("Error getting the state of " + diskname + ".") + setMsg(str(e)) + return False + return True + + def createNIC(self, vmname, nicname, vlan, interface): + VM = self.get_VM(vmname) + CLUSTER = self.get_cluster_byid(VM.cluster.id) + DC = self.get_DC_byid(CLUSTER.data_center.id) + newnic = params.NIC( + name=nicname, + network=DC.networks.get(name=vlan), + 
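+            # `vlan` is a network name; it is resolved against the data center
+            # that owns the VM's cluster (looked up just above)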
interface=interface + ) + + try: + VM.nics.add(newnic) + VM.update() + setMsg("Successfully added iface " + nicname) + setChanged() + except Exception as e: + setFailed() + setMsg("Error attaching " + nicname + " iface, please recheck and remove any leftover configuration.") + setMsg(str(e)) + return False + + try: + currentnic = VM.nics.get(name=nicname) + attempt = 1 + while currentnic.active is not True: + currentnic = VM.nics.get(name=nicname) + if attempt == 100: + setMsg("Error, iface %s, state %s" % (nicname, str(currentnic.active))) + raise Exception() + else: + attempt += 1 + time.sleep(2) + setMsg("The iface " + nicname + " is ready.") + except Exception as e: + setFailed() + setMsg("Error getting the state of " + nicname + ".") + setMsg(str(e)) + return False + return True + + def get_DC(self, dc_name): + return self.conn.datacenters.get(name=dc_name) + + def get_DC_byid(self, dc_id): + return self.conn.datacenters.get(id=dc_id) + + def get_VM(self, vm_name): + return self.conn.vms.get(name=vm_name) + + def get_cluster_byid(self, cluster_id): + return self.conn.clusters.get(id=cluster_id) + + def get_cluster(self, cluster_name): + return self.conn.clusters.get(name=cluster_name) + + def get_domain_byid(self, dom_id): + return self.conn.storagedomains.get(id=dom_id) + + def get_domain(self, domain_name): + return self.conn.storagedomains.get(name=domain_name) + + def get_disk(self, disk): + return self.conn.disks.get(disk) + + def get_network(self, dc_name, network_name): + return self.get_DC(dc_name).networks.get(network_name) + + def get_network_byid(self, network_id): + return self.conn.networks.get(id=network_id) + + def get_NIC(self, vm_name, nic_name): + return self.get_VM(vm_name).nics.get(nic_name) + + def get_Host(self, host_name): + return self.conn.hosts.get(name=host_name) + + def get_Host_byid(self, host_id): + return self.conn.hosts.get(id=host_id) + + def set_Memory(self, name, memory): + VM = self.get_VM(name) + VM.memory = int(int(memory) * 1024 * 1024 * 1024) + try: + VM.update() + setMsg("The Memory has been updated.") + setChanged() + return True + except Exception as e: + setMsg("Failed to update memory.") + setMsg(str(e)) + setFailed() + return False + + def set_Memory_Policy(self, name, memory_policy): + VM = self.get_VM(name) + VM.memory_policy.guaranteed = int(int(memory_policy) * 1024 * 1024 * 1024) + try: + VM.update() + setMsg("The memory policy has been updated.") + setChanged() + return True + except Exception as e: + setMsg("Failed to update memory policy.") + setMsg(str(e)) + setFailed() + return False + + def set_CPU(self, name, cpu): + VM = self.get_VM(name) + VM.cpu.topology.cores = int(cpu) + try: + VM.update() + setMsg("The number of CPUs has been updated.") + setChanged() + return True + except Exception as e: + setMsg("Failed to update the number of CPUs.") + setMsg(str(e)) + setFailed() + return False + + def set_CPU_share(self, name, cpu_share): + VM = self.get_VM(name) + VM.cpu_shares = int(cpu_share) + try: + VM.update() + setMsg("The CPU share has been updated.") + setChanged() + return True + except Exception as e: + setMsg("Failed to update the CPU share.") + setMsg(str(e)) + setFailed() + return False + + def set_Disk(self, diskname, disksize, diskinterface, diskboot): + DISK = self.get_disk(diskname) + setMsg("Checking disk " + diskname) + if DISK.get_bootable() != diskboot: + try: + DISK.set_bootable(diskboot) + setMsg("Updated the boot option on the disk.") + setChanged() + except Exception as e: + setMsg("Failed to set the boot 
option on the disk.") + setMsg(str(e)) + setFailed() + return False + else: + setMsg("The boot option of the disk is correct") + if int(DISK.size) < (1024 * 1024 * 1024 * int(disksize)): + try: + DISK.size = (1024 * 1024 * 1024 * int(disksize)) + setMsg("Updated the size of the disk.") + setChanged() + except Exception as e: + setMsg("Failed to update the size of the disk.") + setMsg(str(e)) + setFailed() + return False + elif int(DISK.size) > (1024 * 1024 * 1024 * int(disksize)): + setMsg("Shrinking disks is not supported") + setFailed() + return False + else: + setMsg("The size of the disk is correct") + if str(DISK.interface) != str(diskinterface): + try: + DISK.interface = diskinterface + setMsg("Updated the interface of the disk.") + setChanged() + except Exception as e: + setMsg("Failed to update the interface of the disk.") + setMsg(str(e)) + setFailed() + return False + else: + setMsg("The interface of the disk is correct") + return True + + def set_NIC(self, vmname, nicname, newname, vlan, interface): + NIC = self.get_NIC(vmname, nicname) + VM = self.get_VM(vmname) + CLUSTER = self.get_cluster_byid(VM.cluster.id) + DC = self.get_DC_byid(CLUSTER.data_center.id) + NETWORK = self.get_network(str(DC.name), vlan) + checkFail() + if NIC.name != newname: + NIC.name = newname + setMsg('Updating iface name to ' + newname) + setChanged() + if str(NIC.network.id) != str(NETWORK.id): + NIC.set_network(NETWORK) + setMsg('Updating iface network to ' + vlan) + setChanged() + if NIC.interface != interface: + NIC.interface = interface + setMsg('Updating iface interface to ' + interface) + setChanged() + try: + NIC.update() + setMsg('iface has successfully been updated.') + except Exception as e: + setMsg("Failed to update the iface.") + setMsg(str(e)) + setFailed() + return False + return True + + def set_DeleteProtection(self, vmname, del_prot): + VM = self.get_VM(vmname) + VM.delete_protected = del_prot + try: + VM.update() + setChanged() + except Exception as e: + setMsg("Failed to update delete protection.") + setMsg(str(e)) + setFailed() + return False + return True + + def set_BootOrder(self, vmname, boot_order): + VM = self.get_VM(vmname) + bootorder = [] + for device in boot_order: + bootorder.append(params.Boot(dev=device)) + VM.os.boot = bootorder + + try: + VM.update() + setChanged() + except Exception as e: + setMsg("Failed to update the boot order.") + setMsg(str(e)) + setFailed() + return False + return True + + def set_Host(self, host_name, cluster, ifaces): + HOST = self.get_Host(host_name) + CLUSTER = self.get_cluster(cluster) + + if HOST is None: + setMsg("Host does not exist.") + ifacelist = dict() + networklist = [] + manageip = '' + + try: + for iface in ifaces: + try: + setMsg('creating host interface ' + iface['name']) + if 'management' in iface: + manageip = iface['ip'] + if 'boot_protocol' not in iface: + if 'ip' in iface: + iface['boot_protocol'] = 'static' + else: + iface['boot_protocol'] = 'none' + if 'ip' not in iface: + iface['ip'] = '' + if 'netmask' not in iface: + iface['netmask'] = '' + if 'gateway' not in iface: + iface['gateway'] = '' + + if 'network' in iface: + if 'bond' in iface: + bond = [] + for slave in iface['bond']: + bond.append(ifacelist[slave]) + try: + tmpiface = params.Bonding( + slaves=params.Slaves(host_nic=bond), + options=params.Options( + option=[ + params.Option(name='miimon', value='100'), + params.Option(name='mode', value='4') + ] + ) + ) + except Exception as e: + setMsg('Failed to create the bond for ' + iface['name']) + setFailed() + 
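+                                # append the raw exception text so the caller can see why the bond could not be built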
setMsg(str(e)) + return False + try: + tmpnetwork = params.HostNIC( + network=params.Network(name=iface['network']), + name=iface['name'], + boot_protocol=iface['boot_protocol'], + ip=params.IP( + address=iface['ip'], + netmask=iface['netmask'], + gateway=iface['gateway'] + ), + override_configuration=True, + bonding=tmpiface) + networklist.append(tmpnetwork) + setMsg('Applying network ' + iface['name']) + except Exception as e: + setMsg('Failed to set' + iface['name'] + ' as network interface') + setFailed() + setMsg(str(e)) + return False + else: + tmpnetwork = params.HostNIC( + network=params.Network(name=iface['network']), + name=iface['name'], + boot_protocol=iface['boot_protocol'], + ip=params.IP( + address=iface['ip'], + netmask=iface['netmask'], + gateway=iface['gateway'] + )) + networklist.append(tmpnetwork) + setMsg('Applying network ' + iface['name']) + else: + tmpiface = params.HostNIC( + name=iface['name'], + network=params.Network(), + boot_protocol=iface['boot_protocol'], + ip=params.IP( + address=iface['ip'], + netmask=iface['netmask'], + gateway=iface['gateway'] + )) + ifacelist[iface['name']] = tmpiface + except Exception as e: + setMsg('Failed to set ' + iface['name']) + setFailed() + setMsg(str(e)) + return False + except Exception as e: + setMsg('Failed to set networks') + setMsg(str(e)) + setFailed() + return False + + if manageip == '': + setMsg('No management network is defined') + setFailed() + return False + + try: + HOST = params.Host(name=host_name, address=manageip, cluster=CLUSTER, ssh=params.SSH(authentication_method='publickey')) + if self.conn.hosts.add(HOST): + setChanged() + HOST = self.get_Host(host_name) + state = HOST.status.state + while (state != 'non_operational' and state != 'up'): + HOST = self.get_Host(host_name) + state = HOST.status.state + time.sleep(1) + if state == 'non_responsive': + setMsg('Failed to add host to RHEVM') + setFailed() + return False + + setMsg('status host: up') + time.sleep(5) + + HOST = self.get_Host(host_name) + state = HOST.status.state + setMsg('State before setting to maintenance: ' + str(state)) + HOST.deactivate() + while state != 'maintenance': + HOST = self.get_Host(host_name) + state = HOST.status.state + time.sleep(1) + setMsg('status host: maintenance') + + try: + HOST.nics.setupnetworks(params.Action( + force=True, + check_connectivity=False, + host_nics=params.HostNics(host_nic=networklist) + )) + setMsg('nics are set') + except Exception as e: + setMsg('Failed to apply networkconfig') + setFailed() + setMsg(str(e)) + return False + + try: + HOST.commitnetconfig() + setMsg('Network config is saved') + except Exception as e: + setMsg('Failed to save networkconfig') + setFailed() + setMsg(str(e)) + return False + except Exception as e: + if 'The Host name is already in use' in str(e): + setMsg("Host already exists") + else: + setMsg("Failed to add host") + setFailed() + setMsg(str(e)) + return False + + HOST.activate() + while state != 'up': + HOST = self.get_Host(host_name) + state = HOST.status.state + time.sleep(1) + if state == 'non_responsive': + setMsg('Failed to apply networkconfig.') + setFailed() + return False + setMsg('status host: up') + else: + setMsg("Host exists.") + + return True + + def del_NIC(self, vmname, nicname): + return self.get_NIC(vmname, nicname).delete() + + def remove_VM(self, vmname): + VM = self.get_VM(vmname) + try: + VM.delete() + except Exception as e: + setMsg("Failed to remove VM.") + setMsg(str(e)) + setFailed() + return False + return True + + def start_VM(self, vmname, 
timeout): + VM = self.get_VM(vmname) + try: + VM.start() + except Exception as e: + setMsg("Failed to start VM.") + setMsg(str(e)) + setFailed() + return False + return self.wait_VM(vmname, "up", timeout) + + def wait_VM(self, vmname, state, timeout): + VM = self.get_VM(vmname) + while VM.status.state != state: + VM = self.get_VM(vmname) + time.sleep(10) + if timeout is not False: + timeout -= 10 + if timeout <= 0: + setMsg("Timeout expired") + setFailed() + return False + return True + + def stop_VM(self, vmname, timeout): + VM = self.get_VM(vmname) + try: + VM.stop() + except Exception as e: + setMsg("Failed to stop VM.") + setMsg(str(e)) + setFailed() + return False + return self.wait_VM(vmname, "down", timeout) + + def set_CD(self, vmname, cd_drive): + VM = self.get_VM(vmname) + try: + if str(VM.status.state) == 'down': + cdrom = params.CdRom(file=cd_drive) + VM.cdroms.add(cdrom) + setMsg("Attached the image.") + setChanged() + else: + cdrom = VM.cdroms.get(id="00000000-0000-0000-0000-000000000000") + cdrom.set_file(cd_drive) + cdrom.update(current=True) + setMsg("Attached the image.") + setChanged() + except Exception as e: + setMsg("Failed to attach image.") + setMsg(str(e)) + setFailed() + return False + return True + + def set_VM_Host(self, vmname, vmhost): + VM = self.get_VM(vmname) + HOST = self.get_Host(vmhost) + try: + VM.placement_policy.host = HOST + VM.update() + setMsg("Set startup host to " + vmhost) + setChanged() + except Exception as e: + setMsg("Failed to set startup host.") + setMsg(str(e)) + setFailed() + return False + return True + + def migrate_VM(self, vmname, vmhost): + VM = self.get_VM(vmname) + + HOST = self.get_Host_byid(VM.host.id) + if str(HOST.name) != vmhost: + try: + VM.migrate( + action=params.Action( + host=params.Host( + name=vmhost, + ) + ), + ) + setChanged() + setMsg("VM migrated to " + vmhost) + except Exception as e: + setMsg("Failed to set startup host.") + setMsg(str(e)) + setFailed() + return False + return True + + def remove_CD(self, vmname): + VM = self.get_VM(vmname) + try: + VM.cdroms.get(id="00000000-0000-0000-0000-000000000000").delete() + setMsg("Removed the image.") + setChanged() + except Exception as e: + setMsg("Failed to remove the image.") + setMsg(str(e)) + setFailed() + return False + return True + + +class RHEV(object): + def __init__(self, module): + self.module = module + + def __get_conn(self): + self.conn = RHEVConn(self.module) + return self.conn + + def test(self): + self.__get_conn() + return "OK" + + def getVM(self, name): + self.__get_conn() + VM = self.conn.get_VM(name) + if VM: + vminfo = dict() + vminfo['uuid'] = VM.id + vminfo['name'] = VM.name + vminfo['status'] = VM.status.state + vminfo['cpu_cores'] = VM.cpu.topology.cores + vminfo['cpu_sockets'] = VM.cpu.topology.sockets + vminfo['cpu_shares'] = VM.cpu_shares + vminfo['memory'] = (int(VM.memory) // 1024 // 1024 // 1024) + vminfo['mem_pol'] = (int(VM.memory_policy.guaranteed) // 1024 // 1024 // 1024) + vminfo['os'] = VM.get_os().type_ + vminfo['del_prot'] = VM.delete_protected + try: + vminfo['host'] = str(self.conn.get_Host_byid(str(VM.host.id)).name) + except Exception: + vminfo['host'] = None + vminfo['boot_order'] = [] + for boot_dev in VM.os.get_boot(): + vminfo['boot_order'].append(str(boot_dev.dev)) + vminfo['disks'] = [] + for DISK in VM.disks.list(): + disk = dict() + disk['name'] = DISK.name + disk['size'] = (int(DISK.size) // 1024 // 1024 // 1024) + disk['domain'] = 
str((self.conn.get_domain_byid(DISK.get_storage_domains().get_storage_domain()[0].id)).name) + disk['interface'] = DISK.interface + vminfo['disks'].append(disk) + vminfo['ifaces'] = [] + for NIC in VM.nics.list(): + iface = dict() + iface['name'] = str(NIC.name) + iface['vlan'] = str(self.conn.get_network_byid(NIC.get_network().id).name) + iface['interface'] = NIC.interface + iface['mac'] = NIC.mac.address + vminfo['ifaces'].append(iface) + vminfo[str(NIC.name)] = NIC.mac.address + CLUSTER = self.conn.get_cluster_byid(VM.cluster.id) + if CLUSTER: + vminfo['cluster'] = CLUSTER.name + else: + vminfo = False + return vminfo + + def createVMimage(self, name, cluster, template, disks): + self.__get_conn() + return self.conn.createVMimage(name, cluster, template, disks) + + def createVM(self, name, cluster, os, actiontype): + self.__get_conn() + return self.conn.createVM(name, cluster, os, actiontype) + + def setMemory(self, name, memory): + self.__get_conn() + return self.conn.set_Memory(name, memory) + + def setMemoryPolicy(self, name, memory_policy): + self.__get_conn() + return self.conn.set_Memory_Policy(name, memory_policy) + + def setCPU(self, name, cpu): + self.__get_conn() + return self.conn.set_CPU(name, cpu) + + def setCPUShare(self, name, cpu_share): + self.__get_conn() + return self.conn.set_CPU_share(name, cpu_share) + + def setDisks(self, name, disks): + self.__get_conn() + counter = 0 + bootselect = False + for disk in disks: + if 'bootable' in disk: + if disk['bootable'] is True: + bootselect = True + + for disk in disks: + diskname = name + "_Disk" + str(counter) + "_" + disk.get('name', '').replace('/', '_') + disksize = disk.get('size', 1) + diskdomain = disk.get('domain', None) + if diskdomain is None: + setMsg("`domain` is a required disk key.") + setFailed() + return False + diskinterface = disk.get('interface', 'virtio') + diskformat = disk.get('format', 'raw') + diskallocationtype = disk.get('thin', False) + diskboot = disk.get('bootable', False) + + if bootselect is False and counter == 0: + diskboot = True + + DISK = self.conn.get_disk(diskname) + + if DISK is None: + self.conn.createDisk(name, diskname, disksize, diskdomain, diskinterface, diskformat, diskallocationtype, diskboot) + else: + self.conn.set_Disk(diskname, disksize, diskinterface, diskboot) + checkFail() + counter += 1 + + return True + + def setNetworks(self, vmname, ifaces): + self.__get_conn() + VM = self.conn.get_VM(vmname) + + counter = 0 + length = len(ifaces) + + for NIC in VM.nics.list(): + if counter < length: + iface = ifaces[counter] + name = iface.get('name', None) + if name is None: + setMsg("`name` is a required iface key.") + setFailed() + elif str(name) != str(NIC.name): + setMsg("ifaces are in the wrong order, rebuilding everything.") + for NIC in VM.nics.list(): + self.conn.del_NIC(vmname, NIC.name) + self.setNetworks(vmname, ifaces) + checkFail() + return True + vlan = iface.get('vlan', None) + if vlan is None: + setMsg("`vlan` is a required iface key.") + setFailed() + checkFail() + interface = iface.get('interface', 'virtio') + self.conn.set_NIC(vmname, str(NIC.name), name, vlan, interface) + else: + self.conn.del_NIC(vmname, NIC.name) + counter += 1 + checkFail() + + while counter < length: + iface = ifaces[counter] + name = iface.get('name', None) + if name is None: + setMsg("`name` is a required iface key.") + setFailed() + vlan = iface.get('vlan', None) + if vlan is None: + setMsg("`vlan` is a required iface key.") + setFailed() + if failed is True: + return False + interface = 
iface.get('interface', 'virtio') + self.conn.createNIC(vmname, name, vlan, interface) + + counter += 1 + checkFail() + return True + + def setDeleteProtection(self, vmname, del_prot): + self.__get_conn() + VM = self.conn.get_VM(vmname) + if bool(VM.delete_protected) != bool(del_prot): + self.conn.set_DeleteProtection(vmname, del_prot) + checkFail() + setMsg("`delete protection` has been updated.") + else: + setMsg("`delete protection` already has the right value.") + return True + + def setBootOrder(self, vmname, boot_order): + self.__get_conn() + VM = self.conn.get_VM(vmname) + bootorder = [] + for boot_dev in VM.os.get_boot(): + bootorder.append(str(boot_dev.dev)) + + if boot_order != bootorder: + self.conn.set_BootOrder(vmname, boot_order) + setMsg('The boot order has been set') + else: + setMsg('The boot order has already been set') + return True + + def removeVM(self, vmname): + self.__get_conn() + self.setPower(vmname, "down", 300) + return self.conn.remove_VM(vmname) + + def setPower(self, vmname, state, timeout): + self.__get_conn() + VM = self.conn.get_VM(vmname) + if VM is None: + setMsg("VM does not exist.") + setFailed() + return False + + if state == VM.status.state: + setMsg("VM state was already " + state) + else: + if state == "up": + setMsg("VM is going to start") + self.conn.start_VM(vmname, timeout) + setChanged() + elif state == "down": + setMsg("VM is going to stop") + self.conn.stop_VM(vmname, timeout) + setChanged() + elif state == "restarted": + self.setPower(vmname, "down", timeout) + checkFail() + self.setPower(vmname, "up", timeout) + checkFail() + setMsg("the vm state is set to " + state) + return True + + def setCD(self, vmname, cd_drive): + self.__get_conn() + if cd_drive: + return self.conn.set_CD(vmname, cd_drive) + else: + return self.conn.remove_CD(vmname) + + def setVMHost(self, vmname, vmhost): + self.__get_conn() + return self.conn.set_VM_Host(vmname, vmhost) + + # pylint: disable=unreachable + VM = self.conn.get_VM(vmname) + HOST = self.conn.get_Host(vmhost) + + if VM.placement_policy.host is None: + self.conn.set_VM_Host(vmname, vmhost) + elif str(VM.placement_policy.host.id) != str(HOST.id): + self.conn.set_VM_Host(vmname, vmhost) + else: + setMsg("VM's startup host was already set to " + vmhost) + checkFail() + + if str(VM.status.state) == "up": + self.conn.migrate_VM(vmname, vmhost) + checkFail() + + return True + + def setHost(self, hostname, cluster, ifaces): + self.__get_conn() + return self.conn.set_Host(hostname, cluster, ifaces) + + +def checkFail(): + if failed: + module.fail_json(msg=msg) + else: + return True + + +def setFailed(): + global failed + failed = True + + +def setChanged(): + global changed + changed = True + + +def setMsg(message): + global failed + msg.append(message) + + +def core(module): + + r = RHEV(module) + + state = module.params.get('state', 'present') + + if state == 'ping': + r.test() + return RHEV_SUCCESS, {"ping": "pong"} + elif state == 'info': + name = module.params.get('name') + if not name: + setMsg("`name` is a required argument.") + return RHEV_FAILED, msg + vminfo = r.getVM(name) + return RHEV_SUCCESS, {'changed': changed, 'msg': msg, 'vm': vminfo} + elif state == 'present': + created = False + name = module.params.get('name') + if not name: + setMsg("`name` is a required argument.") + return RHEV_FAILED, msg + actiontype = module.params.get('type') + if actiontype == 'server' or actiontype == 'desktop': + vminfo = r.getVM(name) + if vminfo: + setMsg('VM exists') + else: + # Create VM + cluster = 
module.params.get('cluster') + if cluster is None: + setMsg("cluster is a required argument.") + setFailed() + template = module.params.get('image') + if template: + disks = module.params.get('disks') + if disks is None: + setMsg("disks is a required argument.") + setFailed() + checkFail() + if r.createVMimage(name, cluster, template, disks) is False: + return RHEV_FAILED, vminfo + else: + os = module.params.get('osver') + if os is None: + setMsg("osver is a required argument.") + setFailed() + checkFail() + if r.createVM(name, cluster, os, actiontype) is False: + return RHEV_FAILED, vminfo + created = True + + # Set MEMORY and MEMORY POLICY + vminfo = r.getVM(name) + memory = module.params.get('vmmem') + if memory is not None: + memory_policy = module.params.get('mempol') + if memory_policy == 0: + memory_policy = memory + mem_pol_nok = True + if int(vminfo['mem_pol']) == memory_policy: + setMsg("Memory is correct") + mem_pol_nok = False + + mem_nok = True + if int(vminfo['memory']) == memory: + setMsg("Memory is correct") + mem_nok = False + + if memory_policy > memory: + setMsg('memory_policy cannot have a higher value than memory.') + return RHEV_FAILED, msg + + if mem_nok and mem_pol_nok: + if memory_policy > int(vminfo['memory']): + r.setMemory(vminfo['name'], memory) + r.setMemoryPolicy(vminfo['name'], memory_policy) + else: + r.setMemoryPolicy(vminfo['name'], memory_policy) + r.setMemory(vminfo['name'], memory) + elif mem_nok: + r.setMemory(vminfo['name'], memory) + elif mem_pol_nok: + r.setMemoryPolicy(vminfo['name'], memory_policy) + checkFail() + + # Set CPU + cpu = module.params.get('vmcpu') + if int(vminfo['cpu_cores']) == cpu: + setMsg("Number of CPUs is correct") + else: + if r.setCPU(vminfo['name'], cpu) is False: + return RHEV_FAILED, msg + + # Set CPU SHARE + cpu_share = module.params.get('cpu_share') + if cpu_share is not None: + if int(vminfo['cpu_shares']) == cpu_share: + setMsg("CPU share is correct.") + else: + if r.setCPUShare(vminfo['name'], cpu_share) is False: + return RHEV_FAILED, msg + + # Set DISKS + disks = module.params.get('disks') + if disks is not None: + if r.setDisks(vminfo['name'], disks) is False: + return RHEV_FAILED, msg + + # Set NETWORKS + ifaces = module.params.get('ifaces', None) + if ifaces is not None: + if r.setNetworks(vminfo['name'], ifaces) is False: + return RHEV_FAILED, msg + + # Set Delete Protection + del_prot = module.params.get('del_prot') + if r.setDeleteProtection(vminfo['name'], del_prot) is False: + return RHEV_FAILED, msg + + # Set Boot Order + boot_order = module.params.get('boot_order') + if r.setBootOrder(vminfo['name'], boot_order) is False: + return RHEV_FAILED, msg + + # Set VM Host + vmhost = module.params.get('vmhost') + if vmhost: + if r.setVMHost(vminfo['name'], vmhost) is False: + return RHEV_FAILED, msg + + vminfo = r.getVM(name) + vminfo['created'] = created + return RHEV_SUCCESS, {'changed': changed, 'msg': msg, 'vm': vminfo} + + if actiontype == 'host': + cluster = module.params.get('cluster') + if cluster is None: + setMsg("cluster is a required argument.") + setFailed() + ifaces = module.params.get('ifaces') + if ifaces is None: + setMsg("ifaces is a required argument.") + setFailed() + if r.setHost(name, cluster, ifaces) is False: + return RHEV_FAILED, msg + return RHEV_SUCCESS, {'changed': changed, 'msg': msg} + + elif state == 'absent': + name = module.params.get('name') + if not name: + setMsg("`name` is a required argument.") + return RHEV_FAILED, msg + actiontype = module.params.get('type') + if actiontype 
== 'server' or actiontype == 'desktop': + vminfo = r.getVM(name) + if vminfo: + setMsg('VM exists') + + # Set Delete Protection + del_prot = module.params.get('del_prot') + if r.setDeleteProtection(vminfo['name'], del_prot) is False: + return RHEV_FAILED, msg + + # Remove VM + if r.removeVM(vminfo['name']) is False: + return RHEV_FAILED, msg + setMsg('VM has been removed.') + vminfo['state'] = 'DELETED' + else: + setMsg('VM was already removed.') + return RHEV_SUCCESS, {'changed': changed, 'msg': msg, 'vm': vminfo} + + elif state == 'up' or state == 'down' or state == 'restarted': + name = module.params.get('name') + if not name: + setMsg("`name` is a required argument.") + return RHEV_FAILED, msg + timeout = module.params.get('timeout') + if r.setPower(name, state, timeout) is False: + return RHEV_FAILED, msg + vminfo = r.getVM(name) + return RHEV_SUCCESS, {'changed': changed, 'msg': msg, 'vm': vminfo} + + elif state == 'cd': + name = module.params.get('name') + cd_drive = module.params.get('cd_drive') + if r.setCD(name, cd_drive) is False: + return RHEV_FAILED, msg + return RHEV_SUCCESS, {'changed': changed, 'msg': msg} + + +def main(): + global module + module = AnsibleModule( + argument_spec=dict( + state=dict(type='str', default='present', choices=['absent', 'cd', 'down', 'info', 'ping', 'present', 'restarted', 'up']), + user=dict(type='str', default='admin@internal'), + password=dict(type='str', required=True, no_log=True), + server=dict(type='str', default='127.0.0.1'), + port=dict(type='int', default=443), + insecure_api=dict(type='bool', default=False), + name=dict(type='str'), + image=dict(type='str'), + datacenter=dict(type='str', default="Default"), + type=dict(type='str', default='server', choices=['desktop', 'host', 'server']), + cluster=dict(type='str', default=''), + vmhost=dict(type='str'), + vmcpu=dict(type='int', default=2), + vmmem=dict(type='int', default=1), + disks=dict(type='list'), + osver=dict(type='str', default="rhel_6x64"), + ifaces=dict(type='list', aliases=['interfaces', 'nics']), + timeout=dict(type='int'), + mempol=dict(type='int', default=1), + vm_ha=dict(type='bool', default=True), + cpu_share=dict(type='int', default=0), + boot_order=dict(type='list', default=['hd', 'network']), + del_prot=dict(type='bool', default=True), + cd_drive=dict(type='str'), + ), + ) + + if not HAS_SDK: + module.fail_json(msg="The 'ovirtsdk' module is not importable. 
Check the requirements.")
+
+    rc = RHEV_SUCCESS
+    try:
+        rc, result = core(module)
+    except Exception as e:
+        module.fail_json(msg=str(e))
+
+    if rc != 0:  # something went wrong, emit the msg
+        module.fail_json(rc=rc, msg=result)
+
+    module.exit_json(**result)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/cloud/misc/serverless.py b/plugins/modules/cloud/misc/serverless.py
new file mode 100644
index 0000000000..c517a2c57a
--- /dev/null
+++ b/plugins/modules/cloud/misc/serverless.py
@@ -0,0 +1,235 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2016, Ryan Scott Brown
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+DOCUMENTATION = r'''
+---
+module: serverless
+short_description: Manages a Serverless Framework project
+description:
+    - Provides support for managing Serverless Framework (https://serverless.com/) project deployments and stacks.
+options:
+    state:
+        description:
+            - Goal state of the given stage/project.
+        type: str
+        choices: [ absent, present ]
+        default: present
+    serverless_bin_path:
+        description:
+            - The path of a Serverless Framework binary relative to the I(service_path), e.g. C(node_modules/.bin/serverless).
+        type: path
+    service_path:
+        description:
+            - The path to the root of the Serverless Service to be operated on.
+        type: path
+        required: true
+    stage:
+        description:
+            - The name of the serverless framework project stage to deploy to.
+            - If not set, the Serverless Framework default C(dev) is used.
+        type: str
+    functions:
+        description:
+            - A list of specific functions to deploy.
+            - If this is not provided, all functions in the service will be deployed.
+        type: list
+        default: []
+    region:
+        description:
+            - AWS region to deploy the service to.
+            - This parameter defaults to C(us-east-1).
+        type: str
+    deploy:
+        description:
+            - Whether or not to deploy artifacts after building them.
+            - When this option is C(false) all the functions will be built, but no stack update will be run to send them out.
+            - This is mostly useful for generating artifacts to be stored/deployed elsewhere.
+        type: bool
+        default: yes
+    force:
+        description:
+            - Whether or not to force a full deployment, equivalent to the serverless C(--force) option.
+        type: bool
+        default: no
+    verbose:
+        description:
+            - Shows all stack events during deployment, and displays any Stack Output.
+        type: bool
+        default: no
+notes:
+    - Currently, the C(serverless) command must be in the path of the node executing the task.
+      In the future this may be a flag.
+requirements: +- serverless +- yaml +author: +- Ryan Scott Brown (@ryansb) +''' + +EXAMPLES = r''' +- name: Basic deploy of a service + serverless: + service_path: '{{ project_dir }}' + state: present + +- name: Deploy specific functions + serverless: + service_path: '{{ project_dir }}' + functions: + - my_func_one + - my_func_two + +- name: Deploy a project, then pull its resource list back into Ansible + serverless: + stage: dev + region: us-east-1 + service_path: '{{ project_dir }}' + register: sls + +# The cloudformation stack is always named the same as the full service, so the +# cloudformation_info module can get a full list of the stack resources, as +# well as stack events and outputs +- cloudformation_info: + region: us-east-1 + stack_name: '{{ sls.service_name }}' + stack_resources: true + +- name: Deploy a project using a locally installed serverless binary + serverless: + stage: dev + region: us-east-1 + service_path: '{{ project_dir }}' + serverless_bin_path: node_modules/.bin/serverless +''' + +RETURN = r''' +service_name: + type: str + description: The service name specified in the serverless.yml that was just deployed. + returned: always + sample: my-fancy-service-dev +state: + type: str + description: Whether the stack for the serverless project is present/absent. + returned: always +command: + type: str + description: Full `serverless` command run by this module, in case you want to re-run the command outside the module. + returned: always + sample: serverless deploy --stage production +''' + +import os + +try: + import yaml + HAS_YAML = True +except ImportError: + HAS_YAML = False + +from ansible.module_utils.basic import AnsibleModule + + +def read_serverless_config(module): + path = module.params.get('service_path') + + try: + with open(os.path.join(path, 'serverless.yml')) as sls_config: + config = yaml.safe_load(sls_config.read()) + return config + except IOError as e: + module.fail_json(msg="Could not open serverless.yml in {0}. 
err: {1}".format(path, str(e))) + + module.fail_json(msg="Failed to open serverless config at {0}".format( + os.path.join(path, 'serverless.yml'))) + + +def get_service_name(module, stage): + config = read_serverless_config(module) + if config.get('service') is None: + module.fail_json(msg="Could not read `service` key from serverless.yml file") + + if stage: + return "{0}-{1}".format(config['service'], stage) + + return "{0}-{1}".format(config['service'], config.get('stage', 'dev')) + + +def main(): + module = AnsibleModule( + argument_spec=dict( + service_path=dict(type='path', required=True), + state=dict(type='str', default='present', choices=['absent', 'present']), + functions=dict(type='list'), + region=dict(type='str', default=''), + stage=dict(type='str', default=''), + deploy=dict(type='bool', default=True), + serverless_bin_path=dict(type='path'), + force=dict(type='bool', default=False), + verbose=dict(type='bool', default=False), + ), + ) + + if not HAS_YAML: + module.fail_json(msg='yaml is required for this module') + + service_path = module.params.get('service_path') + state = module.params.get('state') + functions = module.params.get('functions') + region = module.params.get('region') + stage = module.params.get('stage') + deploy = module.params.get('deploy', True) + force = module.params.get('force', False) + verbose = module.params.get('verbose', False) + serverless_bin_path = module.params.get('serverless_bin_path') + + if serverless_bin_path is not None: + command = serverless_bin_path + " " + else: + command = "serverless " + + if state == 'present': + command += 'deploy ' + elif state == 'absent': + command += 'remove ' + else: + module.fail_json(msg="State must either be 'present' or 'absent'. Received: {0}".format(state)) + + if state == 'present': + if not deploy: + command += '--noDeploy ' + elif force: + command += '--force ' + + if region: + command += '--region {0} '.format(region) + if stage: + command += '--stage {0} '.format(stage) + if verbose: + command += '--verbose ' + + rc, out, err = module.run_command(command, cwd=service_path) + if rc != 0: + if state == 'absent' and "-{0}' does not exist".format(stage) in out: + module.exit_json(changed=False, state='absent', command=command, + out=out, service_name=get_service_name(module, stage)) + + module.fail_json(msg="Failure when executing Serverless command. Exited {0}.\nstdout: {1}\nstderr: {2}".format(rc, out, err)) + + # gather some facts about the deployment + module.exit_json(changed=True, state='present', out=out, command=command, + service_name=get_service_name(module, stage)) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/misc/terraform.py b/plugins/modules/cloud/misc/terraform.py new file mode 100644 index 0000000000..fa448a649f --- /dev/null +++ b/plugins/modules/cloud/misc/terraform.py @@ -0,0 +1,394 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2017, Ryan Scott Brown +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: terraform +short_description: Manages a Terraform deployment (and plans) +description: + - Provides support for deploying resources with Terraform and pulling + resource information back into Ansible. 
+options: + state: + choices: ['planned', 'present', 'absent'] + description: + - Goal state of given stage/project + required: false + default: present + binary_path: + description: + - The path of a terraform binary to use, relative to the 'service_path' + unless you supply an absolute path. + required: false + project_path: + description: + - The path to the root of the Terraform directory with the + vars.tf/main.tf/etc to use. + required: true + workspace: + description: + - The terraform workspace to work with. + required: false + default: default + purge_workspace: + description: + - Only works with state = absent + - If true, the workspace will be deleted after the "terraform destroy" action. + - The 'default' workspace will not be deleted. + required: false + default: false + type: bool + plan_file: + description: + - The path to an existing Terraform plan file to apply. If this is not + specified, Ansible will build a new TF plan and execute it. + Note that this option is required if 'state' has the 'planned' value. + required: false + state_file: + description: + - The path to an existing Terraform state file to use when building plan. + If this is not specified, the default `terraform.tfstate` will be used. + - This option is ignored when plan is specified. + required: false + variables_file: + description: + - The path to a variables file for Terraform to fill into the TF + configurations. + required: false + variables: + description: + - A group of key-values to override template variables or those in + variables files. + required: false + targets: + description: + - A list of specific resources to target in this plan/application. The + resources selected here will also auto-include any dependencies. + required: false + lock: + description: + - Enable statefile locking, if you use a service that accepts locks (such + as S3+DynamoDB) to store your statefile. + required: false + type: bool + lock_timeout: + description: + - How long to maintain the lock on the statefile, if you use a service + that accepts locks (such as S3+DynamoDB). + required: false + force_init: + description: + - To avoid duplicating infra, if a state file can't be found this will + force a `terraform init`. Generally, this should be turned off unless + you intend to provision an entirely new Terraform deployment. + default: false + required: false + type: bool + backend_config: + description: + - A group of key-values to provide at init stage to the -backend-config parameter. + required: false +notes: + - To just run a `terraform plan`, use check mode. +requirements: [ "terraform" ] +author: "Ryan Scott Brown (@ryansb)" +''' + +EXAMPLES = """ +# Basic deploy of a service +- terraform: + project_path: '{{ project_dir }}' + state: present + +# Define the backend configuration at init +- terraform: + project_path: 'project/' + state: "{{ state }}" + force_init: true + backend_config: + region: "eu-west-1" + bucket: "some-bucket" + key: "random.tfstate" +""" + +RETURN = """ +outputs: + type: complex + description: A dictionary of all the TF outputs by their assigned name. Use `.outputs.MyOutputName.value` to access the value. 
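+# For example, after registering the module result on a task (the register name
+# `tf` is illustrative), a later task can read an output value with
+# "{{ tf.outputs.bukkit_arn.value }}" (output name taken from the sample below).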
+ returned: on success + sample: '{"bukkit_arn": {"sensitive": false, "type": "string", "value": "arn:aws:s3:::tf-test-bukkit"}' + contains: + sensitive: + type: bool + returned: always + description: Whether Terraform has marked this value as sensitive + type: + type: str + returned: always + description: The type of the value (string, int, etc) + value: + returned: always + description: The value of the output as interpolated by Terraform +stdout: + type: str + description: Full `terraform` command stdout, in case you want to display it or examine the event log + returned: always + sample: '' +command: + type: str + description: Full `terraform` command built by this module, in case you want to re-run the command outside the module or debug a problem. + returned: always + sample: terraform apply ... +""" + +import os +import json +import tempfile +import traceback +from ansible.module_utils.six.moves import shlex_quote + +from ansible.module_utils.basic import AnsibleModule + +DESTROY_ARGS = ('destroy', '-no-color', '-force') +APPLY_ARGS = ('apply', '-no-color', '-input=false', '-auto-approve=true') +module = None + + +def preflight_validation(bin_path, project_path, variables_args=None, plan_file=None): + if project_path in [None, ''] or '/' not in project_path: + module.fail_json(msg="Path for Terraform project can not be None or ''.") + if not os.path.exists(bin_path): + module.fail_json(msg="Path for Terraform binary '{0}' doesn't exist on this host - check the path and try again please.".format(bin_path)) + if not os.path.isdir(project_path): + module.fail_json(msg="Path for Terraform project '{0}' doesn't exist on this host - check the path and try again please.".format(project_path)) + + rc, out, err = module.run_command([bin_path, 'validate'] + variables_args, cwd=project_path, use_unsafe_shell=True) + if rc != 0: + module.fail_json(msg="Failed to validate Terraform configuration files:\r\n{0}".format(err)) + + +def _state_args(state_file): + if state_file and os.path.exists(state_file): + return ['-state', state_file] + if state_file and not os.path.exists(state_file): + module.fail_json(msg='Could not find state_file "{0}", check the path and try again.'.format(state_file)) + return [] + + +def init_plugins(bin_path, project_path, backend_config): + command = [bin_path, 'init', '-input=false'] + if backend_config: + for key, val in backend_config.items(): + command.extend([ + '-backend-config', + shlex_quote('{0}={1}'.format(key, val)) + ]) + rc, out, err = module.run_command(command, cwd=project_path) + if rc != 0: + module.fail_json(msg="Failed to initialize Terraform modules:\r\n{0}".format(err)) + + +def get_workspace_context(bin_path, project_path): + workspace_ctx = {"current": "default", "all": []} + command = [bin_path, 'workspace', 'list', '-no-color'] + rc, out, err = module.run_command(command, cwd=project_path) + if rc != 0: + module.warn("Failed to list Terraform workspaces:\r\n{0}".format(err)) + for item in out.split('\n'): + stripped_item = item.strip() + if not stripped_item: + continue + elif stripped_item.startswith('* '): + workspace_ctx["current"] = stripped_item.replace('* ', '') + else: + workspace_ctx["all"].append(stripped_item) + return workspace_ctx + + +def _workspace_cmd(bin_path, project_path, action, workspace): + command = [bin_path, 'workspace', action, workspace, '-no-color'] + rc, out, err = module.run_command(command, cwd=project_path) + if rc != 0: + module.fail_json(msg="Failed to {0} workspace:\r\n{1}".format(action, err)) + return rc, 
out, err + + +def create_workspace(bin_path, project_path, workspace): + _workspace_cmd(bin_path, project_path, 'new', workspace) + + +def select_workspace(bin_path, project_path, workspace): + _workspace_cmd(bin_path, project_path, 'select', workspace) + + +def remove_workspace(bin_path, project_path, workspace): + _workspace_cmd(bin_path, project_path, 'delete', workspace) + + +def build_plan(command, project_path, variables_args, state_file, targets, state, plan_path=None): + if plan_path is None: + f, plan_path = tempfile.mkstemp(suffix='.tfplan') + + plan_command = [command[0], 'plan', '-input=false', '-no-color', '-detailed-exitcode', '-out', plan_path] + + for t in (module.params.get('targets') or []): + plan_command.extend(['-target', t]) + + plan_command.extend(_state_args(state_file)) + + rc, out, err = module.run_command(plan_command + variables_args, cwd=project_path, use_unsafe_shell=True) + + if rc == 0: + # no changes + return plan_path, False, out, err, plan_command if state == 'planned' else command + elif rc == 1: + # failure to plan + module.fail_json(msg='Terraform plan could not be created\r\nSTDOUT: {0}\r\n\r\nSTDERR: {1}'.format(out, err)) + elif rc == 2: + # changes, but successful + return plan_path, True, out, err, plan_command if state == 'planned' else command + + module.fail_json(msg='Terraform plan failed with unexpected exit code {0}. \r\nSTDOUT: {1}\r\n\r\nSTDERR: {2}'.format(rc, out, err)) + + +def main(): + global module + module = AnsibleModule( + argument_spec=dict( + project_path=dict(required=True, type='path'), + binary_path=dict(type='path'), + workspace=dict(required=False, type='str', default='default'), + purge_workspace=dict(type='bool', default=False), + state=dict(default='present', choices=['present', 'absent', 'planned']), + variables=dict(type='dict'), + variables_file=dict(type='path'), + plan_file=dict(type='path'), + state_file=dict(type='path'), + targets=dict(type='list', default=[]), + lock=dict(type='bool', default=True), + lock_timeout=dict(type='int',), + force_init=dict(type='bool', default=False), + backend_config=dict(type='dict', default=None), + ), + required_if=[('state', 'planned', ['plan_file'])], + supports_check_mode=True, + ) + + project_path = module.params.get('project_path') + bin_path = module.params.get('binary_path') + workspace = module.params.get('workspace') + purge_workspace = module.params.get('purge_workspace') + state = module.params.get('state') + variables = module.params.get('variables') or {} + variables_file = module.params.get('variables_file') + plan_file = module.params.get('plan_file') + state_file = module.params.get('state_file') + force_init = module.params.get('force_init') + backend_config = module.params.get('backend_config') + + if bin_path is not None: + command = [bin_path] + else: + command = [module.get_bin_path('terraform', required=True)] + + if force_init: + init_plugins(command[0], project_path, backend_config) + + workspace_ctx = get_workspace_context(command[0], project_path) + if workspace_ctx["current"] != workspace: + if workspace not in workspace_ctx["all"]: + create_workspace(command[0], project_path, workspace) + else: + select_workspace(command[0], project_path, workspace) + + if state == 'present': + command.extend(APPLY_ARGS) + elif state == 'absent': + command.extend(DESTROY_ARGS) + + variables_args = [] + for k, v in variables.items(): + variables_args.extend([ + '-var', + '{0}={1}'.format(k, v) + ]) + if variables_file: + variables_args.extend(['-var-file', 
variables_file]) + + preflight_validation(command[0], project_path, variables_args) + + if module.params.get('lock') is not None: + if module.params.get('lock'): + command.append('-lock=true') + else: + command.append('-lock=false') + if module.params.get('lock_timeout') is not None: + command.append('-lock-timeout=%ds' % module.params.get('lock_timeout')) + + for t in (module.params.get('targets') or []): + command.extend(['-target', t]) + + # we aren't sure if this plan will result in changes, so assume yes + needs_application, changed = True, False + + out, err = '', '' + + if state == 'absent': + command.extend(variables_args) + elif state == 'present' and plan_file: + if any([os.path.isfile(project_path + "/" + plan_file), os.path.isfile(plan_file)]): + command.append(plan_file) + else: + module.fail_json(msg='Could not find plan_file "{0}", check the path and try again.'.format(plan_file)) + else: + plan_file, needs_application, out, err, command = build_plan(command, project_path, variables_args, state_file, + module.params.get('targets'), state, plan_file) + command.append(plan_file) + + if needs_application and not module.check_mode and not state == 'planned': + rc, out, err = module.run_command(command, cwd=project_path) + # checks out to decide if changes were made during execution + if '0 added, 0 changed' not in out and not state == "absent" or '0 destroyed' not in out: + changed = True + if rc != 0: + module.fail_json( + msg="Failure when executing Terraform command. Exited {0}.\nstdout: {1}\nstderr: {2}".format(rc, out, err), + command=' '.join(command) + ) + + outputs_command = [command[0], 'output', '-no-color', '-json'] + _state_args(state_file) + rc, outputs_text, outputs_err = module.run_command(outputs_command, cwd=project_path) + if rc == 1: + module.warn("Could not get Terraform outputs. This usually means none have been defined.\nstdout: {0}\nstderr: {1}".format(outputs_text, outputs_err)) + outputs = {} + elif rc != 0: + module.fail_json( + msg="Failure when getting Terraform outputs. " + "Exited {0}.\nstdout: {1}\nstderr: {2}".format(rc, outputs_text, outputs_err), + command=' '.join(outputs_command)) + else: + outputs = json.loads(outputs_text) + + # Restore the Terraform workspace found when running the module + if workspace_ctx["current"] != workspace: + select_workspace(command[0], project_path, workspace_ctx["current"]) + if state == 'absent' and workspace != 'default' and purge_workspace is True: + remove_workspace(command[0], project_path, workspace) + + module.exit_json(changed=changed, state=state, workspace=workspace, outputs=outputs, stdout=out, stderr=err, command=' '.join(command)) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/misc/virt.py b/plugins/modules/cloud/misc/virt.py new file mode 100644 index 0000000000..fefefd03dc --- /dev/null +++ b/plugins/modules/cloud/misc/virt.py @@ -0,0 +1,602 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2007, 2012 Red Hat, Inc +# Michael DeHaan +# Seth Vidal +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: virt +short_description: Manages virtual machines supported by libvirt +description: + - Manages virtual machines supported by I(libvirt). 
diff --git a/plugins/modules/cloud/misc/virt.py b/plugins/modules/cloud/misc/virt.py
new file mode 100644
index 0000000000..fefefd03dc
--- /dev/null
+++ b/plugins/modules/cloud/misc/virt.py
@@ -0,0 +1,602 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2007, 2012 Red Hat, Inc
+# Michael DeHaan
+# Seth Vidal
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+DOCUMENTATION = '''
+---
+module: virt
+short_description: Manages virtual machines supported by libvirt
+description:
+     - Manages virtual machines supported by I(libvirt).
+options:
+    name:
+        description:
+            - Name of the guest VM being managed. Note that the VM must have been previously
+              defined with XML.
+            - This option is required unless I(command) is C(list_vms) or C(info).
+    state:
+        description:
+            - Note that there may be some lag for state requests like C(shutdown)
+              since these refer only to VM states. After starting a guest, it may not
+              be immediately accessible.
+            - I(state) and I(command) are mutually exclusive, except when I(command) is
+              C(list_vms); in that case all VMs in the specified state will be listed.
+        choices: [ destroyed, paused, running, shutdown ]
+    command:
+        description:
+            - In addition to state management, various non-idempotent commands are available.
+        choices: [ create, define, destroy, freemem, get_xml, info, list_vms, nodeinfo, pause, shutdown, start, status, stop, undefine, unpause, virttype ]
+    autostart:
+        description:
+            - Start VM at host startup.
+        type: bool
+    uri:
+        description:
+            - libvirt connection uri.
+        default: qemu:///system
+    xml:
+        description:
+            - XML document used with the define command.
+            - Must be raw XML content using C(lookup). XML cannot be a reference to a file.
+requirements:
+    - python >= 2.6
+    - libvirt-python
+author:
+    - Ansible Core Team
+    - Michael DeHaan
+    - Seth Vidal (@skvidal)
+'''
+
+EXAMPLES = '''
+# a playbook task line:
+- virt:
+    name: alpha
+    state: running
+
+# /usr/bin/ansible invocations
+# ansible host -m virt -a "name=alpha command=status"
+# ansible host -m virt -a "name=alpha command=get_xml"
+# ansible host -m virt -a "name=alpha command=create uri=lxc:///"
+
+# defining and launching an LXC guest
+- name: define vm
+  virt:
+    command: define
+    xml: "{{ lookup('template', 'container-template.xml.j2') }}"
+    uri: 'lxc:///'
+- name: start vm
+  virt:
+    name: foo
+    state: running
+    uri: 'lxc:///'
+
+# setting autostart on a qemu VM (default uri)
+- name: set autostart for a VM
+  virt:
+    name: foo
+    autostart: yes
+
+# Defining a VM and making it autostart with the host. VM will be off after this task
+- name: define vm from xml and set autostart
+  virt:
+    command: define
+    xml: "{{ lookup('template', 'vm_template.xml.j2') }}"
+    autostart: yes
+
+# Listing VMs
+- name: list all VMs
+  virt:
+    command: list_vms
+  register: all_vms
+
+- name: list only running VMs
+  virt:
+    command: list_vms
+    state: running
+  register: running_vms
+'''
+
+RETURN = '''
+# for list_vms command
+list_vms:
+    description: The list of VMs defined on the remote system
+    type: list
+    returned: success
+    sample: [
+        "build.example.org",
+        "dev.example.org"
+    ]
+# for status command
+status:
+    description: The status of the VM, among running, crashed, paused and shutdown
+    type: str
+    sample: "running"
+    returned: success
+'''
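The `status` command documented above ultimately reduces to one libvirt call: `dom.info()[0]` yields a numeric run state, which the code below folds into coarse names through `VIRT_STATE_NAME_MAP`. A minimal direct-libvirt sketch of that mapping (assumes libvirt-python and a reachable `qemu:///system`; the domain name `alpha` is borrowed from the examples above):

```python
import libvirt

# Coarse mapping used by the module (numeric virDomainState -> name).
STATE_NAMES = {0: 'running', 1: 'running', 2: 'running',
               3: 'paused', 4: 'shutdown', 5: 'shutdown', 6: 'crashed'}

conn = libvirt.open('qemu:///system')
dom = conn.lookupByName('alpha')   # same lookup the module performs
state_code = dom.info()[0]         # first info() field is the run state
print(STATE_NAMES.get(state_code, 'unknown'))
```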
+import re
+import traceback
+
+try:
+    import libvirt
+    from libvirt import libvirtError
+except ImportError:
+    HAS_VIRT = False
+else:
+    HAS_VIRT = True
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+
+
+VIRT_FAILED = 1
+VIRT_SUCCESS = 0
+VIRT_UNAVAILABLE = 2
+
+ALL_COMMANDS = []
+VM_COMMANDS = ['create', 'define', 'destroy', 'get_xml', 'pause', 'shutdown', 'status', 'start', 'stop', 'undefine', 'unpause']
+HOST_COMMANDS = ['freemem', 'info', 'list_vms', 'nodeinfo', 'virttype']
+ALL_COMMANDS.extend(VM_COMMANDS)
+ALL_COMMANDS.extend(HOST_COMMANDS)
+
+VIRT_STATE_NAME_MAP = {
+    0: 'running',
+    1: 'running',
+    2: 'running',
+    3: 'paused',
+    4: 'shutdown',
+    5: 'shutdown',
+    6: 'crashed',
+}
+
+
+class VMNotFound(Exception):
+    pass
+
+
+class LibvirtConnection(object):
+
+    def __init__(self, uri, module):
+
+        self.module = module
+
+        cmd = "uname -r"
+        rc, stdout, stderr = self.module.run_command(cmd)
+
+        if "xen" in stdout:
+            conn = libvirt.open(None)
+        elif "esx" in uri:
+            auth = [[libvirt.VIR_CRED_AUTHNAME, libvirt.VIR_CRED_NOECHOPROMPT], [], None]
+            conn = libvirt.openAuth(uri, auth)
+        else:
+            conn = libvirt.open(uri)
+
+        if not conn:
+            raise Exception("hypervisor connection failure")
+
+        self.conn = conn
+
+    def find_vm(self, vmid):
+        """
+        Extra bonus feature: vmid = -1 returns a list of everything
+        """
+        conn = self.conn
+
+        vms = []
+
+        # this block of code borrowed from virt-manager:
+        # get working domain's name
+        ids = conn.listDomainsID()
+        for id in ids:
+            vm = conn.lookupByID(id)
+            vms.append(vm)
+        # get defined domain
+        names = conn.listDefinedDomains()
+        for name in names:
+            vm = conn.lookupByName(name)
+            vms.append(vm)
+
+        if vmid == -1:
+            return vms
+
+        for vm in vms:
+            if vm.name() == vmid:
+                return vm
+
+        raise VMNotFound("virtual machine %s not found" % vmid)
+
+    def shutdown(self, vmid):
+        return self.find_vm(vmid).shutdown()
+
+    def pause(self, vmid):
+        return self.suspend(vmid)
+
+    def unpause(self, vmid):
+        return self.resume(vmid)
+
+    def suspend(self, vmid):
+        return self.find_vm(vmid).suspend()
+
+    def resume(self, vmid):
+        return self.find_vm(vmid).resume()
+
+    def create(self, vmid):
+        return self.find_vm(vmid).create()
+
+    def destroy(self, vmid):
+        return self.find_vm(vmid).destroy()
+
+    def undefine(self, vmid):
+        return self.find_vm(vmid).undefine()
+
+    def get_status2(self, vm):
+        state = vm.info()[0]
+        return VIRT_STATE_NAME_MAP.get(state, "unknown")
+
+    def get_status(self, vmid):
+        state = self.find_vm(vmid).info()[0]
+        return VIRT_STATE_NAME_MAP.get(state, "unknown")
+
+    def nodeinfo(self):
+        return self.conn.getInfo()
+
+    def get_type(self):
+        return self.conn.getType()
+
+    def get_xml(self, vmid):
+        vm = self.conn.lookupByName(vmid)
+        return vm.XMLDesc(0)
+
+
def get_maxVcpus(self, vmid): + vm = self.conn.lookupByName(vmid) + return vm.maxVcpus() + + def get_maxMemory(self, vmid): + vm = self.conn.lookupByName(vmid) + return vm.maxMemory() + + def getFreeMemory(self): + return self.conn.getFreeMemory() + + def get_autostart(self, vmid): + vm = self.conn.lookupByName(vmid) + return vm.autostart() + + def set_autostart(self, vmid, val): + vm = self.conn.lookupByName(vmid) + return vm.setAutostart(val) + + def define_from_xml(self, xml): + return self.conn.defineXML(xml) + + +class Virt(object): + + def __init__(self, uri, module): + self.module = module + self.uri = uri + + def __get_conn(self): + self.conn = LibvirtConnection(self.uri, self.module) + return self.conn + + def get_vm(self, vmid): + self.__get_conn() + return self.conn.find_vm(vmid) + + def state(self): + vms = self.list_vms() + state = [] + for vm in vms: + state_blurb = self.conn.get_status(vm) + state.append("%s %s" % (vm, state_blurb)) + return state + + def info(self): + vms = self.list_vms() + info = dict() + for vm in vms: + data = self.conn.find_vm(vm).info() + # libvirt returns maxMem, memory, and cpuTime as long()'s, which + # xmlrpclib tries to convert to regular int's during serialization. + # This throws exceptions, so convert them to strings here and + # assume the other end of the xmlrpc connection can figure things + # out or doesn't care. + info[vm] = dict( + state=VIRT_STATE_NAME_MAP.get(data[0], "unknown"), + maxMem=str(data[1]), + memory=str(data[2]), + nrVirtCpu=data[3], + cpuTime=str(data[4]), + autostart=self.conn.get_autostart(vm), + ) + + return info + + def nodeinfo(self): + self.__get_conn() + data = self.conn.nodeinfo() + info = dict( + cpumodel=str(data[0]), + phymemory=str(data[1]), + cpus=str(data[2]), + cpumhz=str(data[3]), + numanodes=str(data[4]), + sockets=str(data[5]), + cpucores=str(data[6]), + cputhreads=str(data[7]) + ) + return info + + def list_vms(self, state=None): + self.conn = self.__get_conn() + vms = self.conn.find_vm(-1) + results = [] + for x in vms: + try: + if state: + vmstate = self.conn.get_status2(x) + if vmstate == state: + results.append(x.name()) + else: + results.append(x.name()) + except Exception: + pass + return results + + def virttype(self): + return self.__get_conn().get_type() + + def autostart(self, vmid, as_flag): + self.conn = self.__get_conn() + # Change autostart flag only if needed + if self.conn.get_autostart(vmid) != as_flag: + self.conn.set_autostart(vmid, as_flag) + return True + + return False + + def freemem(self): + self.conn = self.__get_conn() + return self.conn.getFreeMemory() + + def shutdown(self, vmid): + """ Make the machine with the given vmid stop running. Whatever that takes. """ + self.__get_conn() + self.conn.shutdown(vmid) + return 0 + + def pause(self, vmid): + """ Pause the machine with the given vmid. """ + + self.__get_conn() + return self.conn.suspend(vmid) + + def unpause(self, vmid): + """ Unpause the machine with the given vmid. """ + + self.__get_conn() + return self.conn.resume(vmid) + + def create(self, vmid): + """ Start the machine via the given vmid """ + + self.__get_conn() + return self.conn.create(vmid) + + def start(self, vmid): + """ Start the machine via the given id/name """ + + self.__get_conn() + return self.conn.create(vmid) + + def destroy(self, vmid): + """ Pull the virtual power from the virtual domain, giving it virtually no time to virtually shut down. 
""" + self.__get_conn() + return self.conn.destroy(vmid) + + def undefine(self, vmid): + """ Stop a domain, and then wipe it from the face of the earth. (delete disk/config file) """ + + self.__get_conn() + return self.conn.undefine(vmid) + + def status(self, vmid): + """ + Return a state suitable for server consumption. Aka, codes.py values, not XM output. + """ + self.__get_conn() + return self.conn.get_status(vmid) + + def get_xml(self, vmid): + """ + Receive a Vm id as input + Return an xml describing vm config returned by a libvirt call + """ + + self.__get_conn() + return self.conn.get_xml(vmid) + + def get_maxVcpus(self, vmid): + """ + Gets the max number of VCPUs on a guest + """ + + self.__get_conn() + return self.conn.get_maxVcpus(vmid) + + def get_max_memory(self, vmid): + """ + Gets the max memory on a guest + """ + + self.__get_conn() + return self.conn.get_MaxMemory(vmid) + + def define(self, xml): + """ + Define a guest with the given xml + """ + self.__get_conn() + return self.conn.define_from_xml(xml) + + +def core(module): + + state = module.params.get('state', None) + autostart = module.params.get('autostart', None) + guest = module.params.get('name', None) + command = module.params.get('command', None) + uri = module.params.get('uri', None) + xml = module.params.get('xml', None) + + v = Virt(uri, module) + res = dict() + + if state and command == 'list_vms': + res = v.list_vms(state=state) + if not isinstance(res, dict): + res = {command: res} + return VIRT_SUCCESS, res + + if autostart is not None and command != 'define': + if not guest: + module.fail_json(msg="autostart requires 1 argument: name") + try: + v.get_vm(guest) + except VMNotFound: + module.fail_json(msg="domain %s not found" % guest) + res['changed'] = v.autostart(guest, autostart) + if not command and not state: + return VIRT_SUCCESS, res + + if state: + if not guest: + module.fail_json(msg="state change requires a guest specified") + + if state == 'running': + if v.status(guest) == 'paused': + res['changed'] = True + res['msg'] = v.unpause(guest) + elif v.status(guest) != 'running': + res['changed'] = True + res['msg'] = v.start(guest) + elif state == 'shutdown': + if v.status(guest) != 'shutdown': + res['changed'] = True + res['msg'] = v.shutdown(guest) + elif state == 'destroyed': + if v.status(guest) != 'shutdown': + res['changed'] = True + res['msg'] = v.destroy(guest) + elif state == 'paused': + if v.status(guest) == 'running': + res['changed'] = True + res['msg'] = v.pause(guest) + else: + module.fail_json(msg="unexpected state") + + return VIRT_SUCCESS, res + + if command: + if command in VM_COMMANDS: + if command == 'define': + if not xml: + module.fail_json(msg="define requires xml argument") + if guest: + # there might be a mismatch between quest 'name' in the module and in the xml + module.warn("'xml' is given - ignoring 'name'") + found_name = re.search('(.*)', xml).groups() + if found_name: + domain_name = found_name[0] + else: + module.fail_json(msg="Could not find domain 'name' in xml") + + # From libvirt docs (https://libvirt.org/html/libvirt-libvirt-domain.html#virDomainDefineXML): + # -- A previous definition for this domain would be overridden if it already exists. 
+ # + # In real world testing with libvirt versions 1.2.17-13, 2.0.0-10 and 3.9.0-14 + # on qemu and lxc domains results in: + # operation failed: domain '' already exists with + # + # In case a domain would be indeed overwritten, we should protect idempotency: + try: + existing_domain = v.get_vm(domain_name) + except VMNotFound: + existing_domain = None + try: + domain = v.define(xml) + if existing_domain: + # if we are here, then libvirt redefined existing domain as the doc promised + if existing_domain.XMLDesc() != domain.XMLDesc(): + res = {'changed': True, 'change_reason': 'config changed'} + else: + res = {'changed': True, 'created': domain.name()} + except libvirtError as e: + if e.get_error_code() != 9: # 9 means 'domain already exists' error + module.fail_json(msg='libvirtError: %s' % e.message) + if autostart is not None and v.autostart(domain_name, autostart): + res = {'changed': True, 'change_reason': 'autostart'} + + elif not guest: + module.fail_json(msg="%s requires 1 argument: guest" % command) + else: + res = getattr(v, command)(guest) + if not isinstance(res, dict): + res = {command: res} + + return VIRT_SUCCESS, res + + elif hasattr(v, command): + res = getattr(v, command)() + if not isinstance(res, dict): + res = {command: res} + return VIRT_SUCCESS, res + + else: + module.fail_json(msg="Command %s not recognized" % command) + + module.fail_json(msg="expected state or command parameter to be specified") + + +def main(): + module = AnsibleModule( + argument_spec=dict( + name=dict(type='str', aliases=['guest']), + state=dict(type='str', choices=['destroyed', 'pause', 'running', 'shutdown']), + autostart=dict(type='bool'), + command=dict(type='str', choices=ALL_COMMANDS), + uri=dict(type='str', default='qemu:///system'), + xml=dict(type='str'), + ), + ) + + if not HAS_VIRT: + module.fail_json(msg='The `libvirt` module is not importable. Check the requirements.') + + rc = VIRT_SUCCESS + try: + rc, result = core(module) + except Exception as e: + module.fail_json(msg=to_native(e), exception=traceback.format_exc()) + + if rc != 0: # something went wrong emit the msg + module.fail_json(rc=rc, msg=result) + else: + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/misc/virt_net.py b/plugins/modules/cloud/misc/virt_net.py new file mode 100644 index 0000000000..86c80072e6 --- /dev/null +++ b/plugins/modules/cloud/misc/virt_net.py @@ -0,0 +1,638 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2015, Maciej Delmanowski +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: virt_net +author: "Maciej Delmanowski (@drybjed)" +short_description: Manage libvirt network configuration +description: + - Manage I(libvirt) networks. +options: + name: + required: true + aliases: ['network'] + description: + - name of the network being managed. Note that network must be previously + defined with xml. + state: + required: false + choices: [ "active", "inactive", "present", "absent" ] + description: + - specify which state you want a network to be in. + If 'active', network will be started. + If 'present', ensure that network is present but do not change its + state; if it's missing, you need to specify xml argument. + If 'inactive', network will be stopped. 
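The define branch of `core()` above goes to some trouble to stay idempotent: it looks up any existing definition, lets `defineXML()` overwrite it, and then diffs the XML to decide whether anything actually changed. A condensed sketch of just that pattern, with a hypothetical helper name and none of the Ansible plumbing:

```python
import libvirt

def define_idempotent(conn, xml, name):
    # Look up any existing definition first; defineXML() silently
    # replaces a definition with the same name.
    try:
        existing = conn.lookupByName(name)
        old_xml = existing.XMLDesc()
    except libvirt.libvirtError:
        existing, old_xml = None, None

    domain = conn.defineXML(xml)        # create or redefine
    if existing is None:
        return True                     # freshly created -> changed
    return old_xml != domain.XMLDesc()  # redefined -> changed only if config differs
```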
diff --git a/plugins/modules/cloud/misc/virt_net.py b/plugins/modules/cloud/misc/virt_net.py
new file mode 100644
index 0000000000..86c80072e6
--- /dev/null
+++ b/plugins/modules/cloud/misc/virt_net.py
@@ -0,0 +1,638 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2015, Maciej Delmanowski
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: virt_net
+author: "Maciej Delmanowski (@drybjed)"
+short_description: Manage libvirt network configuration
+description:
+     - Manage I(libvirt) networks.
+options:
+    name:
+        required: true
+        aliases: ['network']
+        description:
+            - Name of the network being managed. Note that the network must have been
+              previously defined with XML.
+    state:
+        required: false
+        choices: [ "active", "inactive", "present", "absent" ]
+        description:
+            - Specify which state you want a network to be in.
+              If 'active', the network will be started.
+              If 'present', ensure that the network is present but do not change its
+              state; if it is missing, you need to specify the xml argument.
+              If 'inactive', the network will be stopped.
+              If 'undefined' or 'absent', the network will be removed from I(libvirt) configuration.
+    command:
+        required: false
+        choices: [ "define", "create", "start", "stop", "destroy",
+                   "undefine", "get_xml", "list_nets", "facts",
+                   "info", "status", "modify"]
+        description:
+            - In addition to state management, various non-idempotent commands are available.
+              See examples. C(modify) was added in version 2.1.
+    autostart:
+        required: false
+        type: bool
+        description:
+            - Specify if a given network should be started automatically on system boot.
+    uri:
+        required: false
+        default: "qemu:///system"
+        description:
+            - libvirt connection uri.
+    xml:
+        required: false
+        description:
+            - XML document used with the define command.
+requirements:
+    - "python >= 2.6"
+    - "python-libvirt"
+    - "python-lxml"
+'''
+
+EXAMPLES = '''
+# Define a new network
+- virt_net:
+    command: define
+    name: br_nat
+    xml: '{{ lookup("template", "network/bridge.xml.j2") }}'
+
+# Start a network
+- virt_net:
+    command: create
+    name: br_nat
+
+# List available networks
+- virt_net:
+    command: list_nets
+
+# Get XML data of a specified network
+- virt_net:
+    command: get_xml
+    name: br_nat
+
+# Stop a network
+- virt_net:
+    command: destroy
+    name: br_nat
+
+# Undefine a network
+- virt_net:
+    command: undefine
+    name: br_nat
+
+# Gather facts about networks
+# Facts will be available as 'ansible_libvirt_networks'
+- virt_net:
+    command: facts
+
+# Gather information about network managed by 'libvirt' remotely using uri
+- virt_net:
+    command: info
+    uri: '{{ item }}'
+  with_items: '{{ libvirt_uris }}'
+  register: networks
+
+# Ensure that a network is active (needs to be defined and built first)
+- virt_net:
+    state: active
+    name: br_nat
+
+# Ensure that a network is inactive
+- virt_net:
+    state: inactive
+    name: br_nat
+
+# Ensure that a given network will be started at boot
+- virt_net:
+    autostart: yes
+    name: br_nat
+
+# Disable autostart for a given network
+- virt_net:
+    autostart: no
+    name: br_nat
+
+# Add a new host in the dhcp pool
+- virt_net:
+    name: br_nat
+    command: modify
+    xml: "<host mac='FC:C2:33:00:6c:3c' name='my_vm' ip='192.168.122.30'/>"
+'''
+
+try:
+    import libvirt
+except ImportError:
+    HAS_VIRT = False
+else:
+    HAS_VIRT = True
+
+try:
+    from lxml import etree
+except ImportError:
+    HAS_XML = False
+else:
+    HAS_XML = True
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+
+
+VIRT_FAILED = 1
+VIRT_SUCCESS = 0
+VIRT_UNAVAILABLE = 2
+
+ALL_COMMANDS = []
+ENTRY_COMMANDS = ['create', 'status', 'start', 'stop',
+                  'undefine', 'destroy', 'get_xml', 'define',
+                  'modify']
+HOST_COMMANDS = ['list_nets', 'facts', 'info']
+ALL_COMMANDS.extend(ENTRY_COMMANDS)
+ALL_COMMANDS.extend(HOST_COMMANDS)
+
+ENTRY_STATE_ACTIVE_MAP = {
+    0: "inactive",
+    1: "active"
+}
+
+ENTRY_STATE_AUTOSTART_MAP = {
+    0: "no",
+    1: "yes"
+}
+
+ENTRY_STATE_PERSISTENT_MAP = {
+    0: "no",
+    1: "yes"
+}
+
+
+class EntryNotFound(Exception):
+    pass
+
+
+class LibvirtConnection(object):
+
+    def __init__(self, uri, module):
+
+        self.module = module
+
+        conn = libvirt.open(uri)
+
+        if not conn:
+            raise Exception("hypervisor connection failure")
+
+        self.conn = conn
+
+    def find_entry(self, entryid):
+        if entryid == -1:  # get all (active and defined) entries
+            names = self.conn.listNetworks() + self.conn.listDefinedNetworks()
+            return [self.conn.networkLookupByName(n) for n in names]
+
+        try:
+            return self.conn.networkLookupByName(entryid)
+        except libvirt.libvirtError as e:
+            if e.get_error_code() == libvirt.VIR_ERR_NO_NETWORK:
+                raise EntryNotFound("network %s not found" %
entryid) + raise + + def create(self, entryid): + if not self.module.check_mode: + return self.find_entry(entryid).create() + else: + try: + state = self.find_entry(entryid).isActive() + except Exception: + return self.module.exit_json(changed=True) + if not state: + return self.module.exit_json(changed=True) + + def modify(self, entryid, xml): + network = self.find_entry(entryid) + # identify what type of entry is given in the xml + new_data = etree.fromstring(xml) + old_data = etree.fromstring(network.XMLDesc(0)) + if new_data.tag == 'host': + mac_addr = new_data.get('mac') + hosts = old_data.xpath('/network/ip/dhcp/host') + # find the one mac we're looking for + host = None + for h in hosts: + if h.get('mac') == mac_addr: + host = h + break + if host is None: + # add the host + if not self.module.check_mode: + res = network.update(libvirt.VIR_NETWORK_UPDATE_COMMAND_ADD_LAST, + libvirt.VIR_NETWORK_SECTION_IP_DHCP_HOST, + -1, xml, libvirt.VIR_NETWORK_UPDATE_AFFECT_CURRENT) + else: + # pretend there was a change + res = 0 + if res == 0: + return True + else: + # change the host + if host.get('name') == new_data.get('name') and host.get('ip') == new_data.get('ip'): + return False + else: + if not self.module.check_mode: + res = network.update(libvirt.VIR_NETWORK_UPDATE_COMMAND_MODIFY, + libvirt.VIR_NETWORK_SECTION_IP_DHCP_HOST, + -1, xml, libvirt.VIR_NETWORK_UPDATE_AFFECT_CURRENT) + else: + # pretend there was a change + res = 0 + if res == 0: + return True + # command, section, parentIndex, xml, flags=0 + self.module.fail_json(msg='updating this is not supported yet %s' % to_native(xml)) + + def destroy(self, entryid): + if not self.module.check_mode: + return self.find_entry(entryid).destroy() + else: + if self.find_entry(entryid).isActive(): + return self.module.exit_json(changed=True) + + def undefine(self, entryid): + entry = None + try: + entry = self.find_entry(entryid) + found = True + except EntryNotFound: + found = False + + if found: + return self.find_entry(entryid).undefine() + + if self.module.check_mode: + return self.module.exit_json(changed=found) + + def get_status2(self, entry): + state = entry.isActive() + return ENTRY_STATE_ACTIVE_MAP.get(state, "unknown") + + def get_status(self, entryid): + if not self.module.check_mode: + state = self.find_entry(entryid).isActive() + return ENTRY_STATE_ACTIVE_MAP.get(state, "unknown") + else: + try: + state = self.find_entry(entryid).isActive() + return ENTRY_STATE_ACTIVE_MAP.get(state, "unknown") + except Exception: + return ENTRY_STATE_ACTIVE_MAP.get("inactive", "unknown") + + def get_uuid(self, entryid): + return self.find_entry(entryid).UUIDString() + + def get_xml(self, entryid): + return self.find_entry(entryid).XMLDesc(0) + + def get_forward(self, entryid): + xml = etree.fromstring(self.find_entry(entryid).XMLDesc(0)) + try: + result = xml.xpath('/network/forward')[0].get('mode') + except Exception: + raise ValueError('Forward mode not specified') + return result + + def get_domain(self, entryid): + xml = etree.fromstring(self.find_entry(entryid).XMLDesc(0)) + try: + result = xml.xpath('/network/domain')[0].get('name') + except Exception: + raise ValueError('Domain not specified') + return result + + def get_macaddress(self, entryid): + xml = etree.fromstring(self.find_entry(entryid).XMLDesc(0)) + try: + result = xml.xpath('/network/mac')[0].get('address') + except Exception: + raise ValueError('MAC address not specified') + return result + + def get_autostart(self, entryid): + state = self.find_entry(entryid).autostart() + 
return ENTRY_STATE_AUTOSTART_MAP.get(state, "unknown") + + def get_autostart2(self, entryid): + if not self.module.check_mode: + return self.find_entry(entryid).autostart() + else: + try: + return self.find_entry(entryid).autostart() + except Exception: + return self.module.exit_json(changed=True) + + def set_autostart(self, entryid, val): + if not self.module.check_mode: + return self.find_entry(entryid).setAutostart(val) + else: + try: + state = self.find_entry(entryid).autostart() + except Exception: + return self.module.exit_json(changed=True) + if bool(state) != val: + return self.module.exit_json(changed=True) + + def get_bridge(self, entryid): + return self.find_entry(entryid).bridgeName() + + def get_persistent(self, entryid): + state = self.find_entry(entryid).isPersistent() + return ENTRY_STATE_PERSISTENT_MAP.get(state, "unknown") + + def get_dhcp_leases(self, entryid): + network = self.find_entry(entryid) + return network.DHCPLeases() + + def define_from_xml(self, entryid, xml): + if not self.module.check_mode: + return self.conn.networkDefineXML(xml) + else: + try: + self.find_entry(entryid) + except Exception: + return self.module.exit_json(changed=True) + + +class VirtNetwork(object): + + def __init__(self, uri, module): + self.module = module + self.uri = uri + self.conn = LibvirtConnection(self.uri, self.module) + + def get_net(self, entryid): + return self.conn.find_entry(entryid) + + def list_nets(self, state=None): + results = [] + for entry in self.conn.find_entry(-1): + if state: + if state == self.conn.get_status2(entry): + results.append(entry.name()) + else: + results.append(entry.name()) + return results + + def state(self): + results = [] + for entry in self.list_nets(): + state_blurb = self.conn.get_status(entry) + results.append("%s %s" % (entry, state_blurb)) + return results + + def autostart(self, entryid): + return self.conn.set_autostart(entryid, True) + + def get_autostart(self, entryid): + return self.conn.get_autostart2(entryid) + + def set_autostart(self, entryid, state): + return self.conn.set_autostart(entryid, state) + + def create(self, entryid): + if self.conn.get_status(entryid) == "active": + return + try: + return self.conn.create(entryid) + except libvirt.libvirtError as e: + if e.get_error_code() == libvirt.VIR_ERR_NETWORK_EXIST: + return None + raise + + def modify(self, entryid, xml): + return self.conn.modify(entryid, xml) + + def start(self, entryid): + return self.create(entryid) + + def stop(self, entryid): + if self.conn.get_status(entryid) == "active": + return self.conn.destroy(entryid) + + def destroy(self, entryid): + return self.stop(entryid) + + def undefine(self, entryid): + return self.conn.undefine(entryid) + + def status(self, entryid): + return self.conn.get_status(entryid) + + def get_xml(self, entryid): + return self.conn.get_xml(entryid) + + def define(self, entryid, xml): + return self.conn.define_from_xml(entryid, xml) + + def info(self): + return self.facts(facts_mode='info') + + def facts(self, name=None, facts_mode='facts'): + results = dict() + if name: + entries = [name] + else: + entries = self.list_nets() + for entry in entries: + results[entry] = dict() + results[entry]["autostart"] = self.conn.get_autostart(entry) + results[entry]["persistent"] = self.conn.get_persistent(entry) + results[entry]["state"] = self.conn.get_status(entry) + results[entry]["bridge"] = self.conn.get_bridge(entry) + results[entry]["uuid"] = self.conn.get_uuid(entry) + try: + results[entry]["dhcp_leases"] = 
self.conn.get_dhcp_leases(entry) + # not supported on RHEL 6 + except AttributeError: + pass + + try: + results[entry]["forward_mode"] = self.conn.get_forward(entry) + except ValueError: + pass + + try: + results[entry]["domain"] = self.conn.get_domain(entry) + except ValueError: + pass + + try: + results[entry]["macaddress"] = self.conn.get_macaddress(entry) + except ValueError: + pass + + facts = dict() + if facts_mode == 'facts': + facts["ansible_facts"] = dict() + facts["ansible_facts"]["ansible_libvirt_networks"] = results + elif facts_mode == 'info': + facts['networks'] = results + return facts + + +def core(module): + + state = module.params.get('state', None) + name = module.params.get('name', None) + command = module.params.get('command', None) + uri = module.params.get('uri', None) + xml = module.params.get('xml', None) + autostart = module.params.get('autostart', None) + + v = VirtNetwork(uri, module) + res = {} + + if state and command == 'list_nets': + res = v.list_nets(state=state) + if not isinstance(res, dict): + res = {command: res} + return VIRT_SUCCESS, res + + if state: + if not name: + module.fail_json(msg="state change requires a specified name") + + res['changed'] = False + if state in ['active']: + if v.status(name) != 'active': + res['changed'] = True + res['msg'] = v.start(name) + elif state in ['present']: + try: + v.get_net(name) + except EntryNotFound: + if not xml: + module.fail_json(msg="network '" + name + "' not present, but xml not specified") + v.define(name, xml) + res = {'changed': True, 'created': name} + elif state in ['inactive']: + entries = v.list_nets() + if name in entries: + if v.status(name) != 'inactive': + res['changed'] = True + res['msg'] = v.destroy(name) + elif state in ['undefined', 'absent']: + entries = v.list_nets() + if name in entries: + if v.status(name) != 'inactive': + v.destroy(name) + res['changed'] = True + res['msg'] = v.undefine(name) + else: + module.fail_json(msg="unexpected state") + + return VIRT_SUCCESS, res + + if command: + if command in ENTRY_COMMANDS: + if not name: + module.fail_json(msg="%s requires 1 argument: name" % command) + if command in ('define', 'modify'): + if not xml: + module.fail_json(msg=command + " requires xml argument") + try: + v.get_net(name) + except EntryNotFound: + v.define(name, xml) + res = {'changed': True, 'created': name} + else: + if command == 'modify': + mod = v.modify(name, xml) + res = {'changed': mod, 'modified': name} + return VIRT_SUCCESS, res + res = getattr(v, command)(name) + if not isinstance(res, dict): + res = {command: res} + return VIRT_SUCCESS, res + + elif hasattr(v, command): + if command == 'facts' and name: + res = v.facts(name) + else: + res = getattr(v, command)() + if not isinstance(res, dict): + res = {command: res} + return VIRT_SUCCESS, res + + else: + module.fail_json(msg="Command %s not recognized" % command) + + if autostart is not None: + if not name: + module.fail_json(msg="state change requires a specified name") + + res['changed'] = False + if autostart: + if not v.get_autostart(name): + res['changed'] = True + res['msg'] = v.set_autostart(name, True) + else: + if v.get_autostart(name): + res['changed'] = True + res['msg'] = v.set_autostart(name, False) + + return VIRT_SUCCESS, res + + module.fail_json(msg="expected state or command parameter to be specified") + + +def main(): + + module = AnsibleModule( + argument_spec=dict( + name=dict(aliases=['network']), + state=dict(choices=['active', 'inactive', 'present', 'absent']), + 
command=dict(choices=ALL_COMMANDS), + uri=dict(default='qemu:///system'), + xml=dict(), + autostart=dict(type='bool') + ), + supports_check_mode=True + ) + + if not HAS_VIRT: + module.fail_json( + msg='The `libvirt` module is not importable. Check the requirements.' + ) + + if not HAS_XML: + module.fail_json( + msg='The `lxml` module is not importable. Check the requirements.' + ) + + rc = VIRT_SUCCESS + try: + rc, result = core(module) + except Exception as e: + module.fail_json(msg=str(e)) + + if rc != 0: # something went wrong emit the msg + module.fail_json(rc=rc, msg=result) + else: + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/misc/virt_pool.py b/plugins/modules/cloud/misc/virt_pool.py new file mode 100644 index 0000000000..656aa1e300 --- /dev/null +++ b/plugins/modules/cloud/misc/virt_pool.py @@ -0,0 +1,711 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2015, Maciej Delmanowski +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: virt_pool +author: "Maciej Delmanowski (@drybjed)" +short_description: Manage libvirt storage pools +description: + - Manage I(libvirt) storage pools. +options: + name: + required: false + aliases: [ "pool" ] + description: + - name of the storage pool being managed. Note that pool must be previously + defined with xml. + state: + required: false + choices: [ "active", "inactive", "present", "absent", "undefined", "deleted" ] + description: + - specify which state you want a storage pool to be in. + If 'active', pool will be started. + If 'present', ensure that pool is present but do not change its + state; if it's missing, you need to specify xml argument. + If 'inactive', pool will be stopped. + If 'undefined' or 'absent', pool will be removed from I(libvirt) configuration. + If 'deleted', pool contents will be deleted and then pool undefined. + command: + required: false + choices: [ "define", "build", "create", "start", "stop", "destroy", + "delete", "undefine", "get_xml", "list_pools", "facts", + "info", "status" ] + description: + - in addition to state management, various non-idempotent commands are available. + See examples. + autostart: + required: false + type: bool + description: + - Specify if a given storage pool should be started automatically on system boot. + uri: + required: false + default: "qemu:///system" + description: + - I(libvirt) connection uri. + xml: + required: false + description: + - XML document used with the define command. + mode: + required: false + choices: [ 'new', 'repair', 'resize', 'no_overwrite', 'overwrite', 'normal', 'zeroed' ] + description: + - Pass additional parameters to 'build' or 'delete' commands. 
+requirements: + - "python >= 2.6" + - "python-libvirt" + - "python-lxml" +''' + +EXAMPLES = ''' +# Define a new storage pool +- virt_pool: + command: define + name: vms + xml: '{{ lookup("template", "pool/dir.xml.j2") }}' + +# Build a storage pool if it does not exist +- virt_pool: + command: build + name: vms + +# Start a storage pool +- virt_pool: + command: create + name: vms + +# List available pools +- virt_pool: + command: list_pools + +# Get XML data of a specified pool +- virt_pool: + command: get_xml + name: vms + +# Stop a storage pool +- virt_pool: + command: destroy + name: vms + +# Delete a storage pool (destroys contents) +- virt_pool: + command: delete + name: vms + +# Undefine a storage pool +- virt_pool: + command: undefine + name: vms + +# Gather facts about storage pools +# Facts will be available as 'ansible_libvirt_pools' +- virt_pool: + command: facts + +# Gather information about pools managed by 'libvirt' remotely using uri +- virt_pool: + command: info + uri: '{{ item }}' + with_items: '{{ libvirt_uris }}' + register: storage_pools + +# Ensure that a pool is active (needs to be defined and built first) +- virt_pool: + state: active + name: vms + +# Ensure that a pool is inactive +- virt_pool: + state: inactive + name: vms + +# Ensure that a given pool will be started at boot +- virt_pool: + autostart: yes + name: vms + +# Disable autostart for a given pool +- virt_pool: + autostart: no + name: vms +''' + +try: + import libvirt +except ImportError: + HAS_VIRT = False +else: + HAS_VIRT = True + +try: + from lxml import etree +except ImportError: + HAS_XML = False +else: + HAS_XML = True + +from ansible.module_utils.basic import AnsibleModule + + +VIRT_FAILED = 1 +VIRT_SUCCESS = 0 +VIRT_UNAVAILABLE = 2 + +ALL_COMMANDS = [] +ENTRY_COMMANDS = ['create', 'status', 'start', 'stop', 'build', 'delete', + 'undefine', 'destroy', 'get_xml', 'define', 'refresh'] +HOST_COMMANDS = ['list_pools', 'facts', 'info'] +ALL_COMMANDS.extend(ENTRY_COMMANDS) +ALL_COMMANDS.extend(HOST_COMMANDS) + +ENTRY_STATE_ACTIVE_MAP = { + 0: "inactive", + 1: "active" +} + +ENTRY_STATE_AUTOSTART_MAP = { + 0: "no", + 1: "yes" +} + +ENTRY_STATE_PERSISTENT_MAP = { + 0: "no", + 1: "yes" +} + +ENTRY_STATE_INFO_MAP = { + 0: "inactive", + 1: "building", + 2: "running", + 3: "degraded", + 4: "inaccessible" +} + +ENTRY_BUILD_FLAGS_MAP = { + "new": 0, + "repair": 1, + "resize": 2, + "no_overwrite": 4, + "overwrite": 8 +} + +ENTRY_DELETE_FLAGS_MAP = { + "normal": 0, + "zeroed": 1 +} + +ALL_MODES = [] +ALL_MODES.extend(ENTRY_BUILD_FLAGS_MAP.keys()) +ALL_MODES.extend(ENTRY_DELETE_FLAGS_MAP.keys()) + + +class EntryNotFound(Exception): + pass + + +class LibvirtConnection(object): + + def __init__(self, uri, module): + + self.module = module + + conn = libvirt.open(uri) + + if not conn: + raise Exception("hypervisor connection failure") + + self.conn = conn + + def find_entry(self, entryid): + # entryid = -1 returns a list of everything + + results = [] + + # Get active entries + for name in self.conn.listStoragePools(): + entry = self.conn.storagePoolLookupByName(name) + results.append(entry) + + # Get inactive entries + for name in self.conn.listDefinedStoragePools(): + entry = self.conn.storagePoolLookupByName(name) + results.append(entry) + + if entryid == -1: + return results + + for entry in results: + if entry.name() == entryid: + return entry + + raise EntryNotFound("storage pool %s not found" % entryid) + + def create(self, entryid): + if not self.module.check_mode: + return self.find_entry(entryid).create() + 
else: + try: + state = self.find_entry(entryid).isActive() + except Exception: + return self.module.exit_json(changed=True) + if not state: + return self.module.exit_json(changed=True) + + def destroy(self, entryid): + if not self.module.check_mode: + return self.find_entry(entryid).destroy() + else: + if self.find_entry(entryid).isActive(): + return self.module.exit_json(changed=True) + + def undefine(self, entryid): + if not self.module.check_mode: + return self.find_entry(entryid).undefine() + else: + if not self.find_entry(entryid): + return self.module.exit_json(changed=True) + + def get_status2(self, entry): + state = entry.isActive() + return ENTRY_STATE_ACTIVE_MAP.get(state, "unknown") + + def get_status(self, entryid): + if not self.module.check_mode: + state = self.find_entry(entryid).isActive() + return ENTRY_STATE_ACTIVE_MAP.get(state, "unknown") + else: + try: + state = self.find_entry(entryid).isActive() + return ENTRY_STATE_ACTIVE_MAP.get(state, "unknown") + except Exception: + return ENTRY_STATE_ACTIVE_MAP.get("inactive", "unknown") + + def get_uuid(self, entryid): + return self.find_entry(entryid).UUIDString() + + def get_xml(self, entryid): + return self.find_entry(entryid).XMLDesc(0) + + def get_info(self, entryid): + return self.find_entry(entryid).info() + + def get_volume_count(self, entryid): + return self.find_entry(entryid).numOfVolumes() + + def get_volume_names(self, entryid): + return self.find_entry(entryid).listVolumes() + + def get_devices(self, entryid): + xml = etree.fromstring(self.find_entry(entryid).XMLDesc(0)) + if xml.xpath('/pool/source/device'): + result = [] + for device in xml.xpath('/pool/source/device'): + result.append(device.get('path')) + try: + return result + except Exception: + raise ValueError('No devices specified') + + def get_format(self, entryid): + xml = etree.fromstring(self.find_entry(entryid).XMLDesc(0)) + try: + result = xml.xpath('/pool/source/format')[0].get('type') + except Exception: + raise ValueError('Format not specified') + return result + + def get_host(self, entryid): + xml = etree.fromstring(self.find_entry(entryid).XMLDesc(0)) + try: + result = xml.xpath('/pool/source/host')[0].get('name') + except Exception: + raise ValueError('Host not specified') + return result + + def get_source_path(self, entryid): + xml = etree.fromstring(self.find_entry(entryid).XMLDesc(0)) + try: + result = xml.xpath('/pool/source/dir')[0].get('path') + except Exception: + raise ValueError('Source path not specified') + return result + + def get_path(self, entryid): + xml = etree.fromstring(self.find_entry(entryid).XMLDesc(0)) + return xml.xpath('/pool/target/path')[0].text + + def get_type(self, entryid): + xml = etree.fromstring(self.find_entry(entryid).XMLDesc(0)) + return xml.get('type') + + def build(self, entryid, flags): + if not self.module.check_mode: + return self.find_entry(entryid).build(flags) + else: + try: + state = self.find_entry(entryid) + except Exception: + return self.module.exit_json(changed=True) + if not state: + return self.module.exit_json(changed=True) + + def delete(self, entryid, flags): + if not self.module.check_mode: + return self.find_entry(entryid).delete(flags) + else: + try: + state = self.find_entry(entryid) + except Exception: + return self.module.exit_json(changed=True) + if state: + return self.module.exit_json(changed=True) + + def get_autostart(self, entryid): + state = self.find_entry(entryid).autostart() + return ENTRY_STATE_AUTOSTART_MAP.get(state, "unknown") + + def get_autostart2(self, entryid): + 
if not self.module.check_mode: + return self.find_entry(entryid).autostart() + else: + try: + return self.find_entry(entryid).autostart() + except Exception: + return self.module.exit_json(changed=True) + + def set_autostart(self, entryid, val): + if not self.module.check_mode: + return self.find_entry(entryid).setAutostart(val) + else: + try: + state = self.find_entry(entryid).autostart() + except Exception: + return self.module.exit_json(changed=True) + if bool(state) != val: + return self.module.exit_json(changed=True) + + def refresh(self, entryid): + return self.find_entry(entryid).refresh() + + def get_persistent(self, entryid): + state = self.find_entry(entryid).isPersistent() + return ENTRY_STATE_PERSISTENT_MAP.get(state, "unknown") + + def define_from_xml(self, entryid, xml): + if not self.module.check_mode: + return self.conn.storagePoolDefineXML(xml) + else: + try: + self.find_entry(entryid) + except Exception: + return self.module.exit_json(changed=True) + + +class VirtStoragePool(object): + + def __init__(self, uri, module): + self.module = module + self.uri = uri + self.conn = LibvirtConnection(self.uri, self.module) + + def get_pool(self, entryid): + return self.conn.find_entry(entryid) + + def list_pools(self, state=None): + results = [] + for entry in self.conn.find_entry(-1): + if state: + if state == self.conn.get_status2(entry): + results.append(entry.name()) + else: + results.append(entry.name()) + return results + + def state(self): + results = [] + for entry in self.list_pools(): + state_blurb = self.conn.get_status(entry) + results.append("%s %s" % (entry, state_blurb)) + return results + + def autostart(self, entryid): + return self.conn.set_autostart(entryid, True) + + def get_autostart(self, entryid): + return self.conn.get_autostart2(entryid) + + def set_autostart(self, entryid, state): + return self.conn.set_autostart(entryid, state) + + def create(self, entryid): + return self.conn.create(entryid) + + def start(self, entryid): + return self.conn.create(entryid) + + def stop(self, entryid): + return self.conn.destroy(entryid) + + def destroy(self, entryid): + return self.conn.destroy(entryid) + + def undefine(self, entryid): + return self.conn.undefine(entryid) + + def status(self, entryid): + return self.conn.get_status(entryid) + + def get_xml(self, entryid): + return self.conn.get_xml(entryid) + + def define(self, entryid, xml): + return self.conn.define_from_xml(entryid, xml) + + def build(self, entryid, flags): + return self.conn.build(entryid, ENTRY_BUILD_FLAGS_MAP.get(flags, 0)) + + def delete(self, entryid, flags): + return self.conn.delete(entryid, ENTRY_DELETE_FLAGS_MAP.get(flags, 0)) + + def refresh(self, entryid): + return self.conn.refresh(entryid) + + def info(self): + return self.facts(facts_mode='info') + + def facts(self, facts_mode='facts'): + results = dict() + for entry in self.list_pools(): + results[entry] = dict() + if self.conn.find_entry(entry): + data = self.conn.get_info(entry) + # libvirt returns maxMem, memory, and cpuTime as long()'s, which + # xmlrpclib tries to convert to regular int's during serialization. + # This throws exceptions, so convert them to strings here and + # assume the other end of the xmlrpc connection can figure things + # out or doesn't care. 
+ results[entry] = { + "status": ENTRY_STATE_INFO_MAP.get(data[0], "unknown"), + "size_total": str(data[1]), + "size_used": str(data[2]), + "size_available": str(data[3]), + } + results[entry]["autostart"] = self.conn.get_autostart(entry) + results[entry]["persistent"] = self.conn.get_persistent(entry) + results[entry]["state"] = self.conn.get_status(entry) + results[entry]["path"] = self.conn.get_path(entry) + results[entry]["type"] = self.conn.get_type(entry) + results[entry]["uuid"] = self.conn.get_uuid(entry) + if self.conn.find_entry(entry).isActive(): + results[entry]["volume_count"] = self.conn.get_volume_count(entry) + results[entry]["volumes"] = list() + for volume in self.conn.get_volume_names(entry): + results[entry]["volumes"].append(volume) + else: + results[entry]["volume_count"] = -1 + + try: + results[entry]["host"] = self.conn.get_host(entry) + except ValueError: + pass + + try: + results[entry]["source_path"] = self.conn.get_source_path(entry) + except ValueError: + pass + + try: + results[entry]["format"] = self.conn.get_format(entry) + except ValueError: + pass + + try: + devices = self.conn.get_devices(entry) + results[entry]["devices"] = devices + except ValueError: + pass + + else: + results[entry]["state"] = self.conn.get_status(entry) + + facts = dict() + if facts_mode == 'facts': + facts["ansible_facts"] = dict() + facts["ansible_facts"]["ansible_libvirt_pools"] = results + elif facts_mode == 'info': + facts['pools'] = results + return facts + + +def core(module): + + state = module.params.get('state', None) + name = module.params.get('name', None) + command = module.params.get('command', None) + uri = module.params.get('uri', None) + xml = module.params.get('xml', None) + autostart = module.params.get('autostart', None) + mode = module.params.get('mode', None) + + v = VirtStoragePool(uri, module) + res = {} + + if state and command == 'list_pools': + res = v.list_pools(state=state) + if not isinstance(res, dict): + res = {command: res} + return VIRT_SUCCESS, res + + if state: + if not name: + module.fail_json(msg="state change requires a specified name") + + res['changed'] = False + if state in ['active']: + if v.status(name) != 'active': + res['changed'] = True + res['msg'] = v.start(name) + elif state in ['present']: + try: + v.get_pool(name) + except EntryNotFound: + if not xml: + module.fail_json(msg="storage pool '" + name + "' not present, but xml not specified") + v.define(name, xml) + res = {'changed': True, 'created': name} + elif state in ['inactive']: + entries = v.list_pools() + if name in entries: + if v.status(name) != 'inactive': + res['changed'] = True + res['msg'] = v.destroy(name) + elif state in ['undefined', 'absent']: + entries = v.list_pools() + if name in entries: + if v.status(name) != 'inactive': + v.destroy(name) + res['changed'] = True + res['msg'] = v.undefine(name) + elif state in ['deleted']: + entries = v.list_pools() + if name in entries: + if v.status(name) != 'inactive': + v.destroy(name) + v.delete(name, mode) + res['changed'] = True + res['msg'] = v.undefine(name) + else: + module.fail_json(msg="unexpected state") + + return VIRT_SUCCESS, res + + if command: + if command in ENTRY_COMMANDS: + if not name: + module.fail_json(msg="%s requires 1 argument: name" % command) + if command == 'define': + if not xml: + module.fail_json(msg="define requires xml argument") + try: + v.get_pool(name) + except EntryNotFound: + v.define(name, xml) + res = {'changed': True, 'created': name} + return VIRT_SUCCESS, res + elif command == 'build': 
+ res = v.build(name, mode) + if not isinstance(res, dict): + res = {'changed': True, command: res} + return VIRT_SUCCESS, res + elif command == 'delete': + res = v.delete(name, mode) + if not isinstance(res, dict): + res = {'changed': True, command: res} + return VIRT_SUCCESS, res + res = getattr(v, command)(name) + if not isinstance(res, dict): + res = {command: res} + return VIRT_SUCCESS, res + + elif hasattr(v, command): + res = getattr(v, command)() + if not isinstance(res, dict): + res = {command: res} + return VIRT_SUCCESS, res + + else: + module.fail_json(msg="Command %s not recognized" % command) + + if autostart is not None: + if not name: + module.fail_json(msg="state change requires a specified name") + + res['changed'] = False + if autostart: + if not v.get_autostart(name): + res['changed'] = True + res['msg'] = v.set_autostart(name, True) + else: + if v.get_autostart(name): + res['changed'] = True + res['msg'] = v.set_autostart(name, False) + + return VIRT_SUCCESS, res + + module.fail_json(msg="expected state or command parameter to be specified") + + +def main(): + + module = AnsibleModule( + argument_spec=dict( + name=dict(aliases=['pool']), + state=dict(choices=['active', 'inactive', 'present', 'absent', 'undefined', 'deleted']), + command=dict(choices=ALL_COMMANDS), + uri=dict(default='qemu:///system'), + xml=dict(), + autostart=dict(type='bool'), + mode=dict(choices=ALL_MODES), + ), + supports_check_mode=True + ) + + if not HAS_VIRT: + module.fail_json( + msg='The `libvirt` module is not importable. Check the requirements.' + ) + + if not HAS_XML: + module.fail_json( + msg='The `lxml` module is not importable. Check the requirements.' + ) + + rc = VIRT_SUCCESS + try: + rc, result = core(module) + except Exception as e: + module.fail_json(msg=str(e)) + + if rc != 0: # something went wrong emit the msg + module.fail_json(rc=rc, msg=result) + else: + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/misc/xenserver_facts.py b/plugins/modules/cloud/misc/xenserver_facts.py new file mode 100644 index 0000000000..e85bcaa0a7 --- /dev/null +++ b/plugins/modules/cloud/misc/xenserver_facts.py @@ -0,0 +1,207 @@ +#!/usr/bin/python +# +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: xenserver_facts +short_description: get facts reported on xenserver +description: + - Reads data out of XenAPI, can be used instead of multiple xe commands. 
+author:
+    - Andy Hill (@andyhky)
+    - Tim Rupp (@caphrim007)
+    - Robin Lee (@cheese)
+options: {}
+'''
+
+EXAMPLES = '''
+- name: Gather facts from xenserver
+  xenserver_facts:
+
+- name: Print running VMs
+  debug:
+    msg: "{{ item }}"
+  with_items: "{{ xs_vms.keys() }}"
+  when: xs_vms[item]['power_state'] == "Running"
+
+# Which will print:
+#
+# TASK: [Print running VMs] ***********************************************************
+# skipping: [10.13.0.22] => (item=CentOS 4.7 (32-bit))
+# ok: [10.13.0.22] => (item=Control domain on host: 10.0.13.22) => {
+#     "item": "Control domain on host: 10.0.13.22",
+#     "msg": "Control domain on host: 10.0.13.22"
+# }
+'''
+
+
+HAVE_XENAPI = False
+try:
+    import XenAPI
+    HAVE_XENAPI = True
+except ImportError:
+    pass
+
+from ansible.module_utils import distro
+from ansible.module_utils.basic import AnsibleModule
+
+
+class XenServerFacts:
+    def __init__(self):
+        self.codes = {
+            '5.5.0': 'george',
+            '5.6.100': 'oxford',
+            '6.0.0': 'boston',
+            '6.1.0': 'tampa',
+            '6.2.0': 'clearwater'
+        }
+
+    @property
+    def version(self):
+        result = distro.linux_distribution()[1]
+        return result
+
+    @property
+    def codename(self):
+        if self.version in self.codes:
+            result = self.codes[self.version]
+        else:
+            result = None
+
+        return result
+
+
+def get_xenapi_session():
+    session = XenAPI.xapi_local()
+    session.xenapi.login_with_password('', '')
+    return session
+
+
+def get_networks(session):
+    recs = session.xenapi.network.get_all_records()
+    networks = change_keys(recs, key='name_label')
+    return networks
+
+
+def get_pifs(session):
+    recs = session.xenapi.PIF.get_all_records()
+    pifs = change_keys(recs, key='uuid')
+    xs_pifs = {}
+    devicenums = range(0, 7)
+    for pif in pifs.values():
+        for eth in devicenums:
+            interface_name = "eth%s" % (eth)
+            bond_name = interface_name.replace('eth', 'bond')
+            if pif['device'] == interface_name:
+                xs_pifs[interface_name] = pif
+            elif pif['device'] == bond_name:
+                xs_pifs[bond_name] = pif
+    return xs_pifs
+
+
+def get_vlans(session):
+    recs = session.xenapi.VLAN.get_all_records()
+    return change_keys(recs, key='tag')
+
+
+def change_keys(recs, key='uuid', filter_func=None):
+    """
+    Take a xapi dict, and make the keys the value of recs[ref][key].
+
+    Preserves the ref in rec['ref']
+
+    """
+    new_recs = {}
+
+    for ref, rec in recs.items():
+        if filter_func is not None and not filter_func(rec):
+            continue
+
+        for param_name, param_value in rec.items():
+            # param_value may be of type xmlrpc.client.DateTime,
+            # which is not simply convertible to str.
+            # Use the 'value' attr to get the str value,
+            # following an example in the xmlrpc.client.DateTime documentation.
+            if hasattr(param_value, "value"):
+                rec[param_name] = param_value.value
+        new_recs[rec[key]] = rec
+        new_recs[rec[key]]['ref'] = ref
+
+    return new_recs
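A quick illustration of what `change_keys()` does to a XenAPI-style record set; the refs and field values below are made up:

```python
# Hypothetical XenAPI result: opaque refs -> records.
recs = {
    'OpaqueRef:1': {'uuid': 'aaaa', 'name_label': 'vm01', 'power_state': 'Running'},
    'OpaqueRef:2': {'uuid': 'bbbb', 'name_label': 'vm02', 'power_state': 'Halted'},
}

vms = change_keys(recs, key='name_label')
# vms == {
#     'vm01': {'uuid': 'aaaa', 'name_label': 'vm01', 'power_state': 'Running', 'ref': 'OpaqueRef:1'},
#     'vm02': {'uuid': 'bbbb', 'name_label': 'vm02', 'power_state': 'Halted', 'ref': 'OpaqueRef:2'},
# }
```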
+def get_host(session):
+    """Get the host"""
+    host_recs = session.xenapi.host.get_all()
+    # We only have one host, so just return its entry
+    return session.xenapi.host.get_record(host_recs[0])
+
+
+def get_vms(session):
+    recs = session.xenapi.VM.get_all_records()
+    if not recs:
+        return None
+    vms = change_keys(recs, key='name_label')
+    return vms
+
+
+def get_srs(session):
+    recs = session.xenapi.SR.get_all_records()
+    if not recs:
+        return None
+    srs = change_keys(recs, key='name_label')
+    return srs
+
+
+def main():
+    module = AnsibleModule({})
+
+    if not HAVE_XENAPI:
+        module.fail_json(changed=False, msg="python xen api required for this module")
+
+    obj = XenServerFacts()
+    try:
+        session = get_xenapi_session()
+    except XenAPI.Failure as e:
+        module.fail_json(msg='%s' % e)
+
+    data = {
+        'xenserver_version': obj.version,
+        'xenserver_codename': obj.codename
+    }
+
+    xs_networks = get_networks(session)
+    xs_pifs = get_pifs(session)
+    xs_vlans = get_vlans(session)
+    xs_vms = get_vms(session)
+    xs_srs = get_srs(session)
+
+    if xs_vlans:
+        data['xs_vlans'] = xs_vlans
+    if xs_pifs:
+        data['xs_pifs'] = xs_pifs
+    if xs_networks:
+        data['xs_networks'] = xs_networks
+
+    if xs_vms:
+        data['xs_vms'] = xs_vms
+
+    if xs_srs:
+        data['xs_srs'] = xs_srs
+
+    module.exit_json(ansible_facts=data)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/cloud/oneandone/oneandone_firewall_policy.py b/plugins/modules/cloud/oneandone/oneandone_firewall_policy.py
new file mode 100644
index 0000000000..bbe069f2f5
--- /dev/null
+++ b/plugins/modules/cloud/oneandone/oneandone_firewall_policy.py
@@ -0,0 +1,575 @@
+#!/usr/bin/python
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: oneandone_firewall_policy
+short_description: Configure 1&1 firewall policy.
+description:
+     - Create, remove, reconfigure, and update firewall policies.
+       This module has a dependency on 1and1 >= 1.0.
+options:
+    state:
+        description:
+            - Define a firewall policy state to create, remove, or update.
+        required: false
+        default: 'present'
+        choices: [ "present", "absent", "update" ]
+    auth_token:
+        description:
+            - Authenticating API token provided by 1&1.
+        required: true
+    api_url:
+        description:
+            - Custom API URL. Overrides the
+              ONEANDONE_API_URL environment variable.
+        required: false
+    name:
+        description:
+            - Firewall policy name used with present state. Used as identifier (id or name) when used with absent state.
+              maxLength=128
+        required: true
+    firewall_policy:
+        description:
+            - The identifier (id or name) of the firewall policy used with update state.
+        required: true
+    rules:
+        description:
+            - A list of rules that will be set for the firewall policy.
+              Each rule must contain the protocol parameter, in addition to three optional parameters
+              (port_from, port_to, and source).
+    add_server_ips:
+        description:
+            - A list of server identifiers (id or name) to be assigned to a firewall policy.
+              Used in combination with update state.
+        required: false
+    remove_server_ips:
+        description:
+            - A list of server IP ids to be unassigned from a firewall policy. Used in combination with update state.
+        required: false
+    add_rules:
+        description:
+            - A list of rules that will be added to an existing firewall policy.
+              Its syntax is the same as that of the rules parameter. Used in combination with update state.
+        required: false
+    remove_rules:
+        description:
+            - A list of rule ids that will be removed from an existing firewall policy. Used in combination with update state.
+        required: false
+    description:
+        description:
+            - Firewall policy description. maxLength=256
+        required: false
+    wait:
+        description:
+            - Wait for the instance to be in state 'running' before returning.
+        required: false
+        default: "yes"
+        type: bool
+    wait_timeout:
+        description:
+            - How long before wait gives up, in seconds.
+        default: 600
+    wait_interval:
+        description:
+            - Defines the number of seconds to wait when using the _wait_for methods.
+        default: 5
+
+requirements:
+     - "1and1"
+     - "python >= 2.6"
+
+author:
+    - "Amel Ajdinovic (@aajdinov)"
+    - "Ethan Devenport (@edevenport)"
+'''
+
+EXAMPLES = '''
+
+# Provisioning example. Create and destroy a firewall policy.
+
+- oneandone_firewall_policy:
+    auth_token: oneandone_private_api_key
+    name: ansible-firewall-policy
+    description: Testing creation of firewall policies with ansible
+    rules:
+     -
+       protocol: TCP
+       port_from: 80
+       port_to: 80
+       source: 0.0.0.0
+    wait: true
+    wait_timeout: 500
+
+- oneandone_firewall_policy:
+    auth_token: oneandone_private_api_key
+    state: absent
+    name: ansible-firewall-policy
+
+# Update a firewall policy.
+
+- oneandone_firewall_policy:
+    auth_token: oneandone_private_api_key
+    state: update
+    firewall_policy: ansible-firewall-policy
+    name: ansible-firewall-policy-updated
+    description: Testing creation of firewall policies with ansible - updated
+
+# Add server to a firewall policy.
+
+- oneandone_firewall_policy:
+    auth_token: oneandone_private_api_key
+    firewall_policy: ansible-firewall-policy-updated
+    add_server_ips:
+     - server_identifier (id or name)
+     - server_identifier #2 (id or name)
+    wait: true
+    wait_timeout: 500
+    state: update
+
+# Remove server from a firewall policy.
+
+- oneandone_firewall_policy:
+    auth_token: oneandone_private_api_key
+    firewall_policy: ansible-firewall-policy-updated
+    remove_server_ips:
+     - B2504878540DBC5F7634EB00A07C1EBD (server's IP id)
+    wait: true
+    wait_timeout: 500
+    state: update
+
+# Add rules to a firewall policy.
+
+- oneandone_firewall_policy:
+    auth_token: oneandone_private_api_key
+    firewall_policy: ansible-firewall-policy-updated
+    description: Adding rules to an existing firewall policy
+    add_rules:
+     -
+       protocol: TCP
+       port_from: 70
+       port_to: 70
+       source: 0.0.0.0
+     -
+       protocol: TCP
+       port_from: 60
+       port_to: 60
+       source: 0.0.0.0
+    wait: true
+    wait_timeout: 500
+    state: update
+
+# Remove rules from a firewall policy.
+
+- oneandone_firewall_policy:
+    auth_token: oneandone_private_api_key
+    firewall_policy: ansible-firewall-policy-updated
+    remove_rules:
+     - rule_id #1
+     - rule_id #2
+     - ...
+    wait: true
+    wait_timeout: 500
+    state: update
+
+'''
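Each entry of rules/add_rules above is a plain dict that the module's helpers translate into SDK objects. A hedged sketch of just that translation, using the `oneandone.client.FirewallPolicyRule` constructor exactly as `_add_firewall_rules()` below does (the `rules` data here is illustrative):

```python
import oneandone.client

# Plain dicts as they arrive from the playbook 'rules'/'add_rules' parameters.
rules = [
    {'protocol': 'TCP', 'port_from': 80, 'port_to': 80, 'source': '0.0.0.0'},
    {'protocol': 'TCP', 'port_from': 443, 'port_to': 443, 'source': '0.0.0.0'},
]

# Each dict becomes a FirewallPolicyRule, mirroring _add_firewall_rules().
sdk_rules = [
    oneandone.client.FirewallPolicyRule(
        protocol=r['protocol'],
        port_from=r['port_from'],
        port_to=r['port_to'],
        source=r['source'],
    )
    for r in rules
]
```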
+ +- oneandone_firewall_policy: + auth_token: oneandone_private_api_key + firewall_policy: ansible-firewall-policy-updated + remove_rules: + - rule_id #1 + - rule_id #2 + - ... + wait: true + wait_timeout: 500 + state: update + +''' + +RETURN = ''' +firewall_policy: + description: Information about the firewall policy that was processed + type: dict + sample: '{"id": "92B74394A397ECC3359825C1656D67A6", "name": "Default Policy"}' + returned: always +''' + +import os +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.oneandone import ( + get_firewall_policy, + get_server, + OneAndOneResources, + wait_for_resource_creation_completion +) + +HAS_ONEANDONE_SDK = True + +try: + import oneandone.client +except ImportError: + HAS_ONEANDONE_SDK = False + + +def _check_mode(module, result): + if module.check_mode: + module.exit_json( + changed=result + ) + + +def _add_server_ips(module, oneandone_conn, firewall_id, server_ids): + """ + Assigns servers to a firewall policy. + """ + try: + attach_servers = [] + + for _server_id in server_ids: + server = get_server(oneandone_conn, _server_id, True) + attach_server = oneandone.client.AttachServer( + server_id=server['id'], + server_ip_id=next(iter(server['ips'] or []), None)['id'] + ) + attach_servers.append(attach_server) + + if module.check_mode: + if attach_servers: + return True + return False + + firewall_policy = oneandone_conn.attach_server_firewall_policy( + firewall_id=firewall_id, + server_ips=attach_servers) + return firewall_policy + except Exception as e: + module.fail_json(msg=str(e)) + + +def _remove_firewall_server(module, oneandone_conn, firewall_id, server_ip_id): + """ + Unassigns a server/IP from a firewall policy. + """ + try: + if module.check_mode: + firewall_server = oneandone_conn.get_firewall_server( + firewall_id=firewall_id, + server_ip_id=server_ip_id) + if firewall_server: + return True + return False + + firewall_policy = oneandone_conn.remove_firewall_server( + firewall_id=firewall_id, + server_ip_id=server_ip_id) + return firewall_policy + except Exception as e: + module.fail_json(msg=str(e)) + + +def _add_firewall_rules(module, oneandone_conn, firewall_id, rules): + """ + Adds new rules to a firewall policy. + """ + try: + firewall_rules = [] + + for rule in rules: + firewall_rule = oneandone.client.FirewallPolicyRule( + protocol=rule['protocol'], + port_from=rule['port_from'], + port_to=rule['port_to'], + source=rule['source']) + firewall_rules.append(firewall_rule) + + if module.check_mode: + firewall_policy_id = get_firewall_policy(oneandone_conn, firewall_id) + if (firewall_rules and firewall_policy_id): + return True + return False + + firewall_policy = oneandone_conn.add_firewall_policy_rule( + firewall_id=firewall_id, + firewall_policy_rules=firewall_rules + ) + return firewall_policy + except Exception as e: + module.fail_json(msg=str(e)) + + +def _remove_firewall_rule(module, oneandone_conn, firewall_id, rule_id): + """ + Removes a rule from a firewall policy. + """ + try: + if module.check_mode: + rule = oneandone_conn.get_firewall_policy_rule( + firewall_id=firewall_id, + rule_id=rule_id) + if rule: + return True + return False + + firewall_policy = oneandone_conn.remove_firewall_rule( + firewall_id=firewall_id, + rule_id=rule_id + ) + return firewall_policy + except Exception as e: + module.fail_json(msg=str(e)) + + +def update_firewall_policy(module, oneandone_conn): + """ + Updates a firewall policy based on input arguments. 
+ Firewall rules and server ips can be added/removed to/from + firewall policy. Firewall policy name and description can be + updated as well. + + module : AnsibleModule object + oneandone_conn: authenticated oneandone object + """ + try: + firewall_policy_id = module.params.get('firewall_policy') + name = module.params.get('name') + description = module.params.get('description') + add_server_ips = module.params.get('add_server_ips') + remove_server_ips = module.params.get('remove_server_ips') + add_rules = module.params.get('add_rules') + remove_rules = module.params.get('remove_rules') + + changed = False + + firewall_policy = get_firewall_policy(oneandone_conn, firewall_policy_id, True) + if firewall_policy is None: + _check_mode(module, False) + + if name or description: + _check_mode(module, True) + firewall_policy = oneandone_conn.modify_firewall( + firewall_id=firewall_policy['id'], + name=name, + description=description) + changed = True + + if add_server_ips: + if module.check_mode: + _check_mode(module, _add_server_ips(module, + oneandone_conn, + firewall_policy['id'], + add_server_ips)) + + firewall_policy = _add_server_ips(module, oneandone_conn, firewall_policy['id'], add_server_ips) + changed = True + + if remove_server_ips: + chk_changed = False + for server_ip_id in remove_server_ips: + if module.check_mode: + chk_changed |= _remove_firewall_server(module, + oneandone_conn, + firewall_policy['id'], + server_ip_id) + + _remove_firewall_server(module, + oneandone_conn, + firewall_policy['id'], + server_ip_id) + _check_mode(module, chk_changed) + firewall_policy = get_firewall_policy(oneandone_conn, firewall_policy['id'], True) + changed = True + + if add_rules: + firewall_policy = _add_firewall_rules(module, + oneandone_conn, + firewall_policy['id'], + add_rules) + _check_mode(module, firewall_policy) + changed = True + + if remove_rules: + chk_changed = False + for rule_id in remove_rules: + if module.check_mode: + chk_changed |= _remove_firewall_rule(module, + oneandone_conn, + firewall_policy['id'], + rule_id) + + _remove_firewall_rule(module, + oneandone_conn, + firewall_policy['id'], + rule_id) + _check_mode(module, chk_changed) + firewall_policy = get_firewall_policy(oneandone_conn, firewall_policy['id'], True) + changed = True + + return (changed, firewall_policy) + except Exception as e: + module.fail_json(msg=str(e)) + + +def create_firewall_policy(module, oneandone_conn): + """ + Create a new firewall policy. 
+ + module : AnsibleModule object + oneandone_conn: authenticated oneandone object + """ + try: + name = module.params.get('name') + description = module.params.get('description') + rules = module.params.get('rules') + wait = module.params.get('wait') + wait_timeout = module.params.get('wait_timeout') + wait_interval = module.params.get('wait_interval') + + firewall_rules = [] + + for rule in rules: + firewall_rule = oneandone.client.FirewallPolicyRule( + protocol=rule['protocol'], + port_from=rule['port_from'], + port_to=rule['port_to'], + source=rule['source']) + firewall_rules.append(firewall_rule) + + firewall_policy_obj = oneandone.client.FirewallPolicy( + name=name, + description=description + ) + + _check_mode(module, True) + firewall_policy = oneandone_conn.create_firewall_policy( + firewall_policy=firewall_policy_obj, + firewall_policy_rules=firewall_rules + ) + + if wait: + wait_for_resource_creation_completion( + oneandone_conn, + OneAndOneResources.firewall_policy, + firewall_policy['id'], + wait_timeout, + wait_interval) + + firewall_policy = get_firewall_policy(oneandone_conn, firewall_policy['id'], True) # refresh + changed = True if firewall_policy else False + + _check_mode(module, False) + + return (changed, firewall_policy) + except Exception as e: + module.fail_json(msg=str(e)) + + +def remove_firewall_policy(module, oneandone_conn): + """ + Removes a firewall policy. + + module : AnsibleModule object + oneandone_conn: authenticated oneandone object + """ + try: + fp_id = module.params.get('name') + firewall_policy_id = get_firewall_policy(oneandone_conn, fp_id) + if module.check_mode: + if firewall_policy_id is None: + _check_mode(module, False) + _check_mode(module, True) + firewall_policy = oneandone_conn.delete_firewall(firewall_policy_id) + + changed = True if firewall_policy else False + + return (changed, { + 'id': firewall_policy['id'], + 'name': firewall_policy['name'] + }) + except Exception as e: + module.fail_json(msg=str(e)) + + +def main(): + module = AnsibleModule( + argument_spec=dict( + auth_token=dict( + type='str', + default=os.environ.get('ONEANDONE_AUTH_TOKEN')), + api_url=dict( + type='str', + default=os.environ.get('ONEANDONE_API_URL')), + name=dict(type='str'), + firewall_policy=dict(type='str'), + description=dict(type='str'), + rules=dict(type='list', default=[]), + add_server_ips=dict(type='list', default=[]), + remove_server_ips=dict(type='list', default=[]), + add_rules=dict(type='list', default=[]), + remove_rules=dict(type='list', default=[]), + wait=dict(type='bool', default=True), + wait_timeout=dict(type='int', default=600), + wait_interval=dict(type='int', default=5), + state=dict(type='str', default='present', choices=['present', 'absent', 'update']), + ), + supports_check_mode=True + ) + + if not HAS_ONEANDONE_SDK: + module.fail_json(msg='1and1 required for this module') + + if not module.params.get('auth_token'): + module.fail_json( + msg='The "auth_token" parameter or ' + + 'ONEANDONE_AUTH_TOKEN environment variable is required.') + + if not module.params.get('api_url'): + oneandone_conn = oneandone.client.OneAndOneService( + api_token=module.params.get('auth_token')) + else: + oneandone_conn = oneandone.client.OneAndOneService( + api_token=module.params.get('auth_token'), api_url=module.params.get('api_url')) + + state = module.params.get('state') + + if state == 'absent': + if not module.params.get('name'): + module.fail_json( + msg="'name' parameter is required to delete a firewall policy.") + try: + (changed, firewall_policy) = 
remove_firewall_policy(module, oneandone_conn) + except Exception as e: + module.fail_json(msg=str(e)) + + elif state == 'update': + if not module.params.get('firewall_policy'): + module.fail_json( + msg="'firewall_policy' parameter is required to update a firewall policy.") + try: + (changed, firewall_policy) = update_firewall_policy(module, oneandone_conn) + except Exception as e: + module.fail_json(msg=str(e)) + + elif state == 'present': + for param in ('name', 'rules'): + if not module.params.get(param): + module.fail_json( + msg="%s parameter is required for new firewall policies." % param) + try: + (changed, firewall_policy) = create_firewall_policy(module, oneandone_conn) + except Exception as e: + module.fail_json(msg=str(e)) + + module.exit_json(changed=changed, firewall_policy=firewall_policy) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/oneandone/oneandone_load_balancer.py b/plugins/modules/cloud/oneandone/oneandone_load_balancer.py new file mode 100644 index 0000000000..12bf99c8f6 --- /dev/null +++ b/plugins/modules/cloud/oneandone/oneandone_load_balancer.py @@ -0,0 +1,676 @@ +#!/usr/bin/python +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: oneandone_load_balancer +short_description: Configure 1&1 load balancer. +description: + - Create, remove, update load balancers. + This module has a dependency on 1and1 >= 1.0 +options: + state: + description: + - Define a load balancer state to create, remove, or update. + required: false + default: 'present' + choices: [ "present", "absent", "update" ] + auth_token: + description: + - Authenticating API token provided by 1&1. + required: true + load_balancer: + description: + - The identifier (id or name) of the load balancer used with update state. + required: true + api_url: + description: + - Custom API URL. Overrides the + ONEANDONE_API_URL environment variable. + required: false + name: + description: + - Load balancer name used with present state. Used as identifier (id or name) when used with absent state. + maxLength=128 + required: true + health_check_test: + description: + - Type of the health check. At the moment, HTTP is not allowed. + choices: [ "NONE", "TCP", "HTTP", "ICMP" ] + required: true + health_check_interval: + description: + - Health check period in seconds. minimum=5, maximum=300, multipleOf=1 + required: true + health_check_path: + description: + - Url to call for checking. Required for HTTP health check. maxLength=1000 + required: false + health_check_parse: + description: + - Regular expression to check. Required for HTTP health check. maxLength=64 + required: false + persistence: + description: + - Persistence. 
+ required: true + type: bool + persistence_time: + description: + - Persistence time in seconds. Required if persistence is enabled. minimum=30, maximum=1200, multipleOf=1 + required: true + method: + description: + - Balancing procedure. + choices: [ "ROUND_ROBIN", "LEAST_CONNECTIONS" ] + required: true + datacenter: + description: + - ID or country code of the datacenter where the load balancer will be created. + default: US + choices: [ "US", "ES", "DE", "GB" ] + required: false + rules: + description: + - A list of rule objects that will be set for the load balancer. Each rule must contain protocol, + port_balancer, and port_server parameters, in addition to source parameter, which is optional. + required: true + description: + description: + - Description of the load balancer. maxLength=256 + required: false + add_server_ips: + description: + - A list of server identifiers (id or name) to be assigned to a load balancer. + Used in combination with update state. + required: false + remove_server_ips: + description: + - A list of server IP ids to be unassigned from a load balancer. Used in combination with update state. + required: false + add_rules: + description: + - A list of rules that will be added to an existing load balancer. + It is syntax is the same as the one used for rules parameter. Used in combination with update state. + required: false + remove_rules: + description: + - A list of rule ids that will be removed from an existing load balancer. Used in combination with update state. + required: false + wait: + description: + - wait for the instance to be in state 'running' before returning + required: false + default: "yes" + type: bool + wait_timeout: + description: + - how long before wait gives up, in seconds + default: 600 + wait_interval: + description: + - Defines the number of seconds to wait when using the _wait_for methods + default: 5 + +requirements: + - "1and1" + - "python >= 2.6" + +author: + - Amel Ajdinovic (@aajdinov) + - Ethan Devenport (@edevenport) +''' + +EXAMPLES = ''' + +# Provisioning example. Create and destroy a load balancer. + +- oneandone_load_balancer: + auth_token: oneandone_private_api_key + name: ansible load balancer + description: Testing creation of load balancer with ansible + health_check_test: TCP + health_check_interval: 40 + persistence: true + persistence_time: 1200 + method: ROUND_ROBIN + datacenter: US + rules: + - + protocol: TCP + port_balancer: 80 + port_server: 80 + source: 0.0.0.0 + wait: true + wait_timeout: 500 + +- oneandone_load_balancer: + auth_token: oneandone_private_api_key + name: ansible load balancer + wait: true + wait_timeout: 500 + state: absent + +# Update a load balancer. + +- oneandone_load_balancer: + auth_token: oneandone_private_api_key + load_balancer: ansible load balancer + name: ansible load balancer updated + description: Testing the update of a load balancer with ansible + wait: true + wait_timeout: 500 + state: update + +# Add server to a load balancer. + +- oneandone_load_balancer: + auth_token: oneandone_private_api_key + load_balancer: ansible load balancer updated + description: Adding server to a load balancer with ansible + add_server_ips: + - server identifier (id or name) + wait: true + wait_timeout: 500 + state: update + +# Remove server from a load balancer. 
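One detail worth knowing before the server examples below: when resolving add_server_ips, the module looks up each server and attaches its first IP using a next(iter(...)) idiom. Reduced to its core in plain Python (no SDK needed), including the assumption it makes:

    server = {'id': 'srv-1', 'ips': [{'id': 'ip-A'}, {'id': 'ip-B'}]}

    # The module's idiom for "the server's first IP":
    first_ip = next(iter(server['ips'] or []), None)
    print(first_ip['id'])  # -> 'ip-A'

    # Implicit assumption: the server has at least one IP. With an empty
    # 'ips' list, first_ip is None and first_ip['id'] raises TypeError.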
+ +- oneandone_load_balancer: + auth_token: oneandone_private_api_key + load_balancer: ansible load balancer updated + description: Removing server from a load balancer with ansible + remove_server_ips: + - B2504878540DBC5F7634EB00A07C1EBD (server's ip id) + wait: true + wait_timeout: 500 + state: update + +# Add rules to a load balancer. + +- oneandone_load_balancer: + auth_token: oneandone_private_api_key + load_balancer: ansible load balancer updated + description: Adding rules to a load balancer with ansible + add_rules: + - + protocol: TCP + port_balancer: 70 + port_server: 70 + source: 0.0.0.0 + - + protocol: TCP + port_balancer: 60 + port_server: 60 + source: 0.0.0.0 + wait: true + wait_timeout: 500 + state: update + +# Remove rules from a load balancer. + +- oneandone_load_balancer: + auth_token: oneandone_private_api_key + load_balancer: ansible load balancer updated + description: Adding rules to a load balancer with ansible + remove_rules: + - rule_id #1 + - rule_id #2 + - ... + wait: true + wait_timeout: 500 + state: update +''' + +RETURN = ''' +load_balancer: + description: Information about the load balancer that was processed + type: dict + sample: '{"id": "92B74394A397ECC3359825C1656D67A6", "name": "Default Balancer"}' + returned: always +''' + +import os +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.oneandone import ( + get_load_balancer, + get_server, + get_datacenter, + OneAndOneResources, + wait_for_resource_creation_completion +) + +HAS_ONEANDONE_SDK = True + +try: + import oneandone.client +except ImportError: + HAS_ONEANDONE_SDK = False + +DATACENTERS = ['US', 'ES', 'DE', 'GB'] +HEALTH_CHECK_TESTS = ['NONE', 'TCP', 'HTTP', 'ICMP'] +METHODS = ['ROUND_ROBIN', 'LEAST_CONNECTIONS'] + + +def _check_mode(module, result): + if module.check_mode: + module.exit_json( + changed=result + ) + + +def _add_server_ips(module, oneandone_conn, load_balancer_id, server_ids): + """ + Assigns servers to a load balancer. + """ + try: + attach_servers = [] + + for server_id in server_ids: + server = get_server(oneandone_conn, server_id, True) + attach_server = oneandone.client.AttachServer( + server_id=server['id'], + server_ip_id=next(iter(server['ips'] or []), None)['id'] + ) + attach_servers.append(attach_server) + + if module.check_mode: + if attach_servers: + return True + return False + + load_balancer = oneandone_conn.attach_load_balancer_server( + load_balancer_id=load_balancer_id, + server_ips=attach_servers) + return load_balancer + except Exception as ex: + module.fail_json(msg=str(ex)) + + +def _remove_load_balancer_server(module, oneandone_conn, load_balancer_id, server_ip_id): + """ + Unassigns a server/IP from a load balancer. + """ + try: + if module.check_mode: + lb_server = oneandone_conn.get_load_balancer_server( + load_balancer_id=load_balancer_id, + server_ip_id=server_ip_id) + if lb_server: + return True + return False + + load_balancer = oneandone_conn.remove_load_balancer_server( + load_balancer_id=load_balancer_id, + server_ip_id=server_ip_id) + return load_balancer + except Exception as ex: + module.fail_json(msg=str(ex)) + + +def _add_load_balancer_rules(module, oneandone_conn, load_balancer_id, rules): + """ + Adds new rules to a load_balancer. 
+ """ + try: + load_balancer_rules = [] + + for rule in rules: + load_balancer_rule = oneandone.client.LoadBalancerRule( + protocol=rule['protocol'], + port_balancer=rule['port_balancer'], + port_server=rule['port_server'], + source=rule['source']) + load_balancer_rules.append(load_balancer_rule) + + if module.check_mode: + lb_id = get_load_balancer(oneandone_conn, load_balancer_id) + if (load_balancer_rules and lb_id): + return True + return False + + load_balancer = oneandone_conn.add_load_balancer_rule( + load_balancer_id=load_balancer_id, + load_balancer_rules=load_balancer_rules + ) + + return load_balancer + except Exception as ex: + module.fail_json(msg=str(ex)) + + +def _remove_load_balancer_rule(module, oneandone_conn, load_balancer_id, rule_id): + """ + Removes a rule from a load_balancer. + """ + try: + if module.check_mode: + rule = oneandone_conn.get_load_balancer_rule( + load_balancer_id=load_balancer_id, + rule_id=rule_id) + if rule: + return True + return False + + load_balancer = oneandone_conn.remove_load_balancer_rule( + load_balancer_id=load_balancer_id, + rule_id=rule_id + ) + return load_balancer + except Exception as ex: + module.fail_json(msg=str(ex)) + + +def update_load_balancer(module, oneandone_conn): + """ + Updates a load_balancer based on input arguments. + Load balancer rules and server ips can be added/removed to/from + load balancer. Load balancer name, description, health_check_test, + health_check_interval, persistence, persistence_time, and method + can be updated as well. + + module : AnsibleModule object + oneandone_conn: authenticated oneandone object + """ + load_balancer_id = module.params.get('load_balancer') + name = module.params.get('name') + description = module.params.get('description') + health_check_test = module.params.get('health_check_test') + health_check_interval = module.params.get('health_check_interval') + health_check_path = module.params.get('health_check_path') + health_check_parse = module.params.get('health_check_parse') + persistence = module.params.get('persistence') + persistence_time = module.params.get('persistence_time') + method = module.params.get('method') + add_server_ips = module.params.get('add_server_ips') + remove_server_ips = module.params.get('remove_server_ips') + add_rules = module.params.get('add_rules') + remove_rules = module.params.get('remove_rules') + + changed = False + + load_balancer = get_load_balancer(oneandone_conn, load_balancer_id, True) + if load_balancer is None: + _check_mode(module, False) + + if (name or description or health_check_test or health_check_interval or health_check_path or + health_check_parse or persistence or persistence_time or method): + _check_mode(module, True) + load_balancer = oneandone_conn.modify_load_balancer( + load_balancer_id=load_balancer['id'], + name=name, + description=description, + health_check_test=health_check_test, + health_check_interval=health_check_interval, + health_check_path=health_check_path, + health_check_parse=health_check_parse, + persistence=persistence, + persistence_time=persistence_time, + method=method) + changed = True + + if add_server_ips: + if module.check_mode: + _check_mode(module, _add_server_ips(module, + oneandone_conn, + load_balancer['id'], + add_server_ips)) + + load_balancer = _add_server_ips(module, oneandone_conn, load_balancer['id'], add_server_ips) + changed = True + + if remove_server_ips: + chk_changed = False + for server_ip_id in remove_server_ips: + if module.check_mode: + chk_changed |= 
_remove_load_balancer_server(module, + oneandone_conn, + load_balancer['id'], + server_ip_id) + + _remove_load_balancer_server(module, + oneandone_conn, + load_balancer['id'], + server_ip_id) + _check_mode(module, chk_changed) + load_balancer = get_load_balancer(oneandone_conn, load_balancer['id'], True) + changed = True + + if add_rules: + load_balancer = _add_load_balancer_rules(module, + oneandone_conn, + load_balancer['id'], + add_rules) + _check_mode(module, load_balancer) + changed = True + + if remove_rules: + chk_changed = False + for rule_id in remove_rules: + if module.check_mode: + chk_changed |= _remove_load_balancer_rule(module, + oneandone_conn, + load_balancer['id'], + rule_id) + + _remove_load_balancer_rule(module, + oneandone_conn, + load_balancer['id'], + rule_id) + _check_mode(module, chk_changed) + load_balancer = get_load_balancer(oneandone_conn, load_balancer['id'], True) + changed = True + + try: + return (changed, load_balancer) + except Exception as ex: + module.fail_json(msg=str(ex)) + + +def create_load_balancer(module, oneandone_conn): + """ + Create a new load_balancer. + + module : AnsibleModule object + oneandone_conn: authenticated oneandone object + """ + try: + name = module.params.get('name') + description = module.params.get('description') + health_check_test = module.params.get('health_check_test') + health_check_interval = module.params.get('health_check_interval') + health_check_path = module.params.get('health_check_path') + health_check_parse = module.params.get('health_check_parse') + persistence = module.params.get('persistence') + persistence_time = module.params.get('persistence_time') + method = module.params.get('method') + datacenter = module.params.get('datacenter') + rules = module.params.get('rules') + wait = module.params.get('wait') + wait_timeout = module.params.get('wait_timeout') + wait_interval = module.params.get('wait_interval') + + load_balancer_rules = [] + + datacenter_id = None + if datacenter is not None: + datacenter_id = get_datacenter(oneandone_conn, datacenter) + if datacenter_id is None: + module.fail_json( + msg='datacenter %s not found.' % datacenter) + + for rule in rules: + load_balancer_rule = oneandone.client.LoadBalancerRule( + protocol=rule['protocol'], + port_balancer=rule['port_balancer'], + port_server=rule['port_server'], + source=rule['source']) + load_balancer_rules.append(load_balancer_rule) + + _check_mode(module, True) + load_balancer_obj = oneandone.client.LoadBalancer( + health_check_path=health_check_path, + health_check_parse=health_check_parse, + name=name, + description=description, + health_check_test=health_check_test, + health_check_interval=health_check_interval, + persistence=persistence, + persistence_time=persistence_time, + method=method, + datacenter_id=datacenter_id + ) + + load_balancer = oneandone_conn.create_load_balancer( + load_balancer=load_balancer_obj, + load_balancer_rules=load_balancer_rules + ) + + if wait: + wait_for_resource_creation_completion(oneandone_conn, + OneAndOneResources.load_balancer, + load_balancer['id'], + wait_timeout, + wait_interval) + + load_balancer = get_load_balancer(oneandone_conn, load_balancer['id'], True) # refresh + changed = True if load_balancer else False + + _check_mode(module, False) + + return (changed, load_balancer) + except Exception as ex: + module.fail_json(msg=str(ex)) + + +def remove_load_balancer(module, oneandone_conn): + """ + Removes a load_balancer. 
+ + module : AnsibleModule object + oneandone_conn: authenticated oneandone object + """ + try: + lb_id = module.params.get('name') + load_balancer_id = get_load_balancer(oneandone_conn, lb_id) + if module.check_mode: + if load_balancer_id is None: + _check_mode(module, False) + _check_mode(module, True) + load_balancer = oneandone_conn.delete_load_balancer(load_balancer_id) + + changed = True if load_balancer else False + + return (changed, { + 'id': load_balancer['id'], + 'name': load_balancer['name'] + }) + except Exception as ex: + module.fail_json(msg=str(ex)) + + +def main(): + module = AnsibleModule( + argument_spec=dict( + auth_token=dict( + type='str', + default=os.environ.get('ONEANDONE_AUTH_TOKEN')), + api_url=dict( + type='str', + default=os.environ.get('ONEANDONE_API_URL')), + load_balancer=dict(type='str'), + name=dict(type='str'), + description=dict(type='str'), + health_check_test=dict( + choices=HEALTH_CHECK_TESTS), + health_check_interval=dict(type='str'), + health_check_path=dict(type='str'), + health_check_parse=dict(type='str'), + persistence=dict(type='bool'), + persistence_time=dict(type='str'), + method=dict( + choices=METHODS), + datacenter=dict( + choices=DATACENTERS), + rules=dict(type='list', default=[]), + add_server_ips=dict(type='list', default=[]), + remove_server_ips=dict(type='list', default=[]), + add_rules=dict(type='list', default=[]), + remove_rules=dict(type='list', default=[]), + wait=dict(type='bool', default=True), + wait_timeout=dict(type='int', default=600), + wait_interval=dict(type='int', default=5), + state=dict(type='str', default='present', choices=['present', 'absent', 'update']), + ), + supports_check_mode=True + ) + + if not HAS_ONEANDONE_SDK: + module.fail_json(msg='1and1 required for this module') + + if not module.params.get('auth_token'): + module.fail_json( + msg='auth_token parameter is required.') + + if not module.params.get('api_url'): + oneandone_conn = oneandone.client.OneAndOneService( + api_token=module.params.get('auth_token')) + else: + oneandone_conn = oneandone.client.OneAndOneService( + api_token=module.params.get('auth_token'), api_url=module.params.get('api_url')) + + state = module.params.get('state') + + if state == 'absent': + if not module.params.get('name'): + module.fail_json( + msg="'name' parameter is required for deleting a load balancer.") + try: + (changed, load_balancer) = remove_load_balancer(module, oneandone_conn) + except Exception as ex: + module.fail_json(msg=str(ex)) + elif state == 'update': + if not module.params.get('load_balancer'): + module.fail_json( + msg="'load_balancer' parameter is required for updating a load balancer.") + try: + (changed, load_balancer) = update_load_balancer(module, oneandone_conn) + except Exception as ex: + module.fail_json(msg=str(ex)) + + elif state == 'present': + for param in ('name', 'health_check_test', 'health_check_interval', 'persistence', + 'persistence_time', 'method', 'rules'): + if not module.params.get(param): + module.fail_json( + msg="%s parameter is required for new load balancers." 
% param) + try: + (changed, load_balancer) = create_load_balancer(module, oneandone_conn) + except Exception as ex: + module.fail_json(msg=str(ex)) + + module.exit_json(changed=changed, load_balancer=load_balancer) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/oneandone/oneandone_monitoring_policy.py b/plugins/modules/cloud/oneandone/oneandone_monitoring_policy.py new file mode 100644 index 0000000000..6512bd261a --- /dev/null +++ b/plugins/modules/cloud/oneandone/oneandone_monitoring_policy.py @@ -0,0 +1,1027 @@ +#!/usr/bin/python +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: oneandone_monitoring_policy +short_description: Configure 1&1 monitoring policy. +description: + - Create, remove, update monitoring policies + (and add/remove ports, processes, and servers). + This module has a dependency on 1and1 >= 1.0 +options: + state: + description: + - Define a monitoring policy's state to create, remove, update. + required: false + default: present + choices: [ "present", "absent", "update" ] + auth_token: + description: + - Authenticating API token provided by 1&1. + required: true + api_url: + description: + - Custom API URL. Overrides the + ONEANDONE_API_URL environment variable. + required: false + name: + description: + - Monitoring policy name used with present state. Used as identifier (id or name) when used with absent state. maxLength=128 + required: true + monitoring_policy: + description: + - The identifier (id or name) of the monitoring policy used with update state. + required: true + agent: + description: + - Set true for using agent. + required: true + email: + description: + - User's email. maxLength=128 + required: true + description: + description: + - Monitoring policy description. maxLength=256 + required: false + thresholds: + description: + - Monitoring policy thresholds. Each of the suboptions have warning and critical, + which both have alert and value suboptions. Warning is used to set limits for + warning alerts, critical is used to set critical alerts. alert enables alert, + and value is used to advise when the value is exceeded. + required: true + suboptions: + cpu: + description: + - Consumption limits of CPU. + required: true + ram: + description: + - Consumption limits of RAM. + required: true + disk: + description: + - Consumption limits of hard disk. + required: true + internal_ping: + description: + - Response limits of internal ping. + required: true + transfer: + description: + - Consumption limits for transfer. + required: true + ports: + description: + - Array of ports that will be monitoring. + required: true + suboptions: + protocol: + description: + - Internet protocol. 
+        choices: [ "TCP", "UDP" ]
+        required: true
+      port:
+        description:
+          - Port number. minimum=1, maximum=65535
+        required: true
+      alert_if:
+        description:
+          - Case of alert.
+        choices: [ "RESPONDING", "NOT_RESPONDING" ]
+        required: true
+      email_notification:
+        description:
+          - Set true for sending e-mail notifications.
+        required: true
+  processes:
+    description:
+      - Array of processes that will be monitored.
+    required: true
+    suboptions:
+      process:
+        description:
+          - Name of the process. maxLength=50
+        required: true
+      alert_if:
+        description:
+          - Case of alert.
+        choices: [ "RUNNING", "NOT_RUNNING" ]
+        required: true
+  add_ports:
+    description:
+      - Ports to add to the monitoring policy.
+    required: false
+  add_processes:
+    description:
+      - Processes to add to the monitoring policy.
+    required: false
+  add_servers:
+    description:
+      - Servers to add to the monitoring policy.
+    required: false
+  remove_ports:
+    description:
+      - Ports to remove from the monitoring policy.
+    required: false
+  remove_processes:
+    description:
+      - Processes to remove from the monitoring policy.
+    required: false
+  remove_servers:
+    description:
+      - Servers to remove from the monitoring policy.
+    required: false
+  update_ports:
+    description:
+      - Ports to be updated on the monitoring policy.
+    required: false
+  update_processes:
+    description:
+      - Processes to be updated on the monitoring policy.
+    required: false
+  wait:
+    description:
+      - wait for the instance to be in state 'running' before returning
+    required: false
+    default: "yes"
+    type: bool
+  wait_timeout:
+    description:
+      - how long before wait gives up, in seconds
+    default: 600
+  wait_interval:
+    description:
+      - Defines the number of seconds to wait when using the _wait_for methods
+    default: 5
+
+requirements:
+  - "1and1"
+  - "python >= 2.6"
+
+author:
+  - "Amel Ajdinovic (@aajdinov)"
+  - "Ethan Devenport (@edevenport)"
+'''
+
+EXAMPLES = '''
+
+# Provisioning example. Create and destroy a monitoring policy.
+
+- oneandone_monitoring_policy:
+    auth_token: oneandone_private_api_key
+    name: ansible monitoring policy
+    description: Testing creation of a monitoring policy with ansible
+    email: your@emailaddress.com
+    agent: true
+    thresholds:
+     -
+       cpu:
+         warning:
+           value: 80
+           alert: false
+         critical:
+           value: 92
+           alert: false
+     -
+       ram:
+         warning:
+           value: 80
+           alert: false
+         critical:
+           value: 90
+           alert: false
+     -
+       disk:
+         warning:
+           value: 80
+           alert: false
+         critical:
+           value: 90
+           alert: false
+     -
+       internal_ping:
+         warning:
+           value: 50
+           alert: false
+         critical:
+           value: 100
+           alert: false
+     -
+       transfer:
+         warning:
+           value: 1000
+           alert: false
+         critical:
+           value: 2000
+           alert: false
+    ports:
+     -
+       protocol: TCP
+       port: 22
+       alert_if: RESPONDING
+       email_notification: false
+    processes:
+     -
+       process: test
+       alert_if: NOT_RUNNING
+       email_notification: false
+    wait: true
+
+- oneandone_monitoring_policy:
+    auth_token: oneandone_private_api_key
+    state: absent
+    name: ansible monitoring policy
+
+# Update a monitoring policy.
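For reference, each thresholds entry above is a single-key dict whose key names the metric; the module converts it into an oneandone.client.Threshold, serializing the alert booleans as lowercase strings. A sketch of that mapping, assuming the 1and1 SDK is installed (next(iter(...)) fetches the metric name portably on Python 2 and 3):

    import oneandone.client

    def build_threshold(entry):
        # entry example: {'cpu': {'warning': {'value': 80, 'alert': False},
        #                         'critical': {'value': 92, 'alert': False}}}
        key = next(iter(entry))  # the metric name: cpu, ram, disk, ...
        return oneandone.client.Threshold(
            entity=key,
            warning_value=entry[key]['warning']['value'],
            warning_alert=str(entry[key]['warning']['alert']).lower(),
            critical_value=entry[key]['critical']['value'],
            critical_alert=str(entry[key]['critical']['alert']).lower())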
+
+- oneandone_monitoring_policy:
+    auth_token: oneandone_private_api_key
+    monitoring_policy: ansible monitoring policy
+    name: ansible monitoring policy updated
+    description: Testing creation of a monitoring policy with ansible updated
+    email: another@emailaddress.com
+    thresholds:
+     -
+       cpu:
+         warning:
+           value: 70
+           alert: false
+         critical:
+           value: 90
+           alert: false
+     -
+       ram:
+         warning:
+           value: 70
+           alert: false
+         critical:
+           value: 80
+           alert: false
+     -
+       disk:
+         warning:
+           value: 70
+           alert: false
+         critical:
+           value: 80
+           alert: false
+     -
+       internal_ping:
+         warning:
+           value: 60
+           alert: false
+         critical:
+           value: 90
+           alert: false
+     -
+       transfer:
+         warning:
+           value: 900
+           alert: false
+         critical:
+           value: 1900
+           alert: false
+    wait: true
+    state: update
+
+# Add a port to a monitoring policy.
+
+- oneandone_monitoring_policy:
+    auth_token: oneandone_private_api_key
+    monitoring_policy: ansible monitoring policy updated
+    add_ports:
+     -
+       protocol: TCP
+       port: 33
+       alert_if: RESPONDING
+       email_notification: false
+    wait: true
+    state: update
+
+# Update existing ports of a monitoring policy.
+
+- oneandone_monitoring_policy:
+    auth_token: oneandone_private_api_key
+    monitoring_policy: ansible monitoring policy updated
+    update_ports:
+     -
+       id: existing_port_id
+       protocol: TCP
+       port: 34
+       alert_if: RESPONDING
+       email_notification: false
+     -
+       id: existing_port_id
+       protocol: TCP
+       port: 23
+       alert_if: RESPONDING
+       email_notification: false
+    wait: true
+    state: update
+
+# Remove a port from a monitoring policy.
+
+- oneandone_monitoring_policy:
+    auth_token: oneandone_private_api_key
+    monitoring_policy: ansible monitoring policy updated
+    remove_ports:
+     - port_id
+    state: update
+
+# Add a process to a monitoring policy.
+
+- oneandone_monitoring_policy:
+    auth_token: oneandone_private_api_key
+    monitoring_policy: ansible monitoring policy updated
+    add_processes:
+     -
+       process: test_2
+       alert_if: NOT_RUNNING
+       email_notification: false
+    wait: true
+    state: update
+
+# Update existing processes of a monitoring policy.
+
+- oneandone_monitoring_policy:
+    auth_token: oneandone_private_api_key
+    monitoring_policy: ansible monitoring policy updated
+    update_processes:
+     -
+       id: process_id
+       process: test_1
+       alert_if: NOT_RUNNING
+       email_notification: false
+     -
+       id: process_id
+       process: test_3
+       alert_if: NOT_RUNNING
+       email_notification: false
+    wait: true
+    state: update
+
+# Remove a process from a monitoring policy.
+
+- oneandone_monitoring_policy:
+    auth_token: oneandone_private_api_key
+    monitoring_policy: ansible monitoring policy updated
+    remove_processes:
+     - process_id
+    wait: true
+    state: update
+
+# Add server to a monitoring policy.
+
+- oneandone_monitoring_policy:
+    auth_token: oneandone_private_api_key
+    monitoring_policy: ansible monitoring policy updated
+    add_servers:
+     - server id or name
+    wait: true
+    state: update
+
+# Remove server from a monitoring policy.
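On check mode for the list-valued updates shown above: the module probes each item and ORs the per-item results together, so a single item that would change makes the whole run report changed=true. The pattern in isolation, in plain Python (would_change is a hypothetical stand-in for probes such as _delete_monitoring_policy_process):

    def would_change(item):
        # Hypothetical stand-in for the module's per-item check-mode probes.
        return item['exists']

    items = [{'exists': False}, {'exists': True}]

    chk_changed = False
    for item in items:
        chk_changed |= would_change(item)  # becomes True once ANY item would change

    print(chk_changed)  # -> True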
+
+- oneandone_monitoring_policy:
+    auth_token: oneandone_private_api_key
+    monitoring_policy: ansible monitoring policy updated
+    remove_servers:
+     - server01
+    wait: true
+    state: update
+'''
+
+RETURN = '''
+monitoring_policy:
+  description: Information about the monitoring policy that was processed
+  type: dict
+  sample: '{"id": "92B74394A397ECC3359825C1656D67A6", "name": "Default Policy"}'
+  returned: always
+'''
+
+import os
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.oneandone import (
+    get_monitoring_policy,
+    get_server,
+    OneAndOneResources,
+    wait_for_resource_creation_completion
+)
+
+HAS_ONEANDONE_SDK = True
+
+try:
+    import oneandone.client
+except ImportError:
+    HAS_ONEANDONE_SDK = False
+
+
+def _check_mode(module, result):
+    if module.check_mode:
+        module.exit_json(
+            changed=result
+        )
+
+
+def _add_ports(module, oneandone_conn, monitoring_policy_id, ports):
+    """
+    Adds new ports to a monitoring policy.
+    """
+    try:
+        monitoring_policy_ports = []
+
+        for _port in ports:
+            monitoring_policy_port = oneandone.client.Port(
+                protocol=_port['protocol'],
+                port=_port['port'],
+                alert_if=_port['alert_if'],
+                email_notification=_port['email_notification']
+            )
+            monitoring_policy_ports.append(monitoring_policy_port)
+
+        if module.check_mode:
+            if monitoring_policy_ports:
+                return True
+            return False
+
+        monitoring_policy = oneandone_conn.add_port(
+            monitoring_policy_id=monitoring_policy_id,
+            ports=monitoring_policy_ports)
+        return monitoring_policy
+    except Exception as ex:
+        module.fail_json(msg=str(ex))
+
+
+def _delete_monitoring_policy_port(module, oneandone_conn, monitoring_policy_id, port_id):
+    """
+    Removes a port from a monitoring policy.
+    """
+    try:
+        if module.check_mode:
+            # Check mode must only probe for the port, never delete it;
+            # this mirrors _delete_monitoring_policy_process below.
+            monitoring_policy = oneandone_conn.get_monitoring_policy_port(
+                monitoring_policy_id=monitoring_policy_id,
+                port_id=port_id)
+            if monitoring_policy:
+                return True
+            return False
+
+        monitoring_policy = oneandone_conn.delete_monitoring_policy_port(
+            monitoring_policy_id=monitoring_policy_id,
+            port_id=port_id)
+        return monitoring_policy
+    except Exception as ex:
+        module.fail_json(msg=str(ex))
+
+
+def _modify_port(module, oneandone_conn, monitoring_policy_id, port_id, port):
+    """
+    Modifies a monitoring policy port.
+    """
+    try:
+        if module.check_mode:
+            cm_port = oneandone_conn.get_monitoring_policy_port(
+                monitoring_policy_id=monitoring_policy_id,
+                port_id=port_id)
+            if cm_port:
+                return True
+            return False
+
+        monitoring_policy_port = oneandone.client.Port(
+            protocol=port['protocol'],
+            port=port['port'],
+            alert_if=port['alert_if'],
+            email_notification=port['email_notification']
+        )
+
+        monitoring_policy = oneandone_conn.modify_port(
+            monitoring_policy_id=monitoring_policy_id,
+            port_id=port_id,
+            port=monitoring_policy_port)
+        return monitoring_policy
+    except Exception as ex:
+        module.fail_json(msg=str(ex))
+
+
+def _add_processes(module, oneandone_conn, monitoring_policy_id, processes):
+    """
+    Adds new processes to a monitoring policy.
+ """ + try: + monitoring_policy_processes = [] + + for _process in processes: + monitoring_policy_process = oneandone.client.Process( + process=_process['process'], + alert_if=_process['alert_if'], + email_notification=_process['email_notification'] + ) + monitoring_policy_processes.append(monitoring_policy_process) + + if module.check_mode: + mp_id = get_monitoring_policy(oneandone_conn, monitoring_policy_id) + if (monitoring_policy_processes and mp_id): + return True + return False + + monitoring_policy = oneandone_conn.add_process( + monitoring_policy_id=monitoring_policy_id, + processes=monitoring_policy_processes) + return monitoring_policy + except Exception as ex: + module.fail_json(msg=str(ex)) + + +def _delete_monitoring_policy_process(module, oneandone_conn, monitoring_policy_id, process_id): + """ + Removes a process from a monitoring policy. + """ + try: + if module.check_mode: + process = oneandone_conn.get_monitoring_policy_process( + monitoring_policy_id=monitoring_policy_id, + process_id=process_id + ) + if process: + return True + return False + + monitoring_policy = oneandone_conn.delete_monitoring_policy_process( + monitoring_policy_id=monitoring_policy_id, + process_id=process_id) + return monitoring_policy + except Exception as ex: + module.fail_json(msg=str(ex)) + + +def _modify_process(module, oneandone_conn, monitoring_policy_id, process_id, process): + """ + Modifies a monitoring policy process. + """ + try: + if module.check_mode: + cm_process = oneandone_conn.get_monitoring_policy_process( + monitoring_policy_id=monitoring_policy_id, + process_id=process_id) + if cm_process: + return True + return False + + monitoring_policy_process = oneandone.client.Process( + process=process['process'], + alert_if=process['alert_if'], + email_notification=process['email_notification'] + ) + + monitoring_policy = oneandone_conn.modify_process( + monitoring_policy_id=monitoring_policy_id, + process_id=process_id, + process=monitoring_policy_process) + return monitoring_policy + except Exception as ex: + module.fail_json(msg=str(ex)) + + +def _attach_monitoring_policy_server(module, oneandone_conn, monitoring_policy_id, servers): + """ + Attaches servers to a monitoring policy. + """ + try: + attach_servers = [] + + for _server_id in servers: + server_id = get_server(oneandone_conn, _server_id) + attach_server = oneandone.client.AttachServer( + server_id=server_id + ) + attach_servers.append(attach_server) + + if module.check_mode: + if attach_servers: + return True + return False + + monitoring_policy = oneandone_conn.attach_monitoring_policy_server( + monitoring_policy_id=monitoring_policy_id, + servers=attach_servers) + return monitoring_policy + except Exception as ex: + module.fail_json(msg=str(ex)) + + +def _detach_monitoring_policy_server(module, oneandone_conn, monitoring_policy_id, server_id): + """ + Detaches a server from a monitoring policy. + """ + try: + if module.check_mode: + mp_server = oneandone_conn.get_monitoring_policy_server( + monitoring_policy_id=monitoring_policy_id, + server_id=server_id) + if mp_server: + return True + return False + + monitoring_policy = oneandone_conn.detach_monitoring_policy_server( + monitoring_policy_id=monitoring_policy_id, + server_id=server_id) + return monitoring_policy + except Exception as ex: + module.fail_json(msg=str(ex)) + + +def update_monitoring_policy(module, oneandone_conn): + """ + Updates a monitoring_policy based on input arguments. 
+    Monitoring policy ports, processes and servers can be added/removed to/from
+    a monitoring policy. Monitoring policy name, description, email,
+    thresholds for cpu, ram, disk, transfer and internal_ping
+    can be updated as well.
+
+    module : AnsibleModule object
+    oneandone_conn: authenticated oneandone object
+    """
+    try:
+        monitoring_policy_id = module.params.get('monitoring_policy')
+        name = module.params.get('name')
+        description = module.params.get('description')
+        email = module.params.get('email')
+        thresholds = module.params.get('thresholds')
+        add_ports = module.params.get('add_ports')
+        update_ports = module.params.get('update_ports')
+        remove_ports = module.params.get('remove_ports')
+        add_processes = module.params.get('add_processes')
+        update_processes = module.params.get('update_processes')
+        remove_processes = module.params.get('remove_processes')
+        add_servers = module.params.get('add_servers')
+        remove_servers = module.params.get('remove_servers')
+
+        changed = False
+
+        monitoring_policy = get_monitoring_policy(oneandone_conn, monitoring_policy_id, True)
+        if monitoring_policy is None:
+            _check_mode(module, False)
+
+        _monitoring_policy = oneandone.client.MonitoringPolicy(
+            name=name,
+            description=description,
+            email=email
+        )
+
+        _thresholds = None
+
+        if thresholds:
+            threshold_entities = ['cpu', 'ram', 'disk', 'internal_ping', 'transfer']
+
+            _thresholds = []
+            for threshold in thresholds:
+                # Each entry is a single-key dict; next(iter(...)) fetches that
+                # key on both Python 2 and 3 (dict.keys() is not indexable on 3).
+                key = next(iter(threshold))
+                if key in threshold_entities:
+                    _threshold = oneandone.client.Threshold(
+                        entity=key,
+                        warning_value=threshold[key]['warning']['value'],
+                        warning_alert=str(threshold[key]['warning']['alert']).lower(),
+                        critical_value=threshold[key]['critical']['value'],
+                        critical_alert=str(threshold[key]['critical']['alert']).lower())
+                    _thresholds.append(_threshold)
+
+        if name or description or email or thresholds:
+            _check_mode(module, True)
+            monitoring_policy = oneandone_conn.modify_monitoring_policy(
+                monitoring_policy_id=monitoring_policy['id'],
+                monitoring_policy=_monitoring_policy,
+                thresholds=_thresholds)
+            changed = True
+
+        if add_ports:
+            if module.check_mode:
+                _check_mode(module, _add_ports(module,
+                                               oneandone_conn,
+                                               monitoring_policy['id'],
+                                               add_ports))
+
+            monitoring_policy = _add_ports(module, oneandone_conn, monitoring_policy['id'], add_ports)
+            changed = True
+
+        if update_ports:
+            chk_changed = False
+            for update_port in update_ports:
+                if module.check_mode:
+                    chk_changed |= _modify_port(module,
+                                                oneandone_conn,
+                                                monitoring_policy['id'],
+                                                update_port['id'],
+                                                update_port)
+
+                _modify_port(module,
+                             oneandone_conn,
+                             monitoring_policy['id'],
+                             update_port['id'],
+                             update_port)
+            # Report the accumulated result in check mode, as the other
+            # update branches do.
+            _check_mode(module, chk_changed)
+            monitoring_policy = get_monitoring_policy(oneandone_conn, monitoring_policy['id'], True)
+            changed = True
+
+        if remove_ports:
+            chk_changed = False
+            for port_id in remove_ports:
+                if module.check_mode:
+                    chk_changed |= _delete_monitoring_policy_port(module,
+                                                                  oneandone_conn,
+                                                                  monitoring_policy['id'],
+                                                                  port_id)
+
+                _delete_monitoring_policy_port(module,
+                                               oneandone_conn,
+                                               monitoring_policy['id'],
+                                               port_id)
+            _check_mode(module, chk_changed)
+            monitoring_policy = get_monitoring_policy(oneandone_conn, monitoring_policy['id'], True)
+            changed = True
+
+        if add_processes:
+            monitoring_policy = _add_processes(module,
+                                               oneandone_conn,
+                                               monitoring_policy['id'],
+                                               add_processes)
+            _check_mode(module, monitoring_policy)
+            changed = True
+
+        if update_processes:
+            chk_changed = False
+            for update_process in update_processes:
+                if module.check_mode:
+                    chk_changed |= _modify_process(module,
+                                                   oneandone_conn,
+                                                   monitoring_policy['id'],
+                                                   update_process['id'],
+                                                   update_process)
+
+                _modify_process(module,
+                                oneandone_conn,
+                                monitoring_policy['id'],
+                                update_process['id'],
+                                update_process)
+            _check_mode(module, chk_changed)
+            monitoring_policy = get_monitoring_policy(oneandone_conn, monitoring_policy['id'], True)
+            changed = True
+
+        if remove_processes:
+            chk_changed = False
+            for process_id in remove_processes:
+                if module.check_mode:
+                    chk_changed |= _delete_monitoring_policy_process(module,
+                                                                     oneandone_conn,
+                                                                     monitoring_policy['id'],
+                                                                     process_id)
+
+                _delete_monitoring_policy_process(module,
+                                                  oneandone_conn,
+                                                  monitoring_policy['id'],
+                                                  process_id)
+            _check_mode(module, chk_changed)
+            monitoring_policy = get_monitoring_policy(oneandone_conn, monitoring_policy['id'], True)
+            changed = True
+
+        if add_servers:
+            monitoring_policy = _attach_monitoring_policy_server(module,
+                                                                 oneandone_conn,
+                                                                 monitoring_policy['id'],
+                                                                 add_servers)
+            _check_mode(module, monitoring_policy)
+            changed = True
+
+        if remove_servers:
+            chk_changed = False
+            for _server_id in remove_servers:
+                server_id = get_server(oneandone_conn, _server_id)
+
+                if module.check_mode:
+                    chk_changed |= _detach_monitoring_policy_server(module,
+                                                                    oneandone_conn,
+                                                                    monitoring_policy['id'],
+                                                                    server_id)
+
+                _detach_monitoring_policy_server(module,
+                                                 oneandone_conn,
+                                                 monitoring_policy['id'],
+                                                 server_id)
+            _check_mode(module, chk_changed)
+            monitoring_policy = get_monitoring_policy(oneandone_conn, monitoring_policy['id'], True)
+            changed = True
+
+        return (changed, monitoring_policy)
+    except Exception as ex:
+        module.fail_json(msg=str(ex))
+
+
+def create_monitoring_policy(module, oneandone_conn):
+    """
+    Creates a new monitoring policy.
+
+    module : AnsibleModule object
+    oneandone_conn: authenticated oneandone object
+    """
+    try:
+        name = module.params.get('name')
+        description = module.params.get('description')
+        email = module.params.get('email')
+        agent = module.params.get('agent')
+        thresholds = module.params.get('thresholds')
+        ports = module.params.get('ports')
+        processes = module.params.get('processes')
+        wait = module.params.get('wait')
+        wait_timeout = module.params.get('wait_timeout')
+        wait_interval = module.params.get('wait_interval')
+
+        _monitoring_policy = oneandone.client.MonitoringPolicy(name,
+                                                               description,
+                                                               email,
+                                                               agent)
+
+        _monitoring_policy.specs['agent'] = str(_monitoring_policy.specs['agent']).lower()
+
+        threshold_entities = ['cpu', 'ram', 'disk', 'internal_ping', 'transfer']
+
+        _thresholds = []
+        for threshold in thresholds:
+            # Single-key dict per entry; portable key lookup (see above).
+            key = next(iter(threshold))
+            if key in threshold_entities:
+                _threshold = oneandone.client.Threshold(
+                    entity=key,
+                    warning_value=threshold[key]['warning']['value'],
+                    warning_alert=str(threshold[key]['warning']['alert']).lower(),
+                    critical_value=threshold[key]['critical']['value'],
+                    critical_alert=str(threshold[key]['critical']['alert']).lower())
+                _thresholds.append(_threshold)
+
+        _ports = []
+        for port in ports:
+            _port = oneandone.client.Port(
+                protocol=port['protocol'],
+                port=port['port'],
+                alert_if=port['alert_if'],
+                email_notification=str(port['email_notification']).lower())
+            _ports.append(_port)
+
+        _processes = []
+        for process in processes:
+            _process = oneandone.client.Process(
+                process=process['process'],
+                alert_if=process['alert_if'],
+                email_notification=str(process['email_notification']).lower())
+            _processes.append(_process)
+
+        _check_mode(module, True)
+        monitoring_policy =
oneandone_conn.create_monitoring_policy( + monitoring_policy=_monitoring_policy, + thresholds=_thresholds, + ports=_ports, + processes=_processes + ) + + if wait: + wait_for_resource_creation_completion( + oneandone_conn, + OneAndOneResources.monitoring_policy, + monitoring_policy['id'], + wait_timeout, + wait_interval) + + changed = True if monitoring_policy else False + + _check_mode(module, False) + + return (changed, monitoring_policy) + except Exception as ex: + module.fail_json(msg=str(ex)) + + +def remove_monitoring_policy(module, oneandone_conn): + """ + Removes a monitoring policy. + + module : AnsibleModule object + oneandone_conn: authenticated oneandone object + """ + try: + mp_id = module.params.get('name') + monitoring_policy_id = get_monitoring_policy(oneandone_conn, mp_id) + if module.check_mode: + if monitoring_policy_id is None: + _check_mode(module, False) + _check_mode(module, True) + monitoring_policy = oneandone_conn.delete_monitoring_policy(monitoring_policy_id) + + changed = True if monitoring_policy else False + + return (changed, { + 'id': monitoring_policy['id'], + 'name': monitoring_policy['name'] + }) + except Exception as ex: + module.fail_json(msg=str(ex)) + + +def main(): + module = AnsibleModule( + argument_spec=dict( + auth_token=dict( + type='str', + default=os.environ.get('ONEANDONE_AUTH_TOKEN')), + api_url=dict( + type='str', + default=os.environ.get('ONEANDONE_API_URL')), + name=dict(type='str'), + monitoring_policy=dict(type='str'), + agent=dict(type='str'), + email=dict(type='str'), + description=dict(type='str'), + thresholds=dict(type='list', default=[]), + ports=dict(type='list', default=[]), + processes=dict(type='list', default=[]), + add_ports=dict(type='list', default=[]), + update_ports=dict(type='list', default=[]), + remove_ports=dict(type='list', default=[]), + add_processes=dict(type='list', default=[]), + update_processes=dict(type='list', default=[]), + remove_processes=dict(type='list', default=[]), + add_servers=dict(type='list', default=[]), + remove_servers=dict(type='list', default=[]), + wait=dict(type='bool', default=True), + wait_timeout=dict(type='int', default=600), + wait_interval=dict(type='int', default=5), + state=dict(type='str', default='present', choices=['present', 'absent', 'update']), + ), + supports_check_mode=True + ) + + if not HAS_ONEANDONE_SDK: + module.fail_json(msg='1and1 required for this module') + + if not module.params.get('auth_token'): + module.fail_json( + msg='auth_token parameter is required.') + + if not module.params.get('api_url'): + oneandone_conn = oneandone.client.OneAndOneService( + api_token=module.params.get('auth_token')) + else: + oneandone_conn = oneandone.client.OneAndOneService( + api_token=module.params.get('auth_token'), api_url=module.params.get('api_url')) + + state = module.params.get('state') + + if state == 'absent': + if not module.params.get('name'): + module.fail_json( + msg="'name' parameter is required to delete a monitoring policy.") + try: + (changed, monitoring_policy) = remove_monitoring_policy(module, oneandone_conn) + except Exception as ex: + module.fail_json(msg=str(ex)) + elif state == 'update': + if not module.params.get('monitoring_policy'): + module.fail_json( + msg="'monitoring_policy' parameter is required to update a monitoring policy.") + try: + (changed, monitoring_policy) = update_monitoring_policy(module, oneandone_conn) + except Exception as ex: + module.fail_json(msg=str(ex)) + + elif state == 'present': + for param in ('name', 'agent', 'email', 
'thresholds', 'ports', 'processes'): + if not module.params.get(param): + module.fail_json( + msg="%s parameter is required for a new monitoring policy." % param) + try: + (changed, monitoring_policy) = create_monitoring_policy(module, oneandone_conn) + except Exception as ex: + module.fail_json(msg=str(ex)) + + module.exit_json(changed=changed, monitoring_policy=monitoring_policy) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/oneandone/oneandone_private_network.py b/plugins/modules/cloud/oneandone/oneandone_private_network.py new file mode 100644 index 0000000000..71c37e4d70 --- /dev/null +++ b/plugins/modules/cloud/oneandone/oneandone_private_network.py @@ -0,0 +1,452 @@ +#!/usr/bin/python +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: oneandone_private_network +short_description: Configure 1&1 private networking. +description: + - Create, remove, reconfigure, update a private network. + This module has a dependency on 1and1 >= 1.0 +options: + state: + description: + - Define a network's state to create, remove, or update. + required: false + default: 'present' + choices: [ "present", "absent", "update" ] + auth_token: + description: + - Authenticating API token provided by 1&1. + required: true + private_network: + description: + - The identifier (id or name) of the network used with update state. + required: true + api_url: + description: + - Custom API URL. Overrides the + ONEANDONE_API_URL environment variable. + required: false + name: + description: + - Private network name used with present state. Used as identifier (id or name) when used with absent state. + required: true + description: + description: + - Set a description for the network. + datacenter: + description: + - The identifier of the datacenter where the private network will be created + network_address: + description: + - Set a private network space, i.e. 192.168.1.0 + subnet_mask: + description: + - Set the netmask for the private network, i.e. 255.255.255.0 + add_members: + description: + - List of server identifiers (name or id) to be added to the private network. + remove_members: + description: + - List of server identifiers (name or id) to be removed from the private network. 
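The network_address/subnet_mask options above describe a plain IPv4 network. As a pre-flight sanity check in your own tooling, the pair can be validated with the standard ipaddress module (Python 3 stdlib, or its backport on Python 2); this is an illustration only, not something the module does itself:

    import ipaddress

    # e.g. network_address: 192.168.2.0, subnet_mask: 255.255.255.0
    net = ipaddress.IPv4Network('192.168.2.0/255.255.255.0')
    print(net.with_prefixlen)  # -> 192.168.2.0/24
    print(net.num_addresses)   # -> 256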
+ wait: + description: + - wait for the instance to be in state 'running' before returning + required: false + default: "yes" + type: bool + wait_timeout: + description: + - how long before wait gives up, in seconds + default: 600 + wait_interval: + description: + - Defines the number of seconds to wait when using the _wait_for methods + default: 5 + +requirements: + - "1and1" + - "python >= 2.6" + +author: + - Amel Ajdinovic (@aajdinov) + - Ethan Devenport (@edevenport) +''' + +EXAMPLES = ''' + +# Provisioning example. Create and destroy private networks. + +- oneandone_private_network: + auth_token: oneandone_private_api_key + name: backup_network + description: Testing creation of a private network with ansible + network_address: 70.35.193.100 + subnet_mask: 255.0.0.0 + datacenter: US + +- oneandone_private_network: + auth_token: oneandone_private_api_key + state: absent + name: backup_network + +# Modify the private network. + +- oneandone_private_network: + auth_token: oneandone_private_api_key + state: update + private_network: backup_network + network_address: 192.168.2.0 + subnet_mask: 255.255.255.0 + +# Add members to the private network. + +- oneandone_private_network: + auth_token: oneandone_private_api_key + state: update + private_network: backup_network + add_members: + - server identifier (id or name) + +# Remove members from the private network. + +- oneandone_private_network: + auth_token: oneandone_private_api_key + state: update + private_network: backup_network + remove_members: + - server identifier (id or name) + +''' + +RETURN = ''' +private_network: + description: Information about the private network. + type: dict + sample: '{"name": "backup_network", "id": "55726DEDA20C99CF6F2AF8F18CAC9963"}' + returned: always +''' + +import os +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.oneandone import ( + get_private_network, + get_server, + get_datacenter, + OneAndOneResources, + wait_for_resource_creation_completion, + wait_for_resource_deletion_completion +) + +HAS_ONEANDONE_SDK = True + +try: + import oneandone.client +except ImportError: + HAS_ONEANDONE_SDK = False + +DATACENTERS = ['US', 'ES', 'DE', 'GB'] + + +def _check_mode(module, result): + if module.check_mode: + module.exit_json( + changed=result + ) + + +def _add_servers(module, oneandone_conn, name, members): + try: + private_network_id = get_private_network(oneandone_conn, name) + + if module.check_mode: + if private_network_id and members: + return True + return False + + network = oneandone_conn.attach_private_network_servers( + private_network_id=private_network_id, + server_ids=members) + + return network + except Exception as e: + module.fail_json(msg=str(e)) + + +def _remove_member(module, oneandone_conn, name, member_id): + try: + private_network_id = get_private_network(oneandone_conn, name) + + if module.check_mode: + if private_network_id: + network_member = oneandone_conn.get_private_network_server( + private_network_id=private_network_id, + server_id=member_id) + if network_member: + return True + return False + + network = oneandone_conn.remove_private_network_server( + private_network_id=name, + server_id=member_id) + + return network + except Exception as ex: + module.fail_json(msg=str(ex)) + + +def create_network(module, oneandone_conn): + """ + Create new private network + + module : AnsibleModule object + oneandone_conn: authenticated oneandone object + + Returns a dictionary containing a 'changed' attribute indicating 
whether + any network was added. + """ + name = module.params.get('name') + description = module.params.get('description') + network_address = module.params.get('network_address') + subnet_mask = module.params.get('subnet_mask') + datacenter = module.params.get('datacenter') + wait = module.params.get('wait') + wait_timeout = module.params.get('wait_timeout') + wait_interval = module.params.get('wait_interval') + + if datacenter is not None: + datacenter_id = get_datacenter(oneandone_conn, datacenter) + if datacenter_id is None: + module.fail_json( + msg='datacenter %s not found.' % datacenter) + + try: + _check_mode(module, True) + network = oneandone_conn.create_private_network( + private_network=oneandone.client.PrivateNetwork( + name=name, + description=description, + network_address=network_address, + subnet_mask=subnet_mask, + datacenter_id=datacenter_id + )) + + if wait: + wait_for_resource_creation_completion( + oneandone_conn, + OneAndOneResources.private_network, + network['id'], + wait_timeout, + wait_interval) + network = get_private_network(oneandone_conn, + network['id'], + True) + + changed = True if network else False + + _check_mode(module, False) + + return (changed, network) + except Exception as e: + module.fail_json(msg=str(e)) + + +def update_network(module, oneandone_conn): + """ + Modifies a private network. + + module : AnsibleModule object + oneandone_conn: authenticated oneandone object + """ + try: + _private_network_id = module.params.get('private_network') + _name = module.params.get('name') + _description = module.params.get('description') + _network_address = module.params.get('network_address') + _subnet_mask = module.params.get('subnet_mask') + _add_members = module.params.get('add_members') + _remove_members = module.params.get('remove_members') + + changed = False + + private_network = get_private_network(oneandone_conn, + _private_network_id, + True) + if private_network is None: + _check_mode(module, False) + + if _name or _description or _network_address or _subnet_mask: + _check_mode(module, True) + private_network = oneandone_conn.modify_private_network( + private_network_id=private_network['id'], + name=_name, + description=_description, + network_address=_network_address, + subnet_mask=_subnet_mask) + changed = True + + if _add_members: + instances = [] + + for member in _add_members: + instance_id = get_server(oneandone_conn, member) + instance_obj = oneandone.client.AttachServer(server_id=instance_id) + + instances.extend([instance_obj]) + private_network = _add_servers(module, oneandone_conn, private_network['id'], instances) + _check_mode(module, private_network) + changed = True + + if _remove_members: + chk_changed = False + for member in _remove_members: + instance = get_server(oneandone_conn, member, True) + + if module.check_mode: + chk_changed |= _remove_member(module, + oneandone_conn, + private_network['id'], + instance['id']) + _check_mode(module, instance and chk_changed) + + _remove_member(module, + oneandone_conn, + private_network['id'], + instance['id']) + private_network = get_private_network(oneandone_conn, + private_network['id'], + True) + changed = True + + return (changed, private_network) + except Exception as ex: + module.fail_json(msg=str(ex)) + + +def remove_network(module, oneandone_conn): + """ + Removes a private network. + + module : AnsibleModule object + oneandone_conn: authenticated oneandone object. 
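+
+    Returns a (changed, private_network) tuple, where private_network
+    carries the removed network's id and name.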
+ """ + try: + pn_id = module.params.get('name') + wait_timeout = module.params.get('wait_timeout') + wait_interval = module.params.get('wait_interval') + + private_network_id = get_private_network(oneandone_conn, pn_id) + if module.check_mode: + if private_network_id is None: + _check_mode(module, False) + _check_mode(module, True) + private_network = oneandone_conn.delete_private_network(private_network_id) + wait_for_resource_deletion_completion(oneandone_conn, + OneAndOneResources.private_network, + private_network['id'], + wait_timeout, + wait_interval) + + changed = True if private_network else False + + return (changed, { + 'id': private_network['id'], + 'name': private_network['name'] + }) + except Exception as e: + module.fail_json(msg=str(e)) + + +def main(): + module = AnsibleModule( + argument_spec=dict( + auth_token=dict( + type='str', + default=os.environ.get('ONEANDONE_AUTH_TOKEN')), + api_url=dict( + type='str', + default=os.environ.get('ONEANDONE_API_URL')), + private_network=dict(type='str'), + name=dict(type='str'), + description=dict(type='str'), + network_address=dict(type='str'), + subnet_mask=dict(type='str'), + add_members=dict(type='list', default=[]), + remove_members=dict(type='list', default=[]), + datacenter=dict( + choices=DATACENTERS), + wait=dict(type='bool', default=True), + wait_timeout=dict(type='int', default=600), + wait_interval=dict(type='int', default=5), + state=dict(type='str', default='present', choices=['present', 'absent', 'update']), + ), + supports_check_mode=True + ) + + if not HAS_ONEANDONE_SDK: + module.fail_json(msg='1and1 required for this module') + + if not module.params.get('auth_token'): + module.fail_json( + msg='auth_token parameter is required.') + + if not module.params.get('api_url'): + oneandone_conn = oneandone.client.OneAndOneService( + api_token=module.params.get('auth_token')) + else: + oneandone_conn = oneandone.client.OneAndOneService( + api_token=module.params.get('auth_token'), api_url=module.params.get('api_url')) + + state = module.params.get('state') + + if state == 'absent': + if not module.params.get('name'): + module.fail_json( + msg="'name' parameter is required for deleting a network.") + try: + (changed, private_network) = remove_network(module, oneandone_conn) + except Exception as e: + module.fail_json(msg=str(e)) + elif state == 'update': + if not module.params.get('private_network'): + module.fail_json( + msg="'private_network' parameter is required for updating a network.") + try: + (changed, private_network) = update_network(module, oneandone_conn) + except Exception as e: + module.fail_json(msg=str(e)) + elif state == 'present': + if not module.params.get('name'): + module.fail_json( + msg="'name' parameter is required for new networks.") + try: + (changed, private_network) = create_network(module, oneandone_conn) + except Exception as e: + module.fail_json(msg=str(e)) + + module.exit_json(changed=changed, private_network=private_network) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/oneandone/oneandone_public_ip.py b/plugins/modules/cloud/oneandone/oneandone_public_ip.py new file mode 100644 index 0000000000..a4b086aca5 --- /dev/null +++ b/plugins/modules/cloud/oneandone/oneandone_public_ip.py @@ -0,0 +1,342 @@ +#!/usr/bin/python +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at 
your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: oneandone_public_ip +short_description: Configure 1&1 public IPs. +description: + - Create, update, and remove public IPs. + This module has a dependency on 1and1 >= 1.0 +options: + state: + description: + - Define a public ip state to create, remove, or update. + required: false + default: 'present' + choices: [ "present", "absent", "update" ] + auth_token: + description: + - Authenticating API token provided by 1&1. + required: true + api_url: + description: + - Custom API URL. Overrides the + ONEANDONE_API_URL environment variable. + required: false + reverse_dns: + description: + - Reverse DNS name. maxLength=256 + required: false + datacenter: + description: + - ID of the datacenter where the IP will be created (only for unassigned IPs). + required: false + type: + description: + - Type of IP. Currently, only IPV4 is available. + choices: ["IPV4", "IPV6"] + default: 'IPV4' + required: false + public_ip_id: + description: + - The ID of the public IP used with update and delete states. + required: true + wait: + description: + - wait for the instance to be in state 'running' before returning + required: false + default: "yes" + type: bool + wait_timeout: + description: + - how long before wait gives up, in seconds + default: 600 + wait_interval: + description: + - Defines the number of seconds to wait when using the _wait_for methods + default: 5 + +requirements: + - "1and1" + - "python >= 2.6" + +author: + - Amel Ajdinovic (@aajdinov) + - Ethan Devenport (@edevenport) +''' + +EXAMPLES = ''' + +# Create a public IP. + +- oneandone_public_ip: + auth_token: oneandone_private_api_key + reverse_dns: example.com + datacenter: US + type: IPV4 + +# Update a public IP. 
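+# (only the reverse DNS record of an existing IP can be modified)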
+ +- oneandone_public_ip: + auth_token: oneandone_private_api_key + public_ip_id: public ip id + reverse_dns: secondexample.com + state: update + + +# Delete a public IP + +- oneandone_public_ip: + auth_token: oneandone_private_api_key + public_ip_id: public ip id + state: absent + +''' + +RETURN = ''' +public_ip: + description: Information about the public ip that was processed + type: dict + sample: '{"id": "F77CC589EBC120905B4F4719217BFF6D", "ip": "10.5.132.106"}' + returned: always +''' + +import os +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.oneandone import ( + get_datacenter, + get_public_ip, + OneAndOneResources, + wait_for_resource_creation_completion +) + +HAS_ONEANDONE_SDK = True + +try: + import oneandone.client +except ImportError: + HAS_ONEANDONE_SDK = False + +DATACENTERS = ['US', 'ES', 'DE', 'GB'] + +TYPES = ['IPV4', 'IPV6'] + + +def _check_mode(module, result): + if module.check_mode: + module.exit_json( + changed=result + ) + + +def create_public_ip(module, oneandone_conn): + """ + Create new public IP + + module : AnsibleModule object + oneandone_conn: authenticated oneandone object + + Returns a dictionary containing a 'changed' attribute indicating whether + any public IP was added. + """ + reverse_dns = module.params.get('reverse_dns') + datacenter = module.params.get('datacenter') + ip_type = module.params.get('type') + wait = module.params.get('wait') + wait_timeout = module.params.get('wait_timeout') + wait_interval = module.params.get('wait_interval') + + if datacenter is not None: + datacenter_id = get_datacenter(oneandone_conn, datacenter) + if datacenter_id is None: + _check_mode(module, False) + module.fail_json( + msg='datacenter %s not found.' % datacenter) + + try: + _check_mode(module, True) + public_ip = oneandone_conn.create_public_ip( + reverse_dns=reverse_dns, + ip_type=ip_type, + datacenter_id=datacenter_id) + + if wait: + wait_for_resource_creation_completion(oneandone_conn, + OneAndOneResources.public_ip, + public_ip['id'], + wait_timeout, + wait_interval) + public_ip = oneandone_conn.get_public_ip(public_ip['id']) + + changed = True if public_ip else False + + return (changed, public_ip) + except Exception as e: + module.fail_json(msg=str(e)) + + +def update_public_ip(module, oneandone_conn): + """ + Update a public IP + + module : AnsibleModule object + oneandone_conn: authenticated oneandone object + + Returns a dictionary containing a 'changed' attribute indicating whether + any public IP was changed. + """ + reverse_dns = module.params.get('reverse_dns') + public_ip_id = module.params.get('public_ip_id') + wait = module.params.get('wait') + wait_timeout = module.params.get('wait_timeout') + wait_interval = module.params.get('wait_interval') + + public_ip = get_public_ip(oneandone_conn, public_ip_id, True) + if public_ip is None: + _check_mode(module, False) + module.fail_json( + msg='public IP %s not found.' 
% public_ip_id) + + try: + _check_mode(module, True) + public_ip = oneandone_conn.modify_public_ip( + ip_id=public_ip['id'], + reverse_dns=reverse_dns) + + if wait: + wait_for_resource_creation_completion(oneandone_conn, + OneAndOneResources.public_ip, + public_ip['id'], + wait_timeout, + wait_interval) + public_ip = oneandone_conn.get_public_ip(public_ip['id']) + + changed = True if public_ip else False + + return (changed, public_ip) + except Exception as e: + module.fail_json(msg=str(e)) + + +def delete_public_ip(module, oneandone_conn): + """ + Delete a public IP + + module : AnsibleModule object + oneandone_conn: authenticated oneandone object + + Returns a dictionary containing a 'changed' attribute indicating whether + any public IP was deleted. + """ + public_ip_id = module.params.get('public_ip_id') + + public_ip = get_public_ip(oneandone_conn, public_ip_id, True) + if public_ip is None: + _check_mode(module, False) + module.fail_json( + msg='public IP %s not found.' % public_ip_id) + + try: + _check_mode(module, True) + deleted_public_ip = oneandone_conn.delete_public_ip( + ip_id=public_ip['id']) + + changed = True if deleted_public_ip else False + + return (changed, { + 'id': public_ip['id'] + }) + except Exception as e: + module.fail_json(msg=str(e)) + + +def main(): + module = AnsibleModule( + argument_spec=dict( + auth_token=dict( + type='str', + default=os.environ.get('ONEANDONE_AUTH_TOKEN')), + api_url=dict( + type='str', + default=os.environ.get('ONEANDONE_API_URL')), + public_ip_id=dict(type='str'), + reverse_dns=dict(type='str'), + datacenter=dict( + choices=DATACENTERS, + default='US'), + type=dict( + choices=TYPES, + default='IPV4'), + wait=dict(type='bool', default=True), + wait_timeout=dict(type='int', default=600), + wait_interval=dict(type='int', default=5), + state=dict(type='str', default='present', choices=['present', 'absent', 'update']), + ), + supports_check_mode=True + ) + + if not HAS_ONEANDONE_SDK: + module.fail_json(msg='1and1 required for this module') + + if not module.params.get('auth_token'): + module.fail_json( + msg='auth_token parameter is required.') + + if not module.params.get('api_url'): + oneandone_conn = oneandone.client.OneAndOneService( + api_token=module.params.get('auth_token')) + else: + oneandone_conn = oneandone.client.OneAndOneService( + api_token=module.params.get('auth_token'), api_url=module.params.get('api_url')) + + state = module.params.get('state') + + if state == 'absent': + if not module.params.get('public_ip_id'): + module.fail_json( + msg="'public_ip_id' parameter is required to delete a public ip.") + try: + (changed, public_ip) = delete_public_ip(module, oneandone_conn) + except Exception as e: + module.fail_json(msg=str(e)) + elif state == 'update': + if not module.params.get('public_ip_id'): + module.fail_json( + msg="'public_ip_id' parameter is required to update a public ip.") + try: + (changed, public_ip) = update_public_ip(module, oneandone_conn) + except Exception as e: + module.fail_json(msg=str(e)) + + elif state == 'present': + try: + (changed, public_ip) = create_public_ip(module, oneandone_conn) + except Exception as e: + module.fail_json(msg=str(e)) + + module.exit_json(changed=changed, public_ip=public_ip) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/oneandone/oneandone_server.py b/plugins/modules/cloud/oneandone/oneandone_server.py new file mode 100644 index 0000000000..d4cb11ec73 --- /dev/null +++ b/plugins/modules/cloud/oneandone/oneandone_server.py @@ -0,0 +1,696 @@ 
+#!/usr/bin/python +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: oneandone_server +short_description: Create, destroy, start, stop, and reboot a 1&1 Host server. +description: + - Create, destroy, update, start, stop, and reboot a 1&1 Host server. + When the server is created it can optionally wait for it to be 'running' before returning. +options: + state: + description: + - Define a server's state to create, remove, start or stop it. + default: present + choices: [ "present", "absent", "running", "stopped" ] + auth_token: + description: + - Authenticating API token provided by 1&1. Overrides the + ONEANDONE_AUTH_TOKEN environment variable. + required: true + api_url: + description: + - Custom API URL. Overrides the + ONEANDONE_API_URL environment variable. + datacenter: + description: + - The datacenter location. + default: US + choices: [ "US", "ES", "DE", "GB" ] + hostname: + description: + - The hostname or ID of the server. Only used when state is 'present'. + description: + description: + - The description of the server. + appliance: + description: + - The operating system name or ID for the server. + It is required only for 'present' state. + fixed_instance_size: + description: + - The instance size name or ID of the server. + It is required only for 'present' state, and it is mutually exclusive with + vcore, cores_per_processor, ram, and hdds parameters. + required: true + choices: [ "S", "M", "L", "XL", "XXL", "3XL", "4XL", "5XL" ] + vcore: + description: + - The total number of processors. + It must be provided with cores_per_processor, ram, and hdds parameters. + cores_per_processor: + description: + - The number of cores per processor. + It must be provided with vcore, ram, and hdds parameters. + ram: + description: + - The amount of RAM memory. + It must be provided with with vcore, cores_per_processor, and hdds parameters. + hdds: + description: + - A list of hard disks with nested "size" and "is_main" properties. + It must be provided with vcore, cores_per_processor, and ram parameters. + private_network: + description: + - The private network name or ID. + firewall_policy: + description: + - The firewall policy name or ID. + load_balancer: + description: + - The load balancer name or ID. + monitoring_policy: + description: + - The monitoring policy name or ID. + server: + description: + - Server identifier (ID or hostname). It is required for all states except 'running' and 'present'. + count: + description: + - The number of servers to create. + default: 1 + ssh_key: + description: + - User's public SSH key (contents, not path). + server_type: + description: + - The type of server to be built. 
+ default: "cloud" + choices: [ "cloud", "baremetal", "k8s_node" ] + wait: + description: + - Wait for the server to be in state 'running' before returning. + Also used for delete operation (set to 'false' if you don't want to wait + for each individual server to be deleted before moving on with + other tasks.) + type: bool + default: 'yes' + wait_timeout: + description: + - how long before wait gives up, in seconds + default: 600 + wait_interval: + description: + - Defines the number of seconds to wait when using the wait_for methods + default: 5 + auto_increment: + description: + - When creating multiple servers at once, whether to differentiate + hostnames by appending a count after them or substituting the count + where there is a %02d or %03d in the hostname string. + type: bool + default: 'yes' + +requirements: + - "1and1" + - "python >= 2.6" + +author: + - "Amel Ajdinovic (@aajdinov)" + - "Ethan Devenport (@edevenport)" + +''' + +EXAMPLES = ''' + +# Provisioning example. Creates three servers and enumerate their names. + +- oneandone_server: + auth_token: oneandone_private_api_key + hostname: node%02d + fixed_instance_size: XL + datacenter: US + appliance: C5A349786169F140BCBC335675014C08 + auto_increment: true + count: 3 + +# Create three servers, passing in an ssh_key. + +- oneandone_server: + auth_token: oneandone_private_api_key + hostname: node%02d + vcore: 2 + cores_per_processor: 4 + ram: 8.0 + hdds: + - size: 50 + is_main: false + datacenter: ES + appliance: C5A349786169F140BCBC335675014C08 + count: 3 + wait: yes + wait_timeout: 600 + wait_interval: 10 + ssh_key: SSH_PUBLIC_KEY + +# Removing server + +- oneandone_server: + auth_token: oneandone_private_api_key + state: absent + server: 'node01' + +# Starting server. + +- oneandone_server: + auth_token: oneandone_private_api_key + state: running + server: 'node01' + +# Stopping server + +- oneandone_server: + auth_token: oneandone_private_api_key + state: stopped + server: 'node01' +''' + +RETURN = ''' +servers: + description: Information about each server that was processed + type: list + sample: '[{"hostname": "my-server", "id": "server-id"}]' + returned: always +''' + +import os +import time +from ansible.module_utils.six.moves import xrange +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.oneandone import ( + get_datacenter, + get_fixed_instance_size, + get_appliance, + get_private_network, + get_monitoring_policy, + get_firewall_policy, + get_load_balancer, + get_server, + OneAndOneResources, + wait_for_resource_creation_completion, + wait_for_resource_deletion_completion +) + +HAS_ONEANDONE_SDK = True + +try: + import oneandone.client +except ImportError: + HAS_ONEANDONE_SDK = False + +DATACENTERS = ['US', 'ES', 'DE', 'GB'] + +ONEANDONE_SERVER_STATES = ( + 'DEPLOYING', + 'POWERED_OFF', + 'POWERED_ON', + 'POWERING_ON', + 'POWERING_OFF', +) + + +def _check_mode(module, result): + if module.check_mode: + module.exit_json( + changed=result + ) + + +def _create_server(module, oneandone_conn, hostname, description, + fixed_instance_size_id, vcore, cores_per_processor, ram, + hdds, datacenter_id, appliance_id, ssh_key, + private_network_id, firewall_policy_id, load_balancer_id, + monitoring_policy_id, server_type, wait, wait_timeout, + wait_interval): + + try: + existing_server = get_server(oneandone_conn, hostname) + + if existing_server: + if module.check_mode: + return False + return None + + if module.check_mode: + return True + + server = 
oneandone_conn.create_server( + oneandone.client.Server( + name=hostname, + description=description, + fixed_instance_size_id=fixed_instance_size_id, + vcore=vcore, + cores_per_processor=cores_per_processor, + ram=ram, + appliance_id=appliance_id, + datacenter_id=datacenter_id, + rsa_key=ssh_key, + private_network_id=private_network_id, + firewall_policy_id=firewall_policy_id, + load_balancer_id=load_balancer_id, + monitoring_policy_id=monitoring_policy_id, + server_type=server_type,), hdds) + + if wait: + wait_for_resource_creation_completion( + oneandone_conn, + OneAndOneResources.server, + server['id'], + wait_timeout, + wait_interval) + server = oneandone_conn.get_server(server['id']) # refresh + + return server + except Exception as ex: + module.fail_json(msg=str(ex)) + + +def _insert_network_data(server): + for addr_data in server['ips']: + if addr_data['type'] == 'IPV6': + server['public_ipv6'] = addr_data['ip'] + elif addr_data['type'] == 'IPV4': + server['public_ipv4'] = addr_data['ip'] + return server + + +def create_server(module, oneandone_conn): + """ + Create new server + + module : AnsibleModule object + oneandone_conn: authenticated oneandone object + + Returns a dictionary containing a 'changed' attribute indicating whether + any server was added, and a 'servers' attribute with the list of the + created servers' hostname, id and ip addresses. + """ + hostname = module.params.get('hostname') + description = module.params.get('description') + auto_increment = module.params.get('auto_increment') + count = module.params.get('count') + fixed_instance_size = module.params.get('fixed_instance_size') + vcore = module.params.get('vcore') + cores_per_processor = module.params.get('cores_per_processor') + ram = module.params.get('ram') + hdds = module.params.get('hdds') + datacenter = module.params.get('datacenter') + appliance = module.params.get('appliance') + ssh_key = module.params.get('ssh_key') + private_network = module.params.get('private_network') + monitoring_policy = module.params.get('monitoring_policy') + firewall_policy = module.params.get('firewall_policy') + load_balancer = module.params.get('load_balancer') + server_type = module.params.get('server_type') + wait = module.params.get('wait') + wait_timeout = module.params.get('wait_timeout') + wait_interval = module.params.get('wait_interval') + + datacenter_id = get_datacenter(oneandone_conn, datacenter) + if datacenter_id is None: + _check_mode(module, False) + module.fail_json( + msg='datacenter %s not found.' % datacenter) + + fixed_instance_size_id = None + if fixed_instance_size: + fixed_instance_size_id = get_fixed_instance_size( + oneandone_conn, + fixed_instance_size) + if fixed_instance_size_id is None: + _check_mode(module, False) + module.fail_json( + msg='fixed_instance_size %s not found.' % fixed_instance_size) + + appliance_id = get_appliance(oneandone_conn, appliance) + if appliance_id is None: + _check_mode(module, False) + module.fail_json( + msg='appliance %s not found.' % appliance) + + private_network_id = None + if private_network: + private_network_id = get_private_network( + oneandone_conn, + private_network) + if private_network_id is None: + _check_mode(module, False) + module.fail_json( + msg='private network %s not found.' 
% private_network) + + monitoring_policy_id = None + if monitoring_policy: + monitoring_policy_id = get_monitoring_policy( + oneandone_conn, + monitoring_policy) + if monitoring_policy_id is None: + _check_mode(module, False) + module.fail_json( + msg='monitoring policy %s not found.' % monitoring_policy) + + firewall_policy_id = None + if firewall_policy: + firewall_policy_id = get_firewall_policy( + oneandone_conn, + firewall_policy) + if firewall_policy_id is None: + _check_mode(module, False) + module.fail_json( + msg='firewall policy %s not found.' % firewall_policy) + + load_balancer_id = None + if load_balancer: + load_balancer_id = get_load_balancer( + oneandone_conn, + load_balancer) + if load_balancer_id is None: + _check_mode(module, False) + module.fail_json( + msg='load balancer %s not found.' % load_balancer) + + if auto_increment: + hostnames = _auto_increment_hostname(count, hostname) + descriptions = _auto_increment_description(count, description) + else: + hostnames = [hostname] * count + descriptions = [description] * count + + hdd_objs = [] + if hdds: + for hdd in hdds: + hdd_objs.append(oneandone.client.Hdd( + size=hdd['size'], + is_main=hdd['is_main'] + )) + + servers = [] + for index, name in enumerate(hostnames): + server = _create_server( + module=module, + oneandone_conn=oneandone_conn, + hostname=name, + description=descriptions[index], + fixed_instance_size_id=fixed_instance_size_id, + vcore=vcore, + cores_per_processor=cores_per_processor, + ram=ram, + hdds=hdd_objs, + datacenter_id=datacenter_id, + appliance_id=appliance_id, + ssh_key=ssh_key, + private_network_id=private_network_id, + monitoring_policy_id=monitoring_policy_id, + firewall_policy_id=firewall_policy_id, + load_balancer_id=load_balancer_id, + server_type=server_type, + wait=wait, + wait_timeout=wait_timeout, + wait_interval=wait_interval) + if server: + servers.append(server) + + changed = False + + if servers: + for server in servers: + if server: + _check_mode(module, True) + _check_mode(module, False) + servers = [_insert_network_data(_server) for _server in servers] + changed = True + + _check_mode(module, False) + + return (changed, servers) + + +def remove_server(module, oneandone_conn): + """ + Removes a server. + + module : AnsibleModule object + oneandone_conn: authenticated oneandone object. + + Returns a dictionary containing a 'changed' attribute indicating whether + the server was removed, and a 'removed_server' attribute with + the removed server's hostname and id. + """ + server_id = module.params.get('server') + wait = module.params.get('wait') + wait_timeout = module.params.get('wait_timeout') + wait_interval = module.params.get('wait_interval') + + changed = False + removed_server = None + + server = get_server(oneandone_conn, server_id, True) + if server: + _check_mode(module, True) + try: + oneandone_conn.delete_server(server_id=server['id']) + if wait: + wait_for_resource_deletion_completion(oneandone_conn, + OneAndOneResources.server, + server['id'], + wait_timeout, + wait_interval) + changed = True + except Exception as ex: + module.fail_json( + msg="failed to terminate the server: %s" % str(ex)) + + removed_server = { + 'id': server['id'], + 'hostname': server['name'] + } + _check_mode(module, False) + + return (changed, removed_server) + + +def startstop_server(module, oneandone_conn): + """ + Starts or Stops a server. + + module : AnsibleModule object + oneandone_conn: authenticated oneandone object. 
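+
+    Power transitions use the SOFTWARE method, and no action is taken
+    when the server is already in, or already moving towards, the
+    requested state.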
+ + Returns a dictionary with a 'changed' attribute indicating whether + anything has changed for the server as a result of this function + being run, and a 'server' attribute with basic information for + the server. + """ + state = module.params.get('state') + server_id = module.params.get('server') + wait = module.params.get('wait') + wait_timeout = module.params.get('wait_timeout') + wait_interval = module.params.get('wait_interval') + + changed = False + + # Resolve server + server = get_server(oneandone_conn, server_id, True) + if server: + # Attempt to change the server state, only if it's not already there + # or on its way. + try: + if state == 'stopped' and server['status']['state'] == 'POWERED_ON': + _check_mode(module, True) + oneandone_conn.modify_server_status( + server_id=server['id'], + action='POWER_OFF', + method='SOFTWARE') + elif state == 'running' and server['status']['state'] == 'POWERED_OFF': + _check_mode(module, True) + oneandone_conn.modify_server_status( + server_id=server['id'], + action='POWER_ON', + method='SOFTWARE') + except Exception as ex: + module.fail_json( + msg="failed to set server %s to state %s: %s" % ( + server_id, state, str(ex))) + + _check_mode(module, False) + + # Make sure the server has reached the desired state + if wait: + operation_completed = False + wait_timeout = time.time() + wait_timeout + while wait_timeout > time.time(): + time.sleep(wait_interval) + server = oneandone_conn.get_server(server['id']) # refresh + server_state = server['status']['state'] + if state == 'stopped' and server_state == 'POWERED_OFF': + operation_completed = True + break + if state == 'running' and server_state == 'POWERED_ON': + operation_completed = True + break + if not operation_completed: + module.fail_json( + msg="Timeout waiting for server %s to get to state %s" % ( + server_id, state)) + + changed = True + server = _insert_network_data(server) + + _check_mode(module, False) + + return (changed, server) + + +def _auto_increment_hostname(count, hostname): + """ + Allow a custom incremental count in the hostname when defined with the + string formatting (%) operator. Otherwise, increment using name-01, + name-02, name-03, and so forth. + """ + if '%' not in hostname: + hostname = "%s-%%01d" % hostname + + return [ + hostname % i + for i in xrange(1, count + 1) + ] + + +def _auto_increment_description(count, description): + """ + Allow the incremental count in the description when defined with the + string formatting (%) operator. Otherwise, repeat the same description. 
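+
+    For example, count=2 with 'web server %d' yields
+    ['web server 1', 'web server 2'], while a plain 'web server'
+    yields ['web server', 'web server'].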
+ """ + if '%' in description: + return [ + description % i + for i in xrange(1, count + 1) + ] + else: + return [description] * count + + +def main(): + module = AnsibleModule( + argument_spec=dict( + auth_token=dict( + type='str', + default=os.environ.get('ONEANDONE_AUTH_TOKEN'), + no_log=True), + api_url=dict( + type='str', + default=os.environ.get('ONEANDONE_API_URL')), + hostname=dict(type='str'), + description=dict(type='str'), + appliance=dict(type='str'), + fixed_instance_size=dict(type='str'), + vcore=dict(type='int'), + cores_per_processor=dict(type='int'), + ram=dict(type='float'), + hdds=dict(type='list'), + count=dict(type='int', default=1), + ssh_key=dict(type='raw'), + auto_increment=dict(type='bool', default=True), + server=dict(type='str'), + datacenter=dict( + choices=DATACENTERS, + default='US'), + private_network=dict(type='str'), + firewall_policy=dict(type='str'), + load_balancer=dict(type='str'), + monitoring_policy=dict(type='str'), + server_type=dict(type='str', default='cloud', choices=['cloud', 'baremetal', 'k8s_node']), + wait=dict(type='bool', default=True), + wait_timeout=dict(type='int', default=600), + wait_interval=dict(type='int', default=5), + state=dict(type='str', default='present', choices=['present', 'absent', 'running', 'stopped']), + ), + supports_check_mode=True, + mutually_exclusive=(['fixed_instance_size', 'vcore'], ['fixed_instance_size', 'cores_per_processor'], + ['fixed_instance_size', 'ram'], ['fixed_instance_size', 'hdds'],), + required_together=(['vcore', 'cores_per_processor', 'ram', 'hdds'],) + ) + + if not HAS_ONEANDONE_SDK: + module.fail_json(msg='1and1 required for this module') + + if not module.params.get('auth_token'): + module.fail_json( + msg='The "auth_token" parameter or ' + + 'ONEANDONE_AUTH_TOKEN environment variable is required.') + + if not module.params.get('api_url'): + oneandone_conn = oneandone.client.OneAndOneService( + api_token=module.params.get('auth_token')) + else: + oneandone_conn = oneandone.client.OneAndOneService( + api_token=module.params.get('auth_token'), api_url=module.params.get('api_url')) + + state = module.params.get('state') + + if state == 'absent': + if not module.params.get('server'): + module.fail_json( + msg="'server' parameter is required for deleting a server.") + try: + (changed, servers) = remove_server(module, oneandone_conn) + except Exception as ex: + module.fail_json(msg=str(ex)) + + elif state in ('running', 'stopped'): + if not module.params.get('server'): + module.fail_json( + msg="'server' parameter is required for starting/stopping a server.") + try: + (changed, servers) = startstop_server(module, oneandone_conn) + except Exception as ex: + module.fail_json(msg=str(ex)) + + elif state == 'present': + for param in ('hostname', + 'appliance', + 'datacenter'): + if not module.params.get(param): + module.fail_json( + msg="%s parameter is required for new server." 
% param) + try: + (changed, servers) = create_server(module, oneandone_conn) + except Exception as ex: + module.fail_json(msg=str(ex)) + + module.exit_json(changed=changed, servers=servers) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/online/online_server_facts.py b/plugins/modules/cloud/online/online_server_facts.py new file mode 100644 index 0000000000..a2ca9ed691 --- /dev/null +++ b/plugins/modules/cloud/online/online_server_facts.py @@ -0,0 +1,179 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['deprecated'], + 'supported_by': 'community'} + +DOCUMENTATION = r''' +--- +module: online_server_facts +deprecated: + removed_in: '2.13' + why: Deprecated in favour of C(_info) module. + alternative: Use M(online_server_info) instead. +short_description: Gather facts about Online servers. +description: + - Gather facts about the servers. + - U(https://www.online.net/en/dedicated-server) +author: + - "Remy Leone (@sieben)" +extends_documentation_fragment: +- community.general.online + +''' + +EXAMPLES = r''' +- name: Gather Online server facts + online_server_facts: + api_token: '0d1627e8-bbf0-44c5-a46f-5c4d3aef033f' +''' + +RETURN = r''' +--- +online_server_facts: + description: Response from Online API + returned: success + type: complex + sample: + "online_server_facts": [ + { + "abuse": "abuse@example.com", + "anti_ddos": false, + "bmc": { + "session_key": null + }, + "boot_mode": "normal", + "contacts": { + "owner": "foobar", + "tech": "foobar" + }, + "disks": [ + { + "$ref": "/api/v1/server/hardware/disk/68452" + }, + { + "$ref": "/api/v1/server/hardware/disk/68453" + } + ], + "drive_arrays": [ + { + "disks": [ + { + "$ref": "/api/v1/server/hardware/disk/68452" + }, + { + "$ref": "/api/v1/server/hardware/disk/68453" + } + ], + "raid_controller": { + "$ref": "/api/v1/server/hardware/raidController/9910" + }, + "raid_level": "RAID1" + } + ], + "hardware_watch": true, + "hostname": "sd-42", + "id": 42, + "ip": [ + { + "address": "195.154.172.149", + "mac": "28:92:4a:33:5e:c6", + "reverse": "195-154-172-149.rev.poneytelecom.eu.", + "switch_port_state": "up", + "type": "public" + }, + { + "address": "10.90.53.212", + "mac": "28:92:4a:33:5e:c7", + "reverse": null, + "switch_port_state": "up", + "type": "private" + } + ], + "last_reboot": "2018-08-23T08:32:03.000Z", + "location": { + "block": "A", + "datacenter": "DC3", + "position": 19, + "rack": "A23", + "room": "4 4-4" + }, + "network": { + "ip": [ + "195.154.172.149" + ], + "ipfo": [], + "private": [ + "10.90.53.212" + ] + }, + "offer": "Pro-1-S-SATA", + "os": { + "name": "FreeBSD", + "version": "11.1-RELEASE" + }, + "power": "ON", + "proactive_monitoring": false, + "raid_controllers": [ + { + "$ref": "/api/v1/server/hardware/raidController/9910" + } + ], + "support": "Basic service level" + } + ] +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.online import ( + Online, OnlineException, online_argument_spec +) + + +class OnlineServerFacts(Online): + + def __init__(self, module): + super(OnlineServerFacts, self).__init__(module) + self.name = 'api/v1/server' + + def _get_server_detail(self, server_path): + try: + return self.get(path=server_path).json + except OnlineException as exc: + 
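+            # fail_json() exits the module, so a single failed lookup
+            # aborts the run instead of returning partial facts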
self.module.fail_json(msg="A problem occurred while fetching: %s (%s)" % (server_path, exc)) + + def all_detailed_servers(self): + servers_api_path = self.get_resources() + + server_data = ( + self._get_server_detail(server_api_path) + for server_api_path in servers_api_path + ) + + return [s for s in server_data if s is not None] + + +def main(): + module = AnsibleModule( + argument_spec=online_argument_spec(), + supports_check_mode=True, + ) + + try: + servers_facts = OnlineServerFacts(module).all_detailed_servers() + module.exit_json( + ansible_facts={'online_server_facts': servers_facts} + ) + except OnlineException as exc: + module.fail_json(msg=exc.message) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/online/online_server_info.py b/plugins/modules/cloud/online/online_server_info.py new file mode 100644 index 0000000000..0a7c72fe1c --- /dev/null +++ b/plugins/modules/cloud/online/online_server_info.py @@ -0,0 +1,179 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = r''' +--- +module: online_server_info +short_description: Gather information about Online servers. +description: + - Gather information about the servers. + - U(https://www.online.net/en/dedicated-server) +author: + - "Remy Leone (@sieben)" +extends_documentation_fragment: +- community.general.online + +''' + +EXAMPLES = r''' +- name: Gather Online server information + online_server_info: + api_token: '0d1627e8-bbf0-44c5-a46f-5c4d3aef033f' + register: result + +- debug: + msg: "{{ result.online_server_info }}" +''' + +RETURN = r''' +--- +online_server_info: + description: Response from Online API + returned: success + type: complex + sample: + "online_server_info": [ + { + "abuse": "abuse@example.com", + "anti_ddos": false, + "bmc": { + "session_key": null + }, + "boot_mode": "normal", + "contacts": { + "owner": "foobar", + "tech": "foobar" + }, + "disks": [ + { + "$ref": "/api/v1/server/hardware/disk/68452" + }, + { + "$ref": "/api/v1/server/hardware/disk/68453" + } + ], + "drive_arrays": [ + { + "disks": [ + { + "$ref": "/api/v1/server/hardware/disk/68452" + }, + { + "$ref": "/api/v1/server/hardware/disk/68453" + } + ], + "raid_controller": { + "$ref": "/api/v1/server/hardware/raidController/9910" + }, + "raid_level": "RAID1" + } + ], + "hardware_watch": true, + "hostname": "sd-42", + "id": 42, + "ip": [ + { + "address": "195.154.172.149", + "mac": "28:92:4a:33:5e:c6", + "reverse": "195-154-172-149.rev.poneytelecom.eu.", + "switch_port_state": "up", + "type": "public" + }, + { + "address": "10.90.53.212", + "mac": "28:92:4a:33:5e:c7", + "reverse": null, + "switch_port_state": "up", + "type": "private" + } + ], + "last_reboot": "2018-08-23T08:32:03.000Z", + "location": { + "block": "A", + "datacenter": "DC3", + "position": 19, + "rack": "A23", + "room": "4 4-4" + }, + "network": { + "ip": [ + "195.154.172.149" + ], + "ipfo": [], + "private": [ + "10.90.53.212" + ] + }, + "offer": "Pro-1-S-SATA", + "os": { + "name": "FreeBSD", + "version": "11.1-RELEASE" + }, + "power": "ON", + "proactive_monitoring": false, + "raid_controllers": [ + { + "$ref": "/api/v1/server/hardware/raidController/9910" + } + ], + "support": "Basic service level" + } + ] +''' + +from ansible.module_utils.basic 
import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.online import ( + Online, OnlineException, online_argument_spec +) + + +class OnlineServerInfo(Online): + + def __init__(self, module): + super(OnlineServerInfo, self).__init__(module) + self.name = 'api/v1/server' + + def _get_server_detail(self, server_path): + try: + return self.get(path=server_path).json + except OnlineException as exc: + self.module.fail_json(msg="A problem occurred while fetching: %s (%s)" % (server_path, exc)) + + def all_detailed_servers(self): + servers_api_path = self.get_resources() + + server_data = ( + self._get_server_detail(server_api_path) + for server_api_path in servers_api_path + ) + + return [s for s in server_data if s is not None] + + +def main(): + module = AnsibleModule( + argument_spec=online_argument_spec(), + supports_check_mode=True, + ) + + try: + servers_info = OnlineServerInfo(module).all_detailed_servers() + module.exit_json( + online_server_info=servers_info + ) + except OnlineException as exc: + module.fail_json(msg=exc.message) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/online/online_user_facts.py b/plugins/modules/cloud/online/online_user_facts.py new file mode 100644 index 0000000000..cfb3db1284 --- /dev/null +++ b/plugins/modules/cloud/online/online_user_facts.py @@ -0,0 +1,80 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['deprecated'], + 'supported_by': 'community'} + +DOCUMENTATION = r''' +--- +module: online_user_facts +deprecated: + removed_in: '2.13' + why: Deprecated in favour of C(_info) module. + alternative: Use M(online_user_info) instead. +short_description: Gather facts about Online user. +description: + - Gather facts about the user. 
+author: + - "Remy Leone (@sieben)" +extends_documentation_fragment: +- community.general.online + +''' + +EXAMPLES = r''' +- name: Gather Online user facts + online_user_facts: +''' + +RETURN = r''' +--- +online_user_facts: + description: Response from Online API + returned: success + type: complex + sample: + "online_user_facts": { + "company": "foobar LLC", + "email": "foobar@example.com", + "first_name": "foo", + "id": 42, + "last_name": "bar", + "login": "foobar" + } +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.online import ( + Online, OnlineException, online_argument_spec +) + + +class OnlineUserFacts(Online): + + def __init__(self, module): + super(OnlineUserFacts, self).__init__(module) + self.name = 'api/v1/user' + + +def main(): + module = AnsibleModule( + argument_spec=online_argument_spec(), + supports_check_mode=True, + ) + + try: + module.exit_json( + ansible_facts={'online_user_facts': OnlineUserFacts(module).get_resources()} + ) + except OnlineException as exc: + module.fail_json(msg=exc.message) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/online/online_user_info.py b/plugins/modules/cloud/online/online_user_info.py new file mode 100644 index 0000000000..47d4b28de0 --- /dev/null +++ b/plugins/modules/cloud/online/online_user_info.py @@ -0,0 +1,80 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = r''' +--- +module: online_user_info +short_description: Gather information about Online user. +description: + - Gather information about the user. 
+author: + - "Remy Leone (@sieben)" +extends_documentation_fragment: +- community.general.online + +''' + +EXAMPLES = r''' +- name: Gather Online user info + online_user_info: + register: result + +- debug: + msg: "{{ result.online_user_info }}" +''' + +RETURN = r''' +--- +online_user_info: + description: Response from Online API + returned: success + type: complex + sample: + "online_user_info": { + "company": "foobar LLC", + "email": "foobar@example.com", + "first_name": "foo", + "id": 42, + "last_name": "bar", + "login": "foobar" + } +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.online import ( + Online, OnlineException, online_argument_spec +) + + +class OnlineUserInfo(Online): + + def __init__(self, module): + super(OnlineUserInfo, self).__init__(module) + self.name = 'api/v1/user' + + +def main(): + module = AnsibleModule( + argument_spec=online_argument_spec(), + supports_check_mode=True, + ) + + try: + module.exit_json( + online_user_info=OnlineUserInfo(module).get_resources() + ) + except OnlineException as exc: + module.fail_json(msg=exc.message) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/opennebula/one_host.py b/plugins/modules/cloud/opennebula/one_host.py new file mode 100644 index 0000000000..39a94846e4 --- /dev/null +++ b/plugins/modules/cloud/opennebula/one_host.py @@ -0,0 +1,281 @@ +#!/usr/bin/python +# +# Copyright 2018 www.privaz.io Valletech AB +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + + +ANSIBLE_METADATA = { + 'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community' +} + +DOCUMENTATION = ''' +--- +module: one_host + +short_description: Manages OpenNebula Hosts + + +requirements: + - pyone + +description: + - "Manages OpenNebula Hosts" + +options: + name: + description: + - Hostname of the machine to manage. + required: true + state: + description: + - Takes the host to the desired lifecycle state. + - If C(absent) the host will be deleted from the cluster. + - If C(present) the host will be created in the cluster (includes C(enabled), C(disabled) and C(offline) states). + - If C(enabled) the host is fully operational. + - C(disabled), e.g. to perform maintenance operations. + - C(offline), host is totally offline. + choices: + - absent + - present + - enabled + - disabled + - offline + default: present + im_mad_name: + description: + - The name of the information manager, this values are taken from the oned.conf with the tag name IM_MAD (name) + default: kvm + vmm_mad_name: + description: + - The name of the virtual machine manager mad name, this values are taken from the oned.conf with the tag name VM_MAD (name) + default: kvm + cluster_id: + description: + - The cluster ID. + default: 0 + cluster_name: + description: + - The cluster specified by name. + labels: + description: + - The labels for this host. + template: + description: + - The template or attribute changes to merge into the host template. 
+ aliases: + - attributes + +extends_documentation_fragment: +- community.general.opennebula + + +author: + - Rafael del Valle (@rvalle) +''' + +EXAMPLES = ''' +- name: Create a new host in OpenNebula + one_host: + name: host1 + cluster_id: 1 + api_url: http://127.0.0.1:2633/RPC2 + +- name: Create a host and adjust its template + one_host: + name: host2 + cluster_name: default + template: + LABELS: + - gold + - ssd + RESERVED_CPU: -100 +''' + +# TODO: pending setting guidelines on returned values +RETURN = ''' +''' + +# TODO: Documentation on valid state transitions is required to properly implement all valid cases +# TODO: To be coherent with CLI this module should also provide "flush" functionality + +from ansible_collections.community.general.plugins.module_utils.opennebula import OpenNebulaModule + +try: + from pyone import HOST_STATES, HOST_STATUS +except ImportError: + pass # handled at module utils + + +# Pseudo definitions... + +HOST_ABSENT = -99 # the host is absent (special case defined by this module) + + +class HostModule(OpenNebulaModule): + + def __init__(self): + + argument_spec = dict( + name=dict(type='str', required=True), + state=dict(choices=['present', 'absent', 'enabled', 'disabled', 'offline'], default='present'), + im_mad_name=dict(type='str', default="kvm"), + vmm_mad_name=dict(type='str', default="kvm"), + cluster_id=dict(type='int', default=0), + cluster_name=dict(type='str'), + labels=dict(type='list'), + template=dict(type='dict', aliases=['attributes']), + ) + + mutually_exclusive = [ + ['cluster_id', 'cluster_name'] + ] + + OpenNebulaModule.__init__(self, argument_spec, mutually_exclusive=mutually_exclusive) + + def allocate_host(self): + """ + Creates a host entry in OpenNebula + Returns: True on success, fails otherwise. + + """ + if not self.one.host.allocate(self.get_parameter('name'), + self.get_parameter('vmm_mad_name'), + self.get_parameter('im_mad_name'), + self.get_parameter('cluster_id')): + self.fail(msg="could not allocate host") + else: + self.result['changed'] = True + return True + + def wait_for_host_state(self, host, target_states): + """ + Utility method that waits for a host state. 
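+        Polls one.host.info(host.ID).STATE until one of the target
+        states is reached; ERROR and MONITORING_ERROR abort the wait.
+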
+ Args: + host: + target_states: + + """ + return self.wait_for_state('host', + lambda: self.one.host.info(host.ID).STATE, + lambda s: HOST_STATES(s).name, target_states, + invalid_states=[HOST_STATES.ERROR, HOST_STATES.MONITORING_ERROR]) + + def run(self, one, module, result): + + # Get the list of hosts + host_name = self.get_parameter("name") + host = self.get_host_by_name(host_name) + + # manage host state + desired_state = self.get_parameter('state') + if bool(host): + current_state = host.STATE + current_state_name = HOST_STATES(host.STATE).name + else: + current_state = HOST_ABSENT + current_state_name = "ABSENT" + + # apply properties + if desired_state == 'present': + if current_state == HOST_ABSENT: + self.allocate_host() + host = self.get_host_by_name(host_name) + self.wait_for_host_state(host, [HOST_STATES.MONITORED]) + elif current_state in [HOST_STATES.ERROR, HOST_STATES.MONITORING_ERROR]: + self.fail(msg="invalid host state %s" % current_state_name) + + elif desired_state == 'enabled': + if current_state == HOST_ABSENT: + self.allocate_host() + host = self.get_host_by_name(host_name) + self.wait_for_host_state(host, [HOST_STATES.MONITORED]) + elif current_state in [HOST_STATES.DISABLED, HOST_STATES.OFFLINE]: + if one.host.status(host.ID, HOST_STATUS.ENABLED): + self.wait_for_host_state(host, [HOST_STATES.MONITORED]) + result['changed'] = True + else: + self.fail(msg="could not enable host") + elif current_state in [HOST_STATES.MONITORED]: + pass + else: + self.fail(msg="unknown host state %s, cowardly refusing to change state to enable" % current_state_name) + + elif desired_state == 'disabled': + if current_state == HOST_ABSENT: + self.fail(msg='absent host cannot be put in disabled state') + elif current_state in [HOST_STATES.MONITORED, HOST_STATES.OFFLINE]: + if one.host.status(host.ID, HOST_STATUS.DISABLED): + self.wait_for_host_state(host, [HOST_STATES.DISABLED]) + result['changed'] = True + else: + self.fail(msg="could not disable host") + elif current_state in [HOST_STATES.DISABLED]: + pass + else: + self.fail(msg="unknown host state %s, cowardly refusing to change state to disable" % current_state_name) + + elif desired_state == 'offline': + if current_state == HOST_ABSENT: + self.fail(msg='absent host cannot be placed in offline state') + elif current_state in [HOST_STATES.MONITORED, HOST_STATES.DISABLED]: + if one.host.status(host.ID, HOST_STATUS.OFFLINE): + self.wait_for_host_state(host, [HOST_STATES.OFFLINE]) + result['changed'] = True + else: + self.fail(msg="could not set host offline") + elif current_state in [HOST_STATES.OFFLINE]: + pass + else: + self.fail(msg="unknown host state %s, cowardly refusing to change state to offline" % current_state_name) + + elif desired_state == 'absent': + if current_state != HOST_ABSENT: + if one.host.delete(host.ID): + result['changed'] = True + else: + self.fail(msg="could not delete host from cluster") + + # if we reach this point we can assume that the host was taken to the desired state + + if desired_state != "absent": + # manipulate or modify the template + desired_template_changes = self.get_parameter('template') + + if desired_template_changes is None: + desired_template_changes = dict() + + # complete the template with specific ansible parameters + if self.is_parameter('labels'): + desired_template_changes['LABELS'] = self.get_parameter('labels') + + if self.requires_template_update(host.TEMPLATE, desired_template_changes): + # setup the root element so that pyone will generate XML instead of attribute vector + 
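+                # (a bare dict would be serialised as an attribute vector;
+                # nesting it under a TEMPLATE key makes pyone emit a full
+                # XML template document instead)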
desired_template_changes = {"TEMPLATE": desired_template_changes} + if one.host.update(host.ID, desired_template_changes, 1): # merge the template + result['changed'] = True + else: + self.fail(msg="failed to update the host template") + + # the cluster + if host.CLUSTER_ID != self.get_parameter('cluster_id'): + if one.cluster.addhost(self.get_parameter('cluster_id'), host.ID): + result['changed'] = True + else: + self.fail(msg="failed to update the host cluster") + + # return + self.exit() + + +def main(): + HostModule().run_module() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/opennebula/one_image.py b/plugins/modules/cloud/opennebula/one_image.py new file mode 100644 index 0000000000..bf3e68ca48 --- /dev/null +++ b/plugins/modules/cloud/opennebula/one_image.py @@ -0,0 +1,423 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +""" +(c) 2018, Milan Ilic + +This file is part of Ansible + +Ansible is free software: you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation, either version 3 of the License, or +(at your option) any later version. + +Ansible is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a clone of the GNU General Public License +along with Ansible. If not, see . +""" + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'metadata_version': '1.1'} + +DOCUMENTATION = ''' +--- +module: one_image +short_description: Manages OpenNebula images +description: + - Manages OpenNebula images +requirements: + - python-oca +options: + api_url: + description: + - URL of the OpenNebula RPC server. + - It is recommended to use HTTPS so that the username/password are not + - transferred over the network unencrypted. + - If not set then the value of the C(ONE_URL) environment variable is used. + api_username: + description: + - Name of the user to login into the OpenNebula RPC server. If not set + - then the value of the C(ONE_USERNAME) environment variable is used. + api_password: + description: + - Password of the user to login into OpenNebula RPC server. If not set + - then the value of the C(ONE_PASSWORD) environment variable is used. + id: + description: + - A C(id) of the image you would like to manage. + name: + description: + - A C(name) of the image you would like to manage. + state: + description: + - C(present) - state that is used to manage the image + - C(absent) - delete the image + - C(cloned) - clone the image + - C(renamed) - rename the image to the C(new_name) + choices: ["present", "absent", "cloned", "renamed"] + default: present + enabled: + description: + - Whether the image should be enabled or disabled. + type: bool + new_name: + description: + - A name that will be assigned to the existing or new image. + - In the case of cloning, by default C(new_name) will take the name of the origin image with the prefix 'Copy of'. 
+author: + - "Milan Ilic (@ilicmilan)" +''' + +EXAMPLES = ''' +# Fetch the IMAGE by id +- one_image: + id: 45 + register: result + +# Print the IMAGE properties +- debug: + msg: result + +# Rename existing IMAGE +- one_image: + id: 34 + state: renamed + new_name: bar-image + +# Disable the IMAGE by id +- one_image: + id: 37 + enabled: no + +# Enable the IMAGE by name +- one_image: + name: bar-image + enabled: yes + +# Clone the IMAGE by name +- one_image: + name: bar-image + state: cloned + new_name: bar-image-clone + register: result + +# Delete the IMAGE by id +- one_image: + id: '{{ result.id }}' + state: absent +''' + +RETURN = ''' +id: + description: image id + type: int + returned: success + sample: 153 +name: + description: image name + type: str + returned: success + sample: app1 +group_id: + description: image's group id + type: int + returned: success + sample: 1 +group_name: + description: image's group name + type: str + returned: success + sample: one-users +owner_id: + description: image's owner id + type: int + returned: success + sample: 143 +owner_name: + description: image's owner name + type: str + returned: success + sample: ansible-test +state: + description: state of image instance + type: str + returned: success + sample: READY +used: + description: is image in use + type: bool + returned: success + sample: true +running_vms: + description: count of running vms that use this image + type: int + returned: success + sample: 7 +''' + +try: + import oca + HAS_OCA = True +except ImportError: + HAS_OCA = False + +from ansible.module_utils.basic import AnsibleModule +import os + + +def get_image(module, client, predicate): + pool = oca.ImagePool(client) + # Filter -2 means fetch all images user can Use + pool.info(filter=-2) + + for image in pool: + if predicate(image): + return image + + return None + + +def get_image_by_name(module, client, image_name): + return get_image(module, client, lambda image: (image.name == image_name)) + + +def get_image_by_id(module, client, image_id): + return get_image(module, client, lambda image: (image.id == image_id)) + + +def get_image_instance(module, client, requested_id, requested_name): + if requested_id: + return get_image_by_id(module, client, requested_id) + else: + return get_image_by_name(module, client, requested_name) + + +IMAGE_STATES = ['INIT', 'READY', 'USED', 'DISABLED', 'LOCKED', 'ERROR', 'CLONE', 'DELETE', 'USED_PERS', 'LOCKED_USED', 'LOCKED_USED_PERS'] + + +def get_image_info(image): + image.info() + + info = { + 'id': image.id, + 'name': image.name, + 'state': IMAGE_STATES[image.state], + 'running_vms': image.running_vms, + 'used': bool(image.running_vms), + 'user_name': image.uname, + 'user_id': image.uid, + 'group_name': image.gname, + 'group_id': image.gid, + } + + return info + + +def wait_for_state(module, image, wait_timeout, state_predicate): + import time + start_time = time.time() + + while (time.time() - start_time) < wait_timeout: + image.info() + state = image.state + + if state_predicate(state): + return image + + time.sleep(1) + + module.fail_json(msg="Wait timeout has expired!") + + +def wait_for_ready(module, image, wait_timeout=60): + return wait_for_state(module, image, wait_timeout, lambda state: (state in [IMAGE_STATES.index('READY')])) + + +def wait_for_delete(module, image, wait_timeout=60): + return wait_for_state(module, image, wait_timeout, lambda state: (state in [IMAGE_STATES.index('DELETE')])) + + +def enable_image(module, client, image, enable): + image.info() + changed = False + + state = 
image.state + + if state not in [IMAGE_STATES.index('READY'), IMAGE_STATES.index('DISABLED'), IMAGE_STATES.index('ERROR')]: + if enable: + module.fail_json(msg="Cannot enable " + IMAGE_STATES[state] + " image!") + else: + module.fail_json(msg="Cannot disable " + IMAGE_STATES[state] + " image!") + + if ((enable and state != IMAGE_STATES.index('READY')) or + (not enable and state != IMAGE_STATES.index('DISABLED'))): + changed = True + + if changed and not module.check_mode: + client.call('image.enable', image.id, enable) + + result = get_image_info(image) + result['changed'] = changed + + return result + + +def clone_image(module, client, image, new_name): + if new_name is None: + new_name = "Copy of " + image.name + + tmp_image = get_image_by_name(module, client, new_name) + if tmp_image: + result = get_image_info(tmp_image) + result['changed'] = False + return result + + if image.state == IMAGE_STATES.index('DISABLED'): + module.fail_json(msg="Cannot clone DISABLED image") + + if not module.check_mode: + new_id = client.call('image.clone', image.id, new_name) + image = get_image_by_id(module, client, new_id) + wait_for_ready(module, image) + + result = get_image_info(image) + result['changed'] = True + + return result + + +def rename_image(module, client, image, new_name): + if new_name is None: + module.fail_json(msg="'new_name' option has to be specified when the state is 'renamed'") + + if new_name == image.name: + result = get_image_info(image) + result['changed'] = False + return result + + tmp_image = get_image_by_name(module, client, new_name) + if tmp_image: + module.fail_json(msg="Name '" + new_name + "' is already taken by IMAGE with id=" + str(tmp_image.id)) + + if not module.check_mode: + client.call('image.rename', image.id, new_name) + + result = get_image_info(image) + result['changed'] = True + return result + + +def delete_image(module, client, image): + + if not image: + return {'changed': False} + + if image.running_vms > 0: + module.fail_json(msg="Cannot delete image. 
There are " + str(image.running_vms) + " VMs using it.") + + if not module.check_mode: + client.call('image.delete', image.id) + wait_for_delete(module, image) + + return {'changed': True} + + +def get_connection_info(module): + + url = module.params.get('api_url') + username = module.params.get('api_username') + password = module.params.get('api_password') + + if not url: + url = os.environ.get('ONE_URL') + + if not username: + username = os.environ.get('ONE_USERNAME') + + if not password: + password = os.environ.get('ONE_PASSWORD') + + if not(url and username and password): + module.fail_json(msg="One or more connection parameters (api_url, api_username, api_password) were not specified") + from collections import namedtuple + + auth_params = namedtuple('auth', ('url', 'username', 'password')) + + return auth_params(url=url, username=username, password=password) + + +def main(): + fields = { + "api_url": {"required": False, "type": "str"}, + "api_username": {"required": False, "type": "str"}, + "api_password": {"required": False, "type": "str", "no_log": True}, + "id": {"required": False, "type": "int"}, + "name": {"required": False, "type": "str"}, + "state": { + "default": "present", + "choices": ['present', 'absent', 'cloned', 'renamed'], + "type": "str" + }, + "enabled": {"required": False, "type": "bool"}, + "new_name": {"required": False, "type": "str"}, + } + + module = AnsibleModule(argument_spec=fields, + mutually_exclusive=[['id', 'name']], + supports_check_mode=True) + + if not HAS_OCA: + module.fail_json(msg='This module requires python-oca to work!') + + auth = get_connection_info(module) + params = module.params + id = params.get('id') + name = params.get('name') + state = params.get('state') + enabled = params.get('enabled') + new_name = params.get('new_name') + client = oca.Client(auth.username + ':' + auth.password, auth.url) + + result = {} + + if not id and state == 'renamed': + module.fail_json(msg="Option 'id' is required when the state is 'renamed'") + + image = get_image_instance(module, client, id, name) + if not image and state != 'absent': + if id: + module.fail_json(msg="There is no image with id=" + str(id)) + else: + module.fail_json(msg="There is no image with name=" + name) + + if state == 'absent': + result = delete_image(module, client, image) + else: + result = get_image_info(image) + changed = False + result['changed'] = False + + if enabled is not None: + result = enable_image(module, client, image, enabled) + if state == "cloned": + result = clone_image(module, client, image, new_name) + elif state == "renamed": + result = rename_image(module, client, image, new_name) + + changed = changed or result['changed'] + result['changed'] = changed + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/opennebula/one_image_facts.py b/plugins/modules/cloud/opennebula/one_image_facts.py new file mode 120000 index 0000000000..96b8357316 --- /dev/null +++ b/plugins/modules/cloud/opennebula/one_image_facts.py @@ -0,0 +1 @@ +one_image_info.py \ No newline at end of file diff --git a/plugins/modules/cloud/opennebula/one_image_info.py b/plugins/modules/cloud/opennebula/one_image_info.py new file mode 100644 index 0000000000..cfee862a29 --- /dev/null +++ b/plugins/modules/cloud/opennebula/one_image_info.py @@ -0,0 +1,291 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +""" +(c) 2018, Milan Ilic 
+
+This file is part of Ansible
+
+Ansible is free software: you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation, either version 3 of the License, or
+(at your option) any later version.
+
+Ansible is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+"""
+
+ANSIBLE_METADATA = {'status': ['preview'],
+                    'supported_by': 'community',
+                    'metadata_version': '1.1'}
+
+DOCUMENTATION = '''
+---
+module: one_image_info
+short_description: Gather information on OpenNebula images
+description:
+  - Gather information on OpenNebula images.
+  - This module was called C(one_image_facts) before Ansible 2.9. The usage did not change.
+requirements:
+  - pyone
+options:
+  api_url:
+    description:
+      - URL of the OpenNebula RPC server.
+      - It is recommended to use HTTPS so that the username/password are not
+      - transferred over the network unencrypted.
+      - If not set then the value of the C(ONE_URL) environment variable is used.
+  api_username:
+    description:
+      - Name of the user to login into the OpenNebula RPC server. If not set
+      - then the value of the C(ONE_USERNAME) environment variable is used.
+  api_password:
+    description:
+      - Password of the user to login into OpenNebula RPC server. If not set
+      - then the value of the C(ONE_PASSWORD) environment variable is used.
+  ids:
+    description:
+      - A list of image ids whose facts you want to gather.
+    aliases: ['id']
+  name:
+    description:
+      - A C(name) of the image whose facts will be gathered.
+      - If the C(name) begins with '~' the C(name) will be used as a regex pattern
+      - which restricts the list of images (whose facts will be returned) to those whose names match the specified regex.
+      - Also, if the C(name) begins with '~*' case-insensitive matching will be performed.
+      - See examples for more details.
+author: + - "Milan Ilic (@ilicmilan)" + - "Jan Meerkamp (@meerkampdvv)" +''' + +EXAMPLES = ''' +# Gather facts about all images +- one_image_info: + register: result + +# Print all images facts +- debug: + msg: result + +# Gather facts about an image using ID +- one_image_info: + ids: + - 123 + +# Gather facts about an image using the name +- one_image_info: + name: 'foo-image' + register: foo_image + +# Gather facts about all IMAGEs whose name matches regex 'app-image-.*' +- one_image_info: + name: '~app-image-.*' + register: app_images + +# Gather facts about all IMAGEs whose name matches regex 'foo-image-.*' ignoring cases +- one_image_info: + name: '~*foo-image-.*' + register: foo_images +''' + +RETURN = ''' +images: + description: A list of images info + type: complex + returned: success + contains: + id: + description: image id + type: int + sample: 153 + name: + description: image name + type: str + sample: app1 + group_id: + description: image's group id + type: int + sample: 1 + group_name: + description: image's group name + type: str + sample: one-users + owner_id: + description: image's owner id + type: int + sample: 143 + owner_name: + description: image's owner name + type: str + sample: ansible-test + state: + description: state of image instance + type: str + sample: READY + used: + description: is image in use + type: bool + sample: true + running_vms: + description: count of running vms that use this image + type: int + sample: 7 +''' + +try: + import pyone + HAS_PYONE = True +except ImportError: + HAS_PYONE = False + +from ansible.module_utils.basic import AnsibleModule +import os + + +def get_all_images(client): + pool = client.imagepool.info(-2, -1, -1, -1) + # Filter -2 means fetch all images user can Use + + return pool + + +IMAGE_STATES = ['INIT', 'READY', 'USED', 'DISABLED', 'LOCKED', 'ERROR', 'CLONE', 'DELETE', 'USED_PERS', 'LOCKED_USED', 'LOCKED_USED_PERS'] + + +def get_image_info(image): + info = { + 'id': image.ID, + 'name': image.NAME, + 'state': IMAGE_STATES[image.STATE], + 'running_vms': image.RUNNING_VMS, + 'used': bool(image.RUNNING_VMS), + 'user_name': image.UNAME, + 'user_id': image.UID, + 'group_name': image.GNAME, + 'group_id': image.GID, + } + return info + + +def get_images_by_ids(module, client, ids): + images = [] + pool = get_all_images(client) + + for image in pool.IMAGE: + if str(image.ID) in ids: + images.append(image) + ids.remove(str(image.ID)) + if len(ids) == 0: + break + + if len(ids) > 0: + module.fail_json(msg='There is no IMAGE(s) with id(s)=' + ', '.join('{id}'.format(id=str(image_id)) for image_id in ids)) + + return images + + +def get_images_by_name(module, client, name_pattern): + + images = [] + pattern = None + + pool = get_all_images(client) + + if name_pattern.startswith('~'): + import re + if name_pattern[1] == '*': + pattern = re.compile(name_pattern[2:], re.IGNORECASE) + else: + pattern = re.compile(name_pattern[1:]) + + for image in pool.IMAGE: + if pattern is not None: + if pattern.match(image.NAME): + images.append(image) + elif name_pattern == image.NAME: + images.append(image) + break + + # if the specific name is indicated + if pattern is None and len(images) == 0: + module.fail_json(msg="There is no IMAGE with name=" + name_pattern) + + return images + + +def get_connection_info(module): + + url = module.params.get('api_url') + username = module.params.get('api_username') + password = module.params.get('api_password') + + if not url: + url = os.environ.get('ONE_URL') + + if not username: + username = 
os.environ.get('ONE_USERNAME') + + if not password: + password = os.environ.get('ONE_PASSWORD') + + if not(url and username and password): + module.fail_json(msg="One or more connection parameters (api_url, api_username, api_password) were not specified") + from collections import namedtuple + + auth_params = namedtuple('auth', ('url', 'username', 'password')) + + return auth_params(url=url, username=username, password=password) + + +def main(): + fields = { + "api_url": {"required": False, "type": "str"}, + "api_username": {"required": False, "type": "str"}, + "api_password": {"required": False, "type": "str", "no_log": True}, + "ids": {"required": False, "aliases": ['id'], "type": "list"}, + "name": {"required": False, "type": "str"}, + } + + module = AnsibleModule(argument_spec=fields, + mutually_exclusive=[['ids', 'name']], + supports_check_mode=True) + if module._name == 'one_image_facts': + module.deprecate("The 'one_image_facts' module has been renamed to 'one_image_info'", version='2.13') + + if not HAS_PYONE: + module.fail_json(msg='This module requires pyone to work!') + + auth = get_connection_info(module) + params = module.params + ids = params.get('ids') + name = params.get('name') + client = pyone.OneServer(auth.url, session=auth.username + ':' + auth.password) + + result = {'images': []} + images = [] + + if ids: + images = get_images_by_ids(module, client, ids) + elif name: + images = get_images_by_name(module, client, name) + else: + images = get_all_images(client).IMAGE + + for image in images: + result['images'].append(get_image_info(image)) + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/opennebula/one_service.py b/plugins/modules/cloud/opennebula/one_service.py new file mode 100644 index 0000000000..767b3d8a45 --- /dev/null +++ b/plugins/modules/cloud/opennebula/one_service.py @@ -0,0 +1,757 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +""" +(c) 2017, Milan Ilic + +This file is part of Ansible + +Ansible is free software: you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation, either version 3 of the License, or +(at your option) any later version. + +Ansible is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with Ansible. If not, see . +""" + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'metadata_version': '1.1'} + +DOCUMENTATION = ''' +--- +module: one_service +short_description: Deploy and manage OpenNebula services +description: + - Manage OpenNebula services +options: + api_url: + description: + - URL of the OpenNebula OneFlow API server. + - It is recommended to use HTTPS so that the username/password are not transferred over the network unencrypted. + - If not set then the value of the ONEFLOW_URL environment variable is used. + api_username: + description: + - Name of the user to login into the OpenNebula OneFlow API server. If not set then the value of the C(ONEFLOW_USERNAME) environment variable is used. + api_password: + description: + - Password of the user to login into OpenNebula OneFlow API server. 
If not set then the value of the C(ONEFLOW_PASSWORD) environment variable is used. + template_name: + description: + - Name of service template to use to create a new instance of a service + template_id: + description: + - ID of a service template to use to create a new instance of a service + service_id: + description: + - ID of a service instance that you would like to manage + service_name: + description: + - Name of a service instance that you would like to manage + unique: + description: + - Setting C(unique=yes) will make sure that there is only one service instance running with a name set with C(service_name) when + - instantiating a service from a template specified with C(template_id)/C(template_name). Check examples below. + type: bool + default: no + state: + description: + - C(present) - instantiate a service from a template specified with C(template_id)/C(template_name). + - C(absent) - terminate an instance of a service specified with C(service_id)/C(service_name). + choices: ["present", "absent"] + default: present + mode: + description: + - Set permission mode of a service instance in octet format, e.g. C(600) to give owner C(use) and C(manage) and nothing to group and others. + owner_id: + description: + - ID of the user which will be set as the owner of the service + group_id: + description: + - ID of the group which will be set as the group of the service + wait: + description: + - Wait for the instance to reach RUNNING state after DEPLOYING or COOLDOWN state after SCALING + type: bool + default: no + wait_timeout: + description: + - How long before wait gives up, in seconds + default: 300 + custom_attrs: + description: + - Dictionary of key/value custom attributes which will be used when instantiating a new service. + default: {} + role: + description: + - Name of the role whose cardinality should be changed + cardinality: + description: + - Number of VMs for the specified role + force: + description: + - Force the new cardinality even if it is outside the limits + type: bool + default: no +author: + - "Milan Ilic (@ilicmilan)" +''' + +EXAMPLES = ''' +# Instantiate a new service +- one_service: + template_id: 90 + register: result + +# Print service properties +- debug: + msg: result + +# Instantiate a new service with specified service_name, service group and mode +- one_service: + template_name: 'app1_template' + service_name: 'app1' + group_id: 1 + mode: '660' + +# Instantiate a new service with template_id and pass custom_attrs dict +- one_service: + template_id: 90 + custom_attrs: + public_network_id: 21 + private_network_id: 26 + +# Instantiate a new service 'foo' if the service doesn't already exist, otherwise do nothing +- one_service: + template_id: 53 + service_name: 'foo' + unique: yes + +# Delete a service by ID +- one_service: + service_id: 153 + state: absent + +# Get service info +- one_service: + service_id: 153 + register: service_info + +# Change service owner, group and mode +- one_service: + service_name: 'app2' + owner_id: 34 + group_id: 113 + mode: '600' + +# Instantiate service and wait for it to become RUNNING +- one_service: + template_id: 43 + service_name: 'foo1' + +# Wait service to become RUNNING +- one_service: + service_id: 112 + wait: yes + +# Change role cardinality +- one_service: + service_id: 153 + role: bar + cardinality: 5 + +# Change role cardinality and wait for it to be applied +- one_service: + service_id: 112 + role: foo + cardinality: 7 + wait: yes +''' + +RETURN = ''' +service_id: + description: service id + type: int + 
  returned: success
+  sample: 153
+service_name:
+  description: service name
+  type: str
+  returned: success
+  sample: app1
+group_id:
+  description: service's group id
+  type: int
+  returned: success
+  sample: 1
+group_name:
+  description: service's group name
+  type: str
+  returned: success
+  sample: one-users
+owner_id:
+  description: service's owner id
+  type: int
+  returned: success
+  sample: 143
+owner_name:
+  description: service's owner name
+  type: str
+  returned: success
+  sample: ansible-test
+state:
+  description: state of service instance
+  type: str
+  returned: success
+  sample: RUNNING
+mode:
+  description: service's mode
+  type: int
+  returned: success
+  sample: 660
+roles:
+  description: list of dictionaries of roles, each role is described by name, cardinality, state and nodes ids
+  type: list
+  returned: success
+  sample: '[{"cardinality": 1,"name": "foo","state": "RUNNING","ids": [ 123, 456 ]},
+            {"cardinality": 2,"name": "bar","state": "RUNNING", "ids": [ 452, 567, 746 ]}]'
+'''
+
+import os
+import sys
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import open_url
+
+STATES = ("PENDING", "DEPLOYING", "RUNNING", "UNDEPLOYING", "WARNING", "DONE",
+          "FAILED_UNDEPLOYING", "FAILED_DEPLOYING", "SCALING", "FAILED_SCALING", "COOLDOWN")
+
+
+def get_all_templates(module, auth):
+    try:
+        all_templates = open_url(url=(auth.url + "/service_template"), method="GET", force_basic_auth=True, url_username=auth.user, url_password=auth.password)
+    except Exception as e:
+        module.fail_json(msg=str(e))
+
+    return module.from_json(all_templates.read())
+
+
+def get_template(module, auth, pred):
+    all_templates_dict = get_all_templates(module, auth)
+
+    found = 0
+    found_template = None
+    template_name = ''
+
+    if "DOCUMENT_POOL" in all_templates_dict and "DOCUMENT" in all_templates_dict["DOCUMENT_POOL"]:
+        for template in all_templates_dict["DOCUMENT_POOL"]["DOCUMENT"]:
+            if pred(template):
+                found = found + 1
+                found_template = template
+                template_name = template["NAME"]
+
+    if found <= 0:
+        return None
+    elif found > 1:
+        module.fail_json(msg="There is more than one template with name: " + template_name + ". Use a unique name or 'template_id' instead.")
+    else:
+        return found_template
+
+
+def get_all_services(module, auth):
+    try:
+        response = open_url(auth.url + "/service", method="GET", force_basic_auth=True, url_username=auth.user, url_password=auth.password)
+    except Exception as e:
+        module.fail_json(msg=str(e))
+
+    return module.from_json(response.read())
+
+
+def get_service(module, auth, pred):
+    all_services_dict = get_all_services(module, auth)
+
+    found = 0
+    found_service = None
+    service_name = ''
+
+    if "DOCUMENT_POOL" in all_services_dict and "DOCUMENT" in all_services_dict["DOCUMENT_POOL"]:
+        for service in all_services_dict["DOCUMENT_POOL"]["DOCUMENT"]:
+            if pred(service):
+                found = found + 1
+                found_service = service
+                service_name = service["NAME"]
+
+    # fail if there are multiple services with the same name
+    if found > 1:
+        module.fail_json(msg="There are multiple services with a name: '" +
+                         service_name + "'. 
You have to use a unique service name or use 'service_id' instead.") + elif found <= 0: + return None + else: + return found_service + + +def get_service_by_id(module, auth, service_id): + return get_service(module, auth, lambda service: (int(service["ID"]) == int(service_id))) if service_id else None + + +def get_service_by_name(module, auth, service_name): + return get_service(module, auth, lambda service: (service["NAME"] == service_name)) + + +def get_service_info(module, auth, service): + + result = { + "service_id": int(service["ID"]), + "service_name": service["NAME"], + "group_id": int(service["GID"]), + "group_name": service["GNAME"], + "owner_id": int(service["UID"]), + "owner_name": service["UNAME"], + "state": STATES[service["TEMPLATE"]["BODY"]["state"]] + } + + roles_status = service["TEMPLATE"]["BODY"]["roles"] + roles = [] + for role in roles_status: + nodes_ids = [] + if "nodes" in role: + for node in role["nodes"]: + nodes_ids.append(node["deploy_id"]) + roles.append({"name": role["name"], "cardinality": role["cardinality"], "state": STATES[int(role["state"])], "ids": nodes_ids}) + + result["roles"] = roles + result["mode"] = int(parse_service_permissions(service)) + + return result + + +def create_service(module, auth, template_id, service_name, custom_attrs, unique, wait, wait_timeout): + # make sure that the values in custom_attrs dict are strings + custom_attrs_with_str = dict((k, str(v)) for k, v in custom_attrs.items()) + + data = { + "action": { + "perform": "instantiate", + "params": { + "merge_template": { + "custom_attrs_values": custom_attrs_with_str, + "name": service_name + } + } + } + } + + try: + response = open_url(auth.url + "/service_template/" + str(template_id) + "/action", method="POST", + data=module.jsonify(data), force_basic_auth=True, url_username=auth.user, url_password=auth.password) + except Exception as e: + module.fail_json(msg=str(e)) + + service_result = module.from_json(response.read())["DOCUMENT"] + + return service_result + + +def wait_for_service_to_become_ready(module, auth, service_id, wait_timeout): + import time + start_time = time.time() + + while (time.time() - start_time) < wait_timeout: + try: + status_result = open_url(auth.url + "/service/" + str(service_id), method="GET", + force_basic_auth=True, url_username=auth.user, url_password=auth.password) + except Exception as e: + module.fail_json(msg="Request for service status has failed. Error message: " + str(e)) + + status_result = module.from_json(status_result.read()) + service_state = status_result["DOCUMENT"]["TEMPLATE"]["BODY"]["state"] + + if service_state in [STATES.index("RUNNING"), STATES.index("COOLDOWN")]: + return status_result["DOCUMENT"] + elif service_state not in [STATES.index("PENDING"), STATES.index("DEPLOYING"), STATES.index("SCALING")]: + log_message = '' + for log_info in status_result["DOCUMENT"]["TEMPLATE"]["BODY"]["log"]: + if log_info["severity"] == "E": + log_message = log_message + log_info["message"] + break + + module.fail_json(msg="Deploying is unsuccessful. Service state: " + STATES[service_state] + ". 
Error message: " + log_message) + + time.sleep(1) + + module.fail_json(msg="Wait timeout has expired") + + +def change_service_permissions(module, auth, service_id, permissions): + + data = { + "action": { + "perform": "chmod", + "params": {"octet": permissions} + } + } + + try: + status_result = open_url(auth.url + "/service/" + str(service_id) + "/action", method="POST", force_basic_auth=True, + url_username=auth.user, url_password=auth.password, data=module.jsonify(data)) + except Exception as e: + module.fail_json(msg=str(e)) + + +def change_service_owner(module, auth, service_id, owner_id): + data = { + "action": { + "perform": "chown", + "params": {"owner_id": owner_id} + } + } + + try: + status_result = open_url(auth.url + "/service/" + str(service_id) + "/action", method="POST", force_basic_auth=True, + url_username=auth.user, url_password=auth.password, data=module.jsonify(data)) + except Exception as e: + module.fail_json(msg=str(e)) + + +def change_service_group(module, auth, service_id, group_id): + + data = { + "action": { + "perform": "chgrp", + "params": {"group_id": group_id} + } + } + + try: + status_result = open_url(auth.url + "/service/" + str(service_id) + "/action", method="POST", force_basic_auth=True, + url_username=auth.user, url_password=auth.password, data=module.jsonify(data)) + except Exception as e: + module.fail_json(msg=str(e)) + + +def change_role_cardinality(module, auth, service_id, role, cardinality, force): + + data = { + "cardinality": cardinality, + "force": force + } + + try: + status_result = open_url(auth.url + "/service/" + str(service_id) + "/role/" + role, method="PUT", + force_basic_auth=True, url_username=auth.user, url_password=auth.password, data=module.jsonify(data)) + except Exception as e: + module.fail_json(msg=str(e)) + + if status_result.getcode() != 204: + module.fail_json(msg="Failed to change cardinality for role: " + role + ". 
Return code: " + str(status_result.getcode())) + + +def check_change_service_owner(module, service, owner_id): + old_owner_id = int(service["UID"]) + + return old_owner_id != owner_id + + +def check_change_service_group(module, service, group_id): + old_group_id = int(service["GID"]) + + return old_group_id != group_id + + +def parse_service_permissions(service): + perm_dict = service["PERMISSIONS"] + ''' + This is the structure of the 'PERMISSIONS' dictionary: + + "PERMISSIONS": { + "OWNER_U": "1", + "OWNER_M": "1", + "OWNER_A": "0", + "GROUP_U": "0", + "GROUP_M": "0", + "GROUP_A": "0", + "OTHER_U": "0", + "OTHER_M": "0", + "OTHER_A": "0" + } + ''' + + owner_octal = int(perm_dict["OWNER_U"]) * 4 + int(perm_dict["OWNER_M"]) * 2 + int(perm_dict["OWNER_A"]) + group_octal = int(perm_dict["GROUP_U"]) * 4 + int(perm_dict["GROUP_M"]) * 2 + int(perm_dict["GROUP_A"]) + other_octal = int(perm_dict["OTHER_U"]) * 4 + int(perm_dict["OTHER_M"]) * 2 + int(perm_dict["OTHER_A"]) + + permissions = str(owner_octal) + str(group_octal) + str(other_octal) + + return permissions + + +def check_change_service_permissions(module, service, permissions): + old_permissions = parse_service_permissions(service) + + return old_permissions != permissions + + +def check_change_role_cardinality(module, service, role_name, cardinality): + roles_list = service["TEMPLATE"]["BODY"]["roles"] + + for role in roles_list: + if role["name"] == role_name: + return int(role["cardinality"]) != cardinality + + module.fail_json(msg="There is no role with name: " + role_name) + + +def create_service_and_operation(module, auth, template_id, service_name, owner_id, group_id, permissions, custom_attrs, unique, wait, wait_timeout): + if not service_name: + service_name = '' + changed = False + service = None + + if unique: + service = get_service_by_name(module, auth, service_name) + + if not service: + if not module.check_mode: + service = create_service(module, auth, template_id, service_name, custom_attrs, unique, wait, wait_timeout) + changed = True + + # if check_mode=true and there would be changes, service doesn't exist and we can not get it + if module.check_mode and changed: + return {"changed": True} + + result = service_operation(module, auth, owner_id=owner_id, group_id=group_id, wait=wait, + wait_timeout=wait_timeout, permissions=permissions, service=service) + + if result["changed"]: + changed = True + + result["changed"] = changed + + return result + + +def service_operation(module, auth, service_id=None, owner_id=None, group_id=None, permissions=None, + role=None, cardinality=None, force=None, wait=False, wait_timeout=None, service=None): + + changed = False + + if not service: + service = get_service_by_id(module, auth, service_id) + else: + service_id = service["ID"] + + if not service: + module.fail_json(msg="There is no service with id: " + str(service_id)) + + if owner_id: + if check_change_service_owner(module, service, owner_id): + if not module.check_mode: + change_service_owner(module, auth, service_id, owner_id) + changed = True + if group_id: + if check_change_service_group(module, service, group_id): + if not module.check_mode: + change_service_group(module, auth, service_id, group_id) + changed = True + if permissions: + if check_change_service_permissions(module, service, permissions): + if not module.check_mode: + change_service_permissions(module, auth, service_id, permissions) + changed = True + + if role: + if check_change_role_cardinality(module, service, role, cardinality): + if not module.check_mode: + 
change_role_cardinality(module, auth, service_id, role, cardinality, force) + changed = True + + if wait and not module.check_mode: + service = wait_for_service_to_become_ready(module, auth, service_id, wait_timeout) + + # if something has changed, fetch service info again + if changed: + service = get_service_by_id(module, auth, service_id) + + service_info = get_service_info(module, auth, service) + service_info["changed"] = changed + + return service_info + + +def delete_service(module, auth, service_id): + service = get_service_by_id(module, auth, service_id) + if not service: + return {"changed": False} + + service_info = get_service_info(module, auth, service) + + service_info["changed"] = True + + if module.check_mode: + return service_info + + try: + result = open_url(auth.url + '/service/' + str(service_id), method="DELETE", force_basic_auth=True, url_username=auth.user, url_password=auth.password) + except Exception as e: + module.fail_json(msg="Service deletion has failed. Error message: " + str(e)) + + return service_info + + +def get_template_by_name(module, auth, template_name): + return get_template(module, auth, lambda template: (template["NAME"] == template_name)) + + +def get_template_by_id(module, auth, template_id): + return get_template(module, auth, lambda template: (int(template["ID"]) == int(template_id))) if template_id else None + + +def get_template_id(module, auth, requested_id, requested_name): + template = get_template_by_id(module, auth, requested_id) if requested_id else get_template_by_name(module, auth, requested_name) + + if template: + return template["ID"] + + return None + + +def get_service_id_by_name(module, auth, service_name): + service = get_service_by_name(module, auth, service_name) + + if service: + return service["ID"] + + return None + + +def get_connection_info(module): + + url = module.params.get('api_url') + username = module.params.get('api_username') + password = module.params.get('api_password') + + if not url: + url = os.environ.get('ONEFLOW_URL') + + if not username: + username = os.environ.get('ONEFLOW_USERNAME') + + if not password: + password = os.environ.get('ONEFLOW_PASSWORD') + + if not(url and username and password): + module.fail_json(msg="One or more connection parameters (api_url, api_username, api_password) were not specified") + from collections import namedtuple + + auth_params = namedtuple('auth', ('url', 'user', 'password')) + + return auth_params(url=url, user=username, password=password) + + +def main(): + fields = { + "api_url": {"required": False, "type": "str"}, + "api_username": {"required": False, "type": "str"}, + "api_password": {"required": False, "type": "str", "no_log": True}, + "service_name": {"required": False, "type": "str"}, + "service_id": {"required": False, "type": "int"}, + "template_name": {"required": False, "type": "str"}, + "template_id": {"required": False, "type": "int"}, + "state": { + "default": "present", + "choices": ['present', 'absent'], + "type": "str" + }, + "mode": {"required": False, "type": "str"}, + "owner_id": {"required": False, "type": "int"}, + "group_id": {"required": False, "type": "int"}, + "unique": {"default": False, "type": "bool"}, + "wait": {"default": False, "type": "bool"}, + "wait_timeout": {"default": 300, "type": "int"}, + "custom_attrs": {"default": {}, "type": "dict"}, + "role": {"required": False, "type": "str"}, + "cardinality": {"required": False, "type": "int"}, + "force": {"default": False, "type": "bool"} + } + + module = AnsibleModule(argument_spec=fields, 
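+                           # each sub-list below groups parameters that AnsibleModule
+                           # will refuse to accept together in a single task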
+ mutually_exclusive=[ + ['template_id', 'template_name', 'service_id'], + ['service_id', 'service_name'], + ['template_id', 'template_name', 'role'], + ['template_id', 'template_name', 'cardinality'], + ['service_id', 'custom_attrs'] + ], + required_together=[['role', 'cardinality']], + supports_check_mode=True) + + auth = get_connection_info(module) + params = module.params + service_name = params.get('service_name') + service_id = params.get('service_id') + + requested_template_id = params.get('template_id') + requested_template_name = params.get('template_name') + state = params.get('state') + permissions = params.get('mode') + owner_id = params.get('owner_id') + group_id = params.get('group_id') + unique = params.get('unique') + wait = params.get('wait') + wait_timeout = params.get('wait_timeout') + custom_attrs = params.get('custom_attrs') + role = params.get('role') + cardinality = params.get('cardinality') + force = params.get('force') + + template_id = None + + if requested_template_id or requested_template_name: + template_id = get_template_id(module, auth, requested_template_id, requested_template_name) + if not template_id: + if requested_template_id: + module.fail_json(msg="There is no template with template_id: " + str(requested_template_id)) + elif requested_template_name: + module.fail_json(msg="There is no template with name: " + requested_template_name) + + if unique and not service_name: + module.fail_json(msg="You cannot use unique without passing service_name!") + + if template_id and state == 'absent': + module.fail_json(msg="State absent is not valid for template") + + if template_id and state == 'present': # Instantiate a service + result = create_service_and_operation(module, auth, template_id, service_name, owner_id, + group_id, permissions, custom_attrs, unique, wait, wait_timeout) + else: + if not (service_id or service_name): + module.fail_json(msg="To manage the service at least the service id or service name should be specified!") + if custom_attrs: + module.fail_json(msg="You can only set custom_attrs when instantiate service!") + + if not service_id: + service_id = get_service_id_by_name(module, auth, service_name) + # The task should be failed when we want to manage a non-existent service identified by its name + if not service_id and state == 'present': + module.fail_json(msg="There is no service with name: " + service_name) + + if state == 'absent': + result = delete_service(module, auth, service_id) + else: + result = service_operation(module, auth, service_id, owner_id, group_id, permissions, role, cardinality, force, wait, wait_timeout) + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/opennebula/one_vm.py b/plugins/modules/cloud/opennebula/one_vm.py new file mode 100644 index 0000000000..c3e06c15e0 --- /dev/null +++ b/plugins/modules/cloud/opennebula/one_vm.py @@ -0,0 +1,1570 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +""" +(c) 2017, Milan Ilic +(c) 2019, Jan Meerkamp + +This file is part of Ansible + +Ansible is free software: you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation, either version 3 of the License, or +(at your option) any later version. 
+
+Ansible is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+"""
+
+ANSIBLE_METADATA = {'status': ['preview'],
+                    'supported_by': 'community',
+                    'metadata_version': '1.1'}
+
+DOCUMENTATION = '''
+---
+module: one_vm
+short_description: Creates or terminates OpenNebula instances
+description:
+  - Manages OpenNebula instances
+requirements:
+  - pyone
+options:
+  api_url:
+    description:
+      - URL of the OpenNebula RPC server.
+      - It is recommended to use HTTPS so that the username/password are not
+      - transferred over the network unencrypted.
+      - If not set then the value of the C(ONE_URL) environment variable is used.
+  api_username:
+    description:
+      - Name of the user to login into the OpenNebula RPC server. If not set
+      - then the value of the C(ONE_USERNAME) environment variable is used.
+  api_password:
+    description:
+      - Password of the user to login into OpenNebula RPC server. If not set
+      - then the value of the C(ONE_PASSWORD) environment variable is used.
+      - If neither I(api_username) nor I(api_password) is set, the module will try to
+      - authenticate with the ONE auth file. Default path is "~/.one/one_auth".
+      - Set environment variable C(ONE_AUTH) to override this path.
+  template_name:
+    description:
+      - Name of VM template to use to create a new instance
+  template_id:
+    description:
+      - ID of a VM template to use to create a new instance
+  vm_start_on_hold:
+    description:
+      - Set to true to put the VM on hold while creating
+    default: False
+  instance_ids:
+    description:
+      - A list of instance ids used for states':' C(absent), C(running), C(rebooted), C(poweredoff)
+    aliases: ['ids']
+  state:
+    description:
+      - C(present) - create instances from a template specified with C(template_id)/C(template_name).
+      - C(running) - run instances
+      - C(poweredoff) - power-off instances
+      - C(rebooted) - reboot instances
+      - C(absent) - terminate instances
+    choices: ["present", "absent", "running", "rebooted", "poweredoff"]
+    default: present
+  hard:
+    description:
+      - Reboot, power-off or terminate instances C(hard)
+    default: no
+    type: bool
+  wait:
+    description:
+      - Wait for the instance to reach its desired state before returning. Keep
+      - in mind that if you are waiting for the instance to be in the running state it
+      - doesn't mean that you will be able to SSH into that machine, only that the
+      - boot process has started on that instance; see the 'wait_for' example for
+      - details.
+    default: yes
+    type: bool
+  wait_timeout:
+    description:
+      - How long before wait gives up, in seconds
+    default: 300
+  attributes:
+    description:
+      - A dictionary of key/value attributes to add to new instances, or for
+      - setting C(state) of instances with these attributes.
+      - Keys are case insensitive and OpenNebula automatically converts them to upper case.
+      - Be aware C(NAME) is a special attribute which sets the name of the VM when it's deployed.
+      - C(#) character(s) can be appended to the C(NAME) and the module will automatically add
+      - indexes to the names of VMs.
+      - For example':' C(NAME':' foo-###) would create VMs with names C(foo-000), C(foo-001),...
+      - When used with C(count_attributes) and C(exact_count) the module will
+      - match the base name without the index part.
+    default: {}
+  labels:
+    description:
+      - A list of labels to associate with new instances, or for setting
+      - C(state) of instances with these labels.
+    default: []
+  count_attributes:
+    description:
+      - A dictionary of key/value attributes that can only be used with
+      - C(exact_count) to determine how many nodes based on a specific
+      - attributes criteria should be deployed. This can be expressed in
+      - multiple ways and is shown in the EXAMPLES section.
+  count_labels:
+    description:
+      - A list of labels that can only be used with C(exact_count) to determine
+      - how many nodes based on a specific labels criteria should be deployed.
+      - This can be expressed in multiple ways and is shown in the EXAMPLES
+      - section.
+  count:
+    description:
+      - Number of instances to launch
+    default: 1
+  exact_count:
+    description:
+      - Indicates how many instances that match C(count_attributes) and
+      - C(count_labels) parameters should be deployed. Instances are either
+      - created or terminated based on this value.
+      - NOTE':' Instances with the lowest IDs will be terminated first.
+  mode:
+    description:
+      - Set permission mode of the instance in octet format, e.g. C(600) to give owner C(use) and C(manage) and nothing to group and others.
+  owner_id:
+    description:
+      - ID of the user which will be set as the owner of the instance
+  group_id:
+    description:
+      - ID of the group which will be set as the group of the instance
+  memory:
+    description:
+      - The size of the memory for new instances (in MB, GB, ...)
+  disk_size:
+    description:
+      - The size of the disk created for new instances (in MB, GB, TB,...).
+      - NOTE':' If the template has multiple disks, the order of the sizes is
+      - matched against the order specified in C(template_id)/C(template_name).
+  cpu:
+    description:
+      - Percentage of CPU divided by 100 required for the new instance. Half a
+      - processor is written 0.5.
+  vcpu:
+    description:
+      - Number of CPUs (cores) the new VM will have.
+  networks:
+    description:
+      - A list of dictionaries with network parameters. See examples for more details.
+    default: []
+  disk_saveas:
+    description:
+      - Creates an image from a VM disk.
+      - It is a dictionary where you have to specify C(name) of the new image.
+      - Optionally you can specify C(disk_id) of the disk you want to save. By default C(disk_id) is 0.
+      - I(NOTE)':' This operation will only be performed on the first VM (if more than one VM ID is passed)
+      - and the VM has to be in the C(poweredoff) state.
+      - Also this operation will fail if an image with the specified C(name) already exists.
+  persistent:
+    description:
+      - Create a private persistent copy of the template plus any image defined in DISK, and instantiate that copy.
+    default: no
+    type: bool
+  datastore_id:
+    description:
+      - ID of the datastore to use to create a new instance
+  datastore_name:
+    description:
+      - Name of the datastore to use to create a new instance
+author:
+  - "Milan Ilic (@ilicmilan)"
+  - "Jan Meerkamp (@meerkampdvv)"
+'''
+
+
+EXAMPLES = '''
+# Create a new instance
+- one_vm:
+    template_id: 90
+  register: result
+
+# Print VM properties
+- debug:
+    msg: result
+
+# Deploy a new VM on hold
+- one_vm:
+    template_name: 'app1_template'
+    vm_start_on_hold: 'True'
+
+# Deploy a new VM and set its name to 'foo'
+- one_vm:
+    template_name: 'app1_template'
+    attributes:
+      name: foo
+
+# Deploy a new VM and set its group_id and mode
+- one_vm:
+    template_id: 90
+    group_id: 16
+    mode: 660
+
+# Deploy a new VM as persistent
+- one_vm:
+    template_id: 90
+    persistent: yes
+
+# Change VM's permissions to 640
+- one_vm:
+    instance_ids: 5
+    mode: 640
+
+# Deploy 2 new instances and set memory, vcpu, disk_size and 3 networks
+- one_vm:
+    template_id: 15
+    disk_size: 35.2 GB
+    memory: 4 GB
+    vcpu: 4
+    count: 2
+    networks:
+      - NETWORK_ID: 27
+      - NETWORK: "default-network"
+        NETWORK_UNAME: "app-user"
+        SECURITY_GROUPS: "120,124"
+      - NETWORK_ID: 27
+        SECURITY_GROUPS: "10"
+
+# Deploy a new instance which uses a Template with two Disks
+- one_vm:
+    template_id: 42
+    disk_size:
+      - 35.2 GB
+      - 50 GB
+    memory: 4 GB
+    vcpu: 4
+    count: 1
+    networks:
+      - NETWORK_ID: 27
+
+# Deploy a new instance with attribute 'bar: bar1' and set its name to 'foo'
+- one_vm:
+    template_id: 53
+    attributes:
+      name: foo
+      bar: bar1
+
+# Enforce that 2 instances with attributes 'foo1: app1' and 'foo2: app2' are deployed
+- one_vm:
+    template_id: 53
+    attributes:
+      foo1: app1
+      foo2: app2
+    exact_count: 2
+    count_attributes:
+      foo1: app1
+      foo2: app2
+
+# Enforce that 4 instances with an attribute 'bar' are deployed
+- one_vm:
+    template_id: 53
+    attributes:
+      name: app
+      bar: bar2
+    exact_count: 4
+    count_attributes:
+      bar:
+
+# Deploy 2 new instances with attribute 'foo: bar' and labels 'app1' and 'app2' and names in format 'fooapp-##'
+# Names will be: fooapp-00 and fooapp-01
+- one_vm:
+    template_id: 53
+    attributes:
+      name: fooapp-##
+      foo: bar
+    labels:
+      - app1
+      - app2
+    count: 2
+
+# Deploy 2 new instances with attribute 'app: app1' and names in format 'fooapp-###'
+# Names will be: fooapp-002 and fooapp-003
+- one_vm:
+    template_id: 53
+    attributes:
+      name: fooapp-###
+      app: app1
+    count: 2
+
+# Reboot all instances with name in format 'fooapp-#'
+# Instances 'fooapp-00', 'fooapp-01', 'fooapp-002' and 'fooapp-003' will be rebooted
+- one_vm:
+    attributes:
+      name: fooapp-#
+    state: rebooted
+
+# Enforce that only 1 instance with name in format 'fooapp-#' is deployed
+# The task will delete the oldest instances, so only 'fooapp-003' will remain
+- one_vm:
+    template_id: 53
+    exact_count: 1
+    count_attributes:
+      name: fooapp-#
+
+# Deploy a new instance with a network
+- one_vm:
+    template_id: 53
+    networks:
+      - NETWORK_ID: 27
+  register: vm
+
+# Wait for SSH to come up
+- wait_for_connection:
+  delegate_to: '{{ vm.instances[0].networks[0].ip }}'
+
+# Terminate VMs by ids
+- one_vm:
+    instance_ids:
+      - 153
+      - 160
+    state: absent
+
+# Reboot all VMs that have labels 'foo' and 'app1'
+- one_vm:
+    labels:
+      - foo
+      - app1
+    state: rebooted
+
+# Fetch all VMs that have name 'foo' and attribute 'app: bar'
+- one_vm:
+    attributes:
+      name: foo
+      app: bar
+  register: results
+
+# Deploy 2 new instances with labels 'foo1' and 'foo2'
+- one_vm:
+    template_name: app_template
+    labels:
+ - foo1 + - foo2 + count: 2 + +# Enforce that only 1 instance with label 'foo1' will be running +- one_vm: + template_name: app_template + labels: + - foo1 + exact_count: 1 + count_labels: + - foo1 + +# Terminate all instances that have attribute foo +- one_vm: + template_id: 53 + exact_count: 0 + count_attributes: + foo: + +# Power-off the VM and save VM's disk with id=0 to the image with name 'foo-image' +- one_vm: + instance_ids: 351 + state: poweredoff + disk_saveas: + name: foo-image + +# Save VM's disk with id=1 to the image with name 'bar-image' +- one_vm: + instance_ids: 351 + disk_saveas: + name: bar-image + disk_id: 1 +''' + +RETURN = ''' +instances_ids: + description: a list of instances ids whose state is changed or which are fetched with C(instance_ids) option. + type: list + returned: success + sample: [ 1234, 1235 ] +instances: + description: a list of instances info whose state is changed or which are fetched with C(instance_ids) option. + type: complex + returned: success + contains: + vm_id: + description: vm id + type: int + sample: 153 + vm_name: + description: vm name + type: str + sample: foo + template_id: + description: vm's template id + type: int + sample: 153 + group_id: + description: vm's group id + type: int + sample: 1 + group_name: + description: vm's group name + type: str + sample: one-users + owner_id: + description: vm's owner id + type: int + sample: 143 + owner_name: + description: vm's owner name + type: str + sample: app-user + mode: + description: vm's mode + type: str + returned: success + sample: 660 + state: + description: state of an instance + type: str + sample: ACTIVE + lcm_state: + description: lcm state of an instance that is only relevant when the state is ACTIVE + type: str + sample: RUNNING + cpu: + description: Percentage of CPU divided by 100 + type: float + sample: 0.2 + vcpu: + description: Number of CPUs (cores) + type: int + sample: 2 + memory: + description: The size of the memory in MB + type: str + sample: 4096 MB + disk_size: + description: The size of the disk in MB + type: str + sample: 20480 MB + networks: + description: a list of dictionaries with info about IP, NAME, MAC, SECURITY_GROUPS for each NIC + type: list + sample: [ + { + "ip": "10.120.5.33", + "mac": "02:00:0a:78:05:21", + "name": "default-test-private", + "security_groups": "0,10" + }, + { + "ip": "10.120.5.34", + "mac": "02:00:0a:78:05:22", + "name": "default-test-private", + "security_groups": "0" + } + ] + uptime_h: + description: Uptime of the instance in hours + type: int + sample: 35 + labels: + description: A list of string labels that are associated with the instance + type: list + sample: [ + "foo", + "spec-label" + ] + attributes: + description: A dictionary of key/values attributes that are associated with the instance + type: dict + sample: { + "HYPERVISOR": "kvm", + "LOGO": "images/logos/centos.png", + "TE_GALAXY": "bar", + "USER_INPUTS": null + } +tagged_instances: + description: + - A list of instances info based on a specific attributes and/or + - labels that are specified with C(count_attributes) and C(count_labels) + - options. 
+ type: complex + returned: success + contains: + vm_id: + description: vm id + type: int + sample: 153 + vm_name: + description: vm name + type: str + sample: foo + template_id: + description: vm's template id + type: int + sample: 153 + group_id: + description: vm's group id + type: int + sample: 1 + group_name: + description: vm's group name + type: str + sample: one-users + owner_id: + description: vm's user id + type: int + sample: 143 + owner_name: + description: vm's user name + type: str + sample: app-user + mode: + description: vm's mode + type: str + returned: success + sample: 660 + state: + description: state of an instance + type: str + sample: ACTIVE + lcm_state: + description: lcm state of an instance that is only relevant when the state is ACTIVE + type: str + sample: RUNNING + cpu: + description: Percentage of CPU divided by 100 + type: float + sample: 0.2 + vcpu: + description: Number of CPUs (cores) + type: int + sample: 2 + memory: + description: The size of the memory in MB + type: str + sample: 4096 MB + disk_size: + description: The size of the disk in MB + type: list + sample: [ + "20480 MB", + "10240 MB" + ] + networks: + description: a list of dictionaries with info about IP, NAME, MAC, SECURITY_GROUPS for each NIC + type: list + sample: [ + { + "ip": "10.120.5.33", + "mac": "02:00:0a:78:05:21", + "name": "default-test-private", + "security_groups": "0,10" + }, + { + "ip": "10.120.5.34", + "mac": "02:00:0a:78:05:22", + "name": "default-test-private", + "security_groups": "0" + } + ] + uptime_h: + description: Uptime of the instance in hours + type: int + sample: 35 + labels: + description: A list of string labels that are associated with the instance + type: list + sample: [ + "foo", + "spec-label" + ] + attributes: + description: A dictionary of key/values attributes that are associated with the instance + type: dict + sample: { + "HYPERVISOR": "kvm", + "LOGO": "images/logos/centos.png", + "TE_GALAXY": "bar", + "USER_INPUTS": null + } +''' + +try: + import pyone + HAS_PYONE = True +except ImportError: + HAS_PYONE = False + +from ansible.module_utils.basic import AnsibleModule +import os + + +def get_template(module, client, predicate): + + pool = client.templatepool.info(-2, -1, -1, -1) + # Filter -2 means fetch all templates user can Use + found = 0 + found_template = None + template_name = '' + + for template in pool.VMTEMPLATE: + if predicate(template): + found = found + 1 + found_template = template + template_name = template.NAME + + if found == 0: + return None + elif found > 1: + module.fail_json(msg='There are more templates with name: ' + template_name) + return found_template + + +def get_template_by_name(module, client, template_name): + return get_template(module, client, lambda template: (template.NAME == template_name)) + + +def get_template_by_id(module, client, template_id): + return get_template(module, client, lambda template: (template.ID == template_id)) + + +def get_template_id(module, client, requested_id, requested_name): + template = get_template_by_id(module, client, requested_id) if requested_id is not None else get_template_by_name(module, client, requested_name) + if template: + return template.ID + else: + return None + + +def get_datastore(module, client, predicate): + pool = client.datastorepool.info() + found = 0 + found_datastore = None + datastore_name = '' + + for datastore in pool.DATASTORE: + if predicate(datastore): + found = found + 1 + found_datastore = datastore + datastore_name = datastore.NAME + + if found == 0: + return 
None
+    elif found > 1:
+        module.fail_json(msg='There is more than one datastore with name: ' + datastore_name)
+    return found_datastore
+
+
+def get_datastore_by_name(module, client, datastore_name):
+    return get_datastore(module, client, lambda datastore: (datastore.NAME == datastore_name))
+
+
+def get_datastore_by_id(module, client, datastore_id):
+    return get_datastore(module, client, lambda datastore: (datastore.ID == datastore_id))
+
+
+def get_datastore_id(module, client, requested_id, requested_name):
+    # Use an explicit `is not None` check so that datastore id 0 is not silently ignored
+    datastore = get_datastore_by_id(module, client, requested_id) if requested_id is not None else get_datastore_by_name(module, client, requested_name)
+    if datastore:
+        return datastore.ID
+    else:
+        return None
+
+
+def get_vm_by_id(client, vm_id):
+    try:
+        vm = client.vm.info(int(vm_id))
+    except Exception:
+        # The VM either does not exist or could not be fetched
+        return None
+    return vm
+
+
+def get_vms_by_ids(module, client, state, ids):
+    vms = []
+
+    for vm_id in ids:
+        vm = get_vm_by_id(client, vm_id)
+        if vm is None and state != 'absent':
+            module.fail_json(msg='There is no VM with id=' + str(vm_id))
+        vms.append(vm)
+
+    return vms
+
+
+def get_vm_info(client, vm):
+
+    vm = client.vm.info(vm.ID)
+
+    networks_info = []
+
+    disk_size = []
+    if 'DISK' in vm.TEMPLATE:
+        if isinstance(vm.TEMPLATE['DISK'], list):
+            for disk in vm.TEMPLATE['DISK']:
+                disk_size.append(disk['SIZE'] + ' MB')
+        else:
+            disk_size.append(vm.TEMPLATE['DISK']['SIZE'] + ' MB')
+
+    if 'NIC' in vm.TEMPLATE:
+        if isinstance(vm.TEMPLATE['NIC'], list):
+            for nic in vm.TEMPLATE['NIC']:
+                networks_info.append({'ip': nic['IP'], 'mac': nic['MAC'], 'name': nic['NETWORK'], 'security_groups': nic['SECURITY_GROUPS']})
+        else:
+            networks_info.append(
+                {'ip': vm.TEMPLATE['NIC']['IP'], 'mac': vm.TEMPLATE['NIC']['MAC'],
+                 'name': vm.TEMPLATE['NIC']['NETWORK'], 'security_groups': vm.TEMPLATE['NIC']['SECURITY_GROUPS']})
+    import time
+
+    current_time = time.localtime()
+    vm_start_time = time.localtime(vm.STIME)
+
+    vm_uptime = time.mktime(current_time) - time.mktime(vm_start_time)
+    vm_uptime /= (60 * 60)
+
+    permissions_str = parse_vm_permissions(client, vm)
+
+    # LCM_STATE is the VM's sub-state and is only relevant when STATE is ACTIVE
+    vm_lcm_state = None
+    if vm.STATE == VM_STATES.index('ACTIVE'):
+        vm_lcm_state = LCM_STATES[vm.LCM_STATE]
+
+    vm_labels, vm_attributes = get_vm_labels_and_attributes_dict(client, vm.ID)
+
+    info = {
+        'template_id': int(vm.TEMPLATE['TEMPLATE_ID']),
+        'vm_id': vm.ID,
+        'vm_name': vm.NAME,
+        'state': VM_STATES[vm.STATE],
+        'lcm_state': vm_lcm_state,
+        'owner_name': vm.UNAME,
+        'owner_id': vm.UID,
+        'networks': networks_info,
+        'disk_size': disk_size,
+        'memory': vm.TEMPLATE['MEMORY'] + ' MB',
+        'vcpu': vm.TEMPLATE['VCPU'],
+        'cpu': vm.TEMPLATE['CPU'],
+        'group_name': vm.GNAME,
+        'group_id': vm.GID,
+        'uptime_h': int(vm_uptime),
+        'attributes': vm_attributes,
+        'mode': permissions_str,
+        'labels': vm_labels
+    }
+
+    return info
+
+
+def parse_vm_permissions(client, vm):
+    vm_PERMISSIONS = client.vm.info(vm.ID).PERMISSIONS
+
+    # Each class (owner/group/other) is encoded as one octal digit: USE=4, MANAGE=2, ADMIN=1
+    owner_octal = int(vm_PERMISSIONS.OWNER_U) * 4 + int(vm_PERMISSIONS.OWNER_M) * 2 + int(vm_PERMISSIONS.OWNER_A)
+    group_octal = int(vm_PERMISSIONS.GROUP_U) * 4 + int(vm_PERMISSIONS.GROUP_M) * 2 + int(vm_PERMISSIONS.GROUP_A)
+    other_octal = int(vm_PERMISSIONS.OTHER_U) * 4 + int(vm_PERMISSIONS.OTHER_M) * 2 + int(vm_PERMISSIONS.OTHER_A)
+
+    permissions = str(owner_octal) + str(group_octal) + str(other_octal)
+
+    return permissions
+
+
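+# Editor's note: a quick sketch of the permission encoding used by
+# parse_vm_permissions() above and set_vm_permissions() below. The three
+# octal digits map to nine bits in USE/MANAGE/ADMIN order for owner, group
+# and other (the sample values here are hypothetical):
+#
+#     parse_vm_permissions(client, vm)       # -> '660'
+#     bin(int('660', base=8))[2:].zfill(9)    # -> '110110000'
+#     # owner: USE+MANAGE, group: USE+MANAGE, other: no rights
+
+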
+def set_vm_permissions(module, client, vms, permissions):
+    changed = False
+
+    for vm in vms:
+        vm = client.vm.info(vm.ID)
+        old_permissions = parse_vm_permissions(client, vm)
+        changed = changed or old_permissions != permissions
+
+        if not module.check_mode and old_permissions != permissions:
+            # Expand the three octal digits into nine permission bits, zero-padding
+            # on the left so that a mode such as '077' does not produce a short
+            # list and an IndexError below: '600' -> '110000000'
+            permissions_str = bin(int(permissions, base=8))[2:].zfill(9)
+            mode_bits = [int(d) for d in permissions_str]
+            try:
+                client.vm.chmod(
+                    vm.ID, mode_bits[0], mode_bits[1], mode_bits[2], mode_bits[3], mode_bits[4], mode_bits[5], mode_bits[6], mode_bits[7], mode_bits[8])
+            except pyone.OneAuthorizationException:
+                module.fail_json(msg="Failed to change permissions; note that any instances that were deployed are still present.")
+
+    return changed
+
+
+def set_vm_ownership(module, client, vms, owner_id, group_id):
+    changed = False
+
+    for vm in vms:
+        vm = client.vm.info(vm.ID)
+        if owner_id is None:
+            owner_id = vm.UID
+        if group_id is None:
+            group_id = vm.GID
+
+        changed = changed or owner_id != vm.UID or group_id != vm.GID
+
+        if not module.check_mode and (owner_id != vm.UID or group_id != vm.GID):
+            try:
+                client.vm.chown(vm.ID, owner_id, group_id)
+            except pyone.OneAuthorizationException:
+                module.fail_json(msg="Failed to change ownership; note that any instances that were deployed are still present.")
+
+    return changed
+
+
+def get_size_in_MB(module, size_str):
+
+    SYMBOLS = ['B', 'KB', 'MB', 'GB', 'TB']
+
+    s = size_str
+    init = size_str
+    num = ""
+    # Consume the leading numeric part (digits and a decimal point)
+    while s and (s[0:1].isdigit() or s[0:1] == '.'):
+        num += s[0]
+        s = s[1:]
+    num = float(num)
+    symbol = s.strip()
+
+    if symbol not in SYMBOLS:
+        module.fail_json(msg="Cannot interpret %r %r %r" % (init, symbol, num))
+
+    prefix = {'B': 1}
+
+    for i, sym in enumerate(SYMBOLS[1:]):
+        prefix[sym] = 1 << (i + 1) * 10
+
+    size_in_bytes = int(num * prefix[symbol])
+    size_in_MB = size_in_bytes / (1024 * 1024)
+
+    return size_in_MB
+
+
+def create_disk_str(module, client, template_id, disk_size_list):
+
+    if not disk_size_list:
+        return ''
+
+    template = client.template.info(template_id)
+    if isinstance(template.TEMPLATE['DISK'], list):
+        # Check that the number of requested sizes matches the number of disks
+        if len(template.TEMPLATE['DISK']) != len(disk_size_list):
+            module.fail_json(msg='This template has ' + str(len(template.TEMPLATE['DISK'])) + ' disks but you defined ' + str(len(disk_size_list)))
+        result = ''
+        index = 0
+        for DISKS in template.TEMPLATE['DISK']:
+            disk = {}
+            diskresult = ''
+            # Gather all attributes of the existing disk, e.g. IMAGE_ID, ...
+            for key, value in DISKS.items():
+                disk[key] = value
+            # Copy every disk attribute except the size attribute
+            diskresult += 'DISK = [' + ','.join('{key}="{val}"'.format(key=key, val=val) for key, val in disk.items() if key != 'SIZE')
+            # Set the disk size
+            diskresult += ', SIZE=' + str(int(get_size_in_MB(module, disk_size_list[index]))) + ']\n'
+            result += diskresult
+            index += 1
+    else:
+        if len(disk_size_list) > 1:
+            module.fail_json(msg='This template has one disk but you defined ' + str(len(disk_size_list)))
+        disk = {}
+        # Gather all attributes of the existing disk, e.g. IMAGE_ID, ...
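+        # Editor's note: a worked (hypothetical) example of the string built
+        # below -- for a template disk {'IMAGE_ID': '42', 'SIZE': '10240'} and
+        # disk_size == ['20 GB'], the emitted template fragment is:
+        #     DISK = [IMAGE_ID="42", SIZE=20480]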
+ for key, value in template.TEMPLATE['DISK'].items(): + disk[key] = value + # copy disk attributes if it is not the size attribute + result = 'DISK = [' + ','.join('{key}="{val}"'.format(key=key, val=val) for key, val in disk.items() if key != 'SIZE') + # Set the Disk Size + result += ', SIZE=' + str(int(get_size_in_MB(module, disk_size_list[0]))) + ']\n' + + return result + + +def create_attributes_str(attributes_dict, labels_list): + + attributes_str = '' + + if labels_list: + attributes_str += 'LABELS="' + ','.join('{label}'.format(label=label) for label in labels_list) + '"\n' + if attributes_dict: + attributes_str += '\n'.join('{key}="{val}"'.format(key=key.upper(), val=val) for key, val in attributes_dict.items()) + '\n' + + return attributes_str + + +def create_nics_str(network_attrs_list): + nics_str = '' + + for network in network_attrs_list: + # Packing key-value dict in string with format key="value", key="value" + network_str = ','.join('{key}="{val}"'.format(key=key, val=val) for key, val in network.items()) + nics_str = nics_str + 'NIC = [' + network_str + ']\n' + + return nics_str + + +def create_vm(module, client, template_id, attributes_dict, labels_list, disk_size, network_attrs_list, vm_start_on_hold, vm_persistent): + + if attributes_dict: + vm_name = attributes_dict.get('NAME', '') + + disk_str = create_disk_str(module, client, template_id, disk_size) + vm_extra_template_str = create_attributes_str(attributes_dict, labels_list) + create_nics_str(network_attrs_list) + disk_str + try: + vm_id = client.template.instantiate(template_id, vm_name, vm_start_on_hold, vm_extra_template_str, vm_persistent) + except pyone.OneException as e: + module.fail_json(msg=str(e)) + vm = get_vm_by_id(client, vm_id) + + return get_vm_info(client, vm) + + +def generate_next_index(vm_filled_indexes_list, num_sign_cnt): + counter = 0 + cnt_str = str(counter).zfill(num_sign_cnt) + + while cnt_str in vm_filled_indexes_list: + counter = counter + 1 + cnt_str = str(counter).zfill(num_sign_cnt) + + return cnt_str + + +def get_vm_labels_and_attributes_dict(client, vm_id): + vm_USER_TEMPLATE = client.vm.info(vm_id).USER_TEMPLATE + + attrs_dict = {} + labels_list = [] + + for key, value in vm_USER_TEMPLATE.items(): + if key != 'LABELS': + attrs_dict[key] = value + else: + if key is not None: + labels_list = value.split(',') + + return labels_list, attrs_dict + + +def get_all_vms_by_attributes(client, attributes_dict, labels_list): + pool = client.vmpool.info(-2, -1, -1, -1).VM + vm_list = [] + name = '' + if attributes_dict: + name = attributes_dict.pop('NAME', '') + + if name != '': + base_name = name[:len(name) - name.count('#')] + # Check does the name have indexed format + with_hash = name.endswith('#') + + for vm in pool: + if vm.NAME.startswith(base_name): + if with_hash and vm.NAME[len(base_name):].isdigit(): + # If the name has indexed format and after base_name it has only digits it'll be matched + vm_list.append(vm) + elif not with_hash and vm.NAME == name: + # If the name is not indexed it has to be same + vm_list.append(vm) + pool = vm_list + + import copy + + vm_list = copy.copy(pool) + + for vm in pool: + remove_list = [] + vm_labels_list, vm_attributes_dict = get_vm_labels_and_attributes_dict(client, vm.ID) + + if attributes_dict and len(attributes_dict) > 0: + for key, val in attributes_dict.items(): + if key in vm_attributes_dict: + if val and vm_attributes_dict[key] != val: + remove_list.append(vm) + break + else: + remove_list.append(vm) + break + vm_list = 
list(set(vm_list).difference(set(remove_list))) + + remove_list = [] + if labels_list and len(labels_list) > 0: + for label in labels_list: + if label not in vm_labels_list: + remove_list.append(vm) + break + vm_list = list(set(vm_list).difference(set(remove_list))) + + return vm_list + + +def create_count_of_vms( + module, client, template_id, count, attributes_dict, labels_list, disk_size, network_attrs_list, wait, wait_timeout, vm_start_on_hold, vm_persistent): + new_vms_list = [] + + vm_name = '' + if attributes_dict: + vm_name = attributes_dict.get('NAME', '') + + if module.check_mode: + return True, [], [] + + # Create list of used indexes + vm_filled_indexes_list = None + num_sign_cnt = vm_name.count('#') + if vm_name != '' and num_sign_cnt > 0: + vm_list = get_all_vms_by_attributes(client, {'NAME': vm_name}, None) + base_name = vm_name[:len(vm_name) - num_sign_cnt] + vm_name = base_name + # Make list which contains used indexes in format ['000', '001',...] + vm_filled_indexes_list = list((vm.NAME[len(base_name):].zfill(num_sign_cnt)) for vm in vm_list) + + while count > 0: + new_vm_name = vm_name + # Create indexed name + if vm_filled_indexes_list is not None: + next_index = generate_next_index(vm_filled_indexes_list, num_sign_cnt) + vm_filled_indexes_list.append(next_index) + new_vm_name += next_index + # Update NAME value in the attributes in case there is index + attributes_dict['NAME'] = new_vm_name + new_vm_dict = create_vm(module, client, template_id, attributes_dict, labels_list, disk_size, network_attrs_list, vm_start_on_hold, vm_persistent) + new_vm_id = new_vm_dict.get('vm_id') + new_vm = get_vm_by_id(client, new_vm_id) + new_vms_list.append(new_vm) + count -= 1 + + if vm_start_on_hold: + if wait: + for vm in new_vms_list: + wait_for_hold(module, client, vm, wait_timeout) + else: + if wait: + for vm in new_vms_list: + wait_for_running(module, client, vm, wait_timeout) + + return True, new_vms_list, [] + + +def create_exact_count_of_vms(module, client, template_id, exact_count, attributes_dict, count_attributes_dict, + labels_list, count_labels_list, disk_size, network_attrs_list, hard, wait, wait_timeout, vm_start_on_hold, vm_persistent): + + vm_list = get_all_vms_by_attributes(client, count_attributes_dict, count_labels_list) + + vm_count_diff = exact_count - len(vm_list) + changed = vm_count_diff != 0 + + new_vms_list = [] + instances_list = [] + tagged_instances_list = vm_list + + if module.check_mode: + return changed, instances_list, tagged_instances_list + + if vm_count_diff > 0: + # Add more VMs + changed, instances_list, tagged_instances = create_count_of_vms(module, client, template_id, vm_count_diff, attributes_dict, + labels_list, disk_size, network_attrs_list, wait, wait_timeout, + vm_start_on_hold, vm_persistent) + + tagged_instances_list += instances_list + elif vm_count_diff < 0: + # Delete surplus VMs + old_vms_list = [] + + while vm_count_diff < 0: + old_vm = vm_list.pop(0) + old_vms_list.append(old_vm) + terminate_vm(module, client, old_vm, hard) + vm_count_diff += 1 + + if wait: + for vm in old_vms_list: + wait_for_done(module, client, vm, wait_timeout) + + instances_list = old_vms_list + # store only the remaining instances + old_vms_set = set(old_vms_list) + tagged_instances_list = [vm for vm in vm_list if vm not in old_vms_set] + + return changed, instances_list, tagged_instances_list + + +VM_STATES = ['INIT', 'PENDING', 'HOLD', 'ACTIVE', 'STOPPED', 'SUSPENDED', 'DONE', '', 'POWEROFF', 'UNDEPLOYED', 'CLONING', 'CLONING_FAILURE'] +LCM_STATES = 
['LCM_INIT', 'PROLOG', 'BOOT', 'RUNNING', 'MIGRATE', 'SAVE_STOP', + 'SAVE_SUSPEND', 'SAVE_MIGRATE', 'PROLOG_MIGRATE', 'PROLOG_RESUME', + 'EPILOG_STOP', 'EPILOG', 'SHUTDOWN', 'STATE13', 'STATE14', 'CLEANUP_RESUBMIT', 'UNKNOWN', 'HOTPLUG', 'SHUTDOWN_POWEROFF', + 'BOOT_UNKNOWN', 'BOOT_POWEROFF', 'BOOT_SUSPENDED', 'BOOT_STOPPED', 'CLEANUP_DELETE', 'HOTPLUG_SNAPSHOT', 'HOTPLUG_NIC', + 'HOTPLUG_SAVEAS', 'HOTPLUG_SAVEAS_POWEROFF', 'HOTPULG_SAVEAS_SUSPENDED', 'SHUTDOWN_UNDEPLOY'] + + +def wait_for_state(module, client, vm, wait_timeout, state_predicate): + import time + start_time = time.time() + + while (time.time() - start_time) < wait_timeout: + vm = client.vm.info(vm.ID) + state = vm.STATE + lcm_state = vm.LCM_STATE + + if state_predicate(state, lcm_state): + return vm + elif state not in [VM_STATES.index('INIT'), VM_STATES.index('PENDING'), VM_STATES.index('HOLD'), + VM_STATES.index('ACTIVE'), VM_STATES.index('CLONING'), VM_STATES.index('POWEROFF')]: + module.fail_json(msg='Action is unsuccessful. VM state: ' + VM_STATES[state]) + + time.sleep(1) + + module.fail_json(msg="Wait timeout has expired!") + + +def wait_for_running(module, client, vm, wait_timeout): + return wait_for_state(module, client, vm, wait_timeout, lambda state, + lcm_state: (state in [VM_STATES.index('ACTIVE')] and lcm_state in [LCM_STATES.index('RUNNING')])) + + +def wait_for_done(module, client, vm, wait_timeout): + return wait_for_state(module, client, vm, wait_timeout, lambda state, lcm_state: (state in [VM_STATES.index('DONE')])) + + +def wait_for_hold(module, client, vm, wait_timeout): + return wait_for_state(module, client, vm, wait_timeout, lambda state, lcm_state: (state in [VM_STATES.index('HOLD')])) + + +def wait_for_poweroff(module, client, vm, wait_timeout): + return wait_for_state(module, client, vm, wait_timeout, lambda state, lcm_state: (state in [VM_STATES.index('POWEROFF')])) + + +def terminate_vm(module, client, vm, hard=False): + changed = False + + if not vm: + return changed + + changed = True + + if not module.check_mode: + if hard: + client.vm.action('terminate-hard', vm.ID) + else: + client.vm.action('terminate', vm.ID) + + return changed + + +def terminate_vms(module, client, vms, hard): + changed = False + + for vm in vms: + changed = terminate_vm(module, client, vm, hard) or changed + + return changed + + +def poweroff_vm(module, client, vm, hard): + vm = client.vm.info(vm.ID) + changed = False + + lcm_state = vm.LCM_STATE + state = vm.STATE + + if lcm_state not in [LCM_STATES.index('SHUTDOWN'), LCM_STATES.index('SHUTDOWN_POWEROFF')] and state not in [VM_STATES.index('POWEROFF')]: + changed = True + + if changed and not module.check_mode: + if not hard: + client.vm.action('poweroff', vm.ID) + else: + client.vm.action('poweroff-hard', vm.ID) + + return changed + + +def poweroff_vms(module, client, vms, hard): + changed = False + + for vm in vms: + changed = poweroff_vm(module, client, vm, hard) or changed + + return changed + + +def reboot_vms(module, client, vms, wait_timeout, hard): + + if not module.check_mode: + # Firstly, power-off all instances + for vm in vms: + vm = client.vm.info(vm.ID) + lcm_state = vm.LCM_STATE + state = vm.STATE + if lcm_state not in [LCM_STATES.index('SHUTDOWN_POWEROFF')] and state not in [VM_STATES.index('POWEROFF')]: + poweroff_vm(module, client, vm, hard) + + # Wait for all to be power-off + for vm in vms: + wait_for_poweroff(module, client, vm, wait_timeout) + + for vm in vms: + resume_vm(module, client, vm) + + return True + + +def resume_vm(module, client, vm): 
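+    # Editor's note: in OpenNebula the 'resume' action covers several
+    # transitions back to RUNNING (e.g. from POWEROFF, SUSPENDED or
+    # UNDEPLOYED); reboot_vms() above relies on it after powering instances
+    # off. A hypothetical flow:
+    #     poweroff_vm(module, client, vm, hard=False)
+    #     wait_for_poweroff(module, client, vm, 300)
+    #     resume_vm(module, client, vm)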
+    vm = client.vm.info(vm.ID)
+    changed = False
+
+    lcm_state = vm.LCM_STATE
+    if lcm_state == LCM_STATES.index('SHUTDOWN_POWEROFF'):
+        module.fail_json(msg="Cannot perform action 'resume' because this action is not available "
+                             "for LCM_STATE: 'SHUTDOWN_POWEROFF'. Wait for the VM to shut down properly")
+    if lcm_state not in [LCM_STATES.index('RUNNING')]:
+        changed = True
+
+    if changed and not module.check_mode:
+        client.vm.action('resume', vm.ID)
+
+    return changed
+
+
+def resume_vms(module, client, vms):
+    changed = False
+
+    for vm in vms:
+        changed = resume_vm(module, client, vm) or changed
+
+    return changed
+
+
+def check_name_attribute(module, attributes):
+    if attributes.get("NAME"):
+        import re
+        if re.match(r'^[^#]+#*$', attributes.get("NAME")) is None:
+            module.fail_json(msg="Illegal 'NAME' attribute: '" + attributes.get("NAME") +
+                                 "'. The '#' sign is allowed only at the end of the name, and the name cannot consist of '#' signs only.")
+
+
+TEMPLATE_RESTRICTED_ATTRIBUTES = ["CPU", "VCPU", "OS", "FEATURES", "MEMORY", "DISK", "NIC", "INPUT", "GRAPHICS",
+                                  "CONTEXT", "CREATED_BY", "CPU_COST", "DISK_COST", "MEMORY_COST",
+                                  "TEMPLATE_ID", "VMID", "AUTOMATIC_DS_REQUIREMENTS", "DEPLOY_FOLDER", "LABELS"]
+
+
+def check_attributes(module, attributes):
+    for key in attributes.keys():
+        if key in TEMPLATE_RESTRICTED_ATTRIBUTES:
+            module.fail_json(msg='Restricted attribute `' + key + '` cannot be used when filtering VMs.')
+    # Check the format of the name attribute
+    check_name_attribute(module, attributes)
+
+
+def disk_save_as(module, client, vm, disk_saveas, wait_timeout):
+    if not disk_saveas.get('name'):
+        module.fail_json(msg="Key 'name' is required for 'disk_saveas' option")
+
+    image_name = disk_saveas.get('name')
+    disk_id = disk_saveas.get('disk_id', 0)
+
+    if not module.check_mode:
+        if vm.STATE != VM_STATES.index('POWEROFF'):
+            module.fail_json(msg="The 'disk_saveas' option can be used only when the VM is in 'POWEROFF' state")
+        try:
+            client.vm.disksaveas(vm.ID, disk_id, image_name, 'OS', -1)
+        except pyone.OneException as e:
+            module.fail_json(msg=str(e))
+        wait_for_poweroff(module, client, vm, wait_timeout)  # wait for the VM to leave the hotplug_saveas_poweroff state
+
+
+def get_connection_info(module):
+
+    url = module.params.get('api_url')
+    username = module.params.get('api_username')
+    password = module.params.get('api_password')
+
+    if not url:
+        url = os.environ.get('ONE_URL')
+
+    if not username:
+        username = os.environ.get('ONE_USERNAME')
+
+    if not password:
+        password = os.environ.get('ONE_PASSWORD')
+
+    # If neither a username nor a password was supplied, fall back to the ONE_AUTH file
+    if not username:
+        if not password:
+            authfile = os.environ.get('ONE_AUTH')
+            if authfile is None:
+                authfile = os.path.join(os.environ.get("HOME"), ".one", "one_auth")
+            try:
+                with open(authfile, "r") as fp:
+                    authstring = fp.read().rstrip()
+                username = authstring.split(":")[0]
+                password = authstring.split(":")[1]
+            except (OSError, IOError):
+                module.fail_json(msg=("Could not find or read ONE_AUTH file at '%s'" % authfile))
+            except Exception:
+                module.fail_json(msg=("An error occurred while reading the ONE_AUTH file at '%s'" % authfile))
+    if not url:
+        module.fail_json(msg="The OpenNebula API URL (api_url) is not specified")
+    from collections import namedtuple
+
+    auth_params = namedtuple('auth', ('url', 'username', 'password'))
+
+    return auth_params(url=url, username=username, password=password)
+
+
+def main():
+    fields = {
+        "api_url": {"required": False, "type": "str"},
+        "api_username": {"required": False, "type": "str"},
+        "api_password": {"required": False, "type": "str", "no_log":
True}, + "instance_ids": {"required": False, "aliases": ['ids'], "type": "list"}, + "template_name": {"required": False, "type": "str"}, + "template_id": {"required": False, "type": "int"}, + "vm_start_on_hold": {"default": False, "type": "bool"}, + "state": { + "default": "present", + "choices": ['present', 'absent', 'rebooted', 'poweredoff', 'running'], + "type": "str" + }, + "mode": {"required": False, "type": "str"}, + "owner_id": {"required": False, "type": "int"}, + "group_id": {"required": False, "type": "int"}, + "wait": {"default": True, "type": "bool"}, + "wait_timeout": {"default": 300, "type": "int"}, + "hard": {"default": False, "type": "bool"}, + "memory": {"required": False, "type": "str"}, + "cpu": {"required": False, "type": "float"}, + "vcpu": {"required": False, "type": "int"}, + "disk_size": {"required": False, "type": "list"}, + "datastore_name": {"required": False, "type": "str"}, + "datastore_id": {"required": False, "type": "int"}, + "networks": {"default": [], "type": "list"}, + "count": {"default": 1, "type": "int"}, + "exact_count": {"required": False, "type": "int"}, + "attributes": {"default": {}, "type": "dict"}, + "count_attributes": {"required": False, "type": "dict"}, + "labels": {"default": [], "type": "list"}, + "count_labels": {"required": False, "type": "list"}, + "disk_saveas": {"type": "dict"}, + "persistent": {"default": False, "type": "bool"} + } + + module = AnsibleModule(argument_spec=fields, + mutually_exclusive=[ + ['template_id', 'template_name', 'instance_ids'], + ['template_id', 'template_name', 'disk_saveas'], + ['instance_ids', 'count_attributes', 'count'], + ['instance_ids', 'count_labels', 'count'], + ['instance_ids', 'exact_count'], + ['instance_ids', 'attributes'], + ['instance_ids', 'labels'], + ['disk_saveas', 'attributes'], + ['disk_saveas', 'labels'], + ['exact_count', 'count'], + ['count', 'hard'], + ['instance_ids', 'cpu'], ['instance_ids', 'vcpu'], + ['instance_ids', 'memory'], ['instance_ids', 'disk_size'], + ['instance_ids', 'networks'], + ['persistent', 'disk_size'] + ], + supports_check_mode=True) + + if not HAS_PYONE: + module.fail_json(msg='This module requires pyone to work!') + + auth = get_connection_info(module) + params = module.params + instance_ids = params.get('instance_ids') + requested_template_name = params.get('template_name') + requested_template_id = params.get('template_id') + put_vm_on_hold = params.get('vm_start_on_hold') + state = params.get('state') + permissions = params.get('mode') + owner_id = params.get('owner_id') + group_id = params.get('group_id') + wait = params.get('wait') + wait_timeout = params.get('wait_timeout') + hard = params.get('hard') + memory = params.get('memory') + cpu = params.get('cpu') + vcpu = params.get('vcpu') + disk_size = params.get('disk_size') + requested_datastore_id = params.get('datastore_id') + requested_datastore_name = params.get('datastore_name') + networks = params.get('networks') + count = params.get('count') + exact_count = params.get('exact_count') + attributes = params.get('attributes') + count_attributes = params.get('count_attributes') + labels = params.get('labels') + count_labels = params.get('count_labels') + disk_saveas = params.get('disk_saveas') + persistent = params.get('persistent') + + if not (auth.username and auth.password): + module.warn("Credentials missing") + else: + one_client = pyone.OneServer(auth.url, session=auth.username + ':' + auth.password) + + if attributes: + attributes = dict((key.upper(), value) for key, value in attributes.items()) 
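+        # Editor's note: OpenNebula user-template keys are upper-case, so a
+        # (hypothetical) play passing {'name': 'app-vm-###', 'foo': 'bar'}
+        # arrives here as {'NAME': 'app-vm-###', 'FOO': 'bar'} before being
+        # validated below.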
+        check_attributes(module, attributes)
+
+    if count_attributes:
+        count_attributes = dict((key.upper(), value) for key, value in count_attributes.items())
+        if not attributes:
+            import copy
+            module.warn('When `count_attributes` is passed without the `attributes` option on deployment, `attributes` implicitly gets the same values.')
+            attributes = copy.copy(count_attributes)
+        check_attributes(module, count_attributes)
+
+    if count_labels and not labels:
+        module.warn('When `count_labels` is passed without the `labels` option on deployment, `labels` implicitly gets the same values.')
+        labels = count_labels
+
+    # Fetch template
+    template_id = None
+    if requested_template_id is not None or requested_template_name:
+        template_id = get_template_id(module, one_client, requested_template_id, requested_template_name)
+        if template_id is None:
+            if requested_template_id is not None:
+                module.fail_json(msg='There is no template with template_id: ' + str(requested_template_id))
+            elif requested_template_name:
+                module.fail_json(msg="There is no template with name: " + requested_template_name)
+
+    # Fetch datastore; check against None so that datastore id 0 is honoured
+    datastore_id = None
+    if requested_datastore_id is not None or requested_datastore_name:
+        datastore_id = get_datastore_id(module, one_client, requested_datastore_id, requested_datastore_name)
+        if datastore_id is None:
+            if requested_datastore_id is not None:
+                module.fail_json(msg='There is no datastore with datastore_id: ' + str(requested_datastore_id))
+            elif requested_datastore_name:
+                module.fail_json(msg="There is no datastore with name: " + requested_datastore_name)
+        else:
+            attributes['SCHED_DS_REQUIREMENTS'] = 'ID=' + str(datastore_id)
+
+    if exact_count and template_id is None:
+        module.fail_json(msg='Option `exact_count` requires `template_id` or `template_name`')
+
+    if exact_count is not None and not (count_attributes or count_labels):
+        module.fail_json(msg='Either `count_attributes` or `count_labels` has to be specified with option `exact_count`.')
+    if (count_attributes or count_labels) and exact_count is None:
+        module.fail_json(msg='Option `exact_count` has to be specified when either `count_attributes` or `count_labels` is used.')
+    if template_id is not None and state != 'present':
+        module.fail_json(msg="Only state 'present' is valid for the template")
+
+    if memory:
+        attributes['MEMORY'] = str(int(get_size_in_MB(module, memory)))
+    if cpu:
+        attributes['CPU'] = str(cpu)
+    if vcpu:
+        attributes['VCPU'] = str(vcpu)
+
+    if exact_count is not None and state != 'present':
+        module.fail_json(msg='The `exact_count` option is valid only for the `present` state')
+    if exact_count is not None and exact_count < 0:
+        module.fail_json(msg='`exact_count` cannot be less than 0')
+    if count <= 0:
+        module.fail_json(msg='`count` has to be greater than 0')
+
+    if permissions is not None:
+        import re
+        if re.match("^[0-7]{3}$", permissions) is None:
+            module.fail_json(msg="Option `mode` has to have exactly 3 digits and be in the octal format, e.g. 600")
+
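+    # Editor's note: a minimal (hypothetical) playbook sketch of the
+    # `exact_count` deployment path that is validated above and dispatched
+    # below; template name, count and attribute values are illustrative only:
+    #
+    #     - one_vm:
+    #         template_name: base-template
+    #         exact_count: 3
+    #         count_attributes:
+    #           APP: web
+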
600") + + if exact_count is not None: + # Deploy an exact count of VMs + changed, instances_list, tagged_instances_list = create_exact_count_of_vms(module, one_client, template_id, exact_count, attributes, + count_attributes, labels, count_labels, disk_size, + networks, hard, wait, wait_timeout, put_vm_on_hold, persistent) + vms = tagged_instances_list + elif template_id is not None and state == 'present': + # Deploy count VMs + changed, instances_list, tagged_instances_list = create_count_of_vms(module, one_client, template_id, count, + attributes, labels, disk_size, networks, wait, wait_timeout, + put_vm_on_hold, persistent) + # instances_list - new instances + # tagged_instances_list - all instances with specified `count_attributes` and `count_labels` + vms = instances_list + else: + # Fetch data of instances, or change their state + if not (instance_ids or attributes or labels): + module.fail_json(msg="At least one of `instance_ids`,`attributes`,`labels` must be passed!") + + if memory or cpu or vcpu or disk_size or networks: + module.fail_json(msg="Parameters as `memory`, `cpu`, `vcpu`, `disk_size` and `networks` you can only set when deploying a VM!") + + if hard and state not in ['rebooted', 'poweredoff', 'absent', 'present']: + module.fail_json(msg="The 'hard' option can be used only for one of these states: 'rebooted', 'poweredoff', 'absent' and 'present'") + + vms = [] + tagged = False + changed = False + + if instance_ids: + vms = get_vms_by_ids(module, one_client, state, instance_ids) + else: + tagged = True + vms = get_all_vms_by_attributes(one_client, attributes, labels) + + if len(vms) == 0 and state != 'absent' and state != 'present': + module.fail_json(msg='There are no instances with specified `instance_ids`, `attributes` and/or `labels`') + + if len(vms) == 0 and state == 'present' and not tagged: + module.fail_json(msg='There are no instances with specified `instance_ids`.') + + if tagged and state == 'absent': + module.fail_json(msg='Option `instance_ids` is required when state is `absent`.') + + if state == 'absent': + changed = terminate_vms(module, one_client, vms, hard) + elif state == 'rebooted': + changed = reboot_vms(module, one_client, vms, wait_timeout, hard) + elif state == 'poweredoff': + changed = poweroff_vms(module, one_client, vms, hard) + elif state == 'running': + changed = resume_vms(module, one_client, vms) + + instances_list = vms + tagged_instances_list = [] + + if permissions is not None: + changed = set_vm_permissions(module, one_client, vms, permissions) or changed + + if owner_id is not None or group_id is not None: + changed = set_vm_ownership(module, one_client, vms, owner_id, group_id) or changed + + if wait and not module.check_mode and state != 'present': + wait_for = { + 'absent': wait_for_done, + 'rebooted': wait_for_running, + 'poweredoff': wait_for_poweroff, + 'running': wait_for_running + } + for vm in vms: + if vm is not None: + wait_for[state](module, one_client, vm, wait_timeout) + + if disk_saveas is not None: + if len(vms) == 0: + module.fail_json(msg="There is no VM whose disk will be saved.") + disk_save_as(module, one_client, vms[0], disk_saveas, wait_timeout) + changed = True + + # instances - a list of instances info whose state is changed or which are fetched with C(instance_ids) option + instances = list(get_vm_info(one_client, vm) for vm in instances_list if vm is not None) + instances_ids = list(vm.ID for vm in instances_list if vm is not None) + # tagged_instances - A list of instances info based on a specific 
attributes and/or labels that are specified with C(count_attributes) and C(count_labels) + tagged_instances = list(get_vm_info(one_client, vm) for vm in tagged_instances_list if vm is not None) + + result = {'changed': changed, 'instances': instances, 'instances_ids': instances_ids, 'tagged_instances': tagged_instances} + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/openstack/os_flavor_facts.py b/plugins/modules/cloud/openstack/os_flavor_facts.py new file mode 120000 index 0000000000..5be269046d --- /dev/null +++ b/plugins/modules/cloud/openstack/os_flavor_facts.py @@ -0,0 +1 @@ +os_flavor_info.py \ No newline at end of file diff --git a/plugins/modules/cloud/openstack/os_flavor_info.py b/plugins/modules/cloud/openstack/os_flavor_info.py new file mode 100644 index 0000000000..a990a817df --- /dev/null +++ b/plugins/modules/cloud/openstack/os_flavor_info.py @@ -0,0 +1,238 @@ +#!/usr/bin/python + +# Copyright (c) 2015 IBM +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: os_flavor_info +short_description: Retrieve information about one or more flavors +author: "David Shrewsbury (@Shrews)" +description: + - Retrieve information about available OpenStack instance flavors. By default, + information about ALL flavors are retrieved. Filters can be applied to get + information for only matching flavors. For example, you can filter on the + amount of RAM available to the flavor, or the number of virtual CPUs + available to the flavor, or both. When specifying multiple filters, + *ALL* filters must match on a flavor before that flavor is returned as + a fact. + - This module was called C(os_flavor_facts) before Ansible 2.9, returning C(ansible_facts). + Note that the M(os_flavor_info) module no longer returns C(ansible_facts)! +notes: + - The result contains a list of unsorted flavors. +requirements: + - "python >= 2.7" + - "openstacksdk" +options: + name: + description: + - A flavor name. Cannot be used with I(ram) or I(vcpus) or I(ephemeral). + ram: + description: + - "A string used for filtering flavors based on the amount of RAM + (in MB) desired. This string accepts the following special values: + 'MIN' (return flavors with the minimum amount of RAM), and 'MAX' + (return flavors with the maximum amount of RAM)." + + - "A specific amount of RAM may also be specified. Any flavors with this + exact amount of RAM will be returned." + + - "A range of acceptable RAM may be given using a special syntax. Simply + prefix the amount of RAM with one of these acceptable range values: + '<', '>', '<=', '>='. These values represent less than, greater than, + less than or equal to, and greater than or equal to, respectively." + type: bool + default: 'no' + vcpus: + description: + - A string used for filtering flavors based on the number of virtual + CPUs desired. Format is the same as the I(ram) parameter. + type: bool + default: 'no' + limit: + description: + - Limits the number of flavors returned. All matching flavors are + returned by default. + ephemeral: + description: + - A string used for filtering flavors based on the amount of ephemeral + storage. Format is the same as the I(ram) parameter + type: bool + default: 'no' + availability_zone: + description: + - Ignored. 
Present for backwards compatibility +extends_documentation_fragment: +- openstack.cloud.openstack + +''' + +EXAMPLES = ''' +# Gather information about all available flavors +- os_flavor_info: + cloud: mycloud + register: result + +- debug: + msg: "{{ result.openstack_flavors }}" + +# Gather information for the flavor named "xlarge-flavor" +- os_flavor_info: + cloud: mycloud + name: "xlarge-flavor" + +# Get all flavors that have exactly 512 MB of RAM. +- os_flavor_info: + cloud: mycloud + ram: "512" + +# Get all flavors that have 1024 MB or more of RAM. +- os_flavor_info: + cloud: mycloud + ram: ">=1024" + +# Get a single flavor that has the minimum amount of RAM. Using the 'limit' +# option will guarantee only a single flavor is returned. +- os_flavor_info: + cloud: mycloud + ram: "MIN" + limit: 1 + +# Get all flavors with 1024 MB of RAM or more, AND exactly 2 virtual CPUs. +- os_flavor_info: + cloud: mycloud + ram: ">=1024" + vcpus: "2" + +# Get all flavors with 1024 MB of RAM or more, exactly 2 virtual CPUs, and +# less than 30gb of ephemeral storage. +- os_flavor_info: + cloud: mycloud + ram: ">=1024" + vcpus: "2" + ephemeral: "<30" +''' + + +RETURN = ''' +openstack_flavors: + description: Dictionary describing the flavors. + returned: On success. + type: complex + contains: + id: + description: Flavor ID. + returned: success + type: str + sample: "515256b8-7027-4d73-aa54-4e30a4a4a339" + name: + description: Flavor name. + returned: success + type: str + sample: "tiny" + disk: + description: Size of local disk, in GB. + returned: success + type: int + sample: 10 + ephemeral: + description: Ephemeral space size, in GB. + returned: success + type: int + sample: 10 + ram: + description: Amount of memory, in MB. + returned: success + type: int + sample: 1024 + swap: + description: Swap space size, in MB. + returned: success + type: int + sample: 100 + vcpus: + description: Number of virtual CPUs. + returned: success + type: int + sample: 2 + is_public: + description: Make flavor accessible to the public. 
+ returned: success + type: bool + sample: true +''' + + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.openstack.cloud.plugins.module_utils.openstack import openstack_full_argument_spec, openstack_module_kwargs, openstack_cloud_from_module + + +def main(): + argument_spec = openstack_full_argument_spec( + name=dict(required=False, default=None), + ram=dict(required=False, default=None), + vcpus=dict(required=False, default=None), + limit=dict(required=False, default=None, type='int'), + ephemeral=dict(required=False, default=None), + ) + module_kwargs = openstack_module_kwargs( + mutually_exclusive=[ + ['name', 'ram'], + ['name', 'vcpus'], + ['name', 'ephemeral'] + ] + ) + module = AnsibleModule(argument_spec, **module_kwargs) + is_old_facts = module._name == 'os_flavor_facts' + if is_old_facts: + module.deprecate("The 'os_flavor_facts' module has been renamed to 'os_flavor_info', " + "and the renamed one no longer returns ansible_facts", version='2.13') + + name = module.params['name'] + vcpus = module.params['vcpus'] + ram = module.params['ram'] + ephemeral = module.params['ephemeral'] + limit = module.params['limit'] + + filters = {} + if vcpus: + filters['vcpus'] = vcpus + if ram: + filters['ram'] = ram + if ephemeral: + filters['ephemeral'] = ephemeral + + sdk, cloud = openstack_cloud_from_module(module) + try: + if name: + flavors = cloud.search_flavors(filters={'name': name}) + + else: + flavors = cloud.list_flavors() + if filters: + flavors = cloud.range_search(flavors, filters) + + if limit is not None: + flavors = flavors[:limit] + + if is_old_facts: + module.exit_json(changed=False, + ansible_facts=dict(openstack_flavors=flavors)) + else: + module.exit_json(changed=False, + openstack_flavors=flavors) + + except sdk.exceptions.OpenStackCloudException as e: + module.fail_json(msg=str(e)) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/openstack/os_image_facts.py b/plugins/modules/cloud/openstack/os_image_facts.py new file mode 120000 index 0000000000..027dbfc4e2 --- /dev/null +++ b/plugins/modules/cloud/openstack/os_image_facts.py @@ -0,0 +1 @@ +os_image_info.py \ No newline at end of file diff --git a/plugins/modules/cloud/openstack/os_image_info.py b/plugins/modules/cloud/openstack/os_image_info.py new file mode 100644 index 0000000000..75c4272832 --- /dev/null +++ b/plugins/modules/cloud/openstack/os_image_info.py @@ -0,0 +1,196 @@ +#!/usr/bin/python + +# Copyright (c) 2015 Hewlett-Packard Development Company, L.P. +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +module: os_image_info +short_description: Retrieve information about an image within OpenStack. +author: "Davide Agnello (@dagnello)" +description: + - Retrieve information about a image image from OpenStack. + - This module was called C(os_image_facts) before Ansible 2.9, returning C(ansible_facts). + Note that the M(os_image_info) module no longer returns C(ansible_facts)! +requirements: + - "python >= 2.7" + - "openstacksdk" +options: + image: + description: + - Name or ID of the image + required: false + properties: + description: + - Dict of properties of the images used for query + type: dict + required: false + availability_zone: + description: + - Ignored. 
Present for backwards compatibility + required: false +extends_documentation_fragment: +- openstack.cloud.openstack + +''' + +EXAMPLES = ''' +- name: Gather information about a previously created image named image1 + os_image_info: + auth: + auth_url: https://identity.example.com + username: user + password: password + project_name: someproject + image: image1 + register: result + +- name: Show openstack information + debug: + msg: "{{ result.openstack_image }}" + +# Show all available Openstack images +- name: Retrieve all available Openstack images + os_image_info: + register: result + +- name: Show images + debug: + msg: "{{ result.openstack_image }}" + +# Show images matching requested properties +- name: Retrieve images having properties with desired values + os_image_facts: + properties: + some_property: some_value + OtherProp: OtherVal + +- name: Show images + debug: + msg: "{{ result.openstack_image }}" +''' + +RETURN = ''' +openstack_image: + description: has all the openstack information about the image + returned: always, but can be null + type: complex + contains: + id: + description: Unique UUID. + returned: success + type: str + name: + description: Name given to the image. + returned: success + type: str + status: + description: Image status. + returned: success + type: str + created_at: + description: Image created at timestamp. + returned: success + type: str + deleted: + description: Image deleted flag. + returned: success + type: bool + container_format: + description: Container format of the image. + returned: success + type: str + min_ram: + description: Min amount of RAM required for this image. + returned: success + type: int + disk_format: + description: Disk format of the image. + returned: success + type: str + updated_at: + description: Image updated at timestamp. + returned: success + type: str + properties: + description: Additional properties associated with the image. + returned: success + type: dict + min_disk: + description: Min amount of disk space required for this image. + returned: success + type: int + protected: + description: Image protected flag. + returned: success + type: bool + checksum: + description: Checksum for the image. + returned: success + type: str + owner: + description: Owner for the image. + returned: success + type: str + is_public: + description: Is public flag of the image. + returned: success + type: bool + deleted_at: + description: Image deleted at timestamp. + returned: success + type: str + size: + description: Size of the image. 
+ returned: success + type: int +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.openstack.cloud.plugins.module_utils.openstack import openstack_full_argument_spec, openstack_module_kwargs, openstack_cloud_from_module + + +def main(): + + argument_spec = openstack_full_argument_spec( + image=dict(required=False), + properties=dict(default=None, type='dict'), + ) + module_kwargs = openstack_module_kwargs() + module = AnsibleModule(argument_spec, **module_kwargs) + is_old_facts = module._name == 'os_image_facts' + if is_old_facts: + module.deprecate("The 'os_image_facts' module has been renamed to 'os_image_info', " + "and the renamed one no longer returns ansible_facts", version='2.13') + + sdk, cloud = openstack_cloud_from_module(module) + try: + if module.params['image']: + image = cloud.get_image(module.params['image']) + if is_old_facts: + module.exit_json(changed=False, ansible_facts=dict( + openstack_image=image)) + else: + module.exit_json(changed=False, openstack_image=image) + else: + images = cloud.search_images(filters=module.params['properties']) + if is_old_facts: + module.exit_json(changed=False, ansible_facts=dict( + openstack_image=images)) + else: + module.exit_json(changed=False, openstack_image=images) + + except sdk.exceptions.OpenStackCloudException as e: + module.fail_json(msg=str(e)) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/openstack/os_keystone_domain_facts.py b/plugins/modules/cloud/openstack/os_keystone_domain_facts.py new file mode 120000 index 0000000000..44663623b8 --- /dev/null +++ b/plugins/modules/cloud/openstack/os_keystone_domain_facts.py @@ -0,0 +1 @@ +os_keystone_domain_info.py \ No newline at end of file diff --git a/plugins/modules/cloud/openstack/os_keystone_domain_info.py b/plugins/modules/cloud/openstack/os_keystone_domain_info.py new file mode 100644 index 0000000000..f05e5b875d --- /dev/null +++ b/plugins/modules/cloud/openstack/os_keystone_domain_info.py @@ -0,0 +1,141 @@ +#!/usr/bin/python +# Copyright (c) 2016 Hewlett-Packard Enterprise Corporation +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: os_keystone_domain_info +short_description: Retrieve information about one or more OpenStack domains +extends_documentation_fragment: +- openstack.cloud.openstack + +author: "Ricardo Carrillo Cruz (@rcarrillocruz)" +description: + - Retrieve information about a one or more OpenStack domains + - This module was called C(os_keystone_domain_facts) before Ansible 2.9, returning C(ansible_facts). + Note that the M(os_keystone_domain_info) module no longer returns C(ansible_facts)! +requirements: + - "python >= 2.7" + - "sdk" +options: + name: + description: + - Name or ID of the domain + filters: + description: + - A dictionary of meta data to use for further filtering. Elements of + this dictionary may be additional dictionaries. + availability_zone: + description: + - Ignored. 
Present for backwards compatibility +''' + +EXAMPLES = ''' +# Gather information about previously created domain +- os_keystone_domain_info: + cloud: awesomecloud + register: result +- debug: + msg: "{{ result.openstack_domains }}" + +# Gather information about a previously created domain by name +- os_keystone_domain_info: + cloud: awesomecloud + name: demodomain + register: result +- debug: + msg: "{{ result.openstack_domains }}" + +# Gather information about a previously created domain with filter +- os_keystone_domain_info: + cloud: awesomecloud + name: demodomain + filters: + enabled: false + register: result +- debug: + msg: "{{ result.openstack_domains }}" +''' + + +RETURN = ''' +openstack_domains: + description: has all the OpenStack information about domains + returned: always, but can be null + type: complex + contains: + id: + description: Unique UUID. + returned: success + type: str + name: + description: Name given to the domain. + returned: success + type: str + description: + description: Description of the domain. + returned: success + type: str + enabled: + description: Flag to indicate if the domain is enabled. + returned: success + type: bool +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.openstack.cloud.plugins.module_utils.openstack import openstack_full_argument_spec, openstack_module_kwargs, openstack_cloud_from_module + + +def main(): + + argument_spec = openstack_full_argument_spec( + name=dict(required=False, default=None), + filters=dict(required=False, type='dict', default=None), + ) + module_kwargs = openstack_module_kwargs( + mutually_exclusive=[ + ['name', 'filters'], + ] + ) + module = AnsibleModule(argument_spec, **module_kwargs) + is_old_facts = module._name == 'os_keystone_domain_facts' + if is_old_facts: + module.deprecate("The 'os_keystone_domain_facts' module has been renamed to 'os_keystone_domain_info', " + "and the renamed one no longer returns ansible_facts", version='2.13') + + sdk, opcloud = openstack_cloud_from_module(module) + try: + name = module.params['name'] + filters = module.params['filters'] + + if name: + # Let's suppose user is passing domain ID + try: + domains = opcloud.get_domain(name) + except Exception: + domains = opcloud.search_domains(filters={'name': name}) + + else: + domains = opcloud.search_domains(filters) + + if is_old_facts: + module.exit_json(changed=False, ansible_facts=dict( + openstack_domains=domains)) + else: + module.exit_json(changed=False, openstack_domains=domains) + + except sdk.exceptions.OpenStackCloudException as e: + module.fail_json(msg=str(e)) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/openstack/os_networks_facts.py b/plugins/modules/cloud/openstack/os_networks_facts.py new file mode 120000 index 0000000000..a0ee4ca4ee --- /dev/null +++ b/plugins/modules/cloud/openstack/os_networks_facts.py @@ -0,0 +1 @@ +os_networks_info.py \ No newline at end of file diff --git a/plugins/modules/cloud/openstack/os_networks_info.py b/plugins/modules/cloud/openstack/os_networks_info.py new file mode 100644 index 0000000000..6fbd8877eb --- /dev/null +++ b/plugins/modules/cloud/openstack/os_networks_info.py @@ -0,0 +1,158 @@ +#!/usr/bin/python + +# Copyright (c) 2015 Hewlett-Packard Development Company, L.P. 
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: os_networks_info +short_description: Retrieve information about one or more OpenStack networks. +author: "Davide Agnello (@dagnello)" +description: + - Retrieve information about one or more networks from OpenStack. + - This module was called C(os_networks_facts) before Ansible 2.9, returning C(ansible_facts). + Note that the M(os_networks_info) module no longer returns C(ansible_facts)! +requirements: + - "python >= 2.7" + - "sdk" +options: + name: + description: + - Name or ID of the Network + required: false + filters: + description: + - A dictionary of meta data to use for further filtering. Elements of + this dictionary may be additional dictionaries. + required: false + availability_zone: + description: + - Ignored. Present for backwards compatibility + required: false +extends_documentation_fragment: +- openstack.cloud.openstack + +''' + +EXAMPLES = ''' +- name: Gather information about previously created networks + os_networks_info: + auth: + auth_url: https://identity.example.com + username: user + password: password + project_name: someproject + register: result + +- name: Show openstack networks + debug: + msg: "{{ result.openstack_networks }}" + +- name: Gather information about a previously created network by name + os_networks_info: + auth: + auth_url: https://identity.example.com + username: user + password: password + project_name: someproject + name: network1 + register: result + +- name: Show openstack networks + debug: + msg: "{{ result.openstack_networks }}" + +- name: Gather information about a previously created network with filter + # Note: name and filters parameters are Not mutually exclusive + os_networks_info: + auth: + auth_url: https://identity.example.com + username: user + password: password + project_name: someproject + filters: + tenant_id: 55e2ce24b2a245b09f181bf025724cbe + subnets: + - 057d4bdf-6d4d-4728-bb0f-5ac45a6f7400 + - 443d4dc0-91d4-4998-b21c-357d10433483 + register: result + +- name: Show openstack networks + debug: + msg: "{{ result.openstack_networks }}" +''' + +RETURN = ''' +openstack_networks: + description: has all the openstack information about the networks + returned: always, but can be null + type: complex + contains: + id: + description: Unique UUID. + returned: success + type: str + name: + description: Name given to the network. + returned: success + type: str + status: + description: Network status. + returned: success + type: str + subnets: + description: Subnet(s) included in this network. + returned: success + type: list + elements: str + tenant_id: + description: Tenant id associated with this network. + returned: success + type: str + shared: + description: Network shared flag. 
+ returned: success + type: bool +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.openstack.cloud.plugins.module_utils.openstack import openstack_full_argument_spec, openstack_cloud_from_module + + +def main(): + + argument_spec = openstack_full_argument_spec( + name=dict(required=False, default=None), + filters=dict(required=False, type='dict', default=None) + ) + module = AnsibleModule(argument_spec) + is_old_facts = module._name == 'os_networks_facts' + if is_old_facts: + module.deprecate("The 'os_networks_facts' module has been renamed to 'os_networks_info', " + "and the renamed one no longer returns ansible_facts", version='2.13') + + sdk, cloud = openstack_cloud_from_module(module) + try: + networks = cloud.search_networks(module.params['name'], + module.params['filters']) + if is_old_facts: + module.exit_json(changed=False, ansible_facts=dict( + openstack_networks=networks)) + else: + module.exit_json(changed=False, openstack_networks=networks) + + except sdk.exceptions.OpenStackCloudException as e: + module.fail_json(msg=str(e)) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/openstack/os_port_facts.py b/plugins/modules/cloud/openstack/os_port_facts.py new file mode 120000 index 0000000000..2960bca055 --- /dev/null +++ b/plugins/modules/cloud/openstack/os_port_facts.py @@ -0,0 +1 @@ +os_port_info.py \ No newline at end of file diff --git a/plugins/modules/cloud/openstack/os_port_info.py b/plugins/modules/cloud/openstack/os_port_info.py new file mode 100644 index 0000000000..6adffe44df --- /dev/null +++ b/plugins/modules/cloud/openstack/os_port_info.py @@ -0,0 +1,225 @@ +#!/usr/bin/python + +# Copyright (c) 2016 IBM +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +module: os_port_info +short_description: Retrieve information about ports within OpenStack. +author: "David Shrewsbury (@Shrews)" +description: + - Retrieve information about ports from OpenStack. + - This module was called C(os_port_facts) before Ansible 2.9, returning C(ansible_facts). + Note that the M(os_port_info) module no longer returns C(ansible_facts)! +requirements: + - "python >= 2.7" + - "openstacksdk" +options: + port: + description: + - Unique name or ID of a port. + filters: + description: + - A dictionary of meta data to use for further filtering. Elements + of this dictionary will be matched against the returned port + dictionaries. Matching is currently limited to strings within + the port dictionary, or strings within nested dictionaries. + availability_zone: + description: + - Ignored. Present for backwards compatibility +extends_documentation_fragment: +- openstack.cloud.openstack + +''' + +EXAMPLES = ''' +# Gather information about all ports +- os_port_info: + cloud: mycloud + register: result + +- debug: + msg: "{{ result.openstack_ports }}" + +# Gather information about a single port +- os_port_info: + cloud: mycloud + port: 6140317d-e676-31e1-8a4a-b1913814a471 + +# Gather information about all ports that have device_id set to a specific value +# and with a status of ACTIVE. +- os_port_info: + cloud: mycloud + filters: + device_id: 1038a010-3a37-4a9d-82ea-652f1da36597 + status: ACTIVE +''' + +RETURN = ''' +openstack_ports: + description: List of port dictionaries. 
A subset of the dictionary keys + listed below may be returned, depending on your cloud provider. + returned: always, but can be null + type: complex + contains: + admin_state_up: + description: The administrative state of the router, which is + up (true) or down (false). + returned: success + type: bool + sample: true + allowed_address_pairs: + description: A set of zero or more allowed address pairs. An + address pair consists of an IP address and MAC address. + returned: success + type: list + sample: [] + "binding:host_id": + description: The UUID of the host where the port is allocated. + returned: success + type: str + sample: "b4bd682d-234a-4091-aa5b-4b025a6a7759" + "binding:profile": + description: A dictionary the enables the application running on + the host to pass and receive VIF port-specific + information to the plug-in. + returned: success + type: dict + sample: {} + "binding:vif_details": + description: A dictionary that enables the application to pass + information about functions that the Networking API + provides. + returned: success + type: dict + sample: {"port_filter": true} + "binding:vif_type": + description: The VIF type for the port. + returned: success + type: dict + sample: "ovs" + "binding:vnic_type": + description: The virtual network interface card (vNIC) type that is + bound to the neutron port. + returned: success + type: str + sample: "normal" + device_id: + description: The UUID of the device that uses this port. + returned: success + type: str + sample: "b4bd682d-234a-4091-aa5b-4b025a6a7759" + device_owner: + description: The UUID of the entity that uses this port. + returned: success + type: str + sample: "network:router_interface" + dns_assignment: + description: DNS assignment information. + returned: success + type: list + dns_name: + description: DNS name + returned: success + type: str + sample: "" + extra_dhcp_opts: + description: A set of zero or more extra DHCP option pairs. + An option pair consists of an option value and name. + returned: success + type: list + sample: [] + fixed_ips: + description: The IP addresses for the port. Includes the IP address + and UUID of the subnet. + returned: success + type: list + id: + description: The UUID of the port. + returned: success + type: str + sample: "3ec25c97-7052-4ab8-a8ba-92faf84148de" + ip_address: + description: The IP address. + returned: success + type: str + sample: "127.0.0.1" + mac_address: + description: The MAC address. + returned: success + type: str + sample: "00:00:5E:00:53:42" + name: + description: The port name. + returned: success + type: str + sample: "port_name" + network_id: + description: The UUID of the attached network. + returned: success + type: str + sample: "dd1ede4f-3952-4131-aab6-3b8902268c7d" + port_security_enabled: + description: The port security status. The status is enabled (true) or disabled (false). + returned: success + type: bool + sample: false + security_groups: + description: The UUIDs of any attached security groups. + returned: success + type: list + status: + description: The port status. + returned: success + type: str + sample: "ACTIVE" + tenant_id: + description: The UUID of the tenant who owns the network. 
+            returned: success
+            type: str
+            sample: "51fce036d7984ba6af4f6c849f65ef00"
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.openstack.cloud.plugins.module_utils.openstack import openstack_full_argument_spec, openstack_module_kwargs, openstack_cloud_from_module
+
+
+def main():
+    argument_spec = openstack_full_argument_spec(
+        port=dict(required=False),
+        filters=dict(type='dict', required=False),
+    )
+    module_kwargs = openstack_module_kwargs()
+    module = AnsibleModule(argument_spec, **module_kwargs)
+    is_old_facts = module._name == 'os_port_facts'
+    if is_old_facts:
+        module.deprecate("The 'os_port_facts' module has been renamed to 'os_port_info', "
+                         "and the renamed one no longer returns ansible_facts", version='2.13')
+
+    port = module.params.get('port')
+    filters = module.params.get('filters')
+
+    sdk, cloud = openstack_cloud_from_module(module)
+    try:
+        ports = cloud.search_ports(port, filters)
+        if is_old_facts:
+            module.exit_json(changed=False, ansible_facts=dict(
+                openstack_ports=ports))
+        else:
+            module.exit_json(changed=False, openstack_ports=ports)
+
+    except sdk.exceptions.OpenStackCloudException as e:
+        module.fail_json(msg=str(e))
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/cloud/openstack/os_project_facts.py b/plugins/modules/cloud/openstack/os_project_facts.py
new file mode 120000
index 0000000000..dd28ce75a2
--- /dev/null
+++ b/plugins/modules/cloud/openstack/os_project_facts.py
@@ -0,0 +1 @@
+os_project_info.py
\ No newline at end of file
diff --git a/plugins/modules/cloud/openstack/os_project_info.py b/plugins/modules/cloud/openstack/os_project_info.py
new file mode 100644
index 0000000000..0f972bda4e
--- /dev/null
+++ b/plugins/modules/cloud/openstack/os_project_info.py
@@ -0,0 +1,167 @@
+#!/usr/bin/python
+# Copyright (c) 2016 Hewlett-Packard Enterprise Corporation
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: os_project_info
+short_description: Retrieve information about one or more OpenStack projects
+extends_documentation_fragment:
+- openstack.cloud.openstack
+
+author: "Ricardo Carrillo Cruz (@rcarrillocruz)"
+description:
+    - Retrieve information about one or more OpenStack projects
+    - This module was called C(os_project_facts) before Ansible 2.9, returning C(ansible_facts).
+      Note that the M(os_project_info) module no longer returns C(ansible_facts)!
+requirements:
+    - "python >= 2.7"
+    - "openstacksdk"
+options:
+    name:
+        description:
+            - Name or ID of the project
+        required: false
+    domain:
+        description:
+            - Name or ID of the domain containing the project if the cloud supports domains
+    filters:
+        description:
+            - A dictionary of meta data to use for further filtering. Elements of
+              this dictionary may be additional dictionaries.
+    availability_zone:
+        description:
+            - Ignored. Present for backwards compatibility
+'''
+
+EXAMPLES = '''
+# Gather information about previously created projects
+- os_project_info:
+    cloud: awesomecloud
+  register: result
+- debug:
+    msg: "{{ result.openstack_projects }}"
+
+# Gather information about a previously created project by name
+- os_project_info:
+    cloud: awesomecloud
+    name: demoproject
+  register: result
+- debug:
+    msg: "{{ result.openstack_projects }}"
+
+# Gather information about a previously created project in a specific domain
+- os_project_info:
+    cloud: awesomecloud
+    name: demoproject
+    domain: admindomain
+  register: result
+- debug:
+    msg: "{{ result.openstack_projects }}"
+
+# Gather information about a previously created project in a specific domain with filter
+- os_project_info:
+    cloud: awesomecloud
+    name: demoproject
+    domain: admindomain
+    filters:
+      enabled: False
+  register: result
+- debug:
+    msg: "{{ result.openstack_projects }}"
+'''
+
+
+RETURN = '''
+openstack_projects:
+    description: has all the OpenStack information about projects
+    returned: always, but can be null
+    type: complex
+    contains:
+        id:
+            description: Unique UUID.
+            returned: success
+            type: str
+        name:
+            description: Name given to the project.
+            returned: success
+            type: str
+        description:
+            description: Description of the project
+            returned: success
+            type: str
+        enabled:
+            description: Flag to indicate if the project is enabled
+            returned: success
+            type: bool
+        domain_id:
+            description: Domain ID containing the project (keystone v3 clouds only)
+            returned: success
+            type: str
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.openstack.cloud.plugins.module_utils.openstack import openstack_full_argument_spec, openstack_cloud_from_module
+
+
+def main():
+
+    argument_spec = openstack_full_argument_spec(
+        name=dict(required=False, default=None),
+        domain=dict(required=False, default=None),
+        filters=dict(required=False, type='dict', default=None),
+    )
+
+    module = AnsibleModule(argument_spec)
+    is_old_facts = module._name == 'os_project_facts'
+    if is_old_facts:
+        module.deprecate("The 'os_project_facts' module has been renamed to 'os_project_info', "
+                         "and the renamed one no longer returns ansible_facts", version='2.13')
+
+    sdk, opcloud = openstack_cloud_from_module(module)
+    try:
+        name = module.params['name']
+        domain = module.params['domain']
+        filters = module.params['filters']
+
+        if domain:
+            try:
+                # We assume admin is passing domain id
+                dom = opcloud.get_domain(domain)['id']
+                domain = dom
+            except Exception:
+                # If we fail, maybe admin is passing a domain name.
+                # Note that domains have unique names, just like id.
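+                # search_domains() returns a list of matching domains; with
+                # unique names it has at most one entry, whose id is used
+                # below (an empty result means the domain does not exist).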
+ dom = opcloud.search_domains(filters={'name': domain}) + if dom: + domain = dom[0]['id'] + else: + module.fail_json(msg='Domain name or ID does not exist') + + if not filters: + filters = {} + + filters['domain_id'] = domain + + projects = opcloud.search_projects(name, filters) + if is_old_facts: + module.exit_json(changed=False, ansible_facts=dict( + openstack_projects=projects)) + else: + module.exit_json(changed=False, openstack_projects=projects) + + except sdk.exceptions.OpenStackCloudException as e: + module.fail_json(msg=str(e)) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/openstack/os_server_facts.py b/plugins/modules/cloud/openstack/os_server_facts.py new file mode 120000 index 0000000000..5a4dd1d8d6 --- /dev/null +++ b/plugins/modules/cloud/openstack/os_server_facts.py @@ -0,0 +1 @@ +os_server_info.py \ No newline at end of file diff --git a/plugins/modules/cloud/openstack/os_server_info.py b/plugins/modules/cloud/openstack/os_server_info.py new file mode 100644 index 0000000000..aa33275404 --- /dev/null +++ b/plugins/modules/cloud/openstack/os_server_info.py @@ -0,0 +1,114 @@ +#!/usr/bin/python + +# Copyright (c) 2014 Hewlett-Packard Development Company, L.P. +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: os_server_info +short_description: Retrieve information about one or more compute instances +author: Monty (@emonty) +description: + - Retrieve information about server instances from OpenStack. + - This module was called C(os_server_facts) before Ansible 2.9, returning C(ansible_facts). + Note that the M(os_server_info) module no longer returns C(ansible_facts)! +notes: + - The result contains a list of servers. +requirements: + - "python >= 2.7" + - "openstacksdk" +options: + server: + description: + - restrict results to servers with names or UUID matching + this glob expression (e.g., ). + detailed: + description: + - when true, return additional detail about servers at the expense + of additional API calls. + type: bool + default: 'no' + filters: + description: + - restrict results to servers matching a dictionary of + filters + availability_zone: + description: + - Ignored. Present for backwards compatibility + all_projects: + description: + - Whether to list servers from all projects or just the current auth + scoped project. 
+        type: bool
+        default: 'no'
+extends_documentation_fragment:
+- openstack.cloud.openstack
+
+'''
+
+EXAMPLES = '''
+# Gather information about all servers named C(web*) that are in an active state:
+- os_server_info:
+    cloud: rax-dfw
+    server: web*
+    filters:
+      vm_state: active
+  register: result
+- debug:
+    msg: "{{ result.openstack_servers }}"
+'''
+
+import fnmatch
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.openstack.cloud.plugins.module_utils.openstack import openstack_full_argument_spec, openstack_module_kwargs, openstack_cloud_from_module
+
+
+def main():
+
+    argument_spec = openstack_full_argument_spec(
+        server=dict(required=False),
+        detailed=dict(required=False, type='bool', default=False),
+        filters=dict(required=False, type='dict', default=None),
+        all_projects=dict(required=False, type='bool', default=False),
+    )
+    module_kwargs = openstack_module_kwargs()
+    module = AnsibleModule(argument_spec, **module_kwargs)
+    is_old_facts = module._name == 'os_server_facts'
+    if is_old_facts:
+        module.deprecate("The 'os_server_facts' module has been renamed to 'os_server_info', "
+                         "and the renamed one no longer returns ansible_facts", version='2.13')
+
+    sdk, cloud = openstack_cloud_from_module(module)
+    try:
+        openstack_servers = cloud.search_servers(
+            detailed=module.params['detailed'], filters=module.params['filters'],
+            all_projects=module.params['all_projects'])
+
+        if module.params['server']:
+            # filter servers by name
+            pattern = module.params['server']
+            # TODO(mordred) This is handled by sdk now
+            openstack_servers = [server for server in openstack_servers
+                                 if fnmatch.fnmatch(server['name'], pattern) or fnmatch.fnmatch(server['id'], pattern)]
+        if is_old_facts:
+            module.exit_json(changed=False, ansible_facts=dict(
+                openstack_servers=openstack_servers))
+        else:
+            module.exit_json(changed=False, openstack_servers=openstack_servers)
+
+    except sdk.exceptions.OpenStackCloudException as e:
+        module.fail_json(msg=str(e))
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/cloud/openstack/os_subnets_facts.py b/plugins/modules/cloud/openstack/os_subnets_facts.py
new file mode 120000
index 0000000000..0e6c5f96c0
--- /dev/null
+++ b/plugins/modules/cloud/openstack/os_subnets_facts.py
@@ -0,0 +1 @@
+os_subnets_info.py
\ No newline at end of file
diff --git a/plugins/modules/cloud/openstack/os_subnets_info.py b/plugins/modules/cloud/openstack/os_subnets_info.py
new file mode 100644
index 0000000000..fc90ec14a0
--- /dev/null
+++ b/plugins/modules/cloud/openstack/os_subnets_info.py
@@ -0,0 +1,174 @@
+#!/usr/bin/python
+
+# Copyright (c) 2015 Hewlett-Packard Development Company, L.P.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: os_subnets_info
+short_description: Retrieve information about one or more OpenStack subnets.
+author: "Davide Agnello (@dagnello)"
+description:
+    - Retrieve information about one or more subnets from OpenStack.
+    - This module was called C(os_subnets_facts) before Ansible 2.9, returning C(ansible_facts).
+      Note that the M(os_subnets_info) module no longer returns C(ansible_facts)!
+requirements:
+    - "python >= 2.7"
+    - "openstacksdk"
+options:
+    name:
+        description:
+            - Name or ID of the subnet.
+            - Alias 'subnet' added in version 2.8.
+ required: false + aliases: ['subnet'] + filters: + description: + - A dictionary of meta data to use for further filtering. Elements of + this dictionary may be additional dictionaries. + required: false + availability_zone: + description: + - Ignored. Present for backwards compatibility + required: false +extends_documentation_fragment: +- openstack.cloud.openstack + +''' + +EXAMPLES = ''' +- name: Gather information about previously created subnets + os_subnets_info: + auth: + auth_url: https://identity.example.com + username: user + password: password + project_name: someproject + register: result + +- name: Show openstack subnets + debug: + msg: "{{ result.openstack_subnets }}" + +- name: Gather information about a previously created subnet by name + os_subnets_info: + auth: + auth_url: https://identity.example.com + username: user + password: password + project_name: someproject + name: subnet1 + register: result + +- name: Show openstack subnets + debug: + msg: "{{ result.openstack_subnets }}" + +- name: Gather information about a previously created subnet with filter + # Note: name and filters parameters are not mutually exclusive + os_subnets_info: + auth: + auth_url: https://identity.example.com + username: user + password: password + project_name: someproject + filters: + tenant_id: 55e2ce24b2a245b09f181bf025724cbe + register: result + +- name: Show openstack subnets + debug: + msg: "{{ result.openstack_subnets }}" +''' + +RETURN = ''' +openstack_subnets: + description: has all the openstack information about the subnets + returned: always, but can be null + type: complex + contains: + id: + description: Unique UUID. + returned: success + type: str + name: + description: Name given to the subnet. + returned: success + type: str + network_id: + description: Network ID this subnet belongs in. + returned: success + type: str + cidr: + description: Subnet's CIDR. + returned: success + type: str + gateway_ip: + description: Subnet's gateway ip. + returned: success + type: str + enable_dhcp: + description: DHCP enable flag for this subnet. + returned: success + type: bool + ip_version: + description: IP version for this subnet. + returned: success + type: int + tenant_id: + description: Tenant id associated with this subnet. + returned: success + type: str + dns_nameservers: + description: DNS name servers for this subnet. + returned: success + type: list + elements: str + allocation_pools: + description: Allocation pools associated with this subnet. 
+            returned: success
+            type: list
+            elements: dict
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.openstack.cloud.plugins.module_utils.openstack import openstack_full_argument_spec, openstack_cloud_from_module
+
+
+def main():
+
+    argument_spec = openstack_full_argument_spec(
+        name=dict(required=False, default=None, aliases=['subnet']),
+        filters=dict(required=False, type='dict', default=None)
+    )
+    module = AnsibleModule(argument_spec)
+    is_old_facts = module._name == 'os_subnets_facts'
+    if is_old_facts:
+        module.deprecate("The 'os_subnets_facts' module has been renamed to 'os_subnets_info', "
+                         "and the renamed one no longer returns ansible_facts", version='2.13')
+
+    sdk, cloud = openstack_cloud_from_module(module)
+    try:
+        subnets = cloud.search_subnets(module.params['name'],
+                                       module.params['filters'])
+        if is_old_facts:
+            module.exit_json(changed=False, ansible_facts=dict(
+                openstack_subnets=subnets))
+        else:
+            module.exit_json(changed=False, openstack_subnets=subnets)
+
+    except sdk.exceptions.OpenStackCloudException as e:
+        module.fail_json(msg=str(e))
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/cloud/openstack/os_user_facts.py b/plugins/modules/cloud/openstack/os_user_facts.py
new file mode 120000
index 0000000000..383ca3e811
--- /dev/null
+++ b/plugins/modules/cloud/openstack/os_user_facts.py
@@ -0,0 +1 @@
+os_user_info.py
\ No newline at end of file
diff --git a/plugins/modules/cloud/openstack/os_user_info.py b/plugins/modules/cloud/openstack/os_user_info.py
new file mode 100644
index 0000000000..ad635ffbfa
--- /dev/null
+++ b/plugins/modules/cloud/openstack/os_user_info.py
@@ -0,0 +1,175 @@
+#!/usr/bin/python
+# Copyright (c) 2016 Hewlett-Packard Enterprise Corporation
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: os_user_info
+short_description: Retrieve information about one or more OpenStack users
+extends_documentation_fragment:
+- openstack.cloud.openstack
+
+author: "Ricardo Carrillo Cruz (@rcarrillocruz)"
+description:
+    - Retrieve information about one or more OpenStack users
+    - This module was called C(os_user_facts) before Ansible 2.9, returning C(ansible_facts).
+      Note that the M(os_user_info) module no longer returns C(ansible_facts)!
+requirements:
+    - "python >= 2.7"
+    - "openstacksdk"
+options:
+    name:
+        description:
+            - Name or ID of the user
+        required: false
+    domain:
+        description:
+            - Name or ID of the domain containing the user if the cloud supports domains
+    filters:
+        description:
+            - A dictionary of meta data to use for further filtering. Elements of
+              this dictionary may be additional dictionaries.
+    availability_zone:
+        description:
+            - Ignored.
Present for backwards compatibility +''' + +EXAMPLES = ''' +# Gather information about previously created users +- os_user_info: + cloud: awesomecloud + register: result +- debug: + msg: "{{ result.openstack_users }}" + +# Gather information about a previously created user by name +- os_user_info: + cloud: awesomecloud + name: demouser + register: result +- debug: + msg: "{{ result.openstack_users }}" + +# Gather information about a previously created user in a specific domain +- os_user_info: + cloud: awesomecloud + name: demouser + domain: admindomain + register: result +- debug: + msg: "{{ result.openstack_users }}" + +# Gather information about a previously created user in a specific domain with filter +- os_user_info: + cloud: awesomecloud + name: demouser + domain: admindomain + filters: + enabled: False + register: result +- debug: + msg: "{{ result.openstack_users }}" +''' + + +RETURN = ''' +openstack_users: + description: has all the OpenStack information about users + returned: always, but can be null + type: complex + contains: + id: + description: Unique UUID. + returned: success + type: str + name: + description: Name given to the user. + returned: success + type: str + enabled: + description: Flag to indicate if the user is enabled + returned: success + type: bool + domain_id: + description: Domain ID containing the user + returned: success + type: str + default_project_id: + description: Default project ID of the user + returned: success + type: str + email: + description: Email of the user + returned: success + type: str + username: + description: Username of the user + returned: success + type: str +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.openstack.cloud.plugins.module_utils.openstack import openstack_full_argument_spec, openstack_module_kwargs, openstack_cloud_from_module + + +def main(): + + argument_spec = openstack_full_argument_spec( + name=dict(required=False, default=None), + domain=dict(required=False, default=None), + filters=dict(required=False, type='dict', default=None), + ) + + module = AnsibleModule(argument_spec) + is_old_facts = module._name == 'os_user_facts' + if is_old_facts: + module.deprecate("The 'os_user_facts' module has been renamed to 'os_user_info', " + "and the renamed one no longer returns ansible_facts", version='2.13') + + sdk, opcloud = openstack_cloud_from_module(module) + try: + name = module.params['name'] + domain = module.params['domain'] + filters = module.params['filters'] + + if domain: + try: + # We assume admin is passing domain id + dom = opcloud.get_domain(domain)['id'] + domain = dom + except Exception: + # If we fail, maybe admin is passing a domain name. + # Note that domains have unique names, just like id. 
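+                # The name lookup returns a list; domain names are unique,
+                # so the first match (if any) carries the id we need.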
+ dom = opcloud.search_domains(filters={'name': domain}) + if dom: + domain = dom[0]['id'] + else: + module.fail_json(msg='Domain name or ID does not exist') + + if not filters: + filters = {} + + filters['domain_id'] = domain + + users = opcloud.search_users(name, filters) + if is_old_facts: + module.exit_json(changed=False, ansible_facts=dict( + openstack_users=users)) + else: + module.exit_json(changed=False, openstack_users=users) + + except sdk.exceptions.OpenStackCloudException as e: + module.fail_json(msg=str(e)) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/oracle/oci_vcn.py b/plugins/modules/cloud/oracle/oci_vcn.py new file mode 100644 index 0000000000..da3d111e01 --- /dev/null +++ b/plugins/modules/cloud/oracle/oci_vcn.py @@ -0,0 +1,226 @@ +#!/usr/bin/python +# Copyright (c) 2017, 2018, Oracle and/or its affiliates. +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +ANSIBLE_METADATA = { + "metadata_version": "1.1", + "status": ["preview"], + "supported_by": "community", +} + +DOCUMENTATION = ''' +--- +module: oci_vcn +short_description: Manage Virtual Cloud Networks(VCN) in OCI +description: + - This module allows the user to create, delete and update virtual cloud networks(VCNs) in OCI. + The complete Oracle Cloud Infrastructure Ansible Modules can be downloaded from + U(https://github.com/oracle/oci-ansible-modules/releases). +options: + cidr_block: + description: The CIDR IP address block of the VCN. Required when creating a VCN with I(state=present). + required: false + compartment_id: + description: The OCID of the compartment to contain the VCN. Required when creating a VCN with I(state=present). + This option is mutually exclusive with I(vcn_id). + type: str + display_name: + description: A user-friendly name. Does not have to be unique, and it's changeable. + type: str + aliases: [ 'name' ] + dns_label: + description: A DNS label for the VCN, used in conjunction with the VNIC's hostname and subnet's DNS label to + form a fully qualified domain name (FQDN) for each VNIC within this subnet (for example, + bminstance-1.subnet123.vcn1.oraclevcn.com). Not required to be unique, but it's a best practice + to set unique DNS labels for VCNs in your tenancy. Must be an alphanumeric string that begins + with a letter. The value cannot be changed. + type: str + state: + description: Create or update a VCN with I(state=present). Use I(state=absent) to delete a VCN. + type: str + default: present + choices: ['present', 'absent'] + vcn_id: + description: The OCID of the VCN. Required when deleting a VCN with I(state=absent) or updating a VCN + with I(state=present). This option is mutually exclusive with I(compartment_id). 
+ type: str + aliases: [ 'id' ] +author: "Rohit Chaware (@rohitChaware)" +extends_documentation_fragment: +- community.general.oracle +- community.general.oracle_creatable_resource +- community.general.oracle_wait_options +- community.general.oracle_tags + +''' + +EXAMPLES = """ +- name: Create a VCN + oci_vcn: + cidr_block: '10.0.0.0/16' + compartment_id: 'ocid1.compartment.oc1..xxxxxEXAMPLExxxxx' + display_name: my_vcn + dns_label: ansiblevcn + +- name: Updates the specified VCN's display name + oci_vcn: + vcn_id: ocid1.vcn.oc1.phx.xxxxxEXAMPLExxxxx + display_name: ansible_vcn + +- name: Delete the specified VCN + oci_vcn: + vcn_id: ocid1.vcn.oc1.phx.xxxxxEXAMPLExxxxx + state: absent +""" + +RETURN = """ +vcn: + description: Information about the VCN + returned: On successful create and update operation + type: dict + sample: { + "cidr_block": "10.0.0.0/16", + compartment_id": "ocid1.compartment.oc1..xxxxxEXAMPLExxxxx", + "default_dhcp_options_id": "ocid1.dhcpoptions.oc1.phx.xxxxxEXAMPLExxxxx", + "default_route_table_id": "ocid1.routetable.oc1.phx.xxxxxEXAMPLExxxxx", + "default_security_list_id": "ocid1.securitylist.oc1.phx.xxxxxEXAMPLExxxxx", + "display_name": "ansible_vcn", + "dns_label": "ansiblevcn", + "id": "ocid1.vcn.oc1.phx.xxxxxEXAMPLExxxxx", + "lifecycle_state": "AVAILABLE", + "time_created": "2017-11-13T20:22:40.626000+00:00", + "vcn_domain_name": "ansiblevcn.oraclevcn.com" + } +""" + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib +from ansible_collections.community.general.plugins.module_utils.oracle import oci_utils + +try: + from oci.core.virtual_network_client import VirtualNetworkClient + from oci.core.models import CreateVcnDetails + from oci.core.models import UpdateVcnDetails + + HAS_OCI_PY_SDK = True +except ImportError: + HAS_OCI_PY_SDK = False + + +def delete_vcn(virtual_network_client, module): + result = oci_utils.delete_and_wait( + resource_type="vcn", + client=virtual_network_client, + get_fn=virtual_network_client.get_vcn, + kwargs_get={"vcn_id": module.params["vcn_id"]}, + delete_fn=virtual_network_client.delete_vcn, + kwargs_delete={"vcn_id": module.params["vcn_id"]}, + module=module, + ) + return result + + +def update_vcn(virtual_network_client, module): + result = oci_utils.check_and_update_resource( + resource_type="vcn", + client=virtual_network_client, + get_fn=virtual_network_client.get_vcn, + kwargs_get={"vcn_id": module.params["vcn_id"]}, + update_fn=virtual_network_client.update_vcn, + primitive_params_update=["vcn_id"], + kwargs_non_primitive_update={UpdateVcnDetails: "update_vcn_details"}, + module=module, + update_attributes=UpdateVcnDetails().attribute_map.keys(), + ) + return result + + +def create_vcn(virtual_network_client, module): + create_vcn_details = CreateVcnDetails() + for attribute in create_vcn_details.attribute_map.keys(): + if attribute in module.params: + setattr(create_vcn_details, attribute, module.params[attribute]) + + result = oci_utils.create_and_wait( + resource_type="vcn", + create_fn=virtual_network_client.create_vcn, + kwargs_create={"create_vcn_details": create_vcn_details}, + client=virtual_network_client, + get_fn=virtual_network_client.get_vcn, + get_param="vcn_id", + module=module, + ) + return result + + +def main(): + module_args = oci_utils.get_taggable_arg_spec( + supports_create=True, supports_wait=True + ) + module_args.update( + dict( + cidr_block=dict(type="str", required=False), + compartment_id=dict(type="str", required=False), + display_name=dict(type="str", required=False, 
aliases=["name"]),
+            dns_label=dict(type="str", required=False),
+            state=dict(
+                type="str",
+                required=False,
+                default="present",
+                choices=["absent", "present"],
+            ),
+            vcn_id=dict(type="str", required=False, aliases=["id"]),
+        )
+    )
+
+    module = AnsibleModule(
+        argument_spec=module_args,
+        supports_check_mode=False,
+        mutually_exclusive=[["compartment_id", "vcn_id"]],
+    )
+
+    if not HAS_OCI_PY_SDK:
+        module.fail_json(msg=missing_required_lib("oci"))
+
+    virtual_network_client = oci_utils.create_service_client(
+        module, VirtualNetworkClient
+    )
+
+    exclude_attributes = {"display_name": True, "dns_label": True}
+    state = module.params["state"]
+    vcn_id = module.params["vcn_id"]
+
+    if state == "absent":
+        if vcn_id is not None:
+            result = delete_vcn(virtual_network_client, module)
+        else:
+            module.fail_json(
+                msg="Specify vcn_id with state as 'absent' to delete a VCN."
+            )
+
+    else:
+        if vcn_id is not None:
+            result = update_vcn(virtual_network_client, module)
+        else:
+            result = oci_utils.check_and_create_resource(
+                resource_type="vcn",
+                create_fn=create_vcn,
+                kwargs_create={
+                    "virtual_network_client": virtual_network_client,
+                    "module": module,
+                },
+                list_fn=virtual_network_client.list_vcns,
+                kwargs_list={"compartment_id": module.params["compartment_id"]},
+                module=module,
+                model=CreateVcnDetails(),
+                exclude_attributes=exclude_attributes,
+            )
+
+    module.exit_json(**result)
+
+
+if __name__ == "__main__":
+    main()
diff --git a/plugins/modules/cloud/ovh/ovh_ip_failover.py b/plugins/modules/cloud/ovh/ovh_ip_failover.py
new file mode 100644
index 0000000000..591a8e5297
--- /dev/null
+++ b/plugins/modules/cloud/ovh/ovh_ip_failover.py
@@ -0,0 +1,258 @@
+#!/usr/bin/python
+
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: ovh_ip_failover
+short_description: Manage OVH IP failover address
+description:
+    - Manage OVH (French European hosting provider) IP Failover Address. For now, this module can only be used to move
+      an IP failover (or failover block) between services.
+author: "Pascal HERAUD (@pascalheraud)"
+notes:
+    - Uses the python OVH Api U(https://github.com/ovh/python-ovh).
+      You have to create an application (a key and secret) with a consumer
+      key as described in U(https://docs.ovh.com/gb/en/customer/first-steps-with-ovh-api/)
+requirements:
+    - ovh >= 0.4.8
+options:
+    name:
+        required: true
+        description:
+            - The IP address to manage (can be a single IP like 1.1.1.1
+              or a block like 1.1.1.1/28)
+    service:
+        required: true
+        description:
+            - The name of the OVH service this IP address should be routed to
+    endpoint:
+        required: true
+        description:
+            - The endpoint to use (for instance ovh-eu)
+    wait_completion:
+        required: false
+        default: true
+        type: bool
+        description:
+            - If true, the module will wait for the IP address to be moved.
+              If false, exit without waiting. The taskId will be returned
+              in module output
+    wait_task_completion:
+        required: false
+        default: 0
+        description:
+            - If not 0, the module will wait for this task id to be
+              completed. Use wait_task_completion if you want to wait for
+              completion of a previously executed task with
+              wait_completion=false.
You can execute this module repeatedly on + a list of failover IPs using wait_completion=false (see examples) + application_key: + required: true + description: + - The applicationKey to use + application_secret: + required: true + description: + - The application secret to use + consumer_key: + required: true + description: + - The consumer key to use + timeout: + required: false + default: 120 + description: + - The timeout in seconds used to wait for a task to be + completed. Default is 120 seconds. + +''' + +EXAMPLES = ''' +# Route an IP address 1.1.1.1 to the service ns666.ovh.net +- ovh_ip_failover: + name: 1.1.1.1 + service: ns666.ovh.net + endpoint: ovh-eu + application_key: yourkey + application_secret: yoursecret + consumer_key: yourconsumerkey +- ovh_ip_failover: + name: 1.1.1.1 + service: ns666.ovh.net + endpoint: ovh-eu + wait_completion: false + application_key: yourkey + application_secret: yoursecret + consumer_key: yourconsumerkey + register: moved +- ovh_ip_failover: + name: 1.1.1.1 + service: ns666.ovh.net + endpoint: ovh-eu + wait_task_completion: "{{moved.taskId}}" + application_key: yourkey + application_secret: yoursecret + consumer_key: yourconsumerkey +''' + +RETURN = ''' +''' + +import time + +try: + import ovh + import ovh.exceptions + from ovh.exceptions import APIError + HAS_OVH = True +except ImportError: + HAS_OVH = False + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.six.moves.urllib.parse import quote_plus + + +def getOvhClient(ansibleModule): + endpoint = ansibleModule.params.get('endpoint') + application_key = ansibleModule.params.get('application_key') + application_secret = ansibleModule.params.get('application_secret') + consumer_key = ansibleModule.params.get('consumer_key') + + return ovh.Client( + endpoint=endpoint, + application_key=application_key, + application_secret=application_secret, + consumer_key=consumer_key + ) + + +def waitForNoTask(client, name, timeout): + currentTimeout = timeout + while client.get('/ip/{0}/task'.format(quote_plus(name)), + function='genericMoveFloatingIp', + status='todo'): + time.sleep(1) # Delay for 1 sec + currentTimeout -= 1 + if currentTimeout < 0: + return False + return True + + +def waitForTaskDone(client, name, taskId, timeout): + currentTimeout = timeout + while True: + task = client.get('/ip/{0}/task/{1}'.format(quote_plus(name), taskId)) + if task['status'] == 'done': + return True + time.sleep(5) # Delay for 5 sec because it's long to wait completion, do not harass the API + currentTimeout -= 5 + if currentTimeout < 0: + return False + return True + + +def main(): + module = AnsibleModule( + argument_spec=dict( + name=dict(required=True), + service=dict(required=True), + endpoint=dict(required=True), + wait_completion=dict(default=True, type='bool'), + wait_task_completion=dict(default=0, type='int'), + application_key=dict(required=True, no_log=True), + application_secret=dict(required=True, no_log=True), + consumer_key=dict(required=True, no_log=True), + timeout=dict(default=120, type='int') + ), + supports_check_mode=True + ) + + result = dict( + changed=False + ) + + if not HAS_OVH: + module.fail_json(msg='ovh-api python module is required to run this module ') + + # Get parameters + name = module.params.get('name') + service = module.params.get('service') + timeout = module.params.get('timeout') + wait_completion = module.params.get('wait_completion') + wait_task_completion = module.params.get('wait_task_completion') + + # Connect to OVH API + client = 
getOvhClient(module)
+
+    # Check that the failover IP exists
+    try:
+        ips = client.get('/ip', ip=name, type='failover')
+    except APIError as apiError:
+        module.fail_json(
+            msg='Unable to call OVH api for getting the list of ips, '
+                'check application key, secret, consumerkey and parameters. '
+                'Error returned by OVH api was : {0}'.format(apiError))
+
+    if name not in ips and '{0}/32'.format(name) not in ips:
+        module.fail_json(msg='IP {0} does not exist'.format(name))
+
+    # Check that no task is pending before going on
+    try:
+        if not waitForNoTask(client, name, timeout):
+            module.fail_json(
+                msg='Timeout of {0} seconds while waiting for no pending '
+                    'tasks before executing the module '.format(timeout))
+    except APIError as apiError:
+        module.fail_json(
+            msg='Unable to call OVH api for getting the list of pending tasks '
+                'of the ip, check application key, secret, consumerkey '
+                'and parameters. Error returned by OVH api was : {0}'
+                .format(apiError))
+
+    try:
+        ipproperties = client.get('/ip/{0}'.format(quote_plus(name)))
+    except APIError as apiError:
+        module.fail_json(
+            msg='Unable to call OVH api for getting the properties '
+                'of the ip, check application key, secret, consumerkey '
+                'and parameters. Error returned by OVH api was : {0}'
+                .format(apiError))
+
+    if ipproperties['routedTo']['serviceName'] != service:
+        if not module.check_mode:
+            if wait_task_completion == 0:
+                # Move the IP and get the created taskId
+                task = client.post('/ip/{0}/move'.format(quote_plus(name)), to=service)
+                taskId = task['taskId']
+                result['moved'] = True
+            else:
+                # Just wait for the given taskId to be completed
+                taskId = wait_task_completion
+                result['moved'] = False
+            result['taskId'] = taskId
+            if wait_completion or wait_task_completion != 0:
+                if not waitForTaskDone(client, name, taskId, timeout):
+                    module.fail_json(
+                        msg='Timeout of {0} seconds while waiting for completion '
+                            'of move ip to service'.format(timeout))
+                result['waited'] = True
+            else:
+                result['waited'] = False
+        result['changed'] = True
+
+    module.exit_json(**result)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/cloud/ovh/ovh_ip_loadbalancing_backend.py b/plugins/modules/cloud/ovh/ovh_ip_loadbalancing_backend.py
new file mode 100644
index 0000000000..0242b6faed
--- /dev/null
+++ b/plugins/modules/cloud/ovh/ovh_ip_loadbalancing_backend.py
@@ -0,0 +1,307 @@
+#!/usr/bin/python
+
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: ovh_ip_loadbalancing_backend
+short_description: Manage OVH IP LoadBalancing backends
+description:
+    - Manage OVH (French European hosting provider) LoadBalancing IP backends
+author: Pascal Heraud (@pascalheraud)
+notes:
+    - Uses the python OVH Api U(https://github.com/ovh/python-ovh).
+      You have to create an application (a key and secret) with a consumer
+      key as described in U(https://docs.ovh.com/gb/en/customer/first-steps-with-ovh-api/)
+requirements:
+    - ovh > 0.3.5
+options:
+    name:
+        required: true
+        description:
+            - The internal name of the LoadBalancing (ip-X.X.X.X)
+    backend:
+        required: true
+        description:
+            - The IP address of the backend to update / modify / delete
+    state:
+        default: present
+        choices: ['present', 'absent']
+        description:
+            - Determines whether the backend is to be created/modified
+              or deleted
+    probe:
+        default: 'none'
+        choices: ['none', 'http', 'icmp', 'oco']
+        description:
+            - Determines the type of probe to use for this backend
+    weight:
+        default: 8
+        description:
+            - Determines the weight for this backend
+    endpoint:
+        required: true
+        description:
+            - The endpoint to use (for instance ovh-eu)
+    application_key:
+        required: true
+        description:
+            - The applicationKey to use
+    application_secret:
+        required: true
+        description:
+            - The application secret to use
+    consumer_key:
+        required: true
+        description:
+            - The consumer key to use
+    timeout:
+        default: 120
+        description:
+            - The timeout in seconds used to wait for a task to be
+              completed.
+
+'''
+
+EXAMPLES = '''
+# Adds or modifies the backend '212.1.1.1' to a
+# loadbalancing 'ip-1.1.1.1'
+- ovh_ip_loadbalancing_backend:
+    name: ip-1.1.1.1
+    backend: 212.1.1.1
+    state: present
+    probe: none
+    weight: 8
+    endpoint: ovh-eu
+    application_key: yourkey
+    application_secret: yoursecret
+    consumer_key: yourconsumerkey
+
+# Removes a backend '212.1.1.1' from a loadbalancing 'ip-1.1.1.1'
+- ovh_ip_loadbalancing_backend:
+    name: ip-1.1.1.1
+    backend: 212.1.1.1
+    state: absent
+    endpoint: ovh-eu
+    application_key: yourkey
+    application_secret: yoursecret
+    consumer_key: yourconsumerkey
+'''
+
+RETURN = '''
+'''
+
+import time
+
+try:
+    import ovh
+    import ovh.exceptions
+    from ovh.exceptions import APIError
+    HAS_OVH = True
+except ImportError:
+    HAS_OVH = False
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def getOvhClient(ansibleModule):
+    endpoint = ansibleModule.params.get('endpoint')
+    application_key = ansibleModule.params.get('application_key')
+    application_secret = ansibleModule.params.get('application_secret')
+    consumer_key = ansibleModule.params.get('consumer_key')
+
+    return ovh.Client(
+        endpoint=endpoint,
+        application_key=application_key,
+        application_secret=application_secret,
+        consumer_key=consumer_key
+    )
+
+
+def waitForNoTask(client, name, timeout):
+    currentTimeout = timeout
+    while len(client.get('/ip/loadBalancing/{0}/task'.format(name))) > 0:
+        time.sleep(1)  # Delay for 1 sec
+        currentTimeout -= 1
+        if currentTimeout < 0:
+            return False
+    return True
+
+
+def main():
+    module = AnsibleModule(
+        argument_spec=dict(
+            name=dict(required=True),
+            backend=dict(required=True),
+            weight=dict(default=8, type='int'),
+            probe=dict(default='none',
+                       choices=['none', 'http', 'icmp', 'oco']),
+            state=dict(default='present', choices=['present', 'absent']),
+            endpoint=dict(required=True),
+            application_key=dict(required=True, no_log=True),
+            application_secret=dict(required=True, no_log=True),
+            consumer_key=dict(required=True, no_log=True),
+            timeout=dict(default=120, type='int')
+        )
+    )
+
+    if not HAS_OVH:
+        module.fail_json(msg='ovh-api python module '
+                             'is required to run this module')
+
+    # Get parameters
+    name = module.params.get('name')
+    state = module.params.get('state')
+    backend = module.params.get('backend')
+    weight = module.params.get('weight')
+    probe = 
module.params.get('probe') + timeout = module.params.get('timeout') + + # Connect to OVH API + client = getOvhClient(module) + + # Check that the load balancing exists + try: + loadBalancings = client.get('/ip/loadBalancing') + except APIError as apiError: + module.fail_json( + msg='Unable to call OVH api for getting the list of loadBalancing, ' + 'check application key, secret, consumerkey and parameters. ' + 'Error returned by OVH api was : {0}'.format(apiError)) + + if name not in loadBalancings: + module.fail_json(msg='IP LoadBalancing {0} does not exist'.format(name)) + + # Check that no task is pending before going on + try: + if not waitForNoTask(client, name, timeout): + module.fail_json( + msg='Timeout of {0} seconds while waiting for no pending ' + 'tasks before executing the module '.format(timeout)) + except APIError as apiError: + module.fail_json( + msg='Unable to call OVH api for getting the list of pending tasks ' + 'of the loadBalancing, check application key, secret, consumerkey ' + 'and parameters. Error returned by OVH api was : {0}' + .format(apiError)) + + try: + backends = client.get('/ip/loadBalancing/{0}/backend'.format(name)) + except APIError as apiError: + module.fail_json( + msg='Unable to call OVH api for getting the list of backends ' + 'of the loadBalancing, check application key, secret, consumerkey ' + 'and parameters. Error returned by OVH api was : {0}' + .format(apiError)) + + backendExists = backend in backends + moduleChanged = False + if state == "absent": + if backendExists: + # Remove backend + try: + client.delete( + '/ip/loadBalancing/{0}/backend/{1}'.format(name, backend)) + if not waitForNoTask(client, name, timeout): + module.fail_json( + msg='Timeout of {0} seconds while waiting for completion ' + 'of removing backend task'.format(timeout)) + except APIError as apiError: + module.fail_json( + msg='Unable to call OVH api for deleting the backend, ' + 'check application key, secret, consumerkey and ' + 'parameters. Error returned by OVH api was : {0}' + .format(apiError)) + moduleChanged = True + else: + if backendExists: + # Get properties + try: + backendProperties = client.get( + '/ip/loadBalancing/{0}/backend/{1}'.format(name, backend)) + except APIError as apiError: + module.fail_json( + msg='Unable to call OVH api for getting the backend properties, ' + 'check application key, secret, consumerkey and ' + 'parameters. Error returned by OVH api was : {0}' + .format(apiError)) + + if (backendProperties['weight'] != weight): + # Change weight + try: + client.post( + '/ip/loadBalancing/{0}/backend/{1}/setWeight' + .format(name, backend), weight=weight) + if not waitForNoTask(client, name, timeout): + module.fail_json( + msg='Timeout of {0} seconds while waiting for completion ' + 'of setWeight to backend task' + .format(timeout)) + except APIError as apiError: + module.fail_json( + msg='Unable to call OVH api for updating the weight of the ' + 'backend, check application key, secret, consumerkey ' + 'and parameters. 
Error returned by OVH api was : {0}' + .format(apiError)) + moduleChanged = True + + if (backendProperties['probe'] != probe): + # Change probe + backendProperties['probe'] = probe + try: + client.put( + '/ip/loadBalancing/{0}/backend/{1}' + .format(name, backend), probe=probe) + if not waitForNoTask(client, name, timeout): + module.fail_json( + msg='Timeout of {0} seconds while waiting for completion of ' + 'setProbe to backend task' + .format(timeout)) + except APIError as apiError: + module.fail_json( + msg='Unable to call OVH api for updating the probe of ' + 'the backend, check application key, secret, ' + 'consumerkey and parameters. Error returned by OVH api ' + 'was : {0}' + .format(apiError)) + moduleChanged = True + + else: + # Creates backend + try: + try: + client.post('/ip/loadBalancing/{0}/backend'.format(name), + ipBackend=backend, probe=probe, weight=weight) + except APIError as apiError: + module.fail_json( + msg='Unable to call OVH api for creating the backend, check ' + 'application key, secret, consumerkey and parameters. ' + 'Error returned by OVH api was : {0}' + .format(apiError)) + + if not waitForNoTask(client, name, timeout): + module.fail_json( + msg='Timeout of {0} seconds while waiting for completion of ' + 'backend creation task'.format(timeout)) + except APIError as apiError: + module.fail_json( + msg='Unable to call OVH api for creating the backend, check ' + 'application key, secret, consumerkey and parameters. ' + 'Error returned by OVH api was : {0}'.format(apiError)) + moduleChanged = True + + module.exit_json(changed=moduleChanged) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/ovh/ovh_monthly_billing.py b/plugins/modules/cloud/ovh/ovh_monthly_billing.py new file mode 100644 index 0000000000..58ce649e04 --- /dev/null +++ b/plugins/modules/cloud/ovh/ovh_monthly_billing.py @@ -0,0 +1,163 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2019, Francois Lallart (@fraff) +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +ANSIBLE_METADATA = { + "metadata_version": "1.1", + "status": ["preview"], + "supported_by": "community", +} + +DOCUMENTATION = ''' +--- +module: ovh_monthly_billing +author: Francois Lallart (@fraff) +short_description: Manage OVH monthly billing +description: + - Enable monthly billing on OVH cloud intances (be aware OVH does not allow to disable it). 
+requirements: [ "ovh" ]
+options:
+    project_id:
+        required: true
+        type: str
+        description:
+            - ID of the project, get it with U(https://api.ovh.com/console/#/cloud/project#GET)
+    instance_id:
+        required: true
+        type: str
+        description:
+            - ID of the instance, get it with U(https://api.ovh.com/console/#/cloud/project/%7BserviceName%7D/instance#GET)
+    endpoint:
+        type: str
+        description:
+            - The endpoint to use (for instance ovh-eu)
+    application_key:
+        type: str
+        description:
+            - The applicationKey to use
+    application_secret:
+        type: str
+        description:
+            - The application secret to use
+    consumer_key:
+        type: str
+        description:
+            - The consumer key to use
+'''
+
+EXAMPLES = '''
+# basic usage, using auth from /etc/ovh.conf
+  - ovh_monthly_billing:
+      project_id: 0c727a20aa144485b70c44dee9123b46
+      instance_id: 8fa89ad2-8f08-4220-9fa4-9695ea23e948
+
+# a bit more complex
+  # get openstack cloud ID and instance ID, OVH uses them in its API
+  - os_server_info:
+      cloud: myProjectName
+      region_name: myRegionName
+      server: myServerName
+    # force run even in check_mode
+    check_mode: no
+
+  # use these IDs
+  - ovh_monthly_billing:
+      project_id: "{{ openstack_servers.0.tenant_id }}"
+      instance_id: "{{ openstack_servers.0.id }}"
+      application_key: yourkey
+      application_secret: yoursecret
+      consumer_key: yourconsumerkey
+'''
+
+RETURN = '''
+'''
+
+import os
+import sys
+import traceback
+
+try:
+    import ovh
+    import ovh.exceptions
+    from ovh.exceptions import APIError
+    HAS_OVH = True
+except ImportError:
+    HAS_OVH = False
+    OVH_IMPORT_ERROR = traceback.format_exc()
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def main():
+    module = AnsibleModule(
+        argument_spec=dict(
+            project_id=dict(required=True),
+            instance_id=dict(required=True),
+            endpoint=dict(required=False),
+            application_key=dict(required=False, no_log=True),
+            application_secret=dict(required=False, no_log=True),
+            consumer_key=dict(required=False, no_log=True),
+        ),
+        supports_check_mode=True
+    )
+
+    # Get parameters
+    project_id = module.params.get('project_id')
+    instance_id = module.params.get('instance_id')
+    endpoint = module.params.get('endpoint')
+    application_key = module.params.get('application_key')
+    application_secret = module.params.get('application_secret')
+    consumer_key = module.params.get('consumer_key')
+    project = ""
+    instance = ""
+    ovh_billing_status = ""
+
+    if not HAS_OVH:
+        module.fail_json(msg='python-ovh is required to run this module, see https://github.com/ovh/python-ovh')
+
+    # Connect to OVH API
+    client = ovh.Client(
+        endpoint=endpoint,
+        application_key=application_key,
+        application_secret=application_secret,
+        consumer_key=consumer_key
+    )
+
+    # Check that the project exists
+    try:
+        project = client.get('/cloud/project/{0}'.format(project_id))
+    except ovh.exceptions.ResourceNotFoundError:
+        module.fail_json(msg='project {0} does not exist'.format(project_id))
+
+    # Check that the instance exists
+    try:
+        instance = client.get('/cloud/project/{0}/instance/{1}'.format(project_id, instance_id))
+    except ovh.exceptions.ResourceNotFoundError:
+        module.fail_json(msg='instance {0} does not exist in project {1}'.format(instance_id, project_id))
+
+    # Is monthlyBilling already enabled or pending ?
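+    # Status 'ok' means billing is already active and 'activationPending'
+    # means OVH is switching it on; in both cases there is nothing to
+    # change, so the module reports changed=False and exits early.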
+ if instance['monthlyBilling'] is not None: + if instance['monthlyBilling']['status'] in ['ok', 'activationPending']: + module.exit_json(changed=False, ovh_billing_status=instance['monthlyBilling']) + + if module.check_mode: + module.exit_json(changed=True, msg="Dry Run!") + + try: + ovh_billing_status = client.post('/cloud/project/{0}/instance/{1}/activeMonthlyBilling'.format(project_id, instance_id)) + module.exit_json(changed=True, ovh_billing_status=ovh_billing_status['monthlyBilling']) + except APIError as apiError: + module.fail_json(changed=False, msg="Failed to call OVH API: {0}".format(apiError)) + + # We should never reach here + module.fail_json(msg='Internal ovh_monthly_billing module error') + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/cloud/ovirt/ovirt_affinity_label_facts.py b/plugins/modules/cloud/ovirt/ovirt_affinity_label_facts.py new file mode 100644 index 0000000000..dac9ac0dce --- /dev/null +++ b/plugins/modules/cloud/ovirt/ovirt_affinity_label_facts.py @@ -0,0 +1,191 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (c) 2016 Red Hat, Inc. +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['deprecated'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: ovirt_affinity_label_facts +short_description: Retrieve information about one or more oVirt/RHV affinity labels +author: "Ondra Machacek (@machacekondra)" +deprecated: + removed_in: "2.10" + why: When migrating to collection we decided to use only _info modules. + alternative: Use M(ovirt_affinity_label_info) instead +description: + - "Retrieve information about one or more oVirt/RHV affinity labels." + - This module was called C(ovirt_affinity_label_facts) before Ansible 2.9, returning C(ansible_facts). + Note that the M(ovirt_affinity_label_info) module no longer returns C(ansible_facts)! +notes: + - "This module returns a variable C(ovirt_affinity_labels), which + contains a list of affinity labels. You need to register the result with + the I(register) keyword to use it." +options: + name: + description: + - "Name of the affinity labels which should be listed." + vm: + description: + - "Name of the VM, which affinity labels should be listed." + host: + description: + - "Name of the host, which affinity labels should be listed." 
+extends_documentation_fragment: +- ovirt.ovirt.ovirt_info + +''' + +EXAMPLES = ''' +# Examples don't contain auth parameter for simplicity, +# look at ovirt_auth module to see how to reuse authentication: + +# Gather information about all affinity labels, which names start with C(label): +- ovirt_affinity_label_info: + name: label* + register: result +- debug: + msg: "{{ result.ovirt_affinity_labels }}" + +# Gather information about all affinity labels, which are assigned to VMs +# which names start with C(postgres): +- ovirt_affinity_label_info: + vm: postgres* + register: result +- debug: + msg: "{{ result.ovirt_affinity_labels }}" + +# Gather information about all affinity labels, which are assigned to hosts +# which names start with C(west): +- ovirt_affinity_label_info: + host: west* + register: result +- debug: + msg: "{{ result.ovirt_affinity_labels }}" + +# Gather information about all affinity labels, which are assigned to hosts +# which names start with C(west) or VMs which names start with C(postgres): +- ovirt_affinity_label_info: + host: west* + vm: postgres* + register: result +- debug: + msg: "{{ result.ovirt_affinity_labels }}" +''' + +RETURN = ''' +ovirt_affinity_labels: + description: "List of dictionaries describing the affinity labels. Affinity labels attributes are mapped to dictionary keys, + all affinity labels attributes can be found at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/affinity_label." + returned: On success. + type: list +''' + +import fnmatch +import traceback + +from ansible.module_utils.common.removed import removed_module +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.ovirt.ovirt.plugins.module_utils.ovirt import ( + check_sdk, + create_connection, + get_dict_of_struct, + ovirt_info_full_argument_spec, + search_by_name, +) + + +def main(): + argument_spec = ovirt_info_full_argument_spec( + name=dict(default=None), + host=dict(default=None), + vm=dict(default=None), + ) + module = AnsibleModule(argument_spec) + is_old_facts = module._name == 'ovirt_affinity_label_facts' + if is_old_facts: + module.deprecate("The 'ovirt_affinity_label_facts' module has been renamed to 'ovirt_affinity_label_info', " + "and the renamed one no longer returns ansible_facts", version='2.13') + + check_sdk(module) + + try: + auth = module.params.pop('auth') + connection = create_connection(auth) + affinity_labels_service = connection.system_service().affinity_labels_service() + labels = [] + all_labels = affinity_labels_service.list() + if module.params['name']: + labels.extend([ + l for l in all_labels + if fnmatch.fnmatch(l.name, module.params['name']) + ]) + if module.params['host']: + hosts_service = connection.system_service().hosts_service() + if search_by_name(hosts_service, module.params['host']) is None: + raise Exception("Host '%s' was not found." % module.params['host']) + labels.extend([ + label + for label in all_labels + for host in connection.follow_link(label.hosts) + if fnmatch.fnmatch(hosts_service.service(host.id).get().name, module.params['host']) + ]) + if module.params['vm']: + vms_service = connection.system_service().vms_service() + if search_by_name(vms_service, module.params['vm']) is None: + raise Exception("Vm '%s' was not found." 
% module.params['vm']) + labels.extend([ + label + for label in all_labels + for vm in connection.follow_link(label.vms) + if fnmatch.fnmatch(vms_service.service(vm.id).get().name, module.params['vm']) + ]) + + if not (module.params['vm'] or module.params['host'] or module.params['name']): + labels = all_labels + + result = dict( + ovirt_affinity_labels=[ + get_dict_of_struct( + struct=l, + connection=connection, + fetch_nested=module.params.get('fetch_nested'), + attributes=module.params.get('nested_attributes'), + ) for l in labels + ], + ) + if is_old_facts: + module.exit_json(changed=False, ansible_facts=result) + else: + module.exit_json(changed=False, **result) + except Exception as e: + module.fail_json(msg=str(e), exception=traceback.format_exc()) + finally: + connection.close(logout=auth.get('token') is None) + + +if __name__ == '__main__': + removed_module("2.10") diff --git a/plugins/modules/cloud/ovirt/ovirt_api_facts.py b/plugins/modules/cloud/ovirt/ovirt_api_facts.py new file mode 100644 index 0000000000..494c2ac5a9 --- /dev/null +++ b/plugins/modules/cloud/ovirt/ovirt_api_facts.py @@ -0,0 +1,104 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright (c) 2017 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = { + 'metadata_version': '1.1', + 'status': ['deprecated'], + 'supported_by': 'community' +} + + +DOCUMENTATION = ''' +--- +module: ovirt_api_facts +short_description: Retrieve information about the oVirt/RHV API +author: "Ondra Machacek (@machacekondra)" +deprecated: + removed_in: "2.10" + why: When migrating to collection we decided to use only _info modules. + alternative: Use M(ovirt_api_info) instead +description: + - "Retrieve information about the oVirt/RHV API." + - This module was called C(ovirt_api_facts) before Ansible 2.9, returning C(ansible_facts). + Note that the M(ovirt_api_info) module no longer returns C(ansible_facts)! +notes: + - "This module returns a variable C(ovirt_api), + which contains a information about oVirt/RHV API. You need to register the result with + the I(register) keyword to use it." +extends_documentation_fragment: +- ovirt.ovirt.ovirt_info + +''' + +EXAMPLES = ''' +# Examples don't contain auth parameter for simplicity, +# look at ovirt_auth module to see how to reuse authentication: + +# Gather information oVirt API: +- ovirt_api_info: + register: result +- debug: + msg: "{{ result.ovirt_api }}" +''' + +RETURN = ''' +ovirt_api: + description: "Dictionary describing the oVirt API information. + Api attributes are mapped to dictionary keys, + all API attributes can be found at following + url: https://ovirt.example.com/ovirt-engine/api/model#types/api." + returned: On success. 
+ type: dict +''' + +import traceback + +from ansible.module_utils.common.removed import removed_module +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.ovirt.ovirt.plugins.module_utils.ovirt import ( + check_sdk, + create_connection, + get_dict_of_struct, + ovirt_info_full_argument_spec, +) + + +def main(): + argument_spec = ovirt_info_full_argument_spec() + module = AnsibleModule(argument_spec) + is_old_facts = module._name == 'ovirt_api_facts' + if is_old_facts: + module.deprecate("The 'ovirt_api_facts' module has been renamed to 'ovirt_api_info', " + "and the renamed one no longer returns ansible_facts", version='2.13') + check_sdk(module) + + try: + auth = module.params.pop('auth') + connection = create_connection(auth) + api = connection.system_service().get() + result = dict( + ovirt_api=get_dict_of_struct( + struct=api, + connection=connection, + fetch_nested=module.params.get('fetch_nested'), + attributes=module.params.get('nested_attributes'), + ) + ) + if is_old_facts: + module.exit_json(changed=False, ansible_facts=result) + else: + module.exit_json(changed=False, **result) + except Exception as e: + module.fail_json(msg=str(e), exception=traceback.format_exc()) + finally: + connection.close(logout=auth.get('token') is None) + + +if __name__ == '__main__': + removed_module("2.10") diff --git a/plugins/modules/cloud/ovirt/ovirt_cluster_facts.py b/plugins/modules/cloud/ovirt/ovirt_cluster_facts.py new file mode 100644 index 0000000000..0fa0ca37a2 --- /dev/null +++ b/plugins/modules/cloud/ovirt/ovirt_cluster_facts.py @@ -0,0 +1,129 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (c) 2016 Red Hat, Inc. +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['deprecated'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: ovirt_cluster_facts +short_description: Retrieve information about one or more oVirt/RHV clusters +author: "Ondra Machacek (@machacekondra)" +deprecated: + removed_in: "2.10" + why: When migrating to collection we decided to use only _info modules. + alternative: Use M(ovirt_cluster_info) instead +description: + - "Retrieve information about one or more oVirt/RHV clusters." + - This module was called C(ovirt_cluster_facts) before Ansible 2.9, returning C(ansible_facts). + Note that the M(ovirt_cluster_info) module no longer returns C(ansible_facts)! +notes: + - "This module returns a variable C(ovirt_clusters), which + contains a list of clusters. You need to register the result with + the I(register) keyword to use it." +options: + pattern: + description: + - "Search term which is accepted by oVirt/RHV search backend." 
+ - "For example to search cluster X from datacenter Y use following pattern: + name=X and datacenter=Y" +extends_documentation_fragment: +- ovirt.ovirt.ovirt_info + +''' + +EXAMPLES = ''' +# Examples don't contain auth parameter for simplicity, +# look at ovirt_auth module to see how to reuse authentication: + +# Gather information about all clusters which names start with C: +- ovirt_cluster_info: + pattern: + name: 'production*' + register: result +- debug: + msg: "{{ result.ovirt_clusters }}" +''' + +RETURN = ''' +ovirt_clusters: + description: "List of dictionaries describing the clusters. Cluster attributes are mapped to dictionary keys, + all clusters attributes can be found at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/cluster." + returned: On success. + type: list +''' + +import traceback + +from ansible.module_utils.common.removed import removed_module +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.ovirt.ovirt.plugins.module_utils.ovirt import ( + check_sdk, + create_connection, + get_dict_of_struct, + ovirt_info_full_argument_spec, +) + + +def main(): + argument_spec = ovirt_info_full_argument_spec( + pattern=dict(default='', required=False), + ) + module = AnsibleModule(argument_spec) + is_old_facts = module._name == 'ovirt_cluster_facts' + if is_old_facts: + module.deprecate("The 'ovirt_cluster_facts' module has been renamed to 'ovirt_cluster_info', " + "and the renamed one no longer returns ansible_facts", version='2.13') + + check_sdk(module) + + try: + auth = module.params.pop('auth') + connection = create_connection(auth) + clusters_service = connection.system_service().clusters_service() + clusters = clusters_service.list(search=module.params['pattern']) + result = dict( + ovirt_clusters=[ + get_dict_of_struct( + struct=c, + connection=connection, + fetch_nested=module.params.get('fetch_nested'), + attributes=module.params.get('nested_attributes'), + ) for c in clusters + ], + ) + if is_old_facts: + module.exit_json(changed=False, ansible_facts=result) + else: + module.exit_json(changed=False, **result) + except Exception as e: + module.fail_json(msg=str(e), exception=traceback.format_exc()) + finally: + connection.close(logout=auth.get('token') is None) + + +if __name__ == '__main__': + removed_module("2.10") diff --git a/plugins/modules/cloud/ovirt/ovirt_datacenter_facts.py b/plugins/modules/cloud/ovirt/ovirt_datacenter_facts.py new file mode 100644 index 0000000000..677aa21950 --- /dev/null +++ b/plugins/modules/cloud/ovirt/ovirt_datacenter_facts.py @@ -0,0 +1,112 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (c) 2016 Red Hat, Inc. +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['deprecated'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: ovirt_datacenter_facts +short_description: Retrieve information about one or more oVirt/RHV datacenters +author: "Ondra Machacek (@machacekondra)" +deprecated: + removed_in: "2.10" + why: When migrating to collection we decided to use only _info modules. + alternative: Use M(ovirt_datacenter_info) instead +description: + - "Retrieve information about one or more oVirt/RHV datacenters." + - This module was called C(ovirt_datacenter_facts) before Ansible 2.9, returning C(ansible_facts). 
+ Note that the M(ovirt_datacenter_info) module no longer returns C(ansible_facts)! +notes: + - "This module returns a variable C(ovirt_datacenters), which + contains a list of datacenters. You need to register the result with + the I(register) keyword to use it." +options: + pattern: + description: + - "Search term which is accepted by oVirt/RHV search backend." + - "For example to search datacenter I(X) use following pattern: I(name=X)" +extends_documentation_fragment: +- ovirt.ovirt.ovirt_info + +''' + +EXAMPLES = ''' +# Examples don't contain auth parameter for simplicity, +# look at ovirt_auth module to see how to reuse authentication: + +# Gather information about all data centers which names start with C(production): +- ovirt_datacenter_info: + pattern: name=production* + register: result +- debug: + msg: "{{ result.ovirt_datacenters }}" +''' + +RETURN = ''' +ovirt_datacenters: + description: "List of dictionaries describing the datacenters. Datacenter attributes are mapped to dictionary keys, + all datacenters attributes can be found at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/data_center." + returned: On success. + type: list +''' + +import traceback + +from ansible.module_utils.common.removed import removed_module +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.ovirt.ovirt.plugins.module_utils.ovirt import ( + check_sdk, + create_connection, + get_dict_of_struct, + ovirt_info_full_argument_spec, +) + + +def main(): + argument_spec = ovirt_info_full_argument_spec( + pattern=dict(default='', required=False), + ) + module = AnsibleModule(argument_spec) + is_old_facts = module._name == 'ovirt_datacenter_facts' + if is_old_facts: + module.deprecate("The 'ovirt_datacenter_facts' module has been renamed to 'ovirt_datacenter_info', " + "and the renamed one no longer returns ansible_facts", version='2.13') + + check_sdk(module) + + try: + auth = module.params.pop('auth') + connection = create_connection(auth) + datacenters_service = connection.system_service().data_centers_service() + datacenters = datacenters_service.list(search=module.params['pattern']) + result = dict( + ovirt_datacenters=[ + get_dict_of_struct( + struct=d, + connection=connection, + fetch_nested=module.params.get('fetch_nested'), + attributes=module.params.get('nested_attributes'), + ) for d in datacenters + ], + ) + if is_old_facts: + module.exit_json(changed=False, ansible_facts=result) + else: + module.exit_json(changed=False, **result) + except Exception as e: + module.fail_json(msg=str(e), exception=traceback.format_exc()) + finally: + connection.close(logout=auth.get('token') is None) + + +if __name__ == '__main__': + removed_module("2.10") diff --git a/plugins/modules/cloud/ovirt/ovirt_disk_facts.py b/plugins/modules/cloud/ovirt/ovirt_disk_facts.py new file mode 100644 index 0000000000..c8d119254d --- /dev/null +++ b/plugins/modules/cloud/ovirt/ovirt_disk_facts.py @@ -0,0 +1,129 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (c) 2017 Red Hat, Inc. +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['deprecated'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: ovirt_disk_facts +short_description: Retrieve information about one or more oVirt/RHV disks +author: "Katerina Koukiou (@KKoukiou)" +deprecated: + removed_in: "2.10" + why: When migrating to collection we decided to use only _info modules. + alternative: Use M(ovirt_disk_info) instead +description: + - "Retrieve information about one or more oVirt/RHV disks." + - This module was called C(ovirt_disk_facts) before Ansible 2.9, returning C(ansible_facts). + Note that the M(ovirt_disk_info) module no longer returns C(ansible_facts)! +notes: + - "This module returns a variable C(ovirt_disks), which + contains a list of disks. You need to register the result with + the I(register) keyword to use it." +options: + pattern: + description: + - "Search term which is accepted by oVirt/RHV search backend." + - "For example to search Disk X from storage Y use following pattern: + name=X and storage.name=Y" +extends_documentation_fragment: +- ovirt.ovirt.ovirt_info + +''' + +EXAMPLES = ''' +# Examples don't contain auth parameter for simplicity, +# look at ovirt_auth module to see how to reuse authentication: + +# Gather information about all Disks which names start with C(centos) +- ovirt_disk_info: + pattern: name=centos* + register: result +- debug: + msg: "{{ result.ovirt_disks }}" +''' + +RETURN = ''' +ovirt_disks: + description: "List of dictionaries describing the Disks. Disk attributes are mapped to dictionary keys, + all Disks attributes can be found at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/disk." + returned: On success. 
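Every module in this batch builds its result the same way: the SDK structs are serialized with get_dict_of_struct() from the collection's module_utils. A toy stand-in that only illustrates the shape of the output; the real helper also resolves links and honors fetch_nested/nested_attributes:

class Disk(object):
    # Simplified stand-in for an ovirtsdk4 struct, which keeps its data in
    # underscore-prefixed attributes.
    def __init__(self, id, name):
        self._id = id
        self._name = name

def struct_to_dict(struct):
    # Strip the leading underscore and drop unset attributes.
    return dict((key[1:], value) for key, value in struct.__dict__.items() if value is not None)

print([struct_to_dict(d) for d in [Disk('123', 'centos7_disk0')]])  # [{'id': '123', 'name': 'centos7_disk0'}]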
+ type: list +''' + +import traceback + +from ansible.module_utils.common.removed import removed_module +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.ovirt.ovirt.plugins.module_utils.ovirt import ( + check_sdk, + create_connection, + get_dict_of_struct, + ovirt_info_full_argument_spec, +) + + +def main(): + argument_spec = ovirt_info_full_argument_spec( + pattern=dict(default='', required=False), + ) + module = AnsibleModule(argument_spec) + is_old_facts = module._name == 'ovirt_disk_facts' + if is_old_facts: + module.deprecate("The 'ovirt_disk_facts' module has been renamed to 'ovirt_disk_info', " + "and the renamed one no longer returns ansible_facts", version='2.13') + check_sdk(module) + + try: + auth = module.params.pop('auth') + connection = create_connection(auth) + disks_service = connection.system_service().disks_service() + disks = disks_service.list( + search=module.params['pattern'], + ) + result = dict( + ovirt_disks=[ + get_dict_of_struct( + struct=c, + connection=connection, + fetch_nested=module.params.get('fetch_nested'), + attributes=module.params.get('nested_attributes'), + ) for c in disks + ], + ) + if is_old_facts: + module.exit_json(changed=False, ansible_facts=result) + else: + module.exit_json(changed=False, **result) + except Exception as e: + module.fail_json(msg=str(e), exception=traceback.format_exc()) + finally: + connection.close(logout=auth.get('token') is None) + + +if __name__ == '__main__': + removed_module("2.10") diff --git a/plugins/modules/cloud/ovirt/ovirt_event_facts.py b/plugins/modules/cloud/ovirt/ovirt_event_facts.py new file mode 100644 index 0000000000..f328d3bf71 --- /dev/null +++ b/plugins/modules/cloud/ovirt/ovirt_event_facts.py @@ -0,0 +1,176 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright: (c) 2019, Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['deprecated'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: ovirt_event_facts +short_description: This module can be used to retrieve information about one or more oVirt/RHV events +author: "Chris Keller (@nasx)" +deprecated: + removed_in: "2.10" + why: When migrating to collection we decided to use only _info modules. + alternative: Use M(ovirt_event_info) instead +description: + - "Retrieve information about one or more oVirt/RHV events." + - This module was called C(ovirt_event_facts) before Ansible 2.9, returning C(ansible_facts). + Note that the M(ovirt_event_info) module no longer returns C(ansible_facts)! +options: + case_sensitive: + description: + - "Indicates if the search performed using the search parameter should be performed taking case + into account. The default value is true, which means that case is taken into account. If you + want to search ignoring case set it to false." + required: false + default: true + type: bool + + from_: + description: + - "Indicates the event index after which events should be returned. The indexes of events are + strictly increasing, so when this parameter is used only the events with greater indexes + will be returned." + required: false + type: int + + max: + description: + - "Sets the maximum number of events to return. If not specified all the events are returned." 
+ required: false + type: int + + search: + description: + - "Search term which is accepted by the oVirt/RHV API." + - "For example to search for events of severity alert use the following pattern: severity=alert" + required: false + type: str + + headers: + description: + - "Additional HTTP headers." + required: false + type: str + + query: + description: + - "Additional URL query parameters." + required: false + type: str + + wait: + description: + - "If I(true), wait for the response." + required: false + default: true + type: bool +extends_documentation_fragment: +- ovirt.ovirt.ovirt_info + +''' + +EXAMPLES = ''' +# Examples don't contain the auth parameter for simplicity, +# look at the ovirt_auth module to see how to reuse authentication. + +- name: Return all events + ovirt_event_info: + register: result + +- name: Return the last 10 events + ovirt_event_info: + max: 10 + register: result + +- name: Return all events of type alert + ovirt_event_info: + search: "severity=alert" + register: result +- debug: + msg: "{{ result.ovirt_events }}" +''' + +RETURN = ''' +ovirt_events: + description: "List of dictionaries describing the events. Event attributes are mapped to dictionary keys. + All event attributes can be found at the following url: + http://ovirt.github.io/ovirt-engine-api-model/master/#types/event" + returned: On success. + type: list +''' + +import traceback + +from ansible.module_utils.common.removed import removed_module +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.ovirt.ovirt.plugins.module_utils.ovirt import ( + check_sdk, + create_connection, + get_dict_of_struct, + ovirt_info_full_argument_spec, +) + + +def main(): + argument_spec = ovirt_info_full_argument_spec( + case_sensitive=dict(default=True, type='bool', required=False), + from_=dict(default=None, type='int', required=False), + max=dict(default=None, type='int', required=False), + search=dict(default='', required=False), + headers=dict(default='', required=False), + query=dict(default='', required=False), + wait=dict(default=True, type='bool', required=False) + ) + module = AnsibleModule(argument_spec) + is_old_facts = module._name == 'ovirt_event_facts' + if is_old_facts: + module.deprecate("The 'ovirt_event_facts' module has been renamed to 'ovirt_event_info', " + "and the renamed one no longer returns ansible_facts", version='2.13') + + check_sdk(module) + + try: + auth = module.params.pop('auth') + connection = create_connection(auth) + events_service = connection.system_service().events_service() + events = events_service.list( + case_sensitive=module.params['case_sensitive'], + from_=module.params['from_'], + max=module.params['max'], + search=module.params['search'], + headers=module.params['headers'], + query=module.params['query'], + wait=module.params['wait'] + ) + + result = dict( + ovirt_events=[ + get_dict_of_struct( + struct=c, + connection=connection, + fetch_nested=module.params.get('fetch_nested'), + attributes=module.params.get('nested_attributes'), + ) for c in events + ], + ) + if is_old_facts: + module.exit_json(changed=False, ansible_facts=result) + else: + module.exit_json(changed=False, **result) + except Exception as e: + module.fail_json(msg=str(e), exception=traceback.format_exc()) + finally: + connection.close(logout=auth.get('token') is None) + + +if __name__ == '__main__': + removed_module("2.10") diff --git a/plugins/modules/cloud/ovirt/ovirt_external_provider_facts.py b/plugins/modules/cloud/ovirt/ovirt_external_provider_facts.py new file mode
100644 index 0000000000..bcbc1c8a96 --- /dev/null +++ b/plugins/modules/cloud/ovirt/ovirt_external_provider_facts.py @@ -0,0 +1,170 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (c) 2016 Red Hat, Inc. +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['deprecated'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: ovirt_external_provider_facts +short_description: Retrieve information about one or more oVirt/RHV external providers +author: "Ondra Machacek (@machacekondra)" +deprecated: + removed_in: "2.10" + why: When migrating to collection we decided to use only _info modules. + alternative: Use M(ovirt_external_provider_info) instead +description: + - "Retrieve information about one or more oVirt/RHV external providers." + - This module was called C(ovirt_external_provider_facts) before Ansible 2.9, returning C(ansible_facts). + Note that the M(ovirt_external_provider_info) module no longer returns C(ansible_facts)! +notes: + - "This module returns a variable C(ovirt_external_providers), which + contains a list of external_providers. You need to register the result with + the I(register) keyword to use it." +options: + type: + description: + - "Type of the external provider." + choices: ['os_image', 'os_network', 'os_volume', 'foreman'] + required: true + name: + description: + - "Name of the external provider, can be used as glob expression." +extends_documentation_fragment: +- ovirt.ovirt.ovirt_info + +''' + +EXAMPLES = ''' +# Examples don't contain auth parameter for simplicity, +# look at ovirt_auth module to see how to reuse authentication: + +# Gather information about all image external providers named C(glance): +- ovirt_external_provider_info: + type: os_image + name: glance + register: result +- debug: + msg: "{{ result.ovirt_external_providers }}" +''' + +RETURN = ''' +ovirt_external_providers: + description: + - "List of dictionaries. Content depends on I(type)." + - "For type C(foreman), attributes appearing in the dictionary can be found on your oVirt/RHV instance + at the following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/external_host_provider." + - "For type C(os_image), attributes appearing in the dictionary can be found on your oVirt/RHV instance + at the following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/openstack_image_provider." + - "For type C(os_volume), attributes appearing in the dictionary can be found on your oVirt/RHV instance + at the following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/openstack_volume_provider." + - "For type C(os_network), attributes appearing in the dictionary can be found on your oVirt/RHV instance + at the following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/openstack_network_provider."
+ returned: On success + type: list +''' + +import fnmatch +import traceback + +from ansible.module_utils.common.removed import removed_module +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.ovirt.ovirt.plugins.module_utils.ovirt import ( + check_sdk, + create_connection, + get_dict_of_struct, + ovirt_info_full_argument_spec, +) + + +def _external_provider_service(provider_type, system_service): + if provider_type == 'os_image': + return system_service.openstack_image_providers_service() + elif provider_type == 'os_network': + return system_service.openstack_network_providers_service() + elif provider_type == 'os_volume': + return system_service.openstack_volume_providers_service() + elif provider_type == 'foreman': + return system_service.external_host_providers_service() + + +def main(): + argument_spec = ovirt_info_full_argument_spec( + name=dict(default=None, required=False), + type=dict( + default=None, + required=True, + choices=[ + 'os_image', 'os_network', 'os_volume', 'foreman', + ], + aliases=['provider'], + ), + ) + module = AnsibleModule(argument_spec) + is_old_facts = module._name == 'ovirt_external_provider_facts' + if is_old_facts: + module.deprecate("The 'ovirt_external_provider_facts' module has been renamed to 'ovirt_external_provider_info', " + "and the renamed one no longer returns ansible_facts", version='2.13') + + check_sdk(module) + + try: + auth = module.params.pop('auth') + connection = create_connection(auth) + external_providers_service = _external_provider_service( + provider_type=module.params.pop('type'), + system_service=connection.system_service(), + ) + if module.params['name']: + external_providers = [ + e for e in external_providers_service.list() + if fnmatch.fnmatch(e.name, module.params['name']) + ] + else: + external_providers = external_providers_service.list() + + result = dict( + ovirt_external_providers=[ + get_dict_of_struct( + struct=c, + connection=connection, + fetch_nested=module.params.get('fetch_nested'), + attributes=module.params.get('nested_attributes'), + ) for c in external_providers + ], + ) + if is_old_facts: + module.exit_json(changed=False, ansible_facts=result) + else: + module.exit_json(changed=False, **result) + except Exception as e: + module.fail_json(msg=str(e), exception=traceback.format_exc()) + finally: + connection.close(logout=auth.get('token') is None) + + +if __name__ == '__main__': + removed_module("2.10") diff --git a/plugins/modules/cloud/ovirt/ovirt_group_facts.py b/plugins/modules/cloud/ovirt/ovirt_group_facts.py new file mode 100644 index 0000000000..b4160b93ad --- /dev/null +++ b/plugins/modules/cloud/ovirt/ovirt_group_facts.py @@ -0,0 +1,127 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (c) 2016 Red Hat, Inc. +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
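_external_provider_service() above maps the C(type) value to the matching SDK service with an if/elif chain that silently returns None for an unknown type. A dict-based equivalent of the same dispatch (a sketch using the module's method names), which fails loudly instead:

def external_provider_service(provider_type, system_service):
    # Same mapping as the if/elif chain above; raises KeyError on an unknown type.
    dispatch = {
        'os_image': system_service.openstack_image_providers_service,
        'os_network': system_service.openstack_network_providers_service,
        'os_volume': system_service.openstack_volume_providers_service,
        'foreman': system_service.external_host_providers_service,
    }
    return dispatch[provider_type]()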
+# + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['deprecated'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: ovirt_group_facts +short_description: Retrieve information about one or more oVirt/RHV groups +author: "Ondra Machacek (@machacekondra)" +deprecated: + removed_in: "2.10" + why: When migrating to collection we decided to use only _info modules. + alternative: Use M(ovirt_group_info) instead +description: + - "Retrieve information about one or more oVirt/RHV groups." + - This module was called C(ovirt_group_facts) before Ansible 2.9, returning C(ansible_facts). + Note that the M(ovirt_group_info) module no longer returns C(ansible_facts)! +notes: + - "This module returns a variable C(ovirt_groups), which + contains a list of groups. You need to register the result with + the I(register) keyword to use it." +options: + pattern: + description: + - "Search term which is accepted by oVirt/RHV search backend." + - "For example to search group X use following pattern: name=X" +extends_documentation_fragment: +- ovirt.ovirt.ovirt_info + +''' + +EXAMPLES = ''' +# Examples don't contain auth parameter for simplicity, +# look at ovirt_auth module to see how to reuse authentication: + +# Gather information about all groups which names start with C(admin): +- ovirt_group_info: + pattern: name=admin* + register: result +- debug: + msg: "{{ result.ovirt_groups }}" +''' + +RETURN = ''' +ovirt_groups: + description: "List of dictionaries describing the groups. Group attributes are mapped to dictionary keys, + all groups attributes can be found at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/group." + returned: On success. 
+ type: list +''' + +import traceback + +from ansible.module_utils.common.removed import removed_module +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.ovirt.ovirt.plugins.module_utils.ovirt import ( + check_sdk, + create_connection, + get_dict_of_struct, + ovirt_info_full_argument_spec, +) + + +def main(): + argument_spec = ovirt_info_full_argument_spec( + pattern=dict(default='', required=False), + ) + module = AnsibleModule(argument_spec) + is_old_facts = module._name == 'ovirt_group_facts' + if is_old_facts: + module.deprecate("The 'ovirt_group_facts' module has been renamed to 'ovirt_group_info', " + "and the renamed one no longer returns ansible_facts", version='2.13') + + check_sdk(module) + + try: + auth = module.params.pop('auth') + connection = create_connection(auth) + groups_service = connection.system_service().groups_service() + groups = groups_service.list(search=module.params['pattern']) + result = dict( + ovirt_groups=[ + get_dict_of_struct( + struct=c, + connection=connection, + fetch_nested=module.params.get('fetch_nested'), + attributes=module.params.get('nested_attributes'), + ) for c in groups + ], + ) + if is_old_facts: + module.exit_json(changed=False, ansible_facts=result) + else: + module.exit_json(changed=False, **result) + except Exception as e: + module.fail_json(msg=str(e), exception=traceback.format_exc()) + finally: + connection.close(logout=auth.get('token') is None) + + +if __name__ == '__main__': + removed_module("2.10") diff --git a/plugins/modules/cloud/ovirt/ovirt_host_facts.py b/plugins/modules/cloud/ovirt/ovirt_host_facts.py new file mode 100644 index 0000000000..606f26a07b --- /dev/null +++ b/plugins/modules/cloud/ovirt/ovirt_host_facts.py @@ -0,0 +1,151 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (c) 2016 Red Hat, Inc. +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['deprecated'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: ovirt_host_facts +short_description: Retrieve information about one or more oVirt/RHV hosts +author: "Ondra Machacek (@machacekondra)" +deprecated: + removed_in: "2.10" + why: When migrating to collection we decided to use only _info modules. + alternative: Use M(ovirt_host_info) instead +description: + - "Retrieve information about one or more oVirt/RHV hosts." + - This module was called C(ovirt_host_facts) before Ansible 2.9, returning C(ansible_facts). + Note that the M(ovirt_host_info) module no longer returns C(ansible_facts)! +notes: + - "This module returns a variable C(ovirt_hosts), which + contains a list of hosts. You need to register the result with + the I(register) keyword to use it." +options: + pattern: + description: + - "Search term which is accepted by oVirt/RHV search backend." + - "For example to search host X from datacenter Y use following pattern: + name=X and datacenter=Y" + all_content: + description: + - "If I(true) all the attributes of the hosts should be + included in the response." + default: False + type: bool + cluster_version: + description: + - "Filter the hosts based on the cluster version." 
+ type: str + +extends_documentation_fragment: +- ovirt.ovirt.ovirt_info + +''' + +EXAMPLES = ''' +# Examples don't contain auth parameter for simplicity, +# look at ovirt_auth module to see how to reuse authentication: + +# Gather information about all hosts which names start with C(host) and +# belong to data center C(west): +- ovirt_host_info: + pattern: name=host* and datacenter=west + register: result +- debug: + msg: "{{ result.ovirt_hosts }}" +# All hosts with cluster version 4.2: +- ovirt_host_info: + pattern: name=host* + cluster_version: "4.2" + register: result +- debug: + msg: "{{ result.ovirt_hosts }}" +''' + +RETURN = ''' +ovirt_hosts: + description: "List of dictionaries describing the hosts. Host attributes are mapped to dictionary keys, + all hosts attributes can be found at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/host." + returned: On success. + type: list +''' + +import traceback + +from ansible.module_utils.common.removed import removed_module +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.ovirt.ovirt.plugins.module_utils.ovirt import ( + check_sdk, + create_connection, + get_dict_of_struct, + ovirt_info_full_argument_spec, +) + + +def get_filtered_hosts(cluster_version, hosts, connection): + # Filtering by cluster version returns only those which have same cluster version as input + filtered_hosts = [] + for host in hosts: + cluster = connection.follow_link(host.cluster) + cluster_version_host = str(cluster.version.major) + '.' + str(cluster.version.minor) + if cluster_version_host == cluster_version: + filtered_hosts.append(host) + return filtered_hosts + + +def main(): + argument_spec = ovirt_info_full_argument_spec( + pattern=dict(default='', required=False), + all_content=dict(default=False, type='bool'), + cluster_version=dict(default=None, type='str'), + ) + module = AnsibleModule(argument_spec) + is_old_facts = module._name == 'ovirt_host_facts' + if is_old_facts: + module.deprecate("The 'ovirt_host_facts' module has been renamed to 'ovirt_host_info', " + "and the renamed one no longer returns ansible_facts", version='2.13') + + check_sdk(module) + + try: + auth = module.params.pop('auth') + connection = create_connection(auth) + hosts_service = connection.system_service().hosts_service() + hosts = hosts_service.list( + search=module.params['pattern'], + all_content=module.params['all_content'] + ) + cluster_version = module.params.get('cluster_version') + if cluster_version is not None: + hosts = get_filtered_hosts(cluster_version, hosts, connection) + result = dict( + ovirt_hosts=[ + get_dict_of_struct( + struct=c, + connection=connection, + fetch_nested=module.params.get('fetch_nested'), + attributes=module.params.get('nested_attributes'), + ) for c in hosts + ], + ) + if is_old_facts: + module.exit_json(changed=False, ansible_facts=result) + else: + module.exit_json(changed=False, **result) + except Exception as e: + module.fail_json(msg=str(e), exception=traceback.format_exc()) + finally: + connection.close(logout=auth.get('token') is None) + + +if __name__ == '__main__': + removed_module("2.10") diff --git a/plugins/modules/cloud/ovirt/ovirt_host_storage_facts.py b/plugins/modules/cloud/ovirt/ovirt_host_storage_facts.py new file mode 100644 index 0000000000..5fdc0d19e7 --- /dev/null +++ b/plugins/modules/cloud/ovirt/ovirt_host_storage_facts.py @@ -0,0 +1,190 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (c) 2017 Red Hat, Inc. 
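The C(cluster_version) filter in ovirt_host_facts above resolves each host's cluster through connection.follow_link() and compares a "major.minor" string. A pure-Python sketch of the comparison, with namedtuples standing in for SDK objects and the link already resolved:

from collections import namedtuple

Version = namedtuple('Version', ['major', 'minor'])
Cluster = namedtuple('Cluster', ['version'])
Host = namedtuple('Host', ['name', 'cluster'])

def filter_by_cluster_version(hosts, wanted):
    # Same string comparison as get_filtered_hosts(): '4.2' matches only 4.2.
    return [h for h in hosts
            if '%s.%s' % (h.cluster.version.major, h.cluster.version.minor) == wanted]

hosts = [Host('h1', Cluster(Version(4, 2))), Host('h2', Cluster(Version(4, 3)))]
print([h.name for h in filter_by_cluster_version(hosts, '4.2')])  # ['h1']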
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['deprecated'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: ovirt_host_storage_facts +short_description: Retrieve information about one or more oVirt/RHV HostStorages (applicable only for block storage) +author: "Daniel Erez (@derez)" +deprecated: + removed_in: "2.10" + why: When migrating to collection we decided to use only _info modules. + alternative: Use M(ovirt_host_storage_info) instead +description: + - "Retrieve information about one or more oVirt/RHV HostStorages (applicable only for block storage)." + - This module was called C(ovirt_host_storage_facts) before Ansible 2.9, returning C(ansible_facts). + Note that the M(ovirt_host_storage_info) module no longer returns C(ansible_facts)! +options: + host: + description: + - "Host to get device list from." + required: true + iscsi: + description: + - "Dictionary with values for iSCSI storage type:" + suboptions: + address: + description: + - "Address of the iSCSI storage server." + target: + description: + - "The target IQN for the storage device." + username: + description: + - "A CHAP user name for logging into a target." + password: + description: + - "A CHAP password for logging into a target." + portal: + description: + - "The portal being used to connect with iscsi." + fcp: + description: + - "Dictionary with values for fibre channel storage type:" + suboptions: + address: + description: + - "Address of the fibre channel storage server." + port: + description: + - "Port of the fibre channel storage server." + lun_id: + description: + - "LUN id." +extends_documentation_fragment: +- ovirt.ovirt.ovirt_info + +''' + +EXAMPLES = ''' +# Examples don't contain auth parameter for simplicity, +# look at ovirt_auth module to see how to reuse authentication: + +# Gather information about HostStorages with specified target and address: +- ovirt_host_storage_info: + host: myhost + iscsi: + target: iqn.2016-08-09.domain-01:nickname + address: 10.34.63.204 + register: result +- debug: + msg: "{{ result.ovirt_host_storages }}" +''' + +RETURN = ''' +ovirt_host_storages: + description: "List of dictionaries describing the HostStorage. HostStorage attributes are mapped to dictionary keys, + all HostStorage attributes can be found at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/host_storage." + returned: On success. 
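For iSCSI storage the module below logs the host into the target (its _login() helper) before listing LUNs. The login call takes an otypes.IscsiDetails payload; a sketch with placeholder address, target, and CHAP values, where host_service would come from hosts_service.host_service(host_id):

import ovirtsdk4.types as otypes

# Placeholder connection details.
iscsi_details = otypes.IscsiDetails(
    address='10.34.63.204',
    target='iqn.2016-08-09.domain-01:nickname',
    username='chap_user',
    password='chap_secret',
)
# host_service.iscsi_login(iscsi=iscsi_details)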
+ type: list +''' + +import traceback + +try: + import ovirtsdk4.types as otypes +except ImportError: + pass + +from ansible.module_utils.common.removed import removed_module +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.ovirt.ovirt.plugins.module_utils.ovirt import ( + check_sdk, + create_connection, + get_dict_of_struct, + ovirt_info_full_argument_spec, + get_id_by_name, +) + + +def _login(host_service, iscsi): + host_service.iscsi_login( + iscsi=otypes.IscsiDetails( + username=iscsi.get('username'), + password=iscsi.get('password'), + address=iscsi.get('address'), + target=iscsi.get('target'), + portal=iscsi.get('portal') + ), + ) + + +def _get_storage_type(params): + for sd_type in ['iscsi', 'fcp']: + if params.get(sd_type) is not None: + return sd_type + + +def main(): + argument_spec = ovirt_info_full_argument_spec( + host=dict(required=True), + iscsi=dict(default=None, type='dict'), + fcp=dict(default=None, type='dict'), + ) + module = AnsibleModule(argument_spec) + is_old_facts = module._name == 'ovirt_host_storage_facts' + if is_old_facts: + module.deprecate("The 'ovirt_host_storage_facts' module has been renamed to 'ovirt_host_storage_info', " + "and the renamed one no longer returns ansible_facts", version='2.13') + check_sdk(module) + + try: + auth = module.params.pop('auth') + connection = create_connection(auth) + + # Get Host + hosts_service = connection.system_service().hosts_service() + host_id = get_id_by_name(hosts_service, module.params['host']) + storage_type = _get_storage_type(module.params) + host_service = hosts_service.host_service(host_id) + + if storage_type == 'iscsi': + # Login + iscsi = module.params.get('iscsi') + _login(host_service, iscsi) + + # Get LUNs exposed from the specified target + host_storages = host_service.storage_service().list() + + if storage_type == 'iscsi': + filtered_host_storages = [host_storage for host_storage in host_storages + if host_storage.type == otypes.StorageType.ISCSI] + if 'target' in iscsi: + filtered_host_storages = [host_storage for host_storage in filtered_host_storages + if iscsi.get('target') == host_storage.logical_units[0].target] + elif storage_type == 'fcp': + filtered_host_storages = [host_storage for host_storage in host_storages + if host_storage.type == otypes.StorageType.FCP] + else: + # Neither iscsi nor fcp was given; fall back to the full list instead + # of referencing an unbound variable below. + filtered_host_storages = host_storages + + result = dict( + ovirt_host_storages=[ + get_dict_of_struct( + struct=c, + connection=connection, + fetch_nested=module.params.get('fetch_nested'), + attributes=module.params.get('nested_attributes'), + ) for c in filtered_host_storages + ], + ) + if is_old_facts: + module.exit_json(changed=False, ansible_facts=result) + else: + module.exit_json(changed=False, **result) + except Exception as e: + module.fail_json(msg=str(e), exception=traceback.format_exc()) + finally: + connection.close(logout=auth.get('token') is None) + + +if __name__ == '__main__': + removed_module("2.10") diff --git a/plugins/modules/cloud/ovirt/ovirt_network_facts.py b/plugins/modules/cloud/ovirt/ovirt_network_facts.py new file mode 100644 index 0000000000..f0a9134ebe --- /dev/null +++ b/plugins/modules/cloud/ovirt/ovirt_network_facts.py @@ -0,0 +1,129 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (c) 2016 Red Hat, Inc. +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version.
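The storage filtering above keys off host_storage.type and, for iSCSI, the target of the first logical unit. The same selection as a small pure function, with namedtuples and plain strings standing in for the SDK types:

from collections import namedtuple

LogicalUnit = namedtuple('LogicalUnit', ['target'])
HostStorage = namedtuple('HostStorage', ['type', 'logical_units'])

def iscsi_storages_for_target(host_storages, target):
    # Keep iSCSI storages whose first LUN points at the requested target.
    return [hs for hs in host_storages
            if hs.type == 'iscsi' and hs.logical_units[0].target == target]

storages = [
    HostStorage('iscsi', [LogicalUnit('iqn.2016-08-09.domain-01:nickname')]),
    HostStorage('fcp', [LogicalUnit(None)]),
]
print(len(iscsi_storages_for_target(storages, 'iqn.2016-08-09.domain-01:nickname')))  # 1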
+# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['deprecated'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: ovirt_network_facts +short_description: Retrieve information about one or more oVirt/RHV networks +author: "Ondra Machacek (@machacekondra)" +deprecated: + removed_in: "2.10" + why: When migrating to collection we decided to use only _info modules. + alternative: Use M(ovirt_network_info) instead +description: + - "Retrieve information about one or more oVirt/RHV networks." + - This module was called C(ovirt_network_facts) before Ansible 2.9, returning C(ansible_facts). + Note that the M(ovirt_network_info) module no longer returns C(ansible_facts)! +notes: + - "This module returns a variable C(ovirt_networks), which + contains a list of networks. You need to register the result with + the I(register) keyword to use it." +options: + pattern: + description: + - "Search term which is accepted by oVirt/RHV search backend." + - "For example to search network starting with string vlan1 use: name=vlan1*" +extends_documentation_fragment: +- ovirt.ovirt.ovirt_info + +''' + + +EXAMPLES = ''' +# Examples don't contain auth parameter for simplicity, +# look at ovirt_auth module to see how to reuse authentication: + +# Gather information about all networks which names start with C(vlan1): +- ovirt_network_info: + pattern: name=vlan1* + register: result +- debug: + msg: "{{ result.ovirt_networks }}" +''' + + +RETURN = ''' +ovirt_networks: + description: "List of dictionaries describing the networks. Network attributes are mapped to dictionary keys, + all networks attributes can be found at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/network." + returned: On success. 
+ type: list +''' + +import traceback + +from ansible.module_utils.common.removed import removed_module +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.ovirt.ovirt.plugins.module_utils.ovirt import ( + check_sdk, + create_connection, + get_dict_of_struct, + ovirt_info_full_argument_spec, +) + + +def main(): + argument_spec = ovirt_info_full_argument_spec( + pattern=dict(default='', required=False), + ) + module = AnsibleModule(argument_spec) + is_old_facts = module._name == 'ovirt_network_facts' + if is_old_facts: + module.deprecate("The 'ovirt_network_facts' module has been renamed to 'ovirt_network_info', " + "and the renamed one no longer returns ansible_facts", version='2.13') + + check_sdk(module) + + try: + auth = module.params.pop('auth') + connection = create_connection(auth) + networks_service = connection.system_service().networks_service() + networks = networks_service.list(search=module.params['pattern']) + result = dict( + ovirt_networks=[ + get_dict_of_struct( + struct=c, + connection=connection, + fetch_nested=module.params.get('fetch_nested'), + attributes=module.params.get('nested_attributes'), + ) for c in networks + ], + ) + if is_old_facts: + module.exit_json(changed=False, ansible_facts=result) + else: + module.exit_json(changed=False, **result) + except Exception as e: + module.fail_json(msg=str(e), exception=traceback.format_exc()) + finally: + connection.close(logout=auth.get('token') is None) + + +if __name__ == '__main__': + removed_module("2.10") diff --git a/plugins/modules/cloud/ovirt/ovirt_nic_facts.py b/plugins/modules/cloud/ovirt/ovirt_nic_facts.py new file mode 100644 index 0000000000..d445ddbe70 --- /dev/null +++ b/plugins/modules/cloud/ovirt/ovirt_nic_facts.py @@ -0,0 +1,147 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (c) 2016 Red Hat, Inc. +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['deprecated'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: ovirt_nic_facts +short_description: Retrieve information about one or more oVirt/RHV virtual machine network interfaces +author: "Ondra Machacek (@machacekondra)" +deprecated: + removed_in: "2.10" + why: When migrating to collection we decided to use only _info modules. + alternative: Use M(ovirt_nic_info) instead +description: + - "Retrieve information about one or more oVirt/RHV virtual machine network interfaces." + - This module was called C(ovirt_nic_facts) before Ansible 2.9, returning C(ansible_facts). + Note that the M(ovirt_nic_info) module no longer returns C(ansible_facts)! +notes: + - "This module returns a variable C(ovirt_nics), which + contains a list of NICs. You need to register the result with + the I(register) keyword to use it." 
+options: + vm: + description: + - "Name of the VM where NIC is attached." + required: true + name: + description: + - "Name of the NIC, can be used as glob expression." +extends_documentation_fragment: +- ovirt.ovirt.ovirt_info + +''' + +EXAMPLES = ''' +# Examples don't contain auth parameter for simplicity, +# look at ovirt_auth module to see how to reuse authentication: + +# Gather information about all NICs which names start with C(eth) for VM named C(centos7): +- ovirt_nic_info: + vm: centos7 + name: eth* + register: result +- debug: + msg: "{{ result.ovirt_nics }}" +''' + +RETURN = ''' +ovirt_nics: + description: "List of dictionaries describing the network interfaces. NIC attributes are mapped to dictionary keys, + all NICs attributes can be found at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/nic." + returned: On success. + type: list +''' + +import fnmatch +import traceback + +from ansible.module_utils.common.removed import removed_module +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.ovirt.ovirt.plugins.module_utils.ovirt import ( + check_sdk, + create_connection, + get_dict_of_struct, + ovirt_info_full_argument_spec, + search_by_name, +) + + +def main(): + argument_spec = ovirt_info_full_argument_spec( + vm=dict(required=True), + name=dict(default=None), + ) + module = AnsibleModule(argument_spec) + is_old_facts = module._name == 'ovirt_nic_facts' + if is_old_facts: + module.deprecate("The 'ovirt_nic_facts' module has been renamed to 'ovirt_nic_info', " + "and the renamed one no longer returns ansible_facts", version='2.13') + + check_sdk(module) + + try: + auth = module.params.pop('auth') + connection = create_connection(auth) + vms_service = connection.system_service().vms_service() + vm_name = module.params['vm'] + vm = search_by_name(vms_service, vm_name) + if vm is None: + raise Exception("VM '%s' was not found." % vm_name) + + nics_service = vms_service.service(vm.id).nics_service() + if module.params['name']: + nics = [ + e for e in nics_service.list() + if fnmatch.fnmatch(e.name, module.params['name']) + ] + else: + nics = nics_service.list() + + result = dict( + ovirt_nics=[ + get_dict_of_struct( + struct=c, + connection=connection, + fetch_nested=module.params.get('fetch_nested'), + attributes=module.params.get('nested_attributes'), + ) for c in nics + ], + ) + if is_old_facts: + module.exit_json(changed=False, ansible_facts=result) + else: + module.exit_json(changed=False, **result) + except Exception as e: + module.fail_json(msg=str(e), exception=traceback.format_exc()) + finally: + connection.close(logout=auth.get('token') is None) + + +if __name__ == '__main__': + removed_module("2.10") diff --git a/plugins/modules/cloud/ovirt/ovirt_permission_facts.py b/plugins/modules/cloud/ovirt/ovirt_permission_facts.py new file mode 100644 index 0000000000..4e29d61449 --- /dev/null +++ b/plugins/modules/cloud/ovirt/ovirt_permission_facts.py @@ -0,0 +1,170 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (c) 2016 Red Hat, Inc. +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['deprecated'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: ovirt_permission_facts +short_description: Retrieve information about one or more oVirt/RHV permissions +author: "Ondra Machacek (@machacekondra)" +deprecated: + removed_in: "2.10" + why: When migrating to collection we decided to use only _info modules. + alternative: Use M(ovirt_permission_info) instead +description: + - "Retrieve information about one or more oVirt/RHV permissions." + - This module was called C(ovirt_permission_facts) before Ansible 2.9, returning C(ansible_facts). + Note that the M(ovirt_permission_info) module no longer returns C(ansible_facts)! +notes: + - "This module returns a variable C(ovirt_permissions), which + contains a list of permissions. You need to register the result with + the I(register) keyword to use it." +options: + user_name: + description: + - "Username of the user to manage. In most LDAPs it's I(uid) of the user, but in Active Directory you must specify I(UPN) of the user." + group_name: + description: + - "Name of the group to manage." + authz_name: + description: + - "Authorization provider of the user/group. In previous versions of oVirt/RHV known as domain." + required: true + aliases: ['domain'] + namespace: + description: + - "Namespace of the authorization provider, where user/group resides." + required: false +extends_documentation_fragment: +- ovirt.ovirt.ovirt_info + +''' + +EXAMPLES = ''' +# Examples don't contain auth parameter for simplicity, +# look at ovirt_auth module to see how to reuse authentication: + +# Gather information about all permissions of user with username C(john): +- ovirt_permission_info: + user_name: john + authz_name: example.com-authz + register: result +- debug: + msg: "{{ result.ovirt_permissions }}" +''' + +RETURN = ''' +ovirt_permissions: + description: "List of dictionaries describing the permissions. Permission attributes are mapped to dictionary keys, + all permissions attributes can be found at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/permission." + returned: On success. 
+ type: list +''' + +import traceback + +try: + import ovirtsdk4 as sdk +except ImportError: + pass + +from ansible.module_utils.common.removed import removed_module +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.ovirt.ovirt.plugins.module_utils.ovirt import ( + check_sdk, + create_connection, + get_link_name, + ovirt_info_full_argument_spec, + search_by_name, +) + + +def _permissions_service(connection, module): + if module.params['user_name']: + service = connection.system_service().users_service() + entity = next( + iter( + service.list( + search='usrname={0}'.format( + '{0}@{1}'.format(module.params['user_name'], module.params['authz_name']) + ) + ) + ), + None + ) + else: + service = connection.system_service().groups_service() + entity = search_by_name(service, module.params['group_name']) + + if entity is None: + raise Exception("User/Group wasn't found.") + + return service.service(entity.id).permissions_service() + + +def main(): + argument_spec = ovirt_info_full_argument_spec( + authz_name=dict(required=True, aliases=['domain']), + user_name=dict(default=None), + group_name=dict(default=None), + namespace=dict(default=None), + ) + module = AnsibleModule(argument_spec) + is_old_facts = module._name == 'ovirt_permission_facts' + if is_old_facts: + module.deprecate("The 'ovirt_permission_facts' module has been renamed to 'ovirt_permission_info', " + "and the renamed one no longer returns ansible_facts", version='2.13') + + check_sdk(module) + + try: + auth = module.params.pop('auth') + connection = create_connection(auth) + permissions_service = _permissions_service(connection, module) + permissions = [] + for p in permissions_service.list(): + newperm = dict() + for key, value in p.__dict__.items(): + if value and isinstance(value, sdk.Struct): + newperm[key[1:]] = get_link_name(connection, value) + newperm['%s_id' % key[1:]] = value.id + permissions.append(newperm) + + result = dict(ovirt_permissions=permissions) + if is_old_facts: + module.exit_json(changed=False, ansible_facts=result) + else: + module.exit_json(changed=False, **result) + except Exception as e: + module.fail_json(msg=str(e), exception=traceback.format_exc()) + finally: + connection.close(logout=auth.get('token') is None) + + +if __name__ == '__main__': + removed_module("2.10") diff --git a/plugins/modules/cloud/ovirt/ovirt_quota_facts.py b/plugins/modules/cloud/ovirt/ovirt_quota_facts.py new file mode 100644 index 0000000000..9d2a349dff --- /dev/null +++ b/plugins/modules/cloud/ovirt/ovirt_quota_facts.py @@ -0,0 +1,147 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (c) 2016 Red Hat, Inc. +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
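Unlike its siblings, the permission module above does not serialize whole structs; it walks each permission's private attributes, strips the leading underscore, and replaces every linked object with a readable name plus an *_id key. A toy version of that flattening, with Struct and get_link_name() reduced to simple stand-ins:

class Struct(object):
    def __init__(self, id, name):
        self.id = id
        self.name = name

class Permission(object):
    def __init__(self, role, user):
        self._role = role
        self._user = user

def flatten(perm):
    flat = {}
    for key, value in perm.__dict__.items():
        if value and isinstance(value, Struct):
            flat[key[1:]] = value.name           # get_link_name() in the module
            flat['%s_id' % key[1:]] = value.id
    return flat

print(flatten(Permission(Struct('1', 'UserRole'), Struct('9', 'john'))))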
+# + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['deprecated'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: ovirt_quota_facts +short_description: Retrieve information about one or more oVirt/RHV quotas +author: "Maor Lipchuk (@machacekondra)" +deprecated: + removed_in: "2.10" + why: When migrating to collection we decided to use only _info modules. + alternative: Use M(ovirt_quota_info) instead +description: + - "Retrieve information about one or more oVirt/RHV quotas." + - This module was called C(ovirt_quota_facts) before Ansible 2.9, returning C(ansible_facts). + Note that the M(ovirt_quota_info) module no longer returns C(ansible_facts)! +notes: + - "This module returns a variable C(ovirt_quotas), which + contains a list of quotas. You need to register the result with + the I(register) keyword to use it." +options: + data_center: + description: + - "Name of the datacenter where quota resides." + required: true + name: + description: + - "Name of the quota, can be used as glob expression." +extends_documentation_fragment: +- ovirt.ovirt.ovirt_info + +''' + +EXAMPLES = ''' +# Examples don't contain auth parameter for simplicity, +# look at ovirt_auth module to see how to reuse authentication: + +# Gather information about quota named C(myquota) in Default datacenter: +- ovirt_quota_info: + data_center: Default + name: myquota + register: result +- debug: + msg: "{{ result.ovirt_quotas }}" +''' + +RETURN = ''' +ovirt_quotas: + description: "List of dictionaries describing the quotas. Quota attributes are mapped to dictionary keys, + all quotas attributes can be found at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/quota." + returned: On success. + type: list +''' + +import fnmatch +import traceback + +from ansible.module_utils.common.removed import removed_module +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.ovirt.ovirt.plugins.module_utils.ovirt import ( + check_sdk, + create_connection, + get_dict_of_struct, + ovirt_info_full_argument_spec, + search_by_name, +) + + +def main(): + argument_spec = ovirt_info_full_argument_spec( + data_center=dict(required=True), + name=dict(default=None), + ) + module = AnsibleModule(argument_spec) + is_old_facts = module._name == 'ovirt_quota_facts' + if is_old_facts: + module.deprecate("The 'ovirt_quota_facts' module has been renamed to 'ovirt_quota_info', " + "and the renamed one no longer returns ansible_facts", version='2.13') + + check_sdk(module) + + try: + auth = module.params.pop('auth') + connection = create_connection(auth) + datacenters_service = connection.system_service().data_centers_service() + dc_name = module.params['data_center'] + dc = search_by_name(datacenters_service, dc_name) + if dc is None: + raise Exception("Datacenter '%s' was not found."
% dc_name) + + quotas_service = datacenters_service.service(dc.id).quotas_service() + if module.params['name']: + quotas = [ + e for e in quotas_service.list() + if fnmatch.fnmatch(e.name, module.params['name']) + ] + else: + quotas = quotas_service.list() + + result = dict( + ovirt_quotas=[ + get_dict_of_struct( + struct=c, + connection=connection, + fetch_nested=module.params.get('fetch_nested'), + attributes=module.params.get('nested_attributes'), + ) for c in quotas + ], + ) + if is_old_facts: + module.exit_json(changed=False, ansible_facts=result) + else: + module.exit_json(changed=False, **result) + except Exception as e: + module.fail_json(msg=str(e), exception=traceback.format_exc()) + finally: + connection.close(logout=auth.get('token') is None) + + +if __name__ == '__main__': + removed_module("2.10") diff --git a/plugins/modules/cloud/ovirt/ovirt_scheduling_policy_facts.py b/plugins/modules/cloud/ovirt/ovirt_scheduling_policy_facts.py new file mode 100644 index 0000000000..988b38aecd --- /dev/null +++ b/plugins/modules/cloud/ovirt/ovirt_scheduling_policy_facts.py @@ -0,0 +1,146 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (c) 2017 Red Hat, Inc. +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['deprecated'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: ovirt_scheduling_policy_facts +short_description: Retrieve information about one or more oVirt scheduling policies +author: "Ondra Machacek (@machacekondra)" +deprecated: + removed_in: "2.10" + why: When migrating to collection we decided to use only _info modules. + alternative: Use M(ovirt_scheduling_policy_info) instead +description: + - "Retrieve information about one or more oVirt scheduling policies." + - This module was called C(ovirt_scheduling_policy_facts) before Ansible 2.9, returning C(ansible_facts). + Note that the M(ovirt_scheduling_policy_info) module no longer returns C(ansible_facts)! +notes: + - "This module returns a variable C(ovirt_scheduling_policies), + which contains a list of scheduling policies. You need to register the result with + the I(register) keyword to use it." +options: + id: + description: + - "ID of the scheduling policy." + required: true + name: + description: + - "Name of the scheduling policy, can be used as glob expression." 
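The C(name) option above is a shell-style glob, not an engine search pattern: the module fetches all scheduling policies and filters them client-side with fnmatch. A minimal standalone sketch of that filter (the entity list and pattern are illustrative):

import fnmatch

def filter_by_glob(entities, pattern):
    # Client-side glob match, as the facts module below does:
    # 'InCluster*' matches 'InClusterUpgrade', and so on.
    return [e for e in entities if fnmatch.fnmatch(e.name, pattern)]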
+extends_documentation_fragment: +- ovirt.ovirt.ovirt_info + +''' + +EXAMPLES = ''' +# Examples don't contain auth parameter for simplicity, +# look at ovirt_auth module to see how to reuse authentication: + +# Gather information about all scheduling policies with name InClusterUpgrade: +- ovirt_scheduling_policy_info: + name: InClusterUpgrade + register: result +- debug: + msg: "{{ result.ovirt_scheduling_policies }}" +''' + +RETURN = ''' +ovirt_scheduling_policies: + description: "List of dictionaries describing the scheduling policies. + Scheduling policies attributes are mapped to dictionary keys, + all scheduling policies attributes can be found at following + url: https://ovirt.example.com/ovirt-engine/api/model#types/scheduling_policy." + returned: On success. + type: list +''' + +import fnmatch +import traceback + +from ansible.module_utils.common.removed import removed_module +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.ovirt.ovirt.plugins.module_utils.ovirt import ( + check_sdk, + create_connection, + get_dict_of_struct, + ovirt_info_full_argument_spec, +) + + +def main(): + argument_spec = ovirt_info_full_argument_spec( + id=dict(default=None), + name=dict(default=None), + ) + module = AnsibleModule(argument_spec) + is_old_facts = module._name == 'ovirt_scheduling_policy_facts' + if is_old_facts: + module.deprecate("The 'ovirt_scheduling_policy_facts' module has been renamed to 'ovirt_scheduling_policy_info', " + "and the renamed one no longer returns ansible_facts", version='2.13') + + check_sdk(module) + + try: + auth = module.params.pop('auth') + connection = create_connection(auth) + system_service = connection.system_service() + sched_policies_service = system_service.scheduling_policies_service() + if module.params['name']: + sched_policies = [ + e for e in sched_policies_service.list() + if fnmatch.fnmatch(e.name, module.params['name']) + ] + elif module.params['id']: + sched_policies = [ + sched_policies_service.service(module.params['id']).get() + ] + else: + sched_policies = sched_policies_service.list() + + result = dict( + ovirt_scheduling_policies=[ + get_dict_of_struct( + struct=c, + connection=connection, + fetch_nested=module.params.get('fetch_nested'), + attributes=module.params.get('nested_attributes'), + ) for c in sched_policies + ], + ) + if is_old_facts: + module.exit_json(changed=False, ansible_facts=result) + else: + module.exit_json(changed=False, **result) + except Exception as e: + module.fail_json(msg=str(e), exception=traceback.format_exc()) + finally: + connection.close(logout=auth.get('token') is None) + + +if __name__ == '__main__': + removed_module("2.10") diff --git a/plugins/modules/cloud/ovirt/ovirt_snapshot_facts.py b/plugins/modules/cloud/ovirt/ovirt_snapshot_facts.py new file mode 100644 index 0000000000..8bf4e69cc9 --- /dev/null +++ b/plugins/modules/cloud/ovirt/ovirt_snapshot_facts.py @@ -0,0 +1,141 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (c) 2016 Red Hat, Inc. 
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['deprecated'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: ovirt_snapshot_facts +short_description: Retrieve information about one or more oVirt/RHV virtual machine snapshots +author: "Ondra Machacek (@machacekondra)" +deprecated: + removed_in: "2.10" + why: When migrating to collection we decided to use only _info modules. + alternative: Use M(ovirt_snapshot_info) instead +description: + - "Retrieve information about one or more oVirt/RHV virtual machine snapshots." + - This module was called C(ovirt_snapshot_facts) before Ansible 2.9, returning C(ansible_facts). + Note that the M(ovirt_snapshot_info) module no longer returns C(ansible_facts)! +notes: + - "This module returns a variable C(ovirt_snapshots), which + contains a list of snapshots. You need to register the result with + the I(register) keyword to use it." +options: + vm: + description: + - "Name of the VM with snapshot." + required: true + description: + description: + - "Description of the snapshot, can be used as glob expression." + snapshot_id: + description: + - "Id of the snapshot we want to retrieve information about." +extends_documentation_fragment: +- ovirt.ovirt.ovirt_info + +''' + +EXAMPLES = ''' +# Examples don't contain auth parameter for simplicity, +# look at ovirt_auth module to see how to reuse authentication: + +# Gather information about all snapshots which description start with C(update) for VM named C(centos7): +- ovirt_snapshot_info: + vm: centos7 + description: update* + register: result +- debug: + msg: "{{ result.ovirt_snapshots }}" +''' + +RETURN = ''' +ovirt_snapshots: + description: "List of dictionaries describing the snapshot. Snapshot attributes are mapped to dictionary keys, + all snapshot attributes can be found at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/snapshot." + returned: On success. + type: list +''' + + +import fnmatch +import traceback + +from ansible.module_utils.common.removed import removed_module +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.ovirt.ovirt.plugins.module_utils.ovirt import ( + check_sdk, + create_connection, + get_dict_of_struct, + ovirt_info_full_argument_spec, + search_by_name, +) + + +def main(): + argument_spec = ovirt_info_full_argument_spec( + vm=dict(required=True), + description=dict(default=None), + snapshot_id=dict(default=None), + ) + module = AnsibleModule(argument_spec) + is_old_facts = module._name == 'ovirt_snapshot_facts' + if is_old_facts: + module.deprecate("The 'ovirt_snapshot_facts' module has been renamed to 'ovirt_snapshot_info', " + "and the renamed one no longer returns ansible_facts", version='2.13') + + check_sdk(module) + + try: + auth = module.params.pop('auth') + connection = create_connection(auth) + vms_service = connection.system_service().vms_service() + vm_name = module.params['vm'] + vm = search_by_name(vms_service, vm_name) + if vm is None: + raise Exception("VM '%s' was not found." 
% vm_name) + + snapshots_service = vms_service.service(vm.id).snapshots_service() + if module.params['description']: + snapshots = [ + e for e in snapshots_service.list() + if fnmatch.fnmatch(e.description, module.params['description']) + ] + elif module.params['snapshot_id']: + snapshots = [ + snapshots_service.snapshot_service(module.params['snapshot_id']).get() + ] + else: + snapshots = snapshots_service.list() + + result = dict( + ovirt_snapshots=[ + get_dict_of_struct( + struct=c, + connection=connection, + fetch_nested=module.params.get('fetch_nested'), + attributes=module.params.get('nested_attributes'), + ) for c in snapshots + ], + ) + if is_old_facts: + module.exit_json(changed=False, ansible_facts=result) + else: + module.exit_json(changed=False, **result) + except Exception as e: + module.fail_json(msg=str(e), exception=traceback.format_exc()) + finally: + connection.close(logout=auth.get('token') is None) + + +if __name__ == '__main__': + removed_module("2.10") diff --git a/plugins/modules/cloud/ovirt/ovirt_storage_domain_facts.py b/plugins/modules/cloud/ovirt/ovirt_storage_domain_facts.py new file mode 100644 index 0000000000..dd42ffe4fd --- /dev/null +++ b/plugins/modules/cloud/ovirt/ovirt_storage_domain_facts.py @@ -0,0 +1,129 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (c) 2016 Red Hat, Inc. +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['deprecated'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: ovirt_storage_domain_facts +short_description: Retrieve information about one or more oVirt/RHV storage domains +author: "Ondra Machacek (@machacekondra)" +deprecated: + removed_in: "2.10" + why: When migrating to collection we decided to use only _info modules. + alternative: Use M(ovirt_storage_domain_info) instead +description: + - "Retrieve information about one or more oVirt/RHV storage domains." + - This module was called C(ovirt_storage_domain_facts) before Ansible 2.9, returning C(ansible_facts). + Note that the M(ovirt_storage_domain_info) module no longer returns C(ansible_facts)! +notes: + - "This module returns a variable C(ovirt_storage_domains), which + contains a list of storage domains. You need to register the result with + the I(register) keyword to use it." +options: + pattern: + description: + - "Search term which is accepted by oVirt/RHV search backend." 
+ - "For example to search storage domain X from datacenter Y use following pattern: + name=X and datacenter=Y" +extends_documentation_fragment: +- ovirt.ovirt.ovirt_info + +''' + +EXAMPLES = ''' +# Examples don't contain auth parameter for simplicity, +# look at ovirt_auth module to see how to reuse authentication: + +# Gather information about all storage domains which names start with C(data) and +# belong to data center C(west): +- ovirt_storage_domain_info: + pattern: name=data* and datacenter=west + register: result +- debug: + msg: "{{ result.ovirt_storage_domains }}" +''' + +RETURN = ''' +ovirt_storage_domains: + description: "List of dictionaries describing the storage domains. Storage_domain attributes are mapped to dictionary keys, + all storage domains attributes can be found at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/storage_domain." + returned: On success. + type: list +''' + +import traceback + +from ansible.module_utils.common.removed import removed_module +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.ovirt.ovirt.plugins.module_utils.ovirt import ( + check_sdk, + create_connection, + get_dict_of_struct, + ovirt_info_full_argument_spec, +) + + +def main(): + argument_spec = ovirt_info_full_argument_spec( + pattern=dict(default='', required=False), + ) + module = AnsibleModule(argument_spec) + is_old_facts = module._name == 'ovirt_storage_domain_facts' + if is_old_facts: + module.deprecate("The 'ovirt_storage_domain_facts' module has been renamed to 'ovirt_storage_domain_info', " + "and the renamed one no longer returns ansible_facts", version='2.13') + + check_sdk(module) + + try: + auth = module.params.pop('auth') + connection = create_connection(auth) + storage_domains_service = connection.system_service().storage_domains_service() + storage_domains = storage_domains_service.list(search=module.params['pattern']) + result = dict( + ovirt_storage_domains=[ + get_dict_of_struct( + struct=c, + connection=connection, + fetch_nested=module.params.get('fetch_nested'), + attributes=module.params.get('nested_attributes'), + ) for c in storage_domains + ], + ) + if is_old_facts: + module.exit_json(changed=False, ansible_facts=result) + else: + module.exit_json(changed=False, **result) + except Exception as e: + module.fail_json(msg=str(e), exception=traceback.format_exc()) + finally: + connection.close(logout=auth.get('token') is None) + + +if __name__ == '__main__': + removed_module("2.10") diff --git a/plugins/modules/cloud/ovirt/ovirt_storage_template_facts.py b/plugins/modules/cloud/ovirt/ovirt_storage_template_facts.py new file mode 100644 index 0000000000..9bcc856cc2 --- /dev/null +++ b/plugins/modules/cloud/ovirt/ovirt_storage_template_facts.py @@ -0,0 +1,147 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (c) 2017 Red Hat, Inc. +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+#
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['deprecated'],
+                    'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: ovirt_storage_template_facts
+short_description: Retrieve information about one or more oVirt/RHV templates related to a storage domain.
+author: "Maor Lipchuk (@machacekondra)"
+deprecated:
+    removed_in: "2.10"
+    why: When migrating to collection we decided to use only _info modules.
+    alternative: Use M(ovirt_storage_template_info) instead
+description:
+    - "Retrieve information about one or more oVirt/RHV templates related to a storage domain."
+    - This module was called C(ovirt_storage_template_facts) before Ansible 2.9, returning C(ansible_facts).
+      Note that the M(ovirt_storage_template_info) module no longer returns C(ansible_facts)!
+notes:
+    - "This module returns a variable C(ovirt_storage_templates), which
+       contains a list of templates. You need to register the result with
+       the I(register) keyword to use it."
+options:
+    unregistered:
+        description:
+            - "Flag which indicates whether to get unregistered templates which contain one or more
+               disks which reside on a storage domain or diskless templates."
+        type: bool
+        default: false
+    max:
+        description:
+            - "Sets the maximum number of templates to return. If not specified all the templates are returned."
+    storage_domain:
+        description:
+            - "The storage domain name where the templates should be listed."
+extends_documentation_fragment:
+- ovirt.ovirt.ovirt_info
+
+'''
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+# Gather information about all Templates which relate to a storage domain and
+# are unregistered:
+- ovirt_storage_template_info:
+    unregistered: true
+  register: result
+- debug:
+    msg: "{{ result.ovirt_storage_templates }}"
+'''
+
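Every module in this migration shares the same dual-name shim: invoked under the legacy _facts name it emits a deprecation warning and wraps the result in ansible_facts, while the _info name returns the result keys directly, to be captured with register. A condensed sketch of that branch (the function name is illustrative):

def exit_with_result(module, result):
    if module._name.endswith('_facts'):
        # Legacy invocation: deprecation warning plus ansible_facts wrapper.
        module.deprecate("Use the corresponding _info module instead; it "
                         "no longer returns ansible_facts", version='2.13')
        module.exit_json(changed=False, ansible_facts=result)
    else:
        # New-style invocation: plain return keys, consumed via register.
        module.exit_json(changed=False, **result)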
+RETURN = '''
+ovirt_storage_templates:
+    description: "List of dictionaries describing the Templates. Template attributes are mapped to dictionary keys,
+                  all Templates attributes can be found at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/template."
+    returned: On success.
+    type: list
+'''
+
+import traceback
+
+from ansible.module_utils.common.removed import removed_module
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.ovirt.ovirt.plugins.module_utils.ovirt import (
+    check_sdk,
+    create_connection,
+    get_dict_of_struct,
+    ovirt_info_full_argument_spec,
+    get_id_by_name
+)
+
+
+def main():
+    argument_spec = ovirt_info_full_argument_spec(
+        storage_domain=dict(default=None),
+        max=dict(default=None, type='int'),
+        unregistered=dict(default=False, type='bool'),
+    )
+    module = AnsibleModule(argument_spec)
+    is_old_facts = module._name == 'ovirt_storage_template_facts'
+    if is_old_facts:
+        module.deprecate("The 'ovirt_storage_template_facts' module has been renamed to 'ovirt_storage_template_info', "
+                         "and the renamed one no longer returns ansible_facts", version='2.13')
+
+    check_sdk(module)
+
+    try:
+        auth = module.params.pop('auth')
+        connection = create_connection(auth)
+        storage_domains_service = connection.system_service().storage_domains_service()
+        sd_id = get_id_by_name(storage_domains_service, module.params['storage_domain'])
+        storage_domain_service = storage_domains_service.storage_domain_service(sd_id)
+        templates_service = storage_domain_service.templates_service()
+
+        # List unregistered templates when requested, otherwise the
+        # registered ones (honouring the 'max' limit):
+        if module.params.get('unregistered'):
+            templates = templates_service.list(unregistered=True)
+        else:
+            templates = templates_service.list(max=module.params['max'])
+        result = dict(
+            ovirt_storage_templates=[
+                get_dict_of_struct(
+                    struct=c,
+                    connection=connection,
+                    fetch_nested=module.params.get('fetch_nested'),
+                    attributes=module.params.get('nested_attributes'),
+                ) for c in templates
+            ],
+        )
+        if is_old_facts:
+            module.exit_json(changed=False, ansible_facts=result)
+        else:
+            module.exit_json(changed=False, **result)
+    except Exception as e:
+        module.fail_json(msg=str(e), exception=traceback.format_exc())
+    finally:
+        connection.close(logout=auth.get('token') is None)
+
+
+if __name__ == '__main__':
+    removed_module("2.10")
diff --git a/plugins/modules/cloud/ovirt/ovirt_storage_vm_facts.py b/plugins/modules/cloud/ovirt/ovirt_storage_vm_facts.py
new file mode 100644
index 0000000000..399a6727f2
--- /dev/null
+++ b/plugins/modules/cloud/ovirt/ovirt_storage_vm_facts.py
@@ -0,0 +1,147 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2017 Red Hat, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <https://www.gnu.org/licenses/>.
+#
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['deprecated'],
+                    'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: ovirt_storage_vm_facts
+short_description: Retrieve information about one or more oVirt/RHV virtual machines related to a storage domain.
+author: "Maor Lipchuk (@machacekondra)"
+deprecated:
+    removed_in: "2.10"
+    why: When migrating to collection we decided to use only _info modules.
+    alternative: Use M(ovirt_storage_vm_info) instead
+description:
+    - "Retrieve information about one or more oVirt/RHV virtual machines related to a storage domain."
+    - This module was called C(ovirt_storage_vm_facts) before Ansible 2.9, returning C(ansible_facts).
+      Note that the M(ovirt_storage_vm_info) module no longer returns C(ansible_facts)!
+notes:
+    - "This module returns a variable C(ovirt_storage_vms), which
+       contains a list of virtual machines. You need to register the result with
+       the I(register) keyword to use it."
+options:
+    unregistered:
+        description:
+            - "Flag which indicates whether to get unregistered virtual machines which contain one or more
+               disks which reside on a storage domain or diskless virtual machines."
+        type: bool
+        default: false
+    max:
+        description:
+            - "Sets the maximum number of virtual machines to return. If not specified all the virtual machines are returned."
+    storage_domain:
+        description:
+            - "The storage domain name where the virtual machines should be listed."
+extends_documentation_fragment:
+- ovirt.ovirt.ovirt_info
+
+'''
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+# Gather information about all VMs which relate to a storage domain and
+# are unregistered:
+- ovirt_storage_vm_info:
+    unregistered: true
+  register: result
+- debug:
+    msg: "{{ result.ovirt_storage_vms }}"
+'''
+
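Both storage-domain scoped modules follow the same flow: resolve the domain name to an id, then list either registered or unregistered entities from its sub-service. A condensed sketch using the collection's module_utils imported above (the domain name argument is illustrative):

def list_storage_domain_vms(connection, sd_name, unregistered=False):
    sds_service = connection.system_service().storage_domains_service()
    # get_id_by_name is the collection helper imported above; it maps a
    # storage domain name to its id.
    sd_id = get_id_by_name(sds_service, sd_name)
    vms_service = sds_service.storage_domain_service(sd_id).vms_service()
    if unregistered:
        # Entities whose disks sit on the domain but are not imported yet.
        return vms_service.list(unregistered=True)
    return vms_service.list()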
+RETURN = '''
+ovirt_storage_vms:
+    description: "List of dictionaries describing the VMs. VM attributes are mapped to dictionary keys,
+                  all VMs attributes can be found at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/vm."
+    returned: On success.
+    type: list
+'''
+
+import traceback
+
+from ansible.module_utils.common.removed import removed_module
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.ovirt.ovirt.plugins.module_utils.ovirt import (
+    check_sdk,
+    create_connection,
+    get_dict_of_struct,
+    ovirt_info_full_argument_spec,
+    get_id_by_name
+)
+
+
+def main():
+    argument_spec = ovirt_info_full_argument_spec(
+        storage_domain=dict(default=None),
+        max=dict(default=None, type='int'),
+        unregistered=dict(default=False, type='bool'),
+    )
+    module = AnsibleModule(argument_spec)
+    is_old_facts = module._name == 'ovirt_storage_vm_facts'
+    if is_old_facts:
+        module.deprecate("The 'ovirt_storage_vm_facts' module has been renamed to 'ovirt_storage_vm_info', "
+                         "and the renamed one no longer returns ansible_facts", version='2.13')
+
+    check_sdk(module)
+
+    try:
+        auth = module.params.pop('auth')
+        connection = create_connection(auth)
+        storage_domains_service = connection.system_service().storage_domains_service()
+        sd_id = get_id_by_name(storage_domains_service, module.params['storage_domain'])
+        storage_domain_service = storage_domains_service.storage_domain_service(sd_id)
+        vms_service = storage_domain_service.vms_service()
+
+        # List unregistered VMs when requested, otherwise the registered
+        # ones (honouring the documented 'max' limit):
+        if module.params.get('unregistered'):
+            vms = vms_service.list(unregistered=True)
+        else:
+            vms = vms_service.list(max=module.params['max'])
+        result = dict(
+            ovirt_storage_vms=[
+                get_dict_of_struct(
+                    struct=c,
+                    connection=connection,
+                    fetch_nested=module.params.get('fetch_nested'),
+                    attributes=module.params.get('nested_attributes'),
+                ) for c in vms
+            ],
+        )
+        if is_old_facts:
+            module.exit_json(changed=False, ansible_facts=result)
+        else:
+            module.exit_json(changed=False, **result)
+    except Exception as e:
+        module.fail_json(msg=str(e), exception=traceback.format_exc())
+    finally:
+        connection.close(logout=auth.get('token') is None)
+
+
+if __name__ == '__main__':
+    removed_module("2.10")
diff --git a/plugins/modules/cloud/ovirt/ovirt_tag_facts.py b/plugins/modules/cloud/ovirt/ovirt_tag_facts.py
new file mode 100644
index 0000000000..05f97f69a9
--- /dev/null
+++ b/plugins/modules/cloud/ovirt/ovirt_tag_facts.py
@@ -0,0 +1,176 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Red Hat, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <https://www.gnu.org/licenses/>.
+#
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['deprecated'],
+                    'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: ovirt_tag_facts
+short_description: Retrieve information about one or more oVirt/RHV tags
+author: "Ondra Machacek (@machacekondra)"
+deprecated:
+    removed_in: "2.10"
+    why: When migrating to collection we decided to use only _info modules.
+    alternative: Use M(ovirt_tag_info) instead
+description:
+    - "Retrieve information about one or more oVirt/RHV tags."
+ - This module was called C(ovirt_tag_facts) before Ansible 2.9, returning C(ansible_facts). + Note that the M(ovirt_tag_info) module no longer returns C(ansible_facts)! +notes: + - "This module returns a variable C(ovirt_tags), which + contains a list of tags. You need to register the result with + the I(register) keyword to use it." +options: + name: + description: + - "Name of the tag which should be listed." + vm: + description: + - "Name of the VM, which tags should be listed." + host: + description: + - "Name of the host, which tags should be listed." +extends_documentation_fragment: +- ovirt.ovirt.ovirt_info + +''' + +EXAMPLES = ''' +# Examples don't contain auth parameter for simplicity, +# look at ovirt_auth module to see how to reuse authentication: + +# Gather information about all tags, which names start with C(tag): +- ovirt_tag_info: + name: tag* + register: result +- debug: + msg: "{{ result.ovirt_tags }}" + +# Gather information about all tags, which are assigned to VM C(postgres): +- ovirt_tag_info: + vm: postgres + register: result +- debug: + msg: "{{ result.ovirt_tags }}" + +# Gather information about all tags, which are assigned to host C(west): +- ovirt_tag_info: + host: west + register: result +- debug: + msg: "{{ result.ovirt_tags }}" +''' + +RETURN = ''' +ovirt_tags: + description: "List of dictionaries describing the tags. Tags attributes are mapped to dictionary keys, + all tags attributes can be found at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/tag." + returned: On success. + type: list +''' + +import fnmatch +import traceback + +from ansible.module_utils.common.removed import removed_module +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.ovirt.ovirt.plugins.module_utils.ovirt import ( + check_sdk, + create_connection, + get_dict_of_struct, + ovirt_info_full_argument_spec, + search_by_name, +) + + +def main(): + argument_spec = ovirt_info_full_argument_spec( + name=dict(default=None), + host=dict(default=None), + vm=dict(default=None), + ) + module = AnsibleModule(argument_spec) + is_old_facts = module._name == 'ovirt_tag_facts' + if is_old_facts: + module.deprecate("The 'ovirt_tag_facts' module has been renamed to 'ovirt_tag_info', " + "and the renamed one no longer returns ansible_facts", version='2.13') + + check_sdk(module) + + try: + auth = module.params.pop('auth') + connection = create_connection(auth) + tags_service = connection.system_service().tags_service() + tags = [] + all_tags = tags_service.list() + if module.params['name']: + tags.extend([ + t for t in all_tags + if fnmatch.fnmatch(t.name, module.params['name']) + ]) + if module.params['host']: + hosts_service = connection.system_service().hosts_service() + host = search_by_name(hosts_service, module.params['host']) + if host is None: + raise Exception("Host '%s' was not found." % module.params['host']) + tags.extend([ + tag for tag in hosts_service.host_service(host.id).tags_service().list() + ]) + if module.params['vm']: + vms_service = connection.system_service().vms_service() + vm = search_by_name(vms_service, module.params['vm']) + if vm is None: + raise Exception("Vm '%s' was not found." 
% module.params['vm']) + tags.extend([ + tag for tag in vms_service.vm_service(vm.id).tags_service().list() + ]) + + if not (module.params['vm'] or module.params['host'] or module.params['name']): + tags = all_tags + + result = dict( + ovirt_tags=[ + get_dict_of_struct( + struct=t, + connection=connection, + fetch_nested=module.params['fetch_nested'], + attributes=module.params['nested_attributes'], + ) for t in tags + ], + ) + if is_old_facts: + module.exit_json(changed=False, ansible_facts=result) + else: + module.exit_json(changed=False, **result) + except Exception as e: + module.fail_json(msg=str(e), exception=traceback.format_exc()) + finally: + connection.close(logout=auth.get('token') is None) + + +if __name__ == '__main__': + removed_module("2.10") diff --git a/plugins/modules/cloud/ovirt/ovirt_template_facts.py b/plugins/modules/cloud/ovirt/ovirt_template_facts.py new file mode 100644 index 0000000000..31a9d2d1d8 --- /dev/null +++ b/plugins/modules/cloud/ovirt/ovirt_template_facts.py @@ -0,0 +1,129 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (c) 2016 Red Hat, Inc. +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['deprecated'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: ovirt_template_facts +short_description: Retrieve information about one or more oVirt/RHV templates +author: "Ondra Machacek (@machacekondra)" +deprecated: + removed_in: "2.10" + why: When migrating to collection we decided to use only _info modules. + alternative: Use M(ovirt_template_info) instead +description: + - "Retrieve information about one or more oVirt/RHV templates." + - This module was called C(ovirt_template_facts) before Ansible 2.9, returning C(ansible_facts). + Note that the M(ovirt_template_info) module no longer returns C(ansible_facts)! +notes: + - "This module returns a variable C(ovirt_templates), which + contains a list of templates. You need to register the result with + the I(register) keyword to use it." +options: + pattern: + description: + - "Search term which is accepted by oVirt/RHV search backend." + - "For example to search template X from datacenter Y use following pattern: + name=X and datacenter=Y" +extends_documentation_fragment: +- ovirt.ovirt.ovirt_info + +''' + +EXAMPLES = ''' +# Examples don't contain auth parameter for simplicity, +# look at ovirt_auth module to see how to reuse authentication: + +# Gather information about all templates which names start with C(centos) and +# belongs to data center C(west): +- ovirt_template_info: + pattern: name=centos* and datacenter=west + register: result +- debug: + msg: "{{ result.ovirt_templates }}" +''' + +RETURN = ''' +ovirt_templates: + description: "List of dictionaries describing the templates. 
Template attributes are mapped to dictionary keys, + all templates attributes can be found at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/template." + returned: On success. + type: list +''' + +import traceback + +from ansible.module_utils.common.removed import removed_module +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.ovirt.ovirt.plugins.module_utils.ovirt import ( + check_sdk, + create_connection, + get_dict_of_struct, + ovirt_info_full_argument_spec, +) + + +def main(): + argument_spec = ovirt_info_full_argument_spec( + pattern=dict(default='', required=False), + ) + module = AnsibleModule(argument_spec) + is_old_facts = module._name == 'ovirt_template_facts' + if is_old_facts: + module.deprecate("The 'ovirt_template_facts' module has been renamed to 'ovirt_template_info', " + "and the renamed one no longer returns ansible_facts", version='2.13') + + check_sdk(module) + + try: + auth = module.params.pop('auth') + connection = create_connection(auth) + templates_service = connection.system_service().templates_service() + templates = templates_service.list(search=module.params['pattern']) + result = dict( + ovirt_templates=[ + get_dict_of_struct( + struct=c, + connection=connection, + fetch_nested=module.params.get('fetch_nested'), + attributes=module.params.get('nested_attributes'), + ) for c in templates + ], + ) + if is_old_facts: + module.exit_json(changed=False, ansible_facts=result) + else: + module.exit_json(changed=False, **result) + except Exception as e: + module.fail_json(msg=str(e), exception=traceback.format_exc()) + finally: + connection.close(logout=auth.get('token') is None) + + +if __name__ == '__main__': + removed_module("2.10") diff --git a/plugins/modules/cloud/ovirt/ovirt_user_facts.py b/plugins/modules/cloud/ovirt/ovirt_user_facts.py new file mode 100644 index 0000000000..03f4ce6634 --- /dev/null +++ b/plugins/modules/cloud/ovirt/ovirt_user_facts.py @@ -0,0 +1,127 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (c) 2016 Red Hat, Inc. +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['deprecated'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: ovirt_user_facts +short_description: Retrieve information about one or more oVirt/RHV users +author: "Ondra Machacek (@machacekondra)" +deprecated: + removed_in: "2.10" + why: When migrating to collection we decided to use only _info modules. + alternative: Use M(ovirt_user_info) instead +description: + - "Retrieve information about one or more oVirt/RHV users." + - This module was called C(ovirt_user_facts) before Ansible 2.9, returning C(ansible_facts). + Note that the M(ovirt_user_info) module no longer returns C(ansible_facts)! 
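In contrast to the glob-filtered modules above, the pattern-based modules hand the expression straight to the engine's search backend, so oVirt/RHV search syntax applies and filtering happens server-side. A minimal sketch (the pattern string is illustrative):

def search_users(connection, pattern='name=john*'):
    users_service = connection.system_service().users_service()
    # The pattern is evaluated by the oVirt/RHV search backend,
    # not client-side, so engine search syntax applies.
    return users_service.list(search=pattern)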
+notes: + - "This module returns a variable C(ovirt_users), which + contains a list of users. You need to register the result with + the I(register) keyword to use it." +options: + pattern: + description: + - "Search term which is accepted by oVirt/RHV search backend." + - "For example to search user X use following pattern: name=X" +extends_documentation_fragment: +- ovirt.ovirt.ovirt_info + +''' + +EXAMPLES = ''' +# Examples don't contain auth parameter for simplicity, +# look at ovirt_auth module to see how to reuse authentication: + +# Gather information about all users which first names start with C(john): +- ovirt_user_info: + pattern: name=john* + register: result +- debug: + msg: "{{ result.ovirt_users }}" +''' + +RETURN = ''' +ovirt_users: + description: "List of dictionaries describing the users. User attributes are mapped to dictionary keys, + all users attributes can be found at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/user." + returned: On success. + type: list +''' + +import traceback + +from ansible.module_utils.common.removed import removed_module +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.ovirt.ovirt.plugins.module_utils.ovirt import ( + check_sdk, + create_connection, + get_dict_of_struct, + ovirt_info_full_argument_spec, +) + + +def main(): + argument_spec = ovirt_info_full_argument_spec( + pattern=dict(default='', required=False), + ) + module = AnsibleModule(argument_spec) + is_old_facts = module._name == 'ovirt_user_facts' + if is_old_facts: + module.deprecate("The 'ovirt_user_facts' module has been renamed to 'ovirt_user_info', " + "and the renamed one no longer returns ansible_facts", version='2.13') + + check_sdk(module) + + try: + auth = module.params.pop('auth') + connection = create_connection(auth) + users_service = connection.system_service().users_service() + users = users_service.list(search=module.params['pattern']) + result = dict( + ovirt_users=[ + get_dict_of_struct( + struct=c, + connection=connection, + fetch_nested=module.params.get('fetch_nested'), + attributes=module.params.get('nested_attributes'), + ) for c in users + ], + ) + if is_old_facts: + module.exit_json(changed=False, ansible_facts=result) + else: + module.exit_json(changed=False, **result) + except Exception as e: + module.fail_json(msg=str(e), exception=traceback.format_exc()) + finally: + connection.close(logout=auth.get('token') is None) + + +if __name__ == '__main__': + removed_module("2.10") diff --git a/plugins/modules/cloud/ovirt/ovirt_vm_facts.py b/plugins/modules/cloud/ovirt/ovirt_vm_facts.py new file mode 100644 index 0000000000..9dd7c26fb0 --- /dev/null +++ b/plugins/modules/cloud/ovirt/ovirt_vm_facts.py @@ -0,0 +1,168 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (c) 2016 Red Hat, Inc. +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+# + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['deprecated'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: ovirt_vm_facts +short_description: Retrieve information about one or more oVirt/RHV virtual machines +author: "Ondra Machacek (@machacekondra)" +deprecated: + removed_in: "2.10" + why: When migrating to collection we decided to use only _info modules. + alternative: Use M(ovirt_vm_info) instead +description: + - "Retrieve information about one or more oVirt/RHV virtual machines." + - This module was called C(ovirt_vm_facts) before Ansible 2.9, returning C(ansible_facts). + Note that the M(ovirt_vm_info) module no longer returns C(ansible_facts)! +notes: + - "This module returns a variable C(ovirt_vms), which + contains a list of virtual machines. You need to register the result with + the I(register) keyword to use it." +options: + pattern: + description: + - "Search term which is accepted by oVirt/RHV search backend." + - "For example to search VM X from cluster Y use following pattern: + name=X and cluster=Y" + all_content: + description: + - "If I(true) all the attributes of the virtual machines should be + included in the response." + type: bool + case_sensitive: + description: + - "If I(true) performed search will take case into account." + type: bool + default: true + max: + description: + - "The maximum number of results to return." + next_run: + description: + - "Indicates if the returned result describes the virtual machine as it is currently running or if describes + the virtual machine with the modifications that have already been performed but that will only come into + effect when the virtual machine is restarted. By default the value is set by engine." + type: bool +extends_documentation_fragment: +- ovirt.ovirt.ovirt_info + +''' + +EXAMPLES = ''' +# Examples don't contain auth parameter for simplicity, +# look at ovirt_auth module to see how to reuse authentication: + +# Gather information about all VMs which names start with C(centos) and +# belong to cluster C(west): +- ovirt_vm_info: + pattern: name=centos* and cluster=west + register: result +- debug: + msg: "{{ result.ovirt_vms }}" + +# Gather info about next run configuration of virtual machine named myvm +- ovirt_vm_info: + pattern: name=myvm + next_run: true + register: result +- debug: + msg: "{{ result.ovirt_vms[0] }}" +''' + +RETURN = ''' +ovirt_vms: + description: "List of dictionaries describing the VMs. VM attributes are mapped to dictionary keys, + all VMs attributes can be found at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/vm." + returned: On success. 
+ type: list +''' + +import traceback + +from ansible.module_utils.common.removed import removed_module +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.ovirt.ovirt.plugins.module_utils.ovirt import ( + check_sdk, + create_connection, + get_dict_of_struct, + ovirt_info_full_argument_spec, +) + + +def main(): + argument_spec = ovirt_info_full_argument_spec( + pattern=dict(default='', required=False), + all_content=dict(default=False, type='bool'), + next_run=dict(default=None, type='bool'), + case_sensitive=dict(default=True, type='bool'), + max=dict(default=None, type='int'), + ) + module = AnsibleModule(argument_spec) + is_old_facts = module._name == 'ovirt_vm_facts' + if is_old_facts: + module.deprecate("The 'ovirt_vm_facts' module has been renamed to 'ovirt_vm_info', " + "and the renamed one no longer returns ansible_facts", version='2.13') + + check_sdk(module) + + try: + auth = module.params.pop('auth') + connection = create_connection(auth) + vms_service = connection.system_service().vms_service() + vms = vms_service.list( + search=module.params['pattern'], + all_content=module.params['all_content'], + case_sensitive=module.params['case_sensitive'], + max=module.params['max'], + ) + if module.params['next_run']: + vms = [vms_service.vm_service(vm.id).get(next_run=True) for vm in vms] + + result = dict( + ovirt_vms=[ + get_dict_of_struct( + struct=c, + connection=connection, + fetch_nested=module.params.get('fetch_nested'), + attributes=module.params.get('nested_attributes'), + ) for c in vms + ], + ) + if is_old_facts: + module.exit_json(changed=False, ansible_facts=result) + else: + module.exit_json(changed=False, **result) + except Exception as e: + module.fail_json(msg=str(e), exception=traceback.format_exc()) + finally: + connection.close(logout=auth.get('token') is None) + + +if __name__ == '__main__': + removed_module("2.10") diff --git a/plugins/modules/cloud/ovirt/ovirt_vmpool_facts.py b/plugins/modules/cloud/ovirt/ovirt_vmpool_facts.py new file mode 100644 index 0000000000..e59b4073fd --- /dev/null +++ b/plugins/modules/cloud/ovirt/ovirt_vmpool_facts.py @@ -0,0 +1,127 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (c) 2016 Red Hat, Inc. +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['deprecated'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: ovirt_vmpool_facts +short_description: Retrieve information about one or more oVirt/RHV vmpools +author: "Ondra Machacek (@machacekondra)" +deprecated: + removed_in: "2.10" + why: When migrating to collection we decided to use only _info modules. + alternative: Use M(ovirt_vmpool_info) instead +description: + - "Retrieve information about one or more oVirt/RHV vmpools." 
+ - This module was called C(ovirt_vmpool_facts) before Ansible 2.9, returning C(ansible_facts). + Note that the M(ovirt_vmpool_info) module no longer returns C(ansible_facts)! +notes: + - "This module returns a variable C(ovirt_vmpools), which + contains a list of vmpools. You need to register the result with + the I(register) keyword to use it." +options: + pattern: + description: + - "Search term which is accepted by oVirt/RHV search backend." + - "For example to search vmpool X: name=X" +extends_documentation_fragment: +- ovirt.ovirt.ovirt_info + +''' + +EXAMPLES = ''' +# Examples don't contain auth parameter for simplicity, +# look at ovirt_auth module to see how to reuse authentication: + +# Gather information about all vm pools which names start with C(centos): +- ovirt_vmpool_info: + pattern: name=centos* + register: result +- debug: + msg: "{{ result.ovirt_vm_pools }}" +''' + +RETURN = ''' +ovirt_vm_pools: + description: "List of dictionaries describing the vmpools. Vm pool attributes are mapped to dictionary keys, + all vmpools attributes can be found at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/vm_pool." + returned: On success. + type: list +''' + +import traceback + +from ansible.module_utils.common.removed import removed_module +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.ovirt.ovirt.plugins.module_utils.ovirt import ( + check_sdk, + create_connection, + get_dict_of_struct, + ovirt_info_full_argument_spec, +) + + +def main(): + argument_spec = ovirt_info_full_argument_spec( + pattern=dict(default='', required=False), + ) + module = AnsibleModule(argument_spec) + is_old_facts = module._name == 'ovirt_vmpool_facts' + if is_old_facts: + module.deprecate("The 'ovirt_vmpool_facts' module has been renamed to 'ovirt_vmpool_info', " + "and the renamed one no longer returns ansible_facts", version='2.13') + + check_sdk(module) + + try: + auth = module.params.pop('auth') + connection = create_connection(auth) + vmpools_service = connection.system_service().vm_pools_service() + vmpools = vmpools_service.list(search=module.params['pattern']) + result = dict( + ovirt_vm_pools=[ + get_dict_of_struct( + struct=c, + connection=connection, + fetch_nested=module.params.get('fetch_nested'), + attributes=module.params.get('nested_attributes'), + ) for c in vmpools + ], + ) + if is_old_facts: + module.exit_json(changed=False, ansible_facts=result) + else: + module.exit_json(changed=False, **result) + except Exception as e: + module.fail_json(msg=str(e), exception=traceback.format_exc()) + finally: + connection.close(logout=auth.get('token') is None) + + +if __name__ == '__main__': + removed_module("2.10") diff --git a/plugins/modules/cloud/packet/packet_device.py b/plugins/modules/cloud/packet/packet_device.py new file mode 100644 index 0000000000..975327528d --- /dev/null +++ b/plugins/modules/cloud/packet/packet_device.py @@ -0,0 +1,643 @@ +#!/usr/bin/python +# (c) 2016, Tomas Karasek +# (c) 2016, Matt Baldwin +# (c) 2016, Thibaud Morel l'Horset +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: packet_device + +short_description: Manage a bare metal server in the Packet Host. 
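Before moving on to the Packet module, note the connection-handling skeleton every oVirt module above repeats: pop the auth dict, open a connection, and always close it in a finally block, logging out only when no pre-created token was supplied (a token session is left valid for reuse). A condensed sketch; binding connection before the try also avoids referencing an unbound name in finally should create_connection itself fail:

def run_ovirt_module(module, collect):
    auth = module.params.pop('auth')
    connection = create_connection(auth)
    try:
        # collect() returns e.g. {'ovirt_vm_pools': [...]}.
        module.exit_json(changed=False, **collect(connection))
    except Exception as e:
        module.fail_json(msg=str(e), exception=traceback.format_exc())
    finally:
        # Log out only for username/password sessions; keep tokens alive.
        connection.close(logout=auth.get('token') is None)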
+ +description: + - Manage a bare metal server in the Packet Host (a "device" in the API terms). + - When the machine is created it can optionally wait for public IP address, or for active state. + - This module has a dependency on packet >= 1.0. + - API is documented at U(https://www.packet.net/developers/api/devices). + + +author: + - Tomas Karasek (@t0mk) + - Matt Baldwin (@baldwinSPC) + - Thibaud Morel l'Horset (@teebes) + +options: + auth_token: + description: + - Packet api token. You can also supply it in env var C(PACKET_API_TOKEN). + + count: + description: + - The number of devices to create. Count number can be included in hostname via the %d string formatter. + default: 1 + + count_offset: + description: + - From which number to start the count. + default: 1 + + device_ids: + description: + - List of device IDs on which to operate. + + facility: + description: + - Facility slug for device creation. See Packet API for current list - U(https://www.packet.net/developers/api/facilities/). + + features: + description: + - Dict with "features" for device creation. See Packet API docs for details. + + hostnames: + description: + - A hostname of a device, or a list of hostnames. + - If given string or one-item list, you can use the C("%d") Python string format to expand numbers from I(count). + - If only one hostname, it might be expanded to list if I(count)>1. + aliases: [name] + + locked: + description: + - Whether to lock a created device. + default: false + aliases: [lock] + type: bool + + operating_system: + description: + - OS slug for device creation. See Packet API for current list - U(https://www.packet.net/developers/api/operatingsystems/). + + plan: + description: + - Plan slug for device creation. See Packet API for current list - U(https://www.packet.net/developers/api/plans/). + + project_id: + description: + - ID of project of the device. + required: true + + state: + description: + - Desired state of the device. + - If set to C(present) (the default), the module call will return immediately after the device-creating HTTP request successfully returns. + - If set to C(active), the module call will block until all the specified devices are in state active due to the Packet API, or until I(wait_timeout). + choices: [present, absent, active, inactive, rebooted] + default: present + + user_data: + description: + - Userdata blob made available to the machine + + wait_for_public_IPv: + description: + - Whether to wait for the instance to be assigned a public IPv4/IPv6 address. + - If set to 4, it will wait until IPv4 is assigned to the instance. + - If set to 6, wait until public IPv6 is assigned to the instance. + choices: [4,6] + + wait_timeout: + description: + - How long (seconds) to wait either for automatic IP address assignment, or for the device to reach the C(active) I(state). + - If I(wait_for_public_IPv) is set and I(state) is C(active), the module will wait for both events consequently, applying the timeout twice. + default: 900 + ipxe_script_url: + description: + - URL of custom iPXE script for provisioning. + - More about custom iPXE for Packet devices at U(https://help.packet.net/technical/infrastructure/custom-ipxe). + always_pxe: + description: + - Persist PXE as the first boot option. + - Normally, the PXE process happens only on the first boot. Set this arg to have your device continuously boot to iPXE. + default: false + type: bool + + +requirements: + - "packet-python >= 1.35" + +notes: + - Doesn't support check mode. 
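The I(count)/I(hostnames) interplay documented above expands a single C(%d)-style hostname into a numbered series, mirroring get_hostname_list further down. A minimal standalone sketch (the helper name is illustrative):

import re

def expand_hostnames(spec, count, count_offset=1):
    # 'server-%02d' with count=3 -> ['server-01', 'server-02', 'server-03'].
    if not re.search(r"%\d{0,2}d", spec):
        if count == 1:
            return [spec]
        # No formatter given: append one, as the module does for count > 1.
        spec += '%02d'
    return [spec % i for i in range(count_offset, count_offset + count)]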
+
+'''
+
+EXAMPLES = '''
+# All the examples assume that you have your Packet api token in env var PACKET_API_TOKEN.
+# You can also pass it to the auth_token parameter of the module instead.
+
+# Creating devices
+
+- name: create 1 device
+  hosts: localhost
+  tasks:
+  - packet_device:
+      project_id: 89b497ee-5afc-420a-8fb5-56984898f4df
+      hostnames: myserver
+      operating_system: ubuntu_16_04
+      plan: baremetal_0
+      facility: sjc1
+
+# Create the same device and wait until it is in state "active" (when it's
+# ready for other API operations). Fail if the device is not "active" in
+# 10 minutes.
+
+- name: create device and wait up to 10 minutes for active state
+  hosts: localhost
+  tasks:
+  - packet_device:
+      project_id: 89b497ee-5afc-420a-8fb5-56984898f4df
+      hostnames: myserver
+      operating_system: ubuntu_16_04
+      plan: baremetal_0
+      facility: sjc1
+      state: active
+      wait_timeout: 600
+
+- name: create 3 ubuntu devices called server-01, server-02 and server-03
+  hosts: localhost
+  tasks:
+  - packet_device:
+      project_id: 89b497ee-5afc-420a-8fb5-56984898f4df
+      hostnames: server-%02d
+      count: 3
+      operating_system: ubuntu_16_04
+      plan: baremetal_0
+      facility: sjc1
+
+- name: Create 3 coreos devices with userdata, wait until they get IPs and then wait for SSH
+  hosts: localhost
+  tasks:
+  - name: create 3 devices and register their facts
+    packet_device:
+      hostnames: [coreos-one, coreos-two, coreos-three]
+      operating_system: coreos_stable
+      plan: baremetal_0
+      facility: ewr1
+      locked: true
+      project_id: 89b497ee-5afc-420a-8fb5-56984898f4df
+      wait_for_public_IPv: 4
+      user_data: |
+        #cloud-config
+        ssh_authorized_keys:
+          - {{ lookup('file', 'my_packet_sshkey') }}
+        coreos:
+          etcd:
+            discovery: https://discovery.etcd.io/6a28e078895c5ec737174db2419bb2f3
+            addr: $private_ipv4:4001
+            peer-addr: $private_ipv4:7001
+          fleet:
+            public-ip: $private_ipv4
+          units:
+            - name: etcd.service
+              command: start
+            - name: fleet.service
+              command: start
+    register: newhosts
+
+  - name: wait for ssh
+    wait_for:
+      delay: 1
+      host: "{{ item.public_ipv4 }}"
+      port: 22
+      state: started
+      timeout: 500
+    with_items: "{{ newhosts.devices }}"
+
+
+# Other states of devices
+
+- name: remove 3 devices by uuid
+  hosts: localhost
+  tasks:
+  - packet_device:
+      project_id: 89b497ee-5afc-420a-8fb5-56984898f4df
+      state: absent
+      device_ids:
+        - 1fb4faf8-a638-4ac7-8f47-86fe514c30d8
+        - 2eb4faf8-a638-4ac7-8f47-86fe514c3043
+        - 6bb4faf8-a638-4ac7-8f47-86fe514c301f
+'''
+
+RETURN = '''
+changed:
+    description: True if a device was altered in any way (created, modified or removed)
+    type: bool
+    sample: True
+    returned: success
+
+devices:
+    description: Information about each device that was processed
+    type: list
+    sample: '[{"hostname": "my-server.com", "id": "2a5122b9-c323-4d5c-b53c-9ad3f54273e7",
+               "public_ipv4": "147.229.15.12", "private_ipv4": "10.0.15.12",
+               "tags": [], "locked": false, "state": "provisioning",
+               "public_ipv6": "2604:1380:2:5200::3"}]'
+    returned: success
+'''  # NOQA
+
+
+import os
+import re
+import time
+import uuid
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+
+HAS_PACKET_SDK = True
+try:
+    import packet
+except ImportError:
+    HAS_PACKET_SDK = False
+
+
+NAME_RE = r'({0}|{0}{1}*{0})'.format(r'[a-zA-Z0-9]', r'[a-zA-Z0-9\-]')
+HOSTNAME_RE = r'({0}\.)*{0}$'.format(NAME_RE)
+MAX_DEVICES = 100
+
+PACKET_DEVICE_STATES = (
+    'queued',
+    'provisioning',
+    'failed',
+    'powering_on',
'active', + 'powering_off', + 'inactive', + 'rebooting', +) + +PACKET_API_TOKEN_ENV_VAR = "PACKET_API_TOKEN" + + +ALLOWED_STATES = ['absent', 'active', 'inactive', 'rebooted', 'present'] + + +def serialize_device(device): + """ + Standard representation for a device as returned by various tasks:: + + { + 'id': 'device_id' + 'hostname': 'device_hostname', + 'tags': [], + 'locked': false, + 'state': 'provisioning', + 'ip_addresses': [ + { + "address": "147.75.194.227", + "address_family": 4, + "public": true + }, + { + "address": "2604:1380:2:5200::3", + "address_family": 6, + "public": true + }, + { + "address": "10.100.11.129", + "address_family": 4, + "public": false + } + ], + "private_ipv4": "10.100.11.129", + "public_ipv4": "147.75.194.227", + "public_ipv6": "2604:1380:2:5200::3", + } + + """ + device_data = {} + device_data['id'] = device.id + device_data['hostname'] = device.hostname + device_data['tags'] = device.tags + device_data['locked'] = device.locked + device_data['state'] = device.state + device_data['ip_addresses'] = [ + { + 'address': addr_data['address'], + 'address_family': addr_data['address_family'], + 'public': addr_data['public'], + } + for addr_data in device.ip_addresses + ] + # Also include each IPs as a key for easier lookup in roles. + # Key names: + # - public_ipv4 + # - public_ipv6 + # - private_ipv4 + # - private_ipv6 (if there is one) + for ipdata in device_data['ip_addresses']: + if ipdata['public']: + if ipdata['address_family'] == 6: + device_data['public_ipv6'] = ipdata['address'] + elif ipdata['address_family'] == 4: + device_data['public_ipv4'] = ipdata['address'] + elif not ipdata['public']: + if ipdata['address_family'] == 6: + # Packet doesn't give public ipv6 yet, but maybe one + # day they will + device_data['private_ipv6'] = ipdata['address'] + elif ipdata['address_family'] == 4: + device_data['private_ipv4'] = ipdata['address'] + return device_data + + +def is_valid_hostname(hostname): + return re.match(HOSTNAME_RE, hostname) is not None + + +def is_valid_uuid(myuuid): + try: + val = uuid.UUID(myuuid, version=4) + except ValueError: + return False + return str(val) == myuuid + + +def listify_string_name_or_id(s): + if ',' in s: + return s.split(',') + else: + return [s] + + +def get_hostname_list(module): + # hostname is a list-typed param, so I guess it should return list + # (and it does, in Ansible 2.2.1) but in order to be defensive, + # I keep here the code to convert an eventual string to list + hostnames = module.params.get('hostnames') + count = module.params.get('count') + count_offset = module.params.get('count_offset') + if isinstance(hostnames, str): + hostnames = listify_string_name_or_id(hostnames) + if not isinstance(hostnames, list): + raise Exception("name %s is not convertible to list" % hostnames) + + # at this point, hostnames is a list + hostnames = [h.strip() for h in hostnames] + + if (len(hostnames) > 1) and (count > 1): + _msg = ("If you set count>1, you should only specify one hostname " + "with the %d formatter, not a list of hostnames.") + raise Exception(_msg) + + if (len(hostnames) == 1) and (count > 0): + hostname_spec = hostnames[0] + count_range = range(count_offset, count_offset + count) + if re.search(r"%\d{0,2}d", hostname_spec): + hostnames = [hostname_spec % i for i in count_range] + elif count > 1: + hostname_spec = '%s%%02d' % hostname_spec + hostnames = [hostname_spec % i for i in count_range] + + for hn in hostnames: + if not is_valid_hostname(hn): + raise Exception("Hostname '%s' does not seem to be 
valid" % hn) + + if len(hostnames) > MAX_DEVICES: + raise Exception("You specified too many hostnames, max is %d" % + MAX_DEVICES) + return hostnames + + +def get_device_id_list(module): + device_ids = module.params.get('device_ids') + + if isinstance(device_ids, str): + device_ids = listify_string_name_or_id(device_ids) + + device_ids = [di.strip() for di in device_ids] + + for di in device_ids: + if not is_valid_uuid(di): + raise Exception("Device ID '%s' does not seem to be valid" % di) + + if len(device_ids) > MAX_DEVICES: + raise Exception("You specified too many devices, max is %d" % + MAX_DEVICES) + return device_ids + + +def create_single_device(module, packet_conn, hostname): + + for param in ('hostnames', 'operating_system', 'plan'): + if not module.params.get(param): + raise Exception("%s parameter is required for new device." + % param) + project_id = module.params.get('project_id') + plan = module.params.get('plan') + user_data = module.params.get('user_data') + facility = module.params.get('facility') + operating_system = module.params.get('operating_system') + locked = module.params.get('locked') + ipxe_script_url = module.params.get('ipxe_script_url') + always_pxe = module.params.get('always_pxe') + if operating_system != 'custom_ipxe': + for param in ('ipxe_script_url', 'always_pxe'): + if module.params.get(param): + raise Exception('%s parameter is not valid for non custom_ipxe operating_system.' % param) + + device = packet_conn.create_device( + project_id=project_id, + hostname=hostname, + plan=plan, + facility=facility, + operating_system=operating_system, + userdata=user_data, + locked=locked, + ipxe_script_url=ipxe_script_url, + always_pxe=always_pxe) + return device + + +def refresh_device_list(module, packet_conn, devices): + device_ids = [d.id for d in devices] + new_device_list = get_existing_devices(module, packet_conn) + return [d for d in new_device_list if d.id in device_ids] + + +def wait_for_devices_active(module, packet_conn, watched_devices): + wait_timeout = module.params.get('wait_timeout') + wait_timeout = time.time() + wait_timeout + refreshed = watched_devices + while wait_timeout > time.time(): + refreshed = refresh_device_list(module, packet_conn, watched_devices) + if all(d.state == 'active' for d in refreshed): + return refreshed + time.sleep(5) + raise Exception("Waiting for state \"active\" timed out for devices: %s" + % [d.hostname for d in refreshed if d.state != "active"]) + + +def wait_for_public_IPv(module, packet_conn, created_devices): + + def has_public_ip(addr_list, ip_v): + return any([a['public'] and a['address_family'] == ip_v and + a['address'] for a in addr_list]) + + def all_have_public_ip(ds, ip_v): + return all([has_public_ip(d.ip_addresses, ip_v) for d in ds]) + + address_family = module.params.get('wait_for_public_IPv') + + wait_timeout = module.params.get('wait_timeout') + wait_timeout = time.time() + wait_timeout + while wait_timeout > time.time(): + refreshed = refresh_device_list(module, packet_conn, created_devices) + if all_have_public_ip(refreshed, address_family): + return refreshed + time.sleep(5) + + raise Exception("Waiting for IPv%d address timed out. 
Hostnames: %s" + % (address_family, [d.hostname for d in created_devices])) + + +def get_existing_devices(module, packet_conn): + project_id = module.params.get('project_id') + return packet_conn.list_devices( + project_id, params={ + 'per_page': MAX_DEVICES}) + + +def get_specified_device_identifiers(module): + if module.params.get('device_ids'): + device_id_list = get_device_id_list(module) + return {'ids': device_id_list, 'hostnames': []} + elif module.params.get('hostnames'): + hostname_list = get_hostname_list(module) + return {'hostnames': hostname_list, 'ids': []} + + +def act_on_devices(module, packet_conn, target_state): + specified_identifiers = get_specified_device_identifiers(module) + existing_devices = get_existing_devices(module, packet_conn) + changed = False + create_hostnames = [] + if target_state in ['present', 'active', 'rebooted']: + # states where we might create non-existing specified devices + existing_devices_names = [ed.hostname for ed in existing_devices] + create_hostnames = [hn for hn in specified_identifiers['hostnames'] + if hn not in existing_devices_names] + + process_devices = [d for d in existing_devices + if (d.id in specified_identifiers['ids']) or + (d.hostname in specified_identifiers['hostnames'])] + + if target_state != 'present': + _absent_state_map = {} + for s in PACKET_DEVICE_STATES: + _absent_state_map[s] = packet.Device.delete + + state_map = { + 'absent': _absent_state_map, + 'active': {'inactive': packet.Device.power_on, + 'provisioning': None, 'rebooting': None + }, + 'inactive': {'active': packet.Device.power_off}, + 'rebooted': {'active': packet.Device.reboot, + 'inactive': packet.Device.power_on, + 'provisioning': None, 'rebooting': None + }, + } + + # First do non-creation actions, it might be faster + for d in process_devices: + if d.state == target_state: + continue + if d.state in state_map[target_state]: + api_operation = state_map[target_state].get(d.state) + if api_operation is not None: + api_operation(d) + changed = True + else: + _msg = ( + "I don't know how to process existing device %s from state %s " + "to state %s" % + (d.hostname, d.state, target_state)) + raise Exception(_msg) + + # At last create missing devices + created_devices = [] + if create_hostnames: + created_devices = [create_single_device(module, packet_conn, n) + for n in create_hostnames] + if module.params.get('wait_for_public_IPv'): + created_devices = wait_for_public_IPv( + module, packet_conn, created_devices) + changed = True + + processed_devices = created_devices + process_devices + if target_state == 'active': + processed_devices = wait_for_devices_active( + module, packet_conn, processed_devices) + + return { + 'changed': changed, + 'devices': [serialize_device(d) for d in processed_devices] + } + + +def main(): + module = AnsibleModule( + argument_spec=dict( + auth_token=dict(default=os.environ.get(PACKET_API_TOKEN_ENV_VAR), + no_log=True), + count=dict(type='int', default=1), + count_offset=dict(type='int', default=1), + device_ids=dict(type='list'), + facility=dict(), + features=dict(type='dict'), + hostnames=dict(type='list', aliases=['name']), + locked=dict(type='bool', default=False, aliases=['lock']), + operating_system=dict(), + plan=dict(), + project_id=dict(required=True), + state=dict(choices=ALLOWED_STATES, default='present'), + user_data=dict(default=None), + wait_for_public_IPv=dict(type='int', choices=[4, 6]), + wait_timeout=dict(type='int', default=900), + ipxe_script_url=dict(default=''), + always_pxe=dict(type='bool', 
default=False), + ), + required_one_of=[('device_ids', 'hostnames',)], + mutually_exclusive=[ + ('hostnames', 'device_ids'), + ('count', 'device_ids'), + ('count_offset', 'device_ids'), + ] + ) + + if not HAS_PACKET_SDK: + module.fail_json(msg='packet required for this module') + + if not module.params.get('auth_token'): + _fail_msg = ("if Packet API token is not in environment variable %s, " + "the auth_token parameter is required" % + PACKET_API_TOKEN_ENV_VAR) + module.fail_json(msg=_fail_msg) + + auth_token = module.params.get('auth_token') + + packet_conn = packet.Manager(auth_token=auth_token) + + state = module.params.get('state') + + try: + module.exit_json(**act_on_devices(module, packet_conn, state)) + except Exception as e: + module.fail_json(msg='failed to set device state %s, error: %s' % + (state, to_native(e)), exception=traceback.format_exc()) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/packet/packet_ip_subnet.py b/plugins/modules/cloud/packet/packet_ip_subnet.py new file mode 100644 index 0000000000..d518d357f4 --- /dev/null +++ b/plugins/modules/cloud/packet/packet_ip_subnet.py @@ -0,0 +1,329 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2019, Nurfet Becirevic +# Copyright: (c) 2017, Tomas Karasek +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: packet_ip_subnet + +short_description: Assign IP subnet to a bare metal server. + +description: + - Assign or unassign IPv4 or IPv6 subnets to or from a device in the Packet host. + - IPv4 subnets must come from already reserved block. + - IPv6 subnets must come from publicly routable /56 block from your project. + - See U(https://support.packet.com/kb/articles/elastic-ips) for more info on IP block reservation. + + +author: + - Tomas Karasek (@t0mk) + - Nurfet Becirevic (@nurfet-becirevic) + +options: + auth_token: + description: + - Packet api token. You can also supply it in env var C(PACKET_API_TOKEN). + type: str + + hostname: + description: + - A hostname of a device to/from which to assign/remove a subnet. + required: False + type: str + + device_id: + description: + - UUID of a device to/from which to assign/remove a subnet. + required: False + type: str + + project_id: + description: + - UUID of a project of the device to/from which to assign/remove a subnet. + required: True + type: str + + device_count: + description: + - The number of devices to retrieve from the project. The max allowed value is 1000. + - See U(https://www.packet.com/developers/api/#retrieve-all-devices-of-a-project) for more info. + default: 100 + type: int + + cidr: + description: + - IPv4 or IPv6 subnet which you want to manage. It must come from a reserved block for your project in the Packet Host. + aliases: [name] + type: str + + state: + description: + - Desired state of the IP subnet on the specified device. + - With state == C(present), you must specify either hostname or device_id. Subnet with given CIDR will then be assigned to the specified device. + - With state == C(absent), you can specify either hostname or device_id. The subnet will be removed from specified devices. + - If you leave both hostname and device_id empty, the subnet will be removed from any device it's assigned to. 
+    choices: ['present', 'absent']
+    default: 'present'
+    type: str
+
+requirements:
+  - "packet-python >= 1.35"
+  - "python >= 2.6"
+'''
+
+EXAMPLES = '''
+# All the examples assume that you have your Packet api token in env var PACKET_API_TOKEN.
+# You can also pass it to the auth_token parameter of the module instead.
+
+- name: create 1 device and assign an arbitrary public IPv4 subnet to it
+  hosts: localhost
+  tasks:
+
+  - packet_device:
+      project_id: 89b497ee-5afc-420a-8fb5-56984898f4df
+      hostnames: myserver
+      operating_system: ubuntu_16_04
+      plan: baremetal_0
+      facility: sjc1
+      state: active
+
+# Pick an IPv4 address from a block allocated to your project.
+
+  - packet_ip_subnet:
+      project_id: 89b497ee-5afc-420a-8fb5-56984898f4df
+      hostname: myserver
+      cidr: "147.75.201.78/32"
+
+# Release IP address 147.75.201.78
+
+- name: unassign IP address from any device in your project
+  hosts: localhost
+  tasks:
+  - packet_ip_subnet:
+      project_id: 89b497ee-5afc-420a-8fb5-56984898f4df
+      cidr: "147.75.201.78/32"
+      state: absent
+'''
+
+RETURN = '''
+changed:
+  description: True if an IP address assignment was altered in any way (created or removed).
+  type: bool
+  sample: True
+  returned: success
+
+device_id:
+  type: str
+  description: UUID of the device associated with the specified IP address.
+  returned: success
+
+subnet:
+  description: Dict with data about the handled IP subnet.
+  type: dict
+  sample:
+    address: 147.75.90.241
+    address_family: 4
+    assigned_to: { href : /devices/61f9aa5e-0530-47f5-97c2-113828e61ed0 }
+    cidr: 31
+    created_at: '2017-08-07T15:15:30Z'
+    enabled: True
+    gateway: 147.75.90.240
+    href: /ips/31eda960-0a16-4c0f-b196-f3dc4928529f
+    id: 31eda960-0a16-4c0f-b196-f3dc4928529f
+    manageable: True
+    management: True
+    netmask: 255.255.255.254
+    network: 147.75.90.240
+    public: True
+  returned: success
+'''
+
+
+import uuid
+import re
+
+from ansible.module_utils.basic import AnsibleModule, env_fallback
+from ansible.module_utils._text import to_native
+
+HAS_PACKET_SDK = True
+
+try:
+    import packet
+except ImportError:
+    HAS_PACKET_SDK = False
+
+
+NAME_RE = r'({0}|{0}{1}*{0})'.format(r'[a-zA-Z0-9]', r'[a-zA-Z0-9\-]')
+HOSTNAME_RE = r'({0}\.)*{0}$'.format(NAME_RE)
+PROJECT_MAX_DEVICES = 100
+
+
+PACKET_API_TOKEN_ENV_VAR = "PACKET_API_TOKEN"
+
+
+ALLOWED_STATES = ['absent', 'present']
+
+
+def is_valid_hostname(hostname):
+    return re.match(HOSTNAME_RE, hostname) is not None
+
+
+def is_valid_uuid(myuuid):
+    try:
+        val = uuid.UUID(myuuid, version=4)
+    except ValueError:
+        return False
+    return str(val) == myuuid
+
+
+def get_existing_devices(module, packet_conn):
+    project_id = module.params.get('project_id')
+    if not is_valid_uuid(project_id):
+        raise Exception("Project ID {0} does not seem to be valid".format(project_id))
+
+    per_page = module.params.get('device_count')
+    return packet_conn.list_devices(
+        project_id, params={'per_page': per_page})
+
+
+def get_specified_device_identifiers(module):
+    if module.params.get('device_id'):
+        _d_id = module.params.get('device_id')
+        if not is_valid_uuid(_d_id):
+            raise Exception("Device ID '{0}' does not seem to be valid".format(_d_id))
+        return {'device_id': _d_id, 'hostname': None}
+    elif module.params.get('hostname'):
+        _hn = module.params.get('hostname')
+        if not is_valid_hostname(_hn):
+            raise Exception("Hostname '{0}' does not seem to be valid".format(_hn))
+        return {'hostname': _hn, 'device_id': None}
+    else:
+        return {'hostname': None, 'device_id': None}
+
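+
+# A brief illustrative note (not from the original source): parse_subnet_cidr
+# below splits a CIDR string into its address and integer prefix length, e.g.
+#
+#     parse_subnet_cidr("147.75.201.78/32")  ->  ("147.75.201.78", 32)
+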
Exception("CIDR expression in wrong format, must be address/prefix_len") + addr, prefixlen = cidr.split("/") + try: + prefixlen = int(prefixlen) + except ValueError: + raise("Wrong prefix length in CIDR expression {0}".format(cidr)) + return addr, prefixlen + + +def act_on_assignment(target_state, module, packet_conn): + return_dict = {'changed': False} + specified_cidr = module.params.get("cidr") + address, prefixlen = parse_subnet_cidr(specified_cidr) + + specified_identifier = get_specified_device_identifiers(module) + + if module.check_mode: + return return_dict + + if (specified_identifier['hostname'] is None) and ( + specified_identifier['device_id'] is None): + if target_state == 'absent': + # The special case to release the IP from any assignment + for d in get_existing_devices(module, packet_conn): + for ia in d.ip_addresses: + if address == ia['address'] and prefixlen == ia['cidr']: + packet_conn.call_api(ia['href'], "DELETE") + return_dict['changed'] = True + return_dict['subnet'] = ia + return_dict['device_id'] = d.id + return return_dict + raise Exception("If you assign an address, you must specify either " + "target device ID or target unique hostname.") + + if specified_identifier['device_id'] is not None: + device = packet_conn.get_device(specified_identifier['device_id']) + else: + all_devices = get_existing_devices(module, packet_conn) + hn = specified_identifier['hostname'] + matching_devices = [d for d in all_devices if d.hostname == hn] + if len(matching_devices) > 1: + raise Exception("There are more than one devices matching given hostname {0}".format(hn)) + if len(matching_devices) == 0: + raise Exception("There is no device matching given hostname {0}".format(hn)) + device = matching_devices[0] + + return_dict['device_id'] = device.id + assignment_dicts = [i for i in device.ip_addresses + if i['address'] == address and i['cidr'] == prefixlen] + if len(assignment_dicts) > 1: + raise Exception("IP address {0} is assigned more than once for device {1}".format( + specified_cidr, device.hostname)) + + if target_state == "absent": + if len(assignment_dicts) == 1: + packet_conn.call_api(assignment_dicts[0]['href'], "DELETE") + return_dict['subnet'] = assignment_dicts[0] + return_dict['changed'] = True + elif target_state == "present": + if len(assignment_dicts) == 0: + new_assignment = packet_conn.call_api( + "devices/{0}/ips".format(device.id), "POST", {"address": "{0}".format(specified_cidr)}) + return_dict['changed'] = True + return_dict['subnet'] = new_assignment + return return_dict + + +def main(): + module = AnsibleModule( + argument_spec=dict( + auth_token=dict( + type='str', + fallback=(env_fallback, [PACKET_API_TOKEN_ENV_VAR]), + no_log=True + ), + device_id=dict(type='str'), + hostname=dict(type='str'), + project_id=dict(type='str'), + device_count=dict(type='int', default=PROJECT_MAX_DEVICES), + cidr=dict(type='str', required=True, aliases=['name']), + state=dict(choices=ALLOWED_STATES, default='present'), + ), + supports_check_mode=True, + mutually_exclusive=[('hostname', 'device_id')], + required_one_of=[['hostname', 'device_id', 'project_id']], + required_by=dict( + hostname=('project_id',), + ), + ) + + if not HAS_PACKET_SDK: + module.fail_json(msg='packet required for this module') + + if not module.params.get('auth_token'): + _fail_msg = ("if Packet API token is not in environment variable {0}, " + "the auth_token parameter is required".format(PACKET_API_TOKEN_ENV_VAR)) + module.fail_json(msg=_fail_msg) + + auth_token = module.params.get('auth_token') + 
+ packet_conn = packet.Manager(auth_token=auth_token) + + state = module.params.get('state') + + try: + module.exit_json(**act_on_assignment(state, module, packet_conn)) + except Exception as e: + module.fail_json( + msg="failed to set IP subnet to state {0}, error: {1}".format(state, to_native(e))) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/packet/packet_project.py b/plugins/modules/cloud/packet/packet_project.py new file mode 100644 index 0000000000..90b9d32f8b --- /dev/null +++ b/plugins/modules/cloud/packet/packet_project.py @@ -0,0 +1,247 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2019, Nurfet Becirevic +# Copyright: (c) 2019, Tomas Karasek +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: packet_project + +short_description: Create/delete a project in Packet host. + +description: + - Create/delete a project in Packet host. + - API is documented at U(https://www.packet.com/developers/api/#projects). + + +author: + - Tomas Karasek (@t0mk) + - Nurfet Becirevic (@nurfet-becirevic) + +options: + state: + description: + - Indicate desired state of the target. + default: present + choices: ['present', 'absent'] + type: str + + payment_method: + description: + - Payment method is name of one of the payment methods available to your user. + - When blank, the API assumes the default payment method. + type: str + + auth_token: + description: + - Packet api token. You can also supply it in env var C(PACKET_API_TOKEN). + type: str + + name: + description: + - Name for/of the project. + type: str + + org_id: + description: + - UUID of the organization to create a project for. + - When blank, the API assumes the default organization. + type: str + + id: + description: + - UUID of the project which you want to remove. + type: str + + custom_data: + description: + - Custom data about the project to create. + type: str + +requirements: + - "python >= 2.6" + - "packet-python >= 1.40" + +''' + +EXAMPLES = ''' +# All the examples assume that you have your Packet API token in env var PACKET_API_TOKEN. +# You can also pass the api token in module param auth_token. + +- name: create new project + hosts: localhost + tasks: + packet_project: + name: "new project" + +- name: create new project within non-default organization + hosts: localhost + tasks: + packet_project: + name: "my org project" + org_id: a4cc87f9-e00f-48c2-9460-74aa60beb6b0 + +- name: remove project by id + hosts: localhost + tasks: + packet_project: + state: absent + id: eef49903-7a09-4ca1-af67-4087c29ab5b6 + +- name: create new project with non-default billing method + hosts: localhost + tasks: + packet_project: + name: "newer project" + payment_method: "the other visa" +''' + +RETURN = ''' +changed: + description: True if a project was created or removed. + type: bool + sample: True + returned: success + +name: + description: Name of addressed project. + type: str + returned: success + +id: + description: UUID of addressed project. 
+  type: str
+  returned: success
+'''
+
+from ansible.module_utils.basic import AnsibleModule, env_fallback
+from ansible.module_utils._text import to_native
+
+HAS_PACKET_SDK = True
+
+try:
+    import packet
+except ImportError:
+    HAS_PACKET_SDK = False
+
+PACKET_API_TOKEN_ENV_VAR = "PACKET_API_TOKEN"
+
+
+def act_on_project(target_state, module, packet_conn):
+    result_dict = {'changed': False}
+    given_id = module.params.get('id')
+    given_name = module.params.get('name')
+    if given_id:
+        matching_projects = [
+            p for p in packet_conn.list_projects() if given_id == p.id]
+    else:
+        matching_projects = [
+            p for p in packet_conn.list_projects() if given_name == p.name]
+
+    if target_state == 'present':
+        if len(matching_projects) == 0:
+            org_id = module.params.get('org_id')
+            custom_data = module.params.get('custom_data')
+            payment_method = module.params.get('payment_method')
+
+            if not org_id:
+                params = {
+                    "name": given_name,
+                    "payment_method_id": payment_method,
+                    "customdata": custom_data
+                }
+                new_project_data = packet_conn.call_api("projects", "POST", params)
+                new_project = packet.Project(new_project_data, packet_conn)
+            else:
+                new_project = packet_conn.create_organization_project(
+                    org_id=org_id,
+                    name=given_name,
+                    payment_method_id=payment_method,
+                    customdata=custom_data
+                )
+
+            result_dict['changed'] = True
+            matching_projects.append(new_project)
+
+        result_dict['name'] = matching_projects[0].name
+        result_dict['id'] = matching_projects[0].id
+    else:
+        if len(matching_projects) > 1:
+            _msg = ("More than one project matched for module call with state = absent: "
+                    "{0}".format(to_native(matching_projects)))
+            module.fail_json(msg=_msg)
+
+        if len(matching_projects) == 1:
+            p = matching_projects[0]
+            result_dict['name'] = p.name
+            result_dict['id'] = p.id
+            result_dict['changed'] = True
+            try:
+                p.delete()
+            except Exception as e:
+                _msg = ("while trying to remove project {0}, id {1}, got error: {2}".format(
+                    p.name, p.id, to_native(e)))
+                module.fail_json(msg=_msg)
+    return result_dict
+
+
+def main():
+    module = AnsibleModule(
+        argument_spec=dict(
+            state=dict(choices=['present', 'absent'], default='present'),
+            auth_token=dict(
+                type='str',
+                fallback=(env_fallback, [PACKET_API_TOKEN_ENV_VAR]),
+                no_log=True
+            ),
+            name=dict(type='str'),
+            id=dict(type='str'),
+            org_id=dict(type='str'),
+            payment_method=dict(type='str'),
+            custom_data=dict(type='str'),
+        ),
+        supports_check_mode=True,
+        required_one_of=[("name", "id",)],
+        mutually_exclusive=[
+            ('name', 'id'),
+        ]
+    )
+    if not HAS_PACKET_SDK:
+        module.fail_json(msg='packet required for this module')
+
+    if not module.params.get('auth_token'):
+        _fail_msg = ("if Packet API token is not in environment variable {0}, "
+                     "the auth_token parameter is required".format(PACKET_API_TOKEN_ENV_VAR))
+        module.fail_json(msg=_fail_msg)
+
+    auth_token = module.params.get('auth_token')
+
+    packet_conn = packet.Manager(auth_token=auth_token)
+
+    state = module.params.get('state')
+
+    if state in ['present', 'absent']:
+        if module.check_mode:
+            module.exit_json(changed=False)
+
+        try:
+            module.exit_json(**act_on_project(state, module, packet_conn))
+        except Exception as e:
+            module.fail_json(
+                msg="failed to set project state {0}: {1}".format(state, to_native(e)))
+    else:
+        module.fail_json(msg="{0} is not a valid state for this module".format(state))
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/cloud/packet/packet_sshkey.py b/plugins/modules/cloud/packet/packet_sshkey.py
new file mode 100644
index 0000000000..e31dff9046
--- /dev/null
+++ b/plugins/modules/cloud/packet/packet_sshkey.py
@@ -0,0 +1,266 @@
+#!/usr/bin/python
+# Copyright 2016 Tomas Karasek
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: packet_sshkey
+short_description: Create/delete an SSH key in Packet host.
+description:
+  - Create/delete an SSH key in Packet host.
+  - API is documented at U(https://www.packet.net/help/api/#page:ssh-keys,header:ssh-keys-ssh-keys-post).
+author: "Tomas Karasek (@t0mk)"
+options:
+  state:
+    description:
+      - Indicate desired state of the target.
+    default: present
+    choices: ['present', 'absent']
+  auth_token:
+    description:
+      - Packet API token. You can also supply it in env var C(PACKET_API_TOKEN).
+  label:
+    description:
+      - Label for the key. If you leave it empty, it will be read from the key string.
+  id:
+    description:
+      - UUID of the key which you want to remove.
+  fingerprint:
+    description:
+      - Fingerprint of the key which you want to remove.
+  key:
+    description:
+      - Public key string ({type} {base64 encoded key} {description}).
+  key_file:
+    description:
+      - File with the public key.
+
+requirements:
+  - "python >= 2.6"
+  - packet-python
+
+'''
+
+EXAMPLES = '''
+# All the examples assume that you have your Packet API token in env var PACKET_API_TOKEN.
+# You can also pass the api token in module param auth_token.
+
+- name: create sshkey from string
+  hosts: localhost
+  tasks:
+  - packet_sshkey:
+      key: "{{ lookup('file', 'my_packet_sshkey.pub') }}"
+
+- name: create sshkey from file
+  hosts: localhost
+  tasks:
+  - packet_sshkey:
+      label: key from file
+      key_file: ~/ff.pub
+
+- name: remove sshkey by id
+  hosts: localhost
+  tasks:
+  - packet_sshkey:
+      state: absent
+      id: eef49903-7a09-4ca1-af67-4087c29ab5b6
+'''
+
+RETURN = '''
+changed:
+    description: True if a sshkey was created or removed.
+    type: bool
+    sample: True
+    returned: always
+sshkeys:
+    description: Information about sshkeys that were created/removed.
+    type: list
+    sample: [
+      {
+        "fingerprint": "5c:93:74:7c:ed:07:17:62:28:75:79:23:d6:08:93:46",
+        "id": "41d61bd8-3342-428b-a09c-e67bdd18a9b7",
+        "key": "ssh-dss AAAAB3NzaC1kc3MAAACBAIfNT5S0ncP4BBJBYNhNPxFF9lqVhfPeu6SM1LoCocxqDc1AT3zFRi8hjIf6TLZ2AA4FYbcAWxLMhiBxZRVldT9GdBXile78kAK5z3bKTwq152DCqpxwwbaTIggLFhsU8wrfBsPWnDuAxZ0h7mmrCjoLIE3CNLDA/NmV3iB8xMThAAAAFQCStcesSgR1adPORzBxTr7hug92LwAAAIBOProm3Gk+HWedLyE8IfofLaOeRnbBRHAOL4z0SexKkVOnQ/LGN/uDIIPGGBDYTvXgKZT+jbHeulRJ2jKgfSpGKN4JxFQ8uzVH492jEiiUJtT72Ss1dCV4PmyERVIw+f54itihV3z/t25dWgowhb0int8iC/OY3cGodlmYb3wdcQAAAIBuLbB45djZXzUkOTzzcRDIRfhaxo5WipbtEM2B1fuBt2gyrvksPpH/LK6xTjdIIb0CxPu4OCxwJG0aOz5kJoRnOWIXQGhH7VowrJhsqhIc8gN9ErbO5ea8b1L76MNcAotmBDeTUiPw01IJ8MdDxfmcsCslJKgoRKSmQpCwXQtN2g== tomk@hp2",
+        "label": "mynewkey33"
+      }
+    ]
+    returned: always
+''' # NOQA
+
+import os
+import uuid
+
+from ansible.module_utils.basic import AnsibleModule
+
+HAS_PACKET_SDK = True
+try:
+    import packet
+except ImportError:
+    HAS_PACKET_SDK = False
+
+
+PACKET_API_TOKEN_ENV_VAR = "PACKET_API_TOKEN"
+
+
+def serialize_sshkey(sshkey):
+    sshkey_data = {}
+    copy_keys = ['id', 'key', 'label', 'fingerprint']
+    for name in copy_keys:
+        sshkey_data[name] = getattr(sshkey, name)
+    return sshkey_data
+
+
+def is_valid_uuid(myuuid):
+    try:
+        val = uuid.UUID(myuuid, version=4)
+    except ValueError:
+        return False
+    return str(val) == myuuid
+
+
+def load_key_string(key_str):
+    ret_dict = {}
+    key_str = key_str.strip()
+    ret_dict['key'] = key_str
+    cut_key = key_str.split()
+    if len(cut_key) in [2, 3]:
+        if len(cut_key) == 3:
+            ret_dict['label'] = cut_key[2]
+    else:
+        raise Exception("Public key %s is in wrong format" % key_str)
+    return ret_dict
+
+
+def get_sshkey_selector(module):
+    key_id = module.params.get('id')
+    if key_id:
+        if not is_valid_uuid(key_id):
+            raise Exception("sshkey ID %s is not valid UUID" % key_id)
+    selecting_fields = ['label', 'fingerprint', 'id', 'key']
+    select_dict = {}
+    for f in selecting_fields:
+        if module.params.get(f) is not None:
+            select_dict[f] = module.params.get(f)
+
+    if module.params.get('key_file'):
+        with open(module.params.get('key_file')) as _file:
+            loaded_key = load_key_string(_file.read())
+        select_dict['key'] = loaded_key['key']
+        if module.params.get('label') is None:
+            if loaded_key.get('label'):
+                select_dict['label'] = loaded_key['label']
+
+    def selector(k):
+        if 'key' in select_dict:
+            # if key string is specified, compare only the key strings
+            return k.key == select_dict['key']
+        else:
+            # if key string not specified, all the fields must match
+            return all([select_dict[f] == getattr(k, f) for f in select_dict])
+    return selector
+
+
+def act_on_sshkeys(target_state, module, packet_conn):
+    selector = get_sshkey_selector(module)
+    existing_sshkeys = packet_conn.list_ssh_keys()
+    # list() so the emptiness check below also works on Python 3, where
+    # filter() returns an iterator rather than a list.
+    matching_sshkeys = list(filter(selector, existing_sshkeys))
+    changed = False
+    if target_state == 'present':
+        if matching_sshkeys == []:
+            # there is no key matching the fields from module call
+            # => create the key; the label comes either from module params
+            # or from the key string/file
+            newkey = {}
+            if module.params.get('key_file'):
+                with open(module.params.get('key_file')) as f:
+                    newkey = load_key_string(f.read())
+            if module.params.get('key'):
+                newkey = load_key_string(module.params.get('key'))
+            if module.params.get('label'):
+                newkey['label'] = module.params.get('label')
+            for param in ('label', 'key'):
+                if param not in newkey:
+                    _msg = ("If you want to ensure a key is present, you must "
+                            "supply both a label and a key string, either in "
+                            "module params, or in a key file. 
%s is missing" + % param) + raise Exception(_msg) + matching_sshkeys = [] + new_key_response = packet_conn.create_ssh_key( + newkey['label'], newkey['key']) + changed = True + + matching_sshkeys.append(new_key_response) + else: + # state is 'absent' => delete matching keys + for k in matching_sshkeys: + try: + k.delete() + changed = True + except Exception as e: + _msg = ("while trying to remove sshkey %s, id %s %s, " + "got error: %s" % + (k.label, k.id, target_state, e)) + raise Exception(_msg) + + return { + 'changed': changed, + 'sshkeys': [serialize_sshkey(k) for k in matching_sshkeys] + } + + +def main(): + module = AnsibleModule( + argument_spec=dict( + state=dict(choices=['present', 'absent'], default='present'), + auth_token=dict(default=os.environ.get(PACKET_API_TOKEN_ENV_VAR), + no_log=True), + label=dict(type='str', aliases=['name'], default=None), + id=dict(type='str', default=None), + fingerprint=dict(type='str', default=None), + key=dict(type='str', default=None, no_log=True), + key_file=dict(type='path', default=None), + ), + mutually_exclusive=[ + ('label', 'id'), + ('label', 'fingerprint'), + ('id', 'fingerprint'), + ('key', 'fingerprint'), + ('key', 'id'), + ('key_file', 'key'), + ] + ) + + if not HAS_PACKET_SDK: + module.fail_json(msg='packet required for this module') + + if not module.params.get('auth_token'): + _fail_msg = ("if Packet API token is not in environment variable %s, " + "the auth_token parameter is required" % + PACKET_API_TOKEN_ENV_VAR) + module.fail_json(msg=_fail_msg) + + auth_token = module.params.get('auth_token') + + packet_conn = packet.Manager(auth_token=auth_token) + + state = module.params.get('state') + + if state in ['present', 'absent']: + try: + module.exit_json(**act_on_sshkeys(state, module, packet_conn)) + except Exception as e: + module.fail_json(msg='failed to set sshkey state: %s' % str(e)) + else: + module.fail_json(msg='%s is not a valid state for this module' % state) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/packet/packet_volume.py b/plugins/modules/cloud/packet/packet_volume.py new file mode 100644 index 0000000000..efc90ae1cc --- /dev/null +++ b/plugins/modules/cloud/packet/packet_volume.py @@ -0,0 +1,325 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2019, Nurfet Becirevic +# Copyright: (c) 2017, Tomas Karasek +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: packet_volume + +short_description: Create/delete a volume in Packet host. + +description: + - Create/delete a volume in Packet host. + - API is documented at U(https://www.packet.com/developers/api/#volumes). + + +author: + - Tomas Karasek (@t0mk) + - Nurfet Becirevic (@nurfet-becirevic) + +options: + state: + description: + - Desired state of the volume. + default: present + choices: ['present', 'absent'] + type: str + + project_id: + description: + - ID of project of the device. + required: true + type: str + + auth_token: + description: + - Packet api token. You can also supply it in env var C(PACKET_API_TOKEN). + type: str + + name: + description: + - Selector for API-generated name of the volume + type: str + + description: + description: + - User-defined description attribute for Packet volume. 
+ - "It is used used as idempotent identifier - if volume with given + description exists, new one is not created." + type: str + + id: + description: + - UUID of a volume. + type: str + + plan: + description: + - storage_1 for standard tier, storage_2 for premium (performance) tier. + - Tiers are described at U(https://www.packet.com/cloud/storage/). + choices: ['storage_1', 'storage_2'] + default: 'storage_1' + type: str + + facility: + description: + - Location of the volume. + - Volumes can only be attached to device in the same location. + type: str + + size: + description: + - Size of the volume in gigabytes. + type: int + + locked: + description: + - Create new volume locked. + type: bool + default: False + + billing_cycle: + description: + - Billing cycle for new volume. + choices: ['hourly', 'monthly'] + default: 'hourly' + type: str + + snapshot_policy: + description: + - Snapshot policy for new volume. + type: dict + + suboptions: + snapshot_count: + description: + - How many snapshots to keep, a positive integer. + required: True + type: int + + snapshot_frequency: + description: + - Frequency of snapshots. + required: True + choices: ["15min", "1hour", "1day", "1week", "1month", "1year"] + type: str + +requirements: + - "python >= 2.6" + - "packet-python >= 1.35" + +''' + +EXAMPLES = ''' +# All the examples assume that you have your Packet API token in env var PACKET_API_TOKEN. +# You can also pass the api token in module param auth_token. + +- hosts: localhost + vars: + volname: testvol123 + project_id: 53000fb2-ee46-4673-93a8-de2c2bdba33b + + tasks: + - name: test create volume + packet_volume: + description: "{{ volname }}" + project_id: "{{ project_id }}" + facility: 'ewr1' + plan: 'storage_1' + state: present + size: 10 + snapshot_policy: + snapshot_count: 10 + snapshot_frequency: 1day + register: result_create + + - name: test delete volume + packet_volume: + id: "{{ result_create.id }}" + project_id: "{{ project_id }}" + state: absent +''' + +RETURN = ''' +id: + description: UUID of specified volume + type: str + returned: success + sample: 53000fb2-ee46-4673-93a8-de2c2bdba33c +name: + description: The API-generated name of the volume resource. + type: str + returned: if volume is attached/detached to/from some device + sample: "volume-a91dc506" +description: + description: The user-defined description of the volume resource. 
+ type: str + returned: success + sample: "Just another volume" +''' + +import uuid + +from ansible.module_utils.basic import AnsibleModule, env_fallback +from ansible.module_utils._text import to_native + +HAS_PACKET_SDK = True + + +try: + import packet +except ImportError: + HAS_PACKET_SDK = False + + +PACKET_API_TOKEN_ENV_VAR = "PACKET_API_TOKEN" + +VOLUME_PLANS = ["storage_1", "storage_2"] +VOLUME_STATES = ["present", "absent"] +BILLING = ["hourly", "monthly"] + + +def is_valid_uuid(myuuid): + try: + val = uuid.UUID(myuuid, version=4) + except ValueError: + return False + return str(val) == myuuid + + +def get_volume_selector(module): + if module.params.get('id'): + i = module.params.get('id') + if not is_valid_uuid(i): + raise Exception("Volume ID '{0}' is not a valid UUID".format(i)) + return lambda v: v['id'] == i + elif module.params.get('name'): + n = module.params.get('name') + return lambda v: v['name'] == n + elif module.params.get('description'): + d = module.params.get('description') + return lambda v: v['description'] == d + + +def get_or_fail(params, key): + item = params.get(key) + if item is None: + raise Exception("{0} must be specified for new volume".format(key)) + return item + + +def act_on_volume(target_state, module, packet_conn): + return_dict = {'changed': False} + s = get_volume_selector(module) + project_id = module.params.get("project_id") + api_method = "projects/{0}/storage".format(project_id) + all_volumes = packet_conn.call_api(api_method, "GET")['volumes'] + matching_volumes = [v for v in all_volumes if s(v)] + + if target_state == "present": + if len(matching_volumes) == 0: + params = { + "description": get_or_fail(module.params, "description"), + "size": get_or_fail(module.params, "size"), + "plan": get_or_fail(module.params, "plan"), + "facility": get_or_fail(module.params, "facility"), + "locked": get_or_fail(module.params, "locked"), + "billing_cycle": get_or_fail(module.params, "billing_cycle"), + "snapshot_policies": module.params.get("snapshot_policy"), + } + + new_volume_data = packet_conn.call_api(api_method, "POST", params) + return_dict['changed'] = True + for k in ['id', 'name', 'description']: + return_dict[k] = new_volume_data[k] + + else: + for k in ['id', 'name', 'description']: + return_dict[k] = matching_volumes[0][k] + + else: + if len(matching_volumes) > 1: + _msg = ("More than one volume matches in module call for absent state: {0}".format( + to_native(matching_volumes))) + module.fail_json(msg=_msg) + + if len(matching_volumes) == 1: + volume = matching_volumes[0] + packet_conn.call_api("storage/{0}".format(volume['id']), "DELETE") + return_dict['changed'] = True + for k in ['id', 'name', 'description']: + return_dict[k] = volume[k] + + return return_dict + + +def main(): + module = AnsibleModule( + argument_spec=dict( + id=dict(type='str', default=None), + description=dict(type="str", default=None), + name=dict(type='str', default=None), + state=dict(choices=VOLUME_STATES, default="present"), + auth_token=dict( + type='str', + fallback=(env_fallback, [PACKET_API_TOKEN_ENV_VAR]), + no_log=True + ), + project_id=dict(required=True), + plan=dict(choices=VOLUME_PLANS, default="storage_1"), + facility=dict(type="str"), + size=dict(type="int"), + locked=dict(type="bool", default=False), + snapshot_policy=dict(type='dict', default=None), + billing_cycle=dict(type='str', choices=BILLING, default="hourly"), + ), + supports_check_mode=True, + required_one_of=[("name", "id", "description")], + mutually_exclusive=[ + ('name', 'id'), + ('id', 
'description'), + ('name', 'description'), + ] + ) + + if not HAS_PACKET_SDK: + module.fail_json(msg='packet required for this module') + + if not module.params.get('auth_token'): + _fail_msg = ("if Packet API token is not in environment variable {0}, " + "the auth_token parameter is required".format(PACKET_API_TOKEN_ENV_VAR)) + module.fail_json(msg=_fail_msg) + + auth_token = module.params.get('auth_token') + + packet_conn = packet.Manager(auth_token=auth_token) + + state = module.params.get('state') + + if state in VOLUME_STATES: + if module.check_mode: + module.exit_json(changed=False) + + try: + module.exit_json(**act_on_volume(state, module, packet_conn)) + except Exception as e: + module.fail_json( + msg="failed to set volume state {0}: {1}".format( + state, to_native(e))) + else: + module.fail_json(msg="{0} is not a valid state for this module".format(state)) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/packet/packet_volume_attachment.py b/plugins/modules/cloud/packet/packet_volume_attachment.py new file mode 100644 index 0000000000..73172024ec --- /dev/null +++ b/plugins/modules/cloud/packet/packet_volume_attachment.py @@ -0,0 +1,301 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2019, Nurfet Becirevic +# Copyright: (c) 2017, Tomas Karasek +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: packet_volume_attachment + +short_description: Attach/detach a volume to a device in the Packet host. + +description: + - Attach/detach a volume to a device in the Packet host. + - API is documented at U(https://www.packet.com/developers/api/volumes/). + - "This module creates the attachment route in the Packet API. In order to discover + the block devices on the server, you have to run the Attach Scripts, + as documented at U(https://help.packet.net/technical/storage/packet-block-storage-linux)." + + +author: + - Tomas Karasek (@t0mk) + - Nurfet Becirevic (@nurfet-becirevic) + +options: + state: + description: + - Indicate desired state of the attachment. + default: present + choices: ['present', 'absent'] + type: str + + auth_token: + description: + - Packet api token. You can also supply it in env var C(PACKET_API_TOKEN). + type: str + + project_id: + description: + - UUID of the project to which the device and volume belong. + type: str + + volume: + description: + - Selector for the volume. + - It can be a UUID, an API-generated volume name, or user-defined description string. + - 'Example values: 4a347482-b546-4f67-8300-fb5018ef0c5, volume-4a347482, "my volume"' + type: str + + device: + description: + - Selector for the device. + - It can be a UUID of the device, or a hostname. + - 'Example values: 98a14f7a-3d27-4478-b7cf-35b5670523f3, "my device"' + type: str + +requirements: + - "python >= 2.6" + - "packet-python >= 1.35" + +''' + +EXAMPLES = ''' +# All the examples assume that you have your Packet API token in env var PACKET_API_TOKEN. +# You can also pass the api token in module param auth_token. 
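+
+# An illustrative sketch first (the IDs are assumed values): attaching an
+# existing volume to an existing device directly by UUID, since both the
+# volume and device selectors accept UUIDs as well as names.
+
+- hosts: localhost
+  tasks:
+    - name: attach a volume to a device by UUID
+      packet_volume_attachment:
+        project_id: 52000fb2-ee46-4673-93a8-de2c2bdba33b
+        volume: 4a347482-b546-4f67-8300-fb5018ef0c5a
+        device: 98a14f7a-3d27-4478-b7cf-35b5670523f3
+
+# The full create-attach-detach flow: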
+
+- hosts: localhost
+
+  vars:
+    volname: testvol
+    devname: testdev
+    project_id: 52000fb2-ee46-4673-93a8-de2c2bdba33b
+
+  tasks:
+    - name: test create volume
+      packet_volume:
+        description: "{{ volname }}"
+        project_id: "{{ project_id }}"
+        facility: ewr1
+        plan: storage_1
+        state: present
+        size: 10
+        snapshot_policy:
+          snapshot_count: 10
+          snapshot_frequency: 1day
+
+    - packet_device:
+        project_id: "{{ project_id }}"
+        hostnames: "{{ devname }}"
+        operating_system: ubuntu_16_04
+        plan: baremetal_0
+        facility: ewr1
+        state: present
+
+    - name: Attach testvol to testdev
+      packet_volume_attachment:
+        project_id: "{{ project_id }}"
+        volume: "{{ volname }}"
+        device: "{{ devname }}"
+
+    - name: Detach testvol from testdev
+      packet_volume_attachment:
+        project_id: "{{ project_id }}"
+        volume: "{{ volname }}"
+        device: "{{ devname }}"
+        state: absent
+
+'''
+
+RETURN = '''
+volume_id:
+    description: UUID of volume addressed by the module call.
+    type: str
+    returned: success
+
+device_id:
+    description: UUID of device addressed by the module call.
+    type: str
+    returned: success
+'''
+
+import uuid
+
+from ansible.module_utils.basic import AnsibleModule, env_fallback
+from ansible.module_utils._text import to_native
+
+HAS_PACKET_SDK = True
+
+
+try:
+    import packet
+except ImportError:
+    HAS_PACKET_SDK = False
+
+
+PACKET_API_TOKEN_ENV_VAR = "PACKET_API_TOKEN"
+
+STATES = ["present", "absent"]
+
+
+def is_valid_uuid(myuuid):
+    try:
+        val = uuid.UUID(myuuid, version=4)
+    except ValueError:
+        return False
+    return str(val) == myuuid
+
+
+def get_volume_selector(spec):
+    if is_valid_uuid(spec):
+        return lambda v: v['id'] == spec
+    else:
+        return lambda v: v['name'] == spec or v['description'] == spec
+
+
+def get_device_selector(spec):
+    if is_valid_uuid(spec):
+        return lambda v: v['id'] == spec
+    else:
+        return lambda v: v['hostname'] == spec
+
+
+def do_attach(packet_conn, vol_id, dev_id):
+    api_method = "storage/{0}/attachments".format(vol_id)
+    packet_conn.call_api(
+        api_method,
+        params={"device_id": dev_id},
+        type="POST")
+
+
+def do_detach(packet_conn, vol, dev_id=None):
+    def dev_match(a):
+        return (dev_id is None) or (a['device']['id'] == dev_id)
+    for a in vol['attachments']:
+        if dev_match(a):
+            packet_conn.call_api(a['href'], type="DELETE")
+
+
+def validate_selected(matches, resource_type, spec):
+    if len(matches) > 1:
+        _msg = ("more than one {0} matches specification {1}: {2}".format(
+                resource_type, spec, matches))
+        raise Exception(_msg)
+    if len(matches) == 0:
+        _msg = "no {0} matches specification: {1}".format(resource_type, spec)
+        raise Exception(_msg)
+
+
+def get_attached_dev_ids(volume_dict):
+    if len(volume_dict['attachments']) == 0:
+        return []
+    else:
+        return [a['device']['id'] for a in volume_dict['attachments']]
+
+
+def act_on_volume_attachment(target_state, module, packet_conn):
+    return_dict = {'changed': False}
+    volspec = module.params.get("volume")
+    devspec = module.params.get("device")
+    if devspec is None and target_state == 'present':
+        raise Exception("If you want to attach a volume, you must specify a device.")
+    project_id = module.params.get("project_id")
+    volumes_api_method = "projects/{0}/storage".format(project_id)
+    volumes = packet_conn.call_api(volumes_api_method,
+                                   params={'include': 'facility,attachments.device'})['volumes']
+    v_match = get_volume_selector(volspec)
+    matching_volumes = [v for v in volumes if v_match(v)]
+    validate_selected(matching_volumes, "volume", volspec)
+    volume = matching_volumes[0]
+    return_dict['volume_id'] = 
volume['id'] + + device = None + if devspec is not None: + devices_api_method = "projects/{0}/devices".format(project_id) + devices = packet_conn.call_api(devices_api_method)['devices'] + d_match = get_device_selector(devspec) + matching_devices = [d for d in devices if d_match(d)] + validate_selected(matching_devices, "device", devspec) + device = matching_devices[0] + return_dict['device_id'] = device['id'] + + attached_device_ids = get_attached_dev_ids(volume) + + if target_state == "present": + if len(attached_device_ids) == 0: + do_attach(packet_conn, volume['id'], device['id']) + return_dict['changed'] = True + elif device['id'] not in attached_device_ids: + # Don't reattach volume which is attached to a different device. + # Rather fail than force remove a device on state == 'present'. + raise Exception("volume {0} is already attached to device {1}".format( + volume, attached_device_ids)) + else: + if device is None: + if len(attached_device_ids) > 0: + do_detach(packet_conn, volume) + return_dict['changed'] = True + elif device['id'] in attached_device_ids: + do_detach(packet_conn, volume, device['id']) + return_dict['changed'] = True + + return return_dict + + +def main(): + module = AnsibleModule( + argument_spec=dict( + state=dict(choices=STATES, default="present"), + auth_token=dict( + type='str', + fallback=(env_fallback, [PACKET_API_TOKEN_ENV_VAR]), + no_log=True + ), + volume=dict(type="str", required=True), + project_id=dict(type="str", required=True), + device=dict(type="str"), + ), + supports_check_mode=True, + ) + + if not HAS_PACKET_SDK: + module.fail_json(msg='packet required for this module') + + if not module.params.get('auth_token'): + _fail_msg = ("if Packet API token is not in environment variable {0}, " + "the auth_token parameter is required".format(PACKET_API_TOKEN_ENV_VAR)) + module.fail_json(msg=_fail_msg) + + auth_token = module.params.get('auth_token') + + packet_conn = packet.Manager(auth_token=auth_token) + + state = module.params.get('state') + + if state in STATES: + if module.check_mode: + module.exit_json(changed=False) + + try: + module.exit_json( + **act_on_volume_attachment(state, module, packet_conn)) + except Exception as e: + module.fail_json( + msg="failed to set volume_attachment state {0}: {1}".format(state, to_native(e))) + else: + module.fail_json(msg="{0} is not a valid state for this module".format(state)) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/profitbricks/profitbricks.py b/plugins/modules/cloud/profitbricks/profitbricks.py new file mode 100644 index 0000000000..9c879f2d41 --- /dev/null +++ b/plugins/modules/cloud/profitbricks/profitbricks.py @@ -0,0 +1,641 @@ +#!/usr/bin/python +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: profitbricks +short_description: Create, destroy, start, stop, and reboot a ProfitBricks virtual machine. +description: + - Create, destroy, update, start, stop, and reboot a ProfitBricks virtual machine. When the virtual machine is created it can optionally wait + for it to be 'running' before returning. 
This module has a dependency on profitbricks >= 1.0.0
+options:
+  auto_increment:
+    description:
+      - Whether or not to increment a single number in the name for created virtual machines.
+    type: bool
+    default: 'yes'
+  name:
+    description:
+      - The name of the virtual machine.
+    required: true
+  image:
+    description:
+      - The system image ID for creating the virtual machine, e.g. a3eae284-a2fe-11e4-b187-5f1f641608c8.
+    required: true
+  image_password:
+    description:
+      - Password set for the administrative user.
+  ssh_keys:
+    description:
+      - Public SSH keys allowing access to the virtual machine.
+  datacenter:
+    description:
+      - The datacenter to provision this virtual machine.
+  cores:
+    description:
+      - The number of CPU cores to allocate to the virtual machine.
+    default: 2
+  ram:
+    description:
+      - The amount of memory to allocate to the virtual machine.
+    default: 2048
+  cpu_family:
+    description:
+      - The CPU family type to allocate to the virtual machine.
+    default: AMD_OPTERON
+    choices: [ "AMD_OPTERON", "INTEL_XEON" ]
+  volume_size:
+    description:
+      - The size in GB of the boot volume.
+    default: 10
+  bus:
+    description:
+      - The bus type for the volume.
+    default: VIRTIO
+    choices: [ "IDE", "VIRTIO" ]
+  instance_ids:
+    description:
+      - List of instance ids. Currently only used when state='absent' to remove instances.
+  count:
+    description:
+      - The number of virtual machines to create.
+    default: 1
+  location:
+    description:
+      - The datacenter location. Use only if you want to create the Datacenter or else this value is ignored.
+    default: us/las
+    choices: [ "us/las", "de/fra", "de/fkb" ]
+  assign_public_ip:
+    description:
+      - This will assign the machine to the public LAN. If no LAN exists with public Internet access, it is created.
+    type: bool
+    default: 'no'
+  lan:
+    description:
+      - The ID of the LAN you wish to add the servers to.
+    default: 1
+  subscription_user:
+    description:
+      - The ProfitBricks username. Overrides the PB_SUBSCRIPTION_ID environment variable.
+  subscription_password:
+    description:
+      - The ProfitBricks password. Overrides the PB_PASSWORD environment variable.
+  wait:
+    description:
+      - Wait for the instance to be in state 'running' before returning.
+    type: bool
+    default: 'yes'
+  wait_timeout:
+    description:
+      - How long before wait gives up, in seconds.
+    default: 600
+  remove_boot_volume:
+    description:
+      - Remove the bootVolume of the virtual machine you're destroying.
+    type: bool
+    default: 'yes'
+  state:
+    description:
+      - Create or terminate instances.
+    default: 'present'
+    choices: [ "running", "stopped", "absent", "present" ]
+
+requirements:
+  - "profitbricks"
+  - "python >= 2.6"
+author: Matt Baldwin (@baldwinSPC)
+'''
+
+EXAMPLES = '''
+
+# Note: These examples do not set authentication details (subscription_user and subscription_password).
+
+# Provisioning example. This will create three servers and enumerate their names.
+
+- profitbricks:
+    datacenter: Tardis One
+    name: web%02d.stackpointcloud.com
+    cores: 4
+    ram: 2048
+    volume_size: 50
+    cpu_family: INTEL_XEON
+    image: a3eae284-a2fe-11e4-b187-5f1f641608c8
+    location: us/las
+    count: 3
+    assign_public_ip: true
+
+# Removing Virtual machines
+
+- profitbricks:
+    datacenter: Tardis One
+    instance_ids:
+      - 'web001.stackpointcloud.com'
+      - 'web002.stackpointcloud.com'
+      - 'web003.stackpointcloud.com'
+    wait_timeout: 500
+    state: absent
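+
+# An extra illustrative sketch (not from the original docs, values assumed):
+# remove a machine but keep its boot volume, using remove_boot_volume.
+
+- profitbricks:
+    datacenter: Tardis One
+    instance_ids:
+      - 'web001.stackpointcloud.com'
+    remove_boot_volume: false
+    state: absent
+
+# Starting Virtual Machines.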
+ +- profitbricks: + datacenter: Tardis One + instance_ids: + - 'web001.stackpointcloud.com' + - 'web002.stackpointcloud.com' + - 'web003.stackpointcloud.com' + wait_timeout: 500 + state: running + +# Stopping Virtual Machines + +- profitbricks: + datacenter: Tardis One + instance_ids: + - 'web001.stackpointcloud.com' + - 'web002.stackpointcloud.com' + - 'web003.stackpointcloud.com' + wait_timeout: 500 + state: stopped + +''' + +import re +import uuid +import time +import traceback + +HAS_PB_SDK = True + +try: + from profitbricks.client import ProfitBricksService, Volume, Server, Datacenter, NIC, LAN +except ImportError: + HAS_PB_SDK = False + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.six.moves import xrange +from ansible.module_utils._text import to_native + + +LOCATIONS = ['us/las', + 'de/fra', + 'de/fkb'] + +uuid_match = re.compile( + r'[\w]{8}-[\w]{4}-[\w]{4}-[\w]{4}-[\w]{12}', re.I) + + +def _wait_for_completion(profitbricks, promise, wait_timeout, msg): + if not promise: + return + wait_timeout = time.time() + wait_timeout + while wait_timeout > time.time(): + time.sleep(5) + operation_result = profitbricks.get_request( + request_id=promise['requestId'], + status=True) + + if operation_result['metadata']['status'] == "DONE": + return + elif operation_result['metadata']['status'] == "FAILED": + raise Exception( + 'Request failed to complete ' + msg + ' "' + str( + promise['requestId']) + '" to complete.') + + raise Exception( + 'Timed out waiting for async operation ' + msg + ' "' + str( + promise['requestId'] + ) + '" to complete.') + + +def _create_machine(module, profitbricks, datacenter, name): + cores = module.params.get('cores') + ram = module.params.get('ram') + cpu_family = module.params.get('cpu_family') + volume_size = module.params.get('volume_size') + disk_type = module.params.get('disk_type') + image_password = module.params.get('image_password') + ssh_keys = module.params.get('ssh_keys') + bus = module.params.get('bus') + lan = module.params.get('lan') + assign_public_ip = module.params.get('assign_public_ip') + subscription_user = module.params.get('subscription_user') + subscription_password = module.params.get('subscription_password') + location = module.params.get('location') + image = module.params.get('image') + assign_public_ip = module.boolean(module.params.get('assign_public_ip')) + wait = module.params.get('wait') + wait_timeout = module.params.get('wait_timeout') + + if assign_public_ip: + public_found = False + + lans = profitbricks.list_lans(datacenter) + for lan in lans['items']: + if lan['properties']['public']: + public_found = True + lan = lan['id'] + + if not public_found: + i = LAN( + name='public', + public=True) + + lan_response = profitbricks.create_lan(datacenter, i) + _wait_for_completion(profitbricks, lan_response, + wait_timeout, "_create_machine") + lan = lan_response['id'] + + v = Volume( + name=str(uuid.uuid4()).replace('-', '')[:10], + size=volume_size, + image=image, + image_password=image_password, + ssh_keys=ssh_keys, + disk_type=disk_type, + bus=bus) + + n = NIC( + lan=int(lan) + ) + + s = Server( + name=name, + ram=ram, + cores=cores, + cpu_family=cpu_family, + create_volumes=[v], + nics=[n], + ) + + try: + create_server_response = profitbricks.create_server( + datacenter_id=datacenter, server=s) + + _wait_for_completion(profitbricks, create_server_response, + wait_timeout, "create_virtual_machine") + + server_response = profitbricks.get_server( + datacenter_id=datacenter, + 
server_id=create_server_response['id'], + depth=3 + ) + except Exception as e: + module.fail_json(msg="failed to create the new server: %s" % str(e)) + else: + return server_response + + +def _startstop_machine(module, profitbricks, datacenter_id, server_id): + state = module.params.get('state') + + try: + if state == 'running': + profitbricks.start_server(datacenter_id, server_id) + else: + profitbricks.stop_server(datacenter_id, server_id) + + return True + except Exception as e: + module.fail_json(msg="failed to start or stop the virtual machine %s at %s: %s" % (server_id, datacenter_id, str(e))) + + +def _create_datacenter(module, profitbricks): + datacenter = module.params.get('datacenter') + location = module.params.get('location') + wait_timeout = module.params.get('wait_timeout') + + i = Datacenter( + name=datacenter, + location=location + ) + + try: + datacenter_response = profitbricks.create_datacenter(datacenter=i) + + _wait_for_completion(profitbricks, datacenter_response, + wait_timeout, "_create_datacenter") + + return datacenter_response + except Exception as e: + module.fail_json(msg="failed to create the new datacenter: %s" % str(e)) + + +def create_virtual_machine(module, profitbricks): + """ + Create new virtual machine + + module : AnsibleModule object + profitbricks: authenticated profitbricks object + + Returns: + True if a new virtual machine was created, false otherwise + """ + datacenter = module.params.get('datacenter') + name = module.params.get('name') + auto_increment = module.params.get('auto_increment') + count = module.params.get('count') + lan = module.params.get('lan') + wait_timeout = module.params.get('wait_timeout') + failed = True + datacenter_found = False + + virtual_machines = [] + virtual_machine_ids = [] + + # Locate UUID for datacenter if referenced by name. + datacenter_list = profitbricks.list_datacenters() + datacenter_id = _get_datacenter_id(datacenter_list, datacenter) + if datacenter_id: + datacenter_found = True + + if not datacenter_found: + datacenter_response = _create_datacenter(module, profitbricks) + datacenter_id = datacenter_response['id'] + + _wait_for_completion(profitbricks, datacenter_response, + wait_timeout, "create_virtual_machine") + + if auto_increment: + numbers = set() + count_offset = 1 + + try: + name % 0 + except TypeError as e: + # e.message is Python 2 only; use to_native(e) for 2/3 compatibility. + if to_native(e).startswith('not all'): + name = '%s%%d' % name + else: + module.fail_json(msg=to_native(e), exception=traceback.format_exc()) + + number_range = xrange(count_offset, count_offset + count + len(numbers)) + available_numbers = list(set(number_range).difference(numbers)) + names = [] + numbers_to_use = available_numbers[:count] + for number in numbers_to_use: + names.append(name % number) + else: + names = [name] + + # Prefetch a list of servers for later comparison. + server_list = profitbricks.list_servers(datacenter_id) + for name in names: + # Skip server creation if the server already exists.
+ if _get_server_id(server_list, name): + continue + + create_response = _create_machine(module, profitbricks, str(datacenter_id), name) + nics = profitbricks.list_nics(datacenter_id, create_response['id']) + for n in nics['items']: + if lan == n['properties']['lan']: + create_response.update({'public_ip': n['properties']['ips'][0]}) + + virtual_machines.append(create_response) + + failed = False + + results = { + 'failed': failed, + 'machines': virtual_machines, + 'action': 'create', + 'instance_ids': { + 'instances': [i['id'] for i in virtual_machines], + } + } + + return results + + +def remove_virtual_machine(module, profitbricks): + """ + Removes a virtual machine. + + This will remove the virtual machine along with the bootVolume. + + module : AnsibleModule object + profitbricks: authenticated profitbricks object. + + Not yet supported: handle deletion of attached data disks. + + Returns: + True if a new virtual server was deleted, false otherwise + """ + datacenter = module.params.get('datacenter') + instance_ids = module.params.get('instance_ids') + remove_boot_volume = module.params.get('remove_boot_volume') + changed = False + + if not isinstance(module.params.get('instance_ids'), list) or len(module.params.get('instance_ids')) < 1: + module.fail_json(msg='instance_ids should be a list of virtual machine ids or names, aborting') + + # Locate UUID for datacenter if referenced by name. + datacenter_list = profitbricks.list_datacenters() + datacenter_id = _get_datacenter_id(datacenter_list, datacenter) + if not datacenter_id: + module.fail_json(msg='Virtual data center \'%s\' not found.' % str(datacenter)) + + # Prefetch server list for later comparison. + server_list = profitbricks.list_servers(datacenter_id) + for instance in instance_ids: + # Locate UUID for server if referenced by name. + server_id = _get_server_id(server_list, instance) + if server_id: + # Remove the server's boot volume + if remove_boot_volume: + _remove_boot_volume(module, profitbricks, datacenter_id, server_id) + + # Remove the server + try: + server_response = profitbricks.delete_server(datacenter_id, server_id) + except Exception as e: + module.fail_json(msg="failed to terminate the virtual server: %s" % to_native(e), exception=traceback.format_exc()) + else: + changed = True + + return changed + + +def _remove_boot_volume(module, profitbricks, datacenter_id, server_id): + """ + Remove the boot volume from the server + """ + try: + server = profitbricks.get_server(datacenter_id, server_id) + volume_id = server['properties']['bootVolume']['id'] + volume_response = profitbricks.delete_volume(datacenter_id, volume_id) + except Exception as e: + module.fail_json(msg="failed to remove the server's boot volume: %s" % to_native(e), exception=traceback.format_exc()) + + +def startstop_machine(module, profitbricks, state): + """ + Starts or Stops a virtual machine. + + module : AnsibleModule object + profitbricks: authenticated profitbricks object. + + Returns: + True when the servers process the action successfully, false otherwise. + """ + if not isinstance(module.params.get('instance_ids'), list) or len(module.params.get('instance_ids')) < 1: + module.fail_json(msg='instance_ids should be a list of virtual machine ids or names, aborting') + + wait = module.params.get('wait') + wait_timeout = module.params.get('wait_timeout') + changed = False + + datacenter = module.params.get('datacenter') + instance_ids = module.params.get('instance_ids') + + # Locate UUID for datacenter if referenced by name. 
+ datacenter_list = profitbricks.list_datacenters() + datacenter_id = _get_datacenter_id(datacenter_list, datacenter) + if not datacenter_id: + module.fail_json(msg='Virtual data center \'%s\' not found.' % str(datacenter)) + + # Prefetch server list for later comparison. + server_list = profitbricks.list_servers(datacenter_id) + for instance in instance_ids: + # Locate UUID of server if referenced by name. + server_id = _get_server_id(server_list, instance) + if server_id: + _startstop_machine(module, profitbricks, datacenter_id, server_id) + changed = True + + if wait: + wait_timeout = time.time() + wait_timeout + while wait_timeout > time.time(): + matched_instances = [] + for res in profitbricks.list_servers(datacenter_id)['items']: + if state == 'running': + if res['properties']['vmState'].lower() == state: + matched_instances.append(res) + elif state == 'stopped': + if res['properties']['vmState'].lower() == 'shutoff': + matched_instances.append(res) + + if len(matched_instances) < len(instance_ids): + time.sleep(5) + else: + break + + if wait_timeout <= time.time(): + # waiting took too long + module.fail_json(msg="wait for virtual machine state timeout on %s" % time.asctime()) + + return changed + + +def _get_datacenter_id(datacenters, identity): + """ + Fetch and return datacenter UUID by datacenter name if found. + """ + for datacenter in datacenters['items']: + if identity in (datacenter['properties']['name'], datacenter['id']): + return datacenter['id'] + return None + + +def _get_server_id(servers, identity): + """ + Fetch and return server UUID by server name if found. + """ + for server in servers['items']: + if identity in (server['properties']['name'], server['id']): + return server['id'] + return None + + +def main(): + module = AnsibleModule( + argument_spec=dict( + datacenter=dict(), + name=dict(), + image=dict(), + cores=dict(type='int', default=2), + ram=dict(type='int', default=2048), + cpu_family=dict(choices=['AMD_OPTERON', 'INTEL_XEON'], + default='AMD_OPTERON'), + volume_size=dict(type='int', default=10), + disk_type=dict(choices=['HDD', 'SSD'], default='HDD'), + image_password=dict(default=None, no_log=True), + ssh_keys=dict(type='list', default=[]), + bus=dict(choices=['VIRTIO', 'IDE'], default='VIRTIO'), + lan=dict(type='int', default=1), + count=dict(type='int', default=1), + auto_increment=dict(type='bool', default=True), + instance_ids=dict(type='list', default=[]), + subscription_user=dict(), + subscription_password=dict(no_log=True), + location=dict(choices=LOCATIONS, default='us/las'), + assign_public_ip=dict(type='bool', default=False), + wait=dict(type='bool', default=True), + wait_timeout=dict(type='int', default=600), + remove_boot_volume=dict(type='bool', default=True), + state=dict(default='present', choices=['running', 'stopped', 'absent', 'present']), + ) + ) + + if not HAS_PB_SDK: + module.fail_json(msg='profitbricks required for this module') + + subscription_user = module.params.get('subscription_user') + subscription_password = module.params.get('subscription_password') + + profitbricks = ProfitBricksService( + username=subscription_user, + password=subscription_password) + + state = module.params.get('state') + + if state == 'absent': + if not module.params.get('datacenter'): + module.fail_json(msg='datacenter parameter is required for removing machines.') + + try: + (changed) = remove_virtual_machine(module, profitbricks) + module.exit_json(changed=changed) + except Exception as e: + module.fail_json(msg='failed to set instance state: %s' % to_native(e),
exception=traceback.format_exc()) + + elif state in ('running', 'stopped'): + if not module.params.get('datacenter'): + module.fail_json(msg='datacenter parameter is required for ' + + 'running or stopping machines.') + try: + (changed) = startstop_machine(module, profitbricks, state) + module.exit_json(changed=changed) + except Exception as e: + module.fail_json(msg='failed to set instance state: %s' % to_native(e), exception=traceback.format_exc()) + + elif state == 'present': + if not module.params.get('name'): + module.fail_json(msg='name parameter is required for new instance') + if not module.params.get('image'): + module.fail_json(msg='image parameter is required for new instance') + if not module.params.get('subscription_user'): + module.fail_json(msg='subscription_user parameter is ' + + 'required for new instance') + if not module.params.get('subscription_password'): + module.fail_json(msg='subscription_password parameter is ' + + 'required for new instance') + + try: + (machine_dict_array) = create_virtual_machine(module, profitbricks) + module.exit_json(**machine_dict_array) + except Exception as e: + module.fail_json(msg='failed to set instance state: %s' % to_native(e), exception=traceback.format_exc()) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/profitbricks/profitbricks_datacenter.py b/plugins/modules/cloud/profitbricks/profitbricks_datacenter.py new file mode 100644 index 0000000000..20f8636f61 --- /dev/null +++ b/plugins/modules/cloud/profitbricks/profitbricks_datacenter.py @@ -0,0 +1,258 @@ +#!/usr/bin/python +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: profitbricks_datacenter +short_description: Create or destroy a ProfitBricks Virtual Datacenter. +description: + - This is a simple module that supports creating or removing vDCs. A vDC is required before you can create servers. This module has a dependency + on profitbricks >= 1.0.0 +options: + name: + description: + - The name of the virtual datacenter. + required: true + description: + description: + - The description of the virtual datacenter. + required: false + location: + description: + - The datacenter location. + required: false + default: us/las + choices: [ "us/las", "de/fra", "de/fkb" ] + subscription_user: + description: + - The ProfitBricks username. Overrides the PB_SUBSCRIPTION_ID environment variable. + required: false + subscription_password: + description: + - The ProfitBricks password. Overrides the PB_PASSWORD environment variable. + required: false + wait: + description: + - wait for the datacenter to be created before returning + required: false + default: "yes" + type: bool + wait_timeout: + description: + - how long before wait gives up, in seconds + default: 600 + state: + description: + - create or terminate datacenters + required: false + default: 'present' + choices: [ "present", "absent" ] + +requirements: [ "profitbricks" ] +author: Matt Baldwin (@baldwinSPC) +''' + +EXAMPLES = ''' + +# Create a Datacenter +- profitbricks_datacenter: + name: Tardis One + wait_timeout: 500
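+ +# An additional illustrative example (not from the original module documentation): creating a datacenter in a specific location with a description. The values are placeholders. + +- profitbricks_datacenter: + name: Tardis Two + description: Secondary datacenter in Karlsruhe + location: de/fkb + wait_timeout: 500 + state: present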
+ +# Destroy a Datacenter. This will remove all servers, volumes, and other objects in the datacenter. + +- profitbricks_datacenter: + name: Tardis One + wait_timeout: 500 + state: absent + +''' + +import re +import time + +HAS_PB_SDK = True +try: + from profitbricks.client import ProfitBricksService, Datacenter +except ImportError: + HAS_PB_SDK = False + +from ansible.module_utils.basic import AnsibleModule + + +LOCATIONS = ['us/las', + 'de/fra', + 'de/fkb'] + +uuid_match = re.compile( + r'[\w]{8}-[\w]{4}-[\w]{4}-[\w]{4}-[\w]{12}', re.I) + + +def _wait_for_completion(profitbricks, promise, wait_timeout, msg): + if not promise: + return + wait_timeout = time.time() + wait_timeout + while wait_timeout > time.time(): + time.sleep(5) + operation_result = profitbricks.get_request( + request_id=promise['requestId'], + status=True) + + if operation_result['metadata']['status'] == "DONE": + return + elif operation_result['metadata']['status'] == "FAILED": + raise Exception( + 'Request ' + msg + ' "' + str( + promise['requestId']) + '" failed to complete.') + + raise Exception( + 'Timed out waiting for async operation ' + msg + ' "' + str( + promise['requestId'] + ) + '" to complete.') + + +def _remove_datacenter(module, profitbricks, datacenter): + try: + profitbricks.delete_datacenter(datacenter) + except Exception as e: + module.fail_json(msg="failed to remove the datacenter: %s" % str(e)) + + +def create_datacenter(module, profitbricks): + """ + Creates a Datacenter + + This will create a new Datacenter in the specified location. + + module : AnsibleModule object + profitbricks: authenticated profitbricks object. + + Returns: + True if a new datacenter was created, false otherwise + """ + name = module.params.get('name') + location = module.params.get('location') + description = module.params.get('description') + wait = module.params.get('wait') + wait_timeout = int(module.params.get('wait_timeout')) + + i = Datacenter( + name=name, + location=location, + description=description + ) + + try: + datacenter_response = profitbricks.create_datacenter(datacenter=i) + + if wait: + _wait_for_completion(profitbricks, datacenter_response, + wait_timeout, "_create_datacenter") + + results = { + 'datacenter_id': datacenter_response['id'] + } + + return results + + except Exception as e: + module.fail_json(msg="failed to create the new datacenter: %s" % str(e)) + + +def remove_datacenter(module, profitbricks): + """ + Removes a Datacenter. + + This will remove a datacenter. + + module : AnsibleModule object + profitbricks: authenticated profitbricks object.
+ + Returns: + True if the datacenter was deleted, false otherwise + """ + name = module.params.get('name') + changed = False + + if uuid_match.match(name): + _remove_datacenter(module, profitbricks, name) + changed = True + else: + datacenters = profitbricks.list_datacenters() + + for d in datacenters['items']: + vdc = profitbricks.get_datacenter(d['id']) + + if name == vdc['properties']['name']: + name = d['id'] + _remove_datacenter(module, profitbricks, name) + changed = True + + return changed + + +def main(): + module = AnsibleModule( + argument_spec=dict( + name=dict(), + description=dict(), + location=dict(choices=LOCATIONS, default='us/las'), + subscription_user=dict(), + subscription_password=dict(no_log=True), + wait=dict(type='bool', default=True), + wait_timeout=dict(default=600, type='int'), + state=dict(default='present', choices=['present', 'absent']), + ) + ) + if not HAS_PB_SDK: + module.fail_json(msg='profitbricks required for this module') + + if not module.params.get('subscription_user'): + module.fail_json(msg='subscription_user parameter is required') + if not module.params.get('subscription_password'): + module.fail_json(msg='subscription_password parameter is required') + + subscription_user = module.params.get('subscription_user') + subscription_password = module.params.get('subscription_password') + + profitbricks = ProfitBricksService( + username=subscription_user, + password=subscription_password) + + state = module.params.get('state') + + if state == 'absent': + if not module.params.get('name'): + module.fail_json(msg='name parameter is required for deleting a virtual datacenter.') + + try: + (changed) = remove_datacenter(module, profitbricks) + module.exit_json( + changed=changed) + except Exception as e: + module.fail_json(msg='failed to set datacenter state: %s' % str(e)) + + elif state == 'present': + if not module.params.get('name'): + module.fail_json(msg='name parameter is required for a new datacenter') + if not module.params.get('location'): + module.fail_json(msg='location parameter is required for a new datacenter') + + try: + (datacenter_dict_array) = create_datacenter(module, profitbricks) + module.exit_json(**datacenter_dict_array) + except Exception as e: + module.fail_json(msg='failed to set datacenter state: %s' % str(e)) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/profitbricks/profitbricks_nic.py b/plugins/modules/cloud/profitbricks/profitbricks_nic.py new file mode 100644 index 0000000000..12b8547de5 --- /dev/null +++ b/plugins/modules/cloud/profitbricks/profitbricks_nic.py @@ -0,0 +1,291 @@ +#!/usr/bin/python +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: profitbricks_nic +short_description: Create or Remove a NIC. +description: + - This module allows you to create or remove a NIC on a ProfitBricks server. This module has a dependency on profitbricks >= 1.0.0 +options: + datacenter: + description: + - The datacenter in which to operate. + required: true + server: + description: + - The server name or ID. + required: true + name: + description: + - The name or ID of the NIC. This is only required on deletes, but not on create. + required: false + lan: + description: + - The LAN to place the NIC on. You can pass a LAN that doesn't exist and it will be created. Required on create. + required: true + subscription_user: + description: + - The ProfitBricks username. Overrides the PB_SUBSCRIPTION_ID environment variable. + required: false + subscription_password: + description: + - The ProfitBricks password. Overrides the PB_PASSWORD environment variable. + required: false + wait: + description: + - wait for the operation to complete before returning + required: false + default: "yes" + type: bool + wait_timeout: + description: + - how long before wait gives up, in seconds + default: 600 + state: + description: + - Indicate desired state of the resource + required: false + default: 'present' + choices: ["present", "absent"] + +requirements: [ "profitbricks" ] +author: Matt Baldwin (@baldwinSPC) +''' + +EXAMPLES = ''' + +# Create a NIC +- profitbricks_nic: + datacenter: Tardis One + server: node002 + lan: 2 + wait_timeout: 500 + state: present + +# Remove a NIC +- profitbricks_nic: + datacenter: Tardis One + server: node002 + name: 7341c2454f + wait_timeout: 500 + state: absent
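+ +# An additional illustrative example (not from the original module documentation): creating a NIC with an explicit name so it can later be removed by that name. The values are placeholders. + +- profitbricks_nic: + datacenter: Tardis One + server: node002 + name: eth1-public + lan: 1 + wait_timeout: 500 + state: present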
+ +''' + +import re +import uuid +import time + +HAS_PB_SDK = True +try: + from profitbricks.client import ProfitBricksService, NIC +except ImportError: + HAS_PB_SDK = False + +from ansible.module_utils.basic import AnsibleModule + + +uuid_match = re.compile( + r'[\w]{8}-[\w]{4}-[\w]{4}-[\w]{4}-[\w]{12}', re.I) + + +def _wait_for_completion(profitbricks, promise, wait_timeout, msg): + if not promise: + return + wait_timeout = time.time() + wait_timeout + while wait_timeout > time.time(): + time.sleep(5) + operation_result = profitbricks.get_request( + request_id=promise['requestId'], + status=True) + + if operation_result['metadata']['status'] == "DONE": + return + elif operation_result['metadata']['status'] == "FAILED": + raise Exception( + 'Request ' + msg + ' "' + str( + promise['requestId']) + '" failed to complete.') + + raise Exception( + 'Timed out waiting for async operation ' + msg + ' "' + str( + promise['requestId'] + ) + '" to complete.') + + +def create_nic(module, profitbricks): + """ + Creates a NIC. + + module : AnsibleModule object + profitbricks: authenticated profitbricks object. + + Returns: + True if the NIC was created, false otherwise + """ + datacenter = module.params.get('datacenter') + server = module.params.get('server') + lan = module.params.get('lan') + name = module.params.get('name') + wait = module.params.get('wait') + wait_timeout = module.params.get('wait_timeout') + + # Locate UUID for Datacenter + if not (uuid_match.match(datacenter)): + datacenter_list = profitbricks.list_datacenters() + for d in datacenter_list['items']: + dc = profitbricks.get_datacenter(d['id']) + if datacenter == dc['properties']['name']: + datacenter = d['id'] + break + + # Locate UUID for Server + if not (uuid_match.match(server)): + server_list = profitbricks.list_servers(datacenter) + for s in server_list['items']: + if server == s['properties']['name']: + server = s['id'] + break + try: + n = NIC( + name=name, + lan=lan + ) + + nic_response = profitbricks.create_nic(datacenter, server, n) + + if wait: + _wait_for_completion(profitbricks, nic_response, + wait_timeout, "create_nic") + + return nic_response + + except Exception as e: + module.fail_json(msg="failed to create the NIC: %s" % str(e)) + + +def delete_nic(module, profitbricks): + """ + Removes a NIC + + module : AnsibleModule object + profitbricks: authenticated profitbricks object.
+ + Returns: + True if the NIC was removed, false otherwise + """ + datacenter = module.params.get('datacenter') + server = module.params.get('server') + name = module.params.get('name') + + # Locate UUID for Datacenter + if not (uuid_match.match(datacenter)): + datacenter_list = profitbricks.list_datacenters() + for d in datacenter_list['items']: + dc = profitbricks.get_datacenter(d['id']) + if datacenter == dc['properties']['name']: + datacenter = d['id'] + break + + # Locate UUID for Server + server_found = False + if not (uuid_match.match(server)): + server_list = profitbricks.list_servers(datacenter) + for s in server_list['items']: + if server == s['properties']['name']: + server_found = True + server = s['id'] + break + + if not server_found: + return False + + # Locate UUID for NIC + nic_found = False + if not (uuid_match.match(name)): + nic_list = profitbricks.list_nics(datacenter, server) + for n in nic_list['items']: + if name == n['properties']['name']: + nic_found = True + name = n['id'] + break + + if not nic_found: + return False + + try: + nic_response = profitbricks.delete_nic(datacenter, server, name) + return nic_response + except Exception as e: + module.fail_json(msg="failed to remove the NIC: %s" % str(e)) + + +def main(): + module = AnsibleModule( + argument_spec=dict( + datacenter=dict(), + server=dict(), + name=dict(default=str(uuid.uuid4()).replace('-', '')[:10]), + lan=dict(), + subscription_user=dict(), + subscription_password=dict(no_log=True), + wait=dict(type='bool', default=True), + wait_timeout=dict(type='int', default=600), + state=dict(default='present'), + ) + ) + + if not HAS_PB_SDK: + module.fail_json(msg='profitbricks required for this module') + + if not module.params.get('subscription_user'): + module.fail_json(msg='subscription_user parameter is required') + if not module.params.get('subscription_password'): + module.fail_json(msg='subscription_password parameter is required') + if not module.params.get('datacenter'): + module.fail_json(msg='datacenter parameter is required') + if not module.params.get('server'): + module.fail_json(msg='server parameter is required') + + subscription_user = module.params.get('subscription_user') + subscription_password = module.params.get('subscription_password') + + profitbricks = ProfitBricksService( + username=subscription_user, + password=subscription_password) + + state = module.params.get('state') + + if state == 'absent': + if not module.params.get('name'): + module.fail_json(msg='name parameter is required') + + try: + (changed) = delete_nic(module, profitbricks) + module.exit_json(changed=changed) + except Exception as e: + module.fail_json(msg='failed to set nic state: %s' % str(e)) + + elif state == 'present': + if not module.params.get('lan'): + module.fail_json(msg='lan parameter is required') + + try: + (nic_dict) = create_nic(module, profitbricks) + module.exit_json(nics=nic_dict) + except Exception as e: + module.fail_json(msg='failed to set nic state: %s' % str(e)) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/profitbricks/profitbricks_volume.py b/plugins/modules/cloud/profitbricks/profitbricks_volume.py new file mode 100644 index 0000000000..3b1e3eb8eb --- /dev/null +++ b/plugins/modules/cloud/profitbricks/profitbricks_volume.py @@ -0,0 +1,422 @@ +#!/usr/bin/python +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ 
= type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: profitbricks_volume +short_description: Create or destroy a volume. +description: + - Allows you to create or remove a volume from a ProfitBricks datacenter. This module has a dependency on profitbricks >= 1.0.0 +options: + datacenter: + description: + - The datacenter in which to create the volumes. + required: true + server: + description: + - The name or ID of the server to which the created volume will be attached. If not set, the volume is created without being attached to a server. + required: false + name: + description: + - The name of the volumes. You can enumerate the names using auto_increment. + required: true + size: + description: + - The size of the volume in GB. + required: false + default: 10 + bus: + description: + - The bus type. + required: false + default: VIRTIO + choices: [ "IDE", "VIRTIO" ] + image: + description: + - The system image ID for the volume, e.g. a3eae284-a2fe-11e4-b187-5f1f641608c8. This can also be a snapshot image ID. + required: true + image_password: + description: + - Password set for the administrative user. + required: false + ssh_keys: + description: + - Public SSH keys allowing access to the virtual machine. + required: false + disk_type: + description: + - The disk type of the volume. + required: false + default: HDD + choices: [ "HDD", "SSD" ] + licence_type: + description: + - The licence type for the volume. This is used when the image is non-standard. + required: false + default: UNKNOWN + choices: [ "LINUX", "WINDOWS", "UNKNOWN", "OTHER" ] + count: + description: + - The number of volumes you wish to create. + required: false + default: 1 + auto_increment: + description: + - Whether or not to increment a single number in the name for created volumes. + default: yes + type: bool + instance_ids: + description: + - List of instance IDs or names, currently only used when state='absent' to remove instances. + required: false + subscription_user: + description: + - The ProfitBricks username. Overrides the PB_SUBSCRIPTION_ID environment variable. + required: false + subscription_password: + description: + - The ProfitBricks password. Overrides the PB_PASSWORD environment variable.
+ required: false + wait: + description: + - wait for the volume to be created before returning + required: false + default: "yes" + type: bool + wait_timeout: + description: + - how long before wait gives up, in seconds + default: 600 + state: + description: + - create or terminate volumes + required: false + default: 'present' + choices: ["present", "absent"] + +requirements: [ "profitbricks" ] +author: Matt Baldwin (@baldwinSPC) +''' + +EXAMPLES = ''' + +# Create Multiple Volumes + +- profitbricks_volume: + datacenter: Tardis One + name: vol%02d + count: 5 + auto_increment: yes + wait_timeout: 500 + state: present + +# Remove Volumes + +- profitbricks_volume: + datacenter: Tardis One + instance_ids: + - 'vol01' + - 'vol02' + wait_timeout: 500 + state: absent
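+ +# An additional illustrative example (not from the original module documentation): creating a single SSD volume and attaching it to an existing server via the server parameter. The values are placeholders. + +- profitbricks_volume: + datacenter: Tardis One + server: node002 + name: data01 + size: 100 + disk_type: SSD + wait_timeout: 500 + state: present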
+ +''' + +import re +import time +import traceback + +HAS_PB_SDK = True +try: + from profitbricks.client import ProfitBricksService, Volume +except ImportError: + HAS_PB_SDK = False + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.six.moves import xrange +from ansible.module_utils._text import to_native + + +uuid_match = re.compile( + r'[\w]{8}-[\w]{4}-[\w]{4}-[\w]{4}-[\w]{12}', re.I) + + +def _wait_for_completion(profitbricks, promise, wait_timeout, msg): + if not promise: + return + wait_timeout = time.time() + wait_timeout + while wait_timeout > time.time(): + time.sleep(5) + operation_result = profitbricks.get_request( + request_id=promise['requestId'], + status=True) + + if operation_result['metadata']['status'] == "DONE": + return + elif operation_result['metadata']['status'] == "FAILED": + raise Exception( + 'Request ' + msg + ' "' + str( + promise['requestId']) + '" failed to complete.') + + raise Exception( + 'Timed out waiting for async operation ' + msg + ' "' + str( + promise['requestId'] + ) + '" to complete.') + + +def _create_volume(module, profitbricks, datacenter, name): + size = module.params.get('size') + bus = module.params.get('bus') + image = module.params.get('image') + image_password = module.params.get('image_password') + ssh_keys = module.params.get('ssh_keys') + disk_type = module.params.get('disk_type') + licence_type = module.params.get('licence_type') + wait_timeout = module.params.get('wait_timeout') + wait = module.params.get('wait') + + try: + v = Volume( + name=name, + size=size, + bus=bus, + image=image, + image_password=image_password, + ssh_keys=ssh_keys, + disk_type=disk_type, + licence_type=licence_type + ) + + volume_response = profitbricks.create_volume(datacenter, v) + + if wait: + _wait_for_completion(profitbricks, volume_response, + wait_timeout, "_create_volume") + + except Exception as e: + module.fail_json(msg="failed to create the volume: %s" % str(e)) + + return volume_response + + +def _delete_volume(module, profitbricks, datacenter, volume): + try: + profitbricks.delete_volume(datacenter, volume) + except Exception as e: + module.fail_json(msg="failed to remove the volume: %s" % str(e)) + + +def create_volume(module, profitbricks): + """ + Creates a volume. + + This will create a volume in a datacenter. + + module : AnsibleModule object + profitbricks: authenticated profitbricks object. + + Returns: + True if the volume was created, false otherwise + """ + datacenter = module.params.get('datacenter') + name = module.params.get('name') + auto_increment = module.params.get('auto_increment') + count = module.params.get('count') + + datacenter_found = False + failed = True + volumes = [] + + # Locate UUID for Datacenter + if not (uuid_match.match(datacenter)): + datacenter_list = profitbricks.list_datacenters() + for d in datacenter_list['items']: + dc = profitbricks.get_datacenter(d['id']) + if datacenter == dc['properties']['name']: + datacenter = d['id'] + datacenter_found = True + break + + if not datacenter_found: + module.fail_json(msg='datacenter could not be found.') + + if auto_increment: + numbers = set() + count_offset = 1 + + try: + name % 0 + except TypeError as e: + # e.message is Python 2 only; use to_native(e) for 2/3 compatibility. + if to_native(e).startswith('not all'): + name = '%s%%d' % name + else: + module.fail_json(msg=to_native(e), exception=traceback.format_exc()) + + number_range = xrange(count_offset, count_offset + count + len(numbers)) + available_numbers = list(set(number_range).difference(numbers)) + names = [] + numbers_to_use = available_numbers[:count] + for number in numbers_to_use: + names.append(name % number) + else: + names = [name] * count + + for name in names: + create_response = _create_volume(module, profitbricks, str(datacenter), name) + volumes.append(create_response) + _attach_volume(module, profitbricks, datacenter, create_response['id']) + failed = False + + results = { + 'failed': failed, + 'volumes': volumes, + 'action': 'create', + 'instance_ids': { + 'instances': [i['id'] for i in volumes], + } + } + + return results + + +def delete_volume(module, profitbricks): + """ + Removes a volume. + + This will remove a volume from a datacenter. + + module : AnsibleModule object + profitbricks: authenticated profitbricks object. + + Returns: + True if the volume was removed, false otherwise + """ + if not isinstance(module.params.get('instance_ids'), list) or len(module.params.get('instance_ids')) < 1: + module.fail_json(msg='instance_ids should be a list of volume ids or names, aborting') + + datacenter = module.params.get('datacenter') + changed = False + instance_ids = module.params.get('instance_ids') + + # Locate UUID for Datacenter + if not (uuid_match.match(datacenter)): + datacenter_list = profitbricks.list_datacenters() + for d in datacenter_list['items']: + dc = profitbricks.get_datacenter(d['id']) + if datacenter == dc['properties']['name']: + datacenter = d['id'] + break + + for n in instance_ids: + if uuid_match.match(n): + _delete_volume(module, profitbricks, datacenter, n) + changed = True + else: + volumes = profitbricks.list_volumes(datacenter) + for v in volumes['items']: + if n == v['properties']['name']: + volume_id = v['id'] + _delete_volume(module, profitbricks, datacenter, volume_id) + changed = True + + return changed + + +def _attach_volume(module, profitbricks, datacenter, volume): + """ + Attaches a volume. + + This will attach a volume to the server. + + module : AnsibleModule object + profitbricks: authenticated profitbricks object.
+ + Returns: + True if the volume was attached, false otherwise + """ + server = module.params.get('server') + + # If no server was specified, leave the volume unattached. + if not server: + return + + # Locate UUID for Server + if not (uuid_match.match(server)): + server_list = profitbricks.list_servers(datacenter) + for s in server_list['items']: + if server == s['properties']['name']: + server = s['id'] + break + + try: + return profitbricks.attach_volume(datacenter, server, volume) + except Exception as e: + module.fail_json(msg='failed to attach volume: %s' % to_native(e), exception=traceback.format_exc()) + + +def main(): + module = AnsibleModule( + argument_spec=dict( + datacenter=dict(), + server=dict(), + name=dict(), + size=dict(type='int', default=10), + bus=dict(choices=['VIRTIO', 'IDE'], default='VIRTIO'), + image=dict(), + image_password=dict(default=None, no_log=True), + ssh_keys=dict(type='list', default=[]), + disk_type=dict(choices=['HDD', 'SSD'], default='HDD'), + licence_type=dict(default='UNKNOWN'), + count=dict(type='int', default=1), + auto_increment=dict(type='bool', default=True), + instance_ids=dict(type='list', default=[]), + subscription_user=dict(), + subscription_password=dict(no_log=True), + wait=dict(type='bool', default=True), + wait_timeout=dict(type='int', default=600), + state=dict(default='present', choices=['present', 'absent']), + ) + ) + + if not HAS_PB_SDK: + module.fail_json(msg='profitbricks required for this module') + + if not module.params.get('subscription_user'): + module.fail_json(msg='subscription_user parameter is required') + if not module.params.get('subscription_password'): + module.fail_json(msg='subscription_password parameter is required') + + subscription_user = module.params.get('subscription_user') + subscription_password = module.params.get('subscription_password') + + profitbricks = ProfitBricksService( + username=subscription_user, + password=subscription_password) + + state = module.params.get('state') + + if state == 'absent': + if not module.params.get('datacenter'): + module.fail_json(msg='datacenter parameter is required for removing volumes.') + + try: + (changed) = delete_volume(module, profitbricks) + module.exit_json(changed=changed) + except Exception as e: + module.fail_json(msg='failed to set volume state: %s' % to_native(e), exception=traceback.format_exc()) + + elif state == 'present': + if not module.params.get('datacenter'): + module.fail_json(msg='datacenter parameter is required for new instance') + if not module.params.get('name'): + module.fail_json(msg='name parameter is required for new instance') + + try: + (volume_dict_array) = create_volume(module, profitbricks) + module.exit_json(**volume_dict_array) + except Exception as e: + module.fail_json(msg='failed to set volume state: %s' % to_native(e), exception=traceback.format_exc()) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/profitbricks/profitbricks_volume_attachments.py b/plugins/modules/cloud/profitbricks/profitbricks_volume_attachments.py new file mode 100644 index 0000000000..82bb95ae55 --- /dev/null +++ b/plugins/modules/cloud/profitbricks/profitbricks_volume_attachments.py @@ -0,0 +1,263 @@ +#!/usr/bin/python +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: profitbricks_volume_attachments +short_description: Attach or detach a volume.
+description: + - Allows you to attach or detach a volume from a ProfitBricks server. This module has a dependency on profitbricks >= 1.0.0 +options: + datacenter: + description: + - The datacenter in which to operate. + required: true + server: + description: + - The name of the server to which you wish to attach the volume or from which to detach it. + required: true + volume: + description: + - The volume name or ID. + required: true + subscription_user: + description: + - The ProfitBricks username. Overrides the PB_SUBSCRIPTION_ID environment variable. + required: false + subscription_password: + description: + - The ProfitBricks password. Overrides the PB_PASSWORD environment variable. + required: false + wait: + description: + - wait for the operation to complete before returning + required: false + default: "yes" + type: bool + wait_timeout: + description: + - how long before wait gives up, in seconds + default: 600 + state: + description: + - Indicate desired state of the resource + required: false + default: 'present' + choices: ["present", "absent"] + +requirements: [ "profitbricks" ] +author: Matt Baldwin (@baldwinSPC) +''' + +EXAMPLES = ''' + +# Attach a Volume + +- profitbricks_volume_attachments: + datacenter: Tardis One + server: node002 + volume: vol01 + wait_timeout: 500 + state: present + +# Detach a Volume + +- profitbricks_volume_attachments: + datacenter: Tardis One + server: node002 + volume: vol01 + wait_timeout: 500 + state: absent + +''' + +import re +import time + +HAS_PB_SDK = True +try: + from profitbricks.client import ProfitBricksService +except ImportError: + HAS_PB_SDK = False + +from ansible.module_utils.basic import AnsibleModule + + +uuid_match = re.compile( + r'[\w]{8}-[\w]{4}-[\w]{4}-[\w]{4}-[\w]{12}', re.I) + + +def _wait_for_completion(profitbricks, promise, wait_timeout, msg): + if not promise: + return + wait_timeout = time.time() + wait_timeout + while wait_timeout > time.time(): + time.sleep(5) + operation_result = profitbricks.get_request( + request_id=promise['requestId'], + status=True) + + if operation_result['metadata']['status'] == "DONE": + return + elif operation_result['metadata']['status'] == "FAILED": + raise Exception( + 'Request ' + msg + ' "' + str( + promise['requestId']) + '" failed to complete.') + + raise Exception( + 'Timed out waiting for async operation ' + msg + ' "' + str( + promise['requestId'] + ) + '" to complete.') + + +def attach_volume(module, profitbricks): + """ + Attaches a volume. + + This will attach a volume to the server. + + module : AnsibleModule object + profitbricks: authenticated profitbricks object.
+ + Returns: + True if the volume was attached, false otherwise + """ + datacenter = module.params.get('datacenter') + server = module.params.get('server') + volume = module.params.get('volume') + + # Locate UUID for Datacenter + if not (uuid_match.match(datacenter)): + datacenter_list = profitbricks.list_datacenters() + for d in datacenter_list['items']: + dc = profitbricks.get_datacenter(d['id']) + if datacenter == dc['properties']['name']: + datacenter = d['id'] + break + + # Locate UUID for Server + if not (uuid_match.match(server)): + server_list = profitbricks.list_servers(datacenter) + for s in server_list['items']: + if server == s['properties']['name']: + server = s['id'] + break + + # Locate UUID for Volume + if not (uuid_match.match(volume)): + volume_list = profitbricks.list_volumes(datacenter) + for v in volume_list['items']: + if volume == v['properties']['name']: + volume = v['id'] + break + + return profitbricks.attach_volume(datacenter, server, volume) + + +def detach_volume(module, profitbricks): + """ + Detaches a volume. + + This will remove a volume from the server. + + module : AnsibleModule object + profitbricks: authenticated profitbricks object. + + Returns: + True if the volume was detached, false otherwise + """ + datacenter = module.params.get('datacenter') + server = module.params.get('server') + volume = module.params.get('volume') + + # Locate UUID for Datacenter + if not (uuid_match.match(datacenter)): + datacenter_list = profitbricks.list_datacenters() + for d in datacenter_list['items']: + dc = profitbricks.get_datacenter(d['id']) + if datacenter == dc['properties']['name']: + datacenter = d['id'] + break + + # Locate UUID for Server + if not (uuid_match.match(server)): + server_list = profitbricks.list_servers(datacenter) + for s in server_list['items']: + if server == s['properties']['name']: + server = s['id'] + break + + # Locate UUID for Volume + if not (uuid_match.match(volume)): + volume_list = profitbricks.list_volumes(datacenter) + for v in volume_list['items']: + if volume == v['properties']['name']: + volume = v['id'] + break + + return profitbricks.detach_volume(datacenter, server, volume) + + +def main(): + module = AnsibleModule( + argument_spec=dict( + datacenter=dict(), + server=dict(), + volume=dict(), + subscription_user=dict(), + subscription_password=dict(no_log=True), + wait=dict(type='bool', default=True), + wait_timeout=dict(type='int', default=600), + state=dict(default='present'), + ) + ) + + if not HAS_PB_SDK: + module.fail_json(msg='profitbricks required for this module') + + if not module.params.get('subscription_user'): + module.fail_json(msg='subscription_user parameter is required') + if not module.params.get('subscription_password'): + module.fail_json(msg='subscription_password parameter is required') + if not module.params.get('datacenter'): + module.fail_json(msg='datacenter parameter is required') + if not module.params.get('server'): + module.fail_json(msg='server parameter is required') + if not module.params.get('volume'): + module.fail_json(msg='volume parameter is required') + + subscription_user = module.params.get('subscription_user') + subscription_password = module.params.get('subscription_password') + + profitbricks = ProfitBricksService( + username=subscription_user, + password=subscription_password) + + state = module.params.get('state') + + if state == 'absent': + try: + (changed) = detach_volume(module, profitbricks) + module.exit_json(changed=changed) + except Exception as e: + module.fail_json(msg='failed 
to set volume_attach state: %s' % str(e)) + elif state == 'present': + try: + attach_volume(module, profitbricks) + module.exit_json() + except Exception as e: + module.fail_json(msg='failed to set volume_attach state: %s' % str(e)) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/pubnub/pubnub_blocks.py b/plugins/modules/cloud/pubnub/pubnub_blocks.py new file mode 100644 index 0000000000..f1dd6f013b --- /dev/null +++ b/plugins/modules/cloud/pubnub/pubnub_blocks.py @@ -0,0 +1,621 @@ +#!/usr/bin/python +# +# PubNub Real-time Cloud-Hosted Push API and Push Notification Client +# Frameworks +# Copyright (C) 2016 PubNub Inc. +# http://www.pubnub.com/ +# http://www.pubnub.com/terms +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: pubnub_blocks +short_description: PubNub blocks management module. +description: + - "This module allows Ansible to interface with the PubNub BLOCKS + infrastructure by providing the following operations: create / remove, + start / stop and rename for blocks and create / modify / remove for event + handlers" +author: + - PubNub (@pubnub) + - Sergey Mamontov (@parfeon) +requirements: + - "python >= 2.7" + - "pubnub_blocks_client >= 1.0" +options: + email: + description: + - Email of the account for which a new session should be started. + - "Not required if C(cache) contains the result of a previous module + call (in the same play)." + required: false + password: + description: + - Password which matches the account to which the specified C(email) + belongs. + - "Not required if C(cache) contains the result of a previous module + call (in the same play)." + required: false + cache: + description: > + If a single play uses the blocks management module several times, it + is preferable to enable 'caching' by registering the result of the + previous module call and passing it to this parameter. + required: false + default: {} + account: + description: + - "Name of the PubNub account whose C(application) will be used to + manage blocks." + - "The user's account will be used if the value is not set or empty." + required: false + application: + description: + - "Name of the target PubNub application for which blocks will be + configured on the specified C(keyset)." + required: true + keyset: + description: + - Name of the application's key set which is bound to the managed + blocks. + required: true + state: + description: + - "Intended block state after the event handler creation / update + process completes." + required: false + default: 'started' + choices: ['started', 'stopped', 'present', 'absent'] + name: + description: + - Name of the managed block which will later be visible on + admin.pubnub.com. + required: true + description: + description: + - Short block description which will later be visible on + admin.pubnub.com. Used only if the block doesn't exist; it won't + change the description of an existing block. + required: false + default: 'New block' + event_handlers: + description: + - "List of event handlers which should be updated for the specified + block C(name)." + - "Each entry for a new event handler should contain: C(name), C(src), + C(channels), C(event). C(name) is used as the event handler name and + can be used later to make changes to it." + - C(src) is the full path to the file with the event handler code.
+ - "C(channels) is name of channel from which event handler is waiting + for events." + - "C(event) is type of event which is able to trigger event handler: + I(js-before-publish), I(js-after-publish), I(js-after-presence)." + - "Each entry for existing handlers should contain C(name) (so target + handler can be identified). Rest parameters (C(src), C(channels) and + C(event)) can be added if changes required for them." + - "It is possible to rename event handler by adding C(changes) key to + event handler payload and pass dictionary, which will contain single key + C(name), where new name should be passed." + - "To remove particular event handler it is possible to set C(state) for + it to C(absent) and it will be removed." + required: false + default: [] + changes: + description: + - "List of fields which should be changed by block itself (doesn't + affect any event handlers)." + - "Possible options for change is: C(name)." + required: false + default: {} + validate_certs: + description: + - "This key allow to try skip certificates check when performing REST API + calls. Sometimes host may have issues with certificates on it and this + will cause problems to call PubNub REST API." + - If check should be ignored C(False) should be passed to this parameter. + required: false + default: true + type: bool +''' + +EXAMPLES = ''' +# Event handler create example. +- name: Create single event handler + pubnub_blocks: + email: '{{ email }}' + password: '{{ password }}' + application: '{{ app_name }}' + keyset: '{{ keyset_name }}' + name: '{{ block_name }}' + event_handlers: + - + src: '{{ path_to_handler_source }}' + name: '{{ handler_name }}' + event: 'js-before-publish' + channels: '{{ handler_channel }}' + +# Change event handler trigger event type. +- name: Change event handler 'event' + pubnub_blocks: + email: '{{ email }}' + password: '{{ password }}' + application: '{{ app_name }}' + keyset: '{{ keyset_name }}' + name: '{{ block_name }}' + event_handlers: + - + name: '{{ handler_name }}' + event: 'js-after-publish' + +# Stop block and event handlers. 
+ +# Stop block and event handlers. +- name: Stopping block + pubnub_blocks: + email: '{{ email }}' + password: '{{ password }}' + application: '{{ app_name }}' + keyset: '{{ keyset_name }}' + name: '{{ block_name }}' + state: stopped + +# Multiple module calls with cached result passing +- name: Create '{{ block_name }}' block + register: module_cache + pubnub_blocks: + email: '{{ email }}' + password: '{{ password }}' + application: '{{ app_name }}' + keyset: '{{ keyset_name }}' + name: '{{ block_name }}' + state: present +- name: Add '{{ event_handler_1_name }}' handler to '{{ block_name }}' + register: module_cache + pubnub_blocks: + cache: '{{ module_cache }}' + application: '{{ app_name }}' + keyset: '{{ keyset_name }}' + name: '{{ block_name }}' + state: present + event_handlers: + - + src: '{{ path_to_handler_1_source }}' + name: '{{ event_handler_1_name }}' + channels: '{{ event_handler_1_channel }}' + event: 'js-before-publish' +- name: Add '{{ event_handler_2_name }}' handler to '{{ block_name }}' + register: module_cache + pubnub_blocks: + cache: '{{ module_cache }}' + application: '{{ app_name }}' + keyset: '{{ keyset_name }}' + name: '{{ block_name }}' + state: present + event_handlers: + - + src: '{{ path_to_handler_2_source }}' + name: '{{ event_handler_2_name }}' + channels: '{{ event_handler_2_channel }}' + event: 'js-before-publish' +- name: Start '{{ block_name }}' block + register: module_cache + pubnub_blocks: + cache: '{{ module_cache }}' + application: '{{ app_name }}' + keyset: '{{ keyset_name }}' + name: '{{ block_name }}' + state: started +''' + +RETURN = ''' +module_cache: + description: "Cached account information. If the module is used several + times within a single play, it is better to pass the cached data to + subsequent module calls to speed up the process." + type: dict + returned: always +''' +import copy +import os + +try: + # Import PubNub BLOCKS client. + from pubnub_blocks_client import User, Account, Owner, Application, Keyset + from pubnub_blocks_client import Block, EventHandler + from pubnub_blocks_client import exceptions + HAS_PUBNUB_BLOCKS_CLIENT = True +except ImportError: + HAS_PUBNUB_BLOCKS_CLIENT = False + User = None + Account = None + Owner = None + Application = None + Keyset = None + Block = None + EventHandler = None + exceptions = None + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_text + + +def pubnub_user(module): + """Create and configure user model if possible. + + :type module: AnsibleModule + :param module: Reference on module which contains module launch + information and status report methods. + + :rtype: User + :return: Reference on initialized and ready to use user or 'None' if + not all required information has been passed to the block. + """ + user = None + params = module.params + + if params.get('cache') and params['cache'].get('module_cache'): + cache = params['cache']['module_cache'] + user = User() + user.restore(cache=copy.deepcopy(cache['pnm_user'])) + elif params.get('email') and params.get('password'): + user = User(email=params.get('email'), password=params.get('password')) + else: + err_msg = 'It looks like no account credentials have been passed ' \ + 'or the \'cache\' field doesn\'t contain the result of a ' \ + 'previous module call.' + module.fail_json(msg='Missing account credentials.', + description=err_msg, changed=False) + + return user + + +def pubnub_account(module, user): + """Create and configure account if possible.
+ + :type module: AnsibleModule + :param module: Reference on module which contains module launch + information and status report methods. + :type user: User + :param user: Reference on authorized user for which one of the accounts + should be used during manipulations with blocks. + + :rtype: Account + :return: Reference on initialized and ready to use account or 'None' if + not all required information has been passed to the block. + """ + params = module.params + if params.get('account'): + account_name = params.get('account') + account = user.account(name=params.get('account')) + if account is None: + err_frmt = 'It looks like there is no \'{0}\' account for the ' \ + 'authorized user. Please make sure the correct ' \ + 'name has been passed during module configuration.' + module.fail_json(msg='Missing account.', + description=err_frmt.format(account_name), + changed=False) + else: + account = user.accounts()[0] + + return account + + +def pubnub_application(module, account): + """Retrieve reference on target application from account model. + + NOTE: If account authorization fails or there is no application with + the specified name, the module will exit with an error. + :type module: AnsibleModule + :param module: Reference on module which contains module launch + information and status report methods. + :type account: Account + :param account: Reference on PubNub account model from which reference + on application should be fetched. + + :rtype: Application + :return: Reference on initialized and ready to use application model. + """ + application = None + params = module.params + try: + application = account.application(params['application']) + except (exceptions.AccountError, exceptions.GeneralPubNubError) as exc: + exc_msg = _failure_title_from_exception(exc) + exc_descr = exc.message if hasattr(exc, 'message') else exc.args[0] + module.fail_json(msg=exc_msg, description=exc_descr, + changed=account.changed, + module_cache=dict(account)) + + if application is None: + err_fmt = 'There is no \'{0}\' application for {1}. Make sure the ' \ + 'correct application name has been passed. If the ' \ + 'application doesn\'t exist you can create it on ' \ + 'admin.pubnub.com.' + email = account.owner.email + module.fail_json(msg=err_fmt.format(params['application'], email), + changed=account.changed, module_cache=dict(account)) + + return application + + +def pubnub_keyset(module, account, application): + """Retrieve reference on target keyset from application model. + + NOTE: If there is no keyset with the specified name, the module will + exit with an error. + :type module: AnsibleModule + :param module: Reference on module which contains module launch + information and status report methods. + :type account: Account + :param account: Reference on PubNub account model which will be + used in case of error to export cached data. + :type application: Application + :param application: Reference on PubNub application model from which + reference on keyset should be fetched. + + :rtype: Keyset + :return: Reference on initialized and ready to use keyset model. + """ + params = module.params + keyset = application.keyset(params['keyset']) + if keyset is None: + err_fmt = 'There is no \'{0}\' keyset for the \'{1}\' application. ' \ + 'Make sure the correct keyset name has been passed. If the ' \ + 'keyset doesn\'t exist you can create it on admin.pubnub.com.'
+        module.fail_json(msg=err_fmt.format(params['keyset'],
+                                            application.name),
+                         changed=account.changed, module_cache=dict(account))
+
+    return keyset
+
+
+def pubnub_block(module, account, keyset):
+    """Retrieve a reference to the target block from the keyset model.
+
+    NOTE: If there is no block with the specified name and the module is
+    configured to start/stop it, the module will exit with an error.
+    :type module:  AnsibleModule
+    :param module: Reference to the module which contains launch
+                   information and status reporting methods.
+    :type account: Account
+    :param account: Reference to the PubNub account model which will be
+                    used to export cached data in case of error.
+    :type keyset:  Keyset
+    :param keyset: Reference to the keyset model from which the block
+                   reference should be fetched.
+
+    :rtype:  Block
+    :return: Reference to an initialized and ready-to-use block model.
+    """
+    block = None
+    params = module.params
+    try:
+        block = keyset.block(params['name'])
+    except (exceptions.KeysetError, exceptions.GeneralPubNubError) as exc:
+        exc_msg = _failure_title_from_exception(exc)
+        exc_descr = exc.message if hasattr(exc, 'message') else exc.args[0]
+        module.fail_json(msg=exc_msg, description=exc_descr,
+                         changed=account.changed, module_cache=dict(account))
+
+    # Report an error if the block doesn't exist but was requested to be
+    # started/stopped.
+    if block is None and params['state'] in ['started', 'stopped']:
+        block_name = params.get('name')
+        module.fail_json(msg="'{0}' block doesn't exist.".format(block_name),
+                         changed=account.changed, module_cache=dict(account))
+
+    if block is None and params['state'] == 'present':
+        block = Block(name=params.get('name'),
+                      description=params.get('description'))
+        keyset.add_block(block)
+
+    if block:
+        # Update block information if required.
+        if params.get('changes') and params['changes'].get('name'):
+            block.name = params['changes']['name']
+        if params.get('description'):
+            block.description = params.get('description')
+
+    return block
+
+
+def pubnub_event_handler(block, data):
+    """Retrieve a reference to the target event handler from the block
+    model.
+
+    :type block:  Block
+    :param block: Reference to the block model from which event handler
+                  references should be fetched.
+    :type data:   dict
+    :param data:  Dictionary which contains information about the event
+                  handler and whether it should be created or not.
+
+    :rtype:  EventHandler
+    :return: Reference to an initialized and ready-to-use event handler
+             model. 'None' is returned if there is no handler with the
+             specified name and no request to create one.
+    """
+    event_handler = block.event_handler(data['name'])
+
+    # Prepare payload for event handler update.
+    changed_name = (data.pop('changes').get('name')
+                    if 'changes' in data else None)
+    name = data.get('name') or changed_name
+    channels = data.get('channels')
+    event = data.get('event')
+    code = _content_of_file_at_path(data.get('src'))
+    state = data.get('state') or 'present'
+
+    # Create event handler if required.
+    if event_handler is None and state == 'present':
+        event_handler = EventHandler(name=name, channels=channels, event=event,
+                                     code=code)
+        block.add_event_handler(event_handler)
+
+    # Update event handler if required.
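+    # Only fields explicitly provided in the task data are overwritten
+    # below; omitted fields keep their current values on the handler.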
+    if event_handler is not None and state == 'present':
+        if name is not None:
+            event_handler.name = name
+        if channels is not None:
+            event_handler.channels = channels
+        if event is not None:
+            event_handler.event = event
+        if code is not None:
+            event_handler.code = code
+
+    return event_handler
+
+
+def _failure_title_from_exception(exception):
+    """Compose a human-readable title for a module error.
+
+    The title is based on the status code, if one has been provided.
+    :type exception:  exceptions.GeneralPubNubError
+    :param exception: Reference to the exception for which a title should
+                      be composed.
+
+    :rtype:  str
+    :return: Error title which should be shown on module failure.
+    """
+    title = 'General REST API access error.'
+    if exception.code == exceptions.PN_AUTHORIZATION_MISSING_CREDENTIALS:
+        title = 'Authorization error: missing credentials.'
+    elif exception.code == exceptions.PN_AUTHORIZATION_WRONG_CREDENTIALS:
+        title = 'Authorization error: wrong credentials.'
+    elif exception.code == exceptions.PN_USER_INSUFFICIENT_RIGHTS:
+        title = 'API access error: insufficient access rights.'
+    elif exception.code == exceptions.PN_API_ACCESS_TOKEN_EXPIRED:
+        title = 'API access error: time token expired.'
+    elif exception.code == exceptions.PN_KEYSET_BLOCK_EXISTS:
+        title = 'Block creation failed: a block with the same name ' \
+                'already exists.'
+    elif exception.code == exceptions.PN_KEYSET_BLOCKS_FETCH_DID_FAIL:
+        title = 'Unable to fetch the list of blocks for the keyset.'
+    elif exception.code == exceptions.PN_BLOCK_CREATE_DID_FAIL:
+        title = 'Block creation failed.'
+    elif exception.code == exceptions.PN_BLOCK_UPDATE_DID_FAIL:
+        title = 'Block update failed.'
+    elif exception.code == exceptions.PN_BLOCK_REMOVE_DID_FAIL:
+        title = 'Block removal failed.'
+    elif exception.code == exceptions.PN_BLOCK_START_STOP_DID_FAIL:
+        title = 'Block start/stop failed.'
+    elif exception.code == exceptions.PN_EVENT_HANDLER_MISSING_FIELDS:
+        title = 'Event handler creation failed: missing fields.'
+    elif exception.code == exceptions.PN_BLOCK_EVENT_HANDLER_EXISTS:
+        title = 'Event handler creation failed: a handler with the same ' \
+                'name already exists.'
+    elif exception.code == exceptions.PN_EVENT_HANDLER_CREATE_DID_FAIL:
+        title = 'Event handler creation failed.'
+    elif exception.code == exceptions.PN_EVENT_HANDLER_UPDATE_DID_FAIL:
+        title = 'Event handler update failed.'
+    elif exception.code == exceptions.PN_EVENT_HANDLER_REMOVE_DID_FAIL:
+        title = 'Event handler removal failed.'
+
+    return title
+
+
+def _content_of_file_at_path(path):
+    """Read file content.
+
+    Try to read the content of the file at the specified path.
+    :type path:  str
+    :param path: Full path to the file which should be read.
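+
+    NOTE: File content is decoded to text using 'surrogate_or_strict'
+    error handling; on decode failure 'None' is returned.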
+    :rtype:  str
+    :return: File content, or 'None' if the file is missing or its content
+             can't be decoded.
+    """
+    content = None
+    if path and os.path.exists(path):
+        with open(path, mode="rt") as opened_file:
+            b_content = opened_file.read()
+            try:
+                content = to_text(b_content, errors='surrogate_or_strict')
+            except UnicodeError:
+                pass
+
+    return content
+
+
+def main():
+    fields = dict(
+        email=dict(default='', required=False, type='str'),
+        password=dict(default='', required=False, type='str', no_log=True),
+        account=dict(default='', required=False, type='str'),
+        application=dict(required=True, type='str'),
+        keyset=dict(required=True, type='str'),
+        state=dict(default='present', type='str',
+                   choices=['started', 'stopped', 'present', 'absent']),
+        name=dict(required=True, type='str'), description=dict(type='str'),
+        event_handlers=dict(default=list(), type='list'),
+        changes=dict(default=dict(), type='dict'),
+        cache=dict(default=dict(), type='dict'),
+        validate_certs=dict(default=True, type='bool'))
+    module = AnsibleModule(argument_spec=fields, supports_check_mode=True)
+
+    if not HAS_PUBNUB_BLOCKS_CLIENT:
+        module.fail_json(msg='pubnub_blocks_client is required for this '
+                             'module.')
+
+    params = module.params
+
+    # Authorize user.
+    user = pubnub_user(module)
+    # Initialize PubNub account instance.
+    account = pubnub_account(module, user=user)
+    # Try to fetch the application with which the module should work.
+    application = pubnub_application(module, account=account)
+    # Try to fetch the keyset with which the module should work.
+    keyset = pubnub_keyset(module, account=account, application=application)
+    # Try to fetch the block with which the module should work.
+    block = pubnub_block(module, account=account, keyset=keyset)
+    is_new_block = block is not None and block.uid == -1
+
+    # Check whether the block should be removed or not.
+    if block is not None and params['state'] == 'absent':
+        keyset.remove_block(block)
+        block = None
+
+    if block is not None:
+        # Update block information if required.
+        if params.get('changes') and params['changes'].get('name'):
+            block.name = params['changes']['name']
+
+        # Process changes to event handlers.
+        for event_handler_data in params.get('event_handlers') or list():
+            state = event_handler_data.get('state') or 'present'
+            event_handler = pubnub_event_handler(data=event_handler_data,
+                                                 block=block)
+            if state == 'absent' and event_handler:
+                block.delete_event_handler(event_handler)
+
+    # Update block operation state if required.
+    if block and not is_new_block:
+        if params['state'] == 'started':
+            block.start()
+        elif params['state'] == 'stopped':
+            block.stop()
+
+    # Save current account state.
+    if not module.check_mode:
+        try:
+            account.save()
+        except (exceptions.APIAccessError, exceptions.KeysetError,
+                exceptions.BlockError, exceptions.EventHandlerError,
+                exceptions.GeneralPubNubError) as exc:
+            module_cache = dict(account)
+            module_cache.update(dict(pnm_user=dict(user)))
+            exc_msg = _failure_title_from_exception(exc)
+            exc_descr = exc.message if hasattr(exc, 'message') else exc.args[0]
+            module.fail_json(msg=exc_msg, description=exc_descr,
+                             changed=account.changed,
+                             module_cache=module_cache)
+
+    # Report module execution results.
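+    # The exported structure is, roughly, the serialized account state with
+    # the serialized user merged in under the 'pnm_user' key. Feeding this
+    # dict back in through the 'cache' parameter lets follow-up tasks skip
+    # re-authorization (see the EXAMPLES section above).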
+ module_cache = dict(account) + module_cache.update(dict(pnm_user=dict(user))) + changed_will_change = account.changed or account.will_change + module.exit_json(changed=changed_will_change, module_cache=module_cache) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/rackspace/rax.py b/plugins/modules/cloud/rackspace/rax.py new file mode 100644 index 0000000000..b89c01a865 --- /dev/null +++ b/plugins/modules/cloud/rackspace/rax.py @@ -0,0 +1,883 @@ +#!/usr/bin/python +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: rax +short_description: create / delete an instance in Rackspace Public Cloud +description: + - creates / deletes a Rackspace Public Cloud instance and optionally + waits for it to be 'running'. +options: + auto_increment: + description: + - Whether or not to increment a single number with the name of the + created servers. Only applicable when used with the I(group) attribute + or meta key. + type: bool + default: 'yes' + boot_from_volume: + description: + - Whether or not to boot the instance from a Cloud Block Storage volume. + If C(yes) and I(image) is specified a new volume will be created at + boot time. I(boot_volume_size) is required with I(image) to create a + new volume at boot time. + type: bool + default: 'no' + boot_volume: + description: + - Cloud Block Storage ID or Name to use as the boot volume of the + instance + boot_volume_size: + description: + - Size of the volume to create in Gigabytes. This is only required with + I(image) and I(boot_from_volume). + default: 100 + boot_volume_terminate: + description: + - Whether the I(boot_volume) or newly created volume from I(image) will + be terminated when the server is terminated + type: bool + default: 'no' + config_drive: + description: + - Attach read-only configuration drive to server as label config-2 + type: bool + default: 'no' + count: + description: + - number of instances to launch + default: 1 + count_offset: + description: + - number count to start at + default: 1 + disk_config: + description: + - Disk partitioning strategy + choices: + - auto + - manual + default: auto + exact_count: + description: + - Explicitly ensure an exact count of instances, used with + state=active/present. If specified as C(yes) and I(count) is less than + the servers matched, servers will be deleted to match the count. If + the number of matched servers is fewer than specified in I(count) + additional servers will be added. + type: bool + default: 'no' + extra_client_args: + description: + - A hash of key/value pairs to be used when creating the cloudservers + client. This is considered an advanced option, use it wisely and + with caution. + extra_create_args: + description: + - A hash of key/value pairs to be used when creating a new server. + This is considered an advanced option, use it wisely and with caution. + files: + description: + - Files to insert into the instance. remotefilename:localcontent + flavor: + description: + - flavor to use for the instance + group: + description: + - host group to assign to server, is also used for idempotent operations + to ensure a specific number of instances + image: + description: + - image to use for the instance. 
Can be an C(id), C(human_id) or C(name). + With I(boot_from_volume), a Cloud Block Storage volume will be created + with this image + instance_ids: + description: + - list of instance ids, currently only used when state='absent' to + remove instances + key_name: + description: + - key pair to use on the instance + aliases: + - keypair + meta: + description: + - A hash of metadata to associate with the instance + name: + description: + - Name to give the instance + networks: + description: + - The network to attach to the instances. If specified, you must include + ALL networks including the public and private interfaces. Can be C(id) + or C(label). + default: + - public + - private + state: + description: + - Indicate desired state of the resource + choices: + - present + - absent + default: present + user_data: + description: + - Data to be uploaded to the servers config drive. This option implies + I(config_drive). Can be a file path or a string + wait: + description: + - wait for the instance to be in state 'running' before returning + type: bool + default: 'no' + wait_timeout: + description: + - how long before wait gives up, in seconds + default: 300 +author: + - "Jesse Keating (@omgjlk)" + - "Matt Martz (@sivel)" +notes: + - I(exact_count) can be "destructive" if the number of running servers in + the I(group) is larger than that specified in I(count). In such a case, the + I(state) is effectively set to C(absent) and the extra servers are deleted. + In the case of deletion, the returned data structure will have C(action) + set to C(delete), and the oldest servers in the group will be deleted. +extends_documentation_fragment: +- community.general.rackspace.openstack + +''' + +EXAMPLES = ''' +- name: Build a Cloud Server + gather_facts: False + tasks: + - name: Server build request + local_action: + module: rax + credentials: ~/.raxpub + name: rax-test1 + flavor: 5 + image: b11d9567-e412-4255-96b9-bd63ab23bcfe + key_name: my_rackspace_key + files: + /root/test.txt: /home/localuser/test.txt + wait: yes + state: present + networks: + - private + - public + register: rax + +- name: Build an exact count of cloud servers with incremented names + hosts: local + gather_facts: False + tasks: + - name: Server build requests + local_action: + module: rax + credentials: ~/.raxpub + name: test%03d.example.org + flavor: performance1-1 + image: ubuntu-1204-lts-precise-pangolin + state: present + count: 10 + count_offset: 10 + exact_count: yes + group: test + wait: yes + register: rax +''' + +import json +import os +import re +import time + +try: + import pyrax + HAS_PYRAX = True +except ImportError: + HAS_PYRAX = False + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.rax import (FINAL_STATUSES, rax_argument_spec, rax_find_bootable_volume, + rax_find_image, rax_find_network, rax_find_volume, + rax_required_together, rax_to_dict, setup_rax_module) +from ansible.module_utils.six.moves import xrange +from ansible.module_utils.six import string_types + + +def rax_find_server_image(module, server, image, boot_volume): + if not image and boot_volume: + vol = rax_find_bootable_volume(module, pyrax, server, + exit=False) + if not vol: + return None + volume_image_metadata = vol.volume_image_metadata + vol_image_id = volume_image_metadata.get('image_id') + if vol_image_id: + server_image = rax_find_image(module, pyrax, + vol_image_id, exit=False) + if server_image: + server.image = dict(id=server_image) + + # Match image IDs taking care 
of boot from volume + if image and not server.image: + vol = rax_find_bootable_volume(module, pyrax, server) + volume_image_metadata = vol.volume_image_metadata + vol_image_id = volume_image_metadata.get('image_id') + if not vol_image_id: + return None + server_image = rax_find_image(module, pyrax, + vol_image_id, exit=False) + if image != server_image: + return None + + server.image = dict(id=server_image) + elif image and server.image['id'] != image: + return None + + return server.image + + +def create(module, names=None, flavor=None, image=None, meta=None, key_name=None, + files=None, wait=True, wait_timeout=300, disk_config=None, + group=None, nics=None, extra_create_args=None, user_data=None, + config_drive=False, existing=None, block_device_mapping_v2=None): + names = [] if names is None else names + meta = {} if meta is None else meta + files = {} if files is None else files + nics = [] if nics is None else nics + extra_create_args = {} if extra_create_args is None else extra_create_args + existing = [] if existing is None else existing + block_device_mapping_v2 = [] if block_device_mapping_v2 is None else block_device_mapping_v2 + + cs = pyrax.cloudservers + changed = False + + if user_data: + config_drive = True + + if user_data and os.path.isfile(os.path.expanduser(user_data)): + try: + user_data = os.path.expanduser(user_data) + f = open(user_data) + user_data = f.read() + f.close() + except Exception as e: + module.fail_json(msg='Failed to load %s' % user_data) + + # Handle the file contents + for rpath in files.keys(): + lpath = os.path.expanduser(files[rpath]) + try: + fileobj = open(lpath, 'r') + files[rpath] = fileobj.read() + fileobj.close() + except Exception as e: + module.fail_json(msg='Failed to load %s' % lpath) + try: + servers = [] + bdmv2 = block_device_mapping_v2 + for name in names: + servers.append(cs.servers.create(name=name, image=image, + flavor=flavor, meta=meta, + key_name=key_name, + files=files, nics=nics, + disk_config=disk_config, + config_drive=config_drive, + userdata=user_data, + block_device_mapping_v2=bdmv2, + **extra_create_args)) + except Exception as e: + if e.message: + msg = str(e.message) + else: + msg = repr(e) + module.fail_json(msg=msg) + else: + changed = True + + if wait: + end_time = time.time() + wait_timeout + infinite = wait_timeout == 0 + while infinite or time.time() < end_time: + for server in servers: + try: + server.get() + except Exception: + server.status = 'ERROR' + + if not filter(lambda s: s.status not in FINAL_STATUSES, + servers): + break + time.sleep(5) + + success = [] + error = [] + timeout = [] + for server in servers: + try: + server.get() + except Exception: + server.status = 'ERROR' + instance = rax_to_dict(server, 'server') + if server.status == 'ACTIVE' or not wait: + success.append(instance) + elif server.status == 'ERROR': + error.append(instance) + elif wait: + timeout.append(instance) + + untouched = [rax_to_dict(s, 'server') for s in existing] + instances = success + untouched + + results = { + 'changed': changed, + 'action': 'create', + 'instances': instances, + 'success': success, + 'error': error, + 'timeout': timeout, + 'instance_ids': { + 'instances': [i['id'] for i in instances], + 'success': [i['id'] for i in success], + 'error': [i['id'] for i in error], + 'timeout': [i['id'] for i in timeout] + } + } + + if timeout: + results['msg'] = 'Timeout waiting for all servers to build' + elif error: + results['msg'] = 'Failed to build all servers' + + if 'msg' in results: + module.fail_json(**results) + 
else:
+        module.exit_json(**results)
+
+
+def delete(module, instance_ids=None, wait=True, wait_timeout=300, kept=None):
+    instance_ids = [] if instance_ids is None else instance_ids
+    kept = [] if kept is None else kept
+
+    cs = pyrax.cloudservers
+
+    changed = False
+    instances = {}
+    servers = []
+
+    for instance_id in instance_ids:
+        servers.append(cs.servers.get(instance_id))
+
+    for server in servers:
+        try:
+            server.delete()
+        except Exception as e:
+            # str(e) keeps this working on Python 3, where Exception has
+            # no 'message' attribute.
+            module.fail_json(msg=str(e))
+        else:
+            changed = True
+
+        instance = rax_to_dict(server, 'server')
+        instances[instance['id']] = instance
+
+    # If requested, wait for server deletion
+    if wait:
+        end_time = time.time() + wait_timeout
+        infinite = wait_timeout == 0
+        while infinite or time.time() < end_time:
+            for server in servers:
+                instance_id = server.id
+                try:
+                    server.get()
+                except Exception:
+                    instances[instance_id]['status'] = 'DELETED'
+                    instances[instance_id]['rax_status'] = 'DELETED'
+
+            if not list(filter(lambda s: s['status'] not in ('', 'DELETED',
+                                                             'ERROR'),
+                               instances.values())):
+                break
+
+            time.sleep(5)
+
+    # list() the filter results so they are reusable and JSON serializable
+    # on Python 3, where filter() returns a lazy iterator.
+    timeout = list(filter(lambda s: s['status'] not in ('', 'DELETED',
+                                                        'ERROR'),
+                          instances.values()))
+    error = list(filter(lambda s: s['status'] == 'ERROR',
+                        instances.values()))
+    success = list(filter(lambda s: s['status'] in ('', 'DELETED'),
+                          instances.values()))
+
+    instances = [rax_to_dict(s, 'server') for s in kept]
+
+    results = {
+        'changed': changed,
+        'action': 'delete',
+        'instances': instances,
+        'success': success,
+        'error': error,
+        'timeout': timeout,
+        'instance_ids': {
+            'instances': [i['id'] for i in instances],
+            'success': [i['id'] for i in success],
+            'error': [i['id'] for i in error],
+            'timeout': [i['id'] for i in timeout]
+        }
+    }
+
+    if timeout:
+        results['msg'] = 'Timeout waiting for all servers to delete'
+    elif error:
+        results['msg'] = 'Failed to delete all servers'
+
+    if 'msg' in results:
+        module.fail_json(**results)
+    else:
+        module.exit_json(**results)
+
+
+def cloudservers(module, state=None, name=None, flavor=None, image=None,
+                 meta=None, key_name=None, files=None, wait=True, wait_timeout=300,
+                 disk_config=None, count=1, group=None, instance_ids=None,
+                 exact_count=False, networks=None, count_offset=0,
+                 auto_increment=False, extra_create_args=None, user_data=None,
+                 config_drive=False, boot_from_volume=False,
+                 boot_volume=None, boot_volume_size=None,
+                 boot_volume_terminate=False):
+    meta = {} if meta is None else meta
+    files = {} if files is None else files
+    instance_ids = [] if instance_ids is None else instance_ids
+    networks = [] if networks is None else networks
+    extra_create_args = {} if extra_create_args is None else extra_create_args
+
+    cs = pyrax.cloudservers
+    cnw = pyrax.cloud_networks
+    if not cnw:
+        module.fail_json(msg='Failed to instantiate client. This '
+                             'typically indicates an invalid region or an '
+                             'incorrectly capitalized region name.')
+
+    if state == 'present' or (state == 'absent' and instance_ids is None):
+        if not boot_from_volume and not boot_volume and not image:
+            module.fail_json(msg='image is required for the "rax" module')
+
+        for arg, value in dict(name=name, flavor=flavor).items():
+            if not value:
+                module.fail_json(msg='%s is required for the "rax" module' %
+                                     arg)
+
+        if boot_from_volume and not image and not boot_volume:
+            module.fail_json(msg='image or boot_volume is required for the '
+                                 '"rax" module with boot_from_volume')
+
+        if boot_from_volume and image and not boot_volume_size:
+            module.fail_json(msg='boot_volume_size is required for the "rax" '
+                                 'module with boot_from_volume and image')
+
+        if boot_from_volume and image and boot_volume:
+            image = None
+
+    servers = []
+
+    # Add the group meta key
+    if group and 'group' not in meta:
+        meta['group'] = group
+    elif 'group' in meta and group is None:
+        group = meta['group']
+
+    # Normalize and ensure all metadata values are strings
+    for k, v in meta.items():
+        if isinstance(v, list):
+            meta[k] = ','.join(['%s' % i for i in v])
+        elif isinstance(v, dict):
+            meta[k] = json.dumps(v)
+        elif not isinstance(v, string_types):
+            meta[k] = '%s' % v
+
+    # When using state=absent with group, the absent block won't match the
+    # names properly. Use the exact_count functionality to decrease the count
+    # to the desired level
+    was_absent = False
+    if group is not None and state == 'absent':
+        exact_count = True
+        state = 'present'
+        was_absent = True
+
+    if image:
+        image = rax_find_image(module, pyrax, image)
+
+    nics = []
+    if networks:
+        for network in networks:
+            nics.extend(rax_find_network(module, pyrax, network))
+
+    # act on the state
+    if state == 'present':
+        # Idempotent enforcement of a specific count of servers
+        if exact_count is not False:
+            # See if we can find servers that match our options
+            if group is None:
+                module.fail_json(msg='"group" must be provided when using '
+                                     '"exact_count"')
+
+            if auto_increment:
+                numbers = set()
+
+                # See if the name is a printf like string, if not append
+                # %d to the end
+                try:
+                    name % 0
+                except TypeError as e:
+                    if str(e).startswith('not all'):
+                        name = '%s%%d' % name
+                    else:
+                        module.fail_json(msg=str(e))
+
+                # regex pattern to match printf formatting
+                pattern = re.sub(r'%\d*[sd]', r'(\d+)', name)
+                for server in cs.servers.list():
+                    # Ignore DELETED servers
+                    if server.status == 'DELETED':
+                        continue
+                    if server.metadata.get('group') == group:
+                        servers.append(server)
+                        match = re.search(pattern, server.name)
+                        if match:
+                            number = int(match.group(1))
+                            numbers.add(number)
+
+                number_range = xrange(count_offset, count_offset + count)
+                available_numbers = list(set(number_range)
+                                         .difference(numbers))
+            else:  # Not auto incrementing
+                for server in cs.servers.list():
+                    # Ignore DELETED servers
+                    if server.status == 'DELETED':
+                        continue
+                    if server.metadata.get('group') == group:
+                        servers.append(server)
+                # available_numbers not needed here, we inspect auto_increment
+                # again later
+
+            # If state was absent but the count was changed,
+            # assume we only wanted to remove that number of instances
+            if was_absent:
+                diff = len(servers) - count
+                if diff < 0:
+                    count = 0
+                else:
+                    count = diff
+
+            if len(servers) > count:
+                # We have more servers than we need, set state='absent'
+                # and delete the extras, this should delete the oldest
+                state = 'absent'
+                kept = servers[:count]
+                del servers[:count]
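+                # servers[:count] are retained via 'kept'; whatever is still
+                # left in 'servers' after the del is queued for deletion.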
instance_ids = [] + for server in servers: + instance_ids.append(server.id) + delete(module, instance_ids=instance_ids, wait=wait, + wait_timeout=wait_timeout, kept=kept) + elif len(servers) < count: + # we have fewer servers than we need + if auto_increment: + # auto incrementing server numbers + names = [] + name_slice = count - len(servers) + numbers_to_use = available_numbers[:name_slice] + for number in numbers_to_use: + names.append(name % number) + else: + # We are not auto incrementing server numbers, + # create a list of 'name' that matches how many we need + names = [name] * (count - len(servers)) + else: + # we have the right number of servers, just return info + # about all of the matched servers + instances = [] + instance_ids = [] + for server in servers: + instances.append(rax_to_dict(server, 'server')) + instance_ids.append(server.id) + module.exit_json(changed=False, action=None, + instances=instances, + success=[], error=[], timeout=[], + instance_ids={'instances': instance_ids, + 'success': [], 'error': [], + 'timeout': []}) + else: # not called with exact_count=True + if group is not None: + if auto_increment: + # we are auto incrementing server numbers, but not with + # exact_count + numbers = set() + + # See if the name is a printf like string, if not append + # %d to the end + try: + name % 0 + except TypeError as e: + if e.message.startswith('not all'): + name = '%s%%d' % name + else: + module.fail_json(msg=e.message) + + # regex pattern to match printf formatting + pattern = re.sub(r'%\d*[sd]', r'(\d+)', name) + for server in cs.servers.list(): + # Ignore DELETED servers + if server.status == 'DELETED': + continue + if server.metadata.get('group') == group: + servers.append(server) + match = re.search(pattern, server.name) + if match: + number = int(match.group(1)) + numbers.add(number) + + number_range = xrange(count_offset, + count_offset + count + len(numbers)) + available_numbers = list(set(number_range) + .difference(numbers)) + names = [] + numbers_to_use = available_numbers[:count] + for number in numbers_to_use: + names.append(name % number) + else: + # Not auto incrementing + names = [name] * count + else: + # No group was specified, and not using exact_count + # Perform more simplistic matching + search_opts = { + 'name': '^%s$' % name, + 'flavor': flavor + } + servers = [] + for server in cs.servers.list(search_opts=search_opts): + # Ignore DELETED servers + if server.status == 'DELETED': + continue + + if not rax_find_server_image(module, server, image, + boot_volume): + continue + + # Ignore servers with non matching metadata + if server.metadata != meta: + continue + servers.append(server) + + if len(servers) >= count: + # We have more servers than were requested, don't do + # anything. 
Not running with exact_count=True, so we assume + # more is OK + instances = [] + for server in servers: + instances.append(rax_to_dict(server, 'server')) + + instance_ids = [i['id'] for i in instances] + module.exit_json(changed=False, action=None, + instances=instances, success=[], error=[], + timeout=[], + instance_ids={'instances': instance_ids, + 'success': [], 'error': [], + 'timeout': []}) + + # We need more servers to reach out target, create names for + # them, we aren't performing auto_increment here + names = [name] * (count - len(servers)) + + block_device_mapping_v2 = [] + if boot_from_volume: + mapping = { + 'boot_index': '0', + 'delete_on_termination': boot_volume_terminate, + 'destination_type': 'volume', + } + if image: + mapping.update({ + 'uuid': image, + 'source_type': 'image', + 'volume_size': boot_volume_size, + }) + image = None + elif boot_volume: + volume = rax_find_volume(module, pyrax, boot_volume) + mapping.update({ + 'uuid': pyrax.utils.get_id(volume), + 'source_type': 'volume', + }) + block_device_mapping_v2.append(mapping) + + create(module, names=names, flavor=flavor, image=image, + meta=meta, key_name=key_name, files=files, wait=wait, + wait_timeout=wait_timeout, disk_config=disk_config, group=group, + nics=nics, extra_create_args=extra_create_args, + user_data=user_data, config_drive=config_drive, + existing=servers, + block_device_mapping_v2=block_device_mapping_v2) + + elif state == 'absent': + if instance_ids is None: + # We weren't given an explicit list of server IDs to delete + # Let's match instead + search_opts = { + 'name': '^%s$' % name, + 'flavor': flavor + } + for server in cs.servers.list(search_opts=search_opts): + # Ignore DELETED servers + if server.status == 'DELETED': + continue + + if not rax_find_server_image(module, server, image, + boot_volume): + continue + + # Ignore servers with non matching metadata + if meta != server.metadata: + continue + + servers.append(server) + + # Build a list of server IDs to delete + instance_ids = [] + for server in servers: + if len(instance_ids) < count: + instance_ids.append(server.id) + else: + break + + if not instance_ids: + # No server IDs were matched for deletion, or no IDs were + # explicitly provided, just exit and don't do anything + module.exit_json(changed=False, action=None, instances=[], + success=[], error=[], timeout=[], + instance_ids={'instances': [], + 'success': [], 'error': [], + 'timeout': []}) + + delete(module, instance_ids=instance_ids, wait=wait, + wait_timeout=wait_timeout) + + +def main(): + argument_spec = rax_argument_spec() + argument_spec.update( + dict( + auto_increment=dict(default=True, type='bool'), + boot_from_volume=dict(default=False, type='bool'), + boot_volume=dict(type='str'), + boot_volume_size=dict(type='int', default=100), + boot_volume_terminate=dict(type='bool', default=False), + config_drive=dict(default=False, type='bool'), + count=dict(default=1, type='int'), + count_offset=dict(default=1, type='int'), + disk_config=dict(choices=['auto', 'manual']), + exact_count=dict(default=False, type='bool'), + extra_client_args=dict(type='dict', default={}), + extra_create_args=dict(type='dict', default={}), + files=dict(type='dict', default={}), + flavor=dict(), + group=dict(), + image=dict(), + instance_ids=dict(type='list'), + key_name=dict(aliases=['keypair']), + meta=dict(type='dict', default={}), + name=dict(), + networks=dict(type='list', default=['public', 'private']), + service=dict(), + state=dict(default='present', choices=['present', 'absent']), + 
user_data=dict(no_log=True), + wait=dict(default=False, type='bool'), + wait_timeout=dict(default=300, type='int'), + ) + ) + + module = AnsibleModule( + argument_spec=argument_spec, + required_together=rax_required_together(), + ) + + if not HAS_PYRAX: + module.fail_json(msg='pyrax is required for this module') + + service = module.params.get('service') + + if service is not None: + module.fail_json(msg='The "service" attribute has been deprecated, ' + 'please remove "service: cloudservers" from your ' + 'playbook pertaining to the "rax" module') + + auto_increment = module.params.get('auto_increment') + boot_from_volume = module.params.get('boot_from_volume') + boot_volume = module.params.get('boot_volume') + boot_volume_size = module.params.get('boot_volume_size') + boot_volume_terminate = module.params.get('boot_volume_terminate') + config_drive = module.params.get('config_drive') + count = module.params.get('count') + count_offset = module.params.get('count_offset') + disk_config = module.params.get('disk_config') + if disk_config: + disk_config = disk_config.upper() + exact_count = module.params.get('exact_count', False) + extra_client_args = module.params.get('extra_client_args') + extra_create_args = module.params.get('extra_create_args') + files = module.params.get('files') + flavor = module.params.get('flavor') + group = module.params.get('group') + image = module.params.get('image') + instance_ids = module.params.get('instance_ids') + key_name = module.params.get('key_name') + meta = module.params.get('meta') + name = module.params.get('name') + networks = module.params.get('networks') + state = module.params.get('state') + user_data = module.params.get('user_data') + wait = module.params.get('wait') + wait_timeout = int(module.params.get('wait_timeout')) + + setup_rax_module(module, pyrax) + + if extra_client_args: + pyrax.cloudservers = pyrax.connect_to_cloudservers( + region=pyrax.cloudservers.client.region_name, + **extra_client_args) + client = pyrax.cloudservers.client + if 'bypass_url' in extra_client_args: + client.management_url = extra_client_args['bypass_url'] + + if pyrax.cloudservers is None: + module.fail_json(msg='Failed to instantiate client. 
This ' + 'typically indicates an invalid region or an ' + 'incorrectly capitalized region name.') + + cloudservers(module, state=state, name=name, flavor=flavor, + image=image, meta=meta, key_name=key_name, files=files, + wait=wait, wait_timeout=wait_timeout, disk_config=disk_config, + count=count, group=group, instance_ids=instance_ids, + exact_count=exact_count, networks=networks, + count_offset=count_offset, auto_increment=auto_increment, + extra_create_args=extra_create_args, user_data=user_data, + config_drive=config_drive, boot_from_volume=boot_from_volume, + boot_volume=boot_volume, boot_volume_size=boot_volume_size, + boot_volume_terminate=boot_volume_terminate) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/rackspace/rax_cbs.py b/plugins/modules/cloud/rackspace/rax_cbs.py new file mode 100644 index 0000000000..f6395199c9 --- /dev/null +++ b/plugins/modules/cloud/rackspace/rax_cbs.py @@ -0,0 +1,225 @@ +#!/usr/bin/python +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: rax_cbs +short_description: Manipulate Rackspace Cloud Block Storage Volumes +description: + - Manipulate Rackspace Cloud Block Storage Volumes +options: + description: + description: + - Description to give the volume being created + image: + description: + - image to use for bootable volumes. Can be an C(id), C(human_id) or + C(name). This option requires C(pyrax>=1.9.3) + meta: + description: + - A hash of metadata to associate with the volume + name: + description: + - Name to give the volume being created + required: true + size: + description: + - Size of the volume to create in Gigabytes + default: 100 + required: true + snapshot_id: + description: + - The id of the snapshot to create the volume from + state: + description: + - Indicate desired state of the resource + choices: + - present + - absent + default: present + required: true + volume_type: + description: + - Type of the volume being created + choices: + - SATA + - SSD + default: SATA + required: true + wait: + description: + - wait for the volume to be in state 'available' before returning + type: bool + default: 'no' + wait_timeout: + description: + - how long before wait gives up, in seconds + default: 300 +author: + - "Christopher H. 
Laco (@claco)" + - "Matt Martz (@sivel)" +extends_documentation_fragment: +- community.general.rackspace.openstack + +''' + +EXAMPLES = ''' +- name: Build a Block Storage Volume + gather_facts: False + hosts: local + connection: local + tasks: + - name: Storage volume create request + local_action: + module: rax_cbs + credentials: ~/.raxpub + name: my-volume + description: My Volume + volume_type: SSD + size: 150 + region: DFW + wait: yes + state: present + meta: + app: my-cool-app + register: my_volume +''' + +from distutils.version import LooseVersion + +try: + import pyrax + HAS_PYRAX = True +except ImportError: + HAS_PYRAX = False + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.rax import (VOLUME_STATUS, rax_argument_spec, rax_find_image, rax_find_volume, + rax_required_together, rax_to_dict, setup_rax_module) + + +def cloud_block_storage(module, state, name, description, meta, size, + snapshot_id, volume_type, wait, wait_timeout, + image): + changed = False + volume = None + instance = {} + + cbs = pyrax.cloud_blockstorage + + if cbs is None: + module.fail_json(msg='Failed to instantiate client. This ' + 'typically indicates an invalid region or an ' + 'incorrectly capitalized region name.') + + if image: + # pyrax<1.9.3 did not have support for specifying an image when + # creating a volume which is required for bootable volumes + if LooseVersion(pyrax.version.version) < LooseVersion('1.9.3'): + module.fail_json(msg='Creating a bootable volume requires ' + 'pyrax>=1.9.3') + image = rax_find_image(module, pyrax, image) + + volume = rax_find_volume(module, pyrax, name) + + if state == 'present': + if not volume: + kwargs = dict() + if image: + kwargs['image'] = image + try: + volume = cbs.create(name, size=size, volume_type=volume_type, + description=description, + metadata=meta, + snapshot_id=snapshot_id, **kwargs) + changed = True + except Exception as e: + module.fail_json(msg='%s' % e.message) + else: + if wait: + attempts = wait_timeout // 5 + pyrax.utils.wait_for_build(volume, interval=5, + attempts=attempts) + + volume.get() + instance = rax_to_dict(volume) + + result = dict(changed=changed, volume=instance) + + if volume.status == 'error': + result['msg'] = '%s failed to build' % volume.id + elif wait and volume.status not in VOLUME_STATUS: + result['msg'] = 'Timeout waiting on %s' % volume.id + + if 'msg' in result: + module.fail_json(**result) + else: + module.exit_json(**result) + + elif state == 'absent': + if volume: + instance = rax_to_dict(volume) + try: + volume.delete() + changed = True + except Exception as e: + module.fail_json(msg='%s' % e.message) + + module.exit_json(changed=changed, volume=instance) + + +def main(): + argument_spec = rax_argument_spec() + argument_spec.update( + dict( + description=dict(type='str'), + image=dict(type='str'), + meta=dict(type='dict', default={}), + name=dict(required=True), + size=dict(type='int', default=100), + snapshot_id=dict(), + state=dict(default='present', choices=['present', 'absent']), + volume_type=dict(choices=['SSD', 'SATA'], default='SATA'), + wait=dict(type='bool', default=False), + wait_timeout=dict(type='int', default=300) + ) + ) + + module = AnsibleModule( + argument_spec=argument_spec, + required_together=rax_required_together() + ) + + if not HAS_PYRAX: + module.fail_json(msg='pyrax is required for this module') + + description = module.params.get('description') + image = module.params.get('image') + meta = module.params.get('meta') + 
name = module.params.get('name') + size = module.params.get('size') + snapshot_id = module.params.get('snapshot_id') + state = module.params.get('state') + volume_type = module.params.get('volume_type') + wait = module.params.get('wait') + wait_timeout = module.params.get('wait_timeout') + + setup_rax_module(module, pyrax) + + cloud_block_storage(module, state, name, description, meta, size, + snapshot_id, volume_type, wait, wait_timeout, + image) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/rackspace/rax_cbs_attachments.py b/plugins/modules/cloud/rackspace/rax_cbs_attachments.py new file mode 100644 index 0000000000..4440499699 --- /dev/null +++ b/plugins/modules/cloud/rackspace/rax_cbs_attachments.py @@ -0,0 +1,219 @@ +#!/usr/bin/python +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: rax_cbs_attachments +short_description: Manipulate Rackspace Cloud Block Storage Volume Attachments +description: + - Manipulate Rackspace Cloud Block Storage Volume Attachments +options: + device: + description: + - The device path to attach the volume to, e.g. /dev/xvde. + - Before 2.4 this was a required field. Now it can be left to null to auto assign the device name. + volume: + description: + - Name or id of the volume to attach/detach + required: true + server: + description: + - Name or id of the server to attach/detach + required: true + state: + description: + - Indicate desired state of the resource + choices: + - present + - absent + default: present + required: true + wait: + description: + - wait for the volume to be in 'in-use'/'available' state before returning + type: bool + default: 'no' + wait_timeout: + description: + - how long before wait gives up, in seconds + default: 300 +author: + - "Christopher H. Laco (@claco)" + - "Matt Martz (@sivel)" +extends_documentation_fragment: +- community.general.rackspace.openstack + +''' + +EXAMPLES = ''' +- name: Attach a Block Storage Volume + gather_facts: False + hosts: local + connection: local + tasks: + - name: Storage volume attach request + local_action: + module: rax_cbs_attachments + credentials: ~/.raxpub + volume: my-volume + server: my-server + device: /dev/xvdd + region: DFW + wait: yes + state: present + register: my_volume +''' + +try: + import pyrax + HAS_PYRAX = True +except ImportError: + HAS_PYRAX = False + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.rax import (NON_CALLABLES, + rax_argument_spec, + rax_find_server, + rax_find_volume, + rax_required_together, + rax_to_dict, + setup_rax_module, + ) + + +def cloud_block_storage_attachments(module, state, volume, server, device, + wait, wait_timeout): + cbs = pyrax.cloud_blockstorage + cs = pyrax.cloudservers + + if cbs is None or cs is None: + module.fail_json(msg='Failed to instantiate client. 
This ' + 'typically indicates an invalid region or an ' + 'incorrectly capitalized region name.') + + changed = False + instance = {} + + volume = rax_find_volume(module, pyrax, volume) + + if not volume: + module.fail_json(msg='No matching storage volumes were found') + + if state == 'present': + server = rax_find_server(module, pyrax, server) + + if (volume.attachments and + volume.attachments[0]['server_id'] == server.id): + changed = False + elif volume.attachments: + module.fail_json(msg='Volume is attached to another server') + else: + try: + volume.attach_to_instance(server, mountpoint=device) + changed = True + except Exception as e: + module.fail_json(msg='%s' % e.message) + + volume.get() + + for key, value in vars(volume).items(): + if (isinstance(value, NON_CALLABLES) and + not key.startswith('_')): + instance[key] = value + + result = dict(changed=changed) + + if volume.status == 'error': + result['msg'] = '%s failed to build' % volume.id + elif wait: + attempts = wait_timeout // 5 + pyrax.utils.wait_until(volume, 'status', 'in-use', + interval=5, attempts=attempts) + + volume.get() + result['volume'] = rax_to_dict(volume) + + if 'msg' in result: + module.fail_json(**result) + else: + module.exit_json(**result) + + elif state == 'absent': + server = rax_find_server(module, pyrax, server) + + if (volume.attachments and + volume.attachments[0]['server_id'] == server.id): + try: + volume.detach() + if wait: + pyrax.utils.wait_until(volume, 'status', 'available', + interval=3, attempts=0, + verbose=False) + changed = True + except Exception as e: + module.fail_json(msg='%s' % e.message) + + volume.get() + changed = True + elif volume.attachments: + module.fail_json(msg='Volume is attached to another server') + + result = dict(changed=changed, volume=rax_to_dict(volume)) + + if volume.status == 'error': + result['msg'] = '%s failed to build' % volume.id + + if 'msg' in result: + module.fail_json(**result) + else: + module.exit_json(**result) + + module.exit_json(changed=changed, volume=instance) + + +def main(): + argument_spec = rax_argument_spec() + argument_spec.update( + dict( + device=dict(required=False), + volume=dict(required=True), + server=dict(required=True), + state=dict(default='present', choices=['present', 'absent']), + wait=dict(type='bool', default=False), + wait_timeout=dict(type='int', default=300) + ) + ) + + module = AnsibleModule( + argument_spec=argument_spec, + required_together=rax_required_together() + ) + + if not HAS_PYRAX: + module.fail_json(msg='pyrax is required for this module') + + device = module.params.get('device') + volume = module.params.get('volume') + server = module.params.get('server') + state = module.params.get('state') + wait = module.params.get('wait') + wait_timeout = module.params.get('wait_timeout') + + setup_rax_module(module, pyrax) + + cloud_block_storage_attachments(module, state, volume, server, device, + wait, wait_timeout) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/rackspace/rax_cdb.py b/plugins/modules/cloud/rackspace/rax_cdb.py new file mode 100644 index 0000000000..c85b252f0d --- /dev/null +++ b/plugins/modules/cloud/rackspace/rax_cdb.py @@ -0,0 +1,254 @@ +#!/usr/bin/python +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + 
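+# Note: the flavor passed to this module is validated against
+# cdb.list_flavors() at runtime, so the 1-6 range documented below is a
+# convention of the Rackspace API rather than a hard-coded limit.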
+ +DOCUMENTATION = ''' +--- +module: rax_cdb +short_description: create/delete or resize a Rackspace Cloud Databases instance +description: + - creates / deletes or resize a Rackspace Cloud Databases instance + and optionally waits for it to be 'running'. The name option needs to be + unique since it's used to identify the instance. +options: + name: + description: + - Name of the databases server instance + flavor: + description: + - flavor to use for the instance 1 to 6 (i.e. 512MB to 16GB) + default: 1 + volume: + description: + - Volume size of the database 1-150GB + default: 2 + cdb_type: + description: + - type of instance (i.e. MySQL, MariaDB, Percona) + default: MySQL + aliases: ['type'] + cdb_version: + description: + - version of database (MySQL supports 5.1 and 5.6, MariaDB supports 10, Percona supports 5.6) + choices: ['5.1', '5.6', '10'] + aliases: ['version'] + state: + description: + - Indicate desired state of the resource + choices: ['present', 'absent'] + default: present + wait: + description: + - wait for the instance to be in state 'running' before returning + type: bool + default: 'no' + wait_timeout: + description: + - how long before wait gives up, in seconds + default: 300 +author: "Simon JAILLET (@jails)" +extends_documentation_fragment: +- community.general.rackspace +- community.general.rackspace.openstack + +''' + +EXAMPLES = ''' +- name: Build a Cloud Databases + gather_facts: False + tasks: + - name: Server build request + local_action: + module: rax_cdb + credentials: ~/.raxpub + region: IAD + name: db-server1 + flavor: 1 + volume: 2 + cdb_type: MySQL + cdb_version: 5.6 + wait: yes + state: present + register: rax_db_server +''' + +try: + import pyrax + HAS_PYRAX = True +except ImportError: + HAS_PYRAX = False + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.rax import rax_argument_spec, rax_required_together, rax_to_dict, setup_rax_module + + +def find_instance(name): + + cdb = pyrax.cloud_databases + instances = cdb.list() + if instances: + for instance in instances: + if instance.name == name: + return instance + return False + + +def save_instance(module, name, flavor, volume, cdb_type, cdb_version, wait, + wait_timeout): + + for arg, value in dict(name=name, flavor=flavor, + volume=volume, type=cdb_type, version=cdb_version + ).items(): + if not value: + module.fail_json(msg='%s is required for the "rax_cdb"' + ' module' % arg) + + if not (volume >= 1 and volume <= 150): + module.fail_json(msg='volume is required to be between 1 and 150') + + cdb = pyrax.cloud_databases + + flavors = [] + for item in cdb.list_flavors(): + flavors.append(item.id) + + if not (flavor in flavors): + module.fail_json(msg='unexisting flavor reference "%s"' % str(flavor)) + + changed = False + + instance = find_instance(name) + + if not instance: + action = 'create' + try: + instance = cdb.create(name=name, flavor=flavor, volume=volume, + type=cdb_type, version=cdb_version) + except Exception as e: + module.fail_json(msg='%s' % e.message) + else: + changed = True + + else: + action = None + + if instance.volume.size != volume: + action = 'resize' + if instance.volume.size > volume: + module.fail_json(changed=False, action=action, + msg='The new volume size must be larger than ' + 'the current volume size', + cdb=rax_to_dict(instance)) + instance.resize_volume(volume) + changed = True + + if int(instance.flavor.id) != flavor: + action = 'resize' + pyrax.utils.wait_until(instance, 'status', 'ACTIVE', + 
attempts=wait_timeout) + instance.resize(flavor) + changed = True + + if wait: + pyrax.utils.wait_until(instance, 'status', 'ACTIVE', + attempts=wait_timeout) + + if wait and instance.status != 'ACTIVE': + module.fail_json(changed=changed, action=action, + cdb=rax_to_dict(instance), + msg='Timeout waiting for "%s" databases instance to ' + 'be created' % name) + + module.exit_json(changed=changed, action=action, cdb=rax_to_dict(instance)) + + +def delete_instance(module, name, wait, wait_timeout): + + if not name: + module.fail_json(msg='name is required for the "rax_cdb" module') + + changed = False + + instance = find_instance(name) + if not instance: + module.exit_json(changed=False, action='delete') + + try: + instance.delete() + except Exception as e: + module.fail_json(msg='%s' % e.message) + else: + changed = True + + if wait: + pyrax.utils.wait_until(instance, 'status', 'SHUTDOWN', + attempts=wait_timeout) + + if wait and instance.status != 'SHUTDOWN': + module.fail_json(changed=changed, action='delete', + cdb=rax_to_dict(instance), + msg='Timeout waiting for "%s" databases instance to ' + 'be deleted' % name) + + module.exit_json(changed=changed, action='delete', + cdb=rax_to_dict(instance)) + + +def rax_cdb(module, state, name, flavor, volume, cdb_type, cdb_version, wait, + wait_timeout): + + # act on the state + if state == 'present': + save_instance(module, name, flavor, volume, cdb_type, cdb_version, wait, + wait_timeout) + elif state == 'absent': + delete_instance(module, name, wait, wait_timeout) + + +def main(): + argument_spec = rax_argument_spec() + argument_spec.update( + dict( + name=dict(type='str', required=True), + flavor=dict(type='int', default=1), + volume=dict(type='int', default=2), + cdb_type=dict(type='str', default='MySQL', aliases=['type']), + cdb_version=dict(type='str', default='5.6', aliases=['version']), + state=dict(default='present', choices=['present', 'absent']), + wait=dict(type='bool', default=False), + wait_timeout=dict(type='int', default=300), + ) + ) + + module = AnsibleModule( + argument_spec=argument_spec, + required_together=rax_required_together(), + ) + + if not HAS_PYRAX: + module.fail_json(msg='pyrax is required for this module') + + name = module.params.get('name') + flavor = module.params.get('flavor') + volume = module.params.get('volume') + cdb_type = module.params.get('cdb_type') + cdb_version = module.params.get('cdb_version') + state = module.params.get('state') + wait = module.params.get('wait') + wait_timeout = module.params.get('wait_timeout') + + setup_rax_module(module, pyrax) + rax_cdb(module, state, name, flavor, volume, cdb_type, cdb_version, wait, wait_timeout) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/rackspace/rax_cdb_database.py b/plugins/modules/cloud/rackspace/rax_cdb_database.py new file mode 100644 index 0000000000..83f63b532a --- /dev/null +++ b/plugins/modules/cloud/rackspace/rax_cdb_database.py @@ -0,0 +1,169 @@ +#!/usr/bin/python +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +module: rax_cdb_database +short_description: 'create / delete a database in the Cloud Databases' +description: + - create / delete a database in the Cloud Databases. 
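+  - The database is created on an existing Cloud Databases instance
+    identified by I(cdb_id).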
+options: + cdb_id: + description: + - The databases server UUID + name: + description: + - Name to give to the database + character_set: + description: + - Set of symbols and encodings + default: 'utf8' + collate: + description: + - Set of rules for comparing characters in a character set + default: 'utf8_general_ci' + state: + description: + - Indicate desired state of the resource + choices: ['present', 'absent'] + default: present +author: "Simon JAILLET (@jails)" +extends_documentation_fragment: +- community.general.rackspace +- community.general.rackspace.openstack + +''' + +EXAMPLES = ''' +- name: Build a database in Cloud Databases + tasks: + - name: Database build request + local_action: + module: rax_cdb_database + credentials: ~/.raxpub + region: IAD + cdb_id: 323e7ce0-9cb0-11e3-a5e2-0800200c9a66 + name: db1 + state: present + register: rax_db_database +''' + +try: + import pyrax + HAS_PYRAX = True +except ImportError: + HAS_PYRAX = False + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.rax import rax_argument_spec, rax_required_together, rax_to_dict, setup_rax_module + + +def find_database(instance, name): + try: + database = instance.get_database(name) + except Exception: + return False + + return database + + +def save_database(module, cdb_id, name, character_set, collate): + cdb = pyrax.cloud_databases + + try: + instance = cdb.get(cdb_id) + except Exception as e: + module.fail_json(msg='%s' % e.message) + + changed = False + + database = find_database(instance, name) + + if not database: + try: + database = instance.create_database(name=name, + character_set=character_set, + collate=collate) + except Exception as e: + module.fail_json(msg='%s' % e.message) + else: + changed = True + + module.exit_json(changed=changed, action='create', + database=rax_to_dict(database)) + + +def delete_database(module, cdb_id, name): + cdb = pyrax.cloud_databases + + try: + instance = cdb.get(cdb_id) + except Exception as e: + module.fail_json(msg='%s' % e.message) + + changed = False + + database = find_database(instance, name) + + if database: + try: + database.delete() + except Exception as e: + module.fail_json(msg='%s' % e.message) + else: + changed = True + + module.exit_json(changed=changed, action='delete', + database=rax_to_dict(database)) + + +def rax_cdb_database(module, state, cdb_id, name, character_set, collate): + + # act on the state + if state == 'present': + save_database(module, cdb_id, name, character_set, collate) + elif state == 'absent': + delete_database(module, cdb_id, name) + + +def main(): + argument_spec = rax_argument_spec() + argument_spec.update( + dict( + cdb_id=dict(type='str', required=True), + name=dict(type='str', required=True), + character_set=dict(type='str', default='utf8'), + collate=dict(type='str', default='utf8_general_ci'), + state=dict(default='present', choices=['present', 'absent']) + ) + ) + + module = AnsibleModule( + argument_spec=argument_spec, + required_together=rax_required_together(), + ) + + if not HAS_PYRAX: + module.fail_json(msg='pyrax is required for this module') + + cdb_id = module.params.get('cdb_id') + name = module.params.get('name') + character_set = module.params.get('character_set') + collate = module.params.get('collate') + state = module.params.get('state') + + setup_rax_module(module, pyrax) + rax_cdb_database(module, state, cdb_id, name, character_set, collate) + + +if __name__ == '__main__': + main() diff --git 
a/plugins/modules/cloud/rackspace/rax_cdb_user.py b/plugins/modules/cloud/rackspace/rax_cdb_user.py new file mode 100644 index 0000000000..287eb4055e --- /dev/null +++ b/plugins/modules/cloud/rackspace/rax_cdb_user.py @@ -0,0 +1,214 @@ +#!/usr/bin/python +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: rax_cdb_user +short_description: create / delete a Rackspace Cloud Database user +description: + - create / delete a user in the Cloud Databases. +options: + cdb_id: + description: + - The databases server UUID + db_username: + description: + - Name of the database user + db_password: + description: + - Database user password + databases: + description: + - Name of the databases that the user can access + default: [] + host: + description: + - Specifies the host from which a user is allowed to connect to + the database. Possible values are a string containing an IPv4 address + or "%" to allow connecting from any host + default: '%' + state: + description: + - Indicate desired state of the resource + choices: ['present', 'absent'] + default: present +author: "Simon JAILLET (@jails)" +extends_documentation_fragment: +- community.general.rackspace +- community.general.rackspace.openstack + +''' + +EXAMPLES = ''' +- name: Build a user in Cloud Databases + tasks: + - name: User build request + local_action: + module: rax_cdb_user + credentials: ~/.raxpub + region: IAD + cdb_id: 323e7ce0-9cb0-11e3-a5e2-0800200c9a66 + db_username: user1 + db_password: user1 + databases: ['db1'] + state: present + register: rax_db_user +''' + +try: + import pyrax + HAS_PYRAX = True +except ImportError: + HAS_PYRAX = False + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_text +from ansible_collections.community.general.plugins.module_utils.rax import rax_argument_spec, rax_required_together, rax_to_dict, setup_rax_module + + +def find_user(instance, name): + try: + user = instance.get_user(name) + except Exception: + return False + + return user + + +def save_user(module, cdb_id, name, password, databases, host): + + for arg, value in dict(cdb_id=cdb_id, name=name).items(): + if not value: + module.fail_json(msg='%s is required for the "rax_cdb_user" ' + 'module' % arg) + + cdb = pyrax.cloud_databases + + try: + instance = cdb.get(cdb_id) + except Exception as e: + module.fail_json(msg='%s' % e.message) + + changed = False + + user = find_user(instance, name) + + if not user: + action = 'create' + try: + user = instance.create_user(name=name, + password=password, + database_names=databases, + host=host) + except Exception as e: + module.fail_json(msg='%s' % e.message) + else: + changed = True + else: + action = 'update' + + if user.host != host: + changed = True + + user.update(password=password, host=host) + + former_dbs = set([item.name for item in user.list_user_access()]) + databases = set(databases) + + if databases != former_dbs: + try: + revoke_dbs = [db for db in former_dbs if db not in databases] + user.revoke_user_access(db_names=revoke_dbs) + + new_dbs = [db for db in databases if db not in former_dbs] + user.grant_user_access(db_names=new_dbs) + except Exception as e: + module.fail_json(msg='%s' % e.message) + else: + changed = True + + 
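# Note: the grant/revoke lists above are computed as set differences + # against the user's current access, so the order of names in + # 'databases' does not affect the result. +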
module.exit_json(changed=changed, action=action, user=rax_to_dict(user)) + + +def delete_user(module, cdb_id, name): + + for arg, value in dict(cdb_id=cdb_id, name=name).items(): + if not value: + module.fail_json(msg='%s is required for the "rax_cdb_user"' + ' module' % arg) + + cdb = pyrax.cloud_databases + + try: + instance = cdb.get(cdb_id) + except Exception as e: + module.fail_json(msg='%s' % e.message) + + changed = False + + user = find_user(instance, name) + + if user: + try: + user.delete() + except Exception as e: + module.fail_json(msg='%s' % e.message) + else: + changed = True + + module.exit_json(changed=changed, action='delete') + + +def rax_cdb_user(module, state, cdb_id, name, password, databases, host): + + # act on the state + if state == 'present': + save_user(module, cdb_id, name, password, databases, host) + elif state == 'absent': + delete_user(module, cdb_id, name) + + +def main(): + argument_spec = rax_argument_spec() + argument_spec.update( + dict( + cdb_id=dict(type='str', required=True), + db_username=dict(type='str', required=True), + db_password=dict(type='str', required=True, no_log=True), + databases=dict(type='list', default=[]), + host=dict(type='str', default='%'), + state=dict(default='present', choices=['present', 'absent']) + ) + ) + + module = AnsibleModule( + argument_spec=argument_spec, + required_together=rax_required_together(), + ) + + if not HAS_PYRAX: + module.fail_json(msg='pyrax is required for this module') + + cdb_id = module.params.get('cdb_id') + name = module.params.get('db_username') + password = module.params.get('db_password') + databases = module.params.get('databases') + host = to_text(module.params.get('host'), errors='surrogate_or_strict') + state = module.params.get('state') + + setup_rax_module(module, pyrax) + rax_cdb_user(module, state, cdb_id, name, password, databases, host) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/rackspace/rax_clb.py b/plugins/modules/cloud/rackspace/rax_clb.py new file mode 100644 index 0000000000..c275243384 --- /dev/null +++ b/plugins/modules/cloud/rackspace/rax_clb.py @@ -0,0 +1,305 @@ +#!/usr/bin/python +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: rax_clb +short_description: create / delete a load balancer in Rackspace Public Cloud +description: + - creates / deletes a Rackspace Public Cloud load balancer. 
+options: + algorithm: + description: + - algorithm for the balancer being created + choices: + - RANDOM + - LEAST_CONNECTIONS + - ROUND_ROBIN + - WEIGHTED_LEAST_CONNECTIONS + - WEIGHTED_ROUND_ROBIN + default: LEAST_CONNECTIONS + meta: + description: + - A hash of metadata to associate with the instance + name: + description: + - Name to give the load balancer + port: + description: + - Port for the balancer being created + default: 80 + protocol: + description: + - Protocol for the balancer being created + choices: + - DNS_TCP + - DNS_UDP + - FTP + - HTTP + - HTTPS + - IMAPS + - IMAPv4 + - LDAP + - LDAPS + - MYSQL + - POP3 + - POP3S + - SMTP + - TCP + - TCP_CLIENT_FIRST + - UDP + - UDP_STREAM + - SFTP + default: HTTP + state: + description: + - Indicate desired state of the resource + choices: + - present + - absent + default: present + timeout: + description: + - timeout for communication between the balancer and the node + default: 30 + type: + description: + - type of interface for the balancer being created + choices: + - PUBLIC + - SERVICENET + default: PUBLIC + vip_id: + description: + - Virtual IP ID to use when creating the load balancer for purposes of + sharing an IP with another load balancer of another protocol + wait: + description: + - wait for the balancer to be in state 'running' before returning + type: bool + default: 'no' + wait_timeout: + description: + - how long before wait gives up, in seconds + default: 300 +author: + - "Christopher H. Laco (@claco)" + - "Matt Martz (@sivel)" +extends_documentation_fragment: +- community.general.rackspace +- community.general.rackspace.openstack + +''' + +EXAMPLES = ''' +- name: Build a Load Balancer + gather_facts: False + hosts: local + connection: local + tasks: + - name: Load Balancer create request + local_action: + module: rax_clb + credentials: ~/.raxpub + name: my-lb + port: 8080 + protocol: HTTP + type: SERVICENET + timeout: 30 + region: DFW + wait: yes + state: present + meta: + app: my-cool-app + register: my_lb +''' + + +try: + import pyrax + HAS_PYRAX = True +except ImportError: + HAS_PYRAX = False + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.rax import (CLB_ALGORITHMS, + CLB_PROTOCOLS, + rax_argument_spec, + rax_required_together, + rax_to_dict, + setup_rax_module, + ) + + +def cloud_load_balancer(module, state, name, meta, algorithm, port, protocol, + vip_type, timeout, wait, wait_timeout, vip_id): + if int(timeout) < 30: + module.fail_json(msg='"timeout" must be greater than or equal to 30') + + changed = False + balancers = [] + + clb = pyrax.cloud_loadbalancers + if not clb: + module.fail_json(msg='Failed to instantiate client. 
This ' + 'typically indicates an invalid region or an ' + 'incorrectly capitalized region name.') + + balancer_list = clb.list() + while balancer_list: + retrieved = clb.list(marker=balancer_list.pop().id) + balancer_list.extend(retrieved) + if len(retrieved) < 2: + break + + for balancer in balancer_list: + if name != balancer.name and name != balancer.id: + continue + + balancers.append(balancer) + + if len(balancers) > 1: + module.fail_json(msg='Multiple Load Balancers were matched by name, ' + 'try using the Load Balancer ID instead') + + if state == 'present': + if isinstance(meta, dict): + metadata = [dict(key=k, value=v) for k, v in meta.items()] + + if not balancers: + try: + virtual_ips = [clb.VirtualIP(type=vip_type, id=vip_id)] + balancer = clb.create(name, metadata=metadata, port=port, + algorithm=algorithm, protocol=protocol, + timeout=timeout, virtual_ips=virtual_ips) + changed = True + except Exception as e: + module.fail_json(msg='%s' % e.message) + else: + balancer = balancers[0] + setattr(balancer, 'metadata', + [dict(key=k, value=v) for k, v in + balancer.get_metadata().items()]) + atts = { + 'name': name, + 'algorithm': algorithm, + 'port': port, + 'protocol': protocol, + 'timeout': timeout + } + for att, value in atts.items(): + current = getattr(balancer, att) + if current != value: + changed = True + + if changed: + balancer.update(**atts) + + if balancer.metadata != metadata: + balancer.set_metadata(meta) + changed = True + + virtual_ips = [clb.VirtualIP(type=vip_type)] + current_vip_types = set([v.type for v in balancer.virtual_ips]) + vip_types = set([v.type for v in virtual_ips]) + if current_vip_types != vip_types: + module.fail_json(msg='Load balancer Virtual IP type cannot ' + 'be changed') + + if wait: + attempts = wait_timeout // 5 + pyrax.utils.wait_for_build(balancer, interval=5, attempts=attempts) + + balancer.get() + instance = rax_to_dict(balancer, 'clb') + + result = dict(changed=changed, balancer=instance) + + if balancer.status == 'ERROR': + result['msg'] = '%s failed to build' % balancer.id + elif wait and balancer.status not in ('ACTIVE', 'ERROR'): + result['msg'] = 'Timeout waiting on %s' % balancer.id + + if 'msg' in result: + module.fail_json(**result) + else: + module.exit_json(**result) + + elif state == 'absent': + if balancers: + balancer = balancers[0] + try: + balancer.delete() + changed = True + except Exception as e: + module.fail_json(msg='%s' % e.message) + + instance = rax_to_dict(balancer, 'clb') + + if wait: + attempts = wait_timeout // 5 + pyrax.utils.wait_until(balancer, 'status', ('DELETED'), + interval=5, attempts=attempts) + else: + instance = {} + + module.exit_json(changed=changed, balancer=instance) + + +def main(): + argument_spec = rax_argument_spec() + argument_spec.update( + dict( + algorithm=dict(choices=CLB_ALGORITHMS, + default='LEAST_CONNECTIONS'), + meta=dict(type='dict', default={}), + name=dict(required=True), + port=dict(type='int', default=80), + protocol=dict(choices=CLB_PROTOCOLS, default='HTTP'), + state=dict(default='present', choices=['present', 'absent']), + timeout=dict(type='int', default=30), + type=dict(choices=['PUBLIC', 'SERVICENET'], default='PUBLIC'), + vip_id=dict(), + wait=dict(type='bool'), + wait_timeout=dict(type='int', default=300), + ) + ) + + module = AnsibleModule( + argument_spec=argument_spec, + required_together=rax_required_together(), + ) + + if not HAS_PYRAX: + module.fail_json(msg='pyrax is required for this module') + + algorithm = module.params.get('algorithm') + meta = 
module.params.get('meta') + name = module.params.get('name') + port = module.params.get('port') + protocol = module.params.get('protocol') + state = module.params.get('state') + timeout = int(module.params.get('timeout')) + vip_id = module.params.get('vip_id') + vip_type = module.params.get('type') + wait = module.params.get('wait') + wait_timeout = int(module.params.get('wait_timeout')) + + setup_rax_module(module, pyrax) + + cloud_load_balancer(module, state, name, meta, algorithm, port, protocol, + vip_type, timeout, wait, wait_timeout, vip_id) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/rackspace/rax_clb_nodes.py b/plugins/modules/cloud/rackspace/rax_clb_nodes.py new file mode 100644 index 0000000000..5379012bc8 --- /dev/null +++ b/plugins/modules/cloud/rackspace/rax_clb_nodes.py @@ -0,0 +1,274 @@ +#!/usr/bin/python +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: rax_clb_nodes +short_description: add, modify and remove nodes from a Rackspace Cloud Load Balancer +description: + - Adds, modifies and removes nodes from a Rackspace Cloud Load Balancer +options: + address: + required: false + description: + - IP address or domain name of the node + condition: + required: false + choices: + - enabled + - disabled + - draining + description: + - Condition for the node, which determines its role within the load + balancer + load_balancer_id: + required: true + description: + - Load balancer id + node_id: + required: false + description: + - Node id + port: + required: false + description: + - Port number of the load balanced service on the node + state: + required: false + default: "present" + choices: + - present + - absent + description: + - Indicate desired state of the node + type: + required: false + choices: + - primary + - secondary + description: + - Type of node + wait: + required: false + default: "no" + type: bool + description: + - Wait for the load balancer to become active before returning + wait_timeout: + required: false + default: 30 + description: + - How long to wait before giving up and returning an error + weight: + required: false + description: + - Weight of node +author: "Lukasz Kawczynski (@neuroid)" +extends_documentation_fragment: +- community.general.rackspace +- community.general.rackspace.openstack + +''' + +EXAMPLES = ''' +# Add a new node to the load balancer +- local_action: + module: rax_clb_nodes + load_balancer_id: 71 + address: 10.2.2.3 + port: 80 + condition: enabled + type: primary + wait: yes + credentials: /path/to/credentials + +# Drain connections from a node +- local_action: + module: rax_clb_nodes + load_balancer_id: 71 + node_id: 410 + condition: draining + wait: yes + credentials: /path/to/credentials + +# Remove a node from the load balancer +- local_action: + module: rax_clb_nodes + load_balancer_id: 71 + node_id: 410 + state: absent + wait: yes + credentials: /path/to/credentials +''' + +import os + +try: + import pyrax + HAS_PYRAX = True +except ImportError: + HAS_PYRAX = False + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.rax import rax_argument_spec, rax_clb_node_to_dict, rax_required_together, setup_rax_module + + +def 
_activate_virtualenv(path): + activate_this = os.path.join(path, 'bin', 'activate_this.py') + with open(activate_this) as f: + code = compile(f.read(), activate_this, 'exec') + exec(code) + + +def _get_node(lb, node_id=None, address=None, port=None): + """Return a matching node""" + for node in getattr(lb, 'nodes', []): + match_list = [] + if node_id is not None: + match_list.append(getattr(node, 'id', None) == node_id) + if address is not None: + match_list.append(getattr(node, 'address', None) == address) + if port is not None: + match_list.append(getattr(node, 'port', None) == port) + + if match_list and all(match_list): + return node + + return None + + +def main(): + argument_spec = rax_argument_spec() + argument_spec.update( + dict( + address=dict(), + condition=dict(choices=['enabled', 'disabled', 'draining']), + load_balancer_id=dict(required=True, type='int'), + node_id=dict(type='int'), + port=dict(type='int'), + state=dict(default='present', choices=['present', 'absent']), + type=dict(choices=['primary', 'secondary']), + virtualenv=dict(type='path'), + wait=dict(default=False, type='bool'), + wait_timeout=dict(default=30, type='int'), + weight=dict(type='int'), + ) + ) + + module = AnsibleModule( + argument_spec=argument_spec, + required_together=rax_required_together(), + ) + + if not HAS_PYRAX: + module.fail_json(msg='pyrax is required for this module') + + address = module.params['address'] + condition = (module.params['condition'] and + module.params['condition'].upper()) + load_balancer_id = module.params['load_balancer_id'] + node_id = module.params['node_id'] + port = module.params['port'] + state = module.params['state'] + typ = module.params['type'] and module.params['type'].upper() + virtualenv = module.params['virtualenv'] + wait = module.params['wait'] + wait_timeout = module.params['wait_timeout'] or 1 + weight = module.params['weight'] + + if virtualenv: + try: + _activate_virtualenv(virtualenv) + except IOError as e: + module.fail_json(msg='Failed to activate virtualenv %s (%s)' % ( + virtualenv, e)) + + setup_rax_module(module, pyrax) + + if not pyrax.cloud_loadbalancers: + module.fail_json(msg='Failed to instantiate client. 
This ' + 'typically indicates an invalid region or an ' + 'incorrectly capitalized region name.') + + try: + lb = pyrax.cloud_loadbalancers.get(load_balancer_id) + except pyrax.exc.PyraxException as e: + module.fail_json(msg='%s' % e.message) + + node = _get_node(lb, node_id, address, port) + + result = rax_clb_node_to_dict(node) + + if state == 'absent': + if not node: # Removing a non-existent node + module.exit_json(changed=False, state=state) + try: + lb.delete_node(node) + result = {} + except pyrax.exc.NotFound: + module.exit_json(changed=False, state=state) + except pyrax.exc.PyraxException as e: + module.fail_json(msg='%s' % e.message) + else: # present + if not node: + if node_id: # Updating a non-existent node + msg = 'Node %d not found' % node_id + if lb.nodes: + msg += (' (available nodes: %s)' % + ', '.join([str(x.id) for x in lb.nodes])) + module.fail_json(msg=msg) + else: # Creating a new node + try: + node = pyrax.cloudloadbalancers.Node( + address=address, port=port, condition=condition, + weight=weight, type=typ) + resp, body = lb.add_nodes([node]) + result.update(body['nodes'][0]) + except pyrax.exc.PyraxException as e: + module.fail_json(msg='%s' % e.message) + else: # Updating an existing node + mutable = { + 'condition': condition, + 'type': typ, + 'weight': weight, + } + + # Iterate over a copy; popping keys while iterating the dict + # itself raises RuntimeError on Python 3. + for name, value in list(mutable.items()): + if value is None or value == getattr(node, name): + mutable.pop(name) + + if not mutable: + module.exit_json(changed=False, state=state, node=result) + + try: + # The diff has to be set explicitly to update node's weight and + # type; this should probably be fixed in pyrax + lb.update_node(node, diff=mutable) + result.update(mutable) + except pyrax.exc.PyraxException as e: + module.fail_json(msg='%s' % e.message) + + if wait: + pyrax.utils.wait_until(lb, "status", "ACTIVE", interval=1, + attempts=wait_timeout) + if lb.status != 'ACTIVE': + module.fail_json( + msg='Load balancer not active after %ds (current status: %s)' % + (wait_timeout, lb.status.lower())) + + kwargs = {'node': result} if result else {} + module.exit_json(changed=True, state=state, **kwargs) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/rackspace/rax_clb_ssl.py b/plugins/modules/cloud/rackspace/rax_clb_ssl.py new file mode 100644 index 0000000000..72ea2e3b86 --- /dev/null +++ b/plugins/modules/cloud/rackspace/rax_clb_ssl.py @@ -0,0 +1,279 @@ +#!/usr/bin/python +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +module: rax_clb_ssl +short_description: Manage SSL termination for a Rackspace Cloud Load Balancer. +description: +- Set up, reconfigure, or remove SSL termination for an existing load balancer. +options: + loadbalancer: + description: + - Name or ID of the load balancer on which to manage SSL termination. + required: true + state: + description: + - If set to "present", SSL termination will be added to this load balancer. + - If "absent", SSL termination will be removed instead. + choices: + - present + - absent + default: present + enabled: + description: + - If set to "false", temporarily disable SSL termination without discarding + - existing credentials. 
+ default: true + type: bool + private_key: + description: + - The private SSL key as a string in PEM format. + certificate: + description: + - The public SSL certificate as a string in PEM format. + intermediate_certificate: + description: + - One or more intermediate certificate authorities as a string in PEM + - format, concatenated into a single string. + secure_port: + description: + - The port to listen for secure traffic. + default: 443 + secure_traffic_only: + description: + - If "true", the load balancer will *only* accept secure traffic. + default: false + type: bool + https_redirect: + description: + - If "true", the load balancer will redirect HTTP traffic to HTTPS. + - Requires "secure_traffic_only" to be true. Incurs an implicit wait if SSL + - termination is also applied or removed. + type: bool + wait: + description: + - Wait for the balancer to be in state "running" before returning. + default: false + type: bool + wait_timeout: + description: + - How long before "wait" gives up, in seconds. + default: 300 +author: Ash Wilson (@smashwilson) +extends_documentation_fragment: +- community.general.rackspace +- community.general.rackspace.openstack + +''' + +EXAMPLES = ''' +- name: Enable SSL termination on a load balancer + rax_clb_ssl: + loadbalancer: the_loadbalancer + state: present + private_key: "{{ lookup('file', 'credentials/server.key') }}" + certificate: "{{ lookup('file', 'credentials/server.crt') }}" + intermediate_certificate: "{{ lookup('file', 'credentials/trust-chain.crt') }}" + secure_traffic_only: true + wait: true + +- name: Disable SSL termination + rax_clb_ssl: + loadbalancer: "{{ registered_lb.balancer.id }}" + state: absent + wait: true +''' + +try: + import pyrax + HAS_PYRAX = True +except ImportError: + HAS_PYRAX = False + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.rax import (rax_argument_spec, + rax_find_loadbalancer, + rax_required_together, + rax_to_dict, + setup_rax_module, + ) + + +def cloud_load_balancer_ssl(module, loadbalancer, state, enabled, private_key, + certificate, intermediate_certificate, secure_port, + secure_traffic_only, https_redirect, + wait, wait_timeout): + # Validate arguments. + + if state == 'present': + if not private_key: + module.fail_json(msg="private_key must be provided.") + else: + private_key = private_key.strip() + + if not certificate: + module.fail_json(msg="certificate must be provided.") + else: + certificate = certificate.strip() + + attempts = wait_timeout // 5 + + # Locate the load balancer. + + balancer = rax_find_loadbalancer(module, pyrax, loadbalancer) + existing_ssl = balancer.get_ssl_termination() + + changed = False + + if state == 'present': + # Apply or reconfigure SSL termination on the load balancer. + ssl_attrs = dict( + securePort=secure_port, + privatekey=private_key, + certificate=certificate, + intermediateCertificate=intermediate_certificate, + enabled=enabled, + secureTrafficOnly=secure_traffic_only + ) + + needs_change = False + + if existing_ssl: + for ssl_attr, value in ssl_attrs.items(): + if ssl_attr == 'privatekey': + # The private key is not included in get_ssl_termination's + # output (as it shouldn't be). Also, if you're changing the + # private key, you'll also be changing the certificate, + # so we don't lose anything by not checking it. 
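+ # Any other attribute that differs marks the whole termination + # config for re-submission via add_ssl_termination() below.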
+ continue + + if value is not None and existing_ssl.get(ssl_attr) != value: + needs_change = True + else: + needs_change = True + + if needs_change: + try: + balancer.add_ssl_termination(**ssl_attrs) + except pyrax.exceptions.PyraxException as e: + module.fail_json(msg='%s' % e.message) + changed = True + elif state == 'absent': + # Remove SSL termination if it's already configured. + if existing_ssl: + try: + balancer.delete_ssl_termination() + except pyrax.exceptions.PyraxException as e: + module.fail_json(msg='%s' % e.message) + changed = True + + if https_redirect is not None and balancer.httpsRedirect != https_redirect: + if changed: + # This wait is unavoidable because load balancers are immutable + # while the SSL termination changes above are being applied. + pyrax.utils.wait_for_build(balancer, interval=5, attempts=attempts) + + try: + balancer.update(httpsRedirect=https_redirect) + except pyrax.exceptions.PyraxException as e: + module.fail_json(msg='%s' % e.message) + changed = True + + if changed and wait: + pyrax.utils.wait_for_build(balancer, interval=5, attempts=attempts) + + balancer.get() + new_ssl_termination = balancer.get_ssl_termination() + + # Intentionally omit the private key from the module output, so you don't + # accidentally echo it with `ansible-playbook -v` or `debug`, and the + # certificate, which is just long. Convert other attributes to snake_case + # and include https_redirect at the top-level. + if new_ssl_termination: + new_ssl = dict( + enabled=new_ssl_termination['enabled'], + secure_port=new_ssl_termination['securePort'], + secure_traffic_only=new_ssl_termination['secureTrafficOnly'] + ) + else: + new_ssl = None + + result = dict( + changed=changed, + https_redirect=balancer.httpsRedirect, + ssl_termination=new_ssl, + balancer=rax_to_dict(balancer, 'clb') + ) + success = True + + if balancer.status == 'ERROR': + result['msg'] = '%s failed to build' % balancer.id + success = False + elif wait and balancer.status not in ('ACTIVE', 'ERROR'): + result['msg'] = 'Timeout waiting on %s' % balancer.id + success = False + + if success: + module.exit_json(**result) + else: + module.fail_json(**result) + + +def main(): + argument_spec = rax_argument_spec() + argument_spec.update(dict( + loadbalancer=dict(required=True), + state=dict(default='present', choices=['present', 'absent']), + enabled=dict(type='bool', default=True), + private_key=dict(), + certificate=dict(), + intermediate_certificate=dict(), + secure_port=dict(type='int', default=443), + secure_traffic_only=dict(type='bool', default=False), + https_redirect=dict(type='bool'), + wait=dict(type='bool', default=False), + wait_timeout=dict(type='int', default=300) + )) + + module = AnsibleModule( + argument_spec=argument_spec, + required_together=rax_required_together(), + ) + + if not HAS_PYRAX: + module.fail_json(msg='pyrax is required for this module.') + + loadbalancer = module.params.get('loadbalancer') + state = module.params.get('state') + enabled = module.boolean(module.params.get('enabled')) + private_key = module.params.get('private_key') + certificate = module.params.get('certificate') + intermediate_certificate = module.params.get('intermediate_certificate') + secure_port = module.params.get('secure_port') + secure_traffic_only = module.boolean(module.params.get('secure_traffic_only')) + https_redirect = module.boolean(module.params.get('https_redirect')) + wait = 
module.boolean(module.params.get('wait')) + wait_timeout = module.params.get('wait_timeout') + + setup_rax_module(module, pyrax) + + cloud_load_balancer_ssl( + module, loadbalancer, state, enabled, private_key, certificate, + intermediate_certificate, secure_port, secure_traffic_only, + https_redirect, wait, wait_timeout + ) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/rackspace/rax_dns.py b/plugins/modules/cloud/rackspace/rax_dns.py new file mode 100644 index 0000000000..b5f539a2d1 --- /dev/null +++ b/plugins/modules/cloud/rackspace/rax_dns.py @@ -0,0 +1,172 @@ +#!/usr/bin/python +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: rax_dns +short_description: Manage domains on Rackspace Cloud DNS +description: + - Manage domains on Rackspace Cloud DNS +options: + comment: + description: + - Brief description of the domain. Maximum length of 160 characters + email: + description: + - Email address of the domain administrator + name: + description: + - Domain name to create + state: + description: + - Indicate desired state of the resource + choices: + - present + - absent + default: present + ttl: + description: + - Time to live of domain in seconds + default: 3600 +notes: + - "It is recommended that plays utilizing this module be run with + C(serial: 1) to avoid exceeding the API request limit imposed by + the Rackspace CloudDNS API" +author: "Matt Martz (@sivel)" +extends_documentation_fragment: +- community.general.rackspace +- community.general.rackspace.openstack + +''' + +EXAMPLES = ''' +- name: Create domain + hosts: all + gather_facts: False + tasks: + - name: Domain create request + local_action: + module: rax_dns + credentials: ~/.raxpub + name: example.org + email: admin@example.org + register: rax_dns +''' + +try: + import pyrax + HAS_PYRAX = True +except ImportError: + HAS_PYRAX = False + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.rax import (rax_argument_spec, + rax_required_together, + rax_to_dict, + setup_rax_module, + ) + + +def rax_dns(module, comment, email, name, state, ttl): + changed = False + + dns = pyrax.cloud_dns + if not dns: + module.fail_json(msg='Failed to instantiate client. 
This ' + 'typically indicates an invalid region or an ' + 'incorrectly capitalized region name.') + + if state == 'present': + if not email: + module.fail_json(msg='An "email" attribute is required for ' + 'creating a domain') + + try: + domain = dns.find(name=name) + except pyrax.exceptions.NoUniqueMatch as e: + module.fail_json(msg='%s' % e.message) + except pyrax.exceptions.NotFound: + try: + domain = dns.create(name=name, emailAddress=email, ttl=ttl, + comment=comment) + changed = True + except Exception as e: + module.fail_json(msg='%s' % e.message) + + update = {} + if comment != getattr(domain, 'comment', None): + update['comment'] = comment + if ttl != getattr(domain, 'ttl', None): + update['ttl'] = ttl + if email != getattr(domain, 'emailAddress', None): + update['emailAddress'] = email + + if update: + try: + domain.update(**update) + changed = True + domain.get() + except Exception as e: + module.fail_json(msg='%s' % e.message) + + elif state == 'absent': + try: + domain = dns.find(name=name) + except pyrax.exceptions.NotFound: + domain = {} + except Exception as e: + module.fail_json(msg='%s' % e.message) + + if domain: + try: + domain.delete() + changed = True + except Exception as e: + module.fail_json(msg='%s' % e.message) + + module.exit_json(changed=changed, domain=rax_to_dict(domain)) + + +def main(): + argument_spec = rax_argument_spec() + argument_spec.update( + dict( + comment=dict(), + email=dict(), + name=dict(), + state=dict(default='present', choices=['present', 'absent']), + ttl=dict(type='int', default=3600), + ) + ) + + module = AnsibleModule( + argument_spec=argument_spec, + required_together=rax_required_together(), + ) + + if not HAS_PYRAX: + module.fail_json(msg='pyrax is required for this module') + + comment = module.params.get('comment') + email = module.params.get('email') + name = module.params.get('name') + state = module.params.get('state') + ttl = module.params.get('ttl') + + setup_rax_module(module, pyrax, False) + + rax_dns(module, comment, email, name, state, ttl) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/rackspace/rax_dns_record.py b/plugins/modules/cloud/rackspace/rax_dns_record.py new file mode 100644 index 0000000000..f130e82275 --- /dev/null +++ b/plugins/modules/cloud/rackspace/rax_dns_record.py @@ -0,0 +1,347 @@ +#!/usr/bin/python +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: rax_dns_record +short_description: Manage DNS records on Rackspace Cloud DNS +description: + - Manage DNS records on Rackspace Cloud DNS +options: + comment: + description: + - Brief description of the domain. Maximum length of 160 characters + data: + description: + - IP address for A/AAAA record, FQDN for CNAME/MX/NS, or text data for + SRV/TXT + required: True + domain: + description: + - Domain name to create the record in. This is an invalid option when + type=PTR + loadbalancer: + description: + - Load Balancer ID to create a PTR record for. Only used with type=PTR + name: + description: + - FQDN record name to create + required: True + overwrite: + description: + - Add new records if data doesn't match, instead of updating existing + record with matching name. 
If there are already multiple records with + matching name and overwrite=true, this module will fail. + default: true + type: bool + priority: + description: + - Required for MX and SRV records, but forbidden for other record types. + If specified, must be an integer from 0 to 65535. + server: + description: + - Server ID to create a PTR record for. Only used with type=PTR + state: + description: + - Indicate desired state of the resource + choices: + - present + - absent + default: present + ttl: + description: + - Time to live of record in seconds + default: 3600 + type: + description: + - DNS record type + choices: + - A + - AAAA + - CNAME + - MX + - NS + - SRV + - TXT + - PTR + required: true +notes: + - "It is recommended that plays utilizing this module be run with + C(serial: 1) to avoid exceeding the API request limit imposed by + the Rackspace CloudDNS API" + - To manipulate a C(PTR) record either C(loadbalancer) or C(server) must be + supplied + - As of version 1.7, the C(type) field is required and no longer defaults to an C(A) record. + - C(PTR) record support was added in version 1.7 +author: "Matt Martz (@sivel)" +extends_documentation_fragment: +- community.general.rackspace +- community.general.rackspace.openstack + +''' + +EXAMPLES = ''' +- name: Create DNS Records + hosts: all + gather_facts: False + tasks: + - name: Create A record + local_action: + module: rax_dns_record + credentials: ~/.raxpub + domain: example.org + name: www.example.org + data: "{{ rax_accessipv4 }}" + type: A + register: a_record + + - name: Create PTR record + local_action: + module: rax_dns_record + credentials: ~/.raxpub + server: "{{ rax_id }}" + name: "{{ inventory_hostname }}" + region: DFW + register: ptr_record +''' + +try: + import pyrax + HAS_PYRAX = True +except ImportError: + HAS_PYRAX = False + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.rax import (rax_argument_spec, + rax_find_loadbalancer, + rax_find_server, + rax_required_together, + rax_to_dict, + setup_rax_module, + ) + + +def rax_dns_record_ptr(module, data=None, comment=None, loadbalancer=None, + name=None, server=None, state='present', ttl=7200): + changed = False + results = [] + + dns = pyrax.cloud_dns + + if not dns: + module.fail_json(msg='Failed to instantiate client. 
This ' + 'typically indicates an invalid region or an ' + 'incorrectly capitalized region name.') + + if loadbalancer: + item = rax_find_loadbalancer(module, pyrax, loadbalancer) + elif server: + item = rax_find_server(module, pyrax, server) + + if state == 'present': + current = dns.list_ptr_records(item) + for record in current: + if record.data == data: + if record.ttl != ttl or record.name != name: + try: + dns.update_ptr_record(item, record, name, data, ttl) + changed = True + except Exception as e: + module.fail_json(msg='%s' % e.message) + record.ttl = ttl + record.name = name + results.append(rax_to_dict(record)) + break + else: + results.append(rax_to_dict(record)) + break + + if not results: + record = dict(name=name, type='PTR', data=data, ttl=ttl, + comment=comment) + try: + results = dns.add_ptr_records(item, [record]) + changed = True + except Exception as e: + module.fail_json(msg='%s' % e.message) + + module.exit_json(changed=changed, records=results) + + elif state == 'absent': + current = dns.list_ptr_records(item) + for record in current: + if record.data == data: + results.append(rax_to_dict(record)) + break + + if results: + try: + dns.delete_ptr_records(item, data) + changed = True + except Exception as e: + module.fail_json(msg='%s' % e.message) + + module.exit_json(changed=changed, records=results) + + +def rax_dns_record(module, comment=None, data=None, domain=None, name=None, + overwrite=True, priority=None, record_type='A', + state='present', ttl=7200): + """Function for manipulating record types other than PTR""" + + changed = False + + dns = pyrax.cloud_dns + if not dns: + module.fail_json(msg='Failed to instantiate client. This ' + 'typically indicates an invalid region or an ' + 'incorrectly capitalized region name.') + + if state == 'present': + if not priority and record_type in ['MX', 'SRV']: + module.fail_json(msg='A "priority" attribute is required for ' + 'creating a MX or SRV record') + + try: + domain = dns.find(name=domain) + except Exception as e: + module.fail_json(msg='%s' % e.message) + + try: + if overwrite: + record = domain.find_record(record_type, name=name) + else: + record = domain.find_record(record_type, name=name, data=data) + except pyrax.exceptions.DomainRecordNotUnique as e: + module.fail_json(msg='overwrite=true and there are multiple matching records') + except pyrax.exceptions.DomainRecordNotFound as e: + try: + record_data = { + 'type': record_type, + 'name': name, + 'data': data, + 'ttl': ttl + } + if comment: + record_data.update(dict(comment=comment)) + if priority and record_type.upper() in ['MX', 'SRV']: + record_data.update(dict(priority=priority)) + + record = domain.add_records([record_data])[0] + changed = True + except Exception as e: + module.fail_json(msg='%s' % e.message) + + update = {} + if comment != getattr(record, 'comment', None): + update['comment'] = comment + if ttl != getattr(record, 'ttl', None): + update['ttl'] = ttl + if priority != getattr(record, 'priority', None): + update['priority'] = priority + if data != getattr(record, 'data', None): + update['data'] = data + + if update: + try: + record.update(**update) + changed = True + record.get() + except Exception as e: + module.fail_json(msg='%s' % e.message) + + elif state == 'absent': + try: + domain = dns.find(name=domain) + except Exception as e: + module.fail_json(msg='%s' % e.message) + + try: + record = domain.find_record(record_type, name=name, data=data) + except pyrax.exceptions.DomainRecordNotFound as e: + record = {} + except 
pyrax.exceptions.DomainRecordNotUnique as e: + module.fail_json(msg='%s' % e.message) + + if record: + try: + record.delete() + changed = True + except Exception as e: + module.fail_json(msg='%s' % e.message) + + module.exit_json(changed=changed, record=rax_to_dict(record)) + + +def main(): + argument_spec = rax_argument_spec() + argument_spec.update( + dict( + comment=dict(), + data=dict(required=True), + domain=dict(), + loadbalancer=dict(), + name=dict(required=True), + overwrite=dict(type='bool', default=True), + priority=dict(type='int'), + server=dict(), + state=dict(default='present', choices=['present', 'absent']), + ttl=dict(type='int', default=3600), + type=dict(required=True, choices=['A', 'AAAA', 'CNAME', 'MX', 'NS', + 'SRV', 'TXT', 'PTR']) + ) + ) + + module = AnsibleModule( + argument_spec=argument_spec, + required_together=rax_required_together(), + mutually_exclusive=[ + ['server', 'loadbalancer', 'domain'], + ], + required_one_of=[ + ['server', 'loadbalancer', 'domain'], + ], + ) + + if not HAS_PYRAX: + module.fail_json(msg='pyrax is required for this module') + + comment = module.params.get('comment') + data = module.params.get('data') + domain = module.params.get('domain') + loadbalancer = module.params.get('loadbalancer') + name = module.params.get('name') + overwrite = module.params.get('overwrite') + priority = module.params.get('priority') + server = module.params.get('server') + state = module.params.get('state') + ttl = module.params.get('ttl') + record_type = module.params.get('type') + + setup_rax_module(module, pyrax, False) + + if record_type.upper() == 'PTR': + if not server and not loadbalancer: + module.fail_json(msg='one of the following is required: ' + 'server,loadbalancer') + rax_dns_record_ptr(module, data=data, comment=comment, + loadbalancer=loadbalancer, name=name, server=server, + state=state, ttl=ttl) + else: + rax_dns_record(module, comment=comment, data=data, domain=domain, + name=name, overwrite=overwrite, priority=priority, + record_type=record_type, state=state, ttl=ttl) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/rackspace/rax_facts.py b/plugins/modules/cloud/rackspace/rax_facts.py new file mode 100644 index 0000000000..614fe0de0e --- /dev/null +++ b/plugins/modules/cloud/rackspace/rax_facts.py @@ -0,0 +1,144 @@ +#!/usr/bin/python +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: rax_facts +short_description: Gather facts for Rackspace Cloud Servers +description: + - Gather facts for Rackspace Cloud Servers. 
+options: + address: + description: + - Server IP address to retrieve facts for, will match any IP assigned to + the server + id: + description: + - Server ID to retrieve facts for + name: + description: + - Server name to retrieve facts for +author: "Matt Martz (@sivel)" +extends_documentation_fragment: +- community.general.rackspace.openstack + +''' + +EXAMPLES = ''' +- name: Gather info about servers + hosts: all + gather_facts: False + tasks: + - name: Get facts about servers + local_action: + module: rax_facts + credentials: ~/.raxpub + name: "{{ inventory_hostname }}" + region: DFW + - name: Map some facts + set_fact: + ansible_ssh_host: "{{ rax_accessipv4 }}" +''' + +try: + import pyrax + HAS_PYRAX = True +except ImportError: + HAS_PYRAX = False + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.rax import (rax_argument_spec, + rax_required_together, + rax_to_dict, + setup_rax_module, + ) + + +def rax_facts(module, address, name, server_id): + changed = False + + cs = pyrax.cloudservers + + if cs is None: + module.fail_json(msg='Failed to instantiate client. This ' + 'typically indicates an invalid region or an ' + 'incorrectly capitalized region name.') + + ansible_facts = {} + + search_opts = {} + if name: + search_opts = dict(name='^%s$' % name) + try: + servers = cs.servers.list(search_opts=search_opts) + except Exception as e: + module.fail_json(msg='%s' % e.message) + elif address: + servers = [] + try: + for server in cs.servers.list(): + for addresses in server.networks.values(): + if address in addresses: + servers.append(server) + break + except Exception as e: + module.fail_json(msg='%s' % e.message) + elif server_id: + servers = [] + try: + servers.append(cs.servers.get(server_id)) + except Exception as e: + pass + + servers[:] = [server for server in servers if server.status != "DELETED"] + + if len(servers) > 1: + module.fail_json(msg='Multiple servers found matching provided ' + 'search parameters') + elif len(servers) == 1: + ansible_facts = rax_to_dict(servers[0], 'server') + + module.exit_json(changed=changed, ansible_facts=ansible_facts) + + +def main(): + argument_spec = rax_argument_spec() + argument_spec.update( + dict( + address=dict(), + id=dict(), + name=dict(), + ) + ) + + module = AnsibleModule( + argument_spec=argument_spec, + required_together=rax_required_together(), + mutually_exclusive=[['address', 'id', 'name']], + required_one_of=[['address', 'id', 'name']], + ) + + if not HAS_PYRAX: + module.fail_json(msg='pyrax is required for this module') + + address = module.params.get('address') + server_id = module.params.get('id') + name = module.params.get('name') + + setup_rax_module(module, pyrax) + + rax_facts(module, address, name, server_id) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/rackspace/rax_files.py b/plugins/modules/cloud/rackspace/rax_files.py new file mode 100644 index 0000000000..1ce314745d --- /dev/null +++ b/plugins/modules/cloud/rackspace/rax_files.py @@ -0,0 +1,389 @@ +#!/usr/bin/python + +# (c) 2013, Paul Durivage +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: rax_files +short_description: Manipulate Rackspace Cloud Files Containers +description: + - 
Manipulate Rackspace Cloud Files Containers +options: + clear_meta: + description: + - Optionally clear existing metadata when applying metadata to existing containers. + Selecting this option is only appropriate when setting type=meta + type: bool + default: "no" + container: + description: + - The container to use for container or metadata operations. + required: true + meta: + description: + - A hash of items to set as metadata values on a container + private: + description: + - Used to set a container as private, removing it from the CDN. B(Warning!) + Private containers, if previously made public, can have live objects + available until the TTL on cached objects expires + type: bool + public: + description: + - Used to set a container as public, available via the Cloud Files CDN + type: bool + region: + description: + - Region to create an instance in + default: DFW + state: + description: + - Indicate desired state of the resource + choices: ['present', 'absent'] + default: present + ttl: + description: + - In seconds, set a container-wide TTL for all objects cached on CDN edge nodes. + Setting a TTL is only appropriate for containers that are public + type: + description: + - Type of object to do work on, i.e. metadata object or a container object + choices: + - file + - meta + default: file + web_error: + description: + - Sets an object to be presented as the HTTP error page when accessed by the CDN URL + web_index: + description: + - Sets an object to be presented as the HTTP index page when accessed by the CDN URL +author: "Paul Durivage (@angstwad)" +extends_documentation_fragment: +- community.general.rackspace +- community.general.rackspace.openstack + +''' + +EXAMPLES = ''' +- name: "Test Cloud Files Containers" + hosts: local + gather_facts: no + tasks: + - name: "List all containers" + rax_files: + state: list + + - name: "Create container called 'mycontainer'" + rax_files: + container: mycontainer + + - name: "Create container 'mycontainer2' with metadata" + rax_files: + container: mycontainer2 + meta: + key: value + file_for: someuser@example.com + + - name: "Set a container's web index page" + rax_files: + container: mycontainer + web_index: index.html + + - name: "Set a container's web error page" + rax_files: + container: mycontainer + web_error: error.html + + - name: "Make container public" + rax_files: + container: mycontainer + public: yes + + - name: "Make container public with a 24 hour TTL" + rax_files: + container: mycontainer + public: yes + ttl: 86400 + + - name: "Make container private" + rax_files: + container: mycontainer + private: yes + +- name: "Test Cloud Files Containers Metadata Storage" + hosts: local + gather_facts: no + tasks: + - name: "Get mycontainer2 metadata" + rax_files: + container: mycontainer2 + type: meta + + - name: "Set mycontainer2 metadata" + rax_files: + container: mycontainer2 + type: meta + meta: + uploaded_by: someuser@example.com + + - name: "Remove mycontainer2 metadata" + rax_files: + container: "mycontainer2" + type: meta + state: absent + meta: + key: "" + file_for: "" +''' + +try: + import pyrax + HAS_PYRAX = True +except ImportError as e: + HAS_PYRAX = False + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.rax import rax_argument_spec, rax_required_together, setup_rax_module + + +EXIT_DICT = dict(success=True) +META_PREFIX = 'x-container-meta-' + + +def _get_container(module, cf, container): + try: + return cf.get_container(container) + except 
pyrax.exc.NoSuchContainer as e: + module.fail_json(msg=e.message) + + +def _fetch_meta(module, container): + EXIT_DICT['meta'] = dict() + try: + for k, v in container.get_metadata().items(): + split_key = k.split(META_PREFIX)[-1] + EXIT_DICT['meta'][split_key] = v + except Exception as e: + module.fail_json(msg=e.message) + + +def meta(cf, module, container_, state, meta_, clear_meta): + c = _get_container(module, cf, container_) + + if meta_ and state == 'present': + try: + meta_set = c.set_metadata(meta_, clear=clear_meta) + except Exception as e: + module.fail_json(msg=e.message) + elif meta_ and state == 'absent': + remove_results = [] + for k, v in meta_.items(): + c.remove_metadata_key(k) + remove_results.append(k) + EXIT_DICT['deleted_meta_keys'] = remove_results + elif state == 'absent': + remove_results = [] + for k, v in c.get_metadata().items(): + c.remove_metadata_key(k) + remove_results.append(k) + EXIT_DICT['deleted_meta_keys'] = remove_results + + _fetch_meta(module, c) + _locals = locals().keys() + + EXIT_DICT['container'] = c.name + if 'meta_set' in _locals or 'remove_results' in _locals: + EXIT_DICT['changed'] = True + + module.exit_json(**EXIT_DICT) + + +def container(cf, module, container_, state, meta_, clear_meta, ttl, public, + private, web_index, web_error): + if public and private: + module.fail_json(msg='container cannot be simultaneously ' + 'set to public and private') + + if state == 'absent' and (meta_ or clear_meta or public or private or web_index or web_error): + module.fail_json(msg='state cannot be omitted when setting/removing ' + 'attributes on a container') + + if state == 'list': + # We don't care if attributes are specified, let's list containers + EXIT_DICT['containers'] = cf.list_containers() + module.exit_json(**EXIT_DICT) + + try: + c = cf.get_container(container_) + except pyrax.exc.NoSuchContainer as e: + # Make the container if state=present, otherwise bomb out + if state == 'present': + try: + c = cf.create_container(container_) + except Exception as e: + module.fail_json(msg=e.message) + else: + EXIT_DICT['changed'] = True + EXIT_DICT['created'] = True + else: + module.fail_json(msg=e.message) + else: + # Successfully grabbed a container object + # Delete if state is absent + if state == 'absent': + try: + cont_deleted = c.delete() + except Exception as e: + module.fail_json(msg=e.message) + else: + EXIT_DICT['deleted'] = True + + if meta_: + try: + meta_set = c.set_metadata(meta_, clear=clear_meta) + except Exception as e: + module.fail_json(msg=e.message) + finally: + _fetch_meta(module, c) + + if ttl: + try: + c.cdn_ttl = ttl + except Exception as e: + module.fail_json(msg=e.message) + else: + EXIT_DICT['ttl'] = c.cdn_ttl + + if public: + try: + cont_public = c.make_public() + except Exception as e: + module.fail_json(msg=e.message) + else: + EXIT_DICT['container_urls'] = dict(url=c.cdn_uri, + ssl_url=c.cdn_ssl_uri, + streaming_url=c.cdn_streaming_uri, + ios_uri=c.cdn_ios_uri) + + if private: + try: + cont_private = c.make_private() + except Exception as e: + module.fail_json(msg=e.message) + else: + EXIT_DICT['set_private'] = True + + if web_index: + try: + cont_web_index = c.set_web_index_page(web_index) + except Exception as e: + module.fail_json(msg=e.message) + else: + EXIT_DICT['set_index'] = True + finally: + _fetch_meta(module, c) + + if web_error: + try: + cont_err_index = c.set_web_error_page(web_error) + except Exception as e: + module.fail_json(msg=e.message) + else: + EXIT_DICT['set_error'] = True + finally: + 
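+ # keep the metadata reported in EXIT_DICT current whichever branch ran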
_fetch_meta(module, c) + + EXIT_DICT['container'] = c.name + EXIT_DICT['objs_in_container'] = c.object_count + EXIT_DICT['total_bytes'] = c.total_bytes + + _locals = locals().keys() + if ('cont_deleted' in _locals + or 'meta_set' in _locals + or 'cont_public' in _locals + or 'cont_private' in _locals + or 'cont_web_index' in _locals + or 'cont_err_index' in _locals): + EXIT_DICT['changed'] = True + + module.exit_json(**EXIT_DICT) + + +def cloudfiles(module, container_, state, meta_, clear_meta, typ, ttl, public, + private, web_index, web_error): + """ Dispatch from here to work with metadata or file objects """ + cf = pyrax.cloudfiles + + if cf is None: + module.fail_json(msg='Failed to instantiate client. This ' + 'typically indicates an invalid region or an ' + 'incorrectly capitalized region name.') + + if typ == "container": + container(cf, module, container_, state, meta_, clear_meta, ttl, + public, private, web_index, web_error) + else: + meta(cf, module, container_, state, meta_, clear_meta) + + +def main(): + argument_spec = rax_argument_spec() + argument_spec.update( + dict( + container=dict(), + state=dict(choices=['present', 'absent', 'list'], + default='present'), + meta=dict(type='dict', default=dict()), + clear_meta=dict(default=False, type='bool'), + type=dict(choices=['container', 'meta'], default='container'), + ttl=dict(type='int'), + public=dict(default=False, type='bool'), + private=dict(default=False, type='bool'), + web_index=dict(), + web_error=dict() + ) + ) + + module = AnsibleModule( + argument_spec=argument_spec, + required_together=rax_required_together() + ) + + if not HAS_PYRAX: + module.fail_json(msg='pyrax is required for this module') + + container_ = module.params.get('container') + state = module.params.get('state') + meta_ = module.params.get('meta') + clear_meta = module.params.get('clear_meta') + typ = module.params.get('type') + ttl = module.params.get('ttl') + public = module.params.get('public') + private = module.params.get('private') + web_index = module.params.get('web_index') + web_error = module.params.get('web_error') + + if state in ['present', 'absent'] and not container_: + module.fail_json(msg='please specify a container name') + if clear_meta and not typ == 'meta': + module.fail_json(msg='clear_meta can only be used when setting ' + 'metadata') + + setup_rax_module(module, pyrax) + cloudfiles(module, container_, state, meta_, clear_meta, typ, ttl, public, + private, web_index, web_error) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/rackspace/rax_files_objects.py b/plugins/modules/cloud/rackspace/rax_files_objects.py new file mode 100644 index 0000000000..9cd72ac340 --- /dev/null +++ b/plugins/modules/cloud/rackspace/rax_files_objects.py @@ -0,0 +1,611 @@ +#!/usr/bin/python + +# (c) 2013, Paul Durivage +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: rax_files_objects +short_description: Upload, download, and delete objects in Rackspace Cloud Files +description: + - Upload, download, and delete objects in Rackspace Cloud Files +options: + clear_meta: + description: + - Optionally clear existing metadata when applying metadata to existing objects. 
+ Selecting this option is only appropriate when setting type=meta + type: bool + default: 'no' + container: + description: + - The container to use for file object operations. + required: true + dest: + description: + - The destination of a "get" operation; e.g. a local directory, "/home/user/myfolder". + Used to specify the destination of an operation on a remote object; e.g. a file name, + "file1", or a comma-separated list of remote objects, "file1,file2,file17" + expires: + description: + - Used to set an expiration on a file or folder uploaded to Cloud Files. + Requires an integer, specifying expiration in seconds + meta: + description: + - A hash of items to set as metadata values on an uploaded file or folder + method: + description: + - The method of operation to be performed. For example, put to upload files + to Cloud Files, get to download files from Cloud Files, or delete to delete + remote objects in Cloud Files + choices: + - get + - put + - delete + default: get + src: + description: + - Source from which to upload files. Used to specify a remote object as a source for + an operation, e.g. a file name, "file1", or a comma-separated list of remote objects, + "file1,file2,file17". src and dest are mutually exclusive on remote-only object operations + structure: + description: + - Used to specify whether to maintain nested directory structure when downloading objects + from Cloud Files. Setting to false downloads the contents of a container to a single, + flat directory + type: bool + default: 'yes' + state: + description: + - Indicate desired state of the resource + choices: ['present', 'absent'] + default: present + type: + description: + - Type of object to do work on + - Metadata object or a file object + choices: + - file + - meta + default: file +author: "Paul Durivage (@angstwad)" +extends_documentation_fragment: +- community.general.rackspace +- community.general.rackspace.openstack + +''' + +EXAMPLES = ''' +- name: "Test Cloud Files Objects" + hosts: local + gather_facts: False + tasks: + - name: "Get objects from test container" + rax_files_objects: + container: testcont + dest: ~/Downloads/testcont + + - name: "Get single object from test container" + rax_files_objects: + container: testcont + src: file1 + dest: ~/Downloads/testcont + + - name: "Get several objects from test container" + rax_files_objects: + container: testcont + src: file1,file2,file3 + dest: ~/Downloads/testcont + + - name: "Delete one object in test container" + rax_files_objects: + container: testcont + method: delete + dest: file1 + + - name: "Delete several objects in test container" + rax_files_objects: + container: testcont + method: delete + dest: file2,file3,file4 + + - name: "Delete all objects in test container" + rax_files_objects: + container: testcont + method: delete + + - name: "Upload all files to test container" + rax_files_objects: + container: testcont + method: put + src: ~/Downloads/onehundred + + - name: "Upload one file to test container" + rax_files_objects: + container: testcont + method: put + src: ~/Downloads/testcont/file1 + + - name: "Upload one file to test container with metadata" + rax_files_objects: + container: testcont + src: ~/Downloads/testcont/file2 + method: put + meta: + testkey: testdata + who_uploaded_this: someuser@example.com + + - name: "Upload one file to test container with TTL of 60 seconds" + rax_files_objects: + container: testcont + method: put + src: ~/Downloads/testcont/file3 + expires: 60 + + - name: "Attempt to get remote object that does not
exist" + rax_files_objects: + container: testcont + method: get + src: FileThatDoesNotExist.jpg + dest: ~/Downloads/testcont + ignore_errors: yes + + - name: "Attempt to delete remote object that does not exist" + rax_files_objects: + container: testcont + method: delete + dest: FileThatDoesNotExist.jpg + ignore_errors: yes + +- name: "Test Cloud Files Objects Metadata" + hosts: local + gather_facts: false + tasks: + - name: "Get metadata on one object" + rax_files_objects: + container: testcont + type: meta + dest: file2 + + - name: "Get metadata on several objects" + rax_files_objects: + container: testcont + type: meta + src: file2,file1 + + - name: "Set metadata on an object" + rax_files_objects: + container: testcont + type: meta + dest: file17 + method: put + meta: + key1: value1 + key2: value2 + clear_meta: true + + - name: "Verify metadata is set" + rax_files_objects: + container: testcont + type: meta + src: file17 + + - name: "Delete metadata" + rax_files_objects: + container: testcont + type: meta + dest: file17 + method: delete + meta: + key1: '' + key2: '' + + - name: "Get metadata on all objects" + rax_files_objects: + container: testcont + type: meta +''' + +import os + +try: + import pyrax + HAS_PYRAX = True +except ImportError: + HAS_PYRAX = False + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.rax import rax_argument_spec, rax_required_together, setup_rax_module + + +EXIT_DICT = dict(success=False) +META_PREFIX = 'x-object-meta-' + + +def _get_container(module, cf, container): + try: + return cf.get_container(container) + except pyrax.exc.NoSuchContainer as e: + module.fail_json(msg=e.message) + + +def _upload_folder(cf, folder, container, ttl=None, headers=None): + """ Uploads a folder to Cloud Files. + """ + total_bytes = 0 + for root, dirs, files in os.walk(folder): + for fname in files: + full_path = os.path.join(root, fname) + obj_name = os.path.relpath(full_path, folder) + obj_size = os.path.getsize(full_path) + cf.upload_file(container, full_path, + obj_name=obj_name, return_none=True, ttl=ttl, headers=headers) + total_bytes += obj_size + return total_bytes + + +def upload(module, cf, container, src, dest, meta, expires): + """ Uploads a single object or a folder to Cloud Files Optionally sets an + metadata, TTL value (expires), or Content-Disposition and Content-Encoding + headers. 
+ """ + if not src: + module.fail_json(msg='src must be specified when uploading') + + c = _get_container(module, cf, container) + src = os.path.abspath(os.path.expanduser(src)) + is_dir = os.path.isdir(src) + + if not is_dir and not os.path.isfile(src) or not os.path.exists(src): + module.fail_json(msg='src must be a file or a directory') + if dest and is_dir: + module.fail_json(msg='dest cannot be set when whole ' + 'directories are uploaded') + + cont_obj = None + total_bytes = 0 + if dest and not is_dir: + try: + cont_obj = c.upload_file(src, obj_name=dest, ttl=expires, headers=meta) + except Exception as e: + module.fail_json(msg=e.message) + elif is_dir: + try: + total_bytes = _upload_folder(cf, src, c, ttl=expires, headers=meta) + except Exception as e: + module.fail_json(msg=e.message) + else: + try: + cont_obj = c.upload_file(src, ttl=expires, headers=meta) + except Exception as e: + module.fail_json(msg=e.message) + + EXIT_DICT['success'] = True + EXIT_DICT['container'] = c.name + EXIT_DICT['msg'] = "Uploaded %s to container: %s" % (src, c.name) + if cont_obj or total_bytes > 0: + EXIT_DICT['changed'] = True + if meta: + EXIT_DICT['meta'] = dict(updated=True) + + if cont_obj: + EXIT_DICT['bytes'] = cont_obj.total_bytes + EXIT_DICT['etag'] = cont_obj.etag + else: + EXIT_DICT['bytes'] = total_bytes + + module.exit_json(**EXIT_DICT) + + +def download(module, cf, container, src, dest, structure): + """ Download objects from Cloud Files to a local path specified by "dest". + Optionally disable maintaining a directory structure by by passing a + false value to "structure". + """ + # Looking for an explicit destination + if not dest: + module.fail_json(msg='dest is a required argument when ' + 'downloading from Cloud Files') + + # Attempt to fetch the container by name + c = _get_container(module, cf, container) + + # Accept a single object name or a comma-separated list of objs + # If not specified, get the entire container + if src: + objs = src.split(',') + objs = map(str.strip, objs) + else: + objs = c.get_object_names() + + dest = os.path.abspath(os.path.expanduser(dest)) + is_dir = os.path.isdir(dest) + + if not is_dir: + module.fail_json(msg='dest must be a directory') + + results = [] + for obj in objs: + try: + c.download_object(obj, dest, structure=structure) + except Exception as e: + module.fail_json(msg=e.message) + else: + results.append(obj) + + len_results = len(results) + len_objs = len(objs) + + EXIT_DICT['container'] = c.name + EXIT_DICT['requested_downloaded'] = results + if results: + EXIT_DICT['changed'] = True + if len_results == len_objs: + EXIT_DICT['success'] = True + EXIT_DICT['msg'] = "%s objects downloaded to %s" % (len_results, dest) + else: + EXIT_DICT['msg'] = "Error: only %s of %s objects were " \ + "downloaded" % (len_results, len_objs) + module.exit_json(**EXIT_DICT) + + +def delete(module, cf, container, src, dest): + """ Delete specific objects by proving a single file name or a + comma-separated list to src OR dest (but not both). Omitting file name(s) + assumes the entire container is to be deleted. 
+ """ + objs = None + if src and dest: + module.fail_json(msg="Error: ambiguous instructions; files to be deleted " + "have been specified on both src and dest args") + elif dest: + objs = dest + else: + objs = src + + c = _get_container(module, cf, container) + + if objs: + objs = objs.split(',') + objs = map(str.strip, objs) + else: + objs = c.get_object_names() + + num_objs = len(objs) + + results = [] + for obj in objs: + try: + result = c.delete_object(obj) + except Exception as e: + module.fail_json(msg=e.message) + else: + results.append(result) + + num_deleted = results.count(True) + + EXIT_DICT['container'] = c.name + EXIT_DICT['deleted'] = num_deleted + EXIT_DICT['requested_deleted'] = objs + + if num_deleted: + EXIT_DICT['changed'] = True + + if num_objs == num_deleted: + EXIT_DICT['success'] = True + EXIT_DICT['msg'] = "%s objects deleted" % num_deleted + else: + EXIT_DICT['msg'] = ("Error: only %s of %s objects " + "deleted" % (num_deleted, num_objs)) + module.exit_json(**EXIT_DICT) + + +def get_meta(module, cf, container, src, dest): + """ Get metadata for a single file, comma-separated list, or entire + container + """ + c = _get_container(module, cf, container) + + objs = None + if src and dest: + module.fail_json(msg="Error: ambiguous instructions; files to be deleted " + "have been specified on both src and dest args") + elif dest: + objs = dest + else: + objs = src + + if objs: + objs = objs.split(',') + objs = map(str.strip, objs) + else: + objs = c.get_object_names() + + results = dict() + for obj in objs: + try: + meta = c.get_object(obj).get_metadata() + except Exception as e: + module.fail_json(msg=e.message) + else: + results[obj] = dict() + for k, v in meta.items(): + meta_key = k.split(META_PREFIX)[-1] + results[obj][meta_key] = v + + EXIT_DICT['container'] = c.name + if results: + EXIT_DICT['meta_results'] = results + EXIT_DICT['success'] = True + module.exit_json(**EXIT_DICT) + + +def put_meta(module, cf, container, src, dest, meta, clear_meta): + """ Set metadata on a container, single file, or comma-separated list. + Passing a true value to clear_meta clears the metadata stored in Cloud + Files before setting the new metadata to the value of "meta". + """ + objs = None + if src and dest: + module.fail_json(msg="Error: ambiguous instructions; files to set meta" + " have been specified on both src and dest args") + elif dest: + objs = dest + else: + objs = src + + objs = objs.split(',') + objs = map(str.strip, objs) + + c = _get_container(module, cf, container) + + results = [] + for obj in objs: + try: + result = c.get_object(obj).set_metadata(meta, clear=clear_meta) + except Exception as e: + module.fail_json(msg=e.message) + else: + results.append(result) + + EXIT_DICT['container'] = c.name + EXIT_DICT['success'] = True + if results: + EXIT_DICT['changed'] = True + EXIT_DICT['num_changed'] = True + module.exit_json(**EXIT_DICT) + + +def delete_meta(module, cf, container, src, dest, meta): + """ Removes metadata keys and values specified in meta, if any. 
Deletes on + all objects specified by src or dest (but not both), if any; otherwise it + deletes keys on all objects in the container + """ + objs = None + if src and dest: + module.fail_json(msg="Error: ambiguous instructions; meta keys to be " + "deleted have been specified on both src and dest" + " args") + elif dest: + objs = dest + else: + objs = src + + objs = objs.split(',') + objs = map(str.strip, objs) + + c = _get_container(module, cf, container) + + results = [] # Num of metadata keys removed, not objects affected + for obj in objs: + if meta: + for k, v in meta.items(): + try: + result = c.get_object(obj).remove_metadata_key(k) + except Exception as e: + module.fail_json(msg=e.message) + else: + results.append(result) + else: + try: + o = c.get_object(obj) + except pyrax.exc.NoSuchObject as e: + module.fail_json(msg=e.message) + + for k, v in o.get_metadata().items(): + try: + result = o.remove_metadata_key(k) + except Exception as e: + module.fail_json(msg=e.message) + results.append(result) + + EXIT_DICT['container'] = c.name + EXIT_DICT['success'] = True + if results: + EXIT_DICT['changed'] = True + EXIT_DICT['num_deleted'] = len(results) + module.exit_json(**EXIT_DICT) + + +def cloudfiles(module, container, src, dest, method, typ, meta, clear_meta, + structure, expires): + """ Dispatch from here to work with metadata or file objects """ + cf = pyrax.cloudfiles + + if cf is None: + module.fail_json(msg='Failed to instantiate client. This ' + 'typically indicates an invalid region or an ' + 'incorrectly capitalized region name.') + + if typ == "file": + if method == 'put': + upload(module, cf, container, src, dest, meta, expires) + + elif method == 'get': + download(module, cf, container, src, dest, structure) + + elif method == 'delete': + delete(module, cf, container, src, dest) + + else: + if method == 'get': + get_meta(module, cf, container, src, dest) + + if method == 'put': + put_meta(module, cf, container, src, dest, meta, clear_meta) + + if method == 'delete': + delete_meta(module, cf, container, src, dest, meta) + + +def main(): + argument_spec = rax_argument_spec() + argument_spec.update( + dict( + container=dict(required=True), + src=dict(), + dest=dict(), + method=dict(default='get', choices=['put', 'get', 'delete']), + type=dict(default='file', choices=['file', 'meta']), + meta=dict(type='dict', default=dict()), + clear_meta=dict(default=False, type='bool'), + structure=dict(default=True, type='bool'), + expires=dict(type='int'), + ) + ) + + module = AnsibleModule( + argument_spec=argument_spec, + required_together=rax_required_together() + ) + + if not HAS_PYRAX: + module.fail_json(msg='pyrax is required for this module') + + container = module.params.get('container') + src = module.params.get('src') + dest = module.params.get('dest') + method = module.params.get('method') + typ = module.params.get('type') + meta = module.params.get('meta') + clear_meta = module.params.get('clear_meta') + structure = module.params.get('structure') + expires = module.params.get('expires') + + if clear_meta and not typ == 'meta': + module.fail_json(msg='clear_meta can only be used when setting metadata') + + setup_rax_module(module, pyrax) + cloudfiles(module, container, src, dest, method, typ, meta, clear_meta, structure, expires) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/rackspace/rax_identity.py b/plugins/modules/cloud/rackspace/rax_identity.py new file mode 100644 index 0000000000..d9a39dc6cd --- /dev/null +++ 
b/plugins/modules/cloud/rackspace/rax_identity.py @@ -0,0 +1,106 @@ +#!/usr/bin/python +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: rax_identity +short_description: Load Rackspace Cloud Identity +description: + - Verifies Rackspace Cloud credentials and returns identity information +options: + state: + description: + - Indicate desired state of the resource + choices: ['present', 'absent'] + default: present + required: false +author: + - "Christopher H. Laco (@claco)" + - "Matt Martz (@sivel)" +extends_documentation_fragment: +- community.general.rackspace.openstack + +''' + +EXAMPLES = ''' +- name: Load Rackspace Cloud Identity + gather_facts: False + hosts: local + connection: local + tasks: + - name: Load Identity + local_action: + module: rax_identity + credentials: ~/.raxpub + region: DFW + register: rackspace_identity +''' + +try: + import pyrax + HAS_PYRAX = True +except ImportError: + HAS_PYRAX = False + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.rax import (rax_argument_spec, rax_required_together, rax_to_dict, + setup_rax_module) + + +def cloud_identity(module, state, identity): + instance = dict( + authenticated=identity.authenticated, + credentials=identity._creds_file + ) + changed = False + + instance.update(rax_to_dict(identity)) + instance['services'] = instance.get('services', {}).keys() + + if state == 'present': + if not identity.authenticated: + module.fail_json(msg='Credentials could not be verified!') + + module.exit_json(changed=changed, identity=instance) + + +def main(): + argument_spec = rax_argument_spec() + argument_spec.update( + dict( + state=dict(default='present', choices=['present']) + ) + ) + + module = AnsibleModule( + argument_spec=argument_spec, + required_together=rax_required_together() + ) + + if not HAS_PYRAX: + module.fail_json(msg='pyrax is required for this module') + + state = module.params.get('state') + + setup_rax_module(module, pyrax) + + if not pyrax.identity: + module.fail_json(msg='Failed to instantiate client. This ' + 'typically indicates an invalid region or an ' + 'incorrectly capitalized region name.') + + cloud_identity(module, state, pyrax.identity) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/rackspace/rax_keypair.py b/plugins/modules/cloud/rackspace/rax_keypair.py new file mode 100644 index 0000000000..90d1be93d9 --- /dev/null +++ b/plugins/modules/cloud/rackspace/rax_keypair.py @@ -0,0 +1,173 @@ +#!/usr/bin/python +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: rax_keypair +short_description: Create a keypair for use with Rackspace Cloud Servers +description: + - Create a keypair for use with Rackspace Cloud Servers +options: + name: + description: + - Name of keypair + required: true + public_key: + description: + - Public Key string to upload. 
Can be a file path or string + state: + description: + - Indicate desired state of the resource + choices: + - present + - absent + default: present +author: "Matt Martz (@sivel)" +notes: + - Keypairs cannot be manipulated, only created and deleted. To "update" a + keypair you must first delete and then recreate. + - The ability to specify a file path for the public key was added in 1.7 +extends_documentation_fragment: +- community.general.rackspace.openstack + +''' + +EXAMPLES = ''' +- name: Create a keypair + hosts: localhost + gather_facts: False + tasks: + - name: keypair request + local_action: + module: rax_keypair + credentials: ~/.raxpub + name: my_keypair + region: DFW + register: keypair + - name: Create local public key + local_action: + module: copy + content: "{{ keypair.keypair.public_key }}" + dest: "{{ inventory_dir }}/{{ keypair.keypair.name }}.pub" + - name: Create local private key + local_action: + module: copy + content: "{{ keypair.keypair.private_key }}" + dest: "{{ inventory_dir }}/{{ keypair.keypair.name }}" + +- name: Create a keypair + hosts: localhost + gather_facts: False + tasks: + - name: keypair request + local_action: + module: rax_keypair + credentials: ~/.raxpub + name: my_keypair + public_key: "{{ lookup('file', 'authorized_keys/id_rsa.pub') }}" + region: DFW + register: keypair +''' +import os + +try: + import pyrax + HAS_PYRAX = True +except ImportError: + HAS_PYRAX = False + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.rax import (rax_argument_spec, + rax_required_together, + rax_to_dict, + setup_rax_module, + ) + + +def rax_keypair(module, name, public_key, state): + changed = False + + cs = pyrax.cloudservers + + if cs is None: + module.fail_json(msg='Failed to instantiate client. 
This ' + 'typically indicates an invalid region or an ' + 'incorrectly capitalized region name.') + + keypair = {} + + if state == 'present': + if public_key and os.path.isfile(public_key): + try: + f = open(public_key) + public_key = f.read() + f.close() + except Exception as e: + module.fail_json(msg='Failed to load %s' % public_key) + + try: + keypair = cs.keypairs.find(name=name) + except cs.exceptions.NotFound: + try: + keypair = cs.keypairs.create(name, public_key) + changed = True + except Exception as e: + module.fail_json(msg='%s' % e.message) + except Exception as e: + module.fail_json(msg='%s' % e.message) + + elif state == 'absent': + try: + keypair = cs.keypairs.find(name=name) + except Exception: + pass + + if keypair: + try: + keypair.delete() + changed = True + except Exception as e: + module.fail_json(msg='%s' % e.message) + + module.exit_json(changed=changed, keypair=rax_to_dict(keypair)) + + +def main(): + argument_spec = rax_argument_spec() + argument_spec.update( + dict( + name=dict(required=True), + public_key=dict(), + state=dict(default='present', choices=['absent', 'present']), + ) + ) + + module = AnsibleModule( + argument_spec=argument_spec, + required_together=rax_required_together(), + ) + + if not HAS_PYRAX: + module.fail_json(msg='pyrax is required for this module') + + name = module.params.get('name') + public_key = module.params.get('public_key') + state = module.params.get('state') + + setup_rax_module(module, pyrax) + + rax_keypair(module, name, public_key, state) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/rackspace/rax_meta.py b/plugins/modules/cloud/rackspace/rax_meta.py new file mode 100644 index 0000000000..dbc0849981 --- /dev/null +++ b/plugins/modules/cloud/rackspace/rax_meta.py @@ -0,0 +1,174 @@ +#!/usr/bin/python +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: rax_meta +short_description: Manipulate metadata for Rackspace Cloud Servers +description: + - Manipulate metadata for Rackspace Cloud Servers +options: + address: + description: + - Server IP address to modify metadata for, will match any IP assigned to + the server + id: + description: + - Server ID to modify metadata for + name: + description: + - Server name to modify metadata for + meta: + description: + - A hash of metadata to associate with the instance +author: "Matt Martz (@sivel)" +extends_documentation_fragment: +- community.general.rackspace.openstack + +''' + +EXAMPLES = ''' +- name: Set metadata for a server + hosts: all + gather_facts: False + tasks: + - name: Set metadata + local_action: + module: rax_meta + credentials: ~/.raxpub + name: "{{ inventory_hostname }}" + region: DFW + meta: + group: primary_group + groups: + - group_two + - group_three + app: my_app + + - name: Clear metadata + local_action: + module: rax_meta + credentials: ~/.raxpub + name: "{{ inventory_hostname }}" + region: DFW +''' + +import json + +try: + import pyrax + HAS_PYRAX = True +except ImportError: + HAS_PYRAX = False + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.rax import rax_argument_spec, rax_required_together, setup_rax_module +from ansible.module_utils.six import string_types + 
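+# A minimal illustrative sketch (not used by the module) of the value
+# normalization rax_meta() applies below: lists are joined with commas,
+# dicts are JSON-encoded, and other non-strings are coerced to str.
+def _example_normalize_meta(meta):
+    normalized = {}
+    for k, v in meta.items():
+        if isinstance(v, list):
+            normalized[k] = ','.join('%s' % i for i in v)
+        elif isinstance(v, dict):
+            normalized[k] = json.dumps(v)
+        elif not isinstance(v, string_types):
+            normalized[k] = '%s' % v
+        else:
+            normalized[k] = v
+    return normalized
+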
+ +def rax_meta(module, address, name, server_id, meta): + changed = False + + cs = pyrax.cloudservers + + if cs is None: + module.fail_json(msg='Failed to instantiate client. This ' + 'typically indicates an invalid region or an ' + 'incorrectly capitalized region name.') + + search_opts = {} + if name: + search_opts = dict(name='^%s$' % name) + try: + servers = cs.servers.list(search_opts=search_opts) + except Exception as e: + module.fail_json(msg='%s' % e.message) + elif address: + servers = [] + try: + for server in cs.servers.list(): + for addresses in server.networks.values(): + if address in addresses: + servers.append(server) + break + except Exception as e: + module.fail_json(msg='%s' % e.message) + elif server_id: + servers = [] + try: + servers.append(cs.servers.get(server_id)) + except Exception as e: + pass + + if len(servers) > 1: + module.fail_json(msg='Multiple servers found matching provided ' + 'search parameters') + elif not servers: + module.fail_json(msg='Failed to find a server matching provided ' + 'search parameters') + + # Normalize and ensure all metadata values are strings + for k, v in meta.items(): + if isinstance(v, list): + meta[k] = ','.join(['%s' % i for i in v]) + elif isinstance(v, dict): + meta[k] = json.dumps(v) + elif not isinstance(v, string_types): + meta[k] = '%s' % v + + server = servers[0] + if server.metadata == meta: + changed = False + else: + changed = True + removed = set(server.metadata.keys()).difference(meta.keys()) + cs.servers.delete_meta(server, list(removed)) + cs.servers.set_meta(server, meta) + server.get() + + module.exit_json(changed=changed, meta=server.metadata) + + +def main(): + argument_spec = rax_argument_spec() + argument_spec.update( + dict( + address=dict(), + id=dict(), + name=dict(), + meta=dict(type='dict', default=dict()), + ) + ) + + module = AnsibleModule( + argument_spec=argument_spec, + required_together=rax_required_together(), + mutually_exclusive=[['address', 'id', 'name']], + required_one_of=[['address', 'id', 'name']], + ) + + if not HAS_PYRAX: + module.fail_json(msg='pyrax is required for this module') + + address = module.params.get('address') + server_id = module.params.get('id') + name = module.params.get('name') + meta = module.params.get('meta') + + setup_rax_module(module, pyrax) + + rax_meta(module, address, name, server_id, meta) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/rackspace/rax_mon_alarm.py b/plugins/modules/cloud/rackspace/rax_mon_alarm.py new file mode 100644 index 0000000000..9ccfa09dec --- /dev/null +++ b/plugins/modules/cloud/rackspace/rax_mon_alarm.py @@ -0,0 +1,224 @@ +#!/usr/bin/python +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: rax_mon_alarm +short_description: Create or delete a Rackspace Cloud Monitoring alarm. +description: +- Create or delete a Rackspace Cloud Monitoring alarm that associates an + existing rax_mon_entity, rax_mon_check, and rax_mon_notification_plan with + criteria that specify what conditions will trigger which levels of + notifications. 
Rackspace monitoring module flow | rax_mon_entity -> + rax_mon_check -> rax_mon_notification -> rax_mon_notification_plan -> + *rax_mon_alarm* +options: + state: + description: + - Ensure that the alarm with this C(label) exists or does not exist. + choices: [ "present", "absent" ] + required: false + default: present + label: + description: + - Friendly name for this alarm, used to achieve idempotence. Must be a String + between 1 and 255 characters long. + required: true + entity_id: + description: + - ID of the entity this alarm is attached to. May be acquired by registering + the value of a rax_mon_entity task. + required: true + check_id: + description: + - ID of the check that should be alerted on. May be acquired by registering + the value of a rax_mon_check task. + required: true + notification_plan_id: + description: + - ID of the notification plan to trigger if this alarm fires. May be acquired + by registering the value of a rax_mon_notification_plan task. + required: true + criteria: + description: + - Alarm DSL that describes alerting conditions and their output states. Must + be between 1 and 16384 characters long. See + http://docs.rackspace.com/cm/api/v1.0/cm-devguide/content/alerts-language.html + for a reference on the alerting language. + disabled: + description: + - If yes, create this alarm, but leave it in an inactive state. Defaults to + no. + type: bool + metadata: + description: + - Arbitrary key/value pairs to accompany the alarm. Must be a hash of String + keys and values between 1 and 255 characters long. +author: Ash Wilson (@smashwilson) +extends_documentation_fragment: +- community.general.rackspace.openstack + +''' + +EXAMPLES = ''' +- name: Alarm example + gather_facts: False + hosts: local + connection: local + tasks: + - name: Ensure that a specific alarm exists. + rax_mon_alarm: + credentials: ~/.rax_pub + state: present + label: uhoh + entity_id: "{{ the_entity['entity']['id'] }}" + check_id: "{{ the_check['check']['id'] }}" + notification_plan_id: "{{ defcon1['notification_plan']['id'] }}" + criteria: > + if (rate(metric['average']) > 10) { + return new AlarmStatus(WARNING); + } + return new AlarmStatus(OK); + register: the_alarm +''' + +try: + import pyrax + HAS_PYRAX = True +except ImportError: + HAS_PYRAX = False + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.rax import rax_argument_spec, rax_required_together, setup_rax_module + + +def alarm(module, state, label, entity_id, check_id, notification_plan_id, criteria, + disabled, metadata): + + if len(label) < 1 or len(label) > 255: + module.fail_json(msg='label must be between 1 and 255 characters long') + + if criteria and (len(criteria) < 1 or len(criteria) > 16384): + module.fail_json(msg='criteria must be between 1 and 16384 characters long') + + # Coerce attributes. + + changed = False + alarm = None + + cm = pyrax.cloud_monitoring + if not cm: + module.fail_json(msg='Failed to instantiate client. This typically ' + 'indicates an invalid region or an incorrectly ' + 'capitalized region name.') + + existing = [a for a in cm.list_alarms(entity_id) if a.label == label] + + if existing: + alarm = existing[0] + + if state == 'present': + should_create = False + should_update = False + should_delete = False + + if len(existing) > 1: + module.fail_json(msg='%s existing alarms have the label %s.'
% + (len(existing), label)) + + if alarm: + if check_id != alarm.check_id or notification_plan_id != alarm.notification_plan_id: + should_delete = should_create = True + + should_update = (disabled and disabled != alarm.disabled) or \ + (metadata and metadata != alarm.metadata) or \ + (criteria and criteria != alarm.criteria) + + if should_update and not should_delete: + cm.update_alarm(entity=entity_id, alarm=alarm, + criteria=criteria, disabled=disabled, + label=label, metadata=metadata) + changed = True + + if should_delete: + alarm.delete() + changed = True + else: + should_create = True + + if should_create: + alarm = cm.create_alarm(entity=entity_id, check=check_id, + notification_plan=notification_plan_id, + criteria=criteria, disabled=disabled, label=label, + metadata=metadata) + changed = True + else: + for a in existing: + a.delete() + changed = True + + if alarm: + alarm_dict = { + "id": alarm.id, + "label": alarm.label, + "check_id": alarm.check_id, + "notification_plan_id": alarm.notification_plan_id, + "criteria": alarm.criteria, + "disabled": alarm.disabled, + "metadata": alarm.metadata + } + module.exit_json(changed=changed, alarm=alarm_dict) + else: + module.exit_json(changed=changed) + + +def main(): + argument_spec = rax_argument_spec() + argument_spec.update( + dict( + state=dict(default='present', choices=['present', 'absent']), + label=dict(required=True), + entity_id=dict(required=True), + check_id=dict(required=True), + notification_plan_id=dict(required=True), + criteria=dict(), + disabled=dict(type='bool', default=False), + metadata=dict(type='dict') + ) + ) + + module = AnsibleModule( + argument_spec=argument_spec, + required_together=rax_required_together() + ) + + if not HAS_PYRAX: + module.fail_json(msg='pyrax is required for this module') + + state = module.params.get('state') + label = module.params.get('label') + entity_id = module.params.get('entity_id') + check_id = module.params.get('check_id') + notification_plan_id = module.params.get('notification_plan_id') + criteria = module.params.get('criteria') + disabled = module.boolean(module.params.get('disabled')) + metadata = module.params.get('metadata') + + setup_rax_module(module, pyrax) + + alarm(module, state, label, entity_id, check_id, notification_plan_id, + criteria, disabled, metadata) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/rackspace/rax_mon_check.py b/plugins/modules/cloud/rackspace/rax_mon_check.py new file mode 100644 index 0000000000..331fb994eb --- /dev/null +++ b/plugins/modules/cloud/rackspace/rax_mon_check.py @@ -0,0 +1,310 @@ +#!/usr/bin/python +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: rax_mon_check +short_description: Create or delete a Rackspace Cloud Monitoring check for an + existing entity. +description: +- Create or delete a Rackspace Cloud Monitoring check associated with an + existing rax_mon_entity. A check is a specific test or measurement that is + performed, possibly from different monitoring zones, on the systems you + monitor. 
Rackspace monitoring module flow | rax_mon_entity -> + *rax_mon_check* -> rax_mon_notification -> rax_mon_notification_plan -> + rax_mon_alarm +options: + state: + description: + - Ensure that a check with this C(label) exists or does not exist. + choices: ["present", "absent"] + entity_id: + description: + - ID of the rax_mon_entity to target with this check. + required: true + label: + description: + - Defines a label for this check, between 1 and 64 characters long. + required: true + check_type: + description: + - The type of check to create. C(remote.) checks may be created on any + rax_mon_entity. C(agent.) checks may only be created on rax_mon_entities + that have a non-null C(agent_id). + choices: + - remote.dns + - remote.ftp-banner + - remote.http + - remote.imap-banner + - remote.mssql-banner + - remote.mysql-banner + - remote.ping + - remote.pop3-banner + - remote.postgresql-banner + - remote.smtp-banner + - remote.smtp + - remote.ssh + - remote.tcp + - remote.telnet-banner + - agent.filesystem + - agent.memory + - agent.load_average + - agent.cpu + - agent.disk + - agent.network + - agent.plugin + required: true + monitoring_zones_poll: + description: + - Comma-separated list of the names of the monitoring zones the check should + run from. Available monitoring zones include mzdfw, mzhkg, mziad, mzlon, + mzord and mzsyd. Required for remote.* checks; prohibited for agent.* checks. + target_hostname: + description: + - One of `target_hostname` and `target_alias` is required for remote.* checks, + but prohibited for agent.* checks. The hostname this check should target. + Must be a valid IPv4, IPv6, or FQDN. + target_alias: + description: + - One of `target_alias` and `target_hostname` is required for remote.* checks, + but prohibited for agent.* checks. Use the corresponding key in the entity's + `ip_addresses` hash to resolve an IP address to target. + details: + description: + - Additional details specific to the check type. Must be a hash of strings + between 1 and 255 characters long, or an array or object containing 0 to + 256 items. + disabled: + description: + - If "yes", ensure the check is created, but don't actually use it yet. + type: bool + metadata: + description: + - Hash of arbitrary key-value pairs to accompany this check if it fires. + Keys and values must be strings between 1 and 255 characters long. + period: + description: + - The number of seconds between each time the check is performed. Must be + greater than the minimum period set on your account. + timeout: + description: + - The number of seconds this check will wait when attempting to collect + results. Must be less than the period. +author: Ash Wilson (@smashwilson) +extends_documentation_fragment: +- community.general.rackspace.openstack + +''' + +EXAMPLES = ''' +- name: Create a monitoring check + gather_facts: False + hosts: local + connection: local + tasks: + - name: Associate a check with an existing entity. 
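+ # entity_id below is assumed to come from a registered rax_mon_entity task.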
+ rax_mon_check: + credentials: ~/.rax_pub + state: present + entity_id: "{{ the_entity['entity']['id'] }}" + label: the_check + check_type: remote.ping + monitoring_zones_poll: mziad,mzord,mzdfw + details: + count: 10 + meta: + hurf: durf + register: the_check +''' + +try: + import pyrax + HAS_PYRAX = True +except ImportError: + HAS_PYRAX = False + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.rax import rax_argument_spec, rax_required_together, setup_rax_module + + +def cloud_check(module, state, entity_id, label, check_type, + monitoring_zones_poll, target_hostname, target_alias, details, + disabled, metadata, period, timeout): + + # Coerce attributes. + + if monitoring_zones_poll and not isinstance(monitoring_zones_poll, list): + monitoring_zones_poll = [monitoring_zones_poll] + + if period: + period = int(period) + + if timeout: + timeout = int(timeout) + + changed = False + check = None + + cm = pyrax.cloud_monitoring + if not cm: + module.fail_json(msg='Failed to instantiate client. This typically ' + 'indicates an invalid region or an incorrectly ' + 'capitalized region name.') + + entity = cm.get_entity(entity_id) + if not entity: + module.fail_json(msg='Failed to instantiate entity. "%s" may not be' + ' a valid entity id.' % entity_id) + + existing = [e for e in entity.list_checks() if e.label == label] + + if existing: + check = existing[0] + + if state == 'present': + if len(existing) > 1: + module.fail_json(msg='%s existing checks have a label of %s.' % + (len(existing), label)) + + should_delete = False + should_create = False + should_update = False + + if check: + # Details may include keys set to default values that are not + # included in the initial creation. + # + # Only force a recreation of the check if one of the *specified* + # keys is missing or has a different value. + if details: + for (key, value) in details.items(): + if key not in check.details: + should_delete = should_create = True + elif value != check.details[key]: + should_delete = should_create = True + + should_update = label != check.label or \ + (target_hostname and target_hostname != check.target_hostname) or \ + (target_alias and target_alias != check.target_alias) or \ + (disabled != check.disabled) or \ + (metadata and metadata != check.metadata) or \ + (period and period != check.period) or \ + (timeout and timeout != check.timeout) or \ + (monitoring_zones_poll and monitoring_zones_poll != check.monitoring_zones_poll) + + if should_update and not should_delete: + check.update(label=label, + disabled=disabled, + metadata=metadata, + monitoring_zones_poll=monitoring_zones_poll, + timeout=timeout, + period=period, + target_alias=target_alias, + target_hostname=target_hostname) + changed = True + else: + # The check doesn't exist yet. 
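+ # No existing check matched the label; fall through to cm.create_check() below.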
+ should_create = True + + if should_delete: + check.delete() + + if should_create: + check = cm.create_check(entity, + label=label, + check_type=check_type, + target_hostname=target_hostname, + target_alias=target_alias, + monitoring_zones_poll=monitoring_zones_poll, + details=details, + disabled=disabled, + metadata=metadata, + period=period, + timeout=timeout) + changed = True + elif state == 'absent': + if check: + check.delete() + changed = True + else: + module.fail_json(msg='state must be either present or absent.') + + if check: + check_dict = { + "id": check.id, + "label": check.label, + "type": check.type, + "target_hostname": check.target_hostname, + "target_alias": check.target_alias, + "monitoring_zones_poll": check.monitoring_zones_poll, + "details": check.details, + "disabled": check.disabled, + "metadata": check.metadata, + "period": check.period, + "timeout": check.timeout + } + module.exit_json(changed=changed, check=check_dict) + else: + module.exit_json(changed=changed) + + +def main(): + argument_spec = rax_argument_spec() + argument_spec.update( + dict( + entity_id=dict(required=True), + label=dict(required=True), + check_type=dict(required=True), + monitoring_zones_poll=dict(), + target_hostname=dict(), + target_alias=dict(), + details=dict(type='dict', default={}), + disabled=dict(type='bool', default=False), + metadata=dict(type='dict', default={}), + period=dict(type='int'), + timeout=dict(type='int'), + state=dict(default='present', choices=['present', 'absent']) + ) + ) + + module = AnsibleModule( + argument_spec=argument_spec, + required_together=rax_required_together() + ) + + if not HAS_PYRAX: + module.fail_json(msg='pyrax is required for this module') + + entity_id = module.params.get('entity_id') + label = module.params.get('label') + check_type = module.params.get('check_type') + monitoring_zones_poll = module.params.get('monitoring_zones_poll') + target_hostname = module.params.get('target_hostname') + target_alias = module.params.get('target_alias') + details = module.params.get('details') + disabled = module.boolean(module.params.get('disabled')) + metadata = module.params.get('metadata') + period = module.params.get('period') + timeout = module.params.get('timeout') + + state = module.params.get('state') + + setup_rax_module(module, pyrax) + + cloud_check(module, state, entity_id, label, check_type, + monitoring_zones_poll, target_hostname, target_alias, details, + disabled, metadata, period, timeout) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/rackspace/rax_mon_entity.py b/plugins/modules/cloud/rackspace/rax_mon_entity.py new file mode 100644 index 0000000000..759c3fbd3b --- /dev/null +++ b/plugins/modules/cloud/rackspace/rax_mon_entity.py @@ -0,0 +1,190 @@ +#!/usr/bin/python +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: rax_mon_entity +short_description: Create or delete a Rackspace Cloud Monitoring entity +description: +- Create or delete a Rackspace Cloud Monitoring entity, which represents a device + to monitor. Entities associate checks and alarms with a target system and + provide a convenient, centralized place to store IP addresses. 
Rackspace + monitoring module flow | *rax_mon_entity* -> rax_mon_check -> + rax_mon_notification -> rax_mon_notification_plan -> rax_mon_alarm +options: + label: + description: + - Defines a name for this entity. Must be a non-empty string between 1 and + 255 characters long. + required: true + state: + description: + - Ensure that an entity with this C(name) exists or does not exist. + choices: ["present", "absent"] + agent_id: + description: + - Rackspace monitoring agent on the target device to which this entity is + bound. Necessary to collect C(agent.) rax_mon_checks against this entity. + named_ip_addresses: + description: + - Hash of IP addresses that may be referenced by name by rax_mon_checks + added to this entity. Must be a dictionary of with keys that are names + between 1 and 64 characters long, and values that are valid IPv4 or IPv6 + addresses. + metadata: + description: + - Hash of arbitrary C(name), C(value) pairs that are passed to associated + rax_mon_alarms. Names and values must all be between 1 and 255 characters + long. +author: Ash Wilson (@smashwilson) +extends_documentation_fragment: +- community.general.rackspace.openstack + +''' + +EXAMPLES = ''' +- name: Entity example + gather_facts: False + hosts: local + connection: local + tasks: + - name: Ensure an entity exists + rax_mon_entity: + credentials: ~/.rax_pub + state: present + label: my_entity + named_ip_addresses: + web_box: 192.0.2.4 + db_box: 192.0.2.5 + meta: + hurf: durf + register: the_entity +''' + +try: + import pyrax + HAS_PYRAX = True +except ImportError: + HAS_PYRAX = False + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.rax import rax_argument_spec, rax_required_together, setup_rax_module + + +def cloud_monitoring(module, state, label, agent_id, named_ip_addresses, + metadata): + + if len(label) < 1 or len(label) > 255: + module.fail_json(msg='label must be between 1 and 255 characters long') + + changed = False + + cm = pyrax.cloud_monitoring + if not cm: + module.fail_json(msg='Failed to instantiate client. This typically ' + 'indicates an invalid region or an incorrectly ' + 'capitalized region name.') + + existing = [] + for entity in cm.list_entities(): + if label == entity.label: + existing.append(entity) + + entity = None + + if existing: + entity = existing[0] + + if state == 'present': + should_update = False + should_delete = False + should_create = False + + if len(existing) > 1: + module.fail_json(msg='%s existing entities have the label %s.' % + (len(existing), label)) + + if entity: + if named_ip_addresses and named_ip_addresses != entity.ip_addresses: + should_delete = should_create = True + + # Change an existing Entity, unless there's nothing to do. + should_update = agent_id and agent_id != entity.agent_id or \ + (metadata and metadata != entity.metadata) + + if should_update and not should_delete: + entity.update(agent_id, metadata) + changed = True + + if should_delete: + entity.delete() + else: + should_create = True + + if should_create: + # Create a new Entity. + entity = cm.create_entity(label=label, agent=agent_id, + ip_addresses=named_ip_addresses, + metadata=metadata) + changed = True + else: + # Delete the existing Entities. 
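+ # state == 'absent': remove every entity whose label matched.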
+ for e in existing: + e.delete() + changed = True + + if entity: + entity_dict = { + "id": entity.id, + "name": entity.name, + "agent_id": entity.agent_id, + } + module.exit_json(changed=changed, entity=entity_dict) + else: + module.exit_json(changed=changed) + + +def main(): + argument_spec = rax_argument_spec() + argument_spec.update( + dict( + state=dict(default='present', choices=['present', 'absent']), + label=dict(required=True), + agent_id=dict(), + named_ip_addresses=dict(type='dict', default={}), + metadata=dict(type='dict', default={}) + ) + ) + + module = AnsibleModule( + argument_spec=argument_spec, + required_together=rax_required_together() + ) + + if not HAS_PYRAX: + module.fail_json(msg='pyrax is required for this module') + + state = module.params.get('state') + + label = module.params.get('label') + agent_id = module.params.get('agent_id') + named_ip_addresses = module.params.get('named_ip_addresses') + metadata = module.params.get('metadata') + + setup_rax_module(module, pyrax) + + cloud_monitoring(module, state, label, agent_id, named_ip_addresses, metadata) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/rackspace/rax_mon_notification.py b/plugins/modules/cloud/rackspace/rax_mon_notification.py new file mode 100644 index 0000000000..f8d67ac60d --- /dev/null +++ b/plugins/modules/cloud/rackspace/rax_mon_notification.py @@ -0,0 +1,174 @@ +#!/usr/bin/python +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: rax_mon_notification +short_description: Create or delete a Rackspace Cloud Monitoring notification. +description: +- Create or delete a Rackspace Cloud Monitoring notification that specifies a + channel that can be used to communicate alarms, such as email, webhooks, or + PagerDuty. Rackspace monitoring module flow | rax_mon_entity -> rax_mon_check -> + *rax_mon_notification* -> rax_mon_notification_plan -> rax_mon_alarm +options: + state: + description: + - Ensure that the notification with this C(label) exists or does not exist. + choices: ['present', 'absent'] + label: + description: + - Defines a friendly name for this notification. String between 1 and 255 + characters long. + required: true + notification_type: + description: + - A supported notification type. + choices: ["webhook", "email", "pagerduty"] + required: true + details: + description: + - Dictionary of key-value pairs used to initialize the notification. + Required keys and meanings vary with notification type. See + http://docs.rackspace.com/cm/api/v1.0/cm-devguide/content/ + service-notification-types-crud.html for details. + required: true +author: Ash Wilson (@smashwilson) +extends_documentation_fragment: +- community.general.rackspace.openstack + +''' + +EXAMPLES = ''' +- name: Monitoring notification example + gather_facts: False + hosts: local + connection: local + tasks: + - name: Email me when something goes wrong. 
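+ # The keys required under details vary with the notification_type; email uses address.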
+ rax_mon_notification: + credentials: ~/.rax_pub + label: omg + notification_type: email + details: + address: me@mailhost.com + register: the_notification +''' + +try: + import pyrax + HAS_PYRAX = True +except ImportError: + HAS_PYRAX = False + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.rax import rax_argument_spec, rax_required_together, setup_rax_module + + +def notification(module, state, label, notification_type, details): + + if len(label) < 1 or len(label) > 255: + module.fail_json(msg='label must be between 1 and 255 characters long') + + changed = False + notification = None + + cm = pyrax.cloud_monitoring + if not cm: + module.fail_json(msg='Failed to instantiate client. This typically ' + 'indicates an invalid region or an incorrectly ' + 'capitalized region name.') + + existing = [] + for n in cm.list_notifications(): + if n.label == label: + existing.append(n) + + if existing: + notification = existing[0] + + if state == 'present': + should_update = False + should_delete = False + should_create = False + + if len(existing) > 1: + module.fail_json(msg='%s existing notifications are labelled %s.' % + (len(existing), label)) + + if notification: + should_delete = (notification_type != notification.type) + + should_update = (details != notification.details) + + if should_update and not should_delete: + notification.update(details=details) + changed = True + + if should_delete: + notification.delete() + else: + should_create = True + + if should_create: + notification = cm.create_notification(notification_type, + label=label, details=details) + changed = True + else: + for n in existing: + n.delete() + changed = True + + if notification: + notification_dict = { + "id": notification.id, + "type": notification.type, + "label": notification.label, + "details": notification.details + } + module.exit_json(changed=changed, notification=notification_dict) + else: + module.exit_json(changed=changed) + + +def main(): + argument_spec = rax_argument_spec() + argument_spec.update( + dict( + state=dict(default='present', choices=['present', 'absent']), + label=dict(required=True), + notification_type=dict(required=True, choices=['webhook', 'email', 'pagerduty']), + details=dict(required=True, type='dict') + ) + ) + + module = AnsibleModule( + argument_spec=argument_spec, + required_together=rax_required_together() + ) + + if not HAS_PYRAX: + module.fail_json(msg='pyrax is required for this module') + + state = module.params.get('state') + + label = module.params.get('label') + notification_type = module.params.get('notification_type') + details = module.params.get('details') + + setup_rax_module(module, pyrax) + + notification(module, state, label, notification_type, details) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/rackspace/rax_mon_notification_plan.py b/plugins/modules/cloud/rackspace/rax_mon_notification_plan.py new file mode 100644 index 0000000000..d44535f4f3 --- /dev/null +++ b/plugins/modules/cloud/rackspace/rax_mon_notification_plan.py @@ -0,0 +1,179 @@ +#!/usr/bin/python +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: rax_mon_notification_plan +short_description: Create or
delete a Rackspace Cloud Monitoring notification + plan. +description: +- Create or delete a Rackspace Cloud Monitoring notification plan by + associating existing rax_mon_notifications with severity levels. Rackspace + monitoring module flow | rax_mon_entity -> rax_mon_check -> + rax_mon_notification -> *rax_mon_notification_plan* -> rax_mon_alarm +options: + state: + description: + - Ensure that the notification plan with this C(label) exists or does not + exist. + choices: ['present', 'absent'] + label: + description: + - Defines a friendly name for this notification plan. String between 1 and + 255 characters long. + required: true + critical_state: + description: + - Notification list to use when the alarm state is CRITICAL. Must be an + array of valid rax_mon_notification ids. + warning_state: + description: + - Notification list to use when the alarm state is WARNING. Must be an array + of valid rax_mon_notification ids. + ok_state: + description: + - Notification list to use when the alarm state is OK. Must be an array of + valid rax_mon_notification ids. +author: Ash Wilson (@smashwilson) +extends_documentation_fragment: +- community.general.rackspace.openstack + +''' + +EXAMPLES = ''' +- name: Example notification plan + gather_facts: False + hosts: local + connection: local + tasks: + - name: Establish who gets called when. + rax_mon_notification_plan: + credentials: ~/.rax_pub + state: present + label: defcon1 + critical_state: + - "{{ everyone['notification']['id'] }}" + warning_state: + - "{{ opsfloor['notification']['id'] }}" + register: defcon1 +''' + +try: + import pyrax + HAS_PYRAX = True +except ImportError: + HAS_PYRAX = False + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.rax import rax_argument_spec, rax_required_together, setup_rax_module + + +def notification_plan(module, state, label, critical_state, warning_state, ok_state): + + if len(label) < 1 or len(label) > 255: + module.fail_json(msg='label must be between 1 and 255 characters long') + + changed = False + notification_plan = None + + cm = pyrax.cloud_monitoring + if not cm: + module.fail_json(msg='Failed to instantiate client. This typically ' + 'indicates an invalid region or an incorrectly ' + 'capitalized region name.') + + existing = [] + for n in cm.list_notification_plans(): + if n.label == label: + existing.append(n) + + if existing: + notification_plan = existing[0] + + if state == 'present': + should_create = False + should_delete = False + + if len(existing) > 1: + module.fail_json(msg='%s notification plans are labelled %s.' 
% + (len(existing), label)) + + if notification_plan: + should_delete = (critical_state and critical_state != notification_plan.critical_state) or \ + (warning_state and warning_state != notification_plan.warning_state) or \ + (ok_state and ok_state != notification_plan.ok_state) + + if should_delete: + notification_plan.delete() + should_create = True + else: + should_create = True + + if should_create: + notification_plan = cm.create_notification_plan(label=label, + critical_state=critical_state, + warning_state=warning_state, + ok_state=ok_state) + changed = True + else: + for np in existing: + np.delete() + changed = True + + if notification_plan: + notification_plan_dict = { + "id": notification_plan.id, + "critical_state": notification_plan.critical_state, + "warning_state": notification_plan.warning_state, + "ok_state": notification_plan.ok_state, + "metadata": notification_plan.metadata + } + module.exit_json(changed=changed, notification_plan=notification_plan_dict) + else: + module.exit_json(changed=changed) + + +def main(): + argument_spec = rax_argument_spec() + argument_spec.update( + dict( + state=dict(default='present', choices=['present', 'absent']), + label=dict(required=True), + critical_state=dict(type='list'), + warning_state=dict(type='list'), + ok_state=dict(type='list') + ) + ) + + module = AnsibleModule( + argument_spec=argument_spec, + required_together=rax_required_together() + ) + + if not HAS_PYRAX: + module.fail_json(msg='pyrax is required for this module') + + state = module.params.get('state') + + label = module.params.get('label') + critical_state = module.params.get('critical_state') + warning_state = module.params.get('warning_state') + ok_state = module.params.get('ok_state') + + setup_rax_module(module, pyrax) + + notification_plan(module, state, label, critical_state, warning_state, ok_state) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/rackspace/rax_network.py b/plugins/modules/cloud/rackspace/rax_network.py new file mode 100644 index 0000000000..4a37705c2d --- /dev/null +++ b/plugins/modules/cloud/rackspace/rax_network.py @@ -0,0 +1,139 @@ +#!/usr/bin/python +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: rax_network +short_description: create / delete an isolated network in Rackspace Public Cloud +description: + - creates / deletes a Rackspace Public Cloud isolated network. +options: + state: + description: + - Indicate desired state of the resource + choices: + - present + - absent + default: present + label: + description: + - Label (name) to give the network + cidr: + description: + - cidr of the network being created +author: + - "Christopher H. 
Laco (@claco)" + - "Jesse Keating (@omgjlk)" +extends_documentation_fragment: +- community.general.rackspace.openstack + +''' + +EXAMPLES = ''' +- name: Build an Isolated Network + gather_facts: False + + tasks: + - name: Network create request + local_action: + module: rax_network + credentials: ~/.raxpub + label: my-net + cidr: 192.168.3.0/24 + state: present +''' + +try: + import pyrax + HAS_PYRAX = True +except ImportError: + HAS_PYRAX = False + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.rax import rax_argument_spec, rax_required_together, setup_rax_module + + +def cloud_network(module, state, label, cidr): + changed = False + network = None + networks = [] + + if not pyrax.cloud_networks: + module.fail_json(msg='Failed to instantiate client. This ' + 'typically indicates an invalid region or an ' + 'incorrectly capitalized region name.') + + if state == 'present': + if not cidr: + module.fail_json(msg='missing required arguments: cidr') + + try: + network = pyrax.cloud_networks.find_network_by_label(label) + except pyrax.exceptions.NetworkNotFound: + try: + network = pyrax.cloud_networks.create(label, cidr=cidr) + changed = True + except Exception as e: + module.fail_json(msg='%s' % e.message) + except Exception as e: + module.fail_json(msg='%s' % e.message) + + elif state == 'absent': + try: + network = pyrax.cloud_networks.find_network_by_label(label) + network.delete() + changed = True + except pyrax.exceptions.NetworkNotFound: + pass + except Exception as e: + module.fail_json(msg='%s' % e.message) + + if network: + instance = dict(id=network.id, + label=network.label, + cidr=network.cidr) + networks.append(instance) + + module.exit_json(changed=changed, networks=networks) + + +def main(): + argument_spec = rax_argument_spec() + argument_spec.update( + dict( + state=dict(default='present', + choices=['present', 'absent']), + label=dict(required=True), + cidr=dict() + ) + ) + + module = AnsibleModule( + argument_spec=argument_spec, + required_together=rax_required_together(), + ) + + if not HAS_PYRAX: + module.fail_json(msg='pyrax is required for this module') + + state = module.params.get('state') + label = module.params.get('label') + cidr = module.params.get('cidr') + + setup_rax_module(module, pyrax) + + cloud_network(module, state, label, cidr) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/rackspace/rax_queue.py b/plugins/modules/cloud/rackspace/rax_queue.py new file mode 100644 index 0000000000..cd0d9c7446 --- /dev/null +++ b/plugins/modules/cloud/rackspace/rax_queue.py @@ -0,0 +1,142 @@ +#!/usr/bin/python +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: rax_queue +short_description: create / delete a queue in Rackspace Public Cloud +description: + - creates / deletes a Rackspace Public Cloud queue. +options: + name: + description: + - Name to give the queue + state: + description: + - Indicate desired state of the resource + choices: + - present + - absent + default: present +author: + - "Christopher H. 
Laco (@claco)" + - "Matt Martz (@sivel)" +extends_documentation_fragment: +- community.general.rackspace +- community.general.rackspace.openstack + +''' + +EXAMPLES = ''' +- name: Build a Queue + gather_facts: False + hosts: local + connection: local + tasks: + - name: Queue create request + local_action: + module: rax_queue + credentials: ~/.raxpub + name: my-queue + region: DFW + state: present + register: my_queue +''' + +try: + import pyrax + HAS_PYRAX = True +except ImportError: + HAS_PYRAX = False + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.rax import rax_argument_spec, rax_required_together, setup_rax_module + + +def cloud_queue(module, state, name): + for arg in (state, name): + if not arg: + module.fail_json(msg='%s is required for rax_queue' % arg) + + changed = False + queues = [] + instance = {} + + cq = pyrax.queues + if not cq: + module.fail_json(msg='Failed to instantiate client. This ' + 'typically indicates an invalid region or an ' + 'incorrectly capitalized region name.') + + for queue in cq.list(): + if name != queue.name: + continue + + queues.append(queue) + + if len(queues) > 1: + module.fail_json(msg='Multiple Queues were matched by name') + + if state == 'present': + if not queues: + try: + queue = cq.create(name) + changed = True + except Exception as e: + module.fail_json(msg='%s' % e.message) + else: + queue = queues[0] + + instance = dict(name=queue.name) + result = dict(changed=changed, queue=instance) + module.exit_json(**result) + + elif state == 'absent': + if queues: + queue = queues[0] + try: + queue.delete() + changed = True + except Exception as e: + module.fail_json(msg='%s' % e.message) + + module.exit_json(changed=changed, queue=instance) + + +def main(): + argument_spec = rax_argument_spec() + argument_spec.update( + dict( + name=dict(), + state=dict(default='present', choices=['present', 'absent']), + ) + ) + + module = AnsibleModule( + argument_spec=argument_spec, + required_together=rax_required_together() + ) + + if not HAS_PYRAX: + module.fail_json(msg='pyrax is required for this module') + + name = module.params.get('name') + state = module.params.get('state') + + setup_rax_module(module, pyrax) + + cloud_queue(module, state, name) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/rackspace/rax_scaling_group.py b/plugins/modules/cloud/rackspace/rax_scaling_group.py new file mode 100644 index 0000000000..d7fb527eb2 --- /dev/null +++ b/plugins/modules/cloud/rackspace/rax_scaling_group.py @@ -0,0 +1,426 @@ +#!/usr/bin/python +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: rax_scaling_group +short_description: Manipulate Rackspace Cloud Autoscale Groups +description: + - Manipulate Rackspace Cloud Autoscale Groups +options: + config_drive: + description: + - Attach read-only configuration drive to server as label config-2 + type: bool + default: 'no' + cooldown: + description: + - The period of time, in seconds, that must pass before any scaling can + occur after the previous scaling. Must be an integer between 0 and + 86400 (24 hrs). 
+ disk_config: + description: + - Disk partitioning strategy + choices: + - auto + - manual + default: auto + files: + description: + - 'Files to insert into the instance. Hash of C(remotepath: localpath)' + flavor: + description: + - flavor to use for the instance + required: true + image: + description: + - image to use for the instance. Can be an C(id), C(human_id) or C(name) + required: true + key_name: + description: + - key pair to use on the instance + loadbalancers: + description: + - List of load balancer C(id) and C(port) hashes + max_entities: + description: + - The maximum number of entities that are allowed in the scaling group. + Must be an integer between 0 and 1000. + required: true + meta: + description: + - A hash of metadata to associate with the instance + min_entities: + description: + - The minimum number of entities that are allowed in the scaling group. + Must be an integer between 0 and 1000. + required: true + name: + description: + - Name to give the scaling group + required: true + networks: + description: + - The network to attach to the instances. If specified, you must include + ALL networks including the public and private interfaces. Can be C(id) + or C(label). + default: + - public + - private + server_name: + description: + - The base name for servers created by Autoscale + required: true + state: + description: + - Indicate desired state of the resource + choices: + - present + - absent + default: present + user_data: + description: + - Data to be uploaded to the servers config drive. This option implies + I(config_drive). Can be a file path or a string + wait: + description: + - wait for the scaling group to finish provisioning the minimum amount of + servers + type: bool + default: 'no' + wait_timeout: + description: + - how long before wait gives up, in seconds + default: 300 +author: "Matt Martz (@sivel)" +extends_documentation_fragment: +- community.general.rackspace +- community.general.rackspace.openstack + +''' + +EXAMPLES = ''' +--- +- hosts: localhost + gather_facts: false + connection: local + tasks: + - rax_scaling_group: + credentials: ~/.raxpub + region: ORD + cooldown: 300 + flavor: performance1-1 + image: bb02b1a3-bc77-4d17-ab5b-421d89850fca + min_entities: 5 + max_entities: 10 + name: ASG Test + server_name: asgtest + loadbalancers: + - id: 228385 + port: 80 + register: asg +''' + +import base64 +import json +import os +import time + +try: + import pyrax + HAS_PYRAX = True +except ImportError: + HAS_PYRAX = False + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.rax import (rax_argument_spec, rax_find_image, rax_find_network, + rax_required_together, rax_to_dict, setup_rax_module) +from ansible.module_utils.six import string_types + + +def rax_asg(module, cooldown=300, disk_config=None, files=None, flavor=None, + image=None, key_name=None, loadbalancers=None, meta=None, + min_entities=0, max_entities=0, name=None, networks=None, + server_name=None, state='present', user_data=None, + config_drive=False, wait=True, wait_timeout=300): + files = {} if files is None else files + loadbalancers = [] if loadbalancers is None else loadbalancers + meta = {} if meta is None else meta + networks = [] if networks is None else networks + + changed = False + + au = pyrax.autoscale + if not au: + module.fail_json(msg='Failed to instantiate clients. 
This ' + 'typically indicates an invalid region or an ' + 'incorrectly capitalized region name.') + + if user_data: + config_drive = True + + if user_data and os.path.isfile(user_data): + try: + f = open(user_data) + user_data = f.read() + f.close() + except Exception as e: + module.fail_json(msg='Failed to load %s' % user_data) + + if state == 'present': + # Normalize and ensure all metadata values are strings + if meta: + for k, v in meta.items(): + if isinstance(v, list): + meta[k] = ','.join(['%s' % i for i in v]) + elif isinstance(v, dict): + meta[k] = json.dumps(v) + elif not isinstance(v, string_types): + meta[k] = '%s' % v + + if image: + image = rax_find_image(module, pyrax, image) + + nics = [] + if networks: + for network in networks: + nics.extend(rax_find_network(module, pyrax, network)) + + for nic in nics: + # pyrax is currently returning net-id, but we need uuid + # this check makes this forward compatible for a time when + # pyrax uses uuid instead + if nic.get('net-id'): + nic.update(uuid=nic['net-id']) + del nic['net-id'] + + # Handle the file contents + personality = [] + if files: + for rpath in files.keys(): + lpath = os.path.expanduser(files[rpath]) + try: + f = open(lpath, 'r') + personality.append({ + 'path': rpath, + 'contents': f.read() + }) + f.close() + except Exception as e: + module.fail_json(msg='Failed to load %s' % lpath) + + lbs = [] + if loadbalancers: + for lb in loadbalancers: + try: + lb_id = int(lb.get('id')) + except (ValueError, TypeError): + module.fail_json(msg='Load balancer ID is not an integer: ' + '%s' % lb.get('id')) + try: + port = int(lb.get('port')) + except (ValueError, TypeError): + module.fail_json(msg='Load balancer port is not an ' + 'integer: %s' % lb.get('port')) + if not lb_id or not port: + continue + lbs.append((lb_id, port)) + + try: + sg = au.find(name=name) + except pyrax.exceptions.NoUniqueMatch as e: + module.fail_json(msg='%s' % e.message) + except pyrax.exceptions.NotFound: + try: + sg = au.create(name, cooldown=cooldown, + min_entities=min_entities, + max_entities=max_entities, + launch_config_type='launch_server', + server_name=server_name, image=image, + flavor=flavor, disk_config=disk_config, + metadata=meta, personality=personality, + networks=nics, load_balancers=lbs, + key_name=key_name, config_drive=config_drive, + user_data=user_data) + changed = True + except Exception as e: + module.fail_json(msg='%s' % e.message) + + if not changed: + # Scaling Group Updates + group_args = {} + if cooldown != sg.cooldown: + group_args['cooldown'] = cooldown + + if min_entities != sg.min_entities: + group_args['min_entities'] = min_entities + + if max_entities != sg.max_entities: + group_args['max_entities'] = max_entities + + if group_args: + changed = True + sg.update(**group_args) + + # Launch Configuration Updates + lc = sg.get_launch_config() + lc_args = {} + if server_name != lc.get('name'): + lc_args['server_name'] = server_name + + if image != lc.get('image'): + lc_args['image'] = image + + if flavor != lc.get('flavor'): + lc_args['flavor'] = flavor + + disk_config = disk_config or 'AUTO' + if ((disk_config or lc.get('disk_config')) and + disk_config != lc.get('disk_config', 'AUTO')): + lc_args['disk_config'] = disk_config + + if (meta or lc.get('meta')) and meta != lc.get('metadata'): + lc_args['metadata'] = meta + + test_personality = [] + for p in personality: + test_personality.append({ + 'path': p['path'], + 'contents': base64.b64encode(p['contents']) + }) + if ((test_personality or lc.get('personality')) and + 
test_personality != lc.get('personality')): + lc_args['personality'] = personality + + if nics != lc.get('networks'): + lc_args['networks'] = nics + + if lbs != lc.get('load_balancers'): + # Work around for https://github.com/rackspace/pyrax/pull/393 + lc_args['load_balancers'] = sg.manager._resolve_lbs(lbs) + + if key_name != lc.get('key_name'): + lc_args['key_name'] = key_name + + if config_drive != lc.get('config_drive', False): + lc_args['config_drive'] = config_drive + + if (user_data and + base64.b64encode(user_data) != lc.get('user_data')): + lc_args['user_data'] = user_data + + if lc_args: + # Work around for https://github.com/rackspace/pyrax/pull/389 + if 'flavor' not in lc_args: + lc_args['flavor'] = lc.get('flavor') + changed = True + sg.update_launch_config(**lc_args) + + sg.get() + + if wait: + end_time = time.time() + wait_timeout + infinite = wait_timeout == 0 + while infinite or time.time() < end_time: + state = sg.get_state() + if state["pending_capacity"] == 0: + break + + time.sleep(5) + + module.exit_json(changed=changed, autoscale_group=rax_to_dict(sg)) + + else: + try: + sg = au.find(name=name) + sg.delete() + changed = True + except pyrax.exceptions.NotFound as e: + sg = {} + except Exception as e: + module.fail_json(msg='%s' % e.message) + + module.exit_json(changed=changed, autoscale_group=rax_to_dict(sg)) + + +def main(): + argument_spec = rax_argument_spec() + argument_spec.update( + dict( + config_drive=dict(default=False, type='bool'), + cooldown=dict(type='int', default=300), + disk_config=dict(choices=['auto', 'manual']), + files=dict(type='dict', default={}), + flavor=dict(required=True), + image=dict(required=True), + key_name=dict(), + loadbalancers=dict(type='list'), + meta=dict(type='dict', default={}), + min_entities=dict(type='int', required=True), + max_entities=dict(type='int', required=True), + name=dict(required=True), + networks=dict(type='list', default=['public', 'private']), + server_name=dict(required=True), + state=dict(default='present', choices=['present', 'absent']), + user_data=dict(no_log=True), + wait=dict(default=False, type='bool'), + wait_timeout=dict(default=300, type='int'), + ) + ) + + module = AnsibleModule( + argument_spec=argument_spec, + required_together=rax_required_together(), + ) + + if not HAS_PYRAX: + module.fail_json(msg='pyrax is required for this module') + + config_drive = module.params.get('config_drive') + cooldown = module.params.get('cooldown') + disk_config = module.params.get('disk_config') + if disk_config: + disk_config = disk_config.upper() + files = module.params.get('files') + flavor = module.params.get('flavor') + image = module.params.get('image') + key_name = module.params.get('key_name') + loadbalancers = module.params.get('loadbalancers') + meta = module.params.get('meta') + min_entities = module.params.get('min_entities') + max_entities = module.params.get('max_entities') + name = module.params.get('name') + networks = module.params.get('networks') + server_name = module.params.get('server_name') + state = module.params.get('state') + user_data = module.params.get('user_data') + + if not 0 <= min_entities <= 1000 or not 0 <= max_entities <= 1000: + module.fail_json(msg='min_entities and max_entities must be an ' + 'integer between 0 and 1000') + + if not 0 <= cooldown <= 86400: + module.fail_json(msg='cooldown must be an integer between 0 and 86400') + + setup_rax_module(module, pyrax) + + rax_asg(module, cooldown=cooldown, disk_config=disk_config, + files=files, flavor=flavor, image=image, 
meta=meta, + key_name=key_name, loadbalancers=loadbalancers, + min_entities=min_entities, max_entities=max_entities, + name=name, networks=networks, server_name=server_name, + state=state, config_drive=config_drive, user_data=user_data) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/rackspace/rax_scaling_policy.py b/plugins/modules/cloud/rackspace/rax_scaling_policy.py new file mode 100644 index 0000000000..562facb77f --- /dev/null +++ b/plugins/modules/cloud/rackspace/rax_scaling_policy.py @@ -0,0 +1,281 @@ +#!/usr/bin/python +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: rax_scaling_policy +short_description: Manipulate Rackspace Cloud Autoscale Scaling Policy +description: + - Manipulate Rackspace Cloud Autoscale Scaling Policy +options: + at: + description: + - The UTC time when this policy will be executed. The time must be + formatted according to C(yyyy-MM-dd'T'HH:mm:ss.SSS) such as + C(2013-05-19T08:07:08Z) + change: + description: + - The change, either as a number of servers or as a percentage, to make + in the scaling group. If this is a percentage, you must set + I(is_percent) to C(true) also. + cron: + description: + - The time when the policy will be executed, as a cron entry. For + example, if this is parameter is set to C(1 0 * * *) + cooldown: + description: + - The period of time, in seconds, that must pass before any scaling can + occur after the previous scaling. Must be an integer between 0 and + 86400 (24 hrs). + desired_capacity: + description: + - The desired server capacity of the scaling the group; that is, how + many servers should be in the scaling group. + is_percent: + description: + - Whether the value in I(change) is a percent value + default: false + type: bool + name: + description: + - Name to give the policy + required: true + policy_type: + description: + - The type of policy that will be executed for the current release. 
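+# (Clarifying note, not part of the original commit:) C(schedule) policies are
+# time-based and use I(at) or I(cron), which are mutually exclusive; supplying
+# either one together with C(policy_type=webhook) is rejected by main() below.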
+ choices: + - webhook + - schedule + required: true + scaling_group: + description: + - Name of the scaling group that this policy will be added to + required: true + state: + description: + - Indicate desired state of the resource + choices: + - present + - absent + default: present +author: "Matt Martz (@sivel)" +extends_documentation_fragment: +- community.general.rackspace +- community.general.rackspace.openstack + +''' + +EXAMPLES = ''' +--- +- hosts: localhost + gather_facts: false + connection: local + tasks: + - rax_scaling_policy: + credentials: ~/.raxpub + region: ORD + at: '2013-05-19T08:07:08Z' + change: 25 + cooldown: 300 + is_percent: true + name: ASG Test Policy - at + policy_type: schedule + scaling_group: ASG Test + register: asps_at + + - rax_scaling_policy: + credentials: ~/.raxpub + region: ORD + cron: '1 0 * * *' + change: 25 + cooldown: 300 + is_percent: true + name: ASG Test Policy - cron + policy_type: schedule + scaling_group: ASG Test + register: asp_cron + + - rax_scaling_policy: + credentials: ~/.raxpub + region: ORD + cooldown: 300 + desired_capacity: 5 + name: ASG Test Policy - webhook + policy_type: webhook + scaling_group: ASG Test + register: asp_webhook +''' + +try: + import pyrax + HAS_PYRAX = True +except ImportError: + HAS_PYRAX = False + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.rax import (UUID, rax_argument_spec, rax_required_together, rax_to_dict, + setup_rax_module) + + +def rax_asp(module, at=None, change=0, cron=None, cooldown=300, + desired_capacity=0, is_percent=False, name=None, + policy_type=None, scaling_group=None, state='present'): + changed = False + + au = pyrax.autoscale + if not au: + module.fail_json(msg='Failed to instantiate client. 
This ' + 'typically indicates an invalid region or an ' + 'incorrectly capitalized region name.') + + try: + UUID(scaling_group) + except ValueError: + try: + sg = au.find(name=scaling_group) + except Exception as e: + module.fail_json(msg='%s' % e.message) + else: + try: + sg = au.get(scaling_group) + except Exception as e: + module.fail_json(msg='%s' % e.message) + + if state == 'present': + policies = filter(lambda p: name == p.name, sg.list_policies()) + if len(policies) > 1: + module.fail_json(msg='No unique policy match found by name') + if at: + args = dict(at=at) + elif cron: + args = dict(cron=cron) + else: + args = None + + if not policies: + try: + policy = sg.add_policy(name, policy_type=policy_type, + cooldown=cooldown, change=change, + is_percent=is_percent, + desired_capacity=desired_capacity, + args=args) + changed = True + except Exception as e: + module.fail_json(msg='%s' % e.message) + + else: + policy = policies[0] + kwargs = {} + if policy_type != policy.type: + kwargs['policy_type'] = policy_type + + if cooldown != policy.cooldown: + kwargs['cooldown'] = cooldown + + if hasattr(policy, 'change') and change != policy.change: + kwargs['change'] = change + + if hasattr(policy, 'changePercent') and is_percent is False: + kwargs['change'] = change + kwargs['is_percent'] = False + elif hasattr(policy, 'change') and is_percent is True: + kwargs['change'] = change + kwargs['is_percent'] = True + + if hasattr(policy, 'desiredCapacity') and change: + kwargs['change'] = change + elif ((hasattr(policy, 'change') or + hasattr(policy, 'changePercent')) and desired_capacity): + kwargs['desired_capacity'] = desired_capacity + + if hasattr(policy, 'args') and args != policy.args: + kwargs['args'] = args + + if kwargs: + policy.update(**kwargs) + changed = True + + policy.get() + + module.exit_json(changed=changed, autoscale_policy=rax_to_dict(policy)) + + else: + try: + policies = filter(lambda p: name == p.name, sg.list_policies()) + if len(policies) > 1: + module.fail_json(msg='No unique policy match found by name') + elif not policies: + policy = {} + else: + policy.delete() + changed = True + except Exception as e: + module.fail_json(msg='%s' % e.message) + + module.exit_json(changed=changed, autoscale_policy=rax_to_dict(policy)) + + +def main(): + argument_spec = rax_argument_spec() + argument_spec.update( + dict( + at=dict(), + change=dict(type='int'), + cron=dict(), + cooldown=dict(type='int', default=300), + desired_capacity=dict(type='int'), + is_percent=dict(type='bool', default=False), + name=dict(required=True), + policy_type=dict(required=True, choices=['webhook', 'schedule']), + scaling_group=dict(required=True), + state=dict(default='present', choices=['present', 'absent']), + ) + ) + + module = AnsibleModule( + argument_spec=argument_spec, + required_together=rax_required_together(), + mutually_exclusive=[ + ['cron', 'at'], + ['change', 'desired_capacity'], + ] + ) + + if not HAS_PYRAX: + module.fail_json(msg='pyrax is required for this module') + + at = module.params.get('at') + change = module.params.get('change') + cron = module.params.get('cron') + cooldown = module.params.get('cooldown') + desired_capacity = module.params.get('desired_capacity') + is_percent = module.params.get('is_percent') + name = module.params.get('name') + policy_type = module.params.get('policy_type') + scaling_group = module.params.get('scaling_group') + state = module.params.get('state') + + if (at or cron) and policy_type == 'webhook': + module.fail_json(msg='policy_type=schedule is 
required for a time ' + 'based policy') + + setup_rax_module(module, pyrax) + + rax_asp(module, at=at, change=change, cron=cron, cooldown=cooldown, + desired_capacity=desired_capacity, is_percent=is_percent, + name=name, policy_type=policy_type, scaling_group=scaling_group, + state=state) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/scaleway/scaleway_compute.py b/plugins/modules/cloud/scaleway/scaleway_compute.py new file mode 100644 index 0000000000..1874d2eed9 --- /dev/null +++ b/plugins/modules/cloud/scaleway/scaleway_compute.py @@ -0,0 +1,660 @@ +#!/usr/bin/python +# +# Scaleway Compute management module +# +# Copyright (C) 2018 Online SAS. +# https://www.scaleway.com +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +ANSIBLE_METADATA = { + 'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community' +} + +DOCUMENTATION = ''' +--- +module: scaleway_compute +short_description: Scaleway compute management module +author: Remy Leone (@sieben) +description: + - "This module manages compute instances on Scaleway." +extends_documentation_fragment: +- community.general.scaleway + + +options: + + public_ip: + description: + - Manage public IP on a Scaleway server + - Could be Scaleway IP address UUID + - C(dynamic) Means that IP is destroyed at the same time the host is destroyed + - C(absent) Means no public IP at all + default: absent + + enable_ipv6: + description: + - Enable public IPv6 connectivity on the instance + default: false + type: bool + + image: + description: + - Image identifier used to start the instance with + required: true + + name: + description: + - Name of the instance + + organization: + description: + - Organization identifier + required: true + + state: + description: + - Indicate desired state of the instance. + default: present + choices: + - present + - absent + - running + - restarted + - stopped + + tags: + description: + - List of tags to apply to the instance (5 max) + required: false + default: [] + + region: + description: + - Scaleway compute zone + required: true + choices: + - ams1 + - EMEA-NL-EVS + - par1 + - EMEA-FR-PAR1 + + commercial_type: + description: + - Commercial name of the compute node + required: true + + wait: + description: + - Wait for the instance to reach its desired state before returning. 
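+# (Clarifying note, not part of the original commit:) when enabled, the module
+# polls the server state every I(wait_sleep_time) seconds until the transition
+# completes or I(wait_timeout) seconds elapse (see
+# wait_to_complete_state_transition() below).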
+ type: bool + default: 'no' + + wait_timeout: + description: + - Time to wait for the server to reach the expected state + required: false + default: 300 + + wait_sleep_time: + description: + - Time to wait before every attempt to check the state of the server + required: false + default: 3 + + security_group: + description: + - Security group unique identifier + - If no value provided, the default security group or current security group will be used + required: false +''' + +EXAMPLES = ''' +- name: Create a server + scaleway_compute: + name: foobar + state: present + image: 89ee4018-f8c3-4dc4-a6b5-bca14f985ebe + organization: 951df375-e094-4d26-97c1-ba548eeb9c42 + region: ams1 + commercial_type: VC1S + tags: + - test + - www + +- name: Create a server attached to a security group + scaleway_compute: + name: foobar + state: present + image: 89ee4018-f8c3-4dc4-a6b5-bca14f985ebe + organization: 951df375-e094-4d26-97c1-ba548eeb9c42 + region: ams1 + commercial_type: VC1S + security_group: 4a31b633-118e-4900-bd52-facf1085fc8d + tags: + - test + - www + +- name: Destroy it right after + scaleway_compute: + name: foobar + state: absent + image: 89ee4018-f8c3-4dc4-a6b5-bca14f985ebe + organization: 951df375-e094-4d26-97c1-ba548eeb9c42 + region: ams1 + commercial_type: VC1S +''' + +RETURN = ''' +''' + +import datetime +import time + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.six.moves.urllib.parse import quote as urlquote +from ansible_collections.community.general.plugins.module_utils.scaleway import SCALEWAY_LOCATION, scaleway_argument_spec, Scaleway + +SCALEWAY_SERVER_STATES = ( + 'stopped', + 'stopping', + 'starting', + 'running', + 'locked' +) + +SCALEWAY_TRANSITIONS_STATES = ( + "stopping", + "starting", + "pending" +) + + +def check_image_id(compute_api, image_id): + response = compute_api.get(path="images/%s" % image_id) + + if not response.ok: + msg = 'Error in getting image %s on %s : %s' % (image_id, compute_api.module.params.get('api_url'), response.json) + compute_api.module.fail_json(msg=msg) + + +def fetch_state(compute_api, server): + compute_api.module.debug("fetch_state of server: %s" % server["id"]) + response = compute_api.get(path="servers/%s" % server["id"]) + + if response.status_code == 404: + return "absent" + + if not response.ok: + msg = 'Error during state fetching: (%s) %s' % (response.status_code, response.json) + compute_api.module.fail_json(msg=msg) + + try: + compute_api.module.debug("Server %s in state: %s" % (server["id"], response.json["server"]["state"])) + return response.json["server"]["state"] + except KeyError: + compute_api.module.fail_json(msg="Could not fetch state in %s" % response.json) + + +def wait_to_complete_state_transition(compute_api, server): + wait = compute_api.module.params["wait"] + if not wait: + return + wait_timeout = compute_api.module.params["wait_timeout"] + wait_sleep_time = compute_api.module.params["wait_sleep_time"] + + start = datetime.datetime.utcnow() + end = start + datetime.timedelta(seconds=wait_timeout) + while datetime.datetime.utcnow() < end: + compute_api.module.debug("We are going to wait for the server to finish its transition") + if fetch_state(compute_api, server) not in SCALEWAY_TRANSITIONS_STATES: + compute_api.module.debug("It seems that the server is not in transition anymore.") + compute_api.module.debug("Server in state: %s" % fetch_state(compute_api, server)) + break + time.sleep(wait_sleep_time) + else: + compute_api.module.fail_json(msg="Server takes too long to finish 
its transition") + + +def public_ip_payload(compute_api, public_ip): + # We don't want a public ip + if public_ip in ("absent",): + return {"dynamic_ip_required": False} + + # IP is only attached to the instance and is released as soon as the instance terminates + if public_ip in ("dynamic", "allocated"): + return {"dynamic_ip_required": True} + + # We check that the IP we want to attach exists, if so its ID is returned + response = compute_api.get("ips") + if not response.ok: + msg = 'Error during public IP validation: (%s) %s' % (response.status_code, response.json) + compute_api.module.fail_json(msg=msg) + + ip_list = [] + try: + ip_list = response.json["ips"] + except KeyError: + compute_api.module.fail_json(msg="Error in getting the IP information from: %s" % response.json) + + lookup = [ip["id"] for ip in ip_list] + if public_ip in lookup: + return {"public_ip": public_ip} + + +def create_server(compute_api, server): + compute_api.module.debug("Starting a create_server") + target_server = None + data = {"enable_ipv6": server["enable_ipv6"], + "tags": server["tags"], + "commercial_type": server["commercial_type"], + "image": server["image"], + "dynamic_ip_required": server["dynamic_ip_required"], + "name": server["name"], + "organization": server["organization"] + } + + if server["security_group"]: + data["security_group"] = server["security_group"] + + response = compute_api.post(path="servers", data=data) + + if not response.ok: + msg = 'Error during server creation: (%s) %s' % (response.status_code, response.json) + compute_api.module.fail_json(msg=msg) + + try: + target_server = response.json["server"] + except KeyError: + compute_api.module.fail_json(msg="Error in getting the server information from: %s" % response.json) + + wait_to_complete_state_transition(compute_api=compute_api, server=target_server) + + return target_server + + +def restart_server(compute_api, server): + return perform_action(compute_api=compute_api, server=server, action="reboot") + + +def stop_server(compute_api, server): + return perform_action(compute_api=compute_api, server=server, action="poweroff") + + +def start_server(compute_api, server): + return perform_action(compute_api=compute_api, server=server, action="poweron") + + +def perform_action(compute_api, server, action): + response = compute_api.post(path="servers/%s/action" % server["id"], + data={"action": action}) + if not response.ok: + msg = 'Error during server %s: (%s) %s' % (action, response.status_code, response.json) + compute_api.module.fail_json(msg=msg) + + wait_to_complete_state_transition(compute_api=compute_api, server=server) + + return response + + +def remove_server(compute_api, server): + compute_api.module.debug("Starting remove server strategy") + response = compute_api.delete(path="servers/%s" % server["id"]) + if not response.ok: + msg = 'Error during server deletion: (%s) %s' % (response.status_code, response.json) + compute_api.module.fail_json(msg=msg) + + wait_to_complete_state_transition(compute_api=compute_api, server=server) + + return response + + +def present_strategy(compute_api, wished_server): + compute_api.module.debug("Starting present strategy") + changed = False + query_results = find(compute_api=compute_api, wished_server=wished_server, per_page=1) + + if not query_results: + changed = True + if compute_api.module.check_mode: + return changed, {"status": "A server would be created."} + + target_server = create_server(compute_api=compute_api, server=wished_server) + else: + target_server = query_results[0] + 
+ if server_attributes_should_be_changed(compute_api=compute_api, target_server=target_server, + wished_server=wished_server): + changed = True + + if compute_api.module.check_mode: + return changed, {"status": "Server %s attributes would be changed." % target_server["id"]} + + target_server = server_change_attributes(compute_api=compute_api, target_server=target_server, wished_server=wished_server) + + return changed, target_server + + +def absent_strategy(compute_api, wished_server): + compute_api.module.debug("Starting absent strategy") + changed = False + target_server = None + query_results = find(compute_api=compute_api, wished_server=wished_server, per_page=1) + + if not query_results: + return changed, {"status": "Server already absent."} + else: + target_server = query_results[0] + + changed = True + + if compute_api.module.check_mode: + return changed, {"status": "Server %s would be made absent." % target_server["id"]} + + # A server MUST be stopped to be deleted. + while fetch_state(compute_api=compute_api, server=target_server) != "stopped": + wait_to_complete_state_transition(compute_api=compute_api, server=target_server) + response = stop_server(compute_api=compute_api, server=target_server) + + if not response.ok: + err_msg = 'Error while stopping a server before removing it [{0}: {1}]'.format(response.status_code, + response.json) + compute_api.module.fail_json(msg=err_msg) + + wait_to_complete_state_transition(compute_api=compute_api, server=target_server) + + response = remove_server(compute_api=compute_api, server=target_server) + + if not response.ok: + err_msg = 'Error while removing server [{0}: {1}]'.format(response.status_code, response.json) + compute_api.module.fail_json(msg=err_msg) + + return changed, {"status": "Server %s deleted" % target_server["id"]} + + +def running_strategy(compute_api, wished_server): + compute_api.module.debug("Starting running strategy") + changed = False + query_results = find(compute_api=compute_api, wished_server=wished_server, per_page=1) + + if not query_results: + changed = True + if compute_api.module.check_mode: + return changed, {"status": "A server would be created before being run."} + + target_server = create_server(compute_api=compute_api, server=wished_server) + else: + target_server = query_results[0] + + if server_attributes_should_be_changed(compute_api=compute_api, target_server=target_server, + wished_server=wished_server): + changed = True + + if compute_api.module.check_mode: + return changed, {"status": "Server %s attributes would be changed before running it." % target_server["id"]} + + target_server = server_change_attributes(compute_api=compute_api, target_server=target_server, wished_server=wished_server) + + current_state = fetch_state(compute_api=compute_api, server=target_server) + if current_state not in ("running", "starting"): + compute_api.module.debug("running_strategy: Server in state: %s" % current_state) + changed = True + + if compute_api.module.check_mode: + return changed, {"status": "Server %s attributes would be changed." 
% target_server["id"]} + + response = start_server(compute_api=compute_api, server=target_server) + if not response.ok: + msg = 'Error while running server [{0}: {1}]'.format(response.status_code, response.json) + compute_api.module.fail_json(msg=msg) + + return changed, target_server + + +def stop_strategy(compute_api, wished_server): + compute_api.module.debug("Starting stop strategy") + query_results = find(compute_api=compute_api, wished_server=wished_server, per_page=1) + + changed = False + + if not query_results: + + if compute_api.module.check_mode: + return changed, {"status": "A server would be created before being stopped."} + + target_server = create_server(compute_api=compute_api, server=wished_server) + changed = True + else: + target_server = query_results[0] + + compute_api.module.debug("stop_strategy: Servers are found.") + + if server_attributes_should_be_changed(compute_api=compute_api, target_server=target_server, + wished_server=wished_server): + changed = True + + if compute_api.module.check_mode: + return changed, { + "status": "Server %s attributes would be changed before stopping it." % target_server["id"]} + + target_server = server_change_attributes(compute_api=compute_api, target_server=target_server, wished_server=wished_server) + + wait_to_complete_state_transition(compute_api=compute_api, server=target_server) + + current_state = fetch_state(compute_api=compute_api, server=target_server) + if current_state not in ("stopped",): + compute_api.module.debug("stop_strategy: Server in state: %s" % current_state) + + changed = True + + if compute_api.module.check_mode: + return changed, {"status": "Server %s would be stopped." % target_server["id"]} + + response = stop_server(compute_api=compute_api, server=target_server) + compute_api.module.debug(response.json) + compute_api.module.debug(response.ok) + + if not response.ok: + msg = 'Error while stopping server [{0}: {1}]'.format(response.status_code, response.json) + compute_api.module.fail_json(msg=msg) + + return changed, target_server + + +def restart_strategy(compute_api, wished_server): + compute_api.module.debug("Starting restart strategy") + changed = False + query_results = find(compute_api=compute_api, wished_server=wished_server, per_page=1) + + if not query_results: + changed = True + if compute_api.module.check_mode: + return changed, {"status": "A server would be created before being rebooted."} + + target_server = create_server(compute_api=compute_api, server=wished_server) + else: + target_server = query_results[0] + + if server_attributes_should_be_changed(compute_api=compute_api, + target_server=target_server, + wished_server=wished_server): + changed = True + + if compute_api.module.check_mode: + return changed, { + "status": "Server %s attributes would be changed before rebooting it." % target_server["id"]} + + target_server = server_change_attributes(compute_api=compute_api, target_server=target_server, wished_server=wished_server) + + changed = True + if compute_api.module.check_mode: + return changed, {"status": "Server %s would be rebooted." 
% target_server["id"]} + + wait_to_complete_state_transition(compute_api=compute_api, server=target_server) + + if fetch_state(compute_api=compute_api, server=target_server) in ("running",): + response = restart_server(compute_api=compute_api, server=target_server) + wait_to_complete_state_transition(compute_api=compute_api, server=target_server) + if not response.ok: + msg = 'Error while restarting server that was running [{0}: {1}].'.format(response.status_code, + response.json) + compute_api.module.fail_json(msg=msg) + + if fetch_state(compute_api=compute_api, server=target_server) in ("stopped",): + response = restart_server(compute_api=compute_api, server=target_server) + wait_to_complete_state_transition(compute_api=compute_api, server=target_server) + if not response.ok: + msg = 'Error while restarting server that was stopped [{0}: {1}].'.format(response.status_code, + response.json) + compute_api.module.fail_json(msg=msg) + + return changed, target_server + + +state_strategy = { + "present": present_strategy, + "restarted": restart_strategy, + "stopped": stop_strategy, + "running": running_strategy, + "absent": absent_strategy +} + + +def find(compute_api, wished_server, per_page=1): + compute_api.module.debug("Getting inside find") + # Only the name attribute is accepted in the Compute query API + response = compute_api.get("servers", params={"name": wished_server["name"], + "per_page": per_page}) + + if not response.ok: + msg = 'Error during server search: (%s) %s' % (response.status_code, response.json) + compute_api.module.fail_json(msg=msg) + + search_results = response.json["servers"] + + return search_results + + +PATCH_MUTABLE_SERVER_ATTRIBUTES = ( + "ipv6", + "tags", + "name", + "dynamic_ip_required", + "security_group", +) + + +def server_attributes_should_be_changed(compute_api, target_server, wished_server): + compute_api.module.debug("Checking if server attributes should be changed") + compute_api.module.debug("Current Server: %s" % target_server) + compute_api.module.debug("Wished Server: %s" % wished_server) + debug_dict = dict((x, (target_server[x], wished_server[x])) + for x in PATCH_MUTABLE_SERVER_ATTRIBUTES + if x in target_server and x in wished_server) + compute_api.module.debug("Debug dict %s" % debug_dict) + try: + for key in PATCH_MUTABLE_SERVER_ATTRIBUTES: + if key in target_server and key in wished_server: + # When you are working with dict, only ID matter as we ask user to put only the resource ID in the playbook + if isinstance(target_server[key], dict) and wished_server[key] and "id" in target_server[key].keys( + ) and target_server[key]["id"] != wished_server[key]: + return True + # Handling other structure compare simply the two objects content + elif not isinstance(target_server[key], dict) and target_server[key] != wished_server[key]: + return True + return False + except AttributeError: + compute_api.module.fail_json(msg="Error while checking if attributes should be changed") + + +def server_change_attributes(compute_api, target_server, wished_server): + compute_api.module.debug("Starting patching server attributes") + patch_payload = dict() + + for key in PATCH_MUTABLE_SERVER_ATTRIBUTES: + if key in target_server and key in wished_server: + # When you are working with dict, only ID matter as we ask user to put only the resource ID in the playbook + if isinstance(target_server[key], dict) and "id" in target_server[key] and wished_server[key]: + # Setting all key to current value except ID + key_dict = dict((x, target_server[key][x]) for x in 
target_server[key].keys() if x != "id") + # Setting ID to the user specified ID + key_dict["id"] = wished_server[key] + patch_payload[key] = key_dict + elif not isinstance(target_server[key], dict): + patch_payload[key] = wished_server[key] + + response = compute_api.patch(path="servers/%s" % target_server["id"], + data=patch_payload) + if not response.ok: + msg = 'Error during server attributes patching: (%s) %s' % (response.status_code, response.json) + compute_api.module.fail_json(msg=msg) + + try: + target_server = response.json["server"] + except KeyError: + compute_api.module.fail_json(msg="Error in getting the server information from: %s" % response.json) + + wait_to_complete_state_transition(compute_api=compute_api, server=target_server) + + return target_server + + +def core(module): + region = module.params["region"] + wished_server = { + "state": module.params["state"], + "image": module.params["image"], + "name": module.params["name"], + "commercial_type": module.params["commercial_type"], + "enable_ipv6": module.params["enable_ipv6"], + "tags": module.params["tags"], + "organization": module.params["organization"], + "security_group": module.params["security_group"] + } + module.params['api_url'] = SCALEWAY_LOCATION[region]["api_endpoint"] + + compute_api = Scaleway(module=module) + + check_image_id(compute_api, wished_server["image"]) + + # IP parameters of the wished server depends on the configuration + ip_payload = public_ip_payload(compute_api=compute_api, public_ip=module.params["public_ip"]) + wished_server.update(ip_payload) + + changed, summary = state_strategy[wished_server["state"]](compute_api=compute_api, wished_server=wished_server) + module.exit_json(changed=changed, msg=summary) + + +def main(): + argument_spec = scaleway_argument_spec() + argument_spec.update(dict( + image=dict(required=True), + name=dict(), + region=dict(required=True, choices=SCALEWAY_LOCATION.keys()), + commercial_type=dict(required=True), + enable_ipv6=dict(default=False, type="bool"), + public_ip=dict(default="absent"), + state=dict(choices=state_strategy.keys(), default='present'), + tags=dict(type="list", default=[]), + organization=dict(required=True), + wait=dict(type="bool", default=False), + wait_timeout=dict(type="int", default=300), + wait_sleep_time=dict(type="int", default=3), + security_group=dict(), + )) + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + + core(module) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/scaleway/scaleway_image_facts.py b/plugins/modules/cloud/scaleway/scaleway_image_facts.py new file mode 100644 index 0000000000..3be09ec7bf --- /dev/null +++ b/plugins/modules/cloud/scaleway/scaleway_image_facts.py @@ -0,0 +1,125 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# (c) 2018, Yanis Guenane +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['deprecated'], + 'supported_by': 'community'} + +DOCUMENTATION = r''' +--- +module: scaleway_image_facts +deprecated: + removed_in: '2.13' + why: Deprecated in favour of C(_info) module. + alternative: Use M(scaleway_image_info) instead. +short_description: Gather facts about the Scaleway images available. +description: + - Gather facts about the Scaleway images available. 
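+# (Clarifying note, not part of the original commit:) this deprecated _facts
+# module injects its result as ansible_facts['scaleway_image_facts']; the
+# scaleway_image_info replacement below returns the same payload directly and
+# is meant to be captured with register (see its EXAMPLES section).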
+author: + - "Yanis Guenane (@Spredzy)" + - "Remy Leone (@sieben)" +extends_documentation_fragment: +- community.general.scaleway + + +options: + + region: + description: + - Scaleway compute zone + required: true + choices: + - ams1 + - EMEA-NL-EVS + - par1 + - EMEA-FR-PAR1 +''' + +EXAMPLES = r''' +- name: Gather Scaleway images facts + scaleway_image_facts: + region: par1 +''' + +RETURN = r''' +--- +scaleway_image_facts: + description: Response from Scaleway API + returned: success + type: complex + sample: + "scaleway_image_facts": [ + { + "arch": "x86_64", + "creation_date": "2018-07-17T16:18:49.276456+00:00", + "default_bootscript": { + "architecture": "x86_64", + "bootcmdargs": "LINUX_COMMON scaleway boot=local nbd.max_part=16", + "default": false, + "dtb": "", + "id": "15fbd2f7-a0f9-412b-8502-6a44da8d98b8", + "initrd": "http://169.254.42.24/initrd/initrd-Linux-x86_64-v3.14.5.gz", + "kernel": "http://169.254.42.24/kernel/x86_64-mainline-lts-4.9-4.9.93-rev1/vmlinuz-4.9.93", + "organization": "11111111-1111-4111-8111-111111111111", + "public": true, + "title": "x86_64 mainline 4.9.93 rev1" + }, + "extra_volumes": [], + "from_server": null, + "id": "00ae4a88-3252-4eda-9feb-5f6b56bf5ef0", + "modification_date": "2018-07-17T16:42:06.319315+00:00", + "name": "Debian Stretch", + "organization": "51b656e3-4865-41e8-adbc-0c45bdd780db", + "public": true, + "root_volume": { + "id": "da32dfbb-c5ff-476d-ae2d-c297dd09b7dd", + "name": "snapshot-2a7229dc-d431-4dc5-b66e-95db08b773af-2018-07-17_16:18", + "size": 25000000000, + "volume_type": "l_ssd" + }, + "state": "available" + } + ] +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.scaleway import ( + Scaleway, ScalewayException, scaleway_argument_spec, SCALEWAY_LOCATION) + + +class ScalewayImageFacts(Scaleway): + + def __init__(self, module): + super(ScalewayImageFacts, self).__init__(module) + self.name = 'images' + + region = module.params["region"] + self.module.params['api_url'] = SCALEWAY_LOCATION[region]["api_endpoint"] + + +def main(): + argument_spec = scaleway_argument_spec() + argument_spec.update(dict( + region=dict(required=True, choices=SCALEWAY_LOCATION.keys()), + )) + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + + try: + module.exit_json( + ansible_facts={'scaleway_image_facts': ScalewayImageFacts(module).get_resources()} + ) + except ScalewayException as exc: + module.fail_json(msg=exc.message) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/scaleway/scaleway_image_info.py b/plugins/modules/cloud/scaleway/scaleway_image_info.py new file mode 100644 index 0000000000..5cda9ce8a7 --- /dev/null +++ b/plugins/modules/cloud/scaleway/scaleway_image_info.py @@ -0,0 +1,125 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# (c) 2018, Yanis Guenane +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = r''' +--- +module: scaleway_image_info +short_description: Gather information about the Scaleway images available. +description: + - Gather information about the Scaleway images available. 
+author: + - "Yanis Guenane (@Spredzy)" + - "Remy Leone (@sieben)" +extends_documentation_fragment: +- community.general.scaleway + + +options: + + region: + description: + - Scaleway compute zone + required: true + choices: + - ams1 + - EMEA-NL-EVS + - par1 + - EMEA-FR-PAR1 +''' + +EXAMPLES = r''' +- name: Gather Scaleway images information + scaleway_image_info: + region: par1 + register: result + +- debug: + msg: "{{ result.scaleway_image_info }}" +''' + +RETURN = r''' +--- +scaleway_image_info: + description: Response from Scaleway API + returned: success + type: complex + sample: + "scaleway_image_info": [ + { + "arch": "x86_64", + "creation_date": "2018-07-17T16:18:49.276456+00:00", + "default_bootscript": { + "architecture": "x86_64", + "bootcmdargs": "LINUX_COMMON scaleway boot=local nbd.max_part=16", + "default": false, + "dtb": "", + "id": "15fbd2f7-a0f9-412b-8502-6a44da8d98b8", + "initrd": "http://169.254.42.24/initrd/initrd-Linux-x86_64-v3.14.5.gz", + "kernel": "http://169.254.42.24/kernel/x86_64-mainline-lts-4.9-4.9.93-rev1/vmlinuz-4.9.93", + "organization": "11111111-1111-4111-8111-111111111111", + "public": true, + "title": "x86_64 mainline 4.9.93 rev1" + }, + "extra_volumes": [], + "from_server": null, + "id": "00ae4a88-3252-4eda-9feb-5f6b56bf5ef0", + "modification_date": "2018-07-17T16:42:06.319315+00:00", + "name": "Debian Stretch", + "organization": "51b656e3-4865-41e8-adbc-0c45bdd780db", + "public": true, + "root_volume": { + "id": "da32dfbb-c5ff-476d-ae2d-c297dd09b7dd", + "name": "snapshot-2a7229dc-d431-4dc5-b66e-95db08b773af-2018-07-17_16:18", + "size": 25000000000, + "volume_type": "l_ssd" + }, + "state": "available" + } + ] +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.scaleway import ( + Scaleway, ScalewayException, scaleway_argument_spec, SCALEWAY_LOCATION) + + +class ScalewayImageInfo(Scaleway): + + def __init__(self, module): + super(ScalewayImageInfo, self).__init__(module) + self.name = 'images' + + region = module.params["region"] + self.module.params['api_url'] = SCALEWAY_LOCATION[region]["api_endpoint"] + + +def main(): + argument_spec = scaleway_argument_spec() + argument_spec.update(dict( + region=dict(required=True, choices=SCALEWAY_LOCATION.keys()), + )) + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + + try: + module.exit_json( + scaleway_image_info=ScalewayImageInfo(module).get_resources() + ) + except ScalewayException as exc: + module.fail_json(msg=exc.message) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/scaleway/scaleway_ip.py b/plugins/modules/cloud/scaleway/scaleway_ip.py new file mode 100644 index 0000000000..cc2cee7fa1 --- /dev/null +++ b/plugins/modules/cloud/scaleway/scaleway_ip.py @@ -0,0 +1,258 @@ +#!/usr/bin/python +# +# Scaleway IP management module +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +ANSIBLE_METADATA = { + 'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community' +} + +DOCUMENTATION = ''' +--- +module: scaleway_ip +short_description: Scaleway IP management module +author: Remy Leone (@sieben) +description: + - This module manages IP on Scaleway account + U(https://developer.scaleway.com) +extends_documentation_fragment: +- community.general.scaleway + + +options: + state: + description: + - Indicate desired 
state of the IP. + default: present + choices: + - present + - absent + + organization: + description: + - Scaleway organization identifier + required: true + + region: + description: + - Scaleway region to use (for example par1). + required: true + choices: + - ams1 + - EMEA-NL-EVS + - par1 + - EMEA-FR-PAR1 + + id: + description: + - id of the Scaleway IP (UUID) + + server: + description: + - id of the server you want to attach an IP to. + - To unattach an IP don't specify this option + + reverse: + description: + - Reverse to assign to the IP +''' + +EXAMPLES = ''' + - name: Create an IP + scaleway_ip: + organization: '{{ scw_org }}' + state: present + region: par1 + register: ip_creation_task + + - name: Make sure IP deleted + scaleway_ip: + id: '{{ ip_creation_task.scaleway_ip.id }}' + state: absent + region: par1 + +''' + +RETURN = ''' +data: + description: This is only present when C(state=present) + returned: when C(state=present) + type: dict + sample: { + "ips": [ + { + "organization": "951df375-e094-4d26-97c1-ba548eeb9c42", + "reverse": null, + "id": "dd9e8df6-6775-4863-b517-e0b0ee3d7477", + "server": { + "id": "3f1568ca-b1a2-4e98-b6f7-31a0588157f1", + "name": "ansible_tuto-1" + }, + "address": "212.47.232.136" + } + ] + } +''' + +from ansible_collections.community.general.plugins.module_utils.scaleway import SCALEWAY_LOCATION, scaleway_argument_spec, Scaleway +from ansible.module_utils.basic import AnsibleModule + + +def ip_attributes_should_be_changed(api, target_ip, wished_ip): + patch_payload = {} + + if target_ip["reverse"] != wished_ip["reverse"]: + patch_payload["reverse"] = wished_ip["reverse"] + + # IP is assigned to a server + if target_ip["server"] is None and wished_ip["server"]: + patch_payload["server"] = wished_ip["server"] + + # IP is unassigned to a server + try: + if target_ip["server"]["id"] and wished_ip["server"] is None: + patch_payload["server"] = wished_ip["server"] + except (TypeError, KeyError): + pass + + # IP is migrated between 2 different servers + try: + if target_ip["server"]["id"] != wished_ip["server"]: + patch_payload["server"] = wished_ip["server"] + except (TypeError, KeyError): + pass + + return patch_payload + + +def payload_from_wished_ip(wished_ip): + return dict( + (k, v) + for k, v in wished_ip.items() + if k != 'id' and v is not None + ) + + +def present_strategy(api, wished_ip): + changed = False + + response = api.get('ips') + if not response.ok: + api.module.fail_json(msg='Error getting IPs [{0}: {1}]'.format( + response.status_code, response.json['message'])) + + ips_list = response.json["ips"] + ip_lookup = dict((ip["id"], ip) + for ip in ips_list) + + if wished_ip["id"] not in ip_lookup.keys(): + changed = True + if api.module.check_mode: + return changed, {"status": "An IP would be created."} + + # Create IP + creation_response = api.post('/ips', + data=payload_from_wished_ip(wished_ip)) + + if not creation_response.ok: + msg = "Error during ip creation: %s: '%s' (%s)" % (creation_response.info['msg'], + creation_response.json['message'], + creation_response.json) + api.module.fail_json(msg=msg) + return changed, creation_response.json["ip"] + + target_ip = ip_lookup[wished_ip["id"]] + patch_payload = ip_attributes_should_be_changed(api=api, target_ip=target_ip, wished_ip=wished_ip) + + if not patch_payload: + return changed, target_ip + + changed = True + if api.module.check_mode: + return changed, {"status": "IP attributes would be changed."} + + ip_patch_response = api.patch(path="ips/%s" % target_ip["id"], + 
data=patch_payload) + + if not ip_patch_response.ok: + api.module.fail_json(msg='Error during IP attributes update: [{0}: {1}]'.format( + ip_patch_response.status_code, ip_patch_response.json['message'])) + + return changed, ip_patch_response.json["ip"] + + +def absent_strategy(api, wished_ip): + response = api.get('ips') + changed = False + + status_code = response.status_code + ips_json = response.json + ips_list = ips_json["ips"] + + if not response.ok: + api.module.fail_json(msg='Error getting IPs [{0}: {1}]'.format( + status_code, response.json['message'])) + + ip_lookup = dict((ip["id"], ip) + for ip in ips_list) + if wished_ip["id"] not in ip_lookup.keys(): + return changed, {} + + changed = True + if api.module.check_mode: + return changed, {"status": "IP would be destroyed"} + + response = api.delete('/ips/' + wished_ip["id"]) + if not response.ok: + api.module.fail_json(msg='Error deleting IP [{0}: {1}]'.format( + response.status_code, response.json)) + + return changed, response.json + + +def core(module): + wished_ip = { + "organization": module.params['organization'], + "reverse": module.params["reverse"], + "id": module.params["id"], + "server": module.params["server"] + } + + region = module.params["region"] + module.params['api_url'] = SCALEWAY_LOCATION[region]["api_endpoint"] + + api = Scaleway(module=module) + if module.params["state"] == "absent": + changed, summary = absent_strategy(api=api, wished_ip=wished_ip) + else: + changed, summary = present_strategy(api=api, wished_ip=wished_ip) + module.exit_json(changed=changed, scaleway_ip=summary) + + +def main(): + argument_spec = scaleway_argument_spec() + argument_spec.update(dict( + state=dict(default='present', choices=['absent', 'present']), + organization=dict(required=True), + server=dict(), + reverse=dict(), + region=dict(required=True, choices=SCALEWAY_LOCATION.keys()), + id=dict() + )) + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + + core(module) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/scaleway/scaleway_ip_facts.py b/plugins/modules/cloud/scaleway/scaleway_ip_facts.py new file mode 100644 index 0000000000..c53f856aad --- /dev/null +++ b/plugins/modules/cloud/scaleway/scaleway_ip_facts.py @@ -0,0 +1,107 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# (c) 2018, Yanis Guenane +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['deprecated'], + 'supported_by': 'community'} + +DOCUMENTATION = r''' +--- +module: scaleway_ip_facts +deprecated: + removed_in: '2.13' + why: Deprecated in favour of C(_info) module. + alternative: Use M(scaleway_ip_info) instead. +short_description: Gather facts about the Scaleway ips available. +description: + - Gather facts about the Scaleway ips available. +author: + - "Yanis Guenane (@Spredzy)" + - "Remy Leone (@sieben)" +extends_documentation_fragment: +- community.general.scaleway + +options: + region: + description: + - Scaleway region to use (for example par1). 
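The present/absent strategies in scaleway_ip.py above follow the list-then-diff idempotency pattern that recurs throughout these modules: fetch the existing resources, index them by a key, then create, patch, or return unchanged. A minimal, self-contained sketch of that flow, with an in-memory FakeApi standing in for the real Scaleway HTTP client (FakeApi and its data are illustrative, not part of the collection):

# Sketch of the list -> lookup -> create/patch idempotency pattern used by
# present_strategy(). FakeApi is a stand-in for the HTTP client.


class FakeApi(object):
    def __init__(self):
        self.ips = {"ip-1": {"id": "ip-1", "reverse": None, "server": None}}

    def list_ips(self):
        return list(self.ips.values())

    def create_ip(self, payload):
        new = dict(payload, id="ip-%d" % (len(self.ips) + 1))
        self.ips[new["id"]] = new
        return new

    def patch_ip(self, ip_id, payload):
        self.ips[ip_id].update(payload)
        return self.ips[ip_id]


def ensure_present(api, wished_ip):
    """Return (changed, resource), mirroring the module's contract."""
    lookup = dict((ip["id"], ip) for ip in api.list_ips())

    if wished_ip["id"] not in lookup:
        return True, api.create_ip(wished_ip)

    target = lookup[wished_ip["id"]]
    patch = dict((k, v) for k, v in wished_ip.items()
                 if k != "id" and target.get(k) != v)
    if not patch:
        return False, target          # already converged: no API write
    return True, api.patch_ip(target["id"], patch)


if __name__ == "__main__":
    api = FakeApi()
    print(ensure_present(api, {"id": "ip-1", "reverse": "a.example.com", "server": None}))
    print(ensure_present(api, {"id": "ip-1", "reverse": "a.example.com", "server": None}))
    # -> (True, {...}) then (False, {...}): the second run is a no-op

The second call coming back with changed=False is what makes repeated playbook runs safe no-ops.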
+ required: true + choices: + - ams1 + - EMEA-NL-EVS + - par1 + - EMEA-FR-PAR1 +''' + +EXAMPLES = r''' +- name: Gather Scaleway ips facts + scaleway_ip_facts: + region: par1 +''' + +RETURN = r''' +--- +scaleway_ip_facts: + description: Response from Scaleway API + returned: success + type: complex + sample: + "scaleway_ip_facts": [ + { + "address": "163.172.170.243", + "id": "ea081794-a581-8899-8451-386ddaf0a451", + "organization": "3f709602-5e6c-4619-b80c-e324324324af", + "reverse": null, + "server": { + "id": "12f19bc7-109c-4517-954c-e6b3d0311363", + "name": "scw-e0d158" + } + } + ] +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.scaleway import ( + Scaleway, + ScalewayException, + scaleway_argument_spec, + SCALEWAY_LOCATION, +) + + +class ScalewayIpFacts(Scaleway): + + def __init__(self, module): + super(ScalewayIpFacts, self).__init__(module) + self.name = 'ips' + + region = module.params["region"] + self.module.params['api_url'] = SCALEWAY_LOCATION[region]["api_endpoint"] + + +def main(): + argument_spec = scaleway_argument_spec() + argument_spec.update(dict( + region=dict(required=True, choices=SCALEWAY_LOCATION.keys()), + )) + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + + try: + module.exit_json( + ansible_facts={'scaleway_ip_facts': ScalewayIpFacts(module).get_resources()} + ) + except ScalewayException as exc: + module.fail_json(msg=exc.message) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/scaleway/scaleway_ip_info.py b/plugins/modules/cloud/scaleway/scaleway_ip_info.py new file mode 100644 index 0000000000..44b69895e9 --- /dev/null +++ b/plugins/modules/cloud/scaleway/scaleway_ip_info.py @@ -0,0 +1,107 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# (c) 2018, Yanis Guenane +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = r''' +--- +module: scaleway_ip_info +short_description: Gather information about the Scaleway ips available. +description: + - Gather information about the Scaleway ips available. +author: + - "Yanis Guenane (@Spredzy)" + - "Remy Leone (@sieben)" +extends_documentation_fragment: +- community.general.scaleway + +options: + region: + description: + - Scaleway region to use (for example C(par1)). 
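The functional difference between the deprecated scaleway_ip_facts above and the scaleway_ip_info module that follows is only where the payload lands: the facts variant injects it into ansible_facts, while the info variant returns a plain key that the caller must capture with register. A small sketch of the two exit shapes (the resource list is an abbreviated stand-in):

# Sketch of the two exit_json() shapes. In the facts variant the data is
# merged into host facts automatically; in the info variant it is only
# visible through a registered result.

resources = [{"id": "ea081794-...", "address": "163.172.170.243"}]

facts_style = {"ansible_facts": {"scaleway_ip_facts": resources}}
info_style = {"scaleway_ip_info": resources}

# A playbook consumes them differently:
#   facts: "{{ scaleway_ip_facts }}"            (no register needed)
#   info:  "{{ result.scaleway_ip_info }}"      (requires register: result)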
+ required: true + choices: + - ams1 + - EMEA-NL-EVS + - par1 + - EMEA-FR-PAR1 +''' + +EXAMPLES = r''' +- name: Gather Scaleway ips information + scaleway_ip_info: + region: par1 + register: result + +- debug: + msg: "{{ result.scaleway_ip_info }}" +''' + +RETURN = r''' +--- +scaleway_ip_info: + description: Response from Scaleway API + returned: success + type: complex + sample: + "scaleway_ip_info": [ + { + "address": "163.172.170.243", + "id": "ea081794-a581-8899-8451-386ddaf0a451", + "organization": "3f709602-5e6c-4619-b80c-e324324324af", + "reverse": null, + "server": { + "id": "12f19bc7-109c-4517-954c-e6b3d0311363", + "name": "scw-e0d158" + } + } + ] +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.scaleway import ( + Scaleway, + ScalewayException, + scaleway_argument_spec, + SCALEWAY_LOCATION, +) + + +class ScalewayIpInfo(Scaleway): + + def __init__(self, module): + super(ScalewayIpInfo, self).__init__(module) + self.name = 'ips' + + region = module.params["region"] + self.module.params['api_url'] = SCALEWAY_LOCATION[region]["api_endpoint"] + + +def main(): + argument_spec = scaleway_argument_spec() + argument_spec.update(dict( + region=dict(required=True, choices=SCALEWAY_LOCATION.keys()), + )) + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + + try: + module.exit_json( + scaleway_ip_info=ScalewayIpInfo(module).get_resources() + ) + except ScalewayException as exc: + module.fail_json(msg=exc.message) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/scaleway/scaleway_lb.py b/plugins/modules/cloud/scaleway/scaleway_lb.py new file mode 100644 index 0000000000..062ddcf8bf --- /dev/null +++ b/plugins/modules/cloud/scaleway/scaleway_lb.py @@ -0,0 +1,353 @@ +#!/usr/bin/python +# +# Scaleway Load-balancer management module +# +# Copyright (C) 2018 Online SAS. +# https://www.scaleway.com +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +ANSIBLE_METADATA = { + 'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community' +} + +DOCUMENTATION = ''' +--- +module: scaleway_lb +short_description: Scaleway load-balancer management module +author: Remy Leone (@sieben) +description: + - "This module manages load-balancers on Scaleway." +extends_documentation_fragment: +- community.general.scaleway + + +options: + + name: + description: + - Name of the load-balancer + required: true + + description: + description: + - Description of the load-balancer + required: true + + organization_id: + description: + - Organization identifier + required: true + + state: + description: + - Indicate desired state of the instance. + default: present + choices: + - present + - absent + + region: + description: + - Scaleway zone + required: true + choices: + - nl-ams + - fr-par + + tags: + description: + - List of tags to apply to the load-balancer + + wait: + description: + - Wait for the load-balancer to reach its desired state before returning. 
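The wait, wait_timeout and wait_sleep_time options documented here drive a simple deadline-based poll loop, implemented further down in wait_to_complete_state_transition(). As a standalone illustration of that pattern (wait_for_stable_state is a simplified sketch using time.time() instead of the module's datetime arithmetic):

import time

STABLE_STATES = ("ready", "absent")


def wait_for_stable_state(fetch_state, wait_timeout=300, wait_sleep_time=3):
    """Poll fetch_state() until it reports a stable state or the deadline passes."""
    deadline = time.time() + wait_timeout
    while time.time() < deadline:
        state = fetch_state()
        if state in STABLE_STATES:
            return state
        time.sleep(wait_sleep_time)
    raise RuntimeError("resource did not reach a stable state in %ss" % wait_timeout)


if __name__ == "__main__":
    states = iter(["starting", "starting", "ready"])
    print(wait_for_stable_state(lambda: next(states), wait_timeout=10, wait_sleep_time=0))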
+ type: bool + default: 'no' + + wait_timeout: + description: + - Time to wait for the load-balancer to reach the expected state + required: false + default: 300 + + wait_sleep_time: + description: + - Time to wait before every attempt to check the state of the load-balancer + required: false + default: 3 +''' + +EXAMPLES = ''' +- name: Create a load-balancer + scaleway_lb: + name: foobar + state: present + organization_id: 951df375-e094-4d26-97c1-ba548eeb9c42 + region: fr-par + tags: + - hello + +- name: Delete a load-balancer + scaleway_lb: + name: foobar + state: absent + organization_id: 951df375-e094-4d26-97c1-ba548eeb9c42 + region: fr-par +''' + +RETURNS = ''' +{ + "scaleway_lb": { + "backend_count": 0, + "frontend_count": 0, + "description": "Description of my load-balancer", + "id": "00000000-0000-0000-0000-000000000000", + "instances": [ + { + "id": "00000000-0000-0000-0000-000000000000", + "ip_address": "10.0.0.1", + "region": "fr-par", + "status": "ready" + }, + { + "id": "00000000-0000-0000-0000-000000000000", + "ip_address": "10.0.0.2", + "region": "fr-par", + "status": "ready" + } + ], + "ip": [ + { + "id": "00000000-0000-0000-0000-000000000000", + "ip_address": "192.168.0.1", + "lb_id": "00000000-0000-0000-0000-000000000000", + "region": "fr-par", + "organization_id": "00000000-0000-0000-0000-000000000000", + "reverse": "" + } + ], + "name": "lb_ansible_test", + "organization_id": "00000000-0000-0000-0000-000000000000", + "region": "fr-par", + "status": "ready", + "tags": [ + "first_tag", + "second_tag" + ] + } +} +''' + +import datetime +import time +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.scaleway import SCALEWAY_REGIONS, SCALEWAY_ENDPOINT, scaleway_argument_spec, Scaleway + +STABLE_STATES = ( + "ready", + "absent" +) + +MUTABLE_ATTRIBUTES = ( + "name", + "description" +) + + +def payload_from_wished_lb(wished_lb): + return { + "organization_id": wished_lb["organization_id"], + "name": wished_lb["name"], + "tags": wished_lb["tags"], + "description": wished_lb["description"] + } + + +def fetch_state(api, lb): + api.module.debug("fetch_state of load-balancer: %s" % lb["id"]) + response = api.get(path=api.api_path + "/%s" % lb["id"]) + + if response.status_code == 404: + return "absent" + + if not response.ok: + msg = 'Error during state fetching: (%s) %s' % (response.status_code, response.json) + api.module.fail_json(msg=msg) + + try: + api.module.debug("Load-balancer %s in state: %s" % (lb["id"], response.json["status"])) + return response.json["status"] + except KeyError: + api.module.fail_json(msg="Could not fetch state in %s" % response.json) + + +def wait_to_complete_state_transition(api, lb, force_wait=False): + wait = api.module.params["wait"] + if not (wait or force_wait): + return + wait_timeout = api.module.params["wait_timeout"] + wait_sleep_time = api.module.params["wait_sleep_time"] + + start = datetime.datetime.utcnow() + end = start + datetime.timedelta(seconds=wait_timeout) + while datetime.datetime.utcnow() < end: + api.module.debug("We are going to wait for the load-balancer to finish its transition") + state = fetch_state(api, lb) + if state in STABLE_STATES: + api.module.debug("It seems that the load-balancer is not in transition anymore.") + api.module.debug("load-balancer in state: %s" % fetch_state(api, lb)) + break + time.sleep(wait_sleep_time) + else: + api.module.fail_json(msg="Server takes too long to finish its transition") + + +def 
lb_attributes_should_be_changed(target_lb, wished_lb): + diff = dict((attr, wished_lb[attr]) for attr in MUTABLE_ATTRIBUTES if target_lb[attr] != wished_lb[attr]) + + if diff: + return dict((attr, wished_lb[attr]) for attr in MUTABLE_ATTRIBUTES) + else: + return diff + + +def present_strategy(api, wished_lb): + changed = False + + response = api.get(path=api.api_path) + if not response.ok: + api.module.fail_json(msg='Error getting load-balancers [{0}: {1}]'.format( + response.status_code, response.json['message'])) + + lbs_list = response.json["lbs"] + lb_lookup = dict((lb["name"], lb) + for lb in lbs_list) + + if wished_lb["name"] not in lb_lookup.keys(): + changed = True + if api.module.check_mode: + return changed, {"status": "A load-balancer would be created."} + + # Create Load-balancer + api.warn(payload_from_wished_lb(wished_lb)) + creation_response = api.post(path=api.api_path, + data=payload_from_wished_lb(wished_lb)) + + if not creation_response.ok: + msg = "Error during lb creation: %s: '%s' (%s)" % (creation_response.info['msg'], + creation_response.json['message'], + creation_response.json) + api.module.fail_json(msg=msg) + + wait_to_complete_state_transition(api=api, lb=creation_response.json) + response = api.get(path=api.api_path + "/%s" % creation_response.json["id"]) + return changed, response.json + + target_lb = lb_lookup[wished_lb["name"]] + patch_payload = lb_attributes_should_be_changed(target_lb=target_lb, + wished_lb=wished_lb) + + if not patch_payload: + return changed, target_lb + + changed = True + if api.module.check_mode: + return changed, {"status": "Load-balancer attributes would be changed."} + + lb_patch_response = api.put(path=api.api_path + "/%s" % target_lb["id"], + data=patch_payload) + + if not lb_patch_response.ok: + api.module.fail_json(msg='Error during load-balancer attributes update: [{0}: {1}]'.format( + lb_patch_response.status_code, lb_patch_response.json['message'])) + + wait_to_complete_state_transition(api=api, lb=target_lb) + return changed, lb_patch_response.json + + +def absent_strategy(api, wished_lb): + response = api.get(path=api.api_path) + changed = False + + status_code = response.status_code + lbs_json = response.json + lbs_list = lbs_json["lbs"] + + if not response.ok: + api.module.fail_json(msg='Error getting load-balancers [{0}: {1}]'.format( + status_code, response.json['message'])) + + lb_lookup = dict((lb["name"], lb) + for lb in lbs_list) + if wished_lb["name"] not in lb_lookup.keys(): + return changed, {} + + target_lb = lb_lookup[wished_lb["name"]] + changed = True + if api.module.check_mode: + return changed, {"status": "Load-balancer would be destroyed"} + + wait_to_complete_state_transition(api=api, lb=target_lb, force_wait=True) + response = api.delete(path=api.api_path + "/%s" % target_lb["id"]) + if not response.ok: + api.module.fail_json(msg='Error deleting load-balancer [{0}: {1}]'.format( + response.status_code, response.json)) + + wait_to_complete_state_transition(api=api, lb=target_lb) + return changed, response.json + + +state_strategy = { + "present": present_strategy, + "absent": absent_strategy +} + + +def core(module): + region = module.params["region"] + wished_load_balancer = { + "state": module.params["state"], + "name": module.params["name"], + "description": module.params["description"], + "tags": module.params["tags"], + "organization_id": module.params["organization_id"] + } + module.params['api_url'] = SCALEWAY_ENDPOINT + api = Scaleway(module=module) + api.api_path = "lb/v1/regions/%s/lbs" % 
region + + changed, summary = state_strategy[wished_load_balancer["state"]](api=api, + wished_lb=wished_load_balancer) + module.exit_json(changed=changed, scaleway_lb=summary) + + +def main(): + argument_spec = scaleway_argument_spec() + argument_spec.update(dict( + name=dict(required=True), + description=dict(required=True), + region=dict(required=True, choices=SCALEWAY_REGIONS), + state=dict(choices=state_strategy.keys(), default='present'), + tags=dict(type="list", default=[]), + organization_id=dict(required=True), + wait=dict(type="bool", default=False), + wait_timeout=dict(type="int", default=300), + wait_sleep_time=dict(type="int", default=3), + )) + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + + core(module) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/scaleway/scaleway_organization_facts.py b/plugins/modules/cloud/scaleway/scaleway_organization_facts.py new file mode 100644 index 0000000000..d3efcffd4b --- /dev/null +++ b/plugins/modules/cloud/scaleway/scaleway_organization_facts.py @@ -0,0 +1,108 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# (c) 2018, Yanis Guenane +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['deprecated'], + 'supported_by': 'community'} + +DOCUMENTATION = r''' +--- +module: scaleway_organization_facts +deprecated: + removed_in: '2.13' + why: Deprecated in favour of C(_info) module. + alternative: Use M(scaleway_organization_info) instead. +short_description: Gather facts about the Scaleway organizations available. +description: + - Gather facts about the Scaleway organizations available. 
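Rather than an if/else chain, core() in scaleway_lb.py above dispatches through the state_strategy mapping, so the set of legal states and the dispatch logic cannot drift apart; main() even derives the choices for the state option from the dict's keys. A compact sketch of the same idea:

# Dispatch-table sketch: the mapping is the single source of truth for the
# legal states, so the argument spec and the dispatch can never diverge.

def present_strategy(api, wished):
    return True, {"status": "would create or update"}


def absent_strategy(api, wished):
    return True, {"status": "would delete"}


state_strategy = {
    "present": present_strategy,
    "absent": absent_strategy,
}

valid_choices = list(state_strategy.keys())   # feeds choices= in the argument spec

changed, summary = state_strategy["present"](api=None, wished={})
print(valid_choices, changed, summary)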
+author: + - "Yanis Guenane (@Spredzy)" + - "Remy Leone (@sieben)" +options: + api_url: + description: + - Scaleway API URL + default: 'https://account.scaleway.com' + aliases: ['base_url'] +extends_documentation_fragment: +- community.general.scaleway + +''' + +EXAMPLES = r''' +- name: Gather Scaleway organizations facts + scaleway_organization_facts: +''' + +RETURN = r''' +--- +scaleway_organization_facts: + description: Response from Scaleway API + returned: success + type: complex + sample: + "scaleway_organization_facts": [ + { + "address_city_name": "Paris", + "address_country_code": "FR", + "address_line1": "42 Rue de l'univers", + "address_line2": null, + "address_postal_code": "75042", + "address_subdivision_code": "FR-75", + "creation_date": "2018-08-06T13:43:28.508575+00:00", + "currency": "EUR", + "customer_class": "individual", + "id": "3f709602-5e6c-4619-b80c-e8432ferewtr", + "locale": "fr_FR", + "modification_date": "2018-08-06T14:56:41.401685+00:00", + "name": "James Bond", + "support_id": "694324", + "support_level": "basic", + "support_pin": "9324", + "users": [], + "vat_number": null, + "warnings": [] + } + ] +''' + +from ansible.module_utils.basic import AnsibleModule, env_fallback +from ansible_collections.community.general.plugins.module_utils.scaleway import ( + Scaleway, ScalewayException, scaleway_argument_spec +) + + +class ScalewayOrganizationFacts(Scaleway): + + def __init__(self, module): + super(ScalewayOrganizationFacts, self).__init__(module) + self.name = 'organizations' + + +def main(): + argument_spec = scaleway_argument_spec() + argument_spec.update(dict( + api_url=dict(fallback=(env_fallback, ['SCW_API_URL']), default='https://account.scaleway.com', aliases=['base_url']), + )) + + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + + try: + module.exit_json( + ansible_facts={'scaleway_organization_facts': ScalewayOrganizationFacts(module).get_resources()} + ) + except ScalewayException as exc: + module.fail_json(msg=exc.message) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/scaleway/scaleway_organization_info.py b/plugins/modules/cloud/scaleway/scaleway_organization_info.py new file mode 100644 index 0000000000..4573f428ff --- /dev/null +++ b/plugins/modules/cloud/scaleway/scaleway_organization_info.py @@ -0,0 +1,108 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# (c) 2018, Yanis Guenane +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = r''' +--- +module: scaleway_organization_info +short_description: Gather information about the Scaleway organizations available. +description: + - Gather information about the Scaleway organizations available. 
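Unlike the region-scoped modules, the organization modules talk to the account endpoint, so scaleway_organization_facts above exposes api_url with an environment-variable fallback via env_fallback. The resolution order (explicit parameter first, then SCW_API_URL, then the documented default) can be sketched without AnsibleModule; resolve_api_url below is an illustrative helper, not a collection function:

import os


def resolve_api_url(params, env=os.environ,
                    default="https://account.scaleway.com"):
    """Illustrative equivalent of fallback=(env_fallback, ['SCW_API_URL'])."""
    if params.get("api_url"):          # explicit task parameter wins
        return params["api_url"]
    if env.get("SCW_API_URL"):         # then the environment fallback
        return env["SCW_API_URL"]
    return default                     # then the documented default


print(resolve_api_url({}, env={"SCW_API_URL": "https://account.example"}))
print(resolve_api_url({}, env={}))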
+author: + - "Yanis Guenane (@Spredzy)" + - "Remy Leone (@sieben)" +options: + api_url: + description: + - Scaleway API URL + default: 'https://account.scaleway.com' + aliases: ['base_url'] +extends_documentation_fragment: +- community.general.scaleway + +''' + +EXAMPLES = r''' +- name: Gather Scaleway organizations information + scaleway_organization_info: + register: result + +- debug: + msg: "{{ result.scaleway_organization_info }}" +''' + +RETURN = r''' +--- +scaleway_organization_info: + description: Response from Scaleway API + returned: success + type: complex + sample: + "scaleway_organization_info": [ + { + "address_city_name": "Paris", + "address_country_code": "FR", + "address_line1": "42 Rue de l'univers", + "address_line2": null, + "address_postal_code": "75042", + "address_subdivision_code": "FR-75", + "creation_date": "2018-08-06T13:43:28.508575+00:00", + "currency": "EUR", + "customer_class": "individual", + "id": "3f709602-5e6c-4619-b80c-e8432ferewtr", + "locale": "fr_FR", + "modification_date": "2018-08-06T14:56:41.401685+00:00", + "name": "James Bond", + "support_id": "694324", + "support_level": "basic", + "support_pin": "9324", + "users": [], + "vat_number": null, + "warnings": [] + } + ] +''' + +from ansible.module_utils.basic import AnsibleModule, env_fallback +from ansible_collections.community.general.plugins.module_utils.scaleway import ( + Scaleway, ScalewayException, scaleway_argument_spec +) + + +class ScalewayOrganizationInfo(Scaleway): + + def __init__(self, module): + super(ScalewayOrganizationInfo, self).__init__(module) + self.name = 'organizations' + + +def main(): + argument_spec = scaleway_argument_spec() + argument_spec.update(dict( + api_url=dict(fallback=(env_fallback, ['SCW_API_URL']), default='https://account.scaleway.com', aliases=['base_url']), + )) + + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + + try: + module.exit_json( + scaleway_organization_info=ScalewayOrganizationInfo(module).get_resources() + ) + except ScalewayException as exc: + module.fail_json(msg=exc.message) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/scaleway/scaleway_security_group.py b/plugins/modules/cloud/scaleway/scaleway_security_group.py new file mode 100644 index 0000000000..36b1ad288a --- /dev/null +++ b/plugins/modules/cloud/scaleway/scaleway_security_group.py @@ -0,0 +1,240 @@ +#!/usr/bin/python +# +# Scaleway Security Group management module +# +# Copyright (C) 2018 Antoine Barbare (antoinebarbare@gmail.com). +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +ANSIBLE_METADATA = { + 'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community' +} + +DOCUMENTATION = ''' +--- +module: scaleway_security_group +short_description: Scaleway Security Group management module +author: Antoine Barbare (@abarbare) +description: + - This module manages Security Group on Scaleway account + U(https://developer.scaleway.com). +extends_documentation_fragment: +- community.general.scaleway + + +options: + state: + description: + - Indicate desired state of the Security Group. + type: str + choices: [ absent, present ] + default: present + + organization: + description: + - Organization identifier. + type: str + required: true + + region: + description: + - Scaleway region to use (for example C(par1)). 
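Each of these read-only classes is a thin Scaleway subclass that names a resource collection and rewrites api_url from the SCALEWAY_LOCATION table in module_utils. The real table is not shown in this patch; a plausible minimal shape, with placeholder endpoint URLs, would be:

# Illustrative shape of the SCALEWAY_LOCATION table from module_utils
# (endpoint URLs here are placeholders, not the real values):
SCALEWAY_LOCATION = {
    "par1": {"name": "Paris 1", "api_endpoint": "https://cp-par1.example"},
    "EMEA-FR-PAR1": {"name": "Paris 1", "api_endpoint": "https://cp-par1.example"},
    "ams1": {"name": "Amsterdam 1", "api_endpoint": "https://cp-ams1.example"},
    "EMEA-NL-EVS": {"name": "Amsterdam 1", "api_endpoint": "https://cp-ams1.example"},
}

# This is why both the short and the long EMEA-style region names appear in
# every module's choices list: they are duplicate keys resolving to the same
# endpoint.
region = "EMEA-FR-PAR1"
print(SCALEWAY_LOCATION[region]["api_endpoint"])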
+ type: str + required: true + choices: + - ams1 + - EMEA-NL-EVS + - par1 + - EMEA-FR-PAR1 + + name: + description: + - Name of the Security Group. + type: str + required: true + + description: + description: + - Description of the Security Group. + type: str + + stateful: + description: + - Create a stateful security group which allows established connections in and out. + type: bool + required: true + + inbound_default_policy: + description: + - Default policy for incoming traffic. + type: str + choices: [ accept, drop ] + + outbound_default_policy: + description: + - Default policy for outcoming traffic. + type: str + choices: [ accept, drop ] + + organization_default: + description: + - Create security group to be the default one. + type: bool +''' + +EXAMPLES = ''' + - name: Create a Security Group + scaleway_security_group: + state: present + region: par1 + name: security_group + description: "my security group description" + organization: "43a3b6c8-916f-477b-b7ec-ff1898f5fdd9" + stateful: false + inbound_default_policy: accept + outbound_default_policy: accept + organization_default: false + register: security_group_creation_task +''' + +RETURN = ''' +data: + description: This is only present when C(state=present) + returned: when C(state=present) + type: dict + sample: { + "scaleway_security_group": { + "description": "my security group description", + "enable_default_security": true, + "id": "0168fb1f-cc46-4f69-b4be-c95d2a19bcae", + "inbound_default_policy": "accept", + "name": "security_group", + "organization": "43a3b6c8-916f-477b-b7ec-ff1898f5fdd9", + "organization_default": false, + "outbound_default_policy": "accept", + "servers": [], + "stateful": false + } + } +''' + +from ansible_collections.community.general.plugins.module_utils.scaleway import SCALEWAY_LOCATION, scaleway_argument_spec, Scaleway +from ansible.module_utils.basic import AnsibleModule +from uuid import uuid4 + + +def payload_from_security_group(security_group): + return dict( + (k, v) + for k, v in security_group.items() + if k != 'id' and v is not None + ) + + +def present_strategy(api, security_group): + ret = {'changed': False} + + response = api.get('security_groups') + if not response.ok: + api.module.fail_json(msg='Error getting security groups "%s": "%s" (%s)' % (response.info['msg'], response.json['message'], response.json)) + + security_group_lookup = dict((sg['name'], sg) + for sg in response.json['security_groups']) + + if security_group['name'] not in security_group_lookup.keys(): + ret['changed'] = True + if api.module.check_mode: + # Help user when check mode is enabled by defining id key + ret['scaleway_security_group'] = {'id': str(uuid4())} + return ret + + # Create Security Group + response = api.post('/security_groups', + data=payload_from_security_group(security_group)) + + if not response.ok: + msg = 'Error during security group creation: "%s": "%s" (%s)' % (response.info['msg'], response.json['message'], response.json) + api.module.fail_json(msg=msg) + ret['scaleway_security_group'] = response.json['security_group'] + + else: + ret['scaleway_security_group'] = security_group_lookup[security_group['name']] + + return ret + + +def absent_strategy(api, security_group): + response = api.get('security_groups') + ret = {'changed': False} + + if not response.ok: + api.module.fail_json(msg='Error getting security groups "%s": "%s" (%s)' % (response.info['msg'], response.json['message'], response.json)) + + security_group_lookup = dict((sg['name'], sg) + for sg in 
response.json['security_groups']) + if security_group['name'] not in security_group_lookup.keys(): + return ret + + ret['changed'] = True + if api.module.check_mode: + return ret + + response = api.delete('/security_groups/' + security_group_lookup[security_group['name']]['id']) + if not response.ok: + api.module.fail_json(msg='Error deleting security group "%s": "%s" (%s)' % (response.info['msg'], response.json['message'], response.json)) + + return ret + + +def core(module): + security_group = { + 'organization': module.params['organization'], + 'name': module.params['name'], + 'description': module.params['description'], + 'stateful': module.params['stateful'], + 'inbound_default_policy': module.params['inbound_default_policy'], + 'outbound_default_policy': module.params['outbound_default_policy'], + 'organization_default': module.params['organization_default'], + } + + region = module.params['region'] + module.params['api_url'] = SCALEWAY_LOCATION[region]['api_endpoint'] + + api = Scaleway(module=module) + if module.params['state'] == 'present': + summary = present_strategy(api=api, security_group=security_group) + else: + summary = absent_strategy(api=api, security_group=security_group) + module.exit_json(**summary) + + +def main(): + argument_spec = scaleway_argument_spec() + argument_spec.update(dict( + state=dict(type='str', default='present', choices=['absent', 'present']), + organization=dict(type='str', required=True), + name=dict(type='str', required=True), + description=dict(type='str'), + region=dict(type='str', required=True, choices=SCALEWAY_LOCATION.keys()), + stateful=dict(type='bool', required=True), + inbound_default_policy=dict(type='str', choices=['accept', 'drop']), + outbound_default_policy=dict(type='str', choices=['accept', 'drop']), + organization_default=dict(type='bool'), + )) + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + required_if=[['stateful', True, ['inbound_default_policy', 'outbound_default_policy']]] + ) + + core(module) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/scaleway/scaleway_security_group_facts.py b/plugins/modules/cloud/scaleway/scaleway_security_group_facts.py new file mode 100644 index 0000000000..82907ac27f --- /dev/null +++ b/plugins/modules/cloud/scaleway/scaleway_security_group_facts.py @@ -0,0 +1,111 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# (c) 2018, Yanis Guenane +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['deprecated'], + 'supported_by': 'community'} + +DOCUMENTATION = r''' +--- +module: scaleway_security_group_facts +deprecated: + removed_in: '2.13' + why: Deprecated in favour of C(_info) module. + alternative: Use M(scaleway_security_group_info) instead. +short_description: Gather facts about the Scaleway security groups available. +description: + - Gather facts about the Scaleway security groups available. +author: + - "Yanis Guenane (@Spredzy)" + - "Remy Leone (@sieben)" +options: + region: + description: + - Scaleway region to use (for example par1). 
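The required_if rule in scaleway_security_group's main() above ties the two default-policy options to stateful: both must be supplied whenever stateful=true. Expressed outside AnsibleModule, the same constraint reads as follows (check_required_if is an illustrative helper):

def check_required_if(params):
    """Illustrative equivalent of
    required_if=[['stateful', True, ['inbound_default_policy', 'outbound_default_policy']]]."""
    if params.get("stateful") is True:
        missing = [k for k in ("inbound_default_policy", "outbound_default_policy")
                   if params.get(k) is None]
        if missing:
            raise ValueError("stateful is True but %s is missing" % ", ".join(missing))


check_required_if({"stateful": False})                       # fine
check_required_if({"stateful": True,
                   "inbound_default_policy": "accept",
                   "outbound_default_policy": "drop"})       # fine
# check_required_if({"stateful": True})                      # would raise ValueError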
+ required: true + choices: + - ams1 + - EMEA-NL-EVS + - par1 + - EMEA-FR-PAR1 +extends_documentation_fragment: +- community.general.scaleway + +''' + +EXAMPLES = r''' +- name: Gather Scaleway security groups facts + scaleway_security_group_facts: + region: par1 +''' + +RETURN = r''' +--- +scaleway_security_group_facts: + description: Response from Scaleway API + returned: success + type: complex + sample: + "scaleway_security_group_facts": [ + { + "description": "test-ams", + "enable_default_security": true, + "id": "7fcde327-8bed-43a6-95c4-6dfbc56d8b51", + "name": "test-ams", + "organization": "3f709602-5e6c-4619-b80c-e841c89734af", + "organization_default": false, + "servers": [ + { + "id": "12f19bc7-108c-4517-954c-e6b3d0311363", + "name": "scw-e0d158" + } + ] + } + ] +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.scaleway import ( + Scaleway, + ScalewayException, + scaleway_argument_spec, + SCALEWAY_LOCATION, +) + + +class ScalewaySecurityGroupFacts(Scaleway): + + def __init__(self, module): + super(ScalewaySecurityGroupFacts, self).__init__(module) + self.name = 'security_groups' + + region = module.params["region"] + self.module.params['api_url'] = SCALEWAY_LOCATION[region]["api_endpoint"] + + +def main(): + argument_spec = scaleway_argument_spec() + argument_spec.update(dict( + region=dict(required=True, choices=SCALEWAY_LOCATION.keys()), + )) + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + + try: + module.exit_json( + ansible_facts={'scaleway_security_group_facts': ScalewaySecurityGroupFacts(module).get_resources()} + ) + except ScalewayException as exc: + module.fail_json(msg=exc.message) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/scaleway/scaleway_security_group_info.py b/plugins/modules/cloud/scaleway/scaleway_security_group_info.py new file mode 100644 index 0000000000..de90fde1cc --- /dev/null +++ b/plugins/modules/cloud/scaleway/scaleway_security_group_info.py @@ -0,0 +1,111 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# (c) 2018, Yanis Guenane +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = r''' +--- +module: scaleway_security_group_info +short_description: Gather information about the Scaleway security groups available. +description: + - Gather information about the Scaleway security groups available. +author: + - "Yanis Guenane (@Spredzy)" + - "Remy Leone (@sieben)" +options: + region: + description: + - Scaleway region to use (for example C(par1)). 
+ required: true + choices: + - ams1 + - EMEA-NL-EVS + - par1 + - EMEA-FR-PAR1 +extends_documentation_fragment: +- community.general.scaleway + +''' + +EXAMPLES = r''' +- name: Gather Scaleway security groups information + scaleway_security_group_info: + region: par1 + register: result + +- debug: + msg: "{{ result.scaleway_security_group_info }}" +''' + +RETURN = r''' +--- +scaleway_security_group_info: + description: Response from Scaleway API + returned: success + type: complex + sample: + "scaleway_security_group_info": [ + { + "description": "test-ams", + "enable_default_security": true, + "id": "7fcde327-8bed-43a6-95c4-6dfbc56d8b51", + "name": "test-ams", + "organization": "3f709602-5e6c-4619-b80c-e841c89734af", + "organization_default": false, + "servers": [ + { + "id": "12f19bc7-108c-4517-954c-e6b3d0311363", + "name": "scw-e0d158" + } + ] + } + ] +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.scaleway import ( + Scaleway, + ScalewayException, + scaleway_argument_spec, + SCALEWAY_LOCATION, +) + + +class ScalewaySecurityGroupInfo(Scaleway): + + def __init__(self, module): + super(ScalewaySecurityGroupInfo, self).__init__(module) + self.name = 'security_groups' + + region = module.params["region"] + self.module.params['api_url'] = SCALEWAY_LOCATION[region]["api_endpoint"] + + +def main(): + argument_spec = scaleway_argument_spec() + argument_spec.update(dict( + region=dict(required=True, choices=SCALEWAY_LOCATION.keys()), + )) + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + + try: + module.exit_json( + scaleway_security_group_info=ScalewaySecurityGroupInfo(module).get_resources() + ) + except ScalewayException as exc: + module.fail_json(msg=exc.message) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/scaleway/scaleway_security_group_rule.py b/plugins/modules/cloud/scaleway/scaleway_security_group_rule.py new file mode 100644 index 0000000000..2844288aac --- /dev/null +++ b/plugins/modules/cloud/scaleway/scaleway_security_group_rule.py @@ -0,0 +1,258 @@ +#!/usr/bin/python +# +# Scaleway Security Group Rule management module +# +# Copyright (C) 2018 Antoine Barbare (antoinebarbare@gmail.com). +# +# GNU General Public License v3.0+ (see COPYING or +# https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +ANSIBLE_METADATA = { + 'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community' +} + +DOCUMENTATION = ''' +--- +module: scaleway_security_group_rule +short_description: Scaleway Security Group Rule management module +author: Antoine Barbare (@abarbare) +description: + - This module manages Security Group Rule on Scaleway account + U(https://developer.scaleway.com) +extends_documentation_fragment: +- community.general.scaleway + + +options: + state: + description: + - Indicate desired state of the Security Group Rule. + default: present + choices: + - present + - absent + + region: + description: + - Scaleway region to use (for example C(par1)). 
+ required: true + choices: + - ams1 + - EMEA-NL-EVS + - par1 + - EMEA-FR-PAR1 + + protocol: + description: + - Network protocol to use + choices: + - TCP + - UDP + - ICMP + required: true + + port: + description: + - Port related to the rule, null value for all the ports + required: true + type: int + + ip_range: + description: + - IPV4 CIDR notation to apply to the rule + default: 0.0.0.0/0 + + direction: + description: + - Rule direction + choices: + - inbound + - outbound + required: true + + action: + description: + - Rule action + choices: + - accept + - drop + required: true + + security_group: + description: + - Security Group unique identifier + required: true +''' + +EXAMPLES = ''' + - name: Create a Security Group Rule + scaleway_security_group_rule: + state: present + region: par1 + protocol: TCP + port: 80 + ip_range: 0.0.0.0/0 + direction: inbound + action: accept + security_group: b57210ee-1281-4820-a6db-329f78596ecb + register: security_group_rule_creation_task +''' + +RETURN = ''' +data: + description: This is only present when C(state=present) + returned: when C(state=present) + type: dict + sample: { + "scaleway_security_group_rule": { + "direction": "inbound", + "protocol": "TCP", + "ip_range": "0.0.0.0/0", + "dest_port_from": 80, + "action": "accept", + "position": 2, + "dest_port_to": null, + "editable": null, + "id": "10cb0b9a-80f6-4830-abd7-a31cd828b5e9" + } + } +''' + +from ansible_collections.community.general.plugins.module_utils.scaleway import SCALEWAY_LOCATION, scaleway_argument_spec, Scaleway, payload_from_object +from ansible_collections.ansible.netcommon.plugins.module_utils.compat.ipaddress import ip_network +from ansible.module_utils._text import to_text +from ansible.module_utils.basic import AnsibleModule + + +def get_sgr_from_api(security_group_rules, security_group_rule): + """ Check if a security_group_rule specs are present in security_group_rules + Return None if no rules match the specs + Return the rule if found + """ + for sgr in security_group_rules: + if (sgr['ip_range'] == security_group_rule['ip_range'] and sgr['dest_port_from'] == security_group_rule['dest_port_from'] and + sgr['direction'] == security_group_rule['direction'] and sgr['action'] == security_group_rule['action'] and + sgr['protocol'] == security_group_rule['protocol']): + return sgr + + return None + + +def present_strategy(api, security_group_id, security_group_rule): + ret = {'changed': False} + + response = api.get('security_groups/%s/rules' % security_group_id) + if not response.ok: + api.module.fail_json( + msg='Error getting security group rules "%s": "%s" (%s)' % + (response.info['msg'], response.json['message'], response.json)) + + existing_rule = get_sgr_from_api( + response.json['rules'], security_group_rule) + + if not existing_rule: + ret['changed'] = True + if api.module.check_mode: + return ret + + # Create Security Group Rule + response = api.post('/security_groups/%s/rules' % security_group_id, + data=payload_from_object(security_group_rule)) + + if not response.ok: + api.module.fail_json( + msg='Error during security group rule creation: "%s": "%s" (%s)' % + (response.info['msg'], response.json['message'], response.json)) + ret['scaleway_security_group_rule'] = response.json['rule'] + + else: + ret['scaleway_security_group_rule'] = existing_rule + + return ret + + +def absent_strategy(api, security_group_id, security_group_rule): + ret = {'changed': False} + + response = api.get('security_groups/%s/rules' % security_group_id) + if not response.ok: + 
api.module.fail_json( + msg='Error getting security group rules "%s": "%s" (%s)' % + (response.info['msg'], response.json['message'], response.json)) + + existing_rule = get_sgr_from_api( + response.json['rules'], security_group_rule) + + if not existing_rule: + return ret + + ret['changed'] = True + if api.module.check_mode: + return ret + + response = api.delete( + '/security_groups/%s/rules/%s' % + (security_group_id, existing_rule['id'])) + if not response.ok: + api.module.fail_json( + msg='Error deleting security group rule "%s": "%s" (%s)' % + (response.info['msg'], response.json['message'], response.json)) + + return ret + + +def core(module): + api = Scaleway(module=module) + + security_group_rule = { + 'protocol': module.params['protocol'], + 'dest_port_from': module.params['port'], + 'ip_range': module.params['ip_range'], + 'direction': module.params['direction'], + 'action': module.params['action'], + } + + region = module.params['region'] + module.params['api_url'] = SCALEWAY_LOCATION[region]['api_endpoint'] + + if module.params['state'] == 'present': + summary = present_strategy( + api=api, + security_group_id=module.params['security_group'], + security_group_rule=security_group_rule) + else: + summary = absent_strategy( + api=api, + security_group_id=module.params['security_group'], + security_group_rule=security_group_rule) + module.exit_json(**summary) + + +def main(): + argument_spec = scaleway_argument_spec() + argument_spec.update( + state=dict(type='str', default='present', choices=['absent', 'present']), + region=dict(type='str', required=True, choices=SCALEWAY_LOCATION.keys()), + protocol=dict(type='str', required=True, choices=['TCP', 'UDP', 'ICMP']), + port=dict(type='int', required=True), + ip_range=dict(type='str', default='0.0.0.0/0'), + direction=dict(type='str', required=True, choices=['inbound', 'outbound']), + action=dict(type='str', required=True, choices=['accept', 'drop']), + security_group=dict(type='str', required=True), + ) + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + + core(module) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/scaleway/scaleway_server_facts.py b/plugins/modules/cloud/scaleway/scaleway_server_facts.py new file mode 100644 index 0000000000..c38fe299ab --- /dev/null +++ b/plugins/modules/cloud/scaleway/scaleway_server_facts.py @@ -0,0 +1,194 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# (c) 2018, Yanis Guenane +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['deprecated'], + 'supported_by': 'community'} + +DOCUMENTATION = r''' +--- +module: scaleway_server_facts +deprecated: + removed_in: '2.13' + why: Deprecated in favour of C(_info) module. + alternative: Use M(scaleway_server_info) instead. +short_description: Gather facts about the Scaleway servers available. +description: + - Gather facts about the Scaleway servers available. +author: + - "Yanis Guenane (@Spredzy)" + - "Remy Leone (@sieben)" +extends_documentation_fragment: +- community.general.scaleway + +options: + region: + description: + - Scaleway region to use (for example par1). 
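scaleway_security_group_rule above imports ip_network from the netcommon compatibility layer to handle the ip_range CIDR on Python 2 and 3 alike. On Python 3 the standard library ipaddress module covers the same need; a minimal validation sketch (normalize_ip_range is illustrative, not the module's own helper):

import ipaddress  # stdlib on Python 3; the module uses a py2/py3 compat shim


def normalize_ip_range(value, default="0.0.0.0/0"):
    """Validate a CIDR like the ip_range option and return its canonical form."""
    net = ipaddress.ip_network(u"%s" % (value or default), strict=False)
    return str(net)


print(normalize_ip_range("192.168.1.1/24"))   # -> 192.168.1.0/24 (host bits masked)
print(normalize_ip_range(None))               # -> 0.0.0.0/0 (the documented default)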
+ required: true + choices: + - ams1 + - EMEA-NL-EVS + - par1 + - EMEA-FR-PAR1 +''' + +EXAMPLES = r''' +- name: Gather Scaleway servers facts + scaleway_server_facts: + region: par1 +''' + +RETURN = r''' +--- +scaleway_server_facts: + description: Response from Scaleway API + returned: success + type: complex + sample: + "scaleway_server_facts": [ + { + "arch": "x86_64", + "boot_type": "local", + "bootscript": { + "architecture": "x86_64", + "bootcmdargs": "LINUX_COMMON scaleway boot=local nbd.max_part=16", + "default": true, + "dtb": "", + "id": "b1e68c26-a19c-4eac-9222-498b22bd7ad9", + "initrd": "http://169.254.42.24/initrd/initrd-Linux-x86_64-v3.14.5.gz", + "kernel": "http://169.254.42.24/kernel/x86_64-mainline-lts-4.4-4.4.127-rev1/vmlinuz-4.4.127", + "organization": "11111111-1111-4111-8111-111111111111", + "public": true, + "title": "x86_64 mainline 4.4.127 rev1" + }, + "commercial_type": "START1-XS", + "creation_date": "2018-08-14T21:36:56.271545+00:00", + "dynamic_ip_required": false, + "enable_ipv6": false, + "extra_networks": [], + "hostname": "scw-e0d256", + "id": "12f19bc7-108c-4517-954c-e6b3d0311363", + "image": { + "arch": "x86_64", + "creation_date": "2018-04-26T12:42:21.619844+00:00", + "default_bootscript": { + "architecture": "x86_64", + "bootcmdargs": "LINUX_COMMON scaleway boot=local nbd.max_part=16", + "default": true, + "dtb": "", + "id": "b1e68c26-a19c-4eac-9222-498b22bd7ad9", + "initrd": "http://169.254.42.24/initrd/initrd-Linux-x86_64-v3.14.5.gz", + "kernel": "http://169.254.42.24/kernel/x86_64-mainline-lts-4.4-4.4.127-rev1/vmlinuz-4.4.127", + "organization": "11111111-1111-4111-8111-111111111111", + "public": true, + "title": "x86_64 mainline 4.4.127 rev1" + }, + "extra_volumes": [], + "from_server": null, + "id": "67375eb1-f14d-4f02-bb42-6119cecbde51", + "modification_date": "2018-04-26T12:49:07.573004+00:00", + "name": "Ubuntu Xenial", + "organization": "51b656e3-4865-41e8-adbc-0c45bdd780db", + "public": true, + "root_volume": { + "id": "020b8d61-3867-4a0e-84a4-445c5393e05d", + "name": "snapshot-87fc282d-f252-4262-adad-86979d9074cf-2018-04-26_12:42", + "size": 25000000000, + "volume_type": "l_ssd" + }, + "state": "available" + }, + "ipv6": null, + "location": { + "cluster_id": "5", + "hypervisor_id": "412", + "node_id": "2", + "platform_id": "13", + "zone_id": "par1" + }, + "maintenances": [], + "modification_date": "2018-08-14T21:37:28.630882+00:00", + "name": "scw-e0d256", + "organization": "3f709602-5e6c-4619-b80c-e841c89734af", + "private_ip": "10.14.222.131", + "protected": false, + "public_ip": { + "address": "163.172.170.197", + "dynamic": false, + "id": "ea081794-a581-4495-8451-386ddaf0a451" + }, + "security_group": { + "id": "a37379d2-d8b0-4668-9cfb-1233fc436f7e", + "name": "Default security group" + }, + "state": "running", + "state_detail": "booted", + "tags": [], + "volumes": { + "0": { + "creation_date": "2018-08-14T21:36:56.271545+00:00", + "export_uri": "device://dev/vda", + "id": "68386fae-4f55-4fbf-aabb-953036a85872", + "modification_date": "2018-08-14T21:36:56.271545+00:00", + "name": "snapshot-87fc282d-f252-4262-adad-86979d9074cf-2018-04-26_12:42", + "organization": "3f709602-5e6c-4619-b80c-e841c89734af", + "server": { + "id": "12f19bc7-108c-4517-954c-e6b3d0311363", + "name": "scw-e0d256" + }, + "size": 25000000000, + "state": "available", + "volume_type": "l_ssd" + } + } + } + ] +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.scaleway import ( + Scaleway, + 
ScalewayException, + scaleway_argument_spec, + SCALEWAY_LOCATION, +) + + +class ScalewayServerFacts(Scaleway): + + def __init__(self, module): + super(ScalewayServerFacts, self).__init__(module) + self.name = 'servers' + + region = module.params["region"] + self.module.params['api_url'] = SCALEWAY_LOCATION[region]["api_endpoint"] + + +def main(): + argument_spec = scaleway_argument_spec() + argument_spec.update(dict( + region=dict(required=True, choices=SCALEWAY_LOCATION.keys()), + )) + + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + + try: + module.exit_json( + ansible_facts={'scaleway_server_facts': ScalewayServerFacts(module).get_resources()} + ) + except ScalewayException as exc: + module.fail_json(msg=exc.message) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/scaleway/scaleway_server_info.py b/plugins/modules/cloud/scaleway/scaleway_server_info.py new file mode 100644 index 0000000000..30bb01a811 --- /dev/null +++ b/plugins/modules/cloud/scaleway/scaleway_server_info.py @@ -0,0 +1,194 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# (c) 2018, Yanis Guenane +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = r''' +--- +module: scaleway_server_info +short_description: Gather information about the Scaleway servers available. +description: + - Gather information about the Scaleway servers available. +author: + - "Yanis Guenane (@Spredzy)" + - "Remy Leone (@sieben)" +extends_documentation_fragment: +- community.general.scaleway + +options: + region: + description: + - Scaleway region to use (for example C(par1)). 
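All of these wrappers share one error-translation contract: get_resources() raises ScalewayException, and main() converts it into fail_json(msg=exc.message). A reduced sketch of that contract; ScalewayException is re-declared locally and FakeModule is a stand-in for AnsibleModule, so both shapes are illustrative rather than the collection's actual classes:

class ScalewayException(Exception):
    def __init__(self, message):
        super(ScalewayException, self).__init__(message)
        self.message = message   # modules read exc.message, so expose it explicitly


class FakeModule(object):
    """Illustrative stand-in for AnsibleModule's exit/fail contract."""
    def exit_json(self, **kw):
        print("SUCCESS", kw)

    def fail_json(self, **kw):
        print("FAILED", kw)


def run(module, fetch):
    try:
        module.exit_json(scaleway_server_info=fetch())
    except ScalewayException as exc:
        module.fail_json(msg=exc.message)


def failing_fetch():
    raise ScalewayException("API returned 500")


run(FakeModule(), lambda: [{"id": "srv-1"}])   # -> SUCCESS {...}
run(FakeModule(), failing_fetch)               # -> FAILED {'msg': 'API returned 500'}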
+ required: true + choices: + - ams1 + - EMEA-NL-EVS + - par1 + - EMEA-FR-PAR1 +''' + +EXAMPLES = r''' +- name: Gather Scaleway servers information + scaleway_server_info: + region: par1 + register: result + +- debug: + msg: "{{ result.scaleway_server_info }}" +''' + +RETURN = r''' +--- +scaleway_server_info: + description: Response from Scaleway API + returned: success + type: complex + sample: + "scaleway_server_info": [ + { + "arch": "x86_64", + "boot_type": "local", + "bootscript": { + "architecture": "x86_64", + "bootcmdargs": "LINUX_COMMON scaleway boot=local nbd.max_part=16", + "default": true, + "dtb": "", + "id": "b1e68c26-a19c-4eac-9222-498b22bd7ad9", + "initrd": "http://169.254.42.24/initrd/initrd-Linux-x86_64-v3.14.5.gz", + "kernel": "http://169.254.42.24/kernel/x86_64-mainline-lts-4.4-4.4.127-rev1/vmlinuz-4.4.127", + "organization": "11111111-1111-4111-8111-111111111111", + "public": true, + "title": "x86_64 mainline 4.4.127 rev1" + }, + "commercial_type": "START1-XS", + "creation_date": "2018-08-14T21:36:56.271545+00:00", + "dynamic_ip_required": false, + "enable_ipv6": false, + "extra_networks": [], + "hostname": "scw-e0d256", + "id": "12f19bc7-108c-4517-954c-e6b3d0311363", + "image": { + "arch": "x86_64", + "creation_date": "2018-04-26T12:42:21.619844+00:00", + "default_bootscript": { + "architecture": "x86_64", + "bootcmdargs": "LINUX_COMMON scaleway boot=local nbd.max_part=16", + "default": true, + "dtb": "", + "id": "b1e68c26-a19c-4eac-9222-498b22bd7ad9", + "initrd": "http://169.254.42.24/initrd/initrd-Linux-x86_64-v3.14.5.gz", + "kernel": "http://169.254.42.24/kernel/x86_64-mainline-lts-4.4-4.4.127-rev1/vmlinuz-4.4.127", + "organization": "11111111-1111-4111-8111-111111111111", + "public": true, + "title": "x86_64 mainline 4.4.127 rev1" + }, + "extra_volumes": [], + "from_server": null, + "id": "67375eb1-f14d-4f02-bb42-6119cecbde51", + "modification_date": "2018-04-26T12:49:07.573004+00:00", + "name": "Ubuntu Xenial", + "organization": "51b656e3-4865-41e8-adbc-0c45bdd780db", + "public": true, + "root_volume": { + "id": "020b8d61-3867-4a0e-84a4-445c5393e05d", + "name": "snapshot-87fc282d-f252-4262-adad-86979d9074cf-2018-04-26_12:42", + "size": 25000000000, + "volume_type": "l_ssd" + }, + "state": "available" + }, + "ipv6": null, + "location": { + "cluster_id": "5", + "hypervisor_id": "412", + "node_id": "2", + "platform_id": "13", + "zone_id": "par1" + }, + "maintenances": [], + "modification_date": "2018-08-14T21:37:28.630882+00:00", + "name": "scw-e0d256", + "organization": "3f709602-5e6c-4619-b80c-e841c89734af", + "private_ip": "10.14.222.131", + "protected": false, + "public_ip": { + "address": "163.172.170.197", + "dynamic": false, + "id": "ea081794-a581-4495-8451-386ddaf0a451" + }, + "security_group": { + "id": "a37379d2-d8b0-4668-9cfb-1233fc436f7e", + "name": "Default security group" + }, + "state": "running", + "state_detail": "booted", + "tags": [], + "volumes": { + "0": { + "creation_date": "2018-08-14T21:36:56.271545+00:00", + "export_uri": "device://dev/vda", + "id": "68386fae-4f55-4fbf-aabb-953036a85872", + "modification_date": "2018-08-14T21:36:56.271545+00:00", + "name": "snapshot-87fc282d-f252-4262-adad-86979d9074cf-2018-04-26_12:42", + "organization": "3f709602-5e6c-4619-b80c-e841c89734af", + "server": { + "id": "12f19bc7-108c-4517-954c-e6b3d0311363", + "name": "scw-e0d256" + }, + "size": 25000000000, + "state": "available", + "volume_type": "l_ssd" + } + } + } + ] +''' + +from ansible.module_utils.basic import AnsibleModule +from 
ansible_collections.community.general.plugins.module_utils.scaleway import ( + Scaleway, + ScalewayException, + scaleway_argument_spec, + SCALEWAY_LOCATION, +) + + +class ScalewayServerInfo(Scaleway): + + def __init__(self, module): + super(ScalewayServerInfo, self).__init__(module) + self.name = 'servers' + + region = module.params["region"] + self.module.params['api_url'] = SCALEWAY_LOCATION[region]["api_endpoint"] + + +def main(): + argument_spec = scaleway_argument_spec() + argument_spec.update(dict( + region=dict(required=True, choices=SCALEWAY_LOCATION.keys()), + )) + + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + + try: + module.exit_json( + scaleway_server_info=ScalewayServerInfo(module).get_resources() + ) + except ScalewayException as exc: + module.fail_json(msg=exc.message) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/scaleway/scaleway_snapshot_facts.py b/plugins/modules/cloud/scaleway/scaleway_snapshot_facts.py new file mode 100644 index 0000000000..45a01051b0 --- /dev/null +++ b/plugins/modules/cloud/scaleway/scaleway_snapshot_facts.py @@ -0,0 +1,112 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# (c) 2018, Yanis Guenane +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['deprecated'], + 'supported_by': 'community'} + +DOCUMENTATION = r''' +--- +module: scaleway_snapshot_facts +deprecated: + removed_in: '2.13' + why: Deprecated in favour of C(_info) module. + alternative: Use M(scaleway_snapshot_info) instead. +short_description: Gather facts about the Scaleway snapshots available. +description: + - Gather facts about the Scaleway snapshot available. +author: + - "Yanis Guenane (@Spredzy)" + - "Remy Leone (@sieben)" +extends_documentation_fragment: +- community.general.scaleway + +options: + region: + description: + - Scaleway region to use (for example par1). 
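The info modules return the API's server list unfiltered, so any narrowing, such as keeping only running servers with a public address, happens on the consumer's side. A sketch of that post-processing in plain Python, using an abbreviated payload modeled on the RETURN sample above:

# Post-processing a registered scaleway_server_info result; the payload here
# is an abbreviated stand-in for the real API response.
servers = [
    {"name": "scw-e0d256", "state": "running",
     "public_ip": {"address": "163.172.170.197"}},
    {"name": "scw-stopped", "state": "stopped", "public_ip": None},
]

running_ips = [s["public_ip"]["address"]
               for s in servers
               if s["state"] == "running" and s["public_ip"]]
print(running_ips)   # -> ['163.172.170.197']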
+ required: true + choices: + - ams1 + - EMEA-NL-EVS + - par1 + - EMEA-FR-PAR1 +''' + +EXAMPLES = r''' +- name: Gather Scaleway snapshots facts + scaleway_snapshot_facts: + region: par1 +''' + +RETURN = r''' +--- +scaleway_snapshot_facts: + description: Response from Scaleway API + returned: success + type: complex + sample: + "scaleway_snapshot_facts": [ + { + "base_volume": { + "id": "68386fae-4f55-4fbf-aabb-953036a85872", + "name": "snapshot-87fc282d-f252-4262-adad-86979d9074cf-2018-04-26_12:42" + }, + "creation_date": "2018-08-14T22:34:35.299461+00:00", + "id": "b61b4b03-a2e9-4da5-b5ea-e462ac0662d2", + "modification_date": "2018-08-14T22:34:54.520560+00:00", + "name": "snapshot-87fc282d-f252-4262-adad-86979d9074cf-2018-04-26_12:42 snapshot", + "organization": "3f709602-5e6c-4619-b80c-e841c89734af", + "size": 25000000000, + "state": "available", + "volume_type": "l_ssd" + } + ] +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.scaleway import ( + Scaleway, + ScalewayException, + scaleway_argument_spec, + SCALEWAY_LOCATION +) + + +class ScalewaySnapshotFacts(Scaleway): + + def __init__(self, module): + super(ScalewaySnapshotFacts, self).__init__(module) + self.name = 'snapshots' + + region = module.params["region"] + self.module.params['api_url'] = SCALEWAY_LOCATION[region]["api_endpoint"] + + +def main(): + argument_spec = scaleway_argument_spec() + argument_spec.update(dict( + region=dict(required=True, choices=SCALEWAY_LOCATION.keys()), + )) + + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + + try: + module.exit_json( + ansible_facts={'scaleway_snapshot_facts': ScalewaySnapshotFacts(module).get_resources()} + ) + except ScalewayException as exc: + module.fail_json(msg=exc.message) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/scaleway/scaleway_snapshot_info.py b/plugins/modules/cloud/scaleway/scaleway_snapshot_info.py new file mode 100644 index 0000000000..00fcf9894e --- /dev/null +++ b/plugins/modules/cloud/scaleway/scaleway_snapshot_info.py @@ -0,0 +1,112 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# (c) 2018, Yanis Guenane +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = r''' +--- +module: scaleway_snapshot_info +short_description: Gather information about the Scaleway snapshots available. +description: + - Gather information about the Scaleway snapshot available. +author: + - "Yanis Guenane (@Spredzy)" + - "Remy Leone (@sieben)" +extends_documentation_fragment: +- community.general.scaleway + +options: + region: + description: + - Scaleway region to use (for example C(par1)). 
+ required: true + choices: + - ams1 + - EMEA-NL-EVS + - par1 + - EMEA-FR-PAR1 +''' + +EXAMPLES = r''' +- name: Gather Scaleway snapshots information + scaleway_snapshot_info: + region: par1 + register: result + +- debug: + msg: "{{ result.scaleway_snapshot_info }}" +''' + +RETURN = r''' +--- +scaleway_snapshot_info: + description: Response from Scaleway API + returned: success + type: complex + sample: + "scaleway_snapshot_info": [ + { + "base_volume": { + "id": "68386fae-4f55-4fbf-aabb-953036a85872", + "name": "snapshot-87fc282d-f252-4262-adad-86979d9074cf-2018-04-26_12:42" + }, + "creation_date": "2018-08-14T22:34:35.299461+00:00", + "id": "b61b4b03-a2e9-4da5-b5ea-e462ac0662d2", + "modification_date": "2018-08-14T22:34:54.520560+00:00", + "name": "snapshot-87fc282d-f252-4262-adad-86979d9074cf-2018-04-26_12:42 snapshot", + "organization": "3f709602-5e6c-4619-b80c-e841c89734af", + "size": 25000000000, + "state": "available", + "volume_type": "l_ssd" + } + ] +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.scaleway import ( + Scaleway, + ScalewayException, + scaleway_argument_spec, + SCALEWAY_LOCATION +) + + +class ScalewaySnapshotInfo(Scaleway): + + def __init__(self, module): + super(ScalewaySnapshotInfo, self).__init__(module) + self.name = 'snapshots' + + region = module.params["region"] + self.module.params['api_url'] = SCALEWAY_LOCATION[region]["api_endpoint"] + + +def main(): + argument_spec = scaleway_argument_spec() + argument_spec.update(dict( + region=dict(required=True, choices=SCALEWAY_LOCATION.keys()), + )) + + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + + try: + module.exit_json( + scaleway_snapshot_info=ScalewaySnapshotInfo(module).get_resources() + ) + except ScalewayException as exc: + module.fail_json(msg=exc.message) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/scaleway/scaleway_sshkey.py b/plugins/modules/cloud/scaleway/scaleway_sshkey.py new file mode 100644 index 0000000000..1bb1647884 --- /dev/null +++ b/plugins/modules/cloud/scaleway/scaleway_sshkey.py @@ -0,0 +1,175 @@ +#!/usr/bin/python +# +# Scaleway SSH keys management module +# +# Copyright (C) 2018 Online SAS. +# https://www.scaleway.com +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +ANSIBLE_METADATA = { + 'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community' +} + +DOCUMENTATION = ''' +--- +module: scaleway_sshkey +short_description: Scaleway SSH keys management module +author: Remy Leone (@sieben) +description: + - This module manages SSH keys on Scaleway account + U(https://developer.scaleway.com) +extends_documentation_fragment: +- community.general.scaleway + + +options: + state: + description: + - Indicate desired state of the SSH key. + default: present + choices: + - present + - absent + ssh_pub_key: + description: + - The public SSH key as a string to add. + required: true + api_url: + description: + - Scaleway API URL + default: 'https://account.scaleway.com' + aliases: ['base_url'] +''' + +EXAMPLES = ''' +- name: "Add SSH key" + scaleway_sshkey: + ssh_pub_key: "ssh-rsa AAAA..." + state: "present" + +- name: "Delete SSH key" + scaleway_sshkey: + ssh_pub_key: "ssh-rsa AAAA..." 
+ state: "absent" + +- name: "Add SSH key with explicit token" + scaleway_sshkey: + ssh_pub_key: "ssh-rsa AAAA..." + state: "present" + oauth_token: "6ecd2c9b-6f4f-44d4-a187-61a92078d08c" +''' + +RETURN = ''' +data: + description: This is only present when C(state=present) + returned: when C(state=present) + type: dict + sample: { + "ssh_public_keys": [ + {"key": "ssh-rsa AAAA...."} + ] + } +''' + +from ansible.module_utils.basic import AnsibleModule, env_fallback +from ansible_collections.community.general.plugins.module_utils.scaleway import scaleway_argument_spec, Scaleway + + +def extract_present_sshkeys(raw_organization_dict): + ssh_key_list = raw_organization_dict["organizations"][0]["users"][0]["ssh_public_keys"] + ssh_key_lookup = [ssh_key["key"] for ssh_key in ssh_key_list] + return ssh_key_lookup + + +def extract_user_id(raw_organization_dict): + return raw_organization_dict["organizations"][0]["users"][0]["id"] + + +def sshkey_user_patch(ssh_lookup): + ssh_list = {"ssh_public_keys": [{"key": key} + for key in ssh_lookup]} + return ssh_list + + +def core(module): + ssh_pub_key = module.params['ssh_pub_key'] + state = module.params["state"] + account_api = Scaleway(module) + response = account_api.get('organizations') + + status_code = response.status_code + organization_json = response.json + + if not response.ok: + module.fail_json(msg='Error getting ssh key [{0}: {1}]'.format( + status_code, response.json['message'])) + + user_id = extract_user_id(organization_json) + present_sshkeys = [] + try: + present_sshkeys = extract_present_sshkeys(organization_json) + except (KeyError, IndexError) as e: + module.fail_json(changed=False, data="Error while extracting present SSH keys from API") + + if state in ('present',): + if ssh_pub_key in present_sshkeys: + module.exit_json(changed=False) + + # If key not found create it! 
+ if module.check_mode: + module.exit_json(changed=True) + + present_sshkeys.append(ssh_pub_key) + payload = sshkey_user_patch(present_sshkeys) + + response = account_api.patch('/users/%s' % user_id, data=payload) + + if response.ok: + module.exit_json(changed=True, data=response.json) + + module.fail_json(msg='Error creating ssh key [{0}: {1}]'.format( + response.status_code, response.json)) + + elif state in ('absent',): + if ssh_pub_key not in present_sshkeys: + module.exit_json(changed=False) + + if module.check_mode: + module.exit_json(changed=True) + + present_sshkeys.remove(ssh_pub_key) + payload = sshkey_user_patch(present_sshkeys) + + response = account_api.patch('/users/%s' % user_id, data=payload) + + if response.ok: + module.exit_json(changed=True, data=response.json) + + module.fail_json(msg='Error deleting ssh key [{0}: {1}]'.format( + response.status_code, response.json)) + + +def main(): + argument_spec = scaleway_argument_spec() + argument_spec.update(dict( + state=dict(default='present', choices=['absent', 'present']), + ssh_pub_key=dict(required=True), + api_url=dict(fallback=(env_fallback, ['SCW_API_URL']), default='https://account.scaleway.com', aliases=['base_url']), + )) + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + + core(module) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/scaleway/scaleway_user_data.py b/plugins/modules/cloud/scaleway/scaleway_user_data.py new file mode 100644 index 0000000000..99db51cd25 --- /dev/null +++ b/plugins/modules/cloud/scaleway/scaleway_user_data.py @@ -0,0 +1,171 @@ +#!/usr/bin/python +# +# Scaleway user data management module +# +# Copyright (C) 2018 Online SAS. +# https://www.scaleway.com +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +ANSIBLE_METADATA = { + 'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community' +} + + +DOCUMENTATION = ''' +--- +module: scaleway_user_data +short_description: Scaleway user_data management module +author: Remy Leone (@sieben) +description: + - "This module manages user_data on compute instances on Scaleway." + - "It can be used to configure cloud-init for instance" +extends_documentation_fragment: +- community.general.scaleway + + +options: + + server_id: + description: + - Scaleway Compute instance ID of the server + required: true + + user_data: + description: + - User defined data. Typically used with `cloud-init`. 
+      - Pass your cloud-init script here as a string.
+    required: false
+
+  region:
+    description:
+      - Scaleway compute zone.
+    required: true
+    choices:
+      - ams1
+      - EMEA-NL-EVS
+      - par1
+      - EMEA-FR-PAR1
+'''
+
+EXAMPLES = '''
+- name: Update the cloud-init
+  scaleway_user_data:
+    server_id: '5a33b4ab-57dd-4eb6-8b0a-d95eb63492ce'
+    region: ams1
+    user_data:
+      cloud-init: 'final_message: "Hello World!"'
+'''
+
+RETURN = '''
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.scaleway import SCALEWAY_LOCATION, scaleway_argument_spec, Scaleway
+
+
+def patch_user_data(compute_api, server_id, key, value):
+    compute_api.module.debug("Starting patching user_data attributes")
+
+    path = "servers/%s/user_data/%s" % (server_id, key)
+    response = compute_api.patch(path=path, data=value, headers={"Content-type": "text/plain"})
+    if not response.ok:
+        msg = 'Error during user_data patching: %s %s' % (response.status_code, response.body)
+        compute_api.module.fail_json(msg=msg)
+
+    return response
+
+
+def delete_user_data(compute_api, server_id, key):
+    compute_api.module.debug("Starting deleting user_data attributes: %s" % key)
+
+    response = compute_api.delete(path="servers/%s/user_data/%s" % (server_id, key))
+
+    if not response.ok:
+        msg = 'Error during user_data deleting: (%s) %s' % (response.status_code, response.body)
+        compute_api.module.fail_json(msg=msg)
+
+    return response
+
+
+def get_user_data(compute_api, server_id, key):
+    compute_api.module.debug("Starting fetching user_data attributes")
+
+    path = "servers/%s/user_data/%s" % (server_id, key)
+    response = compute_api.get(path=path)
+    if not response.ok:
+        msg = 'Error during user_data fetching: %s %s' % (response.status_code, response.body)
+        compute_api.module.fail_json(msg=msg)
+
+    return response.json
+
+
+def core(module):
+    region = module.params["region"]
+    server_id = module.params["server_id"]
+    user_data = module.params["user_data"]
+    changed = False
+
+    module.params['api_url'] = SCALEWAY_LOCATION[region]["api_endpoint"]
+    compute_api = Scaleway(module=module)
+
+    user_data_list = compute_api.get(path="servers/%s/user_data" % server_id)
+    if not user_data_list.ok:
+        msg = 'Error during user_data fetching: %s %s' % (user_data_list.status_code, user_data_list.body)
+        compute_api.module.fail_json(msg=msg)
+
+    present_user_data_keys = user_data_list.json["user_data"]
+    present_user_data = dict(
+        (key, get_user_data(compute_api=compute_api, server_id=server_id, key=key))
+        for key in present_user_data_keys
+    )
+
+    if present_user_data == user_data:
+        module.exit_json(changed=changed, msg=user_data_list.json)
+
+    # First we remove keys that are not defined in the desired user_data
+    for key in present_user_data:
+        if key not in user_data:
+
+            changed = True
+            if compute_api.module.check_mode:
+                module.exit_json(changed=changed, msg={"status": "User-data of %s would be patched." % server_id})
+
+            delete_user_data(compute_api=compute_api, server_id=server_id, key=key)
+
+    # Then we patch keys that are different
+    for key, value in user_data.items():
+        if key not in present_user_data or user_data[key] != present_user_data[key]:
+
+            changed = True
+            if compute_api.module.check_mode:
+                module.exit_json(changed=changed, msg={"status": "User-data of %s would be patched."
% server_id}) + + patch_user_data(compute_api=compute_api, server_id=server_id, key=key, value=value) + + module.exit_json(changed=changed, msg=user_data) + + +def main(): + argument_spec = scaleway_argument_spec() + argument_spec.update(dict( + region=dict(required=True, choices=SCALEWAY_LOCATION.keys()), + user_data=dict(type="dict"), + server_id=dict(required=True), + )) + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + + core(module) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/scaleway/scaleway_volume.py b/plugins/modules/cloud/scaleway/scaleway_volume.py new file mode 100644 index 0000000000..dfaacee7c3 --- /dev/null +++ b/plugins/modules/cloud/scaleway/scaleway_volume.py @@ -0,0 +1,173 @@ +#!/usr/bin/python +# +# Scaleway volumes management module +# +# Copyright (C) 2018 Henryk Konsek Consulting (hekonsek@gmail.com). +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +ANSIBLE_METADATA = { + 'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community' +} + +DOCUMENTATION = ''' +--- +module: scaleway_volume +short_description: Scaleway volumes management module +author: Henryk Konsek (@hekonsek) +description: + - This module manages volumes on Scaleway account + U(https://developer.scaleway.com) +extends_documentation_fragment: +- community.general.scaleway + + +options: + state: + description: + - Indicate desired state of the volume. + default: present + choices: + - present + - absent + region: + description: + - Scaleway region to use (for example par1). + required: true + choices: + - ams1 + - EMEA-NL-EVS + - par1 + - EMEA-FR-PAR1 + name: + description: + - Name used to identify the volume. + required: true + organization: + description: + - ScaleWay organization ID to which volume belongs. + size: + description: + - Size of the volume in bytes. + volume_type: + description: + - Type of the volume (for example 'l_ssd'). 
+''' + +EXAMPLES = ''' + - name: Create 10GB volume + scaleway_volume: + name: my-volume + state: present + region: par1 + organization: "{{ scw_org }}" + "size": 10000000000 + volume_type: l_ssd + register: server_creation_check_task + + - name: Make sure volume deleted + scaleway_volume: + name: my-volume + state: absent + region: par1 + +''' + +RETURN = ''' +data: + description: This is only present when C(state=present) + returned: when C(state=present) + type: dict + sample: { + "volume": { + "export_uri": null, + "id": "c675f420-cfeb-48ff-ba2a-9d2a4dbe3fcd", + "name": "volume-0-3", + "organization": "000a115d-2852-4b0a-9ce8-47f1134ba95a", + "server": null, + "size": 10000000000, + "volume_type": "l_ssd" + } +} +''' + +from ansible_collections.community.general.plugins.module_utils.scaleway import SCALEWAY_LOCATION, scaleway_argument_spec, Scaleway +from ansible.module_utils.basic import AnsibleModule + + +def core(module): + state = module.params['state'] + name = module.params['name'] + organization = module.params['organization'] + size = module.params['size'] + volume_type = module.params['volume_type'] + + account_api = Scaleway(module) + response = account_api.get('volumes') + status_code = response.status_code + volumes_json = response.json + + if not response.ok: + module.fail_json(msg='Error getting volume [{0}: {1}]'.format( + status_code, response.json['message'])) + + volumeByName = None + for volume in volumes_json['volumes']: + if volume['organization'] == organization and volume['name'] == name: + volumeByName = volume + + if state in ('present',): + if volumeByName is not None: + module.exit_json(changed=False) + + payload = {'name': name, 'organization': organization, 'size': size, 'volume_type': volume_type} + + response = account_api.post('/volumes', payload) + + if response.ok: + module.exit_json(changed=True, data=response.json) + + module.fail_json(msg='Error creating volume [{0}: {1}]'.format( + response.status_code, response.json)) + + elif state in ('absent',): + if volumeByName is None: + module.exit_json(changed=False) + + if module.check_mode: + module.exit_json(changed=True) + + response = account_api.delete('/volumes/' + volumeByName['id']) + if response.status_code == 204: + module.exit_json(changed=True, data=response.json) + + module.fail_json(msg='Error deleting volume [{0}: {1}]'.format( + response.status_code, response.json)) + + +def main(): + argument_spec = scaleway_argument_spec() + argument_spec.update(dict( + state=dict(default='present', choices=['absent', 'present']), + name=dict(required=True), + size=dict(type='int'), + organization=dict(), + volume_type=dict(), + region=dict(required=True, choices=SCALEWAY_LOCATION.keys()), + )) + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + + core(module) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/scaleway/scaleway_volume_facts.py b/plugins/modules/cloud/scaleway/scaleway_volume_facts.py new file mode 100644 index 0000000000..05de332ab5 --- /dev/null +++ b/plugins/modules/cloud/scaleway/scaleway_volume_facts.py @@ -0,0 +1,107 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# (c) 2018, Yanis Guenane +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['deprecated'], + 'supported_by': 'community'} + +DOCUMENTATION = r''' +--- +module: 
scaleway_volume_facts +deprecated: + removed_in: '2.13' + why: Deprecated in favour of C(_info) module. + alternative: Use M(scaleway_volume_info) instead. +short_description: Gather facts about the Scaleway volumes available. +description: + - Gather facts about the Scaleway volumes available. +author: + - "Yanis Guenane (@Spredzy)" + - "Remy Leone (@sieben)" +extends_documentation_fragment: +- community.general.scaleway + +options: + region: + description: + - Scaleway region to use (for example par1). + required: true + choices: + - ams1 + - EMEA-NL-EVS + - par1 + - EMEA-FR-PAR1 +''' + +EXAMPLES = r''' +- name: Gather Scaleway volumes facts + scaleway_volume_facts: + region: par1 +''' + +RETURN = r''' +--- +scaleway_volume_facts: + description: Response from Scaleway API + returned: success + type: complex + sample: + "scaleway_volume_facts": [ + { + "creation_date": "2018-08-14T20:56:24.949660+00:00", + "export_uri": null, + "id": "b8d51a06-daeb-4fef-9539-a8aea016c1ba", + "modification_date": "2018-08-14T20:56:24.949660+00:00", + "name": "test-volume", + "organization": "3f709602-5e6c-4619-b80c-e841c89734af", + "server": null, + "size": 50000000000, + "state": "available", + "volume_type": "l_ssd" + } + ] +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.scaleway import ( + Scaleway, ScalewayException, scaleway_argument_spec, + SCALEWAY_LOCATION) + + +class ScalewayVolumeFacts(Scaleway): + + def __init__(self, module): + super(ScalewayVolumeFacts, self).__init__(module) + self.name = 'volumes' + + region = module.params["region"] + self.module.params['api_url'] = SCALEWAY_LOCATION[region]["api_endpoint"] + + +def main(): + argument_spec = scaleway_argument_spec() + argument_spec.update(dict( + region=dict(required=True, choices=SCALEWAY_LOCATION.keys()), + )) + + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + + try: + module.exit_json( + ansible_facts={'scaleway_volume_facts': ScalewayVolumeFacts(module).get_resources()} + ) + except ScalewayException as exc: + module.fail_json(msg=exc.message) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/scaleway/scaleway_volume_info.py b/plugins/modules/cloud/scaleway/scaleway_volume_info.py new file mode 100644 index 0000000000..6d6c53c42f --- /dev/null +++ b/plugins/modules/cloud/scaleway/scaleway_volume_info.py @@ -0,0 +1,107 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# (c) 2018, Yanis Guenane +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = r''' +--- +module: scaleway_volume_info +short_description: Gather information about the Scaleway volumes available. +description: + - Gather information about the Scaleway volumes available. +author: + - "Yanis Guenane (@Spredzy)" + - "Remy Leone (@sieben)" +extends_documentation_fragment: +- community.general.scaleway + +options: + region: + description: + - Scaleway region to use (for example C(par1)). 
+ required: true + choices: + - ams1 + - EMEA-NL-EVS + - par1 + - EMEA-FR-PAR1 +''' + +EXAMPLES = r''' +- name: Gather Scaleway volumes information + scaleway_volume_info: + region: par1 + register: result + +- debug: + msg: "{{ result.scaleway_volume_info }}" +''' + +RETURN = r''' +--- +scaleway_volume_info: + description: Response from Scaleway API + returned: success + type: complex + sample: + "scaleway_volume_info": [ + { + "creation_date": "2018-08-14T20:56:24.949660+00:00", + "export_uri": null, + "id": "b8d51a06-daeb-4fef-9539-a8aea016c1ba", + "modification_date": "2018-08-14T20:56:24.949660+00:00", + "name": "test-volume", + "organization": "3f709602-5e6c-4619-b80c-e841c89734af", + "server": null, + "size": 50000000000, + "state": "available", + "volume_type": "l_ssd" + } + ] +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.scaleway import ( + Scaleway, ScalewayException, scaleway_argument_spec, + SCALEWAY_LOCATION) + + +class ScalewayVolumeInfo(Scaleway): + + def __init__(self, module): + super(ScalewayVolumeInfo, self).__init__(module) + self.name = 'volumes' + + region = module.params["region"] + self.module.params['api_url'] = SCALEWAY_LOCATION[region]["api_endpoint"] + + +def main(): + argument_spec = scaleway_argument_spec() + argument_spec.update(dict( + region=dict(required=True, choices=SCALEWAY_LOCATION.keys()), + )) + + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + + try: + module.exit_json( + scaleway_volume_info=ScalewayVolumeInfo(module).get_resources() + ) + except ScalewayException as exc: + module.fail_json(msg=exc.message) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/smartos/imgadm.py b/plugins/modules/cloud/smartos/imgadm.py new file mode 100644 index 0000000000..98dea6c71e --- /dev/null +++ b/plugins/modules/cloud/smartos/imgadm.py @@ -0,0 +1,308 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2016, 2017 Jasper Lievisse Adriaanse +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: imgadm +short_description: Manage SmartOS images +description: + - Manage SmartOS virtual machine images through imgadm(1M) +author: Jasper Lievisse Adriaanse (@jasperla) +options: + force: + required: false + type: bool + description: + - Force a given operation (where supported by imgadm(1M)). + pool: + required: false + default: zones + description: + - zpool to import to or delete images from. + source: + required: false + description: + - URI for the image source. + state: + required: true + choices: [ present, absent, deleted, imported, updated, vacuumed ] + description: + - State the object operated on should be in. C(imported) is an alias for + for C(present) and C(deleted) for C(absent). When set to C(vacuumed) + and C(uuid) to C(*), it will remove all unused images. + type: + required: false + choices: [ imgapi, docker, dsapi ] + default: imgapi + description: + - Type for image sources. + uuid: + required: false + description: + - Image UUID. Can either be a full UUID or C(*) for all images. 
+requirements: + - python >= 2.6 +''' + +EXAMPLES = ''' +- name: Import an image + imgadm: + uuid: '70e3ae72-96b6-11e6-9056-9737fd4d0764' + state: imported + +- name: Delete an image + imgadm: + uuid: '70e3ae72-96b6-11e6-9056-9737fd4d0764' + state: deleted + +- name: Update all images + imgadm: + uuid: '*' + state: updated + +- name: Update a single image + imgadm: + uuid: '70e3ae72-96b6-11e6-9056-9737fd4d0764' + state: updated + +- name: Add a source + imgadm: + source: 'https://datasets.project-fifo.net' + state: present + +- name: Add a Docker source + imgadm: + source: 'https://docker.io' + type: docker + state: present + +- name: Remove a source + imgadm: + source: 'https://docker.io' + state: absent +''' + +RETURN = ''' +source: + description: Source that is managed. + returned: When not managing an image. + type: str + sample: https://datasets.project-fifo.net +uuid: + description: UUID for an image operated on. + returned: When not managing an image source. + type: str + sample: 70e3ae72-96b6-11e6-9056-9737fd4d0764 +state: + description: State of the target, after execution. + returned: success + type: str + sample: 'present' +''' + +import re + +from ansible.module_utils.basic import AnsibleModule + +# Shortcut for the imgadm(1M) command. While imgadm(1M) supports a +# -E option to return any errors in JSON, the generated JSON does not play well +# with the JSON parsers of Python. The returned message contains '\n' as part of +# the stacktrace, which breaks the parsers. + + +class Imgadm(object): + def __init__(self, module): + self.module = module + self.params = module.params + self.cmd = module.get_bin_path('imgadm', required=True) + self.changed = False + self.uuid = module.params['uuid'] + + # Since there are a number of (natural) aliases, prevent having to look + # them up everytime we operate on `state`. + if self.params['state'] in ['present', 'imported', 'updated']: + self.present = True + else: + self.present = False + + # Perform basic UUID validation upfront. + if self.uuid and self.uuid != '*': + if not re.match('^[0-9a-f]{8}-([0-9a-f]{4}-){3}[0-9a-f]{12}$', self.uuid, re.IGNORECASE): + module.fail_json(msg='Provided value for uuid option is not a valid UUID.') + + # Helper method to massage stderr + def errmsg(self, stderr): + match = re.match(r'^imgadm .*?: error \(\w+\): (.*): .*', stderr) + if match: + return match.groups()[0] + else: + return 'Unexpected failure' + + def update_images(self): + if self.uuid == '*': + cmd = '{0} update'.format(self.cmd) + else: + cmd = '{0} update {1}'.format(self.cmd, self.uuid) + + (rc, stdout, stderr) = self.module.run_command(cmd) + + if rc != 0: + self.module.fail_json(msg='Failed to update images: {0}'.format(self.errmsg(stderr))) + + # There is no feedback from imgadm(1M) to determine if anything + # was actually changed. So treat this as an 'always-changes' operation. + # Note that 'imgadm -v' produces unparseable JSON... + self.changed = True + + def manage_sources(self): + force = self.params['force'] + source = self.params['source'] + imgtype = self.params['type'] + + cmd = '{0} sources'.format(self.cmd) + + if force: + cmd += ' -f' + + if self.present: + cmd = '{0} -a {1} -t {2}'.format(cmd, source, imgtype) + (rc, stdout, stderr) = self.module.run_command(cmd) + + if rc != 0: + self.module.fail_json(msg='Failed to add source: {0}'.format(self.errmsg(stderr))) + + # Check the various responses. + # Note that trying to add a source with the wrong type is handled + # above as it results in a non-zero status. 
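+            # Editor's note: idempotency is inferred purely from imgadm(1M)'s
+            # human-readable stdout matched below, e.g. (illustrative):
+            #   Already have "imgapi" image source "https://datasets.project-fifo.net", no change
+            #   Added "imgapi" image source "https://datasets.project-fifo.net"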
+ + regex = 'Already have "{0}" image source "{1}", no change'.format(imgtype, source) + if re.match(regex, stdout): + self.changed = False + + regex = 'Added "%s" image source "%s"' % (imgtype, source) + if re.match(regex, stdout): + self.changed = True + else: + # Type is ignored by imgadm(1M) here + cmd += ' -d %s' % source + (rc, stdout, stderr) = self.module.run_command(cmd) + + if rc != 0: + self.module.fail_json(msg='Failed to remove source: {0}'.format(self.errmsg(stderr))) + + regex = 'Do not have image source "%s", no change' % source + if re.match(regex, stdout): + self.changed = False + + regex = 'Deleted ".*" image source "%s"' % source + if re.match(regex, stdout): + self.changed = True + + def manage_images(self): + pool = self.params['pool'] + state = self.params['state'] + + if state == 'vacuumed': + # Unconditionally pass '--force', otherwise we're prompted with 'y/N' + cmd = '{0} vacuum -f'.format(self.cmd) + + (rc, stdout, stderr) = self.module.run_command(cmd) + + if rc != 0: + self.module.fail_json(msg='Failed to vacuum images: {0}'.format(self.errmsg(stderr))) + else: + if stdout == '': + self.changed = False + else: + self.changed = True + if self.present: + cmd = '{0} import -P {1} -q {2}'.format(self.cmd, pool, self.uuid) + + (rc, stdout, stderr) = self.module.run_command(cmd) + + if rc != 0: + self.module.fail_json(msg='Failed to import image: {0}'.format(self.errmsg(stderr))) + + regex = r'Image {0} \(.*\) is already installed, skipping'.format(self.uuid) + if re.match(regex, stdout): + self.changed = False + + regex = '.*ActiveImageNotFound.*' + if re.match(regex, stderr): + self.changed = False + + regex = 'Imported image {0}.*'.format(self.uuid) + if re.match(regex, stdout.splitlines()[-1]): + self.changed = True + else: + cmd = '{0} delete -P {1} {2}'.format(self.cmd, pool, self.uuid) + + (rc, stdout, stderr) = self.module.run_command(cmd) + + regex = '.*ImageNotInstalled.*' + if re.match(regex, stderr): + # Even if the 'rc' was non-zero (3), we handled the situation + # in order to determine if there was a change. + self.changed = False + + regex = 'Deleted image {0}'.format(self.uuid) + if re.match(regex, stdout): + self.changed = True + + +def main(): + module = AnsibleModule( + argument_spec=dict( + force=dict(default=None, type='bool'), + pool=dict(default='zones'), + source=dict(default=None), + state=dict(default=None, required=True, choices=['present', 'absent', 'deleted', 'imported', 'updated', 'vacuumed']), + type=dict(default='imgapi', choices=['imgapi', 'docker', 'dsapi']), + uuid=dict(default=None) + ), + # This module relies largely on imgadm(1M) to enforce idempotency, which does not + # provide a "noop" (or equivalent) mode to do a dry-run. + supports_check_mode=False, + ) + + imgadm = Imgadm(module) + + uuid = module.params['uuid'] + source = module.params['source'] + state = module.params['state'] + + result = {'state': state} + + # Either manage sources or images. 
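+    # A sketch of the dispatch below: tasks that pass only 'source' (and
+    # optionally 'type') manage image sources, while tasks that pass 'uuid'
+    # manage images, e.g. (illustrative):
+    #   - imgadm: { source: 'https://datasets.project-fifo.net', state: present }
+    #   - imgadm: { uuid: '*', state: updated }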
+ if source: + result['source'] = source + imgadm.manage_sources() + else: + result['uuid'] = uuid + + if state == 'updated': + imgadm.update_images() + else: + # Make sure operate on a single image for the following actions + if (uuid == '*') and (state != 'vacuumed'): + module.fail_json(msg='Can only specify uuid as "*" when updating image(s)') + imgadm.manage_images() + + result['changed'] = imgadm.changed + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/smartos/nictagadm.py b/plugins/modules/cloud/smartos/nictagadm.py new file mode 100644 index 0000000000..9bf4373a45 --- /dev/null +++ b/plugins/modules/cloud/smartos/nictagadm.py @@ -0,0 +1,238 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2018, Bruce Smith +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = r''' +--- +module: nictagadm +short_description: Manage nic tags on SmartOS systems +description: + - Create or delete nic tags on SmartOS systems. +author: +- Bruce Smith (@SmithX10) +options: + name: + description: + - Name of the nic tag. + required: true + type: str + mac: + description: + - Specifies the I(mac) address to attach the nic tag to when not creating an I(etherstub). + - Parameters I(mac) and I(etherstub) are mutually exclusive. + type: str + etherstub: + description: + - Specifies that the nic tag will be attached to a created I(etherstub). + - Parameter I(etherstub) is mutually exclusive with both I(mtu), and I(mac). + type: bool + default: no + mtu: + description: + - Specifies the size of the I(mtu) of the desired nic tag. + - Parameters I(mtu) and I(etherstub) are mutually exclusive. + type: int + force: + description: + - When I(state) is absent set this switch will use the C(-f) parameter and delete the nic tag regardless of existing VMs. + type: bool + default: no + state: + description: + - Create or delete a SmartOS nic tag. + type: str + choices: [ absent, present ] + default: present +''' + +EXAMPLES = r''' +- name: Create 'storage0' on '00:1b:21:a3:f5:4d' + nictagadm: + name: storage0 + mac: 00:1b:21:a3:f5:4d + mtu: 9000 + state: present + +- name: Remove 'storage0' nic tag + nictagadm: + name: storage0 + state: absent +''' + +RETURN = r''' +name: + description: nic tag name + returned: always + type: str + sample: storage0 +mac: + description: MAC Address that the nic tag was attached to. + returned: always + type: str + sample: 00:1b:21:a3:f5:4d +etherstub: + description: specifies if the nic tag will create and attach to an etherstub. + returned: always + type: bool + sample: False +mtu: + description: specifies which MTU size was passed during the nictagadm add command. mtu and etherstub are mutually exclusive. 
+ returned: always + type: int + sample: 1500 +force: + description: Shows if -f was used during the deletion of a nic tag + returned: always + type: bool + sample: False +state: + description: state of the target + returned: always + type: str + sample: present +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.common.network import is_mac + + +class NicTag(object): + + def __init__(self, module): + self.module = module + + self.name = module.params['name'] + self.mac = module.params['mac'] + self.etherstub = module.params['etherstub'] + self.mtu = module.params['mtu'] + self.force = module.params['force'] + self.state = module.params['state'] + + self.nictagadm_bin = self.module.get_bin_path('nictagadm', True) + + def is_valid_mac(self): + return is_mac(self.mac.lower()) + + def nictag_exists(self): + cmd = [self.nictagadm_bin] + + cmd.append('exists') + cmd.append(self.name) + + (rc, dummy, dummy) = self.module.run_command(cmd) + + return rc == 0 + + def add_nictag(self): + cmd = [self.nictagadm_bin] + + cmd.append('-v') + cmd.append('add') + + if self.etherstub: + cmd.append('-l') + + if self.mtu: + cmd.append('-p') + cmd.append('mtu=' + str(self.mtu)) + + if self.mac: + cmd.append('-p') + cmd.append('mac=' + str(self.mac)) + + cmd.append(self.name) + + return self.module.run_command(cmd) + + def delete_nictag(self): + cmd = [self.nictagadm_bin] + + cmd.append('-v') + cmd.append('delete') + + if self.force: + cmd.append('-f') + + cmd.append(self.name) + + return self.module.run_command(cmd) + + +def main(): + module = AnsibleModule( + argument_spec=dict( + name=dict(type='str', required=True), + mac=dict(type='str'), + etherstub=dict(type='bool', default=False), + mtu=dict(type='int'), + force=dict(type='bool', default=False), + state=dict(type='str', default='present', choices=['absent', 'present']), + ), + mutually_exclusive=[ + ['etherstub', 'mac'], + ['etherstub', 'mtu'], + ], + required_if=[ + ['etherstub', False, ['name', 'mac']], + ['state', 'absent', ['name', 'force']], + ], + supports_check_mode=True + ) + + nictag = NicTag(module) + + rc = None + out = '' + err = '' + result = dict( + changed=False, + etherstub=nictag.etherstub, + force=nictag.force, + name=nictag.name, + mac=nictag.mac, + mtu=nictag.mtu, + state=nictag.state, + ) + + if not nictag.is_valid_mac(): + module.fail_json(msg='Invalid MAC Address Value', + name=nictag.name, + mac=nictag.mac, + etherstub=nictag.etherstub) + + if nictag.state == 'absent': + if nictag.nictag_exists(): + if module.check_mode: + module.exit_json(changed=True) + (rc, out, err) = nictag.delete_nictag() + if rc != 0: + module.fail_json(name=nictag.name, msg=err, rc=rc) + elif nictag.state == 'present': + if not nictag.nictag_exists(): + if module.check_mode: + module.exit_json(changed=True) + (rc, out, err) = nictag.add_nictag() + if rc is not None and rc != 0: + module.fail_json(name=nictag.name, msg=err, rc=rc) + + if rc is not None: + result['changed'] = True + if out: + result['stdout'] = out + if err: + result['stderr'] = err + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/smartos/smartos_image_facts.py b/plugins/modules/cloud/smartos/smartos_image_facts.py new file mode 120000 index 0000000000..d7206ed938 --- /dev/null +++ b/plugins/modules/cloud/smartos/smartos_image_facts.py @@ -0,0 +1 @@ +smartos_image_info.py \ No newline at end of file diff --git a/plugins/modules/cloud/smartos/smartos_image_info.py 
b/plugins/modules/cloud/smartos/smartos_image_info.py
new file mode 100644
index 0000000000..17df4a115f
--- /dev/null
+++ b/plugins/modules/cloud/smartos/smartos_image_info.py
@@ -0,0 +1,123 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2015, Adam Števko
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: smartos_image_info
+short_description: Get SmartOS image details.
+description:
+    - Retrieve information about all installed images on SmartOS.
+    - This module was called C(smartos_image_facts) before Ansible 2.9, returning C(ansible_facts).
+      Note that the M(smartos_image_info) module no longer returns C(ansible_facts)!
+author: Adam Števko (@xen0l)
+options:
+    filters:
+        description:
+            - Criteria for selecting an image. Can be any value from image
+              manifest and 'published_date', 'published', 'source', 'clones',
+              and 'size'. More information can be found at U(https://smartos.org/man/1m/imgadm)
+              under 'imgadm list'.
+'''
+
+EXAMPLES = '''
+# Return information about all installed images.
+- smartos_image_info:
+  register: result
+
+# Return all private active Linux images.
+- smartos_image_info: filters="os=linux state=active public=false"
+  register: result
+
+# Show how many clones each image has.
+- smartos_image_info:
+  register: result
+
+- debug: msg="{{ result.smartos_images[item]['name'] }}-{{ result.smartos_images[item]['version'] }}
+         has {{ result.smartos_images[item]['clones'] }} VM(s)"
+  with_items: "{{ result.smartos_images.keys() | list }}"
+
+# When the module is called as smartos_image_facts, return values are published
+# in ansible_facts['smartos_images'] and can be used as follows.
+# Note that this is deprecated and will stop working in Ansible 2.13.
+- debug: msg="{{ smartos_images[item]['name'] }}-{{ smartos_images[item]['version'] }}
+         has {{ smartos_images[item]['clones'] }} VM(s)"
+  with_items: "{{ smartos_images.keys() | list }}"
+'''
+
+RETURN = '''
+'''
+
+import json
+from ansible.module_utils.basic import AnsibleModule
+
+
+class ImageFacts(object):
+
+    def __init__(self, module):
+        self.module = module
+
+        self.filters = module.params['filters']
+
+    def return_all_installed_images(self):
+        cmd = [self.module.get_bin_path('imgadm')]
+
+        cmd.append('list')
+        cmd.append('-j')
+
+        if self.filters:
+            cmd.append(self.filters)
+
+        (rc, out, err) = self.module.run_command(cmd)
+
+        if rc != 0:
+            self.module.fail_json(
+                msg='Failed to get all installed images', stderr=err)
+
+        images = json.loads(out)
+
+        result = {}
+        for image in images:
+            result[image['manifest']['uuid']] = image['manifest']
+            # Merge additional attributes with the image manifest.
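+            # A sketch of the resulting shape (illustrative values): the dict
+            # is keyed by image UUID and each value is the manifest merged
+            # with the extra attributes, e.g.
+            #   {'70e3ae72-...': {'uuid': '70e3ae72-...', 'name': 'base-64',
+            #                     'clones': 2, 'source': 'https://...', 'zpool': 'zones'}}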
+ for attrib in ['clones', 'source', 'zpool']: + result[image['manifest']['uuid']][attrib] = image[attrib] + + return result + + +def main(): + module = AnsibleModule( + argument_spec=dict( + filters=dict(default=None), + ), + supports_check_mode=False, + ) + is_old_facts = module._name == 'smartos_image_facts' + if is_old_facts: + module.deprecate("The 'smartos_image_facts' module has been renamed to 'smartos_image_info', " + "and the renamed one no longer returns ansible_facts", version='2.13') + + image_facts = ImageFacts(module) + + data = dict(smartos_images=image_facts.return_all_installed_images()) + + if is_old_facts: + module.exit_json(ansible_facts=data) + else: + module.exit_json(**data) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/smartos/vmadm.py b/plugins/modules/cloud/smartos/vmadm.py new file mode 100644 index 0000000000..fd6f98c349 --- /dev/null +++ b/plugins/modules/cloud/smartos/vmadm.py @@ -0,0 +1,748 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2017, Jasper Lievisse Adriaanse +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: vmadm +short_description: Manage SmartOS virtual machines and zones. +description: + - Manage SmartOS virtual machines through vmadm(1M). +author: Jasper Lievisse Adriaanse (@jasperla) +options: + archive_on_delete: + required: false + description: + - When enabled, the zone dataset will be mounted on C(/zones/archive) + upon removal. + autoboot: + required: false + description: + - Whether or not a VM is booted when the system is rebooted. + brand: + required: true + choices: [ joyent, joyent-minimal, lx, kvm, bhyve ] + default: joyent + description: + - Type of virtual machine. The C(bhyve) option was added in Ansible 2.10. + boot: + required: false + description: + - Set the boot order for KVM VMs. + cpu_cap: + required: false + description: + - Sets a limit on the amount of CPU time that can be used by a VM. + Use C(0) for no cap. + cpu_shares: + required: false + description: + - Sets a limit on the number of fair share scheduler (FSS) CPU shares for + a VM. This limit is relative to all other VMs on the system. + cpu_type: + required: false + choices: [ qemu64, host ] + default: qemu64 + description: + - Control the type of virtual CPU exposed to KVM VMs. + customer_metadata: + required: false + description: + - Metadata to be set and associated with this VM, this contain customer + modifiable keys. + delegate_dataset: + required: false + description: + - Whether to delegate a ZFS dataset to an OS VM. + disk_driver: + required: false + description: + - Default value for a virtual disk model for KVM guests. + disks: + required: false + description: + - A list of disks to add, valid properties are documented in vmadm(1M). + dns_domain: + required: false + description: + - Domain value for C(/etc/hosts). + docker: + required: false + description: + - Docker images need this flag enabled along with the I(brand) set to C(lx). + filesystems: + required: false + description: + - Mount additional filesystems into an OS VM. + firewall_enabled: + required: false + description: + - Enables the firewall, allowing fwadm(1M) rules to be applied. + force: + required: false + description: + - Force a particular action (i.e. 
stop or delete a VM). + fs_allowed: + required: false + description: + - Comma separated list of filesystem types this zone is allowed to mount. + hostname: + required: false + description: + - Zone/VM hostname. + image_uuid: + required: false + description: + - Image UUID. + indestructible_delegated: + required: false + description: + - Adds an C(@indestructible) snapshot to delegated datasets. + indestructible_zoneroot: + required: false + description: + - Adds an C(@indestructible) snapshot to zoneroot. + internal_metadata: + required: false + description: + - Metadata to be set and associated with this VM, this contains operator + generated keys. + internal_metadata_namespace: + required: false + description: + - List of namespaces to be set as I(internal_metadata-only); these namespaces + will come from I(internal_metadata) rather than I(customer_metadata). + kernel_version: + required: false + description: + - Kernel version to emulate for LX VMs. + limit_priv: + required: false + description: + - Set (comma separated) list of privileges the zone is allowed to use. + maintain_resolvers: + required: false + description: + - Resolvers in C(/etc/resolv.conf) will be updated when updating + the I(resolvers) property. + max_locked_memory: + required: false + description: + - Total amount of memory (in MiBs) on the host that can be locked by this VM. + max_lwps: + required: false + description: + - Maximum number of lightweight processes this VM is allowed to have running. + max_physical_memory: + required: false + description: + - Maximum amount of memory (in MiBs) on the host that the VM is allowed to use. + max_swap: + required: false + description: + - Maximum amount of virtual memory (in MiBs) the VM is allowed to use. + mdata_exec_timeout: + required: false + description: + - Timeout in seconds (or 0 to disable) for the C(svc:/smartdc/mdata:execute) service + that runs user-scripts in the zone. + name: + required: false + aliases: [ alias ] + description: + - Name of the VM. vmadm(1M) uses this as an optional name. + nic_driver: + required: false + description: + - Default value for a virtual NIC model for KVM guests. + nics: + required: false + description: + - A list of nics to add, valid properties are documented in vmadm(1M). + nowait: + required: false + description: + - Consider the provisioning complete when the VM first starts, rather than + when the VM has rebooted. + qemu_opts: + required: false + description: + - Additional qemu arguments for KVM guests. This overwrites the default arguments + provided by vmadm(1M) and should only be used for debugging. + qemu_extra_opts: + required: false + description: + - Additional qemu cmdline arguments for KVM guests. + quota: + required: false + description: + - Quota on zone filesystems (in MiBs). + ram: + required: false + description: + - Amount of virtual RAM for a KVM guest (in MiBs). + resolvers: + required: false + description: + - List of resolvers to be put into C(/etc/resolv.conf). + routes: + required: false + description: + - Dictionary that maps destinations to gateways, these will be set as static + routes in the VM. + spice_opts: + required: false + description: + - Addition options for SPICE-enabled KVM VMs. + spice_password: + required: false + description: + - Password required to connect to SPICE. By default no password is set. + Please note this can be read from the Global Zone. + state: + required: true + choices: [ present, absent, stopped, restarted ] + description: + - States for the VM to be in. 
Please note that C(present), C(stopped) and C(restarted) + operate on a VM that is currently provisioned. C(present) means that the VM will be + created if it was absent, and that it will be in a running state. C(absent) will + shutdown the zone before removing it. + C(stopped) means the zone will be created if it doesn't exist already, before shutting + it down. + tmpfs: + required: false + description: + - Amount of memory (in MiBs) that will be available in the VM for the C(/tmp) filesystem. + uuid: + required: false + description: + - UUID of the VM. Can either be a full UUID or C(*) for all VMs. + vcpus: + required: false + description: + - Number of virtual CPUs for a KVM guest. + vga: + required: false + description: + - Specify VGA emulation used by KVM VMs. + virtio_txburst: + required: false + description: + - Number of packets that can be sent in a single flush of the tx queue of virtio NICs. + virtio_txtimer: + required: false + description: + - Timeout (in nanoseconds) for the TX timer of virtio NICs. + vnc_password: + required: false + description: + - Password required to connect to VNC. By default no password is set. + Please note this can be read from the Global Zone. + vnc_port: + required: false + description: + - TCP port to listen of the VNC server. Or set C(0) for random, + or C(-1) to disable. + zfs_data_compression: + required: false + description: + - Specifies compression algorithm used for this VMs data dataset. This option + only has effect on delegated datasets. + zfs_data_recsize: + required: false + description: + - Suggested block size (power of 2) for files in the delegated dataset's filesystem. + zfs_filesystem_limit: + required: false + description: + - Maximum number of filesystems the VM can have. + zfs_io_priority: + required: false + description: + - IO throttle priority value relative to other VMs. + zfs_root_compression: + required: false + description: + - Specifies compression algorithm used for this VMs root dataset. This option + only has effect on the zoneroot dataset. + zfs_root_recsize: + required: false + description: + - Suggested block size (power of 2) for files in the zoneroot dataset's filesystem. + zfs_snapshot_limit: + required: false + description: + - Number of snapshots the VM can have. + zpool: + required: false + description: + - ZFS pool the VM's zone dataset will be created in. +requirements: + - python >= 2.6 +''' + +EXAMPLES = ''' +- name: create SmartOS zone + vmadm: + brand: joyent + state: present + alias: fw_zone + image_uuid: 95f265b8-96b2-11e6-9597-972f3af4b6d5 + firewall_enabled: yes + indestructible_zoneroot: yes + nics: + - nic_tag: admin + ip: dhcp + primary: true + internal_metadata: + root_pw: 'secret' + quota: 1 + +- name: Delete a zone + vmadm: + alias: test_zone + state: deleted + +- name: Stop all zones + vmadm: + uuid: '*' + state: stopped +''' + +RETURN = ''' +uuid: + description: UUID of the managed VM. + returned: always + type: str + sample: 'b217ab0b-cf57-efd8-cd85-958d0b80be33' +alias: + description: Alias of the managed VM. + returned: When addressing a VM by alias. + type: str + sample: 'dns-zone' +state: + description: State of the target, after execution. 
+ returned: success + type: str + sample: 'running' +''' + +import json +import os +import re +import tempfile +import traceback + + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_native + +# While vmadm(1M) supports a -E option to return any errors in JSON, the +# generated JSON does not play well with the JSON parsers of Python. +# The returned message contains '\n' as part of the stacktrace, +# which breaks the parsers. + + +def get_vm_prop(module, uuid, prop): + # Lookup a property for the given VM. + # Returns the property, or None if not found. + cmd = '{0} lookup -j -o {1} uuid={2}'.format(module.vmadm, prop, uuid) + + (rc, stdout, stderr) = module.run_command(cmd) + + if rc != 0: + module.fail_json( + msg='Could not perform lookup of {0} on {1}'.format(prop, uuid), exception=stderr) + + try: + stdout_json = json.loads(stdout) + except Exception as e: + module.fail_json( + msg='Invalid JSON returned by vmadm for uuid lookup of {0}'.format(prop), + details=to_native(e), exception=traceback.format_exc()) + + if len(stdout_json) > 0 and prop in stdout_json[0]: + return stdout_json[0][prop] + else: + return None + + +def get_vm_uuid(module, alias): + # Lookup the uuid that goes with the given alias. + # Returns the uuid or '' if not found. + cmd = '{0} lookup -j -o uuid alias={1}'.format(module.vmadm, alias) + + (rc, stdout, stderr) = module.run_command(cmd) + + if rc != 0: + module.fail_json( + msg='Could not retrieve UUID of {0}'.format(alias), exception=stderr) + + # If no VM was found matching the given alias, we get back an empty array. + # That is not an error condition as we might be explicitly checking it's + # absence. + if stdout.strip() == '[]': + return None + else: + try: + stdout_json = json.loads(stdout) + except Exception as e: + module.fail_json( + msg='Invalid JSON returned by vmadm for uuid lookup of {0}'.format(alias), + details=to_native(e), exception=traceback.format_exc()) + + if len(stdout_json) > 0 and 'uuid' in stdout_json[0]: + return stdout_json[0]['uuid'] + + +def get_all_vm_uuids(module): + # Retrieve the UUIDs for all VMs. + cmd = '{0} lookup -j -o uuid'.format(module.vmadm) + + (rc, stdout, stderr) = module.run_command(cmd) + + if rc != 0: + module.fail_json(msg='Failed to get VMs list', exception=stderr) + + try: + stdout_json = json.loads(stdout) + return [v['uuid'] for v in stdout_json] + except Exception as e: + module.fail_json(msg='Could not retrieve VM UUIDs', details=to_native(e), + exception=traceback.format_exc()) + + +def new_vm(module, uuid, vm_state): + payload_file = create_payload(module, uuid) + + (rc, stdout, stderr) = vmadm_create_vm(module, payload_file) + + if rc != 0: + changed = False + module.fail_json(msg='Could not create VM', exception=stderr) + else: + changed = True + # 'vmadm create' returns all output to stderr... + match = re.match('Successfully created VM (.*)', stderr) + if match: + vm_uuid = match.groups()[0] + if not is_valid_uuid(vm_uuid): + module.fail_json(msg='Invalid UUID for VM {0}?'.format(vm_uuid)) + else: + module.fail_json(msg='Could not retrieve UUID of newly created(?) 
VM')
+
+    # Now that the VM is created, ensure it is in the desired state (if not 'running')
+    if vm_state != 'running':
+        ret = set_vm_state(module, vm_uuid, vm_state)
+        if not ret:
+            module.fail_json(msg='Could not set VM {0} to state {1}'.format(vm_uuid, vm_state))
+
+    try:
+        os.unlink(payload_file)
+    except Exception as e:
+        # Since the payload may contain sensitive information, fail hard
+        # if we cannot remove the file so the operator knows about it.
+        module.fail_json(msg='Could not remove temporary JSON payload file {0}: {1}'.format(payload_file, to_native(e)),
+                         exception=traceback.format_exc())
+
+    return changed, vm_uuid
+
+
+def vmadm_create_vm(module, payload_file):
+    # Create a new VM using the provided payload.
+    cmd = '{0} create -f {1}'.format(module.vmadm, payload_file)
+
+    return module.run_command(cmd)
+
+
+def set_vm_state(module, vm_uuid, vm_state):
+    p = module.params
+
+    # Check if the VM is already in the desired state.
+    state = get_vm_prop(module, vm_uuid, 'state')
+    if state and (state == vm_state):
+        return None
+
+    # Lookup table for the state to be in, and which command to use for that.
+    # vm_state: [vmadm command, forceable?]
+    cmds = {
+        'stopped': ['stop', True],
+        'running': ['start', False],
+        'deleted': ['delete', True],
+        'rebooted': ['reboot', False]
+    }
+
+    if p['force'] and cmds[vm_state][1]:
+        force = '-F'
+    else:
+        force = ''
+
+    cmd = '{0} {1} {2} {3}'.format(module.vmadm, cmds[vm_state][0], force, vm_uuid)
+
+    (rc, stdout, stderr) = module.run_command(cmd)
+
+    match = re.match('^Successfully.*', stderr)
+    if match:
+        return True
+    else:
+        return False
+
+
+def create_payload(module, uuid):
+    # Create the JSON payload (vmdef) and return the filename.
+
+    p = module.params
+
+    # Filter out the few options that are not valid VM properties.
+    module_options = ['debug', 'force', 'state']
+    vmattrs = filter(lambda prop: prop not in module_options, p)
+
+    vmdef = {}
+
+    for attr in vmattrs:
+        if p[attr]:
+            vmdef[attr] = p[attr]
+
+    try:
+        vmdef_json = json.dumps(vmdef)
+    except Exception as e:
+        module.fail_json(
+            msg='Could not create valid JSON payload', details=to_native(e), exception=traceback.format_exc())
+
+    # Create the temporary file that contains our payload, and set tight
+    # permissions as it may contain sensitive information.
+    try:
+        # XXX: When there's a way to get the current ansible temporary directory
+        # drop the mkstemp call and rely on ANSIBLE_KEEP_REMOTE_FILES to retain
+        # the payload (thus removing the `save_payload` option).
+        fname = tempfile.mkstemp()[1]
+        os.chmod(fname, 0o400)
+        with open(fname, 'w') as fh:
+            fh.write(vmdef_json)
+    except Exception as e:
+        module.fail_json(msg='Could not save JSON payload: %s' % to_native(e), exception=traceback.format_exc())
+
+    return fname
+
+
+def vm_state_transition(module, uuid, vm_state):
+    ret = set_vm_state(module, uuid, vm_state)
+
+    # Whether the VM changed state.
+    if ret is None:
+        return False
+    elif ret:
+        return True
+    else:
+        module.fail_json(msg='Failed to set VM {0} to state {1}'.format(uuid, vm_state))
+
+
+def is_valid_uuid(uuid):
+    if re.match('^[0-9a-f]{8}-([0-9a-f]{4}-){3}[0-9a-f]{12}$', uuid, re.IGNORECASE):
+        return True
+    else:
+        return False
+
+
+def validate_uuids(module):
+    # Perform basic UUID validation.
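+    # Editor's note: the literal '*' wildcard is valid for both 'uuid' and
+    # 'image_uuid' and is skipped here, so only concrete values are matched,
+    # e.g. '70e3ae72-96b6-11e6-9056-9737fd4d0764' passes, 'not-a-uuid' fails.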
+ failed = [] + + for u in [['uuid', module.params['uuid']], + ['image_uuid', module.params['image_uuid']]]: + if u[1] and u[1] != '*': + if not is_valid_uuid(u[1]): + failed.append(u[0]) + + if len(failed) > 0: + module.fail_json(msg='No valid UUID(s) found for: {0}'.format(", ".join(failed))) + + +def manage_all_vms(module, vm_state): + # Handle operations for all VMs, which can by definition only + # be state transitions. + state = module.params['state'] + + if state == 'created': + module.fail_json(msg='State "created" is only valid for tasks with a single VM') + + # If any of the VMs has a change, the task as a whole has a change. + any_changed = False + + # First get all VM uuids and for each check their state, and adjust it if needed. + for uuid in get_all_vm_uuids(module): + current_vm_state = get_vm_prop(module, uuid, 'state') + if not current_vm_state and vm_state == 'deleted': + any_changed = False + else: + if module.check_mode: + if (not current_vm_state) or (get_vm_prop(module, uuid, 'state') != state): + any_changed = True + else: + any_changed = (vm_state_transition(module, uuid, vm_state) | any_changed) + + return any_changed + + +def main(): + # In order to reduce the clutter and boilerplate for trivial options, + # abstract the vmadm properties and build the dict of arguments later. + # Dict of all options that are simple to define based on their type. + # They're not required and have a default of None. + properties = { + 'str': [ + 'boot', 'disk_driver', 'dns_domain', 'fs_allowed', 'hostname', + 'image_uuid', 'internal_metadata_namespace', 'kernel_version', + 'limit_priv', 'nic_driver', 'qemu_opts', 'qemu_extra_opts', + 'spice_opts', 'uuid', 'vga', 'zfs_data_compression', + 'zfs_root_compression', 'zpool' + ], + 'bool': [ + 'archive_on_delete', 'autoboot', 'debug', 'delegate_dataset', + 'docker', 'firewall_enabled', 'force', 'indestructible_delegated', + 'indestructible_zoneroot', 'maintain_resolvers', 'nowait' + ], + 'int': [ + 'cpu_cap', 'cpu_shares', 'max_locked_memory', 'max_lwps', + 'max_physical_memory', 'max_swap', 'mdata_exec_timeout', + 'quota', 'ram', 'tmpfs', 'vcpus', 'virtio_txburst', + 'virtio_txtimer', 'vnc_port', 'zfs_data_recsize', + 'zfs_filesystem_limit', 'zfs_io_priority', 'zfs_root_recsize', + 'zfs_snapshot_limit' + ], + 'dict': ['customer_metadata', 'internal_metadata', 'routes'], + 'list': ['disks', 'nics', 'resolvers', 'filesystems'] + } + + # Start with the options that are not as trivial as those above. + options = dict( + state=dict( + default='running', + type='str', + choices=['present', 'running', 'absent', 'deleted', 'stopped', 'created', 'restarted', 'rebooted'] + ), + name=dict( + default=None, type='str', + aliases=['alias'] + ), + brand=dict( + default='joyent', + type='str', + choices=['joyent', 'joyent-minimal', 'lx', 'kvm', 'bhyve'] + ), + cpu_type=dict( + default='qemu64', + type='str', + choices=['host', 'qemu64'] + ), + # Regular strings, however these require additional options. + spice_password=dict(type='str', no_log=True), + vnc_password=dict(type='str', no_log=True), + ) + + # Add our 'simple' options to options dict. 
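+    # A sketch of what the loop below produces: one minimal argument spec per
+    # property, e.g. options['ram'] == dict(default=None, type='int') and
+    # options['nics'] == dict(default=None, type='list').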
+    for prop_type in properties:
+        for p in properties[prop_type]:
+            option = dict(default=None, type=prop_type)
+            options[p] = option
+
+    module = AnsibleModule(
+        argument_spec=options,
+        supports_check_mode=True,
+        required_one_of=[['name', 'uuid']]
+    )
+
+    module.vmadm = module.get_bin_path('vmadm', required=True)
+
+    p = module.params
+    uuid = p['uuid']
+    state = p['state']
+
+    # Translate the state parameter into something we can use later on.
+    if state in ['present', 'running']:
+        vm_state = 'running'
+    elif state in ['stopped', 'created']:
+        vm_state = 'stopped'
+    elif state in ['absent', 'deleted']:
+        vm_state = 'deleted'
+    elif state in ['restarted', 'rebooted']:
+        vm_state = 'rebooted'
+
+    result = {'state': state}
+
+    # While it's possible to refer to a given VM by its `alias`, it's easier
+    # to operate on VMs by their UUID. So if we're not given a `uuid`, look
+    # it up.
+    if not uuid:
+        uuid = get_vm_uuid(module, p['name'])
+        # Bit of a chicken and egg problem here for VMs with state == deleted.
+        # If they're going to be removed in this play, we have to look up the
+        # uuid. If they're already deleted there's nothing to look up.
+        # So if state == deleted and get_vm_uuid() returned no UUID, the VM is
+        # already deleted and there's nothing else to do.
+        if uuid is None and vm_state == 'deleted':
+            result['name'] = p['name']
+            module.exit_json(**result)
+
+    validate_uuids(module)
+
+    if p['name']:
+        result['name'] = p['name']
+    result['uuid'] = uuid
+
+    if uuid == '*':
+        result['changed'] = manage_all_vms(module, vm_state)
+        module.exit_json(**result)
+
+    # The general flow is as follows:
+    # - First the current state of the VM is obtained by its UUID.
+    # - If the state was not found and the desired state is 'deleted', return.
+    # - If the state was not found, it means the VM has to be created.
+    #   Subsequently the VM will be set to the desired state (e.g. stopped).
+    # - Otherwise, it means the VM exists already and we operate on its
+    #   state (e.g. reboot it).
+    #
+    # In the future it should be possible to query the VM for a particular
+    # property as a valid state (e.g. queried) so the result can be
+    # registered.
+    # Also, VMs should be able to get their properties updated.
+    # Managing VM snapshots should be part of a standalone module.
+
+    # First obtain the VM state to determine what needs to be done with it.
+    current_vm_state = get_vm_prop(module, uuid, 'state')
+
+    # First handle the case where the VM should be deleted and is not present.
+    if not current_vm_state and vm_state == 'deleted':
+        result['changed'] = False
+    elif module.check_mode:
+        # Shortcut for check mode, if there is no VM yet, it will need to be created.
+        # Or, if the VM is not in the desired state yet, it needs to transition.
+        if (not current_vm_state) or (get_vm_prop(module, uuid, 'state') != state):
+            result['changed'] = True
+        else:
+            result['changed'] = False
+
+        module.exit_json(**result)
+    # No VM was found that matched the given ID (alias or uuid), so we create it.
+    elif not current_vm_state:
+        result['changed'], result['uuid'] = new_vm(module, uuid, vm_state)
+    else:
+        # VM was found, operate on its state directly.
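+        # For example (illustrative): a VM currently 'running' with
+        # state=stopped results in 'vmadm stop <uuid>' and changed=True,
+        # while a VM already 'stopped' is a no-op (set_vm_state() returns
+        # None) and changed stays False.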
+        result['changed'] = vm_state_transition(module, uuid, vm_state)
+
+    module.exit_json(**result)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/cloud/softlayer/sl_vm.py b/plugins/modules/cloud/softlayer/sl_vm.py
new file mode 100644
index 0000000000..284a8b7be1
--- /dev/null
+++ b/plugins/modules/cloud/softlayer/sl_vm.py
@@ -0,0 +1,374 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+DOCUMENTATION = '''
+---
+module: sl_vm
+short_description: create or cancel a virtual instance in SoftLayer
+description:
+  - Creates or cancels SoftLayer instances.
+  - When created, optionally waits for it to be 'running'.
+options:
+  instance_id:
+    description:
+      - Instance ID of the virtual instance to perform the action on.
+  hostname:
+    description:
+      - Hostname to be provided to a virtual instance.
+  domain:
+    description:
+      - Domain name to be provided to a virtual instance.
+  datacenter:
+    description:
+      - Datacenter in which the virtual instance should be deployed.
+  tags:
+    description:
+      - Tag or list of tags to be provided to a virtual instance.
+  hourly:
+    description:
+      - Flag to determine if the instance should be billed hourly.
+    type: bool
+    default: 'yes'
+  private:
+    description:
+      - Flag to determine if the instance should be private only.
+    type: bool
+    default: 'no'
+  dedicated:
+    description:
+      - Flag to determine if the instance should be deployed in dedicated space.
+    type: bool
+    default: 'no'
+  local_disk:
+    description:
+      - Flag to determine if local disk should be used for the new instance.
+    type: bool
+    default: 'yes'
+  cpus:
+    description:
+      - Count of CPUs to be assigned to the new virtual instance.
+    required: true
+  memory:
+    description:
+      - Amount of memory to be assigned to the new virtual instance.
+    required: true
+  flavor:
+    description:
+      - Specify which SoftLayer flavor template to use instead of cpus and memory.
+  disks:
+    description:
+      - List of disk sizes to be assigned to the new virtual instance.
+    required: true
+    default: [ 25 ]
+  os_code:
+    description:
+      - OS Code to be used for the new virtual instance.
+  image_id:
+    description:
+      - Image template to be used for the new virtual instance.
+  nic_speed:
+    description:
+      - NIC speed to be assigned to the new virtual instance.
+    default: 10
+  public_vlan:
+    description:
+      - VLAN (by its ID) to be assigned to the public NIC.
+  private_vlan:
+    description:
+      - VLAN (by its ID) to be assigned to the private NIC.
+  ssh_keys:
+    description:
+      - List of SSH keys (by their IDs) to be assigned to the virtual instance.
+  post_uri:
+    description:
+      - URL of a post-provisioning script to be loaded and executed on the virtual instance.
+  state:
+    description:
+      - Create, or cancel a virtual instance.
+      - Specify C(present) for create, C(absent) to cancel.
+    choices: [ absent, present ]
+    default: present
+  wait:
+    description:
+      - Flag used to wait for active status before returning.
+    type: bool
+    default: 'yes'
+  wait_time:
+    description:
+      - Maximum time in seconds to wait before returning.
+ default: 600 +requirements: + - python >= 2.6 + - softlayer >= 4.1.1 +author: +- Matt Colton (@mcltn) +''' + +EXAMPLES = ''' +- name: Build instance + hosts: localhost + gather_facts: no + tasks: + - name: Build instance request + sl_vm: + hostname: instance-1 + domain: anydomain.com + datacenter: dal09 + tags: ansible-module-test + hourly: yes + private: no + dedicated: no + local_disk: yes + cpus: 1 + memory: 1024 + disks: [25] + os_code: UBUNTU_LATEST + wait: no + +- name: Build additional instances + hosts: localhost + gather_facts: no + tasks: + - name: Build instances request + sl_vm: + hostname: "{{ item.hostname }}" + domain: "{{ item.domain }}" + datacenter: "{{ item.datacenter }}" + tags: "{{ item.tags }}" + hourly: "{{ item.hourly }}" + private: "{{ item.private }}" + dedicated: "{{ item.dedicated }}" + local_disk: "{{ item.local_disk }}" + cpus: "{{ item.cpus }}" + memory: "{{ item.memory }}" + disks: "{{ item.disks }}" + os_code: "{{ item.os_code }}" + ssh_keys: "{{ item.ssh_keys }}" + wait: "{{ item.wait }}" + with_items: + - hostname: instance-2 + domain: anydomain.com + datacenter: dal09 + tags: + - ansible-module-test + - ansible-module-test-slaves + hourly: yes + private: no + dedicated: no + local_disk: yes + cpus: 1 + memory: 1024 + disks: + - 25 + - 100 + os_code: UBUNTU_LATEST + ssh_keys: [] + wait: True + - hostname: instance-3 + domain: anydomain.com + datacenter: dal09 + tags: + - ansible-module-test + - ansible-module-test-slaves + hourly: yes + private: no + dedicated: no + local_disk: yes + cpus: 1 + memory: 1024 + disks: + - 25 + - 100 + os_code: UBUNTU_LATEST + ssh_keys: [] + wait: yes + +- name: Cancel instances + hosts: localhost + gather_facts: no + tasks: + - name: Cancel by tag + sl_vm: + state: absent + tags: ansible-module-test +''' + +# TODO: Disabled RETURN as it is breaking the build for docs. Needs to be fixed. 
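+# Even with RETURN disabled, exit_json() below still returns 'changed' plus the
+# raw 'instance' document from the SoftLayer API. An illustrative, hypothetical
+# shape (actual keys come from SoftLayer and may differ):
+#   {"changed": true, "instance": {"id": 12345678, "hostname": "instance-1",
+#                                  "domain": "anydomain.com"}}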
+RETURN = '''# ''' + +import json +import time + +try: + import SoftLayer + from SoftLayer import VSManager + + HAS_SL = True + vsManager = VSManager(SoftLayer.create_client_from_env()) +except ImportError: + HAS_SL = False + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.six import string_types + + +# TODO: get this info from API +STATES = ['present', 'absent'] +DATACENTERS = ['ams01', 'ams03', 'che01', 'dal01', 'dal05', 'dal06', 'dal09', 'dal10', 'dal12', 'dal13', 'fra02', + 'fra04', 'fra05', 'hkg02', 'hou02', 'lon02', 'lon04', 'lon06', 'mel01', 'mex01', 'mil01', 'mon01', + 'osl01', 'par01', 'sao01', 'sea01', 'seo01', 'sjc01', 'sjc03', 'sjc04', 'sng01', 'syd01', 'syd04', + 'tok02', 'tor01', 'wdc01', 'wdc04', 'wdc06', 'wdc07'] +CPU_SIZES = [1, 2, 4, 8, 16, 32, 56] +MEMORY_SIZES = [1024, 2048, 4096, 6144, 8192, 12288, 16384, 32768, 49152, 65536, 131072, 247808] +INITIALDISK_SIZES = [25, 100] +LOCALDISK_SIZES = [25, 100, 150, 200, 300] +SANDISK_SIZES = [10, 20, 25, 30, 40, 50, 75, 100, 125, 150, 175, 200, 250, 300, 350, 400, 500, 750, 1000, 1500, 2000] +NIC_SPEEDS = [10, 100, 1000] + + +def create_virtual_instance(module): + + instances = vsManager.list_instances( + hostname=module.params.get('hostname'), + domain=module.params.get('domain'), + datacenter=module.params.get('datacenter') + ) + + if instances: + return False, None + + # Check if OS or Image Template is provided (Can't be both, defaults to OS) + if (module.params.get('os_code') is not None and module.params.get('os_code') != ''): + module.params['image_id'] = '' + elif (module.params.get('image_id') is not None and module.params.get('image_id') != ''): + module.params['os_code'] = '' + module.params['disks'] = [] # Blank out disks since it will use the template + else: + return False, None + + tags = module.params.get('tags') + if isinstance(tags, list): + tags = ','.join(map(str, module.params.get('tags'))) + + instance = vsManager.create_instance( + hostname=module.params.get('hostname'), + domain=module.params.get('domain'), + cpus=module.params.get('cpus'), + memory=module.params.get('memory'), + flavor=module.params.get('flavor'), + hourly=module.params.get('hourly'), + datacenter=module.params.get('datacenter'), + os_code=module.params.get('os_code'), + image_id=module.params.get('image_id'), + local_disk=module.params.get('local_disk'), + disks=module.params.get('disks'), + ssh_keys=module.params.get('ssh_keys'), + nic_speed=module.params.get('nic_speed'), + private=module.params.get('private'), + public_vlan=module.params.get('public_vlan'), + private_vlan=module.params.get('private_vlan'), + dedicated=module.params.get('dedicated'), + post_uri=module.params.get('post_uri'), + tags=tags, + ) + + if instance is not None and instance['id'] > 0: + return True, instance + else: + return False, None + + +def wait_for_instance(module, id): + instance = None + completed = False + wait_timeout = time.time() + module.params.get('wait_time') + while not completed and wait_timeout > time.time(): + try: + completed = vsManager.wait_for_ready(id, 10, 2) + if completed: + instance = vsManager.get_instance(id) + except Exception: + completed = False + + return completed, instance + + +def cancel_instance(module): + canceled = True + if module.params.get('instance_id') is None and (module.params.get('tags') or module.params.get('hostname') or module.params.get('domain')): + tags = module.params.get('tags') + if isinstance(tags, string_types): + tags = [module.params.get('tags')] + instances = 
vsManager.list_instances(tags=tags, hostname=module.params.get('hostname'), domain=module.params.get('domain'))
+        for instance in instances:
+            try:
+                vsManager.cancel_instance(instance['id'])
+            except Exception:
+                canceled = False
+    elif module.params.get('instance_id') and module.params.get('instance_id') != 0:
+        try:
+            vsManager.cancel_instance(module.params.get('instance_id'))
+        except Exception:
+            canceled = False
+    else:
+        return False, None
+
+    return canceled, None
+
+
+def main():
+
+    module = AnsibleModule(
+        argument_spec=dict(
+            instance_id=dict(type='str'),
+            hostname=dict(type='str'),
+            domain=dict(type='str'),
+            datacenter=dict(type='str', choices=DATACENTERS),
+            tags=dict(type='str'),
+            hourly=dict(type='bool', default=True),
+            private=dict(type='bool', default=False),
+            dedicated=dict(type='bool', default=False),
+            local_disk=dict(type='bool', default=True),
+            cpus=dict(type='int', choices=CPU_SIZES),
+            memory=dict(type='int', choices=MEMORY_SIZES),
+            flavor=dict(type='str'),
+            disks=dict(type='list', default=[25]),
+            os_code=dict(type='str'),
+            image_id=dict(type='str'),
+            nic_speed=dict(type='int', choices=NIC_SPEEDS),
+            public_vlan=dict(type='str'),
+            private_vlan=dict(type='str'),
+            ssh_keys=dict(type='list', default=[]),
+            post_uri=dict(type='str'),
+            state=dict(type='str', default='present', choices=STATES),
+            wait=dict(type='bool', default=True),
+            wait_time=dict(type='int', default=600),
+        )
+    )
+
+    if not HAS_SL:
+        module.fail_json(msg='softlayer python library required for this module')
+
+    if module.params.get('state') == 'absent':
+        (changed, instance) = cancel_instance(module)
+
+    elif module.params.get('state') == 'present':
+        (changed, instance) = create_virtual_instance(module)
+        if module.params.get('wait') is True and instance:
+            (changed, instance) = wait_for_instance(module, instance['id'])
+
+    module.exit_json(changed=changed, instance=json.loads(json.dumps(instance, default=lambda o: o.__dict__)))
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/cloud/spotinst/spotinst_aws_elastigroup.py b/plugins/modules/cloud/spotinst/spotinst_aws_elastigroup.py
new file mode 100644
index 0000000000..562b81dc08
--- /dev/null
+++ b/plugins/modules/cloud/spotinst/spotinst_aws_elastigroup.py
@@ -0,0 +1,1521 @@
+#!/usr/bin/python
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import (absolute_import, division, print_function)
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+DOCUMENTATION = '''
+---
+module: spotinst_aws_elastigroup
+short_description: Create, update or delete Spotinst AWS Elastigroups
+author: Spotinst (@talzur)
+description:
+  - Can create, update, or delete Spotinst AWS Elastigroups.
+    Launch configuration is part of the elastigroup configuration,
+    so no additional modules are necessary for handling the launch configuration.
+    You will have to have a credentials file in this location - ~/.spotinst/credentials.
+    The credentials file must contain a row that looks like this
+    token = <YOUR TOKEN>
+    Full documentation available at https://help.spotinst.com/hc/en-us/articles/115003530285-Ansible-
+requirements:
+  - python >= 2.7
+  - spotinst_sdk >= 1.0.38
+options:
+
+  credentials_path:
+    description:
+      - (String) Optional parameter that allows setting a non-default credentials path.
+        Default is ~/.spotinst/credentials
+
+  account_id:
+    description:
+      - (String) Optional parameter that allows setting an account-id inside the module configuration.
+        By default this is retrieved from the credentials path
+
+  availability_vs_cost:
+    choices:
+      - availabilityOriented
+      - costOriented
+      - balanced
+    description:
+      - (String) The strategy orientation.
+    required: true
+
+  availability_zones:
+    description:
+      - (List of Objects) a list of hash/dictionaries of Availability Zones that are configured in the elastigroup;
+        '[{"key":"value", "key":"value"}]';
+        keys allowed are
+        name (String),
+        subnet_id (String),
+        placement_group_name (String)
+    required: true
+
+  block_device_mappings:
+    description:
+      - (List of Objects) a list of hash/dictionaries of Block Device Mappings for elastigroup instances;
+        You can specify virtual devices and EBS volumes.;
+        '[{"key":"value", "key":"value"}]';
+        keys allowed are
+        device_name (List of Strings),
+        virtual_name (String),
+        no_device (String),
+        ebs (Object, expects the following keys -
+        delete_on_termination (Boolean),
+        encrypted (Boolean),
+        iops (Integer),
+        snapshot_id (Integer),
+        volume_type (String),
+        volume_size (Integer))
+
+  chef:
+    description:
+      - (Object) The Chef integration configuration.;
+        Expects the following keys - chef_server (String),
+        organization (String),
+        user (String),
+        pem_key (String),
+        chef_version (String)
+
+  draining_timeout:
+    description:
+      - (Integer) Time for instance to be drained from incoming requests and deregistered from ELB before termination.
+
+  ebs_optimized:
+    description:
+      - (Boolean) Enable EBS optimization for supported instances which are not enabled by default.;
+        Note - additional charges will be applied.
+    type: bool
+
+  ebs_volume_pool:
+    description:
+      - (List of Objects) a list of hash/dictionaries of EBS devices to reattach to the elastigroup when available;
+        '[{"key":"value", "key":"value"}]';
+        keys allowed are -
+        volume_ids (List of Strings),
+        device_name (String)
+
+  ecs:
+    description:
+      - (Object) The ECS integration configuration.;
+        Expects the following key -
+        cluster_name (String)
+
+  elastic_ips:
+    description:
+      - (List of Strings) List of ElasticIps Allocation Ids (Example C(eipalloc-9d4e16f8)) to associate with the group instances
+
+  fallback_to_od:
+    description:
+      - (Boolean) In case of no spots available, Elastigroup will launch an On-demand instance instead
+    type: bool
+
+  health_check_grace_period:
+    description:
+      - (Integer) The amount of time, in seconds, after the instance has launched to start and check its health.
+    default: 300
+
+  health_check_unhealthy_duration_before_replacement:
+    description:
+      - (Integer) Minimal amount of time an instance should be unhealthy for us to consider it unhealthy.
+
+  health_check_type:
+    choices:
+      - ELB
+      - HCS
+      - TARGET_GROUP
+      - MLB
+      - EC2
+    description:
+      - (String) The service to use for the health check.
+
+  iam_role_name:
+    description:
+      - (String) The instance profile iamRole name
+      - Only use iam_role_arn, or iam_role_name
+
+  iam_role_arn:
+    description:
+      - (String) The instance profile iamRole arn
+      - Only use iam_role_arn, or iam_role_name
+
+  id:
+    description:
+      - (String) The group id if it already exists and you want to update, or delete it.
+        This will not work unless the uniqueness_by field is set to id.
+        When this is set, and the uniqueness_by field is set, the group will either be updated or deleted, but not created.
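+  # Illustrative (values hypothetical): to update or delete one specific group
+  # by identifier rather than by name, combine
+  #   uniqueness_by: id
+  #   id: sig-12345
+  # in the task; see uniqueness_by below.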
+
+  do_not_update:
+    choices:
+      - image_id
+      - target
+    description:
+      - (List of Strings) list of fields on which changes should be ignored when updating
+
+  image_id:
+    description:
+      - (String) The image Id used to launch the instance.;
+        In case of conflict between Instance type and image type, an error will be returned
+    required: true
+
+  key_pair:
+    description:
+      - (String) Specify a Key Pair to attach to the instances
+    required: true
+
+  kubernetes:
+    description:
+      - (Object) The Kubernetes integration configuration.
+        Expects the following keys -
+        api_server (String),
+        token (String)
+
+  lifetime_period:
+    description:
+      - (String) lifetime period
+
+  load_balancers:
+    description:
+      - (List of Strings) List of classic ELB names
+
+  max_size:
+    description:
+      - (Integer) The upper limit number of instances that you can scale up to
+    required: true
+
+  mesosphere:
+    description:
+      - (Object) The Mesosphere integration configuration.
+        Expects the following key -
+        api_server (String)
+
+  min_size:
+    description:
+      - (Integer) The lower limit number of instances that you can scale down to
+    required: true
+
+  monitoring:
+    description:
+      - (Boolean) Describes whether instance Enhanced Monitoring is enabled
+    required: true
+
+  name:
+    description:
+      - (String) Unique name for elastigroup to be created, updated or deleted
+    required: true
+
+  network_interfaces:
+    description:
+      - (List of Objects) a list of hash/dictionaries of network interfaces to add to the elastigroup;
+        '[{"key":"value", "key":"value"}]';
+        keys allowed are -
+        description (String),
+        device_index (Integer),
+        secondary_private_ip_address_count (Integer),
+        associate_public_ip_address (Boolean),
+        delete_on_termination (Boolean),
+        groups (List of Strings),
+        network_interface_id (String),
+        private_ip_address (String),
+        subnet_id (String),
+        associate_ipv6_address (Boolean),
+        private_ip_addresses (List of Objects, Keys are privateIpAddress (String, required) and primary (Boolean))
+
+  on_demand_count:
+    description:
+      - (Integer) Required if risk is not set
+      - Number of on demand instances to launch. All other instances will be spot instances.;
+        Either set this parameter or the risk parameter
+
+  on_demand_instance_type:
+    description:
+      - (String) On-demand instance type that will be provisioned
+    required: true
+
+  opsworks:
+    description:
+      - (Object) The elastigroup OpsWorks integration configuration.;
+        Expects the following key -
+        layer_id (String)
+
+  persistence:
+    description:
+      - (Object) The Stateful elastigroup configuration.;
+        Accepts the following keys -
+        should_persist_root_device (Boolean),
+        should_persist_block_devices (Boolean),
+        should_persist_private_ip (Boolean)
+
+  product:
+    choices:
+      - Linux/UNIX
+      - SUSE Linux
+      - Windows
+      - Linux/UNIX (Amazon VPC)
+      - SUSE Linux (Amazon VPC)
+      - Windows (Amazon VPC)
+    description:
+      - (String) Operating system type.
+    required: true
+
+  rancher:
+    description:
+      - (Object) The Rancher integration configuration.;
+        Expects the following keys -
+        version (String),
+        access_key (String),
+        secret_key (String),
+        master_host (String)
+
+  right_scale:
+    description:
+      - (Object) The Rightscale integration configuration.;
+        Expects the following keys -
+        account_id (String),
+        refresh_token (String)
+
+  risk:
+    description:
+      - (Integer) required if on demand is not set. The percentage of Spot instances to launch (0 - 100).
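+  # Clarifying note (not an option): risk and on_demand_count are mutually
+  # exclusive ways to split the group between Spot and on-demand capacity;
+  # set exactly one of them, e.g. risk: 100 for an all-Spot group.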
+
+  roll_config:
+    description:
+      - (Object) Roll configuration.;
+        If you would like the group to roll after updating, please use this feature.
+        Accepts the following keys -
+        batch_size_percentage (Integer, Required),
+        grace_period (Integer, Required),
+        health_check_type (String, Optional)
+
+  scheduled_tasks:
+    description:
+      - (List of Objects) a list of hash/dictionaries of scheduled tasks to configure in the elastigroup;
+        '[{"key":"value", "key":"value"}]';
+        keys allowed are -
+        adjustment (Integer),
+        scale_target_capacity (Integer),
+        scale_min_capacity (Integer),
+        scale_max_capacity (Integer),
+        adjustment_percentage (Integer),
+        batch_size_percentage (Integer),
+        cron_expression (String),
+        frequency (String),
+        grace_period (Integer),
+        task_type (String, required),
+        is_enabled (Boolean)
+
+  security_group_ids:
+    description:
+      - (List of Strings) One or more security group IDs.;
+        In case of update it will override the existing Security Group with the new given array
+    required: true
+
+  shutdown_script:
+    description:
+      - (String) The Base64-encoded shutdown script that executes prior to instance termination.
+        Encode before setting.
+
+  signals:
+    description:
+      - (List of Objects) a list of hash/dictionaries of signals to configure in the elastigroup;
+        keys allowed are -
+        name (String, required),
+        timeout (Integer)
+
+  spin_up_time:
+    description:
+      - (Integer) spin up time, in seconds, for the instance
+
+  spot_instance_types:
+    description:
+      - (List of Strings) Spot instance types that will be provisioned.
+    required: true
+
+  state:
+    choices:
+      - present
+      - absent
+    description:
+      - (String) create or delete the elastigroup
+
+  tags:
+    description:
+      - (List of tagKey:tagValue pairs) a list of tags to configure in the elastigroup. Please specify list of keys and values (key colon value);
+
+  target:
+    description:
+      - (Integer) The number of instances to launch
+    required: true
+
+  target_group_arns:
+    description:
+      - (List of Strings) List of target group arns instances should be registered to
+
+  tenancy:
+    choices:
+      - default
+      - dedicated
+    description:
+      - (String) dedicated vs shared tenancy
+
+  terminate_at_end_of_billing_hour:
+    description:
+      - (Boolean) terminate at the end of billing hour
+    type: bool
+
+  unit:
+    choices:
+      - instance
+      - weight
+    description:
+      - (String) The capacity unit to launch instances by.
+    required: true
+
+  up_scaling_policies:
+    description:
+      - (List of Objects) a list of hash/dictionaries of scaling policies to configure in the elastigroup;
+        '[{"key":"value", "key":"value"}]';
+        keys allowed are -
+        policy_name (String, required),
+        namespace (String, required),
+        metric_name (String, required),
+        dimensions (List of Objects, Keys allowed are name (String, required) and value (String)),
+        statistic (String, required),
+        evaluation_periods (String, required),
+        period (String, required),
+        threshold (String, required),
+        cooldown (String, required),
+        unit (String, required),
+        operator (String, required),
+        action_type (String, required),
+        adjustment (String),
+        min_target_capacity (String),
+        target (String),
+        maximum (String),
+        minimum (String)
+
+  down_scaling_policies:
+    description:
+      - (List of Objects) a list of hash/dictionaries of scaling policies to configure in the elastigroup;
+        '[{"key":"value", "key":"value"}]';
+        keys allowed are -
+        policy_name (String, required),
+        namespace (String, required),
+        metric_name (String, required),
+        dimensions (List of Objects, Keys allowed are name (String, required) and value (String)),
+        statistic (String, required),
+        evaluation_periods (String, required),
+        period (String, required),
+        threshold (String, required),
+        cooldown (String, required),
+        unit (String, required),
+        operator (String, required),
+        action_type (String, required),
+        adjustment (String),
+        max_target_capacity (String),
+        target (String),
+        maximum (String),
+        minimum (String)
+
+  target_tracking_policies:
+    description:
+      - (List of Objects) a list of hash/dictionaries of target tracking policies to configure in the elastigroup;
+        '[{"key":"value", "key":"value"}]';
+        keys allowed are -
+        policy_name (String, required),
+        namespace (String, required),
+        source (String, required),
+        metric_name (String, required),
+        statistic (String, required),
+        unit (String, required),
+        cooldown (String, required),
+        target (String, required)
+
+  uniqueness_by:
+    choices:
+      - id
+      - name
+    description:
+      - (String) If your group names are not unique, you may use this feature to update or delete a specific group.
+        Whenever this property is set, you must set the id option in order to update or delete a group, otherwise a group will be created.
+
+  user_data:
+    description:
+      - (String) Base64-encoded MIME user data. Encode before setting the value.
+
+  utilize_reserved_instances:
+    description:
+      - (Boolean) In case of any available Reserved Instances,
+        Elastigroup will utilize your reservations before purchasing Spot instances.
+    type: bool
+
+  wait_for_instances:
+    description:
+      - (Boolean) Whether or not the elastigroup creation / update actions should wait for the instances to spin up
+    type: bool
+
+  wait_timeout:
+    description:
+      - (Integer) How long the module should wait for instances before failing the action.;
+        Only works if wait_for_instances is True.
+ +''' +EXAMPLES = ''' +# Basic configuration YAML example + +- hosts: localhost + tasks: + - name: create elastigroup + spotinst_aws_elastigroup: + state: present + risk: 100 + availability_vs_cost: balanced + availability_zones: + - name: us-west-2a + subnet_id: subnet-2b68a15c + image_id: ami-f173cc91 + key_pair: spotinst-oregon + max_size: 15 + min_size: 0 + target: 0 + unit: instance + monitoring: True + name: ansible-group + on_demand_instance_type: c3.large + product: Linux/UNIX + load_balancers: + - test-lb-1 + security_group_ids: + - sg-8f4b8fe9 + spot_instance_types: + - c3.large + do_not_update: + - image_id + - target + register: result + - debug: var=result + +# In this example, we create an elastigroup and wait 600 seconds to retrieve the instances, and use their private ips + +- hosts: localhost + tasks: + - name: create elastigroup + spotinst_aws_elastigroup: + state: present + account_id: act-1a9dd2b + risk: 100 + availability_vs_cost: balanced + availability_zones: + - name: us-west-2a + subnet_id: subnet-2b68a15c + tags: + - Environment: someEnvValue + - OtherTagKey: otherValue + image_id: ami-f173cc91 + key_pair: spotinst-oregon + max_size: 5 + min_size: 0 + target: 0 + unit: instance + monitoring: True + name: ansible-group-tal + on_demand_instance_type: c3.large + product: Linux/UNIX + security_group_ids: + - sg-8f4b8fe9 + block_device_mappings: + - device_name: '/dev/sda1' + ebs: + volume_size: 100 + volume_type: gp2 + spot_instance_types: + - c3.large + do_not_update: + - image_id + wait_for_instances: True + wait_timeout: 600 + register: result + + - name: Store private ips to file + shell: echo {{ item.private_ip }}\\n >> list-of-private-ips + with_items: "{{ result.instances }}" + - debug: var=result + +# In this example, we create an elastigroup with multiple block device mappings, tags, and also an account id +# In organizations with more than one account, it is required to specify an account_id + +- hosts: localhost + tasks: + - name: create elastigroup + spotinst_aws_elastigroup: + state: present + account_id: act-1a9dd2b + risk: 100 + availability_vs_cost: balanced + availability_zones: + - name: us-west-2a + subnet_id: subnet-2b68a15c + tags: + - Environment: someEnvValue + - OtherTagKey: otherValue + image_id: ami-f173cc91 + key_pair: spotinst-oregon + max_size: 5 + min_size: 0 + target: 0 + unit: instance + monitoring: True + name: ansible-group-tal + on_demand_instance_type: c3.large + product: Linux/UNIX + security_group_ids: + - sg-8f4b8fe9 + block_device_mappings: + - device_name: '/dev/xvda' + ebs: + volume_size: 60 + volume_type: gp2 + - device_name: '/dev/xvdb' + ebs: + volume_size: 120 + volume_type: gp2 + spot_instance_types: + - c3.large + do_not_update: + - image_id + wait_for_instances: True + wait_timeout: 600 + register: result + + - name: Store private ips to file + shell: echo {{ item.private_ip }}\\n >> list-of-private-ips + with_items: "{{ result.instances }}" + - debug: var=result + +# In this example we have set up block device mapping with ephemeral devices + +- hosts: localhost + tasks: + - name: create elastigroup + spotinst_aws_elastigroup: + state: present + risk: 100 + availability_vs_cost: balanced + availability_zones: + - name: us-west-2a + subnet_id: subnet-2b68a15c + image_id: ami-f173cc91 + key_pair: spotinst-oregon + max_size: 15 + min_size: 0 + target: 0 + unit: instance + block_device_mappings: + - device_name: '/dev/xvda' + virtual_name: ephemeral0 + - device_name: '/dev/xvdb/' + virtual_name: ephemeral1 + monitoring: 
True + name: ansible-group + on_demand_instance_type: c3.large + product: Linux/UNIX + load_balancers: + - test-lb-1 + security_group_ids: + - sg-8f4b8fe9 + spot_instance_types: + - c3.large + do_not_update: + - image_id + - target + register: result + - debug: var=result + +# In this example we create a basic group configuration with a network interface defined. +# Each network interface must have a device index + +- hosts: localhost + tasks: + - name: create elastigroup + spotinst_aws_elastigroup: + state: present + risk: 100 + availability_vs_cost: balanced + network_interfaces: + - associate_public_ip_address: true + device_index: 0 + availability_zones: + - name: us-west-2a + subnet_id: subnet-2b68a15c + image_id: ami-f173cc91 + key_pair: spotinst-oregon + max_size: 15 + min_size: 0 + target: 0 + unit: instance + monitoring: True + name: ansible-group + on_demand_instance_type: c3.large + product: Linux/UNIX + load_balancers: + - test-lb-1 + security_group_ids: + - sg-8f4b8fe9 + spot_instance_types: + - c3.large + do_not_update: + - image_id + - target + register: result + - debug: var=result + + +# In this example we create a basic group configuration with a target tracking scaling policy defined + +- hosts: localhost + tasks: + - name: create elastigroup + spotinst_aws_elastigroup: + account_id: act-92d45673 + state: present + risk: 100 + availability_vs_cost: balanced + availability_zones: + - name: us-west-2a + subnet_id: subnet-79da021e + image_id: ami-f173cc91 + fallback_to_od: true + tags: + - Creator: ValueOfCreatorTag + - Environment: ValueOfEnvironmentTag + key_pair: spotinst-labs-oregon + max_size: 10 + min_size: 0 + target: 2 + unit: instance + monitoring: True + name: ansible-group-1 + on_demand_instance_type: c3.large + product: Linux/UNIX + security_group_ids: + - sg-46cdc13d + spot_instance_types: + - c3.large + target_tracking_policies: + - policy_name: target-tracking-1 + namespace: AWS/EC2 + metric_name: CPUUtilization + statistic: average + unit: percent + target: 50 + cooldown: 120 + do_not_update: + - image_id + register: result + - debug: var=result + +''' +RETURN = ''' +--- +instances: + description: List of active elastigroup instances and their details. + returned: success + type: dict + sample: [ + { + "spotInstanceRequestId": "sir-regs25zp", + "instanceId": "i-09640ad8678234c", + "instanceType": "m4.large", + "product": "Linux/UNIX", + "availabilityZone": "us-west-2b", + "privateIp": "180.0.2.244", + "createdAt": "2017-07-17T12:46:18.000Z", + "status": "fulfilled" + } + ] +group_id: + description: Created / Updated group's ID. 
+ returned: success + type: str + sample: "sig-12345" + +''' + +HAS_SPOTINST_SDK = False +__metaclass__ = type + +import os +import time +from ansible.module_utils.basic import AnsibleModule + +try: + import spotinst_sdk as spotinst + from spotinst_sdk import SpotinstClientException + + HAS_SPOTINST_SDK = True + +except ImportError: + pass + +eni_fields = ('description', + 'device_index', + 'secondary_private_ip_address_count', + 'associate_public_ip_address', + 'delete_on_termination', + 'groups', + 'network_interface_id', + 'private_ip_address', + 'subnet_id', + 'associate_ipv6_address') + +private_ip_fields = ('private_ip_address', + 'primary') + +capacity_fields = (dict(ansible_field_name='min_size', + spotinst_field_name='minimum'), + dict(ansible_field_name='max_size', + spotinst_field_name='maximum'), + 'target', + 'unit') + +lspec_fields = ('user_data', + 'key_pair', + 'tenancy', + 'shutdown_script', + 'monitoring', + 'ebs_optimized', + 'image_id', + 'health_check_type', + 'health_check_grace_period', + 'health_check_unhealthy_duration_before_replacement', + 'security_group_ids') + +iam_fields = (dict(ansible_field_name='iam_role_name', + spotinst_field_name='name'), + dict(ansible_field_name='iam_role_arn', + spotinst_field_name='arn')) + +scheduled_task_fields = ('adjustment', + 'adjustment_percentage', + 'batch_size_percentage', + 'cron_expression', + 'frequency', + 'grace_period', + 'task_type', + 'is_enabled', + 'scale_target_capacity', + 'scale_min_capacity', + 'scale_max_capacity') + +scaling_policy_fields = ('policy_name', + 'namespace', + 'metric_name', + 'dimensions', + 'statistic', + 'evaluation_periods', + 'period', + 'threshold', + 'cooldown', + 'unit', + 'operator') + +tracking_policy_fields = ('policy_name', + 'namespace', + 'source', + 'metric_name', + 'statistic', + 'unit', + 'cooldown', + 'target', + 'threshold') + +action_fields = (dict(ansible_field_name='action_type', + spotinst_field_name='type'), + 'adjustment', + 'min_target_capacity', + 'max_target_capacity', + 'target', + 'minimum', + 'maximum') + +signal_fields = ('name', + 'timeout') + +multai_lb_fields = ('balancer_id', + 'project_id', + 'target_set_id', + 'az_awareness', + 'auto_weight') + +persistence_fields = ('should_persist_root_device', + 'should_persist_block_devices', + 'should_persist_private_ip') + +strategy_fields = ('risk', + 'utilize_reserved_instances', + 'fallback_to_od', + 'on_demand_count', + 'availability_vs_cost', + 'draining_timeout', + 'spin_up_time', + 'lifetime_period') + +ebs_fields = ('delete_on_termination', + 'encrypted', + 'iops', + 'snapshot_id', + 'volume_type', + 'volume_size') + +bdm_fields = ('device_name', + 'virtual_name', + 'no_device') + +kubernetes_fields = ('api_server', + 'token') + +right_scale_fields = ('account_id', + 'refresh_token') + +rancher_fields = ('access_key', + 'secret_key', + 'master_host', + 'version') + +chef_fields = ('chef_server', + 'organization', + 'user', + 'pem_key', + 'chef_version') + +az_fields = ('name', + 'subnet_id', + 'placement_group_name') + +opsworks_fields = ('layer_id',) + +scaling_strategy_fields = ('terminate_at_end_of_billing_hour',) + +mesosphere_fields = ('api_server',) + +ecs_fields = ('cluster_name',) + +multai_fields = ('multai_token',) + + +def handle_elastigroup(client, module): + has_changed = False + should_create = False + group_id = None + message = 'None' + + name = module.params.get('name') + state = module.params.get('state') + uniqueness_by = module.params.get('uniqueness_by') + external_group_id = 
module.params.get('id') + + if uniqueness_by == 'id': + if external_group_id is None: + should_create = True + else: + should_create = False + group_id = external_group_id + else: + groups = client.get_elastigroups() + should_create, group_id = find_group_with_same_name(groups, name) + + if should_create is True: + if state == 'present': + eg = expand_elastigroup(module, is_update=False) + module.debug(str(" [INFO] " + message + "\n")) + group = client.create_elastigroup(group=eg) + group_id = group['id'] + message = 'Created group Successfully.' + has_changed = True + + elif state == 'absent': + message = 'Cannot delete non-existent group.' + has_changed = False + else: + eg = expand_elastigroup(module, is_update=True) + + if state == 'present': + group = client.update_elastigroup(group_update=eg, group_id=group_id) + message = 'Updated group successfully.' + + try: + roll_config = module.params.get('roll_config') + if roll_config: + eg_roll = spotinst.aws_elastigroup.Roll( + batch_size_percentage=roll_config.get('batch_size_percentage'), + grace_period=roll_config.get('grace_period'), + health_check_type=roll_config.get('health_check_type') + ) + roll_response = client.roll_group(group_roll=eg_roll, group_id=group_id) + message = 'Updated and started rolling the group successfully.' + + except SpotinstClientException as exc: + message = 'Updated group successfully, but failed to perform roll. Error:' + str(exc) + has_changed = True + + elif state == 'absent': + try: + client.delete_elastigroup(group_id=group_id) + except SpotinstClientException as exc: + if "GROUP_DOESNT_EXIST" in exc.message: + pass + else: + module.fail_json(msg="Error while attempting to delete group : " + exc.message) + + message = 'Deleted group successfully.' + has_changed = True + + return group_id, message, has_changed + + +def retrieve_group_instances(client, module, group_id): + wait_timeout = module.params.get('wait_timeout') + wait_for_instances = module.params.get('wait_for_instances') + + health_check_type = module.params.get('health_check_type') + + if wait_timeout is None: + wait_timeout = 300 + + wait_timeout = time.time() + wait_timeout + target = module.params.get('target') + state = module.params.get('state') + instances = list() + + if state == 'present' and group_id is not None and wait_for_instances is True: + + is_amount_fulfilled = False + while is_amount_fulfilled is False and wait_timeout > time.time(): + instances = list() + amount_of_fulfilled_instances = 0 + + if health_check_type is not None: + healthy_instances = client.get_instance_healthiness(group_id=group_id) + + for healthy_instance in healthy_instances: + if(healthy_instance.get('healthStatus') == 'HEALTHY'): + amount_of_fulfilled_instances += 1 + instances.append(healthy_instance) + + else: + active_instances = client.get_elastigroup_active_instances(group_id=group_id) + + for active_instance in active_instances: + if active_instance.get('private_ip') is not None: + amount_of_fulfilled_instances += 1 + instances.append(active_instance) + + if amount_of_fulfilled_instances >= target: + is_amount_fulfilled = True + + time.sleep(10) + + return instances + + +def find_group_with_same_name(groups, name): + for group in groups: + if group['name'] == name: + return False, group.get('id') + + return True, None + + +def expand_elastigroup(module, is_update): + do_not_update = module.params['do_not_update'] + name = module.params.get('name') + + eg = spotinst.aws_elastigroup.Elastigroup() + description = module.params.get('description') + + 
if name is not None:
+        eg.name = name
+    if description is not None:
+        eg.description = description
+
+    # Capacity
+    expand_capacity(eg, module, is_update, do_not_update)
+    # Strategy
+    expand_strategy(eg, module)
+    # Scaling
+    expand_scaling(eg, module)
+    # Third party integrations
+    expand_integrations(eg, module)
+    # Compute
+    expand_compute(eg, module, is_update, do_not_update)
+    # Multai
+    expand_multai(eg, module)
+    # Scheduling
+    expand_scheduled_tasks(eg, module)
+
+    return eg
+
+
+def expand_compute(eg, module, is_update, do_not_update):
+    elastic_ips = module.params['elastic_ips']
+    on_demand_instance_type = module.params.get('on_demand_instance_type')
+    spot_instance_types = module.params['spot_instance_types']
+    ebs_volume_pool = module.params['ebs_volume_pool']
+    availability_zones_list = module.params['availability_zones']
+    product = module.params.get('product')
+
+    eg_compute = spotinst.aws_elastigroup.Compute()
+
+    if product is not None:
+        # Only put product on group creation
+        if is_update is not True:
+            eg_compute.product = product
+
+    if elastic_ips is not None:
+        eg_compute.elastic_ips = elastic_ips
+
+    if on_demand_instance_type is not None or spot_instance_types is not None:
+        eg_instance_types = spotinst.aws_elastigroup.InstanceTypes()
+
+        if spot_instance_types is not None:
+            eg_instance_types.spot = spot_instance_types
+        if on_demand_instance_type is not None:
+            eg_instance_types.ondemand = on_demand_instance_type
+
+        if eg_instance_types.spot is not None or eg_instance_types.ondemand is not None:
+            eg_compute.instance_types = eg_instance_types
+
+    expand_ebs_volume_pool(eg_compute, ebs_volume_pool)
+
+    eg_compute.availability_zones = expand_list(availability_zones_list, az_fields, 'AvailabilityZone')
+
+    expand_launch_spec(eg_compute, module, is_update, do_not_update)
+
+    eg.compute = eg_compute
+
+
+def expand_ebs_volume_pool(eg_compute, ebs_volumes_list):
+    if ebs_volumes_list is not None:
+        eg_volumes = []
+
+        for volume in ebs_volumes_list:
+            eg_volume = spotinst.aws_elastigroup.EbsVolume()
+
+            if volume.get('device_name') is not None:
+                eg_volume.device_name = volume.get('device_name')
+            if volume.get('volume_ids') is not None:
+                eg_volume.volume_ids = volume.get('volume_ids')
+
+            if eg_volume.device_name is not None:
+                eg_volumes.append(eg_volume)
+
+        if len(eg_volumes) > 0:
+            eg_compute.ebs_volume_pool = eg_volumes
+
+
+def expand_launch_spec(eg_compute, module, is_update, do_not_update):
+    eg_launch_spec = expand_fields(lspec_fields, module.params, 'LaunchSpecification')
+
+    if module.params['iam_role_arn'] is not None or module.params['iam_role_name'] is not None:
+        eg_launch_spec.iam_role = expand_fields(iam_fields, module.params, 'IamRole')
+
+    tags = module.params['tags']
+    load_balancers = module.params['load_balancers']
+    target_group_arns = module.params['target_group_arns']
+    block_device_mappings = module.params['block_device_mappings']
+    network_interfaces = module.params['network_interfaces']
+
+    if is_update is True:
+        if 'image_id' in do_not_update:
+            delattr(eg_launch_spec, 'image_id')
+
+    expand_tags(eg_launch_spec, tags)
+
+    expand_load_balancers(eg_launch_spec, load_balancers, target_group_arns)
+
+    expand_block_device_mappings(eg_launch_spec, block_device_mappings)
+
+    expand_network_interfaces(eg_launch_spec, network_interfaces)
+
+    eg_compute.launch_specification = eg_launch_spec
+
+
+def expand_integrations(eg, module):
+    rancher = module.params.get('rancher')
+    mesosphere = module.params.get('mesosphere')
+    ecs = module.params.get('ecs')
+    kubernetes = module.params.get('kubernetes')
+    right_scale = module.params.get('right_scale')
+    opsworks = module.params.get('opsworks')
+    chef = module.params.get('chef')
+
+    integration_exists = False
+
+    eg_integrations = spotinst.aws_elastigroup.ThirdPartyIntegrations()
+
+    if mesosphere is not None:
+        eg_integrations.mesosphere = expand_fields(mesosphere_fields, mesosphere, 'Mesosphere')
+        integration_exists = True
+
+    if ecs is not None:
+        eg_integrations.ecs = expand_fields(ecs_fields, ecs, 'EcsConfiguration')
+        integration_exists = True
+
+    if kubernetes is not None:
+        eg_integrations.kubernetes = expand_fields(kubernetes_fields, kubernetes, 'KubernetesConfiguration')
+        integration_exists = True
+
+    if right_scale is not None:
+        eg_integrations.right_scale = expand_fields(right_scale_fields, right_scale, 'RightScaleConfiguration')
+        integration_exists = True
+
+    if opsworks is not None:
+        eg_integrations.opsworks = expand_fields(opsworks_fields, opsworks, 'OpsWorksConfiguration')
+        integration_exists = True
+
+    if rancher is not None:
+        eg_integrations.rancher = expand_fields(rancher_fields, rancher, 'Rancher')
+        integration_exists = True
+
+    if chef is not None:
+        eg_integrations.chef = expand_fields(chef_fields, chef, 'ChefConfiguration')
+        integration_exists = True
+
+    if integration_exists:
+        eg.third_parties_integration = eg_integrations
+
+
+def expand_capacity(eg, module, is_update, do_not_update):
+    eg_capacity = expand_fields(capacity_fields, module.params, 'Capacity')
+
+    if is_update is True:
+        delattr(eg_capacity, 'unit')
+
+        if 'target' in do_not_update:
+            delattr(eg_capacity, 'target')
+
+    eg.capacity = eg_capacity
+
+
+def expand_strategy(eg, module):
+    persistence = module.params.get('persistence')
+    signals = module.params.get('signals')
+
+    eg_strategy = expand_fields(strategy_fields, module.params, 'Strategy')
+
+    terminate_at_end_of_billing_hour = module.params.get('terminate_at_end_of_billing_hour')
+
+    if terminate_at_end_of_billing_hour is not None:
+        eg_strategy.eg_scaling_strategy = expand_fields(scaling_strategy_fields,
+                                                        module.params, 'ScalingStrategy')
+
+    if persistence is not None:
+        eg_strategy.persistence = expand_fields(persistence_fields, persistence, 'Persistence')
+
+    if signals is not None:
+        eg_signals = expand_list(signals, signal_fields, 'Signal')
+
+        if len(eg_signals) > 0:
+            eg_strategy.signals = eg_signals
+
+    eg.strategy = eg_strategy
+
+
+def expand_multai(eg, module):
+    multai_load_balancers = module.params.get('multai_load_balancers')
+
+    eg_multai = expand_fields(multai_fields, module.params, 'Multai')
+
+    if multai_load_balancers is not None:
+        eg_multai_load_balancers = expand_list(multai_load_balancers, multai_lb_fields, 'MultaiLoadBalancer')
+
+        if len(eg_multai_load_balancers) > 0:
+            eg_multai.balancers = eg_multai_load_balancers
+            eg.multai = eg_multai
+
+
+def expand_scheduled_tasks(eg, module):
+    scheduled_tasks = module.params.get('scheduled_tasks')
+
+    if scheduled_tasks is not None:
+        eg_scheduling = spotinst.aws_elastigroup.Scheduling()
+
+        eg_tasks = expand_list(scheduled_tasks, scheduled_task_fields, 'ScheduledTask')
+
+        if len(eg_tasks) > 0:
+            eg_scheduling.tasks = eg_tasks
+            eg.scheduling = eg_scheduling
+
+
+def expand_load_balancers(eg_launchspec, load_balancers, target_group_arns):
+    if load_balancers is not None or target_group_arns is not None:
+        eg_load_balancers_config = spotinst.aws_elastigroup.LoadBalancersConfig()
+        eg_total_lbs = []
+
+        if load_balancers is not None:
+            for elb_name in load_balancers:
+                eg_elb = spotinst.aws_elastigroup.LoadBalancer()
+                if elb_name is not None:
+                    eg_elb.name = elb_name
+                    eg_elb.type = 'CLASSIC'
+                    eg_total_lbs.append(eg_elb)
+
+        if target_group_arns is not None:
+            for target_arn in target_group_arns:
+                eg_elb = spotinst.aws_elastigroup.LoadBalancer()
+                if target_arn is not None:
+                    eg_elb.arn = target_arn
+                    eg_elb.type = 'TARGET_GROUP'
+                    eg_total_lbs.append(eg_elb)
+
+        if len(eg_total_lbs) > 0:
+            eg_load_balancers_config.load_balancers = eg_total_lbs
+            eg_launchspec.load_balancers_config = eg_load_balancers_config
+
+
+def expand_tags(eg_launchspec, tags):
+    if tags is not None:
+        eg_tags = []
+
+        for tag in tags:
+            eg_tag = spotinst.aws_elastigroup.Tag()
+            # list() keeps this working on both Python 2 and 3, where dict
+            # views are not subscriptable.
+            if tag.keys():
+                eg_tag.tag_key = list(tag.keys())[0]
+            if tag.values():
+                eg_tag.tag_value = list(tag.values())[0]
+
+            eg_tags.append(eg_tag)
+
+        if len(eg_tags) > 0:
+            eg_launchspec.tags = eg_tags
+
+
+def expand_block_device_mappings(eg_launchspec, bdms):
+    if bdms is not None:
+        eg_bdms = []
+
+        for bdm in bdms:
+            eg_bdm = expand_fields(bdm_fields, bdm, 'BlockDeviceMapping')
+
+            if bdm.get('ebs') is not None:
+                eg_bdm.ebs = expand_fields(ebs_fields, bdm.get('ebs'), 'EBS')
+
+            eg_bdms.append(eg_bdm)
+
+        if len(eg_bdms) > 0:
+            eg_launchspec.block_device_mappings = eg_bdms
+
+
+def expand_network_interfaces(eg_launchspec, enis):
+    if enis is not None:
+        eg_enis = []
+
+        for eni in enis:
+            eg_eni = expand_fields(eni_fields, eni, 'NetworkInterface')
+
+            eg_pias = expand_list(eni.get('private_ip_addresses'), private_ip_fields, 'PrivateIpAddress')
+
+            if eg_pias is not None:
+                eg_eni.private_ip_addresses = eg_pias
+
+            eg_enis.append(eg_eni)
+
+        if len(eg_enis) > 0:
+            eg_launchspec.network_interfaces = eg_enis
+
+
+def expand_scaling(eg, module):
+    up_scaling_policies = module.params['up_scaling_policies']
+    down_scaling_policies = module.params['down_scaling_policies']
+    target_tracking_policies = module.params['target_tracking_policies']
+
+    eg_scaling = spotinst.aws_elastigroup.Scaling()
+
+    if up_scaling_policies is not None:
+        eg_up_scaling_policies = expand_scaling_policies(up_scaling_policies)
+        if len(eg_up_scaling_policies) > 0:
+            eg_scaling.up = eg_up_scaling_policies
+
+    if down_scaling_policies is not None:
+        eg_down_scaling_policies = expand_scaling_policies(down_scaling_policies)
+        if len(eg_down_scaling_policies) > 0:
+            eg_scaling.down = eg_down_scaling_policies
+
+    if target_tracking_policies is not None:
+        eg_target_tracking_policies = expand_target_tracking_policies(target_tracking_policies)
+        if len(eg_target_tracking_policies) > 0:
+            eg_scaling.target = eg_target_tracking_policies
+
+    if eg_scaling.down is not None or eg_scaling.up is not None or eg_scaling.target is not None:
+        eg.scaling = eg_scaling
+
+
+def expand_list(items, fields, class_name):
+    if items is not None:
+        new_objects_list = []
+        for item in items:
+            new_obj = expand_fields(fields, item, class_name)
+            new_objects_list.append(new_obj)
+
+        return new_objects_list
+
+
+def expand_fields(fields, item, class_name):
+    class_ = getattr(spotinst.aws_elastigroup, class_name)
+    new_obj = class_()
+
+    # Handle primitive fields
+    if item is not None:
+        for field in fields:
+            if isinstance(field, dict):
+                ansible_field_name = field['ansible_field_name']
+                spotinst_field_name = field['spotinst_field_name']
+            else:
+                ansible_field_name = field
+                spotinst_field_name = field
+            if item.get(ansible_field_name) is not None:
+                setattr(new_obj, spotinst_field_name, item.get(ansible_field_name))
+
+    return new_obj
+
+
+def
expand_scaling_policies(scaling_policies): + eg_scaling_policies = [] + + for policy in scaling_policies: + eg_policy = expand_fields(scaling_policy_fields, policy, 'ScalingPolicy') + eg_policy.action = expand_fields(action_fields, policy, 'ScalingPolicyAction') + eg_scaling_policies.append(eg_policy) + + return eg_scaling_policies + + +def expand_target_tracking_policies(tracking_policies): + eg_tracking_policies = [] + + for policy in tracking_policies: + eg_policy = expand_fields(tracking_policy_fields, policy, 'TargetTrackingPolicy') + eg_tracking_policies.append(eg_policy) + + return eg_tracking_policies + + +def main(): + fields = dict( + account_id=dict(type='str'), + availability_vs_cost=dict(type='str', required=True), + availability_zones=dict(type='list', required=True), + block_device_mappings=dict(type='list'), + chef=dict(type='dict'), + credentials_path=dict(type='path', default="~/.spotinst/credentials"), + do_not_update=dict(default=[], type='list'), + down_scaling_policies=dict(type='list'), + draining_timeout=dict(type='int'), + ebs_optimized=dict(type='bool'), + ebs_volume_pool=dict(type='list'), + ecs=dict(type='dict'), + elastic_beanstalk=dict(type='dict'), + elastic_ips=dict(type='list'), + fallback_to_od=dict(type='bool'), + id=dict(type='str'), + health_check_grace_period=dict(type='int'), + health_check_type=dict(type='str'), + health_check_unhealthy_duration_before_replacement=dict(type='int'), + iam_role_arn=dict(type='str'), + iam_role_name=dict(type='str'), + image_id=dict(type='str', required=True), + key_pair=dict(type='str'), + kubernetes=dict(type='dict'), + lifetime_period=dict(type='int'), + load_balancers=dict(type='list'), + max_size=dict(type='int', required=True), + mesosphere=dict(type='dict'), + min_size=dict(type='int', required=True), + monitoring=dict(type='str'), + multai_load_balancers=dict(type='list'), + multai_token=dict(type='str'), + name=dict(type='str', required=True), + network_interfaces=dict(type='list'), + on_demand_count=dict(type='int'), + on_demand_instance_type=dict(type='str'), + opsworks=dict(type='dict'), + persistence=dict(type='dict'), + product=dict(type='str', required=True), + rancher=dict(type='dict'), + right_scale=dict(type='dict'), + risk=dict(type='int'), + roll_config=dict(type='dict'), + scheduled_tasks=dict(type='list'), + security_group_ids=dict(type='list', required=True), + shutdown_script=dict(type='str'), + signals=dict(type='list'), + spin_up_time=dict(type='int'), + spot_instance_types=dict(type='list', required=True), + state=dict(default='present', choices=['present', 'absent']), + tags=dict(type='list'), + target=dict(type='int', required=True), + target_group_arns=dict(type='list'), + tenancy=dict(type='str'), + terminate_at_end_of_billing_hour=dict(type='bool'), + token=dict(type='str'), + unit=dict(type='str'), + user_data=dict(type='str'), + utilize_reserved_instances=dict(type='bool'), + uniqueness_by=dict(default='name', choices=['name', 'id']), + up_scaling_policies=dict(type='list'), + target_tracking_policies=dict(type='list'), + wait_for_instances=dict(type='bool', default=False), + wait_timeout=dict(type='int') + ) + + module = AnsibleModule(argument_spec=fields) + + if not HAS_SPOTINST_SDK: + module.fail_json(msg="the Spotinst SDK library is required. 
(pip install spotinst_sdk)")
+
+    # Retrieve creds file variables
+    creds_file_loaded_vars = dict()
+
+    credentials_path = module.params.get('credentials_path')
+
+    try:
+        with open(credentials_path, "r") as creds:
+            for line in creds:
+                eq_index = line.find('=')
+                var_name = line[:eq_index].strip()
+                string_value = line[eq_index + 1:].strip()
+                creds_file_loaded_vars[var_name] = string_value
+    except IOError:
+        pass
+    # End of creds file retrieval
+
+    token = module.params.get('token')
+    if not token:
+        token = os.environ.get('SPOTINST_TOKEN')
+    if not token:
+        token = creds_file_loaded_vars.get("token")
+
+    account = module.params.get('account_id')
+    if not account:
+        account = os.environ.get('SPOTINST_ACCOUNT_ID') or os.environ.get('ACCOUNT')
+    if not account:
+        account = creds_file_loaded_vars.get("account")
+
+    client = spotinst.SpotinstClient(auth_token=token, print_output=False)
+
+    if account is not None:
+        client = spotinst.SpotinstClient(auth_token=token, print_output=False, account_id=account)
+
+    group_id, message, has_changed = handle_elastigroup(client=client, module=module)
+
+    instances = retrieve_group_instances(client=client, module=module, group_id=group_id)
+
+    module.exit_json(changed=has_changed, group_id=group_id, message=message, instances=instances)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/cloud/univention/udm_dns_record.py b/plugins/modules/cloud/univention/udm_dns_record.py
new file mode 100644
index 0000000000..adfe466897
--- /dev/null
+++ b/plugins/modules/cloud/univention/udm_dns_record.py
@@ -0,0 +1,188 @@
+#!/usr/bin/python
+# -*- coding: UTF-8 -*-
+
+# Copyright: (c) 2016, Adfinis SyGroup AG
+# Tobias Rueetschi
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: udm_dns_record
+author:
+- Tobias Rüetschi (@keachi)
+short_description: Manage DNS entries on a Univention Corporate Server
+description:
+    - "This module allows managing DNS records on a Univention Corporate Server (UCS).
+       It uses the Python API of the UCS to create a new object or edit it."
+requirements:
+    - Python >= 2.6
+    - Univention
+options:
+    state:
+        required: false
+        default: "present"
+        choices: [ present, absent ]
+        description:
+            - Whether the DNS record is present or not.
+    name:
+        required: true
+        description:
+            - "Name of the record, this is also the DNS record. E.g. www for
+               www.example.com."
+    zone:
+        required: true
+        description:
+            - Corresponding DNS zone for this record, e.g. example.com.
+    type:
+        required: true
+        choices: [ host_record, alias, ptr_record, srv_record, txt_record ]
+        description:
+            - "Define the record type. C(host_record) is an A or AAAA record,
+               C(alias) is a CNAME, C(ptr_record) is a PTR record, C(srv_record)
+               is a SRV record and C(txt_record) is a TXT record."
+    data:
+        required: false
+        default: []
+        description:
+            - "Additional data for this record, e.g. C({'a': '192.0.2.1'}).
+               Required if C(state=present)."
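+# Illustrative, hedged example beyond the A record shown in EXAMPLES below: an
+# alias (CNAME) would use type C(alias) with data along the lines of
+# C({'cname': 'www.example.com.'}); the exact keys accepted are those of the
+# corresponding UCS 'dns/alias' handler, so treat the key name as an assumption.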
+''' + + +EXAMPLES = ''' +# Create a DNS record on a UCS +- udm_dns_record: + name: www + zone: example.com + type: host_record + data: + - a: 192.0.2.1 +''' + + +RETURN = '''# ''' + +HAVE_UNIVENTION = False +try: + from univention.admin.handlers.dns import ( + forward_zone, + reverse_zone, + ) + HAVE_UNIVENTION = True +except ImportError: + pass + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.univention_umc import ( + umc_module_for_add, + umc_module_for_edit, + ldap_search, + base_dn, + config, + uldap, +) + + +def main(): + module = AnsibleModule( + argument_spec=dict( + type=dict(required=True, + type='str'), + zone=dict(required=True, + type='str'), + name=dict(required=True, + type='str'), + data=dict(default=[], + type='dict'), + state=dict(default='present', + choices=['present', 'absent'], + type='str') + ), + supports_check_mode=True, + required_if=([ + ('state', 'present', ['data']) + ]) + ) + + if not HAVE_UNIVENTION: + module.fail_json(msg="This module requires univention python bindings") + + type = module.params['type'] + zone = module.params['zone'] + name = module.params['name'] + data = module.params['data'] + state = module.params['state'] + changed = False + diff = None + + obj = list(ldap_search( + '(&(objectClass=dNSZone)(zoneName={0})(relativeDomainName={1}))'.format(zone, name), + attr=['dNSZone'] + )) + + exists = bool(len(obj)) + container = 'zoneName={0},cn=dns,{1}'.format(zone, base_dn()) + dn = 'relativeDomainName={0},{1}'.format(name, container) + + if state == 'present': + try: + if not exists: + so = forward_zone.lookup( + config(), + uldap(), + '(zone={0})'.format(zone), + scope='domain', + ) or reverse_zone.lookup( + config(), + uldap(), + '(zone={0})'.format(zone), + scope='domain', + ) + obj = umc_module_for_add('dns/{0}'.format(type), container, superordinate=so[0]) + else: + obj = umc_module_for_edit('dns/{0}'.format(type), dn) + obj['name'] = name + for k, v in data.items(): + obj[k] = v + diff = obj.diff() + changed = obj.diff() != [] + if not module.check_mode: + if not exists: + obj.create() + else: + obj.modify() + except Exception as e: + module.fail_json( + msg='Creating/editing dns entry {0} in {1} failed: {2}'.format(name, container, e) + ) + + if state == 'absent' and exists: + try: + obj = umc_module_for_edit('dns/{0}'.format(type), dn) + if not module.check_mode: + obj.remove() + changed = True + except Exception as e: + module.fail_json( + msg='Removing dns entry {0} in {1} failed: {2}'.format(name, container, e) + ) + + module.exit_json( + changed=changed, + name=name, + diff=diff, + container=container + ) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/univention/udm_dns_zone.py b/plugins/modules/cloud/univention/udm_dns_zone.py new file mode 100644 index 0000000000..28d5c19c7c --- /dev/null +++ b/plugins/modules/cloud/univention/udm_dns_zone.py @@ -0,0 +1,236 @@ +#!/usr/bin/python +# -*- coding: UTF-8 -*- + +# Copyright: (c) 2016, Adfinis SyGroup AG +# Tobias Rueetschi +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: udm_dns_zone +author: +- Tobias Rüetschi (@keachi) +short_description: Manage dns zones on a univention corporate server +description: + - "This 
module allows managing dns zones on a univention corporate server (UCS).
+       It uses the Python API of the UCS to create a new object or edit it."
+requirements:
+    - Python >= 2.6
+options:
+    state:
+        required: false
+        default: "present"
+        choices: [ present, absent ]
+        description:
+            - Whether the dns zone is present or not.
+    type:
+        required: true
+        choices: [ forward_zone, reverse_zone ]
+        description:
+            - Define if the zone is a forward or reverse DNS zone.
+    zone:
+        required: true
+        description:
+            - DNS zone name, e.g. C(example.com).
+    nameserver:
+        required: false
+        description:
+            - List of appropriate name servers. Required if C(state=present).
+    interfaces:
+        required: false
+        description:
+            - List of interface IP addresses on which the server should
+              respond for this zone. Required if C(state=present).
+
+    refresh:
+        required: false
+        default: 3600
+        description:
+            - Interval before the zone should be refreshed.
+    retry:
+        required: false
+        default: 1800
+        description:
+            - Interval that should elapse before a failed refresh should be retried.
+    expire:
+        required: false
+        default: 604800
+        description:
+            - Specifies the upper limit on the time interval that can elapse before the zone is no longer authoritative.
+    ttl:
+        required: false
+        default: 600
+        description:
+            - Minimum TTL field that should be exported with any RR from this zone.
+
+    contact:
+        required: false
+        default: ''
+        description:
+            - Contact person in the SOA record.
+    mx:
+        required: false
+        default: []
+        description:
+            - List of MX servers. (Must be declared as A or AAAA records.)
+'''
+
+
+EXAMPLES = '''
+# Create a DNS zone on a UCS
+- udm_dns_zone:
+    zone: example.com
+    type: forward_zone
+    nameserver:
+    - ucs.example.com
+    interfaces:
+    - 192.0.2.1
+'''
+
+
+RETURN = '''# '''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.univention_umc import (
+    umc_module_for_add,
+    umc_module_for_edit,
+    ldap_search,
+    base_dn,
+)
+
+
+def convert_time(time):
+    """Convert a time in seconds into the biggest unit"""
+    units = [
+        (24 * 60 * 60, 'days'),
+        (60 * 60, 'hours'),
+        (60, 'minutes'),
+        (1, 'seconds'),
+    ]
+
+    if time == 0:
+        return ('0', 'seconds')
+    for unit in units:
+        if time >= unit[0]:
+            return ('{0}'.format(time // unit[0]), unit[1])
+
+
+def main():
+    module = AnsibleModule(
+        argument_spec=dict(
+            type=dict(required=True,
+                      type='str'),
+            zone=dict(required=True,
+                      aliases=['name'],
+                      type='str'),
+            nameserver=dict(default=[],
+                            type='list'),
+            interfaces=dict(default=[],
+                            type='list'),
+            refresh=dict(default=3600,
+                         type='int'),
+            retry=dict(default=1800,
+                       type='int'),
+            expire=dict(default=604800,
+                        type='int'),
+            ttl=dict(default=600,
+                     type='int'),
+            contact=dict(default='',
+                         type='str'),
+            mx=dict(default=[],
+                    type='list'),
+            state=dict(default='present',
+                       choices=['present', 'absent'],
+                       type='str')
+        ),
+        supports_check_mode=True,
+        required_if=([
+            ('state', 'present', ['nameserver', 'interfaces'])
+        ])
+    )
+    type = module.params['type']
+    zone = module.params['zone']
+    nameserver = module.params['nameserver']
+    interfaces = module.params['interfaces']
+    refresh = module.params['refresh']
+    retry = module.params['retry']
+    expire = module.params['expire']
+    ttl = module.params['ttl']
+    contact = module.params['contact']
+    mx = module.params['mx']
+    state = module.params['state']
+    changed = False
+    diff = None
+
+    obj = list(ldap_search(
+        '(&(objectClass=dNSZone)(zoneName={0}))'.format(zone),
+        attr=['dNSZone']
+    ))
+
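+    # Editorial aside (illustration only, not part of the original module):
+    # convert_time() above reduces a number of seconds to the largest whole
+    # unit, which is the representation handed to UDM, e.g.:
+    #
+    #     convert_time(3600)    -> ('1', 'hours')
+    #     convert_time(604800)  -> ('7', 'days')
+    #     convert_time(90)      -> ('1', 'minutes')  # floor division, remainder dropped
+    #
+    # The refresh/retry/expire/ttl parameters are passed through it below.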
+    exists = bool(len(obj))
+    container = 'cn=dns,{0}'.format(base_dn())
+    dn = 'zoneName={0},{1}'.format(zone, container)
+    # An empty contact defaults to root@<zone>.
+    if contact == '':
+        contact = 'root@{0}.'.format(zone)
+
+    if state == 'present':
+        try:
+            if not exists:
+                obj = umc_module_for_add('dns/{0}'.format(type), container)
+            else:
+                obj = umc_module_for_edit('dns/{0}'.format(type), dn)
+            obj['zone'] = zone
+            obj['nameserver'] = nameserver
+            obj['a'] = interfaces
+            obj['refresh'] = convert_time(refresh)
+            obj['retry'] = convert_time(retry)
+            obj['expire'] = convert_time(expire)
+            obj['ttl'] = convert_time(ttl)
+            obj['contact'] = contact
+            obj['mx'] = mx
+            diff = obj.diff()
+            if exists:
+                for k in obj.keys():
+                    if obj.hasChanged(k):
+                        changed = True
+            else:
+                changed = True
+            if not module.check_mode:
+                if not exists:
+                    obj.create()
+                elif changed:
+                    obj.modify()
+        except Exception as e:
+            module.fail_json(
+                msg='Creating/editing dns zone {0} failed: {1}'.format(zone, e)
+            )
+
+    if state == 'absent' and exists:
+        try:
+            obj = umc_module_for_edit('dns/{0}'.format(type), dn)
+            if not module.check_mode:
+                obj.remove()
+            changed = True
+        except Exception as e:
+            module.fail_json(
+                msg='Removing dns zone {0} failed: {1}'.format(zone, e)
+            )
+
+    module.exit_json(
+        changed=changed,
+        diff=diff,
+        zone=zone
+    )
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/cloud/univention/udm_group.py b/plugins/modules/cloud/univention/udm_group.py
new file mode 100644
index 0000000000..9df20d966e
--- /dev/null
+++ b/plugins/modules/cloud/univention/udm_group.py
@@ -0,0 +1,172 @@
+#!/usr/bin/python
+# -*- coding: UTF-8 -*-
+
+# Copyright: (c) 2016, Adfinis SyGroup AG
+# Tobias Rueetschi
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: udm_group
+author:
+- Tobias Rüetschi (@keachi)
+short_description: Manage posix groups on a univention corporate server
+description:
+    - "This module allows managing user groups on a univention corporate server (UCS).
+       It uses the Python API of the UCS to create a new object or edit it."
+requirements:
+    - Python >= 2.6
+options:
+    state:
+        required: false
+        default: "present"
+        choices: [ present, absent ]
+        description:
+            - Whether the group is present or not.
+    name:
+        required: true
+        description:
+            - Name of the posix group.
+    description:
+        required: false
+        description:
+            - Group description.
+    position:
+        required: false
+        description:
+            - Define the whole LDAP position of the group, e.g.
+              C(cn=g123m-1A,cn=classes,cn=schueler,cn=groups,ou=schule,dc=example,dc=com).
+    ou:
+        required: false
+        description:
+            - LDAP OU, e.g. school for LDAP OU C(ou=school,dc=example,dc=com).
+    subpath:
+        required: false
+        description:
+            - Subpath inside the OU, e.g. C(cn=classes,cn=students,cn=groups).
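+# Editorial note (worked example, not part of the original documentation):
+# when I(position) is empty, the module assembles the container DN itself as
+# "<subpath>,ou=<ou>,<base_dn>". With ou=school and the default
+# subpath=cn=groups, the group g123m-1A therefore ends up at
+# cn=g123m-1A,cn=groups,ou=school,dc=example,dc=com.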
+''' + + +EXAMPLES = ''' +# Create a POSIX group +- udm_group: + name: g123m-1A + +# Create a POSIX group with the exact DN +# C(cn=g123m-1A,cn=classes,cn=students,cn=groups,ou=school,dc=school,dc=example,dc=com) +- udm_group: + name: g123m-1A + subpath: 'cn=classes,cn=students,cn=groups' + ou: school +# or +- udm_group: + name: g123m-1A + position: 'cn=classes,cn=students,cn=groups,ou=school,dc=school,dc=example,dc=com' +''' + + +RETURN = '''# ''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.univention_umc import ( + umc_module_for_add, + umc_module_for_edit, + ldap_search, + base_dn, +) + + +def main(): + module = AnsibleModule( + argument_spec=dict( + name=dict(required=True, + type='str'), + description=dict(default=None, + type='str'), + position=dict(default='', + type='str'), + ou=dict(default='', + type='str'), + subpath=dict(default='cn=groups', + type='str'), + state=dict(default='present', + choices=['present', 'absent'], + type='str') + ), + supports_check_mode=True + ) + name = module.params['name'] + description = module.params['description'] + position = module.params['position'] + ou = module.params['ou'] + subpath = module.params['subpath'] + state = module.params['state'] + changed = False + diff = None + + groups = list(ldap_search( + '(&(objectClass=posixGroup)(cn={0}))'.format(name), + attr=['cn'] + )) + if position != '': + container = position + else: + if ou != '': + ou = 'ou={0},'.format(ou) + if subpath != '': + subpath = '{0},'.format(subpath) + container = '{0}{1}{2}'.format(subpath, ou, base_dn()) + group_dn = 'cn={0},{1}'.format(name, container) + + exists = bool(len(groups)) + + if state == 'present': + try: + if not exists: + grp = umc_module_for_add('groups/group', container) + else: + grp = umc_module_for_edit('groups/group', group_dn) + grp['name'] = name + grp['description'] = description + diff = grp.diff() + changed = grp.diff() != [] + if not module.check_mode: + if not exists: + grp.create() + else: + grp.modify() + except Exception: + module.fail_json( + msg="Creating/editing group {0} in {1} failed".format(name, container) + ) + + if state == 'absent' and exists: + try: + grp = umc_module_for_edit('groups/group', group_dn) + if not module.check_mode: + grp.remove() + changed = True + except Exception: + module.fail_json( + msg="Removing group {0} failed".format(name) + ) + + module.exit_json( + changed=changed, + name=name, + diff=diff, + container=container + ) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/univention/udm_share.py b/plugins/modules/cloud/univention/udm_share.py new file mode 100644 index 0000000000..e0bfae6dd7 --- /dev/null +++ b/plugins/modules/cloud/univention/udm_share.py @@ -0,0 +1,550 @@ +#!/usr/bin/python +# -*- coding: UTF-8 -*- + +# Copyright: (c) 2016, Adfinis SyGroup AG +# Tobias Rueetschi +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: udm_share +author: +- Tobias Rüetschi (@keachi) +short_description: Manage samba shares on a univention corporate server +description: + - "This module allows to manage samba shares on a univention corporate + server (UCS). + It uses the python API of the UCS to create a new object or edit it." 
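+# Editorial note (not part of the original documentation): although the
+# option docs below give the Samba flags as '0'/'1' strings, the argument
+# spec declares them as type='bool'; main() converts True/False back to
+# '1'/'0' before handing the values to UDM, so plain yes/no values work in
+# playbooks as well.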
+requirements: + - Python >= 2.6 +options: + state: + default: "present" + choices: [ present, absent ] + description: + - Whether the share is present or not. + name: + required: true + description: + - Name + host: + required: false + description: + - Host FQDN (server which provides the share), e.g. C({{ + ansible_fqdn }}). Required if C(state=present). + path: + required: false + description: + - Directory on the providing server, e.g. C(/home). Required if C(state=present). + samba_name: + required: false + description: + - Windows name. Required if C(state=present). + aliases: [ sambaName ] + ou: + required: true + description: + - Organisational unit, inside the LDAP Base DN. + owner: + default: 0 + description: + - Directory owner of the share's root directory. + group: + default: '0' + description: + - Directory owner group of the share's root directory. + directorymode: + default: '00755' + description: + - Permissions for the share's root directory. + root_squash: + default: '1' + choices: [ '0', '1' ] + description: + - Modify user ID for root user (root squashing). + subtree_checking: + default: '1' + choices: [ '0', '1' ] + description: + - Subtree checking. + sync: + default: 'sync' + description: + - NFS synchronisation. + writeable: + default: '1' + choices: [ '0', '1' ] + description: + - NFS write access. + samba_block_size: + description: + - Blocking size. + aliases: [ sambaBlockSize ] + samba_blocking_locks: + default: '1' + choices: [ '0', '1' ] + description: + - Blocking locks. + aliases: [ sambaBlockingLocks ] + samba_browseable: + default: '1' + choices: [ '0', '1' ] + description: + - Show in Windows network environment. + aliases: [ sambaBrowseable ] + samba_create_mode: + default: '0744' + description: + - File mode. + aliases: [ sambaCreateMode ] + samba_csc_policy: + default: 'manual' + description: + - Client-side caching policy. + aliases: [ sambaCscPolicy ] + samba_custom_settings: + default: [] + description: + - Option name in smb.conf and its value. + aliases: [ sambaCustomSettings ] + samba_directory_mode: + default: '0755' + description: + - Directory mode. + aliases: [ sambaDirectoryMode ] + samba_directory_security_mode: + default: '0777' + description: + - Directory security mode. + aliases: [ sambaDirectorySecurityMode ] + samba_dos_filemode: + default: '0' + choices: [ '0', '1' ] + description: + - Users with write access may modify permissions. + aliases: [ sambaDosFilemode ] + samba_fake_oplocks: + default: '0' + choices: [ '0', '1' ] + description: + - Fake oplocks. + aliases: [ sambaFakeOplocks ] + samba_force_create_mode: + default: '0' + choices: [ '0', '1' ] + description: + - Force file mode. + aliases: [ sambaForceCreateMode ] + samba_force_directory_mode: + default: '0' + choices: [ '0', '1' ] + description: + - Force directory mode. + aliases: [ sambaForceDirectoryMode ] + samba_force_directory_security_mode: + default: '0' + choices: [ '0', '1' ] + description: + - Force directory security mode. + aliases: [ sambaForceDirectorySecurityMode ] + samba_force_group: + description: + - Force group. + aliases: [ sambaForceGroup ] + samba_force_security_mode: + default: '0' + choices: [ '0', '1' ] + description: + - Force security mode. + aliases: [ sambaForceSecurityMode ] + samba_force_user: + description: + - Force user. + aliases: [ sambaForceUser ] + samba_hide_files: + description: + - Hide files. 
+ aliases: [ sambaHideFiles ] + samba_hide_unreadable: + default: '0' + choices: [ '0', '1' ] + description: + - Hide unreadable files/directories. + aliases: [ sambaHideUnreadable ] + samba_hosts_allow: + default: [] + description: + - Allowed host/network. + aliases: [ sambaHostsAllow ] + samba_hosts_deny: + default: [] + description: + - Denied host/network. + aliases: [ sambaHostsDeny ] + samba_inherit_acls: + default: '1' + choices: [ '0', '1' ] + description: + - Inherit ACLs. + aliases: [ sambaInheritAcls ] + samba_inherit_owner: + default: '0' + choices: [ '0', '1' ] + description: + - Create files/directories with the owner of the parent directory. + aliases: [ sambaInheritOwner ] + samba_inherit_permissions: + default: '0' + choices: [ '0', '1' ] + description: + - Create files/directories with permissions of the parent directory. + aliases: [ sambaInheritPermissions ] + samba_invalid_users: + description: + - Invalid users or groups. + aliases: [ sambaInvalidUsers ] + samba_level_2_oplocks: + default: '1' + choices: [ '0', '1' ] + description: + - Level 2 oplocks. + aliases: [ sambaLevel2Oplocks ] + samba_locking: + default: '1' + choices: [ '0', '1' ] + description: + - Locking. + aliases: [ sambaLocking ] + samba_msdfs_root: + default: '0' + choices: [ '0', '1' ] + description: + - MSDFS root. + aliases: [ sambaMSDFSRoot ] + samba_nt_acl_support: + default: '1' + choices: [ '0', '1' ] + description: + - NT ACL support. + aliases: [ sambaNtAclSupport ] + samba_oplocks: + default: '1' + choices: [ '0', '1' ] + description: + - Oplocks. + aliases: [ sambaOplocks ] + samba_postexec: + description: + - Postexec script. + aliases: [ sambaPostexec ] + samba_preexec: + description: + - Preexec script. + aliases: [ sambaPreexec ] + samba_public: + default: '0' + choices: [ '0', '1' ] + description: + - Allow anonymous read-only access with a guest user. + aliases: [ sambaPublic ] + samba_security_mode: + default: '0777' + description: + - Security mode. + aliases: [ sambaSecurityMode ] + samba_strict_locking: + default: 'Auto' + description: + - Strict locking. + aliases: [ sambaStrictLocking ] + samba_vfs_objects: + description: + - VFS objects. + aliases: [ sambaVFSObjects ] + samba_valid_users: + description: + - Valid users or groups. + aliases: [ sambaValidUsers ] + samba_write_list: + description: + - Restrict write access to these users/groups. + aliases: [ sambaWriteList ] + samba_writeable: + default: '1' + choices: [ '0', '1' ] + description: + - Samba write access. + aliases: [ sambaWriteable ] + nfs_hosts: + default: [] + description: + - Only allow access for this host, IP address or network. + nfs_custom_settings: + default: [] + description: + - Option name in exports file. + aliases: [ nfsCustomSettings ] +''' + + +EXAMPLES = ''' +# Create a share named home on the server ucs.example.com with the path /home. 
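+# (Editorial note: name and ou are declared required=True, and the
+# required_if rule in the argument spec additionally demands path, host and
+# sambaName when state=present; note that the example below omits the
+# required ou option.)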
+- udm_share: + name: home + path: /home + host: ucs.example.com + sambaName: Home +''' + + +RETURN = '''# ''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.univention_umc import ( + umc_module_for_add, + umc_module_for_edit, + ldap_search, + base_dn, +) + + +def main(): + module = AnsibleModule( + argument_spec=dict( + name=dict(required=True, + type='str'), + ou=dict(required=True, + type='str'), + owner=dict(type='str', + default='0'), + group=dict(type='str', + default='0'), + path=dict(type='path', + default=None), + directorymode=dict(type='str', + default='00755'), + host=dict(type='str', + default=None), + root_squash=dict(type='bool', + default=True), + subtree_checking=dict(type='bool', + default=True), + sync=dict(type='str', + default='sync'), + writeable=dict(type='bool', + default=True), + sambaBlockSize=dict(type='str', + aliases=['samba_block_size'], + default=None), + sambaBlockingLocks=dict(type='bool', + aliases=['samba_blocking_locks'], + default=True), + sambaBrowseable=dict(type='bool', + aliases=['samba_browsable'], + default=True), + sambaCreateMode=dict(type='str', + aliases=['samba_create_mode'], + default='0744'), + sambaCscPolicy=dict(type='str', + aliases=['samba_csc_policy'], + default='manual'), + sambaCustomSettings=dict(type='list', + aliases=['samba_custom_settings'], + default=[]), + sambaDirectoryMode=dict(type='str', + aliases=['samba_directory_mode'], + default='0755'), + sambaDirectorySecurityMode=dict(type='str', + aliases=['samba_directory_security_mode'], + default='0777'), + sambaDosFilemode=dict(type='bool', + aliases=['samba_dos_filemode'], + default=False), + sambaFakeOplocks=dict(type='bool', + aliases=['samba_fake_oplocks'], + default=False), + sambaForceCreateMode=dict(type='bool', + aliases=['samba_force_create_mode'], + default=False), + sambaForceDirectoryMode=dict(type='bool', + aliases=['samba_force_directory_mode'], + default=False), + sambaForceDirectorySecurityMode=dict(type='bool', + aliases=['samba_force_directory_security_mode'], + default=False), + sambaForceGroup=dict(type='str', + aliases=['samba_force_group'], + default=None), + sambaForceSecurityMode=dict(type='bool', + aliases=['samba_force_security_mode'], + default=False), + sambaForceUser=dict(type='str', + aliases=['samba_force_user'], + default=None), + sambaHideFiles=dict(type='str', + aliases=['samba_hide_files'], + default=None), + sambaHideUnreadable=dict(type='bool', + aliases=['samba_hide_unreadable'], + default=False), + sambaHostsAllow=dict(type='list', + aliases=['samba_hosts_allow'], + default=[]), + sambaHostsDeny=dict(type='list', + aliases=['samba_hosts_deny'], + default=[]), + sambaInheritAcls=dict(type='bool', + aliases=['samba_inherit_acls'], + default=True), + sambaInheritOwner=dict(type='bool', + aliases=['samba_inherit_owner'], + default=False), + sambaInheritPermissions=dict(type='bool', + aliases=['samba_inherit_permissions'], + default=False), + sambaInvalidUsers=dict(type='str', + aliases=['samba_invalid_users'], + default=None), + sambaLevel2Oplocks=dict(type='bool', + aliases=['samba_level_2_oplocks'], + default=True), + sambaLocking=dict(type='bool', + aliases=['samba_locking'], + default=True), + sambaMSDFSRoot=dict(type='bool', + aliases=['samba_msdfs_root'], + default=False), + sambaName=dict(type='str', + aliases=['samba_name'], + default=None), + sambaNtAclSupport=dict(type='bool', + aliases=['samba_nt_acl_support'], + default=True), + sambaOplocks=dict(type='bool', 
+ aliases=['samba_oplocks'], + default=True), + sambaPostexec=dict(type='str', + aliases=['samba_postexec'], + default=None), + sambaPreexec=dict(type='str', + aliases=['samba_preexec'], + default=None), + sambaPublic=dict(type='bool', + aliases=['samba_public'], + default=False), + sambaSecurityMode=dict(type='str', + aliases=['samba_security_mode'], + default='0777'), + sambaStrictLocking=dict(type='str', + aliases=['samba_strict_locking'], + default='Auto'), + sambaVFSObjects=dict(type='str', + aliases=['samba_vfs_objects'], + default=None), + sambaValidUsers=dict(type='str', + aliases=['samba_valid_users'], + default=None), + sambaWriteList=dict(type='str', + aliases=['samba_write_list'], + default=None), + sambaWriteable=dict(type='bool', + aliases=['samba_writeable'], + default=True), + nfs_hosts=dict(type='list', + default=[]), + nfsCustomSettings=dict(type='list', + aliases=['nfs_custom_settings'], + default=[]), + state=dict(default='present', + choices=['present', 'absent'], + type='str') + ), + supports_check_mode=True, + required_if=([ + ('state', 'present', ['path', 'host', 'sambaName']) + ]) + ) + name = module.params['name'] + state = module.params['state'] + changed = False + diff = None + + obj = list(ldap_search( + '(&(objectClass=univentionShare)(cn={0}))'.format(name), + attr=['cn'] + )) + + exists = bool(len(obj)) + container = 'cn=shares,ou={0},{1}'.format(module.params['ou'], base_dn()) + dn = 'cn={0},{1}'.format(name, container) + + if state == 'present': + try: + if not exists: + obj = umc_module_for_add('shares/share', container) + else: + obj = umc_module_for_edit('shares/share', dn) + + module.params['printablename'] = '{0} ({1})'.format(name, module.params['host']) + for k in obj.keys(): + if module.params[k] is True: + module.params[k] = '1' + elif module.params[k] is False: + module.params[k] = '0' + obj[k] = module.params[k] + + diff = obj.diff() + if exists: + for k in obj.keys(): + if obj.hasChanged(k): + changed = True + else: + changed = True + if not module.check_mode: + if not exists: + obj.create() + elif changed: + obj.modify() + except Exception as err: + module.fail_json( + msg='Creating/editing share {0} in {1} failed: {2}'.format( + name, + container, + err, + ) + ) + + if state == 'absent' and exists: + try: + obj = umc_module_for_edit('shares/share', dn) + if not module.check_mode: + obj.remove() + changed = True + except Exception as err: + module.fail_json( + msg='Removing share {0} in {1} failed: {2}'.format( + name, + container, + err, + ) + ) + + module.exit_json( + changed=changed, + name=name, + diff=diff, + container=container + ) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/univention/udm_user.py b/plugins/modules/cloud/univention/udm_user.py new file mode 100644 index 0000000000..1dfe56a7b1 --- /dev/null +++ b/plugins/modules/cloud/univention/udm_user.py @@ -0,0 +1,525 @@ +#!/usr/bin/python +# -*- coding: UTF-8 -*- + +# Copyright: (c) 2016, Adfinis SyGroup AG +# Tobias Rueetschi +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: udm_user +author: +- Tobias Rüetschi (@keachi) +short_description: Manage posix users on a univention corporate server +description: + - "This module allows to manage posix users on a univention corporate 
+ server (UCS). + It uses the python API of the UCS to create a new object or edit it." +requirements: + - Python >= 2.6 +options: + state: + default: "present" + choices: [ present, absent ] + description: + - Whether the user is present or not. + username: + required: true + description: + - User name + aliases: ['name'] + firstname: + description: + - First name. Required if C(state=present). + lastname: + description: + - Last name. Required if C(state=present). + password: + description: + - Password. Required if C(state=present). + birthday: + description: + - Birthday + city: + description: + - City of users business address. + country: + description: + - Country of users business address. + department_number: + description: + - Department number of users business address. + aliases: [ departmentNumber ] + description: + description: + - Description (not gecos) + display_name: + description: + - Display name (not gecos) + aliases: [ displayName ] + email: + default: [] + description: + - A list of e-mail addresses. + employee_number: + description: + - Employee number + aliases: [ employeeNumber ] + employee_type: + description: + - Employee type + aliases: [ employeeType ] + gecos: + description: + - GECOS + groups: + default: [] + description: + - "POSIX groups, the LDAP DNs of the groups will be found with the + LDAP filter for each group as $GROUP: + C((&(objectClass=posixGroup)(cn=$GROUP)))." + home_share: + description: + - "Home NFS share. Must be a LDAP DN, e.g. + C(cn=home,cn=shares,ou=school,dc=example,dc=com)." + aliases: [ homeShare ] + home_share_path: + description: + - Path to home NFS share, inside the homeShare. + aliases: [ homeSharePath ] + home_telephone_number: + default: [] + description: + - List of private telephone numbers. + aliases: [ homeTelephoneNumber ] + homedrive: + description: + - Windows home drive, e.g. C("H:"). + mail_alternative_address: + default: [] + description: + - List of alternative e-mail addresses. + aliases: [ mailAlternativeAddress ] + mail_home_server: + description: + - FQDN of mail server + aliases: [ mailHomeServer ] + mail_primary_address: + description: + - Primary e-mail address + aliases: [ mailPrimaryAddress ] + mobile_telephone_number: + default: [] + description: + - Mobile phone number + aliases: [ mobileTelephoneNumber ] + organisation: + description: + - Organisation + aliases: [ organization ] + override_pw_history: + type: bool + default: 'no' + description: + - Override password history + aliases: [ overridePWHistory ] + override_pw_length: + type: bool + default: 'no' + description: + - Override password check + aliases: [ overridePWLength ] + pager_telephonenumber: + default: [] + description: + - List of pager telephone numbers. + aliases: [ pagerTelephonenumber ] + phone: + description: + - List of telephone numbers. + postcode: + description: + - Postal code of users business address. + primary_group: + default: cn=Domain Users,cn=groups,$LDAP_BASE_DN + description: + - Primary group. This must be the group LDAP DN. + aliases: [ primaryGroup ] + profilepath: + description: + - Windows profile directory + pwd_change_next_login: + choices: [ '0', '1' ] + description: + - Change password on next login. + aliases: [ pwdChangeNextLogin ] + room_number: + description: + - Room number of users business address. + aliases: [ roomNumber ] + samba_privileges: + description: + - "Samba privilege, like allow printer administration, do domain + join." 
+ aliases: [ sambaPrivileges ] + samba_user_workstations: + description: + - Allow the authentication only on this Microsoft Windows host. + aliases: [ sambaUserWorkstations ] + sambahome: + description: + - Windows home path, e.g. C('\\$FQDN\$USERNAME'). + scriptpath: + description: + - Windows logon script. + secretary: + default: [] + description: + - A list of superiors as LDAP DNs. + serviceprovider: + default: [] + description: + - Enable user for the following service providers. + shell: + default: '/bin/bash' + description: + - Login shell + street: + description: + - Street of users business address. + title: + description: + - Title, e.g. C(Prof.). + unixhome: + default: '/home/$USERNAME' + description: + - Unix home directory + userexpiry: + default: Today + 1 year + description: + - Account expiry date, e.g. C(1999-12-31). + position: + default: '' + description: + - "Define the whole position of users object inside the LDAP tree, + e.g. C(cn=employee,cn=users,ou=school,dc=example,dc=com)." + update_password: + default: always + description: + - "C(always) will update passwords if they differ. + C(on_create) will only set the password for newly created users." + ou: + default: '' + description: + - "Organizational Unit inside the LDAP Base DN, e.g. C(school) for + LDAP OU C(ou=school,dc=example,dc=com)." + subpath: + default: 'cn=users' + description: + - "LDAP subpath inside the organizational unit, e.g. + C(cn=teachers,cn=users) for LDAP container + C(cn=teachers,cn=users,dc=example,dc=com)." +''' + + +EXAMPLES = ''' +# Create a user on a UCS +- udm_user: + name: FooBar + password: secure_password + firstname: Foo + lastname: Bar + +# Create a user with the DN +# C(uid=foo,cn=teachers,cn=users,ou=school,dc=school,dc=example,dc=com) +- udm_user: + name: foo + password: secure_password + firstname: Foo + lastname: Bar + ou: school + subpath: 'cn=teachers,cn=users' +# or define the position +- udm_user: + name: foo + password: secure_password + firstname: Foo + lastname: Bar + position: 'cn=teachers,cn=users,ou=school,dc=school,dc=example,dc=com' +''' + + +RETURN = '''# ''' + +import crypt +from datetime import date, timedelta + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.univention_umc import ( + umc_module_for_add, + umc_module_for_edit, + ldap_search, + base_dn, +) + + +def main(): + expiry = date.strftime(date.today() + timedelta(days=365), "%Y-%m-%d") + module = AnsibleModule( + argument_spec=dict( + birthday=dict(default=None, + type='str'), + city=dict(default=None, + type='str'), + country=dict(default=None, + type='str'), + department_number=dict(default=None, + type='str', + aliases=['departmentNumber']), + description=dict(default=None, + type='str'), + display_name=dict(default=None, + type='str', + aliases=['displayName']), + email=dict(default=[''], + type='list'), + employee_number=dict(default=None, + type='str', + aliases=['employeeNumber']), + employee_type=dict(default=None, + type='str', + aliases=['employeeType']), + firstname=dict(default=None, + type='str'), + gecos=dict(default=None, + type='str'), + groups=dict(default=[], + type='list'), + home_share=dict(default=None, + type='str', + aliases=['homeShare']), + home_share_path=dict(default=None, + type='str', + aliases=['homeSharePath']), + home_telephone_number=dict(default=[], + type='list', + aliases=['homeTelephoneNumber']), + homedrive=dict(default=None, + type='str'), + lastname=dict(default=None, + type='str'), + 
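+            # Editorial aside (not part of the original module): most entries
+            # in this argument_spec keep the snake_case option name and expose
+            # the legacy UDM camelCase spelling as an alias (e.g.
+            # display_name/displayName); overridePWHistory and
+            # overridePWLength below are the reverse, with the camelCase form
+            # as the primary name.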
mail_alternative_address=dict(default=[], + type='list', + aliases=['mailAlternativeAddress']), + mail_home_server=dict(default=None, + type='str', + aliases=['mailHomeServer']), + mail_primary_address=dict(default=None, + type='str', + aliases=['mailPrimaryAddress']), + mobile_telephone_number=dict(default=[], + type='list', + aliases=['mobileTelephoneNumber']), + organisation=dict(default=None, + type='str', + aliases=['organization']), + overridePWHistory=dict(default=False, + type='bool', + aliases=['override_pw_history']), + overridePWLength=dict(default=False, + type='bool', + aliases=['override_pw_length']), + pager_telephonenumber=dict(default=[], + type='list', + aliases=['pagerTelephonenumber']), + password=dict(default=None, + type='str', + no_log=True), + phone=dict(default=[], + type='list'), + postcode=dict(default=None, + type='str'), + primary_group=dict(default=None, + type='str', + aliases=['primaryGroup']), + profilepath=dict(default=None, + type='str'), + pwd_change_next_login=dict(default=None, + type='str', + choices=['0', '1'], + aliases=['pwdChangeNextLogin']), + room_number=dict(default=None, + type='str', + aliases=['roomNumber']), + samba_privileges=dict(default=[], + type='list', + aliases=['sambaPrivileges']), + samba_user_workstations=dict(default=[], + type='list', + aliases=['sambaUserWorkstations']), + sambahome=dict(default=None, + type='str'), + scriptpath=dict(default=None, + type='str'), + secretary=dict(default=[], + type='list'), + serviceprovider=dict(default=[''], + type='list'), + shell=dict(default='/bin/bash', + type='str'), + street=dict(default=None, + type='str'), + title=dict(default=None, + type='str'), + unixhome=dict(default=None, + type='str'), + userexpiry=dict(default=expiry, + type='str'), + username=dict(required=True, + aliases=['name'], + type='str'), + position=dict(default='', + type='str'), + update_password=dict(default='always', + choices=['always', 'on_create'], + type='str'), + ou=dict(default='', + type='str'), + subpath=dict(default='cn=users', + type='str'), + state=dict(default='present', + choices=['present', 'absent'], + type='str') + ), + supports_check_mode=True, + required_if=([ + ('state', 'present', ['firstname', 'lastname', 'password']) + ]) + ) + username = module.params['username'] + position = module.params['position'] + ou = module.params['ou'] + subpath = module.params['subpath'] + state = module.params['state'] + changed = False + diff = None + + users = list(ldap_search( + '(&(objectClass=posixAccount)(uid={0}))'.format(username), + attr=['uid'] + )) + if position != '': + container = position + else: + if ou != '': + ou = 'ou={0},'.format(ou) + if subpath != '': + subpath = '{0},'.format(subpath) + container = '{0}{1}{2}'.format(subpath, ou, base_dn()) + user_dn = 'uid={0},{1}'.format(username, container) + + exists = bool(len(users)) + + if state == 'present': + try: + if not exists: + obj = umc_module_for_add('users/user', container) + else: + obj = umc_module_for_edit('users/user', user_dn) + + if module.params['displayName'] is None: + module.params['displayName'] = '{0} {1}'.format( + module.params['firstname'], + module.params['lastname'] + ) + if module.params['unixhome'] is None: + module.params['unixhome'] = '/home/{0}'.format( + module.params['username'] + ) + for k in obj.keys(): + if (k != 'password' and + k != 'groups' and + k != 'overridePWHistory' and + k in module.params and + module.params[k] is not None): + obj[k] = module.params[k] + # handle some special values + obj['e-mail'] = 
module.params['email'] + password = module.params['password'] + if obj['password'] is None: + obj['password'] = password + if module.params['update_password'] == 'always': + old_password = obj['password'].split('}', 2)[1] + if crypt.crypt(password, old_password) != old_password: + obj['overridePWHistory'] = module.params['overridePWHistory'] + obj['overridePWLength'] = module.params['overridePWLength'] + obj['password'] = password + + diff = obj.diff() + if exists: + for k in obj.keys(): + if obj.hasChanged(k): + changed = True + else: + changed = True + if not module.check_mode: + if not exists: + obj.create() + elif changed: + obj.modify() + except Exception: + module.fail_json( + msg="Creating/editing user {0} in {1} failed".format( + username, + container + ) + ) + try: + groups = module.params['groups'] + if groups: + filter = '(&(objectClass=posixGroup)(|(cn={0})))'.format( + ')(cn='.join(groups) + ) + group_dns = list(ldap_search(filter, attr=['dn'])) + for dn in group_dns: + grp = umc_module_for_edit('groups/group', dn[0]) + if user_dn not in grp['users']: + grp['users'].append(user_dn) + if not module.check_mode: + grp.modify() + changed = True + except Exception: + module.fail_json( + msg="Adding groups to user {0} failed".format(username) + ) + + if state == 'absent' and exists: + try: + obj = umc_module_for_edit('users/user', user_dn) + if not module.check_mode: + obj.remove() + changed = True + except Exception: + module.fail_json( + msg="Removing user {0} failed".format(username) + ) + + module.exit_json( + changed=changed, + username=username, + diff=diff, + container=container + ) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/vultr/_vultr_account_facts.py b/plugins/modules/cloud/vultr/_vultr_account_facts.py new file mode 100644 index 0000000000..98245b63c4 --- /dev/null +++ b/plugins/modules/cloud/vultr/_vultr_account_facts.py @@ -0,0 +1,140 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# (c) 2017, René Moser +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['deprecated'], + 'supported_by': 'community'} + +DOCUMENTATION = r''' +--- +module: vultr_account_facts +short_description: Gather facts about the Vultr account. +description: + - Gather facts about account balance, charges and payments. +deprecated: + removed_in: "2.12" + why: Transformed into an info module. + alternative: Use M(vultr_account_info) instead. +author: "René Moser (@resmo)" +extends_documentation_fragment: +- community.general.vultr + +''' + +EXAMPLES = r''' +- name: Gather Vultr account facts + local_action: + module: vultr_account_facts + +- name: Print the gathered facts + debug: + var: ansible_facts.vultr_account_facts +''' + +RETURN = r''' +--- +vultr_api: + description: Response from Vultr API with a few additions/modification + returned: success + type: complex + contains: + api_account: + description: Account used in the ini file to select the key + returned: success + type: str + sample: default + api_timeout: + description: Timeout used for the API requests + returned: success + type: int + sample: 60 + api_retries: + description: Amount of max retries for the API requests + returned: success + type: int + sample: 5 + api_retry_max_delay: + description: Exponential backoff delay in seconds between retries up to this max delay value. 
+ returned: success + type: int + sample: 12 + version_added: '2.9' + api_endpoint: + description: Endpoint used for the API requests + returned: success + type: str + sample: "https://api.vultr.com" +vultr_account_facts: + description: Response from Vultr API + returned: success + type: complex + contains: + balance: + description: Your account balance. + returned: success + type: float + sample: -214.69 + pending_charges: + description: Charges pending. + returned: success + type: float + sample: 57.03 + last_payment_date: + description: Date of the last payment. + returned: success + type: str + sample: "2017-08-26 12:47:48" + last_payment_amount: + description: The amount of the last payment transaction. + returned: success + type: float + sample: -250.0 +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.vultr import ( + Vultr, + vultr_argument_spec, +) + + +class AnsibleVultrAccountFacts(Vultr): + + def __init__(self, module): + super(AnsibleVultrAccountFacts, self).__init__(module, "vultr_account_facts") + + self.returns = { + 'balance': dict(convert_to='float'), + 'pending_charges': dict(convert_to='float'), + 'last_payment_date': dict(), + 'last_payment_amount': dict(convert_to='float'), + } + + def get_account_info(self): + return self.api_query(path="/v1/account/info") + + +def main(): + argument_spec = vultr_argument_spec() + + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + + account_facts = AnsibleVultrAccountFacts(module) + result = account_facts.get_result(account_facts.get_account_info()) + ansible_facts = { + 'vultr_account_facts': result['vultr_account_facts'] + } + module.exit_json(ansible_facts=ansible_facts, **result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/vultr/vr_account_facts.py b/plugins/modules/cloud/vultr/vr_account_facts.py new file mode 120000 index 0000000000..cbff6b7a82 --- /dev/null +++ b/plugins/modules/cloud/vultr/vr_account_facts.py @@ -0,0 +1 @@ +_vultr_account_facts.py \ No newline at end of file diff --git a/plugins/modules/cloud/vultr/vr_dns_domain.py b/plugins/modules/cloud/vultr/vr_dns_domain.py new file mode 120000 index 0000000000..8b4fc85f05 --- /dev/null +++ b/plugins/modules/cloud/vultr/vr_dns_domain.py @@ -0,0 +1 @@ +vultr_dns_domain.py \ No newline at end of file diff --git a/plugins/modules/cloud/vultr/vr_dns_record.py b/plugins/modules/cloud/vultr/vr_dns_record.py new file mode 120000 index 0000000000..8910005435 --- /dev/null +++ b/plugins/modules/cloud/vultr/vr_dns_record.py @@ -0,0 +1 @@ +vultr_dns_record.py \ No newline at end of file diff --git a/plugins/modules/cloud/vultr/vr_firewall_group.py b/plugins/modules/cloud/vultr/vr_firewall_group.py new file mode 120000 index 0000000000..8e94cad98a --- /dev/null +++ b/plugins/modules/cloud/vultr/vr_firewall_group.py @@ -0,0 +1 @@ +vultr_firewall_group.py \ No newline at end of file diff --git a/plugins/modules/cloud/vultr/vr_firewall_rule.py b/plugins/modules/cloud/vultr/vr_firewall_rule.py new file mode 120000 index 0000000000..51a0eb58af --- /dev/null +++ b/plugins/modules/cloud/vultr/vr_firewall_rule.py @@ -0,0 +1 @@ +vultr_firewall_rule.py \ No newline at end of file diff --git a/plugins/modules/cloud/vultr/vr_server.py b/plugins/modules/cloud/vultr/vr_server.py new file mode 120000 index 0000000000..e515d735f6 --- /dev/null +++ b/plugins/modules/cloud/vultr/vr_server.py @@ -0,0 +1 @@ +vultr_server.py \ No newline at end of file diff 
--git a/plugins/modules/cloud/vultr/vr_ssh_key.py b/plugins/modules/cloud/vultr/vr_ssh_key.py new file mode 120000 index 0000000000..f4014d4348 --- /dev/null +++ b/plugins/modules/cloud/vultr/vr_ssh_key.py @@ -0,0 +1 @@ +vultr_ssh_key.py \ No newline at end of file diff --git a/plugins/modules/cloud/vultr/vr_startup_script.py b/plugins/modules/cloud/vultr/vr_startup_script.py new file mode 120000 index 0000000000..00eb18340c --- /dev/null +++ b/plugins/modules/cloud/vultr/vr_startup_script.py @@ -0,0 +1 @@ +vultr_startup_script.py \ No newline at end of file diff --git a/plugins/modules/cloud/vultr/vr_user.py b/plugins/modules/cloud/vultr/vr_user.py new file mode 120000 index 0000000000..7c9c770fe0 --- /dev/null +++ b/plugins/modules/cloud/vultr/vr_user.py @@ -0,0 +1 @@ +vultr_user.py \ No newline at end of file diff --git a/plugins/modules/cloud/vultr/vultr_account_facts.py b/plugins/modules/cloud/vultr/vultr_account_facts.py new file mode 100644 index 0000000000..98245b63c4 --- /dev/null +++ b/plugins/modules/cloud/vultr/vultr_account_facts.py @@ -0,0 +1,140 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# (c) 2017, René Moser +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['deprecated'], + 'supported_by': 'community'} + +DOCUMENTATION = r''' +--- +module: vultr_account_facts +short_description: Gather facts about the Vultr account. +description: + - Gather facts about account balance, charges and payments. +deprecated: + removed_in: "2.12" + why: Transformed into an info module. + alternative: Use M(vultr_account_info) instead. +author: "René Moser (@resmo)" +extends_documentation_fragment: +- community.general.vultr + +''' + +EXAMPLES = r''' +- name: Gather Vultr account facts + local_action: + module: vultr_account_facts + +- name: Print the gathered facts + debug: + var: ansible_facts.vultr_account_facts +''' + +RETURN = r''' +--- +vultr_api: + description: Response from Vultr API with a few additions/modification + returned: success + type: complex + contains: + api_account: + description: Account used in the ini file to select the key + returned: success + type: str + sample: default + api_timeout: + description: Timeout used for the API requests + returned: success + type: int + sample: 60 + api_retries: + description: Amount of max retries for the API requests + returned: success + type: int + sample: 5 + api_retry_max_delay: + description: Exponential backoff delay in seconds between retries up to this max delay value. + returned: success + type: int + sample: 12 + version_added: '2.9' + api_endpoint: + description: Endpoint used for the API requests + returned: success + type: str + sample: "https://api.vultr.com" +vultr_account_facts: + description: Response from Vultr API + returned: success + type: complex + contains: + balance: + description: Your account balance. + returned: success + type: float + sample: -214.69 + pending_charges: + description: Charges pending. + returned: success + type: float + sample: 57.03 + last_payment_date: + description: Date of the last payment. + returned: success + type: str + sample: "2017-08-26 12:47:48" + last_payment_amount: + description: The amount of the last payment transaction. 
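+            # Editorial note (not part of the original docs): as the samples
+            # in this block show, the Vultr API reports account balance and
+            # payment amounts as negative floats.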
+ returned: success + type: float + sample: -250.0 +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.vultr import ( + Vultr, + vultr_argument_spec, +) + + +class AnsibleVultrAccountFacts(Vultr): + + def __init__(self, module): + super(AnsibleVultrAccountFacts, self).__init__(module, "vultr_account_facts") + + self.returns = { + 'balance': dict(convert_to='float'), + 'pending_charges': dict(convert_to='float'), + 'last_payment_date': dict(), + 'last_payment_amount': dict(convert_to='float'), + } + + def get_account_info(self): + return self.api_query(path="/v1/account/info") + + +def main(): + argument_spec = vultr_argument_spec() + + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + + account_facts = AnsibleVultrAccountFacts(module) + result = account_facts.get_result(account_facts.get_account_info()) + ansible_facts = { + 'vultr_account_facts': result['vultr_account_facts'] + } + module.exit_json(ansible_facts=ansible_facts, **result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/vultr/vultr_account_info.py b/plugins/modules/cloud/vultr/vultr_account_info.py new file mode 100644 index 0000000000..d46cd2ead0 --- /dev/null +++ b/plugins/modules/cloud/vultr/vultr_account_info.py @@ -0,0 +1,133 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (c) 2019, René Moser +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = r''' +--- +module: vultr_account_info +short_description: Get information about the Vultr account. +description: + - Get infos about account balance, charges and payments. +author: "René Moser (@resmo)" +extends_documentation_fragment: +- community.general.vultr + +''' + +EXAMPLES = r''' +- name: Get Vultr account infos + vultr_account_info: + register: result + +- name: Print the infos + debug: + var: result.vultr_account_info +''' + +RETURN = r''' +--- +vultr_api: + description: Response from Vultr API with a few additions/modification + returned: success + type: complex + contains: + api_account: + description: Account used in the ini file to select the key + returned: success + type: str + sample: default + api_timeout: + description: Timeout used for the API requests + returned: success + type: int + sample: 60 + api_retries: + description: Amount of max retries for the API requests + returned: success + type: int + sample: 5 + api_retry_max_delay: + description: Exponential backoff delay in seconds between retries up to this max delay value. + returned: success + type: int + sample: 12 + version_added: '2.9' + api_endpoint: + description: Endpoint used for the API requests + returned: success + type: str + sample: "https://api.vultr.com" +vultr_account_info: + description: Response from Vultr API + returned: success + type: complex + contains: + balance: + description: Your account balance. + returned: success + type: float + sample: -214.69 + pending_charges: + description: Charges pending. + returned: success + type: float + sample: 57.03 + last_payment_date: + description: Date of the last payment. + returned: success + type: str + sample: "2017-08-26 12:47:48" + last_payment_amount: + description: The amount of the last payment transaction. 
+ returned: success + type: float + sample: -250.0 +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.vultr import ( + Vultr, + vultr_argument_spec, +) + + +class AnsibleVultrAccountInfo(Vultr): + + def __init__(self, module): + super(AnsibleVultrAccountInfo, self).__init__(module, "vultr_account_info") + + self.returns = { + 'balance': dict(convert_to='float'), + 'pending_charges': dict(convert_to='float'), + 'last_payment_date': dict(), + 'last_payment_amount': dict(convert_to='float'), + } + + def get_account_info(self): + return self.api_query(path="/v1/account/info") + + +def main(): + argument_spec = vultr_argument_spec() + + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + + account_info = AnsibleVultrAccountInfo(module) + result = account_info.get_result(account_info.get_account_info()) + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/vultr/vultr_block_storage.py b/plugins/modules/cloud/vultr/vultr_block_storage.py new file mode 100644 index 0000000000..897f7673ec --- /dev/null +++ b/plugins/modules/cloud/vultr/vultr_block_storage.py @@ -0,0 +1,247 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# (c) 2018, Yanis Guenane +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: vultr_block_storage +short_description: Manages block storage volumes on Vultr. +description: + - Manage block storage volumes on Vultr. +author: "Yanis Guenane (@Spredzy)" +options: + name: + description: + - Name of the block storage volume. + required: true + aliases: [ description, label ] + size: + description: + - Size of the block storage volume in GB. + required: true + region: + description: + - Region the block storage volume is deployed into. + required: true + state: + description: + - State of the block storage volume. + default: present + choices: [ present, absent ] +extends_documentation_fragment: +- community.general.vultr + +''' + +EXAMPLES = ''' +- name: Ensure a block storage volume is present + local_action: + module: vultr_block_storage + name: myvolume + size: 10 + region: Amsterdam + +- name: Ensure a block storage volume is absent + local_action: + module: vultr_block_storage + name: myvolume + state: absent +''' + +RETURN = ''' +--- +vultr_api: + description: Response from Vultr API with a few additions/modification + returned: success + type: complex + contains: + api_account: + description: Account used in the ini file to select the key + returned: success + type: str + sample: default + api_timeout: + description: Timeout used for the API requests + returned: success + type: int + sample: 60 + api_retries: + description: Amount of max retries for the API requests + returned: success + type: int + sample: 5 + api_retry_max_delay: + description: Exponential backoff delay in seconds between retries up to this max delay value. 
+ returned: success + type: int + sample: 12 + version_added: '2.9' + api_endpoint: + description: Endpoint used for the API requests + returned: success + type: str + sample: "https://api.vultr.com" +vultr_block_storage: + description: Response from Vultr API + returned: success + type: complex + contains: + attached_to_id: + description: The ID of the server the volume is attached to + returned: success + type: str + sample: "10194376" + cost_per_month: + description: Cost per month for the volume + returned: success + type: float + sample: 1.00 + date_created: + description: Date when the volume was created + returned: success + type: str + sample: "2017-08-26 12:47:48" + id: + description: ID of the block storage volume + returned: success + type: str + sample: "1234abcd" + name: + description: Name of the volume + returned: success + type: str + sample: "ansible-test-volume" + region: + description: Region the volume was deployed into + returned: success + type: str + sample: "New Jersey" + size: + description: Information about the volume size in GB + returned: success + type: int + sample: 10 + status: + description: Status about the deployment of the volume + returned: success + type: str + sample: "active" + +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.vultr import ( + Vultr, + vultr_argument_spec, +) + + +class AnsibleVultrBlockStorage(Vultr): + + def __init__(self, module): + super(AnsibleVultrBlockStorage, self).__init__(module, "vultr_block_storage") + + self.returns = { + 'SUBID': dict(key='id'), + 'label': dict(key='name'), + 'DCID': dict(key='region', transform=self._get_region_name), + 'attached_to_SUBID': dict(key='attached_to_id'), + 'cost_per_month': dict(convert_to='float'), + 'date_created': dict(), + 'size_gb': dict(key='size', convert_to='int'), + 'status': dict() + } + + def _get_region_name(self, region): + return self.get_region(region, 'DCID').get('name') + + def get_block_storage_volumes(self): + volumes = self.api_query(path="/v1/block/list") + if volumes: + for volume in volumes: + if volume.get('label') == self.module.params.get('name'): + return volume + return {} + + def present_block_storage_volume(self): + volume = self.get_block_storage_volumes() + if not volume: + volume = self._create_block_storage_volume(volume) + return volume + + def _create_block_storage_volume(self, volume): + self.result['changed'] = True + data = { + 'label': self.module.params.get('name'), + 'DCID': self.get_region().get('DCID'), + 'size_gb': self.module.params.get('size') + } + self.result['diff']['before'] = {} + self.result['diff']['after'] = data + + if not self.module.check_mode: + self.api_query( + path="/v1/block/create", + method="POST", + data=data + ) + volume = self.get_block_storage_volumes() + return volume + + def absent_block_storage_volume(self): + volume = self.get_block_storage_volumes() + if volume: + self.result['changed'] = True + + data = { + 'SUBID': volume['SUBID'], + } + + self.result['diff']['before'] = volume + self.result['diff']['after'] = {} + + if not self.module.check_mode: + self.api_query( + path="/v1/block/delete", + method="POST", + data=data + ) + return volume + + +def main(): + argument_spec = vultr_argument_spec() + argument_spec.update(dict( + name=dict(required=True, aliases=['description', 'label']), + size=dict(type='int'), + region=dict(), + state=dict(choices=['present', 'absent'], default='present'), + )) + + module = AnsibleModule( + 
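+        # Editorial aside (not part of the original module): required_if below
+        # makes size and region mandatory only for state=present, so removing
+        # a volume needs nothing beyond its name and state=absent.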
argument_spec=argument_spec, + supports_check_mode=True, + required_if=[['state', 'present', ['size', 'region']]] + ) + + vultr_block_storage = AnsibleVultrBlockStorage(module) + if module.params.get('state') == "absent": + volume = vultr_block_storage.absent_block_storage_volume() + else: + volume = vultr_block_storage.present_block_storage_volume() + + result = vultr_block_storage.get_result(volume) + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/vultr/vultr_block_storage_facts.py b/plugins/modules/cloud/vultr/vultr_block_storage_facts.py new file mode 100644 index 0000000000..8b1a7161dd --- /dev/null +++ b/plugins/modules/cloud/vultr/vultr_block_storage_facts.py @@ -0,0 +1,138 @@ +#!/usr/bin/python +# +# (c) 2018, Yanis Guenane +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['deprecated'], + 'supported_by': 'community'} + +DOCUMENTATION = r''' +--- +module: vultr_block_storage_facts +short_description: Gather facts about the Vultr block storage volumes available. +description: + - Gather facts about block storage volumes available in Vultr. +author: "Yanis Guenane (@Spredzy)" +deprecated: + removed_in: "2.12" + why: Transformed into an info module. + alternative: Use M(vultr_block_storage_info) instead. +extends_documentation_fragment: +- community.general.vultr + +''' + +EXAMPLES = r''' +- name: Gather Vultr block storage volumes facts + local_action: + module: vultr_block_storage_facts + +- name: Print the gathered facts + debug: + var: ansible_facts.vultr_block_storage_facts +''' + +RETURN = r''' +--- +vultr_api: + description: Response from Vultr API with a few additions/modification + returned: success + type: complex + contains: + api_account: + description: Account used in the ini file to select the key + returned: success + type: str + sample: default + api_timeout: + description: Timeout used for the API requests + returned: success + type: int + sample: 60 + api_retries: + description: Amount of max retries for the API requests + returned: success + type: int + sample: 5 + api_retry_max_delay: + description: Exponential backoff delay in seconds between retries up to this max delay value. 
+      returned: success
+      type: int
+      sample: 12
+      version_added: '2.9'
+    api_endpoint:
+      description: Endpoint used for the API requests
+      returned: success
+      type: str
+      sample: "https://api.vultr.com"
+vultr_block_storage_facts:
+  description: Response from Vultr API
+  returned: success
+  type: complex
+  sample:
+    "vultr_block_storage_facts": [
+      {
+        "attached_to_id": null,
+        "cost_per_month": 1.0,
+        "date_created": "2018-07-24 12:59:59",
+        "id": 17332323,
+        "name": "ansible-test-volume",
+        "region": "New Jersey",
+        "size": 10,
+        "status": "active"
+      }
+    ]
+'''

+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.vultr import (
+    Vultr,
+    vultr_argument_spec,
+)
+
+
+class AnsibleVultrBlockStorageFacts(Vultr):
+
+    def __init__(self, module):
+        super(AnsibleVultrBlockStorageFacts, self).__init__(module, "vultr_block_storage_facts")
+
+        self.returns = {
+            'attached_to_SUBID': dict(key='attached_to_id'),
+            'cost_per_month': dict(convert_to='float'),
+            'date_created': dict(),
+            'SUBID': dict(key='id'),
+            'label': dict(key='name'),
+            'DCID': dict(key='region', transform=self._get_region_name),
+            'size_gb': dict(key='size', convert_to='int'),
+            'status': dict()
+        }
+
+    def _get_region_name(self, region):
+        return self.get_region(region, 'DCID').get('name')
+
+    def get_block_storage_volumes(self):
+        return self.api_query(path="/v1/block/list")
+
+
+def main():
+    argument_spec = vultr_argument_spec()
+
+    module = AnsibleModule(
+        argument_spec=argument_spec,
+        supports_check_mode=True,
+    )
+
+    volume_facts = AnsibleVultrBlockStorageFacts(module)
+    result = volume_facts.get_result(volume_facts.get_block_storage_volumes())
+    ansible_facts = {
+        'vultr_block_storage_facts': result['vultr_block_storage_facts']
+    }
+    module.exit_json(ansible_facts=ansible_facts, **result)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/cloud/vultr/vultr_block_storage_info.py b/plugins/modules/cloud/vultr/vultr_block_storage_info.py
new file mode 100644
index 0000000000..0fe0c6ecd8
--- /dev/null
+++ b/plugins/modules/cloud/vultr/vultr_block_storage_info.py
@@ -0,0 +1,163 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2018, Yanis Guenane
+# Copyright (c) 2019, René Moser
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+DOCUMENTATION = r'''
+---
+module: vultr_block_storage_info
+short_description: Get information about the Vultr block storage volumes available.
+description:
+  - Get information about block storage volumes available in Vultr.
+author:
+  - "Yanis Guenane (@Spredzy)"
+  - "René Moser (@resmo)"
+extends_documentation_fragment:
+- community.general.vultr
+
+'''
+
+EXAMPLES = r'''
+- name: Get Vultr block storage information
+  vultr_block_storage_info:
+  register: result
+
+- name: Print the gathered information
+  debug:
+    var: result.vultr_block_storage_info
+'''
+
+RETURN = r'''
+---
+vultr_api:
+  description: Response from Vultr API with a few additions/modification
+  returned: success
+  type: complex
+  contains:
+    api_account:
+      description: Account used in the ini file to select the key
+      returned: success
+      type: str
+      sample: default
+    api_timeout:
+      description: Timeout used for the API requests
+      returned: success
+      type: int
+      sample: 60
+    api_retries:
+      description: Amount of max retries for the API requests
+      returned: success
+      type: int
+      sample: 5
+    api_retry_max_delay:
+      description: Exponential backoff delay in seconds between retries up to this max delay value.
+      returned: success
+      type: int
+      sample: 12
+      version_added: '2.9'
+    api_endpoint:
+      description: Endpoint used for the API requests
+      returned: success
+      type: str
+      sample: "https://api.vultr.com"
+vultr_block_storage_info:
+  description: Response from Vultr API as list
+  returned: success
+  type: complex
+  contains:
+    id:
+      description: ID of the block storage.
+      returned: success
+      type: int
+      sample: 17332323
+    size:
+      description: Size in GB of the block storage.
+      returned: success
+      type: int
+      sample: 10
+    region:
+      description: Region the block storage is located in.
+      returned: success
+      type: str
+      sample: New Jersey
+    name:
+      description: Name of the block storage.
+      returned: success
+      type: str
+      sample: my volume
+    cost_per_month:
+      description: Cost per month of the block storage.
+      returned: success
+      type: float
+      sample: 1.0
+    date_created:
+      description: Date the block storage was created.
+      returned: success
+      type: str
+      sample: "2018-07-24 12:59:59"
+    status:
+      description: Status of the block storage.
+      returned: success
+      type: str
+      sample: active
+    attached_to_id:
+      description: Block storage is attached to this server ID.
+ returned: success + type: str + sample: null +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.vultr import ( + Vultr, + vultr_argument_spec, +) + + +class AnsibleVultrBlockStorageFacts(Vultr): + + def __init__(self, module): + super(AnsibleVultrBlockStorageFacts, self).__init__(module, "vultr_block_storage_info") + + self.returns = { + 'attached_to_SUBID': dict(key='attached_to_id'), + 'cost_per_month': dict(convert_to='float'), + 'date_created': dict(), + 'SUBID': dict(key='id'), + 'label': dict(key='name'), + 'DCID': dict(key='region', transform=self._get_region_name), + 'size_gb': dict(key='size', convert_to='int'), + 'status': dict() + } + + def _get_region_name(self, region): + return self.get_region(region, 'DCID').get('name') + + def get_block_storage_volumes(self): + return self.api_query(path="/v1/block/list") + + +def main(): + argument_spec = vultr_argument_spec() + + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + + volume_info = AnsibleVultrBlockStorageFacts(module) + result = volume_info.get_result(volume_info.get_block_storage_volumes()) + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/vultr/vultr_dns_domain.py b/plugins/modules/cloud/vultr/vultr_dns_domain.py new file mode 100644 index 0000000000..c31d2d3ffa --- /dev/null +++ b/plugins/modules/cloud/vultr/vultr_dns_domain.py @@ -0,0 +1,204 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# (c) 2017, René Moser +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = r''' +--- +module: vultr_dns_domain +short_description: Manages DNS domains on Vultr. +description: + - Create and remove DNS domains. +author: "René Moser (@resmo)" +options: + name: + description: + - The domain name. + required: true + aliases: [ domain ] + server_ip: + description: + - The default server IP. + - Use M(vultr_dns_record) to change it once the domain is created. + - Required if C(state=present). + state: + description: + - State of the DNS domain. + default: present + choices: [ present, absent ] +extends_documentation_fragment: +- community.general.vultr + +''' + +EXAMPLES = r''' +- name: Ensure a domain exists + local_action: + module: vultr_dns_domain + name: example.com + server_ip: 10.10.10.10 + +- name: Ensure a domain is absent + local_action: + module: vultr_dns_domain + name: example.com + state: absent +''' + +RETURN = r''' +--- +vultr_api: + description: Response from Vultr API with a few additions/modification + returned: success + type: complex + contains: + api_account: + description: Account used in the ini file to select the key + returned: success + type: str + sample: default + api_timeout: + description: Timeout used for the API requests + returned: success + type: int + sample: 60 + api_retries: + description: Amount of max retries for the API requests + returned: success + type: int + sample: 5 + api_retry_max_delay: + description: Exponential backoff delay in seconds between retries up to this max delay value. 
+ returned: success + type: int + sample: 12 + version_added: '2.9' + api_endpoint: + description: Endpoint used for the API requests + returned: success + type: str + sample: "https://api.vultr.com" +vultr_dns_domain: + description: Response from Vultr API + returned: success + type: complex + contains: + name: + description: Name of the DNS Domain. + returned: success + type: str + sample: example.com + date_created: + description: Date the DNS domain was created. + returned: success + type: str + sample: "2017-08-26 12:47:48" +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.vultr import ( + Vultr, + vultr_argument_spec, +) + + +class AnsibleVultrDnsDomain(Vultr): + + def __init__(self, module): + super(AnsibleVultrDnsDomain, self).__init__(module, "vultr_dns_domain") + + self.returns = { + 'domain': dict(key='name'), + 'date_created': dict(), + } + + def get_domain(self): + domains = self.api_query(path="/v1/dns/list") + name = self.module.params.get('name').lower() + if domains: + for domain in domains: + if domain.get('domain').lower() == name: + return domain + return {} + + def present_domain(self): + domain = self.get_domain() + if not domain: + domain = self._create_domain(domain) + return domain + + def _create_domain(self, domain): + self.result['changed'] = True + data = { + 'domain': self.module.params.get('name'), + 'serverip': self.module.params.get('server_ip'), + } + self.result['diff']['before'] = {} + self.result['diff']['after'] = data + + if not self.module.check_mode: + self.api_query( + path="/v1/dns/create_domain", + method="POST", + data=data + ) + domain = self.get_domain() + return domain + + def absent_domain(self): + domain = self.get_domain() + if domain: + self.result['changed'] = True + + data = { + 'domain': domain['domain'], + } + + self.result['diff']['before'] = domain + self.result['diff']['after'] = {} + + if not self.module.check_mode: + self.api_query( + path="/v1/dns/delete_domain", + method="POST", + data=data + ) + return domain + + +def main(): + argument_spec = vultr_argument_spec() + argument_spec.update(dict( + name=dict(required=True, aliases=['domain']), + server_ip=dict(), + state=dict(choices=['present', 'absent'], default='present'), + )) + + module = AnsibleModule( + argument_spec=argument_spec, + required_if=[ + ('state', 'present', ['server_ip']), + ], + supports_check_mode=True, + ) + + vultr_domain = AnsibleVultrDnsDomain(module) + if module.params.get('state') == "absent": + domain = vultr_domain.absent_domain() + else: + domain = vultr_domain.present_domain() + + result = vultr_domain.get_result(domain) + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/vultr/vultr_dns_domain_facts.py b/plugins/modules/cloud/vultr/vultr_dns_domain_facts.py new file mode 100644 index 0000000000..9fc90ebeb6 --- /dev/null +++ b/plugins/modules/cloud/vultr/vultr_dns_domain_facts.py @@ -0,0 +1,123 @@ +#!/usr/bin/python +# +# (c) 2018, Yanis Guenane +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['deprecated'], + 'supported_by': 'community'} + +DOCUMENTATION = r''' +--- +module: vultr_dns_domain_facts +deprecated: + removed_in: '2.13' + why: Deprecated in favour of C(_info) module. 
+  alternative: Use M(vultr_dns_domain_info) instead.
+short_description: Gather facts about the Vultr DNS domains available.
+description:
+  - Gather facts about DNS domains available in Vultr.
+author: "Yanis Guenane (@Spredzy)"
+extends_documentation_fragment:
+- community.general.vultr
+
+'''
+
+EXAMPLES = r'''
+- name: Gather Vultr DNS domains facts
+  local_action:
+    module: vultr_dns_domain_facts
+
+- name: Print the gathered facts
+  debug:
+    var: ansible_facts.vultr_dns_domain_facts
+'''
+
+RETURN = r'''
+---
+vultr_api:
+  description: Response from Vultr API with a few additions/modification
+  returned: success
+  type: complex
+  contains:
+    api_account:
+      description: Account used in the ini file to select the key
+      returned: success
+      type: str
+      sample: default
+    api_timeout:
+      description: Timeout used for the API requests
+      returned: success
+      type: int
+      sample: 60
+    api_retries:
+      description: Amount of max retries for the API requests
+      returned: success
+      type: int
+      sample: 5
+    api_retry_max_delay:
+      description: Exponential backoff delay in seconds between retries up to this max delay value.
+      returned: success
+      type: int
+      sample: 12
+      version_added: '2.9'
+    api_endpoint:
+      description: Endpoint used for the API requests
+      returned: success
+      type: str
+      sample: "https://api.vultr.com"
+vultr_dns_domain_facts:
+  description: Response from Vultr API
+  returned: success
+  type: complex
+  sample:
+    "vultr_dns_domain_facts": [
+      {
+        "date_created": "2018-07-19 07:14:21",
+        "domain": "ansibletest.com"
+      }
+    ]
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.vultr import (
+    Vultr,
+    vultr_argument_spec,
+)
+
+
+class AnsibleVultrDnsDomainFacts(Vultr):
+
+    def __init__(self, module):
+        super(AnsibleVultrDnsDomainFacts, self).__init__(module, "vultr_dns_domain_facts")
+
+        self.returns = {
+            "date_created": dict(),
+            "domain": dict(),
+        }
+
+    def get_domains(self):
+        return self.api_query(path="/v1/dns/list")
+
+
+def main():
+    argument_spec = vultr_argument_spec()
+
+    module = AnsibleModule(
+        argument_spec=argument_spec,
+        supports_check_mode=True,
+    )
+
+    domain_facts = AnsibleVultrDnsDomainFacts(module)
+    result = domain_facts.get_result(domain_facts.get_domains())
+    ansible_facts = {
+        'vultr_dns_domain_facts': result['vultr_dns_domain_facts']
+    }
+    module.exit_json(ansible_facts=ansible_facts, **result)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/cloud/vultr/vultr_dns_domain_info.py b/plugins/modules/cloud/vultr/vultr_dns_domain_info.py
new file mode 100644
index 0000000000..b8e810efe8
--- /dev/null
+++ b/plugins/modules/cloud/vultr/vultr_dns_domain_info.py
@@ -0,0 +1,117 @@
+#!/usr/bin/python
+#
+# (c) 2018, Yanis Guenane
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+DOCUMENTATION = r'''
+---
+module: vultr_dns_domain_info
+short_description: Gather information about the Vultr DNS domains available.
+description:
+  - Gather information about DNS domains available in Vultr.
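+# Unlike the deprecated vultr_dns_domain_facts module above, this module
+# returns its results only in the registered variable, not as Ansible facts.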
+author: "Yanis Guenane (@Spredzy)" +extends_documentation_fragment: +- community.general.vultr + +''' + +EXAMPLES = r''' +- name: Gather Vultr DNS domains information + local_action: + module: vultr_dns_domains_info + register: result + +- name: Print the gathered information + debug: + var: result.vultr_dns_domain_info +''' + +RETURN = r''' +--- +vultr_api: + description: Response from Vultr API with a few additions/modification + returned: success + type: complex + contains: + api_account: + description: Account used in the ini file to select the key + returned: success + type: str + sample: default + api_timeout: + description: Timeout used for the API requests + returned: success + type: int + sample: 60 + api_retries: + description: Amount of max retries for the API requests + returned: success + type: int + sample: 5 + api_retry_max_delay: + description: Exponential backoff delay in seconds between retries up to this max delay value. + returned: success + type: int + sample: 12 + version_added: '2.9' + api_endpoint: + description: Endpoint used for the API requests + returned: success + type: str + sample: "https://api.vultr.com" +vultr_dns_domain_info: + description: Response from Vultr API + returned: success + type: complex + sample: + "vultr_dns_domain_info": [ + { + "date_created": "2018-07-19 07:14:21", + "domain": "ansibletest.com" + } + ] +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.vultr import ( + Vultr, + vultr_argument_spec, +) + + +class AnsibleVultrDnsDomainInfo(Vultr): + + def __init__(self, module): + super(AnsibleVultrDnsDomainInfo, self).__init__(module, "vultr_dns_domain_info") + + self.returns = { + "date_created": dict(), + "domain": dict(), + } + + def get_domains(self): + return self.api_query(path="/v1/dns/list") + + +def main(): + argument_spec = vultr_argument_spec() + + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + + domain_info = AnsibleVultrDnsDomainInfo(module) + result = domain_info.get_result(domain_info.get_domains()) + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/vultr/vultr_dns_record.py b/plugins/modules/cloud/vultr/vultr_dns_record.py new file mode 100644 index 0000000000..3d455d851a --- /dev/null +++ b/plugins/modules/cloud/vultr/vultr_dns_record.py @@ -0,0 +1,373 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# (c) 2017, René Moser +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: vultr_dns_record +short_description: Manages DNS records on Vultr. +description: + - Create, update and remove DNS records. +author: "René Moser (@resmo)" +options: + name: + description: + - The record name (subrecord). + default: "" + aliases: [ subrecord ] + domain: + description: + - The domain the record is related to. + required: true + record_type: + description: + - Type of the record. + default: A + choices: + - A + - AAAA + - CNAME + - MX + - SRV + - CAA + - TXT + - NS + - SSHFP + aliases: [ type ] + data: + description: + - Data of the record. + - Required if C(state=present) or C(multiple=yes). + ttl: + description: + - TTL of the record. 
+    default: 300
+  multiple:
+    description:
+      - Whether to use more than one record with similar C(name) including no name and C(record_type).
+      - Only allowed for a few record types, e.g. C(record_type=A), C(record_type=NS) or C(record_type=MX).
+      - C(data) will not be updated, instead it is used as a key to find existing records.
+    default: no
+    type: bool
+  priority:
+    description:
+      - Priority of the record.
+    default: 0
+  state:
+    description:
+      - State of the DNS record.
+    default: present
+    choices: [ present, absent ]
+extends_documentation_fragment:
+- community.general.vultr

+'''
+
+EXAMPLES = '''
+- name: Ensure an A record exists
+  vultr_dns_record:
+    name: www
+    domain: example.com
+    data: 10.10.10.10
+    ttl: 3600
+
+- name: Ensure a second A record exists for round robin LB
+  vultr_dns_record:
+    name: www
+    domain: example.com
+    data: 10.10.10.11
+    ttl: 60
+    multiple: yes
+
+- name: Ensure a CNAME record exists
+  vultr_dns_record:
+    name: web
+    record_type: CNAME
+    domain: example.com
+    data: www.example.com
+
+- name: Ensure MX record exists
+  vultr_dns_record:
+    record_type: MX
+    domain: example.com
+    data: "{{ item.data }}"
+    priority: "{{ item.priority }}"
+    multiple: yes
+  with_items:
+    - { data: mx1.example.com, priority: 10 }
+    - { data: mx2.example.com, priority: 10 }
+    - { data: mx3.example.com, priority: 20 }
+
+- name: Ensure a record is absent
+  local_action:
+    module: vultr_dns_record
+    name: www
+    domain: example.com
+    state: absent
+
+- name: Ensure MX record is absent in case multiple exists
+  vultr_dns_record:
+    record_type: MX
+    domain: example.com
+    data: mx1.example.com
+    multiple: yes
+    state: absent
+'''
+
+RETURN = '''
+---
+vultr_api:
+  description: Response from Vultr API with a few additions/modification
+  returned: success
+  type: complex
+  contains:
+    api_account:
+      description: Account used in the ini file to select the key
+      returned: success
+      type: str
+      sample: default
+    api_timeout:
+      description: Timeout used for the API requests
+      returned: success
+      type: int
+      sample: 60
+    api_retries:
+      description: Amount of max retries for the API requests
+      returned: success
+      type: int
+      sample: 5
+    api_retry_max_delay:
+      description: Exponential backoff delay in seconds between retries up to this max delay value.
+      returned: success
+      type: int
+      sample: 12
+      version_added: '2.9'
+    api_endpoint:
+      description: Endpoint used for the API requests
+      returned: success
+      type: str
+      sample: "https://api.vultr.com"
+vultr_dns_record:
+  description: Response from Vultr API
+  returned: success
+  type: complex
+  contains:
+    id:
+      description: The ID of the DNS record.
+      returned: success
+      type: int
+      sample: 1265277
+    name:
+      description: The name of the DNS record.
+      returned: success
+      type: str
+      sample: web
+    record_type:
+      description: The type of the DNS record.
+      returned: success
+      type: str
+      sample: CNAME
+    data:
+      description: Data of the DNS record.
+      returned: success
+      type: str
+      sample: 10.10.10.10
+    domain:
+      description: Domain the DNS record is related to.
+      returned: success
+      type: str
+      sample: example.com
+    priority:
+      description: Priority of the DNS record.
+      returned: success
+      type: int
+      sample: 10
+    ttl:
+      description: Time to live of the DNS record.
+ returned: success + type: int + sample: 300 +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.vultr import ( + Vultr, + vultr_argument_spec, +) + +RECORD_TYPES = [ + 'A', + 'AAAA', + 'CNAME', + 'MX', + 'TXT', + 'NS', + 'SRV', + 'CAA', + 'SSHFP' +] + + +class AnsibleVultrDnsRecord(Vultr): + + def __init__(self, module): + super(AnsibleVultrDnsRecord, self).__init__(module, "vultr_dns_record") + + self.returns = { + 'RECORDID': dict(key='id'), + 'name': dict(), + 'record': dict(), + 'priority': dict(), + 'data': dict(), + 'type': dict(key='record_type'), + 'ttl': dict(), + } + + def get_record(self): + records = self.api_query(path="/v1/dns/records?domain=%s" % self.module.params.get('domain')) + + multiple = self.module.params.get('multiple') + data = self.module.params.get('data') + name = self.module.params.get('name') + record_type = self.module.params.get('record_type') + + result = {} + for record in records or []: + if record.get('type') != record_type: + continue + + if record.get('name') == name: + if not multiple: + if result: + self.module.fail_json(msg="More than one record with record_type=%s and name=%s params. " + "Use multiple=yes for more than one record." % (record_type, name)) + else: + result = record + elif record.get('data') == data: + return record + + return result + + def present_record(self): + record = self.get_record() + if not record: + record = self._create_record(record) + else: + record = self._update_record(record) + return record + + def _create_record(self, record): + self.result['changed'] = True + data = { + 'name': self.module.params.get('name'), + 'domain': self.module.params.get('domain'), + 'data': self.module.params.get('data'), + 'type': self.module.params.get('record_type'), + 'priority': self.module.params.get('priority'), + 'ttl': self.module.params.get('ttl'), + } + self.result['diff']['before'] = {} + self.result['diff']['after'] = data + + if not self.module.check_mode: + self.api_query( + path="/v1/dns/create_record", + method="POST", + data=data + ) + record = self.get_record() + return record + + def _update_record(self, record): + data = { + 'RECORDID': record['RECORDID'], + 'name': self.module.params.get('name'), + 'domain': self.module.params.get('domain'), + 'data': self.module.params.get('data'), + 'type': self.module.params.get('record_type'), + 'priority': self.module.params.get('priority'), + 'ttl': self.module.params.get('ttl'), + } + has_changed = [k for k in data if k in record and data[k] != record[k]] + if has_changed: + self.result['changed'] = True + + self.result['diff']['before'] = record + self.result['diff']['after'] = record.copy() + self.result['diff']['after'].update(data) + + if not self.module.check_mode: + self.api_query( + path="/v1/dns/update_record", + method="POST", + data=data + ) + record = self.get_record() + return record + + def absent_record(self): + record = self.get_record() + if record: + self.result['changed'] = True + + data = { + 'RECORDID': record['RECORDID'], + 'domain': self.module.params.get('domain'), + } + + self.result['diff']['before'] = record + self.result['diff']['after'] = {} + + if not self.module.check_mode: + self.api_query( + path="/v1/dns/delete_record", + method="POST", + data=data + ) + return record + + +def main(): + argument_spec = vultr_argument_spec() + argument_spec.update(dict( + domain=dict(required=True), + name=dict(default="", aliases=['subrecord']), + state=dict(choices=['present', 'absent'], 
default='present'), + ttl=dict(type='int', default=300), + record_type=dict(choices=RECORD_TYPES, default='A', aliases=['type']), + multiple=dict(type='bool', default=False), + priority=dict(type='int', default=0), + data=dict() + )) + + module = AnsibleModule( + argument_spec=argument_spec, + required_if=[ + ('state', 'present', ['data']), + ('multiple', True, ['data']), + ], + + supports_check_mode=True, + ) + + vultr_record = AnsibleVultrDnsRecord(module) + if module.params.get('state') == "absent": + record = vultr_record.absent_record() + else: + record = vultr_record.present_record() + + result = vultr_record.get_result(record) + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/vultr/vultr_firewall_group.py b/plugins/modules/cloud/vultr/vultr_firewall_group.py new file mode 100644 index 0000000000..d433eda4a0 --- /dev/null +++ b/plugins/modules/cloud/vultr/vultr_firewall_group.py @@ -0,0 +1,204 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# (c) 2017, René Moser +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: vultr_firewall_group +short_description: Manages firewall groups on Vultr. +description: + - Create and remove firewall groups. +author: "René Moser (@resmo)" +options: + name: + description: + - Name of the firewall group. + required: true + aliases: [ description ] + state: + description: + - State of the firewall group. + default: present + choices: [ present, absent ] +extends_documentation_fragment: +- community.general.vultr + +''' + +EXAMPLES = ''' +- name: ensure a firewall group is present + local_action: + module: vultr_firewall_group + name: my http firewall + +- name: ensure a firewall group is absent + local_action: + module: vultr_firewall_group + name: my http firewall + state: absent +''' + +RETURN = ''' +--- +vultr_api: + description: Response from Vultr API with a few additions/modification + returned: success + type: complex + contains: + api_account: + description: Account used in the ini file to select the key + returned: success + type: str + sample: default + api_timeout: + description: Timeout used for the API requests + returned: success + type: int + sample: 60 + api_retries: + description: Amount of max retries for the API requests + returned: success + type: int + sample: 5 + api_retry_max_delay: + description: Exponential backoff delay in seconds between retries up to this max delay value. 
+ returned: success + type: int + sample: 12 + version_added: '2.9' + api_endpoint: + description: Endpoint used for the API requests + returned: success + type: str + sample: "https://api.vultr.com" +vultr_firewall_group: + description: Response from Vultr API + returned: success + type: complex + contains: + id: + description: ID of the firewall group + returned: success + type: str + sample: 1234abcd + name: + description: Name of the firewall group + returned: success + type: str + sample: my firewall group + date_created: + description: Date the firewall group was created + returned: success + type: str + sample: "2017-08-26 12:47:48" + date_modified: + description: Date the firewall group was modified + returned: success + type: str + sample: "2017-08-26 12:47:48" +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.vultr import ( + Vultr, + vultr_argument_spec, +) + + +class AnsibleVultrFirewallGroup(Vultr): + + def __init__(self, module): + super(AnsibleVultrFirewallGroup, self).__init__(module, "vultr_firewall_group") + + self.returns = { + 'FIREWALLGROUPID': dict(key='id'), + 'description': dict(key='name'), + 'date_created': dict(), + 'date_modified': dict(), + } + + def get_firewall_group(self): + firewall_groups = self.api_query(path="/v1/firewall/group_list") + if firewall_groups: + for firewall_group_id, firewall_group_data in firewall_groups.items(): + if firewall_group_data.get('description') == self.module.params.get('name'): + return firewall_group_data + return {} + + def present_firewall_group(self): + firewall_group = self.get_firewall_group() + if not firewall_group: + firewall_group = self._create_firewall_group(firewall_group) + return firewall_group + + def _create_firewall_group(self, firewall_group): + self.result['changed'] = True + data = { + 'description': self.module.params.get('name'), + } + self.result['diff']['before'] = {} + self.result['diff']['after'] = data + + if not self.module.check_mode: + self.api_query( + path="/v1/firewall/group_create", + method="POST", + data=data + ) + firewall_group = self.get_firewall_group() + return firewall_group + + def absent_firewall_group(self): + firewall_group = self.get_firewall_group() + if firewall_group: + self.result['changed'] = True + + data = { + 'FIREWALLGROUPID': firewall_group['FIREWALLGROUPID'], + } + + self.result['diff']['before'] = firewall_group + self.result['diff']['after'] = {} + + if not self.module.check_mode: + self.api_query( + path="/v1/firewall/group_delete", + method="POST", + data=data + ) + return firewall_group + + +def main(): + argument_spec = vultr_argument_spec() + argument_spec.update(dict( + name=dict(required=True, aliases=['description']), + state=dict(choices=['present', 'absent'], default='present'), + )) + + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + + vultr_firewall_group = AnsibleVultrFirewallGroup(module) + if module.params.get('state') == "absent": + firewall_group = vultr_firewall_group.absent_firewall_group() + else: + firewall_group = vultr_firewall_group.present_firewall_group() + + result = vultr_firewall_group.get_result(firewall_group) + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/vultr/vultr_firewall_group_facts.py b/plugins/modules/cloud/vultr/vultr_firewall_group_facts.py new file mode 100644 index 0000000000..e2c2429171 --- /dev/null +++ 
b/plugins/modules/cloud/vultr/vultr_firewall_group_facts.py @@ -0,0 +1,140 @@ +#!/usr/bin/python +# +# (c) 2018, Yanis Guenane +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['deprecated'], + 'supported_by': 'community'} + +DOCUMENTATION = r''' +--- +module: vultr_firewall_group_facts +deprecated: + removed_in: '2.13' + why: Deprecated in favour of C(_info) module. + alternative: Use M(vultr_firewall_group_info) instead. +short_description: Gather facts about the Vultr firewall groups available. +description: + - Gather facts about firewall groups available in Vultr. +author: "Yanis Guenane (@Spredzy)" +extends_documentation_fragment: +- community.general.vultr + +''' + +EXAMPLES = r''' +- name: Gather Vultr firewall groups facts + local_action: + module: vultr_firewall_group_facts + +- name: Print the gathered facts + debug: + var: ansible_facts.vultr_firewall_group_facts +''' + +RETURN = r''' +--- +vultr_api: + description: Response from Vultr API with a few additions/modification + returned: success + type: complex + contains: + api_account: + description: Account used in the ini file to select the key + returned: success + type: str + sample: default + api_timeout: + description: Timeout used for the API requests + returned: success + type: int + sample: 60 + api_retries: + description: Amount of max retries for the API requests + returned: success + type: int + sample: 5 + api_retry_max_delay: + description: Exponential backoff delay in seconds between retries up to this max delay value. + returned: success + type: int + sample: 12 + version_added: '2.9' + api_endpoint: + description: Endpoint used for the API requests + returned: success + type: str + sample: "https://api.vultr.com" +vultr_firewall_group_facts: + description: Response from Vultr API + returned: success + type: complex + sample: + "vultr_firewall_group_facts": [ + { + "date_created": "2018-07-12 10:27:14", + "date_modified": "2018-07-12 10:27:14", + "description": "test", + "id": "5e128ff0", + "instance_count": 0, + "max_rule_count": 50, + "rule_count": 0 + } + ] +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.vultr import ( + Vultr, + vultr_argument_spec, +) + + +class AnsibleVultrFirewallGroupFacts(Vultr): + + def __init__(self, module): + super(AnsibleVultrFirewallGroupFacts, self).__init__(module, "vultr_firewall_group_facts") + + self.returns = { + "FIREWALLGROUPID": dict(key='id'), + "date_created": dict(), + "date_modified": dict(), + "description": dict(), + "instance_count": dict(convert_to='int'), + "max_rule_count": dict(convert_to='int'), + "rule_count": dict(convert_to='int') + } + + def get_firewall_group(self): + return self.api_query(path="/v1/firewall/group_list") + + +def parse_fw_group_list(fwgroups_list): + if not fwgroups_list: + return [] + + return [group for id, group in fwgroups_list.items()] + + +def main(): + argument_spec = vultr_argument_spec() + + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + + fw_group_facts = AnsibleVultrFirewallGroupFacts(module) + result = fw_group_facts.get_result(parse_fw_group_list(fw_group_facts.get_firewall_group())) + ansible_facts = { + 'vultr_firewall_group_facts': result['vultr_firewall_group_facts'] + } + 
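+    # The data is returned twice on purpose: under ansible_facts for consumers
+    # of the deprecated facts interface, and in the plain result so it can
+    # also be picked up with register:.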
module.exit_json(ansible_facts=ansible_facts, **result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/vultr/vultr_firewall_group_info.py b/plugins/modules/cloud/vultr/vultr_firewall_group_info.py new file mode 100644 index 0000000000..57c7b7eed1 --- /dev/null +++ b/plugins/modules/cloud/vultr/vultr_firewall_group_info.py @@ -0,0 +1,134 @@ +#!/usr/bin/python +# +# (c) 2018, Yanis Guenane +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = r''' +--- +module: vultr_firewall_group_info +short_description: Gather information about the Vultr firewall groups available. +description: + - Gather information about firewall groups available in Vultr. +author: "Yanis Guenane (@Spredzy)" +extends_documentation_fragment: +- community.general.vultr + +''' + +EXAMPLES = r''' +- name: Gather Vultr firewall groups information + local_action: + module: vultr_firewall_group_info + register: result + +- name: Print the gathered information + debug: + var: result.vultr_firewall_group_info +''' + +RETURN = r''' +--- +vultr_api: + description: Response from Vultr API with a few additions/modification + returned: success + type: complex + contains: + api_account: + description: Account used in the ini file to select the key + returned: success + type: str + sample: default + api_timeout: + description: Timeout used for the API requests + returned: success + type: int + sample: 60 + api_retries: + description: Amount of max retries for the API requests + returned: success + type: int + sample: 5 + api_retry_max_delay: + description: Exponential backoff delay in seconds between retries up to this max delay value. 
+ returned: success + type: int + sample: 12 + version_added: '2.9' + api_endpoint: + description: Endpoint used for the API requests + returned: success + type: str + sample: "https://api.vultr.com" +vultr_firewall_group_info: + description: Response from Vultr API + returned: success + type: complex + sample: + "vultr_firewall_group_info": [ + { + "date_created": "2018-07-12 10:27:14", + "date_modified": "2018-07-12 10:27:14", + "description": "test", + "id": "5e128ff0", + "instance_count": 0, + "max_rule_count": 50, + "rule_count": 0 + } + ] +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.vultr import ( + Vultr, + vultr_argument_spec, +) + + +class AnsibleVultrFirewallGroupInfo(Vultr): + + def __init__(self, module): + super(AnsibleVultrFirewallGroupInfo, self).__init__(module, "vultr_firewall_group_info") + + self.returns = { + "FIREWALLGROUPID": dict(key='id'), + "date_created": dict(), + "date_modified": dict(), + "description": dict(), + "instance_count": dict(convert_to='int'), + "max_rule_count": dict(convert_to='int'), + "rule_count": dict(convert_to='int') + } + + def get_firewall_group(self): + return self.api_query(path="/v1/firewall/group_list") + + +def parse_fw_group_list(fwgroups_list): + if not fwgroups_list: + return [] + + return [group for id, group in fwgroups_list.items()] + + +def main(): + argument_spec = vultr_argument_spec() + + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + + fw_group_info = AnsibleVultrFirewallGroupInfo(module) + result = fw_group_info.get_result(parse_fw_group_list(fw_group_info.get_firewall_group())) + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/vultr/vultr_firewall_rule.py b/plugins/modules/cloud/vultr/vultr_firewall_rule.py new file mode 100644 index 0000000000..32e5d91064 --- /dev/null +++ b/plugins/modules/cloud/vultr/vultr_firewall_rule.py @@ -0,0 +1,384 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# (c) 2017, René Moser +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: vultr_firewall_rule +short_description: Manages firewall rules on Vultr. +description: + - Create and remove firewall rules. +author: "René Moser (@resmo)" +options: + group: + description: + - Name of the firewall group. + required: true + ip_version: + description: + - IP address version + choices: [ v4, v6 ] + default: v4 + aliases: [ ip_type ] + protocol: + description: + - Protocol of the firewall rule. + choices: [ icmp, tcp, udp, gre ] + default: tcp + cidr: + description: + - Network in CIDR format + - The CIDR format must match with the C(ip_version) value. + - Required if C(state=present). + - Defaulted to 0.0.0.0/0 or ::/0 depending on C(ip_version). + start_port: + description: + - Start port for the firewall rule. + - Required if C(protocol) is tcp or udp and I(state=present). + aliases: [ port ] + end_port: + description: + - End port for the firewall rule. + - Only considered if C(protocol) is tcp or udp and I(state=present). + state: + description: + - State of the firewall rule. 
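+    # Note: the v1 API offers no rule update; a rule is matched on its exact
+    # attributes, so changing protocol, ports or CIDR creates a new rule.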
+    default: present
+    choices: [ present, absent ]
+extends_documentation_fragment:
+- community.general.vultr

+'''
+
+EXAMPLES = '''
+- name: ensure a firewall rule is present
+  local_action:
+    module: vultr_firewall_rule
+    group: application
+    protocol: tcp
+    start_port: 8000
+    end_port: 9000
+    cidr: 17.17.17.0/24
+
+- name: open DNS port for all ipv4 and ipv6
+  local_action:
+    module: vultr_firewall_rule
+    group: dns
+    protocol: udp
+    port: 53
+    ip_version: "{{ item }}"
+  with_items: [ v4, v6 ]
+
+- name: allow ping
+  local_action:
+    module: vultr_firewall_rule
+    group: web
+    protocol: icmp
+
+- name: ensure a firewall rule is absent
+  local_action:
+    module: vultr_firewall_rule
+    group: application
+    protocol: tcp
+    start_port: 8000
+    end_port: 9000
+    cidr: 17.17.17.0/24
+    state: absent
+'''
+
+RETURN = '''
+---
+vultr_api:
+  description: Response from Vultr API with a few additions/modification
+  returned: success
+  type: complex
+  contains:
+    api_account:
+      description: Account used in the ini file to select the key
+      returned: success
+      type: str
+      sample: default
+    api_timeout:
+      description: Timeout used for the API requests
+      returned: success
+      type: int
+      sample: 60
+    api_retries:
+      description: Amount of max retries for the API requests
+      returned: success
+      type: int
+      sample: 5
+    api_retry_max_delay:
+      description: Exponential backoff delay in seconds between retries up to this max delay value.
+      returned: success
+      type: int
+      sample: 12
+      version_added: '2.9'
+    api_endpoint:
+      description: Endpoint used for the API requests
+      returned: success
+      type: str
+      sample: "https://api.vultr.com"
+vultr_firewall_rule:
+  description: Response from Vultr API
+  returned: success
+  type: complex
+  contains:
+    rule_number:
+      description: Rule number of the firewall rule
+      returned: success
+      type: int
+      sample: 2
+    action:
+      description: Action of the firewall rule
+      returned: success
+      type: str
+      sample: accept
+    protocol:
+      description: Protocol of the firewall rule
+      returned: success
+      type: str
+      sample: tcp
+    start_port:
+      description: Start port of the firewall rule
+      returned: success and protocol is tcp or udp
+      type: int
+      sample: 80
+    end_port:
+      description: End port of the firewall rule
+      returned: success and when port range and protocol is tcp or udp
+      type: int
+      sample: 8080
+    cidr:
+      description: CIDR of the firewall rule (IPv4 or IPv6)
+      returned: success
+      type: str
+      sample: 0.0.0.0/0
+    group:
+      description: Firewall group the rule belongs to.
+ returned: success + type: str + sample: web +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.vultr import ( + Vultr, + vultr_argument_spec, +) + + +class AnsibleVultrFirewallRule(Vultr): + + def __init__(self, module): + super(AnsibleVultrFirewallRule, self).__init__(module, "vultr_firewall_rule") + + self.returns = { + 'rulenumber': dict(key='rule_number'), + 'action': dict(), + 'protocol': dict(), + 'start_port': dict(convert_to='int'), + 'end_port': dict(convert_to='int'), + 'cidr': dict(), + 'group': dict(), + } + self.firewall_group = None + + def get_firewall_group(self): + if self.firewall_group is not None: + return self.firewall_group + + firewall_groups = self.api_query(path="/v1/firewall/group_list") + if firewall_groups: + for firewall_group_id, firewall_group_data in firewall_groups.items(): + if firewall_group_data.get('description') == self.module.params.get('group'): + self.firewall_group = firewall_group_data + return self.firewall_group + self.fail_json(msg="Firewall group not found: %s" % self.module.params.get('group')) + + def _transform_cidr(self): + cidr = self.module.params.get('cidr') + ip_version = self.module.params.get('ip_version') + if cidr is None: + if ip_version == "v6": + cidr = "::/0" + else: + cidr = "0.0.0.0/0" + elif cidr.count('/') != 1: + self.fail_json(msg="CIDR has an invalid format: %s" % cidr) + + return cidr.split('/') + + def get_firewall_rule(self): + ip_version = self.module.params.get('ip_version') + firewall_group_id = self.get_firewall_group()['FIREWALLGROUPID'] + + firewall_rules = self.api_query( + path="/v1/firewall/rule_list" + "?FIREWALLGROUPID=%s" + "&direction=in" + "&ip_type=%s" + % (firewall_group_id, ip_version)) + + if firewall_rules: + subnet, subnet_size = self._transform_cidr() + + for firewall_rule_id, firewall_rule_data in firewall_rules.items(): + if firewall_rule_data.get('protocol') != self.module.params.get('protocol'): + continue + + if ip_version == 'v4' and (firewall_rule_data.get('subnet') or "0.0.0.0") != subnet: + continue + + if ip_version == 'v6' and (firewall_rule_data.get('subnet') or "::") != subnet: + continue + + if int(firewall_rule_data.get('subnet_size')) != int(subnet_size): + continue + + if firewall_rule_data.get('protocol') in ['tcp', 'udp']: + rule_port = firewall_rule_data.get('port') + + end_port = self.module.params.get('end_port') + start_port = self.module.params.get('start_port') + + # Port range "8000 - 8080" from the API + if ' - ' in rule_port: + if end_port is None: + continue + + port_range = "%s - %s" % (start_port, end_port) + if rule_port == port_range: + return firewall_rule_data + + # Single port + elif int(rule_port) == start_port: + return firewall_rule_data + + else: + return firewall_rule_data + + return {} + + def present_firewall_rule(self): + firewall_rule = self.get_firewall_rule() + if not firewall_rule: + firewall_rule = self._create_firewall_rule(firewall_rule) + return firewall_rule + + def _create_firewall_rule(self, firewall_rule): + protocol = self.module.params.get('protocol') + if protocol in ['tcp', 'udp']: + start_port = self.module.params.get('start_port') + + if start_port is None: + self.module.fail_on_missing_params(['start_port']) + + end_port = self.module.params.get('end_port') + if end_port is not None: + + if start_port >= end_port: + self.module.fail_json(msg="end_port must be higher than start_port") + + port_range = "%s:%s" % (start_port, end_port) + else: + port_range = 
start_port + else: + port_range = None + + self.result['changed'] = True + + subnet, subnet_size = self._transform_cidr() + + data = { + 'FIREWALLGROUPID': self.get_firewall_group()['FIREWALLGROUPID'], + 'direction': 'in', # currently the only option + 'ip_type': self.module.params.get('ip_version'), + 'protocol': protocol, + 'subnet': subnet, + 'subnet_size': subnet_size, + 'port': port_range + } + + self.result['diff']['after'] = data + + if not self.module.check_mode: + self.api_query( + path="/v1/firewall/rule_create", + method="POST", + data=data + ) + firewall_rule = self.get_firewall_rule() + return firewall_rule + + def absent_firewall_rule(self): + firewall_rule = self.get_firewall_rule() + if firewall_rule: + self.result['changed'] = True + + data = { + 'FIREWALLGROUPID': self.get_firewall_group()['FIREWALLGROUPID'], + 'rulenumber': firewall_rule['rulenumber'] + } + + self.result['diff']['before'] = firewall_rule + + if not self.module.check_mode: + self.api_query( + path="/v1/firewall/rule_delete", + method="POST", + data=data + ) + return firewall_rule + + def get_result(self, resource): + if resource: + if 'port' in resource and resource['protocol'] in ['tcp', 'udp']: + if ' - ' in resource['port']: + resource['start_port'], resource['end_port'] = resource['port'].split(' - ') + else: + resource['start_port'] = resource['port'] + if 'subnet' in resource: + resource['cidr'] = "%s/%s" % (resource['subnet'], resource['subnet_size']) + resource['group'] = self.get_firewall_group()['description'] + return super(AnsibleVultrFirewallRule, self).get_result(resource) + + +def main(): + argument_spec = vultr_argument_spec() + argument_spec.update(dict( + group=dict(required=True), + start_port=dict(type='int', aliases=['port']), + end_port=dict(type='int'), + protocol=dict(choices=['tcp', 'udp', 'gre', 'icmp'], default='tcp'), + cidr=dict(), + ip_version=dict(choices=['v4', 'v6'], default='v4', aliases=['ip_type']), + state=dict(choices=['present', 'absent'], default='present'), + )) + + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + + vultr_firewall_rule = AnsibleVultrFirewallRule(module) + if module.params.get('state') == "absent": + firewall_rule = vultr_firewall_rule.absent_firewall_rule() + else: + firewall_rule = vultr_firewall_rule.present_firewall_rule() + + result = vultr_firewall_rule.get_result(firewall_rule) + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/vultr/vultr_network.py b/plugins/modules/cloud/vultr/vultr_network.py new file mode 100644 index 0000000000..917afdc1d3 --- /dev/null +++ b/plugins/modules/cloud/vultr/vultr_network.py @@ -0,0 +1,233 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# (c) 2018, Yanis Guenane +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: vultr_network +short_description: Manages networks on Vultr. +description: + - Manage networks on Vultr. A network cannot be updated. It needs to be deleted and re-created. +author: "Yanis Guenane (@Spredzy)" +options: + name: + description: + - Name of the network. + required: true + aliases: [ description, label ] + cidr: + description: + - The CIDR IPv4 network block to be used when attaching servers to this network. 
Required if I(state=present). + region: + description: + - Region the network is deployed into. Required if I(state=present). + state: + description: + - State of the network. + default: present + choices: [ present, absent ] +extends_documentation_fragment: +- community.general.vultr + +''' + +EXAMPLES = ''' +- name: Ensure a network is present + local_action: + module: vultr_network + name: mynet + cidr: 192.168.42.0/24 + region: Amsterdam + +- name: Ensure a network is absent + local_action: + module: vultr_network + name: mynet + state: absent +''' + +RETURN = ''' +--- +vultr_api: + description: Response from Vultr API with a few additions/modification + returned: success + type: complex + contains: + api_account: + description: Account used in the ini file to select the key + returned: success + type: str + sample: default + api_timeout: + description: Timeout used for the API requests + returned: success + type: int + sample: 60 + api_retries: + description: Amount of max retries for the API requests + returned: success + type: int + sample: 5 + api_retry_max_delay: + description: Exponential backoff delay in seconds between retries up to this max delay value. + returned: success + type: int + sample: 12 + version_added: '2.9' + api_endpoint: + description: Endpoint used for the API requests + returned: success + type: str + sample: "https://api.vultr.com" +vultr_network: + description: Response from Vultr API + returned: success + type: complex + contains: + id: + description: ID of the network + returned: success + type: str + sample: "net5b62c6dc63ef5" + name: + description: Name (label) of the network + returned: success + type: str + sample: "mynetwork" + date_created: + description: Date when the network was created + returned: success + type: str + sample: "2018-08-02 08:54:52" + region: + description: Region the network was deployed into + returned: success + type: str + sample: "Amsterdam" + v4_subnet: + description: IPv4 Network address + returned: success + type: str + sample: "192.168.42.0" + v4_subnet_mask: + description: Ipv4 Network mask + returned: success + type: int + sample: 24 +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.vultr import ( + Vultr, + vultr_argument_spec, +) + + +class AnsibleVultrNetwork(Vultr): + + def __init__(self, module): + super(AnsibleVultrNetwork, self).__init__(module, "vultr_network") + + self.returns = { + 'NETWORKID': dict(key='id'), + 'DCID': dict(key='region', transform=self._get_region_name), + 'date_created': dict(), + 'description': dict(key='name'), + 'v4_subnet': dict(), + 'v4_subnet_mask': dict(convert_to='int'), + } + + def _get_region_name(self, region_id=None): + return self.get_region().get('name') + + def get_network(self): + networks = self.api_query(path="/v1/network/list") + if networks: + for id, network in networks.items(): + if network.get('description') == self.module.params.get('name'): + return network + return {} + + def present_network(self): + network = self.get_network() + if not network: + network = self._create_network(network) + return network + + def _create_network(self, network): + self.result['changed'] = True + data = { + 'description': self.module.params.get('name'), + 'DCID': self.get_region()['DCID'], + 'v4_subnet': self.module.params.get('cidr').split('/')[0], + 'v4_subnet_mask': self.module.params.get('cidr').split('/')[1] + } + self.result['diff']['before'] = {} + self.result['diff']['after'] = data + + if not 
self.module.check_mode: + self.api_query( + path="/v1/network/create", + method="POST", + data=data + ) + network = self.get_network() + return network + + def absent_network(self): + network = self.get_network() + if network: + self.result['changed'] = True + + data = { + 'NETWORKID': network['NETWORKID'], + } + + self.result['diff']['before'] = network + self.result['diff']['after'] = {} + + if not self.module.check_mode: + self.api_query( + path="/v1/network/destroy", + method="POST", + data=data + ) + return network + + +def main(): + argument_spec = vultr_argument_spec() + argument_spec.update(dict( + name=dict(required=True, aliases=['description', 'label']), + cidr=dict(), + region=dict(), + state=dict(choices=['present', 'absent'], default='present'), + )) + + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + required_if=[['state', 'present', ['cidr', 'region']]] + ) + + vultr_network = AnsibleVultrNetwork(module) + if module.params.get('state') == "absent": + network = vultr_network.absent_network() + else: + network = vultr_network.present_network() + + result = vultr_network.get_result(network) + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/vultr/vultr_network_facts.py b/plugins/modules/cloud/vultr/vultr_network_facts.py new file mode 100644 index 0000000000..1a75824863 --- /dev/null +++ b/plugins/modules/cloud/vultr/vultr_network_facts.py @@ -0,0 +1,146 @@ +#!/usr/bin/python +# +# (c) 2018, Yanis Guenane +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['deprecated'], + 'supported_by': 'community'} + +DOCUMENTATION = r''' +--- +module: vultr_network_facts +deprecated: + removed_in: '2.13' + why: Deprecated in favour of C(_info) module. + alternative: Use M(vultr_network_info) instead. +short_description: Gather facts about the Vultr networks available. +description: + - Gather facts about networks available in Vultr. +author: "Yanis Guenane (@Spredzy)" +extends_documentation_fragment: +- community.general.vultr + +''' + +EXAMPLES = r''' +- name: Gather Vultr networks facts + local_action: + module: vultr_network_facts + +- name: Print the gathered facts + debug: + var: ansible_facts.vultr_network_facts +''' + +RETURN = r''' +--- +vultr_api: + description: Response from Vultr API with a few additions/modification + returned: success + type: complex + contains: + api_account: + description: Account used in the ini file to select the key + returned: success + type: str + sample: default + api_timeout: + description: Timeout used for the API requests + returned: success + type: int + sample: 60 + api_retries: + description: Amount of max retries for the API requests + returned: success + type: int + sample: 5 + api_retry_max_delay: + description: Exponential backoff delay in seconds between retries up to this max delay value. 
+ returned: success + type: int + sample: 12 + version_added: '2.9' + api_endpoint: + description: Endpoint used for the API requests + returned: success + type: str + sample: "https://api.vultr.com" +vultr_network_facts: + description: Response from Vultr API + returned: success + type: complex + sample: + "vultr_network_facts": [ + { + "date_created": "2018-08-02 11:18:49", + "id": "net5b62e8991adfg", + "name": "mynet", + "region": "Amsterdam", + "v4_subnet": "192.168.42.0", + "v4_subnet_mask": 24 + } + ] +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.vultr import ( + Vultr, + vultr_argument_spec, +) + + +class AnsibleVultrNetworkFacts(Vultr): + + def __init__(self, module): + super(AnsibleVultrNetworkFacts, self).__init__(module, "vultr_network_facts") + + self.returns = { + 'DCID': dict(key='region', transform=self._get_region_name), + 'NETWORKID': dict(key='id'), + 'date_created': dict(), + 'description': dict(key='name'), + 'v4_subnet': dict(), + 'v4_subnet_mask': dict(convert_to='int'), + } + + def _get_region_name(self, region): + return self.query_resource_by_key( + key='DCID', + value=region, + resource='regions', + use_cache=True + )['name'] + + def get_networks(self): + return self.api_query(path="/v1/network/list") + + +def parse_network_list(network_list): + if isinstance(network_list, list): + return [] + + return [network for id, network in network_list.items()] + + +def main(): + argument_spec = vultr_argument_spec() + + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + + network_facts = AnsibleVultrNetworkFacts(module) + result = network_facts.get_result(parse_network_list(network_facts.get_networks())) + ansible_facts = { + 'vultr_network_facts': result['vultr_network_facts'] + } + module.exit_json(ansible_facts=ansible_facts, **result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/vultr/vultr_network_info.py b/plugins/modules/cloud/vultr/vultr_network_info.py new file mode 100644 index 0000000000..3fdeaeced0 --- /dev/null +++ b/plugins/modules/cloud/vultr/vultr_network_info.py @@ -0,0 +1,140 @@ +#!/usr/bin/python +# +# (c) 2018, Yanis Guenane +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = r''' +--- +module: vultr_network_info +short_description: Gather information about the Vultr networks available. +description: + - Gather information about networks available in Vultr. 
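
The `isinstance(network_list, list)` guard in `parse_network_list()` above appears to reflect a quirk of the Vultr v1 API: `/v1/network/list` returns an empty JSON list when no networks exist, and a JSON object keyed by `NETWORKID` otherwise. A minimal sketch of both shapes (the sample payload is hypothetical):

```python
# Hypothetical payloads showing the two shapes /v1/network/list can return.
empty_response = []  # no networks yet: a bare JSON list
populated_response = {  # otherwise: an object keyed by NETWORKID
    'net5b62c6dc63ef5': {'description': 'mynet', 'v4_subnet': '192.168.42.0'},
}


def parse_network_list(network_list):
    # Same logic as the module above: normalize both shapes to a list of dicts.
    if isinstance(network_list, list):
        return []
    return [network for id, network in network_list.items()]


assert parse_network_list(empty_response) == []
assert parse_network_list(populated_response)[0]['description'] == 'mynet'
```
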
+author: "Yanis Guenane (@Spredzy)" +extends_documentation_fragment: +- community.general.vultr + +''' + +EXAMPLES = r''' +- name: Gather Vultr networks information + local_action: + module: vultr_network_info + register: result + +- name: Print the gathered information + debug: + var: result.vultr_network_info +''' + +RETURN = r''' +--- +vultr_api: + description: Response from Vultr API with a few additions/modification + returned: success + type: complex + contains: + api_account: + description: Account used in the ini file to select the key + returned: success + type: str + sample: default + api_timeout: + description: Timeout used for the API requests + returned: success + type: int + sample: 60 + api_retries: + description: Amount of max retries for the API requests + returned: success + type: int + sample: 5 + api_retry_max_delay: + description: Exponential backoff delay in seconds between retries up to this max delay value. + returned: success + type: int + sample: 12 + version_added: '2.9' + api_endpoint: + description: Endpoint used for the API requests + returned: success + type: str + sample: "https://api.vultr.com" +vultr_network_info: + description: Response from Vultr API + returned: success + type: complex + sample: + "vultr_network_info": [ + { + "date_created": "2018-08-02 11:18:49", + "id": "net5b62e8991adfg", + "name": "mynet", + "region": "Amsterdam", + "v4_subnet": "192.168.42.0", + "v4_subnet_mask": 24 + } + ] +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.vultr import ( + Vultr, + vultr_argument_spec, +) + + +class AnsibleVultrNetworkInfo(Vultr): + + def __init__(self, module): + super(AnsibleVultrNetworkInfo, self).__init__(module, "vultr_network_info") + + self.returns = { + 'DCID': dict(key='region', transform=self._get_region_name), + 'NETWORKID': dict(key='id'), + 'date_created': dict(), + 'description': dict(key='name'), + 'v4_subnet': dict(), + 'v4_subnet_mask': dict(convert_to='int'), + } + + def _get_region_name(self, region): + return self.query_resource_by_key( + key='DCID', + value=region, + resource='regions', + use_cache=True + )['name'] + + def get_networks(self): + return self.api_query(path="/v1/network/list") + + +def parse_network_list(network_list): + if isinstance(network_list, list): + return [] + + return [network for id, network in network_list.items()] + + +def main(): + argument_spec = vultr_argument_spec() + + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + + network_info = AnsibleVultrNetworkInfo(module) + result = network_info.get_result(parse_network_list(network_info.get_networks())) + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/vultr/vultr_os_facts.py b/plugins/modules/cloud/vultr/vultr_os_facts.py new file mode 100644 index 0000000000..14f9c09ecd --- /dev/null +++ b/plugins/modules/cloud/vultr/vultr_os_facts.py @@ -0,0 +1,134 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# (c) 2018, Yanis Guenane +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['deprecated'], + 'supported_by': 'community'} + +DOCUMENTATION = r''' +--- +module: vultr_os_facts +short_description: Gather facts about the Vultr OSes available. 
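
Note how the deprecated `_facts` modules and their `_info` replacements expose results differently: the facts variants call `module.exit_json(ansible_facts=..., **result)`, so data lands in `ansible_facts` automatically, while the info variants only return the result, which must be registered. A side-by-side usage sketch:

```yaml
# Deprecated facts pattern: results are injected into ansible_facts.
- name: Gather networks via the facts module
  vultr_network_facts:
  delegate_to: localhost

- debug:
    var: ansible_facts.vultr_network_facts

# Info pattern: nothing is stored unless the result is registered.
- name: Gather networks via the info module
  vultr_network_info:
  delegate_to: localhost
  register: result

- debug:
    var: result.vultr_network_info
```
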
+description: + - Gather facts about OSes available to boot servers. +author: "Yanis Guenane (@Spredzy)" +deprecated: + removed_in: "2.12" + why: Transformed into an info module. + alternative: Use M(vultr_os_info) instead. +extends_documentation_fragment: +- community.general.vultr + +''' + +EXAMPLES = r''' +- name: Gather Vultr OSes facts + local_action: + module: vultr_os_facts + +- name: Print the gathered facts + debug: + var: ansible_facts.vultr_os_facts +''' + +RETURN = r''' +--- +vultr_api: + description: Response from Vultr API with a few additions/modification + returned: success + type: complex + contains: + api_account: + description: Account used in the ini file to select the key + returned: success + type: str + sample: default + api_timeout: + description: Timeout used for the API requests + returned: success + type: int + sample: 60 + api_retries: + description: Amount of max retries for the API requests + returned: success + type: int + sample: 5 + api_retry_max_delay: + description: Exponential backoff delay in seconds between retries up to this max delay value. + returned: success + type: int + sample: 12 + version_added: '2.9' + api_endpoint: + description: Endpoint used for the API requests + returned: success + type: str + sample: "https://api.vultr.com" +ansible_facts: + description: Response from Vultr API + returned: success + type: complex + sample: + "vultr_os_facts": [ + { + "arch": "x64", + "family": "openbsd", + "id": 234, + "name": "OpenBSD 6 x64", + "windows": false + } + ] +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.vultr import ( + Vultr, + vultr_argument_spec, +) + + +class AnsibleVultrOSFacts(Vultr): + + def __init__(self, module): + super(AnsibleVultrOSFacts, self).__init__(module, "vultr_os_facts") + + self.returns = { + "OSID": dict(key='id', convert_to='int'), + "arch": dict(), + "family": dict(), + "name": dict(), + "windows": dict(convert_to='bool') + } + + def get_oses(self): + return self.api_query(path="/v1/os/list") + + +def parse_oses_list(oses_list): + return [os for id, os in oses_list.items()] + + +def main(): + argument_spec = vultr_argument_spec() + + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + + os_facts = AnsibleVultrOSFacts(module) + result = os_facts.get_result(parse_oses_list(os_facts.get_oses())) + ansible_facts = { + 'vultr_os_facts': result['vultr_os_facts'] + } + module.exit_json(ansible_facts=ansible_facts, **result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/vultr/vultr_os_info.py b/plugins/modules/cloud/vultr/vultr_os_info.py new file mode 100644 index 0000000000..8a95719e50 --- /dev/null +++ b/plugins/modules/cloud/vultr/vultr_os_info.py @@ -0,0 +1,140 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (c) 2018, Yanis Guenane +# Copyright (c) 2019, René Moser +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = r''' +--- +module: vultr_os_info +short_description: Get information about the Vultr OSes available. +description: + - Get infos about OSes available to boot servers. 
+author: + - "Yanis Guenane (@Spredzy)" + - "René Moser (@resmo)" +extends_documentation_fragment: +- community.general.vultr + +''' + +EXAMPLES = r''' +- name: Get Vultr OSes infos + vultr_os_info: + register: results + +- name: Print the gathered infos + debug: + var: results.vultr_os_info +''' + +RETURN = r''' +--- +vultr_api: + description: Response from Vultr API with a few additions/modification + returned: success + type: complex + contains: + api_account: + description: Account used in the ini file to select the key + returned: success + type: str + sample: default + api_timeout: + description: Timeout used for the API requests + returned: success + type: int + sample: 60 + api_retries: + description: Amount of max retries for the API requests + returned: success + type: int + sample: 5 + api_retry_max_delay: + description: Exponential backoff delay in seconds between retries up to this max delay value. + returned: success + type: int + sample: 12 + version_added: '2.9' + api_endpoint: + description: Endpoint used for the API requests + returned: success + type: str + sample: "https://api.vultr.com" +vultr_os_info: + description: Response from Vultr API as list + returned: available + type: complex + contains: + arch: + description: OS Architecture + returned: success + type: str + sample: x64 + family: + description: OS family + returned: success + type: str + sample: openbsd + name: + description: OS name + returned: success + type: str + sample: OpenBSD 6 x64 + windows: + description: OS is a MS Windows + returned: success + type: bool +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.vultr import ( + Vultr, + vultr_argument_spec, +) + + +class AnsibleVultrOSInfo(Vultr): + + def __init__(self, module): + super(AnsibleVultrOSInfo, self).__init__(module, "vultr_os_info") + + self.returns = { + "OSID": dict(key='id', convert_to='int'), + "arch": dict(), + "family": dict(), + "name": dict(), + "windows": dict(convert_to='bool') + } + + def get_oses(self): + return self.api_query(path="/v1/os/list") + + +def parse_oses_list(oses_list): + return [os for id, os in oses_list.items()] + + +def main(): + argument_spec = vultr_argument_spec() + + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + + os_info = AnsibleVultrOSInfo(module) + result = os_info.get_result(parse_oses_list(os_info.get_oses())) + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/vultr/vultr_plan_facts.py b/plugins/modules/cloud/vultr/vultr_plan_facts.py new file mode 100644 index 0000000000..2cb4de7564 --- /dev/null +++ b/plugins/modules/cloud/vultr/vultr_plan_facts.py @@ -0,0 +1,150 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# (c) 2018, Yanis Guenane +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['deprecated'], + 'supported_by': 'community'} + +DOCUMENTATION = r''' +--- +module: vultr_plan_facts +deprecated: + removed_in: '2.13' + why: Deprecated in favour of C(_info) module. + alternative: Use M(vultr_plan_info) instead. +short_description: Gather facts about the Vultr plans available. +description: + - Gather facts about plans available to boot servers. 
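
Each module in this hunk declares a `self.returns` mapping that renames raw Vultr keys (for example `OSID` to `id`), casts values via `convert_to`, and optionally post-processes them via `transform` callables. The `Vultr.get_result()` machinery that consumes this mapping lives in `plugins/module_utils/vultr.py` and is not part of this hunk; the sketch below only illustrates how such a spec could be applied, and is not the actual implementation:

```python
def apply_returns_spec(resource, returns):
    """Normalize one raw API dict according to a `returns` spec (illustrative only)."""
    casts = {'int': int, 'float': float, 'bool': bool}
    normalized = {}
    for source_key, spec in returns.items():
        if source_key not in resource:
            continue
        value = resource[source_key]
        if 'convert_to' in spec:
            value = casts[spec['convert_to']](value)  # cast first ...
        if 'transform' in spec:
            value = spec['transform'](value)  # ... then post-process (e.g. ID -> name)
        normalized[spec.get('key', source_key)] = value  # rename via 'key' if given
    return normalized


# {'OSID': '127', 'name': 'CentOS 7 x64'} with the spec
# {'OSID': dict(key='id', convert_to='int'), 'name': dict()}
# would become {'id': 127, 'name': 'CentOS 7 x64'}.
```
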
+author: "Yanis Guenane (@Spredzy)" +extends_documentation_fragment: +- community.general.vultr + +''' + +EXAMPLES = r''' +- name: Gather Vultr plans facts + local_action: + module: vultr_plan_facts + +- name: Print the gathered facts + debug: + var: ansible_facts.vultr_plan_facts +''' + +RETURN = r''' +--- +vultr_api: + description: Response from Vultr API with a few additions/modification + returned: success + type: complex + contains: + api_account: + description: Account used in the ini file to select the key + returned: success + type: str + sample: default + api_timeout: + description: Timeout used for the API requests + returned: success + type: int + sample: 60 + api_retries: + description: Amount of max retries for the API requests + returned: success + type: int + sample: 5 + api_retry_max_delay: + description: Exponential backoff delay in seconds between retries up to this max delay value. + returned: success + type: int + sample: 12 + version_added: '2.9' + api_endpoint: + description: Endpoint used for the API requests + returned: success + type: str + sample: "https://api.vultr.com" +vultr_plan_facts: + description: Response from Vultr API + returned: success + type: complex + contains: + plan: + description: List of the plans available. + returned: success + type: list + sample: [{ + "available_locations": [ + 1 + ], + "bandwidth": 40.0, + "bandwidth_gb": 40960, + "disk": 110, + "id": 118, + "name": "32768 MB RAM,110 GB SSD,40.00 TB BW", + "plan_type": "DEDICATED", + "price_per_month": 240.0, + "ram": 32768, + "vcpu_count": 8, + "windows": false + }] +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.vultr import ( + Vultr, + vultr_argument_spec, +) + + +class AnsibleVultrPlanFacts(Vultr): + + def __init__(self, module): + super(AnsibleVultrPlanFacts, self).__init__(module, "vultr_plan_facts") + + self.returns = { + "VPSPLANID": dict(key='id', convert_to='int'), + "available_locations": dict(), + "bandwidth": dict(convert_to='float'), + "bandwidth_gb": dict(convert_to='int'), + "disk": dict(convert_to='int'), + "name": dict(), + "plan_type": dict(), + "price_per_month": dict(convert_to='float'), + "ram": dict(convert_to='int'), + "vcpu_count": dict(convert_to='int'), + "windows": dict(convert_to='bool') + } + + def get_plans(self): + return self.api_query(path="/v1/plans/list") + + +def parse_plans_list(plans_list): + return [plan for id, plan in plans_list.items()] + + +def main(): + argument_spec = vultr_argument_spec() + + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + + plan_facts = AnsibleVultrPlanFacts(module) + result = plan_facts.get_result(parse_plans_list(plan_facts.get_plans())) + ansible_facts = { + 'vultr_plan_facts': result['vultr_plan_facts'] + } + module.exit_json(ansible_facts=ansible_facts, **result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/vultr/vultr_plan_info.py b/plugins/modules/cloud/vultr/vultr_plan_info.py new file mode 100644 index 0000000000..fe4d9d3ae1 --- /dev/null +++ b/plugins/modules/cloud/vultr/vultr_plan_info.py @@ -0,0 +1,144 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# (c) 2018, Yanis Guenane +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + 
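
Because the plan list returned by these modules (facts above, info below) carries typed fields (`ram`, `vcpu_count`, `price_per_month`, ...), plan selection can be scripted with core Jinja2 filters. An illustrative task against the info variant (the thresholds are arbitrary; the `ge` test needs Jinja2 >= 2.10):

```yaml
- name: Gather available plans
  vultr_plan_info:
  delegate_to: localhost
  register: plans

- name: Pick the cheapest plan with at least 2 GB RAM
  set_fact:
    chosen_plan: >-
      {{ plans.vultr_plan_info
         | selectattr('ram', 'ge', 2048)
         | sort(attribute='price_per_month')
         | first }}
```
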
+DOCUMENTATION = r''' +--- +module: vultr_plan_info +short_description: Gather information about the Vultr plans available. +description: + - Gather information about plans available to boot servers. +author: "Yanis Guenane (@Spredzy)" +extends_documentation_fragment: +- community.general.vultr + +''' + +EXAMPLES = r''' +- name: Gather Vultr plans information + local_action: + module: vultr_plan_info + register: result + +- name: Print the gathered information + debug: + var: result.vultr_plan_info +''' + +RETURN = r''' +--- +vultr_api: + description: Response from Vultr API with a few additions/modification + returned: success + type: complex + contains: + api_account: + description: Account used in the ini file to select the key + returned: success + type: str + sample: default + api_timeout: + description: Timeout used for the API requests + returned: success + type: int + sample: 60 + api_retries: + description: Amount of max retries for the API requests + returned: success + type: int + sample: 5 + api_retry_max_delay: + description: Exponential backoff delay in seconds between retries up to this max delay value. + returned: success + type: int + sample: 12 + version_added: '2.9' + api_endpoint: + description: Endpoint used for the API requests + returned: success + type: str + sample: "https://api.vultr.com" +vultr_plan_info: + description: Response from Vultr API + returned: success + type: complex + contains: + plan: + description: List of the plans available. + returned: success + type: list + sample: [{ + "available_locations": [ + 1 + ], + "bandwidth": 40.0, + "bandwidth_gb": 40960, + "disk": 110, + "id": 118, + "name": "32768 MB RAM,110 GB SSD,40.00 TB BW", + "plan_type": "DEDICATED", + "price_per_month": 240.0, + "ram": 32768, + "vcpu_count": 8, + "windows": false + }] +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.vultr import ( + Vultr, + vultr_argument_spec, +) + + +class AnsibleVultrPlanInfo(Vultr): + + def __init__(self, module): + super(AnsibleVultrPlanInfo, self).__init__(module, "vultr_plan_info") + + self.returns = { + "VPSPLANID": dict(key='id', convert_to='int'), + "available_locations": dict(), + "bandwidth": dict(convert_to='float'), + "bandwidth_gb": dict(convert_to='int'), + "disk": dict(convert_to='int'), + "name": dict(), + "plan_type": dict(), + "price_per_month": dict(convert_to='float'), + "ram": dict(convert_to='int'), + "vcpu_count": dict(convert_to='int'), + "windows": dict(convert_to='bool') + } + + def get_plans(self): + return self.api_query(path="/v1/plans/list") + + +def parse_plans_list(plans_list): + return [plan for id, plan in plans_list.items()] + + +def main(): + argument_spec = vultr_argument_spec() + + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + + plan_info = AnsibleVultrPlanInfo(module) + result = plan_info.get_result(parse_plans_list(plan_info.get_plans())) + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/vultr/vultr_region_facts.py b/plugins/modules/cloud/vultr/vultr_region_facts.py new file mode 100644 index 0000000000..d426a2731c --- /dev/null +++ b/plugins/modules/cloud/vultr/vultr_region_facts.py @@ -0,0 +1,140 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# (c) 2018, Yanis Guenane +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) 
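
Several `transform` helpers, such as `_get_region_name()` in vultr_network_info above, resolve IDs to names through `self.query_resource_by_key(...)`. That helper is defined in `plugins/module_utils/vultr.py`, outside this diff; the following is only a simplified sketch of the lookup it performs, assuming list endpoints of the form `/v1/<resource>/<query_by>` that return dicts keyed by ID (the `id_key` shortcut and cache invalidation of the real helper are omitted, and `self._cache` is a hypothetical per-run dict):

```python
def query_resource_by_key(self, key, value, resource, use_cache=False, query_by='list', id_key=None):
    """Return the first entry of a Vultr list endpoint whose `key` equals `value`.

    Simplified sketch only, not the module_utils implementation.
    """
    if value is None:
        return {}
    if use_cache and resource in self._cache:
        entries = self._cache[resource]
    else:
        entries = self.api_query(path="/v1/%s/%s" % (resource, query_by)) or {}
        if use_cache:
            self._cache[resource] = entries
    for entry in entries.values():
        if entry.get(key) == value:
            return entry
    return {}
```
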
+__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['deprecated'], + 'supported_by': 'community'} + +DOCUMENTATION = r''' +--- +module: vultr_region_facts +deprecated: + removed_in: '2.13' + why: Deprecated in favour of C(_info) module. + alternative: Use M(vultr_region_info) instead. +short_description: Gather facts about the Vultr regions available. +description: + - Gather facts about regions available to boot servers. +author: "Yanis Guenane (@Spredzy)" +extends_documentation_fragment: +- community.general.vultr + +''' + +EXAMPLES = r''' +- name: Gather Vultr regions facts + local_action: + module: vultr_region_facts + +- name: Print the gathered facts + debug: + var: ansible_facts.vultr_region_facts +''' + +RETURN = r''' +--- +vultr_api: + description: Response from Vultr API with a few additions/modification + returned: success + type: complex + contains: + api_account: + description: Account used in the ini file to select the key + returned: success + type: str + sample: default + api_timeout: + description: Timeout used for the API requests + returned: success + type: int + sample: 60 + api_retries: + description: Amount of max retries for the API requests + returned: success + type: int + sample: 5 + api_retry_max_delay: + description: Exponential backoff delay in seconds between retries up to this max delay value. + returned: success + type: int + sample: 12 + version_added: '2.9' + api_endpoint: + description: Endpoint used for the API requests + returned: success + type: str + sample: "https://api.vultr.com" +vultr_region_facts: + description: Response from Vultr API + returned: success + type: complex + sample: + "vultr_region_facts": [ + { + "block_storage": false, + "continent": "Europe", + "country": "GB", + "ddos_protection": true, + "id": 8, + "name": "London", + "regioncode": "LHR", + "state": "" + } + ] +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.vultr import ( + Vultr, + vultr_argument_spec, +) + + +class AnsibleVultrRegionFacts(Vultr): + + def __init__(self, module): + super(AnsibleVultrRegionFacts, self).__init__(module, "vultr_region_facts") + + self.returns = { + "DCID": dict(key='id', convert_to='int'), + "block_storage": dict(convert_to='bool'), + "continent": dict(), + "country": dict(), + "ddos_protection": dict(convert_to='bool'), + "name": dict(), + "regioncode": dict(), + "state": dict() + } + + def get_regions(self): + return self.api_query(path="/v1/regions/list") + + +def parse_regions_list(regions_list): + return [region for id, region in regions_list.items()] + + +def main(): + argument_spec = vultr_argument_spec() + + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + + region_facts = AnsibleVultrRegionFacts(module) + result = region_facts.get_result(parse_regions_list(region_facts.get_regions())) + ansible_facts = { + 'vultr_region_facts': result['vultr_region_facts'] + } + module.exit_json(ansible_facts=ansible_facts, **result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/vultr/vultr_region_info.py b/plugins/modules/cloud/vultr/vultr_region_info.py new file mode 100644 index 0000000000..ce4fbeb312 --- /dev/null +++ b/plugins/modules/cloud/vultr/vultr_region_info.py @@ -0,0 +1,134 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# (c) 2018, Yanis Guenane +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ 
import (absolute_import, division, print_function) +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = r''' +--- +module: vultr_region_info +short_description: Gather information about the Vultr regions available. +description: + - Gather information about regions available to boot servers. +author: "Yanis Guenane (@Spredzy)" +extends_documentation_fragment: +- community.general.vultr + +''' + +EXAMPLES = r''' +- name: Gather Vultr regions information + local_action: + module: vultr_region_info + register: result + +- name: Print the gathered information + debug: + var: result.vultr_region_info +''' + +RETURN = r''' +--- +vultr_api: + description: Response from Vultr API with a few additions/modification + returned: success + type: complex + contains: + api_account: + description: Account used in the ini file to select the key + returned: success + type: str + sample: default + api_timeout: + description: Timeout used for the API requests + returned: success + type: int + sample: 60 + api_retries: + description: Amount of max retries for the API requests + returned: success + type: int + sample: 5 + api_retry_max_delay: + description: Exponential backoff delay in seconds between retries up to this max delay value. + returned: success + type: int + sample: 12 + version_added: '2.9' + api_endpoint: + description: Endpoint used for the API requests + returned: success + type: str + sample: "https://api.vultr.com" +vultr_region_info: + description: Response from Vultr API + returned: success + type: complex + sample: + "vultr_region_info": [ + { + "block_storage": false, + "continent": "Europe", + "country": "GB", + "ddos_protection": true, + "id": 8, + "name": "London", + "regioncode": "LHR", + "state": "" + } + ] +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.vultr import ( + Vultr, + vultr_argument_spec, +) + + +class AnsibleVultrRegionInfo(Vultr): + + def __init__(self, module): + super(AnsibleVultrRegionInfo, self).__init__(module, "vultr_region_info") + + self.returns = { + "DCID": dict(key='id', convert_to='int'), + "block_storage": dict(convert_to='bool'), + "continent": dict(), + "country": dict(), + "ddos_protection": dict(convert_to='bool'), + "name": dict(), + "regioncode": dict(), + "state": dict() + } + + def get_regions(self): + return self.api_query(path="/v1/regions/list") + + +def parse_regions_list(regions_list): + return [region for id, region in regions_list.items()] + + +def main(): + argument_spec = vultr_argument_spec() + + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + + region_info = AnsibleVultrRegionInfo(module) + result = region_info.get_result(parse_regions_list(region_info.get_regions())) + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/vultr/vultr_server.py b/plugins/modules/cloud/vultr/vultr_server.py new file mode 100644 index 0000000000..538b956036 --- /dev/null +++ b/plugins/modules/cloud/vultr/vultr_server.py @@ -0,0 +1,941 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# (c) 2017, René Moser +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + 
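
The `vultr_server` module that follows applies one idiom to every mutation: record `changed` and the diff entries first, then only call the API outside of check mode, so `--check --diff` previews changes without side effects. The skeleton, extracted for clarity (the method name is illustrative, not from the diff):

```python
def _example_mutation(self, server, new_tag):
    # Record the change and the diff unconditionally ...
    self.result['changed'] = True
    self.result['diff']['before']['tag'] = server.get('tag')
    self.result['diff']['after']['tag'] = new_tag
    # ... but only touch the API when not in check mode.
    if not self.module.check_mode:
        self.api_query(
            path="/v1/server/tag_set",
            method="POST",
            data={'SUBID': server['SUBID'], 'tag': new_tag},
        )
```
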
+DOCUMENTATION = ''' +--- +module: vultr_server +short_description: Manages virtual servers on Vultr. +description: + - Deploy, start, stop, update, restart, reinstall servers. +author: "René Moser (@resmo)" +options: + name: + description: + - Name of the server. + required: true + aliases: [ label ] + type: str + hostname: + description: + - The hostname to assign to this server. + type: str + os: + description: + - The operating system name or ID. + - Required if the server does not yet exist and is not restoring from a snapshot. + type: str + snapshot: + description: + - Name or ID of the snapshot to restore the server from. + type: str + firewall_group: + description: + - The firewall group description or ID to assign this server to. + type: str + plan: + description: + - Plan name or ID to use for the server. + - Required if the server does not yet exist. + type: str + force: + description: + - Force stop/start the server if required to apply changes + - Otherwise a running server will not be changed. + type: bool + default: no + notify_activate: + description: + - Whether to send an activation email when the server is ready or not. + - Only considered on creation. + type: bool + private_network_enabled: + description: + - Whether to enable private networking or not. + type: bool + auto_backup_enabled: + description: + - Whether to enable automatic backups or not. + type: bool + ipv6_enabled: + description: + - Whether to enable IPv6 or not. + type: bool + tag: + description: + - Tag for the server. + type: str + user_data: + description: + - User data to be passed to the server. + type: str + startup_script: + description: + - Name or ID of the startup script to execute on boot. + - Only considered while creating the server. + type: str + ssh_keys: + description: + - List of SSH key names or IDs passed to the server on creation. + aliases: [ ssh_key ] + type: list + reserved_ip_v4: + description: + - IP address of the floating IP to use as the main IP of this server. + - Only considered on creation. + type: str + region: + description: + - Region name or ID the server is deployed into. + - Required if the server does not yet exist. + type: str + state: + description: + - State of the server. 
+ default: present + choices: [ present, absent, restarted, reinstalled, started, stopped ] + type: str +extends_documentation_fragment: +- community.general.vultr + +''' + +EXAMPLES = ''' +- name: create server + delegate_to: localhost + vultr_server: + name: "{{ vultr_server_name }}" + os: CentOS 7 x64 + plan: 1024 MB RAM,25 GB SSD,1.00 TB BW + ssh_keys: + - my_key + - your_key + region: Amsterdam + state: present + +- name: ensure a server is present and started + delegate_to: localhost + vultr_server: + name: "{{ vultr_server_name }}" + os: CentOS 7 x64 + plan: 1024 MB RAM,25 GB SSD,1.00 TB BW + firewall_group: my_group + ssh_key: my_key + region: Amsterdam + state: started + +- name: ensure a server is present and stopped provisioned using IDs + delegate_to: localhost + vultr_server: + name: "{{ vultr_server_name }}" + os: "167" + plan: "201" + region: "7" + state: stopped + +- name: ensure an existing server is stopped + delegate_to: localhost + vultr_server: + name: "{{ vultr_server_name }}" + state: stopped + +- name: ensure an existing server is started + delegate_to: localhost + vultr_server: + name: "{{ vultr_server_name }}" + state: started + +- name: ensure a server is absent + delegate_to: localhost + vultr_server: + name: "{{ vultr_server_name }}" + state: absent +''' + +RETURN = ''' +--- +vultr_api: + description: Response from Vultr API with a few additions/modification + returned: success + type: complex + contains: + api_account: + description: Account used in the ini file to select the key + returned: success + type: str + sample: default + api_timeout: + description: Timeout used for the API requests + returned: success + type: int + sample: 60 + api_retries: + description: Amount of max retries for the API requests + returned: success + type: int + sample: 5 + api_retry_max_delay: + description: Exponential backoff delay in seconds between retries up to this max delay value. 
+ returned: success + type: int + sample: 12 + version_added: '2.9' + api_endpoint: + description: Endpoint used for the API requests + returned: success + type: str + sample: "https://api.vultr.com" +vultr_server: + description: Response from Vultr API with a few additions/modification + returned: success + type: complex + contains: + id: + description: ID of the server + returned: success + type: str + sample: 10194376 + name: + description: Name (label) of the server + returned: success + type: str + sample: "ansible-test-vm" + plan: + description: Plan used for the server + returned: success + type: str + sample: "1024 MB RAM,25 GB SSD,1.00 TB BW" + allowed_bandwidth_gb: + description: Allowed bandwidth to use in GB + returned: success + type: int + sample: 1000 + auto_backup_enabled: + description: Whether automatic backups are enabled + returned: success + type: bool + sample: false + cost_per_month: + description: Cost per month for the server + returned: success + type: float + sample: 5.00 + current_bandwidth_gb: + description: Current bandwidth used for the server + returned: success + type: int + sample: 0 + date_created: + description: Date when the server was created + returned: success + type: str + sample: "2017-08-26 12:47:48" + default_password: + description: Password to login as root into the server + returned: success + type: str + sample: "!p3EWYJm$qDWYaFr" + disk: + description: Information about the disk + returned: success + type: str + sample: "Virtual 25 GB" + v4_gateway: + description: IPv4 gateway + returned: success + type: str + sample: "45.32.232.1" + internal_ip: + description: Internal IP + returned: success + type: str + sample: "" + kvm_url: + description: URL to the VNC + returned: success + type: str + sample: "https://my.vultr.com/subs/vps/novnc/api.php?data=xyz" + region: + description: Region the server was deployed into + returned: success + type: str + sample: "Amsterdam" + v4_main_ip: + description: Main IPv4 + returned: success + type: str + sample: "45.32.233.154" + v4_netmask: + description: Netmask IPv4 + returned: success + type: str + sample: "255.255.254.0" + os: + description: Operating system used for the server + returned: success + type: str + sample: "CentOS 6 x64" + firewall_group: + description: Firewall group the server is assigned to + returned: success and available + type: str + sample: "CentOS 6 x64" + pending_charges: + description: Pending charges + returned: success + type: float + sample: 0.01 + power_status: + description: Power status of the server + returned: success + type: str + sample: "running" + ram: + description: Information about the RAM size + returned: success + type: str + sample: "1024 MB" + server_state: + description: State about the server + returned: success + type: str + sample: "ok" + status: + description: Status about the deployment of the server + returned: success + type: str + sample: "active" + tag: + description: TBD + returned: success + type: str + sample: "" + v6_main_ip: + description: Main IPv6 + returned: success + type: str + sample: "" + v6_network: + description: Network IPv6 + returned: success + type: str + sample: "" + v6_network_size: + description: Network size IPv6 + returned: success + type: str + sample: "" + v6_networks: + description: Networks IPv6 + returned: success + type: list + sample: [] + vcpu_count: + description: Virtual CPU count + returned: success + type: int + sample: 1 +''' + +import time +import base64 +from ansible.module_utils.basic import AnsibleModule +from 
ansible.module_utils._text import to_text, to_bytes +from ansible_collections.community.general.plugins.module_utils.vultr import ( + Vultr, + vultr_argument_spec, +) + + +class AnsibleVultrServer(Vultr): + + def __init__(self, module): + super(AnsibleVultrServer, self).__init__(module, "vultr_server") + + self.server = None + self.returns = { + 'SUBID': dict(key='id'), + 'label': dict(key='name'), + 'date_created': dict(), + 'allowed_bandwidth_gb': dict(convert_to='int'), + 'auto_backups': dict(key='auto_backup_enabled', convert_to='bool'), + 'current_bandwidth_gb': dict(), + 'kvm_url': dict(), + 'default_password': dict(), + 'internal_ip': dict(), + 'disk': dict(), + 'cost_per_month': dict(convert_to='float'), + 'location': dict(key='region'), + 'main_ip': dict(key='v4_main_ip'), + 'network_v4': dict(key='v4_network'), + 'gateway_v4': dict(key='v4_gateway'), + 'os': dict(), + 'pending_charges': dict(convert_to='float'), + 'power_status': dict(), + 'ram': dict(), + 'plan': dict(), + 'server_state': dict(), + 'status': dict(), + 'firewall_group': dict(), + 'tag': dict(), + 'v6_main_ip': dict(), + 'v6_network': dict(), + 'v6_network_size': dict(), + 'v6_networks': dict(), + 'vcpu_count': dict(convert_to='int'), + } + self.server_power_state = None + + def get_startup_script(self): + return self.query_resource_by_key( + key='name', + value=self.module.params.get('startup_script'), + resource='startupscript', + ) + + def get_os(self): + if self.module.params.get('snapshot'): + os_name = 'Snapshot' + else: + os_name = self.module.params.get('os') + + return self.query_resource_by_key( + key='name', + value=os_name, + resource='os', + use_cache=True, + id_key='OSID', + ) + + def get_snapshot(self): + return self.query_resource_by_key( + key='description', + value=self.module.params.get('snapshot'), + resource='snapshot', + id_key='SNAPSHOTID', + ) + + def get_ssh_keys(self): + ssh_key_names = self.module.params.get('ssh_keys') + if not ssh_key_names: + return [] + + ssh_keys = [] + for ssh_key_name in ssh_key_names: + ssh_key = self.query_resource_by_key( + key='name', + value=ssh_key_name, + resource='sshkey', + use_cache=True, + id_key='SSHKEYID', + ) + if ssh_key: + ssh_keys.append(ssh_key) + return ssh_keys + + def get_region(self): + return self.query_resource_by_key( + key='name', + value=self.module.params.get('region'), + resource='regions', + use_cache=True, + id_key='DCID', + ) + + def get_firewall_group(self): + return self.query_resource_by_key( + key='description', + value=self.module.params.get('firewall_group'), + resource='firewall', + query_by='group_list', + id_key='FIREWALLGROUPID' + ) + + def get_user_data(self): + user_data = self.module.params.get('user_data') + if user_data is not None: + user_data = to_text(base64.b64encode(to_bytes(user_data))) + return user_data + + def get_server_user_data(self, server): + if not server or not server.get('SUBID'): + return None + + user_data = self.api_query(path="/v1/server/get_user_data?SUBID=%s" % server.get('SUBID')) + return user_data.get('userdata') + + def get_server(self, refresh=False): + if self.server is None or refresh: + self.server = None + server_list = self.api_query(path="/v1/server/list") + if server_list: + for server_id, server_data in server_list.items(): + if server_data.get('label') == self.module.params.get('name'): + self.server = server_data + + plan = self.query_resource_by_key( + key='VPSPLANID', + value=server_data['VPSPLANID'], + resource='plans', + use_cache=True + ) + self.server['plan'] = 
plan.get('name') + + os = self.query_resource_by_key( + key='OSID', + value=int(server_data['OSID']), + resource='os', + use_cache=True + ) + self.server['os'] = os.get('name') + + fwg_id = server_data.get('FIREWALLGROUPID') + fw = self.query_resource_by_key( + key='FIREWALLGROUPID', + value=server_data.get('FIREWALLGROUPID') if fwg_id and fwg_id != "0" else None, + resource='firewall', + query_by='group_list', + use_cache=True + ) + self.server['firewall_group'] = fw.get('description') + return self.server + + def present_server(self, start_server=True): + server = self.get_server() + if not server: + server = self._create_server(server=server) + else: + server = self._update_server(server=server, start_server=start_server) + return server + + def _create_server(self, server=None): + required_params = [ + 'os', + 'plan', + 'region', + ] + + snapshot_restore = self.module.params.get('snapshot') is not None + if snapshot_restore: + required_params.remove('os') + + self.module.fail_on_missing_params(required_params=required_params) + + self.result['changed'] = True + if not self.module.check_mode: + data = { + 'DCID': self.get_region().get('DCID'), + 'VPSPLANID': self.get_plan().get('VPSPLANID'), + 'FIREWALLGROUPID': self.get_firewall_group().get('FIREWALLGROUPID'), + 'OSID': self.get_os().get('OSID'), + 'SNAPSHOTID': self.get_snapshot().get('SNAPSHOTID'), + 'label': self.module.params.get('name'), + 'hostname': self.module.params.get('hostname'), + 'SSHKEYID': ','.join([ssh_key['SSHKEYID'] for ssh_key in self.get_ssh_keys()]), + 'enable_ipv6': self.get_yes_or_no('ipv6_enabled'), + 'enable_private_network': self.get_yes_or_no('private_network_enabled'), + 'auto_backups': self.get_yes_or_no('auto_backup_enabled'), + 'notify_activate': self.get_yes_or_no('notify_activate'), + 'tag': self.module.params.get('tag'), + 'reserved_ip_v4': self.module.params.get('reserved_ip_v4'), + 'user_data': self.get_user_data(), + 'SCRIPTID': self.get_startup_script().get('SCRIPTID'), + } + self.api_query( + path="/v1/server/create", + method="POST", + data=data + ) + server = self._wait_for_state(key='status', state='active') + server = self._wait_for_state(state='running', timeout=3600 if snapshot_restore else 60) + return server + + def _update_auto_backups_setting(self, server, start_server): + auto_backup_enabled_changed = self.switch_enable_disable(server, 'auto_backup_enabled', 'auto_backups') + + if auto_backup_enabled_changed: + if auto_backup_enabled_changed == "enable" and server['auto_backups'] == 'disable': + self.module.warn("Backups are disabled. 
Once disabled, backups can only be enabled again by customer support")
+            else:
+                server, warned = self._handle_power_status_for_update(server, start_server)
+                if not warned:
+                    self.result['changed'] = True
+                    self.result['diff']['before']['auto_backup_enabled'] = server.get('auto_backups')
+                    self.result['diff']['after']['auto_backup_enabled'] = self.get_yes_or_no('auto_backup_enabled')
+
+                    if not self.module.check_mode:
+                        data = {
+                            'SUBID': server['SUBID']
+                        }
+                        self.api_query(
+                            path="/v1/server/backup_%s" % auto_backup_enabled_changed,
+                            method="POST",
+                            data=data
+                        )
+        return server
+
+    def _update_ipv6_setting(self, server, start_server):
+        ipv6_enabled_changed = self.switch_enable_disable(server, 'ipv6_enabled', 'v6_main_ip')
+
+        if ipv6_enabled_changed:
+            if ipv6_enabled_changed == "disable":
+                self.module.warn("The Vultr API does not allow disabling IPv6")
+            else:
+                server, warned = self._handle_power_status_for_update(server, start_server)
+                if not warned:
+                    self.result['changed'] = True
+                    self.result['diff']['before']['ipv6_enabled'] = False
+                    self.result['diff']['after']['ipv6_enabled'] = True
+
+                    if not self.module.check_mode:
+                        data = {
+                            'SUBID': server['SUBID']
+                        }
+                        self.api_query(
+                            path="/v1/server/ipv6_%s" % ipv6_enabled_changed,
+                            method="POST",
+                            data=data
+                        )
+                        server = self._wait_for_state(key='v6_main_ip')
+        return server
+
+    def _update_private_network_setting(self, server, start_server):
+        private_network_enabled_changed = self.switch_enable_disable(server, 'private_network_enabled', 'internal_ip')
+        if private_network_enabled_changed:
+            if private_network_enabled_changed == "disable":
+                self.module.warn("The Vultr API does not allow disabling the private network")
+            else:
+                server, warned = self._handle_power_status_for_update(server, start_server)
+                if not warned:
+                    self.result['changed'] = True
+                    self.result['diff']['before']['private_network_enabled'] = False
+                    self.result['diff']['after']['private_network_enabled'] = True
+
+                    if not self.module.check_mode:
+                        data = {
+                            'SUBID': server['SUBID']
+                        }
+                        self.api_query(
+                            path="/v1/server/private_network_%s" % private_network_enabled_changed,
+                            method="POST",
+                            data=data
+                        )
+        return server
+
+    def _update_plan_setting(self, server, start_server):
+        # Verify the existing plan has not been discontinued by Vultr and therefore won't be found by the API
+        server_plan = self.get_plan(plan=server.get('VPSPLANID'), optional=True)
+        if not server_plan:
+            plan = self.get_plan(optional=True)
+            if not plan:
+                self.module.warn("The plan used to create the server is no longer available, nor is the desired plan.
Assuming same plan, keeping as is.")
+                return server
+        else:
+            plan = self.get_plan()
+
+        plan_changed = bool(plan and plan['VPSPLANID'] != server.get('VPSPLANID'))
+        if plan_changed:
+            server, warned = self._handle_power_status_for_update(server, start_server)
+            if not warned:
+                self.result['changed'] = True
+                self.result['diff']['before']['plan'] = server.get('plan')
+                self.result['diff']['after']['plan'] = plan['name']
+
+                if not self.module.check_mode:
+                    data = {
+                        'SUBID': server['SUBID'],
+                        'VPSPLANID': plan['VPSPLANID'],
+                    }
+                    self.api_query(
+                        path="/v1/server/upgrade_plan",
+                        method="POST",
+                        data=data
+                    )
+        return server
+
+    def _handle_power_status_for_update(self, server, start_server):
+        # Remember the power state before we handle any action
+        if self.server_power_state is None:
+            self.server_power_state = server['power_status']
+
+        # A stopped server can be updated
+        if self.server_power_state == "stopped":
+            return server, False
+
+        # A running server must be forced to update unless the wanted state is stopped
+        elif self.module.params.get('force') or not start_server:
+            warned = False
+            if not self.module.check_mode:
+                # Some update APIs would restart the VM; we handle the restart manually
+                # by stopping the server and starting it again at the end of the changes
+                server = self.stop_server(skip_results=True)
+
+        # Warn the user that a running server won't get changed
+        else:
+            warned = True
+            self.module.warn("Some changes won't be applied to running instances. " +
+                             "Use force=true to allow the instance %s to be stopped/started." % server['label'])
+
+        return server, warned
+
+    def _update_server(self, server=None, start_server=True):
+        # Wait for the server to unlock if restoring from a snapshot
+        if server.get('os').strip() == 'Snapshot':
+            server = self._wait_for_state(key='server_state', state='ok', timeout=3600)
+
+        # Update auto backup settings, stops the server
+        server = self._update_auto_backups_setting(server=server, start_server=start_server)
+
+        # Update IPv6 settings, stops the server
+        server = self._update_ipv6_setting(server=server, start_server=start_server)
+
+        # Update private network settings, stops the server
+        server = self._update_private_network_setting(server=server, start_server=start_server)
+
+        # Update plan settings, stops the server
+        server = self._update_plan_setting(server=server, start_server=start_server)
+
+        # User data
+        user_data = self.get_user_data()
+        server_user_data = self.get_server_user_data(server=server)
+        if user_data is not None and user_data != server_user_data:
+            self.result['changed'] = True
+            self.result['diff']['before']['user_data'] = server_user_data
+            self.result['diff']['after']['user_data'] = user_data
+
+            if not self.module.check_mode:
+                data = {
+                    'SUBID': server['SUBID'],
+                    'userdata': user_data,
+                }
+                self.api_query(
+                    path="/v1/server/set_user_data",
+                    method="POST",
+                    data=data
+                )
+
+        # Tags
+        tag = self.module.params.get('tag')
+        if tag is not None and tag != server.get('tag'):
+            self.result['changed'] = True
+            self.result['diff']['before']['tag'] = server.get('tag')
+            self.result['diff']['after']['tag'] = tag
+
+            if not self.module.check_mode:
+                data = {
+                    'SUBID': server['SUBID'],
+                    'tag': tag,
+                }
+                self.api_query(
+                    path="/v1/server/tag_set",
+                    method="POST",
+                    data=data
+                )
+
+        # Firewall group
+        firewall_group = self.get_firewall_group()
+        if firewall_group and firewall_group.get('description') != server.get('firewall_group'):
+            self.result['changed'] = True
+            self.result['diff']['before']['firewall_group'] = server.get('firewall_group')
+            self.result['diff']['after']['firewall_group'] = firewall_group.get('description')
+
+            if not self.module.check_mode:
+                data = {
+                    'SUBID': server['SUBID'],
+                    'FIREWALLGROUPID': firewall_group.get('FIREWALLGROUPID'),
+                }
+                self.api_query(
+                    path="/v1/server/firewall_group_set",
+                    method="POST",
+                    data=data
+                )
+        # Start the server again if it was running before the changes
+        if not self.module.check_mode:
+            if self.server_power_state in ['starting', 'running'] and start_server:
+                server = self.start_server(skip_results=True)
+
+            server = self._wait_for_state(key='status', state='active')
+        return server
+
+    def absent_server(self):
+        server = self.get_server()
+        if server:
+            self.result['changed'] = True
+            self.result['diff']['before']['id'] = server['SUBID']
+            self.result['diff']['after']['id'] = ""
+            if not self.module.check_mode:
+                data = {
+                    'SUBID': server['SUBID']
+                }
+                self.api_query(
+                    path="/v1/server/destroy",
+                    method="POST",
+                    data=data
+                )
+                for s in range(0, 60):
+                    # Wait until the server is gone from the server list
+                    if server is None:
+                        break
+                    time.sleep(2)
+                    server = self.get_server(refresh=True)
+                else:
+                    self.fail_json(msg="Wait for server '%s' to get deleted timed out" % server['label'])
+        return server
+
+    def restart_server(self):
+        self.result['changed'] = True
+        server = self.get_server()
+        if server:
+            if not self.module.check_mode:
+                data = {
+                    'SUBID': server['SUBID']
+                }
+                self.api_query(
+                    path="/v1/server/reboot",
+                    method="POST",
+                    data=data
+                )
+                server = self._wait_for_state(state='running')
+        return server
+
+    def reinstall_server(self):
+        self.result['changed'] = True
+        server = self.get_server()
+        if server:
+            if not self.module.check_mode:
+                data = {
+                    'SUBID': server['SUBID']
+                }
+                self.api_query(
+                    path="/v1/server/reinstall",
+                    method="POST",
+                    data=data
+                )
+                server = self._wait_for_state(state='running')
+        return server
+
+    def _wait_for_state(self, key='power_status', state=None, timeout=60):
+        time.sleep(1)
+        server = self.get_server(refresh=True)
+        for s in range(0, timeout):
+            # Check for truthiness if the wanted state is None
+            if state is None and server.get(key):
+                break
+            elif server.get(key) == state:
+                break
+            time.sleep(2)
+            server = self.get_server(refresh=True)
+
+        # Timed out
+        else:
+            if state is None:
+                msg = "Wait for '%s' timed out" % key
+            else:
+                msg = "Wait for '%s' to get into state '%s' timed out" % (key, state)
+            self.fail_json(msg=msg)
+        return server
+
+    def start_server(self, skip_results=False):
+        server = self.get_server()
+        if server:
+            if server['power_status'] == 'starting':
+                server = self._wait_for_state(state='running')
+
+            elif server['power_status'] != 'running':
+                if not skip_results:
+                    self.result['changed'] = True
+                    self.result['diff']['before']['power_status'] = server['power_status']
+                    self.result['diff']['after']['power_status'] = "running"
+                if not self.module.check_mode:
+                    data = {
+                        'SUBID': server['SUBID']
+                    }
+                    self.api_query(
+                        path="/v1/server/start",
+                        method="POST",
+                        data=data
+                    )
+                    server = self._wait_for_state(state='running')
+        return server
+
+    def stop_server(self, skip_results=False):
+        server = self.get_server()
+        if server and server['power_status'] != "stopped":
+            if not skip_results:
+                self.result['changed'] = True
+                self.result['diff']['before']['power_status'] = server['power_status']
+                self.result['diff']['after']['power_status'] = "stopped"
+            if not self.module.check_mode:
+                data = {
+                    'SUBID': server['SUBID'],
+                }
+                self.api_query(
+                    path="/v1/server/halt",
+                    method="POST",
+                    data=data
+                )
+                server =
self._wait_for_state(state='stopped') + return server + + +def main(): + argument_spec = vultr_argument_spec() + argument_spec.update(dict( + name=dict(required=True, aliases=['label']), + hostname=dict(type='str'), + os=dict(type='str'), + snapshot=dict(type='str'), + plan=dict(type='str'), + force=dict(type='bool', default=False), + notify_activate=dict(type='bool', default=False), + private_network_enabled=dict(type='bool'), + auto_backup_enabled=dict(type='bool'), + ipv6_enabled=dict(type='bool'), + tag=dict(type='str'), + reserved_ip_v4=dict(type='str'), + firewall_group=dict(type='str'), + startup_script=dict(type='str'), + user_data=dict(type='str'), + ssh_keys=dict(type='list', aliases=['ssh_key']), + region=dict(type='str'), + state=dict(choices=['present', 'absent', 'restarted', 'reinstalled', 'started', 'stopped'], default='present'), + )) + + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + + vultr_server = AnsibleVultrServer(module) + if module.params.get('state') == "absent": + server = vultr_server.absent_server() + else: + if module.params.get('state') == "started": + server = vultr_server.present_server() + server = vultr_server.start_server() + elif module.params.get('state') == "stopped": + server = vultr_server.present_server(start_server=False) + server = vultr_server.stop_server() + elif module.params.get('state') == "restarted": + server = vultr_server.present_server() + server = vultr_server.restart_server() + elif module.params.get('state') == "reinstalled": + server = vultr_server.reinstall_server() + else: + server = vultr_server.present_server() + + result = vultr_server.get_result(server) + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/vultr/vultr_server_facts.py b/plugins/modules/cloud/vultr/vultr_server_facts.py new file mode 100644 index 0000000000..395b400cdd --- /dev/null +++ b/plugins/modules/cloud/vultr/vultr_server_facts.py @@ -0,0 +1,199 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# (c) 2018, Yanis Guenane +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['deprecated'], + 'supported_by': 'community'} + +DOCUMENTATION = r''' +--- +module: vultr_server_facts +deprecated: + removed_in: '2.13' + why: Deprecated in favour of C(_info) module. + alternative: Use M(vultr_server_info) instead. +short_description: Gather facts about the Vultr servers available. +description: + - Gather facts about servers available. 
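
As the warning in `_handle_power_status_for_update()` above spells out, plan, IPv6, private-network and auto-backup changes require the server to be stopped first, so they are skipped on a running server unless `force: true` is set (or the target state is `stopped`). An illustrative resize task (the plan name is a placeholder):

```yaml
- name: Resize a running server, allowing a stop/start cycle
  vultr_server:
    name: "{{ vultr_server_name }}"
    plan: 2048 MB RAM,55 GB SSD,2.00 TB BW
    force: true
    state: started
  delegate_to: localhost
```
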
+author: "Yanis Guenane (@Spredzy)" +extends_documentation_fragment: +- community.general.vultr + +''' + +EXAMPLES = r''' +- name: Gather Vultr servers facts + local_action: + module: vultr_server_facts + +- name: Print the gathered facts + debug: + var: ansible_facts.vultr_server_facts +''' + +RETURN = r''' +--- +vultr_api: + description: Response from Vultr API with a few additions/modification + returned: success + type: complex + contains: + api_account: + description: Account used in the ini file to select the key + returned: success + type: str + sample: default + api_timeout: + description: Timeout used for the API requests + returned: success + type: int + sample: 60 + api_retries: + description: Amount of max retries for the API requests + returned: success + type: int + sample: 5 + api_retry_max_delay: + description: Exponential backoff delay in seconds between retries up to this max delay value. + returned: success + type: int + sample: 12 + version_added: '2.9' + api_endpoint: + description: Endpoint used for the API requests + returned: success + type: str + sample: "https://api.vultr.com" +vultr_server_facts: + description: Response from Vultr API + returned: success + type: complex + sample: + "vultr_server_facts": [ + { + "allowed_bandwidth_gb": 1000, + "auto_backup_enabled": false, + "application": null, + "cost_per_month": 5.00, + "current_bandwidth_gb": 0, + "date_created": "2018-07-19 08:23:03", + "default_password": "p4ssw0rd!", + "disk": "Virtual 25 GB", + "firewallgroup": null, + "id": 17241096, + "internal_ip": "", + "kvm_url": "https://my.vultr.com/subs/vps/novnc/api.php?data=OFB...", + "name": "ansibletest", + "os": "CentOS 7 x64", + "pending_charges": 0.01, + "plan": "1024 MB RAM,25 GB SSD,1.00 TB BW", + "power_status": "running", + "ram": "1024 MB", + "region": "Amsterdam", + "server_state": "ok", + "status": "active", + "tag": "", + "v4_gateway": "105.178.158.1", + "v4_main_ip": "105.178.158.181", + "v4_netmask": "255.255.254.0", + "v6_main_ip": "", + "v6_network": "", + "v6_network_size": "", + "v6_networks": [], + "vcpu_count": 1 + } + ] +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.vultr import ( + Vultr, + vultr_argument_spec, +) + + +class AnsibleVultrServerFacts(Vultr): + + def __init__(self, module): + super(AnsibleVultrServerFacts, self).__init__(module, "vultr_server_facts") + + self.returns = { + "APPID": dict(key='application', convert_to='int', transform=self._get_application_name), + "FIREWALLGROUPID": dict(key='firewallgroup', transform=self._get_firewallgroup_name), + "SUBID": dict(key='id', convert_to='int'), + "VPSPLANID": dict(key='plan', convert_to='int', transform=self._get_plan_name), + "allowed_bandwidth_gb": dict(convert_to='int'), + 'auto_backups': dict(key='auto_backup_enabled', convert_to='bool'), + "cost_per_month": dict(convert_to='float'), + "current_bandwidth_gb": dict(convert_to='float'), + "date_created": dict(), + "default_password": dict(), + "disk": dict(), + "gateway_v4": dict(key='v4_gateway'), + "internal_ip": dict(), + "kvm_url": dict(), + "label": dict(key='name'), + "location": dict(key='region'), + "main_ip": dict(key='v4_main_ip'), + "netmask_v4": dict(key='v4_netmask'), + "os": dict(), + "pending_charges": dict(convert_to='float'), + "power_status": dict(), + "ram": dict(), + "server_state": dict(), + "status": dict(), + "tag": dict(), + "v6_main_ip": dict(), + "v6_network": dict(), + "v6_network_size": dict(), + "v6_networks": dict(), + 
"vcpu_count": dict(convert_to='int'), + } + + def _get_application_name(self, application): + if application == 0: + return None + + return self.get_application(application, 'APPID').get('name') + + def _get_firewallgroup_name(self, firewallgroup): + if firewallgroup == 0: + return None + + return self.get_firewallgroup(firewallgroup, 'FIREWALLGROUPID').get('description') + + def _get_plan_name(self, plan): + return self.get_plan(plan, 'VPSPLANID').get('name') + + def get_servers(self): + return self.api_query(path="/v1/server/list") + + +def parse_servers_list(servers_list): + return [server for id, server in servers_list.items()] + + +def main(): + argument_spec = vultr_argument_spec() + + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + + server_facts = AnsibleVultrServerFacts(module) + result = server_facts.get_result(parse_servers_list(server_facts.get_servers())) + ansible_facts = { + 'vultr_server_facts': result['vultr_server_facts'] + } + module.exit_json(ansible_facts=ansible_facts, **result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/vultr/vultr_server_info.py b/plugins/modules/cloud/vultr/vultr_server_info.py new file mode 100644 index 0000000000..c67ff98a15 --- /dev/null +++ b/plugins/modules/cloud/vultr/vultr_server_info.py @@ -0,0 +1,193 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# (c) 2018, Yanis Guenane +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = r''' +--- +module: vultr_server_info +short_description: Gather information about the Vultr servers available. +description: + - Gather information about servers available. +author: "Yanis Guenane (@Spredzy)" +extends_documentation_fragment: +- community.general.vultr + +''' + +EXAMPLES = r''' +- name: Gather Vultr servers information + local_action: + module: vultr_server_info + register: result + +- name: Print the gathered information + debug: + var: result.vultr_server_info +''' + +RETURN = r''' +--- +vultr_api: + description: Response from Vultr API with a few additions/modification + returned: success + type: complex + contains: + api_account: + description: Account used in the ini file to select the key + returned: success + type: str + sample: default + api_timeout: + description: Timeout used for the API requests + returned: success + type: int + sample: 60 + api_retries: + description: Amount of max retries for the API requests + returned: success + type: int + sample: 5 + api_retry_max_delay: + description: Exponential backoff delay in seconds between retries up to this max delay value. 
+ returned: success + type: int + sample: 12 + version_added: '2.9' + api_endpoint: + description: Endpoint used for the API requests + returned: success + type: str + sample: "https://api.vultr.com" +vultr_server_info: + description: Response from Vultr API + returned: success + type: complex + sample: + "vultr_server_info": [ + { + "allowed_bandwidth_gb": 1000, + "auto_backup_enabled": false, + "application": null, + "cost_per_month": 5.00, + "current_bandwidth_gb": 0, + "date_created": "2018-07-19 08:23:03", + "default_password": "p4ssw0rd!", + "disk": "Virtual 25 GB", + "firewallgroup": null, + "id": 17241096, + "internal_ip": "", + "kvm_url": "https://my.vultr.com/subs/vps/novnc/api.php?data=OFB...", + "name": "ansibletest", + "os": "CentOS 7 x64", + "pending_charges": 0.01, + "plan": "1024 MB RAM,25 GB SSD,1.00 TB BW", + "power_status": "running", + "ram": "1024 MB", + "region": "Amsterdam", + "server_state": "ok", + "status": "active", + "tag": "", + "v4_gateway": "105.178.158.1", + "v4_main_ip": "105.178.158.181", + "v4_netmask": "255.255.254.0", + "v6_main_ip": "", + "v6_network": "", + "v6_network_size": "", + "v6_networks": [], + "vcpu_count": 1 + } + ] +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.vultr import ( + Vultr, + vultr_argument_spec, +) + + +class AnsibleVultrServerInfo(Vultr): + + def __init__(self, module): + super(AnsibleVultrServerInfo, self).__init__(module, "vultr_server_info") + + self.returns = { + "APPID": dict(key='application', convert_to='int', transform=self._get_application_name), + "FIREWALLGROUPID": dict(key='firewallgroup', transform=self._get_firewallgroup_name), + "SUBID": dict(key='id', convert_to='int'), + "VPSPLANID": dict(key='plan', convert_to='int', transform=self._get_plan_name), + "allowed_bandwidth_gb": dict(convert_to='int'), + 'auto_backups': dict(key='auto_backup_enabled', convert_to='bool'), + "cost_per_month": dict(convert_to='float'), + "current_bandwidth_gb": dict(convert_to='float'), + "date_created": dict(), + "default_password": dict(), + "disk": dict(), + "gateway_v4": dict(key='v4_gateway'), + "internal_ip": dict(), + "kvm_url": dict(), + "label": dict(key='name'), + "location": dict(key='region'), + "main_ip": dict(key='v4_main_ip'), + "netmask_v4": dict(key='v4_netmask'), + "os": dict(), + "pending_charges": dict(convert_to='float'), + "power_status": dict(), + "ram": dict(), + "server_state": dict(), + "status": dict(), + "tag": dict(), + "v6_main_ip": dict(), + "v6_network": dict(), + "v6_network_size": dict(), + "v6_networks": dict(), + "vcpu_count": dict(convert_to='int'), + } + + def _get_application_name(self, application): + if application == 0: + return None + + return self.get_application(application, 'APPID').get('name') + + def _get_firewallgroup_name(self, firewallgroup): + if firewallgroup == 0: + return None + + return self.get_firewallgroup(firewallgroup, 'FIREWALLGROUPID').get('description') + + def _get_plan_name(self, plan): + return self.get_plan(plan, 'VPSPLANID', optional=True).get('name') or 'N/A' + + def get_servers(self): + return self.api_query(path="/v1/server/list") + + +def parse_servers_list(servers_list): + return [server for id, server in servers_list.items()] + + +def main(): + argument_spec = vultr_argument_spec() + + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + + server_info = AnsibleVultrServerInfo(module) + result = 
server_info.get_result(parse_servers_list(server_info.get_servers())) + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/vultr/vultr_ssh_key.py b/plugins/modules/cloud/vultr/vultr_ssh_key.py new file mode 100644 index 0000000000..d8d4b5b290 --- /dev/null +++ b/plugins/modules/cloud/vultr/vultr_ssh_key.py @@ -0,0 +1,239 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# (c) 2017, René Moser +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: vultr_ssh_key +short_description: Manages ssh keys on Vultr. +description: + - Create, update and remove ssh keys. +author: "René Moser (@resmo)" +options: + name: + description: + - Name of the ssh key. + required: true + type: str + ssh_key: + description: + - SSH public key. + - Required if C(state=present). + type: str + state: + description: + - State of the ssh key. + default: present + choices: [ present, absent ] + type: str +extends_documentation_fragment: +- community.general.vultr + +''' + +EXAMPLES = ''' +- name: ensure an SSH key is present + vultr_ssh_key: + name: my ssh key + ssh_key: "{{ lookup('file', '~/.ssh/id_rsa.pub') }}" + +- name: ensure an SSH key is absent + vultr_ssh_key: + name: my ssh key + state: absent +''' + +RETURN = ''' +--- +vultr_api: + description: Response from Vultr API with a few additions/modification + returned: success + type: complex + contains: + api_account: + description: Account used in the ini file to select the key + returned: success + type: str + sample: default + api_timeout: + description: Timeout used for the API requests + returned: success + type: int + sample: 60 + api_retries: + description: Amount of max retries for the API requests + returned: success + type: int + sample: 5 + api_retry_max_delay: + description: Exponential backoff delay in seconds between retries up to this max delay value. + returned: success + type: int + sample: 12 + version_added: '2.9' + api_endpoint: + description: Endpoint used for the API requests + returned: success + type: str + sample: "https://api.vultr.com" +vultr_ssh_key: + description: Response from Vultr API + returned: success + type: complex + contains: + id: + description: ID of the ssh key + returned: success + type: str + sample: 5904bc6ed9234 + name: + description: Name of the ssh key + returned: success + type: str + sample: my ssh key + date_created: + description: Date the ssh key was created + returned: success + type: str + sample: "2017-08-26 12:47:48" + ssh_key: + description: SSH public key + returned: success + type: str + sample: "ssh-rsa AA... 
someother@example.com" +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.vultr import ( + Vultr, + vultr_argument_spec, +) + + +class AnsibleVultrSshKey(Vultr): + + def __init__(self, module): + super(AnsibleVultrSshKey, self).__init__(module, "vultr_ssh_key") + + self.returns = { + 'SSHKEYID': dict(key='id'), + 'name': dict(), + 'ssh_key': dict(), + 'date_created': dict(), + } + + def get_ssh_key(self): + ssh_keys = self.api_query(path="/v1/sshkey/list") + if ssh_keys: + for ssh_key_id, ssh_key_data in ssh_keys.items(): + if ssh_key_data.get('name') == self.module.params.get('name'): + return ssh_key_data + return {} + + def present_ssh_key(self): + ssh_key = self.get_ssh_key() + if not ssh_key: + ssh_key = self._create_ssh_key(ssh_key) + else: + ssh_key = self._update_ssh_key(ssh_key) + return ssh_key + + def _create_ssh_key(self, ssh_key): + self.result['changed'] = True + data = { + 'name': self.module.params.get('name'), + 'ssh_key': self.module.params.get('ssh_key'), + } + self.result['diff']['before'] = {} + self.result['diff']['after'] = data + + if not self.module.check_mode: + self.api_query( + path="/v1/sshkey/create", + method="POST", + data=data + ) + ssh_key = self.get_ssh_key() + return ssh_key + + def _update_ssh_key(self, ssh_key): + param_ssh_key = self.module.params.get('ssh_key') + if param_ssh_key != ssh_key['ssh_key']: + self.result['changed'] = True + + data = { + 'SSHKEYID': ssh_key['SSHKEYID'], + 'ssh_key': param_ssh_key, + } + + self.result['diff']['before'] = ssh_key + self.result['diff']['after'] = data + self.result['diff']['after'].update({'date_created': ssh_key['date_created']}) + + if not self.module.check_mode: + self.api_query( + path="/v1/sshkey/update", + method="POST", + data=data + ) + ssh_key = self.get_ssh_key() + return ssh_key + + def absent_ssh_key(self): + ssh_key = self.get_ssh_key() + if ssh_key: + self.result['changed'] = True + + data = { + 'SSHKEYID': ssh_key['SSHKEYID'], + } + + self.result['diff']['before'] = ssh_key + self.result['diff']['after'] = {} + + if not self.module.check_mode: + self.api_query( + path="/v1/sshkey/destroy", + method="POST", + data=data + ) + return ssh_key + + +def main(): + argument_spec = vultr_argument_spec() + argument_spec.update(dict( + name=dict(type='str', required=True), + ssh_key=dict(type='str',), + state=dict(type='str', choices=['present', 'absent'], default='present'), + )) + + module = AnsibleModule( + argument_spec=argument_spec, + required_if=[ + ('state', 'present', ['ssh_key']), + ], + supports_check_mode=True, + ) + + vultr_ssh_key = AnsibleVultrSshKey(module) + if module.params.get('state') == "absent": + ssh_key = vultr_ssh_key.absent_ssh_key() + else: + ssh_key = vultr_ssh_key.present_ssh_key() + + result = vultr_ssh_key.get_result(ssh_key) + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/vultr/vultr_ssh_key_facts.py b/plugins/modules/cloud/vultr/vultr_ssh_key_facts.py new file mode 100644 index 0000000000..3b2605e588 --- /dev/null +++ b/plugins/modules/cloud/vultr/vultr_ssh_key_facts.py @@ -0,0 +1,135 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# (c) 2018, Yanis Guenane +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['deprecated'], + 'supported_by': 
'community'} + +DOCUMENTATION = r''' +--- +module: vultr_ssh_key_facts +short_description: Gather facts about the Vultr SSH keys available. +description: + - Gather facts about SSH keys available. +author: "Yanis Guenane (@Spredzy)" +deprecated: + removed_in: "2.12" + why: Transformed into an info module. + alternative: Use M(vultr_ssh_key_info) instead. +extends_documentation_fragment: +- community.general.vultr + +''' + +EXAMPLES = r''' +- name: Gather Vultr SSH keys facts + local_action: + module: vultr_ssh_key_facts + +- name: Print the gathered facts + debug: + var: ansible_facts.vultr_ssh_key_facts +''' + +RETURN = r''' +--- +vultr_api: + description: Response from Vultr API with a few additions/modification + returned: success + type: complex + contains: + api_account: + description: Account used in the ini file to select the key + returned: success + type: str + sample: default + api_timeout: + description: Timeout used for the API requests + returned: success + type: int + sample: 60 + api_retries: + description: Amount of max retries for the API requests + returned: success + type: int + sample: 5 + api_retry_max_delay: + description: Exponential backoff delay in seconds between retries up to this max delay value. + returned: success + type: int + sample: 12 + version_added: '2.9' + api_endpoint: + description: Endpoint used for the API requests + returned: success + type: str + sample: "https://api.vultr.com" +ansible_facts: + description: Response from Vultr API + returned: success + type: complex + sample: + "vultr_ssh_key_facts": [ + { + "date_created": "2018-02-24 15:04:01", + "id": "5abf426403479", + "name": "me@home", + "ssh_key": "ssh-rsa AAAAB3Nz...NnPz me@home" + } + ] +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.vultr import ( + Vultr, + vultr_argument_spec, +) + + +class AnsibleVultrSSHKeyFacts(Vultr): + + def __init__(self, module): + super(AnsibleVultrSSHKeyFacts, self).__init__(module, "vultr_ssh_key_facts") + + self.returns = { + 'SSHKEYID': dict(key='id'), + 'name': dict(), + 'ssh_key': dict(), + 'date_created': dict(), + } + + def get_sshkeys(self): + return self.api_query(path="/v1/sshkey/list") + + +def parse_keys_list(keys_list): + if not keys_list: + return [] + + return [key for id, key in keys_list.items()] + + +def main(): + argument_spec = vultr_argument_spec() + + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + + sshkey_facts = AnsibleVultrSSHKeyFacts(module) + result = sshkey_facts.get_result(parse_keys_list(sshkey_facts.get_sshkeys())) + ansible_facts = { + 'vultr_ssh_key_facts': result['vultr_ssh_key_facts'] + } + module.exit_json(ansible_facts=ansible_facts, **result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/vultr/vultr_ssh_key_info.py b/plugins/modules/cloud/vultr/vultr_ssh_key_info.py new file mode 100644 index 0000000000..11a9f54553 --- /dev/null +++ b/plugins/modules/cloud/vultr/vultr_ssh_key_info.py @@ -0,0 +1,144 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# (c) 2018, Yanis Guenane +# (c) 2019, René Moser + +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = r''' +--- +module: vultr_ssh_key_info +short_description: Get 
information about the Vultr SSH keys available. +description: + - Get infos about SSH keys available. +author: + - "Yanis Guenane (@Spredzy)" + - "René Moser (@resmo)" +extends_documentation_fragment: +- community.general.vultr + +''' + +EXAMPLES = r''' +- name: Get Vultr SSH keys infos + vultr_ssh_key_info: + register: result + +- name: Print the infos + debug: + var: result.vultr_ssh_key_info +''' + +RETURN = r''' +--- +vultr_api: + description: Response from Vultr API with a few additions/modification + returned: success + type: complex + contains: + api_account: + description: Account used in the ini file to select the key + returned: success + type: str + sample: default + api_timeout: + description: Timeout used for the API requests + returned: success + type: int + sample: 60 + api_retries: + description: Amount of max retries for the API requests + returned: success + type: int + sample: 5 + api_retry_max_delay: + description: Exponential backoff delay in seconds between retries up to this max delay value. + returned: success + type: int + sample: 12 + version_added: '2.9' + api_endpoint: + description: Endpoint used for the API requests + returned: success + type: str + sample: "https://api.vultr.com" +vultr_ssh_key_info: + description: Response from Vultr API as list + returned: success + type: complex + contains: + id: + description: ID of the ssh key + returned: success + type: str + sample: 5904bc6ed9234 + name: + description: Name of the ssh key + returned: success + type: str + sample: my ssh key + date_created: + description: Date the ssh key was created + returned: success + type: str + sample: "2017-08-26 12:47:48" + ssh_key: + description: SSH public key + returned: success + type: str + sample: "ssh-rsa AA... someother@example.com" +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.vultr import ( + Vultr, + vultr_argument_spec, +) + + +class AnsibleVultrSSHKeyInfo(Vultr): + + def __init__(self, module): + super(AnsibleVultrSSHKeyInfo, self).__init__(module, "vultr_ssh_key_info") + + self.returns = { + 'SSHKEYID': dict(key='id'), + 'name': dict(), + 'ssh_key': dict(), + 'date_created': dict(), + } + + def get_sshkeys(self): + return self.api_query(path="/v1/sshkey/list") + + +def parse_keys_list(keys_list): + if not keys_list: + return [] + + return [key for id, key in keys_list.items()] + + +def main(): + argument_spec = vultr_argument_spec() + + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + + sshkey_info = AnsibleVultrSSHKeyInfo(module) + result = sshkey_info.get_result(parse_keys_list(sshkey_info.get_sshkeys())) + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/vultr/vultr_startup_script.py b/plugins/modules/cloud/vultr/vultr_startup_script.py new file mode 100644 index 0000000000..0b6fdb7f16 --- /dev/null +++ b/plugins/modules/cloud/vultr/vultr_startup_script.py @@ -0,0 +1,268 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (c) 2018, René Moser +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = r''' +--- +module: vultr_startup_script +short_description: Manages startup scripts on Vultr. 
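As with the server list earlier, /v1/sshkey/list returns a JSON object keyed by resource ID rather than an array, which is what the parse_keys_list() helper above flattens. A small illustration with made-up data:

raw_keys = {
    '5abf426403479': {'name': 'me@home', 'ssh_key': 'ssh-rsa AAAA...'},
    '5904bc6ed9234': {'name': 'work', 'ssh_key': 'ssh-rsa BBBB...'},
}
as_list = [key for _key_id, key in raw_keys.items()]
assert sorted(k['name'] for k in as_list) == ['me@home', 'work']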
+description: + - Create, update and remove startup scripts. +author: "René Moser (@resmo)" +options: + name: + description: + - The script name. + required: true + type: str + script_type: + description: + - The script type, can not be changed once created. + default: boot + choices: [ boot, pxe ] + aliases: [ type ] + type: str + script: + description: + - The script source code. + - Required if I(state=present). + type: str + state: + description: + - State of the script. + default: present + choices: [ present, absent ] + type: str +extends_documentation_fragment: +- community.general.vultr + +''' + +EXAMPLES = r''' +- name: ensure a pxe script exists, source from a file + vultr_startup_script: + name: my_web_script + script_type: pxe + script: "{{ lookup('file', 'path/to/script') }}" + +- name: ensure a boot script exists + vultr_startup_script: + name: vultr_startup_script + script: "#!/bin/bash\necho Hello World > /root/hello" + +- name: ensure a script is absent + vultr_startup_script: + name: my_web_script + state: absent +''' + +RETURN = r''' +--- +vultr_api: + description: Response from Vultr API with a few additions/modification + returned: success + type: complex + contains: + api_account: + description: Account used in the ini file to select the key + returned: success + type: str + sample: default + api_timeout: + description: Timeout used for the API requests + returned: success + type: int + sample: 60 + api_retries: + description: Amount of max retries for the API requests + returned: success + type: int + sample: 5 + api_retry_max_delay: + description: Exponential backoff delay in seconds between retries up to this max delay value. + returned: success + type: int + sample: 12 + version_added: '2.9' + api_endpoint: + description: Endpoint used for the API requests + returned: success + type: str + sample: "https://api.vultr.com" +vultr_startup_script: + description: Response from Vultr API + returned: success + type: complex + contains: + id: + description: ID of the startup script. + returned: success + type: str + sample: 249395 + name: + description: Name of the startup script. + returned: success + type: str + sample: my startup script + script: + description: The source code of the startup script. + returned: success + type: str + sample: "#!/bin/bash\necho Hello World > /root/hello" + script_type: + description: The type of the startup script. + returned: success + type: str + sample: pxe + date_created: + description: Date the startup script was created. + returned: success + type: str + sample: "2017-08-26 12:47:48" + date_modified: + description: Date the startup script was modified. 
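The class below follows the same ensure-present shape as the other Vultr resources: look the script up by name, create it when missing, update it only when the source differs, and skip the API write in check mode while still reporting changed plus a diff. A condensed sketch of that flow, with hypothetical callables:

def ensure_present(module, result, lookup, create, update, differs):
    resource = lookup()
    if not resource:
        result['changed'] = True
        if not module.check_mode:
            create()
        return lookup()
    if differs(resource):
        result['changed'] = True
        if not module.check_mode:
            update(resource)
        return lookup()
    return resource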
+ returned: success + type: str + sample: "2017-08-26 12:47:48" +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.vultr import ( + Vultr, + vultr_argument_spec, +) + + +class AnsibleVultrStartupScript(Vultr): + + def __init__(self, module): + super(AnsibleVultrStartupScript, self).__init__(module, "vultr_startup_script") + + self.returns = { + 'SCRIPTID': dict(key='id'), + 'type': dict(key='script_type'), + 'name': dict(), + 'script': dict(), + 'date_created': dict(), + 'date_modified': dict(), + } + + def get_script(self): + scripts = self.api_query(path="/v1/startupscript/list") + name = self.module.params.get('name') + if scripts: + for script_id, script_data in scripts.items(): + if script_data.get('name') == name: + return script_data + return {} + + def present_script(self): + script = self.get_script() + if not script: + script = self._create_script(script) + else: + script = self._update_script(script) + return script + + def _create_script(self, script): + self.result['changed'] = True + + data = { + 'name': self.module.params.get('name'), + 'script': self.module.params.get('script'), + 'type': self.module.params.get('script_type'), + } + + self.result['diff']['before'] = {} + self.result['diff']['after'] = data + + if not self.module.check_mode: + self.api_query( + path="/v1/startupscript/create", + method="POST", + data=data + ) + script = self.get_script() + return script + + def _update_script(self, script): + if script['script'] != self.module.params.get('script'): + self.result['changed'] = True + + data = { + 'SCRIPTID': script['SCRIPTID'], + 'script': self.module.params.get('script'), + } + + self.result['diff']['before'] = script + self.result['diff']['after'] = script.copy() + self.result['diff']['after'].update(data) + + if not self.module.check_mode: + self.api_query( + path="/v1/startupscript/update", + method="POST", + data=data + ) + script = self.get_script() + return script + + def absent_script(self): + script = self.get_script() + if script: + self.result['changed'] = True + + data = { + 'SCRIPTID': script['SCRIPTID'], + } + + self.result['diff']['before'] = script + self.result['diff']['after'] = {} + + if not self.module.check_mode: + self.api_query( + path="/v1/startupscript/destroy", + method="POST", + data=data + ) + return script + + +def main(): + argument_spec = vultr_argument_spec() + argument_spec.update(dict( + name=dict(type='str', required=True), + script=dict(type='str',), + script_type=dict(type='str', default='boot', choices=['boot', 'pxe'], aliases=['type']), + state=dict(type='str', choices=['present', 'absent'], default='present'), + )) + + module = AnsibleModule( + argument_spec=argument_spec, + required_if=[ + ('state', 'present', ['script']), + ], + supports_check_mode=True, + ) + + vultr_script = AnsibleVultrStartupScript(module) + if module.params.get('state') == "absent": + script = vultr_script.absent_script() + else: + script = vultr_script.present_script() + + result = vultr_script.get_result(script) + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/vultr/vultr_startup_script_facts.py b/plugins/modules/cloud/vultr/vultr_startup_script_facts.py new file mode 100644 index 0000000000..beed572a1b --- /dev/null +++ b/plugins/modules/cloud/vultr/vultr_startup_script_facts.py @@ -0,0 +1,139 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# (c) 2018, Yanis Guenane +# GNU General Public License v3.0+ (see 
COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['deprecated'], + 'supported_by': 'community'} + +DOCUMENTATION = r''' +--- +module: vultr_startup_script_facts +deprecated: + removed_in: '2.13' + why: Deprecated in favour of C(_info) module. + alternative: Use M(vultr_startup_script_info) instead. +short_description: Gather facts about the Vultr startup scripts available. +description: + - Gather facts about vultr_startup_scripts available. +author: "Yanis Guenane (@Spredzy)" +extends_documentation_fragment: +- community.general.vultr + +''' + +EXAMPLES = r''' +- name: Gather Vultr startup scripts facts + local_action: + module: vultr_startup_script_facts + +- name: Print the gathered facts + debug: + var: ansible_facts.vultr_startup_script_facts +''' + +RETURN = r''' +--- +vultr_api: + description: Response from Vultr API with a few additions/modification + returned: success + type: complex + contains: + api_account: + description: Account used in the ini file to select the key + returned: success + type: str + sample: default + api_timeout: + description: Timeout used for the API requests + returned: success + type: int + sample: 60 + api_retries: + description: Amount of max retries for the API requests + returned: success + type: int + sample: 5 + api_retry_max_delay: + description: Exponential backoff delay in seconds between retries up to this max delay value. + returned: success + type: int + sample: 12 + version_added: '2.9' + api_endpoint: + description: Endpoint used for the API requests + returned: success + type: str + sample: "https://api.vultr.com" +vultr_startup_script_facts: + description: Response from Vultr API + returned: success + type: complex + sample: + "vultr_startup_script_facts": [ + { + "date_created": "2018-07-19 08:38:36", + "date_modified": "2018-07-19 08:38:36", + "id": 327133, + "name": "lolo", + "script": "#!/bin/bash\necho Hello World > /root/hello", + "type": "boot" + } + ] +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.vultr import ( + Vultr, + vultr_argument_spec, +) + + +class AnsibleVultrStartupScriptFacts(Vultr): + + def __init__(self, module): + super(AnsibleVultrStartupScriptFacts, self).__init__(module, "vultr_startup_script_facts") + + self.returns = { + "SCRIPTID": dict(key='id', convert_to='int'), + "date_created": dict(), + "date_modified": dict(), + "name": dict(), + "script": dict(), + "type": dict(), + } + + def get_startupscripts(self): + return self.api_query(path="/v1/startupscript/list") + + +def parse_startupscript_list(startupscipts_list): + if not startupscipts_list: + return [] + + return [startupscript for id, startupscript in startupscipts_list.items()] + + +def main(): + argument_spec = vultr_argument_spec() + + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + + startupscript_facts = AnsibleVultrStartupScriptFacts(module) + result = startupscript_facts.get_result(parse_startupscript_list(startupscript_facts.get_startupscripts())) + ansible_facts = { + 'vultr_startup_script_facts': result['vultr_startup_script_facts'] + } + module.exit_json(ansible_facts=ansible_facts, **result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/vultr/vultr_startup_script_info.py b/plugins/modules/cloud/vultr/vultr_startup_script_info.py new file 
mode 100644 index 0000000000..9aa5358650 --- /dev/null +++ b/plugins/modules/cloud/vultr/vultr_startup_script_info.py @@ -0,0 +1,133 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# (c) 2018, Yanis Guenane +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = r''' +--- +module: vultr_startup_script_info +short_description: Gather information about the Vultr startup scripts available. +description: + - Gather information about vultr_startup_scripts available. +author: "Yanis Guenane (@Spredzy)" +extends_documentation_fragment: +- community.general.vultr + +''' + +EXAMPLES = r''' +- name: Gather Vultr startup scripts information + local_action: + module: vultr_startup_script_info + register: result + +- name: Print the gathered information + debug: + var: result.vultr_startup_script_info +''' + +RETURN = r''' +--- +vultr_api: + description: Response from Vultr API with a few additions/modification + returned: success + type: complex + contains: + api_account: + description: Account used in the ini file to select the key + returned: success + type: str + sample: default + api_timeout: + description: Timeout used for the API requests + returned: success + type: int + sample: 60 + api_retries: + description: Amount of max retries for the API requests + returned: success + type: int + sample: 5 + api_retry_max_delay: + description: Exponential backoff delay in seconds between retries up to this max delay value. + returned: success + type: int + sample: 12 + version_added: '2.9' + api_endpoint: + description: Endpoint used for the API requests + returned: success + type: str + sample: "https://api.vultr.com" +vultr_startup_script_info: + description: Response from Vultr API + returned: success + type: complex + sample: + "vultr_startup_script_info": [ + { + "date_created": "2018-07-19 08:38:36", + "date_modified": "2018-07-19 08:38:36", + "id": 327133, + "name": "lolo", + "script": "#!/bin/bash\necho Hello World > /root/hello", + "type": "boot" + } + ] +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.vultr import ( + Vultr, + vultr_argument_spec, +) + + +class AnsibleVultrStartupScriptInfo(Vultr): + + def __init__(self, module): + super(AnsibleVultrStartupScriptInfo, self).__init__(module, "vultr_startup_script_info") + + self.returns = { + "SCRIPTID": dict(key='id', convert_to='int'), + "date_created": dict(), + "date_modified": dict(), + "name": dict(), + "script": dict(), + "type": dict(), + } + + def get_startupscripts(self): + return self.api_query(path="/v1/startupscript/list") + + +def parse_startupscript_list(startupscipts_list): + if not startupscipts_list: + return [] + + return [startupscript for id, startupscript in startupscipts_list.items()] + + +def main(): + argument_spec = vultr_argument_spec() + + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + + startupscript_info = AnsibleVultrStartupScriptInfo(module) + result = startupscript_info.get_result(parse_startupscript_list(startupscript_info.get_startupscripts())) + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/vultr/vultr_user.py b/plugins/modules/cloud/vultr/vultr_user.py new file mode 100644 index 
0000000000..778127b0e0 --- /dev/null +++ b/plugins/modules/cloud/vultr/vultr_user.py @@ -0,0 +1,330 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# (c) 2017, René Moser +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = r''' +--- +module: vultr_user +short_description: Manages users on Vultr. +description: + - Create, update and remove users. +author: "René Moser (@resmo)" +options: + name: + description: + - Name of the user. + required: true + type: str + email: + description: + - Email of the user. + - Required if C(state=present). + type: str + password: + description: + - Password of the user. + - Only considered while creating a user or when C(force=yes). + type: str + force: + description: + - Password will only be changed when C(force=yes) is set. + default: no + type: bool + api_enabled: + description: + - Whether the API is enabled or not. + default: yes + type: bool + acls: + description: + - List of ACLs this user should have, see U(https://www.vultr.com/api/#user_user_list). + - Required if C(state=present). + - One or more choices from the list; some depend on each other. + choices: + - manage_users + - subscriptions + - provisioning + - billing + - support + - abuse + - dns + - upgrade + aliases: [ acl ] + type: list + state: + description: + - State of the user. + default: present + choices: [ present, absent ] + type: str +extends_documentation_fragment: +- community.general.vultr + +''' + +EXAMPLES = r''' +- name: Ensure a user exists + local_action: + module: vultr_user + name: john + email: john.doe@example.com + password: s3cr3t + acls: + - upgrade + - dns + - manage_users + - subscriptions + +- name: Remove a user + local_action: + module: vultr_user + name: john + state: absent +''' + +RETURN = r''' +--- +vultr_api: + description: Response from Vultr API with a few additions/modification + returned: success + type: complex + contains: + api_account: + description: Account used in the ini file to select the key + returned: success + type: str + sample: default + api_timeout: + description: Timeout used for the API requests + returned: success + type: int + sample: 60 + api_retries: + description: Amount of max retries for the API requests + returned: success + type: int + sample: 5 + api_retry_max_delay: + description: Exponential backoff delay in seconds between retries up to this max delay value. + returned: success + type: int + sample: 12 + version_added: '2.9' + api_endpoint: + description: Endpoint used for the API requests + returned: success + type: str + sample: "https://api.vultr.com" +vultr_user: + description: Response from Vultr API + returned: success + type: complex + contains: + id: + description: ID of the user. + returned: success + type: str + sample: 5904bc6ed9234 + api_key: + description: API key of the user. + returned: only after resource was created + type: str + sample: 567E6K567E6K567E6K567E6K567E6K + name: + description: Name of the user. + returned: success + type: str + sample: john + email: + description: Email of the user. + returned: success + type: str + sample: "john@example.com" + api_enabled: + description: Whether the API is enabled or not. + returned: success + type: bool + sample: true + acls: + description: List of ACLs of the user.
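In main() below, required_if=[('state', 'present', ['email', 'acls'])] makes email and acls mandatory only when state=present; removing a user needs just its name. A sketch of the rule semantics (check_required_if is illustrative, not the real AnsibleModule validator):

def check_required_if(params, rules):
    missing = []
    for key, value, required in rules:
        if params.get(key) == value:
            missing.extend(r for r in required if params.get(r) is None)
    return missing

assert check_required_if({'state': 'present'},
                         [('state', 'present', ['email', 'acls'])]) == ['email', 'acls']
assert check_required_if({'state': 'absent'},
                         [('state', 'present', ['email', 'acls'])]) == []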
+ returned: success + type: list + sample: [manage_users, support, upgrade] +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.vultr import ( + Vultr, + vultr_argument_spec, +) + + +ACLS = [ + 'manage_users', + 'subscriptions', + 'provisioning', + 'billing', + 'support', + 'abuse', + 'dns', + 'upgrade', +] + + +class AnsibleVultrUser(Vultr): + + def __init__(self, module): + super(AnsibleVultrUser, self).__init__(module, "vultr_user") + + self.returns = { + 'USERID': dict(key='id'), + 'name': dict(), + 'email': dict(), + 'api_enabled': dict(convert_to='bool'), + 'acls': dict(), + 'api_key': dict() + } + + def _common_args(self): + return { + 'name': self.module.params.get('name'), + 'email': self.module.params.get('email'), + 'acls': self.module.params.get('acls'), + 'password': self.module.params.get('password'), + 'api_enabled': self.get_yes_or_no('api_enabled'), + } + + def get_user(self): + users = self.api_query(path="/v1/user/list") + for user in users or []: + if user.get('name') == self.module.params.get('name'): + return user + return {} + + def present_user(self): + user = self.get_user() + if not user: + user = self._create_user(user) + else: + user = self._update_user(user) + return user + + def _has_changed(self, user, data): + for k, v in data.items(): + if k not in user: + continue + elif isinstance(v, list): + for i in v: + if i not in user[k]: + return True + elif data[k] != user[k]: + return True + return False + + def _create_user(self, user): + self.module.fail_on_missing_params(required_params=['password']) + + self.result['changed'] = True + + data = self._common_args() + self.result['diff']['before'] = {} + self.result['diff']['after'] = data + + if not self.module.check_mode: + user = self.api_query( + path="/v1/user/create", + method="POST", + data=data + ) + user.update(self.get_user()) + return user + + def _update_user(self, user): + data = self._common_args() + data.update({ + 'USERID': user['USERID'], + }) + + force = self.module.params.get('force') + if not force: + del data['password'] + + if force or self._has_changed(user=user, data=data): + self.result['changed'] = True + + self.result['diff']['before'] = user + self.result['diff']['after'] = user.copy() + self.result['diff']['after'].update(data) + + if not self.module.check_mode: + self.api_query( + path="/v1/user/update", + method="POST", + data=data + ) + user = self.get_user() + return user + + def absent_user(self): + user = self.get_user() + if user: + self.result['changed'] = True + + data = { + 'USERID': user['USERID'], + } + + self.result['diff']['before'] = user + self.result['diff']['after'] = {} + + if not self.module.check_mode: + self.api_query( + path="/v1/user/delete", + method="POST", + data=data + ) + return user + + +def main(): + argument_spec = vultr_argument_spec() + argument_spec.update(dict( + name=dict(type='str', required=True), + email=dict(type='str',), + password=dict(type='str', no_log=True), + force=dict(type='bool', default=False), + api_enabled=dict(type='bool', default=True), + acls=dict(type='list', choices=ACLS, aliases=['acl']), + state=dict(type='str', choices=['present', 'absent'], default='present'), + )) + + module = AnsibleModule( + argument_spec=argument_spec, + required_if=[ + ('state', 'present', ['email', 'acls']), + ], + supports_check_mode=True, + ) + + vultr_user = AnsibleVultrUser(module) + if module.params.get('state') == "absent": + user = vultr_user.absent_user() + else: + user 
= vultr_user.present_user() + + result = vultr_user.get_result(user) + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/vultr/vultr_user_facts.py b/plugins/modules/cloud/vultr/vultr_user_facts.py new file mode 100644 index 0000000000..1e23c00f28 --- /dev/null +++ b/plugins/modules/cloud/vultr/vultr_user_facts.py @@ -0,0 +1,129 @@ +#!/usr/bin/python +# +# (c) 2018, Yanis Guenane +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['deprecated'], + 'supported_by': 'community'} + +DOCUMENTATION = r''' +--- +module: vultr_user_facts +short_description: Gather facts about the Vultr user available. +description: + - Gather facts about users available in Vultr. +author: "Yanis Guenane (@Spredzy)" +deprecated: + removed_in: "2.12" + why: Transformed into an info module. + alternative: Use M(vultr_user_info) instead. +extends_documentation_fragment: +- community.general.vultr + +''' + +EXAMPLES = r''' +- name: Gather Vultr user facts + local_action: + module: vultr_user_facts + +- name: Print the gathered facts + debug: + var: ansible_facts.vultr_user_facts +''' + +RETURN = r''' +--- +vultr_api: + description: Response from Vultr API with a few additions/modification + returned: success + type: complex + contains: + api_account: + description: Account used in the ini file to select the key + returned: success + type: str + sample: default + api_timeout: + description: Timeout used for the API requests + returned: success + type: int + sample: 60 + api_retries: + description: Amount of max retries for the API requests + returned: success + type: int + sample: 5 + api_retry_max_delay: + description: Exponential backoff delay in seconds between retries up to this max delay value. 
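The _has_changed() helper in vultr_user above compares the desired values against the current user; for lists it only verifies that every desired item is present, so ACLs already on the user but absent from the task do not, on their own, trigger an update. A behaviour check with made-up data (has_changed_sketch mirrors the helper):

def has_changed_sketch(current, desired):
    for key, value in desired.items():
        if key not in current:
            continue
        if isinstance(value, list):
            if any(item not in current[key] for item in value):
                return True
        elif value != current[key]:
            return True
    return False

user = {'name': 'john', 'acls': ['support', 'dns', 'billing']}
assert not has_changed_sketch(user, {'acls': ['support', 'dns']})   # subset: no change
assert has_changed_sketch(user, {'acls': ['support', 'upgrade']})   # 'upgrade' missing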
+ returned: success + type: int + sample: 12 + version_added: '2.9' + api_endpoint: + description: Endpoint used for the API requests + returned: success + type: str + sample: "https://api.vultr.com" +vultr_user_facts: + description: Response from Vultr API + returned: success + type: complex + sample: + "vultr_user_facts": [ + { + "acls": [], + "api_enabled": "yes", + "email": "mytestuser@example.com", + "id": "a235b4f45e87f", + "name": "mytestuser" + } + ] +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.vultr import ( + Vultr, + vultr_argument_spec, +) + + +class AnsibleVultrUserFacts(Vultr): + + def __init__(self, module): + super(AnsibleVultrUserFacts, self).__init__(module, "vultr_user_facts") + + self.returns = { + "USERID": dict(key='id'), + "acls": dict(), + "api_enabled": dict(), + "email": dict(), + "name": dict() + } + + def get_regions(self): + return self.api_query(path="/v1/user/list") + + +def main(): + argument_spec = vultr_argument_spec() + + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + + user_facts = AnsibleVultrUserFacts(module) + result = user_facts.get_result(user_facts.get_regions()) + ansible_facts = { + 'vultr_user_facts': result['vultr_user_facts'] + } + module.exit_json(ansible_facts=ansible_facts, **result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/vultr/vultr_user_info.py b/plugins/modules/cloud/vultr/vultr_user_info.py new file mode 100644 index 0000000000..268a0acbb8 --- /dev/null +++ b/plugins/modules/cloud/vultr/vultr_user_info.py @@ -0,0 +1,147 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (c) 2018, Yanis Guenane +# Copyright (c) 2019, René Moser +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = r''' +--- +module: vultr_user_info +short_description: Get information about the Vultr user available. +description: + - Get infos about users available in Vultr. +author: + - "Yanis Guenane (@Spredzy)" + - "René Moser (@resmo)" +extends_documentation_fragment: +- community.general.vultr + +''' + +EXAMPLES = r''' +- name: Get Vultr user infos + vultr_user_info: + register: result + +- name: Print the infos + debug: + var: result.vultr_user_info +''' + +RETURN = r''' +--- +vultr_api: + description: Response from Vultr API with a few additions/modification + returned: success + type: complex + contains: + api_account: + description: Account used in the ini file to select the key + returned: success + type: str + sample: default + api_timeout: + description: Timeout used for the API requests + returned: success + type: int + sample: 60 + api_retries: + description: Amount of max retries for the API requests + returned: success + type: int + sample: 5 + api_retry_max_delay: + description: Exponential backoff delay in seconds between retries up to this max delay value. + returned: success + type: int + sample: 12 + version_added: '2.9' + api_endpoint: + description: Endpoint used for the API requests + returned: success + type: str + sample: "https://api.vultr.com" +vultr_user_info: + description: Response from Vultr API as list + returned: available + type: complex + contains: + id: + description: ID of the user. 
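Deprecated facts modules such as vultr_user_facts above return the payload twice, once as a regular module result and once under ansible_facts, so older playbooks can read ansible_facts.vultr_user_facts without register. A minimal sketch of that wrapper (exit_with_facts is hypothetical):

def exit_with_facts(module, result, fact_key):
    # Mirror one result key under ansible_facts for legacy consumers,
    # e.g. exit_with_facts(module, result, 'vultr_user_facts').
    module.exit_json(ansible_facts={fact_key: result[fact_key]}, **result)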
+ returned: success + type: str + sample: 5904bc6ed9234 + api_key: + description: API key of the user. + returned: only after resource was created + type: str + sample: 567E6K567E6K567E6K567E6K567E6K + name: + description: Name of the user. + returned: success + type: str + sample: john + email: + description: Email of the user. + returned: success + type: str + sample: "john@example.com" + api_enabled: + description: Whether the API is enabled or not. + returned: success + type: bool + sample: true + acls: + description: List of ACLs of the user. + returned: success + type: list + sample: [ manage_users, support, upgrade ] +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.vultr import ( + Vultr, + vultr_argument_spec, +) + + +class AnsibleVultrUserInfo(Vultr): + + def __init__(self, module): + super(AnsibleVultrUserInfo, self).__init__(module, "vultr_user_info") + + self.returns = { + "USERID": dict(key='id'), + "acls": dict(), + "api_enabled": dict(), + "email": dict(), + "name": dict() + } + + def get_regions(self): + return self.api_query(path="/v1/user/list") + + +def main(): + argument_spec = vultr_argument_spec() + + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + + user_info = AnsibleVultrUserInfo(module) + result = user_info.get_result(user_info.get_regions()) + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/webfaction/webfaction_app.py b/plugins/modules/cloud/webfaction/webfaction_app.py new file mode 100644 index 0000000000..f1dd0d41a6 --- /dev/null +++ b/plugins/modules/cloud/webfaction/webfaction_app.py @@ -0,0 +1,195 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2015, Quentin Stafford-Fraser, with contributions gratefully acknowledged from: +# * Andy Baker +# * Federico Tarantini +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +# Create a Webfaction application using Ansible and the Webfaction API +# +# Valid application types can be found by looking here: +# https://docs.webfaction.com/xmlrpc-api/apps.html#application-types + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: webfaction_app +short_description: Add or remove applications on a Webfaction host +description: + - Add or remove applications on a Webfaction host. Further documentation at U(https://github.com/quentinsf/ansible-webfaction). +author: Quentin Stafford-Fraser (@quentinsf) +notes: + - > + You can run playbooks that use this on a local machine, or on a Webfaction host, or elsewhere, since the scripts use the remote webfaction API. + The location is not important. However, running them on multiple hosts I(simultaneously) is best avoided. If you don't specify I(localhost) as + your host, you may want to add C(serial: 1) to the plays. + - See `the webfaction API `_ for more info. + +options: + name: + description: + - The name of the application + required: true + + state: + description: + - Whether the application should exist + choices: ['present', 'absent'] + default: "present" + + type: + description: + - The type of application to create. See the Webfaction docs at U(https://docs.webfaction.com/xmlrpc-api/apps.html) for a list. 
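All of the webfaction_* modules that follow drive one XML-RPC endpoint: login() returns a session token plus the account record, and the token is the first argument to every subsequent call. A minimal sketch of that handshake (credentials are placeholders):

from ansible.module_utils.six.moves import xmlrpc_client

api = xmlrpc_client.ServerProxy('https://api.webfaction.com/')
# login() optionally takes a third argument, the machine name.
session_id, account = api.login('LOGIN_NAME', 'LOGIN_PASSWORD')
apps = api.list_apps(session_id)  # enumerate existing applications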
+ required: true + + autostart: + description: + - Whether the app should restart with an C(autostart.cgi) script + type: bool + default: 'no' + + extra_info: + description: + - Any extra parameters required by the app + default: '' + + port_open: + description: + - If the port should be opened + type: bool + default: 'no' + + login_name: + description: + - The webfaction account to use + required: true + + login_password: + description: + - The webfaction password to use + required: true + + machine: + description: + - The machine name to use (optional for accounts with only one machine) + +''' + +EXAMPLES = ''' + - name: Create a test app + webfaction_app: + name: "my_wsgi_app1" + state: present + type: mod_wsgi35-python27 + login_name: "{{webfaction_user}}" + login_password: "{{webfaction_passwd}}" + machine: "{{webfaction_machine}}" +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.six.moves import xmlrpc_client + + +webfaction = xmlrpc_client.ServerProxy('https://api.webfaction.com/') + + +def main(): + + module = AnsibleModule( + argument_spec=dict( + name=dict(required=True), + state=dict(required=False, choices=['present', 'absent'], default='present'), + type=dict(required=True), + autostart=dict(required=False, type='bool', default=False), + extra_info=dict(required=False, default=""), + port_open=dict(required=False, type='bool', default=False), + login_name=dict(required=True), + login_password=dict(required=True, no_log=True), + machine=dict(required=False, default=None), + ), + supports_check_mode=True + ) + app_name = module.params['name'] + app_type = module.params['type'] + app_state = module.params['state'] + + if module.params['machine']: + session_id, account = webfaction.login( + module.params['login_name'], + module.params['login_password'], + module.params['machine'] + ) + else: + session_id, account = webfaction.login( + module.params['login_name'], + module.params['login_password'] + ) + + app_list = webfaction.list_apps(session_id) + app_map = dict([(i['name'], i) for i in app_list]) + existing_app = app_map.get(app_name) + + result = {} + + # Here's where the real stuff happens + + if app_state == 'present': + + # Does an app with this name already exist? + if existing_app: + if existing_app['type'] != app_type: + module.fail_json(msg="App already exists with different type. Please fix by hand.") + + # If it exists with the right type, we don't change it + # Should check other parameters. + module.exit_json( + changed=False, + result=existing_app, + ) + + if not module.check_mode: + # If this isn't a dry run, create the app + result.update( + webfaction.create_app( + session_id, app_name, app_type, + module.boolean(module.params['autostart']), + module.params['extra_info'], + module.boolean(module.params['port_open']) + ) + ) + + elif app_state == 'absent': + + # If the app's already not there, nothing changed.
+ if not existing_app: + module.exit_json( + changed=False, + ) + + if not module.check_mode: + # If this isn't a dry run, delete the app + result.update( + webfaction.delete_app(session_id, app_name) + ) + + else: + module.fail_json(msg="Unknown state specified: {0}".format(app_state)) + + module.exit_json( + changed=True, + result=result + ) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/webfaction/webfaction_db.py b/plugins/modules/cloud/webfaction/webfaction_db.py new file mode 100644 index 0000000000..831be8e358 --- /dev/null +++ b/plugins/modules/cloud/webfaction/webfaction_db.py @@ -0,0 +1,193 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2015, Quentin Stafford-Fraser, with contributions gratefully acknowledged from: +# * Andy Baker +# * Federico Tarantini +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +# Create a webfaction database using Ansible and the Webfaction API + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: webfaction_db +short_description: Add or remove a database on Webfaction +description: + - Add or remove a database on a Webfaction host. Further documentation at https://github.com/quentinsf/ansible-webfaction. +author: Quentin Stafford-Fraser (@quentinsf) +notes: + - > + You can run playbooks that use this on a local machine, or on a Webfaction host, or elsewhere, since the scripts use the remote webfaction API. + The location is not important. However, running them on multiple hosts I(simultaneously) is best avoided. If you don't specify I(localhost) as + your host, you may want to add C(serial: 1) to the plays. + - See `the webfaction API `_ for more info. +options: + + name: + description: + - The name of the database + required: true + + state: + description: + - Whether the database should exist + choices: ['present', 'absent'] + default: "present" + + type: + description: + - The type of database to create. + required: true + choices: ['mysql', 'postgresql'] + + password: + description: + - The password for the new database user. + + login_name: + description: + - The webfaction account to use + required: true + + login_password: + description: + - The webfaction password to use + required: true + + machine: + description: + - The machine name to use (optional for accounts with only one machine) +''' + +EXAMPLES = ''' + # This will also create a default DB user with the same + # name as the database, and the specified password. + + - name: Create a database + webfaction_db: + name: "{{webfaction_user}}_db1" + password: mytestsql + type: mysql + login_name: "{{webfaction_user}}" + login_password: "{{webfaction_passwd}}" + machine: "{{webfaction_machine}}" + + # Note that, for symmetry's sake, deleting a database using + # 'state: absent' will also delete the matching user. + +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.six.moves import xmlrpc_client + + +webfaction = xmlrpc_client.ServerProxy('https://api.webfaction.com/') + + +def main(): + + module = AnsibleModule( + argument_spec=dict( + name=dict(required=True), + state=dict(required=False, choices=['present', 'absent'], default='present'),
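# password is only consumed at creation time, when the database and its
# default user are first made; existing passwords are neither verified
# nor rotated on later runs.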
+ type=dict(required=True, choices=['mysql', 'postgresql']), + password=dict(required=False, default=None, no_log=True), + login_name=dict(required=True), + login_password=dict(required=True, no_log=True), + machine=dict(required=False, default=None), + ), + supports_check_mode=True + ) + db_name = module.params['name'] + db_state = module.params['state'] + db_type = module.params['type'] + db_passwd = module.params['password'] + + if module.params['machine']: + session_id, account = webfaction.login( + module.params['login_name'], + module.params['login_password'], + module.params['machine'] + ) + else: + session_id, account = webfaction.login( + module.params['login_name'], + module.params['login_password'] + ) + + db_list = webfaction.list_dbs(session_id) + db_map = dict([(i['name'], i) for i in db_list]) + existing_db = db_map.get(db_name) + + user_list = webfaction.list_db_users(session_id) + user_map = dict([(i['username'], i) for i in user_list]) + existing_user = user_map.get(db_name) + + result = {} + + # Here's where the real stuff happens + + if db_state == 'present': + + # Does a database with this name already exist? + if existing_db: + # Yes, but of a different type - fail + if existing_db['db_type'] != db_type: + module.fail_json(msg="Database already exists but is a different type. Please fix by hand.") + + # If it exists with the right type, we don't change anything. + module.exit_json( + changed=False, + ) + + if not module.check_mode: + # If this isn't a dry run, create the db + # and default user. + result.update( + webfaction.create_db( + session_id, db_name, db_type, db_passwd + ) + ) + + elif db_state == 'absent': + + # If this isn't a dry run... + if not module.check_mode: + + if not (existing_db or existing_user): + module.exit_json(changed=False,) + + if existing_db: + # Delete the db if it exists + result.update( + webfaction.delete_db(session_id, db_name, db_type) + ) + + if existing_user: + # Delete the default db user if it exists + result.update( + webfaction.delete_db_user(session_id, db_name, db_type) + ) + + else: + module.fail_json(msg="Unknown state specified: {0}".format(db_state)) + + module.exit_json( + changed=True, + result=result + ) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/webfaction/webfaction_domain.py b/plugins/modules/cloud/webfaction/webfaction_domain.py new file mode 100644 index 0000000000..b75cf1cffa --- /dev/null +++ b/plugins/modules/cloud/webfaction/webfaction_domain.py @@ -0,0 +1,167 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2015, Quentin Stafford-Fraser +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +# Create Webfaction domains and subdomains using Ansible and the Webfaction API + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: webfaction_domain +short_description: Add or remove domains and subdomains on Webfaction +description: + - Add or remove domains or subdomains on a Webfaction host. Further documentation at https://github.com/quentinsf/ansible-webfaction. +author: Quentin Stafford-Fraser (@quentinsf) +notes: + - If you are I(deleting) domains by using C(state=absent), then note that if you specify subdomains, just those particular subdomains will be deleted. + If you don't specify subdomains, the domain will be deleted. 
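In webfaction_domain below, idempotence is a superset test rather than an equality test: the module only guarantees that the requested subdomains exist and never prunes extras (removal is explicit via state=absent, as the note above describes). An illustration:

existing = {'www', 'blog', 'mail'}
requested = {'www', 'blog'}
assert existing >= requested              # already satisfied: changed=False
assert not (existing >= {'www', 'shop'})  # 'shop' missing: create_domain is called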
+ - > + You can run playbooks that use this on a local machine, or on a Webfaction host, or elsewhere, since the scripts use the remote webfaction API. + The location is not important. However, running them on multiple hosts I(simultaneously) is best avoided. If you don't specify I(localhost) as + your host, you may want to add C(serial: 1) to the plays. + - See `the webfaction API `_ for more info. + +options: + + name: + description: + - The name of the domain + required: true + + state: + description: + - Whether the domain should exist + choices: ['present', 'absent'] + default: "present" + + subdomains: + description: + - Any subdomains to create. + default: [] + + login_name: + description: + - The webfaction account to use + required: true + + login_password: + description: + - The webfaction password to use + required: true +''' + +EXAMPLES = ''' + - name: Create a test domain + webfaction_domain: + name: mydomain.com + state: present + subdomains: + - www + - blog + login_name: "{{webfaction_user}}" + login_password: "{{webfaction_passwd}}" + + - name: Delete test domain and any subdomains + webfaction_domain: + name: mydomain.com + state: absent + login_name: "{{webfaction_user}}" + login_password: "{{webfaction_passwd}}" + +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.six.moves import xmlrpc_client + + +webfaction = xmlrpc_client.ServerProxy('https://api.webfaction.com/') + + +def main(): + + module = AnsibleModule( + argument_spec=dict( + name=dict(required=True), + state=dict(required=False, choices=['present', 'absent'], default='present'), + subdomains=dict(required=False, default=[], type='list'), + login_name=dict(required=True), + login_password=dict(required=True, no_log=True), + ), + supports_check_mode=True + ) + domain_name = module.params['name'] + domain_state = module.params['state'] + domain_subdomains = module.params['subdomains'] + + session_id, account = webfaction.login( + module.params['login_name'], + module.params['login_password'] + ) + + domain_list = webfaction.list_domains(session_id) + domain_map = dict([(i['domain'], i) for i in domain_list]) + existing_domain = domain_map.get(domain_name) + + result = {} + + # Here's where the real stuff happens + + if domain_state == 'present': + + # Does an app with this name already exist? + if existing_domain: + + if set(existing_domain['subdomains']) >= set(domain_subdomains): + # If it exists with the right subdomains, we don't change anything. + module.exit_json( + changed=False, + ) + + positional_args = [session_id, domain_name] + domain_subdomains + + if not module.check_mode: + # If this isn't a dry run, create the app + # print positional_args + result.update( + webfaction.create_domain( + *positional_args + ) + ) + + elif domain_state == 'absent': + + # If the app's already not there, nothing changed. 
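+        # As the DOCUMENTATION notes describe: any subdomains given are appended
+        # to positional_args below, so only those subdomains get deleted; with no
+        # subdomains specified, the whole domain is removed.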
+ if not existing_domain: + module.exit_json( + changed=False, + ) + + positional_args = [session_id, domain_name] + domain_subdomains + + if not module.check_mode: + # If this isn't a dry run, delete the app + result.update( + webfaction.delete_domain(*positional_args) + ) + + else: + module.fail_json(msg="Unknown state specified: {0}".format(domain_state)) + + module.exit_json( + changed=True, + result=result + ) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/webfaction/webfaction_mailbox.py b/plugins/modules/cloud/webfaction/webfaction_mailbox.py new file mode 100644 index 0000000000..7a50b3ef95 --- /dev/null +++ b/plugins/modules/cloud/webfaction/webfaction_mailbox.py @@ -0,0 +1,135 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2015, Quentin Stafford-Fraser and Andy Baker +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +# Create webfaction mailbox using Ansible and the Webfaction API + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: webfaction_mailbox +short_description: Add or remove mailboxes on Webfaction +description: + - Add or remove mailboxes on a Webfaction account. Further documentation at https://github.com/quentinsf/ansible-webfaction. +author: Quentin Stafford-Fraser (@quentinsf) +notes: + - > + You can run playbooks that use this on a local machine, or on a Webfaction host, or elsewhere, since the scripts use the remote webfaction API. + The location is not important. However, running them on multiple hosts I(simultaneously) is best avoided. If you don't specify I(localhost) as + your host, you may want to add C(serial: 1) to the plays. + - See `the webfaction API `_ for more info. 
+options: + + mailbox_name: + description: + - The name of the mailbox + required: true + + mailbox_password: + description: + - The password for the mailbox + required: true + + state: + description: + - Whether the mailbox should exist + choices: ['present', 'absent'] + default: "present" + + login_name: + description: + - The webfaction account to use + required: true + + login_password: + description: + - The webfaction password to use + required: true +''' + +EXAMPLES = ''' + - name: Create a mailbox + webfaction_mailbox: + mailbox_name="mybox" + mailbox_password="myboxpw" + state=present + login_name={{webfaction_user}} + login_password={{webfaction_passwd}} +''' + + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.six.moves import xmlrpc_client + + +webfaction = xmlrpc_client.ServerProxy('https://api.webfaction.com/') + + +def main(): + + module = AnsibleModule( + argument_spec=dict( + mailbox_name=dict(required=True), + mailbox_password=dict(required=True, no_log=True), + state=dict(required=False, choices=['present', 'absent'], default='present'), + login_name=dict(required=True), + login_password=dict(required=True, no_log=True), + ), + supports_check_mode=True + ) + + mailbox_name = module.params['mailbox_name'] + site_state = module.params['state'] + + session_id, account = webfaction.login( + module.params['login_name'], + module.params['login_password'] + ) + + mailbox_list = [x['mailbox'] for x in webfaction.list_mailboxes(session_id)] + existing_mailbox = mailbox_name in mailbox_list + + result = {} + + # Here's where the real stuff happens + + if site_state == 'present': + + # Does a mailbox with this name already exist? + if existing_mailbox: + module.exit_json(changed=False,) + + positional_args = [session_id, mailbox_name] + + if not module.check_mode: + # If this isn't a dry run, create the mailbox + result.update(webfaction.create_mailbox(*positional_args)) + + elif site_state == 'absent': + + # If the mailbox is already not there, nothing changed. + if not existing_mailbox: + module.exit_json(changed=False) + + if not module.check_mode: + # If this isn't a dry run, delete the mailbox + result.update(webfaction.delete_mailbox(session_id, mailbox_name)) + + else: + module.fail_json(msg="Unknown state specified: {0}".format(site_state)) + + module.exit_json(changed=True, result=result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/webfaction/webfaction_site.py b/plugins/modules/cloud/webfaction/webfaction_site.py new file mode 100644 index 0000000000..aa107aa9aa --- /dev/null +++ b/plugins/modules/cloud/webfaction/webfaction_site.py @@ -0,0 +1,203 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2015, Quentin Stafford-Fraser +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +# Create Webfaction website using Ansible and the Webfaction API + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: webfaction_site +short_description: Add or remove a website on a Webfaction host +description: + - Add or remove a website on a Webfaction host. Further documentation at https://github.com/quentinsf/ansible-webfaction. +author: Quentin Stafford-Fraser (@quentinsf) +notes: + - Sadly, you I(do) need to know your webfaction hostname for the C(host) parameter. 
But at least, unlike the API, you don't need to know the IP + address. You can use a DNS name. + - If a site of the same name exists in the account but on a different host, the operation will exit. + - > + You can run playbooks that use this on a local machine, or on a Webfaction host, or elsewhere, since the scripts use the remote webfaction API. + The location is not important. However, running them on multiple hosts I(simultaneously) is best avoided. If you don't specify I(localhost) as + your host, you may want to add C(serial: 1) to the plays. + - See `the webfaction API `_ for more info. + +options: + + name: + description: + - The name of the website + required: true + + state: + description: + - Whether the website should exist + choices: ['present', 'absent'] + default: "present" + + host: + description: + - The webfaction host on which the site should be created. + required: true + + https: + description: + - Whether or not to use HTTPS + type: bool + default: 'no' + + site_apps: + description: + - A mapping of URLs to apps + default: [] + + subdomains: + description: + - A list of subdomains associated with this site. + default: [] + + login_name: + description: + - The webfaction account to use + required: true + + login_password: + description: + - The webfaction password to use + required: true +''' + +EXAMPLES = ''' + - name: create website + webfaction_site: + name: testsite1 + state: present + host: myhost.webfaction.com + subdomains: + - 'testsite1.my_domain.org' + site_apps: + - ['testapp1', '/'] + https: no + login_name: "{{webfaction_user}}" + login_password: "{{webfaction_passwd}}" +''' + +import socket + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.six.moves import xmlrpc_client + + +webfaction = xmlrpc_client.ServerProxy('https://api.webfaction.com/') + + +def main(): + + module = AnsibleModule( + argument_spec=dict( + name=dict(required=True), + state=dict(required=False, choices=['present', 'absent'], default='present'), + # You can specify an IP address or hostname. + host=dict(required=True), + https=dict(required=False, type='bool', default=False), + subdomains=dict(required=False, type='list', default=[]), + site_apps=dict(required=False, type='list', default=[]), + login_name=dict(required=True), + login_password=dict(required=True, no_log=True), + ), + supports_check_mode=True + ) + site_name = module.params['name'] + site_state = module.params['state'] + site_host = module.params['host'] + site_ip = socket.gethostbyname(site_host) + + session_id, account = webfaction.login( + module.params['login_name'], + module.params['login_password'] + ) + + site_list = webfaction.list_websites(session_id) + site_map = dict([(i['name'], i) for i in site_list]) + existing_site = site_map.get(site_name) + + result = {} + + # Here's where the real stuff happens + + if site_state == 'present': + + # Does a site with this name already exist? + if existing_site: + + # If yes, but it's on a different IP address, then fail. + # If we wanted to allow relocation, we could add a 'relocate=true' option + # which would get the existing IP address, delete the site there, and create it + # at the new address. A bit dangerous, perhaps, so for now we'll require manual + # deletion if it's on another host. + + if existing_site['ip'] != site_ip: + module.fail_json(msg="Website already exists with a different IP address. Please fix by hand.") + + # If it's on this host and the key parameters are the same, nothing needs to be done. 
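+            # The comparison below checks three things: the https flag, the set
+            # of subdomains, and the app-to-URL-path mapping. For example (values
+            # borrowed from EXAMPLES), a site created with site_apps
+            # [['testapp1', '/']] only counts as unchanged while 'testapp1' is
+            # still mounted at '/'.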
+ + if (existing_site['https'] == module.boolean(module.params['https'])) and \ + (set(existing_site['subdomains']) == set(module.params['subdomains'])) and \ + (dict(existing_site['website_apps']) == dict(module.params['site_apps'])): + module.exit_json( + changed=False + ) + + positional_args = [ + session_id, site_name, site_ip, + module.boolean(module.params['https']), + module.params['subdomains'], + ] + for a in module.params['site_apps']: + positional_args.append((a[0], a[1])) + + if not module.check_mode: + # If this isn't a dry run, create or modify the site + result.update( + webfaction.create_website( + *positional_args + ) if not existing_site else webfaction.update_website( + *positional_args + ) + ) + + elif site_state == 'absent': + + # If the site's already not there, nothing changed. + if not existing_site: + module.exit_json( + changed=False, + ) + + if not module.check_mode: + # If this isn't a dry run, delete the site + result.update( + webfaction.delete_website(session_id, site_name, site_ip) + ) + + else: + module.fail_json(msg="Unknown state specified: {0}".format(site_state)) + + module.exit_json( + changed=True, + result=result + ) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/xenserver/xenserver_guest.py b/plugins/modules/cloud/xenserver/xenserver_guest.py new file mode 100644 index 0000000000..df54aeef42 --- /dev/null +++ b/plugins/modules/cloud/xenserver/xenserver_guest.py @@ -0,0 +1,1934 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright: (c) 2018, Bojan Vitnik +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = r''' +--- +module: xenserver_guest +short_description: Manages virtual machines running on Citrix Hypervisor/XenServer host or pool +description: > + This module can be used to create new virtual machines from templates or other virtual machines, + modify various virtual machine components like network and disk, rename a virtual machine and + remove a virtual machine with associated components. +author: +- Bojan Vitnik (@bvitnik) +notes: +- Minimal supported version of XenServer is 5.6. +- Module was tested with XenServer 6.5, 7.1, 7.2, 7.6, Citrix Hypervisor 8.0, XCP-ng 7.6 and 8.0. +- 'To acquire XenAPI Python library, just run C(pip install XenAPI) on your Ansible Control Node. The library can also be found inside + Citrix Hypervisor/XenServer SDK (downloadable from Citrix website). Copy the XenAPI.py file from the SDK to your Python site-packages on your + Ansible Control Node to use it. Latest version of the library can also be acquired from GitHub: + U(https://raw.githubusercontent.com/xapi-project/xen-api/master/scripts/examples/python/XenAPI/XenAPI.py)' +- 'If no scheme is specified in C(hostname), module defaults to C(http://) because C(https://) is problematic in most setups. Make sure you are + accessing XenServer host in trusted environment or use C(https://) scheme explicitly.' +- 'To use C(https://) scheme for C(hostname) you have to either import host certificate to your OS certificate store or use C(validate_certs: no) + which requires XenAPI library from XenServer 7.2 SDK or newer and Python 2.7.9 or newer.' +- 'Network configuration inside a guest OS, by using C(networks.type), C(networks.ip), C(networks.gateway) etc. 
parameters, is supported on
+  XenServer 7.0 or newer for Windows guests by using official XenServer Guest agent support for network configuration. The module will try to
+  detect if such support is available and utilize it, else it will use a custom method of configuration via xenstore. Since the XenServer Guest
+  agent only supports None and Static types of network configuration, where None means a DHCP configured interface, the C(networks.type) and C(networks.type6)
+  values C(none) and C(dhcp) have the same effect. More info here:
+  U(https://www.citrix.com/community/citrix-developer/citrix-hypervisor-developer/citrix-hypervisor-developing-products/citrix-hypervisor-staticip.html)'
+- 'On platforms without official support for network configuration inside a guest OS, network parameters will be written to the xenstore
+  C(vm-data/networks/<vif_device>) key. Parameters can be inspected by using the C(xenstore ls) and C(xenstore read) tools on \*nix guests or through the
+  WMI interface on Windows guests. They can also be found in the VM facts C(instance.xenstore_data) key as returned by the module. It is up to the user
+  to implement boot time scripts or a custom agent that will read the parameters from xenstore and configure the network with the given parameters.
+  Take note that for xenstore data to become available inside a guest, a VM restart is needed, hence the module will require a VM restart if any
+  parameter is changed. This is a limitation of XenAPI and xenstore. Considering these limitations, network configuration through xenstore is most
+  useful for bootstrapping newly deployed VMs, much less for reconfiguring existing ones. More info here:
+  U(https://support.citrix.com/article/CTX226713)'
+requirements:
+- python >= 2.6
+- XenAPI
+options:
+  state:
+    description:
+    - Specify the state VM should be in.
+    - If C(state) is set to C(present) and VM exists, ensure the VM configuration conforms to given parameters.
+    - If C(state) is set to C(present) and VM does not exist, then VM is deployed with given parameters.
+    - If C(state) is set to C(absent) and VM exists, then VM is removed with its associated components.
+    - If C(state) is set to C(poweredon) and VM does not exist, then VM is deployed with given parameters and powered on automatically.
+    type: str
+    default: present
+    choices: [ present, absent, poweredon ]
+  name:
+    description:
+    - Name of the VM to work with.
+    - VMs running on XenServer do not necessarily have unique names. The module will fail if multiple VMs with the same name are found.
+    - In case of multiple VMs with the same name, use C(uuid) to uniquely specify the VM to manage.
+    - This parameter is case sensitive.
+    type: str
+    required: yes
+    aliases: [ name_label ]
+  name_desc:
+    description:
+    - VM description.
+    type: str
+  uuid:
+    description:
+    - UUID of the VM to manage if known. This is XenServer's unique identifier.
+    - It is required if the name is not unique.
+    - Please note that a supplied UUID will be ignored on VM creation, as XenServer creates the UUID internally.
+    type: str
+  template:
+    description:
+    - Name of a template, an existing VM (must be shut down) or a snapshot that should be used to create the VM.
+    - Templates/VMs/snapshots on XenServer do not necessarily have unique names. The module will fail if multiple templates with the same name are found.
+    - In case of multiple templates/VMs/snapshots with the same name, use C(template_uuid) to uniquely specify the source template.
+    - If the VM already exists, this setting will be ignored.
+    - This parameter is case sensitive.
+ type: str + aliases: [ template_src ] + template_uuid: + description: + - UUID of a template, an existing VM or a snapshot that should be used to create VM. + - It is required if template name is not unique. + type: str + is_template: + description: + - Convert VM to template. + type: bool + default: no + folder: + description: + - Destination folder for VM. + - This parameter is case sensitive. + - 'Example:' + - ' folder: /folder1/folder2' + type: str + hardware: + description: + - Manage VM's hardware parameters. VM needs to be shut down to reconfigure these parameters. + - 'Valid parameters are:' + - ' - C(num_cpus) (integer): Number of CPUs.' + - ' - C(num_cpu_cores_per_socket) (integer): Number of Cores Per Socket. C(num_cpus) has to be a multiple of C(num_cpu_cores_per_socket).' + - ' - C(memory_mb) (integer): Amount of memory in MB.' + type: dict + disks: + description: + - A list of disks to add to VM. + - All parameters are case sensitive. + - Removing or detaching existing disks of VM is not supported. + - 'Required parameters per entry:' + - ' - C(size_[tb,gb,mb,kb,b]) (integer): Disk storage size in specified unit. VM needs to be shut down to reconfigure this parameter.' + - 'Optional parameters per entry:' + - ' - C(name) (string): Disk name. You can also use C(name_label) as an alias.' + - ' - C(name_desc) (string): Disk description.' + - ' - C(sr) (string): Storage Repository to create disk on. If not specified, will use default SR. Cannot be used for moving disk to other SR.' + - ' - C(sr_uuid) (string): UUID of a SR to create disk on. Use if SR name is not unique.' + type: list + aliases: [ disk ] + cdrom: + description: + - A CD-ROM configuration for the VM. + - All parameters are case sensitive. + - 'Valid parameters are:' + - ' - C(type) (string): The type of CD-ROM, valid options are C(none) or C(iso). With C(none) the CD-ROM device will be present but empty.' + - ' - C(iso_name) (string): The file name of an ISO image from one of the XenServer ISO Libraries (implies C(type: iso)). + Required if C(type) is set to C(iso).' + type: dict + networks: + description: + - A list of networks (in the order of the NICs). + - All parameters are case sensitive. + - 'Required parameters per entry:' + - ' - C(name) (string): Name of a XenServer network to attach the network interface to. You can also use C(name_label) as an alias.' + - 'Optional parameters per entry (used for VM hardware):' + - ' - C(mac) (string): Customize MAC address of the interface.' + - 'Optional parameters per entry (used for OS customization):' + - ' - C(type) (string): Type of IPv4 assignment, valid options are C(none), C(dhcp) or C(static). Value C(none) means whatever is default for OS. + On some operating systems it could be DHCP configured (e.g. Windows) or unconfigured interface (e.g. Linux).' + - ' - C(ip) (string): Static IPv4 address (implies C(type: static)). Can include prefix in format / instead of using C(netmask).' + - ' - C(netmask) (string): Static IPv4 netmask required for C(ip) if prefix is not specified.' + - ' - C(gateway) (string): Static IPv4 gateway.' + - ' - C(type6) (string): Type of IPv6 assignment, valid options are C(none), C(dhcp) or C(static). Value C(none) means whatever is default for OS. + On some operating systems it could be DHCP configured (e.g. Windows) or unconfigured interface (e.g. Linux).' + - ' - C(ip6) (string): Static IPv6 address (implies C(type6: static)) with prefix in format /.' + - ' - C(gateway6) (string): Static IPv6 gateway.' 
+ type: list + aliases: [ network ] + home_server: + description: + - Name of a XenServer host that will be a Home Server for the VM. + - This parameter is case sensitive. + type: str + custom_params: + description: + - Define a list of custom VM params to set on VM. + - Useful for advanced users familiar with managing VM params trough xe CLI. + - A custom value object takes two fields C(key) and C(value) (see example below). + type: list + wait_for_ip_address: + description: + - Wait until XenServer detects an IP address for the VM. If C(state) is set to C(absent), this parameter is ignored. + - This requires XenServer Tools to be preinstalled on the VM to work properly. + type: bool + default: no + state_change_timeout: + description: + - 'By default, module will wait indefinitely for VM to accquire an IP address if C(wait_for_ip_address: yes).' + - If this parameter is set to positive value, the module will instead wait specified number of seconds for the state change. + - In case of timeout, module will generate an error message. + type: int + default: 0 + linked_clone: + description: + - Whether to create a Linked Clone from the template, existing VM or snapshot. If no, will create a full copy. + - This is equivalent to C(Use storage-level fast disk clone) option in XenCenter. + type: bool + default: no + force: + description: + - Ignore warnings and complete the actions. + - This parameter is useful for removing VM in running state or reconfiguring VM params that require VM to be shut down. + type: bool + default: no +extends_documentation_fragment: +- community.general.xenserver.documentation + +''' + +EXAMPLES = r''' +- name: Create a VM from a template + xenserver_guest: + hostname: "{{ xenserver_hostname }}" + username: "{{ xenserver_username }}" + password: "{{ xenserver_password }}" + validate_certs: no + folder: /testvms + name: testvm_2 + state: poweredon + template: CentOS 7 + disks: + - size_gb: 10 + sr: my_sr + hardware: + num_cpus: 6 + num_cpu_cores_per_socket: 3 + memory_mb: 512 + cdrom: + type: iso + iso_name: guest-tools.iso + networks: + - name: VM Network + mac: aa:bb:dd:aa:00:14 + wait_for_ip_address: yes + delegate_to: localhost + register: deploy + +- name: Create a VM template + xenserver_guest: + hostname: "{{ xenserver_hostname }}" + username: "{{ xenserver_username }}" + password: "{{ xenserver_password }}" + validate_certs: no + folder: /testvms + name: testvm_6 + is_template: yes + disk: + - size_gb: 10 + sr: my_sr + hardware: + memory_mb: 512 + num_cpus: 1 + delegate_to: localhost + register: deploy + +- name: Rename a VM (requires the VM's UUID) + xenserver_guest: + hostname: "{{ xenserver_hostname }}" + username: "{{ xenserver_username }}" + password: "{{ xenserver_password }}" + uuid: 421e4592-c069-924d-ce20-7e7533fab926 + name: new_name + state: present + delegate_to: localhost + +- name: Remove a VM by UUID + xenserver_guest: + hostname: "{{ xenserver_hostname }}" + username: "{{ xenserver_username }}" + password: "{{ xenserver_password }}" + uuid: 421e4592-c069-924d-ce20-7e7533fab926 + state: absent + delegate_to: localhost + +- name: Modify custom params (boot order) + xenserver_guest: + hostname: "{{ xenserver_hostname }}" + username: "{{ xenserver_username }}" + password: "{{ xenserver_password }}" + name: testvm_8 + state: present + custom_params: + - key: HVM_boot_params + value: { "order": "ndc" } + delegate_to: localhost + +- name: Customize network parameters + xenserver_guest: + hostname: "{{ xenserver_hostname }}" + username: "{{ 
xenserver_username }}" + password: "{{ xenserver_password }}" + name: testvm_10 + networks: + - name: VM Network + ip: 192.168.1.100/24 + gateway: 192.168.1.1 + - type: dhcp + delegate_to: localhost +''' + +RETURN = r''' +instance: + description: Metadata about the VM + returned: always + type: dict + sample: { + "cdrom": { + "type": "none" + }, + "customization_agent": "native", + "disks": [ + { + "name": "testvm_11-0", + "name_desc": "", + "os_device": "xvda", + "size": 42949672960, + "sr": "Local storage", + "sr_uuid": "0af1245e-bdb0-ba33-1446-57a962ec4075", + "vbd_userdevice": "0" + }, + { + "name": "testvm_11-1", + "name_desc": "", + "os_device": "xvdb", + "size": 42949672960, + "sr": "Local storage", + "sr_uuid": "0af1245e-bdb0-ba33-1446-57a962ec4075", + "vbd_userdevice": "1" + } + ], + "domid": "56", + "folder": "", + "hardware": { + "memory_mb": 8192, + "num_cpu_cores_per_socket": 2, + "num_cpus": 4 + }, + "home_server": "", + "is_template": false, + "name": "testvm_11", + "name_desc": "", + "networks": [ + { + "gateway": "192.168.0.254", + "gateway6": "fc00::fffe", + "ip": "192.168.0.200", + "ip6": [ + "fe80:0000:0000:0000:e9cb:625a:32c5:c291", + "fc00:0000:0000:0000:0000:0000:0000:0001" + ], + "mac": "ba:91:3a:48:20:76", + "mtu": "1500", + "name": "Pool-wide network associated with eth1", + "netmask": "255.255.255.128", + "prefix": "25", + "prefix6": "64", + "vif_device": "0" + } + ], + "other_config": { + "base_template_name": "Windows Server 2016 (64-bit)", + "import_task": "OpaqueRef:e43eb71c-45d6-5351-09ff-96e4fb7d0fa5", + "install-methods": "cdrom", + "instant": "true", + "mac_seed": "f83e8d8a-cfdc-b105-b054-ef5cb416b77e" + }, + "platform": { + "acpi": "1", + "apic": "true", + "cores-per-socket": "2", + "device_id": "0002", + "hpet": "true", + "nx": "true", + "pae": "true", + "timeoffset": "-25200", + "vga": "std", + "videoram": "8", + "viridian": "true", + "viridian_reference_tsc": "true", + "viridian_time_ref_count": "true" + }, + "state": "poweredon", + "uuid": "e3c0b2d5-5f05-424e-479c-d3df8b3e7cda", + "xenstore_data": { + "vm-data": "" + } + } +changes: + description: Detected or made changes to VM + returned: always + type: list + sample: [ + { + "hardware": [ + "num_cpus" + ] + }, + { + "disks_changed": [ + [], + [ + "size" + ] + ] + }, + { + "disks_new": [ + { + "name": "new-disk", + "name_desc": "", + "position": 2, + "size_gb": "4", + "vbd_userdevice": "2" + } + ] + }, + { + "cdrom": [ + "type", + "iso_name" + ] + }, + { + "networks_changed": [ + [ + "mac" + ], + ] + }, + { + "networks_new": [ + { + "name": "Pool-wide network associated with eth2", + "position": 1, + "vif_device": "1" + } + ] + }, + "need_poweredoff" + ] +''' + +import re + +HAS_XENAPI = False +try: + import XenAPI + HAS_XENAPI = True +except ImportError: + pass + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.common.network import is_mac +from ansible.module_utils import six +from ansible_collections.community.general.plugins.module_utils.xenserver import (xenserver_common_argument_spec, XAPI, XenServerObject, get_object_ref, + gather_vm_params, gather_vm_facts, set_vm_power_state, wait_for_vm_ip_address, + is_valid_ip_addr, is_valid_ip_netmask, is_valid_ip_prefix, + ip_prefix_to_netmask, ip_netmask_to_prefix, + is_valid_ip6_addr, is_valid_ip6_prefix) + + +class XenServerVM(XenServerObject): + """Class for managing XenServer VM. + + Attributes: + vm_ref (str): XAPI reference to VM. 
+ vm_params (dict): A dictionary with VM parameters as returned + by gather_vm_params() function. + """ + + def __init__(self, module): + """Inits XenServerVM using module parameters. + + Args: + module: Reference to Ansible module object. + """ + super(XenServerVM, self).__init__(module) + + self.vm_ref = get_object_ref(self.module, self.module.params['name'], self.module.params['uuid'], obj_type="VM", fail=False, msg_prefix="VM search: ") + self.gather_params() + + def exists(self): + """Returns True if VM exists, else False.""" + return True if self.vm_ref is not None else False + + def gather_params(self): + """Gathers all VM parameters available in XAPI database.""" + self.vm_params = gather_vm_params(self.module, self.vm_ref) + + def gather_facts(self): + """Gathers and returns VM facts.""" + return gather_vm_facts(self.module, self.vm_params) + + def set_power_state(self, power_state): + """Controls VM power state.""" + state_changed, current_state = set_vm_power_state(self.module, self.vm_ref, power_state, self.module.params['state_change_timeout']) + + # If state has changed, update vm_params. + if state_changed: + self.vm_params['power_state'] = current_state.capitalize() + + return state_changed + + def wait_for_ip_address(self): + """Waits for VM to acquire an IP address.""" + self.vm_params['guest_metrics'] = wait_for_vm_ip_address(self.module, self.vm_ref, self.module.params['state_change_timeout']) + + def deploy(self): + """Deploys new VM from template.""" + # Safety check. + if self.exists(): + self.module.fail_json(msg="Called deploy on existing VM!") + + try: + templ_ref = get_object_ref(self.module, self.module.params['template'], self.module.params['template_uuid'], obj_type="template", fail=True, + msg_prefix="VM deploy: ") + + # Is this an existing running VM? + if self.xapi_session.xenapi.VM.get_power_state(templ_ref).lower() != 'halted': + self.module.fail_json(msg="VM deploy: running VM cannot be used as a template!") + + # Find a SR we can use for VM.copy(). We use SR of the first disk + # if specified or default SR if not specified. + disk_params_list = self.module.params['disks'] + + sr_ref = None + + if disk_params_list: + disk_params = disk_params_list[0] + + disk_sr_uuid = disk_params.get('sr_uuid') + disk_sr = disk_params.get('sr') + + if disk_sr_uuid is not None or disk_sr is not None: + sr_ref = get_object_ref(self.module, disk_sr, disk_sr_uuid, obj_type="SR", fail=True, + msg_prefix="VM deploy disks[0]: ") + + if not sr_ref: + if self.default_sr_ref != "OpaqueRef:NULL": + sr_ref = self.default_sr_ref + else: + self.module.fail_json(msg="VM deploy disks[0]: no default SR found! You must specify SR explicitly.") + + # VM name could be an empty string which is bad. + if self.module.params['name'] is not None and not self.module.params['name']: + self.module.fail_json(msg="VM deploy: VM name must not be an empty string!") + + # Support for Ansible check mode. + if self.module.check_mode: + return + + # Now we can instantiate VM. We use VM.clone for linked_clone and + # VM.copy for non linked_clone. + if self.module.params['linked_clone']: + self.vm_ref = self.xapi_session.xenapi.VM.clone(templ_ref, self.module.params['name']) + else: + self.vm_ref = self.xapi_session.xenapi.VM.copy(templ_ref, self.module.params['name'], sr_ref) + + # Description is copied over from template so we reset it. + self.xapi_session.xenapi.VM.set_name_description(self.vm_ref, "") + + # If template is one of built-in XenServer templates, we have to + # do some additional steps. 
+ # Note: VM.get_is_default_template() is supported from XenServer 7.2 + # onward so we use an alternative way. + templ_other_config = self.xapi_session.xenapi.VM.get_other_config(templ_ref) + + if "default_template" in templ_other_config and templ_other_config['default_template']: + # other_config of built-in XenServer templates have a key called + # 'disks' with the following content: + # disks: + # This value of other_data is copied to cloned or copied VM and + # it prevents provisioning of VM because sr is not specified and + # XAPI returns an error. To get around this, we remove the + # 'disks' key and add disks to VM later ourselves. + vm_other_config = self.xapi_session.xenapi.VM.get_other_config(self.vm_ref) + + if "disks" in vm_other_config: + del vm_other_config['disks'] + + self.xapi_session.xenapi.VM.set_other_config(self.vm_ref, vm_other_config) + + # At this point we have VM ready for provisioning. + self.xapi_session.xenapi.VM.provision(self.vm_ref) + + # After provisioning we can prepare vm_params for reconfigure(). + self.gather_params() + + # VM is almost ready. We just need to reconfigure it... + self.reconfigure() + + # Power on VM if needed. + if self.module.params['state'] == "poweredon": + self.set_power_state("poweredon") + + except XenAPI.Failure as f: + self.module.fail_json(msg="XAPI ERROR: %s" % f.details) + + def reconfigure(self): + """Reconfigures an existing VM. + + Returns: + list: parameters that were reconfigured. + """ + # Safety check. + if not self.exists(): + self.module.fail_json(msg="Called reconfigure on non existing VM!") + + config_changes = self.get_changes() + + vm_power_state_save = self.vm_params['power_state'].lower() + + if "need_poweredoff" in config_changes and vm_power_state_save != 'halted' and not self.module.params['force']: + self.module.fail_json(msg="VM reconfigure: VM has to be in powered off state to reconfigure but force was not specified!") + + # Support for Ansible check mode. 
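+        # In check mode we stop here and just report the computed change list
+        # without touching the VM.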
+ if self.module.check_mode: + return config_changes + + if "need_poweredoff" in config_changes and vm_power_state_save != 'halted' and self.module.params['force']: + self.set_power_state("shutdownguest") + + try: + for change in config_changes: + if isinstance(change, six.string_types): + if change == "name": + self.xapi_session.xenapi.VM.set_name_label(self.vm_ref, self.module.params['name']) + elif change == "name_desc": + self.xapi_session.xenapi.VM.set_name_description(self.vm_ref, self.module.params['name_desc']) + elif change == "folder": + self.xapi_session.xenapi.VM.remove_from_other_config(self.vm_ref, 'folder') + + if self.module.params['folder']: + self.xapi_session.xenapi.VM.add_to_other_config(self.vm_ref, 'folder', self.module.params['folder']) + elif change == "home_server": + if self.module.params['home_server']: + host_ref = self.xapi_session.xenapi.host.get_by_name_label(self.module.params['home_server'])[0] + else: + host_ref = "OpaqueRef:NULL" + + self.xapi_session.xenapi.VM.set_affinity(self.vm_ref, host_ref) + elif isinstance(change, dict): + if change.get('hardware'): + for hardware_change in change['hardware']: + if hardware_change == "num_cpus": + num_cpus = int(self.module.params['hardware']['num_cpus']) + + if num_cpus < int(self.vm_params['VCPUs_at_startup']): + self.xapi_session.xenapi.VM.set_VCPUs_at_startup(self.vm_ref, str(num_cpus)) + self.xapi_session.xenapi.VM.set_VCPUs_max(self.vm_ref, str(num_cpus)) + else: + self.xapi_session.xenapi.VM.set_VCPUs_max(self.vm_ref, str(num_cpus)) + self.xapi_session.xenapi.VM.set_VCPUs_at_startup(self.vm_ref, str(num_cpus)) + elif hardware_change == "num_cpu_cores_per_socket": + self.xapi_session.xenapi.VM.remove_from_platform(self.vm_ref, 'cores-per-socket') + num_cpu_cores_per_socket = int(self.module.params['hardware']['num_cpu_cores_per_socket']) + + if num_cpu_cores_per_socket > 1: + self.xapi_session.xenapi.VM.add_to_platform(self.vm_ref, 'cores-per-socket', str(num_cpu_cores_per_socket)) + elif hardware_change == "memory_mb": + memory_b = str(int(self.module.params['hardware']['memory_mb']) * 1048576) + vm_memory_static_min_b = str(min(int(memory_b), int(self.vm_params['memory_static_min']))) + + self.xapi_session.xenapi.VM.set_memory_limits(self.vm_ref, vm_memory_static_min_b, memory_b, memory_b, memory_b) + elif change.get('disks_changed'): + vm_disk_params_list = [disk_params for disk_params in self.vm_params['VBDs'] if disk_params['type'] == "Disk"] + position = 0 + + for disk_change_list in change['disks_changed']: + for disk_change in disk_change_list: + vdi_ref = self.xapi_session.xenapi.VDI.get_by_uuid(vm_disk_params_list[position]['VDI']['uuid']) + + if disk_change == "name": + self.xapi_session.xenapi.VDI.set_name_label(vdi_ref, self.module.params['disks'][position]['name']) + elif disk_change == "name_desc": + self.xapi_session.xenapi.VDI.set_name_description(vdi_ref, self.module.params['disks'][position]['name_desc']) + elif disk_change == "size": + self.xapi_session.xenapi.VDI.resize(vdi_ref, str(self.get_normalized_disk_size(self.module.params['disks'][position], + "VM reconfigure disks[%s]: " % position))) + + position += 1 + elif change.get('disks_new'): + for position, disk_userdevice in change['disks_new']: + disk_params = self.module.params['disks'][position] + + disk_name = disk_params['name'] if disk_params.get('name') else "%s-%s" % (self.vm_params['name_label'], position) + disk_name_desc = disk_params['name_desc'] if disk_params.get('name_desc') else "" + + if disk_params.get('sr_uuid'): + 
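+                                # SR selection order: an explicit sr_uuid takes
+                                # precedence over a name lookup, which takes
+                                # precedence over the pool's default SR (the
+                                # else branch below).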
sr_ref = self.xapi_session.xenapi.SR.get_by_uuid(disk_params['sr_uuid']) + elif disk_params.get('sr'): + sr_ref = self.xapi_session.xenapi.SR.get_by_name_label(disk_params['sr'])[0] + else: + sr_ref = self.default_sr_ref + + disk_size = str(self.get_normalized_disk_size(self.module.params['disks'][position], "VM reconfigure disks[%s]: " % position)) + + new_disk_vdi = { + "name_label": disk_name, + "name_description": disk_name_desc, + "SR": sr_ref, + "virtual_size": disk_size, + "type": "user", + "sharable": False, + "read_only": False, + "other_config": {}, + } + + new_disk_vbd = { + "VM": self.vm_ref, + "VDI": None, + "userdevice": disk_userdevice, + "bootable": False, + "mode": "RW", + "type": "Disk", + "empty": False, + "other_config": {}, + "qos_algorithm_type": "", + "qos_algorithm_params": {}, + } + + new_disk_vbd['VDI'] = self.xapi_session.xenapi.VDI.create(new_disk_vdi) + vbd_ref_new = self.xapi_session.xenapi.VBD.create(new_disk_vbd) + + if self.vm_params['power_state'].lower() == "running": + self.xapi_session.xenapi.VBD.plug(vbd_ref_new) + + elif change.get('cdrom'): + vm_cdrom_params_list = [cdrom_params for cdrom_params in self.vm_params['VBDs'] if cdrom_params['type'] == "CD"] + + # If there is no CD present, we have to create one. + if not vm_cdrom_params_list: + # We will try to place cdrom at userdevice position + # 3 (which is default) if it is not already occupied + # else we will place it at first allowed position. + cdrom_userdevices_allowed = self.xapi_session.xenapi.VM.get_allowed_VBD_devices(self.vm_ref) + + if "3" in cdrom_userdevices_allowed: + cdrom_userdevice = "3" + else: + cdrom_userdevice = cdrom_userdevices_allowed[0] + + cdrom_vbd = { + "VM": self.vm_ref, + "VDI": "OpaqueRef:NULL", + "userdevice": cdrom_userdevice, + "bootable": False, + "mode": "RO", + "type": "CD", + "empty": True, + "other_config": {}, + "qos_algorithm_type": "", + "qos_algorithm_params": {}, + } + + cdrom_vbd_ref = self.xapi_session.xenapi.VBD.create(cdrom_vbd) + else: + cdrom_vbd_ref = self.xapi_session.xenapi.VBD.get_by_uuid(vm_cdrom_params_list[0]['uuid']) + + cdrom_is_empty = self.xapi_session.xenapi.VBD.get_empty(cdrom_vbd_ref) + + for cdrom_change in change['cdrom']: + if cdrom_change == "type": + cdrom_type = self.module.params['cdrom']['type'] + + if cdrom_type == "none" and not cdrom_is_empty: + self.xapi_session.xenapi.VBD.eject(cdrom_vbd_ref) + elif cdrom_type == "host": + # Unimplemented! + pass + + elif cdrom_change == "iso_name": + if not cdrom_is_empty: + self.xapi_session.xenapi.VBD.eject(cdrom_vbd_ref) + + cdrom_vdi_ref = self.xapi_session.xenapi.VDI.get_by_name_label(self.module.params['cdrom']['iso_name'])[0] + self.xapi_session.xenapi.VBD.insert(cdrom_vbd_ref, cdrom_vdi_ref) + elif change.get('networks_changed'): + position = 0 + + for network_change_list in change['networks_changed']: + if network_change_list: + vm_vif_params = self.vm_params['VIFs'][position] + network_params = self.module.params['networks'][position] + + vif_ref = self.xapi_session.xenapi.VIF.get_by_uuid(vm_vif_params['uuid']) + network_ref = self.xapi_session.xenapi.network.get_by_uuid(vm_vif_params['network']['uuid']) + + vif_recreated = False + + if "name" in network_change_list or "mac" in network_change_list: + # To change network or MAC, we destroy old + # VIF and then create a new one with changed + # parameters. That's how XenCenter does it. + + # Copy all old parameters to new VIF record. 
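+                                    # Only 'network' (plus 'MTU', which follows the
+                                    # new network) and 'MAC' are changed afterwards;
+                                    # every other field is carried over verbatim.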
+ vif = { + "device": vm_vif_params['device'], + "network": network_ref, + "VM": vm_vif_params['VM'], + "MAC": vm_vif_params['MAC'], + "MTU": vm_vif_params['MTU'], + "other_config": vm_vif_params['other_config'], + "qos_algorithm_type": vm_vif_params['qos_algorithm_type'], + "qos_algorithm_params": vm_vif_params['qos_algorithm_params'], + "locking_mode": vm_vif_params['locking_mode'], + "ipv4_allowed": vm_vif_params['ipv4_allowed'], + "ipv6_allowed": vm_vif_params['ipv6_allowed'], + } + + if "name" in network_change_list: + network_ref_new = self.xapi_session.xenapi.network.get_by_name_label(network_params['name'])[0] + vif['network'] = network_ref_new + vif['MTU'] = self.xapi_session.xenapi.network.get_MTU(network_ref_new) + + if "mac" in network_change_list: + vif['MAC'] = network_params['mac'].lower() + + if self.vm_params['power_state'].lower() == "running": + self.xapi_session.xenapi.VIF.unplug(vif_ref) + + self.xapi_session.xenapi.VIF.destroy(vif_ref) + vif_ref_new = self.xapi_session.xenapi.VIF.create(vif) + + if self.vm_params['power_state'].lower() == "running": + self.xapi_session.xenapi.VIF.plug(vif_ref_new) + + vif_ref = vif_ref_new + vif_recreated = True + + if self.vm_params['customization_agent'] == "native": + vif_reconfigure_needed = False + + if "type" in network_change_list: + network_type = network_params['type'].capitalize() + vif_reconfigure_needed = True + else: + network_type = vm_vif_params['ipv4_configuration_mode'] + + if "ip" in network_change_list: + network_ip = network_params['ip'] + vif_reconfigure_needed = True + elif vm_vif_params['ipv4_addresses']: + network_ip = vm_vif_params['ipv4_addresses'][0].split('/')[0] + else: + network_ip = "" + + if "prefix" in network_change_list: + network_prefix = "/%s" % network_params['prefix'] + vif_reconfigure_needed = True + elif vm_vif_params['ipv4_addresses'] and vm_vif_params['ipv4_addresses'][0]: + network_prefix = "/%s" % vm_vif_params['ipv4_addresses'][0].split('/')[1] + else: + network_prefix = "" + + if "gateway" in network_change_list: + network_gateway = network_params['gateway'] + vif_reconfigure_needed = True + else: + network_gateway = vm_vif_params['ipv4_gateway'] + + if vif_recreated or vif_reconfigure_needed: + self.xapi_session.xenapi.VIF.configure_ipv4(vif_ref, network_type, + "%s%s" % (network_ip, network_prefix), network_gateway) + + vif_reconfigure_needed = False + + if "type6" in network_change_list: + network_type6 = network_params['type6'].capitalize() + vif_reconfigure_needed = True + else: + network_type6 = vm_vif_params['ipv6_configuration_mode'] + + if "ip6" in network_change_list: + network_ip6 = network_params['ip6'] + vif_reconfigure_needed = True + elif vm_vif_params['ipv6_addresses']: + network_ip6 = vm_vif_params['ipv6_addresses'][0].split('/')[0] + else: + network_ip6 = "" + + if "prefix6" in network_change_list: + network_prefix6 = "/%s" % network_params['prefix6'] + vif_reconfigure_needed = True + elif vm_vif_params['ipv6_addresses'] and vm_vif_params['ipv6_addresses'][0]: + network_prefix6 = "/%s" % vm_vif_params['ipv6_addresses'][0].split('/')[1] + else: + network_prefix6 = "" + + if "gateway6" in network_change_list: + network_gateway6 = network_params['gateway6'] + vif_reconfigure_needed = True + else: + network_gateway6 = vm_vif_params['ipv6_gateway'] + + if vif_recreated or vif_reconfigure_needed: + self.xapi_session.xenapi.VIF.configure_ipv6(vif_ref, network_type6, + "%s%s" % (network_ip6, network_prefix6), network_gateway6) + + elif self.vm_params['customization_agent'] == 
"custom": + vif_device = vm_vif_params['device'] + + # A user could have manually changed network + # or mac e.g. trough XenCenter and then also + # make those changes in playbook manually. + # In that case, module will not detect any + # changes and info in xenstore_data will + # become stale. For that reason we always + # update name and mac in xenstore_data. + + # Since we handle name and mac differently, + # we have to remove them from + # network_change_list. + network_change_list_tmp = [net_chg for net_chg in network_change_list if net_chg not in ['name', 'mac']] + + for network_change in network_change_list_tmp + ['name', 'mac']: + self.xapi_session.xenapi.VM.remove_from_xenstore_data(self.vm_ref, + "vm-data/networks/%s/%s" % (vif_device, network_change)) + + if network_params.get('name'): + network_name = network_params['name'] + else: + network_name = vm_vif_params['network']['name_label'] + + self.xapi_session.xenapi.VM.add_to_xenstore_data(self.vm_ref, + "vm-data/networks/%s/%s" % (vif_device, 'name'), network_name) + + if network_params.get('mac'): + network_mac = network_params['mac'].lower() + else: + network_mac = vm_vif_params['MAC'].lower() + + self.xapi_session.xenapi.VM.add_to_xenstore_data(self.vm_ref, + "vm-data/networks/%s/%s" % (vif_device, 'mac'), network_mac) + + for network_change in network_change_list_tmp: + self.xapi_session.xenapi.VM.add_to_xenstore_data(self.vm_ref, + "vm-data/networks/%s/%s" % (vif_device, network_change), + network_params[network_change]) + + position += 1 + elif change.get('networks_new'): + for position, vif_device in change['networks_new']: + network_params = self.module.params['networks'][position] + + network_ref = self.xapi_session.xenapi.network.get_by_name_label(network_params['name'])[0] + + network_name = network_params['name'] + network_mac = network_params['mac'] if network_params.get('mac') else "" + network_type = network_params.get('type') + network_ip = network_params['ip'] if network_params.get('ip') else "" + network_prefix = network_params['prefix'] if network_params.get('prefix') else "" + network_netmask = network_params['netmask'] if network_params.get('netmask') else "" + network_gateway = network_params['gateway'] if network_params.get('gateway') else "" + network_type6 = network_params.get('type6') + network_ip6 = network_params['ip6'] if network_params.get('ip6') else "" + network_prefix6 = network_params['prefix6'] if network_params.get('prefix6') else "" + network_gateway6 = network_params['gateway6'] if network_params.get('gateway6') else "" + + vif = { + "device": vif_device, + "network": network_ref, + "VM": self.vm_ref, + "MAC": network_mac, + "MTU": self.xapi_session.xenapi.network.get_MTU(network_ref), + "other_config": {}, + "qos_algorithm_type": "", + "qos_algorithm_params": {}, + } + + vif_ref_new = self.xapi_session.xenapi.VIF.create(vif) + + if self.vm_params['power_state'].lower() == "running": + self.xapi_session.xenapi.VIF.plug(vif_ref_new) + + if self.vm_params['customization_agent'] == "native": + if network_type and network_type == "static": + self.xapi_session.xenapi.VIF.configure_ipv4(vif_ref_new, "Static", + "%s/%s" % (network_ip, network_prefix), network_gateway) + + if network_type6 and network_type6 == "static": + self.xapi_session.xenapi.VIF.configure_ipv6(vif_ref_new, "Static", + "%s/%s" % (network_ip6, network_prefix6), network_gateway6) + elif self.vm_params['customization_agent'] == "custom": + # We first have to remove any existing data + # from xenstore_data because there could be + 
# some old leftover data from some interface + # that once occupied same device location as + # our new interface. + for network_param in ['name', 'mac', 'type', 'ip', 'prefix', 'netmask', 'gateway', 'type6', 'ip6', 'prefix6', 'gateway6']: + self.xapi_session.xenapi.VM.remove_from_xenstore_data(self.vm_ref, "vm-data/networks/%s/%s" % (vif_device, network_param)) + + self.xapi_session.xenapi.VM.add_to_xenstore_data(self.vm_ref, "vm-data/networks/%s/name" % vif_device, network_name) + + # We get MAC from VIF itself instead of + # networks.mac because it could be + # autogenerated. + vm_vif_mac = self.xapi_session.xenapi.VIF.get_MAC(vif_ref_new) + self.xapi_session.xenapi.VM.add_to_xenstore_data(self.vm_ref, "vm-data/networks/%s/mac" % vif_device, vm_vif_mac) + + if network_type: + self.xapi_session.xenapi.VM.add_to_xenstore_data(self.vm_ref, "vm-data/networks/%s/type" % vif_device, network_type) + + if network_type == "static": + self.xapi_session.xenapi.VM.add_to_xenstore_data(self.vm_ref, + "vm-data/networks/%s/ip" % vif_device, network_ip) + self.xapi_session.xenapi.VM.add_to_xenstore_data(self.vm_ref, + "vm-data/networks/%s/prefix" % vif_device, network_prefix) + self.xapi_session.xenapi.VM.add_to_xenstore_data(self.vm_ref, + "vm-data/networks/%s/netmask" % vif_device, network_netmask) + self.xapi_session.xenapi.VM.add_to_xenstore_data(self.vm_ref, + "vm-data/networks/%s/gateway" % vif_device, network_gateway) + + if network_type6: + self.xapi_session.xenapi.VM.add_to_xenstore_data(self.vm_ref, "vm-data/networks/%s/type6" % vif_device, network_type6) + + if network_type6 == "static": + self.xapi_session.xenapi.VM.add_to_xenstore_data(self.vm_ref, + "vm-data/networks/%s/ip6" % vif_device, network_ip6) + self.xapi_session.xenapi.VM.add_to_xenstore_data(self.vm_ref, + "vm-data/networks/%s/prefix6" % vif_device, network_prefix6) + self.xapi_session.xenapi.VM.add_to_xenstore_data(self.vm_ref, + "vm-data/networks/%s/gateway6" % vif_device, network_gateway6) + + elif change.get('custom_params'): + for position in change['custom_params']: + custom_param_key = self.module.params['custom_params'][position]['key'] + custom_param_value = self.module.params['custom_params'][position]['value'] + self.xapi_session.xenapi_request("VM.set_%s" % custom_param_key, (self.vm_ref, custom_param_value)) + + if self.module.params['is_template']: + self.xapi_session.xenapi.VM.set_is_a_template(self.vm_ref, True) + elif "need_poweredoff" in config_changes and self.module.params['force'] and vm_power_state_save != 'halted': + self.set_power_state("poweredon") + + # Gather new params after reconfiguration. + self.gather_params() + + except XenAPI.Failure as f: + self.module.fail_json(msg="XAPI ERROR: %s" % f.details) + + return config_changes + + def destroy(self): + """Removes an existing VM with associated disks""" + # Safety check. + if not self.exists(): + self.module.fail_json(msg="Called destroy on non existing VM!") + + if self.vm_params['power_state'].lower() != 'halted' and not self.module.params['force']: + self.module.fail_json(msg="VM destroy: VM has to be in powered off state to destroy but force was not specified!") + + # Support for Ansible check mode. + if self.module.check_mode: + return + + # Make sure that VM is poweredoff before we can destroy it. + self.set_power_state("poweredoff") + + try: + # Destroy VM! 
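+            # XAPI's VM.destroy removes the VM record (and with it the VBDs), but
+            # not the VDIs holding the disk contents, so those are looked up and
+            # destroyed explicitly right after.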
+ self.xapi_session.xenapi.VM.destroy(self.vm_ref) + + vm_disk_params_list = [disk_params for disk_params in self.vm_params['VBDs'] if disk_params['type'] == "Disk"] + + # Destroy all VDIs associated with VM! + for vm_disk_params in vm_disk_params_list: + vdi_ref = self.xapi_session.xenapi.VDI.get_by_uuid(vm_disk_params['VDI']['uuid']) + + self.xapi_session.xenapi.VDI.destroy(vdi_ref) + + except XenAPI.Failure as f: + self.module.fail_json(msg="XAPI ERROR: %s" % f.details) + + def get_changes(self): + """Finds VM parameters that differ from specified ones. + + This method builds a dictionary with hierarchy of VM parameters + that differ from those specified in module parameters. + + Returns: + list: VM parameters that differ from those specified in + module parameters. + """ + # Safety check. + if not self.exists(): + self.module.fail_json(msg="Called get_changes on non existing VM!") + + need_poweredoff = False + + if self.module.params['is_template']: + need_poweredoff = True + + try: + # This VM could be a template or a snapshot. In that case we fail + # because we can't reconfigure them or it would just be too + # dangerous. + if self.vm_params['is_a_template'] and not self.vm_params['is_a_snapshot']: + self.module.fail_json(msg="VM check: targeted VM is a template! Template reconfiguration is not supported.") + + if self.vm_params['is_a_snapshot']: + self.module.fail_json(msg="VM check: targeted VM is a snapshot! Snapshot reconfiguration is not supported.") + + # Let's build a list of parameters that changed. + config_changes = [] + + # Name could only differ if we found an existing VM by uuid. + if self.module.params['name'] is not None and self.module.params['name'] != self.vm_params['name_label']: + if self.module.params['name']: + config_changes.append('name') + else: + self.module.fail_json(msg="VM check name: VM name cannot be an empty string!") + + if self.module.params['name_desc'] is not None and self.module.params['name_desc'] != self.vm_params['name_description']: + config_changes.append('name_desc') + + # Folder parameter is found in other_config. + vm_other_config = self.vm_params['other_config'] + vm_folder = vm_other_config.get('folder', '') + + if self.module.params['folder'] is not None and self.module.params['folder'] != vm_folder: + config_changes.append('folder') + + if self.module.params['home_server'] is not None: + if (self.module.params['home_server'] and + (not self.vm_params['affinity'] or self.module.params['home_server'] != self.vm_params['affinity']['name_label'])): + + # Check existance only. Ignore return value. + get_object_ref(self.module, self.module.params['home_server'], uuid=None, obj_type="home server", fail=True, + msg_prefix="VM check home_server: ") + + config_changes.append('home_server') + elif not self.module.params['home_server'] and self.vm_params['affinity']: + config_changes.append('home_server') + + config_changes_hardware = [] + + if self.module.params['hardware']: + num_cpus = self.module.params['hardware'].get('num_cpus') + + if num_cpus is not None: + # Kept for compatibility with older Ansible versions that + # do not support subargument specs. + try: + num_cpus = int(num_cpus) + except ValueError as e: + self.module.fail_json(msg="VM check hardware.num_cpus: parameter should be an integer value!") + + if num_cpus < 1: + self.module.fail_json(msg="VM check hardware.num_cpus: parameter should be greater than zero!") + + # We can use VCPUs_at_startup or VCPUs_max parameter. 
I'd
+                    # say the former is the way to go but this needs
+                    # confirmation and testing.
+                    if num_cpus != int(self.vm_params['VCPUs_at_startup']):
+                        config_changes_hardware.append('num_cpus')
+                        # For now, we don't support hotplugging so VM has to be in
+                        # poweredoff state to reconfigure.
+                        need_poweredoff = True
+
+                num_cpu_cores_per_socket = self.module.params['hardware'].get('num_cpu_cores_per_socket')
+
+                if num_cpu_cores_per_socket is not None:
+                    # Kept for compatibility with older Ansible versions that
+                    # do not support subargument specs.
+                    try:
+                        num_cpu_cores_per_socket = int(num_cpu_cores_per_socket)
+                    except ValueError as e:
+                        self.module.fail_json(msg="VM check hardware.num_cpu_cores_per_socket: parameter should be an integer value!")
+
+                    if num_cpu_cores_per_socket < 1:
+                        self.module.fail_json(msg="VM check hardware.num_cpu_cores_per_socket: parameter should be greater than zero!")
+
+                    if num_cpus and num_cpus % num_cpu_cores_per_socket != 0:
+                        self.module.fail_json(msg="VM check hardware.num_cpus: parameter should be a multiple of hardware.num_cpu_cores_per_socket!")
+
+                    vm_platform = self.vm_params['platform']
+                    vm_cores_per_socket = int(vm_platform.get('cores-per-socket', 1))
+
+                    if num_cpu_cores_per_socket != vm_cores_per_socket:
+                        config_changes_hardware.append('num_cpu_cores_per_socket')
+                        # For now, we don't support hotplugging so VM has to be
+                        # in poweredoff state to reconfigure.
+                        need_poweredoff = True
+
+                memory_mb = self.module.params['hardware'].get('memory_mb')
+
+                if memory_mb is not None:
+                    # Kept for compatibility with older Ansible versions that
+                    # do not support subargument specs.
+                    try:
+                        memory_mb = int(memory_mb)
+                    except ValueError as e:
+                        self.module.fail_json(msg="VM check hardware.memory_mb: parameter should be an integer value!")
+
+                    if memory_mb < 1:
+                        self.module.fail_json(msg="VM check hardware.memory_mb: parameter should be greater than zero!")
+
+                    # There are multiple memory parameters:
+                    #     - memory_dynamic_max
+                    #     - memory_dynamic_min
+                    #     - memory_static_max
+                    #     - memory_static_min
+                    #     - memory_target
+                    #
+                    # memory_target seems like a good candidate but it returns 0 for
+                    # halted VMs so we can't use it.
+                    #
+                    # I decided to use memory_dynamic_max and memory_static_max
+                    # and use whichever is larger. This strategy needs validation
+                    # and testing.
+                    #
+                    # XenServer stores memory size in bytes so we need to divide
+                    # it by 1024*1024 = 1048576.
+                    if memory_mb != int(max(int(self.vm_params['memory_dynamic_max']), int(self.vm_params['memory_static_max'])) / 1048576):
+                        config_changes_hardware.append('memory_mb')
+                        # For now, we don't support hotplugging so VM has to be in
+                        # poweredoff state to reconfigure.
+                        need_poweredoff = True
+
+            if config_changes_hardware:
+                config_changes.append({"hardware": config_changes_hardware})
+
+            config_changes_disks = []
+            config_new_disks = []
+
+            # Find allowed userdevices.
+            vbd_userdevices_allowed = self.xapi_session.xenapi.VM.get_allowed_VBD_devices(self.vm_ref)
+
+            if self.module.params['disks']:
+                # Get the list of all disks. Filter out any CDs found.
+                vm_disk_params_list = [disk_params for disk_params in self.vm_params['VBDs'] if disk_params['type'] == "Disk"]
+
+                # The number of disks defined in module params has to be the same as or
+                # higher than the number of existing disks attached to the VM.
+                # We don't support removal or detachment of disks.
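+                # Disks are matched to module parameters purely by position:
+                # entry 0 pairs with the lowest occupied userdevice, and any
+                # surplus entries are treated as new disks to attach.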
+
+            config_changes_disks = []
+            config_new_disks = []
+
+            # Find allowed userdevices.
+            vbd_userdevices_allowed = self.xapi_session.xenapi.VM.get_allowed_VBD_devices(self.vm_ref)
+
+            if self.module.params['disks']:
+                # Get the list of all disks. Filter out any CDs found.
+                vm_disk_params_list = [disk_params for disk_params in self.vm_params['VBDs'] if disk_params['type'] == "Disk"]
+
+                # The number of disks defined in module params has to be equal
+                # to or greater than the number of existing disks attached to
+                # the VM. We don't support removal or detachment of disks.
+                if len(self.module.params['disks']) < len(vm_disk_params_list):
+                    self.module.fail_json(msg="VM check disks: provided disks configuration has fewer disks than the target VM (%d < %d)!" %
+                                          (len(self.module.params['disks']), len(vm_disk_params_list)))
+
+                # Find the highest occupied disk userdevice.
+                if not vm_disk_params_list:
+                    vm_disk_userdevice_highest = "-1"
+                else:
+                    vm_disk_userdevice_highest = vm_disk_params_list[-1]['userdevice']
+
+                for position in range(len(self.module.params['disks'])):
+                    if position < len(vm_disk_params_list):
+                        vm_disk_params = vm_disk_params_list[position]
+                    else:
+                        vm_disk_params = None
+
+                    disk_params = self.module.params['disks'][position]
+
+                    disk_size = self.get_normalized_disk_size(self.module.params['disks'][position], "VM check disks[%s]: " % position)
+
+                    disk_name = disk_params.get('name')
+
+                    if disk_name is not None and not disk_name:
+                        self.module.fail_json(msg="VM check disks[%s]: disk name cannot be an empty string!" % position)
+
+                    # If this is an existing disk.
+                    if vm_disk_params and vm_disk_params['VDI']:
+                        disk_changes = []
+
+                        if disk_name and disk_name != vm_disk_params['VDI']['name_label']:
+                            disk_changes.append('name')
+
+                        disk_name_desc = disk_params.get('name_desc')
+
+                        if disk_name_desc is not None and disk_name_desc != vm_disk_params['VDI']['name_description']:
+                            disk_changes.append('name_desc')
+
+                        if disk_size:
+                            if disk_size > int(vm_disk_params['VDI']['virtual_size']):
+                                disk_changes.append('size')
+                                need_poweredoff = True
+                            elif disk_size < int(vm_disk_params['VDI']['virtual_size']):
+                                self.module.fail_json(msg="VM check disks[%s]: disk size is smaller than existing (%d bytes < %s bytes). "
+                                                      "Reducing disk size is not allowed!" % (position, disk_size, vm_disk_params['VDI']['virtual_size']))
+
+                        config_changes_disks.append(disk_changes)
+                    # If this is a new disk.
+                    else:
+                        if not disk_size:
+                            self.module.fail_json(msg="VM check disks[%s]: no valid disk size specification found!" % position)
+
+                        disk_sr_uuid = disk_params.get('sr_uuid')
+                        disk_sr = disk_params.get('sr')
+
+                        if disk_sr_uuid is not None or disk_sr is not None:
+                            # Check existence only. Ignore return value.
+                            get_object_ref(self.module, disk_sr, disk_sr_uuid, obj_type="SR", fail=True,
+                                           msg_prefix="VM check disks[%s]: " % position)
+                        elif self.default_sr_ref == 'OpaqueRef:NULL':
+                            self.module.fail_json(msg="VM check disks[%s]: no default SR found! You must specify SR explicitly." % position)
+
+                        if not vbd_userdevices_allowed:
+                            self.module.fail_json(msg="VM check disks[%s]: maximum number of devices reached!" % position)
+
+                        disk_userdevice = None
+
+                        # We need to place a new disk right above the highest
+                        # placed existing disk to maintain relative disk
+                        # positions pairable with disk specifications in
+                        # module params. That place must not be occupied by
+                        # some other device like CD-ROM.
+                        for userdevice in vbd_userdevices_allowed:
+                            if int(userdevice) > int(vm_disk_userdevice_highest):
+                                disk_userdevice = userdevice
+                                vbd_userdevices_allowed.remove(userdevice)
+                                vm_disk_userdevice_highest = userdevice
+                                break
+
+                        # If no place was found.
+                        if disk_userdevice is None:
+                            # Highest occupied place could be a CD-ROM device
+                            # so we have to include all devices regardless of
+                            # type when calculating out-of-bound position.
+                            disk_userdevice = str(int(self.vm_params['VBDs'][-1]['userdevice']) + 1)
+                            self.module.fail_json(msg="VM check disks[%s]: new disk position %s is out of bounds!" % (position, disk_userdevice))
+
+                        # For new disks we only track their position.
+                        config_new_disks.append((position, disk_userdevice))
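
The userdevice placement rule used for new disks above reduces to this hedged sketch (standalone; the module additionally removes the chosen device from the allowed list, and relies on the ordering XAPI supplies, so the sort here only makes the example deterministic):

    def pick_userdevice(allowed, highest_occupied):
        # Pick the first allowed userdevice strictly above the highest occupied
        # one, so new disks keep positions pairable with the module's disk list.
        for userdevice in sorted(allowed, key=int):
            if int(userdevice) > int(highest_occupied):
                return userdevice
        return None  # the caller reports an out-of-bounds error

    print(pick_userdevice(['1', '2', '4'], '2'))  # '4'
    print(pick_userdevice(['0', '1'], '3'))       # None
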
+
+                # We should append config_changes_disks to config_changes only
+                # if there is at least one changed disk, else skip.
+                for disk_change in config_changes_disks:
+                    if disk_change:
+                        config_changes.append({"disks_changed": config_changes_disks})
+                        break
+
+            if config_new_disks:
+                config_changes.append({"disks_new": config_new_disks})
+
+            config_changes_cdrom = []
+
+            if self.module.params['cdrom']:
+                # Get the list of all CD-ROMs. Filter out any regular disks
+                # found. If no existing CD-ROM is found, we will create one
+                # later; otherwise we take the first one found.
+                vm_cdrom_params_list = [cdrom_params for cdrom_params in self.vm_params['VBDs'] if cdrom_params['type'] == "CD"]
+
+                # If no existing CD-ROM is found, we will need to add one.
+                # We need to check if there is any userdevice allowed.
+                if not vm_cdrom_params_list and not vbd_userdevices_allowed:
+                    self.module.fail_json(msg="VM check cdrom: maximum number of devices reached!")
+
+                cdrom_type = self.module.params['cdrom'].get('type')
+                cdrom_iso_name = self.module.params['cdrom'].get('iso_name')
+
+                # If cdrom.iso_name is specified but cdrom.type is not,
+                # then set cdrom.type to 'iso', unless cdrom.iso_name is
+                # an empty string, in which case set cdrom.type to 'none'.
+                if not cdrom_type:
+                    if cdrom_iso_name:
+                        cdrom_type = "iso"
+                    elif cdrom_iso_name is not None:
+                        cdrom_type = "none"
+
+                    self.module.params['cdrom']['type'] = cdrom_type
+
+                # If type changed.
+                if cdrom_type and (not vm_cdrom_params_list or cdrom_type != self.get_cdrom_type(vm_cdrom_params_list[0])):
+                    config_changes_cdrom.append('type')
+
+                if cdrom_type == "iso":
+                    # Check if the ISO image exists.
+                    # Check existence only. Ignore return value.
+                    get_object_ref(self.module, cdrom_iso_name, uuid=None, obj_type="ISO image", fail=True,
+                                   msg_prefix="VM check cdrom.iso_name: ")
+
+                    # Has the ISO image changed?
+                    if (cdrom_iso_name and
+                            (not vm_cdrom_params_list or
+                             not vm_cdrom_params_list[0]['VDI'] or
+                             cdrom_iso_name != vm_cdrom_params_list[0]['VDI']['name_label'])):
+                        config_changes_cdrom.append('iso_name')
+
+            if config_changes_cdrom:
+                config_changes.append({"cdrom": config_changes_cdrom})
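
The cdrom.type defaulting applied above boils down to this small illustrative sketch (standalone, simplified from the module logic):

    def infer_cdrom_type(cdrom_type, iso_name):
        # An ISO name implies type 'iso', an explicitly empty ISO name implies
        # 'none', and an omitted ISO name leaves the type unset.
        if cdrom_type:
            return cdrom_type
        if iso_name:
            return "iso"
        if iso_name is not None:
            return "none"
        return None

    print(infer_cdrom_type(None, "ubuntu.iso"))  # 'iso'
    print(infer_cdrom_type(None, ""))            # 'none'
    print(infer_cdrom_type(None, None))          # None
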
+
+            config_changes_networks = []
+            config_new_networks = []
+
+            # Find allowed devices.
+            vif_devices_allowed = self.xapi_session.xenapi.VM.get_allowed_VIF_devices(self.vm_ref)
+
+            if self.module.params['networks']:
+                # The number of VIFs defined in module params has to be equal
+                # to or greater than the number of existing VIFs attached to
+                # the VM. We don't support removal of VIFs.
+                if len(self.module.params['networks']) < len(self.vm_params['VIFs']):
+                    self.module.fail_json(msg="VM check networks: provided networks configuration has fewer interfaces than the target VM (%d < %d)!" %
+                                          (len(self.module.params['networks']), len(self.vm_params['VIFs'])))
+
+                # Find the highest occupied device.
+                if not self.vm_params['VIFs']:
+                    vif_device_highest = "-1"
+                else:
+                    vif_device_highest = self.vm_params['VIFs'][-1]['device']
+
+                for position in range(len(self.module.params['networks'])):
+                    if position < len(self.vm_params['VIFs']):
+                        vm_vif_params = self.vm_params['VIFs'][position]
+                    else:
+                        vm_vif_params = None
+
+                    network_params = self.module.params['networks'][position]
+
+                    network_name = network_params.get('name')
+
+                    if network_name is not None and not network_name:
+                        self.module.fail_json(msg="VM check networks[%s]: network name cannot be an empty string!" % position)
+
+                    if network_name:
+                        # Check existence only. Ignore return value.
+                        get_object_ref(self.module, network_name, uuid=None, obj_type="network", fail=True,
+                                       msg_prefix="VM check networks[%s]: " % position)
+
+                    network_mac = network_params.get('mac')
+
+                    if network_mac is not None:
+                        network_mac = network_mac.lower()
+
+                        if not is_mac(network_mac):
+                            self.module.fail_json(msg="VM check networks[%s]: specified MAC address '%s' is not valid!" % (position, network_mac))
+
+                    # IPv4 reconfiguration.
+                    network_type = network_params.get('type')
+                    network_ip = network_params.get('ip')
+                    network_netmask = network_params.get('netmask')
+                    network_prefix = None
+
+                    # If networks.ip is specified and networks.type is not,
+                    # then set networks.type to 'static'.
+                    if not network_type and network_ip:
+                        network_type = "static"
+
+                    # XenServer natively supports only the 'none' and 'static'
+                    # types, with 'none' being the same as 'dhcp'.
+                    if self.vm_params['customization_agent'] == "native" and network_type and network_type == "dhcp":
+                        network_type = "none"
+
+                    if network_type and network_type == "static":
+                        if network_ip is not None:
+                            network_ip_split = network_ip.split('/')
+                            network_ip = network_ip_split[0]
+
+                            if network_ip and not is_valid_ip_addr(network_ip):
+                                self.module.fail_json(msg="VM check networks[%s]: specified IPv4 address '%s' is not valid!" % (position, network_ip))
+
+                            if len(network_ip_split) > 1:
+                                network_prefix = network_ip_split[1]
+
+                                if not is_valid_ip_prefix(network_prefix):
+                                    self.module.fail_json(msg="VM check networks[%s]: specified IPv4 prefix '%s' is not valid!" % (position, network_prefix))
+
+                        if network_netmask is not None:
+                            if not is_valid_ip_netmask(network_netmask):
+                                self.module.fail_json(msg="VM check networks[%s]: specified IPv4 netmask '%s' is not valid!" % (position, network_netmask))
+
+                            network_prefix = ip_netmask_to_prefix(network_netmask, skip_check=True)
+                        elif network_prefix is not None:
+                            network_netmask = ip_prefix_to_netmask(network_prefix, skip_check=True)
+
+                    # If any parameter is overridden at this point, update it.
+                    if network_type:
+                        network_params['type'] = network_type
+
+                    if network_ip:
+                        network_params['ip'] = network_ip
+
+                    if network_netmask:
+                        network_params['netmask'] = network_netmask
+
+                    if network_prefix:
+                        network_params['prefix'] = network_prefix
+
+                    network_gateway = network_params.get('gateway')
+
+                    # Gateway can be an empty string (when removing the gateway
+                    # configuration) but if it is not, it should be validated.
+                    if network_gateway and not is_valid_ip_addr(network_gateway):
+                        self.module.fail_json(msg="VM check networks[%s]: specified IPv4 gateway '%s' is not valid!" % (position, network_gateway))
+
+                    # IPv6 reconfiguration.
+                    network_type6 = network_params.get('type6')
+                    network_ip6 = network_params.get('ip6')
+                    network_prefix6 = None
+
+                    # If networks.ip6 is specified and networks.type6 is not,
+                    # then set networks.type6 to 'static'.
+                    if not network_type6 and network_ip6:
+                        network_type6 = "static"
+
+                    # XenServer natively supports only the 'none' and 'static'
+                    # types, with 'none' being the same as 'dhcp'.
+ if self.vm_params['customization_agent'] == "native" and network_type6 and network_type6 == "dhcp": + network_type6 = "none" + + if network_type6 and network_type6 == "static": + if network_ip6 is not None: + network_ip6_split = network_ip6.split('/') + network_ip6 = network_ip6_split[0] + + if network_ip6 and not is_valid_ip6_addr(network_ip6): + self.module.fail_json(msg="VM check networks[%s]: specified IPv6 address '%s' is not valid!" % (position, network_ip6)) + + if len(network_ip6_split) > 1: + network_prefix6 = network_ip6_split[1] + + if not is_valid_ip6_prefix(network_prefix6): + self.module.fail_json(msg="VM check networks[%s]: specified IPv6 prefix '%s' is not valid!" % (position, network_prefix6)) + + # If any parameter is overridden at this point, update it. + if network_type6: + network_params['type6'] = network_type6 + + if network_ip6: + network_params['ip6'] = network_ip6 + + if network_prefix6: + network_params['prefix6'] = network_prefix6 + + network_gateway6 = network_params.get('gateway6') + + # Gateway can be an empty string (when removing gateway + # configuration) but if it is not, it should be validated. + if network_gateway6 and not is_valid_ip6_addr(network_gateway6): + self.module.fail_json(msg="VM check networks[%s]: specified IPv6 gateway '%s' is not valid!" % (position, network_gateway6)) + + # If this is an existing VIF. + if vm_vif_params and vm_vif_params['network']: + network_changes = [] + + if network_name and network_name != vm_vif_params['network']['name_label']: + network_changes.append('name') + + if network_mac and network_mac != vm_vif_params['MAC'].lower(): + network_changes.append('mac') + + if self.vm_params['customization_agent'] == "native": + if network_type and network_type != vm_vif_params['ipv4_configuration_mode'].lower(): + network_changes.append('type') + + if network_type and network_type == "static": + if network_ip and (not vm_vif_params['ipv4_addresses'] or + not vm_vif_params['ipv4_addresses'][0] or + network_ip != vm_vif_params['ipv4_addresses'][0].split('/')[0]): + network_changes.append('ip') + + if network_prefix and (not vm_vif_params['ipv4_addresses'] or + not vm_vif_params['ipv4_addresses'][0] or + network_prefix != vm_vif_params['ipv4_addresses'][0].split('/')[1]): + network_changes.append('prefix') + network_changes.append('netmask') + + if network_gateway is not None and network_gateway != vm_vif_params['ipv4_gateway']: + network_changes.append('gateway') + + if network_type6 and network_type6 != vm_vif_params['ipv6_configuration_mode'].lower(): + network_changes.append('type6') + + if network_type6 and network_type6 == "static": + if network_ip6 and (not vm_vif_params['ipv6_addresses'] or + not vm_vif_params['ipv6_addresses'][0] or + network_ip6 != vm_vif_params['ipv6_addresses'][0].split('/')[0]): + network_changes.append('ip6') + + if network_prefix6 and (not vm_vif_params['ipv6_addresses'] or + not vm_vif_params['ipv6_addresses'][0] or + network_prefix6 != vm_vif_params['ipv6_addresses'][0].split('/')[1]): + network_changes.append('prefix6') + + if network_gateway6 is not None and network_gateway6 != vm_vif_params['ipv6_gateway']: + network_changes.append('gateway6') + + elif self.vm_params['customization_agent'] == "custom": + vm_xenstore_data = self.vm_params['xenstore_data'] + + if network_type and network_type != vm_xenstore_data.get('vm-data/networks/%s/type' % vm_vif_params['device'], "none"): + network_changes.append('type') + need_poweredoff = True + + if network_type and network_type == "static": + if 
network_ip and network_ip != vm_xenstore_data.get('vm-data/networks/%s/ip' % vm_vif_params['device'], ""): + network_changes.append('ip') + need_poweredoff = True + + if network_prefix and network_prefix != vm_xenstore_data.get('vm-data/networks/%s/prefix' % vm_vif_params['device'], ""): + network_changes.append('prefix') + network_changes.append('netmask') + need_poweredoff = True + + if network_gateway is not None and network_gateway != vm_xenstore_data.get('vm-data/networks/%s/gateway' % + vm_vif_params['device'], ""): + network_changes.append('gateway') + need_poweredoff = True + + if network_type6 and network_type6 != vm_xenstore_data.get('vm-data/networks/%s/type6' % vm_vif_params['device'], "none"): + network_changes.append('type6') + need_poweredoff = True + + if network_type6 and network_type6 == "static": + if network_ip6 and network_ip6 != vm_xenstore_data.get('vm-data/networks/%s/ip6' % vm_vif_params['device'], ""): + network_changes.append('ip6') + need_poweredoff = True + + if network_prefix6 and network_prefix6 != vm_xenstore_data.get('vm-data/networks/%s/prefix6' % vm_vif_params['device'], ""): + network_changes.append('prefix6') + need_poweredoff = True + + if network_gateway6 is not None and network_gateway6 != vm_xenstore_data.get('vm-data/networks/%s/gateway6' % + vm_vif_params['device'], ""): + network_changes.append('gateway6') + need_poweredoff = True + + config_changes_networks.append(network_changes) + # If this is a new VIF. + else: + if not network_name: + self.module.fail_json(msg="VM check networks[%s]: network name is required for new network interface!" % position) + + if network_type and network_type == "static" and network_ip and not network_netmask: + self.module.fail_json(msg="VM check networks[%s]: IPv4 netmask or prefix is required for new network interface!" % position) + + if network_type6 and network_type6 == "static" and network_ip6 and not network_prefix6: + self.module.fail_json(msg="VM check networks[%s]: IPv6 prefix is required for new network interface!" % position) + + # Restart is needed if we are adding new network + # interface with IP/gateway parameters specified + # and custom agent is used. + if self.vm_params['customization_agent'] == "custom": + for parameter in ['type', 'ip', 'prefix', 'gateway', 'type6', 'ip6', 'prefix6', 'gateway6']: + if network_params.get(parameter): + need_poweredoff = True + break + + if not vif_devices_allowed: + self.module.fail_json(msg="VM check networks[%s]: maximum number of network interfaces reached!" % position) + + # We need to place a new network interface right above the + # highest placed existing interface to maintain relative + # positions pairable with network interface specifications + # in module params. + vif_device = str(int(vif_device_highest) + 1) + + if vif_device not in vif_devices_allowed: + self.module.fail_json(msg="VM check networks[%s]: new network interface position %s is out of bounds!" % (position, vif_device)) + + vif_devices_allowed.remove(vif_device) + vif_device_highest = vif_device + + # For new VIFs we only track their position. + config_new_networks.append((position, vif_device)) + + # We should append config_changes_networks to config_changes only + # if there is at least one changed network, else skip. 
+            for network_change in config_changes_networks:
+                if network_change:
+                    config_changes.append({"networks_changed": config_changes_networks})
+                    break
+
+            if config_new_networks:
+                config_changes.append({"networks_new": config_new_networks})
+
+            config_changes_custom_params = []
+
+            if self.module.params['custom_params']:
+                for position in range(len(self.module.params['custom_params'])):
+                    custom_param = self.module.params['custom_params'][position]
+
+                    custom_param_key = custom_param['key']
+                    custom_param_value = custom_param['value']
+
+                    if custom_param_key not in self.vm_params:
+                        self.module.fail_json(msg="VM check custom_params[%s]: unknown VM param '%s'!" % (position, custom_param_key))
+
+                    if custom_param_value != self.vm_params[custom_param_key]:
+                        # We only need to track custom param position.
+                        config_changes_custom_params.append(position)
+
+            if config_changes_custom_params:
+                config_changes.append({"custom_params": config_changes_custom_params})
+
+            if need_poweredoff:
+                config_changes.append('need_poweredoff')
+
+            return config_changes
+
+        except XenAPI.Failure as f:
+            self.module.fail_json(msg="XAPI ERROR: %s" % f.details)
+
+    def get_normalized_disk_size(self, disk_params, msg_prefix=""):
+        """Parses disk size parameters and returns disk size in bytes.
+
+        This method tries to parse disk size module parameters. It fails
+        with an error message if size cannot be parsed.
+
+        Args:
+            disk_params (dict): A dictionary with disk parameters.
+            msg_prefix (str): A string error messages should be prefixed
+                with (default: "").
+
+        Returns:
+            int: disk size in bytes if disk size is successfully parsed or
+            None if no disk size parameters were found.
+        """
+        # There should be only a single size spec but we make a list of all
+        # size specs just in case. Priority is given to 'size' but if not
+        # found, we check for 'size_tb', 'size_gb', 'size_mb' etc. and use
+        # the first one found.
+        disk_size_spec = [x for x in disk_params.keys() if disk_params[x] is not None and (x.startswith('size_') or x == 'size')]
+
+        if disk_size_spec:
+            try:
+                # size
+                if "size" in disk_size_spec:
+                    size_regex = re.compile(r'(\d+(?:\.\d+)?)\s*(.*)')
+                    disk_size_m = size_regex.match(disk_params['size'])
+
+                    if disk_size_m:
+                        size = disk_size_m.group(1)
+                        unit = disk_size_m.group(2)
+                    else:
+                        raise ValueError
+                # size_tb, size_gb, size_mb, size_kb, size_b
+                else:
+                    size = disk_params[disk_size_spec[0]]
+                    unit = disk_size_spec[0].split('_')[-1]
+
+                if not unit:
+                    unit = "b"
+                else:
+                    unit = unit.lower()
+
+                if re.match(r'\d+\.\d+', size):
+                    # We found a float value in the string, let's typecast it.
+                    if unit == "b":
+                        # If we found a float but the unit is bytes, we take the integer part only.
+                        size = int(float(size))
+                    else:
+                        size = float(size)
+                else:
+                    # We found an int value in the string, let's typecast it.
+                    size = int(size)
+
+                if not size or size < 0:
+                    raise ValueError
+
+            except (TypeError, ValueError, NameError):
+                # Common failure
+                self.module.fail_json(msg="%sfailed to parse disk size! Please review the provided value against the documentation." % msg_prefix)
+
+            disk_units = dict(tb=4, gb=3, mb=2, kb=1, b=0)
+
+            if unit in disk_units:
+                return int(size * (1024 ** disk_units[unit]))
+            else:
+                self.module.fail_json(msg="%s'%s' is not a supported unit for disk size! Supported units are ['%s']." %
+                                      (msg_prefix, unit, "', '".join(sorted(disk_units.keys(), key=lambda key: disk_units[key]))))
+        else:
+            return None
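
The parsing contract of get_normalized_disk_size() comes down to the following sketch, shown for a quick sanity check (an assumed simplification that skips the module's error handling and the float/int distinction):

    import re

    def to_bytes(size, unit):
        units = dict(tb=4, gb=3, mb=2, kb=1, b=0)
        return int(float(size) * (1024 ** units[unit.lower()]))

    match = re.match(r'(\d+(?:\.\d+)?)\s*(.*)', '10 GB')
    print(to_bytes(match.group(1), match.group(2) or 'b'))  # 10737418240
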
+    @staticmethod
+    def get_cdrom_type(vm_cdrom_params):
+        """Returns VM CD-ROM type."""
+        # TODO: implement support for detecting the 'host' type. No server to
+        # test this on at the moment.
+        if vm_cdrom_params['empty']:
+            return "none"
+        else:
+            return "iso"
+
+
+def main():
+    argument_spec = xenserver_common_argument_spec()
+    argument_spec.update(
+        state=dict(type='str', default='present',
+                   choices=['present', 'absent', 'poweredon']),
+        name=dict(type='str', aliases=['name_label']),
+        name_desc=dict(type='str'),
+        uuid=dict(type='str'),
+        template=dict(type='str', aliases=['template_src']),
+        template_uuid=dict(type='str'),
+        is_template=dict(type='bool', default=False),
+        folder=dict(type='str'),
+        hardware=dict(
+            type='dict',
+            options=dict(
+                num_cpus=dict(type='int'),
+                num_cpu_cores_per_socket=dict(type='int'),
+                memory_mb=dict(type='int'),
+            ),
+        ),
+        disks=dict(
+            type='list',
+            elements='dict',
+            options=dict(
+                size=dict(type='str'),
+                size_tb=dict(type='str'),
+                size_gb=dict(type='str'),
+                size_mb=dict(type='str'),
+                size_kb=dict(type='str'),
+                size_b=dict(type='str'),
+                name=dict(type='str', aliases=['name_label']),
+                name_desc=dict(type='str'),
+                sr=dict(type='str'),
+                sr_uuid=dict(type='str'),
+            ),
+            aliases=['disk'],
+            mutually_exclusive=[
+                ['size', 'size_tb', 'size_gb', 'size_mb', 'size_kb', 'size_b'],
+                ['sr', 'sr_uuid'],
+            ],
+        ),
+        cdrom=dict(
+            type='dict',
+            options=dict(
+                type=dict(type='str', choices=['none', 'iso']),
+                iso_name=dict(type='str'),
+            ),
+            required_if=[
+                ['type', 'iso', ['iso_name']],
+            ],
+        ),
+        networks=dict(
+            type='list',
+            elements='dict',
+            options=dict(
+                name=dict(type='str', aliases=['name_label']),
+                mac=dict(type='str'),
+                type=dict(type='str', choices=['none', 'dhcp', 'static']),
+                ip=dict(type='str'),
+                netmask=dict(type='str'),
+                gateway=dict(type='str'),
+                type6=dict(type='str', choices=['none', 'dhcp', 'static']),
+                ip6=dict(type='str'),
+                gateway6=dict(type='str'),
+            ),
+            aliases=['network'],
+            required_if=[
+                ['type', 'static', ['ip']],
+                ['type6', 'static', ['ip6']],
+            ],
+        ),
+        home_server=dict(type='str'),
+        custom_params=dict(
+            type='list',
+            elements='dict',
+            options=dict(
+                key=dict(type='str', required=True),
+                value=dict(type='raw', required=True),
+            ),
+        ),
+        wait_for_ip_address=dict(type='bool', default=False),
+        state_change_timeout=dict(type='int', default=0),
+        linked_clone=dict(type='bool', default=False),
+        force=dict(type='bool', default=False),
+    )
+
+    module = AnsibleModule(argument_spec=argument_spec,
+                           supports_check_mode=True,
+                           required_one_of=[
+                               ['name', 'uuid'],
+                           ],
+                           mutually_exclusive=[
+                               ['template', 'template_uuid'],
+                           ],
+                           )
+
+    result = {'failed': False, 'changed': False}
+
+    vm = XenServerVM(module)
+
+    # Find existing VM
+    if vm.exists():
+        if module.params['state'] == "absent":
+            vm.destroy()
+            result['changed'] = True
+        elif module.params['state'] == "present":
+            config_changes = vm.reconfigure()
+
+            if config_changes:
+                result['changed'] = True
+
+                # Make new disk and network changes more user friendly
+                # and informative.
+ for change in config_changes: + if isinstance(change, dict): + if change.get('disks_new'): + disks_new = [] + + for position, userdevice in change['disks_new']: + disk_new_params = {"position": position, "vbd_userdevice": userdevice} + disk_params = module.params['disks'][position] + + for k in disk_params.keys(): + if disk_params[k] is not None: + disk_new_params[k] = disk_params[k] + + disks_new.append(disk_new_params) + + if disks_new: + change['disks_new'] = disks_new + + elif change.get('networks_new'): + networks_new = [] + + for position, device in change['networks_new']: + network_new_params = {"position": position, "vif_device": device} + network_params = module.params['networks'][position] + + for k in network_params.keys(): + if network_params[k] is not None: + network_new_params[k] = network_params[k] + + networks_new.append(network_new_params) + + if networks_new: + change['networks_new'] = networks_new + + result['changes'] = config_changes + + elif module.params['state'] in ["poweredon", "poweredoff", "restarted", "shutdownguest", "rebootguest", "suspended"]: + result['changed'] = vm.set_power_state(module.params['state']) + elif module.params['state'] != "absent": + vm.deploy() + result['changed'] = True + + if module.params['wait_for_ip_address'] and module.params['state'] != "absent": + vm.wait_for_ip_address() + + result['instance'] = vm.gather_facts() + + if result['failed']: + module.fail_json(**result) + else: + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/xenserver/xenserver_guest_facts.py b/plugins/modules/cloud/xenserver/xenserver_guest_facts.py new file mode 120000 index 0000000000..e4f2c814ea --- /dev/null +++ b/plugins/modules/cloud/xenserver/xenserver_guest_facts.py @@ -0,0 +1 @@ +xenserver_guest_info.py \ No newline at end of file diff --git a/plugins/modules/cloud/xenserver/xenserver_guest_info.py b/plugins/modules/cloud/xenserver/xenserver_guest_info.py new file mode 100644 index 0000000000..f55665c39c --- /dev/null +++ b/plugins/modules/cloud/xenserver/xenserver_guest_info.py @@ -0,0 +1,230 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright: (c) 2018, Bojan Vitnik +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = r''' +--- +module: xenserver_guest_info +short_description: Gathers information for virtual machines running on Citrix Hypervisor/XenServer host or pool +description: > + This module can be used to gather essential VM facts. +author: +- Bojan Vitnik (@bvitnik) +notes: +- Minimal supported version of XenServer is 5.6. +- Module was tested with XenServer 6.5, 7.1, 7.2, 7.6, Citrix Hypervisor 8.0, XCP-ng 7.6 and 8.0. +- 'To acquire XenAPI Python library, just run C(pip install XenAPI) on your Ansible Control Node. The library can also be found inside + Citrix Hypervisor/XenServer SDK (downloadable from Citrix website). Copy the XenAPI.py file from the SDK to your Python site-packages on your + Ansible Control Node to use it. Latest version of the library can also be acquired from GitHub: + U(https://raw.githubusercontent.com/xapi-project/xen-api/master/scripts/examples/python/XenAPI/XenAPI.py)' +- 'If no scheme is specified in C(hostname), module defaults to C(http://) because C(https://) is problematic in most setups. 
Make sure you are + accessing XenServer host in trusted environment or use C(https://) scheme explicitly.' +- 'To use C(https://) scheme for C(hostname) you have to either import host certificate to your OS certificate store or use C(validate_certs: no) + which requires XenAPI library from XenServer 7.2 SDK or newer and Python 2.7.9 or newer.' +- This module was called C(xenserver_guest_facts) before Ansible 2.9. The usage did not change. +requirements: +- python >= 2.6 +- XenAPI +options: + name: + description: + - Name of the VM to gather facts from. + - VMs running on XenServer do not necessarily have unique names. The module will fail if multiple VMs with same name are found. + - In case of multiple VMs with same name, use C(uuid) to uniquely specify VM to manage. + - This parameter is case sensitive. + type: str + required: yes + aliases: [ name_label ] + uuid: + description: + - UUID of the VM to gather fact of. This is XenServer's unique identifier. + - It is required if name is not unique. + type: str +extends_documentation_fragment: +- community.general.xenserver.documentation + +''' + +EXAMPLES = r''' +- name: Gather facts + xenserver_guest_info: + hostname: "{{ xenserver_hostname }}" + username: "{{ xenserver_username }}" + password: "{{ xenserver_password }}" + name: testvm_11 + delegate_to: localhost + register: facts +''' + +RETURN = r''' +instance: + description: Metadata about the VM + returned: always + type: dict + sample: { + "cdrom": { + "type": "none" + }, + "customization_agent": "native", + "disks": [ + { + "name": "testvm_11-0", + "name_desc": "", + "os_device": "xvda", + "size": 42949672960, + "sr": "Local storage", + "sr_uuid": "0af1245e-bdb0-ba33-1446-57a962ec4075", + "vbd_userdevice": "0" + }, + { + "name": "testvm_11-1", + "name_desc": "", + "os_device": "xvdb", + "size": 42949672960, + "sr": "Local storage", + "sr_uuid": "0af1245e-bdb0-ba33-1446-57a962ec4075", + "vbd_userdevice": "1" + } + ], + "domid": "56", + "folder": "", + "hardware": { + "memory_mb": 8192, + "num_cpu_cores_per_socket": 2, + "num_cpus": 4 + }, + "home_server": "", + "is_template": false, + "name": "testvm_11", + "name_desc": "", + "networks": [ + { + "gateway": "192.168.0.254", + "gateway6": "fc00::fffe", + "ip": "192.168.0.200", + "ip6": [ + "fe80:0000:0000:0000:e9cb:625a:32c5:c291", + "fc00:0000:0000:0000:0000:0000:0000:0001" + ], + "mac": "ba:91:3a:48:20:76", + "mtu": "1500", + "name": "Pool-wide network associated with eth1", + "netmask": "255.255.255.128", + "prefix": "25", + "prefix6": "64", + "vif_device": "0" + } + ], + "other_config": { + "base_template_name": "Windows Server 2016 (64-bit)", + "import_task": "OpaqueRef:e43eb71c-45d6-5351-09ff-96e4fb7d0fa5", + "install-methods": "cdrom", + "instant": "true", + "mac_seed": "f83e8d8a-cfdc-b105-b054-ef5cb416b77e" + }, + "platform": { + "acpi": "1", + "apic": "true", + "cores-per-socket": "2", + "device_id": "0002", + "hpet": "true", + "nx": "true", + "pae": "true", + "timeoffset": "-25200", + "vga": "std", + "videoram": "8", + "viridian": "true", + "viridian_reference_tsc": "true", + "viridian_time_ref_count": "true" + }, + "state": "poweredon", + "uuid": "e3c0b2d5-5f05-424e-479c-d3df8b3e7cda", + "xenstore_data": { + "vm-data": "" + } + } +''' + +HAS_XENAPI = False +try: + import XenAPI + HAS_XENAPI = True +except ImportError: + pass + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.xenserver import (xenserver_common_argument_spec, XAPI, XenServerObject, 
get_object_ref, + gather_vm_params, gather_vm_facts) + + +class XenServerVM(XenServerObject): + """Class for managing XenServer VM. + + Attributes: + vm_ref (str): XAPI reference to VM. + vm_params (dict): A dictionary with VM parameters as returned + by gather_vm_params() function. + """ + + def __init__(self, module): + """Inits XenServerVM using module parameters. + + Args: + module: Reference to AnsibleModule object. + """ + super(XenServerVM, self).__init__(module) + + self.vm_ref = get_object_ref(self.module, self.module.params['name'], self.module.params['uuid'], obj_type="VM", fail=True, msg_prefix="VM search: ") + self.gather_params() + + def gather_params(self): + """Gathers all VM parameters available in XAPI database.""" + self.vm_params = gather_vm_params(self.module, self.vm_ref) + + def gather_facts(self): + """Gathers and returns VM facts.""" + return gather_vm_facts(self.module, self.vm_params) + + +def main(): + argument_spec = xenserver_common_argument_spec() + argument_spec.update( + name=dict(type='str', aliases=['name_label']), + uuid=dict(type='str'), + ) + + module = AnsibleModule(argument_spec=argument_spec, + supports_check_mode=True, + required_one_of=[ + ['name', 'uuid'], + ], + ) + + if module._name == 'xenserver_guest_facts': + module.deprecate("The 'xenserver_guest_facts' module has been renamed to 'xenserver_guest_info'", version='2.13') + + result = {'failed': False, 'changed': False} + + # Module will exit with an error message if no VM is found. + vm = XenServerVM(module) + + # Gather facts. + result['instance'] = vm.gather_facts() + + if result['failed']: + module.fail_json(**result) + else: + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/xenserver/xenserver_guest_powerstate.py b/plugins/modules/cloud/xenserver/xenserver_guest_powerstate.py new file mode 100644 index 0000000000..379e2a3216 --- /dev/null +++ b/plugins/modules/cloud/xenserver/xenserver_guest_powerstate.py @@ -0,0 +1,274 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright: (c) 2018, Bojan Vitnik +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = r''' +--- +module: xenserver_guest_powerstate +short_description: Manages power states of virtual machines running on Citrix Hypervisor/XenServer host or pool +description: > + This module can be used to power on, power off, restart or suspend virtual machine and gracefully reboot or shutdown guest OS of virtual machine. +author: +- Bojan Vitnik (@bvitnik) +notes: +- Minimal supported version of XenServer is 5.6. +- Module was tested with XenServer 6.5, 7.1, 7.2, 7.6, Citrix Hypervisor 8.0, XCP-ng 7.6 and 8.0. +- 'To acquire XenAPI Python library, just run C(pip install XenAPI) on your Ansible Control Node. The library can also be found inside + Citrix Hypervisor/XenServer SDK (downloadable from Citrix website). Copy the XenAPI.py file from the SDK to your Python site-packages on your + Ansible Control Node to use it. Latest version of the library can also be acquired from GitHub: + U(https://raw.githubusercontent.com/xapi-project/xen-api/master/scripts/examples/python/XenAPI/XenAPI.py)' +- 'If no scheme is specified in C(hostname), module defaults to C(http://) because C(https://) is problematic in most setups. 
Make sure you are + accessing XenServer host in trusted environment or use C(https://) scheme explicitly.' +- 'To use C(https://) scheme for C(hostname) you have to either import host certificate to your OS certificate store or use C(validate_certs: no) + which requires XenAPI library from XenServer 7.2 SDK or newer and Python 2.7.9 or newer.' +requirements: +- python >= 2.6 +- XenAPI +options: + state: + description: + - Specify the state VM should be in. + - If C(state) is set to value other than C(present), then VM is transitioned into required state and facts are returned. + - If C(state) is set to C(present), then VM is just checked for existence and facts are returned. + type: str + default: present + choices: [ powered-on, powered-off, restarted, shutdown-guest, reboot-guest, suspended, present ] + name: + description: + - Name of the VM to manage. + - VMs running on XenServer do not necessarily have unique names. The module will fail if multiple VMs with same name are found. + - In case of multiple VMs with same name, use C(uuid) to uniquely specify VM to manage. + - This parameter is case sensitive. + type: str + required: yes + aliases: [ name_label ] + uuid: + description: + - UUID of the VM to manage if known. This is XenServer's unique identifier. + - It is required if name is not unique. + type: str + wait_for_ip_address: + description: + - Wait until XenServer detects an IP address for the VM. + - This requires XenServer Tools to be preinstalled on the VM to work properly. + type: bool + default: no + state_change_timeout: + description: + - 'By default, module will wait indefinitely for VM to change state or acquire an IP address if C(wait_for_ip_address: yes).' + - If this parameter is set to positive value, the module will instead wait specified number of seconds for the state change. + - In case of timeout, module will generate an error message. 
+ type: int + default: 0 +extends_documentation_fragment: +- community.general.xenserver.documentation + +''' + +EXAMPLES = r''' +- name: Power on VM + xenserver_guest_powerstate: + hostname: "{{ xenserver_hostname }}" + username: "{{ xenserver_username }}" + password: "{{ xenserver_password }}" + name: testvm_11 + state: powered-on + delegate_to: localhost + register: facts +''' + +RETURN = r''' +instance: + description: Metadata about the VM + returned: always + type: dict + sample: { + "cdrom": { + "type": "none" + }, + "customization_agent": "native", + "disks": [ + { + "name": "windows-template-testing-0", + "name_desc": "", + "os_device": "xvda", + "size": 42949672960, + "sr": "Local storage", + "sr_uuid": "0af1245e-bdb0-ba33-1446-57a962ec4075", + "vbd_userdevice": "0" + }, + { + "name": "windows-template-testing-1", + "name_desc": "", + "os_device": "xvdb", + "size": 42949672960, + "sr": "Local storage", + "sr_uuid": "0af1245e-bdb0-ba33-1446-57a962ec4075", + "vbd_userdevice": "1" + } + ], + "domid": "56", + "folder": "", + "hardware": { + "memory_mb": 8192, + "num_cpu_cores_per_socket": 2, + "num_cpus": 4 + }, + "home_server": "", + "is_template": false, + "name": "windows-template-testing", + "name_desc": "", + "networks": [ + { + "gateway": "192.168.0.254", + "gateway6": "fc00::fffe", + "ip": "192.168.0.200", + "ip6": [ + "fe80:0000:0000:0000:e9cb:625a:32c5:c291", + "fc00:0000:0000:0000:0000:0000:0000:0001" + ], + "mac": "ba:91:3a:48:20:76", + "mtu": "1500", + "name": "Pool-wide network associated with eth1", + "netmask": "255.255.255.128", + "prefix": "25", + "prefix6": "64", + "vif_device": "0" + } + ], + "other_config": { + "base_template_name": "Windows Server 2016 (64-bit)", + "import_task": "OpaqueRef:e43eb71c-45d6-5351-09ff-96e4fb7d0fa5", + "install-methods": "cdrom", + "instant": "true", + "mac_seed": "f83e8d8a-cfdc-b105-b054-ef5cb416b77e" + }, + "platform": { + "acpi": "1", + "apic": "true", + "cores-per-socket": "2", + "device_id": "0002", + "hpet": "true", + "nx": "true", + "pae": "true", + "timeoffset": "-25200", + "vga": "std", + "videoram": "8", + "viridian": "true", + "viridian_reference_tsc": "true", + "viridian_time_ref_count": "true" + }, + "state": "poweredon", + "uuid": "e3c0b2d5-5f05-424e-479c-d3df8b3e7cda", + "xenstore_data": { + "vm-data": "" + } + } +''' + +import re + +HAS_XENAPI = False +try: + import XenAPI + HAS_XENAPI = True +except ImportError: + pass + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.xenserver import (xenserver_common_argument_spec, XAPI, XenServerObject, get_object_ref, + gather_vm_params, gather_vm_facts, set_vm_power_state, wait_for_vm_ip_address) + + +class XenServerVM(XenServerObject): + """Class for managing XenServer VM. + + Attributes: + vm_ref (str): XAPI reference to VM. + vm_params (dict): A dictionary with VM parameters as returned + by gather_vm_params() function. + """ + + def __init__(self, module): + """Inits XenServerVM using module parameters. + + Args: + module: Reference to Ansible module object. 
+ """ + super(XenServerVM, self).__init__(module) + + self.vm_ref = get_object_ref(self.module, self.module.params['name'], self.module.params['uuid'], obj_type="VM", fail=True, msg_prefix="VM search: ") + self.gather_params() + + def gather_params(self): + """Gathers all VM parameters available in XAPI database.""" + self.vm_params = gather_vm_params(self.module, self.vm_ref) + + def gather_facts(self): + """Gathers and returns VM facts.""" + return gather_vm_facts(self.module, self.vm_params) + + def set_power_state(self, power_state): + """Controls VM power state.""" + state_changed, current_state = set_vm_power_state(self.module, self.vm_ref, power_state, self.module.params['state_change_timeout']) + + # If state has changed, update vm_params. + if state_changed: + self.vm_params['power_state'] = current_state.capitalize() + + return state_changed + + def wait_for_ip_address(self): + """Waits for VM to acquire an IP address.""" + self.vm_params['guest_metrics'] = wait_for_vm_ip_address(self.module, self.vm_ref, self.module.params['state_change_timeout']) + + +def main(): + argument_spec = xenserver_common_argument_spec() + argument_spec.update( + state=dict(type='str', default='present', + choices=['powered-on', 'powered-off', 'restarted', 'shutdown-guest', 'reboot-guest', 'suspended', 'present']), + name=dict(type='str', aliases=['name_label']), + uuid=dict(type='str'), + wait_for_ip_address=dict(type='bool', default=False), + state_change_timeout=dict(type='int', default=0), + ) + + module = AnsibleModule(argument_spec=argument_spec, + supports_check_mode=True, + required_one_of=[ + ['name', 'uuid'], + ], + ) + + result = {'failed': False, 'changed': False} + + # Module will exit with an error message if no VM is found. + vm = XenServerVM(module) + + # Set VM power state. + if module.params['state'] != "present": + result['changed'] = vm.set_power_state(module.params['state']) + + if module.params['wait_for_ip_address']: + vm.wait_for_ip_address() + + result['instance'] = vm.gather_facts() + + if result['failed']: + module.fail_json(**result) + else: + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/clustering/consul/consul.py b/plugins/modules/clustering/consul/consul.py new file mode 100644 index 0000000000..38af833578 --- /dev/null +++ b/plugins/modules/clustering/consul/consul.py @@ -0,0 +1,575 @@ +#!/usr/bin/python +# +# (c) 2015, Steve Gargan +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +module: consul +short_description: "Add, modify & delete services within a consul cluster." +description: + - Registers services and checks for an agent with a consul cluster. + A service is some process running on the agent node that should be advertised by + consul's discovery mechanism. It may optionally supply a check definition, + a periodic service test to notify the consul cluster of service's health. + - "Checks may also be registered per node e.g. disk usage, or cpu usage and + notify the health of the entire node to the cluster. + Service level checks do not require a check name or id as these are derived + by Consul from the Service name and id respectively by appending 'service:' + Node level checks require a I(check_name) and optionally a I(check_id)." 
+  - Currently, there is no complete way to retrieve the script, interval or ttl
+    metadata for a registered check. Without this metadata it is not possible to
+    tell if the data supplied with ansible represents a change to a check. As a
+    result this module does not attempt to determine changes and will always
+    report that a change occurred. An API method is planned to supply this
+    metadata, so at that stage change management will be added.
+  - "See U(http://consul.io) for more details."
requirements:
  - python-consul
  - requests
author: "Steve Gargan (@sgargan)"
options:
    state:
        description:
          - register or deregister the consul service, defaults to present
        default: present
        choices: ['present', 'absent']
    service_name:
        type: str
        description:
          - Unique name for the service on a node, must be unique per node,
            required if registering a service. May be omitted if registering
            a node level check
    service_id:
        type: str
        description:
          - the ID for the service, must be unique per node. If I(state=absent),
            defaults to the service name if supplied.
    host:
        type: str
        description:
          - host of the consul agent, defaults to localhost
        default: localhost
    port:
        type: int
        description:
          - the port on which the consul agent is running
        default: 8500
    scheme:
        type: str
        description:
          - the protocol scheme on which the consul agent is running
        default: http
    validate_certs:
        description:
          - whether to verify the TLS certificate of the consul agent
        type: bool
        default: 'yes'
    notes:
        type: str
        description:
          - Notes to attach to the check when registering it.
    service_port:
        type: int
        description:
          - the port on which the service is listening. Can optionally be supplied for
            registration of a service, i.e. if I(service_name) or I(service_id) is set
    service_address:
        type: str
        description:
          - the address to advertise that the service will be listening on.
            This value will be passed as the I(address) parameter to Consul's
            U(/v1/agent/service/register) API method, so refer to the Consul API
            documentation for further details.
    tags:
        type: list
        description:
          - tags that will be attached to the service registration.
    script:
        type: str
        description:
          - the script/command that will be run periodically to check the health
            of the service. Scripts require I(interval) and vice versa.
    interval:
        type: str
        description:
          - the interval at which the service check will be run. This is a number
            with a C(s) or C(m) suffix to signify the units of seconds or minutes,
            e.g. C(15s) or C(1m). If no suffix is supplied, C(m) will be used by
            default, e.g. C(1) will be C(1m). Required if the I(script) parameter
            is specified.
    check_id:
        type: str
        description:
          - an ID for the service check. If I(state=absent), defaults to
            I(check_name). Ignored if part of a service definition.
    check_name:
        type: str
        description:
          - a name for the service check. Required if standalone, ignored if
            part of a service definition.
    ttl:
        type: str
        description:
          - checks can be registered with a ttl instead of a I(script) and
            I(interval). This means that the service will check in with the agent
            before the ttl expires. If it doesn't, the check will be considered
            failed. Required if registering a check and the script and interval
            are missing. Similar to the interval, this is a number with a C(s) or
            C(m) suffix to signify the units of seconds or minutes, e.g. C(15s)
            or C(1m). If no suffix is supplied, C(m) will be used by default e.g.
C(1) will be C(1m) + http: + type: str + description: + - checks can be registered with an HTTP endpoint. This means that consul + will check that the http endpoint returns a successful HTTP status. + I(interval) must also be provided with this option. + timeout: + type: str + description: + - A custom HTTP check timeout. The consul default is 10 seconds. + Similar to the interval this is a number with a C(s) or C(m) suffix to + signify the units of seconds or minutes, e.g. C(15s) or C(1m). + token: + type: str + description: + - the token key identifying an ACL rule set. May be required to register services. +''' + +EXAMPLES = ''' +- name: register nginx service with the local consul agent + consul: + service_name: nginx + service_port: 80 + +- name: register nginx service with curl check + consul: + service_name: nginx + service_port: 80 + script: curl http://localhost + interval: 60s + +- name: register nginx with an http check + consul: + service_name: nginx + service_port: 80 + interval: 60s + http: http://localhost:80/status + +- name: register external service nginx available at 10.1.5.23 + consul: + service_name: nginx + service_port: 80 + service_address: 10.1.5.23 + +- name: register nginx with some service tags + consul: + service_name: nginx + service_port: 80 + tags: + - prod + - webservers + +- name: remove nginx service + consul: + service_name: nginx + state: absent + +- name: register celery worker service + consul: + service_name: celery-worker + tags: + - prod + - worker + +- name: create a node level check to test disk usage + consul: + check_name: Disk usage + check_id: disk_usage + script: /opt/disk_usage.py + interval: 5m + +- name: register an http check against a service that's already registered + consul: + check_name: nginx-check2 + check_id: nginx-check2 + service_id: nginx + interval: 60s + http: http://localhost:80/morestatus +''' + +try: + import consul + from requests.exceptions import ConnectionError + + class PatchedConsulAgentService(consul.Consul.Agent.Service): + def deregister(self, service_id, token=None): + params = {} + if token: + params['token'] = token + return self.agent.http.put(consul.base.CB.bool(), + '/v1/agent/service/deregister/%s' % service_id, + params=params) + + python_consul_installed = True +except ImportError: + python_consul_installed = False + +from ansible.module_utils.basic import AnsibleModule + + +def register_with_consul(module): + state = module.params.get('state') + + if state == 'present': + add(module) + else: + remove(module) + + +def add(module): + ''' adds a service or a check depending on supplied configuration''' + check = parse_check(module) + service = parse_service(module) + + if not service and not check: + module.fail_json(msg='a name and port are required to register a service') + + if service: + if check: + service.add_check(check) + add_service(module, service) + elif check: + add_check(module, check) + + +def remove(module): + ''' removes a service or a check ''' + service_id = module.params.get('service_id') or module.params.get('service_name') + check_id = module.params.get('check_id') or module.params.get('check_name') + if not (service_id or check_id): + module.fail_json(msg='services and checks are removed by id or name. please supply a service id/name or a check id/name') + if service_id: + remove_service(module, service_id) + else: + remove_check(module, check_id) + + +def add_check(module, check): + ''' registers a check with the given agent. 
currently there is no way to
+    retrieve the full metadata of an existing check through the consul api.
+    Without this we can't compare to the supplied check and so we must assume
+    a change. '''
+    if not check.name and not check.service_id:
+        module.fail_json(msg='a check name is required for a node level check, one not attached to a service')
+
+    consul_api = get_consul_api(module)
+    check.register(consul_api)
+
+    module.exit_json(changed=True,
+                     check_id=check.check_id,
+                     check_name=check.name,
+                     script=check.script,
+                     interval=check.interval,
+                     ttl=check.ttl,
+                     http=check.http,
+                     timeout=check.timeout,
+                     service_id=check.service_id)
+
+
+def remove_check(module, check_id):
+    ''' removes a check using its id '''
+    consul_api = get_consul_api(module)
+
+    if check_id in consul_api.agent.checks():
+        consul_api.agent.check.deregister(check_id)
+        module.exit_json(changed=True, id=check_id)
+
+    module.exit_json(changed=False, id=check_id)
+
+
+def add_service(module, service):
+    ''' registers a service with the current agent '''
+    result = service
+    changed = False
+
+    consul_api = get_consul_api(module)
+    existing = get_service_by_id_or_name(consul_api, service.id)
+
+    # there is no way to retrieve the details of checks so if a check is present
+    # in the service it must be re-registered
+    if service.has_checks() or not existing or existing != service:
+
+        service.register(consul_api)
+        # check that it registered correctly
+        registered = get_service_by_id_or_name(consul_api, service.id)
+        if registered:
+            result = registered
+            changed = True
+
+    module.exit_json(changed=changed,
+                     service_id=result.id,
+                     service_name=result.name,
+                     service_port=result.port,
+                     checks=[check.to_dict() for check in service.checks],
+                     tags=result.tags)
+
+
+def remove_service(module, service_id):
+    ''' deregister a service from the given agent using its service id '''
+    consul_api = get_consul_api(module)
+    service = get_service_by_id_or_name(consul_api, service_id)
+    if service:
+        consul_api.agent.service.deregister(service_id, token=module.params.get('token'))
+        module.exit_json(changed=True, id=service_id)
+
+    module.exit_json(changed=False, id=service_id)
+
+
+def get_consul_api(module, token=None):
+    consulClient = consul.Consul(host=module.params.get('host'),
+                                 port=module.params.get('port'),
+                                 scheme=module.params.get('scheme'),
+                                 verify=module.params.get('validate_certs'),
+                                 token=module.params.get('token'))
+    consulClient.agent.service = PatchedConsulAgentService(consulClient)
+    return consulClient
+
+
+def get_service_by_id_or_name(consul_api, service_id_or_name):
+    ''' iterate the registered services and find one with the given id or name '''
+    for name, service in consul_api.agent.services().items():
+        if service['ID'] == service_id_or_name or service['Service'] == service_id_or_name:
+            return ConsulService(loaded=service)
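
get_service_by_id_or_name() relies on the shape of consul_api.agent.services(); a hedged sketch of the same lookup over plain dictionaries (return type simplified — the module wraps the match in ConsulService):

    def find_service(services, id_or_name):
        # services maps internal names to dicts carrying 'ID' and 'Service'
        # keys, so a service can be matched by either its id or its name.
        for service in services.values():
            if id_or_name in (service['ID'], service['Service']):
                return service
        return None

    nginx = {'ID': 'nginx', 'Service': 'nginx', 'Port': 80, 'Tags': ['prod']}
    print(find_service({'nginx': nginx}, 'nginx') is nginx)  # True
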
+
+
+def parse_check(module):
+    if len([p for p in (module.params.get('script'), module.params.get('ttl'), module.params.get('http')) if p]) > 1:
+        module.fail_json(
+            msg='checks are either script, http or ttl driven, supplying more than one does not make sense')
+
+    if module.params.get('check_id') or module.params.get('script') or module.params.get('ttl') or module.params.get('http'):
+
+        return ConsulCheck(
+            module.params.get('check_id'),
+            module.params.get('check_name'),
+            module.params.get('check_node'),
+            module.params.get('check_host'),
+            module.params.get('script'),
+            module.params.get('interval'),
+            module.params.get('ttl'),
+            module.params.get('notes'),
+            module.params.get('http'),
+            module.params.get('timeout'),
+            module.params.get('service_id'),
+        )
+
+
+def parse_service(module):
+    if module.params.get('service_name'):
+        return ConsulService(
+            module.params.get('service_id'),
+            module.params.get('service_name'),
+            module.params.get('service_address'),
+            module.params.get('service_port'),
+            module.params.get('tags'),
+        )
+    else:
+        module.fail_json(msg="service_name is required to configure a service.")
+
+
+class ConsulService(object):
+
+    def __init__(self, service_id=None, name=None, address=None, port=-1,
+                 tags=None, loaded=None):
+        self.id = self.name = name
+        if service_id:
+            self.id = service_id
+        self.address = address
+        self.port = port
+        self.tags = tags
+        self.checks = []
+        if loaded:
+            self.id = loaded['ID']
+            self.name = loaded['Service']
+            self.port = loaded['Port']
+            self.tags = loaded['Tags']
+
+    def register(self, consul_api):
+        optional = {}
+
+        if self.port:
+            optional['port'] = self.port
+
+        if len(self.checks) > 0:
+            optional['check'] = self.checks[0].check
+
+        consul_api.agent.service.register(
+            self.name,
+            service_id=self.id,
+            address=self.address,
+            tags=self.tags,
+            **optional)
+
+    def add_check(self, check):
+        self.checks.append(check)
+
+    def has_checks(self):
+        return len(self.checks) > 0
+
+    def __eq__(self, other):
+        return (isinstance(other, self.__class__) and
+                self.id == other.id and
+                self.name == other.name and
+                self.port == other.port and
+                self.tags == other.tags)
+
+    def __ne__(self, other):
+        return not self.__eq__(other)
+
+    def to_dict(self):
+        data = {'id': self.id, "name": self.name}
+        if self.port:
+            data['port'] = self.port
+        if self.tags and len(self.tags) > 0:
+            data['tags'] = self.tags
+        if len(self.checks) > 0:
+            data['check'] = self.checks[0].to_dict()
+        return data
+
+
+class ConsulCheck(object):
+
+    def __init__(self, check_id, name, node=None, host='localhost',
+                 script=None, interval=None, ttl=None, notes=None, http=None, timeout=None, service_id=None):
+        self.check_id = self.name = name
+        if check_id:
+            self.check_id = check_id
+        self.service_id = service_id
+        self.notes = notes
+        self.node = node
+        self.host = host
+
+        self.interval = self.validate_duration('interval', interval)
+        self.ttl = self.validate_duration('ttl', ttl)
+        self.script = script
+        self.http = http
+        self.timeout = self.validate_duration('timeout', timeout)
+
+        self.check = None
+
+        if script:
+            self.check = consul.Check.script(script, self.interval)
+
+        if ttl:
+            self.check = consul.Check.ttl(self.ttl)
+
+        if http:
+            if interval is None:
+                raise Exception('http check must specify interval')
+
+            self.check = consul.Check.http(http, self.interval, self.timeout)
+
+    def validate_duration(self, name, duration):
+        if duration:
+            duration_units = ['ns', 'us', 'ms', 's', 'm', 'h']
+            if not any((duration.endswith(suffix) for suffix in duration_units)):
+                duration = "{0}s".format(duration)
+        return duration
+
+    def register(self, consul_api):
+        consul_api.agent.check.register(self.name, check_id=self.check_id, service_id=self.service_id,
+                                        notes=self.notes,
+                                        check=self.check)
+
+    def __eq__(self, other):
+        return (isinstance(other, self.__class__) and
+                self.check_id == other.check_id and
+                self.service_id == other.service_id and
+                self.name == other.name and
+                self.script == other.script and
+                self.interval == other.interval)
+
+    def __ne__(self, other):
+        return not self.__eq__(other)
+
+    def to_dict(self):
+        data = {}
+        self._add(data,
'id', attr='check_id') + self._add(data, 'name', attr='check_name') + self._add(data, 'script') + self._add(data, 'node') + self._add(data, 'notes') + self._add(data, 'host') + self._add(data, 'interval') + self._add(data, 'ttl') + self._add(data, 'http') + self._add(data, 'timeout') + self._add(data, 'service_id') + return data + + def _add(self, data, key, attr=None): + try: + if attr is None: + attr = key + data[key] = getattr(self, attr) + except Exception: + pass + + +def test_dependencies(module): + if not python_consul_installed: + module.fail_json(msg="python-consul required for this module. see https://python-consul.readthedocs.io/en/latest/#installation") + + +def main(): + module = AnsibleModule( + argument_spec=dict( + host=dict(default='localhost'), + port=dict(default=8500, type='int'), + scheme=dict(required=False, default='http'), + validate_certs=dict(required=False, default=True, type='bool'), + check_id=dict(required=False), + check_name=dict(required=False), + check_node=dict(required=False), + check_host=dict(required=False), + notes=dict(required=False), + script=dict(required=False), + service_id=dict(required=False), + service_name=dict(required=False), + service_address=dict(required=False, type='str', default=None), + service_port=dict(required=False, type='int', default=None), + state=dict(default='present', choices=['present', 'absent']), + interval=dict(required=False, type='str'), + ttl=dict(required=False, type='str'), + http=dict(required=False, type='str'), + timeout=dict(required=False, type='str'), + tags=dict(required=False, type='list'), + token=dict(required=False, no_log=True) + ), + supports_check_mode=False, + ) + + test_dependencies(module) + + try: + register_with_consul(module) + except ConnectionError as e: + module.fail_json(msg='Could not connect to consul agent at %s:%s, error was %s' % ( + module.params.get('host'), module.params.get('port'), str(e))) + except Exception as e: + module.fail_json(msg=str(e)) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/clustering/consul/consul_acl.py b/plugins/modules/clustering/consul/consul_acl.py new file mode 100644 index 0000000000..928fb99509 --- /dev/null +++ b/plugins/modules/clustering/consul/consul_acl.py @@ -0,0 +1,660 @@ +#!/usr/bin/python +# +# (c) 2015, Steve Gargan +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +module: consul_acl +short_description: Manipulate Consul ACL keys and rules +description: + - Allows the addition, modification and deletion of ACL keys and associated + rules in a consul cluster via the agent. For more details on using and + configuring ACLs, see https://www.consul.io/docs/guides/acl.html. 
+author: + - Steve Gargan (@sgargan) + - Colin Nolan (@colin-nolan) +options: + mgmt_token: + description: + - a management token is required to manipulate the acl lists + state: + description: + - whether the ACL pair should be present or absent + required: false + choices: ['present', 'absent'] + default: present + token_type: + description: + - the type of token that should be created + choices: ['client', 'management'] + default: client + name: + description: + - the name that should be associated with the acl key, this is opaque + to Consul + required: false + token: + description: + - the token key identifying an ACL rule set. If generated by consul + this will be a UUID + required: false + rules: + type: list + description: + - rules that should be associated with a given token + required: false + host: + description: + - host of the consul agent defaults to localhost + required: false + default: localhost + port: + type: int + description: + - the port on which the consul agent is running + required: false + default: 8500 + scheme: + description: + - the protocol scheme on which the consul agent is running + required: false + default: http + validate_certs: + type: bool + description: + - whether to verify the tls certificate of the consul agent + required: false + default: True +requirements: + - python-consul + - pyhcl + - requests +''' + +EXAMPLES = """ +- name: create an ACL with rules + consul_acl: + host: consul1.example.com + mgmt_token: some_management_acl + name: Foo access + rules: + - key: "foo" + policy: read + - key: "private/foo" + policy: deny + +- name: create an ACL with a specific token + consul_acl: + host: consul1.example.com + mgmt_token: some_management_acl + name: Foo access + token: my-token + rules: + - key: "foo" + policy: read + +- name: update the rules associated to an ACL token + consul_acl: + host: consul1.example.com + mgmt_token: some_management_acl + name: Foo access + token: some_client_token + rules: + - event: "bbq" + policy: write + - key: "foo" + policy: read + - key: "private" + policy: deny + - keyring: write + - node: "hgs4" + policy: write + - operator: read + - query: "" + policy: write + - service: "consul" + policy: write + - session: "standup" + policy: write + +- name: remove a token + consul_acl: + host: consul1.example.com + mgmt_token: some_management_acl + token: 172bd5c8-9fe9-11e4-b1b0-3c15c2c9fd5e + state: absent +""" + +RETURN = """ +token: + description: the token associated to the ACL (the ACL's ID) + returned: success + type: str + sample: a2ec332f-04cf-6fba-e8b8-acf62444d3da +rules: + description: the HCL JSON representation of the rules associated to the ACL, in the format described in the + Consul documentation (https://www.consul.io/docs/guides/acl.html#rule-specification). 
+ returned: I(status) == "present" + type: str + sample: { + "key": { + "foo": { + "policy": "write" + }, + "bar": { + "policy": "deny" + } + } + } +operation: + description: the operation performed on the ACL + returned: changed + type: str + sample: update +""" + + +try: + import consul + python_consul_installed = True +except ImportError: + python_consul_installed = False + +try: + import hcl + pyhcl_installed = True +except ImportError: + pyhcl_installed = False + +try: + from requests.exceptions import ConnectionError + has_requests = True +except ImportError: + has_requests = False + +from collections import defaultdict +from ansible.module_utils.basic import to_text, AnsibleModule + + +RULE_SCOPES = ["agent", "event", "key", "keyring", "node", "operator", "query", "service", "session"] + +MANAGEMENT_PARAMETER_NAME = "mgmt_token" +HOST_PARAMETER_NAME = "host" +SCHEME_PARAMETER_NAME = "scheme" +VALIDATE_CERTS_PARAMETER_NAME = "validate_certs" +NAME_PARAMETER_NAME = "name" +PORT_PARAMETER_NAME = "port" +RULES_PARAMETER_NAME = "rules" +STATE_PARAMETER_NAME = "state" +TOKEN_PARAMETER_NAME = "token" +TOKEN_TYPE_PARAMETER_NAME = "token_type" + +PRESENT_STATE_VALUE = "present" +ABSENT_STATE_VALUE = "absent" + +CLIENT_TOKEN_TYPE_VALUE = "client" +MANAGEMENT_TOKEN_TYPE_VALUE = "management" + +REMOVE_OPERATION = "remove" +UPDATE_OPERATION = "update" +CREATE_OPERATION = "create" + +_POLICY_JSON_PROPERTY = "policy" +_RULES_JSON_PROPERTY = "Rules" +_TOKEN_JSON_PROPERTY = "ID" +_TOKEN_TYPE_JSON_PROPERTY = "Type" +_NAME_JSON_PROPERTY = "Name" +_POLICY_YML_PROPERTY = "policy" +_POLICY_HCL_PROPERTY = "policy" + +_ARGUMENT_SPEC = { + MANAGEMENT_PARAMETER_NAME: dict(required=True, no_log=True), + HOST_PARAMETER_NAME: dict(default='localhost'), + SCHEME_PARAMETER_NAME: dict(required=False, default='http'), + VALIDATE_CERTS_PARAMETER_NAME: dict(required=False, type='bool', default=True), + NAME_PARAMETER_NAME: dict(required=False), + PORT_PARAMETER_NAME: dict(default=8500, type='int'), + RULES_PARAMETER_NAME: dict(default=None, required=False, type='list'), + STATE_PARAMETER_NAME: dict(default=PRESENT_STATE_VALUE, choices=[PRESENT_STATE_VALUE, ABSENT_STATE_VALUE]), + TOKEN_PARAMETER_NAME: dict(required=False), + TOKEN_TYPE_PARAMETER_NAME: dict(required=False, choices=[CLIENT_TOKEN_TYPE_VALUE, MANAGEMENT_TOKEN_TYPE_VALUE], + default=CLIENT_TOKEN_TYPE_VALUE) +} + + +def set_acl(consul_client, configuration): + """ + Sets an ACL based on the given configuration. 
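+    If no token is supplied but the given name matches an existing ACL, that ACL's token is reused and the ACL is updated; otherwise a new ACL is created.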
+ :param consul_client: the consul client + :param configuration: the run configuration + :return: the output of setting the ACL + """ + acls_as_json = decode_acls_as_json(consul_client.acl.list()) + existing_acls_mapped_by_name = dict((acl.name, acl) for acl in acls_as_json if acl.name is not None) + existing_acls_mapped_by_token = dict((acl.token, acl) for acl in acls_as_json) + if None in existing_acls_mapped_by_token: + raise AssertionError("expecting ACL list to be associated to a token: %s" % + existing_acls_mapped_by_token[None]) + + if configuration.token is None and configuration.name and configuration.name in existing_acls_mapped_by_name: + # No token but name given so can get token from name + configuration.token = existing_acls_mapped_by_name[configuration.name].token + + if configuration.token and configuration.token in existing_acls_mapped_by_token: + return update_acl(consul_client, configuration) + else: + if configuration.token in existing_acls_mapped_by_token: + raise AssertionError() + if configuration.name in existing_acls_mapped_by_name: + raise AssertionError() + return create_acl(consul_client, configuration) + + +def update_acl(consul_client, configuration): + """ + Updates an ACL. + :param consul_client: the consul client + :param configuration: the run configuration + :return: the output of the update + """ + existing_acl = load_acl_with_token(consul_client, configuration.token) + changed = existing_acl.rules != configuration.rules + + if changed: + name = configuration.name if configuration.name is not None else existing_acl.name + rules_as_hcl = encode_rules_as_hcl_string(configuration.rules) + updated_token = consul_client.acl.update( + configuration.token, name=name, type=configuration.token_type, rules=rules_as_hcl) + if updated_token != configuration.token: + raise AssertionError() + + return Output(changed=changed, token=configuration.token, rules=configuration.rules, operation=UPDATE_OPERATION) + + +def create_acl(consul_client, configuration): + """ + Creates an ACL. + :param consul_client: the consul client + :param configuration: the run configuration + :return: the output of the creation + """ + rules_as_hcl = encode_rules_as_hcl_string(configuration.rules) if len(configuration.rules) > 0 else None + token = consul_client.acl.create( + name=configuration.name, type=configuration.token_type, rules=rules_as_hcl, acl_id=configuration.token) + rules = configuration.rules + return Output(changed=True, token=token, rules=rules, operation=CREATE_OPERATION) + + +def remove_acl(consul, configuration): + """ + Removes an ACL. + :param consul: the consul client + :param configuration: the run configuration + :return: the output of the removal + """ + token = configuration.token + changed = consul.acl.info(token) is not None + if changed: + consul.acl.destroy(token) + return Output(changed=changed, token=token, operation=REMOVE_OPERATION) + + +def load_acl_with_token(consul, token): + """ + Loads the ACL with the given token (token == rule ID). + :param consul: the consul client + :param token: the ACL "token"/ID (not name) + :return: the ACL associated to the given token + :exception ConsulACLTokenNotFoundException: raised if the given token does not exist + """ + acl_as_json = consul.acl.info(token) + if acl_as_json is None: + raise ConsulACLNotFoundException(token) + return decode_acl_as_json(acl_as_json) + + +def encode_rules_as_hcl_string(rules): + """ + Converts the given rules into the equivalent HCL (string) representation. 
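+    For example, a collection holding the single rule Rule("key", "read", "foo") encodes (modulo whitespace) as 'key "foo" { policy = "read" }'.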
+ :param rules: the rules + :return: the equivalent HCL (string) representation of the rules. Will be None if there is no rules (see internal + note for justification) + """ + if len(rules) == 0: + # Note: empty string is not valid HCL according to `hcl.load` however, the ACL `Rule` property will be an empty + # string if there is no rules... + return None + rules_as_hcl = "" + for rule in rules: + rules_as_hcl += encode_rule_as_hcl_string(rule) + return rules_as_hcl + + +def encode_rule_as_hcl_string(rule): + """ + Converts the given rule into the equivalent HCL (string) representation. + :param rule: the rule + :return: the equivalent HCL (string) representation of the rule + """ + if rule.pattern is not None: + return '%s "%s" {\n %s = "%s"\n}\n' % (rule.scope, rule.pattern, _POLICY_HCL_PROPERTY, rule.policy) + else: + return '%s = "%s"\n' % (rule.scope, rule.policy) + + +def decode_rules_as_hcl_string(rules_as_hcl): + """ + Converts the given HCL (string) representation of rules into a list of rule domain models. + :param rules_as_hcl: the HCL (string) representation of a collection of rules + :return: the equivalent domain model to the given rules + """ + rules_as_hcl = to_text(rules_as_hcl) + rules_as_json = hcl.loads(rules_as_hcl) + return decode_rules_as_json(rules_as_json) + + +def decode_rules_as_json(rules_as_json): + """ + Converts the given JSON representation of rules into a list of rule domain models. + :param rules_as_json: the JSON representation of a collection of rules + :return: the equivalent domain model to the given rules + """ + rules = RuleCollection() + for scope in rules_as_json: + if not isinstance(rules_as_json[scope], dict): + rules.add(Rule(scope, rules_as_json[scope])) + else: + for pattern, policy in rules_as_json[scope].items(): + rules.add(Rule(scope, policy[_POLICY_JSON_PROPERTY], pattern)) + return rules + + +def encode_rules_as_json(rules): + """ + Converts the given rules into the equivalent JSON representation according to the documentation: + https://www.consul.io/docs/guides/acl.html#rule-specification. + :param rules: the rules + :return: JSON representation of the given rules + """ + rules_as_json = defaultdict(dict) + for rule in rules: + if rule.pattern is not None: + if rule.pattern in rules_as_json[rule.scope]: + raise AssertionError() + rules_as_json[rule.scope][rule.pattern] = { + _POLICY_JSON_PROPERTY: rule.policy + } + else: + if rule.scope in rules_as_json: + raise AssertionError() + rules_as_json[rule.scope] = rule.policy + return rules_as_json + + +def decode_rules_as_yml(rules_as_yml): + """ + Converts the given YAML representation of rules into a list of rule domain models. + :param rules_as_yml: the YAML representation of a collection of rules + :return: the equivalent domain model to the given rules + """ + rules = RuleCollection() + if rules_as_yml: + for rule_as_yml in rules_as_yml: + rule_added = False + for scope in RULE_SCOPES: + if scope in rule_as_yml: + if rule_as_yml[scope] is None: + raise ValueError("Rule for '%s' does not have a value associated to the scope" % scope) + policy = rule_as_yml[_POLICY_YML_PROPERTY] if _POLICY_YML_PROPERTY in rule_as_yml \ + else rule_as_yml[scope] + pattern = rule_as_yml[scope] if _POLICY_YML_PROPERTY in rule_as_yml else None + rules.add(Rule(scope, policy, pattern)) + rule_added = True + break + if not rule_added: + raise ValueError("A rule requires one of %s and a policy." 
% ('/'.join(RULE_SCOPES))) + return rules + + +def decode_acl_as_json(acl_as_json): + """ + Converts the given JSON representation of an ACL into the equivalent domain model. + :param acl_as_json: the JSON representation of an ACL + :return: the equivalent domain model to the given ACL + """ + rules_as_hcl = acl_as_json[_RULES_JSON_PROPERTY] + rules = decode_rules_as_hcl_string(acl_as_json[_RULES_JSON_PROPERTY]) if rules_as_hcl.strip() != "" \ + else RuleCollection() + return ACL( + rules=rules, + token_type=acl_as_json[_TOKEN_TYPE_JSON_PROPERTY], + token=acl_as_json[_TOKEN_JSON_PROPERTY], + name=acl_as_json[_NAME_JSON_PROPERTY] + ) + + +def decode_acls_as_json(acls_as_json): + """ + Converts the given JSON representation of ACLs into a list of ACL domain models. + :param acls_as_json: the JSON representation of a collection of ACLs + :return: list of equivalent domain models for the given ACLs (order not guaranteed to be the same) + """ + return [decode_acl_as_json(acl_as_json) for acl_as_json in acls_as_json] + + +class ConsulACLNotFoundException(Exception): + """ + Exception raised if an ACL with is not found. + """ + + +class Configuration: + """ + Configuration for this module. + """ + + def __init__(self, management_token=None, host=None, scheme=None, validate_certs=None, name=None, port=None, + rules=None, state=None, token=None, token_type=None): + self.management_token = management_token # type: str + self.host = host # type: str + self.scheme = scheme # type: str + self.validate_certs = validate_certs # type: bool + self.name = name # type: str + self.port = port # type: int + self.rules = rules # type: RuleCollection + self.state = state # type: str + self.token = token # type: str + self.token_type = token_type # type: str + + +class Output: + """ + Output of an action of this module. + """ + + def __init__(self, changed=None, token=None, rules=None, operation=None): + self.changed = changed # type: bool + self.token = token # type: str + self.rules = rules # type: RuleCollection + self.operation = operation # type: str + + +class ACL: + """ + Consul ACL. See: https://www.consul.io/docs/guides/acl.html. + """ + + def __init__(self, rules, token_type, token, name): + self.rules = rules + self.token_type = token_type + self.token = token + self.name = name + + def __eq__(self, other): + return other \ + and isinstance(other, self.__class__) \ + and self.rules == other.rules \ + and self.token_type == other.token_type \ + and self.token == other.token \ + and self.name == other.name + + def __hash__(self): + return hash(self.rules) ^ hash(self.token_type) ^ hash(self.token) ^ hash(self.name) + + +class Rule: + """ + ACL rule. See: https://www.consul.io/docs/guides/acl.html#acl-rules-and-scope. + """ + + def __init__(self, scope, policy, pattern=None): + self.scope = scope + self.policy = policy + self.pattern = pattern + + def __eq__(self, other): + return isinstance(other, self.__class__) \ + and self.scope == other.scope \ + and self.policy == other.policy \ + and self.pattern == other.pattern + + def __ne__(self, other): + return not self.__eq__(other) + + def __hash__(self): + return (hash(self.scope) ^ hash(self.policy)) ^ hash(self.pattern) + + def __str__(self): + return encode_rule_as_hcl_string(self) + + +class RuleCollection: + """ + Collection of ACL rules, which are part of a Consul ACL. 
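+    Rules are indexed by scope and pattern; adding a second rule for the same scope and pattern raises a ValueError, and iteration order is not guaranteed.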
+ """ + + def __init__(self): + self._rules = {} + for scope in RULE_SCOPES: + self._rules[scope] = {} + + def __iter__(self): + all_rules = [] + for scope, pattern_keyed_rules in self._rules.items(): + for pattern, rule in pattern_keyed_rules.items(): + all_rules.append(rule) + return iter(all_rules) + + def __len__(self): + count = 0 + for scope in RULE_SCOPES: + count += len(self._rules[scope]) + return count + + def __eq__(self, other): + return isinstance(other, self.__class__) \ + and set(self) == set(other) + + def __ne__(self, other): + return not self.__eq__(other) + + def __str__(self): + return encode_rules_as_hcl_string(self) + + def add(self, rule): + """ + Adds the given rule to this collection. + :param rule: model of a rule + :raises ValueError: raised if there already exists a rule for a given scope and pattern + """ + if rule.pattern in self._rules[rule.scope]: + patten_info = " and pattern '%s'" % rule.pattern if rule.pattern is not None else "" + raise ValueError("Duplicate rule for scope '%s'%s" % (rule.scope, patten_info)) + self._rules[rule.scope][rule.pattern] = rule + + +def get_consul_client(configuration): + """ + Gets a Consul client for the given configuration. + + Does not check if the Consul client can connect. + :param configuration: the run configuration + :return: Consul client + """ + token = configuration.management_token + if token is None: + token = configuration.token + if token is None: + raise AssertionError("Expecting the management token to always be set") + return consul.Consul(host=configuration.host, port=configuration.port, scheme=configuration.scheme, + verify=configuration.validate_certs, token=token) + + +def check_dependencies(): + """ + Checks that the required dependencies have been imported. + :exception ImportError: if it is detected that any of the required dependencies have not been imported + """ + if not python_consul_installed: + raise ImportError("python-consul required for this module. " + "See: https://python-consul.readthedocs.io/en/latest/#installation") + + if not pyhcl_installed: + raise ImportError("pyhcl required for this module. " + "See: https://pypi.org/project/pyhcl/") + + if not has_requests: + raise ImportError("requests required for this module. See https://pypi.org/project/requests/") + + +def main(): + """ + Main method. 
+ """ + module = AnsibleModule(_ARGUMENT_SPEC, supports_check_mode=False) + + try: + check_dependencies() + except ImportError as e: + module.fail_json(msg=str(e)) + + configuration = Configuration( + management_token=module.params.get(MANAGEMENT_PARAMETER_NAME), + host=module.params.get(HOST_PARAMETER_NAME), + scheme=module.params.get(SCHEME_PARAMETER_NAME), + validate_certs=module.params.get(VALIDATE_CERTS_PARAMETER_NAME), + name=module.params.get(NAME_PARAMETER_NAME), + port=module.params.get(PORT_PARAMETER_NAME), + rules=decode_rules_as_yml(module.params.get(RULES_PARAMETER_NAME)), + state=module.params.get(STATE_PARAMETER_NAME), + token=module.params.get(TOKEN_PARAMETER_NAME), + token_type=module.params.get(TOKEN_TYPE_PARAMETER_NAME) + ) + consul_client = get_consul_client(configuration) + + try: + if configuration.state == PRESENT_STATE_VALUE: + output = set_acl(consul_client, configuration) + else: + output = remove_acl(consul_client, configuration) + except ConnectionError as e: + module.fail_json(msg='Could not connect to consul agent at %s:%s, error was %s' % ( + configuration.host, configuration.port, str(e))) + raise + + return_values = dict(changed=output.changed, token=output.token, operation=output.operation) + if output.rules is not None: + return_values["rules"] = encode_rules_as_json(output.rules) + module.exit_json(**return_values) + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/clustering/consul/consul_kv.py b/plugins/modules/clustering/consul/consul_kv.py new file mode 100644 index 0000000000..757d6a5d7e --- /dev/null +++ b/plugins/modules/clustering/consul/consul_kv.py @@ -0,0 +1,333 @@ +#!/usr/bin/python +# +# (c) 2015, Steve Gargan +# (c) 2018 Genome Research Ltd. +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +module: consul_kv +short_description: Manipulate entries in the key/value store of a consul cluster +description: + - Allows the retrieval, addition, modification and deletion of key/value entries in a + consul cluster via the agent. The entire contents of the record, including + the indices, flags and session are returned as C(value). + - If the C(key) represents a prefix then note that when a value is removed, the existing + value if any is returned as part of the results. + - See http://www.consul.io/docs/agent/http.html#kv for more details. +requirements: + - python-consul + - requests +author: + - Steve Gargan (@sgargan) + - Colin Nolan (@colin-nolan) +options: + state: + description: + - The action to take with the supplied key and value. If the state is 'present' and `value` is set, the key + contents will be set to the value supplied and `changed` will be set to `true` only if the value was + different to the current contents. If the state is 'present' and `value` is not set, the existing value + associated to the key will be returned. The state 'absent' will remove the key/value pair, + again 'changed' will be set to true only if the key actually existed + prior to the removal. An attempt can be made to obtain or free the + lock associated with a key/value pair with the states 'acquire' or + 'release' respectively. a valid session must be supplied to make the + attempt changed will be true if the attempt is successful, false + otherwise. 
+ choices: [ absent, acquire, present, release ] + default: present + key: + description: + - The key at which the value should be stored. + type: str + required: yes + value: + description: + - The value should be associated with the given key, required if C(state) + is C(present). + type: str + required: yes + recurse: + description: + - If the key represents a prefix, each entry with the prefix can be + retrieved by setting this to C(yes). + type: bool + default: 'no' + retrieve: + description: + - If the I(state) is C(present) and I(value) is set, perform a + read after setting the value and return this value. + default: True + type: bool + session: + description: + - The session that should be used to acquire or release a lock + associated with a key/value pair. + type: str + token: + description: + - The token key identifying an ACL rule set that controls access to + the key value pair + type: str + cas: + description: + - Used when acquiring a lock with a session. If the C(cas) is C(0), then + Consul will only put the key if it does not already exist. If the + C(cas) value is non-zero, then the key is only set if the index matches + the ModifyIndex of that key. + type: str + flags: + description: + - Opaque positive integer value that can be passed when setting a value. + type: str + host: + description: + - Host of the consul agent. + type: str + default: localhost + port: + description: + - The port on which the consul agent is running. + type: int + default: 8500 + scheme: + description: + - The protocol scheme on which the consul agent is running. + type: str + default: http + validate_certs: + description: + - Whether to verify the tls certificate of the consul agent. + type: bool + default: 'yes' +''' + + +EXAMPLES = ''' +# If the key does not exist, the value associated to the "data" property in `retrieved_key` will be `None` +# If the key value is empty string, `retrieved_key["data"]["Value"]` will be `None` +- name: retrieve a value from the key/value store + consul_kv: + key: somekey + register: retrieved_key + +- name: Add or update the value associated with a key in the key/value store + consul_kv: + key: somekey + value: somevalue + +- name: Remove a key from the store + consul_kv: + key: somekey + state: absent + +- name: Add a node to an arbitrary group via consul inventory (see consul.ini) + consul_kv: + key: ansible/groups/dc1/somenode + value: top_secret + +- name: Register a key/value pair with an associated session + consul_kv: + key: stg/node/server_birthday + value: 20160509 + session: "{{ sessionid }}" + state: acquire +''' + +from ansible.module_utils._text import to_text + +try: + import consul + from requests.exceptions import ConnectionError + python_consul_installed = True +except ImportError: + python_consul_installed = False + +from ansible.module_utils.basic import AnsibleModule + +# Note: although the python-consul documentation implies that using a key with a value of `None` with `put` has a +# special meaning (https://python-consul.readthedocs.io/en/latest/#consul-kv), if not set in the subsequently API call, +# the value just defaults to an empty string (https://www.consul.io/api/kv.html#create-update-key) +NOT_SET = None + + +def _has_value_changed(consul_client, key, target_value): + """ + Uses the given Consul client to determine if the value associated to the given key is different to the given target + value. 
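+    A stored value that cannot be decoded as UTF-8 is always reported as changed, since this module only ever writes valid UTF-8 strings.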
+ :param consul_client: Consul connected client + :param key: key in Consul + :param target_value: value to be associated to the key + :return: tuple where the first element is the value of the "X-Consul-Index" header and the second is `True` if the + value has changed (i.e. the stored value is not the target value) + """ + index, existing = consul_client.kv.get(key) + if not existing: + return index, True + try: + changed = to_text(existing['Value'], errors='surrogate_or_strict') != target_value + return index, changed + except UnicodeError: + # Existing value was not decodable but all values we set are valid utf-8 + return index, True + + +def execute(module): + state = module.params.get('state') + + if state == 'acquire' or state == 'release': + lock(module, state) + elif state == 'present': + if module.params.get('value') is NOT_SET: + get_value(module) + else: + set_value(module) + elif state == 'absent': + remove_value(module) + else: + module.fail_json(msg="Unsupported state: %s" % (state, )) + + +def lock(module, state): + + consul_api = get_consul_api(module) + + session = module.params.get('session') + key = module.params.get('key') + value = module.params.get('value') + + if not session: + module.fail_json( + msg='%s of lock for %s requested but no session supplied' % + (state, key)) + + index, changed = _has_value_changed(consul_api, key, value) + + if changed and not module.check_mode: + if state == 'acquire': + changed = consul_api.kv.put(key, value, + cas=module.params.get('cas'), + acquire=session, + flags=module.params.get('flags')) + else: + changed = consul_api.kv.put(key, value, + cas=module.params.get('cas'), + release=session, + flags=module.params.get('flags')) + + module.exit_json(changed=changed, + index=index, + key=key) + + +def get_value(module): + consul_api = get_consul_api(module) + key = module.params.get('key') + + index, existing_value = consul_api.kv.get(key, recurse=module.params.get('recurse')) + + module.exit_json(changed=False, index=index, data=existing_value) + + +def set_value(module): + consul_api = get_consul_api(module) + + key = module.params.get('key') + value = module.params.get('value') + + if value is NOT_SET: + raise AssertionError('Cannot set value of "%s" to `NOT_SET`' % key) + + index, changed = _has_value_changed(consul_api, key, value) + + if changed and not module.check_mode: + changed = consul_api.kv.put(key, value, + cas=module.params.get('cas'), + flags=module.params.get('flags')) + + stored = None + if module.params.get('retrieve'): + index, stored = consul_api.kv.get(key) + + module.exit_json(changed=changed, + index=index, + key=key, + data=stored) + + +def remove_value(module): + ''' Remove the value associated with the given key. If the recurse parameter + is set then any key prefixed with the given key will be removed.
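+        In check mode the delete is only simulated; in either case the existing value, if any, is returned as part of the result.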
''' + consul_api = get_consul_api(module) + + key = module.params.get('key') + + index, existing = consul_api.kv.get( + key, recurse=module.params.get('recurse')) + + changed = existing is not None + if changed and not module.check_mode: + consul_api.kv.delete(key, module.params.get('recurse')) + + module.exit_json(changed=changed, + index=index, + key=key, + data=existing) + + +def get_consul_api(module, token=None): + return consul.Consul(host=module.params.get('host'), + port=module.params.get('port'), + scheme=module.params.get('scheme'), + verify=module.params.get('validate_certs'), + token=module.params.get('token')) + + +def test_dependencies(module): + if not python_consul_installed: + module.fail_json(msg="python-consul required for this module. " + "see https://python-consul.readthedocs.io/en/latest/#installation") + + +def main(): + + module = AnsibleModule( + argument_spec=dict( + cas=dict(type='str'), + flags=dict(type='str'), + key=dict(type='str', required=True), + host=dict(type='str', default='localhost'), + scheme=dict(type='str', default='http'), + validate_certs=dict(type='bool', default=True), + port=dict(type='int', default=8500), + recurse=dict(type='bool'), + retrieve=dict(type='bool', default=True), + state=dict(type='str', default='present', choices=['absent', 'acquire', 'present', 'release']), + token=dict(type='str', no_log=True), + value=dict(type='str', default=NOT_SET), + session=dict(type='str'), + ), + supports_check_mode=True + ) + + test_dependencies(module) + + try: + execute(module) + except ConnectionError as e: + module.fail_json(msg='Could not connect to consul agent at %s:%s, error was %s' % ( + module.params.get('host'), module.params.get('port'), e)) + except Exception as e: + module.fail_json(msg=str(e)) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/clustering/consul/consul_session.py b/plugins/modules/clustering/consul/consul_session.py new file mode 100644 index 0000000000..7d180c9824 --- /dev/null +++ b/plugins/modules/clustering/consul/consul_session.py @@ -0,0 +1,280 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2015, Steve Gargan +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +module: consul_session +short_description: Manipulate consul sessions +description: + - Allows the addition, modification and deletion of sessions in a consul + cluster. These sessions can then be used in conjunction with key value pairs + to implement distributed locks. In depth documentation for working with + sessions can be found at http://www.consul.io/docs/internals/sessions.html +requirements: + - python-consul + - requests +author: +- Steve Gargan (@sgargan) +options: + id: + description: + - ID of the session, required when I(state) is either C(info) or + C(remove). + type: str + state: + description: + - Whether the session should be present i.e. created if it doesn't + exist, or absent, removed if present. If created, the I(id) for the + session is returned in the output. If C(absent), I(id) is + required to remove the session. 
Info for a single session, all the + sessions for a node or all available sessions can be retrieved by + specifying C(info), C(node) or C(list) for the I(state); for C(node) + or C(info), the node I(name) or session I(id) is required as parameter. + choices: [ absent, info, list, node, present ] + type: str + default: present + name: + description: + - The name that should be associated with the session. Required when + I(state=node) is used. + type: str + delay: + description: + - The optional lock delay that can be attached to the session when it + is created. Locks for invalidated sessions are blocked from being + acquired until this delay has expired. Durations are in seconds. + type: int + default: 15 + node: + description: + - The name of the node with which the session will be associated. + By default this is the name of the agent. + type: str + datacenter: + description: + - The name of the datacenter in which the session exists or should be + created. + type: str + checks: + description: + - Checks that will be used to verify the session health. If + all the checks fail, the session will be invalidated and any locks + associated with the session will be released and can be acquired once + the associated lock delay has expired. + type: list + host: + description: + - The host of the consul agent; defaults to localhost. + type: str + default: localhost + port: + description: + - The port on which the consul agent is running. + type: int + default: 8500 + scheme: + description: + - The protocol scheme on which the consul agent is running. + type: str + default: http + validate_certs: + description: + - Whether to verify the TLS certificate of the consul agent. + type: bool + default: True + behavior: + description: + - The optional behavior that can be attached to the session when it + is created. This controls the behavior when a session is invalidated.
+ choices: [ delete, release ] + type: str + default: release +''' + +EXAMPLES = ''' +- name: register basic session with consul + consul_session: + name: session1 + +- name: register a session with an existing check + consul_session: + name: session_with_check + checks: + - existing_check_name + +- name: register a session with lock_delay + consul_session: + name: session_with_delay + delay: 20s + +- name: retrieve info about session by id + consul_session: + id: session_id + state: info + +- name: retrieve active sessions + consul_session: + state: list +''' + +try: + import consul + from requests.exceptions import ConnectionError + python_consul_installed = True +except ImportError: + python_consul_installed = False + +from ansible.module_utils.basic import AnsibleModule + + +def execute(module): + + state = module.params.get('state') + + if state in ['info', 'list', 'node']: + lookup_sessions(module) + elif state == 'present': + update_session(module) + else: + remove_session(module) + + +def lookup_sessions(module): + + datacenter = module.params.get('datacenter') + + state = module.params.get('state') + consul_client = get_consul_api(module) + try: + if state == 'list': + sessions_list = consul_client.session.list(dc=datacenter) + # Ditch the index, this can be grabbed from the results + if sessions_list and len(sessions_list) >= 2: + sessions_list = sessions_list[1] + module.exit_json(changed=True, + sessions=sessions_list) + elif state == 'node': + node = module.params.get('node') + sessions = consul_client.session.node(node, dc=datacenter) + module.exit_json(changed=True, + node=node, + sessions=sessions) + elif state == 'info': + session_id = module.params.get('id') + + session_by_id = consul_client.session.info(session_id, dc=datacenter) + module.exit_json(changed=True, + session_id=session_id, + sessions=session_by_id) + + except Exception as e: + module.fail_json(msg="Could not retrieve session info %s" % e) + + +def update_session(module): + + name = module.params.get('name') + delay = module.params.get('delay') + checks = module.params.get('checks') + datacenter = module.params.get('datacenter') + node = module.params.get('node') + behavior = module.params.get('behavior') + + consul_client = get_consul_api(module) + + try: + session = consul_client.session.create( + name=name, + behavior=behavior, + node=node, + lock_delay=delay, + dc=datacenter, + checks=checks + ) + module.exit_json(changed=True, + session_id=session, + name=name, + behavior=behavior, + delay=delay, + checks=checks, + node=node) + except Exception as e: + module.fail_json(msg="Could not create/update session %s" % e) + + +def remove_session(module): + session_id = module.params.get('id') + + consul_client = get_consul_api(module) + + try: + consul_client.session.destroy(session_id) + + module.exit_json(changed=True, + session_id=session_id) + except Exception as e: + module.fail_json(msg="Could not remove session with id '%s' %s" % ( + session_id, e)) + + +def get_consul_api(module): + return consul.Consul(host=module.params.get('host'), + port=module.params.get('port'), + scheme=module.params.get('scheme'), + verify=module.params.get('validate_certs')) + + +def test_dependencies(module): + if not python_consul_installed: + module.fail_json(msg="python-consul required for this module. 
" + "see https://python-consul.readthedocs.io/en/latest/#installation") + + +def main(): + argument_spec = dict( + checks=dict(type='list'), + delay=dict(type='int', default='15'), + behavior=dict(type='str', default='release', choices=['release', 'delete']), + host=dict(type='str', default='localhost'), + port=dict(type='int', default=8500), + scheme=dict(type='str', default='http'), + validate_certs=dict(type='bool', default=True), + id=dict(type='str'), + name=dict(type='str'), + node=dict(type='str'), + state=dict(type='str', default='present', choices=['absent', 'info', 'list', 'node', 'present']), + datacenter=dict(type='str'), + ) + + module = AnsibleModule( + argument_spec=argument_spec, + required_if=[ + ('state', 'node', ['name']), + ('state', 'info', ['id']), + ('state', 'remove', ['id']), + ], + supports_check_mode=False + ) + + test_dependencies(module) + + try: + execute(module) + except ConnectionError as e: + module.fail_json(msg='Could not connect to consul agent at %s:%s, error was %s' % ( + module.params.get('host'), module.params.get('port'), e)) + except Exception as e: + module.fail_json(msg=str(e)) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/clustering/etcd3.py b/plugins/modules/clustering/etcd3.py new file mode 100644 index 0000000000..b3dbf96c69 --- /dev/null +++ b/plugins/modules/clustering/etcd3.py @@ -0,0 +1,245 @@ +#!/usr/bin/python +# +# (c) 2018, Jean-Philippe Evrard +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = { + 'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community' +} + +DOCUMENTATION = ''' +--- +module: etcd3 +short_description: "Set or delete key value pairs from an etcd3 cluster" +requirements: + - etcd3 +description: + - Sets or deletes values in etcd3 cluster using its v3 api. + - Needs python etcd3 lib to work +options: + key: + description: + - the key where the information is stored in the cluster + required: true + value: + description: + - the information stored + required: true + host: + description: + - the IP address of the cluster + default: 'localhost' + port: + description: + - the port number used to connect to the cluster + default: 2379 + state: + description: + - the state of the value for the key. + - can be present or absent + required: true + user: + description: + - The etcd user to authenticate with. + password: + description: + - The password to use for authentication. + - Required if I(user) is defined. + ca_cert: + description: + - The Certificate Authority to use to verify the etcd host. + - Required if I(client_cert) and I(client_key) are defined. + client_cert: + description: + - PEM formatted certificate chain file to be used for SSL client authentication. + - Required if I(client_key) is defined. + client_key: + description: + - PEM formatted file that contains your private key to be used for SSL client authentication. + - Required if I(client_cert) is defined. + timeout: + description: + - The socket level timeout in seconds. 
+author: + - Jean-Philippe Evrard (@evrardjp) + - Victor Fauth (@vfauth) +''' + +EXAMPLES = """ +# Store a value "bar" under the key "foo" for a cluster located "http://localhost:2379" +- etcd3: + key: "foo" + value: "baz3" + host: "localhost" + port: 2379 + state: "present" + +# Authenticate using user/password combination with a timeout of 10 seconds +- etcd3: + key: "foo" + value: "baz3" + state: "present" + user: "someone" + password: "password123" + timeout: 10 + +# Authenticate using TLS certificates +- etcd3: + key: "foo" + value: "baz3" + state: "present" + ca_cert: "/etc/ssl/certs/CA_CERT.pem" + client_cert: "/etc/ssl/certs/cert.crt" + client_key: "/etc/ssl/private/key.pem" +""" + +RETURN = ''' +key: + description: The key that was queried + returned: always + type: str +old_value: + description: The previous value in the cluster + returned: always + type: str +''' + +import traceback + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib +from ansible.module_utils._text import to_native + + +try: + import etcd3 + HAS_ETCD = True +except ImportError: + ETCD_IMP_ERR = traceback.format_exc() + HAS_ETCD = False + + +def run_module(): + # define the available arguments/parameters that a user can pass to + # the module + module_args = dict( + key=dict(type='str', required=True), + value=dict(type='str', required=True), + host=dict(type='str', default='localhost'), + port=dict(type='int', default=2379), + state=dict(type='str', required=True, choices=['present', 'absent']), + user=dict(type='str'), + password=dict(type='str', no_log=True), + ca_cert=dict(type='path'), + client_cert=dict(type='path'), + client_key=dict(type='path'), + timeout=dict(type='int'), + ) + + # seed the result dict in the object + # we primarily care about changed and state + # change is if this module effectively modified the target + # state will include any data that you want your module to pass back + # for consumption, for example, in a subsequent task + result = dict( + changed=False, + ) + + # the AnsibleModule object will be our abstraction working with Ansible + # this includes instantiation, a couple of common attr would be the + # args/params passed to the execution, as well as if the module + # supports check mode + module = AnsibleModule( + argument_spec=module_args, + supports_check_mode=True, + required_together=[['client_cert', 'client_key'], ['user', 'password']], + ) + + # It is possible to set `ca_cert` to verify the server identity without + # setting `client_cert` or `client_key` to authenticate the client + # so required_together is enough + # Due to `required_together=[['client_cert', 'client_key']]`, checking the presence + # of either `client_cert` or `client_key` is enough + if module.params['ca_cert'] is None and module.params['client_cert'] is not None: + module.fail_json(msg="The 'ca_cert' parameter must be defined when 'client_cert' and 'client_key' are present.") + + result['key'] = module.params.get('key') + module.params['cert_cert'] = module.params.pop('client_cert') + module.params['cert_key'] = module.params.pop('client_key') + + if not HAS_ETCD: + module.fail_json(msg=missing_required_lib('etcd3'), exception=ETCD_IMP_ERR) + + allowed_keys = ['host', 'port', 'ca_cert', 'cert_cert', 'cert_key', + 'timeout', 'user', 'password'] + # TODO(evrardjp): Move this back to a dict comprehension when python 2.7 is + # the minimum supported version + # client_params = {key: value for key, value in module.params.items() if key in allowed_keys} + client_params = dict() 
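+    # Build the client kwargs by whitelisting the parameters that etcd3.client() accepts; + # client_cert/client_key were renamed to cert_cert/cert_key above to match the names the client expects.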
+ for key, value in module.params.items(): + if key in allowed_keys: + client_params[key] = value + try: + etcd = etcd3.client(**client_params) + except Exception as exp: + module.fail_json(msg='Cannot connect to etcd cluster: %s' % (to_native(exp)), + exception=traceback.format_exc()) + try: + cluster_value = etcd.get(module.params['key']) + except Exception as exp: + module.fail_json(msg='Cannot reach data: %s' % (to_native(exp)), + exception=traceback.format_exc()) + + # Make the cluster_value[0] a string for string comparisons + result['old_value'] = to_native(cluster_value[0]) + + if module.params['state'] == 'absent': + if cluster_value[0] is not None: + if module.check_mode: + result['changed'] = True + else: + try: + etcd.delete(module.params['key']) + except Exception as exp: + module.fail_json(msg='Cannot delete %s: %s' % (module.params['key'], to_native(exp)), + exception=traceback.format_exc()) + else: + result['changed'] = True + elif module.params['state'] == 'present': + if result['old_value'] != module.params['value']: + if module.check_mode: + result['changed'] = True + else: + try: + etcd.put(module.params['key'], module.params['value']) + except Exception as exp: + module.fail_json(msg='Cannot add or edit key %s: %s' % (module.params['key'], to_native(exp)), + exception=traceback.format_exc()) + else: + result['changed'] = True + else: + module.fail_json(msg="State not recognized") + + # manipulate or modify the state as needed (this is going to be the + # part where your module will do what it needs to do) + + # during the execution of the module, if there is an exception or a + # conditional state that effectively causes a failure, run + # AnsibleModule.fail_json() to pass in the message and the result + + # in the event of a successful module execution, you will want to + # simple AnsibleModule.exit_json(), passing the key/value results + module.exit_json(**result) + + +def main(): + run_module() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/clustering/pacemaker_cluster.py b/plugins/modules/clustering/pacemaker_cluster.py new file mode 100644 index 0000000000..f50f6dd527 --- /dev/null +++ b/plugins/modules/clustering/pacemaker_cluster.py @@ -0,0 +1,224 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2016, Mathieu Bultel +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: pacemaker_cluster +short_description: Manage pacemaker clusters +author: +- Mathieu Bultel (@matbu) +description: + - This module can manage a pacemaker cluster and nodes from Ansible using + the pacemaker cli. +options: + state: + description: + - Indicate desired state of the cluster + choices: [ cleanup, offline, online, restart ] + required: yes + node: + description: + - Specify which node of the cluster you want to manage. None == the + cluster status itself, 'all' == check the status of all nodes. 
timeout: + description: + - Timeout, in seconds, after which the module should consider that the action has failed + default: 300 + force: + description: + - Force the change of the cluster state + type: bool + default: 'yes' +''' +EXAMPLES = ''' +--- +- name: Set cluster Online + hosts: localhost + gather_facts: no + tasks: + - name: Set cluster online + pacemaker_cluster: + state: online +''' + +RETURN = ''' +changed: + description: True if the cluster state has changed + type: bool + returned: always +out: + description: The output of the current state of the cluster. It returns a + list of the nodes' states. + type: str + sample: 'out: [[" overcloud-controller-0", " Online"]]' + returned: always +rc: + description: exit code of the module + type: int + returned: always +''' + +import time + +from ansible.module_utils.basic import AnsibleModule + + +_PCS_CLUSTER_DOWN = "Error: cluster is not currently running on this node" + + +def get_cluster_status(module): + cmd = "pcs cluster status" + rc, out, err = module.run_command(cmd) + if _PCS_CLUSTER_DOWN in out: + return 'offline' + else: + return 'online' + + +def get_node_status(module, node='all'): + if node == 'all': + cmd = "pcs cluster pcsd-status" + else: + cmd = "pcs cluster pcsd-status %s" % node + rc, out, err = module.run_command(cmd) + if rc == 1: + module.fail_json(msg="Command execution failed.\nCommand: `%s`\nError: %s" % (cmd, err)) + status = [] + for o in out.splitlines(): + status.append(o.split(':')) + return status + + +def clean_cluster(module, timeout): + cmd = "pcs resource cleanup" + rc, out, err = module.run_command(cmd) + if rc == 1: + module.fail_json(msg="Command execution failed.\nCommand: `%s`\nError: %s" % (cmd, err)) + + +def set_cluster(module, state, timeout, force): + if state == 'online': + cmd = "pcs cluster start" + if state == 'offline': + cmd = "pcs cluster stop" + if force: + cmd = "%s --force" % cmd + rc, out, err = module.run_command(cmd) + if rc == 1: + module.fail_json(msg="Command execution failed.\nCommand: `%s`\nError: %s" % (cmd, err)) + + t = time.time() + ready = False + while time.time() < t + timeout: + cluster_state = get_cluster_status(module) + if cluster_state == state: + ready = True + break + if not ready: + module.fail_json(msg="Failed to set the state `%s` on the cluster\n" % (state)) + + +def set_node(module, state, timeout, force, node='all'): + # map states + if state == 'online': + cmd = "pcs cluster start" + if state == 'offline': + cmd = "pcs cluster stop" + if force: + cmd = "%s --force" % cmd + + nodes_state = get_node_status(module, node) + for node in nodes_state: + if node[1].strip().lower() != state: + cmd = "%s %s" % (cmd, node[0].strip()) + rc, out, err = module.run_command(cmd) + if rc == 1: + module.fail_json(msg="Command execution failed.\nCommand: `%s`\nError: %s" % (cmd, err)) + + t = time.time() + ready = False + while time.time() < t + timeout: + nodes_state = get_node_status(module) + for node in nodes_state: + if node[1].strip().lower() == state: + ready = True + break + if not ready: + module.fail_json(msg="Failed to set the state `%s` on the cluster\n" % (state)) + + +def main(): + argument_spec = dict( + state=dict(type='str', required=True, choices=['online', 'offline', 'restart', 'cleanup']), + node=dict(type='str'), + timeout=dict(type='int', default=300), + force=dict(type='bool', default=True), + ) + + module = AnsibleModule( + argument_spec, + supports_check_mode=True, + ) + changed = False + state = module.params['state'] + node = module.params['node'] + force =
module.params['force'] + timeout = module.params['timeout'] + + if state in ['online', 'offline']: + # Get cluster status + if node is None: + cluster_state = get_cluster_status(module) + if cluster_state == state: + module.exit_json(changed=changed, out=cluster_state) + else: + set_cluster(module, state, timeout, force) + cluster_state = get_cluster_status(module) + if cluster_state == state: + module.exit_json(changed=True, out=cluster_state) + else: + module.fail_json(msg="Failed to bring the cluster %s" % state) + else: + cluster_state = get_node_status(module, node) + # Check cluster state + for node_state in cluster_state: + if node_state[1].strip().lower() == state: + module.exit_json(changed=changed, out=cluster_state) + else: + # Set node status if needed + set_node(module, state, timeout, force, node) + cluster_state = get_node_status(module, node) + module.exit_json(changed=True, out=cluster_state) + + if state in ['restart']: + set_cluster(module, 'offline', timeout, force) + cluster_state = get_cluster_status(module) + if cluster_state == 'offline': + set_cluster(module, 'online', timeout, force) + cluster_state = get_cluster_status(module) + if cluster_state == 'online': + module.exit_json(changed=True, out=cluster_state) + else: + module.fail_json(msg="Failed during the restart of the cluster, the cluster can't be started") + else: + module.fail_json(msg="Failed during the restart of the cluster, the cluster can't be stopped") + + if state in ['cleanup']: + clean_cluster(module, timeout) + cluster_state = get_cluster_status(module) + module.exit_json(changed=True, + out=cluster_state) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/clustering/znode.py b/plugins/modules/clustering/znode.py new file mode 100644 index 0000000000..f43ee169f0 --- /dev/null +++ b/plugins/modules/clustering/znode.py @@ -0,0 +1,254 @@ +#!/usr/bin/python +# Copyright 2015 WP Engine, Inc. All rights reserved. +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: znode +short_description: Create, delete, retrieve, and update znodes using ZooKeeper +description: + - Create, delete, retrieve, and update znodes using ZooKeeper. +options: + hosts: + description: + - A list of ZooKeeper servers (format '[server]:[port]'). + required: true + name: + description: + - The path of the znode. + required: true + value: + description: + - The value assigned to the znode. + op: + description: + - An operation to perform. Mutually exclusive with state. + state: + description: + - The state to enforce. Mutually exclusive with op. + timeout: + description: + - The amount of time to wait for a node to appear. + default: 300 + recursive: + description: + - Recursively delete node and all its children.
+ type: bool + default: 'no' +requirements: + - kazoo >= 2.1 + - python >= 2.6 +author: "Trey Perry (@treyperry)" +''' + +EXAMPLES = """ +# Creating or updating a znode with a given value +- znode: + hosts: 'localhost:2181' + name: /mypath + value: myvalue + state: present + +# Getting the value and stat structure for a znode +- znode: + hosts: 'localhost:2181' + name: /mypath + op: get + +# Listing a particular znode's children +- znode: + hosts: 'localhost:2181' + name: /zookeeper + op: list + +# Waiting 20 seconds for a znode to appear at path /mypath +- znode: + hosts: 'localhost:2181' + name: /mypath + op: wait + timeout: 20 + +# Deleting a znode at path /mypath +- znode: + hosts: 'localhost:2181' + name: /mypath + state: absent + +# Creating or updating a znode with a given value on a remote Zookeeper +- znode: + hosts: 'my-zookeeper-node:2181' + name: /mypath + value: myvalue + state: present + delegate_to: 127.0.0.1 +""" + +import time +import traceback + +KAZOO_IMP_ERR = None +try: + from kazoo.client import KazooClient + from kazoo.handlers.threading import KazooTimeoutError + KAZOO_INSTALLED = True +except ImportError: + KAZOO_IMP_ERR = traceback.format_exc() + KAZOO_INSTALLED = False + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib +from ansible.module_utils._text import to_bytes + + +def main(): + module = AnsibleModule( + argument_spec=dict( + hosts=dict(required=True, type='str'), + name=dict(required=True, type='str'), + value=dict(required=False, default=None, type='str'), + op=dict(required=False, default=None, choices=['get', 'wait', 'list']), + state=dict(choices=['present', 'absent']), + timeout=dict(required=False, default=300, type='int'), + recursive=dict(required=False, default=False, type='bool') + ), + supports_check_mode=False + ) + + if not KAZOO_INSTALLED: + module.fail_json(msg=missing_required_lib('kazoo >= 2.1'), exception=KAZOO_IMP_ERR) + + check = check_params(module.params) + if not check['success']: + module.fail_json(msg=check['msg']) + + zoo = KazooCommandProxy(module) + try: + zoo.start() + except KazooTimeoutError: + module.fail_json(msg='The connection to the ZooKeeper ensemble timed out.') + + command_dict = { + 'op': { + 'get': zoo.get, + 'list': zoo.list, + 'wait': zoo.wait + }, + 'state': { + 'present': zoo.present, + 'absent': zoo.absent + } + } + + command_type = 'op' if 'op' in module.params and module.params['op'] is not None else 'state' + method = module.params[command_type] + result, result_dict = command_dict[command_type][method]() + zoo.shutdown() + + if result: + module.exit_json(**result_dict) + else: + module.fail_json(**result_dict) + + +def check_params(params): + if not params['state'] and not params['op']: + return {'success': False, 'msg': 'Please define an operation (op) or a state.'} + + if params['state'] and params['op']: + return {'success': False, 'msg': 'Please choose an operation (op) or a state, but not both.'} + + return {'success': True} + + +class KazooCommandProxy(): + def __init__(self, module): + self.module = module + self.zk = KazooClient(module.params['hosts']) + + def absent(self): + return self._absent(self.module.params['name']) + + def exists(self, znode): + return self.zk.exists(znode) + + def list(self): + children = self.zk.get_children(self.module.params['name']) + return True, {'count': len(children), 'items': children, 'msg': 'Retrieved znodes in path.', + 'znode': self.module.params['name']} + + def present(self): + return self._present(self.module.params['name'], 
self.module.params['value']) + + def get(self): + return self._get(self.module.params['name']) + + def shutdown(self): + self.zk.stop() + self.zk.close() + + def start(self): + self.zk.start() + + def wait(self): + return self._wait(self.module.params['name'], self.module.params['timeout']) + + def _absent(self, znode): + if self.exists(znode): + self.zk.delete(znode, recursive=self.module.params['recursive']) + return True, {'changed': True, 'msg': 'The znode was deleted.'} + else: + return True, {'changed': False, 'msg': 'The znode does not exist.'} + + def _get(self, path): + if self.exists(path): + value, zstat = self.zk.get(path) + stat_dict = {} + for i in dir(zstat): + if not i.startswith('_'): + attr = getattr(zstat, i) + if isinstance(attr, (int, str)): + stat_dict[i] = attr + result = True, {'msg': 'The node was retrieved.', 'znode': path, 'value': value, + 'stat': stat_dict} + else: + result = False, {'msg': 'The requested node does not exist.'} + + return result + + def _present(self, path, value): + if self.exists(path): + (current_value, zstat) = self.zk.get(path) + if value != current_value: + self.zk.set(path, to_bytes(value)) + return True, {'changed': True, 'msg': 'Updated the znode value.', 'znode': path, + 'value': value} + else: + return True, {'changed': False, 'msg': 'No changes were necessary.', 'znode': path, 'value': value} + else: + self.zk.create(path, to_bytes(value), makepath=True) + return True, {'changed': True, 'msg': 'Created a new znode.', 'znode': path, 'value': value} + + def _wait(self, path, timeout, interval=5): + lim = time.time() + timeout + + while time.time() < lim: + if self.exists(path): + return True, {'msg': 'The node appeared before the configured timeout.', + 'znode': path, 'timeout': timeout} + else: + time.sleep(interval) + + return False, {'msg': 'The node did not appear before the operation timed out.', 'timeout': timeout, + 'znode': path} + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/database/aerospike/aerospike_migrations.py b/plugins/modules/database/aerospike/aerospike_migrations.py new file mode 100644 index 0000000000..21e628fc9d --- /dev/null +++ b/plugins/modules/database/aerospike/aerospike_migrations.py @@ -0,0 +1,522 @@ +#!/usr/bin/python +"""short_description: Check or wait for migrations between nodes""" + +# Copyright: (c) 2018, Albert Autin +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +ANSIBLE_METADATA = { + 'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community' +} + +DOCUMENTATION = ''' +--- +module: aerospike_migrations +short_description: Check or wait for migrations between nodes +description: + - This can be used to check for migrations in a cluster. + This makes it easy to do a rolling upgrade/update on Aerospike nodes. 
+    - If waiting for migrations is not desired, simply poll until
+      port 3000 is available or asinfo -v status returns ok
+author: "Albert Autin (@Alb0t)"
+options:
+    host:
+        description:
+            - Which host do we use as seed for info connection
+        required: False
+        type: str
+        default: localhost
+    port:
+        description:
+            - Which port to connect to Aerospike on (service port)
+        required: False
+        type: int
+        default: 3000
+    connect_timeout:
+        description:
+            - How long to try to connect before giving up (milliseconds)
+        required: False
+        type: int
+        default: 1000
+    consecutive_good_checks:
+        description:
+            - How many times should the cluster report "no migrations"
+              consecutively before returning OK back to ansible?
+        required: False
+        type: int
+        default: 3
+    sleep_between_checks:
+        description:
+            - How long to sleep between each check (seconds).
+        required: False
+        type: int
+        default: 60
+    tries_limit:
+        description:
+            - How many times do we poll before giving up and failing?
+        default: 300
+        required: False
+        type: int
+    local_only:
+        description:
+            - Do you wish to only check for migrations on the local node
+              before returning, or do you want all nodes in the cluster
+              to finish before returning?
+        required: True
+        type: bool
+    min_cluster_size:
+        description:
+            - Check will return bad until cluster size is met
+              or until the tries limit is exhausted
+        required: False
+        type: int
+        default: 1
+    fail_on_cluster_change:
+        description:
+            - Fail if the cluster key changes.
+              If something else is changing the cluster, we may want to fail.
+        required: False
+        type: bool
+        default: True
+    migrate_tx_key:
+        description:
+            - The metric key used to determine if we have tx migrations
+              remaining. Changeable due to backwards compatibility.
+        required: False
+        type: str
+        default: migrate_tx_partitions_remaining
+    migrate_rx_key:
+        description:
+            - The metric key used to determine if we have rx migrations
+              remaining. Changeable due to backwards compatibility.
+        required: False
+        type: str
+        default: migrate_rx_partitions_remaining
+    target_cluster_size:
+        description:
+            - When all aerospike builds in the cluster are version 4.3 or
+              greater, then the C(cluster-stable) info command will be used.
+              Inside this command, you can optionally specify what the target
+              cluster size is - but it is not necessary. You can still rely on
+              min_cluster_size if you don't want to use this option.
+            - If this option is specified on a cluster that has at least 1
+              host <4.3 then it will be ignored until the min version reaches
+              4.3.
+        required: False
+        type: int
+'''
+EXAMPLES = '''
+# check for migrations on local node
+- name: wait for migrations on local node before proceeding
+  aerospike_migrations:
+    host: "localhost"
+    connect_timeout: 2000
+    consecutive_good_checks: 5
+    sleep_between_checks: 15
+    tries_limit: 600
+    local_only: False
+
+# example playbook:
+---
+- name: upgrade aerospike
+  hosts: all
+  become: true
+  serial: 1
+  tasks:
+    - name: Install dependencies
+      apt:
+        name:
+          - python
+          - python-pip
+          - python-setuptools
+        state: latest
+    - name: setup aerospike
+      pip:
+        name: aerospike
+# check for migrations every (sleep_between_checks)
+# If at least (consecutive_good_checks) checks come back OK in a row, then return OK.
+# Will exit if any exception is raised, which can be caused by bad nodes,
+# nodes not returning data, or other reasons.
+# Maximum runtime before giving up in this case will be: +# Tries Limit * Sleep Between Checks * delay * retries + - name: wait for aerospike migrations + aerospike_migrations: + local_only: True + sleep_between_checks: 1 + tries_limit: 5 + consecutive_good_checks: 3 + fail_on_cluster_change: true + min_cluster_size: 3 + target_cluster_size: 4 + register: migrations_check + until: migrations_check is succeeded + changed_when: false + delay: 60 + retries: 120 + - name: another thing + shell: | + echo foo + - name: reboot + reboot: +''' + +RETURN = ''' +# Returns only a success/failure result. Changed is always false. +''' + +import traceback + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib + +LIB_FOUND_ERR = None +try: + import aerospike + from time import sleep + import re +except ImportError as ie: + LIB_FOUND = False + LIB_FOUND_ERR = traceback.format_exc() +else: + LIB_FOUND = True + + +def run_module(): + """run ansible module""" + module_args = dict( + host=dict(type='str', required=False, default='localhost'), + port=dict(type='int', required=False, default=3000), + connect_timeout=dict(type='int', required=False, default=1000), + consecutive_good_checks=dict(type='int', required=False, default=3), + sleep_between_checks=dict(type='int', required=False, default=60), + tries_limit=dict(type='int', required=False, default=300), + local_only=dict(type='bool', required=True), + min_cluster_size=dict(type='int', required=False, default=1), + target_cluster_size=dict(type='int', required=False, default=None), + fail_on_cluster_change=dict(type='bool', required=False, default=True), + migrate_tx_key=dict(type='str', required=False, + default="migrate_tx_partitions_remaining"), + migrate_rx_key=dict(type='str', required=False, + default="migrate_rx_partitions_remaining") + ) + + result = dict( + changed=False, + ) + + module = AnsibleModule( + argument_spec=module_args, + supports_check_mode=True + ) + if not LIB_FOUND: + module.fail_json(msg=missing_required_lib('aerospike'), + exception=LIB_FOUND_ERR) + + try: + if module.check_mode: + has_migrations, skip_reason = False, None + else: + migrations = Migrations(module) + has_migrations, skip_reason = migrations.has_migs( + module.params['local_only'] + ) + + if has_migrations: + module.fail_json(msg="Failed.", skip_reason=skip_reason) + except Exception as e: + module.fail_json(msg="Error: {0}".format(e)) + + module.exit_json(**result) + + +class Migrations: + """ Check or wait for migrations between nodes """ + + def __init__(self, module): + self.module = module + self._client = self._create_client().connect() + self._nodes = {} + self._update_nodes_list() + self._cluster_statistics = {} + self._update_cluster_statistics() + self._namespaces = set() + self._update_cluster_namespace_list() + self._build_list = set() + self._update_build_list() + self._start_cluster_key = \ + self._cluster_statistics[self._nodes[0]]['cluster_key'] + + def _create_client(self): + """ TODO: add support for auth, tls, and other special features + I won't use those features, so I'll wait until somebody complains + or does it for me (Cross fingers) + create the client object""" + config = { + 'hosts': [ + (self.module.params['host'], self.module.params['port']) + ], + 'policies': { + 'timeout': self.module.params['connect_timeout'] + } + } + return aerospike.client(config) + + def _info_cmd_helper(self, cmd, node=None, delimiter=';'): + """delimiter is for separate stats that come back, NOT for kv + separation which is =""" + if 
node is None:  # If no node passed, use the first one (local)
+            node = self._nodes[0]
+        data = self._client.info_node(cmd, node)
+        data = data.split("\t")
+        if len(data) != 1 and len(data) != 2:
+            self.module.fail_json(
+                msg="Unexpected number of values returned in info command: " +
+                str(len(data))
+            )
+        # data will be in format 'command\toutput'
+        data = data[-1]
+        data = data.rstrip("\n\r")
+        data_arr = data.split(delimiter)
+
+        # some commands don't return in kv format
+        # so we don't want a dict from those.
+        if '=' in data:
+            retval = dict(
+                metric.split("=", 1) for metric in data_arr
+            )
+        else:
+            # if only 1 element found, and not kv, return just the value.
+            if len(data_arr) == 1:
+                retval = data_arr[0]
+            else:
+                retval = data_arr
+        return retval
+
+    def _update_build_list(self):
+        """creates self._build_list which is a unique list
+        of build versions."""
+        self._build_list = set()
+        for node in self._nodes:
+            build = self._info_cmd_helper('build', node)
+            self._build_list.add(build)
+
+    # just checks to see if the version is 4.3 or greater
+    def _can_use_cluster_stable(self):
+        # if version <4.3 we can't use cluster-stable info cmd
+        # regex hack to check for versions beginning with 0-3 or
+        # beginning with 4.0,4.1,4.2
+        if re.search(R'^([0-3]\.|4\.[0-2])', min(self._build_list)):
+            return False
+        return True
+
+    def _update_cluster_namespace_list(self):
+        """ make a unique list of namespaces
+        TODO: does this work on a rolling namespace add/deletion?
+        thankfully if it doesn't, we don't need this on builds >=4.3"""
+        self._namespaces = set()
+        for node in self._nodes:
+            namespaces = self._info_cmd_helper('namespaces', node)
+            for namespace in namespaces:
+                self._namespaces.add(namespace)
+
+    def _update_cluster_statistics(self):
+        """create a dict of nodes with their related stats """
+        self._cluster_statistics = {}
+        for node in self._nodes:
+            self._cluster_statistics[node] = \
+                self._info_cmd_helper('statistics', node)
+
+    def _update_nodes_list(self):
+        """get a fresh list of all the nodes"""
+        self._nodes = self._client.get_nodes()
+        if not self._nodes:
+            self.module.fail_json(msg="Failed to retrieve at least 1 node.")
+
+    def _namespace_has_migs(self, namespace, node=None):
+        """returns a True or False.
+        Does the namespace have migrations for the node passed?
+        If no node passed, uses the local node or the first one in the list"""
+        namespace_stats = self._info_cmd_helper("namespace/" + namespace, node)
+        try:
+            namespace_tx = \
+                int(namespace_stats[self.module.params['migrate_tx_key']])
+            namespace_rx = \
+                int(namespace_stats[self.module.params['migrate_rx_key']])
+        except KeyError:
+            self.module.fail_json(
+                msg="Did not find partition remaining key:" +
+                self.module.params['migrate_tx_key'] +
+                " or key:" +
+                self.module.params['migrate_rx_key'] +
+                " in 'namespace/" +
+                namespace +
+                "' output."
+            )
+        except TypeError:
+            self.module.fail_json(
+                msg="namespace stat returned was not numerical"
+            )
+        return namespace_tx != 0 or namespace_rx != 0
+
+    def _node_has_migs(self, node=None):
+        """just calls namespace_has_migs and
+        if any namespace has migs returns true"""
+        migs = 0
+        self._update_cluster_namespace_list()
+        for namespace in self._namespaces:
+            if self._namespace_has_migs(namespace, node):
+                migs += 1
+        return migs != 0
+
+    def _cluster_key_consistent(self):
+        """create a dictionary to store what each node
+        returns the cluster key as.
we should end up with only 1 dict key,
+        with the key being the cluster key."""
+        cluster_keys = {}
+        for node in self._nodes:
+            cluster_key = self._cluster_statistics[node][
+                'cluster_key']
+            if cluster_key not in cluster_keys:
+                cluster_keys[cluster_key] = 1
+            else:
+                cluster_keys[cluster_key] += 1
+        if len(cluster_keys.keys()) == 1 and \
+                self._start_cluster_key in cluster_keys:
+            return True
+        return False
+
+    def _cluster_migrates_allowed(self):
+        """ensure all nodes have 'migrate_allowed' in their stats output"""
+        for node in self._nodes:
+            node_stats = self._info_cmd_helper('statistics', node)
+            allowed = node_stats['migrate_allowed']
+            if allowed == "false":
+                return False
+        return True
+
+    def _cluster_has_migs(self):
+        """calls node_has_migs for each node"""
+        migs = 0
+        for node in self._nodes:
+            if self._node_has_migs(node):
+                migs += 1
+        if migs == 0:
+            return False
+        return True
+
+    def _has_migs(self, local):
+        if local:
+            return self._local_node_has_migs()
+        return self._cluster_has_migs()
+
+    def _local_node_has_migs(self):
+        return self._node_has_migs(None)
+
+    def _is_min_cluster_size(self):
+        """checks that all nodes in the cluster are returning the
+        minimum cluster size specified in their statistics output"""
+        sizes = set()
+        for node in self._cluster_statistics:
+            sizes.add(int(self._cluster_statistics[node]['cluster_size']))
+
+        if len(sizes) > 1:  # if we are getting more than 1 size, let's say no
+            return False
+        if min(sizes) >= self.module.params['min_cluster_size']:
+            return True
+        return False
+
+    def _cluster_stable(self):
+        """Added 4.3:
+        cluster-stable:size=<size>;ignore-migrations=<yes/no>;namespace=<namespace>
+        Returns the current 'cluster_key' when the following are satisfied:
+
+        If 'size' is specified then the target node's 'cluster-size'
+        must match size.
+        If 'ignore-migrations' is either unspecified or 'false' then
+        the target node's migrations counts must be zero for the provided
+        'namespace' or all namespaces if 'namespace' is not provided."""
+        cluster_key = set()
+        cluster_key.add(self._info_cmd_helper('statistics')['cluster_key'])
+        cmd = "cluster-stable:"
+        target_cluster_size = self.module.params['target_cluster_size']
+        if target_cluster_size is not None:
+            cmd = cmd + "size=" + str(target_cluster_size) + ";"
+        for node in self._nodes:
+            cluster_key.add(self._info_cmd_helper(cmd, node))
+        if len(cluster_key) == 1:
+            return True
+        return False
+
+    def _cluster_good_state(self):
+        """checks a few things to make sure we're OK to say the cluster
+        has no migs. It could be in an unhealthy condition that does not allow
+        migs, or a split brain"""
+        if self._cluster_key_consistent() is not True:
+            return False, "Cluster key inconsistent."
+        if self._is_min_cluster_size() is not True:
+            return False, "Cluster min size not reached."
+        if self._cluster_migrates_allowed() is not True:
+            return False, "migrate_allowed is false somewhere."
+        return True, "OK."
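+    # Illustrative sketch (not part of the module): the predicates above are
+    # combined by has_migs() below, which polls until it has seen
+    # `consecutive_good_checks` clean passes in a row. Stripped of the
+    # bookkeeping, that polling loop is roughly:
+    #
+    #   good = 0
+    #   for try_num in range(tries_limit):
+    #       if cluster_is_clean():           # hypothetical stand-in for the
+    #           good += 1                    # cluster-stable/migration checks
+    #           if good == consecutive_good_checks:
+    #               break                    # enough consecutive clean passes
+    #       else:
+    #           good = 0                     # any bad check resets the streak
+    #       sleep(sleep_between_checks)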
+
+    def has_migs(self, local=True):
+        """returns a boolean, False if no migrations otherwise True"""
+        consecutive_good = 0
+        try_num = 0
+        skip_reason = list()
+        while \
+                try_num < int(self.module.params['tries_limit']) and \
+                consecutive_good < \
+                int(self.module.params['consecutive_good_checks']):
+
+            self._update_nodes_list()
+            self._update_cluster_statistics()
+
+            # These checks are inside the while loop because
+            # we probably want to skip & sleep instead of failing entirely
+            stable, reason = self._cluster_good_state()
+            if stable is not True:
+                skip_reason.append(
+                    "Skipping on try#" + str(try_num) +
+                    " for reason:" + reason
+                )
+            else:
+                if self._can_use_cluster_stable():
+                    if self._cluster_stable():
+                        consecutive_good += 1
+                    else:
+                        consecutive_good = 0
+                        skip_reason.append(
+                            "Skipping on try#" + str(try_num) +
+                            " for reason:" + " cluster_stable"
+                        )
+                elif self._has_migs(local):
+                    # print("_has_migs")
+                    skip_reason.append(
+                        "Skipping on try#" + str(try_num) +
+                        " for reason:" + " migrations"
+                    )
+                    consecutive_good = 0
+                else:
+                    consecutive_good += 1
+                    if consecutive_good == self.module.params[
+                            'consecutive_good_checks']:
+                        break
+            try_num += 1
+            sleep(self.module.params['sleep_between_checks'])
+        # print(skip_reason)
+        if consecutive_good == self.module.params['consecutive_good_checks']:
+            return False, None
+        return True, skip_reason
+
+
+def main():
+    """main method for ansible module"""
+    run_module()
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/database/influxdb/influxdb_database.py b/plugins/modules/database/influxdb/influxdb_database.py
new file mode 100644
index 0000000000..8cfdff335a
--- /dev/null
+++ b/plugins/modules/database/influxdb/influxdb_database.py
@@ -0,0 +1,146 @@
+#!/usr/bin/python
+
+# Copyright: (c) 2016, Kamil Szczygiel
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+
+DOCUMENTATION = r'''
+---
+module: influxdb_database
+short_description: Manage InfluxDB databases
+description:
+  - Manage InfluxDB databases.
+author: "Kamil Szczygiel (@kamsz)"
+requirements:
+  - "python >= 2.6"
+  - "influxdb >= 0.9 & <= 1.2.4"
+  - requests
+options:
+  database_name:
+    description:
+      - Name of the database.
+    required: true
+    type: str
+  state:
+    description:
+      - Determines if the database should be created or destroyed.
+ choices: [ absent, present ] + default: present + type: str +extends_documentation_fragment: +- community.general.influxdb + +''' + +EXAMPLES = r''' +# Example influxdb_database command from Ansible Playbooks +- name: Create database + influxdb_database: + hostname: "{{influxdb_ip_address}}" + database_name: "{{influxdb_database_name}}" + +- name: Destroy database + influxdb_database: + hostname: "{{influxdb_ip_address}}" + database_name: "{{influxdb_database_name}}" + state: absent + +- name: Create database using custom credentials + influxdb_database: + hostname: "{{influxdb_ip_address}}" + username: "{{influxdb_username}}" + password: "{{influxdb_password}}" + database_name: "{{influxdb_database_name}}" + ssl: yes + validate_certs: yes +''' + +RETURN = r''' +# only defaults +''' + +try: + import requests.exceptions + from influxdb import exceptions +except ImportError: + pass + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.influxdb import InfluxDb + + +def find_database(module, client, database_name): + database = None + + try: + databases = client.get_list_database() + for db in databases: + if db['name'] == database_name: + database = db + break + except requests.exceptions.ConnectionError as e: + module.fail_json(msg=str(e)) + return database + + +def create_database(module, client, database_name): + if not module.check_mode: + try: + client.create_database(database_name) + except requests.exceptions.ConnectionError as e: + module.fail_json(msg=str(e)) + + module.exit_json(changed=True) + + +def drop_database(module, client, database_name): + if not module.check_mode: + try: + client.drop_database(database_name) + except exceptions.InfluxDBClientError as e: + module.fail_json(msg=e.content) + + module.exit_json(changed=True) + + +def main(): + argument_spec = InfluxDb.influxdb_argument_spec() + argument_spec.update( + database_name=dict(required=True, type='str'), + state=dict(default='present', type='str', choices=['present', 'absent']) + ) + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True + ) + + state = module.params['state'] + + influxdb = InfluxDb(module) + client = influxdb.connect_to_influxdb() + database_name = influxdb.database_name + database = find_database(module, client, database_name) + + if state == 'present': + if database: + module.exit_json(changed=False) + else: + create_database(module, client, database_name) + + if state == 'absent': + if database: + drop_database(module, client, database_name) + else: + module.exit_json(changed=False) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/database/influxdb/influxdb_query.py b/plugins/modules/database/influxdb/influxdb_query.py new file mode 100644 index 0000000000..b0f883da3b --- /dev/null +++ b/plugins/modules/database/influxdb/influxdb_query.py @@ -0,0 +1,106 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright (c) 2017, René Moser +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = r''' +--- +module: influxdb_query +short_description: Query data points from InfluxDB +description: + - Query data points from InfluxDB. 
+author: "René Moser (@resmo)" +requirements: + - "python >= 2.6" + - "influxdb >= 0.9" +options: + query: + description: + - Query to be executed. + required: true + type: str + database_name: + description: + - Name of the database. + required: true + type: str +extends_documentation_fragment: +- community.general.influxdb + +''' + +EXAMPLES = r''' +- name: Query connections + influxdb_query: + hostname: "{{ influxdb_ip_address }}" + database_name: "{{ influxdb_database_name }}" + query: "select mean(value) from connections" + register: connection + +- name: Query connections with tags filters + influxdb_query: + hostname: "{{ influxdb_ip_address }}" + database_name: "{{ influxdb_database_name }}" + query: "select mean(value) from connections where region='zue01' and host='server01'" + register: connection + +- name: Print results from the query + debug: + var: connection.query_results +''' + +RETURN = r''' +query_results: + description: Result from the query + returned: success + type: list + sample: + - mean: 1245.5333333333333 + time: "1970-01-01T00:00:00Z" +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_native +from ansible_collections.community.general.plugins.module_utils.influxdb import InfluxDb + + +class AnsibleInfluxDBRead(InfluxDb): + + def read_by_query(self, query): + client = self.connect_to_influxdb() + try: + rs = client.query(query) + if rs: + return list(rs.get_points()) + except Exception as e: + self.module.fail_json(msg=to_native(e)) + + +def main(): + argument_spec = InfluxDb.influxdb_argument_spec() + argument_spec.update( + query=dict(type='str', required=True), + database_name=dict(required=True, type='str'), + ) + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True + ) + + influx = AnsibleInfluxDBRead(module) + query = module.params.get('query') + results = influx.read_by_query(query) + module.exit_json(changed=True, query_results=results) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/database/influxdb/influxdb_retention_policy.py b/plugins/modules/database/influxdb/influxdb_retention_policy.py new file mode 100644 index 0000000000..2e4008806a --- /dev/null +++ b/plugins/modules/database/influxdb/influxdb_retention_policy.py @@ -0,0 +1,201 @@ +#!/usr/bin/python + +# Copyright: (c) 2016, Kamil Szczygiel +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = r''' +--- +module: influxdb_retention_policy +short_description: Manage InfluxDB retention policies +description: + - Manage InfluxDB retention policies. +author: "Kamil Szczygiel (@kamsz)" +requirements: + - "python >= 2.6" + - "influxdb >= 0.9" + - requests +options: + database_name: + description: + - Name of the database. + required: true + type: str + policy_name: + description: + - Name of the retention policy. + required: true + type: str + duration: + description: + - Determines how long InfluxDB should keep the data. + required: true + type: str + replication: + description: + - Determines how many independent copies of each point are stored in the cluster. + required: true + type: int + default: + description: + - Sets the retention policy as default retention policy. 
+ type: bool +extends_documentation_fragment: +- community.general.influxdb + +''' + +EXAMPLES = r''' +# Example influxdb_retention_policy command from Ansible Playbooks +- name: create 1 hour retention policy + influxdb_retention_policy: + hostname: "{{influxdb_ip_address}}" + database_name: "{{influxdb_database_name}}" + policy_name: test + duration: 1h + replication: 1 + ssl: yes + validate_certs: yes + +- name: create 1 day retention policy + influxdb_retention_policy: + hostname: "{{influxdb_ip_address}}" + database_name: "{{influxdb_database_name}}" + policy_name: test + duration: 1d + replication: 1 + +- name: create 1 week retention policy + influxdb_retention_policy: + hostname: "{{influxdb_ip_address}}" + database_name: "{{influxdb_database_name}}" + policy_name: test + duration: 1w + replication: 1 + +- name: create infinite retention policy + influxdb_retention_policy: + hostname: "{{influxdb_ip_address}}" + database_name: "{{influxdb_database_name}}" + policy_name: test + duration: INF + replication: 1 + ssl: no + validate_certs: no +''' + +RETURN = r''' +# only defaults +''' + +import re + +try: + import requests.exceptions + from influxdb import exceptions +except ImportError: + pass + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.influxdb import InfluxDb +from ansible.module_utils._text import to_native + + +def find_retention_policy(module, client): + database_name = module.params['database_name'] + policy_name = module.params['policy_name'] + hostname = module.params['hostname'] + retention_policy = None + + try: + retention_policies = client.get_list_retention_policies(database=database_name) + for policy in retention_policies: + if policy['name'] == policy_name: + retention_policy = policy + break + except requests.exceptions.ConnectionError as e: + module.fail_json(msg="Cannot connect to database %s on %s : %s" % (database_name, hostname, to_native(e))) + return retention_policy + + +def create_retention_policy(module, client): + database_name = module.params['database_name'] + policy_name = module.params['policy_name'] + duration = module.params['duration'] + replication = module.params['replication'] + default = module.params['default'] + + if not module.check_mode: + try: + client.create_retention_policy(policy_name, duration, replication, database_name, default) + except exceptions.InfluxDBClientError as e: + module.fail_json(msg=e.content) + module.exit_json(changed=True) + + +def alter_retention_policy(module, client, retention_policy): + database_name = module.params['database_name'] + policy_name = module.params['policy_name'] + duration = module.params['duration'] + replication = module.params['replication'] + default = module.params['default'] + duration_regexp = re.compile(r'(\d+)([hdw]{1})|(^INF$){1}') + changed = False + + duration_lookup = duration_regexp.search(duration) + + if duration_lookup.group(2) == 'h': + influxdb_duration_format = '%s0m0s' % duration + elif duration_lookup.group(2) == 'd': + influxdb_duration_format = '%sh0m0s' % (int(duration_lookup.group(1)) * 24) + elif duration_lookup.group(2) == 'w': + influxdb_duration_format = '%sh0m0s' % (int(duration_lookup.group(1)) * 24 * 7) + elif duration == 'INF': + influxdb_duration_format = '0' + + if (not retention_policy['duration'] == influxdb_duration_format or + not retention_policy['replicaN'] == int(replication) or + not retention_policy['default'] == default): + if not module.check_mode: + try: + 
client.alter_retention_policy(policy_name, database_name, duration, replication, default)
+            except exceptions.InfluxDBClientError as e:
+                module.fail_json(msg=e.content)
+        changed = True
+    module.exit_json(changed=changed)
+
+
+def main():
+    argument_spec = InfluxDb.influxdb_argument_spec()
+    argument_spec.update(
+        database_name=dict(required=True, type='str'),
+        policy_name=dict(required=True, type='str'),
+        duration=dict(required=True, type='str'),
+        replication=dict(required=True, type='int'),
+        default=dict(default=False, type='bool')
+    )
+    module = AnsibleModule(
+        argument_spec=argument_spec,
+        supports_check_mode=True
+    )
+
+    influxdb = InfluxDb(module)
+    client = influxdb.connect_to_influxdb()
+
+    retention_policy = find_retention_policy(module, client)
+
+    if retention_policy:
+        alter_retention_policy(module, client, retention_policy)
+    else:
+        create_retention_policy(module, client)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/database/influxdb/influxdb_user.py b/plugins/modules/database/influxdb/influxdb_user.py
new file mode 100644
index 0000000000..075a0debd1
--- /dev/null
+++ b/plugins/modules/database/influxdb/influxdb_user.py
@@ -0,0 +1,265 @@
+#!/usr/bin/python
+
+# Copyright: (c) 2017, Vitaliy Zhhuta
+# inspired by Kamil Szczygiel influxdb_database module
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+DOCUMENTATION = r'''
+---
+module: influxdb_user
+short_description: Manage InfluxDB users
+description:
+  - Manage InfluxDB users.
+author: "Vitaliy Zhhuta (@zhhuta)"
+requirements:
+  - "python >= 2.6"
+  - "influxdb >= 0.9"
+options:
+  user_name:
+    description:
+      - Name of the user.
+    required: True
+    type: str
+  user_password:
+    description:
+      - Password to be set for the user.
+    required: false
+    type: str
+  admin:
+    description:
+      - Whether the user should be in the admin role or not.
+      - Since version 2.8, the role will also be updated.
+    default: no
+    type: bool
+  state:
+    description:
+      - State of the user.
+    choices: [ absent, present ]
+    default: present
+    type: str
+  grants:
+    description:
+      - Privileges to grant to this user.
+      - Takes a list of dicts containing the "database" and "privilege" keys.
+      - If this argument is not provided, the current grants will be left alone.
+      - If an empty list is provided, all grants for the user will be removed.
+    type: list
+    elements: dict
+extends_documentation_fragment:
+- community.general.influxdb
+
+'''
+
+EXAMPLES = r'''
+- name: Create a user on localhost using default login credentials
+  influxdb_user:
+    user_name: john
+    user_password: s3cr3t
+
+- name: Create a user on localhost using custom login credentials
+  influxdb_user:
+    user_name: john
+    user_password: s3cr3t
+    login_username: "{{ influxdb_username }}"
+    login_password: "{{ influxdb_password }}"
+
+- name: Create an admin user on a remote host using custom login credentials
+  influxdb_user:
+    user_name: john
+    user_password: s3cr3t
+    admin: yes
+    hostname: "{{ influxdb_hostname }}"
+    login_username: "{{ influxdb_username }}"
+    login_password: "{{ influxdb_password }}"
+
+- name: Create a user on localhost with privileges
+  influxdb_user:
+    user_name: john
+    user_password: s3cr3t
+    login_username: "{{ influxdb_username }}"
+    login_password: "{{ influxdb_password }}"
+    grants:
+      - database: 'collectd'
+        privilege: 'WRITE'
+      - database: 'graphite'
+        privilege: 'READ'
+
+- name: Destroy a user using custom login credentials
+  influxdb_user:
+    user_name: john
+    login_username: "{{ influxdb_username }}"
+    login_password: "{{ influxdb_password }}"
+    state: absent
+'''
+
+RETURN = r'''
+#only defaults
+'''
+
+from ansible.module_utils.urls import ConnectionError
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.community.general.plugins.module_utils.influxdb as influx
+
+
+def find_user(module, client, user_name):
+    user_result = None
+
+    try:
+        users = client.get_list_users()
+        for user in users:
+            if user['user'] == user_name:
+                user_result = user
+                break
+    except (ConnectionError, influx.exceptions.InfluxDBClientError) as e:
+        module.fail_json(msg=to_native(e))
+    return user_result
+
+
+def check_user_password(module, client, user_name, user_password):
+    try:
+        client.switch_user(user_name, user_password)
+        client.get_list_users()
+    except influx.exceptions.InfluxDBClientError as e:
+        if e.code == 401:
+            return False
+    except ConnectionError as e:
+        module.fail_json(msg=to_native(e))
+    finally:
+        # restore previous user
+        client.switch_user(module.params['username'], module.params['password'])
+    return True
+
+
+def set_user_password(module, client, user_name, user_password):
+    if not module.check_mode:
+        try:
+            client.set_user_password(user_name, user_password)
+        except ConnectionError as e:
+            module.fail_json(msg=to_native(e))
+
+
+def create_user(module, client, user_name, user_password, admin):
+    if not module.check_mode:
+        try:
+            client.create_user(user_name, user_password, admin)
+        except ConnectionError as e:
+            module.fail_json(msg=to_native(e))
+
+
+def drop_user(module, client, user_name):
+    if not module.check_mode:
+        try:
+            client.drop_user(user_name)
+        except influx.exceptions.InfluxDBClientError as e:
+            module.fail_json(msg=e.content)
+
+    module.exit_json(changed=True)
+
+
+def set_user_grants(module, client, user_name, grants):
+    changed = False
+
+    try:
+        current_grants = client.get_list_privileges(user_name)
+        # Fix privileges wording
+        for i, v in enumerate(current_grants):
+            if v['privilege'] == 'ALL PRIVILEGES':
+                v['privilege'] = 'ALL'
+                current_grants[i] = v
+        # Drop empty grants in a second pass; del()'ing entries while
+        # enumerate() is still iterating would skip elements.
+        current_grants = [v for v in current_grants if v['privilege'] != 'NO PRIVILEGES']
+
+        # check if the current grants are included in the desired ones
+        for current_grant in current_grants:
+            if current_grant not in grants:
+                if not module.check_mode:
client.revoke_privilege(current_grant['privilege'], + current_grant['database'], + user_name) + changed = True + + # check if the desired grants are included in the current ones + for grant in grants: + if grant not in current_grants: + if not module.check_mode: + client.grant_privilege(grant['privilege'], + grant['database'], + user_name) + changed = True + + except influx.exceptions.InfluxDBClientError as e: + module.fail_json(msg=e.content) + + return changed + + +def main(): + argument_spec = influx.InfluxDb.influxdb_argument_spec() + argument_spec.update( + state=dict(default='present', type='str', choices=['present', 'absent']), + user_name=dict(required=True, type='str'), + user_password=dict(required=False, type='str', no_log=True), + admin=dict(default='False', type='bool'), + grants=dict(type='list', elements='dict'), + ) + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True + ) + + state = module.params['state'] + user_name = module.params['user_name'] + user_password = module.params['user_password'] + admin = module.params['admin'] + grants = module.params['grants'] + influxdb = influx.InfluxDb(module) + client = influxdb.connect_to_influxdb() + user = find_user(module, client, user_name) + + changed = False + + if state == 'present': + if user: + if not check_user_password(module, client, user_name, user_password) and user_password is not None: + set_user_password(module, client, user_name, user_password) + changed = True + + try: + if admin and not user['admin']: + client.grant_admin_privileges(user_name) + changed = True + elif not admin and user['admin']: + client.revoke_admin_privileges(user_name) + changed = True + except influx.exceptions.InfluxDBClientError as e: + module.fail_json(msg=to_native(e)) + + else: + user_password = user_password or '' + create_user(module, client, user_name, user_password, admin) + changed = True + + if grants is not None: + if set_user_grants(module, client, user_name, grants): + changed = True + + module.exit_json(changed=changed) + + if state == 'absent': + if user: + drop_user(module, client, user_name) + else: + module.exit_json(changed=False) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/database/influxdb/influxdb_write.py b/plugins/modules/database/influxdb/influxdb_write.py new file mode 100644 index 0000000000..3187a6f971 --- /dev/null +++ b/plugins/modules/database/influxdb/influxdb_write.py @@ -0,0 +1,101 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright (c) 2017, René Moser +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = r''' +--- +module: influxdb_write +short_description: Write data points into InfluxDB +description: + - Write data points into InfluxDB. +author: "René Moser (@resmo)" +requirements: + - "python >= 2.6" + - "influxdb >= 0.9" +options: + data_points: + description: + - Data points as dict to write into the database. + required: true + type: list + elements: dict + database_name: + description: + - Name of the database. 
+ required: true + type: str +extends_documentation_fragment: +- community.general.influxdb + +''' + +EXAMPLES = r''' +- name: Write points into database + influxdb_write: + hostname: "{{influxdb_ip_address}}" + database_name: "{{influxdb_database_name}}" + data_points: + - measurement: connections + tags: + host: server01 + region: us-west + time: "{{ ansible_date_time.iso8601 }}" + fields: + value: 2000 + - measurement: connections + tags: + host: server02 + region: us-east + time: "{{ ansible_date_time.iso8601 }}" + fields: + value: 3000 +''' + +RETURN = r''' +# only defaults +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_native +from ansible_collections.community.general.plugins.module_utils.influxdb import InfluxDb + + +class AnsibleInfluxDBWrite(InfluxDb): + + def write_data_point(self, data_points): + client = self.connect_to_influxdb() + + try: + client.write_points(data_points) + except Exception as e: + self.module.fail_json(msg=to_native(e)) + + +def main(): + argument_spec = InfluxDb.influxdb_argument_spec() + argument_spec.update( + data_points=dict(required=True, type='list', elements='dict'), + database_name=dict(required=True, type='str'), + ) + module = AnsibleModule( + argument_spec=argument_spec, + ) + + influx = AnsibleInfluxDBWrite(module) + data_points = module.params.get('data_points') + influx.write_data_point(data_points) + module.exit_json(changed=True) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/database/misc/elasticsearch_plugin.py b/plugins/modules/database/misc/elasticsearch_plugin.py new file mode 100644 index 0000000000..410685b534 --- /dev/null +++ b/plugins/modules/database/misc/elasticsearch_plugin.py @@ -0,0 +1,298 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# (c) 2015, Mathew Davies +# (c) 2017, Sam Doran +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = { + 'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community' +} + + +DOCUMENTATION = ''' +--- +module: elasticsearch_plugin +short_description: Manage Elasticsearch plugins +description: + - Manages Elasticsearch plugins. +author: + - Mathew Davies (@ThePixelDeveloper) + - Sam Doran (@samdoran) +options: + name: + description: + - Name of the plugin to install. + required: True + state: + description: + - Desired state of a plugin. + choices: ["present", "absent"] + default: present + src: + description: + - Optionally set the source location to retrieve the plugin from. This can be a file:// + URL to install from a local file, or a remote URL. If this is not set, the plugin + location is just based on the name. + - The name parameter must match the descriptor in the plugin ZIP specified. + - Is only used if the state would change, which is solely checked based on the name + parameter. If, for example, the plugin is already installed, changing this has no + effect. + - For ES 1.x use url. + required: False + url: + description: + - Set exact URL to download the plugin from (Only works for ES 1.x). + - For ES 2.x and higher, use src. + required: False + timeout: + description: + - "Timeout setting: 30s, 1m, 1h..." + - Only valid for Elasticsearch < 5.0. This option is ignored for Elasticsearch > 5.0. + default: 1m + force: + description: + - "Force batch mode when installing plugins. 
This is only necessary if a plugin requires additional permissions and console detection fails." + default: False + type: bool + plugin_bin: + description: + - Location of the plugin binary. If this file is not found, the default plugin binaries will be used. + - The default changed in Ansible 2.4 to None. + plugin_dir: + description: + - Your configured plugin directory specified in Elasticsearch + default: /usr/share/elasticsearch/plugins/ + proxy_host: + description: + - Proxy host to use during plugin installation + proxy_port: + description: + - Proxy port to use during plugin installation + version: + description: + - Version of the plugin to be installed. + If plugin exists with previous version, it will NOT be updated +''' + +EXAMPLES = ''' +# Install Elasticsearch Head plugin in Elasticsearch 2.x +- elasticsearch_plugin: + name: mobz/elasticsearch-head + state: present + +# Install a specific version of Elasticsearch Head in Elasticsearch 2.x +- elasticsearch_plugin: + name: mobz/elasticsearch-head + version: 2.0.0 + +# Uninstall Elasticsearch head plugin in Elasticsearch 2.x +- elasticsearch_plugin: + name: mobz/elasticsearch-head + state: absent + +# Install a specific plugin in Elasticsearch >= 5.0 +- elasticsearch_plugin: + name: analysis-icu + state: present + +# Install the ingest-geoip plugin with a forced installation +- elasticsearch_plugin: + name: ingest-geoip + state: present + force: yes +''' + +import os + +from ansible.module_utils.basic import AnsibleModule + + +PACKAGE_STATE_MAP = dict( + present="install", + absent="remove" +) + +PLUGIN_BIN_PATHS = tuple([ + '/usr/share/elasticsearch/bin/elasticsearch-plugin', + '/usr/share/elasticsearch/bin/plugin' +]) + + +def parse_plugin_repo(string): + elements = string.split("/") + + # We first consider the simplest form: pluginname + repo = elements[0] + + # We consider the form: username/pluginname + if len(elements) > 1: + repo = elements[1] + + # remove elasticsearch- prefix + # remove es- prefix + for string in ("elasticsearch-", "es-"): + if repo.startswith(string): + return repo[len(string):] + + return repo + + +def is_plugin_present(plugin_name, plugin_dir): + return os.path.isdir(os.path.join(plugin_dir, plugin_name)) + + +def parse_error(string): + reason = "ERROR: " + try: + return string[string.index(reason) + len(reason):].strip() + except ValueError: + return string + + +def install_plugin(module, plugin_bin, plugin_name, version, src, url, proxy_host, proxy_port, timeout, force): + cmd_args = [plugin_bin, PACKAGE_STATE_MAP["present"]] + is_old_command = (os.path.basename(plugin_bin) == 'plugin') + + # Timeout and version are only valid for plugin, not elasticsearch-plugin + if is_old_command: + if timeout: + cmd_args.append("--timeout %s" % timeout) + + if version: + plugin_name = plugin_name + '/' + version + cmd_args[2] = plugin_name + + if proxy_host and proxy_port: + cmd_args.append("-DproxyHost=%s -DproxyPort=%s" % (proxy_host, proxy_port)) + + # Legacy ES 1.x + if url: + cmd_args.append("--url %s" % url) + + if force: + cmd_args.append("--batch") + if src: + cmd_args.append(src) + else: + cmd_args.append(plugin_name) + + cmd = " ".join(cmd_args) + + if module.check_mode: + rc, out, err = 0, "check mode", "" + else: + rc, out, err = module.run_command(cmd) + + if rc != 0: + reason = parse_error(out) + module.fail_json(msg="Installing plugin '%s' failed: %s" % (plugin_name, reason), err=err) + + return True, cmd, out, err + + +def remove_plugin(module, plugin_bin, plugin_name): + cmd_args = [plugin_bin, 
PACKAGE_STATE_MAP["absent"], parse_plugin_repo(plugin_name)] + + cmd = " ".join(cmd_args) + + if module.check_mode: + rc, out, err = 0, "check mode", "" + else: + rc, out, err = module.run_command(cmd) + + if rc != 0: + reason = parse_error(out) + module.fail_json(msg="Removing plugin '%s' failed: %s" % (plugin_name, reason), err=err) + + return True, cmd, out, err + + +def get_plugin_bin(module, plugin_bin=None): + # Use the plugin_bin that was supplied first before trying other options + valid_plugin_bin = None + if plugin_bin and os.path.isfile(plugin_bin): + valid_plugin_bin = plugin_bin + + else: + # Add the plugin_bin passed into the module to the top of the list of paths to test, + # testing for that binary name first before falling back to the default paths. + bin_paths = list(PLUGIN_BIN_PATHS) + if plugin_bin and plugin_bin not in bin_paths: + bin_paths.insert(0, plugin_bin) + + # Get separate lists of dirs and binary names from the full paths to the + # plugin binaries. + plugin_dirs = list(set([os.path.dirname(x) for x in bin_paths])) + plugin_bins = list(set([os.path.basename(x) for x in bin_paths])) + + # Check for the binary names in the default system paths as well as the path + # specified in the module arguments. + for bin_file in plugin_bins: + valid_plugin_bin = module.get_bin_path(bin_file, opt_dirs=plugin_dirs) + if valid_plugin_bin: + break + + if not valid_plugin_bin: + module.fail_json(msg='%s does not exist and no other valid plugin installers were found. Make sure Elasticsearch is installed.' % plugin_bin) + + return valid_plugin_bin + + +def main(): + module = AnsibleModule( + argument_spec=dict( + name=dict(required=True), + state=dict(default="present", choices=PACKAGE_STATE_MAP.keys()), + src=dict(default=None), + url=dict(default=None), + timeout=dict(default="1m"), + force=dict(type='bool', default=False), + plugin_bin=dict(type="path"), + plugin_dir=dict(default="/usr/share/elasticsearch/plugins/", type="path"), + proxy_host=dict(default=None), + proxy_port=dict(default=None), + version=dict(default=None) + ), + mutually_exclusive=[("src", "url")], + supports_check_mode=True + ) + + name = module.params["name"] + state = module.params["state"] + url = module.params["url"] + src = module.params["src"] + timeout = module.params["timeout"] + force = module.params["force"] + plugin_bin = module.params["plugin_bin"] + plugin_dir = module.params["plugin_dir"] + proxy_host = module.params["proxy_host"] + proxy_port = module.params["proxy_port"] + version = module.params["version"] + + # Search provided path and system paths for valid binary + plugin_bin = get_plugin_bin(module, plugin_bin) + + repo = parse_plugin_repo(name) + present = is_plugin_present(repo, plugin_dir) + + # skip if the state is correct + if (present and state == "present") or (state == "absent" and not present): + module.exit_json(changed=False, name=name, state=state) + + if state == "present": + changed, cmd, out, err = install_plugin(module, plugin_bin, name, version, src, url, proxy_host, proxy_port, timeout, force) + + elif state == "absent": + changed, cmd, out, err = remove_plugin(module, plugin_bin, name) + + module.exit_json(changed=changed, cmd=cmd, name=name, state=state, url=url, timeout=timeout, stdout=out, stderr=err) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/database/misc/kibana_plugin.py b/plugins/modules/database/misc/kibana_plugin.py new file mode 100644 index 0000000000..94b28227dc --- /dev/null +++ b/plugins/modules/database/misc/kibana_plugin.py 
@@ -0,0 +1,264 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2016, Thierno IB. BARRY @barryib +# Sponsored by Polyconseil http://polyconseil.fr. +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = { + 'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community' +} + + +DOCUMENTATION = ''' +--- +module: kibana_plugin +short_description: Manage Kibana plugins +description: + - This module can be used to manage Kibana plugins. +author: Thierno IB. BARRY (@barryib) +options: + name: + description: + - Name of the plugin to install. + required: True + state: + description: + - Desired state of a plugin. + choices: ["present", "absent"] + default: present + url: + description: + - Set exact URL to download the plugin from. + - For local file, prefix its absolute path with file:// + timeout: + description: + - "Timeout setting: 30s, 1m, 1h etc." + default: 1m + plugin_bin: + description: + - Location of the Kibana binary. + default: /opt/kibana/bin/kibana + plugin_dir: + description: + - Your configured plugin directory specified in Kibana. + default: /opt/kibana/installedPlugins/ + version: + description: + - Version of the plugin to be installed. + - If plugin exists with previous version, plugin will NOT be updated unless C(force) is set to yes. + force: + description: + - Delete and re-install the plugin. Can be useful for plugins update. + type: bool + default: 'no' +''' + +EXAMPLES = ''' +- name: Install Elasticsearch head plugin + kibana_plugin: + state: present + name: elasticsearch/marvel + +- name: Install specific version of a plugin + kibana_plugin: + state: present + name: elasticsearch/marvel + version: '2.3.3' + +- name: Uninstall Elasticsearch head plugin + kibana_plugin: + state: absent + name: elasticsearch/marvel +''' + +RETURN = ''' +cmd: + description: the launched command during plugin management (install / remove) + returned: success + type: str +name: + description: the plugin name to install or remove + returned: success + type: str +url: + description: the url from where the plugin is installed from + returned: success + type: str +timeout: + description: the timeout for plugin download + returned: success + type: str +stdout: + description: the command stdout + returned: success + type: str +stderr: + description: the command stderr + returned: success + type: str +state: + description: the state for the managed plugin + returned: success + type: str +''' + +import os +from distutils.version import LooseVersion +from ansible.module_utils.basic import AnsibleModule + + +PACKAGE_STATE_MAP = dict( + present="--install", + absent="--remove" +) + + +def parse_plugin_repo(string): + elements = string.split("/") + + # We first consider the simplest form: pluginname + repo = elements[0] + + # We consider the form: username/pluginname + if len(elements) > 1: + repo = elements[1] + + # remove elasticsearch- prefix + # remove es- prefix + for string in ("elasticsearch-", "es-"): + if repo.startswith(string): + return repo[len(string):] + + return repo + + +def is_plugin_present(plugin_dir, working_dir): + return os.path.isdir(os.path.join(working_dir, plugin_dir)) + + +def parse_error(string): + reason = "reason: " + try: + return string[string.index(reason) + len(reason):].strip() + except ValueError: + return string + + +def install_plugin(module, plugin_bin, plugin_name, url, 
timeout, kibana_version='4.6'): + if LooseVersion(kibana_version) > LooseVersion('4.6'): + kibana_plugin_bin = os.path.join(os.path.dirname(plugin_bin), 'kibana-plugin') + cmd_args = [kibana_plugin_bin, "install"] + if url: + cmd_args.append(url) + else: + cmd_args.append(plugin_name) + else: + cmd_args = [plugin_bin, "plugin", PACKAGE_STATE_MAP["present"], plugin_name] + + if url: + cmd_args.append("--url %s" % url) + + if timeout: + cmd_args.append("--timeout %s" % timeout) + + cmd = " ".join(cmd_args) + + if module.check_mode: + return True, cmd, "check mode", "" + + rc, out, err = module.run_command(cmd) + if rc != 0: + reason = parse_error(out) + module.fail_json(msg=reason) + + return True, cmd, out, err + + +def remove_plugin(module, plugin_bin, plugin_name, kibana_version='4.6'): + if LooseVersion(kibana_version) > LooseVersion('4.6'): + kibana_plugin_bin = os.path.join(os.path.dirname(plugin_bin), 'kibana-plugin') + cmd_args = [kibana_plugin_bin, "remove", plugin_name] + else: + cmd_args = [plugin_bin, "plugin", PACKAGE_STATE_MAP["absent"], plugin_name] + + cmd = " ".join(cmd_args) + + if module.check_mode: + return True, cmd, "check mode", "" + + rc, out, err = module.run_command(cmd) + if rc != 0: + reason = parse_error(out) + module.fail_json(msg=reason) + + return True, cmd, out, err + + +def get_kibana_version(module, plugin_bin): + cmd_args = [plugin_bin, '--version'] + cmd = " ".join(cmd_args) + rc, out, err = module.run_command(cmd) + if rc != 0: + module.fail_json(msg="Failed to get Kibana version : %s" % err) + + return out.strip() + + +def main(): + module = AnsibleModule( + argument_spec=dict( + name=dict(required=True), + state=dict(default="present", choices=PACKAGE_STATE_MAP.keys()), + url=dict(default=None), + timeout=dict(default="1m"), + plugin_bin=dict(default="/opt/kibana/bin/kibana", type="path"), + plugin_dir=dict(default="/opt/kibana/installedPlugins/", type="path"), + version=dict(default=None), + force=dict(default="no", type="bool") + ), + supports_check_mode=True, + ) + + name = module.params["name"] + state = module.params["state"] + url = module.params["url"] + timeout = module.params["timeout"] + plugin_bin = module.params["plugin_bin"] + plugin_dir = module.params["plugin_dir"] + version = module.params["version"] + force = module.params["force"] + + changed, cmd, out, err = False, '', '', '' + + kibana_version = get_kibana_version(module, plugin_bin) + + present = is_plugin_present(parse_plugin_repo(name), plugin_dir) + + # skip if the state is correct + if (present and state == "present" and not force) or (state == "absent" and not present and not force): + module.exit_json(changed=False, name=name, state=state) + + if version: + name = name + '/' + version + + if state == "present": + if force: + remove_plugin(module, plugin_bin, name) + changed, cmd, out, err = install_plugin(module, plugin_bin, name, url, timeout, kibana_version) + + elif state == "absent": + changed, cmd, out, err = remove_plugin(module, plugin_bin, name, kibana_version) + + module.exit_json(changed=changed, cmd=cmd, name=name, state=state, url=url, timeout=timeout, stdout=out, stderr=err) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/database/misc/redis.py b/plugins/modules/database/misc/redis.py new file mode 100644 index 0000000000..f998cc477e --- /dev/null +++ b/plugins/modules/database/misc/redis.py @@ -0,0 +1,312 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or 
https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+DOCUMENTATION = '''
+---
+module: redis
+short_description: Various redis commands, slave and flush
+description:
+   - Unified utility to interact with redis instances.
+options:
+    command:
+        description:
+            - The selected redis command
+            - C(config) (new in 1.6), ensures a configuration setting on an instance.
+            - C(flush) flushes all the instance or a specified db.
+            - C(slave) sets a redis instance in slave or master mode.
+        required: true
+        choices: [ config, flush, slave ]
+    login_password:
+        description:
+            - The password used to authenticate with (usually not used)
+    login_host:
+        description:
+            - The host running the database
+        default: localhost
+    login_port:
+        description:
+            - The port to connect to
+        default: 6379
+    master_host:
+        description:
+            - The host of the master instance [slave command]
+    master_port:
+        description:
+            - The port of the master instance [slave command]
+    slave_mode:
+        description:
+            - the mode of the redis instance [slave command]
+        default: slave
+        choices: [ master, slave ]
+    db:
+        description:
+            - The database to flush (used in db mode) [flush command]
+    flush_mode:
+        description:
+            - Type of flush (all the dbs in a redis instance or a specific one)
+              [flush command]
+        default: all
+        choices: [ all, db ]
+    name:
+        description:
+            - A redis config key.
+    value:
+        description:
+            - A redis config value. When memory size is needed, it is possible
+              to specify it in the usual form of 1KB, 2M, 400MB where the base is 1024.
+              Units are case insensitive i.e. 1m = 1mb = 1M = 1MB.
+
+notes:
+   - Requires the redis-py Python package on the remote host. You can
+     install it with pip (pip install redis) or with a package manager.
+     https://github.com/andymccurdy/redis-py
+   - If the redis master instance that we are making a slave of is password
+     protected, the password needs to be set in redis.conf via the masterauth
+     variable.
+
+requirements: [ redis ]
+author: "Xabier Larrakoetxea (@slok)"
'''
+
+EXAMPLES = '''
+- name: Set local redis instance to be slave of melee.island on port 6377
+  redis:
+    command: slave
+    master_host: melee.island
+    master_port: 6377
+
+- name: Deactivate slave mode
+  redis:
+    command: slave
+    slave_mode: master
+
+- name: Flush all the redis db
+  redis:
+    command: flush
+    flush_mode: all
+
+- name: Flush only one db in a redis instance
+  redis:
+    command: flush
+    db: 1
+    flush_mode: db
+
+- name: Configure local redis to have 10000 max clients
+  redis:
+    command: config
+    name: maxclients
+    value: 10000
+
+- name: Configure local redis maxmemory to 4GB
+  redis:
+    command: config
+    name: maxmemory
+    value: 4GB
+
+- name: Configure local redis to have lua time limit of 100 ms
+  redis:
+    command: config
+    name: lua-time-limit
+    value: 100
+'''
+
+import traceback
+
+REDIS_IMP_ERR = None
+try:
+    import redis
+except ImportError:
+    REDIS_IMP_ERR = traceback.format_exc()
+    redis_found = False
+else:
+    redis_found = True
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils.common.text.formatters import human_to_bytes
+from ansible.module_utils._text import to_native
+
+
+# Redis module specific support methods.
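+# A minimal redis-py sketch of what the helpers below wrap (assumes a local,
+# reachable instance; the name `r` is illustrative and not part of this module):
+#
+#   import redis
+#   r = redis.StrictRedis(host='localhost', port=6379)
+#   r.slaveof('melee.island', 6377)   # what set_slave_mode() calls
+#   r.slaveof()                       # no arguments: promote back to master
+#   r.flushdb()                       # flush only the currently selected db
+#   r.flushall()                      # flush every db on the instance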
+def set_slave_mode(client, master_host, master_port): + try: + return client.slaveof(master_host, master_port) + except Exception: + return False + + +def set_master_mode(client): + try: + return client.slaveof() + except Exception: + return False + + +def flush(client, db=None): + try: + if not isinstance(db, int): + return client.flushall() + else: + # The passed client has been connected to the database already + return client.flushdb() + except Exception: + return False + + +# Module execution. +def main(): + module = AnsibleModule( + argument_spec=dict( + command=dict(type='str', choices=['config', 'flush', 'slave']), + login_password=dict(type='str', no_log=True), + login_host=dict(type='str', default='localhost'), + login_port=dict(type='int', default=6379), + master_host=dict(type='str'), + master_port=dict(type='int'), + slave_mode=dict(type='str', default='slave', choices=['master', 'slave']), + db=dict(type='int'), + flush_mode=dict(type='str', default='all', choices=['all', 'db']), + name=dict(type='str'), + value=dict(type='str') + ), + supports_check_mode=True, + ) + + if not redis_found: + module.fail_json(msg=missing_required_lib('redis'), exception=REDIS_IMP_ERR) + + login_password = module.params['login_password'] + login_host = module.params['login_host'] + login_port = module.params['login_port'] + command = module.params['command'] + + # Slave Command section ----------- + if command == "slave": + master_host = module.params['master_host'] + master_port = module.params['master_port'] + mode = module.params['slave_mode'] + + # Check if we have all the data + if mode == "slave": # Only need data if we want to be slave + if not master_host: + module.fail_json(msg='In slave mode master host must be provided') + + if not master_port: + module.fail_json(msg='In slave mode master port must be provided') + + # Connect and check + r = redis.StrictRedis(host=login_host, port=login_port, password=login_password) + try: + r.ping() + except Exception as e: + module.fail_json(msg="unable to connect to database: %s" % to_native(e), exception=traceback.format_exc()) + + # Check if we are already in the mode that we want + info = r.info() + if mode == "master" and info["role"] == "master": + module.exit_json(changed=False, mode=mode) + + elif mode == "slave" and info["role"] == "slave" and info["master_host"] == master_host and info["master_port"] == master_port: + status = dict( + status=mode, + master_host=master_host, + master_port=master_port, + ) + module.exit_json(changed=False, mode=status) + else: + # Do the stuff + # (Check Check_mode before commands so the commands aren't evaluated + # if not necessary) + if mode == "slave": + if module.check_mode or\ + set_slave_mode(r, master_host, master_port): + info = r.info() + status = { + 'status': mode, + 'master_host': master_host, + 'master_port': master_port, + } + module.exit_json(changed=True, mode=status) + else: + module.fail_json(msg='Unable to set slave mode') + + else: + if module.check_mode or set_master_mode(r): + module.exit_json(changed=True, mode=mode) + else: + module.fail_json(msg='Unable to set master mode') + + # flush Command section ----------- + elif command == "flush": + db = module.params['db'] + mode = module.params['flush_mode'] + + # Check if we have all the data + if mode == "db": + if db is None: + module.fail_json(msg="In db mode the db number must be provided") + + # Connect and check + r = redis.StrictRedis(host=login_host, port=login_port, password=login_password, db=db) + try: + r.ping() + except 
Exception as e:
+            module.fail_json(msg="unable to connect to database: %s" % to_native(e), exception=traceback.format_exc())
+
+        # Apply the change
+        # (check check_mode before running commands so they are not executed
+        # unnecessarily)
+        if mode == "all":
+            if module.check_mode or flush(r):
+                module.exit_json(changed=True, flushed=True)
+            else:  # flush() only returns False when an exception was raised
+                module.fail_json(msg="Unable to flush all databases")
+
+        else:
+            if module.check_mode or flush(r, db):
+                module.exit_json(changed=True, flushed=True, db=db)
+            else:  # flush() only returns False when an exception was raised
+                module.fail_json(msg="Unable to flush database %d" % db)
+    elif command == 'config':
+        name = module.params['name']
+
+        try:  # try to parse the value as if it were a memory size
+            value = str(human_to_bytes(module.params['value'].upper()))
+        except ValueError:
+            value = module.params['value']
+
+        r = redis.StrictRedis(host=login_host, port=login_port, password=login_password)
+
+        try:
+            r.ping()
+        except Exception as e:
+            module.fail_json(msg="unable to connect to database: %s" % to_native(e), exception=traceback.format_exc())
+
+        try:
+            old_value = r.config_get(name)[name]
+        except Exception as e:
+            module.fail_json(msg="unable to read config: %s" % to_native(e), exception=traceback.format_exc())
+        changed = old_value != value
+
+        if module.check_mode or not changed:
+            module.exit_json(changed=changed, name=name, value=value)
+        else:
+            try:
+                r.config_set(name, value)
+            except Exception as e:
+                module.fail_json(msg="unable to write config: %s" % to_native(e), exception=traceback.format_exc())
+            module.exit_json(changed=changed, name=name, value=value)
+    else:
+        module.fail_json(msg='A valid command must be provided')
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/database/misc/riak.py b/plugins/modules/database/misc/riak.py
new file mode 100644
index 0000000000..dd9c551de6
--- /dev/null
+++ b/plugins/modules/database/misc/riak.py
@@ -0,0 +1,226 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2013, James Martin, Drew Kerrigan
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: riak
+short_description: This module handles some common Riak operations
+description:
+     - This module can be used to join nodes to a cluster or to check
+       the status of the cluster.
+author:
+    - "James Martin (@jsmartin)"
+    - "Drew Kerrigan (@drewkerrigan)"
+options:
+  command:
+    description:
+      - The command you would like to perform against the cluster.
+    choices: ['ping', 'kv_test', 'join', 'plan', 'commit']
+  config_dir:
+    description:
+      - The path to the riak configuration directory.
+    default: /etc/riak
+  http_conn:
+    description:
+      - The IP address and port that are listening for Riak HTTP queries.
+    default: 127.0.0.1:8098
+  target_node:
+    description:
+      - The target node for certain operations (join, ping).
+    default: riak@127.0.0.1
+  wait_for_handoffs:
+    description:
+      - Number of seconds to wait for handoffs to complete.
+  wait_for_ring:
+    description:
+      - Number of seconds to wait for all nodes to agree on the ring.
+  wait_for_service:
+    description:
+      - Waits for a riak service to come online before continuing.
+    choices: ['kv']
+  validate_certs:
+    description:
+      - If C(no), SSL certificates will not be validated. 
This should only be used
+        on personally controlled sites using self-signed certificates.
+    type: bool
+    default: 'yes'
+'''
+
+EXAMPLES = '''
+# Join a Riak node to another node
+- riak:
+    command: join
+    target_node: riak@10.1.1.1
+
+# Wait up to 600 seconds for handoffs to finish. Use with async and poll.
+- riak:
+    wait_for_handoffs: 600
+
+# Wait for the riak_kv service to start up
+- riak:
+    wait_for_service: kv
+'''
+
+import json
+import time
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import fetch_url
+
+
+def ring_check(module, riak_admin_bin):
+    cmd = '%s ringready' % riak_admin_bin
+    rc, out, err = module.run_command(cmd)
+    if rc == 0 and 'TRUE All nodes agree on the ring' in out:
+        return True
+    else:
+        return False
+
+
+def main():
+
+    module = AnsibleModule(
+        argument_spec=dict(
+            command=dict(required=False, default=None, choices=[
+                'ping', 'kv_test', 'join', 'plan', 'commit']),
+            config_dir=dict(default='/etc/riak', type='path'),
+            http_conn=dict(required=False, default='127.0.0.1:8098'),
+            target_node=dict(default='riak@127.0.0.1', required=False),
+            wait_for_handoffs=dict(default=0, type='int'),
+            wait_for_ring=dict(default=0, type='int'),
+            wait_for_service=dict(
+                required=False, default=None, choices=['kv']),
+            validate_certs=dict(default='yes', type='bool'))
+    )
+
+    command = module.params.get('command')
+    http_conn = module.params.get('http_conn')
+    target_node = module.params.get('target_node')
+    wait_for_handoffs = module.params.get('wait_for_handoffs')
+    wait_for_ring = module.params.get('wait_for_ring')
+    wait_for_service = module.params.get('wait_for_service')
+
+    # make sure riak commands are on the path
+    riak_bin = module.get_bin_path('riak')
+    riak_admin_bin = module.get_bin_path('riak-admin')
+
+    timeout = time.time() + 120
+    while True:
+        if time.time() > timeout:
+            module.fail_json(msg='Timeout, could not fetch Riak stats.')
+        (response, info) = fetch_url(module, 'http://%s/stats' % (http_conn), force=True, timeout=5)
+        if info['status'] == 200:
+            stats_raw = response.read()
+            break
+        time.sleep(5)
+
+    # Attempt to parse the fetched stats as JSON.
+    try:
+        stats = json.loads(stats_raw)
+    except Exception:
+        module.fail_json(msg='Could not parse Riak stats.')
+
+    node_name = stats['nodename']
+    nodes = stats['ring_members']
+    ring_size = stats['ring_creation_size']
+    rc, out, err = module.run_command([riak_bin, 'version'])
+    version = out.strip()
+
+    result = dict(node_name=node_name,
+                  nodes=nodes,
+                  ring_size=ring_size,
+                  version=version)
+
+    if command == 'ping':
+        cmd = '%s ping %s' % (riak_bin, target_node)
+        rc, out, err = module.run_command(cmd)
+        if rc == 0:
+            result['ping'] = out
+        else:
+            module.fail_json(msg=out)
+
+    elif command == 'kv_test':
+        cmd = '%s test' % riak_admin_bin
+        rc, out, err = module.run_command(cmd)
+        if rc == 0:
+            result['kv_test'] = out
+        else:
+            module.fail_json(msg=out)
+
+    elif command == 'join':
+        if nodes.count(node_name) == 1 and len(nodes) > 1:
+            result['join'] = 'Node is already in cluster or staged to be in cluster.'
+        else:
+            cmd = '%s cluster join %s' % (riak_admin_bin, target_node)
+            rc, out, err = module.run_command(cmd)
+            if rc == 0:
+                result['join'] = out
+                result['changed'] = True
+            else:
+                module.fail_json(msg=out)
+
+    elif command == 'plan':
+        cmd = '%s cluster plan' % riak_admin_bin
+        rc, out, err = module.run_command(cmd)
+        if rc == 0:
+            result['plan'] = out
+            if 'Staged Changes' in out:
+                result['changed'] = True
+        else:
+            module.fail_json(msg=out)
+
+    elif command == 'commit':
+        cmd = '%s cluster commit' % riak_admin_bin
+        rc, out, err = module.run_command(cmd)
+        if rc == 0:
+            result['commit'] = out
+            result['changed'] = True
+        else:
+            module.fail_json(msg=out)
+
+    # This could take a while; we recommend running the module in async mode
+    if wait_for_handoffs:
+        timeout = time.time() + wait_for_handoffs
+        while True:
+            cmd = '%s transfers' % riak_admin_bin
+            rc, out, err = module.run_command(cmd)
+            if 'No transfers active' in out:
+                result['handoffs'] = 'No transfers active.'
+                break
+            time.sleep(10)
+            if time.time() > timeout:
+                module.fail_json(msg='Timeout waiting for handoffs.')
+
+    if wait_for_service:
+        cmd = [riak_admin_bin, 'wait_for_service', 'riak_%s' % wait_for_service, node_name]
+        rc, out, err = module.run_command(cmd)
+        result['service'] = out
+
+    if wait_for_ring:
+        timeout = time.time() + wait_for_ring
+        while True:
+            if ring_check(module, riak_admin_bin):
+                break
+            time.sleep(10)
+            if time.time() > timeout:
+                module.fail_json(msg='Timeout waiting for nodes to agree on ring.')
+
+    result['ring_ready'] = ring_check(module, riak_admin_bin)
+
+    module.exit_json(**result)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/database/mssql/mssql_db.py b/plugins/modules/database/mssql/mssql_db.py
new file mode 100644
index 0000000000..1b1cbca305
--- /dev/null
+++ b/plugins/modules/database/mssql/mssql_db.py
@@ -0,0 +1,228 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2014, Vedit Firat Arig
+# Outline and parts are reused from Mark Theunissen's mysql_db module
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: mssql_db
+short_description: Add or remove MSSQL databases from a remote host
+description:
+   - Add or remove MSSQL databases from a remote host.
+options:
+  name:
+    description:
+      - Name of the database to add or remove.
+    required: true
+    aliases: [ db ]
+  login_user:
+    description:
+      - The username used to authenticate with.
+  login_password:
+    description:
+      - The password used to authenticate with.
+  login_host:
+    description:
+      - Host running the database.
+  login_port:
+    description:
+      - Port of the MSSQL server. Requires I(login_host) to be defined as
+        something other than localhost if I(login_port) is used.
+    default: 1433
+  state:
+    description:
+      - The database state.
+    default: present
+    choices: [ "present", "absent", "import" ]
+  target:
+    description:
+      - Location, on the remote host, of the dump file to read from or write to.
+        Uncompressed SQL files (C(.sql)) are supported.
+  autocommit:
+    description:
+      - Automatically commit the change only if the import succeeds. Sometimes it is necessary to use autocommit=true, since some content can't be changed
+        within a transaction.
+    type: bool
+    default: 'no'
+notes:
+   - Requires the pymssql Python package on the remote host. 
For Ubuntu, this + is as easy as pip install pymssql (See M(pip).) +requirements: + - python >= 2.7 + - pymssql +author: Vedit Firat Arig (@vedit) +''' + +EXAMPLES = ''' +# Create a new database with name 'jackdata' +- mssql_db: + name: jackdata + state: present + +# Copy database dump file to remote host and restore it to database 'my_db' +- copy: + src: dump.sql + dest: /tmp + +- mssql_db: + name: my_db + state: import + target: /tmp/dump.sql +''' + +RETURN = ''' +# +''' + +import os +import traceback + +PYMSSQL_IMP_ERR = None +try: + import pymssql +except ImportError: + PYMSSQL_IMP_ERR = traceback.format_exc() + mssql_found = False +else: + mssql_found = True + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib + + +def db_exists(conn, cursor, db): + cursor.execute("SELECT name FROM master.sys.databases WHERE name = %s", db) + conn.commit() + return bool(cursor.rowcount) + + +def db_create(conn, cursor, db): + cursor.execute("CREATE DATABASE [%s]" % db) + return db_exists(conn, cursor, db) + + +def db_delete(conn, cursor, db): + try: + cursor.execute("ALTER DATABASE [%s] SET single_user WITH ROLLBACK IMMEDIATE" % db) + except Exception: + pass + cursor.execute("DROP DATABASE [%s]" % db) + return not db_exists(conn, cursor, db) + + +def db_import(conn, cursor, module, db, target): + if os.path.isfile(target): + with open(target, 'r') as backup: + sqlQuery = "USE [%s]\n" % db + for line in backup: + if line is None: + break + elif line.startswith('GO'): + cursor.execute(sqlQuery) + sqlQuery = "USE [%s]\n" % db + else: + sqlQuery += line + cursor.execute(sqlQuery) + conn.commit() + return 0, "import successful", "" + else: + return 1, "cannot find target file", "cannot find target file" + + +def main(): + module = AnsibleModule( + argument_spec=dict( + name=dict(required=True, aliases=['db']), + login_user=dict(default=''), + login_password=dict(default='', no_log=True), + login_host=dict(required=True), + login_port=dict(default='1433'), + target=dict(default=None), + autocommit=dict(type='bool', default=False), + state=dict( + default='present', choices=['present', 'absent', 'import']) + ) + ) + + if not mssql_found: + module.fail_json(msg=missing_required_lib('pymssql'), exception=PYMSSQL_IMP_ERR) + + db = module.params['name'] + state = module.params['state'] + autocommit = module.params['autocommit'] + target = module.params["target"] + + login_user = module.params['login_user'] + login_password = module.params['login_password'] + login_host = module.params['login_host'] + login_port = module.params['login_port'] + + login_querystring = login_host + if login_port != "1433": + login_querystring = "%s:%s" % (login_host, login_port) + + if login_user != "" and login_password == "": + module.fail_json(msg="when supplying login_user arguments login_password must be provided") + + try: + conn = pymssql.connect(user=login_user, password=login_password, host=login_querystring, database='master') + cursor = conn.cursor() + except Exception as e: + if "Unknown database" in str(e): + errno, errstr = e.args + module.fail_json(msg="ERROR: %s %s" % (errno, errstr)) + else: + module.fail_json(msg="unable to connect, check login_user and login_password are correct, or alternatively check your " + "@sysconfdir@/freetds.conf / ${HOME}/.freetds.conf") + + conn.autocommit(True) + changed = False + + if db_exists(conn, cursor, db): + if state == "absent": + try: + changed = db_delete(conn, cursor, db) + except Exception as e: + module.fail_json(msg="error deleting database: " + 
str(e)) + elif state == "import": + conn.autocommit(autocommit) + rc, stdout, stderr = db_import(conn, cursor, module, db, target) + + if rc != 0: + module.fail_json(msg="%s" % stderr) + else: + module.exit_json(changed=True, db=db, msg=stdout) + else: + if state == "present": + try: + changed = db_create(conn, cursor, db) + except Exception as e: + module.fail_json(msg="error creating database: " + str(e)) + elif state == "import": + try: + changed = db_create(conn, cursor, db) + except Exception as e: + module.fail_json(msg="error creating database: " + str(e)) + + conn.autocommit(autocommit) + rc, stdout, stderr = db_import(conn, cursor, module, db, target) + + if rc != 0: + module.fail_json(msg="%s" % stderr) + else: + module.exit_json(changed=True, db=db, msg=stdout) + + module.exit_json(changed=changed, db=db) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/database/mysql/mysql_db.py b/plugins/modules/database/mysql/mysql_db.py new file mode 100644 index 0000000000..ec835e6064 --- /dev/null +++ b/plugins/modules/database/mysql/mysql_db.py @@ -0,0 +1,610 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2012, Mark Theunissen +# Sponsored by Four Kitchens http://fourkitchens.com. +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = r''' +--- +module: mysql_db +short_description: Add or remove MySQL databases from a remote host +description: +- Add or remove MySQL databases from a remote host. +options: + name: + description: + - Name of the database to add or remove. + - I(name=all) may only be provided if I(state) is C(dump) or C(import). + - List of databases is provided with I(state=dump), I(state=present) and I(state=absent). + - If I(name=all) it works like --all-databases option for mysqldump (Added in 2.0). + required: true + type: list + elements: str + aliases: [db] + state: + description: + - The database state + type: str + default: present + choices: ['absent', 'dump', 'import', 'present'] + collation: + description: + - Collation mode (sorting). This only applies to new table/databases and + does not update existing ones, this is a limitation of MySQL. + type: str + default: '' + encoding: + description: + - Encoding mode to use, examples include C(utf8) or C(latin1_swedish_ci), + at creation of database, dump or importation of sql script. + type: str + default: '' + target: + description: + - Location, on the remote host, of the dump file to read from or write to. + - Uncompressed SQL files (C(.sql)) as well as bzip2 (C(.bz2)), gzip (C(.gz)) and + xz (Added in 2.0) compressed files are supported. + type: path + single_transaction: + description: + - Execute the dump in a single transaction. + type: bool + default: no + quick: + description: + - Option used for dumping large tables. + type: bool + default: yes + ignore_tables: + description: + - A list of table names that will be ignored in the dump + of the form database_name.table_name. + type: list + elements: str + required: no + default: [] + hex_blob: + description: + - Dump binary columns using hexadecimal notation. + required: no + default: no + type: bool + force: + description: + - Continue dump or import even if we get an SQL error. + - Used only when I(state) is C(dump) or C(import). 
+ required: no + type: bool + default: no + master_data: + description: + - Option to dump a master replication server to produce a dump file + that can be used to set up another server as a slave of the master. + - C(0) to not include master data. + - C(1) to generate a 'CHANGE MASTER TO' statement + required on the slave to start the replication process. + - C(2) to generate a commented 'CHANGE MASTER TO'. + - Can be used when I(state=dump). + required: no + type: int + choices: [0, 1, 2] + default: 0 + skip_lock_tables: + description: + - Skip locking tables for read. Used when I(state=dump), ignored otherwise. + required: no + type: bool + default: no + dump_extra_args: + description: + - Provide additional arguments for mysqldump. + Used when I(state=dump) only, ignored otherwise. + required: no + type: str +seealso: +- module: mysql_info +- module: mysql_variables +- module: mysql_user +- module: mysql_replication +- name: MySQL command-line client reference + description: Complete reference of the MySQL command-line client documentation. + link: https://dev.mysql.com/doc/refman/8.0/en/mysql.html +- name: mysqldump reference + description: Complete reference of the ``mysqldump`` client utility documentation. + link: https://dev.mysql.com/doc/refman/8.0/en/mysqldump.html +- name: CREATE DATABASE reference + description: Complete reference of the CREATE DATABASE command documentation. + link: https://dev.mysql.com/doc/refman/8.0/en/create-database.html +- name: DROP DATABASE reference + description: Complete reference of the DROP DATABASE command documentation. + link: https://dev.mysql.com/doc/refman/8.0/en/drop-database.html +author: "Ansible Core Team" +requirements: + - mysql (command line binary) + - mysqldump (command line binary) +notes: + - Requires the mysql and mysqldump binaries on the remote host. + - This module is B(not idempotent) when I(state) is C(import), + and will import the dump file each time if run more than once. 
+extends_documentation_fragment:
+- community.general.mysql
+
+'''
+
+EXAMPLES = r'''
+- name: Create a new database with name 'bobdata'
+  mysql_db:
+    name: bobdata
+    state: present
+
+- name: Create new databases with names 'foo' and 'bar'
+  mysql_db:
+    name:
+    - foo
+    - bar
+    state: present
+
+# Copy database dump file to remote host and restore it to database 'my_db'
+- name: Copy database dump file
+  copy:
+    src: dump.sql.bz2
+    dest: /tmp
+
+- name: Restore database
+  mysql_db:
+    name: my_db
+    state: import
+    target: /tmp/dump.sql.bz2
+
+- name: Restore database ignoring errors
+  mysql_db:
+    name: my_db
+    state: import
+    target: /tmp/dump.sql.bz2
+    force: yes
+
+- name: Dump multiple databases
+  mysql_db:
+    state: dump
+    name: db_1,db_2
+    target: /tmp/dump.sql
+
+- name: Dump multiple databases
+  mysql_db:
+    state: dump
+    name:
+    - db_1
+    - db_2
+    target: /tmp/dump.sql
+
+- name: Dump all databases to hostname.sql
+  mysql_db:
+    state: dump
+    name: all
+    target: /tmp/dump.sql
+
+- name: Dump all databases to hostname.sql including master data
+  mysql_db:
+    state: dump
+    name: all
+    target: /tmp/dump.sql
+    master_data: 1
+
+# Import of sql script with encoding option
+- name: >
+    Import dump.sql with specific latin1 encoding,
+    similar to mysql -u <username> --default-character-set=latin1 -p < dump.sql
+  mysql_db:
+    state: import
+    name: all
+    encoding: latin1
+    target: /tmp/dump.sql
+
+# Dump of database with encoding option
+- name: >
+    Dump of database with specific latin1 encoding,
+    similar to mysqldump -u <username> --default-character-set=latin1 -p
+  mysql_db:
+    state: dump
+    name: db_1
+    encoding: latin1
+    target: /tmp/dump.sql
+
+- name: Delete database with name 'bobdata'
+  mysql_db:
+    name: bobdata
+    state: absent
+
+- name: Make sure there is neither a database with name 'foo', nor one with name 'bar'
+  mysql_db:
+    name:
+    - foo
+    - bar
+    state: absent
+
+# Dump database with argument not directly supported by this module
+# using dump_extra_args parameter
+- name: Dump databases without including triggers
+  mysql_db:
+    state: dump
+    name: foo
+    target: /tmp/dump.sql
+    dump_extra_args: --skip-triggers
+'''
+
+RETURN = r'''
+db:
+  description: Database names in string format delimited by white space.
+  returned: always
+  type: str
+  sample: "foo bar"
+db_list:
+  description: List of database names.
+  returned: always
+  type: list
+  sample: ["foo", "bar"]
+  version_added: '2.9'
+executed_commands:
+  description: List of commands that the module tried to run.
+  returned: if executed
+  type: list
+  sample: ["CREATE DATABASE acme"]
+  version_added: '2.10'
+'''
+
+import os
+import subprocess
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.database import mysql_quote_identifier
+from ansible_collections.community.general.plugins.module_utils.mysql import mysql_connect, mysql_driver, mysql_driver_fail_msg
+from ansible.module_utils.six.moves import shlex_quote
+from ansible.module_utils._text import to_native
+
+executed_commands = []
+
+# ===========================================
+# MySQL module specific support methods.
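+# An illustrative sketch (not part of the original module) of the escaping
+# convention db_exists() below relies on: SHOW DATABASES LIKE treats "_" as a
+# single-character wildcard, so underscores must be escaped or 'my_db' would
+# also match 'mysdb'. The helper name is hypothetical.
+def _like_pattern(db_name):
+    # Escape "_" so the LIKE pattern matches the literal database name only.
+    return db_name.replace("_", r"\_")
+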
+# + + +def db_exists(cursor, db): + res = 0 + for each_db in db: + res += cursor.execute("SHOW DATABASES LIKE %s", (each_db.replace("_", r"\_"),)) + return res == len(db) + + +def db_delete(cursor, db): + if not db: + return False + for each_db in db: + query = "DROP DATABASE %s" % mysql_quote_identifier(each_db, 'database') + executed_commands.append(query) + cursor.execute(query) + return True + + +def db_dump(module, host, user, password, db_name, target, all_databases, port, + config_file, socket=None, ssl_cert=None, ssl_key=None, ssl_ca=None, + single_transaction=None, quick=None, ignore_tables=None, hex_blob=None, + encoding=None, force=False, master_data=0, skip_lock_tables=False, dump_extra_args=None): + cmd = module.get_bin_path('mysqldump', True) + # If defined, mysqldump demands --defaults-extra-file be the first option + if config_file: + cmd += " --defaults-extra-file=%s" % shlex_quote(config_file) + if user is not None: + cmd += " --user=%s" % shlex_quote(user) + if password is not None: + cmd += " --password=%s" % shlex_quote(password) + if ssl_cert is not None: + cmd += " --ssl-cert=%s" % shlex_quote(ssl_cert) + if ssl_key is not None: + cmd += " --ssl-key=%s" % shlex_quote(ssl_key) + if ssl_ca is not None: + cmd += " --ssl-ca=%s" % shlex_quote(ssl_ca) + if force: + cmd += " --force" + if socket is not None: + cmd += " --socket=%s" % shlex_quote(socket) + else: + cmd += " --host=%s --port=%i" % (shlex_quote(host), port) + + if all_databases: + cmd += " --all-databases" + elif len(db_name) > 1: + cmd += " --databases {0}".format(' '.join(db_name)) + else: + cmd += " %s" % shlex_quote(' '.join(db_name)) + + if skip_lock_tables: + cmd += " --skip-lock-tables" + if (encoding is not None) and (encoding != ""): + cmd += " --default-character-set=%s" % shlex_quote(encoding) + if single_transaction: + cmd += " --single-transaction=true" + if quick: + cmd += " --quick" + if ignore_tables: + for an_ignored_table in ignore_tables: + cmd += " --ignore-table={0}".format(an_ignored_table) + if hex_blob: + cmd += " --hex-blob" + if master_data: + cmd += " --master-data=%s" % master_data + if dump_extra_args is not None: + cmd += " " + dump_extra_args + + path = None + if os.path.splitext(target)[-1] == '.gz': + path = module.get_bin_path('gzip', True) + elif os.path.splitext(target)[-1] == '.bz2': + path = module.get_bin_path('bzip2', True) + elif os.path.splitext(target)[-1] == '.xz': + path = module.get_bin_path('xz', True) + + if path: + cmd = '%s | %s > %s' % (cmd, path, shlex_quote(target)) + else: + cmd += " > %s" % shlex_quote(target) + + executed_commands.append(cmd) + rc, stdout, stderr = module.run_command(cmd, use_unsafe_shell=True) + return rc, stdout, stderr + + +def db_import(module, host, user, password, db_name, target, all_databases, port, config_file, + socket=None, ssl_cert=None, ssl_key=None, ssl_ca=None, encoding=None, force=False): + if not os.path.exists(target): + return module.fail_json(msg="target %s does not exist on the host" % target) + + cmd = [module.get_bin_path('mysql', True)] + # --defaults-file must go first, or errors out + if config_file: + cmd.append("--defaults-extra-file=%s" % shlex_quote(config_file)) + if user: + cmd.append("--user=%s" % shlex_quote(user)) + if password: + cmd.append("--password=%s" % shlex_quote(password)) + if ssl_cert is not None: + cmd.append("--ssl-cert=%s" % shlex_quote(ssl_cert)) + if ssl_key is not None: + cmd.append("--ssl-key=%s" % shlex_quote(ssl_key)) + if ssl_ca is not None: + cmd.append("--ssl-ca=%s" % 
shlex_quote(ssl_ca))
+    if force:
+        cmd.append("-f")
+    if socket is not None:
+        cmd.append("--socket=%s" % shlex_quote(socket))
+    else:
+        cmd.append("--host=%s" % shlex_quote(host))
+        cmd.append("--port=%i" % port)
+    if (encoding is not None) and (encoding != ""):
+        cmd.append("--default-character-set=%s" % shlex_quote(encoding))
+    if not all_databases:
+        cmd.append("--one-database")
+        cmd.append(shlex_quote(''.join(db_name)))
+
+    comp_prog_path = None
+    if os.path.splitext(target)[-1] == '.gz':
+        comp_prog_path = module.get_bin_path('gzip', required=True)
+    elif os.path.splitext(target)[-1] == '.bz2':
+        comp_prog_path = module.get_bin_path('bzip2', required=True)
+    elif os.path.splitext(target)[-1] == '.xz':
+        comp_prog_path = module.get_bin_path('xz', required=True)
+    if comp_prog_path:
+        # The following line is for returned data only:
+        executed_commands.append('%s -dc %s | %s' % (comp_prog_path, target, ' '.join(cmd)))
+        p1 = subprocess.Popen([comp_prog_path, '-dc', target], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+        p2 = subprocess.Popen(cmd, stdin=p1.stdout, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+        (stdout2, stderr2) = p2.communicate()
+        p1.stdout.close()
+        p1.wait()
+        if p1.returncode != 0:
+            stderr1 = p1.stderr.read()
+            return p1.returncode, '', stderr1
+        else:
+            return p2.returncode, stdout2, stderr2
+    else:
+        cmd = ' '.join(cmd)
+        cmd += " < %s" % shlex_quote(target)
+        executed_commands.append(cmd)
+        rc, stdout, stderr = module.run_command(cmd, use_unsafe_shell=True)
+        return rc, stdout, stderr
+
+
+def db_create(cursor, db, encoding, collation):
+    if not db:
+        return False
+    query_params = dict(enc=encoding, collate=collation)
+    res = 0
+    for each_db in db:
+        query = ['CREATE DATABASE %s' % mysql_quote_identifier(each_db, 'database')]
+        if encoding:
+            query.append("CHARACTER SET %(enc)s")
+        if collation:
+            query.append("COLLATE %(collate)s")
+        query = ' '.join(query)
+        res += cursor.execute(query, query_params)
+        try:
+            executed_commands.append(cursor.mogrify(query, query_params))
+        except AttributeError:
+            # Some drivers have no mogrify(); fall back to the last
+            # executed statement.
+            executed_commands.append(cursor._executed)
+        except Exception:
+            executed_commands.append(query)
+    return res > 0
+
+
+# ===========================================
+# Module execution.
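+# An illustrative sketch (not part of the original module) of how db_dump()
+# and db_import() above pick a compression helper from the target's file
+# extension; the names here are hypothetical.
+_COMPRESSORS = {'.gz': 'gzip', '.bz2': 'bzip2', '.xz': 'xz'}
+
+
+def _compressor_for(target):
+    # Return the compressor binary name for a dump target, or None for
+    # plain ".sql" files, mirroring the if/elif chains above.
+    return _COMPRESSORS.get(os.path.splitext(target)[-1])
+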
+# + + +def main(): + module = AnsibleModule( + argument_spec=dict( + login_user=dict(type='str'), + login_password=dict(type='str', no_log=True), + login_host=dict(type='str', default='localhost'), + login_port=dict(type='int', default=3306), + login_unix_socket=dict(type='str'), + name=dict(type='list', required=True, aliases=['db']), + encoding=dict(type='str', default=''), + collation=dict(type='str', default=''), + target=dict(type='path'), + state=dict(type='str', default='present', choices=['absent', 'dump', 'import', 'present']), + client_cert=dict(type='path', aliases=['ssl_cert']), + client_key=dict(type='path', aliases=['ssl_key']), + ca_cert=dict(type='path', aliases=['ssl_ca']), + connect_timeout=dict(type='int', default=30), + config_file=dict(type='path', default='~/.my.cnf'), + single_transaction=dict(type='bool', default=False), + quick=dict(type='bool', default=True), + ignore_tables=dict(type='list', default=[]), + hex_blob=dict(default=False, type='bool'), + force=dict(type='bool', default=False), + master_data=dict(type='int', default=0, choices=[0, 1, 2]), + skip_lock_tables=dict(type='bool', default=False), + dump_extra_args=dict(type='str'), + ), + supports_check_mode=True, + ) + + if mysql_driver is None: + module.fail_json(msg=mysql_driver_fail_msg) + + db = module.params["name"] + if not db: + module.exit_json(changed=False, db=db, db_list=[]) + db = [each_db.strip() for each_db in db] + + encoding = module.params["encoding"] + collation = module.params["collation"] + state = module.params["state"] + target = module.params["target"] + socket = module.params["login_unix_socket"] + login_port = module.params["login_port"] + if login_port < 0 or login_port > 65535: + module.fail_json(msg="login_port must be a valid unix port number (0-65535)") + ssl_cert = module.params["client_cert"] + ssl_key = module.params["client_key"] + ssl_ca = module.params["ca_cert"] + connect_timeout = module.params['connect_timeout'] + config_file = module.params['config_file'] + login_password = module.params["login_password"] + login_user = module.params["login_user"] + login_host = module.params["login_host"] + ignore_tables = module.params["ignore_tables"] + for a_table in ignore_tables: + if a_table == "": + module.fail_json(msg="Name of ignored table cannot be empty") + single_transaction = module.params["single_transaction"] + quick = module.params["quick"] + hex_blob = module.params["hex_blob"] + force = module.params["force"] + master_data = module.params["master_data"] + skip_lock_tables = module.params["skip_lock_tables"] + dump_extra_args = module.params["dump_extra_args"] + + if len(db) > 1 and state == 'import': + module.fail_json(msg="Multiple databases are not supported with state=import") + db_name = ' '.join(db) + + all_databases = False + if state in ['dump', 'import']: + if target is None: + module.fail_json(msg="with state=%s target is required" % state) + if db == ['all']: + all_databases = True + else: + if db == ['all']: + module.fail_json(msg="name is not allowed to equal 'all' unless state equals import, or dump.") + try: + cursor, db_conn = mysql_connect(module, login_user, login_password, config_file, ssl_cert, ssl_key, ssl_ca, + connect_timeout=connect_timeout) + except Exception as e: + if os.path.exists(config_file): + module.fail_json(msg="unable to connect to database, check login_user and login_password are correct or %s has the credentials. " + "Exception message: %s" % (config_file, to_native(e))) + else: + module.fail_json(msg="unable to find %s. 
Exception message: %s" % (config_file, to_native(e))) + + changed = False + if not os.path.exists(config_file): + config_file = None + + existence_list = [] + non_existence_list = [] + + if not all_databases: + for each_database in db: + if db_exists(cursor, [each_database]): + existence_list.append(each_database) + else: + non_existence_list.append(each_database) + + if state == "absent": + if module.check_mode: + module.exit_json(changed=bool(existence_list), db=db_name, db_list=db) + try: + changed = db_delete(cursor, existence_list) + except Exception as e: + module.fail_json(msg="error deleting database: %s" % to_native(e)) + module.exit_json(changed=changed, db=db_name, db_list=db, executed_commands=executed_commands) + elif state == "present": + if module.check_mode: + module.exit_json(changed=bool(non_existence_list), db=db_name, db_list=db) + changed = False + if non_existence_list: + try: + changed = db_create(cursor, non_existence_list, encoding, collation) + except Exception as e: + module.fail_json(msg="error creating database: %s" % to_native(e), + exception=traceback.format_exc()) + module.exit_json(changed=changed, db=db_name, db_list=db, executed_commands=executed_commands) + elif state == "dump": + if non_existence_list and not all_databases: + module.fail_json(msg="Cannot dump database(s) %r - not found" % (', '.join(non_existence_list))) + if module.check_mode: + module.exit_json(changed=True, db=db_name, db_list=db) + rc, stdout, stderr = db_dump(module, login_host, login_user, + login_password, db, target, all_databases, + login_port, config_file, socket, ssl_cert, ssl_key, + ssl_ca, single_transaction, quick, ignore_tables, + hex_blob, encoding, force, master_data, skip_lock_tables, + dump_extra_args) + if rc != 0: + module.fail_json(msg="%s" % stderr) + module.exit_json(changed=True, db=db_name, db_list=db, msg=stdout, + executed_commands=executed_commands) + elif state == "import": + if module.check_mode: + module.exit_json(changed=True, db=db_name, db_list=db) + if non_existence_list and not all_databases: + try: + db_create(cursor, non_existence_list, encoding, collation) + except Exception as e: + module.fail_json(msg="error creating database: %s" % to_native(e), + exception=traceback.format_exc()) + rc, stdout, stderr = db_import(module, login_host, login_user, + login_password, db, target, + all_databases, + login_port, config_file, + socket, ssl_cert, ssl_key, ssl_ca, encoding, force) + if rc != 0: + module.fail_json(msg="%s" % stderr) + module.exit_json(changed=True, db=db_name, db_list=db, msg=stdout, + executed_commands=executed_commands) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/database/mysql/mysql_info.py b/plugins/modules/database/mysql/mysql_info.py new file mode 100644 index 0000000000..281ac60b86 --- /dev/null +++ b/plugins/modules/database/mysql/mysql_info.py @@ -0,0 +1,529 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2019, Andrew Klychkov (@Andersson007) +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = { + 'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community' +} + +DOCUMENTATION = r''' +--- +module: mysql_info +short_description: Gather information about MySQL servers +description: +- Gathers information about MySQL servers. 
+ +options: + filter: + description: + - Limit the collected information by comma separated string or YAML list. + - Allowable values are C(version), C(databases), C(settings), C(global_status), + C(users), C(engines), C(master_status), C(slave_status), C(slave_hosts). + - By default, collects all subsets. + - You can use '!' before value (for example, C(!settings)) to exclude it from the information. + - If you pass including and excluding values to the filter, for example, I(filter=!settings,version), + the excluding values, C(!settings) in this case, will be ignored. + type: list + elements: str + login_db: + description: + - Database name to connect to. + - It makes sense if I(login_user) is allowed to connect to a specific database only. + type: str + exclude_fields: + description: + - List of fields which are not needed to collect. + - "Supports elements: C(db_size). Unsupported elements will be ignored" + type: list + elements: str + return_empty_dbs: + description: + - Includes names of empty databases to returned dictionary. + type: bool + default: no + +notes: +- Calculating the size of a database might be slow, depending on the number and size of tables in it. + To avoid this, use I(exclude_fields=db_size). + +seealso: +- module: mysql_variables +- module: mysql_db +- module: mysql_user +- module: mysql_replication + +author: +- Andrew Klychkov (@Andersson007) + +extends_documentation_fragment: +- community.general.mysql + +''' + +EXAMPLES = r''' +# Display info from mysql-hosts group (using creds from ~/.my.cnf to connect): +# ansible mysql-hosts -m mysql_info + +# Display only databases and users info: +# ansible mysql-hosts -m mysql_info -a 'filter=databases,users' + +# Display only slave status: +# ansible standby -m mysql_info -a 'filter=slave_status' + +# Display all info from databases group except settings: +# ansible databases -m mysql_info -a 'filter=!settings' + +- name: Collect all possible information using passwordless root access + mysql_info: + login_user: root + +- name: Get MySQL version with non-default credentials + mysql_info: + login_user: mysuperuser + login_password: mysuperpass + filter: version + +- name: Collect all info except settings and users by root + mysql_info: + login_user: root + login_password: rootpass + filter: "!settings,!users" + +- name: Collect info about databases and version using ~/.my.cnf as a credential file + become: yes + mysql_info: + filter: + - databases + - version + +- name: Collect info about databases and version using ~alice/.my.cnf as a credential file + become: yes + mysql_info: + config_file: /home/alice/.my.cnf + filter: + - databases + - version + +- name: Collect info about databases including empty and excluding their sizes + become: yes + mysql_info: + config_file: /home/alice/.my.cnf + filter: + - databases + exclude_fields: db_size + return_empty_dbs: yes +''' + +RETURN = r''' +version: + description: Database server version. + returned: if not excluded by filter + type: dict + sample: { "version": { "major": 5, "minor": 5, "release": 60 } } + contains: + major: + description: Major server version. + returned: if not excluded by filter + type: int + sample: 5 + minor: + description: Minor server version. + returned: if not excluded by filter + type: int + sample: 5 + release: + description: Release server version. + returned: if not excluded by filter + type: int + sample: 60 +databases: + description: Information about databases. 
+ returned: if not excluded by filter + type: dict + sample: + - { "mysql": { "size": 656594 }, "information_schema": { "size": 73728 } } + contains: + size: + description: Database size in bytes. + returned: if not excluded by filter + type: dict + sample: { 'size': 656594 } +settings: + description: Global settings (variables) information. + returned: if not excluded by filter + type: dict + sample: + - { "innodb_open_files": 300, innodb_page_size": 16384 } +global_status: + description: Global status information. + returned: if not excluded by filter + type: dict + sample: + - { "Innodb_buffer_pool_read_requests": 123, "Innodb_buffer_pool_reads": 32 } + version_added: "2.10" +users: + description: Users information. + returned: if not excluded by filter + type: dict + sample: + - { "localhost": { "root": { "Alter_priv": "Y", "Alter_routine_priv": "Y" } } } +engines: + description: Information about the server's storage engines. + returned: if not excluded by filter + type: dict + sample: + - { "CSV": { "Comment": "CSV storage engine", "Savepoints": "NO", "Support": "YES", "Transactions": "NO", "XA": "NO" } } +master_status: + description: Master status information. + returned: if master + type: dict + sample: + - { "Binlog_Do_DB": "", "Binlog_Ignore_DB": "mysql", "File": "mysql-bin.000001", "Position": 769 } +slave_status: + description: Slave status information. + returned: if standby + type: dict + sample: + - { "192.168.1.101": { "3306": { "replication_user": { "Connect_Retry": 60, "Exec_Master_Log_Pos": 769, "Last_Errno": 0 } } } } +slave_hosts: + description: Slave status information. + returned: if master + type: dict + sample: + - { "2": { "Host": "", "Master_id": 1, "Port": 3306 } } +''' + +from decimal import Decimal + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.mysql import ( + mysql_connect, + mysql_common_argument_spec, + mysql_driver, + mysql_driver_fail_msg, +) +from ansible.module_utils.six import iteritems +from ansible.module_utils._text import to_native + + +# =========================================== +# MySQL module specific support methods. +# + +class MySQL_Info(object): + + """Class for collection MySQL instance information. + + Arguments: + module (AnsibleModule): Object of AnsibleModule class. + cursor (pymysql/mysql-python): Cursor class for interaction with + the database. + + Note: + If you need to add a new subset: + 1. add a new key with the same name to self.info attr in self.__init__() + 2. add a new private method to get the information + 3. add invocation of the new method to self.__collect() + 4. add info about the new subset to the DOCUMENTATION block + 5. add info about the new subset with an example to RETURN block + """ + + def __init__(self, module, cursor): + self.module = module + self.cursor = cursor + self.info = { + 'version': {}, + 'databases': {}, + 'settings': {}, + 'global_status': {}, + 'engines': {}, + 'users': {}, + 'master_status': {}, + 'slave_hosts': {}, + 'slave_status': {}, + } + + def get_info(self, filter_, exclude_fields, return_empty_dbs): + """Get MySQL instance information based on filter_. + + Arguments: + filter_ (list): List of collected subsets (e.g., databases, users, etc.), + when it is empty, return all available information. 
+ """ + self.__collect(exclude_fields, return_empty_dbs) + + inc_list = [] + exc_list = [] + + if filter_: + partial_info = {} + + for fi in filter_: + if fi.lstrip('!') not in self.info: + self.module.warn('filter element: %s is not allowable, ignored' % fi) + continue + + if fi[0] == '!': + exc_list.append(fi.lstrip('!')) + + else: + inc_list.append(fi) + + if inc_list: + for i in self.info: + if i in inc_list: + partial_info[i] = self.info[i] + + else: + for i in self.info: + if i not in exc_list: + partial_info[i] = self.info[i] + + return partial_info + + else: + return self.info + + def __collect(self, exclude_fields, return_empty_dbs): + """Collect all possible subsets.""" + self.__get_databases(exclude_fields, return_empty_dbs) + self.__get_global_variables() + self.__get_global_status() + self.__get_engines() + self.__get_users() + self.__get_master_status() + self.__get_slave_status() + self.__get_slaves() + + def __get_engines(self): + """Get storage engines info.""" + res = self.__exec_sql('SHOW ENGINES') + + if res: + for line in res: + engine = line['Engine'] + self.info['engines'][engine] = {} + + for vname, val in iteritems(line): + if vname != 'Engine': + self.info['engines'][engine][vname] = val + + def __convert(self, val): + """Convert unserializable data.""" + try: + if isinstance(val, Decimal): + val = float(val) + else: + val = int(val) + + except ValueError: + pass + + except TypeError: + pass + + return val + + def __get_global_variables(self): + """Get global variables (instance settings).""" + res = self.__exec_sql('SHOW GLOBAL VARIABLES') + + if res: + for var in res: + self.info['settings'][var['Variable_name']] = self.__convert(var['Value']) + + ver = self.info['settings']['version'].split('.') + release = ver[2].split('-')[0] + + self.info['version'] = dict( + major=int(ver[0]), + minor=int(ver[1]), + release=int(release), + ) + + def __get_global_status(self): + """Get global status.""" + res = self.__exec_sql('SHOW GLOBAL STATUS') + + if res: + for var in res: + self.info['global_status'][var['Variable_name']] = self.__convert(var['Value']) + + def __get_master_status(self): + """Get master status if the instance is a master.""" + res = self.__exec_sql('SHOW MASTER STATUS') + if res: + for line in res: + for vname, val in iteritems(line): + self.info['master_status'][vname] = self.__convert(val) + + def __get_slave_status(self): + """Get slave status if the instance is a slave.""" + res = self.__exec_sql('SHOW SLAVE STATUS') + if res: + for line in res: + host = line['Master_Host'] + if host not in self.info['slave_status']: + self.info['slave_status'][host] = {} + + port = line['Master_Port'] + if port not in self.info['slave_status'][host]: + self.info['slave_status'][host][port] = {} + + user = line['Master_User'] + if user not in self.info['slave_status'][host][port]: + self.info['slave_status'][host][port][user] = {} + + for vname, val in iteritems(line): + if vname not in ('Master_Host', 'Master_Port', 'Master_User'): + self.info['slave_status'][host][port][user][vname] = self.__convert(val) + + def __get_slaves(self): + """Get slave hosts info if the instance is a master.""" + res = self.__exec_sql('SHOW SLAVE HOSTS') + if res: + for line in res: + srv_id = line['Server_id'] + if srv_id not in self.info['slave_hosts']: + self.info['slave_hosts'][srv_id] = {} + + for vname, val in iteritems(line): + if vname != 'Server_id': + self.info['slave_hosts'][srv_id][vname] = self.__convert(val) + + def __get_users(self): + """Get user info.""" + res = 
self.__exec_sql('SELECT * FROM mysql.user') + if res: + for line in res: + host = line['Host'] + if host not in self.info['users']: + self.info['users'][host] = {} + + user = line['User'] + self.info['users'][host][user] = {} + + for vname, val in iteritems(line): + if vname not in ('Host', 'User'): + self.info['users'][host][user][vname] = self.__convert(val) + + def __get_databases(self, exclude_fields, return_empty_dbs): + """Get info about databases.""" + if not exclude_fields: + query = ('SELECT table_schema AS "name", ' + 'SUM(data_length + index_length) AS "size" ' + 'FROM information_schema.TABLES GROUP BY table_schema') + else: + if 'db_size' in exclude_fields: + query = ('SELECT table_schema AS "name" ' + 'FROM information_schema.TABLES GROUP BY table_schema') + + res = self.__exec_sql(query) + + if res: + for db in res: + self.info['databases'][db['name']] = {} + + if not exclude_fields or 'db_size' not in exclude_fields: + self.info['databases'][db['name']]['size'] = int(db['size']) + + # If empty dbs are not needed in the returned dict, exit from the method + if not return_empty_dbs: + return None + + # Add info about empty databases (issue #65727): + res = self.__exec_sql('SHOW DATABASES') + if res: + for db in res: + if db['Database'] not in self.info['databases']: + self.info['databases'][db['Database']] = {} + + if not exclude_fields or 'db_size' not in exclude_fields: + self.info['databases'][db['Database']]['size'] = 0 + + def __exec_sql(self, query, ddl=False): + """Execute SQL. + + Arguments: + ddl (bool): If True, return True or False. + Used for queries that don't return any rows + (mainly for DDL queries) (default False). + """ + try: + self.cursor.execute(query) + + if not ddl: + res = self.cursor.fetchall() + return res + return True + + except Exception as e: + self.module.fail_json(msg="Cannot execute SQL '%s': %s" % (query, to_native(e))) + return False + + +# =========================================== +# Module execution. +# + + +def main(): + argument_spec = mysql_common_argument_spec() + argument_spec.update( + login_db=dict(type='str'), + filter=dict(type='list'), + exclude_fields=dict(type='list'), + return_empty_dbs=dict(type='bool', default=False), + ) + + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + + db = module.params['login_db'] + connect_timeout = module.params['connect_timeout'] + login_user = module.params['login_user'] + login_password = module.params['login_password'] + ssl_cert = module.params['client_cert'] + ssl_key = module.params['client_key'] + ssl_ca = module.params['ca_cert'] + config_file = module.params['config_file'] + filter_ = module.params['filter'] + exclude_fields = module.params['exclude_fields'] + return_empty_dbs = module.params['return_empty_dbs'] + + if filter_: + filter_ = [f.strip() for f in filter_] + + if exclude_fields: + exclude_fields = set([f.strip() for f in exclude_fields]) + + if mysql_driver is None: + module.fail_json(msg=mysql_driver_fail_msg) + + try: + cursor, db_conn = mysql_connect(module, login_user, login_password, + config_file, ssl_cert, ssl_key, ssl_ca, db, + connect_timeout=connect_timeout, cursor_class='DictCursor') + except Exception as e: + module.fail_json(msg="unable to connect to database, check login_user and login_password are correct or %s has the credentials. 
" + "Exception message: %s" % (config_file, to_native(e))) + + ############################### + # Create object and do main job + + mysql = MySQL_Info(module, cursor) + + module.exit_json(changed=False, **mysql.get_info(filter_, exclude_fields, return_empty_dbs)) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/database/mysql/mysql_query.py b/plugins/modules/database/mysql/mysql_query.py new file mode 100644 index 0000000000..ddd07a9fd6 --- /dev/null +++ b/plugins/modules/database/mysql/mysql_query.py @@ -0,0 +1,238 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2020, Andrew Klychkov (@Andersson007) +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +ANSIBLE_METADATA = { + 'metadata_version': '1.1', + 'supported_by': 'community', + 'status': ['preview'] +} + +DOCUMENTATION = r''' +--- +module: mysql_query +short_description: Run MySQL queries +description: +- Runs arbitrary MySQL queries. +- Pay attention, the module does not support check mode! + All queries will be executed in autocommit mode. +options: + query: + description: + - SQL query to run. Multiple queries can be passed using YAML list syntax. + type: list + elements: str + required: yes + positional_args: + description: + - List of values to be passed as positional arguments to the query. + - Mutually exclusive with I(named_args). + type: list + named_args: + description: + - Dictionary of key-value arguments to pass to the query. + - Mutually exclusive with I(positional_args). + type: dict + login_db: + description: + - Name of database to connect to and run queries against. + type: str + single_transaction: + description: + - Where passed queries run in a single transaction (C(yes)) or commit them one-by-one (C(no)). + type: bool + default: no +notes: +- To pass a query containing commas, use YAML list notation with hyphen (see EXAMPLES block). +author: +- Andrew Klychkov (@Andersson007) +extends_documentation_fragment: +- community.general.mysql + +''' + +EXAMPLES = r''' +- name: Simple select query to acme db + mysql_query: + login_db: acme + query: SELECT * FROM orders + +- name: Select query to db acme with positional arguments + mysql_query: + login_db: acme + query: SELECT * FROM acme WHERE id = %s AND story = %s + positional_args: + - 1 + - test + +- name: Select query to test_db with named_args + mysql_query: + login_db: test_db + query: SELECT * FROM test WHERE id = %(id_val)s AND story = %(story_val)s + named_args: + id_val: 1 + story_val: test + +- name: Run several insert queries against db test_db in single transaction + mysql_query: + login_db: test_db + query: + - INSERT INTO articles (id, story) VALUES (2, 'my_long_story') + - INSERT INTO prices (id, price) VALUES (123, '100.00') + single_transaction: yes +''' + +RETURN = r''' +executed_queries: + description: List of executed queries. + returned: always + type: list + sample: ['SELECT * FROM bar', 'UPDATE bar SET id = 1 WHERE id = 2'] +query_result: + description: + - List of lists (sublist for each query) containing dictionaries + in column:value form representing returned rows. + returned: changed + type: list + sample: [[{"Column": "Value1"},{"Column": "Value2"}], [{"ID": 1}, {"ID": 2}]] +rowcount: + description: Number of affected rows for each subquery. 
+ returned: changed + type: list + sample: [5, 1] +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.mysql import ( + mysql_connect, + mysql_common_argument_spec, + mysql_driver, + mysql_driver_fail_msg, +) +from ansible.module_utils._text import to_native + +DML_QUERY_KEYWORDS = ('INSERT', 'UPDATE', 'DELETE') +# TRUNCATE is not DDL query but it also returns 0 rows affected: +DDL_QUERY_KEYWORDS = ('CREATE', 'DROP', 'ALTER', 'RENAME', 'TRUNCATE') + + +# =========================================== +# Module execution. +# + +def main(): + argument_spec = mysql_common_argument_spec() + argument_spec.update( + query=dict(type='list', elements='str', required=True), + login_db=dict(type='str'), + positional_args=dict(type='list'), + named_args=dict(type='dict'), + single_transaction=dict(type='bool', default=False), + ) + + module = AnsibleModule( + argument_spec=argument_spec, + mutually_exclusive=( + ('positional_args', 'named_args'), + ), + ) + + db = module.params['login_db'] + connect_timeout = module.params['connect_timeout'] + login_user = module.params['login_user'] + login_password = module.params['login_password'] + ssl_cert = module.params['client_cert'] + ssl_key = module.params['client_key'] + ssl_ca = module.params['ca_cert'] + config_file = module.params['config_file'] + query = module.params["query"] + if module.params["single_transaction"]: + autocommit = False + else: + autocommit = True + # Prepare args: + if module.params.get("positional_args"): + arguments = module.params["positional_args"] + elif module.params.get("named_args"): + arguments = module.params["named_args"] + else: + arguments = None + + if mysql_driver is None: + module.fail_json(msg=mysql_driver_fail_msg) + + # Connect to DB: + try: + cursor, db_connection = mysql_connect(module, login_user, login_password, + config_file, ssl_cert, ssl_key, ssl_ca, db, + connect_timeout=connect_timeout, + cursor_class='DictCursor', autocommit=autocommit) + except Exception as e: + module.fail_json(msg="unable to connect to database, check login_user and " + "login_password are correct or %s has the credentials. 
" + "Exception message: %s" % (config_file, to_native(e))) + # Set defaults: + changed = False + + max_keyword_len = len(max(DML_QUERY_KEYWORDS + DDL_QUERY_KEYWORDS, key=len)) + + # Execute query: + query_result = [] + executed_queries = [] + rowcount = [] + for q in query: + try: + cursor.execute(q, arguments) + + except Exception as e: + if not autocommit: + db_connection.rollback() + + cursor.close() + module.fail_json(msg="Cannot execute SQL '%s' args [%s]: %s" % (q, arguments, to_native(e))) + + try: + query_result.append([dict(row) for row in cursor.fetchall()]) + + except Exception as e: + if not autocommit: + db_connection.rollback() + + module.fail_json(msg="Cannot fetch rows from cursor: %s" % to_native(e)) + + # Check DML or DDL keywords in query and set changed accordingly: + q = q.lstrip()[0:max_keyword_len].upper() + for keyword in DML_QUERY_KEYWORDS: + if keyword in q and cursor.rowcount > 0: + changed = True + + for keyword in DDL_QUERY_KEYWORDS: + if keyword in q: + changed = True + + executed_queries.append(cursor._last_executed) + rowcount.append(cursor.rowcount) + + # When the module run with the single_transaction == True: + if not autocommit: + db_connection.commit() + + # Create dict with returned values: + kw = { + 'changed': changed, + 'executed_queries': executed_queries, + 'query_result': query_result, + 'rowcount': rowcount, + } + + # Exit: + module.exit_json(**kw) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/database/mysql/mysql_replication.py b/plugins/modules/database/mysql/mysql_replication.py new file mode 100644 index 0000000000..a1743f907f --- /dev/null +++ b/plugins/modules/database/mysql/mysql_replication.py @@ -0,0 +1,573 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2013, Balazs Pocze +# Copyright: (c) 2019, Andrew Klychkov (@Andersson007) +# Certain parts are taken from Mark Theunissen's mysqldb module +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = r''' +--- +module: mysql_replication +short_description: Manage MySQL replication +description: +- Manages MySQL server replication, slave, master status, get and change master host. +author: +- Balazs Pocze (@banyek) +- Andrew Klychkov (@Andersson007) +options: + mode: + description: + - Module operating mode. Could be + C(changemaster) (CHANGE MASTER TO), + C(getmaster) (SHOW MASTER STATUS), + C(getslave) (SHOW SLAVE STATUS), + C(startslave) (START SLAVE), + C(stopslave) (STOP SLAVE), + C(resetmaster) (RESET MASTER) - supported from Ansible 2.10, + C(resetslave) (RESET SLAVE), + C(resetslaveall) (RESET SLAVE ALL). + type: str + choices: + - changemaster + - getmaster + - getslave + - startslave + - stopslave + - resetmaster + - resetslave + - resetslaveall + default: getslave + master_host: + description: + - Same as mysql variable. + type: str + master_user: + description: + - Same as mysql variable. + type: str + master_password: + description: + - Same as mysql variable. + type: str + master_port: + description: + - Same as mysql variable. + type: int + master_connect_retry: + description: + - Same as mysql variable. + type: int + master_log_file: + description: + - Same as mysql variable. + type: str + master_log_pos: + description: + - Same as mysql variable. 
+ type: int + relay_log_file: + description: + - Same as mysql variable. + type: str + relay_log_pos: + description: + - Same as mysql variable. + type: int + master_ssl: + description: + - Same as mysql variable. + type: bool + master_ssl_ca: + description: + - Same as mysql variable. + type: str + master_ssl_capath: + description: + - Same as mysql variable. + type: str + master_ssl_cert: + description: + - Same as mysql variable. + type: str + master_ssl_key: + description: + - Same as mysql variable. + type: str + master_ssl_cipher: + description: + - Same as mysql variable. + type: str + master_auto_position: + description: + - Whether the host uses GTID based replication or not. + type: bool + master_use_gtid: + description: + - Configures the slave to use the MariaDB Global Transaction ID. + - C(disabled) equals MASTER_USE_GTID=no command. + - To find information about available values see + U(https://mariadb.com/kb/en/library/change-master-to/#master_use_gtid). + - Available since MariaDB 10.0.2. + choices: [current_pos, slave_pos, disabled] + type: str + master_delay: + description: + - Time lag behind the master's state (in seconds). + - Available from MySQL 5.6. + - For more information see U(https://dev.mysql.com/doc/refman/8.0/en/replication-delayed.html). + type: int + connection_name: + description: + - Name of the master connection. + - Supported from MariaDB 10.0.1. + - Mutually exclusive with I(channel). + - For more information see U(https://mariadb.com/kb/en/library/multi-source-replication/). + type: str + channel: + description: + - Name of replication channel. + - Multi-source replication is supported from MySQL 5.7. + - Mutually exclusive with I(connection_name). + - For more information see U(https://dev.mysql.com/doc/refman/8.0/en/replication-multi-source.html). + type: str + fail_on_error: + description: + - Fails on error when calling mysql. + type: bool + default: False + +notes: +- If an empty value for the parameter of string type is needed, use an empty string. + +extends_documentation_fragment: +- community.general.mysql + + +seealso: +- module: mysql_info +- name: MySQL replication reference + description: Complete reference of the MySQL replication documentation. + link: https://dev.mysql.com/doc/refman/8.0/en/replication.html +- name: MariaDB replication reference + description: Complete reference of the MariaDB replication documentation. 
+ link: https://mariadb.com/kb/en/library/setting-up-replication/ +''' + +EXAMPLES = r''' +- name: Stop mysql slave thread + mysql_replication: + mode: stopslave + +- name: Get master binlog file name and binlog position + mysql_replication: + mode: getmaster + +- name: Change master to master server 192.0.2.1 and use binary log 'mysql-bin.000009' with position 4578 + mysql_replication: + mode: changemaster + master_host: 192.0.2.1 + master_log_file: mysql-bin.000009 + master_log_pos: 4578 + +- name: Check slave status using port 3308 + mysql_replication: + mode: getslave + login_host: ansible.example.com + login_port: 3308 + +- name: On MariaDB change master to use GTID current_pos + mysql_replication: + mode: changemaster + master_use_gtid: current_pos + +- name: Change master to use replication delay 3600 seconds + mysql_replication: + mode: changemaster + master_host: 192.0.2.1 + master_delay: 3600 + +- name: Start MariaDB standby with connection name master-1 + mysql_replication: + mode: startslave + connection_name: master-1 + +- name: Stop replication in channel master-1 + mysql_replication: + mode: stopslave + channel: master-1 + +- name: > + Run RESET MASTER command which will delete all existing binary log files + and reset the binary log index file on the master + mysql_replication: + mode: resetmaster + +- name: Run start slave and fail the task on errors + mysql_replication: + mode: startslave + connection_name: master-1 + fail_on_error: yes + +- name: Change master and fail on error (like when slave thread is running) + mysql_replication: + mode: changemaster + fail_on_error: yes + +''' + +RETURN = r''' +queries: + description: List of executed queries which modified DB's state. + returned: always + type: list + sample: ["CHANGE MASTER TO MASTER_HOST='master2.example.com',MASTER_PORT=3306"] + version_added: '2.10' +''' + +import os +import warnings + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.mysql import mysql_connect, mysql_driver, mysql_driver_fail_msg +from ansible.module_utils._text import to_native + +executed_queries = [] + + +def get_master_status(cursor): + cursor.execute("SHOW MASTER STATUS") + masterstatus = cursor.fetchone() + return masterstatus + + +def get_slave_status(cursor, connection_name='', channel=''): + if connection_name: + query = "SHOW SLAVE '%s' STATUS" % connection_name + else: + query = "SHOW SLAVE STATUS" + + if channel: + query += " FOR CHANNEL '%s'" % channel + + cursor.execute(query) + slavestatus = cursor.fetchone() + return slavestatus + + +def stop_slave(module, cursor, connection_name='', channel='', fail_on_error=False): + if connection_name: + query = "STOP SLAVE '%s'" % connection_name + else: + query = 'STOP SLAVE' + + if channel: + query += " FOR CHANNEL '%s'" % channel + + try: + executed_queries.append(query) + cursor.execute(query) + stopped = True + except mysql_driver.Warning as e: + stopped = False + except Exception as e: + if fail_on_error: + module.fail_json(msg="STOP SLAVE failed: %s" % to_native(e)) + stopped = False + return stopped + + +def reset_slave(module, cursor, connection_name='', channel='', fail_on_error=False): + if connection_name: + query = "RESET SLAVE '%s'" % connection_name + else: + query = 'RESET SLAVE' + + if channel: + query += " FOR CHANNEL '%s'" % channel + + try: + executed_queries.append(query) + cursor.execute(query) + reset = True + except mysql_driver.Warning as e: + reset = False + except Exception as e: + if 
fail_on_error: + module.fail_json(msg="RESET SLAVE failed: %s" % to_native(e)) + reset = False + return reset + + +def reset_slave_all(module, cursor, connection_name='', channel='', fail_on_error=False): + if connection_name: + query = "RESET SLAVE '%s' ALL" % connection_name + else: + query = 'RESET SLAVE ALL' + + if channel: + query += " FOR CHANNEL '%s'" % channel + + try: + executed_queries.append(query) + cursor.execute(query) + reset = True + except mysql_driver.Warning as e: + reset = False + except Exception as e: + if fail_on_error: + module.fail_json(msg="RESET SLAVE ALL failed: %s" % to_native(e)) + reset = False + return reset + + +def reset_master(module, cursor, fail_on_error=False): + query = 'RESET MASTER' + try: + executed_queries.append(query) + cursor.execute(query) + reset = True + except mysql_driver.Warning as e: + reset = False + except Exception as e: + if fail_on_error: + module.fail_json(msg="RESET MASTER failed: %s" % to_native(e)) + reset = False + return reset + + +def start_slave(module, cursor, connection_name='', channel='', fail_on_error=False): + if connection_name: + query = "START SLAVE '%s'" % connection_name + else: + query = 'START SLAVE' + + if channel: + query += " FOR CHANNEL '%s'" % channel + + try: + executed_queries.append(query) + cursor.execute(query) + started = True + except mysql_driver.Warning as e: + started = False + except Exception as e: + if fail_on_error: + module.fail_json(msg="START SLAVE failed: %s" % to_native(e)) + started = False + return started + + +def changemaster(cursor, chm, connection_name='', channel=''): + if connection_name: + query = "CHANGE MASTER '%s' TO %s" % (connection_name, ','.join(chm)) + else: + query = 'CHANGE MASTER TO %s' % ','.join(chm) + + if channel: + query += " FOR CHANNEL '%s'" % channel + + executed_queries.append(query) + cursor.execute(query) + + +def main(): + module = AnsibleModule( + argument_spec=dict( + login_user=dict(type='str'), + login_password=dict(type='str', no_log=True), + login_host=dict(type='str', default='localhost'), + login_port=dict(type='int', default=3306), + login_unix_socket=dict(type='str'), + mode=dict(type='str', default='getslave', choices=[ + 'getmaster', 'getslave', 'changemaster', 'stopslave', + 'startslave', 'resetmaster', 'resetslave', 'resetslaveall']), + master_auto_position=dict(type='bool', default=False), + master_host=dict(type='str'), + master_user=dict(type='str'), + master_password=dict(type='str', no_log=True), + master_port=dict(type='int'), + master_connect_retry=dict(type='int'), + master_log_file=dict(type='str'), + master_log_pos=dict(type='int'), + relay_log_file=dict(type='str'), + relay_log_pos=dict(type='int'), + master_ssl=dict(type='bool', default=False), + master_ssl_ca=dict(type='str'), + master_ssl_capath=dict(type='str'), + master_ssl_cert=dict(type='str'), + master_ssl_key=dict(type='str'), + master_ssl_cipher=dict(type='str'), + connect_timeout=dict(type='int', default=30), + config_file=dict(type='path', default='~/.my.cnf'), + client_cert=dict(type='path', aliases=['ssl_cert']), + client_key=dict(type='path', aliases=['ssl_key']), + ca_cert=dict(type='path', aliases=['ssl_ca']), + master_use_gtid=dict(type='str', choices=['current_pos', 'slave_pos', 'disabled']), + master_delay=dict(type='int'), + connection_name=dict(type='str'), + channel=dict(type='str'), + fail_on_error=dict(type='bool', default=False), + ), + mutually_exclusive=[ + ['connection_name', 'channel'] + ], + ) + mode = module.params["mode"] + master_host = 
module.params["master_host"] + master_user = module.params["master_user"] + master_password = module.params["master_password"] + master_port = module.params["master_port"] + master_connect_retry = module.params["master_connect_retry"] + master_log_file = module.params["master_log_file"] + master_log_pos = module.params["master_log_pos"] + relay_log_file = module.params["relay_log_file"] + relay_log_pos = module.params["relay_log_pos"] + master_ssl = module.params["master_ssl"] + master_ssl_ca = module.params["master_ssl_ca"] + master_ssl_capath = module.params["master_ssl_capath"] + master_ssl_cert = module.params["master_ssl_cert"] + master_ssl_key = module.params["master_ssl_key"] + master_ssl_cipher = module.params["master_ssl_cipher"] + master_auto_position = module.params["master_auto_position"] + ssl_cert = module.params["client_cert"] + ssl_key = module.params["client_key"] + ssl_ca = module.params["ca_cert"] + connect_timeout = module.params['connect_timeout'] + config_file = module.params['config_file'] + master_delay = module.params['master_delay'] + if module.params.get("master_use_gtid") == 'disabled': + master_use_gtid = 'no' + else: + master_use_gtid = module.params["master_use_gtid"] + connection_name = module.params["connection_name"] + channel = module.params['channel'] + fail_on_error = module.params['fail_on_error'] + + if mysql_driver is None: + module.fail_json(msg=mysql_driver_fail_msg) + else: + warnings.filterwarnings('error', category=mysql_driver.Warning) + + login_password = module.params["login_password"] + login_user = module.params["login_user"] + + try: + cursor, db_conn = mysql_connect(module, login_user, login_password, config_file, + ssl_cert, ssl_key, ssl_ca, None, cursor_class='DictCursor', + connect_timeout=connect_timeout) + except Exception as e: + if os.path.exists(config_file): + module.fail_json(msg="unable to connect to database, check login_user and login_password are correct or %s has the credentials. " + "Exception message: %s" % (config_file, to_native(e))) + else: + module.fail_json(msg="unable to find %s. 
Exception message: %s" % (config_file, to_native(e))) + + if mode in "getmaster": + status = get_master_status(cursor) + if not isinstance(status, dict): + status = dict(Is_Master=False, msg="Server is not configured as mysql master") + else: + status['Is_Master'] = True + module.exit_json(queries=executed_queries, **status) + + elif mode in "getslave": + status = get_slave_status(cursor, connection_name, channel) + if not isinstance(status, dict): + status = dict(Is_Slave=False, msg="Server is not configured as mysql slave") + else: + status['Is_Slave'] = True + module.exit_json(queries=executed_queries, **status) + + elif mode in "changemaster": + chm = [] + result = {} + if master_host is not None: + chm.append("MASTER_HOST='%s'" % master_host) + if master_user is not None: + chm.append("MASTER_USER='%s'" % master_user) + if master_password is not None: + chm.append("MASTER_PASSWORD='%s'" % master_password) + if master_port is not None: + chm.append("MASTER_PORT=%s" % master_port) + if master_connect_retry is not None: + chm.append("MASTER_CONNECT_RETRY=%s" % master_connect_retry) + if master_log_file is not None: + chm.append("MASTER_LOG_FILE='%s'" % master_log_file) + if master_log_pos is not None: + chm.append("MASTER_LOG_POS=%s" % master_log_pos) + if master_delay is not None: + chm.append("MASTER_DELAY=%s" % master_delay) + if relay_log_file is not None: + chm.append("RELAY_LOG_FILE='%s'" % relay_log_file) + if relay_log_pos is not None: + chm.append("RELAY_LOG_POS=%s" % relay_log_pos) + if master_ssl: + chm.append("MASTER_SSL=1") + if master_ssl_ca is not None: + chm.append("MASTER_SSL_CA='%s'" % master_ssl_ca) + if master_ssl_capath is not None: + chm.append("MASTER_SSL_CAPATH='%s'" % master_ssl_capath) + if master_ssl_cert is not None: + chm.append("MASTER_SSL_CERT='%s'" % master_ssl_cert) + if master_ssl_key is not None: + chm.append("MASTER_SSL_KEY='%s'" % master_ssl_key) + if master_ssl_cipher is not None: + chm.append("MASTER_SSL_CIPHER='%s'" % master_ssl_cipher) + if master_auto_position: + chm.append("MASTER_AUTO_POSITION=1") + if master_use_gtid is not None: + chm.append("MASTER_USE_GTID=%s" % master_use_gtid) + try: + changemaster(cursor, chm, connection_name, channel) + except mysql_driver.Warning as e: + result['warning'] = to_native(e) + except Exception as e: + module.fail_json(msg='%s. 
Query == CHANGE MASTER TO %s' % (to_native(e), chm))
+        result['changed'] = True
+        module.exit_json(queries=executed_queries, **result)
+    elif mode in "startslave":
+        started = start_slave(module, cursor, connection_name, channel, fail_on_error)
+        if started is True:
+            module.exit_json(msg="Slave started", changed=True, queries=executed_queries)
+        else:
+            module.exit_json(msg="Slave already started (or cannot be started)", changed=False, queries=executed_queries)
+    elif mode in "stopslave":
+        stopped = stop_slave(module, cursor, connection_name, channel, fail_on_error)
+        if stopped is True:
+            module.exit_json(msg="Slave stopped", changed=True, queries=executed_queries)
+        else:
+            module.exit_json(msg="Slave already stopped", changed=False, queries=executed_queries)
+    elif mode in "resetmaster":
+        reset = reset_master(module, cursor, fail_on_error)
+        if reset is True:
+            module.exit_json(msg="Master reset", changed=True, queries=executed_queries)
+        else:
+            module.exit_json(msg="Master already reset", changed=False, queries=executed_queries)
+    elif mode in "resetslave":
+        reset = reset_slave(module, cursor, connection_name, channel, fail_on_error)
+        if reset is True:
+            module.exit_json(msg="Slave reset", changed=True, queries=executed_queries)
+        else:
+            module.exit_json(msg="Slave already reset", changed=False, queries=executed_queries)
+    elif mode in "resetslaveall":
+        reset = reset_slave_all(module, cursor, connection_name, channel, fail_on_error)
+        if reset is True:
+            module.exit_json(msg="Slave reset", changed=True, queries=executed_queries)
+        else:
+            module.exit_json(msg="Slave already reset", changed=False, queries=executed_queries)
+
+    warnings.simplefilter("ignore")
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/database/mysql/mysql_user.py b/plugins/modules/database/mysql/mysql_user.py
new file mode 100644
index 0000000000..767bbd9f81
--- /dev/null
+++ b/plugins/modules/database/mysql/mysql_user.py
@@ -0,0 +1,807 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2012, Mark Theunissen
+# Sponsored by Four Kitchens http://fourkitchens.com.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+
+DOCUMENTATION = r'''
+---
+module: mysql_user
+short_description: Adds or removes a user from a MySQL database
+description:
+  - Adds or removes a user from a MySQL database.
+options:
+  name:
+    description:
+      - Name of the user (role) to add or remove.
+    type: str
+    required: true
+  password:
+    description:
+      - Set the user's password.
+    type: str
+  encrypted:
+    description:
+      - Indicate that the 'password' field is a `mysql_native_password` hash.
+    type: bool
+    default: no
+  host:
+    description:
+      - The 'host' part of the MySQL username.
+    type: str
+    default: localhost
+  host_all:
+    description:
+      - Override the host option, making Ansible apply changes to all hostnames for a given user.
+      - This option cannot be used when creating users.
+    type: bool
+    default: no
+  priv:
+    description:
+      - "MySQL privileges string in the format: C(db.table:priv1,priv2)."
+      - "Multiple privileges can be specified by separating each one using
+        a forward slash: C(db.table:priv/db.table:priv)."
+      - The format is based on the MySQL C(GRANT) statement.
+      - Database and table names can be quoted, MySQL-style.
+ - If column privileges are used, the C(priv1,priv2) part must be + exactly as returned by a C(SHOW GRANT) statement. If not followed, + the module will always report changes. It includes grouping columns + by permission (C(SELECT(col1,col2)) instead of C(SELECT(col1),SELECT(col2))). + - Can be passed as a dictionary (see the examples). + type: raw + append_privs: + description: + - Append the privileges defined by priv to the existing ones for this + user instead of overwriting existing ones. + type: bool + default: no + sql_log_bin: + description: + - Whether binary logging should be enabled or disabled for the connection. + type: bool + default: yes + state: + description: + - Whether the user should exist. + - When C(absent), removes the user. + type: str + choices: [ absent, present ] + default: present + check_implicit_admin: + description: + - Check if mysql allows login as root/nopassword before trying supplied credentials. + type: bool + default: no + update_password: + description: + - C(always) will update passwords if they differ. + - C(on_create) will only set the password for newly created users. + type: str + choices: [ always, on_create ] + default: always + plugin: + description: + - User's plugin to authenticate (``CREATE USER user IDENTIFIED WITH plugin``). + type: str + plugin_hash_string: + description: + - User's plugin hash string (``CREATE USER user IDENTIFIED WITH plugin AS plugin_hash_string``). + type: str + plugin_auth_string: + description: + - User's plugin auth_string (``CREATE USER user IDENTIFIED WITH plugin BY plugin_auth_string``). + type: str + +notes: + - "MySQL server installs with default login_user of 'root' and no password. To secure this user + as part of an idempotent playbook, you must create at least two tasks: the first must change the root user's password, + without providing any login_user/login_password details. The second must drop a ~/.my.cnf file containing + the new root credentials. Subsequent runs of the playbook will then succeed by reading the new credentials from + the file." + - Currently, there is only support for the `mysql_native_password` encrypted password hash module. + +seealso: +- module: mysql_info +- name: MySQL access control and account management reference + description: Complete reference of the MySQL access control and account management documentation. + link: https://dev.mysql.com/doc/refman/8.0/en/access-control.html +- name: MySQL provided privileges reference + description: Complete reference of the MySQL provided privileges documentation. 
+ link: https://dev.mysql.com/doc/refman/8.0/en/privileges-provided.html + +author: +- Jonathan Mainguy (@Jmainguy) +- Benjamin Malynovytch (@bmalynovytch) +- Lukasz Tomaszkiewicz (@tomaszkiewicz) +extends_documentation_fragment: +- community.general.mysql + +''' + +EXAMPLES = r''' +- name: Removes anonymous user account for localhost + mysql_user: + name: '' + host: localhost + state: absent + +- name: Removes all anonymous user accounts + mysql_user: + name: '' + host_all: yes + state: absent + +- name: Create database user with name 'bob' and password '12345' with all database privileges + mysql_user: + name: bob + password: 12345 + priv: '*.*:ALL' + state: present + +- name: Create database user using hashed password with all database privileges + mysql_user: + name: bob + password: '*EE0D72C1085C46C5278932678FBE2C6A782821B4' + encrypted: yes + priv: '*.*:ALL' + state: present + +- name: Create database user with password and all database privileges and 'WITH GRANT OPTION' + mysql_user: + name: bob + password: 12345 + priv: '*.*:ALL,GRANT' + state: present + +- name: Create user with password, all database privileges and 'WITH GRANT OPTION' in db1 and db2 + mysql_user: + state: present + name: bob + password: 12345dd + priv: + 'db1.*': 'ALL,GRANT' + 'db2.*': 'ALL,GRANT' + +# Note that REQUIRESSL is a special privilege that should only apply to *.* by itself. +- name: Modify user to require SSL connections. + mysql_user: + name: bob + append_privs: yes + priv: '*.*:REQUIRESSL' + state: present + +- name: Ensure no user named 'sally'@'localhost' exists, also passing in the auth credentials. + mysql_user: + login_user: root + login_password: 123456 + name: sally + state: absent + +- name: Ensure no user named 'sally' exists at all + mysql_user: + name: sally + host_all: yes + state: absent + +- name: Specify grants composed of more than one word + mysql_user: + name: replication + password: 12345 + priv: "*.*:REPLICATION CLIENT" + state: present + +- name: Revoke all privileges for user 'bob' and password '12345' + mysql_user: + name: bob + password: 12345 + priv: "*.*:USAGE" + state: present + +# Example privileges string format +# mydb.*:INSERT,UPDATE/anotherdb.*:SELECT/yetanotherdb.*:ALL + +- name: Example using login_unix_socket to connect to server + mysql_user: + name: root + password: abc123 + login_unix_socket: /var/run/mysqld/mysqld.sock + +- name: Example of skipping binary logging while adding user 'bob' + mysql_user: + name: bob + password: 12345 + priv: "*.*:USAGE" + state: present + sql_log_bin: no + +- name: Create user 'bob' authenticated with plugin 'AWSAuthenticationPlugin' + mysql_user: + name: bob + plugin: AWSAuthenticationPlugin + plugin_hash_string: RDS + priv: '*.*:ALL' + state: present + +# Example .my.cnf file for setting the root password +# [client] +# user=root +# password=n<_665{vS43y +''' + +import re +import string + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.database import SQLParseError +from ansible_collections.community.general.plugins.module_utils.mysql import mysql_connect, mysql_driver, mysql_driver_fail_msg +from ansible.module_utils.six import iteritems +from ansible.module_utils._text import to_native + + +VALID_PRIVS = frozenset(('CREATE', 'DROP', 'GRANT', 'GRANT OPTION', + 'LOCK TABLES', 'REFERENCES', 'EVENT', 'ALTER', + 'DELETE', 'INDEX', 'INSERT', 'SELECT', 'UPDATE', + 'CREATE TEMPORARY TABLES', 'TRIGGER', 'CREATE VIEW', + 'SHOW VIEW', 'ALTER ROUTINE', 'CREATE 
ROUTINE', + 'EXECUTE', 'FILE', 'CREATE TABLESPACE', 'CREATE USER', + 'PROCESS', 'PROXY', 'RELOAD', 'REPLICATION CLIENT', + 'REPLICATION SLAVE', 'SHOW DATABASES', 'SHUTDOWN', + 'SUPER', 'ALL', 'ALL PRIVILEGES', 'USAGE', 'REQUIRESSL', + 'CREATE ROLE', 'DROP ROLE', 'APPLICATION_PASSWORD_ADMIN', + 'AUDIT_ADMIN', 'BACKUP_ADMIN', 'BINLOG_ADMIN', + 'BINLOG_ENCRYPTION_ADMIN', 'CLONE_ADMIN', 'CONNECTION_ADMIN', + 'ENCRYPTION_KEY_ADMIN', 'FIREWALL_ADMIN', 'FIREWALL_USER', + 'GROUP_REPLICATION_ADMIN', 'INNODB_REDO_LOG_ARCHIVE', + 'NDB_STORED_USER', 'PERSIST_RO_VARIABLES_ADMIN', + 'REPLICATION_APPLIER', 'REPLICATION_SLAVE_ADMIN', + 'RESOURCE_GROUP_ADMIN', 'RESOURCE_GROUP_USER', + 'ROLE_ADMIN', 'SESSION_VARIABLES_ADMIN', 'SET_USER_ID', + 'SYSTEM_USER', 'SYSTEM_VARIABLES_ADMIN', 'SYSTEM_USER', + 'TABLE_ENCRYPTION_ADMIN', 'VERSION_TOKEN_ADMIN', + 'XA_RECOVER_ADMIN', 'LOAD FROM S3', 'SELECT INTO S3')) + + +class InvalidPrivsError(Exception): + pass + +# =========================================== +# MySQL module specific support methods. +# + + +# User Authentication Management changed in MySQL 5.7 and MariaDB 10.2.0 +def use_old_user_mgmt(cursor): + cursor.execute("SELECT VERSION()") + result = cursor.fetchone() + version_str = result[0] + version = version_str.split('.') + + if 'mariadb' in version_str.lower(): + # Prior to MariaDB 10.2 + if int(version[0]) * 1000 + int(version[1]) < 10002: + return True + else: + return False + else: + # Prior to MySQL 5.7 + if int(version[0]) * 1000 + int(version[1]) < 5007: + return True + else: + return False + + +def get_mode(cursor): + cursor.execute('SELECT @@GLOBAL.sql_mode') + result = cursor.fetchone() + mode_str = result[0] + if 'ANSI' in mode_str: + mode = 'ANSI' + else: + mode = 'NOTANSI' + return mode + + +def user_exists(cursor, user, host, host_all): + if host_all: + cursor.execute("SELECT count(*) FROM mysql.user WHERE user = %s", ([user])) + else: + cursor.execute("SELECT count(*) FROM mysql.user WHERE user = %s AND host = %s", (user, host)) + + count = cursor.fetchone() + return count[0] > 0 + + +def user_add(cursor, user, host, host_all, password, encrypted, + plugin, plugin_hash_string, plugin_auth_string, new_priv, check_mode): + # we cannot create users without a proper hostname + if host_all: + return False + + if check_mode: + return True + + if password and encrypted: + cursor.execute("CREATE USER %s@%s IDENTIFIED BY PASSWORD %s", (user, host, password)) + elif password and not encrypted: + cursor.execute("CREATE USER %s@%s IDENTIFIED BY %s", (user, host, password)) + elif plugin and plugin_hash_string: + cursor.execute("CREATE USER %s@%s IDENTIFIED WITH %s AS %s", (user, host, plugin, plugin_hash_string)) + elif plugin and plugin_auth_string: + cursor.execute("CREATE USER %s@%s IDENTIFIED WITH %s BY %s", (user, host, plugin, plugin_auth_string)) + elif plugin: + cursor.execute("CREATE USER %s@%s IDENTIFIED WITH %s", (user, host, plugin)) + else: + cursor.execute("CREATE USER %s@%s", (user, host)) + if new_priv is not None: + for db_table, priv in iteritems(new_priv): + privileges_grant(cursor, user, host, db_table, priv) + return True + + +def is_hash(password): + ishash = False + if len(password) == 41 and password[0] == '*': + if frozenset(password[1:]).issubset(string.hexdigits): + ishash = True + return ishash + + +def user_mod(cursor, user, host, host_all, password, encrypted, + plugin, plugin_hash_string, plugin_auth_string, new_priv, append_privs, module): + changed = False + msg = "User unchanged" + grant_option = False + + if 
host_all: + hostnames = user_get_hostnames(cursor, [user]) + else: + hostnames = [host] + + for host in hostnames: + # Handle clear text and hashed passwords. + if bool(password): + # Determine what user management method server uses + old_user_mgmt = use_old_user_mgmt(cursor) + + # Get a list of valid columns in mysql.user table to check if Password and/or authentication_string exist + cursor.execute(""" + SELECT COLUMN_NAME FROM information_schema.COLUMNS + WHERE TABLE_SCHEMA = 'mysql' AND TABLE_NAME = 'user' AND COLUMN_NAME IN ('Password', 'authentication_string') + ORDER BY COLUMN_NAME DESC LIMIT 1 + """) + colA = cursor.fetchone() + + cursor.execute(""" + SELECT COLUMN_NAME FROM information_schema.COLUMNS + WHERE TABLE_SCHEMA = 'mysql' AND TABLE_NAME = 'user' AND COLUMN_NAME IN ('Password', 'authentication_string') + ORDER BY COLUMN_NAME ASC LIMIT 1 + """) + colB = cursor.fetchone() + + # Select hash from either Password or authentication_string, depending which one exists and/or is filled + cursor.execute(""" + SELECT COALESCE( + CASE WHEN %s = '' THEN NULL ELSE %s END, + CASE WHEN %s = '' THEN NULL ELSE %s END + ) + FROM mysql.user WHERE user = %%s AND host = %%s + """ % (colA[0], colA[0], colB[0], colB[0]), (user, host)) + current_pass_hash = cursor.fetchone()[0] + if isinstance(current_pass_hash, bytes): + current_pass_hash = current_pass_hash.decode('ascii') + + if encrypted: + encrypted_password = password + if not is_hash(encrypted_password): + module.fail_json(msg="encrypted was specified however it does not appear to be a valid hash expecting: *SHA1(SHA1(your_password))") + else: + if old_user_mgmt: + cursor.execute("SELECT PASSWORD(%s)", (password,)) + else: + cursor.execute("SELECT CONCAT('*', UCASE(SHA1(UNHEX(SHA1(%s)))))", (password,)) + encrypted_password = cursor.fetchone()[0] + + if current_pass_hash != encrypted_password: + msg = "Password updated" + if module.check_mode: + return (True, msg) + if old_user_mgmt: + cursor.execute("SET PASSWORD FOR %s@%s = %s", (user, host, encrypted_password)) + msg = "Password updated (old style)" + else: + try: + cursor.execute("ALTER USER %s@%s IDENTIFIED WITH mysql_native_password AS %s", (user, host, encrypted_password)) + msg = "Password updated (new style)" + except (mysql_driver.Error) as e: + # https://stackoverflow.com/questions/51600000/authentication-string-of-root-user-on-mysql + # Replacing empty root password with new authentication mechanisms fails with error 1396 + if e.args[0] == 1396: + cursor.execute( + "UPDATE user SET plugin = %s, authentication_string = %s, Password = '' WHERE User = %s AND Host = %s", + ('mysql_native_password', encrypted_password, user, host) + ) + cursor.execute("FLUSH PRIVILEGES") + msg = "Password forced update" + else: + raise e + changed = True + + # Handle plugin authentication + if plugin: + cursor.execute("SELECT plugin, authentication_string FROM mysql.user " + "WHERE user = %s AND host = %s", (user, host)) + current_plugin = cursor.fetchone() + + update = False + + if current_plugin[0] != plugin: + update = True + + if plugin_hash_string and current_plugin[1] != plugin_hash_string: + update = True + + if plugin_auth_string and current_plugin[1] != plugin_auth_string: + # this case can cause more updates than expected, + # as plugin can hash auth_string in any way it wants + # and there's no way to figure it out for + # a check, so I prefer to update more often than never + update = True + + if update: + if plugin_hash_string: + cursor.execute("ALTER USER %s@%s IDENTIFIED WITH %s AS 
%s", (user, host, plugin, plugin_hash_string)) + elif plugin_auth_string: + cursor.execute("ALTER USER %s@%s IDENTIFIED WITH %s BY %s", (user, host, plugin, plugin_auth_string)) + else: + cursor.execute("ALTER USER %s@%s IDENTIFIED WITH %s", (user, host, plugin)) + changed = True + + # Handle privileges + if new_priv is not None: + curr_priv = privileges_get(cursor, user, host) + + # If the user has privileges on a db.table that doesn't appear at all in + # the new specification, then revoke all privileges on it. + for db_table, priv in iteritems(curr_priv): + # If the user has the GRANT OPTION on a db.table, revoke it first. + if "GRANT" in priv: + grant_option = True + if db_table not in new_priv: + if user != "root" and "PROXY" not in priv and not append_privs: + msg = "Privileges updated" + if module.check_mode: + return (True, msg) + privileges_revoke(cursor, user, host, db_table, priv, grant_option) + changed = True + + # If the user doesn't currently have any privileges on a db.table, then + # we can perform a straight grant operation. + for db_table, priv in iteritems(new_priv): + if db_table not in curr_priv: + msg = "New privileges granted" + if module.check_mode: + return (True, msg) + privileges_grant(cursor, user, host, db_table, priv) + changed = True + + # If the db.table specification exists in both the user's current privileges + # and in the new privileges, then we need to see if there's a difference. + db_table_intersect = set(new_priv.keys()) & set(curr_priv.keys()) + for db_table in db_table_intersect: + priv_diff = set(new_priv[db_table]) ^ set(curr_priv[db_table]) + if len(priv_diff) > 0: + msg = "Privileges updated" + if module.check_mode: + return (True, msg) + if not append_privs: + privileges_revoke(cursor, user, host, db_table, curr_priv[db_table], grant_option) + privileges_grant(cursor, user, host, db_table, new_priv[db_table]) + changed = True + + return (changed, msg) + + +def user_delete(cursor, user, host, host_all, check_mode): + if check_mode: + return True + + if host_all: + hostnames = user_get_hostnames(cursor, [user]) + + for hostname in hostnames: + cursor.execute("DROP USER %s@%s", (user, hostname)) + else: + cursor.execute("DROP USER %s@%s", (user, host)) + + return True + + +def user_get_hostnames(cursor, user): + cursor.execute("SELECT Host FROM mysql.user WHERE user = %s", user) + hostnames_raw = cursor.fetchall() + hostnames = [] + + for hostname_raw in hostnames_raw: + hostnames.append(hostname_raw[0]) + + return hostnames + + +def privileges_get(cursor, user, host): + """ MySQL doesn't have a better method of getting privileges aside from the + SHOW GRANTS query syntax, which requires us to then parse the returned string. + Here's an example of the string that is returned from MySQL: + + GRANT USAGE ON *.* TO 'user'@'localhost' IDENTIFIED BY 'pass'; + + This function makes the query and returns a dictionary containing the results. + The dictionary format is the same as that returned by privileges_unpack() below. + """ + output = {} + cursor.execute("SHOW GRANTS FOR %s@%s", (user, host)) + grants = cursor.fetchall() + + def pick(x): + if x == 'ALL PRIVILEGES': + return 'ALL' + else: + return x + + for grant in grants: + res = re.match("""GRANT (.+) ON (.+) TO (['`"]).*\\3@(['`"]).*\\4( IDENTIFIED BY PASSWORD (['`"]).+\\6)? 
?(.*)""", grant[0]) + if res is None: + raise InvalidPrivsError('unable to parse the MySQL grant string: %s' % grant[0]) + privileges = res.group(1).split(", ") + privileges = [pick(x) for x in privileges] + if "WITH GRANT OPTION" in res.group(7): + privileges.append('GRANT') + if "REQUIRE SSL" in res.group(7): + privileges.append('REQUIRESSL') + db = res.group(2) + output[db] = privileges + return output + + +def privileges_unpack(priv, mode): + """ Take a privileges string, typically passed as a parameter, and unserialize + it into a dictionary, the same format as privileges_get() above. We have this + custom format to avoid using YAML/JSON strings inside YAML playbooks. Example + of a privileges string: + + mydb.*:INSERT,UPDATE/anotherdb.*:SELECT/yetanother.*:ALL + + The privilege USAGE stands for no privileges, so we add that in on *.* if it's + not specified in the string, as MySQL will always provide this by default. + """ + if mode == 'ANSI': + quote = '"' + else: + quote = '`' + output = {} + privs = [] + for item in priv.strip().split('/'): + pieces = item.strip().rsplit(':', 1) + dbpriv = pieces[0].rsplit(".", 1) + + # Check for FUNCTION or PROCEDURE object types + parts = dbpriv[0].split(" ", 1) + object_type = '' + if len(parts) > 1 and (parts[0] == 'FUNCTION' or parts[0] == 'PROCEDURE'): + object_type = parts[0] + ' ' + dbpriv[0] = parts[1] + + # Do not escape if privilege is for database or table, i.e. + # neither quote *. nor .* + for i, side in enumerate(dbpriv): + if side.strip('`') != '*': + dbpriv[i] = '%s%s%s' % (quote, side.strip('`'), quote) + pieces[0] = object_type + '.'.join(dbpriv) + + if '(' in pieces[1]: + output[pieces[0]] = re.split(r',\s*(?=[^)]*(?:\(|$))', pieces[1].upper()) + for i in output[pieces[0]]: + privs.append(re.sub(r'\s*\(.*\)', '', i)) + else: + output[pieces[0]] = pieces[1].upper().split(',') + privs = output[pieces[0]] + new_privs = frozenset(privs) + if not new_privs.issubset(VALID_PRIVS): + raise InvalidPrivsError('Invalid privileges specified: %s' % new_privs.difference(VALID_PRIVS)) + + if '*.*' not in output: + output['*.*'] = ['USAGE'] + + # if we are only specifying something like REQUIRESSL and/or GRANT (=WITH GRANT OPTION) in *.* + # we still need to add USAGE as a privilege to avoid syntax errors + if 'REQUIRESSL' in priv and not set(output['*.*']).difference(set(['GRANT', 'REQUIRESSL'])): + output['*.*'].append('USAGE') + + return output + + +def privileges_revoke(cursor, user, host, db_table, priv, grant_option): + # Escape '%' since mysql db.execute() uses a format string + db_table = db_table.replace('%', '%%') + if grant_option: + query = ["REVOKE GRANT OPTION ON %s" % db_table] + query.append("FROM %s@%s") + query = ' '.join(query) + cursor.execute(query, (user, host)) + priv_string = ",".join([p for p in priv if p not in ('GRANT', 'REQUIRESSL')]) + query = ["REVOKE %s ON %s" % (priv_string, db_table)] + query.append("FROM %s@%s") + query = ' '.join(query) + cursor.execute(query, (user, host)) + + +def privileges_grant(cursor, user, host, db_table, priv): + # Escape '%' since mysql db.execute uses a format string and the + # specification of db and table often use a % (SQL wildcard) + db_table = db_table.replace('%', '%%') + priv_string = ",".join([p for p in priv if p not in ('GRANT', 'REQUIRESSL')]) + query = ["GRANT %s ON %s" % (priv_string, db_table)] + query.append("TO %s@%s") + if 'REQUIRESSL' in priv: + query.append("REQUIRE SSL") + if 'GRANT' in priv: + query.append("WITH GRANT OPTION") + query = ' '.join(query) + 
cursor.execute(query, (user, host)) + + +def convert_priv_dict_to_str(priv): + """Converts privs dictionary to string of certain format. + + Args: + priv (dict): Dict of privileges that needs to be converted to string. + + Returns: + priv (str): String representation of input argument. + """ + priv_list = ['%s:%s' % (key, val) for key, val in iteritems(priv)] + + return '/'.join(priv_list) + +# =========================================== +# Module execution. +# + + +def main(): + module = AnsibleModule( + argument_spec=dict( + login_user=dict(type='str'), + login_password=dict(type='str', no_log=True), + login_host=dict(type='str', default='localhost'), + login_port=dict(type='int', default=3306), + login_unix_socket=dict(type='str'), + user=dict(type='str', required=True, aliases=['name']), + password=dict(type='str', no_log=True), + encrypted=dict(type='bool', default=False), + host=dict(type='str', default='localhost'), + host_all=dict(type="bool", default=False), + state=dict(type='str', default='present', choices=['absent', 'present']), + priv=dict(type='raw'), + append_privs=dict(type='bool', default=False), + check_implicit_admin=dict(type='bool', default=False), + update_password=dict(type='str', default='always', choices=['always', 'on_create']), + connect_timeout=dict(type='int', default=30), + config_file=dict(type='path', default='~/.my.cnf'), + sql_log_bin=dict(type='bool', default=True), + client_cert=dict(type='path', aliases=['ssl_cert']), + client_key=dict(type='path', aliases=['ssl_key']), + ca_cert=dict(type='path', aliases=['ssl_ca']), + plugin=dict(default=None, type='str'), + plugin_hash_string=dict(default=None, type='str'), + plugin_auth_string=dict(default=None, type='str'), + ), + supports_check_mode=True, + ) + login_user = module.params["login_user"] + login_password = module.params["login_password"] + user = module.params["user"] + password = module.params["password"] + encrypted = module.boolean(module.params["encrypted"]) + host = module.params["host"].lower() + host_all = module.params["host_all"] + state = module.params["state"] + priv = module.params["priv"] + check_implicit_admin = module.params['check_implicit_admin'] + connect_timeout = module.params['connect_timeout'] + config_file = module.params['config_file'] + append_privs = module.boolean(module.params["append_privs"]) + update_password = module.params['update_password'] + ssl_cert = module.params["client_cert"] + ssl_key = module.params["client_key"] + ssl_ca = module.params["ca_cert"] + db = '' + sql_log_bin = module.params["sql_log_bin"] + plugin = module.params["plugin"] + plugin_hash_string = module.params["plugin_hash_string"] + plugin_auth_string = module.params["plugin_auth_string"] + if priv and not (isinstance(priv, str) or isinstance(priv, dict)): + module.fail_json(msg="priv parameter must be str or dict but %s was passed" % type(priv)) + + if priv and isinstance(priv, dict): + priv = convert_priv_dict_to_str(priv) + + if mysql_driver is None: + module.fail_json(msg=mysql_driver_fail_msg) + + cursor = None + try: + if check_implicit_admin: + try: + cursor, db_conn = mysql_connect(module, 'root', '', config_file, ssl_cert, ssl_key, ssl_ca, db, + connect_timeout=connect_timeout) + except Exception: + pass + + if not cursor: + cursor, db_conn = mysql_connect(module, login_user, login_password, config_file, ssl_cert, ssl_key, ssl_ca, db, + connect_timeout=connect_timeout) + except Exception as e: + module.fail_json(msg="unable to connect to database, check login_user and login_password are 
correct or %s has the credentials. "
+                             "Exception message: %s" % (config_file, to_native(e)))
+
+    if not sql_log_bin:
+        cursor.execute("SET SQL_LOG_BIN=0;")
+
+    if priv is not None:
+        try:
+            mode = get_mode(cursor)
+        except Exception as e:
+            module.fail_json(msg=to_native(e))
+        try:
+            priv = privileges_unpack(priv, mode)
+        except Exception as e:
+            module.fail_json(msg="invalid privileges string: %s" % to_native(e))
+
+    if state == "present":
+        if user_exists(cursor, user, host, host_all):
+            try:
+                if update_password == 'always':
+                    changed, msg = user_mod(cursor, user, host, host_all, password, encrypted,
+                                            plugin, plugin_hash_string, plugin_auth_string,
+                                            priv, append_privs, module)
+                else:
+                    changed, msg = user_mod(cursor, user, host, host_all, None, encrypted,
+                                            plugin, plugin_hash_string, plugin_auth_string,
+                                            priv, append_privs, module)
+
+            except (SQLParseError, InvalidPrivsError, mysql_driver.Error) as e:
+                module.fail_json(msg=to_native(e))
+        else:
+            if host_all:
+                module.fail_json(msg="host_all parameter cannot be used when adding a user")
+            try:
+                changed = user_add(cursor, user, host, host_all, password, encrypted,
+                                   plugin, plugin_hash_string, plugin_auth_string,
+                                   priv, module.check_mode)
+                if changed:
+                    msg = "User added"
+
+            except (SQLParseError, InvalidPrivsError, mysql_driver.Error) as e:
+                module.fail_json(msg=to_native(e))
+    elif state == "absent":
+        if user_exists(cursor, user, host, host_all):
+            changed = user_delete(cursor, user, host, host_all, module.check_mode)
+            msg = "User deleted"
+        else:
+            changed = False
+            msg = "User doesn't exist"
+    module.exit_json(changed=changed, user=user, msg=msg)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/database/mysql/mysql_variables.py b/plugins/modules/database/mysql/mysql_variables.py
new file mode 100644
index 0000000000..3ca348a706
--- /dev/null
+++ b/plugins/modules/database/mysql/mysql_variables.py
@@ -0,0 +1,275 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2013, Balazs Pocze
+# Certain parts are taken from Mark Theunissen's mysqldb module
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+DOCUMENTATION = r'''
+---
+module: mysql_variables
+
+short_description: Manage MySQL global variables
+description:
+- Query / Set MySQL variables.
+author:
+- Balazs Pocze (@banyek)
+options:
+  variable:
+    description:
+    - Variable name to operate on.
+    type: str
+    required: yes
+  value:
+    description:
+    - If set, then sets the variable to this value.
+    type: str
+  mode:
+    description:
+    - C(global) assigns C(value) to a global system variable which will be changed at runtime
+      but won't persist across server restarts.
+    - C(persist) assigns C(value) to a global system variable and persists it to
+      the mysqld-auto.cnf option file in the data directory
+      (the variable will survive service restarts).
+    - C(persist_only) persists C(value) to the mysqld-auto.cnf option file in the data directory
+      but without setting the global variable runtime value
+      (the value will be changed after the next service restart).
+    - Supported by MySQL 8.0 or later.
+    - For more information see U(https://dev.mysql.com/doc/refman/8.0/en/set-variable.html).
+    type: str
+    choices: ['global', 'persist', 'persist_only']
+    default: global
+
+seealso:
+- module: mysql_info
+- name: MySQL SET command reference
+  description: Complete reference of the MySQL SET command documentation.
+  link: https://dev.mysql.com/doc/refman/8.0/en/set-statement.html
+
+extends_documentation_fragment:
+- community.general.mysql
+
+'''
+
+EXAMPLES = r'''
+- name: Check for sync_binlog setting
+  mysql_variables:
+    variable: sync_binlog
+
+- name: Set read_only variable to 1 persistently
+  mysql_variables:
+    variable: read_only
+    value: 1
+    mode: persist
+'''
+
+RETURN = r'''
+queries:
+  description: List of executed queries which modified DB's state.
+  returned: if executed
+  type: list
+  sample: ["SET GLOBAL `read_only` = 1"]
+  version_added: '2.10'
+'''
+
+import os
+import warnings
+from re import match
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.database import SQLParseError, mysql_quote_identifier
+from ansible_collections.community.general.plugins.module_utils.mysql import mysql_connect, mysql_driver, mysql_driver_fail_msg
+from ansible.module_utils._text import to_native
+
+executed_queries = []
+
+
+def check_mysqld_auto(module, cursor, mysqlvar):
+    """Check variable's value in mysqld-auto.cnf."""
+    query = ("SELECT VARIABLE_VALUE "
+             "FROM performance_schema.persisted_variables "
+             "WHERE VARIABLE_NAME = %s")
+    try:
+        cursor.execute(query, (mysqlvar,))
+        res = cursor.fetchone()
+    except Exception as e:
+        if "Table 'performance_schema.persisted_variables' doesn't exist" in str(e):
+            module.fail_json(msg='Server version must be 8.0 or greater.')
+        # Fail on any other error as well, otherwise 'res' below would be unbound:
+        module.fail_json(msg=to_native(e))
+
+    if res:
+        return res[0]
+    else:
+        return None
+
+
+def typedvalue(value):
+    """
+    Convert value to number whenever possible, return same value
+    otherwise.
+
+    >>> typedvalue('3')
+    3
+    >>> typedvalue('3.0')
+    3.0
+    >>> typedvalue('foobar')
+    'foobar'
+
+    """
+    try:
+        return int(value)
+    except ValueError:
+        pass
+
+    try:
+        return float(value)
+    except ValueError:
+        pass
+
+    return value
+
+
+def getvariable(cursor, mysqlvar):
+    cursor.execute("SHOW VARIABLES WHERE Variable_name = %s", (mysqlvar,))
+    mysqlvar_val = cursor.fetchall()
+    if len(mysqlvar_val) == 1:
+        return mysqlvar_val[0][1]
+    else:
+        return None
+
+
+def setvariable(cursor, mysqlvar, value, mode='global'):
+    """ Set a global mysql variable to a given value.
+
+    The DB driver will handle quoting of the given value based on its
+    type, thus numeric strings like '3.0' or '8' are illegal; they
+    should be passed as numeric literals.
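+
+    A minimal illustrative sketch (the variable names and values here are
+    examples, not module defaults):
+
+        setvariable(cursor, 'max_connections', 200)    # numeric literal, not '200'
+        setvariable(cursor, 'read_only', 'ON', mode='persist')    # requires MySQL 8.0+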
+ + """ + if mode == 'persist': + query = "SET PERSIST %s = " % mysql_quote_identifier(mysqlvar, 'vars') + elif mode == 'global': + query = "SET GLOBAL %s = " % mysql_quote_identifier(mysqlvar, 'vars') + elif mode == 'persist_only': + query = "SET PERSIST_ONLY %s = " % mysql_quote_identifier(mysqlvar, 'vars') + + try: + cursor.execute(query + "%s", (value,)) + executed_queries.append(query + "%s" % value) + cursor.fetchall() + result = True + except Exception as e: + result = to_native(e) + + return result + + +def main(): + module = AnsibleModule( + argument_spec=dict( + login_user=dict(type='str'), + login_password=dict(type='str', no_log=True), + login_host=dict(type='str', default='localhost'), + login_port=dict(type='int', default=3306), + login_unix_socket=dict(type='str'), + variable=dict(type='str'), + value=dict(type='str'), + client_cert=dict(type='path', aliases=['ssl_cert']), + client_key=dict(type='path', aliases=['ssl_key']), + ca_cert=dict(type='path', aliases=['ssl_ca']), + connect_timeout=dict(type='int', default=30), + config_file=dict(type='path', default='~/.my.cnf'), + mode=dict(type='str', choices=['global', 'persist', 'persist_only'], default='global'), + ), + ) + user = module.params["login_user"] + password = module.params["login_password"] + connect_timeout = module.params['connect_timeout'] + ssl_cert = module.params["client_cert"] + ssl_key = module.params["client_key"] + ssl_ca = module.params["ca_cert"] + config_file = module.params['config_file'] + db = 'mysql' + + mysqlvar = module.params["variable"] + value = module.params["value"] + mode = module.params["mode"] + + if mysqlvar is None: + module.fail_json(msg="Cannot run without variable to operate with") + if match('^[0-9a-z_.]+$', mysqlvar) is None: + module.fail_json(msg="invalid variable name \"%s\"" % mysqlvar) + if mysql_driver is None: + module.fail_json(msg=mysql_driver_fail_msg) + else: + warnings.filterwarnings('error', category=mysql_driver.Warning) + + try: + cursor, db_conn = mysql_connect(module, user, password, config_file, ssl_cert, ssl_key, ssl_ca, db, + connect_timeout=connect_timeout) + except Exception as e: + if os.path.exists(config_file): + module.fail_json(msg=("unable to connect to database, check login_user and " + "login_password are correct or %s has the credentials. " + "Exception message: %s" % (config_file, to_native(e)))) + else: + module.fail_json(msg="unable to find %s. 
Exception message: %s" % (config_file, to_native(e)))
+
+    mysqlvar_val = None
+    var_in_mysqld_auto_cnf = None
+
+    mysqlvar_val = getvariable(cursor, mysqlvar)
+    if mysqlvar_val is None:
+        module.fail_json(msg="Variable not available \"%s\"" % mysqlvar, changed=False)
+
+    if value is None:
+        module.exit_json(msg=mysqlvar_val)
+
+    if mode in ('persist', 'persist_only'):
+        var_in_mysqld_auto_cnf = check_mysqld_auto(module, cursor, mysqlvar)
+
+        if mode == 'persist_only':
+            if var_in_mysqld_auto_cnf is None:
+                mysqlvar_val = False
+            else:
+                mysqlvar_val = var_in_mysqld_auto_cnf
+
+    # Type values before using them
+    value_wanted = typedvalue(value)
+    value_actual = typedvalue(mysqlvar_val)
+    value_in_auto_cnf = None
+    if var_in_mysqld_auto_cnf is not None:
+        value_in_auto_cnf = typedvalue(var_in_mysqld_auto_cnf)
+
+    if value_wanted == value_actual and mode in ('global', 'persist'):
+        if mode == 'persist' and value_wanted == value_in_auto_cnf:
+            module.exit_json(msg="Variable is already set to requested value globally "
+                                 "and stored into mysqld-auto.cnf file.", changed=False)
+
+        elif mode == 'global':
+            module.exit_json(msg="Variable is already set to requested value.", changed=False)
+
+    if mode == 'persist_only' and value_wanted == value_in_auto_cnf:
+        module.exit_json(msg="Variable is already stored into mysqld-auto.cnf "
+                             "with requested value.", changed=False)
+
+    try:
+        result = setvariable(cursor, mysqlvar, value_wanted, mode)
+    except SQLParseError as e:
+        result = to_native(e)
+
+    if result is True:
+        module.exit_json(msg="Variable change succeeded, prev_value=%s" % value_actual,
+                         changed=True, queries=executed_queries)
+    else:
+        module.fail_json(msg=result, changed=False)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/database/postgresql/postgresql_copy.py b/plugins/modules/database/postgresql/postgresql_copy.py
new file mode 100644
index 0000000000..8f17158796
--- /dev/null
+++ b/plugins/modules/database/postgresql/postgresql_copy.py
@@ -0,0 +1,401 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2019, Andrew Klychkov (@Andersson007)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+    'metadata_version': '1.1',
+    'supported_by': 'community',
+    'status': ['preview']
+}
+
+DOCUMENTATION = r'''
+---
+module: postgresql_copy
+short_description: Copy data between a file/program and a PostgreSQL table
+description:
+- Copy data between a file/program and a PostgreSQL table.
+
+options:
+  copy_to:
+    description:
+    - Copy the contents of a table to a file.
+    - Can also copy the results of a SELECT query.
+    - Mutually exclusive with I(copy_from) and I(dst).
+    type: path
+    aliases: [ to ]
+  copy_from:
+    description:
+    - Copy data from a file to a table (appending the data to whatever is in the table already).
+    - Mutually exclusive with I(copy_to) and I(src).
+    type: path
+    aliases: [ from ]
+  src:
+    description:
+    - Copy data from I(src=tablename) to the file or program given in I(copy_to).
+    - Used with I(copy_to) only.
+    type: str
+    aliases: [ source ]
+  dst:
+    description:
+    - Copy data to I(dst=tablename) from I(copy_from=/path/to/data.file).
+    - Used with I(copy_from) only.
+    type: str
+    aliases: [ destination ]
+  columns:
+    description:
+    - List of column names for the src/dst table to COPY FROM/TO.
+    type: list
+    elements: str
+    aliases: [ column ]
+  program:
+    description:
+    - Mark I(src)/I(dst) as a program. Data will be copied to/from a program.
+    - See the Examples block and the PROGRAM arg description at U(https://www.postgresql.org/docs/current/sql-copy.html).
+    type: bool
+    default: no
+  options:
+    description:
+    - Options of COPY command.
+    - See the full list of available options at U(https://www.postgresql.org/docs/current/sql-copy.html).
+    type: dict
+  db:
+    description:
+    - Name of database to connect to.
+    type: str
+    aliases: [ login_db ]
+  session_role:
+    description:
+    - Switch to session_role after connecting.
+      The specified session_role must be a role that the current login_user is a member of.
+    - Permissions checking for SQL commands is carried out as though
+      the session_role were the one that had logged in originally.
+    type: str
+
+notes:
+- Supports PostgreSQL version 9.4+.
+- The COPY command is only allowed for database superusers.
+- If I(check_mode=yes), the module just checks that the src/dst table is available
+  and returns the COPY query that has not actually been executed.
+- If I(check_mode=yes) and the source has been passed as SQL, the module
+  will execute it and roll the transaction back, but be aware that
+  this can affect database performance (e.g., if the SQL collects a lot of data).
+
+seealso:
+- name: COPY command reference
+  description: Complete reference of the COPY command documentation.
+  link: https://www.postgresql.org/docs/current/sql-copy.html
+
+author:
+- Andrew Klychkov (@Andersson007)
+
+extends_documentation_fragment:
+- community.general.postgres
+
+'''
+
+EXAMPLES = r'''
+- name: Copy text TAB-separated data from file /tmp/data.txt to acme table
+  postgresql_copy:
+    copy_from: /tmp/data.txt
+    dst: acme
+
+- name: Copy CSV (comma-separated) data from file /tmp/data.csv to columns id, name of table acme
+  postgresql_copy:
+    copy_from: /tmp/data.csv
+    dst: acme
+    columns: id,name
+    options:
+      format: csv
+
+- name: >
+    Copy text vertical-bar-separated data from file /tmp/data.txt to bar table.
+    The NULL values are specified as N
+  postgresql_copy:
+    copy_from: /tmp/data.txt
+    dst: bar
+    options:
+      delimiter: '|'
+      null: 'N'
+
+- name: Copy data from acme table to file /tmp/data.txt in text format, TAB-separated
+  postgresql_copy:
+    src: acme
+    copy_to: /tmp/data.txt
+
+- name: Copy data from SELECT query to /tmp/data.csv in CSV format
+  postgresql_copy:
+    src: 'SELECT * FROM acme'
+    copy_to: /tmp/data.csv
+    options:
+      format: csv
+
+- name: Copy CSV data from my_table to gzip
+  postgresql_copy:
+    src: my_table
+    copy_to: 'gzip > /tmp/data.csv.gz'
+    program: yes
+    options:
+      format: csv
+
+- name: >
+    Copy data from columns id, name of table bar to /tmp/data.txt.
+    Output format is text, vertical-bar-separated, NULL as N
+  postgresql_copy:
+    src: bar
+    columns:
+    - id
+    - name
+    copy_to: /tmp/data.txt
+    options:
+      delimiter: '|'
+      null: 'N'
+'''
+
+RETURN = r'''
+queries:
+  description: List of executed queries.
+  returned: always
+  type: list
+  sample: [ "COPY test_table FROM '/tmp/data_file.txt' (FORMAT csv, DELIMITER ',', NULL 'NULL')" ]
+src:
+  description: Data source.
+  returned: always
+  type: str
+  sample: "mytable"
+dst:
+  description: Data destination.
+ returned: always + type: str + sample: "/tmp/data.csv" +''' + +try: + from psycopg2.extras import DictCursor +except ImportError: + # psycopg2 is checked by connect_to_db() + # from ansible.module_utils.postgres + pass + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.database import pg_quote_identifier +from ansible_collections.community.general.plugins.module_utils.postgres import ( + connect_to_db, + exec_sql, + get_conn_params, + postgres_common_argument_spec, +) +from ansible.module_utils.six import iteritems + + +class PgCopyData(object): + + """Implements behavior of COPY FROM, COPY TO PostgreSQL command. + + Arguments: + module (AnsibleModule) -- object of AnsibleModule class + cursor (cursor) -- cursor object of psycopg2 library + + Attributes: + module (AnsibleModule) -- object of AnsibleModule class + cursor (cursor) -- cursor object of psycopg2 library + changed (bool) -- something was changed after execution or not + executed_queries (list) -- executed queries + dst (str) -- data destination table (when copy_from) + src (str) -- data source table (when copy_to) + opt_need_quotes (tuple) -- values of these options must be passed + to SQL in quotes + """ + + def __init__(self, module, cursor): + self.module = module + self.cursor = cursor + self.executed_queries = [] + self.changed = False + self.dst = '' + self.src = '' + self.opt_need_quotes = ( + 'DELIMITER', + 'NULL', + 'QUOTE', + 'ESCAPE', + 'ENCODING', + ) + + def copy_from(self): + """Implements COPY FROM command behavior.""" + self.src = self.module.params['copy_from'] + self.dst = self.module.params['dst'] + + query_fragments = ['COPY %s' % pg_quote_identifier(self.dst, 'table')] + + if self.module.params.get('columns'): + query_fragments.append('(%s)' % ','.join(self.module.params['columns'])) + + query_fragments.append('FROM') + + if self.module.params.get('program'): + query_fragments.append('PROGRAM') + + query_fragments.append("'%s'" % self.src) + + if self.module.params.get('options'): + query_fragments.append(self.__transform_options()) + + # Note: check mode is implemented here: + if self.module.check_mode: + self.changed = self.__check_table(self.dst) + + if self.changed: + self.executed_queries.append(' '.join(query_fragments)) + else: + if exec_sql(self, ' '.join(query_fragments), ddl=True): + self.changed = True + + def copy_to(self): + """Implements COPY TO command behavior.""" + self.src = self.module.params['src'] + self.dst = self.module.params['copy_to'] + + if 'SELECT ' in self.src.upper(): + # If src is SQL SELECT statement: + query_fragments = ['COPY (%s)' % self.src] + else: + # If src is a table: + query_fragments = ['COPY %s' % pg_quote_identifier(self.src, 'table')] + + if self.module.params.get('columns'): + query_fragments.append('(%s)' % ','.join(self.module.params['columns'])) + + query_fragments.append('TO') + + if self.module.params.get('program'): + query_fragments.append('PROGRAM') + + query_fragments.append("'%s'" % self.dst) + + if self.module.params.get('options'): + query_fragments.append(self.__transform_options()) + + # Note: check mode is implemented here: + if self.module.check_mode: + self.changed = self.__check_table(self.src) + + if self.changed: + self.executed_queries.append(' '.join(query_fragments)) + else: + if exec_sql(self, ' '.join(query_fragments), ddl=True): + self.changed = True + + def __transform_options(self): + """Transform options dict into a suitable string.""" + for (key, val) in 
iteritems(self.module.params['options']):
+            if key.upper() in self.opt_need_quotes:
+                self.module.params['options'][key] = "'%s'" % val
+
+        opt = ['%s %s' % (key, val) for (key, val) in iteritems(self.module.params['options'])]
+        return '(%s)' % ', '.join(opt)
+
+    def __check_table(self, table):
+        """Check the table (or SQL) in transaction mode for check_mode.
+
+        Return True if all is OK.
+
+        Arguments:
+            table (str) - Table name that needs to be checked.
+                It can be an SQL SELECT statement that was passed
+                instead of the table name.
+        """
+        if 'SELECT ' in table.upper():
+            # In this case table is actually an SQL SELECT statement.
+            # If the SQL fails, it's handled by exec_sql():
+            exec_sql(self, table, add_to_executed=False)
+            # If exec_sql() returned, it means all is OK:
+            return True
+
+        exec_sql(self, 'SELECT 1 FROM %s' % pg_quote_identifier(table, 'table'),
+                 add_to_executed=False)
+        # If the SQL was executed successfully:
+        return True
+
+
+# ===========================================
+# Module execution.
+#
+
+
+def main():
+    argument_spec = postgres_common_argument_spec()
+    argument_spec.update(
+        copy_to=dict(type='path', aliases=['to']),
+        copy_from=dict(type='path', aliases=['from']),
+        src=dict(type='str', aliases=['source']),
+        dst=dict(type='str', aliases=['destination']),
+        columns=dict(type='list', elements='str', aliases=['column']),
+        options=dict(type='dict'),
+        program=dict(type='bool', default=False),
+        db=dict(type='str', aliases=['login_db']),
+        session_role=dict(type='str'),
+    )
+    module = AnsibleModule(
+        argument_spec=argument_spec,
+        supports_check_mode=True,
+        mutually_exclusive=[
+            ['copy_from', 'copy_to'],
+            ['copy_from', 'src'],
+            ['copy_to', 'dst'],
+        ]
+    )
+
+    # Note: we don't need to check mutually exclusive params here, because they are
+    # checked automatically by AnsibleModule (mutually_exclusive=[] list above).
+    if module.params.get('copy_from') and not module.params.get('dst'):
+        module.fail_json(msg='dst param is necessary with copy_from')
+
+    elif module.params.get('copy_to') and not module.params.get('src'):
+        module.fail_json(msg='src param is necessary with copy_to')
+
+    # Connect to DB and make cursor object:
+    conn_params = get_conn_params(module, module.params)
+    db_connection = connect_to_db(module, conn_params, autocommit=False)
+    cursor = db_connection.cursor(cursor_factory=DictCursor)
+
+    ##############
+    # Create the object and do main job:
+    data = PgCopyData(module, cursor)
+
+    # Note: parameters like dst, src, etc. are taken by the PgCopyData object
+    # from the module object, therefore there is no need to pass them
+    # to the methods below.
+    # Note: check mode is implemented inside the methods below
+    # by checking the module.check_mode attribute.
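+    # Illustrative example (hypothetical values, nothing is executed here):
+    # with copy_from=/tmp/data.csv, dst=acme, columns=['id', 'name'] and
+    # options={'format': 'csv'}, copy_from() builds and runs:
+    #   COPY "acme" (id,name) FROM '/tmp/data.csv' (format csv)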
+    if module.params.get('copy_to'):
+        data.copy_to()
+
+    elif module.params.get('copy_from'):
+        data.copy_from()
+
+    # Finish:
+    if module.check_mode:
+        db_connection.rollback()
+    else:
+        db_connection.commit()
+
+    cursor.close()
+    db_connection.close()
+
+    # Return some values:
+    module.exit_json(
+        changed=data.changed,
+        queries=data.executed_queries,
+        src=data.src,
+        dst=data.dst,
+    )
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/database/postgresql/postgresql_db.py b/plugins/modules/database/postgresql/postgresql_db.py
new file mode 100644
index 0000000000..552d6edeec
--- /dev/null
+++ b/plugins/modules/database/postgresql/postgresql_db.py
@@ -0,0 +1,650 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['stableinterface'],
+                    'supported_by': 'community'}
+
+DOCUMENTATION = r'''
+---
+module: postgresql_db
+short_description: Add or remove PostgreSQL databases from a remote host
+description:
+  - Add or remove PostgreSQL databases from a remote host.
+options:
+  name:
+    description:
+      - Name of the database to add or remove.
+    type: str
+    required: true
+    aliases: [ db ]
+  port:
+    description:
+      - Database port to connect to (if needed).
+    type: int
+    default: 5432
+    aliases:
+    - login_port
+  owner:
+    description:
+      - Name of the role to set as the owner of the database.
+    type: str
+  template:
+    description:
+      - Template used to create the database.
+    type: str
+  encoding:
+    description:
+      - Encoding of the database.
+    type: str
+  lc_collate:
+    description:
+      - Collation order (LC_COLLATE) to use in the database. Must match the collation order of the template database unless C(template0) is used as template.
+    type: str
+  lc_ctype:
+    description:
+      - Character classification (LC_CTYPE) to use in the database (e.g. lower, upper, ...). Must match the LC_CTYPE of the template database unless C(template0)
+        is used as template.
+    type: str
+  session_role:
+    description:
+    - Switch to session_role after connecting. The specified session_role must be a role that the current login_user is a member of.
+    - Permissions checking for SQL commands is carried out as though the session_role were the one that had logged in originally.
+    type: str
+  state:
+    description:
+    - The database state.
+    - C(present) implies that the database should be created if necessary.
+    - C(absent) implies that the database should be removed if present.
+    - C(dump) requires a target definition to which the database will be backed up. (Added in Ansible 2.4)
+      Note that some versions of pg_dump, an embedded PostgreSQL utility used by the module,
+      return rc 0 even when errors occurred (e.g. the connection is forbidden by pg_hba.conf, etc.),
+      so the module returns changed=True although the dump has not actually been done. Please make sure that your version of
+      pg_dump returns rc 1 in this case.
+    - C(restore) also requires a target definition from which the database will be restored. (Added in Ansible 2.4)
+    - The format of the backup will be detected based on the target name.
+    - Supported compression formats for dump and restore include C(.pgc), C(.bz2), C(.gz), and C(.xz).
+    - Supported formats for dump and restore include C(.sql) and C(.tar).
+    type: str
+    choices: [ absent, dump, present, restore ]
+    default: present
+  target:
+    description:
+      - File to back up or restore from.
+      - Used when I(state) is C(dump) or C(restore).
+    type: path
+  target_opts:
+    description:
+      - Further arguments for pg_dump or pg_restore.
+      - Used when I(state) is C(dump) or C(restore).
+    type: str
+  maintenance_db:
+    description:
+      - The value specifies the initial database (also called the maintenance DB) that Ansible connects to.
+    type: str
+    default: postgres
+  conn_limit:
+    description:
+      - Specifies the database connection limit.
+    type: str
+  tablespace:
+    description:
+      - The tablespace to set for the database
+        U(https://www.postgresql.org/docs/current/sql-alterdatabase.html).
+      - If you want to move the database back to the default tablespace,
+        explicitly set this to pg_default.
+    type: path
+  dump_extra_args:
+    description:
+      - Provides additional arguments when I(state) is C(dump).
+      - Cannot be used with dump-file-format-related arguments like C(--format=d).
+    type: str
+seealso:
+- name: CREATE DATABASE reference
+  description: Complete reference of the CREATE DATABASE command documentation.
+  link: https://www.postgresql.org/docs/current/sql-createdatabase.html
+- name: DROP DATABASE reference
+  description: Complete reference of the DROP DATABASE command documentation.
+  link: https://www.postgresql.org/docs/current/sql-dropdatabase.html
+- name: pg_dump reference
+  description: Complete reference of the pg_dump documentation.
+  link: https://www.postgresql.org/docs/current/app-pgdump.html
+- name: pg_restore reference
+  description: Complete reference of the pg_restore documentation.
+  link: https://www.postgresql.org/docs/current/app-pgrestore.html
+- module: postgresql_tablespace
+- module: postgresql_info
+- module: postgresql_ping
+notes:
+- States C(dump) and C(restore) don't require I(psycopg2) since Ansible 2.8.
+author: "Ansible Core Team"
+extends_documentation_fragment:
+- community.general.postgres
+
+'''
+
+EXAMPLES = r'''
+- name: Create a new database with name "acme"
+  postgresql_db:
+    name: acme
+
+# Note: If a template different from "template0" is specified, encoding and locale settings must match those of the template.
+- name: Create a new database with name "acme" and specific encoding and locale settings
+ postgresql_db: + name: acme + encoding: UTF-8 + lc_collate: de_DE.UTF-8 + lc_ctype: de_DE.UTF-8 + template: template0 + +# Note: Default limit for the number of concurrent connections to a specific database is "-1", which means "unlimited" +- name: Create a new database with name "acme" which has a limit of 100 concurrent connections + postgresql_db: + name: acme + conn_limit: "100" + +- name: Dump an existing database to a file + postgresql_db: + name: acme + state: dump + target: /tmp/acme.sql + +- name: Dump an existing database to a file excluding the test table + postgresql_db: + name: acme + state: dump + target: /tmp/acme.sql + dump_extra_args: --exclude-table=test + +- name: Dump an existing database to a file (with compression) + postgresql_db: + name: acme + state: dump + target: /tmp/acme.sql.gz + +- name: Dump a single schema for an existing database + postgresql_db: + name: acme + state: dump + target: /tmp/acme.sql + target_opts: "-n public" + +# Note: In the example below, if database foo exists and has another tablespace +# the tablespace will be changed to foo. Access to the database will be locked +# until the copying of database files is finished. +- name: Create a new database called foo in tablespace bar + postgresql_db: + name: foo + tablespace: bar +''' + +RETURN = r''' +executed_commands: + description: List of commands which tried to run. + returned: always + type: list + sample: ["CREATE DATABASE acme"] + version_added: '2.10' +''' + + +import os +import subprocess +import traceback + +try: + import psycopg2 + import psycopg2.extras +except ImportError: + HAS_PSYCOPG2 = False +else: + HAS_PSYCOPG2 = True + +import ansible_collections.community.general.plugins.module_utils.postgres as pgutils +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.database import SQLParseError, pg_quote_identifier +from ansible.module_utils.six import iteritems +from ansible.module_utils.six.moves import shlex_quote +from ansible.module_utils._text import to_native + +executed_commands = [] + + +class NotSupportedError(Exception): + pass + +# =========================================== +# PostgreSQL module specific support methods. 
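+# Note: each helper below that runs state-changing SQL or builds a shell
+# command appends it to the module-level executed_commands list, which
+# main() returns to the caller via exit_json().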
+# + + +def set_owner(cursor, db, owner): + query = 'ALTER DATABASE %s OWNER TO "%s"' % ( + pg_quote_identifier(db, 'database'), + owner) + executed_commands.append(query) + cursor.execute(query) + return True + + +def set_conn_limit(cursor, db, conn_limit): + query = "ALTER DATABASE %s CONNECTION LIMIT %s" % ( + pg_quote_identifier(db, 'database'), + conn_limit) + executed_commands.append(query) + cursor.execute(query) + return True + + +def get_encoding_id(cursor, encoding): + query = "SELECT pg_char_to_encoding(%(encoding)s) AS encoding_id;" + cursor.execute(query, {'encoding': encoding}) + return cursor.fetchone()['encoding_id'] + + +def get_db_info(cursor, db): + query = """ + SELECT rolname AS owner, + pg_encoding_to_char(encoding) AS encoding, encoding AS encoding_id, + datcollate AS lc_collate, datctype AS lc_ctype, pg_database.datconnlimit AS conn_limit, + spcname AS tablespace + FROM pg_database + JOIN pg_roles ON pg_roles.oid = pg_database.datdba + JOIN pg_tablespace ON pg_tablespace.oid = pg_database.dattablespace + WHERE datname = %(db)s + """ + cursor.execute(query, {'db': db}) + return cursor.fetchone() + + +def db_exists(cursor, db): + query = "SELECT * FROM pg_database WHERE datname=%(db)s" + cursor.execute(query, {'db': db}) + return cursor.rowcount == 1 + + +def db_delete(cursor, db): + if db_exists(cursor, db): + query = "DROP DATABASE %s" % pg_quote_identifier(db, 'database') + executed_commands.append(query) + cursor.execute(query) + return True + else: + return False + + +def db_create(cursor, db, owner, template, encoding, lc_collate, lc_ctype, conn_limit, tablespace): + params = dict(enc=encoding, collate=lc_collate, ctype=lc_ctype, conn_limit=conn_limit, tablespace=tablespace) + if not db_exists(cursor, db): + query_fragments = ['CREATE DATABASE %s' % pg_quote_identifier(db, 'database')] + if owner: + query_fragments.append('OWNER "%s"' % owner) + if template: + query_fragments.append('TEMPLATE %s' % pg_quote_identifier(template, 'database')) + if encoding: + query_fragments.append('ENCODING %(enc)s') + if lc_collate: + query_fragments.append('LC_COLLATE %(collate)s') + if lc_ctype: + query_fragments.append('LC_CTYPE %(ctype)s') + if tablespace: + query_fragments.append('TABLESPACE %s' % pg_quote_identifier(tablespace, 'tablespace')) + if conn_limit: + query_fragments.append("CONNECTION LIMIT %(conn_limit)s" % {"conn_limit": conn_limit}) + query = ' '.join(query_fragments) + executed_commands.append(cursor.mogrify(query, params)) + cursor.execute(query, params) + return True + else: + db_info = get_db_info(cursor, db) + if (encoding and get_encoding_id(cursor, encoding) != db_info['encoding_id']): + raise NotSupportedError( + 'Changing database encoding is not supported. ' + 'Current encoding: %s' % db_info['encoding'] + ) + elif lc_collate and lc_collate != db_info['lc_collate']: + raise NotSupportedError( + 'Changing LC_COLLATE is not supported. ' + 'Current LC_COLLATE: %s' % db_info['lc_collate'] + ) + elif lc_ctype and lc_ctype != db_info['lc_ctype']: + raise NotSupportedError( + 'Changing LC_CTYPE is not supported.' 
+                ' Current LC_CTYPE: %s' % db_info['lc_ctype']
+            )
+        else:
+            changed = False
+
+            if owner and owner != db_info['owner']:
+                changed = set_owner(cursor, db, owner)
+
+            if conn_limit and conn_limit != str(db_info['conn_limit']):
+                changed = set_conn_limit(cursor, db, conn_limit)
+
+            if tablespace and tablespace != db_info['tablespace']:
+                changed = set_tablespace(cursor, db, tablespace)
+
+            return changed
+
+
+def db_matches(cursor, db, owner, template, encoding, lc_collate, lc_ctype, conn_limit, tablespace):
+    if not db_exists(cursor, db):
+        return False
+    else:
+        db_info = get_db_info(cursor, db)
+        if (encoding and get_encoding_id(cursor, encoding) != db_info['encoding_id']):
+            return False
+        elif lc_collate and lc_collate != db_info['lc_collate']:
+            return False
+        elif lc_ctype and lc_ctype != db_info['lc_ctype']:
+            return False
+        elif owner and owner != db_info['owner']:
+            return False
+        elif conn_limit and conn_limit != str(db_info['conn_limit']):
+            return False
+        elif tablespace and tablespace != db_info['tablespace']:
+            return False
+        else:
+            return True
+
+
+def db_dump(module, target, target_opts="",
+            db=None,
+            dump_extra_args=None,
+            user=None,
+            password=None,
+            host=None,
+            port=None,
+            **kw):
+
+    flags = login_flags(db, host, port, user, db_prefix=False)
+    cmd = module.get_bin_path('pg_dump', True)
+    comp_prog_path = None
+
+    if os.path.splitext(target)[-1] == '.tar':
+        flags.append(' --format=t')
+    elif os.path.splitext(target)[-1] == '.pgc':
+        flags.append(' --format=c')
+    if os.path.splitext(target)[-1] == '.gz':
+        if module.get_bin_path('pigz'):
+            comp_prog_path = module.get_bin_path('pigz', True)
+        else:
+            comp_prog_path = module.get_bin_path('gzip', True)
+    elif os.path.splitext(target)[-1] == '.bz2':
+        comp_prog_path = module.get_bin_path('bzip2', True)
+    elif os.path.splitext(target)[-1] == '.xz':
+        comp_prog_path = module.get_bin_path('xz', True)
+
+    cmd += "".join(flags)
+
+    if dump_extra_args:
+        cmd += " {0} ".format(dump_extra_args)
+
+    if target_opts:
+        cmd += " {0} ".format(target_opts)
+
+    if comp_prog_path:
+        # Use a fifo to be notified of an error in pg_dump.
+        # Using a shell pipe has no way to return the exit code of the first
+        # command in a portable way.
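+        # For illustration (assuming a .gz target, so gzip is the compressor),
+        # the assembled command looks roughly like:
+        #   gzip <$fifo > '/tmp/acme.sql.gz' & pg_dump ... >$fifo
+        # The compressor reads the fifo in the background while pg_dump runs in
+        # the foreground, so the exit code of the whole command is pg_dump's.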
+ fifo = os.path.join(module.tmpdir, 'pg_fifo') + os.mkfifo(fifo) + cmd = '{1} <{3} > {2} & {0} >{3}'.format(cmd, comp_prog_path, shlex_quote(target), fifo) + else: + cmd = '{0} > {1}'.format(cmd, shlex_quote(target)) + + return do_with_password(module, cmd, password) + + +def db_restore(module, target, target_opts="", + db=None, + user=None, + password=None, + host=None, + port=None, + **kw): + + flags = login_flags(db, host, port, user) + comp_prog_path = None + cmd = module.get_bin_path('psql', True) + + if os.path.splitext(target)[-1] == '.sql': + flags.append(' --file={0}'.format(target)) + + elif os.path.splitext(target)[-1] == '.tar': + flags.append(' --format=Tar') + cmd = module.get_bin_path('pg_restore', True) + + elif os.path.splitext(target)[-1] == '.pgc': + flags.append(' --format=Custom') + cmd = module.get_bin_path('pg_restore', True) + + elif os.path.splitext(target)[-1] == '.gz': + comp_prog_path = module.get_bin_path('zcat', True) + + elif os.path.splitext(target)[-1] == '.bz2': + comp_prog_path = module.get_bin_path('bzcat', True) + + elif os.path.splitext(target)[-1] == '.xz': + comp_prog_path = module.get_bin_path('xzcat', True) + + cmd += "".join(flags) + if target_opts: + cmd += " {0} ".format(target_opts) + + if comp_prog_path: + env = os.environ.copy() + if password: + env = {"PGPASSWORD": password} + p1 = subprocess.Popen([comp_prog_path, target], stdout=subprocess.PIPE, stderr=subprocess.PIPE) + p2 = subprocess.Popen(cmd, stdin=p1.stdout, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True, env=env) + (stdout2, stderr2) = p2.communicate() + p1.stdout.close() + p1.wait() + if p1.returncode != 0: + stderr1 = p1.stderr.read() + return p1.returncode, '', stderr1, 'cmd: ****' + else: + return p2.returncode, '', stderr2, 'cmd: ****' + else: + cmd = '{0} < {1}'.format(cmd, shlex_quote(target)) + + return do_with_password(module, cmd, password) + + +def login_flags(db, host, port, user, db_prefix=True): + """ + returns a list of connection argument strings each prefixed + with a space and quoted where necessary to later be combined + in a single shell string with `"".join(rv)` + + db_prefix determines if "--dbname" is prefixed to the db argument, + since the argument was introduced in 9.3. + """ + flags = [] + if db: + if db_prefix: + flags.append(' --dbname={0}'.format(shlex_quote(db))) + else: + flags.append(' {0}'.format(shlex_quote(db))) + if host: + flags.append(' --host={0}'.format(host)) + if port: + flags.append(' --port={0}'.format(port)) + if user: + flags.append(' --username={0}'.format(user)) + return flags + + +def do_with_password(module, cmd, password): + env = {} + if password: + env = {"PGPASSWORD": password} + executed_commands.append(cmd) + rc, stderr, stdout = module.run_command(cmd, use_unsafe_shell=True, environ_update=env) + return rc, stderr, stdout, cmd + + +def set_tablespace(cursor, db, tablespace): + query = "ALTER DATABASE %s SET TABLESPACE %s" % ( + pg_quote_identifier(db, 'database'), + pg_quote_identifier(tablespace, 'tablespace')) + executed_commands.append(query) + cursor.execute(query) + return True + +# =========================================== +# Module execution. 
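+# (Only the present/absent states need a psycopg2 connection; the dump and
+# restore states shell out to pg_dump/pg_restore/psql via the helpers above.)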
+# + + +def main(): + argument_spec = pgutils.postgres_common_argument_spec() + argument_spec.update( + db=dict(type='str', required=True, aliases=['name']), + owner=dict(type='str', default=''), + template=dict(type='str', default=''), + encoding=dict(type='str', default=''), + lc_collate=dict(type='str', default=''), + lc_ctype=dict(type='str', default=''), + state=dict(type='str', default='present', choices=['absent', 'dump', 'present', 'restore']), + target=dict(type='path', default=''), + target_opts=dict(type='str', default=''), + maintenance_db=dict(type='str', default="postgres"), + session_role=dict(type='str'), + conn_limit=dict(type='str', default=''), + tablespace=dict(type='path', default=''), + dump_extra_args=dict(type='str', default=None), + ) + + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True + ) + + db = module.params["db"] + owner = module.params["owner"] + template = module.params["template"] + encoding = module.params["encoding"] + lc_collate = module.params["lc_collate"] + lc_ctype = module.params["lc_ctype"] + target = module.params["target"] + target_opts = module.params["target_opts"] + state = module.params["state"] + changed = False + maintenance_db = module.params['maintenance_db'] + session_role = module.params["session_role"] + conn_limit = module.params['conn_limit'] + tablespace = module.params['tablespace'] + dump_extra_args = module.params['dump_extra_args'] + + raw_connection = state in ("dump", "restore") + + if not raw_connection: + pgutils.ensure_required_libs(module) + + # To use defaults values, keyword arguments must be absent, so + # check which values are empty and don't include in the **kw + # dictionary + params_map = { + "login_host": "host", + "login_user": "user", + "login_password": "password", + "port": "port", + "ssl_mode": "sslmode", + "ca_cert": "sslrootcert" + } + kw = dict((params_map[k], v) for (k, v) in iteritems(module.params) + if k in params_map and v != '' and v is not None) + + # If a login_unix_socket is specified, incorporate it here. + is_localhost = "host" not in kw or kw["host"] == "" or kw["host"] == "localhost" + + if is_localhost and module.params["login_unix_socket"] != "": + kw["host"] = module.params["login_unix_socket"] + + if target == "": + target = "{0}/{1}.sql".format(os.getcwd(), db) + target = os.path.expanduser(target) + + if not raw_connection: + try: + db_connection = psycopg2.connect(database=maintenance_db, **kw) + + # Enable autocommit so we can create databases + if psycopg2.__version__ >= '2.4.2': + db_connection.autocommit = True + else: + db_connection.set_isolation_level(psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT) + cursor = db_connection.cursor(cursor_factory=psycopg2.extras.DictCursor) + + except TypeError as e: + if 'sslrootcert' in e.args[0]: + module.fail_json(msg='Postgresql server must be at least version 8.4 to support sslrootcert. 
Exception: {0}'.format(to_native(e)), + exception=traceback.format_exc()) + module.fail_json(msg="unable to connect to database: %s" % to_native(e), exception=traceback.format_exc()) + + except Exception as e: + module.fail_json(msg="unable to connect to database: %s" % to_native(e), exception=traceback.format_exc()) + + if session_role: + try: + cursor.execute('SET ROLE "%s"' % session_role) + except Exception as e: + module.fail_json(msg="Could not switch role: %s" % to_native(e), exception=traceback.format_exc()) + + try: + if module.check_mode: + if state == "absent": + changed = db_exists(cursor, db) + elif state == "present": + changed = not db_matches(cursor, db, owner, template, encoding, lc_collate, lc_ctype, conn_limit, tablespace) + module.exit_json(changed=changed, db=db, executed_commands=executed_commands) + + if state == "absent": + try: + changed = db_delete(cursor, db) + except SQLParseError as e: + module.fail_json(msg=to_native(e), exception=traceback.format_exc()) + + elif state == "present": + try: + changed = db_create(cursor, db, owner, template, encoding, lc_collate, lc_ctype, conn_limit, tablespace) + except SQLParseError as e: + module.fail_json(msg=to_native(e), exception=traceback.format_exc()) + + elif state in ("dump", "restore"): + method = state == "dump" and db_dump or db_restore + try: + if state == 'dump': + rc, stdout, stderr, cmd = method(module, target, target_opts, db, dump_extra_args, **kw) + else: + rc, stdout, stderr, cmd = method(module, target, target_opts, db, **kw) + + if rc != 0: + module.fail_json(msg=stderr, stdout=stdout, rc=rc, cmd=cmd) + else: + module.exit_json(changed=True, msg=stdout, stderr=stderr, rc=rc, cmd=cmd, + executed_commands=executed_commands) + except SQLParseError as e: + module.fail_json(msg=to_native(e), exception=traceback.format_exc()) + + except NotSupportedError as e: + module.fail_json(msg=to_native(e), exception=traceback.format_exc()) + except SystemExit: + # Avoid catching this on Python 2.4 + raise + except Exception as e: + module.fail_json(msg="Database query failed: %s" % to_native(e), exception=traceback.format_exc()) + + module.exit_json(changed=changed, db=db, executed_commands=executed_commands) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/database/postgresql/postgresql_ext.py b/plugins/modules/database/postgresql/postgresql_ext.py new file mode 100644 index 0000000000..eaf8f6dc19 --- /dev/null +++ b/plugins/modules/database/postgresql/postgresql_ext.py @@ -0,0 +1,409 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = r''' +--- +module: postgresql_ext +short_description: Add or remove PostgreSQL extensions from a database +description: +- Add or remove PostgreSQL extensions from a database. +options: + name: + description: + - Name of the extension to add or remove. + required: true + type: str + aliases: + - ext + db: + description: + - Name of the database to add or remove the extension to/from. + required: true + type: str + aliases: + - login_db + schema: + description: + - Name of the schema to add the extension to. + type: str + session_role: + description: + - Switch to session_role after connecting. 
+    - The specified session_role must be a role that the current login_user is a member of.
+    - Permissions checking for SQL commands is carried out as though the session_role were the one that had logged in originally.
+    type: str
+  state:
+    description:
+    - The database extension state.
+    default: present
+    choices: [ absent, present ]
+    type: str
+  cascade:
+    description:
+    - Automatically install/remove any extensions that this extension depends on
+      that are not already installed/removed (supported since PostgreSQL 9.6).
+    type: bool
+    default: no
+  login_unix_socket:
+    description:
+    - Path to a Unix domain socket for local connections.
+    type: str
+  ssl_mode:
+    description:
+    - Determines whether or with what priority a secure SSL TCP/IP connection will be negotiated with the server.
+    - See U(https://www.postgresql.org/docs/current/static/libpq-ssl.html) for more information on the modes.
+    - Default of C(prefer) matches libpq default.
+    type: str
+    default: prefer
+    choices: [ allow, disable, prefer, require, verify-ca, verify-full ]
+  ca_cert:
+    description:
+    - Specifies the name of a file containing SSL certificate authority (CA) certificate(s).
+    - If the file exists, the server's certificate will be verified to be signed by one of these authorities.
+    type: str
+    aliases: [ ssl_rootcert ]
+  version:
+    description:
+    - Extension version to add or update to. Has effect with I(state=present) only.
+    - If not specified, the latest extension version will be created.
+    - It can't downgrade an extension version.
+      When a version downgrade is needed, remove the extension and create a new one with the appropriate version.
+    - Set I(version=latest) to update the extension to the latest available version.
+    type: str
+seealso:
+- name: PostgreSQL extensions
+  description: General information about PostgreSQL extensions.
+  link: https://www.postgresql.org/docs/current/external-extensions.html
+- name: CREATE EXTENSION reference
+  description: Complete reference of the CREATE EXTENSION command documentation.
+  link: https://www.postgresql.org/docs/current/sql-createextension.html
+- name: ALTER EXTENSION reference
+  description: Complete reference of the ALTER EXTENSION command documentation.
+  link: https://www.postgresql.org/docs/current/sql-alterextension.html
+- name: DROP EXTENSION reference
+  description: Complete reference of the DROP EXTENSION command documentation.
+  link: https://www.postgresql.org/docs/current/sql-dropextension.html
+notes:
+- The default authentication assumes that you are either logging in as
+  or sudo'ing to the C(postgres) account on the host.
+- This module uses I(psycopg2), a Python PostgreSQL database adapter.
+- You must ensure that C(psycopg2) is installed on the host before using this module.
+- If the remote host is the PostgreSQL server (which is the default case),
+  then PostgreSQL must also be installed on the remote host.
+- For Ubuntu-based systems, install the C(postgresql), C(libpq-dev),
+  and C(python-psycopg2) packages on the remote host before using this module.
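+- For example, if distribution packages are unavailable, C(pip install psycopg2)
+  (or C(psycopg2-binary)) also satisfies this requirement.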
+requirements: [ psycopg2 ] +author: +- Daniel Schep (@dschep) +- Thomas O'Donnell (@andytom) +- Sandro Santilli (@strk) +- Andrew Klychkov (@Andersson007) +extends_documentation_fragment: +- community.general.postgres + +''' + +EXAMPLES = r''' +- name: Adds postgis extension to the database acme in the schema foo + postgresql_ext: + name: postgis + db: acme + schema: foo + +- name: Removes postgis extension to the database acme + postgresql_ext: + name: postgis + db: acme + state: absent + +- name: Adds earthdistance extension to the database template1 cascade + postgresql_ext: + name: earthdistance + db: template1 + cascade: true + +# In the example below, if earthdistance extension is installed, +# it will be removed too because it depends on cube: +- name: Removes cube extension from the database acme cascade + postgresql_ext: + name: cube + db: acme + cascade: yes + state: absent + +- name: Create extension foo of version 1.2 or update it if it's already created + postgresql_ext: + db: acme + name: foo + version: 1.2 + +- name: Assuming extension foo is created, update it to the latest version + postgresql_ext: + db: acme + name: foo + version: latest +''' + +RETURN = r''' +query: + description: List of executed queries. + returned: always + type: list + sample: ["DROP EXTENSION \"acme\""] + +''' + +import traceback + +from distutils.version import LooseVersion + +try: + from psycopg2.extras import DictCursor +except ImportError: + # psycopg2 is checked by connect_to_db() + # from ansible.module_utils.postgres + pass + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.postgres import ( + connect_to_db, + get_conn_params, + postgres_common_argument_spec, +) +from ansible.module_utils._text import to_native + +executed_queries = [] + + +class NotSupportedError(Exception): + pass + + +# =========================================== +# PostgreSQL module specific support methods. +# + +def ext_exists(cursor, ext): + query = "SELECT * FROM pg_extension WHERE extname=%(ext)s" + cursor.execute(query, {'ext': ext}) + return cursor.rowcount == 1 + + +def ext_delete(cursor, ext, cascade): + if ext_exists(cursor, ext): + query = "DROP EXTENSION \"%s\"" % ext + if cascade: + query += " CASCADE" + cursor.execute(query) + executed_queries.append(query) + return True + else: + return False + + +def ext_update_version(cursor, ext, version): + """Update extension version. + + Return True if success. + + Args: + cursor (cursor) -- cursor object of psycopg2 library + ext (str) -- extension name + version (str) -- extension version + """ + if version != 'latest': + query = ("ALTER EXTENSION \"%s\"" % ext) + cursor.execute(query + " UPDATE TO %(ver)s", {'ver': version}) + executed_queries.append(cursor.mogrify(query + " UPDATE TO %(ver)s", {'ver': version})) + else: + query = ("ALTER EXTENSION \"%s\" UPDATE" % ext) + cursor.execute(query) + executed_queries.append(query) + return True + + +def ext_create(cursor, ext, schema, cascade, version): + query = "CREATE EXTENSION \"%s\"" % ext + if schema: + query += " WITH SCHEMA \"%s\"" % schema + if version: + query += " VERSION %(ver)s" + if cascade: + query += " CASCADE" + + if version: + cursor.execute(query, {'ver': version}) + executed_queries.append(cursor.mogrify(query, {'ver': version})) + else: + cursor.execute(query) + executed_queries.append(query) + return True + + +def ext_get_versions(cursor, ext): + """ + Get the current created extension version and available versions. 
+ + Return tuple (current_version, [list of available versions]). + + Note: the list of available versions contains only versions + that higher than the current created version. + If the extension is not created, this list will contain all + available versions. + + Args: + cursor (cursor) -- cursor object of psycopg2 library + ext (str) -- extension name + """ + + # 1. Get the current extension version: + query = ("SELECT extversion FROM pg_catalog.pg_extension " + "WHERE extname = %(ext)s") + + current_version = '0' + cursor.execute(query, {'ext': ext}) + res = cursor.fetchone() + if res: + current_version = res[0] + + # 2. Get available versions: + query = ("SELECT version FROM pg_available_extension_versions " + "WHERE name = %(ext)s") + cursor.execute(query, {'ext': ext}) + res = cursor.fetchall() + + available_versions = [] + if res: + # Make the list of available versions: + for line in res: + if LooseVersion(line[0]) > LooseVersion(current_version): + available_versions.append(line['version']) + + if current_version == '0': + current_version = False + + return (current_version, available_versions) + +# =========================================== +# Module execution. +# + + +def main(): + argument_spec = postgres_common_argument_spec() + argument_spec.update( + db=dict(type="str", required=True, aliases=["login_db"]), + ext=dict(type="str", required=True, aliases=["name"]), + schema=dict(type="str"), + state=dict(type="str", default="present", choices=["absent", "present"]), + cascade=dict(type="bool", default=False), + session_role=dict(type="str"), + version=dict(type="str"), + ) + + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + + ext = module.params["ext"] + schema = module.params["schema"] + state = module.params["state"] + cascade = module.params["cascade"] + version = module.params["version"] + changed = False + + if version and state == 'absent': + module.warn("Parameter version is ignored when state=absent") + + conn_params = get_conn_params(module, module.params) + db_connection = connect_to_db(module, conn_params, autocommit=True) + cursor = db_connection.cursor(cursor_factory=DictCursor) + + try: + # Get extension info and available versions: + curr_version, available_versions = ext_get_versions(cursor, ext) + + if state == "present": + if version == 'latest': + if available_versions: + version = available_versions[-1] + else: + version = '' + + if version: + # If the specific version is passed and it is not available for update: + if version not in available_versions: + if not curr_version: + module.fail_json(msg="Passed version '%s' is not available" % version) + + elif LooseVersion(curr_version) == LooseVersion(version): + changed = False + + else: + module.fail_json(msg="Passed version '%s' is lower than " + "the current created version '%s' or " + "the passed version is not available" % (version, curr_version)) + + # If the specific version is passed and it is higher that the current version: + if curr_version and version: + if LooseVersion(curr_version) < LooseVersion(version): + if module.check_mode: + changed = True + else: + changed = ext_update_version(cursor, ext, version) + + # If the specific version is passed and it is created now: + if curr_version == version: + changed = False + + # If the ext doesn't exist and installed: + elif not curr_version and available_versions: + if module.check_mode: + changed = True + else: + changed = ext_create(cursor, ext, schema, cascade, version) + + # If version is not passed: + 
else: + if not curr_version: + # If the ext doesn't exist and it's installed: + if available_versions: + if module.check_mode: + changed = True + else: + changed = ext_create(cursor, ext, schema, cascade, version) + + # If the ext doesn't exist and not installed: + else: + module.fail_json(msg="Extension %s is not installed" % ext) + + elif state == "absent": + if curr_version: + if module.check_mode: + changed = True + else: + changed = ext_delete(cursor, ext, cascade) + else: + changed = False + + except Exception as e: + db_connection.close() + module.fail_json(msg="Database query failed: %s" % to_native(e), exception=traceback.format_exc()) + + db_connection.close() + module.exit_json(changed=changed, db=module.params["db"], ext=ext, queries=executed_queries) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/database/postgresql/postgresql_idx.py b/plugins/modules/database/postgresql/postgresql_idx.py new file mode 100644 index 0000000000..1b50c2a04e --- /dev/null +++ b/plugins/modules/database/postgresql/postgresql_idx.py @@ -0,0 +1,586 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2018-2019, Andrew Klychkov (@Andersson007) +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = { + 'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community' +} + +DOCUMENTATION = r''' +--- +module: postgresql_idx +short_description: Create or drop indexes from a PostgreSQL database +description: +- Create or drop indexes from a PostgreSQL database. + +options: + idxname: + description: + - Name of the index to create or drop. + type: str + required: true + aliases: + - name + db: + description: + - Name of database to connect to and where the index will be created/dropped. + type: str + aliases: + - login_db + session_role: + description: + - Switch to session_role after connecting. + The specified session_role must be a role that the current login_user is a member of. + - Permissions checking for SQL commands is carried out as though + the session_role were the one that had logged in originally. + type: str + schema: + description: + - Name of a database schema where the index will be created. + type: str + state: + description: + - Index state. + - C(present) implies the index will be created if it does not exist. + - C(absent) implies the index will be dropped if it exists. + type: str + default: present + choices: [ absent, present ] + table: + description: + - Table to create index on it. + - Mutually exclusive with I(state=absent). + type: str + columns: + description: + - List of index columns that need to be covered by index. + - Mutually exclusive with I(state=absent). + type: list + elements: str + aliases: + - column + cond: + description: + - Index conditions. + - Mutually exclusive with I(state=absent). + type: str + idxtype: + description: + - Index type (like btree, gist, gin, etc.). + - Mutually exclusive with I(state=absent). + type: str + aliases: + - type + concurrent: + description: + - Enable or disable concurrent mode (CREATE / DROP INDEX CONCURRENTLY). + - Pay attention, if I(concurrent=no), the table will be locked (ACCESS EXCLUSIVE) during the building process. + For more information about the lock levels see U(https://www.postgresql.org/docs/current/explicit-locking.html). 
+    - If the building process was interrupted for any reason when I(concurrent=yes), the index becomes invalid.
+      In this case it should be dropped and created again.
+    - Mutually exclusive with I(cascade=yes).
+    type: bool
+    default: yes
+  unique:
+    description:
+    - Enable unique index.
+    - Only btree currently supports unique indexes.
+    type: bool
+    default: no
+  tablespace:
+    description:
+    - Set a tablespace for the index.
+    - Mutually exclusive with I(state=absent).
+    required: false
+    type: str
+  storage_params:
+    description:
+    - Storage parameters like fillfactor, vacuum_cleanup_index_scale_factor, etc.
+    - Mutually exclusive with I(state=absent).
+    type: list
+    elements: str
+  cascade:
+    description:
+    - Automatically drop objects that depend on the index,
+      and in turn all objects that depend on those objects.
+    - It is used only with I(state=absent).
+    - Mutually exclusive with I(concurrent=yes).
+    type: bool
+    default: no
+
+seealso:
+- module: postgresql_table
+- module: postgresql_tablespace
+- name: PostgreSQL indexes reference
+  description: General information about PostgreSQL indexes.
+  link: https://www.postgresql.org/docs/current/indexes.html
+- name: CREATE INDEX reference
+  description: Complete reference of the CREATE INDEX command documentation.
+  link: https://www.postgresql.org/docs/current/sql-createindex.html
+- name: ALTER INDEX reference
+  description: Complete reference of the ALTER INDEX command documentation.
+  link: https://www.postgresql.org/docs/current/sql-alterindex.html
+- name: DROP INDEX reference
+  description: Complete reference of the DROP INDEX command documentation.
+  link: https://www.postgresql.org/docs/current/sql-dropindex.html
+
+notes:
+- The index building process can affect database performance.
+- To avoid table locks on production databases, use I(concurrent=yes) (default behavior).
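+- For example, with I(concurrent=yes) the module builds queries like
+  C(CREATE INDEX CONCURRENTLY foo_idx ON test_table USING BTREE (id))
+  (see the I(query) returned value), which allows reads and writes on the table
+  to continue while the index is being built.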
+ +author: +- Andrew Klychkov (@Andersson007) + +extends_documentation_fragment: +- community.general.postgres + +''' + +EXAMPLES = r''' +- name: Create btree index if not exists test_idx concurrently covering columns id and name of table products + postgresql_idx: + db: acme + table: products + columns: id,name + name: test_idx + +- name: Create btree index test_idx concurrently with tablespace called ssd and storage parameter + postgresql_idx: + db: acme + table: products + columns: + - id + - name + idxname: test_idx + tablespace: ssd + storage_params: + - fillfactor=90 + +- name: Create gist index test_gist_idx concurrently on column geo_data of table map + postgresql_idx: + db: somedb + table: map + idxtype: gist + columns: geo_data + idxname: test_gist_idx + +# Note: for the example below pg_trgm extension must be installed for gin_trgm_ops +- name: Create gin index gin0_idx not concurrently on column comment of table test + postgresql_idx: + idxname: gin0_idx + table: test + columns: comment gin_trgm_ops + concurrent: no + idxtype: gin + +- name: Drop btree test_idx concurrently + postgresql_idx: + db: mydb + idxname: test_idx + state: absent + +- name: Drop test_idx cascade + postgresql_idx: + db: mydb + idxname: test_idx + state: absent + cascade: yes + concurrent: no + +- name: Create btree index test_idx concurrently on columns id,comment where column id > 1 + postgresql_idx: + db: mydb + table: test + columns: id,comment + idxname: test_idx + cond: id > 1 + +- name: Create unique btree index if not exists test_unique_idx on column name of table products + postgresql_idx: + db: acme + table: products + columns: name + name: test_unique_idx + unique: yes + concurrent: no +''' + +RETURN = r''' +name: + description: Index name. + returned: always + type: str + sample: 'foo_idx' +state: + description: Index state. + returned: always + type: str + sample: 'present' +schema: + description: Schema where index exists. + returned: always + type: str + sample: 'public' +tablespace: + description: Tablespace where index exists. + returned: always + type: str + sample: 'ssd' +query: + description: Query that was tried to be executed. + returned: always + type: str + sample: 'CREATE INDEX CONCURRENTLY foo_idx ON test_table USING BTREE (id)' +storage_params: + description: Index storage parameters. + returned: always + type: list + sample: [ "fillfactor=90" ] +valid: + description: Index validity. + returned: always + type: bool + sample: true +''' + +try: + from psycopg2.extras import DictCursor +except ImportError: + # psycopg2 is checked by connect_to_db() + # from ansible.module_utils.postgres + pass + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.postgres import ( + connect_to_db, + exec_sql, + get_conn_params, + postgres_common_argument_spec, +) + + +VALID_IDX_TYPES = ('BTREE', 'HASH', 'GIST', 'SPGIST', 'GIN', 'BRIN') + + +# =========================================== +# PostgreSQL module specific support methods. +# + +class Index(object): + + """Class for working with PostgreSQL indexes. + + TODO: + 1. Add possibility to change ownership + 2. Add possibility to change tablespace + 3. Add list called executed_queries (executed_query should be left too) + 4. 
Use self.module instead of passing arguments to the methods whenever possible + + Args: + module (AnsibleModule) -- object of AnsibleModule class + cursor (cursor) -- cursor object of psycopg2 library + schema (str) -- name of the index schema + name (str) -- name of the index + + Attrs: + module (AnsibleModule) -- object of AnsibleModule class + cursor (cursor) -- cursor object of psycopg2 library + schema (str) -- name of the index schema + name (str) -- name of the index + exists (bool) -- flag the index exists in the DB or not + info (dict) -- dict that contents information about the index + executed_query (str) -- executed query + """ + + def __init__(self, module, cursor, schema, name): + self.name = name + if schema: + self.schema = schema + else: + self.schema = 'public' + self.module = module + self.cursor = cursor + self.info = { + 'name': self.name, + 'state': 'absent', + 'schema': '', + 'tblname': '', + 'tblspace': '', + 'valid': True, + 'storage_params': [], + } + self.exists = False + self.__exists_in_db() + self.executed_query = '' + + def get_info(self): + """Refresh index info. + + Return self.info dict. + """ + self.__exists_in_db() + return self.info + + def __exists_in_db(self): + """Check index existence, collect info, add it to self.info dict. + + Return True if the index exists, otherwise, return False. + """ + query = ("SELECT i.schemaname, i.tablename, i.tablespace, " + "pi.indisvalid, c.reloptions " + "FROM pg_catalog.pg_indexes AS i " + "JOIN pg_catalog.pg_class AS c " + "ON i.indexname = c.relname " + "JOIN pg_catalog.pg_index AS pi " + "ON c.oid = pi.indexrelid " + "WHERE i.indexname = %(name)s") + + res = exec_sql(self, query, query_params={'name': self.name}, add_to_executed=False) + if res: + self.exists = True + self.info = dict( + name=self.name, + state='present', + schema=res[0][0], + tblname=res[0][1], + tblspace=res[0][2] if res[0][2] else '', + valid=res[0][3], + storage_params=res[0][4] if res[0][4] else [], + ) + return True + + else: + self.exists = False + return False + + def create(self, tblname, idxtype, columns, cond, tblspace, storage_params, concurrent=True, unique=False): + """Create PostgreSQL index. + + Return True if success, otherwise, return False. + + Args: + tblname (str) -- name of a table for the index + idxtype (str) -- type of the index like BTREE, BRIN, etc + columns (str) -- string of comma-separated columns that need to be covered by index + tblspace (str) -- tablespace for storing the index + storage_params (str) -- string of comma-separated storage parameters + + Kwargs: + concurrent (bool) -- build index in concurrent mode, default True + """ + if self.exists: + return False + + if idxtype is None: + idxtype = "BTREE" + + query = 'CREATE' + + if unique: + query += ' UNIQUE' + + query += ' INDEX' + + if concurrent: + query += ' CONCURRENTLY' + + query += ' %s' % self.name + + if self.schema: + query += ' ON %s.%s ' % (self.schema, tblname) + else: + query += 'public.%s ' % tblname + + query += 'USING %s (%s)' % (idxtype, columns) + + if storage_params: + query += ' WITH (%s)' % storage_params + + if tblspace: + query += ' TABLESPACE %s' % tblspace + + if cond: + query += ' WHERE %s' % cond + + self.executed_query = query + + if exec_sql(self, query, ddl=True, add_to_executed=False): + return True + + return False + + def drop(self, schema, cascade=False, concurrent=True): + """Drop PostgreSQL index. + + Return True if success, otherwise, return False. 
+ + Args: + schema (str) -- name of the index schema + + Kwargs: + cascade (bool) -- automatically drop objects that depend on the index, + default False + concurrent (bool) -- build index in concurrent mode, default True + """ + if not self.exists: + return False + + query = 'DROP INDEX' + + if concurrent: + query += ' CONCURRENTLY' + + if not schema: + query += ' public.%s' % self.name + else: + query += ' %s.%s' % (schema, self.name) + + if cascade: + query += ' CASCADE' + + self.executed_query = query + + if exec_sql(self, query, ddl=True, add_to_executed=False): + return True + + return False + + +# =========================================== +# Module execution. +# + + +def main(): + argument_spec = postgres_common_argument_spec() + argument_spec.update( + idxname=dict(type='str', required=True, aliases=['name']), + db=dict(type='str', aliases=['login_db']), + state=dict(type='str', default='present', choices=['absent', 'present']), + concurrent=dict(type='bool', default=True), + unique=dict(type='bool', default=False), + table=dict(type='str'), + idxtype=dict(type='str', aliases=['type']), + columns=dict(type='list', elements='str', aliases=['column']), + cond=dict(type='str'), + session_role=dict(type='str'), + tablespace=dict(type='str'), + storage_params=dict(type='list', elements='str'), + cascade=dict(type='bool', default=False), + schema=dict(type='str'), + ) + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + + idxname = module.params["idxname"] + state = module.params["state"] + concurrent = module.params["concurrent"] + unique = module.params["unique"] + table = module.params["table"] + idxtype = module.params["idxtype"] + columns = module.params["columns"] + cond = module.params["cond"] + tablespace = module.params["tablespace"] + storage_params = module.params["storage_params"] + cascade = module.params["cascade"] + schema = module.params["schema"] + + if concurrent and cascade: + module.fail_json(msg="Concurrent mode and cascade parameters are mutually exclusive") + + if unique and (idxtype and idxtype != 'btree'): + module.fail_json(msg="Only btree currently supports unique indexes") + + if state == 'present': + if not table: + module.fail_json(msg="Table must be specified") + if not columns: + module.fail_json(msg="At least one column must be specified") + else: + if table or columns or cond or idxtype or tablespace: + module.fail_json(msg="Index %s is going to be removed, so it does not " + "make sense to pass a table name, columns, conditions, " + "index type, or tablespace" % idxname) + + if cascade and state != 'absent': + module.fail_json(msg="cascade parameter used only with state=absent") + + conn_params = get_conn_params(module, module.params) + db_connection = connect_to_db(module, conn_params, autocommit=True) + cursor = db_connection.cursor(cursor_factory=DictCursor) + + # Set defaults: + changed = False + + # Do job: + index = Index(module, cursor, schema, idxname) + kw = index.get_info() + kw['query'] = '' + + # + # check_mode start + if module.check_mode: + if state == 'present' and index.exists: + kw['changed'] = False + module.exit_json(**kw) + + elif state == 'present' and not index.exists: + kw['changed'] = True + module.exit_json(**kw) + + elif state == 'absent' and not index.exists: + kw['changed'] = False + module.exit_json(**kw) + + elif state == 'absent' and index.exists: + kw['changed'] = True + module.exit_json(**kw) + # check_mode end + # + + if state == "present": + if idxtype and idxtype.upper() not in 
VALID_IDX_TYPES: + module.fail_json(msg="Index type '%s' of %s is not in valid types" % (idxtype, idxname)) + + columns = ','.join(columns) + + if storage_params: + storage_params = ','.join(storage_params) + + changed = index.create(table, idxtype, columns, cond, tablespace, storage_params, concurrent, unique) + + if changed: + kw = index.get_info() + kw['state'] = 'present' + kw['query'] = index.executed_query + + else: + changed = index.drop(schema, cascade, concurrent) + + if changed: + kw['state'] = 'absent' + kw['query'] = index.executed_query + + if not kw['valid']: + db_connection.rollback() + module.warn("Index %s is invalid! ROLLBACK" % idxname) + + if not concurrent: + db_connection.commit() + + kw['changed'] = changed + db_connection.close() + module.exit_json(**kw) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/database/postgresql/postgresql_info.py b/plugins/modules/database/postgresql/postgresql_info.py new file mode 100644 index 0000000000..692ff09b1d --- /dev/null +++ b/plugins/modules/database/postgresql/postgresql_info.py @@ -0,0 +1,1012 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2019, Andrew Klychkov (@Andersson007) +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = { + 'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community' +} + +DOCUMENTATION = r''' +--- +module: postgresql_info +short_description: Gather information about PostgreSQL servers +description: +- Gathers information about PostgreSQL servers. +options: + filter: + description: + - Limit the collected information by comma separated string or YAML list. + - Allowable values are C(version), + C(databases), C(settings), C(tablespaces), C(roles), + C(replications), C(repl_slots). + - By default, collects all subsets. + - You can use shell-style (fnmatch) wildcard to pass groups of values (see Examples). + - You can use '!' before value (for example, C(!settings)) to exclude it from the information. + - If you pass including and excluding values to the filter, for example, I(filter=!settings,ver), + the excluding values will be ignored. + type: list + elements: str + db: + description: + - Name of database to connect. + type: str + aliases: + - login_db + session_role: + description: + - Switch to session_role after connecting. The specified session_role must + be a role that the current login_user is a member of. + - Permissions checking for SQL commands is carried out as though + the session_role were the one that had logged in originally. + type: str +seealso: +- module: postgresql_ping +author: +- Andrew Klychkov (@Andersson007) +extends_documentation_fragment: +- community.general.postgres + +''' + +EXAMPLES = r''' +# Display info from postgres hosts. 
+# ansible postgres -m postgresql_info + +# Display only databases and roles info from all hosts using shell-style wildcards: +# ansible all -m postgresql_info -a 'filter=dat*,rol*' + +# Display only replications and repl_slots info from standby hosts using shell-style wildcards: +# ansible standby -m postgresql_info -a 'filter=repl*' + +# Display all info from databases hosts except settings: +# ansible databases -m postgresql_info -a 'filter=!settings' + +- name: Collect PostgreSQL version and extensions + become: yes + become_user: postgres + postgresql_info: + filter: ver*,ext* + +- name: Collect all info except settings and roles + become: yes + become_user: postgres + postgresql_info: + filter: "!settings,!roles" + +# On FreeBSD with PostgreSQL 9.5 version and lower use pgsql user to become +# and pass "postgres" as a database to connect to +- name: Collect tablespaces and repl_slots info + become: yes + become_user: pgsql + postgresql_info: + db: postgres + filter: + - tablesp* + - repl_sl* + +- name: Collect all info except databases + become: yes + become_user: postgres + postgresql_info: + filter: + - "!databases" +''' + +RETURN = r''' +version: + description: Database server version U(https://www.postgresql.org/support/versioning/). + returned: always + type: dict + sample: { "version": { "major": 10, "minor": 6 } } + contains: + major: + description: Major server version. + returned: always + type: int + sample: 11 + minor: + description: Minor server version. + returned: always + type: int + sample: 1 +databases: + description: Information about databases. + returned: always + type: dict + sample: + - { "postgres": { "access_priv": "", "collate": "en_US.UTF-8", + "ctype": "en_US.UTF-8", "encoding": "UTF8", "owner": "postgres", "size": "7997 kB" } } + contains: + database_name: + description: Database name. + returned: always + type: dict + sample: template1 + contains: + access_priv: + description: Database access privileges. + returned: always + type: str + sample: "=c/postgres_npostgres=CTc/postgres" + collate: + description: + - Database collation U(https://www.postgresql.org/docs/current/collation.html). + returned: always + type: str + sample: en_US.UTF-8 + ctype: + description: + - Database LC_CTYPE U(https://www.postgresql.org/docs/current/multibyte.html). + returned: always + type: str + sample: en_US.UTF-8 + encoding: + description: + - Database encoding U(https://www.postgresql.org/docs/current/multibyte.html). + returned: always + type: str + sample: UTF8 + owner: + description: + - Database owner U(https://www.postgresql.org/docs/current/sql-createdatabase.html). + returned: always + type: str + sample: postgres + size: + description: Database size in bytes. + returned: always + type: str + sample: 8189415 + extensions: + description: + - Extensions U(https://www.postgresql.org/docs/current/sql-createextension.html). + returned: always + type: dict + sample: + - { "plpgsql": { "description": "PL/pgSQL procedural language", + "extversion": { "major": 1, "minor": 0 } } } + contains: + extdescription: + description: Extension description. + returned: if existent + type: str + sample: PL/pgSQL procedural language + extversion: + description: Extension description. + returned: always + type: dict + contains: + major: + description: Extension major version. + returned: always + type: int + sample: 1 + minor: + description: Extension minor version. + returned: always + type: int + sample: 0 + nspname: + description: Namespace where the extension is. 
+ returned: always + type: str + sample: pg_catalog + languages: + description: Procedural languages U(https://www.postgresql.org/docs/current/xplang.html). + returned: always + type: dict + sample: { "sql": { "lanacl": "", "lanowner": "postgres" } } + contains: + lanacl: + description: + - Language access privileges + U(https://www.postgresql.org/docs/current/catalog-pg-language.html). + returned: always + type: str + sample: "{postgres=UC/postgres,=U/postgres}" + lanowner: + description: + - Language owner U(https://www.postgresql.org/docs/current/catalog-pg-language.html). + returned: always + type: str + sample: postgres + namespaces: + description: + - Namespaces (schema) U(https://www.postgresql.org/docs/current/sql-createschema.html). + returned: always + type: dict + sample: { "pg_catalog": { "nspacl": "{postgres=UC/postgres,=U/postgres}", "nspowner": "postgres" } } + contains: + nspacl: + description: + - Access privileges U(https://www.postgresql.org/docs/current/catalog-pg-namespace.html). + returned: always + type: str + sample: "{postgres=UC/postgres,=U/postgres}" + nspowner: + description: + - Schema owner U(https://www.postgresql.org/docs/current/catalog-pg-namespace.html). + returned: always + type: str + sample: postgres + publications: + description: + - Information about logical replication publications (available for PostgreSQL 10 and higher) + U(https://www.postgresql.org/docs/current/logical-replication-publication.html). + - Content depends on PostgreSQL server version. + returned: if configured + type: dict + version_added: "2.10" + sample: { "pub1": { "ownername": "postgres", "puballtables": true, "pubinsert": true, "pubupdate": true } } + subscriptions: + description: + - Information about replication subscriptions (available for PostgreSQL 10 and higher) + U(https://www.postgresql.org/docs/current/logical-replication-subscription.html). + - Content depends on PostgreSQL server version. + returned: if configured + type: dict + version_added: "2.10" + sample: + - { "my_subscription": {"ownername": "postgres", "subenabled": true, "subpublications": ["first_publication"] } } +repl_slots: + description: + - Replication slots (available in 9.4 and later) + U(https://www.postgresql.org/docs/current/view-pg-replication-slots.html). + returned: if existent + type: dict + sample: { "slot0": { "active": false, "database": null, "plugin": null, "slot_type": "physical" } } + contains: + active: + description: + - True means that a receiver has connected to it, and it is currently reserving archives. + returned: always + type: bool + sample: true + database: + description: Database name this slot is associated with, or null. + returned: always + type: str + sample: acme + plugin: + description: + - Base name of the shared object containing the output plugin + this logical slot is using, or null for physical slots. + returned: always + type: str + sample: pgoutput + slot_type: + description: The slot type - physical or logical. + returned: always + type: str + sample: logical +replications: + description: + - Information about the current replications by process PIDs + U(https://www.postgresql.org/docs/current/monitoring-stats.html#MONITORING-STATS-VIEWS-TABLE). 
+ returned: if pg_stat_replication view existent + type: dict + sample: + - { 76580: { "app_name": "standby1", "backend_start": "2019-02-03 00:14:33.908593+03", + "client_addr": "10.10.10.2", "client_hostname": "", "state": "streaming", "usename": "postgres" } } + contains: + usename: + description: + - Name of the user logged into this WAL sender process ('usename' is a column name in pg_stat_replication view). + returned: always + type: str + sample: replication_user + app_name: + description: Name of the application that is connected to this WAL sender. + returned: if existent + type: str + sample: acme_srv + client_addr: + description: + - IP address of the client connected to this WAL sender. + - If this field is null, it indicates that the client is connected + via a Unix socket on the server machine. + returned: always + type: str + sample: 10.0.0.101 + client_hostname: + description: + - Host name of the connected client, as reported by a reverse DNS lookup of client_addr. + - This field will only be non-null for IP connections, and only when log_hostname is enabled. + returned: always + type: str + sample: dbsrv1 + backend_start: + description: Time when this process was started, i.e., when the client connected to this WAL sender. + returned: always + type: str + sample: "2019-02-03 00:14:33.908593+03" + state: + description: Current WAL sender state. + returned: always + type: str + sample: streaming +tablespaces: + description: + - Information about tablespaces U(https://www.postgresql.org/docs/current/catalog-pg-tablespace.html). + returned: always + type: dict + sample: + - { "test": { "spcacl": "{postgres=C/postgres,andreyk=C/postgres}", "spcoptions": [ "seq_page_cost=1" ], + "spcowner": "postgres" } } + contains: + spcacl: + description: Tablespace access privileges. + returned: always + type: str + sample: "{postgres=C/postgres,andreyk=C/postgres}" + spcoptions: + description: Tablespace-level options. + returned: always + type: list + sample: [ "seq_page_cost=1" ] + spcowner: + description: Owner of the tablespace. + returned: always + type: str + sample: test_user +roles: + description: + - Information about roles U(https://www.postgresql.org/docs/current/user-manag.html). + returned: always + type: dict + sample: + - { "test_role": { "canlogin": true, "member_of": [ "user_ro" ], "superuser": false, + "valid_until": "9999-12-31T23:59:59.999999+00:00" } } + contains: + canlogin: + description: Login privilege U(https://www.postgresql.org/docs/current/role-attributes.html). + returned: always + type: bool + sample: true + member_of: + description: + - Role membership U(https://www.postgresql.org/docs/current/role-membership.html). + returned: always + type: list + sample: [ "read_only_users" ] + superuser: + description: User is a superuser or not. + returned: always + type: bool + sample: false + valid_until: + description: + - Password expiration date U(https://www.postgresql.org/docs/current/sql-alterrole.html). + returned: always + type: str + sample: "9999-12-31T23:59:59.999999+00:00" +pending_restart_settings: + description: + - List of settings that are pending restart to be set. + returned: always + type: list + sample: [ "shared_buffers" ] +settings: + description: + - Information about run-time server parameters + U(https://www.postgresql.org/docs/current/view-pg-settings.html). 
+ returned: always + type: dict + sample: + - { "work_mem": { "boot_val": "4096", "context": "user", "max_val": "2147483647", + "min_val": "64", "setting": "8192", "sourcefile": "/var/lib/pgsql/10/data/postgresql.auto.conf", + "unit": "kB", "vartype": "integer", "val_in_bytes": 4194304 } } + contains: + setting: + description: Current value of the parameter. + returned: always + type: str + sample: 49152 + unit: + description: Implicit unit of the parameter. + returned: always + type: str + sample: kB + boot_val: + description: + - Parameter value assumed at server startup if the parameter is not otherwise set. + returned: always + type: str + sample: 4096 + min_val: + description: + - Minimum allowed value of the parameter (null for non-numeric values). + returned: always + type: str + sample: 64 + max_val: + description: + - Maximum allowed value of the parameter (null for non-numeric values). + returned: always + type: str + sample: 2147483647 + sourcefile: + description: + - Configuration file the current value was set in. + - Null for values set from sources other than configuration files, + or when examined by a user who is neither a superuser or a member of pg_read_all_settings. + - Helpful when using include directives in configuration files. + returned: always + type: str + sample: /var/lib/pgsql/10/data/postgresql.auto.conf + context: + description: + - Context required to set the parameter's value. + - For more information see U(https://www.postgresql.org/docs/current/view-pg-settings.html). + returned: always + type: str + sample: user + vartype: + description: + - Parameter type (bool, enum, integer, real, or string). + returned: always + type: str + sample: integer + val_in_bytes: + description: + - Current value of the parameter in bytes. + returned: if supported + type: int + sample: 2147483647 + pretty_val: + description: + - Value presented in the pretty form. + returned: always + type: str + sample: 2MB + pending_restart: + description: + - True if the value has been changed in the configuration file but needs a restart; or false otherwise. + - Returns only if C(settings) is passed. + returned: always + type: bool + sample: false +''' + +from fnmatch import fnmatch + +try: + from psycopg2.extras import DictCursor +except ImportError: + # psycopg2 is checked by connect_to_db() + # from ansible.module_utils.postgres + pass + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.postgres import ( + connect_to_db, + get_conn_params, + postgres_common_argument_spec, +) +from ansible.module_utils.six import iteritems +from ansible.module_utils._text import to_native + + +# =========================================== +# PostgreSQL module specific support methods. +# + +class PgDbConn(object): + + """Auxiliary class for working with PostgreSQL connection objects. + + Arguments: + module (AnsibleModule): Object of AnsibleModule class that + contains connection parameters. + """ + + def __init__(self, module): + self.module = module + self.db_conn = None + self.cursor = None + + def connect(self): + """Connect to a PostgreSQL database and return a cursor object. + + Note: connection parameters are passed by self.module object. 
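+
+        Example (an illustrative sketch; 'acme' is a hypothetical database name):
+
+            conn = PgDbConn(module)
+            cursor = conn.connect()          # DictCursor built from module params
+            cursor = conn.reconnect('acme')  # close, then reopen against 'acme'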
+ """ + conn_params = get_conn_params(self.module, self.module.params, warn_db_default=False) + self.db_conn = connect_to_db(self.module, conn_params) + return self.db_conn.cursor(cursor_factory=DictCursor) + + def reconnect(self, dbname): + """Reconnect to another database and return a PostgreSQL cursor object. + + Arguments: + dbname (string): Database name to connect to. + """ + self.db_conn.close() + + self.module.params['database'] = dbname + return self.connect() + + +class PgClusterInfo(object): + + """Class for collection information about a PostgreSQL instance. + + Arguments: + module (AnsibleModule): Object of AnsibleModule class. + db_conn_obj (psycopg2.connect): PostgreSQL connection object. + """ + + def __init__(self, module, db_conn_obj): + self.module = module + self.db_obj = db_conn_obj + self.cursor = db_conn_obj.connect() + self.pg_info = { + "version": {}, + "tablespaces": {}, + "databases": {}, + "replications": {}, + "repl_slots": {}, + "settings": {}, + "roles": {}, + "pending_restart_settings": [], + } + + def collect(self, val_list=False): + """Collect information based on 'filter' option.""" + subset_map = { + "version": self.get_pg_version, + "tablespaces": self.get_tablespaces, + "databases": self.get_db_info, + "replications": self.get_repl_info, + "repl_slots": self.get_rslot_info, + "settings": self.get_settings, + "roles": self.get_role_info, + } + + incl_list = [] + excl_list = [] + # Notice: incl_list and excl_list + # don't make sense together, therefore, + # if incl_list is not empty, we collect + # only values from it: + if val_list: + for i in val_list: + if i[0] != '!': + incl_list.append(i) + else: + excl_list.append(i.lstrip('!')) + + if incl_list: + for s in subset_map: + for i in incl_list: + if fnmatch(s, i): + subset_map[s]() + break + elif excl_list: + found = False + # Collect info: + for s in subset_map: + for e in excl_list: + if fnmatch(s, e): + found = True + + if not found: + subset_map[s]() + else: + found = False + + # Default behaviour, if include or exclude is not passed: + else: + # Just collect info for each item: + for s in subset_map: + subset_map[s]() + + return self.pg_info + + def get_pub_info(self): + """Get publication statistics.""" + query = ("SELECT p.*, r.rolname AS ownername " + "FROM pg_catalog.pg_publication AS p " + "JOIN pg_catalog.pg_roles AS r " + "ON p.pubowner = r.oid") + + result = self.__exec_sql(query) + + if result: + result = [dict(row) for row in result] + else: + return {} + + publications = {} + + for elem in result: + if not publications.get(elem['pubname']): + publications[elem['pubname']] = {} + + for key, val in iteritems(elem): + if key != 'pubname': + publications[elem['pubname']][key] = val + + return publications + + def get_subscr_info(self): + """Get subscription statistics.""" + query = ("SELECT s.*, r.rolname AS ownername, d.datname AS dbname " + "FROM pg_catalog.pg_subscription s " + "JOIN pg_catalog.pg_database d " + "ON s.subdbid = d.oid " + "JOIN pg_catalog.pg_roles AS r " + "ON s.subowner = r.oid") + + result = self.__exec_sql(query) + + if result: + result = [dict(row) for row in result] + else: + return {} + + subscr_info = {} + + for elem in result: + if not subscr_info.get(elem['dbname']): + subscr_info[elem['dbname']] = {} + + if not subscr_info[elem['dbname']].get(elem['subname']): + subscr_info[elem['dbname']][elem['subname']] = {} + + for key, val in iteritems(elem): + if key not in ('subname', 'dbname'): + subscr_info[elem['dbname']][elem['subname']][key] = val + + return 
subscr_info + + def get_tablespaces(self): + """Get information about tablespaces.""" + # Check spcoption exists: + opt = self.__exec_sql("SELECT column_name " + "FROM information_schema.columns " + "WHERE table_name = 'pg_tablespace' " + "AND column_name = 'spcoptions'") + + if not opt: + query = ("SELECT s.spcname, a.rolname, s.spcacl " + "FROM pg_tablespace AS s " + "JOIN pg_authid AS a ON s.spcowner = a.oid") + else: + query = ("SELECT s.spcname, a.rolname, s.spcacl, s.spcoptions " + "FROM pg_tablespace AS s " + "JOIN pg_authid AS a ON s.spcowner = a.oid") + + res = self.__exec_sql(query) + ts_dict = {} + for i in res: + ts_name = i[0] + ts_info = dict( + spcowner=i[1], + spcacl=i[2] if i[2] else '', + ) + if opt: + ts_info['spcoptions'] = i[3] if i[3] else [] + + ts_dict[ts_name] = ts_info + + self.pg_info["tablespaces"] = ts_dict + + def get_ext_info(self): + """Get information about existing extensions.""" + # Check that pg_extension exists: + res = self.__exec_sql("SELECT EXISTS (SELECT 1 FROM " + "information_schema.tables " + "WHERE table_name = 'pg_extension')") + if not res[0][0]: + return True + + query = ("SELECT e.extname, e.extversion, n.nspname, c.description " + "FROM pg_catalog.pg_extension AS e " + "LEFT JOIN pg_catalog.pg_namespace AS n " + "ON n.oid = e.extnamespace " + "LEFT JOIN pg_catalog.pg_description AS c " + "ON c.objoid = e.oid " + "AND c.classoid = 'pg_catalog.pg_extension'::pg_catalog.regclass") + res = self.__exec_sql(query) + ext_dict = {} + for i in res: + ext_ver = i[1].split('.') + + ext_dict[i[0]] = dict( + extversion=dict( + major=int(ext_ver[0]), + minor=int(ext_ver[1]), + ), + nspname=i[2], + description=i[3], + ) + + return ext_dict + + def get_role_info(self): + """Get information about roles (in PgSQL groups and users are roles).""" + query = ("SELECT r.rolname, r.rolsuper, r.rolcanlogin, " + "r.rolvaliduntil, " + "ARRAY(SELECT b.rolname " + "FROM pg_catalog.pg_auth_members AS m " + "JOIN pg_catalog.pg_roles AS b ON (m.roleid = b.oid) " + "WHERE m.member = r.oid) AS memberof " + "FROM pg_catalog.pg_roles AS r " + "WHERE r.rolname !~ '^pg_'") + + res = self.__exec_sql(query) + rol_dict = {} + for i in res: + rol_dict[i[0]] = dict( + superuser=i[1], + canlogin=i[2], + valid_until=i[3] if i[3] else '', + member_of=i[4] if i[4] else [], + ) + + self.pg_info["roles"] = rol_dict + + def get_rslot_info(self): + """Get information about replication slots if exist.""" + # Check that pg_replication_slots exists: + res = self.__exec_sql("SELECT EXISTS (SELECT 1 FROM " + "information_schema.tables " + "WHERE table_name = 'pg_replication_slots')") + if not res[0][0]: + return True + + query = ("SELECT slot_name, plugin, slot_type, database, " + "active FROM pg_replication_slots") + res = self.__exec_sql(query) + + # If there is no replication: + if not res: + return True + + rslot_dict = {} + for i in res: + rslot_dict[i[0]] = dict( + plugin=i[1], + slot_type=i[2], + database=i[3], + active=i[4], + ) + + self.pg_info["repl_slots"] = rslot_dict + + def get_settings(self): + """Get server settings.""" + # Check pending restart column exists: + pend_rest_col_exists = self.__exec_sql("SELECT 1 FROM information_schema.columns " + "WHERE table_name = 'pg_settings' " + "AND column_name = 'pending_restart'") + if not pend_rest_col_exists: + query = ("SELECT name, setting, unit, context, vartype, " + "boot_val, min_val, max_val, sourcefile " + "FROM pg_settings") + else: + query = ("SELECT name, setting, unit, context, vartype, " + "boot_val, min_val, max_val, 
sourcefile, pending_restart " + "FROM pg_settings") + + res = self.__exec_sql(query) + + set_dict = {} + for i in res: + val_in_bytes = None + setting = i[1] + if i[2]: + unit = i[2] + else: + unit = '' + + if unit == 'kB': + val_in_bytes = int(setting) * 1024 + + elif unit == '8kB': + val_in_bytes = int(setting) * 1024 * 8 + + elif unit == 'MB': + val_in_bytes = int(setting) * 1024 * 1024 + + if val_in_bytes is not None and val_in_bytes < 0: + val_in_bytes = 0 + + setting_name = i[0] + pretty_val = self.__get_pretty_val(setting_name) + + pending_restart = None + if pend_rest_col_exists: + pending_restart = i[9] + + set_dict[setting_name] = dict( + setting=setting, + unit=unit, + context=i[3], + vartype=i[4], + boot_val=i[5] if i[5] else '', + min_val=i[6] if i[6] else '', + max_val=i[7] if i[7] else '', + sourcefile=i[8] if i[8] else '', + pretty_val=pretty_val, + ) + if val_in_bytes is not None: + set_dict[setting_name]['val_in_bytes'] = val_in_bytes + + if pending_restart is not None: + set_dict[setting_name]['pending_restart'] = pending_restart + if pending_restart: + self.pg_info["pending_restart_settings"].append(setting_name) + + self.pg_info["settings"] = set_dict + + def get_repl_info(self): + """Get information about replication if the server is a master.""" + # Check that pg_replication_slots exists: + res = self.__exec_sql("SELECT EXISTS (SELECT 1 FROM " + "information_schema.tables " + "WHERE table_name = 'pg_stat_replication')") + if not res[0][0]: + return True + + query = ("SELECT r.pid, a.rolname, r.application_name, r.client_addr, " + "r.client_hostname, r.backend_start::text, r.state " + "FROM pg_stat_replication AS r " + "JOIN pg_authid AS a ON r.usesysid = a.oid") + res = self.__exec_sql(query) + + # If there is no replication: + if not res: + return True + + repl_dict = {} + for i in res: + repl_dict[i[0]] = dict( + usename=i[1], + app_name=i[2] if i[2] else '', + client_addr=i[3], + client_hostname=i[4] if i[4] else '', + backend_start=i[5], + state=i[6], + ) + + self.pg_info["replications"] = repl_dict + + def get_lang_info(self): + """Get information about current supported languages.""" + query = ("SELECT l.lanname, a.rolname, l.lanacl " + "FROM pg_language AS l " + "JOIN pg_authid AS a ON l.lanowner = a.oid") + res = self.__exec_sql(query) + lang_dict = {} + for i in res: + lang_dict[i[0]] = dict( + lanowner=i[1], + lanacl=i[2] if i[2] else '', + ) + + return lang_dict + + def get_namespaces(self): + """Get information about namespaces.""" + query = ("SELECT n.nspname, a.rolname, n.nspacl " + "FROM pg_catalog.pg_namespace AS n " + "JOIN pg_authid AS a ON a.oid = n.nspowner") + res = self.__exec_sql(query) + + nsp_dict = {} + for i in res: + nsp_dict[i[0]] = dict( + nspowner=i[1], + nspacl=i[2] if i[2] else '', + ) + + return nsp_dict + + def get_pg_version(self): + """Get major and minor PostgreSQL server version.""" + query = "SELECT version()" + raw = self.__exec_sql(query)[0][0] + raw = raw.split()[1].split('.') + self.pg_info["version"] = dict( + major=int(raw[0]), + minor=int(raw[1]), + ) + + def get_db_info(self): + """Get information about the current database.""" + # Following query returns: + # Name, Owner, Encoding, Collate, Ctype, Access Priv, Size + query = ("SELECT d.datname, " + "pg_catalog.pg_get_userbyid(d.datdba), " + "pg_catalog.pg_encoding_to_char(d.encoding), " + "d.datcollate, " + "d.datctype, " + "pg_catalog.array_to_string(d.datacl, E'\n'), " + "CASE WHEN pg_catalog.has_database_privilege(d.datname, 'CONNECT') " + "THEN 
pg_catalog.pg_database_size(d.datname)::text "
+                 "ELSE 'No Access' END, "
+                 "t.spcname "
+                 "FROM pg_catalog.pg_database AS d "
+                 "JOIN pg_catalog.pg_tablespace t ON d.dattablespace = t.oid "
+                 "WHERE d.datname != 'template0'")
+
+        res = self.__exec_sql(query)
+
+        db_dict = {}
+        for i in res:
+            db_dict[i[0]] = dict(
+                owner=i[1],
+                encoding=i[2],
+                collate=i[3],
+                ctype=i[4],
+                access_priv=i[5] if i[5] else '',
+                size=i[6],
+            )
+
+        if self.cursor.connection.server_version >= 100000:
+            subscr_info = self.get_subscr_info()
+
+        for datname in db_dict:
+            self.cursor = self.db_obj.reconnect(datname)
+            db_dict[datname]['namespaces'] = self.get_namespaces()
+            db_dict[datname]['extensions'] = self.get_ext_info()
+            db_dict[datname]['languages'] = self.get_lang_info()
+            if self.cursor.connection.server_version >= 100000:
+                db_dict[datname]['publications'] = self.get_pub_info()
+                db_dict[datname]['subscriptions'] = subscr_info.get(datname, {})
+
+        self.pg_info["databases"] = db_dict
+
+    def __get_pretty_val(self, setting):
+        """Get the setting's value as shown by the SHOW command."""
+        return self.__exec_sql("SHOW %s" % setting)[0][0]
+
+    def __exec_sql(self, query):
+        """Execute SQL and return the result."""
+        try:
+            self.cursor.execute(query)
+            res = self.cursor.fetchall()
+            if res:
+                return res
+        except Exception as e:
+            self.module.fail_json(msg="Cannot execute SQL '%s': %s" % (query, to_native(e)))
+            self.cursor.close()
+        return False
+
+# ===========================================
+# Module execution.
+#
+
+
+def main():
+    argument_spec = postgres_common_argument_spec()
+    argument_spec.update(
+        db=dict(type='str', aliases=['login_db']),
+        filter=dict(type='list', elements='str'),
+        session_role=dict(type='str'),
+    )
+    module = AnsibleModule(
+        argument_spec=argument_spec,
+        supports_check_mode=True,
+    )
+
+    filter_ = module.params["filter"]
+
+    db_conn_obj = PgDbConn(module)
+
+    # Do the job:
+    pg_info = PgClusterInfo(module, db_conn_obj)
+
+    module.exit_json(**pg_info.collect(filter_))
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/database/postgresql/postgresql_lang.py b/plugins/modules/database/postgresql/postgresql_lang.py
new file mode 100644
index 0000000000..b5aae27bd3
--- /dev/null
+++ b/plugins/modules/database/postgresql/postgresql_lang.py
@@ -0,0 +1,350 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (c) 2014, Jens Depuydt
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+DOCUMENTATION = r'''
+---
+module: postgresql_lang
+short_description: Adds, removes or changes procedural languages with a PostgreSQL database
+description:
+- Adds, removes or changes procedural languages with a PostgreSQL database.
+- This module allows you to add a language, remove a language or change the trust
+  relationship with a PostgreSQL database.
+- The module can be used on the machine where it is executed or on a remote host.
+- When removing a language from a database, it is possible that dependencies prevent
+  the language from being removed. In that case, you can specify I(cascade=yes) to
+  automatically drop objects that depend on the language (such as functions in the
+  language).
+- In case the language can't be deleted because it is required by the
+  database system, you can specify I(fail_on_drop=no) to ignore the error.
+- Be careful when marking a language as trusted since this could be a potential + security breach. Untrusted languages allow only users with the PostgreSQL superuser + privilege to use this language to create new functions. +options: + lang: + description: + - Name of the procedural language to add, remove or change. + required: true + type: str + aliases: + - name + trust: + description: + - Make this language trusted for the selected db. + type: bool + default: 'no' + db: + description: + - Name of database to connect to and where the language will be added, removed or changed. + type: str + aliases: + - login_db + required: true + force_trust: + description: + - Marks the language as trusted, even if it's marked as untrusted in pg_pltemplate. + - Use with care! + type: bool + default: 'no' + fail_on_drop: + description: + - If C(yes), fail when removing a language. Otherwise just log and continue. + - In some cases, it is not possible to remove a language (used by the db-system). + - When dependencies block the removal, consider using I(cascade). + type: bool + default: 'yes' + cascade: + description: + - When dropping a language, also delete object that depend on this language. + - Only used when I(state=absent). + type: bool + default: 'no' + session_role: + description: + - Switch to session_role after connecting. + - The specified I(session_role) must be a role that the current I(login_user) is a member of. + - Permissions checking for SQL commands is carried out as though the I(session_role) were the one that had logged in originally. + type: str + state: + description: + - The state of the language for the selected database. + type: str + default: present + choices: [ absent, present ] + login_unix_socket: + description: + - Path to a Unix domain socket for local connections. + type: str + ssl_mode: + description: + - Determines whether or with what priority a secure SSL TCP/IP connection will be negotiated with the server. + - See U(https://www.postgresql.org/docs/current/static/libpq-ssl.html) for more information on the modes. + - Default of C(prefer) matches libpq default. + type: str + default: prefer + choices: [ allow, disable, prefer, require, verify-ca, verify-full ] + ca_cert: + description: + - Specifies the name of a file containing SSL certificate authority (CA) certificate(s). + - If the file exists, the server's certificate will be verified to be signed by one of these authorities. + type: str + aliases: [ ssl_rootcert ] + owner: + description: + - Set an owner for the language. + - Ignored when I(state=absent). + type: str +seealso: +- name: PostgreSQL languages + description: General information about PostgreSQL languages. + link: https://www.postgresql.org/docs/current/xplang.html +- name: CREATE LANGUAGE reference + description: Complete reference of the CREATE LANGUAGE command documentation. + link: https://www.postgresql.org/docs/current/sql-createlanguage.html +- name: ALTER LANGUAGE reference + description: Complete reference of the ALTER LANGUAGE command documentation. + link: https://www.postgresql.org/docs/current/sql-alterlanguage.html +- name: DROP LANGUAGE reference + description: Complete reference of the DROP LANGUAGE command documentation. 
+  link: https://www.postgresql.org/docs/current/sql-droplanguage.html
+author:
+- Jens Depuydt (@jensdepuydt)
+- Thomas O'Donnell (@andytom)
+extends_documentation_fragment:
+- community.general.postgres
+
+'''
+
+EXAMPLES = r'''
+- name: Add language pltclu to database testdb if it doesn't exist
+  postgresql_lang: db=testdb lang=pltclu state=present
+
+# Add language pltclu to database testdb if it doesn't exist and mark it as trusted.
+# Marks the language as trusted if it exists but isn't trusted yet.
+# force_trust makes sure that the language will be marked as trusted
+- name: Add language pltclu to database testdb if it doesn't exist and mark it as trusted
+  postgresql_lang:
+    db: testdb
+    lang: pltclu
+    state: present
+    trust: yes
+    force_trust: yes
+
+- name: Remove language pltclu from database testdb
+  postgresql_lang:
+    db: testdb
+    lang: pltclu
+    state: absent
+
+- name: Remove language pltclu from database testdb and remove all dependencies
+  postgresql_lang:
+    db: testdb
+    lang: pltclu
+    state: absent
+    cascade: yes
+
+- name: Remove language c from database testdb but ignore errors if something prevents the removal
+  postgresql_lang:
+    db: testdb
+    lang: c
+    state: absent
+    fail_on_drop: no
+
+- name: In testdb change owner of mylang to alice
+  postgresql_lang:
+    db: testdb
+    lang: mylang
+    owner: alice
+'''
+
+RETURN = r'''
+queries:
+  description: List of executed queries.
+  returned: always
+  type: list
+  sample: ['CREATE LANGUAGE "acme"']
+  version_added: '2.8'
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.postgres import (
+    connect_to_db,
+    get_conn_params,
+    postgres_common_argument_spec,
+)
+
+executed_queries = []
+
+
+def lang_exists(cursor, lang):
+    """Checks if language exists for db"""
+    query = "SELECT lanname FROM pg_language WHERE lanname = %(lang)s"
+    cursor.execute(query, {'lang': lang})
+    return cursor.rowcount > 0
+
+
+def lang_istrusted(cursor, lang):
+    """Checks if language is trusted for db"""
+    query = "SELECT lanpltrusted FROM pg_language WHERE lanname = %(lang)s"
+    cursor.execute(query, {'lang': lang})
+    return cursor.fetchone()[0]
+
+
+def lang_altertrust(cursor, lang, trust):
+    """Changes whether the language is trusted for db"""
+    query = "UPDATE pg_language SET lanpltrusted = %(trust)s WHERE lanname = %(lang)s"
+    cursor.execute(query, {'trust': trust, 'lang': lang})
+    executed_queries.append(cursor.mogrify(query, {'trust': trust, 'lang': lang}))
+    return True
+
+
+def lang_add(cursor, lang, trust):
+    """Adds language for db"""
+    if trust:
+        query = 'CREATE TRUSTED LANGUAGE "%s"' % lang
+    else:
+        query = 'CREATE LANGUAGE "%s"' % lang
+    executed_queries.append(query)
+    cursor.execute(query)
+    return True
+
+
+def lang_drop(cursor, lang, cascade):
+    """Drops language for db"""
+    cursor.execute("SAVEPOINT ansible_pgsql_lang_drop")
+    try:
+        if cascade:
+            query = "DROP LANGUAGE \"%s\" CASCADE" % lang
+        else:
+            query = "DROP LANGUAGE \"%s\"" % lang
+        executed_queries.append(query)
+        cursor.execute(query)
+    except Exception:
+        cursor.execute("ROLLBACK TO SAVEPOINT ansible_pgsql_lang_drop")
+        cursor.execute("RELEASE SAVEPOINT ansible_pgsql_lang_drop")
+        return False
+    cursor.execute("RELEASE SAVEPOINT ansible_pgsql_lang_drop")
+    return True
+
+
+def get_lang_owner(cursor, lang):
+    """Get language owner.
+
+    Args:
+        cursor (cursor): psycopg2 cursor object.
+        lang (str): language name.
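+
+    Returns:
+        str: name of the role that owns the language.
+
+    Example (hypothetical):
+        get_lang_owner(cursor, 'plpgsql')  # -> 'postgres'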
+ """ + query = ("SELECT r.rolname FROM pg_language l " + "JOIN pg_roles r ON l.lanowner = r.oid " + "WHERE l.lanname = %(lang)s") + cursor.execute(query, {'lang': lang}) + return cursor.fetchone()[0] + + +def set_lang_owner(cursor, lang, owner): + """Set language owner. + + Args: + cursor (cursor): psycopg2 cursor object. + lang (str): language name. + owner (str): name of new owner. + """ + query = "ALTER LANGUAGE \"%s\" OWNER TO %s" % (lang, owner) + executed_queries.append(query) + cursor.execute(query) + return True + + +def main(): + argument_spec = postgres_common_argument_spec() + argument_spec.update( + db=dict(type="str", required=True, aliases=["login_db"]), + lang=dict(type="str", required=True, aliases=["name"]), + state=dict(type="str", default="present", choices=["absent", "present"]), + trust=dict(type="bool", default="no"), + force_trust=dict(type="bool", default="no"), + cascade=dict(type="bool", default="no"), + fail_on_drop=dict(type="bool", default="yes"), + session_role=dict(type="str"), + owner=dict(type="str"), + ) + + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + + db = module.params["db"] + lang = module.params["lang"] + state = module.params["state"] + trust = module.params["trust"] + force_trust = module.params["force_trust"] + cascade = module.params["cascade"] + fail_on_drop = module.params["fail_on_drop"] + owner = module.params["owner"] + + conn_params = get_conn_params(module, module.params) + db_connection = connect_to_db(module, conn_params, autocommit=False) + cursor = db_connection.cursor() + + changed = False + kw = {'db': db, 'lang': lang, 'trust': trust} + + if state == "present": + if lang_exists(cursor, lang): + lang_trusted = lang_istrusted(cursor, lang) + if (lang_trusted and not trust) or (not lang_trusted and trust): + if module.check_mode: + changed = True + else: + changed = lang_altertrust(cursor, lang, trust) + else: + if module.check_mode: + changed = True + else: + changed = lang_add(cursor, lang, trust) + if force_trust: + changed = lang_altertrust(cursor, lang, trust) + + else: + if lang_exists(cursor, lang): + if module.check_mode: + changed = True + kw['lang_dropped'] = True + else: + changed = lang_drop(cursor, lang, cascade) + if fail_on_drop and not changed: + msg = ("unable to drop language, use cascade " + "to delete dependencies or fail_on_drop=no to ignore") + module.fail_json(msg=msg) + kw['lang_dropped'] = changed + + if owner and state == 'present': + if lang_exists(cursor, lang): + if owner != get_lang_owner(cursor, lang): + changed = set_lang_owner(cursor, lang, owner) + + if changed: + if module.check_mode: + db_connection.rollback() + else: + db_connection.commit() + + kw['changed'] = changed + kw['queries'] = executed_queries + db_connection.close() + module.exit_json(**kw) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/database/postgresql/postgresql_membership.py b/plugins/modules/database/postgresql/postgresql_membership.py new file mode 100644 index 0000000000..d5e336c5b2 --- /dev/null +++ b/plugins/modules/database/postgresql/postgresql_membership.py @@ -0,0 +1,220 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2019, Andrew Klychkov (@Andersson007) +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +ANSIBLE_METADATA = { + 'metadata_version': '1.1', + 'supported_by': 'community', + 'status': 
['preview'] +} + +DOCUMENTATION = r''' +--- +module: postgresql_membership +short_description: Add or remove PostgreSQL roles from groups +description: +- Adds or removes PostgreSQL roles from groups (other roles). +- Users are roles with login privilege. +- Groups are PostgreSQL roles usually without LOGIN privilege. +- "Common use case:" +- 1) add a new group (groups) by M(postgresql_user) module with I(role_attr_flags=NOLOGIN) +- 2) grant them desired privileges by M(postgresql_privs) module +- 3) add desired PostgreSQL users to the new group (groups) by this module +options: + groups: + description: + - The list of groups (roles) that need to be granted to or revoked from I(target_roles). + required: yes + type: list + elements: str + aliases: + - group + - source_role + - source_roles + target_roles: + description: + - The list of target roles (groups will be granted to them). + required: yes + type: list + elements: str + aliases: + - target_role + - users + - user + fail_on_role: + description: + - If C(yes), fail when group or target_role doesn't exist. If C(no), just warn and continue. + default: yes + type: bool + state: + description: + - Membership state. + - I(state=present) implies the I(groups)must be granted to I(target_roles). + - I(state=absent) implies the I(groups) must be revoked from I(target_roles). + type: str + default: present + choices: [ absent, present ] + db: + description: + - Name of database to connect to. + type: str + aliases: + - login_db + session_role: + description: + - Switch to session_role after connecting. + The specified session_role must be a role that the current login_user is a member of. + - Permissions checking for SQL commands is carried out as though + the session_role were the one that had logged in originally. + type: str +seealso: +- module: postgresql_user +- module: postgresql_privs +- module: postgresql_owner +- name: PostgreSQL role membership reference + description: Complete reference of the PostgreSQL role membership documentation. + link: https://www.postgresql.org/docs/current/role-membership.html +- name: PostgreSQL role attributes reference + description: Complete reference of the PostgreSQL role attributes documentation. + link: https://www.postgresql.org/docs/current/role-attributes.html +author: +- Andrew Klychkov (@Andersson007) +extends_documentation_fragment: +- community.general.postgres + +''' + +EXAMPLES = r''' +- name: Grant role read_only to alice and bob + postgresql_membership: + group: read_only + target_roles: + - alice + - bob + state: present + +# you can also use target_roles: alice,bob,etc to pass the role list + +- name: Revoke role read_only and exec_func from bob. Ignore if roles don't exist + postgresql_membership: + groups: + - read_only + - exec_func + target_role: bob + fail_on_role: no + state: absent +''' + +RETURN = r''' +queries: + description: List of executed queries. + returned: always + type: str + sample: [ "GRANT \"user_ro\" TO \"alice\"" ] +granted: + description: Dict of granted groups and roles. + returned: if I(state=present) + type: dict + sample: { "ro_group": [ "alice", "bob" ] } +revoked: + description: Dict of revoked groups and roles. + returned: if I(state=absent) + type: dict + sample: { "ro_group": [ "alice", "bob" ] } +state: + description: Membership state that tried to be set. 
+ returned: always + type: str + sample: "present" +''' + +try: + from psycopg2.extras import DictCursor +except ImportError: + # psycopg2 is checked by connect_to_db() + # from ansible.module_utils.postgres + pass + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.postgres import ( + connect_to_db, + exec_sql, + get_conn_params, + PgMembership, + postgres_common_argument_spec, +) + + +# =========================================== +# Module execution. +# + +def main(): + argument_spec = postgres_common_argument_spec() + argument_spec.update( + groups=dict(type='list', elements='str', required=True, aliases=['group', 'source_role', 'source_roles']), + target_roles=dict(type='list', elements='str', required=True, aliases=['target_role', 'user', 'users']), + fail_on_role=dict(type='bool', default=True), + state=dict(type='str', default='present', choices=['absent', 'present']), + db=dict(type='str', aliases=['login_db']), + session_role=dict(type='str'), + ) + + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + + groups = module.params['groups'] + target_roles = module.params['target_roles'] + fail_on_role = module.params['fail_on_role'] + state = module.params['state'] + + conn_params = get_conn_params(module, module.params, warn_db_default=False) + db_connection = connect_to_db(module, conn_params, autocommit=False) + cursor = db_connection.cursor(cursor_factory=DictCursor) + + ############## + # Create the object and do main job: + + pg_membership = PgMembership(module, cursor, groups, target_roles, fail_on_role) + + if state == 'present': + pg_membership.grant() + + elif state == 'absent': + pg_membership.revoke() + + # Rollback if it's possible and check_mode: + if module.check_mode: + db_connection.rollback() + else: + db_connection.commit() + + cursor.close() + db_connection.close() + + # Make return values: + return_dict = dict( + changed=pg_membership.changed, + state=state, + groups=pg_membership.groups, + target_roles=pg_membership.target_roles, + queries=pg_membership.executed_queries, + ) + + if state == 'present': + return_dict['granted'] = pg_membership.granted + elif state == 'absent': + return_dict['revoked'] = pg_membership.revoked + + module.exit_json(**return_dict) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/database/postgresql/postgresql_owner.py b/plugins/modules/database/postgresql/postgresql_owner.py new file mode 100644 index 0000000000..93d7f59ca5 --- /dev/null +++ b/plugins/modules/database/postgresql/postgresql_owner.py @@ -0,0 +1,445 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2019, Andrew Klychkov (@Andersson007) +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +ANSIBLE_METADATA = { + 'metadata_version': '1.1', + 'supported_by': 'community', + 'status': ['preview'] +} + +DOCUMENTATION = r''' +--- +module: postgresql_owner +short_description: Change an owner of PostgreSQL database object +description: +- Change an owner of PostgreSQL database object. +- Also allows to reassign the ownership of database objects owned by a database role to another role. + +options: + new_owner: + description: + - Role (user/group) to set as an I(obj_name) owner. + type: str + required: yes + obj_name: + description: + - Name of a database object to change ownership. 
+ - Mutually exclusive with I(reassign_owned_by). + type: str + obj_type: + description: + - Type of a database object. + - Mutually exclusive with I(reassign_owned_by). + type: str + choices: [ database, function, matview, sequence, schema, table, tablespace, view ] + aliases: + - type + reassign_owned_by: + description: + - The list of role names. The ownership of all the objects within the current database, + and of all shared objects (databases, tablespaces), owned by this role(s) will be reassigned to I(owner). + - Pay attention - it reassigns all objects owned by this role(s) in the I(db)! + - If role(s) exists, always returns changed True. + - Cannot reassign ownership of objects that are required by the database system. + - Mutually exclusive with C(obj_type). + type: list + elements: str + fail_on_role: + description: + - If C(yes), fail when I(reassign_owned_by) role does not exist. + Otherwise just warn and continue. + - Mutually exclusive with I(obj_name) and I(obj_type). + default: yes + type: bool + db: + description: + - Name of database to connect to. + type: str + aliases: + - login_db + session_role: + description: + - Switch to session_role after connecting. + The specified session_role must be a role that the current login_user is a member of. + - Permissions checking for SQL commands is carried out as though + the session_role were the one that had logged in originally. + type: str +seealso: +- module: postgresql_user +- module: postgresql_privs +- module: postgresql_membership +- name: PostgreSQL REASSIGN OWNED command reference + description: Complete reference of the PostgreSQL REASSIGN OWNED command documentation. + link: https://www.postgresql.org/docs/current/sql-reassign-owned.html +author: +- Andrew Klychkov (@Andersson007) +extends_documentation_fragment: +- community.general.postgres + +''' + +EXAMPLES = r''' +# Set owner as alice for function myfunc in database bar by ansible ad-hoc command: +# ansible -m postgresql_owner -a "db=bar new_owner=alice obj_name=myfunc obj_type=function" + +- name: The same as above by playbook + postgresql_owner: + db: bar + new_owner: alice + obj_name: myfunc + obj_type: function + +- name: Set owner as bob for table acme in database bar + postgresql_owner: + db: bar + new_owner: bob + obj_name: acme + obj_type: table + +- name: Set owner as alice for view test_view in database bar + postgresql_owner: + db: bar + new_owner: alice + obj_name: test_view + obj_type: view + +- name: Set owner as bob for tablespace ssd in database foo + postgresql_owner: + db: foo + new_owner: bob + obj_name: ssd + obj_type: tablespace + +- name: Reassign all object in database bar owned by bob to alice + postgresql_owner: + db: bar + new_owner: alice + reassign_owned_by: bob + +- name: Reassign all object in database bar owned by bob and bill to alice + postgresql_owner: + db: bar + new_owner: alice + reassign_owned_by: + - bob + - bill +''' + +RETURN = r''' +queries: + description: List of executed queries. 
+ returned: always + type: str + sample: [ 'REASSIGN OWNED BY "bob" TO "alice"' ] +''' + +try: + from psycopg2.extras import DictCursor +except ImportError: + # psycopg2 is checked by connect_to_db() + # from ansible.module_utils.postgres + pass + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.database import pg_quote_identifier +from ansible_collections.community.general.plugins.module_utils.postgres import ( + connect_to_db, + exec_sql, + get_conn_params, + postgres_common_argument_spec, +) + + +class PgOwnership(object): + + """Class for changing ownership of PostgreSQL objects. + + Arguments: + module (AnsibleModule): Object of Ansible module class. + cursor (psycopg2.connect.cursor): Cursor object for interaction with the database. + role (str): Role name to set as a new owner of objects. + + Important: + If you want to add handling of a new type of database objects: + 1. Add a specific method for this like self.__set_db_owner(), etc. + 2. Add a condition with a check of ownership for new type objects to self.__is_owner() + 3. Add a condition with invocation of the specific method to self.set_owner() + 4. Add the information to the module documentation + That's all. + """ + + def __init__(self, module, cursor, role): + self.module = module + self.cursor = cursor + self.check_role_exists(role) + self.role = role + self.changed = False + self.executed_queries = [] + self.obj_name = '' + self.obj_type = '' + + def check_role_exists(self, role, fail_on_role=True): + """Check the role exists or not. + + Arguments: + role (str): Role name. + fail_on_role (bool): If True, fail when the role does not exist. + Otherwise just warn and continue. + """ + if not self.__role_exists(role): + if fail_on_role: + self.module.fail_json(msg="Role '%s' does not exist" % role) + else: + self.module.warn("Role '%s' does not exist, pass" % role) + + return False + + else: + return True + + def reassign(self, old_owners, fail_on_role): + """Implements REASSIGN OWNED BY command. + + If success, set self.changed as True. + + Arguments: + old_owners (list): The ownership of all the objects within + the current database, and of all shared objects (databases, tablespaces), + owned by these roles will be reassigned to self.role. + fail_on_role (bool): If True, fail when a role from old_owners does not exist. + Otherwise just warn and continue. + """ + roles = [] + for r in old_owners: + if self.check_role_exists(r, fail_on_role): + roles.append(pg_quote_identifier(r, 'role')) + + # Roles do not exist, nothing to do, exit: + if not roles: + return False + + old_owners = ','.join(roles) + + query = ['REASSIGN OWNED BY'] + query.append(old_owners) + query.append('TO %s' % pg_quote_identifier(self.role, 'role')) + query = ' '.join(query) + + self.changed = exec_sql(self, query, ddl=True) + + def set_owner(self, obj_type, obj_name): + """Change owner of a database object. + + Arguments: + obj_type (str): Type of object (like database, table, view, etc.). + obj_name (str): Object name. 
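+
+        Example (a hypothetical sketch):
+            ownership = PgOwnership(module, cursor, 'alice')
+            ownership.set_owner('table', 'acme')
+            # No-op if 'alice' already owns "acme"; otherwise runs
+            # ALTER TABLE "acme" OWNER TO "alice" via exec_sql().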
+ """ + self.obj_name = obj_name + self.obj_type = obj_type + + # if a new_owner is the object owner now, + # nothing to do: + if self.__is_owner(): + return False + + if obj_type == 'database': + self.__set_db_owner() + + elif obj_type == 'function': + self.__set_func_owner() + + elif obj_type == 'sequence': + self.__set_seq_owner() + + elif obj_type == 'schema': + self.__set_schema_owner() + + elif obj_type == 'table': + self.__set_table_owner() + + elif obj_type == 'tablespace': + self.__set_tablespace_owner() + + elif obj_type == 'view': + self.__set_view_owner() + + elif obj_type == 'matview': + self.__set_mat_view_owner() + + def __is_owner(self): + """Return True if self.role is the current object owner.""" + if self.obj_type == 'table': + query = ("SELECT 1 FROM pg_tables " + "WHERE tablename = %(obj_name)s " + "AND tableowner = %(role)s") + + elif self.obj_type == 'database': + query = ("SELECT 1 FROM pg_database AS d " + "JOIN pg_roles AS r ON d.datdba = r.oid " + "WHERE d.datname = %(obj_name)s " + "AND r.rolname = %(role)s") + + elif self.obj_type == 'function': + query = ("SELECT 1 FROM pg_proc AS f " + "JOIN pg_roles AS r ON f.proowner = r.oid " + "WHERE f.proname = %(obj_name)s " + "AND r.rolname = %(role)s") + + elif self.obj_type == 'sequence': + query = ("SELECT 1 FROM pg_class AS c " + "JOIN pg_roles AS r ON c.relowner = r.oid " + "WHERE c.relkind = 'S' AND c.relname = %(obj_name)s " + "AND r.rolname = %(role)s") + + elif self.obj_type == 'schema': + query = ("SELECT 1 FROM information_schema.schemata " + "WHERE schema_name = %(obj_name)s " + "AND schema_owner = %(role)s") + + elif self.obj_type == 'tablespace': + query = ("SELECT 1 FROM pg_tablespace AS t " + "JOIN pg_roles AS r ON t.spcowner = r.oid " + "WHERE t.spcname = %(obj_name)s " + "AND r.rolname = %(role)s") + + elif self.obj_type == 'view': + query = ("SELECT 1 FROM pg_views " + "WHERE viewname = %(obj_name)s " + "AND viewowner = %(role)s") + + elif self.obj_type == 'matview': + query = ("SELECT 1 FROM pg_matviews " + "WHERE matviewname = %(obj_name)s " + "AND matviewowner = %(role)s") + + query_params = {'obj_name': self.obj_name, 'role': self.role} + return exec_sql(self, query, query_params, add_to_executed=False) + + def __set_db_owner(self): + """Set the database owner.""" + query = "ALTER DATABASE %s OWNER TO %s" % (pg_quote_identifier(self.obj_name, 'database'), + pg_quote_identifier(self.role, 'role')) + self.changed = exec_sql(self, query, ddl=True) + + def __set_func_owner(self): + """Set the function owner.""" + query = "ALTER FUNCTION %s OWNER TO %s" % (self.obj_name, + pg_quote_identifier(self.role, 'role')) + self.changed = exec_sql(self, query, ddl=True) + + def __set_seq_owner(self): + """Set the sequence owner.""" + query = "ALTER SEQUENCE %s OWNER TO %s" % (pg_quote_identifier(self.obj_name, 'table'), + pg_quote_identifier(self.role, 'role')) + self.changed = exec_sql(self, query, ddl=True) + + def __set_schema_owner(self): + """Set the schema owner.""" + query = "ALTER SCHEMA %s OWNER TO %s" % (pg_quote_identifier(self.obj_name, 'schema'), + pg_quote_identifier(self.role, 'role')) + self.changed = exec_sql(self, query, ddl=True) + + def __set_table_owner(self): + """Set the table owner.""" + query = "ALTER TABLE %s OWNER TO %s" % (pg_quote_identifier(self.obj_name, 'table'), + pg_quote_identifier(self.role, 'role')) + self.changed = exec_sql(self, query, ddl=True) + + def __set_tablespace_owner(self): + """Set the tablespace owner.""" + query = "ALTER TABLESPACE %s OWNER TO %s" % 
(pg_quote_identifier(self.obj_name, 'database'), + pg_quote_identifier(self.role, 'role')) + self.changed = exec_sql(self, query, ddl=True) + + def __set_view_owner(self): + """Set the view owner.""" + query = "ALTER VIEW %s OWNER TO %s" % (pg_quote_identifier(self.obj_name, 'table'), + pg_quote_identifier(self.role, 'role')) + self.changed = exec_sql(self, query, ddl=True) + + def __set_mat_view_owner(self): + """Set the materialized view owner.""" + query = "ALTER MATERIALIZED VIEW %s OWNER TO %s" % (pg_quote_identifier(self.obj_name, 'table'), + pg_quote_identifier(self.role, 'role')) + self.changed = exec_sql(self, query, ddl=True) + + def __role_exists(self, role): + """Return True if role exists, otherwise return False.""" + query_params = {'role': role} + query = "SELECT 1 FROM pg_roles WHERE rolname = %(role)s" + return exec_sql(self, query, query_params, add_to_executed=False) + + +# =========================================== +# Module execution. +# + + +def main(): + argument_spec = postgres_common_argument_spec() + argument_spec.update( + new_owner=dict(type='str', required=True), + obj_name=dict(type='str'), + obj_type=dict(type='str', aliases=['type'], choices=[ + 'database', 'function', 'matview', 'sequence', 'schema', 'table', 'tablespace', 'view']), + reassign_owned_by=dict(type='list', elements='str'), + fail_on_role=dict(type='bool', default=True), + db=dict(type='str', aliases=['login_db']), + session_role=dict(type='str'), + ) + module = AnsibleModule( + argument_spec=argument_spec, + mutually_exclusive=[ + ['obj_name', 'reassign_owned_by'], + ['obj_type', 'reassign_owned_by'], + ['obj_name', 'fail_on_role'], + ['obj_type', 'fail_on_role'], + ], + supports_check_mode=True, + ) + + new_owner = module.params['new_owner'] + obj_name = module.params['obj_name'] + obj_type = module.params['obj_type'] + reassign_owned_by = module.params['reassign_owned_by'] + fail_on_role = module.params['fail_on_role'] + + conn_params = get_conn_params(module, module.params) + db_connection = connect_to_db(module, conn_params, autocommit=False) + cursor = db_connection.cursor(cursor_factory=DictCursor) + + ############## + # Create the object and do main job: + pg_ownership = PgOwnership(module, cursor, new_owner) + + # if we want to change ownership: + if obj_name: + pg_ownership.set_owner(obj_type, obj_name) + + # if we want to reassign objects owned by roles: + elif reassign_owned_by: + pg_ownership.reassign(reassign_owned_by, fail_on_role) + + # Rollback if it's possible and check_mode: + if module.check_mode: + db_connection.rollback() + else: + db_connection.commit() + + cursor.close() + db_connection.close() + + module.exit_json( + changed=pg_ownership.changed, + queries=pg_ownership.executed_queries, + ) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/database/postgresql/postgresql_pg_hba.py b/plugins/modules/database/postgresql/postgresql_pg_hba.py new file mode 100644 index 0000000000..72caadad3c --- /dev/null +++ b/plugins/modules/database/postgresql/postgresql_pg_hba.py @@ -0,0 +1,748 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright: (c) 2019, Sebastiaan Mannem (@sebasmannem) +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +''' +This module is used to manage postgres pg_hba files with Ansible. 
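+
+A managed rule renders to a single pg_hba line, for example (illustrative only;
+the fields are: type, databases, users, address, method):
+
+    host    sales,logistics    joe,simon    ::1/128    peer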
+'''
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+DOCUMENTATION = r'''
+---
+module: postgresql_pg_hba
+short_description: Add, remove or modify a rule in a pg_hba file
+description:
+   - The fundamental function of the module is to create or delete lines in pg_hba files.
+   - The lines in the file should be in a typical pg_hba form and lines should be unique per key (type, databases, users, source).
+     If lines are not unique for a given key and that key is the one to change, only one of them will remain
+     for C(state=present), or none for C(state=absent).
+extends_documentation_fragment: files
+options:
+  address:
+    description:
+      - The source address/net where the connections could come from.
+      - Will not be used for entries of I(type)=C(local).
+      - You can also use keywords C(all), C(samehost), and C(samenet).
+    default: samehost
+    type: str
+    aliases: [ source, src ]
+  backup:
+    description:
+      - If set, create a backup of the C(pg_hba) file before it is modified.
+        The location of the backup is returned in the C(backup_file) return value by this module.
+    default: false
+    type: bool
+  backup_file:
+    description:
+      - Write backup to a specific backup file rather than a temp file.
+    type: str
+  create:
+    description:
+      - Create a C(pg_hba) file if none exists.
+      - When set to false, an error is raised when the C(pg_hba) file doesn't exist.
+    default: false
+    type: bool
+  contype:
+    description:
+      - Type of the rule. If not set, C(postgresql_pg_hba) will only return contents.
+    type: str
+    choices: [ local, host, hostnossl, hostssl ]
+  databases:
+    description:
+      - Databases this line applies to.
+    default: all
+    type: str
+  dest:
+    description:
+      - Path to C(pg_hba) file to modify.
+    type: path
+    required: true
+  method:
+    description:
+      - Authentication method to be used.
+    type: str
+    choices: [ cert, gss, ident, krb5, ldap, md5, pam, password, peer, radius, reject, scram-sha-256, sspi, trust ]
+    default: md5
+  netmask:
+    description:
+      - The netmask of the source address.
+    type: str
+  options:
+    description:
+      - Additional options for the authentication I(method).
+    type: str
+  order:
+    description:
+      - The entries will be written out in a specific order.
+        With this option you can control by which field they are ordered first, second and last.
+        s=source, d=databases, u=users.
+        This option is deprecated since 2.9 and will be removed in 2.11.
+        The sort order is now hardcoded to C(sdu).
+    type: str
+    default: sdu
+    choices: [ sdu, sud, dsu, dus, usd, uds ]
+  state:
+    description:
+      - The lines will be added/modified when C(state=present) and removed when C(state=absent).
+    type: str
+    default: present
+    choices: [ absent, present ]
+  users:
+    description:
+      - Users this line applies to.
+    type: str
+    default: all
+
+notes:
+   - The default authentication assumes that on the host, you are either logging in as or
+     sudo'ing to an account with appropriate permissions to read and modify the file.
+   - This module also returns the pg_hba info. You can use this module to only retrieve it by specifying only I(dest).
+     The info can be found in the returned data under the key C(pg_hba), a list containing a dict per rule.
+   - This module will sort resulting C(pg_hba) files if a rule change is required.
+     This can give unexpected results with manually created hba files if they were improperly sorted.
+     For example, a rule might have been created for a network first, and for an IP in that network range next.
+     In that situation, the IP-specific rule will never match; it is obsolete in the C(pg_hba) file.
+     After the C(pg_hba) file is rewritten by the M(postgresql_pg_hba) module, the IP-specific rule will be sorted above the range rule.
+     It will then start matching, which can give unexpected results.
+   - With the 'order' parameter you can control which field is used to sort first, next and last.
+   - The module supports a check mode and a diff mode.
+
+seealso:
+- name: PostgreSQL pg_hba.conf file reference
+  description: Complete reference of the PostgreSQL pg_hba.conf file documentation.
+  link: https://www.postgresql.org/docs/current/auth-pg-hba-conf.html
+
+requirements:
+  - ipaddress
+
+author: Sebastiaan Mannem (@sebasmannem)
+'''
+
+EXAMPLES = '''
+- name: Grant users joe and simon access to databases sales and logistics from ipv6 localhost ::1/128 using peer authentication.
+  postgresql_pg_hba:
+    dest: /var/lib/postgres/data/pg_hba.conf
+    contype: host
+    users: joe,simon
+    source: ::1
+    databases: sales,logistics
+    method: peer
+    create: true
+
+- name: Grant user replication from network 192.168.0.100/24 access for replication with client cert authentication.
+  postgresql_pg_hba:
+    dest: /var/lib/postgres/data/pg_hba.conf
+    contype: host
+    users: replication
+    source: 192.168.0.100/24
+    databases: replication
+    method: cert
+
+- name: Revoke access from local user mary on database mydb.
+  postgresql_pg_hba:
+    dest: /var/lib/postgres/data/pg_hba.conf
+    contype: local
+    users: mary
+    databases: mydb
+    state: absent
+'''
+
+RETURN = r'''
+msgs:
+  description: List of textual messages describing what was done
+  returned: always
+  type: list
+  sample:
+    "msgs": [
+      "Removing",
+      "Changed",
+      "Writing"
+    ]
+backup_file:
+  description: File that the original pg_hba file was backed up to
+  returned: changed
+  type: str
+  sample: /tmp/pg_hba_jxobj_p
+pg_hba:
+  description: List of the pg_hba rules as they are configured in the specified hba file
+  returned: always
+  type: list
+  sample:
+    "pg_hba": [
+      {
+        "db": "all",
+        "method": "md5",
+        "src": "samehost",
+        "type": "host",
+        "usr": "all"
+      }
+    ]
+'''
+
+import os
+import re
+import traceback
+
+IPADDRESS_IMP_ERR = None
+try:
+    import ipaddress
+except ImportError:
+    IPADDRESS_IMP_ERR = traceback.format_exc()
+
+import tempfile
+import shutil
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+# from ansible.module_utils.postgres import postgres_common_argument_spec
+
+PG_HBA_METHODS = ["trust", "reject", "md5", "password", "gss", "sspi", "krb5", "ident", "peer",
+                  "ldap", "radius", "cert", "pam", "scram-sha-256"]
+PG_HBA_TYPES = ["local", "host", "hostssl", "hostnossl"]
+PG_HBA_ORDERS = ["sdu", "sud", "dsu", "dus", "usd", "uds"]
+PG_HBA_HDR = ['type', 'db', 'usr', 'src', 'mask', 'method', 'options']
+
+WHITESPACES_RE = re.compile(r'\s+')
+
+
+class PgHbaError(Exception):
+    '''
+    This exception is raised when parsing the pg_hba file ends in an error.
+    '''
+
+
+class PgHbaRuleError(PgHbaError):
+    '''
+    This exception is raised when a rule in the pg_hba file cannot be parsed or applied.
+    '''
+
+
+class PgHbaRuleChanged(PgHbaRuleError):
+    '''
+    This exception is raised when a new parsed rule is a changed version of an existing rule.
+    '''
+
+
+class PgHbaValueError(PgHbaError):
+    '''
+    This exception is raised when a value in the pg_hba file is invalid.
+    '''
+
+
+class PgHbaRuleValueError(PgHbaRuleError):
+    '''
+    This exception is raised when a value in a rule is invalid.
+    '''
+
+
+class PgHba(object):
+    """
+    PgHba object to read/write entries to/from.
+    pg_hba_file - the path to the pg_hba file to manage.
+    """
+    def __init__(self, pg_hba_file=None, order="sdu", backup=False, create=False):
+        if order not in PG_HBA_ORDERS:
+            msg = "invalid order setting {0} (should be one of '{1}')."
+            raise PgHbaError(msg.format(order, "', '".join(PG_HBA_ORDERS)))
+        self.pg_hba_file = pg_hba_file
+        self.rules = None
+        self.comment = None
+        self.order = order
+        self.backup = backup
+        self.last_backup = None
+        self.create = create
+        self.unchanged()
+        # self.databases will be updated by add_rule and gives some idea of the number of databases
+        # (at least that are handled by this pg_hba)
+        self.databases = set(['postgres', 'template0', 'template1'])
+
+        # self.users will be updated by add_rule and gives some idea of the number of users.
+        # Since these might also be groups with multiple users, this might be totally off,
+        # but at least it is some info...
+        self.users = set(['postgres'])
+
+        self.read()
+
+    def unchanged(self):
+        '''
+        This method resets self.diff to an empty default
+        '''
+        self.diff = {'before': {'file': self.pg_hba_file, 'pg_hba': []},
+                     'after': {'file': self.pg_hba_file, 'pg_hba': []}}
+
+    def read(self):
+        '''
+        Read in the pg_hba from the system
+        '''
+        self.rules = {}
+        self.comment = []
+        # read the pg_hba file
+        try:
+            with open(self.pg_hba_file, 'r') as file:
+                for line in file:
+                    line = line.strip()
+                    # strip off comments and keep them for rewriting
+                    if '#' in line:
+                        line, comment = line.split('#', 1)
+                        self.comment.append('#' + comment)
+                    try:
+                        self.add_rule(PgHbaRule(line=line))
+                    except PgHbaRuleError:
+                        pass
+            self.unchanged()
+        except IOError:
+            pass
+
+    def write(self, backup_file=''):
+        '''
+        This method writes the PgHba rules (back) to a file.
+        '''
+        if not self.changed():
+            return False
+
+        contents = self.render()
+        if self.pg_hba_file:
+            if not (os.path.isfile(self.pg_hba_file) or self.create):
+                raise PgHbaError("pg_hba file '{0}' doesn't exist. "
+                                 "Use create option to autocreate.".format(self.pg_hba_file))
+            if self.backup and os.path.isfile(self.pg_hba_file):
+                if backup_file:
+                    self.last_backup = backup_file
+                else:
+                    __backup_file_h, self.last_backup = tempfile.mkstemp(prefix='pg_hba')
+                shutil.copy(self.pg_hba_file, self.last_backup)
+            fileh = open(self.pg_hba_file, 'w')
+        else:
+            filed, __path = tempfile.mkstemp(prefix='pg_hba')
+            fileh = os.fdopen(filed, 'w')
+
+        fileh.write(contents)
+        self.unchanged()
+        fileh.close()
+        return True
+
+    def add_rule(self, rule):
+        '''
+        This method can be used to add a rule to the list of rules in this PgHba object
+        '''
+        key = rule.key()
+        try:
+            try:
+                oldrule = self.rules[key]
+            except KeyError:
+                raise PgHbaRuleChanged
+            ekeys = set(list(oldrule.keys()) + list(rule.keys()))
+            ekeys.remove('line')
+            for k in ekeys:
+                if oldrule[k] != rule[k]:
+                    raise PgHbaRuleChanged('{0} changes {1}'.format(rule, oldrule))
+        except PgHbaRuleChanged:
+            self.rules[key] = rule
+            self.diff['after']['pg_hba'].append(rule.line())
+            if rule['db'] not in ['all', 'samerole', 'samegroup', 'replication']:
+                databases = set(rule['db'].split(','))
+                self.databases.update(databases)
+            if rule['usr'] != 'all':
+                user = rule['usr']
+                if user[0] == '+':
+                    user = user[1:]
+                self.users.add(user)
+
+    def remove_rule(self, rule):
+        '''
+        This method can be used to find and remove a rule. It doesn't look for the exact rule, only
+        the rule with the same key.
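+        For example (illustrative), rules parsed from 'host all all 192.168.0.0/24 md5' and
+        'host all all 192.168.0.0/24 trust' share the key (source, db, usr),
+        so passing either of them removes the rule stored under that key.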
+        '''
+        keys = rule.key()
+        try:
+            del self.rules[keys]
+            self.diff['before']['pg_hba'].append(rule.line())
+        except KeyError:
+            pass
+
+    def get_rules(self, with_lines=False):
+        '''
+        This method returns all the rules of the PgHba object
+        '''
+        rules = sorted(self.rules.values())
+        for rule in rules:
+            ret = {}
+            for key, value in rule.items():
+                ret[key] = value
+            if not with_lines:
+                if 'line' in ret:
+                    del ret['line']
+            else:
+                ret['line'] = rule.line()
+
+            yield ret
+
+    def render(self):
+        '''
+        This method renders the content of the PgHba rules and comments.
+        The returning value can be used directly to write to a new file.
+        '''
+        comment = '\n'.join(self.comment)
+        rule_lines = '\n'.join([rule['line'] for rule in self.get_rules(with_lines=True)])
+        result = comment + '\n' + rule_lines
+        # End it properly with a linefeed (if not already).
+        if result and result[-1] not in ['\n', '\r']:
+            result += '\n'
+        return result
+
+    def changed(self):
+        '''
+        This method can be called to detect if the PgHba file has been changed.
+        '''
+        return bool(self.diff['before']['pg_hba'] or self.diff['after']['pg_hba'])
+
+
+class PgHbaRule(dict):
+    '''
+    This class represents one rule as defined in a line in a PgHbaFile.
+    '''
+
+    def __init__(self, contype=None, databases=None, users=None, source=None, netmask=None,
+                 method=None, options=None, line=None):
+        '''
+        This function can be called with a comma separated list of databases and a comma separated
+        list of users and it will act as a generator that returns an expanded list of rules one by
+        one.
+        '''
+
+        super(PgHbaRule, self).__init__()
+
+        if line:
+            # Read values from line if parsed
+            self.fromline(line)
+
+        # read rule cols from parsed items
+        rule = dict(zip(PG_HBA_HDR, [contype, databases, users, source, netmask, method, options]))
+        for key, value in rule.items():
+            if value:
+                self[key] = value
+
+        # Some sanity checks
+        for key in ['method', 'type']:
+            if key not in self:
+                raise PgHbaRuleError('Missing {0} in rule {1}'.format(key, self))
+
+        if self['method'] not in PG_HBA_METHODS:
+            msg = "invalid method {0} (should be one of '{1}')."
+            raise PgHbaRuleValueError(msg.format(self['method'], "', '".join(PG_HBA_METHODS)))
+
+        if self['type'] not in PG_HBA_TYPES:
+            msg = "invalid connection type {0} (should be one of '{1}')."
+            raise PgHbaRuleValueError(msg.format(self['type'], "', '".join(PG_HBA_TYPES)))
+
+        if self['type'] == 'local':
+            self.unset('src')
+            self.unset('mask')
+        elif 'src' not in self:
+            raise PgHbaRuleError('Missing src in rule {0}'.format(self))
+        elif '/' in self['src']:
+            self.unset('mask')
+        else:
+            self['src'] = str(self.source())
+            self.unset('mask')
+
+    def unset(self, key):
+        '''
+        This method is used to unset certain columns if they exist
+        '''
+        if key in self:
+            del self[key]
+
+    def line(self):
+        '''
+        This method can be used to return (or generate) the line
+        '''
+        try:
+            return self['line']
+        except KeyError:
+            self['line'] = "\t".join([self[k] for k in PG_HBA_HDR if k in self.keys()])
+            return self['line']
+
+    def fromline(self, line):
+        '''
+        split into 'type', 'db', 'usr', 'src', 'mask', 'method', 'options' cols
+        '''
+        if WHITESPACES_RE.sub('', line) == '':
+            # empty line. skip this one...
+            return
+        cols = WHITESPACES_RE.split(line)
+        if len(cols) < 4:
+            msg = "Rule {0} has too few columns."
+            raise PgHbaValueError(msg.format(line))
+        if cols[0] not in PG_HBA_TYPES:
+            msg = "Rule {0} has unknown type: {1}."
+            raise PgHbaValueError(msg.format(line, cols[0]))
+        if cols[0] == 'local':
+            cols.insert(3, None)  # No address
+            cols.insert(3, None)  # No IP-mask
+        if len(cols) < 6:
+            cols.insert(4, None)  # No IP-mask
+        elif cols[5] not in PG_HBA_METHODS:
+            cols.insert(4, None)  # No IP-mask
+        if cols[5] not in PG_HBA_METHODS:
+            raise PgHbaValueError("Rule {0} of '{1}' type has invalid auth-method '{2}'".format(line, cols[0], cols[5]))
+
+        if len(cols) < 7:
+            cols.insert(6, None)  # No auth-options
+        else:
+            cols[6] = " ".join(cols[6:])  # combine all auth-options
+        rule = dict(zip(PG_HBA_HDR, cols[:7]))
+        for key, value in rule.items():
+            if value:
+                self[key] = value
+
+    def key(self):
+        '''
+        This method can be used to get the key from a rule.
+        '''
+        if self['type'] == 'local':
+            source = 'local'
+        else:
+            source = str(self.source())
+        return (source, self['db'], self['usr'])
+
+    def source(self):
+        '''
+        This method is used to get the source of a rule as an ipaddress object if possible.
+        '''
+        if 'mask' in self.keys():
+            try:
+                ipaddress.ip_address(u'{0}'.format(self['src']))
+            except ValueError:
+                raise PgHbaValueError('Mask was specified, but source "{0}" '
+                                      'is not a valid ip'.format(self['src']))
+            # The ipaddress module cannot work with an ipv6 netmask, so let's convert it to a prefixlen.
+            # Furthermore, ipv4 with a bad netmask throws a "Rule {0} doesn't seem to be an ip, but
+            # has a mask" error that doesn't describe what is really going on.
+            try:
+                mask_as_ip = ipaddress.ip_address(u'{0}'.format(self['mask']))
+            except ValueError:
+                raise PgHbaValueError('Mask {0} seems to be invalid'.format(self['mask']))
+            binvalue = "{0:b}".format(int(mask_as_ip))
+            if '01' in binvalue:
+                raise PgHbaValueError('IP mask {0} seems invalid '
+                                      '(binary value has 1 after 0)'.format(self['mask']))
+            prefixlen = binvalue.count('1')
+            sourcenw = '{0}/{1}'.format(self['src'], prefixlen)
+            try:
+                return ipaddress.ip_network(u'{0}'.format(sourcenw), strict=False)
+            except ValueError:
+                raise PgHbaValueError('{0} is not a valid address range'.format(sourcenw))
+
+        try:
+            return ipaddress.ip_network(u'{0}'.format(self['src']), strict=False)
+        except ValueError:
+            return self['src']
+
+    def __lt__(self, other):
+        """This function helps sorted to decide how to sort.
+
+        It just checks itself against the other and decides on some key values
+        if it should be sorted higher or lower in the list.
+        The way it works:
+        For networks, every 1 in 'netmask in binary' makes the subnet more specific.
+        Therefore I chose to use the prefixlen as the weight.
+        So a single IP (/32) should have twice the weight of a /16 network.
+        To keep everything in the same weight scale,
+        - for ipv6, we use a weight scale of 0 (all possible ipv6 addresses) to 128 (single ip)
+        - for ipv4, we use a weight scale of 0 (all possible ipv4 addresses) to 128 (single ip)
+        Therefore for ipv4, we use prefixlen (0-32) * 4 for weight,
+        which corresponds to ipv6 (0-128).
+        """
+        myweight = self.source_weight()
+        hisweight = other.source_weight()
+        if myweight != hisweight:
+            return myweight > hisweight
+
+        myweight = self.db_weight()
+        hisweight = other.db_weight()
+        if myweight != hisweight:
+            return myweight < hisweight
+
+        myweight = self.user_weight()
+        hisweight = other.user_weight()
+        if myweight != hisweight:
+            return myweight < hisweight
+        try:
+            return self['src'] < other['src']
+        except TypeError:
+            return self.source_type_weight() < other.source_type_weight()
+        except Exception:
+            # When all else fails, just compare the exact line.
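+            # Illustrative net effect of the weighting above: a single IPv4
+            # host (/32, weight 128) sorts before a /24 network (weight 96),
+            # which in turn sorts before the keyword 'all' (weight 0).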
+            return self.line() < other.line()
+
+    def source_weight(self):
+        """Report the weight of this source net.
+
+        Basically this is the netmask, where IPv4 is normalized to IPv6
+        (IPv4/32 has the same weight as IPv6/128).
+        """
+        if self['type'] == 'local':
+            return 130
+
+        sourceobj = self.source()
+        if isinstance(sourceobj, ipaddress.IPv4Network):
+            return sourceobj.prefixlen * 4
+        if isinstance(sourceobj, ipaddress.IPv6Network):
+            return sourceobj.prefixlen
+        if isinstance(sourceobj, str):
+            # You can also write all to match any IP address,
+            # samehost to match any of the server's own IP addresses,
+            # or samenet to match any address in any subnet that the server is connected to.
+            if sourceobj == 'all':
+                # (all is considered the full range of all ips, which has a weight of 0)
+                return 0
+            if sourceobj == 'samehost':
+                # (sort samehost second after local)
+                return 129
+            if sourceobj == 'samenet':
+                # Might write some fancy code to determine all prefixes
+                # from all interfaces and find a sane value for this one.
+                # For now, let's assume IPv4/24 or IPv6/96 (both have weight 96).
+                return 96
+            if sourceobj[0] == '.':
+                # suffix matching (domain name), let's assume a very large scale
+                # and therefore a very low weight IPv4/16 or IPv6/64 (both have weight 64).
+                return 64
+            # hostname, let's assume only one host matches, which is
+            # IPv4/32 or IPv6/128 (both have weight 128)
+            return 128
+        raise PgHbaValueError('Cannot deduce the source weight of this source {0}'.format(sourceobj))
+
+    def source_type_weight(self):
+        """Give a weight on the type of this source.
+
+        Basically make sure that IPv6Networks are sorted higher than IPv4Networks.
+        This is a 'when all else fails' solution in __lt__.
+        """
+        if self['type'] == 'local':
+            return 3
+
+        sourceobj = self.source()
+        if isinstance(sourceobj, ipaddress.IPv4Network):
+            return 2
+        if isinstance(sourceobj, ipaddress.IPv6Network):
+            return 1
+        if isinstance(sourceobj, str):
+            return 0
+        raise PgHbaValueError('This source {0} is of an unknown type...'.format(sourceobj))
+
+    def db_weight(self):
+        """Report the weight of the database.
+
+        Normally, just 1, but for replication this is 0, and for 'all', this is more than 2.
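+        For example (illustrative): 'all' weighs 100000, 'replication' weighs 0,
+        'samerole' weighs 1 and 'sales,logistics' weighs 2.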
+ """ + if self['db'] == 'all': + return 100000 + if self['db'] == 'replication': + return 0 + if self['db'] in ['samerole', 'samegroup']: + return 1 + return 1 + self['db'].count(',') + + def user_weight(self): + """Report weight when comparing users.""" + if self['usr'] == 'all': + return 1000000 + return 1 + + +def main(): + ''' + This function is the main function of this module + ''' + # argument_spec = postgres_common_argument_spec() + argument_spec = dict() + argument_spec.update( + address=dict(type='str', default='samehost', aliases=['source', 'src']), + backup=dict(type='bool', default=False), + backup_file=dict(type='str'), + contype=dict(type='str', default=None, choices=PG_HBA_TYPES), + create=dict(type='bool', default=False), + databases=dict(type='str', default='all'), + dest=dict(type='path', required=True), + method=dict(type='str', default='md5', choices=PG_HBA_METHODS), + netmask=dict(type='str'), + options=dict(type='str'), + order=dict(type='str', default="sdu", choices=PG_HBA_ORDERS), + state=dict(type='str', default="present", choices=["absent", "present"]), + users=dict(type='str', default='all') + ) + module = AnsibleModule( + argument_spec=argument_spec, + add_file_common_args=True, + supports_check_mode=True + ) + if IPADDRESS_IMP_ERR is not None: + module.fail_json(msg=missing_required_lib('ipaddress'), exception=IPADDRESS_IMP_ERR) + + contype = module.params["contype"] + create = bool(module.params["create"] or module.check_mode) + if module.check_mode: + backup = False + else: + backup = module.params['backup'] + backup_file = module.params['backup_file'] + databases = module.params["databases"] + dest = module.params["dest"] + + method = module.params["method"] + netmask = module.params["netmask"] + options = module.params["options"] + order = module.params["order"] + source = module.params["address"] + state = module.params["state"] + users = module.params["users"] + + ret = {'msgs': []} + try: + pg_hba = PgHba(dest, order, backup=backup, create=create) + except PgHbaError as error: + module.fail_json(msg='Error reading file:\n{0}'.format(error)) + + if contype: + try: + for database in databases.split(','): + for user in users.split(','): + rule = PgHbaRule(contype, database, user, source, netmask, method, options) + if state == "present": + ret['msgs'].append('Adding') + pg_hba.add_rule(rule) + else: + ret['msgs'].append('Removing') + pg_hba.remove_rule(rule) + except PgHbaError as error: + module.fail_json(msg='Error modifying rules:\n{0}'.format(error)) + file_args = module.load_file_common_arguments(module.params) + ret['changed'] = changed = pg_hba.changed() + if changed: + ret['msgs'].append('Changed') + ret['diff'] = pg_hba.diff + + if not module.check_mode: + ret['msgs'].append('Writing') + try: + if pg_hba.write(backup_file): + module.set_fs_attributes_if_different(file_args, True, pg_hba.diff, + expand=False) + except PgHbaError as error: + module.fail_json(msg='Error writing file:\n{0}'.format(error)) + if pg_hba.last_backup: + ret['backup_file'] = pg_hba.last_backup + + ret['pg_hba'] = list(pg_hba.get_rules()) + module.exit_json(**ret) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/database/postgresql/postgresql_ping.py b/plugins/modules/database/postgresql/postgresql_ping.py new file mode 100644 index 0000000000..8280ce93f7 --- /dev/null +++ b/plugins/modules/database/postgresql/postgresql_ping.py @@ -0,0 +1,152 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2018, Andrew Klychkov (@Andersson007) +# GNU 
General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = { + 'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community' +} + +DOCUMENTATION = r''' +--- +module: postgresql_ping +short_description: Check remote PostgreSQL server availability +description: +- Simple module to check remote PostgreSQL server availability. +options: + db: + description: + - Name of a database to connect to. + type: str + aliases: + - login_db +seealso: +- module: postgresql_info +author: +- Andrew Klychkov (@Andersson007) +extends_documentation_fragment: +- community.general.postgres + +''' + +EXAMPLES = r''' +# PostgreSQL ping dbsrv server from the shell: +# ansible dbsrv -m postgresql_ping + +# In the example below you need to generate certificates previously. +# See https://www.postgresql.org/docs/current/libpq-ssl.html for more information. +- name: PostgreSQL ping dbsrv server using not default credentials and ssl + postgresql_ping: + db: protected_db + login_host: dbsrv + login_user: secret + login_password: secret_pass + ca_cert: /root/root.crt + ssl_mode: verify-full +''' + +RETURN = r''' +is_available: + description: PostgreSQL server availability. + returned: always + type: bool + sample: true +server_version: + description: PostgreSQL server version. + returned: always + type: dict + sample: { major: 10, minor: 1 } +''' + +try: + from psycopg2.extras import DictCursor +except ImportError: + # psycopg2 is checked by connect_to_db() + # from ansible.module_utils.postgres + pass + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.postgres import ( + connect_to_db, + exec_sql, + get_conn_params, + postgres_common_argument_spec, +) + + +# =========================================== +# PostgreSQL module specific support methods. +# + + +class PgPing(object): + def __init__(self, module, cursor): + self.module = module + self.cursor = cursor + self.is_available = False + self.version = {} + + def do(self): + self.get_pg_version() + return (self.is_available, self.version) + + def get_pg_version(self): + query = "SELECT version()" + raw = exec_sql(self, query, add_to_executed=False)[0][0] + if raw: + self.is_available = True + raw = raw.split()[1].split('.') + self.version = dict( + major=int(raw[0]), + minor=int(raw[1]), + ) + + +# =========================================== +# Module execution. 
+# + + +def main(): + argument_spec = postgres_common_argument_spec() + argument_spec.update( + db=dict(type='str', aliases=['login_db']), + ) + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + + # Set some default values: + cursor = False + db_connection = False + result = dict( + changed=False, + is_available=False, + server_version=dict(), + ) + + conn_params = get_conn_params(module, module.params, warn_db_default=False) + db_connection = connect_to_db(module, conn_params, fail_on_conn=False) + + if db_connection is not None: + cursor = db_connection.cursor(cursor_factory=DictCursor) + + # Do job: + pg_ping = PgPing(module, cursor) + if cursor: + # If connection established: + result["is_available"], result["server_version"] = pg_ping.do() + db_connection.rollback() + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/database/postgresql/postgresql_privs.py b/plugins/modules/database/postgresql/postgresql_privs.py new file mode 100644 index 0000000000..41bb373c31 --- /dev/null +++ b/plugins/modules/database/postgresql/postgresql_privs.py @@ -0,0 +1,1092 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: Ansible Project +# Copyright: (c) 2019, Tobias Birkefeld (@tcraxs) +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['stableinterface'], + 'supported_by': 'community'} + +DOCUMENTATION = r''' +--- +module: postgresql_privs +short_description: Grant or revoke privileges on PostgreSQL database objects +description: +- Grant or revoke privileges on PostgreSQL database objects. +- This module is basically a wrapper around most of the functionality of + PostgreSQL's GRANT and REVOKE statements with detection of changes + (GRANT/REVOKE I(privs) ON I(type) I(objs) TO/FROM I(roles)). +options: + database: + description: + - Name of database to connect to. + required: yes + type: str + aliases: + - db + - login_db + state: + description: + - If C(present), the specified privileges are granted, if C(absent) they are revoked. + type: str + default: present + choices: [ absent, present ] + privs: + description: + - Comma separated list of privileges to grant/revoke. + type: str + aliases: + - priv + type: + description: + - Type of database object to set privileges on. + - The C(default_privs) choice is available starting at version 2.7. + - The C(foreign_data_wrapper) and C(foreign_server) object types are available from Ansible version '2.8'. + - The C(type) choice is available from Ansible version '2.10'. + type: str + default: table + choices: [ database, default_privs, foreign_data_wrapper, foreign_server, function, + group, language, table, tablespace, schema, sequence, type ] + objs: + description: + - Comma separated list of database objects to set privileges on. + - If I(type) is C(table), C(partition table), C(sequence) or C(function), + the special valueC(ALL_IN_SCHEMA) can be provided instead to specify all + database objects of type I(type) in the schema specified via I(schema). + (This also works with PostgreSQL < 9.0.) (C(ALL_IN_SCHEMA) is available + for C(function) and C(partition table) from version 2.8) + - If I(type) is C(database), this parameter can be omitted, in which case + privileges are set for the database specified via I(database). 
+ - 'If I(type) is I(function), colons (":") in object names will be + replaced with commas (needed to specify function signatures, see examples)' + type: str + aliases: + - obj + schema: + description: + - Schema that contains the database objects specified via I(objs). + - May only be provided if I(type) is C(table), C(sequence), C(function), C(type), + or C(default_privs). Defaults to C(public) in these cases. + - Pay attention, for embedded types when I(type=type) + I(schema) can be C(pg_catalog) or C(information_schema) respectively. + type: str + roles: + description: + - Comma separated list of role (user/group) names to set permissions for. + - The special value C(PUBLIC) can be provided instead to set permissions + for the implicitly defined PUBLIC group. + type: str + required: yes + aliases: + - role + fail_on_role: + description: + - If C(yes), fail when target role (for whom privs need to be granted) does not exist. + Otherwise just warn and continue. + default: yes + type: bool + session_role: + description: + - Switch to session_role after connecting. + - The specified session_role must be a role that the current login_user is a member of. + - Permissions checking for SQL commands is carried out as though the session_role were the one that had logged in originally. + type: str + target_roles: + description: + - A list of existing role (user/group) names to set as the + default permissions for database objects subsequently created by them. + - Parameter I(target_roles) is only available with C(type=default_privs). + type: str + grant_option: + description: + - Whether C(role) may grant/revoke the specified privileges/group memberships to others. + - Set to C(no) to revoke GRANT OPTION, leave unspecified to make no changes. + - I(grant_option) only has an effect if I(state) is C(present). + type: bool + aliases: + - admin_option + host: + description: + - Database host address. If unspecified, connect via Unix socket. + type: str + aliases: + - login_host + port: + description: + - Database port to connect to. + type: int + default: 5432 + aliases: + - login_port + unix_socket: + description: + - Path to a Unix domain socket for local connections. + type: str + aliases: + - login_unix_socket + login: + description: + - The username to authenticate with. + type: str + default: postgres + aliases: + - login_user + password: + description: + - The password to authenticate with. + type: str + aliases: + - login_password + ssl_mode: + description: + - Determines whether or with what priority a secure SSL TCP/IP connection will be negotiated with the server. + - See U(https://www.postgresql.org/docs/current/static/libpq-ssl.html) for more information on the modes. + - Default of C(prefer) matches libpq default. + type: str + default: prefer + choices: [ allow, disable, prefer, require, verify-ca, verify-full ] + ca_cert: + description: + - Specifies the name of a file containing SSL certificate authority (CA) certificate(s). + - If the file exists, the server's certificate will be verified to be signed by one of these authorities. + type: str + aliases: + - ssl_rootcert + +notes: +- Parameters that accept comma separated lists (I(privs), I(objs), I(roles)) + have singular alias names (I(priv), I(obj), I(role)). +- To revoke only C(GRANT OPTION) for a specific object, set I(state) to + C(present) and I(grant_option) to C(no) (see examples). 
+- Note that when revoking privileges from a role R, this role may still have + access via privileges granted to any role R is a member of including C(PUBLIC). +- Note that when revoking privileges from a role R, you do so as the user + specified via I(login). If R has been granted the same privileges by + another user also, R can still access database objects via these privileges. +- When revoking privileges, C(RESTRICT) is assumed (see PostgreSQL docs). + +seealso: +- module: postgresql_user +- module: postgresql_owner +- module: postgresql_membership +- name: PostgreSQL privileges + description: General information about PostgreSQL privileges. + link: https://www.postgresql.org/docs/current/ddl-priv.html +- name: PostgreSQL GRANT command reference + description: Complete reference of the PostgreSQL GRANT command documentation. + link: https://www.postgresql.org/docs/current/sql-grant.html +- name: PostgreSQL REVOKE command reference + description: Complete reference of the PostgreSQL REVOKE command documentation. + link: https://www.postgresql.org/docs/current/sql-revoke.html + +extends_documentation_fragment: +- community.general.postgres + + +author: +- Bernhard Weitzhofer (@b6d) +- Tobias Birkefeld (@tcraxs) +''' + +EXAMPLES = r''' +# On database "library": +# GRANT SELECT, INSERT, UPDATE ON TABLE public.books, public.authors +# TO librarian, reader WITH GRANT OPTION +- name: Grant privs to librarian and reader on database library + postgresql_privs: + database: library + state: present + privs: SELECT,INSERT,UPDATE + type: table + objs: books,authors + schema: public + roles: librarian,reader + grant_option: yes + +- name: Same as above leveraging default values + postgresql_privs: + db: library + privs: SELECT,INSERT,UPDATE + objs: books,authors + roles: librarian,reader + grant_option: yes + +# REVOKE GRANT OPTION FOR INSERT ON TABLE books FROM reader +# Note that role "reader" will be *granted* INSERT privilege itself if this +# isn't already the case (since state: present). +- name: Revoke privs from reader + postgresql_privs: + db: library + state: present + priv: INSERT + obj: books + role: reader + grant_option: no + +# "public" is the default schema. This also works for PostgreSQL 8.x. +- name: REVOKE INSERT, UPDATE ON ALL TABLES IN SCHEMA public FROM reader + postgresql_privs: + db: library + state: absent + privs: INSERT,UPDATE + objs: ALL_IN_SCHEMA + role: reader + +- name: GRANT ALL PRIVILEGES ON SCHEMA public, math TO librarian + postgresql_privs: + db: library + privs: ALL + type: schema + objs: public,math + role: librarian + +# Note the separation of arguments with colons. +- name: GRANT ALL PRIVILEGES ON FUNCTION math.add(int, int) TO librarian, reader + postgresql_privs: + db: library + privs: ALL + type: function + obj: add(int:int) + schema: math + roles: librarian,reader + +# Note that group role memberships apply cluster-wide and therefore are not +# restricted to database "library" here. 
+- name: GRANT librarian, reader TO alice, bob WITH ADMIN OPTION + postgresql_privs: + db: library + type: group + objs: librarian,reader + roles: alice,bob + admin_option: yes + +# Note that here "db: postgres" specifies the database to connect to, not the +# database to grant privileges on (which is specified via the "objs" param) +- name: GRANT ALL PRIVILEGES ON DATABASE library TO librarian + postgresql_privs: + db: postgres + privs: ALL + type: database + obj: library + role: librarian + +# If objs is omitted for type "database", it defaults to the database +# to which the connection is established +- name: GRANT ALL PRIVILEGES ON DATABASE library TO librarian + postgresql_privs: + db: library + privs: ALL + type: database + role: librarian + +# Available since version 2.7 +# Objs must be set, ALL_DEFAULT to TABLES/SEQUENCES/TYPES/FUNCTIONS +# ALL_DEFAULT works only with privs=ALL +# For specific +- name: ALTER DEFAULT PRIVILEGES ON DATABASE library TO librarian + postgresql_privs: + db: library + objs: ALL_DEFAULT + privs: ALL + type: default_privs + role: librarian + grant_option: yes + +# Available since version 2.7 +# Objs must be set, ALL_DEFAULT to TABLES/SEQUENCES/TYPES/FUNCTIONS +# ALL_DEFAULT works only with privs=ALL +# For specific +- name: ALTER DEFAULT PRIVILEGES ON DATABASE library TO reader, step 1 + postgresql_privs: + db: library + objs: TABLES,SEQUENCES + privs: SELECT + type: default_privs + role: reader + +- name: ALTER DEFAULT PRIVILEGES ON DATABASE library TO reader, step 2 + postgresql_privs: + db: library + objs: TYPES + privs: USAGE + type: default_privs + role: reader + +# Available since version 2.8 +- name: GRANT ALL PRIVILEGES ON FOREIGN DATA WRAPPER fdw TO reader + postgresql_privs: + db: test + objs: fdw + privs: ALL + type: foreign_data_wrapper + role: reader + +# Available since version 2.10 +- name: GRANT ALL PRIVILEGES ON TYPE customtype TO reader + postgresql_privs: + db: test + objs: customtype + privs: ALL + type: type + role: reader + +# Available since version 2.8 +- name: GRANT ALL PRIVILEGES ON FOREIGN SERVER fdw_server TO reader + postgresql_privs: + db: test + objs: fdw_server + privs: ALL + type: foreign_server + role: reader + +# Available since version 2.8 +# Grant 'execute' permissions on all functions in schema 'common' to role 'caller' +- name: GRANT EXECUTE ON ALL FUNCTIONS IN SCHEMA common TO caller + postgresql_privs: + type: function + state: present + privs: EXECUTE + roles: caller + objs: ALL_IN_SCHEMA + schema: common + +# Available since version 2.8 +# ALTER DEFAULT PRIVILEGES FOR ROLE librarian IN SCHEMA library GRANT SELECT ON TABLES TO reader +# GRANT SELECT privileges for new TABLES objects created by librarian as +# default to the role reader. +# For specific +- name: ALTER privs + postgresql_privs: + db: library + schema: library + objs: TABLES + privs: SELECT + type: default_privs + role: reader + target_roles: librarian + +# Available since version 2.8 +# ALTER DEFAULT PRIVILEGES FOR ROLE librarian IN SCHEMA library REVOKE SELECT ON TABLES FROM reader +# REVOKE SELECT privileges for new TABLES objects created by librarian as +# default from the role reader. 
+# For specific
+- name: ALTER privs
+  postgresql_privs:
+    db: library
+    state: absent
+    schema: library
+    objs: TABLES
+    privs: SELECT
+    type: default_privs
+    role: reader
+    target_roles: librarian
+
+# Available since version 2.10
+- name: Grant type privileges for pg_catalog.numeric type to alice
+  postgresql_privs:
+    type: type
+    roles: alice
+    privs: ALL
+    objs: numeric
+    schema: pg_catalog
+    db: acme
+'''
+
+RETURN = r'''
+queries:
+  description: List of executed queries.
+  returned: always
+  type: list
+  sample: ['REVOKE GRANT OPTION FOR INSERT ON TABLE "books" FROM "reader";']
+  version_added: '2.8'
+'''
+
+import traceback
+
+PSYCOPG2_IMP_ERR = None
+try:
+    import psycopg2
+    import psycopg2.extensions
+except ImportError:
+    PSYCOPG2_IMP_ERR = traceback.format_exc()
+    psycopg2 = None
+
+# import module snippets
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible_collections.community.general.plugins.module_utils.database import pg_quote_identifier
+from ansible_collections.community.general.plugins.module_utils.postgres import postgres_common_argument_spec
+from ansible.module_utils._text import to_native
+
+VALID_PRIVS = frozenset(('SELECT', 'INSERT', 'UPDATE', 'DELETE', 'TRUNCATE',
+                         'REFERENCES', 'TRIGGER', 'CREATE', 'CONNECT',
+                         'TEMPORARY', 'TEMP', 'EXECUTE', 'USAGE', 'ALL'))
+VALID_DEFAULT_OBJS = {'TABLES': ('ALL', 'SELECT', 'INSERT', 'UPDATE', 'DELETE', 'TRUNCATE', 'REFERENCES', 'TRIGGER'),
+                      'SEQUENCES': ('ALL', 'SELECT', 'UPDATE', 'USAGE'),
+                      'FUNCTIONS': ('ALL', 'EXECUTE'),
+                      'TYPES': ('ALL', 'USAGE')}
+
+executed_queries = []
+
+
+class Error(Exception):
+    pass
+
+
+def role_exists(module, cursor, rolname):
+    """Check whether a role exists or not."""
+    query = "SELECT 1 FROM pg_roles WHERE rolname = '%s'" % rolname
+    try:
+        cursor.execute(query)
+        return cursor.rowcount > 0
+
+    except Exception as e:
+        module.fail_json(msg="Cannot execute SQL '%s': %s" % (query, to_native(e)))
+
+    return False
+
+
+# We don't have functools.partial in Python < 2.5
+def partial(f, *args, **kwargs):
+    """Partial function application"""
+
+    def g(*g_args, **g_kwargs):
+        new_kwargs = kwargs.copy()
+        new_kwargs.update(g_kwargs)
+        return f(*(args + g_args), **new_kwargs)
+
+    g.f = f
+    g.args = args
+    g.kwargs = kwargs
+    return g
+
+
+class Connection(object):
+    """Wrapper around a psycopg2 connection with some convenience methods"""
+
+    def __init__(self, params, module):
+        self.database = params.database
+        self.module = module
+        # To use default values, keyword arguments must be absent, so
+        # check which values are empty and don't include them in the **kw
+        # dictionary
+        params_map = {
+            "host": "host",
+            "login": "user",
+            "password": "password",
+            "port": "port",
+            "database": "database",
+            "ssl_mode": "sslmode",
+            "ca_cert": "sslrootcert"
+        }
+
+        kw = dict((params_map[k], getattr(params, k)) for k in params_map
+                  if getattr(params, k) != '' and getattr(params, k) is not None)
+
+        # If a unix_socket is specified, incorporate it here.
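+        # (psycopg2/libpq treat a "host" value starting with a slash as the
+        # directory of a Unix domain socket, so passing the socket path via
+        # "host" is enough here.)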
+        is_localhost = "host" not in kw or kw["host"] == "" or kw["host"] == "localhost"
+        if is_localhost and params.unix_socket != "":
+            kw["host"] = params.unix_socket
+
+        sslrootcert = params.ca_cert
+        if psycopg2.__version__ < '2.4.3' and sslrootcert is not None:
+            raise ValueError('psycopg2 must be at least 2.4.3 in order to use the ca_cert parameter')
+
+        self.connection = psycopg2.connect(**kw)
+        self.cursor = self.connection.cursor()
+
+    def commit(self):
+        self.connection.commit()
+
+    def rollback(self):
+        self.connection.rollback()
+
+    @property
+    def encoding(self):
+        """Connection encoding in Python-compatible form"""
+        return psycopg2.extensions.encodings[self.connection.encoding]
+
+    # Methods for querying database objects
+
+    # PostgreSQL < 9.0 doesn't support "ALL TABLES IN SCHEMA schema"-like
+    # phrases in GRANT or REVOKE statements, therefore alternative methods are
+    # provided here.
+
+    def schema_exists(self, schema):
+        query = """SELECT count(*)
+                   FROM pg_catalog.pg_namespace WHERE nspname = %s"""
+        self.cursor.execute(query, (schema,))
+        return self.cursor.fetchone()[0] > 0
+
+    def get_all_tables_in_schema(self, schema):
+        if not self.schema_exists(schema):
+            raise Error('Schema "%s" does not exist.' % schema)
+        query = """SELECT relname
+                   FROM pg_catalog.pg_class c
+                   JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace
+                   WHERE nspname = %s AND relkind in ('r', 'v', 'm', 'p')"""
+        self.cursor.execute(query, (schema,))
+        return [t[0] for t in self.cursor.fetchall()]
+
+    def get_all_sequences_in_schema(self, schema):
+        if not self.schema_exists(schema):
+            raise Error('Schema "%s" does not exist.' % schema)
+        query = """SELECT relname
+                   FROM pg_catalog.pg_class c
+                   JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace
+                   WHERE nspname = %s AND relkind = 'S'"""
+        self.cursor.execute(query, (schema,))
+        return [t[0] for t in self.cursor.fetchall()]
+
+    def get_all_functions_in_schema(self, schema):
+        if not self.schema_exists(schema):
+            raise Error('Schema "%s" does not exist.' % schema)
+        query = """SELECT p.proname, oidvectortypes(p.proargtypes)
+                   FROM pg_catalog.pg_proc p
+                   JOIN pg_namespace n ON n.oid = p.pronamespace
+                   WHERE nspname = %s"""
+        self.cursor.execute(query, (schema,))
+        return ["%s(%s)" % (t[0], t[1]) for t in self.cursor.fetchall()]
+
+    # Methods for getting access control lists and group membership info
+
+    # To determine whether anything has changed after granting/revoking
+    # privileges, we compare the access control lists of the specified database
+    # objects before and afterwards. Python's list/string comparison should
+    # suffice for change detection, we should not actually have to parse ACLs.
+    # The same should apply to group membership information.
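+    # For reference, an aclitem array rendered as text looks roughly like
+    # '{librarian=arwdDxt/postgres,reader=r/postgres}' (illustrative values);
+    # comparing such strings before and after the GRANT/REVOKE is what
+    # detects a change.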
+ + def get_table_acls(self, schema, tables): + query = """SELECT relacl + FROM pg_catalog.pg_class c + JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace + WHERE nspname = %s AND relkind in ('r','p','v','m') AND relname = ANY (%s) + ORDER BY relname""" + self.cursor.execute(query, (schema, tables)) + return [t[0] for t in self.cursor.fetchall()] + + def get_sequence_acls(self, schema, sequences): + query = """SELECT relacl + FROM pg_catalog.pg_class c + JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace + WHERE nspname = %s AND relkind = 'S' AND relname = ANY (%s) + ORDER BY relname""" + self.cursor.execute(query, (schema, sequences)) + return [t[0] for t in self.cursor.fetchall()] + + def get_function_acls(self, schema, function_signatures): + funcnames = [f.split('(', 1)[0] for f in function_signatures] + query = """SELECT proacl + FROM pg_catalog.pg_proc p + JOIN pg_catalog.pg_namespace n ON n.oid = p.pronamespace + WHERE nspname = %s AND proname = ANY (%s) + ORDER BY proname, proargtypes""" + self.cursor.execute(query, (schema, funcnames)) + return [t[0] for t in self.cursor.fetchall()] + + def get_schema_acls(self, schemas): + query = """SELECT nspacl FROM pg_catalog.pg_namespace + WHERE nspname = ANY (%s) ORDER BY nspname""" + self.cursor.execute(query, (schemas,)) + return [t[0] for t in self.cursor.fetchall()] + + def get_language_acls(self, languages): + query = """SELECT lanacl FROM pg_catalog.pg_language + WHERE lanname = ANY (%s) ORDER BY lanname""" + self.cursor.execute(query, (languages,)) + return [t[0] for t in self.cursor.fetchall()] + + def get_tablespace_acls(self, tablespaces): + query = """SELECT spcacl FROM pg_catalog.pg_tablespace + WHERE spcname = ANY (%s) ORDER BY spcname""" + self.cursor.execute(query, (tablespaces,)) + return [t[0] for t in self.cursor.fetchall()] + + def get_database_acls(self, databases): + query = """SELECT datacl FROM pg_catalog.pg_database + WHERE datname = ANY (%s) ORDER BY datname""" + self.cursor.execute(query, (databases,)) + return [t[0] for t in self.cursor.fetchall()] + + def get_group_memberships(self, groups): + query = """SELECT roleid, grantor, member, admin_option + FROM pg_catalog.pg_auth_members am + JOIN pg_catalog.pg_roles r ON r.oid = am.roleid + WHERE r.rolname = ANY(%s) + ORDER BY roleid, grantor, member""" + self.cursor.execute(query, (groups,)) + return self.cursor.fetchall() + + def get_default_privs(self, schema, *args): + query = """SELECT defaclacl + FROM pg_default_acl a + JOIN pg_namespace b ON a.defaclnamespace=b.oid + WHERE b.nspname = %s;""" + self.cursor.execute(query, (schema,)) + return [t[0] for t in self.cursor.fetchall()] + + def get_foreign_data_wrapper_acls(self, fdws): + query = """SELECT fdwacl FROM pg_catalog.pg_foreign_data_wrapper + WHERE fdwname = ANY (%s) ORDER BY fdwname""" + self.cursor.execute(query, (fdws,)) + return [t[0] for t in self.cursor.fetchall()] + + def get_foreign_server_acls(self, fs): + query = """SELECT srvacl FROM pg_catalog.pg_foreign_server + WHERE srvname = ANY (%s) ORDER BY srvname""" + self.cursor.execute(query, (fs,)) + return [t[0] for t in self.cursor.fetchall()] + + def get_type_acls(self, schema, types): + query = """SELECT t.typacl FROM pg_catalog.pg_type t + JOIN pg_catalog.pg_namespace n ON n.oid = t.typnamespace + WHERE n.nspname = %s AND t.typname = ANY (%s) ORDER BY typname""" + self.cursor.execute(query, (schema, types)) + return [t[0] for t in self.cursor.fetchall()] + + # Manipulating privileges + + def manipulate_privs(self, obj_type, privs, 
objs, roles, target_roles, + state, grant_option, schema_qualifier=None, fail_on_role=True): + """Manipulate database object privileges. + + :param obj_type: Type of database object to grant/revoke + privileges for. + :param privs: Either a list of privileges to grant/revoke + or None if type is "group". + :param objs: List of database objects to grant/revoke + privileges for. + :param roles: Either a list of role names or "PUBLIC" + for the implicitly defined "PUBLIC" group + :param target_roles: List of role names to grant/revoke + default privileges as. + :param state: "present" to grant privileges, "absent" to revoke. + :param grant_option: Only for state "present": If True, set + grant/admin option. If False, revoke it. + If None, don't change grant option. + :param schema_qualifier: Some object types ("TABLE", "SEQUENCE", + "FUNCTION") must be qualified by schema. + Ignored for other Types. + """ + # get_status: function to get current status + if obj_type == 'table': + get_status = partial(self.get_table_acls, schema_qualifier) + elif obj_type == 'sequence': + get_status = partial(self.get_sequence_acls, schema_qualifier) + elif obj_type == 'function': + get_status = partial(self.get_function_acls, schema_qualifier) + elif obj_type == 'schema': + get_status = self.get_schema_acls + elif obj_type == 'language': + get_status = self.get_language_acls + elif obj_type == 'tablespace': + get_status = self.get_tablespace_acls + elif obj_type == 'database': + get_status = self.get_database_acls + elif obj_type == 'group': + get_status = self.get_group_memberships + elif obj_type == 'default_privs': + get_status = partial(self.get_default_privs, schema_qualifier) + elif obj_type == 'foreign_data_wrapper': + get_status = self.get_foreign_data_wrapper_acls + elif obj_type == 'foreign_server': + get_status = self.get_foreign_server_acls + elif obj_type == 'type': + get_status = partial(self.get_type_acls, schema_qualifier) + else: + raise Error('Unsupported database object type "%s".' % obj_type) + + # Return False (nothing has changed) if there are no objs to work on. + if not objs: + return False + + # obj_ids: quoted db object identifiers (sometimes schema-qualified) + if obj_type == 'function': + obj_ids = [] + for obj in objs: + try: + f, args = obj.split('(', 1) + except Exception: + raise Error('Illegal function signature: "%s".' 
% obj) + obj_ids.append('"%s"."%s"(%s' % (schema_qualifier, f, args)) + elif obj_type in ['table', 'sequence', 'type']: + obj_ids = ['"%s"."%s"' % (schema_qualifier, o) for o in objs] + else: + obj_ids = ['"%s"' % o for o in objs] + + # set_what: SQL-fragment specifying what to set for the target roles: + # Either group membership or privileges on objects of a certain type + if obj_type == 'group': + set_what = ','.join('"%s"' % i for i in obj_ids) + elif obj_type == 'default_privs': + # We don't want privs to be quoted here + set_what = ','.join(privs) + else: + # function types are already quoted above + if obj_type != 'function': + obj_ids = [pg_quote_identifier(i, 'table') for i in obj_ids] + # Note: obj_type has been checked against a set of string literals + # and privs was escaped when it was parsed + # Note: Underscores are replaced with spaces to support multi-word obj_type + set_what = '%s ON %s %s' % (','.join(privs), obj_type.replace('_', ' '), + ','.join(obj_ids)) + + # for_whom: SQL-fragment specifying for whom to set the above + if roles == 'PUBLIC': + for_whom = 'PUBLIC' + else: + for_whom = [] + for r in roles: + if not role_exists(self.module, self.cursor, r): + if fail_on_role: + self.module.fail_json(msg="Role '%s' does not exist" % r.strip()) + + else: + self.module.warn("Role '%s' does not exist, pass it" % r.strip()) + else: + for_whom.append('"%s"' % r) + + if not for_whom: + return False + + for_whom = ','.join(for_whom) + + # as_who: + as_who = None + if target_roles: + as_who = ','.join('"%s"' % r for r in target_roles) + + status_before = get_status(objs) + + query = QueryBuilder(state) \ + .for_objtype(obj_type) \ + .with_grant_option(grant_option) \ + .for_whom(for_whom) \ + .as_who(as_who) \ + .for_schema(schema_qualifier) \ + .set_what(set_what) \ + .for_objs(objs) \ + .build() + + executed_queries.append(query) + self.cursor.execute(query) + status_after = get_status(objs) + + def nonesorted(e): + # For python 3+ that can fail trying + # to compare NoneType elements by sort method. 
+ if e is None: + return '' + return e + + status_before.sort(key=nonesorted) + status_after.sort(key=nonesorted) + return status_before != status_after + + +class QueryBuilder(object): + def __init__(self, state): + self._grant_option = None + self._for_whom = None + self._as_who = None + self._set_what = None + self._obj_type = None + self._state = state + self._schema = None + self._objs = None + self.query = [] + + def for_objs(self, objs): + self._objs = objs + return self + + def for_schema(self, schema): + self._schema = schema + return self + + def with_grant_option(self, option): + self._grant_option = option + return self + + def for_whom(self, who): + self._for_whom = who + return self + + def as_who(self, target_roles): + self._as_who = target_roles + return self + + def set_what(self, what): + self._set_what = what + return self + + def for_objtype(self, objtype): + self._obj_type = objtype + return self + + def build(self): + if self._state == 'present': + self.build_present() + elif self._state == 'absent': + self.build_absent() + else: + self.build_absent() + return '\n'.join(self.query) + + def add_default_revoke(self): + for obj in self._objs: + if self._as_who: + self.query.append( + 'ALTER DEFAULT PRIVILEGES FOR ROLE {0} IN SCHEMA {1} REVOKE ALL ON {2} FROM {3};'.format(self._as_who, + self._schema, obj, + self._for_whom)) + else: + self.query.append( + 'ALTER DEFAULT PRIVILEGES IN SCHEMA {0} REVOKE ALL ON {1} FROM {2};'.format(self._schema, obj, + self._for_whom)) + + def add_grant_option(self): + if self._grant_option: + if self._obj_type == 'group': + self.query[-1] += ' WITH ADMIN OPTION;' + else: + self.query[-1] += ' WITH GRANT OPTION;' + else: + self.query[-1] += ';' + if self._obj_type == 'group': + self.query.append('REVOKE ADMIN OPTION FOR {0} FROM {1};'.format(self._set_what, self._for_whom)) + elif not self._obj_type == 'default_privs': + self.query.append('REVOKE GRANT OPTION FOR {0} FROM {1};'.format(self._set_what, self._for_whom)) + + def add_default_priv(self): + for obj in self._objs: + if self._as_who: + self.query.append( + 'ALTER DEFAULT PRIVILEGES FOR ROLE {0} IN SCHEMA {1} GRANT {2} ON {3} TO {4}'.format(self._as_who, + self._schema, + self._set_what, + obj, + self._for_whom)) + else: + self.query.append( + 'ALTER DEFAULT PRIVILEGES IN SCHEMA {0} GRANT {1} ON {2} TO {3}'.format(self._schema, + self._set_what, + obj, + self._for_whom)) + self.add_grant_option() + if self._as_who: + self.query.append( + 'ALTER DEFAULT PRIVILEGES FOR ROLE {0} IN SCHEMA {1} GRANT USAGE ON TYPES TO {2}'.format(self._as_who, + self._schema, + self._for_whom)) + else: + self.query.append( + 'ALTER DEFAULT PRIVILEGES IN SCHEMA {0} GRANT USAGE ON TYPES TO {1}'.format(self._schema, self._for_whom)) + self.add_grant_option() + + def build_present(self): + if self._obj_type == 'default_privs': + self.add_default_revoke() + self.add_default_priv() + else: + self.query.append('GRANT {0} TO {1}'.format(self._set_what, self._for_whom)) + self.add_grant_option() + + def build_absent(self): + if self._obj_type == 'default_privs': + self.query = [] + for obj in ['TABLES', 'SEQUENCES', 'TYPES']: + if self._as_who: + self.query.append( + 'ALTER DEFAULT PRIVILEGES FOR ROLE {0} IN SCHEMA {1} REVOKE ALL ON {2} FROM {3};'.format(self._as_who, + self._schema, obj, + self._for_whom)) + else: + self.query.append( + 'ALTER DEFAULT PRIVILEGES IN SCHEMA {0} REVOKE ALL ON {1} FROM {2};'.format(self._schema, obj, + self._for_whom)) + else: + self.query.append('REVOKE {0} FROM 
{1};'.format(self._set_what, self._for_whom)) + + +def main(): + argument_spec = postgres_common_argument_spec() + argument_spec.update( + database=dict(required=True, aliases=['db', 'login_db']), + state=dict(default='present', choices=['present', 'absent']), + privs=dict(required=False, aliases=['priv']), + type=dict(default='table', + choices=['table', + 'sequence', + 'function', + 'database', + 'schema', + 'language', + 'tablespace', + 'group', + 'default_privs', + 'foreign_data_wrapper', + 'foreign_server', + 'type', ]), + objs=dict(required=False, aliases=['obj']), + schema=dict(required=False), + roles=dict(required=True, aliases=['role']), + session_role=dict(required=False), + target_roles=dict(required=False), + grant_option=dict(required=False, type='bool', + aliases=['admin_option']), + host=dict(default='', aliases=['login_host']), + unix_socket=dict(default='', aliases=['login_unix_socket']), + login=dict(default='postgres', aliases=['login_user']), + password=dict(default='', aliases=['login_password'], no_log=True), + fail_on_role=dict(type='bool', default=True), + ) + + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + + fail_on_role = module.params['fail_on_role'] + + # Create type object as namespace for module params + p = type('Params', (), module.params) + # param "schema": default, allowed depends on param "type" + if p.type in ['table', 'sequence', 'function', 'type', 'default_privs']: + p.schema = p.schema or 'public' + elif p.schema: + module.fail_json(msg='Argument "schema" is not allowed ' + 'for type "%s".' % p.type) + + # param "objs": default, required depends on param "type" + if p.type == 'database': + p.objs = p.objs or p.database + elif not p.objs: + module.fail_json(msg='Argument "objs" is required ' + 'for type "%s".' % p.type) + + # param "privs": allowed, required depends on param "type" + if p.type == 'group': + if p.privs: + module.fail_json(msg='Argument "privs" is not allowed ' + 'for type "group".') + elif not p.privs: + module.fail_json(msg='Argument "privs" is required ' + 'for type "%s".' 
% p.type) + + # Connect to Database + if not psycopg2: + module.fail_json(msg=missing_required_lib('psycopg2'), exception=PSYCOPG2_IMP_ERR) + try: + conn = Connection(p, module) + except psycopg2.Error as e: + module.fail_json(msg='Could not connect to database: %s' % to_native(e), exception=traceback.format_exc()) + except TypeError as e: + if 'sslrootcert' in e.args[0]: + module.fail_json(msg='Postgresql server must be at least version 8.4 to support sslrootcert') + module.fail_json(msg="unable to connect to database: %s" % to_native(e), exception=traceback.format_exc()) + except ValueError as e: + # We raise this when the psycopg library is too old + module.fail_json(msg=to_native(e)) + + if p.session_role: + try: + conn.cursor.execute('SET ROLE "%s"' % p.session_role) + except Exception as e: + module.fail_json(msg="Could not switch to role %s: %s" % (p.session_role, to_native(e)), exception=traceback.format_exc()) + + try: + # privs + if p.privs: + privs = frozenset(pr.upper() for pr in p.privs.split(',')) + if not privs.issubset(VALID_PRIVS): + module.fail_json(msg='Invalid privileges specified: %s' % privs.difference(VALID_PRIVS)) + else: + privs = None + # objs: + if p.type == 'table' and p.objs == 'ALL_IN_SCHEMA': + objs = conn.get_all_tables_in_schema(p.schema) + elif p.type == 'sequence' and p.objs == 'ALL_IN_SCHEMA': + objs = conn.get_all_sequences_in_schema(p.schema) + elif p.type == 'function' and p.objs == 'ALL_IN_SCHEMA': + objs = conn.get_all_functions_in_schema(p.schema) + elif p.type == 'default_privs': + if p.objs == 'ALL_DEFAULT': + objs = frozenset(VALID_DEFAULT_OBJS.keys()) + else: + objs = frozenset(obj.upper() for obj in p.objs.split(',')) + if not objs.issubset(VALID_DEFAULT_OBJS): + module.fail_json( + msg='Invalid Object set specified: %s' % objs.difference(VALID_DEFAULT_OBJS.keys())) + # Again, do we have valid privs specified for object type: + valid_objects_for_priv = frozenset(obj for obj in objs if privs.issubset(VALID_DEFAULT_OBJS[obj])) + if not valid_objects_for_priv == objs: + module.fail_json( + msg='Invalid priv specified. Valid object for priv: {0}. 
Objects: {1}'.format( + valid_objects_for_priv, objs)) + else: + objs = p.objs.split(',') + + # function signatures are encoded using ':' to separate args + if p.type == 'function': + objs = [obj.replace(':', ',') for obj in objs] + + # roles + if p.roles == 'PUBLIC': + roles = 'PUBLIC' + else: + roles = p.roles.split(',') + + if len(roles) == 1 and not role_exists(module, conn.cursor, roles[0]): + if fail_on_role: + module.fail_json(msg="Role '%s' does not exist" % roles[0].strip()) + + else: + module.warn("Role '%s' does not exist, nothing to do" % roles[0].strip()) + + module.exit_json(changed=False) + + # check if target_roles is set with type: default_privs + if p.target_roles and p.type != 'default_privs': + module.warn('"target_roles" will be ignored. ' + 'Argument "type: default_privs" is required for usage of "target_roles".') + + # target roles + if p.target_roles: + target_roles = p.target_roles.split(',') + else: + target_roles = None + + changed = conn.manipulate_privs( + obj_type=p.type, + privs=privs, + objs=objs, + roles=roles, + target_roles=target_roles, + state=p.state, + grant_option=p.grant_option, + schema_qualifier=p.schema, + fail_on_role=fail_on_role, + ) + + except Error as e: + conn.rollback() + module.fail_json(msg=e.message, exception=traceback.format_exc()) + + except psycopg2.Error as e: + conn.rollback() + module.fail_json(msg=to_native(e.message)) + + if module.check_mode: + conn.rollback() + else: + conn.commit() + module.exit_json(changed=changed, queries=executed_queries) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/database/postgresql/postgresql_publication.py b/plugins/modules/database/postgresql/postgresql_publication.py new file mode 100644 index 0000000000..10737f47df --- /dev/null +++ b/plugins/modules/database/postgresql/postgresql_publication.py @@ -0,0 +1,655 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2019, Loic Blot (@nerzhul) +# Copyright: (c) 2019, Andrew Klychkov (@Andersson007) +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = r''' +--- +module: postgresql_publication +short_description: Add, update, or remove PostgreSQL publication +description: +- Add, update, or remove PostgreSQL publication. +options: + name: + description: + - Name of the publication to add, update, or remove. + required: true + type: str + db: + description: + - Name of the database to connect to and where + the publication state will be changed. + aliases: [ login_db ] + type: str + tables: + description: + - List of tables to add to the publication. + - If no value is set, all tables are targeted. + - If the publication already exists for specific tables and I(tables) is not passed, + nothing will be changed. If you need to add all tables to the publication with the same name, + drop the existing publication and create a new one without passing I(tables). + type: list + elements: str + state: + description: + - The publication state. + default: present + choices: [ absent, present ] + type: str + parameters: + description: + - Dictionary with optional publication parameters. + - Available parameters depend on PostgreSQL version. + type: dict + owner: + description: + - Publication owner. + - If I(owner) is not defined, the owner will be set as I(login_user) or I(session_role).
+ type: str + cascade: + description: + - Drop publication dependencies. Has effect with I(state=absent) only. + type: bool + default: false +notes: +- PostgreSQL version must be 10 or greater. +seealso: +- name: CREATE PUBLICATION reference + description: Complete reference of the CREATE PUBLICATION command documentation. + link: https://www.postgresql.org/docs/current/sql-createpublication.html +- name: ALTER PUBLICATION reference + description: Complete reference of the ALTER PUBLICATION command documentation. + link: https://www.postgresql.org/docs/current/sql-alterpublication.html +- name: DROP PUBLICATION reference + description: Complete reference of the DROP PUBLICATION command documentation. + link: https://www.postgresql.org/docs/current/sql-droppublication.html +author: +- Loic Blot (@nerzhul) +- Andrew Klychkov (@Andersson007) +extends_documentation_fragment: +- community.general.postgres + +''' + +EXAMPLES = r''' +- name: Create a new publication with name "acme" targeting all tables in database "test". + postgresql_publication: + db: test + name: acme + +- name: Create publication "acme" publishing only prices and vehicles tables. + postgresql_publication: + name: acme + tables: + - prices + - vehicles + +- name: > + Create publication "acme", set user alice as an owner, targeting all tables. + Allowable DML operations are INSERT and UPDATE only + postgresql_publication: + name: acme + owner: alice + parameters: + publish: 'insert,update' + +- name: > + Assuming publication "acme" exists and there are targeted + tables "prices" and "vehicles", add table "stores" to the publication. + postgresql_publication: + name: acme + tables: + - prices + - vehicles + - stores + +- name: Remove publication "acme" if it exists in database "test". + postgresql_publication: + db: test + name: acme + state: absent +''' + +RETURN = r''' +exists: + description: + - Flag indicating whether the publication exists at the end of runtime. + returned: always + type: bool + sample: true +queries: + description: List of executed queries. + returned: always + type: list + sample: [ 'DROP PUBLICATION "acme" CASCADE' ] +owner: + description: Owner of the publication at the end of runtime. + returned: if publication exists + type: str + sample: "alice" +tables: + description: + - List of tables in the publication at the end of runtime. + - If all tables are published, returns an empty list. + returned: if publication exists + type: list + sample: ["\"public\".\"prices\"", "\"public\".\"vehicles\""] +alltables: + description: + - Flag indicating that all tables are published. + returned: if publication exists + type: bool + sample: false +parameters: + description: Publication parameters at the end of runtime.
+ returned: if publication exists + type: dict + sample: {'publish': {'insert': false, 'delete': false, 'update': true}} +''' + + +try: + from psycopg2.extras import DictCursor +except ImportError: + # psycopg2 is checked by connect_to_db() + # from ansible.module_utils.postgres + pass + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.database import pg_quote_identifier +from ansible_collections.community.general.plugins.module_utils.postgres import ( + connect_to_db, + exec_sql, + get_conn_params, + postgres_common_argument_spec, +) +from ansible.module_utils.six import iteritems + +SUPPORTED_PG_VERSION = 10000 + + +################################ +# Module functions and classes # +################################ + +def transform_tables_representation(tbl_list): + """Add 'public.' to names of tables where a schema identifier is absent + and add quotes to each element. + + Args: + tbl_list (list): List of table names. + + Returns: + tbl_list (list): Changed list. + """ + for i, table in enumerate(tbl_list): + if '.' not in table: + tbl_list[i] = pg_quote_identifier('public.%s' % table.strip(), 'table') + else: + tbl_list[i] = pg_quote_identifier(table.strip(), 'table') + + return tbl_list + + +class PgPublication(): + """Class to work with PostgreSQL publication. + + Args: + module (AnsibleModule): Object of AnsibleModule class. + cursor (cursor): Cursor object of psycopg2 library to work with PostgreSQL. + name (str): The name of the publication. + + Attributes: + module (AnsibleModule): Object of AnsibleModule class. + cursor (cursor): Cursor object of psycopg2 library to work with PostgreSQL. + name (str): Name of the publication. + executed_queries (list): List of executed queries. + attrs (dict): Dict with publication attributes. + exists (bool): Flag indicates the publication exists or not. + """ + + def __init__(self, module, cursor, name): + self.module = module + self.cursor = cursor + self.name = name + self.executed_queries = [] + self.attrs = { + 'alltables': False, + 'tables': [], + 'parameters': {}, + 'owner': '', + } + self.exists = self.check_pub() + + def get_info(self): + """Refresh the publication information. + + Returns: + ``self.attrs``. + """ + self.exists = self.check_pub() + return self.attrs + + def check_pub(self): + """Check the publication and refresh ``self.attrs`` publication attribute. + + Returns: + True if the publication with ``self.name`` exists, False otherwise. + """ + + pub_info = self.__get_general_pub_info() + + if not pub_info: + # Publication does not exist: + return False + + self.attrs['owner'] = pub_info.get('pubowner') + + # Publication DML operations: + self.attrs['parameters']['publish'] = {} + self.attrs['parameters']['publish']['insert'] = pub_info.get('pubinsert', False) + self.attrs['parameters']['publish']['update'] = pub_info.get('pubupdate', False) + self.attrs['parameters']['publish']['delete'] = pub_info.get('pubdelete', False) + if pub_info.get('pubtruncate'): + self.attrs['parameters']['publish']['truncate'] = pub_info.get('pubtruncate') + + # If alltables flag is False, get the list of targeted tables: + if not pub_info.get('puballtables'): + table_info = self.__get_tables_pub_info() + # Join sublists [['schema', 'table'], ...] to ['schema.table', ...] 
+ # for better representation: + for i, schema_and_table in enumerate(table_info): + table_info[i] = pg_quote_identifier('.'.join(schema_and_table), 'table') + + self.attrs['tables'] = table_info + else: + self.attrs['alltables'] = True + + # Publication exists: + return True + + def create(self, tables, params, owner, check_mode=True): + """Create the publication. + + Args: + tables (list): List with names of the tables that need to be added to the publication. + params (dict): Dict contains optional publication parameters and their values. + owner (str): Name of the publication owner. + + Kwargs: + check_mode (bool): If True, don't actually change anything, + just make SQL, add it to ``self.executed_queries`` and return True. + + Returns: + changed (bool): True if publication has been created, otherwise False. + """ + changed = True + + query_fragments = ["CREATE PUBLICATION %s" % pg_quote_identifier(self.name, 'publication')] + + if tables: + query_fragments.append("FOR TABLE %s" % ', '.join(tables)) + else: + query_fragments.append("FOR ALL TABLES") + + if params: + params_list = [] + # Make list ["param = 'value'", ...] from params dict: + for (key, val) in iteritems(params): + params_list.append("%s = '%s'" % (key, val)) + + # Add the list to query_fragments: + query_fragments.append("WITH (%s)" % ', '.join(params_list)) + + changed = self.__exec_sql(' '.join(query_fragments), check_mode=check_mode) + + if owner: + # If check_mode, just add possible SQL to + # executed_queries and return: + self.__pub_set_owner(owner, check_mode=check_mode) + + return changed + + def update(self, tables, params, owner, check_mode=True): + """Update the publication. + + Args: + tables (list): List with names of the tables that need to be presented in the publication. + params (dict): Dict contains optional publication parameters and their values. + owner (str): Name of the publication owner. + + Kwargs: + check_mode (bool): If True, don't actually change anything, + just make SQL, add it to ``self.executed_queries`` and return True. + + Returns: + changed (bool): True if publication has been updated, otherwise False. + """ + changed = False + + # Add or drop tables from published tables suit: + if tables and not self.attrs['alltables']: + + # 1. If needs to add table to the publication: + for tbl in tables: + if tbl not in self.attrs['tables']: + # If needs to add table to the publication: + changed = self.__pub_add_table(tbl, check_mode=check_mode) + + # 2. if there is a table in targeted tables + # that's not presented in the passed tables: + for tbl in self.attrs['tables']: + if tbl not in tables: + changed = self.__pub_drop_table(tbl, check_mode=check_mode) + + elif tables and self.attrs['alltables']: + changed = self.__pub_set_tables(tables, check_mode=check_mode) + + # Update pub parameters: + if params: + for key, val in iteritems(params): + if self.attrs['parameters'].get(key): + + # In PostgreSQL 10/11 only 'publish' optional parameter is presented. + if key == 'publish': + # 'publish' value can be only a string with comma-separated items + # of allowed DML operations like 'insert,update' or + # 'insert,update,delete', etc. 
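+ # Illustrative example (values are hypothetical): if the current attrs are + # {'insert': True, 'update': True, 'delete': False} and the task passes + # publish: 'insert,delete', the loop below builds + # {'insert': True, 'update': False, 'delete': True}; the dicts differ, so + # ALTER PUBLICATION ... SET (publish = 'insert,delete') will be issued.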
+ # Make dictionary to compare with current attrs later: + val_dict = self.attrs['parameters']['publish'].copy() + val_list = val.split(',') + for v in val_dict: + if v in val_list: + val_dict[v] = True + else: + val_dict[v] = False + + # Compare val_dict and the dict with current 'publish' parameters, + # if they're different, set new values: + if val_dict != self.attrs['parameters']['publish']: + changed = self.__pub_set_param(key, val, check_mode=check_mode) + + # Default behavior for other cases: + elif self.attrs['parameters'][key] != val: + changed = self.__pub_set_param(key, val, check_mode=check_mode) + + else: + # If the parameter was not set before: + changed = self.__pub_set_param(key, val, check_mode=check_mode) + + # Update pub owner: + if owner: + if owner != self.attrs['owner']: + changed = self.__pub_set_owner(owner, check_mode=check_mode) + + return changed + + def drop(self, cascade=False, check_mode=True): + """Drop the publication. + + Kwargs: + cascade (bool): Flag indicates that publication needs to be deleted + with its dependencies. + check_mode (bool): If True, don't actually change anything, + just make SQL, add it to ``self.executed_queries`` and return True. + + Returns: + changed (bool): True if publication has been updated, otherwise False. + """ + if self.exists: + query_fragments = [] + query_fragments.append("DROP PUBLICATION %s" % pg_quote_identifier(self.name, 'publication')) + if cascade: + query_fragments.append("CASCADE") + + return self.__exec_sql(' '.join(query_fragments), check_mode=check_mode) + + def __get_general_pub_info(self): + """Get and return general publication information. + + Returns: + Dict with publication information if successful, False otherwise. + """ + # Check pg_publication.pubtruncate exists (supported from PostgreSQL 11): + pgtrunc_sup = exec_sql(self, ("SELECT 1 FROM information_schema.columns " + "WHERE table_name = 'pg_publication' " + "AND column_name = 'pubtruncate'"), add_to_executed=False) + + if pgtrunc_sup: + query = ("SELECT r.rolname AS pubowner, p.puballtables, p.pubinsert, " + "p.pubupdate , p.pubdelete, p.pubtruncate FROM pg_publication AS p " + "JOIN pg_catalog.pg_roles AS r " + "ON p.pubowner = r.oid " + "WHERE p.pubname = %(pname)s") + else: + query = ("SELECT r.rolname AS pubowner, p.puballtables, p.pubinsert, " + "p.pubupdate , p.pubdelete FROM pg_publication AS p " + "JOIN pg_catalog.pg_roles AS r " + "ON p.pubowner = r.oid " + "WHERE p.pubname = %(pname)s") + + result = exec_sql(self, query, query_params={'pname': self.name}, add_to_executed=False) + if result: + return result[0] + else: + return False + + def __get_tables_pub_info(self): + """Get and return tables that are published by the publication. + + Returns: + List of dicts with published tables. + """ + query = ("SELECT schemaname, tablename " + "FROM pg_publication_tables WHERE pubname = %(pname)s") + return exec_sql(self, query, query_params={'pname': self.name}, add_to_executed=False) + + def __pub_add_table(self, table, check_mode=False): + """Add a table to the publication. + + Args: + table (str): Table name. + + Kwargs: + check_mode (bool): If True, don't actually change anything, + just make SQL, add it to ``self.executed_queries`` and return True. + + Returns: + True if successful, False otherwise. 
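+ + Example (illustrative, with a hypothetical publication "acme"): + __pub_add_table('stores') generates + ALTER PUBLICATION "acme" ADD TABLE "stores" + and in check mode only appends the query to ``self.executed_queries``.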
+ """ + query = ("ALTER PUBLICATION %s ADD TABLE %s" % (pg_quote_identifier(self.name, 'publication'), + pg_quote_identifier(table, 'table'))) + return self.__exec_sql(query, check_mode=check_mode) + + def __pub_drop_table(self, table, check_mode=False): + """Drop a table from the publication. + + Args: + table (str): Table name. + + Kwargs: + check_mode (bool): If True, don't actually change anything, + just make SQL, add it to ``self.executed_queries`` and return True. + + Returns: + True if successful, False otherwise. + """ + query = ("ALTER PUBLICATION %s DROP TABLE %s" % (pg_quote_identifier(self.name, 'publication'), + pg_quote_identifier(table, 'table'))) + return self.__exec_sql(query, check_mode=check_mode) + + def __pub_set_tables(self, tables, check_mode=False): + """Set a table suit that need to be published by the publication. + + Args: + tables (list): List of tables. + + Kwargs: + check_mode (bool): If True, don't actually change anything, + just make SQL, add it to ``self.executed_queries`` and return True. + + Returns: + True if successful, False otherwise. + """ + quoted_tables = [pg_quote_identifier(t, 'table') for t in tables] + query = ("ALTER PUBLICATION %s SET TABLE %s" % (pg_quote_identifier(self.name, 'publication'), + ', '.join(quoted_tables))) + return self.__exec_sql(query, check_mode=check_mode) + + def __pub_set_param(self, param, value, check_mode=False): + """Set an optional publication parameter. + + Args: + param (str): Name of the parameter. + value (str): Parameter value. + + Kwargs: + check_mode (bool): If True, don't actually change anything, + just make SQL, add it to ``self.executed_queries`` and return True. + + Returns: + True if successful, False otherwise. + """ + query = ("ALTER PUBLICATION %s SET (%s = '%s')" % (pg_quote_identifier(self.name, 'publication'), + param, value)) + return self.__exec_sql(query, check_mode=check_mode) + + def __pub_set_owner(self, role, check_mode=False): + """Set a publication owner. + + Args: + role (str): Role (user) name that needs to be set as a publication owner. + + Kwargs: + check_mode (bool): If True, don't actually change anything, + just make SQL, add it to ``self.executed_queries`` and return True. + + Returns: + True if successful, False otherwise. + """ + query = ("ALTER PUBLICATION %s OWNER TO %s" % (pg_quote_identifier(self.name, 'publication'), + pg_quote_identifier(role, 'role'))) + return self.__exec_sql(query, check_mode=check_mode) + + def __exec_sql(self, query, check_mode=False): + """Execute SQL query. + + Note: If we need just to get information from the database, + we use ``exec_sql`` function directly. + + Args: + query (str): Query that needs to be executed. + + Kwargs: + check_mode (bool): If True, don't actually change anything, + just add ``query`` to ``self.executed_queries`` and return True. + + Returns: + True if successful, False otherwise. + """ + if check_mode: + self.executed_queries.append(query) + return True + else: + return exec_sql(self, query, ddl=True) + + +# =========================================== +# Module execution. 
+# + + +def main(): + argument_spec = postgres_common_argument_spec() + argument_spec.update( + name=dict(required=True), + db=dict(type='str', aliases=['login_db']), + state=dict(type='str', default='present', choices=['absent', 'present']), + tables=dict(type='list', elements='str'), + parameters=dict(type='dict'), + owner=dict(type='str'), + cascade=dict(type='bool', default=False), + ) + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + + # Parameters handling: + name = module.params['name'] + state = module.params['state'] + tables = module.params['tables'] + params = module.params['parameters'] + owner = module.params['owner'] + cascade = module.params['cascade'] + + if state == 'absent': + if tables: + module.warn('parameter "tables" is ignored when "state=absent"') + if params: + module.warn('parameter "parameters" is ignored when "state=absent"') + if owner: + module.warn('parameter "owner" is ignored when "state=absent"') + + if state == 'present' and cascade: + module.warn('parameter "cascade" is ignored when "state=present"') + + # Connect to DB and make cursor object: + conn_params = get_conn_params(module, module.params) + # We check publication state without DML queries execution, so set autocommit: + db_connection = connect_to_db(module, conn_params, autocommit=True) + cursor = db_connection.cursor(cursor_factory=DictCursor) + + # Check version: + if cursor.connection.server_version < SUPPORTED_PG_VERSION: + module.fail_json(msg="PostgreSQL server version should be 10.0 or greater") + + # Nothing was changed by default: + changed = False + + ################################### + # Create object and do rock'n'roll: + publication = PgPublication(module, cursor, name) + + if tables: + tables = transform_tables_representation(tables) + + # If module.check_mode=True, nothing will be changed: + if state == 'present': + if not publication.exists: + changed = publication.create(tables, params, owner, check_mode=module.check_mode) + + else: + changed = publication.update(tables, params, owner, check_mode=module.check_mode) + + elif state == 'absent': + changed = publication.drop(cascade=cascade, check_mode=module.check_mode) + + # Get final publication info: + pub_fin_info = {} + if state == 'present' or (state == 'absent' and module.check_mode): + pub_fin_info = publication.get_info() + elif state == 'absent' and not module.check_mode: + publication.exists = False + + # Connection is not needed any more: + cursor.close() + db_connection.close() + + # Update publication info and return ret values: + module.exit_json(changed=changed, queries=publication.executed_queries, exists=publication.exists, **pub_fin_info) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/database/postgresql/postgresql_query.py b/plugins/modules/database/postgresql/postgresql_query.py new file mode 100644 index 0000000000..589b8e2564 --- /dev/null +++ b/plugins/modules/database/postgresql/postgresql_query.py @@ -0,0 +1,363 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2017, Felix Archambault +# Copyright: (c) 2019, Andrew Klychkov (@Andersson007) +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +ANSIBLE_METADATA = { + 'metadata_version': '1.1', + 'supported_by': 'community', + 'status': ['preview'] +} + +DOCUMENTATION = r''' +--- +module: postgresql_query +short_description: Run PostgreSQL 
queries +description: +- Runs arbitrary PostgreSQL queries. +- Can run queries from SQL script files. +- Does not run against backup files. Use M(postgresql_db) with I(state=restore) + to run queries on files made by pg_dump/pg_dumpall utilities. +options: + query: + description: + - SQL query to run. Variables can be escaped with psycopg2 syntax + U(http://initd.org/psycopg/docs/usage.html). + type: str + positional_args: + description: + - List of values to be passed as positional arguments to the query. + When the value is a list, it will be converted to PostgreSQL array. + - Mutually exclusive with I(named_args). + type: list + elements: raw + named_args: + description: + - Dictionary of key-value arguments to pass to the query. + When the value is a list, it will be converted to PostgreSQL array. + - Mutually exclusive with I(positional_args). + type: dict + path_to_script: + description: + - Path to SQL script on the remote host. + - Returns result of the last query in the script. + - Mutually exclusive with I(query). + type: path + session_role: + description: + - Switch to session_role after connecting. The specified session_role must + be a role that the current login_user is a member of. + - Permissions checking for SQL commands is carried out as though + the session_role were the one that had logged in originally. + type: str + db: + description: + - Name of database to connect to and run queries against. + type: str + aliases: + - login_db + autocommit: + description: + - Execute in autocommit mode when the query can't be run inside a transaction block + (e.g., VACUUM). + - Mutually exclusive with I(check_mode). + type: bool + default: no + encoding: + description: + - Set the client encoding for the current session (e.g. C(UTF-8)). + - The default is the encoding defined by the database. + type: str +seealso: +- module: postgresql_db +author: +- Felix Archambault (@archf) +- Andrew Klychkov (@Andersson007) +- Will Rouesnel (@wrouesnel) +extends_documentation_fragment: +- community.general.postgres + +''' + +EXAMPLES = r''' +- name: Simple select query to acme db + postgresql_query: + db: acme + query: SELECT version() + +- name: Select query to db acme with positional arguments and non-default credentials + postgresql_query: + db: acme + login_user: django + login_password: mysecretpass + query: SELECT * FROM acme WHERE id = %s AND story = %s + positional_args: + - 1 + - test + +- name: Select query to test_db with named_args + postgresql_query: + db: test_db + query: SELECT * FROM test WHERE id = %(id_val)s AND story = %(story_val)s + named_args: + id_val: 1 + story_val: test + +- name: Insert query to test_table in db test_db + postgresql_query: + db: test_db + query: INSERT INTO test_table (id, story) VALUES (2, 'my_long_story') + +- name: Run queries from SQL script using UTF-8 client encoding for session + postgresql_query: + db: test_db + path_to_script: /var/lib/pgsql/test.sql + positional_args: + - 1 + encoding: UTF-8 + +- name: Example of using autocommit parameter + postgresql_query: + db: test_db + query: VACUUM + autocommit: yes + +- name: > + Insert data to the column of array type using positional_args. + Note that we use quotes here, the same as for passing JSON, etc. 
+ postgresql_query: + query: INSERT INTO test_table (array_column) VALUES (%s) + positional_args: + - '{1,2,3}' + +# Pass list and string vars as positional_args +- name: Set vars + set_fact: + my_list: + - 1 + - 2 + - 3 + my_arr: '{1, 2, 3}' + +- name: Select from test table by passing positional_args as arrays + postgresql_query: + query: SELECT * FROM test_array_table WHERE arr_col1 = %s AND arr_col2 = %s + positional_args: + - '{{ my_list }}' + - '{{ my_arr|string }}' +''' + +RETURN = r''' +query: + description: Query that was tried to be executed. + returned: always + type: str + sample: 'SELECT * FROM bar' +statusmessage: + description: Attribute containing the message returned by the command. + returned: always + type: str + sample: 'INSERT 0 1' +query_result: + description: + - List of dictionaries in column:value form representing returned rows. + returned: changed + type: list + sample: [{"Column": "Value1"},{"Column": "Value2"}] +rowcount: + description: Number of affected rows. + returned: changed + type: int + sample: 5 +''' + +try: + from psycopg2 import ProgrammingError as Psycopg2ProgrammingError + from psycopg2.extras import DictCursor +except ImportError: + # it is needed for checking 'no result to fetch' in main(), + # psycopg2 availability will be checked by connect_to_db() into + # ansible.module_utils.postgres + pass + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.postgres import ( + connect_to_db, + get_conn_params, + postgres_common_argument_spec, +) +from ansible.module_utils._text import to_native +from ansible.module_utils.six import iteritems + + +# =========================================== +# Module execution. +# + +def list_to_pg_array(elem): + """Convert the passed list to PostgreSQL array + represented as a string. + + Args: + elem (list): List that needs to be converted. + + Returns: + elem (str): String representation of PostgreSQL array. + """ + elem = str(elem).strip('[]') + elem = '{' + elem + '}' + return elem + + +def convert_elements_to_pg_arrays(obj): + """Convert list elements of the passed object + to PostgreSQL arrays represented as strings. + + Args: + obj (dict or list): Object whose elements need to be converted. + + Returns: + obj (dict or list): Object with converted elements. 
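+ + Example (illustrative): + convert_elements_to_pg_arrays({'ids': [1, 2, 3], 'story': 'test'}) + returns {'ids': '{1, 2, 3}', 'story': 'test'}.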
+ """ + if isinstance(obj, dict): + for (key, elem) in iteritems(obj): + if isinstance(elem, list): + obj[key] = list_to_pg_array(elem) + + elif isinstance(obj, list): + for i, elem in enumerate(obj): + if isinstance(elem, list): + obj[i] = list_to_pg_array(elem) + + return obj + + +def main(): + argument_spec = postgres_common_argument_spec() + argument_spec.update( + query=dict(type='str'), + db=dict(type='str', aliases=['login_db']), + positional_args=dict(type='list', elements='raw'), + named_args=dict(type='dict'), + session_role=dict(type='str'), + path_to_script=dict(type='path'), + autocommit=dict(type='bool', default=False), + encoding=dict(type='str'), + ) + + module = AnsibleModule( + argument_spec=argument_spec, + mutually_exclusive=(('positional_args', 'named_args'),), + supports_check_mode=True, + ) + + query = module.params["query"] + positional_args = module.params["positional_args"] + named_args = module.params["named_args"] + path_to_script = module.params["path_to_script"] + autocommit = module.params["autocommit"] + encoding = module.params["encoding"] + + if autocommit and module.check_mode: + module.fail_json(msg="Using autocommit is mutually exclusive with check_mode") + + if path_to_script and query: + module.fail_json(msg="path_to_script is mutually exclusive with query") + + if positional_args: + positional_args = convert_elements_to_pg_arrays(positional_args) + + elif named_args: + named_args = convert_elements_to_pg_arrays(named_args) + + if path_to_script: + try: + with open(path_to_script, 'rb') as f: + query = to_native(f.read()) + except Exception as e: + module.fail_json(msg="Cannot read file '%s' : %s" % (path_to_script, to_native(e))) + + conn_params = get_conn_params(module, module.params) + db_connection = connect_to_db(module, conn_params, autocommit=autocommit) + if encoding is not None: + db_connection.set_client_encoding(encoding) + cursor = db_connection.cursor(cursor_factory=DictCursor) + + # Prepare args: + if module.params.get("positional_args"): + arguments = module.params["positional_args"] + elif module.params.get("named_args"): + arguments = module.params["named_args"] + else: + arguments = None + + # Set defaults: + changed = False + + # Execute query: + try: + cursor.execute(query, arguments) + except Exception as e: + if not autocommit: + db_connection.rollback() + + cursor.close() + db_connection.close() + module.fail_json(msg="Cannot execute SQL '%s' %s: %s" % (query, arguments, to_native(e))) + + statusmessage = cursor.statusmessage + rowcount = cursor.rowcount + + try: + query_result = [dict(row) for row in cursor.fetchall()] + except Psycopg2ProgrammingError as e: + if to_native(e) == 'no results to fetch': + query_result = {} + + except Exception as e: + module.fail_json(msg="Cannot fetch rows from cursor: %s" % to_native(e)) + + if 'SELECT' not in statusmessage: + if 'UPDATE' in statusmessage or 'INSERT' in statusmessage or 'DELETE' in statusmessage: + s = statusmessage.split() + if len(s) == 3: + if statusmessage.split()[2] != '0': + changed = True + + elif len(s) == 2: + if statusmessage.split()[1] != '0': + changed = True + + else: + changed = True + + else: + changed = True + + if module.check_mode: + db_connection.rollback() + else: + if not autocommit: + db_connection.commit() + + kw = dict( + changed=changed, + query=cursor.query, + statusmessage=statusmessage, + query_result=query_result, + rowcount=rowcount if rowcount >= 0 else 0, + ) + + cursor.close() + db_connection.close() + + module.exit_json(**kw) + + +if __name__ 
== '__main__': + main() diff --git a/plugins/modules/database/postgresql/postgresql_schema.py b/plugins/modules/database/postgresql/postgresql_schema.py new file mode 100644 index 0000000000..46135609e0 --- /dev/null +++ b/plugins/modules/database/postgresql/postgresql_schema.py @@ -0,0 +1,279 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2016, Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = r''' +--- +module: postgresql_schema +short_description: Add or remove PostgreSQL schema +description: +- Add or remove PostgreSQL schema. +options: + name: + description: + - Name of the schema to add or remove. + required: true + type: str + aliases: + - schema + database: + description: + - Name of the database to connect to and add or remove the schema. + type: str + default: postgres + aliases: + - db + - login_db + owner: + description: + - Name of the role to set as owner of the schema. + type: str + session_role: + description: + - Switch to session_role after connecting. + - The specified session_role must be a role that the current login_user is a member of. + - Permissions checking for SQL commands is carried out as though the session_role + were the one that had logged in originally. + type: str + state: + description: + - The schema state. + type: str + default: present + choices: [ absent, present ] + cascade_drop: + description: + - Drop schema with CASCADE to remove child objects. + type: bool + default: false + ssl_mode: + description: + - Determines whether or with what priority a secure SSL TCP/IP connection will be negotiated with the server. + - See U(https://www.postgresql.org/docs/current/static/libpq-ssl.html) for more information on the modes. + - Default of C(prefer) matches libpq default. + type: str + default: prefer + choices: [ allow, disable, prefer, require, verify-ca, verify-full ] + ca_cert: + description: + - Specifies the name of a file containing SSL certificate authority (CA) certificate(s). + - If the file exists, the server's certificate will be verified to be signed by one of these authorities. + type: str + aliases: [ ssl_rootcert ] +seealso: +- name: PostgreSQL schemas + description: General information about PostgreSQL schemas. + link: https://www.postgresql.org/docs/current/ddl-schemas.html +- name: CREATE SCHEMA reference + description: Complete reference of the CREATE SCHEMA command documentation. + link: https://www.postgresql.org/docs/current/sql-createschema.html +- name: ALTER SCHEMA reference + description: Complete reference of the ALTER SCHEMA command documentation. + link: https://www.postgresql.org/docs/current/sql-alterschema.html +- name: DROP SCHEMA reference + description: Complete reference of the DROP SCHEMA command documentation. 
+ link: https://www.postgresql.org/docs/current/sql-dropschema.html +author: +- Flavien Chantelot (@Dorn-) +- Thomas O'Donnell (@andytom) +extends_documentation_fragment: +- community.general.postgres + +''' + +EXAMPLES = r''' +- name: Create a new schema with name acme in test database + postgresql_schema: + db: test + name: acme + +- name: Create a new schema acme with a user bob who will own it + postgresql_schema: + name: acme + owner: bob + +- name: Drop schema "acme" with cascade + postgresql_schema: + name: acme + state: absent + cascade_drop: yes +''' + +RETURN = r''' +schema: + description: Name of the schema. + returned: success, changed + type: str + sample: "acme" +queries: + description: List of executed queries. + returned: always + type: list + sample: ["CREATE SCHEMA \"acme\""] +''' + +import traceback + +try: + from psycopg2.extras import DictCursor +except ImportError: + # psycopg2 is checked by connect_to_db() + # from ansible.module_utils.postgres + pass + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.postgres import ( + connect_to_db, + get_conn_params, + postgres_common_argument_spec, +) +from ansible_collections.community.general.plugins.module_utils.database import SQLParseError, pg_quote_identifier +from ansible.module_utils._text import to_native + +executed_queries = [] + + +class NotSupportedError(Exception): + pass + + +# =========================================== +# PostgreSQL module specific support methods. +# + +def set_owner(cursor, schema, owner): + query = "ALTER SCHEMA %s OWNER TO %s" % ( + pg_quote_identifier(schema, 'schema'), + pg_quote_identifier(owner, 'role')) + cursor.execute(query) + executed_queries.append(query) + return True + + +def get_schema_info(cursor, schema): + query = ("SELECT schema_owner AS owner " + "FROM information_schema.schemata " + "WHERE schema_name = %(schema)s") + cursor.execute(query, {'schema': schema}) + return cursor.fetchone() + + +def schema_exists(cursor, schema): + query = ("SELECT schema_name FROM information_schema.schemata " + "WHERE schema_name = %(schema)s") + cursor.execute(query, {'schema': schema}) + return cursor.rowcount == 1 + + +def schema_delete(cursor, schema, cascade): + if schema_exists(cursor, schema): + query = "DROP SCHEMA %s" % pg_quote_identifier(schema, 'schema') + if cascade: + query += " CASCADE" + cursor.execute(query) + executed_queries.append(query) + return True + else: + return False + + +def schema_create(cursor, schema, owner): + if not schema_exists(cursor, schema): + query_fragments = ['CREATE SCHEMA %s' % pg_quote_identifier(schema, 'schema')] + if owner: + query_fragments.append('AUTHORIZATION %s' % pg_quote_identifier(owner, 'role')) + query = ' '.join(query_fragments) + cursor.execute(query) + executed_queries.append(query) + return True + else: + schema_info = get_schema_info(cursor, schema) + if owner and owner != schema_info['owner']: + return set_owner(cursor, schema, owner) + else: + return False + + +def schema_matches(cursor, schema, owner): + if not schema_exists(cursor, schema): + return False + else: + schema_info = get_schema_info(cursor, schema) + if owner and owner != schema_info['owner']: + return False + else: + return True + +# =========================================== +# Module execution. 
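+# For example (illustrative, hypothetical names): schema_create(cursor, 'acme', 'bob') +# executes CREATE SCHEMA "acme" AUTHORIZATION "bob" for a missing schema and appends +# the query to executed_queries; for an existing schema it only aligns the owner +# via set_owner() and ALTER SCHEMA.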
+# + + +def main(): + argument_spec = postgres_common_argument_spec() + argument_spec.update( + schema=dict(type="str", required=True, aliases=['name']), + owner=dict(type="str", default=""), + database=dict(type="str", default="postgres", aliases=["db", "login_db"]), + cascade_drop=dict(type="bool", default=False), + state=dict(type="str", default="present", choices=["absent", "present"]), + session_role=dict(type="str"), + ) + + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + + schema = module.params["schema"] + owner = module.params["owner"] + state = module.params["state"] + cascade_drop = module.params["cascade_drop"] + changed = False + + conn_params = get_conn_params(module, module.params) + db_connection = connect_to_db(module, conn_params, autocommit=True) + cursor = db_connection.cursor(cursor_factory=DictCursor) + + try: + if module.check_mode: + if state == "absent": + changed = not schema_exists(cursor, schema) + elif state == "present": + changed = not schema_matches(cursor, schema, owner) + module.exit_json(changed=changed, schema=schema) + + if state == "absent": + try: + changed = schema_delete(cursor, schema, cascade_drop) + except SQLParseError as e: + module.fail_json(msg=to_native(e), exception=traceback.format_exc()) + + elif state == "present": + try: + changed = schema_create(cursor, schema, owner) + except SQLParseError as e: + module.fail_json(msg=to_native(e), exception=traceback.format_exc()) + except NotSupportedError as e: + module.fail_json(msg=to_native(e), exception=traceback.format_exc()) + except SystemExit: + # Avoid catching this on Python 2.4 + raise + except Exception as e: + module.fail_json(msg="Database query failed: %s" % to_native(e), exception=traceback.format_exc()) + + db_connection.close() + module.exit_json(changed=changed, schema=schema, queries=executed_queries) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/database/postgresql/postgresql_sequence.py b/plugins/modules/database/postgresql/postgresql_sequence.py new file mode 100644 index 0000000000..3834184ce6 --- /dev/null +++ b/plugins/modules/database/postgresql/postgresql_sequence.py @@ -0,0 +1,611 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2019, Tobias Birkefeld (@tcraxs) +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +ANSIBLE_METADATA = { + 'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community' +} + +DOCUMENTATION = r''' +--- +module: postgresql_sequence +short_description: Create, drop, or alter a PostgreSQL sequence +description: +- Allows to create, drop or change the definition of a sequence generator. +options: + sequence: + description: + - The name of the sequence. + required: true + type: str + aliases: + - name + state: + description: + - The sequence state. + - If I(state=absent) other options will be ignored except I(name) and + I(schema). + default: present + choices: [ absent, present ] + type: str + data_type: + description: + - Specifies the data type of the sequence. Valid types are bigint, integer, + and smallint. bigint is the default. The data type determines the default + minimum and maximum values of the sequence. For more info see the + documentation + U(https://www.postgresql.org/docs/current/sql-createsequence.html). + - Supported from PostgreSQL 10.
+ choices: [ bigint, integer, smallint ] + type: str + increment: + description: + - Increment specifies which value is added to the current sequence value + to create a new value. + - A positive value will make an ascending sequence, a negative one a + descending sequence. The default value is 1. + type: int + minvalue: + description: + - Minvalue determines the minimum value a sequence can generate. The + default for an ascending sequence is 1. The default for a descending + sequence is the minimum value of the data type. + type: int + aliases: + - min + maxvalue: + description: + - Maxvalue determines the maximum value for the sequence. The default for + an ascending sequence is the maximum + value of the data type. The default for a descending sequence is -1. + type: int + aliases: + - max + start: + description: + - Start allows the sequence to begin anywhere. The default starting value + is I(minvalue) for ascending sequences and I(maxvalue) for descending + ones. + type: int + cache: + description: + - Cache specifies how many sequence numbers are to be preallocated and + stored in memory for faster access. The minimum value is 1 (only one + value can be generated at a time, i.e., no cache), and this is also + the default. + type: int + cycle: + description: + - The cycle option allows the sequence to wrap around when the I(maxvalue) + or I(minvalue) has been reached by an ascending or descending sequence + respectively. If the limit is reached, the next number generated will be + the minvalue or maxvalue, respectively. + - If C(false) (NO CYCLE) is specified, any calls to nextval after the sequence + has reached its maximum value will return an error. False (NO CYCLE) is + the default. + type: bool + default: no + cascade: + description: + - Automatically drop objects that depend on the sequence, and in turn all + objects that depend on those objects. + - Ignored if I(state=present). + - Only used with I(state=absent). + type: bool + default: no + rename_to: + description: + - The new name for the I(sequence). + - Works only for existing sequences. + type: str + owner: + description: + - Set the owner for the I(sequence). + type: str + schema: + description: + - The schema of the I(sequence). This will be used to create and relocate + a I(sequence) in the given schema. + default: public + type: str + newschema: + description: + - The new schema for the I(sequence). Will be used for moving a + I(sequence) to another I(schema). + - Works only for existing sequences. + type: str + session_role: + description: + - Switch to session_role after connecting. The specified I(session_role) + must be a role that the current I(login_user) is a member of. + - Permissions checking for SQL commands is carried out as though + the I(session_role) were the one that had logged in originally. + type: str + db: + description: + - Name of database to connect to and run queries against. + type: str + aliases: + - database + - login_db +notes: +- If you do not pass the db parameter, the sequence will be created in the database + named postgres. +seealso: +- module: postgresql_table +- module: postgresql_owner +- module: postgresql_privs +- module: postgresql_tablespace +- name: CREATE SEQUENCE reference + description: Complete reference of the CREATE SEQUENCE command documentation. + link: https://www.postgresql.org/docs/current/sql-createsequence.html +- name: ALTER SEQUENCE reference + description: Complete reference of the ALTER SEQUENCE command documentation.
+ link: https://www.postgresql.org/docs/current/sql-altersequence.html +- name: DROP SEQUENCE reference + description: Complete reference of the DROP SEQUENCE command documentation. + link: https://www.postgresql.org/docs/current/sql-dropsequence.html +author: +- Tobias Birkefeld (@tcraxs) +extends_documentation_fragment: +- community.general.postgres + +''' + +EXAMPLES = r''' +- name: Create an ascending bigint sequence called foobar in the default + database + postgresql_sequence: + name: foobar + +- name: Create an ascending integer sequence called foobar, starting at 101 + postgresql_sequence: + name: foobar + data_type: integer + start: 101 + +- name: Create a descending sequence called foobar, starting at 101 and + preallocating 10 sequence numbers in cache + postgresql_sequence: + name: foobar + increment: -1 + cache: 10 + start: 101 + +- name: Create an ascending sequence called foobar, which cycles between 1 and 10 + postgresql_sequence: + name: foobar + cycle: yes + min: 1 + max: 10 + +- name: Create an ascending bigint sequence called foobar in the default + database with owner foobar + postgresql_sequence: + name: foobar + owner: foobar + +- name: Rename an existing sequence named foo to bar + postgresql_sequence: + name: foo + rename_to: bar + +- name: Change the schema of an existing sequence to foobar + postgresql_sequence: + name: foobar + newschema: foobar + +- name: Change the owner of an existing sequence to foobar + postgresql_sequence: + name: foobar + owner: foobar + +- name: Drop a sequence called foobar + postgresql_sequence: + name: foobar + state: absent + +- name: Drop a sequence called foobar with cascade + postgresql_sequence: + name: foobar + cascade: yes + state: absent +''' + +RETURN = r''' +state: + description: Sequence state at the end of execution. + returned: always + type: str + sample: 'present' +sequence: + description: Sequence name. + returned: always + type: str + sample: 'foobar' +queries: + description: List of queries that the module tried to execute. + returned: always + type: list + sample: [ "CREATE SEQUENCE \"foo\"" ] +schema: + description: Name of the schema of the sequence. + returned: always + type: str + sample: 'foo' +data_type: + description: Shows the current data type of the sequence. + returned: always + type: str + sample: 'bigint' +increment: + description: The value of increment of the sequence. A positive value will + make an ascending sequence, a negative one a descending + sequence. + returned: always + type: int + sample: '-1' +minvalue: + description: The value of minvalue of the sequence. + returned: always + type: int + sample: '1' +maxvalue: + description: The value of maxvalue of the sequence. + returned: always + type: int + sample: '9223372036854775807' +start: + description: The value of start of the sequence. + returned: always + type: int + sample: '12' +cycle: + description: Shows whether the sequence cycles. + returned: always + type: str + sample: 'NO' +owner: + description: Shows the current owner of the sequence + after the successful run of the task. + returned: always + type: str + sample: 'postgres' +newname: + description: Shows the new sequence name after rename. + returned: on success + type: str + sample: 'barfoo' +newschema: + description: Shows the new schema of the sequence after schema change.
+ returned: on success + type: str + sample: 'foobar' +''' + + +try: + from psycopg2.extras import DictCursor +except ImportError: + # psycopg2 is checked by connect_to_db() + # from ansible.module_utils.postgres + pass + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.database import pg_quote_identifier +from ansible_collections.community.general.plugins.module_utils.postgres import ( + connect_to_db, + exec_sql, + get_conn_params, + postgres_common_argument_spec, +) + + +class Sequence(object): + """Implements behavior of CREATE, ALTER or DROP SEQUENCE PostgreSQL command. + + Arguments: + module (AnsibleModule) -- object of AnsibleModule class + cursor (cursor) -- cursor object of psycopg2 library + + Attributes: + module (AnsibleModule) -- object of AnsibleModule class + cursor (cursor) -- cursor object of psycopg2 library + changed (bool) -- something was changed after execution or not + executed_queries (list) -- executed queries + name (str) -- name of the sequence + owner (str) -- name of the owner of the sequence + schema (str) -- name of the schema (default: public) + data_type (str) -- data type of the sequence + start_value (int) -- value of the sequence start + minvalue (int) -- minimum value of the sequence + maxvalue (int) -- maximum value of the sequence + increment (int) -- increment value of the sequence + cycle (bool) -- sequence can cycle or not + new_name (str) -- name of the renamed sequence + new_schema (str) -- name of the new schema + exists (bool) -- sequence exists or not + """ + + def __init__(self, module, cursor): + self.module = module + self.cursor = cursor + self.executed_queries = [] + self.name = self.module.params['sequence'] + self.owner = '' + self.schema = self.module.params['schema'] + self.data_type = '' + self.start_value = '' + self.minvalue = '' + self.maxvalue = '' + self.increment = '' + self.cycle = '' + self.new_name = '' + self.new_schema = '' + self.exists = False + # Collect info + self.get_info() + + def get_info(self): + """Getter to refresh and get sequence info""" + query = ("SELECT " + "s.sequence_schema AS schemaname, " + "s.sequence_name AS sequencename, " + "pg_get_userbyid(c.relowner) AS sequenceowner, " + "s.data_type::regtype AS data_type, " + "s.start_value AS start_value, " + "s.minimum_value AS min_value, " + "s.maximum_value AS max_value, " + "s.increment AS increment_by, " + "s.cycle_option AS cycle " + "FROM information_schema.sequences s " + "JOIN pg_class c ON c.relname = s.sequence_name " + "LEFT JOIN pg_namespace n ON n.oid = c.relnamespace " + "WHERE NOT pg_is_other_temp_schema(n.oid) " + "AND c.relkind = 'S'::\"char\" " + "AND sequence_name = %(name)s " + "AND sequence_schema = %(schema)s") + + res = exec_sql(self, query, + query_params={'name': self.name, 'schema': self.schema}, + add_to_executed=False) + + if not res: + self.exists = False + return False + + if res: + self.exists = True + self.schema = res[0]['schemaname'] + self.name = res[0]['sequencename'] + self.owner = res[0]['sequenceowner'] + self.data_type = res[0]['data_type'] + self.start_value = res[0]['start_value'] + self.minvalue = res[0]['min_value'] + self.maxvalue = res[0]['max_value'] + self.increment = res[0]['increment_by'] + self.cycle = res[0]['cycle'] + + def create(self): + """Implements CREATE SEQUENCE command behavior.""" + query = ['CREATE SEQUENCE'] + query.append(self.__add_schema()) + + if self.module.params.get('data_type'): + query.append('AS %s' % 
self.module.params['data_type']) + + if self.module.params.get('increment'): + query.append('INCREMENT BY %s' % self.module.params['increment']) + + if self.module.params.get('minvalue'): + query.append('MINVALUE %s' % self.module.params['minvalue']) + + if self.module.params.get('maxvalue'): + query.append('MAXVALUE %s' % self.module.params['maxvalue']) + + if self.module.params.get('start'): + query.append('START WITH %s' % self.module.params['start']) + + if self.module.params.get('cache'): + query.append('CACHE %s' % self.module.params['cache']) + + if self.module.params.get('cycle'): + query.append('CYCLE') + + return exec_sql(self, ' '.join(query), ddl=True) + + def drop(self): + """Implements DROP SEQUENCE command behavior.""" + query = ['DROP SEQUENCE'] + query.append(self.__add_schema()) + + if self.module.params.get('cascade'): + query.append('CASCADE') + + return exec_sql(self, ' '.join(query), ddl=True) + + def rename(self): + """Implements ALTER SEQUENCE RENAME TO command behavior.""" + query = ['ALTER SEQUENCE'] + query.append(self.__add_schema()) + query.append('RENAME TO %s' % pg_quote_identifier(self.module.params['rename_to'], 'sequence')) + + return exec_sql(self, ' '.join(query), ddl=True) + + def set_owner(self): + """Implements ALTER SEQUENCE OWNER TO command behavior.""" + query = ['ALTER SEQUENCE'] + query.append(self.__add_schema()) + query.append('OWNER TO %s' % pg_quote_identifier(self.module.params['owner'], 'role')) + + return exec_sql(self, ' '.join(query), ddl=True) + + def set_schema(self): + """Implements ALTER SEQUENCE SET SCHEMA command behavior.""" + query = ['ALTER SEQUENCE'] + query.append(self.__add_schema()) + query.append('SET SCHEMA %s' % pg_quote_identifier(self.module.params['newschema'], 'schema')) + + return exec_sql(self, ' '.join(query), ddl=True) + + def __add_schema(self): + return '.'.join([pg_quote_identifier(self.schema, 'schema'), + pg_quote_identifier(self.name, 'sequence')]) + + +# =========================================== +# Module execution. 
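+# For example (illustrative, matching the EXAMPLES section): with name=foobar, +# increment=-1, start=101 and cache=10, Sequence.create() builds and runs +# CREATE SEQUENCE "public"."foobar" INCREMENT BY -1 START WITH 101 CACHE 10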
+# + +def main(): + argument_spec = postgres_common_argument_spec() + argument_spec.update( + sequence=dict(type='str', required=True, aliases=['name']), + state=dict(type='str', default='present', choices=['absent', 'present']), + data_type=dict(type='str', choices=['bigint', 'integer', 'smallint']), + increment=dict(type='int'), + minvalue=dict(type='int', aliases=['min']), + maxvalue=dict(type='int', aliases=['max']), + start=dict(type='int'), + cache=dict(type='int'), + cycle=dict(type='bool', default=False), + schema=dict(type='str', default='public'), + cascade=dict(type='bool', default=False), + rename_to=dict(type='str'), + owner=dict(type='str'), + newschema=dict(type='str'), + db=dict(type='str', default='', aliases=['login_db', 'database']), + session_role=dict(type='str'), + ) + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + mutually_exclusive=[ + ['rename_to', 'data_type'], + ['rename_to', 'increment'], + ['rename_to', 'minvalue'], + ['rename_to', 'maxvalue'], + ['rename_to', 'start'], + ['rename_to', 'cache'], + ['rename_to', 'cycle'], + ['rename_to', 'cascade'], + ['rename_to', 'owner'], + ['rename_to', 'newschema'], + ['cascade', 'data_type'], + ['cascade', 'increment'], + ['cascade', 'minvalue'], + ['cascade', 'maxvalue'], + ['cascade', 'start'], + ['cascade', 'cache'], + ['cascade', 'cycle'], + ['cascade', 'owner'], + ['cascade', 'newschema'], + ] + ) + + # Note: we don't need to check mutually exclusive params here, because they are + # checked automatically by AnsibleModule (mutually_exclusive=[] list above). + + # Change autocommit to False if check_mode: + autocommit = not module.check_mode + # Connect to DB and make cursor object: + conn_params = get_conn_params(module, module.params) + db_connection = connect_to_db(module, conn_params, autocommit=autocommit) + cursor = db_connection.cursor(cursor_factory=DictCursor) + + ############## + # Create the object and do main job: + data = Sequence(module, cursor) + + # Set defaults: + changed = False + + # Create new sequence + if not data.exists and module.params['state'] == 'present': + if module.params.get('rename_to'): + module.fail_json(msg="Sequence '%s' does not exist, nothing to rename" % module.params['sequence']) + if module.params.get('newschema'): + module.fail_json(msg="Sequence '%s' does not exist, change of schema not possible" % module.params['sequence']) + + changed = data.create() + + # Drop non-existing sequence + elif not data.exists and module.params['state'] == 'absent': + # Nothing to do + changed = False + + # Drop existing sequence + elif data.exists and module.params['state'] == 'absent': + changed = data.drop() + + # Rename sequence + if data.exists and module.params.get('rename_to'): + if data.name != module.params['rename_to']: + changed = data.rename() + if changed: + data.new_name = module.params['rename_to'] + + # Refresh information + if module.params['state'] == 'present': + data.get_info() + + # Change owner, schema and settings + if module.params['state'] == 'present' and data.exists: + # change owner + if module.params.get('owner'): + if data.owner != module.params['owner']: + changed = data.set_owner() + + # Set schema + if module.params.get('newschema'): + if data.schema != module.params['newschema']: + changed = data.set_schema() + if changed: + data.new_schema = module.params['newschema'] + + # Rollback if it's possible and check_mode: + if module.check_mode: + db_connection.rollback() + else: + db_connection.commit() + + cursor.close() + 
db_connection.close() + + # Make return values: + kw = dict( + changed=changed, + state='present', + sequence=data.name, + queries=data.executed_queries, + schema=data.schema, + data_type=data.data_type, + increment=data.increment, + minvalue=data.minvalue, + maxvalue=data.maxvalue, + start=data.start_value, + cycle=data.cycle, + owner=data.owner, + ) + + if module.params['state'] == 'present': + if data.new_name: + kw['newname'] = data.new_name + if data.new_schema: + kw['newschema'] = data.new_schema + + elif module.params['state'] == 'absent': + kw['state'] = 'absent' + + module.exit_json(**kw) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/database/postgresql/postgresql_set.py b/plugins/modules/database/postgresql/postgresql_set.py new file mode 100644 index 0000000000..166f7bc63f --- /dev/null +++ b/plugins/modules/database/postgresql/postgresql_set.py @@ -0,0 +1,435 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2018, Andrew Klychkov (@Andersson007) +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = { + 'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community' +} + +DOCUMENTATION = r''' +--- +module: postgresql_set +short_description: Change a PostgreSQL server configuration parameter +description: + - Allows to change a PostgreSQL server configuration parameter. + - The module uses ALTER SYSTEM command and applies changes by reload server configuration. + - ALTER SYSTEM is used for changing server configuration parameters across the entire database cluster. + - It can be more convenient and safe than the traditional method of manually editing the postgresql.conf file. + - ALTER SYSTEM writes the given parameter setting to the $PGDATA/postgresql.auto.conf file, + which is read in addition to postgresql.conf. + - The module allows to reset parameter to boot_val (cluster initial value) by I(reset=yes) or remove parameter + string from postgresql.auto.conf and reload I(value=default) (for settings with postmaster context restart is required). + - After change you can see in the ansible output the previous and + the new parameter value and other information using returned values and M(debug) module. +options: + name: + description: + - Name of PostgreSQL server parameter. + type: str + required: true + value: + description: + - Parameter value to set. + - To remove parameter string from postgresql.auto.conf and + reload the server configuration you must pass I(value=default). + With I(value=default) the playbook always returns changed is true. + type: str + reset: + description: + - Restore parameter to initial state (boot_val). Mutually exclusive with I(value). + type: bool + default: false + session_role: + description: + - Switch to session_role after connecting. The specified session_role must + be a role that the current login_user is a member of. + - Permissions checking for SQL commands is carried out as though + the session_role were the one that had logged in originally. + type: str + db: + description: + - Name of database to connect. + type: str + aliases: + - login_db +notes: +- Supported version of PostgreSQL is 9.4 and later. +- Pay attention, change setting with 'postmaster' context can return changed is true + when actually nothing changes because the same value may be presented in + several different form, for example, 1024MB, 1GB, etc. 
However in pg_settings + system view it can be defined like 131072 number of 8kB pages. + The final check of the parameter value cannot compare it because the server was + not restarted and the value in pg_settings is not updated yet. +- For some parameters restart of PostgreSQL server is required. + See official documentation U(https://www.postgresql.org/docs/current/view-pg-settings.html). +seealso: +- module: postgresql_info +- name: PostgreSQL server configuration + description: General information about PostgreSQL server configuration. + link: https://www.postgresql.org/docs/current/runtime-config.html +- name: PostgreSQL view pg_settings reference + description: Complete reference of the pg_settings view documentation. + link: https://www.postgresql.org/docs/current/view-pg-settings.html +- name: PostgreSQL ALTER SYSTEM command reference + description: Complete reference of the ALTER SYSTEM command documentation. + link: https://www.postgresql.org/docs/current/sql-altersystem.html +author: +- Andrew Klychkov (@Andersson007) +extends_documentation_fragment: +- community.general.postgres + +''' + +EXAMPLES = r''' +- name: Restore wal_keep_segments parameter to initial state + postgresql_set: + name: wal_keep_segments + reset: yes + +# Set work_mem parameter to 32MB and show what's been changed and restart is required or not +# (output example: "msg": "work_mem 4MB >> 64MB restart_req: False") +- name: Set work mem parameter + postgresql_set: + name: work_mem + value: 32mb + register: set + +- debug: + msg: "{{ set.name }} {{ set.prev_val_pretty }} >> {{ set.value_pretty }} restart_req: {{ set.restart_required }}" + when: set.changed +# Ensure that the restart of PostgreSQL server must be required for some parameters. +# In this situation you see the same parameter in prev_val_pretty and value_pretty, but 'changed=True' +# (If you passed the value that was different from the current server setting). + +- name: Set log_min_duration_statement parameter to 1 second + postgresql_set: + name: log_min_duration_statement + value: 1s + +- name: Set wal_log_hints parameter to default value (remove parameter from postgresql.auto.conf) + postgresql_set: + name: wal_log_hints + value: default +''' + +RETURN = r''' +name: + description: Name of PostgreSQL server parameter. + returned: always + type: str + sample: 'shared_buffers' +restart_required: + description: Information about parameter current state. + returned: always + type: bool + sample: true +prev_val_pretty: + description: Information about previous state of the parameter. + returned: always + type: str + sample: '4MB' +value_pretty: + description: Information about current state of the parameter. + returned: always + type: str + sample: '64MB' +value: + description: + - Dictionary that contains the current parameter value (at the time of playbook finish). + - Pay attention that for real change some parameters restart of PostgreSQL server is required. + - Returns the current value in the check mode. + returned: always + type: dict + sample: { "value": 67108864, "unit": "b" } +context: + description: + - PostgreSQL setting context. 
+  returned: always
+  type: str
+  sample: user
+'''
+
+try:
+    from psycopg2.extras import DictCursor
+except Exception:
+    # psycopg2 is checked by connect_to_db()
+    # from ansible.module_utils.postgres
+    pass
+
+from copy import deepcopy
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.postgres import (
+    connect_to_db,
+    get_conn_params,
+    postgres_common_argument_spec,
+)
+from ansible.module_utils._text import to_native
+
+PG_REQ_VER = 90400
+
+# Allow values like 1mb to be passed instead of 1MB, etc:
+POSSIBLE_SIZE_UNITS = ("mb", "gb", "tb")
+
+# ===========================================
+# PostgreSQL module specific support methods.
+#
+
+
+def param_get(cursor, module, name):
+    query = ("SELECT name, setting, unit, context, boot_val "
+             "FROM pg_settings WHERE name = %(name)s")
+    try:
+        cursor.execute(query, {'name': name})
+        info = cursor.fetchall()
+        cursor.execute("SHOW %s" % name)
+        val = cursor.fetchone()
+
+    except Exception as e:
+        module.fail_json(msg="Unable to get %s value due to: %s" % (name, to_native(e)))
+
+    raw_val = info[0][1]
+    unit = info[0][2]
+    context = info[0][3]
+    boot_val = info[0][4]
+
+    if val[0] == 'True':
+        val[0] = 'on'
+    elif val[0] == 'False':
+        val[0] = 'off'
+
+    # pg_settings reports memory parameters in kB or MB;
+    # normalize them to bytes for later comparison:
+    if unit == 'kB':
+        if int(raw_val) > 0:
+            raw_val = int(raw_val) * 1024
+        if int(boot_val) > 0:
+            boot_val = int(boot_val) * 1024
+
+        unit = 'b'
+
+    elif unit == 'MB':
+        if int(raw_val) > 0:
+            raw_val = int(raw_val) * 1024 * 1024
+        if int(boot_val) > 0:
+            boot_val = int(boot_val) * 1024 * 1024
+
+        unit = 'b'
+
+    return (val[0], raw_val, unit, boot_val, context)
+
+
+def pretty_to_bytes(pretty_val):
+    # The function returns a value in bytes
+    # if the value contains 'B', 'kB', 'MB', 'GB', or 'TB'.
+    # Otherwise it returns the passed argument unchanged.
+
+    val_in_bytes = None
+
+    if 'KB' in pretty_val.upper():
+        num_part = int(''.join(d for d in pretty_val if d.isdigit()))
+        val_in_bytes = num_part * 1024
+
+    elif 'MB' in pretty_val.upper():
+        num_part = int(''.join(d for d in pretty_val if d.isdigit()))
+        val_in_bytes = num_part * 1024 * 1024
+
+    elif 'GB' in pretty_val.upper():
+        num_part = int(''.join(d for d in pretty_val if d.isdigit()))
+        val_in_bytes = num_part * 1024 * 1024 * 1024
+
+    elif 'TB' in pretty_val.upper():
+        num_part = int(''.join(d for d in pretty_val if d.isdigit()))
+        val_in_bytes = num_part * 1024 * 1024 * 1024 * 1024
+
+    elif 'B' in pretty_val.upper():
+        num_part = int(''.join(d for d in pretty_val if d.isdigit()))
+        val_in_bytes = num_part
+
+    else:
+        return pretty_val
+
+    return val_in_bytes
+
+
+def param_set(cursor, module, name, value, context):
+    try:
+        if str(value).lower() == 'default':
+            query = "ALTER SYSTEM SET %s = DEFAULT" % name
+        else:
+            query = "ALTER SYSTEM SET %s = '%s'" % (name, value)
+        cursor.execute(query)
+
+        if context != 'postmaster':
+            cursor.execute("SELECT pg_reload_conf()")
+
+    except Exception as e:
+        module.fail_json(msg="Unable to set %s value due to: %s" % (name, to_native(e)))
+
+    return True
+
+
+# ===========================================
+# Module execution.
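+#
+# Illustrative sketch only (not called by the module): pretty_to_bytes()
+# above normalizes human-readable sizes so that a requested value such as
+# '64MB' can be compared with the current setting. Sample values are made up.
+def _example_pretty_to_bytes():
+    assert pretty_to_bytes('1kB') == 1024
+    assert pretty_to_bytes('64MB') == 64 * 1024 * 1024
+    assert pretty_to_bytes('1GB') == 1024 ** 3
+    # A value without a recognized size unit is passed through unchanged:
+    assert pretty_to_bytes('100') == '100'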
+# + + +def main(): + argument_spec = postgres_common_argument_spec() + argument_spec.update( + name=dict(type='str', required=True), + db=dict(type='str', aliases=['login_db']), + value=dict(type='str'), + reset=dict(type='bool'), + session_role=dict(type='str'), + ) + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + + name = module.params["name"] + value = module.params["value"] + reset = module.params["reset"] + + # Allow to pass values like 1mb instead of 1MB, etc: + if value: + for unit in POSSIBLE_SIZE_UNITS: + if value[:-2].isdigit() and unit in value[-2:]: + value = value.upper() + + if value and reset: + module.fail_json(msg="%s: value and reset params are mutually exclusive" % name) + + if not value and not reset: + module.fail_json(msg="%s: at least one of value or reset param must be specified" % name) + + conn_params = get_conn_params(module, module.params, warn_db_default=False) + db_connection = connect_to_db(module, conn_params, autocommit=True) + cursor = db_connection.cursor(cursor_factory=DictCursor) + + kw = {} + # Check server version (needs 9.4 or later): + ver = db_connection.server_version + if ver < PG_REQ_VER: + module.warn("PostgreSQL is %s version but %s or later is required" % (ver, PG_REQ_VER)) + kw = dict( + changed=False, + restart_required=False, + value_pretty="", + prev_val_pretty="", + value={"value": "", "unit": ""}, + ) + kw['name'] = name + db_connection.close() + module.exit_json(**kw) + + # Set default returned values: + restart_required = False + changed = False + kw['name'] = name + kw['restart_required'] = False + + # Get info about param state: + res = param_get(cursor, module, name) + current_value = res[0] + raw_val = res[1] + unit = res[2] + boot_val = res[3] + context = res[4] + + if value == 'True': + value = 'on' + elif value == 'False': + value = 'off' + + kw['prev_val_pretty'] = current_value + kw['value_pretty'] = deepcopy(kw['prev_val_pretty']) + kw['context'] = context + + # Do job + if context == "internal": + module.fail_json(msg="%s: cannot be changed (internal context). 
See " + "https://www.postgresql.org/docs/current/runtime-config-preset.html" % name) + + if context == "postmaster": + restart_required = True + + # If check_mode, just compare and exit: + if module.check_mode: + if pretty_to_bytes(value) == pretty_to_bytes(current_value): + kw['changed'] = False + + else: + kw['value_pretty'] = value + kw['changed'] = True + + # Anyway returns current raw value in the check_mode: + kw['value'] = dict( + value=raw_val, + unit=unit, + ) + kw['restart_required'] = restart_required + module.exit_json(**kw) + + # Set param: + if value and value != current_value: + changed = param_set(cursor, module, name, value, context) + + kw['value_pretty'] = value + + # Reset param: + elif reset: + if raw_val == boot_val: + # nothing to change, exit: + kw['value'] = dict( + value=raw_val, + unit=unit, + ) + module.exit_json(**kw) + + changed = param_set(cursor, module, name, boot_val, context) + + if restart_required: + module.warn("Restart of PostgreSQL is required for setting %s" % name) + + cursor.close() + db_connection.close() + + # Reconnect and recheck current value: + if context in ('sighup', 'superuser-backend', 'backend', 'superuser', 'user'): + db_connection = connect_to_db(module, conn_params, autocommit=True) + cursor = db_connection.cursor(cursor_factory=DictCursor) + + res = param_get(cursor, module, name) + # f_ means 'final' + f_value = res[0] + f_raw_val = res[1] + + if raw_val == f_raw_val: + changed = False + + else: + changed = True + + kw['value_pretty'] = f_value + kw['value'] = dict( + value=f_raw_val, + unit=unit, + ) + + cursor.close() + db_connection.close() + + kw['changed'] = changed + kw['restart_required'] = restart_required + module.exit_json(**kw) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/database/postgresql/postgresql_slot.py b/plugins/modules/database/postgresql/postgresql_slot.py new file mode 100644 index 0000000000..c21c303156 --- /dev/null +++ b/plugins/modules/database/postgresql/postgresql_slot.py @@ -0,0 +1,293 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2019, John Scalia (@jscalia), Andrew Klychkov (@Andersson007) +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: postgresql_slot +short_description: Add or remove replication slots from a PostgreSQL database +description: +- Add or remove physical or logical replication slots from a PostgreSQL database. + +options: + name: + description: + - Name of the replication slot to add or remove. + type: str + required: yes + aliases: + - slot_name + slot_type: + description: + - Slot type. + type: str + default: physical + choices: [ logical, physical ] + state: + description: + - The slot state. + - I(state=present) implies the slot must be present in the system. + - I(state=absent) implies the I(groups) must be revoked from I(target_roles). + type: str + default: present + choices: [ absent, present ] + immediately_reserve: + description: + - Optional parameter that when C(yes) specifies that the LSN for this replication slot be reserved + immediately, otherwise the default, C(no), specifies that the LSN is reserved on the first connection + from a streaming replication client. + - Is available from PostgreSQL version 9.6. + - Uses only with I(slot_type=physical). 
+ - Mutually exclusive with I(slot_type=logical). + type: bool + default: no + output_plugin: + description: + - All logical slots must indicate which output plugin decoder they're using. + - This parameter does not apply to physical slots. + - It will be ignored with I(slot_type=physical). + type: str + default: "test_decoding" + db: + description: + - Name of database to connect to. + type: str + aliases: + - login_db + session_role: + description: + - Switch to session_role after connecting. + The specified session_role must be a role that the current login_user is a member of. + - Permissions checking for SQL commands is carried out as though + the session_role were the one that had logged in originally. + type: str + +notes: +- Physical replication slots were introduced to PostgreSQL with version 9.4, + while logical replication slots were added beginning with version 10.0. + +seealso: +- name: PostgreSQL pg_replication_slots view reference + description: Complete reference of the PostgreSQL pg_replication_slots view. + link: https://www.postgresql.org/docs/current/view-pg-replication-slots.html +- name: PostgreSQL streaming replication protocol reference + description: Complete reference of the PostgreSQL streaming replication protocol documentation. + link: https://www.postgresql.org/docs/current/protocol-replication.html +- name: PostgreSQL logical replication protocol reference + description: Complete reference of the PostgreSQL logical replication protocol documentation. + link: https://www.postgresql.org/docs/current/protocol-logical-replication.html + +author: +- John Scalia (@jscalia) +- Andrew Klychkov (@Andersson007) +extends_documentation_fragment: +- community.general.postgres + +''' + +EXAMPLES = r''' +- name: Create physical_one physical slot if doesn't exist + become_user: postgres + postgresql_slot: + slot_name: physical_one + db: ansible + +- name: Remove physical_one slot if exists + become_user: postgres + postgresql_slot: + slot_name: physical_one + db: ansible + state: absent + +- name: Create logical_one logical slot to the database acme if doesn't exist + postgresql_slot: + name: logical_slot_one + slot_type: logical + state: present + output_plugin: custom_decoder_one + db: "acme" + +- name: Remove logical_one slot if exists from the cluster running on another host and non-standard port + postgresql_slot: + name: logical_one + login_host: mydatabase.example.org + port: 5433 + login_user: ourSuperuser + login_password: thePassword + state: absent +''' + +RETURN = r''' +name: + description: Name of the slot + returned: always + type: str + sample: "physical_one" +queries: + description: List of executed queries. + returned: always + type: str + sample: [ "SELECT pg_create_physical_replication_slot('physical_one', False, False)" ] +''' + +try: + from psycopg2.extras import DictCursor +except ImportError: + # psycopg2 is checked by connect_to_db() + # from ansible.module_utils.postgres + pass + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.postgres import ( + connect_to_db, + exec_sql, + get_conn_params, + postgres_common_argument_spec, +) + + +# =========================================== +# PostgreSQL module specific support methods. 
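+#
+# For orientation (slot names are supplied at runtime): the PgSlot class below
+# wraps the following catalog functions; parameters are passed through psycopg2
+# placeholders rather than interpolated into the SQL string.
+EXAMPLE_SLOT_CALLS = (
+    "SELECT pg_create_physical_replication_slot(%(name)s, %(i_reserve)s)",
+    "SELECT pg_create_logical_replication_slot(%(name)s, %(o_plugin)s)",
+    "SELECT pg_drop_replication_slot(%(name)s)",
+)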
+#
+
+class PgSlot(object):
+    """Implements management of a PostgreSQL replication slot."""
+
+    def __init__(self, module, cursor, name):
+        self.module = module
+        self.cursor = cursor
+        self.name = name
+        self.exists = False
+        self.kind = ''
+        self.__slot_exists()
+        self.changed = False
+        self.executed_queries = []
+
+    def create(self, kind='physical', immediately_reserve=False, output_plugin=False, just_check=False):
+        """Create a physical or logical replication slot if it does not exist."""
+        if self.exists:
+            if self.kind == kind:
+                return False
+            else:
+                self.module.warn("slot with name '%s' already exists "
+                                 "but has another type '%s'" % (self.name, self.kind))
+                return False
+
+        if just_check:
+            return None
+
+        if kind == 'physical':
+            # Check the server version (immediately_reserve needs PostgreSQL 9.6 or later):
+            if self.cursor.connection.server_version < 96000:
+                query = "SELECT pg_create_physical_replication_slot(%(name)s)"
+
+            else:
+                query = "SELECT pg_create_physical_replication_slot(%(name)s, %(i_reserve)s)"
+
+            self.changed = exec_sql(self, query,
+                                    query_params={'name': self.name, 'i_reserve': immediately_reserve},
+                                    ddl=True)
+
+        elif kind == 'logical':
+            query = "SELECT pg_create_logical_replication_slot(%(name)s, %(o_plugin)s)"
+            self.changed = exec_sql(self, query,
+                                    query_params={'name': self.name, 'o_plugin': output_plugin}, ddl=True)
+
+    def drop(self):
+        """Drop the replication slot if it exists."""
+        if not self.exists:
+            return False
+
+        query = "SELECT pg_drop_replication_slot(%(name)s)"
+        self.changed = exec_sql(self, query, query_params={'name': self.name}, ddl=True)
+
+    def __slot_exists(self):
+        """Check the slot and set self.exists and self.kind accordingly."""
+        query = "SELECT slot_type FROM pg_replication_slots WHERE slot_name = %(name)s"
+        res = exec_sql(self, query, query_params={'name': self.name}, add_to_executed=False)
+        if res:
+            self.exists = True
+            self.kind = res[0][0]
+
+
+# ===========================================
+# Module execution.
+#
+
+
+def main():
+    argument_spec = postgres_common_argument_spec()
+    argument_spec.update(
+        db=dict(type="str", aliases=["login_db"]),
+        name=dict(type="str", required=True, aliases=["slot_name"]),
+        slot_type=dict(type="str", default="physical", choices=["logical", "physical"]),
+        immediately_reserve=dict(type="bool", default=False),
+        session_role=dict(type="str"),
+        output_plugin=dict(type="str", default="test_decoding"),
+        state=dict(type="str", default="present", choices=["absent", "present"]),
+    )
+
+    module = AnsibleModule(
+        argument_spec=argument_spec,
+        supports_check_mode=True,
+    )
+
+    name = module.params["name"]
+    slot_type = module.params["slot_type"]
+    immediately_reserve = module.params["immediately_reserve"]
+    state = module.params["state"]
+    output_plugin = module.params["output_plugin"]
+
+    if immediately_reserve and slot_type == 'logical':
+        module.fail_json(msg="Module parameters immediately_reserve and slot_type=logical are mutually exclusive")
+
+    # When slot_type is logical and the db parameter is not passed,
+    # the default database will be used to create the slot and
+    # the user should know about this.
+    # When the slot type is physical,
+    # it doesn't matter which database is used
+    # because physical slots are global objects.
+ if slot_type == 'logical': + warn_db_default = True + else: + warn_db_default = False + + conn_params = get_conn_params(module, module.params, warn_db_default=warn_db_default) + db_connection = connect_to_db(module, conn_params, autocommit=True) + cursor = db_connection.cursor(cursor_factory=DictCursor) + + ################################## + # Create an object and do main job + pg_slot = PgSlot(module, cursor, name) + + changed = False + + if module.check_mode: + if state == "present": + if not pg_slot.exists: + changed = True + + pg_slot.create(slot_type, immediately_reserve, output_plugin, just_check=True) + + elif state == "absent": + if pg_slot.exists: + changed = True + else: + if state == "absent": + pg_slot.drop() + + elif state == "present": + pg_slot.create(slot_type, immediately_reserve, output_plugin) + + changed = pg_slot.changed + + db_connection.close() + module.exit_json(changed=changed, name=name, queries=pg_slot.executed_queries) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/database/postgresql/postgresql_subscription.py b/plugins/modules/database/postgresql/postgresql_subscription.py new file mode 100644 index 0000000000..856e423aa7 --- /dev/null +++ b/plugins/modules/database/postgresql/postgresql_subscription.py @@ -0,0 +1,685 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2019, Andrew Klychkov (@Andersson007) +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = r''' +--- +module: postgresql_subscription +short_description: Add, update, or remove PostgreSQL subscription +description: +- Add, update, or remove PostgreSQL subscription. + +options: + name: + description: + - Name of the subscription to add, update, or remove. + type: str + required: yes + db: + description: + - Name of the database to connect to and where + the subscription state will be changed. + aliases: [ login_db ] + type: str + required: yes + state: + description: + - The subscription state. + - C(present) implies that if I(name) subscription doesn't exist, it will be created. + - C(absent) implies that if I(name) subscription exists, it will be removed. + - C(refresh) implies that if I(name) subscription exists, it will be refreshed. + Fetch missing table information from publisher. Always returns ``changed`` is ``True``. + This will start replication of tables that were added to the subscribed-to publications + since the last invocation of REFRESH PUBLICATION or since CREATE SUBSCRIPTION. + The existing data in the publications that are being subscribed to + should be copied once the replication starts. + - For more information about C(refresh) see U(https://www.postgresql.org/docs/current/sql-altersubscription.html). + type: str + choices: [ absent, present, refresh ] + default: present + owner: + description: + - Subscription owner. + - If I(owner) is not defined, the owner will be set as I(login_user) or I(session_role). + - Ignored when I(state) is not C(present). + type: str + publications: + description: + - The publication names on the publisher to use for the subscription. + - Ignored when I(state) is not C(present). + type: list + elements: str + connparams: + description: + - The connection dict param-value to connect to the publisher. 
+ - For more information see U(https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-CONNSTRING). + - Ignored when I(state) is not C(present). + type: dict + cascade: + description: + - Drop subscription dependencies. Has effect with I(state=absent) only. + - Ignored when I(state) is not C(absent). + type: bool + default: false + subsparams: + description: + - Dictionary of optional parameters for a subscription, e.g. copy_data, enabled, create_slot, etc. + - For update the subscription allowed keys are C(enabled), C(slot_name), C(synchronous_commit), C(publication_name). + - See available parameters to create a new subscription + on U(https://www.postgresql.org/docs/current/sql-createsubscription.html). + - Ignored when I(state) is not C(present). + type: dict + +notes: +- PostgreSQL version must be 10 or greater. + +seealso: +- module: postgresql_publication +- module: postgresql_info +- name: CREATE SUBSCRIPTION reference + description: Complete reference of the CREATE SUBSCRIPTION command documentation. + link: https://www.postgresql.org/docs/current/sql-createsubscription.html +- name: ALTER SUBSCRIPTION reference + description: Complete reference of the ALTER SUBSCRIPTION command documentation. + link: https://www.postgresql.org/docs/current/sql-altersubscription.html +- name: DROP SUBSCRIPTION reference + description: Complete reference of the DROP SUBSCRIPTION command documentation. + link: https://www.postgresql.org/docs/current/sql-dropsubscription.html + +author: +- Andrew Klychkov (@Andersson007) + +extends_documentation_fragment: +- community.general.postgres + +''' + +EXAMPLES = r''' +- name: > + Create acme subscription in mydb database using acme_publication and + the following connection parameters to connect to the publisher. + Set the subscription owner as alice. + postgresql_subscription: + db: mydb + name: acme + state: present + publications: acme_publication + owner: alice + connparams: + host: 127.0.0.1 + port: 5432 + user: repl + password: replpass + dbname: mydb + +- name: Assuming that acme subscription exists, try to change conn parameters + postgresql_subscription: + db: mydb + name: acme + connparams: + host: 127.0.0.1 + port: 5432 + user: repl + password: replpass + connect_timeout: 100 + +- name: Refresh acme publication + postgresql_subscription: + db: mydb + name: acme + state: refresh + +- name: Drop acme subscription from mydb with dependencies (cascade=yes) + postgresql_subscription: + db: mydb + name: acme + state: absent + cascade: yes + +- name: Assuming that acme subscription exists and enabled, disable the subscription + postgresql_subscription: + db: mydb + name: acme + state: present + subsparams: + enabled: no +''' + +RETURN = r''' +name: + description: + - Name of the subscription. + returned: always + type: str + sample: acme +exists: + description: + - Flag indicates the subscription exists or not at the end of runtime. + returned: always + type: bool + sample: true +queries: + description: List of executed queries. + returned: always + type: str + sample: [ 'DROP SUBSCRIPTION "mysubscription"' ] +initial_state: + description: Subscription configuration at the beginning of runtime. + returned: always + type: dict + sample: {"conninfo": {}, "enabled": true, "owner": "postgres", "slotname": "test", "synccommit": true} +final_state: + description: Subscription configuration at the end of runtime. 
+ returned: always + type: dict + sample: {"conninfo": {}, "enabled": true, "owner": "postgres", "slotname": "test", "synccommit": true} +''' + +from copy import deepcopy + +try: + from psycopg2.extras import DictCursor +except ImportError: + # psycopg2 is checked by connect_to_db() + # from ansible.module_utils.postgres + pass + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.postgres import ( + connect_to_db, + exec_sql, + get_conn_params, + postgres_common_argument_spec, +) +from ansible.module_utils.six import iteritems + +SUPPORTED_PG_VERSION = 10000 + +SUBSPARAMS_KEYS_FOR_UPDATE = ('enabled', 'synchronous_commit', 'slot_name') + + +################################ +# Module functions and classes # +################################ + +def convert_conn_params(conn_dict): + """Converts the passed connection dictionary to string. + + Args: + conn_dict (list): Dictionary which needs to be converted. + + Returns: + Connection string. + """ + conn_list = [] + for (param, val) in iteritems(conn_dict): + conn_list.append('%s=%s' % (param, val)) + + return ' '.join(conn_list) + + +def convert_subscr_params(params_dict): + """Converts the passed params dictionary to string. + + Args: + params_dict (list): Dictionary which needs to be converted. + + Returns: + Parameters string. + """ + params_list = [] + for (param, val) in iteritems(params_dict): + if val is False: + val = 'false' + elif val is True: + val = 'true' + + params_list.append('%s = %s' % (param, val)) + + return ', '.join(params_list) + + +class PgSubscription(): + """Class to work with PostgreSQL subscription. + + Args: + module (AnsibleModule): Object of AnsibleModule class. + cursor (cursor): Cursor object of psycopg2 library to work with PostgreSQL. + name (str): The name of the subscription. + db (str): The database name the subscription will be associated with. + + Attributes: + module (AnsibleModule): Object of AnsibleModule class. + cursor (cursor): Cursor object of psycopg2 library to work with PostgreSQL. + name (str): Name of subscription. + executed_queries (list): List of executed queries. + attrs (dict): Dict with subscription attributes. + exists (bool): Flag indicates the subscription exists or not. + """ + + def __init__(self, module, cursor, name, db): + self.module = module + self.cursor = cursor + self.name = name + self.db = db + self.executed_queries = [] + self.attrs = { + 'owner': None, + 'enabled': None, + 'synccommit': None, + 'conninfo': {}, + 'slotname': None, + 'publications': [], + } + self.empty_attrs = deepcopy(self.attrs) + self.exists = self.check_subscr() + + def get_info(self): + """Refresh the subscription information. + + Returns: + ``self.attrs``. + """ + self.exists = self.check_subscr() + return self.attrs + + def check_subscr(self): + """Check the subscription and refresh ``self.attrs`` subscription attribute. + + Returns: + True if the subscription with ``self.name`` exists, False otherwise. 
+ """ + + subscr_info = self.__get_general_subscr_info() + + if not subscr_info: + # The subscription does not exist: + self.attrs = deepcopy(self.empty_attrs) + return False + + self.attrs['owner'] = subscr_info.get('rolname') + self.attrs['enabled'] = subscr_info.get('subenabled') + self.attrs['synccommit'] = subscr_info.get('subenabled') + self.attrs['slotname'] = subscr_info.get('subslotname') + self.attrs['publications'] = subscr_info.get('subpublications') + if subscr_info.get('subconninfo'): + for param in subscr_info['subconninfo'].split(' '): + tmp = param.split('=') + try: + self.attrs['conninfo'][tmp[0]] = int(tmp[1]) + except ValueError: + self.attrs['conninfo'][tmp[0]] = tmp[1] + + return True + + def create(self, connparams, publications, subsparams, check_mode=True): + """Create the subscription. + + Args: + connparams (str): Connection string in libpq style. + publications (list): Publications on the master to use. + subsparams (str): Parameters string in WITH () clause style. + + Kwargs: + check_mode (bool): If True, don't actually change anything, + just make SQL, add it to ``self.executed_queries`` and return True. + + Returns: + changed (bool): True if the subscription has been created, otherwise False. + """ + query_fragments = [] + query_fragments.append("CREATE SUBSCRIPTION %s CONNECTION '%s' " + "PUBLICATION %s" % (self.name, connparams, ', '.join(publications))) + + if subsparams: + query_fragments.append("WITH (%s)" % subsparams) + + changed = self.__exec_sql(' '.join(query_fragments), check_mode=check_mode) + + return changed + + def update(self, connparams, publications, subsparams, check_mode=True): + """Update the subscription. + + Args: + connparams (str): Connection string in libpq style. + publications (list): Publications on the master to use. + subsparams (dict): Dictionary of optional parameters. + + Kwargs: + check_mode (bool): If True, don't actually change anything, + just make SQL, add it to ``self.executed_queries`` and return True. + + Returns: + changed (bool): True if subscription has been updated, otherwise False. + """ + changed = False + + if connparams: + if connparams != self.attrs['conninfo']: + changed = self.__set_conn_params(convert_conn_params(connparams), + check_mode=check_mode) + + if publications: + if sorted(self.attrs['publications']) != sorted(publications): + changed = self.__set_publications(publications, check_mode=check_mode) + + if subsparams: + params_to_update = [] + + for (param, value) in iteritems(subsparams): + if param == 'enabled': + if self.attrs['enabled'] and value is False: + changed = self.enable(enabled=False, check_mode=check_mode) + elif not self.attrs['enabled'] and value is True: + changed = self.enable(enabled=True, check_mode=check_mode) + + elif param == 'synchronous_commit': + if self.attrs['synccommit'] is True and value is False: + params_to_update.append("%s = false" % param) + elif self.attrs['synccommit'] is False and value is True: + params_to_update.append("%s = true" % param) + + elif param == 'slot_name': + if self.attrs['slotname'] and self.attrs['slotname'] != value: + params_to_update.append("%s = %s" % (param, value)) + + else: + self.module.warn("Parameter '%s' is not in params supported " + "for update '%s', ignored..." % (param, SUBSPARAMS_KEYS_FOR_UPDATE)) + + if params_to_update: + changed = self.__set_params(params_to_update, check_mode=check_mode) + + return changed + + def drop(self, cascade=False, check_mode=True): + """Drop the subscription. 
+ + Kwargs: + cascade (bool): Flag indicates that the subscription needs to be deleted + with its dependencies. + check_mode (bool): If True, don't actually change anything, + just make SQL, add it to ``self.executed_queries`` and return True. + + Returns: + changed (bool): True if the subscription has been removed, otherwise False. + """ + if self.exists: + query_fragments = ["DROP SUBSCRIPTION %s" % self.name] + if cascade: + query_fragments.append("CASCADE") + + return self.__exec_sql(' '.join(query_fragments), check_mode=check_mode) + + def set_owner(self, role, check_mode=True): + """Set a subscription owner. + + Args: + role (str): Role (user) name that needs to be set as a subscription owner. + + Kwargs: + check_mode (bool): If True, don't actually change anything, + just make SQL, add it to ``self.executed_queries`` and return True. + + Returns: + True if successful, False otherwise. + """ + query = 'ALTER SUBSCRIPTION %s OWNER TO "%s"' % (self.name, role) + return self.__exec_sql(query, check_mode=check_mode) + + def refresh(self, check_mode=True): + """Refresh publication. + + Fetches missing table info from publisher. + + Kwargs: + check_mode (bool): If True, don't actually change anything, + just make SQL, add it to ``self.executed_queries`` and return True. + + Returns: + True if successful, False otherwise. + """ + query = 'ALTER SUBSCRIPTION %s REFRESH PUBLICATION' % self.name + return self.__exec_sql(query, check_mode=check_mode) + + def __set_params(self, params_to_update, check_mode=True): + """Update optional subscription parameters. + + Args: + params_to_update (list): Parameters with values to update. + + Kwargs: + check_mode (bool): If True, don't actually change anything, + just make SQL, add it to ``self.executed_queries`` and return True. + + Returns: + True if successful, False otherwise. + """ + query = 'ALTER SUBSCRIPTION %s SET (%s)' % (self.name, ', '.join(params_to_update)) + return self.__exec_sql(query, check_mode=check_mode) + + def __set_conn_params(self, connparams, check_mode=True): + """Update connection parameters. + + Args: + connparams (str): Connection string in libpq style. + + Kwargs: + check_mode (bool): If True, don't actually change anything, + just make SQL, add it to ``self.executed_queries`` and return True. + + Returns: + True if successful, False otherwise. + """ + query = "ALTER SUBSCRIPTION %s CONNECTION '%s'" % (self.name, connparams) + return self.__exec_sql(query, check_mode=check_mode) + + def __set_publications(self, publications, check_mode=True): + """Update publications. + + Args: + publications (list): Publications on the master to use. + + Kwargs: + check_mode (bool): If True, don't actually change anything, + just make SQL, add it to ``self.executed_queries`` and return True. + + Returns: + True if successful, False otherwise. + """ + query = 'ALTER SUBSCRIPTION %s SET PUBLICATION %s' % (self.name, ', '.join(publications)) + return self.__exec_sql(query, check_mode=check_mode) + + def enable(self, enabled=True, check_mode=True): + """Enable or disable the subscription. + + Kwargs: + enable (bool): Flag indicates that the subscription needs + to be enabled or disabled. + check_mode (bool): If True, don't actually change anything, + just make SQL, add it to ``self.executed_queries`` and return True. + + Returns: + True if successful, False otherwise. 
+ """ + if enabled: + query = 'ALTER SUBSCRIPTION %s ENABLE' % self.name + else: + query = 'ALTER SUBSCRIPTION %s DISABLE' % self.name + + return self.__exec_sql(query, check_mode=check_mode) + + def __get_general_subscr_info(self): + """Get and return general subscription information. + + Returns: + Dict with subscription information if successful, False otherwise. + """ + query = ("SELECT d.datname, r.rolname, s.subenabled, " + "s.subconninfo, s.subslotname, s.subsynccommit, " + "s.subpublications FROM pg_catalog.pg_subscription s " + "JOIN pg_catalog.pg_database d " + "ON s.subdbid = d.oid " + "JOIN pg_catalog.pg_roles AS r " + "ON s.subowner = r.oid " + "WHERE s.subname = %(name)s AND d.datname = %(db)s") + + result = exec_sql(self, query, query_params={'name': self.name, 'db': self.db}, add_to_executed=False) + if result: + return result[0] + else: + return False + + def __exec_sql(self, query, check_mode=False): + """Execute SQL query. + + Note: If we need just to get information from the database, + we use ``exec_sql`` function directly. + + Args: + query (str): Query that needs to be executed. + + Kwargs: + check_mode (bool): If True, don't actually change anything, + just add ``query`` to ``self.executed_queries`` and return True. + + Returns: + True if successful, False otherwise. + """ + if check_mode: + self.executed_queries.append(query) + return True + else: + return exec_sql(self, query, ddl=True) + + +# =========================================== +# Module execution. +# + + +def main(): + argument_spec = postgres_common_argument_spec() + argument_spec.update( + name=dict(type='str', required=True), + db=dict(type='str', required=True, aliases=['login_db']), + state=dict(type='str', default='present', choices=['absent', 'present', 'refresh']), + publications=dict(type='list', elements='str'), + connparams=dict(type='dict'), + cascade=dict(type='bool', default=False), + owner=dict(type='str'), + subsparams=dict(type='dict'), + ) + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + + # Parameters handling: + db = module.params['db'] + name = module.params['name'] + state = module.params['state'] + publications = module.params['publications'] + cascade = module.params['cascade'] + owner = module.params['owner'] + subsparams = module.params['subsparams'] + connparams = module.params['connparams'] + + if state == 'present' and cascade: + module.warn('parameter "cascade" is ignored when state is not absent') + + if state != 'present': + if owner: + module.warn("parameter 'owner' is ignored when state is not 'present'") + if publications: + module.warn("parameter 'publications' is ignored when state is not 'present'") + if connparams: + module.warn("parameter 'connparams' is ignored when state is not 'present'") + if subsparams: + module.warn("parameter 'subsparams' is ignored when state is not 'present'") + + # Connect to DB and make cursor object: + pg_conn_params = get_conn_params(module, module.params) + # We check subscription state without DML queries execution, so set autocommit: + db_connection = connect_to_db(module, pg_conn_params, autocommit=True) + cursor = db_connection.cursor(cursor_factory=DictCursor) + + # Check version: + if cursor.connection.server_version < SUPPORTED_PG_VERSION: + module.fail_json(msg="PostgreSQL server version should be 10.0 or greater") + + # Set defaults: + changed = False + initial_state = {} + final_state = {} + + ################################### + # Create object and do rock'n'roll: + subscription = 
PgSubscription(module, cursor, name, db) + + if subscription.exists: + initial_state = deepcopy(subscription.attrs) + final_state = deepcopy(initial_state) + + if state == 'present': + if not subscription.exists: + if subsparams: + subsparams = convert_subscr_params(subsparams) + + if connparams: + connparams = convert_conn_params(connparams) + + changed = subscription.create(connparams, + publications, + subsparams, + check_mode=module.check_mode) + + else: + changed = subscription.update(connparams, + publications, + subsparams, + check_mode=module.check_mode) + + if owner and subscription.attrs['owner'] != owner: + changed = subscription.set_owner(owner, check_mode=module.check_mode) or changed + + elif state == 'absent': + changed = subscription.drop(cascade, check_mode=module.check_mode) + + elif state == 'refresh': + if not subscription.exists: + module.fail_json(msg="Refresh failed: subscription '%s' does not exist" % name) + + # Always returns True: + changed = subscription.refresh(check_mode=module.check_mode) + + # Get final subscription info: + final_state = subscription.get_info() + + # Connection is not needed any more: + cursor.close() + db_connection.close() + + # Return ret values and exit: + module.exit_json(changed=changed, + name=name, + exists=subscription.exists, + queries=subscription.executed_queries, + initial_state=initial_state, + final_state=final_state) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/database/postgresql/postgresql_table.py b/plugins/modules/database/postgresql/postgresql_table.py new file mode 100644 index 0000000000..92190d5d5f --- /dev/null +++ b/plugins/modules/database/postgresql/postgresql_table.py @@ -0,0 +1,601 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2019, Andrew Klychkov (@Andersson007) +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = { + 'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community' +} + +DOCUMENTATION = r''' +--- +module: postgresql_table +short_description: Create, drop, or modify a PostgreSQL table +description: +- Allows to create, drop, rename, truncate a table, or change some table attributes. +options: + table: + description: + - Table name. + required: true + aliases: + - name + type: str + state: + description: + - The table state. I(state=absent) is mutually exclusive with I(tablespace), I(owner), I(unlogged), + I(like), I(including), I(columns), I(truncate), I(storage_params) and, I(rename). + type: str + default: present + choices: [ absent, present ] + tablespace: + description: + - Set a tablespace for the table. + required: false + type: str + owner: + description: + - Set a table owner. + type: str + unlogged: + description: + - Create an unlogged table. + type: bool + default: no + like: + description: + - Create a table like another table (with similar DDL). + Mutually exclusive with I(columns), I(rename), and I(truncate). + type: str + including: + description: + - Keywords that are used with like parameter, may be DEFAULTS, CONSTRAINTS, INDEXES, STORAGE, COMMENTS or ALL. + Needs I(like) specified. Mutually exclusive with I(columns), I(rename), and I(truncate). + type: str + columns: + description: + - Columns that are needed. + type: list + elements: str + rename: + description: + - New table name. 
Mutually exclusive with I(tablespace), I(owner), + I(unlogged), I(like), I(including), I(columns), I(truncate), and I(storage_params). + type: str + truncate: + description: + - Truncate a table. Mutually exclusive with I(tablespace), I(owner), I(unlogged), + I(like), I(including), I(columns), I(rename), and I(storage_params). + type: bool + default: no + storage_params: + description: + - Storage parameters like fillfactor, autovacuum_vacuum_treshold, etc. + Mutually exclusive with I(rename) and I(truncate). + type: list + elements: str + db: + description: + - Name of database to connect and where the table will be created. + type: str + aliases: + - login_db + session_role: + description: + - Switch to session_role after connecting. + The specified session_role must be a role that the current login_user is a member of. + - Permissions checking for SQL commands is carried out as though + the session_role were the one that had logged in originally. + type: str + cascade: + description: + - Automatically drop objects that depend on the table (such as views). + Used with I(state=absent) only. + type: bool + default: no +notes: +- If you do not pass db parameter, tables will be created in the database + named postgres. +- PostgreSQL allows to create columnless table, so columns param is optional. +- Unlogged tables are available from PostgreSQL server version 9.1. +seealso: +- module: postgresql_sequence +- module: postgresql_idx +- module: postgresql_info +- module: postgresql_tablespace +- module: postgresql_owner +- module: postgresql_privs +- module: postgresql_copy +- name: CREATE TABLE reference + description: Complete reference of the CREATE TABLE command documentation. + link: https://www.postgresql.org/docs/current/sql-createtable.html +- name: ALTER TABLE reference + description: Complete reference of the ALTER TABLE command documentation. + link: https://www.postgresql.org/docs/current/sql-altertable.html +- name: DROP TABLE reference + description: Complete reference of the DROP TABLE command documentation. + link: https://www.postgresql.org/docs/current/sql-droptable.html +- name: PostgreSQL data types + description: Complete reference of the PostgreSQL data types documentation. 
+ link: https://www.postgresql.org/docs/current/datatype.html +author: +- Andrei Klychkov (@Andersson007) +extends_documentation_fragment: +- community.general.postgres + +''' + +EXAMPLES = r''' +- name: Create tbl2 in the acme database with the DDL like tbl1 with testuser as an owner + postgresql_table: + db: acme + name: tbl2 + like: tbl1 + owner: testuser + +- name: Create tbl2 in the acme database and tablespace ssd with the DDL like tbl1 including comments and indexes + postgresql_table: + db: acme + table: tbl2 + like: tbl1 + including: comments, indexes + tablespace: ssd + +- name: Create test_table with several columns in ssd tablespace with fillfactor=10 and autovacuum_analyze_threshold=1 + postgresql_table: + name: test_table + columns: + - id bigserial primary key + - num bigint + - stories text + tablespace: ssd + storage_params: + - fillfactor=10 + - autovacuum_analyze_threshold=1 + +- name: Create an unlogged table in schema acme + postgresql_table: + name: acme.useless_data + columns: waste_id int + unlogged: true + +- name: Rename table foo to bar + postgresql_table: + table: foo + rename: bar + +- name: Rename table foo from schema acme to bar + postgresql_table: + name: acme.foo + rename: bar + +- name: Set owner to someuser + postgresql_table: + name: foo + owner: someuser + +- name: Change tablespace of foo table to new_tablespace and set owner to new_user + postgresql_table: + name: foo + tablespace: new_tablespace + owner: new_user + +- name: Truncate table foo + postgresql_table: + name: foo + truncate: yes + +- name: Drop table foo from schema acme + postgresql_table: + name: acme.foo + state: absent + +- name: Drop table bar cascade + postgresql_table: + name: bar + state: absent + cascade: yes +''' + +RETURN = r''' +table: + description: Name of a table. + returned: always + type: str + sample: 'foo' +state: + description: Table state. + returned: always + type: str + sample: 'present' +owner: + description: Table owner. + returned: always + type: str + sample: 'postgres' +tablespace: + description: Tablespace. + returned: always + type: str + sample: 'ssd_tablespace' +queries: + description: List of executed queries. + returned: always + type: str + sample: [ 'CREATE TABLE "test_table" (id bigint)' ] +storage_params: + description: Storage parameters. + returned: always + type: list + sample: [ "fillfactor=100", "autovacuum_analyze_threshold=1" ] +''' + +try: + from psycopg2.extras import DictCursor +except ImportError: + # psycopg2 is checked by connect_to_db() + # from ansible.module_utils.postgres + pass + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.database import pg_quote_identifier +from ansible_collections.community.general.plugins.module_utils.postgres import ( + connect_to_db, + exec_sql, + get_conn_params, + postgres_common_argument_spec, +) + + +# =========================================== +# PostgreSQL module specific support methods. +# + +class Table(object): + def __init__(self, name, module, cursor): + self.name = name + self.module = module + self.cursor = cursor + self.info = { + 'owner': '', + 'tblspace': '', + 'storage_params': [], + } + self.exists = False + self.__exists_in_db() + self.executed_queries = [] + + def get_info(self): + """Getter to refresh and get table info""" + self.__exists_in_db() + + def __exists_in_db(self): + """Check table exists and refresh info""" + if "." 
in self.name: + schema = self.name.split('.')[-2] + tblname = self.name.split('.')[-1] + else: + schema = 'public' + tblname = self.name + + query = ("SELECT t.tableowner, t.tablespace, c.reloptions " + "FROM pg_tables AS t " + "INNER JOIN pg_class AS c ON c.relname = t.tablename " + "INNER JOIN pg_namespace AS n ON c.relnamespace = n.oid " + "WHERE t.tablename = %(tblname)s " + "AND n.nspname = %(schema)s") + res = exec_sql(self, query, query_params={'tblname': tblname, 'schema': schema}, + add_to_executed=False) + if res: + self.exists = True + self.info = dict( + owner=res[0][0], + tblspace=res[0][1] if res[0][1] else '', + storage_params=res[0][2] if res[0][2] else [], + ) + + return True + else: + self.exists = False + return False + + def create(self, columns='', params='', tblspace='', + unlogged=False, owner=''): + """ + Create table. + If table exists, check passed args (params, tblspace, owner) and, + if they're different from current, change them. + Arguments: + params - storage params (passed by "WITH (...)" in SQL), + comma separated. + tblspace - tablespace. + owner - table owner. + unlogged - create unlogged table. + columns - column string (comma separated). + """ + name = pg_quote_identifier(self.name, 'table') + + changed = False + + if self.exists: + if tblspace == 'pg_default' and self.info['tblspace'] is None: + pass # Because they have the same meaning + elif tblspace and self.info['tblspace'] != tblspace: + self.set_tblspace(tblspace) + changed = True + + if owner and self.info['owner'] != owner: + self.set_owner(owner) + changed = True + + if params: + param_list = [p.strip(' ') for p in params.split(',')] + + new_param = False + for p in param_list: + if p not in self.info['storage_params']: + new_param = True + + if new_param: + self.set_stor_params(params) + changed = True + + if changed: + return True + return False + + query = "CREATE" + if unlogged: + query += " UNLOGGED TABLE %s" % name + else: + query += " TABLE %s" % name + + if columns: + query += " (%s)" % columns + else: + query += " ()" + + if params: + query += " WITH (%s)" % params + + if tblspace: + query += " TABLESPACE %s" % pg_quote_identifier(tblspace, 'database') + + if exec_sql(self, query, ddl=True): + changed = True + + if owner: + changed = self.set_owner(owner) + + return changed + + def create_like(self, src_table, including='', tblspace='', + unlogged=False, params='', owner=''): + """ + Create table like another table (with similar DDL). + Arguments: + src_table - source table. + including - corresponds to optional INCLUDING expression + in CREATE TABLE ... LIKE statement. + params - storage params (passed by "WITH (...)" in SQL), + comma separated. + tblspace - tablespace. + owner - table owner. + unlogged - create unlogged table. 
+ """ + changed = False + + name = pg_quote_identifier(self.name, 'table') + + query = "CREATE" + if unlogged: + query += " UNLOGGED TABLE %s" % name + else: + query += " TABLE %s" % name + + query += " (LIKE %s" % pg_quote_identifier(src_table, 'table') + + if including: + including = including.split(',') + for i in including: + query += " INCLUDING %s" % i + + query += ')' + + if params: + query += " WITH (%s)" % params + + if tblspace: + query += " TABLESPACE %s" % pg_quote_identifier(tblspace, 'database') + + if exec_sql(self, query, ddl=True): + changed = True + + if owner: + changed = self.set_owner(owner) + + return changed + + def truncate(self): + query = "TRUNCATE TABLE %s" % pg_quote_identifier(self.name, 'table') + return exec_sql(self, query, ddl=True) + + def rename(self, newname): + query = "ALTER TABLE %s RENAME TO %s" % (pg_quote_identifier(self.name, 'table'), + pg_quote_identifier(newname, 'table')) + return exec_sql(self, query, ddl=True) + + def set_owner(self, username): + query = "ALTER TABLE %s OWNER TO %s" % (pg_quote_identifier(self.name, 'table'), + pg_quote_identifier(username, 'role')) + return exec_sql(self, query, ddl=True) + + def drop(self, cascade=False): + if not self.exists: + return False + + query = "DROP TABLE %s" % pg_quote_identifier(self.name, 'table') + if cascade: + query += " CASCADE" + return exec_sql(self, query, ddl=True) + + def set_tblspace(self, tblspace): + query = "ALTER TABLE %s SET TABLESPACE %s" % (pg_quote_identifier(self.name, 'table'), + pg_quote_identifier(tblspace, 'database')) + return exec_sql(self, query, ddl=True) + + def set_stor_params(self, params): + query = "ALTER TABLE %s SET (%s)" % (pg_quote_identifier(self.name, 'table'), params) + return exec_sql(self, query, ddl=True) + + +# =========================================== +# Module execution. 
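+#
+# Illustrative sketch only (made-up table names): for like='tbl1' and
+# including='comments, indexes', Table.create_like() above builds DDL
+# equivalent to the statement below.
+EXAMPLE_CREATE_LIKE_DDL = (
+    'CREATE TABLE "tbl2" '
+    '(LIKE "tbl1" INCLUDING comments INCLUDING indexes)'
+)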
+# + + +def main(): + argument_spec = postgres_common_argument_spec() + argument_spec.update( + table=dict(type='str', required=True, aliases=['name']), + state=dict(type='str', default="present", choices=["absent", "present"]), + db=dict(type='str', default='', aliases=['login_db']), + tablespace=dict(type='str'), + owner=dict(type='str'), + unlogged=dict(type='bool', default=False), + like=dict(type='str'), + including=dict(type='str'), + rename=dict(type='str'), + truncate=dict(type='bool', default=False), + columns=dict(type='list', elements='str'), + storage_params=dict(type='list', elements='str'), + session_role=dict(type='str'), + cascade=dict(type='bool', default=False), + ) + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + + table = module.params["table"] + state = module.params["state"] + tablespace = module.params["tablespace"] + owner = module.params["owner"] + unlogged = module.params["unlogged"] + like = module.params["like"] + including = module.params["including"] + newname = module.params["rename"] + storage_params = module.params["storage_params"] + truncate = module.params["truncate"] + columns = module.params["columns"] + cascade = module.params["cascade"] + + if state == 'present' and cascade: + module.warn("cascade=true is ignored when state=present") + + # Check mutual exclusive parameters: + if state == 'absent' and (truncate or newname or columns or tablespace or like or storage_params or unlogged or owner or including): + module.fail_json(msg="%s: state=absent is mutually exclusive with: " + "truncate, rename, columns, tablespace, " + "including, like, storage_params, unlogged, owner" % table) + + if truncate and (newname or columns or like or unlogged or storage_params or owner or tablespace or including): + module.fail_json(msg="%s: truncate is mutually exclusive with: " + "rename, columns, like, unlogged, including, " + "storage_params, owner, tablespace" % table) + + if newname and (columns or like or unlogged or storage_params or owner or tablespace or including): + module.fail_json(msg="%s: rename is mutually exclusive with: " + "columns, like, unlogged, including, " + "storage_params, owner, tablespace" % table) + + if like and columns: + module.fail_json(msg="%s: like and columns params are mutually exclusive" % table) + if including and not like: + module.fail_json(msg="%s: including param needs like param specified" % table) + + conn_params = get_conn_params(module, module.params) + db_connection = connect_to_db(module, conn_params, autocommit=False) + cursor = db_connection.cursor(cursor_factory=DictCursor) + + if storage_params: + storage_params = ','.join(storage_params) + + if columns: + columns = ','.join(columns) + + ############## + # Do main job: + table_obj = Table(table, module, cursor) + + # Set default returned values: + changed = False + kw = {} + kw['table'] = table + kw['state'] = '' + if table_obj.exists: + kw = dict( + table=table, + state='present', + owner=table_obj.info['owner'], + tablespace=table_obj.info['tblspace'], + storage_params=table_obj.info['storage_params'], + ) + + if state == 'absent': + changed = table_obj.drop(cascade=cascade) + + elif truncate: + changed = table_obj.truncate() + + elif newname: + changed = table_obj.rename(newname) + q = table_obj.executed_queries + table_obj = Table(newname, module, cursor) + table_obj.executed_queries = q + + elif state == 'present' and not like: + changed = table_obj.create(columns, storage_params, + tablespace, unlogged, owner) + + elif 
state == 'present' and like: + changed = table_obj.create_like(like, including, tablespace, + unlogged, storage_params) + + if changed: + if module.check_mode: + db_connection.rollback() + else: + db_connection.commit() + + # Refresh table info for RETURN. + # Note, if table has been renamed, it gets info by newname: + table_obj.get_info() + db_connection.commit() + if table_obj.exists: + kw = dict( + table=table, + state='present', + owner=table_obj.info['owner'], + tablespace=table_obj.info['tblspace'], + storage_params=table_obj.info['storage_params'], + ) + else: + # We just change the table state here + # to keep other information about the dropped table: + kw['state'] = 'absent' + + kw['queries'] = table_obj.executed_queries + kw['changed'] = changed + db_connection.close() + module.exit_json(**kw) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/database/postgresql/postgresql_tablespace.py b/plugins/modules/database/postgresql/postgresql_tablespace.py new file mode 100644 index 0000000000..923ed2e8a2 --- /dev/null +++ b/plugins/modules/database/postgresql/postgresql_tablespace.py @@ -0,0 +1,520 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2017, Flavien Chantelot (@Dorn-) +# Copyright: (c) 2018, Antoine Levy-Lambert (@antoinell) +# Copyright: (c) 2019, Andrew Klychkov (@Andersson007) +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +ANSIBLE_METADATA = { + 'metadata_version': '1.1', + 'supported_by': 'community', + 'status': ['preview'] +} + +DOCUMENTATION = r''' +--- +module: postgresql_tablespace +short_description: Add or remove PostgreSQL tablespaces from remote hosts +description: +- Adds or removes PostgreSQL tablespaces from remote hosts. +options: + tablespace: + description: + - Name of the tablespace to add or remove. + required: true + type: str + aliases: + - name + location: + description: + - Path to the tablespace directory in the file system. + - Ensure that the location exists and has right privileges. + type: path + aliases: + - path + state: + description: + - Tablespace state. + - I(state=present) implies the tablespace must be created if it doesn't exist. + - I(state=absent) implies the tablespace must be removed if present. + I(state=absent) is mutually exclusive with I(location), I(owner), i(set). + - See the Notes section for information about check mode restrictions. + type: str + default: present + choices: [ absent, present ] + owner: + description: + - Name of the role to set as an owner of the tablespace. + - If this option is not specified, the tablespace owner is a role that creates the tablespace. + type: str + set: + description: + - Dict of tablespace options to set. Supported from PostgreSQL 9.0. + - For more information see U(https://www.postgresql.org/docs/current/sql-createtablespace.html). + - When reset is passed as an option's value, if the option was set previously, it will be removed. + type: dict + rename_to: + description: + - New name of the tablespace. + - The new name cannot begin with pg_, as such names are reserved for system tablespaces. + type: str + session_role: + description: + - Switch to session_role after connecting. The specified session_role must + be a role that the current login_user is a member of. + - Permissions checking for SQL commands is carried out as though + the session_role were the one that had logged in originally. 
+    type: str
+  db:
+    description:
+    - Name of database to connect to and run queries against.
+    type: str
+    aliases:
+    - login_db
+
+notes:
+- I(state=absent) and I(state=present) (the second one if the tablespace doesn't exist) do not
+  support check mode because the corresponding PostgreSQL DROP and CREATE TABLESPACE commands
+  cannot be run inside a transaction block.
+
+seealso:
+- name: PostgreSQL tablespaces
+  description: General information about PostgreSQL tablespaces.
+  link: https://www.postgresql.org/docs/current/manage-ag-tablespaces.html
+- name: CREATE TABLESPACE reference
+  description: Complete reference of the CREATE TABLESPACE command documentation.
+  link: https://www.postgresql.org/docs/current/sql-createtablespace.html
+- name: ALTER TABLESPACE reference
+  description: Complete reference of the ALTER TABLESPACE command documentation.
+  link: https://www.postgresql.org/docs/current/sql-altertablespace.html
+- name: DROP TABLESPACE reference
+  description: Complete reference of the DROP TABLESPACE command documentation.
+  link: https://www.postgresql.org/docs/current/sql-droptablespace.html
+
+author:
+- Flavien Chantelot (@Dorn-)
+- Antoine Levy-Lambert (@antoinell)
+- Andrew Klychkov (@Andersson007)
+
+extends_documentation_fragment:
+- community.general.postgres
+
+'''
+
+EXAMPLES = r'''
+- name: Create a new tablespace called acme and set bob as its owner
+  postgresql_tablespace:
+    name: acme
+    owner: bob
+    location: /data/foo
+
+- name: Create a new tablespace called bar with tablespace options
+  postgresql_tablespace:
+    name: bar
+    set:
+      random_page_cost: 1
+      seq_page_cost: 1
+
+- name: Reset random_page_cost option
+  postgresql_tablespace:
+    name: bar
+    set:
+      random_page_cost: reset
+
+- name: Rename the tablespace from bar to pcie_ssd
+  postgresql_tablespace:
+    name: bar
+    rename_to: pcie_ssd
+
+- name: Drop tablespace called bloat
+  postgresql_tablespace:
+    name: bloat
+    state: absent
+'''
+
+RETURN = r'''
+queries:
+  description: List of queries that were executed.
+  returned: always
+  type: list
+  sample: [ "CREATE TABLESPACE bar LOCATION '/incredible/ssd'" ]
+tablespace:
+  description: Tablespace name.
+  returned: always
+  type: str
+  sample: 'ssd'
+owner:
+  description: Tablespace owner.
+  returned: always
+  type: str
+  sample: 'Bob'
+options:
+  description: Tablespace options.
+  returned: always
+  type: dict
+  sample: { 'random_page_cost': 1, 'seq_page_cost': 1 }
+location:
+  description: Path to the tablespace in the file system.
+  returned: always
+  type: str
+  sample: '/incredible/fast/ssd'
+newname:
+  description: New tablespace name.
+  returned: if the tablespace has been renamed
+  type: str
+  sample: new_ssd
+state:
+  description: Tablespace state at the end of execution.
+ returned: always + type: str + sample: 'present' +''' + +try: + from psycopg2 import __version__ as PSYCOPG2_VERSION + from psycopg2.extras import DictCursor + from psycopg2.extensions import ISOLATION_LEVEL_AUTOCOMMIT as AUTOCOMMIT + from psycopg2.extensions import ISOLATION_LEVEL_READ_COMMITTED as READ_COMMITTED +except ImportError: + # psycopg2 is checked by connect_to_db() + # from ansible.module_utils.postgres + pass + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.database import pg_quote_identifier +from ansible_collections.community.general.plugins.module_utils.postgres import ( + connect_to_db, + exec_sql, + get_conn_params, + postgres_common_argument_spec, +) + + +class PgTablespace(object): + + """Class for working with PostgreSQL tablespaces. + + Args: + module (AnsibleModule) -- object of AnsibleModule class + cursor (cursor) -- cursor object of psycopg2 library + name (str) -- name of the tablespace + + Attrs: + module (AnsibleModule) -- object of AnsibleModule class + cursor (cursor) -- cursor object of psycopg2 library + name (str) -- name of the tablespace + exists (bool) -- flag the tablespace exists in the DB or not + owner (str) -- tablespace owner + location (str) -- path to the tablespace directory in the file system + executed_queries (list) -- list of executed queries + new_name (str) -- new name for the tablespace + opt_not_supported (bool) -- flag indicates a tablespace option is supported or not + """ + + def __init__(self, module, cursor, name): + self.module = module + self.cursor = cursor + self.name = name + self.exists = False + self.owner = '' + self.settings = {} + self.location = '' + self.executed_queries = [] + self.new_name = '' + self.opt_not_supported = False + # Collect info: + self.get_info() + + def get_info(self): + """Get tablespace information.""" + # Check that spcoptions exists: + opt = exec_sql(self, "SELECT 1 FROM information_schema.columns " + "WHERE table_name = 'pg_tablespace' " + "AND column_name = 'spcoptions'", add_to_executed=False) + + # For 9.1 version and earlier: + location = exec_sql(self, "SELECT 1 FROM information_schema.columns " + "WHERE table_name = 'pg_tablespace' " + "AND column_name = 'spclocation'", add_to_executed=False) + if location: + location = 'spclocation' + else: + location = 'pg_tablespace_location(t.oid)' + + if not opt: + self.opt_not_supported = True + query = ("SELECT r.rolname, (SELECT Null), %s " + "FROM pg_catalog.pg_tablespace AS t " + "JOIN pg_catalog.pg_roles AS r " + "ON t.spcowner = r.oid " % location) + else: + query = ("SELECT r.rolname, t.spcoptions, %s " + "FROM pg_catalog.pg_tablespace AS t " + "JOIN pg_catalog.pg_roles AS r " + "ON t.spcowner = r.oid " % location) + + res = exec_sql(self, query + "WHERE t.spcname = %(name)s", + query_params={'name': self.name}, add_to_executed=False) + + if not res: + self.exists = False + return False + + if res[0][0]: + self.exists = True + self.owner = res[0][0] + + if res[0][1]: + # Options exist: + for i in res[0][1]: + i = i.split('=') + self.settings[i[0]] = i[1] + + if res[0][2]: + # Location exists: + self.location = res[0][2] + + def create(self, location): + """Create tablespace. + + Return True if success, otherwise, return False. 
+
+        args:
+            location (str) -- tablespace directory path in the FS
+        """
+        query = ("CREATE TABLESPACE %s LOCATION '%s'" % (pg_quote_identifier(self.name, 'database'), location))
+        return exec_sql(self, query, ddl=True)
+
+    def drop(self):
+        """Drop tablespace.
+
+        Return True if success, otherwise, return False.
+        """
+        return exec_sql(self, "DROP TABLESPACE %s" % pg_quote_identifier(self.name, 'database'), ddl=True)
+
+    def set_owner(self, new_owner):
+        """Set tablespace owner.
+
+        Return True if success, otherwise, return False.
+
+        args:
+            new_owner (str) -- name of a new owner for the tablespace
+        """
+        if new_owner == self.owner:
+            return False
+
+        query = "ALTER TABLESPACE %s OWNER TO %s" % (pg_quote_identifier(self.name, 'database'), new_owner)
+        return exec_sql(self, query, ddl=True)
+
+    def rename(self, newname):
+        """Rename tablespace.
+
+        Return True if success, otherwise, return False.
+
+        args:
+            newname (str) -- new name for the tablespace
+        """
+        query = "ALTER TABLESPACE %s RENAME TO %s" % (pg_quote_identifier(self.name, 'database'), newname)
+        self.new_name = newname
+        return exec_sql(self, query, ddl=True)
+
+    def set_settings(self, new_settings):
+        """Set tablespace settings (options).
+
+        If any setting has been changed, set changed = True.
+        After the whole settings dict has been handled, return changed.
+
+        args:
+            new_settings (dict) -- dict of new settings
+        """
+        # settings must be a dict {'key': 'value'}
+        if self.opt_not_supported:
+            return False
+
+        changed = False
+
+        # Apply new settings:
+        for i in new_settings:
+            if new_settings[i] == 'reset':
+                if i in self.settings:
+                    changed = self.__reset_setting(i)
+                    self.settings[i] = None
+
+            elif (i not in self.settings) or (str(new_settings[i]) != self.settings[i]):
+                changed = self.__set_setting("%s = '%s'" % (i, new_settings[i]))
+
+        return changed
+
+    def __reset_setting(self, setting):
+        """Reset tablespace setting.
+
+        Return True if success, otherwise, return False.
+
+        args:
+            setting (str) -- name of the setting to reset
+        """
+        query = "ALTER TABLESPACE %s RESET (%s)" % (pg_quote_identifier(self.name, 'database'), setting)
+        return exec_sql(self, query, ddl=True)
+
+    def __set_setting(self, setting):
+        """Set tablespace setting.
+
+        Return True if success, otherwise, return False.
+
+        args:
+            setting (str) -- string in format "setting_name = 'setting_value'"
+        """
+        query = "ALTER TABLESPACE %s SET (%s)" % (pg_quote_identifier(self.name, 'database'), setting)
+        return exec_sql(self, query, ddl=True)
+
+
+# ===========================================
+# Module execution.
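+# Editorial sketch (not part of the module): for a hypothetical tablespace "ssd"
+# and set={'random_page_cost': 1, 'seq_page_cost': 'reset'}, set_settings() above
+# would emit roughly:
+#
+#   ALTER TABLESPACE "ssd" SET (random_page_cost = '1')
+#   ALTER TABLESPACE "ssd" RESET (seq_page_cost)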
+#
+
+
+def main():
+    argument_spec = postgres_common_argument_spec()
+    argument_spec.update(
+        tablespace=dict(type='str', required=True, aliases=['name']),
+        state=dict(type='str', default="present", choices=["absent", "present"]),
+        location=dict(type='path', aliases=['path']),
+        owner=dict(type='str'),
+        set=dict(type='dict'),
+        rename_to=dict(type='str'),
+        db=dict(type='str', aliases=['login_db']),
+        session_role=dict(type='str'),
+    )
+
+    module = AnsibleModule(
+        argument_spec=argument_spec,
+        supports_check_mode=True,
+    )
+
+    tablespace = module.params["tablespace"]
+    state = module.params["state"]
+    location = module.params["location"]
+    owner = module.params["owner"]
+    rename_to = module.params["rename_to"]
+    settings = module.params["set"]
+
+    if state == 'absent' and (location or owner or rename_to or settings):
+        module.fail_json(msg="state=absent is mutually exclusive with location, "
+                             "owner, rename_to, and set")
+
+    conn_params = get_conn_params(module, module.params, warn_db_default=False)
+    db_connection = connect_to_db(module, conn_params, autocommit=True)
+    cursor = db_connection.cursor(cursor_factory=DictCursor)
+
+    # Switch autocommit off in check_mode so that changes can be rolled back:
+    if module.check_mode:
+        if PSYCOPG2_VERSION >= '2.4.2':
+            db_connection.set_session(autocommit=False)
+        else:
+            db_connection.set_isolation_level(READ_COMMITTED)
+
+    # Set defaults:
+    autocommit = False
+    changed = False
+
+    ##############
+    # Create PgTablespace object and do main job:
+    tblspace = PgTablespace(module, cursor, tablespace)
+
+    # If the tablespace exists with a different location, fail:
+    if tblspace.exists and location and location != tblspace.location:
+        module.fail_json(msg="Tablespace '%s' exists with different location '%s'" % (tblspace.name, tblspace.location))
+
+    # Create new tablespace:
+    if not tblspace.exists and state == 'present':
+        if rename_to:
+            module.fail_json(msg="Tablespace %s does not exist, nothing to rename" % tablespace)
+
+        if not location:
+            module.fail_json(msg="'location' parameter must be passed with "
+                                 "state=present if the tablespace doesn't exist")
+
+        # Because CREATE TABLESPACE cannot be run inside a transaction block:
+        autocommit = True
+        if PSYCOPG2_VERSION >= '2.4.2':
+            db_connection.set_session(autocommit=True)
+        else:
+            db_connection.set_isolation_level(AUTOCOMMIT)
+
+        changed = tblspace.create(location)
+
+    # Drop non-existing tablespace:
+    elif not tblspace.exists and state == 'absent':
+        # Nothing to do:
+        module.fail_json(msg="Cannot drop nonexistent tablespace '%s'" % tblspace.name)
+
+    # Drop existing tablespace:
+    elif tblspace.exists and state == 'absent':
+        # Because DROP TABLESPACE cannot be run inside a transaction block:
+        autocommit = True
+        if PSYCOPG2_VERSION >= '2.4.2':
+            db_connection.set_session(autocommit=True)
+        else:
+            db_connection.set_isolation_level(AUTOCOMMIT)
+
+        changed = tblspace.drop()
+
+    # Rename tablespace:
+    elif tblspace.exists and rename_to:
+        if tblspace.name != rename_to:
+            changed = tblspace.rename(rename_to)
+
+    if state == 'present':
+        # Refresh information:
+        tblspace.get_info()
+
+    # Change owner and settings:
+    if state == 'present' and tblspace.exists:
+        if owner:
+            changed = tblspace.set_owner(owner)
+
+        if settings:
+            changed = tblspace.set_settings(settings)
+
+        tblspace.get_info()
+
+    # Roll back if possible and in check_mode:
+    if not autocommit:
+        if module.check_mode:
+            db_connection.rollback()
+        else:
+            db_connection.commit()
+
+    cursor.close()
+    db_connection.close()
+
+    # Make return values:
+    kw = dict(
+        changed=changed,
+        state='present',
+        tablespace=tblspace.name,
+        owner=tblspace.owner,
+        queries=tblspace.executed_queries,
+        options=tblspace.settings,
+        location=tblspace.location,
+    )
+
+    if state == 'present':
+        kw['state'] = 'present'
+
+        if tblspace.new_name:
+            kw['newname'] = tblspace.new_name
+
+    elif state == 'absent':
+        kw['state'] = 'absent'
+
+    module.exit_json(**kw)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/database/postgresql/postgresql_user.py b/plugins/modules/database/postgresql/postgresql_user.py
new file mode 100644
index 0000000000..3012c34a6f
--- /dev/null
+++ b/plugins/modules/database/postgresql/postgresql_user.py
@@ -0,0 +1,919 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+    'metadata_version': '1.1',
+    'status': ['stableinterface'],
+    'supported_by': 'community'
+}
+
+DOCUMENTATION = r'''
+---
+module: postgresql_user
+short_description: Add or remove a user (role) from a PostgreSQL server instance
+description:
+- Adds or removes a user (role) from a PostgreSQL server instance
+  ("cluster" in PostgreSQL terminology) and, optionally,
+  grants the user access to an existing database or tables.
+- A user is a role with login privilege.
+- The fundamental function of the module is to create, or delete, users from
+  a PostgreSQL instance. Privilege assignment, or removal, is an optional
+  step, which works on one database at a time. This allows the module to
+  be called several times in the same playbook to modify the permissions on
+  different databases, or to grant permissions to already existing users.
+- A user cannot be removed until all the privileges have been stripped from
+  the user. In such a situation, if the module tries to remove the user it
+  will fail. To prevent this, the fail_on_user option signals
+  the module to try to remove the user, but if not possible keep going; the
+  module will report if changes happened and separately if the user was
+  removed or not.
+options:
+  name:
+    description:
+    - Name of the user (role) to add or remove.
+    type: str
+    required: true
+    aliases:
+    - user
+  password:
+    description:
+    - Set the user's password; before 1.4 this was required.
+    - Password can be passed unhashed or hashed (MD5-hashed).
+    - An unhashed password will automatically be hashed when saved into the
+      database if the C(encrypted) parameter is set, otherwise it will be saved in
+      plain text format.
+    - When passing a hashed password it must be generated with the format
+      C('str["md5"] + md5[ password + username ]'), resulting in a total of
+      35 characters. An easy way to do this is C(echo "md5$(echo -n
+      'verysecretpasswordJOE' | md5sum | awk '{print $1}')").
+    - Note that if the provided password string is already in MD5-hashed
+      format, then it is used as-is, regardless of the C(encrypted) parameter.
+    type: str
+  db:
+    description:
+    - Name of database to connect to and where user's permissions will be granted.
+    type: str
+    aliases:
+    - login_db
+  fail_on_user:
+    description:
+    - If C(yes), fail when the user (role) can't be removed. Otherwise just log and continue.
+    default: 'yes'
+    type: bool
+    aliases:
+    - fail_on_role
+  priv:
+    description:
+    - "Slash-separated PostgreSQL privileges string: C(priv1/priv2), where
+      privileges can be defined for a database ( allowed options - 'CREATE',
+      'CONNECT', 'TEMPORARY', 'TEMP', 'ALL'. For example C(CONNECT) ) or
+      for a table ( allowed options - 'SELECT', 'INSERT', 'UPDATE', 'DELETE',
+      'TRUNCATE', 'REFERENCES', 'TRIGGER', 'ALL'. For example
+      C(table:SELECT) ). Mixed example of this string:
+      C(CONNECT/CREATE/table1:SELECT/table2:INSERT)."
+    type: str
+  role_attr_flags:
+    description:
+    - "PostgreSQL user attributes string in the format: CREATEDB,CREATEROLE,SUPERUSER."
+    - Note that '[NO]CREATEUSER' is deprecated.
+    - To create a simple role to be used as a group, use the C(NOLOGIN) flag.
+    type: str
+    choices: [ '[NO]SUPERUSER', '[NO]CREATEROLE', '[NO]CREATEDB',
+               '[NO]INHERIT', '[NO]LOGIN', '[NO]REPLICATION', '[NO]BYPASSRLS' ]
+  session_role:
+    description:
+    - Switch to session_role after connecting.
+    - The specified session_role must be a role that the current login_user is a member of.
+    - Permissions checking for SQL commands is carried out as though the session_role were the one that had logged in originally.
+    type: str
+  state:
+    description:
+    - The user (role) state.
+    type: str
+    default: present
+    choices: [ absent, present ]
+  encrypted:
+    description:
+    - Whether the password is stored hashed in the database.
+    - Passwords can be passed already hashed or unhashed, and PostgreSQL
+      ensures the stored password is hashed when C(encrypted) is set.
+    - "Note: PostgreSQL 10 and newer does not support unhashed passwords."
+    - Prior to Ansible 2.6, this was C(no) by default.
+    default: 'yes'
+    type: bool
+  expires:
+    description:
+    - The date at which the user's password is to expire.
+    - If set to C('infinity'), the user's password never expires.
+    - Note that this value should be a valid SQL date and time type.
+    type: str
+  no_password_changes:
+    description:
+    - If C(yes), don't inspect the database for password changes. Effective when
+      C(pg_authid) is not accessible (such as on AWS RDS). Otherwise, make
+      password changes as necessary.
+    default: 'no'
+    type: bool
+  conn_limit:
+    description:
+    - Specifies the user (role) connection limit.
+    type: int
+  ssl_mode:
+    description:
+    - Determines whether or with what priority a secure SSL TCP/IP connection will be negotiated with the server.
+    - See U(https://www.postgresql.org/docs/current/static/libpq-ssl.html) for more information on the modes.
+    - Default of C(prefer) matches libpq default.
+    type: str
+    default: prefer
+    choices: [ allow, disable, prefer, require, verify-ca, verify-full ]
+  ca_cert:
+    description:
+    - Specifies the name of a file containing SSL certificate authority (CA) certificate(s).
+    - If the file exists, the server's certificate will be verified to be signed by one of these authorities.
+    type: str
+    aliases: [ ssl_rootcert ]
+  groups:
+    description:
+    - The list of groups (roles) that need to be granted to the user.
+    type: list
+    elements: str
+  comment:
+    description:
+    - Add a comment on the user (equal to the COMMENT ON ROLE statement result).
+    type: str
+notes:
+- The module creates a user (role) with login privilege by default.
+  Use NOLOGIN role_attr_flags to change this behaviour.
+- If you specify PUBLIC as the user (role), then the privilege changes will apply to all users (roles).
+  You may not specify password or role_attr_flags when the PUBLIC user is specified.
+seealso: +- module: postgresql_privs +- module: postgresql_membership +- module: postgresql_owner +- name: PostgreSQL database roles + description: Complete reference of the PostgreSQL database roles documentation. + link: https://www.postgresql.org/docs/current/user-manag.html +author: +- Ansible Core Team +extends_documentation_fragment: +- community.general.postgres + +''' + +EXAMPLES = r''' +- name: Connect to acme database, create django user, and grant access to database and products table + postgresql_user: + db: acme + name: django + password: ceec4eif7ya + priv: "CONNECT/products:ALL" + expires: "Jan 31 2020" + +- name: Add a comment on django user + postgresql_user: + db: acme + name: django + comment: This is a test user + +# Connect to default database, create rails user, set its password (MD5-hashed), +# and grant privilege to create other databases and demote rails from super user status if user exists +- name: Create rails user, set MD5-hashed password, grant privs + postgresql_user: + name: rails + password: md59543f1d82624df2b31672ec0f7050460 + role_attr_flags: CREATEDB,NOSUPERUSER + +- name: Connect to acme database and remove test user privileges from there + postgresql_user: + db: acme + name: test + priv: "ALL/products:ALL" + state: absent + fail_on_user: no + +- name: Connect to test database, remove test user from cluster + postgresql_user: + db: test + name: test + priv: ALL + state: absent + +- name: Connect to acme database and set user's password with no expire date + postgresql_user: + db: acme + name: django + password: mysupersecretword + priv: "CONNECT/products:ALL" + expires: infinity + +# Example privileges string format +# INSERT,UPDATE/table:SELECT/anothertable:ALL + +- name: Connect to test database and remove an existing user's password + postgresql_user: + db: test + user: test + password: "" + +- name: Create user test and grant group user_ro and user_rw to it + postgresql_user: + name: test + groups: + - user_ro + - user_rw +''' + +RETURN = r''' +queries: + description: List of executed queries. 
+ returned: always + type: list + sample: ['CREATE USER "alice"', 'GRANT CONNECT ON DATABASE "acme" TO "alice"'] + version_added: '2.8' +''' + +import itertools +import re +import traceback +from hashlib import md5 + +try: + import psycopg2 + from psycopg2.extras import DictCursor +except ImportError: + # psycopg2 is checked by connect_to_db() + # from ansible.module_utils.postgres + pass + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.database import pg_quote_identifier, SQLParseError +from ansible_collections.community.general.plugins.module_utils.postgres import ( + connect_to_db, + get_conn_params, + PgMembership, + postgres_common_argument_spec, +) +from ansible.module_utils._text import to_bytes, to_native +from ansible.module_utils.six import iteritems + + +FLAGS = ('SUPERUSER', 'CREATEROLE', 'CREATEDB', 'INHERIT', 'LOGIN', 'REPLICATION') +FLAGS_BY_VERSION = {'BYPASSRLS': 90500} + +VALID_PRIVS = dict(table=frozenset(('SELECT', 'INSERT', 'UPDATE', 'DELETE', 'TRUNCATE', 'REFERENCES', 'TRIGGER', 'ALL')), + database=frozenset( + ('CREATE', 'CONNECT', 'TEMPORARY', 'TEMP', 'ALL')), + ) + +# map to cope with idiosyncracies of SUPERUSER and LOGIN +PRIV_TO_AUTHID_COLUMN = dict(SUPERUSER='rolsuper', CREATEROLE='rolcreaterole', + CREATEDB='rolcreatedb', INHERIT='rolinherit', LOGIN='rolcanlogin', + REPLICATION='rolreplication', BYPASSRLS='rolbypassrls') + +executed_queries = [] + + +class InvalidFlagsError(Exception): + pass + + +class InvalidPrivsError(Exception): + pass + +# =========================================== +# PostgreSQL module specific support methods. +# + + +def user_exists(cursor, user): + # The PUBLIC user is a special case that is always there + if user == 'PUBLIC': + return True + query = "SELECT rolname FROM pg_roles WHERE rolname=%(user)s" + cursor.execute(query, {'user': user}) + return cursor.rowcount > 0 + + +def user_add(cursor, user, password, role_attr_flags, encrypted, expires, conn_limit): + """Create a new database user (role).""" + # Note: role_attr_flags escaped by parse_role_attrs and encrypted is a + # literal + query_password_data = dict(password=password, expires=expires) + query = ['CREATE USER "%(user)s"' % + {"user": user}] + if password is not None and password != '': + query.append("WITH %(crypt)s" % {"crypt": encrypted}) + query.append("PASSWORD %(password)s") + if expires is not None: + query.append("VALID UNTIL %(expires)s") + if conn_limit is not None: + query.append("CONNECTION LIMIT %(conn_limit)s" % {"conn_limit": conn_limit}) + query.append(role_attr_flags) + query = ' '.join(query) + executed_queries.append(query) + cursor.execute(query, query_password_data) + return True + + +def user_should_we_change_password(current_role_attrs, user, password, encrypted): + """Check if we should change the user's password. + + Compare the proposed password with the existing one, comparing + hashes if encrypted. If we can't access it assume yes. + """ + + if current_role_attrs is None: + # on some databases, E.g. AWS RDS instances, there is no access to + # the pg_authid relation to check the pre-existing password, so we + # just assume password is different + return True + + # Do we actually need to do anything? + pwchanging = False + if password is not None: + # Empty password means that the role shouldn't have a password, which + # means we need to check if the current password is None. 
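+        # Editorial note: the stored MD5 format compared below is
+        # 'md5' + md5(password + username), e.g. (hypothetical values):
+        #
+        #   from hashlib import md5
+        #   'md5' + md5(b'verysecretpassword' + b'joe').hexdigest()
+        #
+        # which yields a 35-character string starting with 'md5'.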
+ if password == '': + if current_role_attrs['rolpassword'] is not None: + pwchanging = True + # 32: MD5 hashes are represented as a sequence of 32 hexadecimal digits + # 3: The size of the 'md5' prefix + # When the provided password looks like a MD5-hash, value of + # 'encrypted' is ignored. + elif (password.startswith('md5') and len(password) == 32 + 3) or encrypted == 'UNENCRYPTED': + if password != current_role_attrs['rolpassword']: + pwchanging = True + elif encrypted == 'ENCRYPTED': + hashed_password = 'md5{0}'.format(md5(to_bytes(password) + to_bytes(user)).hexdigest()) + if hashed_password != current_role_attrs['rolpassword']: + pwchanging = True + + return pwchanging + + +def user_alter(db_connection, module, user, password, role_attr_flags, encrypted, expires, no_password_changes, conn_limit): + """Change user password and/or attributes. Return True if changed, False otherwise.""" + changed = False + + cursor = db_connection.cursor(cursor_factory=DictCursor) + # Note: role_attr_flags escaped by parse_role_attrs and encrypted is a + # literal + if user == 'PUBLIC': + if password is not None: + module.fail_json(msg="cannot change the password for PUBLIC user") + elif role_attr_flags != '': + module.fail_json(msg="cannot change the role_attr_flags for PUBLIC user") + else: + return False + + # Handle passwords. + if not no_password_changes and (password is not None or role_attr_flags != '' or expires is not None or conn_limit is not None): + # Select password and all flag-like columns in order to verify changes. + try: + select = "SELECT * FROM pg_authid where rolname=%(user)s" + cursor.execute(select, {"user": user}) + # Grab current role attributes. + current_role_attrs = cursor.fetchone() + except psycopg2.ProgrammingError: + current_role_attrs = None + db_connection.rollback() + + pwchanging = user_should_we_change_password(current_role_attrs, user, password, encrypted) + + if current_role_attrs is None: + try: + # AWS RDS instances does not allow user to access pg_authid + # so try to get current_role_attrs from pg_roles tables + select = "SELECT * FROM pg_roles where rolname=%(user)s" + cursor.execute(select, {"user": user}) + # Grab current role attributes from pg_roles + current_role_attrs = cursor.fetchone() + except psycopg2.ProgrammingError as e: + db_connection.rollback() + module.fail_json(msg="Failed to get role details for current user %s: %s" % (user, e)) + + role_attr_flags_changing = False + if role_attr_flags: + role_attr_flags_dict = {} + for r in role_attr_flags.split(' '): + if r.startswith('NO'): + role_attr_flags_dict[r.replace('NO', '', 1)] = False + else: + role_attr_flags_dict[r] = True + + for role_attr_name, role_attr_value in role_attr_flags_dict.items(): + if current_role_attrs[PRIV_TO_AUTHID_COLUMN[role_attr_name]] != role_attr_value: + role_attr_flags_changing = True + + if expires is not None: + cursor.execute("SELECT %s::timestamptz;", (expires,)) + expires_with_tz = cursor.fetchone()[0] + expires_changing = expires_with_tz != current_role_attrs.get('rolvaliduntil') + else: + expires_changing = False + + conn_limit_changing = (conn_limit is not None and conn_limit != current_role_attrs['rolconnlimit']) + + if not pwchanging and not role_attr_flags_changing and not expires_changing and not conn_limit_changing: + return False + + alter = ['ALTER USER "%(user)s"' % {"user": user}] + if pwchanging: + if password != '': + alter.append("WITH %(crypt)s" % {"crypt": encrypted}) + alter.append("PASSWORD %(password)s") + else: + alter.append("WITH PASSWORD 
NULL") + alter.append(role_attr_flags) + elif role_attr_flags: + alter.append('WITH %s' % role_attr_flags) + if expires is not None: + alter.append("VALID UNTIL %(expires)s") + if conn_limit is not None: + alter.append("CONNECTION LIMIT %(conn_limit)s" % {"conn_limit": conn_limit}) + + query_password_data = dict(password=password, expires=expires) + try: + cursor.execute(' '.join(alter), query_password_data) + changed = True + except psycopg2.InternalError as e: + if e.pgcode == '25006': + # Handle errors due to read-only transactions indicated by pgcode 25006 + # ERROR: cannot execute ALTER ROLE in a read-only transaction + changed = False + module.fail_json(msg=e.pgerror, exception=traceback.format_exc()) + return changed + else: + raise psycopg2.InternalError(e) + except psycopg2.NotSupportedError as e: + module.fail_json(msg=e.pgerror, exception=traceback.format_exc()) + + elif no_password_changes and role_attr_flags != '': + # Grab role information from pg_roles instead of pg_authid + select = "SELECT * FROM pg_roles where rolname=%(user)s" + cursor.execute(select, {"user": user}) + # Grab current role attributes. + current_role_attrs = cursor.fetchone() + + role_attr_flags_changing = False + + if role_attr_flags: + role_attr_flags_dict = {} + for r in role_attr_flags.split(' '): + if r.startswith('NO'): + role_attr_flags_dict[r.replace('NO', '', 1)] = False + else: + role_attr_flags_dict[r] = True + + for role_attr_name, role_attr_value in role_attr_flags_dict.items(): + if current_role_attrs[PRIV_TO_AUTHID_COLUMN[role_attr_name]] != role_attr_value: + role_attr_flags_changing = True + + if not role_attr_flags_changing: + return False + + alter = ['ALTER USER "%(user)s"' % + {"user": user}] + if role_attr_flags: + alter.append('WITH %s' % role_attr_flags) + + try: + cursor.execute(' '.join(alter)) + except psycopg2.InternalError as e: + if e.pgcode == '25006': + # Handle errors due to read-only transactions indicated by pgcode 25006 + # ERROR: cannot execute ALTER ROLE in a read-only transaction + changed = False + module.fail_json(msg=e.pgerror, exception=traceback.format_exc()) + return changed + else: + raise psycopg2.InternalError(e) + + # Grab new role attributes. + cursor.execute(select, {"user": user}) + new_role_attrs = cursor.fetchone() + + # Detect any differences between current_ and new_role_attrs. + changed = current_role_attrs != new_role_attrs + + return changed + + +def user_delete(cursor, user): + """Try to remove a user. Returns True if successful otherwise False""" + cursor.execute("SAVEPOINT ansible_pgsql_user_delete") + try: + query = 'DROP USER "%s"' % user + executed_queries.append(query) + cursor.execute(query) + except Exception: + cursor.execute("ROLLBACK TO SAVEPOINT ansible_pgsql_user_delete") + cursor.execute("RELEASE SAVEPOINT ansible_pgsql_user_delete") + return False + + cursor.execute("RELEASE SAVEPOINT ansible_pgsql_user_delete") + return True + + +def has_table_privileges(cursor, user, table, privs): + """ + Return the difference between the privileges that a user already has and + the privileges that they desire to have. 
+ + :returns: tuple of: + * privileges that they have and were requested + * privileges they currently hold but were not requested + * privileges requested that they do not hold + """ + cur_privs = get_table_privileges(cursor, user, table) + have_currently = cur_privs.intersection(privs) + other_current = cur_privs.difference(privs) + desired = privs.difference(cur_privs) + return (have_currently, other_current, desired) + + +def get_table_privileges(cursor, user, table): + if '.' in table: + schema, table = table.split('.', 1) + else: + schema = 'public' + query = ("SELECT privilege_type FROM information_schema.role_table_grants " + "WHERE grantee=%(user)s AND table_name=%(table)s AND table_schema=%(schema)s") + cursor.execute(query, {'user': user, 'table': table, 'schema': schema}) + return frozenset([x[0] for x in cursor.fetchall()]) + + +def grant_table_privileges(cursor, user, table, privs): + # Note: priv escaped by parse_privs + privs = ', '.join(privs) + query = 'GRANT %s ON TABLE %s TO "%s"' % ( + privs, pg_quote_identifier(table, 'table'), user) + executed_queries.append(query) + cursor.execute(query) + + +def revoke_table_privileges(cursor, user, table, privs): + # Note: priv escaped by parse_privs + privs = ', '.join(privs) + query = 'REVOKE %s ON TABLE %s FROM "%s"' % ( + privs, pg_quote_identifier(table, 'table'), user) + executed_queries.append(query) + cursor.execute(query) + + +def get_database_privileges(cursor, user, db): + priv_map = { + 'C': 'CREATE', + 'T': 'TEMPORARY', + 'c': 'CONNECT', + } + query = 'SELECT datacl FROM pg_database WHERE datname = %s' + cursor.execute(query, (db,)) + datacl = cursor.fetchone()[0] + if datacl is None: + return set() + r = re.search(r'%s\\?"?=(C?T?c?)/[^,]+,?' % user, datacl) + if r is None: + return set() + o = set() + for v in r.group(1): + o.add(priv_map[v]) + return normalize_privileges(o, 'database') + + +def has_database_privileges(cursor, user, db, privs): + """ + Return the difference between the privileges that a user already has and + the privileges that they desire to have. 
+ + :returns: tuple of: + * privileges that they have and were requested + * privileges they currently hold but were not requested + * privileges requested that they do not hold + """ + cur_privs = get_database_privileges(cursor, user, db) + have_currently = cur_privs.intersection(privs) + other_current = cur_privs.difference(privs) + desired = privs.difference(cur_privs) + return (have_currently, other_current, desired) + + +def grant_database_privileges(cursor, user, db, privs): + # Note: priv escaped by parse_privs + privs = ', '.join(privs) + if user == "PUBLIC": + query = 'GRANT %s ON DATABASE %s TO PUBLIC' % ( + privs, pg_quote_identifier(db, 'database')) + else: + query = 'GRANT %s ON DATABASE %s TO "%s"' % ( + privs, pg_quote_identifier(db, 'database'), user) + + executed_queries.append(query) + cursor.execute(query) + + +def revoke_database_privileges(cursor, user, db, privs): + # Note: priv escaped by parse_privs + privs = ', '.join(privs) + if user == "PUBLIC": + query = 'REVOKE %s ON DATABASE %s FROM PUBLIC' % ( + privs, pg_quote_identifier(db, 'database')) + else: + query = 'REVOKE %s ON DATABASE %s FROM "%s"' % ( + privs, pg_quote_identifier(db, 'database'), user) + + executed_queries.append(query) + cursor.execute(query) + + +def revoke_privileges(cursor, user, privs): + if privs is None: + return False + + revoke_funcs = dict(table=revoke_table_privileges, + database=revoke_database_privileges) + check_funcs = dict(table=has_table_privileges, + database=has_database_privileges) + + changed = False + for type_ in privs: + for name, privileges in iteritems(privs[type_]): + # Check that any of the privileges requested to be removed are + # currently granted to the user + differences = check_funcs[type_](cursor, user, name, privileges) + if differences[0]: + revoke_funcs[type_](cursor, user, name, privileges) + changed = True + return changed + + +def grant_privileges(cursor, user, privs): + if privs is None: + return False + + grant_funcs = dict(table=grant_table_privileges, + database=grant_database_privileges) + check_funcs = dict(table=has_table_privileges, + database=has_database_privileges) + + changed = False + for type_ in privs: + for name, privileges in iteritems(privs[type_]): + # Check that any of the privileges requested for the user are + # currently missing + differences = check_funcs[type_](cursor, user, name, privileges) + if differences[2]: + grant_funcs[type_](cursor, user, name, privileges) + changed = True + return changed + + +def parse_role_attrs(cursor, role_attr_flags): + """ + Parse role attributes string for user creation. + Format: + + attributes[,attributes,...] + + Where: + + attributes := CREATEDB,CREATEROLE,NOSUPERUSER,... + [ "[NO]SUPERUSER","[NO]CREATEROLE", "[NO]CREATEDB", + "[NO]INHERIT", "[NO]LOGIN", "[NO]REPLICATION", + "[NO]BYPASSRLS" ] + + Note: "[NO]BYPASSRLS" role attribute introduced in 9.5 + Note: "[NO]CREATEUSER" role attribute is deprecated. 
+ + """ + flags = frozenset(role.upper() for role in role_attr_flags.split(',') if role) + + valid_flags = frozenset(itertools.chain(FLAGS, get_valid_flags_by_version(cursor))) + valid_flags = frozenset(itertools.chain(valid_flags, ('NO%s' % flag for flag in valid_flags))) + + if not flags.issubset(valid_flags): + raise InvalidFlagsError('Invalid role_attr_flags specified: %s' % + ' '.join(flags.difference(valid_flags))) + + return ' '.join(flags) + + +def normalize_privileges(privs, type_): + new_privs = set(privs) + if 'ALL' in new_privs: + new_privs.update(VALID_PRIVS[type_]) + new_privs.remove('ALL') + if 'TEMP' in new_privs: + new_privs.add('TEMPORARY') + new_privs.remove('TEMP') + + return new_privs + + +def parse_privs(privs, db): + """ + Parse privilege string to determine permissions for database db. + Format: + + privileges[/privileges/...] + + Where: + + privileges := DATABASE_PRIVILEGES[,DATABASE_PRIVILEGES,...] | + TABLE_NAME:TABLE_PRIVILEGES[,TABLE_PRIVILEGES,...] + """ + if privs is None: + return privs + + o_privs = { + 'database': {}, + 'table': {} + } + for token in privs.split('/'): + if ':' not in token: + type_ = 'database' + name = db + priv_set = frozenset(x.strip().upper() + for x in token.split(',') if x.strip()) + else: + type_ = 'table' + name, privileges = token.split(':', 1) + priv_set = frozenset(x.strip().upper() + for x in privileges.split(',') if x.strip()) + + if not priv_set.issubset(VALID_PRIVS[type_]): + raise InvalidPrivsError('Invalid privs specified for %s: %s' % + (type_, ' '.join(priv_set.difference(VALID_PRIVS[type_])))) + + priv_set = normalize_privileges(priv_set, type_) + o_privs[type_][name] = priv_set + + return o_privs + + +def get_valid_flags_by_version(cursor): + """ + Some role attributes were introduced after certain versions. We want to + compile a list of valid flags against the current Postgres version. + """ + current_version = cursor.connection.server_version + + return [ + flag + for flag, version_introduced in FLAGS_BY_VERSION.items() + if current_version >= version_introduced + ] + + +def get_comment(cursor, user): + """Get user's comment.""" + query = ("SELECT pg_catalog.shobj_description(r.oid, 'pg_authid') " + "FROM pg_catalog.pg_roles r " + "WHERE r.rolname = %(user)s") + cursor.execute(query, {'user': user}) + return cursor.fetchone()[0] + + +def add_comment(cursor, user, comment): + """Add comment on user.""" + if comment != get_comment(cursor, user): + query = 'COMMENT ON ROLE "%s" IS ' % user + cursor.execute(query + '%(comment)s', {'comment': comment}) + executed_queries.append(cursor.mogrify(query + '%(comment)s', {'comment': comment})) + return True + else: + return False + + +# =========================================== +# Module execution. 
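+# Editorial sketch (not part of the module): for the hypothetical input
+# priv='CONNECT/products:SELECT,INSERT' with db='acme', parse_privs() above
+# returns roughly:
+#
+#   {'database': {'acme': {'CONNECT'}},
+#    'table': {'products': {'SELECT', 'INSERT'}}}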
+# + +def main(): + argument_spec = postgres_common_argument_spec() + argument_spec.update( + user=dict(type='str', required=True, aliases=['name']), + password=dict(type='str', default=None, no_log=True), + state=dict(type='str', default='present', choices=['absent', 'present']), + priv=dict(type='str', default=None), + db=dict(type='str', default='', aliases=['login_db']), + fail_on_user=dict(type='bool', default='yes', aliases=['fail_on_role']), + role_attr_flags=dict(type='str', default=''), + encrypted=dict(type='bool', default='yes'), + no_password_changes=dict(type='bool', default='no'), + expires=dict(type='str', default=None), + conn_limit=dict(type='int', default=None), + session_role=dict(type='str'), + groups=dict(type='list', elements='str'), + comment=dict(type='str', default=None), + ) + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True + ) + + user = module.params["user"] + password = module.params["password"] + state = module.params["state"] + fail_on_user = module.params["fail_on_user"] + if module.params['db'] == '' and module.params["priv"] is not None: + module.fail_json(msg="privileges require a database to be specified") + privs = parse_privs(module.params["priv"], module.params["db"]) + no_password_changes = module.params["no_password_changes"] + if module.params["encrypted"]: + encrypted = "ENCRYPTED" + else: + encrypted = "UNENCRYPTED" + expires = module.params["expires"] + conn_limit = module.params["conn_limit"] + role_attr_flags = module.params["role_attr_flags"] + groups = module.params["groups"] + if groups: + groups = [e.strip() for e in groups] + comment = module.params["comment"] + + conn_params = get_conn_params(module, module.params, warn_db_default=False) + db_connection = connect_to_db(module, conn_params) + cursor = db_connection.cursor(cursor_factory=DictCursor) + + try: + role_attr_flags = parse_role_attrs(cursor, role_attr_flags) + except InvalidFlagsError as e: + module.fail_json(msg=to_native(e), exception=traceback.format_exc()) + + kw = dict(user=user) + changed = False + user_removed = False + + if state == "present": + if user_exists(cursor, user): + try: + changed = user_alter(db_connection, module, user, password, + role_attr_flags, encrypted, expires, no_password_changes, conn_limit) + except SQLParseError as e: + module.fail_json(msg=to_native(e), exception=traceback.format_exc()) + else: + try: + changed = user_add(cursor, user, password, + role_attr_flags, encrypted, expires, conn_limit) + except psycopg2.ProgrammingError as e: + module.fail_json(msg="Unable to add user with given requirement " + "due to : %s" % to_native(e), + exception=traceback.format_exc()) + except SQLParseError as e: + module.fail_json(msg=to_native(e), exception=traceback.format_exc()) + try: + changed = grant_privileges(cursor, user, privs) or changed + except SQLParseError as e: + module.fail_json(msg=to_native(e), exception=traceback.format_exc()) + + if groups: + target_roles = [] + target_roles.append(user) + pg_membership = PgMembership(module, cursor, groups, target_roles) + changed = pg_membership.grant() or changed + executed_queries.extend(pg_membership.executed_queries) + + if comment is not None: + try: + changed = add_comment(cursor, user, comment) or changed + except Exception as e: + module.fail_json(msg='Unable to add comment on role: %s' % to_native(e), + exception=traceback.format_exc()) + + else: + if user_exists(cursor, user): + if module.check_mode: + changed = True + kw['user_removed'] = True + else: + try: + 
changed = revoke_privileges(cursor, user, privs) + user_removed = user_delete(cursor, user) + except SQLParseError as e: + module.fail_json(msg=to_native(e), exception=traceback.format_exc()) + changed = changed or user_removed + if fail_on_user and not user_removed: + msg = "Unable to remove user" + module.fail_json(msg=msg) + kw['user_removed'] = user_removed + + if changed: + if module.check_mode: + db_connection.rollback() + else: + db_connection.commit() + + kw['changed'] = changed + kw['queries'] = executed_queries + module.exit_json(**kw) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/database/postgresql/postgresql_user_obj_stat_info.py b/plugins/modules/database/postgresql/postgresql_user_obj_stat_info.py new file mode 100644 index 0000000000..eec41aedd0 --- /dev/null +++ b/plugins/modules/database/postgresql/postgresql_user_obj_stat_info.py @@ -0,0 +1,335 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2020, Andrew Klychkov (@Andersson007) +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = { + 'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community' +} + +DOCUMENTATION = r''' +--- +module: postgresql_user_obj_stat_info +short_description: Gather statistics about PostgreSQL user objects +description: +- Gathers statistics about PostgreSQL user objects. +options: + filter: + description: + - Limit the collected information by comma separated string or YAML list. + - Allowable values are C(functions), C(indexes), C(tables). + - By default, collects all subsets. + - Unsupported values are ignored. + type: list + elements: str + schema: + description: + - Restrict the output by certain schema. + type: str + db: + description: + - Name of database to connect. + type: str + aliases: + - login_db + session_role: + description: + - Switch to session_role after connecting. The specified session_role must + be a role that the current login_user is a member of. + - Permissions checking for SQL commands is carried out as though + the session_role were the one that had logged in originally. + type: str +notes: +- C(size) and C(total_size) returned values are presented in bytes. +- For tracking function statistics the PostgreSQL C(track_functions) parameter must be enabled. + See U(https://www.postgresql.org/docs/current/runtime-config-statistics.html) for more information. +seealso: +- module: postgresql_info +- module: postgresql_ping +- name: PostgreSQL statistics collector reference + description: Complete reference of the PostgreSQL statistics collector documentation. 
+ link: https://www.postgresql.org/docs/current/monitoring-stats.html +author: +- Andrew Klychkov (@Andersson007) +extends_documentation_fragment: +- community.general.postgres + +''' + +EXAMPLES = r''' +- name: Collect information about all supported user objects of the acme database + postgresql_user_obj_stat_info: + db: acme + +- name: Collect information about all supported user objects in the custom schema of the acme database + postgresql_user_obj_stat_info: + db: acme + schema: custom + +- name: Collect information about user tables and indexes in the acme database + postgresql_user_obj_stat_info: + db: acme + filter: tables, indexes +''' + +RETURN = r''' +indexes: + description: User index statistics + returned: always + type: dict + sample: {"public": {"test_id_idx": {"idx_scan": 0, "idx_tup_fetch": 0, "idx_tup_read": 0, "relname": "test", "size": 8192, ...}}} +tables: + description: User table statistics. + returned: always + type: dict + sample: {"public": {"test": {"analyze_count": 3, "n_dead_tup": 0, "n_live_tup": 0, "seq_scan": 2, "size": 0, "total_size": 8192, ...}}} +functions: + description: User function statistics. + returned: always + type: dict + sample: {"public": {"inc": {"calls": 1, "funcid": 26722, "self_time": 0.23, "total_time": 0.23}}} +''' + +try: + from psycopg2.extras import DictCursor +except ImportError: + # psycopg2 is checked by connect_to_db() + # from ansible.module_utils.postgres + pass + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.postgres import ( + connect_to_db, + exec_sql, + get_conn_params, + postgres_common_argument_spec, +) +from ansible.module_utils.six import iteritems + + +# =========================================== +# PostgreSQL module specific support methods. +# + + +class PgUserObjStatInfo(): + """Class to collect information about PostgreSQL user objects. + + Args: + module (AnsibleModule): Object of AnsibleModule class. + cursor (cursor): Cursor object of psycopg2 library to work with PostgreSQL. + + Attributes: + module (AnsibleModule): Object of AnsibleModule class. + cursor (cursor): Cursor object of psycopg2 library to work with PostgreSQL. + executed_queries (list): List of executed queries. + info (dict): Statistics dictionary. + obj_func_mapping (dict): Mapping of object types to corresponding functions. + schema (str): Name of a schema to restrict stat collecting. + """ + + def __init__(self, module, cursor): + self.module = module + self.cursor = cursor + self.info = { + 'functions': {}, + 'indexes': {}, + 'tables': {}, + } + self.obj_func_mapping = { + 'functions': self.get_func_stat, + 'indexes': self.get_idx_stat, + 'tables': self.get_tbl_stat, + } + self.schema = None + + def collect(self, filter_=None, schema=None): + """Collect statistics information of user objects. + + Kwargs: + filter_ (list): List of subsets which need to be collected. + schema (str): Restrict stat collecting by certain schema. + + Returns: + ``self.info``. 
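+
+        A hypothetical editorial example: ``collect(filter_=['tables'], schema='public')``
+        returns ``self.info`` with only the 'tables' subset filled, e.g.
+        ``{'functions': {}, 'indexes': {}, 'tables': {'public': {...}}}``.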
+ """ + if schema: + self.set_schema(schema) + + if filter_: + for obj_type in filter_: + obj_type = obj_type.strip() + obj_func = self.obj_func_mapping.get(obj_type) + + if obj_func is not None: + obj_func() + else: + self.module.warn("Unknown filter option '%s'" % obj_type) + + else: + for obj_func in self.obj_func_mapping.values(): + obj_func() + + return self.info + + def get_func_stat(self): + """Get function statistics and fill out self.info dictionary.""" + if not self.schema: + query = "SELECT * FROM pg_stat_user_functions" + result = exec_sql(self, query, add_to_executed=False) + else: + query = "SELECT * FROM pg_stat_user_functions WHERE schemaname = %s" + result = exec_sql(self, query, query_params=(self.schema,), + add_to_executed=False) + + if not result: + return + + self.__fill_out_info(result, + info_key='functions', + schema_key='schemaname', + name_key='funcname') + + def get_idx_stat(self): + """Get index statistics and fill out self.info dictionary.""" + if not self.schema: + query = "SELECT * FROM pg_stat_user_indexes" + result = exec_sql(self, query, add_to_executed=False) + else: + query = "SELECT * FROM pg_stat_user_indexes WHERE schemaname = %s" + result = exec_sql(self, query, query_params=(self.schema,), + add_to_executed=False) + + if not result: + return + + self.__fill_out_info(result, + info_key='indexes', + schema_key='schemaname', + name_key='indexrelname') + + def get_tbl_stat(self): + """Get table statistics and fill out self.info dictionary.""" + if not self.schema: + query = "SELECT * FROM pg_stat_user_tables" + result = exec_sql(self, query, add_to_executed=False) + else: + query = "SELECT * FROM pg_stat_user_tables WHERE schemaname = %s" + result = exec_sql(self, query, query_params=(self.schema,), + add_to_executed=False) + + if not result: + return + + self.__fill_out_info(result, + info_key='tables', + schema_key='schemaname', + name_key='relname') + + def __fill_out_info(self, result, info_key=None, schema_key=None, name_key=None): + # Convert result to list of dicts to handle it easier: + result = [dict(row) for row in result] + + for elem in result: + # Add schema name as a key if not presented: + if not self.info[info_key].get(elem[schema_key]): + self.info[info_key][elem[schema_key]] = {} + + # Add object name key as a subkey + # (they must be uniq over a schema, so no need additional checks): + self.info[info_key][elem[schema_key]][elem[name_key]] = {} + + # Add other other attributes to a certain index: + for key, val in iteritems(elem): + if key not in (schema_key, name_key): + self.info[info_key][elem[schema_key]][elem[name_key]][key] = val + + if info_key in ('tables', 'indexes'): + relname = elem[name_key] + schemaname = elem[schema_key] + if not self.schema: + result = exec_sql(self, "SELECT pg_relation_size ('%s.%s')" % (schemaname, relname), + add_to_executed=False) + else: + relname = '%s.%s' % (self.schema, relname) + result = exec_sql(self, "SELECT pg_relation_size (%s)", + query_params=(relname,), + add_to_executed=False) + + self.info[info_key][elem[schema_key]][elem[name_key]]['size'] = result[0][0] + + if info_key == 'tables': + relname = elem[name_key] + schemaname = elem[schema_key] + if not self.schema: + result = exec_sql(self, "SELECT pg_total_relation_size ('%s.%s')" % (schemaname, relname), + add_to_executed=False) + else: + relname = '%s.%s' % (self.schema, relname) + result = exec_sql(self, "SELECT pg_total_relation_size (%s)", + query_params=(relname,), + add_to_executed=False) + + 
self.info[info_key][elem[schema_key]][elem[name_key]]['total_size'] = result[0][0] + + def set_schema(self, schema): + """If schema exists, sets self.schema, otherwise fails.""" + query = ("SELECT 1 FROM information_schema.schemata " + "WHERE schema_name = %s") + result = exec_sql(self, query, query_params=(schema,), + add_to_executed=False) + + if result and result[0][0]: + self.schema = schema + else: + self.module.fail_json(msg="Schema '%s' does not exist" % (schema)) + + +# =========================================== +# Module execution. +# + +def main(): + argument_spec = postgres_common_argument_spec() + argument_spec.update( + db=dict(type='str', aliases=['login_db']), + filter=dict(type='list', elements='str'), + session_role=dict(type='str'), + schema=dict(type='str'), + ) + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + + filter_ = module.params["filter"] + schema = module.params["schema"] + + # Connect to DB and make cursor object: + pg_conn_params = get_conn_params(module, module.params) + # We don't need to commit anything, so, set it to False: + db_connection = connect_to_db(module, pg_conn_params, autocommit=False) + cursor = db_connection.cursor(cursor_factory=DictCursor) + + ############################ + # Create object and do work: + pg_obj_info = PgUserObjStatInfo(module, cursor) + + info_dict = pg_obj_info.collect(filter_, schema) + + # Clean up: + cursor.close() + db_connection.close() + + # Return information: + module.exit_json(**info_dict) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/database/proxysql/proxysql_backend_servers.py b/plugins/modules/database/proxysql/proxysql_backend_servers.py new file mode 100644 index 0000000000..f5fc61a9d3 --- /dev/null +++ b/plugins/modules/database/proxysql/proxysql_backend_servers.py @@ -0,0 +1,509 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright: (c) 2017, Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: proxysql_backend_servers +author: "Ben Mildren (@bmildren)" +short_description: Adds or removes mysql hosts from proxysql admin interface. +description: + - The M(proxysql_backend_servers) module adds or removes mysql hosts using + the proxysql admin interface. +options: + hostgroup_id: + description: + - The hostgroup in which this mysqld instance is included. An instance + can be part of one or more hostgroups. + default: 0 + hostname: + description: + - The ip address at which the mysqld instance can be contacted. + required: True + port: + description: + - The port at which the mysqld instance can be contacted. + default: 3306 + status: + description: + - ONLINE - Backend server is fully operational. + OFFLINE_SOFT - When a server is put into C(OFFLINE_SOFT) mode, + connections are kept in use until the current + transaction is completed. This allows to gracefully + detach a backend. + OFFLINE_HARD - When a server is put into C(OFFLINE_HARD) mode, the + existing connections are dropped, while new incoming + connections aren't accepted either. + + If omitted the proxysql database default for I(status) is C(ONLINE). + choices: [ "ONLINE", "OFFLINE_SOFT", "OFFLINE_HARD"] + weight: + description: + - The bigger the weight of a server relative to other weights, the higher + the probability of the server being chosen from the hostgroup. 
If + omitted the proxysql database default for I(weight) is 1. + compression: + description: + - If the value of I(compression) is greater than 0, new connections to + that server will use compression. If omitted the proxysql database + default for I(compression) is 0. + max_connections: + description: + - The maximum number of connections ProxySQL will open to this backend + server. If omitted the proxysql database default for I(max_connections) + is 1000. + max_replication_lag: + description: + - If greater than 0, ProxySQL will regularly monitor replication lag. If + replication lag goes above I(max_replication_lag), proxysql will + temporarily shun the server until replication catches up. If omitted + the proxysql database default for I(max_replication_lag) is 0. + use_ssl: + description: + - If I(use_ssl) is set to C(True), connections to this server will be + made using SSL connections. If omitted the proxysql database default + for I(use_ssl) is C(False). + type: bool + max_latency_ms: + description: + - Ping time is monitored regularly. If a host has a ping time greater + than I(max_latency_ms) it is excluded from the connection pool + (although the server stays ONLINE). If omitted the proxysql database + default for I(max_latency_ms) is 0. + comment: + description: + - Text field that can be used for any purposed defined by the user. + Could be a description of what the host stores, a reminder of when the + host was added or disabled, or a JSON processed by some checker script. + default: '' + state: + description: + - When C(present) - adds the host, when C(absent) - removes the host. + choices: [ "present", "absent" ] + default: present +extends_documentation_fragment: +- community.general.proxysql.managing_config +- community.general.proxysql.connectivity + +''' + +EXAMPLES = ''' +--- +# This example adds a server, it saves the mysql server config to disk, but +# avoids loading the mysql server config to runtime (this might be because +# several servers are being added and the user wants to push the config to +# runtime in a single batch using the M(proxysql_manage_config) module). It +# uses supplied credentials to connect to the proxysql admin interface. + +- proxysql_backend_servers: + login_user: 'admin' + login_password: 'admin' + hostname: 'mysql01' + state: present + load_to_runtime: False + +# This example removes a server, saves the mysql server config to disk, and +# dynamically loads the mysql server config to runtime. It uses credentials +# in a supplied config file to connect to the proxysql admin interface. + +- proxysql_backend_servers: + config_file: '~/proxysql.cnf' + hostname: 'mysql02' + state: absent +''' + +RETURN = ''' +stdout: + description: The mysql host modified or removed from proxysql + returned: On create/update will return the newly modified host, on delete + it will return the deleted record. 
+    type: dict
+    "sample": {
+        "changed": true,
+        "hostname": "192.168.52.1",
+        "msg": "Added server to mysql_hosts",
+        "server": {
+            "comment": "",
+            "compression": "0",
+            "hostgroup_id": "1",
+            "hostname": "192.168.52.1",
+            "max_connections": "1000",
+            "max_latency_ms": "0",
+            "max_replication_lag": "0",
+            "port": "3306",
+            "status": "ONLINE",
+            "use_ssl": "0",
+            "weight": "1"
+        },
+        "state": "present"
+    }
+'''
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['stableinterface'],
+                    'supported_by': 'community'}
+
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.mysql import mysql_connect, mysql_driver, mysql_driver_fail_msg
+from ansible.module_utils.six import iteritems
+from ansible.module_utils._text import to_native
+
+# ===========================================
+# proxysql module specific support methods.
+#
+
+
+def perform_checks(module):
+    if module.params["login_port"] < 0 \
+            or module.params["login_port"] > 65535:
+        module.fail_json(
+            msg="login_port must be a valid unix port number (0-65535)"
+        )
+
+    if module.params["port"] < 0 \
+            or module.params["port"] > 65535:
+        module.fail_json(
+            msg="port must be a valid unix port number (0-65535)"
+        )
+
+    if module.params["compression"]:
+        if module.params["compression"] < 0 \
+                or module.params["compression"] > 102400:
+            module.fail_json(
+                msg="compression must be set between 0 and 102400"
+            )
+
+    if module.params["max_replication_lag"]:
+        if module.params["max_replication_lag"] < 0 \
+                or module.params["max_replication_lag"] > 126144000:
+            module.fail_json(
+                msg="max_replication_lag must be set between 0 and 126144000"
+            )
+
+    if mysql_driver is None:
+        module.fail_json(msg=mysql_driver_fail_msg)
+
+
+def save_config_to_disk(cursor):
+    cursor.execute("SAVE MYSQL SERVERS TO DISK")
+    return True
+
+
+def load_config_to_runtime(cursor):
+    cursor.execute("LOAD MYSQL SERVERS TO RUNTIME")
+    return True
+
+
+class ProxySQLServer(object):
+
+    def __init__(self, module):
+        self.state = module.params["state"]
+        self.save_to_disk = module.params["save_to_disk"]
+        self.load_to_runtime = module.params["load_to_runtime"]
+
+        self.hostgroup_id = module.params["hostgroup_id"]
+        self.hostname = module.params["hostname"]
+        self.port = module.params["port"]
+
+        config_data_keys = ["status",
+                            "weight",
+                            "compression",
+                            "max_connections",
+                            "max_replication_lag",
+                            "use_ssl",
+                            "max_latency_ms",
+                            "comment"]
+
+        self.config_data = dict((k, module.params[k])
+                                for k in config_data_keys)
+
+    def check_server_config_exists(self, cursor):
+        query_string = \
+            """SELECT count(*) AS `host_count`
+               FROM mysql_servers
+               WHERE hostgroup_id = %s
+                 AND hostname = %s
+                 AND port = %s"""
+
+        query_data = \
+            [self.hostgroup_id,
+             self.hostname,
+             self.port]
+
+        cursor.execute(query_string, query_data)
+        check_count = cursor.fetchone()
+        return (int(check_count['host_count']) > 0)
+
+    def check_server_config(self, cursor):
+        query_string = \
+            """SELECT count(*) AS `host_count`
+               FROM mysql_servers
+               WHERE hostgroup_id = %s
+                 AND hostname = %s
+                 AND port = %s"""
+
+        query_data = \
+            [self.hostgroup_id,
+             self.hostname,
+             self.port]
+
+        for col, val in iteritems(self.config_data):
+            if val is not None:
+                query_data.append(val)
+                query_string += "\n  AND " + col + " = %s"
+
+        cursor.execute(query_string, query_data)
+        check_count = cursor.fetchone()
+
+        # Some cursor implementations return plain tuples rather than
+        # dict-like rows, so handle both:
+        if isinstance(check_count, tuple):
+            return int(check_count[0]) > 0
+
+        return (int(check_count['host_count']) > 0)
+
+    def 
get_server_config(self, cursor): + query_string = \ + """SELECT * + FROM mysql_servers + WHERE hostgroup_id = %s + AND hostname = %s + AND port = %s""" + + query_data = \ + [self.hostgroup_id, + self.hostname, + self.port] + + cursor.execute(query_string, query_data) + server = cursor.fetchone() + return server + + def create_server_config(self, cursor): + query_string = \ + """INSERT INTO mysql_servers ( + hostgroup_id, + hostname, + port""" + + cols = 3 + query_data = \ + [self.hostgroup_id, + self.hostname, + self.port] + + for col, val in iteritems(self.config_data): + if val is not None: + cols += 1 + query_data.append(val) + query_string += ",\n" + col + + query_string += \ + (")\n" + + "VALUES (" + + "%s ," * cols) + + query_string = query_string[:-2] + query_string += ")" + + cursor.execute(query_string, query_data) + return True + + def update_server_config(self, cursor): + query_string = """UPDATE mysql_servers""" + + cols = 0 + query_data = [] + + for col, val in iteritems(self.config_data): + if val is not None: + cols += 1 + query_data.append(val) + if cols == 1: + query_string += "\nSET " + col + "= %s," + else: + query_string += "\n " + col + " = %s," + + query_string = query_string[:-1] + query_string += ("\nWHERE hostgroup_id = %s\n AND hostname = %s" + + "\n AND port = %s") + + query_data.append(self.hostgroup_id) + query_data.append(self.hostname) + query_data.append(self.port) + + cursor.execute(query_string, query_data) + return True + + def delete_server_config(self, cursor): + query_string = \ + """DELETE FROM mysql_servers + WHERE hostgroup_id = %s + AND hostname = %s + AND port = %s""" + + query_data = \ + [self.hostgroup_id, + self.hostname, + self.port] + + cursor.execute(query_string, query_data) + return True + + def manage_config(self, cursor, state): + if state: + if self.save_to_disk: + save_config_to_disk(cursor) + if self.load_to_runtime: + load_config_to_runtime(cursor) + + def create_server(self, check_mode, result, cursor): + if not check_mode: + result['changed'] = \ + self.create_server_config(cursor) + result['msg'] = "Added server to mysql_hosts" + result['server'] = \ + self.get_server_config(cursor) + self.manage_config(cursor, + result['changed']) + else: + result['changed'] = True + result['msg'] = ("Server would have been added to" + + " mysql_hosts, however check_mode" + + " is enabled.") + + def update_server(self, check_mode, result, cursor): + if not check_mode: + result['changed'] = \ + self.update_server_config(cursor) + result['msg'] = "Updated server in mysql_hosts" + result['server'] = \ + self.get_server_config(cursor) + self.manage_config(cursor, + result['changed']) + else: + result['changed'] = True + result['msg'] = ("Server would have been updated in" + + " mysql_hosts, however check_mode" + + " is enabled.") + + def delete_server(self, check_mode, result, cursor): + if not check_mode: + result['server'] = \ + self.get_server_config(cursor) + result['changed'] = \ + self.delete_server_config(cursor) + result['msg'] = "Deleted server from mysql_hosts" + self.manage_config(cursor, + result['changed']) + else: + result['changed'] = True + result['msg'] = ("Server would have been deleted from" + + " mysql_hosts, however check_mode is" + + " enabled.") + +# =========================================== +# Module execution. 
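+#
+# Informal sketch of the decision flow implemented by main() below:
+#
+#   state == "present":
+#       supplied config already matches a row  -> changed=False
+#       host exists but differs                -> update_server()
+#       host does not exist                    -> create_server()
+#   state == "absent":
+#       host exists                            -> delete_server()
+#       host does not exist                    -> changed=False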
+# + + +def main(): + module = AnsibleModule( + argument_spec=dict( + login_user=dict(default=None, type='str'), + login_password=dict(default=None, no_log=True, type='str'), + login_host=dict(default='127.0.0.1'), + login_unix_socket=dict(default=None), + login_port=dict(default=6032, type='int'), + config_file=dict(default='', type='path'), + hostgroup_id=dict(default=0, type='int'), + hostname=dict(required=True, type='str'), + port=dict(default=3306, type='int'), + status=dict(choices=['ONLINE', + 'OFFLINE_SOFT', + 'OFFLINE_HARD']), + weight=dict(type='int'), + compression=dict(type='int'), + max_connections=dict(type='int'), + max_replication_lag=dict(type='int'), + use_ssl=dict(type='bool'), + max_latency_ms=dict(type='int'), + comment=dict(default='', type='str'), + state=dict(default='present', choices=['present', + 'absent']), + save_to_disk=dict(default=True, type='bool'), + load_to_runtime=dict(default=True, type='bool') + ), + supports_check_mode=True + ) + + perform_checks(module) + + login_user = module.params["login_user"] + login_password = module.params["login_password"] + config_file = module.params["config_file"] + + cursor = None + try: + cursor, db_conn = mysql_connect(module, + login_user, + login_password, + config_file, + cursor_class='DictCursor') + except mysql_driver.Error as e: + module.fail_json( + msg="unable to connect to ProxySQL Admin Module.. %s" % to_native(e) + ) + + proxysql_server = ProxySQLServer(module) + result = {} + + result['state'] = proxysql_server.state + if proxysql_server.hostname: + result['hostname'] = proxysql_server.hostname + + if proxysql_server.state == "present": + try: + if not proxysql_server.check_server_config(cursor): + if not proxysql_server.check_server_config_exists(cursor): + proxysql_server.create_server(module.check_mode, + result, + cursor) + else: + proxysql_server.update_server(module.check_mode, + result, + cursor) + else: + result['changed'] = False + result['msg'] = ("The server already exists in mysql_hosts" + + " and doesn't need to be updated.") + result['server'] = \ + proxysql_server.get_server_config(cursor) + except mysql_driver.Error as e: + module.fail_json( + msg="unable to modify server.. %s" % to_native(e) + ) + + elif proxysql_server.state == "absent": + try: + if proxysql_server.check_server_config_exists(cursor): + proxysql_server.delete_server(module.check_mode, + result, + cursor) + else: + result['changed'] = False + result['msg'] = ("The server is already absent from the" + + " mysql_hosts memory configuration") + except mysql_driver.Error as e: + module.fail_json( + msg="unable to remove server.. %s" % to_native(e) + ) + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/database/proxysql/proxysql_global_variables.py b/plugins/modules/database/proxysql/proxysql_global_variables.py new file mode 100644 index 0000000000..8054a2e95b --- /dev/null +++ b/plugins/modules/database/proxysql/proxysql_global_variables.py @@ -0,0 +1,270 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright: (c) 2017, Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: proxysql_global_variables +author: "Ben Mildren (@bmildren)" +short_description: Gets or sets the proxysql global variables. 
+description:
+   - The M(proxysql_global_variables) module gets or sets the proxysql global
+     variables.
+options:
+  variable:
+    description:
+      - Defines which variable should be returned, or if I(value) is specified
+        which variable should be updated.
+    required: True
+  value:
+    description:
+      - Defines a value the variable specified using I(variable) should be set
+        to.
+extends_documentation_fragment:
+- community.general.proxysql.managing_config
+- community.general.proxysql.connectivity
+
+'''
+
+EXAMPLES = '''
+---
+# This example sets the value of a variable, saves the mysql admin variables
+# config to disk, and dynamically loads the mysql admin variables config to
+# runtime. It uses supplied credentials to connect to the proxysql admin
+# interface.
+
+- proxysql_global_variables:
+    login_user: 'admin'
+    login_password: 'admin'
+    variable: 'mysql-max_connections'
+    value: 4096
+
+# This example gets the value of a variable. It uses credentials in a
+# supplied config file to connect to the proxysql admin interface.
+
+- proxysql_global_variables:
+    config_file: '~/proxysql.cnf'
+    variable: 'mysql-default_query_delay'
+'''
+
+RETURN = '''
+stdout:
+    description: Returns the mysql variable supplied with its associated value.
+    returned: Returns the current variable and value, or the newly set value
+              for the variable supplied.
+    type: dict
+    "sample": {
+        "changed": false,
+        "msg": "The variable has already been set to the supplied value",
+        "var": {
+            "variable_name": "mysql-poll_timeout",
+            "variable_value": "3000"
+        }
+    }
+'''
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['stableinterface'],
+                    'supported_by': 'community'}
+
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.mysql import mysql_connect, mysql_driver, mysql_driver_fail_msg
+from ansible.module_utils._text import to_native
+
+# ===========================================
+# proxysql module specific support methods.
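+#
+# The helpers below operate on the admin interface's global_variables table.
+# For example, set_config() issues an UPDATE of the following shape
+# (illustrative values only):
+#
+#   UPDATE global_variables
+#   SET variable_value = '4096'
+#   WHERE variable_name = 'mysql-max_connections'
+#
+# and save_config_to_disk()/load_config_to_runtime() then persist either the
+# ADMIN or the MYSQL variable class, chosen from the variable name prefix.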
+# + + +def perform_checks(module): + if module.params["login_port"] < 0 \ + or module.params["login_port"] > 65535: + module.fail_json( + msg="login_port must be a valid unix port number (0-65535)" + ) + + if mysql_driver is None: + module.fail_json(msg=mysql_driver_fail_msg) + + +def save_config_to_disk(variable, cursor): + if variable.startswith("admin"): + cursor.execute("SAVE ADMIN VARIABLES TO DISK") + else: + cursor.execute("SAVE MYSQL VARIABLES TO DISK") + return True + + +def load_config_to_runtime(variable, cursor): + if variable.startswith("admin"): + cursor.execute("LOAD ADMIN VARIABLES TO RUNTIME") + else: + cursor.execute("LOAD MYSQL VARIABLES TO RUNTIME") + return True + + +def check_config(variable, value, cursor): + query_string = \ + """SELECT count(*) AS `variable_count` + FROM global_variables + WHERE variable_name = %s and variable_value = %s""" + + query_data = \ + [variable, value] + + cursor.execute(query_string, query_data) + check_count = cursor.fetchone() + + if isinstance(check_count, tuple): + return int(check_count[0]) > 0 + + return (int(check_count['variable_count']) > 0) + + +def get_config(variable, cursor): + + query_string = \ + """SELECT * + FROM global_variables + WHERE variable_name = %s""" + + query_data = \ + [variable, ] + + cursor.execute(query_string, query_data) + row_count = cursor.rowcount + resultset = cursor.fetchone() + + if row_count > 0: + return resultset + else: + return False + + +def set_config(variable, value, cursor): + + query_string = \ + """UPDATE global_variables + SET variable_value = %s + WHERE variable_name = %s""" + + query_data = \ + [value, variable] + + cursor.execute(query_string, query_data) + return True + + +def manage_config(variable, save_to_disk, load_to_runtime, cursor, state): + if state: + if save_to_disk: + save_config_to_disk(variable, cursor) + if load_to_runtime: + load_config_to_runtime(variable, cursor) + +# =========================================== +# Module execution. +# + + +def main(): + module = AnsibleModule( + argument_spec=dict( + login_user=dict(default=None, type='str'), + login_password=dict(default=None, no_log=True, type='str'), + login_host=dict(default="127.0.0.1"), + login_unix_socket=dict(default=None), + login_port=dict(default=6032, type='int'), + config_file=dict(default="", type='path'), + variable=dict(required=True, type='str'), + value=dict(), + save_to_disk=dict(default=True, type='bool'), + load_to_runtime=dict(default=True, type='bool') + ), + supports_check_mode=True + ) + + perform_checks(module) + + login_user = module.params["login_user"] + login_password = module.params["login_password"] + config_file = module.params["config_file"] + variable = module.params["variable"] + value = module.params["value"] + save_to_disk = module.params["save_to_disk"] + load_to_runtime = module.params["load_to_runtime"] + + cursor = None + try: + cursor, db_conn = mysql_connect(module, + login_user, + login_password, + config_file, + cursor_class='DictCursor') + except mysql_driver.Error as e: + module.fail_json( + msg="unable to connect to ProxySQL Admin Module.. %s" % to_native(e) + ) + + result = {} + + if not value: + try: + if get_config(variable, cursor): + result['changed'] = False + result['msg'] = \ + "Returned the variable and it's current value" + result['var'] = get_config(variable, cursor) + else: + module.fail_json( + msg="The variable \"%s\" was not found" % variable + ) + + except mysql_driver.Error as e: + module.fail_json( + msg="unable to get config.. 
%s" % to_native(e) + ) + else: + try: + if get_config(variable, cursor): + if not check_config(variable, value, cursor): + if not module.check_mode: + result['changed'] = set_config(variable, value, cursor) + result['msg'] = \ + "Set the variable to the supplied value" + result['var'] = get_config(variable, cursor) + manage_config(variable, + save_to_disk, + load_to_runtime, + cursor, + result['changed']) + else: + result['changed'] = True + result['msg'] = ("Variable would have been set to" + + " the supplied value, however" + + " check_mode is enabled.") + else: + result['changed'] = False + result['msg'] = ("The variable is already been set to" + + " the supplied value") + result['var'] = get_config(variable, cursor) + else: + module.fail_json( + msg="The variable \"%s\" was not found" % variable + ) + + except mysql_driver.Error as e: + module.fail_json( + msg="unable to set config.. %s" % to_native(e) + ) + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/database/proxysql/proxysql_manage_config.py b/plugins/modules/database/proxysql/proxysql_manage_config.py new file mode 100644 index 0000000000..6b17c4b1f0 --- /dev/null +++ b/plugins/modules/database/proxysql/proxysql_manage_config.py @@ -0,0 +1,217 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright: (c) 2017, Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: proxysql_manage_config + +author: "Ben Mildren (@bmildren)" +short_description: Writes the proxysql configuration settings between layers. +description: + - The M(proxysql_global_variables) module writes the proxysql configuration + settings between layers. Currently this module will always report a + changed state, so should typically be used with WHEN however this will + change in a future version when the CHECKSUM table commands are available + for all tables in proxysql. +options: + action: + description: + - The supplied I(action) combines with the supplied I(direction) to + provide the semantics of how we want to move the I(config_settings) + between the I(config_layers). + choices: [ "LOAD", "SAVE" ] + required: True + config_settings: + description: + - The I(config_settings) specifies which configuration we're writing. + choices: [ "MYSQL USERS", "MYSQL SERVERS", "MYSQL QUERY RULES", + "MYSQL VARIABLES", "ADMIN VARIABLES", "SCHEDULER" ] + required: True + direction: + description: + - FROM - denotes we're reading values FROM the supplied I(config_layer) + and writing to the next layer. + TO - denotes we're reading from the previous layer and writing TO the + supplied I(config_layer)." + choices: [ "FROM", "TO" ] + required: True + config_layer: + description: + - RUNTIME - represents the in-memory data structures of ProxySQL used by + the threads that are handling the requests. + MEMORY - (sometimes also referred as main) represents the in-memory + SQLite3 database. + DISK - represents the on-disk SQLite3 database. + CONFIG - is the classical config file. You can only LOAD FROM the + config file. + choices: [ "MEMORY", "DISK", "RUNTIME", "CONFIG" ] + required: True +extends_documentation_fragment: +- community.general.proxysql.connectivity + +''' + +EXAMPLES = ''' +--- +# This example saves the mysql users config from memory to disk. It uses +# supplied credentials to connect to the proxysql admin interface. 
+
+- proxysql_manage_config:
+    login_user: 'admin'
+    login_password: 'admin'
+    action: "SAVE"
+    config_settings: "MYSQL USERS"
+    direction: "FROM"
+    config_layer: "MEMORY"
+
+# This example loads the mysql query rules config from memory to runtime. It
+# uses credentials in a supplied config file to connect to the proxysql admin
+# interface.
+
+- proxysql_manage_config:
+    config_file: '~/proxysql.cnf'
+    action: "LOAD"
+    config_settings: "MYSQL QUERY RULES"
+    direction: "TO"
+    config_layer: "RUNTIME"
+'''
+
+RETURN = '''
+stdout:
+    description: Simply reports whether the action reported a change.
+    returned: Currently the returned value will always be changed=True.
+    type: dict
+    "sample": {
+        "changed": true
+    }
+'''
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['stableinterface'],
+                    'supported_by': 'community'}
+
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.mysql import mysql_connect, mysql_driver, mysql_driver_fail_msg
+from ansible.module_utils._text import to_native
+
+# ===========================================
+# proxysql module specific support methods.
+#
+
+
+def perform_checks(module):
+    if module.params["login_port"] < 0 \
+            or module.params["login_port"] > 65535:
+        module.fail_json(
+            msg="login_port must be a valid unix port number (0-65535)"
+        )
+
+    if module.params["config_layer"] == 'CONFIG' and \
+            (module.params["action"] != 'LOAD' or
+             module.params["direction"] != 'FROM'):
+
+        if (module.params["action"] != 'LOAD' and
+                module.params["direction"] != 'FROM'):
+            msg_string = ("Neither the action \"%s\" nor the direction" +
+                          " \"%s\" is a valid combination with the CONFIG" +
+                          " config_layer")
+            module.fail_json(msg=msg_string % (module.params["action"],
+                                               module.params["direction"]))
+
+        elif module.params["action"] != 'LOAD':
+            msg_string = ("The action \"%s\" is not a valid combination" +
+                          " with the CONFIG config_layer")
+            module.fail_json(msg=msg_string % module.params["action"])
+
+        else:
+            msg_string = ("The direction \"%s\" is not a valid combination" +
+                          " with the CONFIG config_layer")
+            module.fail_json(msg=msg_string % module.params["direction"])
+
+    if mysql_driver is None:
+        module.fail_json(msg=mysql_driver_fail_msg)
+
+
+def manage_config(manage_config_settings, cursor):
+
+    query_string = "%s" % ' '.join(manage_config_settings)
+
+    cursor.execute(query_string)
+    return True
+
+# ===========================================
+# Module execution.
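+#
+# manage_config() joins the four validated parameters into a single admin
+# statement. For example, the first task in EXAMPLES above
+# (action=SAVE, config_settings='MYSQL USERS', direction=FROM,
+# config_layer=MEMORY) produces:
+#
+#   SAVE MYSQL USERS FROM MEMORY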
+#
+
+
+def main():
+    module = AnsibleModule(
+        argument_spec=dict(
+            login_user=dict(default=None, type='str'),
+            login_password=dict(default=None, no_log=True, type='str'),
+            login_host=dict(default="127.0.0.1"),
+            login_unix_socket=dict(default=None),
+            login_port=dict(default=6032, type='int'),
+            config_file=dict(default="", type='path'),
+            action=dict(required=True, choices=['LOAD',
+                                                'SAVE']),
+            config_settings=dict(required=True, choices=['MYSQL USERS',
+                                                         'MYSQL SERVERS',
+                                                         'MYSQL QUERY RULES',
+                                                         'MYSQL VARIABLES',
+                                                         'ADMIN VARIABLES',
+                                                         'SCHEDULER']),
+            direction=dict(required=True, choices=['FROM',
+                                                   'TO']),
+            config_layer=dict(required=True, choices=['MEMORY',
+                                                      'DISK',
+                                                      'RUNTIME',
+                                                      'CONFIG'])
+        ),
+        supports_check_mode=True
+    )
+
+    perform_checks(module)
+
+    login_user = module.params["login_user"]
+    login_password = module.params["login_password"]
+    config_file = module.params["config_file"]
+    action = module.params["action"]
+    config_settings = module.params["config_settings"]
+    direction = module.params["direction"]
+    config_layer = module.params["config_layer"]
+
+    cursor = None
+    try:
+        cursor, db_conn = mysql_connect(module,
+                                        login_user,
+                                        login_password,
+                                        config_file)
+    except mysql_driver.Error as e:
+        module.fail_json(
+            msg="unable to connect to ProxySQL Admin Module.. %s" % to_native(e)
+        )
+
+    result = {}
+
+    manage_config_settings = \
+        [action, config_settings, direction, config_layer]
+
+    try:
+        result['changed'] = manage_config(manage_config_settings,
+                                          cursor)
+    except mysql_driver.Error as e:
+        module.fail_json(
+            msg="unable to manage config.. %s" % to_native(e)
+        )
+
+    module.exit_json(**result)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/database/proxysql/proxysql_mysql_users.py b/plugins/modules/database/proxysql/proxysql_mysql_users.py
new file mode 100644
index 0000000000..0cc3ff8cd6
--- /dev/null
+++ b/plugins/modules/database/proxysql/proxysql_mysql_users.py
@@ -0,0 +1,477 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: proxysql_mysql_users
+author: "Ben Mildren (@bmildren)"
+short_description: Adds or removes mysql users from proxysql admin interface.
+description:
+   - The M(proxysql_mysql_users) module adds or removes mysql users using the
+     proxysql admin interface.
+options:
+  username:
+    description:
+      - Name of the user connecting to the mysqld or ProxySQL instance.
+    required: True
+  password:
+    description:
+      - Password of the user connecting to the mysqld or ProxySQL instance.
+  active:
+    description:
+      - A user with I(active) set to C(False) will be tracked in the database,
+        but will never be loaded in the in-memory data structures. If omitted
+        the proxysql database default for I(active) is C(True).
+    type: bool
+  use_ssl:
+    description:
+      - If I(use_ssl) is set to C(True), connections by this user will be made
+        using SSL connections. If omitted the proxysql database default for
+        I(use_ssl) is C(False).
+    type: bool
+  default_hostgroup:
+    description:
+      - If there is no matching rule for the queries sent by this user, the
+        traffic it generates is sent to the specified hostgroup.
+        If omitted the proxysql database default for I(default_hostgroup)
+        is 0.
+  default_schema:
+    description:
+      - The schema to which the connection should change by default.
+  transaction_persistent:
+    description:
+      - If this is set for the user with which the MySQL client is connecting
+        to ProxySQL (thus a "frontend" user), transactions started within a
+        hostgroup will remain within that hostgroup regardless of any other
+        rules.
+        If omitted the proxysql database default for I(transaction_persistent)
+        is C(False).
+    type: bool
+  fast_forward:
+    description:
+      - If I(fast_forward) is set to C(True), I(fast_forward) will bypass the
+        query processing layer (rewriting, caching) and pass through the query
+        directly as is to the backend server. If omitted the proxysql database
+        default for I(fast_forward) is C(False).
+    type: bool
+  backend:
+    description:
+      - If I(backend) is set to C(True), this (username, password) pair is
+        used for authenticating to the ProxySQL instance.
+    default: True
+    type: bool
+  frontend:
+    description:
+      - If I(frontend) is set to C(True), this (username, password) pair is
+        used for authenticating to the mysqld servers against any hostgroup.
+    default: True
+    type: bool
+  max_connections:
+    description:
+      - The maximum number of connections ProxySQL will open to the backend for
+        this user. If omitted the proxysql database default for
+        I(max_connections) is 10000.
+  state:
+    description:
+      - When C(present) - adds the user, when C(absent) - removes the user.
+    choices: [ "present", "absent" ]
+    default: present
+extends_documentation_fragment:
+- community.general.proxysql.managing_config
+- community.general.proxysql.connectivity
+
+'''
+
+EXAMPLES = '''
+---
+# This example adds a user. It saves the mysql user config to disk, but
+# avoids loading the mysql user config to runtime (this might be because
+# several users are being added and the user wants to push the config to
+# runtime in a single batch using the M(proxysql_manage_config) module). It
+# uses supplied credentials to connect to the proxysql admin interface.
+
+- proxysql_mysql_users:
+    login_user: 'admin'
+    login_password: 'admin'
+    username: 'productiondba'
+    state: present
+    load_to_runtime: False
+
+# This example removes a user, saves the mysql user config to disk, and
+# dynamically loads the mysql user config to runtime. It uses credentials
+# in a supplied config file to connect to the proxysql admin interface.
+
+- proxysql_mysql_users:
+    config_file: '~/proxysql.cnf'
+    username: 'mysqlboy'
+    state: absent
+'''
+
+RETURN = '''
+stdout:
+    description: The mysql user modified or removed from proxysql
+    returned: On create/update will return the newly modified user, on delete
+              it will return the deleted record.
+    type: dict
+    sample:
+        changed: true
+        msg: Added user to mysql_users
+        state: present
+        user:
+            active: 1
+            backend: 1
+            default_hostgroup: 1
+            default_schema: null
+            fast_forward: 0
+            frontend: 1
+            max_connections: 10000
+            password: VALUE_SPECIFIED_IN_NO_LOG_PARAMETER
+            schema_locked: 0
+            transaction_persistent: 0
+            use_ssl: 0
+            username: guest_ro
+'''
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['stableinterface'],
+                    'supported_by': 'community'}
+
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.mysql import mysql_connect, mysql_driver, mysql_driver_fail_msg
+from ansible.module_utils.six import iteritems
+from ansible.module_utils._text import to_native
+
+# ===========================================
+# proxysql module specific support methods.
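+#
+# As in proxysql_backend_servers, only the parameters the caller actually
+# supplied (non-None values) are folded into the generated statements. A
+# hypothetical create for username=guest_ro with only password and
+# default_hostgroup set would execute something like:
+#
+#   INSERT INTO mysql_users (
+#   username,
+#   backend,
+#   frontend,
+#   password,
+#   default_hostgroup)
+#   VALUES (%s ,%s ,%s ,%s ,%s)
+#
+# with the remaining columns left to their proxysql database defaults.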
+# + + +def perform_checks(module): + if module.params["login_port"] < 0 \ + or module.params["login_port"] > 65535: + module.fail_json( + msg="login_port must be a valid unix port number (0-65535)" + ) + + if mysql_driver is None: + module.fail_json(msg=mysql_driver_fail_msg) + + +def save_config_to_disk(cursor): + cursor.execute("SAVE MYSQL USERS TO DISK") + return True + + +def load_config_to_runtime(cursor): + cursor.execute("LOAD MYSQL USERS TO RUNTIME") + return True + + +class ProxySQLUser(object): + + def __init__(self, module): + self.state = module.params["state"] + self.save_to_disk = module.params["save_to_disk"] + self.load_to_runtime = module.params["load_to_runtime"] + + self.username = module.params["username"] + self.backend = module.params["backend"] + self.frontend = module.params["frontend"] + + config_data_keys = ["password", + "active", + "use_ssl", + "default_hostgroup", + "default_schema", + "transaction_persistent", + "fast_forward", + "max_connections"] + + self.config_data = dict((k, module.params[k]) + for k in config_data_keys) + + def check_user_config_exists(self, cursor): + query_string = \ + """SELECT count(*) AS `user_count` + FROM mysql_users + WHERE username = %s + AND backend = %s + AND frontend = %s""" + + query_data = \ + [self.username, + self.backend, + self.frontend] + + cursor.execute(query_string, query_data) + check_count = cursor.fetchone() + return (int(check_count['user_count']) > 0) + + def check_user_privs(self, cursor): + query_string = \ + """SELECT count(*) AS `user_count` + FROM mysql_users + WHERE username = %s + AND backend = %s + AND frontend = %s""" + + query_data = \ + [self.username, + self.backend, + self.frontend] + + for col, val in iteritems(self.config_data): + if val is not None: + query_data.append(val) + query_string += "\n AND " + col + " = %s" + + cursor.execute(query_string, query_data) + check_count = cursor.fetchone() + return (int(check_count['user_count']) > 0) + + def get_user_config(self, cursor): + query_string = \ + """SELECT * + FROM mysql_users + WHERE username = %s + AND backend = %s + AND frontend = %s""" + + query_data = \ + [self.username, + self.backend, + self.frontend] + + cursor.execute(query_string, query_data) + user = cursor.fetchone() + return user + + def create_user_config(self, cursor): + query_string = \ + """INSERT INTO mysql_users ( + username, + backend, + frontend""" + + cols = 3 + query_data = \ + [self.username, + self.backend, + self.frontend] + + for col, val in iteritems(self.config_data): + if val is not None: + cols += 1 + query_data.append(val) + query_string += ",\n" + col + + query_string += \ + (")\n" + + "VALUES (" + + "%s ," * cols) + + query_string = query_string[:-2] + query_string += ")" + + cursor.execute(query_string, query_data) + return True + + def update_user_config(self, cursor): + query_string = """UPDATE mysql_users""" + + cols = 0 + query_data = [] + + for col, val in iteritems(self.config_data): + if val is not None: + cols += 1 + query_data.append(val) + if cols == 1: + query_string += "\nSET " + col + "= %s," + else: + query_string += "\n " + col + " = %s," + + query_string = query_string[:-1] + query_string += ("\nWHERE username = %s\n AND backend = %s" + + "\n AND frontend = %s") + + query_data.append(self.username) + query_data.append(self.backend) + query_data.append(self.frontend) + + cursor.execute(query_string, query_data) + return True + + def delete_user_config(self, cursor): + query_string = \ + """DELETE FROM mysql_users + WHERE username = %s + AND 
backend = %s + AND frontend = %s""" + + query_data = \ + [self.username, + self.backend, + self.frontend] + + cursor.execute(query_string, query_data) + return True + + def manage_config(self, cursor, state): + if state: + if self.save_to_disk: + save_config_to_disk(cursor) + if self.load_to_runtime: + load_config_to_runtime(cursor) + + def create_user(self, check_mode, result, cursor): + if not check_mode: + result['changed'] = \ + self.create_user_config(cursor) + result['msg'] = "Added user to mysql_users" + result['user'] = \ + self.get_user_config(cursor) + self.manage_config(cursor, + result['changed']) + else: + result['changed'] = True + result['msg'] = ("User would have been added to" + + " mysql_users, however check_mode" + + " is enabled.") + + def update_user(self, check_mode, result, cursor): + if not check_mode: + result['changed'] = \ + self.update_user_config(cursor) + result['msg'] = "Updated user in mysql_users" + result['user'] = \ + self.get_user_config(cursor) + self.manage_config(cursor, + result['changed']) + else: + result['changed'] = True + result['msg'] = ("User would have been updated in" + + " mysql_users, however check_mode" + + " is enabled.") + + def delete_user(self, check_mode, result, cursor): + if not check_mode: + result['user'] = \ + self.get_user_config(cursor) + result['changed'] = \ + self.delete_user_config(cursor) + result['msg'] = "Deleted user from mysql_users" + self.manage_config(cursor, + result['changed']) + else: + result['changed'] = True + result['msg'] = ("User would have been deleted from" + + " mysql_users, however check_mode is" + + " enabled.") + +# =========================================== +# Module execution. +# + + +def main(): + module = AnsibleModule( + argument_spec=dict( + login_user=dict(default=None, type='str'), + login_password=dict(default=None, no_log=True, type='str'), + login_host=dict(default="127.0.0.1"), + login_unix_socket=dict(default=None), + login_port=dict(default=6032, type='int'), + config_file=dict(default='', type='path'), + username=dict(required=True, type='str'), + password=dict(no_log=True, type='str'), + active=dict(type='bool'), + use_ssl=dict(type='bool'), + default_hostgroup=dict(type='int'), + default_schema=dict(type='str'), + transaction_persistent=dict(type='bool'), + fast_forward=dict(type='bool'), + backend=dict(default=True, type='bool'), + frontend=dict(default=True, type='bool'), + max_connections=dict(type='int'), + state=dict(default='present', choices=['present', + 'absent']), + save_to_disk=dict(default=True, type='bool'), + load_to_runtime=dict(default=True, type='bool') + ), + supports_check_mode=True + ) + + perform_checks(module) + + login_user = module.params["login_user"] + login_password = module.params["login_password"] + config_file = module.params["config_file"] + + cursor = None + try: + cursor, db_conn = mysql_connect(module, + login_user, + login_password, + config_file, + cursor_class='DictCursor') + except mysql_driver.Error as e: + module.fail_json( + msg="unable to connect to ProxySQL Admin Module.. 
%s" % to_native(e) + ) + + proxysql_user = ProxySQLUser(module) + result = {} + + result['state'] = proxysql_user.state + if proxysql_user.username: + result['username'] = proxysql_user.username + + if proxysql_user.state == "present": + try: + if not proxysql_user.check_user_privs(cursor): + if not proxysql_user.check_user_config_exists(cursor): + proxysql_user.create_user(module.check_mode, + result, + cursor) + else: + proxysql_user.update_user(module.check_mode, + result, + cursor) + else: + result['changed'] = False + result['msg'] = ("The user already exists in mysql_users" + + " and doesn't need to be updated.") + result['user'] = \ + proxysql_user.get_user_config(cursor) + except mysql_driver.Error as e: + module.fail_json( + msg="unable to modify user.. %s" % to_native(e) + ) + + elif proxysql_user.state == "absent": + try: + if proxysql_user.check_user_config_exists(cursor): + proxysql_user.delete_user(module.check_mode, + result, + cursor) + else: + result['changed'] = False + result['msg'] = ("The user is already absent from the" + + " mysql_users memory configuration") + except mysql_driver.Error as e: + module.fail_json( + msg="unable to remove user.. %s" % to_native(e) + ) + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/database/proxysql/proxysql_query_rules.py b/plugins/modules/database/proxysql/proxysql_query_rules.py new file mode 100644 index 0000000000..ae3b71a702 --- /dev/null +++ b/plugins/modules/database/proxysql/proxysql_query_rules.py @@ -0,0 +1,613 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright: (c) 2017, Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: proxysql_query_rules +author: "Ben Mildren (@bmildren)" +short_description: Modifies query rules using the proxysql admin interface. +description: + - The M(proxysql_query_rules) module modifies query rules using the + proxysql admin interface. +options: + rule_id: + description: + - The unique id of the rule. Rules are processed in rule_id order. + active: + description: + - A rule with I(active) set to C(False) will be tracked in the database, + but will be never loaded in the in-memory data structures. + type: bool + username: + description: + - Filtering criteria matching username. If I(username) is non-NULL, a + query will match only if the connection is made with the correct + username. + schemaname: + description: + - Filtering criteria matching schemaname. If I(schemaname) is non-NULL, a + query will match only if the connection uses schemaname as its default + schema. + flagIN: + description: + - Used in combination with I(flagOUT) and I(apply) to create chains of + rules. + client_addr: + description: + - Match traffic from a specific source. + proxy_addr: + description: + - Match incoming traffic on a specific local IP. + proxy_port: + description: + - Match incoming traffic on a specific local port. + digest: + description: + - Match queries with a specific digest, as returned by + stats_mysql_query_digest.digest. + match_digest: + description: + - Regular expression that matches the query digest. The dialect of + regular expressions used is that of re2 - https://github.com/google/re2 + match_pattern: + description: + - Regular expression that matches the query text. 
The dialect of regular
+        expressions used is that of re2 - https://github.com/google/re2
+  negate_match_pattern:
+    description:
+      - If I(negate_match_pattern) is set to C(True), only queries not matching
+        the query text will be considered a match. This acts as a NOT
+        operator in front of the regular expression matching against
+        match_pattern.
+    type: bool
+  flagOUT:
+    description:
+      - Used in combination with I(flagIN) and I(apply) to create chains of
+        rules. When set, I(flagOUT) signifies the I(flagIN) to be used in the
+        next chain of rules.
+  replace_pattern:
+    description:
+      - This is the pattern with which to replace the matched pattern. Note
+        that this is optional, and when omitted, the query processor will only
+        cache, route, or set other parameters without rewriting.
+  destination_hostgroup:
+    description:
+      - Route matched queries to this hostgroup. This happens unless there is a
+        started transaction and the logged in user has
+        I(transaction_persistent) set to C(True) (see M(proxysql_mysql_users)).
+  cache_ttl:
+    description:
+      - The number of milliseconds for which to cache the result of the query.
+        Note in ProxySQL 1.1 I(cache_ttl) was in seconds.
+  timeout:
+    description:
+      - The maximum timeout in milliseconds with which the matched or rewritten
+        query should be executed. If a query runs for longer than the specified
+        threshold, the query is automatically killed. If timeout is not
+        specified, the global variable mysql-default_query_timeout applies.
+  retries:
+    description:
+      - The maximum number of times a query needs to be re-executed in case of
+        detected failure during the execution of the query. If retries is not
+        specified, the global variable mysql-query_retries_on_failure applies.
+  delay:
+    description:
+      - Number of milliseconds to delay the execution of the query. This is
+        essentially a throttling mechanism and QoS, and allows a way to give
+        priority to some queries over others. This value is added to the
+        mysql-default_query_delay global variable that applies to all queries.
+  mirror_flagOUT:
+    description:
+      - Enables query mirroring. If set I(mirror_flagOUT) can be used to
+        evaluate the mirrored query against the specified chain of rules.
+  mirror_hostgroup:
+    description:
+      - Enables query mirroring. If set I(mirror_hostgroup) can be used to
+        mirror queries to the same or different hostgroup.
+  error_msg:
+    description:
+      - Query will be blocked, and the specified error_msg will be returned to
+        the client.
+  log:
+    description:
+      - Query will be logged.
+    type: bool
+  apply:
+    description:
+      - Used in combination with I(flagIN) and I(flagOUT) to create chains of
+        rules. Setting apply to True signifies the last rule to be applied.
+    type: bool
+  comment:
+    description:
+      - Free form text field, usable for a descriptive comment of the query
+        rule.
+  state:
+    description:
+      - When C(present) - adds the rule, when C(absent) - removes the rule.
+    choices: [ "present", "absent" ]
+    default: present
+  force_delete:
+    description:
+      - By default we avoid deleting more than one rule in a single batch,
+        however if you need this behaviour and you're not concerned about the
+        rules deleted, you can set I(force_delete) to C(True).
+    default: False
+    type: bool
+extends_documentation_fragment:
+- community.general.proxysql.managing_config
+- community.general.proxysql.connectivity
+
+'''
+
+EXAMPLES = '''
+---
+# This example adds a rule to redirect queries from a specific user to another
+# hostgroup. It saves the mysql query rule config to disk, but avoids loading
+# the mysql query rules config to runtime (this might be because several
+# rules are being added and the user wants to push the config to runtime in a
+# single batch using the M(proxysql_manage_config) module). It uses supplied
+# credentials to connect to the proxysql admin interface.
+
+- proxysql_query_rules:
+    login_user: admin
+    login_password: admin
+    username: 'guest_ro'
+    match_pattern: "^SELECT.*"
+    destination_hostgroup: 1
+    active: 1
+    retries: 3
+    state: present
+    load_to_runtime: False
+
+# This example removes all rules that use the username 'guest_ro', saves the
+# mysql query rule config to disk, and dynamically loads the mysql query rule
+# config to runtime. It uses credentials in a supplied config file to connect
+# to the proxysql admin interface.
+
+- proxysql_query_rules:
+    config_file: '~/proxysql.cnf'
+    username: 'guest_ro'
+    state: absent
+    force_delete: true
+'''
+
+RETURN = '''
+stdout:
+    description: The mysql query rule modified or removed from proxysql
+    returned: On create/update will return the newly modified rule, in all
+              other cases will return a list of rules that match the supplied
+              criteria.
+    type: dict
+    "sample": {
+        "changed": true,
+        "msg": "Added rule to mysql_query_rules",
+        "rules": [
+            {
+                "active": "0",
+                "apply": "0",
+                "cache_ttl": null,
+                "client_addr": null,
+                "comment": null,
+                "delay": null,
+                "destination_hostgroup": 1,
+                "digest": null,
+                "error_msg": null,
+                "flagIN": "0",
+                "flagOUT": null,
+                "log": null,
+                "match_digest": null,
+                "match_pattern": null,
+                "mirror_flagOUT": null,
+                "mirror_hostgroup": null,
+                "negate_match_pattern": "0",
+                "proxy_addr": null,
+                "proxy_port": null,
+                "reconnect": null,
+                "replace_pattern": null,
+                "retries": null,
+                "rule_id": "1",
+                "schemaname": null,
+                "timeout": null,
+                "username": "guest_ro"
+            }
+        ],
+        "state": "present"
+    }
+'''
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['stableinterface'],
+                    'supported_by': 'community'}
+
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.mysql import mysql_connect, mysql_driver, mysql_driver_fail_msg
+from ansible.module_utils.six import iteritems
+from ansible.module_utils._text import to_native
+
+# ===========================================
+# proxysql module specific support methods.
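+#
+# Note that every supplied option becomes part of the WHERE clause when
+# matching existing rules, so with state=absent and only username=guest_ro
+# set, delete_rule_config() composes roughly:
+#
+#   DELETE FROM mysql_query_rules
+#    WHERE username = %s
+#
+# which may match several rules at once -- hence the force_delete guard
+# enforced in main() below.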
+# + + +def perform_checks(module): + if module.params["login_port"] < 0 \ + or module.params["login_port"] > 65535: + module.fail_json( + msg="login_port must be a valid unix port number (0-65535)" + ) + + if mysql_driver is None: + module.fail_json(msg=mysql_driver_fail_msg) + + +def save_config_to_disk(cursor): + cursor.execute("SAVE MYSQL QUERY RULES TO DISK") + return True + + +def load_config_to_runtime(cursor): + cursor.execute("LOAD MYSQL QUERY RULES TO RUNTIME") + return True + + +class ProxyQueryRule(object): + + def __init__(self, module): + self.state = module.params["state"] + self.force_delete = module.params["force_delete"] + self.save_to_disk = module.params["save_to_disk"] + self.load_to_runtime = module.params["load_to_runtime"] + + config_data_keys = ["rule_id", + "active", + "username", + "schemaname", + "flagIN", + "client_addr", + "proxy_addr", + "proxy_port", + "digest", + "match_digest", + "match_pattern", + "negate_match_pattern", + "flagOUT", + "replace_pattern", + "destination_hostgroup", + "cache_ttl", + "timeout", + "retries", + "delay", + "mirror_flagOUT", + "mirror_hostgroup", + "error_msg", + "log", + "apply", + "comment"] + + self.config_data = dict((k, module.params[k]) + for k in config_data_keys) + + def check_rule_pk_exists(self, cursor): + query_string = \ + """SELECT count(*) AS `rule_count` + FROM mysql_query_rules + WHERE rule_id = %s""" + + query_data = \ + [self.config_data["rule_id"]] + + cursor.execute(query_string, query_data) + check_count = cursor.fetchone() + return (int(check_count['rule_count']) > 0) + + def check_rule_cfg_exists(self, cursor): + query_string = \ + """SELECT count(*) AS `rule_count` + FROM mysql_query_rules""" + + cols = 0 + query_data = [] + + for col, val in iteritems(self.config_data): + if val is not None: + cols += 1 + query_data.append(val) + if cols == 1: + query_string += "\n WHERE " + col + " = %s" + else: + query_string += "\n AND " + col + " = %s" + + if cols > 0: + cursor.execute(query_string, query_data) + else: + cursor.execute(query_string) + check_count = cursor.fetchone() + return int(check_count['rule_count']) + + def get_rule_config(self, cursor, created_rule_id=None): + query_string = \ + """SELECT * + FROM mysql_query_rules""" + + if created_rule_id: + query_data = [created_rule_id, ] + query_string += "\nWHERE rule_id = %s" + + cursor.execute(query_string, query_data) + rule = cursor.fetchone() + else: + cols = 0 + query_data = [] + + for col, val in iteritems(self.config_data): + if val is not None: + cols += 1 + query_data.append(val) + if cols == 1: + query_string += "\n WHERE " + col + " = %s" + else: + query_string += "\n AND " + col + " = %s" + + if cols > 0: + cursor.execute(query_string, query_data) + else: + cursor.execute(query_string) + rule = cursor.fetchall() + + return rule + + def create_rule_config(self, cursor): + query_string = \ + """INSERT INTO mysql_query_rules (""" + + cols = 0 + query_data = [] + + for col, val in iteritems(self.config_data): + if val is not None: + cols += 1 + query_data.append(val) + query_string += "\n" + col + "," + + query_string = query_string[:-1] + + query_string += \ + (")\n" + + "VALUES (" + + "%s ," * cols) + + query_string = query_string[:-2] + query_string += ")" + + cursor.execute(query_string, query_data) + new_rule_id = cursor.lastrowid + return True, new_rule_id + + def update_rule_config(self, cursor): + query_string = """UPDATE mysql_query_rules""" + + cols = 0 + query_data = [] + + for col, val in iteritems(self.config_data): + if val is not 
None and col != "rule_id": + cols += 1 + query_data.append(val) + if cols == 1: + query_string += "\nSET " + col + "= %s," + else: + query_string += "\n " + col + " = %s," + + query_string = query_string[:-1] + query_string += "\nWHERE rule_id = %s" + + query_data.append(self.config_data["rule_id"]) + + cursor.execute(query_string, query_data) + return True + + def delete_rule_config(self, cursor): + query_string = \ + """DELETE FROM mysql_query_rules""" + + cols = 0 + query_data = [] + + for col, val in iteritems(self.config_data): + if val is not None: + cols += 1 + query_data.append(val) + if cols == 1: + query_string += "\n WHERE " + col + " = %s" + else: + query_string += "\n AND " + col + " = %s" + + if cols > 0: + cursor.execute(query_string, query_data) + else: + cursor.execute(query_string) + check_count = cursor.rowcount + return True, int(check_count) + + def manage_config(self, cursor, state): + if state: + if self.save_to_disk: + save_config_to_disk(cursor) + if self.load_to_runtime: + load_config_to_runtime(cursor) + + def create_rule(self, check_mode, result, cursor): + if not check_mode: + result['changed'], new_rule_id = \ + self.create_rule_config(cursor) + result['msg'] = "Added rule to mysql_query_rules" + self.manage_config(cursor, + result['changed']) + result['rules'] = \ + self.get_rule_config(cursor, new_rule_id) + else: + result['changed'] = True + result['msg'] = ("Rule would have been added to" + + " mysql_query_rules, however" + + " check_mode is enabled.") + + def update_rule(self, check_mode, result, cursor): + if not check_mode: + result['changed'] = \ + self.update_rule_config(cursor) + result['msg'] = "Updated rule in mysql_query_rules" + self.manage_config(cursor, + result['changed']) + result['rules'] = \ + self.get_rule_config(cursor) + else: + result['changed'] = True + result['msg'] = ("Rule would have been updated in" + + " mysql_query_rules, however" + + " check_mode is enabled.") + + def delete_rule(self, check_mode, result, cursor): + if not check_mode: + result['rules'] = \ + self.get_rule_config(cursor) + result['changed'], result['rows_affected'] = \ + self.delete_rule_config(cursor) + result['msg'] = "Deleted rule from mysql_query_rules" + self.manage_config(cursor, + result['changed']) + else: + result['changed'] = True + result['msg'] = ("Rule would have been deleted from" + + " mysql_query_rules, however" + + " check_mode is enabled.") + +# =========================================== +# Module execution. 
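+#
+# Informal sketch of the rule-handling order in main() below:
+#
+#   state == "present":
+#       a rule matching all supplied options exists -> changed=False
+#       rule_id supplied and that rule_id exists    -> update_rule()
+#       otherwise                                   -> create_rule()
+#   state == "absent":
+#       one match, or force_delete enabled          -> delete_rule()
+#       several matches without force_delete        -> fail
+#       no match                                    -> changed=False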
+# + + +def main(): + module = AnsibleModule( + argument_spec=dict( + login_user=dict(default=None, type='str'), + login_password=dict(default=None, no_log=True, type='str'), + login_host=dict(default="127.0.0.1"), + login_unix_socket=dict(default=None), + login_port=dict(default=6032, type='int'), + config_file=dict(default="", type='path'), + rule_id=dict(type='int'), + active=dict(type='bool'), + username=dict(type='str'), + schemaname=dict(type='str'), + flagIN=dict(type='int'), + client_addr=dict(type='str'), + proxy_addr=dict(type='str'), + proxy_port=dict(type='int'), + digest=dict(type='str'), + match_digest=dict(type='str'), + match_pattern=dict(type='str'), + negate_match_pattern=dict(type='bool'), + flagOUT=dict(type='int'), + replace_pattern=dict(type='str'), + destination_hostgroup=dict(type='int'), + cache_ttl=dict(type='int'), + timeout=dict(type='int'), + retries=dict(type='int'), + delay=dict(type='int'), + mirror_flagOUT=dict(type='int'), + mirror_hostgroup=dict(type='int'), + error_msg=dict(type='str'), + log=dict(type='bool'), + apply=dict(type='bool'), + comment=dict(type='str'), + state=dict(default='present', choices=['present', + 'absent']), + force_delete=dict(default=False, type='bool'), + save_to_disk=dict(default=True, type='bool'), + load_to_runtime=dict(default=True, type='bool') + ), + supports_check_mode=True + ) + + perform_checks(module) + + login_user = module.params["login_user"] + login_password = module.params["login_password"] + config_file = module.params["config_file"] + + cursor = None + try: + cursor, db_conn = mysql_connect(module, + login_user, + login_password, + config_file, + cursor_class='DictCursor') + except mysql_driver.Error as e: + module.fail_json( + msg="unable to connect to ProxySQL Admin Module.. %s" % to_native(e) + ) + + proxysql_query_rule = ProxyQueryRule(module) + result = {} + + result['state'] = proxysql_query_rule.state + + if proxysql_query_rule.state == "present": + try: + if not proxysql_query_rule.check_rule_cfg_exists(cursor): + if proxysql_query_rule.config_data["rule_id"] and \ + proxysql_query_rule.check_rule_pk_exists(cursor): + proxysql_query_rule.update_rule(module.check_mode, + result, + cursor) + else: + proxysql_query_rule.create_rule(module.check_mode, + result, + cursor) + else: + result['changed'] = False + result['msg'] = ("The rule already exists in" + + " mysql_query_rules and doesn't need to be" + + " updated.") + result['rules'] = \ + proxysql_query_rule.get_rule_config(cursor) + + except mysql_driver.Error as e: + module.fail_json( + msg="unable to modify rule.. %s" % to_native(e) + ) + + elif proxysql_query_rule.state == "absent": + try: + existing_rules = proxysql_query_rule.check_rule_cfg_exists(cursor) + if existing_rules > 0: + if existing_rules == 1 or \ + proxysql_query_rule.force_delete: + proxysql_query_rule.delete_rule(module.check_mode, + result, + cursor) + else: + module.fail_json( + msg=("Operation would delete multiple rules;" + + " use force_delete to override this") + ) + else: + result['changed'] = False + result['msg'] = ("The rule is already absent from the" + + " mysql_query_rules memory configuration") + except mysql_driver.Error as e: + module.fail_json( + msg="unable to remove rule.. 
%s" % to_native(e) + ) + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/database/proxysql/proxysql_replication_hostgroups.py b/plugins/modules/database/proxysql/proxysql_replication_hostgroups.py new file mode 100644 index 0000000000..f28b6d2aca --- /dev/null +++ b/plugins/modules/database/proxysql/proxysql_replication_hostgroups.py @@ -0,0 +1,380 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright: (c) 2017, Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: proxysql_replication_hostgroups +author: "Ben Mildren (@bmildren)" +short_description: Manages replication hostgroups using the proxysql admin + interface. +description: + - Each row in mysql_replication_hostgroups represents a pair of + writer_hostgroup and reader_hostgroup. ProxySQL will monitor the value of + read_only for all the servers in specified hostgroups, and based on the + value of read_only will assign the server to the writer or reader + hostgroups. +options: + writer_hostgroup: + description: + - Id of the writer hostgroup. + required: True + reader_hostgroup: + description: + - Id of the reader hostgroup. + required: True + comment: + description: + - Text field that can be used for any purposes defined by the user. + state: + description: + - When C(present) - adds the replication hostgroup, when C(absent) - + removes the replication hostgroup. + choices: [ "present", "absent" ] + default: present +extends_documentation_fragment: +- community.general.proxysql.managing_config +- community.general.proxysql.connectivity + +''' + +EXAMPLES = ''' +--- +# This example adds a replication hostgroup, it saves the mysql server config +# to disk, but avoids loading the mysql server config to runtime (this might be +# because several replication hostgroups are being added and the user wants to +# push the config to runtime in a single batch using the +# M(proxysql_manage_config) module). It uses supplied credentials to connect +# to the proxysql admin interface. + +- proxysql_replication_hostgroups: + login_user: 'admin' + login_password: 'admin' + writer_hostgroup: 1 + reader_hostgroup: 2 + state: present + load_to_runtime: False + +# This example removes a replication hostgroup, saves the mysql server config +# to disk, and dynamically loads the mysql server config to runtime. It uses +# credentials in a supplied config file to connect to the proxysql admin +# interface. + +- proxysql_replication_hostgroups: + config_file: '~/proxysql.cnf' + writer_hostgroup: 3 + reader_hostgroup: 4 + state: absent +''' + +RETURN = ''' +stdout: + description: The replication hostgroup modified or removed from proxysql + returned: On create/update will return the newly modified group, on delete + it will return the deleted record. 
+ type: dict + "sample": { + "changed": true, + "msg": "Added replication hostgroup to mysql_replication_hostgroups", + "repl_group": { + "comment": "", + "reader_hostgroup": "1", + "writer_hostgroup": "2" + }, + "state": "present" + } +''' + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['stableinterface'], + 'supported_by': 'community'} + + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.mysql import mysql_connect, mysql_driver, mysql_driver_fail_msg +from ansible.module_utils._text import to_native + +# =========================================== +# proxysql module specific support methods. +# + + +def perform_checks(module): + if module.params["login_port"] < 0 \ + or module.params["login_port"] > 65535: + module.fail_json( + msg="login_port must be a valid unix port number (0-65535)" + ) + + if not module.params["writer_hostgroup"] >= 0: + module.fail_json( + msg="writer_hostgroup must be an integer greater than or equal to 0" + ) + + if not module.params["reader_hostgroup"] == \ + module.params["writer_hostgroup"]: + if not module.params["reader_hostgroup"] > 0: + module.fail_json( + msg=("reader_hostgroup must be an integer greater" + + " than 0") + ) + else: + module.fail_json( + msg="reader_hostgroup cannot equal writer_hostgroup" + ) + + if mysql_driver is None: + module.fail_json(msg=mysql_driver_fail_msg) + + +def save_config_to_disk(cursor): + cursor.execute("SAVE MYSQL SERVERS TO DISK") + return True + + +def load_config_to_runtime(cursor): + cursor.execute("LOAD MYSQL SERVERS TO RUNTIME") + return True + + +class ProxySQLReplicationHostgroup(object): + + def __init__(self, module): + self.state = module.params["state"] + self.save_to_disk = module.params["save_to_disk"] + self.load_to_runtime = module.params["load_to_runtime"] + self.writer_hostgroup = module.params["writer_hostgroup"] + self.reader_hostgroup = module.params["reader_hostgroup"] + self.comment = module.params["comment"] + + def check_repl_group_config(self, cursor, keys): + query_string = \ + """SELECT count(*) AS `repl_groups` + FROM mysql_replication_hostgroups + WHERE writer_hostgroup = %s + AND reader_hostgroup = %s""" + + query_data = \ + [self.writer_hostgroup, + self.reader_hostgroup] + + if self.comment and not keys: + query_string += "\n AND comment = %s" + query_data.append(self.comment) + + cursor.execute(query_string, query_data) + check_count = cursor.fetchone() + return (int(check_count['repl_groups']) > 0) + + def get_repl_group_config(self, cursor): + query_string = \ + """SELECT * + FROM mysql_replication_hostgroups + WHERE writer_hostgroup = %s + AND reader_hostgroup = %s""" + + query_data = \ + [self.writer_hostgroup, + self.reader_hostgroup] + + cursor.execute(query_string, query_data) + repl_group = cursor.fetchone() + return repl_group + + def create_repl_group_config(self, cursor): + query_string = \ + """INSERT INTO mysql_replication_hostgroups ( + writer_hostgroup, + reader_hostgroup, + comment) + VALUES (%s, %s, %s)""" + + query_data = \ + [self.writer_hostgroup, + self.reader_hostgroup, + self.comment or ''] + + cursor.execute(query_string, query_data) + return True + + def update_repl_group_config(self, cursor): + query_string = \ + """UPDATE mysql_replication_hostgroups + SET comment = %s + WHERE writer_hostgroup = %s + AND reader_hostgroup = %s""" + + query_data = \ + [self.comment, + self.writer_hostgroup, + self.reader_hostgroup] + + cursor.execute(query_string, query_data) + return True + + def 
delete_repl_group_config(self, cursor): + query_string = \ + """DELETE FROM mysql_replication_hostgroups + WHERE writer_hostgroup = %s + AND reader_hostgroup = %s""" + + query_data = \ + [self.writer_hostgroup, + self.reader_hostgroup] + + cursor.execute(query_string, query_data) + return True + + def manage_config(self, cursor, state): + if state: + if self.save_to_disk: + save_config_to_disk(cursor) + if self.load_to_runtime: + load_config_to_runtime(cursor) + + def create_repl_group(self, check_mode, result, cursor): + if not check_mode: + result['changed'] = \ + self.create_repl_group_config(cursor) + result['msg'] = "Added replication hostgroup to mysql_replication_hostgroups" + result['repl_group'] = \ + self.get_repl_group_config(cursor) + self.manage_config(cursor, + result['changed']) + else: + result['changed'] = True + result['msg'] = ("Repl group would have been added to" + + " mysql_replication_hostgroups, however" + + " check_mode is enabled.") + + def update_repl_group(self, check_mode, result, cursor): + if not check_mode: + result['changed'] = \ + self.update_repl_group_config(cursor) + result['msg'] = "Updated replication hostgroup in mysql_replication_hostgroups" + result['repl_group'] = \ + self.get_repl_group_config(cursor) + self.manage_config(cursor, + result['changed']) + else: + result['changed'] = True + result['msg'] = ("Repl group would have been updated in" + + " mysql_replication_hostgroups, however" + + " check_mode is enabled.") + + def delete_repl_group(self, check_mode, result, cursor): + if not check_mode: + result['repl_group'] = \ + self.get_repl_group_config(cursor) + result['changed'] = \ + self.delete_repl_group_config(cursor) + result['msg'] = "Deleted replication hostgroup from mysql_replication_hostgroups" + self.manage_config(cursor, + result['changed']) + else: + result['changed'] = True + result['msg'] = ("Repl group would have been deleted from" + + " mysql_replication_hostgroups, however" + + " check_mode is enabled.") + +# =========================================== +# Module execution. +# + + +def main(): + module = AnsibleModule( + argument_spec=dict( + login_user=dict(default=None, type='str'), + login_password=dict(default=None, no_log=True, type='str'), + login_host=dict(default="127.0.0.1"), + login_unix_socket=dict(default=None), + login_port=dict(default=6032, type='int'), + config_file=dict(default="", type='path'), + writer_hostgroup=dict(required=True, type='int'), + reader_hostgroup=dict(required=True, type='int'), + comment=dict(type='str'), + state=dict(default='present', choices=['present', + 'absent']), + save_to_disk=dict(default=True, type='bool'), + load_to_runtime=dict(default=True, type='bool') + ), + supports_check_mode=True + ) + + perform_checks(module) + + login_user = module.params["login_user"] + login_password = module.params["login_password"] + config_file = module.params["config_file"] + + cursor = None + try: + cursor, db_conn = mysql_connect(module, + login_user, + login_password, + config_file, + cursor_class='DictCursor') + except mysql_driver.Error as e: + module.fail_json( + msg="unable to connect to ProxySQL Admin Module.. 
%s" % to_native(e) + ) + + proxysql_repl_group = ProxySQLReplicationHostgroup(module) + result = {} + + result['state'] = proxysql_repl_group.state + + if proxysql_repl_group.state == "present": + try: + if not proxysql_repl_group.check_repl_group_config(cursor, + keys=True): + proxysql_repl_group.create_repl_group(module.check_mode, + result, + cursor) + else: + if not proxysql_repl_group.check_repl_group_config(cursor, + keys=False): + proxysql_repl_group.update_repl_group(module.check_mode, + result, + cursor) + else: + result['changed'] = False + result['msg'] = ("The repl group already exists in" + + " mysql_replication_hostgroups and" + + " doesn't need to be updated.") + result['repl_group'] = \ + proxysql_repl_group.get_repl_group_config(cursor) + + except mysql_driver.Error as e: + module.fail_json( + msg="unable to modify replication hostgroup.. %s" % to_native(e) + ) + + elif proxysql_repl_group.state == "absent": + try: + if proxysql_repl_group.check_repl_group_config(cursor, + keys=True): + proxysql_repl_group.delete_repl_group(module.check_mode, + result, + cursor) + else: + result['changed'] = False + result['msg'] = ("The repl group is already absent from the" + + " mysql_replication_hostgroups memory" + + " configuration") + + except mysql_driver.Error as e: + module.fail_json( + msg="unable to delete replication hostgroup.. %s" % to_native(e) + ) + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/database/proxysql/proxysql_scheduler.py b/plugins/modules/database/proxysql/proxysql_scheduler.py new file mode 100644 index 0000000000..464eedd461 --- /dev/null +++ b/plugins/modules/database/proxysql/proxysql_scheduler.py @@ -0,0 +1,417 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright: (c) 2017, Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: proxysql_scheduler +author: "Ben Mildren (@bmildren)" +short_description: Adds or removes schedules from the proxysql admin interface. +description: + - The M(proxysql_scheduler) module adds or removes schedules using the + proxysql admin interface. +options: + active: + description: + - A schedule with I(active) set to C(False) will be tracked in the + database, but will never be loaded in the in-memory data structures. + default: True + type: bool + interval_ms: + description: + - How often (in milliseconds) the job will be started. The minimum value + for I(interval_ms) is 100 milliseconds. + default: 10000 + filename: + description: + - Full path of the executable to be executed. + required: True + arg1: + description: + - Argument that can be passed to the job. + arg2: + description: + - Argument that can be passed to the job. + arg3: + description: + - Argument that can be passed to the job. + arg4: + description: + - Argument that can be passed to the job. + arg5: + description: + - Argument that can be passed to the job. + comment: + description: + - Text field that can be used for any purpose defined by the user. + state: + description: + - When C(present) - adds the schedule, when C(absent) - removes the + schedule. 
+ choices: [ "present", "absent" ] + default: present + force_delete: + description: + - By default we avoid deleting more than one schedule in a single batch, + however if you need this behaviour and you're not concerned about the + schedules deleted, you can set I(force_delete) to C(True). + default: False + type: bool +extends_documentation_fragment: +- community.general.proxysql.managing_config +- community.general.proxysql.connectivity + +''' + +EXAMPLES = ''' +--- +# This example adds a schedule, it saves the scheduler config to disk, but +# avoids loading the scheduler config to runtime (this might be because +# several servers are being added and the user wants to push the config to +# runtime in a single batch using the M(proxysql_manage_config) module). It +# uses supplied credentials to connect to the proxysql admin interface. + +- proxysql_scheduler: + login_user: 'admin' + login_password: 'admin' + interval_ms: 1000 + filename: "/opt/maintenance.py" + state: present + load_to_runtime: False + +# This example removes a schedule, saves the scheduler config to disk, and +# dynamically loads the scheduler config to runtime. It uses credentials +# in a supplied config file to connect to the proxysql admin interface. + +- proxysql_scheduler: + config_file: '~/proxysql.cnf' + filename: "/opt/old_script.py" + state: absent +''' + +RETURN = ''' +stdout: + description: The schedule modified or removed from proxysql + returned: On create/update will return the newly modified schedule, on + delete it will return the deleted record. + type: dict + "sample": { + "changed": true, + "filename": "/opt/test.py", + "msg": "Added schedule to scheduler", + "schedules": [ + { + "active": "1", + "arg1": null, + "arg2": null, + "arg3": null, + "arg4": null, + "arg5": null, + "comment": "", + "filename": "/opt/test.py", + "id": "1", + "interval_ms": "10000" + } + ], + "state": "present" + } +''' + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['stableinterface'], + 'supported_by': 'community'} + + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.mysql import mysql_connect, mysql_driver, mysql_driver_fail_msg +from ansible.module_utils.six import iteritems +from ansible.module_utils._text import to_native + +# =========================================== +# proxysql module specific support methods. 
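+# Editorial sketch (comments only, nothing here is executed): with arg1 and
+# comment supplied, create_schedule_config() below grows the base column list
+# and produces a parameterized INSERT along the lines of
+#
+#     INSERT INTO scheduler (
+#         active,
+#         interval_ms,
+#         filename,
+#         arg1,
+#         comment)
+#     VALUES (%s, %s, %s, %s, %s)
+#
+# where one trailing ", %s" is appended per optional column that was set.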
+# + + +def perform_checks(module): + if module.params["login_port"] < 0 \ + or module.params["login_port"] > 65535: + module.fail_json( + msg="login_port must be a valid unix port number (0-65535)" + ) + + if module.params["interval_ms"] < 100 \ + or module.params["interval_ms"] > 100000000: + module.fail_json( + msg="interval_ms must be between 100ms & 100000000ms" + ) + + if mysql_driver is None: + module.fail_json(msg=mysql_driver_fail_msg) + + +def save_config_to_disk(cursor): + cursor.execute("SAVE SCHEDULER TO DISK") + return True + + +def load_config_to_runtime(cursor): + cursor.execute("LOAD SCHEDULER TO RUNTIME") + return True + + +class ProxySQLSchedule(object): + + def __init__(self, module): + self.state = module.params["state"] + self.force_delete = module.params["force_delete"] + self.save_to_disk = module.params["save_to_disk"] + self.load_to_runtime = module.params["load_to_runtime"] + self.active = module.params["active"] + self.interval_ms = module.params["interval_ms"] + self.filename = module.params["filename"] + + config_data_keys = ["arg1", + "arg2", + "arg3", + "arg4", + "arg5", + "comment"] + + self.config_data = dict((k, module.params[k]) + for k in config_data_keys) + + def check_schedule_config(self, cursor): + query_string = \ + """SELECT count(*) AS `schedule_count` + FROM scheduler + WHERE active = %s + AND interval_ms = %s + AND filename = %s""" + + query_data = \ + [self.active, + self.interval_ms, + self.filename] + + for col, val in iteritems(self.config_data): + if val is not None: + query_data.append(val) + query_string += "\n AND " + col + " = %s" + + cursor.execute(query_string, query_data) + check_count = cursor.fetchone() + return int(check_count['schedule_count']) + + def get_schedule_config(self, cursor): + query_string = \ + """SELECT * + FROM scheduler + WHERE active = %s + AND interval_ms = %s + AND filename = %s""" + + query_data = \ + [self.active, + self.interval_ms, + self.filename] + + for col, val in iteritems(self.config_data): + if val is not None: + query_data.append(val) + query_string += "\n AND " + col + " = %s" + + cursor.execute(query_string, query_data) + schedule = cursor.fetchall() + return schedule + + def create_schedule_config(self, cursor): + query_string = \ + """INSERT INTO scheduler ( + active, + interval_ms, + filename""" + + cols = 0 + query_data = \ + [self.active, + self.interval_ms, + self.filename] + + for col, val in iteritems(self.config_data): + if val is not None: + cols += 1 + query_data.append(val) + query_string += ",\n" + col + + query_string += \ + (")\n" + + "VALUES (%s, %s, %s" + + ", %s" * cols + + ")") + + cursor.execute(query_string, query_data) + return True + + def delete_schedule_config(self, cursor): + query_string = \ + """DELETE FROM scheduler + WHERE active = %s + AND interval_ms = %s + AND filename = %s""" + + query_data = \ + [self.active, + self.interval_ms, + self.filename] + + for col, val in iteritems(self.config_data): + if val is not None: + query_data.append(val) + query_string += "\n AND " + col + " = %s" + + cursor.execute(query_string, query_data) + check_count = cursor.rowcount + return True, int(check_count) + + def manage_config(self, cursor, state): + if state: + if self.save_to_disk: + save_config_to_disk(cursor) + if self.load_to_runtime: + load_config_to_runtime(cursor) + + def create_schedule(self, check_mode, result, cursor): + if not check_mode: + result['changed'] = \ + self.create_schedule_config(cursor) + result['msg'] = "Added schedule to scheduler" + result['schedules'] 
= \ + self.get_schedule_config(cursor) + self.manage_config(cursor, + result['changed']) + else: + result['changed'] = True + result['msg'] = ("Schedule would have been added to" + + " scheduler, however check_mode" + + " is enabled.") + + def delete_schedule(self, check_mode, result, cursor): + if not check_mode: + result['schedules'] = \ + self.get_schedule_config(cursor) + result['changed'], result['rows_affected'] = \ + self.delete_schedule_config(cursor) + result['msg'] = "Deleted schedule from scheduler" + self.manage_config(cursor, + result['changed']) + else: + result['changed'] = True + result['msg'] = ("Schedule would have been deleted from" + + " scheduler, however check_mode is" + + " enabled.") + +# =========================================== +# Module execution. +# + + +def main(): + module = AnsibleModule( + argument_spec=dict( + login_user=dict(default=None, type='str'), + login_password=dict(default=None, no_log=True, type='str'), + login_host=dict(default="127.0.0.1"), + login_unix_socket=dict(default=None), + login_port=dict(default=6032, type='int'), + config_file=dict(default="", type='path'), + active=dict(default=True, type='bool'), + interval_ms=dict(default=10000, type='int'), + filename=dict(required=True, type='str'), + arg1=dict(type='str'), + arg2=dict(type='str'), + arg3=dict(type='str'), + arg4=dict(type='str'), + arg5=dict(type='str'), + comment=dict(type='str'), + state=dict(default='present', choices=['present', + 'absent']), + force_delete=dict(default=False, type='bool'), + save_to_disk=dict(default=True, type='bool'), + load_to_runtime=dict(default=True, type='bool') + ), + supports_check_mode=True + ) + + perform_checks(module) + + login_user = module.params["login_user"] + login_password = module.params["login_password"] + config_file = module.params["config_file"] + + cursor = None + try: + cursor, db_conn = mysql_connect(module, + login_user, + login_password, + config_file, + cursor_class='DictCursor') + except mysql_driver.Error as e: + module.fail_json( + msg="unable to connect to ProxySQL Admin Module.. %s" % to_native(e) + ) + + proxysql_schedule = ProxySQLSchedule(module) + result = {} + + result['state'] = proxysql_schedule.state + result['filename'] = proxysql_schedule.filename + + if proxysql_schedule.state == "present": + try: + if proxysql_schedule.check_schedule_config(cursor) <= 0: + proxysql_schedule.create_schedule(module.check_mode, + result, + cursor) + else: + result['changed'] = False + result['msg'] = ("The schedule already exists and doesn't" + + " need to be updated.") + result['schedules'] = \ + proxysql_schedule.get_schedule_config(cursor) + except mysql_driver.Error as e: + module.fail_json( + msg="unable to modify schedule.. %s" % to_native(e) + ) + + elif proxysql_schedule.state == "absent": + try: + existing_schedules = \ + proxysql_schedule.check_schedule_config(cursor) + if existing_schedules > 0: + if existing_schedules == 1 or proxysql_schedule.force_delete: + proxysql_schedule.delete_schedule(module.check_mode, + result, + cursor) + else: + module.fail_json( + msg=("Operation would delete multiple records;" + + " use force_delete to override this") + ) + else: + result['changed'] = False + result['msg'] = ("The schedule is already absent from the" + + " memory configuration") + except mysql_driver.Error as e: + module.fail_json( + msg="unable to remove schedule.. 
%s" % to_native(e) + ) + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/database/vertica/vertica_configuration.py b/plugins/modules/database/vertica/vertica_configuration.py new file mode 100644 index 0000000000..5e3ee45765 --- /dev/null +++ b/plugins/modules/database/vertica/vertica_configuration.py @@ -0,0 +1,195 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: vertica_configuration +short_description: Updates Vertica configuration parameters. +description: + - Updates Vertica configuration parameters. +options: + name: + description: + - Name of the parameter to update. + required: true + aliases: [parameter] + value: + description: + - Value of the parameter to be set. + required: true + db: + description: + - Name of the Vertica database. + cluster: + description: + - Name of the Vertica cluster. + default: localhost + port: + description: + - Vertica cluster port to connect to. + default: 5433 + login_user: + description: + - The username used to authenticate with. + default: dbadmin + login_password: + description: + - The password used to authenticate with. +notes: + - The default authentication assumes that you are either logging in as or sudo'ing + to the C(dbadmin) account on the host. + - This module uses C(pyodbc), a Python ODBC database adapter. You must ensure + that C(unixODBC) and C(pyodbc) is installed on the host and properly configured. + - Configuring C(unixODBC) for Vertica requires C(Driver = /opt/vertica/lib64/libverticaodbc.so) + to be added to the C(Vertica) section of either C(/etc/odbcinst.ini) or C($HOME/.odbcinst.ini) + and both C(ErrorMessagesPath = /opt/vertica/lib64) and C(DriverManagerEncoding = UTF-16) + to be added to the C(Driver) section of either C(/etc/vertica.ini) or C($HOME/.vertica.ini). +requirements: [ 'unixODBC', 'pyodbc' ] +author: "Dariusz Owczarek (@dareko)" +''' + +EXAMPLES = """ +- name: updating load_balance_policy + vertica_configuration: name=failovertostandbyafter value='8 hours' +""" +import traceback + +PYODBC_IMP_ERR = None +try: + import pyodbc +except ImportError: + PYODBC_IMP_ERR = traceback.format_exc() + pyodbc_found = False +else: + pyodbc_found = True + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib +from ansible.module_utils._text import to_native + + +class NotSupportedError(Exception): + pass + + +class CannotDropError(Exception): + pass + +# module specific functions + + +def get_configuration_facts(cursor, parameter_name=''): + facts = {} + cursor.execute(""" + select c.parameter_name, c.current_value, c.default_value + from configuration_parameters c + where c.node_name = 'ALL' + and (? = '' or c.parameter_name ilike ?) 
+ """, parameter_name, parameter_name) + while True: + rows = cursor.fetchmany(100) + if not rows: + break + for row in rows: + facts[row.parameter_name.lower()] = { + 'parameter_name': row.parameter_name, + 'current_value': row.current_value, + 'default_value': row.default_value} + return facts + + +def check(configuration_facts, parameter_name, current_value): + parameter_key = parameter_name.lower() + if current_value and current_value.lower() != configuration_facts[parameter_key]['current_value'].lower(): + return False + return True + + +def present(configuration_facts, cursor, parameter_name, current_value): + parameter_key = parameter_name.lower() + changed = False + if current_value and current_value.lower() != configuration_facts[parameter_key]['current_value'].lower(): + cursor.execute("select set_config_parameter('{0}', '{1}')".format(parameter_name, current_value)) + changed = True + if changed: + configuration_facts.update(get_configuration_facts(cursor, parameter_name)) + return changed + +# module logic + + +def main(): + + module = AnsibleModule( + argument_spec=dict( + parameter=dict(required=True, aliases=['name']), + value=dict(default=None), + db=dict(default=None), + cluster=dict(default='localhost'), + port=dict(default='5433'), + login_user=dict(default='dbadmin'), + login_password=dict(default=None, no_log=True), + ), supports_check_mode=True) + + if not pyodbc_found: + module.fail_json(msg=missing_required_lib('pyodbc'), exception=PYODBC_IMP_ERR) + + parameter_name = module.params['parameter'] + current_value = module.params['value'] + db = '' + if module.params['db']: + db = module.params['db'] + + changed = False + + try: + dsn = ( + "Driver=Vertica;" + "Server={0};" + "Port={1};" + "Database={2};" + "User={3};" + "Password={4};" + "ConnectionLoadBalance={5}" + ).format(module.params['cluster'], module.params['port'], db, + module.params['login_user'], module.params['login_password'], 'true') + db_conn = pyodbc.connect(dsn, autocommit=True) + cursor = db_conn.cursor() + except Exception as e: + module.fail_json(msg="Unable to connect to database: {0}.".format(to_native(e)), + exception=traceback.format_exc()) + + try: + configuration_facts = get_configuration_facts(cursor) + if module.check_mode: + changed = not check(configuration_facts, parameter_name, current_value) + else: + try: + changed = present(configuration_facts, cursor, parameter_name, current_value) + except pyodbc.Error as e: + module.fail_json(msg=to_native(e), exception=traceback.format_exc()) + except NotSupportedError as e: + module.fail_json(msg=to_native(e), ansible_facts={'vertica_configuration': configuration_facts}) + except CannotDropError as e: + module.fail_json(msg=to_native(e), ansible_facts={'vertica_configuration': configuration_facts}) + except SystemExit: + # avoid catching this on python 2.4 + raise + except Exception as e: + module.fail_json(msg=to_native(e), exception=traceback.format_exc()) + + module.exit_json(changed=changed, parameter=parameter_name, ansible_facts={'vertica_configuration': configuration_facts}) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/database/vertica/vertica_facts.py b/plugins/modules/database/vertica/vertica_facts.py new file mode 120000 index 0000000000..bf964af0ae --- /dev/null +++ b/plugins/modules/database/vertica/vertica_facts.py @@ -0,0 +1 @@ +vertica_info.py \ No newline at end of file diff --git a/plugins/modules/database/vertica/vertica_info.py b/plugins/modules/database/vertica/vertica_info.py new file mode 100644 
index 0000000000..bdb541ccd3 --- /dev/null +++ b/plugins/modules/database/vertica/vertica_info.py @@ -0,0 +1,295 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: vertica_info +short_description: Gathers Vertica database facts. +description: + - Gathers Vertica database information. + - This module was called C(vertica_facts) before Ansible 2.9, returning C(ansible_facts). + Note that the M(vertica_info) module no longer returns C(ansible_facts)! +options: + cluster: + description: + - Name of the cluster running the schema. + default: localhost + port: + description: + - Database port to connect to. + default: 5433 + db: + description: + - Name of the database running the schema. + login_user: + description: + - The username used to authenticate with. + default: dbadmin + login_password: + description: + - The password used to authenticate with. +notes: + - The default authentication assumes that you are either logging in as or sudo'ing + to the C(dbadmin) account on the host. + - This module uses C(pyodbc), a Python ODBC database adapter. You must ensure + that C(unixODBC) and C(pyodbc) are installed on the host and properly configured. + - Configuring C(unixODBC) for Vertica requires C(Driver = /opt/vertica/lib64/libverticaodbc.so) + to be added to the C(Vertica) section of either C(/etc/odbcinst.ini) or C($HOME/.odbcinst.ini) + and both C(ErrorMessagesPath = /opt/vertica/lib64) and C(DriverManagerEncoding = UTF-16) + to be added to the C(Driver) section of either C(/etc/vertica.ini) or C($HOME/.vertica.ini). +requirements: [ 'unixODBC', 'pyodbc' ] +author: "Dariusz Owczarek (@dareko)" +''' + +EXAMPLES = """ +- name: gathering vertica facts + vertica_info: db=db_name + register: result + +- name: Print schemas + debug: + msg: "{{ result.vertica_schemas }}" +""" +import traceback + +PYODBC_IMP_ERR = None +try: + import pyodbc +except ImportError: + PYODBC_IMP_ERR = traceback.format_exc() + pyodbc_found = False +else: + pyodbc_found = True + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib +from ansible.module_utils._text import to_native + + +class NotSupportedError(Exception): + pass + +# module specific functions + + +def get_schema_facts(cursor, schema=''): + facts = {} + cursor.execute(""" + select schema_name, schema_owner, create_time + from schemata + where not is_system_schema and schema_name not in ('public') + and (? = '' or schema_name ilike ?) + """, schema, schema) + while True: + rows = cursor.fetchmany(100) + if not rows: + break + for row in rows: + facts[row.schema_name.lower()] = { + 'name': row.schema_name, + 'owner': row.schema_owner, + 'create_time': str(row.create_time), + 'usage_roles': [], + 'create_roles': []} + cursor.execute(""" + select g.object_name as schema_name, r.name as role_name, + lower(g.privileges_description) privileges_description + from roles r join grants g + on g.grantee = r.name and g.object_type='SCHEMA' + and g.privileges_description like '%USAGE%' + and g.grantee not in ('public', 'dbadmin') + and (? = '' or g.object_name ilike ?) 
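+          -- editorial note: this grants query is pre-filtered to USAGE-bearing
+          -- rows; the loop below then sorts each role into create_roles or
+          -- usage_roles by checking privileges_description for 'create'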
+ """, schema, schema) + while True: + rows = cursor.fetchmany(100) + if not rows: + break + for row in rows: + schema_key = row.schema_name.lower() + if 'create' in row.privileges_description: + facts[schema_key]['create_roles'].append(row.role_name) + else: + facts[schema_key]['usage_roles'].append(row.role_name) + return facts + + +def get_user_facts(cursor, user=''): + facts = {} + cursor.execute(""" + select u.user_name, u.is_locked, u.lock_time, + p.password, p.acctexpired as is_expired, + u.profile_name, u.resource_pool, + u.all_roles, u.default_roles + from users u join password_auditor p on p.user_id = u.user_id + where not u.is_super_user + and (? = '' or u.user_name ilike ?) + """, user, user) + while True: + rows = cursor.fetchmany(100) + if not rows: + break + for row in rows: + user_key = row.user_name.lower() + facts[user_key] = { + 'name': row.user_name, + 'locked': str(row.is_locked), + 'password': row.password, + 'expired': str(row.is_expired), + 'profile': row.profile_name, + 'resource_pool': row.resource_pool, + 'roles': [], + 'default_roles': []} + if row.is_locked: + facts[user_key]['locked_time'] = str(row.lock_time) + if row.all_roles: + facts[user_key]['roles'] = row.all_roles.replace(' ', '').split(',') + if row.default_roles: + facts[user_key]['default_roles'] = row.default_roles.replace(' ', '').split(',') + return facts + + +def get_role_facts(cursor, role=''): + facts = {} + cursor.execute(""" + select r.name, r.assigned_roles + from roles r + where (? = '' or r.name ilike ?) + """, role, role) + while True: + rows = cursor.fetchmany(100) + if not rows: + break + for row in rows: + role_key = row.name.lower() + facts[role_key] = { + 'name': row.name, + 'assigned_roles': []} + if row.assigned_roles: + facts[role_key]['assigned_roles'] = row.assigned_roles.replace(' ', '').split(',') + return facts + + +def get_configuration_facts(cursor, parameter=''): + facts = {} + cursor.execute(""" + select c.parameter_name, c.current_value, c.default_value + from configuration_parameters c + where c.node_name = 'ALL' + and (? = '' or c.parameter_name ilike ?) 
+ """, parameter, parameter) + while True: + rows = cursor.fetchmany(100) + if not rows: + break + for row in rows: + facts[row.parameter_name.lower()] = { + 'parameter_name': row.parameter_name, + 'current_value': row.current_value, + 'default_value': row.default_value} + return facts + + +def get_node_facts(cursor, schema=''): + facts = {} + cursor.execute(""" + select node_name, node_address, export_address, node_state, node_type, + catalog_path + from nodes + """) + while True: + rows = cursor.fetchmany(100) + if not rows: + break + for row in rows: + facts[row.node_address] = { + 'node_name': row.node_name, + 'export_address': row.export_address, + 'node_state': row.node_state, + 'node_type': row.node_type, + 'catalog_path': row.catalog_path} + return facts + +# module logic + + +def main(): + + module = AnsibleModule( + argument_spec=dict( + cluster=dict(default='localhost'), + port=dict(default='5433'), + db=dict(default=None), + login_user=dict(default='dbadmin'), + login_password=dict(default=None, no_log=True), + ), supports_check_mode=True) + is_old_facts = module._name == 'vertica_facts' + if is_old_facts: + module.deprecate("The 'vertica_facts' module has been renamed to 'vertica_info', " + "and the renamed one no longer returns ansible_facts", version='2.13') + + if not pyodbc_found: + module.fail_json(msg=missing_required_lib('pyodbc'), exception=PYODBC_IMP_ERR) + + db = '' + if module.params['db']: + db = module.params['db'] + + try: + dsn = ( + "Driver=Vertica;" + "Server=%s;" + "Port=%s;" + "Database=%s;" + "User=%s;" + "Password=%s;" + "ConnectionLoadBalance=%s" + ) % (module.params['cluster'], module.params['port'], db, + module.params['login_user'], module.params['login_password'], 'true') + db_conn = pyodbc.connect(dsn, autocommit=True) + cursor = db_conn.cursor() + except Exception as e: + module.fail_json(msg="Unable to connect to database: %s." 
% to_native(e), exception=traceback.format_exc()) + + try: + schema_facts = get_schema_facts(cursor) + user_facts = get_user_facts(cursor) + role_facts = get_role_facts(cursor) + configuration_facts = get_configuration_facts(cursor) + node_facts = get_node_facts(cursor) + + if is_old_facts: + module.exit_json(changed=False, + ansible_facts={'vertica_schemas': schema_facts, + 'vertica_users': user_facts, + 'vertica_roles': role_facts, + 'vertica_configuration': configuration_facts, + 'vertica_nodes': node_facts}) + else: + module.exit_json(changed=False, + vertica_schemas=schema_facts, + vertica_users=user_facts, + vertica_roles=role_facts, + vertica_configuration=configuration_facts, + vertica_nodes=node_facts) + except NotSupportedError as e: + module.fail_json(msg=to_native(e), exception=traceback.format_exc()) + except SystemExit: + # avoid catching this on python 2.4 + raise + except Exception as e: + module.fail_json(msg=to_native(e), exception=traceback.format_exc()) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/database/vertica/vertica_role.py b/plugins/modules/database/vertica/vertica_role.py new file mode 100644 index 0000000000..c4e8f563f8 --- /dev/null +++ b/plugins/modules/database/vertica/vertica_role.py @@ -0,0 +1,242 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: vertica_role +short_description: Adds or removes Vertica database roles and assigns roles to them. +description: + - Adds or removes a Vertica database role and, optionally, assigns other roles. +options: + name: + description: + - Name of the role to add or remove. + required: true + assigned_roles: + description: + - Comma separated list of roles to assign to the role. + aliases: ['assigned_role'] + state: + description: + - Whether to create C(present) or drop C(absent) a role. + choices: ['present', 'absent'] + default: present + db: + description: + - Name of the Vertica database. + cluster: + description: + - Name of the Vertica cluster. + default: localhost + port: + description: + - Vertica cluster port to connect to. + default: 5433 + login_user: + description: + - The username used to authenticate with. + default: dbadmin + login_password: + description: + - The password used to authenticate with. +notes: + - The default authentication assumes that you are either logging in as or sudo'ing + to the C(dbadmin) account on the host. + - This module uses C(pyodbc), a Python ODBC database adapter. You must ensure + that C(unixODBC) and C(pyodbc) are installed on the host and properly configured. + - Configuring C(unixODBC) for Vertica requires C(Driver = /opt/vertica/lib64/libverticaodbc.so) + to be added to the C(Vertica) section of either C(/etc/odbcinst.ini) or C($HOME/.odbcinst.ini) + and both C(ErrorMessagesPath = /opt/vertica/lib64) and C(DriverManagerEncoding = UTF-16) + to be added to the C(Driver) section of either C(/etc/vertica.ini) or C($HOME/.vertica.ini). 
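+# Editorial sketch of the unixODBC setup described in the notes above (YAML
+# comment only; paths assume a default Vertica install, adjust as needed):
+#
+#   # /etc/odbcinst.ini (or $HOME/.odbcinst.ini)
+#   [Vertica]
+#   Driver = /opt/vertica/lib64/libverticaodbc.so
+#
+#   # /etc/vertica.ini (or $HOME/.vertica.ini)
+#   [Driver]
+#   ErrorMessagesPath = /opt/vertica/lib64
+#   DriverManagerEncoding = UTF-16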
+requirements: [ 'unixODBC', 'pyodbc' ] +author: "Dariusz Owczarek (@dareko)" +''' + +EXAMPLES = """ +- name: creating a new vertica role + vertica_role: name=role_name db=db_name state=present + +- name: creating a new vertica role with other role assigned + vertica_role: name=role_name assigned_role=other_role_name state=present +""" +import traceback + +PYODBC_IMP_ERR = None +try: + import pyodbc +except ImportError: + PYODBC_IMP_ERR = traceback.format_exc() + pyodbc_found = False +else: + pyodbc_found = True + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib +from ansible.module_utils._text import to_native + + +class NotSupportedError(Exception): + pass + + +class CannotDropError(Exception): + pass + +# module specific functions + + +def get_role_facts(cursor, role=''): + facts = {} + cursor.execute(""" + select r.name, r.assigned_roles + from roles r + where (? = '' or r.name ilike ?) + """, role, role) + while True: + rows = cursor.fetchmany(100) + if not rows: + break + for row in rows: + role_key = row.name.lower() + facts[role_key] = { + 'name': row.name, + 'assigned_roles': []} + if row.assigned_roles: + facts[role_key]['assigned_roles'] = row.assigned_roles.replace(' ', '').split(',') + return facts + + +def update_roles(role_facts, cursor, role, + existing, required): + for assigned_role in set(existing) - set(required): + cursor.execute("revoke {0} from {1}".format(assigned_role, role)) + for assigned_role in set(required) - set(existing): + cursor.execute("grant {0} to {1}".format(assigned_role, role)) + + +def check(role_facts, role, assigned_roles): + role_key = role.lower() + if role_key not in role_facts: + return False + if assigned_roles and sorted(assigned_roles) != sorted(role_facts[role_key]['assigned_roles']): + return False + return True + + +def present(role_facts, cursor, role, assigned_roles): + role_key = role.lower() + if role_key not in role_facts: + cursor.execute("create role {0}".format(role)) + update_roles(role_facts, cursor, role, [], assigned_roles) + role_facts.update(get_role_facts(cursor, role)) + return True + else: + changed = False + if assigned_roles and (sorted(assigned_roles) != sorted(role_facts[role_key]['assigned_roles'])): + update_roles(role_facts, cursor, role, + role_facts[role_key]['assigned_roles'], assigned_roles) + changed = True + if changed: + role_facts.update(get_role_facts(cursor, role)) + return changed + + +def absent(role_facts, cursor, role, assigned_roles): + role_key = role.lower() + if role_key in role_facts: + update_roles(role_facts, cursor, role, + role_facts[role_key]['assigned_roles'], []) + cursor.execute("drop role {0} cascade".format(role_facts[role_key]['name'])) + del role_facts[role_key] + return True + else: + return False + +# module logic + + +def main(): + + module = AnsibleModule( + argument_spec=dict( + role=dict(required=True, aliases=['name']), + assigned_roles=dict(default=None, aliases=['assigned_role']), + state=dict(default='present', choices=['absent', 'present']), + db=dict(default=None), + cluster=dict(default='localhost'), + port=dict(default='5433'), + login_user=dict(default='dbadmin'), + login_password=dict(default=None, no_log=True), + ), supports_check_mode=True) + + if not pyodbc_found: + module.fail_json(msg=missing_required_lib('pyodbc'), exception=PYODBC_IMP_ERR) + + role = module.params['role'] + assigned_roles = [] + if module.params['assigned_roles']: + assigned_roles = module.params['assigned_roles'].split(',') + assigned_roles = list(filter(None, assigned_roles)) + state = module.params['state'] + db = '' + if module.params['db']: + db = module.params['db'] + + changed = False + + try: + dsn = ( + "Driver=Vertica;" + "Server={0};" + "Port={1};" + "Database={2};" + "User={3};" + "Password={4};" + "ConnectionLoadBalance={5}" + ).format(module.params['cluster'], module.params['port'], db, + module.params['login_user'], module.params['login_password'], 'true') + db_conn = pyodbc.connect(dsn, autocommit=True) + cursor = db_conn.cursor() + except Exception as e: + module.fail_json(msg="Unable to connect to database: {0}.".format(to_native(e))) + + try: + role_facts = get_role_facts(cursor) + if module.check_mode: + changed = not check(role_facts, role, assigned_roles) + elif state == 'absent': + try: + changed = absent(role_facts, cursor, role, assigned_roles) + except pyodbc.Error as e: + module.fail_json(msg=to_native(e), exception=traceback.format_exc()) + elif state == 'present': + try: + changed = present(role_facts, cursor, role, assigned_roles) + except pyodbc.Error as e: + module.fail_json(msg=to_native(e), exception=traceback.format_exc()) + except NotSupportedError as e: + module.fail_json(msg=to_native(e), ansible_facts={'vertica_roles': role_facts}) + except CannotDropError as e: + module.fail_json(msg=to_native(e), ansible_facts={'vertica_roles': role_facts}) + except SystemExit: + # avoid catching this on python 2.4 + raise + except Exception as e: + module.fail_json(msg=to_native(e), exception=traceback.format_exc()) + + module.exit_json(changed=changed, role=role, ansible_facts={'vertica_roles': role_facts}) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/database/vertica/vertica_schema.py b/plugins/modules/database/vertica/vertica_schema.py new file mode 100644 index 0000000000..4840cabc05 --- /dev/null +++ b/plugins/modules/database/vertica/vertica_schema.py @@ -0,0 +1,313 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: vertica_schema +short_description: Adds or removes Vertica database schema and roles. +description: + - Adds or removes Vertica database schema and, optionally, roles + with schema access privileges. + - A schema will not be removed until all the objects have been dropped. + - In such a situation, if the module tries to remove the schema it + will fail and only remove roles created for the schema if they have + no dependencies. +options: + name: + description: + - Name of the schema to add or remove. + required: true + usage_roles: + description: + - Comma separated list of roles to create and grant usage access to the schema. + aliases: ['usage_role'] + create_roles: + description: + - Comma separated list of roles to create and grant usage and create access to the schema. + aliases: ['create_role'] + owner: + description: + - Name of the user to set as owner of the schema. + state: + description: + - Whether to create C(present), or drop C(absent) a schema. + default: present + choices: ['present', 'absent'] + db: + description: + - Name of the Vertica database. + cluster: + description: + - Name of the Vertica cluster. + default: localhost + port: + description: + - Vertica cluster port to connect to. 
+ default: 5433 + login_user: + description: + - The username used to authenticate with. + default: dbadmin + login_password: + description: + - The password used to authenticate with. +notes: + - The default authentication assumes that you are either logging in as or sudo'ing + to the C(dbadmin) account on the host. + - This module uses C(pyodbc), a Python ODBC database adapter. You must ensure + that C(unixODBC) and C(pyodbc) are installed on the host and properly configured. + - Configuring C(unixODBC) for Vertica requires C(Driver = /opt/vertica/lib64/libverticaodbc.so) + to be added to the C(Vertica) section of either C(/etc/odbcinst.ini) or C($HOME/.odbcinst.ini) + and both C(ErrorMessagesPath = /opt/vertica/lib64) and C(DriverManagerEncoding = UTF-16) + to be added to the C(Driver) section of either C(/etc/vertica.ini) or C($HOME/.vertica.ini). +requirements: [ 'unixODBC', 'pyodbc' ] +author: "Dariusz Owczarek (@dareko)" +''' + +EXAMPLES = """ +- name: creating a new vertica schema + vertica_schema: name=schema_name db=db_name state=present + +- name: creating a new schema with specific schema owner + vertica_schema: name=schema_name owner=dbowner db=db_name state=present + +- name: creating a new schema with roles + vertica_schema: + name=schema_name + create_roles=schema_name_all + usage_roles=schema_name_ro,schema_name_rw + db=db_name + state=present +""" +import traceback + +PYODBC_IMP_ERR = None +try: + import pyodbc +except ImportError: + PYODBC_IMP_ERR = traceback.format_exc() + pyodbc_found = False +else: + pyodbc_found = True + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib +from ansible.module_utils._text import to_native + + +class NotSupportedError(Exception): + pass + + +class CannotDropError(Exception): + pass + +# module specific functions + + +def get_schema_facts(cursor, schema=''): + facts = {} + cursor.execute(""" + select schema_name, schema_owner, create_time + from schemata + where not is_system_schema and schema_name not in ('public', 'TxtIndex') + and (? = '' or schema_name ilike ?) + """, schema, schema) + while True: + rows = cursor.fetchmany(100) + if not rows: + break + for row in rows: + facts[row.schema_name.lower()] = { + 'name': row.schema_name, + 'owner': row.schema_owner, + 'create_time': str(row.create_time), + 'usage_roles': [], + 'create_roles': []} + cursor.execute(""" + select g.object_name as schema_name, r.name as role_name, + lower(g.privileges_description) privileges_description + from roles r join grants g + on g.grantee_id = r.role_id and g.object_type='SCHEMA' + and g.privileges_description like '%USAGE%' + and g.grantee not in ('public', 'dbadmin') + and (? = '' or g.object_name ilike ?) 
+ """, schema, schema) + while True: + rows = cursor.fetchmany(100) + if not rows: + break + for row in rows: + schema_key = row.schema_name.lower() + if 'create' in row.privileges_description: + facts[schema_key]['create_roles'].append(row.role_name) + else: + facts[schema_key]['usage_roles'].append(row.role_name) + return facts + + +def update_roles(schema_facts, cursor, schema, + existing, required, + create_existing, create_required): + for role in set(existing + create_existing) - set(required + create_required): + cursor.execute("drop role {0} cascade".format(role)) + for role in set(create_existing) - set(create_required): + cursor.execute("revoke create on schema {0} from {1}".format(schema, role)) + for role in set(required + create_required) - set(existing + create_existing): + cursor.execute("create role {0}".format(role)) + cursor.execute("grant usage on schema {0} to {1}".format(schema, role)) + for role in set(create_required) - set(create_existing): + cursor.execute("grant create on schema {0} to {1}".format(schema, role)) + + +def check(schema_facts, schema, usage_roles, create_roles, owner): + schema_key = schema.lower() + if schema_key not in schema_facts: + return False + if owner and owner.lower() == schema_facts[schema_key]['owner'].lower(): + return False + if sorted(usage_roles) != sorted(schema_facts[schema_key]['usage_roles']): + return False + if sorted(create_roles) != sorted(schema_facts[schema_key]['create_roles']): + return False + return True + + +def present(schema_facts, cursor, schema, usage_roles, create_roles, owner): + schema_key = schema.lower() + if schema_key not in schema_facts: + query_fragments = ["create schema {0}".format(schema)] + if owner: + query_fragments.append("authorization {0}".format(owner)) + cursor.execute(' '.join(query_fragments)) + update_roles(schema_facts, cursor, schema, [], usage_roles, [], create_roles) + schema_facts.update(get_schema_facts(cursor, schema)) + return True + else: + changed = False + if owner and owner.lower() != schema_facts[schema_key]['owner'].lower(): + raise NotSupportedError(( + "Changing schema owner is not supported. " + "Current owner: {0}." 
+ ).format(schema_facts[schema_key]['owner'])) + if sorted(usage_roles) != sorted(schema_facts[schema_key]['usage_roles']) or \ + sorted(create_roles) != sorted(schema_facts[schema_key]['create_roles']): + + update_roles(schema_facts, cursor, schema, + schema_facts[schema_key]['usage_roles'], usage_roles, + schema_facts[schema_key]['create_roles'], create_roles) + changed = True + if changed: + schema_facts.update(get_schema_facts(cursor, schema)) + return changed + + +def absent(schema_facts, cursor, schema, usage_roles, create_roles): + schema_key = schema.lower() + if schema_key in schema_facts: + update_roles(schema_facts, cursor, schema, + schema_facts[schema_key]['usage_roles'], [], schema_facts[schema_key]['create_roles'], []) + try: + cursor.execute("drop schema {0} restrict".format(schema_facts[schema_key]['name'])) + except pyodbc.Error: + raise CannotDropError("Dropping schema failed due to dependencies.") + del schema_facts[schema_key] + return True + else: + return False + +# module logic + + +def main(): + + module = AnsibleModule( + argument_spec=dict( + schema=dict(required=True, aliases=['name']), + usage_roles=dict(default=None, aliases=['usage_role']), + create_roles=dict(default=None, aliases=['create_role']), + owner=dict(default=None), + state=dict(default='present', choices=['absent', 'present']), + db=dict(default=None), + cluster=dict(default='localhost'), + port=dict(default='5433'), + login_user=dict(default='dbadmin'), + login_password=dict(default=None, no_log=True), + ), supports_check_mode=True) + + if not pyodbc_found: + module.fail_json(msg=missing_required_lib('pyodbc'), exception=PYODBC_IMP_ERR) + + schema = module.params['schema'] + usage_roles = [] + if module.params['usage_roles']: + usage_roles = module.params['usage_roles'].split(',') + usage_roles = filter(None, usage_roles) + create_roles = [] + if module.params['create_roles']: + create_roles = module.params['create_roles'].split(',') + create_roles = filter(None, create_roles) + owner = module.params['owner'] + state = module.params['state'] + db = '' + if module.params['db']: + db = module.params['db'] + + changed = False + + try: + dsn = ( + "Driver=Vertica;" + "Server={0};" + "Port={1};" + "Database={2};" + "User={3};" + "Password={4};" + "ConnectionLoadBalance={5}" + ).format(module.params['cluster'], module.params['port'], db, + module.params['login_user'], module.params['login_password'], 'true') + db_conn = pyodbc.connect(dsn, autocommit=True) + cursor = db_conn.cursor() + except Exception as e: + module.fail_json(msg="Unable to connect to database: {0}.".format(to_native(e))) + + try: + schema_facts = get_schema_facts(cursor) + if module.check_mode: + changed = not check(schema_facts, schema, usage_roles, create_roles, owner) + elif state == 'absent': + try: + changed = absent(schema_facts, cursor, schema, usage_roles, create_roles) + except pyodbc.Error as e: + module.fail_json(msg=to_native(e), exception=traceback.format_exc()) + elif state == 'present': + try: + changed = present(schema_facts, cursor, schema, usage_roles, create_roles, owner) + except pyodbc.Error as e: + module.fail_json(msg=to_native(e), exception=traceback.format_exc()) + except NotSupportedError as e: + module.fail_json(msg=to_native(e), ansible_facts={'vertica_schemas': schema_facts}) + except CannotDropError as e: + module.fail_json(msg=to_native(e), ansible_facts={'vertica_schemas': schema_facts}) + except SystemExit: + # avoid catching this on python 2.4 + raise + except Exception as e: + 
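+        # editorial note: deliberate catch-all so that any other failure is
+        # reported through fail_json with a traceback instead of crashing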
module.fail_json(msg=to_native(e), exception=traceback.format_exc()) + + module.exit_json(changed=changed, schema=schema, ansible_facts={'vertica_schemas': schema_facts}) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/database/vertica/vertica_user.py b/plugins/modules/database/vertica/vertica_user.py new file mode 100644 index 0000000000..fc0148709e --- /dev/null +++ b/plugins/modules/database/vertica/vertica_user.py @@ -0,0 +1,378 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: vertica_user +short_description: Adds or removes Vertica database users and assigns roles. +description: + - Adds or removes Vertica database user and, optionally, assigns roles. + - A user will not be removed until all the dependencies have been dropped. + - In such a situation, if the module tries to remove the user it + will fail and only remove roles granted to the user. +options: + name: + description: + - Name of the user to add or remove. + required: true + profile: + description: + - Sets the user's profile. + resource_pool: + description: + - Sets the user's resource pool. + password: + description: + - The user's password encrypted by the MD5 algorithm. + - The password must be generated with the format C("md5" + md5[password + username]), + resulting in a total of 35 characters. An easy way to do this is by querying + the Vertica database with select 'md5'||md5('<password><username>'). + expired: + description: + - Sets the user's password expiration. + type: bool + ldap: + description: + - Set to true if users are authenticated via LDAP. + - The user will be created with password expired and set to I($ldap$). + type: bool + roles: + description: + - Comma separated list of roles to assign to the user. + aliases: ['role'] + state: + description: + - Whether to create C(present), drop C(absent) or lock C(locked) a user. + choices: ['present', 'absent', 'locked'] + default: present + db: + description: + - Name of the Vertica database. + cluster: + description: + - Name of the Vertica cluster. + default: localhost + port: + description: + - Vertica cluster port to connect to. + default: 5433 + login_user: + description: + - The username used to authenticate with. + default: dbadmin + login_password: + description: + - The password used to authenticate with. +notes: + - The default authentication assumes that you are either logging in as or sudo'ing + to the C(dbadmin) account on the host. + - This module uses C(pyodbc), a Python ODBC database adapter. You must ensure + that C(unixODBC) and C(pyodbc) are installed on the host and properly configured. + - Configuring C(unixODBC) for Vertica requires C(Driver = /opt/vertica/lib64/libverticaodbc.so) + to be added to the C(Vertica) section of either C(/etc/odbcinst.ini) or C($HOME/.odbcinst.ini) + and both C(ErrorMessagesPath = /opt/vertica/lib64) and C(DriverManagerEncoding = UTF-16) + to be added to the C(Driver) section of either C(/etc/vertica.ini) or C($HOME/.vertica.ini). 
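+# Editorial sketch of computing the pre-hashed password described under
+# I(password) (hypothetical helper, not part of this module):
+#
+#   import hashlib
+#
+#   def vertica_md5_password(username, password):
+#       # "md5" prefix + 32 hex digits = the 35-character value Vertica stores
+#       return 'md5' + hashlib.md5((password + username).encode('utf-8')).hexdigest()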
+requirements: [ 'unixODBC', 'pyodbc' ] +author: "Dariusz Owczarek (@dareko)" +''' + +EXAMPLES = """ +- name: creating a new vertica user with password + vertica_user: name=user_name password=md5 db=db_name state=present + +- name: creating a new vertica user authenticated via ldap with roles assigned + vertica_user: + name=user_name + ldap=true + db=db_name + roles=schema_name_ro + state=present +""" +import traceback + +PYODBC_IMP_ERR = None +try: + import pyodbc +except ImportError: + PYODBC_IMP_ERR = traceback.format_exc() + pyodbc_found = False +else: + pyodbc_found = True + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib +from ansible.module_utils._text import to_native + + +class NotSupportedError(Exception): + pass + + +class CannotDropError(Exception): + pass + +# module specific functions + + +def get_user_facts(cursor, user=''): + facts = {} + cursor.execute(""" + select u.user_name, u.is_locked, u.lock_time, + p.password, p.acctexpired as is_expired, + u.profile_name, u.resource_pool, + u.all_roles, u.default_roles + from users u join password_auditor p on p.user_id = u.user_id + where not u.is_super_user + and (? = '' or u.user_name ilike ?) + """, user, user) + while True: + rows = cursor.fetchmany(100) + if not rows: + break + for row in rows: + user_key = row.user_name.lower() + facts[user_key] = { + 'name': row.user_name, + 'locked': str(row.is_locked), + 'password': row.password, + 'expired': str(row.is_expired), + 'profile': row.profile_name, + 'resource_pool': row.resource_pool, + 'roles': [], + 'default_roles': []} + if row.is_locked: + facts[user_key]['locked_time'] = str(row.lock_time) + if row.all_roles: + facts[user_key]['roles'] = row.all_roles.replace(' ', '').split(',') + if row.default_roles: + facts[user_key]['default_roles'] = row.default_roles.replace(' ', '').split(',') + return facts + + +def update_roles(user_facts, cursor, user, + existing_all, existing_default, required): + del_roles = list(set(existing_all) - set(required)) + if del_roles: + cursor.execute("revoke {0} from {1}".format(','.join(del_roles), user)) + new_roles = list(set(required) - set(existing_all)) + if new_roles: + cursor.execute("grant {0} to {1}".format(','.join(new_roles), user)) + if required: + cursor.execute("alter user {0} default role {1}".format(user, ','.join(required))) + + +def check(user_facts, user, profile, resource_pool, + locked, password, expired, ldap, roles): + user_key = user.lower() + if user_key not in user_facts: + return False + if profile and profile != user_facts[user_key]['profile']: + return False + if resource_pool and resource_pool != user_facts[user_key]['resource_pool']: + return False + if locked != (user_facts[user_key]['locked'] == 'True'): + return False + if password and password != user_facts[user_key]['password']: + return False + if (expired is not None and expired != (user_facts[user_key]['expired'] == 'True') or + ldap is not None and ldap != (user_facts[user_key]['expired'] == 'True')): + return False + if roles and (sorted(roles) != sorted(user_facts[user_key]['roles']) or + sorted(roles) != sorted(user_facts[user_key]['default_roles'])): + return False + return True + + +def present(user_facts, cursor, user, profile, resource_pool, + locked, password, expired, ldap, roles): + user_key = user.lower() + if user_key not in user_facts: + query_fragments = ["create user {0}".format(user)] + if locked: + query_fragments.append("account lock") + if password or ldap: + if password: + query_fragments.append("identified 
by '{0}'".format(password)) + else: + query_fragments.append("identified by '$ldap$'") + if expired or ldap: + query_fragments.append("password expire") + if profile: + query_fragments.append("profile {0}".format(profile)) + if resource_pool: + query_fragments.append("resource pool {0}".format(resource_pool)) + cursor.execute(' '.join(query_fragments)) + if resource_pool and resource_pool != 'general': + cursor.execute("grant usage on resource pool {0} to {1}".format( + resource_pool, user)) + update_roles(user_facts, cursor, user, [], [], roles) + user_facts.update(get_user_facts(cursor, user)) + return True + else: + changed = False + query_fragments = ["alter user {0}".format(user)] + if locked is not None and locked != (user_facts[user_key]['locked'] == 'True'): + if locked: + state = 'lock' + else: + state = 'unlock' + query_fragments.append("account {0}".format(state)) + changed = True + if password and password != user_facts[user_key]['password']: + query_fragments.append("identified by '{0}'".format(password)) + changed = True + if ldap: + if ldap != (user_facts[user_key]['expired'] == 'True'): + query_fragments.append("password expire") + changed = True + elif expired is not None and expired != (user_facts[user_key]['expired'] == 'True'): + if expired: + query_fragments.append("password expire") + changed = True + else: + raise NotSupportedError("Unexpiring user password is not supported.") + if profile and profile != user_facts[user_key]['profile']: + query_fragments.append("profile {0}".format(profile)) + changed = True + if resource_pool and resource_pool != user_facts[user_key]['resource_pool']: + query_fragments.append("resource pool {0}".format(resource_pool)) + if user_facts[user_key]['resource_pool'] != 'general': + cursor.execute("revoke usage on resource pool {0} from {1}".format( + user_facts[user_key]['resource_pool'], user)) + if resource_pool != 'general': + cursor.execute("grant usage on resource pool {0} to {1}".format( + resource_pool, user)) + changed = True + if changed: + cursor.execute(' '.join(query_fragments)) + if roles and (sorted(roles) != sorted(user_facts[user_key]['roles']) or + sorted(roles) != sorted(user_facts[user_key]['default_roles'])): + update_roles(user_facts, cursor, user, + user_facts[user_key]['roles'], user_facts[user_key]['default_roles'], roles) + changed = True + if changed: + user_facts.update(get_user_facts(cursor, user)) + return changed + + +def absent(user_facts, cursor, user, roles): + user_key = user.lower() + if user_key in user_facts: + update_roles(user_facts, cursor, user, + user_facts[user_key]['roles'], user_facts[user_key]['default_roles'], []) + try: + cursor.execute("drop user {0}".format(user_facts[user_key]['name'])) + except pyodbc.Error: + raise CannotDropError("Dropping user failed due to dependencies.") + del user_facts[user_key] + return True + else: + return False + +# module logic + + +def main(): + + module = AnsibleModule( + argument_spec=dict( + user=dict(required=True, aliases=['name']), + profile=dict(default=None), + resource_pool=dict(default=None), + password=dict(default=None, no_log=True), + expired=dict(type='bool', default=None), + ldap=dict(type='bool', default=None), + roles=dict(default=None, aliases=['role']), + state=dict(default='present', choices=['absent', 'present', 'locked']), + db=dict(default=None), + cluster=dict(default='localhost'), + port=dict(default='5433'), + login_user=dict(default='dbadmin'), + login_password=dict(default=None, no_log=True), + ), supports_check_mode=True) + + if 
not pyodbc_found: + module.fail_json(msg=missing_required_lib('pyodbc'), exception=PYODBC_IMP_ERR) + + user = module.params['user'] + profile = module.params['profile'] + if profile: + profile = profile.lower() + resource_pool = module.params['resource_pool'] + if resource_pool: + resource_pool = resource_pool.lower() + password = module.params['password'] + expired = module.params['expired'] + ldap = module.params['ldap'] + roles = [] + if module.params['roles']: + roles = module.params['roles'].split(',') + roles = filter(None, roles) + state = module.params['state'] + if state == 'locked': + locked = True + else: + locked = False + db = '' + if module.params['db']: + db = module.params['db'] + + changed = False + + try: + dsn = ( + "Driver=Vertica;" + "Server={0};" + "Port={1};" + "Database={2};" + "User={3};" + "Password={4};" + "ConnectionLoadBalance={5}" + ).format(module.params['cluster'], module.params['port'], db, + module.params['login_user'], module.params['login_password'], 'true') + db_conn = pyodbc.connect(dsn, autocommit=True) + cursor = db_conn.cursor() + except Exception as e: + module.fail_json(msg="Unable to connect to database: {0}.".format(e)) + + try: + user_facts = get_user_facts(cursor) + if module.check_mode: + changed = not check(user_facts, user, profile, resource_pool, + locked, password, expired, ldap, roles) + elif state == 'absent': + try: + changed = absent(user_facts, cursor, user, roles) + except pyodbc.Error as e: + module.fail_json(msg=to_native(e), exception=traceback.format_exc()) + elif state in ['present', 'locked']: + try: + changed = present(user_facts, cursor, user, profile, resource_pool, + locked, password, expired, ldap, roles) + except pyodbc.Error as e: + module.fail_json(msg=to_native(e), exception=traceback.format_exc()) + except NotSupportedError as e: + module.fail_json(msg=to_native(e), ansible_facts={'vertica_users': user_facts}) + except CannotDropError as e: + module.fail_json(msg=to_native(e), ansible_facts={'vertica_users': user_facts}) + except SystemExit: + # avoid catching this on python 2.4 + raise + except Exception as e: + module.fail_json(msg=to_native(e), exception=traceback.format_exc()) + + module.exit_json(changed=changed, user=user, ansible_facts={'vertica_users': user_facts}) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/files/archive.py b/plugins/modules/files/archive.py new file mode 100644 index 0000000000..f1cc678270 --- /dev/null +++ b/plugins/modules/files/archive.py @@ -0,0 +1,576 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2016, Ben Doherty +# Sponsored by Oomph, Inc. http://www.oomphinc.com +# Copyright: (c) 2017, Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = r''' +--- +module: archive +short_description: Creates a compressed archive of one or more files or trees +extends_documentation_fragment: files +description: + - Creates or extends an archive. + - The source and archive are on the remote host, and the archive I(is not) copied to the local host. + - Source files can be deleted after archival by specifying I(remove=True). +options: + path: + description: + - Remote absolute path, glob, or list of paths or globs for the file or files to compress or archive. 
+    type: list
+    required: true
+  format:
+    description:
+      - The type of compression to use.
+      - Support for xz was added in Ansible 2.5.
+    type: str
+    choices: [ bz2, gz, tar, xz, zip ]
+    default: gz
+  dest:
+    description:
+      - The file name of the destination archive. The parent directory must exist on the remote host.
+      - This is required when C(path) refers to multiple files by either specifying a glob, a directory or multiple paths in a list.
+    type: path
+  exclude_path:
+    description:
+      - Remote absolute path, glob, or list of paths or globs for the file or files to exclude from the archive.
+    type: list
+  force_archive:
+    description:
+      - Allows you to force the module to treat this as an archive even if only a single file is specified.
+      - By default the behaviour is maintained, i.e. when a single file is specified it is compressed only (not archived).
+    type: bool
+    default: false
+  remove:
+    description:
+      - Remove any added source files and trees after adding to archive.
+    type: bool
+    default: no
+notes:
+    - Requires tarfile, zipfile, gzip and bzip2 packages on target host.
+    - Requires lzma or backports.lzma if using xz format.
+    - Can produce I(gzip), I(bzip2), I(lzma) and I(zip) compressed files or archives.
+seealso:
+- module: unarchive
+author:
+- Ben Doherty (@bendoh)
+'''
+
+EXAMPLES = r'''
+- name: Compress directory /path/to/foo/ into /path/to/foo.tgz
+  archive:
+    path: /path/to/foo
+    dest: /path/to/foo.tgz
+
+- name: Compress regular file /path/to/foo into /path/to/foo.gz and remove it
+  archive:
+    path: /path/to/foo
+    remove: yes
+
+- name: Create a zip archive of /path/to/foo
+  archive:
+    path: /path/to/foo
+    format: zip
+
+- name: Create a bz2 archive of multiple files, rooted at /path
+  archive:
+    path:
+    - /path/to/foo
+    - /path/wong/foo
+    dest: /path/file.tar.bz2
+    format: bz2
+
+- name: Create a bz2 archive of a globbed path, while excluding specific dirnames
+  archive:
+    path:
+    - /path/to/foo/*
+    dest: /path/file.tar.bz2
+    exclude_path:
+    - /path/to/foo/bar
+    - /path/to/foo/baz
+    format: bz2
+
+- name: Create a bz2 archive of a globbed path, while excluding a glob of dirnames
+  archive:
+    path:
+    - /path/to/foo/*
+    dest: /path/file.tar.bz2
+    exclude_path:
+    - /path/to/foo/ba*
+    format: bz2
+
+- name: Use gzip to compress a single file (i.e. don't archive it first with tar)
+  archive:
+    path: /path/to/foo/single.file
+    dest: /path/file.gz
+    format: gz
+
+- name: Create a tar.gz archive of a single file
+  archive:
+    path: /path/to/foo/single.file
+    dest: /path/file.tar.gz
+    format: gz
+    force_archive: true
+'''
+
+RETURN = r'''
+state:
+    description:
+        The current state of the archived file.
+        If 'absent', then no source files were found and the archive does not exist.
+        If 'compress', then the source file is in the compressed state.
+        If 'archive', then the source file or paths are currently archived.
+        If 'incomplete', then an archive was created, but not all source paths were found.
+    type: str
+    returned: always
+missing:
+    description: Any files that were missing from the source.
+    type: list
+    returned: success
+archived:
+    description: Any files that were compressed or added to the archive.
+    type: list
+    returned: success
+arcroot:
+    description: The archive root.
+    type: str
+    returned: always
+expanded_paths:
+    description: The list of matching paths from paths argument.
+    type: list
+    returned: always
+expanded_exclude_paths:
+    description: The list of matching exclude paths from the exclude_path argument.
+    type: list
+    returned: always
+'''
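The single-file-versus-archive rules documented above reduce to one check. A simplified standalone sketch of that decision (it mirrors, rather than reuses, the logic in main() below; names are illustrative):

```python
# Simplified sketch of how the module decides between plain compression
# and multi-file archiving: a glob, a directory, or more than one path
# forces an archive, as does force_archive.
import glob
import os


def needs_archive(paths, force_archive=False):
    expanded = []
    globby = False
    for p in paths:
        if '*' in p or '?' in p:
            expanded.extend(glob.glob(p))
            globby = True
        else:
            expanded.append(p)
    return bool(force_archive or globby or len(expanded) > 1 or
                (expanded and os.path.isdir(expanded[0])))
```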
+
+import bz2
+import filecmp
+import glob
+import gzip
+import io
+import os
+import re
+import shutil
+import tarfile
+import zipfile
+from traceback import format_exc
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_bytes, to_native
+from ansible.module_utils.six import PY3
+
+
+LZMA_IMP_ERR = None
+if PY3:
+    try:
+        import lzma
+        HAS_LZMA = True
+    except ImportError:
+        LZMA_IMP_ERR = format_exc()
+        HAS_LZMA = False
+else:
+    try:
+        from backports import lzma
+        HAS_LZMA = True
+    except ImportError:
+        LZMA_IMP_ERR = format_exc()
+        HAS_LZMA = False
+
+
+def main():
+    module = AnsibleModule(
+        argument_spec=dict(
+            path=dict(type='list', required=True),
+            format=dict(type='str', default='gz', choices=['bz2', 'gz', 'tar', 'xz', 'zip']),
+            dest=dict(type='path'),
+            exclude_path=dict(type='list'),
+            force_archive=dict(type='bool', default=False),
+            remove=dict(type='bool', default=False),
+        ),
+        add_file_common_args=True,
+        supports_check_mode=True,
+    )
+
+    params = module.params
+    check_mode = module.check_mode
+    paths = params['path']
+    dest = params['dest']
+    b_dest = None if not dest else to_bytes(dest, errors='surrogate_or_strict')
+    exclude_paths = params['exclude_path']
+    remove = params['remove']
+
+    b_expanded_paths = []
+    b_expanded_exclude_paths = []
+    fmt = params['format']
+    b_fmt = to_bytes(fmt, errors='surrogate_or_strict')
+    force_archive = params['force_archive']
+    globby = False
+    changed = False
+    state = 'absent'
+
+    # Simple or archive file compression (inapplicable with 'zip' since it's always an archive)
+    archive = False
+    b_successes = []
+
+    # Fail early
+    if not HAS_LZMA and fmt == 'xz':
+        module.fail_json(msg=missing_required_lib("lzma or backports.lzma", reason="when using xz format"),
+                         exception=LZMA_IMP_ERR)
+
+    for path in paths:
+        b_path = os.path.expanduser(
+            os.path.expandvars(
+                to_bytes(path, errors='surrogate_or_strict')
+            )
+        )
+
+        # Expand any glob characters. If found, add the expanded glob to the
+        # list of expanded_paths, which might be empty.
+        if (b'*' in b_path or b'?' in b_path):
+            b_expanded_paths.extend(glob.glob(b_path))
+            globby = True
+
+        # If there are no glob characters the path is added to the expanded paths
+        # whether the path exists or not
+        else:
+            b_expanded_paths.append(b_path)
+
+    # Only attempt to expand the exclude paths if they exist
+    if exclude_paths:
+        for exclude_path in exclude_paths:
+            b_exclude_path = os.path.expanduser(
+                os.path.expandvars(
+                    to_bytes(exclude_path, errors='surrogate_or_strict')
+                )
+            )
+
+            # Expand any glob characters. If found, add the expanded glob to the
+            # list of expanded_paths, which might be empty.
+            if (b'*' in b_exclude_path or b'?' in b_exclude_path):
+                b_expanded_exclude_paths.extend(glob.glob(b_exclude_path))
+
+            # If there are no glob characters the exclude path is added to the expanded
+            # exclude paths whether the path exists or not.
+ else: + b_expanded_exclude_paths.append(b_exclude_path) + + if not b_expanded_paths: + return module.fail_json( + path=', '.join(paths), + expanded_paths=to_native(b', '.join(b_expanded_paths), errors='surrogate_or_strict'), + msg='Error, no source paths were found' + ) + + # Only try to determine if we are working with an archive or not if we haven't set archive to true + if not force_archive: + # If we actually matched multiple files or TRIED to, then + # treat this as a multi-file archive + archive = globby or os.path.isdir(b_expanded_paths[0]) or len(b_expanded_paths) > 1 + else: + archive = True + + # Default created file name (for single-file archives) to + # . + if not b_dest and not archive: + b_dest = b'%s.%s' % (b_expanded_paths[0], b_fmt) + + # Force archives to specify 'dest' + if archive and not b_dest: + module.fail_json(dest=dest, path=', '.join(paths), msg='Error, must specify "dest" when archiving multiple files or trees') + + b_sep = to_bytes(os.sep, errors='surrogate_or_strict') + + b_archive_paths = [] + b_missing = [] + b_arcroot = b'' + + for b_path in b_expanded_paths: + # Use the longest common directory name among all the files + # as the archive root path + if b_arcroot == b'': + b_arcroot = os.path.dirname(b_path) + b_sep + else: + for i in range(len(b_arcroot)): + if b_path[i] != b_arcroot[i]: + break + + if i < len(b_arcroot): + b_arcroot = os.path.dirname(b_arcroot[0:i + 1]) + + b_arcroot += b_sep + + # Don't allow archives to be created anywhere within paths to be removed + if remove and os.path.isdir(b_path): + b_path_dir = b_path + if not b_path.endswith(b'/'): + b_path_dir += b'/' + + if b_dest.startswith(b_path_dir): + module.fail_json( + path=', '.join(paths), + msg='Error, created archive can not be contained in source paths when remove=True' + ) + + if os.path.lexists(b_path) and b_path not in b_expanded_exclude_paths: + b_archive_paths.append(b_path) + else: + b_missing.append(b_path) + + # No source files were found but the named archive exists: are we 'compress' or 'archive' now? + if len(b_missing) == len(b_expanded_paths) and b_dest and os.path.exists(b_dest): + # Just check the filename to know if it's an archive or simple compressed file + if re.search(br'(\.tar|\.tar\.gz|\.tgz|\.tbz2|\.tar\.bz2|\.tar\.xz|\.zip)$', os.path.basename(b_dest), re.IGNORECASE): + state = 'archive' + else: + state = 'compress' + + # Multiple files, or globbiness + elif archive: + if not b_archive_paths: + # No source files were found, but the archive is there. + if os.path.lexists(b_dest): + state = 'archive' + elif b_missing: + # SOME source files were found, but not all of them + state = 'incomplete' + + archive = None + size = 0 + errors = [] + + if os.path.lexists(b_dest): + size = os.path.getsize(b_dest) + + if state != 'archive': + if check_mode: + changed = True + + else: + try: + # Slightly more difficult (and less efficient!) compression using zipfile module + if fmt == 'zip': + arcfile = zipfile.ZipFile( + to_native(b_dest, errors='surrogate_or_strict', encoding='ascii'), + 'w', + zipfile.ZIP_DEFLATED, + True + ) + + # Easier compression using tarfile module + elif fmt == 'gz' or fmt == 'bz2': + arcfile = tarfile.open(to_native(b_dest, errors='surrogate_or_strict', encoding='ascii'), 'w|' + fmt) + + # python3 tarfile module allows xz format but for python2 we have to create the tarfile + # in memory and then compress it with lzma. 
+ elif fmt == 'xz': + arcfileIO = io.BytesIO() + arcfile = tarfile.open(fileobj=arcfileIO, mode='w') + + # Or plain tar archiving + elif fmt == 'tar': + arcfile = tarfile.open(to_native(b_dest, errors='surrogate_or_strict', encoding='ascii'), 'w') + + b_match_root = re.compile(br'^%s' % re.escape(b_arcroot)) + for b_path in b_archive_paths: + if os.path.isdir(b_path): + # Recurse into directories + for b_dirpath, b_dirnames, b_filenames in os.walk(b_path, topdown=True): + if not b_dirpath.endswith(b_sep): + b_dirpath += b_sep + + for b_dirname in b_dirnames: + b_fullpath = b_dirpath + b_dirname + n_fullpath = to_native(b_fullpath, errors='surrogate_or_strict', encoding='ascii') + n_arcname = to_native(b_match_root.sub(b'', b_fullpath), errors='surrogate_or_strict') + + try: + if fmt == 'zip': + arcfile.write(n_fullpath, n_arcname) + else: + arcfile.add(n_fullpath, n_arcname, recursive=False) + + except Exception as e: + errors.append('%s: %s' % (n_fullpath, to_native(e))) + + for b_filename in b_filenames: + b_fullpath = b_dirpath + b_filename + n_fullpath = to_native(b_fullpath, errors='surrogate_or_strict', encoding='ascii') + n_arcname = to_native(b_match_root.sub(b'', b_fullpath), errors='surrogate_or_strict') + + try: + if fmt == 'zip': + arcfile.write(n_fullpath, n_arcname) + else: + arcfile.add(n_fullpath, n_arcname, recursive=False) + + b_successes.append(b_fullpath) + except Exception as e: + errors.append('Adding %s: %s' % (to_native(b_path), to_native(e))) + else: + path = to_native(b_path, errors='surrogate_or_strict', encoding='ascii') + arcname = to_native(b_match_root.sub(b'', b_path), errors='surrogate_or_strict') + if fmt == 'zip': + arcfile.write(path, arcname) + else: + arcfile.add(path, arcname, recursive=False) + + b_successes.append(b_path) + + except Exception as e: + expanded_fmt = 'zip' if fmt == 'zip' else ('tar.' + fmt) + module.fail_json( + msg='Error when writing %s archive at %s: %s' % (expanded_fmt, dest, to_native(e)), + exception=format_exc() + ) + + if arcfile: + arcfile.close() + state = 'archive' + + if fmt == 'xz': + with lzma.open(b_dest, 'wb') as f: + f.write(arcfileIO.getvalue()) + arcfileIO.close() + + if errors: + module.fail_json(msg='Errors when writing archive at %s: %s' % (dest, '; '.join(errors))) + + if state in ['archive', 'incomplete'] and remove: + for b_path in b_successes: + try: + if os.path.isdir(b_path): + shutil.rmtree(b_path) + elif not check_mode: + os.remove(b_path) + except OSError as e: + errors.append(to_native(b_path)) + + if errors: + module.fail_json(dest=dest, msg='Error deleting some source files: ', files=errors) + + # Rudimentary check: If size changed then file changed. Not perfect, but easy. 
+ if not check_mode and os.path.getsize(b_dest) != size: + changed = True + + if b_successes and state != 'incomplete': + state = 'archive' + + # Simple, single-file compression + else: + b_path = b_expanded_paths[0] + + # No source or compressed file + if not (os.path.exists(b_path) or os.path.lexists(b_dest)): + state = 'absent' + + # if it already exists and the source file isn't there, consider this done + elif not os.path.lexists(b_path) and os.path.lexists(b_dest): + state = 'compress' + + else: + if module.check_mode: + if not os.path.exists(b_dest): + changed = True + else: + size = 0 + f_in = f_out = arcfile = None + + if os.path.lexists(b_dest): + size = os.path.getsize(b_dest) + + try: + if fmt == 'zip': + arcfile = zipfile.ZipFile( + to_native(b_dest, errors='surrogate_or_strict', encoding='ascii'), + 'w', + zipfile.ZIP_DEFLATED, + True + ) + arcfile.write( + to_native(b_path, errors='surrogate_or_strict', encoding='ascii'), + to_native(b_path[len(b_arcroot):], errors='surrogate_or_strict') + ) + arcfile.close() + state = 'archive' # because all zip files are archives + elif fmt == 'tar': + arcfile = tarfile.open(to_native(b_dest, errors='surrogate_or_strict', encoding='ascii'), 'w') + arcfile.add(to_native(b_path, errors='surrogate_or_strict', encoding='ascii')) + arcfile.close() + else: + f_in = open(b_path, 'rb') + + n_dest = to_native(b_dest, errors='surrogate_or_strict', encoding='ascii') + if fmt == 'gz': + f_out = gzip.open(n_dest, 'wb') + elif fmt == 'bz2': + f_out = bz2.BZ2File(n_dest, 'wb') + elif fmt == 'xz': + f_out = lzma.LZMAFile(n_dest, 'wb') + else: + raise OSError("Invalid format") + + shutil.copyfileobj(f_in, f_out) + + b_successes.append(b_path) + + except OSError as e: + module.fail_json( + path=to_native(b_path), + dest=dest, + msg='Unable to write to compressed file: %s' % to_native(e), exception=format_exc() + ) + + if arcfile: + arcfile.close() + if f_in: + f_in.close() + if f_out: + f_out.close() + + # Rudimentary check: If size changed then file changed. Not perfect, but easy. 
+ if os.path.getsize(b_dest) != size: + changed = True + + state = 'compress' + + if remove and not check_mode: + try: + os.remove(b_path) + + except OSError as e: + module.fail_json( + path=to_native(b_path), + msg='Unable to remove source file: %s' % to_native(e), exception=format_exc() + ) + + file_args = module.load_file_common_arguments(params, path=b_dest) + + if not check_mode: + changed = module.set_fs_attributes_if_different(file_args, changed) + + module.exit_json( + archived=[to_native(p, errors='surrogate_or_strict') for p in b_successes], + dest=dest, + changed=changed, + state=state, + arcroot=to_native(b_arcroot, errors='surrogate_or_strict'), + missing=[to_native(p, errors='surrogate_or_strict') for p in b_missing], + expanded_paths=[to_native(p, errors='surrogate_or_strict') for p in b_expanded_paths], + expanded_exclude_paths=[to_native(p, errors='surrogate_or_strict') for p in b_expanded_exclude_paths], + ) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/files/ini_file.py b/plugins/modules/files/ini_file.py new file mode 100644 index 0000000000..e092fcbd7d --- /dev/null +++ b/plugins/modules/files/ini_file.py @@ -0,0 +1,338 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2012, Jan-Piet Mens +# Copyright: (c) 2015, Ales Nosek +# Copyright: (c) 2017, Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = r''' +--- +module: ini_file +short_description: Tweak settings in INI files +extends_documentation_fragment: files +description: + - Manage (add, remove, change) individual settings in an INI-style file without having + to manage the file as a whole with, say, M(template) or M(assemble). + - Adds missing sections if they don't exist. + - Before Ansible 2.0, comments are discarded when the source file is read, and therefore will not show up in the destination file. + - Since Ansible 2.3, this module adds missing ending newlines to files to keep in line with the POSIX standard, even when + no other modifications need to be applied. +options: + path: + description: + - Path to the INI-style file; this file is created if required. + - Before Ansible 2.3 this option was only usable as I(dest). + type: path + required: true + aliases: [ dest ] + section: + description: + - Section name in INI file. This is added if C(state=present) automatically when + a single value is being set. + - If left empty or set to C(null), the I(option) will be placed before the first I(section). + - Using C(null) is also required if the config format does not support sections. + type: str + required: true + option: + description: + - If set (required for changing a I(value)), this is the name of the option. + - May be omitted if adding/removing a whole I(section). + type: str + value: + description: + - The string value to be associated with an I(option). + - May be omitted when removing an I(option). + type: str + backup: + description: + - Create a backup file including the timestamp information so you can get + the original file back if you somehow clobbered it incorrectly. + type: bool + default: no + state: + description: + - If set to C(absent) the option or section will be removed if present instead of created. 
+    type: str
+    choices: [ absent, present ]
+    default: present
+  no_extra_spaces:
+    description:
+      - Do not insert spaces before and after '=' symbol.
+    type: bool
+    default: no
+  create:
+    description:
+      - If set to C(no), the module will fail if the file does not already exist.
+      - By default it will create the file if it is missing.
+    type: bool
+    default: yes
+  allow_no_value:
+    description:
+      - Allow option without value and without '=' symbol.
+    type: bool
+    default: no
+notes:
+   - While it is possible to add an I(option) without specifying a I(value), this makes no sense.
+   - As of Ansible 2.3, the I(dest) option has been changed to I(path) as default, but I(dest) still works as well.
+author:
+    - Jan-Piet Mens (@jpmens)
+    - Ales Nosek (@noseka1)
+'''
+
+EXAMPLES = r'''
+# Before Ansible 2.3, option 'dest' was used instead of 'path'
+- name: Ensure "fav=lemonade" is in section "[drinks]" in specified file
+  ini_file:
+    path: /etc/conf
+    section: drinks
+    option: fav
+    value: lemonade
+    mode: '0600'
+    backup: yes
+
+- name: Ensure "temperature=cold" is in section "[drinks]" in specified file
+  ini_file:
+    path: /etc/anotherconf
+    section: drinks
+    option: temperature
+    value: cold
+    backup: yes
+'''
+
+import os
+import re
+import tempfile
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def match_opt(option, line):
+    option = re.escape(option)
+    return re.match('( |\t)*%s( |\t)*(=|$)' % option, line) \
+        or re.match('#( |\t)*%s( |\t)*(=|$)' % option, line) \
+        or re.match(';( |\t)*%s( |\t)*(=|$)' % option, line)
+
+
+def match_active_opt(option, line):
+    option = re.escape(option)
+    return re.match('( |\t)*%s( |\t)*(=|$)' % option, line)
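It is worth illustrating what match_opt above accepts: a commented-out option still matches, so a disabled setting is replaced in place rather than duplicated further down the section. A small self-contained demonstration using the same regular expressions:

```python
# Mirrors the module's match_opt: an option line matches whether it is
# active or commented out with '#' or ';'.
import re


def match_opt(option, line):
    option = re.escape(option)
    return re.match('( |\t)*%s( |\t)*(=|$)' % option, line) \
        or re.match('#( |\t)*%s( |\t)*(=|$)' % option, line) \
        or re.match(';( |\t)*%s( |\t)*(=|$)' % option, line)


assert match_opt('fav', 'fav = lemonade')
assert match_opt('fav', '# fav = lemonade')    # commented-out still matches
assert not match_opt('fav', 'favourite = x')   # no partial-name match
```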
+
+
+def do_ini(module, filename, section=None, option=None, value=None,
+           state='present', backup=False, no_extra_spaces=False, create=True,
+           allow_no_value=False):
+
+    diff = dict(
+        before='',
+        after='',
+        before_header='%s (content)' % filename,
+        after_header='%s (content)' % filename,
+    )
+
+    if not os.path.exists(filename):
+        if not create:
+            module.fail_json(rc=257, msg='Destination %s does not exist!' % filename)
+        destpath = os.path.dirname(filename)
+        if not os.path.exists(destpath) and not module.check_mode:
+            os.makedirs(destpath)
+        ini_lines = []
+    else:
+        ini_file = open(filename, 'r')
+        try:
+            ini_lines = ini_file.readlines()
+        finally:
+            ini_file.close()
+
+    if module._diff:
+        diff['before'] = ''.join(ini_lines)
+
+    changed = False
+
+    # ini file could be empty
+    if not ini_lines:
+        ini_lines.append('\n')
+
+    # last line of file may not contain a trailing newline
+    if ini_lines[-1] == "" or ini_lines[-1][-1] != '\n':
+        ini_lines[-1] += '\n'
+        changed = True
+
+    # append fake section lines to simplify the logic
+    # At top:
+    # A fake random section that does not match any other in the file
+    # Using commit hash as fake section name
+    fake_section_name = "ad01e11446efb704fcdbdb21f2c43757423d91c5"
+
+    # Insert it at the beginning
+    ini_lines.insert(0, '[%s]' % fake_section_name)
+
+    # At bottom:
+    ini_lines.append('[')
+
+    # If no section is defined, fake section is used
+    if not section:
+        section = fake_section_name
+
+    within_section = not section
+    section_start = 0
+    msg = 'OK'
+    if no_extra_spaces:
+        assignment_format = '%s=%s\n'
+    else:
+        assignment_format = '%s = %s\n'
+
+    for index, line in enumerate(ini_lines):
+        if line.startswith('[%s]' % section):
+            within_section = True
+            section_start = index
+        elif line.startswith('['):
+            if within_section:
+                if state == 'present':
+                    # insert missing option line at the end of the section
+                    for i in range(index, 0, -1):
+                        # search backwards for previous non-blank or non-comment line
+                        if not re.match(r'^[ \t]*([#;].*)?$', ini_lines[i - 1]):
+                            if not value and allow_no_value:
+                                ini_lines.insert(i, '%s\n' % option)
+                            else:
+                                ini_lines.insert(i, assignment_format % (option, value))
+                            msg = 'option added'
+                            changed = True
+                            break
+                elif state == 'absent' and not option:
+                    # remove the entire section
+                    del ini_lines[section_start:index]
+                    msg = 'section removed'
+                    changed = True
+                break
+        else:
+            if within_section and option:
+                if state == 'present':
+                    # change the existing option line
+                    if match_opt(option, line):
+                        if not value and allow_no_value:
+                            newline = '%s\n' % option
+                        else:
+                            newline = assignment_format % (option, value)
+                        option_changed = ini_lines[index] != newline
+                        changed = changed or option_changed
+                        if option_changed:
+                            msg = 'option changed'
+                        ini_lines[index] = newline
+                        if option_changed:
+                            # remove all possible option occurrences from the rest of the section
+                            index = index + 1
+                            while index < len(ini_lines):
+                                line = ini_lines[index]
+                                if line.startswith('['):
+                                    break
+                                if match_active_opt(option, line):
+                                    del ini_lines[index]
+                                else:
+                                    index = index + 1
+                        break
+                elif state == 'absent':
+                    # delete the existing line
+                    if match_active_opt(option, line):
+                        del ini_lines[index]
+                        changed = True
+                        msg = 'option changed'
+                        break
+
+    # remove the fake section line
+    del ini_lines[0]
+    del ini_lines[-1:]
+
+    if not within_section and option and state == 'present':
+        ini_lines.append('[%s]\n' % section)
+        if not value and allow_no_value:
+            ini_lines.append('%s\n' % option)
+        else:
+            ini_lines.append(assignment_format % (option, value))
+        changed = True
+        msg = 'section and option added'
+
+    if module._diff:
+        diff['after'] = ''.join(ini_lines)
+
+    backup_file = None
+    if changed and not module.check_mode:
+        if backup:
+            backup_file = module.backup_local(filename)
+
+        try:
+            tmpfd, tmpfile = tempfile.mkstemp(dir=module.tmpdir)
+            f = os.fdopen(tmpfd, 'w')
+            f.writelines(ini_lines)
+            f.close()
+        except IOError:
+            module.fail_json(msg="Unable to create temporary file", traceback=traceback.format_exc())
+
+        try:
+            module.atomic_move(tmpfile, filename)
+        except IOError:
+            module.fail_json(msg='Unable to move temporary file %s to %s, IOError' % (tmpfile, filename),
+                             traceback=traceback.format_exc())
+
+    return (changed, backup_file, diff, msg)
+
+
+def main():
+
+    module = AnsibleModule(
+        argument_spec=dict(
+            path=dict(type='path', required=True, aliases=['dest']),
+            section=dict(type='str', required=True),
+            option=dict(type='str'),
+            value=dict(type='str'),
+            backup=dict(type='bool', default=False),
+            state=dict(type='str', default='present', choices=['absent', 'present']),
+            no_extra_spaces=dict(type='bool', default=False),
+            allow_no_value=dict(type='bool', default=False),
+            create=dict(type='bool', default=True)
+        ),
+        add_file_common_args=True,
+        supports_check_mode=True,
+    )
+
+    path = module.params['path']
+    section = module.params['section']
+    option = module.params['option']
+    value = module.params['value']
+    state = module.params['state']
+    backup = module.params['backup']
+    no_extra_spaces = module.params['no_extra_spaces']
+    allow_no_value = module.params['allow_no_value']
+    create = module.params['create']
+
+    (changed, backup_file, diff, msg) = do_ini(module, path, section, option, value, state, backup, no_extra_spaces, create, allow_no_value)
+
+    if not module.check_mode and os.path.exists(path):
+        file_args = module.load_file_common_arguments(module.params)
+        changed = module.set_fs_attributes_if_different(file_args, changed)
+
+    results = dict(
+        changed=changed,
+        diff=diff,
+        msg=msg,
+        path=path,
+    )
+    if backup_file is not None:
+        results['backup_file'] = backup_file
+
+    # Mission complete
+    module.exit_json(**results)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/files/iso_extract.py b/plugins/modules/files/iso_extract.py
new file mode 100644
index 0000000000..18e304e2f5
--- /dev/null
+++ b/plugins/modules/files/iso_extract.py
@@ -0,0 +1,220 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2013, Jeroen Hoekx
+# Copyright: (c) 2016, Matt Robinson
+# Copyright: (c) 2017, Dag Wieers
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+DOCUMENTATION = r'''
+---
+author:
+- Jeroen Hoekx (@jhoekx)
+- Matt Robinson (@ribbons)
+- Dag Wieers (@dagwieers)
+module: iso_extract
+short_description: Extract files from an ISO image
+description:
+- This module has two possible ways of operation.
+- If 7zip is installed on the system, this module extracts files from an ISO
+  into a temporary directory and copies files to a given destination,
+  if needed.
+- If the user has mount-capabilities (CAP_SYS_ADMIN on Linux) this module
+  mounts the ISO image to a temporary location, and copies files to a given
+  destination, if needed.
+requirements:
+- Either 7z (from I(7zip) or I(p7zip) package)
+- Or mount capabilities (root-access, or CAP_SYS_ADMIN capability on Linux)
+options:
+  image:
+    description:
+    - The ISO image to extract files from.
+    type: path
+    required: yes
+    aliases: [ path, src ]
+  dest:
+    description:
+    - The destination directory to extract files to.
+    type: path
+    required: yes
+  files:
+    description:
+    - A list of files to extract from the image.
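The 7zip path described above boils down to a single extraction command. A rough standalone sketch, assuming a 7z binary on PATH (the module itself also falls back to a loop mount when no binary is found; names and paths here are illustrative):

```python
# Standalone sketch of the 7z extraction strategy this module uses.
import subprocess
import tempfile

try:  # python 3.3+
    from shlex import quote
except ImportError:  # older python
    from pipes import quote


def extract_from_iso(image, files, executable='7z'):
    tmp_dir = tempfile.mkdtemp()
    cmd = '%s x "%s" -o"%s" %s' % (
        executable, image, tmp_dir, ' '.join(quote(f) for f in files))
    subprocess.check_call(cmd, shell=True)
    return tmp_dir  # extracted files land here before being copied to dest
```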
+ - Extracting directories does not work. + type: list + required: yes + force: + description: + - If C(yes), which will replace the remote file when contents are different than the source. + - If C(no), the file will only be extracted and copied if the destination does not already exist. + - Alias C(thirsty) has been deprecated and will be removed in 2.13. + type: bool + default: yes + aliases: [ thirsty ] + executable: + description: + - The path to the C(7z) executable to use for extracting files from the ISO. + type: path + default: '7z' +notes: +- Only the file checksum (content) is taken into account when extracting files + from the ISO image. If C(force=no), only checks the presence of the file. +- In Ansible 2.3 this module was using C(mount) and C(umount) commands only, + requiring root access. This is no longer needed with the introduction of 7zip + for extraction. +''' + +EXAMPLES = r''' +- name: Extract kernel and ramdisk from a LiveCD + iso_extract: + image: /tmp/rear-test.iso + dest: /tmp/virt-rear/ + files: + - isolinux/kernel + - isolinux/initrd.cgz +''' + +RETURN = r''' +# +''' + +import os.path +import shutil +import tempfile + +try: # python 3.3+ + from shlex import quote +except ImportError: # older python + from pipes import quote + +from ansible.module_utils.basic import AnsibleModule + + +def main(): + module = AnsibleModule( + argument_spec=dict( + image=dict(type='path', required=True, aliases=['path', 'src']), + dest=dict(type='path', required=True), + files=dict(type='list', required=True), + force=dict(type='bool', default=True, aliases=['thirsty']), + executable=dict(type='path'), # No default on purpose + ), + supports_check_mode=True, + ) + image = module.params['image'] + dest = module.params['dest'] + files = module.params['files'] + force = module.params['force'] + executable = module.params['executable'] + + if module.params.get('thirsty'): + module.deprecate('The alias "thirsty" has been deprecated and will be removed, use "force" instead', version='2.13') + + result = dict( + changed=False, + dest=dest, + image=image, + ) + + # We want to know if the user provided it or not, so we set default here + if executable is None: + executable = '7z' + + binary = module.get_bin_path(executable, None) + + # When executable was provided and binary not found, warn user ! + if module.params['executable'] is not None and not binary: + module.warn("Executable '%s' is not found on the system, trying to mount ISO instead." 
% executable) + + if not os.path.exists(dest): + module.fail_json(msg="Directory '%s' does not exist" % dest) + + if not os.path.exists(os.path.dirname(image)): + module.fail_json(msg="ISO image '%s' does not exist" % image) + + result['files'] = [] + extract_files = list(files) + + if not force: + # Check if we have to process any files based on existence + for f in files: + dest_file = os.path.join(dest, os.path.basename(f)) + if os.path.exists(dest_file): + result['files'].append(dict( + checksum=None, + dest=dest_file, + src=f, + )) + extract_files.remove(f) + + if not extract_files: + module.exit_json(**result) + + tmp_dir = tempfile.mkdtemp() + + # Use 7zip when we have a binary, otherwise try to mount + if binary: + cmd = '%s x "%s" -o"%s" %s' % (binary, image, tmp_dir, ' '.join([quote(f) for f in extract_files])) + else: + cmd = 'mount -o loop,ro "%s" "%s"' % (image, tmp_dir) + + rc, out, err = module.run_command(cmd) + if rc != 0: + result.update(dict( + cmd=cmd, + rc=rc, + stderr=err, + stdout=out, + )) + shutil.rmtree(tmp_dir) + + if binary: + module.fail_json(msg="Failed to extract from ISO image '%s' to '%s'" % (image, tmp_dir), **result) + else: + module.fail_json(msg="Failed to mount ISO image '%s' to '%s', and we could not find executable '%s'." % (image, tmp_dir, executable), **result) + + try: + for f in extract_files: + tmp_src = os.path.join(tmp_dir, f) + if not os.path.exists(tmp_src): + module.fail_json(msg="Failed to extract '%s' from ISO image" % f, **result) + + src_checksum = module.sha1(tmp_src) + + dest_file = os.path.join(dest, os.path.basename(f)) + + if os.path.exists(dest_file): + dest_checksum = module.sha1(dest_file) + else: + dest_checksum = None + + result['files'].append(dict( + checksum=src_checksum, + dest=dest_file, + src=f, + )) + + if src_checksum != dest_checksum: + if not module.check_mode: + shutil.copy(tmp_src, dest_file) + + result['changed'] = True + finally: + if not binary: + module.run_command('umount "%s"' % tmp_dir) + + shutil.rmtree(tmp_dir) + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/files/patch.py b/plugins/modules/files/patch.py new file mode 100644 index 0000000000..fdd046b5c3 --- /dev/null +++ b/plugins/modules/files/patch.py @@ -0,0 +1,214 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2012, Luis Alberto Perez Lazaro +# Copyright: (c) 2015, Jakub Jirutka +# Copyright: (c) 2017, Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['stableinterface'], + 'supported_by': 'community'} + +DOCUMENTATION = r''' +--- +module: patch +author: + - Jakub Jirutka (@jirutka) + - Luis Alberto Perez Lazaro (@luisperlaz) +description: + - Apply patch files using the GNU patch tool. +short_description: Apply patch files using the GNU patch tool +options: + basedir: + description: + - Path of a base directory in which the patch file will be applied. + - May be omitted when C(dest) option is specified, otherwise required. + type: path + dest: + description: + - Path of the file on the remote machine to be patched. + - The names of the files to be patched are usually taken from the patch + file, but if there's just one file to be patched it can specified with + this option. 
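The state handling described above relies on a neat idempotency trick implemented further down in is_already_applied(): a patch counts as applied when reversing it succeeds in a dry run. A minimal standalone sketch of the same idea, assuming GNU patch on PATH (function name here is illustrative):

```python
# A patch is considered already applied when it can be reversed cleanly
# in a dry run; a simplified mirror of is_already_applied() below.
import subprocess


def is_applied(patch_file, basedir, strip=0):
    cmd = ['patch', '--quiet', '--forward', '--dry-run', '--reverse',
           '--strip=%d' % strip, '--directory=%s' % basedir,
           '--input=%s' % patch_file]
    return subprocess.call(cmd) == 0
```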
+ type: path + aliases: [ originalfile ] + src: + description: + - Path of the patch file as accepted by the GNU patch tool. If + C(remote_src) is 'no', the patch source file is looked up from the + module's I(files) directory. + type: path + required: true + aliases: [ patchfile ] + state: + description: + - Whether the patch should be applied or reverted. + type: str + choices: [ absent, present ] + default: present + remote_src: + description: + - If C(no), it will search for src at originating/master machine, if C(yes) it will + go to the remote/target machine for the C(src). + type: bool + default: no + strip: + description: + - Number that indicates the smallest prefix containing leading slashes + that will be stripped from each file name found in the patch file. + - For more information see the strip parameter of the GNU patch tool. + type: int + default: 0 + backup: + description: + - Passes C(--backup --version-control=numbered) to patch, producing numbered backup copies. + type: bool + default: no + binary: + description: + - Setting to C(yes) will disable patch's heuristic for transforming CRLF + line endings into LF. + - Line endings of src and dest must match. + - If set to C(no), C(patch) will replace CRLF in C(src) files on POSIX. + type: bool + default: no +notes: + - This module requires GNU I(patch) utility to be installed on the remote host. +''' + +EXAMPLES = r''' +- name: Apply patch to one file + patch: + src: /tmp/index.html.patch + dest: /var/www/index.html + +- name: Apply patch to multiple files under basedir + patch: + src: /tmp/customize.patch + basedir: /var/www + strip: 1 + +- name: Revert patch to one file + patch: + src: /tmp/index.html.patch + dest: /var/www/index.html + state: absent +''' + +import os +import platform +from traceback import format_exc +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_native + + +class PatchError(Exception): + pass + + +def add_dry_run_option(opts): + # Older versions of FreeBSD, OpenBSD and NetBSD support the --check option only. 
+ if platform.system().lower() in ['openbsd', 'netbsd', 'freebsd']: + opts.append('--check') + else: + opts.append('--dry-run') + + +def is_already_applied(patch_func, patch_file, basedir, dest_file=None, binary=False, strip=0, state='present'): + opts = ['--quiet', '--forward', + "--strip=%s" % strip, "--directory='%s'" % basedir, + "--input='%s'" % patch_file] + add_dry_run_option(opts) + if binary: + opts.append('--binary') + if dest_file: + opts.append("'%s'" % dest_file) + if state == 'present': + opts.append('--reverse') + + (rc, _, _) = patch_func(opts) + return rc == 0 + + +def apply_patch(patch_func, patch_file, basedir, dest_file=None, binary=False, strip=0, dry_run=False, backup=False, state='present'): + opts = ['--quiet', '--forward', '--batch', '--reject-file=-', + "--strip=%s" % strip, "--directory='%s'" % basedir, + "--input='%s'" % patch_file] + if dry_run: + add_dry_run_option(opts) + if binary: + opts.append('--binary') + if dest_file: + opts.append("'%s'" % dest_file) + if backup: + opts.append('--backup --version-control=numbered') + if state == 'absent': + opts.append('--reverse') + + (rc, out, err) = patch_func(opts) + if rc != 0: + msg = err or out + raise PatchError(msg) + + +def main(): + module = AnsibleModule( + argument_spec=dict( + src=dict(type='path', required=True, aliases=['patchfile']), + dest=dict(type='path', aliases=['originalfile']), + basedir=dict(type='path'), + strip=dict(type='int', default=0), + remote_src=dict(type='bool', default=False), + # NB: for 'backup' parameter, semantics is slightly different from standard + # since patch will create numbered copies, not strftime("%Y-%m-%d@%H:%M:%S~") + backup=dict(type='bool', default=False), + binary=dict(type='bool', default=False), + state=dict(type='str', default='present', choices=['absent', 'present']), + ), + required_one_of=[['dest', 'basedir']], + supports_check_mode=True, + ) + + # Create type object as namespace for module params + p = type('Params', (), module.params) + + if not os.access(p.src, os.R_OK): + module.fail_json(msg="src %s doesn't exist or not readable" % (p.src)) + + if p.dest and not os.access(p.dest, os.W_OK): + module.fail_json(msg="dest %s doesn't exist or not writable" % (p.dest)) + + if p.basedir and not os.path.exists(p.basedir): + module.fail_json(msg="basedir %s doesn't exist" % (p.basedir)) + + if not p.basedir: + p.basedir = os.path.dirname(p.dest) + + patch_bin = module.get_bin_path('patch') + if patch_bin is None: + module.fail_json(msg="patch command not found") + + def patch_func(opts): + return module.run_command('%s %s' % (patch_bin, ' '.join(opts))) + + # patch need an absolute file name + p.src = os.path.abspath(p.src) + + changed = False + if not is_already_applied(patch_func, p.src, p.basedir, dest_file=p.dest, binary=p.binary, strip=p.strip, state=p.state): + try: + apply_patch(patch_func, p.src, p.basedir, dest_file=p.dest, binary=p.binary, strip=p.strip, + dry_run=module.check_mode, backup=p.backup, state=p.state) + changed = True + except PatchError as e: + module.fail_json(msg=to_native(e), exception=format_exc()) + + module.exit_json(changed=changed) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/files/read_csv.py b/plugins/modules/files/read_csv.py new file mode 100644 index 0000000000..fd2eedea63 --- /dev/null +++ b/plugins/modules/files/read_csv.py @@ -0,0 +1,244 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2018, Dag Wieers (@dagwieers) +# GNU General Public License v3.0+ (see COPYING or 
https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = r''' +--- +module: read_csv +short_description: Read a CSV file +description: +- Read a CSV file and return a list or a dictionary, containing one dictionary per row. +author: +- Dag Wieers (@dagwieers) +options: + path: + description: + - The CSV filename to read data from. + type: path + required: yes + aliases: [ filename ] + key: + description: + - The column name used as a key for the resulting dictionary. + - If C(key) is unset, the module returns a list of dictionaries, + where each dictionary is a row in the CSV file. + type: str + dialect: + description: + - The CSV dialect to use when parsing the CSV file. + - Possible values include C(excel), C(excel-tab) or C(unix). + type: str + default: excel + fieldnames: + description: + - A list of field names for every column. + - This is needed if the CSV does not have a header. + type: list + unique: + description: + - Whether the C(key) used is expected to be unique. + type: bool + default: yes + delimiter: + description: + - A one-character string used to separate fields. + - When using this parameter, you change the default value used by C(dialect). + - The default value depends on the dialect used. + type: str + skipinitialspace: + description: + - Whether to ignore any whitespaces immediately following the delimiter. + - When using this parameter, you change the default value used by C(dialect). + - The default value depends on the dialect used. + type: bool + strict: + description: + - Whether to raise an exception on bad CSV input. + - When using this parameter, you change the default value used by C(dialect). + - The default value depends on the dialect used. + type: bool +notes: +- Ansible also ships with the C(csvfile) lookup plugin, which can be used to do selective lookups in CSV files from Jinja. +''' + +EXAMPLES = r''' +# Example CSV file with header +# +# name,uid,gid +# dag,500,500 +# jeroen,501,500 + +# Read a CSV file and access user 'dag' +- name: Read users from CSV file and return a dictionary + read_csv: + path: users.csv + key: name + register: users + delegate_to: localhost + +- debug: + msg: 'User {{ users.dict.dag.name }} has UID {{ users.dict.dag.uid }} and GID {{ users.dict.dag.gid }}' + +# Read a CSV file and access the first item +- name: Read users from CSV file and return a list + read_csv: + path: users.csv + register: users + delegate_to: localhost + +- debug: + msg: 'User {{ users.list.1.name }} has UID {{ users.list.1.uid }} and GID {{ users.list.1.gid }}' + +# Example CSV file without header and semi-colon delimiter +# +# dag;500;500 +# jeroen;501;500 + +# Read a CSV file without headers +- name: Read users from CSV file and return a list + read_csv: + path: users.csv + fieldnames: name,uid,gid + delimiter: ';' + register: users + delegate_to: localhost +''' + +RETURN = r''' +dict: + description: The CSV content as a dictionary. + returned: success + type: dict + sample: + dag: + name: dag + uid: 500 + gid: 500 + jeroen: + name: jeroen + uid: 501 + gid: 500 +list: + description: The CSV content as a list. 
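Under the hood this module is a thin wrapper around csv.DictReader. A minimal Python 3 sketch of the key-based dictionary result shown in the examples above (file content and field names are illustrative):

```python
# Minimal sketch of the DictReader-based parsing this module performs,
# keyed on a column as in the 'key: name' example above (Python 3).
import csv
from io import StringIO

data = StringIO('name,uid,gid\ndag,500,500\njeroen,501,500\n')
reader = csv.DictReader(data, dialect='excel')
users = dict((row['name'], row) for row in reader)
print(users['dag']['uid'])  # -> 500
```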
+ returned: success + type: list + sample: + - name: dag + uid: 500 + gid: 500 + - name: jeroen + uid: 501 + gid: 500 +''' + +import csv +from io import BytesIO, StringIO + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_text +from ansible.module_utils.six import PY3 + + +# Add Unix dialect from Python 3 +class unix_dialect(csv.Dialect): + """Describe the usual properties of Unix-generated CSV files.""" + delimiter = ',' + quotechar = '"' + doublequote = True + skipinitialspace = False + lineterminator = '\n' + quoting = csv.QUOTE_ALL + + +csv.register_dialect("unix", unix_dialect) + + +def main(): + module = AnsibleModule( + argument_spec=dict( + path=dict(type='path', required=True, aliases=['filename']), + dialect=dict(type='str', default='excel'), + key=dict(type='str'), + fieldnames=dict(type='list'), + unique=dict(type='bool', default=True), + delimiter=dict(type='str'), + skipinitialspace=dict(type='bool'), + strict=dict(type='bool'), + ), + supports_check_mode=True, + ) + + path = module.params['path'] + dialect = module.params['dialect'] + key = module.params['key'] + fieldnames = module.params['fieldnames'] + unique = module.params['unique'] + + if dialect not in csv.list_dialects(): + module.fail_json(msg="Dialect '%s' is not supported by your version of python." % dialect) + + dialect_options = dict( + delimiter=module.params['delimiter'], + skipinitialspace=module.params['skipinitialspace'], + strict=module.params['strict'], + ) + + # Create a dictionary from only set options + dialect_params = dict((k, v) for k, v in dialect_options.items() if v is not None) + if dialect_params: + try: + csv.register_dialect('custom', dialect, **dialect_params) + except TypeError as e: + module.fail_json(msg="Unable to create custom dialect: %s" % to_text(e)) + dialect = 'custom' + + try: + with open(path, 'rb') as f: + data = f.read() + except (IOError, OSError) as e: + module.fail_json(msg="Unable to open file: %s" % to_text(e)) + + if PY3: + # Manually decode on Python3 so that we can use the surrogateescape error handler + data = to_text(data, errors='surrogate_or_strict') + fake_fh = StringIO(data) + else: + fake_fh = BytesIO(data) + + reader = csv.DictReader(fake_fh, fieldnames=fieldnames, dialect=dialect) + + if key and key not in reader.fieldnames: + module.fail_json(msg="Key '%s' was not found in the CSV header fields: %s" % (key, ', '.join(reader.fieldnames))) + + data_dict = dict() + data_list = list() + + if key is None: + try: + for row in reader: + data_list.append(row) + except csv.Error as e: + module.fail_json(msg="Unable to process file: %s" % to_text(e)) + else: + try: + for row in reader: + if unique and row[key] in data_dict: + module.fail_json(msg="Key '%s' is not unique for value '%s'" % (key, row[key])) + data_dict[row[key]] = row + except csv.Error as e: + module.fail_json(msg="Unable to process file: %s" % to_text(e)) + + module.exit_json(dict=data_dict, list=data_list) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/files/xattr.py b/plugins/modules/files/xattr.py new file mode 100644 index 0000000000..c608b01d68 --- /dev/null +++ b/plugins/modules/files/xattr.py @@ -0,0 +1,241 @@ +#!/usr/bin/python + +# Copyright: (c) 2017, Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': 
['stableinterface'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: xattr +short_description: Manage user defined extended attributes +description: + - Manages filesystem user defined extended attributes. + - Requires that extended attributes are enabled on the target filesystem + and that the setfattr/getfattr utilities are present. +options: + path: + description: + - The full path of the file/object to get the facts of. + - Before 2.3 this option was only usable as I(name). + type: path + required: true + aliases: [ name ] + namespace: + description: + - Namespace of the named name/key. + type: str + default: user + key: + description: + - The name of a specific Extended attribute key to set/retrieve. + type: str + value: + description: + - The value to set the named name/key to, it automatically sets the C(state) to 'set'. + type: str + state: + description: + - defines which state you want to do. + C(read) retrieves the current value for a C(key) (default) + C(present) sets C(name) to C(value), default if value is set + C(all) dumps all data + C(keys) retrieves all keys + C(absent) deletes the key + type: str + choices: [ absent, all, keys, present, read ] + default: read + follow: + description: + - If C(yes), dereferences symlinks and sets/gets attributes on symlink target, + otherwise acts on symlink itself. + type: bool + default: yes +notes: + - As of Ansible 2.3, the I(name) option has been changed to I(path) as default, but I(name) still works as well. +author: +- Brian Coca (@bcoca) +''' + +EXAMPLES = ''' +- name: Obtain the extended attributes of /etc/foo.conf + xattr: + path: /etc/foo.conf + +- name: Set the key 'user.foo' to value 'bar' + xattr: + path: /etc/foo.conf + key: foo + value: bar + +- name: Set the key 'trusted.glusterfs.volume-id' to value '0x817b94343f164f199e5b573b4ea1f914' + xattr: + path: /mnt/bricks/brick1 + namespace: trusted + key: glusterfs.volume-id + value: "0x817b94343f164f199e5b573b4ea1f914" + +- name: Remove the key 'user.foo' + xattr: + path: /etc/foo.conf + key: foo + state: absent + +- name: Remove the key 'trusted.glusterfs.volume-id' + xattr: + path: /mnt/bricks/brick1 + namespace: trusted + key: glusterfs.volume-id + state: absent +''' + +import os + +# import module snippets +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_native + + +def get_xattr_keys(module, path, follow): + cmd = [module.get_bin_path('getfattr', True)] + # prevents warning and not sure why it's not default + cmd.append('--absolute-names') + if not follow: + cmd.append('-h') + cmd.append(path) + + return _run_xattr(module, cmd) + + +def get_xattr(module, path, key, follow): + + cmd = [module.get_bin_path('getfattr', True)] + # prevents warning and not sure why it's not default + cmd.append('--absolute-names') + if not follow: + cmd.append('-h') + if key is None: + cmd.append('-d') + else: + cmd.append('-n %s' % key) + cmd.append(path) + + return _run_xattr(module, cmd, False) + + +def set_xattr(module, path, key, value, follow): + + cmd = [module.get_bin_path('setfattr', True)] + if not follow: + cmd.append('-h') + cmd.append('-n %s' % key) + cmd.append('-v %s' % value) + cmd.append(path) + + return _run_xattr(module, cmd) + + +def rm_xattr(module, path, key, follow): + + cmd = [module.get_bin_path('setfattr', True)] + if not follow: + cmd.append('-h') + cmd.append('-x %s' % key) + cmd.append(path) + + return _run_xattr(module, cmd, False) + + +def _run_xattr(module, cmd, check_rc=True): + + try: + (rc, 
out, err) = module.run_command(' '.join(cmd), check_rc=check_rc) + except Exception as e: + module.fail_json(msg="%s!" % to_native(e)) + + # result = {'raw': out} + result = {} + for line in out.splitlines(): + if line.startswith('#') or line == '': + pass + elif '=' in line: + (key, val) = line.split('=') + result[key] = val.strip('"') + else: + result[line] = '' + return result + + +def main(): + module = AnsibleModule( + argument_spec=dict( + path=dict(type='path', required=True, aliases=['name']), + namespace=dict(type='str', default='user'), + key=dict(type='str'), + value=dict(type='str'), + state=dict(type='str', default='read', choices=['absent', 'all', 'keys', 'present', 'read']), + follow=dict(type='bool', default=True), + ), + supports_check_mode=True, + ) + path = module.params.get('path') + namespace = module.params.get('namespace') + key = module.params.get('key') + value = module.params.get('value') + state = module.params.get('state') + follow = module.params.get('follow') + + if not os.path.exists(path): + module.fail_json(msg="path not found or not accessible!") + + changed = False + msg = "" + res = {} + + if key is None and state in ['absent', 'present']: + module.fail_json(msg="%s needs a key parameter" % state) + + # Prepend the key with the namespace if defined + if ( + key is not None and + namespace is not None and + len(namespace) > 0 and + not (namespace == 'user' and key.startswith('user.'))): + key = '%s.%s' % (namespace, key) + + if (state == 'present' or value is not None): + current = get_xattr(module, path, key, follow) + if current is None or key not in current or value != current[key]: + if not module.check_mode: + res = set_xattr(module, path, key, value, follow) + changed = True + res = current + msg = "%s set to %s" % (key, value) + elif state == 'absent': + current = get_xattr(module, path, key, follow) + if current is not None and key in current: + if not module.check_mode: + res = rm_xattr(module, path, key, follow) + changed = True + res = current + msg = "%s removed" % (key) + elif state == 'keys': + res = get_xattr_keys(module, path, follow) + msg = "returning all keys" + elif state == 'all': + res = get_xattr(module, path, None, follow) + msg = "dumping all" + else: + res = get_xattr(module, path, key, follow) + msg = "returning %s" % key + + module.exit_json(changed=changed, msg=msg, xattr=res) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/files/xml.py b/plugins/modules/files/xml.py new file mode 100644 index 0000000000..c9a0a874b8 --- /dev/null +++ b/plugins/modules/files/xml.py @@ -0,0 +1,961 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2014, Red Hat, Inc. +# Copyright: (c) 2014, Tim Bielawa +# Copyright: (c) 2014, Magnus Hedemark +# Copyright: (c) 2017, Dag Wieers +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = r''' +--- +module: xml +short_description: Manage bits and pieces of XML files or strings +description: +- A CRUD-like interface to managing bits of XML files. +options: + path: + description: + - Path to the file to operate on. + - This file must exist ahead of time. + - This parameter is required, unless C(xmlstring) is given. 
+ type: path
+ required: yes
+ aliases: [ dest, file ]
+ xmlstring:
+ description:
+ - A string containing XML on which to operate.
+ - This parameter is required, unless C(path) is given.
+ type: str
+ required: yes
+ xpath:
+ description:
+ - A valid XPath expression describing the item(s) you want to manipulate.
+ - Operates on the document root, C(/), by default.
+ type: str
+ namespaces:
+ description:
+ - The namespace C(prefix:uri) mapping for the XPath expression.
+ - Needs to be a C(dict), not a C(list) of items.
+ type: dict
+ state:
+ description:
+ - Set or remove an xpath selection (node(s), attribute(s)).
+ type: str
+ choices: [ absent, present ]
+ default: present
+ aliases: [ ensure ]
+ attribute:
+ description:
+ - The attribute to select when using parameter C(value).
+ - This is a string, not prepended with C(@).
+ type: raw
+ value:
+ description:
+ - Desired state of the selected attribute.
+ - Either a string, or to unset a value, the Python C(None) keyword (YAML equivalent, C(null)).
+ - Elements default to no value (but present).
+ - Attributes default to an empty string.
+ type: raw
+ add_children:
+ description:
+ - Add additional child-element(s) to a selected element for a given C(xpath).
+ - Child elements must be given in a list and each item may be either a string
+ (e.g. C(children=ansible) to add an empty C(<ansible/>) child element),
+ or a hash where the key is an element name and the value is the element value.
+ - This parameter requires C(xpath) to be set.
+ type: list
+ set_children:
+ description:
+ - Set the child-element(s) of a selected element for a given C(xpath).
+ - Removes any existing children.
+ - Child elements must be specified as in C(add_children).
+ - This parameter requires C(xpath) to be set.
+ type: list
+ count:
+ description:
+ - Search for a given C(xpath) and provide the count of any matches.
+ - This parameter requires C(xpath) to be set.
+ type: bool
+ default: no
+ print_match:
+ description:
+ - Search for a given C(xpath) and print out any matches.
+ - This parameter requires C(xpath) to be set.
+ type: bool
+ default: no
+ pretty_print:
+ description:
+ - Pretty print XML output.
+ type: bool
+ default: no
+ content:
+ description:
+ - Search for a given C(xpath) and get content.
+ - This parameter requires C(xpath) to be set.
+ type: str
+ choices: [ attribute, text ]
+ input_type:
+ description:
+ - Type of input for C(add_children) and C(set_children).
+ type: str
+ choices: [ xml, yaml ]
+ default: yaml
+ backup:
+ description:
+ - Create a backup file including the timestamp information so you can get
+ the original file back if you somehow clobbered it incorrectly.
+ type: bool
+ default: no
+ strip_cdata_tags:
+ description:
+ - Remove CDATA tags surrounding text values.
+ - Note that this might break your XML file if text values contain characters that could be interpreted as XML.
+ type: bool
+ default: no
+ insertbefore:
+ description:
+ - Add additional child-element(s) before the first selected element for a given C(xpath).
+ - Child elements must be given in a list and each item may be either a string
+ (e.g. C(children=ansible) to add an empty C(<ansible/>) child element),
+ or a hash where the key is an element name and the value is the element value.
+ - This parameter requires C(xpath) to be set.
+ type: bool
+ default: no
+ insertafter:
+ description:
+ - Add additional child-element(s) after the last selected element for a given C(xpath).
+ - Child elements must be given in a list and each item may be either a string
+ (e.g. C(children=ansible) to add an empty C(<ansible/>) child element),
+ or a hash where the key is an element name and the value is the element value.
+ - This parameter requires C(xpath) to be set.
+ type: bool
+ default: no
+requirements:
+- lxml >= 2.3.0
+notes:
+- Use the C(--check) and C(--diff) options when testing your expressions.
+- The diff output is automatically pretty-printed, so may not reflect the actual file content, only the file structure.
+- This module does not handle complicated xpath expressions, so limit xpath selectors to simple expressions.
+- Beware that in case your XML elements are namespaced, you need to use the C(namespaces) parameter, see the examples.
+- Namespace prefixes should be used for all children of an element where a namespace is defined, unless another namespace is defined for them.
+seealso:
+- name: Xml module development community wiki
+ description: More information related to the development of this xml module.
+ link: https://github.com/ansible/community/wiki/Module:-xml
+- name: Introduction to XPath
+ description: A brief tutorial on XPath (w3schools.com).
+ link: https://www.w3schools.com/xml/xpath_intro.asp
+- name: XPath Reference document
+ description: The reference documentation on XSLT/XPath (developer.mozilla.org).
+ link: https://developer.mozilla.org/en-US/docs/Web/XPath
+author:
+- Tim Bielawa (@tbielawa)
+- Magnus Hedemark (@magnus919)
+- Dag Wieers (@dagwieers)
+'''
+
+EXAMPLES = r'''
+# Consider the following XML file:
+#
+# <business type="bar">
+#   <name>Tasty Beverage Co.</name>
+#   <beers>
+#     <beer>Rochefort 10</beer>
+#     <beer>St. Bernardus Abbot 12</beer>
+#     <beer>Schlitz</beer>
+#   </beers>
+#   <rating subjective="true">10</rating>
+#   <website>
+#     <mobilefriendly/>
+#     <address>http://tastybeverageco.com</address>
+#   </website>
+# </business>
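+
+# The add_children/set_children options above take a list of strings or
+# single-key hashes. The next task is an editorial sketch (not part of the
+# original examples), reusing the sample file above:
+- name: Replace all children of the 'beers' element with a fixed list
+ xml:
+ path: /foo/bar.xml
+ xpath: /business/beers
+ set_children:
+ - beer: Old Rasputin
+ - beer: Schlitz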
+ +- name: Remove the 'subjective' attribute of the 'rating' element + xml: + path: /foo/bar.xml + xpath: /business/rating/@subjective + state: absent + +- name: Set the rating to '11' + xml: + path: /foo/bar.xml + xpath: /business/rating + value: 11 + +# Retrieve and display the number of nodes +- name: Get count of 'beers' nodes + xml: + path: /foo/bar.xml + xpath: /business/beers/beer + count: yes + register: hits + +- debug: + var: hits.count + +# Example where parent XML nodes are created automatically +- name: Add a 'phonenumber' element to the 'business' element + xml: + path: /foo/bar.xml + xpath: /business/phonenumber + value: 555-555-1234 + +- name: Add several more beers to the 'beers' element + xml: + path: /foo/bar.xml + xpath: /business/beers + add_children: + - beer: Old Rasputin + - beer: Old Motor Oil + - beer: Old Curmudgeon + +- name: Add several more beers to the 'beers' element and add them before the 'Rochefort 10' element + xml: + path: /foo/bar.xml + xpath: '/business/beers/beer[text()="Rochefort 10"]' + insertbefore: yes + add_children: + - beer: Old Rasputin + - beer: Old Motor Oil + - beer: Old Curmudgeon + +# NOTE: The 'state' defaults to 'present' and 'value' defaults to 'null' for elements +- name: Add a 'validxhtml' element to the 'website' element + xml: + path: /foo/bar.xml + xpath: /business/website/validxhtml + +- name: Add an empty 'validatedon' attribute to the 'validxhtml' element + xml: + path: /foo/bar.xml + xpath: /business/website/validxhtml/@validatedon + +- name: Add or modify an attribute, add element if needed + xml: + path: /foo/bar.xml + xpath: /business/website/validxhtml + attribute: validatedon + value: 1976-08-05 + +# How to read an attribute value and access it in Ansible +- name: Read an element's attribute values + xml: + path: /foo/bar.xml + xpath: /business/website/validxhtml + content: attribute + register: xmlresp + +- name: Show an attribute value + debug: + var: xmlresp.matches[0].validxhtml.validatedon + +- name: Remove all children from the 'website' element (option 1) + xml: + path: /foo/bar.xml + xpath: /business/website/* + state: absent + +- name: Remove all children from the 'website' element (option 2) + xml: + path: /foo/bar.xml + xpath: /business/website + children: [] + +# In case of namespaces, like in below XML, they have to be explicitly stated. +# +# +# +# +# +# + +# NOTE: There is the prefix 'x' in front of the 'bar' element, too. +- name: Set namespaced '/x:foo/x:bar/y:baz/@z:my_namespaced_attribute' to 'false' + xml: + path: foo.xml + xpath: /x:foo/x:bar/y:baz + namespaces: + x: http://x.test + y: http://y.test + z: http://z.test + attribute: z:my_namespaced_attribute + value: 'false' +''' + +RETURN = r''' +actions: + description: A dictionary with the original xpath, namespaces and state. + type: dict + returned: success + sample: {xpath: xpath, namespaces: [namespace1, namespace2], state=present} +backup_file: + description: The name of the backup file that was created + type: str + returned: when backup=yes + sample: /path/to/file.xml.1942.2017-08-24@14:16:01~ +count: + description: The count of xpath matches. + type: int + returned: when parameter 'count' is set + sample: 2 +matches: + description: The xpath matches found. + type: list + returned: when parameter 'print_match' is set +msg: + description: A message related to the performed action(s). + type: str + returned: always +xmlstring: + description: An XML string of the resulting output. 
+ type: str + returned: when parameter 'xmlstring' is set +''' + +import copy +import json +import os +import re +import traceback + +from distutils.version import LooseVersion +from io import BytesIO + +LXML_IMP_ERR = None +try: + from lxml import etree, objectify + HAS_LXML = True +except ImportError: + LXML_IMP_ERR = traceback.format_exc() + HAS_LXML = False + +from ansible.module_utils.basic import AnsibleModule, json_dict_bytes_to_unicode, missing_required_lib +from ansible.module_utils.six import iteritems, string_types +from ansible.module_utils._text import to_bytes, to_native +from ansible.module_utils.common._collections_compat import MutableMapping + +_IDENT = r"[a-zA-Z-][a-zA-Z0-9_\-\.]*" +_NSIDENT = _IDENT + "|" + _IDENT + ":" + _IDENT +# Note: we can't reasonably support the 'if you need to put both ' and " in a string, concatenate +# strings wrapped by the other delimiter' XPath trick, especially as simple XPath. +_XPSTR = "('(?:.*)'|\"(?:.*)\")" + +_RE_SPLITSIMPLELAST = re.compile("^(.*)/(" + _NSIDENT + ")$") +_RE_SPLITSIMPLELASTEQVALUE = re.compile("^(.*)/(" + _NSIDENT + ")/text\\(\\)=" + _XPSTR + "$") +_RE_SPLITSIMPLEATTRLAST = re.compile("^(.*)/(@(?:" + _NSIDENT + "))$") +_RE_SPLITSIMPLEATTRLASTEQVALUE = re.compile("^(.*)/(@(?:" + _NSIDENT + "))=" + _XPSTR + "$") +_RE_SPLITSUBLAST = re.compile("^(.*)/(" + _NSIDENT + ")\\[(.*)\\]$") +_RE_SPLITONLYEQVALUE = re.compile("^(.*)/text\\(\\)=" + _XPSTR + "$") + + +def has_changed(doc): + orig_obj = etree.tostring(objectify.fromstring(etree.tostring(orig_doc))) + obj = etree.tostring(objectify.fromstring(etree.tostring(doc))) + return (orig_obj != obj) + + +def do_print_match(module, tree, xpath, namespaces): + match = tree.xpath(xpath, namespaces=namespaces) + match_xpaths = [] + for m in match: + match_xpaths.append(tree.getpath(m)) + match_str = json.dumps(match_xpaths) + msg = "selector '%s' match: %s" % (xpath, match_str) + finish(module, tree, xpath, namespaces, changed=False, msg=msg) + + +def count_nodes(module, tree, xpath, namespaces): + """ Return the count of nodes matching the xpath """ + hits = tree.xpath("count(/%s)" % xpath, namespaces=namespaces) + msg = "found %d nodes" % hits + finish(module, tree, xpath, namespaces, changed=False, msg=msg, hitcount=int(hits)) + + +def is_node(tree, xpath, namespaces): + """ Test if a given xpath matches anything and if that match is a node. 
+ + For now we just assume you're only searching for one specific thing.""" + if xpath_matches(tree, xpath, namespaces): + # OK, it found something + match = tree.xpath(xpath, namespaces=namespaces) + if isinstance(match[0], etree._Element): + return True + + return False + + +def is_attribute(tree, xpath, namespaces): + """ Test if a given xpath matches and that match is an attribute + + An xpath attribute search will only match one item""" + if xpath_matches(tree, xpath, namespaces): + match = tree.xpath(xpath, namespaces=namespaces) + if isinstance(match[0], etree._ElementStringResult): + return True + elif isinstance(match[0], etree._ElementUnicodeResult): + return True + return False + + +def xpath_matches(tree, xpath, namespaces): + """ Test if a node exists """ + if tree.xpath(xpath, namespaces=namespaces): + return True + return False + + +def delete_xpath_target(module, tree, xpath, namespaces): + """ Delete an attribute or element from a tree """ + try: + for result in tree.xpath(xpath, namespaces=namespaces): + # Get the xpath for this result + if is_attribute(tree, xpath, namespaces): + # Delete an attribute + parent = result.getparent() + # Pop this attribute match out of the parent + # node's 'attrib' dict by using this match's + # 'attrname' attribute for the key + parent.attrib.pop(result.attrname) + elif is_node(tree, xpath, namespaces): + # Delete an element + result.getparent().remove(result) + else: + raise Exception("Impossible error") + except Exception as e: + module.fail_json(msg="Couldn't delete xpath target: %s (%s)" % (xpath, e)) + else: + finish(module, tree, xpath, namespaces, changed=True) + + +def replace_children_of(children, match): + for element in list(match): + match.remove(element) + match.extend(children) + + +def set_target_children_inner(module, tree, xpath, namespaces, children, in_type): + matches = tree.xpath(xpath, namespaces=namespaces) + + # Create a list of our new children + children = children_to_nodes(module, children, in_type) + children_as_string = [etree.tostring(c) for c in children] + + changed = False + + # xpaths always return matches as a list, so.... + for match in matches: + # Check if elements differ + if len(list(match)) == len(children): + for idx, element in enumerate(list(match)): + if etree.tostring(element) != children_as_string[idx]: + replace_children_of(children, match) + changed = True + break + else: + replace_children_of(children, match) + changed = True + + return changed + + +def set_target_children(module, tree, xpath, namespaces, children, in_type): + changed = set_target_children_inner(module, tree, xpath, namespaces, children, in_type) + # Write it out + finish(module, tree, xpath, namespaces, changed=changed) + + +def add_target_children(module, tree, xpath, namespaces, children, in_type, insertbefore, insertafter): + if is_node(tree, xpath, namespaces): + new_kids = children_to_nodes(module, children, in_type) + if insertbefore or insertafter: + insert_target_children(tree, xpath, namespaces, new_kids, insertbefore, insertafter) + else: + for node in tree.xpath(xpath, namespaces=namespaces): + node.extend(new_kids) + finish(module, tree, xpath, namespaces, changed=True) + else: + finish(module, tree, xpath, namespaces) + + +def insert_target_children(tree, xpath, namespaces, children, insertbefore, insertafter): + """ + Insert the given children before or after the given xpath. If insertbefore is True, it is inserted before the + first xpath hit, with insertafter, it is inserted after the last xpath hit. 
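+ All matches are assumed to share a single parent: the insertion index is
+ taken from the first/last hit, while the children are inserted into the
+ first hit's parent (editorial note, not part of the original docstring).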
+ """ + insert_target = tree.xpath(xpath, namespaces=namespaces) + loc_index = 0 if insertbefore else -1 + index_in_parent = insert_target[loc_index].getparent().index(insert_target[loc_index]) + parent = insert_target[0].getparent() + if insertafter: + index_in_parent += 1 + for child in children: + parent.insert(index_in_parent, child) + index_in_parent += 1 + + +def _extract_xpstr(g): + return g[1:-1] + + +def split_xpath_last(xpath): + """split an XPath of the form /foo/bar/baz into /foo/bar and baz""" + xpath = xpath.strip() + m = _RE_SPLITSIMPLELAST.match(xpath) + if m: + # requesting an element to exist + return (m.group(1), [(m.group(2), None)]) + m = _RE_SPLITSIMPLELASTEQVALUE.match(xpath) + if m: + # requesting an element to exist with an inner text + return (m.group(1), [(m.group(2), _extract_xpstr(m.group(3)))]) + + m = _RE_SPLITSIMPLEATTRLAST.match(xpath) + if m: + # requesting an attribute to exist + return (m.group(1), [(m.group(2), None)]) + m = _RE_SPLITSIMPLEATTRLASTEQVALUE.match(xpath) + if m: + # requesting an attribute to exist with a value + return (m.group(1), [(m.group(2), _extract_xpstr(m.group(3)))]) + + m = _RE_SPLITSUBLAST.match(xpath) + if m: + content = [x.strip() for x in m.group(3).split(" and ")] + return (m.group(1), [('/' + m.group(2), content)]) + + m = _RE_SPLITONLYEQVALUE.match(xpath) + if m: + # requesting a change of inner text + return (m.group(1), [("", _extract_xpstr(m.group(2)))]) + return (xpath, []) + + +def nsnameToClark(name, namespaces): + if ":" in name: + (nsname, rawname) = name.split(":") + # return "{{%s}}%s" % (namespaces[nsname], rawname) + return "{{{0}}}{1}".format(namespaces[nsname], rawname) + + # no namespace name here + return name + + +def check_or_make_target(module, tree, xpath, namespaces): + (inner_xpath, changes) = split_xpath_last(xpath) + if (inner_xpath == xpath) or (changes is None): + module.fail_json(msg="Can't process Xpath %s in order to spawn nodes! 
tree is %s" % + (xpath, etree.tostring(tree, pretty_print=True))) + return False + + changed = False + + if not is_node(tree, inner_xpath, namespaces): + changed = check_or_make_target(module, tree, inner_xpath, namespaces) + + # we test again after calling check_or_make_target + if is_node(tree, inner_xpath, namespaces) and changes: + for (eoa, eoa_value) in changes: + if eoa and eoa[0] != '@' and eoa[0] != '/': + # implicitly creating an element + new_kids = children_to_nodes(module, [nsnameToClark(eoa, namespaces)], "yaml") + if eoa_value: + for nk in new_kids: + nk.text = eoa_value + + for node in tree.xpath(inner_xpath, namespaces=namespaces): + node.extend(new_kids) + changed = True + # module.fail_json(msg="now tree=%s" % etree.tostring(tree, pretty_print=True)) + elif eoa and eoa[0] == '/': + element = eoa[1:] + new_kids = children_to_nodes(module, [nsnameToClark(element, namespaces)], "yaml") + for node in tree.xpath(inner_xpath, namespaces=namespaces): + node.extend(new_kids) + for nk in new_kids: + for subexpr in eoa_value: + # module.fail_json(msg="element=%s subexpr=%s node=%s now tree=%s" % + # (element, subexpr, etree.tostring(node, pretty_print=True), etree.tostring(tree, pretty_print=True)) + check_or_make_target(module, nk, "./" + subexpr, namespaces) + changed = True + + # module.fail_json(msg="now tree=%s" % etree.tostring(tree, pretty_print=True)) + elif eoa == "": + for node in tree.xpath(inner_xpath, namespaces=namespaces): + if (node.text != eoa_value): + node.text = eoa_value + changed = True + + elif eoa and eoa[0] == '@': + attribute = nsnameToClark(eoa[1:], namespaces) + + for element in tree.xpath(inner_xpath, namespaces=namespaces): + changing = (attribute not in element.attrib or element.attrib[attribute] != eoa_value) + + if changing: + changed = changed or changing + if eoa_value is None: + value = "" + else: + value = eoa_value + element.attrib[attribute] = value + + # module.fail_json(msg="arf %s changing=%s as curval=%s changed tree=%s" % + # (xpath, changing, etree.tostring(tree, changing, element[attribute], pretty_print=True))) + + else: + module.fail_json(msg="unknown tree transformation=%s" % etree.tostring(tree, pretty_print=True)) + + return changed + + +def ensure_xpath_exists(module, tree, xpath, namespaces): + changed = False + + if not is_node(tree, xpath, namespaces): + changed = check_or_make_target(module, tree, xpath, namespaces) + + finish(module, tree, xpath, namespaces, changed) + + +def set_target_inner(module, tree, xpath, namespaces, attribute, value): + changed = False + + try: + if not is_node(tree, xpath, namespaces): + changed = check_or_make_target(module, tree, xpath, namespaces) + except Exception as e: + missing_namespace = "" + # NOTE: This checks only the namespaces defined in root element! + # TODO: Implement a more robust check to check for child namespaces' existence + if tree.getroot().nsmap and ":" not in xpath: + missing_namespace = "XML document has namespace(s) defined, but no namespace prefix(es) used in xpath!\n" + module.fail_json(msg="%sXpath %s causes a failure: %s\n -- tree is %s" % + (missing_namespace, xpath, e, etree.tostring(tree, pretty_print=True)), exception=traceback.format_exc()) + + if not is_node(tree, xpath, namespaces): + module.fail_json(msg="Xpath %s does not reference a node! 
tree is %s" % + (xpath, etree.tostring(tree, pretty_print=True))) + + for element in tree.xpath(xpath, namespaces=namespaces): + if not attribute: + changed = changed or (element.text != value) + if element.text != value: + element.text = value + else: + changed = changed or (element.get(attribute) != value) + if ":" in attribute: + attr_ns, attr_name = attribute.split(":") + # attribute = "{{%s}}%s" % (namespaces[attr_ns], attr_name) + attribute = "{{{0}}}{1}".format(namespaces[attr_ns], attr_name) + if element.get(attribute) != value: + element.set(attribute, value) + + return changed + + +def set_target(module, tree, xpath, namespaces, attribute, value): + changed = set_target_inner(module, tree, xpath, namespaces, attribute, value) + finish(module, tree, xpath, namespaces, changed) + + +def get_element_text(module, tree, xpath, namespaces): + if not is_node(tree, xpath, namespaces): + module.fail_json(msg="Xpath %s does not reference a node!" % xpath) + + elements = [] + for element in tree.xpath(xpath, namespaces=namespaces): + elements.append({element.tag: element.text}) + + finish(module, tree, xpath, namespaces, changed=False, msg=len(elements), hitcount=len(elements), matches=elements) + + +def get_element_attr(module, tree, xpath, namespaces): + if not is_node(tree, xpath, namespaces): + module.fail_json(msg="Xpath %s does not reference a node!" % xpath) + + elements = [] + for element in tree.xpath(xpath, namespaces=namespaces): + child = {} + for key in element.keys(): + value = element.get(key) + child.update({key: value}) + elements.append({element.tag: child}) + + finish(module, tree, xpath, namespaces, changed=False, msg=len(elements), hitcount=len(elements), matches=elements) + + +def child_to_element(module, child, in_type): + if in_type == 'xml': + infile = BytesIO(to_bytes(child, errors='surrogate_or_strict')) + + try: + parser = etree.XMLParser() + node = etree.parse(infile, parser) + return node.getroot() + except etree.XMLSyntaxError as e: + module.fail_json(msg="Error while parsing child element: %s" % e) + elif in_type == 'yaml': + if isinstance(child, string_types): + return etree.Element(child) + elif isinstance(child, MutableMapping): + if len(child) > 1: + module.fail_json(msg="Can only create children from hashes with one key") + + (key, value) = next(iteritems(child)) + if isinstance(value, MutableMapping): + children = value.pop('_', None) + + node = etree.Element(key, value) + + if children is not None: + if not isinstance(children, list): + module.fail_json(msg="Invalid children type: %s, must be list." % type(children)) + + subnodes = children_to_nodes(module, children) + node.extend(subnodes) + else: + node = etree.Element(key) + node.text = value + return node + else: + module.fail_json(msg="Invalid child type: %s. Children must be either strings or hashes." % type(child)) + else: + module.fail_json(msg="Invalid child input type: %s. Type must be either xml or yaml." 
% in_type) + + +def children_to_nodes(module=None, children=None, type='yaml'): + """turn a str/hash/list of str&hash into a list of elements""" + children = [] if children is None else children + + return [child_to_element(module, child, type) for child in children] + + +def make_pretty(module, tree): + xml_string = etree.tostring(tree, xml_declaration=True, encoding='UTF-8', pretty_print=module.params['pretty_print']) + + result = dict( + changed=False, + ) + + if module.params['path']: + xml_file = module.params['path'] + with open(xml_file, 'rb') as xml_content: + if xml_string != xml_content.read(): + result['changed'] = True + if not module.check_mode: + if module.params['backup']: + result['backup_file'] = module.backup_local(module.params['path']) + tree.write(xml_file, xml_declaration=True, encoding='UTF-8', pretty_print=module.params['pretty_print']) + + elif module.params['xmlstring']: + result['xmlstring'] = xml_string + # NOTE: Modifying a string is not considered a change ! + if xml_string != module.params['xmlstring']: + result['changed'] = True + + module.exit_json(**result) + + +def finish(module, tree, xpath, namespaces, changed=False, msg='', hitcount=0, matches=tuple()): + + result = dict( + actions=dict( + xpath=xpath, + namespaces=namespaces, + state=module.params['state'] + ), + changed=has_changed(tree), + ) + + if module.params['count'] or hitcount: + result['count'] = hitcount + + if module.params['print_match'] or matches: + result['matches'] = matches + + if msg: + result['msg'] = msg + + if result['changed']: + if module._diff: + result['diff'] = dict( + before=etree.tostring(orig_doc, xml_declaration=True, encoding='UTF-8', pretty_print=True), + after=etree.tostring(tree, xml_declaration=True, encoding='UTF-8', pretty_print=True), + ) + + if module.params['path'] and not module.check_mode: + if module.params['backup']: + result['backup_file'] = module.backup_local(module.params['path']) + tree.write(module.params['path'], xml_declaration=True, encoding='UTF-8', pretty_print=module.params['pretty_print']) + + if module.params['xmlstring']: + result['xmlstring'] = etree.tostring(tree, xml_declaration=True, encoding='UTF-8', pretty_print=module.params['pretty_print']) + + module.exit_json(**result) + + +def main(): + module = AnsibleModule( + argument_spec=dict( + path=dict(type='path', aliases=['dest', 'file']), + xmlstring=dict(type='str'), + xpath=dict(type='str'), + namespaces=dict(type='dict', default={}), + state=dict(type='str', default='present', choices=['absent', 'present'], aliases=['ensure']), + value=dict(type='raw'), + attribute=dict(type='raw'), + add_children=dict(type='list'), + set_children=dict(type='list'), + count=dict(type='bool', default=False), + print_match=dict(type='bool', default=False), + pretty_print=dict(type='bool', default=False), + content=dict(type='str', choices=['attribute', 'text']), + input_type=dict(type='str', default='yaml', choices=['xml', 'yaml']), + backup=dict(type='bool', default=False), + strip_cdata_tags=dict(type='bool', default=False), + insertbefore=dict(type='bool', default=False), + insertafter=dict(type='bool', default=False), + ), + supports_check_mode=True, + required_by=dict( + add_children=['xpath'], + # TODO: Reinstate this in Ansible v2.12 when we have deprecated the incorrect use below + # attribute=['value'], + content=['xpath'], + set_children=['xpath'], + value=['xpath'], + ), + required_if=[ + ['count', True, ['xpath']], + ['print_match', True, ['xpath']], + ['insertbefore', True, ['xpath']], + 
['insertafter', True, ['xpath']], + ], + required_one_of=[ + ['path', 'xmlstring'], + ['add_children', 'content', 'count', 'pretty_print', 'print_match', 'set_children', 'value'], + ], + mutually_exclusive=[ + ['add_children', 'content', 'count', 'print_match', 'set_children', 'value'], + ['path', 'xmlstring'], + ['insertbefore', 'insertafter'], + ], + ) + + xml_file = module.params['path'] + xml_string = module.params['xmlstring'] + xpath = module.params['xpath'] + namespaces = module.params['namespaces'] + state = module.params['state'] + value = json_dict_bytes_to_unicode(module.params['value']) + attribute = module.params['attribute'] + set_children = json_dict_bytes_to_unicode(module.params['set_children']) + add_children = json_dict_bytes_to_unicode(module.params['add_children']) + pretty_print = module.params['pretty_print'] + content = module.params['content'] + input_type = module.params['input_type'] + print_match = module.params['print_match'] + count = module.params['count'] + backup = module.params['backup'] + strip_cdata_tags = module.params['strip_cdata_tags'] + insertbefore = module.params['insertbefore'] + insertafter = module.params['insertafter'] + + # Check if we have lxml 2.3.0 or newer installed + if not HAS_LXML: + module.fail_json(msg=missing_required_lib("lxml"), exception=LXML_IMP_ERR) + elif LooseVersion('.'.join(to_native(f) for f in etree.LXML_VERSION)) < LooseVersion('2.3.0'): + module.fail_json(msg='The xml ansible module requires lxml 2.3.0 or newer installed on the managed machine') + elif LooseVersion('.'.join(to_native(f) for f in etree.LXML_VERSION)) < LooseVersion('3.0.0'): + module.warn('Using lxml version lower than 3.0.0 does not guarantee predictable element attribute order.') + + # Report wrongly used attribute parameter when using content=attribute + # TODO: Remove this in Ansible v2.12 (and reinstate strict parameter test above) and remove the integration test example + if content == 'attribute' and attribute is not None: + module.deprecate("Parameter 'attribute=%s' is ignored when using 'content=attribute' only 'xpath' is used. Please remove entry." % attribute, '2.12') + + # Check if the file exists + if xml_string: + infile = BytesIO(to_bytes(xml_string, errors='surrogate_or_strict')) + elif os.path.isfile(xml_file): + infile = open(xml_file, 'rb') + else: + module.fail_json(msg="The target XML source '%s' does not exist." 
% xml_file) + + # Parse and evaluate xpath expression + if xpath is not None: + try: + etree.XPath(xpath) + except etree.XPathSyntaxError as e: + module.fail_json(msg="Syntax error in xpath expression: %s (%s)" % (xpath, e)) + except etree.XPathEvalError as e: + module.fail_json(msg="Evaluation error in xpath expression: %s (%s)" % (xpath, e)) + + # Try to parse in the target XML file + try: + parser = etree.XMLParser(remove_blank_text=pretty_print, strip_cdata=strip_cdata_tags) + doc = etree.parse(infile, parser) + except etree.XMLSyntaxError as e: + module.fail_json(msg="Error while parsing document: %s (%s)" % (xml_file or 'xml_string', e)) + + # Ensure we have the original copy to compare + global orig_doc + orig_doc = copy.deepcopy(doc) + + if print_match: + do_print_match(module, doc, xpath, namespaces) + + if count: + count_nodes(module, doc, xpath, namespaces) + + if content == 'attribute': + get_element_attr(module, doc, xpath, namespaces) + elif content == 'text': + get_element_text(module, doc, xpath, namespaces) + + # File exists: + if state == 'absent': + # - absent: delete xpath target + delete_xpath_target(module, doc, xpath, namespaces) + + # - present: carry on + + # children && value both set?: should have already aborted by now + # add_children && set_children both set?: should have already aborted by now + + # set_children set? + if set_children: + set_target_children(module, doc, xpath, namespaces, set_children, input_type) + + # add_children set? + if add_children: + add_target_children(module, doc, xpath, namespaces, add_children, input_type, insertbefore, insertafter) + + # No?: Carry on + + # Is the xpath target an attribute selector? + if value is not None: + set_target(module, doc, xpath, namespaces, attribute, value) + + # If an xpath was provided, we need to do something with the data + if xpath is not None: + ensure_xpath_exists(module, doc, xpath, namespaces) + + # Otherwise only reformat the xml data? + if pretty_print: + make_pretty(module, doc) + + module.fail_json(msg="Don't know what to do") + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/identity/ipa/ipa_config.py b/plugins/modules/identity/ipa/ipa_config.py new file mode 100644 index 0000000000..6a61ccfc35 --- /dev/null +++ b/plugins/modules/identity/ipa/ipa_config.py @@ -0,0 +1,142 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright: (c) 2018, Fran Fitzpatrick +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = r''' +--- +module: ipa_config +author: Fran Fitzpatrick (@fxfitz) +short_description: Manage Global FreeIPA Configuration Settings +description: +- Modify global configuration settings of a FreeIPA Server. +options: + ipadefaultloginshell: + description: Default shell for new users. + aliases: ["loginshell"] + type: str + ipadefaultemaildomain: + description: Default e-mail domain for new users. + aliases: ["emaildomain"] + type: str +extends_documentation_fragment: +- community.general.ipa.documentation + +''' + +EXAMPLES = r''' +- name: Ensure the default login shell is bash. + ipa_config: + ipadefaultloginshell: /bin/bash + ipa_host: localhost + ipa_user: admin + ipa_pass: supersecret + +- name: Ensure the default e-mail domain is ansible.com. 
+ ipa_config: + ipadefaultemaildomain: ansible.com + ipa_host: localhost + ipa_user: admin + ipa_pass: supersecret +''' + +RETURN = r''' +config: + description: Configuration as returned by IPA API. + returned: always + type: dict +''' + +import traceback + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec +from ansible.module_utils._text import to_native + + +class ConfigIPAClient(IPAClient): + def __init__(self, module, host, port, protocol): + super(ConfigIPAClient, self).__init__(module, host, port, protocol) + + def config_show(self): + return self._post_json(method='config_show', name=None) + + def config_mod(self, name, item): + return self._post_json(method='config_mod', name=name, item=item) + + +def get_config_dict(ipadefaultloginshell=None, ipadefaultemaildomain=None): + config = {} + if ipadefaultloginshell is not None: + config['ipadefaultloginshell'] = ipadefaultloginshell + if ipadefaultemaildomain is not None: + config['ipadefaultemaildomain'] = ipadefaultemaildomain + + return config + + +def get_config_diff(client, ipa_config, module_config): + return client.get_diff(ipa_data=ipa_config, module_data=module_config) + + +def ensure(module, client): + module_config = get_config_dict( + ipadefaultloginshell=module.params.get('ipadefaultloginshell'), + ipadefaultemaildomain=module.params.get('ipadefaultemaildomain'), + ) + ipa_config = client.config_show() + diff = get_config_diff(client, ipa_config, module_config) + + changed = False + new_config = {} + for module_key in diff: + if module_config.get(module_key) != ipa_config.get(module_key, None): + changed = True + new_config.update({module_key: module_config.get(module_key)}) + + if changed and not module.check_mode: + client.config_mod(name=None, item=new_config) + + return changed, client.config_show() + + +def main(): + argument_spec = ipa_argument_spec() + argument_spec.update( + ipadefaultloginshell=dict(type='str', aliases=['loginshell']), + ipadefaultemaildomain=dict(type='str', aliases=['emaildomain']), + ) + + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True + ) + + client = ConfigIPAClient( + module=module, + host=module.params['ipa_host'], + port=module.params['ipa_port'], + protocol=module.params['ipa_prot'] + ) + + try: + client.login( + username=module.params['ipa_user'], + password=module.params['ipa_pass'] + ) + changed, user = ensure(module, client) + module.exit_json(changed=changed, user=user) + except Exception as e: + module.fail_json(msg=to_native(e), exception=traceback.format_exc()) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/identity/ipa/ipa_dnsrecord.py b/plugins/modules/identity/ipa/ipa_dnsrecord.py new file mode 100644 index 0000000000..6f4ede8ca8 --- /dev/null +++ b/plugins/modules/identity/ipa/ipa_dnsrecord.py @@ -0,0 +1,326 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright: (c) 2017, Abhijeet Kasurde (akasurde@redhat.com) +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = { + 'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community' +} + + +DOCUMENTATION = r''' +--- +module: ipa_dnsrecord +author: Abhijeet Kasurde (@Akasurde) +short_description: Manage FreeIPA DNS records +description: +- Add, modify and delete an IPA DNS Record using IPA 
API. +options: + zone_name: + description: + - The DNS zone name to which DNS record needs to be managed. + required: true + type: str + record_name: + description: + - The DNS record name to manage. + required: true + aliases: ["name"] + type: str + record_type: + description: + - The type of DNS record name. + - Currently, 'A', 'AAAA', 'A6', 'CNAME', 'DNAME', 'PTR', 'TXT', 'SRV' and 'MX' are supported. + - "'A6', 'CNAME', 'DNAME' and 'TXT' are added in version 2.5." + - "'SRV' and 'MX' are added in version 2.8." + required: false + default: 'A' + choices: ['A', 'AAAA', 'A6', 'CNAME', 'DNAME', 'MX', 'PTR', 'SRV', 'TXT'] + type: str + record_value: + description: + - Manage DNS record name with this value. + - In the case of 'A' or 'AAAA' record types, this will be the IP address. + - In the case of 'A6' record type, this will be the A6 Record data. + - In the case of 'CNAME' record type, this will be the hostname. + - In the case of 'DNAME' record type, this will be the DNAME target. + - In the case of 'PTR' record type, this will be the hostname. + - In the case of 'TXT' record type, this will be a text. + - In the case of 'SRV' record type, this will be a service record. + - In the case of 'MX' record type, this will be a mail exchanger record. + required: true + type: str + record_ttl: + description: + - Set the TTL for the record. + - Applies only when adding a new or changing the value of record_value. + required: false + type: int + state: + description: State to ensure + required: false + default: present + choices: ["absent", "present"] + type: str +extends_documentation_fragment: +- community.general.ipa.documentation + +''' + +EXAMPLES = r''' +- name: Ensure dns record is present + ipa_dnsrecord: + ipa_host: spider.example.com + ipa_pass: Passw0rd! + state: present + zone_name: example.com + record_name: vm-001 + record_type: 'AAAA' + record_value: '::1' + +- name: Ensure that dns record exists with a TTL + ipa_dnsrecord: + name: host02 + zone_name: example.com + record_type: 'AAAA' + record_value: '::1' + record_ttl: 300 + ipa_host: ipa.example.com + ipa_pass: topsecret + state: present + +- name: Ensure a PTR record is present + ipa_dnsrecord: + ipa_host: spider.example.com + ipa_pass: Passw0rd! + state: present + zone_name: 2.168.192.in-addr.arpa + record_name: 5 + record_type: 'PTR' + record_value: 'internal.ipa.example.com' + +- name: Ensure a TXT record is present + ipa_dnsrecord: + ipa_host: spider.example.com + ipa_pass: Passw0rd! + state: present + zone_name: example.com + record_name: _kerberos + record_type: 'TXT' + record_value: 'EXAMPLE.COM' + +- name: Ensure an SRV record is present + ipa_dnsrecord: + ipa_host: spider.example.com + ipa_pass: Passw0rd! + state: present + zone_name: example.com + record_name: _kerberos._udp.example.com + record_type: 'SRV' + record_value: '10 50 88 ipa.example.com' + +- name: Ensure an MX record is present + ipa_dnsrecord: + ipa_host: spider.example.com + ipa_pass: Passw0rd! + state: present + zone_name: example.com + record_name: '@' + record_type: 'MX' + record_value: '1 mailserver.example.com' + +- name: Ensure that dns record is removed + ipa_dnsrecord: + name: host01 + zone_name: example.com + record_type: 'AAAA' + record_value: '::1' + ipa_host: ipa.example.com + ipa_user: admin + ipa_pass: topsecret + state: absent +''' + +RETURN = r''' +dnsrecord: + description: DNS record as returned by IPA API. 
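+ # Editorial sketch (not part of the original docs): for an A record the
+ # returned dict typically carries IPA attribute keys, e.g.
+ # {'idnsname': ['vm-001'], 'arecord': ['192.168.1.5']}.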
+ returned: always + type: dict +''' + +import traceback + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec +from ansible.module_utils._text import to_native + + +class DNSRecordIPAClient(IPAClient): + def __init__(self, module, host, port, protocol): + super(DNSRecordIPAClient, self).__init__(module, host, port, protocol) + + def dnsrecord_find(self, zone_name, record_name): + if record_name == '@': + return self._post_json(method='dnsrecord_show', name=zone_name, item={'idnsname': record_name, 'all': True}) + else: + return self._post_json(method='dnsrecord_find', name=zone_name, item={'idnsname': record_name, 'all': True}) + + def dnsrecord_add(self, zone_name=None, record_name=None, details=None): + item = dict(idnsname=record_name) + if details['record_type'] == 'A': + item.update(a_part_ip_address=details['record_value']) + elif details['record_type'] == 'AAAA': + item.update(aaaa_part_ip_address=details['record_value']) + elif details['record_type'] == 'A6': + item.update(a6_part_data=details['record_value']) + elif details['record_type'] == 'CNAME': + item.update(cname_part_hostname=details['record_value']) + elif details['record_type'] == 'DNAME': + item.update(dname_part_target=details['record_value']) + elif details['record_type'] == 'PTR': + item.update(ptr_part_hostname=details['record_value']) + elif details['record_type'] == 'TXT': + item.update(txtrecord=details['record_value']) + elif details['record_type'] == 'SRV': + item.update(srvrecord=details['record_value']) + elif details['record_type'] == 'MX': + item.update(mxrecord=details['record_value']) + + if details.get('record_ttl'): + item.update(dnsttl=details['record_ttl']) + + return self._post_json(method='dnsrecord_add', name=zone_name, item=item) + + def dnsrecord_mod(self, zone_name=None, record_name=None, details=None): + item = get_dnsrecord_dict(details) + item.update(idnsname=record_name) + if details.get('record_ttl'): + item.update(dnsttl=details['record_ttl']) + return self._post_json(method='dnsrecord_mod', name=zone_name, item=item) + + def dnsrecord_del(self, zone_name=None, record_name=None, details=None): + item = get_dnsrecord_dict(details) + item.update(idnsname=record_name) + return self._post_json(method='dnsrecord_del', name=zone_name, item=item) + + +def get_dnsrecord_dict(details=None): + module_dnsrecord = dict() + if details['record_type'] == 'A' and details['record_value']: + module_dnsrecord.update(arecord=details['record_value']) + elif details['record_type'] == 'AAAA' and details['record_value']: + module_dnsrecord.update(aaaarecord=details['record_value']) + elif details['record_type'] == 'A6' and details['record_value']: + module_dnsrecord.update(a6record=details['record_value']) + elif details['record_type'] == 'CNAME' and details['record_value']: + module_dnsrecord.update(cnamerecord=details['record_value']) + elif details['record_type'] == 'DNAME' and details['record_value']: + module_dnsrecord.update(dnamerecord=details['record_value']) + elif details['record_type'] == 'PTR' and details['record_value']: + module_dnsrecord.update(ptrrecord=details['record_value']) + elif details['record_type'] == 'TXT' and details['record_value']: + module_dnsrecord.update(txtrecord=details['record_value']) + elif details['record_type'] == 'SRV' and details['record_value']: + module_dnsrecord.update(srvrecord=details['record_value']) + elif details['record_type'] == 'MX' and 
details['record_value']: + module_dnsrecord.update(mxrecord=details['record_value']) + + if details.get('record_ttl'): + module_dnsrecord.update(dnsttl=details['record_ttl']) + + return module_dnsrecord + + +def get_dnsrecord_diff(client, ipa_dnsrecord, module_dnsrecord): + details = get_dnsrecord_dict(module_dnsrecord) + return client.get_diff(ipa_data=ipa_dnsrecord, module_data=details) + + +def ensure(module, client): + zone_name = module.params['zone_name'] + record_name = module.params['record_name'] + record_ttl = module.params.get('record_ttl') + state = module.params['state'] + + ipa_dnsrecord = client.dnsrecord_find(zone_name, record_name) + + module_dnsrecord = dict( + record_type=module.params['record_type'], + record_value=module.params['record_value'], + record_ttl=to_native(record_ttl, nonstring='passthru'), + ) + + # ttl is not required to change records + if module_dnsrecord['record_ttl'] is None: + module_dnsrecord.pop('record_ttl') + + changed = False + if state == 'present': + if not ipa_dnsrecord: + changed = True + if not module.check_mode: + client.dnsrecord_add(zone_name=zone_name, + record_name=record_name, + details=module_dnsrecord) + else: + diff = get_dnsrecord_diff(client, ipa_dnsrecord, module_dnsrecord) + if len(diff) > 0: + changed = True + if not module.check_mode: + client.dnsrecord_mod(zone_name=zone_name, + record_name=record_name, + details=module_dnsrecord) + else: + if ipa_dnsrecord: + changed = True + if not module.check_mode: + client.dnsrecord_del(zone_name=zone_name, + record_name=record_name, + details=module_dnsrecord) + + return changed, client.dnsrecord_find(zone_name, record_name) + + +def main(): + record_types = ['A', 'AAAA', 'A6', 'CNAME', 'DNAME', 'PTR', 'TXT', 'SRV', 'MX'] + argument_spec = ipa_argument_spec() + argument_spec.update( + zone_name=dict(type='str', required=True), + record_name=dict(type='str', aliases=['name'], required=True), + record_type=dict(type='str', default='A', choices=record_types), + record_value=dict(type='str', required=True), + state=dict(type='str', default='present', choices=['present', 'absent']), + record_ttl=dict(type='int', required=False), + ) + + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True + ) + + client = DNSRecordIPAClient( + module=module, + host=module.params['ipa_host'], + port=module.params['ipa_port'], + protocol=module.params['ipa_prot'] + ) + + try: + client.login( + username=module.params['ipa_user'], + password=module.params['ipa_pass'] + ) + changed, record = ensure(module, client) + module.exit_json(changed=changed, record=record) + except Exception as e: + module.fail_json(msg=to_native(e), exception=traceback.format_exc()) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/identity/ipa/ipa_dnszone.py b/plugins/modules/identity/ipa/ipa_dnszone.py new file mode 100644 index 0000000000..e089731a6f --- /dev/null +++ b/plugins/modules/identity/ipa/ipa_dnszone.py @@ -0,0 +1,167 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright: (c) 2017, Fran Fitzpatrick (francis.x.fitzpatrick@gmail.com) +# Borrowed heavily from other work by Abhijeet Kasurde (akasurde@redhat.com) +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = r''' +--- +module: ipa_dnszone +author: Fran Fitzpatrick 
(@fxfitz) +short_description: Manage FreeIPA DNS Zones +description: +- Add and delete an IPA DNS Zones using IPA API +options: + zone_name: + description: + - The DNS zone name to which needs to be managed. + required: true + type: str + state: + description: State to ensure + required: false + default: present + choices: ["absent", "present"] + type: str + dynamicupdate: + description: Apply dynamic update to zone + required: false + default: "false" + choices: ["false", "true"] + type: str +extends_documentation_fragment: +- community.general.ipa.documentation + +''' + +EXAMPLES = r''' +- name: Ensure dns zone is present + ipa_dnszone: + ipa_host: spider.example.com + ipa_pass: Passw0rd! + state: present + zone_name: example.com + +- name: Ensure dns zone is present and is dynamic update + ipa_dnszone: + ipa_host: spider.example.com + ipa_pass: Passw0rd! + state: present + zone_name: example.com + dynamicupdate: true + +- name: Ensure that dns zone is removed + ipa_dnszone: + zone_name: example.com + ipa_host: localhost + ipa_user: admin + ipa_pass: topsecret + state: absent +''' + +RETURN = r''' +zone: + description: DNS zone as returned by IPA API. + returned: always + type: dict +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec +from ansible.module_utils._text import to_native + + +class DNSZoneIPAClient(IPAClient): + def __init__(self, module, host, port, protocol): + super(DNSZoneIPAClient, self).__init__(module, host, port, protocol) + + def dnszone_find(self, zone_name, details=None): + itens = {'idnsname': zone_name} + if details is not None: + itens.update(details) + + return self._post_json( + method='dnszone_find', + name=zone_name, + item=itens + ) + + def dnszone_add(self, zone_name=None, details=None): + itens = {} + if details is not None: + itens.update(details) + + return self._post_json( + method='dnszone_add', + name=zone_name, + item=itens + ) + + def dnszone_del(self, zone_name=None, record_name=None, details=None): + return self._post_json( + method='dnszone_del', name=zone_name, item={}) + + +def ensure(module, client): + zone_name = module.params['zone_name'] + state = module.params['state'] + dynamicupdate = module.params['dynamicupdate'] + + ipa_dnszone = client.dnszone_find(zone_name) + + changed = False + if state == 'present': + if not ipa_dnszone: + changed = True + if not module.check_mode: + client.dnszone_add(zone_name=zone_name, details={'idnsallowdynupdate': dynamicupdate}) + else: + changed = False + else: + if ipa_dnszone: + changed = True + if not module.check_mode: + client.dnszone_del(zone_name=zone_name) + + return changed, client.dnszone_find(zone_name) + + +def main(): + argument_spec = ipa_argument_spec() + argument_spec.update(zone_name=dict(type='str', required=True), + state=dict(type='str', default='present', choices=['present', 'absent']), + dynamicupdate=dict(type='str', required=False, default='false', choices=['true', 'false']), + ) + + module = AnsibleModule(argument_spec=argument_spec, + supports_check_mode=True, + ) + + client = DNSZoneIPAClient( + module=module, + host=module.params['ipa_host'], + port=module.params['ipa_port'], + protocol=module.params['ipa_prot'] + ) + + try: + client.login( + username=module.params['ipa_user'], + password=module.params['ipa_pass'] + ) + changed, zone = ensure(module, client) + module.exit_json(changed=changed, zone=zone) + except Exception as e: + module.fail_json(msg=to_native(e)) 
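+
+# Editorial note (not part of the original commit): dynamicupdate is declared
+# as type='str' with choices ['true', 'false'], so an unquoted YAML boolean
+# (dynamicupdate: true) is stringified by AnsibleModule and will likely fail
+# the choices check. A hedged usage sketch that avoids this:
+#
+#   - ipa_dnszone:
+#       zone_name: example.com
+#       dynamicupdate: 'true'
+#       ipa_host: ipa.example.com
+#       ipa_user: admin
+#       ipa_pass: topsecret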
+ + +if __name__ == '__main__': + main() diff --git a/plugins/modules/identity/ipa/ipa_group.py b/plugins/modules/identity/ipa/ipa_group.py new file mode 100644 index 0000000000..73f78a354b --- /dev/null +++ b/plugins/modules/identity/ipa/ipa_group.py @@ -0,0 +1,263 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright: (c) 2017, Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = r''' +--- +module: ipa_group +author: Thomas Krahn (@Nosmoht) +short_description: Manage FreeIPA group +description: +- Add, modify and delete group within IPA server +options: + cn: + description: + - Canonical name. + - Can not be changed as it is the unique identifier. + required: true + aliases: ['name'] + type: str + description: + description: + - Description of the group. + type: str + external: + description: + - Allow adding external non-IPA members from trusted domains. + type: bool + gidnumber: + description: + - GID (use this option to set it manually). + aliases: ['gid'] + type: str + group: + description: + - List of group names assigned to this group. + - If an empty list is passed all groups will be removed from this group. + - If option is omitted assigned groups will not be checked or changed. + - Groups that are already assigned but not passed will be removed. + type: list + elements: str + nonposix: + description: + - Create as a non-POSIX group. + type: bool + user: + description: + - List of user names assigned to this group. + - If an empty list is passed all users will be removed from this group. + - If option is omitted assigned users will not be checked or changed. + - Users that are already assigned but not passed will be removed. 
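+ # Editorial note (not in the original docs): the bullet above applies only
+ # when I(user) is supplied; omitting the option leaves membership unchanged.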
+ type: list + elements: str + state: + description: + - State to ensure + default: "present" + choices: ["absent", "present"] + type: str +extends_documentation_fragment: +- community.general.ipa.documentation + +''' + +EXAMPLES = r''' +- name: Ensure group is present + ipa_group: + name: oinstall + gidnumber: 54321 + state: present + ipa_host: ipa.example.com + ipa_user: admin + ipa_pass: topsecret + +- name: Ensure that groups sysops and appops are assigned to ops but no other group + ipa_group: + name: ops + group: + - sysops + - appops + ipa_host: ipa.example.com + ipa_user: admin + ipa_pass: topsecret + +- name: Ensure that users linus and larry are assign to the group, but no other user + ipa_group: + name: sysops + user: + - linus + - larry + ipa_host: ipa.example.com + ipa_user: admin + ipa_pass: topsecret + +- name: Ensure group is absent + ipa_group: + name: sysops + state: absent + ipa_host: ipa.example.com + ipa_user: admin + ipa_pass: topsecret +''' + +RETURN = r''' +group: + description: Group as returned by IPA API + returned: always + type: dict +''' + +import traceback + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec +from ansible.module_utils._text import to_native + + +class GroupIPAClient(IPAClient): + def __init__(self, module, host, port, protocol): + super(GroupIPAClient, self).__init__(module, host, port, protocol) + + def group_find(self, name): + return self._post_json(method='group_find', name=None, item={'all': True, 'cn': name}) + + def group_add(self, name, item): + return self._post_json(method='group_add', name=name, item=item) + + def group_mod(self, name, item): + return self._post_json(method='group_mod', name=name, item=item) + + def group_del(self, name): + return self._post_json(method='group_del', name=name) + + def group_add_member(self, name, item): + return self._post_json(method='group_add_member', name=name, item=item) + + def group_add_member_group(self, name, item): + return self.group_add_member(name=name, item={'group': item}) + + def group_add_member_user(self, name, item): + return self.group_add_member(name=name, item={'user': item}) + + def group_remove_member(self, name, item): + return self._post_json(method='group_remove_member', name=name, item=item) + + def group_remove_member_group(self, name, item): + return self.group_remove_member(name=name, item={'group': item}) + + def group_remove_member_user(self, name, item): + return self.group_remove_member(name=name, item={'user': item}) + + +def get_group_dict(description=None, external=None, gid=None, nonposix=None): + group = {} + if description is not None: + group['description'] = description + if external is not None: + group['external'] = external + if gid is not None: + group['gidnumber'] = gid + if nonposix is not None: + group['nonposix'] = nonposix + return group + + +def get_group_diff(client, ipa_group, module_group): + data = [] + # With group_add attribute nonposix is passed, whereas with group_mod only posix can be passed. 
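+ # Editorial note (sketch, not in the original commit): e.g. a request for
+ # nonposix=False against a currently non-POSIX group is rewritten below to
+ # {'posix': True}, since group_mod understands 'posix' but not 'nonposix'.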
+ if 'nonposix' in module_group: + # Only non-posix groups can be changed to posix + if not module_group['nonposix'] and ipa_group.get('nonposix'): + module_group['posix'] = True + del module_group['nonposix'] + + if 'external' in module_group: + if module_group['external'] and 'ipaexternalgroup' in ipa_group.get('objectclass'): + del module_group['external'] + + return client.get_diff(ipa_data=ipa_group, module_data=module_group) + + +def ensure(module, client): + state = module.params['state'] + name = module.params['cn'] + group = module.params['group'] + user = module.params['user'] + + module_group = get_group_dict(description=module.params['description'], external=module.params['external'], + gid=module.params['gidnumber'], nonposix=module.params['nonposix']) + ipa_group = client.group_find(name=name) + + changed = False + if state == 'present': + if not ipa_group: + changed = True + if not module.check_mode: + ipa_group = client.group_add(name, item=module_group) + else: + diff = get_group_diff(client, ipa_group, module_group) + if len(diff) > 0: + changed = True + if not module.check_mode: + data = {} + for key in diff: + data[key] = module_group.get(key) + client.group_mod(name=name, item=data) + + if group is not None: + changed = client.modify_if_diff(name, ipa_group.get('member_group', []), group, + client.group_add_member_group, + client.group_remove_member_group) or changed + + if user is not None: + changed = client.modify_if_diff(name, ipa_group.get('member_user', []), user, + client.group_add_member_user, + client.group_remove_member_user) or changed + + else: + if ipa_group: + changed = True + if not module.check_mode: + client.group_del(name) + + return changed, client.group_find(name=name) + + +def main(): + argument_spec = ipa_argument_spec() + argument_spec.update(cn=dict(type='str', required=True, aliases=['name']), + description=dict(type='str'), + external=dict(type='bool'), + gidnumber=dict(type='str', aliases=['gid']), + group=dict(type='list', elements='str'), + nonposix=dict(type='bool'), + state=dict(type='str', default='present', choices=['present', 'absent']), + user=dict(type='list', elements='str')) + + module = AnsibleModule(argument_spec=argument_spec, + supports_check_mode=True, + ) + + client = GroupIPAClient(module=module, + host=module.params['ipa_host'], + port=module.params['ipa_port'], + protocol=module.params['ipa_prot']) + try: + client.login(username=module.params['ipa_user'], + password=module.params['ipa_pass']) + changed, group = ensure(module, client) + module.exit_json(changed=changed, group=group) + except Exception as e: + module.fail_json(msg=to_native(e), exception=traceback.format_exc()) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/identity/ipa/ipa_hbacrule.py b/plugins/modules/identity/ipa/ipa_hbacrule.py new file mode 100644 index 0000000000..8f34ed7fbd --- /dev/null +++ b/plugins/modules/identity/ipa/ipa_hbacrule.py @@ -0,0 +1,359 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright: (c) 2017, Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = r''' +--- +module: ipa_hbacrule +author: Thomas Krahn (@Nosmoht) +short_description: Manage FreeIPA HBAC rule +description: +- Add, modify or delete an IPA HBAC rule using IPA API. 
+options:
+  cn:
+    description:
+    - Canonical name.
+    - Can not be changed as it is the unique identifier.
+    required: true
+    aliases: ["name"]
+    type: str
+  description:
+    description: Description
+    type: str
+  host:
+    description:
+    - List of host names to assign.
+    - If an empty list is passed all hosts will be removed from the rule.
+    - If option is omitted hosts will not be checked or changed.
+    required: false
+    type: list
+    elements: str
+  hostcategory:
+    description: Host category
+    choices: ['all']
+    type: str
+  hostgroup:
+    description:
+    - List of hostgroup names to assign.
+    - If an empty list is passed all hostgroups will be removed from the rule.
+    - If option is omitted hostgroups will not be checked or changed.
+    type: list
+    elements: str
+  service:
+    description:
+    - List of service names to assign.
+    - If an empty list is passed all services will be removed from the rule.
+    - If option is omitted services will not be checked or changed.
+    type: list
+    elements: str
+  servicecategory:
+    description: Service category
+    choices: ['all']
+    type: str
+  servicegroup:
+    description:
+    - List of service group names to assign.
+    - If an empty list is passed all assigned service groups will be removed from the rule.
+    - If option is omitted service groups will not be checked or changed.
+    type: list
+    elements: str
+  sourcehost:
+    description:
+    - List of source host names to assign.
+    - If an empty list is passed all assigned source hosts will be removed from the rule.
+    - If option is omitted source hosts will not be checked or changed.
+    type: list
+    elements: str
+  sourcehostcategory:
+    description: Source host category
+    choices: ['all']
+    type: str
+  sourcehostgroup:
+    description:
+    - List of source host group names to assign.
+    - If an empty list is passed all assigned source host groups will be removed from the rule.
+    - If option is omitted source host groups will not be checked or changed.
+    type: list
+    elements: str
+  state:
+    description: State to ensure.
+    default: "present"
+    choices: ["absent", "disabled", "enabled", "present"]
+    type: str
+  user:
+    description:
+    - List of user names to assign.
+    - If an empty list is passed all assigned users will be removed from the rule.
+    - If option is omitted users will not be checked or changed.
+    type: list
+    elements: str
+  usercategory:
+    description: User category
+    choices: ['all']
+    type: str
+  usergroup:
+    description:
+    - List of user group names to assign.
+    - If an empty list is passed all assigned user groups will be removed from the rule.
+    - If option is omitted user groups will not be checked or changed.
+    type: list
+    elements: str
+extends_documentation_fragment:
+- community.general.ipa.documentation
+
+'''
+
+EXAMPLES = r'''
+- name: Ensure rule to allow all users to access any host from any host
+  ipa_hbacrule:
+    name: allow_all
+    description: Allow all users to access any host from any host
+    hostcategory: all
+    servicecategory: all
+    usercategory: all
+    state: present
+    ipa_host: ipa.example.com
+    ipa_user: admin
+    ipa_pass: topsecret
+
+- name: Ensure rule with certain limitations
+  ipa_hbacrule:
+    name: allow_all_developers_access_to_db
+    description: Allow all developers to access any database from any host
+    hostgroup:
+    - db-server
+    usergroup:
+    - developers
+    state: present
+    ipa_host: ipa.example.com
+    ipa_user: admin
+    ipa_pass: topsecret
+
+- name: Ensure rule is absent
+  ipa_hbacrule:
+    name: rule_to_be_deleted
+    state: absent
+    ipa_host: ipa.example.com
+    ipa_user: admin
+    ipa_pass: topsecret
+'''
+
+RETURN = r'''
+hbacrule:
+  description: HBAC rule as returned by IPA API.
+  returned: always
+  type: dict
+'''
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec
+from ansible.module_utils._text import to_native
+
+
+class HBACRuleIPAClient(IPAClient):
+    def __init__(self, module, host, port, protocol):
+        super(HBACRuleIPAClient, self).__init__(module, host, port, protocol)
+
+    def hbacrule_find(self, name):
+        return self._post_json(method='hbacrule_find', name=None, item={'all': True, 'cn': name})
+
+    def hbacrule_add(self, name, item):
+        return self._post_json(method='hbacrule_add', name=name, item=item)
+
+    def hbacrule_mod(self, name, item):
+        return self._post_json(method='hbacrule_mod', name=name, item=item)
+
+    def hbacrule_del(self, name):
+        return self._post_json(method='hbacrule_del', name=name)
+
+    def hbacrule_add_host(self, name, item):
+        return self._post_json(method='hbacrule_add_host', name=name, item=item)
+
+    def hbacrule_remove_host(self, name, item):
+        return self._post_json(method='hbacrule_remove_host', name=name, item=item)
+
+    def hbacrule_add_service(self, name, item):
+        return self._post_json(method='hbacrule_add_service', name=name, item=item)
+
+    def hbacrule_remove_service(self, name, item):
+        return self._post_json(method='hbacrule_remove_service', name=name, item=item)
+
+    def hbacrule_add_user(self, name, item):
+        return self._post_json(method='hbacrule_add_user', name=name, item=item)
+
+    def hbacrule_remove_user(self, name, item):
+        return self._post_json(method='hbacrule_remove_user', name=name, item=item)
+
+    def hbacrule_add_sourcehost(self, name, item):
+        return self._post_json(method='hbacrule_add_sourcehost', name=name, item=item)
+
+    def hbacrule_remove_sourcehost(self, name, item):
+        return self._post_json(method='hbacrule_remove_sourcehost', name=name, item=item)
+
+
+def get_hbacrule_dict(description=None, hostcategory=None, ipaenabledflag=None, servicecategory=None,
+                      sourcehostcategory=None,
+                      usercategory=None):
+    data = {}
+    if description is not None:
+        data['description'] = description
+    if hostcategory is not None:
+        data['hostcategory'] = hostcategory
+    if ipaenabledflag is not None:
+        data['ipaenabledflag'] = ipaenabledflag
+    if servicecategory is not None:
+        data['servicecategory'] = servicecategory
+    if sourcehostcategory is not None:
+        data['sourcehostcategory'] = sourcehostcategory
+    if usercategory is not None:
+        data['usercategory'] = usercategory
+    return data
+
+
+def get_hbacrule_diff(client, ipa_hbacrule, module_hbacrule):
+    return client.get_diff(ipa_data=ipa_hbacrule, module_data=module_hbacrule)
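+
+
+# Note: 'present' and 'enabled' both map to ipaenabledflag='TRUE', 'disabled'
+# keeps the rule in place with ipaenabledflag='FALSE', and only 'absent'
+# deletes the rule (see ensure() below).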
+
+
+def ensure(module, client):
+    name = module.params['cn']
+    state = module.params['state']
+
+    if state in ['present', 'enabled']:
+        ipaenabledflag = 'TRUE'
+    else:
+        ipaenabledflag = 'FALSE'
+
+    host = module.params['host']
+    hostcategory = module.params['hostcategory']
+    hostgroup = module.params['hostgroup']
+    service = module.params['service']
+    servicecategory = module.params['servicecategory']
+    servicegroup = module.params['servicegroup']
+    sourcehost = module.params['sourcehost']
+    sourcehostcategory = module.params['sourcehostcategory']
+    sourcehostgroup = module.params['sourcehostgroup']
+    user = module.params['user']
+    usercategory = module.params['usercategory']
+    usergroup = module.params['usergroup']
+
+    module_hbacrule = get_hbacrule_dict(description=module.params['description'],
+                                        hostcategory=hostcategory,
+                                        ipaenabledflag=ipaenabledflag,
+                                        servicecategory=servicecategory,
+                                        sourcehostcategory=sourcehostcategory,
+                                        usercategory=usercategory)
+    ipa_hbacrule = client.hbacrule_find(name=name)
+
+    changed = False
+    if state in ['present', 'enabled', 'disabled']:
+        if not ipa_hbacrule:
+            changed = True
+            if not module.check_mode:
+                ipa_hbacrule = client.hbacrule_add(name=name, item=module_hbacrule)
+        else:
+            diff = get_hbacrule_diff(client, ipa_hbacrule, module_hbacrule)
+            if len(diff) > 0:
+                changed = True
+                if not module.check_mode:
+                    data = {}
+                    for key in diff:
+                        data[key] = module_hbacrule.get(key)
+                    client.hbacrule_mod(name=name, item=data)
+
+        if host is not None:
+            changed = client.modify_if_diff(name, ipa_hbacrule.get('memberhost_host', []), host,
+                                            client.hbacrule_add_host,
+                                            client.hbacrule_remove_host, 'host') or changed
+
+        if hostgroup is not None:
+            changed = client.modify_if_diff(name, ipa_hbacrule.get('memberhost_hostgroup', []), hostgroup,
+                                            client.hbacrule_add_host,
+                                            client.hbacrule_remove_host, 'hostgroup') or changed
+
+        if service is not None:
+            changed = client.modify_if_diff(name, ipa_hbacrule.get('memberservice_hbacsvc', []), service,
+                                            client.hbacrule_add_service,
+                                            client.hbacrule_remove_service, 'hbacsvc') or changed
+
+        if servicegroup is not None:
+            changed = client.modify_if_diff(name, ipa_hbacrule.get('memberservice_hbacsvcgroup', []),
+                                            servicegroup,
+                                            client.hbacrule_add_service,
+                                            client.hbacrule_remove_service, 'hbacsvcgroup') or changed
+
+        if sourcehost is not None:
+            changed = client.modify_if_diff(name, ipa_hbacrule.get('sourcehost_host', []), sourcehost,
+                                            client.hbacrule_add_sourcehost,
+                                            client.hbacrule_remove_sourcehost, 'host') or changed
+
+        if sourcehostgroup is not None:
+            changed = client.modify_if_diff(name, ipa_hbacrule.get('sourcehost_group', []), sourcehostgroup,
+                                            client.hbacrule_add_sourcehost,
+                                            client.hbacrule_remove_sourcehost, 'hostgroup') or changed
+
+        if user is not None:
+            changed = client.modify_if_diff(name, ipa_hbacrule.get('memberuser_user', []), user,
+                                            client.hbacrule_add_user,
+                                            client.hbacrule_remove_user, 'user') or changed
+
+        if usergroup is not None:
+            changed = client.modify_if_diff(name, ipa_hbacrule.get('memberuser_group', []), usergroup,
+                                            client.hbacrule_add_user,
+                                            client.hbacrule_remove_user, 'group') or changed
+    else:
+        if ipa_hbacrule:
+            changed = True
+            if not module.check_mode:
+                client.hbacrule_del(name=name)
+
+    return changed, client.hbacrule_find(name=name)
+
+
+def main():
+    argument_spec = ipa_argument_spec()
+    argument_spec.update(cn=dict(type='str', required=True, aliases=['name']),
+                         description=dict(type='str'),
+                         host=dict(type='list', elements='str'),
+                         hostcategory=dict(type='str', choices=['all']),
+                         hostgroup=dict(type='list', elements='str'),
+                         service=dict(type='list', elements='str'),
+                         servicecategory=dict(type='str', choices=['all']),
+                         servicegroup=dict(type='list', elements='str'),
+                         sourcehost=dict(type='list', elements='str'),
+                         sourcehostcategory=dict(type='str', choices=['all']),
+                         sourcehostgroup=dict(type='list', elements='str'),
+                         state=dict(type='str', default='present', choices=['present', 'absent', 'enabled', 'disabled']),
+                         user=dict(type='list', elements='str'),
+                         usercategory=dict(type='str', choices=['all']),
+                         usergroup=dict(type='list', elements='str'))
+
+    module = AnsibleModule(argument_spec=argument_spec,
+                           supports_check_mode=True
+                           )
+
+    client = HBACRuleIPAClient(module=module,
+                               host=module.params['ipa_host'],
+                               port=module.params['ipa_port'],
+                               protocol=module.params['ipa_prot'])
+
+    try:
+        client.login(username=module.params['ipa_user'],
+                     password=module.params['ipa_pass'])
+        changed, hbacrule = ensure(module, client)
+        module.exit_json(changed=changed, hbacrule=hbacrule)
+    except Exception as e:
+        module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/identity/ipa/ipa_host.py b/plugins/modules/identity/ipa/ipa_host.py
new file mode 100644
index 0000000000..d95c2e6043
--- /dev/null
+++ b/plugins/modules/identity/ipa/ipa_host.py
@@ -0,0 +1,312 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+
+DOCUMENTATION = r'''
+---
+module: ipa_host
+author: Thomas Krahn (@Nosmoht)
+short_description: Manage FreeIPA host
+description:
+- Add, modify and delete an IPA host using IPA API.
+options:
+  fqdn:
+    description:
+    - Fully qualified domain name.
+    - Can not be changed as it is the unique identifier.
+    required: true
+    aliases: ["name"]
+    type: str
+  description:
+    description:
+    - A description of this host.
+    type: str
+  force:
+    description:
+    - Force host name even if not in DNS.
+    required: false
+    type: bool
+  ip_address:
+    description:
+    - Add the host to DNS with this IP address.
+    type: str
+  mac_address:
+    description:
+    - List of hardware MAC address(es) of this host.
+    - If option is omitted MAC addresses will not be checked or changed.
+    - If an empty list is passed all assigned MAC addresses will be removed.
+    - MAC addresses that are already assigned but not passed will be removed.
+    aliases: ["macaddress"]
+    type: list
+    elements: str
+  ns_host_location:
+    description:
+    - Host location (e.g. "Lab 2").
+    aliases: ["nshostlocation"]
+    type: str
+  ns_hardware_platform:
+    description:
+    - Host hardware platform (e.g. "Lenovo T61").
+    aliases: ["nshardwareplatform"]
+    type: str
+  ns_os_version:
+    description:
+    - Host operating system and version (e.g. "Fedora 9").
+    aliases: ["nsosversion"]
+    type: str
+  user_certificate:
+    description:
+    - List of Base-64 encoded server certificates.
+    - If option is omitted certificates will not be checked or changed.
+    - If an empty list is passed all assigned certificates will be removed.
+    - Certificates already assigned but not passed will be removed.
+    aliases: ["usercertificate"]
+    type: list
+    elements: str
+  state:
+    description: State to ensure.
+    default: present
+    choices: ["absent", "disabled", "enabled", "present"]
+    type: str
+  update_dns:
+    description:
+    - If set to C(True) together with I(state=absent), the DNS records of the host managed by FreeIPA DNS are removed as well.
+    - This option has no effect for states other than C(absent).
+    default: false
+    type: bool
+  random_password:
+    description: Generate a random password to be used in bulk enrollment.
+    default: False
+    type: bool
+extends_documentation_fragment:
+- community.general.ipa.documentation
+
+'''
+
+EXAMPLES = r'''
+- name: Ensure host is present
+  ipa_host:
+    name: host01.example.com
+    description: Example host
+    ip_address: 192.168.0.123
+    ns_host_location: Lab
+    ns_os_version: CentOS 7
+    ns_hardware_platform: Lenovo T61
+    mac_address:
+    - "08:00:27:E3:B1:2D"
+    - "52:54:00:BD:97:1E"
+    state: present
+    ipa_host: ipa.example.com
+    ipa_user: admin
+    ipa_pass: topsecret
+
+- name: Generate a random password for bulk enrollment
+  ipa_host:
+    name: host01.example.com
+    description: Example host
+    ip_address: 192.168.0.123
+    state: present
+    ipa_host: ipa.example.com
+    ipa_user: admin
+    ipa_pass: topsecret
+    validate_certs: False
+    random_password: True
+
+- name: Ensure host is disabled
+  ipa_host:
+    name: host01.example.com
+    state: disabled
+    ipa_host: ipa.example.com
+    ipa_user: admin
+    ipa_pass: topsecret
+
+- name: Ensure that all user certificates are removed
+  ipa_host:
+    name: host01.example.com
+    user_certificate: []
+    ipa_host: ipa.example.com
+    ipa_user: admin
+    ipa_pass: topsecret
+
+- name: Ensure host is absent
+  ipa_host:
+    name: host01.example.com
+    state: absent
+    ipa_host: ipa.example.com
+    ipa_user: admin
+    ipa_pass: topsecret
+
+- name: Ensure host and its DNS record is absent
+  ipa_host:
+    name: host01.example.com
+    state: absent
+    ipa_host: ipa.example.com
+    ipa_user: admin
+    ipa_pass: topsecret
+    update_dns: True
+'''
+
+RETURN = r'''
+host:
+  description: Host as returned by IPA API.
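+  # Note: when a host is first created with random_password=True, the module
+  # returns the host_add result directly, which can additionally carry a
+  # one-time 'randompassword' entry (an assumption based on ensure() below).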
+ returned: always + type: dict +host_diff: + description: List of options that differ and would be changed + returned: if check mode and a difference is found + type: list +''' + +import traceback + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec +from ansible.module_utils._text import to_native + + +class HostIPAClient(IPAClient): + def __init__(self, module, host, port, protocol): + super(HostIPAClient, self).__init__(module, host, port, protocol) + + def host_show(self, name): + return self._post_json(method='host_show', name=name) + + def host_find(self, name): + return self._post_json(method='host_find', name=None, item={'all': True, 'fqdn': name}) + + def host_add(self, name, host): + return self._post_json(method='host_add', name=name, item=host) + + def host_mod(self, name, host): + return self._post_json(method='host_mod', name=name, item=host) + + def host_del(self, name, update_dns): + return self._post_json(method='host_del', name=name, item={'updatedns': update_dns}) + + def host_disable(self, name): + return self._post_json(method='host_disable', name=name) + + +def get_host_dict(description=None, force=None, ip_address=None, ns_host_location=None, ns_hardware_platform=None, + ns_os_version=None, user_certificate=None, mac_address=None, random_password=None): + data = {} + if description is not None: + data['description'] = description + if force is not None: + data['force'] = force + if ip_address is not None: + data['ip_address'] = ip_address + if ns_host_location is not None: + data['nshostlocation'] = ns_host_location + if ns_hardware_platform is not None: + data['nshardwareplatform'] = ns_hardware_platform + if ns_os_version is not None: + data['nsosversion'] = ns_os_version + if user_certificate is not None: + data['usercertificate'] = [{"__base64__": item} for item in user_certificate] + if mac_address is not None: + data['macaddress'] = mac_address + if random_password is not None: + data['random'] = random_password + return data + + +def get_host_diff(client, ipa_host, module_host): + non_updateable_keys = ['force', 'ip_address'] + if not module_host.get('random'): + non_updateable_keys.append('random') + for key in non_updateable_keys: + if key in module_host: + del module_host[key] + + return client.get_diff(ipa_data=ipa_host, module_data=module_host) + + +def ensure(module, client): + name = module.params['fqdn'] + state = module.params['state'] + + ipa_host = client.host_find(name=name) + module_host = get_host_dict(description=module.params['description'], + force=module.params['force'], ip_address=module.params['ip_address'], + ns_host_location=module.params['ns_host_location'], + ns_hardware_platform=module.params['ns_hardware_platform'], + ns_os_version=module.params['ns_os_version'], + user_certificate=module.params['user_certificate'], + mac_address=module.params['mac_address'], + random_password=module.params.get('random_password'), + ) + changed = False + if state in ['present', 'enabled', 'disabled']: + if not ipa_host: + changed = True + if not module.check_mode: + # OTP password generated by FreeIPA is visible only for host_add command + # so, return directly from here. 
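+                # (host_find output does not include the one-time password,
+                # so falling through to the final host_find would discard it.)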
+                return changed, client.host_add(name=name, host=module_host)
+        else:
+            diff = get_host_diff(client, ipa_host, module_host)
+            if len(diff) > 0:
+                changed = True
+                if not module.check_mode:
+                    data = {}
+                    for key in diff:
+                        data[key] = module_host.get(key)
+                    ipa_host_show = client.host_show(name=name)
+                    if ipa_host_show.get('has_keytab', False) and module.params.get('random_password'):
+                        client.host_disable(name=name)
+                    return changed, client.host_mod(name=name, host=data)
+
+    else:
+        if ipa_host:
+            changed = True
+            update_dns = module.params.get('update_dns', False)
+            if not module.check_mode:
+                client.host_del(name=name, update_dns=update_dns)
+
+    return changed, client.host_find(name=name)
+
+
+def main():
+    argument_spec = ipa_argument_spec()
+    argument_spec.update(description=dict(type='str'),
+                         fqdn=dict(type='str', required=True, aliases=['name']),
+                         force=dict(type='bool'),
+                         ip_address=dict(type='str'),
+                         ns_host_location=dict(type='str', aliases=['nshostlocation']),
+                         ns_hardware_platform=dict(type='str', aliases=['nshardwareplatform']),
+                         ns_os_version=dict(type='str', aliases=['nsosversion']),
+                         user_certificate=dict(type='list', aliases=['usercertificate'], elements='str'),
+                         mac_address=dict(type='list', aliases=['macaddress'], elements='str'),
+                         update_dns=dict(type='bool'),
+                         state=dict(type='str', default='present', choices=['present', 'absent', 'enabled', 'disabled']),
+                         random_password=dict(type='bool'),)
+
+    module = AnsibleModule(argument_spec=argument_spec,
+                           supports_check_mode=True)
+
+    client = HostIPAClient(module=module,
+                           host=module.params['ipa_host'],
+                           port=module.params['ipa_port'],
+                           protocol=module.params['ipa_prot'])
+
+    try:
+        client.login(username=module.params['ipa_user'],
+                     password=module.params['ipa_pass'])
+        changed, host = ensure(module, client)
+        module.exit_json(changed=changed, host=host)
+    except Exception as e:
+        module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/identity/ipa/ipa_hostgroup.py b/plugins/modules/identity/ipa/ipa_hostgroup.py
new file mode 100644
index 0000000000..0ba9e719cb
--- /dev/null
+++ b/plugins/modules/identity/ipa/ipa_hostgroup.py
@@ -0,0 +1,213 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+
+DOCUMENTATION = r'''
+---
+module: ipa_hostgroup
+author: Thomas Krahn (@Nosmoht)
+short_description: Manage FreeIPA host-group
+description:
+- Add, modify and delete an IPA host-group using IPA API.
+options:
+  cn:
+    description:
+    - Name of host-group.
+    - Can not be changed as it is the unique identifier.
+    required: true
+    aliases: ["name"]
+    type: str
+  description:
+    description:
+    - Description.
+    type: str
+  host:
+    description:
+    - List of hosts that belong to the host-group.
+    - If an empty list is passed all hosts will be removed from the group.
+    - If option is omitted hosts will not be checked or changed.
+    - If option is passed all assigned hosts that are not passed will be unassigned from the group.
+    type: list
+    elements: str
+  hostgroup:
+    description:
+    - List of host-groups that belong to that host-group.
+    - If an empty list is passed all host-groups will be removed from the group.
+ - If option is omitted host-groups will not be checked or changed. + - If option is passed all assigned hostgroups that are not passed will be unassigned from the group. + type: list + elements: str + state: + description: + - State to ensure. + default: "present" + choices: ["absent", "disabled", "enabled", "present"] + type: str +extends_documentation_fragment: +- community.general.ipa.documentation + +''' + +EXAMPLES = r''' +- name: Ensure host-group databases is present + ipa_hostgroup: + name: databases + state: present + host: + - db.example.com + hostgroup: + - mysql-server + - oracle-server + ipa_host: ipa.example.com + ipa_user: admin + ipa_pass: topsecret + +- name: Ensure host-group databases is absent + ipa_hostgroup: + name: databases + state: absent + ipa_host: ipa.example.com + ipa_user: admin + ipa_pass: topsecret +''' + +RETURN = r''' +hostgroup: + description: Hostgroup as returned by IPA API. + returned: always + type: dict +''' + +import traceback + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec +from ansible.module_utils._text import to_native + + +class HostGroupIPAClient(IPAClient): + def __init__(self, module, host, port, protocol): + super(HostGroupIPAClient, self).__init__(module, host, port, protocol) + + def hostgroup_find(self, name): + return self._post_json(method='hostgroup_find', name=None, item={'all': True, 'cn': name}) + + def hostgroup_add(self, name, item): + return self._post_json(method='hostgroup_add', name=name, item=item) + + def hostgroup_mod(self, name, item): + return self._post_json(method='hostgroup_mod', name=name, item=item) + + def hostgroup_del(self, name): + return self._post_json(method='hostgroup_del', name=name) + + def hostgroup_add_member(self, name, item): + return self._post_json(method='hostgroup_add_member', name=name, item=item) + + def hostgroup_add_host(self, name, item): + return self.hostgroup_add_member(name=name, item={'host': item}) + + def hostgroup_add_hostgroup(self, name, item): + return self.hostgroup_add_member(name=name, item={'hostgroup': item}) + + def hostgroup_remove_member(self, name, item): + return self._post_json(method='hostgroup_remove_member', name=name, item=item) + + def hostgroup_remove_host(self, name, item): + return self.hostgroup_remove_member(name=name, item={'host': item}) + + def hostgroup_remove_hostgroup(self, name, item): + return self.hostgroup_remove_member(name=name, item={'hostgroup': item}) + + +def get_hostgroup_dict(description=None): + data = {} + if description is not None: + data['description'] = description + return data + + +def get_hostgroup_diff(client, ipa_hostgroup, module_hostgroup): + return client.get_diff(ipa_data=ipa_hostgroup, module_data=module_hostgroup) + + +def ensure(module, client): + name = module.params['cn'] + state = module.params['state'] + host = module.params['host'] + hostgroup = module.params['hostgroup'] + + ipa_hostgroup = client.hostgroup_find(name=name) + module_hostgroup = get_hostgroup_dict(description=module.params['description']) + + changed = False + if state == 'present': + if not ipa_hostgroup: + changed = True + if not module.check_mode: + ipa_hostgroup = client.hostgroup_add(name=name, item=module_hostgroup) + else: + diff = get_hostgroup_diff(client, ipa_hostgroup, module_hostgroup) + if len(diff) > 0: + changed = True + if not module.check_mode: + data = {} + for key in diff: + data[key] = module_hostgroup.get(key) + 
client.hostgroup_mod(name=name, item=data)
+
+        if host is not None:
+            changed = client.modify_if_diff(name, ipa_hostgroup.get('member_host', []), [item.lower() for item in host],
+                                            client.hostgroup_add_host, client.hostgroup_remove_host) or changed
+
+        if hostgroup is not None:
+            changed = client.modify_if_diff(name, ipa_hostgroup.get('member_hostgroup', []),
+                                            [item.lower() for item in hostgroup],
+                                            client.hostgroup_add_hostgroup,
+                                            client.hostgroup_remove_hostgroup) or changed
+
+    else:
+        if ipa_hostgroup:
+            changed = True
+            if not module.check_mode:
+                client.hostgroup_del(name=name)
+
+    return changed, client.hostgroup_find(name=name)
+
+
+def main():
+    argument_spec = ipa_argument_spec()
+    argument_spec.update(cn=dict(type='str', required=True, aliases=['name']),
+                         description=dict(type='str'),
+                         host=dict(type='list', elements='str'),
+                         hostgroup=dict(type='list', elements='str'),
+                         state=dict(type='str', default='present', choices=['present', 'absent', 'enabled', 'disabled']))
+
+    module = AnsibleModule(argument_spec=argument_spec,
+                           supports_check_mode=True)
+
+    client = HostGroupIPAClient(module=module,
+                                host=module.params['ipa_host'],
+                                port=module.params['ipa_port'],
+                                protocol=module.params['ipa_prot'])
+
+    try:
+        client.login(username=module.params['ipa_user'],
+                     password=module.params['ipa_pass'])
+        changed, hostgroup = ensure(module, client)
+        module.exit_json(changed=changed, hostgroup=hostgroup)
+    except Exception as e:
+        module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/identity/ipa/ipa_role.py b/plugins/modules/identity/ipa/ipa_role.py
new file mode 100644
index 0000000000..0560706d03
--- /dev/null
+++ b/plugins/modules/identity/ipa/ipa_role.py
@@ -0,0 +1,307 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+
+DOCUMENTATION = r'''
+---
+module: ipa_role
+author: Thomas Krahn (@Nosmoht)
+short_description: Manage FreeIPA role
+description:
+- Add, modify and delete a role within FreeIPA server using FreeIPA API.
+options:
+  cn:
+    description:
+    - Role name.
+    - Can not be changed as it is the unique identifier.
+    required: true
+    aliases: ['name']
+    type: str
+  description:
+    description:
+    - A description of this role.
+    type: str
+  group:
+    description:
+    - List of group names to assign to this role.
+    - If an empty list is passed all assigned groups will be unassigned from the role.
+    - If option is omitted groups will not be checked or changed.
+    - If option is passed all assigned groups that are not passed will be unassigned from the role.
+    type: list
+    elements: str
+  host:
+    description:
+    - List of host names to assign.
+    - If an empty list is passed all assigned hosts will be unassigned from the role.
+    - If option is omitted hosts will not be checked or changed.
+    - If option is passed all assigned hosts that are not passed will be unassigned from the role.
+    type: list
+    elements: str
+  hostgroup:
+    description:
+    - List of host group names to assign.
+    - If an empty list is passed all assigned host groups will be removed from the role.
+    - If option is omitted host groups will not be checked or changed.
+ - If option is passed all assigned hostgroups that are not passed will be unassigned from the role. + type: list + elements: str + privilege: + description: + - List of privileges granted to the role. + - If an empty list is passed all assigned privileges will be removed. + - If option is omitted privileges will not be checked or changed. + - If option is passed all assigned privileges that are not passed will be removed. + type: list + elements: str + service: + description: + - List of service names to assign. + - If an empty list is passed all assigned services will be removed from the role. + - If option is omitted services will not be checked or changed. + - If option is passed all assigned services that are not passed will be removed from the role. + type: list + elements: str + state: + description: State to ensure. + default: "present" + choices: ["absent", "present"] + type: str + user: + description: + - List of user names to assign. + - If an empty list is passed all assigned users will be removed from the role. + - If option is omitted users will not be checked or changed. + type: list + elements: str +extends_documentation_fragment: +- community.general.ipa.documentation + +''' + +EXAMPLES = r''' +- name: Ensure role is present + ipa_role: + name: dba + description: Database Administrators + state: present + user: + - pinky + - brain + ipa_host: ipa.example.com + ipa_user: admin + ipa_pass: topsecret + +- name: Ensure role with certain details + ipa_role: + name: another-role + description: Just another role + group: + - editors + host: + - host01.example.com + hostgroup: + - hostgroup01 + privilege: + - Group Administrators + - User Administrators + service: + - service01 + +- name: Ensure role is absent + ipa_role: + name: dba + state: absent + ipa_host: ipa.example.com + ipa_user: admin + ipa_pass: topsecret +''' + +RETURN = r''' +role: + description: Role as returned by IPA API. 
+ returned: always + type: dict +''' + +import traceback + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec +from ansible.module_utils._text import to_native + + +class RoleIPAClient(IPAClient): + def __init__(self, module, host, port, protocol): + super(RoleIPAClient, self).__init__(module, host, port, protocol) + + def role_find(self, name): + return self._post_json(method='role_find', name=None, item={'all': True, 'cn': name}) + + def role_add(self, name, item): + return self._post_json(method='role_add', name=name, item=item) + + def role_mod(self, name, item): + return self._post_json(method='role_mod', name=name, item=item) + + def role_del(self, name): + return self._post_json(method='role_del', name=name) + + def role_add_member(self, name, item): + return self._post_json(method='role_add_member', name=name, item=item) + + def role_add_group(self, name, item): + return self.role_add_member(name=name, item={'group': item}) + + def role_add_host(self, name, item): + return self.role_add_member(name=name, item={'host': item}) + + def role_add_hostgroup(self, name, item): + return self.role_add_member(name=name, item={'hostgroup': item}) + + def role_add_service(self, name, item): + return self.role_add_member(name=name, item={'service': item}) + + def role_add_user(self, name, item): + return self.role_add_member(name=name, item={'user': item}) + + def role_remove_member(self, name, item): + return self._post_json(method='role_remove_member', name=name, item=item) + + def role_remove_group(self, name, item): + return self.role_remove_member(name=name, item={'group': item}) + + def role_remove_host(self, name, item): + return self.role_remove_member(name=name, item={'host': item}) + + def role_remove_hostgroup(self, name, item): + return self.role_remove_member(name=name, item={'hostgroup': item}) + + def role_remove_service(self, name, item): + return self.role_remove_member(name=name, item={'service': item}) + + def role_remove_user(self, name, item): + return self.role_remove_member(name=name, item={'user': item}) + + def role_add_privilege(self, name, item): + return self._post_json(method='role_add_privilege', name=name, item={'privilege': item}) + + def role_remove_privilege(self, name, item): + return self._post_json(method='role_remove_privilege', name=name, item={'privilege': item}) + + +def get_role_dict(description=None): + data = {} + if description is not None: + data['description'] = description + return data + + +def get_role_diff(client, ipa_role, module_role): + return client.get_diff(ipa_data=ipa_role, module_data=module_role) + + +def ensure(module, client): + state = module.params['state'] + name = module.params['cn'] + group = module.params['group'] + host = module.params['host'] + hostgroup = module.params['hostgroup'] + privilege = module.params['privilege'] + service = module.params['service'] + user = module.params['user'] + + module_role = get_role_dict(description=module.params['description']) + ipa_role = client.role_find(name=name) + + changed = False + if state == 'present': + if not ipa_role: + changed = True + if not module.check_mode: + ipa_role = client.role_add(name=name, item=module_role) + else: + diff = get_role_diff(client, ipa_role, module_role) + if len(diff) > 0: + changed = True + if not module.check_mode: + data = {} + for key in diff: + data[key] = module_role.get(key) + client.role_mod(name=name, item=data) + + if group is not None: 
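+            # modify_if_diff() (from module_utils/ipa.py) compares the member
+            # list reported by IPA with the desired list and only invokes the
+            # add/remove callbacks for the difference; the same pattern repeats
+            # below for hosts, hostgroups, privileges, services and users.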
+ changed = client.modify_if_diff(name, ipa_role.get('member_group', []), group, + client.role_add_group, + client.role_remove_group) or changed + if host is not None: + changed = client.modify_if_diff(name, ipa_role.get('member_host', []), host, + client.role_add_host, + client.role_remove_host) or changed + + if hostgroup is not None: + changed = client.modify_if_diff(name, ipa_role.get('member_hostgroup', []), hostgroup, + client.role_add_hostgroup, + client.role_remove_hostgroup) or changed + + if privilege is not None: + changed = client.modify_if_diff(name, ipa_role.get('memberof_privilege', []), privilege, + client.role_add_privilege, + client.role_remove_privilege) or changed + if service is not None: + changed = client.modify_if_diff(name, ipa_role.get('member_service', []), service, + client.role_add_service, + client.role_remove_service) or changed + if user is not None: + changed = client.modify_if_diff(name, ipa_role.get('member_user', []), user, + client.role_add_user, + client.role_remove_user) or changed + + else: + if ipa_role: + changed = True + if not module.check_mode: + client.role_del(name) + + return changed, client.role_find(name=name) + + +def main(): + argument_spec = ipa_argument_spec() + argument_spec.update(cn=dict(type='str', required=True, aliases=['name']), + description=dict(type='str'), + group=dict(type='list', elements='str'), + host=dict(type='list', elements='str'), + hostgroup=dict(type='list', elements='str'), + privilege=dict(type='list', elements='str'), + service=dict(type='list', elements='str'), + state=dict(type='str', default='present', choices=['present', 'absent']), + user=dict(type='list', elements='str')) + + module = AnsibleModule(argument_spec=argument_spec, + supports_check_mode=True) + + client = RoleIPAClient(module=module, + host=module.params['ipa_host'], + port=module.params['ipa_port'], + protocol=module.params['ipa_prot']) + + try: + client.login(username=module.params['ipa_user'], + password=module.params['ipa_pass']) + changed, role = ensure(module, client) + module.exit_json(changed=changed, role=role) + except Exception as e: + module.fail_json(msg=to_native(e), exception=traceback.format_exc()) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/identity/ipa/ipa_service.py b/plugins/modules/identity/ipa/ipa_service.py new file mode 100644 index 0000000000..444ee4055e --- /dev/null +++ b/plugins/modules/identity/ipa/ipa_service.py @@ -0,0 +1,213 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright: (c) 2018, Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = r''' +--- +module: ipa_service +author: Cédric Parent (@cprh) +short_description: Manage FreeIPA service +description: +- Add and delete an IPA service using IPA API. +options: + krbcanonicalname: + description: + - Principal of the service. + - Can not be changed as it is the unique identifier. + required: true + aliases: ["name"] + type: str + hosts: + description: + - Defines the list of 'ManagedBy' hosts. + required: false + type: list + elements: str + force: + description: + - Force principal name even if host is not in DNS. + required: false + type: bool + state: + description: State to ensure. 
+    required: false
+    default: present
+    choices: ["absent", "present"]
+    type: str
+extends_documentation_fragment:
+- community.general.ipa.documentation
+
+'''
+
+EXAMPLES = r'''
+- name: Ensure service is present
+  ipa_service:
+    name: http/host01.example.com
+    state: present
+    ipa_host: ipa.example.com
+    ipa_user: admin
+    ipa_pass: topsecret
+
+- name: Ensure service is absent
+  ipa_service:
+    name: http/host01.example.com
+    state: absent
+    ipa_host: ipa.example.com
+    ipa_user: admin
+    ipa_pass: topsecret
+
+- name: Ensure the list of managing hosts
+  ipa_service:
+    name: http/host01.example.com
+    hosts:
+    - host01.example.com
+    - host02.example.com
+    ipa_host: ipa.example.com
+    ipa_user: admin
+    ipa_pass: topsecret
+'''
+
+RETURN = r'''
+service:
+  description: Service as returned by IPA API.
+  returned: always
+  type: dict
+'''
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec
+from ansible.module_utils._text import to_native
+
+
+class ServiceIPAClient(IPAClient):
+    def __init__(self, module, host, port, protocol):
+        super(ServiceIPAClient, self).__init__(module, host, port, protocol)
+
+    def service_find(self, name):
+        return self._post_json(method='service_find', name=None, item={'all': True, 'krbcanonicalname': name})
+
+    def service_add(self, name, service):
+        return self._post_json(method='service_add', name=name, item=service)
+
+    def service_mod(self, name, service):
+        return self._post_json(method='service_mod', name=name, item=service)
+
+    def service_del(self, name):
+        return self._post_json(method='service_del', name=name)
+
+    def service_disable(self, name):
+        return self._post_json(method='service_disable', name=name)
+
+    def service_add_host(self, name, item):
+        return self._post_json(method='service_add_host', name=name, item={'host': item})
+
+    def service_remove_host(self, name, item):
+        return self._post_json(method='service_remove_host', name=name, item={'host': item})
+
+
+def get_service_dict(force=None, krbcanonicalname=None):
+    data = {}
+    if force is not None:
+        data['force'] = force
+    if krbcanonicalname is not None:
+        data['krbcanonicalname'] = krbcanonicalname
+    return data
+
+
+def get_service_diff(client, ipa_host, module_service):
+    non_updateable_keys = ['force', 'krbcanonicalname']
+    for key in non_updateable_keys:
+        if key in module_service:
+            del module_service[key]
+
+    return client.get_diff(ipa_data=ipa_host, module_data=module_service)
+
+
+def ensure(module, client):
+    name = module.params['krbcanonicalname']
+    state = module.params['state']
+    hosts = module.params['hosts']
+
+    ipa_service = client.service_find(name=name)
+    module_service = get_service_dict(force=module.params['force'])
+    changed = False
+    if state in ['present', 'enabled', 'disabled']:
+        if not ipa_service:
+            changed = True
+            if not module.check_mode:
+                client.service_add(name=name, service=module_service)
+        else:
+            diff = get_service_diff(client, ipa_service, module_service)
+            if len(diff) > 0:
+                changed = True
+                if not module.check_mode:
+                    data = {}
+                    for key in diff:
+                        data[key] = module_service.get(key)
+                    client.service_mod(name=name, service=data)
+        if hosts is not None:
+            if 'managedby_host' in ipa_service:
+                for host in ipa_service['managedby_host']:
+                    if host not in hosts:
+                        if not module.check_mode:
+                            client.service_remove_host(name=name, item=host)
+                        changed = True
+                for host in hosts:
+                    if host not in ipa_service['managedby_host']:
+                        if not module.check_mode:
+                            client.service_add_host(name=name, item=host)
+                        changed = True
+            else:
+                for host in hosts:
+                    if not module.check_mode:
+                        client.service_add_host(name=name, item=host)
+                    changed = True
+
+    else:
+        if ipa_service:
+            changed = True
+            if not module.check_mode:
+                client.service_del(name=name)
+
+    return changed, client.service_find(name=name)
+
+
+def main():
+    argument_spec = ipa_argument_spec()
+    argument_spec.update(
+        krbcanonicalname=dict(type='str', required=True, aliases=['name']),
+        force=dict(type='bool', required=False),
+        hosts=dict(type='list', required=False, elements='str'),
+        state=dict(type='str', required=False, default='present',
+                   choices=['present', 'absent']))
+
+    module = AnsibleModule(argument_spec=argument_spec,
+                           supports_check_mode=True)
+
+    client = ServiceIPAClient(module=module,
+                              host=module.params['ipa_host'],
+                              port=module.params['ipa_port'],
+                              protocol=module.params['ipa_prot'])
+
+    try:
+        client.login(username=module.params['ipa_user'],
+                     password=module.params['ipa_pass'])
+        changed, service = ensure(module, client)
+        module.exit_json(changed=changed, service=service)
+    except Exception as e:
+        module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/identity/ipa/ipa_subca.py b/plugins/modules/identity/ipa/ipa_subca.py
new file mode 100644
index 0000000000..d5b97b327b
--- /dev/null
+++ b/plugins/modules/identity/ipa/ipa_subca.py
@@ -0,0 +1,216 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright (c) 2017, Abhijeet Kasurde (akasurde@redhat.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+
+DOCUMENTATION = r'''
+---
+module: ipa_subca
+author: Abhijeet Kasurde (@Akasurde)
+short_description: Manage FreeIPA Lightweight Sub Certificate Authorities
+description:
+- Add, modify, enable, disable and delete IPA Lightweight Sub Certificate Authorities using IPA API.
+options:
+  subca_name:
+    description:
+    - The Sub Certificate Authority name which needs to be managed.
+    required: true
+    aliases: ["name"]
+    type: str
+  subca_subject:
+    description:
+    - The Sub Certificate Authority's Subject, e.g. 'CN=SampleSubCA1,O=testrelm.test'.
+    required: true
+    type: str
+  subca_desc:
+    description:
+    - The Sub Certificate Authority's description.
+    type: str
+  state:
+    description:
+    - State to ensure.
+    - States 'disabled' and 'enabled' are available for FreeIPA version 4.4.2 and onwards.
+    required: false
+    default: present
+    choices: ["absent", "disabled", "enabled", "present"]
+    type: str
+extends_documentation_fragment:
+- community.general.ipa.documentation
+
+'''
+
+EXAMPLES = '''
+- name: Ensure IPA Sub CA is present
+  ipa_subca:
+    ipa_host: spider.example.com
+    ipa_pass: Passw0rd!
+    state: present
+    subca_name: AnsibleSubCA1
+    subca_subject: 'CN=AnsibleSubCA1,O=example.com'
+    subca_desc: Ansible Sub CA
+
+- name: Ensure that IPA Sub CA is removed
+  ipa_subca:
+    ipa_host: spider.example.com
+    ipa_pass: Passw0rd!
+    state: absent
+    subca_name: AnsibleSubCA1
+
+- name: Ensure that IPA Sub CA is disabled
+  ipa_subca:
+    ipa_host: spider.example.com
+    ipa_pass: Passw0rd!
+    state: disabled
+    subca_name: AnsibleSubCA1
+'''
+
+RETURN = r'''
+subca:
+  description: IPA Sub CA record as returned by IPA API.
+  returned: always
+  type: dict
+'''
+
+from distutils.version import LooseVersion
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec
+from ansible.module_utils._text import to_native
+
+
+class SubCAIPAClient(IPAClient):
+    def __init__(self, module, host, port, protocol):
+        super(SubCAIPAClient, self).__init__(module, host, port, protocol)
+
+    def subca_find(self, subca_name):
+        return self._post_json(method='ca_find', name=subca_name, item=None)
+
+    def subca_add(self, subca_name=None, subject_dn=None, details=None):
+        item = dict(ipacasubjectdn=subject_dn)
+        subca_desc = details.get('description', None)
+        if subca_desc is not None:
+            item.update(description=subca_desc)
+        return self._post_json(method='ca_add', name=subca_name, item=item)
+
+    def subca_mod(self, subca_name=None, diff=None, details=None):
+        item = get_subca_dict(details)
+        for change in diff:
+            update_detail = dict()
+            if item[change] is not None:
+                update_detail.update(setattr="{0}={1}".format(change, item[change]))
+                self._post_json(method='ca_mod', name=subca_name, item=update_detail)
+
+    def subca_del(self, subca_name=None):
+        return self._post_json(method='ca_del', name=subca_name)
+
+    def subca_disable(self, subca_name=None):
+        return self._post_json(method='ca_disable', name=subca_name)
+
+    def subca_enable(self, subca_name=None):
+        return self._post_json(method='ca_enable', name=subca_name)
+
+
+def get_subca_dict(details=None):
+    module_subca = dict()
+    if details['description'] is not None:
+        module_subca['description'] = details['description']
+    if details['subca_subject'] is not None:
+        module_subca['ipacasubjectdn'] = details['subca_subject']
+    return module_subca
+
+
+def get_subca_diff(client, ipa_subca, module_subca):
+    details = get_subca_dict(module_subca)
+    return client.get_diff(ipa_data=ipa_subca, module_data=details)
+
+
+def ensure(module, client):
+    subca_name = module.params['subca_name']
+    subca_subject_dn = module.params['subca_subject']
+    subca_desc = module.params['subca_desc']
+
+    state = module.params['state']
+
+    ipa_subca = client.subca_find(subca_name)
+    module_subca = dict(description=subca_desc,
+                        subca_subject=subca_subject_dn)
+
+    changed = False
+    if state == 'present':
+        if not ipa_subca:
+            changed = True
+            if not module.check_mode:
+                client.subca_add(subca_name=subca_name, subject_dn=subca_subject_dn, details=module_subca)
+        else:
+            diff = get_subca_diff(client, ipa_subca, module_subca)
+            # IPA does not allow to modify Sub CA's subject DN
+            # So skip it for now.
+            if 'ipacasubjectdn' in diff:
+                diff.remove('ipacasubjectdn')
+                del module_subca['subca_subject']
+
+            if len(diff) > 0:
+                changed = True
+                if not module.check_mode:
+                    client.subca_mod(subca_name=subca_name, diff=diff, details=module_subca)
+    elif state == 'absent':
+        if ipa_subca:
+            changed = True
+            if not module.check_mode:
+                client.subca_del(subca_name=subca_name)
+    elif state == 'disabled':
+        ipa_version = client.get_ipa_version()
+        if LooseVersion(ipa_version) < LooseVersion('4.4.2'):
+            module.fail_json(msg="Current version of IPA server [%s] does not support 'CA disable' option. "
+                                 "Please upgrade to version greater than 4.4.2" % ipa_version)
+        if ipa_subca:
+            changed = True
+            if not module.check_mode:
+                client.subca_disable(subca_name=subca_name)
+    elif state == 'enabled':
+        ipa_version = client.get_ipa_version()
+        if LooseVersion(ipa_version) < LooseVersion('4.4.2'):
+            module.fail_json(msg="Current version of IPA server [%s] does not support 'CA enable' option. "
+                                 "Please upgrade to version greater than 4.4.2" % ipa_version)
+        if ipa_subca:
+            changed = True
+            if not module.check_mode:
+                client.subca_enable(subca_name=subca_name)
+
+    return changed, client.subca_find(subca_name)
+
+
+def main():
+    argument_spec = ipa_argument_spec()
+    argument_spec.update(subca_name=dict(type='str', required=True, aliases=['name']),
+                         subca_subject=dict(type='str', required=True),
+                         subca_desc=dict(type='str'),
+                         state=dict(type='str', default='present',
+                                    choices=['present', 'absent', 'enabled', 'disabled']),)
+
+    module = AnsibleModule(argument_spec=argument_spec,
+                           supports_check_mode=True,)
+
+    client = SubCAIPAClient(module=module,
+                            host=module.params['ipa_host'],
+                            port=module.params['ipa_port'],
+                            protocol=module.params['ipa_prot'])
+
+    try:
+        client.login(username=module.params['ipa_user'],
+                     password=module.params['ipa_pass'])
+        changed, record = ensure(module, client)
+        module.exit_json(changed=changed, record=record)
+    except Exception as exc:
+        module.fail_json(msg=to_native(exc))
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/identity/ipa/ipa_sudocmd.py b/plugins/modules/identity/ipa/ipa_sudocmd.py
new file mode 100644
index 0000000000..c0bc17cdb7
--- /dev/null
+++ b/plugins/modules/identity/ipa/ipa_sudocmd.py
@@ -0,0 +1,156 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+
+DOCUMENTATION = r'''
+---
+module: ipa_sudocmd
+author: Thomas Krahn (@Nosmoht)
+short_description: Manage FreeIPA sudo command
+description:
+- Add, modify or delete a sudo command within FreeIPA server using FreeIPA API.
+options:
+  sudocmd:
+    description:
+    - Sudo command.
+    aliases: ['name']
+    required: true
+    type: str
+  description:
+    description:
+    - A description of this command.
+    type: str
+  state:
+    description: State to ensure.
+    default: present
+    choices: ['absent', 'disabled', 'enabled', 'present']
+    type: str
+extends_documentation_fragment:
+- community.general.ipa.documentation
+
+'''
+
+EXAMPLES = r'''
+- name: Ensure sudo command exists
+  ipa_sudocmd:
+    name: su
+    description: Allow to run su via sudo
+    ipa_host: ipa.example.com
+    ipa_user: admin
+    ipa_pass: topsecret
+
+- name: Ensure sudo command does not exist
+  ipa_sudocmd:
+    name: su
+    state: absent
+    ipa_host: ipa.example.com
+    ipa_user: admin
+    ipa_pass: topsecret
+'''
+
+RETURN = r'''
+sudocmd:
+  description: Sudo command as returned by IPA API
+  returned: always
+  type: dict
+'''
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec
+from ansible.module_utils._text import to_native
+
+
+class SudoCmdIPAClient(IPAClient):
+    def __init__(self, module, host, port, protocol):
+        super(SudoCmdIPAClient, self).__init__(module, host, port, protocol)
+
+    def sudocmd_find(self, name):
+        return self._post_json(method='sudocmd_find', name=None, item={'all': True, 'sudocmd': name})
+
+    def sudocmd_add(self, name, item):
+        return self._post_json(method='sudocmd_add', name=name, item=item)
+
+    def sudocmd_mod(self, name, item):
+        return self._post_json(method='sudocmd_mod', name=name, item=item)
+
+    def sudocmd_del(self, name):
+        return self._post_json(method='sudocmd_del', name=name)
+
+
+def get_sudocmd_dict(description=None):
+    data = {}
+    if description is not None:
+        data['description'] = description
+    return data
+
+
+def get_sudocmd_diff(client, ipa_sudocmd, module_sudocmd):
+    return client.get_diff(ipa_data=ipa_sudocmd, module_data=module_sudocmd)
+
+
+def ensure(module, client):
+    name = module.params['sudocmd']
+    state = module.params['state']
+
+    module_sudocmd = get_sudocmd_dict(description=module.params['description'])
+    ipa_sudocmd = client.sudocmd_find(name=name)
+
+    changed = False
+    if state == 'present':
+        if not ipa_sudocmd:
+            changed = True
+            if not module.check_mode:
+                client.sudocmd_add(name=name, item=module_sudocmd)
+        else:
+            diff = get_sudocmd_diff(client, ipa_sudocmd, module_sudocmd)
+            if len(diff) > 0:
+                changed = True
+                if not module.check_mode:
+                    data = {}
+                    for key in diff:
+                        data[key] = module_sudocmd.get(key)
+                    client.sudocmd_mod(name=name, item=data)
+    else:
+        if ipa_sudocmd:
+            changed = True
+            if not module.check_mode:
+                client.sudocmd_del(name=name)
+
+    return changed, client.sudocmd_find(name=name)
+
+
+def main():
+    argument_spec = ipa_argument_spec()
+    argument_spec.update(description=dict(type='str'),
+                         state=dict(type='str', default='present', choices=['present', 'absent', 'enabled', 'disabled']),
+                         sudocmd=dict(type='str', required=True, aliases=['name']))
+
+    module = AnsibleModule(argument_spec=argument_spec,
+                           supports_check_mode=True)
+
+    client = SudoCmdIPAClient(module=module,
+                              host=module.params['ipa_host'],
+                              port=module.params['ipa_port'],
+                              protocol=module.params['ipa_prot'])
+    try:
+        client.login(username=module.params['ipa_user'],
+                     password=module.params['ipa_pass'])
+        changed, sudocmd = ensure(module, client)
+        module.exit_json(changed=changed, sudocmd=sudocmd)
+    except Exception as e:
+        module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/identity/ipa/ipa_sudocmdgroup.py b/plugins/modules/identity/ipa/ipa_sudocmdgroup.py
new file mode 100644
index 0000000000..38ed255cf9
--- /dev/null
+++ b/plugins/modules/identity/ipa/ipa_sudocmdgroup.py
b/plugins/modules/identity/ipa/ipa_sudocmdgroup.py @@ -0,0 +1,184 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright: (c) 2017, Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = r''' +--- +module: ipa_sudocmdgroup +author: Thomas Krahn (@Nosmoht) +short_description: Manage FreeIPA sudo command group +description: +- Add, modify or delete sudo command group within IPA server using IPA API. +options: + cn: + description: + - Sudo Command Group. + aliases: ['name'] + required: true + type: str + description: + description: + - Group description. + type: str + state: + description: State to ensure. + default: present + choices: ['absent', 'disabled', 'enabled', 'present'] + type: str + sudocmd: + description: + - List of sudo commands to assign to the group. + - If an empty list is passed all assigned commands will be removed from the group. + - If option is omitted sudo commands will not be checked or changed. + type: list + elements: str +extends_documentation_fragment: +- community.general.ipa.documentation + +''' + +EXAMPLES = r''' +- name: Ensure sudo command group exists + ipa_sudocmdgroup: + name: group01 + description: Group of important commands + sudocmd: + - su + ipa_host: ipa.example.com + ipa_user: admin + ipa_pass: topsecret + +- name: Ensure sudo command group does not exist + ipa_sudocmdgroup: + name: group01 + state: absent + ipa_host: ipa.example.com + ipa_user: admin + ipa_pass: topsecret +''' + +RETURN = r''' +sudocmdgroup: + description: Sudo command group as returned by IPA API + returned: always + type: dict +''' + +import traceback + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec +from ansible.module_utils._text import to_native + + +class SudoCmdGroupIPAClient(IPAClient): + def __init__(self, module, host, port, protocol): + super(SudoCmdGroupIPAClient, self).__init__(module, host, port, protocol) + + def sudocmdgroup_find(self, name): + return self._post_json(method='sudocmdgroup_find', name=None, item={'all': True, 'cn': name}) + + def sudocmdgroup_add(self, name, item): + return self._post_json(method='sudocmdgroup_add', name=name, item=item) + + def sudocmdgroup_mod(self, name, item): + return self._post_json(method='sudocmdgroup_mod', name=name, item=item) + + def sudocmdgroup_del(self, name): + return self._post_json(method='sudocmdgroup_del', name=name) + + def sudocmdgroup_add_member(self, name, item): + return self._post_json(method='sudocmdgroup_add_member', name=name, item=item) + + def sudocmdgroup_add_member_sudocmd(self, name, item): + return self.sudocmdgroup_add_member(name=name, item={'sudocmd': item}) + + def sudocmdgroup_remove_member(self, name, item): + return self._post_json(method='sudocmdgroup_remove_member', name=name, item=item) + + def sudocmdgroup_remove_member_sudocmd(self, name, item): + return self.sudocmdgroup_remove_member(name=name, item={'sudocmd': item}) + + +def get_sudocmdgroup_dict(description=None): + data = {} + if description is not None: + data['description'] = description + return data + + +def get_sudocmdgroup_diff(client, ipa_sudocmdgroup, module_sudocmdgroup): + return client.get_diff(ipa_data=ipa_sudocmdgroup, 
module_data=module_sudocmdgroup) + + +def ensure(module, client): + name = module.params['cn'] + state = module.params['state'] + sudocmd = module.params['sudocmd'] + + module_sudocmdgroup = get_sudocmdgroup_dict(description=module.params['description']) + ipa_sudocmdgroup = client.sudocmdgroup_find(name=name) + + changed = False + if state == 'present': + if not ipa_sudocmdgroup: + changed = True + if not module.check_mode: + ipa_sudocmdgroup = client.sudocmdgroup_add(name=name, item=module_sudocmdgroup) + else: + diff = get_sudocmdgroup_diff(client, ipa_sudocmdgroup, module_sudocmdgroup) + if len(diff) > 0: + changed = True + if not module.check_mode: + data = {} + for key in diff: + data[key] = module_sudocmdgroup.get(key) + client.sudocmdgroup_mod(name=name, item=data) + + if sudocmd is not None: + changed = client.modify_if_diff(name, ipa_sudocmdgroup.get('member_sudocmd', []), sudocmd, + client.sudocmdgroup_add_member_sudocmd, + client.sudocmdgroup_remove_member_sudocmd) or changed + else: + if ipa_sudocmdgroup: + changed = True + if not module.check_mode: + client.sudocmdgroup_del(name=name) + + return changed, client.sudocmdgroup_find(name=name) + + +def main(): + argument_spec = ipa_argument_spec() + argument_spec.update(cn=dict(type='str', required=True, aliases=['name']), + description=dict(type='str'), + state=dict(type='str', default='present', choices=['present', 'absent', 'enabled', 'disabled']), + sudocmd=dict(type='list', elements='str')) + + module = AnsibleModule(argument_spec=argument_spec, + supports_check_mode=True) + + client = SudoCmdGroupIPAClient(module=module, + host=module.params['ipa_host'], + port=module.params['ipa_port'], + protocol=module.params['ipa_prot']) + try: + client.login(username=module.params['ipa_user'], + password=module.params['ipa_pass']) + changed, sudocmdgroup = ensure(module, client) + module.exit_json(changed=changed, sudocmdgroup=sudocmdgroup) + except Exception as e: + module.fail_json(msg=to_native(e), exception=traceback.format_exc()) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/identity/ipa/ipa_sudorule.py b/plugins/modules/identity/ipa/ipa_sudorule.py new file mode 100644 index 0000000000..a93f041b4b --- /dev/null +++ b/plugins/modules/identity/ipa/ipa_sudorule.py @@ -0,0 +1,405 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright: (c) 2017, Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = r''' +--- +module: ipa_sudorule +author: Thomas Krahn (@Nosmoht) +short_description: Manage FreeIPA sudo rule +description: +- Add, modify or delete sudo rule within IPA server using IPA API. +options: + cn: + description: + - Canonical name. + - Can not be changed as it is the unique identifier. + required: true + aliases: ['name'] + type: str + cmdcategory: + description: + - Command category the rule applies to. + choices: ['all'] + type: str + cmd: + description: + - List of commands assigned to the rule. + - If an empty list is passed all commands will be removed from the rule. + - If option is omitted commands will not be checked or changed. + type: list + elements: str + description: + description: + - Description of the sudo rule. + type: str + host: + description: + - List of hosts assigned to the rule.
+ - If an empty list is passed all hosts will be removed from the rule. + - If option is omitted hosts will not be checked or changed. + - Option C(hostcategory) must be omitted to assign hosts. + type: list + elements: str + hostcategory: + description: + - Host category the rule applies to. + - If 'all' is passed one must omit C(host) and C(hostgroup). + - Options C(host) and C(hostgroup) must be omitted to assign 'all'. + choices: ['all'] + type: str + hostgroup: + description: + - List of host groups assigned to the rule. + - If an empty list is passed all host groups will be removed from the rule. + - If option is omitted host groups will not be checked or changed. + - Option C(hostcategory) must be omitted to assign host groups. + type: list + elements: str + runasusercategory: + description: + - RunAs User category the rule applies to. + choices: ['all'] + type: str + runasgroupcategory: + description: + - RunAs Group category the rule applies to. + choices: ['all'] + type: str + sudoopt: + description: + - List of options to add to the sudo rule. + type: list + elements: str + user: + description: + - List of users assigned to the rule. + - If an empty list is passed all users will be removed from the rule. + - If option is omitted users will not be checked or changed. + type: list + elements: str + usercategory: + description: + - User category the rule applies to. + choices: ['all'] + type: str + usergroup: + description: + - List of user groups assigned to the rule. + - If an empty list is passed all user groups will be removed from the rule. + - If option is omitted user groups will not be checked or changed. + type: list + elements: str + state: + description: State to ensure. + default: present + choices: ['absent', 'disabled', 'enabled', 'present'] + type: str +extends_documentation_fragment: +- community.general.ipa.documentation + +''' + +EXAMPLES = r''' +- name: Ensure sudo rule is present that allows everybody to execute any command on any host without being asked for a password. + ipa_sudorule: + name: sudo_all_nopasswd + cmdcategory: all + description: Allow to run every command with sudo without password + hostcategory: all + sudoopt: + - '!authenticate' + usercategory: all + ipa_host: ipa.example.com + ipa_user: admin + ipa_pass: topsecret + +- name: Ensure user group developers can run every command on host group db-server as well as on host db01.example.com.
+ ipa_sudorule: + name: sudo_dev_dbserver + description: Allow developers to run every command with sudo on all database servers + cmdcategory: all + host: + - db01.example.com + hostgroup: + - db-server + sudoopt: + - '!authenticate' + usergroup: + - developers + ipa_host: ipa.example.com + ipa_user: admin + ipa_pass: topsecret +''' + +RETURN = r''' +sudorule: + description: Sudorule as returned by IPA + returned: always + type: dict +''' + +import traceback + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec +from ansible.module_utils._text import to_native + + +class SudoRuleIPAClient(IPAClient): + def __init__(self, module, host, port, protocol): + super(SudoRuleIPAClient, self).__init__(module, host, port, protocol) + + def sudorule_find(self, name): + return self._post_json(method='sudorule_find', name=None, item={'all': True, 'cn': name}) + + def sudorule_add(self, name, item): + return self._post_json(method='sudorule_add', name=name, item=item) + + def sudorule_mod(self, name, item): + return self._post_json(method='sudorule_mod', name=name, item=item) + + def sudorule_del(self, name): + return self._post_json(method='sudorule_del', name=name) + + def sudorule_add_option(self, name, item): + return self._post_json(method='sudorule_add_option', name=name, item=item) + + def sudorule_add_option_ipasudoopt(self, name, item): + return self.sudorule_add_option(name=name, item={'ipasudoopt': item}) + + def sudorule_remove_option(self, name, item): + return self._post_json(method='sudorule_remove_option', name=name, item=item) + + def sudorule_remove_option_ipasudoopt(self, name, item): + return self.sudorule_remove_option(name=name, item={'ipasudoopt': item}) + + def sudorule_add_host(self, name, item): + return self._post_json(method='sudorule_add_host', name=name, item=item) + + def sudorule_add_host_host(self, name, item): + return self.sudorule_add_host(name=name, item={'host': item}) + + def sudorule_add_host_hostgroup(self, name, item): + return self.sudorule_add_host(name=name, item={'hostgroup': item}) + + def sudorule_remove_host(self, name, item): + return self._post_json(method='sudorule_remove_host', name=name, item=item) + + def sudorule_remove_host_host(self, name, item): + return self.sudorule_remove_host(name=name, item={'host': item}) + + def sudorule_remove_host_hostgroup(self, name, item): + return self.sudorule_remove_host(name=name, item={'hostgroup': item}) + + def sudorule_add_allow_command(self, name, item): + return self._post_json(method='sudorule_add_allow_command', name=name, item={'sudocmd': item}) + + def sudorule_remove_allow_command(self, name, item): + return self._post_json(method='sudorule_remove_allow_command', name=name, item=item) + + def sudorule_add_user(self, name, item): + return self._post_json(method='sudorule_add_user', name=name, item=item) + + def sudorule_add_user_user(self, name, item): + return self.sudorule_add_user(name=name, item={'user': item}) + + def sudorule_add_user_group(self, name, item): + return self.sudorule_add_user(name=name, item={'group': item}) + + def sudorule_remove_user(self, name, item): + return self._post_json(method='sudorule_remove_user', name=name, item=item) + + def sudorule_remove_user_user(self, name, item): + return self.sudorule_remove_user(name=name, item={'user': item}) + + def sudorule_remove_user_group(self, name, item): + return self.sudorule_remove_user(name=name, item={'group': item}) + + +def
get_sudorule_dict(cmdcategory=None, description=None, hostcategory=None, ipaenabledflag=None, usercategory=None, + runasgroupcategory=None, runasusercategory=None): + data = {} + if cmdcategory is not None: + data['cmdcategory'] = cmdcategory + if description is not None: + data['description'] = description + if hostcategory is not None: + data['hostcategory'] = hostcategory + if ipaenabledflag is not None: + data['ipaenabledflag'] = ipaenabledflag + if usercategory is not None: + data['usercategory'] = usercategory + if runasusercategory is not None: + data['ipasudorunasusercategory'] = runasusercategory + if runasgroupcategory is not None: + data['ipasudorunasgroupcategory'] = runasgroupcategory + return data + + +def category_changed(module, client, category_name, ipa_sudorule): + if ipa_sudorule.get(category_name, None) == ['all']: + if not module.check_mode: + # cn is returned as list even with only a single value. + client.sudorule_mod(name=ipa_sudorule.get('cn')[0], item={category_name: None}) + return True + return False + + +def ensure(module, client): + state = module.params['state'] + name = module.params['cn'] + cmd = module.params['cmd'] + cmdcategory = module.params['cmdcategory'] + host = module.params['host'] + hostcategory = module.params['hostcategory'] + hostgroup = module.params['hostgroup'] + runasusercategory = module.params['runasusercategory'] + runasgroupcategory = module.params['runasgroupcategory'] + + if state in ['present', 'enabled']: + ipaenabledflag = 'TRUE' + else: + ipaenabledflag = 'FALSE' + + sudoopt = module.params['sudoopt'] + user = module.params['user'] + usercategory = module.params['usercategory'] + usergroup = module.params['usergroup'] + + module_sudorule = get_sudorule_dict(cmdcategory=cmdcategory, + description=module.params['description'], + hostcategory=hostcategory, + ipaenabledflag=ipaenabledflag, + usercategory=usercategory, + runasusercategory=runasusercategory, + runasgroupcategory=runasgroupcategory) + ipa_sudorule = client.sudorule_find(name=name) + + changed = False + if state in ['present', 'disabled', 'enabled']: + if not ipa_sudorule: + changed = True + if not module.check_mode: + ipa_sudorule = client.sudorule_add(name=name, item=module_sudorule) + else: + diff = client.get_diff(ipa_sudorule, module_sudorule) + if len(diff) > 0: + changed = True + if not module.check_mode: + if 'hostcategory' in diff: + if ipa_sudorule.get('memberhost_host', None) is not None: + client.sudorule_remove_host_host(name=name, item=ipa_sudorule.get('memberhost_host')) + if ipa_sudorule.get('memberhost_hostgroup', None) is not None: + client.sudorule_remove_host_hostgroup(name=name, + item=ipa_sudorule.get('memberhost_hostgroup')) + + client.sudorule_mod(name=name, item=module_sudorule) + + if cmd is not None: + changed = category_changed(module, client, 'cmdcategory', ipa_sudorule) or changed + if not module.check_mode: + client.sudorule_add_allow_command(name=name, item=cmd) + + if runasusercategory is not None: + changed = category_changed(module, client, 'iparunasusercategory', ipa_sudorule) or changed + + if runasgroupcategory is not None: + changed = category_changed(module, client, 'iparunasgroupcategory', ipa_sudorule) or changed + + if host is not None: + changed = category_changed(module, client, 'hostcategory', ipa_sudorule) or changed + changed = client.modify_if_diff(name, ipa_sudorule.get('memberhost_host', []), host, + client.sudorule_add_host_host, + client.sudorule_remove_host_host) or changed + + if hostgroup is not None: + changed = 
category_changed(module, client, 'hostcategory', ipa_sudorule) or changed + changed = client.modify_if_diff(name, ipa_sudorule.get('memberhost_hostgroup', []), hostgroup, + client.sudorule_add_host_hostgroup, + client.sudorule_remove_host_hostgroup) or changed + if sudoopt is not None: + # client.modify_if_diff does not work as each option must be removed/added on its own + ipa_list = ipa_sudorule.get('ipasudoopt', []) + module_list = sudoopt + diff = list(set(ipa_list) - set(module_list)) + if len(diff) > 0: + changed = True + if not module.check_mode: + for item in diff: + client.sudorule_remove_option_ipasudoopt(name, item) + diff = list(set(module_list) - set(ipa_list)) + if len(diff) > 0: + changed = True + if not module.check_mode: + for item in diff: + client.sudorule_add_option_ipasudoopt(name, item) + + if user is not None: + changed = category_changed(module, client, 'usercategory', ipa_sudorule) or changed + changed = client.modify_if_diff(name, ipa_sudorule.get('memberuser_user', []), user, + client.sudorule_add_user_user, + client.sudorule_remove_user_user) or changed + if usergroup is not None: + changed = category_changed(module, client, 'usercategory', ipa_sudorule) or changed + changed = client.modify_if_diff(name, ipa_sudorule.get('memberuser_group', []), usergroup, + client.sudorule_add_user_group, + client.sudorule_remove_user_group) or changed + else: + if ipa_sudorule: + changed = True + if not module.check_mode: + client.sudorule_del(name) + + return changed, client.sudorule_find(name) + + +def main(): + argument_spec = ipa_argument_spec() + argument_spec.update(cmd=dict(type='list', elements='str'), + cmdcategory=dict(type='str', choices=['all']), + cn=dict(type='str', required=True, aliases=['name']), + description=dict(type='str'), + host=dict(type='list', elements='str'), + hostcategory=dict(type='str', choices=['all']), + hostgroup=dict(type='list', elements='str'), + runasusercategory=dict(type='str', choices=['all']), + runasgroupcategory=dict(type='str', choices=['all']), + sudoopt=dict(type='list', elements='str'), + state=dict(type='str', default='present', choices=['present', 'absent', 'enabled', 'disabled']), + user=dict(type='list', elements='str'), + usercategory=dict(type='str', choices=['all']), + usergroup=dict(type='list', elements='str')) + + module = AnsibleModule(argument_spec=argument_spec, + mutually_exclusive=[['cmdcategory', 'cmd'], + ['hostcategory', 'host'], + ['hostcategory', 'hostgroup'], + ['usercategory', 'user'], + ['usercategory', 'usergroup']], + supports_check_mode=True) + + client = SudoRuleIPAClient(module=module, + host=module.params['ipa_host'], + port=module.params['ipa_port'], + protocol=module.params['ipa_prot']) + try: + client.login(username=module.params['ipa_user'], + password=module.params['ipa_pass']) + changed, sudorule = ensure(module, client) + module.exit_json(changed=changed, sudorule=sudorule) + except Exception as e: + module.fail_json(msg=to_native(e), exception=traceback.format_exc()) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/identity/ipa/ipa_user.py b/plugins/modules/identity/ipa/ipa_user.py new file mode 100644 index 0000000000..f78f702500 --- /dev/null +++ b/plugins/modules/identity/ipa/ipa_user.py @@ -0,0 +1,374 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright: (c) 2017, Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type +
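+# Implementation notes (descriptive comments only, summarizing the code below):
+# - The 'enabled'/'disabled' states are realized through the 'nsaccountlock' attribute,
+#   which ensure() derives from the 'state' parameter.
+# - SSH public keys are compared by fingerprint, as the IPA API returns fingerprints
+#   ('sshpubkeyfp') rather than the keys themselves; see get_user_diff() and
+#   get_ssh_key_fingerprint().
+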
+ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = r''' +--- +module: ipa_user +author: Thomas Krahn (@Nosmoht) +short_description: Manage FreeIPA users +description: +- Add, modify and delete user within IPA server. +options: + displayname: + description: Display name. + type: str + update_password: + description: + - Set password for a user. + type: str + default: 'always' + choices: [ always, on_create ] + givenname: + description: First name. + type: str + krbpasswordexpiration: + description: + - Date at which the user password will expire. + - In the format YYYYMMddHHmmss. + - e.g. 20180121182022 will expire on 21 January 2018 at 18:20:22. + type: str + loginshell: + description: Login shell. + type: str + mail: + description: + - List of mail addresses assigned to the user. + - If an empty list is passed all assigned email addresses will be deleted. + - If None is passed email addresses will not be checked or changed. + type: list + elements: str + password: + description: + - Password for a user. + - Will not be set for an existing user unless I(update_password=always), which is the default. + type: str + sn: + description: Surname. + type: str + sshpubkey: + description: + - List of public SSH keys. + - If an empty list is passed all assigned public keys will be deleted. + - If None is passed SSH public keys will not be checked or changed. + type: list + elements: str + state: + description: State to ensure. + default: "present" + choices: ["absent", "disabled", "enabled", "present"] + type: str + telephonenumber: + description: + - List of telephone numbers assigned to the user. + - If an empty list is passed all assigned telephone numbers will be deleted. + - If None is passed telephone numbers will not be checked or changed. + type: list + elements: str + title: + description: Title. + type: str + uid: + description: uid of the user. + required: true + aliases: ["name"] + type: str + uidnumber: + description: + - Account Settings UID/Posix User ID number. + type: str + gidnumber: + description: + - Posix Group ID. + type: str + homedirectory: + description: + - Default home directory of the user. + type: str +extends_documentation_fragment: +- community.general.ipa.documentation + +requirements: +- base64 +- hashlib +''' + +EXAMPLES = r''' +- name: Ensure pinky is present and always reset password + ipa_user: + name: pinky + state: present + krbpasswordexpiration: 20200119235959 + givenname: Pinky + sn: Acme + mail: + - pinky@acme.com + telephonenumber: + - '+555123456' + sshpubkey: + - ssh-rsa .... + - ssh-dsa ....
+ uidnumber: 1001 + gidnumber: 100 + homedirectory: /home/pinky + ipa_host: ipa.example.com + ipa_user: admin + ipa_pass: topsecret + +- name: Ensure brain is absent + ipa_user: + name: brain + state: absent + ipa_host: ipa.example.com + ipa_user: admin + ipa_pass: topsecret + +- name: Ensure pinky is present but don't reset password if already exists + ipa_user: + name: pinky + state: present + givenname: Pinky + sn: Acme + password: zounds + ipa_host: ipa.example.com + ipa_user: admin + ipa_pass: topsecret + update_password: on_create +''' + +RETURN = r''' +user: + description: User as returned by IPA API + returned: always + type: dict +''' + +import base64 +import hashlib +import traceback + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec +from ansible.module_utils._text import to_native + + +class UserIPAClient(IPAClient): + def __init__(self, module, host, port, protocol): + super(UserIPAClient, self).__init__(module, host, port, protocol) + + def user_find(self, name): + return self._post_json(method='user_find', name=None, item={'all': True, 'uid': name}) + + def user_add(self, name, item): + return self._post_json(method='user_add', name=name, item=item) + + def user_mod(self, name, item): + return self._post_json(method='user_mod', name=name, item=item) + + def user_del(self, name): + return self._post_json(method='user_del', name=name) + + def user_disable(self, name): + return self._post_json(method='user_disable', name=name) + + def user_enable(self, name): + return self._post_json(method='user_enable', name=name) + + +def get_user_dict(displayname=None, givenname=None, krbpasswordexpiration=None, loginshell=None, + mail=None, nsaccountlock=False, sn=None, sshpubkey=None, telephonenumber=None, + title=None, userpassword=None, gidnumber=None, uidnumber=None, homedirectory=None): + user = {} + if displayname is not None: + user['displayname'] = displayname + if krbpasswordexpiration is not None: + user['krbpasswordexpiration'] = krbpasswordexpiration + "Z" + if givenname is not None: + user['givenname'] = givenname + if loginshell is not None: + user['loginshell'] = loginshell + if mail is not None: + user['mail'] = mail + user['nsaccountlock'] = nsaccountlock + if sn is not None: + user['sn'] = sn + if sshpubkey is not None: + user['ipasshpubkey'] = sshpubkey + if telephonenumber is not None: + user['telephonenumber'] = telephonenumber + if title is not None: + user['title'] = title + if userpassword is not None: + user['userpassword'] = userpassword + if gidnumber is not None: + user['gidnumber'] = gidnumber + if uidnumber is not None: + user['uidnumber'] = uidnumber + if homedirectory is not None: + user['homedirectory'] = homedirectory + + return user + + +def get_user_diff(client, ipa_user, module_user): + """ + Return the keys whose values differ between the two dicts. Unfortunately the IPA + API returns everything as a list even if only a single value is possible. + Therefore some more complexity is needed. + If the value of an attribute in module_user is not a list, it is wrapped in a list + whenever the same attribute in ipa_user is a list. This way the method should not + need to change if the dict returned by the API changes. + :param ipa_user: + :param module_user: + :return: + """ + # sshpubkeyfp is the list of ssh key fingerprints. IPA doesn't return the keys itself but instead the fingerprints. + # These are used for comparison.
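+ # Illustrative fingerprint shapes (example values taken from the get_ssh_key_fingerprint() docstring, not real keys):
+ # md5: FB:0C:AC:0A:07:94:5B:CE:75:6E:63:32:13:AD:AD:D7 user@host (ssh-rsa)
+ # sha256: SHA256:<base64-encoded digest> user@host (ssh-rsa)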
+ sshpubkey = None + if 'ipasshpubkey' in module_user: + hash_algo = 'md5' + if 'sshpubkeyfp' in ipa_user and ipa_user['sshpubkeyfp'][0][:7].upper() == 'SHA256:': + hash_algo = 'sha256' + module_user['sshpubkeyfp'] = [get_ssh_key_fingerprint(pubkey, hash_algo) for pubkey in module_user['ipasshpubkey']] + # Remove the ipasshpubkey element as it is not returned from IPA but save its value to be used later on + sshpubkey = module_user['ipasshpubkey'] + del module_user['ipasshpubkey'] + + result = client.get_diff(ipa_data=ipa_user, module_data=module_user) + + # If there are public keys, remove the fingerprints and add them back to the dict + if sshpubkey is not None: + del module_user['sshpubkeyfp'] + module_user['ipasshpubkey'] = sshpubkey + return result + + +def get_ssh_key_fingerprint(ssh_key, hash_algo='sha256'): + """ + Return the public key fingerprint of a given public SSH key + in format "[fp] [user@host] (ssh-rsa)" where fp is of the format: + FB:0C:AC:0A:07:94:5B:CE:75:6E:63:32:13:AD:AD:D7 + for md5 or + SHA256:[base64] + for sha256 + :param ssh_key: + :param hash_algo: + :return: + """ + parts = ssh_key.strip().split() + if len(parts) == 0: + return None + key_type = parts[0] + key = base64.b64decode(parts[1].encode('ascii')) + + if hash_algo == 'md5': + fp_plain = hashlib.md5(key).hexdigest() + key_fp = ':'.join(a + b for a, b in zip(fp_plain[::2], fp_plain[1::2])).upper() + elif hash_algo == 'sha256': + fp_plain = base64.b64encode(hashlib.sha256(key).digest()).decode('ascii').rstrip('=') + key_fp = 'SHA256:{fp}'.format(fp=fp_plain) + if len(parts) < 3: + return "%s (%s)" % (key_fp, key_type) + else: + user_host = parts[2] + return "%s %s (%s)" % (key_fp, user_host, key_type) + + +def ensure(module, client): + state = module.params['state'] + name = module.params['uid'] + nsaccountlock = state == 'disabled' + + module_user = get_user_dict(displayname=module.params.get('displayname'), + krbpasswordexpiration=module.params.get('krbpasswordexpiration'), + givenname=module.params.get('givenname'), + loginshell=module.params['loginshell'], + mail=module.params['mail'], sn=module.params['sn'], + sshpubkey=module.params['sshpubkey'], nsaccountlock=nsaccountlock, + telephonenumber=module.params['telephonenumber'], title=module.params['title'], + userpassword=module.params['password'], + gidnumber=module.params.get('gidnumber'), uidnumber=module.params.get('uidnumber'), + homedirectory=module.params.get('homedirectory')) + + update_password = module.params.get('update_password') + ipa_user = client.user_find(name=name) + + changed = False + if state in ['present', 'enabled', 'disabled']: + if not ipa_user: + changed = True + if not module.check_mode: + ipa_user = client.user_add(name=name, item=module_user) + else: + if update_password == 'on_create': + module_user.pop('userpassword', None) + diff = get_user_diff(client, ipa_user, module_user) + if len(diff) > 0: + changed = True + if not module.check_mode: + ipa_user = client.user_mod(name=name, item=module_user) + else: + if ipa_user: + changed = True + if not module.check_mode: + client.user_del(name) + + return changed, ipa_user + + +def main(): + argument_spec = ipa_argument_spec() + argument_spec.update(displayname=dict(type='str'), + givenname=dict(type='str'), + update_password=dict(type='str', default="always", + choices=['always', 'on_create']), + krbpasswordexpiration=dict(type='str'), + loginshell=dict(type='str'), + mail=dict(type='list', elements='str'), + sn=dict(type='str'), + uid=dict(type='str', required=True,
aliases=['name']), + gidnumber=dict(type='str'), + uidnumber=dict(type='str'), + password=dict(type='str', no_log=True), + sshpubkey=dict(type='list', elements='str'), + state=dict(type='str', default='present', + choices=['present', 'absent', 'enabled', 'disabled']), + telephonenumber=dict(type='list', elements='str'), + title=dict(type='str'), + homedirectory=dict(type='str')) + + module = AnsibleModule(argument_spec=argument_spec, + supports_check_mode=True) + + client = UserIPAClient(module=module, + host=module.params['ipa_host'], + port=module.params['ipa_port'], + protocol=module.params['ipa_prot']) + + # If sshpubkey is defined as None then module.params['sshpubkey'] is [None]. IPA itself returns None (not a list). + # Therefore a small check here to replace list(None) by None. Otherwise get_user_diff() would return sshpubkey + # as different which should be avoided. + if module.params['sshpubkey'] is not None: + if len(module.params['sshpubkey']) == 1 and module.params['sshpubkey'][0] == "": + module.params['sshpubkey'] = None + + try: + client.login(username=module.params['ipa_user'], + password=module.params['ipa_pass']) + changed, user = ensure(module, client) + module.exit_json(changed=changed, user=user) + except Exception as e: + module.fail_json(msg=to_native(e), exception=traceback.format_exc()) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/identity/ipa/ipa_vault.py b/plugins/modules/identity/ipa/ipa_vault.py new file mode 100644 index 0000000000..c158ec3e2b --- /dev/null +++ b/plugins/modules/identity/ipa/ipa_vault.py @@ -0,0 +1,253 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright: (c) 2018, Juan Manuel Parrilla +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = r''' +--- +module: ipa_vault +author: Juan Manuel Parrilla (@jparrill) +short_description: Manage FreeIPA vaults +description: +- Add, modify and delete vaults and secret vaults. +- KRA service should be enabled to use this module. +options: + cn: + description: + - Vault name. + - Can not be changed as it is the unique identifier. + required: true + aliases: ["name"] + type: str + description: + description: + - Description. + type: str + ipavaulttype: + description: + - Vault types are based on security level. + default: "symmetric" + choices: ["asymmetric", "standard", "symmetric"] + aliases: ["vault_type"] + type: str + ipavaultpublickey: + description: + - Public key. + aliases: ["vault_public_key"] + type: str + ipavaultsalt: + description: + - Vault Salt. + aliases: ["vault_salt"] + type: str + username: + description: + - Any user can own one or more user vaults. + - Mutually exclusive with service. + aliases: ["user"] + type: list + elements: str + service: + description: + - Any service can own one or more service vaults. + - Mutually exclusive with user. + type: str + state: + description: + - State to ensure. + default: "present" + choices: ["absent", "present"] + type: str + replace: + description: + - Force replace the existing vault on IPA server. + type: bool + default: False + validate_certs: + description: + - Validate IPA server certificates.
+ type: bool + default: true +extends_documentation_fragment: +- community.general.ipa.documentation + +''' + +EXAMPLES = r''' +- name: Ensure vault is present + ipa_vault: + name: vault01 + vault_type: standard + user: user01 + ipa_host: ipa.example.com + ipa_user: admin + ipa_pass: topsecret + validate_certs: false + +- name: Ensure vault is present for Admin user + ipa_vault: + name: vault01 + vault_type: standard + ipa_host: ipa.example.com + ipa_user: admin + ipa_pass: topsecret + +- name: Ensure vault is absent + ipa_vault: + name: vault01 + vault_type: standard + user: user01 + state: absent + ipa_host: ipa.example.com + ipa_user: admin + ipa_pass: topsecret + +- name: Modify vault if already exists + ipa_vault: + name: vault01 + vault_type: standard + description: "Vault for test" + ipa_host: ipa.example.com + ipa_user: admin + ipa_pass: topsecret + replace: True + +- name: Get vault info if already exists + ipa_vault: + name: vault01 + ipa_host: ipa.example.com + ipa_user: admin + ipa_pass: topsecret +''' + +RETURN = r''' +vault: + description: Vault as returned by IPA API + returned: always + type: dict +''' + +import traceback + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec +from ansible.module_utils._text import to_native + + +class VaultIPAClient(IPAClient): + def __init__(self, module, host, port, protocol): + super(VaultIPAClient, self).__init__(module, host, port, protocol) + + def vault_find(self, name): + return self._post_json(method='vault_find', name=None, item={'all': True, 'cn': name}) + + def vault_add_internal(self, name, item): + return self._post_json(method='vault_add_internal', name=name, item=item) + + def vault_mod_internal(self, name, item): + return self._post_json(method='vault_mod_internal', name=name, item=item) + + def vault_del(self, name): + return self._post_json(method='vault_del', name=name) + + +def get_vault_dict(description=None, vault_type=None, vault_salt=None, vault_public_key=None, service=None): + vault = {} + + if description is not None: + vault['description'] = description + if vault_type is not None: + vault['ipavaulttype'] = vault_type + if vault_salt is not None: + vault['ipavaultsalt'] = vault_salt + if vault_public_key is not None: + vault['ipavaultpublickey'] = vault_public_key + if service is not None: + vault['service'] = service + return vault + + +def get_vault_diff(client, ipa_vault, module_vault, module): + return client.get_diff(ipa_data=ipa_vault, module_data=module_vault) + + +def ensure(module, client): + state = module.params['state'] + name = module.params['cn'] + user = module.params['username'] + replace = module.params['replace'] + + module_vault = get_vault_dict(description=module.params['description'], vault_type=module.params['ipavaulttype'], + vault_salt=module.params['ipavaultsalt'], + vault_public_key=module.params['ipavaultpublickey'], + service=module.params['service']) + ipa_vault = client.vault_find(name=name) + + changed = False + if state == 'present': + if not ipa_vault: + # New vault + changed = True + if not module.check_mode: + ipa_vault = client.vault_add_internal(name, item=module_vault) + else: + # Already exists + if replace: + diff = get_vault_diff(client, ipa_vault, module_vault, module) + if len(diff) > 0: + changed = True + if not module.check_mode: + data = {} + for key in diff: + data[key] = module_vault.get(key) + client.vault_mod_internal(name=name, item=data) + + else: + if 
ipa_vault: + changed = True + if not module.check_mode: + client.vault_del(name) + + return changed, client.vault_find(name=name) + + +def main(): + argument_spec = ipa_argument_spec() + argument_spec.update(cn=dict(type='str', required=True, aliases=['name']), + description=dict(type='str'), + ipavaulttype=dict(type='str', default='symmetric', + choices=['standard', 'symmetric', 'asymmetric'], aliases=['vault_type']), + ipavaultsalt=dict(type='str', aliases=['vault_salt']), + ipavaultpublickey=dict(type='str', aliases=['vault_public_key']), + service=dict(type='str'), + replace=dict(type='bool', default=False), + state=dict(type='str', default='present', choices=['present', 'absent']), + username=dict(type='list', elements='str', aliases=['user'])) + + module = AnsibleModule(argument_spec=argument_spec, + supports_check_mode=True, + mutually_exclusive=[['username', 'service']]) + + client = VaultIPAClient(module=module, + host=module.params['ipa_host'], + port=module.params['ipa_port'], + protocol=module.params['ipa_prot']) + try: + client.login(username=module.params['ipa_user'], + password=module.params['ipa_pass']) + changed, vault = ensure(module, client) + module.exit_json(changed=changed, vault=vault) + except Exception as e: + module.fail_json(msg=to_native(e), exception=traceback.format_exc()) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/identity/keycloak/keycloak_client.py b/plugins/modules/identity/keycloak/keycloak_client.py new file mode 100644 index 0000000000..17e05a33c7 --- /dev/null +++ b/plugins/modules/identity/keycloak/keycloak_client.py @@ -0,0 +1,853 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2017, Eike Frost +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = { + 'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community' +} + +DOCUMENTATION = ''' +--- +module: keycloak_client + +short_description: Allows administration of Keycloak clients via Keycloak API + + +description: + - This module allows the administration of Keycloak clients via the Keycloak REST API. It + requires access to the REST API via OpenID Connect; the user connecting and the client being + used must have the requisite access rights. In a default Keycloak installation, admin-cli + and an admin user would work, as would a separate client definition with the scope tailored + to your needs and a user having the expected roles. + + - The names of module options are snake_cased versions of the camelCase ones found in the + Keycloak API and its documentation at U(https://www.keycloak.org/docs-api/8.0/rest-api/index.html). + Aliases are provided so camelCased versions can be used as well. + + - The Keycloak API does not always sanity check inputs, e.g. you can set + SAML-specific settings on an OpenID Connect client for instance and vice versa. Be careful. + If you do not specify a setting, usually a sensible default is chosen. + +options: + state: + description: + - State of the client. + - On C(present), the client will be created (or updated if it exists already). + - On C(absent), the client will be removed if it exists. + choices: ['present', 'absent'] + default: 'present' + + realm: + description: + - The realm to create the client in. + + client_id: + description: + - Client id of client to be worked on. This is usually an alphanumeric name chosen by + you.
Either this or I(id) is required. If you specify both, I(id) takes precedence. + This is 'clientId' in the Keycloak REST API. + aliases: + - clientId + + id: + description: + - Id of client to be worked on. This is usually a UUID. Either this or I(client_id) + is required. If you specify both, this takes precedence. + + name: + description: + - Name of the client (this is not the same as I(client_id)). + + description: + description: + - Description of the client in Keycloak. + + root_url: + description: + - Root URL appended to relative URLs for this client. + This is 'rootUrl' in the Keycloak REST API. + aliases: + - rootUrl + + admin_url: + description: + - URL to the admin interface of the client. + This is 'adminUrl' in the Keycloak REST API. + aliases: + - adminUrl + + base_url: + description: + - Default URL to use when the auth server needs to redirect or link back to the client. + This is 'baseUrl' in the Keycloak REST API. + aliases: + - baseUrl + + enabled: + description: + - Is this client enabled or not? + type: bool + + client_authenticator_type: + description: + - How do clients authenticate with the auth server? Either C(client-secret) or + C(client-jwt) can be chosen. When using C(client-secret), the module parameter + I(secret) can set it, while for C(client-jwt), you can use the keys C(use.jwks.url), + C(jwks.url), and C(jwt.credential.certificate) in the I(attributes) module parameter + to configure its behavior. + This is 'clientAuthenticatorType' in the Keycloak REST API. + choices: ['client-secret', 'client-jwt'] + aliases: + - clientAuthenticatorType + + secret: + description: + - When using I(client_authenticator_type) C(client-secret) (the default), you can + specify a secret here (otherwise one will be generated if it does not exist). If + changing this secret, the module will not register a change currently (but the + changed secret will be saved). + + registration_access_token: + description: + - The registration access token provides access for clients to the client registration + service. + This is 'registrationAccessToken' in the Keycloak REST API. + aliases: + - registrationAccessToken + + default_roles: + description: + - list of default roles for this client. If the client roles referenced do not exist + yet, they will be created. + This is 'defaultRoles' in the Keycloak REST API. + aliases: + - defaultRoles + + redirect_uris: + description: + - Acceptable redirect URIs for this client. + This is 'redirectUris' in the Keycloak REST API. + aliases: + - redirectUris + + web_origins: + description: + - List of allowed CORS origins. + This is 'webOrigins' in the Keycloak REST API. + aliases: + - webOrigins + + not_before: + description: + - Revoke any tokens issued before this date for this client (this is a UNIX timestamp). + This is 'notBefore' in the Keycloak REST API. + aliases: + - notBefore + + bearer_only: + description: + - The access type of this client is bearer-only. + This is 'bearerOnly' in the Keycloak REST API. + aliases: + - bearerOnly + type: bool + + consent_required: + description: + - If enabled, users have to consent to client access. + This is 'consentRequired' in the Keycloak REST API. + aliases: + - consentRequired + type: bool + + standard_flow_enabled: + description: + - Enable standard flow for this client or not (OpenID connect). + This is 'standardFlowEnabled' in the Keycloak REST API.
+ aliases: + - standardFlowEnabled + type: bool + + implicit_flow_enabled: + description: + - Enable implicit flow for this client or not (OpenID connect). + This is 'implicitFlowEnabled' in the Keycloak REST API. + aliases: + - implicitFlowEnabled + type: bool + + direct_access_grants_enabled: + description: + - Are direct access grants enabled for this client or not (OpenID connect). + This is 'directAccessGrantsEnabled' in the Keycloak REST API. + aliases: + - directAccessGrantsEnabled + type: bool + + service_accounts_enabled: + description: + - Are service accounts enabled for this client or not (OpenID connect). + This is 'serviceAccountsEnabled' in the Keycloak REST API. + aliases: + - serviceAccountsEnabled + type: bool + + authorization_services_enabled: + description: + - Are authorization services enabled for this client or not (OpenID connect). + This is 'authorizationServicesEnabled' in the Keycloak REST API. + aliases: + - authorizationServicesEnabled + type: bool + + public_client: + description: + - Is the access type for this client public or not. + This is 'publicClient' in the Keycloak REST API. + aliases: + - publicClient + type: bool + + frontchannel_logout: + description: + - Is frontchannel logout enabled for this client or not. + This is 'frontchannelLogout' in the Keycloak REST API. + aliases: + - frontchannelLogout + type: bool + + protocol: + description: + - Type of client (either C(openid-connect) or C(saml)). + choices: ['openid-connect', 'saml'] + + full_scope_allowed: + description: + - Is the "Full Scope Allowed" feature set for this client or not. + This is 'fullScopeAllowed' in the Keycloak REST API. + aliases: + - fullScopeAllowed + type: bool + + node_re_registration_timeout: + description: + - Cluster node re-registration timeout for this client. + This is 'nodeReRegistrationTimeout' in the Keycloak REST API. + aliases: + - nodeReRegistrationTimeout + + registered_nodes: + description: + - dict of registered cluster nodes (with C(nodename) as the key and last registration + time as the value). + This is 'registeredNodes' in the Keycloak REST API. + aliases: + - registeredNodes + + client_template: + description: + - Client template to use for this client. If it does not exist this field will silently + be dropped. + This is 'clientTemplate' in the Keycloak REST API. + aliases: + - clientTemplate + + use_template_config: + description: + - Whether or not to use configuration from the I(client_template). + This is 'useTemplateConfig' in the Keycloak REST API. + aliases: + - useTemplateConfig + type: bool + + use_template_scope: + description: + - Whether or not to use scope configuration from the I(client_template). + This is 'useTemplateScope' in the Keycloak REST API. + aliases: + - useTemplateScope + type: bool + + use_template_mappers: + description: + - Whether or not to use mapper configuration from the I(client_template). + This is 'useTemplateMappers' in the Keycloak REST API. + aliases: + - useTemplateMappers + type: bool + + surrogate_auth_required: + description: + - Whether or not surrogate auth is required. + This is 'surrogateAuthRequired' in the Keycloak REST API. + aliases: + - surrogateAuthRequired + type: bool + + authorization_settings: + description: + - a data structure defining the authorization settings for this client. For reference, + please see the Keycloak API docs at U(https://www.keycloak.org/docs-api/8.0/rest-api/index.html#_resourceserverrepresentation). + This is 'authorizationSettings' in the Keycloak REST API.
+ aliases: + - authorizationSettings + + protocol_mappers: + description: + - a list of dicts defining protocol mappers for this client. + This is 'protocolMappers' in the Keycloak REST API. + aliases: + - protocolMappers + suboptions: + consentRequired: + description: + - Specifies whether a user needs to provide consent to a client for this mapper to be active. + + consentText: + description: + - The human-readable name of the consent the user is presented to accept. + + id: + description: + - Usually a UUID specifying the internal ID of this protocol mapper instance. + + name: + description: + - The name of this protocol mapper. + + protocol: + description: + - This is either C(openid-connect) or C(saml); it specifies for which protocol this protocol mapper + is active. + choices: ['openid-connect', 'saml'] + + protocolMapper: + description: + - The Keycloak-internal name of the type of this protocol-mapper. While an exhaustive list is + impossible to provide since this may be extended through SPIs by the user of Keycloak, + by default Keycloak as of 3.4 ships with at least + - C(docker-v2-allow-all-mapper) + - C(oidc-address-mapper) + - C(oidc-full-name-mapper) + - C(oidc-group-membership-mapper) + - C(oidc-hardcoded-claim-mapper) + - C(oidc-hardcoded-role-mapper) + - C(oidc-role-name-mapper) + - C(oidc-script-based-protocol-mapper) + - C(oidc-sha256-pairwise-sub-mapper) + - C(oidc-usermodel-attribute-mapper) + - C(oidc-usermodel-client-role-mapper) + - C(oidc-usermodel-property-mapper) + - C(oidc-usermodel-realm-role-mapper) + - C(oidc-usersessionmodel-note-mapper) + - C(saml-group-membership-mapper) + - C(saml-hardcode-attribute-mapper) + - C(saml-hardcode-role-mapper) + - C(saml-role-list-mapper) + - C(saml-role-name-mapper) + - C(saml-user-attribute-mapper) + - C(saml-user-property-mapper) + - C(saml-user-session-note-mapper) + - An exhaustive list of available mappers on your installation can be obtained on + the admin console by going to Server Info -> Providers and looking under + 'protocol-mapper'. + + config: + description: + - Dict specifying the configuration options for the protocol mapper; the + contents differ depending on the value of I(protocolMapper) and are not documented + other than by the source of the mappers and its parent class(es). An example is given + below. It is easiest to obtain valid config values by dumping an already-existing + protocol mapper configuration through check-mode in the I(existing) field. + + attributes: + description: + - A dict of further attributes for this client. This can contain various configuration + settings; an example is given in the examples section. While an exhaustive list of + permissible options is not available, possible options as of Keycloak 3.4 are listed below. The Keycloak + API does not validate whether a given option is appropriate for the protocol used; if specified + anyway, Keycloak will simply not use it. + suboptions: + saml.authnstatement: + description: + - For SAML clients, boolean specifying whether or not a statement containing method and timestamp + should be included in the login response. + + saml.client.signature: + description: + - For SAML clients, boolean specifying whether a client signature is required and validated. + + saml.encrypt: + description: + - Boolean specifying whether SAML assertions should be encrypted with the client's public key. + + saml.force.post.binding: + description: + - For SAML clients, boolean specifying whether always to use POST binding for responses.
+ + saml.onetimeuse.condition: + description: + - For SAML clients, boolean specifying whether a OneTimeUse condition should be included in login responses. + + saml.server.signature: + description: + - Boolean specifying whether SAML documents should be signed by the realm. + + saml.server.signature.keyinfo.ext: + description: + - For SAML clients, boolean specifying whether REDIRECT signing key lookup should be optimized through inclusion + of the signing key id in the SAML Extensions element. + + saml.signature.algorithm: + description: + - Signature algorithm used to sign SAML documents. One of C(RSA_SHA256), C(RSA_SHA1), C(RSA_SHA512), or C(DSA_SHA1). + + saml.signing.certificate: + description: + - SAML signing key certificate, base64-encoded. + + saml.signing.private.key: + description: + - SAML signing key private key, base64-encoded. + + saml_assertion_consumer_url_post: + description: + - SAML POST Binding URL for the client's assertion consumer service (login responses). + + saml_assertion_consumer_url_redirect: + description: + - SAML Redirect Binding URL for the client's assertion consumer service (login responses). + + saml_force_name_id_format: + description: + - For SAML clients, Boolean specifying whether to ignore requested NameID subject format and use the configured one instead. + + saml_name_id_format: + description: + - For SAML clients, the NameID format to use (one of C(username), C(email), C(transient), or C(persistent)). + + saml_signature_canonicalization_method: + description: + - SAML signature canonicalization method. This is one of four values, namely + C(http://www.w3.org/2001/10/xml-exc-c14n#) for EXCLUSIVE, + C(http://www.w3.org/2001/10/xml-exc-c14n#WithComments) for EXCLUSIVE_WITH_COMMENTS, + C(http://www.w3.org/TR/2001/REC-xml-c14n-20010315) for INCLUSIVE, and + C(http://www.w3.org/TR/2001/REC-xml-c14n-20010315#WithComments) for INCLUSIVE_WITH_COMMENTS. + + saml_single_logout_service_url_post: + description: + - SAML POST binding url for the client's single logout service. + + saml_single_logout_service_url_redirect: + description: + - SAML redirect binding url for the client's single logout service. + + user.info.response.signature.alg: + description: + - For OpenID-Connect clients, JWA algorithm for signed UserInfo-endpoint responses. One of C(RS256) or C(unsigned). + + request.object.signature.alg: + description: + - For OpenID-Connect clients, JWA algorithm which the client needs to use when sending + OIDC request object. One of C(any), C(none), C(RS256). + + use.jwks.url: + description: + - For OpenID-Connect clients, boolean specifying whether to use a JWKS URL to obtain client + public keys. + + jwks.url: + description: + - For OpenID-Connect clients, URL where client keys in JWK are stored. + + jwt.credential.certificate: + description: + - For OpenID-Connect clients, client certificate for validating JWT issued by + client and signed by its key, base64-encoded.
+ +extends_documentation_fragment: +- community.general.keycloak + + +author: + - Eike Frost (@eikef) +''' + +EXAMPLES = ''' +- name: Create or update Keycloak client (minimal example) + local_action: + module: keycloak_client + auth_client_id: admin-cli + auth_keycloak_url: https://auth.example.com/auth + auth_realm: master + auth_username: USERNAME + auth_password: PASSWORD + client_id: test + state: present + +- name: Delete a Keycloak client + local_action: + module: keycloak_client + auth_client_id: admin-cli + auth_keycloak_url: https://auth.example.com/auth + auth_realm: master + auth_username: USERNAME + auth_password: PASSWORD + client_id: test + state: absent + +- name: Create or update a Keycloak client (with all the bells and whistles) + local_action: + module: keycloak_client + auth_client_id: admin-cli + auth_keycloak_url: https://auth.example.com/auth + auth_realm: master + auth_username: USERNAME + auth_password: PASSWORD + state: present + realm: master + client_id: test + id: d8b127a3-31f6-44c8-a7e4-4ab9a3e78d95 + name: this_is_a_test + description: Description of this wonderful client + root_url: https://www.example.com/ + admin_url: https://www.example.com/admin_url + base_url: basepath + enabled: True + client_authenticator_type: client-secret + secret: REALLYWELLKEPTSECRET + redirect_uris: + - https://www.example.com/* + - http://localhost:8888/ + web_origins: + - https://www.example.com/* + not_before: 1507825725 + bearer_only: False + consent_required: False + standard_flow_enabled: True + implicit_flow_enabled: False + direct_access_grants_enabled: False + service_accounts_enabled: False + authorization_services_enabled: False + public_client: False + frontchannel_logout: False + protocol: openid-connect + full_scope_allowed: false + node_re_registration_timeout: -1 + client_template: test + use_template_config: False + use_template_scope: false + use_template_mappers: no + registered_nodes: + node01.example.com: 1507828202 + registration_access_token: eyJWT_TOKEN + surrogate_auth_required: false + default_roles: + - test01 + - test02 + protocol_mappers: + - config: + access.token.claim: True + claim.name: "family_name" + id.token.claim: True + jsonType.label: String + user.attribute: lastName + userinfo.token.claim: True + consentRequired: True + consentText: "${familyName}" + name: family name + protocol: openid-connect + protocolMapper: oidc-usermodel-property-mapper + - config: + attribute.name: Role + attribute.nameformat: Basic + single: false + consentRequired: false + name: role list + protocol: saml + protocolMapper: saml-role-list-mapper + attributes: + saml.authnstatement: True + saml.client.signature: True + saml.force.post.binding: True + saml.server.signature: True + saml.signature.algorithm: RSA_SHA256 + saml.signing.certificate: CERTIFICATEHERE + saml.signing.private.key: PRIVATEKEYHERE + saml_force_name_id_format: False + saml_name_id_format: username + saml_signature_canonicalization_method: "http://www.w3.org/2001/10/xml-exc-c14n#" + user.info.response.signature.alg: RS256 + request.object.signature.alg: RS256 + use.jwks.url: true + jwks.url: JWKS_URL_FOR_CLIENT_AUTH_JWT + jwt.credential.certificate: JWT_CREDENTIAL_CERTIFICATE_FOR_CLIENT_AUTH +''' + +RETURN = ''' +msg: + description: Message as to what action was taken + returned: always + type: str + sample: "Client testclient has been updated" + +proposed: + description: client representation of proposed changes to client + returned: always + type: dict + sample: { + clientId: "test" + } 
+existing:
+  description: client representation of existing client (sample is truncated)
+  returned: always
+  type: dict
+  sample: {
+    "adminUrl": "http://www.example.com/admin_url",
+    "attributes": {
+      "request.object.signature.alg": "RS256",
+    }
+  }
+end_state:
+  description: client representation of client after module execution (sample is truncated)
+  returned: always
+  type: dict
+  sample: {
+    "adminUrl": "http://www.example.com/admin_url",
+    "attributes": {
+      "request.object.signature.alg": "RS256",
+    }
+  }
+'''
+
+from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import KeycloakAPI, camel, \
+    keycloak_argument_spec, get_token, KeycloakError
+from ansible.module_utils.basic import AnsibleModule
+
+
+def sanitize_cr(clientrep):
+    """ Removes potentially sensitive details from a client representation
+
+    :param clientrep: the clientrep dict to be sanitized
+    :return: sanitized clientrep dict
+    """
+    result = clientrep.copy()
+    if 'secret' in result:
+        result['secret'] = 'no_log'
+    if 'attributes' in result:
+        if 'saml.signing.private.key' in result['attributes']:
+            result['attributes']['saml.signing.private.key'] = 'no_log'
+    return result
+
+
+def main():
+    """
+    Module execution
+
+    :return:
+    """
+    argument_spec = keycloak_argument_spec()
+
+    protmapper_spec = dict(
+        consentRequired=dict(type='bool'),
+        consentText=dict(type='str'),
+        id=dict(type='str'),
+        name=dict(type='str'),
+        protocol=dict(type='str', choices=['openid-connect', 'saml']),
+        protocolMapper=dict(type='str'),
+        config=dict(type='dict'),
+    )
+
+    meta_args = dict(
+        state=dict(default='present', choices=['present', 'absent']),
+        realm=dict(type='str', default='master'),
+
+        id=dict(type='str'),
+        client_id=dict(type='str', aliases=['clientId']),
+        name=dict(type='str'),
+        description=dict(type='str'),
+        root_url=dict(type='str', aliases=['rootUrl']),
+        admin_url=dict(type='str', aliases=['adminUrl']),
+        base_url=dict(type='str', aliases=['baseUrl']),
+        surrogate_auth_required=dict(type='bool', aliases=['surrogateAuthRequired']),
+        enabled=dict(type='bool'),
+        client_authenticator_type=dict(type='str', choices=['client-secret', 'client-jwt'], aliases=['clientAuthenticatorType']),
+        secret=dict(type='str', no_log=True),
+        registration_access_token=dict(type='str', aliases=['registrationAccessToken']),
+        default_roles=dict(type='list', aliases=['defaultRoles']),
+        redirect_uris=dict(type='list', aliases=['redirectUris']),
+        web_origins=dict(type='list', aliases=['webOrigins']),
+        not_before=dict(type='int', aliases=['notBefore']),
+        bearer_only=dict(type='bool', aliases=['bearerOnly']),
+        consent_required=dict(type='bool', aliases=['consentRequired']),
+        standard_flow_enabled=dict(type='bool', aliases=['standardFlowEnabled']),
+        implicit_flow_enabled=dict(type='bool', aliases=['implicitFlowEnabled']),
+        direct_access_grants_enabled=dict(type='bool', aliases=['directAccessGrantsEnabled']),
+        service_accounts_enabled=dict(type='bool', aliases=['serviceAccountsEnabled']),
+        authorization_services_enabled=dict(type='bool', aliases=['authorizationServicesEnabled']),
+        public_client=dict(type='bool', aliases=['publicClient']),
+        frontchannel_logout=dict(type='bool', aliases=['frontchannelLogout']),
+        protocol=dict(type='str', choices=['openid-connect', 'saml']),
+        attributes=dict(type='dict'),
+        full_scope_allowed=dict(type='bool', aliases=['fullScopeAllowed']),
+        node_re_registration_timeout=dict(type='int', aliases=['nodeReRegistrationTimeout']),
+        registered_nodes=dict(type='dict', aliases=['registeredNodes']),
+        client_template=dict(type='str', aliases=['clientTemplate']),
+        use_template_config=dict(type='bool', aliases=['useTemplateConfig']),
+        use_template_scope=dict(type='bool', aliases=['useTemplateScope']),
+        use_template_mappers=dict(type='bool', aliases=['useTemplateMappers']),
+        protocol_mappers=dict(type='list', elements='dict', options=protmapper_spec, aliases=['protocolMappers']),
+        authorization_settings=dict(type='dict', aliases=['authorizationSettings']),
+    )
+    argument_spec.update(meta_args)
+
+    module = AnsibleModule(argument_spec=argument_spec,
+                           supports_check_mode=True,
+                           required_one_of=([['client_id', 'id']]))
+
+    result = dict(changed=False, msg='', diff={}, proposed={}, existing={}, end_state={})
+
+    # Obtain access token, initialize API
+    try:
+        connection_header = get_token(
+            base_url=module.params.get('auth_keycloak_url'),
+            validate_certs=module.params.get('validate_certs'),
+            auth_realm=module.params.get('auth_realm'),
+            client_id=module.params.get('auth_client_id'),
+            auth_username=module.params.get('auth_username'),
+            auth_password=module.params.get('auth_password'),
+            client_secret=module.params.get('auth_client_secret'),
+        )
+    except KeycloakError as e:
+        module.fail_json(msg=str(e))
+
+    kc = KeycloakAPI(module, connection_header)
+
+    realm = module.params.get('realm')
+    cid = module.params.get('id')
+    state = module.params.get('state')
+
+    # convert module parameters to client representation parameters (if they belong in there)
+    client_params = [x for x in module.params
+                     if x not in list(keycloak_argument_spec().keys()) + ['state', 'realm'] and
+                     module.params.get(x) is not None]
+
+    # See whether the client already exists in Keycloak
+    if cid is None:
+        before_client = kc.get_client_by_clientid(module.params.get('client_id'), realm=realm)
+        if before_client is not None:
+            cid = before_client['id']
+    else:
+        before_client = kc.get_client_by_id(cid, realm=realm)
+
+    if before_client is None:
+        before_client = dict()
+
+    # Build a proposed changeset from parameters given to this module
+    changeset = dict()
+
+    for client_param in client_params:
+        new_param_value = module.params.get(client_param)
+
+        # some lists in the Keycloak API are sorted, some are not.
+        if isinstance(new_param_value, list):
+            if client_param in ['attributes']:
+                try:
+                    new_param_value = sorted(new_param_value)
+                except TypeError:
+                    pass
+        # Unfortunately, the ansible argument spec checker introduces variables with null values when
+        # they are not specified
+        if client_param == 'protocol_mappers':
+            new_param_value = [dict((k, v) for k, v in x.items() if x[k] is not None) for x in new_param_value]
+
+        changeset[camel(client_param)] = new_param_value
+
+    # Whether creating or updating a client, take the before-state and merge the changeset into it
+    updated_client = before_client.copy()
+    updated_client.update(changeset)
+
+    result['proposed'] = sanitize_cr(changeset)
+    result['existing'] = sanitize_cr(before_client)
+
+    # If the client does not exist yet, before_client is still empty
+    if before_client == dict():
+        if state == 'absent':
+            # do nothing and exit
+            if module._diff:
+                result['diff'] = dict(before='', after='')
+            result['msg'] = 'Client does not exist, doing nothing.'
+ module.exit_json(**result) + + # create new client + result['changed'] = True + if 'clientId' not in updated_client: + module.fail_json(msg='client_id needs to be specified when creating a new client') + + if module._diff: + result['diff'] = dict(before='', after=sanitize_cr(updated_client)) + + if module.check_mode: + module.exit_json(**result) + + kc.create_client(updated_client, realm=realm) + after_client = kc.get_client_by_clientid(updated_client['clientId'], realm=realm) + + result['end_state'] = sanitize_cr(after_client) + + result['msg'] = 'Client %s has been created.' % updated_client['clientId'] + module.exit_json(**result) + else: + if state == 'present': + # update existing client + result['changed'] = True + if module.check_mode: + # We can only compare the current client with the proposed updates we have + if module._diff: + result['diff'] = dict(before=sanitize_cr(before_client), + after=sanitize_cr(updated_client)) + result['changed'] = (before_client != updated_client) + + module.exit_json(**result) + + kc.update_client(cid, updated_client, realm=realm) + + after_client = kc.get_client_by_id(cid, realm=realm) + if before_client == after_client: + result['changed'] = False + if module._diff: + result['diff'] = dict(before=sanitize_cr(before_client), + after=sanitize_cr(after_client)) + result['end_state'] = sanitize_cr(after_client) + + result['msg'] = 'Client %s has been updated.' % updated_client['clientId'] + module.exit_json(**result) + else: + # Delete existing client + result['changed'] = True + if module._diff: + result['diff']['before'] = sanitize_cr(before_client) + result['diff']['after'] = '' + + if module.check_mode: + module.exit_json(**result) + + kc.delete_client(cid, realm=realm) + result['proposed'] = dict() + result['end_state'] = dict() + result['msg'] = 'Client %s has been deleted.' % before_client['clientId'] + module.exit_json(**result) + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/identity/keycloak/keycloak_clienttemplate.py b/plugins/modules/identity/keycloak/keycloak_clienttemplate.py new file mode 100644 index 0000000000..45bf5820e9 --- /dev/null +++ b/plugins/modules/identity/keycloak/keycloak_clienttemplate.py @@ -0,0 +1,420 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2017, Eike Frost +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = { + 'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community' +} + +DOCUMENTATION = ''' +--- +module: keycloak_clienttemplate + +short_description: Allows administration of Keycloak client templates via Keycloak API + + +description: + - This module allows the administration of Keycloak client templates via the Keycloak REST API. It + requires access to the REST API via OpenID Connect; the user connecting and the client being + used must have the requisite access rights. In a default Keycloak installation, admin-cli + and an admin user would work, as would a separate client definition with the scope tailored + to your needs and a user having the expected roles. 
+
+    - The names of module options are snake_cased versions of the camelCase ones found in the
+      Keycloak API and its documentation at U(https://www.keycloak.org/docs-api/8.0/rest-api/index.html)
+
+    - The Keycloak API does not always enforce that only sensible settings are used -- you can set
+      SAML-specific settings on an OpenID Connect client for instance and vice versa. Be careful.
+      If you do not specify a setting, usually a sensible default is chosen.
+
+options:
+    state:
+        description:
+            - State of the client template.
+            - On C(present), the client template will be created (or updated if it exists already).
+            - On C(absent), the client template will be removed if it exists.
+        choices: ['present', 'absent']
+        default: 'present'
+
+    id:
+        description:
+            - Id of client template to be worked on. This is usually a UUID.
+
+    realm:
+        description:
+            - Realm this client template is found in.
+
+    name:
+        description:
+            - Name of the client template.
+
+    description:
+        description:
+            - Description of the client template in Keycloak.
+
+    protocol:
+        description:
+            - Type of client template (either C(openid-connect) or C(saml)).
+        choices: ['openid-connect', 'saml']
+
+    full_scope_allowed:
+        description:
+            - Is the "Full Scope Allowed" feature set for this client template or not.
+              This is 'fullScopeAllowed' in the Keycloak REST API.
+        type: bool
+
+    protocol_mappers:
+        description:
+            - A list of dicts defining protocol mappers for this client template.
+              This is 'protocolMappers' in the Keycloak REST API.
+        suboptions:
+            consentRequired:
+                description:
+                    - Specifies whether a user needs to provide consent to a client for this mapper to be active.
+
+            consentText:
+                description:
+                    - The human-readable name of the consent the user is presented to accept.
+
+            id:
+                description:
+                    - Usually a UUID specifying the internal ID of this protocol mapper instance.
+
+            name:
+                description:
+                    - The name of this protocol mapper.
+
+            protocol:
+                description:
+                    - Is either C(openid-connect) or C(saml); this specifies for which protocol this protocol mapper
+                      is active.
+                choices: ['openid-connect', 'saml']
+
+            protocolMapper:
+                description:
+                    - The Keycloak-internal name of the type of this protocol mapper. While an exhaustive list is
+                      impossible to provide since this may be extended through SPIs by the user of Keycloak,
+                      by default Keycloak as of 3.4 ships with at least:
+                    - C(docker-v2-allow-all-mapper)
+                    - C(oidc-address-mapper)
+                    - C(oidc-full-name-mapper)
+                    - C(oidc-group-membership-mapper)
+                    - C(oidc-hardcoded-claim-mapper)
+                    - C(oidc-hardcoded-role-mapper)
+                    - C(oidc-role-name-mapper)
+                    - C(oidc-script-based-protocol-mapper)
+                    - C(oidc-sha256-pairwise-sub-mapper)
+                    - C(oidc-usermodel-attribute-mapper)
+                    - C(oidc-usermodel-client-role-mapper)
+                    - C(oidc-usermodel-property-mapper)
+                    - C(oidc-usermodel-realm-role-mapper)
+                    - C(oidc-usersessionmodel-note-mapper)
+                    - C(saml-group-membership-mapper)
+                    - C(saml-hardcode-attribute-mapper)
+                    - C(saml-hardcode-role-mapper)
+                    - C(saml-role-list-mapper)
+                    - C(saml-role-name-mapper)
+                    - C(saml-user-attribute-mapper)
+                    - C(saml-user-property-mapper)
+                    - C(saml-user-session-note-mapper)
+                    - An exhaustive list of available mappers on your installation can be obtained on
+                      the admin console by going to Server Info -> Providers and looking under
+                      'protocol-mapper'.
+ + config: + description: + - Dict specifying the configuration options for the protocol mapper; the + contents differ depending on the value of I(protocolMapper) and are not documented + other than by the source of the mappers and its parent class(es). An example is given + below. It is easiest to obtain valid config values by dumping an already-existing + protocol mapper configuration through check-mode in the "existing" field. + + attributes: + description: + - A dict of further attributes for this client template. This can contain various + configuration settings, though in the default installation of Keycloak as of 3.4, none + are documented or known, so this is usually empty. + +notes: +- The Keycloak REST API defines further fields (namely I(bearerOnly), I(consentRequired), I(standardFlowEnabled), + I(implicitFlowEnabled), I(directAccessGrantsEnabled), I(serviceAccountsEnabled), I(publicClient), and + I(frontchannelLogout)) which, while available with keycloak_client, do not have any effect on + Keycloak client-templates and are discarded if supplied with an API request changing client-templates. As such, + they are not available through this module. + +extends_documentation_fragment: +- community.general.keycloak + + +author: + - Eike Frost (@eikef) +''' + +EXAMPLES = ''' +- name: Create or update Keycloak client template (minimal) + local_action: + module: keycloak_clienttemplate + auth_client_id: admin-cli + auth_keycloak_url: https://auth.example.com/auth + auth_realm: master + auth_username: USERNAME + auth_password: PASSWORD + realm: master + name: this_is_a_test + +- name: delete Keycloak client template + local_action: + module: keycloak_clienttemplate + auth_client_id: admin-cli + auth_keycloak_url: https://auth.example.com/auth + auth_realm: master + auth_username: USERNAME + auth_password: PASSWORD + realm: master + state: absent + name: test01 + +- name: Create or update Keycloak client template (with a protocol mapper) + local_action: + module: keycloak_clienttemplate + auth_client_id: admin-cli + auth_keycloak_url: https://auth.example.com/auth + auth_realm: master + auth_username: USERNAME + auth_password: PASSWORD + realm: master + name: this_is_a_test + protocol_mappers: + - config: + access.token.claim: True + claim.name: "family_name" + id.token.claim: True + jsonType.label: String + user.attribute: lastName + userinfo.token.claim: True + consentRequired: True + consentText: "${familyName}" + name: family name + protocol: openid-connect + protocolMapper: oidc-usermodel-property-mapper + full_scope_allowed: false + id: bce6f5e9-d7d3-4955-817e-c5b7f8d65b3f +''' + +RETURN = ''' +msg: + description: Message as to what action was taken + returned: always + type: str + sample: "Client template testclient has been updated" + +proposed: + description: client template representation of proposed changes to client template + returned: always + type: dict + sample: { + name: "test01" + } +existing: + description: client template representation of existing client template (sample is truncated) + returned: always + type: dict + sample: { + "description": "test01", + "fullScopeAllowed": false, + "id": "9c3712ab-decd-481e-954f-76da7b006e5f", + "name": "test01", + "protocol": "saml" + } +end_state: + description: client template representation of client template after module execution (sample is truncated) + returned: always + type: dict + sample: { + "description": "test01", + "fullScopeAllowed": false, + "id": "9c3712ab-decd-481e-954f-76da7b006e5f", + "name": "test01", + 
"protocol": "saml" + } +''' + +from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import KeycloakAPI, camel, \ + keycloak_argument_spec, get_token, KeycloakError +from ansible.module_utils.basic import AnsibleModule + + +def main(): + """ + Module execution + + :return: + """ + argument_spec = keycloak_argument_spec() + + protmapper_spec = dict( + consentRequired=dict(type='bool'), + consentText=dict(type='str'), + id=dict(type='str'), + name=dict(type='str'), + protocol=dict(type='str', choices=['openid-connect', 'saml']), + protocolMapper=dict(type='str'), + config=dict(type='dict'), + ) + + meta_args = dict( + realm=dict(type='str', default='master'), + state=dict(default='present', choices=['present', 'absent']), + + id=dict(type='str'), + name=dict(type='str'), + description=dict(type='str'), + protocol=dict(type='str', choices=['openid-connect', 'saml']), + attributes=dict(type='dict'), + full_scope_allowed=dict(type='bool'), + protocol_mappers=dict(type='list', elements='dict', options=protmapper_spec), + ) + argument_spec.update(meta_args) + + module = AnsibleModule(argument_spec=argument_spec, + supports_check_mode=True, + required_one_of=([['id', 'name']])) + + result = dict(changed=False, msg='', diff={}, proposed={}, existing={}, end_state={}) + + # Obtain access token, initialize API + try: + connection_header = get_token( + base_url=module.params.get('auth_keycloak_url'), + validate_certs=module.params.get('validate_certs'), + auth_realm=module.params.get('auth_realm'), + client_id=module.params.get('auth_client_id'), + auth_username=module.params.get('auth_username'), + auth_password=module.params.get('auth_password'), + client_secret=module.params.get('auth_client_secret'), + ) + except KeycloakError as e: + module.fail_json(msg=str(e)) + kc = KeycloakAPI(module, connection_header) + + realm = module.params.get('realm') + state = module.params.get('state') + cid = module.params.get('id') + + # convert module parameters to client representation parameters (if they belong in there) + clientt_params = [x for x in module.params + if x not in ['state', 'auth_keycloak_url', 'auth_client_id', 'auth_realm', + 'auth_client_secret', 'auth_username', 'auth_password', + 'validate_certs', 'realm'] and module.params.get(x) is not None] + + # See whether the client template already exists in Keycloak + if cid is None: + before_clientt = kc.get_client_template_by_name(module.params.get('name'), realm=realm) + if before_clientt is not None: + cid = before_clientt['id'] + else: + before_clientt = kc.get_client_template_by_id(cid, realm=realm) + + if before_clientt is None: + before_clientt = dict() + + result['existing'] = before_clientt + + # Build a proposed changeset from parameters given to this module + changeset = dict() + + for clientt_param in clientt_params: + # lists in the Keycloak API are sorted + new_param_value = module.params.get(clientt_param) + if isinstance(new_param_value, list): + try: + new_param_value = sorted(new_param_value) + except TypeError: + pass + changeset[camel(clientt_param)] = new_param_value + + # Whether creating or updating a client, take the before-state and merge the changeset into it + updated_clientt = before_clientt.copy() + updated_clientt.update(changeset) + + result['proposed'] = changeset + + # If the client template does not exist yet, before_client is still empty + if before_clientt == dict(): + if state == 'absent': + # do nothing and exit + if module._diff: + result['diff'] = dict(before='', after='') + 
result['msg'] = 'Client template does not exist, doing nothing.'
+            module.exit_json(**result)
+
+        # create new client template
+        result['changed'] = True
+        if 'name' not in updated_clientt:
+            module.fail_json(msg='name needs to be specified when creating a new client template')
+
+        if module._diff:
+            result['diff'] = dict(before='', after=updated_clientt)
+
+        if module.check_mode:
+            module.exit_json(**result)
+
+        kc.create_client_template(updated_clientt, realm=realm)
+        after_clientt = kc.get_client_template_by_name(updated_clientt['name'], realm=realm)
+
+        result['end_state'] = after_clientt
+
+        result['msg'] = 'Client template %s has been created.' % updated_clientt['name']
+        module.exit_json(**result)
+    else:
+        if state == 'present':
+            # update existing client template
+            result['changed'] = True
+            if module.check_mode:
+                # We can only compare the current client template with the proposed updates we have
+                if module._diff:
+                    result['diff'] = dict(before=before_clientt,
+                                          after=updated_clientt)
+
+                module.exit_json(**result)
+
+            kc.update_client_template(cid, updated_clientt, realm=realm)
+
+            after_clientt = kc.get_client_template_by_id(cid, realm=realm)
+            if before_clientt == after_clientt:
+                result['changed'] = False
+            if module._diff:
+                result['diff'] = dict(before=before_clientt,
+                                      after=after_clientt)
+            result['end_state'] = after_clientt
+
+            result['msg'] = 'Client template %s has been updated.' % updated_clientt['name']
+            module.exit_json(**result)
+        else:
+            # Delete existing client template
+            result['changed'] = True
+            if module._diff:
+                result['diff']['before'] = before_clientt
+                result['diff']['after'] = ''
+
+            if module.check_mode:
+                module.exit_json(**result)
+
+            kc.delete_client_template(cid, realm=realm)
+            result['proposed'] = dict()
+            result['end_state'] = dict()
+            result['msg'] = 'Client template %s has been deleted.' % before_clientt['name']
+            module.exit_json(**result)
+
+    module.exit_json(**result)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/identity/keycloak/keycloak_group.py b/plugins/modules/identity/keycloak/keycloak_group.py
new file mode 100644
index 0000000000..b8a5e91de3
--- /dev/null
+++ b/plugins/modules/identity/keycloak/keycloak_group.py
@@ -0,0 +1,370 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2019, Adam Goossens
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+    'metadata_version': '1.1',
+    'status': ['preview'],
+    'supported_by': 'community'
+}
+
+DOCUMENTATION = '''
+---
+module: keycloak_group
+
+short_description: Allows administration of Keycloak groups via Keycloak API
+
+description:
+    - This module allows you to add, remove or modify Keycloak groups via the Keycloak REST API.
+      It requires access to the REST API via OpenID Connect; the user connecting and the client being
+      used must have the requisite access rights. In a default Keycloak installation, admin-cli
+      and an admin user would work, as would a separate client definition with the scope tailored
+      to your needs and a user having the expected roles.
+
+    - The names of module options are snake_cased versions of the camelCase ones found in the
+      Keycloak API and its documentation at U(https://www.keycloak.org/docs-api/8.0/rest-api/index.html).
+
+    - Attributes are multi-valued in the Keycloak API. All attributes are lists of individual values and will
+      be returned that way by this module. You may pass single values for attributes when calling the module,
+      and this will be translated into a list suitable for the API.
+
+    - When updating a group, where possible provide the group ID to the module. This removes a lookup
+      to the API to translate the name into the group ID.
+
+
+options:
+    state:
+        description:
+            - State of the group.
+            - On C(present), the group will be created if it does not yet exist, or updated with the parameters you provide.
+            - On C(absent), the group will be removed if it exists.
+        default: 'present'
+        type: str
+        choices:
+            - present
+            - absent
+
+    name:
+        type: str
+        description:
+            - Name of the group.
+            - This parameter is required only when creating or updating the group.
+
+    realm:
+        type: str
+        description:
+            - The Keycloak realm under which this group resides.
+        default: 'master'
+
+    id:
+        type: str
+        description:
+            - The unique identifier for this group.
+            - This parameter is not required for updating or deleting a group but
+              providing it will reduce the number of API calls required.
+
+    attributes:
+        type: dict
+        description:
+            - A dict of key/value pairs to set as custom attributes for the group.
+            - Values may be single values (e.g. a string) or a list of strings.
+
+notes:
+    - Presently, the I(realmRoles), I(clientRoles) and I(access) attributes returned by the Keycloak API
+      are read-only for groups. This limitation will be removed in a later version of this module.
+
+extends_documentation_fragment:
+- community.general.keycloak
+
+
+author:
+    - Adam Goossens (@adamgoossens)
+'''
+
+EXAMPLES = '''
+- name: Create a Keycloak group
+  keycloak_group:
+    name: my-new-kc-group
+    realm: MyCustomRealm
+    state: present
+    auth_client_id: admin-cli
+    auth_keycloak_url: https://auth.example.com/auth
+    auth_realm: master
+    auth_username: USERNAME
+    auth_password: PASSWORD
+  delegate_to: localhost
+
+- name: Delete a Keycloak group
+  keycloak_group:
+    id: '9d59aa76-2755-48c6-b1af-beb70a82c3cd'
+    state: absent
+    realm: MyCustomRealm
+    auth_client_id: admin-cli
+    auth_keycloak_url: https://auth.example.com/auth
+    auth_realm: master
+    auth_username: USERNAME
+    auth_password: PASSWORD
+  delegate_to: localhost
+
+- name: Delete a Keycloak group based on name
+  keycloak_group:
+    name: my-group-for-deletion
+    state: absent
+    auth_client_id: admin-cli
+    auth_keycloak_url: https://auth.example.com/auth
+    auth_realm: master
+    auth_username: USERNAME
+    auth_password: PASSWORD
+  delegate_to: localhost
+
+- name: Update the name of a Keycloak group
+  keycloak_group:
+    id: '9d59aa76-2755-48c6-b1af-beb70a82c3cd'
+    name: an-updated-kc-group-name
+    state: present
+    auth_client_id: admin-cli
+    auth_keycloak_url: https://auth.example.com/auth
+    auth_realm: master
+    auth_username: USERNAME
+    auth_password: PASSWORD
+  delegate_to: localhost
+
+- name: Create a Keycloak group with some custom attributes
+  keycloak_group:
+    auth_client_id: admin-cli
+    auth_keycloak_url: https://auth.example.com/auth
+    auth_realm: master
+    auth_username: USERNAME
+    auth_password: PASSWORD
+    name: my-new_group
+    attributes:
+        attrib1: value1
+        attrib2: value2
+        attrib3:
+            - with
+            - numerous
+            - individual
+            - list
+            - items
+  delegate_to: localhost
+'''
+
+RETURN = '''
+group:
+  description: Group representation of the group after module execution (sample is truncated).
+ returned: always + type: complex + contains: + id: + description: GUID that identifies the group + type: str + returned: always + sample: 23f38145-3195-462c-97e7-97041ccea73e + name: + description: Name of the group + type: str + returned: always + sample: grp-test-123 + attributes: + description: Attributes applied to this group + type: dict + returned: always + sample: + attr1: ["val1", "val2", "val3"] + path: + description: URI path to the group + type: str + returned: always + sample: /grp-test-123 + realmRoles: + description: An array of the realm-level roles granted to this group + type: list + returned: always + sample: [] + subGroups: + description: A list of groups that are children of this group. These groups will have the same parameters as + documented here. + type: list + returned: always + clientRoles: + description: A list of client-level roles granted to this group + type: list + returned: always + sample: [] + access: + description: A dict describing the accesses you have to this group based on the credentials used. + type: dict + returned: always + sample: + manage: true + manageMembership: true + view: true +''' + +from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import KeycloakAPI, camel, \ + keycloak_argument_spec, get_token, KeycloakError +from ansible.module_utils.basic import AnsibleModule + + +def main(): + """ + Module execution + + :return: + """ + argument_spec = keycloak_argument_spec() + meta_args = dict( + state=dict(default='present', choices=['present', 'absent']), + realm=dict(default='master'), + id=dict(type='str'), + name=dict(type='str'), + attributes=dict(type='dict') + ) + + argument_spec.update(meta_args) + + module = AnsibleModule(argument_spec=argument_spec, + supports_check_mode=True, + required_one_of=([['id', 'name']])) + + result = dict(changed=False, msg='', diff={}, group='') + + # Obtain access token, initialize API + try: + connection_header = get_token( + base_url=module.params.get('auth_keycloak_url'), + validate_certs=module.params.get('validate_certs'), + auth_realm=module.params.get('auth_realm'), + client_id=module.params.get('auth_client_id'), + auth_username=module.params.get('auth_username'), + auth_password=module.params.get('auth_password'), + client_secret=module.params.get('auth_client_secret'), + ) + except KeycloakError as e: + module.fail_json(msg=str(e)) + kc = KeycloakAPI(module, connection_header) + + realm = module.params.get('realm') + state = module.params.get('state') + gid = module.params.get('id') + name = module.params.get('name') + attributes = module.params.get('attributes') + + before_group = None # current state of the group, for merging. + + # does the group already exist? + if gid is None: + before_group = kc.get_group_by_name(name, realm=realm) + else: + before_group = kc.get_group_by_groupid(gid, realm=realm) + + before_group = {} if before_group is None else before_group + + # attributes in Keycloak have their values returned as lists + # via the API. attributes is a dict, so we'll transparently convert + # the values to lists. 
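+    # For example, attributes={'attrib1': 'value1', 'attrib2': ['a', 'b']} becomes
+    # {'attrib1': ['value1'], 'attrib2': ['a', 'b']} before it is compared with,
+    # and sent to, the API.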
+ if attributes is not None: + for key, val in module.params['attributes'].items(): + module.params['attributes'][key] = [val] if not isinstance(val, list) else val + + group_params = [x for x in module.params + if x not in list(keycloak_argument_spec().keys()) + ['state', 'realm'] and + module.params.get(x) is not None] + + # build a changeset + changeset = {} + for param in group_params: + new_param_value = module.params.get(param) + old_value = before_group[param] if param in before_group else None + if new_param_value != old_value: + changeset[camel(param)] = new_param_value + + # prepare the new group + updated_group = before_group.copy() + updated_group.update(changeset) + + # if before_group is none, the group doesn't exist. + if before_group == {}: + if state == 'absent': + # nothing to do. + if module._diff: + result['diff'] = dict(before='', after='') + result['msg'] = 'Group does not exist; doing nothing.' + result['group'] = dict() + module.exit_json(**result) + + # for 'present', create a new group. + result['changed'] = True + if name is None: + module.fail_json(msg='name must be specified when creating a new group') + + if module._diff: + result['diff'] = dict(before='', after=updated_group) + + if module.check_mode: + module.exit_json(**result) + + # do it for real! + kc.create_group(updated_group, realm=realm) + after_group = kc.get_group_by_name(name, realm) + + result['group'] = after_group + result['msg'] = 'Group {name} has been created with ID {id}'.format(name=after_group['name'], + id=after_group['id']) + + else: + if state == 'present': + # no changes + if updated_group == before_group: + result['changed'] = False + result['group'] = updated_group + result['msg'] = "No changes required to group {name}.".format(name=before_group['name']) + module.exit_json(**result) + + # update the existing group + result['changed'] = True + + if module._diff: + result['diff'] = dict(before=before_group, after=updated_group) + + if module.check_mode: + module.exit_json(**result) + + # do the update + kc.update_group(updated_group, realm=realm) + + after_group = kc.get_group_by_groupid(updated_group['id'], realm=realm) + + result['group'] = after_group + result['msg'] = "Group {id} has been updated".format(id=after_group['id']) + + module.exit_json(**result) + + elif state == 'absent': + result['group'] = dict() + + if module._diff: + result['diff'] = dict(before=before_group, after='') + + if module.check_mode: + module.exit_json(**result) + + # delete for real + gid = before_group['id'] + kc.delete_group(groupid=gid, realm=realm) + + result['changed'] = True + result['msg'] = "Group {name} has been deleted".format(name=before_group['name']) + + module.exit_json(**result) + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/identity/onepassword_facts.py b/plugins/modules/identity/onepassword_facts.py new file mode 120000 index 0000000000..4e4c2b117c --- /dev/null +++ b/plugins/modules/identity/onepassword_facts.py @@ -0,0 +1 @@ +onepassword_info.py \ No newline at end of file diff --git a/plugins/modules/identity/onepassword_info.py b/plugins/modules/identity/onepassword_info.py new file mode 100644 index 0000000000..f39ab43640 --- /dev/null +++ b/plugins/modules/identity/onepassword_info.py @@ -0,0 +1,395 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# (c) 2018, Ryan Conway (@rylon) +# (c) 2018, Scott Buchanan (onepassword.py used as starting point) +# (c) 2018, Ansible Project +# GNU General Public License v3.0+ (see COPYING or 
https://www.gnu.org/licenses/gpl-3.0.txt)
+
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+module: onepassword_info
+author:
+    - Ryan Conway (@Rylon)
+requirements:
+    - C(op) 1Password command line utility. See U(https://support.1password.com/command-line/)
+notes:
+    - Tested with C(op) version 0.5.5
+    - "Based on the C(onepassword) lookup plugin by Scott Buchanan."
+    - When this module is called with the deprecated C(onepassword_facts) name, potentially sensitive data
+      from 1Password is returned as Ansible facts. Facts are subject to caching if enabled, which means this
+      data could be stored in clear text on disk or in a database.
+short_description: Gather items from 1Password
+description:
+    - M(onepassword_info) wraps the C(op) command line utility to fetch data about one or more 1Password items.
+    - A fatal error occurs if any of the items being searched for cannot be found.
+    - It is recommended to use the C(no_log) option to avoid logging the values of the secrets being retrieved.
+    - This module was called C(onepassword_facts) before Ansible 2.9, returning C(ansible_facts).
+      Note that the M(onepassword_info) module no longer returns C(ansible_facts)!
+      You must now use the C(register) option to use the facts in other tasks.
+options:
+    search_terms:
+        type: list
+        description:
+            - A list of one or more search terms.
+            - Each search term can either be a simple string or it can be a dictionary for more control.
+            - When passing a simple string, I(field) is assumed to be C(password).
+            - When passing a dictionary, the following fields are available.
+        suboptions:
+            name:
+                type: str
+                description:
+                    - The name of the 1Password item to search for (required).
+            field:
+                type: str
+                description:
+                    - The name of the field to search for within this item (optional, defaults to "password", or "document" if the item has an attachment).
+            section:
+                type: str
+                description:
+                    - The name of a section within this item containing the specified field (optional, will search all sections if not specified).
+            vault:
+                type: str
+                description:
+                    - The name of the particular 1Password vault to search, useful if your 1Password user has access to multiple vaults (optional).
+        required: True
+    auto_login:
+        type: dict
+        description:
+            - A dictionary containing authentication details. If this is set, M(onepassword_info) will attempt to sign in to 1Password automatically.
+            - Without this option, you must have already logged in via the 1Password CLI before running Ansible.
+            - It is B(highly) recommended to store 1Password credentials in an Ansible Vault. Ensure that the key used to encrypt
+              the Ansible Vault is equal to or greater in strength than the 1Password master password.
+        suboptions:
+            subdomain:
+                type: str
+                description:
+                    - 1Password subdomain name (the part before C(.1password.com)).
+                    - If this is not specified, the most recent subdomain will be used.
+            username:
+                type: str
+                description:
+                    - 1Password username.
+                    - Only required for initial sign in.
+            master_password:
+                type: str
+                description:
+                    - The master password for your subdomain.
+                    - This is always required when specifying C(auto_login).
+                required: True
+            secret_key:
+                type: str
+                description:
+                    - The secret key for your subdomain.
+                    - Only required for initial sign in.
+        default: {}
+        required: False
+    cli_path:
+        type: path
+        description: Used to specify the exact path to the C(op) command line interface
+        required: False
+        default: 'op'
+'''
+
+EXAMPLES = '''
+# Gather secrets from 1Password, assuming there is a 'password' field:
+- name: Get a password
+  onepassword_info:
+    search_terms: My 1Password item
+  delegate_to: localhost
+  register: my_1password_item
+  no_log: true         # Don't want to log the secrets to the console!
+
+# Gather secrets from 1Password, with more advanced search terms:
+- name: Get a password
+  onepassword_info:
+    search_terms:
+      - name: My 1Password item
+        field: Custom field name       # optional, defaults to 'password'
+        section: Custom section name   # optional, defaults to 'None'
+        vault: Name of the vault       # optional, only necessary if there is more than 1 Vault available
+  delegate_to: localhost
+  register: my_1password_item
+  no_log: True                         # Don't want to log the secrets to the console!
+
+# Gather secrets combining simple and advanced search terms to retrieve two items, for one of
+# which we fetch two fields. In the first, 'password' is fetched, as a field name is not specified
+# (default behaviour), and in the second, 'Custom field name' is fetched, as that is specified explicitly.
+- name: Get a password
+  onepassword_info:
+    search_terms:
+      - My 1Password item                # 'name' is optional when passing a simple string...
+      - name: My Other 1Password item    # ...but it can also be set for consistency
+      - name: My 1Password item
+        field: Custom field name         # optional, defaults to 'password'
+        section: Custom section name     # optional, defaults to 'None'
+        vault: Name of the vault         # optional, only necessary if there is more than 1 Vault available
+      - name: A 1Password item with document attachment
+  delegate_to: localhost
+  register: my_1password_item
+  no_log: true                           # Don't want to log the secrets to the console!
+
+- name: Debug a password (for example)
+  debug:
+    msg: "{{ my_1password_item['onepassword']['My 1Password item'] }}"
+'''
+
+RETURN = '''
+---
+# One or more dictionaries for each matching item from 1Password, along with the appropriate fields.
+# This shows the response you would expect to receive from the third example documented above.
+onepassword:
+    description: Dictionary of each 1Password item matching the given search terms; shows what would be returned from the third example above.
+    returned: success
+    type: dict
+    sample:
+        "My 1Password item":
+            password: the value of this field
+            Custom field name: the value of this field
+        "My Other 1Password item":
+            password: the value of this field
+        "A 1Password item with document attachment":
+            document: the contents of the document attached to this item
+'''
+
+
+import errno
+import json
+import os
+import re
+
+from subprocess import Popen, PIPE
+
+from ansible.module_utils._text import to_bytes, to_native
+from ansible.module_utils.basic import AnsibleModule
+
+
+class AnsibleModuleError(Exception):
+    def __init__(self, results):
+        self.results = results
+
+    def __repr__(self):
+        return self.results
+
+
+class OnePasswordInfo(object):
+
+    def __init__(self):
+        self.cli_path = module.params.get('cli_path')
+        self.config_file_path = '~/.op/config'
+        self.auto_login = module.params.get('auto_login')
+        self.logged_in = False
+        self.token = None
+
+        terms = module.params.get('search_terms')
+        self.terms = self.parse_search_terms(terms)
+
+    def _run(self, args, expected_rc=0, command_input=None, ignore_errors=False):
+        if self.token:
+            # Adds the session token to all commands if we're logged in.
+            args += [to_bytes('--session=') + self.token]
+
+        command = [self.cli_path] + args
+        p = Popen(command, stdout=PIPE, stderr=PIPE, stdin=PIPE)
+        out, err = p.communicate(input=command_input)
+        rc = p.wait()
+        if not ignore_errors and rc != expected_rc:
+            raise AnsibleModuleError(to_native(err))
+        return rc, out, err
+
+    def _parse_field(self, data_json, item_id, field_name, section_title=None):
+        data = json.loads(data_json)
+
+        if ('documentAttributes' in data['details']):
+            # This is actually a document, let's fetch the document data instead!
+            document = self._run(["get", "document", data['overview']['title']])
+            return {'document': document[1].strip()}
+
+        else:
+            # This is not a document, let's try to find the requested field
+
+            # Some types of 1Password items have a 'password' field directly alongside the 'fields' attribute,
+            # not inside it, so we need to check there first.
+            if (field_name in data['details']):
+                return {field_name: data['details'][field_name]}
+
+            # Otherwise we continue looking inside the 'fields' attribute for the specified field.
+            else:
+                if section_title is None:
+                    for field_data in data['details'].get('fields', []):
+                        if field_data.get('name', '').lower() == field_name.lower():
+                            return {field_name: field_data.get('value', '')}
+
+                # We haven't found it yet, so now let's see if there are any sections defined
+                # and search through those for the field. If a section was given, we skip
+                # any non-matching sections, otherwise we search them all until we find the field.
+                for section_data in data['details'].get('sections', []):
+                    if section_title is not None and section_title.lower() != section_data['title'].lower():
+                        continue
+                    for field_data in section_data.get('fields', []):
+                        if field_data.get('t', '').lower() == field_name.lower():
+                            return {field_name: field_data.get('v', '')}
+
+        # We will get here if the field could not be found in any section and the item wasn't a document to be downloaded.
+        optional_section_title = '' if section_title is None else " in the section '%s'" % section_title
+        module.fail_json(msg="Unable to find an item in 1Password named '%s' with the field '%s'%s."
+                             % (item_id, field_name, optional_section_title))
+
+    def parse_search_terms(self, terms):
+        processed_terms = []
+
+        for term in terms:
+            if not isinstance(term, dict):
+                term = {'name': term}
+
+            if 'name' not in term:
+                module.fail_json(msg="Missing required 'name' field from search term, got: '%s'" % to_native(term))
+
+            term['field'] = term.get('field', 'password')
+            term['section'] = term.get('section', None)
+            term['vault'] = term.get('vault', None)
+
+            processed_terms.append(term)
+
+        return processed_terms
+
+    def get_raw(self, item_id, vault=None):
+        try:
+            args = ["get", "item", item_id]
+            if vault is not None:
+                args += ['--vault={0}'.format(vault)]
+            rc, output, dummy = self._run(args)
+            return output
+
+        except Exception as e:
+            if re.search(".*not found.*", to_native(e)):
+                module.fail_json(msg="Unable to find an item in 1Password named '%s'." % item_id)
+            else:
+                module.fail_json(msg="Unexpected error attempting to find an item in 1Password named '%s': %s" % (item_id, to_native(e)))
+
+    def get_field(self, item_id, field, section=None, vault=None):
+        output = self.get_raw(item_id, vault)
+        return self._parse_field(output, item_id, field, section) if output != '' else ''
+
+    def full_login(self):
+        if self.auto_login is not None:
+            if None in [self.auto_login.get('subdomain'), self.auto_login.get('username'),
+                        self.auto_login.get('secret_key'), self.auto_login.get('master_password')]:
+                module.fail_json(msg='Unable to perform initial sign in to 1Password. '
+                                     'subdomain, username, secret_key, and master_password are required to perform initial sign in.')
+
+            args = [
+                'signin',
+                '{0}.1password.com'.format(self.auto_login['subdomain']),
+                to_bytes(self.auto_login['username']),
+                to_bytes(self.auto_login['secret_key']),
+                '--output=raw',
+            ]
+
+            try:
+                rc, out, err = self._run(args, command_input=to_bytes(self.auto_login['master_password']))
+                self.token = out.strip()
+            except AnsibleModuleError as e:
+                module.fail_json(msg="Failed to perform initial sign in to 1Password: %s" % to_native(e))
+        else:
+            module.fail_json(msg="Unable to perform an initial sign in to 1Password. Please run '%s signin' "
+                                 "or define credentials in 'auto_login'. See the module documentation for details." % self.cli_path)
+
+    def get_token(self):
+        # If the config file exists, assume an initial signin has taken place and try basic sign in
+        if os.path.isfile(self.config_file_path):
+
+            if self.auto_login is not None:
+
+                # Since we are not currently signed in, master_password is required at a minimum
+                if not self.auto_login.get('master_password'):
+                    module.fail_json(msg="Unable to sign in to 1Password. 'auto_login.master_password' is required.")
+
+                # Try signing in using the master_password and a subdomain if one is provided
+                try:
+                    args = ['signin', '--output=raw']
+
+                    if self.auto_login.get('subdomain'):
+                        args = ['signin', self.auto_login['subdomain'], '--output=raw']
+
+                    rc, out, err = self._run(args, command_input=to_bytes(self.auto_login['master_password']))
+                    self.token = out.strip()
+
+                except AnsibleModuleError:
+                    self.full_login()
+
+            else:
+                self.full_login()
+
+        else:
+            # Attempt a full sign in since there appears to be no existing sign in
+            self.full_login()
+
+    def assert_logged_in(self):
+        try:
+            rc, out, err = self._run(['get', 'account'], ignore_errors=True)
+            if rc == 0:
+                self.logged_in = True
+            if not self.logged_in:
+                self.get_token()
+        except OSError as e:
+            if e.errno == errno.ENOENT:
+                module.fail_json(msg="1Password CLI tool '%s' not installed in path on control machine" % self.cli_path)
+            raise e
+
+    def run(self):
+        result = {}
+
+        self.assert_logged_in()
+
+        for term in self.terms:
+            value = self.get_field(term['name'], term['field'], term['section'], term['vault'])
+
+            if term['name'] in result:
+                # If we already have a result for this key, we have to append this result dictionary
+                # to the existing one. This is only applicable when there is a single item
+                # in 1Password which has two different fields, and we want to retrieve both of them.
+                result[term['name']].update(value)
+            else:
+                # If this is the first result for this key, simply set it.
+                result[term['name']] = value
+
+        return result
+
+
+def main():
+    global module
+    module = AnsibleModule(
+        argument_spec=dict(
+            cli_path=dict(type='path', default='op'),
+            auto_login=dict(type='dict', options=dict(
+                subdomain=dict(type='str'),
+                username=dict(type='str'),
+                master_password=dict(required=True, type='str', no_log=True),
+                secret_key=dict(type='str', no_log=True),
+            ), default=None),
+            search_terms=dict(required=True, type='list')
+        ),
+        supports_check_mode=True
+    )
+
+    results = {'onepassword': OnePasswordInfo().run()}
+
+    if module._name == 'onepassword_facts':
+        module.deprecate("The 'onepassword_facts' module has been renamed to 'onepassword_info'. "
+                         "When called with the new name it no longer returns 'ansible_facts'", version='2.13')
+        module.exit_json(changed=False, ansible_facts=results)
+    else:
+        module.exit_json(changed=False, **results)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/identity/opendj/opendj_backendprop.py b/plugins/modules/identity/opendj/opendj_backendprop.py
new file mode 100644
index 0000000000..78667a3bd0
--- /dev/null
+++ b/plugins/modules/identity/opendj/opendj_backendprop.py
@@ -0,0 +1,203 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright: (c) 2016, Werner Dijkerman (ikben@werner-dijkerman.nl)
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: opendj_backendprop
+short_description: Will update the backend configuration of OpenDJ via the dsconfig set-backend-prop command
+description:
+    - This module will update settings for OpenDJ with the command set-backend-prop.
+    - It will first check via the get-backend-prop command whether the configuration needs to be applied.
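+
+# Roughly, the module shells out to the following commands (a sketch; the exact
+# flags are assembled in the code below, and hostname/port/credentials come from
+# the module options):
+#
+#   dsconfig get-backend-prop -h <hostname> --port <port> --bindDN <username> --backend-name <backend> -n -X -s
+#   dsconfig set-backend-prop -h <hostname> --port <port> --bindDN <username> --backend-name <backend> --set <name>:<value> -n -X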
+author: + - Werner Dijkerman (@dj-wasabi) +options: + opendj_bindir: + description: + - The path to the bin directory of OpenDJ. + required: false + default: /opt/opendj/bin + hostname: + description: + - The hostname of the OpenDJ server. + required: true + port: + description: + - The Admin port on which the OpenDJ instance is available. + required: true + username: + description: + - The username to connect to. + required: false + default: cn=Directory Manager + password: + description: + - The password for the cn=Directory Manager user. + - Either password or passwordfile is needed. + required: false + passwordfile: + description: + - Location to the password file which holds the password for the cn=Directory Manager user. + - Either password or passwordfile is needed. + required: false + backend: + description: + - The name of the backend on which the property needs to be updated. + required: true + name: + description: + - The configuration setting to update. + required: true + value: + description: + - The value for the configuration item. + required: true + state: + description: + - If configuration needs to be added/updated + required: false + default: "present" +''' + +EXAMPLES = ''' + - name: "Add or update OpenDJ backend properties" + action: opendj_backendprop + hostname=localhost + port=4444 + username="cn=Directory Manager" + password=password + backend=userRoot + name=index-entry-limit + value=5000 +''' + +RETURN = ''' +''' + +from ansible.module_utils.basic import AnsibleModule + + +class BackendProp(object): + + def __init__(self, module): + self._module = module + + def get_property(self, opendj_bindir, hostname, port, username, password_method, backend_name): + my_command = [ + opendj_bindir + '/dsconfig', + 'get-backend-prop', + '-h', hostname, + '--port', str(port), + '--bindDN', username, + '--backend-name', backend_name, + '-n', '-X', '-s' + ] + password_method + rc, stdout, stderr = self._module.run_command(my_command) + if rc == 0: + return stdout + else: + self._module.fail_json(msg="Error message: " + str(stderr)) + + def set_property(self, opendj_bindir, hostname, port, username, password_method, backend_name, name, value): + my_command = [ + opendj_bindir + '/dsconfig', + 'set-backend-prop', + '-h', hostname, + '--port', str(port), + '--bindDN', username, + '--backend-name', backend_name, + '--set', name + ":" + value, + '-n', '-X' + ] + password_method + rc, stdout, stderr = self._module.run_command(my_command) + if rc == 0: + return True + else: + self._module.fail_json(msg="Error message: " + stderr) + + def validate_data(self, data=None, name=None, value=None): + for config_line in data.split('\n'): + if config_line: + split_line = config_line.split() + if split_line[0] == name: + if split_line[1] == value: + return True + return False + + +def main(): + module = AnsibleModule( + argument_spec=dict( + opendj_bindir=dict(default="/opt/opendj/bin", type="path"), + hostname=dict(required=True), + port=dict(required=True), + username=dict(default="cn=Directory Manager", required=False), + password=dict(required=False, no_log=True), + passwordfile=dict(required=False, type="path"), + backend=dict(required=True), + name=dict(required=True), + value=dict(required=True), + state=dict(default="present"), + ), + supports_check_mode=True, + mutually_exclusive=[['password', 'passwordfile']], + required_one_of=[['password', 'passwordfile']] + ) + + opendj_bindir = module.params['opendj_bindir'] + hostname = module.params['hostname'] + port = module.params['port'] + 
username = module.params['username']
+    password = module.params['password']
+    passwordfile = module.params['passwordfile']
+    backend_name = module.params['backend']
+    name = module.params['name']
+    value = module.params['value']
+    state = module.params['state']
+
+    if module.params["password"] is not None:
+        password_method = ['-w', password]
+    elif module.params["passwordfile"] is not None:
+        password_method = ['-j', passwordfile]
+
+    opendj = BackendProp(module)
+    validate = opendj.get_property(opendj_bindir=opendj_bindir,
+                                   hostname=hostname,
+                                   port=port,
+                                   username=username,
+                                   password_method=password_method,
+                                   backend_name=backend_name)
+
+    if validate:
+        if not opendj.validate_data(data=validate, name=name, value=value):
+            if module.check_mode:
+                module.exit_json(changed=True)
+            if opendj.set_property(opendj_bindir=opendj_bindir,
+                                   hostname=hostname,
+                                   port=port,
+                                   username=username,
+                                   password_method=password_method,
+                                   backend_name=backend_name,
+                                   name=name,
+                                   value=value):
+                module.exit_json(changed=True)
+            else:
+                module.exit_json(changed=False)
+        else:
+            module.exit_json(changed=False)
+    else:
+        module.exit_json(changed=False)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/messaging/rabbitmq/rabbitmq_binding.py b/plugins/modules/messaging/rabbitmq/rabbitmq_binding.py
new file mode 100644
index 0000000000..4e418842d5
--- /dev/null
+++ b/plugins/modules/messaging/rabbitmq/rabbitmq_binding.py
@@ -0,0 +1,297 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2015, Manuel Sousa
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+    'metadata_version': '1.1',
+    'status': ['preview'],
+    'supported_by': 'community'
+}
+
+DOCUMENTATION = '''
+---
+module: rabbitmq_binding
+author: Manuel Sousa (@manuel-sousa)
+
+short_description: Manage RabbitMQ bindings
+description:
+  - This module uses the RabbitMQ REST API to create / delete bindings.
+requirements: [ "requests >= 1.0.0" ]
+options:
+    state:
+      description:
+        - Whether the bindings should be present or absent.
+      choices: [ "present", "absent" ]
+      default: present
+    name:
+      description:
+        - Source exchange to create the binding on.
+      required: true
+      aliases: [ "src", "source" ]
+    destination:
+      description:
+        - Destination exchange or queue for the binding.
+      required: true
+      aliases: [ "dst", "dest" ]
+    destination_type:
+      description:
+        - Either queue or exchange.
+      required: true
+      choices: [ "queue", "exchange" ]
+      aliases: [ "type", "dest_type" ]
+    routing_key:
+      description:
+        - Routing key for the binding.
+      default: "#"
+    arguments:
+      description:
+        - Extra arguments for the binding. If defined, this argument is a key/value dictionary.
+      required: false
+      default: {}
+extends_documentation_fragment:
+- community.general.rabbitmq

+'''
+
+EXAMPLES = '''
+# Bind myQueue to directExchange with routing key info
+- rabbitmq_binding:
+    name: directExchange
+    destination: myQueue
+    type: queue
+    routing_key: info
+
+# Bind topicExchange to topicExchange with routing key *.info
+- rabbitmq_binding:
+    name: topicExchange
+    destination: topicExchange
+    type: exchange
+    routing_key: '*.info'
+'''
+
+import json
+import traceback
+
+REQUESTS_IMP_ERR = None
+try:
+    import requests
+    HAS_REQUESTS = True
+except ImportError:
+    REQUESTS_IMP_ERR = traceback.format_exc()
+    HAS_REQUESTS = False
+
+from ansible.module_utils.six.moves.urllib import parse as urllib_parse
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible_collections.community.general.plugins.module_utils.rabbitmq import rabbitmq_argument_spec
+
+
+class RabbitMqBinding(object):
+    def __init__(self, module):
+        """
+        :param module:
+        """
+        self.module = module
+        self.name = self.module.params['name']
+        self.login_user = self.module.params['login_user']
+        self.login_password = self.module.params['login_password']
+        self.login_host = self.module.params['login_host']
+        self.login_port = self.module.params['login_port']
+        self.login_protocol = self.module.params['login_protocol']
+        self.vhost = self.module.params['vhost']
+        self.destination = self.module.params['destination']
+        self.destination_type = 'q' if self.module.params['destination_type'] == 'queue' else 'e'
+        self.routing_key = self.module.params['routing_key']
+        self.arguments = self.module.params['arguments']
+        self.verify = self.module.params['ca_cert']
+        self.cert = self.module.params['client_cert']
+        self.key = self.module.params['client_key']
+        self.props = urllib_parse.quote(self.routing_key) if self.routing_key != '' else '~'
+        self.base_url = '{0}://{1}:{2}/api/bindings'.format(self.login_protocol,
+                                                            self.login_host,
+                                                            self.login_port)
+        self.url = '{0}/{1}/e/{2}/{3}/{4}/{5}'.format(self.base_url,
+                                                      urllib_parse.quote(self.vhost, safe=''),
+                                                      urllib_parse.quote(self.name, safe=''),
+                                                      self.destination_type,
+                                                      urllib_parse.quote(self.destination, safe=''),
+                                                      self.props)
+        self.result = {
+            'changed': False,
+            'name': self.module.params['name'],
+        }
+        self.authentication = (
+            self.login_user,
+            self.login_password
+        )
+        self.request = requests
+        self.http_check_states = {
+            200: True,
+            404: False,
+        }
+        self.http_actionable_states = {
+            201: True,
+            204: True,
+        }
+        self.api_result = self.request.get(self.url, auth=self.authentication, verify=self.verify, cert=(self.cert, self.key))
+
+    def run(self):
+        """
+        :return:
+        """
+        self.check_presence()
+        self.check_mode()
+        self.action_mode()
+
+    def check_presence(self):
+        """
+        :return:
+        """
+        if self.check_should_throw_fail():
+            self.fail()
+
+    def change_required(self):
+        """
+        :return:
+        """
+        if self.module.params['state'] == 'present':
+            if not self.is_present():
+                return True
+        elif self.module.params['state'] == 'absent':
+            if self.is_present():
+                return True
+        return False
+
+    def is_present(self):
+        """
+        :return:
+        """
+        return self.http_check_states.get(self.api_result.status_code, False)
+
+    def check_mode(self):
+        """
+        :return:
+        """
+        if self.module.check_mode:
+            result = self.result
+            result['changed'] = self.change_required()
+            result['details'] = self.api_result.json() if self.is_present() else self.api_result.text
+            result['arguments'] = 
self.module.params['arguments'] + self.module.exit_json(**result) + + def check_reply_is_correct(self): + """ + :return: + """ + if self.api_result.status_code in self.http_check_states: + return True + return False + + def check_should_throw_fail(self): + """ + :return: + """ + if not self.is_present(): + if not self.check_reply_is_correct(): + return True + return False + + def action_mode(self): + """ + :return: + """ + result = self.result + if self.change_required(): + if self.module.params['state'] == 'present': + self.create() + if self.module.params['state'] == 'absent': + self.remove() + if self.action_should_throw_fail(): + self.fail() + result['changed'] = True + result['destination'] = self.module.params['destination'] + self.module.exit_json(**result) + else: + result['changed'] = False + self.module.exit_json(**result) + + def action_reply_is_correct(self): + """ + :return: + """ + if self.api_result.status_code in self.http_actionable_states: + return True + return False + + def action_should_throw_fail(self): + """ + :return: + """ + if not self.action_reply_is_correct(): + return True + return False + + def create(self): + """ + :return: + """ + self.url = '{0}/{1}/e/{2}/{3}/{4}'.format(self.base_url, + urllib_parse.quote(self.vhost, safe=''), + urllib_parse.quote(self.name, safe=''), + self.destination_type, + urllib_parse.quote(self.destination, safe='')) + self.api_result = self.request.post(self.url, + auth=self.authentication, + verify=self.verify, + cert=(self.cert, self.key), + headers={"content-type": "application/json"}, + data=json.dumps({ + 'routing_key': self.routing_key, + 'arguments': self.arguments + })) + + def remove(self): + """ + :return: + """ + self.api_result = self.request.delete(self.url, auth=self.authentication, verify=self.verify, cert=(self.cert, self.key)) + + def fail(self): + """ + :return: + """ + self.module.fail_json( + msg="Unexpected reply from API", + status=self.api_result.status_code, + details=self.api_result.text + ) + + +def main(): + + argument_spec = rabbitmq_argument_spec() + argument_spec.update( + dict( + state=dict(default='present', choices=['present', 'absent'], type='str'), + name=dict(required=True, aliases=["src", "source"], type='str'), + destination=dict(required=True, aliases=["dst", "dest"], type='str'), + destination_type=dict(required=True, aliases=["type", "dest_type"], choices=["queue", "exchange"], + type='str'), + routing_key=dict(default='#', type='str'), + arguments=dict(default=dict(), type='dict') + ) + ) + module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True) + + if not HAS_REQUESTS: + module.fail_json(msg=missing_required_lib("requests"), exception=REQUESTS_IMP_ERR) + + RabbitMqBinding(module).run() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/messaging/rabbitmq/rabbitmq_exchange.py b/plugins/modules/messaging/rabbitmq/rabbitmq_exchange.py new file mode 100644 index 0000000000..ba4e6cc3c2 --- /dev/null +++ b/plugins/modules/messaging/rabbitmq/rabbitmq_exchange.py @@ -0,0 +1,208 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2015, Manuel Sousa +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: rabbitmq_exchange +author: Manuel Sousa (@manuel-sousa) + 
+short_description: Manage rabbitMQ exchanges +description: + - This module uses rabbitMQ Rest API to create/delete exchanges +requirements: [ "requests >= 1.0.0" ] +options: + name: + description: + - Name of the exchange to create + required: true + state: + description: + - Whether the exchange should be present or absent + choices: [ "present", "absent" ] + required: false + default: present + durable: + description: + - whether exchange is durable or not + required: false + type: bool + default: yes + exchange_type: + description: + - type for the exchange + required: false + choices: [ "fanout", "direct", "headers", "topic" ] + aliases: [ "type" ] + default: direct + auto_delete: + description: + - if the exchange should delete itself after all queues/exchanges unbound from it + required: false + type: bool + default: no + internal: + description: + - exchange is available only for other exchanges + required: false + type: bool + default: no + arguments: + description: + - extra arguments for exchange. If defined this argument is a key/value dictionary + required: false + default: {} +extends_documentation_fragment: +- community.general.rabbitmq + +''' + +EXAMPLES = ''' +# Create direct exchange +- rabbitmq_exchange: + name: directExchange + +# Create topic exchange on vhost +- rabbitmq_exchange: + name: topicExchange + type: topic + vhost: myVhost +''' + +import json +import traceback + +REQUESTS_IMP_ERR = None +try: + import requests + HAS_REQUESTS = True +except ImportError: + REQUESTS_IMP_ERR = traceback.format_exc() + HAS_REQUESTS = False + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib +from ansible.module_utils.six.moves.urllib import parse as urllib_parse +from ansible_collections.community.general.plugins.module_utils.rabbitmq import rabbitmq_argument_spec + + +def main(): + + argument_spec = rabbitmq_argument_spec() + argument_spec.update( + dict( + state=dict(default='present', choices=['present', 'absent'], type='str'), + name=dict(required=True, type='str'), + durable=dict(default=True, type='bool'), + auto_delete=dict(default=False, type='bool'), + internal=dict(default=False, type='bool'), + exchange_type=dict(default='direct', aliases=['type'], type='str'), + arguments=dict(default=dict(), type='dict') + ) + ) + module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True) + + url = "%s://%s:%s/api/exchanges/%s/%s" % ( + module.params['login_protocol'], + module.params['login_host'], + module.params['login_port'], + urllib_parse.quote(module.params['vhost'], ''), + urllib_parse.quote(module.params['name'], '') + ) + + if not HAS_REQUESTS: + module.fail_json(msg=missing_required_lib("requests"), exception=REQUESTS_IMP_ERR) + + result = dict(changed=False, name=module.params['name']) + + # Check if exchange already exists + r = requests.get(url, auth=(module.params['login_user'], module.params['login_password']), + verify=module.params['ca_cert'], cert=(module.params['client_cert'], module.params['client_key'])) + + if r.status_code == 200: + exchange_exists = True + response = r.json() + elif r.status_code == 404: + exchange_exists = False + response = r.text + else: + module.fail_json( + msg="Invalid response from RESTAPI when trying to check if exchange exists", + details=r.text + ) + + if module.params['state'] == 'present': + change_required = not exchange_exists + else: + change_required = exchange_exists + + # Check if attributes change on existing exchange + if not change_required and r.status_code == 200 and 
module.params['state'] == 'present': + if not ( + response['durable'] == module.params['durable'] and + response['auto_delete'] == module.params['auto_delete'] and + response['internal'] == module.params['internal'] and + response['type'] == module.params['exchange_type'] + ): + module.fail_json( + msg="RabbitMQ RESTAPI doesn't support attribute changes for existing exchanges" + ) + + # Exit if check_mode + if module.check_mode: + result['changed'] = change_required + result['details'] = response + result['arguments'] = module.params['arguments'] + module.exit_json(**result) + + # Do changes + if change_required: + if module.params['state'] == 'present': + r = requests.put( + url, + auth=(module.params['login_user'], module.params['login_password']), + headers={"content-type": "application/json"}, + data=json.dumps({ + "durable": module.params['durable'], + "auto_delete": module.params['auto_delete'], + "internal": module.params['internal'], + "type": module.params['exchange_type'], + "arguments": module.params['arguments'] + }), + verify=module.params['ca_cert'], + cert=(module.params['client_cert'], module.params['client_key']) + ) + elif module.params['state'] == 'absent': + r = requests.delete(url, auth=(module.params['login_user'], module.params['login_password']), + verify=module.params['ca_cert'], cert=(module.params['client_cert'], module.params['client_key'])) + + # RabbitMQ 3.6.7 changed this response code from 204 to 201 + if r.status_code == 204 or r.status_code == 201: + result['changed'] = True + module.exit_json(**result) + else: + module.fail_json( + msg="Error creating exchange", + status=r.status_code, + details=r.text + ) + + else: + module.exit_json( + changed=False, + name=module.params['name'] + ) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/messaging/rabbitmq/rabbitmq_global_parameter.py b/plugins/modules/messaging/rabbitmq/rabbitmq_global_parameter.py new file mode 100644 index 0000000000..357058274d --- /dev/null +++ b/plugins/modules/messaging/rabbitmq/rabbitmq_global_parameter.py @@ -0,0 +1,156 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2013, Chatham Financial +# Copyright: (c) 2017, Juergen Kirschbaum +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: rabbitmq_global_parameter +short_description: Manage RabbitMQ global parameters +description: + - Manage dynamic, cluster-wide global parameters for RabbitMQ +author: "Juergen Kirschbaum (@jgkirschbaum)" +options: + name: + description: + - Name of the global parameter being set + required: true + default: null + value: + description: + - Value of the global parameter, as a JSON term + required: false + default: null + node: + description: + - erlang node name of the rabbit we wish to configure + required: false + default: rabbit + state: + description: + - Specify if global parameter is to be added or removed + required: false + default: present + choices: [ 'present', 'absent'] +''' + +EXAMPLES = ''' +# Set the global parameter 'cluster_name' to a value of 'mq-cluster' (in quotes) +- rabbitmq_global_parameter: + name: cluster_name + value: "{{ 'mq-cluster' | to_json }}" + state: present +''' + +RETURN = ''' +name: + description: name of the global parameter being set + returned: success + type: str +
sample: "cluster_name" +value: + description: value of the global parameter, as a JSON term + returned: changed + type: str + sample: "the-cluster-name" +''' + +import json +from ansible.module_utils.basic import AnsibleModule + + +class RabbitMqGlobalParameter(object): + def __init__(self, module, name, value, node): + self.module = module + self.name = name + self.value = value + self.node = node + + self._value = None + + self._rabbitmqctl = module.get_bin_path('rabbitmqctl', True) + + def _exec(self, args, run_in_check_mode=False): + if not self.module.check_mode or (self.module.check_mode and run_in_check_mode): + cmd = [self._rabbitmqctl, '-q', '-n', self.node] + rc, out, err = self.module.run_command(cmd + args, check_rc=True) + return out.splitlines() + return list() + + def get(self): + global_parameters = self._exec(['list_global_parameters'], True) + + for param_item in global_parameters: + name, value = param_item.split('\t') + + if name == self.name: + self._value = json.loads(value) + return True + return False + + def set(self): + self._exec(['set_global_parameter', + self.name, + json.dumps(self.value)]) + + def delete(self): + self._exec(['clear_global_parameter', self.name]) + + def has_modifications(self): + return self.value != self._value + + +def main(): + arg_spec = dict( + name=dict(type='str', required=True), + value=dict(type='str', default=None), + state=dict(default='present', choices=['present', 'absent']), + node=dict(type='str', default='rabbit') + ) + module = AnsibleModule( + argument_spec=arg_spec, + supports_check_mode=True + ) + + name = module.params['name'] + value = module.params['value'] + if isinstance(value, str): + value = json.loads(value) + state = module.params['state'] + node = module.params['node'] + + result = dict(changed=False) + rabbitmq_global_parameter = RabbitMqGlobalParameter(module, name, value, node) + + if rabbitmq_global_parameter.get(): + if state == 'absent': + rabbitmq_global_parameter.delete() + result['changed'] = True + else: + if rabbitmq_global_parameter.has_modifications(): + rabbitmq_global_parameter.set() + result['changed'] = True + elif state == 'present': + rabbitmq_global_parameter.set() + result['changed'] = True + + result['name'] = name + result['value'] = value + result['state'] = state + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/messaging/rabbitmq/rabbitmq_parameter.py b/plugins/modules/messaging/rabbitmq/rabbitmq_parameter.py new file mode 100644 index 0000000000..baeba0dcb5 --- /dev/null +++ b/plugins/modules/messaging/rabbitmq/rabbitmq_parameter.py @@ -0,0 +1,154 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2013, Chatham Financial +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: rabbitmq_parameter +short_description: Manage RabbitMQ parameters +description: + - Manage dynamic, cluster-wide parameters for RabbitMQ +author: Chris Hoffman (@chrishoffman) +options: + component: + description: + - Name of the component of which the parameter is being set + required: true + name: + description: + - Name of the parameter being set + required: true + value: + description: + - Value of the parameter, as a JSON term + vhost: + description: + - vhost to apply 
access privileges. + default: / + node: + description: + - erlang node name of the rabbit we wish to configure + default: rabbit + state: + description: + - Specify if parameter is to be added or removed + default: present + choices: [ 'present', 'absent'] +''' + +EXAMPLES = """ +# Set the federation parameter 'local-username' to a value of 'guest' (in quotes) +- rabbitmq_parameter: + component: federation + name: local-username + value: '"guest"' + state: present +""" +import json +from ansible.module_utils.basic import AnsibleModule + + +class RabbitMqParameter(object): + def __init__(self, module, component, name, value, vhost, node): + self.module = module + self.component = component + self.name = name + self.value = value + self.vhost = vhost + self.node = node + + self._value = None + + self._rabbitmqctl = module.get_bin_path('rabbitmqctl', True) + + def _exec(self, args, run_in_check_mode=False): + if not self.module.check_mode or (self.module.check_mode and run_in_check_mode): + cmd = [self._rabbitmqctl, '-q', '-n', self.node] + rc, out, err = self.module.run_command(cmd + args, check_rc=True) + return out.strip().splitlines() + return list() + + def get(self): + parameters = [param for param in self._exec(['list_parameters', '-p', self.vhost], True) if param.strip()] + + for param_item in parameters: + component, name, value = param_item.split('\t') + + if component == self.component and name == self.name: + self._value = json.loads(value) + return True + return False + + def set(self): + self._exec(['set_parameter', + '-p', + self.vhost, + self.component, + self.name, + json.dumps(self.value)]) + + def delete(self): + self._exec(['clear_parameter', '-p', self.vhost, self.component, self.name]) + + def has_modifications(self): + return self.value != self._value + + +def main(): + arg_spec = dict( + component=dict(required=True), + name=dict(required=True), + value=dict(default=None), + vhost=dict(default='/'), + state=dict(default='present', choices=['present', 'absent']), + node=dict(default='rabbit') + ) + module = AnsibleModule( + argument_spec=arg_spec, + supports_check_mode=True + ) + + component = module.params['component'] + name = module.params['name'] + value = module.params['value'] + if isinstance(value, str): + value = json.loads(value) + vhost = module.params['vhost'] + state = module.params['state'] + node = module.params['node'] + + result = dict(changed=False) + rabbitmq_parameter = RabbitMqParameter(module, component, name, value, vhost, node) + + if rabbitmq_parameter.get(): + if state == 'absent': + rabbitmq_parameter.delete() + result['changed'] = True + else: + if rabbitmq_parameter.has_modifications(): + rabbitmq_parameter.set() + result['changed'] = True + elif state == 'present': + rabbitmq_parameter.set() + result['changed'] = True + + result['component'] = component + result['name'] = name + result['vhost'] = vhost + result['state'] = state + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/messaging/rabbitmq/rabbitmq_plugin.py b/plugins/modules/messaging/rabbitmq/rabbitmq_plugin.py new file mode 100644 index 0000000000..da0249ba97 --- /dev/null +++ b/plugins/modules/messaging/rabbitmq/rabbitmq_plugin.py @@ -0,0 +1,178 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2013, Chatham Financial +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA =
{ + 'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community' +} + + +DOCUMENTATION = ''' +--- +module: rabbitmq_plugin +short_description: Manage RabbitMQ plugins +description: + - This module can be used to enable or disable RabbitMQ plugins. +author: + - Chris Hoffman (@chrishoffman) +options: + names: + description: + - Comma-separated list of plugin names. Also accepts a single plugin name. + required: true + aliases: [name] + new_only: + description: + - Only enable missing plugins. + - Does not disable plugins that are not in the names list. + type: bool + default: "no" + state: + description: + - Specify if plugins are to be enabled or disabled. + default: enabled + choices: [enabled, disabled] + prefix: + description: + - Specify a custom install prefix of a RabbitMQ installation. +''' + +EXAMPLES = ''' +- name: Enable the rabbitmq_management plugin + rabbitmq_plugin: + names: rabbitmq_management + state: enabled + +- name: Enable multiple rabbitmq plugins + rabbitmq_plugin: + names: rabbitmq_management,rabbitmq_management_visualiser + state: enabled + +- name: Disable plugin + rabbitmq_plugin: + names: rabbitmq_management + state: disabled + +- name: Enable every plugin in list with existing plugins + rabbitmq_plugin: + names: rabbitmq_management,rabbitmq_management_visualiser,rabbitmq_shovel,rabbitmq_shovel_management + state: enabled + new_only: 'yes' +''' + +RETURN = ''' +enabled: + description: list of plugins enabled during task run + returned: always + type: list + sample: ["rabbitmq_management"] +disabled: + description: list of plugins disabled during task run + returned: always + type: list + sample: ["rabbitmq_management"] +''' + +import os +from ansible.module_utils.basic import AnsibleModule + + +class RabbitMqPlugins(object): + + def __init__(self, module): + self.module = module + bin_path = '' + if module.params['prefix']: + if os.path.isdir(os.path.join(module.params['prefix'], 'bin')): + bin_path = os.path.join(module.params['prefix'], 'bin') + elif os.path.isdir(os.path.join(module.params['prefix'], 'sbin')): + bin_path = os.path.join(module.params['prefix'], 'sbin') + else: + # No such path exists.
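+ # Illustrative note (not from the original module): with, say, prefix=/usr/local/rabbitmq, + # the checks above look for /usr/local/rabbitmq/bin or /usr/local/rabbitmq/sbin, and this + # branch is reached only when neither directory exists.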
+ module.fail_json(msg="No binary folder in prefix %s" % module.params['prefix']) + + self._rabbitmq_plugins = os.path.join(bin_path, "rabbitmq-plugins") + else: + self._rabbitmq_plugins = module.get_bin_path('rabbitmq-plugins', True) + + def _exec(self, args, run_in_check_mode=False): + if not self.module.check_mode or (self.module.check_mode and run_in_check_mode): + cmd = [self._rabbitmq_plugins] + rc, out, err = self.module.run_command(cmd + args, check_rc=True) + return out.splitlines() + return list() + + def get_all(self): + list_output = self._exec(['list', '-E', '-m'], True) + plugins = [] + for plugin in list_output: + if not plugin: + break + plugins.append(plugin) + + return plugins + + def enable(self, name): + self._exec(['enable', name]) + + def disable(self, name): + self._exec(['disable', name]) + + +def main(): + arg_spec = dict( + names=dict(required=True, aliases=['name']), + new_only=dict(default='no', type='bool'), + state=dict(default='enabled', choices=['enabled', 'disabled']), + prefix=dict(required=False, default=None) + ) + module = AnsibleModule( + argument_spec=arg_spec, + supports_check_mode=True + ) + + result = dict() + names = module.params['names'].split(',') + new_only = module.params['new_only'] + state = module.params['state'] + + rabbitmq_plugins = RabbitMqPlugins(module) + enabled_plugins = rabbitmq_plugins.get_all() + + enabled = [] + disabled = [] + if state == 'enabled': + if not new_only: + for plugin in enabled_plugins: + if " " in plugin: + continue + if plugin not in names: + rabbitmq_plugins.disable(plugin) + disabled.append(plugin) + + for name in names: + if name not in enabled_plugins: + rabbitmq_plugins.enable(name) + enabled.append(name) + else: + for plugin in enabled_plugins: + if plugin in names: + rabbitmq_plugins.disable(plugin) + disabled.append(plugin) + + result['changed'] = len(enabled) > 0 or len(disabled) > 0 + result['enabled'] = enabled + result['disabled'] = disabled + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/messaging/rabbitmq/rabbitmq_policy.py b/plugins/modules/messaging/rabbitmq/rabbitmq_policy.py new file mode 100644 index 0000000000..3f53abc58b --- /dev/null +++ b/plugins/modules/messaging/rabbitmq/rabbitmq_policy.py @@ -0,0 +1,245 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2013, John Dewey +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: rabbitmq_policy +short_description: Manage the state of policies in RabbitMQ +description: + - Manage the state of a policy in RabbitMQ. +author: John Dewey (@retr0h) +options: + name: + description: + - The name of the policy to manage. + required: true + vhost: + description: + - The name of the vhost to apply to. + default: / + apply_to: + description: + - What the policy applies to. Requires RabbitMQ 3.2.0 or later. + default: all + choices: [all, exchanges, queues] + pattern: + description: + - A regex of queues to apply the policy to. Required when + C(state=present). This option is no longer required as of Ansible 2.9. + required: false + default: null + tags: + description: + - A dict or string describing the policy. Required when + C(state=present). This option is no longer required as of Ansible 2.9. 
+ required: false + default: null + priority: + description: + - The priority of the policy. + default: 0 + node: + description: + - Erlang node name of the rabbit we wish to configure. + default: rabbit + state: + description: + - The state of the policy. + default: present + choices: [present, absent] +''' + +EXAMPLES = ''' +- name: ensure the default vhost contains the HA policy via a dict + rabbitmq_policy: + name: HA + pattern: .* + args: + tags: + ha-mode: all + +- name: ensure the default vhost contains the HA policy + rabbitmq_policy: + name: HA + pattern: .* + tags: + ha-mode: all +''' + +import json +import re +from distutils.version import LooseVersion as Version + +from ansible.module_utils.basic import AnsibleModule + + +class RabbitMqPolicy(object): + + def __init__(self, module, name): + self._module = module + self._name = name + self._vhost = module.params['vhost'] + self._pattern = module.params['pattern'] + self._apply_to = module.params['apply_to'] + self._tags = module.params['tags'] + self._priority = module.params['priority'] + self._node = module.params['node'] + self._rabbitmqctl = module.get_bin_path('rabbitmqctl', True) + + self._version = self._rabbit_version() + + def _exec(self, + args, + run_in_check_mode=False, + split_lines=True, + add_vhost=True): + if (not self._module.check_mode + or (self._module.check_mode and run_in_check_mode)): + cmd = [self._rabbitmqctl, '-q', '-n', self._node] + + if add_vhost: + args.insert(1, '-p') + args.insert(2, self._vhost) + + rc, out, err = self._module.run_command(cmd + args, check_rc=True) + if split_lines: + return out.splitlines() + + return out + return list() + + def _rabbit_version(self): + status = self._exec(['status'], True, False, False) + + # 3.7.x erlang style output + version_match = re.search('{rabbit,".*","(?P<version>.*)"}', status) + if version_match: + return Version(version_match.group('version')) + + # 3.8.x style output + version_match = re.search('RabbitMQ version: (?P<version>.*)', status) + if version_match: + return Version(version_match.group('version')) + + return None + + def _list_policies(self): + if self._version and self._version >= Version('3.7.9'): + # Remove first header line from policies list for version > 3.7.9 + return self._exec(['list_policies'], True)[1:] + + return self._exec(['list_policies'], True) + + def has_modifications(self): + if self._pattern is None or self._tags is None: + self._module.fail_json( + msg=('pattern and tags are required for ' + 'state=present')) + + if self._version and self._version >= Version('3.7.0'): + # Change fields order in rabbitmqctl output in version 3.7 + return not any( + self._policy_check(policy, apply_to_fno=3, pattern_fno=2) + for policy in self._list_policies()) + else: + return not any( + self._policy_check(policy) for policy in self._list_policies()) + + def should_be_deleted(self): + return any( + self._policy_check_by_name(policy) + for policy in self._list_policies()) + + def set(self): + args = ['set_policy'] + args.append(self._name) + args.append(self._pattern) + args.append(json.dumps(self._tags)) + args.append('--priority') + args.append(self._priority) + if self._apply_to != 'all': + args.append('--apply-to') + args.append(self._apply_to) + return self._exec(args) + + def clear(self): + return self._exec(['clear_policy', self._name]) + + def _policy_check(self, + policy, + name_fno=1, + apply_to_fno=2, + pattern_fno=3, + tags_fno=4, + priority_fno=5): + if not policy: + return False + + policy_data = policy.split('\t') + + policy_name =
policy_data[name_fno] + apply_to = policy_data[apply_to_fno] + pattern = policy_data[pattern_fno].replace('\\\\', '\\') + tags = json.loads(policy_data[tags_fno]) + priority = policy_data[priority_fno] + + return (policy_name == self._name and apply_to == self._apply_to + and tags == self._tags and priority == self._priority + and pattern == self._pattern) + + def _policy_check_by_name(self, policy): + if not policy: + return False + + policy_name = policy.split('\t')[1] + + return policy_name == self._name + + +def main(): + arg_spec = dict( + name=dict(required=True), + vhost=dict(default='/'), + pattern=dict(required=False, default=None), + apply_to=dict(default='all', choices=['all', 'exchanges', 'queues']), + tags=dict(type='dict', required=False, default=None), + priority=dict(default='0'), + node=dict(default='rabbit'), + state=dict(default='present', choices=['present', 'absent']), + ) + + module = AnsibleModule( + argument_spec=arg_spec, + supports_check_mode=True + ) + + name = module.params['name'] + state = module.params['state'] + rabbitmq_policy = RabbitMqPolicy(module, name) + + result = dict(changed=False, name=name, state=state) + + if state == 'present' and rabbitmq_policy.has_modifications(): + rabbitmq_policy.set() + result['changed'] = True + elif state == 'absent' and rabbitmq_policy.should_be_deleted(): + rabbitmq_policy.clear() + result['changed'] = True + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/messaging/rabbitmq/rabbitmq_queue.py b/plugins/modules/messaging/rabbitmq/rabbitmq_queue.py new file mode 100644 index 0000000000..169e294f0d --- /dev/null +++ b/plugins/modules/messaging/rabbitmq/rabbitmq_queue.py @@ -0,0 +1,256 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2015, Manuel Sousa +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: rabbitmq_queue +author: Manuel Sousa (@manuel-sousa) + +short_description: Manage rabbitMQ queues +description: + - This module uses rabbitMQ Rest API to create/delete queues +requirements: [ "requests >= 1.0.0" ] +options: + name: + description: + - Name of the queue + required: true + state: + description: + - Whether the queue should be present or absent + choices: [ "present", "absent" ] + default: present + durable: + description: + - whether queue is durable or not + type: bool + default: 'yes' + auto_delete: + description: + - if the queue should delete itself after all consumers have unsubscribed from it + type: bool + default: 'no' + message_ttl: + description: + - How long a message can live in the queue before it is discarded (milliseconds) + default: forever + auto_expires: + description: + - How long a queue can be unused before it is automatically deleted (milliseconds) + default: forever + max_length: + description: + - How many messages can the queue contain before it starts rejecting messages + default: no limit + dead_letter_exchange: + description: + - Optional name of an exchange to which messages will be republished if they + - are rejected or expire + dead_letter_routing_key: + description: + - Optional replacement routing key to use when a message is dead-lettered.
+ - Original routing key will be used if unset + max_priority: + description: + - Maximum number of priority levels for the queue to support. + - If not set, the queue will not support message priorities. + - Larger numbers indicate higher priority. + arguments: + description: + - extra arguments for queue. If defined this argument is a key/value dictionary + required: false + default: {} +extends_documentation_fragment: +- community.general.rabbitmq + +''' + +EXAMPLES = ''' +# Create a queue +- rabbitmq_queue: + name: myQueue + +# Create a queue on remote host +- rabbitmq_queue: + name: myRemoteQueue + login_user: user + login_password: secret + login_host: remote.example.org +''' + +import json +import traceback + +REQUESTS_IMP_ERR = None +try: + import requests + HAS_REQUESTS = True +except ImportError: + REQUESTS_IMP_ERR = traceback.format_exc() + HAS_REQUESTS = False + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib +from ansible.module_utils.six.moves.urllib import parse as urllib_parse +from ansible_collections.community.general.plugins.module_utils.rabbitmq import rabbitmq_argument_spec + + +def main(): + + argument_spec = rabbitmq_argument_spec() + argument_spec.update( + dict( + state=dict(default='present', choices=['present', 'absent'], type='str'), + name=dict(required=True, type='str'), + durable=dict(default=True, type='bool'), + auto_delete=dict(default=False, type='bool'), + message_ttl=dict(default=None, type='int'), + auto_expires=dict(default=None, type='int'), + max_length=dict(default=None, type='int'), + dead_letter_exchange=dict(default=None, type='str'), + dead_letter_routing_key=dict(default=None, type='str'), + arguments=dict(default=dict(), type='dict'), + max_priority=dict(default=None, type='int') + ) + ) + module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True) + + # Quote the queue name as well as the vhost, so names with special characters build a valid URL + url = "%s://%s:%s/api/queues/%s/%s" % ( + module.params['login_protocol'], + module.params['login_host'], + module.params['login_port'], + urllib_parse.quote(module.params['vhost'], ''), + urllib_parse.quote(module.params['name'], '') + ) + + if not HAS_REQUESTS: + module.fail_json(msg=missing_required_lib("requests"), exception=REQUESTS_IMP_ERR) + + result = dict(changed=False, name=module.params['name']) + + # Check if queue already exists + r = requests.get(url, auth=(module.params['login_user'], module.params['login_password']), + verify=module.params['ca_cert'], cert=(module.params['client_cert'], module.params['client_key'])) + + if r.status_code == 200: + queue_exists = True + response = r.json() + elif r.status_code == 404: + queue_exists = False + response = r.text + else: + module.fail_json( + msg="Invalid response from RESTAPI when trying to check if queue exists", + details=r.text + ) + + if module.params['state'] == 'present': + change_required = not queue_exists + else: + change_required = queue_exists + + # Check if attributes change on existing queue + if not change_required and r.status_code == 200 and module.params['state'] == 'present': + if not ( + response['durable'] == module.params['durable'] and + response['auto_delete'] == module.params['auto_delete'] and + ( + ('x-message-ttl' in response['arguments'] and response['arguments']['x-message-ttl'] == module.params['message_ttl']) or + ('x-message-ttl' not in response['arguments'] and module.params['message_ttl'] is None) + ) and + ( + ('x-expires' in response['arguments'] and response['arguments']['x-expires'] == module.params['auto_expires']) or + ('x-expires' not in response['arguments'] and
module.params['auto_expires'] is None) + ) and + ( + ('x-max-length' in response['arguments'] and response['arguments']['x-max-length'] == module.params['max_length']) or + ('x-max-length' not in response['arguments'] and module.params['max_length'] is None) + ) and + ( + ('x-dead-letter-exchange' in response['arguments'] and + response['arguments']['x-dead-letter-exchange'] == module.params['dead_letter_exchange']) or + ('x-dead-letter-exchange' not in response['arguments'] and module.params['dead_letter_exchange'] is None) + ) and + ( + ('x-dead-letter-routing-key' in response['arguments'] and + response['arguments']['x-dead-letter-routing-key'] == module.params['dead_letter_routing_key']) or + ('x-dead-letter-routing-key' not in response['arguments'] and module.params['dead_letter_routing_key'] is None) + ) and + ( + ('x-max-priority' in response['arguments'] and + response['arguments']['x-max-priority'] == module.params['max_priority']) or + ('x-max-priority' not in response['arguments'] and module.params['max_priority'] is None) + ) + ): + module.fail_json( + msg="RabbitMQ RESTAPI doesn't support attribute changes for existing queues", + ) + + # Copy parameters to arguments as used by RabbitMQ + for k, v in { + 'message_ttl': 'x-message-ttl', + 'auto_expires': 'x-expires', + 'max_length': 'x-max-length', + 'dead_letter_exchange': 'x-dead-letter-exchange', + 'dead_letter_routing_key': 'x-dead-letter-routing-key', + 'max_priority': 'x-max-priority' + }.items(): + if module.params[k] is not None: + module.params['arguments'][v] = module.params[k] + + # Exit if check_mode + if module.check_mode: + result['changed'] = change_required + result['details'] = response + result['arguments'] = module.params['arguments'] + module.exit_json(**result) + + # Do changes + if change_required: + if module.params['state'] == 'present': + r = requests.put( + url, + auth=(module.params['login_user'], module.params['login_password']), + headers={"content-type": "application/json"}, + data=json.dumps({ + "durable": module.params['durable'], + "auto_delete": module.params['auto_delete'], + "arguments": module.params['arguments'] + }), + verify=module.params['ca_cert'], + cert=(module.params['client_cert'], module.params['client_key']) + ) + elif module.params['state'] == 'absent': + r = requests.delete(url, auth=(module.params['login_user'], module.params['login_password']), + verify=module.params['ca_cert'], cert=(module.params['client_cert'], module.params['client_key'])) + + # RabbitMQ 3.6.7 changed this response code from 204 to 201 + if r.status_code == 204 or r.status_code == 201: + result['changed'] = True + module.exit_json(**result) + else: + module.fail_json( + msg="Error creating queue", + status=r.status_code, + details=r.text + ) + + else: + module.exit_json( + changed=False, + name=module.params['name'] + ) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/messaging/rabbitmq/rabbitmq_user.py b/plugins/modules/messaging/rabbitmq/rabbitmq_user.py new file mode 100644 index 0000000000..15767abfdc --- /dev/null +++ b/plugins/modules/messaging/rabbitmq/rabbitmq_user.py @@ -0,0 +1,321 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2013, Chatham Financial +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = 
''' +--- +module: rabbitmq_user +short_description: Manage RabbitMQ users +description: + - Add or remove RabbitMQ users and assign permissions +author: Chris Hoffman (@chrishoffman) +options: + user: + description: + - Name of user to add + required: true + aliases: [username, name] + password: + description: + - Password of user to add. + - To change the password of an existing user, you must also specify + C(update_password=always). + tags: + description: + - User tags specified as a comma delimited list + permissions: + description: + - a list of dicts, each dict contains vhost, configure_priv, write_priv, and read_priv, + and represents a permission rule for that vhost. + - This option is preferable when you care about all permissions of the user. + - You should use vhost, configure_priv, write_priv, and read_priv options instead + if you care about permissions for just some vhosts. + default: [] + vhost: + description: + - vhost to apply access privileges. + - This option will be ignored when permissions option is used. + default: / + node: + description: + - erlang node name of the rabbit we wish to configure + default: rabbit + configure_priv: + description: + - Regular expression to restrict configure actions on a resource + for the specified vhost. + - By default all actions are restricted. + - This option will be ignored when permissions option is used. + default: ^$ + write_priv: + description: + - Regular expression to restrict write actions on a resource + for the specified vhost. + - By default all actions are restricted. + - This option will be ignored when permissions option is used. + default: ^$ + read_priv: + description: + - Regular expression to restrict read actions on a resource + for the specified vhost. + - By default all actions are restricted. + - This option will be ignored when permissions option is used. + default: ^$ + force: + description: + - Deletes and recreates the user. + type: bool + default: 'no' + state: + description: + - Specify if user is to be added or removed + default: present + choices: [present, absent] + update_password: + description: + - C(on_create) will only set the password for newly created users. C(always) will update passwords if they differ. + required: false + default: on_create + choices: [ on_create, always ] +''' + +EXAMPLES = ''' +# Add user to server and assign full access control on / vhost. +# The user might have permission rules for other vhost but you don't care. +- rabbitmq_user: + user: joe + password: changeme + vhost: / + configure_priv: .* + read_priv: .* + write_priv: .* + state: present + +# Add user to server and assign full access control on / vhost.
+# The user doesn't have permission rules for other vhosts +- rabbitmq_user: + user: joe + password: changeme + permissions: + - vhost: / + configure_priv: .* + read_priv: .* + write_priv: .* + state: present +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.common.collections import count + + +class RabbitMqUser(object): + def __init__(self, module, username, password, tags, permissions, + node, bulk_permissions=False): + self.module = module + self.username = username + self.password = password + self.node = node + if not tags: + self.tags = list() + else: + self.tags = tags.split(',') + + self.permissions = permissions + self.bulk_permissions = bulk_permissions + + self._tags = None + self._permissions = [] + self._rabbitmqctl = module.get_bin_path('rabbitmqctl', True) + + def _exec(self, args, run_in_check_mode=False, check_rc=True): + if not self.module.check_mode or run_in_check_mode: + cmd = [self._rabbitmqctl, '-q'] + if self.node: + cmd.extend(['-n', self.node]) + rc, out, err = self.module.run_command(cmd + args, check_rc=check_rc) + return out.splitlines() + return list() + + def get(self): + users = self._exec(['list_users'], True) + + for user_tag in users: + if '\t' not in user_tag: + continue + + user, tags = user_tag.split('\t') + + if user == self.username: + for c in ['[', ']', ' ']: + tags = tags.replace(c, '') + + if tags != '': + self._tags = tags.split(',') + else: + self._tags = list() + + self._permissions = self._get_permissions() + return True + return False + + def _get_permissions(self): + """Get permissions of the user from RabbitMQ.""" + perms_out = [perm for perm in self._exec(['list_user_permissions', self.username], True) if perm.strip()] + + perms_list = list() + for perm in perms_out: + vhost, configure_priv, write_priv, read_priv = perm.split('\t') + if not self.bulk_permissions: + if vhost == self.permissions[0]['vhost']: + perms_list.append(dict(vhost=vhost, configure_priv=configure_priv, + write_priv=write_priv, read_priv=read_priv)) + break + else: + perms_list.append(dict(vhost=vhost, configure_priv=configure_priv, + write_priv=write_priv, read_priv=read_priv)) + return perms_list + + def check_password(self): + return self._exec(['authenticate_user', self.username, self.password], + run_in_check_mode=True, check_rc=False) + + def add(self): + if self.password is not None: + self._exec(['add_user', self.username, self.password]) + else: + self._exec(['add_user', self.username, '']) + self._exec(['clear_password', self.username]) + + def delete(self): + self._exec(['delete_user', self.username]) + + def change_password(self): + if self.password is not None: + self._exec(['change_password', self.username, self.password]) + else: + self._exec(['clear_password', self.username]) + + def set_tags(self): + self._exec(['set_user_tags', self.username] + self.tags) + + def set_permissions(self): + permissions_to_clear = [permission for permission in self._permissions if permission not in self.permissions] + permissions_to_add = [permission for permission in self.permissions if permission not in self._permissions] + for permission in permissions_to_clear: + cmd = 'clear_permissions -p {vhost} {username}'.format(username=self.username, + vhost=permission['vhost']) + self._exec(cmd.split(' ')) + for permission in permissions_to_add: + cmd = ('set_permissions -p {vhost} {username} {configure_priv} {write_priv} {read_priv}' + .format(username=self.username, **permission)) + self._exec(cmd.split(' ')) + + def 
has_tags_modifications(self): + return set(self.tags) != set(self._tags) + + def has_permissions_modifications(self): + def to_permission_tuple(vhost_permission_dict): + return vhost_permission_dict['vhost'], vhost_permission_dict + + def permission_dict(vhost_permission_list): + return dict(map(to_permission_tuple, vhost_permission_list)) + + return permission_dict(self._permissions) != permission_dict(self.permissions) + + +def main(): + arg_spec = dict( + user=dict(required=True, aliases=['username', 'name']), + password=dict(default=None, no_log=True), + tags=dict(default=None), + permissions=dict(default=list(), type='list'), + vhost=dict(default='/'), + configure_priv=dict(default='^$'), + write_priv=dict(default='^$'), + read_priv=dict(default='^$'), + force=dict(default='no', type='bool'), + state=dict(default='present', choices=['present', 'absent']), + node=dict(default='rabbit'), + update_password=dict(default='on_create', choices=['on_create', 'always']) + ) + module = AnsibleModule( + argument_spec=arg_spec, + supports_check_mode=True + ) + + username = module.params['user'] + password = module.params['password'] + tags = module.params['tags'] + permissions = module.params['permissions'] + vhost = module.params['vhost'] + configure_priv = module.params['configure_priv'] + write_priv = module.params['write_priv'] + read_priv = module.params['read_priv'] + force = module.params['force'] + state = module.params['state'] + node = module.params['node'] + update_password = module.params['update_password'] + + if permissions: + vhosts = map(lambda permission: permission.get('vhost', '/'), permissions) + if any(map(lambda count: count > 1, count(vhosts).values())): + module.fail_json(msg="Error parsing permissions: You can't have two permission dicts for the same vhost") + bulk_permissions = True + else: + perm = { + 'vhost': vhost, + 'configure_priv': configure_priv, + 'write_priv': write_priv, + 'read_priv': read_priv + } + permissions.append(perm) + bulk_permissions = False + + rabbitmq_user = RabbitMqUser(module, username, password, tags, permissions, + node, bulk_permissions=bulk_permissions) + + result = dict(changed=False, user=username, state=state) + if rabbitmq_user.get(): + if state == 'absent': + rabbitmq_user.delete() + result['changed'] = True + else: + if force: + rabbitmq_user.delete() + rabbitmq_user.add() + rabbitmq_user.get() + result['changed'] = True + elif update_password == 'always': + if not rabbitmq_user.check_password(): + rabbitmq_user.change_password() + result['changed'] = True + + if rabbitmq_user.has_tags_modifications(): + rabbitmq_user.set_tags() + result['changed'] = True + + if rabbitmq_user.has_permissions_modifications(): + rabbitmq_user.set_permissions() + result['changed'] = True + elif state == 'present': + rabbitmq_user.add() + rabbitmq_user.set_tags() + rabbitmq_user.set_permissions() + result['changed'] = True + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/messaging/rabbitmq/rabbitmq_vhost.py b/plugins/modules/messaging/rabbitmq/rabbitmq_vhost.py new file mode 100644 index 0000000000..ddd7383943 --- /dev/null +++ b/plugins/modules/messaging/rabbitmq/rabbitmq_vhost.py @@ -0,0 +1,144 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2013, Chatham Financial +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = 
{'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: rabbitmq_vhost +short_description: Manage the state of a virtual host in RabbitMQ +description: + - Manage the state of a virtual host in RabbitMQ +author: Chris Hoffman (@chrishoffman) +options: + name: + description: + - The name of the vhost to manage + required: true + aliases: [vhost] + node: + description: + - erlang node name of the rabbit we wish to configure + default: rabbit + tracing: + description: + - Enable/disable tracing for a vhost + type: bool + default: 'no' + aliases: [trace] + state: + description: + - The state of vhost + default: present + choices: [present, absent] +''' + +EXAMPLES = ''' +# Ensure that the vhost /test exists. +- rabbitmq_vhost: + name: /test + state: present +''' + +from ansible.module_utils.basic import AnsibleModule + + +class RabbitMqVhost(object): + def __init__(self, module, name, tracing, node): + self.module = module + self.name = name + self.tracing = tracing + self.node = node + + self._tracing = False + self._rabbitmqctl = module.get_bin_path('rabbitmqctl', True) + + def _exec(self, args, run_in_check_mode=False): + if not self.module.check_mode or (self.module.check_mode and run_in_check_mode): + cmd = [self._rabbitmqctl, '-q', '-n', self.node] + rc, out, err = self.module.run_command(cmd + args, check_rc=True) + return out.splitlines() + return list() + + def get(self): + vhosts = self._exec(['list_vhosts', 'name', 'tracing'], True) + + for vhost in vhosts: + if '\t' not in vhost: + continue + + name, tracing = vhost.split('\t') + if name == self.name: + self._tracing = self.module.boolean(tracing) + return True + return False + + def add(self): + return self._exec(['add_vhost', self.name]) + + def delete(self): + return self._exec(['delete_vhost', self.name]) + + def set_tracing(self): + if self.tracing != self._tracing: + if self.tracing: + self._enable_tracing() + else: + self._disable_tracing() + return True + return False + + def _enable_tracing(self): + return self._exec(['trace_on', '-p', self.name]) + + def _disable_tracing(self): + return self._exec(['trace_off', '-p', self.name]) + + +def main(): + arg_spec = dict( + name=dict(required=True, aliases=['vhost']), + tracing=dict(default='off', aliases=['trace'], type='bool'), + state=dict(default='present', choices=['present', 'absent']), + node=dict(default='rabbit'), + ) + + module = AnsibleModule( + argument_spec=arg_spec, + supports_check_mode=True + ) + + name = module.params['name'] + tracing = module.params['tracing'] + state = module.params['state'] + node = module.params['node'] + result = dict(changed=False, name=name, state=state) + rabbitmq_vhost = RabbitMqVhost(module, name, tracing, node) + + if rabbitmq_vhost.get(): + if state == 'absent': + rabbitmq_vhost.delete() + result['changed'] = True + else: + if rabbitmq_vhost.set_tracing(): + result['changed'] = True + elif state == 'present': + rabbitmq_vhost.add() + rabbitmq_vhost.set_tracing() + result['changed'] = True + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/messaging/rabbitmq/rabbitmq_vhost_limits.py b/plugins/modules/messaging/rabbitmq/rabbitmq_vhost_limits.py new file mode 100644 index 0000000000..e58c50650d --- /dev/null +++ b/plugins/modules/messaging/rabbitmq/rabbitmq_vhost_limits.py @@ -0,0 +1,172 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2018, Hiroyuki Matsuo +# GNU General Public License v3.0+ (see 
COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: rabbitmq_vhost_limits +author: Hiroyuki Matsuo (@h-matsuo) + +short_description: Manage the state of virtual host limits in RabbitMQ +description: + - This module sets/clears certain limits on a virtual host. + - The configurable limits are I(max_connections) and I(max_queues). + +options: + max_connections: + description: + - Max number of concurrent client connections. + - Negative value means "no limit". + - Ignored when the I(state) is C(absent). + default: -1 + max_queues: + description: + - Max number of queues. + - Negative value means "no limit". + - Ignored when the I(state) is C(absent). + default: -1 + node: + description: + - Name of the RabbitMQ Erlang node to manage. + state: + description: + - Specify whether the limits are to be set or cleared. + - If set to C(absent), the limits of both I(max_connections) and I(max_queues) will be cleared. + default: present + choices: [present, absent] + vhost: + description: + - Name of the virtual host to manage. + default: / +''' + +EXAMPLES = ''' +# Limit both the max number of connections and the max number of queues on the vhost '/'. +- rabbitmq_vhost_limits: + vhost: / + max_connections: 64 + max_queues: 256 + state: present + +# Limit the max number of connections on the vhost '/'. +# This task implicitly clears the max number of queues limit using default value: -1. +- rabbitmq_vhost_limits: + vhost: / + max_connections: 64 + state: present + +# Clear the limits on the vhost '/'. +- rabbitmq_vhost_limits: + vhost: / + state: absent +''' + +RETURN = ''' # ''' + + +import json +from ansible.module_utils.basic import AnsibleModule + + +class RabbitMqVhostLimits(object): + def __init__(self, module): + self._module = module + self._max_connections = module.params['max_connections'] + self._max_queues = module.params['max_queues'] + self._node = module.params['node'] + self._state = module.params['state'] + self._vhost = module.params['vhost'] + self._rabbitmqctl = module.get_bin_path('rabbitmqctl', True) + + def _exec(self, args): + cmd = [self._rabbitmqctl, '-q', '-p', self._vhost] + if self._node is not None: + cmd.extend(['-n', self._node]) + rc, out, err = self._module.run_command(cmd + args, check_rc=True) + return dict(rc=rc, out=out.splitlines(), err=err.splitlines()) + + def list(self): + exec_result = self._exec(['list_vhost_limits']) + vhost_limits = exec_result['out'][0] + max_connections = None + max_queues = None + if vhost_limits: + vhost_limits = json.loads(vhost_limits) + if 'max-connections' in vhost_limits: + max_connections = vhost_limits['max-connections'] + if 'max-queues' in vhost_limits: + max_queues = vhost_limits['max-queues'] + return dict( + max_connections=max_connections, + max_queues=max_queues + ) + + def set(self): + if self._module.check_mode: + return + json_str = '{{"max-connections": {0}, "max-queues": {1}}}'.format(self._max_connections, self._max_queues) + self._exec(['set_vhost_limits', json_str]) + + def clear(self): + if self._module.check_mode: + return + self._exec(['clear_vhost_limits']) + + +def main(): + arg_spec = dict( + max_connections=dict(default=-1, type='int'), + max_queues=dict(default=-1, type='int'), + node=dict(default=None, type='str'), + state=dict(default='present', choices=['present', 'absent'],
type='str'), + vhost=dict(default='/', type='str') + ) + + module = AnsibleModule( + argument_spec=arg_spec, + supports_check_mode=True + ) + + max_connections = module.params['max_connections'] + max_queues = module.params['max_queues'] + node = module.params['node'] + state = module.params['state'] + vhost = module.params['vhost'] + + module_result = dict(changed=False) + rabbitmq_vhost_limits = RabbitMqVhostLimits(module) + current_status = rabbitmq_vhost_limits.list() + + if state == 'present': + wanted_status = dict( + max_connections=max_connections, + max_queues=max_queues + ) + else: # state == 'absent' + wanted_status = dict( + max_connections=None, + max_queues=None + ) + + if current_status != wanted_status: + module_result['changed'] = True + if state == 'present': + rabbitmq_vhost_limits.set() + else: # state == 'absent' + rabbitmq_vhost_limits.clear() + + module.exit_json(**module_result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/monitoring/airbrake_deployment.py b/plugins/modules/monitoring/airbrake_deployment.py new file mode 100644 index 0000000000..7fd736a011 --- /dev/null +++ b/plugins/modules/monitoring/airbrake_deployment.py @@ -0,0 +1,125 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright 2013 Bruce Pennypacker + +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: airbrake_deployment +author: "Bruce Pennypacker (@bpennypacker)" +short_description: Notify Airbrake about app deployments +description: + - Notify Airbrake about app deployments (see http://help.airbrake.io/kb/api-2/deploy-tracking) +options: + token: + description: + - API token. + required: true + environment: + description: + - The Airbrake environment name, typically 'production', 'staging', etc. + required: true + user: + description: + - The username of the person doing the deployment + required: false + repo: + description: + - URL of the project repository + required: false + revision: + description: + - A hash, number, tag, or other identifier showing what revision was deployed + required: false + url: + description: + - Optional URL to submit the notification to. Use to send notifications to Airbrake-compliant tools like Errbit. + required: false + default: "https://api.airbrake.io/deploys.txt" + validate_certs: + description: + - If C(no), SSL certificates for the target url will not be validated. This should only be used + on personally controlled sites using self-signed certificates. + required: false + default: 'yes' + type: bool + +requirements: [] +''' + +EXAMPLES = ''' +- airbrake_deployment: + token: AAAAAA + environment: staging + user: ansible + revision: '4.2' +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.urls import fetch_url +from ansible.module_utils.six.moves.urllib.parse import urlencode + + +# =========================================== +# Module execution.
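+# A rough sketch of the request main() below ends up making (illustrative values, +# assuming the default endpoint; not part of the original module): +# +# curl -d "deploy[rails_env]=staging&api_key=TOKEN" https://api.airbrake.io/deploys.txt +# +# fetch_url() issues a form-encoded POST, and any status other than 200 is reported as a failure.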
+# + +def main(): + + module = AnsibleModule( + argument_spec=dict( + token=dict(required=True, no_log=True), + environment=dict(required=True), + user=dict(required=False), + repo=dict(required=False), + revision=dict(required=False), + url=dict(required=False, default='https://api.airbrake.io/deploys.txt'), + validate_certs=dict(default='yes', type='bool'), + ), + supports_check_mode=True + ) + + # build list of params + params = {} + + if module.params["environment"]: + params["deploy[rails_env]"] = module.params["environment"] + + if module.params["user"]: + params["deploy[local_username]"] = module.params["user"] + + if module.params["repo"]: + params["deploy[scm_repository]"] = module.params["repo"] + + if module.params["revision"]: + params["deploy[scm_revision]"] = module.params["revision"] + + params["api_key"] = module.params["token"] + + url = module.params.get('url') + + # If we're in check mode, just exit pretending like we succeeded + if module.check_mode: + module.exit_json(changed=True) + + # Send the data to airbrake + data = urlencode(params) + response, info = fetch_url(module, url, data=data) + if info['status'] == 200: + module.exit_json(changed=True) + else: + module.fail_json(msg="HTTP result code: %d connecting to %s" % (info['status'], url)) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/monitoring/bigpanda.py b/plugins/modules/monitoring/bigpanda.py new file mode 100644 index 0000000000..b9de6c1066 --- /dev/null +++ b/plugins/modules/monitoring/bigpanda.py @@ -0,0 +1,203 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: bigpanda +author: "Hagai Kariti (@hkariti)" +short_description: Notify BigPanda about deployments +description: + - Notify BigPanda when deployments start and end (successfully or not). Returns a deployment object containing all the parameters for future module calls. +options: + component: + description: + - "The name of the component being deployed. Ex: billing" + required: true + aliases: ['name'] + version: + description: + - The deployment version. + required: true + token: + description: + - API token. + required: true + state: + description: + - State of the deployment. + required: true + choices: ['started', 'finished', 'failed'] + hosts: + description: + - Name of affected host name. Can be a list. + required: false + default: machine's hostname + aliases: ['host'] + env: + description: + - The environment name, typically 'production', 'staging', etc. + required: false + owner: + description: + - The person responsible for the deployment. + required: false + description: + description: + - Free text description of the deployment. + required: false + url: + description: + - Base URL of the API server. + required: False + default: https://api.bigpanda.io + validate_certs: + description: + - If C(no), SSL certificates for the target url will not be validated. This should only be used + on personally controlled sites using self-signed certificates. + required: false + default: 'yes' + type: bool + deployment_message: + description: + - Message about the deployment. + - C(message) alias is deprecated in Ansible 2.10, since it is used internally by Ansible Core Engine. 
+    aliases: ['message']
+
+# informational: requirements for nodes
+requirements: [ ]
+'''
+
+EXAMPLES = '''
+- bigpanda:
+    component: myapp
+    version: '1.3'
+    token: '{{ bigpanda_token }}'
+    state: started
+
+- bigpanda:
+    component: myapp
+    version: '1.3'
+    token: '{{ bigpanda_token }}'
+    state: finished
+
+# If outside servers aren't reachable from your machine, use delegate_to and override hosts:
+- bigpanda:
+    component: myapp
+    version: '1.3'
+    token: '{{ bigpanda_token }}'
+    hosts: '{{ ansible_hostname }}'
+    state: started
+  delegate_to: localhost
+  register: deployment
+
+- bigpanda:
+    component: '{{ deployment.component }}'
+    version: '{{ deployment.version }}'
+    token: '{{ deployment.token }}'
+    state: finished
+  delegate_to: localhost
+'''
+
+# ===========================================
+# Module execution.
+#
+import json
+import socket
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+from ansible.module_utils.urls import fetch_url
+
+
+def main():
+
+    module = AnsibleModule(
+        argument_spec=dict(
+            component=dict(required=True, aliases=['name']),
+            version=dict(required=True),
+            token=dict(required=True, no_log=True),
+            state=dict(required=True, choices=['started', 'finished', 'failed']),
+            hosts=dict(required=False, default=[socket.gethostname()], aliases=['host']),
+            env=dict(required=False),
+            owner=dict(required=False),
+            description=dict(required=False),
+            deployment_message=dict(required=False, aliases=['message'], deprecated_aliases=[dict(name='message', version='2.14')]),
+            source_system=dict(required=False, default='ansible'),
+            validate_certs=dict(default='yes', type='bool'),
+            url=dict(required=False, default='https://api.bigpanda.io'),
+        ),
+        supports_check_mode=True,
+    )
+
+    token = module.params['token']
+    state = module.params['state']
+    url = module.params['url']
+
+    # Build the common request body
+    body = dict()
+    for k in ('component', 'version', 'hosts'):
+        v = module.params[k]
+        if v is not None:
+            body[k] = v
+
+    if not isinstance(body['hosts'], list):
+        body['hosts'] = [body['hosts']]
+
+    # Insert state-specific attributes to body
+    if state == 'started':
+        for k in ('source_system', 'env', 'owner', 'description'):
+            v = module.params[k]
+            if v is not None:
+                body[k] = v
+
+        request_url = url + '/data/events/deployments/start'
+    else:
+        # Read the canonical option name; 'message' is only a deprecated alias
+        # and is not guaranteed to be present in module.params.
+        message = module.params['deployment_message']
+        if message is not None:
+            body['errorMessage'] = message
+
+        if state == 'finished':
+            body['status'] = 'success'
+        else:
+            body['status'] = 'failure'
+
+        request_url = url + '/data/events/deployments/end'
+
+    # Build the deployment object we return
+    deployment = dict(token=token, url=url)
+    deployment.update(body)
+    if 'errorMessage' in deployment:
+        message = deployment.pop('errorMessage')
+        deployment['message'] = message
+
+    # If we're in check mode, just exit pretending like we succeeded
+    if module.check_mode:
+        module.exit_json(changed=True, **deployment)
+
+    # Send the data to bigpanda
+    data = json.dumps(body)
+    headers = {'Authorization': 'Bearer %s' % token, 'Content-Type': 'application/json'}
+    try:
+        response, info = fetch_url(module, request_url, data=data, headers=headers)
+        if info['status'] == 200:
+            module.exit_json(changed=True, **deployment)
+        else:
+            module.fail_json(msg=json.dumps(info))
+    except Exception as e:
+        module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+    main()
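The registered deployment dict is shaped so it can be fed straight back into the closing call. A minimal sketch of the start/end request pair under the same wire format (the token and component values are placeholders):

import json
from urllib.request import Request, urlopen

TOKEN = 'PLACEHOLDER'  # stand-in, not a real BigPanda token
BODY = {'component': 'myapp', 'version': '1.3', 'hosts': ['web1']}

def notify(path, extra=None):
    # Same wire format as the module: JSON body, bearer-token header.
    payload = dict(BODY, **(extra or {}))
    req = Request('https://api.bigpanda.io' + path,
                  data=json.dumps(payload).encode('utf-8'),
                  headers={'Authorization': 'Bearer %s' % TOKEN,
                           'Content-Type': 'application/json'})
    return urlopen(req).getcode()

notify('/data/events/deployments/start')
notify('/data/events/deployments/end', {'status': 'success'})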
diff --git a/plugins/modules/monitoring/circonus_annotation.py b/plugins/modules/monitoring/circonus_annotation.py
new file mode 100644
index 0000000000..3fa7c89ba5
--- /dev/null
+++ b/plugins/modules/monitoring/circonus_annotation.py
@@ -0,0 +1,230 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (c) 2014-2015, Epic Games, Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: circonus_annotation
+short_description: Create an annotation in Circonus
+description:
+  - Create an annotation event with a given category, title and description. Optionally, a start time, stop time or duration can be provided.
+author: "Nick Harring (@NickatEpic)"
+requirements:
+  - requests (either >= 2.0.0 for Python 3, or >= 1.0.0 for Python 2)
+notes:
+  - Check mode isn't supported.
+options:
+  api_key:
+    description:
+      - Circonus API key.
+    required: true
+  category:
+    description:
+      - Annotation category.
+    required: true
+  description:
+    description:
+      - Description of annotation.
+    required: true
+  title:
+    description:
+      - Title of annotation.
+    required: true
+  start:
+    description:
+      - Unix timestamp of event start.
+    default: I(now)
+  stop:
+    description:
+      - Unix timestamp of event end.
+    default: I(now) + I(duration)
+  duration:
+    description:
+      - Duration in seconds of annotation.
+    default: 0
+'''
+EXAMPLES = '''
+# Create a simple annotation event with a source, defaults to start and end time of now
+- circonus_annotation:
+    api_key: XXXXXXXXXXXXXXXXX
+    title: App Config Change
+    description: This is a detailed description of the config change
+    category: This category groups like annotations
+# Create an annotation with a duration of 5 minutes and a default start time of now
+- circonus_annotation:
+    api_key: XXXXXXXXXXXXXXXXX
+    title: App Config Change
+    description: This is a detailed description of the config change
+    category: This category groups like annotations
+    duration: 300
+# Create an annotation with explicit start and stop times
+- circonus_annotation:
+    api_key: XXXXXXXXXXXXXXXXX
+    title: App Config Change
+    description: This is a detailed description of the config change
+    category: This category groups like annotations
+    start: 1395940006
+    stop: 1395954407
+'''
+
+RETURN = '''
+annotation:
+  description: details about the created annotation
+  returned: success
+  type: complex
+  contains:
+    _cid:
+      description: annotation identifier
+      returned: success
+      type: str
+      sample: /annotation/100000
+    _created:
+      description: creation timestamp
+      returned: success
+      type: int
+      sample: 1502236928
+    _last_modified:
+      description: last modification timestamp
+      returned: success
+      type: int
+      sample: 1502236928
+    _last_modified_by:
+      description: last modified by
+      returned: success
+      type: str
+      sample: /user/1000
+    category:
+      description: category of the created annotation
+      returned: success
+      type: str
+      sample: alerts
+    title:
+      description: title of the created annotation
+      returned: success
+      type: str
+      sample: WARNING
+    description:
+      description: description of the created annotation
+      returned: success
+      type: str
+      sample: Host is down.
+    start:
+      description: timestamp at which the annotation starts
+      returned: success
+      type: int
+      sample: 1395940006
+    stop:
+      description: timestamp at which the annotation ends
+      returned: success
+      type: int
+      sample: 1395954407
+    rel_metrics:
+      description: Array of metrics related to this annotation, each metric is a string.
+      returned: success
+      type: list
+      sample:
+        - 54321_kbps
+'''
+import json
+import time
+import traceback
+from distutils.version import LooseVersion
+
+REQUESTS_IMP_ERR = None
+try:
+    import requests
+    HAS_REQUESTS = True
+except ImportError:
+    REQUESTS_IMP_ERR = traceback.format_exc()
+    HAS_REQUESTS = False
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils.six import PY3
+from ansible.module_utils._text import to_native
+
+
+def check_requests_dep(module):
+    """Check if an adequate requests version is available"""
+    if not HAS_REQUESTS:
+        module.fail_json(msg=missing_required_lib('requests'), exception=REQUESTS_IMP_ERR)
+    else:
+        required_version = '2.0.0' if PY3 else '1.0.0'
+        if LooseVersion(requests.__version__) < LooseVersion(required_version):
+            module.fail_json(msg="'requests' library version should be >= %s, found: %s." % (required_version, requests.__version__))
+
+
+def post_annotation(annotation, api_key):
+    ''' Takes annotation dict and api_key string'''
+    base_url = 'https://api.circonus.com/v2'
+    annotate_post_endpoint = '/annotation'
+    resp = requests.post(base_url + annotate_post_endpoint,
+                         headers=build_headers(api_key), data=json.dumps(annotation))
+    resp.raise_for_status()
+    return resp
+
+
+def create_annotation(module):
+    ''' Takes ansible module object '''
+    annotation = {}
+    duration = module.params['duration']
+    if module.params['start'] is not None:
+        start = module.params['start']
+    else:
+        start = int(time.time())
+    if module.params['stop'] is not None:
+        stop = module.params['stop']
+    else:
+        stop = int(time.time()) + duration
+    annotation['start'] = start
+    annotation['stop'] = stop
+    annotation['category'] = module.params['category']
+    annotation['description'] = module.params['description']
+    annotation['title'] = module.params['title']
+    return annotation
+
+
+def build_headers(api_token):
+    '''Takes api token, returns headers with it included.'''
+    headers = {'X-Circonus-App-Name': 'ansible',
+               'Host': 'api.circonus.com', 'X-Circonus-Auth-Token': api_token,
+               'Accept': 'application/json'}
+    return headers
+
+
+def main():
+    '''Main function, dispatches logic'''
+    module = AnsibleModule(
+        argument_spec=dict(
+            start=dict(type='int'),
+            stop=dict(type='int'),
+            category=dict(required=True),
+            title=dict(required=True),
+            description=dict(required=True),
+            duration=dict(default=0, type='int'),
+            api_key=dict(required=True, no_log=True)
+        )
+    )
+
+    check_requests_dep(module)
+
+    annotation = create_annotation(module)
+    try:
+        resp = post_annotation(annotation, module.params['api_key'])
+    except requests.exceptions.RequestException as e:
+        module.fail_json(msg='Request Failed', reason=to_native(e), exception=traceback.format_exc())
+    module.exit_json(changed=True, annotation=resp.json())
+
+
+if __name__ == '__main__':
+    main()
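A minimal standalone sketch of the same annotation POST, assuming the requests library is available (the auth token and field values are placeholders):

import json
import time

import requests

API_TOKEN = 'PLACEHOLDER'  # stand-in, not a real Circonus auth token
start = int(time.time())
annotation = {'start': start, 'stop': start + 300,
              'category': 'deploys', 'title': 'App Config Change',
              'description': 'Detailed description of the change'}
resp = requests.post('https://api.circonus.com/v2/annotation',
                     headers={'X-Circonus-App-Name': 'ansible',
                              'X-Circonus-Auth-Token': API_TOKEN,
                              'Accept': 'application/json'},
                     data=json.dumps(annotation))
resp.raise_for_status()
print(resp.json()['_cid'])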
diff --git a/plugins/modules/monitoring/datadog/datadog_event.py b/plugins/modules/monitoring/datadog/datadog_event.py
new file mode 100644
index 0000000000..866d487f0e
--- /dev/null
+++ b/plugins/modules/monitoring/datadog/datadog_event.py
@@ -0,0 +1,161 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Author: Artūras 'arturaz' Šlajus
+# Author: Naoya Nakazawa
+#
+# This module is proudly sponsored by iGeolise (www.igeolise.com) and
+# Tiny Lab Productions (www.tinylabproductions.com).
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: datadog_event +short_description: Posts events to Datadog service +description: +- "Allows to post events to Datadog (www.datadoghq.com) service." +- "Uses http://docs.datadoghq.com/api/#events API." +author: +- "Artūras `arturaz` Šlajus (@arturaz)" +- "Naoya Nakazawa (@n0ts)" +options: + api_key: + description: ["Your DataDog API key."] + required: true + app_key: + description: ["Your DataDog app key."] + required: true + title: + description: ["The event title."] + required: true + text: + description: ["The body of the event."] + required: true + date_happened: + description: + - POSIX timestamp of the event. + - Default value is now. + default: now + priority: + description: ["The priority of the event."] + default: normal + choices: [normal, low] + host: + description: ["Host name to associate with the event."] + default: "{{ ansible_hostname }}" + tags: + description: ["Comma separated list of tags to apply to the event."] + alert_type: + description: ["Type of alert."] + default: info + choices: ['error', 'warning', 'info', 'success'] + aggregation_key: + description: ["An arbitrary string to use for aggregation."] + validate_certs: + description: + - If C(no), SSL certificates will not be validated. This should only be used + on personally controlled sites using self-signed certificates. + type: bool + default: 'yes' +''' + +EXAMPLES = ''' +# Post an event with low priority +- datadog_event: + title: Testing from ansible + text: Test + priority: low + api_key: 9775a026f1ca7d1c6c5af9d94d9595a4 + app_key: j4JyCYfefWHhgFgiZUqRm63AXHNZQyPGBfJtAzmN +# Post an event with several tags +- datadog_event: + title: Testing from ansible + text: Test + api_key: 9775a026f1ca7d1c6c5af9d94d9595a4 + app_key: j4JyCYfefWHhgFgiZUqRm63AXHNZQyPGBfJtAzmN + tags: 'aa,bb,#host:{{ inventory_hostname }}' +''' + +import platform +import traceback + +# Import Datadog +DATADOG_IMP_ERR = None +try: + from datadog import initialize, api + HAS_DATADOG = True +except Exception: + DATADOG_IMP_ERR = traceback.format_exc() + HAS_DATADOG = False + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib +from ansible.module_utils._text import to_native + + +def main(): + module = AnsibleModule( + argument_spec=dict( + api_key=dict(required=True, no_log=True), + app_key=dict(required=True, no_log=True), + title=dict(required=True), + text=dict(required=True), + date_happened=dict(required=False, default=None, type='int'), + priority=dict( + required=False, default='normal', choices=['normal', 'low'] + ), + host=dict(required=False, default=None), + tags=dict(required=False, default=None, type='list'), + alert_type=dict( + required=False, default='info', + choices=['error', 'warning', 'info', 'success'] + ), + aggregation_key=dict(required=False, default=None), + validate_certs=dict(default='yes', type='bool'), + ) + ) + + # Prepare Datadog + if not HAS_DATADOG: + module.fail_json(msg=missing_required_lib('datadogpy'), exception=DATADOG_IMP_ERR) + + options = { + 'api_key': module.params['api_key'], + 'app_key': module.params['app_key'] + } + + initialize(**options) + + _post_event(module) + + +def _post_event(module): + try: + if module.params['host'] is None: + 
module.params['host'] = platform.node().split('.')[0] + msg = api.Event.create(title=module.params['title'], + text=module.params['text'], + host=module.params['host'], + tags=module.params['tags'], + priority=module.params['priority'], + alert_type=module.params['alert_type'], + aggregation_key=module.params['aggregation_key'], + source_type_name='ansible') + if msg['status'] != 'ok': + module.fail_json(msg=msg) + + module.exit_json(changed=True, msg=msg) + except Exception as e: + module.fail_json(msg=to_native(e), exception=traceback.format_exc()) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/monitoring/datadog/datadog_monitor.py b/plugins/modules/monitoring/datadog/datadog_monitor.py new file mode 100644 index 0000000000..465669dfd6 --- /dev/null +++ b/plugins/modules/monitoring/datadog/datadog_monitor.py @@ -0,0 +1,396 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2015, Sebastian Kornehl +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: datadog_monitor +short_description: Manages Datadog monitors +description: + - Manages monitors within Datadog. + - Options as described on https://docs.datadoghq.com/api/. +author: Sebastian Kornehl (@skornehl) +requirements: [datadog] +options: + api_key: + description: + - Your Datadog API key. + required: true + type: str + api_host: + description: + - The URL to the Datadog API. Default value is C(https://api.datadoghq.com). + - This value can also be set with the C(DATADOG_HOST) environment variable. + required: false + type: str + app_key: + description: + - Your Datadog app key. + required: true + type: str + state: + description: + - The designated state of the monitor. + required: true + choices: ['present', 'absent', 'mute', 'unmute'] + type: str + tags: + description: + - A list of tags to associate with your monitor when creating or updating. + - This can help you categorize and filter monitors. + type: list + type: + description: + - The type of the monitor. + choices: ['metric alert', 'service check', 'event alert', 'process alert'] + type: str + query: + description: + - The monitor query to notify on. + - Syntax varies depending on what type of monitor you are creating. + type: str + name: + description: + - The name of the alert. + required: true + type: str + notification_message: + description: + - A message to include with notifications for this monitor. + - Email notifications can be sent to specific users by using the same '@username' notation as events. + - Monitor message template variables can be accessed by using double square brackets, i.e '[[' and ']]'. + - C(message) alias is deprecated in Ansible 2.10, since it is used internally by Ansible Core Engine. + type: str + aliases: [ 'message' ] + silenced: + description: + - Dictionary of scopes to silence, with timestamps or None. + - Each scope will be muted until the given POSIX timestamp or forever if the value is None. + default: "" + notify_no_data: + description: + - Whether this monitor will notify when data stops reporting. + type: bool + default: 'no' + no_data_timeframe: + description: + - The number of minutes before a monitor will notify when data stops reporting. 
+ - Must be at least 2x the monitor timeframe for metric alerts or 2 minutes for service checks. + default: 2x timeframe for metric, 2 minutes for service + type: str + timeout_h: + description: + - The number of hours of the monitor not reporting data before it will automatically resolve from a triggered state. + type: str + renotify_interval: + description: + - The number of minutes after the last notification before a monitor will re-notify on the current status. + - It will only re-notify if it is not resolved. + type: str + escalation_message: + description: + - A message to include with a re-notification. Supports the '@username' notification we allow elsewhere. + - Not applicable if I(renotify_interval=None). + type: str + notify_audit: + description: + - Whether tagged users will be notified on changes to this monitor. + type: bool + default: 'no' + thresholds: + description: + - A dictionary of thresholds by status. + - Only available for service checks and metric alerts. + - Because each of them can have multiple thresholds, we do not define them directly in the query. + default: {'ok': 1, 'critical': 1, 'warning': 1} + locked: + description: + - Whether changes to this monitor should be restricted to the creator or admins. + type: bool + default: 'no' + require_full_window: + description: + - Whether this monitor needs a full window of data before it gets evaluated. + - We highly recommend you set this to False for sparse metrics, otherwise some evaluations will be skipped. + type: bool + new_host_delay: + description: + - A positive integer representing the number of seconds to wait before evaluating the monitor for new hosts. + - This gives the host time to fully initialize. + type: str + evaluation_delay: + description: + - Time to delay evaluation (in seconds). + - Effective for sparse values. + type: str + id: + description: + - The ID of the alert. + - If set, will be used instead of the name to locate the alert. + type: str +''' + +EXAMPLES = ''' +# Create a metric monitor +- datadog_monitor: + type: "metric alert" + name: "Test monitor" + state: "present" + query: "datadog.agent.up.over('host:host1').last(2).count_by_status()" + notification_message: "Host [[host.name]] with IP [[host.ip]] is failing to report to datadog." 
+ api_key: "9775a026f1ca7d1c6c5af9d94d9595a4" + app_key: "87ce4a24b5553d2e482ea8a8500e71b8ad4554ff" + +# Deletes a monitor +- datadog_monitor: + name: "Test monitor" + state: "absent" + api_key: "9775a026f1ca7d1c6c5af9d94d9595a4" + app_key: "87ce4a24b5553d2e482ea8a8500e71b8ad4554ff" + +# Mutes a monitor +- datadog_monitor: + name: "Test monitor" + state: "mute" + silenced: '{"*":None}' + api_key: "9775a026f1ca7d1c6c5af9d94d9595a4" + app_key: "87ce4a24b5553d2e482ea8a8500e71b8ad4554ff" + +# Unmutes a monitor +- datadog_monitor: + name: "Test monitor" + state: "unmute" + api_key: "9775a026f1ca7d1c6c5af9d94d9595a4" + app_key: "87ce4a24b5553d2e482ea8a8500e71b8ad4554ff" + +# Use datadoghq.eu platform instead of datadoghq.com +- datadog_monitor: + name: "Test monitor" + state: "absent" + api_host: https://api.datadoghq.eu + api_key: "9775a026f1ca7d1c6c5af9d94d9595a4" + app_key: "87ce4a24b5553d2e482ea8a8500e71b8ad4554ff" +''' +import traceback + +# Import Datadog +DATADOG_IMP_ERR = None +try: + from datadog import initialize, api + HAS_DATADOG = True +except Exception: + DATADOG_IMP_ERR = traceback.format_exc() + HAS_DATADOG = False + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib +from ansible.module_utils._text import to_native + + +def main(): + module = AnsibleModule( + argument_spec=dict( + api_key=dict(required=True, no_log=True), + api_host=dict(required=False), + app_key=dict(required=True, no_log=True), + state=dict(required=True, choices=['present', 'absent', 'mute', 'unmute']), + type=dict(required=False, choices=['metric alert', 'service check', 'event alert', 'process alert']), + name=dict(required=True), + query=dict(required=False), + notification_message=dict(required=False, default=None, aliases=['message'], deprecated_aliases=[dict(name='message', version='2.14')]), + silenced=dict(required=False, default=None, type='dict'), + notify_no_data=dict(required=False, default=False, type='bool'), + no_data_timeframe=dict(required=False, default=None), + timeout_h=dict(required=False, default=None), + renotify_interval=dict(required=False, default=None), + escalation_message=dict(required=False, default=None), + notify_audit=dict(required=False, default=False, type='bool'), + thresholds=dict(required=False, type='dict', default=None), + tags=dict(required=False, type='list', default=None), + locked=dict(required=False, default=False, type='bool'), + require_full_window=dict(required=False, default=None, type='bool'), + new_host_delay=dict(required=False, default=None), + evaluation_delay=dict(required=False, default=None), + id=dict(required=False) + ) + ) + + # Prepare Datadog + if not HAS_DATADOG: + module.fail_json(msg=missing_required_lib('datadogpy'), exception=DATADOG_IMP_ERR) + + if 'message' in module.params: + module.fail_json(msg="'message' is reserved keyword, please change this parameter to 'notification_message'") + + options = { + 'api_key': module.params['api_key'], + 'api_host': module.params['api_host'], + 'app_key': module.params['app_key'] + } + + initialize(**options) + + # Check if api_key and app_key is correct or not + # if not, then fail here. 
+    response = api.Monitor.get_all()
+    if isinstance(response, dict):
+        msg = response.get('errors', None)
+        if msg:
+            module.fail_json(msg="Failed to connect to Datadog server using the given app_key and api_key: {0}".format(msg[0]))
+
+    if module.params['state'] == 'present':
+        install_monitor(module)
+    elif module.params['state'] == 'absent':
+        delete_monitor(module)
+    elif module.params['state'] == 'mute':
+        mute_monitor(module)
+    elif module.params['state'] == 'unmute':
+        unmute_monitor(module)
+
+
+def _fix_template_vars(message):
+    if message:
+        return message.replace('[[', '{{').replace(']]', '}}')
+    return message
+
+
+def _get_monitor(module):
+    if module.params['id'] is not None:
+        monitor = api.Monitor.get(module.params['id'])
+        if 'errors' in monitor:
+            module.fail_json(msg="Failed to retrieve monitor with id %s, errors are %s" % (module.params['id'], str(monitor['errors'])))
+        return monitor
+    else:
+        monitors = api.Monitor.get_all()
+        for monitor in monitors:
+            if monitor['name'] == _fix_template_vars(module.params['name']):
+                return monitor
+    return {}
+
+
+def _post_monitor(module, options):
+    try:
+        kwargs = dict(type=module.params['type'], query=module.params['query'],
+                      name=_fix_template_vars(module.params['name']),
+                      message=_fix_template_vars(module.params['notification_message']),
+                      escalation_message=_fix_template_vars(module.params['escalation_message']),
+                      options=options)
+        if module.params['tags'] is not None:
+            kwargs['tags'] = module.params['tags']
+        msg = api.Monitor.create(**kwargs)
+        if 'errors' in msg:
+            module.fail_json(msg=str(msg['errors']))
+        else:
+            module.exit_json(changed=True, msg=msg)
+    except Exception as e:
+        module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+
+def _equal_dicts(a, b, ignore_keys):
+    ka = set(a).difference(ignore_keys)
+    kb = set(b).difference(ignore_keys)
+    return ka == kb and all(a[k] == b[k] for k in ka)
+
+
+def _update_monitor(module, monitor, options):
+    try:
+        # Read the canonical 'notification_message' option; 'message' is only
+        # a deprecated alias and is not guaranteed to be in module.params.
+        kwargs = dict(id=monitor['id'], query=module.params['query'],
+                      name=_fix_template_vars(module.params['name']),
+                      message=_fix_template_vars(module.params['notification_message']),
+                      escalation_message=_fix_template_vars(module.params['escalation_message']),
+                      options=options)
+        if module.params['tags'] is not None:
+            kwargs['tags'] = module.params['tags']
+        msg = api.Monitor.update(**kwargs)
+
+        if 'errors' in msg:
+            module.fail_json(msg=str(msg['errors']))
+        elif _equal_dicts(msg, monitor, ['creator', 'overall_state', 'modified', 'matching_downtimes', 'overall_state_modified']):
+            module.exit_json(changed=False, msg=msg)
+        else:
+            module.exit_json(changed=True, msg=msg)
+    except Exception as e:
+        module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+
+def install_monitor(module):
+    options = {
+        "silenced": module.params['silenced'],
+        "notify_no_data": module.boolean(module.params['notify_no_data']),
+        "no_data_timeframe": module.params['no_data_timeframe'],
+        "timeout_h": module.params['timeout_h'],
+        "renotify_interval": module.params['renotify_interval'],
+        "escalation_message": module.params['escalation_message'],
+        "notify_audit": module.boolean(module.params['notify_audit']),
+        "locked": module.boolean(module.params['locked']),
+        "require_full_window": module.params['require_full_window'],
+        "new_host_delay": module.params['new_host_delay'],
+        "evaluation_delay": module.params['evaluation_delay']
+    }
+
+    if module.params['type'] == "service check":
+        options["thresholds"] = module.params['thresholds'] or {'ok': 1, 'critical': 1, 'warning':
1} + if module.params['type'] in ["metric alert", "log alert"] and module.params['thresholds'] is not None: + options["thresholds"] = module.params['thresholds'] + + monitor = _get_monitor(module) + if not monitor: + _post_monitor(module, options) + else: + _update_monitor(module, monitor, options) + + +def delete_monitor(module): + monitor = _get_monitor(module) + if not monitor: + module.exit_json(changed=False) + try: + msg = api.Monitor.delete(monitor['id']) + module.exit_json(changed=True, msg=msg) + except Exception as e: + module.fail_json(msg=to_native(e), exception=traceback.format_exc()) + + +def mute_monitor(module): + monitor = _get_monitor(module) + if not monitor: + module.fail_json(msg="Monitor %s not found!" % module.params['name']) + elif monitor['options']['silenced']: + module.fail_json(msg="Monitor is already muted. Datadog does not allow to modify muted alerts, consider unmuting it first.") + elif (module.params['silenced'] is not None and len(set(monitor['options']['silenced']) ^ set(module.params['silenced'])) == 0): + module.exit_json(changed=False) + try: + if module.params['silenced'] is None or module.params['silenced'] == "": + msg = api.Monitor.mute(id=monitor['id']) + else: + msg = api.Monitor.mute(id=monitor['id'], silenced=module.params['silenced']) + module.exit_json(changed=True, msg=msg) + except Exception as e: + module.fail_json(msg=to_native(e), exception=traceback.format_exc()) + + +def unmute_monitor(module): + monitor = _get_monitor(module) + if not monitor: + module.fail_json(msg="Monitor %s not found!" % module.params['name']) + elif not monitor['options']['silenced']: + module.exit_json(changed=False) + try: + msg = api.Monitor.unmute(monitor['id']) + module.exit_json(changed=True, msg=msg) + except Exception as e: + module.fail_json(msg=to_native(e), exception=traceback.format_exc()) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/monitoring/honeybadger_deployment.py b/plugins/modules/monitoring/honeybadger_deployment.py new file mode 100644 index 0000000000..90e13ca003 --- /dev/null +++ b/plugins/modules/monitoring/honeybadger_deployment.py @@ -0,0 +1,126 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright 2014 Benjamin Curtis +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: honeybadger_deployment +author: "Benjamin Curtis (@stympy)" +short_description: Notify Honeybadger.io about app deployments +description: + - Notify Honeybadger.io about app deployments (see http://docs.honeybadger.io/article/188-deployment-tracking) +options: + token: + description: + - API token. + required: true + environment: + description: + - The environment name, typically 'production', 'staging', etc. + required: true + user: + description: + - The username of the person doing the deployment + repo: + description: + - URL of the project repository + revision: + description: + - A hash, number, tag, or other identifier showing what revision was deployed + url: + description: + - Optional URL to submit the notification to. + default: "https://api.honeybadger.io/v1/deploys" + validate_certs: + description: + - If C(no), SSL certificates for the target url will not be validated. 
This should only be used + on personally controlled sites using self-signed certificates. + type: bool + default: 'yes' + +''' + +EXAMPLES = ''' +- honeybadger_deployment: + token: AAAAAA + environment: staging + user: ansible + revision: b6826b8 + repo: 'git@github.com:user/repo.git' +''' + +RETURN = '''# ''' + +import traceback + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.six.moves.urllib.parse import urlencode +from ansible.module_utils._text import to_native +from ansible.module_utils.urls import fetch_url + + +# =========================================== +# Module execution. +# + +def main(): + + module = AnsibleModule( + argument_spec=dict( + token=dict(required=True, no_log=True), + environment=dict(required=True), + user=dict(required=False), + repo=dict(required=False), + revision=dict(required=False), + url=dict(required=False, default='https://api.honeybadger.io/v1/deploys'), + validate_certs=dict(default='yes', type='bool'), + ), + supports_check_mode=True + ) + + params = {} + + if module.params["environment"]: + params["deploy[environment]"] = module.params["environment"] + + if module.params["user"]: + params["deploy[local_username]"] = module.params["user"] + + if module.params["repo"]: + params["deploy[repository]"] = module.params["repo"] + + if module.params["revision"]: + params["deploy[revision]"] = module.params["revision"] + + params["api_key"] = module.params["token"] + + url = module.params.get('url') + + # If we're in check mode, just exit pretending like we succeeded + if module.check_mode: + module.exit_json(changed=True) + + try: + data = urlencode(params) + response, info = fetch_url(module, url, data=data) + except Exception as e: + module.fail_json(msg='Unable to notify Honeybadger: %s' % to_native(e), exception=traceback.format_exc()) + else: + if info['status'] == 201: + module.exit_json(changed=True) + else: + module.fail_json(msg="HTTP result code: %d connecting to %s" % (info['status'], url)) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/monitoring/icinga2_feature.py b/plugins/modules/monitoring/icinga2_feature.py new file mode 100644 index 0000000000..2381c49289 --- /dev/null +++ b/plugins/modules/monitoring/icinga2_feature.py @@ -0,0 +1,131 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2016, Loic Blot +# Copyright (c) 2018, Ansible Project +# Sponsored by Infopro Digital. http://www.infopro-digital.com/ +# Sponsored by E.T.A.I. http://www.etai.fr/ +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = { + 'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community' +} + + +DOCUMENTATION = ''' +--- +module: icinga2_feature + +short_description: Manage Icinga2 feature +description: + - This module can be used to enable or disable an Icinga2 feature. +author: "Loic Blot (@nerzhul)" +options: + name: + description: + - This is the feature name to enable or disable. + required: True + state: + description: + - If set to C(present) and feature is disabled, then feature is enabled. + - If set to C(present) and feature is already enabled, then nothing is changed. + - If set to C(absent) and feature is enabled, then feature is disabled. + - If set to C(absent) and feature is already disabled, then nothing is changed. 
+ choices: [ "present", "absent" ] + default: present +''' + +EXAMPLES = ''' +- name: Enable ido-pgsql feature + icinga2_feature: + name: ido-pgsql + state: present + +- name: Disable api feature + icinga2_feature: + name: api + state: absent +''' + +RETURN = ''' +# +''' + +import re +from ansible.module_utils.basic import AnsibleModule + + +class Icinga2FeatureHelper: + def __init__(self, module): + self.module = module + self._icinga2 = module.get_bin_path('icinga2', True) + self.feature_name = self.module.params['name'] + self.state = self.module.params['state'] + + def _exec(self, args): + cmd = [self._icinga2, 'feature'] + rc, out, err = self.module.run_command(cmd + args, check_rc=True) + return rc, out + + def manage(self): + rc, out = self._exec(["list"]) + if rc != 0: + self.module.fail_json(msg="Unable to list icinga2 features. " + "Ensure icinga2 is installed and present in binary path.") + + # If feature is already in good state, just exit + if (re.search("Disabled features:.* %s[ \n]" % self.feature_name, out) and self.state == "absent") or \ + (re.search("Enabled features:.* %s[ \n]" % self.feature_name, out) and self.state == "present"): + self.module.exit_json(changed=False) + + if self.module.check_mode: + self.module.exit_json(changed=True) + + feature_enable_str = "enable" if self.state == "present" else "disable" + + rc, out = self._exec([feature_enable_str, self.feature_name]) + + change_applied = False + if self.state == "present": + if rc != 0: + self.module.fail_json(msg="Failed to %s feature %s." + " icinga2 command returned %s" % (feature_enable_str, + self.feature_name, + out)) + + if re.search("already enabled", out) is None: + change_applied = True + else: + if rc == 0: + change_applied = True + # RC is not 0 for this already disabled feature, handle it as no change applied + elif re.search("Cannot disable feature '%s'. Target file .* does not exist" % self.feature_name, out): + change_applied = False + else: + self.module.fail_json(msg="Failed to disable feature. Command returns %s" % out) + + self.module.exit_json(changed=change_applied) + + +def main(): + module = AnsibleModule( + argument_spec=dict( + name=dict(type='str', required=True), + state=dict(type='str', choices=["present", "absent"], default="present") + ), + supports_check_mode=True + ) + + module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C', LC_CTYPE='C') + Icinga2FeatureHelper(module).manage() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/monitoring/icinga2_host.py b/plugins/modules/monitoring/icinga2_host.py new file mode 100644 index 0000000000..a8b23b52fc --- /dev/null +++ b/plugins/modules/monitoring/icinga2_host.py @@ -0,0 +1,322 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# This module is proudly sponsored by CGI (www.cgi.com) and +# KPN (www.kpn.com). +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: icinga2_host +short_description: Manage a host in Icinga2 +description: + - "Add or remove a host to Icinga2 through the API." 
+ - "See U(https://www.icinga.com/docs/icinga2/latest/doc/12-icinga2-api/)" +author: "Jurgen Brand (@t794104)" +options: + url: + description: + - HTTP, HTTPS, or FTP URL in the form (http|https|ftp)://[user[:pass]]@host.domain[:port]/path + required: true + use_proxy: + description: + - If C(no), it will not use a proxy, even if one is defined in + an environment variable on the target hosts. + type: bool + default: 'yes' + validate_certs: + description: + - If C(no), SSL certificates will not be validated. This should only be used + on personally controlled sites using self-signed certificates. + type: bool + default: 'yes' + url_username: + description: + - The username for use in HTTP basic authentication. + - This parameter can be used without C(url_password) for sites that allow empty passwords. + url_password: + description: + - The password for use in HTTP basic authentication. + - If the C(url_username) parameter is not specified, the C(url_password) parameter will not be used. + force_basic_auth: + description: + - httplib2, the library used by the uri module only sends authentication information when a webservice + responds to an initial request with a 401 status. Since some basic auth services do not properly + send a 401, logins will fail. This option forces the sending of the Basic authentication header + upon initial request. + type: bool + default: 'no' + client_cert: + description: + - PEM formatted certificate chain file to be used for SSL client + authentication. This file can also include the key as well, and if + the key is included, C(client_key) is not required. + client_key: + description: + - PEM formatted file that contains your private key to be used for SSL + client authentication. If C(client_cert) contains both the certificate + and key, this option is not required. + state: + description: + - Apply feature state. + choices: [ "present", "absent" ] + default: present + name: + description: + - Name used to create / delete the host. This does not need to be the FQDN, but does needs to be unique. + required: true + zone: + description: + - The zone from where this host should be polled. + template: + description: + - The template used to define the host. + - Template cannot be modified after object creation. + check_command: + description: + - The command used to check if the host is alive. + default: "hostalive" + display_name: + description: + - The name used to display the host. + default: if none is give it is the value of the parameter + ip: + description: + - The IP address of the host. + required: true + variables: + description: + - List of variables. 
+''' + +EXAMPLES = ''' +- name: Add host to icinga + icinga2_host: + url: "https://icinga2.example.com" + url_username: "ansible" + url_password: "a_secret" + state: present + name: "{{ ansible_fqdn }}" + ip: "{{ ansible_default_ipv4.address }}" + delegate_to: 127.0.0.1 +''' + +RETURN = ''' +name: + description: The name used to create, modify or delete the host + type: str + returned: always +data: + description: The data structure used for create, modify or delete of the host + type: dict + returned: always +''' + +import json +import os + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.urls import fetch_url, url_argument_spec + + +# =========================================== +# Icinga2 API class +# +class icinga2_api: + module = None + + def __init__(self, module): + self.module = module + + def call_url(self, path, data='', method='GET'): + headers = { + 'Accept': 'application/json', + 'X-HTTP-Method-Override': method, + } + url = self.module.params.get("url") + "/" + path + rsp, info = fetch_url(module=self.module, url=url, data=data, headers=headers, method=method, use_proxy=self.module.params['use_proxy']) + body = '' + if rsp: + body = json.loads(rsp.read()) + if info['status'] >= 400: + body = info['body'] + return {'code': info['status'], 'data': body} + + def check_connection(self): + ret = self.call_url('v1/status') + if ret['code'] == 200: + return True + return False + + def exists(self, hostname): + data = { + "filter": "match(\"" + hostname + "\", host.name)", + } + ret = self.call_url( + path="v1/objects/hosts", + data=self.module.jsonify(data) + ) + if ret['code'] == 200: + if len(ret['data']['results']) == 1: + return True + return False + + def create(self, hostname, data): + ret = self.call_url( + path="v1/objects/hosts/" + hostname, + data=self.module.jsonify(data), + method="PUT" + ) + return ret + + def delete(self, hostname): + data = {"cascade": 1} + ret = self.call_url( + path="v1/objects/hosts/" + hostname, + data=self.module.jsonify(data), + method="DELETE" + ) + return ret + + def modify(self, hostname, data): + ret = self.call_url( + path="v1/objects/hosts/" + hostname, + data=self.module.jsonify(data), + method="POST" + ) + return ret + + def diff(self, hostname, data): + ret = self.call_url( + path="v1/objects/hosts/" + hostname, + method="GET" + ) + changed = False + ic_data = ret['data']['results'][0] + for key in data['attrs']: + if key not in ic_data['attrs'].keys(): + changed = True + elif data['attrs'][key] != ic_data['attrs'][key]: + changed = True + return changed + + +# =========================================== +# Module execution. 
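# Note on the icinga2_api wrapper above: every request carries an
# X-HTTP-Method-Override header, which the Icinga2 REST API honours so that
# a JSON body (e.g. the match() filter built in exists()) can be sent while
# keeping GET semantics.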
+# +def main(): + # use the predefined argument spec for url + argument_spec = url_argument_spec() + # remove unnecessary argument 'force' + del argument_spec['force'] + # add our own arguments + argument_spec.update( + state=dict(default="present", choices=["absent", "present"]), + name=dict(required=True, aliases=['host']), + zone=dict(), + template=dict(default=None), + check_command=dict(default="hostalive"), + display_name=dict(default=None), + ip=dict(required=True), + variables=dict(type='dict', default=None), + ) + + # Define the main module + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True + ) + + state = module.params["state"] + name = module.params["name"] + zone = module.params["zone"] + template = [] + template.append(name) + if module.params["template"]: + template.append(module.params["template"]) + check_command = module.params["check_command"] + ip = module.params["ip"] + display_name = module.params["display_name"] + if not display_name: + display_name = name + variables = module.params["variables"] + + try: + icinga = icinga2_api(module=module) + icinga.check_connection() + except Exception as e: + module.fail_json(msg="unable to connect to Icinga. Exception message: %s" % (e)) + + data = { + 'attrs': { + 'address': ip, + 'display_name': display_name, + 'check_command': check_command, + 'zone': zone, + 'vars': { + 'made_by': "ansible", + }, + 'templates': template, + } + } + + if variables: + data['attrs']['vars'].update(variables) + + changed = False + if icinga.exists(name): + if state == "absent": + if module.check_mode: + module.exit_json(changed=True, name=name, data=data) + else: + try: + ret = icinga.delete(name) + if ret['code'] == 200: + changed = True + else: + module.fail_json(msg="bad return code deleting host: %s" % (ret['data'])) + except Exception as e: + module.fail_json(msg="exception deleting host: " + str(e)) + + elif icinga.diff(name, data): + if module.check_mode: + module.exit_json(changed=False, name=name, data=data) + + # Template attribute is not allowed in modification + del data['attrs']['templates'] + + ret = icinga.modify(name, data) + + if ret['code'] == 200: + changed = True + else: + module.fail_json(msg="bad return code modifying host: %s" % (ret['data'])) + + else: + if state == "present": + if module.check_mode: + changed = True + else: + try: + ret = icinga.create(name, data) + if ret['code'] == 200: + changed = True + else: + module.fail_json(msg="bad return code creating host: %s" % (ret['data'])) + except Exception as e: + module.fail_json(msg="exception creating host: " + str(e)) + + module.exit_json(changed=changed, name=name, data=data) + + +# import module snippets +if __name__ == '__main__': + main() diff --git a/plugins/modules/monitoring/librato_annotation.py b/plugins/modules/monitoring/librato_annotation.py new file mode 100644 index 0000000000..6ee3895763 --- /dev/null +++ b/plugins/modules/monitoring/librato_annotation.py @@ -0,0 +1,163 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# (C) Seth Edwards, 2014 +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: librato_annotation +short_description: create an annotation in librato +description: + - Create an annotation event on the given annotation stream :name. 
If the annotation stream does not exist, it will be created automatically +author: "Seth Edwards (@Sedward)" +requirements: [] +options: + user: + description: + - Librato account username + required: true + api_key: + description: + - Librato account api key + required: true + name: + description: + - The annotation stream name + - If the annotation stream does not exist, it will be created automatically + required: false + title: + description: + - The title of an annotation is a string and may contain spaces + - The title should be a short, high-level summary of the annotation e.g. v45 Deployment + required: true + source: + description: + - A string which describes the originating source of an annotation when that annotation is tracked across multiple members of a population + required: false + description: + description: + - The description contains extra metadata about a particular annotation + - The description should contain specifics on the individual annotation e.g. Deployed 9b562b2 shipped new feature foo! + required: false + start_time: + description: + - The unix timestamp indicating the time at which the event referenced by this annotation started + required: false + end_time: + description: + - The unix timestamp indicating the time at which the event referenced by this annotation ended + - For events that have a duration, this is a useful way to annotate the duration of the event + required: false + links: + description: + - See examples + required: true +''' + +EXAMPLES = ''' +# Create a simple annotation event with a source +- librato_annotation: + user: user@example.com + api_key: XXXXXXXXXXXXXXXXX + title: App Config Change + source: foo.bar + description: This is a detailed description of the config change + +# Create an annotation that includes a link +- librato_annotation: + user: user@example.com + api_key: XXXXXXXXXXXXXXXXXX + name: code.deploy + title: app code deploy + description: this is a detailed description of a deployment + links: + - rel: example + href: http://www.example.com/deploy + +# Create an annotation with a start_time and end_time +- librato_annotation: + user: user@example.com + api_key: XXXXXXXXXXXXXXXXXX + name: maintenance + title: Maintenance window + description: This is a detailed description of maintenance + start_time: 1395940006 + end_time: 1395954406 +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.urls import fetch_url + + +def post_annotation(module): + user = module.params['user'] + api_key = module.params['api_key'] + name = module.params['name'] + title = module.params['title'] + + url = 'https://metrics-api.librato.com/v1/annotations/%s' % name + params = {} + params['title'] = title + + if module.params['source'] is not None: + params['source'] = module.params['source'] + if module.params['description'] is not None: + params['description'] = module.params['description'] + if module.params['start_time'] is not None: + params['start_time'] = module.params['start_time'] + if module.params['end_time'] is not None: + params['end_time'] = module.params['end_time'] + if module.params['links'] is not None: + params['links'] = module.params['links'] + + json_body = module.jsonify(params) + + headers = {} + headers['Content-Type'] = 'application/json' + + # Hack send parameters the way fetch_url wants them + module.params['url_username'] = user + module.params['url_password'] = api_key + response, info = fetch_url(module, url, data=json_body, headers=headers) + response_code = str(info['status']) + 
response_body = info['body'] + if info['status'] != 201: + if info['status'] >= 400: + module.fail_json(msg="Request Failed. Response code: " + response_code + " Response body: " + response_body) + else: + module.fail_json(msg="Request Failed. Response code: " + response_code) + response = response.read() + module.exit_json(changed=True, annotation=response) + + +def main(): + + module = AnsibleModule( + argument_spec=dict( + user=dict(required=True), + api_key=dict(required=True), + name=dict(required=False), + title=dict(required=True), + source=dict(required=False), + description=dict(required=False), + start_time=dict(required=False, default=None, type='int'), + end_time=dict(required=False, default=None, type='int'), + links=dict(type='list') + ) + ) + + post_annotation(module) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/monitoring/logentries.py b/plugins/modules/monitoring/logentries.py new file mode 100644 index 0000000000..beffad1979 --- /dev/null +++ b/plugins/modules/monitoring/logentries.py @@ -0,0 +1,156 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2013, Ivan Vanderbyl +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: logentries +author: "Ivan Vanderbyl (@ivanvanderbyl)" +short_description: Module for tracking logs via logentries.com +description: + - Sends logs to LogEntries in realtime +options: + path: + description: + - path to a log file + required: true + state: + description: + - following state of the log + choices: [ 'present', 'absent' ] + required: false + default: present + name: + description: + - name of the log + required: false + logtype: + description: + - type of the log + required: false + +notes: + - Requires the LogEntries agent which can be installed following the instructions at logentries.com +''' +EXAMPLES = ''' +# Track nginx logs +- logentries: + path: /var/log/nginx/access.log + state: present + name: nginx-access-log + +# Stop tracking nginx logs +- logentries: + path: /var/log/nginx/error.log + state: absent +''' + +from ansible.module_utils.basic import AnsibleModule + + +def query_log_status(module, le_path, path, state="present"): + """ Returns whether a log is followed or not. """ + + if state == "present": + rc, out, err = module.run_command("%s followed %s" % (le_path, path)) + if rc == 0: + return True + + return False + + +def follow_log(module, le_path, logs, name=None, logtype=None): + """ Follows one or more logs if not already followed. """ + + followed_count = 0 + + for log in logs: + if query_log_status(module, le_path, log): + continue + + if module.check_mode: + module.exit_json(changed=True) + + cmd = [le_path, 'follow', log] + if name: + cmd.extend(['--name', name]) + if logtype: + cmd.extend(['--type', logtype]) + rc, out, err = module.run_command(' '.join(cmd)) + + if not query_log_status(module, le_path, log): + module.fail_json(msg="failed to follow '%s': %s" % (log, err.strip())) + + followed_count += 1 + + if followed_count > 0: + module.exit_json(changed=True, msg="followed %d log(s)" % (followed_count,)) + + module.exit_json(changed=False, msg="logs(s) already followed") + + +def unfollow_log(module, le_path, logs): + """ Unfollows one or more logs if followed. 
""" + + removed_count = 0 + + # Using a for loop in case of error, we can report the package that failed + for log in logs: + # Query the log first, to see if we even need to remove. + if not query_log_status(module, le_path, log): + continue + + if module.check_mode: + module.exit_json(changed=True) + rc, out, err = module.run_command([le_path, 'rm', log]) + + if query_log_status(module, le_path, log): + module.fail_json(msg="failed to remove '%s': %s" % (log, err.strip())) + + removed_count += 1 + + if removed_count > 0: + module.exit_json(changed=True, msg="removed %d package(s)" % removed_count) + + module.exit_json(changed=False, msg="logs(s) already unfollowed") + + +def main(): + module = AnsibleModule( + argument_spec=dict( + path=dict(required=True), + state=dict(default="present", choices=["present", "followed", "absent", "unfollowed"]), + name=dict(required=False, default=None, type='str'), + logtype=dict(required=False, default=None, type='str', aliases=['type']) + ), + supports_check_mode=True + ) + + le_path = module.get_bin_path('le', True, ['/usr/local/bin']) + + p = module.params + + # Handle multiple log files + logs = p["path"].split(",") + logs = filter(None, logs) + + if p["state"] in ["present", "followed"]: + follow_log(module, le_path, logs, name=p['name'], logtype=p['logtype']) + + elif p["state"] in ["absent", "unfollowed"]: + unfollow_log(module, le_path, logs) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/monitoring/logicmonitor.py b/plugins/modules/monitoring/logicmonitor.py new file mode 100644 index 0000000000..533739760b --- /dev/null +++ b/plugins/modules/monitoring/logicmonitor.py @@ -0,0 +1,2135 @@ +#!/usr/bin/python + +# LogicMonitor Ansible module for managing Collectors, Hosts and Hostgroups +# Copyright (C) 2015 LogicMonitor +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +RETURN = ''' +--- +success: + description: flag indicating that execution was successful + returned: success + type: bool + sample: True +... +''' + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: logicmonitor +short_description: Manage your LogicMonitor account through Ansible Playbooks +description: + - LogicMonitor is a hosted, full-stack, infrastructure monitoring platform. + - This module manages hosts, host groups, and collectors within your LogicMonitor account. +author: +- Ethan Culler-Mayeno (@ethanculler) +- Jeff Wozniak (@woz5999) +notes: + - You must have an existing LogicMonitor account for this module to function. +requirements: ["An existing LogicMonitor account", "Linux"] +options: + target: + description: + - The type of LogicMonitor object you wish to manage. + - "Collector: Perform actions on a LogicMonitor collector." + - NOTE You should use Ansible service modules such as M(service) or M(supervisorctl) for managing the Collector 'logicmonitor-agent' and + 'logicmonitor-watchdog' services. Specifically, you'll probably want to start these services after a Collector add and stop these services + before a Collector remove. + - "Host: Perform actions on a host device." + - "Hostgroup: Perform actions on a LogicMonitor host group." + - > + NOTE Host and Hostgroup tasks should always be performed via delegate_to: localhost. 
There are no benefits to running these tasks on the + remote host and doing so will typically cause problems. + required: true + choices: ['collector', 'host', 'datasource', 'hostgroup'] + action: + description: + - The action you wish to perform on target. + - "Add: Add an object to your LogicMonitor account." + - "Remove: Remove an object from your LogicMonitor account." + - "Update: Update properties, description, or groups (target=host) for an object in your LogicMonitor account." + - "SDT: Schedule downtime for an object in your LogicMonitor account." + required: true + choices: ['add', 'remove', 'update', 'sdt'] + company: + description: + - The LogicMonitor account company name. If you would log in to your account at "superheroes.logicmonitor.com" you would use "superheroes." + required: true + user: + description: + - A LogicMonitor user name. The module will authenticate and perform actions on behalf of this user. + required: true + password: + description: + - The password of the specified LogicMonitor user + required: true + collector: + description: + - The fully qualified domain name of a collector in your LogicMonitor account. + - This is required for the creation of a LogicMonitor host (target=host action=add). + - This is required for updating, removing or scheduling downtime for hosts if 'displayname' isn't + specified (target=host action=update action=remove action=sdt). + hostname: + description: + - The hostname of a host in your LogicMonitor account, or the desired hostname of a device to manage. + - Optional for managing hosts (target=host). + default: 'hostname -f' + displayname: + description: + - The display name of a host in your LogicMonitor account or the desired display name of a device to manage. + - Optional for managing hosts (target=host). + default: 'hostname -f' + description: + description: + - The long text description of the object in your LogicMonitor account. + - Optional for managing hosts and host groups (target=host or target=hostgroup; action=add or action=update). + default: "" + properties: + description: + - A dictionary of properties to set on the LogicMonitor host or host group. + - Optional for managing hosts and host groups (target=host or target=hostgroup; action=add or action=update). + - This parameter will add or update existing properties in your LogicMonitor account. + default: {} + groups: + description: + - A list of groups that the host should be a member of. + - Optional for managing hosts (target=host; action=add or action=update). + default: [] + id: + description: + - ID of the datasource to target. + - Required for management of LogicMonitor datasources (target=datasource). + fullpath: + description: + - The fullpath of the host group object you would like to manage. + - Recommend running on a single Ansible host. + - Required for management of LogicMonitor host groups (target=hostgroup). + alertenable: + description: + - A boolean flag to turn alerting on or off for an object. + - Optional for managing all hosts (action=add or action=update). + type: bool + default: 'yes' + starttime: + description: + - The time that the Scheduled Down Time (SDT) should begin. + - Optional for managing SDT (action=sdt). + - Y-m-d H:M + default: Now + duration: + description: + - The duration (minutes) of the Scheduled Down Time (SDT). + - Optional for putting an object into SDT (action=sdt). + default: 30 +... 
+'''
+EXAMPLES = '''
+# example of adding a new LogicMonitor collector to these devices
+---
+- hosts: collectors
+  remote_user: '{{ username }}'
+  vars:
+    company: mycompany
+    user: myusername
+    password: mypassword
+  tasks:
+  - name: Deploy/verify LogicMonitor collectors
+    become: yes
+    logicmonitor:
+      target: collector
+      action: add
+      company: '{{ company }}'
+      user: '{{ user }}'
+      password: '{{ password }}'
+
+# example of adding a list of hosts into monitoring
+---
+- hosts: hosts
+  remote_user: '{{ username }}'
+  vars:
+    company: mycompany
+    user: myusername
+    password: mypassword
+  tasks:
+  - name: Deploy LogicMonitor Host
+    # All tasks except for target=collector should use delegate_to: localhost
+    logicmonitor:
+      target: host
+      action: add
+      collector: mycompany-Collector
+      company: '{{ company }}'
+      user: '{{ user }}'
+      password: '{{ password }}'
+      groups: /servers/production,/datacenter1
+      properties:
+        snmp.community: secret
+        dc: 1
+        type: prod
+    delegate_to: localhost
+
+# example of putting a datasource in SDT
+---
+- hosts: localhost
+  remote_user: '{{ username }}'
+  vars:
+    company: mycompany
+    user: myusername
+    password: mypassword
+  tasks:
+  - name: SDT a datasource
+    # All tasks except for target=collector should use delegate_to: localhost
+    logicmonitor:
+      target: datasource
+      action: sdt
+      id: 123
+      duration: 3000
+      starttime: '2017-03-04 05:06'
+      company: '{{ company }}'
+      user: '{{ user }}'
+      password: '{{ password }}'
+
+# example of creating a hostgroup
+---
+- hosts: localhost
+  remote_user: '{{ username }}'
+  vars:
+    company: mycompany
+    user: myusername
+    password: mypassword
+  tasks:
+  - name: Create a host group
+    # All tasks except for target=collector should use delegate_to: localhost
+    logicmonitor:
+      target: hostgroup
+      action: add
+      fullpath: /servers/development
+      company: '{{ company }}'
+      user: '{{ user }}'
+      password: '{{ password }}'
+      properties:
+        snmp.community: commstring
+        type: dev
+
+# example of putting a list of hosts into SDT
+---
+- hosts: hosts
+  remote_user: '{{ username }}'
+  vars:
+    company: mycompany
+    user: myusername
+    password: mypassword
+  tasks:
+  - name: SDT hosts
+    # All tasks except for target=collector should use delegate_to: localhost
+    logicmonitor:
+      target: host
+      action: sdt
+      duration: 3000
+      starttime: '2016-11-10 09:08'
+      company: '{{ company }}'
+      user: '{{ user }}'
+      password: '{{ password }}'
+      collector: mycompany-Collector
+    delegate_to: localhost
+
+# example of putting a host group in SDT
+---
+- hosts: localhost
+  remote_user: '{{ username }}'
+  vars:
+    company: mycompany
+    user: myusername
+    password: mypassword
+  tasks:
+  - name: SDT a host group
+    # All tasks except for target=collector should use delegate_to: localhost
+    logicmonitor:
+      target: hostgroup
+      action: sdt
+      fullpath: /servers/development
+      duration: 3000
+      starttime: '2017-03-04 05:06'
+      company: '{{ company }}'
+      user: '{{ user }}'
+      password: '{{ password }}'
+
+# example of updating a list of hosts
+---
+- hosts: hosts
+  remote_user: '{{ username }}'
+  vars:
+    company: mycompany
+    user: myusername
+    password: mypassword
+  tasks:
+  - name: Update a list of hosts
+    # All tasks except for target=collector should use delegate_to: localhost
+    logicmonitor:
+      target: host
+      action: update
+      company: '{{ company }}'
+      user: '{{ user }}'
+      password: '{{ password }}'
+      collector: mycompany-Collector
+      groups: /servers/production,/datacenter5
+      properties:
+        snmp.community: commstring
+        dc: 5
+    delegate_to: localhost
+
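+# The next example is an editorial addition rather than part of the original
+# module documentation: it sketches how the documented 'alertenable' option
+# (a boolean, default 'yes') might be used to switch alerting off while
+# updating a host. Collector and account values shown are placeholders.
+---
+- hosts: hosts
+  remote_user: '{{ username }}'
+  vars:
+    company: mycompany
+    user: myusername
+    password: mypassword
+  tasks:
+  - name: Update a host and disable alerting on it
+    # All tasks except for target=collector should use delegate_to: localhost
+    logicmonitor:
+      target: host
+      action: update
+      company: '{{ company }}'
+      user: '{{ user }}'
+      password: '{{ password }}'
+      collector: mycompany-Collector
+      alertenable: no
+    delegate_to: localhost
+
+# example of updating a hostgroup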
+---
+- hosts: hosts
+  remote_user: '{{ username }}'
+  vars:
+    company: mycompany
+    user: myusername
+    password: mypassword
+  tasks:
+  - name: Update a host group
+    # All tasks except for target=collector should use delegate_to: localhost
+    logicmonitor:
+      target: hostgroup
+      action: update
+      fullpath: /servers/development
+      company: '{{ company }}'
+      user: '{{ user }}'
+      password: '{{ password }}'
+      properties:
+        snmp.community: hg
+        type: dev
+        status: test
+    delegate_to: localhost
+
+# example of removing a list of hosts from monitoring
+---
+- hosts: hosts
+  remote_user: '{{ username }}'
+  vars:
+    company: mycompany
+    user: myusername
+    password: mypassword
+  tasks:
+  - name: Remove LogicMonitor hosts
+    # All tasks except for target=collector should use delegate_to: localhost
+    logicmonitor:
+      target: host
+      action: remove
+      company: '{{ company }}'
+      user: '{{ user }}'
+      password: '{{ password }}'
+      collector: mycompany-Collector
+    delegate_to: localhost
+
+# example of removing host groups
+---
+- hosts: hosts
+  remote_user: '{{ username }}'
+  vars:
+    company: mycompany
+    user: myusername
+    password: mypassword
+  tasks:
+  - name: Remove LogicMonitor development servers hostgroup
+    # All tasks except for target=collector should use delegate_to: localhost
+    logicmonitor:
+      target: hostgroup
+      action: remove
+      company: '{{ company }}'
+      user: '{{ user }}'
+      password: '{{ password }}'
+      fullpath: /servers/development
+    delegate_to: localhost
+  - name: Remove LogicMonitor servers hostgroup
+    # All tasks except for target=collector should use delegate_to: localhost
+    logicmonitor:
+      target: hostgroup
+      action: remove
+      company: '{{ company }}'
+      user: '{{ user }}'
+      password: '{{ password }}'
+      fullpath: /servers
+    delegate_to: localhost
+  - name: Remove LogicMonitor datacenter1 hostgroup
+    # All tasks except for target=collector should use delegate_to: localhost
+    logicmonitor:
+      target: hostgroup
+      action: remove
+      company: '{{ company }}'
+      user: '{{ user }}'
+      password: '{{ password }}'
+      fullpath: /datacenter1
+    delegate_to: localhost
+  - name: Remove LogicMonitor datacenter5 hostgroup
+    # All tasks except for target=collector should use delegate_to: localhost
+    logicmonitor:
+      target: hostgroup
+      action: remove
+      company: '{{ company }}'
+      user: '{{ user }}'
+      password: '{{ password }}'
+      fullpath: /datacenter5
+    delegate_to: localhost
+
+# example of removing LogicMonitor collectors from these devices
+---
+- hosts: collectors
+  remote_user: '{{ username }}'
+  vars:
+    company: mycompany
+    user: myusername
+    password: mypassword
+  tasks:
+  - name: Remove LogicMonitor collectors
+    become: yes
+    logicmonitor:
+      target: collector
+      action: remove
+      company: '{{ company }}'
+      user: '{{ user }}'
+      password: '{{ password }}'
+
+# complete example
+---
+- hosts: localhost
+  remote_user: '{{ username }}'
+  vars:
+    company: mycompany
+    user: myusername
+    password: mypassword
+  tasks:
+  - name: Create a host group
+    logicmonitor:
+      target: hostgroup
+      action: add
+      fullpath: /servers/production/database
+      company: '{{ company }}'
+      user: '{{ user }}'
+      password: '{{ password }}'
+      properties:
+        snmp.community: commstring
+  - name: SDT a host group
+    logicmonitor:
+      target: hostgroup
+      action: sdt
+      fullpath: /servers/production/web
+      duration: 3000
+      starttime: '2012-03-04 05:06'
+      company: '{{ company }}'
+      user: '{{ user }}'
+      password: '{{ password }}'
+
+- hosts: collectors
+  remote_user: '{{ username }}'
+  vars:
+    company: mycompany
+    user: myusername
+    password: mypassword
+  tasks:
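+  # Editorial addition, not part of the original examples: the module
+  # declares supports_check_mode=True, so a task can preview whether a
+  # change would be made by setting Ansible's task-level check_mode flag.
+  - name: Preview collector deployment without changing anything
+    logicmonitor:
+      target: collector
+      action: add
+      company: '{{ company }}'
+      user: '{{ user }}'
+      password: '{{ password }}'
+    check_mode: yes
+  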
- name: Deploy/verify LogicMonitor collectors + logicmonitor: + target: collector + action: add + company: '{{ company }}' + user: '{{ user }}' + password: '{{ password }}' + - name: Place LogicMonitor collectors into 30 minute Scheduled downtime + logicmonitor: + target: collector + action: sdt + company: '{{ company }}' + user: '{{ user }}' + password: '{{ password }}' + - name: Deploy LogicMonitor Host + logicmonitor: + target: host + action: add + collector: agent1.ethandev.com + company: '{{ company }}' + user: '{{ user }}' + password: '{{ password }}' + properties: + snmp.community: commstring + dc: 1 + groups: /servers/production/collectors, /datacenter1 + delegate_to: localhost + +- hosts: database-servers + remote_user: '{{ username }}' + vars: + company: mycompany + user: myusername + password: mypassword + tasks: + - name: deploy logicmonitor hosts + logicmonitor: + target: host + action: add + collector: monitoring.dev.com + company: '{{ company }}' + user: '{{ user }}' + password: '{{ password }}' + properties: + snmp.community: commstring + type: db + dc: 1 + groups: /servers/production/database, /datacenter1 + delegate_to: localhost + - name: schedule 5 hour downtime for 2012-11-10 09:08 + logicmonitor: + target: host + action: sdt + duration: 3000 + starttime: '2012-11-10 09:08' + company: '{{ company }}' + user: '{{ user }}' + password: '{{ password }}' + delegate_to: localhost +''' + +import datetime +import json +import os +import platform +import socket +import sys +import types + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.six.moves.urllib.parse import urlencode +from ansible.module_utils.urls import open_url + + +class LogicMonitor(object): + + def __init__(self, module, **params): + self.__version__ = "1.0-python" + self.module = module + self.module.debug("Instantiating LogicMonitor object") + + self.check_mode = False + self.company = params["company"] + self.user = params["user"] + self.password = params["password"] + self.fqdn = socket.getfqdn() + self.lm_url = "logicmonitor.com/santaba" + self.__version__ = self.__version__ + "-ansible-module" + + def rpc(self, action, params): + """Make a call to the LogicMonitor RPC library + and return the response""" + self.module.debug("Running LogicMonitor.rpc") + + param_str = urlencode(params) + creds = urlencode( + {"c": self.company, + "u": self.user, + "p": self.password}) + + if param_str: + param_str = param_str + "&" + + param_str = param_str + creds + + try: + url = ("https://" + self.company + "." + self.lm_url + + "/rpc/" + action + "?" + param_str) + + # Set custom LogicMonitor header with version + headers = {"X-LM-User-Agent": self.__version__} + + # Set headers + f = open_url(url, headers=headers) + + raw = f.read() + resp = json.loads(raw) + if resp["status"] == 403: + self.module.debug("Authentication failed.") + self.fail(msg="Error: " + resp["errmsg"]) + else: + return raw + except IOError as ioe: + self.fail(msg="Error: Exception making RPC call to " + + "https://" + self.company + "." 
+ self.lm_url + + "/rpc/" + action + "\nException" + str(ioe)) + + def do(self, action, params): + """Make a call to the LogicMonitor + server \"do\" function""" + self.module.debug("Running LogicMonitor.do...") + + param_str = urlencode(params) + creds = (urlencode( + {"c": self.company, + "u": self.user, + "p": self.password})) + + if param_str: + param_str = param_str + "&" + param_str = param_str + creds + + try: + self.module.debug("Attempting to open URL: " + + "https://" + self.company + "." + self.lm_url + + "/do/" + action + "?" + param_str) + f = open_url( + "https://" + self.company + "." + self.lm_url + + "/do/" + action + "?" + param_str) + return f.read() + except IOError as ioe: + self.fail(msg="Error: Exception making RPC call to " + + "https://" + self.company + "." + self.lm_url + + "/do/" + action + "\nException" + str(ioe)) + + def get_collectors(self): + """Returns a JSON object containing a list of + LogicMonitor collectors""" + self.module.debug("Running LogicMonitor.get_collectors...") + + self.module.debug("Making RPC call to 'getAgents'") + resp = self.rpc("getAgents", {}) + resp_json = json.loads(resp) + + if resp_json["status"] == 200: + self.module.debug("RPC call succeeded") + return resp_json["data"] + else: + self.fail(msg=resp) + + def get_host_by_hostname(self, hostname, collector): + """Returns a host object for the host matching the + specified hostname""" + self.module.debug("Running LogicMonitor.get_host_by_hostname...") + + self.module.debug("Looking for hostname " + hostname) + self.module.debug("Making RPC call to 'getHosts'") + hostlist_json = json.loads(self.rpc("getHosts", {"hostGroupId": 1})) + + if collector: + if hostlist_json["status"] == 200: + self.module.debug("RPC call succeeded") + + hosts = hostlist_json["data"]["hosts"] + + self.module.debug( + "Looking for host matching: hostname " + hostname + + " and collector " + str(collector["id"])) + + for host in hosts: + if (host["hostName"] == hostname and + host["agentId"] == collector["id"]): + + self.module.debug("Host match found") + return host + self.module.debug("No host match found") + return None + else: + self.module.debug("RPC call failed") + self.module.debug(hostlist_json) + else: + self.module.debug("No collector specified") + return None + + def get_host_by_displayname(self, displayname): + """Returns a host object for the host matching the + specified display name""" + self.module.debug("Running LogicMonitor.get_host_by_displayname...") + + self.module.debug("Looking for displayname " + displayname) + self.module.debug("Making RPC call to 'getHost'") + host_json = (json.loads(self.rpc("getHost", + {"displayName": displayname}))) + + if host_json["status"] == 200: + self.module.debug("RPC call succeeded") + return host_json["data"] + else: + self.module.debug("RPC call failed") + self.module.debug(host_json) + return None + + def get_collector_by_description(self, description): + """Returns a JSON collector object for the collector + matching the specified FQDN (description)""" + self.module.debug( + "Running LogicMonitor.get_collector_by_description..." 
+        )
+
+        collector_list = self.get_collectors()
+        if collector_list is not None:
+            self.module.debug("Looking for collector with description " +
+                              description)
+            for collector in collector_list:
+                if collector["description"] == description:
+                    self.module.debug("Collector match found")
+                    return collector
+        self.module.debug("No collector match found")
+        return None
+
+    def get_group(self, fullpath):
+        """Returns a JSON group object for the group matching the
+        specified path"""
+        self.module.debug("Running LogicMonitor.get_group...")
+
+        self.module.debug("Making RPC call to getHostGroups")
+        resp = json.loads(self.rpc("getHostGroups", {}))
+
+        if resp["status"] == 200:
+            self.module.debug("RPC call succeeded")
+            groups = resp["data"]
+
+            self.module.debug("Looking for group matching " + fullpath)
+            for group in groups:
+                if group["fullPath"] == fullpath.lstrip('/'):
+                    self.module.debug("Group match found")
+                    return group
+
+            self.module.debug("No group match found")
+            return None
+        else:
+            self.module.debug("RPC call failed")
+            self.module.debug(resp)
+
+        return None
+
+    def create_group(self, fullpath):
+        """Recursively create a path of host groups.
+        Returns the id of the newly created hostgroup"""
+        self.module.debug("Running LogicMonitor.create_group...")
+
+        res = self.get_group(fullpath)
+        if res:
+            self.module.debug("Group " + fullpath + " exists.")
+            return res["id"]
+
+        if fullpath == "/":
+            self.module.debug("Specified group is root. Doing nothing.")
+            return 1
+        else:
+            self.module.debug("Creating group named " + fullpath)
+            self.module.debug("System changed")
+            self.change = True
+
+            if self.check_mode:
+                self.exit(changed=True)
+
+            parentpath, name = fullpath.rsplit('/', 1)
+            parentgroup = self.get_group(parentpath)
+
+            parentid = 1
+
+            if parentpath == "":
+                parentid = 1
+            elif parentgroup:
+                parentid = parentgroup["id"]
+            else:
+                parentid = self.create_group(parentpath)
+
+            h = None
+
+            # Determine if we're creating a group from host or hostgroup class
+            if hasattr(self, '_build_host_group_hash'):
+                h = self._build_host_group_hash(
+                    fullpath,
+                    self.description,
+                    self.properties,
+                    self.alertenable)
+                h["name"] = name
+                h["parentId"] = parentid
+            else:
+                h = {"name": name,
+                     "parentId": parentid,
+                     "alertEnable": True,
+                     "description": ""}
+
+            self.module.debug("Making RPC call to 'addHostGroup'")
+            resp = json.loads(
+                self.rpc("addHostGroup", h))
+
+            if resp["status"] == 200:
+                self.module.debug("RPC call succeeded")
+                return resp["data"]["id"]
+            elif resp["errmsg"] == "The record already exists":
+                self.module.debug("The hostgroup already exists")
+                group = self.get_group(fullpath)
+                return group["id"]
+            else:
+                self.module.debug("RPC call failed")
+                self.fail(
+                    msg="Error: unable to create new hostgroup \"" +
+                    name + "\".\n" + resp["errmsg"])
+
+    def fail(self, msg):
+        self.module.fail_json(msg=msg, changed=self.change, failed=True)
+
+    def exit(self, changed):
+        # 'changed' may be a boolean, so convert it before concatenating
+        self.module.debug("Changed: " + str(changed))
+        self.module.exit_json(changed=changed, success=True)
+
+    def output_info(self, info):
+        self.module.debug("Registering properties as Ansible facts")
+        self.module.exit_json(changed=False, ansible_facts=info)
+
+
+class Collector(LogicMonitor):
+
+    def __init__(self, params, module=None):
+        """Initializer for the LogicMonitor Collector object"""
+        self.change = False
+        self.params = params
+
+        LogicMonitor.__init__(self, module, **params)
+        self.module.debug("Instantiating Collector object")
+
+        if self.params['description']:
+            self.description = 
self.params['description'] + else: + self.description = self.fqdn + + self.info = self._get() + self.installdir = "/usr/local/logicmonitor" + self.platform = platform.system() + self.is_64bits = sys.maxsize > 2**32 + self.duration = self.params['duration'] + self.starttime = self.params['starttime'] + + if self.info is None: + self.id = None + else: + self.id = self.info["id"] + + def create(self): + """Idempotent function to make sure that there is + a running collector installed and registered""" + self.module.debug("Running Collector.create...") + + self._create() + self.get_installer_binary() + self.install() + + def remove(self): + """Idempotent function to make sure that there is + not a running collector installed and registered""" + self.module.debug("Running Collector.destroy...") + + self._unreigster() + self.uninstall() + + def get_installer_binary(self): + """Download the LogicMonitor collector installer binary""" + self.module.debug("Running Collector.get_installer_binary...") + + arch = 32 + + if self.is_64bits: + self.module.debug("64 bit system") + arch = 64 + else: + self.module.debug("32 bit system") + + if self.platform == "Linux" and self.id is not None: + self.module.debug("Platform is Linux") + self.module.debug("Agent ID is " + str(self.id)) + + installfilepath = (self.installdir + + "/logicmonitorsetup" + + str(self.id) + "_" + str(arch) + + ".bin") + + self.module.debug("Looking for existing installer at " + + installfilepath) + if not os.path.isfile(installfilepath): + self.module.debug("No previous installer found") + self.module.debug("System changed") + self.change = True + + if self.check_mode: + self.exit(changed=True) + + self.module.debug("Downloading installer file") + # attempt to create the install dir before download + self.module.run_command("mkdir " + self.installdir) + + try: + installer = (self.do("logicmonitorsetup", + {"id": self.id, + "arch": arch})) + with open(installfilepath, "w") as write_file: + write_file.write(installer) + except Exception: + self.fail(msg="Unable to open installer file for writing") + else: + self.module.debug("Collector installer already exists") + return installfilepath + + elif self.id is None: + self.fail( + msg="Error: There is currently no collector " + + "associated with this device. 
To download " + + " the installer, first create a collector " + + "for this device.") + elif self.platform != "Linux": + self.fail( + msg="Error: LogicMonitor Collector must be " + + "installed on a Linux device.") + else: + self.fail( + msg="Error: Unable to retrieve the installer from the server") + + def install(self): + """Execute the LogicMonitor installer if not + already installed""" + self.module.debug("Running Collector.install...") + + if self.platform == "Linux": + self.module.debug("Platform is Linux") + + installer = self.get_installer_binary() + + if self.info is None: + self.module.debug("Retrieving collector information") + self.info = self._get() + + if not os.path.exists(self.installdir + "/agent"): + self.module.debug("System changed") + self.change = True + + if self.check_mode: + self.exit(changed=True) + + self.module.debug("Setting installer file permissions") + os.chmod(installer, 484) # decimal for 0o744 + + self.module.debug("Executing installer") + ret_code, out, err = self.module.run_command(installer + " -y") + + if ret_code != 0: + self.fail(msg="Error: Unable to install collector: " + err) + else: + self.module.debug("Collector installed successfully") + else: + self.module.debug("Collector already installed") + else: + self.fail( + msg="Error: LogicMonitor Collector must be " + + "installed on a Linux device") + + def uninstall(self): + """Uninstall LogicMontitor collector from the system""" + self.module.debug("Running Collector.uninstall...") + + uninstallfile = self.installdir + "/agent/bin/uninstall.pl" + + if os.path.isfile(uninstallfile): + self.module.debug("Collector uninstall file exists") + self.module.debug("System changed") + self.change = True + + if self.check_mode: + self.exit(changed=True) + + self.module.debug("Running collector uninstaller") + ret_code, out, err = self.module.run_command(uninstallfile) + + if ret_code != 0: + self.fail( + msg="Error: Unable to uninstall collector: " + err) + else: + self.module.debug("Collector successfully uninstalled") + else: + if os.path.exists(self.installdir + "/agent"): + (self.fail( + msg="Unable to uninstall LogicMonitor " + + "Collector. Can not find LogicMonitor " + + "uninstaller.")) + + def sdt(self): + """Create a scheduled down time + (maintenance window) for this host""" + self.module.debug("Running Collector.sdt...") + + self.module.debug("System changed") + self.change = True + + if self.check_mode: + self.exit(changed=True) + + duration = self.duration + starttime = self.starttime + offsetstart = starttime + + if starttime: + self.module.debug("Start time specified") + start = datetime.datetime.strptime(starttime, '%Y-%m-%d %H:%M') + offsetstart = start + else: + self.module.debug("No start time specified. 
Using default.") + start = datetime.datetime.utcnow() + + # Use user UTC offset + self.module.debug("Making RPC call to 'getTimeZoneSetting'") + accountresp = json.loads(self.rpc("getTimeZoneSetting", {})) + + if accountresp["status"] == 200: + self.module.debug("RPC call succeeded") + + offset = accountresp["data"]["offset"] + offsetstart = start + datetime.timedelta(0, offset) + else: + self.fail(msg="Error: Unable to retrieve timezone offset") + + offsetend = offsetstart + datetime.timedelta(0, int(duration) * 60) + + h = {"agentId": self.id, + "type": 1, + "notifyCC": True, + "year": offsetstart.year, + "month": offsetstart.month - 1, + "day": offsetstart.day, + "hour": offsetstart.hour, + "minute": offsetstart.minute, + "endYear": offsetend.year, + "endMonth": offsetend.month - 1, + "endDay": offsetend.day, + "endHour": offsetend.hour, + "endMinute": offsetend.minute} + + self.module.debug("Making RPC call to 'setAgentSDT'") + resp = json.loads(self.rpc("setAgentSDT", h)) + + if resp["status"] == 200: + self.module.debug("RPC call succeeded") + return resp["data"] + else: + self.module.debug("RPC call failed") + self.fail(msg=resp["errmsg"]) + + def site_facts(self): + """Output current properties information for the Collector""" + self.module.debug("Running Collector.site_facts...") + + if self.info: + self.module.debug("Collector exists") + props = self.get_properties(True) + + self.output_info(props) + else: + self.fail(msg="Error: Collector doesn't exit.") + + def _get(self): + """Returns a JSON object representing this collector""" + self.module.debug("Running Collector._get...") + collector_list = self.get_collectors() + + if collector_list is not None: + self.module.debug("Collectors returned") + for collector in collector_list: + if collector["description"] == self.description: + return collector + else: + self.module.debug("No collectors returned") + return None + + def _create(self): + """Create a new collector in the associated + LogicMonitor account""" + self.module.debug("Running Collector._create...") + + if self.platform == "Linux": + self.module.debug("Platform is Linux") + ret = self.info or self._get() + + if ret is None: + self.change = True + self.module.debug("System changed") + + if self.check_mode: + self.exit(changed=True) + + h = {"autogen": True, + "description": self.description} + + self.module.debug("Making RPC call to 'addAgent'") + create = (json.loads(self.rpc("addAgent", h))) + + if create["status"] == 200: + self.module.debug("RPC call succeeded") + self.info = create["data"] + self.id = create["data"]["id"] + return create["data"] + else: + self.fail(msg=create["errmsg"]) + else: + self.info = ret + self.id = ret["id"] + return ret + else: + self.fail( + msg="Error: LogicMonitor Collector must be " + + "installed on a Linux device.") + + def _unreigster(self): + """Delete this collector from the associated + LogicMonitor account""" + self.module.debug("Running Collector._unreigster...") + + if self.info is None: + self.module.debug("Retrieving collector information") + self.info = self._get() + + if self.info is not None: + self.module.debug("Collector found") + self.module.debug("System changed") + self.change = True + + if self.check_mode: + self.exit(changed=True) + + self.module.debug("Making RPC call to 'deleteAgent'") + delete = json.loads(self.rpc("deleteAgent", + {"id": self.id})) + + if delete["status"] == 200: + self.module.debug("RPC call succeeded") + return delete + else: + # The collector couldn't unregister. 
Start the service again
+                self.module.debug("Error unregistering collector. " +
+                                  delete["errmsg"])
+                self.fail(msg=delete["errmsg"])
+        else:
+            self.module.debug("Collector not found")
+            return None
+
+
+class Host(LogicMonitor):
+
+    def __init__(self, params, module=None):
+        """Initializer for the LogicMonitor host object"""
+        self.change = False
+        self.params = params
+        self.collector = None
+
+        LogicMonitor.__init__(self, module, **self.params)
+        self.module.debug("Instantiating Host object")
+
+        if self.params["hostname"]:
+            self.module.debug("Hostname is " + self.params["hostname"])
+            self.hostname = self.params['hostname']
+        else:
+            self.module.debug("No hostname specified. Using " + self.fqdn)
+            self.hostname = self.fqdn
+
+        if self.params["displayname"]:
+            self.module.debug("Display name is " + self.params["displayname"])
+            self.displayname = self.params['displayname']
+        else:
+            self.module.debug("No display name specified. Using " + self.fqdn)
+            self.displayname = self.fqdn
+
+        # Attempt to find host information via the display name or hostname
+        self.module.debug("Attempting to find host by displayname " +
+                          self.displayname)
+        info = self.get_host_by_displayname(self.displayname)
+
+        if info is not None:
+            self.module.debug("Host found by displayname")
+            # Use the host information to grab the collector description if
+            # one was not provided. (self.params is a plain dict, so test the
+            # key's value rather than using hasattr, which is always False.)
+            if (not self.params["collector"] and
+                    "agentDescription" in info):
+                self.module.debug("Setting collector from host response. " +
+                                  "Collector " + info["agentDescription"])
+                self.params["collector"] = info["agentDescription"]
+        else:
+            self.module.debug("Host not found by displayname")
+
+        # At this point, a valid collector description is required for success
+        # Check that the description exists or fail
+        if self.params["collector"]:
+            self.module.debug(
+                "Collector specified is " +
+                self.params["collector"]
+            )
+            self.collector = (self.get_collector_by_description(
+                self.params["collector"]))
+        else:
+            self.fail(msg="No collector specified.")
+
+        # If the host wasn't found via displayname, attempt by hostname
+        if info is None:
+            self.module.debug("Attempting to find host by hostname " +
+                              self.hostname)
+            info = self.get_host_by_hostname(self.hostname, self.collector)
+
+        self.info = info
+        self.properties = self.params["properties"]
+        self.description = self.params["description"]
+        self.starttime = self.params["starttime"]
+        self.duration = self.params["duration"]
+        self.alertenable = self.params["alertenable"]
+        if self.params["groups"] is not None:
+            self.groups = self._strip_groups(self.params["groups"])
+        else:
+            self.groups = None
+
+    def create(self):
+        """Idempotent function to create if missing,
+        update if changed, or skip"""
+        self.module.debug("Running Host.create...")
+
+        self.update()
+
+    def get_properties(self):
+        """Returns a hash of the properties
+        associated with this LogicMonitor host"""
+        self.module.debug("Running Host.get_properties...")
+
+        if self.info:
+            self.module.debug("Making RPC call to 'getHostProperties'")
+            properties_json = (json.loads(self.rpc("getHostProperties",
+                                                   {'hostId': self.info["id"],
+                                                    "filterSystemProperties": True})))
+
+            if properties_json["status"] == 200:
+                self.module.debug("RPC call succeeded")
+                return properties_json["data"]
+            else:
+                self.module.debug("Error: there was an issue retrieving the " +
+                                  "host properties")
+                self.module.debug(properties_json["errmsg"])
+
+                self.fail(msg=properties_json["status"])
+        else:
+            self.module.debug(
+                "Unable to find LogicMonitor host which 
matches " + + self.displayname + " (" + self.hostname + ")" + ) + return None + + def set_properties(self, propertyhash): + """update the host to have the properties + contained in the property hash""" + self.module.debug("Running Host.set_properties...") + self.module.debug("System changed") + self.change = True + + if self.check_mode: + self.exit(changed=True) + + self.module.debug("Assigning property hash to host object") + self.properties = propertyhash + + def add(self): + """Add this device to monitoring + in your LogicMonitor account""" + self.module.debug("Running Host.add...") + + if self.collector and not self.info: + self.module.debug("Host not registered. Registering.") + self.module.debug("System changed") + self.change = True + + if self.check_mode: + self.exit(changed=True) + + h = self._build_host_hash( + self.hostname, + self.displayname, + self.collector, + self.description, + self.groups, + self.properties, + self.alertenable) + + self.module.debug("Making RPC call to 'addHost'") + resp = json.loads(self.rpc("addHost", h)) + + if resp["status"] == 200: + self.module.debug("RPC call succeeded") + return resp["data"] + else: + self.module.debug("RPC call failed") + self.module.debug(resp) + return resp["errmsg"] + elif self.collector is None: + self.fail(msg="Specified collector doesn't exist") + else: + self.module.debug("Host already registered") + + def update(self): + """This method takes changes made to this host + and applies them to the corresponding host + in your LogicMonitor account.""" + self.module.debug("Running Host.update...") + + if self.info: + self.module.debug("Host already registered") + if self.is_changed(): + self.module.debug("System changed") + self.change = True + + if self.check_mode: + self.exit(changed=True) + + h = (self._build_host_hash( + self.hostname, + self.displayname, + self.collector, + self.description, + self.groups, + self.properties, + self.alertenable)) + h["id"] = self.info["id"] + h["opType"] = "replace" + + self.module.debug("Making RPC call to 'updateHost'") + resp = json.loads(self.rpc("updateHost", h)) + + if resp["status"] == 200: + self.module.debug("RPC call succeeded") + else: + self.module.debug("RPC call failed") + self.fail(msg="Error: unable to update the host.") + else: + self.module.debug( + "Host properties match supplied properties. " + + "No changes to make." + ) + return self.info + else: + self.module.debug("Host not registered. 
Registering") + self.module.debug("System changed") + self.change = True + + if self.check_mode: + self.exit(changed=True) + + return self.add() + + def remove(self): + """Remove this host from your LogicMonitor account""" + self.module.debug("Running Host.remove...") + + if self.info: + self.module.debug("Host registered") + self.module.debug("System changed") + self.change = True + + if self.check_mode: + self.exit(changed=True) + + self.module.debug("Making RPC call to 'deleteHost'") + resp = json.loads(self.rpc("deleteHost", + {"hostId": self.info["id"], + "deleteFromSystem": True, + "hostGroupId": 1})) + + if resp["status"] == 200: + self.module.debug(resp) + self.module.debug("RPC call succeeded") + return resp + else: + self.module.debug("RPC call failed") + self.module.debug(resp) + self.fail(msg=resp["errmsg"]) + + else: + self.module.debug("Host not registered") + + def is_changed(self): + """Return true if the host doesn't + match the LogicMonitor account""" + self.module.debug("Running Host.is_changed") + + ignore = ['system.categories', 'snmp.version'] + + hostresp = self.get_host_by_displayname(self.displayname) + + if hostresp is None: + hostresp = self.get_host_by_hostname(self.hostname, self.collector) + + if hostresp: + self.module.debug("Comparing simple host properties") + if hostresp["alertEnable"] != self.alertenable: + return True + + if hostresp["description"] != self.description: + return True + + if hostresp["displayedAs"] != self.displayname: + return True + + if (self.collector and + hasattr(self.collector, "id") and + hostresp["agentId"] != self.collector["id"]): + return True + + self.module.debug("Comparing groups.") + if self._compare_groups(hostresp) is True: + return True + + propresp = self.get_properties() + + if propresp: + self.module.debug("Comparing properties.") + if self._compare_props(propresp, ignore) is True: + return True + else: + self.fail( + msg="Error: Unknown error retrieving host properties") + + return False + else: + self.fail(msg="Error: Unknown error retrieving host information") + + def sdt(self): + """Create a scheduled down time + (maintenance window) for this host""" + self.module.debug("Running Host.sdt...") + if self.info: + self.module.debug("System changed") + self.change = True + + if self.check_mode: + self.exit(changed=True) + + duration = self.duration + starttime = self.starttime + offset = starttime + + if starttime: + self.module.debug("Start time specified") + start = datetime.datetime.strptime(starttime, '%Y-%m-%d %H:%M') + offsetstart = start + else: + self.module.debug("No start time specified. 
Using default.")
+            start = datetime.datetime.utcnow()
+
+            # Use user UTC offset
+            self.module.debug("Making RPC call to 'getTimeZoneSetting'")
+            accountresp = (json.loads(self.rpc("getTimeZoneSetting", {})))
+
+            if accountresp["status"] == 200:
+                self.module.debug("RPC call succeeded")
+
+                offset = accountresp["data"]["offset"]
+                offsetstart = start + datetime.timedelta(0, offset)
+            else:
+                self.fail(
+                    msg="Error: Unable to retrieve timezone offset")
+
+            offsetend = offsetstart + datetime.timedelta(0, int(duration) * 60)
+
+            h = {"hostId": self.info["id"],
+                 "type": 1,
+                 "year": offsetstart.year,
+                 "month": offsetstart.month - 1,
+                 "day": offsetstart.day,
+                 "hour": offsetstart.hour,
+                 "minute": offsetstart.minute,
+                 "endYear": offsetend.year,
+                 "endMonth": offsetend.month - 1,
+                 "endDay": offsetend.day,
+                 "endHour": offsetend.hour,
+                 "endMinute": offsetend.minute}
+
+            self.module.debug("Making RPC call to 'setHostSDT'")
+            resp = (json.loads(self.rpc("setHostSDT", h)))
+
+            if resp["status"] == 200:
+                self.module.debug("RPC call succeeded")
+                return resp["data"]
+            else:
+                self.module.debug("RPC call failed")
+                self.fail(msg=resp["errmsg"])
+        else:
+            self.fail(msg="Error: Host doesn't exist.")
+
+    def site_facts(self):
+        """Output current properties information for the Host"""
+        self.module.debug("Running Host.site_facts...")
+
+        if self.info:
+            self.module.debug("Host exists")
+            props = self.get_properties()
+
+            self.output_info(props)
+        else:
+            self.fail(msg="Error: Host doesn't exist.")
+
+    def _build_host_hash(self,
+                         hostname,
+                         displayname,
+                         collector,
+                         description,
+                         groups,
+                         properties,
+                         alertenable):
+        """Return a property formatted hash for the
+        creation of a host using the rpc function"""
+        self.module.debug("Running Host._build_host_hash...")
+
+        h = {}
+        h["hostName"] = hostname
+        h["displayedAs"] = displayname
+        h["alertEnable"] = alertenable
+
+        if collector:
+            self.module.debug("Collector property exists")
+            h["agentId"] = collector["id"]
+        else:
+            self.fail(
+                msg="Error: No collector found. Unable to build host hash.")
+
+        if description:
+            h["description"] = description
+
+        # 'groups is not []' always evaluates True (identity, not equality),
+        # so test truthiness instead
+        if groups:
+            self.module.debug("Group property exists")
+            groupids = ""
+
+            for group in groups:
+                groupids = groupids + str(self.create_group(group)) + ","
+
+            h["hostGroupIds"] = groupids.rstrip(',')
+
+        if properties:
+            self.module.debug("Properties hash exists")
+            propnum = 0
+            for key, value in properties.items():
+                h["propName" + str(propnum)] = key
+                h["propValue" + str(propnum)] = value
+                propnum = propnum + 1
+
+        return h
+
+    def _verify_property(self, propname):
+        """Check with LogicMonitor server to
+        verify property is unchanged"""
+        self.module.debug("Running Host._verify_property...")
+
+        if self.info:
+            self.module.debug("Host is registered")
+            if propname not in self.properties:
+                self.module.debug("Property " + propname + " does not exist")
+                return False
+            else:
+                self.module.debug("Property " + propname + " exists")
+                h = {"hostId": self.info["id"],
+                     "propName0": propname,
+                     "propValue0": self.properties[propname]}
+
+                self.module.debug("Making RPC call to 'verifyProperties'")
+                resp = json.loads(self.rpc('verifyProperties', h))
+
+                if resp["status"] == 200:
+                    self.module.debug("RPC call succeeded")
+                    return resp["data"]["match"]
+                else:
+                    self.fail(
+                        msg="Error: unable to get verification " +
+                            "from server.\n%s" % resp["errmsg"])
+        else:
+            self.fail(
+                msg="Error: Host doesn't exist. 
Unable to verify properties") + + def _compare_groups(self, hostresp): + """Function to compare the host's current + groups against provided groups""" + self.module.debug("Running Host._compare_groups") + + g = [] + fullpathinids = hostresp["fullPathInIds"] + self.module.debug("Building list of groups") + for path in fullpathinids: + if path != []: + h = {'hostGroupId': path[-1]} + + hgresp = json.loads(self.rpc("getHostGroup", h)) + + if (hgresp["status"] == 200 and + hgresp["data"]["appliesTo"] == ""): + + g.append(path[-1]) + + if self.groups is not None: + self.module.debug("Comparing group lists") + for group in self.groups: + groupjson = self.get_group(group) + + if groupjson is None: + self.module.debug("Group mismatch. No result.") + return True + elif groupjson['id'] not in g: + self.module.debug("Group mismatch. ID doesn't exist.") + return True + else: + g.remove(groupjson['id']) + + if g != []: + self.module.debug("Group mismatch. New ID exists.") + return True + self.module.debug("Groups match") + + def _compare_props(self, propresp, ignore): + """Function to compare the host's current + properties against provided properties""" + self.module.debug("Running Host._compare_props...") + p = {} + + self.module.debug("Creating list of properties") + for prop in propresp: + if prop["name"] not in ignore: + if ("*******" in prop["value"] and + self._verify_property(prop["name"])): + p[prop["name"]] = self.properties[prop["name"]] + else: + p[prop["name"]] = prop["value"] + + self.module.debug("Comparing properties") + # Iterate provided properties and compare to received properties + for prop in self.properties: + if (prop not in p or + p[prop] != self.properties[prop]): + self.module.debug("Properties mismatch") + return True + self.module.debug("Properties match") + + def _strip_groups(self, groups): + """Function to strip whitespace from group list. + This function provides the user some flexibility when + formatting group arguments """ + self.module.debug("Running Host._strip_groups...") + return map(lambda x: x.strip(), groups) + + +class Datasource(LogicMonitor): + + def __init__(self, params, module=None): + """Initializor for the LogicMonitor Datasource object""" + self.change = False + self.params = params + + LogicMonitor.__init__(self, module, **params) + self.module.debug("Instantiating Datasource object") + + self.id = self.params["id"] + self.starttime = self.params["starttime"] + self.duration = self.params["duration"] + + def sdt(self): + """Create a scheduled down time + (maintenance window) for this host""" + self.module.debug("Running Datasource.sdt...") + + self.module.debug("System changed") + self.change = True + + if self.check_mode: + self.exit(changed=True) + + duration = self.duration + starttime = self.starttime + offsetstart = starttime + + if starttime: + self.module.debug("Start time specified") + start = datetime.datetime.strptime(starttime, '%Y-%m-%d %H:%M') + offsetstart = start + else: + self.module.debug("No start time specified. 
Using default.") + start = datetime.datetime.utcnow() + + # Use user UTC offset + self.module.debug("Making RPC call to 'getTimeZoneSetting'") + accountresp = json.loads(self.rpc("getTimeZoneSetting", {})) + + if accountresp["status"] == 200: + self.module.debug("RPC call succeeded") + + offset = accountresp["data"]["offset"] + offsetstart = start + datetime.timedelta(0, offset) + else: + self.fail(msg="Error: Unable to retrieve timezone offset") + + offsetend = offsetstart + datetime.timedelta(0, int(duration) * 60) + + h = {"hostDataSourceId": self.id, + "type": 1, + "notifyCC": True, + "year": offsetstart.year, + "month": offsetstart.month - 1, + "day": offsetstart.day, + "hour": offsetstart.hour, + "minute": offsetstart.minute, + "endYear": offsetend.year, + "endMonth": offsetend.month - 1, + "endDay": offsetend.day, + "endHour": offsetend.hour, + "endMinute": offsetend.minute} + + self.module.debug("Making RPC call to 'setHostDataSourceSDT'") + resp = json.loads(self.rpc("setHostDataSourceSDT", h)) + + if resp["status"] == 200: + self.module.debug("RPC call succeeded") + return resp["data"] + else: + self.module.debug("RPC call failed") + self.fail(msg=resp["errmsg"]) + + +class Hostgroup(LogicMonitor): + + def __init__(self, params, module=None): + """Initializor for the LogicMonitor host object""" + self.change = False + self.params = params + + LogicMonitor.__init__(self, module, **self.params) + self.module.debug("Instantiating Hostgroup object") + + self.fullpath = self.params["fullpath"] + self.info = self.get_group(self.fullpath) + self.properties = self.params["properties"] + self.description = self.params["description"] + self.starttime = self.params["starttime"] + self.duration = self.params["duration"] + self.alertenable = self.params["alertenable"] + + def create(self): + """Wrapper for self.update()""" + self.module.debug("Running Hostgroup.create...") + self.update() + + def get_properties(self, final=False): + """Returns a hash of the properties + associated with this LogicMonitor host""" + self.module.debug("Running Hostgroup.get_properties...") + + if self.info: + self.module.debug("Group found") + + self.module.debug("Making RPC call to 'getHostGroupProperties'") + properties_json = json.loads(self.rpc( + "getHostGroupProperties", + {'hostGroupId': self.info["id"], + "finalResult": final})) + + if properties_json["status"] == 200: + self.module.debug("RPC call succeeded") + return properties_json["data"] + else: + self.module.debug("RPC call failed") + self.fail(msg=properties_json["status"]) + else: + self.module.debug("Group not found") + return None + + def set_properties(self, propertyhash): + """Update the host to have the properties + contained in the property hash""" + self.module.debug("Running Hostgroup.set_properties") + + self.module.debug("System changed") + self.change = True + + if self.check_mode: + self.exit(changed=True) + + self.module.debug("Assigning property has to host object") + self.properties = propertyhash + + def add(self): + """Idempotent function to ensure that the host + group exists in your LogicMonitor account""" + self.module.debug("Running Hostgroup.add") + + if self.info is None: + self.module.debug("Group doesn't exist. 
Creating.") + self.module.debug("System changed") + self.change = True + + if self.check_mode: + self.exit(changed=True) + + self.create_group(self.fullpath) + self.info = self.get_group(self.fullpath) + + self.module.debug("Group created") + return self.info + else: + self.module.debug("Group already exists") + + def update(self): + """Idempotent function to ensure the host group settings + (alertenable, properties, etc) in the + LogicMonitor account match the current object.""" + self.module.debug("Running Hostgroup.update") + + if self.info: + if self.is_changed(): + self.module.debug("System changed") + self.change = True + + if self.check_mode: + self.exit(changed=True) + + h = self._build_host_group_hash( + self.fullpath, + self.description, + self.properties, + self.alertenable) + h["opType"] = "replace" + + if self.fullpath != "/": + h["id"] = self.info["id"] + + self.module.debug("Making RPC call to 'updateHostGroup'") + resp = json.loads(self.rpc("updateHostGroup", h)) + + if resp["status"] == 200: + self.module.debug("RPC call succeeded") + return resp["data"] + else: + self.module.debug("RPC call failed") + self.fail(msg="Error: Unable to update the " + + "host.\n" + resp["errmsg"]) + else: + self.module.debug( + "Group properties match supplied properties. " + + "No changes to make" + ) + return self.info + else: + self.module.debug("Group doesn't exist. Creating.") + + self.module.debug("System changed") + self.change = True + + if self.check_mode: + self.exit(changed=True) + + return self.add() + + def remove(self): + """Idempotent function to ensure the host group + does not exist in your LogicMonitor account""" + self.module.debug("Running Hostgroup.remove...") + + if self.info: + self.module.debug("Group exists") + self.module.debug("System changed") + self.change = True + + if self.check_mode: + self.exit(changed=True) + + self.module.debug("Making RPC call to 'deleteHostGroup'") + resp = json.loads(self.rpc("deleteHostGroup", + {"hgId": self.info["id"]})) + + if resp["status"] == 200: + self.module.debug(resp) + self.module.debug("RPC call succeeded") + return resp + elif resp["errmsg"] == "No such group": + self.module.debug("Group doesn't exist") + else: + self.module.debug("RPC call failed") + self.module.debug(resp) + self.fail(msg=resp["errmsg"]) + else: + self.module.debug("Group doesn't exist") + + def is_changed(self): + """Return true if the host doesn't match + the LogicMonitor account""" + self.module.debug("Running Hostgroup.is_changed...") + + ignore = [] + group = self.get_group(self.fullpath) + properties = self.get_properties() + + if properties is not None and group is not None: + self.module.debug("Comparing simple group properties") + if (group["alertEnable"] != self.alertenable or + group["description"] != self.description): + + return True + + p = {} + + self.module.debug("Creating list of properties") + for prop in properties: + if prop["name"] not in ignore: + if ("*******" in prop["value"] and + self._verify_property(prop["name"])): + + p[prop["name"]] = ( + self.properties[prop["name"]]) + else: + p[prop["name"]] = prop["value"] + + self.module.debug("Comparing properties") + if set(p) != set(self.properties): + return True + else: + self.module.debug("No property information received") + return False + + def sdt(self, duration=30, starttime=None): + """Create a scheduled down time + (maintenance window) for this host""" + self.module.debug("Running Hostgroup.sdt") + + self.module.debug("System changed") + self.change = True + + if 
self.check_mode: + self.exit(changed=True) + + duration = self.duration + starttime = self.starttime + offset = starttime + + if starttime: + self.module.debug("Start time specified") + start = datetime.datetime.strptime(starttime, '%Y-%m-%d %H:%M') + offsetstart = start + else: + self.module.debug("No start time specified. Using default.") + start = datetime.datetime.utcnow() + + # Use user UTC offset + self.module.debug("Making RPC call to 'getTimeZoneSetting'") + accountresp = json.loads(self.rpc("getTimeZoneSetting", {})) + + if accountresp["status"] == 200: + self.module.debug("RPC call succeeded") + + offset = accountresp["data"]["offset"] + offsetstart = start + datetime.timedelta(0, offset) + else: + self.fail( + msg="Error: Unable to retrieve timezone offset") + + offsetend = offsetstart + datetime.timedelta(0, int(duration) * 60) + + h = {"hostGroupId": self.info["id"], + "type": 1, + "year": offsetstart.year, + "month": offsetstart.month - 1, + "day": offsetstart.day, + "hour": offsetstart.hour, + "minute": offsetstart.minute, + "endYear": offsetend.year, + "endMonth": offsetend.month - 1, + "endDay": offsetend.day, + "endHour": offsetend.hour, + "endMinute": offsetend.minute} + + self.module.debug("Making RPC call to setHostGroupSDT") + resp = json.loads(self.rpc("setHostGroupSDT", h)) + + if resp["status"] == 200: + self.module.debug("RPC call succeeded") + return resp["data"] + else: + self.module.debug("RPC call failed") + self.fail(msg=resp["errmsg"]) + + def site_facts(self): + """Output current properties information for the Hostgroup""" + self.module.debug("Running Hostgroup.site_facts...") + + if self.info: + self.module.debug("Group exists") + props = self.get_properties(True) + + self.output_info(props) + else: + self.fail(msg="Error: Group doesn't exit.") + + def _build_host_group_hash(self, + fullpath, + description, + properties, + alertenable): + """Return a property formatted hash for the + creation of a hostgroup using the rpc function""" + self.module.debug("Running Hostgroup._build_host_hash") + + h = {} + h["alertEnable"] = alertenable + + if fullpath == "/": + self.module.debug("Group is root") + h["id"] = 1 + else: + self.module.debug("Determining group path") + parentpath, name = fullpath.rsplit('/', 1) + parent = self.get_group(parentpath) + + h["name"] = name + + if parent: + self.module.debug("Parent group " + + str(parent["id"]) + " found.") + h["parentID"] = parent["id"] + else: + self.module.debug("No parent group found. 
Using root.") + h["parentID"] = 1 + + if description: + self.module.debug("Description property exists") + h["description"] = description + + if properties != {}: + self.module.debug("Properties hash exists") + propnum = 0 + for key, value in properties.items(): + h["propName" + str(propnum)] = key + h["propValue" + str(propnum)] = value + propnum = propnum + 1 + + return h + + def _verify_property(self, propname): + """Check with LogicMonitor server + to verify property is unchanged""" + self.module.debug("Running Hostgroup._verify_property") + + if self.info: + self.module.debug("Group exists") + if propname not in self.properties: + self.module.debug("Property " + propname + " does not exist") + return False + else: + self.module.debug("Property " + propname + " exists") + h = {"hostGroupId": self.info["id"], + "propName0": propname, + "propValue0": self.properties[propname]} + + self.module.debug("Making RCP call to 'verifyProperties'") + resp = json.loads(self.rpc('verifyProperties', h)) + + if resp["status"] == 200: + self.module.debug("RPC call succeeded") + return resp["data"]["match"] + else: + self.fail( + msg="Error: unable to get verification " + + "from server.\n%s" % resp["errmsg"]) + else: + self.fail( + msg="Error: Group doesn't exist. Unable to verify properties") + + +def selector(module): + """Figure out which object and which actions + to take given the right parameters""" + + if module.params["target"] == "collector": + target = Collector(module.params, module) + elif module.params["target"] == "host": + # Make sure required parameter collector is specified + if ((module.params["action"] == "add" or + module.params["displayname"] is None) and + module.params["collector"] is None): + module.fail_json( + msg="Parameter 'collector' required.") + + target = Host(module.params, module) + elif module.params["target"] == "datasource": + # Validate target specific required parameters + if module.params["id"] is not None: + # make sure a supported action was specified + if module.params["action"] == "sdt": + target = Datasource(module.params, module) + else: + errmsg = ("Error: Unexpected action \"" + + module.params["action"] + "\" was specified.") + module.fail_json(msg=errmsg) + + elif module.params["target"] == "hostgroup": + # Validate target specific required parameters + if module.params["fullpath"] is not None: + target = Hostgroup(module.params, module) + else: + module.fail_json( + msg="Parameter 'fullpath' required for target 'hostgroup'") + else: + module.fail_json( + msg="Error: Unexpected target \"" + module.params["target"] + + "\" was specified.") + + if module.params["action"].lower() == "add": + action = target.create + elif module.params["action"].lower() == "remove": + action = target.remove + elif module.params["action"].lower() == "sdt": + action = target.sdt + elif module.params["action"].lower() == "update": + action = target.update + else: + errmsg = ("Error: Unexpected action \"" + module.params["action"] + + "\" was specified.") + module.fail_json(msg=errmsg) + + action() + module.exit_json(changed=target.change) + + +def main(): + TARGETS = [ + "collector", + "host", + "datasource", + "hostgroup"] + + ACTIONS = [ + "add", + "remove", + "sdt", + "update"] + + module = AnsibleModule( + argument_spec=dict( + target=dict(required=True, default=None, choices=TARGETS), + action=dict(required=True, default=None, choices=ACTIONS), + company=dict(required=True, default=None), + user=dict(required=True, default=None), + password=dict(required=True, 
default=None, no_log=True), + + collector=dict(required=False, default=None), + hostname=dict(required=False, default=None), + displayname=dict(required=False, default=None), + id=dict(required=False, default=None), + description=dict(required=False, default=""), + fullpath=dict(required=False, default=None), + starttime=dict(required=False, default=None), + duration=dict(required=False, default=30, type='int'), + properties=dict(required=False, default={}, type="dict"), + groups=dict(required=False, default=[], type="list"), + alertenable=dict(required=False, default="true", type="bool") + ), + supports_check_mode=True + ) + + selector(module) + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/monitoring/logicmonitor_facts.py b/plugins/modules/monitoring/logicmonitor_facts.py new file mode 100644 index 0000000000..bc20e8e82f --- /dev/null +++ b/plugins/modules/monitoring/logicmonitor_facts.py @@ -0,0 +1,563 @@ +#!/usr/bin/python + +# Copyright (C) 2015 LogicMonitor +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: logicmonitor_facts +short_description: Collect facts about LogicMonitor objects +description: + - LogicMonitor is a hosted, full-stack, infrastructure monitoring platform. + - This module collects facts about hosts and host groups within your LogicMonitor account. +author: [Ethan Culler-Mayeno (@ethanculler), Jeff Wozniak (@woz5999)] +notes: + - You must have an existing LogicMonitor account for this module to function. +requirements: ["An existing LogicMonitor account", "Linux"] +options: + target: + description: + - The LogicMonitor object you wish to manage. + required: true + choices: ['host', 'hostgroup'] + company: + description: + - The LogicMonitor account company name. If you would log in to your account at "superheroes.logicmonitor.com" you would use "superheroes". + required: true + user: + description: + - A LogicMonitor user name. The module will authenticate and perform actions on behalf of this user. + required: true + password: + description: + - The password for the chosen LogicMonitor User. + - If an md5 hash is used, the digest flag must be set to true. + required: true + collector: + description: + - The fully qualified domain name of a collector in your LogicMonitor account. + - This is optional for querying a LogicMonitor host when a displayname is specified. + - This is required for querying a LogicMonitor host when a displayname is not specified. + hostname: + description: + - The hostname of a host in your LogicMonitor account, or the desired hostname of a device to add into monitoring. + - Required for managing hosts (target=host). + default: 'hostname -f' + displayname: + description: + - The display name of a host in your LogicMonitor account or the desired display name of a device to add into monitoring. + default: 'hostname -f' + fullpath: + description: + - The fullpath of the hostgroup object you would like to manage. + - Recommend running on a single ansible host. + - Required for management of LogicMonitor host groups (target=hostgroup). +... 
+'''
+
+EXAMPLES = '''
+# Always run these modules on localhost, using delegate_to: localhost or local_action
+
+- name: query a list of hosts
+  logicmonitor_facts:
+    target: host
+    company: yourcompany
+    user: Luigi
+    password: ImaLuigi,number1!
+  delegate_to: localhost
+
+- name: query a host group
+  logicmonitor_facts:
+    target: hostgroup
+    fullpath: /servers/production
+    company: yourcompany
+    user: mario
+    password: itsame.Mario!
+  delegate_to: localhost
+'''
+
+
+RETURN = '''
+---
+    ansible_facts:
+        description: LogicMonitor properties set for the specified object
+        returned: success
+        type: list
+        example: >
+            {
+                "name": "dc",
+                "value": "1"
+            },
+            {
+                "name": "type",
+                "value": "prod"
+            },
+            {
+                "name": "system.categories",
+                "value": ""
+            },
+            {
+                "name": "snmp.community",
+                "value": "********"
+            }
+...
+'''
+
+import json
+import socket
+import types
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves.urllib.parse import urlencode
+from ansible.module_utils._text import to_native
+from ansible.module_utils.urls import open_url
+
+
+class LogicMonitor(object):
+
+    def __init__(self, module, **params):
+        self.__version__ = "1.0-python"
+        self.module = module
+        self.module.debug("Instantiating LogicMonitor object")
+
+        self.check_mode = False
+        self.company = params["company"]
+        self.user = params["user"]
+        self.password = params["password"]
+        self.fqdn = socket.getfqdn()
+        self.lm_url = "logicmonitor.com/santaba"
+        self.__version__ = self.__version__ + "-ansible-module"
+
+    def rpc(self, action, params):
+        """Make a call to the LogicMonitor RPC library
+        and return the response"""
+        self.module.debug("Running LogicMonitor.rpc")
+
+        param_str = urlencode(params)
+        creds = urlencode(
+            {"c": self.company,
+             "u": self.user,
+             "p": self.password})
+
+        if param_str:
+            param_str = param_str + "&"
+
+        param_str = param_str + creds
+
+        try:
+            url = ("https://" + self.company + "." + self.lm_url +
+                   "/rpc/" + action + "?" + param_str)
+
+            # Set custom LogicMonitor header with version
+            headers = {"X-LM-User-Agent": self.__version__}
+
+            # Set headers
+            f = open_url(url, headers=headers)
+
+            raw = f.read()
+            resp = json.loads(raw)
+            if resp["status"] == 403:
+                self.module.debug("Authentication failed.")
+                self.fail(msg="Error: " + resp["errmsg"])
+            else:
+                return raw
+        except IOError as ioe:
+            self.fail(msg="Error: Exception making RPC call to " +
+                          "https://" + self.company + "." + self.lm_url +
+                          "/rpc/" + action + "\nException: " + to_native(ioe))
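+    # Illustration (hypothetical values): with company "acme", action "getHosts"
+    # and params {"hostGroupId": 1}, rpc() above issues a GET request to:
+    #   https://acme.logicmonitor.com/santaba/rpc/getHosts?hostGroupId=1&c=acme&u=<user>&p=<password>
+    # and returns the raw JSON body unless the server reports status 403.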
+    def get_collectors(self):
+        """Returns a JSON object containing a list of
+        LogicMonitor collectors"""
+        self.module.debug("Running LogicMonitor.get_collectors...")
+
+        self.module.debug("Making RPC call to 'getAgents'")
+        resp = self.rpc("getAgents", {})
+        resp_json = json.loads(resp)
+
+        if resp_json["status"] == 200:
+            self.module.debug("RPC call succeeded")
+            return resp_json["data"]
+        else:
+            self.fail(msg=resp)
+
+    def get_host_by_hostname(self, hostname, collector):
+        """Returns a host object for the host matching the
+        specified hostname"""
+        self.module.debug("Running LogicMonitor.get_host_by_hostname...")
+
+        self.module.debug("Looking for hostname " + hostname)
+        self.module.debug("Making RPC call to 'getHosts'")
+        hostlist_json = json.loads(self.rpc("getHosts", {"hostGroupId": 1}))
+
+        if collector:
+            if hostlist_json["status"] == 200:
+                self.module.debug("RPC call succeeded")
+
+                hosts = hostlist_json["data"]["hosts"]
+
+                self.module.debug(
+                    "Looking for host matching: hostname " + hostname +
+                    " and collector " + str(collector["id"]))
+
+                for host in hosts:
+                    if (host["hostName"] == hostname and
+                            host["agentId"] == collector["id"]):
+
+                        self.module.debug("Host match found")
+                        return host
+                self.module.debug("No host match found")
+                return None
+            else:
+                self.module.debug("RPC call failed")
+                self.module.debug(hostlist_json)
+        else:
+            self.module.debug("No collector specified")
+            return None
+
+    def get_host_by_displayname(self, displayname):
+        """Returns a host object for the host matching the
+        specified display name"""
+        self.module.debug("Running LogicMonitor.get_host_by_displayname...")
+
+        self.module.debug("Looking for displayname " + displayname)
+        self.module.debug("Making RPC call to 'getHost'")
+        host_json = (json.loads(self.rpc("getHost",
+                                         {"displayName": displayname})))
+
+        if host_json["status"] == 200:
+            self.module.debug("RPC call succeeded")
+            return host_json["data"]
+        else:
+            self.module.debug("RPC call failed")
+            self.module.debug(host_json)
+            return None
+
+    def get_collector_by_description(self, description):
+        """Returns a JSON collector object for the collector
+        matching the specified FQDN (description)"""
+        self.module.debug(
+            "Running LogicMonitor.get_collector_by_description..."
+        )
+
+        collector_list = self.get_collectors()
+        if collector_list is not None:
+            self.module.debug("Looking for collector with description " +
+                              description)
+            for collector in collector_list:
+                if collector["description"] == description:
+                    self.module.debug("Collector match found")
+                    return collector
+        self.module.debug("No collector match found")
+        return None
+
+    def get_group(self, fullpath):
+        """Returns a JSON group object for the group matching the
+        specified path"""
+        self.module.debug("Running LogicMonitor.get_group...")
+
+        self.module.debug("Making RPC call to 'getHostGroups'")
+        resp = json.loads(self.rpc("getHostGroups", {}))
+
+        if resp["status"] == 200:
+            self.module.debug("RPC call succeeded")
+            groups = resp["data"]
+
+            self.module.debug("Looking for group matching " + fullpath)
+            for group in groups:
+                if group["fullPath"] == fullpath.lstrip('/'):
+                    self.module.debug("Group match found")
+                    return group
+
+            self.module.debug("No group match found")
+            return None
+        else:
+            self.module.debug("RPC call failed")
+            self.module.debug(resp)
+
+        return None
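+    # Illustration: create_group("/a/b/c") below first looks the group up; if
+    # it is missing, it recursively ensures "/a" and "/a/b" exist, then adds
+    # "c" with parentId pointing at "/a/b". Passing "/" is a no-op that
+    # returns the root group id (1).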
+    def create_group(self, fullpath):
+        """Recursively create a path of host groups.
+        Returns the id of the newly created hostgroup"""
+        self.module.debug("Running LogicMonitor.create_group...")
+
+        res = self.get_group(fullpath)
+        if res:
+            self.module.debug("Group " + fullpath + " exists.")
+            return res["id"]
+
+        if fullpath == "/":
+            self.module.debug("Specified group is root. Doing nothing.")
+            return 1
+        else:
+            self.module.debug("Creating group named " + fullpath)
+            self.module.debug("System changed")
+            self.change = True
+
+            if self.check_mode:
+                self.exit(changed=True)
+
+            parentpath, name = fullpath.rsplit('/', 1)
+            parentgroup = self.get_group(parentpath)
+
+            parentid = 1
+
+            if parentpath == "":
+                parentid = 1
+            elif parentgroup:
+                parentid = parentgroup["id"]
+            else:
+                parentid = self.create_group(parentpath)
+
+            h = None
+
+            # Determine if we're creating a group from host or hostgroup class
+            if hasattr(self, '_build_host_group_hash'):
+                h = self._build_host_group_hash(
+                    fullpath,
+                    self.description,
+                    self.properties,
+                    self.alertenable)
+                h["name"] = name
+                h["parentId"] = parentid
+            else:
+                h = {"name": name,
+                     "parentId": parentid,
+                     "alertEnable": True,
+                     "description": ""}
+
+            self.module.debug("Making RPC call to 'addHostGroup'")
+            resp = json.loads(
+                self.rpc("addHostGroup", h))
+
+            if resp["status"] == 200:
+                self.module.debug("RPC call succeeded")
+                return resp["data"]["id"]
+            elif resp["errmsg"] == "The record already exists":
+                self.module.debug("The hostgroup already exists")
+                group = self.get_group(fullpath)
+                return group["id"]
+            else:
+                self.module.debug("RPC call failed")
+                self.fail(
+                    msg="Error: unable to create new hostgroup \"" + name +
+                        "\".\n" + resp["errmsg"])
+
+    def fail(self, msg):
+        self.module.fail_json(msg=msg, changed=self.change)
+
+    def exit(self, changed):
+        self.module.debug("Changed: " + str(changed))
+        self.module.exit_json(changed=changed)
+
+    def output_info(self, info):
+        self.module.debug("Registering properties as Ansible facts")
+        self.module.exit_json(changed=False, ansible_facts=info)
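+# After a successful facts run the module exits with changed=False and the
+# collected properties under ansible_facts; a play can then read them back,
+# for example (hypothetical task):
+#
+#   - debug:
+#       msg: "{{ ansible_facts | to_nice_json }}"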
" + + "Collector " + info["agentDescription"]) + self.params["collector"] = info["agentDescription"] + else: + self.module.debug("Host not found by displayname") + + # At this point, a valid collector description is required for success + # Check that the description exists or fail + if self.params["collector"]: + self.module.debug("Collector specified is " + + self.params["collector"]) + self.collector = (self.get_collector_by_description( + self.params["collector"])) + else: + self.fail(msg="No collector specified.") + + # If the host wasn't found via displayname, attempt by hostname + if info is None: + self.module.debug("Attempting to find host by hostname " + + self.hostname) + info = self.get_host_by_hostname(self.hostname, self.collector) + + self.info = info + + def get_properties(self): + """Returns a hash of the properties + associated with this LogicMonitor host""" + self.module.debug("Running Host.get_properties...") + + if self.info: + self.module.debug("Making RPC call to 'getHostProperties'") + properties_json = (json.loads(self.rpc("getHostProperties", + {'hostId': self.info["id"], + "filterSystemProperties": True}))) + + if properties_json["status"] == 200: + self.module.debug("RPC call succeeded") + return properties_json["data"] + else: + self.module.debug("Error: there was an issue retrieving the " + + "host properties") + self.module.debug(properties_json["errmsg"]) + + self.fail(msg=properties_json["status"]) + else: + self.module.debug( + "Unable to find LogicMonitor host which matches " + + self.displayname + " (" + self.hostname + ")" + ) + return None + + def site_facts(self): + """Output current properties information for the Host""" + self.module.debug("Running Host.site_facts...") + + if self.info: + self.module.debug("Host exists") + props = self.get_properties() + + self.output_info(props) + else: + self.fail(msg="Error: Host doesn't exit.") + + +class Hostgroup(LogicMonitor): + + def __init__(self, params, module=None): + """Initializer for the LogicMonitor host object""" + self.change = False + self.params = params + + LogicMonitor.__init__(self, module, **self.params) + self.module.debug("Instantiating Hostgroup object") + + self.fullpath = self.params["fullpath"] + self.info = self.get_group(self.fullpath) + + def get_properties(self, final=False): + """Returns a hash of the properties + associated with this LogicMonitor host""" + self.module.debug("Running Hostgroup.get_properties...") + + if self.info: + self.module.debug("Group found") + + self.module.debug("Making RPC call to 'getHostGroupProperties'") + properties_json = json.loads(self.rpc( + "getHostGroupProperties", + {'hostGroupId': self.info["id"], + "finalResult": final})) + + if properties_json["status"] == 200: + self.module.debug("RPC call succeeded") + return properties_json["data"] + else: + self.module.debug("RPC call failed") + self.fail(msg=properties_json["status"]) + else: + self.module.debug("Group not found") + return None + + def site_facts(self): + """Output current properties information for the Hostgroup""" + self.module.debug("Running Hostgroup.site_facts...") + + if self.info: + self.module.debug("Group exists") + props = self.get_properties(True) + + self.output_info(props) + else: + self.fail(msg="Error: Group doesn't exit.") + + +def selector(module): + """Figure out which object and which actions + to take given the right parameters""" + + if module.params["target"] == "host": + target = Host(module.params, module) + target.site_facts() + elif module.params["target"] == 
"hostgroup": + # Validate target specific required parameters + if module.params["fullpath"] is not None: + target = Hostgroup(module.params, module) + target.site_facts() + else: + module.fail_json( + msg="Parameter 'fullpath' required for target 'hostgroup'") + else: + module.fail_json( + msg="Error: Unexpected target \"" + module.params["target"] + + "\" was specified.") + + +def main(): + TARGETS = [ + "host", + "hostgroup"] + + module = AnsibleModule( + argument_spec=dict( + target=dict(required=True, default=None, choices=TARGETS), + company=dict(required=True, default=None), + user=dict(required=True, default=None), + password=dict(required=True, default=None, no_log=True), + + collector=dict(required=False, default=None), + hostname=dict(required=False, default=None), + displayname=dict(required=False, default=None), + fullpath=dict(required=False, default=None) + ), + supports_check_mode=True + ) + + selector(module) + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/monitoring/logstash_plugin.py b/plugins/modules/monitoring/logstash_plugin.py new file mode 100644 index 0000000000..57bf3a913e --- /dev/null +++ b/plugins/modules/monitoring/logstash_plugin.py @@ -0,0 +1,171 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# (c) 2017, Loic Blot +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: logstash_plugin +short_description: Manage Logstash plugins +description: + - Manages Logstash plugins. +author: Loic Blot (@nerzhul) +options: + name: + description: + - Install plugin with that name. + required: True + state: + description: + - Apply plugin state. + choices: ["present", "absent"] + default: present + plugin_bin: + description: + - Specify logstash-plugin to use for plugin management. + default: /usr/share/logstash/bin/logstash-plugin + proxy_host: + description: + - Proxy host to use during plugin installation. + proxy_port: + description: + - Proxy port to use during plugin installation. + version: + description: + - Specify plugin Version of the plugin to install. + If plugin exists with previous version, it will NOT be updated. 
+''' + +EXAMPLES = ''' +- name: Install Logstash beats input plugin + logstash_plugin: + state: present + name: logstash-input-beats + +- name: Install specific version of a plugin + logstash_plugin: + state: present + name: logstash-input-syslog + version: '3.2.0' + +- name: Uninstall Logstash plugin + logstash_plugin: + state: absent + name: logstash-filter-multiline + +- name: install Logstash plugin with alternate heap size + logstash_plugin: + state: present + name: logstash-input-beats + environment: + LS_JAVA_OPTS: "-Xms256m -Xmx256m" +''' + +from ansible.module_utils.basic import AnsibleModule + + +PACKAGE_STATE_MAP = dict( + present="install", + absent="remove" +) + + +def is_plugin_present(module, plugin_bin, plugin_name): + cmd_args = [plugin_bin, "list", plugin_name] + rc, out, err = module.run_command(" ".join(cmd_args)) + return rc == 0 + + +def parse_error(string): + reason = "reason: " + try: + return string[string.index(reason) + len(reason):].strip() + except ValueError: + return string + + +def install_plugin(module, plugin_bin, plugin_name, version, proxy_host, proxy_port): + cmd_args = [plugin_bin, PACKAGE_STATE_MAP["present"], plugin_name] + + if version: + cmd_args.append("--version %s" % version) + + if proxy_host and proxy_port: + cmd_args.append("-DproxyHost=%s -DproxyPort=%s" % (proxy_host, proxy_port)) + + cmd = " ".join(cmd_args) + + if module.check_mode: + rc, out, err = 0, "check mode", "" + else: + rc, out, err = module.run_command(cmd) + + if rc != 0: + reason = parse_error(out) + module.fail_json(msg=reason) + + return True, cmd, out, err + + +def remove_plugin(module, plugin_bin, plugin_name): + cmd_args = [plugin_bin, PACKAGE_STATE_MAP["absent"], plugin_name] + + cmd = " ".join(cmd_args) + + if module.check_mode: + rc, out, err = 0, "check mode", "" + else: + rc, out, err = module.run_command(cmd) + + if rc != 0: + reason = parse_error(out) + module.fail_json(msg=reason) + + return True, cmd, out, err + + +def main(): + module = AnsibleModule( + argument_spec=dict( + name=dict(required=True), + state=dict(default="present", choices=PACKAGE_STATE_MAP.keys()), + plugin_bin=dict(default="/usr/share/logstash/bin/logstash-plugin", type="path"), + proxy_host=dict(default=None), + proxy_port=dict(default=None), + version=dict(default=None) + ), + supports_check_mode=True + ) + + name = module.params["name"] + state = module.params["state"] + plugin_bin = module.params["plugin_bin"] + proxy_host = module.params["proxy_host"] + proxy_port = module.params["proxy_port"] + version = module.params["version"] + + present = is_plugin_present(module, plugin_bin, name) + + # skip if the state is correct + if (present and state == "present") or (state == "absent" and not present): + module.exit_json(changed=False, name=name, state=state) + + if state == "present": + changed, cmd, out, err = install_plugin(module, plugin_bin, name, version, proxy_host, proxy_port) + elif state == "absent": + changed, cmd, out, err = remove_plugin(module, plugin_bin, name) + + module.exit_json(changed=changed, cmd=cmd, name=name, state=state, stdout=out, stderr=err) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/monitoring/monit.py b/plugins/modules/monitoring/monit.py new file mode 100644 index 0000000000..53200ef35a --- /dev/null +++ b/plugins/modules/monitoring/monit.py @@ -0,0 +1,211 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2013, Darryl Stoflet +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from 
__future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: monit
+short_description: Manage the state of a program monitored via Monit
+description:
+    - Manage the state of a program monitored via I(Monit).
+options:
+    name:
+        description:
+            - The name of the I(monit) program/process to manage.
+        required: true
+    state:
+        description:
+            - The state of the service.
+        required: true
+        choices: [ "present", "started", "stopped", "restarted", "monitored", "unmonitored", "reloaded" ]
+    timeout:
+        description:
+            - If there are pending actions for the service monitored by monit, then Ansible will check
+              for up to this many seconds to verify the requested action has been performed.
+              Ansible will sleep for five seconds between each check.
+        default: 300
+author: "Darryl Stoflet (@dstoflet)"
+'''
+
+EXAMPLES = '''
+# Manage the state of program "httpd" to be in "started" state.
+- monit:
+    name: httpd
+    state: started
+'''
+
+import time
+import re
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def main():
+    arg_spec = dict(
+        name=dict(required=True),
+        timeout=dict(default=300, type='int'),
+        state=dict(required=True, choices=['present', 'started', 'restarted', 'stopped', 'monitored', 'unmonitored', 'reloaded'])
+    )
+
+    module = AnsibleModule(argument_spec=arg_spec, supports_check_mode=True)
+
+    name = module.params['name']
+    state = module.params['state']
+    timeout = module.params['timeout']
+
+    MONIT = module.get_bin_path('monit', True)
+
+    def monit_version():
+        rc, out, err = module.run_command('%s -V' % MONIT, check_rc=True)
+        version_line = out.split('\n')[0]
+        version = re.search(r"[0-9]+\.[0-9]+", version_line).group().split('.')
+        # Use only major and minor even if there are more; these should be enough
+        return int(version[0]), int(version[1])
+
+    def is_version_higher_than_5_18():
+        return (MONIT_MAJOR_VERSION, MONIT_MINOR_VERSION) > (5, 18)
+
+    def parse(parts):
+        if is_version_higher_than_5_18():
+            return parse_current(parts)
+        else:
+            return parse_older_versions(parts)
+
+    def parse_older_versions(parts):
+        if len(parts) > 2 and parts[0].lower() == 'process' and parts[1] == "'%s'" % name:
+            return ' '.join(parts[2:]).lower()
+        else:
+            return ''
+
+    def parse_current(parts):
+        if len(parts) > 2 and parts[2].lower() == 'process' and parts[0] == name:
+            return ''.join(parts[1]).lower()
+        else:
+            return ''
+
+    def get_status():
+        """Return the status of the process in monit, or the empty string if not present."""
+        rc, out, err = module.run_command('%s %s' % (MONIT, SUMMARY_COMMAND), check_rc=True)
+        for line in out.split('\n'):
+            # Sample output lines:
+            # Process 'name'    Running
+            # Process 'name'    Running - restart pending
+            parts = parse(line.split())
+            if parts != '':
+                return parts
+
+        return ''
+
+    def run_command(command):
+        """Runs a monit command, and returns the new status."""
+        module.run_command('%s %s %s' % (MONIT, command, name), check_rc=True)
+        return get_status()
+
+    def wait_for_monit_to_stop_pending():
+        """Fails this run if there is no status or it's pending/initializing for timeout"""
+        timeout_time = time.time() + timeout
+        sleep_time = 5
+
+        running_status = get_status()
+        while running_status == '' or 'pending' in running_status or 'initializing' in running_status:
+            if time.time() >= timeout_time:
+                module.fail_json(
+                    msg='waited too long for "pending", or "initializing" status to go away
({0})'.format( + running_status + ), + state=state + ) + + time.sleep(sleep_time) + running_status = get_status() + + MONIT_MAJOR_VERSION, MONIT_MINOR_VERSION = monit_version() + + SUMMARY_COMMAND = ('summary', 'summary -B')[is_version_higher_than_5_18()] + + if state == 'reloaded': + if module.check_mode: + module.exit_json(changed=True) + rc, out, err = module.run_command('%s reload' % MONIT) + if rc != 0: + module.fail_json(msg='monit reload failed', stdout=out, stderr=err) + wait_for_monit_to_stop_pending() + module.exit_json(changed=True, name=name, state=state) + + present = get_status() != '' + + if not present and not state == 'present': + module.fail_json(msg='%s process not presently configured with monit' % name, name=name, state=state) + + if state == 'present': + if not present: + if module.check_mode: + module.exit_json(changed=True) + status = run_command('reload') + if status == '': + wait_for_monit_to_stop_pending() + module.exit_json(changed=True, name=name, state=state) + module.exit_json(changed=False, name=name, state=state) + + wait_for_monit_to_stop_pending() + running = 'running' in get_status() + + if running and state in ['started', 'monitored']: + module.exit_json(changed=False, name=name, state=state) + + if running and state == 'stopped': + if module.check_mode: + module.exit_json(changed=True) + status = run_command('stop') + if status in ['not monitored'] or 'stop pending' in status: + module.exit_json(changed=True, name=name, state=state) + module.fail_json(msg='%s process not stopped' % name, status=status) + + if running and state == 'unmonitored': + if module.check_mode: + module.exit_json(changed=True) + status = run_command('unmonitor') + if status in ['not monitored'] or 'unmonitor pending' in status: + module.exit_json(changed=True, name=name, state=state) + module.fail_json(msg='%s process not unmonitored' % name, status=status) + + elif state == 'restarted': + if module.check_mode: + module.exit_json(changed=True) + status = run_command('restart') + if status in ['initializing', 'running'] or 'restart pending' in status: + module.exit_json(changed=True, name=name, state=state) + module.fail_json(msg='%s process not restarted' % name, status=status) + + elif not running and state == 'started': + if module.check_mode: + module.exit_json(changed=True) + status = run_command('start') + if status in ['initializing', 'running'] or 'start pending' in status: + module.exit_json(changed=True, name=name, state=state) + module.fail_json(msg='%s process not started' % name, status=status) + + elif not running and state == 'monitored': + if module.check_mode: + module.exit_json(changed=True) + status = run_command('monitor') + if status not in ['not monitored']: + module.exit_json(changed=True, name=name, state=state) + module.fail_json(msg='%s process not monitored' % name, status=status) + + module.exit_json(changed=False, name=name, state=state) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/monitoring/nagios.py b/plugins/modules/monitoring/nagios.py new file mode 100644 index 0000000000..85b2e97db2 --- /dev/null +++ b/plugins/modules/monitoring/nagios.py @@ -0,0 +1,1086 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# This file is largely copied from the Nagios module included in the +# Func project. Original copyright follows: +# +# func-nagios - Schedule downtime and enables/disable notifications +# Copyright 2011, Red Hat, Inc. 
+# Tim Bielawa
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: nagios
+short_description: Perform common tasks in Nagios related to downtime and notifications.
+description:
+    - "The C(nagios) module has two basic functions: scheduling downtime and toggling alerts for services or hosts."
+    - All actions require the I(host) parameter to be given explicitly. In playbooks you can use the C({{inventory_hostname}}) variable to refer
+      to the host the playbook is currently running on.
+    - You can specify multiple services at once by separating them with commas, e.g., C(services=httpd,nfs,puppet).
+    - When specifying what service to handle there is a special service value, I(host), which will handle alerts/downtime for the I(host itself),
+      e.g., C(service=host). This keyword may not be given with other services at the same time.
+      I(Setting alerts/downtime for a host does not affect alerts/downtime for any of the services running on it.) To schedule downtime for all
+      services on a particular host, use the keyword "all", e.g., C(service=all).
+    - When using the C(nagios) module you will need to specify your Nagios server using the C(delegate_to) parameter.
+options:
+    action:
+        description:
+            - Action to take.
+            - servicegroup options were added in 2.0.
+            - delete_downtime options were added in 2.2.
+        required: true
+        choices: [ "downtime", "delete_downtime", "enable_alerts", "disable_alerts", "silence", "unsilence",
+                   "silence_nagios", "unsilence_nagios", "command", "servicegroup_service_downtime",
+                   "servicegroup_host_downtime" ]
+    host:
+        description:
+            - Host to operate on in Nagios.
+    cmdfile:
+        description:
+            - Path to the nagios I(command file) (FIFO pipe).
+              Only required if auto-detection fails.
+        default: auto-detected
+    author:
+        description:
+            - Author to leave downtime comments as.
+              Only usable with the C(downtime) action.
+        default: Ansible
+    comment:
+        description:
+            - Comment for C(downtime) action.
+        default: Scheduling downtime
+    start:
+        description:
+            - When downtime should start, in time_t format (epoch seconds).
+    minutes:
+        description:
+            - Minutes to schedule downtime for.
+            - Only usable with the C(downtime) action.
+        type: int
+        default: 30
+    services:
+        description:
+            - What to manage downtime/alerts for. Separate multiple services with commas.
+              C(service) is an alias for C(services).
+              B(Required) option when using the C(downtime), C(enable_alerts), and C(disable_alerts) actions.
+        aliases: [ "service" ]
+        required: true
+    servicegroup:
+        description:
+            - The Servicegroup we want to set downtimes/alerts for.
+              B(Required) option when using the C(servicegroup_service_downtime) and C(servicegroup_host_downtime) actions.
+    command:
+        description:
+            - The raw command to send to nagios, which
+              should not include the submitted time header or the line-feed.
+              B(Required) option when using the C(command) action.
+        required: true
+
+author: "Tim Bielawa (@tbielawa)"
+'''
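+# Under the hood, every action is translated into a Nagios external command and
+# written to the command FIFO. For example (hypothetical timestamp and host),
+# a 30-minute host downtime becomes a single line such as:
+#
+#   [1555984800] SCHEDULE_HOST_DOWNTIME;web1;1555984800;1555986600;1;0;1800;Ansible;Scheduling downtime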
+EXAMPLES = '''
+# set 30 minutes of apache downtime
+- nagios:
+    action: downtime
+    minutes: 30
+    service: httpd
+    host: '{{ inventory_hostname }}'
+
+# schedule an hour of HOST downtime
+- nagios:
+    action: downtime
+    minutes: 60
+    service: host
+    host: '{{ inventory_hostname }}'
+
+# schedule an hour of HOST downtime starting at 2019-04-23T02:00:00+00:00
+- nagios:
+    action: downtime
+    start: 1555984800
+    minutes: 60
+    service: host
+    host: '{{ inventory_hostname }}'
+
+# schedule an hour of HOST downtime, with a comment describing the reason
+- nagios:
+    action: downtime
+    minutes: 60
+    service: host
+    host: '{{ inventory_hostname }}'
+    comment: Rebuilding machine
+
+# schedule downtime for ALL services on HOST
+- nagios:
+    action: downtime
+    minutes: 45
+    service: all
+    host: '{{ inventory_hostname }}'
+
+# schedule downtime for a few services
+- nagios:
+    action: downtime
+    services: frob,foobar,qeuz
+    host: '{{ inventory_hostname }}'
+
+# set 30 minutes downtime for all services in servicegroup foo
+- nagios:
+    action: servicegroup_service_downtime
+    minutes: 30
+    servicegroup: foo
+    host: '{{ inventory_hostname }}'
+
+# set 30 minutes downtime for all hosts in servicegroup foo
+- nagios:
+    action: servicegroup_host_downtime
+    minutes: 30
+    servicegroup: foo
+    host: '{{ inventory_hostname }}'
+
+# delete all downtime for a given host
+- nagios:
+    action: delete_downtime
+    host: '{{ inventory_hostname }}'
+    service: all
+
+# delete all downtime for HOST with a particular comment
+- nagios:
+    action: delete_downtime
+    host: '{{ inventory_hostname }}'
+    service: host
+    comment: Planned maintenance
+
+# enable SMART disk alerts
+- nagios:
+    action: enable_alerts
+    service: smart
+    host: '{{ inventory_hostname }}'
+
+# "two services at once: disable httpd and nfs alerts"
+- nagios:
+    action: disable_alerts
+    service: httpd,nfs
+    host: '{{ inventory_hostname }}'
+
+# disable HOST alerts
+- nagios:
+    action: disable_alerts
+    service: host
+    host: '{{ inventory_hostname }}'
+
+# silence ALL alerts
+- nagios:
+    action: silence
+    host: '{{ inventory_hostname }}'
+
+# unsilence all alerts
+- nagios:
+    action: unsilence
+    host: '{{ inventory_hostname }}'
+
+# SHUT UP NAGIOS
+- nagios:
+    action: silence_nagios
+
+# ANNOY ME NAGIOS
+- nagios:
+    action: unsilence_nagios
+
+# command something
+- nagios:
+    action: command
+    command: DISABLE_FAILURE_PREDICTION
+'''
+
+import time
+import os.path
+import stat
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+######################################################################
+
+def which_cmdfile():
+    locations = [
+        # rhel
+        '/etc/nagios/nagios.cfg',
+        # debian
+        '/etc/nagios3/nagios.cfg',
+        # older debian
+        '/etc/nagios2/nagios.cfg',
+        # bsd, solaris
+        '/usr/local/etc/nagios/nagios.cfg',
+        # groundwork it monitoring
+        '/usr/local/groundwork/nagios/etc/nagios.cfg',
+        # open monitoring distribution
+        '/omd/sites/oppy/tmp/nagios/nagios.cfg',
+        # ???
+ '/usr/local/nagios/etc/nagios.cfg', + '/usr/local/nagios/nagios.cfg', + '/opt/nagios/etc/nagios.cfg', + '/opt/nagios/nagios.cfg', + # icinga on debian/ubuntu + '/etc/icinga/icinga.cfg', + # icinga installed from source (default location) + '/usr/local/icinga/etc/icinga.cfg', + ] + + for path in locations: + if os.path.exists(path): + for line in open(path): + if line.startswith('command_file'): + return line.split('=')[1].strip() + + return None + +###################################################################### + + +def main(): + ACTION_CHOICES = [ + 'downtime', + 'delete_downtime', + 'silence', + 'unsilence', + 'enable_alerts', + 'disable_alerts', + 'silence_nagios', + 'unsilence_nagios', + 'command', + 'servicegroup_host_downtime', + 'servicegroup_service_downtime', + ] + + module = AnsibleModule( + argument_spec=dict( + action=dict(required=True, default=None, choices=ACTION_CHOICES), + author=dict(default='Ansible'), + comment=dict(default='Scheduling downtime'), + host=dict(required=False, default=None), + servicegroup=dict(required=False, default=None), + start=dict(required=False, default=None), + minutes=dict(default=30, type='int'), + cmdfile=dict(default=which_cmdfile()), + services=dict(default=None, aliases=['service']), + command=dict(required=False, default=None), + ) + ) + + action = module.params['action'] + host = module.params['host'] + servicegroup = module.params['servicegroup'] + start = module.params['start'] + services = module.params['services'] + cmdfile = module.params['cmdfile'] + command = module.params['command'] + + ################################################################## + # Required args per action: + # downtime = (minutes, service, host) + # (un)silence = (host) + # (enable/disable)_alerts = (service, host) + # command = command + # + # AnsibleModule will verify most stuff, we need to verify + # 'service' manually. 
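+    # For example (hypothetical invocations): action=downtime needs host and
+    # services, action=servicegroup_host_downtime needs servicegroup, and
+    # action=command needs command; the checks below enforce exactly that.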
+
+    ##################################################################
+    if action not in ['command', 'silence_nagios', 'unsilence_nagios']:
+        if not host:
+            module.fail_json(msg='no host specified for action requiring one')
+    ######################################################################
+    if action == 'downtime':
+        # Make sure there's an actual service selected
+        if not services:
+            module.fail_json(msg='no service selected to set downtime for')
+
+    ######################################################################
+    if action == 'delete_downtime':
+        # Make sure there's an actual service selected
+        if not services:
+            module.fail_json(msg='no service selected to delete downtime for')
+
+    ######################################################################
+
+    if action in ['servicegroup_service_downtime', 'servicegroup_host_downtime']:
+        # Make sure there's an actual servicegroup selected
+        if not servicegroup:
+            module.fail_json(msg='no servicegroup selected to set downtime for')
+
+    ##################################################################
+    if action in ['enable_alerts', 'disable_alerts']:
+        if not services:
+            module.fail_json(msg='a service is required when setting alerts')
+
+    if action in ['command']:
+        if not command:
+            module.fail_json(msg='no command passed for command action')
+    ##################################################################
+    if not cmdfile:
+        module.fail_json(msg='unable to locate nagios.cfg')
+
+    ##################################################################
+    ansible_nagios = Nagios(module, **module.params)
+    if module.check_mode:
+        module.exit_json(changed=True)
+    else:
+        ansible_nagios.act()
+    ##################################################################
+
+
+######################################################################
+class Nagios(object):
+    """
+    Perform common tasks in Nagios related to downtime and
+    notifications.
+
+    The complete set of external commands Nagios handles is documented
+    on their website:
+
+    http://old.nagios.org/developerinfo/externalcommands/commandlist.php
+
+    Note that in the case of `schedule_svc_downtime`,
+    `enable_svc_notifications`, and `disable_svc_notifications`, the
+    service argument should be passed as a list.
+    """
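+    # Each helper below formats one external command and appends it to
+    # self.command_results via _write_command(); act() finally reports the
+    # accumulated command strings back to Ansible as 'nagios_commands'.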
+ """ + + def __init__(self, module, **kwargs): + self.module = module + self.action = kwargs['action'] + self.author = kwargs['author'] + self.comment = kwargs['comment'] + self.host = kwargs['host'] + self.servicegroup = kwargs['servicegroup'] + self.start = int(kwargs['start']) + self.minutes = kwargs['minutes'] + self.cmdfile = kwargs['cmdfile'] + self.command = kwargs['command'] + + if (kwargs['services'] is None) or (kwargs['services'] == 'host') or (kwargs['services'] == 'all'): + self.services = kwargs['services'] + else: + self.services = kwargs['services'].split(',') + + self.command_results = [] + + def _now(self): + """ + The time in seconds since 12:00:00AM Jan 1, 1970 + """ + + return int(time.time()) + + def _write_command(self, cmd): + """ + Write the given command to the Nagios command file + """ + + if not os.path.exists(self.cmdfile): + self.module.fail_json(msg='nagios command file does not exist', + cmdfile=self.cmdfile) + if not stat.S_ISFIFO(os.stat(self.cmdfile).st_mode): + self.module.fail_json(msg='nagios command file is not a fifo file', + cmdfile=self.cmdfile) + try: + fp = open(self.cmdfile, 'w') + fp.write(cmd) + fp.flush() + fp.close() + self.command_results.append(cmd.strip()) + except IOError: + self.module.fail_json(msg='unable to write to nagios command file', + cmdfile=self.cmdfile) + + def _fmt_dt_str(self, cmd, host, duration, author=None, + comment=None, start=None, + svc=None, fixed=1, trigger=0): + """ + Format an external-command downtime string. + + cmd - Nagios command ID + host - Host schedule downtime on + duration - Minutes to schedule downtime for + author - Name to file the downtime as + comment - Reason for running this command (upgrade, reboot, etc) + start - Start of downtime in seconds since 12:00AM Jan 1 1970 + Default is to use the entry time (now) + svc - Service to schedule downtime for, omit when for host downtime + fixed - Start now if 1, start when a problem is detected if 0 + trigger - Optional ID of event to start downtime from. Leave as 0 for + fixed downtime. + + Syntax: [submitted] COMMAND;;[] + ;;;;;; + + """ + + entry_time = self._now() + if start is None: + start = entry_time + + hdr = "[%s] %s;%s;" % (entry_time, cmd, host) + duration_s = (duration * 60) + end = start + duration_s + + if not author: + author = self.author + + if not comment: + comment = self.comment + + if svc is not None: + dt_args = [svc, str(start), str(end), str(fixed), str(trigger), + str(duration_s), author, comment] + else: + # Downtime for a host if no svc specified + dt_args = [str(start), str(end), str(fixed), str(trigger), + str(duration_s), author, comment] + + dt_arg_str = ";".join(dt_args) + dt_str = hdr + dt_arg_str + "\n" + + return dt_str + + def _fmt_dt_del_str(self, cmd, host, svc=None, start=None, comment=None): + """ + Format an external-command downtime deletion string. 
+    def _fmt_dt_del_str(self, cmd, host, svc=None, start=None, comment=None):
+        """
+        Format an external-command downtime deletion string.
+
+        cmd - Nagios command ID
+        host - Host to remove scheduled downtime from
+        comment - Reason downtime was added (upgrade, reboot, etc)
+        start - Start of downtime in seconds since 12:00AM Jan 1 1970
+        svc - Service to remove downtime for, omit to remove all downtime for the host
+
+        Syntax: [submitted] COMMAND;<host_name>;
+        [<service_description>];[<start_time>];[<comment>]
+        """
+
+        entry_time = self._now()
+        hdr = "[%s] %s;%s;" % (entry_time, cmd, host)
+
+        if comment is None:
+            comment = self.comment
+
+        dt_del_args = []
+        if svc is not None:
+            dt_del_args.append(svc)
+        else:
+            dt_del_args.append('')
+
+        if start is not None:
+            dt_del_args.append(str(start))
+        else:
+            dt_del_args.append('')
+
+        if comment is not None:
+            dt_del_args.append(comment)
+        else:
+            dt_del_args.append('')
+
+        dt_del_arg_str = ";".join(dt_del_args)
+        dt_del_str = hdr + dt_del_arg_str + "\n"
+
+        return dt_del_str
+
+    def _fmt_notif_str(self, cmd, host=None, svc=None):
+        """
+        Format an external-command notification string.
+
+        cmd - Nagios command ID.
+        host - Host to en/disable notifications on. A value is not required
+               for global downtime
+        svc - Service to schedule downtime for. A value is not required
+              for host downtime.
+
+        Syntax: [submitted] COMMAND;[<host_name>[;<service_description>]]
+        """
+
+        entry_time = self._now()
+        notif_str = "[%s] %s" % (entry_time, cmd)
+        if host is not None:
+            notif_str += ";%s" % host
+
+            if svc is not None:
+                notif_str += ";%s" % svc
+
+        notif_str += "\n"
+
+        return notif_str
+
+    def schedule_svc_downtime(self, host, services=None, minutes=30, start=None):
+        """
+        This command is used to schedule downtime for a particular
+        service.
+
+        During the specified downtime, Nagios will not send
+        notifications out about the service.
+
+        Syntax: SCHEDULE_SVC_DOWNTIME;<host_name>;<service_description>;
+        <start_time>;<end_time>;<fixed>;<trigger_id>;<duration>;<author>;<comment>
+        """
+
+        cmd = "SCHEDULE_SVC_DOWNTIME"
+
+        if services is None:
+            services = []
+
+        for service in services:
+            dt_cmd_str = self._fmt_dt_str(cmd, host, minutes, start=start, svc=service)
+            self._write_command(dt_cmd_str)
+
+    def schedule_host_downtime(self, host, minutes=30, start=None):
+        """
+        This command is used to schedule downtime for a particular
+        host.
+
+        During the specified downtime, Nagios will not send
+        notifications out about the host.
+
+        Syntax: SCHEDULE_HOST_DOWNTIME;<host_name>;<start_time>;<end_time>;
+        <fixed>;<trigger_id>;<duration>;<author>;<comment>
+        """
+
+        cmd = "SCHEDULE_HOST_DOWNTIME"
+        dt_cmd_str = self._fmt_dt_str(cmd, host, minutes, start=start)
+        self._write_command(dt_cmd_str)
+
+    def schedule_host_svc_downtime(self, host, minutes=30, start=None):
+        """
+        This command is used to schedule downtime for
+        all services associated with a particular host.
+
+        During the specified downtime, Nagios will not send
+        notifications out about the host.
+
+        Syntax: SCHEDULE_HOST_SVC_DOWNTIME;<host_name>;<start_time>;<end_time>;
+        <fixed>;<trigger_id>;<duration>;<author>;<comment>
+        """
+
+        cmd = "SCHEDULE_HOST_SVC_DOWNTIME"
+        dt_cmd_str = self._fmt_dt_str(cmd, host, minutes, start=start)
+        self._write_command(dt_cmd_str)
+
+    def delete_host_downtime(self, host, services=None, comment=None):
+        """
+        This command is used to remove scheduled downtime for a particular
+        host.
+
+        Syntax: DEL_DOWNTIME_BY_HOST_NAME;<host_name>;
+        [<service_description>];[<start_time>];[<comment>]
+        """
+
+        cmd = "DEL_DOWNTIME_BY_HOST_NAME"
+
+        if services is None:
+            dt_del_cmd_str = self._fmt_dt_del_str(cmd, host, comment=comment)
+            self._write_command(dt_del_cmd_str)
+        else:
+            for service in services:
+                dt_del_cmd_str = self._fmt_dt_del_str(cmd, host, svc=service, comment=comment)
+                self._write_command(dt_del_cmd_str)
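+    # Worked example (hypothetical values): delete_host_downtime("web1",
+    # comment="Planned maintenance") writes a single deletion command:
+    #   [1555984800] DEL_DOWNTIME_BY_HOST_NAME;web1;;;Planned maintenance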
+    def schedule_hostgroup_host_downtime(self, hostgroup, minutes=30, start=None):
+        """
+        This command is used to schedule downtime for all hosts in a
+        particular hostgroup.
+
+        During the specified downtime, Nagios will not send
+        notifications out about the hosts.
+
+        Syntax: SCHEDULE_HOSTGROUP_HOST_DOWNTIME;<hostgroup_name>;<start_time>;
+        <end_time>;<fixed>;<trigger_id>;<duration>;<author>;<comment>
+        """
+
+        cmd = "SCHEDULE_HOSTGROUP_HOST_DOWNTIME"
+        dt_cmd_str = self._fmt_dt_str(cmd, hostgroup, minutes, start=start)
+        self._write_command(dt_cmd_str)
+
+    def schedule_hostgroup_svc_downtime(self, hostgroup, minutes=30, start=None):
+        """
+        This command is used to schedule downtime for all services in
+        a particular hostgroup.
+
+        During the specified downtime, Nagios will not send
+        notifications out about the services.
+
+        Note that scheduling downtime for services does not
+        automatically schedule downtime for the hosts those services
+        are associated with.
+
+        Syntax: SCHEDULE_HOSTGROUP_SVC_DOWNTIME;<hostgroup_name>;<start_time>;
+        <end_time>;<fixed>;<trigger_id>;<duration>;<author>;<comment>
+        """
+
+        cmd = "SCHEDULE_HOSTGROUP_SVC_DOWNTIME"
+        dt_cmd_str = self._fmt_dt_str(cmd, hostgroup, minutes, start=start)
+        self._write_command(dt_cmd_str)
+
+    def schedule_servicegroup_host_downtime(self, servicegroup, minutes=30, start=None):
+        """
+        This command is used to schedule downtime for all hosts in a
+        particular servicegroup.
+
+        During the specified downtime, Nagios will not send
+        notifications out about the hosts.
+
+        Syntax: SCHEDULE_SERVICEGROUP_HOST_DOWNTIME;<servicegroup_name>;
+        <start_time>;<end_time>;<fixed>;<trigger_id>;<duration>;<author>;<comment>
+        """
+
+        cmd = "SCHEDULE_SERVICEGROUP_HOST_DOWNTIME"
+        dt_cmd_str = self._fmt_dt_str(cmd, servicegroup, minutes, start=start)
+        self._write_command(dt_cmd_str)
+
+    def schedule_servicegroup_svc_downtime(self, servicegroup, minutes=30, start=None):
+        """
+        This command is used to schedule downtime for all services in
+        a particular servicegroup.
+
+        During the specified downtime, Nagios will not send
+        notifications out about the services.
+
+        Note that scheduling downtime for services does not
+        automatically schedule downtime for the hosts those services
+        are associated with.
+
+        Syntax: SCHEDULE_SERVICEGROUP_SVC_DOWNTIME;<servicegroup_name>;
+        <start_time>;<end_time>;<fixed>;<trigger_id>;<duration>;<author>;<comment>
+        """
+
+        cmd = "SCHEDULE_SERVICEGROUP_SVC_DOWNTIME"
+        dt_cmd_str = self._fmt_dt_str(cmd, servicegroup, minutes, start=start)
+        self._write_command(dt_cmd_str)
+
+    def disable_host_svc_notifications(self, host):
+        """
+        This command is used to prevent notifications from being sent
+        out for all services on the specified host.
+
+        Note that this command does not disable notifications from
+        being sent out about the host.
+
+        Syntax: DISABLE_HOST_SVC_NOTIFICATIONS;<host_name>
+        """
+
+        cmd = "DISABLE_HOST_SVC_NOTIFICATIONS"
+        notif_str = self._fmt_notif_str(cmd, host)
+        self._write_command(notif_str)
+
+    def disable_host_notifications(self, host):
+        """
+        This command is used to prevent notifications from being sent
+        out for the specified host.
+
+        Note that this command does not disable notifications for
+        services associated with this host.
+
+        Syntax: DISABLE_HOST_NOTIFICATIONS;<host_name>
+        """
+
+        cmd = "DISABLE_HOST_NOTIFICATIONS"
+        notif_str = self._fmt_notif_str(cmd, host)
+        self._write_command(notif_str)
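+    # Worked example (hypothetical values): disable_host_notifications("web1")
+    # writes:
+    #   [1555984800] DISABLE_HOST_NOTIFICATIONS;web1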
+    def disable_svc_notifications(self, host, services=None):
+        """
+        This command is used to prevent notifications from being sent
+        out for the specified service.
+
+        Note that this command does not disable notifications from
+        being sent out about the host.
+
+        Syntax: DISABLE_SVC_NOTIFICATIONS;<host_name>;<service_description>
+        """
+
+        cmd = "DISABLE_SVC_NOTIFICATIONS"
+
+        if services is None:
+            services = []
+
+        for service in services:
+            notif_str = self._fmt_notif_str(cmd, host, svc=service)
+            self._write_command(notif_str)
+
+    def disable_servicegroup_host_notifications(self, servicegroup):
+        """
+        This command is used to prevent notifications from being sent
+        out for all hosts in the specified servicegroup.
+
+        Note that this command does not disable notifications for
+        services associated with hosts in this service group.
+
+        Syntax: DISABLE_SERVICEGROUP_HOST_NOTIFICATIONS;<servicegroup_name>
+        """
+
+        cmd = "DISABLE_SERVICEGROUP_HOST_NOTIFICATIONS"
+        notif_str = self._fmt_notif_str(cmd, servicegroup)
+        self._write_command(notif_str)
+
+    def disable_servicegroup_svc_notifications(self, servicegroup):
+        """
+        This command is used to prevent notifications from being sent
+        out for all services in the specified servicegroup.
+
+        Note that this does not prevent notifications from being sent
+        out about the hosts in this servicegroup.
+
+        Syntax: DISABLE_SERVICEGROUP_SVC_NOTIFICATIONS;<servicegroup_name>
+        """
+
+        cmd = "DISABLE_SERVICEGROUP_SVC_NOTIFICATIONS"
+        notif_str = self._fmt_notif_str(cmd, servicegroup)
+        self._write_command(notif_str)
+
+    def disable_hostgroup_host_notifications(self, hostgroup):
+        """
+        Disables notifications for all hosts in a particular
+        hostgroup.
+
+        Note that this does not disable notifications for the services
+        associated with the hosts in the hostgroup - see the
+        DISABLE_HOSTGROUP_SVC_NOTIFICATIONS command for that.
+
+        Syntax: DISABLE_HOSTGROUP_HOST_NOTIFICATIONS;<hostgroup_name>
+        """
+
+        cmd = "DISABLE_HOSTGROUP_HOST_NOTIFICATIONS"
+        notif_str = self._fmt_notif_str(cmd, hostgroup)
+        self._write_command(notif_str)
+
+    def disable_hostgroup_svc_notifications(self, hostgroup):
+        """
+        Disables notifications for all services associated with hosts
+        in a particular hostgroup.
+
+        Note that this does not disable notifications for the hosts in
+        the hostgroup - see the DISABLE_HOSTGROUP_HOST_NOTIFICATIONS
+        command for that.
+
+        Syntax: DISABLE_HOSTGROUP_SVC_NOTIFICATIONS;<hostgroup_name>
+        """
+
+        cmd = "DISABLE_HOSTGROUP_SVC_NOTIFICATIONS"
+        notif_str = self._fmt_notif_str(cmd, hostgroup)
+        self._write_command(notif_str)
+
+    def enable_host_notifications(self, host):
+        """
+        Enables notifications for a particular host.
+
+        Note that this command does not enable notifications for
+        services associated with this host.
+
+        Syntax: ENABLE_HOST_NOTIFICATIONS;<host_name>
+        """
+
+        cmd = "ENABLE_HOST_NOTIFICATIONS"
+        notif_str = self._fmt_notif_str(cmd, host)
+        self._write_command(notif_str)
+
+    def enable_host_svc_notifications(self, host):
+        """
+        Enables notifications for all services on the specified host.
+
+        Note that this does not enable notifications for the host.
+
+        Syntax: ENABLE_HOST_SVC_NOTIFICATIONS;<host_name>
+        """
+
+        cmd = "ENABLE_HOST_SVC_NOTIFICATIONS"
+        notif_str = self._fmt_notif_str(cmd, host)
+        nagios_return = self._write_command(notif_str)
+
+        if nagios_return:
+            return notif_str
+        else:
+            return "Fail: could not write to the command file"
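+    # Note on return values: _write_command() returns True on success and
+    # calls fail_json() (which exits) on error, so the "Fail: ..." branches
+    # in the enable_* helpers are defensive only.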
+    def enable_svc_notifications(self, host, services=None):
+        """
+        Enables notifications for a particular service.
+
+        Note that this does not enable notifications for the host.
+
+        Syntax: ENABLE_SVC_NOTIFICATIONS;<host_name>;<service_description>
+        """
+
+        cmd = "ENABLE_SVC_NOTIFICATIONS"
+
+        if services is None:
+            services = []
+
+        nagios_return = True
+        return_str_list = []
+        for service in services:
+            notif_str = self._fmt_notif_str(cmd, host, svc=service)
+            nagios_return = self._write_command(notif_str) and nagios_return
+            return_str_list.append(notif_str)
+
+        if nagios_return:
+            return return_str_list
+        else:
+            return "Fail: could not write to the command file"
+
+    def enable_hostgroup_host_notifications(self, hostgroup):
+        """
+        Enables notifications for all hosts in a particular hostgroup.
+
+        Note that this command does not enable notifications for
+        services associated with the hosts in this hostgroup.
+
+        Syntax: ENABLE_HOSTGROUP_HOST_NOTIFICATIONS;<hostgroup_name>
+        """
+
+        cmd = "ENABLE_HOSTGROUP_HOST_NOTIFICATIONS"
+        notif_str = self._fmt_notif_str(cmd, hostgroup)
+        nagios_return = self._write_command(notif_str)
+
+        if nagios_return:
+            return notif_str
+        else:
+            return "Fail: could not write to the command file"
+
+    def enable_hostgroup_svc_notifications(self, hostgroup):
+        """
+        Enables notifications for all services that are associated
+        with hosts in a particular hostgroup.
+
+        Note that this does not enable notifications for the hosts in
+        this hostgroup.
+
+        Syntax: ENABLE_HOSTGROUP_SVC_NOTIFICATIONS;<hostgroup_name>
+        """
+
+        cmd = "ENABLE_HOSTGROUP_SVC_NOTIFICATIONS"
+        notif_str = self._fmt_notif_str(cmd, hostgroup)
+        nagios_return = self._write_command(notif_str)
+
+        if nagios_return:
+            return notif_str
+        else:
+            return "Fail: could not write to the command file"
+
+    def enable_servicegroup_host_notifications(self, servicegroup):
+        """
+        Enables notifications for all hosts that have services that
+        are members of a particular servicegroup.
+
+        Note that this command does not enable notifications for
+        services associated with the hosts in this servicegroup.
+
+        Syntax: ENABLE_SERVICEGROUP_HOST_NOTIFICATIONS;<servicegroup_name>
+        """
+
+        cmd = "ENABLE_SERVICEGROUP_HOST_NOTIFICATIONS"
+        notif_str = self._fmt_notif_str(cmd, servicegroup)
+        nagios_return = self._write_command(notif_str)
+
+        if nagios_return:
+            return notif_str
+        else:
+            return "Fail: could not write to the command file"
+
+    def enable_servicegroup_svc_notifications(self, servicegroup):
+        """
+        Enables notifications for all services that are members of a
+        particular servicegroup.
+
+        Note that this does not enable notifications for the hosts in
+        this servicegroup.
+
+        Syntax: ENABLE_SERVICEGROUP_SVC_NOTIFICATIONS;<servicegroup_name>
+        """
+
+        cmd = "ENABLE_SERVICEGROUP_SVC_NOTIFICATIONS"
+        notif_str = self._fmt_notif_str(cmd, servicegroup)
+        nagios_return = self._write_command(notif_str)
+
+        if nagios_return:
+            return notif_str
+        else:
+            return "Fail: could not write to the command file"
+    def silence_host(self, host):
+        """
+        This command is used to prevent notifications from being sent
+        out for the host and all services on the specified host.
+
+        This is equivalent to calling disable_host_svc_notifications
+        and disable_host_notifications.
+
+        Syntax: DISABLE_HOST_SVC_NOTIFICATIONS;<host_name>
+        Syntax: DISABLE_HOST_NOTIFICATIONS;<host_name>
+        """
+
+        cmd = [
+            "DISABLE_HOST_SVC_NOTIFICATIONS",
+            "DISABLE_HOST_NOTIFICATIONS"
+        ]
+        nagios_return = True
+        return_str_list = []
+        for c in cmd:
+            notif_str = self._fmt_notif_str(c, host)
+            nagios_return = self._write_command(notif_str) and nagios_return
+            return_str_list.append(notif_str)
+
+        if nagios_return:
+            return return_str_list
+        else:
+            return "Fail: could not write to the command file"
+
+    def unsilence_host(self, host):
+        """
+        This command is used to enable notifications for the host and
+        all services on the specified host.
+
+        This is equivalent to calling enable_host_svc_notifications
+        and enable_host_notifications.
+
+        Syntax: ENABLE_HOST_SVC_NOTIFICATIONS;<host_name>
+        Syntax: ENABLE_HOST_NOTIFICATIONS;<host_name>
+        """
+
+        cmd = [
+            "ENABLE_HOST_SVC_NOTIFICATIONS",
+            "ENABLE_HOST_NOTIFICATIONS"
+        ]
+        nagios_return = True
+        return_str_list = []
+        for c in cmd:
+            notif_str = self._fmt_notif_str(c, host)
+            nagios_return = self._write_command(notif_str) and nagios_return
+            return_str_list.append(notif_str)
+
+        if nagios_return:
+            return return_str_list
+        else:
+            return "Fail: could not write to the command file"
+
+    def silence_nagios(self):
+        """
+        This command is used to disable notifications for all hosts and services
+        in nagios.
+
+        This is a 'SHUT UP, NAGIOS' command
+        """
+        cmd = 'DISABLE_NOTIFICATIONS'
+        self._write_command(self._fmt_notif_str(cmd))
+
+    def unsilence_nagios(self):
+        """
+        This command is used to enable notifications for all hosts and services
+        in nagios.
+
+        This is an 'OK, NAGIOS, GO' command
+        """
+        cmd = 'ENABLE_NOTIFICATIONS'
+        self._write_command(self._fmt_notif_str(cmd))
+
+    def nagios_cmd(self, cmd):
+        """
+        This sends an arbitrary command to nagios.
+
+        It prepends the submitted time and appends a newline.
+
+        You just have to provide the properly formatted command.
+        """
+
+        pre = '[%s]' % int(time.time())
+
+        post = '\n'
+        cmdstr = '%s %s%s' % (pre, cmd, post)
+        self._write_command(cmdstr)
+
+    def act(self):
+        """
+        Figure out what you want to do from ansible, and then do the
+        needful (at the earliest).
+        """
+        # host or service downtime?
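+        # services == 'host' means the host itself, 'all' means every service
+        # on it, and anything else is a list of individual service names
+        # (split from the comma-separated 'services' option in __init__).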
+ if self.action == 'downtime': + if self.services == 'host': + self.schedule_host_downtime(self.host, minutes=self.minutes, + start=self.start) + elif self.services == 'all': + self.schedule_host_svc_downtime(self.host, minutes=self.minutes, + start=self.start) + else: + self.schedule_svc_downtime(self.host, + services=self.services, + minutes=self.minutes, + start=self.start) + + elif self.action == 'delete_downtime': + if self.services == 'host': + self.delete_host_downtime(self.host) + elif self.services == 'all': + self.delete_host_downtime(self.host, comment='') + else: + self.delete_host_downtime(self.host, services=self.services) + + elif self.action == "servicegroup_host_downtime": + if self.servicegroup: + self.schedule_servicegroup_host_downtime(servicegroup=self.servicegroup, minutes=self.minutes, start=self.start) + elif self.action == "servicegroup_service_downtime": + if self.servicegroup: + self.schedule_servicegroup_svc_downtime(servicegroup=self.servicegroup, minutes=self.minutes, start=self.start) + + # toggle the host AND service alerts + elif self.action == 'silence': + self.silence_host(self.host) + + elif self.action == 'unsilence': + self.unsilence_host(self.host) + + # toggle host/svc alerts + elif self.action == 'enable_alerts': + if self.services == 'host': + self.enable_host_notifications(self.host) + elif self.services == 'all': + self.enable_host_svc_notifications(self.host) + else: + self.enable_svc_notifications(self.host, + services=self.services) + + elif self.action == 'disable_alerts': + if self.services == 'host': + self.disable_host_notifications(self.host) + elif self.services == 'all': + self.disable_host_svc_notifications(self.host) + else: + self.disable_svc_notifications(self.host, + services=self.services) + elif self.action == 'silence_nagios': + self.silence_nagios() + + elif self.action == 'unsilence_nagios': + self.unsilence_nagios() + + elif self.action == 'command': + self.nagios_cmd(self.command) + + # wtf? + else: + self.module.fail_json(msg="unknown action specified: '%s'" % + self.action) + + self.module.exit_json(nagios_commands=self.command_results, + changed=True) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/monitoring/newrelic_deployment.py b/plugins/modules/monitoring/newrelic_deployment.py new file mode 100644 index 0000000000..1647198a82 --- /dev/null +++ b/plugins/modules/monitoring/newrelic_deployment.py @@ -0,0 +1,141 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright 2013 Matt Coddington +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: newrelic_deployment +author: "Matt Coddington (@mcodd)" +short_description: Notify newrelic about app deployments +description: + - Notify newrelic about app deployments (see https://docs.newrelic.com/docs/apm/new-relic-apm/maintenance/deployment-notifications#api) +options: + token: + description: + - API token, to place in the x-api-key header. 
+    required: true
+  app_name:
+    description:
+      - (one of app_name or application_id is required) The value of app_name in the newrelic.yml file used by the application
+    required: false
+  application_id:
+    description:
+      - (one of app_name or application_id is required) The application id, found in the URL when viewing the application in RPM
+    required: false
+  changelog:
+    description:
+      - A list of changes for this deployment
+    required: false
+  description:
+    description:
+      - Text annotation for the deployment - notes for you
+    required: false
+  revision:
+    description:
+      - A revision number (e.g., git commit SHA)
+    required: false
+  user:
+    description:
+      - The name of the user/process that triggered this deployment
+    required: false
+  appname:
+    description:
+      - Name of the application
+    required: false
+  environment:
+    description:
+      - The environment for this deployment
+    required: false
+  validate_certs:
+    description:
+      - If C(no), SSL certificates will not be validated. This should only be used
+        on personally controlled sites using self-signed certificates.
+    required: false
+    default: 'yes'
+    type: bool
+
+requirements: []
+'''
+
+EXAMPLES = '''
+- newrelic_deployment:
+    token: AAAAAA
+    app_name: myapp
+    user: ansible deployment
+    revision: '1.0'
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import fetch_url
+from ansible.module_utils.six.moves.urllib.parse import urlencode
+
+# ===========================================
+# Module execution.
+#
+
+
+def main():
+
+    module = AnsibleModule(
+        argument_spec=dict(
+            token=dict(required=True, no_log=True),
+            app_name=dict(required=False),
+            application_id=dict(required=False),
+            changelog=dict(required=False),
+            description=dict(required=False),
+            revision=dict(required=False),
+            user=dict(required=False),
+            appname=dict(required=False),
+            environment=dict(required=False),
+            validate_certs=dict(default='yes', type='bool'),
+        ),
+        required_one_of=[['app_name', 'application_id']],
+        supports_check_mode=True
+    )
+
+    # build dict of params
+    params = {}
+    if module.params["app_name"] and module.params["application_id"]:
+        module.fail_json(msg="only one of 'app_name' or 'application_id' can be set")
+
+    if module.params["app_name"]:
+        params["app_name"] = module.params["app_name"]
+    elif module.params["application_id"]:
+        params["application_id"] = module.params["application_id"]
+    else:
+        module.fail_json(msg="you must set one of 'app_name' or 'application_id'")
+
+    for item in ["changelog", "description", "revision", "user", "appname", "environment"]:
+        if module.params[item]:
+            params[item] = module.params[item]
+
+    # If we're in check mode, just exit pretending like we succeeded
+    if module.check_mode:
+        module.exit_json(changed=True)
+
+    # Send the data to New Relic
+    url = "https://rpm.newrelic.com/deployments.xml"
+    data = urlencode(params)
+    headers = {
+        'x-api-key': module.params["token"],
+    }
+    response, info = fetch_url(module, url, data=data, headers=headers)
+    if info['status'] in (200, 201):
+        module.exit_json(changed=True)
+    else:
+        module.fail_json(msg="unable to update New Relic: %s" % info['msg'])
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/monitoring/pagerduty.py b/plugins/modules/monitoring/pagerduty.py
new file mode 100644
index 0000000000..50d43caa8f
--- /dev/null
+++ b/plugins/modules/monitoring/pagerduty.py
@@ -0,0 +1,274 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+
+module: pagerduty
+short_description: Create PagerDuty maintenance windows
+description:
+    - This module will let you create PagerDuty maintenance windows
+author:
+    - "Andrew Newdigate (@suprememoocow)"
+    - "Dylan Silva (@thaumos)"
+    - "Justin Johns (!UNKNOWN)"
+    - "Bruce Pennypacker (@bpennypacker)"
+requirements:
+    - PagerDuty API access
+options:
+    state:
+        description:
+            - Create a maintenance window or get a list of ongoing windows.
+        required: true
+        choices: [ "running", "started", "ongoing", "absent" ]
+    name:
+        description:
+            - PagerDuty unique subdomain. Obsolete. It is not used with the PagerDuty REST v2 API.
+    user:
+        description:
+            - PagerDuty user ID. Obsolete. Please use I(token) for authorization.
+    token:
+        description:
+            - A PagerDuty token, generated on the PagerDuty site. It is used for authorization.
+        required: true
+    requester_id:
+        description:
+            - ID of user making the request. Only needed when creating a maintenance window.
+    service:
+        description:
+            - A comma separated list of PagerDuty service IDs.
+        aliases: [ services ]
+    window_id:
+        description:
+            - ID of the maintenance window. Only needed when deleting (I(state=absent)) a maintenance window.
+    hours:
+        description:
+            - Length of maintenance window in hours.
+        default: 1
+    minutes:
+        description:
+            - Maintenance window in minutes (this is added to the hours).
+        default: 0
+    desc:
+        description:
+            - Short description of maintenance window.
+        default: Created by Ansible
+    validate_certs:
+        description:
+            - If C(no), SSL certificates will not be validated. This should only be used
+              on personally controlled sites using self-signed certificates.
+        type: bool
+        default: 'yes'
+'''
+
+EXAMPLES = '''
+# List ongoing maintenance windows using a token
+- pagerduty:
+    name: companyabc
+    token: xxxxxxxxxxxxxx
+    state: ongoing
+
+# Create a 1 hour maintenance window for service FOO123
+- pagerduty:
+    name: companyabc
+    user: example@example.com
+    token: yourtoken
+    state: running
+    service: FOO123
+
+# Create a 5 minute maintenance window for service FOO123
+- pagerduty:
+    name: companyabc
+    token: xxxxxxxxxxxxxx
+    hours: 0
+    minutes: 5
+    state: running
+    service: FOO123
+
+
+# Create a 4 hour maintenance window for service FOO123 with the description "deployment".
+- pagerduty:
+    name: companyabc
+    user: example@example.com
+    state: running
+    service: FOO123
+    hours: 4
+    desc: deployment
+  register: pd_window
+
+# Delete the previous maintenance window
+- pagerduty:
+    name: companyabc
+    user: example@example.com
+    state: absent
+    window_id: '{{ pd_window.result.maintenance_window.id }}'
+
+# Delete a maintenance window in a playbook other than the one that created it, if it is the only existing maintenance window.
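+# (Editor's note: the first task below registers the list of ongoing windows;
+# the second deletes the window found at index 0 of that registered list.)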
+- pagerduty: + requester_id: XXXXXXX + token: yourtoken + state: ongoing + register: pd_window + +- pagerduty: + requester_id: XXXXXXX + token: yourtoken + state: absent + window_id: "{{ pd_window.result.maintenance_windows[0].id }}" + +''' + +import datetime +import json +import base64 + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.urls import fetch_url +from ansible.module_utils._text import to_bytes + + +class PagerDutyRequest(object): + def __init__(self, module, name, user, token): + self.module = module + self.name = name + self.user = user + self.token = token + self.headers = { + 'Content-Type': 'application/json', + "Authorization": self._auth_header(), + 'Accept': 'application/vnd.pagerduty+json;version=2' + } + + def ongoing(self, http_call=fetch_url): + url = "https://api.pagerduty.com/maintenance_windows?filter=ongoing" + headers = dict(self.headers) + + response, info = http_call(self.module, url, headers=headers) + if info['status'] != 200: + self.module.fail_json(msg="failed to lookup the ongoing window: %s" % info['msg']) + + json_out = self._read_response(response) + + return False, json_out, False + + def create(self, requester_id, service, hours, minutes, desc, http_call=fetch_url): + if not requester_id: + self.module.fail_json(msg="requester_id is required when maintenance window should be created") + + url = 'https://api.pagerduty.com/maintenance_windows' + + headers = dict(self.headers) + headers.update({'From': requester_id}) + + start, end = self._compute_start_end_time(hours, minutes) + services = self._create_services_payload(service) + + request_data = {'maintenance_window': {'start_time': start, 'end_time': end, 'description': desc, 'services': services}} + + data = json.dumps(request_data) + response, info = http_call(self.module, url, data=data, headers=headers, method='POST') + if info['status'] != 201: + self.module.fail_json(msg="failed to create the window: %s" % info['msg']) + + json_out = self._read_response(response) + + return False, json_out, True + + def _create_services_payload(self, service): + if (isinstance(service, list)): + return [{'id': s, 'type': 'service_reference'} for s in service] + else: + return [{'id': service, 'type': 'service_reference'}] + + def _compute_start_end_time(self, hours, minutes): + now = datetime.datetime.utcnow() + later = now + datetime.timedelta(hours=int(hours), minutes=int(minutes)) + start = now.strftime("%Y-%m-%dT%H:%M:%SZ") + end = later.strftime("%Y-%m-%dT%H:%M:%SZ") + return start, end + + def absent(self, window_id, http_call=fetch_url): + url = "https://api.pagerduty.com/maintenance_windows/" + window_id + headers = dict(self.headers) + + response, info = http_call(self.module, url, headers=headers, method='DELETE') + if info['status'] != 204: + self.module.fail_json(msg="failed to delete the window: %s" % info['msg']) + + json_out = self._read_response(response) + + return False, json_out, True + + def _auth_header(self): + return "Token token=%s" % self.token + + def _read_response(self, response): + try: + return json.loads(response.read()) + except Exception: + return "" + + +def main(): + + module = AnsibleModule( + argument_spec=dict( + state=dict(required=True, choices=['running', 'started', 'ongoing', 'absent']), + name=dict(required=False), + user=dict(required=False), + token=dict(required=True, no_log=True), + service=dict(required=False, type='list', aliases=["services"]), + window_id=dict(required=False), + requester_id=dict(required=False), + 
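+            # (Editor's note -- worked example for _compute_start_end_time()
+            # above, not part of the original module: with hours=1, minutes=30
+            # and utcnow() at 2020-03-09 09:00:00 UTC it returns
+            #     ('2020-03-09T09:00:00Z', '2020-03-09T10:30:00Z'),
+            # that is, now and now + timedelta(hours=1, minutes=30), both
+            # rendered with strftime('%Y-%m-%dT%H:%M:%SZ').)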
hours=dict(default='1', required=False),
+            minutes=dict(default='0', required=False),
+            desc=dict(default='Created by Ansible', required=False),
+            validate_certs=dict(default='yes', type='bool'),
+        )
+    )
+
+    state = module.params['state']
+    name = module.params['name']
+    user = module.params['user']
+    service = module.params['service']
+    window_id = module.params['window_id']
+    hours = module.params['hours']
+    minutes = module.params['minutes']
+    token = module.params['token']
+    desc = module.params['desc']
+    requester_id = module.params['requester_id']
+
+    pd = PagerDutyRequest(module, name, user, token)
+
+    if state == "running" or state == "started":
+        if not service:
+            module.fail_json(msg="service not specified")
+        (rc, out, changed) = pd.create(requester_id, service, hours, minutes, desc)
+        if rc == 0:
+            changed = True
+
+    if state == "ongoing":
+        (rc, out, changed) = pd.ongoing()
+
+    if state == "absent":
+        (rc, out, changed) = pd.absent(window_id)
+
+    if rc != 0:
+        module.fail_json(msg="failed", result=out)
+
+    module.exit_json(msg="success", result=out, changed=changed)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/monitoring/pagerduty_alert.py b/plugins/modules/monitoring/pagerduty_alert.py
new file mode 100644
index 0000000000..96f367bcd0
--- /dev/null
+++ b/plugins/modules/monitoring/pagerduty_alert.py
@@ -0,0 +1,251 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+
+module: pagerduty_alert
+short_description: Trigger, acknowledge or resolve PagerDuty incidents
+description:
+    - This module will let you trigger, acknowledge or resolve a PagerDuty incident by sending events
+author:
+    - "Amanpreet Singh (@ApsOps)"
+requirements:
+    - PagerDuty API access
+options:
+    name:
+        description:
+            - PagerDuty unique subdomain. Obsolete. It is not used with the PagerDuty REST v2 API.
+    service_id:
+        description:
+            - ID of the PagerDuty service where incidents will be triggered, acknowledged or resolved.
+        required: true
+    service_key:
+        description:
+            - The GUID of one of your "Generic API" services. Obsolete. Please use I(integration_key).
+    integration_key:
+        description:
+            - The GUID of one of your "Generic API" services.
+            - This is the "integration key" listed on the "Integrations" tab of a PagerDuty service.
+        required: true
+    state:
+        description:
+            - Type of event to be sent.
+        required: true
+        choices:
+            - 'triggered'
+            - 'acknowledged'
+            - 'resolved'
+    api_key:
+        description:
+            - The PagerDuty API key (read-only access), generated on the PagerDuty site.
+        required: true
+    desc:
+        description:
+            - For C(triggered) I(state) - Required. Short description of the problem that led to this trigger. This field (or a truncated version)
+              will be used when generating phone calls, SMS messages and alert emails. It will also appear on the incidents tables in the PagerDuty UI.
+              The maximum length is 1024 characters.
+            - For C(acknowledged) or C(resolved) I(state) - Text that will appear in the incident's log associated with this event.
+        required: false
+        default: Created via Ansible
+    incident_key:
+        description:
+            - Identifies the incident to which this I(state) should be applied.
+            - For C(triggered) I(state) - If there's no open (i.e.
unresolved) incident with this key, a new one will be created. If there's already an + open incident with a matching key, this event will be appended to that incident's log. The event key provides an easy way to "de-dup" + problem reports. + - For C(acknowledged) or C(resolved) I(state) - This should be the incident_key you received back when the incident was first opened by a + trigger event. Acknowledge events referencing resolved or nonexistent incidents will be discarded. + required: false + client: + description: + - The name of the monitoring client that is triggering this event. + required: false + client_url: + description: + - The URL of the monitoring client that is triggering this event. + required: false +''' + +EXAMPLES = ''' +# Trigger an incident with just the basic options +- pagerduty_alert: + name: companyabc + integration_key: xxx + api_key: yourapikey + service_id: PDservice + state: triggered + desc: problem that led to this trigger + +# Trigger an incident with more options +- pagerduty_alert: + integration_key: xxx + api_key: yourapikey + service_id: PDservice + state: triggered + desc: problem that led to this trigger + incident_key: somekey + client: Sample Monitoring Service + client_url: http://service.example.com + +# Acknowledge an incident based on incident_key +- pagerduty_alert: + integration_key: xxx + api_key: yourapikey + service_id: PDservice + state: acknowledged + incident_key: somekey + desc: "some text for incident's log" + +# Resolve an incident based on incident_key +- pagerduty_alert: + integration_key: xxx + api_key: yourapikey + service_id: PDservice + state: resolved + incident_key: somekey + desc: "some text for incident's log" +''' +import json + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.urls import fetch_url +from ansible.module_utils.six.moves.urllib.parse import urlparse, urlencode, urlunparse + + +def check(module, name, state, service_id, integration_key, api_key, incident_key=None, http_call=fetch_url): + url = 'https://api.pagerduty.com/incidents' + headers = { + "Content-type": "application/json", + "Authorization": "Token token=%s" % api_key, + 'Accept': 'application/vnd.pagerduty+json;version=2' + } + + params = { + 'service_ids[]': service_id, + 'sort_by': 'incident_number:desc', + 'time_zone': 'UTC' + } + if incident_key: + params['incident_key'] = incident_key + + url_parts = list(urlparse(url)) + url_parts[4] = urlencode(params, True) + + url = urlunparse(url_parts) + + response, info = http_call(module, url, method='get', headers=headers) + + if info['status'] != 200: + module.fail_json(msg="failed to check current incident status." 
+                         " Reason: %s" % info['msg'])
+
+    incidents = json.loads(response.read())["incidents"]
+    msg = "No corresponding incident"
+
+    if len(incidents) == 0:
+        if state in ('acknowledged', 'resolved'):
+            return msg, False
+        return msg, True
+    elif state != incidents[0]["status"]:
+        return incidents[0], True
+
+    return incidents[0], False
+
+
+def send_event(module, service_key, event_type, desc,
+               incident_key=None, client=None, client_url=None):
+    url = "https://events.pagerduty.com/generic/2010-04-15/create_event.json"
+    headers = {
+        "Content-type": "application/json"
+    }
+
+    data = {
+        "service_key": service_key,
+        "event_type": event_type,
+        "incident_key": incident_key,
+        "description": desc,
+        "client": client,
+        "client_url": client_url
+    }
+
+    response, info = fetch_url(module, url, method='post',
+                               headers=headers, data=json.dumps(data))
+    if info['status'] != 200:
+        module.fail_json(msg="failed to %s. Reason: %s" %
+                             (event_type, info['msg']))
+    json_out = json.loads(response.read())
+    return json_out
+
+
+def main():
+    module = AnsibleModule(
+        argument_spec=dict(
+            name=dict(required=False),
+            service_id=dict(required=True),
+            service_key=dict(required=False),
+            integration_key=dict(required=False),
+            api_key=dict(required=True),
+            state=dict(required=True,
+                       choices=['triggered', 'acknowledged', 'resolved']),
+            client=dict(required=False, default=None),
+            client_url=dict(required=False, default=None),
+            desc=dict(required=False, default='Created via Ansible'),
+            incident_key=dict(required=False, default=None)
+        ),
+        supports_check_mode=True
+    )
+
+    name = module.params['name']
+    service_id = module.params['service_id']
+    integration_key = module.params['integration_key']
+    service_key = module.params['service_key']
+    api_key = module.params['api_key']
+    state = module.params['state']
+    client = module.params['client']
+    client_url = module.params['client_url']
+    desc = module.params['desc']
+    incident_key = module.params['incident_key']
+
+    if integration_key is None:
+        if service_key is not None:
+            integration_key = service_key
+            module.warn('"service_key" is an obsolete parameter and will be removed.'
+                        ' Please use "integration_key" instead')
+        else:
+            module.fail_json(msg="'integration_key' is a required parameter")
+
+    state_event_dict = {
+        'triggered': 'trigger',
+        'acknowledged': 'acknowledge',
+        'resolved': 'resolve'
+    }
+
+    event_type = state_event_dict[state]
+
+    if event_type != 'trigger' and incident_key is None:
+        module.fail_json(msg="incident_key is required for "
+                             "acknowledge or resolve events")
+
+    out, changed = check(module, name, state, service_id,
+                         integration_key, api_key, incident_key)
+
+    if not module.check_mode and changed is True:
+        out = send_event(module, integration_key, event_type, desc,
+                         incident_key, client, client_url)
+
+    module.exit_json(result=out, changed=changed)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/monitoring/pingdom.py b/plugins/modules/monitoring/pingdom.py
new file mode 100644
index 0000000000..b24b29f2d4
--- /dev/null
+++ b/plugins/modules/monitoring/pingdom.py
@@ -0,0 +1,141 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+
+module: pingdom
+short_description: Pause/unpause Pingdom alerts
+description:
+    - This module will let you pause/unpause Pingdom alerts
+author:
+    - "Dylan Silva (@thaumos)"
+    - "Justin Johns (!UNKNOWN)"
+requirements:
+    - "This pingdom python library: https://github.com/mbabineau/pingdom-python"
+options:
+    state:
+        description:
+            - Define whether or not the check should be running or paused.
+        required: true
+        choices: [ "running", "paused", "started", "stopped" ]
+    checkid:
+        description:
+            - Pingdom ID of the check.
+        required: true
+    uid:
+        description:
+            - Pingdom user ID.
+        required: true
+    passwd:
+        description:
+            - Pingdom user password.
+        required: true
+    key:
+        description:
+            - Pingdom API key.
+        required: true
+notes:
+    - This module does not yet support adding or removing checks.
+'''
+
+EXAMPLES = '''
+# Pause the check with the ID of 12345.
+- pingdom:
+    uid: example@example.com
+    passwd: password123
+    key: apipassword123
+    checkid: 12345
+    state: paused
+
+# Unpause the check with the ID of 12345.
+- pingdom: + uid: example@example.com + passwd: password123 + key: apipassword123 + checkid: 12345 + state: running +''' + +import traceback + +PINGDOM_IMP_ERR = None +try: + import pingdom + HAS_PINGDOM = True +except Exception: + PINGDOM_IMP_ERR = traceback.format_exc() + HAS_PINGDOM = False + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib + + +def pause(checkid, uid, passwd, key): + + c = pingdom.PingdomConnection(uid, passwd, key) + c.modify_check(checkid, paused=True) + check = c.get_check(checkid) + name = check.name + result = check.status + # if result != "paused": # api output buggy - accept raw exception for now + # return (True, name, result) + return (False, name, result) + + +def unpause(checkid, uid, passwd, key): + + c = pingdom.PingdomConnection(uid, passwd, key) + c.modify_check(checkid, paused=False) + check = c.get_check(checkid) + name = check.name + result = check.status + # if result != "up": # api output buggy - accept raw exception for now + # return (True, name, result) + return (False, name, result) + + +def main(): + + module = AnsibleModule( + argument_spec=dict( + state=dict(required=True, choices=['running', 'paused', 'started', 'stopped']), + checkid=dict(required=True), + uid=dict(required=True), + passwd=dict(required=True, no_log=True), + key=dict(required=True) + ) + ) + + if not HAS_PINGDOM: + module.fail_json(msg=missing_required_lib("pingdom"), exception=PINGDOM_IMP_ERR) + + checkid = module.params['checkid'] + state = module.params['state'] + uid = module.params['uid'] + passwd = module.params['passwd'] + key = module.params['key'] + + if (state == "paused" or state == "stopped"): + (rc, name, result) = pause(checkid, uid, passwd, key) + + if (state == "running" or state == "started"): + (rc, name, result) = unpause(checkid, uid, passwd, key) + + if rc != 0: + module.fail_json(checkid=checkid, name=name, status=result) + + module.exit_json(checkid=checkid, name=name, status=result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/monitoring/rollbar_deployment.py b/plugins/modules/monitoring/rollbar_deployment.py new file mode 100644 index 0000000000..acc85de7e3 --- /dev/null +++ b/plugins/modules/monitoring/rollbar_deployment.py @@ -0,0 +1,141 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright 2014, Max Riveiro, +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: rollbar_deployment +author: "Max Riveiro (@kavu)" +short_description: Notify Rollbar about app deployments +description: + - Notify Rollbar about app deployments + (see https://rollbar.com/docs/deploys_other/) +options: + token: + description: + - Your project access token. + required: true + environment: + description: + - Name of the environment being deployed, e.g. 'production'. + required: true + revision: + description: + - Revision number/sha being deployed. + required: true + user: + description: + - User who deployed. + required: false + rollbar_user: + description: + - Rollbar username of the user who deployed. + required: false + comment: + description: + - Deploy comment (e.g. what is being deployed). + required: false + url: + description: + - Optional URL to submit the notification to. 
+ required: false + default: 'https://api.rollbar.com/api/1/deploy/' + validate_certs: + description: + - If C(no), SSL certificates for the target url will not be validated. + This should only be used on personally controlled sites using + self-signed certificates. + required: false + default: 'yes' + type: bool +''' + +EXAMPLES = ''' + - name: Rollbar deployment notification + rollbar_deployment: + token: AAAAAA + environment: staging + user: ansible + revision: '4.2' + rollbar_user: admin + comment: Test Deploy + + - name: Notify rollbar about current git revision deployment by current user + rollbar_deployment: + token: "{{ rollbar_access_token }}" + environment: production + revision: "{{ lookup('pipe', 'git rev-parse HEAD') }}" + user: "{{ lookup('env', 'USER') }}" +''' +import traceback + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.six.moves.urllib.parse import urlencode +from ansible.module_utils._text import to_native +from ansible.module_utils.urls import fetch_url + + +def main(): + + module = AnsibleModule( + argument_spec=dict( + token=dict(required=True), + environment=dict(required=True), + revision=dict(required=True), + user=dict(required=False), + rollbar_user=dict(required=False), + comment=dict(required=False), + url=dict( + required=False, + default='https://api.rollbar.com/api/1/deploy/' + ), + validate_certs=dict(default='yes', type='bool'), + ), + supports_check_mode=True + ) + + if module.check_mode: + module.exit_json(changed=True) + + params = dict( + access_token=module.params['token'], + environment=module.params['environment'], + revision=module.params['revision'] + ) + + if module.params['user']: + params['local_username'] = module.params['user'] + + if module.params['rollbar_user']: + params['rollbar_username'] = module.params['rollbar_user'] + + if module.params['comment']: + params['comment'] = module.params['comment'] + + url = module.params.get('url') + + try: + data = urlencode(params) + response, info = fetch_url(module, url, data=data, method='POST') + except Exception as e: + module.fail_json(msg='Unable to notify Rollbar: %s' % to_native(e), exception=traceback.format_exc()) + else: + if info['status'] == 200: + module.exit_json(changed=True) + else: + module.fail_json(msg='HTTP result code: %d connecting to %s' % (info['status'], url)) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/monitoring/sensu/sensu_check.py b/plugins/modules/monitoring/sensu/sensu_check.py new file mode 100644 index 0000000000..31f0a1d251 --- /dev/null +++ b/plugins/modules/monitoring/sensu/sensu_check.py @@ -0,0 +1,358 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2014, Anders Ingemann +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: sensu_check +short_description: Manage Sensu checks +description: + - Manage the checks that should be run on a machine by I(Sensu). + - Most options do not have a default and will not be added to the check definition unless specified. + - All defaults except I(path), I(state), I(backup) and I(metric) are not managed by this module, + - they are simply specified for your convenience. 
+options:
+  name:
+    description:
+      - The name of the check
+      - This is the key that is used to determine whether a check exists
+    required: true
+  state:
+    description:
+      - Whether the check should be present or not
+    choices: [ 'present', 'absent' ]
+    default: present
+  path:
+    description:
+      - Path to the json file of the check to be added/removed.
+      - Will be created if it does not exist (unless I(state=absent)).
+      - The parent folders need to exist when I(state=present), otherwise an error will be thrown
+    default: /etc/sensu/conf.d/checks.json
+  backup:
+    description:
+      - Create a backup file (if yes), including the timestamp information so
+        you can get the original file back if you somehow clobbered it incorrectly.
+    type: bool
+    default: 'no'
+  command:
+    description:
+      - Path to the sensu check to run (not required when I(state=absent))
+  handlers:
+    description:
+      - List of handlers to notify when the check fails
+    default: []
+  subscribers:
+    description:
+      - List of subscribers/channels this check should run for
+      - See sensu_subscription to subscribe a machine to a channel
+    default: []
+  interval:
+    description:
+      - Check interval in seconds
+  timeout:
+    description:
+      - Timeout for the check
+    default: 10
+  ttl:
+    description:
+      - Time to live in seconds until the check is considered stale
+  handle:
+    description:
+      - Whether the check should be handled or not
+    type: bool
+    default: 'yes'
+  subdue_begin:
+    description:
+      - When to disable handling of check failures
+  subdue_end:
+    description:
+      - When to enable handling of check failures
+  dependencies:
+    description:
+      - Other checks this check depends on. If a dependency fails,
+        handling of this check will be disabled.
+    default: []
+  metric:
+    description:
+      - Whether the check is a metric
+    type: bool
+    default: 'no'
+  standalone:
+    description:
+      - Whether the check should be scheduled by the sensu client or server
+      - This option obviates the need for specifying the I(subscribers) option
+    type: bool
+    default: 'no'
+  publish:
+    description:
+      - Whether the check should be scheduled at all.
+      - You can still issue it via the Sensu API
+    type: bool
+    default: 'yes'
+  occurrences:
+    description:
+      - Number of event occurrences before the handler should take action
+    default: 1
+  refresh:
+    description:
+      - Number of seconds handlers should wait before taking a second action
+  aggregate:
+    description:
+      - Classifies the check as an aggregate check,
+        making it available via the aggregate API
+    type: bool
+    default: 'no'
+  low_flap_threshold:
+    description:
+      - The low threshold for flap detection
+  high_flap_threshold:
+    description:
+      - The high threshold for flap detection
+  custom:
+    description:
+      - A hash/dictionary of custom parameters for merging into the configuration.
+      - You cannot overwrite standard module parameters using this
+    default: {}
+  source:
+    description:
+      - The check source, used to create a JIT Sensu client for an external resource (e.g. a network switch).
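+# (Editor's sketch with hypothetical values, not part of the original docs:
+# a run with name=nginx_running, command=/etc/sensu/plugins/processes/check-procs.rb
+# and interval=60 merges this entry into the JSON file at I(path):
+#
+#   {"checks": {"nginx_running": {"command": "/etc/sensu/plugins/processes/check-procs.rb",
+#                                 "interval": 60}}}
+#
+# Managed options that are left unset are removed from an existing entry, as
+# described above.)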
+author: "Anders Ingemann (@andsens)"
+'''
+
+EXAMPLES = '''
+# Fetch metrics about the CPU load every 60 seconds,
+# the sensu server has a handler called 'relay' which forwards stats to graphite
+- name: get cpu metrics
+  sensu_check:
+    name: cpu_load
+    command: /etc/sensu/plugins/system/cpu-mpstat-metrics.rb
+    metric: yes
+    handlers: relay
+    subscribers: common
+    interval: 60
+
+# Check whether nginx is running
+- name: check nginx process
+  sensu_check:
+    name: nginx_running
+    command: /etc/sensu/plugins/processes/check-procs.rb -f /var/run/nginx.pid
+    handlers: default
+    subscribers: nginx
+    interval: 60
+
+# Stop monitoring the disk capacity.
+# Note that the check will still show up in the sensu dashboard,
+# to remove it completely you need to issue a DELETE request to the sensu api.
+- name: check disk
+  sensu_check:
+    name: check_disk_capacity
+    state: absent
+'''
+
+import json
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+
+
+def sensu_check(module, path, name, state='present', backup=False):
+    changed = False
+    reasons = []
+
+    stream = None
+    try:
+        try:
+            stream = open(path, 'r')
+            config = json.load(stream)
+        except IOError as e:
+            if e.errno == 2:  # File not found, non-fatal
+                if state == 'absent':
+                    reasons.append('file did not exist and state is `absent\'')
+                    return changed, reasons
+                config = {}
+            else:
+                module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+        except ValueError:
+            msg = '{path} contains invalid JSON'.format(path=path)
+            module.fail_json(msg=msg)
+    finally:
+        if stream:
+            stream.close()
+
+    if 'checks' not in config:
+        if state == 'absent':
+            reasons.append('`checks\' section did not exist and state is `absent\'')
+            return changed, reasons
+        config['checks'] = {}
+        changed = True
+        reasons.append('`checks\' section did not exist')
+
+    if state == 'absent':
+        if name in config['checks']:
+            del config['checks'][name]
+            changed = True
+            reasons.append('check was present and state is `absent\'')
+
+    if state == 'present':
+        if name not in config['checks']:
+            check = {}
+            config['checks'][name] = check
+            changed = True
+            reasons.append('check was absent and state is `present\'')
+        else:
+            check = config['checks'][name]
+        simple_opts = ['command',
+                       'handlers',
+                       'subscribers',
+                       'interval',
+                       'timeout',
+                       'ttl',
+                       'handle',
+                       'dependencies',
+                       'standalone',
+                       'publish',
+                       'occurrences',
+                       'refresh',
+                       'aggregate',
+                       'low_flap_threshold',
+                       'high_flap_threshold',
+                       'source',
+                       ]
+        for opt in simple_opts:
+            if module.params[opt] is not None:
+                if opt not in check or check[opt] != module.params[opt]:
+                    check[opt] = module.params[opt]
+                    changed = True
+                    reasons.append('`{opt}\' did not exist or was different'.format(opt=opt))
+            else:
+                if opt in check:
+                    del check[opt]
+                    changed = True
+                    reasons.append('`{opt}\' was removed'.format(opt=opt))
+
+        if module.params['custom']:
+            # Validate that custom parameters do not collide with standard
+            # module parameters, then merge them into the check definition
+            custom_params = module.params['custom']
+            overwritten_fields = set(custom_params.keys()) & set(simple_opts + ['type', 'subdue', 'subdue_begin', 'subdue_end'])
+            if overwritten_fields:
+                msg = 'You cannot overwrite standard module parameters via "custom". You are trying to overwrite: {opt}'.format(opt=list(overwritten_fields))
+                module.fail_json(msg=msg)
+
+            for k, v in custom_params.items():
+                if k in config['checks'][name]:
+                    if not config['checks'][name][k] == v:
+                        changed = True
+                        reasons.append('`custom param {opt}\' was changed'.format(opt=k))
+                else:
+                    changed = True
+                    reasons.append('`custom param {opt}\' was added'.format(opt=k))
+                check[k] = v
+            simple_opts += custom_params.keys()
+
+        # Remove obsolete custom params
+        for opt in set(config['checks'][name].keys()) - set(simple_opts + ['type', 'subdue', 'subdue_begin', 'subdue_end']):
+            changed = True
+            reasons.append('`custom param {opt}\' was deleted'.format(opt=opt))
+            del check[opt]
+
+        if module.params['metric']:
+            if 'type' not in check or check['type'] != 'metric':
+                check['type'] = 'metric'
+                changed = True
+                reasons.append('`type\' was not defined or not `metric\'')
+        if not module.params['metric'] and 'type' in check:
+            del check['type']
+            changed = True
+            reasons.append('`type\' was defined')
+
+        if module.params['subdue_begin'] is not None and module.params['subdue_end'] is not None:
+            subdue = {'begin': module.params['subdue_begin'],
+                      'end': module.params['subdue_end'],
+                      }
+            if 'subdue' not in check or check['subdue'] != subdue:
+                check['subdue'] = subdue
+                changed = True
+                reasons.append('`subdue\' did not exist or was different')
+        else:
+            if 'subdue' in check:
+                del check['subdue']
+                changed = True
+                reasons.append('`subdue\' was removed')
+
+    if changed and not module.check_mode:
+        if backup:
+            module.backup_local(path)
+        try:
+            try:
+                stream = open(path, 'w')
+                stream.write(json.dumps(config, indent=2) + '\n')
+            except IOError as e:
+                module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+        finally:
+            if stream:
+                stream.close()
+
+    return changed, reasons
+
+
+def main():
+
+    arg_spec = {'name': {'type': 'str', 'required': True},
+                'path': {'type': 'str', 'default': '/etc/sensu/conf.d/checks.json'},
+                'state': {'type': 'str', 'default': 'present', 'choices': ['present', 'absent']},
+                'backup': {'type': 'bool', 'default': 'no'},
+                'command': {'type': 'str'},
+                'handlers': {'type': 'list'},
+                'subscribers': {'type': 'list'},
+                'interval': {'type': 'int'},
+                'timeout': {'type': 'int'},
+                'ttl': {'type': 'int'},
+                'handle': {'type': 'bool'},
+                'subdue_begin': {'type': 'str'},
+                'subdue_end': {'type': 'str'},
+                'dependencies': {'type': 'list'},
+                'metric': {'type': 'bool', 'default': 'no'},
+                'standalone': {'type': 'bool'},
+                'publish': {'type': 'bool'},
+                'occurrences': {'type': 'int'},
+                'refresh': {'type': 'int'},
+                'aggregate': {'type': 'bool'},
+                'low_flap_threshold': {'type': 'int'},
+                'high_flap_threshold': {'type': 'int'},
+                'custom': {'type': 'dict'},
+                'source': {'type': 'str'},
+                }
+
+    required_together = [['subdue_begin', 'subdue_end']]
+
+    module = AnsibleModule(argument_spec=arg_spec,
+                           required_together=required_together,
+                           supports_check_mode=True)
+    if module.params['state'] != 'absent' and module.params['command'] is None:
+        module.fail_json(msg="missing required arguments: %s" % ",".join(['command']))
+
+    path = module.params['path']
+    name = module.params['name']
+    state = module.params['state']
+    backup = module.params['backup']
+
+    changed, reasons = sensu_check(module, path, name, state, backup)
+
+    module.exit_json(path=path, changed=changed, msg='OK', name=name, reasons=reasons)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/monitoring/sensu/sensu_client.py b/plugins/modules/monitoring/sensu/sensu_client.py
new file mode 100644 index 0000000000..6053169154 --- /dev/null +++ b/plugins/modules/monitoring/sensu/sensu_client.py @@ -0,0 +1,250 @@ +#!/usr/bin/python + +# (c) 2017, Red Hat Inc. +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: sensu_client +author: "David Moreau Simard (@dmsimard)" +short_description: Manages Sensu client configuration +description: + - Manages Sensu client configuration. + - 'For more information, refer to the Sensu documentation: U(https://sensuapp.org/docs/latest/reference/clients.html)' +options: + state: + description: + - Whether the client should be present or not + choices: [ 'present', 'absent' ] + default: present + name: + description: + - A unique name for the client. The name cannot contain special characters or spaces. + default: System hostname as determined by Ruby Socket.gethostname (provided by Sensu) + address: + description: + - An address to help identify and reach the client. This is only informational, usually an IP address or hostname. + default: Non-loopback IPv4 address as determined by Ruby Socket.ip_address_list (provided by Sensu) + subscriptions: + description: + - An array of client subscriptions, a list of roles and/or responsibilities assigned to the system (e.g. webserver). + - These subscriptions determine which monitoring checks are executed by the client, as check requests are sent to subscriptions. + - The subscriptions array items must be strings. + required: True + safe_mode: + description: + - If safe mode is enabled for the client. Safe mode requires local check definitions in order to accept a check request and execute the check. + type: bool + default: 'no' + redact: + description: + - Client definition attributes to redact (values) when logging and sending client keepalives. + socket: + description: + - The socket definition scope, used to configure the Sensu client socket. + keepalives: + description: + - If Sensu should monitor keepalives for this client. + type: bool + default: 'yes' + keepalive: + description: + - The keepalive definition scope, used to configure Sensu client keepalives behavior (e.g. keepalive thresholds, etc). + registration: + description: + - The registration definition scope, used to configure Sensu registration event handlers. + deregister: + description: + - If a deregistration event should be created upon Sensu client process stop. + type: bool + default: 'no' + deregistration: + description: + - The deregistration definition scope, used to configure automated Sensu client de-registration. + ec2: + description: + - The ec2 definition scope, used to configure the Sensu Enterprise AWS EC2 integration (Sensu Enterprise users only). + chef: + description: + - The chef definition scope, used to configure the Sensu Enterprise Chef integration (Sensu Enterprise users only). + puppet: + description: + - The puppet definition scope, used to configure the Sensu Enterprise Puppet integration (Sensu Enterprise users only). + servicenow: + description: + - The servicenow definition scope, used to configure the Sensu Enterprise ServiceNow integration (Sensu Enterprise users only). 
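+# (Editor's sketch with hypothetical values: given name=web01 and
+# subscriptions=[default, webserver], main() below writes
+# /etc/sensu/conf.d/client.json as
+#
+#   {"client": {"name": "web01", "subscriptions": ["default", "webserver"],
+#               "safe_mode": false, "keepalives": true}}
+#
+# safe_mode and keepalives carry defaults, so they are always written; other
+# options left unset are omitted from the client scope.)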
+notes: + - Check mode is supported +''' + +EXAMPLES = ''' +# Minimum possible configuration +- name: Configure Sensu client + sensu_client: + subscriptions: + - default + +# With customization +- name: Configure Sensu client + sensu_client: + name: "{{ ansible_fqdn }}" + address: "{{ ansible_default_ipv4['address'] }}" + subscriptions: + - default + - webserver + redact: + - password + socket: + bind: 127.0.0.1 + port: 3030 + keepalive: + thresholds: + warning: 180 + critical: 300 + handlers: + - email + custom: + - broadcast: irc + occurrences: 3 + register: client + notify: + - Restart sensu-client + +- name: Secure Sensu client configuration file + file: + path: "{{ client['file'] }}" + owner: "sensu" + group: "sensu" + mode: "0600" + +- name: Delete the Sensu client configuration + sensu_client: + state: "absent" +''' + +RETURN = ''' +config: + description: Effective client configuration, when state is present + returned: success + type: dict + sample: {'name': 'client', 'subscriptions': ['default']} +file: + description: Path to the client configuration file + returned: success + type: str + sample: "/etc/sensu/conf.d/client.json" +''' + +import json +import os + +from ansible.module_utils.basic import AnsibleModule + + +def main(): + module = AnsibleModule( + supports_check_mode=True, + argument_spec=dict( + state=dict(type='str', required=False, choices=['present', 'absent'], default='present'), + name=dict(type='str', required=False), + address=dict(type='str', required=False), + subscriptions=dict(type='list', required=False), + safe_mode=dict(type='bool', required=False, default=False), + redact=dict(type='list', required=False), + socket=dict(type='dict', required=False), + keepalives=dict(type='bool', required=False, default=True), + keepalive=dict(type='dict', required=False), + registration=dict(type='dict', required=False), + deregister=dict(type='bool', required=False), + deregistration=dict(type='dict', required=False), + ec2=dict(type='dict', required=False), + chef=dict(type='dict', required=False), + puppet=dict(type='dict', required=False), + servicenow=dict(type='dict', required=False) + ), + required_if=[ + ['state', 'present', ['subscriptions']] + ] + ) + + state = module.params['state'] + path = "/etc/sensu/conf.d/client.json" + + if state == 'absent': + if os.path.exists(path): + if module.check_mode: + msg = '{path} would have been deleted'.format(path=path) + module.exit_json(msg=msg, changed=True) + else: + try: + os.remove(path) + msg = '{path} deleted successfully'.format(path=path) + module.exit_json(msg=msg, changed=True) + except OSError as e: + msg = 'Exception when trying to delete {path}: {exception}' + module.fail_json( + msg=msg.format(path=path, exception=str(e))) + else: + # Idempotency: it's okay if the file doesn't exist + msg = '{path} already does not exist'.format(path=path) + module.exit_json(msg=msg) + + # Build client configuration from module arguments + config = {'client': {}} + args = ['name', 'address', 'subscriptions', 'safe_mode', 'redact', + 'socket', 'keepalives', 'keepalive', 'registration', 'deregister', + 'deregistration', 'ec2', 'chef', 'puppet', 'servicenow'] + + for arg in args: + if arg in module.params and module.params[arg] is not None: + config['client'][arg] = module.params[arg] + + # Load the current config, if there is one, so we can compare + current_config = None + try: + current_config = json.load(open(path, 'r')) + except (IOError, ValueError): + # File either doesn't exist or it's invalid JSON + pass + + if 
current_config is not None and current_config == config: + # Config is the same, let's not change anything + module.exit_json(msg='Client configuration is already up to date', + config=config['client'], + file=path) + + # Validate that directory exists before trying to write to it + if not module.check_mode and not os.path.exists(os.path.dirname(path)): + try: + os.makedirs(os.path.dirname(path)) + except OSError as e: + module.fail_json(msg='Unable to create {0}: {1}'.format(os.path.dirname(path), + str(e))) + + if module.check_mode: + module.exit_json(msg='Client configuration would have been updated', + changed=True, + config=config['client'], + file=path) + + try: + with open(path, 'w') as client: + client.write(json.dumps(config, indent=4)) + module.exit_json(msg='Client configuration updated', + changed=True, + config=config['client'], + file=path) + except (OSError, IOError) as e: + module.fail_json(msg='Unable to write file {0}: {1}'.format(path, + str(e))) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/monitoring/sensu/sensu_handler.py b/plugins/modules/monitoring/sensu/sensu_handler.py new file mode 100644 index 0000000000..03f101ee5a --- /dev/null +++ b/plugins/modules/monitoring/sensu/sensu_handler.py @@ -0,0 +1,263 @@ +#!/usr/bin/python + +# (c) 2017, Red Hat Inc. +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: sensu_handler +author: "David Moreau Simard (@dmsimard)" +short_description: Manages Sensu handler configuration +description: + - Manages Sensu handler configuration + - 'For more information, refer to the Sensu documentation: U(https://sensuapp.org/docs/latest/reference/handlers.html)' +options: + state: + description: + - Whether the handler should be present or not + choices: [ 'present', 'absent' ] + default: present + name: + description: + - A unique name for the handler. The name cannot contain special characters or spaces. + required: True + type: + description: + - The handler type + choices: [ 'pipe', 'tcp', 'udp', 'transport', 'set' ] + required: True + filter: + description: + - The Sensu event filter (name) to use when filtering events for the handler. + filters: + description: + - An array of Sensu event filters (names) to use when filtering events for the handler. + - Each array item must be a string. + severities: + description: + - An array of check result severities the handler will handle. + - 'NOTE: event resolution bypasses this filtering.' + choices: [ 'warning', 'critical', 'unknown' ] + mutator: + description: + - The Sensu event mutator (name) to use to mutate event data for the handler. + timeout: + description: + - The handler execution duration timeout in seconds (hard stop). + - Only used by pipe and tcp handler types. + default: 10 + handle_silenced: + description: + - If events matching one or more silence entries should be handled. + type: bool + default: 'no' + handle_flapping: + description: + - If events in the flapping state should be handled. + type: bool + default: 'no' + command: + description: + - The handler command to be executed. + - The event data is passed to the process via STDIN. + - 'NOTE: the command attribute is only required for Pipe handlers (i.e. handlers configured with "type": "pipe").' 
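+# (Editor's sketch with hypothetical values: for name=irc_handler, type=pipe
+# and command=/usr/local/bin/notify-irc.sh, main() below writes
+# /etc/sensu/conf.d/handlers/irc_handler.json as
+#
+#   {"handlers": {"irc_handler": {"type": "pipe", "timeout": 10,
+#                                 "handle_silenced": false, "handle_flapping": false,
+#                                 "command": "/usr/local/bin/notify-irc.sh"}}}
+#
+# The defaulted timeout/handle_* values are written too; options left unset
+# are omitted.)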
+ socket: + description: + - The socket definition scope, used to configure the TCP/UDP handler socket. + - 'NOTE: the socket attribute is only required for TCP/UDP handlers (i.e. handlers configured with "type": "tcp" or "type": "udp").' + pipe: + description: + - The pipe definition scope, used to configure the Sensu transport pipe. + - 'NOTE: the pipe attribute is only required for Transport handlers (i.e. handlers configured with "type": "transport").' + handlers: + description: + - An array of Sensu event handlers (names) to use for events using the handler set. + - Each array item must be a string. + - 'NOTE: the handlers attribute is only required for handler sets (i.e. handlers configured with "type": "set").' +notes: + - Check mode is supported +''' + +EXAMPLES = ''' +# Configure a handler that sends event data as STDIN (pipe) +- name: Configure IRC Sensu handler + sensu_handler: + name: "irc_handler" + type: "pipe" + command: "/usr/local/bin/notify-irc.sh" + severities: + - "ok" + - "critical" + - "warning" + - "unknown" + timeout: 15 + notify: + - Restart sensu-client + - Restart sensu-server + +# Delete a handler +- name: Delete IRC Sensu handler + sensu_handler: + name: "irc_handler" + state: "absent" + +# Example of a TCP handler +- name: Configure TCP Sensu handler + sensu_handler: + name: "tcp_handler" + type: "tcp" + timeout: 30 + socket: + host: "10.0.1.99" + port: 4444 + register: handler + notify: + - Restart sensu-client + - Restart sensu-server + +- name: Secure Sensu handler configuration file + file: + path: "{{ handler['file'] }}" + owner: "sensu" + group: "sensu" + mode: "0600" +''' + +RETURN = ''' +config: + description: Effective handler configuration, when state is present + returned: success + type: dict + sample: {'name': 'irc', 'type': 'pipe', 'command': '/usr/local/bin/notify-irc.sh'} +file: + description: Path to the handler configuration file + returned: success + type: str + sample: "/etc/sensu/conf.d/handlers/irc.json" +name: + description: Name of the handler + returned: success + type: str + sample: "irc" +''' + +import json +import os + +from ansible.module_utils.basic import AnsibleModule + + +def main(): + module = AnsibleModule( + supports_check_mode=True, + argument_spec=dict( + state=dict(type='str', required=False, choices=['present', 'absent'], default='present'), + name=dict(type='str', required=True), + type=dict(type='str', required=False, choices=['pipe', 'tcp', 'udp', 'transport', 'set']), + filter=dict(type='str', required=False), + filters=dict(type='list', required=False), + severities=dict(type='list', required=False), + mutator=dict(type='str', required=False), + timeout=dict(type='int', required=False, default=10), + handle_silenced=dict(type='bool', required=False, default=False), + handle_flapping=dict(type='bool', required=False, default=False), + command=dict(type='str', required=False), + socket=dict(type='dict', required=False), + pipe=dict(type='dict', required=False), + handlers=dict(type='list', required=False), + ), + required_if=[ + ['state', 'present', ['type']], + ['type', 'pipe', ['command']], + ['type', 'tcp', ['socket']], + ['type', 'udp', ['socket']], + ['type', 'transport', ['pipe']], + ['type', 'set', ['handlers']] + ] + ) + + state = module.params['state'] + name = module.params['name'] + path = '/etc/sensu/conf.d/handlers/{0}.json'.format(name) + + if state == 'absent': + if os.path.exists(path): + if module.check_mode: + msg = '{path} would have been deleted'.format(path=path) + module.exit_json(msg=msg, 
changed=True)
+            else:
+                try:
+                    os.remove(path)
+                    msg = '{path} deleted successfully'.format(path=path)
+                    module.exit_json(msg=msg, changed=True)
+                except OSError as e:
+                    msg = 'Exception when trying to delete {path}: {exception}'
+                    module.fail_json(
+                        msg=msg.format(path=path, exception=str(e)))
+        else:
+            # Idempotency: it's okay if the file doesn't exist
+            msg = '{path} already does not exist'.format(path=path)
+            module.exit_json(msg=msg)
+
+    # Build handler configuration from module arguments
+    config = {'handlers': {name: {}}}
+    args = ['type', 'filter', 'filters', 'severities', 'mutator', 'timeout',
+            'handle_silenced', 'handle_flapping', 'command', 'socket',
+            'pipe', 'handlers']
+
+    for arg in args:
+        if arg in module.params and module.params[arg] is not None:
+            config['handlers'][name][arg] = module.params[arg]
+
+    # Load the current config, if there is one, so we can compare
+    current_config = None
+    try:
+        current_config = json.load(open(path, 'r'))
+    except (IOError, ValueError):
+        # File either doesn't exist or it's invalid JSON
+        pass
+
+    if current_config is not None and current_config == config:
+        # Config is the same, let's not change anything
+        module.exit_json(msg='Handler configuration is already up to date',
+                         config=config['handlers'][name],
+                         file=path,
+                         name=name)
+
+    # Validate that directory exists before trying to write to it
+    if not module.check_mode and not os.path.exists(os.path.dirname(path)):
+        try:
+            os.makedirs(os.path.dirname(path))
+        except OSError as e:
+            module.fail_json(msg='Unable to create {0}: {1}'.format(os.path.dirname(path),
+                                                                    str(e)))
+
+    if module.check_mode:
+        module.exit_json(msg='Handler configuration would have been updated',
+                         changed=True,
+                         config=config['handlers'][name],
+                         file=path,
+                         name=name)
+
+    try:
+        with open(path, 'w') as handler:
+            handler.write(json.dumps(config, indent=4))
+            module.exit_json(msg='Handler configuration updated',
+                             changed=True,
+                             config=config['handlers'][name],
+                             file=path,
+                             name=name)
+    except (OSError, IOError) as e:
+        module.fail_json(msg='Unable to write file {0}: {1}'.format(path,
+                                                                    str(e)))
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/monitoring/sensu/sensu_silence.py b/plugins/modules/monitoring/sensu/sensu_silence.py
new file mode 100644
index 0000000000..d6f157738f
--- /dev/null
+++ b/plugins/modules/monitoring/sensu/sensu_silence.py
@@ -0,0 +1,295 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2017, Steven Bambling
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+DOCUMENTATION = '''
+---
+module: sensu_silence
+author: Steven Bambling (@smbambling)
+short_description: Manage Sensu silence entries
+description:
+  - Create and clear (delete) silence entries via the Sensu API
+    for subscriptions and checks.
+options:
+  check:
+    description:
+      - Specifies the check which the silence entry applies to.
+  creator:
+    description:
+      - Specifies the entity responsible for this entry.
+  expire:
+    description:
+      - If specified, the silence entry will be automatically cleared
+        after this number of seconds.
+  expire_on_resolve:
+    description:
+      - If specified as true, the silence entry will be automatically
+        cleared once the condition it is silencing is resolved.
+ type: bool + reason: + description: + - If specified, this free-form string is used to provide context or + rationale for the reason this silence entry was created. + state: + description: + - Specifies to create or clear (delete) a silence entry via the Sensu API + required: true + default: present + choices: ['present', 'absent'] + subscription: + description: + - Specifies the subscription which the silence entry applies to. + - To create a silence entry for a client prepend C(client:) to client name. + Example - C(client:server1.example.dev) + required: true + default: [] + url: + description: + - Specifies the URL of the Sensu monitoring host server. + required: false + default: http://127.0.01:4567 +''' + +EXAMPLES = ''' +# Silence ALL checks for a given client +- name: Silence server1.example.dev + sensu_silence: + subscription: client:server1.example.dev + creator: "{{ ansible_user_id }}" + reason: Performing maintenance + +# Silence specific check for a client +- name: Silence CPU_Usage check for server1.example.dev + sensu_silence: + subscription: client:server1.example.dev + check: CPU_Usage + creator: "{{ ansible_user_id }}" + reason: Investigation alert issue + +# Silence multiple clients from a dict + silence: + server1.example.dev: + reason: 'Deployment in progress' + server2.example.dev: + reason: 'Deployment in progress' + +- name: Silence several clients from a dict + sensu_silence: + subscription: "client:{{ item.key }}" + reason: "{{ item.value.reason }}" + creator: "{{ ansible_user_id }}" + with_dict: "{{ silence }}" +''' + +RETURN = ''' +''' + +import json + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.urls import fetch_url + + +def query(module, url, check, subscription): + headers = { + 'Content-Type': 'application/json', + } + + url = url + '/silenced' + + request_data = { + 'check': check, + 'subscription': subscription, + } + + # Remove keys with None value + for k, v in dict(request_data).items(): + if v is None: + del request_data[k] + + response, info = fetch_url( + module, url, method='GET', + headers=headers, data=json.dumps(request_data) + ) + + if info['status'] == 500: + module.fail_json( + msg="Failed to query silence %s. Reason: %s" % (subscription, info) + ) + + try: + json_out = json.loads(response.read()) + except Exception: + json_out = "" + + return False, json_out, False + + +def clear(module, url, check, subscription): + # Test if silence exists before clearing + (rc, out, changed) = query(module, url, check, subscription) + + d = dict((i['subscription'], i['check']) for i in out) + subscription_exists = subscription in d + if check and subscription_exists: + exists = (check == d[subscription]) + else: + exists = subscription_exists + + # If check/subscription doesn't exist + # exit with changed state of False + if not exists: + return False, out, changed + + # module.check_mode is inherited from the AnsibleMOdule class + if not module.check_mode: + headers = { + 'Content-Type': 'application/json', + } + + url = url + '/silenced/clear' + + request_data = { + 'check': check, + 'subscription': subscription, + } + + # Remove keys with None value + for k, v in dict(request_data).items(): + if v is None: + del request_data[k] + + response, info = fetch_url( + module, url, method='POST', + headers=headers, data=json.dumps(request_data) + ) + + if info['status'] != 204: + module.fail_json( + msg="Failed to silence %s. 
+        try:
+            json_out = json.loads(response.read())
+        except Exception:
+            json_out = ""
+
+        return False, json_out, True
+    return False, out, True
+
+
+def create(
+        module, url, check, creator, expire,
+        expire_on_resolve, reason, subscription):
+    (rc, out, changed) = query(module, url, check, subscription)
+    for i in out:
+        if (i['subscription'] == subscription):
+            if (
+                (check is None or check == i['check']) and
+                (
+                    creator == '' or
+                    creator == i['creator']) and
+                (
+                    reason == '' or
+                    reason == i['reason']) and
+                (
+                    expire is None or expire == i['expire']) and
+                (
+                    expire_on_resolve is None or
+                    expire_on_resolve == i['expire_on_resolve']
+                )
+            ):
+                return False, out, False
+
+    # module.check_mode is inherited from the AnsibleModule class
+    if not module.check_mode:
+        headers = {
+            'Content-Type': 'application/json',
+        }
+
+        url = url + '/silenced'
+
+        request_data = {
+            'check': check,
+            'creator': creator,
+            'expire': expire,
+            'expire_on_resolve': expire_on_resolve,
+            'reason': reason,
+            'subscription': subscription,
+        }
+
+        # Remove keys with None value
+        for k, v in dict(request_data).items():
+            if v is None:
+                del request_data[k]
+
+        response, info = fetch_url(
+            module, url, method='POST',
+            headers=headers, data=json.dumps(request_data)
+        )
+
+        if info['status'] != 201:
+            module.fail_json(
+                msg="Failed to silence %s. Reason: %s" %
+                (subscription, info['msg'])
+            )
+
+        try:
+            json_out = json.loads(response.read())
+        except Exception:
+            json_out = ""
+
+        return False, json_out, True
+    return False, out, True
+
+
+def main():
+    module = AnsibleModule(
+        argument_spec=dict(
+            check=dict(required=False),
+            creator=dict(required=False),
+            expire=dict(type='int', required=False),
+            expire_on_resolve=dict(type='bool', required=False),
+            reason=dict(required=False),
+            state=dict(default='present', choices=['present', 'absent']),
+            subscription=dict(required=True),
+            url=dict(required=False, default='http://127.0.0.1:4567'),
+        ),
+        supports_check_mode=True
+    )
+
+    url = module.params['url']
+    check = module.params['check']
+    creator = module.params['creator']
+    expire = module.params['expire']
+    expire_on_resolve = module.params['expire_on_resolve']
+    reason = module.params['reason']
+    subscription = module.params['subscription']
+    state = module.params['state']
+
+    if state == 'present':
+        (rc, out, changed) = create(
+            module, url, check, creator,
+            expire, expire_on_resolve, reason, subscription
+        )
+
+    if state == 'absent':
+        (rc, out, changed) = clear(module, url, check, subscription)
+
+    if rc != 0:
+        module.fail_json(msg="failed", result=out)
+    module.exit_json(msg="success", result=out, changed=changed)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/monitoring/sensu/sensu_subscription.py b/plugins/modules/monitoring/sensu/sensu_subscription.py
new file mode 100644
index 0000000000..5e3545315a
--- /dev/null
+++ b/plugins/modules/monitoring/sensu/sensu_subscription.py
@@ -0,0 +1,154 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2014, Anders Ingemann
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: sensu_subscription
+short_description: Manage Sensu subscriptions
+description:
+  - Manage which I(sensu channels) a machine should subscribe to
+options:
+  name:
+    description:
+      - The name of the channel
+    required: true
+  state:
+    description:
+      - Whether the machine should subscribe or unsubscribe from the channel
+    choices: [ 'present', 'absent' ]
+    required: false
+    default: present
+  path:
+    description:
+      - Path to the subscriptions json file
+    required: false
+    default: /etc/sensu/conf.d/subscriptions.json
+  backup:
+    description:
+      - Create a backup file (if yes), including the timestamp information so you
+        can get the original file back if you somehow clobbered it incorrectly.
+    type: bool
+    required: false
+    default: no
+requirements: [ ]
+author: Anders Ingemann (@andsens)
+'''

+RETURN = '''
+reasons:
+    description: the reasons why the module changed or did not change something
+    returned: success
+    type: list
+    sample: ["channel subscription was absent and state is `present'"]
+'''
+
+EXAMPLES = '''
+# Subscribe to the nginx channel
+- name: subscribe to nginx checks
+  sensu_subscription: name=nginx
+
+# Unsubscribe from the common checks channel
+- name: unsubscribe from common checks
+  sensu_subscription: name=common state=absent
+'''
+
+import json
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+
+
+def sensu_subscription(module, path, name, state='present', backup=False):
+    changed = False
+    reasons = []
+
+    try:
+        config = json.load(open(path))
+    except IOError as e:
+        if e.errno == 2:  # File not found, non-fatal
+            if state == 'absent':
+                reasons.append('file did not exist and state is `absent\'')
+                return changed, reasons
+            config = {}
+        else:
+            module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+    except ValueError:
+        msg = '{path} contains invalid JSON'.format(path=path)
+        module.fail_json(msg=msg)
+
+    if 'client' not in config:
+        if state == 'absent':
+            reasons.append('`client\' did not exist and state is `absent\'')
+            return changed, reasons
+        config['client'] = {}
+        changed = True
+        reasons.append('`client\' did not exist')
+
+    if 'subscriptions' not in config['client']:
+        if state == 'absent':
+            reasons.append('`client.subscriptions\' did not exist and state is `absent\'')
+            return changed, reasons
+        config['client']['subscriptions'] = []
+        changed = True
+        reasons.append('`client.subscriptions\' did not exist')
+
+    if name not in config['client']['subscriptions']:
+        if state == 'absent':
+            reasons.append('channel subscription was absent')
+            return changed, reasons
+        config['client']['subscriptions'].append(name)
+        changed = True
+        reasons.append('channel subscription was absent and state is `present\'')
+    else:
+        if state == 'absent':
+            config['client']['subscriptions'].remove(name)
+            changed = True
+            reasons.append('channel subscription was present and state is `absent\'')
+
+    if changed and not module.check_mode:
+        if backup:
+            module.backup_local(path)
+        try:
+            open(path, 'w').write(json.dumps(config, indent=2) + '\n')
+        except IOError as e:
+            module.fail_json(msg='Failed to write to file %s: %s' % (path, to_native(e)),
+                             exception=traceback.format_exc())
+
+    return changed, reasons
+
+
+def main():
+    arg_spec = {'name': {'type': 'str', 'required': True},
+                'path': {'type': 'str', 'default': '/etc/sensu/conf.d/subscriptions.json'},
+                'state': {'type': 'str', 'default': 'present', 'choices': ['present', 'absent']},
+                'backup': {'type': 'bool', 'default': 'no'},
+                }
+
+    module = AnsibleModule(argument_spec=arg_spec,
+                           supports_check_mode=True)
+
+    path = module.params['path']
+    name =
module.params['name'] + state = module.params['state'] + backup = module.params['backup'] + + changed, reasons = sensu_subscription(module, path, name, state, backup) + + module.exit_json(path=path, name=name, changed=changed, msg='OK', reasons=reasons) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/monitoring/spectrum_device.py b/plugins/modules/monitoring/spectrum_device.py new file mode 100644 index 0000000000..cf02820809 --- /dev/null +++ b/plugins/modules/monitoring/spectrum_device.py @@ -0,0 +1,328 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2016, Renato Orgito +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: spectrum_device +short_description: Creates/deletes devices in CA Spectrum. +description: + - This module allows you to create and delete devices in CA Spectrum U(https://www.ca.com/us/products/ca-spectrum.html). + - Tested on CA Spectrum 9.4.2, 10.1.1 and 10.2.1 +author: "Renato Orgito (@orgito)" +options: + device: + aliases: [ host, name ] + required: true + description: + - IP address of the device. + - If a hostname is given, it will be resolved to the IP address. + community: + description: + - SNMP community used for device discovery. + - Required when C(state=present). + landscape: + required: true + description: + - Landscape handle of the SpectroServer to which add or remove the device. + state: + required: false + description: + - On C(present) creates the device when it does not exist. + - On C(absent) removes the device when it exists. + choices: ['present', 'absent'] + default: 'present' + url: + aliases: [ oneclick_url ] + required: true + description: + - HTTP, HTTPS URL of the Oneclick server in the form (http|https)://host.domain[:port] + url_username: + aliases: [ oneclick_user ] + required: true + description: + - Oneclick user name. + url_password: + aliases: [ oneclick_password ] + required: true + description: + - Oneclick user password. + use_proxy: + required: false + description: + - if C(no), it will not use a proxy, even if one is defined in an environment + variable on the target hosts. + default: 'yes' + type: bool + validate_certs: + required: false + description: + - If C(no), SSL certificates will not be validated. This should only be used + on personally controlled sites using self-signed certificates. + default: 'yes' + type: bool + agentport: + required: false + description: + - UDP port used for SNMP discovery. + default: 161 +notes: + - The devices will be created inside the I(Universe) container of the specified landscape. + - All the operations will be performed only on the specified landscape. 
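+    - The I(landscape) handle is given as a hexadecimal string such as C(0x100000); the module parses it with base 16 (see the examples below).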
+'''
+
+EXAMPLES = '''
+- name: Add device to CA Spectrum
+  local_action:
+    module: spectrum_device
+    device: '{{ ansible_host }}'
+    community: secret
+    landscape: '0x100000'
+    oneclick_url: http://oneclick.example.com:8080
+    oneclick_user: username
+    oneclick_password: password
+    state: present
+
+
+- name: Remove device from CA Spectrum
+  local_action:
+    module: spectrum_device
+    device: '{{ ansible_host }}'
+    landscape: '{{ landscape_handle }}'
+    oneclick_url: http://oneclick.example.com:8080
+    oneclick_user: username
+    oneclick_password: password
+    use_proxy: no
+    state: absent
+'''
+
+RETURN = '''
+device:
+    description: device data when state = present
+    returned: success
+    type: dict
+    sample: {'model_handle': '0x1007ab', 'landscape': '0x100000', 'address': '10.10.5.1'}
+'''
+
+from socket import gethostbyname, gaierror
+import xml.etree.ElementTree as ET
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import fetch_url
+
+
+def request(resource, xml=None, method=None):
+    headers = {
+        "Content-Type": "application/xml",
+        "Accept": "application/xml"
+    }
+
+    url = module.params['oneclick_url'] + '/spectrum/restful/' + resource
+
+    response, info = fetch_url(module, url, data=xml, method=method, headers=headers, timeout=45)
+
+    if info['status'] == 401:
+        module.fail_json(msg="failed to authenticate to Oneclick server")
+
+    if info['status'] not in (200, 201, 204):
+        module.fail_json(msg=info['msg'])
+
+    return response.read()
+
+
+def post(resource, xml=None):
+    return request(resource, xml=xml, method='POST')
+
+
+def delete(resource):
+    return request(resource, xml=None, method='DELETE')
+
+
+def get_ip():
+    try:
+        device_ip = gethostbyname(module.params.get('device'))
+    except gaierror:
+        module.fail_json(msg="failed to resolve device ip address for '%s'" % module.params.get('device'))
+
+    return device_ip
+
+
+def get_device(device_ip):
+    """Query OneClick for the device using the IP Address"""
+    resource = '/models'
+    landscape_min = "0x%x" % int(module.params.get('landscape'), 16)
+    landscape_max = "0x%x" % (int(module.params.get('landscape'), 16) + 0x100000)
+
+    # Spectrum RESTful models-search request: find device models by IP address,
+    # restricted to model handles within the target landscape's range.
+    xml = """<?xml version="1.0" encoding="UTF-8"?>
+    <rs:model-request throttlesize="5"
+    xmlns:rs="http://www.ca.com/spectrum/restful/schema/request"
+    xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+    xsi:schemaLocation="http://www.ca.com/spectrum/restful/schema/request ../../../xsd/Request.xsd">
+        <rs:target-models>
+            <rs:models-search>
+                <rs:search-criteria xmlns="http://www.ca.com/spectrum/restful/schema/filter">
+                    <action-models>
+                        <filtered-models>
+                            <and>
+                                <equals>
+                                    <model-type>SearchManager</model-type>
+                                </equals>
+                                <greater-than>
+                                    <attribute id="0x129fa">
+                                        <value>{mh_min}</value>
+                                    </attribute>
+                                </greater-than>
+                                <less-than>
+                                    <attribute id="0x129fa">
+                                        <value>{mh_max}</value>
+                                    </attribute>
+                                </less-than>
+                            </and>
+                        </filtered-models>
+                        <action>FIND_DEV_MODELS_BY_IP</action>
+                        <attribute id="0x12d7f">
+                            <value>{search_ip}</value>
+                        </attribute>
+                    </action-models>
+                </rs:search-criteria>
+                <rs:requested-attribute id="0x12d7f"/>
+            </rs:models-search>
+        </rs:target-models>
+    </rs:model-request>
+    """.format(search_ip=device_ip, mh_min=landscape_min, mh_max=landscape_max)
+
+    result = post(resource, xml=xml)
+
+    root = ET.fromstring(result)
+
+    if root.get('total-models') == '0':
+        return None
+
+    namespace = dict(ca='http://www.ca.com/spectrum/restful/schema/response')
+
+    # get the first device
+    model = root.find('ca:model-responses', namespace).find('ca:model', namespace)
+
+    if model.get('error'):
+        module.fail_json(msg="error checking device: %s" % model.get('error'))
+
+    # get the attributes
+    model_handle = model.get('mh')
+
+    model_address = model.find('./*[@id="0x12d7f"]').text
+
+    # derive the landscape handle from the model handle of the device
+    model_landscape = "0x%x" % int(int(model_handle, 16) // 0x100000 * 0x100000)
+
+    device = dict(
+        model_handle=model_handle,
+        address=model_address,
+        landscape=model_landscape)
+
+    return device
+
+
+def add_device():
+    device_ip = get_ip()
+    device = get_device(device_ip)
+
+    if device:
+        module.exit_json(changed=False, device=device)
+
+    if module.check_mode:
+        device = dict(
+            model_handle=None,
+            address=device_ip,
+            landscape="0x%x" % int(module.params.get('landscape'), 16))
+        module.exit_json(changed=True, device=device)
+
+    resource = 'model?ipaddress=' + device_ip +
'&commstring=' + module.params.get('community') + resource += '&landscapeid=' + module.params.get('landscape') + + if module.params.get('agentport', None): + resource += '&agentport=' + str(module.params.get('agentport', 161)) + + result = post(resource) + root = ET.fromstring(result) + + if root.get('error') != 'Success': + module.fail_json(msg=root.get('error-message')) + + namespace = dict(ca='http://www.ca.com/spectrum/restful/schema/response') + model = root.find('ca:model', namespace) + + model_handle = model.get('mh') + model_landscape = "0x%x" % int(int(model_handle, 16) // 0x100000 * 0x100000) + + device = dict( + model_handle=model_handle, + address=device_ip, + landscape=model_landscape, + ) + + module.exit_json(changed=True, device=device) + + +def remove_device(): + device_ip = get_ip() + device = get_device(device_ip) + + if device is None: + module.exit_json(changed=False) + + if module.check_mode: + module.exit_json(changed=True) + + resource = '/model/' + device['model_handle'] + result = delete(resource) + + root = ET.fromstring(result) + + namespace = dict(ca='http://www.ca.com/spectrum/restful/schema/response') + error = root.find('ca:error', namespace).text + + if error != 'Success': + error_message = root.find('ca:error-message', namespace).text + module.fail_json(msg="%s %s" % (error, error_message)) + + module.exit_json(changed=True) + + +def main(): + global module + module = AnsibleModule( + argument_spec=dict( + device=dict(required=True, aliases=['host', 'name']), + landscape=dict(required=True), + state=dict(choices=['present', 'absent'], default='present'), + community=dict(required=True, no_log=True), + agentport=dict(type='int', default=161), + url=dict(required=True, aliases=['oneclick_url']), + url_username=dict(required=True, aliases=['oneclick_user']), + url_password=dict(required=True, no_log=True, aliases=['oneclick_password']), + use_proxy=dict(type='bool', default='yes'), + validate_certs=dict(type='bool', default='yes'), + ), + required_if=[('state', 'present', ['community'])], + supports_check_mode=True + ) + + if module.params.get('state') == 'present': + add_device() + else: + remove_device() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/monitoring/stackdriver.py b/plugins/modules/monitoring/stackdriver.py new file mode 100644 index 0000000000..536a532987 --- /dev/null +++ b/plugins/modules/monitoring/stackdriver.py @@ -0,0 +1,206 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' + +module: stackdriver +short_description: Send code deploy and annotation events to stackdriver +description: + - Send code deploy and annotation events to Stackdriver +author: "Ben Whaley (@bwhaley)" +options: + key: + description: + - API key. + required: true + event: + description: + - The type of event to send, either annotation or deploy + choices: ['annotation', 'deploy'] + revision_id: + description: + - The revision of the code that was deployed. Required for deploy events + deployed_by: + description: + - The person or robot responsible for deploying the code + default: "Ansible" + deployed_to: + description: + - "The environment code was deployed to. 
(i.e. development, staging, production)"
+  repository:
+    description:
+      - The repository (or project) deployed
+  msg:
+    description:
+      - The contents of the annotation message, in plain text. Limited to 256 characters. Required for annotation.
+  annotated_by:
+    description:
+      - The person or robot who the annotation should be attributed to.
+    default: "Ansible"
+  level:
+    description:
+      - One of INFO/WARN/ERROR, defaults to INFO if not supplied. May affect display.
+    choices: ['INFO', 'WARN', 'ERROR']
+    default: 'INFO'
+  instance_id:
+    description:
+      - ID of an EC2 instance that this event should be attached to, which will limit the contexts where this event is shown
+  event_epoch:
+    description:
+      - "Unix timestamp of where the event should appear in the timeline, defaults to now. Be careful with this."
+'''
+
+EXAMPLES = '''
+- stackdriver:
+    key: AAAAAA
+    event: deploy
+    deployed_to: production
+    deployed_by: leeroyjenkins
+    repository: MyWebApp
+    revision_id: abcd123
+
+- stackdriver:
+    key: AAAAAA
+    event: annotation
+    msg: Greetings from Ansible
+    annotated_by: leeroyjenkins
+    level: WARN
+    instance_id: i-abcd1234
+'''
+
+# ===========================================
+# Stackdriver module specific support methods.
+#
+
+import json
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+from ansible.module_utils.urls import fetch_url
+
+
+def send_deploy_event(module, key, revision_id, deployed_by='Ansible', deployed_to=None, repository=None):
+    """Send a deploy event to Stackdriver"""
+    deploy_api = "https://event-gateway.stackdriver.com/v1/deployevent"
+
+    params = {}
+    params['revision_id'] = revision_id
+    params['deployed_by'] = deployed_by
+    if deployed_to:
+        params['deployed_to'] = deployed_to
+    if repository:
+        params['repository'] = repository
+
+    return do_send_request(module, deploy_api, params, key)
+
+
+def send_annotation_event(module, key, msg, annotated_by='Ansible', level=None, instance_id=None, event_epoch=None):
+    """Send an annotation event to Stackdriver"""
+    annotation_api = "https://event-gateway.stackdriver.com/v1/annotationevent"
+
+    params = {}
+    params['message'] = msg
+    if annotated_by:
+        params['annotated_by'] = annotated_by
+    if level:
+        params['level'] = level
+    if instance_id:
+        params['instance_id'] = instance_id
+    if event_epoch:
+        params['event_epoch'] = event_epoch
+
+    return do_send_request(module, annotation_api, params, key)
+
+
+def do_send_request(module, url, params, key):
+    data = json.dumps(params)
+    headers = {
+        'Content-Type': 'application/json',
+        'x-stackdriver-apikey': key
+    }
+    response, info = fetch_url(module, url, headers=headers, data=data, method='POST')
+    if info['status'] != 200:
+        module.fail_json(msg="Unable to send msg: %s" % info['msg'])
+
+
+# ===========================================
+# Module execution.
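+# For illustration, a deploy event ends up as a POST to
+# https://event-gateway.stackdriver.com/v1/deployevent with the API key in the
+# 'x-stackdriver-apikey' header and a JSON body such as (example values):
+#   {"revision_id": "abcd123", "deployed_by": "leeroyjenkins",
+#    "deployed_to": "production", "repository": "MyWebApp"}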
+#
+
+def main():
+
+    module = AnsibleModule(
+        argument_spec=dict(
+            key=dict(required=True),
+            event=dict(required=True, choices=['deploy', 'annotation']),
+            msg=dict(),
+            revision_id=dict(),
+            annotated_by=dict(default='Ansible'),
+            level=dict(default='INFO', choices=['INFO', 'WARN', 'ERROR']),
+            instance_id=dict(),
+            event_epoch=dict(),
+            deployed_by=dict(default='Ansible'),
+            deployed_to=dict(),
+            repository=dict(),
+        ),
+        supports_check_mode=True
+    )
+
+    key = module.params["key"]
+    event = module.params["event"]
+
+    # Annotation params
+    msg = module.params["msg"]
+    annotated_by = module.params["annotated_by"]
+    level = module.params["level"]
+    instance_id = module.params["instance_id"]
+    event_epoch = module.params["event_epoch"]
+
+    # Deploy params
+    revision_id = module.params["revision_id"]
+    deployed_by = module.params["deployed_by"]
+    deployed_to = module.params["deployed_to"]
+    repository = module.params["repository"]
+
+    ##################################################################
+    # deploy requires revision_id
+    # annotation requires msg
+    # We verify these manually
+    ##################################################################
+
+    if event == 'deploy':
+        if not revision_id:
+            module.fail_json(msg="revision_id required for deploy events")
+        try:
+            send_deploy_event(module, key, revision_id, deployed_by, deployed_to, repository)
+        except Exception as e:
+            module.fail_json(msg="unable to send deploy event: %s" % to_native(e),
+                             exception=traceback.format_exc())
+
+    if event == 'annotation':
+        if not msg:
+            module.fail_json(msg="msg required for annotation events")
+        try:
+            send_annotation_event(module, key, msg, annotated_by, level, instance_id, event_epoch)
+        except Exception as e:
+            module.fail_json(msg="unable to send annotation event: %s" % to_native(e),
+                             exception=traceback.format_exc())
+
+    changed = True
+    module.exit_json(changed=changed, deployed_by=deployed_by)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/monitoring/statusio_maintenance.py b/plugins/modules/monitoring/statusio_maintenance.py
new file mode 100644
index 0000000000..8728da8abc
--- /dev/null
+++ b/plugins/modules/monitoring/statusio_maintenance.py
@@ -0,0 +1,457 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2015, Benjamin Copeland (@bhcopeland)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+
+module: statusio_maintenance
+short_description: Create maintenance windows for your status.io dashboard
+description:
+  - Creates a maintenance window for status.io
+  - Deletes a maintenance window for status.io
+notes:
+  - You can use the apiary API url (http://docs.statusio.apiary.io/) to
+    capture API traffic
+  - Use start_date and start_time with minutes to set a future maintenance window
+author: Benjamin Copeland (@bhcopeland)
+options:
+  title:
+    description:
+      - A descriptive title for the maintenance window
+    default: "A new maintenance window"
+  desc:
+    description:
+      - Message describing the maintenance window
+    default: "Created by Ansible"
+  state:
+    description:
+      - Desired state of the maintenance window.
+    default: "present"
+    choices: ["present", "absent"]
+  api_id:
+    description:
+      - Your unique API ID from status.io
+    required: true
+  api_key:
+    description:
+      - Your unique API Key from status.io
+    required: true
+  statuspage:
+    description:
+      - Your unique StatusPage ID from status.io
+    required: true
+  url:
+    description:
+      - Status.io API URL. A private apiary can be used instead.
+    default: "https://api.status.io"
+  components:
+    description:
+      - The given name of your component (server name)
+    aliases: ['component']
+  containers:
+    description:
+      - The given name of your container (data center)
+    aliases: ['container']
+  all_infrastructure_affected:
+    description:
+      - If it affects all components and containers
+    type: bool
+    default: 'no'
+  automation:
+    description:
+      - Automatically start and end the maintenance window
+    type: bool
+    default: 'no'
+  maintenance_notify_now:
+    description:
+      - Notify subscribers now
+    type: bool
+    default: 'no'
+  maintenance_notify_72_hr:
+    description:
+      - Notify subscribers 72 hours before maintenance start time
+    type: bool
+    default: 'no'
+  maintenance_notify_24_hr:
+    description:
+      - Notify subscribers 24 hours before maintenance start time
+    type: bool
+    default: 'no'
+  maintenance_notify_1_hr:
+    description:
+      - Notify subscribers 1 hour before maintenance start time
+    type: bool
+    default: 'no'
+  maintenance_id:
+    description:
+      - The maintenance id number when deleting a maintenance window
+  minutes:
+    description:
+      - The length of time (in minutes) that the maintenance window will run
+        (starting from playbook runtime)
+    default: 10
+  start_date:
+    description:
+      - Date maintenance is expected to start (Month/Day/Year) (UTC)
+      - End Date is worked out from start_date + minutes
+  start_time:
+    description:
+      - Time maintenance is expected to start (Hour:Minutes) (UTC)
+      - End Time is worked out from start_time + minutes
+'''
+
+EXAMPLES = '''
+- name: Create a maintenance window for 10 minutes on server1, with automation to stop the maintenance
+  statusio_maintenance:
+    title: Router Upgrade from ansible
+    desc: Performing a Router Upgrade
+    components: server1.example.com
+    api_id: api_id
+    api_key: api_key
+    statuspage: statuspage_id
+    maintenance_notify_1_hr: True
+    automation: True
+
+- name: Create a maintenance window for 60 minutes on server1 and server2
+  statusio_maintenance:
+    title: Routine maintenance
+    desc: Some security updates
+    components:
+      - server1.example.com
+      - server2.example.com
+    minutes: 60
+    api_id: api_id
+    api_key: api_key
+    statuspage: statuspage_id
+    maintenance_notify_1_hr: True
+    automation: True
+  delegate_to: localhost
+
+- name: Create a future maintenance window for 24 hours for all hosts inside the Primary Data Center
+  statusio_maintenance:
+    title: Data center downtime
+    desc: Performing an upgrade to our data center
+    components: Primary Data Center
+    api_id: api_id
+    api_key: api_key
+    statuspage: statuspage_id
+    start_date: 01/01/2016
+    start_time: 12:00
+    minutes: 1440
+
+- name: Delete a maintenance window
+  statusio_maintenance:
+    title: Remove a maintenance window
+    maintenance_id: 561f90faf74bc94a4700087b
+    statuspage: statuspage_id
+    api_id: api_id
+    api_key: api_key
+    state: absent
+
+'''
+# TODO: Add RETURN documentation.
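+# Rough shape of the status.io calls made below (taken from the code, not the
+# official docs): authentication is verified via GET /v2/component/list/<statuspage>
+# with 'x-api-id'/'x-api-key' headers; windows are created via POST
+# /v2/maintenance/schedule and removed via POST /v2/maintenance/delete.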
+RETURN = ''' # ''' + +import datetime +import json + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_native +from ansible.module_utils.urls import open_url + + +def get_api_auth_headers(api_id, api_key, url, statuspage): + + headers = { + "x-api-id": api_id, + "x-api-key": api_key, + "Content-Type": "application/json" + } + + try: + response = open_url( + url + "/v2/component/list/" + statuspage, headers=headers) + data = json.loads(response.read()) + if data['status']['message'] == 'Authentication failed': + return 1, None, None, "Authentication failed: " \ + "Check api_id/api_key and statuspage id." + else: + auth_headers = headers + auth_content = data + except Exception as e: + return 1, None, None, to_native(e) + return 0, auth_headers, auth_content, None + + +def get_component_ids(auth_content, components): + host_ids = [] + lower_components = [x.lower() for x in components] + for result in auth_content["result"]: + if result['name'].lower() in lower_components: + data = { + "component_id": result["_id"], + "container_id": result["containers"][0]["_id"] + } + host_ids.append(data) + lower_components.remove(result['name'].lower()) + if len(lower_components): + # items not found in the api + return 1, None, lower_components + return 0, host_ids, None + + +def get_container_ids(auth_content, containers): + host_ids = [] + lower_containers = [x.lower() for x in containers] + for result in auth_content["result"]: + if result["containers"][0]["name"].lower() in lower_containers: + data = { + "component_id": result["_id"], + "container_id": result["containers"][0]["_id"] + } + host_ids.append(data) + lower_containers.remove(result["containers"][0]["name"].lower()) + + if len(lower_containers): + # items not found in the api + return 1, None, lower_containers + return 0, host_ids, None + + +def get_date_time(start_date, start_time, minutes): + returned_date = [] + if start_date and start_time: + try: + datetime.datetime.strptime(start_date, '%m/%d/%Y') + returned_date.append(start_date) + except (NameError, ValueError): + return 1, None, "Not a valid start_date format." + try: + datetime.datetime.strptime(start_time, '%H:%M') + returned_date.append(start_time) + except (NameError, ValueError): + return 1, None, "Not a valid start_time format." 
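+        # Worked example (assumed values): start_date='01/01/2016',
+        # start_time='12:00', minutes=1440 -> end date '01/02/2016', end time '12:00'.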
+ try: + # Work out end date/time based on minutes + date_time_start = datetime.datetime.strptime( + start_time + start_date, '%H:%M%m/%d/%Y') + delta = date_time_start + datetime.timedelta(minutes=minutes) + returned_date.append(delta.strftime("%m/%d/%Y")) + returned_date.append(delta.strftime("%H:%M")) + except (NameError, ValueError): + return 1, None, "Couldn't work out a valid date" + else: + now = datetime.datetime.utcnow() + delta = now + datetime.timedelta(minutes=minutes) + # start_date + returned_date.append(now.strftime("%m/%d/%Y")) + returned_date.append(now.strftime("%H:%M")) + # end_date + returned_date.append(delta.strftime("%m/%d/%Y")) + returned_date.append(delta.strftime("%H:%M")) + return 0, returned_date, None + + +def create_maintenance(auth_headers, url, statuspage, host_ids, + all_infrastructure_affected, automation, title, desc, + returned_date, maintenance_notify_now, + maintenance_notify_72_hr, maintenance_notify_24_hr, + maintenance_notify_1_hr): + returned_dates = [[x] for x in returned_date] + component_id = [] + container_id = [] + for val in host_ids: + component_id.append(val['component_id']) + container_id.append(val['container_id']) + try: + values = json.dumps({ + "statuspage_id": statuspage, + "components": component_id, + "containers": container_id, + "all_infrastructure_affected": str(int(all_infrastructure_affected)), + "automation": str(int(automation)), + "maintenance_name": title, + "maintenance_details": desc, + "date_planned_start": returned_dates[0], + "time_planned_start": returned_dates[1], + "date_planned_end": returned_dates[2], + "time_planned_end": returned_dates[3], + "maintenance_notify_now": str(int(maintenance_notify_now)), + "maintenance_notify_72_hr": str(int(maintenance_notify_72_hr)), + "maintenance_notify_24_hr": str(int(maintenance_notify_24_hr)), + "maintenance_notify_1_hr": str(int(maintenance_notify_1_hr)) + }) + response = open_url( + url + "/v2/maintenance/schedule", data=values, + headers=auth_headers) + data = json.loads(response.read()) + + if data["status"]["error"] == "yes": + return 1, None, data["status"]["message"] + except Exception as e: + return 1, None, to_native(e) + return 0, None, None + + +def delete_maintenance(auth_headers, url, statuspage, maintenance_id): + try: + values = json.dumps({ + "statuspage_id": statuspage, + "maintenance_id": maintenance_id, + }) + response = open_url( + url=url + "/v2/maintenance/delete", + data=values, + headers=auth_headers) + data = json.loads(response.read()) + if data["status"]["error"] == "yes": + return 1, None, "Invalid maintenance_id" + except Exception as e: + return 1, None, to_native(e) + return 0, None, None + + +def main(): + module = AnsibleModule( + argument_spec=dict( + api_id=dict(required=True), + api_key=dict(required=True, no_log=True), + statuspage=dict(required=True), + state=dict(required=False, default='present', + choices=['present', 'absent']), + url=dict(default='https://api.status.io', required=False), + components=dict(type='list', required=False, default=None, + aliases=['component']), + containers=dict(type='list', required=False, default=None, + aliases=['container']), + all_infrastructure_affected=dict(type='bool', default=False, + required=False), + automation=dict(type='bool', default=False, required=False), + title=dict(required=False, default='A new maintenance window'), + desc=dict(required=False, default='Created by Ansible'), + minutes=dict(type='int', required=False, default=10), + maintenance_notify_now=dict(type='bool', default=False, 
+ required=False), + maintenance_notify_72_hr=dict(type='bool', default=False, + required=False), + maintenance_notify_24_hr=dict(type='bool', default=False, + required=False), + maintenance_notify_1_hr=dict(type='bool', default=False, + required=False), + maintenance_id=dict(required=False, default=None), + start_date=dict(default=None, required=False), + start_time=dict(default=None, required=False) + ), + supports_check_mode=True, + ) + + api_id = module.params['api_id'] + api_key = module.params['api_key'] + statuspage = module.params['statuspage'] + state = module.params['state'] + url = module.params['url'] + components = module.params['components'] + containers = module.params['containers'] + all_infrastructure_affected = module.params['all_infrastructure_affected'] + automation = module.params['automation'] + title = module.params['title'] + desc = module.params['desc'] + minutes = module.params['minutes'] + maintenance_notify_now = module.params['maintenance_notify_now'] + maintenance_notify_72_hr = module.params['maintenance_notify_72_hr'] + maintenance_notify_24_hr = module.params['maintenance_notify_24_hr'] + maintenance_notify_1_hr = module.params['maintenance_notify_1_hr'] + maintenance_id = module.params['maintenance_id'] + start_date = module.params['start_date'] + start_time = module.params['start_time'] + + if state == "present": + + if api_id and api_key: + (rc, auth_headers, auth_content, error) = \ + get_api_auth_headers(api_id, api_key, url, statuspage) + if rc != 0: + module.fail_json(msg="Failed to get auth keys: %s" % error) + else: + auth_headers = {} + auth_content = {} + + if minutes or start_time and start_date: + (rc, returned_date, error) = get_date_time( + start_date, start_time, minutes) + if rc != 0: + module.fail_json(msg="Failed to set date/time: %s" % error) + + if not components and not containers: + return module.fail_json(msg="A Component or Container must be " + "defined") + elif components and containers: + return module.fail_json(msg="Components and containers cannot " + "be used together") + else: + if components: + (rc, host_ids, error) = get_component_ids(auth_content, + components) + if rc != 0: + module.fail_json(msg="Failed to find component %s" % error) + + if containers: + (rc, host_ids, error) = get_container_ids(auth_content, + containers) + if rc != 0: + module.fail_json(msg="Failed to find container %s" % error) + + if module.check_mode: + module.exit_json(changed=True) + else: + (rc, _, error) = create_maintenance( + auth_headers, url, statuspage, host_ids, + all_infrastructure_affected, automation, + title, desc, returned_date, maintenance_notify_now, + maintenance_notify_72_hr, maintenance_notify_24_hr, + maintenance_notify_1_hr) + if rc == 0: + module.exit_json(changed=True, result="Successfully created " + "maintenance") + else: + module.fail_json(msg="Failed to create maintenance: %s" + % error) + + if state == "absent": + + if api_id and api_key: + (rc, auth_headers, auth_content, error) = \ + get_api_auth_headers(api_id, api_key, url, statuspage) + if rc != 0: + module.fail_json(msg="Failed to get auth keys: %s" % error) + else: + auth_headers = {} + + if module.check_mode: + module.exit_json(changed=True) + else: + (rc, _, error) = delete_maintenance( + auth_headers, url, statuspage, maintenance_id) + if rc == 0: + module.exit_json( + changed=True, + result="Successfully deleted maintenance" + ) + else: + module.fail_json( + msg="Failed to delete maintenance: %s" % error) + + +if __name__ == '__main__': + main() diff --git 
a/plugins/modules/monitoring/uptimerobot.py b/plugins/modules/monitoring/uptimerobot.py new file mode 100644 index 0000000000..81ed8e9de3 --- /dev/null +++ b/plugins/modules/monitoring/uptimerobot.py @@ -0,0 +1,151 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' + +module: uptimerobot +short_description: Pause and start Uptime Robot monitoring +description: + - This module will let you start and pause Uptime Robot Monitoring +author: "Nate Kingsley (@nate-kingsley)" +requirements: + - Valid Uptime Robot API Key +options: + state: + description: + - Define whether or not the monitor should be running or paused. + required: true + choices: [ "started", "paused" ] + monitorid: + description: + - ID of the monitor to check. + required: true + apikey: + description: + - Uptime Robot API key. + required: true +notes: + - Support for adding and removing monitors and alert contacts has not yet been implemented. +''' + +EXAMPLES = ''' +# Pause the monitor with an ID of 12345. +- uptimerobot: + monitorid: 12345 + apikey: 12345-1234512345 + state: paused + +# Start the monitor with an ID of 12345. +- uptimerobot: + monitorid: 12345 + apikey: 12345-1234512345 + state: started +''' + +import json + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.six.moves.urllib.parse import urlencode +from ansible.module_utils.urls import fetch_url +from ansible.module_utils._text import to_text + + +API_BASE = "https://api.uptimerobot.com/" + +API_ACTIONS = dict( + status='getMonitors?', + editMonitor='editMonitor?' 
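+    # The helpers below simply GET API_BASE + action + urlencode(params), for
+    # example (illustrative URL, parameter order may differ):
+    # https://api.uptimerobot.com/getMonitors?apiKey=XXX&format=json&noJsonCallback=1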
+) + +API_FORMAT = 'json' +API_NOJSONCALLBACK = 1 +CHANGED_STATE = False +SUPPORTS_CHECK_MODE = False + + +def checkID(module, params): + + data = urlencode(params) + full_uri = API_BASE + API_ACTIONS['status'] + data + req, info = fetch_url(module, full_uri) + result = to_text(req.read()) + jsonresult = json.loads(result) + req.close() + return jsonresult + + +def startMonitor(module, params): + + params['monitorStatus'] = 1 + data = urlencode(params) + full_uri = API_BASE + API_ACTIONS['editMonitor'] + data + req, info = fetch_url(module, full_uri) + result = to_text(req.read()) + jsonresult = json.loads(result) + req.close() + return jsonresult['stat'] + + +def pauseMonitor(module, params): + + params['monitorStatus'] = 0 + data = urlencode(params) + full_uri = API_BASE + API_ACTIONS['editMonitor'] + data + req, info = fetch_url(module, full_uri) + result = to_text(req.read()) + jsonresult = json.loads(result) + req.close() + return jsonresult['stat'] + + +def main(): + + module = AnsibleModule( + argument_spec=dict( + state=dict(required=True, choices=['started', 'paused']), + apikey=dict(required=True, no_log=True), + monitorid=dict(required=True) + ), + supports_check_mode=SUPPORTS_CHECK_MODE + ) + + params = dict( + apiKey=module.params['apikey'], + monitors=module.params['monitorid'], + monitorID=module.params['monitorid'], + format=API_FORMAT, + noJsonCallback=API_NOJSONCALLBACK + ) + + check_result = checkID(module, params) + + if check_result['stat'] != "ok": + module.fail_json( + msg="failed", + result=check_result['message'] + ) + + if module.params['state'] == 'started': + monitor_result = startMonitor(module, params) + else: + monitor_result = pauseMonitor(module, params) + + module.exit_json( + msg="success", + result=monitor_result + ) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/monitoring/zabbix/zabbix_action.py b/plugins/modules/monitoring/zabbix/zabbix_action.py new file mode 100644 index 0000000000..af0743ef35 --- /dev/null +++ b/plugins/modules/monitoring/zabbix/zabbix_action.py @@ -0,0 +1,2113 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = { + 'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community' +} + +DOCUMENTATION = ''' +--- +module: zabbix_action + +short_description: Create/Delete/Update Zabbix actions + + +description: + - This module allows you to create, modify and delete Zabbix actions. + +author: + - Ruben Tsirunyan (@rubentsirunyan) + - Ruben Harutyunov (@K-DOT) + +requirements: + - "zabbix-api >= 0.5.4" + +options: + name: + description: + - Name of the action + required: true + event_source: + description: + - Type of events that the action will handle. + - Required when C(state=present). + required: false + choices: ['trigger', 'discovery', 'auto_registration', 'internal'] + state: + description: + - State of the action. + - On C(present), it will create an action if it does not exist or update the action if the associated data is different. + - On C(absent), it will remove the action if it exists. + choices: ['present', 'absent'] + default: 'present' + status: + description: + - Status of the action. + choices: ['enabled', 'disabled'] + default: 'enabled' + pause_in_maintenance: + description: + - Whether to pause escalation during maintenance periods or not. 
+      - Can be used when I(event_source=trigger).
+    type: 'bool'
+    default: true
+  esc_period:
+    description:
+      - Default operation step duration. Must be greater than 60 seconds.
+      - Accepts only seconds in int for <= Zabbix 3.2
+      - Accepts seconds, time unit with suffix and user macro since => Zabbix 3.4
+      - Required when C(state=present).
+    required: false
+  conditions:
+    type: list
+    description:
+      - List of dictionaries of conditions to evaluate.
+      - For more information about suboptions of this option please
+        check out Zabbix API documentation U(https://www.zabbix.com/documentation/4.0/manual/api/reference/action/object#action_filter_condition)
+    suboptions:
+      type:
+        description:
+          - Type (label) of the condition.
+          - 'Possible values when I(event_source=trigger):'
+          - ' - C(host_group)'
+          - ' - C(host)'
+          - ' - C(trigger)'
+          - ' - C(trigger_name)'
+          - ' - C(trigger_severity)'
+          - ' - C(time_period)'
+          - ' - C(host_template)'
+          - ' - C(application)'
+          - ' - C(maintenance_status)'
+          - ' - C(event_tag)'
+          - ' - C(event_tag_value)'
+          - 'Possible values when I(event_source=discovery):'
+          - ' - C(host_IP)'
+          - ' - C(discovered_service_type)'
+          - ' - C(discovered_service_port)'
+          - ' - C(discovery_status)'
+          - ' - C(uptime_or_downtime_duration)'
+          - ' - C(received_value)'
+          - ' - C(discovery_rule)'
+          - ' - C(discovery_check)'
+          - ' - C(proxy)'
+          - ' - C(discovery_object)'
+          - 'Possible values when I(event_source=auto_registration):'
+          - ' - C(proxy)'
+          - ' - C(host_name)'
+          - ' - C(host_metadata)'
+          - 'Possible values when I(event_source=internal):'
+          - ' - C(host_group)'
+          - ' - C(host)'
+          - ' - C(host_template)'
+          - ' - C(application)'
+          - ' - C(event_type)'
+      value:
+        description:
+          - Value to compare with.
+          - 'When I(type=discovery_status), the choices are:'
+          - ' - C(up)'
+          - ' - C(down)'
+          - ' - C(discovered)'
+          - ' - C(lost)'
+          - 'When I(type=discovery_object), the choices are:'
+          - ' - C(host)'
+          - ' - C(service)'
+          - 'When I(type=event_type), the choices are:'
+          - ' - C(item in not supported state)'
+          - ' - C(item in normal state)'
+          - ' - C(LLD rule in not supported state)'
+          - ' - C(LLD rule in normal state)'
+          - ' - C(trigger in unknown state)'
+          - ' - C(trigger in normal state)'
+          - 'When I(type=trigger_severity), the choices are (case-insensitive):'
+          - ' - C(not classified)'
+          - ' - C(information)'
+          - ' - C(warning)'
+          - ' - C(average)'
+          - ' - C(high)'
+          - ' - C(disaster)'
+          - Irrespective of user-visible names being changed in Zabbix. Defaults to C(not classified) if omitted.
+          - Besides the above options, this is usually either the name
+            of the object or a string to compare with.
+      operator:
+        description:
+          - Condition operator.
+          - When I(type) is set to C(time_period), the choices are C(in), C(not in).
+          - C(matches), C(does not match), C(Yes) and C(No) condition operators work only with >= Zabbix 4.0
+        choices:
+          - '='
+          - '<>'
+          - 'like'
+          - 'not like'
+          - 'in'
+          - '>='
+          - '<='
+          - 'not in'
+          - 'matches'
+          - 'does not match'
+          - 'Yes'
+          - 'No'
+      formulaid:
+        description:
+          - Arbitrary unique ID that is used to reference the condition from a custom expression.
+          - Can only contain upper-case letters.
+          - Required for custom expression filters.
+      eval_type:
+        description:
+          - Filter condition evaluation method.
+          - Defaults to C(andor) if there are fewer than 2 conditions or if
+            I(formula) is not specified.
+          - Defaults to C(custom_expression) when formula is specified.
+        choices:
+          - 'andor'
+          - 'and'
+          - 'or'
+          - 'custom_expression'
+      formula:
+        description:
+          - User-defined expression to be used for evaluating conditions of filters with a custom expression.
+          - The expression must contain IDs that reference specific filter conditions by their formulaid.
+          - The IDs used in the expression must exactly match the ones
+            defined in the filter conditions. No condition can remain unused or omitted.
+          - Required for custom expression filters.
+          - Use sequential IDs that start at "A". If non-sequential IDs are used, Zabbix re-indexes them.
+            This makes each module run notice the difference in IDs and update the action.
+  default_message:
+    description:
+      - Problem message default text.
+  default_subject:
+    description:
+      - Problem message default subject.
+  recovery_default_message:
+    description:
+      - Recovery message text.
+      - Works only with >= Zabbix 3.2
+  recovery_default_subject:
+    description:
+      - Recovery message subject.
+      - Works only with >= Zabbix 3.2
+  acknowledge_default_message:
+    description:
+      - Update operation (known as "Acknowledge operation" before Zabbix 4.0) message text.
+      - Works only with >= Zabbix 3.4
+  acknowledge_default_subject:
+    description:
+      - Update operation (known as "Acknowledge operation" before Zabbix 4.0) message subject.
+      - Works only with >= Zabbix 3.4
+  operations:
+    type: list
+    description:
+      - List of action operations
+    suboptions:
+      type:
+        description:
+          - Type of operation.
+        choices:
+          - send_message
+          - remote_command
+          - add_host
+          - remove_host
+          - add_to_host_group
+          - remove_from_host_group
+          - link_to_template
+          - unlink_from_template
+          - enable_host
+          - disable_host
+          - set_host_inventory_mode
+      esc_period:
+        description:
+          - Duration of an escalation step in seconds.
+          - Must be greater than 60 seconds.
+          - Accepts only seconds in int for <= Zabbix 3.2
+          - Accepts seconds, time unit with suffix and user macro since => Zabbix 3.4
+          - If set to 0 or 0s, the default action escalation period will be used.
+        default: 0s
+      esc_step_from:
+        description:
+          - Step to start escalation from.
+        default: 1
+      esc_step_to:
+        description:
+          - Step to end escalation at.
+        default: 1
+      send_to_groups:
+        type: list
+        description:
+          - User groups to send messages to.
+      send_to_users:
+        type: list
+        description:
+          - Users (usernames or aliases) to send messages to.
+      message:
+        description:
+          - Operation message text.
+          - If neither this nor I(subject) is specified, the I(default_message) text is used.
+      subject:
+        description:
+          - Operation message subject.
+          - If neither this nor I(message) is specified, the I(default_subject) text is used.
+      media_type:
+        description:
+          - Media type that will be used to send the message.
+          - Set to C(all) for all media types
+        default: 'all'
+      operation_condition:
+        type: 'str'
+        description:
+          - The action operation condition object defines a condition that must be met to perform the current operation.
+        choices:
+          - acknowledged
+          - not_acknowledged
+      host_groups:
+        type: list
+        description:
+          - List of host groups host should be added to.
+          - Required when I(type=add_to_host_group) or I(type=remove_from_host_group).
+      templates:
+        type: list
+        description:
+          - List of templates host should be linked to.
+          - Required when I(type=link_to_template) or I(type=unlink_from_template).
+      inventory:
+        description:
+          - Host inventory mode.
+          - Required when I(type=set_host_inventory_mode).
+ command_type: + description: + - Type of operation command. + - Required when I(type=remote_command). + choices: + - custom_script + - ipmi + - ssh + - telnet + - global_script + command: + description: + - Command to run. + - Required when I(type=remote_command) and I(command_type!=global_script). + execute_on: + description: + - Target on which the custom script operation command will be executed. + - Required when I(type=remote_command) and I(command_type=custom_script). + choices: + - agent + - server + - proxy + run_on_groups: + description: + - Host groups to run remote commands on. + - Required when I(type=remote_command) if I(run_on_hosts) is not set. + run_on_hosts: + description: + - Hosts to run remote commands on. + - Required when I(type=remote_command) if I(run_on_groups) is not set. + - If set to 0 the command will be run on the current host. + ssh_auth_type: + description: + - Authentication method used for SSH commands. + - Required when I(type=remote_command) and I(command_type=ssh). + choices: + - password + - public_key + ssh_privatekey_file: + description: + - Name of the private key file used for SSH commands with public key authentication. + - Required when I(type=remote_command) and I(command_type=ssh). + ssh_publickey_file: + description: + - Name of the public key file used for SSH commands with public key authentication. + - Required when I(type=remote_command) and I(command_type=ssh). + username: + description: + - User name used for authentication. + - Required when I(type=remote_command) and I(command_type in [ssh, telnet]). + password: + description: + - Password used for authentication. + - Required when I(type=remote_command) and I(command_type in [ssh, telnet]). + port: + description: + - Port number used for authentication. + - Required when I(type=remote_command) and I(command_type in [ssh, telnet]). + script_name: + description: + - The name of script used for global script commands. + - Required when I(type=remote_command) and I(command_type=global_script). + recovery_operations: + type: list + description: + - List of recovery operations. + - C(Suboptions) are the same as for I(operations). + - Works only with >= Zabbix 3.2 + acknowledge_operations: + type: list + description: + - List of acknowledge operations. + - C(Suboptions) are the same as for I(operations). + - Works only with >= Zabbix 3.4 + +notes: + - Only Zabbix >= 3.0 is supported. 
+ + +extends_documentation_fragment: +- community.general.zabbix + +''' + +EXAMPLES = ''' +# Trigger action with only one condition +- name: Deploy trigger action + zabbix_action: + server_url: "http://zabbix.example.com/zabbix/" + login_user: Admin + login_password: secret + name: "Send alerts to Admin" + event_source: 'trigger' + state: present + status: enabled + esc_period: 60 + conditions: + - type: 'trigger_severity' + operator: '>=' + value: 'Information' + operations: + - type: send_message + subject: "Something bad is happening" + message: "Come on, guys do something" + media_type: 'Email' + send_to_users: + - 'Admin' + +# Trigger action with multiple conditions and operations +- name: Deploy trigger action + zabbix_action: + server_url: "http://zabbix.example.com/zabbix/" + login_user: Admin + login_password: secret + name: "Send alerts to Admin" + event_source: 'trigger' + state: present + status: enabled + esc_period: 1m + conditions: + - type: 'trigger_name' + operator: 'like' + value: 'Zabbix agent is unreachable' + formulaid: A + - type: 'trigger_severity' + operator: '>=' + value: 'disaster' + formulaid: B + formula: A or B + operations: + - type: send_message + media_type: 'Email' + send_to_users: + - 'Admin' + - type: remote_command + command: 'systemctl restart zabbix-agent' + command_type: custom_script + execute_on: server + run_on_hosts: + - 0 + +# Trigger action with recovery and acknowledge operations +- name: Deploy trigger action + zabbix_action: + server_url: "http://zabbix.example.com/zabbix/" + login_user: Admin + login_password: secret + name: "Send alerts to Admin" + event_source: 'trigger' + state: present + status: enabled + esc_period: 1h + conditions: + - type: 'trigger_severity' + operator: '>=' + value: 'Information' + operations: + - type: send_message + subject: "Something bad is happening" + message: "Come on, guys do something" + media_type: 'Email' + send_to_users: + - 'Admin' + recovery_operations: + - type: send_message + subject: "Host is down" + message: "Come on, guys do something" + media_type: 'Email' + send_to_users: + - 'Admin' + acknowledge_operations: + - type: send_message + media_type: 'Email' + send_to_users: + - 'Admin' +''' + +RETURN = ''' +msg: + description: The result of the operation + returned: success + type: str + sample: 'Action Deleted: Register webservers, ID: 0001' +''' + + +import atexit +import traceback + +try: + from zabbix_api import ZabbixAPI + HAS_ZABBIX_API = True +except ImportError: + ZBX_IMP_ERR = traceback.format_exc() + HAS_ZABBIX_API = False + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib + + +class Zapi(object): + """ + A simple wrapper over the Zabbix API + """ + def __init__(self, module, zbx): + self._module = module + self._zapi = zbx + + def check_if_action_exists(self, name): + """Check if action exists. + + Args: + name: Name of the action. + + Returns: + The return value. True for success, False otherwise. 
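+            (In this implementation the matching action is returned as a
+            list, so the caller can reuse it; an empty list means the action
+            does not exist.)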
+
+        """
+        try:
+            _action = self._zapi.action.get({
+                "selectOperations": "extend",
+                "selectRecoveryOperations": "extend",
+                "selectAcknowledgeOperations": "extend",
+                "selectFilter": "extend",
+                'selectInventory': 'extend',
+                'filter': {'name': [name]}
+            })
+            if len(_action) > 0:
+                _action[0]['recovery_operations'] = _action[0].pop('recoveryOperations', [])
+                _action[0]['acknowledge_operations'] = _action[0].pop('acknowledgeOperations', [])
+            return _action
+        except Exception as e:
+            self._module.fail_json(msg="Failed to check if action '%s' exists: %s" % (name, e))
+
+    def get_action_by_name(self, name):
+        """Get action by name
+
+        Args:
+            name: Name of the action.
+
+        Returns:
+            dict: Zabbix action
+
+        """
+        try:
+            action_list = self._zapi.action.get({
+                'output': 'extend',
+                'selectInventory': 'extend',
+                'filter': {'name': [name]}
+            })
+            if len(action_list) < 1:
+                self._module.fail_json(msg="Action not found: %s" % name)
+            else:
+                return action_list[0]
+        except Exception as e:
+            self._module.fail_json(msg="Failed to get ID of '%s': %s" % (name, e))
+
+    def get_host_by_host_name(self, host_name):
+        """Get host by host name
+
+        Args:
+            host_name: host name.
+
+        Returns:
+            host matching host name
+
+        """
+        try:
+            host_list = self._zapi.host.get({
+                'output': 'extend',
+                'selectInventory': 'extend',
+                'filter': {'host': [host_name]}
+            })
+            if len(host_list) < 1:
+                self._module.fail_json(msg="Host not found: %s" % host_name)
+            else:
+                return host_list[0]
+        except Exception as e:
+            self._module.fail_json(msg="Failed to get host '%s': %s" % (host_name, e))
+
+    def get_hostgroup_by_hostgroup_name(self, hostgroup_name):
+        """Get host group by host group name
+
+        Args:
+            hostgroup_name: host group name.
+
+        Returns:
+            host group matching host group name
+
+        """
+        try:
+            hostgroup_list = self._zapi.hostgroup.get({
+                'output': 'extend',
+                'selectInventory': 'extend',
+                'filter': {'name': [hostgroup_name]}
+            })
+            if len(hostgroup_list) < 1:
+                self._module.fail_json(msg="Host group not found: %s" % hostgroup_name)
+            else:
+                return hostgroup_list[0]
+        except Exception as e:
+            self._module.fail_json(msg="Failed to get host group '%s': %s" % (hostgroup_name, e))
+
+    def get_template_by_template_name(self, template_name):
+        """Get template by template name
+
+        Args:
+            template_name: template name.
+
+        Returns:
+            template matching template name
+
+        """
+        try:
+            template_list = self._zapi.template.get({
+                'output': 'extend',
+                'selectInventory': 'extend',
+                'filter': {'host': [template_name]}
+            })
+            if len(template_list) < 1:
+                self._module.fail_json(msg="Template not found: %s" % template_name)
+            else:
+                return template_list[0]
+        except Exception as e:
+            self._module.fail_json(msg="Failed to get template '%s': %s" % (template_name, e))
+
+    def get_trigger_by_trigger_name(self, trigger_name):
+        """Get trigger by trigger name
+
+        Args:
+            trigger_name: trigger name.
+
+        Returns:
+            trigger matching trigger name
+
+        """
+        try:
+            trigger_list = self._zapi.trigger.get({
+                'output': 'extend',
+                'selectInventory': 'extend',
+                'filter': {'description': [trigger_name]}
+            })
+            if len(trigger_list) < 1:
+                self._module.fail_json(msg="Trigger not found: %s" % trigger_name)
+            else:
+                return trigger_list[0]
+        except Exception as e:
+            self._module.fail_json(msg="Failed to get trigger '%s': %s" % (trigger_name, e))
+
+    def get_discovery_rule_by_discovery_rule_name(self, discovery_rule_name):
+        """Get discovery rule by discovery rule name
+
+        Args:
+            discovery_rule_name: discovery rule name.
+ + Returns: + discovery rule matching discovery rule name + + """ + try: + discovery_rule_list = self._zapi.drule.get({ + 'output': 'extend', + 'selectInventory': 'extend', + 'filter': {'name': [discovery_rule_name]} + }) + if len(discovery_rule_list) < 1: + self._module.fail_json(msg="Discovery rule not found: %s" % discovery_rule_name) + else: + return discovery_rule_list[0] + except Exception as e: + self._module.fail_json(msg="Failed to get discovery rule '%s': %s" % (discovery_rule_name, e)) + + def get_discovery_check_by_discovery_check_name(self, discovery_check_name): + """Get discovery check by discovery check name + + Args: + discovery_check_name: discovery check name. + + Returns: + discovery check matching discovery check name + + """ + try: + discovery_check_list = self._zapi.dcheck.get({ + 'output': 'extend', + 'selectInventory': 'extend', + 'filter': {'name': [discovery_check_name]} + }) + if len(discovery_check_list) < 1: + self._module.fail_json(msg="Discovery check not found: %s" % discovery_check_name) + else: + return discovery_check_list[0] + except Exception as e: + self._module.fail_json(msg="Failed to get discovery check '%s': %s" % (discovery_check_name, e)) + + def get_proxy_by_proxy_name(self, proxy_name): + """Get proxy by proxy name + + Args: + proxy_name: proxy name. + + Returns: + proxy matching proxy name + + """ + try: + proxy_list = self._zapi.proxy.get({ + 'output': 'extend', + 'selectInventory': 'extend', + 'filter': {'host': [proxy_name]} + }) + if len(proxy_list) < 1: + self._module.fail_json(msg="Proxy not found: %s" % proxy_name) + else: + return proxy_list[0] + except Exception as e: + self._module.fail_json(msg="Failed to get proxy '%s': %s" % (proxy_name, e)) + + def get_mediatype_by_mediatype_name(self, mediatype_name): + """Get mediatype by mediatype name + + Args: + mediatype_name: mediatype name + + Returns: + mediatype matching mediatype name + + """ + try: + if str(mediatype_name).lower() == 'all': + return '0' + mediatype_list = self._zapi.mediatype.get({ + 'output': 'extend', + 'selectInventory': 'extend', + 'filter': {'description': [mediatype_name]} + }) + if len(mediatype_list) < 1: + self._module.fail_json(msg="Media type not found: %s" % mediatype_name) + else: + return mediatype_list[0]['mediatypeid'] + except Exception as e: + self._module.fail_json(msg="Failed to get mediatype '%s': %s" % (mediatype_name, e)) + + def get_user_by_user_name(self, user_name): + """Get user by user name + + Args: + user_name: user name + + Returns: + user matching user name + + """ + try: + user_list = self._zapi.user.get({ + 'output': 'extend', + 'selectInventory': + 'extend', 'filter': {'alias': [user_name]} + }) + if len(user_list) < 1: + self._module.fail_json(msg="User not found: %s" % user_name) + else: + return user_list[0] + except Exception as e: + self._module.fail_json(msg="Failed to get user '%s': %s" % (user_name, e)) + + def get_usergroup_by_usergroup_name(self, usergroup_name): + """Get usergroup by usergroup name + + Args: + usergroup_name: usergroup name + + Returns: + usergroup matching usergroup name + + """ + try: + usergroup_list = self._zapi.usergroup.get({ + 'output': 'extend', + 'selectInventory': 'extend', + 'filter': {'name': [usergroup_name]} + }) + if len(usergroup_list) < 1: + self._module.fail_json(msg="User group not found: %s" % usergroup_name) + else: + return usergroup_list[0] + except Exception as e: + self._module.fail_json(msg="Failed to get user group '%s': %s" % (usergroup_name, e)) + + # get script by script 
name + def get_script_by_script_name(self, script_name): + """Get script by script name + + Args: + script_name: script name + + Returns: + script matching script name + + """ + try: + if script_name is None: + return {} + script_list = self._zapi.script.get({ + 'output': 'extend', + 'selectInventory': 'extend', + 'filter': {'name': [script_name]} + }) + if len(script_list) < 1: + self._module.fail_json(msg="Script not found: %s" % script_name) + else: + return script_list[0] + except Exception as e: + self._module.fail_json(msg="Failed to get script '%s': %s" % (script_name, e)) + + +class Action(object): + """ + Restructures the user defined action data to fit the Zabbix API requirements + """ + def __init__(self, module, zbx, zapi_wrapper): + self._module = module + self._zapi = zbx + self._zapi_wrapper = zapi_wrapper + + def _construct_parameters(self, **kwargs): + """Construct parameters. + + Args: + **kwargs: Arbitrary keyword parameters. + + Returns: + dict: dictionary of specified parameters + """ + + _params = { + 'name': kwargs['name'], + 'eventsource': to_numeric_value([ + 'trigger', + 'discovery', + 'auto_registration', + 'internal'], kwargs['event_source']), + 'esc_period': kwargs.get('esc_period'), + 'filter': kwargs['conditions'], + 'def_longdata': kwargs['default_message'], + 'def_shortdata': kwargs['default_subject'], + 'r_longdata': kwargs['recovery_default_message'], + 'r_shortdata': kwargs['recovery_default_subject'], + 'ack_longdata': kwargs['acknowledge_default_message'], + 'ack_shortdata': kwargs['acknowledge_default_subject'], + 'operations': kwargs['operations'], + 'recovery_operations': kwargs.get('recovery_operations'), + 'acknowledge_operations': kwargs.get('acknowledge_operations'), + 'status': to_numeric_value([ + 'enabled', + 'disabled'], kwargs['status']) + } + if kwargs['event_source'] == 'trigger': + if float(self._zapi.api_version().rsplit('.', 1)[0]) >= 4.0: + _params['pause_suppressed'] = '1' if kwargs['pause_in_maintenance'] else '0' + else: + _params['maintenance_mode'] = '1' if kwargs['pause_in_maintenance'] else '0' + + return _params + + def check_difference(self, **kwargs): + """Check difference between action and user specified parameters. + + Args: + **kwargs: Arbitrary keyword parameters. + + Returns: + dict: dictionary of differences + """ + existing_action = convert_unicode_to_str(self._zapi_wrapper.check_if_action_exists(kwargs['name'])[0]) + parameters = convert_unicode_to_str(self._construct_parameters(**kwargs)) + change_parameters = {} + _diff = cleanup_data(compare_dictionaries(parameters, existing_action, change_parameters)) + return _diff + + def update_action(self, **kwargs): + """Update action. + + Args: + **kwargs: Arbitrary keyword parameters. + + Returns: + action: updated action + """ + try: + if self._module.check_mode: + self._module.exit_json(msg="Action would be updated if check mode was not specified: %s" % kwargs, changed=True) + kwargs['actionid'] = kwargs.pop('action_id') + return self._zapi.action.update(kwargs) + except Exception as e: + self._module.fail_json(msg="Failed to update action '%s': %s" % (kwargs['actionid'], e)) + + def add_action(self, **kwargs): + """Add action. + + Args: + **kwargs: Arbitrary keyword parameters. 
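+
+        Note:
+            In check mode this method exits immediately via
+            exit_json(changed=True) instead of calling action.create
+            on the Zabbix API.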
+
+        Returns:
+            action: added action
+        """
+        try:
+            if self._module.check_mode:
+                self._module.exit_json(msg="Action would be added if check mode was not specified", changed=True)
+            parameters = self._construct_parameters(**kwargs)
+            action_list = self._zapi.action.create(parameters)
+            return action_list['actionids'][0]
+        except Exception as e:
+            self._module.fail_json(msg="Failed to create action '%s': %s" % (kwargs['name'], e))
+
+    def delete_action(self, action_id):
+        """Delete action.
+
+        Args:
+            action_id: Action id
+
+        Returns:
+            action: deleted action
+        """
+        try:
+            if self._module.check_mode:
+                self._module.exit_json(msg="Action would be deleted if check mode was not specified", changed=True)
+            return self._zapi.action.delete([action_id])
+        except Exception as e:
+            self._module.fail_json(msg="Failed to delete action '%s': %s" % (action_id, e))
+
+
+class Operations(object):
+    """
+    Restructures the user defined operation data to fit the Zabbix API requirements
+    """
+    def __init__(self, module, zbx, zapi_wrapper):
+        self._module = module
+        # self._zapi = zbx
+        self._zapi_wrapper = zapi_wrapper
+
+    def _construct_operationtype(self, operation):
+        """Construct operation type.
+
+        Args:
+            operation: operation to construct
+
+        Returns:
+            str: constructed operation type
+        """
+        try:
+            return to_numeric_value([
+                "send_message",
+                "remote_command",
+                "add_host",
+                "remove_host",
+                "add_to_host_group",
+                "remove_from_host_group",
+                "link_to_template",
+                "unlink_from_template",
+                "enable_host",
+                "disable_host",
+                "set_host_inventory_mode"], operation['type']
+            )
+        except Exception as e:
+            self._module.fail_json(msg="Unsupported value '%s' for operation type." % operation['type'])
+
+    def _construct_opmessage(self, operation):
+        """Construct operation message.
+
+        Args:
+            operation: operation to construct the message
+
+        Returns:
+            dict: constructed operation message
+        """
+        try:
+            return {
+                'default_msg': '0' if operation.get('message') is not None or operation.get('subject') is not None else '1',
+                'mediatypeid': self._zapi_wrapper.get_mediatype_by_mediatype_name(
+                    operation.get('media_type')
+                ) if operation.get('media_type') is not None else '0',
+                'message': operation.get('message'),
+                'subject': operation.get('subject'),
+            }
+        except Exception as e:
+            self._module.fail_json(msg="Failed to construct operation message. The error was: %s" % e)
+
+    def _construct_opmessage_usr(self, operation):
+        """Construct operation message user.
+
+        Args:
+            operation: operation to construct the message user
+
+        Returns:
+            list: constructed operation message users, or None if 'send_to_users' was not specified
+        """
+        if operation.get('send_to_users') is None:
+            return None
+        return [{
+            'userid': self._zapi_wrapper.get_user_by_user_name(_user)['userid']
+        } for _user in operation.get('send_to_users')]
+
+    def _construct_opmessage_grp(self, operation):
+        """Construct operation message group.
+
+        Args:
+            operation: operation to construct the message group
+
+        Returns:
+            list: constructed operation message groups, or None if 'send_to_groups' was not specified
+        """
+        if operation.get('send_to_groups') is None:
+            return None
+        return [{
+            'usrgrpid': self._zapi_wrapper.get_usergroup_by_usergroup_name(_group)['usrgrpid']
+        } for _group in operation.get('send_to_groups')]
+
+    def _construct_opcommand(self, operation):
+        """Construct operation command.
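+
+        A sketch of the dict this helper produces for the 'custom_script'
+        remote command shown in the EXAMPLES section (numeric codes are the
+        Zabbix API values emitted by to_numeric_value(); keys left unset
+        are removed later by cleanup_data()):
+
+            {'type': '0',        # custom_script
+             'command': 'systemctl restart zabbix-agent',
+             'execute_on': '1',  # server
+             'authtype': '0',    # password (default)
+             ...}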
+
+        Args:
+            operation: operation to construct command
+
+        Returns:
+            dict: constructed operation command
+        """
+        try:
+            return {
+                'type': to_numeric_value([
+                    'custom_script',
+                    'ipmi',
+                    'ssh',
+                    'telnet',
+                    'global_script'], operation.get('command_type', 'custom_script')),
+                'command': operation.get('command'),
+                'execute_on': to_numeric_value([
+                    'agent',
+                    'server',
+                    'proxy'], operation.get('execute_on', 'server')),
+                'scriptid': self._zapi_wrapper.get_script_by_script_name(
+                    operation.get('script_name')
+                ).get('scriptid'),
+                'authtype': to_numeric_value([
+                    'password',
+                    'public_key'
+                ], operation.get('ssh_auth_type', 'password')),
+                'privatekey': operation.get('ssh_privatekey_file'),
+                'publickey': operation.get('ssh_publickey_file'),
+                'username': operation.get('username'),
+                'password': operation.get('password'),
+                'port': operation.get('port')
+            }
+        except Exception as e:
+            self._module.fail_json(msg="Failed to construct operation command. The error was: %s" % e)
+
+    def _construct_opcommand_hst(self, operation):
+        """Construct operation command host.
+
+        Args:
+            operation: operation to construct command host
+
+        Returns:
+            list: constructed operation command hosts
+        """
+        if operation.get('run_on_hosts') is None:
+            return None
+        return [{
+            'hostid': self._zapi_wrapper.get_host_by_host_name(_host)['hostid']
+        } if str(_host) != '0' else {'hostid': '0'} for _host in operation.get('run_on_hosts')]
+
+    def _construct_opcommand_grp(self, operation):
+        """Construct operation command group.
+
+        Args:
+            operation: operation to construct command group
+
+        Returns:
+            list: constructed operation command groups
+        """
+        if operation.get('run_on_groups') is None:
+            return None
+        return [{
+            'groupid': self._zapi_wrapper.get_hostgroup_by_hostgroup_name(_group)['groupid']
+        } for _group in operation.get('run_on_groups')]
+
+    def _construct_opgroup(self, operation):
+        """Construct operation group.
+
+        Args:
+            operation: operation to construct group
+
+        Returns:
+            list: constructed operation groups
+        """
+        return [{
+            'groupid': self._zapi_wrapper.get_hostgroup_by_hostgroup_name(_group)['groupid']
+        } for _group in operation.get('host_groups', [])]
+
+    def _construct_optemplate(self, operation):
+        """Construct operation template.
+
+        Args:
+            operation: operation to construct template
+
+        Returns:
+            list: constructed operation templates
+        """
+        return [{
+            'templateid': self._zapi_wrapper.get_template_by_template_name(_template)['templateid']
+        } for _template in operation.get('templates', [])]
+
+    def _construct_opinventory(self, operation):
+        """Construct operation inventory.
+
+        Args:
+            operation: operation to construct inventory
+
+        Returns:
+            dict: constructed operation inventory
+        """
+        return {'inventory_mode': operation.get('inventory')}
+
+    def _construct_opconditions(self, operation):
+        """Construct operation conditions.
+
+        Args:
+            operation: operation to construct the conditions
+
+        Returns:
+            list: constructed operation conditions
+        """
+        _opcond = operation.get('operation_condition')
+        if _opcond is not None:
+            if _opcond == 'acknowledged':
+                _value = '1'
+            elif _opcond == 'not_acknowledged':
+                _value = '0'
+            return [{
+                'conditiontype': '14',
+                'operator': '0',
+                'value': _value
+            }]
+        return []
+
+    def construct_the_data(self, operations):
+        """Construct the operation data using helper methods.
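+
+        The keys of each constructed entry are chosen by the numeric
+        operation type ('0' send_message, '1' remote_command, '4'/'5' host
+        group membership, '6'/'7' template linkage, '10' inventory mode).
+        As a sketch, a minimal input such as
+
+            {'type': 'send_message', 'media_type': 'Email',
+             'send_to_users': ['Admin']}
+
+        gains 'opmessage', 'opmessage_usr', 'opmessage_grp' and
+        'opconditions' entries before cleanup_data() drops the unset ones.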
+
+        Args:
+            operations: operations to construct
+
+        Returns:
+            list: constructed operation data
+        """
+        constructed_data = []
+        for op in operations:
+            operation_type = self._construct_operationtype(op)
+            constructed_operation = {
+                'operationtype': operation_type,
+                'esc_period': op.get('esc_period'),
+                'esc_step_from': op.get('esc_step_from'),
+                'esc_step_to': op.get('esc_step_to')
+            }
+            # Send Message type
+            if constructed_operation['operationtype'] == '0':
+                constructed_operation['opmessage'] = self._construct_opmessage(op)
+                constructed_operation['opmessage_usr'] = self._construct_opmessage_usr(op)
+                constructed_operation['opmessage_grp'] = self._construct_opmessage_grp(op)
+                constructed_operation['opconditions'] = self._construct_opconditions(op)
+
+            # Send Command type
+            if constructed_operation['operationtype'] == '1':
+                constructed_operation['opcommand'] = self._construct_opcommand(op)
+                constructed_operation['opcommand_hst'] = self._construct_opcommand_hst(op)
+                constructed_operation['opcommand_grp'] = self._construct_opcommand_grp(op)
+                constructed_operation['opconditions'] = self._construct_opconditions(op)
+
+            # Add to/Remove from host group
+            if constructed_operation['operationtype'] in ('4', '5'):
+                constructed_operation['opgroup'] = self._construct_opgroup(op)
+
+            # Link/Unlink template
+            if constructed_operation['operationtype'] in ('6', '7'):
+                constructed_operation['optemplate'] = self._construct_optemplate(op)
+
+            # Set inventory mode
+            if constructed_operation['operationtype'] == '10':
+                constructed_operation['opinventory'] = self._construct_opinventory(op)
+
+            constructed_data.append(constructed_operation)
+
+        return cleanup_data(constructed_data)
+
+
+class RecoveryOperations(Operations):
+    """
+    Restructures the user defined recovery operations data to fit the Zabbix API requirements
+    """
+    def _construct_operationtype(self, operation):
+        """Construct operation type.
+
+        Args:
+            operation: operation to construct type
+
+        Returns:
+            str: constructed operation type
+        """
+        try:
+            return to_numeric_value([
+                "send_message",
+                "remote_command",
+                None,
+                None,
+                None,
+                None,
+                None,
+                None,
+                None,
+                None,
+                None,
+                "notify_all_involved"], operation['type']
+            )
+        except Exception as e:
+            self._module.fail_json(msg="Unsupported value '%s' for recovery operation type." % operation['type'])
+
+    def construct_the_data(self, operations):
+        """Construct the recovery operations data using helper methods.
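+
+        Only 'send_message' ('0'), 'remote_command' ('1') and
+        'notify_all_involved' ('11') are valid here; the None placeholders
+        in _construct_operationtype() keep the intermediate numeric API
+        values from matching, and '0' and '11' share the message branch.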
+
+        Args:
+            operations: operations to construct
+
+        Returns:
+            list: constructed recovery operations data
+        """
+        constructed_data = []
+        for op in operations:
+            operation_type = self._construct_operationtype(op)
+            constructed_operation = {
+                'operationtype': operation_type,
+            }
+
+            # Send Message type
+            if constructed_operation['operationtype'] in ('0', '11'):
+                constructed_operation['opmessage'] = self._construct_opmessage(op)
+                constructed_operation['opmessage_usr'] = self._construct_opmessage_usr(op)
+                constructed_operation['opmessage_grp'] = self._construct_opmessage_grp(op)
+
+            # Send Command type
+            if constructed_operation['operationtype'] == '1':
+                constructed_operation['opcommand'] = self._construct_opcommand(op)
+                constructed_operation['opcommand_hst'] = self._construct_opcommand_hst(op)
+                constructed_operation['opcommand_grp'] = self._construct_opcommand_grp(op)
+
+            constructed_data.append(constructed_operation)
+
+        return cleanup_data(constructed_data)
+
+
+class AcknowledgeOperations(Operations):
+    """
+    Restructures the user defined acknowledge operations data to fit the Zabbix API requirements
+    """
+    def _construct_operationtype(self, operation):
+        """Construct operation type.
+
+        Args:
+            operation: operation to construct type
+
+        Returns:
+            str: constructed operation type
+        """
+        try:
+            return to_numeric_value([
+                "send_message",
+                "remote_command",
+                None,
+                None,
+                None,
+                None,
+                None,
+                None,
+                None,
+                None,
+                None,
+                None,
+                "notify_all_involved"], operation['type']
+            )
+        except Exception as e:
+            self._module.fail_json(msg="Unsupported value '%s' for acknowledge operation type." % operation['type'])
+
+    def construct_the_data(self, operations):
+        """Construct the acknowledge operations data using helper methods.
+
+        Args:
+            operations: operations to construct
+
+        Returns:
+            list: constructed acknowledge operations data
+        """
+        constructed_data = []
+        for op in operations:
+            operation_type = self._construct_operationtype(op)
+            constructed_operation = {
+                'operationtype': operation_type,
+            }
+
+            # Send Message type ('12' is notify_all_involved for acknowledge operations)
+            if constructed_operation['operationtype'] in ('0', '12'):
+                constructed_operation['opmessage'] = self._construct_opmessage(op)
+                constructed_operation['opmessage_usr'] = self._construct_opmessage_usr(op)
+                constructed_operation['opmessage_grp'] = self._construct_opmessage_grp(op)
+
+            # Send Command type
+            if constructed_operation['operationtype'] == '1':
+                constructed_operation['opcommand'] = self._construct_opcommand(op)
+                constructed_operation['opcommand_hst'] = self._construct_opcommand_hst(op)
+                constructed_operation['opcommand_grp'] = self._construct_opcommand_grp(op)
+
+            constructed_data.append(constructed_operation)
+
+        return cleanup_data(constructed_data)
+
+
+class Filter(object):
+    """
+    Restructures the user defined filter conditions to fit the Zabbix API requirements
+    """
+    def __init__(self, module, zbx, zapi_wrapper):
+        self._module = module
+        self._zapi = zbx
+        self._zapi_wrapper = zapi_wrapper
+
+    def _construct_evaltype(self, _eval_type, _formula, _conditions):
+        """Construct the eval type
+
+        Args:
+            _eval_type: zabbix condition evaluation type ('andor', 'and', 'or' or 'custom_expression')
+            _formula: zabbix condition evaluation formula
+            _conditions: list of conditions to check
+
+        Returns:
+            dict: constructed evaluation type and formula
+        """
+        if len(_conditions) <= 1:
+            return {
+                'evaltype': '0',
+                'formula': None
+            }
+        if _eval_type == 'andor':
+            return {
+                'evaltype': '0',
+                'formula': None
+            }
+        if _eval_type == 'and':
+            return {
+                'evaltype': '1',
+                'formula': None
+            }
+        if _eval_type == 'or':
+            return {
+                'evaltype': '2',
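+                # Zabbix API evaltype codes: '0' and/or, '1' and, '2' or,
+                # '3' custom expression (requires 'formula')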
+ 'formula': None + } + if _eval_type == 'custom_expression': + if _formula is not None: + return { + 'evaltype': '3', + 'formula': _formula + } + else: + self._module.fail_json(msg="'formula' is required when 'eval_type' is set to 'custom_expression'") + if _formula is not None: + return { + 'evaltype': '3', + 'formula': _formula + } + return { + 'evaltype': '0', + 'formula': None + } + + def _construct_conditiontype(self, _condition): + """Construct the condition type + + Args: + _condition: condition to check + + Returns: + str: constructed condition type data + """ + try: + return to_numeric_value([ + "host_group", + "host", + "trigger", + "trigger_name", + "trigger_severity", + "trigger_value", + "time_period", + "host_ip", + "discovered_service_type", + "discovered_service_port", + "discovery_status", + "uptime_or_downtime_duration", + "received_value", + "host_template", + None, + "application", + "maintenance_status", + None, + "discovery_rule", + "discovery_check", + "proxy", + "discovery_object", + "host_name", + "event_type", + "host_metadata", + "event_tag", + "event_tag_value"], _condition['type'] + ) + except Exception as e: + self._module.fail_json(msg="Unsupported value '%s' for condition type." % _condition['type']) + + def _construct_operator(self, _condition): + """Construct operator + + Args: + _condition: condition to construct + + Returns: + str: constructed operator + """ + try: + return to_numeric_value([ + "=", + "<>", + "like", + "not like", + "in", + ">=", + "<=", + "not in", + "matches", + "does not match", + "Yes", + "No"], _condition['operator'] + ) + except Exception as e: + self._module.fail_json(msg="Unsupported value '%s' for operator." % _condition['operator']) + + def _construct_value(self, conditiontype, value): + """Construct operator + + Args: + conditiontype: type of condition to construct + value: value to construct + + Returns: + str: constructed value + """ + try: + # Host group + if conditiontype == '0': + return self._zapi_wrapper.get_hostgroup_by_hostgroup_name(value)['groupid'] + # Host + if conditiontype == '1': + return self._zapi_wrapper.get_host_by_host_name(value)['hostid'] + # Trigger + if conditiontype == '2': + return self._zapi_wrapper.get_trigger_by_trigger_name(value)['triggerid'] + # Trigger name: return as is + # Trigger severity + if conditiontype == '4': + return to_numeric_value([ + "not classified", + "information", + "warning", + "average", + "high", + "disaster"], value or "not classified" + ) + + # Trigger value + if conditiontype == '5': + return to_numeric_value([ + "ok", + "problem"], value or "ok" + ) + # Time period: return as is + # Host IP: return as is + # Discovered service type + if conditiontype == '8': + return to_numeric_value([ + "SSH", + "LDAP", + "SMTP", + "FTP", + "HTTP", + "POP", + "NNTP", + "IMAP", + "TCP", + "Zabbix agent", + "SNMPv1 agent", + "SNMPv2 agent", + "ICMP ping", + "SNMPv3 agent", + "HTTPS", + "Telnet"], value + ) + # Discovered service port: return as is + # Discovery status + if conditiontype == '10': + return to_numeric_value([ + "up", + "down", + "discovered", + "lost"], value + ) + if conditiontype == '13': + return self._zapi_wrapper.get_template_by_template_name(value)['templateid'] + if conditiontype == '18': + return self._zapi_wrapper.get_discovery_rule_by_discovery_rule_name(value)['druleid'] + if conditiontype == '19': + return self._zapi_wrapper.get_discovery_check_by_discovery_check_name(value)['dcheckid'] + if conditiontype == '20': + return 
self._zapi_wrapper.get_proxy_by_proxy_name(value)['proxyid'] + if conditiontype == '21': + return to_numeric_value([ + "pchldrfor0", + "host", + "service"], value + ) + if conditiontype == '23': + return to_numeric_value([ + "item in not supported state", + "item in normal state", + "LLD rule in not supported state", + "LLD rule in normal state", + "trigger in unknown state", + "trigger in normal state"], value + ) + return value + except Exception as e: + self._module.fail_json( + msg="""Unsupported value '%s' for specified condition type. + Check out Zabbix API documentation for supported values for + condition type '%s' at + https://www.zabbix.com/documentation/3.4/manual/api/reference/action/object#action_filter_condition""" % (value, conditiontype) + ) + + def construct_the_data(self, _eval_type, _formula, _conditions): + """Construct the user defined filter conditions to fit the Zabbix API + requirements operations data using helper methods. + + Args: + _formula: zabbix condition evaluation formula + _conditions: conditions to construct + + Returns: + dict: user defined filter conditions + """ + if _conditions is None: + return None + constructed_data = {} + constructed_data['conditions'] = [] + for cond in _conditions: + condition_type = self._construct_conditiontype(cond) + constructed_data['conditions'].append({ + "conditiontype": condition_type, + "value": self._construct_value(condition_type, cond.get("value")), + "value2": cond.get("value2"), + "formulaid": cond.get("formulaid"), + "operator": self._construct_operator(cond) + }) + _constructed_evaltype = self._construct_evaltype( + _eval_type, + _formula, + constructed_data['conditions'] + ) + constructed_data['evaltype'] = _constructed_evaltype['evaltype'] + constructed_data['formula'] = _constructed_evaltype['formula'] + return cleanup_data(constructed_data) + + +def convert_unicode_to_str(data): + """Converts unicode objects to strings in dictionary + args: + data: unicode object + + Returns: + dict: strings in dictionary + """ + if isinstance(data, dict): + return dict(map(convert_unicode_to_str, data.items())) + elif isinstance(data, (list, tuple, set)): + return type(data)(map(convert_unicode_to_str, data)) + elif data is None: + return data + else: + return str(data) + + +def to_numeric_value(strs, value): + """Converts string values to integers + Args: + value: string value + + Returns: + int: converted integer + """ + strs = [s.lower() if isinstance(s, str) else s for s in strs] + value = value.lower() + tmp_dict = dict(zip(strs, list(range(len(strs))))) + return str(tmp_dict[value]) + + +def compare_lists(l1, l2, diff_dict): + """ + Compares l1 and l2 lists and adds the items that are different + to the diff_dict dictionary. + Used in recursion with compare_dictionaries() function. + Args: + l1: first list to compare + l2: second list to compare + diff_dict: dictionary to store the difference + + Returns: + dict: items that are different + """ + if len(l1) != len(l2): + diff_dict.append(l1) + return diff_dict + for i, item in enumerate(l1): + if isinstance(item, dict): + diff_dict.insert(i, {}) + diff_dict[i] = compare_dictionaries(item, l2[i], diff_dict[i]) + else: + if item != l2[i]: + diff_dict.append(item) + while {} in diff_dict: + diff_dict.remove({}) + return diff_dict + + +def compare_dictionaries(d1, d2, diff_dict): + """ + Compares d1 and d2 dictionaries and adds the items that are different + to the diff_dict dictionary. + Used in recursion with compare_lists() function. 
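+    Only keys whose values differ from (or are missing in) d2 survive, so
+    an empty result means the action needs no update. A small sketch:
+
+        compare_dictionaries({'esc_period': '60', 'status': '0'},
+                             {'esc_period': '3600', 'status': '0'}, {})
+        # -> {'esc_period': '60'}
+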
+ Args: + d1: first dictionary to compare + d2: second dictionary to compare + diff_dict: dictionary to store the difference + + Returns: + dict: items that are different + """ + for k, v in d1.items(): + if k not in d2: + diff_dict[k] = v + continue + if isinstance(v, dict): + diff_dict[k] = {} + compare_dictionaries(v, d2[k], diff_dict[k]) + if diff_dict[k] == {}: + del diff_dict[k] + else: + diff_dict[k] = v + elif isinstance(v, list): + diff_dict[k] = [] + compare_lists(v, d2[k], diff_dict[k]) + if diff_dict[k] == []: + del diff_dict[k] + else: + diff_dict[k] = v + else: + if v != d2[k]: + diff_dict[k] = v + return diff_dict + + +def cleanup_data(obj): + """Removes the None values from the object and returns the object + Args: + obj: object to cleanup + + Returns: + object: cleaned object + """ + if isinstance(obj, (list, tuple, set)): + return type(obj)(cleanup_data(x) for x in obj if x is not None) + elif isinstance(obj, dict): + return type(obj)((cleanup_data(k), cleanup_data(v)) + for k, v in obj.items() if k is not None and v is not None) + else: + return obj + + +def main(): + """Main ansible module function + """ + + module = AnsibleModule( + argument_spec=dict( + server_url=dict(type='str', required=True, aliases=['url']), + login_user=dict(type='str', required=True), + login_password=dict(type='str', required=True, no_log=True), + http_login_user=dict(type='str', required=False, default=None), + http_login_password=dict(type='str', required=False, default=None, no_log=True), + validate_certs=dict(type='bool', required=False, default=True), + esc_period=dict(type='str', required=False), + timeout=dict(type='int', default=10), + name=dict(type='str', required=True), + event_source=dict(type='str', required=False, choices=['trigger', 'discovery', 'auto_registration', 'internal']), + state=dict(type='str', required=False, default='present', choices=['present', 'absent']), + status=dict(type='str', required=False, default='enabled', choices=['enabled', 'disabled']), + pause_in_maintenance=dict(type='bool', required=False, default=True), + default_message=dict(type='str', required=False, default=''), + default_subject=dict(type='str', required=False, default=''), + recovery_default_message=dict(type='str', required=False, default=''), + recovery_default_subject=dict(type='str', required=False, default=''), + acknowledge_default_message=dict(type='str', required=False, default=''), + acknowledge_default_subject=dict(type='str', required=False, default=''), + conditions=dict( + type='list', + required=False, + default=[], + elements='dict', + options=dict( + formulaid=dict(type='str', required=False), + operator=dict(type='str', required=True), + type=dict(type='str', required=True), + value=dict(type='str', required=True), + value2=dict(type='str', required=False) + ) + ), + formula=dict(type='str', required=False, default=None), + eval_type=dict(type='str', required=False, default=None, choices=['andor', 'and', 'or', 'custom_expression']), + operations=dict( + type='list', + required=False, + default=[], + elements='dict', + options=dict( + type=dict( + type='str', + required=True, + choices=[ + 'send_message', + 'remote_command', + 'add_host', + 'remove_host', + 'add_to_host_group', + 'remove_from_host_group', + 'link_to_template', + 'unlink_from_template', + 'enable_host', + 'disable_host', + 'set_host_inventory_mode', + ] + ), + esc_period=dict(type='str', required=False), + esc_step_from=dict(type='int', required=False, default=1), + esc_step_to=dict(type='int', required=False, 
default=1), + operation_condition=dict( + type='str', + required=False, + default=None, + choices=['acknowledged', 'not_acknowledged'] + ), + # when type is remote_command + command_type=dict( + type='str', + required=False, + choices=[ + 'custom_script', + 'ipmi', + 'ssh', + 'telnet', + 'global_script' + ] + ), + command=dict(type='str', required=False), + execute_on=dict( + type='str', + required=False, + choices=['agent', 'server', 'proxy'] + ), + password=dict(type='str', required=False), + port=dict(type='int', required=False), + run_on_groups=dict(type='list', required=False), + run_on_hosts=dict(type='list', required=False), + script_name=dict(type='str', required=False), + ssh_auth_type=dict( + type='str', + required=False, + default='password', + choices=['password', 'public_key'] + ), + ssh_privatekey_file=dict(type='str', required=False), + ssh_publickey_file=dict(type='str', required=False), + username=dict(type='str', required=False), + # when type is send_message + media_type=dict(type='str', required=False), + subject=dict(type='str', required=False), + message=dict(type='str', required=False), + send_to_groups=dict(type='list', required=False), + send_to_users=dict(type='list', required=False), + # when type is add_to_host_group or remove_from_host_group + host_groups=dict(type='list', required=False), + # when type is set_host_inventory_mode + inventory=dict(type='str', required=False), + # when type is link_to_template or unlink_from_template + templates=dict(type='list', required=False) + ), + required_if=[ + ['type', 'remote_command', ['command_type']], + ['type', 'remote_command', ['run_on_groups', 'run_on_hosts'], True], + ['command_type', 'custom_script', [ + 'command', + 'execute_on' + ]], + ['command_type', 'ipmi', ['command']], + ['command_type', 'ssh', [ + 'command', + 'password', + 'username', + 'port', + 'ssh_auth_type', + 'ssh_privatekey_file', + 'ssh_publickey_file' + ]], + ['command_type', 'telnet', [ + 'command', + 'password', + 'username', + 'port' + ]], + ['command_type', 'global_script', ['script_name']], + ['type', 'add_to_host_group', ['host_groups']], + ['type', 'remove_from_host_group', ['host_groups']], + ['type', 'link_to_template', ['templates']], + ['type', 'unlink_from_template', ['templates']], + ['type', 'set_host_inventory_mode', ['inventory']], + ['type', 'send_message', ['send_to_users', 'send_to_groups'], True] + ] + ), + recovery_operations=dict( + type='list', + required=False, + default=[], + elements='dict', + options=dict( + type=dict( + type='str', + required=True, + choices=[ + 'send_message', + 'remote_command', + 'notify_all_involved' + ] + ), + # when type is remote_command + command_type=dict( + type='str', + required=False, + choices=[ + 'custom_script', + 'ipmi', + 'ssh', + 'telnet', + 'global_script' + ] + ), + command=dict(type='str', required=False), + execute_on=dict( + type='str', + required=False, + choices=['agent', 'server', 'proxy'] + ), + password=dict(type='str', required=False), + port=dict(type='int', required=False), + run_on_groups=dict(type='list', required=False), + run_on_hosts=dict(type='list', required=False), + script_name=dict(type='str', required=False), + ssh_auth_type=dict( + type='str', + required=False, + default='password', + choices=['password', 'public_key'] + ), + ssh_privatekey_file=dict(type='str', required=False), + ssh_publickey_file=dict(type='str', required=False), + username=dict(type='str', required=False), + # when type is send_message + media_type=dict(type='str', required=False), + 
subject=dict(type='str', required=False), + message=dict(type='str', required=False), + send_to_groups=dict(type='list', required=False), + send_to_users=dict(type='list', required=False), + ), + required_if=[ + ['type', 'remote_command', ['command_type']], + ['type', 'remote_command', [ + 'run_on_groups', + 'run_on_hosts' + ], True], + ['command_type', 'custom_script', [ + 'command', + 'execute_on' + ]], + ['command_type', 'ipmi', ['command']], + ['command_type', 'ssh', [ + 'command', + 'password', + 'username', + 'port', + 'ssh_auth_type', + 'ssh_privatekey_file', + 'ssh_publickey_file' + ]], + ['command_type', 'telnet', [ + 'command', + 'password', + 'username', + 'port' + ]], + ['command_type', 'global_script', ['script_name']], + ['type', 'send_message', ['send_to_users', 'send_to_groups'], True] + ] + ), + acknowledge_operations=dict( + type='list', + required=False, + default=[], + elements='dict', + options=dict( + type=dict( + type='str', + required=True, + choices=[ + 'send_message', + 'remote_command', + 'notify_all_involved' + ] + ), + # when type is remote_command + command_type=dict( + type='str', + required=False, + choices=[ + 'custom_script', + 'ipmi', + 'ssh', + 'telnet', + 'global_script' + ] + ), + command=dict(type='str', required=False), + execute_on=dict( + type='str', + required=False, + choices=['agent', 'server', 'proxy'] + ), + password=dict(type='str', required=False), + port=dict(type='int', required=False), + run_on_groups=dict(type='list', required=False), + run_on_hosts=dict(type='list', required=False), + script_name=dict(type='str', required=False), + ssh_auth_type=dict( + type='str', + required=False, + default='password', + choices=['password', 'public_key'] + ), + ssh_privatekey_file=dict(type='str', required=False), + ssh_publickey_file=dict(type='str', required=False), + username=dict(type='str', required=False), + # when type is send_message + media_type=dict(type='str', required=False), + subject=dict(type='str', required=False), + message=dict(type='str', required=False), + send_to_groups=dict(type='list', required=False), + send_to_users=dict(type='list', required=False), + ), + required_if=[ + ['type', 'remote_command', ['command_type']], + ['type', 'remote_command', [ + 'run_on_groups', + 'run_on_hosts' + ], True], + ['command_type', 'custom_script', [ + 'command', + 'execute_on' + ]], + ['command_type', 'ipmi', ['command']], + ['command_type', 'ssh', [ + 'command', + 'password', + 'username', + 'port', + 'ssh_auth_type', + 'ssh_privatekey_file', + 'ssh_publickey_file' + ]], + ['command_type', 'telnet', [ + 'command', + 'password', + 'username', + 'port' + ]], + ['command_type', 'global_script', ['script_name']], + ['type', 'send_message', ['send_to_users', 'send_to_groups'], True] + ] + ) + ), + required_if=[ + ['state', 'present', [ + 'esc_period', + 'event_source' + ]] + ], + supports_check_mode=True + ) + + if not HAS_ZABBIX_API: + module.fail_json(msg=missing_required_lib('zabbix-api', url='https://pypi.org/project/zabbix-api/'), exception=ZBX_IMP_ERR) + + server_url = module.params['server_url'] + login_user = module.params['login_user'] + login_password = module.params['login_password'] + http_login_user = module.params['http_login_user'] + http_login_password = module.params['http_login_password'] + validate_certs = module.params['validate_certs'] + timeout = module.params['timeout'] + name = module.params['name'] + esc_period = module.params['esc_period'] + event_source = module.params['event_source'] + state = module.params['state'] + 
status = module.params['status'] + pause_in_maintenance = module.params['pause_in_maintenance'] + default_message = module.params['default_message'] + default_subject = module.params['default_subject'] + recovery_default_message = module.params['recovery_default_message'] + recovery_default_subject = module.params['recovery_default_subject'] + acknowledge_default_message = module.params['acknowledge_default_message'] + acknowledge_default_subject = module.params['acknowledge_default_subject'] + conditions = module.params['conditions'] + formula = module.params['formula'] + eval_type = module.params['eval_type'] + operations = module.params['operations'] + recovery_operations = module.params['recovery_operations'] + acknowledge_operations = module.params['acknowledge_operations'] + + try: + zbx = ZabbixAPI(server_url, timeout=timeout, user=http_login_user, + passwd=http_login_password, validate_certs=validate_certs) + zbx.login(login_user, login_password) + atexit.register(zbx.logout) + except Exception as e: + module.fail_json(msg="Failed to connect to Zabbix server: %s" % e) + + zapi_wrapper = Zapi(module, zbx) + + action = Action(module, zbx, zapi_wrapper) + + action_exists = zapi_wrapper.check_if_action_exists(name) + ops = Operations(module, zbx, zapi_wrapper) + recovery_ops = RecoveryOperations(module, zbx, zapi_wrapper) + acknowledge_ops = AcknowledgeOperations(module, zbx, zapi_wrapper) + fltr = Filter(module, zbx, zapi_wrapper) + + if action_exists: + action_id = zapi_wrapper.get_action_by_name(name)['actionid'] + if state == "absent": + result = action.delete_action(action_id) + module.exit_json(changed=True, msg="Action Deleted: %s, ID: %s" % (name, result)) + else: + difference = action.check_difference( + action_id=action_id, + name=name, + event_source=event_source, + esc_period=esc_period, + status=status, + pause_in_maintenance=pause_in_maintenance, + default_message=default_message, + default_subject=default_subject, + recovery_default_message=recovery_default_message, + recovery_default_subject=recovery_default_subject, + acknowledge_default_message=acknowledge_default_message, + acknowledge_default_subject=acknowledge_default_subject, + operations=ops.construct_the_data(operations), + recovery_operations=recovery_ops.construct_the_data(recovery_operations), + acknowledge_operations=acknowledge_ops.construct_the_data(acknowledge_operations), + conditions=fltr.construct_the_data(eval_type, formula, conditions) + ) + + if difference == {}: + module.exit_json(changed=False, msg="Action is up to date: %s" % (name)) + else: + result = action.update_action( + action_id=action_id, + **difference + ) + module.exit_json(changed=True, msg="Action Updated: %s, ID: %s" % (name, result)) + else: + if state == "absent": + module.exit_json(changed=False) + else: + action_id = action.add_action( + name=name, + event_source=event_source, + esc_period=esc_period, + status=status, + pause_in_maintenance=pause_in_maintenance, + default_message=default_message, + default_subject=default_subject, + recovery_default_message=recovery_default_message, + recovery_default_subject=recovery_default_subject, + acknowledge_default_message=acknowledge_default_message, + acknowledge_default_subject=acknowledge_default_subject, + operations=ops.construct_the_data(operations), + recovery_operations=recovery_ops.construct_the_data(recovery_operations), + acknowledge_operations=acknowledge_ops.construct_the_data(acknowledge_operations), + conditions=fltr.construct_the_data(eval_type, formula, conditions) + ) 
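+            # At this point every name in the task (users, media types,
+            # host groups, ...) has been resolved to numeric Zabbix IDs by
+            # the construct_the_data() helpers above.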
+ module.exit_json(changed=True, msg="Action created: %s, ID: %s" % (name, action_id)) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/monitoring/zabbix/zabbix_group.py b/plugins/modules/monitoring/zabbix/zabbix_group.py new file mode 100644 index 0000000000..9f984e6ec7 --- /dev/null +++ b/plugins/modules/monitoring/zabbix/zabbix_group.py @@ -0,0 +1,208 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# (c) 2013-2014, Epic Games, Inc. +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = r''' +--- +module: zabbix_group +short_description: Create/delete Zabbix host groups +description: + - Create host groups if they do not exist. + - Delete existing host groups if they exist. +author: + - "Cove (@cove)" + - "Tony Minfei Ding (!UNKNOWN)" + - "Harrison Gu (@harrisongu)" +requirements: + - "python >= 2.6" + - "zabbix-api >= 0.5.4" +options: + state: + description: + - Create or delete host group. + required: false + type: str + default: "present" + choices: [ "present", "absent" ] + host_groups: + description: + - List of host groups to create or delete. + required: true + type: list + elements: str + aliases: [ "host_group" ] + +extends_documentation_fragment: +- community.general.zabbix + + +notes: + - Too many concurrent updates to the same group may cause Zabbix to return errors, see examples for a workaround if needed. +''' + +EXAMPLES = r''' +# Base create host groups example +- name: Create host groups + local_action: + module: zabbix_group + server_url: http://monitor.example.com + login_user: username + login_password: password + state: present + host_groups: + - Example group1 + - Example group2 + +# Limit the Zabbix group creations to one host since Zabbix can return an error when doing concurrent updates +- name: Create host groups + local_action: + module: zabbix_group + server_url: http://monitor.example.com + login_user: username + login_password: password + state: present + host_groups: + - Example group1 + - Example group2 + when: inventory_hostname==groups['group_name'][0] +''' + + +import atexit +import traceback + +try: + from zabbix_api import ZabbixAPI + from zabbix_api import Already_Exists + + HAS_ZABBIX_API = True +except ImportError: + ZBX_IMP_ERR = traceback.format_exc() + HAS_ZABBIX_API = False + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib + + +class HostGroup(object): + def __init__(self, module, zbx): + self._module = module + self._zapi = zbx + + # create host group(s) if not exists + def create_host_group(self, group_names): + try: + group_add_list = [] + for group_name in group_names: + result = self._zapi.hostgroup.get({'filter': {'name': group_name}}) + if not result: + try: + if self._module.check_mode: + self._module.exit_json(changed=True) + self._zapi.hostgroup.create({'name': group_name}) + group_add_list.append(group_name) + except Already_Exists: + return group_add_list + return group_add_list + except Exception as e: + self._module.fail_json(msg="Failed to create host group(s): %s" % e) + + # delete host group(s) + def delete_host_group(self, group_ids): + try: + if self._module.check_mode: + self._module.exit_json(changed=True) + self._zapi.hostgroup.delete(group_ids) + except Exception as e: + self._module.fail_json(msg="Failed to delete 
host group(s), Exception: %s" % e) + + # get group ids by name + def get_group_ids(self, host_groups): + group_ids = [] + + group_list = self._zapi.hostgroup.get({'output': 'extend', 'filter': {'name': host_groups}}) + for group in group_list: + group_id = group['groupid'] + group_ids.append(group_id) + return group_ids, group_list + + +def main(): + module = AnsibleModule( + argument_spec=dict( + server_url=dict(type='str', required=True, aliases=['url']), + login_user=dict(type='str', required=True), + login_password=dict(type='str', required=True, no_log=True), + http_login_user=dict(type='str', required=False, default=None), + http_login_password=dict(type='str', required=False, default=None, no_log=True), + validate_certs=dict(type='bool', required=False, default=True), + host_groups=dict(type='list', required=True, aliases=['host_group']), + state=dict(type='str', default="present", choices=['present', 'absent']), + timeout=dict(type='int', default=10) + ), + supports_check_mode=True + ) + + if not HAS_ZABBIX_API: + module.fail_json(msg=missing_required_lib('zabbix-api', url='https://pypi.org/project/zabbix-api/'), exception=ZBX_IMP_ERR) + + server_url = module.params['server_url'] + login_user = module.params['login_user'] + login_password = module.params['login_password'] + http_login_user = module.params['http_login_user'] + http_login_password = module.params['http_login_password'] + validate_certs = module.params['validate_certs'] + host_groups = module.params['host_groups'] + state = module.params['state'] + timeout = module.params['timeout'] + + zbx = None + + # login to zabbix + try: + zbx = ZabbixAPI(server_url, timeout=timeout, user=http_login_user, passwd=http_login_password, + validate_certs=validate_certs) + zbx.login(login_user, login_password) + atexit.register(zbx.logout) + except Exception as e: + module.fail_json(msg="Failed to connect to Zabbix server: %s" % e) + + hostGroup = HostGroup(module, zbx) + + group_ids = [] + group_list = [] + if host_groups: + group_ids, group_list = hostGroup.get_group_ids(host_groups) + + if state == "absent": + # delete host groups + if group_ids: + delete_group_names = [] + hostGroup.delete_host_group(group_ids) + for group in group_list: + delete_group_names.append(group['name']) + module.exit_json(changed=True, + result="Successfully deleted host group(s): %s." 
% ",".join(delete_group_names)) + else: + module.exit_json(changed=False, result="No host group(s) to delete.") + else: + # create host groups + group_add_list = hostGroup.create_host_group(host_groups) + if len(group_add_list) > 0: + module.exit_json(changed=True, result="Successfully created host group(s): %s" % group_add_list) + else: + module.exit_json(changed=False) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/monitoring/zabbix/zabbix_group_facts.py b/plugins/modules/monitoring/zabbix/zabbix_group_facts.py new file mode 120000 index 0000000000..20451bef68 --- /dev/null +++ b/plugins/modules/monitoring/zabbix/zabbix_group_facts.py @@ -0,0 +1 @@ +zabbix_group_info.py \ No newline at end of file diff --git a/plugins/modules/monitoring/zabbix/zabbix_group_info.py b/plugins/modules/monitoring/zabbix/zabbix_group_info.py new file mode 100644 index 0000000000..6da08d5103 --- /dev/null +++ b/plugins/modules/monitoring/zabbix/zabbix_group_info.py @@ -0,0 +1,132 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) me@mimiko.me +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +RETURN = r''' +--- +host_groups: + description: List of Zabbix groups. + returned: success + type: dict + sample: [ { "flags": "0", "groupid": "33", "internal": "0", "name": "Hostgruup A" } ] +''' + +DOCUMENTATION = r''' +--- +module: zabbix_group_info +short_description: Gather information about Zabbix hostgroup +description: + - This module allows you to search for Zabbix hostgroup entries. + - This module was called C(zabbix_group_facts) before Ansible 2.9. The usage did not change. +author: + - "Michael Miko (@RedWhiteMiko)" +requirements: + - "python >= 2.6" + - "zabbix-api >= 0.5.4" +options: + hostgroup_name: + description: + - Name of the hostgroup in Zabbix. + - hostgroup is the unique identifier used and cannot be updated using this module. 
+ required: true + type: list + elements: str +extends_documentation_fragment: +- community.general.zabbix + +''' + +EXAMPLES = r''' +- name: Get hostgroup info + local_action: + module: zabbix_group_info + server_url: http://monitor.example.com + login_user: username + login_password: password + hostgroup_name: + - ExampleHostgroup + timeout: 10 +''' + + +import atexit +import traceback + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib + +try: + from zabbix_api import ZabbixAPI + HAS_ZABBIX_API = True +except ImportError: + ZBX_IMP_ERR = traceback.format_exc() + HAS_ZABBIX_API = False + + +class Host(object): + def __init__(self, module, zbx): + self._module = module + self._zapi = zbx + + def get_group_ids_by_group_names(self, group_names): + group_list = self._zapi.hostgroup.get({'output': 'extend', 'filter': {'name': group_names}}) + if len(group_list) < 1: + self._module.fail_json(msg="Hostgroup not found: %s" % group_names) + return group_list + + +def main(): + module = AnsibleModule( + argument_spec=dict( + server_url=dict(type='str', required=True, aliases=['url']), + login_user=dict(type='str', required=True), + login_password=dict(type='str', required=True, no_log=True), + hostgroup_name=dict(type='list', required=True), + http_login_user=dict(type='str', required=False, default=None), + http_login_password=dict(type='str', required=False, default=None, no_log=True), + validate_certs=dict(type='bool', required=False, default=True), + timeout=dict(type='int', default=10) + ), + supports_check_mode=True + ) + if module._name == 'zabbix_group_facts': + module.deprecate("The 'zabbix_group_facts' module has been renamed to 'zabbix_group_info'", version='2.13') + + if not HAS_ZABBIX_API: + module.fail_json(msg=missing_required_lib('zabbix-api', url='https://pypi.org/project/zabbix-api/'), exception=ZBX_IMP_ERR) + + server_url = module.params['server_url'] + login_user = module.params['login_user'] + login_password = module.params['login_password'] + http_login_user = module.params['http_login_user'] + http_login_password = module.params['http_login_password'] + validate_certs = module.params['validate_certs'] + hostgroup_name = module.params['hostgroup_name'] + timeout = module.params['timeout'] + + zbx = None + # login to zabbix + try: + zbx = ZabbixAPI(server_url, timeout=timeout, user=http_login_user, passwd=http_login_password, + validate_certs=validate_certs) + zbx.login(login_user, login_password) + atexit.register(zbx.logout) + except Exception as e: + module.fail_json(msg="Failed to connect to Zabbix server: %s" % e) + + host = Host(module, zbx) + host_groups = host.get_group_ids_by_group_names(hostgroup_name) + module.exit_json(host_groups=host_groups) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/monitoring/zabbix/zabbix_host.py b/plugins/modules/monitoring/zabbix/zabbix_host.py new file mode 100644 index 0000000000..a6743d9998 --- /dev/null +++ b/plugins/modules/monitoring/zabbix/zabbix_host.py @@ -0,0 +1,1058 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2013-2014, Epic Games, Inc. 
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = r''' +--- +module: zabbix_host +short_description: Create/update/delete Zabbix hosts +description: + - This module allows you to create, modify and delete Zabbix host entries and associated group and template data. +author: + - "Cove (@cove)" + - Tony Minfei Ding (!UNKNOWN) + - Harrison Gu (@harrisongu) + - Werner Dijkerman (@dj-wasabi) + - Eike Frost (@eikef) +requirements: + - "python >= 2.6" + - "zabbix-api >= 0.5.4" +options: + host_name: + description: + - Name of the host in Zabbix. + - I(host_name) is the unique identifier used and cannot be updated using this module. + required: true + type: str + visible_name: + description: + - Visible name of the host in Zabbix. + type: str + description: + description: + - Description of the host in Zabbix. + type: str + host_groups: + description: + - List of host groups the host is part of. + type: list + elements: str + link_templates: + description: + - List of templates linked to the host. + type: list + elements: str + inventory_mode: + description: + - Configure the inventory mode. + choices: ['automatic', 'manual', 'disabled'] + type: str + inventory_zabbix: + description: + - Add Facts for a zabbix inventory (e.g. Tag) (see example below). + - Please review the interface documentation for more information on the supported properties + - U(https://www.zabbix.com/documentation/3.2/manual/api/reference/host/object#host_inventory) + type: dict + status: + description: + - Monitoring status of the host. + choices: ['enabled', 'disabled'] + default: 'enabled' + type: str + state: + description: + - State of the host. + - On C(present), it will create if host does not exist or update the host if the associated data is different. + - On C(absent) will remove a host if it exists. + choices: ['present', 'absent'] + default: 'present' + type: str + proxy: + description: + - The name of the Zabbix proxy to be used. + type: str + interfaces: + type: list + elements: dict + description: + - List of interfaces to be created for the host (see example below). + - For more information, review host interface documentation at + - U(https://www.zabbix.com/documentation/4.0/manual/api/reference/hostinterface/object) + suboptions: + type: + description: + - Interface type to add + - Numerical values are also accepted for interface type + - 1 = agent + - 2 = snmp + - 3 = ipmi + - 4 = jmx + choices: ['agent', 'snmp', 'ipmi', 'jmx'] + required: true + main: + type: int + description: + - Whether the interface is used as default. + - If multiple interfaces with the same type are provided, only one can be default. + - 0 (not default), 1 (default) + default: 0 + choices: [0, 1] + useip: + type: int + description: + - Connect to host interface with IP address instead of DNS name. + - 0 (don't use ip), 1 (use ip) + default: 0 + choices: [0, 1] + ip: + type: str + description: + - IP address used by host interface. + - Required if I(useip=1). + default: '' + dns: + type: str + description: + - DNS name of the host interface. + - Required if I(useip=0). + default: '' + port: + type: str + description: + - Port used by host interface. 
+ - If not specified, default port for each type of interface is used + - 10050 if I(type='agent') + - 161 if I(type='snmp') + - 623 if I(type='ipmi') + - 12345 if I(type='jmx') + bulk: + type: int + description: + - Whether to use bulk SNMP requests. + - 0 (don't use bulk requests), 1 (use bulk requests) + choices: [0, 1] + default: 1 + default: [] + tls_connect: + description: + - Specifies what encryption to use for outgoing connections. + - Possible values, 1 (no encryption), 2 (PSK), 4 (certificate). + - Works only with >= Zabbix 3.0 + default: 1 + type: int + tls_accept: + description: + - Specifies what types of connections are allowed for incoming connections. + - The tls_accept parameter accepts values of 1 to 7 + - Possible values, 1 (no encryption), 2 (PSK), 4 (certificate). + - Values can be combined. + - Works only with >= Zabbix 3.0 + default: 1 + type: int + tls_psk_identity: + description: + - It is a unique name by which this specific PSK is referred to by Zabbix components + - Do not put sensitive information in the PSK identity string, it is transmitted over the network unencrypted. + - Works only with >= Zabbix 3.0 + type: str + tls_psk: + description: + - PSK value is a hard to guess string of hexadecimal digits. + - The preshared key, at least 32 hex digits. Required if either I(tls_connect) or I(tls_accept) has PSK enabled. + - Works only with >= Zabbix 3.0 + type: str + ca_cert: + description: + - Required certificate issuer. + - Works only with >= Zabbix 3.0 + aliases: [ tls_issuer ] + type: str + tls_subject: + description: + - Required certificate subject. + - Works only with >= Zabbix 3.0 + type: str + ipmi_authtype: + description: + - IPMI authentication algorithm. + - Please review the Host object documentation for more information on the supported properties + - 'https://www.zabbix.com/documentation/3.4/manual/api/reference/host/object' + - Possible values are, C(0) (none), C(1) (MD2), C(2) (MD5), C(4) (straight), C(5) (OEM), C(6) (RMCP+), + with -1 being the API default. + - Please note that the Zabbix API will treat absent settings as default when updating + any of the I(ipmi_)-options; this means that if you attempt to set any of the four + options individually, the rest will be reset to default values. + type: int + ipmi_privilege: + description: + - IPMI privilege level. + - Please review the Host object documentation for more information on the supported properties + - 'https://www.zabbix.com/documentation/3.4/manual/api/reference/host/object' + - Possible values are C(1) (callback), C(2) (user), C(3) (operator), C(4) (admin), C(5) (OEM), with C(2) + being the API default. + - also see the last note in the I(ipmi_authtype) documentation + type: int + ipmi_username: + description: + - IPMI username. + - also see the last note in the I(ipmi_authtype) documentation + type: str + ipmi_password: + description: + - IPMI password. + - also see the last note in the I(ipmi_authtype) documentation + type: str + force: + description: + - Overwrite the host configuration, even if already present. + type: bool + default: 'yes' + macros: + description: + - List of user macros to assign to the zabbix host. + - Providing I(macros=[]) with I(force=yes) will clean all of the existing user macros from the host. + type: list + elements: dict + suboptions: + macro: + description: + - Name of the user macro. + - Can be in zabbix native format "{$MACRO}" or short format "MACRO". + type: str + required: true + value: + description: + - Value of the user macro. 
+ type: str + required: true + description: + description: + - Description of the user macro. + - Works only with >= Zabbix 4.4. + type: str + required: false + default: '' + aliases: [ user_macros ] + tags: + description: + - List of host tags to assign to the zabbix host. + - Works only with >= Zabbix 4.2. + - Providing I(tags=[]) with I(force=yes) will clean all of the tags from the host. + type: list + elements: dict + suboptions: + tag: + description: + - Name of the host tag. + type: str + required: true + value: + description: + - Value of the host tag. + type: str + default: '' + aliases: [ host_tags ] + +extends_documentation_fragment: +- community.general.zabbix + +''' + +EXAMPLES = r''' +- name: Create a new host or update an existing host's info + local_action: + module: zabbix_host + server_url: http://monitor.example.com + login_user: username + login_password: password + host_name: ExampleHost + visible_name: ExampleName + description: My ExampleHost Description + host_groups: + - Example group1 + - Example group2 + link_templates: + - Example template1 + - Example template2 + status: enabled + state: present + inventory_mode: manual + inventory_zabbix: + tag: "{{ your_tag }}" + alias: "{{ your_alias }}" + notes: "Special Informations: {{ your_informations | default('None') }}" + location: "{{ your_location }}" + site_rack: "{{ your_site_rack }}" + os: "{{ your_os }}" + hardware: "{{ your_hardware }}" + ipmi_authtype: 2 + ipmi_privilege: 4 + ipmi_username: username + ipmi_password: password + interfaces: + - type: 1 + main: 1 + useip: 1 + ip: 10.xx.xx.xx + dns: "" + port: "10050" + - type: 4 + main: 1 + useip: 1 + ip: 10.xx.xx.xx + dns: "" + port: "12345" + proxy: a.zabbix.proxy + macros: + - macro: '{$EXAMPLEMACRO}' + value: ExampleMacroValue + - macro: EXAMPLEMACRO2 + value: ExampleMacroValue2 + description: Example desc that work only with Zabbix 4.4 and higher + tags: + - tag: ExampleHostsTag + - tag: ExampleHostsTag2 + value: ExampleTagValue + +- name: Update an existing host's TLS settings + local_action: + module: zabbix_host + server_url: http://monitor.example.com + login_user: username + login_password: password + host_name: ExampleHost + visible_name: ExampleName + host_groups: + - Example group1 + tls_psk_identity: test + tls_connect: 2 + tls_psk: 123456789abcdef123456789abcdef12 +''' + + +import atexit +import copy +import traceback + +try: + from zabbix_api import ZabbixAPI + HAS_ZABBIX_API = True +except ImportError: + ZBX_IMP_ERR = traceback.format_exc() + HAS_ZABBIX_API = False + +from distutils.version import LooseVersion +from ansible.module_utils.basic import AnsibleModule, missing_required_lib + + +class Host(object): + def __init__(self, module, zbx): + self._module = module + self._zapi = zbx + self._zbx_api_version = zbx.api_version()[:5] + + # exist host + def is_host_exist(self, host_name): + result = self._zapi.host.get({'filter': {'host': host_name}}) + return result + + # check if host group exists + def check_host_group_exist(self, group_names): + for group_name in group_names: + result = self._zapi.hostgroup.get({'filter': {'name': group_name}}) + if not result: + self._module.fail_json(msg="Hostgroup not found: %s" % group_name) + return True + + def get_template_ids(self, template_list): + template_ids = [] + if template_list is None or len(template_list) == 0: + return template_ids + for template in template_list: + template_list = self._zapi.template.get({'output': 'extend', 'filter': {'host': template}}) + if len(template_list) < 1: + 
self._module.fail_json(msg="Template not found: %s" % template) + else: + template_id = template_list[0]['templateid'] + template_ids.append(template_id) + return template_ids + + def add_host(self, host_name, group_ids, status, interfaces, proxy_id, visible_name, description, tls_connect, + tls_accept, tls_psk_identity, tls_psk, tls_issuer, tls_subject, ipmi_authtype, ipmi_privilege, + ipmi_username, ipmi_password, macros, tags): + try: + if self._module.check_mode: + self._module.exit_json(changed=True) + parameters = {'host': host_name, 'interfaces': interfaces, 'groups': group_ids, 'status': status, + 'tls_connect': tls_connect, 'tls_accept': tls_accept} + if proxy_id: + parameters['proxy_hostid'] = proxy_id + if visible_name: + parameters['name'] = visible_name + if tls_psk_identity is not None: + parameters['tls_psk_identity'] = tls_psk_identity + if tls_psk is not None: + parameters['tls_psk'] = tls_psk + if tls_issuer is not None: + parameters['tls_issuer'] = tls_issuer + if tls_subject is not None: + parameters['tls_subject'] = tls_subject + if description: + parameters['description'] = description + if ipmi_authtype is not None: + parameters['ipmi_authtype'] = ipmi_authtype + if ipmi_privilege is not None: + parameters['ipmi_privilege'] = ipmi_privilege + if ipmi_username is not None: + parameters['ipmi_username'] = ipmi_username + if ipmi_password is not None: + parameters['ipmi_password'] = ipmi_password + if macros is not None: + parameters['macros'] = macros + if tags is not None: + parameters['tags'] = tags + + host_list = self._zapi.host.create(parameters) + if len(host_list) >= 1: + return host_list['hostids'][0] + except Exception as e: + self._module.fail_json(msg="Failed to create host %s: %s" % (host_name, e)) + + def update_host(self, host_name, group_ids, status, host_id, interfaces, exist_interface_list, proxy_id, + visible_name, description, tls_connect, tls_accept, tls_psk_identity, tls_psk, tls_issuer, + tls_subject, ipmi_authtype, ipmi_privilege, ipmi_username, ipmi_password, macros, tags): + try: + if self._module.check_mode: + self._module.exit_json(changed=True) + parameters = {'hostid': host_id, 'groups': group_ids, 'status': status, 'tls_connect': tls_connect, + 'tls_accept': tls_accept} + if proxy_id >= 0: + parameters['proxy_hostid'] = proxy_id + if visible_name: + parameters['name'] = visible_name + if tls_psk_identity: + parameters['tls_psk_identity'] = tls_psk_identity + if tls_psk: + parameters['tls_psk'] = tls_psk + if tls_issuer: + parameters['tls_issuer'] = tls_issuer + if tls_subject: + parameters['tls_subject'] = tls_subject + if description: + parameters['description'] = description + if ipmi_authtype: + parameters['ipmi_authtype'] = ipmi_authtype + if ipmi_privilege: + parameters['ipmi_privilege'] = ipmi_privilege + if ipmi_username: + parameters['ipmi_username'] = ipmi_username + if ipmi_password: + parameters['ipmi_password'] = ipmi_password + if macros is not None: + parameters['macros'] = macros + if tags is not None: + parameters['tags'] = tags + + self._zapi.host.update(parameters) + interface_list_copy = exist_interface_list + if interfaces: + for interface in interfaces: + flag = False + interface_str = interface + for exist_interface in exist_interface_list: + interface_type = int(interface['type']) + exist_interface_type = int(exist_interface['type']) + if interface_type == exist_interface_type: + # update + interface_str['interfaceid'] = exist_interface['interfaceid'] + self._zapi.hostinterface.update(interface_str) + flag = True + 
interface_list_copy.remove(exist_interface) + break + if not flag: + # add + interface_str['hostid'] = host_id + self._zapi.hostinterface.create(interface_str) + # remove + remove_interface_ids = [] + for remove_interface in interface_list_copy: + interface_id = remove_interface['interfaceid'] + remove_interface_ids.append(interface_id) + if len(remove_interface_ids) > 0: + self._zapi.hostinterface.delete(remove_interface_ids) + except Exception as e: + self._module.fail_json(msg="Failed to update host %s: %s" % (host_name, e)) + + def delete_host(self, host_id, host_name): + try: + if self._module.check_mode: + self._module.exit_json(changed=True) + self._zapi.host.delete([host_id]) + except Exception as e: + self._module.fail_json(msg="Failed to delete host %s: %s" % (host_name, e)) + + # get host by host name + def get_host_by_host_name(self, host_name): + params = { + 'output': 'extend', + 'selectInventory': 'extend', + 'selectMacros': 'extend', + 'filter': { + 'host': [host_name] + } + } + + if LooseVersion(self._zbx_api_version) >= LooseVersion('4.2.0'): + params.update({'selectTags': 'extend'}) + + host_list = self._zapi.host.get(params) + if len(host_list) < 1: + self._module.fail_json(msg="Host not found: %s" % host_name) + else: + return host_list[0] + + # get proxyid by proxy name + def get_proxyid_by_proxy_name(self, proxy_name): + proxy_list = self._zapi.proxy.get({'output': 'extend', 'filter': {'host': [proxy_name]}}) + if len(proxy_list) < 1: + self._module.fail_json(msg="Proxy not found: %s" % proxy_name) + else: + return int(proxy_list[0]['proxyid']) + + # get group ids by group names + def get_group_ids_by_group_names(self, group_names): + if self.check_host_group_exist(group_names): + return self._zapi.hostgroup.get({'output': 'groupid', 'filter': {'name': group_names}}) + + # get host groups ids by host id + def get_group_ids_by_host_id(self, host_id): + return self._zapi.hostgroup.get({'output': 'groupid', 'hostids': host_id}) + + # get host templates by host id + def get_host_templates_by_host_id(self, host_id): + template_ids = [] + template_list = self._zapi.template.get({'output': 'extend', 'hostids': host_id}) + for template in template_list: + template_ids.append(template['templateid']) + return template_ids + + # check the exist_interfaces whether it equals the interfaces or not + def check_interface_properties(self, exist_interface_list, interfaces): + interfaces_port_list = [] + + if interfaces is not None: + if len(interfaces) >= 1: + for interface in interfaces: + interfaces_port_list.append(str(interface['port'])) + + exist_interface_ports = [] + if len(exist_interface_list) >= 1: + for exist_interface in exist_interface_list: + exist_interface_ports.append(str(exist_interface['port'])) + + if set(interfaces_port_list) != set(exist_interface_ports): + return True + + for exist_interface in exist_interface_list: + exit_interface_port = str(exist_interface['port']) + for interface in interfaces: + interface_port = str(interface['port']) + if interface_port == exit_interface_port: + for key in interface.keys(): + if str(exist_interface[key]) != str(interface[key]): + return True + + return False + + # get the status of host by host + def get_host_status_by_host(self, host): + return host['status'] + + # check all the properties before link or clear template + def check_all_properties(self, host_id, group_ids, status, interfaces, template_ids, + exist_interfaces, host, proxy_id, visible_name, description, host_name, + inventory_mode, inventory_zabbix, 
tls_accept, tls_psk_identity, tls_psk, + tls_issuer, tls_subject, tls_connect, ipmi_authtype, ipmi_privilege, + ipmi_username, ipmi_password, macros, tags): + # get the existing host's groups + exist_host_groups = sorted(self.get_group_ids_by_host_id(host_id), key=lambda k: k['groupid']) + if sorted(group_ids, key=lambda k: k['groupid']) != exist_host_groups: + return True + + # get the existing status + exist_status = self.get_host_status_by_host(host) + if int(status) != int(exist_status): + return True + + # check the exist_interfaces whether it equals the interfaces or not + if self.check_interface_properties(exist_interfaces, interfaces): + return True + + # get the existing templates + exist_template_ids = self.get_host_templates_by_host_id(host_id) + if set(list(template_ids)) != set(exist_template_ids): + return True + + if int(host['proxy_hostid']) != int(proxy_id): + return True + + # Check whether the visible_name has changed; Zabbix defaults to the technical hostname if not set. + if visible_name: + if host['name'] != visible_name: + return True + + # Only compare description if it is given as a module parameter + if description: + if host['description'] != description: + return True + + if inventory_mode: + if LooseVersion(self._zbx_api_version) <= LooseVersion('4.4.0'): + if host['inventory']: + if int(host['inventory']['inventory_mode']) != self.inventory_mode_numeric(inventory_mode): + return True + elif inventory_mode != 'disabled': + return True + else: + if int(host['inventory_mode']) != self.inventory_mode_numeric(inventory_mode): + return True + + if inventory_zabbix: + proposed_inventory = copy.deepcopy(host['inventory']) + proposed_inventory.update(inventory_zabbix) + if proposed_inventory != host['inventory']: + return True + + if tls_accept is not None and 'tls_accept' in host: + if int(host['tls_accept']) != tls_accept: + return True + + if tls_psk_identity is not None and 'tls_psk_identity' in host: + if host['tls_psk_identity'] != tls_psk_identity: + return True + + if tls_psk is not None and 'tls_psk' in host: + if host['tls_psk'] != tls_psk: + return True + + if tls_issuer is not None and 'tls_issuer' in host: + if host['tls_issuer'] != tls_issuer: + return True + + if tls_subject is not None and 'tls_subject' in host: + if host['tls_subject'] != tls_subject: + return True + + if tls_connect is not None and 'tls_connect' in host: + if int(host['tls_connect']) != tls_connect: + return True + if ipmi_authtype is not None: + if int(host['ipmi_authtype']) != ipmi_authtype: + return True + if ipmi_privilege is not None: + if int(host['ipmi_privilege']) != ipmi_privilege: + return True + if ipmi_username is not None: + if host['ipmi_username'] != ipmi_username: + return True + if ipmi_password is not None: + if host['ipmi_password'] != ipmi_password: + return True + + # hostmacroid and hostid are present in every item of host['macros'] and need to be removed + if macros is not None and 'macros' in host: + existing_macros = sorted(host['macros'], key=lambda k: k['macro']) + for macro in existing_macros: + macro.pop('hostid', False) + macro.pop('hostmacroid', False) + + if sorted(macros, key=lambda k: k['macro']) != existing_macros: + return True + + if tags is not None and 'tags' in host: + if sorted(tags, key=lambda k: k['tag']) != sorted(host['tags'], key=lambda k: k['tag']): + return True + + return False + + # link or clear template of the host + def link_or_clear_template(self, host_id, template_id_list, tls_connect, tls_accept, tls_psk_identity, tls_psk, + 
tls_issuer, tls_subject, ipmi_authtype, ipmi_privilege, ipmi_username, ipmi_password): + # get host's exist template ids + exist_template_id_list = self.get_host_templates_by_host_id(host_id) + + exist_template_ids = set(exist_template_id_list) + template_ids = set(template_id_list) + template_id_list = list(template_ids) + + # get unlink and clear templates + templates_clear = exist_template_ids.difference(template_ids) + templates_clear_list = list(templates_clear) + request_str = {'hostid': host_id, 'templates': template_id_list, 'templates_clear': templates_clear_list, + 'tls_connect': tls_connect, 'tls_accept': tls_accept, 'ipmi_authtype': ipmi_authtype, + 'ipmi_privilege': ipmi_privilege, 'ipmi_username': ipmi_username, 'ipmi_password': ipmi_password} + if tls_psk_identity is not None: + request_str['tls_psk_identity'] = tls_psk_identity + if tls_psk is not None: + request_str['tls_psk'] = tls_psk + if tls_issuer is not None: + request_str['tls_issuer'] = tls_issuer + if tls_subject is not None: + request_str['tls_subject'] = tls_subject + try: + if self._module.check_mode: + self._module.exit_json(changed=True) + self._zapi.host.update(request_str) + except Exception as e: + self._module.fail_json(msg="Failed to link template to host: %s" % e) + + def inventory_mode_numeric(self, inventory_mode): + if inventory_mode == "automatic": + return int(1) + elif inventory_mode == "manual": + return int(0) + elif inventory_mode == "disabled": + return int(-1) + return inventory_mode + + # Update the host inventory_mode + def update_inventory_mode(self, host_id, inventory_mode): + + # nothing was set, do nothing + if not inventory_mode: + return + + inventory_mode = self.inventory_mode_numeric(inventory_mode) + + # watch for - https://support.zabbix.com/browse/ZBX-6033 + request_str = {'hostid': host_id, 'inventory_mode': inventory_mode} + try: + if self._module.check_mode: + self._module.exit_json(changed=True) + self._zapi.host.update(request_str) + except Exception as e: + self._module.fail_json(msg="Failed to set inventory_mode to host: %s" % e) + + def update_inventory_zabbix(self, host_id, inventory): + + if not inventory: + return + + request_str = {'hostid': host_id, 'inventory': inventory} + try: + if self._module.check_mode: + self._module.exit_json(changed=True) + self._zapi.host.update(request_str) + except Exception as e: + self._module.fail_json(msg="Failed to set inventory to host: %s" % e) + + +def main(): + module = AnsibleModule( + argument_spec=dict( + server_url=dict(type='str', required=True, aliases=['url']), + login_user=dict(type='str', required=True), + login_password=dict(type='str', required=True, no_log=True), + host_name=dict(type='str', required=True), + http_login_user=dict(type='str', required=False, default=None), + http_login_password=dict(type='str', required=False, default=None, no_log=True), + validate_certs=dict(type='bool', required=False, default=True), + host_groups=dict(type='list', required=False), + link_templates=dict(type='list', required=False), + status=dict(type='str', default="enabled", choices=['enabled', 'disabled']), + state=dict(type='str', default="present", choices=['present', 'absent']), + inventory_mode=dict(type='str', required=False, choices=['automatic', 'manual', 'disabled']), + ipmi_authtype=dict(type='int', default=None), + ipmi_privilege=dict(type='int', default=None), + ipmi_username=dict(type='str', required=False, default=None), + ipmi_password=dict(type='str', required=False, default=None, no_log=True), + 
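+            # The TLS options below take the raw Zabbix API values described in the
+            # DOCUMENTATION block above: 1 (no encryption), 2 (PSK) and 4 (certificate);
+            # tls_accept can combine them bitwise (for example, 3 would accept both
+            # unencrypted and PSK connections).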
tls_connect=dict(type='int', default=1), + tls_accept=dict(type='int', default=1), + tls_psk_identity=dict(type='str', required=False), + tls_psk=dict(type='str', required=False), + ca_cert=dict(type='str', required=False, aliases=['tls_issuer']), + tls_subject=dict(type='str', required=False), + inventory_zabbix=dict(type='dict', required=False), + timeout=dict(type='int', default=10), + interfaces=dict(type='list', required=False), + force=dict(type='bool', default=True), + proxy=dict(type='str', required=False), + visible_name=dict(type='str', required=False), + description=dict(type='str', required=False), + macros=dict( + type='list', + elements='dict', + aliases=['user_macros'], + options=dict( + macro=dict(type='str', required=True), + value=dict(type='str', required=True), + description=dict(type='str', required=False, default='') + ) + ), + tags=dict( + type='list', + elements='dict', + aliases=['host_tags'], + options=dict( + tag=dict(type='str', required=True), + value=dict(type='str', default='') + ) + ) + ), + supports_check_mode=True + ) + + if not HAS_ZABBIX_API: + module.fail_json(msg=missing_required_lib('zabbix-api', url='https://pypi.org/project/zabbix-api/'), exception=ZBX_IMP_ERR) + + server_url = module.params['server_url'] + login_user = module.params['login_user'] + login_password = module.params['login_password'] + http_login_user = module.params['http_login_user'] + http_login_password = module.params['http_login_password'] + validate_certs = module.params['validate_certs'] + host_name = module.params['host_name'] + visible_name = module.params['visible_name'] + description = module.params['description'] + host_groups = module.params['host_groups'] + link_templates = module.params['link_templates'] + inventory_mode = module.params['inventory_mode'] + ipmi_authtype = module.params['ipmi_authtype'] + ipmi_privilege = module.params['ipmi_privilege'] + ipmi_username = module.params['ipmi_username'] + ipmi_password = module.params['ipmi_password'] + tls_connect = module.params['tls_connect'] + tls_accept = module.params['tls_accept'] + tls_psk_identity = module.params['tls_psk_identity'] + tls_psk = module.params['tls_psk'] + tls_issuer = module.params['ca_cert'] + tls_subject = module.params['tls_subject'] + inventory_zabbix = module.params['inventory_zabbix'] + status = module.params['status'] + state = module.params['state'] + timeout = module.params['timeout'] + interfaces = module.params['interfaces'] + force = module.params['force'] + proxy = module.params['proxy'] + macros = module.params['macros'] + tags = module.params['tags'] + + # convert enabled to 0; disabled to 1 + status = 1 if status == "disabled" else 0 + + zbx = None + # login to zabbix + try: + zbx = ZabbixAPI(server_url, timeout=timeout, user=http_login_user, passwd=http_login_password, + validate_certs=validate_certs) + zbx.login(login_user, login_password) + atexit.register(zbx.logout) + except Exception as e: + module.fail_json(msg="Failed to connect to Zabbix server: %s" % e) + + host = Host(module, zbx) + + template_ids = [] + if link_templates: + template_ids = host.get_template_ids(link_templates) + + group_ids = [] + + if host_groups: + group_ids = host.get_group_ids_by_group_names(host_groups) + + ip = "" + if interfaces: + # ensure interfaces are well-formed + for interface in interfaces: + if 'type' not in interface: + module.fail_json(msg="(interface) type needs to be specified for interface '%s'." 
% interface)
+            interfacetypes = {'agent': 1, 'snmp': 2, 'ipmi': 3, 'jmx': 4}
+            if interface['type'] in interfacetypes.keys():
+                interface['type'] = interfacetypes[interface['type']]
+            if interface['type'] < 1 or interface['type'] > 4:
+                module.fail_json(msg="Interface type can only be 1-4 for interface '%s'." % interface)
+            if 'useip' not in interface:
+                interface['useip'] = 0
+            if 'dns' not in interface:
+                if interface['useip'] == 0:
+                    module.fail_json(msg="dns needs to be set if useip is 0 on interface '%s'." % interface)
+                interface['dns'] = ''
+            if 'ip' not in interface:
+                if interface['useip'] == 1:
+                    module.fail_json(msg="ip needs to be set if useip is 1 on interface '%s'." % interface)
+                interface['ip'] = ''
+            if 'main' not in interface:
+                interface['main'] = 0
+            if 'port' in interface and not isinstance(interface['port'], str):
+                try:
+                    interface['port'] = str(interface['port'])
+                except ValueError:
+                    module.fail_json(msg="port should be convertible to string on interface '%s'." % interface)
+            if 'port' not in interface:
+                if interface['type'] == 1:
+                    interface['port'] = "10050"
+                elif interface['type'] == 2:
+                    interface['port'] = "161"
+                elif interface['type'] == 3:
+                    interface['port'] = "623"
+                elif interface['type'] == 4:
+                    interface['port'] = "12345"
+
+            if interface['type'] == 1:
+                ip = interface['ip']
+
+    if macros:
+        # convert macros to Zabbix native format - {$MACRO}
+        for macro in macros:
+            macro['macro'] = macro['macro'].upper()
+            if not macro['macro'].startswith('{$'):
+                macro['macro'] = '{$' + macro['macro']
+            if not macro['macro'].endswith('}'):
+                macro['macro'] = macro['macro'] + '}'
+            if LooseVersion(zbx.api_version()[:5]) <= LooseVersion('4.4.0'):
+                if 'description' in macro:
+                    macro.pop('description', False)
+
+    # Use the specified proxy, or set proxy_id to 0
+    if proxy:
+        proxy_id = host.get_proxyid_by_proxy_name(proxy)
+    else:
+        proxy_id = 0
+
+    # check if the host exists
+    is_host_exist = host.is_host_exist(host_name)
+
+    if is_host_exist:
+        # get host id by host name
+        zabbix_host_obj = host.get_host_by_host_name(host_name)
+        host_id = zabbix_host_obj['hostid']
+
+        # If proxy is not specified as a module parameter, use the existing setting
+        if proxy is None:
+            proxy_id = int(zabbix_host_obj['proxy_hostid'])
+
+        if state == "absent":
+            # remove host
+            host.delete_host(host_id, host_name)
+            module.exit_json(changed=True, result="Successfully deleted host %s" % host_name)
+        else:
+            if not host_groups:
+                # if host_groups have not been specified when updating an existing host, just
+                # get the group_ids from the existing host without updating them.
+                group_ids = host.get_group_ids_by_host_id(host_id)
+
+            # get existing host's interfaces
+            exist_interfaces = host._zapi.hostinterface.get({'output': 'extend', 'hostids': host_id})
+
+            # if no interfaces were specified with the module, start with an empty list
+            if not interfaces:
+                interfaces = []
+
+            # When force=no is specified, append existing interfaces to interfaces to update. When
+            # no interfaces have been specified, copy existing interfaces as specified from the API.
+            # Do the same with templates and host groups.
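+            # For illustration (hypothetical values): an interface returned by the API such as
+            #   {'interfaceid': '5', 'hostid': '10084', 'bulk': '1', 'type': '1',
+            #    'main': '1', 'useip': '1', 'ip': '192.0.2.1', 'dns': '', 'port': '10050'}
+            # is stripped and cast below to
+            #   {'type': 1, 'main': 1, 'useip': 1, 'ip': '192.0.2.1', 'dns': '', 'port': '10050'}
+            # before being compared with the interfaces passed to the module.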
+            if not force or not interfaces:
+                for interface in copy.deepcopy(exist_interfaces):
+                    # remove values not used during hostinterface.add/update calls
+                    for key in tuple(interface.keys()):
+                        if key in ['interfaceid', 'hostid', 'bulk']:
+                            interface.pop(key, None)
+
+                    for index in interface.keys():
+                        if index in ['useip', 'main', 'type']:
+                            interface[index] = int(interface[index])
+
+                    if interface not in interfaces:
+                        interfaces.append(interface)
+
+            if not force or link_templates is None:
+                template_ids = list(set(template_ids + host.get_host_templates_by_host_id(host_id)))
+
+            if not force:
+                for group_id in host.get_group_ids_by_host_id(host_id):
+                    if group_id not in group_ids:
+                        group_ids.append(group_id)
+
+                # Macros not present in host.update will be removed if we don't copy them when force=no
+                if macros is not None and 'macros' in zabbix_host_obj.keys():
+                    provided_macros = [m['macro'] for m in macros]
+                    existing_macros = zabbix_host_obj['macros']
+                    for macro in existing_macros:
+                        if macro['macro'] not in provided_macros:
+                            macros.append(macro)
+
+                # Tags not present in host.update will be removed if we don't copy them when force=no
+                if tags is not None and 'tags' in zabbix_host_obj.keys():
+                    provided_tags = [t['tag'] for t in tags]
+                    existing_tags = zabbix_host_obj['tags']
+                    for tag in existing_tags:
+                        if tag['tag'] not in provided_tags:
+                            tags.append(tag)
+
+            # update host
+            if host.check_all_properties(
+                    host_id, group_ids, status, interfaces, template_ids, exist_interfaces, zabbix_host_obj, proxy_id,
+                    visible_name, description, host_name, inventory_mode, inventory_zabbix, tls_accept,
+                    tls_psk_identity, tls_psk, tls_issuer, tls_subject, tls_connect, ipmi_authtype, ipmi_privilege,
+                    ipmi_username, ipmi_password, macros, tags):
+
+                host.update_host(
+                    host_name, group_ids, status, host_id, interfaces, exist_interfaces, proxy_id, visible_name,
+                    description, tls_connect, tls_accept, tls_psk_identity, tls_psk, tls_issuer, tls_subject,
+                    ipmi_authtype, ipmi_privilege, ipmi_username, ipmi_password, macros, tags)
+
+                host.link_or_clear_template(
+                    host_id, template_ids, tls_connect, tls_accept, tls_psk_identity, tls_psk, tls_issuer,
+                    tls_subject, ipmi_authtype, ipmi_privilege, ipmi_username, ipmi_password)
+
+                host.update_inventory_mode(host_id, inventory_mode)
+                host.update_inventory_zabbix(host_id, inventory_zabbix)
+
+                module.exit_json(changed=True,
+                                 result="Successfully updated host %s (%s) and linked with template '%s'"
+                                        % (host_name, ip, link_templates))
+            else:
+                module.exit_json(changed=False)
+
+    else:
+        if state == "absent":
+            # the host has already been deleted.
+            module.exit_json(changed=False)
+
+        if not group_ids:
+            module.fail_json(msg="Specify at least one group for creating host '%s'." % host_name)
+
+        if not interfaces:
+            module.fail_json(msg="Specify at least one interface for creating host '%s'."
% host_name) + + # create host + host_id = host.add_host( + host_name, group_ids, status, interfaces, proxy_id, visible_name, description, tls_connect, tls_accept, + tls_psk_identity, tls_psk, tls_issuer, tls_subject, ipmi_authtype, ipmi_privilege, ipmi_username, + ipmi_password, macros, tags) + + host.link_or_clear_template( + host_id, template_ids, tls_connect, tls_accept, tls_psk_identity, tls_psk, tls_issuer, tls_subject, + ipmi_authtype, ipmi_privilege, ipmi_username, ipmi_password) + + host.update_inventory_mode(host_id, inventory_mode) + host.update_inventory_zabbix(host_id, inventory_zabbix) + + module.exit_json(changed=True, result="Successfully added host %s (%s) and linked with template '%s'" % ( + host_name, ip, link_templates)) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/monitoring/zabbix/zabbix_host_events_info.py b/plugins/modules/monitoring/zabbix/zabbix_host_events_info.py new file mode 100644 index 0000000000..74d297337a --- /dev/null +++ b/plugins/modules/monitoring/zabbix/zabbix_host_events_info.py @@ -0,0 +1,336 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) stephane.travassac@fr.clara.net +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +RETURN = ''' +--- +triggers_ok: + description: Host Zabbix Triggers in OK state + returned: On success + type: complex + contains: + comments: + description: Additional description of the trigger + type: str + description: + description: Name of the trigger + type: str + error: + description: Error text if there have been any problems when updating the state of the trigger + type: str + expression: + description: Reduced trigger expression + type: str + flags: + description: Origin of the trigger + type: int + lastchange: + description: Time when the trigger last changed its state (timestamp) + type: int + priority: + description: Severity of the trigger + type: int + state: + description: State of the trigger + type: int + status: + description: Whether the trigger is enabled or disabled + type: int + templateid: + description: ID of the parent template trigger + type: int + triggerid: + description: ID of the trigger + type: int + type: + description: Whether the trigger can generate multiple problem events + type: int + url: + description: URL associated with the trigger + type: str + value: + description: Whether the trigger is in OK or problem state + type: int +triggers_problem: + description: Host Zabbix Triggers in problem state. 
See trigger and event objects in the API documentation of your Zabbix version for more details
+    returned: On success
+    type: complex
+    contains:
+        comments:
+            description: Additional description of the trigger
+            type: str
+        description:
+            description: Name of the trigger
+            type: str
+        error:
+            description: Error text if there have been any problems when updating the state of the trigger
+            type: str
+        expression:
+            description: Reduced trigger expression
+            type: str
+        flags:
+            description: Origin of the trigger
+            type: int
+        last_event:
+            description: Information about the last event for this trigger
+            type: complex
+            contains:
+                acknowledged:
+                    description: Whether the event has been acknowledged
+                    type: int
+                acknowledges:
+                    description: Acknowledgement information
+                    type: complex
+                    contains:
+                        alias:
+                            description: Account that acknowledged the event
+                            type: str
+                        clock:
+                            description: Time when the event was created (timestamp)
+                            type: int
+                        message:
+                            description: Text of the acknowledgement message
+                            type: str
+                clock:
+                    description: Time when the event was created (timestamp)
+                    type: int
+                eventid:
+                    description: ID of the event
+                    type: int
+                value:
+                    description: State of the related object
+                    type: int
+        lastchange:
+            description: Time when the trigger last changed its state (timestamp)
+            type: int
+        priority:
+            description: Severity of the trigger
+            type: int
+        state:
+            description: State of the trigger
+            type: int
+        status:
+            description: Whether the trigger is enabled or disabled
+            type: int
+        templateid:
+            description: ID of the parent template trigger
+            type: int
+        triggerid:
+            description: ID of the trigger
+            type: int
+        type:
+            description: Whether the trigger can generate multiple problem events
+            type: int
+        url:
+            description: URL associated with the trigger
+            type: str
+        value:
+            description: Whether the trigger is in OK or problem state
+            type: int
+'''
+
+DOCUMENTATION = '''
+---
+module: zabbix_host_events_info
+short_description: Get all triggers about a Zabbix host
+description:
+    - This module allows you to check whether a Zabbix host has any active alerts before taking
+      action on it. Combine it with the Ansible C(fail) module to exclude hosts in trouble.
+    - The length of C(triggers_ok) lets you verify that the triggers defined by the host's templates exist on the Zabbix host.
+author:
+    - "Stéphane Travassac (@stravassac)"
+requirements:
+    - "python >= 2.7"
+    - "zabbix-api >= 0.5.3"
+options:
+    host_identifier:
+        description:
+            - Identifier of Zabbix Host
+        required: true
+        type: str
+    host_id_type:
+        description:
+            - Type of host_identifier
+        choices:
+            - hostname
+            - visible_name
+            - hostid
+        required: false
+        default: hostname
+        type: str
+    trigger_severity:
+        description:
+            - Zabbix severity for search filter
+        default: average
+        required: false
+        choices:
+            - not_classified
+            - information
+            - warning
+            - average
+            - high
+            - disaster
+        type: str
+extends_documentation_fragment:
+- community.general.zabbix
+
+'''
+
+EXAMPLES = '''
+- name: Exclude machine if an alert is active on it
+  zabbix_host_events_info:
+      server_url: "{{ zabbix_url }}"
+      login_user: "{{ lookup('env','ZABBIX_USER') }}"
+      login_password: "{{ lookup('env','ZABBIX_PASSWORD') }}"
+      host_identifier: "{{ inventory_hostname }}"
+      host_id_type: "hostname"
+      timeout: 120
+  register: zbx_host
+  delegate_to: localhost
+- fail:
+      msg: "Machine has an active alert in Zabbix"
+  when: zbx_host['triggers_problem']|length > 0
+'''
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+import atexit
+import traceback
+
+try:
+    from zabbix_api import ZabbixAPI
+
+    HAS_ZABBIX_API = True
+except ImportError:
+    ZBX_IMP_ERR = traceback.format_exc()
+    HAS_ZABBIX_API = False
+
+
+class Host(object):
+    def __init__(self, module, zbx):
+        self._module = module
+        self._zapi = zbx
+
+    def get_host(self, host_identifier, host_inventory, search_key):
+        """ Get host by hostname|visible_name|hostid """
+        host = self._zapi.host.get(
+            {'output': 'extend', 'selectParentTemplates': ['name'], 'filter': {search_key: host_identifier},
+             'selectInventory': host_inventory})
+        if len(host) < 1:
+            self._module.fail_json(msg="Host not found: %s" % host_identifier)
+        else:
+            return host[0]
+
+    def get_triggers_by_host_id_in_problem_state(self, host_id, trigger_severity):
+        """ Get triggers in problem state from a hostid """
+        # https://www.zabbix.com/documentation/3.4/manual/api/reference/trigger/get
+        output = 'extend'
+        triggers_list = self._zapi.trigger.get({'output': output, 'hostids': host_id,
+                                                'min_severity': trigger_severity})
+        return triggers_list
+
+    def get_last_event_by_trigger_id(self, triggers_id):
+        """ Get the last event from triggerid """
+        output = ['eventid', 'clock', 'acknowledged', 'value']
+        select_acknowledges = ['clock', 'alias', 'message']
+        event = self._zapi.event.get({'output': output, 'objectids': triggers_id,
+                                      'select_acknowledges': select_acknowledges, "limit": 1, "sortfield": "clock",
+                                      "sortorder": "DESC"})
+        return event[0]
+
+
+def main():
+    module = AnsibleModule(
+        argument_spec=dict(
+            server_url=dict(type='str', required=True, aliases=['url']),
+            login_user=dict(type='str', required=True),
+            login_password=dict(type='str', required=True, no_log=True),
+            http_login_user=dict(type='str', required=False, default=None),
+            http_login_password=dict(type='str', required=False, default=None, no_log=True),
+            host_identifier=dict(type='str', required=True),
+            host_id_type=dict(
+                default='hostname',
+                type='str',
+                choices=['hostname', 'visible_name', 'hostid']),
+            trigger_severity=dict(
+                type='str',
+                required=False,
+                default='average',
+                choices=['not_classified', 'information', 'warning', 'average', 'high', 'disaster']),
+            validate_certs=dict(type='bool', required=False, default=True),
+            timeout=dict(type='int',
default=10),
+
+        ),
+        supports_check_mode=True
+    )
+
+    if not HAS_ZABBIX_API:
+        module.fail_json(msg=missing_required_lib('zabbix-api', url='https://pypi.org/project/zabbix-api/'),
+                         exception=ZBX_IMP_ERR)
+
+    trigger_severity_map = {'not_classified': 0, 'information': 1, 'warning': 2, 'average': 3, 'high': 4, 'disaster': 5}
+    server_url = module.params['server_url']
+    login_user = module.params['login_user']
+    login_password = module.params['login_password']
+    http_login_user = module.params['http_login_user']
+    http_login_password = module.params['http_login_password']
+    validate_certs = module.params['validate_certs']
+    host_id = module.params['host_identifier']
+    host_id_type = module.params['host_id_type']
+    trigger_severity = trigger_severity_map[module.params['trigger_severity']]
+    timeout = module.params['timeout']
+
+    host_inventory = 'hostid'
+    zbx = None
+    # login to zabbix
+    try:
+        zbx = ZabbixAPI(server_url, timeout=timeout, user=http_login_user, passwd=http_login_password,
+                        validate_certs=validate_certs)
+        zbx.login(login_user, login_password)
+        atexit.register(zbx.logout)
+    except Exception as e:
+        module.fail_json(msg="Failed to connect to Zabbix server: %s" % e)
+
+    host = Host(module, zbx)
+
+    if host_id_type == 'hostname':
+        zabbix_host = host.get_host(host_id, host_inventory, 'host')
+        host_id = zabbix_host['hostid']
+
+    elif host_id_type == 'visible_name':
+        zabbix_host = host.get_host(host_id, host_inventory, 'name')
+        host_id = zabbix_host['hostid']
+
+    elif host_id_type == 'hostid':
+        # check that the hostid exists
+        zabbix_host = host.get_host(host_id, host_inventory, 'hostid')
+
+    triggers = host.get_triggers_by_host_id_in_problem_state(host_id, trigger_severity)
+
+    triggers_ok = []
+    triggers_problem = []
+    for trigger in triggers:
+        # Get last event for trigger with problem value = 1
+        # https://www.zabbix.com/documentation/3.4/manual/api/reference/trigger/object
+        if int(trigger['value']) == 1:
+            event = host.get_last_event_by_trigger_id(trigger['triggerid'])
+            trigger['last_event'] = event
+            triggers_problem.append(trigger)
+        else:
+            triggers_ok.append(trigger)
+
+    module.exit_json(ok=True, triggers_ok=triggers_ok, triggers_problem=triggers_problem)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/monitoring/zabbix/zabbix_host_facts.py b/plugins/modules/monitoring/zabbix/zabbix_host_facts.py
new file mode 120000
index 0000000000..c6d22f6237
--- /dev/null
+++ b/plugins/modules/monitoring/zabbix/zabbix_host_facts.py
@@ -0,0 +1 @@
+zabbix_host_info.py
\ No newline at end of file
diff --git a/plugins/modules/monitoring/zabbix/zabbix_host_info.py b/plugins/modules/monitoring/zabbix/zabbix_host_info.py
new file mode 100644
index 0000000000..3c10d7a6bd
--- /dev/null
+++ b/plugins/modules/monitoring/zabbix/zabbix_host_info.py
@@ -0,0 +1,252 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) me@mimiko.me
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+RETURN = r'''
+---
+hosts:
+  description: List of Zabbix hosts. See https://www.zabbix.com/documentation/4.0/manual/api/reference/host/get for list of host values.
+  returned: success
+  type: dict
+  sample: [ { "available": "1", "description": "", "disable_until": "0", "error": "", "flags": "0", "groups": ["1"], "host": "Host A", ...
} ] +''' + +DOCUMENTATION = r''' +--- +module: zabbix_host_info +short_description: Gather information about Zabbix host +description: + - This module allows you to search for Zabbix host entries. + - This module was called C(zabbix_host_facts) before Ansible 2.9. The usage did not change. +author: + - "Michael Miko (@RedWhiteMiko)" +requirements: + - "python >= 2.6" + - "zabbix-api >= 0.5.4" +options: + host_name: + description: + - Name of the host in Zabbix. + - host_name is the unique identifier used and cannot be updated using this module. + - Required when I(host_ip) is not used. + required: false + type: str + host_ip: + description: + - Host interface IP of the host in Zabbix. + - Required when I(host_name) is not used. + required: false + type: list + elements: str + exact_match: + description: + - Find the exact match + type: bool + default: no + remove_duplicate: + description: + - Remove duplicate host from host result + type: bool + default: yes + host_inventory: + description: + - List of host inventory keys to display in result. + - Whole host inventory is retrieved if keys are not specified. + type: list + elements: str + required: false +extends_documentation_fragment: +- community.general.zabbix + +''' + +EXAMPLES = r''' +- name: Get host info + local_action: + module: zabbix_host_info + server_url: http://monitor.example.com + login_user: username + login_password: password + host_name: ExampleHost + host_ip: 127.0.0.1 + timeout: 10 + exact_match: no + remove_duplicate: yes + +- name: Reduce host inventory information to provided keys + local_action: + module: zabbix_host_info + server_url: http://monitor.example.com + login_user: username + login_password: password + host_name: ExampleHost + host_inventory: + - os + - tag + host_ip: 127.0.0.1 + timeout: 10 + exact_match: no + remove_duplicate: yes +''' + + +import atexit +import traceback + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib + +try: + from zabbix_api import ZabbixAPI + HAS_ZABBIX_API = True +except ImportError: + ZBX_IMP_ERR = traceback.format_exc() + HAS_ZABBIX_API = False + + +class Host(object): + def __init__(self, module, zbx): + self._module = module + self._zapi = zbx + + def get_hosts_by_host_name(self, host_name, exact_match, host_inventory): + """ Get host by host name """ + search_key = 'search' + if exact_match: + search_key = 'filter' + host_list = self._zapi.host.get({ + 'output': 'extend', + 'selectParentTemplates': ['name'], + search_key: {'host': [host_name]}, + 'selectInventory': host_inventory, + 'selectGroups': 'extend', + 'selectTags': 'extend', + 'selectMacros': 'extend' + }) + if len(host_list) < 1: + self._module.fail_json(msg="Host not found: %s" % host_name) + else: + return host_list + + def get_hosts_by_ip(self, host_ips, host_inventory): + """ Get host by host ip(s) """ + hostinterfaces = self._zapi.hostinterface.get({ + 'output': 'extend', + 'filter': { + 'ip': host_ips + } + }) + if len(hostinterfaces) < 1: + self._module.fail_json(msg="Host not found: %s" % host_ips) + host_list = [] + for hostinterface in hostinterfaces: + host = self._zapi.host.get({ + 'output': 'extend', + 'selectGroups': 'extend', + 'selectParentTemplates': ['name'], + 'hostids': hostinterface['hostid'], + 'selectInventory': host_inventory, + 'selectTags': 'extend', + 'selectMacros': 'extend' + }) + host[0]['hostinterfaces'] = hostinterface + host_list.append(host[0]) + return host_list + + def delete_duplicate_hosts(self, hosts): + """ Delete duplicated hosts """ + 
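+        # Hosts are considered duplicates when they share the same visible name
+        # ('name'); only the first occurrence in the input list is kept.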
unique_hosts = []
+        listed_hostnames = []
+        for zabbix_host in hosts:
+            if zabbix_host['name'] in listed_hostnames:
+                continue
+            unique_hosts.append(zabbix_host)
+            listed_hostnames.append(zabbix_host['name'])
+        return unique_hosts
+
+
+def main():
+    module = AnsibleModule(
+        argument_spec=dict(
+            server_url=dict(type='str', required=True, aliases=['url']),
+            login_user=dict(type='str', required=True),
+            login_password=dict(type='str', required=True, no_log=True),
+            host_name=dict(type='str', default='', required=False),
+            host_ip=dict(type='list', default=[], required=False),
+            http_login_user=dict(type='str', required=False, default=None),
+            http_login_password=dict(type='str', required=False, default=None, no_log=True),
+            validate_certs=dict(type='bool', required=False, default=True),
+            timeout=dict(type='int', default=10),
+            exact_match=dict(type='bool', required=False, default=False),
+            remove_duplicate=dict(type='bool', required=False, default=True),
+            host_inventory=dict(type='list', default=[], required=False)
+        ),
+        supports_check_mode=True
+    )
+    if module._name == 'zabbix_host_facts':
+        module.deprecate("The 'zabbix_host_facts' module has been renamed to 'zabbix_host_info'", version='2.13')
+
+    if not HAS_ZABBIX_API:
+        module.fail_json(msg=missing_required_lib('zabbix-api', url='https://pypi.org/project/zabbix-api/'), exception=ZBX_IMP_ERR)
+
+    server_url = module.params['server_url']
+    login_user = module.params['login_user']
+    login_password = module.params['login_password']
+    http_login_user = module.params['http_login_user']
+    http_login_password = module.params['http_login_password']
+    validate_certs = module.params['validate_certs']
+    host_name = module.params['host_name']
+    host_ips = module.params['host_ip']
+    timeout = module.params['timeout']
+    exact_match = module.params['exact_match']
+    is_remove_duplicate = module.params['remove_duplicate']
+    host_inventory = module.params['host_inventory']
+
+    if not host_inventory:
+        host_inventory = 'extend'
+
+    zbx = None
+    # login to zabbix
+    try:
+        zbx = ZabbixAPI(server_url, timeout=timeout, user=http_login_user, passwd=http_login_password,
+                        validate_certs=validate_certs)
+        zbx.login(login_user, login_password)
+        atexit.register(zbx.logout)
+    except Exception as e:
+        module.fail_json(msg="Failed to connect to Zabbix server: %s" % e)
+
+    host = Host(module, zbx)
+
+    if host_name:
+        hosts = host.get_hosts_by_host_name(host_name, exact_match, host_inventory)
+        if is_remove_duplicate:
+            hosts = host.delete_duplicate_hosts(hosts)
+        extended_hosts = []
+        for zabbix_host in hosts:
+            zabbix_host['hostinterfaces'] = host._zapi.hostinterface.get({
+                'output': 'extend', 'hostids': zabbix_host['hostid']
+            })
+            extended_hosts.append(zabbix_host)
+        module.exit_json(ok=True, hosts=extended_hosts)
+
+    elif host_ips:
+        extended_hosts = host.get_hosts_by_ip(host_ips, host_inventory)
+        if is_remove_duplicate:
+            extended_hosts = host.delete_duplicate_hosts(extended_hosts)
+        module.exit_json(ok=True, hosts=extended_hosts)
+    else:
+        module.exit_json(ok=False, hosts=[], result="No Host present")
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/monitoring/zabbix/zabbix_hostmacro.py b/plugins/modules/monitoring/zabbix/zabbix_hostmacro.py
new file mode 100644
index 0000000000..bf29841ad0
--- /dev/null
+++ b/plugins/modules/monitoring/zabbix/zabbix_hostmacro.py
@@ -0,0 +1,264 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2013-2014, Epic Games, Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = r''' +--- +module: zabbix_hostmacro +short_description: Create/update/delete Zabbix host macros +description: + - manages Zabbix host macros, it can create, update or delete them. +author: + - "Cove (@cove)" + - Dean Hailin Song (!UNKNOWN) +requirements: + - "python >= 2.6" + - "zabbix-api >= 0.5.4" +options: + host_name: + description: + - Name of the host. + required: true + type: str + macro_name: + description: + - Name of the host macro in zabbix native format C({$MACRO}) or simple format C(MACRO). + required: true + type: str + macro_value: + description: + - Value of the host macro. + - Required if I(state=present). + type: str + state: + description: + - State of the macro. + - On C(present), it will create if macro does not exist or update the macro if the associated data is different. + - On C(absent) will remove a macro if it exists. + required: false + choices: ['present', 'absent'] + type: str + default: "present" + force: + description: + - Only updates an existing macro if set to C(yes). + default: 'yes' + type: bool + +extends_documentation_fragment: +- community.general.zabbix + +''' + +EXAMPLES = r''' +- name: Create new host macro or update an existing macro's value + local_action: + module: zabbix_hostmacro + server_url: http://monitor.example.com + login_user: username + login_password: password + host_name: ExampleHost + macro_name: EXAMPLE.MACRO + macro_value: Example value + state: present + +# Values with curly brackets need to be quoted otherwise they will be interpreted as a dictionary +- name: Create new host macro in Zabbix native format + local_action: + module: zabbix_hostmacro + server_url: http://monitor.example.com + login_user: username + login_password: password + host_name: ExampleHost + macro_name: "{$EXAMPLE.MACRO}" + macro_value: Example value + state: present + +- name: Delete existing host macro + local_action: + module: zabbix_hostmacro + server_url: http://monitor.example.com + login_user: username + login_password: password + host_name: ExampleHost + macro_name: "{$EXAMPLE.MACRO}" + state: absent +''' + + +import atexit +import traceback + +try: + from zabbix_api import ZabbixAPI + HAS_ZABBIX_API = True +except ImportError: + ZBX_IMP_ERR = traceback.format_exc() + HAS_ZABBIX_API = False + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib + + +class HostMacro(object): + def __init__(self, module, zbx): + self._module = module + self._zapi = zbx + + # get host id by host name + def get_host_id(self, host_name): + try: + host_list = self._zapi.host.get({'output': 'extend', 'filter': {'host': host_name}}) + if len(host_list) < 1: + self._module.fail_json(msg="Host not found: %s" % host_name) + else: + host_id = host_list[0]['hostid'] + return host_id + except Exception as e: + self._module.fail_json(msg="Failed to get the host %s id: %s." 
% (host_name, e)) + + # get host macro + def get_host_macro(self, macro_name, host_id): + try: + host_macro_list = self._zapi.usermacro.get( + {"output": "extend", "selectSteps": "extend", 'hostids': [host_id], 'filter': {'macro': macro_name}}) + if len(host_macro_list) > 0: + return host_macro_list[0] + return None + except Exception as e: + self._module.fail_json(msg="Failed to get host macro %s: %s" % (macro_name, e)) + + # create host macro + def create_host_macro(self, macro_name, macro_value, host_id): + try: + if self._module.check_mode: + self._module.exit_json(changed=True) + self._zapi.usermacro.create({'hostid': host_id, 'macro': macro_name, 'value': macro_value}) + self._module.exit_json(changed=True, result="Successfully added host macro %s" % macro_name) + except Exception as e: + self._module.fail_json(msg="Failed to create host macro %s: %s" % (macro_name, e)) + + # update host macro + def update_host_macro(self, host_macro_obj, macro_name, macro_value): + host_macro_id = host_macro_obj['hostmacroid'] + if host_macro_obj['macro'] == macro_name and host_macro_obj['value'] == macro_value: + self._module.exit_json(changed=False, result="Host macro %s already up to date" % macro_name) + try: + if self._module.check_mode: + self._module.exit_json(changed=True) + self._zapi.usermacro.update({'hostmacroid': host_macro_id, 'value': macro_value}) + self._module.exit_json(changed=True, result="Successfully updated host macro %s" % macro_name) + except Exception as e: + self._module.fail_json(msg="Failed to update host macro %s: %s" % (macro_name, e)) + + # delete host macro + def delete_host_macro(self, host_macro_obj, macro_name): + host_macro_id = host_macro_obj['hostmacroid'] + try: + if self._module.check_mode: + self._module.exit_json(changed=True) + self._zapi.usermacro.delete([host_macro_id]) + self._module.exit_json(changed=True, result="Successfully deleted host macro %s" % macro_name) + except Exception as e: + self._module.fail_json(msg="Failed to delete host macro %s: %s" % (macro_name, e)) + + +def normalize_macro_name(macro_name): + # Zabbix handles macro names in upper case characters + if ':' in macro_name: + macro_name = ':'.join([macro_name.split(':')[0].upper(), ':'.join(macro_name.split(':')[1:])]) + else: + macro_name = macro_name.upper() + + # Valid format for macro is {$MACRO} + if not macro_name.startswith('{$'): + macro_name = '{$' + macro_name + if not macro_name.endswith('}'): + macro_name = macro_name + '}' + + return macro_name + + +def main(): + module = AnsibleModule( + argument_spec=dict( + server_url=dict(type='str', required=True, aliases=['url']), + login_user=dict(type='str', required=True), + login_password=dict(type='str', required=True, no_log=True), + http_login_user=dict(type='str', required=False, default=None), + http_login_password=dict(type='str', required=False, default=None, no_log=True), + validate_certs=dict(type='bool', required=False, default=True), + host_name=dict(type='str', required=True), + macro_name=dict(type='str', required=True), + macro_value=dict(type='str', required=False), + state=dict(type='str', default='present', choices=['present', 'absent']), + timeout=dict(type='int', default=10), + force=dict(type='bool', default=True) + ), + required_if=[ + ['state', 'present', ['macro_value']] + ], + supports_check_mode=True + ) + + if not HAS_ZABBIX_API: + module.fail_json(msg=missing_required_lib('zabbix-api', url='https://pypi.org/project/zabbix-api/'), exception=ZBX_IMP_ERR) + + server_url = module.params['server_url'] + 
login_user = module.params['login_user'] + login_password = module.params['login_password'] + http_login_user = module.params['http_login_user'] + http_login_password = module.params['http_login_password'] + validate_certs = module.params['validate_certs'] + host_name = module.params['host_name'] + macro_name = normalize_macro_name(module.params['macro_name']) + macro_value = module.params['macro_value'] + state = module.params['state'] + timeout = module.params['timeout'] + force = module.params['force'] + + zbx = None + # login to zabbix + try: + zbx = ZabbixAPI(server_url, timeout=timeout, user=http_login_user, passwd=http_login_password, + validate_certs=validate_certs) + zbx.login(login_user, login_password) + atexit.register(zbx.logout) + except Exception as e: + module.fail_json(msg="Failed to connect to Zabbix server: %s" % e) + + host_macro_class_obj = HostMacro(module, zbx) + + if host_name: + host_id = host_macro_class_obj.get_host_id(host_name) + host_macro_obj = host_macro_class_obj.get_host_macro(macro_name, host_id) + + if state == 'absent': + if not host_macro_obj: + module.exit_json(changed=False, msg="Host Macro %s does not exist" % macro_name) + else: + # delete a macro + host_macro_class_obj.delete_host_macro(host_macro_obj, macro_name) + else: + if not host_macro_obj: + # create host macro + host_macro_class_obj.create_host_macro(macro_name, macro_value, host_id) + elif force: + # update host macro + host_macro_class_obj.update_host_macro(host_macro_obj, macro_name, macro_value) + else: + module.exit_json(changed=False, result="Host macro %s already exists and force is set to no" % macro_name) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/monitoring/zabbix/zabbix_maintenance.py b/plugins/modules/monitoring/zabbix/zabbix_maintenance.py new file mode 100644 index 0000000000..954fc20333 --- /dev/null +++ b/plugins/modules/monitoring/zabbix/zabbix_maintenance.py @@ -0,0 +1,402 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2013, Alexander Bulimov +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = r''' + +module: zabbix_maintenance +short_description: Create Zabbix maintenance windows +description: + - This module will let you create Zabbix maintenance windows. +author: "Alexander Bulimov (@abulimov)" +requirements: + - "python >= 2.6" + - "zabbix-api >= 0.5.4" +options: + state: + description: + - Create or remove a maintenance window. Maintenance window to remove is identified by name. + default: present + choices: [ "present", "absent" ] + type: str + host_names: + description: + - Hosts to manage maintenance window for. + - B(Required) option when I(state=present) and I(host_groups) is not used. + aliases: [ "host_name" ] + type: list + elements: str + host_groups: + description: + - Host groups to manage maintenance window for. + - B(Required) option when I(state=present) and I(host_names) is not used. + aliases: [ "host_group" ] + type: list + elements: str + minutes: + description: + - Length of maintenance window in minutes. + default: 10 + type: int + name: + description: + - Unique name of maintenance window. + required: true + type: str + desc: + description: + - Short description of maintenance window. 
+ default: Created by Ansible + type: str + collect_data: + description: + - Type of maintenance. With data collection, or without. + type: bool + default: 'yes' + +extends_documentation_fragment: +- community.general.zabbix + + +notes: + - Useful for setting hosts in maintenance mode before big update, + and removing maintenance window after update. + - Module creates maintenance window from now() to now() + minutes, + so if Zabbix server's time and host's time are not synchronized, + you will get strange results. + - Install required module with 'pip install zabbix-api' command. +''' + +EXAMPLES = r''' +- name: Create a named maintenance window for host www1 for 90 minutes + zabbix_maintenance: + name: Update of www1 + host_name: www1.example.com + state: present + minutes: 90 + server_url: https://monitoring.example.com + login_user: ansible + login_password: pAsSwOrD + +- name: Create a named maintenance window for host www1 and host groups Office and Dev + zabbix_maintenance: + name: Update of www1 + host_name: www1.example.com + host_groups: + - Office + - Dev + state: present + server_url: https://monitoring.example.com + login_user: ansible + login_password: pAsSwOrD + +- name: Create a named maintenance window for hosts www1 and db1, without data collection. + zabbix_maintenance: + name: update + host_names: + - www1.example.com + - db1.example.com + state: present + collect_data: False + server_url: https://monitoring.example.com + login_user: ansible + login_password: pAsSwOrD + +- name: Remove maintenance window by name + zabbix_maintenance: + name: Test1 + state: absent + server_url: https://monitoring.example.com + login_user: ansible + login_password: pAsSwOrD +''' + + +import atexit +import datetime +import time +import traceback + +try: + from zabbix_api import ZabbixAPI + HAS_ZABBIX_API = True +except ImportError: + ZBX_IMP_ERR = traceback.format_exc() + HAS_ZABBIX_API = False + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib + + +def create_maintenance(zbx, group_ids, host_ids, start_time, maintenance_type, period, name, desc): + end_time = start_time + period + try: + zbx.maintenance.create( + { + "groupids": group_ids, + "hostids": host_ids, + "name": name, + "maintenance_type": maintenance_type, + "active_since": str(start_time), + "active_till": str(end_time), + "description": desc, + "timeperiods": [{ + "timeperiod_type": "0", + "start_date": str(start_time), + "period": str(period), + }] + } + ) + # zabbix_api can call sys.exit() so we need to catch SystemExit here + except (Exception, SystemExit) as e: + return 1, None, str(e) + return 0, None, None + + +def update_maintenance(zbx, maintenance_id, group_ids, host_ids, start_time, maintenance_type, period, desc): + end_time = start_time + period + try: + zbx.maintenance.update( + { + "maintenanceid": maintenance_id, + "groupids": group_ids, + "hostids": host_ids, + "maintenance_type": maintenance_type, + "active_since": str(start_time), + "active_till": str(end_time), + "description": desc, + "timeperiods": [{ + "timeperiod_type": "0", + "start_date": str(start_time), + "period": str(period), + }] + } + ) + # zabbix_api can call sys.exit() so we need to catch SystemExit here + except (Exception, SystemExit) as e: + return 1, None, str(e) + return 0, None, None + + +def get_maintenance(zbx, name): + try: + maintenances = zbx.maintenance.get( + { + "filter": + { + "name": name, + }, + "selectGroups": "extend", + "selectHosts": "extend" + } + ) + # zabbix_api can call sys.exit() so we need 
to catch SystemExit here + except (Exception, SystemExit) as e: + return 1, None, str(e) + + for maintenance in maintenances: + maintenance["groupids"] = [group["groupid"] for group in maintenance["groups"]] if "groups" in maintenance else [] + maintenance["hostids"] = [host["hostid"] for host in maintenance["hosts"]] if "hosts" in maintenance else [] + return 0, maintenance, None + + return 0, None, None + + +def delete_maintenance(zbx, maintenance_id): + try: + zbx.maintenance.delete([maintenance_id]) + # zabbix_api can call sys.exit() so we need to catch SystemExit here + except (Exception, SystemExit) as e: + return 1, None, str(e) + return 0, None, None + + +def get_group_ids(zbx, host_groups): + group_ids = [] + for group in host_groups: + try: + result = zbx.hostgroup.get( + { + "output": "extend", + "filter": + { + "name": group + } + } + ) + # zabbix_api can call sys.exit() so we need to catch SystemExit here + except (Exception, SystemExit) as e: + return 1, None, str(e) + + if not result: + return 1, None, "Group id for group %s not found" % group + + group_ids.append(result[0]["groupid"]) + + return 0, group_ids, None + + +def get_host_ids(zbx, host_names): + host_ids = [] + for host in host_names: + try: + result = zbx.host.get( + { + "output": "extend", + "filter": + { + "name": host + } + } + ) + # zabbix_api can call sys.exit() so we need to catch SystemExit here + except (Exception, SystemExit) as e: + return 1, None, str(e) + + if not result: + return 1, None, "Host id for host %s not found" % host + + host_ids.append(result[0]["hostid"]) + + return 0, host_ids, None + + +def main(): + module = AnsibleModule( + argument_spec=dict( + state=dict(type='str', required=False, default='present', choices=['present', 'absent']), + server_url=dict(type='str', required=True, aliases=['url']), + host_names=dict(type='list', required=False, default=None, aliases=['host_name']), + minutes=dict(type='int', required=False, default=10), + host_groups=dict(type='list', required=False, default=None, aliases=['host_group']), + login_user=dict(type='str', required=True), + login_password=dict(type='str', required=True, no_log=True), + validate_certs=dict(type='bool', required=False, default=True), + http_login_user=dict(type='str', required=False, default=None), + http_login_password=dict(type='str', required=False, default=None, no_log=True), + name=dict(type='str', required=True), + desc=dict(type='str', required=False, default="Created by Ansible"), + collect_data=dict(type='bool', required=False, default=True), + timeout=dict(type='int', default=10), + ), + supports_check_mode=True, + ) + + if not HAS_ZABBIX_API: + module.fail_json(msg=missing_required_lib('zabbix-api', url='https://pypi.org/project/zabbix-api/'), exception=ZBX_IMP_ERR) + + host_names = module.params['host_names'] + host_groups = module.params['host_groups'] + state = module.params['state'] + login_user = module.params['login_user'] + login_password = module.params['login_password'] + http_login_user = module.params['http_login_user'] + http_login_password = module.params['http_login_password'] + validate_certs = module.params['validate_certs'] + minutes = module.params['minutes'] + name = module.params['name'] + desc = module.params['desc'] + server_url = module.params['server_url'] + collect_data = module.params['collect_data'] + timeout = module.params['timeout'] + + if collect_data: + maintenance_type = 0 + else: + maintenance_type = 1 + + try: + zbx = ZabbixAPI(server_url, timeout=timeout, user=http_login_user, 
passwd=http_login_password, + validate_certs=validate_certs) + zbx.login(login_user, login_password) + atexit.register(zbx.logout) + # zabbix_api can call sys.exit() so we need to catch SystemExit here + except (Exception, SystemExit) as e: + module.fail_json(msg="Failed to connect to Zabbix server: %s" % e) + + changed = False + + if state == "present": + + if not host_names and not host_groups: + module.fail_json(msg="At least one host_name or host_group must be defined for each created maintenance.") + + now = datetime.datetime.now().replace(second=0) + start_time = time.mktime(now.timetuple()) + period = 60 * int(minutes) # N * 60 seconds + + if host_groups: + (rc, group_ids, error) = get_group_ids(zbx, host_groups) + if rc != 0: + module.fail_json(msg="Failed to get group_ids: %s" % error) + else: + group_ids = [] + + if host_names: + (rc, host_ids, error) = get_host_ids(zbx, host_names) + if rc != 0: + module.fail_json(msg="Failed to get host_ids: %s" % error) + else: + host_ids = [] + + (rc, maintenance, error) = get_maintenance(zbx, name) + if rc != 0: + module.fail_json(msg="Failed to check maintenance %s existence: %s" % (name, error)) + + if maintenance and ( + sorted(group_ids) != sorted(maintenance["groupids"]) or + sorted(host_ids) != sorted(maintenance["hostids"]) or + str(maintenance_type) != maintenance["maintenance_type"] or + str(int(start_time)) != maintenance["active_since"] or + str(int(start_time + period)) != maintenance["active_till"] + ): + if module.check_mode: + changed = True + else: + (rc, data, error) = update_maintenance(zbx, maintenance["maintenanceid"], group_ids, host_ids, start_time, maintenance_type, period, desc) + if rc == 0: + changed = True + else: + module.fail_json(msg="Failed to update maintenance: %s" % error) + + if not maintenance: + if module.check_mode: + changed = True + else: + (rc, data, error) = create_maintenance(zbx, group_ids, host_ids, start_time, maintenance_type, period, name, desc) + if rc == 0: + changed = True + else: + module.fail_json(msg="Failed to create maintenance: %s" % error) + + if state == "absent": + + (rc, maintenance, error) = get_maintenance(zbx, name) + if rc != 0: + module.fail_json(msg="Failed to check maintenance %s existence: %s" % (name, error)) + + if maintenance: + if module.check_mode: + changed = True + else: + (rc, data, error) = delete_maintenance(zbx, maintenance["maintenanceid"]) + if rc == 0: + changed = True + else: + module.fail_json(msg="Failed to remove maintenance: %s" % error) + + module.exit_json(changed=changed) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/monitoring/zabbix/zabbix_map.py b/plugins/modules/monitoring/zabbix/zabbix_map.py new file mode 100644 index 0000000000..5b15c87181 --- /dev/null +++ b/plugins/modules/monitoring/zabbix/zabbix_map.py @@ -0,0 +1,829 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2017-2018, Antony Alekseyev +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = r''' +--- +module: zabbix_map +author: + - "Antony Alekseyev (@Akint)" +short_description: Create/update/delete Zabbix maps +description: + - "This module allows you to create, modify and delete Zabbix map entries, + using Graphviz binaries and text description written in DOT language. + Nodes of the graph will become map elements and edges will become links between map elements. 
+ See U(https://en.wikipedia.org/wiki/DOT_(graph_description_language)) and U(https://www.graphviz.org/) for details. + Inspired by U(http://blog.zabbix.com/maps-for-the-lazy/)." + - "The following extra node attributes are supported: + C(zbx_host) contains name of the host in Zabbix. Use this if desired type of map element is C(host). + C(zbx_group) contains name of the host group in Zabbix. Use this if desired type of map element is C(host group). + C(zbx_map) contains name of the map in Zabbix. Use this if desired type of map element is C(map). + C(zbx_label) contains label of map element. + C(zbx_image) contains name of the image used to display the element in default state. + C(zbx_image_disabled) contains name of the image used to display disabled map element. + C(zbx_image_maintenance) contains name of the image used to display map element in maintenance. + C(zbx_image_problem) contains name of the image used to display map element with problems. + C(zbx_url) contains map element URL in C(name:url) format. + More than one URL could be specified by adding a postfix (e.g., C(zbx_url1), C(zbx_url2))." + - "The following extra link attributes are supported: + C(zbx_draw_style) contains link line draw style. Possible values: C(line), C(bold), C(dotted), C(dashed). + C(zbx_trigger) contains name of the trigger used as a link indicator in C(host_name:trigger_name) format. + More than one trigger could be specified by adding a postfix (e.g., C(zbx_trigger1), C(zbx_trigger2)). + C(zbx_trigger_color) contains indicator color specified either as CSS3 name or as a hexadecimal code starting with C(#). + C(zbx_trigger_draw_style) contains indicator draw style. Possible values are the same as for C(zbx_draw_style)." +requirements: + - "python >= 2.6" + - "zabbix-api >= 0.5.4" + - pydotplus + - webcolors + - Pillow + - Graphviz +options: + name: + description: + - Name of the map. + required: true + aliases: [ "map_name" ] + type: str + data: + description: + - Graph written in DOT language. + required: false + aliases: [ "dot_data" ] + type: str + state: + description: + - State of the map. + - On C(present), it will create if map does not exist or update the map if the associated data is different. + - On C(absent) will remove the map if it exists. + required: false + choices: ['present', 'absent'] + default: "present" + type: str + width: + description: + - Width of the map. + required: false + default: 800 + type: int + height: + description: + - Height of the map. + required: false + default: 600 + type: int + margin: + description: + - Size of white space between map's borders and its elements. + required: false + default: 40 + type: int + expand_problem: + description: + - Whether the problem trigger will be displayed for elements with a single problem. + required: false + type: bool + default: true + highlight: + description: + - Whether icon highlighting is enabled. + required: false + type: bool + default: true + label_type: + description: + - Map element label type. + required: false + choices: ['label', 'ip', 'name', 'status', 'nothing', 'custom'] + default: "name" + type: str + default_image: + description: + - Name of the Zabbix image used to display the element if this element doesn't have the C(zbx_image) attribute defined. 
+ required: false + aliases: [ "image" ] + type: str + +extends_documentation_fragment: +- community.general.zabbix + +''' + +RETURN = r''' # ''' + +EXAMPLES = r''' +### +### Example inventory: +# [web] +# web[01:03].example.com ansible_host=127.0.0.1 +# [db] +# db.example.com ansible_host=127.0.0.1 +# [backup] +# backup.example.com ansible_host=127.0.0.1 +### +### Each inventory host is present in Zabbix with a matching name. +### +### Contents of 'map.j2': +# digraph G { +# graph [layout=dot splines=false overlap=scale] +# INTERNET [zbx_url="Google:https://google.com" zbx_image="Cloud_(96)"] +# {% for web_host in groups.web %} +# {% set web_loop = loop %} +# web{{ '%03d' % web_loop.index }} [zbx_host="{{ web_host }}"] +# INTERNET -> web{{ '%03d' % web_loop.index }} [zbx_trigger="{{ web_host }}:Zabbix agent on {HOST.NAME} is unreachable for 5 minutes"] +# {% for db_host in groups.db %} +# {% set db_loop = loop %} +# web{{ '%03d' % web_loop.index }} -> db{{ '%03d' % db_loop.index }} +# {% endfor %} +# {% endfor %} +# { rank=same +# {% for db_host in groups.db %} +# {% set db_loop = loop %} +# db{{ '%03d' % db_loop.index }} [zbx_host="{{ db_host }}"] +# {% for backup_host in groups.backup %} +# {% set backup_loop = loop %} +# db{{ '%03d' % db_loop.index }} -> backup{{ '%03d' % backup_loop.index }} [color="blue"] +# {% endfor %} +# {% endfor %} +# {% for backup_host in groups.backup %} +# {% set backup_loop = loop %} +# backup{{ '%03d' % backup_loop.index }} [zbx_host="{{ backup_host }}"] +# {% endfor %} +# } +# } +### +### Create Zabbix map "Demo Map" made of template 'map.j2' +- name: Create Zabbix map + zabbix_map: + server_url: http://zabbix.example.com + login_user: username + login_password: password + name: Demo map + state: present + data: "{{ lookup('template', 'map.j2') }}" + default_image: Server_(64) + expand_problem: no + highlight: no + label_type: label + delegate_to: localhost + run_once: yes +''' + +ANSIBLE_METADATA = { + 'metadata_version': '1.1', + 'supported_by': 'community', + 'status': ['preview'] +} + + +import atexit +import base64 +import traceback + +from io import BytesIO +from operator import itemgetter +from distutils.version import StrictVersion +from ansible.module_utils.basic import AnsibleModule, missing_required_lib + +try: + import pydotplus + HAS_PYDOTPLUS = True +except ImportError: + PYDOT_IMP_ERR = traceback.format_exc() + HAS_PYDOTPLUS = False + +try: + import webcolors + HAS_WEBCOLORS = True +except ImportError: + WEBCOLORS_IMP_ERR = traceback.format_exc() + HAS_WEBCOLORS = False + +try: + from zabbix_api import ZabbixAPI + HAS_ZABBIX_API = True +except ImportError: + ZBX_IMP_ERR = traceback.format_exc() + HAS_ZABBIX_API = False + +try: + from PIL import Image + HAS_PIL = True +except ImportError: + PIL_IMP_ERR = traceback.format_exc() + HAS_PIL = False + + +class Map(): + def __init__(self, module, zbx): + self._module = module + self._zapi = zbx + + self.map_name = module.params['name'] + self.dot_data = module.params['data'] + self.width = module.params['width'] + self.height = module.params['height'] + self.state = module.params['state'] + self.default_image = module.params['default_image'] + self.map_id = self._get_sysmap_id(self.map_name) + self.margin = module.params['margin'] + self.expand_problem = module.params['expand_problem'] + self.highlight = module.params['highlight'] + self.label_type = module.params['label_type'] + self.api_version = self._zapi.api_version() + self.selements_sort_keys = self._get_selements_sort_keys() + + def 
_build_graph(self): + try: + graph_without_positions = pydotplus.graph_from_dot_data(self.dot_data) + dot_data_with_positions = graph_without_positions.create_dot() + graph_with_positions = pydotplus.graph_from_dot_data(dot_data_with_positions) + if graph_with_positions: + return graph_with_positions + except Exception as e: + self._module.fail_json(msg="Failed to build graph from DOT data: %s" % e) + + def get_map_config(self): + if not self.dot_data: + self._module.fail_json(msg="'data' is mandatory with state 'present'") + graph = self._build_graph() + nodes = self._get_graph_nodes(graph) + edges = self._get_graph_edges(graph) + icon_ids = self._get_icon_ids() + map_config = { + 'name': self.map_name, + 'label_type': self._get_label_type_id(self.label_type), + 'expandproblem': int(self.expand_problem), + 'highlight': int(self.highlight), + 'width': self.width, + 'height': self.height, + 'selements': self._get_selements(graph, nodes, icon_ids), + 'links': self._get_links(nodes, edges), + } + return map_config + + def _get_label_type_id(self, label_type): + label_type_ids = { + 'label': 0, + 'ip': 1, + 'name': 2, + 'status': 3, + 'nothing': 4, + 'custom': 5, + } + try: + label_type_id = label_type_ids[label_type] + except Exception as e: + self._module.fail_json(msg="Failed to find id for label type '%s': %s" % (label_type, e)) + return label_type_id + + def _get_images_info(self, data, icon_ids): + images = [ + { + 'dot_tag': 'zbx_image', + 'zbx_property': 'iconid_off', + 'mandatory': True + }, + { + 'dot_tag': 'zbx_image_disabled', + 'zbx_property': 'iconid_disabled', + 'mandatory': False + }, + { + 'dot_tag': 'zbx_image_maintenance', + 'zbx_property': 'iconid_maintenance', + 'mandatory': False + }, + { + 'dot_tag': 'zbx_image_problem', + 'zbx_property': 'iconid_on', + 'mandatory': False + } + ] + images_info = {} + default_image = self.default_image if self.default_image else sorted(icon_ids.items())[0][0] + for image in images: + image_name = data.get(image['dot_tag'], None) + if not image_name: + if image['mandatory']: + image_name = default_image + else: + continue + image_name = remove_quotes(image_name) + if image_name in icon_ids: + images_info[image['zbx_property']] = icon_ids[image_name] + if not image['mandatory']: + images_info['use_iconmap'] = 0 + else: + self._module.fail_json(msg="Failed to find id for image '%s'" % image_name) + return images_info + + def _get_element_type(self, data): + types = { + 'host': 0, + 'sysmap': 1, + 'trigger': 2, + 'group': 3, + 'image': 4 + } + element_type = { + 'elementtype': types['image'], + } + if StrictVersion(self.api_version) < StrictVersion('3.4'): + element_type.update({ + 'elementid': "0", + }) + for type_name, type_id in sorted(types.items()): + field_name = 'zbx_' + type_name + if field_name in data: + method_name = '_get_' + type_name + '_id' + element_name = remove_quotes(data[field_name]) + get_element_id = getattr(self, method_name, None) + if get_element_id: + elementid = get_element_id(element_name) + if elementid and int(elementid) > 0: + element_type.update({ + 'elementtype': type_id, + 'label': element_name + }) + if StrictVersion(self.api_version) < StrictVersion('3.4'): + element_type.update({ + 'elementid': elementid, + }) + else: + element_type.update({ + 'elements': [{ + type_name + 'id': elementid, + }], + }) + break + else: + self._module.fail_json(msg="Failed to find id for %s '%s'" % (type_name, element_name)) + return element_type + + # get list of map elements (nodes) + def _get_selements(self, graph, nodes, 
icon_ids): + selements = [] + icon_sizes = {} + scales = self._get_scales(graph) + for selementid, (node, data) in enumerate(nodes.items(), start=1): + selement = { + 'selementid': selementid + } + data['selementid'] = selementid + + images_info = self._get_images_info(data, icon_ids) + selement.update(images_info) + image_id = images_info['iconid_off'] + if image_id not in icon_sizes: + icon_sizes[image_id] = self._get_icon_size(image_id) + + pos = self._convert_coordinates(data['pos'], scales, icon_sizes[image_id]) + selement.update(pos) + + selement['label'] = remove_quotes(node) + element_type = self._get_element_type(data) + selement.update(element_type) + + label = self._get_label(data) + if label: + selement['label'] = label + + urls = self._get_urls(data) + if urls: + selement['urls'] = urls + + selements.append(selement) + return selements + + def _get_links(self, nodes, edges): + links = {} + for edge in edges: + link_id = tuple(sorted(edge.obj_dict['points'])) + node1, node2 = link_id + data = edge.obj_dict['attributes'] + + if "style" in data and data['style'] == "invis": + continue + + if link_id not in links: + links[link_id] = { + 'selementid1': min(nodes[node1]['selementid'], nodes[node2]['selementid']), + 'selementid2': max(nodes[node1]['selementid'], nodes[node2]['selementid']), + } + link = links[link_id] + + if "color" not in link: + link['color'] = self._get_color_hex(remove_quotes(data.get('color', 'green'))) + + if "zbx_draw_style" not in link: + link['drawtype'] = self._get_link_draw_style_id(remove_quotes(data.get('zbx_draw_style', 'line'))) + + label = self._get_label(data) + if label and "label" not in link: + link['label'] = label + + triggers = self._get_triggers(data) + if triggers: + if "linktriggers" not in link: + link['linktriggers'] = [] + link['linktriggers'] += triggers + + return list(links.values()) + + def _get_urls(self, data): + urls = [] + for url_raw in [remove_quotes(value) for key, value in data.items() if key.startswith("zbx_url")]: + try: + name, url = url_raw.split(':', 1) + except Exception as e: + self._module.fail_json(msg="Failed to parse zbx_url='%s': %s" % (url_raw, e)) + urls.append({ + 'name': name, + 'url': url, + }) + return urls + + def _get_triggers(self, data): + triggers = [] + for trigger_definition in [remove_quotes(value) for key, value in data.items() if key.startswith("zbx_trigger")]: + triggerid = self._get_trigger_id(trigger_definition) + if triggerid: + triggers.append({ + 'triggerid': triggerid, + 'color': self._get_color_hex(remove_quotes(data.get('zbx_trigger_color', 'red'))), + 'drawtype': self._get_link_draw_style_id(remove_quotes(data.get('zbx_trigger_draw_style', 'bold'))), + }) + else: + self._module.fail_json(msg="Failed to find trigger '%s'" % (trigger_definition)) + return triggers + + @staticmethod + def _get_label(data, default=None): + if "zbx_label" in data: + label = remove_quotes(data['zbx_label']).replace('\\n', '\n') + elif "label" in data: + label = remove_quotes(data['label']) + else: + label = default + return label + + def _get_sysmap_id(self, map_name): + exist_map = self._zapi.map.get({'filter': {'name': map_name}}) + if exist_map: + return exist_map[0]['sysmapid'] + return None + + def _get_group_id(self, group_name): + exist_group = self._zapi.hostgroup.get({'filter': {'name': group_name}}) + if exist_group: + return exist_group[0]['groupid'] + return None + + def map_exists(self): + return bool(self.map_id) + + def create_map(self, map_config): + try: + if self._module.check_mode: + 
self._module.exit_json(changed=True)
+            result = self._zapi.map.create(map_config)
+            if result:
+                return result
+        except Exception as e:
+            self._module.fail_json(msg="Failed to create map: %s" % e)
+
+    def update_map(self, map_config):
+        if not self.map_id:
+            self._module.fail_json(msg="Failed to update map: map_id is unknown. Try to create_map instead.")
+        try:
+            if self._module.check_mode:
+                self._module.exit_json(changed=True)
+            map_config['sysmapid'] = self.map_id
+            result = self._zapi.map.update(map_config)
+            if result:
+                return result
+        except Exception as e:
+            self._module.fail_json(msg="Failed to update map: %s" % e)
+
+    def delete_map(self):
+        if not self.map_id:
+            self._module.fail_json(msg="Failed to delete map: map_id is unknown.")
+        try:
+            if self._module.check_mode:
+                self._module.exit_json(changed=True)
+            self._zapi.map.delete([self.map_id])
+        except Exception as e:
+            self._module.fail_json(msg="Failed to delete map, Exception: %s" % e)
+
+    def is_exist_map_correct(self, generated_map_config):
+        exist_map_configs = self._zapi.map.get({
+            'sysmapids': self.map_id,
+            'selectLinks': 'extend',
+            'selectSelements': 'extend'
+        })
+        exist_map_config = exist_map_configs[0]
+        if not self._is_dicts_equal(generated_map_config, exist_map_config):
+            return False
+        if not self._is_selements_equal(generated_map_config['selements'], exist_map_config['selements']):
+            return False
+        self._update_ids(generated_map_config, exist_map_config)
+        if not self._is_links_equal(generated_map_config['links'], exist_map_config['links']):
+            return False
+        return True
+
+    def _get_selements_sort_keys(self):
+        keys_to_sort = ['label']
+        if StrictVersion(self.api_version) < StrictVersion('3.4'):
+            keys_to_sort.insert(0, 'elementid')
+        return keys_to_sort
+
+    def _is_selements_equal(self, generated_selements, exist_selements):
+        if len(generated_selements) != len(exist_selements):
+            return False
+        generated_selements_sorted = sorted(generated_selements, key=itemgetter(*self.selements_sort_keys))
+        exist_selements_sorted = sorted(exist_selements, key=itemgetter(*self.selements_sort_keys))
+        for (generated_selement, exist_selement) in zip(generated_selements_sorted, exist_selements_sorted):
+            if StrictVersion(self.api_version) >= StrictVersion("3.4"):
+                if not self._is_elements_equal(generated_selement.get('elements', []), exist_selement.get('elements', [])):
+                    return False
+            if not self._is_dicts_equal(generated_selement, exist_selement, ['selementid']):
+                return False
+            if not self._is_urls_equal(generated_selement.get('urls', []), exist_selement.get('urls', [])):
+                return False
+        return True
+
+    def _is_urls_equal(self, generated_urls, exist_urls):
+        if len(generated_urls) != len(exist_urls):
+            return False
+        generated_urls_sorted = sorted(generated_urls, key=itemgetter('name', 'url'))
+        exist_urls_sorted = sorted(exist_urls, key=itemgetter('name', 'url'))
+        for (generated_url, exist_url) in zip(generated_urls_sorted, exist_urls_sorted):
+            if not self._is_dicts_equal(generated_url, exist_url, ['selementid']):
+                return False
+        return True
+
+    def _is_elements_equal(self, generated_elements, exist_elements):
+        if len(generated_elements) != len(exist_elements):
+            return False
+        # dict.values() returns a non-subscriptable view on Python 3,
+        # so materialize it as a list before indexing
+        generated_elements_sorted = sorted(generated_elements, key=lambda k: list(k.values())[0])
+        exist_elements_sorted = sorted(exist_elements, key=lambda k: list(k.values())[0])
+        for (generated_element, exist_element) in zip(generated_elements_sorted, exist_elements_sorted):
+            if not self._is_dicts_equal(generated_element, exist_element,
['selementid']): + return False + return True + + # since generated IDs differ from real Zabbix ones, make real IDs match generated ones + def _update_ids(self, generated_map_config, exist_map_config): + generated_selements_sorted = sorted(generated_map_config['selements'], key=itemgetter(*self.selements_sort_keys)) + exist_selements_sorted = sorted(exist_map_config['selements'], key=itemgetter(*self.selements_sort_keys)) + id_mapping = {} + for (generated_selement, exist_selement) in zip(generated_selements_sorted, exist_selements_sorted): + id_mapping[exist_selement['selementid']] = generated_selement['selementid'] + for link in exist_map_config['links']: + link['selementid1'] = id_mapping[link['selementid1']] + link['selementid2'] = id_mapping[link['selementid2']] + if link['selementid2'] < link['selementid1']: + link['selementid1'], link['selementid2'] = link['selementid2'], link['selementid1'] + + def _is_links_equal(self, generated_links, exist_links): + if len(generated_links) != len(exist_links): + return False + generated_links_sorted = sorted(generated_links, key=itemgetter('selementid1', 'selementid2', 'color', 'drawtype')) + exist_links_sorted = sorted(exist_links, key=itemgetter('selementid1', 'selementid2', 'color', 'drawtype')) + for (generated_link, exist_link) in zip(generated_links_sorted, exist_links_sorted): + if not self._is_dicts_equal(generated_link, exist_link, ['selementid1', 'selementid2']): + return False + if not self._is_triggers_equal(generated_link.get('linktriggers', []), exist_link.get('linktriggers', [])): + return False + return True + + def _is_triggers_equal(self, generated_triggers, exist_triggers): + if len(generated_triggers) != len(exist_triggers): + return False + generated_triggers_sorted = sorted(generated_triggers, key=itemgetter('triggerid')) + exist_triggers_sorted = sorted(exist_triggers, key=itemgetter('triggerid')) + for (generated_trigger, exist_trigger) in zip(generated_triggers_sorted, exist_triggers_sorted): + if not self._is_dicts_equal(generated_trigger, exist_trigger): + return False + return True + + @staticmethod + def _is_dicts_equal(d1, d2, exclude_keys=None): + if exclude_keys is None: + exclude_keys = [] + for key in d1.keys(): + if isinstance(d1[key], dict) or isinstance(d1[key], list): + continue + if key in exclude_keys: + continue + # compare as strings since Zabbix API returns everything as strings + if key not in d2 or str(d2[key]) != str(d1[key]): + return False + return True + + def _get_host_id(self, hostname): + hostid = self._zapi.host.get({'filter': {'host': hostname}}) + if hostid: + return str(hostid[0]['hostid']) + + def _get_trigger_id(self, trigger_definition): + try: + host, trigger = trigger_definition.split(':', 1) + except Exception as e: + self._module.fail_json(msg="Failed to parse zbx_trigger='%s': %s" % (trigger_definition, e)) + triggerid = self._zapi.trigger.get({ + 'host': host, + 'filter': { + 'description': trigger + } + }) + if triggerid: + return str(triggerid[0]['triggerid']) + + def _get_icon_ids(self): + icons_list = self._zapi.image.get({}) + icon_ids = {} + for icon in icons_list: + icon_ids[icon['name']] = icon['imageid'] + return icon_ids + + def _get_icon_size(self, icon_id): + icons_list = self._zapi.image.get({ + 'imageids': [ + icon_id + ], + 'select_image': True + }) + if len(icons_list) > 0: + icon_base64 = icons_list[0]['image'] + else: + self._module.fail_json(msg="Failed to find image with id %s" % icon_id) + image = Image.open(BytesIO(base64.b64decode(icon_base64))) + icon_width, 
icon_height = image.size + return icon_width, icon_height + + @staticmethod + def _get_node_attributes(node): + attr = {} + if "attributes" in node.obj_dict: + attr.update(node.obj_dict['attributes']) + pos = node.get_pos() + if pos is not None: + pos = remove_quotes(pos) + xx, yy = pos.split(",") + attr['pos'] = (float(xx), float(yy)) + return attr + + def _get_graph_nodes(self, parent): + nodes = {} + for node in parent.get_nodes(): + node_name = node.get_name() + if node_name in ('node', 'graph', 'edge'): + continue + nodes[node_name] = self._get_node_attributes(node) + for subgraph in parent.get_subgraphs(): + nodes.update(self._get_graph_nodes(subgraph)) + return nodes + + def _get_graph_edges(self, parent): + edges = [] + for edge in parent.get_edges(): + edges.append(edge) + for subgraph in parent.get_subgraphs(): + edges += self._get_graph_edges(subgraph) + return edges + + def _get_scales(self, graph): + bb = remove_quotes(graph.get_bb()) + min_x, min_y, max_x, max_y = bb.split(",") + scale_x = (self.width - self.margin * 2) / (float(max_x) - float(min_x)) if float(max_x) != float(min_x) else 0 + scale_y = (self.height - self.margin * 2) / (float(max_y) - float(min_y)) if float(max_y) != float(min_y) else 0 + return { + 'min_x': float(min_x), + 'min_y': float(min_y), + 'max_x': float(max_x), + 'max_y': float(max_y), + 'scale_x': float(scale_x), + 'scale_y': float(scale_y), + } + + # transform Graphviz coordinates to Zabbix's ones + def _convert_coordinates(self, pos, scales, icon_size): + return { + 'x': int((pos[0] - scales['min_x']) * scales['scale_x'] - icon_size[0] / 2 + self.margin), + 'y': int((scales['max_y'] - pos[1] + scales['min_y']) * scales['scale_y'] - icon_size[1] / 2 + self.margin), + } + + def _get_color_hex(self, color_name): + if color_name.startswith('#'): + color_hex = color_name + else: + try: + color_hex = webcolors.name_to_hex(color_name) + except Exception as e: + self._module.fail_json(msg="Failed to get RGB hex for color '%s': %s" % (color_name, e)) + color_hex = color_hex.strip('#').upper() + return color_hex + + def _get_link_draw_style_id(self, draw_style): + draw_style_ids = { + 'line': 0, + 'bold': 2, + 'dotted': 3, + 'dashed': 4 + } + try: + draw_style_id = draw_style_ids[draw_style] + except Exception as e: + self._module.fail_json(msg="Failed to find id for draw type '%s': %s" % (draw_style, e)) + return draw_style_id + + +# If a string has single or double quotes around it, remove them. 
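+# For example (illustrative, not part of the original module):
+# remove_quotes('"Cloud_(96)"') returns 'Cloud_(96)', while a string
+# without surrounding quotes is returned unchanged.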
+def remove_quotes(s): + if (s[0] == s[-1]) and s.startswith(("'", '"')): + s = s[1:-1] + return s + + +def main(): + module = AnsibleModule( + argument_spec=dict( + server_url=dict(type='str', required=True, aliases=['url']), + login_user=dict(type='str', required=True), + login_password=dict(type='str', required=True, no_log=True), + http_login_user=dict(type='str', required=False, default=None), + http_login_password=dict(type='str', required=False, default=None, no_log=True), + timeout=dict(type='int', default=10), + validate_certs=dict(type='bool', required=False, default=True), + name=dict(type='str', required=True, aliases=['map_name']), + data=dict(type='str', required=False, aliases=['dot_data']), + width=dict(type='int', default=800), + height=dict(type='int', default=600), + state=dict(type='str', default="present", choices=['present', 'absent']), + default_image=dict(type='str', required=False, aliases=['image']), + margin=dict(type='int', default=40), + expand_problem=dict(type='bool', default=True), + highlight=dict(type='bool', default=True), + label_type=dict(type='str', default='name', choices=['label', 'ip', 'name', 'status', 'nothing', 'custom']), + ), + supports_check_mode=True + ) + + if not HAS_ZABBIX_API: + module.fail_json(msg=missing_required_lib('zabbix-api', url='https://pypi.org/project/zabbix-api/'), exception=ZBX_IMP_ERR) + if not HAS_PYDOTPLUS: + module.fail_json(msg=missing_required_lib('pydotplus', url='https://pypi.org/project/pydotplus/'), exception=PYDOT_IMP_ERR) + if not HAS_WEBCOLORS: + module.fail_json(msg=missing_required_lib('webcolors', url='https://pypi.org/project/webcolors/'), exception=WEBCOLORS_IMP_ERR) + if not HAS_PIL: + module.fail_json(msg=missing_required_lib('Pillow', url='https://pypi.org/project/Pillow/'), exception=PIL_IMP_ERR) + + server_url = module.params['server_url'] + login_user = module.params['login_user'] + login_password = module.params['login_password'] + http_login_user = module.params['http_login_user'] + http_login_password = module.params['http_login_password'] + timeout = module.params['timeout'] + validate_certs = module.params['validate_certs'] + + zbx = None + + # login to zabbix + try: + zbx = ZabbixAPI(server_url, timeout=timeout, user=http_login_user, passwd=http_login_password, + validate_certs=validate_certs) + zbx.login(login_user, login_password) + atexit.register(zbx.logout) + except Exception as e: + module.fail_json(msg="Failed to connect to Zabbix server: %s" % e) + + sysmap = Map(module, zbx) + + if sysmap.state == "absent": + if sysmap.map_exists(): + sysmap.delete_map() + module.exit_json(changed=True, result="Successfully deleted map: %s" % sysmap.map_name) + else: + module.exit_json(changed=False) + else: + map_config = sysmap.get_map_config() + if sysmap.map_exists(): + if sysmap.is_exist_map_correct(map_config): + module.exit_json(changed=False) + else: + sysmap.update_map(map_config) + module.exit_json(changed=True, result="Successfully updated map: %s" % sysmap.map_name) + else: + sysmap.create_map(map_config) + module.exit_json(changed=True, result="Successfully created map: %s" % sysmap.map_name) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/monitoring/zabbix/zabbix_mediatype.py b/plugins/modules/monitoring/zabbix/zabbix_mediatype.py new file mode 100644 index 0000000000..62ea174757 --- /dev/null +++ b/plugins/modules/monitoring/zabbix/zabbix_mediatype.py @@ -0,0 +1,705 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# GNU General Public License v3.0+ (see COPYING or 
https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+
+DOCUMENTATION = r'''
+---
+module: zabbix_mediatype
+short_description: Create/Update/Delete Zabbix media types
+description:
+    - This module allows you to create, modify and delete Zabbix media types.
+author:
+    - Ruben Tsirunyan (@rubentsirunyan)
+requirements:
+    - "zabbix-api >= 0.5.4"
+
+options:
+    name:
+        type: 'str'
+        description:
+            - Name of the media type.
+        required: true
+    state:
+        type: 'str'
+        description:
+            - Desired state of the mediatype.
+            - On C(present), it will create a mediatype if it does not exist or update the mediatype if the associated data is different.
+            - On C(absent), it will remove the mediatype if it exists.
+        choices:
+            - present
+            - absent
+        default: 'present'
+    type:
+        type: 'str'
+        description:
+            - Type of the media type.
+            - Media types I(jabber) and I(ez_texting) are workable only with Zabbix 4.2 or older.
+        choices:
+            - email
+            - script
+            - sms
+            - jabber
+            - ez_texting
+        required: true
+    status:
+        type: 'str'
+        description:
+            - Whether the media type is enabled or not.
+        choices:
+            - enabled
+            - disabled
+        default: 'enabled'
+    max_sessions:
+        type: 'int'
+        description:
+            - The maximum number of alerts that can be processed in parallel.
+            - Possible value is 1 when I(type=sms) and 0-100 otherwise.
+        default: 1
+    max_attempts:
+        type: 'int'
+        description:
+            - The maximum number of attempts to send an alert.
+            - Possible range is 0-10.
+        default: 3
+    attempt_interval:
+        type: 'int'
+        description:
+            - The interval between retry attempts.
+            - Possible range is 0-60.
+        default: 10
+    script_name:
+        type: 'str'
+        description:
+            - The name of the executed script.
+            - Required when I(type=script).
+    script_params:
+        type: 'list'
+        elements: str
+        description:
+            - List of script parameters.
+            - Required when I(type=script).
+    gsm_modem:
+        type: 'str'
+        description:
+            - Serial device name of the GSM modem.
+            - Required when I(type=sms).
+    username:
+        type: 'str'
+        description:
+            - Username or Jabber identifier.
+            - Required when I(type=jabber) or I(type=ez_texting).
+            - Required when I(type=email) and I(smtp_authentication=true).
+    password:
+        type: 'str'
+        description:
+            - Authentication password.
+            - Required when I(type=jabber) or I(type=ez_texting).
+            - Required when I(type=email) and I(smtp_authentication=true).
+    smtp_server:
+        type: 'str'
+        description:
+            - SMTP server host.
+            - Required when I(type=email).
+        default: 'localhost'
+    smtp_server_port:
+        type: 'int'
+        description:
+            - SMTP server port.
+            - Required when I(type=email).
+        default: 25
+    smtp_helo:
+        type: 'str'
+        description:
+            - SMTP HELO.
+            - Required when I(type=email).
+        default: 'localhost'
+    smtp_email:
+        type: 'str'
+        description:
+            - Email address from which notifications will be sent.
+            - Required when I(type=email).
+    smtp_authentication:
+        type: 'bool'
+        description:
+            - Whether SMTP authentication with username and password should be enabled or not.
+            - If set to C(true), C(username) and C(password) should be specified.
+        default: false
+    smtp_security:
+        type: 'str'
+        description:
+            - SMTP connection security level to use.
+        choices:
+            - None
+            - STARTTLS
+            - SSL/TLS
+    smtp_verify_host:
+        type: 'bool'
+        description:
+            - SSL verify host for SMTP.
+ - Can be specified when I(smtp_security=STARTTLS) or I(smtp_security=SSL/TLS) + default: false + smtp_verify_peer: + type: 'bool' + description: + - SSL verify peer for SMTP. + - Can be specified when I(smtp_security=STARTTLS) or I(smtp_security=SSL/TLS) + default: false + message_text_limit: + type: 'str' + description: + - The message text limit. + - Required when I(type=ez_texting). + - 160 characters for USA and 136 characters for Canada. + choices: + - USA + - Canada +extends_documentation_fragment: +- community.general.zabbix + + +''' + +RETURN = r''' # ''' + +EXAMPLES = r''' +- name: 'Create an email mediatype with SMTP authentication' + zabbix_mediatype: + name: "Ops email" + server_url: "http://example.com/zabbix/" + login_user: Admin + login_password: "zabbix" + type: 'email' + smtp_server: 'example.com' + smtp_server_port: 2000 + smtp_email: 'ops@example.com' + smtp_authentication: true + username: 'smtp_user' + password: 'smtp_pass' + +- name: 'Create a script mediatype' + zabbix_mediatype: + name: "my script" + server_url: "http://example.com/zabbix/" + login_user: Admin + login_password: "zabbix" + type: 'script' + script_name: 'my_script.py' + script_params: + - 'arg1' + - 'arg2' + +- name: 'Create a jabber mediatype' + zabbix_mediatype: + name: "My jabber" + server_url: "http://example.com/zabbix/" + login_user: Admin + login_password: "zabbix" + type: 'jabber' + username: 'jabber_id' + password: 'jabber_pass' + +- name: 'Create an SMS mediatype' + zabbix_mediatype: + name: "My SMS Mediatype" + server_url: "http://example.com/zabbix/" + login_user: Admin + login_password: "zabbix" + type: 'sms' + gsm_modem: '/dev/ttyS0' +''' + + +import atexit +import traceback + + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib +from distutils.version import LooseVersion + + +try: + from zabbix_api import ZabbixAPI + HAS_ZABBIX_API = True +except ImportError: + ZBX_IMP_ERR = traceback.format_exc() + HAS_ZABBIX_API = False + + +def to_numeric_value(value, strs): + return strs.get(value) + + +def validate_params(module, params): + """Validates arguments that are required together. + + Fails the module with the message that shows the missing + requirements if there are some. + + Args: + module: AnsibleModule object. + params (list): Each element of this list + is a list like + ['argument_key', 'argument_value', ['required_arg_1', + 'required_arg_2']]. + Format is the same as `required_if` parameter of AnsibleModule. + """ + for param in params: + if module.params[param[0]] == param[1]: + if None in [module.params[i] for i in param[2]]: + module.fail_json( + msg="Following arguments are required when {key} is {value}: {arguments}".format( + key=param[0], + value=param[1], + arguments=', '.join(param[2]) + ) + ) + + +def construct_parameters(**kwargs): + """Translates data to a format suitable for Zabbix API and filters + the ones that are related to the specified mediatype type. + + Args: + **kwargs: Arguments passed to the module. + + Returns: + A dictionary of arguments that are related to kwargs['transport_type'], + and are in a format that is understandable by Zabbix API. 
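+
+    Example (illustrative, not from the original module): for an enabled
+    email mediatype, the returned dict carries the numeric strings the
+    Zabbix API expects, e.g. type='0' and status='0', alongside the
+    SMTP-related keys.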
+ """ + if kwargs['transport_type'] == 'email': + return dict( + description=kwargs['name'], + status=to_numeric_value(kwargs['status'], + {'enabled': '0', + 'disabled': '1'}), + type=to_numeric_value(kwargs['transport_type'], + {'email': '0', + 'script': '1', + 'sms': '2', + 'jabber': '3', + 'ez_texting': '100'}), + maxsessions=str(kwargs['max_sessions']), + maxattempts=str(kwargs['max_attempts']), + attempt_interval=str(kwargs['attempt_interval']), + smtp_server=kwargs['smtp_server'], + smtp_port=str(kwargs['smtp_server_port']), + smtp_helo=kwargs['smtp_helo'], + smtp_email=kwargs['smtp_email'], + smtp_security=to_numeric_value(str(kwargs['smtp_security']), + {'None': '0', + 'STARTTLS': '1', + 'SSL/TLS': '2'}), + smtp_authentication=to_numeric_value(str(kwargs['smtp_authentication']), + {'False': '0', + 'True': '1'}), + smtp_verify_host=to_numeric_value(str(kwargs['smtp_verify_host']), + {'False': '0', + 'True': '1'}), + smtp_verify_peer=to_numeric_value(str(kwargs['smtp_verify_peer']), + {'False': '0', + 'True': '1'}), + username=kwargs['username'], + passwd=kwargs['password'] + ) + + elif kwargs['transport_type'] == 'script': + if kwargs['script_params'] is None: + _script_params = '' # ZBX-15706 + else: + _script_params = '\n'.join(str(i) for i in kwargs['script_params']) + '\n' + return dict( + description=kwargs['name'], + status=to_numeric_value(kwargs['status'], + {'enabled': '0', + 'disabled': '1'}), + type=to_numeric_value(kwargs['transport_type'], + {'email': '0', + 'script': '1', + 'sms': '2', + 'jabber': '3', + 'ez_texting': '100'}), + maxsessions=str(kwargs['max_sessions']), + maxattempts=str(kwargs['max_attempts']), + attempt_interval=str(kwargs['attempt_interval']), + exec_path=kwargs['script_name'], + exec_params=_script_params + ) + elif kwargs['transport_type'] == 'sms': + return dict( + description=kwargs['name'], + status=to_numeric_value(kwargs['status'], + {'enabled': '0', + 'disabled': '1'}), + type=to_numeric_value(kwargs['transport_type'], + {'email': '0', + 'script': '1', + 'sms': '2', + 'jabber': '3', + 'ez_texting': '100'}), + maxsessions=str(kwargs['max_sessions']), + maxattempts=str(kwargs['max_attempts']), + attempt_interval=str(kwargs['attempt_interval']), + gsm_modem=kwargs['gsm_modem'] + ) + elif kwargs['transport_type'] == 'jabber' and LooseVersion(kwargs['zbx_api_version']) <= LooseVersion('4.2'): + return dict( + description=kwargs['name'], + status=to_numeric_value(kwargs['status'], + {'enabled': '0', + 'disabled': '1'}), + type=to_numeric_value(kwargs['transport_type'], + {'email': '0', + 'script': '1', + 'sms': '2', + 'jabber': '3', + 'ez_texting': '100'}), + maxsessions=str(kwargs['max_sessions']), + maxattempts=str(kwargs['max_attempts']), + attempt_interval=str(kwargs['attempt_interval']), + username=kwargs['username'], + passwd=kwargs['password'] + ) + elif kwargs['transport_type'] == 'ez_texting' and LooseVersion(kwargs['zbx_api_version']) <= LooseVersion('4.2'): + return dict( + description=kwargs['name'], + status=to_numeric_value(kwargs['status'], + {'enabled': '0', + 'disabled': '1'}), + type=to_numeric_value(kwargs['transport_type'], + {'email': '0', + 'script': '1', + 'sms': '2', + 'jabber': '3', + 'ez_texting': '100'}), + maxsessions=str(kwargs['max_sessions']), + maxattempts=str(kwargs['max_attempts']), + attempt_interval=str(kwargs['attempt_interval']), + username=kwargs['username'], + passwd=kwargs['password'], + exec_path=to_numeric_value(kwargs['message_text_limit'], + {'USA': '0', + 'Canada': '1'}), + ) + + return 
{'unsupported_parameter': kwargs['transport_type'], 'zbx_api_version': kwargs['zbx_api_version']} + + +def check_if_mediatype_exists(module, zbx, name, zbx_api_version): + """Checks if mediatype exists. + + Args: + module: AnsibleModule object + zbx: ZabbixAPI object + name: Zabbix mediatype name + + Returns: + Tuple of (True, `id of the mediatype`) if mediatype exists, (False, None) otherwise + """ + filter_key_name = 'description' + if LooseVersion(zbx_api_version) >= LooseVersion('4.4'): + # description key changed to name key from zabbix 4.4 + filter_key_name = 'name' + + try: + mediatype_list = zbx.mediatype.get({ + 'output': 'extend', + 'filter': {filter_key_name: [name]} + }) + if len(mediatype_list) < 1: + return False, None + else: + return True, mediatype_list[0]['mediatypeid'] + except Exception as e: + module.fail_json(msg="Failed to get ID of the mediatype '{name}': {e}".format(name=name, e=e)) + + +def diff(existing, new): + """Constructs the diff for Ansible's --diff option. + + Args: + existing (dict): Existing mediatype data. + new (dict): New mediatype data. + + Returns: + A dictionary like {'before': existing, 'after': new} + with filtered empty values. + """ + before = {} + after = {} + for key in new: + before[key] = existing[key] + if new[key] is None: + after[key] = '' + else: + after[key] = new[key] + return {'before': before, 'after': after} + + +def get_update_params(module, zbx, mediatype_id, **kwargs): + """Filters only the parameters that are different and need to be updated. + + Args: + module: AnsibleModule object. + zbx: ZabbixAPI object. + mediatype_id (int): ID of the mediatype to be updated. + **kwargs: Parameters for the new mediatype. + + Returns: + A tuple where the first element is a dictionary of parameters + that need to be updated and the second one is a dictionary + returned by diff() function with + existing mediatype data and new params passed to it. 
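+
+    For example (illustrative): if only the SMTP port changed, the first
+    element of the returned tuple would be {'smtp_port': '2000'}, while the
+    second element holds the before/after values of every compared key.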
+    """
+    existing_mediatype = zbx.mediatype.get({
+        'output': 'extend',
+        'mediatypeids': [mediatype_id]
+    })[0]
+
+    if existing_mediatype['type'] != kwargs['type']:
+        return kwargs, diff(existing_mediatype, kwargs)
+    else:
+        params_to_update = {}
+        for key in kwargs:
+            if (not (kwargs[key] is None and existing_mediatype[key] == '')) and kwargs[key] != existing_mediatype[key]:
+                params_to_update[key] = kwargs[key]
+        return params_to_update, diff(existing_mediatype, kwargs)
+
+
+def delete_mediatype(module, zbx, mediatype_id):
+    try:
+        return zbx.mediatype.delete([mediatype_id])
+    except Exception as e:
+        module.fail_json(msg="Failed to delete mediatype '{_id}': {e}".format(_id=mediatype_id, e=e))
+
+
+def update_mediatype(module, zbx, **kwargs):
+    try:
+        # return the ID of the updated mediatype so the caller can report it
+        return zbx.mediatype.update(kwargs)['mediatypeids'][0]
+    except Exception as e:
+        module.fail_json(msg="Failed to update mediatype '{_id}': {e}".format(_id=kwargs['mediatypeid'], e=e))
+
+
+def create_mediatype(module, zbx, **kwargs):
+    try:
+        # return the ID of the new mediatype so the caller can report it
+        return zbx.mediatype.create(kwargs)['mediatypeids'][0]
+    except Exception as e:
+        # the 'description' key is renamed to 'name' for Zabbix >= 4.4
+        module.fail_json(msg="Failed to create mediatype '{name}': {e}".format(
+            name=kwargs.get('name', kwargs.get('description')), e=e))
+
+
+def main():
+    argument_spec = dict(
+        server_url=dict(type='str', required=True, aliases=['url']),
+        login_user=dict(type='str', required=True),
+        login_password=dict(type='str', required=True, no_log=True),
+        http_login_user=dict(type='str', required=False, default=None),
+        http_login_password=dict(type='str', required=False, default=None, no_log=True),
+        validate_certs=dict(type='bool', required=False, default=True),
+        timeout=dict(type='int', default=10),
+        name=dict(type='str', required=True),
+        state=dict(type='str', default='present', choices=['present', 'absent']),
+        type=dict(type='str', choices=['email', 'script', 'sms', 'jabber', 'ez_texting'], required=True),
+        status=dict(type='str', default='enabled', choices=['enabled', 'disabled'], required=False),
+        max_sessions=dict(type='int', default=1, required=False),
+        max_attempts=dict(type='int', default=3, required=False),
+        attempt_interval=dict(type='int', default=10, required=False),
+        # Script
+        script_name=dict(type='str', required=False),
+        script_params=dict(type='list', required=False),
+        # SMS
+        gsm_modem=dict(type='str', required=False),
+        # Jabber
+        username=dict(type='str', required=False),
+        password=dict(type='str', required=False, no_log=True),
+        # Email
+        smtp_server=dict(type='str', default='localhost', required=False),
+        smtp_server_port=dict(type='int', default=25, required=False),
+        smtp_helo=dict(type='str', default='localhost', required=False),
+        smtp_email=dict(type='str', required=False),
+        smtp_security=dict(type='str', required=False, choices=['None', 'STARTTLS', 'SSL/TLS']),
+        smtp_authentication=dict(type='bool', default=False, required=False),
+        smtp_verify_host=dict(type='bool', default=False, required=False),
+        smtp_verify_peer=dict(type='bool', default=False, required=False),
+        # EZ Text
+        message_text_limit=dict(type='str', required=False, choices=['USA', 'Canada'])
+    )
+
+    required_params = [
+        ['type', 'email', ['smtp_email']],
+        ['type', 'script', ['script_name']],
+        ['type', 'sms', ['gsm_modem']],
+        ['type', 'jabber', ['username', 'password']],
+        ['type', 'ez_texting', ['username', 'password', 'message_text_limit']],
+        ['smtp_authentication', True, ['username', 'password']]
+    ]
+
+    module = AnsibleModule(
+        argument_spec=argument_spec,
+        supports_check_mode=True
+    )
+
+    if module.params['state'] == 'present':
+        validate_params(module,
required_params) + + if not HAS_ZABBIX_API: + module.fail_json(msg=missing_required_lib('zabbix-api', url='https://pypi.org/project/zabbix-api/'), exception=ZBX_IMP_ERR) + + server_url = module.params['server_url'] + login_user = module.params['login_user'] + login_password = module.params['login_password'] + http_login_user = module.params['http_login_user'] + http_login_password = module.params['http_login_password'] + validate_certs = module.params['validate_certs'] + state = module.params['state'] + timeout = module.params['timeout'] + name = module.params['name'] + transport_type = module.params['type'] + status = module.params['status'] + max_sessions = module.params['max_sessions'] + max_attempts = module.params['max_attempts'] + attempt_interval = module.params['attempt_interval'] + # Script + script_name = module.params['script_name'] + script_params = module.params['script_params'] + # SMS + gsm_modem = module.params['gsm_modem'] + # Jabber + username = module.params['username'] + password = module.params['password'] + # Email + smtp_server = module.params['smtp_server'] + smtp_server_port = module.params['smtp_server_port'] + smtp_helo = module.params['smtp_helo'] + smtp_email = module.params['smtp_email'] + smtp_security = module.params['smtp_security'] + smtp_authentication = module.params['smtp_authentication'] + smtp_verify_host = module.params['smtp_verify_host'] + smtp_verify_peer = module.params['smtp_verify_peer'] + # EZ Text + message_text_limit = module.params['message_text_limit'] + + zbx = None + + # login to zabbix + try: + zbx = ZabbixAPI(server_url, timeout=timeout, user=http_login_user, passwd=http_login_password, + validate_certs=validate_certs) + zbx.login(login_user, login_password) + atexit.register(zbx.logout) + except Exception as e: + module.fail_json(msg="Failed to connect to Zabbix server: %s" % e) + + zbx_api_version = zbx.api_version()[:3] + mediatype_exists, mediatype_id = check_if_mediatype_exists(module, zbx, name, zbx_api_version) + + parameters = construct_parameters( + name=name, + transport_type=transport_type, + status=status, + max_sessions=max_sessions, + max_attempts=max_attempts, + attempt_interval=attempt_interval, + script_name=script_name, + script_params=script_params, + gsm_modem=gsm_modem, + username=username, + password=password, + smtp_server=smtp_server, + smtp_server_port=smtp_server_port, + smtp_helo=smtp_helo, + smtp_email=smtp_email, + smtp_security=smtp_security, + smtp_authentication=smtp_authentication, + smtp_verify_host=smtp_verify_host, + smtp_verify_peer=smtp_verify_peer, + message_text_limit=message_text_limit, + zbx_api_version=zbx_api_version + ) + + if 'unsupported_parameter' in parameters: + module.fail_json(msg="%s is unsupported for Zabbix version %s" % (parameters['unsupported_parameter'], parameters['zbx_api_version'])) + + if LooseVersion(zbx_api_version) >= LooseVersion('4.4'): + # description key changed to name key from zabbix 4.4 + parameters['name'] = parameters.pop('description') + + if mediatype_exists: + if state == 'absent': + if module.check_mode: + module.exit_json( + changed=True, + msg="Mediatype would have been deleted. Name: {name}, ID: {_id}".format( + name=name, + _id=mediatype_id + ) + ) + mediatype_id = delete_mediatype(module, zbx, mediatype_id) + module.exit_json( + changed=True, + msg="Mediatype deleted. 
Name: {name}, ID: {_id}".format(
+                    name=name,
+                    _id=mediatype_id
+                )
+            )
+        else:
+            params_to_update, diff = get_update_params(module, zbx, mediatype_id, **parameters)
+            if params_to_update == {}:
+                module.exit_json(
+                    changed=False,
+                    msg="Mediatype is up to date: {name}".format(name=name)
+                )
+            else:
+                if module.check_mode:
+                    module.exit_json(
+                        changed=True,
+                        diff=diff,
+                        msg="Mediatype would have been updated. Name: {name}, ID: {_id}".format(
+                            name=name,
+                            _id=mediatype_id
+                        )
+                    )
+                mediatype_id = update_mediatype(
+                    module, zbx,
+                    mediatypeid=mediatype_id,
+                    **params_to_update
+                )
+                module.exit_json(
+                    changed=True,
+                    diff=diff,
+                    msg="Mediatype updated. Name: {name}, ID: {_id}".format(
+                        name=name,
+                        _id=mediatype_id
+                    )
+                )
+    else:
+        if state == "absent":
+            module.exit_json(changed=False)
+        else:
+            if module.check_mode:
+                module.exit_json(
+                    changed=True,
+                    msg="Mediatype would have been created. Name: {name}".format(
+                        name=name
+                    )
+                )
+            mediatype_id = create_mediatype(module, zbx, **parameters)
+            module.exit_json(
+                changed=True,
+                msg="Mediatype created. Name: {name}, ID: {_id}".format(
+                    name=name,
+                    _id=mediatype_id
+                )
+            )
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/monitoring/zabbix/zabbix_proxy.py b/plugins/modules/monitoring/zabbix/zabbix_proxy.py
new file mode 100644
index 0000000000..a4f2e70083
--- /dev/null
+++ b/plugins/modules/monitoring/zabbix/zabbix_proxy.py
@@ -0,0 +1,471 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# (c) 2017, Alen Komic
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <https://www.gnu.org/licenses/>.
+#
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+
+DOCUMENTATION = r'''
+---
+module: zabbix_proxy
+short_description: Create/delete/get/update Zabbix proxies
+description:
+    - This module allows you to create, modify, get and delete Zabbix proxy entries.
+author:
+    - "Alen Komic (@akomic)"
+requirements:
+    - "python >= 2.6"
+    - "zabbix-api >= 0.5.4"
+options:
+    proxy_name:
+        description:
+            - Name of the proxy in Zabbix.
+        required: true
+        type: str
+    proxy_address:
+        description:
+            - Comma-delimited list of IP/CIDR addresses or DNS names to accept active proxy requests from.
+            - Requires I(status=active).
+            - Works only with Zabbix >= 4.0; remove this option for Zabbix <= 4.0.
+        required: false
+        type: str
+    description:
+        description:
+            - Description of the proxy.
+        required: false
+        type: str
+    status:
+        description:
+            - Type of proxy. (5 - active, 6 - passive)
+        required: false
+        choices: ['active', 'passive']
+        default: "active"
+        type: str
+    tls_connect:
+        description:
+            - Connections to proxy.
+        required: false
+        choices: ['no_encryption','PSK','certificate']
+        default: 'no_encryption'
+        type: str
+    tls_accept:
+        description:
+            - Connections from proxy.
+ required: false + choices: ['no_encryption','PSK','certificate'] + default: 'no_encryption' + type: str + ca_cert: + description: + - Certificate issuer. + required: false + aliases: [ tls_issuer ] + type: str + tls_subject: + description: + - Certificate subject. + required: false + type: str + tls_psk_identity: + description: + - PSK identity. Required if either I(tls_connect) or I(tls_accept) has PSK enabled. + required: false + type: str + tls_psk: + description: + - The preshared key, at least 32 hex digits. Required if either I(tls_connect) or I(tls_accept) has PSK enabled. + required: false + type: str + state: + description: + - State of the proxy. + - On C(present), it will create if proxy does not exist or update the proxy if the associated data is different. + - On C(absent) will remove a proxy if it exists. + required: false + choices: ['present', 'absent'] + default: "present" + type: str + interface: + description: + - Dictionary with params for the interface when proxy is in passive mode. + - For more information, review proxy interface documentation at + - U(https://www.zabbix.com/documentation/4.0/manual/api/reference/proxy/object#proxy_interface). + required: false + suboptions: + useip: + type: int + description: + - Connect to proxy interface with IP address instead of DNS name. + - 0 (don't use ip), 1 (use ip). + default: 0 + choices: [0, 1] + ip: + type: str + description: + - IP address used by proxy interface. + - Required if I(useip=1). + default: '' + dns: + type: str + description: + - DNS name of the proxy interface. + - Required if I(useip=0). + default: '' + port: + type: str + description: + - Port used by proxy interface. + default: '10051' + type: + type: int + description: + - Interface type to add. + - This suboption is currently ignored for Zabbix proxy. + - This suboption is deprecated since Ansible 2.10 and will eventually be removed in 2.14. + required: false + default: 0 + main: + type: int + description: + - Whether the interface is used as default. + - This suboption is currently ignored for Zabbix proxy. + - This suboption is deprecated since Ansible 2.10 and will eventually be removed in 2.14. 
+                required: false
+                default: 0
+        default: {}
+        type: dict
+
+extends_documentation_fragment:
+- community.general.zabbix
+
+'''
+
+EXAMPLES = r'''
+- name: Create or update a proxy with proxy type active
+  local_action:
+    module: zabbix_proxy
+    server_url: http://monitor.example.com
+    login_user: username
+    login_password: password
+    proxy_name: ExampleProxy
+    description: ExampleProxy
+    status: active
+    state: present
+    proxy_address: ExampleProxy.local

+- name: Create a new passive proxy using only its IP
+  local_action:
+    module: zabbix_proxy
+    server_url: http://monitor.example.com
+    login_user: username
+    login_password: password
+    proxy_name: ExampleProxy
+    description: ExampleProxy
+    status: passive
+    state: present
+    interface:
+      useip: 1
+      ip: 10.1.1.2
+      port: 10051
+
+- name: Create a new passive proxy using only its DNS
+  local_action:
+    module: zabbix_proxy
+    server_url: http://monitor.example.com
+    login_user: username
+    login_password: password
+    proxy_name: ExampleProxy
+    description: ExampleProxy
+    status: passive
+    state: present
+    interface:
+      dns: proxy.example.com
+      port: 10051
+'''
+
+RETURN = r''' # '''
+
+
+import traceback
+import atexit
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+try:
+    from zabbix_api import ZabbixAPI
+
+    HAS_ZABBIX_API = True
+except ImportError:
+    ZBX_IMP_ERR = traceback.format_exc()
+    HAS_ZABBIX_API = False
+
+
+class Proxy(object):
+    def __init__(self, module, zbx):
+        self._module = module
+        self._zapi = zbx
+        self.existing_data = None
+
+    def proxy_exists(self, proxy_name):
+        result = self._zapi.proxy.get({
+            'output': 'extend', 'selectInterface': 'extend',
+            'filter': {'host': proxy_name}})
+
+        if len(result) > 0 and 'proxyid' in result[0]:
+            self.existing_data = result[0]
+            return result[0]['proxyid']
+        else:
+            return result
+
+    def add_proxy(self, data):
+        try:
+            if self._module.check_mode:
+                self._module.exit_json(changed=True)
+
+            parameters = {}
+            for item in data:
+                if data[item]:
+                    parameters[item] = data[item]
+
+            if 'proxy_address' in data and data['status'] != '5':
+                parameters.pop('proxy_address', False)
+
+            if 'interface' in data and data['status'] != '6':
+                parameters.pop('interface', False)
+
+            self._zapi.proxy.create(parameters)
+            # exit_json() terminates module execution, so nothing after this
+            # call would run; the created proxy is reported here directly.
+            self._module.exit_json(changed=True,
+                                   result="Successfully added proxy %s (%s)" %
+                                          (data['host'], data['status']))
+        except Exception as e:
+            self._module.fail_json(msg="Failed to create proxy %s: %s" %
+                                       (data['host'], e))
+
+    def delete_proxy(self, proxy_id, proxy_name):
+        try:
+            if self._module.check_mode:
+                self._module.exit_json(changed=True)
+            self._zapi.proxy.delete([proxy_id])
+            self._module.exit_json(changed=True,
+                                   result="Successfully deleted" +
+                                          " proxy %s" % proxy_name)
+        except Exception as e:
+            self._module.fail_json(msg="Failed to delete proxy %s: %s" %
+                                       (proxy_name, str(e)))
+
+    def compile_interface_params(self, new_interface):
+        old_interface = {}
+        if 'interface' in self.existing_data and \
+                len(self.existing_data['interface']) > 0:
+            old_interface = self.existing_data['interface']
+
+        for item in ['type', 'main']:
+            new_interface.pop(item, False)
+
+        final_interface = old_interface.copy()
+        final_interface.update(new_interface)
+        final_interface = dict((k, str(v)) for k, v in final_interface.items())
+
+        if final_interface != old_interface:
+            return final_interface
+        else:
+            return {}
+
+    def update_proxy(self, proxy_id, data):
+        try:
+            if
self._module.check_mode: + self._module.exit_json(changed=True) + parameters = {'proxyid': proxy_id} + + for item in data: + if data[item] and item in self.existing_data and \ + self.existing_data[item] != data[item]: + parameters[item] = data[item] + + if 'interface' in parameters: + parameters.pop('interface') + + if 'proxy_address' in data and data['status'] != '5': + parameters.pop('proxy_address', False) + + if 'interface' in data and data['status'] != '6': + parameters.pop('interface', False) + + if 'interface' in data and data['status'] == '6': + new_interface = self.compile_interface_params(data['interface']) + if len(new_interface) > 0: + parameters['interface'] = new_interface + + if len(parameters) > 1: + self._zapi.proxy.update(parameters) + self._module.exit_json( + changed=True, + result="Successfully updated proxy %s (%s)" % + (data['host'], proxy_id) + ) + else: + self._module.exit_json(changed=False) + except Exception as e: + self._module.fail_json(msg="Failed to update proxy %s: %s" % + (data['host'], e)) + + +def main(): + module = AnsibleModule( + argument_spec=dict( + server_url=dict(type='str', required=True, aliases=['url']), + login_user=dict(type='str', required=True), + login_password=dict(type='str', required=True, no_log=True), + proxy_name=dict(type='str', required=True), + proxy_address=dict(type='str', required=False), + http_login_user=dict(type='str', required=False, default=None), + http_login_password=dict(type='str', required=False, + default=None, no_log=True), + validate_certs=dict(type='bool', required=False, default=True), + status=dict(type='str', default="active", choices=['active', 'passive']), + state=dict(type='str', default="present", choices=['present', 'absent']), + description=dict(type='str', required=False), + tls_connect=dict(type='str', default='no_encryption', + choices=['no_encryption', 'PSK', 'certificate']), + tls_accept=dict(type='str', default='no_encryption', + choices=['no_encryption', 'PSK', 'certificate']), + ca_cert=dict(type='str', required=False, default=None, aliases=['tls_issuer']), + tls_subject=dict(type='str', required=False, default=None), + tls_psk_identity=dict(type='str', required=False, default=None), + tls_psk=dict(type='str', required=False, default=None), + timeout=dict(type='int', default=10), + interface=dict( + type='dict', + required=False, + default={}, + options=dict( + useip=dict(type='int', choices=[0, 1], default=0), + ip=dict(type='str', default=''), + dns=dict(type='str', default=''), + port=dict(type='str', default='10051'), + type=dict(type='int', default=0, removed_in_version='2.14'), + main=dict(type='int', default=0, removed_in_version='2.14') + ), + ) + ), + supports_check_mode=True + ) + + if not HAS_ZABBIX_API: + module.fail_json(msg=missing_required_lib('zabbix-api', url='https://pypi.org/project/zabbix-api/'), exception=ZBX_IMP_ERR) + + server_url = module.params['server_url'] + login_user = module.params['login_user'] + login_password = module.params['login_password'] + http_login_user = module.params['http_login_user'] + http_login_password = module.params['http_login_password'] + validate_certs = module.params['validate_certs'] + proxy_name = module.params['proxy_name'] + proxy_address = module.params['proxy_address'] + description = module.params['description'] + status = module.params['status'] + tls_connect = module.params['tls_connect'] + tls_accept = module.params['tls_accept'] + tls_issuer = module.params['ca_cert'] + tls_subject = module.params['tls_subject'] + tls_psk_identity = 
module.params['tls_psk_identity']
+    tls_psk = module.params['tls_psk']
+    state = module.params['state']
+    timeout = module.params['timeout']
+    interface = module.params['interface']
+
+    # convert status: active -> 5, passive -> 6
+    status = 6 if status == "passive" else 5
+
+    if tls_connect == 'certificate':
+        tls_connect = 4
+    elif tls_connect == 'PSK':
+        tls_connect = 2
+    else:
+        tls_connect = 1
+
+    if tls_accept == 'certificate':
+        tls_accept = 4
+    elif tls_accept == 'PSK':
+        tls_accept = 2
+    else:
+        tls_accept = 1
+
+    zbx = None
+    # login to zabbix
+    try:
+        zbx = ZabbixAPI(server_url, timeout=timeout,
+                        user=http_login_user,
+                        passwd=http_login_password,
+                        validate_certs=validate_certs)
+        zbx.login(login_user, login_password)
+        atexit.register(zbx.logout)
+    except Exception as e:
+        module.fail_json(msg="Failed to connect to Zabbix server: %s" % e)
+
+    proxy = Proxy(module, zbx)
+
+    # check if proxy already exists
+    proxy_id = proxy.proxy_exists(proxy_name)
+
+    if proxy_id:
+        if state == "absent":
+            # remove proxy
+            proxy.delete_proxy(proxy_id, proxy_name)
+        else:
+            proxy.update_proxy(proxy_id, {
+                'host': proxy_name,
+                'description': description,
+                'status': str(status),
+                'tls_connect': str(tls_connect),
+                'tls_accept': str(tls_accept),
+                'tls_issuer': tls_issuer,
+                'tls_subject': tls_subject,
+                'tls_psk_identity': tls_psk_identity,
+                'tls_psk': tls_psk,
+                'interface': interface,
+                'proxy_address': proxy_address
+            })
+    else:
+        if state == "absent":
+            # the proxy is already deleted.
+            module.exit_json(changed=False)
+
+        proxy_id = proxy.add_proxy(data={
+            'host': proxy_name,
+            'description': description,
+            'status': str(status),
+            'tls_connect': str(tls_connect),
+            'tls_accept': str(tls_accept),
+            'tls_issuer': tls_issuer,
+            'tls_subject': tls_subject,
+            'tls_psk_identity': tls_psk_identity,
+            'tls_psk': tls_psk,
+            'interface': interface,
+            'proxy_address': proxy_address
+        })
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/monitoring/zabbix/zabbix_screen.py b/plugins/modules/monitoring/zabbix/zabbix_screen.py
new file mode 100644
index 0000000000..c82a8a0d9c
--- /dev/null
+++ b/plugins/modules/monitoring/zabbix/zabbix_screen.py
@@ -0,0 +1,471 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2013-2014, Epic Games, Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+
+DOCUMENTATION = r'''
+---
+module: zabbix_screen
+short_description: Create/update/delete Zabbix screens
+description:
+    - This module allows you to create, modify and delete Zabbix screens and associated graph data.
+author:
+    - "Cove (@cove)"
+    - "Tony Minfei Ding (!UNKNOWN)"
+    - "Harrison Gu (@harrisongu)"
+requirements:
+    - "python >= 2.6"
+    - "zabbix-api >= 0.5.4"
+options:
+    screens:
+        description:
+            - List of screens to be created/updated/deleted (see example).
+        type: list
+        elements: dict
+        required: true
+        suboptions:
+            screen_name:
+                description:
+                    - Screen name will be used.
+                    - If a screen has already been added, the screen name won't be updated.
+                type: str
+                required: true
+            host_group:
+                description:
+                    - Host group will be used for searching hosts.
+                    - Required if I(state=present).
+                type: str
+            state:
+                description:
+                    - I(present) - Create a screen if it doesn't exist. If the screen already exists, the screen will be updated as needed.
+ - I(absent) - If a screen exists, the screen will be deleted. + type: str + default: present + choices: + - absent + - present + graph_names: + description: + - Graph names will be added to a screen. Case insensitive. + - Required if I(state=present). + type: list + elements: str + graph_width: + description: + - Graph width will be set in graph settings. + type: int + graph_height: + description: + - Graph height will be set in graph settings. + type: int + graphs_in_row: + description: + - Limit columns of a screen and make multiple rows. + type: int + default: 3 + sort: + description: + - Sort hosts alphabetically. + - If there are numbers in hostnames, leading zero should be used. + type: bool + default: no + +extends_documentation_fragment: +- community.general.zabbix + + +notes: + - Too many concurrent updates to the same screen may cause Zabbix to return errors, see examples for a workaround if needed. +''' + +EXAMPLES = r''' +# Create/update a screen. +- name: Create a new screen or update an existing screen's items 5 in a row + local_action: + module: zabbix_screen + server_url: http://monitor.example.com + login_user: username + login_password: password + screens: + - screen_name: ExampleScreen1 + host_group: Example group1 + state: present + graph_names: + - Example graph1 + - Example graph2 + graph_width: 200 + graph_height: 100 + graphs_in_row: 5 + +# Create/update multi-screen +- name: Create two of new screens or update the existing screens' items + local_action: + module: zabbix_screen + server_url: http://monitor.example.com + login_user: username + login_password: password + screens: + - screen_name: ExampleScreen1 + host_group: Example group1 + state: present + graph_names: + - Example graph1 + - Example graph2 + graph_width: 200 + graph_height: 100 + - screen_name: ExampleScreen2 + host_group: Example group2 + state: present + graph_names: + - Example graph1 + - Example graph2 + graph_width: 200 + graph_height: 100 + +# Limit the Zabbix screen creations to one host since Zabbix can return an error when doing concurrent updates +- name: Create a new screen or update an existing screen's items + local_action: + module: zabbix_screen + server_url: http://monitor.example.com + login_user: username + login_password: password + state: present + screens: + - screen_name: ExampleScreen + host_group: Example group + state: present + graph_names: + - Example graph1 + - Example graph2 + graph_width: 200 + graph_height: 100 + when: inventory_hostname==groups['group_name'][0] +''' + + +import atexit +import traceback + +try: + from zabbix_api import ZabbixAPI + from zabbix_api import ZabbixAPIException + from zabbix_api import Already_Exists + + HAS_ZABBIX_API = True +except ImportError: + ZBX_IMP_ERR = traceback.format_exc() + HAS_ZABBIX_API = False + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib + + +class Screen(object): + def __init__(self, module, zbx): + self._module = module + self._zapi = zbx + + # get group id by group name + def get_host_group_id(self, group_name): + if group_name == "": + self._module.fail_json(msg="group_name is required") + hostGroup_list = self._zapi.hostgroup.get({'output': 'extend', 'filter': {'name': group_name}}) + if len(hostGroup_list) < 1: + self._module.fail_json(msg="Host group not found: %s" % group_name) + else: + hostGroup_id = hostGroup_list[0]['groupid'] + return hostGroup_id + + # get monitored host_id by host_group_id + def get_host_ids_by_group_id(self, group_id, sort): + host_list = 
self._zapi.host.get({'output': 'extend', 'groupids': group_id, 'monitored_hosts': 1}) + if len(host_list) < 1: + self._module.fail_json(msg="No host in the group.") + else: + if sort: + host_list = sorted(host_list, key=lambda name: name['name']) + host_ids = [] + for i in host_list: + host_id = i['hostid'] + host_ids.append(host_id) + return host_ids + + # get screen + def get_screen_id(self, screen_name): + if screen_name == "": + self._module.fail_json(msg="screen_name is required") + try: + screen_id_list = self._zapi.screen.get({'output': 'extend', 'search': {"name": screen_name}}) + if len(screen_id_list) >= 1: + screen_id = screen_id_list[0]['screenid'] + return screen_id + return None + except Exception as e: + self._module.fail_json(msg="Failed to get screen %s from Zabbix: %s" % (screen_name, e)) + + # create screen + def create_screen(self, screen_name, h_size, v_size): + try: + if self._module.check_mode: + self._module.exit_json(changed=True) + screen = self._zapi.screen.create({'name': screen_name, 'hsize': h_size, 'vsize': v_size}) + return screen['screenids'][0] + except Exception as e: + self._module.fail_json(msg="Failed to create screen %s: %s" % (screen_name, e)) + + # update screen + def update_screen(self, screen_id, screen_name, h_size, v_size): + try: + if self._module.check_mode: + self._module.exit_json(changed=True) + self._zapi.screen.update({'screenid': screen_id, 'hsize': h_size, 'vsize': v_size}) + except Exception as e: + self._module.fail_json(msg="Failed to update screen %s: %s" % (screen_name, e)) + + # delete screen + def delete_screen(self, screen_id, screen_name): + try: + if self._module.check_mode: + self._module.exit_json(changed=True) + self._zapi.screen.delete([screen_id]) + except Exception as e: + self._module.fail_json(msg="Failed to delete screen %s: %s" % (screen_name, e)) + + # get graph ids + def get_graph_ids(self, hosts, graph_name_list): + graph_id_lists = [] + vsize = 1 + for host in hosts: + graph_id_list = self.get_graphs_by_host_id(graph_name_list, host) + size = len(graph_id_list) + if size > 0: + graph_id_lists.extend(graph_id_list) + if vsize < size: + vsize = size + return graph_id_lists, vsize + + # getGraphs + def get_graphs_by_host_id(self, graph_name_list, host_id): + graph_ids = [] + for graph_name in graph_name_list: + graphs_list = self._zapi.graph.get({'output': 'extend', 'search': {'name': graph_name}, 'hostids': host_id}) + graph_id_list = [] + if len(graphs_list) > 0: + for graph in graphs_list: + graph_id = graph['graphid'] + graph_id_list.append(graph_id) + if len(graph_id_list) > 0: + graph_ids.extend(graph_id_list) + return graph_ids + + # get screen items + def get_screen_items(self, screen_id): + screen_item_list = self._zapi.screenitem.get({'output': 'extend', 'screenids': screen_id}) + return screen_item_list + + # delete screen items + def delete_screen_items(self, screen_id, screen_item_id_list): + try: + if len(screen_item_id_list) == 0: + return True + screen_item_list = self.get_screen_items(screen_id) + if len(screen_item_list) > 0: + if self._module.check_mode: + self._module.exit_json(changed=True) + self._zapi.screenitem.delete(screen_item_id_list) + return True + return False + except ZabbixAPIException: + pass + + # get screen's hsize and vsize + def get_hsize_vsize(self, hosts, v_size, graphs_in_row): + h_size = len(hosts) + # when there is only one host, put all graphs in a row + if h_size == 1: + if v_size <= graphs_in_row: + h_size = v_size + else: + h_size = graphs_in_row + v_size = (v_size - 1) 
// h_size + 1
+        # when len(hosts) is more than graphs_in_row
+        elif len(hosts) > graphs_in_row:
+            h_size = graphs_in_row
+            v_size = (len(hosts) // graphs_in_row + 1) * v_size
+
+        return h_size, v_size
+
+    # create screen_items
+    def create_screen_items(self, screen_id, hosts, graph_name_list, width, height, h_size, graphs_in_row):
+        if len(hosts) < 4:
+            if width is None or width < 0:
+                width = 500
+        else:
+            if width is None or width < 0:
+                width = 200
+        if height is None or height < 0:
+            height = 100
+
+        try:
+            # when there is only one host, its graphs are laid out over
+            # several rows instead of a single long row.
+            if len(hosts) == 1:
+                graph_id_list = self.get_graphs_by_host_id(graph_name_list, hosts[0])
+                for i, graph_id in enumerate(graph_id_list):
+                    if graph_id is not None:
+                        self._zapi.screenitem.create({'screenid': screen_id, 'resourcetype': 0, 'resourceid': graph_id,
+                                                      'width': width, 'height': height,
+                                                      'x': i % h_size, 'y': i // h_size, 'colspan': 1, 'rowspan': 1,
+                                                      'elements': 0, 'valign': 0, 'halign': 0,
+                                                      'style': 0, 'dynamic': 0, 'sort_triggers': 0})
+            else:
+                for i, host in enumerate(hosts):
+                    graph_id_list = self.get_graphs_by_host_id(graph_name_list, host)
+                    for j, graph_id in enumerate(graph_id_list):
+                        if graph_id is not None:
+                            self._zapi.screenitem.create({'screenid': screen_id, 'resourcetype': 0, 'resourceid': graph_id,
+                                                          'width': width, 'height': height,
+                                                          'x': i % graphs_in_row, 'y': len(graph_id_list) * (i // graphs_in_row) + j,
+                                                          'colspan': 1, 'rowspan': 1,
+                                                          'elements': 0, 'valign': 0, 'halign': 0,
+                                                          'style': 0, 'dynamic': 0, 'sort_triggers': 0})
+        except Already_Exists:
+            pass
+
+
+def main():
+    module = AnsibleModule(
+        argument_spec=dict(
+            server_url=dict(type='str', required=True, aliases=['url']),
+            login_user=dict(type='str', required=True),
+            login_password=dict(type='str', required=True, no_log=True),
+            http_login_user=dict(type='str', required=False, default=None),
+            http_login_password=dict(type='str', required=False, default=None, no_log=True),
+            validate_certs=dict(type='bool', required=False, default=True),
+            timeout=dict(type='int', default=10),
+            screens=dict(
+                type='list',
+                elements='dict',
+                required=True,
+                options=dict(
+                    screen_name=dict(type='str', required=True),
+                    host_group=dict(type='str'),
+                    state=dict(type='str', default='present', choices=['absent', 'present']),
+                    graph_names=dict(type='list', elements='str'),
+                    graph_width=dict(type='int', default=None),
+                    graph_height=dict(type='int', default=None),
+                    graphs_in_row=dict(type='int', default=3),
+                    sort=dict(default=False, type='bool'),
+                ),
+                required_if=[
+                    ['state', 'present', ['host_group']]
+                ]
+            )
+        ),
+        supports_check_mode=True
+    )
+
+    if not HAS_ZABBIX_API:
+        module.fail_json(msg=missing_required_lib('zabbix-api', url='https://pypi.org/project/zabbix-api/'), exception=ZBX_IMP_ERR)
+
+    server_url = module.params['server_url']
+    login_user = module.params['login_user']
+    login_password = module.params['login_password']
+    http_login_user = module.params['http_login_user']
+    http_login_password = module.params['http_login_password']
+    validate_certs = module.params['validate_certs']
+    timeout = module.params['timeout']
+    screens = module.params['screens']
+
+    zbx = None
+    # login to zabbix
+    try:
+        zbx = ZabbixAPI(server_url, timeout=timeout, user=http_login_user, passwd=http_login_password,
+                        validate_certs=validate_certs)
+        zbx.login(login_user, login_password)
+        atexit.register(zbx.logout)
+    except Exception as e:
+        module.fail_json(msg="Failed to connect to Zabbix server: %s" % e)
+
+    screen = Screen(module, zbx)
+
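+    # Track the outcome for each requested screen so that a single summary
+    # message can be reported once every screen has been processed.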
created_screens = [] + changed_screens = [] + deleted_screens = [] + + for zabbix_screen in screens: + screen_name = zabbix_screen['screen_name'] + screen_id = screen.get_screen_id(screen_name) + state = zabbix_screen['state'] + sort = zabbix_screen['sort'] + + if state == "absent": + if screen_id: + screen_item_list = screen.get_screen_items(screen_id) + screen_item_id_list = [] + for screen_item in screen_item_list: + screen_item_id = screen_item['screenitemid'] + screen_item_id_list.append(screen_item_id) + screen.delete_screen_items(screen_id, screen_item_id_list) + screen.delete_screen(screen_id, screen_name) + + deleted_screens.append(screen_name) + else: + host_group = zabbix_screen['host_group'] + graph_names = zabbix_screen['graph_names'] + graphs_in_row = zabbix_screen['graphs_in_row'] + graph_width = zabbix_screen['graph_width'] + graph_height = zabbix_screen['graph_height'] + host_group_id = screen.get_host_group_id(host_group) + hosts = screen.get_host_ids_by_group_id(host_group_id, sort) + + screen_item_id_list = [] + resource_id_list = [] + + graph_ids, v_size = screen.get_graph_ids(hosts, graph_names) + h_size, v_size = screen.get_hsize_vsize(hosts, v_size, graphs_in_row) + + if not screen_id: + # create screen + screen_id = screen.create_screen(screen_name, h_size, v_size) + screen.create_screen_items(screen_id, hosts, graph_names, graph_width, graph_height, h_size, graphs_in_row) + created_screens.append(screen_name) + else: + screen_item_list = screen.get_screen_items(screen_id) + + for screen_item in screen_item_list: + screen_item_id = screen_item['screenitemid'] + resource_id = screen_item['resourceid'] + screen_item_id_list.append(screen_item_id) + resource_id_list.append(resource_id) + + # when the screen items changed, then update + if graph_ids != resource_id_list: + deleted = screen.delete_screen_items(screen_id, screen_item_id_list) + if deleted: + screen.update_screen(screen_id, screen_name, h_size, v_size) + screen.create_screen_items(screen_id, hosts, graph_names, graph_width, graph_height, h_size, graphs_in_row) + changed_screens.append(screen_name) + + if created_screens and changed_screens: + module.exit_json(changed=True, result="Successfully created screen(s): %s, and updated screen(s): %s" % (",".join(created_screens), + ",".join(changed_screens))) + elif created_screens: + module.exit_json(changed=True, result="Successfully created screen(s): %s" % ",".join(created_screens)) + elif changed_screens: + module.exit_json(changed=True, result="Successfully updated screen(s): %s" % ",".join(changed_screens)) + elif deleted_screens: + module.exit_json(changed=True, result="Successfully deleted screen(s): %s" % ",".join(deleted_screens)) + else: + module.exit_json(changed=False) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/monitoring/zabbix/zabbix_service.py b/plugins/modules/monitoring/zabbix/zabbix_service.py new file mode 100644 index 0000000000..f28dd958fd --- /dev/null +++ b/plugins/modules/monitoring/zabbix/zabbix_service.py @@ -0,0 +1,291 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2019, OVH SAS +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: zabbix_service +short_description: Create/update/delete Zabbix service +description: + - 
Create/update/delete Zabbix service.
+author:
+    - "Emmanuel Riviere (@emriver)"
+requirements:
+    - "python >= 2.7"
+    - "zabbix-api >= 0.5.4"
+options:
+    name:
+        description:
+            - Name of Zabbix service
+        required: true
+        type: str
+    parent:
+        description:
+            - Name of Zabbix service parent
+        required: false
+        type: str
+    sla:
+        description:
+            - SLA value (e.g. 99.99), goodsla in Zabbix API
+        required: false
+        type: float
+    calculate_sla:
+        description:
+            - If yes, calculate the SLA value for this service, showsla in Zabbix API
+        required: false
+        type: bool
+    algorithm:
+        description:
+            - Algorithm used to calculate the sla
+            - C(no), sla is not calculated
+            - C(one_child), problem if at least one child has a problem
+            - C(all_children), problem if all children have problems
+        required: false
+        type: str
+        choices: ["no", "one_child", "all_children"]
+        default: one_child
+    trigger_name:
+        description:
+            - Name of trigger linked to the service
+        required: false
+        type: str
+    trigger_host:
+        description:
+            - Name of host linked to the service
+        required: false
+        type: str
+    state:
+        description:
+            - 'State: present - create/update service; absent - delete service'
+        required: false
+        choices: [present, absent]
+        default: "present"
+        type: str
+
+extends_documentation_fragment:
+- community.general.zabbix
+
+'''
+
+EXAMPLES = '''
+---
+# Creates a new Zabbix service
+- name: Manage services
+  local_action:
+    module: zabbix_service
+    server_url: "https://192.168.1.1"
+    login_user: username
+    login_password: password
+    name: apache2 service
+    sla: 99.99
+    calculate_sla: yes
+    algorithm: one_child
+    trigger_name: apache2 service status
+    trigger_host: webserver01
+    state: present
+'''
+
+RETURN = '''
+---
+'''
+
+import atexit
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+
+
+try:
+    from zabbix_api import ZabbixAPI, ZabbixAPIException
+    HAS_ZABBIX_API = True
+except ImportError:
+    ZBX_IMP_ERR = traceback.format_exc()
+    HAS_ZABBIX_API = False
+
+
+class Service(object):
+    def __init__(self, module, zbx):
+        self._module = module
+        self._zapi = zbx
+
+    def get_service_ids(self, service_name):
+        service_ids = []
+        services = self._zapi.service.get({'filter': {'name': service_name}})
+        for service in services:
+            service_ids.append(service['serviceid'])
+        return service_ids
+
+    def delete_service(self, service_ids):
+        if self._module.check_mode:
+            self._module.exit_json(changed=True)
+        self._zapi.service.delete(service_ids)
+
+    def dump_services(self, service_ids):
+        services = self._zapi.service.get({'output': 'extend', 'filter': {'serviceid': service_ids}, 'selectParent': '1'})
+        return services
+
+    def generate_service_config(self, name, parent, sla, calculate_sla, trigger_name, trigger_host, algorithm):
+        algorithms = {'no': '0', 'one_child': '1', 'all_children': '2'}
+        algorithm = algorithms[algorithm]
+
+        if calculate_sla:
+            calculate_sla = 1
+        else:
+            calculate_sla = 0
+
+        # The Zabbix API expects a trigger id; 0 means the service is not
+        # linked to any trigger.
+        trigger_id = 0
+        if trigger_host and trigger_name:
+            # Retrieving the host to get the trigger
+            hosts = self._zapi.host.get({'filter': {'host': trigger_host}})
+            if not hosts:
+                self._module.fail_json(msg="Target host %s not found" % trigger_host)
+            host_id = hosts[0]['hostid']
+
+            triggers = self._zapi.trigger.get({'filter': {'description': trigger_name}, 'hostids': [host_id]})
+            if not triggers:
+                self._module.fail_json(msg="Trigger %s not found on host %s" % (trigger_name, trigger_host))
+            trigger_id = triggers[0]['triggerid']
+
+        request
= { + 'name': name, + 'algorithm': algorithm, + 'showsla': calculate_sla, + 'sortorder': 1, + 'goodsla': format(sla, '.4f'), # Sla has 4 decimals + 'triggerid': trigger_id + } + + if parent: + parent_ids = self.get_service_ids(parent) + if not parent_ids: + self._module.fail_json(msg="Parent %s not found" % parent) + request['parentid'] = parent_ids[0] + return request + + def create_service(self, name, parent, sla, calculate_sla, trigger_name, trigger_host, algorithm): + if self._module.check_mode: + self._module.exit_json(changed=True) + self._zapi.service.create(self.generate_service_config(name, parent, sla, calculate_sla, trigger_name, trigger_host, algorithm)) + + def update_service(self, service_id, name, parent, sla, calculate_sla, trigger_name, trigger_host, algorithm): + generated_config = self.generate_service_config(name, parent, sla, calculate_sla, trigger_name, trigger_host, algorithm) + live_config = self.dump_services(service_id)[0] + + item_to_check = ['name', 'showsla', 'algorithm', 'triggerid', 'sortorder', 'goodsla'] + change = False + for item in item_to_check: + if str(generated_config[item]) != str(live_config[item]): + change = True + + # In Zabbix 4.0 + # No parent returns : "parent": [] + # A parent returns : "parent": { "serviceid": 12 } + if 'parentid' in generated_config: + if 'serviceid' in live_config['parent']: + if generated_config['parentid'] != live_config['parent']['serviceid']: + change = True + else: + change = True + elif 'serviceid' in live_config['parent']: + change = True + + if not change: + self._module.exit_json(changed=False, msg="Service %s up to date" % name) + + if self._module.check_mode: + self._module.exit_json(changed=True) + generated_config['serviceid'] = service_id + self._zapi.service.update(generated_config) + self._module.exit_json(changed=True, msg="Service %s updated" % name) + + +def main(): + module = AnsibleModule( + argument_spec=dict( + server_url=dict(type='str', required=True, aliases=['url']), + login_user=dict(type='str', required=True), + login_password=dict(type='str', required=True, no_log=True), + http_login_user=dict(type='str', required=False, default=None), + http_login_password=dict(type='str', required=False, default=None, no_log=True), + validate_certs=dict(type='bool', required=False, default=True), + name=dict(type='str', required=True), + parent=dict(type='str', required=False), + sla=dict(type='float', required=False), + calculate_sla=dict(type='bool', required=False, default=False), + algorithm=dict(default='one_child', required=False, choices=['no', 'one_child', 'all_children']), + trigger_name=dict(type='str', required=False), + trigger_host=dict(type='str', required=False), + state=dict(default="present", choices=['present', 'absent']), + timeout=dict(type='int', default=10) + ), + supports_check_mode=True + ) + + if not HAS_ZABBIX_API: + module.fail_json(msg=missing_required_lib('zabbix-api', url='https://pypi.org/project/zabbix-api/'), exception=ZBX_IMP_ERR) + + server_url = module.params['server_url'] + login_user = module.params['login_user'] + login_password = module.params['login_password'] + http_login_user = module.params['http_login_user'] + http_login_password = module.params['http_login_password'] + validate_certs = module.params['validate_certs'] + name = module.params['name'] + parent = module.params['parent'] + sla = module.params['sla'] + calculate_sla = module.params['calculate_sla'] + algorithm = module.params['algorithm'] + trigger_name = module.params['trigger_name'] + trigger_host = 
module.params['trigger_host'] + state = module.params['state'] + timeout = module.params['timeout'] + + zbx = None + + # Login to zabbix + try: + zbx = ZabbixAPI(server_url, timeout=timeout, user=http_login_user, passwd=http_login_password, validate_certs=validate_certs) + zbx.login(login_user, login_password) + atexit.register(zbx.logout) + except ZabbixAPIException as error: + module.fail_json(msg="Failed to connect to Zabbix server: %s" % error) + + # Load service module + service = Service(module, zbx) + service_ids = service.get_service_ids(name) + if service_ids: + service_json = service.dump_services(service_ids) + + # Delete service + if state == "absent": + if not service_ids: + module.exit_json(changed=False, msg="Service not found, no change: %s" % name) + service.delete_service(service_ids) + module.exit_json(changed=True, result="Successfully deleted service(s) %s" % name) + + elif state == "present": + if (trigger_name and not trigger_host) or (trigger_host and not trigger_name): + module.fail_json(msg="Specify either both trigger_host and trigger_name or none to create or update a service") + # Does not exists going to create it + if not service_ids: + service.create_service(name, parent, sla, calculate_sla, trigger_name, trigger_host, algorithm) + module.exit_json(changed=True, msg="Service %s created" % name) + # Else we update it if needed + else: + service.update_service(service_ids[0], name, parent, sla, calculate_sla, trigger_name, trigger_host, algorithm) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/monitoring/zabbix/zabbix_template.py b/plugins/modules/monitoring/zabbix/zabbix_template.py new file mode 100644 index 0000000000..86fb659f88 --- /dev/null +++ b/plugins/modules/monitoring/zabbix/zabbix_template.py @@ -0,0 +1,795 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2017, sookido +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = r''' +--- +module: zabbix_template +short_description: Create/update/delete/dump Zabbix template +description: + - This module allows you to create, modify, delete and dump Zabbix templates. + - Multiple templates can be created or modified at once if passing JSON or XML to module. +author: + - "sookido (@sookido)" + - "Logan Vig (@logan2211)" + - "Dusan Matejka (@D3DeFi)" +requirements: + - "python >= 2.6" + - "zabbix-api >= 0.5.4" +options: + template_name: + description: + - Name of Zabbix template. + - Required when I(template_json) or I(template_xml) are not used. + - Mutually exclusive with I(template_json) and I(template_xml). + required: false + type: str + template_json: + description: + - JSON dump of templates to import. + - Multiple templates can be imported this way. + - Mutually exclusive with I(template_name) and I(template_xml). + required: false + type: json + template_xml: + description: + - XML dump of templates to import. + - Multiple templates can be imported this way. + - You are advised to pass XML structure matching the structure used by your version of Zabbix server. + - Custom XML structure can be imported as long as it is valid, but may not yield consistent idempotent + results on subsequent runs. + - Mutually exclusive with I(template_name) and I(template_json). 
+ required: false + type: str + template_groups: + description: + - List of host groups to add template to when template is created. + - Replaces the current host groups the template belongs to if the template is already present. + - Required when creating a new template with C(state=present) and I(template_name) is used. + Not required when updating an existing template. + required: false + type: list + elements: str + link_templates: + description: + - List of template names to be linked to the template. + - Templates that are not specified and are linked to the existing template will be only unlinked and not + cleared from the template. + required: false + type: list + elements: str + clear_templates: + description: + - List of template names to be unlinked and cleared from the template. + - This option is ignored if template is being created for the first time. + required: false + type: list + elements: str + macros: + description: + - List of user macros to create for the template. + - Macros that are not specified and are present on the existing template will be replaced. + - See examples on how to pass macros. + required: false + type: list + elements: dict + suboptions: + name: + description: + - Name of the macro. + - Must be specified in {$NAME} format. + type: str + value: + description: + - Value of the macro. + type: str + dump_format: + description: + - Format to use when dumping template with C(state=dump). + - This option is deprecated and will eventually be removed in 2.14. + required: false + choices: [json, xml] + default: "json" + type: str + omit_date: + description: + - Removes the date field for the exported/dumped template + - Requires C(state=dump) + required: false + type: bool + default: false + state: + description: + - Required state of the template. + - On C(state=present) template will be created/imported or updated depending if it is already present. + - On C(state=dump) template content will get dumped into required format specified in I(dump_format). + - On C(state=absent) template will be deleted. + - The C(state=dump) is deprecated and will eventually be removed in 2.14. The M(zabbix_template_info) module should be used instead. 
+        required: false
+        choices: [present, absent, dump]
+        default: "present"
+        type: str
+
+extends_documentation_fragment:
+- community.general.zabbix
+
+'''
+
+EXAMPLES = r'''
+---
+- name: Create a new Zabbix template linked to groups, macros and templates
+  local_action:
+    module: zabbix_template
+    server_url: http://127.0.0.1
+    login_user: username
+    login_password: password
+    template_name: ExampleHost
+    template_groups:
+      - Role
+      - Role2
+    link_templates:
+      - Example template1
+      - Example template2
+    macros:
+      - macro: '{$EXAMPLE_MACRO1}'
+        value: 30000
+      - macro: '{$EXAMPLE_MACRO2}'
+        value: 3
+      - macro: '{$EXAMPLE_MACRO3}'
+        value: 'Example'
+    state: present
+
+- name: Unlink and clear templates from the existing Zabbix template
+  local_action:
+    module: zabbix_template
+    server_url: http://127.0.0.1
+    login_user: username
+    login_password: password
+    template_name: ExampleHost
+    clear_templates:
+      - Example template3
+      - Example template4
+    state: present
+
+- name: Import Zabbix templates from JSON
+  local_action:
+    module: zabbix_template
+    server_url: http://127.0.0.1
+    login_user: username
+    login_password: password
+    template_json: "{{ lookup('file', 'zabbix_apache2.json') }}"
+    state: present
+
+- name: Import Zabbix templates from XML
+  local_action:
+    module: zabbix_template
+    server_url: http://127.0.0.1
+    login_user: username
+    login_password: password
+    template_xml: "{{ lookup('file', 'zabbix_apache2.xml') }}"
+    state: present
+
+- name: Import Zabbix template from Ansible dict variable
+  zabbix_template:
+    login_user: username
+    login_password: password
+    server_url: http://127.0.0.1
+    template_json:
+      zabbix_export:
+        version: '3.2'
+        templates:
+          - name: Template for Testing
+            description: 'Testing template import'
+            template: Test Template
+            groups:
+              - name: Templates
+            applications:
+              - name: Test Application
+    state: present
+
+- name: Configure macros on the existing Zabbix template
+  local_action:
+    module: zabbix_template
+    server_url: http://127.0.0.1
+    login_user: username
+    login_password: password
+    template_name: Template
+    macros:
+      - macro: '{$TEST_MACRO}'
+        value: 'Example'
+    state: present
+
+- name: Delete Zabbix template
+  local_action:
+    module: zabbix_template
+    server_url: http://127.0.0.1
+    login_user: username
+    login_password: password
+    template_name: Template
+    state: absent
+
+- name: Dump Zabbix template as JSON
+  local_action:
+    module: zabbix_template
+    server_url: http://127.0.0.1
+    login_user: username
+    login_password: password
+    template_name: Template
+    omit_date: yes
+    state: dump
+  register: template_dump
+
+- name: Dump Zabbix template as XML
+  local_action:
+    module: zabbix_template
+    server_url: http://127.0.0.1
+    login_user: username
+    login_password: password
+    template_name: Template
+    dump_format: xml
+    omit_date: false
+    state: dump
+  register: template_dump
+'''
+
+RETURN = r'''
+---
+template_json:
+  description: The JSON dump of the template
+  returned: when state is dump and omit_date is no
+  type: str
+  sample: {
+        "zabbix_export":{
+            "date":"2017-11-29T16:37:24Z",
+            "templates":[{
+                "templates":[],
+                "description":"",
+                "httptests":[],
+                "screens":[],
+                "applications":[],
+                "discovery_rules":[],
+                "groups":[{"name":"Templates"}],
+                "name":"Test Template",
+                "items":[],
+                "macros":[],
+                "template":"test"
+            }],
+            "version":"3.2",
+            "groups":[{
+                "name":"Templates"
+            }]
+        }
+    }
+
+template_xml:
+  description: dump of the template in XML representation
+  returned: when state is dump, dump_format is xml and omit_date is
yes
+  type: str
+  sample: |-
+    <?xml version="1.0" encoding="UTF-8"?>
+    <zabbix_export>
+        <version>4.2</version>
+        <groups>
+            <group>
+                <name>Templates</name>
+            </group>
+        </groups>
+    </zabbix_export>
+'''
+
+
+import atexit
+import json
+import traceback
+import xml.etree.ElementTree as ET
+
+from distutils.version import LooseVersion
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_native
+
+try:
+    from zabbix_api import ZabbixAPI, ZabbixAPIException
+
+    HAS_ZABBIX_API = True
+except ImportError:
+    ZBX_IMP_ERR = traceback.format_exc()
+    HAS_ZABBIX_API = False
+
+
+class Template(object):
+    def __init__(self, module, zbx):
+        self._module = module
+        self._zapi = zbx
+
+    # check if host group exists
+    def check_host_group_exist(self, group_names):
+        for group_name in group_names:
+            result = self._zapi.hostgroup.get({'filter': {'name': group_name}})
+            if not result:
+                self._module.fail_json(msg="Hostgroup not found: %s" %
+                                           group_name)
+        return True
+
+    # get group ids by group names
+    def get_group_ids_by_group_names(self, group_names):
+        group_ids = []
+        if group_names is None or len(group_names) == 0:
+            return group_ids
+        if self.check_host_group_exist(group_names):
+            group_list = self._zapi.hostgroup.get(
+                {'output': 'extend',
+                 'filter': {'name': group_names}})
+            for group in group_list:
+                group_id = group['groupid']
+                group_ids.append({'groupid': group_id})
+        return group_ids
+
+    def get_template_ids(self, template_list):
+        template_ids = []
+        if template_list is None or len(template_list) == 0:
+            return template_ids
+        for template in template_list:
+            template_list = self._zapi.template.get(
+                {'output': 'extend',
+                 'filter': {'host': template}})
+            if len(template_list) < 1:
+                continue
+            else:
+                template_id = template_list[0]['templateid']
+                template_ids.append(template_id)
+        return template_ids
+
+    def add_template(self, template_name, group_ids, link_template_ids, macros):
+        if self._module.check_mode:
+            self._module.exit_json(changed=True)
+
+        self._zapi.template.create({'host': template_name, 'groups': group_ids, 'templates': link_template_ids,
+                                    'macros': macros})
+
+    def check_template_changed(self, template_ids, template_groups, link_templates, clear_templates,
+                               template_macros, template_content, template_type):
+        """Compares template parameters to already existing values if any are found.
+
+        template_json - JSON structures are compared as deep sorted dictionaries,
+        template_xml - XML structures are compared as strings, but filtered and formatted first,
+        If neither of the above is used, all other arguments are compared to their existing
+        counterparts retrieved from the Zabbix API."""
+        changed = False
+        # Compare filtered and formatted XML strings for any changes. It is expected that the provided
+        # XML has the same structure as Zabbix uses (e.g.
it was optimally exported via Zabbix GUI or API) + if template_content is not None and template_type == 'xml': + existing_template = self.dump_template(template_ids, template_type='xml') + + if self.filter_xml_template(template_content) != self.filter_xml_template(existing_template): + changed = True + + return changed + + existing_template = self.dump_template(template_ids, template_type='json') + # Compare JSON objects as deep sorted python dictionaries + if template_content is not None and template_type == 'json': + parsed_template_json = self.load_json_template(template_content) + if self.diff_template(parsed_template_json, existing_template): + changed = True + + return changed + + # If neither template_json or template_xml were used, user provided all parameters via module options + if template_groups is not None: + existing_groups = [g['name'] for g in existing_template['zabbix_export']['groups']] + + if set(template_groups) != set(existing_groups): + changed = True + + if 'templates' not in existing_template['zabbix_export']['templates'][0]: + existing_template['zabbix_export']['templates'][0]['templates'] = [] + + # Check if any new templates would be linked or any existing would be unlinked + exist_child_templates = [t['name'] for t in existing_template['zabbix_export']['templates'][0]['templates']] + if link_templates is not None: + if set(link_templates) != set(exist_child_templates): + changed = True + else: + if set([]) != set(exist_child_templates): + changed = True + + # Mark that there will be changes when at least one existing template will be unlinked + if clear_templates is not None: + for t in clear_templates: + if t in exist_child_templates: + changed = True + break + + if 'macros' not in existing_template['zabbix_export']['templates'][0]: + existing_template['zabbix_export']['templates'][0]['macros'] = [] + + if template_macros is not None: + existing_macros = existing_template['zabbix_export']['templates'][0]['macros'] + if template_macros != existing_macros: + changed = True + + return changed + + def update_template(self, template_ids, group_ids, link_template_ids, clear_template_ids, template_macros): + template_changes = {} + if group_ids is not None: + template_changes.update({'groups': group_ids}) + + if link_template_ids is not None: + template_changes.update({'templates': link_template_ids}) + else: + template_changes.update({'templates': []}) + + if clear_template_ids is not None: + template_changes.update({'templates_clear': clear_template_ids}) + + if template_macros is not None: + template_changes.update({'macros': template_macros}) + + if template_changes: + # If we got here we know that only one template was provided via template_name + template_changes.update({'templateid': template_ids[0]}) + self._zapi.template.update(template_changes) + + def delete_template(self, templateids): + if self._module.check_mode: + self._module.exit_json(changed=True) + self._zapi.template.delete(templateids) + + def ordered_json(self, obj): + # Deep sort json dicts for comparison + if isinstance(obj, dict): + return sorted((k, self.ordered_json(v)) for k, v in obj.items()) + if isinstance(obj, list): + return sorted(self.ordered_json(x) for x in obj) + else: + return obj + + def dump_template(self, template_ids, template_type='json', omit_date=False): + if self._module.check_mode: + self._module.exit_json(changed=True) + + try: + dump = self._zapi.configuration.export({'format': template_type, 'options': {'templates': template_ids}}) + if template_type == 'xml': + 
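+                # configuration.export returns the XML document as a string;
+                # it is parsed here so the optional <date> element can be
+                # stripped before the dump is returned to the caller.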
xmlroot = ET.fromstring(dump.encode('utf-8'))
+                # remove date field if requested
+                if omit_date:
+                    date = xmlroot.find(".date")
+                    if date is not None:
+                        xmlroot.remove(date)
+                return str(ET.tostring(xmlroot, encoding='utf-8').decode('utf-8'))
+            else:
+                return self.load_json_template(dump, omit_date=omit_date)
+
+        except ZabbixAPIException as e:
+            self._module.fail_json(msg='Unable to export template: %s' % e)
+
+    def diff_template(self, template_json_a, template_json_b):
+        # Compare 2 zabbix templates and return True if they differ.
+        template_json_a = self.filter_template(template_json_a)
+        template_json_b = self.filter_template(template_json_b)
+        if self.ordered_json(template_json_a) == self.ordered_json(template_json_b):
+            return False
+        return True
+
+    def filter_template(self, template_json):
+        # Filter the template json to contain only the keys we will update
+        keep_keys = set(['graphs', 'templates', 'triggers', 'value_maps'])
+        unwanted_keys = set(template_json['zabbix_export']) - keep_keys
+        for unwanted_key in unwanted_keys:
+            del template_json['zabbix_export'][unwanted_key]
+
+        # Versions older than 2.4 do not support description field within template
+        desc_not_supported = False
+        if LooseVersion(self._zapi.api_version()).version[:2] < LooseVersion('2.4').version:
+            desc_not_supported = True
+
+        # Filter empty attributes from template object to allow accurate comparison
+        for template in template_json['zabbix_export']['templates']:
+            for key in list(template.keys()):
+                if not template[key] or (key == 'description' and desc_not_supported):
+                    template.pop(key)
+
+        return template_json
+
+    def filter_xml_template(self, template_xml):
+        """Filters out keys from XML template that may vary between exports (e.g. date or version) and
+        keys that are not imported via this module.
+
+        It is advised that the provided XML template exactly matches the XML structure used by Zabbix."""
+        # Strip last new line and convert string to ElementTree
+        parsed_xml_root = self.load_xml_template(template_xml.strip())
+        keep_keys = ['graphs', 'templates', 'triggers', 'value_maps']
+
+        # Remove unwanted XML nodes
+        for node in list(parsed_xml_root):
+            if node.tag not in keep_keys:
+                parsed_xml_root.remove(node)
+
+        # Filter empty attributes from template objects to allow accurate comparison
+        for template in list(parsed_xml_root.find('templates')):
+            for element in list(template):
+                if element.text is None and len(list(element)) == 0:
+                    template.remove(element)
+
+        # Filter new lines and indentation
+        xml_root_text = list(line.strip() for line in ET.tostring(parsed_xml_root, encoding='utf8', method='xml').decode().split('\n'))
+        return ''.join(xml_root_text)
+
+    def load_json_template(self, template_json, omit_date=False):
+        try:
+            jsondoc = json.loads(template_json)
+            if omit_date and 'date' in jsondoc['zabbix_export']:
+                del jsondoc['zabbix_export']['date']
+            return jsondoc
+        except ValueError as e:
+            self._module.fail_json(msg='Invalid JSON provided', details=to_native(e), exception=traceback.format_exc())
+
+    def load_xml_template(self, template_xml):
+        try:
+            return ET.fromstring(template_xml)
+        except ET.ParseError as e:
+            self._module.fail_json(msg='Invalid XML provided', details=to_native(e), exception=traceback.format_exc())
+
+    def import_template(self, template_content, template_type='json'):
+        # rules schema latest version
+        update_rules = {
+            'applications': {
+                'createMissing': True,
+                'deleteMissing': True
+            },
+            'discoveryRules': {
+                'createMissing': True,
+                'updateExisting': True,
+                'deleteMissing': True
+            },
+            'graphs': {
+                'createMissing': True,
+                'updateExisting': True,
+                'deleteMissing': True
+            },
+            'groups': {
+                'createMissing': True
+            },
+            'httptests': {
+                'createMissing': True,
+                'updateExisting': True,
+                'deleteMissing': True
+            },
+            'items': {
+                'createMissing': True,
+                'updateExisting': True,
+                'deleteMissing': True
+            },
+            'templates': {
+                'createMissing': True,
+                'updateExisting': True
+            },
+            'templateLinkage': {
+                'createMissing': True
+            },
+            'templateScreens': {
+                'createMissing': True,
+                'updateExisting': True,
+                'deleteMissing': True
+            },
+            'triggers': {
+                'createMissing': True,
+                'updateExisting': True,
+                'deleteMissing': True
+            },
+            'valueMaps': {
+                'createMissing': True,
+                'updateExisting': True
+            }
+        }
+
+        try:
+            # old api version support here
+            api_version = self._zapi.api_version()
+            # updateExisting for applications was removed from the Zabbix API after 3.2
+            if LooseVersion(api_version).version[:2] <= LooseVersion('3.2').version:
+                update_rules['applications']['updateExisting'] = True
+
+            # templateLinkage.deleteMissing is only available in 4.0.16 and later releases
+            # of the 4.0 branch, and from 4.4.4 onward; it is not available in the 4.2
+            # branch or in 4.0 releases older than 4.0.16.
+            if LooseVersion(api_version).version[:2] == LooseVersion('4.0').version and \
+                    LooseVersion(api_version).version[:3] >= LooseVersion('4.0.16').version:
+                update_rules['templateLinkage']['deleteMissing'] = True
+            if LooseVersion(api_version).version[:3] >= LooseVersion('4.4.4').version:
+                update_rules['templateLinkage']['deleteMissing'] = True
+
+            import_data = {'format': template_type, 'source': template_content, 'rules': update_rules}
+            self._zapi.configuration.import_(import_data)
+        except ZabbixAPIException as e:
+            self._module.fail_json(msg='Unable to import template', details=to_native(e),
+                                   exception=traceback.format_exc())
+
+
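+# Flow overview: main() resolves template names from template_name,
+# template_json or template_xml, maps them to IDs via get_template_ids(),
+# and dispatches on state: absent -> delete_template(), dump ->
+# dump_template(), present -> import_template()/add_template()/update_template().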
+def main(): + module = AnsibleModule( + argument_spec=dict( + server_url=dict(type='str', required=True, aliases=['url']), + login_user=dict(type='str', required=True), + login_password=dict(type='str', required=True, no_log=True), + http_login_user=dict(type='str', required=False, default=None), + http_login_password=dict(type='str', required=False, default=None, no_log=True), + validate_certs=dict(type='bool', required=False, default=True), + template_name=dict(type='str', required=False), + template_json=dict(type='json', required=False), + template_xml=dict(type='str', required=False), + template_groups=dict(type='list', required=False), + link_templates=dict(type='list', required=False), + clear_templates=dict(type='list', required=False), + macros=dict(type='list', required=False), + omit_date=dict(type='bool', required=False, default=False), + dump_format=dict(type='str', required=False, default='json', choices=['json', 'xml']), + state=dict(type='str', default="present", choices=['present', 'absent', 'dump']), + timeout=dict(type='int', default=10) + ), + required_one_of=[ + ['template_name', 'template_json', 'template_xml'] + ], + mutually_exclusive=[ + ['template_name', 'template_json', 'template_xml'] + ], + required_if=[ + ['state', 'absent', ['template_name']], + ['state', 'dump', ['template_name']] + ], + supports_check_mode=True + ) + + if not HAS_ZABBIX_API: + module.fail_json(msg=missing_required_lib('zabbix-api', url='https://pypi.org/project/zabbix-api/'), exception=ZBX_IMP_ERR) + + server_url = module.params['server_url'] + login_user = module.params['login_user'] + login_password = module.params['login_password'] + http_login_user = module.params['http_login_user'] + http_login_password = module.params['http_login_password'] + validate_certs = module.params['validate_certs'] + template_name = module.params['template_name'] + template_json = module.params['template_json'] + template_xml = module.params['template_xml'] + template_groups = module.params['template_groups'] + link_templates = module.params['link_templates'] + clear_templates = module.params['clear_templates'] + template_macros = module.params['macros'] + omit_date = module.params['omit_date'] + dump_format = module.params['dump_format'] + state = module.params['state'] + timeout = module.params['timeout'] + + zbx = None + try: + zbx = ZabbixAPI(server_url, timeout=timeout, user=http_login_user, passwd=http_login_password, + validate_certs=validate_certs) + zbx.login(login_user, login_password) + atexit.register(zbx.logout) + except ZabbixAPIException as e: + module.fail_json(msg="Failed to connect to Zabbix server: %s" % e) + + template = Template(module, zbx) + + # Identify template names for IDs retrieval + # Template names are expected to reside in ['zabbix_export']['templates'][*]['template'] for both data types + template_content, template_type = None, None + if template_json is not None: + template_type = 'json' + template_content = template_json + json_parsed = template.load_json_template(template_content) + template_names = list(t['template'] for t in json_parsed['zabbix_export']['templates']) + + elif template_xml is not None: + template_type = 'xml' + template_content = template_xml + xml_parsed = template.load_xml_template(template_content) + template_names = list(t.find('template').text for t in list(xml_parsed.find('templates'))) + + else: + template_names = [template_name] + + template_ids = template.get_template_ids(template_names) + + if state == "absent": + if not template_ids: + 
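+            # No matching template IDs were found, so there is nothing to
+            # delete; report an unchanged result naming the missing template.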
module.exit_json(changed=False, msg="Template not found. No changed: %s" % template_name) + + template.delete_template(template_ids) + module.exit_json(changed=True, result="Successfully deleted template %s" % template_name) + + elif state == "dump": + module.deprecate("The 'dump' state has been deprecated and will be removed, use 'zabbix_template_info' module instead.", version='2.14') + if not template_ids: + module.fail_json(msg='Template not found: %s' % template_name) + + if dump_format == 'json': + module.exit_json(changed=False, template_json=template.dump_template(template_ids, template_type='json', omit_date=omit_date)) + elif dump_format == 'xml': + module.exit_json(changed=False, template_xml=template.dump_template(template_ids, template_type='xml', omit_date=omit_date)) + + elif state == "present": + # Load all subelements for template that were provided by user + group_ids = None + if template_groups is not None: + group_ids = template.get_group_ids_by_group_names(template_groups) + + link_template_ids = None + if link_templates is not None: + link_template_ids = template.get_template_ids(link_templates) + + clear_template_ids = None + if clear_templates is not None: + clear_template_ids = template.get_template_ids(clear_templates) + + if template_macros is not None: + # Zabbix configuration.export does not differentiate python types (numbers are returned as strings) + for macroitem in template_macros: + for key in macroitem: + macroitem[key] = str(macroitem[key]) + + if not template_ids: + # Assume new templates are being added when no ID's were found + if template_content is not None: + template.import_template(template_content, template_type) + module.exit_json(changed=True, result="Template import successful") + + else: + if group_ids is None: + module.fail_json(msg='template_groups are required when creating a new Zabbix template') + + template.add_template(template_name, group_ids, link_template_ids, template_macros) + module.exit_json(changed=True, result="Successfully added template: %s" % template_name) + + else: + changed = template.check_template_changed(template_ids, template_groups, link_templates, clear_templates, + template_macros, template_content, template_type) + + if module.check_mode: + module.exit_json(changed=changed) + + if changed: + if template_type is not None: + template.import_template(template_content, template_type) + else: + template.update_template(template_ids, group_ids, link_template_ids, clear_template_ids, + template_macros) + + module.exit_json(changed=changed, result="Template successfully updated") + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/monitoring/zabbix/zabbix_template_info.py b/plugins/modules/monitoring/zabbix/zabbix_template_info.py new file mode 100644 index 0000000000..cdc5d573da --- /dev/null +++ b/plugins/modules/monitoring/zabbix/zabbix_template_info.py @@ -0,0 +1,273 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2019, sky-joker +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +module: zabbix_template_info +short_description: Gather information about Zabbix template +author: + - sky-joker (@sky-joker) +description: + - This module allows you to search for Zabbix template. 
+requirements:
+  - "python >= 2.6"
+  - "zabbix-api >= 0.5.4"
+options:
+  template_name:
+    description:
+      - Name of the template in Zabbix.
+    required: true
+    type: str
+  format:
+    description:
+      - Format to use when dumping the template.
+    choices: ['json', 'xml']
+    default: json
+    type: str
+  omit_date:
+    description:
+      - Removes the date field from the dumped template.
+    required: false
+    type: bool
+    default: false
+extends_documentation_fragment:
+- community.general.zabbix
+
+'''
+
+EXAMPLES = '''
+- name: Get Zabbix template as JSON
+  zabbix_template_info:
+    server_url: "http://zabbix.example.com/zabbix/"
+    login_user: admin
+    login_password: secret
+    template_name: Template
+    format: json
+    omit_date: yes
+  register: template_json
+
+- name: Get Zabbix template as XML
+  zabbix_template_info:
+    server_url: "http://zabbix.example.com/zabbix/"
+    login_user: admin
+    login_password: secret
+    template_name: Template
+    format: xml
+    omit_date: no
+  register: template_xml
+'''
+
+RETURN = '''
+---
+template_json:
+  description: The JSON of the template
+  returned: when format is json and omit_date is true
+  type: str
+  sample: {
+        "zabbix_export": {
+            "version": "4.0",
+            "groups": [
+                {
+                    "name": "Templates"
+                }
+            ],
+            "templates": [
+                {
+                    "template": "Test Template",
+                    "name": "Template for Testing",
+                    "description": "Testing template import",
+                    "groups": [
+                        {
+                            "name": "Templates"
+                        }
+                    ],
+                    "applications": [
+                        {
+                            "name": "Test Application"
+                        }
+                    ],
+                    "items": [],
+                    "discovery_rules": [],
+                    "httptests": [],
+                    "macros": [],
+                    "templates": [],
+                    "screens": []
+                }
+            ]
+        }
+    }
+
+template_xml:
+  description: The XML of the template
+  returned: when format is xml and omit_date is false
+  type: str
+  sample: >-
+    <?xml version="1.0" encoding="UTF-8"?>
+    <zabbix_export>
+        <version>4.0</version>
+        <date>2019-10-27T14:49:57Z</date>
+        <groups>
+            <group>
+                <name>Templates</name>
+            </group>
+        </groups>
+    </zabbix_export>
+'''
+
+import atexit
+import traceback
+import json
+import xml.etree.ElementTree as ET
+
+try:
+    from zabbix_api import ZabbixAPI
+    from zabbix_api import Already_Exists
+    from zabbix_api import ZabbixAPIException
+
+    HAS_ZABBIX_API = True
+except ImportError:
+    ZBX_IMP_ERR = traceback.format_exc()
+    HAS_ZABBIX_API = False
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_native
+
+
+class TemplateInfo(object):
+    def __init__(self, module, zbx):
+        self._module = module
+        self._zapi = zbx
+
+    def get_template_id(self, template_name):
+        template_id = []
+        try:
+            # The Zabbix API stores a template's name in the 'host' property
+            template_list = self._zapi.template.get(
+                {
+                    'output': 'extend',
+                    'filter': {
+                        'host': template_name
+                    }
+                }
+            )
+        except ZabbixAPIException as e:
+            self._module.fail_json(msg='Failed to get template: %s' % e)
+
+        if template_list:
+            template_id.append(template_list[0]['templateid'])
+
+        return template_id
+
+    def load_json_template(self, template_json, omit_date=False):
+        try:
+            jsondoc = json.loads(template_json)
+            # remove date field if requested
+            if omit_date and 'date' in jsondoc['zabbix_export']:
+                del jsondoc['zabbix_export']['date']
+            return jsondoc
+        except ValueError as e:
+            self._module.fail_json(msg='Invalid JSON provided', details=to_native(e), exception=traceback.format_exc())
+
+    def dump_template(self, template_id, template_type='json', omit_date=False):
+        try:
+            dump = self._zapi.configuration.export({'format': template_type, 'options': {'templates': template_id}})
+            if template_type == 'xml':
+                xmlroot = ET.fromstring(dump.encode('utf-8'))
+                # remove date field if requested
+                if omit_date:
+                    date = xmlroot.find(".date")
+                    if date is not None:
+                        xmlroot.remove(date)
+                return
str(ET.tostring(xmlroot, encoding='utf-8').decode('utf-8')) + else: + return self.load_json_template(dump, omit_date) + + except ZabbixAPIException as e: + self._module.fail_json(msg='Unable to export template: %s' % e) + + +def main(): + module = AnsibleModule( + argument_spec=dict( + server_url=dict(type='str', required=True, aliases=['url']), + login_user=dict(type='str', required=True), + login_password=dict(type='str', required=True, no_log=True), + http_login_user=dict(type='str', required=False, default=None), + http_login_password=dict(type='str', required=False, default=None, no_log=True), + validate_certs=dict(type='bool', required=False, default=True), + timeout=dict(type='int', default=10), + template_name=dict(type='str', required=True), + omit_date=dict(type='bool', required=False, default=False), + format=dict(type='str', choices=['json', 'xml'], default='json') + ), + supports_check_mode=False + ) + + if not HAS_ZABBIX_API: + module.fail_json(msg=missing_required_lib('zabbix-api', url='https://pypi.org/project/zabbix-api/'), + exception=ZBX_IMP_ERR) + + server_url = module.params['server_url'] + login_user = module.params['login_user'] + login_password = module.params['login_password'] + http_login_user = module.params['http_login_user'] + http_login_password = module.params['http_login_password'] + validate_certs = module.params['validate_certs'] + timeout = module.params['timeout'] + template_name = module.params['template_name'] + omit_date = module.params['omit_date'] + format = module.params['format'] + + try: + zbx = ZabbixAPI(server_url, timeout=timeout, user=http_login_user, passwd=http_login_password, + validate_certs=validate_certs) + zbx.login(login_user, login_password) + atexit.register(zbx.logout) + except Exception as e: + module.fail_json(msg="Failed to connect to Zabbix server: %s" % e) + + template_info = TemplateInfo(module, zbx) + + template_id = template_info.get_template_id(template_name) + + if not template_id: + module.fail_json(msg='Template not found: %s' % template_name) + + if format == 'json': + module.exit_json(changed=False, template_json=template_info.dump_template(template_id, template_type='json', omit_date=omit_date)) + elif format == 'xml': + module.exit_json(changed=False, template_xml=template_info.dump_template(template_id, template_type='xml', omit_date=omit_date)) + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/monitoring/zabbix/zabbix_user.py b/plugins/modules/monitoring/zabbix/zabbix_user.py new file mode 100644 index 0000000000..d68e384b32 --- /dev/null +++ b/plugins/modules/monitoring/zabbix/zabbix_user.py @@ -0,0 +1,663 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2019, sky-joker +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = r''' +module: zabbix_user +short_description: Create/update/delete Zabbix users +author: + - sky-joker (@sky-joker) +description: + - This module allows you to create, modify and delete Zabbix users. +requirements: + - "python >= 2.6" + - "zabbix-api >= 0.5.4" +options: + alias: + description: + - Name of the user alias in Zabbix. + - alias is the unique identifier used and cannot be updated using this module. + required: true + type: str + name: + description: + - Name of the user. 
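+      - Unlike I(alias), this field can be updated by this module.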
+    default: ''
+    type: str
+  surname:
+    description:
+      - Surname of the user.
+    default: ''
+    type: str
+  usrgrps:
+    description:
+      - User groups to add the user to.
+    required: true
+    type: list
+    elements: str
+  passwd:
+    description:
+      - User's password.
+    required: true
+    type: str
+  override_passwd:
+    description:
+      - Override the existing user's password with the one given in I(passwd).
+    default: no
+    type: bool
+  lang:
+    description:
+      - Language code of the user's language.
+    default: 'en_GB'
+    choices:
+      - 'en_GB'
+      - 'en_US'
+      - 'zh_CN'
+      - 'cs_CZ'
+      - 'fr_FR'
+      - 'he_IL'
+      - 'it_IT'
+      - 'ko_KR'
+      - 'ja_JP'
+      - 'nb_NO'
+      - 'pl_PL'
+      - 'pt_BR'
+      - 'pt_PT'
+      - 'ru_RU'
+      - 'sk_SK'
+      - 'tr_TR'
+      - 'uk_UA'
+    type: str
+  theme:
+    description:
+      - User's theme.
+    default: 'default'
+    choices:
+      - 'default'
+      - 'blue-theme'
+      - 'dark-theme'
+    type: str
+  autologin:
+    description:
+      - Whether to enable auto-login.
+      - If autologin is enabled, autologout cannot be enabled.
+    default: false
+    type: bool
+  autologout:
+    description:
+      - User session lifetime in seconds. If set to 0, the session will never expire.
+      - If autologout is enabled, autologin cannot be enabled.
+    default: '0'
+    type: str
+  refresh:
+    description:
+      - Automatic refresh period in seconds.
+    default: '30'
+    type: str
+  rows_per_page:
+    description:
+      - Number of object rows to show per page.
+    default: '50'
+    type: str
+  after_login_url:
+    description:
+      - URL of the page to redirect the user to after logging in.
+    default: ''
+    type: str
+  user_medias:
+    description:
+      - Set the user's media.
+    default: []
+    suboptions:
+      mediatype:
+        description:
+          - Media type name to set.
+        default: 'Email'
+        type: str
+      sendto:
+        description:
+          - Address, user name or other identifier of the recipient.
+        required: true
+        type: str
+      period:
+        description:
+          - Time when the notifications can be sent as a time period or user macros separated by a semicolon.
+          - Please review the documentation for more information on the supported time period.
+          - https://www.zabbix.com/documentation/4.0/manual/appendix/time_period
+        default: '1-7,00:00-24:00'
+        type: str
+      severity:
+        description:
+          - Trigger severities to send notifications about.
+        suboptions:
+          not_classified:
+            description:
+              - Enable/disable notifications for the not_classified severity.
+            default: True
+            type: bool
+          information:
+            description:
+              - Enable/disable notifications for the information severity.
+            default: True
+            type: bool
+          warning:
+            description:
+              - Enable/disable notifications for the warning severity.
+            default: True
+            type: bool
+          average:
+            description:
+              - Enable/disable notifications for the average severity.
            default: True
+            type: bool
+          high:
+            description:
+              - Enable/disable notifications for the high severity.
+            default: True
+            type: bool
+          disaster:
+            description:
+              - Enable/disable notifications for the disaster severity.
+            default: True
+            type: bool
+        default:
+          not_classified: True
+          information: True
+          warning: True
+          average: True
+          high: True
+          disaster: True
+        type: dict
+      active:
+        description:
+          - Whether the media is enabled.
+        default: true
+        type: bool
+    type: list
+    elements: dict
+  type:
+    description:
+      - Type of the user.
+    default: 'Zabbix user'
+    choices:
+      - 'Zabbix user'
+      - 'Zabbix admin'
+      - 'Zabbix super admin'
+    type: str
+  state:
+    description:
+      - State of the user.
+      - On C(present), it will create the user if it does not exist or update the user if the associated data is different.
+      - On C(absent), it will remove the user if it exists.
+    default: 'present'
+    choices: ['present', 'absent']
+    type: str
+extends_documentation_fragment:
+- community.general.zabbix
+
+'''
+
+EXAMPLES = r'''
+# passwd is always required by the argument spec, but an existing user's
+# password is only changed when override_passwd is enabled.
+- name: Create a Zabbix user
+ zabbix_user: + server_url: "http://zabbix.example.com/zabbix/" + login_user: Admin + login_password: secret + alias: example + name: user name + surname: user surname + usrgrps: + - Guests + - Disabled + passwd: password + lang: en_GB + theme: blue-theme + autologin: no + autologout: '0' + refresh: '30' + rows_per_page: '200' + after_login_url: '' + user_medias: + - mediatype: Email + sendto: example@example.com + period: 1-7,00:00-24:00 + severity: + not_classified: no + information: yes + warning: yes + average: yes + high: yes + disaster: yes + active: no + type: Zabbix super admin + state: present + +- name: delete of zabbix user. + zabbix_user: + server_url: "http://zabbix.example.com/zabbix/" + login_user: admin + login_password: secret + alias: example + usrgrps: + - Guests + passwd: password + user_medias: + - sendto: example@example.com + state: absent +''' + +RETURN = r''' +user_ids: + description: User id created or changed + returned: success + type: dict + sample: { "userids": [ "5" ] } +''' + +import atexit +import traceback + +try: + from zabbix_api import ZabbixAPI + from zabbix_api import Already_Exists + + HAS_ZABBIX_API = True +except ImportError: + ZBX_IMP_ERR = traceback.format_exc() + HAS_ZABBIX_API = False + +from distutils.version import LooseVersion +from ansible.module_utils.basic import AnsibleModule, missing_required_lib +import copy + + +class User(object): + def __init__(self, module, zbx): + self._module = module + self._zapi = zbx + self._zbx_api_version = zbx.api_version()[:3] + + def get_usergroupid_by_user_group_name(self, usrgrps): + user_group_ids = [] + for user_group_name in usrgrps: + user_group = self._zapi.usergroup.get({'output': 'extend', 'filter': {'name': user_group_name}}) + if user_group: + user_group_ids.append({'usrgrpid': user_group[0]['usrgrpid']}) + else: + self._module.fail_json(msg="User group not found: %s" % user_group_name) + return user_group_ids + + def check_user_exist(self, alias): + zbx_user = self._zapi.user.get({'output': 'extend', 'filter': {'alias': alias}, + 'getAccess': True, 'selectMedias': 'extend', + 'selectUsrgrps': 'extend'}) + + return zbx_user + + def convert_user_medias_parameter_types(self, user_medias): + copy_user_medias = copy.deepcopy(user_medias) + for user_media in copy_user_medias: + media_types = self._zapi.mediatype.get({'output': 'extend'}) + for media_type in media_types: + if LooseVersion(self._zbx_api_version) < LooseVersion('4.4'): + if media_type['description'] == user_media['mediatype']: + user_media['mediatypeid'] = media_type['mediatypeid'] + break + else: + if media_type['name'] == user_media['mediatype']: + user_media['mediatypeid'] = media_type['mediatypeid'] + break + + if 'mediatypeid' not in user_media: + self._module.fail_json(msg="Media type not found: %s" % user_media['mediatype']) + else: + del user_media['mediatype'] + + severity_binary_number = '' + for severity_key in 'disaster', 'high', 'average', 'warning', 'information', 'not_classified': + if user_media['severity'][severity_key]: + severity_binary_number = severity_binary_number + '1' + else: + severity_binary_number = severity_binary_number + '0' + user_media['severity'] = str(int(severity_binary_number, 2)) + + if user_media['active']: + user_media['active'] = '0' + else: + user_media['active'] = '1' + + return copy_user_medias + + def user_parameter_difference_check(self, zbx_user, alias, name, surname, user_group_ids, passwd, lang, theme, + autologin, autologout, refresh, rows_per_page, url, user_medias, user_type, + 
override_passwd): + + user_medias = self.convert_user_medias_parameter_types(user_medias) + + # existing data + existing_data = copy.deepcopy(zbx_user[0]) + usrgrpids = [] + for usrgrp in existing_data['usrgrps']: + usrgrpids.append({'usrgrpid': usrgrp['usrgrpid']}) + + existing_data['usrgrps'] = sorted(usrgrpids, key=lambda x: x['usrgrpid']) + + # Processing for zabbix 4.0 and above. + # In zabbix 4.0 and above, Email sendto is of type list. + # This module, one media supports only one Email sendto. + # Therefore following processing extract one Email from list. + if LooseVersion(self._zbx_api_version) >= LooseVersion('4.0'): + for media in existing_data['medias']: + if isinstance(media['sendto'], list): + media['sendto'] = media['sendto'][0] + + existing_data['user_medias'] = sorted(existing_data['medias'], key=lambda x: x['sendto']) + for del_key in ['medias', 'attempt_clock', 'attempt_failed', 'attempt_ip', 'debug_mode', 'users_status', + 'gui_access']: + del existing_data[del_key] + + for user_media in existing_data['user_medias']: + for del_key in ['mediaid', 'userid']: + del user_media[del_key] + + # request data + request_data = { + 'userid': zbx_user[0]['userid'], + 'alias': alias, + 'name': name, + 'surname': surname, + 'usrgrps': sorted(user_group_ids, key=lambda x: x['usrgrpid']), + 'lang': lang, + 'theme': theme, + 'autologin': autologin, + 'autologout': autologout, + 'refresh': refresh, + 'rows_per_page': rows_per_page, + 'url': url, + 'user_medias': sorted(user_medias, key=lambda x: x['sendto']), + 'type': user_type + } + + if override_passwd: + request_data['passwd'] = passwd + + user_parameter_difference_check_result = True + if existing_data == request_data: + user_parameter_difference_check_result = False + + diff_params = { + "before": existing_data, + "after": request_data + } + + return user_parameter_difference_check_result, diff_params + + def add_user(self, alias, name, surname, user_group_ids, passwd, lang, theme, autologin, autologout, refresh, + rows_per_page, url, user_medias, user_type): + + user_medias = self.convert_user_medias_parameter_types(user_medias) + + user_ids = {} + + request_data = { + 'alias': alias, + 'name': name, + 'surname': surname, + 'usrgrps': user_group_ids, + 'passwd': passwd, + 'lang': lang, + 'theme': theme, + 'autologin': autologin, + 'autologout': autologout, + 'refresh': refresh, + 'rows_per_page': rows_per_page, + 'url': url, + 'user_medias': user_medias, + 'type': user_type + } + + diff_params = {} + if not self._module.check_mode: + try: + user_ids = self._zapi.user.create(request_data) + except Exception as e: + self._module.fail_json(msg="Failed to create user %s: %s" % (alias, e)) + else: + diff_params = { + "before": "", + "after": request_data + } + + return user_ids, diff_params + + def update_user(self, zbx_user, alias, name, surname, user_group_ids, passwd, lang, theme, autologin, autologout, + refresh, rows_per_page, url, user_medias, user_type, override_passwd): + + user_medias = self.convert_user_medias_parameter_types(user_medias) + + user_ids = {} + + request_data = { + 'userid': zbx_user[0]['userid'], + 'alias': alias, + 'name': name, + 'surname': surname, + 'usrgrps': user_group_ids, + 'lang': lang, + 'theme': theme, + 'autologin': autologin, + 'autologout': autologout, + 'refresh': refresh, + 'rows_per_page': rows_per_page, + 'url': url, + 'type': user_type + } + + if override_passwd: + request_data['passwd'] = passwd + + # In the case of zabbix 3.2 or less, it is necessary to use updatemedia method to update 
media. + if LooseVersion(self._zbx_api_version) <= LooseVersion('3.2'): + try: + user_ids = self._zapi.user.update(request_data) + except Exception as e: + self._module.fail_json(msg="Failed to update user %s: %s" % (alias, e)) + + try: + user_ids = self._zapi.user.updatemedia({ + 'users': [{'userid': zbx_user[0]['userid']}], + 'medias': user_medias + }) + except Exception as e: + self._module.fail_json(msg="Failed to update user medias %s: %s" % (alias, e)) + + if LooseVersion(self._zbx_api_version) >= LooseVersion('3.4'): + try: + request_data['user_medias'] = user_medias + user_ids = self._zapi.user.update(request_data) + except Exception as e: + self._module.fail_json(msg="Failed to update user %s: %s" % (alias, e)) + + return user_ids + + def delete_user(self, zbx_user, alias): + user_ids = {} + diff_params = {} + + if not self._module.check_mode: + try: + user_ids = self._zapi.user.delete([zbx_user[0]['userid']]) + except Exception as e: + self._module.fail_json(msg="Failed to delete user %s: %s" % (alias, e)) + else: + diff_params = { + "before": zbx_user[0], + "after": "" + } + + return user_ids, diff_params + + +def main(): + module = AnsibleModule( + argument_spec=dict( + server_url=dict(type='str', required=True, aliases=['url']), + login_user=dict(type='str', required=True), + login_password=dict(type='str', required=True, no_log=True), + http_login_user=dict(type='str', required=False, default=None), + http_login_password=dict(type='str', required=False, default=None, no_log=True), + validate_certs=dict(type='bool', required=False, default=True), + alias=dict(type='str', required=True), + name=dict(type='str', default=''), + surname=dict(type='str', default=''), + usrgrps=dict(type='list', required=True), + passwd=dict(type='str', required=True, no_log=True), + override_passwd=dict(type='bool', required=False, default=False), + lang=dict(type='str', default='en_GB', choices=['en_GB', 'en_US', 'zh_CN', 'cs_CZ', 'fr_FR', + 'he_IL', 'it_IT', 'ko_KR', 'ja_JP', 'nb_NO', + 'pl_PL', 'pt_BR', 'pt_PT', 'ru_RU', 'sk_SK', + 'tr_TR', 'uk_UA']), + theme=dict(type='str', default='default', choices=['default', 'blue-theme', 'dark-theme']), + autologin=dict(type='bool', default=False), + autologout=dict(type='str', default='0'), + refresh=dict(type='str', default='30'), + rows_per_page=dict(type='str', default='50'), + after_login_url=dict(type='str', default=''), + user_medias=dict(type='list', default=[], + elements='dict', + options=dict( + mediatype=dict(type='str', default='Email'), + sendto=dict(type='str', required=True), + period=dict(type='str', default='1-7,00:00-24:00'), + severity=dict(type='dict', + options=dict( + not_classified=dict(type='bool', default=True), + information=dict(type='bool', default=True), + warning=dict(type='bool', default=True), + average=dict(type='bool', default=True), + high=dict(type='bool', default=True), + disaster=dict(type='bool', default=True)), + default=dict( + not_classified=True, + information=True, + warning=True, + average=True, + high=True, + disaster=True + )), + active=dict(type='bool', default=True) + )), + type=dict(type='str', default='Zabbix user', choices=['Zabbix user', 'Zabbix admin', 'Zabbix super admin']), + state=dict(type='str', default="present", choices=['present', 'absent']), + timeout=dict(type='int', default=10) + ), + supports_check_mode=True + ) + + if not HAS_ZABBIX_API: + module.fail_json(msg=missing_required_lib('zabbix-api', url='https://pypi.org/project/zabbix-api/'), + exception=ZBX_IMP_ERR) + + server_url = 
module.params['server_url'] + login_user = module.params['login_user'] + login_password = module.params['login_password'] + http_login_user = module.params['http_login_user'] + http_login_password = module.params['http_login_password'] + validate_certs = module.params['validate_certs'] + alias = module.params['alias'] + name = module.params['name'] + surname = module.params['surname'] + usrgrps = module.params['usrgrps'] + passwd = module.params['passwd'] + override_passwd = module.params['override_passwd'] + lang = module.params['lang'] + theme = module.params['theme'] + autologin = module.params['autologin'] + autologout = module.params['autologout'] + refresh = module.params['refresh'] + rows_per_page = module.params['rows_per_page'] + after_login_url = module.params['after_login_url'] + user_medias = module.params['user_medias'] + user_type = module.params['type'] + state = module.params['state'] + timeout = module.params['timeout'] + + if autologin: + autologin = '1' + else: + autologin = '0' + + user_type_dict = { + 'Zabbix user': '1', + 'Zabbix admin': '2', + 'Zabbix super admin': '3' + } + user_type = user_type_dict[user_type] + + zbx = None + + # login to zabbix + try: + zbx = ZabbixAPI(server_url, timeout=timeout, user=http_login_user, passwd=http_login_password, + validate_certs=validate_certs) + zbx.login(login_user, login_password) + atexit.register(zbx.logout) + except Exception as e: + module.fail_json(msg="Failed to connect to Zabbix server: %s" % e) + + user = User(module, zbx) + + user_ids = {} + zbx_user = user.check_user_exist(alias) + if state == 'present': + user_group_ids = user.get_usergroupid_by_user_group_name(usrgrps) + if zbx_user: + diff_check_result, diff_params = user.user_parameter_difference_check(zbx_user, alias, name, surname, + user_group_ids, passwd, lang, theme, + autologin, autologout, refresh, + rows_per_page, after_login_url, + user_medias, user_type, + override_passwd) + + if not module.check_mode and diff_check_result: + user_ids = user.update_user(zbx_user, alias, name, surname, user_group_ids, passwd, lang, + theme, autologin, autologout, refresh, rows_per_page, after_login_url, + user_medias, user_type, override_passwd) + else: + diff_check_result = True + user_ids, diff_params = user.add_user(alias, name, surname, user_group_ids, passwd, lang, theme, autologin, + autologout, refresh, rows_per_page, after_login_url, user_medias, + user_type) + + if state == 'absent': + if zbx_user: + diff_check_result = True + user_ids, diff_params = user.delete_user(zbx_user, alias) + else: + diff_check_result = False + diff_params = {} + + if not module.check_mode: + if user_ids: + module.exit_json(changed=True, user_ids=user_ids) + else: + module.exit_json(changed=False) + else: + if diff_check_result: + module.exit_json(changed=True, diff=diff_params) + else: + module.exit_json(changed=False, diff=diff_params) + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/monitoring/zabbix/zabbix_user_info.py b/plugins/modules/monitoring/zabbix/zabbix_user_info.py new file mode 100644 index 0000000000..c5f753fd60 --- /dev/null +++ b/plugins/modules/monitoring/zabbix/zabbix_user_info.py @@ -0,0 +1,175 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2019, sky-joker +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 
'supported_by': 'community'} + +DOCUMENTATION = ''' +module: zabbix_user_info +short_description: Gather information about Zabbix user +author: + - sky-joker (@sky-joker) +description: + - This module allows you to search for Zabbix user entries. +requirements: + - "python >= 2.6" + - "zabbix-api >= 0.5.4" +options: + alias: + description: + - Name of the user alias in Zabbix. + required: true + type: str +extends_documentation_fragment: +- community.general.zabbix + +''' + +EXAMPLES = ''' +- name: Get zabbix user info + zabbix_user_info: + server_url: "http://zabbix.example.com/zabbix/" + login_user: admin + login_password: secret + alias: example +''' + +RETURN = ''' +zabbix_user: + description: example + returned: always + type: dict + sample: { + "alias": "example", + "attempt_clock": "0", + "attempt_failed": "0", + "attempt_ip": "", + "autologin": "0", + "autologout": "0", + "debug_mode": "0", + "gui_access": "0", + "lang": "en_GB", + "medias": [ + { + "active": "0", + "mediaid": "668", + "mediatypeid": "1", + "period": "1-7,00:00-24:00", + "sendto": "example@example.com", + "severity": "63", + "userid": "660" + } + ], + "name": "user", + "refresh": "30s", + "rows_per_page": "50", + "surname": "example", + "theme": "default", + "type": "1", + "url": "", + "userid": "660", + "users_status": "0", + "usrgrps": [ + { + "debug_mode": "0", + "gui_access": "0", + "name": "Guests", + "users_status": "0", + "usrgrpid": "8" + } + ] + } +''' + +import atexit +import traceback + +try: + from zabbix_api import ZabbixAPI + + HAS_ZABBIX_API = True +except ImportError: + ZBX_IMP_ERR = traceback.format_exc() + HAS_ZABBIX_API = False + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib + + +class User(object): + def __init__(self, module, zbx): + self._module = module + self._zapi = zbx + + def get_user_by_user_alias(self, alias): + zabbix_user = "" + try: + zabbix_user = self._zapi.user.get({'output': 'extend', 'filter': {'alias': alias}, + 'getAccess': True, 'selectMedias': 'extend', + 'selectUsrgrps': 'extend'}) + except Exception as e: + self._zapi.logout() + self._module.fail_json(msg="Failed to get user information: %s" % e) + + if not zabbix_user: + zabbix_user = {} + else: + zabbix_user = zabbix_user[0] + + return zabbix_user + + +def main(): + module = AnsibleModule( + argument_spec=dict( + server_url=dict(type='str', required=True, aliases=['url']), + login_user=dict(type='str', required=True), + login_password=dict(type='str', required=True, no_log=True), + http_login_user=dict(type='str', required=False, default=None), + http_login_password=dict(type='str', required=False, default=None, no_log=True), + validate_certs=dict(type='bool', required=False, default=True), + alias=dict(type='str', required=True), + timeout=dict(type='int', default=10) + ), + supports_check_mode=True + ) + + if not HAS_ZABBIX_API: + module.fail_json(msg=missing_required_lib('zabbix-api', url='https://pypi.org/project/zabbix-api/'), + exception=ZBX_IMP_ERR) + + server_url = module.params['server_url'] + login_user = module.params['login_user'] + login_password = module.params['login_password'] + http_login_user = module.params['http_login_user'] + http_login_password = module.params['http_login_password'] + validate_certs = module.params['validate_certs'] + alias = module.params['alias'] + timeout = module.params['timeout'] + + zbx = None + + # login to zabbix + try: + zbx = ZabbixAPI(server_url, timeout=timeout, user=http_login_user, passwd=http_login_password, + 
validate_certs=validate_certs) + zbx.login(login_user, login_password) + atexit.register(zbx.logout) + except Exception as e: + module.fail_json(msg="Failed to connect to Zabbix server: %s" % e) + + user = User(module, zbx) + zabbix_user = user.get_user_by_user_alias(alias) + zbx.logout() + module.exit_json(changed=False, zabbix_user=zabbix_user) + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/monitoring/zabbix/zabbix_valuemap.py b/plugins/modules/monitoring/zabbix/zabbix_valuemap.py new file mode 100644 index 0000000000..adf093b666 --- /dev/null +++ b/plugins/modules/monitoring/zabbix/zabbix_valuemap.py @@ -0,0 +1,339 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2019, Ruben Tsirunyan +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = r''' +--- +module: zabbix_valuemap +short_description: Create/update/delete Zabbix value maps +description: + - This module allows you to create, modify and delete Zabbix value maps. +author: + - "Ruben Tsirunyan (@rubentsirunyan)" +requirements: + - "zabbix-api >= 0.5.4" +options: + name: + type: 'str' + description: + - Name of the value map. + required: true + state: + type: 'str' + description: + - State of the value map. + - On C(present), it will create a value map if it does not exist or update the value map if the associated data is different. + - On C(absent), it will remove the value map if it exists. + choices: ['present', 'absent'] + default: 'present' + mappings: + type: 'list' + elements: dict + description: + - List of value mappings for the value map. + - Required when I(state=present). + suboptions: + value: + type: 'str' + description: Original value. + required: true + map_to: + type: 'str' + description: Value to which the original value is mapped to. + required: true + +extends_documentation_fragment: +- community.general.zabbix + +''' + +RETURN = r''' +''' + +EXAMPLES = r''' +- name: Create a value map + local_action: + module: zabbix_valuemap + server_url: http://zabbix.example.com + login_user: username + login_password: password + name: Numbers + mappings: + - value: 1 + map_to: one + - value: 2 + map_to: two + state: present +''' + + +import atexit +import traceback + +try: + from zabbix_api import ZabbixAPI + HAS_ZABBIX_API = True +except ImportError: + ZBX_IMP_ERR = traceback.format_exc() + HAS_ZABBIX_API = False + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib + + +def construct_parameters(**kwargs): + """Translates data to a format suitable for Zabbix API + + Args: + **kwargs: Arguments passed to the module. + + Returns: + A dictionary of arguments in a format that is understandable by Zabbix API. + """ + if kwargs['mappings'] is None: + return dict( + name=kwargs['name'] + ) + return dict( + name=kwargs['name'], + mappings=[ + dict( + value=mapping['value'], + newvalue=mapping['map_to'] + ) for mapping in kwargs['mappings'] + ] + ) + + +def check_if_valuemap_exists(module, zbx, name): + """Checks if value map exists. + + Args: + module: AnsibleModule object + zbx: ZabbixAPI object + name: Zabbix valuemap name + + Returns: + tuple: First element is True if valuemap exists and False otherwise. + Second element is a dictionary of valuemap object if it exists. 
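+
+    Note:
+        On API errors this function does not return; it calls module.fail_json.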
+ """ + try: + valuemap_list = zbx.valuemap.get({ + 'output': 'extend', + 'selectMappings': 'extend', + 'filter': {'name': [name]} + }) + if len(valuemap_list) < 1: + return False, None + else: + return True, valuemap_list[0] + except Exception as e: + module.fail_json(msg="Failed to get ID of the valuemap '{name}': {e}".format(name=name, e=e)) + + +def diff(existing, new): + """Constructs the diff for Ansible's --diff option. + + Args: + existing (dict): Existing valuemap data. + new (dict): New valuemap data. + + Returns: + A dictionary like {'before': existing, 'after': new} + with filtered empty values. + """ + before = {} + after = {} + for key in new: + before[key] = existing[key] + if new[key] is None: + after[key] = '' + else: + after[key] = new[key] + return {'before': before, 'after': after} + + +def get_update_params(module, zbx, existing_valuemap, **kwargs): + """Filters only the parameters that are different and need to be updated. + + Args: + module: AnsibleModule object. + zbx: ZabbixAPI object. + existing_valuemap (dict): Existing valuemap. + **kwargs: Parameters for the new valuemap. + + Returns: + A tuple where the first element is a dictionary of parameters + that need to be updated and the second one is a dictionary + returned by diff() function with + existing valuemap data and new params passed to it. + """ + + params_to_update = {} + if sorted(existing_valuemap['mappings'], key=lambda k: k['value']) != sorted(kwargs['mappings'], key=lambda k: k['value']): + params_to_update['mappings'] = kwargs['mappings'] + return params_to_update, diff(existing_valuemap, kwargs) + + +def delete_valuemap(module, zbx, valuemap_id): + try: + return zbx.valuemap.delete([valuemap_id]) + except Exception as e: + module.fail_json(msg="Failed to delete valuemap '{_id}': {e}".format(_id=valuemap_id, e=e)) + + +def update_valuemap(module, zbx, **kwargs): + try: + valuemap_id = zbx.valuemap.update(kwargs) + except Exception as e: + module.fail_json(msg="Failed to update valuemap '{_id}': {e}".format(_id=kwargs['valuemapid'], e=e)) + + +def create_valuemap(module, zbx, **kwargs): + try: + valuemap_id = zbx.valuemap.create(kwargs) + except Exception as e: + module.fail_json(msg="Failed to create valuemap '{name}': {e}".format(name=kwargs['description'], e=e)) + + +def main(): + module = AnsibleModule( + argument_spec=dict( + server_url=dict(type='str', required=True, aliases=['url']), + login_user=dict(type='str', required=True), + login_password=dict(type='str', required=True, no_log=True), + http_login_user=dict(type='str', required=False, default=None), + http_login_password=dict(type='str', required=False, default=None, no_log=True), + validate_certs=dict(type='bool', required=False, default=True), + name=dict(type='str', required=True), + state=dict(type='str', default='present', choices=['present', 'absent']), + mappings=dict( + type='list', + elements='dict', + options=dict( + value=dict(type='str', required=True), + map_to=dict(type='str', required=True) + ) + ), + timeout=dict(type='int', default=10) + ), + supports_check_mode=True, + required_if=[ + ['state', 'present', ['mappings']], + ] + ) + + if not HAS_ZABBIX_API: + module.fail_json(msg=missing_required_lib('zabbix-api', url='https://pypi.org/project/zabbix-api/'), exception=ZBX_IMP_ERR) + + server_url = module.params['server_url'] + login_user = module.params['login_user'] + login_password = module.params['login_password'] + http_login_user = module.params['http_login_user'] + http_login_password = 
module.params['http_login_password'] + validate_certs = module.params['validate_certs'] + name = module.params['name'] + state = module.params['state'] + mappings = module.params['mappings'] + timeout = module.params['timeout'] + + zbx = None + # login to zabbix + try: + zbx = ZabbixAPI(server_url, timeout=timeout, user=http_login_user, passwd=http_login_password, + validate_certs=validate_certs) + zbx.login(login_user, login_password) + atexit.register(zbx.logout) + except Exception as e: + module.fail_json(msg="Failed to connect to Zabbix server: %s" % e) + + valuemap_exists, valuemap_object = check_if_valuemap_exists(module, zbx, name) + + parameters = construct_parameters( + name=name, + mappings=mappings + ) + + if valuemap_exists: + valuemap_id = valuemap_object['valuemapid'] + if state == 'absent': + if module.check_mode: + module.exit_json( + changed=True, + msg="Value map would have been deleted. Name: {name}, ID: {_id}".format( + name=name, + _id=valuemap_id + ) + ) + valuemap_id = delete_valuemap(module, zbx, valuemap_id) + module.exit_json( + changed=True, + msg="Value map deleted. Name: {name}, ID: {_id}".format( + name=name, + _id=valuemap_id + ) + ) + else: + params_to_update, diff = get_update_params(module, zbx, valuemap_object, **parameters) + if params_to_update == {}: + module.exit_json( + changed=False, + msg="Value map is up to date: {name}".format(name=name) + ) + else: + if module.check_mode: + module.exit_json( + changed=True, + diff=diff, + msg="Value map would have been updated. Name: {name}, ID: {_id}".format( + name=name, + _id=valuemap_id + ) + ) + valuemap_id = update_valuemap( + module, zbx, + valuemapid=valuemap_id, + **params_to_update + ) + module.exit_json( + changed=True, + diff=diff, + msg="Value map updated. Name: {name}, ID: {_id}".format( + name=name, + _id=valuemap_id + ) + ) + else: + if state == "absent": + module.exit_json(changed=False) + else: + if module.check_mode: + module.exit_json( + changed=True, + msg="Value map would have been created. Name: {name}, ID: {_id}".format( + name=name, + _id=valuemap_id + ) + ) + valuemap_id = create_valuemap(module, zbx, **parameters) + module.exit_json( + changed=True, + msg="Value map created: {name}, ID: {_id}".format( + name=name, + _id=valuemap_id + ) + ) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/net_tools/cloudflare_dns.py b/plugins/modules/net_tools/cloudflare_dns.py new file mode 100644 index 0000000000..ee9704e9e5 --- /dev/null +++ b/plugins/modules/net_tools/cloudflare_dns.py @@ -0,0 +1,875 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2016 Michael Gruener +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = r''' +--- +module: cloudflare_dns +author: +- Michael Gruener (@mgruener) +requirements: + - python >= 2.6 +short_description: Manage Cloudflare DNS records +description: + - "Manages dns records via the Cloudflare API, see the docs: U(https://api.cloudflare.com/)" +options: + api_token: + description: + - API token. + - Required for api token authentication. + - "You can obtain your API token from the bottom of the Cloudflare 'My Account' page, found here: U(https://dash.cloudflare.com/)" + type: str + required: false + account_api_key: + description: + - Account API key. 
+ - Required for api keys authentication. + - "You can obtain your API key from the bottom of the Cloudflare 'My Account' page, found here: U(https://dash.cloudflare.com/)" + type: str + required: false + aliases: [ account_api_token ] + account_email: + description: + - Account email. Required for api keys authentication. + type: str + required: false + algorithm: + description: + - Algorithm number. + - Required for C(type=DS) and C(type=SSHFP) when C(state=present). + type: int + cert_usage: + description: + - Certificate usage number. + - Required for C(type=TLSA) when C(state=present). + type: int + choices: [ 0, 1, 2, 3 ] + hash_type: + description: + - Hash type number. + - Required for C(type=DS), C(type=SSHFP) and C(type=TLSA) when C(state=present). + type: int + choices: [ 1, 2 ] + key_tag: + description: + - DNSSEC key tag. + - Needed for C(type=DS) when C(state=present). + type: int + port: + description: + - Service port. + - Required for C(type=SRV) and C(type=TLSA). + type: int + priority: + description: + - Record priority. + - Required for C(type=MX) and C(type=SRV) + default: 1 + proto: + description: + - Service protocol. Required for C(type=SRV) and C(type=TLSA). + - Common values are TCP and UDP. + - Before Ansible 2.6 only TCP and UDP were available. + type: str + proxied: + description: + - Proxy through Cloudflare network or just use DNS. + type: bool + default: no + record: + description: + - Record to add. + - Required if C(state=present). + - Default is C(@) (e.g. the zone name). + type: str + default: '@' + aliases: [ name ] + selector: + description: + - Selector number. + - Required for C(type=TLSA) when C(state=present). + choices: [ 0, 1 ] + type: int + service: + description: + - Record service. + - Required for C(type=SRV) + solo: + description: + - Whether the record should be the only one for that record type and record name. + - Only use with C(state=present). + - This will delete all other records with the same record name and type. + type: bool + state: + description: + - Whether the record(s) should exist or not. + type: str + choices: [ absent, present ] + default: present + timeout: + description: + - Timeout for Cloudflare API calls. + type: int + default: 30 + ttl: + description: + - The TTL to give the new record. + - Must be between 120 and 2,147,483,647 seconds, or 1 for automatic. + type: int + default: 1 + type: + description: + - The type of DNS record to create. Required if C(state=present). + - C(type=DS), C(type=SSHFP) and C(type=TLSA) added in Ansible 2.7. + type: str + choices: [ A, AAAA, CNAME, DS, MX, NS, SPF, SRV, SSHFP, TLSA, TXT ] + value: + description: + - The record value. + - Required for C(state=present). + type: str + aliases: [ content ] + weight: + description: + - Service weight. + - Required for C(type=SRV). + type: int + default: 1 + zone: + description: + - The name of the Zone to work with (e.g. "example.com"). + - The Zone must already exist. 
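+    - If I(record) does not already end with this zone name, the module appends the zone to form the record's FQDN.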
+ type: str + required: true + aliases: [ domain ] +''' + +EXAMPLES = r''' +- name: Create a test.example.net A record to point to 127.0.0.1 + cloudflare_dns: + zone: example.net + record: test + type: A + value: 127.0.0.1 + account_email: test@example.com + account_api_key: dummyapitoken + register: record + +- name: Create a record using api token + cloudflare_dns: + zone: example.net + record: test + type: A + value: 127.0.0.1 + api_token: dummyapitoken + +- name: Create a example.net CNAME record to example.com + cloudflare_dns: + zone: example.net + type: CNAME + value: example.com + account_email: test@example.com + account_api_key: dummyapitoken + state: present + +- name: Change its TTL + cloudflare_dns: + zone: example.net + type: CNAME + value: example.com + ttl: 600 + account_email: test@example.com + account_api_key: dummyapitoken + state: present + +- name: Delete the record + cloudflare_dns: + zone: example.net + type: CNAME + value: example.com + account_email: test@example.com + account_api_key: dummyapitoken + state: absent + +- name: create a example.net CNAME record to example.com and proxy through Cloudflare's network + cloudflare_dns: + zone: example.net + type: CNAME + value: example.com + proxied: yes + account_email: test@example.com + account_api_key: dummyapitoken + state: present + +# This deletes all other TXT records named "test.example.net" +- name: Create TXT record "test.example.net" with value "unique value" + cloudflare_dns: + domain: example.net + record: test + type: TXT + value: unique value + solo: true + account_email: test@example.com + account_api_key: dummyapitoken + state: present + +- name: Create an SRV record _foo._tcp.example.net + cloudflare_dns: + domain: example.net + service: foo + proto: tcp + port: 3500 + priority: 10 + weight: 20 + type: SRV + value: fooserver.example.net + +- name: Create a SSHFP record login.example.com + cloudflare_dns: + zone: example.com + record: login + type: SSHFP + algorithm: 4 + hash_type: 2 + value: 9dc1d6742696d2f51ca1f1a78b3d16a840f7d111eb9454239e70db31363f33e1 + +- name: Create a TLSA record _25._tcp.mail.example.com + cloudflare_dns: + zone: example.com + record: mail + port: 25 + proto: tcp + type: TLSA + cert_usage: 3 + selector: 1 + hash_type: 1 + value: 6b76d034492b493e15a7376fccd08e63befdad0edab8e442562f532338364bf3 + +- name: Create a DS record for subdomain.example.com + cloudflare_dns: + zone: example.com + record: subdomain + type: DS + key_tag: 5464 + algorithm: 8 + hash_type: 2 + value: B4EB5AC4467D2DFB3BAF9FB9961DC1B6FED54A58CDFAA3E465081EC86F89BFAB +''' + +RETURN = r''' +record: + description: A dictionary containing the record data. + returned: success, except on record deletion + type: complex + contains: + content: + description: The record content (details depend on record type). + returned: success + type: str + sample: 192.0.2.91 + created_on: + description: The record creation date. + returned: success + type: str + sample: "2016-03-25T19:09:42.516553Z" + data: + description: Additional record data. + returned: success, if type is SRV, DS, SSHFP or TLSA + type: dict + sample: { + name: "jabber", + port: 8080, + priority: 10, + proto: "_tcp", + service: "_xmpp", + target: "jabberhost.sample.com", + weight: 5, + } + id: + description: The record ID. + returned: success + type: str + sample: f9efb0549e96abcb750de63b38c9576e + locked: + description: No documentation available. + returned: success + type: bool + sample: False + meta: + description: No documentation available. 
+ returned: success + type: dict + sample: { auto_added: false } + modified_on: + description: Record modification date. + returned: success + type: str + sample: "2016-03-25T19:09:42.516553Z" + name: + description: The record name as FQDN (including _service and _proto for SRV). + returned: success + type: str + sample: www.sample.com + priority: + description: Priority of the MX record. + returned: success, if type is MX + type: int + sample: 10 + proxiable: + description: Whether this record can be proxied through Cloudflare. + returned: success + type: bool + sample: False + proxied: + description: Whether the record is proxied through Cloudflare. + returned: success + type: bool + sample: False + ttl: + description: The time-to-live for the record. + returned: success + type: int + sample: 300 + type: + description: The record type. + returned: success + type: str + sample: A + zone_id: + description: The ID of the zone containing the record. + returned: success + type: str + sample: abcede0bf9f0066f94029d2e6b73856a + zone_name: + description: The name of the zone containing the record. + returned: success + type: str + sample: sample.com +''' + +import json + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.six.moves.urllib.parse import urlencode +from ansible.module_utils._text import to_native, to_text +from ansible.module_utils.urls import fetch_url + + +def lowercase_string(param): + if not isinstance(param, str): + return param + return param.lower() + + +class CloudflareAPI(object): + + cf_api_endpoint = 'https://api.cloudflare.com/client/v4' + changed = False + + def __init__(self, module): + self.module = module + self.api_token = module.params['api_token'] + self.account_api_key = module.params['account_api_key'] + self.account_email = module.params['account_email'] + self.algorithm = module.params['algorithm'] + self.cert_usage = module.params['cert_usage'] + self.hash_type = module.params['hash_type'] + self.key_tag = module.params['key_tag'] + self.port = module.params['port'] + self.priority = module.params['priority'] + self.proto = lowercase_string(module.params['proto']) + self.proxied = module.params['proxied'] + self.selector = module.params['selector'] + self.record = lowercase_string(module.params['record']) + self.service = lowercase_string(module.params['service']) + self.is_solo = module.params['solo'] + self.state = module.params['state'] + self.timeout = module.params['timeout'] + self.ttl = module.params['ttl'] + self.type = module.params['type'] + self.value = module.params['value'] + self.weight = module.params['weight'] + self.zone = lowercase_string(module.params['zone']) + + if self.record == '@': + self.record = self.zone + + if (self.type in ['CNAME', 'NS', 'MX', 'SRV']) and (self.value is not None): + self.value = self.value.rstrip('.').lower() + + if (self.type == 'AAAA') and (self.value is not None): + self.value = self.value.lower() + + if (self.type == 'SRV'): + if (self.proto is not None) and (not self.proto.startswith('_')): + self.proto = '_' + self.proto + if (self.service is not None) and (not self.service.startswith('_')): + self.service = '_' + self.service + + if (self.type == 'TLSA'): + if (self.proto is not None) and (not self.proto.startswith('_')): + self.proto = '_' + self.proto + if (self.port is not None): + self.port = '_' + str(self.port) + + if not self.record.endswith(self.zone): + self.record = self.record + '.' 
+ self.zone + + if (self.type == 'DS'): + if self.record == self.zone: + self.module.fail_json(msg="DS records only apply to subdomains.") + + def _cf_simple_api_call(self, api_call, method='GET', payload=None): + if self.api_token: + headers = { + 'Authorization': 'Bearer ' + self.api_token, + 'Content-Type': 'application/json', + } + else: + headers = { + 'X-Auth-Email': self.account_email, + 'X-Auth-Key': self.account_api_key, + 'Content-Type': 'application/json', + } + data = None + if payload: + try: + data = json.dumps(payload) + except Exception as e: + self.module.fail_json(msg="Failed to encode payload as JSON: %s " % to_native(e)) + + resp, info = fetch_url(self.module, + self.cf_api_endpoint + api_call, + headers=headers, + data=data, + method=method, + timeout=self.timeout) + + if info['status'] not in [200, 304, 400, 401, 403, 429, 405, 415]: + self.module.fail_json(msg="Failed API call {0}; got unexpected HTTP code {1}".format(api_call, info['status'])) + + error_msg = '' + if info['status'] == 401: + # Unauthorized + error_msg = "API user does not have permission; Status: {0}; Method: {1}: Call: {2}".format(info['status'], method, api_call) + elif info['status'] == 403: + # Forbidden + error_msg = "API request not authenticated; Status: {0}; Method: {1}: Call: {2}".format(info['status'], method, api_call) + elif info['status'] == 429: + # Too many requests + error_msg = "API client is rate limited; Status: {0}; Method: {1}: Call: {2}".format(info['status'], method, api_call) + elif info['status'] == 405: + # Method not allowed + error_msg = "API incorrect HTTP method provided; Status: {0}; Method: {1}: Call: {2}".format(info['status'], method, api_call) + elif info['status'] == 415: + # Unsupported Media Type + error_msg = "API request is not valid JSON; Status: {0}; Method: {1}: Call: {2}".format(info['status'], method, api_call) + elif info['status'] == 400: + # Bad Request + error_msg = "API bad request; Status: {0}; Method: {1}: Call: {2}".format(info['status'], method, api_call) + + result = None + try: + content = resp.read() + except AttributeError: + if info['body']: + content = info['body'] + else: + error_msg += "; The API response was empty" + + if content: + try: + result = json.loads(to_text(content, errors='surrogate_or_strict')) + except (getattr(json, 'JSONDecodeError', ValueError)) as e: + error_msg += "; Failed to parse API response with error {0}: {1}".format(to_native(e), content) + + # Without a valid/parsed JSON response no more error processing can be done + if result is None: + self.module.fail_json(msg=error_msg) + + if not result['success']: + error_msg += "; Error details: " + for error in result['errors']: + error_msg += "code: {0}, error: {1}; ".format(error['code'], error['message']) + if 'error_chain' in error: + for chain_error in error['error_chain']: + error_msg += "code: {0}, error: {1}; ".format(chain_error['code'], chain_error['message']) + self.module.fail_json(msg=error_msg) + + return result, info['status'] + + def _cf_api_call(self, api_call, method='GET', payload=None): + result, status = self._cf_simple_api_call(api_call, method, payload) + + data = result['result'] + + if 'result_info' in result: + pagination = result['result_info'] + if pagination['total_pages'] > 1: + next_page = int(pagination['page']) + 1 + parameters = ['page={0}'.format(next_page)] + # strip "page" parameter from call parameters (if there are any) + if '?' 
in api_call: + raw_api_call, query = api_call.split('?', 1) + parameters += [param for param in query.split('&') if not param.startswith('page')] + else: + raw_api_call = api_call + while next_page <= pagination['total_pages']: + raw_api_call += '?' + '&'.join(parameters) + result, status = self._cf_simple_api_call(raw_api_call, method, payload) + data += result['result'] + next_page += 1 + + return data, status + + def _get_zone_id(self, zone=None): + if not zone: + zone = self.zone + + zones = self.get_zones(zone) + if len(zones) > 1: + self.module.fail_json(msg="More than one zone matches {0}".format(zone)) + + if len(zones) < 1: + self.module.fail_json(msg="No zone found with name {0}".format(zone)) + + return zones[0]['id'] + + def get_zones(self, name=None): + if not name: + name = self.zone + param = '' + if name: + param = '?' + urlencode({'name': name}) + zones, status = self._cf_api_call('/zones' + param) + return zones + + def get_dns_records(self, zone_name=None, type=None, record=None, value=''): + if not zone_name: + zone_name = self.zone + if not type: + type = self.type + if not record: + record = self.record + # necessary because None as value means to override user + # set module value + if (not value) and (value is not None): + value = self.value + + zone_id = self._get_zone_id() + api_call = '/zones/{0}/dns_records'.format(zone_id) + query = {} + if type: + query['type'] = type + if record: + query['name'] = record + if value: + query['content'] = value + if query: + api_call += '?' + urlencode(query) + + records, status = self._cf_api_call(api_call) + return records + + def delete_dns_records(self, **kwargs): + params = {} + for param in ['port', 'proto', 'service', 'solo', 'type', 'record', 'value', 'weight', 'zone', + 'algorithm', 'cert_usage', 'hash_type', 'selector', 'key_tag']: + if param in kwargs: + params[param] = kwargs[param] + else: + params[param] = getattr(self, param) + + records = [] + content = params['value'] + search_record = params['record'] + if params['type'] == 'SRV': + if not (params['value'] is None or params['value'] == ''): + content = str(params['weight']) + '\t' + str(params['port']) + '\t' + params['value'] + search_record = params['service'] + '.' + params['proto'] + '.' + params['record'] + elif params['type'] == 'DS': + if not (params['value'] is None or params['value'] == ''): + content = str(params['key_tag']) + '\t' + str(params['algorithm']) + '\t' + str(params['hash_type']) + '\t' + params['value'] + elif params['type'] == 'SSHFP': + if not (params['value'] is None or params['value'] == ''): + content = str(params['algorithm']) + '\t' + str(params['hash_type']) + '\t' + params['value'] + elif params['type'] == 'TLSA': + if not (params['value'] is None or params['value'] == ''): + content = str(params['cert_usage']) + '\t' + str(params['selector']) + '\t' + str(params['hash_type']) + '\t' + params['value'] + search_record = params['port'] + '.' + params['proto'] + '.' 
+ params['record'] + if params['solo']: + search_value = None + else: + search_value = content + + records = self.get_dns_records(params['zone'], params['type'], search_record, search_value) + + for rr in records: + if params['solo']: + if not ((rr['type'] == params['type']) and (rr['name'] == search_record) and (rr['content'] == content)): + self.changed = True + if not self.module.check_mode: + result, info = self._cf_api_call('/zones/{0}/dns_records/{1}'.format(rr['zone_id'], rr['id']), 'DELETE') + else: + self.changed = True + if not self.module.check_mode: + result, info = self._cf_api_call('/zones/{0}/dns_records/{1}'.format(rr['zone_id'], rr['id']), 'DELETE') + return self.changed + + def ensure_dns_record(self, **kwargs): + params = {} + for param in ['port', 'priority', 'proto', 'proxied', 'service', 'ttl', 'type', 'record', 'value', 'weight', 'zone', + 'algorithm', 'cert_usage', 'hash_type', 'selector', 'key_tag']: + if param in kwargs: + params[param] = kwargs[param] + else: + params[param] = getattr(self, param) + + search_value = params['value'] + search_record = params['record'] + new_record = None + if (params['type'] is None) or (params['record'] is None): + self.module.fail_json(msg="You must provide a type and a record to create a new record") + + if (params['type'] in ['A', 'AAAA', 'CNAME', 'TXT', 'MX', 'NS', 'SPF']): + if not params['value']: + self.module.fail_json(msg="You must provide a non-empty value to create this record type") + + # there can only be one CNAME per record + # ignoring the value when searching for existing + # CNAME records allows us to update the value if it + # changes + if params['type'] == 'CNAME': + search_value = None + + new_record = { + "type": params['type'], + "name": params['record'], + "content": params['value'], + "ttl": params['ttl'] + } + + if (params['type'] in ['A', 'AAAA', 'CNAME']): + new_record["proxied"] = params["proxied"] + + if params['type'] == 'MX': + for attr in [params['priority'], params['value']]: + if (attr is None) or (attr == ''): + self.module.fail_json(msg="You must provide priority and a value to create this record type") + new_record = { + "type": params['type'], + "name": params['record'], + "content": params['value'], + "priority": params['priority'], + "ttl": params['ttl'] + } + + if params['type'] == 'SRV': + for attr in [params['port'], params['priority'], params['proto'], params['service'], params['weight'], params['value']]: + if (attr is None) or (attr == ''): + self.module.fail_json(msg="You must provide port, priority, proto, service, weight and a value to create this record type") + srv_data = { + "target": params['value'], + "port": params['port'], + "weight": params['weight'], + "priority": params['priority'], + "name": params['record'][:-len('.' + params['zone'])], + "proto": params['proto'], + "service": params['service'] + } + new_record = {"type": params['type'], "ttl": params['ttl'], 'data': srv_data} + search_value = str(params['weight']) + '\t' + str(params['port']) + '\t' + params['value'] + search_record = params['service'] + '.' + params['proto'] + '.' 
+ params['record'] + + if params['type'] == 'DS': + for attr in [params['key_tag'], params['algorithm'], params['hash_type'], params['value']]: + if (attr is None) or (attr == ''): + self.module.fail_json(msg="You must provide key_tag, algorithm, hash_type and a value to create this record type") + ds_data = { + "key_tag": params['key_tag'], + "algorithm": params['algorithm'], + "digest_type": params['hash_type'], + "digest": params['value'], + } + new_record = { + "type": params['type'], + "name": params['record'], + 'data': ds_data, + "ttl": params['ttl'], + } + search_value = str(params['key_tag']) + '\t' + str(params['algorithm']) + '\t' + str(params['hash_type']) + '\t' + params['value'] + + if params['type'] == 'SSHFP': + for attr in [params['algorithm'], params['hash_type'], params['value']]: + if (attr is None) or (attr == ''): + self.module.fail_json(msg="You must provide algorithm, hash_type and a value to create this record type") + sshfp_data = { + "fingerprint": params['value'], + "type": params['hash_type'], + "algorithm": params['algorithm'], + } + new_record = { + "type": params['type'], + "name": params['record'], + 'data': sshfp_data, + "ttl": params['ttl'], + } + search_value = str(params['algorithm']) + '\t' + str(params['hash_type']) + '\t' + params['value'] + + if params['type'] == 'TLSA': + for attr in [params['port'], params['proto'], params['cert_usage'], params['selector'], params['hash_type'], params['value']]: + if (attr is None) or (attr == ''): + self.module.fail_json(msg="You must provide port, proto, cert_usage, selector, hash_type and a value to create this record type") + search_record = params['port'] + '.' + params['proto'] + '.' + params['record'] + tlsa_data = { + "usage": params['cert_usage'], + "selector": params['selector'], + "matching_type": params['hash_type'], + "certificate": params['value'], + } + new_record = { + "type": params['type'], + "name": search_record, + 'data': tlsa_data, + "ttl": params['ttl'], + } + search_value = str(params['cert_usage']) + '\t' + str(params['selector']) + '\t' + str(params['hash_type']) + '\t' + params['value'] + + zone_id = self._get_zone_id(params['zone']) + records = self.get_dns_records(params['zone'], params['type'], search_record, search_value) + # in theory this should be impossible as Cloudflare does not allow + # the creation of duplicate records, but let's cover it anyway + if len(records) > 1: + self.module.fail_json(msg="More than one record already exists for the given attributes.
That should be impossible, please open an issue!") + # record already exists, check if it must be updated + if len(records) == 1: + cur_record = records[0] + do_update = False + if (params['ttl'] is not None) and (cur_record['ttl'] != params['ttl']): + do_update = True + if (params['priority'] is not None) and ('priority' in cur_record) and (cur_record['priority'] != params['priority']): + do_update = True + if ('proxied' in new_record) and ('proxied' in cur_record) and (cur_record['proxied'] != params['proxied']): + do_update = True + if ('data' in new_record) and ('data' in cur_record): + if (cur_record['data'] != new_record['data']): + do_update = True + if (params['type'] == 'CNAME') and (cur_record['content'] != new_record['content']): + do_update = True + if do_update: + if self.module.check_mode: + result = new_record + else: + result, info = self._cf_api_call('/zones/{0}/dns_records/{1}'.format(zone_id, records[0]['id']), 'PUT', new_record) + self.changed = True + return result, self.changed + else: + return records, self.changed + if self.module.check_mode: + result = new_record + else: + result, info = self._cf_api_call('/zones/{0}/dns_records'.format(zone_id), 'POST', new_record) + self.changed = True + return result, self.changed + + +def main(): + module = AnsibleModule( + argument_spec=dict( + api_token=dict(type='str', required=False, no_log=True), + account_api_key=dict(type='str', required=False, no_log=True, aliases=['account_api_token']), + account_email=dict(type='str', required=False), + algorithm=dict(type='int'), + cert_usage=dict(type='int', choices=[0, 1, 2, 3]), + hash_type=dict(type='int', choices=[1, 2]), + key_tag=dict(type='int'), + port=dict(type='int'), + priority=dict(type='int', default=1), + proto=dict(type='str'), + proxied=dict(type='bool', default=False), + record=dict(type='str', default='@', aliases=['name']), + selector=dict(type='int', choices=[0, 1]), + service=dict(type='str'), + solo=dict(type='bool'), + state=dict(type='str', default='present', choices=['absent', 'present']), + timeout=dict(type='int', default=30), + ttl=dict(type='int', default=1), + type=dict(type='str', choices=['A', 'AAAA', 'CNAME', 'DS', 'MX', 'NS', 'SPF', 'SRV', 'SSHFP', 'TLSA', 'TXT']), + value=dict(type='str', aliases=['content']), + weight=dict(type='int', default=1), + zone=dict(type='str', required=True, aliases=['domain']), + ), + supports_check_mode=True, + required_if=[ + ('state', 'present', ['record', 'type', 'value']), + ('state', 'absent', ['record']), + ('type', 'SRV', ['proto', 'service']), + ('type', 'TLSA', ['proto', 'port']), + ], + ) + + if not module.params['api_token'] and not (module.params['account_api_key'] and module.params['account_email']): + module.fail_json(msg="Either api_token or account_api_key and account_email params are required.") + if module.params['type'] == 'SRV': + if not ((module.params['weight'] is not None and module.params['port'] is not None + and not (module.params['value'] is None or module.params['value'] == '')) + or (module.params['weight'] is None and module.params['port'] is None + and (module.params['value'] is None or module.params['value'] == ''))): + module.fail_json(msg="For SRV records the params weight, port and value all need to be defined, or not at all.") + + if module.params['type'] == 'SSHFP': + if not ((module.params['algorithm'] is not None and module.params['hash_type'] is not None + and not (module.params['value'] is None or module.params['value'] == '')) + or (module.params['algorithm'] is None and 
module.params['hash_type'] is None + and (module.params['value'] is None or module.params['value'] == ''))): + module.fail_json(msg="For SSHFP records the params algorithm, hash_type and value all need to be defined, or not at all.") + + if module.params['type'] == 'TLSA': + if not ((module.params['cert_usage'] is not None and module.params['selector'] is not None and module.params['hash_type'] is not None + and not (module.params['value'] is None or module.params['value'] == '')) + or (module.params['cert_usage'] is None and module.params['selector'] is None and module.params['hash_type'] is None + and (module.params['value'] is None or module.params['value'] == ''))): + module.fail_json(msg="For TLSA records the params cert_usage, selector, hash_type and value all need to be defined, or not at all.") + + if module.params['type'] == 'DS': + if not ((module.params['key_tag'] is not None and module.params['algorithm'] is not None and module.params['hash_type'] is not None + and not (module.params['value'] is None or module.params['value'] == '')) + or (module.params['key_tag'] is None and module.params['algorithm'] is None and module.params['hash_type'] is None + and (module.params['value'] is None or module.params['value'] == ''))): + module.fail_json(msg="For DS records the params key_tag, algorithm, hash_type and value all need to be defined, or not at all.") + + changed = False + cf_api = CloudflareAPI(module) + + # sanity checks + if cf_api.is_solo and cf_api.state == 'absent': + module.fail_json(msg="solo=true can only be used with state=present") + + # perform add, delete or update (only the TTL can be updated) of one or + # more records + if cf_api.state == 'present': + # delete all records matching record name + type + if cf_api.is_solo: + changed = cf_api.delete_dns_records(solo=cf_api.is_solo) + result, changed = cf_api.ensure_dns_record() + if isinstance(result, list): + module.exit_json(changed=changed, result={'record': result[0]}) + + module.exit_json(changed=changed, result={'record': result}) + else: + # force solo to False, just to be sure + changed = cf_api.delete_dns_records(solo=False) + module.exit_json(changed=changed) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/net_tools/dnsimple.py b/plugins/modules/net_tools/dnsimple.py new file mode 100644 index 0000000000..be376d0284 --- /dev/null +++ b/plugins/modules/net_tools/dnsimple.py @@ -0,0 +1,343 @@ +#!/usr/bin/python +# +# Copyright: Ansible Project +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = { + 'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community' +} + + +DOCUMENTATION = ''' +--- +module: dnsimple +short_description: Interface with dnsimple.com (a DNS hosting service) +description: + - "Manages domains and records via the DNSimple API, see the docs: U(http://developer.dnsimple.com/)." +notes: + - DNSimple API v1 is deprecated. Please install dnsimple-python>=1.0.0 which uses v2 API. +options: + account_email: + description: + - Account email. If omitted, the environment variables C(DNSIMPLE_EMAIL) and C(DNSIMPLE_API_TOKEN) will be looked for. + - "If those aren't found, a C(.dnsimple) file will be looked for, see: U(https://github.com/mikemaccana/dnsimple-python#getting-started)." + type: str + account_api_token: + description: + - Account API token. See I(account_email) for more information. 
+ type: str + domain: + description: + - Domain to work with. Can be the domain name (e.g. "mydomain.com") or the numeric ID of the domain in DNSimple. + - If omitted, a list of domains will be returned. + - If domain is present but the domain doesn't exist, it will be created. + type: str + record: + description: + - Record to add, if blank a record for the domain will be created, supports the wildcard (*). + type: str + record_ids: + description: + - List of records to ensure they either exist or do not exist. + type: list + type: + description: + - The type of DNS record to create. + choices: [ 'A', 'ALIAS', 'CNAME', 'MX', 'SPF', 'URL', 'TXT', 'NS', 'SRV', 'NAPTR', 'PTR', 'AAAA', 'SSHFP', 'HINFO', 'POOL' ] + type: str + ttl: + description: + - The TTL to give the new record in seconds. + default: 3600 + type: int + value: + description: + - Record value. + - Must be specified when trying to ensure a record exists. + type: str + priority: + description: + - Record priority. + type: int + state: + description: + - whether the record should exist or not. + choices: [ 'present', 'absent' ] + default: present + type: str + solo: + description: + - Whether the record should be the only one for that record type and record name. + - Only use with C(state) is set to C(present) on a record. + type: 'bool' + default: no +requirements: + - "dnsimple >= 1.0.0" +author: "Alex Coomans (@drcapulet)" +''' + +EXAMPLES = ''' +- name: Authenticate using email and API token and fetch all domains + dnsimple: + account_email: test@example.com + account_api_token: dummyapitoken + delegate_to: localhost + +- name: Fetch my.com domain records + dnsimple: + domain: my.com + state: present + delegate_to: localhost + register: records + +- name: Delete a domain + dnsimple: + domain: my.com + state: absent + delegate_to: localhost + +- name: Create a test.my.com A record to point to 127.0.0.1 + dnsimple: + domain: my.com + record: test + type: A + value: 127.0.0.1 + delegate_to: localhost + register: record + +- name: Delete record using record_ids + dnsimple: + domain: my.com + record_ids: '{{ record["id"] }}' + state: absent + delegate_to: localhost + +- name: Create a my.com CNAME record to example.com + dnsimple: + domain: my.com + record: '' + type: CNAME + value: example.com + state: present + delegate_to: localhost + +- name: change TTL value for a record + dnsimple: + domain: my.com + record: '' + type: CNAME + value: example.com + ttl: 600 + state: present + delegate_to: localhost + +- name: Delete the record + dnsimple: + domain: my.com + record: '' + type: CNAME + value: example.com + state: absent + delegate_to: localhost +''' + +RETURN = r"""# """ + +import os +import traceback +from distutils.version import LooseVersion + +DNSIMPLE_IMP_ERR = None +try: + from dnsimple import DNSimple + from dnsimple.dnsimple import __version__ as dnsimple_version + from dnsimple.dnsimple import DNSimpleException + HAS_DNSIMPLE = True +except ImportError: + DNSIMPLE_IMP_ERR = traceback.format_exc() + HAS_DNSIMPLE = False + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib + + +def main(): + module = AnsibleModule( + argument_spec=dict( + account_email=dict(type='str'), + account_api_token=dict(type='str', no_log=True), + domain=dict(type='str'), + record=dict(type='str'), + record_ids=dict(type='list'), + type=dict(type='str', choices=['A', 'ALIAS', 'CNAME', 'MX', 'SPF', 'URL', 'TXT', 'NS', 'SRV', 'NAPTR', 'PTR', 'AAAA', 'SSHFP', 'HINFO', + 'POOL']), + ttl=dict(type='int', default=3600), + 
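# 'value' must accompany 'record' (they are tied via required_together below) +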
value=dict(type='str'), + priority=dict(type='int'), + state=dict(type='str', choices=['present', 'absent'], default='present'), + solo=dict(type='bool', default=False), + ), + required_together=[ + ['record', 'value'] + ], + supports_check_mode=True, + ) + + if not HAS_DNSIMPLE: + module.fail_json(msg=missing_required_lib('dnsimple'), exception=DNSIMPLE_IMP_ERR) + + if LooseVersion(dnsimple_version) < LooseVersion('1.0.0'): + module.fail_json(msg="Current version of dnsimple Python module [%s] uses 'v1' API which is deprecated." + " Please upgrade to version 1.0.0 and above to use dnsimple 'v2' API." % dnsimple_version) + + account_email = module.params.get('account_email') + account_api_token = module.params.get('account_api_token') + domain = module.params.get('domain') + record = module.params.get('record') + record_ids = module.params.get('record_ids') + record_type = module.params.get('type') + ttl = module.params.get('ttl') + value = module.params.get('value') + priority = module.params.get('priority') + state = module.params.get('state') + is_solo = module.params.get('solo') + + if account_email and account_api_token: + client = DNSimple(email=account_email, api_token=account_api_token) + elif os.environ.get('DNSIMPLE_EMAIL') and os.environ.get('DNSIMPLE_API_TOKEN'): + client = DNSimple(email=os.environ.get('DNSIMPLE_EMAIL'), api_token=os.environ.get('DNSIMPLE_API_TOKEN')) + else: + client = DNSimple() + + try: + # Let's figure out what operation we want to do + + # No domain, return a list + if not domain: + domains = client.domains() + module.exit_json(changed=False, result=[d['domain'] for d in domains]) + + # Domain & No record + if domain and record is None and not record_ids: + domains = [d['domain'] for d in client.domains()] + if domain.isdigit(): + dr = next((d for d in domains if d['id'] == int(domain)), None) + else: + dr = next((d for d in domains if d['name'] == domain), None) + if state == 'present': + if dr: + module.exit_json(changed=False, result=dr) + else: + if module.check_mode: + module.exit_json(changed=True) + else: + module.exit_json(changed=True, result=client.add_domain(domain)['domain']) + + # state is absent + else: + if dr: + if not module.check_mode: + client.delete(domain) + module.exit_json(changed=True) + else: + module.exit_json(changed=False) + + # need the not none check since record could be an empty string + if domain and record is not None: + records = [r['record'] for r in client.records(str(domain), params={'name': record})] + + if not record_type: + module.fail_json(msg="Missing the record type") + + if not value: + module.fail_json(msg="Missing the record value") + + rr = next((r for r in records if r['name'] == record and r['type'] == record_type and r['content'] == value), None) + + if state == 'present': + changed = False + if is_solo: + # delete any records that have the same name and record type + same_type = [r['id'] for r in records if r['name'] == record and r['type'] == record_type] + if rr: + same_type = [rid for rid in same_type if rid != rr['id']] + if same_type: + if not module.check_mode: + for rid in same_type: + client.delete_record(str(domain), rid) + changed = True + if rr: + # check if we need to update + if rr['ttl'] != ttl or rr['priority'] != priority: + data = {} + if ttl: + data['ttl'] = ttl + if priority: + data['priority'] = priority + if module.check_mode: + module.exit_json(changed=True) + else: + module.exit_json(changed=True, result=client.update_record(str(domain), str(rr['id']), data)['record']) + else: + 
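# ttl and priority already match; 'changed' still reflects any records + # removed above by the solo cleanup +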
module.exit_json(changed=changed, result=rr) + else: + # create it + data = { + 'name': record, + 'type': record_type, + 'content': value, + } + if ttl: + data['ttl'] = ttl + if priority: + data['priority'] = priority + if module.check_mode: + module.exit_json(changed=True) + else: + module.exit_json(changed=True, result=client.add_record(str(domain), data)['record']) + + # state is absent + else: + if rr: + if not module.check_mode: + client.delete_record(str(domain), rr['id']) + module.exit_json(changed=True) + else: + module.exit_json(changed=False) + + # Make sure these record_ids either all exist or none + if domain and record_ids: + current_records = [str(r['record']['id']) for r in client.records(str(domain))] + wanted_records = [str(r) for r in record_ids] + if state == 'present': + difference = list(set(wanted_records) - set(current_records)) + if difference: + module.fail_json(msg="Missing the following records: %s" % difference) + else: + module.exit_json(changed=False) + + # state is absent + else: + difference = list(set(wanted_records) & set(current_records)) + if difference: + if not module.check_mode: + for rid in difference: + client.delete_record(str(domain), rid) + module.exit_json(changed=True) + else: + module.exit_json(changed=False) + + except DNSimpleException as e: + module.fail_json(msg="Unable to contact DNSimple: %s" % e.message) + + module.fail_json(msg="Unknown what you wanted me to do") + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/net_tools/dnsmadeeasy.py b/plugins/modules/net_tools/dnsmadeeasy.py new file mode 100644 index 0000000000..3c5fc253c5 --- /dev/null +++ b/plugins/modules/net_tools/dnsmadeeasy.py @@ -0,0 +1,697 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: dnsmadeeasy +short_description: Interface with dnsmadeeasy.com (a DNS hosting service). +description: + - > + Manages DNS records via the v2 REST API of the DNS Made Easy service. It handles records only; there is no manipulation of domains or + monitor/account support yet. See: U(https://www.dnsmadeeasy.com/integration/restapi/) +options: + account_key: + description: + - Account API Key. + required: true + + account_secret: + description: + - Account Secret Key. + required: true + + domain: + description: + - Domain to work with. Can be the domain name (e.g. "mydomain.com") or the numeric ID of the domain in DNS Made Easy (e.g. "839989") for faster + resolution + required: true + + sandbox: + description: + - Decides if the sandbox API should be used. Otherwise (default) the production API of DNS Made Easy is used. + type: bool + default: 'no' + + record_name: + description: + - Record name to get/create/delete/update. If record_name is not specified; all records for the domain will be returned in "result" regardless + of the state argument. + + record_type: + description: + - Record type. + choices: [ 'A', 'AAAA', 'CNAME', 'ANAME', 'HTTPRED', 'MX', 'NS', 'PTR', 'SRV', 'TXT' ] + + record_value: + description: + - > + Record value. 
HTTPRED: <redirection URL>, MX: <priority> <target name>, NS: <name server>, PTR: <target name>, + SRV: <priority> <weight> <port> <target name>, TXT: <text value>" + - > + If record_value is not specified, no changes will be made and the record will be returned in 'result' + (in other words, this module can be used to fetch a record's current id, type, and ttl) + + record_ttl: + description: + - record's "Time to live". Number of seconds the record remains cached in DNS servers. + default: 1800 + + state: + description: + - whether the record should exist or not + required: true + choices: [ 'present', 'absent' ] + + validate_certs: + description: + - If C(no), SSL certificates will not be validated. This should only be used + on personally controlled sites using self-signed certificates. + type: bool + default: 'yes' + + monitor: + description: + - If C(yes), add or change the monitor. This is applicable only for A records. + type: bool + default: 'no' + + systemDescription: + description: + - Description used by the monitor. + required: true + default: '' + + maxEmails: + description: + - Number of emails sent to the contact list by the monitor. + required: true + default: 1 + + protocol: + description: + - Protocol used by the monitor. + required: true + default: 'HTTP' + choices: ['TCP', 'UDP', 'HTTP', 'DNS', 'SMTP', 'HTTPS'] + + port: + description: + - Port used by the monitor. + required: true + default: 80 + + sensitivity: + description: + - Number of checks the monitor performs before a failover occurs, where Low = 8, Medium = 5, and High = 3. + required: true + default: 'Medium' + choices: ['Low', 'Medium', 'High'] + + contactList: + description: + - Name or id of the contact list that the monitor will notify. + - The default C('') means the Account Owner. + required: true + default: '' + + httpFqdn: + description: + - The fully qualified domain name used by the monitor. + + httpFile: + description: + - The file at the Fqdn that the monitor queries for HTTP or HTTPS. + + httpQueryString: + description: + - The string in the httpFile that the monitor queries for HTTP or HTTPS. + + failover: + description: + - If C(yes), add or change the failover. This is applicable only for A records. + type: bool + default: 'no' + + autoFailover: + description: + - If true, fallback to the primary IP address is manual after a failover. + - If false, fallback to the primary IP address is automatic after a failover. + type: bool + default: 'no' + + ip1: + description: + - Primary IP address for the failover. + - Required if adding or changing the monitor or failover. + + ip2: + description: + - Secondary IP address for the failover. + - Required if adding or changing the failover. + + ip3: + description: + - Tertiary IP address for the failover. + + ip4: + description: + - Quaternary IP address for the failover. + + ip5: + description: + - Quinary IP address for the failover. + +notes: + - The DNS Made Easy service requires that machines interacting with the API have the proper time and timezone set. Be sure you are within a few + seconds of actual time by using NTP. + - This module returns record(s) and monitor(s) in the "result" element when 'state' is set to 'present'. + These values can be registered and used in your playbooks. + - Only A records can have a monitor or failover. + - To add failover, the 'failover', 'autoFailover', 'port', 'protocol', 'ip1', and 'ip2' options are required. + - To add monitor, the 'monitor', 'port', 'protocol', 'maxEmails', 'systemDescription', and 'ip1' options are required. + - The monitor and the failover will share 'port', 'protocol', and 'ip1' options.
+ +requirements: [ hashlib, hmac ] +author: "Brice Burgess (@briceburg)" +''' + +EXAMPLES = ''' +# fetch my.com domain records +- dnsmadeeasy: + account_key: key + account_secret: secret + domain: my.com + state: present + register: response + +# create / ensure the presence of a record +- dnsmadeeasy: + account_key: key + account_secret: secret + domain: my.com + state: present + record_name: test + record_type: A + record_value: 127.0.0.1 + +# update the previously created record +- dnsmadeeasy: + account_key: key + account_secret: secret + domain: my.com + state: present + record_name: test + record_value: 192.0.2.23 + +# fetch a specific record +- dnsmadeeasy: + account_key: key + account_secret: secret + domain: my.com + state: present + record_name: test + register: response + +# delete a record / ensure it is absent +- dnsmadeeasy: + account_key: key + account_secret: secret + domain: my.com + record_type: A + state: absent + record_name: test + +# Add a failover +- dnsmadeeasy: + account_key: key + account_secret: secret + domain: my.com + state: present + record_name: test + record_type: A + record_value: 127.0.0.1 + failover: True + ip1: 127.0.0.2 + ip2: 127.0.0.3 + +- dnsmadeeasy: + account_key: key + account_secret: secret + domain: my.com + state: present + record_name: test + record_type: A + record_value: 127.0.0.1 + failover: True + ip1: 127.0.0.2 + ip2: 127.0.0.3 + ip3: 127.0.0.4 + ip4: 127.0.0.5 + ip5: 127.0.0.6 + +# Add a monitor +- dnsmadeeasy: + account_key: key + account_secret: secret + domain: my.com + state: present + record_name: test + record_type: A + record_value: 127.0.0.1 + monitor: yes + ip1: 127.0.0.2 + protocol: HTTP # default + port: 80 # default + maxEmails: 1 + systemDescription: Monitor Test A record + contactList: my contact list + +# Add a monitor with http options +- dnsmadeeasy: + account_key: key + account_secret: secret + domain: my.com + state: present + record_name: test + record_type: A + record_value: 127.0.0.1 + monitor: yes + ip1: 127.0.0.2 + protocol: HTTP # default + port: 80 # default + maxEmails: 1 + systemDescription: Monitor Test A record + contactList: 1174 # contact list id + httpFqdn: http://my.com + httpFile: example + httpQueryString: some string + +# Add a monitor and a failover +- dnsmadeeasy: + account_key: key + account_secret: secret + domain: my.com + state: present + record_name: test + record_type: A + record_value: 127.0.0.1 + failover: True + ip1: 127.0.0.2 + ip2: 127.0.0.3 + monitor: yes + protocol: HTTPS + port: 443 + maxEmails: 1 + systemDescription: monitoring my.com status + contactList: emergencycontacts + +# Remove a failover +- dnsmadeeasy: + account_key: key + account_secret: secret + domain: my.com + state: present + record_name: test + record_type: A + record_value: 127.0.0.1 + failover: no + +# Remove a monitor +- dnsmadeeasy: + account_key: key + account_secret: secret + domain: my.com + state: present + record_name: test + record_type: A + record_value: 127.0.0.1 + monitor: no +''' + +# ============================================ +# DNSMadeEasy module specific support methods. 
+# + +import json +import hashlib +import hmac +import locale +from time import strftime, gmtime + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.urls import fetch_url +from ansible.module_utils.six.moves.urllib.parse import urlencode +from ansible.module_utils.six import string_types + + +class DME2(object): + + def __init__(self, apikey, secret, domain, sandbox, module): + self.module = module + + self.api = apikey + self.secret = secret + + if sandbox: + self.baseurl = 'https://api.sandbox.dnsmadeeasy.com/V2.0/' + self.module.warn(warning="Sandbox is enabled. All actions are made against the URL %s" % self.baseurl) + else: + self.baseurl = 'https://api.dnsmadeeasy.com/V2.0/' + + self.domain = str(domain) + self.domain_map = None # ["domain_name"] => ID + self.record_map = None # ["record_name"] => ID + self.records = None # ["record_ID"] => + self.all_records = None + self.contactList_map = None # ["contactList_name"] => ID + + # Lookup the domain ID if passed as a domain name vs. ID + if not self.domain.isdigit(): + self.domain = self.getDomainByName(self.domain)['id'] + + self.record_url = 'dns/managed/' + str(self.domain) + '/records' + self.monitor_url = 'monitor' + self.contactList_url = 'contactList' + + def _headers(self): + currTime = self._get_date() + hashstring = self._create_hash(currTime) + headers = {'x-dnsme-apiKey': self.api, + 'x-dnsme-hmac': hashstring, + 'x-dnsme-requestDate': currTime, + 'content-type': 'application/json'} + return headers + + def _get_date(self): + locale.setlocale(locale.LC_TIME, 'C') + return strftime("%a, %d %b %Y %H:%M:%S GMT", gmtime()) + + def _create_hash(self, rightnow): + return hmac.new(self.secret.encode(), rightnow.encode(), hashlib.sha1).hexdigest() + + def query(self, resource, method, data=None): + url = self.baseurl + resource + if data and not isinstance(data, string_types): + data = urlencode(data) + + response, info = fetch_url(self.module, url, data=data, method=method, headers=self._headers()) + if info['status'] not in (200, 201, 204): + self.module.fail_json(msg="%s returned %s, with body: %s" % (url, info['status'], info['msg'])) + + try: + return json.load(response) + except Exception: + return {} + + def getDomain(self, domain_id): + if not self.domain_map: + self._instMap('domain') + + return self.domains.get(domain_id, False) + + def getDomainByName(self, domain_name): + if not self.domain_map: + self._instMap('domain') + + return self.getDomain(self.domain_map.get(domain_name, 0)) + + def getDomains(self): + return self.query('dns/managed', 'GET')['data'] + + def getRecord(self, record_id): + if not self.record_map: + self._instMap('record') + + return self.records.get(record_id, False) + + # Try to find a single record matching this one. + # How we do this depends on the type of record. For instance, there + # can be several MX records for a single record_name while there can + # only be a single CNAME for a particular record_name. Note also that + # there can be several records with different types for a single name. 
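+    # For example (hypothetical values): an MX record_value of '10 mail.example.com' is + # matched on the target 'mail.example.com' alone, while a CNAME is matched purely on + # record_name and record_type, with record_value ignored.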
+ def getMatchingRecord(self, record_name, record_type, record_value): + # Get all the records if not already cached + if not self.all_records: + self.all_records = self.getRecords() + + if record_type in ["CNAME", "ANAME", "HTTPRED", "PTR"]: + for result in self.all_records: + if result['name'] == record_name and result['type'] == record_type: + return result + return False + elif record_type in ["A", "AAAA", "MX", "NS", "TXT", "SRV"]: + for result in self.all_records: + if record_type == "MX": + value = record_value.split(" ")[1] + elif record_type == "SRV": + value = record_value.split(" ")[3] + else: + value = record_value + if result['name'] == record_name and result['type'] == record_type and result['value'] == value: + return result + return False + else: + raise Exception('record_type not yet supported') + + def getRecords(self): + return self.query(self.record_url, 'GET')['data'] + + def _instMap(self, type): + # @TODO cache this call so it's executed only once per ansible execution + map = {} + results = {} + + # iterate over e.g. self.getDomains() || self.getRecords() + for result in getattr(self, 'get' + type.title() + 's')(): + + map[result['name']] = result['id'] + results[result['id']] = result + + # e.g. self.domain_map || self.record_map + setattr(self, type + '_map', map) + setattr(self, type + 's', results) # e.g. self.domains || self.records + + def prepareRecord(self, data): + return json.dumps(data, separators=(',', ':')) + + def createRecord(self, data): + # @TODO update the cache w/ resultant record + id when implemented + return self.query(self.record_url, 'POST', data) + + def updateRecord(self, record_id, data): + # @TODO update the cache w/ resultant record + id when implemented + return self.query(self.record_url + '/' + str(record_id), 'PUT', data) + + def deleteRecord(self, record_id): + # @TODO remove record from the cache when implemented + return self.query(self.record_url + '/' + str(record_id), 'DELETE') + + def getMonitor(self, record_id): + return self.query(self.monitor_url + '/' + str(record_id), 'GET') + + def updateMonitor(self, record_id, data): + return self.query(self.monitor_url + '/' + str(record_id), 'PUT', data) + + def prepareMonitor(self, data): + return json.dumps(data, separators=(',', ':')) + + def getContactList(self, contact_list_id): + if not self.contactList_map: + self._instMap('contactList') + + return self.contactLists.get(contact_list_id, False) + + def getContactlists(self): + return self.query(self.contactList_url, 'GET')['data'] + + def getContactListByName(self, name): + if not self.contactList_map: + self._instMap('contactList') + + return self.getContactList(self.contactList_map.get(name, 0)) + +# =========================================== +# Module execution.
+# + + +def main(): + + module = AnsibleModule( + argument_spec=dict( + account_key=dict(required=True), + account_secret=dict(required=True, no_log=True), + domain=dict(required=True), + sandbox=dict(default='no', type='bool'), + state=dict(required=True, choices=['present', 'absent']), + record_name=dict(required=False), + record_type=dict(required=False, choices=[ + 'A', 'AAAA', 'CNAME', 'ANAME', 'HTTPRED', 'MX', 'NS', 'PTR', 'SRV', 'TXT']), + record_value=dict(required=False), + record_ttl=dict(required=False, default=1800, type='int'), + monitor=dict(default='no', type='bool'), + systemDescription=dict(default=''), + maxEmails=dict(default=1, type='int'), + protocol=dict(default='HTTP', choices=['TCP', 'UDP', 'HTTP', 'DNS', 'SMTP', 'HTTPS']), + port=dict(default=80, type='int'), + sensitivity=dict(default='Medium', choices=['Low', 'Medium', 'High']), + contactList=dict(default=None), + httpFqdn=dict(required=False), + httpFile=dict(required=False), + httpQueryString=dict(required=False), + failover=dict(default='no', type='bool'), + autoFailover=dict(default='no', type='bool'), + ip1=dict(required=False), + ip2=dict(required=False), + ip3=dict(required=False), + ip4=dict(required=False), + ip5=dict(required=False), + validate_certs=dict(default='yes', type='bool'), + ), + required_together=[ + ['record_value', 'record_ttl', 'record_type'] + ], + required_if=[ + ['failover', True, ['autoFailover', 'port', 'protocol', 'ip1', 'ip2']], + ['monitor', True, ['port', 'protocol', 'maxEmails', 'systemDescription', 'ip1']] + ] + ) + + protocols = dict(TCP=1, UDP=2, HTTP=3, DNS=4, SMTP=5, HTTPS=6) + sensitivities = dict(Low=8, Medium=5, High=3) + + DME = DME2(module.params["account_key"], module.params[ + "account_secret"], module.params["domain"], module.params["sandbox"], module) + state = module.params["state"] + record_name = module.params["record_name"] + record_type = module.params["record_type"] + record_value = module.params["record_value"] + + # Follow Keyword Controlled Behavior + if record_name is None: + domain_records = DME.getRecords() + if not domain_records: + module.fail_json( + msg="The requested domain name is not accessible with this api_key; try using its ID if known.") + module.exit_json(changed=False, result=domain_records) + + # Fetch existing record + Build new one + current_record = DME.getMatchingRecord(record_name, record_type, record_value) + new_record = {'name': record_name} + for i in ["record_value", "record_type", "record_ttl"]: + if not module.params[i] is None: + new_record[i[len("record_"):]] = module.params[i] + # Special handling for mx record + if new_record["type"] == "MX": + new_record["mxLevel"] = new_record["value"].split(" ")[0] + new_record["value"] = new_record["value"].split(" ")[1] + + # Special handling for SRV records + if new_record["type"] == "SRV": + new_record["priority"] = new_record["value"].split(" ")[0] + new_record["weight"] = new_record["value"].split(" ")[1] + new_record["port"] = new_record["value"].split(" ")[2] + new_record["value"] = new_record["value"].split(" ")[3] + + # Fetch existing monitor if the A record indicates it should exist and build the new monitor + current_monitor = dict() + new_monitor = dict() + if current_record and current_record['type'] == 'A': + current_monitor = DME.getMonitor(current_record['id']) + + # Build the new monitor + for i in ['monitor', 'systemDescription', 'protocol', 'port', 'sensitivity', 'maxEmails', + 'contactList', 'httpFqdn', 'httpFile', 'httpQueryString', + 'failover', 'autoFailover', 
'ip1', 'ip2', 'ip3', 'ip4', 'ip5']: + if module.params[i] is not None: + if i == 'protocol': + # The API requires protocol to be a numeric in the range 1-6 + new_monitor['protocolId'] = protocols[module.params[i]] + elif i == 'sensitivity': + # The API requires sensitivity to be a numeric of 8, 5, or 3 + new_monitor[i] = sensitivities[module.params[i]] + elif i == 'contactList': + # The module accepts either the name or the id of the contact list + contact_list_id = module.params[i] + if not contact_list_id.isdigit() and contact_list_id != '': + contact_list = DME.getContactListByName(contact_list_id) + if not contact_list: + module.fail_json(msg="Contact list {0} does not exist".format(contact_list_id)) + contact_list_id = contact_list.get('id', '') + new_monitor['contactListId'] = contact_list_id + else: + # The module option names match the API field names + new_monitor[i] = module.params[i] + + # Compare new record against existing one + record_changed = False + if current_record: + for i in new_record: + if str(current_record[i]) != str(new_record[i]): + record_changed = True + new_record['id'] = str(current_record['id']) + + monitor_changed = False + if current_monitor: + for i in new_monitor: + if str(current_monitor.get(i)) != str(new_monitor[i]): + monitor_changed = True + + # Follow Keyword Controlled Behavior + if state == 'present': + # return the record if no value is specified + if "value" not in new_record: + if not current_record: + module.fail_json( + msg="A record with name '%s' does not exist for domain '%s.'" % (record_name, module.params['domain'])) + module.exit_json(changed=False, result=dict(record=current_record, monitor=current_monitor)) + + # create record and monitor as the record does not exist + if not current_record: + record = DME.createRecord(DME.prepareRecord(new_record)) + monitor = DME.updateMonitor(record['id'], DME.prepareMonitor(new_monitor)) + module.exit_json(changed=True, result=dict(record=record, monitor=monitor)) + + # update the record + updated = False + if record_changed: + DME.updateRecord(current_record['id'], DME.prepareRecord(new_record)) + updated = True + if monitor_changed: + DME.updateMonitor(current_monitor['recordId'], DME.prepareMonitor(new_monitor)) + updated = True + if updated: + module.exit_json(changed=True, result=dict(record=new_record, monitor=new_monitor)) + + # return the record (no changes) + module.exit_json(changed=False, result=dict(record=current_record, monitor=current_monitor)) + + elif state == 'absent': + changed = False + # delete the record (and the monitor/failover) if it exists + if current_record: + DME.deleteRecord(current_record['id']) + module.exit_json(changed=True) + + # record does not exist, return w/o change. 
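+        # 'changed' is still False here, since no record was found to delete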
+ module.exit_json(changed=changed) + + else: + module.fail_json( + msg="'%s' is an unknown value for the state argument" % state) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/net_tools/exoscale/exo_dns_domain.py b/plugins/modules/net_tools/exoscale/exo_dns_domain.py new file mode 100644 index 0000000000..b9696a91e5 --- /dev/null +++ b/plugins/modules/net_tools/exoscale/exo_dns_domain.py @@ -0,0 +1,210 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# (c) 2016, René Moser +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: exo_dns_domain +short_description: Manages domain records on Exoscale DNS API. +description: + - Create and remove domain records. +author: "René Moser (@resmo)" +options: + name: + description: + - Name of the record. + required: true + type: str + state: + description: + - State of the resource. + default: present + choices: [ present, absent ] + type: str +extends_documentation_fragment: +- community.general.exoscale + +''' + +EXAMPLES = ''' +- name: Create a domain + exo_dns_domain: + name: example.com + +- name: Remove a domain + exo_dns_domain: + name: example.com + state: absent +''' + +RETURN = ''' +--- +exo_dns_domain: + description: API domain results + returned: success + type: complex + contains: + account_id: + description: Your account ID + returned: success + type: int + sample: 34569 + auto_renew: + description: Whether domain is auto renewed or not + returned: success + type: bool + sample: false + created_at: + description: When the domain was created + returned: success + type: str + sample: "2016-08-12T15:24:23.989Z" + expires_on: + description: When the domain expires + returned: success + type: str + sample: "2016-08-12T15:24:23.989Z" + id: + description: ID of the domain + returned: success + type: int + sample: "2016-08-12T15:24:23.989Z" + lockable: + description: Whether the domain is lockable or not + returned: success + type: bool + sample: true + name: + description: Domain name + returned: success + type: str + sample: example.com + record_count: + description: Number of records related to this domain + returned: success + type: int + sample: 5 + registrant_id: + description: ID of the registrant + returned: success + type: int + sample: null + service_count: + description: Number of services + returned: success + type: int + sample: 0 + state: + description: State of the domain + returned: success + type: str + sample: "hosted" + token: + description: Token + returned: success + type: str + sample: "r4NzTRp6opIeFKfaFYvOd6MlhGyD07jl" + unicode_name: + description: Domain name as unicode + returned: success + type: str + sample: "example.com" + updated_at: + description: When the domain was updated last. 
+ returned: success + type: str + sample: "2016-08-12T15:24:23.989Z" + user_id: + description: ID of the user + returned: success + type: int + sample: null + whois_protected: + description: Whether the whois is protected or not + returned: success + type: bool + sample: false +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.exoscale import ExoDns, exo_dns_argument_spec, exo_dns_required_together + + +class ExoDnsDomain(ExoDns): + + def __init__(self, module): + super(ExoDnsDomain, self).__init__(module) + self.name = self.module.params.get('name').lower() + + def get_domain(self): + domains = self.api_query("/domains", "GET") + for z in domains: + if z['domain']['name'].lower() == self.name: + return z + return None + + def present_domain(self): + domain = self.get_domain() + data = { + 'domain': { + 'name': self.name, + } + } + if not domain: + self.result['diff']['after'] = data['domain'] + self.result['changed'] = True + if not self.module.check_mode: + domain = self.api_query("/domains", "POST", data) + return domain + + def absent_domain(self): + domain = self.get_domain() + if domain: + self.result['diff']['before'] = domain + self.result['changed'] = True + if not self.module.check_mode: + self.api_query("/domains/%s" % domain['domain']['name'], "DELETE") + return domain + + def get_result(self, resource): + if resource: + self.result['exo_dns_domain'] = resource['domain'] + return self.result + + +def main(): + argument_spec = exo_dns_argument_spec() + argument_spec.update(dict( + name=dict(type='str', required=True), + state=dict(type='str', choices=['present', 'absent'], default='present'), + )) + + module = AnsibleModule( + argument_spec=argument_spec, + required_together=exo_dns_required_together(), + supports_check_mode=True + ) + + exo_dns_domain = ExoDnsDomain(module) + if module.params.get('state') == "present": + resource = exo_dns_domain.present_domain() + else: + resource = exo_dns_domain.absent_domain() + result = exo_dns_domain.get_result(resource) + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/net_tools/exoscale/exo_dns_record.py b/plugins/modules/net_tools/exoscale/exo_dns_record.py new file mode 100644 index 0000000000..3e71bbb1f9 --- /dev/null +++ b/plugins/modules/net_tools/exoscale/exo_dns_record.py @@ -0,0 +1,345 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# (c) 2016, René Moser +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: exo_dns_record +short_description: Manages DNS records on Exoscale DNS. +description: + - Create, update and delete records. +author: "René Moser (@resmo)" +options: + name: + description: + - Name of the record. + default: "" + type: str + domain: + description: + - Domain the record is related to. + required: true + type: str + record_type: + description: + - Type of the record. + default: A + choices: [ A, ALIAS, CNAME, MX, SPF, URL, TXT, NS, SRV, NAPTR, PTR, AAAA, SSHFP, HINFO, POOL ] + aliases: [ rtype, type ] + type: str + content: + description: + - Content of the record. + - Required if C(state=present) or C(multiple=yes). + aliases: [ value, address ] + type: str + ttl: + description: + - TTL of the record in seconds. 
+ default: 3600 + type: int + prio: + description: + - Priority of the record. + aliases: [ priority ] + type: int + multiple: + description: + - Whether there are more than one records with similar I(name) and I(record_type). + - Only allowed for a few record types, e.g. C(record_type=A), C(record_type=NS) or C(record_type=MX). + - I(content) will not be updated, instead it is used as a key to find existing records. + type: bool + default: no + state: + description: + - State of the record. + default: present + choices: [ present, absent ] + type: str +extends_documentation_fragment: +- community.general.exoscale + +''' + +EXAMPLES = ''' +- name: Create or update an A record + exo_dns_record: + name: web-vm-1 + domain: example.com + content: 1.2.3.4 + +- name: Update an existing A record with a new IP + exo_dns_record: + name: web-vm-1 + domain: example.com + content: 1.2.3.5 + +- name: Create another A record with same name + exo_dns_record: + name: web-vm-1 + domain: example.com + content: 1.2.3.6 + multiple: yes + +- name: Create or update a CNAME record + exo_dns_record: + name: www + domain: example.com + record_type: CNAME + content: web-vm-1 + +- name: Create another MX record + exo_dns_record: + domain: example.com + record_type: MX + content: mx1.example.com + prio: 10 + multiple: yes + +- name: Delete one MX record out of multiple + exo_dns_record: + domain: example.com + record_type: MX + content: mx1.example.com + multiple: yes + state: absent + +- name: Remove a single A record + exo_dns_record: + name: www + domain: example.com + state: absent +''' + +RETURN = ''' +--- +exo_dns_record: + description: API record results + returned: success + type: complex + contains: + content: + description: value of the record + returned: success + type: str + sample: 1.2.3.4 + created_at: + description: When the record was created + returned: success + type: str + sample: "2016-08-12T15:24:23.989Z" + domain: + description: Name of the domain + returned: success + type: str + sample: example.com + domain_id: + description: ID of the domain + returned: success + type: int + sample: 254324 + id: + description: ID of the record + returned: success + type: int + sample: 254324 + name: + description: name of the record + returned: success + type: str + sample: www + parent_id: + description: ID of the parent + returned: success + type: int + sample: null + prio: + description: Priority of the record + returned: success + type: int + sample: 10 + record_type: + description: Priority of the record + returned: success + type: str + sample: A + system_record: + description: Whether the record is a system record or not + returned: success + type: bool + sample: false + ttl: + description: Time to live of the record + returned: success + type: int + sample: 3600 + updated_at: + description: When the record was updated + returned: success + type: str + sample: "2016-08-12T15:24:23.989Z" +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.exoscale import ExoDns, exo_dns_argument_spec, exo_dns_required_together + + +EXO_RECORD_TYPES = [ + 'A', + 'ALIAS', + 'CNAME', + 'MX', + 'SPF', + 'URL', + 'TXT', + 'NS', + 'SRV', + 'NAPTR', + 'PTR', + 'AAAA', + 'SSHFP', + 'HINFO', + 'POOL' +] + + +class ExoDnsRecord(ExoDns): + + def __init__(self, module): + super(ExoDnsRecord, self).__init__(module) + + self.domain = self.module.params.get('domain').lower() + self.name = self.module.params.get('name').lower() + if self.name == self.domain: + 
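# assumption: the Exoscale DNS API represents the zone apex as an empty + # record name, so a name equal to the domain itself collapses to '' +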
self.name = "" + + self.multiple = self.module.params.get('multiple') + self.record_type = self.module.params.get('record_type') + self.content = self.module.params.get('content') + + def _create_record(self, record): + self.result['changed'] = True + data = { + 'record': { + 'name': self.name, + 'record_type': self.record_type, + 'content': self.content, + 'ttl': self.module.params.get('ttl'), + 'prio': self.module.params.get('prio'), + } + } + self.result['diff']['after'] = data['record'] + if not self.module.check_mode: + record = self.api_query("/domains/%s/records" % self.domain, "POST", data) + return record + + def _update_record(self, record): + data = { + 'record': { + 'name': self.name, + 'content': self.content, + 'ttl': self.module.params.get('ttl'), + 'prio': self.module.params.get('prio'), + } + } + if self.has_changed(data['record'], record['record']): + self.result['changed'] = True + if not self.module.check_mode: + record = self.api_query("/domains/%s/records/%s" % (self.domain, record['record']['id']), "PUT", data) + return record + + def get_record(self): + domain = self.module.params.get('domain') + records = self.api_query("/domains/%s/records" % domain, "GET") + + result = {} + for r in records: + + if r['record']['record_type'] != self.record_type: + continue + + r_name = r['record']['name'].lower() + r_content = r['record']['content'] + + if r_name == self.name: + if not self.multiple: + if result: + self.module.fail_json(msg="More than one record with record_type=%s and name=%s params. " + "Use multiple=yes for more than one record." % (self.record_type, self.name)) + else: + result = r + elif r_content == self.content: + return r + + return result + + def present_record(self): + record = self.get_record() + if not record: + record = self._create_record(record) + else: + record = self._update_record(record) + return record + + def absent_record(self): + record = self.get_record() + if record: + self.result['diff']['before'] = record + self.result['changed'] = True + if not self.module.check_mode: + self.api_query("/domains/%s/records/%s" % (self.domain, record['record']['id']), "DELETE") + return record + + def get_result(self, resource): + if resource: + self.result['exo_dns_record'] = resource['record'] + self.result['exo_dns_record']['domain'] = self.domain + return self.result + + +def main(): + argument_spec = exo_dns_argument_spec() + argument_spec.update(dict( + name=dict(type='str', default=''), + record_type=dict(type='str', choices=EXO_RECORD_TYPES, aliases=['rtype', 'type'], default='A'), + content=dict(type='str', aliases=['value', 'address']), + multiple=(dict(type='bool', default=False)), + ttl=dict(type='int', default=3600), + prio=dict(type='int', aliases=['priority']), + domain=dict(type='str', required=True), + state=dict(type='str', choices=['present', 'absent'], default='present'), + )) + + module = AnsibleModule( + argument_spec=argument_spec, + required_together=exo_dns_required_together(), + required_if=[ + ('state', 'present', ['content']), + ('multiple', True, ['content']), + ], + supports_check_mode=True, + ) + + exo_dns_record = ExoDnsRecord(module) + if module.params.get('state') == "present": + resource = exo_dns_record.present_record() + else: + resource = exo_dns_record.absent_record() + + result = exo_dns_record.get_result(resource) + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/net_tools/haproxy.py b/plugins/modules/net_tools/haproxy.py new file mode 100644 index 
0000000000..e6a5d3b0bb --- /dev/null +++ b/plugins/modules/net_tools/haproxy.py @@ -0,0 +1,450 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2014, Ravi Bhure +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = r''' +--- +module: haproxy +short_description: Enable, disable, and set weights for HAProxy backend servers using socket commands +author: +- Ravi Bhure (@ravibhure) +description: + - Enable, disable, drain and set weights for HAProxy backend servers using socket commands. +notes: + - Enable, disable and drain commands are restricted and can only be issued on + sockets configured for level 'admin'. For example, you can add the line + 'stats socket /var/run/haproxy.sock level admin' to the general section of + haproxy.cfg. See U(http://haproxy.1wt.eu/download/1.5/doc/configuration.txt). + - Depends on netcat (nc) being available; you need to install the appropriate + package for your operating system before this module can be used. +options: + backend: + description: + - Name of the HAProxy backend pool. + - If this parameter is unset, it will be auto-detected. + type: str + drain: + description: + - Wait until the server has no active connections or until the timeout + determined by wait_interval and wait_retries is reached. + - Continue only after the status changes to 'MAINT'. + - This overrides the shutdown_sessions option. + type: bool + host: + description: + - Name of the backend host to change. + type: str + required: true + shutdown_sessions: + description: + - When disabling a server, immediately terminate all the sessions attached + to the specified server. + - This can be used to terminate long-running sessions after a server is put + into maintenance mode. Overridden by the drain option. + type: bool + default: no + socket: + description: + - Path to the HAProxy socket file. + type: path + default: /var/run/haproxy.sock + state: + description: + - Desired state of the provided backend host. + - Note that C(drain) state was added in version 2.4. + - It is supported only by HAProxy version 1.5 or later, + - When used on versions < 1.5, it will be ignored. + type: str + required: true + choices: [ disabled, drain, enabled ] + fail_on_not_found: + description: + - Fail whenever trying to enable/disable a backend host that does not exist + type: bool + default: no + wait: + description: + - Wait until the server reports a status of 'UP' when C(state=enabled), + status of 'MAINT' when C(state=disabled) or status of 'DRAIN' when C(state=drain) + type: bool + default: no + wait_interval: + description: + - Number of seconds to wait between retries. + type: int + default: 5 + wait_retries: + description: + - Number of times to check for status after changing the state. + type: int + default: 25 + weight: + description: + - The value passed in argument. + - If the value ends with the `%` sign, then the new weight will be + relative to the initially configured weight. + - Relative weights are only permitted between 0 and 100% and absolute + weights are permitted between 0 and 256. 
+ type: str +''' + +EXAMPLES = r''' +- name: Disable server in 'www' backend pool + haproxy: + state: disabled + host: '{{ inventory_hostname }}' + backend: www + +- name: Disable server without backend pool name (apply to all available backend pool) + haproxy: + state: disabled + host: '{{ inventory_hostname }}' + +- name: Disable server, provide socket file + haproxy: + state: disabled + host: '{{ inventory_hostname }}' + socket: /var/run/haproxy.sock + backend: www + +- name: Disable server, provide socket file, wait until status reports in maintenance + haproxy: + state: disabled + host: '{{ inventory_hostname }}' + socket: /var/run/haproxy.sock + backend: www + wait: yes + +# Place server in drain mode, providing a socket file. Then check the server's +# status every minute to see if it changes to maintenance mode, continuing if it +# does in an hour and failing otherwise. +- haproxy: + state: disabled + host: '{{ inventory_hostname }}' + socket: /var/run/haproxy.sock + backend: www + wait: yes + drain: yes + wait_interval: 1 + wait_retries: 60 + +- name: Disable backend server in 'www' backend pool and drop open sessions to it + haproxy: + state: disabled + host: '{{ inventory_hostname }}' + backend: www + socket: /var/run/haproxy.sock + shutdown_sessions: yes + +- name: Disable server without backend pool name (apply to all available backend pool) but fail when the backend host is not found + haproxy: + state: disabled + host: '{{ inventory_hostname }}' + fail_on_not_found: yes + +- name: Enable server in 'www' backend pool + haproxy: + state: enabled + host: '{{ inventory_hostname }}' + backend: www + +- name: Enable server in 'www' backend pool wait until healthy + haproxy: + state: enabled + host: '{{ inventory_hostname }}' + backend: www + wait: yes + +- name: Enable server in 'www' backend pool wait until healthy. Retry 10 times with intervals of 5 seconds to retrieve the health + haproxy: + state: enabled + host: '{{ inventory_hostname }}' + backend: www + wait: yes + wait_retries: 10 + wait_interval: 5 + +- name: Enable server in 'www' backend pool with change server(s) weight + haproxy: + state: enabled + host: '{{ inventory_hostname }}' + socket: /var/run/haproxy.sock + weight: 10 + backend: www + +- name: Set the server in 'www' backend pool to drain mode + haproxy: + state: drain + host: '{{ inventory_hostname }}' + socket: /var/run/haproxy.sock + backend: www +''' + +import csv +import socket +import time +from string import Template + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_bytes, to_text + + +DEFAULT_SOCKET_LOCATION = "/var/run/haproxy.sock" +RECV_SIZE = 1024 +ACTION_CHOICES = ['enabled', 'disabled', 'drain'] +WAIT_RETRIES = 25 +WAIT_INTERVAL = 5 + + +###################################################################### +class TimeoutException(Exception): + pass + + +class HAProxy(object): + """ + Used for communicating with HAProxy through its local UNIX socket interface. + Perform common tasks in Haproxy related to enable server and + disable server. 
+ + The complete set of external commands Haproxy handles is documented + on their website: + + http://haproxy.1wt.eu/download/1.5/doc/configuration.txt#Unix Socket commands + """ + + def __init__(self, module): + self.module = module + + self.state = self.module.params['state'] + self.host = self.module.params['host'] + self.backend = self.module.params['backend'] + self.weight = self.module.params['weight'] + self.socket = self.module.params['socket'] + self.shutdown_sessions = self.module.params['shutdown_sessions'] + self.fail_on_not_found = self.module.params['fail_on_not_found'] + self.wait = self.module.params['wait'] + self.wait_retries = self.module.params['wait_retries'] + self.wait_interval = self.module.params['wait_interval'] + self._drain = self.module.params['drain'] + self.command_results = {} + + def execute(self, cmd, timeout=200, capture_output=True): + """ + Executes a HAProxy command by sending a message to a HAProxy's local + UNIX socket and waiting up to 'timeout' milliseconds for the response. + """ + self.client = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) + self.client.connect(self.socket) + self.client.sendall(to_bytes('%s\n' % cmd)) + + result = b'' + buf = b'' + buf = self.client.recv(RECV_SIZE) + while buf: + result += buf + buf = self.client.recv(RECV_SIZE) + result = to_text(result, errors='surrogate_or_strict') + + if capture_output: + self.capture_command_output(cmd, result.strip()) + self.client.close() + return result + + def capture_command_output(self, cmd, output): + """ + Capture the output for a command + """ + if 'command' not in self.command_results: + self.command_results['command'] = [] + self.command_results['command'].append(cmd) + if 'output' not in self.command_results: + self.command_results['output'] = [] + self.command_results['output'].append(output) + + def discover_all_backends(self): + """ + Discover all entries with svname = 'BACKEND' and return a list of their corresponding + pxnames + """ + data = self.execute('show stat', 200, False).lstrip('# ') + r = csv.DictReader(data.splitlines()) + return tuple(map(lambda d: d['pxname'], filter(lambda d: d['svname'] == 'BACKEND', r))) + + def discover_version(self): + """ + Attempt to extract the haproxy version. + Return a tuple containing major and minor version. + """ + data = self.execute('show info', 200, False) + lines = data.splitlines() + line = [x for x in lines if 'Version:' in x] + try: + version_values = line[0].partition(':')[2].strip().split('.', 3) + version = (int(version_values[0]), int(version_values[1])) + except (ValueError, TypeError, IndexError): + version = None + + return version + + def execute_for_backends(self, cmd, pxname, svname, wait_for_status=None): + """ + Run some command on the specified backends. If no backends are provided they will + be discovered automatically (all backends) + """ + # Discover backends if none are given + if pxname is None: + backends = self.discover_all_backends() + else: + backends = [pxname] + + # Run the command for each requested backend + for backend in backends: + # Fail when backends were not found + state = self.get_state_for(backend, svname) + if (self.fail_on_not_found) and state is None: + self.module.fail_json( + msg="The specified backend '%s/%s' was not found!" 
% (backend, svname)) +
+ if state is not None: + self.execute(Template(cmd).substitute(pxname=backend, svname=svname)) + if self.wait: + self.wait_until_status(backend, svname, wait_for_status) +
+ def get_state_for(self, pxname, svname): + """ + Find the state of specific services. When pxname is not set, get all backends for a specific host. + Returns a tuple of dictionaries containing the status, weight and current session count for those services, or None if the service was not found. + """ + data = self.execute('show stat', 200, False).lstrip('# ') + r = csv.DictReader(data.splitlines()) + state = tuple( + map( + lambda d: {'status': d['status'], 'weight': d['weight'], 'scur': d['scur']}, + filter(lambda d: (pxname is None or d['pxname'] + == pxname) and d['svname'] == svname, r) + ) + ) + return state or None +
+ def wait_until_status(self, pxname, svname, status): + """ + Wait for a service to reach the specified status. Try wait_retries times + with wait_interval seconds of sleep in between. If the service has not reached + the expected status in that time, the module will fail. If the service was + not found, the module will fail. + """ + for i in range(self.wait_retries): + state = self.get_state_for(pxname, svname) +
+ # We can assume there will only be 1 element in state because both svname and pxname are always set when we get here + # When using track we get a status like this: MAINT (via pxname/svname) so we need to do substring matching + if status in state[0]['status']: + if not self._drain or (state[0]['scur'] == '0' and 'MAINT' in state[0]['status']): + return True + else: + time.sleep(self.wait_interval) +
+ self.module.fail_json(msg="server %s/%s did not reach status '%s' after %d retries. Aborting." % + (pxname, svname, status, self.wait_retries)) +
+ def enabled(self, host, backend, weight): + """ + Enabled action: marks the server UP so health checks are re-enabled, + reads the server's current weight, and sets a new weight on the + haproxy backend server when one is provided. + """ + cmd = "get weight $pxname/$svname; enable server $pxname/$svname" + if weight: + cmd += "; set weight $pxname/$svname %s" % weight + self.execute_for_backends(cmd, backend, host, 'UP') +
+ def disabled(self, host, backend, shutdown_sessions): + """ + Disabled action: marks the server DOWN for maintenance. In this mode, no more checks will be + performed on the server until it leaves maintenance, + and active sessions are optionally shut down while disabling the backend host. + """ + cmd = "get weight $pxname/$svname; disable server $pxname/$svname" + if shutdown_sessions: + cmd += "; shutdown sessions server $pxname/$svname" + self.execute_for_backends(cmd, backend, host, 'MAINT') +
+ def drain(self, host, backend, status='DRAIN'): + """ + Drain action: sets the server to DRAIN mode. + In this mode, the server will not accept any new connections + other than those that are accepted via persistence. + """ + haproxy_version = self.discover_version() +
+ # check if haproxy version supports DRAIN state (starting with 1.5) + if haproxy_version and (1, 5) <= haproxy_version: + cmd = "set server $pxname/$svname state drain" + self.execute_for_backends(cmd, backend, host, status) +
+ def act(self): + """ + Figure out what you want to do from ansible, and then do it.
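+ Dispatches on the state parameter: 'enabled' calls enabled(); 'disabled' calls disabled(), or drain(status='MAINT') when drain=yes; 'drain' calls drain().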
+ """ + # Get the state before the run + self.command_results['state_before'] = self.get_state_for(self.backend, self.host) + + # toggle enable/disbale server + if self.state == 'enabled': + self.enabled(self.host, self.backend, self.weight) + elif self.state == 'disabled' and self._drain: + self.drain(self.host, self.backend, status='MAINT') + elif self.state == 'disabled': + self.disabled(self.host, self.backend, self.shutdown_sessions) + elif self.state == 'drain': + self.drain(self.host, self.backend) + else: + self.module.fail_json(msg="unknown state specified: '%s'" % self.state) + + # Get the state after the run + self.command_results['state_after'] = self.get_state_for(self.backend, self.host) + + # Report change status + self.command_results['changed'] = (self.command_results['state_before'] != self.command_results['state_after']) + + self.module.exit_json(**self.command_results) + + +def main(): + + # load ansible module object + module = AnsibleModule( + argument_spec=dict( + state=dict(type='str', required=True, choices=ACTION_CHOICES), + host=dict(type='str', required=True), + backend=dict(type='str'), + weight=dict(type='str'), + socket=dict(type='path', default=DEFAULT_SOCKET_LOCATION), + shutdown_sessions=dict(type='bool', default=False), + fail_on_not_found=dict(type='bool', default=False), + wait=dict(type='bool', default=False), + wait_retries=dict(type='int', default=WAIT_RETRIES), + wait_interval=dict(type='int', default=WAIT_INTERVAL), + drain=dict(type='bool', default=False), + ), + ) + + if not socket: + module.fail_json(msg="unable to locate haproxy socket") + + ansible_haproxy = HAProxy(module) + ansible_haproxy.act() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/net_tools/hetzner_failover_ip.py b/plugins/modules/net_tools/hetzner_failover_ip.py new file mode 100644 index 0000000000..ebc18c9d48 --- /dev/null +++ b/plugins/modules/net_tools/hetzner_failover_ip.py @@ -0,0 +1,146 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2019 Felix Fontein +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = r''' +--- +module: hetzner_failover_ip +short_description: Manage Hetzner's failover IPs +author: + - Felix Fontein (@felixfontein) +description: + - Manage Hetzner's failover IPs. +seealso: + - name: Failover IP documentation + description: Hetzner's documentation on failover IPs. + link: https://wiki.hetzner.de/index.php/Failover/en + - module: hetzner_failover_ip_info + description: Retrieve information on failover IPs. +extends_documentation_fragment: +- community.general.hetzner + +options: + failover_ip: + description: The failover IP address. + type: str + required: yes + state: + description: + - Defines whether the IP will be routed or not. + - If set to C(routed), I(value) must be specified. + type: str + choices: + - routed + - unrouted + default: routed + value: + description: + - The new value for the failover IP address. + - Required when setting I(state) to C(routed). + type: str + timeout: + description: + - Timeout to use when routing or unrouting the failover IP. + - Note that the API call returns when the failover IP has been + successfully routed to the new address, respectively successfully + unrouted. 
+ type: int + default: 180 +''' + +EXAMPLES = r''' +- name: Set value of failover IP 1.2.3.4 to 5.6.7.8 + hetzner_failover_ip: + hetzner_user: foo + hetzner_password: bar + failover_ip: 1.2.3.4 + value: 5.6.7.8 + +- name: Set value of failover IP 1.2.3.4 to unrouted + hetzner_failover_ip: + hetzner_user: foo + hetzner_password: bar + failover_ip: 1.2.3.4 + state: unrouted +''' + +RETURN = r''' +value: + description: + - The value of the failover IP. + - Will be C(none) if the IP is unrouted. + returned: success + type: str +state: + description: + - Will be C(routed) or C(unrouted). + returned: success + type: str +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.hetzner import ( + HETZNER_DEFAULT_ARGUMENT_SPEC, + get_failover, + set_failover, + get_failover_state, +) + + +def main(): + argument_spec = dict( + failover_ip=dict(type='str', required=True), + state=dict(type='str', default='routed', choices=['routed', 'unrouted']), + value=dict(type='str'), + timeout=dict(type='int', default=180), + ) + argument_spec.update(HETZNER_DEFAULT_ARGUMENT_SPEC) + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + required_if=( + ('state', 'routed', ['value']), + ), + ) + + failover_ip = module.params['failover_ip'] + value = get_failover(module, failover_ip) + changed = False + before = get_failover_state(value) + + if module.params['state'] == 'routed': + new_value = module.params['value'] + else: + new_value = None + + if value != new_value: + if module.check_mode: + value = new_value + changed = True + else: + value, changed = set_failover(module, failover_ip, new_value, timeout=module.params['timeout']) + + after = get_failover_state(value) + module.exit_json( + changed=changed, + diff=dict( + before=before, + after=after, + ), + **after + ) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/net_tools/hetzner_failover_ip_info.py b/plugins/modules/net_tools/hetzner_failover_ip_info.py new file mode 100644 index 0000000000..0c9882e171 --- /dev/null +++ b/plugins/modules/net_tools/hetzner_failover_ip_info.py @@ -0,0 +1,122 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2019 Felix Fontein +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = r''' +--- +module: hetzner_failover_ip_info +short_description: Retrieve information on Hetzner's failover IPs +author: + - Felix Fontein (@felixfontein) +description: + - Retrieve information on Hetzner's failover IPs. +seealso: + - name: Failover IP documentation + description: Hetzner's documentation on failover IPs. + link: https://wiki.hetzner.de/index.php/Failover/en + - module: hetzner_failover_ip + description: Manage failover IPs. +extends_documentation_fragment: +- community.general.hetzner + +options: + failover_ip: + description: The failover IP address. 
+ type: str + required: yes +''' +
+EXAMPLES = r''' +- name: Get value of failover IP 1.2.3.4 + hetzner_failover_ip_info: + hetzner_user: foo + hetzner_password: bar + failover_ip: 1.2.3.4 + register: result +
+- name: Print value of failover IP 1.2.3.4 in case it is routed + debug: + msg: "1.2.3.4 routes to {{ result.value }}" + when: result.state == 'routed' +''' +
+RETURN = r''' +value: + description: + - The value of the failover IP. + - Will be C(none) if the IP is unrouted. + returned: success + type: str +state: + description: + - Will be C(routed) or C(unrouted). + returned: success + type: str +failover_ip: + description: + - The failover IP. + returned: success + type: str + sample: '1.2.3.4' +failover_netmask: + description: + - The netmask for the failover IP. + returned: success + type: str + sample: '255.255.255.255' +server_ip: + description: + - The main IP of the server this failover IP is associated to. + - This is I(not) the server the failover IP is routed to. + returned: success + type: str +server_number: + description: + - The number of the server this failover IP is associated to. + - This is I(not) the server the failover IP is routed to. + returned: success + type: int +''' +
+from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.hetzner import ( + HETZNER_DEFAULT_ARGUMENT_SPEC, + get_failover_record, + get_failover_state, +) + +
+def main(): + argument_spec = dict( + failover_ip=dict(type='str', required=True), + ) + argument_spec.update(HETZNER_DEFAULT_ARGUMENT_SPEC) + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) +
+ failover = get_failover_record(module, module.params['failover_ip']) + result = get_failover_state(failover['active_server_ip']) + result['failover_ip'] = failover['ip'] + result['failover_netmask'] = failover['netmask'] + result['server_ip'] = failover['server_ip'] + result['server_number'] = failover['server_number'] + result['changed'] = False + module.exit_json(**result) + +
+if __name__ == '__main__': + main() diff --git a/plugins/modules/net_tools/hetzner_firewall.py b/plugins/modules/net_tools/hetzner_firewall.py new file mode 100644 index 0000000000..c17b8ecada --- /dev/null +++ b/plugins/modules/net_tools/hetzner_firewall.py @@ -0,0 +1,513 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +
+# (c) 2019 Felix Fontein +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +
+from __future__ import absolute_import, division, print_function +__metaclass__ = type + +
+ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +
+DOCUMENTATION = r''' +--- +module: hetzner_firewall +short_description: Manage Hetzner's dedicated server firewall +author: + - Felix Fontein (@felixfontein) +description: + - Manage Hetzner's dedicated server firewall. + - Note that the idempotency check for TCP flags simply compares strings and doesn't + try to interpret the rules. This might change in the future. +seealso: + - name: Firewall documentation + description: Hetzner's documentation on the stateless firewall for dedicated servers + link: https://wiki.hetzner.de/index.php/Robot_Firewall/en + - module: hetzner_firewall_info + description: Retrieve information on firewall configuration. +extends_documentation_fragment: +- community.general.hetzner +
+options: + server_ip: + description: The server's main IP address. + required: yes + type: str + port: + description: + - Switch port of firewall. + type: str + choices: [ main, kvm ] + default: main + state: + description: + - Desired state of the firewall. + - Firewall is active if state is C(present), and disabled if state is C(absent). + type: str + default: present + choices: [ present, absent ] + whitelist_hos: + description: + - Whether Hetzner services have access. + type: bool + rules: + description: + - Firewall rules. + type: dict + suboptions: + input: + description: + - Input firewall rules. + type: list + elements: dict + suboptions: + name: + description: + - Name of the firewall rule. + type: str + ip_version: + description: + - Internet protocol version. + - Note that currently, only IPv4 is supported by Hetzner. + required: yes + type: str + choices: [ ipv4, ipv6 ] + dst_ip: + description: + - Destination IP address or subnet address. + - CIDR notation. + type: str + dst_port: + description: + - Destination port or port range. + type: str + src_ip: + description: + - Source IP address or subnet address. + - CIDR notation. + type: str + src_port: + description: + - Source port or port range. + type: str + protocol: + description: + - Protocol above IP layer. + type: str + tcp_flags: + description: + - TCP flags or logical combination of flags. + - Flags supported by Hetzner are C(syn), C(fin), C(rst), C(psh) and C(urg). + - They can be combined with C(|) (logical or) and C(&) (logical and). + - See L(the documentation,https://wiki.hetzner.de/index.php/Robot_Firewall/en#Parameter) + for more information. + type: str + action: + description: + - Action if rule matches. + required: yes + type: str + choices: [ accept, discard ] + update_timeout: + description: + - Timeout to use when configuring the firewall. + - Note that the API call returns before the firewall has been + successfully set up. + type: int + default: 30 + wait_for_configured: + description: + - Whether to wait until the firewall has been successfully configured before + determining what to do, and before returning from the module. + - The API returns status C(in progress) when the firewall is currently + being configured. If this happens, the module will try again until + the status changes to C(active) or C(disabled). + - Please note that there is a request limit. If you have to do multiple + updates, it can be better to disable waiting, and regularly use + M(hetzner_firewall_info) to query status. + type: bool + default: yes + wait_delay: + description: + - Delay to wait (in seconds) before checking again whether the firewall has + been configured. + type: int + default: 10 + timeout: + description: + - Timeout (in seconds) for waiting for firewall to be configured. + type: int + default: 180 +''' +
+EXAMPLES = r''' +- name: Configure firewall for server with main IP 1.2.3.4 + hetzner_firewall: + hetzner_user: foo + hetzner_password: bar + server_ip: 1.2.3.4 + state: present + whitelist_hos: yes + rules: + input: + - name: Allow everything to ports 20-23 from 4.3.2.1/24 + ip_version: ipv4 + src_ip: 4.3.2.1/24 + dst_port: '20-23' + action: accept + - name: Allow everything to port 443 + ip_version: ipv4 + dst_port: '443' + action: accept + - name: Drop everything else + ip_version: ipv4 + action: discard + register: result +
+- debug: + msg: "{{ result }}" +''' +
+RETURN = r''' +firewall: + description: + - The firewall configuration. + type: dict + returned: success + contains: + port: + description: + - Switch port of firewall. + - C(main) or C(kvm).
+ type: str + sample: main + server_ip: + description: + - Server's main IP address. + type: str + sample: 1.2.3.4 + server_number: + description: + - Hetzner's internal server number. + type: int + sample: 12345 + status: + description: + - Status of the firewall. + - C(active) or C(disabled). + - Will be C(in process) if the firewall is currently updated, and + I(wait_for_configured) is set to C(no) or I(timeout) to a too small value. + type: str + sample: active + whitelist_hos: + description: + - Whether Hetzner services have access. + type: bool + sample: true + rules: + description: + - Firewall rules. + type: dict + contains: + input: + description: + - Input firewall rules. + type: list + elements: dict + contains: + name: + description: + - Name of the firewall rule. + type: str + sample: Allow HTTP access to server + ip_version: + description: + - Internet protocol version. + type: str + sample: ipv4 + dst_ip: + description: + - Destination IP address or subnet address. + - CIDR notation. + type: str + sample: 1.2.3.4/32 + dst_port: + description: + - Destination port or port range. + type: str + sample: "443" + src_ip: + description: + - Source IP address or subnet address. + - CIDR notation. + type: str + sample: null + src_port: + description: + - Source port or port range. + type: str + sample: null + protocol: + description: + - Protocol above IP layer + type: str + sample: tcp + tcp_flags: + description: + - TCP flags or logical combination of flags. + type: str + sample: null + action: + description: + - Action if rule matches. + - C(accept) or C(discard). + type: str + sample: accept +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.ansible.netcommon.plugins.module_utils.compat import ipaddress as compat_ipaddress +from ansible_collections.community.general.plugins.module_utils.hetzner import ( + HETZNER_DEFAULT_ARGUMENT_SPEC, + BASE_URL, + fetch_url_json, + fetch_url_json_with_retries, + CheckDoneTimeoutException, +) +from ansible.module_utils.six.moves.urllib.parse import urlencode +from ansible.module_utils._text import to_native, to_text + + +RULE_OPTION_NAMES = [ + 'name', 'ip_version', 'dst_ip', 'dst_port', 'src_ip', 'src_port', + 'protocol', 'tcp_flags', 'action', +] + +RULES = ['input'] + + +def restrict_dict(dictionary, fields): + result = dict() + for k, v in dictionary.items(): + if k in fields: + result[k] = v + return result + + +def restrict_firewall_config(config): + result = restrict_dict(config, ['port', 'status', 'whitelist_hos']) + result['rules'] = dict() + for ruleset in RULES: + result['rules'][ruleset] = [ + restrict_dict(rule, RULE_OPTION_NAMES) + for rule in config['rules'].get(ruleset) or [] + ] + return result + + +def update(before, after, params, name): + bv = before.get(name) + after[name] = bv + changed = False + pv = params[name] + if pv is not None: + changed = pv != bv + if changed: + after[name] = pv + return changed + + +def normalize_ip(ip, ip_version): + if ip is None: + return ip + if '/' in ip: + ip, range = ip.split('/') + else: + ip, range = ip, '' + ip_addr = to_native(compat_ipaddress.ip_address(to_text(ip)).compressed) + if range == '': + range = '32' if ip_version.lower() == 'ipv4' else '128' + return ip_addr + '/' + range + + +def update_rules(before, after, params, ruleset): + before_rules = before['rules'][ruleset] + after_rules = after['rules'][ruleset] + params_rules = params['rules'][ruleset] + changed = len(before_rules) != len(params_rules) + for no, rule in enumerate(params_rules): 
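+ # Normalize addresses on both sides before comparing, so that '1.2.3.4' and
+ # '1.2.3.4/32' (or '::1' and '::1/128') are treated as the same address and
+ # do not flag a spurious change.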
+ rule['src_ip'] = normalize_ip(rule['src_ip'], rule['ip_version']) + rule['dst_ip'] = normalize_ip(rule['dst_ip'], rule['ip_version']) + if no < len(before_rules): + before_rule = before_rules[no] + before_rule['src_ip'] = normalize_ip(before_rule['src_ip'], before_rule['ip_version']) + before_rule['dst_ip'] = normalize_ip(before_rule['dst_ip'], before_rule['ip_version']) + if before_rule != rule: + changed = True + after_rules.append(rule) + return changed + + +def encode_rule(output, rulename, input): + for i, rule in enumerate(input['rules'][rulename]): + for k, v in rule.items(): + if v is not None: + output['rules[{0}][{1}][{2}]'.format(rulename, i, k)] = v + + +def create_default_rules_object(): + rules = dict() + for ruleset in RULES: + rules[ruleset] = [] + return rules + + +def firewall_configured(result, error): + return result['firewall']['status'] != 'in process' + + +def main(): + argument_spec = dict( + server_ip=dict(type='str', required=True), + port=dict(type='str', default='main', choices=['main', 'kvm']), + state=dict(type='str', default='present', choices=['present', 'absent']), + whitelist_hos=dict(type='bool'), + rules=dict(type='dict', options=dict( + input=dict(type='list', elements='dict', options=dict( + name=dict(type='str'), + ip_version=dict(type='str', required=True, choices=['ipv4', 'ipv6']), + dst_ip=dict(type='str'), + dst_port=dict(type='str'), + src_ip=dict(type='str'), + src_port=dict(type='str'), + protocol=dict(type='str'), + tcp_flags=dict(type='str'), + action=dict(type='str', required=True, choices=['accept', 'discard']), + )), + )), + update_timeout=dict(type='int', default=30), + wait_for_configured=dict(type='bool', default=True), + wait_delay=dict(type='int', default=10), + timeout=dict(type='int', default=180), + ) + argument_spec.update(HETZNER_DEFAULT_ARGUMENT_SPEC) + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + + # Sanitize input + module.params['status'] = 'active' if (module.params['state'] == 'present') else 'disabled' + if module.params['rules'] is None: + module.params['rules'] = {} + if module.params['rules'].get('input') is None: + module.params['rules']['input'] = [] + + server_ip = module.params['server_ip'] + + # https://robot.your-server.de/doc/webservice/en.html#get-firewall-server-ip + url = "{0}/firewall/{1}".format(BASE_URL, server_ip) + if module.params['wait_for_configured']: + try: + result, error = fetch_url_json_with_retries( + module, + url, + check_done_callback=firewall_configured, + check_done_delay=module.params['wait_delay'], + check_done_timeout=module.params['timeout'], + ) + except CheckDoneTimeoutException as dummy: + module.fail_json(msg='Timeout while waiting for firewall to be configured.') + else: + result, error = fetch_url_json(module, url) + if not firewall_configured(result, error): + module.fail_json(msg='Firewall configuration cannot be read as it is not configured.') + + full_before = result['firewall'] + if not full_before.get('rules'): + full_before['rules'] = create_default_rules_object() + before = restrict_firewall_config(full_before) + + # Build wanted (after) state and compare + after = dict(before) + changed = False + changed |= update(before, after, module.params, 'port') + changed |= update(before, after, module.params, 'status') + changed |= update(before, after, module.params, 'whitelist_hos') + after['rules'] = create_default_rules_object() + if module.params['status'] == 'active': + for ruleset in RULES: + changed |= update_rules(before, 
after, module.params, ruleset) + + # Update if different + construct_result = True + construct_status = None + if changed and not module.check_mode: + # https://robot.your-server.de/doc/webservice/en.html#post-firewall-server-ip + url = "{0}/firewall/{1}".format(BASE_URL, server_ip) + headers = {"Content-type": "application/x-www-form-urlencoded"} + data = dict(after) + data['whitelist_hos'] = str(data['whitelist_hos']).lower() + del data['rules'] + for ruleset in RULES: + encode_rule(data, ruleset, after) + result, error = fetch_url_json( + module, + url, + method='POST', + timeout=module.params['update_timeout'], + data=urlencode(data), + headers=headers, + ) + if module.params['wait_for_configured'] and not firewall_configured(result, error): + try: + result, error = fetch_url_json_with_retries( + module, + url, + check_done_callback=firewall_configured, + check_done_delay=module.params['wait_delay'], + check_done_timeout=module.params['timeout'], + skip_first=True, + ) + except CheckDoneTimeoutException as e: + result, error = e.result, e.error + module.warn('Timeout while waiting for firewall to be configured.') + + full_after = result['firewall'] + if not full_after.get('rules'): + full_after['rules'] = create_default_rules_object() + construct_status = full_after['status'] + if construct_status != 'in process': + # Only use result if configuration is done, so that diff will be ok + after = restrict_firewall_config(full_after) + construct_result = False + + if construct_result: + # Construct result (used for check mode, and configuration still in process) + full_after = dict(full_before) + for k, v in after.items(): + if k != 'rules': + full_after[k] = after[k] + if construct_status is not None: + # We want 'in process' here + full_after['status'] = construct_status + full_after['rules'] = dict() + for ruleset in RULES: + full_after['rules'][ruleset] = after['rules'][ruleset] + + module.exit_json( + changed=changed, + diff=dict( + before=before, + after=after, + ), + firewall=full_after, + ) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/net_tools/hetzner_firewall_info.py b/plugins/modules/net_tools/hetzner_firewall_info.py new file mode 100644 index 0000000000..3cab76758d --- /dev/null +++ b/plugins/modules/net_tools/hetzner_firewall_info.py @@ -0,0 +1,230 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2019 Felix Fontein +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = r''' +--- +module: hetzner_firewall_info +short_description: Manage Hetzner's dedicated server firewall +author: + - Felix Fontein (@felixfontein) +description: + - Manage Hetzner's dedicated server firewall. +seealso: + - name: Firewall documentation + description: Hetzner's documentation on the stateless firewall for dedicated servers + link: https://wiki.hetzner.de/index.php/Robot_Firewall/en + - module: hetzner_firewall + description: Configure firewall. +extends_documentation_fragment: +- community.general.hetzner + +options: + server_ip: + description: The server's main IP address. + type: str + required: yes + wait_for_configured: + description: + - Whether to wait until the firewall has been successfully configured before + determining what to do, and before returning from the module. 
+ - The API returns status C(in progress) when the firewall is currently + being configured. If this happens, the module will try again until + the status changes to C(active) or C(disabled). + - Please note that there is a request limit. If you have to do multiple + updates, it can be better to disable waiting, and regularly use + M(hetzner_firewall_info) to query status. + type: bool + default: yes + wait_delay: + description: + - Delay to wait (in seconds) before checking again whether the firewall has + been configured. + type: int + default: 10 + timeout: + description: + - Timeout (in seconds) for waiting for firewall to be configured. + type: int + default: 180 +''' + +EXAMPLES = r''' +- name: Get firewall configuration for server with main IP 1.2.3.4 + hetzner_firewall_info: + hetzner_user: foo + hetzner_password: bar + server_ip: 1.2.3.4 + register: result + +- debug: + msg: "{{ result.firewall }}" +''' + +RETURN = r''' +firewall: + description: + - The firewall configuration. + type: dict + returned: success + contains: + port: + description: + - Switch port of firewall. + - C(main) or C(kvm). + type: str + sample: main + server_ip: + description: + - Server's main IP address. + type: str + sample: 1.2.3.4 + server_number: + description: + - Hetzner's internal server number. + type: int + sample: 12345 + status: + description: + - Status of the firewall. + - C(active) or C(disabled). + - Will be C(in process) if the firewall is currently updated, and + I(wait_for_configured) is set to C(no) or I(timeout) to a too small value. + type: str + sample: active + whitelist_hos: + description: + - Whether Hetzner services have access. + type: bool + sample: true + rules: + description: + - Firewall rules. + type: dict + contains: + input: + description: + - Input firewall rules. + type: list + elements: dict + contains: + name: + description: + - Name of the firewall rule. + type: str + sample: Allow HTTP access to server + ip_version: + description: + - Internet protocol version. + type: str + sample: ipv4 + dst_ip: + description: + - Destination IP address or subnet address. + - CIDR notation. + type: str + sample: 1.2.3.4/32 + dst_port: + description: + - Destination port or port range. + type: str + sample: "443" + src_ip: + description: + - Source IP address or subnet address. + - CIDR notation. + type: str + sample: null + src_port: + description: + - Source port or port range. + type: str + sample: null + protocol: + description: + - Protocol above IP layer + type: str + sample: tcp + tcp_flags: + description: + - TCP flags or logical combination of flags. + type: str + sample: null + action: + description: + - Action if rule matches. + - C(accept) or C(discard). 
+ type: str + sample: accept +''' +
+from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.hetzner import ( + HETZNER_DEFAULT_ARGUMENT_SPEC, + BASE_URL, + fetch_url_json, + fetch_url_json_with_retries, + CheckDoneTimeoutException, +) + +
+def firewall_configured(result, error): + return result['firewall']['status'] != 'in process' + +
+def main(): + argument_spec = dict( + server_ip=dict(type='str', required=True), + wait_for_configured=dict(type='bool', default=True), + wait_delay=dict(type='int', default=10), + timeout=dict(type='int', default=180), + ) + argument_spec.update(HETZNER_DEFAULT_ARGUMENT_SPEC) + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) +
+ server_ip = module.params['server_ip'] +
+ # https://robot.your-server.de/doc/webservice/en.html#get-firewall-server-ip + url = "{0}/firewall/{1}".format(BASE_URL, server_ip) + if module.params['wait_for_configured']: + try: + result, error = fetch_url_json_with_retries( + module, + url, + check_done_callback=firewall_configured, + check_done_delay=module.params['wait_delay'], + check_done_timeout=module.params['timeout'], + ) + except CheckDoneTimeoutException as dummy: + module.fail_json(msg='Timeout while waiting for firewall to be configured.') + else: + result, error = fetch_url_json(module, url) +
+ firewall = result['firewall'] + if not firewall.get('rules'): + firewall['rules'] = dict() + for ruleset in ['input']: + firewall['rules'][ruleset] = [] +
+ module.exit_json( + changed=False, + firewall=firewall, + ) + +
+if __name__ == '__main__': + main() diff --git a/plugins/modules/net_tools/infinity/infinity.py b/plugins/modules/net_tools/infinity/infinity.py new file mode 100644 index 0000000000..658409d448 --- /dev/null +++ b/plugins/modules/net_tools/infinity/infinity.py @@ -0,0 +1,569 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +
+# Copyright: (c) 2017, +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +
+from __future__ import (absolute_import, division, print_function) +__metaclass__ = type +
+ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} +
+DOCUMENTATION = r''' +module: infinity +short_description: Manage Infinity IPAM using REST API +description: + - Manage Infinity IPAM using REST API. +author: + - Meirong Liu (@MeganLiu) +options: + server_ip: + description: + - IP address of the Infinity server. + type: str + required: true + username: + description: + - Username to access Infinity. + - The user must have REST API privileges. + type: str + required: true + password: + description: + - Infinity password. + type: str + required: true + action: + description: + - Action to perform. + type: str + required: true + choices: [add_network, delete_network, get_network, get_network_id, release_ip, release_network, reserve_network, reserve_next_available_ip ] + network_id: + description: + - Network ID. + type: str + default: '' + ip_address: + description: + - IP Address for a reservation or a release. + type: str + default: '' + network_address: + description: + - Network address in CIDR format (e.g., 192.168.10.0). + type: str + default: '' + network_size: + description: + - Network bitmask (e.g., 255.255.255.192) or CIDR format (e.g., /26). + type: str + default: '' + network_name: + description: + - The name of a network. + type: str + default: '' + network_location: + description: + - The parent network id for a given network. + type: int + default: -1 + network_type: + description: + - Network type defined by Infinity. + type: str + choices: [ lan, shared_lan, supernet ] + default: lan + network_family: + description: + - Network family defined by Infinity: C(4) for IPv4, C(6) for IPv6, C(dual) for dual stack. + type: str + choices: [ 4, 6, dual ] + default: 4 +''' +
+EXAMPLES = r''' +--- +- hosts: localhost + connection: local + strategy: debug + tasks: + - name: Reserve network into Infinity IPAM + infinity: + server_ip: 80.75.107.12 + username: username + password: password + action: reserve_network + network_name: reserve_new_ansible_network + network_family: 4 + network_type: lan + network_id: 1201 + network_size: /28 + register: infinity +''' +
+RETURN = r''' +network_id: + description: ID for a given network. + returned: success + type: str + sample: '1501' +ip_info: + description: When reserving the next available IP address from a network, the IP address info is returned. + returned: success + type: str + sample: '{"address": "192.168.10.3", "hostname": "", "FQDN": "", "domainname": "", "id": 3229}' +network_info: + description: When reserving a LAN network from an Infinity supernet by providing network_size, information about the reserved network is returned. + returned: success + type: str + sample: {"network_address": "192.168.10.32/28","network_family": "4", "network_id": 3102, + "network_size": null,"description": null,"network_location": "3085", + "ranges": { "id": 0, "name": null,"first_ip": null,"type": null,"last_ip": null}, + "network_type": "lan","network_name": "'reserve_new_ansible_network'"} +''' + +
+from ansible.module_utils.basic import AnsibleModule, json +from ansible.module_utils.urls import open_url + +
+class Infinity(object): + """ + Class for managing REST API calls to Infinity. + """ +
+ def __init__(self, module, server_ip, username, password): + self.module = module + self.auth_user = username + self.auth_pass = password + self.base_url = "https://%s/rest/v1/" % (str(server_ip)) +
+ def _get_api_call_ansible_handler( + self, + method='get', + resource_url='', + stat_codes=None, + params=None, + payload_data=None): + """ + Perform the HTTPS request with the requested HTTP method. + """ + stat_codes = [200] if stat_codes is None else stat_codes + request_url = str(self.base_url) + str(resource_url) + response = None + headers = {'Content-Type': 'application/json'} + if not request_url: + self.module.exit_json( + msg="When sending a REST API call, the resource URL is empty; please check.") + if payload_data and not isinstance(payload_data, str): + payload_data = json.dumps(payload_data) + response_raw = open_url( + str(request_url), + method=method, + timeout=20, + headers=headers, + url_username=self.auth_user, + url_password=self.auth_pass, + validate_certs=False, + force_basic_auth=True, + data=payload_data) +
+ response = response_raw.read() + payload = '' + if response_raw.code not in stat_codes: + self.module.exit_json( + changed=False, + meta="open_url returned an error; the HTTP status code is %r" % + (response_raw.code)) + else: + if isinstance(response, str) and len(response) > 0: + payload = response + elif method.lower() == 'delete' and response_raw.code == 204: + payload = 'Delete is done.'
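+ # A successful DELETE returns HTTP 204 with an empty body, so a synthetic
+ # confirmation string is substituted for the missing payload.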
+ if isinstance(payload, dict) and "text" in payload: + self.module.exit_json( + changed=False, + meta="when calling rest api, returned data is not json ") + raise Exception(payload["text"]) + return payload + + # --------------------------------------------------------------------------- + # get_network() + # --------------------------------------------------------------------------- + def get_network(self, network_id, network_name, limit=-1): + """ + Search network_name inside Infinity by using rest api + Network id or network_name needs to be provided + return the details of a given with given network_id or name + """ + if network_name is None and network_id is None: + self.module.exit_json( + msg="You must specify one of the options 'network_name' or 'network_id'.") + method = "get" + resource_url = '' + params = {} + response = None + if network_id: + resource_url = "networks/" + str(network_id) + response = self._get_api_call_ansible_handler(method, resource_url) + if network_id is None and network_name: + method = "get" + resource_url = "search" + params = {"query": json.dumps( + {"name": network_name, "type": "network"})} + response = self._get_api_call_ansible_handler( + method, resource_url, payload_data=json.dumps(params)) + if response and isinstance(response, str): + response = json.loads(response) + if response and isinstance(response, list) and len( + response) > 1 and limit == 1: + response = response[0] + response = json.dumps(response) + return response + + # --------------------------------------------------------------------------- + # get_network_id() + # --------------------------------------------------------------------------- + def get_network_id(self, network_name="", network_type='lan'): + """ + query network_id from Infinity via rest api based on given network_name + """ + method = 'get' + resource_url = 'search' + response = None + if network_name is None: + self.module.exit_json( + msg="You must specify the option 'network_name'") + params = {"query": json.dumps( + {"name": network_name, "type": "network"})} + response = self._get_api_call_ansible_handler( + method, resource_url, payload_data=json.dumps(params)) + network_id = "" + if response and isinstance(response, str): + response = json.loads(response) + if response and isinstance(response, list): + response = response[0] + network_id = response['id'] + return network_id + + # --------------------------------------------------------------------------- + # reserve_next_available_ip() + # --------------------------------------------------------------------------- + def reserve_next_available_ip(self, network_id=""): + """ + Reserve ip address via Infinity by using rest api + network_id: the id of the network that users would like to reserve network from + return the next available ip address from that given network + """ + method = "post" + resource_url = '' + response = None + ip_info = '' + if not network_id: + self.module.exit_json( + msg="You must specify the option 'network_id'.") + if network_id: + resource_url = "networks/" + str(network_id) + "/reserve_ip" + response = self._get_api_call_ansible_handler(method, resource_url) + if response and response.find( + "[") >= 0 and response.find("]") >= 0: + start_pos = response.find("{") + end_pos = response.find("}") + ip_info = response[start_pos: (end_pos + 1)] + return ip_info + + # ------------------------- + # release_ip() + # ------------------------- + def release_ip(self, network_id="", ip_address=""): + """ + Reserve ip address via Infinity by 
using rest api + """ + method = "get" + resource_url = '' + response = None + if ip_address is None or network_id is None: + self.module.exit_json( + msg="You must specify those two options: 'network_id' and 'ip_address'.") + + resource_url = "networks/" + str(network_id) + "/children" + response = self._get_api_call_ansible_handler(method, resource_url) + if not response: + self.module.exit_json( + msg="There is an error in release ip %s from network %s." % + (ip_address, network_id)) + + ip_list = json.loads(response) + ip_idlist = [] + for ip_item in ip_list: + ip_id = ip_item['id'] + ip_idlist.append(ip_id) + deleted_ip_id = '' + for ip_id in ip_idlist: + ip_response = '' + resource_url = "ip_addresses/" + str(ip_id) + ip_response = self._get_api_call_ansible_handler( + method, + resource_url, + stat_codes=[200]) + if ip_response and json.loads( + ip_response)['address'] == str(ip_address): + deleted_ip_id = ip_id + break + if deleted_ip_id: + method = 'delete' + resource_url = "ip_addresses/" + str(deleted_ip_id) + response = self._get_api_call_ansible_handler( + method, resource_url, stat_codes=[204]) + else: + self.module.exit_json( + msg=" When release ip, could not find the ip address %r from the given network %r' ." % + (ip_address, network_id)) + + return response + + # ------------------- + # delete_network() + # ------------------- + def delete_network(self, network_id="", network_name=""): + """ + delete network from Infinity by using rest api + """ + method = 'delete' + resource_url = '' + response = None + if network_id is None and network_name is None: + self.module.exit_json( + msg="You must specify one of those options: 'network_id','network_name' .") + if network_id is None and network_name: + network_id = self.get_network_id(network_name=network_name) + if network_id: + resource_url = "networks/" + str(network_id) + response = self._get_api_call_ansible_handler( + method, resource_url, stat_codes=[204]) + return response + + # reserve_network() + # --------------------------------------------------------------------------- + def reserve_network(self, network_id="", + reserved_network_name="", reserved_network_description="", + reserved_network_size="", reserved_network_family='4', + reserved_network_type='lan', reserved_network_address="",): + """ + Reserves the first available network of specified size from a given supernet +
network_name (required) + Name of the network + + description (optional) + Free description + + network_family (required) + Address family of the network. One of '4', '6', 'IPv4', 'IPv6', 'dual' + + network_address (optional) + Address of the new network. If not given, the first network available will be created. + + network_size (required) + Size of the new network in /<prefix> notation. + + network_type (required) + Type of network. One of 'supernet', 'lan', 'shared_lan'
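+ + A hypothetical JSON payload for POST networks/<id>/reserve_network, matching the values in EXAMPLES above (illustrative only): + + {"network_name": "reserve_new_ansible_network", "description": "", "network_size": "/28", "network_family": "4", "network_type": "lan", "network_location": 1201}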
+ + """ + method = 'post' + resource_url = '' + network_info = None + if network_id is None or reserved_network_name is None or reserved_network_size is None: + self.module.exit_json( + msg="You must specify those options: 'network_id', 'reserved_network_name' and 'reserved_network_size'") + if network_id: + resource_url = "networks/" + str(network_id) + "/reserve_network" + if not reserved_network_family: + reserved_network_family = '4' + if not reserved_network_type: + reserved_network_type = 'lan' + payload_data = { + "network_name": reserved_network_name, + 'description': reserved_network_description, + 'network_size': reserved_network_size, + 'network_family': reserved_network_family, + 'network_type': reserved_network_type, + 'network_location': int(network_id)} + if reserved_network_address: + payload_data.update({'network_address': reserved_network_address}) + + network_info = self._get_api_call_ansible_handler( + method, resource_url, stat_codes=[200, 201], payload_data=payload_data) + + return network_info + + # --------------------------------------------------------------------------- + # release_network() + # --------------------------------------------------------------------------- + def release_network( + self, + network_id="", + released_network_name="", + released_network_type='lan'): + """ + Release the network with name 'released_network_name' from the given supernet network_id + """ + method = 'get' + response = None + if network_id is None or released_network_name is None: + self.module.exit_json( + msg="You must specify those options 'network_id', 'reserved_network_name' and 'reserved_network_size'") + matched_network_id = "" + resource_url = "networks/" + str(network_id) + "/children" + response = self._get_api_call_ansible_handler(method, resource_url) + if not response: + self.module.exit_json( + msg=" there is an error in releasing network %r from network %s." 
% + (released_network_name, network_id)) + if response: + response = json.loads(response) + for child_net in response: + if child_net['network'] and child_net['network']['network_name'] == released_network_name: + matched_network_id = child_net['network']['network_id'] + break + response = None + if matched_network_id: + method = 'delete' + resource_url = "networks/" + str(matched_network_id) + response = self._get_api_call_ansible_handler( + method, resource_url, stat_codes=[204]) + else: + self.module.exit_json( + msg="When releasing the network, could not find the network %r in the given supernet %r." % + (released_network_name, network_id)) +
+ return response +
+ # --------------------------------------------------------------------------- + # add_network() + # --------------------------------------------------------------------------- + def add_network( + self, network_name="", network_address="", + network_size="", network_family='4', + network_type='lan', network_location=-1): + """ + Add a new LAN network into a given supernet in Fusionlayer Infinity via the REST API (or into the default supernet). + required fields=['network_name', 'network_family', 'network_type', 'network_address','network_size' ] + """ + method = 'post' + resource_url = 'networks' + response = None + if network_name is None or network_address is None or network_size is None: + self.module.exit_json( + msg="You must specify these options: 'network_name', 'network_address' and 'network_size'.") +
+ if not network_family: + network_family = '4' + if not network_type: + network_type = 'lan' + if not network_location: + network_location = -1 + payload_data = { + "network_name": network_name, + 'network_address': network_address, + 'network_size': network_size, + 'network_family': network_family, + 'network_type': network_type, + 'network_location': network_location} + response = self._get_api_call_ansible_handler( + method='post', resource_url=resource_url, + stat_codes=[200], payload_data=payload_data) + return response + +
+def main(): + module = AnsibleModule( + argument_spec=dict( + server_ip=dict(type='str', required=True), + username=dict(type='str', required=True), + password=dict(type='str', required=True, no_log=True), + network_id=dict(type='str'), + ip_address=dict(type='str'), + network_name=dict(type='str'), + network_location=dict(type='int', default=-1), + network_family=dict(type='str', default='4', choices=['4', '6', 'dual']), + network_type=dict(type='str', default='lan', choices=['lan', 'shared_lan', 'supernet']), + network_address=dict(type='str'), + network_size=dict(type='str'), + action=dict(type='str', required=True, choices=[ + 'add_network', + 'delete_network', + 'get_network', + 'get_network_id', + 'release_ip', + 'release_network', + 'reserve_network', + 'reserve_next_available_ip', + ],), + ), + required_together=( + ['username', 'password'], + ), + ) + server_ip = module.params["server_ip"] + username = module.params["username"] + password = module.params["password"] + action = module.params["action"] + network_id = module.params["network_id"] + released_ip = module.params["ip_address"] + network_name = module.params["network_name"] + network_family = module.params["network_family"] + network_type = module.params["network_type"] + network_address = module.params["network_address"] + network_size = module.params["network_size"] + network_location = module.params["network_location"] + my_infinity = Infinity(module, server_ip, username, password) + result = '' + if action == "reserve_next_available_ip": + if
network_id: + result = my_infinity.reserve_next_available_ip(network_id) + if not result: + result = 'There is an error in calling method of reserve_next_available_ip' + module.exit_json(changed=False, meta=result) + module.exit_json(changed=True, meta=result) + elif action == "release_ip": + if network_id and released_ip: + result = my_infinity.release_ip( + network_id=network_id, ip_address=released_ip) + module.exit_json(changed=True, meta=result) + elif action == "delete_network": + result = my_infinity.delete_network( + network_id=network_id, network_name=network_name) + module.exit_json(changed=True, meta=result) + + elif action == "get_network_id": + result = my_infinity.get_network_id( + network_name=network_name, network_type=network_type) + module.exit_json(changed=True, meta=result) + elif action == "get_network": + result = my_infinity.get_network( + network_id=network_id, network_name=network_name) + module.exit_json(changed=True, meta=result) + elif action == "reserve_network": + result = my_infinity.reserve_network( + network_id=network_id, + reserved_network_name=network_name, + reserved_network_size=network_size, + reserved_network_family=network_family, + reserved_network_type=network_type, + reserved_network_address=network_address) + module.exit_json(changed=True, meta=result) + elif action == "release_network": + result = my_infinity.release_network( + network_id=network_id, + released_network_name=network_name, + released_network_type=network_type) + module.exit_json(changed=True, meta=result) + + elif action == "add_network": + result = my_infinity.add_network( + network_name=network_name, + network_location=network_location, + network_address=network_address, + network_size=network_size, + network_family=network_family, + network_type=network_type) + + module.exit_json(changed=True, meta=result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/net_tools/ip_netns.py b/plugins/modules/net_tools/ip_netns.py new file mode 100644 index 0000000000..49552c50b9 --- /dev/null +++ b/plugins/modules/net_tools/ip_netns.py @@ -0,0 +1,146 @@ +#!/usr/bin/python +# (c) 2017, Arie Bregman +# +# This file is a module for Ansible that interacts with Network Manager +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: ip_netns +author: "Arie Bregman (@bregman-arie)" +short_description: Manage network namespaces +requirements: [ ip ] +description: + - Create or delete network namespaces using the ip command. 
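+ - Wraps the C(ip netns add) and C(ip netns del) commands.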
+options: + name: + required: false + type: str + description: + - Name of the namespace. + state: + required: false + type: str + default: "present" + choices: [ present, absent ] + description: + - Whether the namespace should exist. +''' +
+EXAMPLES = ''' +# Create a namespace named mario +- name: Create a namespace named mario + ip_netns: + name: mario + state: present +- name: Delete a namespace named luigi + ip_netns: + name: luigi + state: absent +''' +
+RETURN = ''' +# Default return values +''' +
+from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_text + +
+class Namespace(object): + """Interface to network namespaces. """ +
+ def __init__(self, module): + self.module = module + self.name = module.params['name'] + self.state = module.params['state'] +
+ def _netns(self, command): + '''Run an ip netns command''' + return self.module.run_command(['ip', 'netns'] + command) +
+ def exists(self): + '''Check if the namespace already exists''' + rc, out, err = self.module.run_command('ip netns list') + if rc != 0: + self.module.fail_json(msg=to_text(err)) + return self.name in out +
+ def add(self): + '''Create network namespace''' + rtc, out, err = self._netns(['add', self.name]) +
+ if rtc != 0: + self.module.fail_json(msg=err) +
+ def delete(self): + '''Delete network namespace''' + rtc, out, err = self._netns(['del', self.name]) + if rtc != 0: + self.module.fail_json(msg=err) +
+ def check(self): + '''Run check mode''' + changed = False +
+ if self.state == 'absent' and self.exists(): + changed = True + elif self.state == 'present' and not self.exists(): + changed = True +
+ self.module.exit_json(changed=changed) +
+ def run(self): + '''Make the necessary changes''' + changed = False +
+ if self.state == 'absent': + if self.exists(): + self.delete() + changed = True + elif self.state == 'present': + if not self.exists(): + self.add() + changed = True +
+ self.module.exit_json(changed=changed) + +
+def main(): + """Entry point.""" + module = AnsibleModule( + argument_spec={ + 'name': {'default': None}, + 'state': {'default': 'present', 'choices': ['present', 'absent']}, + }, + supports_check_mode=True, + ) +
+ network_namespace = Namespace(module) + if module.check_mode: + network_namespace.check() + else: + network_namespace.run() + +
+if __name__ == '__main__': + main() diff --git a/plugins/modules/net_tools/ipify_facts.py b/plugins/modules/net_tools/ipify_facts.py new file mode 100644 index 0000000000..2aad06fab6 --- /dev/null +++ b/plugins/modules/net_tools/ipify_facts.py @@ -0,0 +1,110 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright: (c) 2015, René Moser +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +
+from __future__ import absolute_import, division, print_function +__metaclass__ = type + +
+ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +
+DOCUMENTATION = r''' +--- +module: ipify_facts +short_description: Retrieve the public IP of your internet gateway +description: + - Useful if behind NAT and you need to know the public IP of your internet gateway. +author: +- René Moser (@resmo) +options: + api_url: + description: + - URL of the ipify.org API service. + - C(?format=json) will be appended by default. + type: str + default: https://api.ipify.org/ + timeout: + description: + - HTTP connection timeout in seconds.
+ type: int + default: 10 + validate_certs: + description: + - When set to C(no), SSL certificates will not be validated. + type: bool + default: yes +notes: + - Visit https://www.ipify.org to get more information. +''' + +EXAMPLES = r''' +# Gather IP facts from ipify.org +- name: Get my public IP + ipify_facts: + +# Gather IP facts from your own ipify service endpoint with a custom timeout +- name: Get my public IP + ipify_facts: + api_url: http://api.example.com/ipify + timeout: 20 +''' + +RETURN = r''' +--- +ipify_public_ip: + description: Public IP of the internet gateway. + returned: success + type: str + sample: 1.2.3.4 +''' + +import json + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.urls import fetch_url +from ansible.module_utils._text import to_text + + +class IpifyFacts(object): + + def __init__(self): + self.api_url = module.params.get('api_url') + self.timeout = module.params.get('timeout') + + def run(self): + result = { + 'ipify_public_ip': None + } + (response, info) = fetch_url(module=module, url=self.api_url + "?format=json", force=True, timeout=self.timeout) + + if not response: + module.fail_json(msg="No valid response from url %s within %s seconds (timeout)" % (self.api_url, self.timeout)) + + data = json.loads(to_text(response.read())) + result['ipify_public_ip'] = data.get('ip') + return result + + +def main(): + global module + module = AnsibleModule( + argument_spec=dict( + api_url=dict(type='str', default='https://api.ipify.org/'), + timeout=dict(type='int', default=10), + validate_certs=dict(type='bool', default=True), + ), + supports_check_mode=True, + ) + + ipify_facts = IpifyFacts().run() + ipify_facts_result = dict(changed=False, ansible_facts=ipify_facts) + module.exit_json(**ipify_facts_result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/net_tools/ipinfoio_facts.py b/plugins/modules/net_tools/ipinfoio_facts.py new file mode 100644 index 0000000000..dcd55dd9ff --- /dev/null +++ b/plugins/modules/net_tools/ipinfoio_facts.py @@ -0,0 +1,134 @@ +#!/usr/bin/python +# -*- coding: UTF-8 -*- + +# Copyright: (c) 2016, Aleksei Kostiuk +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: ipinfoio_facts +short_description: "Retrieve IP geolocation facts of a host's IP address" +description: + - "Gather IP geolocation facts of a host's IP address using ipinfo.io API" +author: "Aleksei Kostiuk (@akostyuk)" +options: + timeout: + description: + - HTTP connection timeout in seconds + required: false + default: 10 + http_agent: + description: + - Set http user agent + required: false + default: "ansible-ipinfoio-module/0.0.1" +notes: + - "Check http://ipinfo.io/ for more information" +''' + +EXAMPLES = ''' +# Retrieve geolocation data of a host's IP address +- name: get IP geolocation data + ipinfoio_facts: +''' + +RETURN = ''' +ansible_facts: + description: "Dictionary of ip geolocation facts for a host's IP address" + returned: changed + type: complex + contains: + ip: + description: "Public IP address of a host" + type: str + sample: "8.8.8.8" + hostname: + description: Domain name + type: str + sample: "google-public-dns-a.google.com" + country: + description: ISO 3166-1 alpha-2 country code + type: str + sample: "US" + region: +
description: State or province name + type: str + sample: "California" + city: + description: City name + type: str + sample: "Mountain View" + loc: + description: Latitude and longitude of the location + type: str + sample: "37.3860,-122.0838" + org: + description: "Organization name" + type: str + sample: "AS3356 Level 3 Communications, Inc." + postal: + description: Postal code + type: str + sample: "94035" +''' +from ansible.module_utils.basic import AnsibleModule + +from ansible.module_utils.urls import fetch_url + + +USER_AGENT = 'ansible-ipinfoio-module/0.0.1' + + +class IpinfoioFacts(object): + + def __init__(self, module): + self.url = 'https://ipinfo.io/json' + self.timeout = module.params.get('timeout') + self.module = module + + def get_geo_data(self): + response, info = fetch_url(self.module, self.url, force=True, # NOQA + timeout=self.timeout) + try: + assert info['status'] == 200 + except AssertionError: + self.module.fail_json(msg='Could not get {0} page, ' + 'check for connectivity!'.format(self.url)) + else: + try: + content = response.read() + result = self.module.from_json(content.decode('utf8')) + except ValueError: + self.module.fail_json( + msg='Failed to parse the ipinfo.io response: ' + '{0} {1}'.format(self.url, content)) + else: + return result + + +def main(): + module = AnsibleModule( # NOQA + argument_spec=dict( + http_agent=dict(default=USER_AGENT), + timeout=dict(type='int', default=10), + ), + supports_check_mode=True, + ) + + ipinfoio = IpinfoioFacts(module) + ipinfoio_result = dict( + changed=False, ansible_facts=ipinfoio.get_geo_data()) + module.exit_json(**ipinfoio_result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/net_tools/ipwcli_dns.py b/plugins/modules/net_tools/ipwcli_dns.py new file mode 100644 index 0000000000..626f8ea96f --- /dev/null +++ b/plugins/modules/net_tools/ipwcli_dns.py @@ -0,0 +1,362 @@ +#!/usr/bin/python + +# Copyright: (c) 2020, Christian Wollinger +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +ANSIBLE_METADATA = { + 'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community' +} + +DOCUMENTATION = ''' +--- +module: ipwcli_dns + +short_description: Manage DNS records for Ericsson IPWorks via ipwcli + + +description: + - "Manage DNS records for the Ericsson IPWorks DNS server. The module will use the ipwcli to deploy the DNS records." + +requirements: + - ipwcli (installed on Ericsson IPWorks) + +notes: + - To make the DNS record changes effective, you need to run C(update dnsserver) on the ipwcli. + +options: + dnsname: + description: + - Name of the record. + required: true + type: str + type: + description: + - Type of the record. + required: true + type: str + choices: [ NAPTR, SRV, A, AAAA ] + container: + description: + - Sets the container zone for the record. + required: true + type: str + address: + description: + - The IP address for the A or AAAA record. + - Required for C(type=A) or C(type=AAAA). + type: str + ttl: + description: + - Sets the TTL of the record. + type: int + default: 3600 + state: + description: + - Whether the record should exist or not. + type: str + choices: [ absent, present ] + default: present + priority: + description: + - Sets the priority of the SRV record. + type: int + default: 10 + weight: + description: + - Sets the weight of the SRV record.
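# A compact sketch of the fetch-and-decode pattern used by ipify_facts and
# ipinfoio_facts above: call fetch_url(), verify the HTTP status, then parse
# the JSON body. fail_json() aborts the module, so the error paths need no
# return value. The helper below is illustrative, not shipped code.
def fetch_json(module, url, timeout=10):
    import json

    from ansible.module_utils.urls import fetch_url
    from ansible.module_utils._text import to_text

    response, info = fetch_url(module, url, force=True, timeout=timeout)
    if not response or info.get('status') != 200:
        module.fail_json(msg='No valid response from %s: %s' % (url, info.get('msg')))
    try:
        return json.loads(to_text(response.read()))
    except ValueError:
        module.fail_json(msg='Failed to parse JSON from %s' % url)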
+ type: int + default: 10 + port: + description: + - Sets the port of the SRV record. + - Required for C(type=SRV) + type: int + target: + description: + - Sets the target of the SRV record. + - Required for C(type=SRV) + type: str + order: + description: + - Sets the order of the NAPTR record. + - Required for C(type=NAPTR) + type: int + preference: + description: + - Sets the preference of the NAPTR record. + - Required for C(type=NAPTR) + type: int + flags: + description: + - Sets one of the possible flags of NAPTR record. + - Required for C(type=NAPTR) + type: str + choices: ['S', 'A', 'U', 'P'] + service: + description: + - Sets the service of the NAPTR record. + - Required for C(type=NAPTR) + type: str + replacement: + description: + - Sets the replacement of the NAPTR record. + - Required for C(type=NAPTR) + type: str + username: + description: + - Username to login on ipwcli. + type: str + required: true + password: + description: + - Password to login on ipwcli. + type: str + required: true + +author: + - Christian Wollinger (@cwollinger) +''' + +EXAMPLES = ''' +- name: Create A record + ipwcli_dns: + dnsname: example.com + type: A + container: ZoneOne + address: 127.0.0.1 + +- name: Remove SRV record if exists + ipwcli_dns: + dnsname: _sip._tcp.test.example.com + type: SRV + container: ZoneOne + ttl: 100 + state: absent + target: example.com + port: 5060 + +- name: Create NAPTR record + ipwcli_dns: + dnsname: test.example.com + type: NAPTR + preference: 10 + container: ZoneOne + ttl: 100 + order: 10 + service: 'SIP+D2T' + replacement: '_sip._tcp.test.example.com.' + flags: S +''' + +RETURN = ''' +record: + description: The created record from the input params + type: str + returned: always +''' + +from ansible.module_utils.basic import AnsibleModule +import os + + +class ResourceRecord(object): + + def __init__(self, module): + self.module = module + self.dnsname = module.params['dnsname'] + self.dnstype = module.params['type'] + self.container = module.params['container'] + self.address = module.params['address'] + self.ttl = module.params['ttl'] + self.state = module.params['state'] + self.priority = module.params['priority'] + self.weight = module.params['weight'] + self.port = module.params['port'] + self.target = module.params['target'] + self.order = module.params['order'] + self.preference = module.params['preference'] + self.flags = module.params['flags'] + self.service = module.params['service'] + self.replacement = module.params['replacement'] + self.user = module.params['username'] + self.password = module.params['password'] + + def create_naptrrecord(self): + # create NAPTR record with the given params + if not self.preference: + self.module.fail_json(msg='missing required arguments for NAPTR record: preference') + + if not self.order: + self.module.fail_json(msg='missing required arguments for NAPTR record: order') + + if not self.service: + self.module.fail_json(msg='missing required arguments for NAPTR record: service') + + if not self.replacement: + self.module.fail_json(msg='missing required arguments for NAPTR record: replacement') + + record = ('naptrrecord %s -set ttl=%s;container=%s;order=%s;preference=%s;flags="%s";service="%s";replacement="%s"' + % (self.dnsname, self.ttl, self.container, self.order, self.preference, self.flags, self.service, self.replacement)) + return record + + def create_srvrecord(self): + # create SRV record with the given params + if not self.port: + self.module.fail_json(msg='missing required arguments for SRV record: port') + + if 
not self.target: + self.module.fail_json(msg='missing required arguments for SRV record: target') + + record = ('srvrecord %s -set ttl=%s;container=%s;priority=%s;weight=%s;port=%s;target=%s' + % (self.dnsname, self.ttl, self.container, self.priority, self.weight, self.port, self.target)) + return record + + def create_arecord(self): + # create A record with the given params + if not self.address: + self.module.fail_json(msg='missing required arguments for A record: address') + + if self.dnstype == 'AAAA': + record = 'aaaarecord %s %s -set ttl=%s;container=%s' % (self.dnsname, self.address, self.ttl, self.container) + else: + record = 'arecord %s %s -set ttl=%s;container=%s' % (self.dnsname, self.address, self.ttl, self.container) + + return record + + def list_record(self, record): + # check if the record exists via list on ipwcli + search = 'list %s' % (record.replace(';', '&&').replace('set', 'where')) + cmd = [self.module.get_bin_path('ipwcli', True)] + cmd.append('-user=%s' % (self.user)) + cmd.append('-password=%s' % (self.password)) + rc, out, err = self.module.run_command(cmd, data=search) + + if 'Invalid username or password' in out: + self.module.fail_json(msg='access denied at ipwcli login: Invalid username or password') + + if (('ARecord %s' % self.dnsname in out and rc == 0) or ('SRVRecord %s' % self.dnsname in out and rc == 0) or + ('NAPTRRecord %s' % self.dnsname in out and rc == 0)): + return True, rc, out, err + + return False, rc, out, err + + def deploy_record(self, record): + # check what happens if create fails on ipworks + stdin = 'create %s' % (record) + cmd = [self.module.get_bin_path('ipwcli', True)] + cmd.append('-user=%s' % (self.user)) + cmd.append('-password=%s' % (self.password)) + rc, out, err = self.module.run_command(cmd, data=stdin) + + if 'Invalid username or password' in out: + self.module.fail_json(msg='access denied at ipwcli login: Invalid username or password') + + if '1 object(s) created.' in out: + return rc, out, err + else: + self.module.fail_json(msg='record creation failed', stderr=out) + + def delete_record(self, record): + # check what happens if create fails on ipworks + stdin = 'delete %s' % (record.replace(';', '&&').replace('set', 'where')) + cmd = [self.module.get_bin_path('ipwcli', True)] + cmd.append('-user=%s' % (self.user)) + cmd.append('-password=%s' % (self.password)) + rc, out, err = self.module.run_command(cmd, data=stdin) + + if 'Invalid username or password' in out: + self.module.fail_json(msg='access denied at ipwcli login: Invalid username or password') + + if '1 object(s) were updated.' 
in out: + return rc, out, err + else: + self.module.fail_json(msg='record deletion failed', stderr=out) + + +def run_module(): + # define available arguments/parameters a user can pass to the module + module_args = dict( + dnsname=dict(type='str', required=True), + type=dict(type='str', required=True, choices=['A', 'AAAA', 'SRV', 'NAPTR']), + container=dict(type='str', required=True), + address=dict(type='str', required=False), + ttl=dict(type='int', required=False, default=3600), + state=dict(type='str', default='present', choices=['absent', 'present']), + priority=dict(type='int', required=False, default=10), + weight=dict(type='int', required=False, default=10), + port=dict(type='int', required=False), + target=dict(type='str', required=False), + order=dict(type='int', required=False), + preference=dict(type='int', required=False), + flags=dict(type='str', required=False, choices=['S', 'A', 'U', 'P']), + service=dict(type='str', required=False), + replacement=dict(type='str', required=False), + username=dict(type='str', required=True), + password=dict(type='str', required=True, no_log=True) + ) + + # define result + result = dict( + changed=False, + stdout='', + stderr='', + rc=0, + record='' + ) + + # supports check mode + module = AnsibleModule( + argument_spec=module_args, + supports_check_mode=True + ) + + user = ResourceRecord(module) + + if user.dnstype == 'NAPTR': + record = user.create_naptrrecord() + elif user.dnstype == 'SRV': + record = user.create_srvrecord() + elif user.dnstype == 'A' or user.dnstype == 'AAAA': + record = user.create_arecord() + + found, rc, out, err = user.list_record(record) + + if found and user.state == 'absent': + if module.check_mode: + module.exit_json(changed=True) + rc, out, err = user.delete_record(record) + result['changed'] = True + result['record'] = record + result['rc'] = rc + result['stdout'] = out + result['stderr'] = err + elif not found and user.state == 'present': + if module.check_mode: + module.exit_json(changed=True) + rc, out, err = user.deploy_record(record) + result['changed'] = True + result['record'] = record + result['rc'] = rc + result['stdout'] = out + result['stderr'] = err + else: + result['changed'] = False + result['record'] = record + result['rc'] = rc + result['stdout'] = out + result['stderr'] = err + + module.exit_json(**result) + + +def main(): + run_module() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/net_tools/ldap/ldap_attr.py b/plugins/modules/net_tools/ldap/ldap_attr.py new file mode 100644 index 0000000000..80dde691b8 --- /dev/null +++ b/plugins/modules/net_tools/ldap/ldap_attr.py @@ -0,0 +1,292 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2016, Peter Sagerson +# Copyright: (c) 2016, Jiri Tyr +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = { + 'metadata_version': '1.1', + 'status': ['deprecated'], + 'supported_by': 'community' +} + +DOCUMENTATION = r''' +--- +module: ldap_attr +short_description: Add or remove LDAP attribute values +description: + - Add or remove LDAP attribute values. +notes: + - This only deals with attributes on existing entries. To add or remove + whole entries, see M(ldap_entry). + - The default authentication settings will attempt to use a SASL EXTERNAL + bind over a UNIX domain socket. 
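# The present/absent decision in ipwcli_dns run_module() above reduces to one
# rule: change only when the observed state differs from the desired state,
# and skip the write when running in check mode. A minimal restatement;
# exists, create and delete are hypothetical stand-ins for list_record(),
# deploy_record() and delete_record().
def reconcile(module, exists, state, create, delete):
    # exists: bool from a lookup; create/delete: callables that do the write
    if exists and state == 'absent':
        if not module.check_mode:
            delete()
        return True
    if not exists and state == 'present':
        if not module.check_mode:
            create()
        return True
    return False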
This works well with the default Ubuntu + install for example, which includes a cn=peercred,cn=external,cn=auth ACL + rule allowing root to modify the server configuration. If you need to use + a simple bind to access your server, pass the credentials in I(bind_dn) + and I(bind_pw). + - For I(state=present) and I(state=absent), all value comparisons are + performed on the server for maximum accuracy. For I(state=exact), values + have to be compared in Python, which obviously ignores LDAP matching + rules. This should work out in most cases, but it is theoretically + possible to see spurious changes when target and actual values are + semantically identical but lexically distinct. +deprecated: + removed_in: '2.14' + why: 'The current "ldap_attr" module does not support LDAP attribute insertions or deletions with objectClass dependencies.' + alternative: 'Use M(ldap_attrs) instead. Deprecated in 2.10.' +author: + - Jiri Tyr (@jtyr) +requirements: + - python-ldap +options: + name: + description: + - The name of the attribute to modify. + type: str + required: true + state: + description: + - The state of the attribute values. + - If C(present), all given values will be added if they're missing. + - If C(absent), all given values will be removed if present. + - If C(exact), the set of values will be forced to exactly those provided and no others. + - If I(state=exact) and I(value) is an empty list, all values for this attribute will be removed. + type: str + choices: [ absent, exact, present ] + default: present + values: + description: + - The value(s) to add or remove. This can be a string or a list of + strings. The complex argument format is required in order to pass + a list of strings (see examples). + type: raw + required: true + params: + description: + - Additional module parameters. + type: dict +extends_documentation_fragment: +- community.general.ldap.documentation + +''' + +EXAMPLES = r''' +- name: Configure directory number 1 for example.com + ldap_attr: + dn: olcDatabase={1}hdb,cn=config + name: olcSuffix + values: dc=example,dc=com + state: exact + +# The complex argument format is required here to pass a list of ACL strings. 
+- name: Set up the ACL + ldap_attr: + dn: olcDatabase={1}hdb,cn=config + name: olcAccess + values: + - >- + {0}to attrs=userPassword,shadowLastChange + by self write + by anonymous auth + by dn="cn=admin,dc=example,dc=com" write + by * none' + - >- + {1}to dn.base="dc=example,dc=com" + by dn="cn=admin,dc=example,dc=com" write + by * read + state: exact + +- name: Declare some indexes + ldap_attr: + dn: olcDatabase={1}hdb,cn=config + name: olcDbIndex + values: "{{ item }}" + with_items: + - objectClass eq + - uid eq + +- name: Set up a root user, which we can use later to bootstrap the directory + ldap_attr: + dn: olcDatabase={1}hdb,cn=config + name: "{{ item.key }}" + values: "{{ item.value }}" + state: exact + with_dict: + olcRootDN: cn=root,dc=example,dc=com + olcRootPW: "{SSHA}tabyipcHzhwESzRaGA7oQ/SDoBZQOGND" + +- name: Get rid of an unneeded attribute + ldap_attr: + dn: uid=jdoe,ou=people,dc=example,dc=com + name: shadowExpire + values: [] + state: exact + server_uri: ldap://localhost/ + bind_dn: cn=admin,dc=example,dc=com + bind_pw: password + +# +# The same as in the previous example but with the authentication details +# stored in the ldap_auth variable: +# +# ldap_auth: +# server_uri: ldap://localhost/ +# bind_dn: cn=admin,dc=example,dc=com +# bind_pw: password +- name: Get rid of an unneeded attribute + ldap_attr: + dn: uid=jdoe,ou=people,dc=example,dc=com + name: shadowExpire + values: [] + state: exact + params: "{{ ldap_auth }}" +''' + +RETURN = r''' +modlist: + description: list of modified parameters + returned: success + type: list + sample: '[[2, "olcRootDN", ["cn=root,dc=example,dc=com"]]]' +''' + +import traceback + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib +from ansible.module_utils._text import to_native, to_bytes +from ansible_collections.community.general.plugins.module_utils.ldap import LdapGeneric, gen_specs + +LDAP_IMP_ERR = None +try: + import ldap + + HAS_LDAP = True +except ImportError: + LDAP_IMP_ERR = traceback.format_exc() + HAS_LDAP = False + + +class LdapAttr(LdapGeneric): + def __init__(self, module): + LdapGeneric.__init__(self, module) + + # Shortcuts + self.name = self.module.params['name'] + self.state = self.module.params['state'] + + # Normalize values + if isinstance(self.module.params['values'], list): + self.values = list(map(to_bytes, self.module.params['values'])) + else: + self.values = [to_bytes(self.module.params['values'])] + + def add(self): + values_to_add = list(filter(self._is_value_absent, self.values)) + + if len(values_to_add) > 0: + modlist = [(ldap.MOD_ADD, self.name, values_to_add)] + else: + modlist = [] + + return modlist + + def delete(self): + values_to_delete = list(filter(self._is_value_present, self.values)) + + if len(values_to_delete) > 0: + modlist = [(ldap.MOD_DELETE, self.name, values_to_delete)] + else: + modlist = [] + + return modlist + + def exact(self): + try: + results = self.connection.search_s( + self.dn, ldap.SCOPE_BASE, attrlist=[self.name]) + except ldap.LDAPError as e: + self.fail("Cannot search for attribute %s" % self.name, e) + + current = results[0][1].get(self.name, []) + modlist = [] + + if frozenset(self.values) != frozenset(current): + if len(current) == 0: + modlist = [(ldap.MOD_ADD, self.name, self.values)] + elif len(self.values) == 0: + modlist = [(ldap.MOD_DELETE, self.name, None)] + else: + modlist = [(ldap.MOD_REPLACE, self.name, self.values)] + + return modlist + + def _is_value_present(self, value): + """ True if the target attribute has the given value. 
""" + try: + is_present = bool( + self.connection.compare_s(self.dn, self.name, value)) + except ldap.NO_SUCH_ATTRIBUTE: + is_present = False + + return is_present + + def _is_value_absent(self, value): + """ True if the target attribute doesn't have the given value. """ + return not self._is_value_present(value) + + +def main(): + module = AnsibleModule( + argument_spec=gen_specs( + name=dict(type='str', required=True), + params=dict(type='dict'), + state=dict(type='str', default='present', choices=['absent', 'exact', 'present']), + values=dict(type='raw', required=True), + ), + supports_check_mode=True, + ) + + if not HAS_LDAP: + module.fail_json(msg=missing_required_lib('python-ldap'), + exception=LDAP_IMP_ERR) + + # Update module parameters with user's parameters if defined + if 'params' in module.params and isinstance(module.params['params'], dict): + module.params.update(module.params['params']) + # Remove the params + module.params.pop('params', None) + + # Instantiate the LdapAttr object + ldap = LdapAttr(module) + + state = module.params['state'] + + # Perform action + if state == 'present': + modlist = ldap.add() + elif state == 'absent': + modlist = ldap.delete() + elif state == 'exact': + modlist = ldap.exact() + + changed = False + + if len(modlist) > 0: + changed = True + + if not module.check_mode: + try: + ldap.connection.modify_s(ldap.dn, modlist) + except Exception as e: + module.fail_json(msg="Attribute action failed.", details=to_native(e)) + + module.exit_json(changed=changed, modlist=modlist) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/net_tools/ldap/ldap_attrs.py b/plugins/modules/net_tools/ldap/ldap_attrs.py new file mode 100644 index 0000000000..72077609ae --- /dev/null +++ b/plugins/modules/net_tools/ldap/ldap_attrs.py @@ -0,0 +1,324 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2019, Maciej Delmanowski +# Copyright: (c) 2017, Alexander Korinek +# Copyright: (c) 2016, Peter Sagerson +# Copyright: (c) 2016, Jiri Tyr +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = { + 'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community' +} + + +DOCUMENTATION = r''' +--- +module: ldap_attrs +short_description: Add or remove multiple LDAP attribute values +description: + - Add or remove multiple LDAP attribute values. +notes: + - This only deals with attributes on existing entries. To add or remove + whole entries, see M(ldap_entry). + - The default authentication settings will attempt to use a SASL EXTERNAL + bind over a UNIX domain socket. This works well with the default Ubuntu + install for example, which includes a cn=peercred,cn=external,cn=auth ACL + rule allowing root to modify the server configuration. If you need to use + a simple bind to access your server, pass the credentials in I(bind_dn) + and I(bind_pw). + - For I(state=present) and I(state=absent), all value comparisons are + performed on the server for maximum accuracy. For I(state=exact), values + have to be compared in Python, which obviously ignores LDAP matching + rules. This should work out in most cases, but it is theoretically + possible to see spurious changes when target and actual values are + semantically identical but lexically distinct. 
+author: + - Jiri Tyr (@jtyr) + - Alexander Korinek (@noles) + - Maciej Delmanowski (@drybjed) +requirements: + - python-ldap +options: + state: + required: false + type: str + choices: [present, absent, exact] + default: present + description: + - The state of the attribute values. If C(present), all given attribute + values will be added if they're missing. If C(absent), all given + attribute values will be removed if present. If C(exact), the set of + attribute values will be forced to exactly those provided and no others. + If I(state=exact) and the attribute I(value) is empty, all values for + this attribute will be removed. + attributes: + required: true + type: dict + description: + - The attribute(s) and value(s) to add or remove. The complex argument format is required in order to pass + a list of strings (see examples). + ordered: + required: false + type: bool + default: 'no' + description: + - If C(yes), prepend list values with X-ORDERED index numbers in all + attributes specified in the current task. This is useful mostly with + I(olcAccess) attribute to easily manage LDAP Access Control Lists. +extends_documentation_fragment: +- community.general.ldap.documentation + +''' + + +EXAMPLES = r''' +- name: Configure directory number 1 for example.com + ldap_attrs: + dn: olcDatabase={1}hdb,cn=config + attributes: + olcSuffix: dc=example,dc=com + state: exact + +# The complex argument format is required here to pass a list of ACL strings. +- name: Set up the ACL + ldap_attrs: + dn: olcDatabase={1}hdb,cn=config + attributes: + olcAccess: + - >- + {0}to attrs=userPassword,shadowLastChange + by self write + by anonymous auth + by dn="cn=admin,dc=example,dc=com" write + by * none' + - >- + {1}to dn.base="dc=example,dc=com" + by dn="cn=admin,dc=example,dc=com" write + by * read + state: exact + +# An alternative approach with automatic X-ORDERED numbering +- name: Set up the ACL + ldap_attrs: + dn: olcDatabase={1}hdb,cn=config + attributes: + olcAccess: + - >- + to attrs=userPassword,shadowLastChange + by self write + by anonymous auth + by dn="cn=admin,dc=example,dc=com" write + by * none' + - >- + to dn.base="dc=example,dc=com" + by dn="cn=admin,dc=example,dc=com" write + by * read + ordered: yes + state: exact + +- name: Declare some indexes + ldap_attrs: + dn: olcDatabase={1}hdb,cn=config + attributes: + olcDbIndex: + - objectClass eq + - uid eq + +- name: Set up a root user, which we can use later to bootstrap the directory + ldap_attrs: + dn: olcDatabase={1}hdb,cn=config + attributes: + olcRootDN: cn=root,dc=example,dc=com + olcRootPW: "{SSHA}tabyipcHzhwESzRaGA7oQ/SDoBZQOGND" + state: exact + +- name: Remove an attribute with a specific value + ldap_attrs: + dn: uid=jdoe,ou=people,dc=example,dc=com + attributes: + description: "An example user account" + state: absent + server_uri: ldap://localhost/ + bind_dn: cn=admin,dc=example,dc=com + bind_pw: password + +- name: Remove specified attribute(s) from an entry + ldap_attrs: + dn: uid=jdoe,ou=people,dc=example,dc=com + attributes: + description: [] + state: exact + server_uri: ldap://localhost/ + bind_dn: cn=admin,dc=example,dc=com + bind_pw: password +''' + + +RETURN = r''' +modlist: + description: list of modified parameters + returned: success + type: list + sample: '[[2, "olcRootDN", ["cn=root,dc=example,dc=com"]]]' +''' + +import traceback + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib +from ansible.module_utils._text import to_native, to_bytes +from 
ansible_collections.community.general.plugins.module_utils.ldap import LdapGeneric, gen_specs +import re + +LDAP_IMP_ERR = None +try: + import ldap + + HAS_LDAP = True +except ImportError: + LDAP_IMP_ERR = traceback.format_exc() + HAS_LDAP = False + + +class LdapAttrs(LdapGeneric): + def __init__(self, module): + LdapGeneric.__init__(self, module) + + # Shortcuts + self.attrs = self.module.params['attributes'] + self.state = self.module.params['state'] + self.ordered = self.module.params['ordered'] + + def _order_values(self, values): + """ Preprend X-ORDERED index numbers to attribute's values. """ + ordered_values = [] + + if isinstance(values, list): + for index, value in enumerate(values): + cleaned_value = re.sub(r'^\{\d+\}', '', value) + ordered_values.append('{' + str(index) + '}' + cleaned_value) + + return ordered_values + + def _normalize_values(self, values): + """ Normalize attribute's values. """ + norm_values = [] + + if isinstance(values, list): + if self.ordered: + norm_values = list(map(to_bytes, + self._order_values(list(map(str, + values))))) + else: + norm_values = list(map(to_bytes, values)) + else: + norm_values = [to_bytes(str(values))] + + return norm_values + + def add(self): + modlist = [] + for name, values in self.module.params['attributes'].items(): + norm_values = self._normalize_values(values) + for value in norm_values: + if self._is_value_absent(name, value): + modlist.append((ldap.MOD_ADD, name, value)) + + return modlist + + def delete(self): + modlist = [] + for name, values in self.module.params['attributes'].items(): + norm_values = self._normalize_values(values) + for value in norm_values: + if self._is_value_present(name, value): + modlist.append((ldap.MOD_DELETE, name, value)) + + return modlist + + def exact(self): + modlist = [] + for name, values in self.module.params['attributes'].items(): + norm_values = self._normalize_values(values) + try: + results = self.connection.search_s( + self.dn, ldap.SCOPE_BASE, attrlist=[name]) + except ldap.LDAPError as e: + self.fail("Cannot search for attribute %s" % name, e) + + current = results[0][1].get(name, []) + + if frozenset(norm_values) != frozenset(current): + if len(current) == 0: + modlist.append((ldap.MOD_ADD, name, norm_values)) + elif len(norm_values) == 0: + modlist.append((ldap.MOD_DELETE, name, None)) + else: + modlist.append((ldap.MOD_REPLACE, name, norm_values)) + + return modlist + + def _is_value_present(self, name, value): + """ True if the target attribute has the given value. """ + try: + is_present = bool( + self.connection.compare_s(self.dn, name, value)) + except ldap.NO_SUCH_ATTRIBUTE: + is_present = False + + return is_present + + def _is_value_absent(self, name, value): + """ True if the target attribute doesn't have the given value. 
""" + return not self._is_value_present(name, value) + + +def main(): + module = AnsibleModule( + argument_spec=gen_specs( + attributes=dict(type='dict', required=True), + ordered=dict(type='bool', default=False, required=False), + state=dict(type='str', default='present', choices=['absent', 'exact', 'present']), + ), + supports_check_mode=True, + ) + + if not HAS_LDAP: + module.fail_json(msg=missing_required_lib('python-ldap'), + exception=LDAP_IMP_ERR) + + # Instantiate the LdapAttr object + ldap = LdapAttrs(module) + + state = module.params['state'] + + # Perform action + if state == 'present': + modlist = ldap.add() + elif state == 'absent': + modlist = ldap.delete() + elif state == 'exact': + modlist = ldap.exact() + + changed = False + + if len(modlist) > 0: + changed = True + + if not module.check_mode: + try: + ldap.connection.modify_s(ldap.dn, modlist) + except Exception as e: + module.fail_json(msg="Attribute action failed.", details=to_native(e)) + + module.exit_json(changed=changed, modlist=modlist) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/net_tools/ldap/ldap_entry.py b/plugins/modules/net_tools/ldap/ldap_entry.py new file mode 100644 index 0000000000..6466a40263 --- /dev/null +++ b/plugins/modules/net_tools/ldap/ldap_entry.py @@ -0,0 +1,252 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2016, Peter Sagerson +# Copyright: (c) 2016, Jiri Tyr +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = { + 'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community' +} + + +DOCUMENTATION = ''' +--- +module: ldap_entry +short_description: Add or remove LDAP entries. +description: + - Add or remove LDAP entries. This module only asserts the existence or + non-existence of an LDAP entry, not its attributes. To assert the + attribute values of an entry, see M(ldap_attr). +notes: + - The default authentication settings will attempt to use a SASL EXTERNAL + bind over a UNIX domain socket. This works well with the default Ubuntu + install for example, which includes a cn=peercred,cn=external,cn=auth ACL + rule allowing root to modify the server configuration. If you need to use + a simple bind to access your server, pass the credentials in I(bind_dn) + and I(bind_pw). +author: + - Jiri Tyr (@jtyr) +requirements: + - python-ldap +options: + attributes: + description: + - If I(state=present), attributes necessary to create an entry. Existing + entries are never modified. To assert specific attribute values on an + existing entry, use M(ldap_attr) module instead. + objectClass: + description: + - If I(state=present), value or list of values to use when creating + the entry. It can either be a string or an actual list of + strings. + params: + description: + - List of options which allows to overwrite any of the task or the + I(attributes) options. To remove an option, set the value of the option + to C(null). + state: + description: + - The target state of the entry. 
+ choices: [present, absent] + default: present +extends_documentation_fragment: +- community.general.ldap.documentation + +''' + + +EXAMPLES = """ +- name: Make sure we have a parent entry for users + ldap_entry: + dn: ou=users,dc=example,dc=com + objectClass: organizationalUnit + +- name: Make sure we have an admin user + ldap_entry: + dn: cn=admin,dc=example,dc=com + objectClass: + - simpleSecurityObject + - organizationalRole + attributes: + description: An LDAP administrator + userPassword: "{SSHA}tabyipcHzhwESzRaGA7oQ/SDoBZQOGND" + +- name: Get rid of an old entry + ldap_entry: + dn: ou=stuff,dc=example,dc=com + state: absent + server_uri: ldap://localhost/ + bind_dn: cn=admin,dc=example,dc=com + bind_pw: password + +# +# The same as in the previous example but with the authentication details +# stored in the ldap_auth variable: +# +# ldap_auth: +# server_uri: ldap://localhost/ +# bind_dn: cn=admin,dc=example,dc=com +# bind_pw: password +- name: Get rid of an old entry + ldap_entry: + dn: ou=stuff,dc=example,dc=com + state: absent + params: "{{ ldap_auth }}" +""" + + +RETURN = """ +# Default return values +""" + +import traceback + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib +from ansible.module_utils.six import string_types +from ansible.module_utils._text import to_native, to_bytes +from ansible_collections.community.general.plugins.module_utils.ldap import LdapGeneric, gen_specs + +LDAP_IMP_ERR = None +try: + import ldap.modlist + + HAS_LDAP = True +except ImportError: + LDAP_IMP_ERR = traceback.format_exc() + HAS_LDAP = False + + +class LdapEntry(LdapGeneric): + def __init__(self, module): + LdapGeneric.__init__(self, module) + + # Shortcuts + self.state = self.module.params['state'] + + # Add the objectClass into the list of attributes + self.module.params['attributes']['objectClass'] = ( + self.module.params['objectClass']) + + # Load attributes + if self.state == 'present': + self.attrs = self._load_attrs() + + def _load_attrs(self): + """ Turn attribute's value to array. """ + attrs = {} + + for name, value in self.module.params['attributes'].items(): + if name not in attrs: + attrs[name] = [] + + if isinstance(value, list): + attrs[name] = list(map(to_bytes, value)) + else: + attrs[name].append(to_bytes(value)) + + return attrs + + def add(self): + """ If self.dn does not exist, returns a callable that will add it. """ + def _add(): + self.connection.add_s(self.dn, modlist) + + if not self._is_entry_present(): + modlist = ldap.modlist.addModlist(self.attrs) + action = _add + else: + action = None + + return action + + def delete(self): + """ If self.dn exists, returns a callable that will delete it. 
""" + def _delete(): + self.connection.delete_s(self.dn) + + if self._is_entry_present(): + action = _delete + else: + action = None + + return action + + def _is_entry_present(self): + try: + self.connection.search_s(self.dn, ldap.SCOPE_BASE) + except ldap.NO_SUCH_OBJECT: + is_present = False + else: + is_present = True + + return is_present + + +def main(): + module = AnsibleModule( + argument_spec=gen_specs( + attributes=dict(default={}, type='dict'), + objectClass=dict(type='raw'), + params=dict(type='dict'), + state=dict(default='present', choices=['present', 'absent']), + ), + supports_check_mode=True, + ) + + if not HAS_LDAP: + module.fail_json(msg=missing_required_lib('python-ldap'), + exception=LDAP_IMP_ERR) + + state = module.params['state'] + + # Check if objectClass is present when needed + if state == 'present' and module.params['objectClass'] is None: + module.fail_json(msg="At least one objectClass must be provided.") + + # Check if objectClass is of the correct type + if ( + module.params['objectClass'] is not None and not ( + isinstance(module.params['objectClass'], string_types) or + isinstance(module.params['objectClass'], list))): + module.fail_json(msg="objectClass must be either a string or a list.") + + # Update module parameters with user's parameters if defined + if 'params' in module.params and isinstance(module.params['params'], dict): + for key, val in module.params['params'].items(): + if key in module.argument_spec: + module.params[key] = val + else: + module.params['attributes'][key] = val + + # Remove the params + module.params.pop('params', None) + + # Instantiate the LdapEntry object + ldap = LdapEntry(module) + + # Get the action function + if state == 'present': + action = ldap.add() + elif state == 'absent': + action = ldap.delete() + + # Perform the action + if action is not None and not module.check_mode: + try: + action() + except Exception as e: + module.fail_json(msg="Entry action failed.", details=to_native(e), exception=traceback.format_exc()) + + module.exit_json(changed=(action is not None)) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/net_tools/ldap/ldap_passwd.py b/plugins/modules/net_tools/ldap/ldap_passwd.py new file mode 100644 index 0000000000..8b4c49e663 --- /dev/null +++ b/plugins/modules/net_tools/ldap/ldap_passwd.py @@ -0,0 +1,150 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2017-2018, Keller Fuchs +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = { + 'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community' +} + + +DOCUMENTATION = ''' +--- +module: ldap_passwd +short_description: Set passwords in LDAP. +description: + - Set a password for an LDAP entry. This module only asserts that + a given password is valid for a given entry. To assert the + existence of an entry, see M(ldap_entry). +notes: + - The default authentication settings will attempt to use a SASL EXTERNAL + bind over a UNIX domain socket. This works well with the default Ubuntu + install for example, which includes a cn=peercred,cn=external,cn=auth ACL + rule allowing root to modify the server configuration. If you need to use + a simple bind to access your server, pass the credentials in I(bind_dn) + and I(bind_pw). 
+author: + - Keller Fuchs (@KellerFuchs) +requirements: + - python-ldap +options: + passwd: + required: true + description: + - The (plaintext) password to be set for I(dn). +extends_documentation_fragment: +- community.general.ldap.documentation + +''' + +EXAMPLES = """ +- name: Set a password for the admin user + ldap_passwd: + dn: cn=admin,dc=example,dc=com + passwd: "{{ vault_secret }}" + +- name: Setting passwords in bulk + ldap_passwd: + dn: "{{ item.key }}" + passwd: "{{ item.value }}" + with_dict: + alice: alice123123 + bob: "|30b!" + admin: "{{ vault_secret }}" +""" + +RETURN = """ +modlist: + description: list of modified parameters + returned: success + type: list + sample: '[[2, "olcRootDN", ["cn=root,dc=example,dc=com"]]]' +""" + +import traceback + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib +from ansible_collections.community.general.plugins.module_utils.ldap import LdapGeneric, gen_specs + +LDAP_IMP_ERR = None +try: + import ldap + + HAS_LDAP = True +except ImportError: + LDAP_IMP_ERR = traceback.format_exc() + HAS_LDAP = False + + +class LdapPasswd(LdapGeneric): + def __init__(self, module): + LdapGeneric.__init__(self, module) + + # Shortcuts + self.passwd = self.module.params['passwd'] + + def passwd_check(self): + try: + tmp_con = ldap.initialize(self.server_uri) + except ldap.LDAPError as e: + self.fail("Cannot initialize LDAP connection", e) + + if self.start_tls: + try: + tmp_con.start_tls_s() + except ldap.LDAPError as e: + self.fail("Cannot start TLS.", e) + + try: + tmp_con.simple_bind_s(self.dn, self.passwd) + except ldap.INVALID_CREDENTIALS: + return True + except ldap.LDAPError as e: + self.fail("Cannot bind to the server.", e) + else: + return False + finally: + tmp_con.unbind() + + def passwd_set(self): + # Exit early if the password is already valid + if not self.passwd_check(): + return False + + # Change the password (or throw an exception) + try: + self.connection.passwd_s(self.dn, None, self.passwd) + except ldap.LDAPError as e: + self.fail("Unable to set password", e) + + # Password successfully changed + return True + + +def main(): + module = AnsibleModule( + argument_spec=gen_specs(passwd=dict(no_log=True)), + supports_check_mode=True, + ) + + if not HAS_LDAP: + module.fail_json(msg=missing_required_lib('python-ldap'), + exception=LDAP_IMP_ERR) + + ldap = LdapPasswd(module) + + if module.check_mode: + module.exit_json(changed=ldap.passwd_check()) + + module.exit_json(changed=ldap.passwd_set()) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/net_tools/lldp.py b/plugins/modules/net_tools/lldp.py new file mode 100644 index 0000000000..16141fc791 --- /dev/null +++ b/plugins/modules/net_tools/lldp.py @@ -0,0 +1,84 @@ +#!/usr/bin/python +# +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: lldp +requirements: [ lldpctl ] +short_description: get details reported by lldp +description: + - Reads data out of lldpctl +options: {} +author: "Andy Hill (@andyhky)" +notes: + - Requires lldpd running and lldp enabled on switches +''' + +EXAMPLES = ''' +# Retrieve switch/port information + - name: Gather information from lldp + lldp: + + - name: Print each switch/port + debug: + msg: "{{ 
lldp[item]['chassis']['name'] }} / {{ lldp[item]['port']['ifname'] }}" + with_items: "{{ lldp.keys() }}" + +# TASK: [Print each switch/port] *********************************************************** +# ok: [10.13.0.22] => (item=eth2) => {"item": "eth2", "msg": "switch1.example.com / Gi0/24"} +# ok: [10.13.0.22] => (item=eth1) => {"item": "eth1", "msg": "switch2.example.com / Gi0/3"} +# ok: [10.13.0.22] => (item=eth0) => {"item": "eth0", "msg": "switch3.example.com / Gi0/3"} + +''' + +from ansible.module_utils.basic import AnsibleModule + + +def gather_lldp(module): + cmd = ['lldpctl', '-f', 'keyvalue'] + rc, output, err = module.run_command(cmd) + if output: + output_dict = {} + current_dict = {} + lldp_entries = output.split("\n") + + for entry in lldp_entries: + if entry.startswith('lldp'): + path, value = entry.strip().split("=", 1) + path = path.split(".") + path_components, final = path[:-1], path[-1] + else: + value = current_dict[final] + '\n' + entry + + current_dict = output_dict + for path_component in path_components: + current_dict[path_component] = current_dict.get(path_component, {}) + current_dict = current_dict[path_component] + current_dict[final] = value + return output_dict + + +def main(): + module = AnsibleModule({}) + + lldp_output = gather_lldp(module) + try: + data = {'lldp': lldp_output['lldp']} + module.exit_json(ansible_facts=data) + except TypeError: + module.fail_json(msg="lldpctl command failed. is lldpd running?") + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/net_tools/netcup_dns.py b/plugins/modules/net_tools/netcup_dns.py new file mode 100644 index 0000000000..4f057c7571 --- /dev/null +++ b/plugins/modules/net_tools/netcup_dns.py @@ -0,0 +1,265 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2018 Nicolai Buchwitz +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +ANSIBLE_METADATA = { + 'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community' +} + +DOCUMENTATION = ''' +--- +module: netcup_dns +notes: [] +short_description: manage Netcup DNS records +description: + - "Manages DNS records via the Netcup API, see the docs U(https://ccp.netcup.net/run/webservice/servers/endpoint.php)" +options: + api_key: + description: + - API key for authentication, must be obtained via the netcup CCP (U(https://ccp.netcup.net)) + required: True + api_password: + description: + - API password for authentication, must be obtained via the netcup CCP (https://ccp.netcup.net) + required: True + customer_id: + description: + - Netcup customer id + required: True + domain: + description: + - Domainname the records should be added / removed + required: True + record: + description: + - Record to add or delete, supports wildcard (*). Default is C(@) (e.g. the zone name) + default: "@" + aliases: [ name ] + type: + description: + - Record type + choices: ['A', 'AAAA', 'MX', 'CNAME', 'CAA', 'SRV', 'TXT', 'TLSA', 'NS', 'DS'] + required: True + value: + description: + - Record value + required: true + solo: + type: bool + default: False + description: + - Whether the record should be the only one for that record type and record name. Only use with C(state=present) + - This will delete all other records with the same record name and type. + priority: + description: + - Record priority. 
Required for C(type=MX). + required: False + state: + description: + - Whether the record should exist or not + required: False + default: present + choices: [ 'present', 'absent' ] +requirements: + - "nc-dnsapi >= 0.1.3" +author: "Nicolai Buchwitz (@nbuchwitz)" + +''' + +EXAMPLES = ''' +- name: Create a record of type A + netcup_dns: + api_key: "..." + api_password: "..." + customer_id: "..." + domain: "example.com" + name: "mail" + type: "A" + value: "127.0.0.1" + +- name: Delete that record + netcup_dns: + api_key: "..." + api_password: "..." + customer_id: "..." + domain: "example.com" + name: "mail" + type: "A" + value: "127.0.0.1" + state: absent + +- name: Create a wildcard record + netcup_dns: + api_key: "..." + api_password: "..." + customer_id: "..." + domain: "example.com" + name: "*" + type: "A" + value: "127.0.1.1" + +- name: Set the MX record for example.com + netcup_dns: + api_key: "..." + api_password: "..." + customer_id: "..." + domain: "example.com" + type: "MX" + value: "mail.example.com" + +- name: Set a record and ensure that this is the only one + netcup_dns: + api_key: "..." + api_password: "..." + customer_id: "..." + name: "demo" + domain: "example.com" + type: "AAAA" + value: "::1" + solo: true +''' + +RETURN = ''' +records: + description: list containing all records + returned: success + type: complex + contains: + name: + description: the record name + returned: success + type: str + sample: fancy-hostname + type: + description: the record type + returned: success + type: str + sample: A + value: + description: the record destination + returned: success + type: str + sample: 127.0.0.1 + priority: + description: the record priority (only relevant if type=MX) + returned: success + type: int + sample: 0 + id: + description: internal id of the record + returned: success + type: int + sample: 12345 +''' + +import traceback + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib +from ansible.module_utils._text import to_native + +NCDNSAPI_IMP_ERR = None +try: + import nc_dnsapi + from nc_dnsapi import DNSRecord + + HAS_NCDNSAPI = True +except ImportError: + NCDNSAPI_IMP_ERR = traceback.format_exc() + HAS_NCDNSAPI = False + + +def main(): + module = AnsibleModule( + argument_spec=dict( + api_key=dict(required=True, no_log=True), + api_password=dict(required=True, no_log=True), + customer_id=dict(required=True, type='int'), + + domain=dict(required=True), + record=dict(required=False, default='@', aliases=['name']), + type=dict(required=True, choices=['A', 'AAAA', 'MX', 'CNAME', 'CAA', 'SRV', 'TXT', 'TLSA', 'NS', 'DS']), + value=dict(required=True), + priority=dict(required=False, type='int'), + solo=dict(required=False, type='bool', default=False), + state=dict(required=False, choices=['present', 'absent'], default='present'), + + ), + supports_check_mode=True + ) + + if not HAS_NCDNSAPI: + module.fail_json(msg=missing_required_lib('nc-dnsapi'), exception=NCDNSAPI_IMP_ERR) + + api_key = module.params.get('api_key') + api_password = module.params.get('api_password') + customer_id = module.params.get('customer_id') + domain = module.params.get('domain') + record_type = module.params.get('type') + record = module.params.get('record') + value = module.params.get('value') + priority = module.params.get('priority') + solo = module.params.get('solo') + state = module.params.get('state') + + if record_type == 'MX' and not priority: + module.fail_json(msg="record type MX requires the 'priority' argument") + + has_changed = False + all_records = [] + try: + with nc_dnsapi.Client(customer_id, api_key,
api_password) as api: + all_records = api.dns_records(domain) + record = DNSRecord(record, record_type, value, priority=priority) + + # try to get existing record + record_exists = False + for r in all_records: + if r == record: + record_exists = True + record = r + + break + + if state == 'present': + if solo: + obsolete_records = [r for r in all_records if + r.hostname == record.hostname + and r.type == record.type + and not r.destination == record.destination] + + if obsolete_records: + if not module.check_mode: + all_records = api.delete_dns_records(domain, obsolete_records) + + has_changed = True + + if not record_exists: + if not module.check_mode: + all_records = api.add_dns_record(domain, record) + + has_changed = True + elif state == 'absent' and record_exists: + if not module.check_mode: + all_records = api.delete_dns_record(domain, record) + + has_changed = True + + except Exception as ex: + module.fail_json(msg=to_native(ex)) + + module.exit_json(changed=has_changed, result={"records": [record_data(r) for r in all_records]}) + + +def record_data(r): + return {"name": r.hostname, "type": r.type, "value": r.destination, "priority": r.priority, "id": r.id} + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/net_tools/nios/nios_a_record.py b/plugins/modules/net_tools/nios/nios_a_record.py new file mode 100644 index 0000000000..b76d95434b --- /dev/null +++ b/plugins/modules/net_tools/nios/nios_a_record.py @@ -0,0 +1,174 @@ +#!/usr/bin/python +# Copyright (c) 2018 Red Hat, Inc. +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'certified'} + + +DOCUMENTATION = ''' +--- +module: nios_a_record +author: "Blair Rampling (@brampling)" +short_description: Configure Infoblox NIOS A records +description: + - Adds and/or removes instances of A record objects from + Infoblox NIOS servers. This module manages NIOS C(record:a) objects + using the Infoblox WAPI interface over REST. +requirements: + - infoblox-client +extends_documentation_fragment: +- community.general.nios + +options: + name: + description: + - Specifies the fully qualified hostname to add or remove from + the system + required: true + view: + description: + - Sets the DNS view to associate this A record with. The DNS + view must already be configured on the system + required: true + default: default + aliases: + - dns_view + ipv4addr: + description: + - Configures the IPv4 address for this A record. Users can dynamically + allocate an IPv4 address to an A record by passing a dictionary + containing I(nios_next_ip) and a CIDR network range. See the examples + required: true + aliases: + - ipv4 + ttl: + description: + - Configures the TTL to be associated with this A record + extattrs: + description: + - Allows for the configuration of Extensible Attributes on the + instance of the object. This argument accepts a set of key / value + pairs for configuration. + comment: + description: + - Configures a text string comment to be associated with the instance + of this object. The provided text string will be configured on the + object instance. + state: + description: + - Configures the intended state of the instance of the object on + the NIOS server.
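# The solo=true handling in netcup_dns above: records with the same hostname
# and type but a different destination are considered obsolete and are deleted
# before the desired record is added. Illustrative sketch over nc_dnsapi-style
# record objects (hostname, type, destination attributes as used above).
def find_obsolete(all_records, desired):
    return [r for r in all_records
            if r.hostname == desired.hostname
            and r.type == desired.type
            and r.destination != desired.destination]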
When this value is set to C(present), the object + is configured on the device and when this value is set to C(absent) + the value is removed (if necessary) from the device. + default: present + choices: + - present + - absent +''' + +EXAMPLES = ''' +- name: configure an A record + nios_a_record: + name: a.ansible.com + ipv4: 192.168.10.1 + state: present + provider: + host: "{{ inventory_hostname_short }}" + username: admin + password: admin + connection: local + +- name: add a comment to an existing A record + nios_a_record: + name: a.ansible.com + ipv4: 192.168.10.1 + comment: this is a test comment + state: present + provider: + host: "{{ inventory_hostname_short }}" + username: admin + password: admin + connection: local + +- name: remove an A record from the system + nios_a_record: + name: a.ansible.com + ipv4: 192.168.10.1 + state: absent + provider: + host: "{{ inventory_hostname_short }}" + username: admin + password: admin + connection: local + +- name: update an A record name + nios_a_record: + name: {new_name: a_new.ansible.com, old_name: a.ansible.com} + ipv4: 192.168.10.1 + state: present + provider: + host: "{{ inventory_hostname_short }}" + username: admin + password: admin + connection: local + +- name: dynamically add a record to next available ip + nios_a_record: + name: a.ansible.com + ipv4: {nios_next_ip: 192.168.10.0/24} + state: present + provider: + host: "{{ inventory_hostname_short }}" + username: admin + password: admin + connection: local +''' + +RETURN = ''' # ''' + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.six import iteritems +from ansible_collections.community.general.plugins.module_utils.net_tools.nios.api import WapiModule +from ansible_collections.community.general.plugins.module_utils.net_tools.nios.api import NIOS_A_RECORD + + +def main(): + ''' Main entry point for module execution + ''' + + ib_spec = dict( + name=dict(required=True, ib_req=True), + view=dict(default='default', aliases=['dns_view'], ib_req=True), + + ipv4addr=dict(aliases=['ipv4'], ib_req=True), + + ttl=dict(type='int'), + + extattrs=dict(type='dict'), + comment=dict(), + ) + + argument_spec = dict( + provider=dict(required=True), + state=dict(default='present', choices=['present', 'absent']) + ) + + argument_spec.update(ib_spec) + argument_spec.update(WapiModule.provider_spec) + + module = AnsibleModule(argument_spec=argument_spec, + supports_check_mode=True) + + wapi = WapiModule(module) + result = wapi.run(NIOS_A_RECORD, ib_spec) + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/net_tools/nios/nios_aaaa_record.py b/plugins/modules/net_tools/nios/nios_aaaa_record.py new file mode 100644 index 0000000000..26ef492414 --- /dev/null +++ b/plugins/modules/net_tools/nios/nios_aaaa_record.py @@ -0,0 +1,161 @@ +#!/usr/bin/python +# Copyright (c) 2018 Red Hat, Inc. +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'certified'} + + +DOCUMENTATION = ''' +--- +module: nios_aaaa_record +author: "Blair Rampling (@brampling)" +short_description: Configure Infoblox NIOS AAAA records +description: + - Adds and/or removes instances of AAAA record objects from + Infoblox NIOS servers. This module manages NIOS C(record:aaaa) objects + using the Infoblox WAPI interface over REST. 
+requirements: + - infoblox-client +extends_documentation_fragment: +- community.general.nios + +options: + name: + description: + - Specifies the fully qualified hostname to add or remove from + the system + required: true + view: + description: + - Sets the DNS view to associate this AAAA record with. The DNS + view must already be configured on the system + required: true + default: default + aliases: + - dns_view + ipv6addr: + description: + - Configures the IPv6 address for this AAAA record. + required: true + aliases: + - ipv6 + ttl: + description: + - Configures the TTL to be associated with this AAAA record + extattrs: + description: + - Allows for the configuration of Extensible Attributes on the + instance of the object. This argument accepts a set of key / value + pairs for configuration. + comment: + description: + - Configures a text string comment to be associated with the instance + of this object. The provided text string will be configured on the + object instance. + state: + description: + - Configures the intended state of the instance of the object on + the NIOS server. When this value is set to C(present), the object + is configured on the device and when this value is set to C(absent) + the value is removed (if necessary) from the device. + default: present + choices: + - present + - absent +''' + +EXAMPLES = ''' +- name: configure an AAAA record + nios_aaaa_record: + name: aaaa.ansible.com + ipv6: 2001:0db8:85a3:0000:0000:8a2e:0370:7334 + state: present + provider: + host: "{{ inventory_hostname_short }}" + username: admin + password: admin + connection: local + +- name: add a comment to an existing AAAA record + nios_aaaa_record: + name: aaaa.ansible.com + ipv6: 2001:0db8:85a3:0000:0000:8a2e:0370:7334 + comment: this is a test comment + state: present + provider: + host: "{{ inventory_hostname_short }}" + username: admin + password: admin + connection: local + +- name: remove an AAAA record from the system + nios_aaaa_record: + name: aaaa.ansible.com + ipv6: 2001:0db8:85a3:0000:0000:8a2e:0370:7334 + state: absent + provider: + host: "{{ inventory_hostname_short }}" + username: admin + password: admin + connection: local + +- name: update an AAAA record name + nios_aaaa_record: + name: {new_name: aaaa_new.ansible.com, old_name: aaaa.ansible.com} + ipv6: 2001:0db8:85a3:0000:0000:8a2e:0370:7334 + state: present + provider: + host: "{{ inventory_hostname_short }}" + username: admin + password: admin + connection: local +''' + +RETURN = ''' # ''' + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.six import iteritems +from ansible_collections.community.general.plugins.module_utils.net_tools.nios.api import WapiModule +from ansible_collections.community.general.plugins.module_utils.net_tools.nios.api import NIOS_AAAA_RECORD + + +def main(): + ''' Main entry point for module execution + ''' + + ib_spec = dict( + name=dict(required=True, ib_req=True), + view=dict(default='default', aliases=['dns_view'], ib_req=True), + + ipv6addr=dict(aliases=['ipv6'], ib_req=True), + + ttl=dict(type='int'), + + extattrs=dict(type='dict'), + comment=dict(), + ) + + argument_spec = dict( + provider=dict(required=True), + state=dict(default='present', choices=['present', 'absent']) + ) + + argument_spec.update(ib_spec) + argument_spec.update(WapiModule.provider_spec) + + module = AnsibleModule(argument_spec=argument_spec, + supports_check_mode=True) + + wapi = WapiModule(module) + result = wapi.run(NIOS_AAAA_RECORD, ib_spec) + + module.exit_json(**result) + + 
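+# The ib_req=True flags in ib_spec above mark the fields that WapiModule
+# uses to look up an existing object before deciding whether to create,
+# update or delete it.  A rough sketch of that lookup filter (illustrative
+# only, mirroring the pattern used explicitly by nios_fixed_address and
+# nios_network later in this diff):
+#
+#     obj_filter = dict((k, module.params[k])
+#                       for k, v in ib_spec.items() if v.get('ib_req'))
+#     # for this module: {'name': ..., 'view': ..., 'ipv6addr': ...}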
+if __name__ == '__main__': + main() diff --git a/plugins/modules/net_tools/nios/nios_cname_record.py b/plugins/modules/net_tools/nios/nios_cname_record.py new file mode 100644 index 0000000000..adbf81a027 --- /dev/null +++ b/plugins/modules/net_tools/nios/nios_cname_record.py @@ -0,0 +1,150 @@ +#!/usr/bin/python +# Copyright (c) 2018 Red Hat, Inc. +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'certified'} + + +DOCUMENTATION = ''' +--- +module: nios_cname_record +author: "Blair Rampling (@brampling)" +short_description: Configure Infoblox NIOS CNAME records +description: + - Adds and/or removes instances of CNAME record objects from + Infoblox NIOS servers. This module manages NIOS C(record:cname) objects + using the Infoblox WAPI interface over REST. +requirements: + - infoblox-client +extends_documentation_fragment: +- community.general.nios + +options: + name: + description: + - Specifies the fully qualified hostname to add or remove from + the system + required: true + view: + description: + - Sets the DNS view to associate this CNAME record with. The DNS + view must already be configured on the system + required: true + default: default + aliases: + - dns_view + canonical: + description: + - Configures the canonical name for this CNAME record. + required: true + aliases: + - cname + ttl: + description: + - Configures the TTL to be associated with this CNAME record + extattrs: + description: + - Allows for the configuration of Extensible Attributes on the + instance of the object. This argument accepts a set of key / value + pairs for configuration. + comment: + description: + - Configures a text string comment to be associated with the instance + of this object. The provided text string will be configured on the + object instance. + state: + description: + - Configures the intended state of the instance of the object on + the NIOS server. When this value is set to C(present), the object + is configured on the device and when this value is set to C(absent) + the value is removed (if necessary) from the device. 
+ default: present + choices: + - present + - absent +''' + +EXAMPLES = ''' +- name: configure a CNAME record + nios_cname_record: + name: cname.ansible.com + canonical: realhost.ansible.com + state: present + provider: + host: "{{ inventory_hostname_short }}" + username: admin + password: admin + connection: local + +- name: add a comment to an existing CNAME record + nios_cname_record: + name: cname.ansible.com + canonical: realhost.ansible.com + comment: this is a test comment + state: present + provider: + host: "{{ inventory_hostname_short }}" + username: admin + password: admin + connection: local + +- name: remove a CNAME record from the system + nios_cname_record: + name: cname.ansible.com + canonical: realhost.ansible.com + state: absent + provider: + host: "{{ inventory_hostname_short }}" + username: admin + password: admin + connection: local +''' + +RETURN = ''' # ''' + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.six import iteritems +from ansible_collections.community.general.plugins.module_utils.net_tools.nios.api import WapiModule +from ansible_collections.community.general.plugins.module_utils.net_tools.nios.api import NIOS_CNAME_RECORD + + +def main(): + ''' Main entry point for module execution + ''' + + ib_spec = dict( + name=dict(required=True, ib_req=True), + view=dict(default='default', aliases=['dns_view'], ib_req=True), + + canonical=dict(aliases=['cname'], ib_req=True), + + ttl=dict(type='int'), + + extattrs=dict(type='dict'), + comment=dict(), + ) + + argument_spec = dict( + provider=dict(required=True), + state=dict(default='present', choices=['present', 'absent']) + ) + + argument_spec.update(ib_spec) + argument_spec.update(WapiModule.provider_spec) + + module = AnsibleModule(argument_spec=argument_spec, + supports_check_mode=True) + + wapi = WapiModule(module) + result = wapi.run(NIOS_CNAME_RECORD, ib_spec) + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/net_tools/nios/nios_dns_view.py b/plugins/modules/net_tools/nios/nios_dns_view.py new file mode 100644 index 0000000000..e569f7df40 --- /dev/null +++ b/plugins/modules/net_tools/nios/nios_dns_view.py @@ -0,0 +1,145 @@ +#!/usr/bin/python +# Copyright (c) 2018 Red Hat, Inc. +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'certified'} + + +DOCUMENTATION = ''' +--- +module: nios_dns_view +author: "Peter Sprygada (@privateip)" +short_description: Configure Infoblox NIOS DNS views +description: + - Adds and/or removes instances of DNS view objects from + Infoblox NIOS servers. This module manages NIOS C(view) objects + using the Infoblox WAPI interface over REST. + - Updates instances of DNS view object from Infoblox NIOS servers. +requirements: + - infoblox-client +extends_documentation_fragment: +- community.general.nios + +options: + name: + description: + - Specifies the fully qualified hostname to add or remove from + the system. User can also update the hostname as it is possible + to pass a dict containing I(new_name), I(old_name). See examples. + required: true + aliases: + - view + network_view: + description: + - Specifies the name of the network view to assign the configured + DNS view to. The network view must already be configured on the + target system. 
+ required: true + default: default + extattrs: + description: + - Allows for the configuration of Extensible Attributes on the + instance of the object. This argument accepts a set of key / value + pairs for configuration. + required: false + comment: + description: + - Configures a text string comment to be associated with the instance + of this object. The provided text string will be configured on the + object instance. + required: false + state: + description: + - Configures the intended state of the instance of the object on + the NIOS server. When this value is set to C(present), the object + is configured on the device and when this value is set to C(absent) + the value is removed (if necessary) from the device. + required: false + default: present + choices: + - present + - absent +''' + +EXAMPLES = ''' +- name: configure a new dns view instance + nios_dns_view: + name: ansible-dns + state: present + provider: + host: "{{ inventory_hostname_short }}" + username: admin + password: admin + connection: local +- name: update the comment for dns view + nios_dns_view: + name: ansible-dns + comment: this is an example comment + state: present + provider: + host: "{{ inventory_hostname_short }}" + username: admin + password: admin + connection: local +- name: remove the dns view instance + nios_dns_view: + name: ansible-dns + state: absent + provider: + host: "{{ inventory_hostname_short }}" + username: admin + password: admin + connection: local +- name: update the dns view instance + nios_dns_view: + name: {new_name: ansible-dns-new, old_name: ansible-dns} + state: present + provider: + host: "{{ inventory_hostname_short }}" + username: admin + password: admin + connection: local +''' + +RETURN = ''' # ''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.net_tools.nios.api import WapiModule +from ansible_collections.community.general.plugins.module_utils.net_tools.nios.api import NIOS_DNS_VIEW + + +def main(): + ''' Main entry point for module execution + ''' + ib_spec = dict( + name=dict(required=True, aliases=['view'], ib_req=True), + network_view=dict(default='default', ib_req=True), + + extattrs=dict(type='dict'), + comment=dict() + ) + + argument_spec = dict( + provider=dict(required=True), + state=dict(default='present', choices=['present', 'absent']) + ) + + argument_spec.update(ib_spec) + argument_spec.update(WapiModule.provider_spec) + + module = AnsibleModule(argument_spec=argument_spec, + supports_check_mode=True) + + wapi = WapiModule(module) + result = wapi.run(NIOS_DNS_VIEW, ib_spec) + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/net_tools/nios/nios_fixed_address.py b/plugins/modules/net_tools/nios/nios_fixed_address.py new file mode 100644 index 0000000000..255a18b8cf --- /dev/null +++ b/plugins/modules/net_tools/nios/nios_fixed_address.py @@ -0,0 +1,270 @@ +#!/usr/bin/python +# Copyright (c) 2018 Red Hat, Inc. 
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'certified'}
+
+
+DOCUMENTATION = '''
+---
+module: nios_fixed_address
+author: "Sumit Jaiswal (@sjaiswal)"
+short_description: Configure Infoblox NIOS DHCP Fixed Address
+description:
+  - A fixed address is a specific IP address that a DHCP server
+    always assigns when a lease request comes from a particular
+    MAC address of the client.
+  - Supports both IPV4 and IPV6 internet protocols.
+requirements:
+  - infoblox-client
+extends_documentation_fragment:
+- community.general.nios
+
+options:
+  name:
+    description:
+      - Specifies the hostname under which the fixed DHCP IP address is
+        stored for the respective MAC address.
+    required: false
+  ipaddr:
+    description:
+      - IPV4/V6 address of the fixed address.
+    required: true
+  mac:
+    description:
+      - The MAC address of the interface.
+    required: true
+  network:
+    description:
+      - Specifies the network range in which ipaddr exists.
+    aliases:
+      - network
+  network_view:
+    description:
+      - Configures the name of the network view to associate with this
+        configured instance.
+    required: false
+    default: default
+  options:
+    description:
+      - Configures the set of DHCP options to be included as part of
+        the configured network instance.  This argument accepts a list
+        of values (see suboptions).  When configuring suboptions at
+        least one of C(name) or C(num) must be specified.
+    suboptions:
+      name:
+        description:
+          - The name of the DHCP option to configure
+      num:
+        description:
+          - The number of the DHCP option to configure
+      value:
+        description:
+          - The value of the DHCP option specified by C(name)
+        required: true
+      use_option:
+        description:
+          - Only applies to a subset of options (see NIOS API documentation)
+        type: bool
+        default: 'yes'
+      vendor_class:
+        description:
+          - The name of the space this DHCP option is associated to
+        default: DHCP
+  extattrs:
+    description:
+      - Allows for the configuration of Extensible Attributes on the
+        instance of the object.  This argument accepts a set of key / value
+        pairs for configuration.
+  comment:
+    description:
+      - Configures a text string comment to be associated with the instance
+        of this object.  The provided text string will be configured on the
+        object instance.
+  state:
+    description:
+      - Configures the intended state of the instance of the object on
+        the NIOS server.  When this value is set to C(present), the object
+        is configured on the device and when this value is set to C(absent)
+        the value is removed (if necessary) from the device.
+    default: present
+    choices:
+      - present
+      - absent
+'''
+
+EXAMPLES = '''
+- name: configure an ipv4 dhcp fixed address
+  nios_fixed_address:
+    name: ipv4_fixed
+    ipaddr: 192.168.10.1
+    mac: 08:6d:41:e8:fd:e8
+    network: 192.168.10.0/24
+    network_view: default
+    comment: this is a test comment
+    state: present
+    provider:
+      host: "{{ inventory_hostname_short }}"
+      username: admin
+      password: admin
+  connection: local
+- name: configure an ipv6 dhcp fixed address
+  nios_fixed_address:
+    name: ipv6_fixed
+    ipaddr: fe80::1/10
+    mac: 08:6d:41:e8:fd:e8
+    network: fe80::/64
+    network_view: default
+    comment: this is a test comment
+    state: present
+    provider:
+      host: "{{ inventory_hostname_short }}"
+      username: admin
+      password: admin
+  connection: local
+- name: set dhcp options for an ipv4 fixed address
+  nios_fixed_address:
+    name: ipv4_fixed
+    ipaddr: 192.168.10.1
+    mac: 08:6d:41:e8:fd:e8
+    network: 192.168.10.0/24
+    network_view: default
+    comment: this is a test comment
+    options:
+      - name: domain-name
+        value: ansible.com
+    state: present
+    provider:
+      host: "{{ inventory_hostname_short }}"
+      username: admin
+      password: admin
+  connection: local
+- name: remove an ipv4 dhcp fixed address
+  nios_fixed_address:
+    name: ipv4_fixed
+    ipaddr: 192.168.10.1
+    mac: 08:6d:41:e8:fd:e8
+    network: 192.168.10.0/24
+    network_view: default
+    state: absent
+    provider:
+      host: "{{ inventory_hostname_short }}"
+      username: admin
+      password: admin
+  connection: local
+'''
+
+RETURN = ''' # '''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six import iteritems
+from ansible_collections.community.general.plugins.module_utils.net_tools.nios.api import WapiModule
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import validate_ip_address, validate_ip_v6_address
+from ansible_collections.community.general.plugins.module_utils.net_tools.nios.api import NIOS_IPV4_FIXED_ADDRESS, NIOS_IPV6_FIXED_ADDRESS
+
+
+def options(module):
+    ''' Transforms the module argument into a valid WAPI struct
+    This function will transform the options argument into a structure that
+    is a valid WAPI structure in the format of:
+        {
+            name: <value>,
+            num: <value>,
+            value: <value>,
+            use_option: <value>,
+            vendor_class: <value>
+        }
+    It will remove any options that are set to None since WAPI will error on
+    that condition.  The use_option field only applies
+    to special options that are displayed separately from other options and
+    have a use flag.  This function removes the use_option flag from all
+    other options.  It will also verify that either `name` or `num` is
+    set in the structure but does not validate the values are equal.
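+    For illustration only (the values below are assumed, not taken from a
+    live system), an input item of
+        {'name': 'nis-domain', 'num': None, 'value': 'ansible.com',
+         'use_option': True, 'vendor_class': 'DHCP'}
+    is transformed into
+        {'name': 'nis-domain', 'value': 'ansible.com', 'vendor_class': 'DHCP'}
+    because `num` is None and `nis-domain` is not a special option, so its
+    `use_option` flag is removed; for a special option such as `domain-name`
+    the flag would be kept.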
+    The remainder of the value validation is performed by WAPI
+    '''
+    special_options = ['routers', 'router-templates', 'domain-name-servers',
+                       'domain-name', 'broadcast-address', 'broadcast-address-offset',
+                       'dhcp-lease-time', 'dhcp6.name-servers']
+    options = list()
+    for item in module.params['options']:
+        opt = dict([(k, v) for k, v in iteritems(item) if v is not None])
+        if 'name' not in opt and 'num' not in opt:
+            module.fail_json(msg='one of `name` or `num` is required for option value')
+        # opt.get() so that options given only by `num` do not raise a KeyError
+        if opt.get('name') not in special_options:
+            del opt['use_option']
+        options.append(opt)
+    return options
+
+
+def validate_ip_addr_type(ip, arg_spec, module):
+    '''This function will check if the argument ip is type v4/v6 and return appropriate infoblox network type
+    '''
+    check_ip = ip.split('/')
+
+    if validate_ip_address(check_ip[0]) and 'ipaddr' in arg_spec:
+        arg_spec['ipv4addr'] = arg_spec.pop('ipaddr')
+        module.params['ipv4addr'] = module.params.pop('ipaddr')
+        return NIOS_IPV4_FIXED_ADDRESS, arg_spec, module
+    elif validate_ip_v6_address(check_ip[0]) and 'ipaddr' in arg_spec:
+        arg_spec['ipv6addr'] = arg_spec.pop('ipaddr')
+        module.params['ipv6addr'] = module.params.pop('ipaddr')
+        return NIOS_IPV6_FIXED_ADDRESS, arg_spec, module
+
+
+def main():
+    ''' Main entry point for module execution
+    '''
+    option_spec = dict(
+        # one of name or num is required; enforced by the function options()
+        name=dict(),
+        num=dict(type='int'),
+
+        value=dict(required=True),
+
+        use_option=dict(type='bool', default=True),
+        vendor_class=dict(default='DHCP')
+    )
+
+    ib_spec = dict(
+        name=dict(required=True),
+        ipaddr=dict(required=True, aliases=['ipaddr'], ib_req=True),
+        mac=dict(required=True, aliases=['mac'], ib_req=True),
+        network=dict(required=True, aliases=['network']),
+        network_view=dict(default='default', aliases=['network_view']),
+
+        options=dict(type='list', elements='dict', options=option_spec, transform=options),
+
+        extattrs=dict(type='dict'),
+        comment=dict()
+    )
+
+    argument_spec = dict(
+        provider=dict(required=True),
+        state=dict(default='present', choices=['present', 'absent'])
+    )
+
+    argument_spec.update(ib_spec)
+    argument_spec.update(WapiModule.provider_spec)
+
+    module = AnsibleModule(argument_spec=argument_spec,
+                           supports_check_mode=True)
+
+    # to get the argument ipaddr
+    obj_filter = dict([(k, module.params[k]) for k, v in iteritems(ib_spec) if v.get('ib_req')])
+    # to modify argument based on ipaddr type i.e. IPV4/IPV6
+    fixed_address_ip_type, ib_spec, module = validate_ip_addr_type(obj_filter['ipaddr'], ib_spec, module)
+
+    wapi = WapiModule(module)
+
+    result = wapi.run(fixed_address_ip_type, ib_spec)
+
+    module.exit_json(**result)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/net_tools/nios/nios_host_record.py b/plugins/modules/net_tools/nios/nios_host_record.py
new file mode 100644
index 0000000000..5e16b74f18
--- /dev/null
+++ b/plugins/modules/net_tools/nios/nios_host_record.py
@@ -0,0 +1,344 @@
+#!/usr/bin/python
+# Copyright (c) 2018 Red Hat, Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'certified'}
+
+
+DOCUMENTATION = '''
+---
+module: nios_host_record
+author: "Peter Sprygada (@privateip)"
+short_description: Configure Infoblox NIOS host records
+description:
+  - Adds and/or removes instances of host record objects from
+    Infoblox NIOS servers.  This module manages NIOS C(record:host) objects
+    using the Infoblox WAPI interface over REST.
+  - Updates instances of host record object from Infoblox NIOS servers.
+requirements:
+  - infoblox-client
+extends_documentation_fragment:
+- community.general.nios
+
+options:
+  name:
+    description:
+      - Specifies the fully qualified hostname to add or remove from
+        the system.  The user can also update the hostname as it is possible
+        to pass a dict containing I(new_name), I(old_name). See examples.
+    required: true
+  view:
+    description:
+      - Sets the DNS view to associate this host record with.  The DNS
+        view must already be configured on the system
+    required: true
+    default: default
+    aliases:
+      - dns_view
+  configure_for_dns:
+    description:
+      - Sets whether DNS is configured for this host record.  If the user
+        needs to bypass DNS, this value can be set to false.
+    type: bool
+    required: false
+    default: true
+    aliases:
+      - dns
+  ipv4addrs:
+    description:
+      - Configures the IPv4 addresses for this host record.  This argument
+        accepts a list of values (see suboptions)
+    aliases:
+      - ipv4
+    suboptions:
+      ipv4addr:
+        description:
+          - Configures the IPv4 address for the host record. Users can dynamically
+            allocate an IPv4 address to a host record by passing a dictionary
+            containing I(nios_next_ip) and the I(CIDR network range).  If the user
+            wants to add or remove an IPv4 address from an existing record,
+            the I(add)/I(remove) params need to be used. See examples.
+        required: true
+        aliases:
+          - address
+      configure_for_dhcp:
+        description:
+          - Configures the host record over DHCP instead of DNS.  If set
+            to true, the MAC address must be provided.
+        required: false
+        aliases:
+          - dhcp
+      mac:
+        description:
+          - Configures the hardware MAC address for the host record.  If DHCP
+            is set to true, the MAC address must be provided.
+        required: false
+        aliases:
+          - mac
+      add:
+        description:
+          - If the user wants to add an IPv4 address to an existing host record.
+            Note that with I(add) the user will have to keep the I(state) as
+            I(present), as the new IP address is allocated to the existing host
+            record. See examples.
+        type: bool
+        required: false
+        aliases:
+          - add
+      remove:
+        description:
+          - If the user wants to remove an IPv4 address from an existing host record.
+            Note that with I(remove) the user will have to change the I(state) to
+            I(absent), as the IP address is de-allocated from the existing host
+            record. See examples.
+        type: bool
+        required: false
+        aliases:
+          - remove
+  ipv6addrs:
+    description:
+      - Configures the IPv6 addresses for the host record.
+        This argument accepts a list of values (see suboptions)
+    aliases:
+      - ipv6
+    suboptions:
+      ipv6addr:
+        description:
+          - Configures the IPv6 address for the host record
+        required: true
+        aliases:
+          - address
+      configure_for_dhcp:
+        description:
+          - Configures the host record over DHCP instead of DNS.  If set
+            to true, the MAC address must be provided.
+        required: false
+        aliases:
+          - dhcp
+  aliases:
+    description:
+      - Configures an optional list of additional aliases to add to the host
+        record. These are equivalent to CNAMEs but held within a host
+        record. Must be in list format.
+  ttl:
+    description:
+      - Configures the TTL to be associated with this host record
+  extattrs:
+    description:
+      - Allows for the configuration of Extensible Attributes on the
+        instance of the object.  This argument accepts a set of key / value
+        pairs for configuration.
+  comment:
+    description:
+      - Configures a text string comment to be associated with the instance
+        of this object.  The provided text string will be configured on the
+        object instance.
+  state:
+    description:
+      - Configures the intended state of the instance of the object on
+        the NIOS server.  When this value is set to C(present), the object
+        is configured on the device and when this value is set to C(absent)
+        the value is removed (if necessary) from the device.
+    default: present
+    choices:
+      - present
+      - absent
+'''
+
+EXAMPLES = '''
+- name: configure an ipv4 host record
+  nios_host_record:
+    name: host.ansible.com
+    ipv4:
+      - address: 192.168.10.1
+    aliases:
+      - cname.ansible.com
+    state: present
+    provider:
+      host: "{{ inventory_hostname_short }}"
+      username: admin
+      password: admin
+  connection: local
+- name: add a comment to an existing host record
+  nios_host_record:
+    name: host.ansible.com
+    ipv4:
+      - address: 192.168.10.1
+    comment: this is a test comment
+    state: present
+    provider:
+      host: "{{ inventory_hostname_short }}"
+      username: admin
+      password: admin
+  connection: local
+- name: remove a host record from the system
+  nios_host_record:
+    name: host.ansible.com
+    state: absent
+    provider:
+      host: "{{ inventory_hostname_short }}"
+      username: admin
+      password: admin
+  connection: local
+- name: update an ipv4 host record
+  nios_host_record:
+    name: {new_name: host-new.ansible.com, old_name: host.ansible.com}
+    ipv4:
+      - address: 192.168.10.1
+    state: present
+    provider:
+      host: "{{ inventory_hostname_short }}"
+      username: admin
+      password: admin
+  connection: local
+- name: create an ipv4 host record bypassing DNS
+  nios_host_record:
+    name: new_host
+    ipv4:
+      - address: 192.168.10.1
+    dns: false
+    state: present
+    provider:
+      host: "{{ inventory_hostname_short }}"
+      username: admin
+      password: admin
+  connection: local
+- name: create an ipv4 host record over DHCP
+  nios_host_record:
+    name: host.ansible.com
+    ipv4:
+      - address: 192.168.10.1
+        dhcp: true
+        mac: 00-80-C8-E3-4C-BD
+    state: present
+    provider:
+      host: "{{ inventory_hostname_short }}"
+      username: admin
+      password: admin
+  connection: local
+- name: dynamically add host record to next available ip
+  nios_host_record:
+    name: host.ansible.com
+    ipv4:
+      - address: {nios_next_ip: 192.168.10.0/24}
+    comment: this is a test comment
+    state: present
+    provider:
+      host: "{{ inventory_hostname_short }}"
+      username: admin
+      password: admin
+  connection: local
+- name: add an ip to a host record
+  nios_host_record:
+    name: host.ansible.com
+    ipv4:
+      - address: 192.168.10.2
+        add: true
+    state: present
+    provider:
+      host: "{{ inventory_hostname_short }}"
+      username: admin
+      password: admin
+  connection: local
+- name: remove an ip from a host record
+  nios_host_record:
+    name: host.ansible.com
+    ipv4:
+      - address: 192.168.10.1
+        remove: true
+    state: absent
+    provider:
+      host: "{{ inventory_hostname_short }}"
+      username: admin
+      password: admin
+  connection: local
+'''
+
+RETURN = ''' # '''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six import iteritems
+from ansible_collections.community.general.plugins.module_utils.net_tools.nios.api import WapiModule
+from ansible_collections.community.general.plugins.module_utils.net_tools.nios.api import NIOS_HOST_RECORD
+
+
+def ipaddr(module, key, filtered_keys=None):
+    ''' Transforms the input value into a struct supported by WAPI
+    This function will transform the input from the playbook into a struct
+    that is valid for WAPI in the form of:
+        {
+            ipv4addr: <value>,
+            mac: <value>
+        }
+    This function does not validate the values are properly formatted or in
+    the acceptable range, that is left to WAPI.
+    '''
+    filtered_keys = filtered_keys or list()
+    objects = list()
+    for item in module.params[key]:
+        objects.append(dict([(k, v) for k, v in iteritems(item) if v is not None and k not in filtered_keys]))
+    return objects
+
+
+def ipv4addrs(module):
+    return ipaddr(module, 'ipv4addrs', filtered_keys=['address', 'dhcp'])
+
+
+def ipv6addrs(module):
+    return ipaddr(module, 'ipv6addrs', filtered_keys=['address', 'dhcp'])
+
+
+def main():
+    ''' Main entry point for module execution
+    '''
+    ipv4addr_spec = dict(
+        ipv4addr=dict(required=True, aliases=['address'], ib_req=True),
+        configure_for_dhcp=dict(type='bool', required=False, aliases=['dhcp'], ib_req=True),
+        mac=dict(required=False, aliases=['mac'], ib_req=True),
+        add=dict(type='bool', aliases=['add'], required=False),
+        remove=dict(type='bool', aliases=['remove'], required=False)
+    )
+
+    ipv6addr_spec = dict(
+        ipv6addr=dict(required=True, aliases=['address'], ib_req=True),
+        configure_for_dhcp=dict(type='bool', required=False, aliases=['configure_for_dhcp'], ib_req=True),
+        mac=dict(required=False, aliases=['mac'], ib_req=True)
+    )
+
+    ib_spec = dict(
+        name=dict(required=True, ib_req=True),
+        view=dict(default='default', aliases=['dns_view'], ib_req=True),
+
+        ipv4addrs=dict(type='list', aliases=['ipv4'], elements='dict', options=ipv4addr_spec, transform=ipv4addrs),
+        ipv6addrs=dict(type='list', aliases=['ipv6'], elements='dict', options=ipv6addr_spec, transform=ipv6addrs),
+        configure_for_dns=dict(type='bool', default=True, required=False, aliases=['dns'], ib_req=True),
+        aliases=dict(type='list'),
+
+        ttl=dict(type='int'),
+
+        extattrs=dict(type='dict'),
+        comment=dict(),
+    )
+
+    argument_spec = dict(
+        provider=dict(required=True),
+        state=dict(default='present', choices=['present', 'absent'])
+    )
+
+    argument_spec.update(ib_spec)
+    argument_spec.update(WapiModule.provider_spec)
+
+    module = AnsibleModule(argument_spec=argument_spec,
+                           supports_check_mode=True)
+
+    wapi = WapiModule(module)
+    result = wapi.run(NIOS_HOST_RECORD, ib_spec)
+
+    module.exit_json(**result)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/net_tools/nios/nios_member.py b/plugins/modules/net_tools/nios/nios_member.py
new file mode 100644
index 0000000000..ae5a95bfeb
--- /dev/null
+++ b/plugins/modules/net_tools/nios/nios_member.py
@@ -0,0 +1,493 @@
+#!/usr/bin/python
+# Copyright (c) 2018 Red Hat, Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'certified'}
+
+DOCUMENTATION = '''
+---
+module: nios_member
+author: "Krishna Vasudevan (@krisvasudevan)"
+short_description: Configure Infoblox NIOS members
+description:
+  - Adds and/or removes Infoblox NIOS servers.  This module manages NIOS C(member) objects using the Infoblox WAPI interface over REST.
+requirements:
+  - infoblox-client
+extends_documentation_fragment:
+- community.general.nios
+
+options:
+  host_name:
+    description:
+      - Specifies the host name of the member to either add or remove from
+        the NIOS instance.
+    required: true
+    aliases:
+      - name
+  vip_setting:
+    description:
+      - Configures the network settings for the grid member.
+    required: true
+    suboptions:
+      address:
+        description:
+          - The IPv4 Address of the Grid Member
+      subnet_mask:
+        description:
+          - The subnet mask for the Grid Member
+      gateway:
+        description:
+          - The default gateway for the Grid Member
+  ipv6_setting:
+    description:
+      - Configures the IPv6 settings for the grid member.
+    required: true
+    suboptions:
+      virtual_ip:
+        description:
+          - The IPv6 Address of the Grid Member
+      cidr_prefix:
+        description:
+          - The IPv6 CIDR prefix for the Grid Member
+      gateway:
+        description:
+          - The gateway address for the Grid Member
+  config_addr_type:
+    description:
+      - Address configuration type (IPV4/IPV6/BOTH)
+    default: IPV4
+  comment:
+    description:
+      - A descriptive comment of the Grid member.
+  extattrs:
+    description:
+      - Extensible attributes associated with the object.
+  enable_ha:
+    description:
+      - If set to True, the member has two physical nodes (HA pair).
+    type: bool
+  router_id:
+    description:
+      - Virtual router identifier. Provide this ID if C(enable_ha) is set to "true". This is a unique VRID number (from 1 to 255) for the local subnet.
+  lan2_enabled:
+    description:
+      - When set to "true", the LAN2 port is enabled as an independent port or as a port for failover purposes.
+    type: bool
+  lan2_port_setting:
+    description:
+      - Settings for the Grid member LAN2 port if 'lan2_enabled' is set to "true".
+    suboptions:
+      enabled:
+        description:
+          - If set to True, then it has its own IP settings.
+        type: bool
+      network_setting:
+        description:
+          - If the 'enabled' field is set to True, this defines IPv4 network settings for LAN2.
+        suboptions:
+          address:
+            description:
+              - The IPv4 Address of LAN2
+          subnet_mask:
+            description:
+              - The subnet mask of LAN2
+          gateway:
+            description:
+              - The default gateway of LAN2
+      v6_network_setting:
+        description:
+          - If the 'enabled' field is set to True, this defines IPv6 network settings for LAN2.
+        suboptions:
+          virtual_ip:
+            description:
+              - The IPv6 Address of LAN2
+          cidr_prefix:
+            description:
+              - The IPv6 CIDR prefix of LAN2
+          gateway:
+            description:
+              - The gateway address of LAN2
+  platform:
+    description:
+      - Configures the Hardware Platform.
+    default: INFOBLOX
+  node_info:
+    description:
+      - Configures the node information list with detailed status report on the operations of the Grid Member.
+    suboptions:
+      lan2_physical_setting:
+        description:
+          - Physical port settings for the LAN2 interface.
+        suboptions:
+          auto_port_setting_enabled:
+            description:
+              - Enable or disable the auto port setting.
+            type: bool
+          duplex:
+            description:
+              - The port duplex; if speed is 1000, duplex must be FULL.
+          speed:
+            description:
+              - The port speed; if speed is 1000, duplex is FULL.
+      lan_ha_port_setting:
+        description:
+          - LAN/HA port settings for the node.
+        suboptions:
+          ha_ip_address:
+            description:
+              - HA IP address.
+          ha_port_setting:
+            description:
+              - Physical port settings for the HA interface.
+            suboptions:
+              auto_port_setting_enabled:
+                description:
+                  - Enable or disable the auto port setting.
+                type: bool
+              duplex:
+                description:
+                  - The port duplex; if speed is 1000, duplex must be FULL.
+              speed:
+                description:
+                  - The port speed; if speed is 1000, duplex is FULL.
+          lan_port_setting:
+            description:
+              - Physical port settings for the LAN interface.
+            suboptions:
+              auto_port_setting_enabled:
+                description:
+                  - Enable or disable the auto port setting.
+                type: bool
+              duplex:
+                description:
+                  - The port duplex; if speed is 1000, duplex must be FULL.
+              speed:
+                description:
+                  - The port speed; if speed is 1000, duplex is FULL.
+          mgmt_ipv6addr:
+            description:
+              - Public IPv6 address for the LAN1 interface.
+          mgmt_lan:
+            description:
+              - Public IPv4 address for the LAN1 interface.
+      mgmt_network_setting:
+        description:
+          - Network settings for the MGMT port of the node.
+        suboptions:
+          address:
+            description:
+              - The IPv4 Address of MGMT
+          subnet_mask:
+            description:
+              - The subnet mask of MGMT
+          gateway:
+            description:
+              - The default gateway of MGMT
+      v6_mgmt_network_setting:
+        description:
+          - The network settings for the IPv6 MGMT port of the node.
+        suboptions:
+          virtual_ip:
+            description:
+              - The IPv6 Address of MGMT
+          cidr_prefix:
+            description:
+              - The IPv6 CIDR prefix of MGMT
+          gateway:
+            description:
+              - The gateway address of MGMT
+  mgmt_port_setting:
+    description:
+      - Settings for the member MGMT port.
+    suboptions:
+      enabled:
+        description:
+          - Determines if MGMT port settings should be enabled.
+        type: bool
+      security_access_enabled:
+        description:
+          - Determines if security access on the MGMT port is enabled or not.
+        type: bool
+      vpn_enabled:
+        description:
+          - Determines if VPN on the MGMT port is enabled or not.
+        type: bool
+  upgrade_group:
+    description:
+      - The name of the upgrade group to which this Grid member belongs.
+    default: Default
+  use_syslog_proxy_setting:
+    description:
+      - Use flag for external_syslog_server_enable, syslog_servers, syslog_proxy_setting, syslog_size
+    type: bool
+  external_syslog_server_enable:
+    description:
+      - Determines if external syslog servers should be enabled
+    type: bool
+  syslog_servers:
+    description:
+      - The list of external syslog servers.
+    suboptions:
+      address:
+        description:
+          - The server address.
+      category_list:
+        description:
+          - The list of all syslog logging categories.
+      connection_type:
+        description:
+          - The connection type for communicating with this server (STCP/TCP/UDP).
+        default: UDP
+      local_interface:
+        description:
+          - The local interface through which the appliance sends syslog messages to the syslog server (ANY/LAN/MGMT).
+        default: ANY
+      message_node_id:
+        description:
+          - Identify the node in the syslog message (HOSTNAME/IP_HOSTNAME/LAN/MGMT).
+        default: LAN
+      message_source:
+        description:
+          - The source of syslog messages to be sent to the external syslog server.
+        default: ANY
+      only_category_list:
+        description:
+          - The list of selected syslog logging categories. The appliance forwards syslog messages that belong to the selected categories.
+ type: bool + port: + description: + - The port this server listens on. + default: 514 + severity: + description: + - The severity filter. The appliance sends log messages of the specified severity and above to the external syslog server. + default: DEBUG + pre_provisioning: + description: + - Pre-provisioning information. + suboptions: + hardware_info: + description: + - An array of structures that describe the hardware being pre-provisioned. + suboptions: + hwmodel: + description: + - Hardware model + hwtype: + description: + - Hardware type. + licenses: + description: + - An array of license types. + create_token: + description: + - Flag for initiating a create token request for pre-provisioned members. + type: bool + default: False + state: + description: + - Configures the intended state of the instance of the object on + the NIOS server. When this value is set to C(present), the object + is configured on the device and when this value is set to C(absent) + the value is removed (if necessary) from the device. + default: present + choices: + - present + - absent +''' + +EXAMPLES = ''' +- name: add a member to the grid with IPv4 address + nios_member: + host_name: member01.localdomain + vip_setting: + - address: 192.168.1.100 + subnet_mask: 255.255.255.0 + gateway: 192.168.1.1 + config_addr_type: IPV4 + platform: VNIOS + comment: "Created by Ansible" + state: present + provider: + host: "{{ inventory_hostname_short }}" + username: admin + password: admin + connection: local +- name: add a HA member to the grid + nios_member: + host_name: memberha.localdomain + vip_setting: + - address: 192.168.1.100 + subnet_mask: 255.255.255.0 + gateway: 192.168.1.1 + config_addr_type: IPV4 + platform: VNIOS + enable_ha: true + router_id: 150 + node_info: + - lan_ha_port_setting: + - ha_ip_address: 192.168.1.70 + mgmt_lan: 192.168.1.80 + - lan_ha_port_setting: + - ha_ip_address: 192.168.1.71 + mgmt_lan: 192.168.1.81 + comment: "Created by Ansible" + state: present + provider: + host: "{{ inventory_hostname_short }}" + username: admin + password: admin + connection: local +- name: update the member with pre-provisioning details specified + nios_member: + name: member01.localdomain + pre_provisioning: + - hardware_info: + - hwmodel: IB-VM-820 + hwtype: IB-VNIOS + licenses: + - dns + - dhcp + - enterprise + - vnios + comment: "Updated by Ansible" + state: present + provider: + host: "{{ inventory_hostname_short }}" + username: admin + password: admin + connection: local +- name: remove the member + nios_member: + name: member01.localdomain + state: absent + provider: + host: "{{ inventory_hostname_short }}" + username: admin + password: admin + connection: local +''' + +RETURN = ''' # ''' + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.six import iteritems +from ansible_collections.community.general.plugins.module_utils.net_tools.nios.api import WapiModule +from ansible_collections.community.general.plugins.module_utils.net_tools.nios.api import NIOS_MEMBER + + +def main(): + ''' Main entry point for module execution + ''' + ipv4_spec = dict( + address=dict(), + subnet_mask=dict(), + gateway=dict(), + ) + + ipv6_spec = dict( + virtual_ip=dict(), + cidr_prefix=dict(type='int'), + gateway=dict(), + ) + + port_spec = dict( + auto_port_setting_enabled=dict(type='bool'), + duplex=dict(), + speed=dict(), + ) + + lan2_port_spec = dict( + enabled=dict(type='bool'), + network_setting=dict(type='list', elements='dict', options=ipv4_spec), + v6_network_setting=dict(type='list', 
elements='dict', options=ipv6_spec), + ) + + ha_port_spec = dict( + ha_ip_address=dict(), + ha_port_setting=dict(type='list', elements='dict', options=port_spec), + lan_port_setting=dict(type='list', elements='dict', options=port_spec), + mgmt_lan=dict(), + mgmt_ipv6addr=dict(), + ) + + node_spec = dict( + lan2_physical_setting=dict(type='list', elements='dict', options=port_spec), + lan_ha_port_setting=dict(type='list', elements='dict', options=ha_port_spec), + mgmt_network_setting=dict(type='list', elements='dict', options=ipv4_spec), + v6_mgmt_network_setting=dict(type='list', elements='dict', options=ipv6_spec), + ) + + mgmt_port_spec = dict( + enabled=dict(type='bool'), + security_access_enabled=dict(type='bool'), + vpn_enabled=dict(type='bool'), + ) + + syslog_spec = dict( + address=dict(), + category_list=dict(type='list'), + connection_type=dict(default='UDP'), + local_interface=dict(default='ANY'), + message_node_id=dict(default='LAN'), + message_source=dict(default='ANY'), + only_category_list=dict(type='bool'), + port=dict(type='int', default=514), + severity=dict(default='DEBUG'), + ) + + hw_spec = dict( + hwmodel=dict(), + hwtype=dict(), + ) + + pre_prov_spec = dict( + hardware_info=dict(type='list', elements='dict', options=hw_spec), + licenses=dict(type='list'), + ) + + ib_spec = dict( + host_name=dict(required=True, aliases=['name'], ib_req=True), + vip_setting=dict(type='list', elements='dict', options=ipv4_spec), + ipv6_setting=dict(type='list', elements='dict', options=ipv6_spec), + config_addr_type=dict(default='IPV4'), + comment=dict(), + enable_ha=dict(type='bool', default=False), + router_id=dict(type='int'), + lan2_enabled=dict(type='bool', default=False), + lan2_port_setting=dict(type='list', elements='dict', options=lan2_port_spec), + platform=dict(default='INFOBLOX'), + node_info=dict(type='list', elements='dict', options=node_spec), + mgmt_port_setting=dict(type='list', elements='dict', options=mgmt_port_spec), + upgrade_group=dict(default='Default'), + use_syslog_proxy_setting=dict(type='bool'), + external_syslog_server_enable=dict(type='bool'), + syslog_servers=dict(type='list', elements='dict', options=syslog_spec), + pre_provisioning=dict(type='list', elements='dict', options=pre_prov_spec), + extattrs=dict(type='dict'), + create_token=dict(type='bool', default=False), + ) + + argument_spec = dict( + provider=dict(required=True), + state=dict(default='present', choices=['present', 'absent']) + ) + + argument_spec.update(ib_spec) + argument_spec.update(WapiModule.provider_spec) + + module = AnsibleModule(argument_spec=argument_spec, + supports_check_mode=True) + + wapi = WapiModule(module) + result = wapi.run(NIOS_MEMBER, ib_spec) + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/net_tools/nios/nios_mx_record.py b/plugins/modules/net_tools/nios/nios_mx_record.py new file mode 100644 index 0000000000..2813b709f7 --- /dev/null +++ b/plugins/modules/net_tools/nios/nios_mx_record.py @@ -0,0 +1,158 @@ +#!/usr/bin/python +# Copyright (c) 2018 Red Hat, Inc. 
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'certified'}
+
+
+DOCUMENTATION = '''
+---
+module: nios_mx_record
+author: "Blair Rampling (@brampling)"
+short_description: Configure Infoblox NIOS MX records
+description:
+  - Adds and/or removes instances of MX record objects from
+    Infoblox NIOS servers.  This module manages NIOS C(record:mx) objects
+    using the Infoblox WAPI interface over REST.
+requirements:
+  - infoblox-client
+extends_documentation_fragment:
+- community.general.nios
+
+options:
+  name:
+    description:
+      - Specifies the fully qualified hostname to add or remove from
+        the system
+    required: true
+  view:
+    description:
+      - Sets the DNS view to associate this MX record with.  The DNS
+        view must already be configured on the system
+    required: true
+    default: default
+    aliases:
+      - dns_view
+  mail_exchanger:
+    description:
+      - Configures the mail exchanger FQDN for this MX record.
+    required: true
+    aliases:
+      - mx
+  preference:
+    description:
+      - Configures the preference (0-65535) for this MX record.
+    required: true
+  ttl:
+    description:
+      - Configures the TTL to be associated with this MX record
+  extattrs:
+    description:
+      - Allows for the configuration of Extensible Attributes on the
+        instance of the object.  This argument accepts a set of key / value
+        pairs for configuration.
+  comment:
+    description:
+      - Configures a text string comment to be associated with the instance
+        of this object.  The provided text string will be configured on the
+        object instance.
+  state:
+    description:
+      - Configures the intended state of the instance of the object on
+        the NIOS server.  When this value is set to C(present), the object
+        is configured on the device and when this value is set to C(absent)
+        the value is removed (if necessary) from the device.
+    default: present
+    choices:
+      - present
+      - absent
+'''
+
+EXAMPLES = '''
+- name: configure an MX record
+  nios_mx_record:
+    name: ansible.com
+    mx: mailhost.ansible.com
+    preference: 0
+    state: present
+    provider:
+      host: "{{ inventory_hostname_short }}"
+      username: admin
+      password: admin
+  connection: local
+
+- name: add a comment to an existing MX record
+  nios_mx_record:
+    name: ansible.com
+    mx: mailhost.ansible.com
+    preference: 0
+    comment: this is a test comment
+    state: present
+    provider:
+      host: "{{ inventory_hostname_short }}"
+      username: admin
+      password: admin
+  connection: local
+
+- name: remove an MX record from the system
+  nios_mx_record:
+    name: ansible.com
+    mx: mailhost.ansible.com
+    preference: 0
+    state: absent
+    provider:
+      host: "{{ inventory_hostname_short }}"
+      username: admin
+      password: admin
+  connection: local
+'''
+
+RETURN = ''' # '''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six import iteritems
+from ansible_collections.community.general.plugins.module_utils.net_tools.nios.api import WapiModule
+from ansible_collections.community.general.plugins.module_utils.net_tools.nios.api import NIOS_MX_RECORD
+
+
+def main():
+    ''' Main entry point for module execution
+    '''
+
+    ib_spec = dict(
+        name=dict(required=True, ib_req=True),
+        view=dict(default='default', aliases=['dns_view'], ib_req=True),
+
+        mail_exchanger=dict(aliases=['mx'], ib_req=True),
+        preference=dict(type='int', ib_req=True),
+
+        ttl=dict(type='int'),
+
+        extattrs=dict(type='dict'),
+        comment=dict(),
+    )
+
+    argument_spec = dict(
+        provider=dict(required=True),
+        state=dict(default='present', choices=['present', 'absent'])
+    )
+
+    argument_spec.update(ib_spec)
+    argument_spec.update(WapiModule.provider_spec)
+
+    module = AnsibleModule(argument_spec=argument_spec,
+                           supports_check_mode=True)
+
+    wapi = WapiModule(module)
+    result = wapi.run(NIOS_MX_RECORD, ib_spec)
+
+    module.exit_json(**result)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/net_tools/nios/nios_naptr_record.py b/plugins/modules/net_tools/nios/nios_naptr_record.py
new file mode 100644
index 0000000000..b4808f1c87
--- /dev/null
+++ b/plugins/modules/net_tools/nios/nios_naptr_record.py
@@ -0,0 +1,192 @@
+#!/usr/bin/python
+# Copyright (c) 2018 Red Hat, Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'certified'}
+
+
+DOCUMENTATION = '''
+---
+module: nios_naptr_record
+author: "Blair Rampling (@brampling)"
+short_description: Configure Infoblox NIOS NAPTR records
+description:
+  - Adds and/or removes instances of NAPTR record objects from
+    Infoblox NIOS servers.  This module manages NIOS C(record:naptr) objects
+    using the Infoblox WAPI interface over REST.
+requirements:
+  - infoblox_client
+extends_documentation_fragment:
+- community.general.nios
+
+options:
+  name:
+    description:
+      - Specifies the fully qualified hostname to add or remove from
+        the system
+    required: true
+  view:
+    description:
+      - Sets the DNS view to associate this NAPTR record with.  The DNS
+        view must already be configured on the system
+    required: true
+    default: default
+    aliases:
+      - dns_view
+  order:
+    description:
+      - Configures the order (0-65535) for this NAPTR record.
This parameter + specifies the order in which the NAPTR rules are applied when + multiple rules are present. + required: true + preference: + description: + - Configures the preference (0-65535) for this NAPTR record. The + preference field determines the order NAPTR records are processed + when multiple records with the same order parameter are present. + required: true + replacement: + description: + - Configures the replacement field for this NAPTR record. + For nonterminal NAPTR records, this field specifies the + next domain name to look up. + required: true + services: + description: + - Configures the services field (128 characters maximum) for this + NAPTR record. The services field contains protocol and service + identifiers, such as "http+E2U" or "SIPS+D2T". + required: false + flags: + description: + - Configures the flags field for this NAPTR record. These control the + interpretation of the fields for an NAPTR record object. Supported + values for the flags field are "U", "S", "P" and "A". + required: false + regexp: + description: + - Configures the regexp field for this NAPTR record. This is the + regular expression-based rewriting rule of the NAPTR record. This + should be a POSIX compliant regular expression, including the + substitution rule and flags. Refer to RFC 2915 for the field syntax + details. + required: false + ttl: + description: + - Configures the TTL to be associated with this NAPTR record + extattrs: + description: + - Allows for the configuration of Extensible Attributes on the + instance of the object. This argument accepts a set of key / value + pairs for configuration. + comment: + description: + - Configures a text string comment to be associated with the instance + of this object. The provided text string will be configured on the + object instance. + state: + description: + - Configures the intended state of the instance of the object on + the NIOS server. When this value is set to C(present), the object + is configured on the device and when this value is set to C(absent) + the value is removed (if necessary) from the device. 
+ default: present + choices: + - present + - absent +''' + +EXAMPLES = ''' +- name: configure a NAPTR record + nios_naptr_record: + name: '*.subscriber-100.ansiblezone.com' + order: 1000 + preference: 10 + replacement: replacement1.network.ansiblezone.com + state: present + provider: + host: "{{ inventory_hostname_short }}" + username: admin + password: admin + connection: local + +- name: add a comment to an existing NAPTR record + nios_naptr_record: + name: '*.subscriber-100.ansiblezone.com' + order: 1000 + preference: 10 + replacement: replacement1.network.ansiblezone.com + comment: this is a test comment + state: present + provider: + host: "{{ inventory_hostname_short }}" + username: admin + password: admin + connection: local + +- name: remove a NAPTR record from the system + nios_naptr_record: + name: '*.subscriber-100.ansiblezone.com' + order: 1000 + preference: 10 + replacement: replacement1.network.ansiblezone.com + state: absent + provider: + host: "{{ inventory_hostname_short }}" + username: admin + password: admin + connection: local +''' + +RETURN = ''' # ''' + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.six import iteritems +from ansible_collections.community.general.plugins.module_utils.net_tools.nios.api import WapiModule + + +def main(): + ''' Main entry point for module execution + ''' + + ib_spec = dict( + name=dict(required=True, ib_req=True), + view=dict(default='default', aliases=['dns_view'], ib_req=True), + + order=dict(type='int', ib_req=True), + preference=dict(type='int', ib_req=True), + replacement=dict(ib_req=True), + services=dict(), + flags=dict(), + regexp=dict(), + + ttl=dict(type='int'), + + extattrs=dict(type='dict'), + comment=dict(), + ) + + argument_spec = dict( + provider=dict(required=True), + state=dict(default='present', choices=['present', 'absent']) + ) + + argument_spec.update(ib_spec) + argument_spec.update(WapiModule.provider_spec) + + module = AnsibleModule(argument_spec=argument_spec, + supports_check_mode=True) + + wapi = WapiModule(module) + result = wapi.run('record:naptr', ib_spec) + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/net_tools/nios/nios_network.py b/plugins/modules/net_tools/nios/nios_network.py new file mode 100644 index 0000000000..bd67cd8510 --- /dev/null +++ b/plugins/modules/net_tools/nios/nios_network.py @@ -0,0 +1,298 @@ +#!/usr/bin/python +# Copyright (c) 2018 Red Hat, Inc. +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'certified'} + + +DOCUMENTATION = ''' +--- +module: nios_network +author: "Peter Sprygada (@privateip)" +short_description: Configure Infoblox NIOS network object +description: + - Adds and/or removes instances of network objects from + Infoblox NIOS servers. This module manages NIOS C(network) objects + using the Infoblox WAPI interface over REST. + - Supports both IPV4 and IPV6 internet protocols +requirements: + - infoblox-client +extends_documentation_fragment: +- community.general.nios + +options: + network: + description: + - Specifies the network to add or remove from the system. The value + should use CIDR notation. 
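+        For example, C(192.168.10.0/24) for an IPv4 network or C(fe80::/64)
+        for an IPv6 network (both taken from the examples below).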
+    required: true
+    aliases:
+      - name
+      - cidr
+  network_view:
+    description:
+      - Configures the name of the network view to associate with this
+        configured instance.
+    required: true
+    default: default
+  options:
+    description:
+      - Configures the set of DHCP options to be included as part of
+        the configured network instance.  This argument accepts a list
+        of values (see suboptions).  When configuring suboptions at
+        least one of C(name) or C(num) must be specified.
+    suboptions:
+      name:
+        description:
+          - The name of the DHCP option to configure. The standard options are
+            C(router), C(router-templates), C(domain-name-servers), C(domain-name),
+            C(broadcast-address), C(broadcast-address-offset), C(dhcp-lease-time),
+            and C(dhcp6.name-servers).
+      num:
+        description:
+          - The number of the DHCP option to configure
+      value:
+        description:
+          - The value of the DHCP option specified by C(name)
+        required: true
+      use_option:
+        description:
+          - Only applies to a subset of options (see NIOS API documentation)
+        type: bool
+        default: 'yes'
+      vendor_class:
+        description:
+          - The name of the space this DHCP option is associated to
+        default: DHCP
+  extattrs:
+    description:
+      - Allows for the configuration of Extensible Attributes on the
+        instance of the object.  This argument accepts a set of key / value
+        pairs for configuration.
+  comment:
+    description:
+      - Configures a text string comment to be associated with the instance
+        of this object.  The provided text string will be configured on the
+        object instance.
+  container:
+    description:
+      - If set to true, it will create the network container to be added or
+        removed from the system.
+    type: bool
+  state:
+    description:
+      - Configures the intended state of the instance of the object on
+        the NIOS server.  When this value is set to C(present), the object
+        is configured on the device and when this value is set to C(absent)
+        the value is removed (if necessary) from the device.
+    default: present
+    choices:
+      - present
+      - absent
+'''
+
+EXAMPLES = '''
+- name: configure an IPv4 network
+  nios_network:
+    network: 192.168.10.0/24
+    comment: this is a test comment
+    state: present
+    provider:
+      host: "{{ inventory_hostname_short }}"
+      username: admin
+      password: admin
+  connection: local
+- name: configure an IPv6 network
+  nios_network:
+    network: fe80::/64
+    comment: this is a test comment
+    state: present
+    provider:
+      host: "{{ inventory_hostname_short }}"
+      username: admin
+      password: admin
+  connection: local
+- name: set DHCP options for an IPv4 network
+  nios_network:
+    network: 192.168.10.0/24
+    comment: this is a test comment
+    options:
+      - name: domain-name
+        value: ansible.com
+    state: present
+    provider:
+      host: "{{ inventory_hostname_short }}"
+      username: admin
+      password: admin
+  connection: local
+- name: remove an IPv4 network
+  nios_network:
+    network: 192.168.10.0/24
+    state: absent
+    provider:
+      host: "{{ inventory_hostname_short }}"
+      username: admin
+      password: admin
+  connection: local
+- name: configure an IPv4 network container
+  nios_network:
+    network: 192.168.10.0/24
+    container: true
+    comment: test network container
+    state: present
+    provider:
+      host: "{{ inventory_hostname_short }}"
+      username: admin
+      password: admin
+  connection: local
+- name: configure an IPv6 network container
+  nios_network:
+    network: fe80::/64
+    container: true
+    comment: test network container
+    state: present
+    provider:
+      host: "{{ inventory_hostname_short }}"
+      username: admin
+      password: admin
+  connection: local
+- name: remove an IPv4 network container
+  nios_network:
+    network: 192.168.10.0/24
+    container: true
+    comment: test network container
+    state: absent
+    provider:
+      host: "{{ inventory_hostname_short }}"
+      username: admin
+      password: admin
+  connection: local
+'''
+
+RETURN = ''' # '''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six import iteritems
+from ansible_collections.community.general.plugins.module_utils.net_tools.nios.api import WapiModule
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import validate_ip_address, validate_ip_v6_address
+from ansible_collections.community.general.plugins.module_utils.net_tools.nios.api import NIOS_IPV4_NETWORK, NIOS_IPV6_NETWORK
+from ansible_collections.community.general.plugins.module_utils.net_tools.nios.api import NIOS_IPV4_NETWORK_CONTAINER, NIOS_IPV6_NETWORK_CONTAINER
+
+
+def options(module):
+    ''' Transforms the module argument into a valid WAPI struct
+    This function will transform the options argument into a structure that
+    is a valid WAPI structure in the format of:
+        {
+            name: <name>,
+            num: <num>,
+            value: <value>,
+            use_option: <use_option>,
+            vendor_class: <vendor_class>
+        }
+    It will remove any options that are set to None since WAPI will error on
+    that condition.  It will also verify that either `name` or `num` is
+    set in the structure but does not validate that they refer to the same option.
+ The remainder of the value validation is performed by WAPI + ''' + options = list() + for item in module.params['options']: + opt = dict([(k, v) for k, v in iteritems(item) if v is not None]) + if 'name' not in opt and 'num' not in opt: + module.fail_json(msg='one of `name` or `num` is required for option value') + options.append(opt) + return options + + +def check_ip_addr_type(obj_filter, ib_spec): + '''This function will check if the argument ip is type v4/v6 and return appropriate infoblox + network/networkcontainer type + ''' + + ip = obj_filter['network'] + if 'container' in obj_filter and obj_filter['container']: + check_ip = ip.split('/') + del ib_spec['container'] # removing the container key from post arguments + del ib_spec['options'] # removing option argument as for network container it's not supported + if validate_ip_address(check_ip[0]): + return NIOS_IPV4_NETWORK_CONTAINER, ib_spec + elif validate_ip_v6_address(check_ip[0]): + return NIOS_IPV6_NETWORK_CONTAINER, ib_spec + else: + check_ip = ip.split('/') + del ib_spec['container'] # removing the container key from post arguments + if validate_ip_address(check_ip[0]): + return NIOS_IPV4_NETWORK, ib_spec + elif validate_ip_v6_address(check_ip[0]): + return NIOS_IPV6_NETWORK, ib_spec + + +def check_vendor_specific_dhcp_option(module, ib_spec): + '''This function will check if the argument dhcp option belongs to vendor-specific and if yes then will remove + use_options flag which is not supported with vendor-specific dhcp options. + ''' + for key, value in iteritems(ib_spec): + if isinstance(module.params[key], list): + temp_dict = module.params[key][0] + if 'num' in temp_dict: + if temp_dict['num'] in (43, 124, 125): + del module.params[key][0]['use_option'] + return ib_spec + + +def main(): + ''' Main entry point for module execution + ''' + option_spec = dict( + # one of name or num is required; enforced by the function options() + name=dict(), + num=dict(type='int'), + + value=dict(required=True), + + use_option=dict(type='bool', default=True), + vendor_class=dict(default='DHCP') + ) + + ib_spec = dict( + network=dict(required=True, aliases=['name', 'cidr'], ib_req=True), + network_view=dict(default='default', ib_req=True), + + options=dict(type='list', elements='dict', options=option_spec, transform=options), + + extattrs=dict(type='dict'), + comment=dict(), + container=dict(type='bool', ib_req=True) + ) + + argument_spec = dict( + provider=dict(required=True), + state=dict(default='present', choices=['present', 'absent']) + ) + + argument_spec.update(ib_spec) + argument_spec.update(WapiModule.provider_spec) + + module = AnsibleModule(argument_spec=argument_spec, + supports_check_mode=True) + + # to get the argument ipaddr + obj_filter = dict([(k, module.params[k]) for k, v in iteritems(ib_spec) if v.get('ib_req')]) + network_type, ib_spec = check_ip_addr_type(obj_filter, ib_spec) + + wapi = WapiModule(module) + # to check for vendor specific dhcp option + ib_spec = check_vendor_specific_dhcp_option(module, ib_spec) + + result = wapi.run(network_type, ib_spec) + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/net_tools/nios/nios_network_view.py b/plugins/modules/net_tools/nios/nios_network_view.py new file mode 100644 index 0000000000..aae69c632c --- /dev/null +++ b/plugins/modules/net_tools/nios/nios_network_view.py @@ -0,0 +1,133 @@ +#!/usr/bin/python +# Copyright (c) 2018 Red Hat, Inc. 
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'certified'}
+
+
+DOCUMENTATION = '''
+---
+module: nios_network_view
+author: "Peter Sprygada (@privateip)"
+short_description: Configure Infoblox NIOS network views
+description:
+  - Adds and/or removes instances of network view objects from
+    Infoblox NIOS servers.  This module manages NIOS C(networkview) objects
+    using the Infoblox WAPI interface over REST.
+  - Updates instances of network view objects on Infoblox NIOS servers.
+requirements:
+  - infoblox-client
+extends_documentation_fragment:
+- community.general.nios
+
+options:
+  name:
+    description:
+      - Specifies the name of the network view to add or remove from
+        the system. The name can also be updated by passing a dict
+        containing I(new_name) and I(old_name). See examples.
+    required: true
+    aliases:
+      - network_view
+  extattrs:
+    description:
+      - Allows for the configuration of Extensible Attributes on the
+        instance of the object. This argument accepts a set of key / value
+        pairs for configuration.
+  comment:
+    description:
+      - Configures a text string comment to be associated with the instance
+        of this object. The provided text string will be configured on the
+        object instance.
+  state:
+    description:
+      - Configures the intended state of the instance of the object on
+        the NIOS server. When this value is set to C(present), the object
+        is configured on the device and when this value is set to C(absent)
+        the value is removed (if necessary) from the device.
+    default: present
+    choices:
+      - present
+      - absent
+'''
+
+EXAMPLES = '''
+- name: configure a new network view
+  nios_network_view:
+    name: ansible
+    state: present
+    provider:
+      host: "{{ inventory_hostname_short }}"
+      username: admin
+      password: admin
+  connection: local
+- name: update the comment for network view
+  nios_network_view:
+    name: ansible
+    comment: this is an example comment
+    state: present
+    provider:
+      host: "{{ inventory_hostname_short }}"
+      username: admin
+      password: admin
+  connection: local
+- name: remove the network view
+  nios_network_view:
+    name: ansible
+    state: absent
+    provider:
+      host: "{{ inventory_hostname_short }}"
+      username: admin
+      password: admin
+  connection: local
+- name: update an existing network view
+  nios_network_view:
+    name: {new_name: ansible-new, old_name: ansible}
+    state: present
+    provider:
+      host: "{{ inventory_hostname_short }}"
+      username: admin
+      password: admin
+  connection: local
+'''
+
+RETURN = ''' # '''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.net_tools.nios.api import WapiModule
+from ansible_collections.community.general.plugins.module_utils.net_tools.nios.api import NIOS_NETWORK_VIEW
+
+
+def main():
+    ''' Main entry point for module execution
+    '''
+    ib_spec = dict(
+        name=dict(required=True, aliases=['network_view'], ib_req=True),
+        extattrs=dict(type='dict'),
+        comment=dict(),
+    )
+
+    argument_spec = dict(
+        provider=dict(required=True),
+        state=dict(default='present', choices=['present', 'absent'])
+    )
+
+    argument_spec.update(ib_spec)
+    argument_spec.update(WapiModule.provider_spec)
+
+    module = AnsibleModule(argument_spec=argument_spec,
+                           supports_check_mode=True)
+
+    wapi = WapiModule(module)
+    result = wapi.run(NIOS_NETWORK_VIEW, ib_spec)
+
+    module.exit_json(**result)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/net_tools/nios/nios_nsgroup.py b/plugins/modules/net_tools/nios/nios_nsgroup.py
new file mode 100644
index 0000000000..60b55f5e8a
--- /dev/null
+++ b/plugins/modules/net_tools/nios/nios_nsgroup.py
@@ -0,0 +1,350 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'certified'}
+
+DOCUMENTATION = '''
+---
+module: nios_nsgroup
+short_description: Configure Infoblox DNS Nameserver Groups
+extends_documentation_fragment:
+- community.general.nios
+
+author:
+  - Erich Birngruber (@ebirn)
+  - Sumit Jaiswal (@sjaiswal)
+description:
+  - Adds and/or removes nameserver groups from Infoblox NIOS servers.
+    This module manages NIOS C(nsgroup) objects using the Infoblox WAPI interface over REST.
+requirements:
+  - infoblox_client
+options:
+  name:
+    description:
+      - Specifies the name of the NIOS nameserver group to be managed.
+    required: true
+  grid_primary:
+    description:
+      - This host is to be used as primary server in this nameserver group. It must be a grid member.
+        This option is required when setting I(use_external_primary) to C(false).
+    suboptions:
+      name:
+        description:
+          - Provide the name of the grid member to identify the host.
+        required: true
+      enable_preferred_primaries:
+        description:
+          - This flag represents whether the preferred_primaries field values of this member are used (see Infoblox WAPI docs).
+        default: false
+        type: bool
+      grid_replicate:
+        description:
+          - Use DNS zone transfers if set to C(True) or ID Grid Replication if set to C(False).
+        type: bool
+        default: false
+      lead:
+        description:
+          - This flag controls if the grid lead secondary nameserver performs zone transfers to non lead secondaries.
+        type: bool
+        default: false
+      stealth:
+        description:
+          - Configure the external nameserver as stealth server (without NS record) in the zones.
+        type: bool
+        default: false
+  grid_secondaries:
+    description:
+      - Configures the list of grid member hosts that act as secondary nameservers.
+        This option is required when setting I(use_external_primary) to C(true).
+    suboptions:
+      name:
+        description:
+          - Provide the name of the grid member to identify the host.
+        required: true
+      enable_preferred_primaries:
+        description:
+          - This flag represents whether the preferred_primaries field values of this member are used (see Infoblox WAPI docs).
+        default: false
+        type: bool
+      grid_replicate:
+        description:
+          - Use DNS zone transfers if set to C(True) or ID Grid Replication if set to C(False).
+        type: bool
+        default: false
+      lead:
+        description:
+          - This flag controls if the grid lead secondary nameserver performs zone transfers to non lead secondaries.
+        type: bool
+        default: false
+      stealth:
+        description:
+          - Configure the external nameserver as stealth server (without NS record) in the zones.
+        type: bool
+        default: false
+      preferred_primaries:
+        description:
+          - Provide a list of elements like in I(external_primaries) to set the precedence of preferred primary nameservers.
+  is_grid_default:
+    description:
+      - If set to C(True) this nsgroup will become the default nameserver group for new zones.
+    type: bool
+    required: false
+    default: false
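The default-valued flags above are stripped before submission so that only meaningful settings reach WAPI; a simplified standalone sketch of the clean_grid_member() helper defined later in this module (omitting the nested TSIG clean-up the real helper also performs):

def clean_grid_member(member):
    """Drop grid-member flags that are still at their False defaults."""
    if member.get('enable_preferred_primaries') is False:
        member.pop('enable_preferred_primaries', None)
        member.pop('preferred_primaries', None)
    for flag in ('lead', 'grid_replicate'):
        if member.get(flag) is False:
            del member[flag]
    return member

print(clean_grid_member({'name': 'infoblox-test.example.com',
                         'enable_preferred_primaries': False,
                         'preferred_primaries': [],
                         'lead': False,
                         'grid_replicate': False}))
# {'name': 'infoblox-test.example.com'}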
+  use_external_primary:
+    description:
+      - This flag controls whether the group is using an external primary nameserver.
+        Note that modification of this field requires passing values for I(grid_secondaries) and I(external_primaries).
+    type: bool
+    required: false
+    default: false
+  external_primaries:
+    description:
+      - Configures a list of external nameservers (non-members of the grid).
+        This option is required when setting I(use_external_primary) to C(true).
+    suboptions:
+      address:
+        description:
+          - Configures the IP address of the external nameserver.
+        required: true
+      name:
+        description:
+          - Set a label for the external nameserver.
+        required: true
+      stealth:
+        description:
+          - Configure the external nameserver as stealth server (without NS record) in the zones.
+        type: bool
+        default: false
+      tsig_key_name:
+        description:
+          - Sets a label for the I(tsig_key) value.
+      tsig_key_alg:
+        description:
+          - Provides the algorithm used for the I(tsig_key) in use.
+        choices: ['HMAC-MD5', 'HMAC-SHA256']
+        default: 'HMAC-MD5'
+      tsig_key:
+        description:
+          - Set a DNS TSIG key for the nameserver to secure zone transfers (AXFRs).
+        required: false
+  external_secondaries:
+    description:
+      - Allows one to provide a list of external secondary nameservers that are not members of the grid.
+    suboptions:
+      address:
+        description:
+          - Configures the IP address of the external nameserver.
+        required: true
+      name:
+        description:
+          - Set a label for the external nameserver.
+        required: true
+      stealth:
+        description:
+          - Configure the external nameserver as stealth server (without NS record) in the zones.
+        type: bool
+        default: false
+      tsig_key_name:
+        description:
+          - Sets a label for the I(tsig_key) value.
+      tsig_key_alg:
+        description:
+          - Provides the algorithm used for the I(tsig_key) in use.
+        choices: ['HMAC-MD5', 'HMAC-SHA256']
+        default: 'HMAC-MD5'
+      tsig_key:
+        description:
+          - Set a DNS TSIG key for the nameserver to secure zone transfers (AXFRs).
+  extattrs:
+    description:
+      - Allows for the configuration of Extensible Attributes on the
+        instance of the object. This argument accepts a set of key / value
+        pairs for configuration.
+    required: false
+  comment:
+    description:
+      - Configures a text string comment to be associated with the instance
+        of this object. The provided text string will be configured on the
+        object instance.
+    required: false
+  state:
+    description:
+      - Configures the intended state of the instance of the object on
+        the NIOS server. When this value is set to C(present), the object
+        is configured on the device and when this value is set to C(absent)
+        the value is removed (if necessary) from the device.
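The TSIG fields documented above are pruned before the group is sent to WAPI; this standalone sketch mirrors the clean_tsig() helper defined further down in this module:

def clean_tsig(ext):
    """Remove empty or orphaned TSIG key fields from an external server dict."""
    if 'tsig_key' in ext and not ext['tsig_key']:
        del ext['tsig_key']
    if 'tsig_key' not in ext and 'tsig_key_name' in ext and not ext['tsig_key_name']:
        del ext['tsig_key_name']
    if 'tsig_key' not in ext and 'tsig_key_alg' in ext:
        del ext['tsig_key_alg']
    return ext

server = {'name': 'ns1.example.com', 'address': '203.0.113.53',
          'tsig_key': '', 'tsig_key_name': '', 'tsig_key_alg': 'HMAC-MD5'}
print(clean_tsig(server))
# {'name': 'ns1.example.com', 'address': '203.0.113.53'}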
+ choices: [present, absent] + default: present +''' + +EXAMPLES = ''' +- name: create simple infoblox nameserver group + nios_nsgroup: + name: my-simple-group + comment: "this is a simple nameserver group" + grid_primary: + - name: infoblox-test.example.com + state: present + provider: + host: "{{ inventory_hostname_short }}" + username: admin + password: admin + connection: local + +- name: create infoblox nameserver group with external primaries + nios_nsgroup: + name: my-example-group + use_external_primary: true + comment: "this is my example nameserver group" + external_primaries: "{{ ext_nameservers }}" + grid_secondaries: + - name: infoblox-test.example.com + lead: True + preferred_primaries: "{{ ext_nameservers }}" + state: present + provider: + host: "{{ inventory_hostname_short }}" + username: admin + password: admin + connection: local + +- name: delete infoblox nameserver group + nios_nsgroup: + name: my-simple-group + comment: "this is a simple nameserver group" + grid_primary: + - name: infoblox-test.example.com + state: absent + provider: + host: "{{ inventory_hostname_short }}" + username: admin + password: admin + connection: local +''' + +RETURN = ''' # ''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.net_tools.nios.api import WapiModule +from ansible_collections.community.general.plugins.module_utils.net_tools.nios.api import NIOS_NSGROUP + + +# from infoblox documentation +# Fields List +# Field Type Req R/O Base Search +# comment String N N Y : = ~ +# extattrs Extattr N N N ext +# external_primaries [struct] N N N N/A +# external_secondaries [struct] N N N N/A +# grid_primary [struct] N N N N/A +# grid_secondaries [struct] N N N N/A +# is_grid_default Bool N N N N/A +# is_multimaster Bool N Y N N/A +# name String Y N Y : = ~ +# use_external_primary Bool N N N N/A + + +def main(): + '''entrypoint for module execution.''' + argument_spec = dict( + provider=dict(required=True), + state=dict(default='present', choices=['present', 'absent']), + ) + + # cleanup tsig fields + def clean_tsig(ext): + if 'tsig_key' in ext and not ext['tsig_key']: + del ext['tsig_key'] + if 'tsig_key' not in ext and 'tsig_key_name' in ext and not ext['tsig_key_name']: + del ext['tsig_key_name'] + if 'tsig_key' not in ext and 'tsig_key_alg' in ext: + del ext['tsig_key_alg'] + + def clean_grid_member(member): + if member['preferred_primaries']: + for ext in member['preferred_primaries']: + clean_tsig(ext) + if member['enable_preferred_primaries'] is False: + del member['enable_preferred_primaries'] + del member['preferred_primaries'] + if member['lead'] is False: + del member['lead'] + if member['grid_replicate'] is False: + del member['grid_replicate'] + + def ext_primaries_transform(module): + if module.params['external_primaries']: + for ext in module.params['external_primaries']: + clean_tsig(ext) + return module.params['external_primaries'] + + def ext_secondaries_transform(module): + if module.params['external_secondaries']: + for ext in module.params['external_secondaries']: + clean_tsig(ext) + return module.params['external_secondaries'] + + def grid_primary_preferred_transform(module): + for member in module.params['grid_primary']: + clean_grid_member(member) + return module.params['grid_primary'] + + def grid_secondaries_preferred_primaries_transform(module): + for member in module.params['grid_secondaries']: + clean_grid_member(member) + return module.params['grid_secondaries'] + + extserver_spec = dict( + 
+        address=dict(required=True, ib_req=True),
+        name=dict(required=True, ib_req=True),
+        stealth=dict(type='bool', default=False),
+        tsig_key=dict(),
+        tsig_key_alg=dict(choices=['HMAC-MD5', 'HMAC-SHA256'], default='HMAC-MD5'),
+        tsig_key_name=dict(required=True)
+    )
+
+    memberserver_spec = dict(
+        name=dict(required=True, ib_req=True),
+        enable_preferred_primaries=dict(type='bool', default=False),
+        grid_replicate=dict(type='bool', default=False),
+        lead=dict(type='bool', default=False),
+        preferred_primaries=dict(type='list', elements='dict', options=extserver_spec, default=[]),
+        stealth=dict(type='bool', default=False),
+    )
+
+    ib_spec = dict(
+        name=dict(required=True, ib_req=True),
+        grid_primary=dict(type='list', elements='dict', options=memberserver_spec,
+                          transform=grid_primary_preferred_transform),
+        grid_secondaries=dict(type='list', elements='dict', options=memberserver_spec,
+                              transform=grid_secondaries_preferred_primaries_transform),
+        external_primaries=dict(type='list', elements='dict', options=extserver_spec, transform=ext_primaries_transform),
+        external_secondaries=dict(type='list', elements='dict', options=extserver_spec,
+                                  transform=ext_secondaries_transform),
+        is_grid_default=dict(type='bool', default=False),
+        use_external_primary=dict(type='bool', default=False),
+        extattrs=dict(),
+        comment=dict(),
+    )
+
+    argument_spec.update(ib_spec)
+    argument_spec.update(WapiModule.provider_spec)
+
+    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
+
+    wapi = WapiModule(module)
+    result = wapi.run(NIOS_NSGROUP, ib_spec)
+
+    module.exit_json(**result)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/net_tools/nios/nios_ptr_record.py b/plugins/modules/net_tools/nios/nios_ptr_record.py
new file mode 100644
index 0000000000..a2a3a6e090
--- /dev/null
+++ b/plugins/modules/net_tools/nios/nios_ptr_record.py
@@ -0,0 +1,160 @@
+#!/usr/bin/python
+# Copyright (c) 2018 Red Hat, Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'certified'}
+
+DOCUMENTATION = '''
+---
+module: nios_ptr_record
+author: "Trebuchet Clement (@clementtrebuchet)"
+short_description: Configure Infoblox NIOS PTR records
+description:
+  - Adds and/or removes instances of PTR record objects from
+    Infoblox NIOS servers.  This module manages NIOS C(record:ptr) objects
+    using the Infoblox WAPI interface over REST.
+requirements:
+  - infoblox_client
+extends_documentation_fragment:
+- community.general.nios
+
+options:
+  name:
+    description:
+      - The name of the DNS PTR record in FQDN format to add or remove from
+        the system. The field is required only for a PTR object in a
+        Forward Mapping Zone.
+    required: false
+  view:
+    description:
+      - Sets the DNS view to associate this PTR record with. The DNS
+        view must already be configured on the system.
+    required: false
+    aliases:
+      - dns_view
+  ipv4addr:
+    description:
+      - The IPv4 Address of the record. Mutually exclusive with I(ipv6addr);
+        exactly one of the two must be provided.
+    aliases:
+      - ipv4
+  ipv6addr:
+    description:
+      - The IPv6 Address of the record. Mutually exclusive with I(ipv4addr);
+        exactly one of the two must be provided.
+    aliases:
+      - ipv6
+  ptrdname:
+    description:
+      - The domain name of the DNS PTR record in FQDN format.
+    required: true
+  ttl:
+    description:
+      - Time To Live (TTL) value for the record.
+ A 32-bit unsigned integer that represents the duration, in seconds, that the record is valid (cached). + Zero indicates that the record should not be cached. + extattrs: + description: + - Allows for the configuration of Extensible Attributes on the + instance of the object. This argument accepts a set of key / value + pairs for configuration. + comment: + description: + - Configures a text string comment to be associated with the instance + of this object. The provided text string will be configured on the + object instance. Maximum 256 characters. + state: + description: + - Configures the intended state of the instance of the object on + the NIOS server. When this value is set to C(present), the object + is configured on the device and when this value is set to C(absent) + the value is removed (if necessary) from the device. + default: present + choices: + - present + - absent +''' + +EXAMPLES = ''' +- name: Create a PTR Record + nios_ptr_record: + ipv4: 192.168.10.1 + ptrdname: host.ansible.com + state: present + provider: + host: "{{ inventory_hostname_short }}" + username: admin + password: admin + connection: local + +- name: Delete a PTR Record + nios_ptr_record: + ipv4: 192.168.10.1 + ptrdname: host.ansible.com + state: absent + provider: + host: "{{ inventory_hostname_short }}" + username: admin + password: admin + connection: local +''' + +RETURN = ''' # ''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.net_tools.nios.api import WapiModule +from ansible_collections.community.general.plugins.module_utils.net_tools.nios.api import NIOS_PTR_RECORD + + +def main(): + # Module entry point + ib_spec = dict( + name=dict(required=False), + view=dict(aliases=['dns_view'], ib_req=True), + ipv4addr=dict(aliases=['ipv4'], ib_req=True), + ipv6addr=dict(aliases=['ipv6'], ib_req=True), + ptrdname=dict(ib_req=True), + + ttl=dict(type='int'), + + extattrs=dict(type='dict'), + comment=dict(), + ) + + argument_spec = dict( + provider=dict(required=True), + state=dict(default='present', choices=['present', 'absent']) + ) + + argument_spec.update(ib_spec) + argument_spec.update(WapiModule.provider_spec) + + mutually_exclusive = [('ipv4addr', 'ipv6addr')] + required_one_of = [ + ['ipv4addr', 'ipv6addr'] + ] + + module = AnsibleModule(argument_spec=argument_spec, + mutually_exclusive=mutually_exclusive, + supports_check_mode=True, + required_one_of=required_one_of) + + if module.params['ipv4addr']: + del ib_spec['ipv6addr'] + elif module.params['ipv6addr']: + del ib_spec['ipv4addr'] + + wapi = WapiModule(module) + result = wapi.run(NIOS_PTR_RECORD, ib_spec) + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/net_tools/nios/nios_srv_record.py b/plugins/modules/net_tools/nios/nios_srv_record.py new file mode 100644 index 0000000000..709f83b6ae --- /dev/null +++ b/plugins/modules/net_tools/nios/nios_srv_record.py @@ -0,0 +1,172 @@ +#!/usr/bin/python +# Copyright (c) 2018 Red Hat, Inc. 
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'certified'}
+
+
+DOCUMENTATION = '''
+---
+module: nios_srv_record
+author: "Blair Rampling (@brampling)"
+short_description: Configure Infoblox NIOS SRV records
+description:
+  - Adds and/or removes instances of SRV record objects from
+    Infoblox NIOS servers.  This module manages NIOS C(record:srv) objects
+    using the Infoblox WAPI interface over REST.
+requirements:
+  - infoblox-client
+extends_documentation_fragment:
+- community.general.nios
+
+options:
+  name:
+    description:
+      - Specifies the fully qualified hostname to add or remove from
+        the system.
+    required: true
+  view:
+    description:
+      - Sets the DNS view to associate this SRV record with. The DNS
+        view must already be configured on the system.
+    required: true
+    default: default
+    aliases:
+      - dns_view
+  port:
+    description:
+      - Configures the port (0-65535) of this SRV record.
+    required: true
+  priority:
+    description:
+      - Configures the priority (0-65535) for this SRV record.
+    required: true
+  target:
+    description:
+      - Configures the target FQDN for this SRV record.
+    required: true
+  weight:
+    description:
+      - Configures the weight (0-65535) for this SRV record.
+    required: true
+  ttl:
+    description:
+      - Configures the TTL to be associated with this SRV record.
+  extattrs:
+    description:
+      - Allows for the configuration of Extensible Attributes on the
+        instance of the object. This argument accepts a set of key / value
+        pairs for configuration.
+  comment:
+    description:
+      - Configures a text string comment to be associated with the instance
+        of this object. The provided text string will be configured on the
+        object instance.
+  state:
+    description:
+      - Configures the intended state of the instance of the object on
+        the NIOS server. When this value is set to C(present), the object
+        is configured on the device and when this value is set to C(absent)
+        the value is removed (if necessary) from the device.
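Each nios_* module in this commit assembles its argument spec the same way; a minimal standalone sketch of the pattern follows (the provider_spec stand-in below is an assumption, simplified from WapiModule.provider_spec):

provider_spec = {'provider': dict(required=True)}  # stand-in, assumption

ib_spec = dict(
    name=dict(required=True, ib_req=True),
    port=dict(type='int', ib_req=True),
)

argument_spec = dict(
    state=dict(default='present', choices=['present', 'absent']),
)
argument_spec.update(ib_spec)
argument_spec.update(provider_spec)

# Only the ib_req entries are later used to build the WAPI object filter,
# as nios_network's main() does above.
ib_req_fields = [k for k, v in ib_spec.items() if v.get('ib_req')]
print(sorted(argument_spec))
print(ib_req_fields)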
+    default: present
+    choices:
+      - present
+      - absent
+'''
+
+EXAMPLES = '''
+- name: configure an SRV record
+  nios_srv_record:
+    name: _sip._tcp.service.ansible.com
+    port: 5080
+    priority: 10
+    target: service1.ansible.com
+    weight: 10
+    state: present
+    provider:
+      host: "{{ inventory_hostname_short }}"
+      username: admin
+      password: admin
+  connection: local
+
+- name: add a comment to an existing SRV record
+  nios_srv_record:
+    name: _sip._tcp.service.ansible.com
+    port: 5080
+    priority: 10
+    target: service1.ansible.com
+    weight: 10
+    comment: this is a test comment
+    state: present
+    provider:
+      host: "{{ inventory_hostname_short }}"
+      username: admin
+      password: admin
+  connection: local
+
+- name: remove an SRV record from the system
+  nios_srv_record:
+    name: _sip._tcp.service.ansible.com
+    port: 5080
+    priority: 10
+    target: service1.ansible.com
+    weight: 10
+    state: absent
+    provider:
+      host: "{{ inventory_hostname_short }}"
+      username: admin
+      password: admin
+  connection: local
+'''
+
+RETURN = ''' # '''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six import iteritems
+from ansible_collections.community.general.plugins.module_utils.net_tools.nios.api import WapiModule
+from ansible_collections.community.general.plugins.module_utils.net_tools.nios.api import NIOS_SRV_RECORD
+
+
+def main():
+    ''' Main entry point for module execution
+    '''
+
+    ib_spec = dict(
+        name=dict(required=True, ib_req=True),
+        view=dict(default='default', aliases=['dns_view'], ib_req=True),
+
+        port=dict(type='int', ib_req=True),
+        priority=dict(type='int', ib_req=True),
+        target=dict(ib_req=True),
+        weight=dict(type='int', ib_req=True),
+
+        ttl=dict(type='int'),
+
+        extattrs=dict(type='dict'),
+        comment=dict(),
+    )
+
+    argument_spec = dict(
+        provider=dict(required=True),
+        state=dict(default='present', choices=['present', 'absent'])
+    )
+
+    argument_spec.update(ib_spec)
+    argument_spec.update(WapiModule.provider_spec)
+
+    module = AnsibleModule(argument_spec=argument_spec,
+                           supports_check_mode=True)
+
+    wapi = WapiModule(module)
+    result = wapi.run(NIOS_SRV_RECORD, ib_spec)
+
+    module.exit_json(**result)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/net_tools/nios/nios_txt_record.py b/plugins/modules/net_tools/nios/nios_txt_record.py
new file mode 100644
index 0000000000..2de6f6ece7
--- /dev/null
+++ b/plugins/modules/net_tools/nios/nios_txt_record.py
@@ -0,0 +1,135 @@
+#!/usr/bin/python
+# Copyright (c) 2018 Red Hat, Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'certified'}
+
+
+DOCUMENTATION = '''
+---
+module: nios_txt_record
+author: "Corey Wanless (@coreywan)"
+short_description: Configure Infoblox NIOS TXT records
+description:
+  - Adds and/or removes instances of TXT record objects from
+    Infoblox NIOS servers.  This module manages NIOS C(record:txt) objects
+    using the Infoblox WAPI interface over REST.
+requirements:
+  - infoblox_client
+extends_documentation_fragment:
+- community.general.nios
+
+options:
+  name:
+    description:
+      - Specifies the fully qualified hostname to add or remove from
+        the system.
+    required: true
+  view:
+    description:
+      - Sets the DNS view to associate this TXT record with. The DNS
+        view must already be configured on the system.
+    required: true
+    default: default
+    aliases:
+      - dns_view
+  text:
+    description:
+      - Text associated with the record. It can contain up to 255 bytes
+        per substring, up to a total of 512 bytes. To enter leading,
+        trailing, or embedded spaces in the text, add quotes around the
+        text to preserve the spaces.
+    required: true
+  ttl:
+    description:
+      - Configures the TTL to be associated with this TXT record.
+  extattrs:
+    description:
+      - Allows for the configuration of Extensible Attributes on the
+        instance of the object. This argument accepts a set of key / value
+        pairs for configuration.
+  comment:
+    description:
+      - Configures a text string comment to be associated with the instance
+        of this object. The provided text string will be configured on the
+        object instance.
+  state:
+    description:
+      - Configures the intended state of the instance of the object on
+        the NIOS server. When this value is set to C(present), the object
+        is configured on the device and when this value is set to C(absent)
+        the value is removed (if necessary) from the device.
+    default: present
+    choices:
+      - present
+      - absent
+'''
+
+EXAMPLES = '''
+  - name: Ensure a text Record Exists
+    nios_txt_record:
+      name: fqdn.txt.record.com
+      text: mytext
+      state: present
+      view: External
+      provider:
+        host: "{{ inventory_hostname_short }}"
+        username: admin
+        password: admin
+
+  - name: Ensure a text Record does not exist
+    nios_txt_record:
+      name: fqdn.txt.record.com
+      text: mytext
+      state: absent
+      view: External
+      provider:
+        host: "{{ inventory_hostname_short }}"
+        username: admin
+        password: admin
+'''
+
+RETURN = ''' # '''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six import iteritems
+from ansible_collections.community.general.plugins.module_utils.net_tools.nios.api import WapiModule
+
+
+def main():
+    ''' Main entry point for module execution
+    '''
+
+    ib_spec = dict(
+        name=dict(required=True, ib_req=True),
+        view=dict(default='default', aliases=['dns_view'], ib_req=True),
+        text=dict(ib_req=True),
+        ttl=dict(type='int'),
+        extattrs=dict(type='dict'),
+        comment=dict(),
+    )
+
+    argument_spec = dict(
+        provider=dict(required=True),
+        state=dict(default='present', choices=['present', 'absent'])
+    )
+
+    argument_spec.update(ib_spec)
+    argument_spec.update(WapiModule.provider_spec)
+
+    module = AnsibleModule(argument_spec=argument_spec,
+                           supports_check_mode=True)
+
+    wapi = WapiModule(module)
+    result = wapi.run('record:txt', ib_spec)
+
+    module.exit_json(**result)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/net_tools/nios/nios_zone.py b/plugins/modules/net_tools/nios/nios_zone.py
new file mode 100644
index 0000000000..5761ec3e94
--- /dev/null
+++ b/plugins/modules/net_tools/nios/nios_zone.py
@@ -0,0 +1,226 @@
+#!/usr/bin/python
+# Copyright (c) 2018 Red Hat, Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'certified'}
+
+
+DOCUMENTATION = '''
+---
+module: nios_zone
+author: "Peter Sprygada (@privateip)"
+short_description: Configure Infoblox NIOS DNS zones
+description:
+  - Adds and/or removes instances of DNS zone objects from
+    Infoblox NIOS servers.  This module manages NIOS C(zone_auth) objects
+    using the Infoblox WAPI interface over REST.
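A standalone sketch of the TXT size limits quoted in the nios_txt_record documentation above (255 bytes per substring, 512 bytes total); it assumes ASCII-safe input, since a real implementation must not split multi-byte sequences:

def split_txt(text, chunk=255, total=512):
    """Split TXT data into <=255-byte substrings, enforcing the total limit."""
    data = text.encode('utf-8')
    if len(data) > total:
        raise ValueError('TXT record data exceeds %d bytes' % total)
    return [data[i:i + chunk].decode('utf-8') for i in range(0, len(data), chunk)]

print(split_txt('mytext'))        # ['mytext']
print(len(split_txt('x' * 300)))  # 2 substrings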
+requirements:
+  - infoblox-client
+extends_documentation_fragment:
+- community.general.nios
+
+options:
+  fqdn:
+    description:
+      - Specifies the qualified domain name to either add or remove from
+        the NIOS instance based on the configured C(state) value.
+    required: true
+    aliases:
+      - name
+  view:
+    description:
+      - Configures the DNS view name for the configured resource. The
+        specified DNS zone must already exist on the running NIOS instance
+        prior to configuring zones.
+    required: true
+    default: default
+    aliases:
+      - dns_view
+  grid_primary:
+    description:
+      - Configures the grid primary servers for this zone.
+    suboptions:
+      name:
+        description:
+          - The name of the grid primary server.
+  grid_secondaries:
+    description:
+      - Configures the grid secondary servers for this zone.
+    suboptions:
+      name:
+        description:
+          - The name of the grid secondary server.
+  ns_group:
+    description:
+      - Configures the name server group for this zone. The name server group is
+        mutually exclusive with grid primary and grid secondaries.
+  restart_if_needed:
+    description:
+      - If set to true, causes the NIOS DNS service to restart and load the
+        new zone configuration.
+    type: bool
+  zone_format:
+    description:
+      - Create an authoritative Reverse-Mapping Zone, which is an area of network
+        space for which one or more name servers (primary and secondary) have the
+        responsibility to respond to address-to-name queries. It supports
+        reverse-mapping zones for both IPv4 and IPv6 addresses.
+    default: FORWARD
+  extattrs:
+    description:
+      - Allows for the configuration of Extensible Attributes on the
+        instance of the object. This argument accepts a set of key / value
+        pairs for configuration.
+  comment:
+    description:
+      - Configures a text string comment to be associated with the instance
+        of this object. The provided text string will be configured on the
+        object instance.
+  state:
+    description:
+      - Configures the intended state of the instance of the object on
+        the NIOS server. When this value is set to C(present), the object
+        is configured on the device and when this value is set to C(absent)
+        the value is removed (if necessary) from the device.
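A standalone sketch, not collection code, of how a caller might derive the zone_format value documented above from the zone name, matching the FORWARD/IPV4/IPV6 examples that follow:

import ipaddress

def guess_zone_format(fqdn):
    """Reverse-mapping zones are named by CIDR, so a parsable network implies IPV4/IPV6."""
    try:
        net = ipaddress.ip_network(fqdn, strict=False)
    except ValueError:
        return 'FORWARD'
    return 'IPV4' if net.version == 4 else 'IPV6'

assert guess_zone_format('ansible.com') == 'FORWARD'
assert guess_zone_format('10.10.10.0/24') == 'IPV4'
assert guess_zone_format('100::1/128') == 'IPV6'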
+ default: present + choices: + - present + - absent +''' + +EXAMPLES = ''' +- name: configure a zone on the system using grid primary and secondaries + nios_zone: + name: ansible.com + grid_primary: + - name: gridprimary.grid.com + grid_secondaries: + - name: gridsecondary1.grid.com + - name: gridsecondary2.grid.com + restart_if_needed: true + state: present + provider: + host: "{{ inventory_hostname_short }}" + username: admin + password: admin + connection: local +- name: configure a zone on the system using a name server group + nios_zone: + name: ansible.com + ns_group: examplensg + restart_if_needed: true + state: present + provider: + host: "{{ inventory_hostname_short }}" + username: admin + password: admin + connection: local +- name: configure a reverse mapping zone on the system using IPV4 zone format + nios_zone: + name: 10.10.10.0/24 + zone_format: IPV4 + state: present + provider: + host: "{{ inventory_hostname_short }}" + username: admin + password: admin + connection: local +- name: configure a reverse mapping zone on the system using IPV6 zone format + nios_zone: + name: 100::1/128 + zone_format: IPV6 + state: present + provider: + host: "{{ inventory_hostname_short }}" + username: admin + password: admin + connection: local +- name: update the comment and ext attributes for an existing zone + nios_zone: + name: ansible.com + comment: this is an example comment + extattrs: + Site: west-dc + state: present + provider: + host: "{{ inventory_hostname_short }}" + username: admin + password: admin + connection: local +- name: remove the dns zone + nios_zone: + name: ansible.com + state: absent + provider: + host: "{{ inventory_hostname_short }}" + username: admin + password: admin + connection: local +- name: remove the reverse mapping dns zone from the system with IPV4 zone format + nios_zone: + name: 10.10.10.0/24 + zone_format: IPV4 + state: absent + provider: + host: "{{ inventory_hostname_short }}" + username: admin + password: admin + connection: local +''' + +RETURN = ''' # ''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.net_tools.nios.api import WapiModule +from ansible_collections.community.general.plugins.module_utils.net_tools.nios.api import NIOS_ZONE + + +def main(): + ''' Main entry point for module execution + ''' + grid_spec = dict( + name=dict(required=True), + ) + + ib_spec = dict( + fqdn=dict(required=True, aliases=['name'], ib_req=True, update=False), + zone_format=dict(default='FORWARD', aliases=['zone_format'], ib_req=False), + view=dict(default='default', aliases=['dns_view'], ib_req=True), + + grid_primary=dict(type='list', elements='dict', options=grid_spec), + grid_secondaries=dict(type='list', elements='dict', options=grid_spec), + ns_group=dict(), + restart_if_needed=dict(type='bool'), + + extattrs=dict(type='dict'), + comment=dict() + ) + + argument_spec = dict( + provider=dict(required=True), + state=dict(default='present', choices=['present', 'absent']) + ) + + argument_spec.update(ib_spec) + argument_spec.update(WapiModule.provider_spec) + + module = AnsibleModule(argument_spec=argument_spec, + supports_check_mode=True, + mutually_exclusive=[ + ['ns_group', 'grid_primary'], + ['ns_group', 'grid_secondaries'] + ]) + + wapi = WapiModule(module) + result = wapi.run(NIOS_ZONE, ib_spec) + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/net_tools/nmcli.py b/plugins/modules/net_tools/nmcli.py new file mode 100644 index 
0000000000..ff09f6804d
--- /dev/null
+++ b/plugins/modules/net_tools/nmcli.py
@@ -0,0 +1,1547 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2015, Chris Long
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+    'metadata_version': '1.1',
+    'status': ['preview'],
+    'supported_by': 'community'
+}
+
+DOCUMENTATION = r'''
+---
+module: nmcli
+author:
+- Chris Long (@alcamie101)
+short_description: Manage Networking
+requirements:
+- dbus
+- NetworkManager-libnm (or NetworkManager-glib on older systems)
+- nmcli
+description:
+    - Manage the network devices. Create, modify and manage various connection and device types, e.g., ethernet, teams, bonds, vlans etc.
+    - 'On CentOS 8 and Fedora >=29 like systems, the requirements can be met by installing the following packages: NetworkManager-libnm,
+      libsemanage-python, policycoreutils-python.'
+    - 'On CentOS 7 and Fedora <=28 like systems, the requirements can be met by installing the following packages: NetworkManager-glib,
+      libnm-qt-devel.x86_64, nm-connection-editor.x86_64, libsemanage-python, policycoreutils-python.'
+    - 'On Ubuntu and Debian like systems, the requirements can be met by installing the following packages: network-manager,
+      python-dbus (or python3-dbus, depending on the Python version in use), libnm-dev.'
+    - 'On older Ubuntu and Debian like systems, the requirements can be met by installing the following packages: network-manager,
+      python-dbus (or python3-dbus, depending on the Python version in use), libnm-glib-dev.'
+    - 'On openSUSE, the requirements can be met by installing the following packages: NetworkManager, python2-dbus-python (or
+      python3-dbus-python), typelib-1_0-NMClient-1_0 and typelib-1_0-NetworkManager-1_0.'
+options:
+    state:
+        description:
+            - Whether the device should exist or not, taking action if the state is different from what is stated.
+        type: str
+        required: true
+        choices: [ absent, present ]
+    autoconnect:
+        description:
+            - Whether the connection should start on boot.
+            - Whether the connection profile can be automatically activated.
+        type: bool
+        default: yes
+    conn_name:
+        description:
+            - The name used to call the connection. Pattern is <type>[-<ifname>][-<num>].
+        type: str
+        required: true
+    ifname:
+        description:
+            - The interface to bind the connection to.
+            - The connection will only be applicable to this interface name.
+            - A special value of C('*') can be used for interface-independent connections.
+            - The ifname argument is mandatory for all connection types except bond, team, bridge and vlan.
+            - This parameter defaults to C(conn_name) when left unset.
+        type: str
+    type:
+        description:
+            - This is the type of device or network connection that you wish to create or modify.
+            - Type C(generic) is added in Ansible 2.5.
+        type: str
+        choices: [ bond, bond-slave, bridge, bridge-slave, ethernet, generic, ipip, sit, team, team-slave, vlan, vxlan ]
+    mode:
+        description:
+            - This is the type of device or network connection that you wish to create for a bond, team or bridge.
+        type: str
+        choices: [ 802.3ad, active-backup, balance-alb, balance-rr, balance-tlb, balance-xor, broadcast ]
+        default: balance-rr
+    master:
+        description:
+            - Master <master (ifname, or connection UUID or conn_name) of bridge, team, bond master connection profile.
+        type: str
+    forwarddelay:
+        description:
+            - This is only used with bridge - [forward-delay <2-30>] STP forwarding delay, in seconds.
+        type: int
+        default: 15
+    hellotime:
+        description:
+            - This is only used with bridge - [hello-time <1-10>] STP hello time, in seconds.
+        type: int
+        default: 2
+    maxage:
+        description:
+            - This is only used with bridge - [max-age <6-42>] STP maximum message age, in seconds.
+        type: int
+        default: 20
+    ageingtime:
+        description:
+            - This is only used with bridge - [ageing-time <0-1000000>] the Ethernet MAC address aging time, in seconds.
+        type: int
+        default: 300
+    mac:
+        description:
+            - This is only used with bridge - MAC address of the bridge.
+            - Note this requires a recent kernel feature, originally introduced in the 3.15 upstream kernel.
+    slavepriority:
+        description:
+            - This is only used with 'bridge-slave' - [<0-63>] - STP priority of this slave.
+        type: int
+        default: 32
+    path_cost:
+        description:
+            - This is only used with 'bridge-slave' - [<1-65535>] - STP port cost for destinations via this slave.
+        type: int
+        default: 100
+    hairpin:
+        description:
+            - This is only used with 'bridge-slave' - 'hairpin mode' for the slave, which allows frames to be sent back out through the slave the
+              frame was received on.
+        type: bool
+        default: yes
+    vlanid:
+        description:
+            - This is only used with VLAN - VLAN ID in range <0-4095>.
+        type: int
+    vlandev:
+        description:
+            - This is only used with VLAN - the parent device this VLAN is on; can use ifname.
+        type: str
+    flags:
+        description:
+            - This is only used with VLAN - flags.
+        type: str
+    ingress:
+        description:
+            - This is only used with VLAN - VLAN ingress priority mapping.
+        type: str
+    egress:
+        description:
+            - This is only used with VLAN - VLAN egress priority mapping.
+        type: str
+    vxlan_id:
+        description:
+            - This is only used with VXLAN - VXLAN ID.
+        type: int
+    vxlan_remote:
+        description:
+            - This is only used with VXLAN - VXLAN destination IP address.
+        type: str
+    vxlan_local:
+        description:
+            - This is only used with VXLAN - VXLAN local IP address.
+        type: str
+    ip_tunnel_dev:
+        description:
+            - This is used with IPIP/SIT - the parent device of this IPIP/SIT tunnel; can use ifname.
+        type: str
+    ip_tunnel_remote:
+        description:
+            - This is used with IPIP/SIT - IPIP/SIT destination IP address.
+        type: str
+    ip_tunnel_local:
+        description:
+            - This is used with IPIP/SIT - IPIP/SIT local IP address.
+ type: str +''' + +EXAMPLES = r''' +# These examples are using the following inventory: +# +# ## Directory layout: +# +# |_/inventory/cloud-hosts +# | /group_vars/openstack-stage.yml +# | /host_vars/controller-01.openstack.host.com +# | /host_vars/controller-02.openstack.host.com +# |_/playbook/library/nmcli.py +# | /playbook-add.yml +# | /playbook-del.yml +# ``` +# +# ## inventory examples +# ### groups_vars +# ```yml +# --- +# #devops_os_define_network +# storage_gw: "192.0.2.254" +# external_gw: "198.51.100.254" +# tenant_gw: "203.0.113.254" +# +# #Team vars +# nmcli_team: +# - conn_name: tenant +# ip4: '{{ tenant_ip }}' +# gw4: '{{ tenant_gw }}' +# - conn_name: external +# ip4: '{{ external_ip }}' +# gw4: '{{ external_gw }}' +# - conn_name: storage +# ip4: '{{ storage_ip }}' +# gw4: '{{ storage_gw }}' +# nmcli_team_slave: +# - conn_name: em1 +# ifname: em1 +# master: tenant +# - conn_name: em2 +# ifname: em2 +# master: tenant +# - conn_name: p2p1 +# ifname: p2p1 +# master: storage +# - conn_name: p2p2 +# ifname: p2p2 +# master: external +# +# #bond vars +# nmcli_bond: +# - conn_name: tenant +# ip4: '{{ tenant_ip }}' +# gw4: '' +# mode: balance-rr +# - conn_name: external +# ip4: '{{ external_ip }}' +# gw4: '' +# mode: balance-rr +# - conn_name: storage +# ip4: '{{ storage_ip }}' +# gw4: '{{ storage_gw }}' +# mode: balance-rr +# nmcli_bond_slave: +# - conn_name: em1 +# ifname: em1 +# master: tenant +# - conn_name: em2 +# ifname: em2 +# master: tenant +# - conn_name: p2p1 +# ifname: p2p1 +# master: storage +# - conn_name: p2p2 +# ifname: p2p2 +# master: external +# +# #ethernet vars +# nmcli_ethernet: +# - conn_name: em1 +# ifname: em1 +# ip4: '{{ tenant_ip }}' +# gw4: '{{ tenant_gw }}' +# - conn_name: em2 +# ifname: em2 +# ip4: '{{ tenant_ip1 }}' +# gw4: '{{ tenant_gw }}' +# - conn_name: p2p1 +# ifname: p2p1 +# ip4: '{{ storage_ip }}' +# gw4: '{{ storage_gw }}' +# - conn_name: p2p2 +# ifname: p2p2 +# ip4: '{{ external_ip }}' +# gw4: '{{ external_gw }}' +# ``` +# +# ### host_vars +# ```yml +# --- +# storage_ip: "192.0.2.91/23" +# external_ip: "198.51.100.23/21" +# tenant_ip: "203.0.113.77/23" +# ``` + + + +## playbook-add.yml example + +--- +- hosts: openstack-stage + remote_user: root + tasks: + + - name: install needed network manager libs + package: + name: + - NetworkManager-libnm + - nm-connection-editor + - libsemanage-python + - policycoreutils-python + state: present + +##### Working with all cloud nodes - Teaming + - name: Try nmcli add team - conn_name only & ip4 gw4 + nmcli: + type: team + conn_name: '{{ item.conn_name }}' + ip4: '{{ item.ip4 }}' + gw4: '{{ item.gw4 }}' + state: present + with_items: + - '{{ nmcli_team }}' + + - name: Try nmcli add teams-slave + nmcli: + type: team-slave + conn_name: '{{ item.conn_name }}' + ifname: '{{ item.ifname }}' + master: '{{ item.master }}' + state: present + with_items: + - '{{ nmcli_team_slave }}' + +###### Working with all cloud nodes - Bonding + - name: Try nmcli add bond - conn_name only & ip4 gw4 mode + nmcli: + type: bond + conn_name: '{{ item.conn_name }}' + ip4: '{{ item.ip4 }}' + gw4: '{{ item.gw4 }}' + mode: '{{ item.mode }}' + state: present + with_items: + - '{{ nmcli_bond }}' + + - name: Try nmcli add bond-slave + nmcli: + type: bond-slave + conn_name: '{{ item.conn_name }}' + ifname: '{{ item.ifname }}' + master: '{{ item.master }}' + state: present + with_items: + - '{{ nmcli_bond_slave }}' + +##### Working with all cloud nodes - Ethernet + - name: Try nmcli add Ethernet - conn_name only & ip4 gw4 + nmcli: + type: 
ethernet
+        conn_name: '{{ item.conn_name }}'
+        ip4: '{{ item.ip4 }}'
+        gw4: '{{ item.gw4 }}'
+        state: present
+      with_items:
+        - '{{ nmcli_ethernet }}'
+
+## playbook-del.yml example
+- hosts: openstack-stage
+  remote_user: root
+  tasks:
+
+    - name: Try nmcli del team - multiple
+      nmcli:
+        conn_name: '{{ item.conn_name }}'
+        state: absent
+      with_items:
+        - conn_name: em1
+        - conn_name: em2
+        - conn_name: p1p1
+        - conn_name: p1p2
+        - conn_name: p2p1
+        - conn_name: p2p2
+        - conn_name: tenant
+        - conn_name: storage
+        - conn_name: external
+        - conn_name: team-em1
+        - conn_name: team-em2
+        - conn_name: team-p1p1
+        - conn_name: team-p1p2
+        - conn_name: team-p2p1
+        - conn_name: team-p2p2
+
+    - name: Add an Ethernet connection with static IP configuration
+      nmcli:
+        conn_name: my-eth1
+        ifname: eth1
+        type: ethernet
+        ip4: 192.0.2.100/24
+        gw4: 192.0.2.1
+        state: present
+
+    - name: Add a Team connection with static IP configuration
+      nmcli:
+        conn_name: my-team1
+        ifname: my-team1
+        type: team
+        ip4: 192.0.2.100/24
+        gw4: 192.0.2.1
+        state: present
+        autoconnect: yes
+
+    - name: Optionally, at the same time specify IPv6 addresses for the device
+      nmcli:
+        conn_name: my-eth1
+        ifname: eth1
+        type: ethernet
+        ip4: 192.0.2.100/24
+        gw4: 192.0.2.1
+        ip6: 2001:db8::cafe
+        gw6: 2001:db8::1
+        state: present
+
+    - name: Add two IPv4 DNS server addresses
+      nmcli:
+        conn_name: my-eth1
+        type: ethernet
+        dns4:
+          - 192.0.2.53
+          - 198.51.100.53
+        state: present
+
+    - name: Make a profile usable for all compatible Ethernet interfaces
+      nmcli:
+        type: ethernet
+        conn_name: my-eth1
+        ifname: '*'
+        state: present
+
+    - name: Change the property of a setting e.g. MTU
+      nmcli:
+        conn_name: my-eth1
+        mtu: 9000
+        type: ethernet
+        state: present
+
+    - name: Add VxLan
+      nmcli:
+        type: vxlan
+        conn_name: vxlan_test1
+        vxlan_id: 16
+        vxlan_local: 192.168.1.2
+        vxlan_remote: 192.168.1.5
+
+    - name: Add ipip
+      nmcli:
+        type: ipip
+        conn_name: ipip_test1
+        ip_tunnel_dev: eth0
+        ip_tunnel_local: 192.168.1.2
+        ip_tunnel_remote: 192.168.1.5
+
+    - name: Add sit
+      nmcli:
+        type: sit
+        conn_name: sit_test1
+        ip_tunnel_dev: eth0
+        ip_tunnel_local: 192.168.1.2
+        ip_tunnel_remote: 192.168.1.5
+
+# nmcli exits with status 0 if it succeeds and exits with a status greater
+# than zero when there is a failure. The following list of status codes may be
+# returned:
+#
+#     - 0 Success - indicates the operation succeeded
+#     - 1 Unknown or unspecified error
+#     - 2 Invalid user input, wrong nmcli invocation
+#     - 3 Timeout expired (see --wait option)
+#     - 4 Connection activation failed
+#     - 5 Connection deactivation failed
+#     - 6 Disconnecting device failed
+#     - 7 Connection deletion failed
+#     - 8 NetworkManager is not running
+#     - 9 nmcli and NetworkManager versions mismatch
+#     - 10 Connection, device, or access point does not exist.
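A minimal standalone sketch, outside the module, that maps the nmcli exit codes listed above to readable messages, the way a caller of module.run_command() might report a non-zero rc:

NMCLI_RC = {
    0: 'Success - indicates the operation succeeded',
    1: 'Unknown or unspecified error',
    2: 'Invalid user input, wrong nmcli invocation',
    3: 'Timeout expired (see --wait option)',
    4: 'Connection activation failed',
    5: 'Connection deactivation failed',
    6: 'Disconnecting device failed',
    7: 'Connection deletion failed',
    8: 'NetworkManager is not running',
    9: 'nmcli and NetworkManager versions mismatch',
    10: 'Connection, device, or access point does not exist.',
}

def describe_rc(rc):
    """Return the documented meaning of an nmcli exit status."""
    return NMCLI_RC.get(rc, 'Unrecognized nmcli exit status %d' % rc)

assert describe_rc(8) == 'NetworkManager is not running'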
+''' + +RETURN = r"""# +""" + +import traceback + +DBUS_IMP_ERR = None +try: + import dbus + HAVE_DBUS = True +except ImportError: + DBUS_IMP_ERR = traceback.format_exc() + HAVE_DBUS = False + +NM_CLIENT_IMP_ERR = None +HAVE_NM_CLIENT = True +try: + import gi + gi.require_version('NM', '1.0') + from gi.repository import NM +except (ImportError, ValueError): + try: + import gi + gi.require_version('NMClient', '1.0') + gi.require_version('NetworkManager', '1.0') + from gi.repository import NetworkManager, NMClient + except (ImportError, ValueError): + NM_CLIENT_IMP_ERR = traceback.format_exc() + HAVE_NM_CLIENT = False + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib +from ansible.module_utils._text import to_native + + +class Nmcli(object): + """ + This is the generic nmcli manipulation class that is subclassed based on platform. + A subclass may wish to override the following action methods:- + - create_connection() + - delete_connection() + - modify_connection() + - show_connection() + - up_connection() + - down_connection() + All subclasses MUST define platform and distribution (which may be None). + """ + + platform = 'Generic' + distribution = None + if HAVE_DBUS: + bus = dbus.SystemBus() + # The following is going to be used in dbus code + DEVTYPES = { + 1: "Ethernet", + 2: "Wi-Fi", + 5: "Bluetooth", + 6: "OLPC", + 7: "WiMAX", + 8: "Modem", + 9: "InfiniBand", + 10: "Bond", + 11: "VLAN", + 12: "ADSL", + 13: "Bridge", + 14: "Generic", + 15: "Team", + 16: "VxLan", + 17: "ipip", + 18: "sit", + } + STATES = { + 0: "Unknown", + 10: "Unmanaged", + 20: "Unavailable", + 30: "Disconnected", + 40: "Prepare", + 50: "Config", + 60: "Need Auth", + 70: "IP Config", + 80: "IP Check", + 90: "Secondaries", + 100: "Activated", + 110: "Deactivating", + 120: "Failed" + } + + def __init__(self, module): + self.module = module + self.state = module.params['state'] + self.autoconnect = module.params['autoconnect'] + self.conn_name = module.params['conn_name'] + self.master = module.params['master'] + self.ifname = module.params['ifname'] + self.type = module.params['type'] + self.ip4 = module.params['ip4'] + self.gw4 = module.params['gw4'] + self.dns4 = ' '.join(module.params['dns4']) if module.params.get('dns4') else None + self.dns4_search = ' '.join(module.params['dns4_search']) if module.params.get('dns4_search') else None + self.ip6 = module.params['ip6'] + self.gw6 = module.params['gw6'] + self.dns6 = ' '.join(module.params['dns6']) if module.params.get('dns6') else None + self.dns6_search = ' '.join(module.params['dns6_search']) if module.params.get('dns6_search') else None + self.mtu = module.params['mtu'] + self.stp = module.params['stp'] + self.priority = module.params['priority'] + self.mode = module.params['mode'] + self.miimon = module.params['miimon'] + self.primary = module.params['primary'] + self.downdelay = module.params['downdelay'] + self.updelay = module.params['updelay'] + self.arp_interval = module.params['arp_interval'] + self.arp_ip_target = module.params['arp_ip_target'] + self.slavepriority = module.params['slavepriority'] + self.forwarddelay = module.params['forwarddelay'] + self.hellotime = module.params['hellotime'] + self.maxage = module.params['maxage'] + self.ageingtime = module.params['ageingtime'] + self.hairpin = module.params['hairpin'] + self.path_cost = module.params['path_cost'] + self.mac = module.params['mac'] + self.vlanid = module.params['vlanid'] + self.vlandev = module.params['vlandev'] + self.flags = module.params['flags'] + 
self.ingress = module.params['ingress'] + self.egress = module.params['egress'] + self.vxlan_id = module.params['vxlan_id'] + self.vxlan_local = module.params['vxlan_local'] + self.vxlan_remote = module.params['vxlan_remote'] + self.ip_tunnel_dev = module.params['ip_tunnel_dev'] + self.ip_tunnel_local = module.params['ip_tunnel_local'] + self.ip_tunnel_remote = module.params['ip_tunnel_remote'] + self.nmcli_bin = self.module.get_bin_path('nmcli', True) + self.dhcp_client_id = module.params['dhcp_client_id'] + + def execute_command(self, cmd, use_unsafe_shell=False, data=None): + return self.module.run_command(cmd, use_unsafe_shell=use_unsafe_shell, data=data) + + def merge_secrets(self, proxy, config, setting_name): + try: + # returns a dict of dicts mapping name::setting, where setting is a dict + # mapping key::value. Each member of the 'setting' dict is a secret + secrets = proxy.GetSecrets(setting_name) + + # Copy the secrets into our connection config + for setting in secrets: + for key in secrets[setting]: + config[setting_name][key] = secrets[setting][key] + except Exception: + pass + + def dict_to_string(self, d): + # Try to trivially translate a dictionary's elements into nice string + # formatting. + dstr = "" + for key in d: + val = d[key] + str_val = "" + add_string = True + if isinstance(val, dbus.Array): + for elt in val: + if isinstance(elt, dbus.Byte): + str_val += "%s " % int(elt) + elif isinstance(elt, dbus.String): + str_val += "%s" % elt + elif isinstance(val, dbus.Dictionary): + dstr += self.dict_to_string(val) + add_string = False + else: + str_val = val + if add_string: + dstr += "%s: %s\n" % (key, str_val) + return dstr + + def connection_to_string(self, config): + # dump a connection configuration to use in list_connection_info + setting_list = [] + for setting_name in config: + setting_list.append(self.dict_to_string(config[setting_name])) + return setting_list + + @staticmethod + def bool_to_string(boolean): + if boolean: + return "yes" + else: + return "no" + + def list_connection_info(self): + # Ask the settings service for the list of connections it provides + bus = dbus.SystemBus() + + service_name = "org.freedesktop.NetworkManager" + settings = None + try: + proxy = bus.get_object(service_name, "/org/freedesktop/NetworkManager/Settings") + settings = dbus.Interface(proxy, "org.freedesktop.NetworkManager.Settings") + except dbus.exceptions.DBusException as e: + self.module.fail_json(msg="Unable to read Network Manager settings from DBus system bus: %s" % to_native(e), + details="Please check if NetworkManager is installed and" + "service network-manager is started.") + connection_paths = settings.ListConnections() + connection_list = [] + # List each connection's name, UUID, and type + for path in connection_paths: + con_proxy = bus.get_object(service_name, path) + settings_connection = dbus.Interface(con_proxy, "org.freedesktop.NetworkManager.Settings.Connection") + config = settings_connection.GetSettings() + + # Now get secrets too; we grab the secrets for each type of connection + # (since there isn't a "get all secrets" call because most of the time + # you only need 'wifi' secrets or '802.1x' secrets, not everything) and + # merge that into the configuration data - To use at a later stage + self.merge_secrets(settings_connection, config, '802-11-wireless') + self.merge_secrets(settings_connection, config, '802-11-wireless-security') + self.merge_secrets(settings_connection, config, '802-1x') + self.merge_secrets(settings_connection, config, 'gsm') + 
self.merge_secrets(settings_connection, config, 'cdma') + self.merge_secrets(settings_connection, config, 'ppp') + + # Get the details of the 'connection' setting + s_con = config['connection'] + connection_list.append(s_con['id']) + connection_list.append(s_con['uuid']) + connection_list.append(s_con['type']) + connection_list.append(self.connection_to_string(config)) + return connection_list + + def connection_exists(self): + # we are going to use name and type in this instance to find if that connection exists and is of type x + connections = self.list_connection_info() + + for con_item in connections: + if self.conn_name == con_item: + return True + + def down_connection(self): + cmd = [self.nmcli_bin, 'con', 'down', self.conn_name] + return self.execute_command(cmd) + + def up_connection(self): + cmd = [self.nmcli_bin, 'con', 'up', self.conn_name] + return self.execute_command(cmd) + + def create_connection_team(self): + cmd = [self.nmcli_bin, 'con', 'add', 'type', 'team', 'con-name'] + # format for creating team interface + if self.conn_name is not None: + cmd.append(self.conn_name) + elif self.ifname is not None: + cmd.append(self.ifname) + cmd.append('ifname') + if self.ifname is not None: + cmd.append(self.ifname) + elif self.conn_name is not None: + cmd.append(self.conn_name) + + options = { + 'ip4': self.ip4, + 'gw4': self.gw4, + 'ip6': self.ip6, + 'gw6': self.gw6, + 'autoconnect': self.bool_to_string(self.autoconnect), + 'ipv4.dns-search': self.dns4_search, + 'ipv6.dns-search': self.dns6_search, + 'ipv4.dhcp-client-id': self.dhcp_client_id, + } + + for key, value in options.items(): + if value is not None: + cmd.extend([key, value]) + + return cmd + + def modify_connection_team(self): + cmd = [self.nmcli_bin, 'con', 'mod', self.conn_name] + options = { + 'ipv4.address': self.ip4, + 'ipv4.gateway': self.gw4, + 'ipv4.dns': self.dns4, + 'ipv6.address': self.ip6, + 'ipv6.gateway': self.gw6, + 'ipv6.dns': self.dns6, + 'autoconnect': self.bool_to_string(self.autoconnect), + 'ipv4.dns-search': self.dns4_search, + 'ipv6.dns-search': self.dns6_search, + 'ipv4.dhcp-client-id': self.dhcp_client_id, + } + + for key, value in options.items(): + if value is not None: + cmd.extend([key, value]) + + return cmd + + def create_connection_team_slave(self): + cmd = [self.nmcli_bin, 'connection', 'add', 'type', self.type, 'con-name'] + # format for creating team-slave interface + if self.conn_name is not None: + cmd.append(self.conn_name) + elif self.ifname is not None: + cmd.append(self.ifname) + cmd.append('ifname') + if self.ifname is not None: + cmd.append(self.ifname) + elif self.conn_name is not None: + cmd.append(self.conn_name) + cmd.append('master') + if self.conn_name is not None: + cmd.append(self.master) + return cmd + + def modify_connection_team_slave(self): + cmd = [self.nmcli_bin, 'con', 'mod', self.conn_name, 'connection.master', self.master] + # format for modifying team-slave interface + if self.mtu is not None: + cmd.append('802-3-ethernet.mtu') + cmd.append(self.mtu) + return cmd + + def create_connection_bond(self): + cmd = [self.nmcli_bin, 'con', 'add', 'type', 'bond', 'con-name'] + # format for creating bond interface + if self.conn_name is not None: + cmd.append(self.conn_name) + elif self.ifname is not None: + cmd.append(self.ifname) + cmd.append('ifname') + if self.ifname is not None: + cmd.append(self.ifname) + elif self.conn_name is not None: + cmd.append(self.conn_name) + options = { + 'mode': self.mode, + 'ip4': self.ip4, + 'gw4': self.gw4, + 'ip6': self.ip6, + 'gw6': 
self.gw6, + 'autoconnect': self.bool_to_string(self.autoconnect), + 'ipv4.dns-search': self.dns4_search, + 'ipv6.dns-search': self.dns6_search, + 'miimon': self.miimon, + 'downdelay': self.downdelay, + 'updelay': self.updelay, + 'arp-interval': self.arp_interval, + 'arp-ip-target': self.arp_ip_target, + 'primary': self.primary, + 'ipv4.dhcp-client-id': self.dhcp_client_id, + } + + for key, value in options.items(): + if value is not None: + cmd.extend([key, value]) + return cmd + + def modify_connection_bond(self): + cmd = [self.nmcli_bin, 'con', 'mod', self.conn_name] + # format for modifying bond interface + + options = { + 'ipv4.address': self.ip4, + 'ipv4.gateway': self.gw4, + 'ipv4.dns': self.dns4, + 'ipv6.address': self.ip6, + 'ipv6.gateway': self.gw6, + 'ipv6.dns': self.dns6, + 'autoconnect': self.bool_to_string(self.autoconnect), + 'ipv4.dns-search': self.dns4_search, + 'ipv6.dns-search': self.dns6_search, + 'miimon': self.miimon, + 'downdelay': self.downdelay, + 'updelay': self.updelay, + 'arp-interval': self.arp_interval, + 'arp-ip-target': self.arp_ip_target, + 'ipv4.dhcp-client-id': self.dhcp_client_id, + } + + for key, value in options.items(): + if value is not None: + cmd.extend([key, value]) + + return cmd + + def create_connection_bond_slave(self): + cmd = [self.nmcli_bin, 'connection', 'add', 'type', 'bond-slave', 'con-name'] + # format for creating bond-slave interface + if self.conn_name is not None: + cmd.append(self.conn_name) + elif self.ifname is not None: + cmd.append(self.ifname) + cmd.append('ifname') + if self.ifname is not None: + cmd.append(self.ifname) + elif self.conn_name is not None: + cmd.append(self.conn_name) + cmd.append('master') + if self.conn_name is not None: + cmd.append(self.master) + return cmd + + def modify_connection_bond_slave(self): + cmd = [self.nmcli_bin, 'con', 'mod', self.conn_name, 'connection.master', self.master] + # format for modifying bond-slave interface + return cmd + + def create_connection_ethernet(self, conn_type='ethernet'): + # format for creating ethernet interface + # To add an Ethernet connection with static IP configuration, issue a command as follows + # - nmcli: name=add conn_name=my-eth1 ifname=eth1 type=ethernet ip4=192.0.2.100/24 gw4=192.0.2.1 state=present + # nmcli con add con-name my-eth1 ifname eth1 type ethernet ip4 192.0.2.100/24 gw4 192.0.2.1 + cmd = [self.nmcli_bin, 'con', 'add', 'type'] + if conn_type == 'ethernet': + cmd.append('ethernet') + elif conn_type == 'generic': + cmd.append('generic') + cmd.append('con-name') + if self.conn_name is not None: + cmd.append(self.conn_name) + elif self.ifname is not None: + cmd.append(self.ifname) + cmd.append('ifname') + if self.ifname is not None: + cmd.append(self.ifname) + elif self.conn_name is not None: + cmd.append(self.conn_name) + + options = { + 'ip4': self.ip4, + 'gw4': self.gw4, + 'ip6': self.ip6, + 'gw6': self.gw6, + 'autoconnect': self.bool_to_string(self.autoconnect), + 'ipv4.dns-search': self.dns4_search, + 'ipv6.dns-search': self.dns6_search, + 'ipv4.dhcp-client-id': self.dhcp_client_id, + } + + for key, value in options.items(): + if value is not None: + cmd.extend([key, value]) + + return cmd + + def modify_connection_ethernet(self, conn_type='ethernet'): + cmd = [self.nmcli_bin, 'con', 'mod', self.conn_name] + # format for modifying ethernet interface + # To modify an Ethernet connection with static IP configuration, issue a command as follows + # - nmcli: conn_name=my-eth1 ifname=eth1 type=ethernet ip4=192.0.2.100/24 gw4=192.0.2.1 state=present + 
# nmcli con mod con-name my-eth1 ifname eth1 type ethernet ipv4.address 192.0.2.100/24 ipv4.gateway 192.0.2.1
+        options = {
+            'ipv4.address': self.ip4,
+            'ipv4.gateway': self.gw4,
+            'ipv4.dns': self.dns4,
+            'ipv6.address': self.ip6,
+            'ipv6.gateway': self.gw6,
+            'ipv6.dns': self.dns6,
+            'autoconnect': self.bool_to_string(self.autoconnect),
+            'ipv4.dns-search': self.dns4_search,
+            'ipv6.dns-search': self.dns6_search,
+            '802-3-ethernet.mtu': self.mtu,
+            'ipv4.dhcp-client-id': self.dhcp_client_id,
+        }
+
+        for key, value in options.items():
+            if value is not None:
+                if key == '802-3-ethernet.mtu' and conn_type != 'ethernet':
+                    continue
+                cmd.extend([key, value])
+
+        return cmd
+
+    def create_connection_bridge(self):
+        # format for creating bridge interface
+        # To add a Bridge connection with static IP configuration, issue a command as follows
+        # - nmcli: name=add conn_name=my-eth1 ifname=eth1 type=bridge ip4=192.0.2.100/24 gw4=192.0.2.1 state=present
+        # nmcli con add con-name my-eth1 ifname eth1 type bridge ip4 192.0.2.100/24 gw4 192.0.2.1
+        cmd = [self.nmcli_bin, 'con', 'add', 'type', 'bridge', 'con-name']
+        if self.conn_name is not None:
+            cmd.append(self.conn_name)
+        elif self.ifname is not None:
+            cmd.append(self.ifname)
+        cmd.append('ifname')
+        if self.ifname is not None:
+            cmd.append(self.ifname)
+        elif self.conn_name is not None:
+            cmd.append(self.conn_name)
+
+        options = {
+            'ip4': self.ip4,
+            'gw4': self.gw4,
+            'ip6': self.ip6,
+            'gw6': self.gw6,
+            'autoconnect': self.bool_to_string(self.autoconnect),
+            'bridge.ageing-time': self.ageingtime,
+            'bridge.forward-delay': self.forwarddelay,
+            'bridge.hello-time': self.hellotime,
+            'bridge.mac-address': self.mac,
+            'bridge.max-age': self.maxage,
+            'bridge.priority': self.priority,
+            'bridge.stp': self.bool_to_string(self.stp)
+        }
+
+        for key, value in options.items():
+            if value is not None:
+                cmd.extend([key, value])
+
+        return cmd
+
+    def modify_connection_bridge(self):
+        # format for modifying bridge interface
+        # To modify a Bridge connection with static IP configuration, issue a command as follows
+        # - nmcli: name=mod conn_name=my-eth1 ifname=eth1 type=bridge ip4=192.0.2.100/24 gw4=192.0.2.1 state=present
+        # nmcli con mod my-eth1 ifname eth1 type bridge ip4 192.0.2.100/24 gw4 192.0.2.1
+        cmd = [self.nmcli_bin, 'con', 'mod', self.conn_name]
+
+        options = {
+            'ipv4.address': self.ip4,
+            'ipv4.gateway': self.gw4,
+            'ipv6.address': self.ip6,
+            'ipv6.gateway': self.gw6,
+            'autoconnect': self.bool_to_string(self.autoconnect),
+            'bridge.ageing-time': self.ageingtime,
+            'bridge.forward-delay': self.forwarddelay,
+            'bridge.hello-time': self.hellotime,
+            'bridge.mac-address': self.mac,
+            'bridge.max-age': self.maxage,
+            'bridge.priority': self.priority,
+            'bridge.stp': self.bool_to_string(self.stp)
+        }
+
+        for key, value in options.items():
+            if value is not None:
+                cmd.extend([key, value])
+
+        return cmd
+
+    def create_connection_bridge_slave(self):
+        # format for creating bridge-slave interface
+        cmd = [self.nmcli_bin, 'con', 'add', 'type', 'bridge-slave', 'con-name']
+        if self.conn_name is not None:
+            cmd.append(self.conn_name)
+        elif self.ifname is not None:
+            cmd.append(self.ifname)
+        cmd.append('ifname')
+        if self.ifname is not None:
+            cmd.append(self.ifname)
+        elif self.conn_name is not None:
+            cmd.append(self.conn_name)
+
+        options = {
+            'master': self.master,
+            'bridge-port.path-cost': self.path_cost,
+            'bridge-port.hairpin': self.bool_to_string(self.hairpin),
+            'bridge-port.priority': self.slavepriority,
+        }
+
+        for key, value in
options.items():
+            if value is not None:
+                cmd.extend([key, value])
+
+        return cmd
+
+    def modify_connection_bridge_slave(self):
+        # format for modifying bridge-slave interface
+        cmd = [self.nmcli_bin, 'con', 'mod', self.conn_name]
+        options = {
+            'master': self.master,
+            'bridge-port.path-cost': self.path_cost,
+            'bridge-port.hairpin': self.bool_to_string(self.hairpin),
+            'bridge-port.priority': self.slavepriority,
+        }
+
+        for key, value in options.items():
+            if value is not None:
+                cmd.extend([key, value])
+
+        return cmd
+
+    def create_connection_vlan(self):
+        cmd = [self.nmcli_bin, 'con', 'add', 'type', 'vlan', 'con-name']
+
+        if self.conn_name is not None:
+            cmd.append(self.conn_name)
+        elif self.ifname is not None:
+            cmd.append(self.ifname)
+        else:
+            cmd.append('vlan%s' % self.vlanid)
+
+        cmd.append('ifname')
+        if self.ifname is not None:
+            cmd.append(self.ifname)
+        elif self.conn_name is not None:
+            cmd.append(self.conn_name)
+        else:
+            cmd.append('vlan%s' % self.vlanid)
+
+        params = {'dev': self.vlandev,
+                  'id': str(self.vlanid),
+                  'ip4': self.ip4 or '',
+                  'gw4': self.gw4 or '',
+                  'ip6': self.ip6 or '',
+                  'gw6': self.gw6 or '',
+                  'autoconnect': self.bool_to_string(self.autoconnect)
+                  }
+        for k, v in params.items():
+            cmd.extend([k, v])
+
+        return cmd
+
+    def modify_connection_vlan(self):
+        cmd = [self.nmcli_bin, 'con', 'mod']
+
+        if self.conn_name is not None:
+            cmd.append(self.conn_name)
+        elif self.ifname is not None:
+            cmd.append(self.ifname)
+        else:
+            cmd.append('vlan%s' % self.vlanid)
+
+        params = {'vlan.parent': self.vlandev,
+                  'vlan.id': str(self.vlanid),
+                  'ipv4.address': self.ip4 or '',
+                  'ipv4.gateway': self.gw4 or '',
+                  'ipv4.dns': self.dns4 or '',
+                  'ipv6.address': self.ip6 or '',
+                  'ipv6.gateway': self.gw6 or '',
+                  'ipv6.dns': self.dns6 or '',
+                  'autoconnect': self.bool_to_string(self.autoconnect)
+                  }
+
+        for k, v in params.items():
+            cmd.extend([k, v])
+
+        return cmd
+
+    def create_connection_vxlan(self):
+        cmd = [self.nmcli_bin, 'con', 'add', 'type', 'vxlan', 'con-name']
+
+        if self.conn_name is not None:
+            cmd.append(self.conn_name)
+        elif self.ifname is not None:
+            cmd.append(self.ifname)
+        else:
+            cmd.append('vxlan%s' % self.vxlan_id)
+
+        cmd.append('ifname')
+        if self.ifname is not None:
+            cmd.append(self.ifname)
+        elif self.conn_name is not None:
+            cmd.append(self.conn_name)
+        else:
+            cmd.append('vxlan%s' % self.vxlan_id)
+
+        params = {'vxlan.id': self.vxlan_id,
+                  'vxlan.local': self.vxlan_local,
+                  'vxlan.remote': self.vxlan_remote,
+                  'autoconnect': self.bool_to_string(self.autoconnect)
+                  }
+        for k, v in params.items():
+            cmd.extend([k, v])
+
+        return cmd
+
+    def modify_connection_vxlan(self):
+        cmd = [self.nmcli_bin, 'con', 'mod']
+
+        if self.conn_name is not None:
+            cmd.append(self.conn_name)
+        elif self.ifname is not None:
+            cmd.append(self.ifname)
+        else:
+            cmd.append('vxlan%s' % self.vxlan_id)
+
+        params = {'vxlan.id': self.vxlan_id,
+                  'vxlan.local': self.vxlan_local,
+                  'vxlan.remote': self.vxlan_remote,
+                  'autoconnect': self.bool_to_string(self.autoconnect)
+                  }
+        for k, v in params.items():
+            cmd.extend([k, v])
+        return cmd
+
+    def create_connection_ipip(self):
+        cmd = [self.nmcli_bin, 'con', 'add', 'type', 'ip-tunnel', 'mode', 'ipip', 'con-name']
+
+        if self.conn_name is not None:
+            cmd.append(self.conn_name)
+        elif self.ifname is not None:
+            cmd.append(self.ifname)
+        elif self.ip_tunnel_dev is not None:
+            cmd.append('ipip%s' % self.ip_tunnel_dev)
+
+        cmd.append('ifname')
+
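# Pick the interface name the same way as the con-name above: prefer the
+        # explicit ifname, fall back to the connection name, and otherwise
+        # derive a name from the tunnel device.
+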
if self.ifname is not None:
+            cmd.append(self.ifname)
+        elif self.conn_name is not None:
+            cmd.append(self.conn_name)
+        else:
+            cmd.append('ipip%s' % self.ip_tunnel_dev)
+
+        if self.ip_tunnel_dev is not None:
+            cmd.append('dev')
+            cmd.append(self.ip_tunnel_dev)
+
+        params = {'ip-tunnel.local': self.ip_tunnel_local,
+                  'ip-tunnel.remote': self.ip_tunnel_remote,
+                  'autoconnect': self.bool_to_string(self.autoconnect)
+                  }
+        for k, v in params.items():
+            cmd.extend([k, v])
+
+        return cmd
+
+    def modify_connection_ipip(self):
+        cmd = [self.nmcli_bin, 'con', 'mod']
+
+        if self.conn_name is not None:
+            cmd.append(self.conn_name)
+        elif self.ifname is not None:
+            cmd.append(self.ifname)
+        elif self.ip_tunnel_dev is not None:
+            cmd.append('ipip%s' % self.ip_tunnel_dev)
+
+        params = {'ip-tunnel.local': self.ip_tunnel_local,
+                  'ip-tunnel.remote': self.ip_tunnel_remote,
+                  'autoconnect': self.bool_to_string(self.autoconnect)
+                  }
+        for k, v in params.items():
+            cmd.extend([k, v])
+        return cmd
+
+    def create_connection_sit(self):
+        cmd = [self.nmcli_bin, 'con', 'add', 'type', 'ip-tunnel', 'mode', 'sit', 'con-name']
+
+        if self.conn_name is not None:
+            cmd.append(self.conn_name)
+        elif self.ifname is not None:
+            cmd.append(self.ifname)
+        elif self.ip_tunnel_dev is not None:
+            cmd.append('sit%s' % self.ip_tunnel_dev)
+
+        cmd.append('ifname')
+        if self.ifname is not None:
+            cmd.append(self.ifname)
+        elif self.conn_name is not None:
+            cmd.append(self.conn_name)
+        else:
+            cmd.append('sit%s' % self.ip_tunnel_dev)
+
+        if self.ip_tunnel_dev is not None:
+            cmd.append('dev')
+            cmd.append(self.ip_tunnel_dev)
+
+        params = {'ip-tunnel.local': self.ip_tunnel_local,
+                  'ip-tunnel.remote': self.ip_tunnel_remote,
+                  'autoconnect': self.bool_to_string(self.autoconnect)
+                  }
+        for k, v in params.items():
+            cmd.extend([k, v])
+
+        return cmd
+
+    def modify_connection_sit(self):
+        cmd = [self.nmcli_bin, 'con', 'mod']
+
+        if self.conn_name is not None:
+            cmd.append(self.conn_name)
+        elif self.ifname is not None:
+            cmd.append(self.ifname)
+        elif self.ip_tunnel_dev is not None:
+            cmd.append('sit%s' % self.ip_tunnel_dev)
+
+        params = {'ip-tunnel.local': self.ip_tunnel_local,
+                  'ip-tunnel.remote': self.ip_tunnel_remote,
+                  'autoconnect': self.bool_to_string(self.autoconnect)
+                  }
+        for k, v in params.items():
+            cmd.extend([k, v])
+        return cmd
+
+    def create_connection(self):
+        cmd = []
+        if self.type == 'team':
+            if (self.dns4 is not None) or (self.dns6 is not None):
+                cmd = self.create_connection_team()
+                self.execute_command(cmd)
+                cmd = self.modify_connection_team()
+                self.execute_command(cmd)
+                return self.up_connection()
+            elif (self.dns4 is None) or (self.dns6 is None):
+                cmd = self.create_connection_team()
+        elif self.type == 'team-slave':
+            if self.mtu is not None:
+                cmd = self.create_connection_team_slave()
+                self.execute_command(cmd)
+                cmd = self.modify_connection_team_slave()
+                return self.execute_command(cmd)
+            else:
+                cmd = self.create_connection_team_slave()
+        elif self.type == 'bond':
+            if (self.mtu is not None) or (self.dns4 is not None) or (self.dns6 is not None):
+                cmd = self.create_connection_bond()
+                self.execute_command(cmd)
+                cmd = self.modify_connection_bond()
+                self.execute_command(cmd)
+                return self.up_connection()
+            else:
+                cmd = self.create_connection_bond()
+        elif self.type == 'bond-slave':
+            cmd = self.create_connection_bond_slave()
+        elif self.type == 'ethernet':
+            if (self.mtu is not None) or (self.dns4 is not None) or (self.dns6 is not None):
+                cmd = self.create_connection_ethernet()
+                self.execute_command(cmd)
+
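# create_connection_ethernet() cannot set DNS or MTU (those keys only
+                # exist in the modify_* option maps), so the connection is
+                # created first, the remaining options are applied with
+                # 'con mod', and the connection is then brought up.
+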
cmd = self.modify_connection_ethernet() + self.execute_command(cmd) + return self.up_connection() + else: + cmd = self.create_connection_ethernet() + elif self.type == 'bridge': + cmd = self.create_connection_bridge() + elif self.type == 'bridge-slave': + cmd = self.create_connection_bridge_slave() + elif self.type == 'vlan': + cmd = self.create_connection_vlan() + elif self.type == 'vxlan': + cmd = self.create_connection_vxlan() + elif self.type == 'ipip': + cmd = self.create_connection_ipip() + elif self.type == 'sit': + cmd = self.create_connection_sit() + elif self.type == 'generic': + cmd = self.create_connection_ethernet(conn_type='generic') + + if cmd: + return self.execute_command(cmd) + else: + self.module.fail_json(msg="Type of device or network connection is required " + "while performing 'create' operation. Please specify 'type' as an argument.") + + def remove_connection(self): + # self.down_connection() + cmd = [self.nmcli_bin, 'con', 'del', self.conn_name] + return self.execute_command(cmd) + + def modify_connection(self): + cmd = [] + if self.type == 'team': + cmd = self.modify_connection_team() + elif self.type == 'team-slave': + cmd = self.modify_connection_team_slave() + elif self.type == 'bond': + cmd = self.modify_connection_bond() + elif self.type == 'bond-slave': + cmd = self.modify_connection_bond_slave() + elif self.type == 'ethernet': + cmd = self.modify_connection_ethernet() + elif self.type == 'bridge': + cmd = self.modify_connection_bridge() + elif self.type == 'bridge-slave': + cmd = self.modify_connection_bridge_slave() + elif self.type == 'vlan': + cmd = self.modify_connection_vlan() + elif self.type == 'vxlan': + cmd = self.modify_connection_vxlan() + elif self.type == 'ipip': + cmd = self.modify_connection_ipip() + elif self.type == 'sit': + cmd = self.modify_connection_sit() + elif self.type == 'generic': + cmd = self.modify_connection_ethernet(conn_type='generic') + if cmd: + return self.execute_command(cmd) + else: + self.module.fail_json(msg="Type of device or network connection is required " + "while performing 'modify' operation. 
Please specify 'type' as an argument.") + + +def main(): + # Parsing argument file + module = AnsibleModule( + argument_spec=dict( + autoconnect=dict(type='bool', default=True), + state=dict(type='str', required=True, choices=['absent', 'present']), + conn_name=dict(type='str', required=True), + master=dict(type='str'), + ifname=dict(type='str'), + type=dict(type='str', + choices=['bond', 'bond-slave', 'bridge', 'bridge-slave', 'ethernet', 'generic', 'ipip', 'sit', 'team', 'team-slave', 'vlan', 'vxlan']), + ip4=dict(type='str'), + gw4=dict(type='str'), + dns4=dict(type='list'), + dns4_search=dict(type='list'), + dhcp_client_id=dict(type='str'), + ip6=dict(type='str'), + gw6=dict(type='str'), + dns6=dict(type='list'), + dns6_search=dict(type='list'), + # Bond Specific vars + mode=dict(type='str', default='balance-rr', + choices=['802.3ad', 'active-backup', 'balance-alb', 'balance-rr', 'balance-tlb', 'balance-xor', 'broadcast']), + miimon=dict(type='int'), + downdelay=dict(type='int'), + updelay=dict(type='int'), + arp_interval=dict(type='int'), + arp_ip_target=dict(type='str'), + primary=dict(type='str'), + # general usage + mtu=dict(type='int'), + mac=dict(type='str'), + # bridge specific vars + stp=dict(type='bool', default=True), + priority=dict(type='int', default=128), + slavepriority=dict(type='int', default=32), + forwarddelay=dict(type='int', default=15), + hellotime=dict(type='int', default=2), + maxage=dict(type='int', default=20), + ageingtime=dict(type='int', default=300), + hairpin=dict(type='bool', default=True), + path_cost=dict(type='int', default=100), + # vlan specific vars + vlanid=dict(type='int'), + vlandev=dict(type='str'), + flags=dict(type='str'), + ingress=dict(type='str'), + egress=dict(type='str'), + # vxlan specific vars + vxlan_id=dict(type='int'), + vxlan_local=dict(type='str'), + vxlan_remote=dict(type='str'), + # ip-tunnel specific vars + ip_tunnel_dev=dict(type='str'), + ip_tunnel_local=dict(type='str'), + ip_tunnel_remote=dict(type='str'), + ), + supports_check_mode=True, + ) + + if not HAVE_DBUS: + module.fail_json(msg=missing_required_lib('dbus'), exception=DBUS_IMP_ERR) + + if not HAVE_NM_CLIENT: + module.fail_json(msg=missing_required_lib('NetworkManager glib API'), exception=NM_CLIENT_IMP_ERR) + + nmcli = Nmcli(module) + + (rc, out, err) = (None, '', '') + result = {'conn_name': nmcli.conn_name, 'state': nmcli.state} + + # check for issues + if nmcli.conn_name is None: + nmcli.module.fail_json(msg="Please specify a name for the connection") + # team-slave checks + if nmcli.type == 'team-slave' and nmcli.master is None: + nmcli.module.fail_json(msg="Please specify a name for the master") + if nmcli.type == 'team-slave' and nmcli.ifname is None: + nmcli.module.fail_json(msg="Please specify an interface name for the connection") + + if nmcli.state == 'absent': + if nmcli.connection_exists(): + if module.check_mode: + module.exit_json(changed=True) + (rc, out, err) = nmcli.down_connection() + (rc, out, err) = nmcli.remove_connection() + if rc != 0: + module.fail_json(name=('No Connection named %s exists' % nmcli.conn_name), msg=err, rc=rc) + + elif nmcli.state == 'present': + if nmcli.connection_exists(): + # modify connection (note: this function is check mode aware) + # result['Connection']=('Connection %s of Type %s is not being added' % (nmcli.conn_name, nmcli.type)) + result['Exists'] = 'Connections do exist so we are modifying them' + if module.check_mode: + module.exit_json(changed=True) + (rc, out, err) = nmcli.modify_connection() + if not 
nmcli.connection_exists(): + result['Connection'] = ('Connection %s of Type %s is being added' % (nmcli.conn_name, nmcli.type)) + if module.check_mode: + module.exit_json(changed=True) + (rc, out, err) = nmcli.create_connection() + if rc is not None and rc != 0: + module.fail_json(name=nmcli.conn_name, msg=err, rc=rc) + + if rc is None: + result['changed'] = False + else: + result['changed'] = True + if out: + result['stdout'] = out + if err: + result['stderr'] = err + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/net_tools/nsupdate.py b/plugins/modules/net_tools/nsupdate.py new file mode 100644 index 0000000000..3c9d17ac7c --- /dev/null +++ b/plugins/modules/net_tools/nsupdate.py @@ -0,0 +1,474 @@ +#!/usr/bin/python + +# (c) 2016, Marcin Skarbek +# (c) 2016, Andreas Olsson +# (c) 2017, Loic Blot +# +# This module was ported from https://github.com/mskarbek/ansible-nsupdate +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: nsupdate + +short_description: Manage DNS records. +description: + - Create, update and remove DNS records using DDNS updates +requirements: + - dnspython +author: "Loic Blot (@nerzhul)" +options: + state: + description: + - Manage DNS record. + choices: ['present', 'absent'] + default: 'present' + server: + description: + - Apply DNS modification on this server, specified by IPv4 or IPv6 address. + required: true + port: + description: + - Use this TCP port when connecting to C(server). + default: 53 + key_name: + description: + - Use TSIG key name to authenticate against DNS C(server) + key_secret: + description: + - Use TSIG key secret, associated with C(key_name), to authenticate against C(server) + key_algorithm: + description: + - Specify key algorithm used by C(key_secret). + choices: ['HMAC-MD5.SIG-ALG.REG.INT', 'hmac-md5', 'hmac-sha1', 'hmac-sha224', 'hmac-sha256', 'hmac-sha384', + 'hmac-sha512'] + default: 'hmac-md5' + zone: + description: + - DNS record will be modified on this C(zone). + - When omitted DNS will be queried to attempt finding the correct zone. + - Starting with Ansible 2.7 this parameter is optional. + record: + description: + - Sets the DNS record to modify. When zone is omitted this has to be absolute (ending with a dot). + required: true + type: + description: + - Sets the record type. + default: 'A' + ttl: + description: + - Sets the record TTL. + default: 3600 + value: + description: + - Sets the record value. + protocol: + description: + - Sets the transport protocol (TCP or UDP). TCP is the recommended and a more robust option. 
+    default: 'tcp'
+    choices: ['tcp', 'udp']
+'''
+
+EXAMPLES = '''
+- name: Add or modify ansible.example.org A to 192.168.1.1
+  nsupdate:
+    key_name: "nsupdate"
+    key_secret: "+bFQtBCta7j2vWkjPkAFtgA=="
+    server: "10.1.1.1"
+    zone: "example.org"
+    record: "ansible"
+    value: "192.168.1.1"
+
+- name: Add or modify ansible.example.org A to 192.168.1.1, 192.168.1.2 and 192.168.1.3
+  nsupdate:
+    key_name: "nsupdate"
+    key_secret: "+bFQtBCta7j2vWkjPkAFtgA=="
+    server: "10.1.1.1"
+    zone: "example.org"
+    record: "ansible"
+    value: ["192.168.1.1", "192.168.1.2", "192.168.1.3"]
+
+- name: Remove puppet.example.org CNAME
+  nsupdate:
+    key_name: "nsupdate"
+    key_secret: "+bFQtBCta7j2vWkjPkAFtgA=="
+    server: "10.1.1.1"
+    zone: "example.org"
+    record: "puppet"
+    type: "CNAME"
+    state: absent
+
+- name: Add 1.1.168.192.in-addr.arpa. PTR for ansible.example.org
+  nsupdate:
+    key_name: "nsupdate"
+    key_secret: "+bFQtBCta7j2vWkjPkAFtgA=="
+    server: "10.1.1.1"
+    record: "1.1.168.192.in-addr.arpa."
+    type: "PTR"
+    value: "ansible.example.org."
+    state: present
+
+- name: Remove 1.1.168.192.in-addr.arpa. PTR
+  nsupdate:
+    key_name: "nsupdate"
+    key_secret: "+bFQtBCta7j2vWkjPkAFtgA=="
+    server: "10.1.1.1"
+    record: "1.1.168.192.in-addr.arpa."
+    type: "PTR"
+    state: absent
+'''
+
+RETURN = '''
+changed:
+  description: Whether the module has modified the record
+  returned: success
+  type: bool
+record:
+  description: DNS record
+  returned: success
+  type: str
+  sample: 'ansible'
+ttl:
+  description: DNS record TTL
+  returned: success
+  type: int
+  sample: 86400
+type:
+  description: DNS record type
+  returned: success
+  type: str
+  sample: 'CNAME'
+value:
+  description: DNS record value(s)
+  returned: success
+  type: list
+  sample: '192.168.1.1'
+zone:
+  description: DNS record zone
+  returned: success
+  type: str
+  sample: 'example.org.'
+dns_rc:
+  description: dnspython return code
+  returned: always
+  type: int
+  sample: 4
+dns_rc_str:
+  description: dnspython return code (string representation)
+  returned: always
+  type: str
+  sample: 'REFUSED'
+'''
+
+import traceback
+
+from binascii import Error as binascii_error
+from socket import error as socket_error
+
+DNSPYTHON_IMP_ERR = None
+try:
+    import dns.update
+    import dns.query
+    import dns.tsigkeyring
+    import dns.message
+    import dns.resolver
+
+    HAVE_DNSPYTHON = True
+except ImportError:
+    DNSPYTHON_IMP_ERR = traceback.format_exc()
+    HAVE_DNSPYTHON = False
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_native
+
+
+class RecordManager(object):
+    def __init__(self, module):
+        self.module = module
+
+        if module.params['key_name']:
+            try:
+                self.keyring = dns.tsigkeyring.from_text({
+                    module.params['key_name']: module.params['key_secret']
+                })
+            except TypeError:
+                module.fail_json(msg='Missing key_secret')
+            except binascii_error as e:
+                module.fail_json(msg='TSIG key error: %s' % to_native(e))
+        else:
+            self.keyring = None
+
+        if module.params['key_algorithm'] == 'hmac-md5':
+            self.algorithm = 'HMAC-MD5.SIG-ALG.REG.INT'
+        else:
+            self.algorithm = module.params['key_algorithm']
+
+        if module.params['zone'] is None:
+            if module.params['record'][-1] != '.':
+                self.module.fail_json(msg='record must be absolute when omitting zone parameter')
+            self.zone = self.lookup_zone()
+        else:
+            self.zone = module.params['zone']
+
+        if self.zone[-1] != '.':
+            self.zone += '.'
+
+        if module.params['record'][-1] != '.':
+            self.fqdn = module.params['record'] + '.'
+ self.zone + else: + self.fqdn = module.params['record'] + + if self.module.params['type'].lower() == 'txt' and self.module.params['value'] is not None: + self.value = list(map(self.txt_helper, self.module.params['value'])) + else: + self.value = self.module.params['value'] + + self.dns_rc = 0 + + def txt_helper(self, entry): + if entry[0] == '"' and entry[-1] == '"': + return entry + return '"{text}"'.format(text=entry) + + def lookup_zone(self): + name = dns.name.from_text(self.module.params['record']) + while True: + query = dns.message.make_query(name, dns.rdatatype.SOA) + if self.keyring: + query.use_tsig(keyring=self.keyring, algorithm=self.algorithm) + try: + if self.module.params['protocol'] == 'tcp': + lookup = dns.query.tcp(query, self.module.params['server'], timeout=10, port=self.module.params['port']) + else: + lookup = dns.query.udp(query, self.module.params['server'], timeout=10, port=self.module.params['port']) + except (dns.tsig.PeerBadKey, dns.tsig.PeerBadSignature) as e: + self.module.fail_json(msg='TSIG update error (%s): %s' % (e.__class__.__name__, to_native(e))) + except (socket_error, dns.exception.Timeout) as e: + self.module.fail_json(msg='DNS server error: (%s): %s' % (e.__class__.__name__, to_native(e))) + if lookup.rcode() in [dns.rcode.SERVFAIL, dns.rcode.REFUSED]: + self.module.fail_json(msg='Zone lookup failure: \'%s\' will not respond to queries regarding \'%s\'.' % ( + self.module.params['server'], self.module.params['record'])) + try: + zone = lookup.authority[0].name + if zone == name: + return zone.to_text() + except IndexError: + pass + try: + name = name.parent() + except dns.name.NoParent: + self.module.fail_json(msg='Zone lookup of \'%s\' failed for unknown reason.' % (self.module.params['record'])) + + def __do_update(self, update): + response = None + try: + if self.module.params['protocol'] == 'tcp': + response = dns.query.tcp(update, self.module.params['server'], timeout=10, port=self.module.params['port']) + else: + response = dns.query.udp(update, self.module.params['server'], timeout=10, port=self.module.params['port']) + except (dns.tsig.PeerBadKey, dns.tsig.PeerBadSignature) as e: + self.module.fail_json(msg='TSIG update error (%s): %s' % (e.__class__.__name__, to_native(e))) + except (socket_error, dns.exception.Timeout) as e: + self.module.fail_json(msg='DNS server error: (%s): %s' % (e.__class__.__name__, to_native(e))) + return response + + def create_or_update_record(self): + result = {'changed': False, 'failed': False} + + exists = self.record_exists() + if exists in [0, 2]: + if self.module.check_mode: + self.module.exit_json(changed=True) + + if exists == 0: + self.dns_rc = self.create_record() + if self.dns_rc != 0: + result['msg'] = "Failed to create DNS record (rc: %d)" % self.dns_rc + + elif exists == 2: + self.dns_rc = self.modify_record() + if self.dns_rc != 0: + result['msg'] = "Failed to update DNS record (rc: %d)" % self.dns_rc + + if self.dns_rc != 0: + result['failed'] = True + else: + result['changed'] = True + + else: + result['changed'] = False + + return result + + def create_record(self): + update = dns.update.Update(self.zone, keyring=self.keyring, keyalgorithm=self.algorithm) + for entry in self.value: + try: + update.add(self.module.params['record'], + self.module.params['ttl'], + self.module.params['type'], + entry) + except AttributeError: + self.module.fail_json(msg='value needed when state=present') + except dns.exception.SyntaxError: + self.module.fail_json(msg='Invalid/malformed value') + + response = 
self.__do_update(update) + return dns.message.Message.rcode(response) + + def modify_record(self): + update = dns.update.Update(self.zone, keyring=self.keyring, keyalgorithm=self.algorithm) + update.delete(self.module.params['record'], self.module.params['type']) + for entry in self.value: + try: + update.add(self.module.params['record'], + self.module.params['ttl'], + self.module.params['type'], + entry) + except AttributeError: + self.module.fail_json(msg='value needed when state=present') + except dns.exception.SyntaxError: + self.module.fail_json(msg='Invalid/malformed value') + response = self.__do_update(update) + + return dns.message.Message.rcode(response) + + def remove_record(self): + result = {'changed': False, 'failed': False} + + if self.record_exists() == 0: + return result + + # Check mode and record exists, declared fake change. + if self.module.check_mode: + self.module.exit_json(changed=True) + + update = dns.update.Update(self.zone, keyring=self.keyring, keyalgorithm=self.algorithm) + update.delete(self.module.params['record'], self.module.params['type']) + + response = self.__do_update(update) + self.dns_rc = dns.message.Message.rcode(response) + + if self.dns_rc != 0: + result['failed'] = True + result['msg'] = "Failed to delete record (rc: %d)" % self.dns_rc + else: + result['changed'] = True + + return result + + def record_exists(self): + update = dns.update.Update(self.zone, keyring=self.keyring, keyalgorithm=self.algorithm) + try: + update.present(self.module.params['record'], self.module.params['type']) + except dns.rdatatype.UnknownRdatatype as e: + self.module.fail_json(msg='Record error: {0}'.format(to_native(e))) + + response = self.__do_update(update) + self.dns_rc = dns.message.Message.rcode(response) + if self.dns_rc == 0: + if self.module.params['state'] == 'absent': + return 1 + for entry in self.value: + try: + update.present(self.module.params['record'], self.module.params['type'], entry) + except AttributeError: + self.module.fail_json(msg='value needed when state=present') + except dns.exception.SyntaxError: + self.module.fail_json(msg='Invalid/malformed value') + response = self.__do_update(update) + self.dns_rc = dns.message.Message.rcode(response) + if self.dns_rc == 0: + if self.ttl_changed(): + return 2 + else: + return 1 + else: + return 2 + else: + return 0 + + def ttl_changed(self): + query = dns.message.make_query(self.fqdn, self.module.params['type']) + if self.keyring: + query.use_tsig(keyring=self.keyring, algorithm=self.algorithm) + + try: + if self.module.params['protocol'] == 'tcp': + lookup = dns.query.tcp(query, self.module.params['server'], timeout=10, port=self.module.params['port']) + else: + lookup = dns.query.udp(query, self.module.params['server'], timeout=10, port=self.module.params['port']) + except (dns.tsig.PeerBadKey, dns.tsig.PeerBadSignature) as e: + self.module.fail_json(msg='TSIG update error (%s): %s' % (e.__class__.__name__, to_native(e))) + except (socket_error, dns.exception.Timeout) as e: + self.module.fail_json(msg='DNS server error: (%s): %s' % (e.__class__.__name__, to_native(e))) + + if lookup.rcode() != dns.rcode.NOERROR: + self.module.fail_json(msg='Failed to lookup TTL of existing matching record.') + + current_ttl = lookup.answer[0].ttl + return current_ttl != self.module.params['ttl'] + + +def main(): + tsig_algs = ['HMAC-MD5.SIG-ALG.REG.INT', 'hmac-md5', 'hmac-sha1', 'hmac-sha224', + 'hmac-sha256', 'hmac-sha384', 'hmac-sha512'] + + module = AnsibleModule( + argument_spec=dict( + 
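# Parameter semantics are described in the DOCUMENTATION block above;
+            # key_secret is no_log so the TSIG secret never appears in logs or output.
+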
state=dict(required=False, default='present', choices=['present', 'absent'], type='str'), + server=dict(required=True, type='str'), + port=dict(required=False, default=53, type='int'), + key_name=dict(required=False, type='str'), + key_secret=dict(required=False, type='str', no_log=True), + key_algorithm=dict(required=False, default='hmac-md5', choices=tsig_algs, type='str'), + zone=dict(required=False, default=None, type='str'), + record=dict(required=True, type='str'), + type=dict(required=False, default='A', type='str'), + ttl=dict(required=False, default=3600, type='int'), + value=dict(required=False, default=None, type='list'), + protocol=dict(required=False, default='tcp', choices=['tcp', 'udp'], type='str') + ), + supports_check_mode=True + ) + + if not HAVE_DNSPYTHON: + module.fail_json(msg=missing_required_lib('dnspython'), exception=DNSPYTHON_IMP_ERR) + + if len(module.params["record"]) == 0: + module.fail_json(msg='record cannot be empty.') + + record = RecordManager(module) + result = {} + if module.params["state"] == 'absent': + result = record.remove_record() + elif module.params["state"] == 'present': + result = record.create_or_update_record() + + result['dns_rc'] = record.dns_rc + result['dns_rc_str'] = dns.rcode.to_text(record.dns_rc) + if result['failed']: + module.fail_json(**result) + else: + result['record'] = dict(zone=record.zone, + record=module.params['record'], + type=module.params['type'], + ttl=module.params['ttl'], + value=record.value) + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/net_tools/omapi_host.py b/plugins/modules/net_tools/omapi_host.py new file mode 100644 index 0000000000..d5607d4d12 --- /dev/null +++ b/plugins/modules/net_tools/omapi_host.py @@ -0,0 +1,314 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# copyright: (c) 2016, Loic Blot +# Sponsored by Infopro Digital. http://www.infopro-digital.com/ +# Sponsored by E.T.A.I. http://www.etai.fr/ +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = r''' +--- +module: omapi_host +short_description: Setup OMAPI hosts. +description: Manage OMAPI hosts into compatible DHCPd servers +requirements: + - pypureomapi +author: +- Loic Blot (@nerzhul) +options: + state: + description: + - Create or remove OMAPI host. + type: str + required: true + choices: [ absent, present ] + hostname: + description: + - Sets the host lease hostname (mandatory if state=present). + type: str + aliases: [ name ] + host: + description: + - Sets OMAPI server host to interact with. + type: str + default: localhost + port: + description: + - Sets the OMAPI server port to interact with. + type: int + default: 7911 + key_name: + description: + - Sets the TSIG key name for authenticating against OMAPI server. + type: str + required: true + key: + description: + - Sets the TSIG key content for authenticating against OMAPI server. + type: str + required: true + macaddr: + description: + - Sets the lease host MAC address. + type: str + required: true + ip: + description: + - Sets the lease host IP address. + type: str + statements: + description: + - Attach a list of OMAPI DHCP statements with host lease (without ending semicolon). + type: list + default: [] + ddns: + description: + - Enable dynamic DNS updates for this host. 
+ type: bool + default: no + +''' +EXAMPLES = r''' +- name: Add a host using OMAPI + omapi_host: + key_name: defomapi + key: +bFQtBCta6j2vWkjPkNFtgA== + host: 10.98.4.55 + macaddr: 44:dd:ab:dd:11:44 + name: server01 + ip: 192.168.88.99 + ddns: yes + statements: + - filename "pxelinux.0" + - next-server 1.1.1.1 + state: present + +- name: Remove a host using OMAPI + omapi_host: + key_name: defomapi + key: +bFQtBCta6j2vWkjPkNFtgA== + host: 10.1.1.1 + macaddr: 00:66:ab:dd:11:44 + state: absent +''' + +RETURN = r''' +lease: + description: dictionary containing host information + returned: success + type: complex + contains: + ip-address: + description: IP address, if there is. + returned: success + type: str + sample: '192.168.1.5' + hardware-address: + description: MAC address + returned: success + type: str + sample: '00:11:22:33:44:55' + hardware-type: + description: hardware type, generally '1' + returned: success + type: int + sample: 1 + name: + description: hostname + returned: success + type: str + sample: 'mydesktop' +''' + +import binascii +import socket +import struct +import traceback + +PUREOMAPI_IMP_ERR = None +try: + from pypureomapi import Omapi, OmapiMessage, OmapiError, OmapiErrorNotFound + from pypureomapi import pack_ip, unpack_ip, pack_mac, unpack_mac + from pypureomapi import OMAPI_OP_STATUS, OMAPI_OP_UPDATE + pureomapi_found = True +except ImportError: + PUREOMAPI_IMP_ERR = traceback.format_exc() + pureomapi_found = False + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib +from ansible.module_utils._text import to_bytes, to_native + + +class OmapiHostManager: + def __init__(self, module): + self.module = module + self.omapi = None + self.connect() + + def connect(self): + try: + self.omapi = Omapi(self.module.params['host'], self.module.params['port'], self.module.params['key_name'], + self.module.params['key']) + except binascii.Error: + self.module.fail_json(msg="Unable to open OMAPI connection. 'key' is not a valid base64 key.") + except OmapiError as e: + self.module.fail_json(msg="Unable to open OMAPI connection. Ensure 'host', 'port', 'key' and 'key_name' " + "are valid. 
Exception was: %s" % to_native(e))
+        except socket.error as e:
+            self.module.fail_json(msg="Unable to connect to OMAPI server: %s" % to_native(e))
+
+    def get_host(self, macaddr):
+        msg = OmapiMessage.open(to_bytes("host", errors='surrogate_or_strict'))
+        msg.obj.append((to_bytes("hardware-address", errors='surrogate_or_strict'), pack_mac(macaddr)))
+        msg.obj.append((to_bytes("hardware-type", errors='surrogate_or_strict'), struct.pack("!I", 1)))
+        response = self.omapi.query_server(msg)
+        if response.opcode != OMAPI_OP_UPDATE:
+            return None
+        return response
+
+    @staticmethod
+    def unpack_facts(obj):
+        result = dict(obj)
+        if 'hardware-address' in result:
+            result['hardware-address'] = unpack_mac(result['hardware-address'])
+
+        if 'ip-address' in result:
+            result['ip-address'] = unpack_ip(result['ip-address'])
+
+        if 'hardware-type' in result:
+            result['hardware-type'] = struct.unpack("!I", result['hardware-type'])
+
+        return result
+
+    def setup_host(self):
+        if self.module.params['hostname'] is None or len(self.module.params['hostname']) == 0:
+            self.module.fail_json(msg="name attribute cannot be empty when adding or modifying a host.")
+
+        msg = None
+        host_response = self.get_host(self.module.params['macaddr'])
+        # If the host was not found by MAC address, build a create message
+        if host_response is None:
+            msg = OmapiMessage.open(to_bytes('host', errors='surrogate_or_strict'))
+            msg.message.append(('create', struct.pack('!I', 1)))
+            msg.message.append(('exclusive', struct.pack('!I', 1)))
+            msg.obj.append(('hardware-address', pack_mac(self.module.params['macaddr'])))
+            msg.obj.append(('hardware-type', struct.pack('!I', 1)))
+            msg.obj.append(('name', self.module.params['hostname']))
+            if self.module.params['ip'] is not None:
+                msg.obj.append((to_bytes("ip-address", errors='surrogate_or_strict'), pack_ip(self.module.params['ip'])))
+
+            stmt_join = ""
+            if self.module.params['ddns']:
+                stmt_join += 'ddns-hostname "{0}"; '.format(self.module.params['hostname'])
+
+            try:
+                if len(self.module.params['statements']) > 0:
+                    stmt_join += "; ".join(self.module.params['statements'])
+                    stmt_join += "; "
+            except TypeError as e:
+                self.module.fail_json(msg="Invalid statements found: %s" % to_native(e))
+
+            if len(stmt_join) > 0:
+                msg.obj.append(('statements', stmt_join))
+
+            try:
+                response = self.omapi.query_server(msg)
+                if response.opcode != OMAPI_OP_UPDATE:
+                    self.module.fail_json(msg="Failed to add host, ensure authentication and host parameters "
+                                              "are valid.")
+                self.module.exit_json(changed=True, lease=self.unpack_facts(response.obj))
+            except OmapiError as e:
+                self.module.fail_json(msg="OMAPI error: %s" % to_native(e))
+        # Otherwise, forge an update message
+        else:
+            response_obj = self.unpack_facts(host_response.obj)
+            fields_to_update = {}
+
+            if to_bytes('ip-address', errors='surrogate_or_strict') not in response_obj or \
+               unpack_ip(response_obj[to_bytes('ip-address', errors='surrogate_or_strict')]) != self.module.params['ip']:
+                fields_to_update['ip-address'] = pack_ip(self.module.params['ip'])
+
+            # Name cannot be changed
+            if 'name' not in response_obj or response_obj['name'] != self.module.params['hostname']:
+                self.module.fail_json(msg="Changing hostname is not supported. Old was %s, new is %s. "
+                                          "Please delete the host and add a new one." %
+                                          (response_obj['name'], self.module.params['hostname']))
+
+            """
+            # It seems statements are not returned by OMAPI, so we cannot modify them at the moment.
+ if 'statements' not in response_obj and len(self.module.params['statements']) > 0 or \ + response_obj['statements'] != self.module.params['statements']: + with open('/tmp/omapi', 'w') as fb: + for (k,v) in iteritems(response_obj): + fb.writelines('statements: %s %s\n' % (k, v)) + """ + if len(fields_to_update) == 0: + self.module.exit_json(changed=False, lease=response_obj) + else: + msg = OmapiMessage.update(host_response.handle) + msg.update_object(fields_to_update) + + try: + response = self.omapi.query_server(msg) + if response.opcode != OMAPI_OP_STATUS: + self.module.fail_json(msg="Failed to modify host, ensure authentication and host parameters " + "are valid.") + self.module.exit_json(changed=True) + except OmapiError as e: + self.module.fail_json(msg="OMAPI error: %s" % to_native(e)) + + def remove_host(self): + try: + self.omapi.del_host(self.module.params['macaddr']) + self.module.exit_json(changed=True) + except OmapiErrorNotFound: + self.module.exit_json() + except OmapiError as e: + self.module.fail_json(msg="OMAPI error: %s" % to_native(e)) + + +def main(): + module = AnsibleModule( + argument_spec=dict( + state=dict(type='str', required=True, choices=['absent', 'present']), + host=dict(type='str', default="localhost"), + port=dict(type='int', default=7911), + key_name=dict(type='str', required=True), + key=dict(type='str', required=True, no_log=True), + macaddr=dict(type='str', required=True), + hostname=dict(type='str', aliases=['name']), + ip=dict(type='str'), + ddns=dict(type='bool', default=False), + statements=dict(type='list', default=[]), + ), + supports_check_mode=False, + ) + + if not pureomapi_found: + module.fail_json(msg=missing_required_lib('pypureomapi'), exception=PUREOMAPI_IMP_ERR) + + if module.params['key'] is None or len(module.params["key"]) == 0: + module.fail_json(msg="'key' parameter cannot be empty.") + + if module.params['key_name'] is None or len(module.params["key_name"]) == 0: + module.fail_json(msg="'key_name' parameter cannot be empty.") + + host_manager = OmapiHostManager(module) + try: + if module.params['state'] == 'present': + host_manager.setup_host() + elif module.params['state'] == 'absent': + host_manager.remove_host() + except ValueError as e: + module.fail_json(msg="OMAPI input value error: %s" % to_native(e)) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/net_tools/snmp_facts.py b/plugins/modules/net_tools/snmp_facts.py new file mode 100644 index 0000000000..78fcbce3f3 --- /dev/null +++ b/plugins/modules/net_tools/snmp_facts.py @@ -0,0 +1,464 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# This file is part of Networklore's snmp library for Ansible +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = r''' +--- +module: snmp_facts +author: +- Patrick Ogenstad (@ogenstad) +short_description: Retrieve facts for a device using SNMP +description: + - Retrieve facts for a device using SNMP, the facts will be + inserted to the ansible_facts key. +requirements: + - pysnmp +options: + host: + description: + - Set to target snmp server (normally C({{ inventory_hostname }})). + type: str + required: true + version: + description: + - SNMP Version to use, v2/v2c or v3. 
+    type: str
+    required: true
+    choices: [ v2, v2c, v3 ]
+  community:
+    description:
+    - The SNMP community string, required if version is v2/v2c.
+    type: str
+  level:
+    description:
+    - Authentication level.
+    - Required if version is v3.
+    type: str
+    choices: [ authNoPriv, authPriv ]
+  username:
+    description:
+    - Username for SNMPv3.
+    - Required if version is v3.
+    type: str
+  integrity:
+    description:
+    - Hashing algorithm.
+    - Required if version is v3.
+    type: str
+    choices: [ md5, sha ]
+  authkey:
+    description:
+    - Authentication key.
+    - Required if version is v3.
+    type: str
+  privacy:
+    description:
+    - Encryption algorithm.
+    - Required if level is authPriv.
+    type: str
+    choices: [ aes, des ]
+  privkey:
+    description:
+    - Encryption key.
+    - Required if level is authPriv.
+    type: str
+'''
+
+EXAMPLES = r'''
+- name: Gather facts with SNMP version 2
+  snmp_facts:
+    host: '{{ inventory_hostname }}'
+    version: v2c
+    community: public
+  delegate_to: localhost
+
+- name: Gather facts using SNMP version 3
+  snmp_facts:
+    host: '{{ inventory_hostname }}'
+    version: v3
+    level: authPriv
+    integrity: sha
+    privacy: aes
+    username: snmp-user
+    authkey: abc12345
+    privkey: def6789
+  delegate_to: localhost
+'''
+
+RETURN = r'''
+ansible_sysdescr:
+  description: A textual description of the entity.
+  returned: success
+  type: str
+  sample: Linux ubuntu-user 4.4.0-93-generic #116-Ubuntu SMP Fri Aug 11 21:17:51 UTC 2017 x86_64
+ansible_sysobjectid:
+  description: The vendor's authoritative identification of the network management subsystem contained in the entity.
+  returned: success
+  type: str
+  sample: 1.3.6.1.4.1.8072.3.2.10
+ansible_sysuptime:
+  description: The time (in hundredths of a second) since the network management portion of the system was last re-initialized.
+  returned: success
+  type: int
+  sample: 42388
+ansible_syscontact:
+  description: The textual identification of the contact person for this managed node, together with information on how to contact this person.
+  returned: success
+  type: str
+  sample: Me
+ansible_sysname:
+  description: An administratively-assigned name for this managed node.
+  returned: success
+  type: str
+  sample: ubuntu-user
+ansible_syslocation:
+  description: The physical location of this node (e.g., `telephone closet, 3rd floor').
+  returned: success
+  type: str
+  sample: Sitting on the Dock of the Bay
+ansible_all_ipv4_addresses:
+  description: List of all IPv4 addresses.
+  returned: success
+  type: list
+  sample: ["127.0.0.1", "172.17.0.1"]
+ansible_interfaces:
+  description: Dictionary of each network interface and its metadata.
+ returned: success + type: dict + sample: { + "1": { + "adminstatus": "up", + "description": "", + "ifindex": "1", + "ipv4": [ + { + "address": "127.0.0.1", + "netmask": "255.0.0.0" + } + ], + "mac": "", + "mtu": "65536", + "name": "lo", + "operstatus": "up", + "speed": "65536" + }, + "2": { + "adminstatus": "up", + "description": "", + "ifindex": "2", + "ipv4": [ + { + "address": "192.168.213.128", + "netmask": "255.255.255.0" + } + ], + "mac": "000a305a52a1", + "mtu": "1500", + "name": "Intel Corporation 82545EM Gigabit Ethernet Controller (Copper)", + "operstatus": "up", + "speed": "1500" + } + } +''' + +import binascii +import traceback +from collections import defaultdict + +PYSNMP_IMP_ERR = None +try: + from pysnmp.entity.rfc3413.oneliner import cmdgen + has_pysnmp = True +except Exception: + PYSNMP_IMP_ERR = traceback.format_exc() + has_pysnmp = False + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib +from ansible.module_utils._text import to_text + + +class DefineOid(object): + + def __init__(self, dotprefix=False): + if dotprefix: + dp = "." + else: + dp = "" + + # From SNMPv2-MIB + self.sysDescr = dp + "1.3.6.1.2.1.1.1.0" + self.sysObjectId = dp + "1.3.6.1.2.1.1.2.0" + self.sysUpTime = dp + "1.3.6.1.2.1.1.3.0" + self.sysContact = dp + "1.3.6.1.2.1.1.4.0" + self.sysName = dp + "1.3.6.1.2.1.1.5.0" + self.sysLocation = dp + "1.3.6.1.2.1.1.6.0" + + # From IF-MIB + self.ifIndex = dp + "1.3.6.1.2.1.2.2.1.1" + self.ifDescr = dp + "1.3.6.1.2.1.2.2.1.2" + self.ifMtu = dp + "1.3.6.1.2.1.2.2.1.4" + self.ifSpeed = dp + "1.3.6.1.2.1.2.2.1.5" + self.ifPhysAddress = dp + "1.3.6.1.2.1.2.2.1.6" + self.ifAdminStatus = dp + "1.3.6.1.2.1.2.2.1.7" + self.ifOperStatus = dp + "1.3.6.1.2.1.2.2.1.8" + self.ifAlias = dp + "1.3.6.1.2.1.31.1.1.1.18" + + # From IP-MIB + self.ipAdEntAddr = dp + "1.3.6.1.2.1.4.20.1.1" + self.ipAdEntIfIndex = dp + "1.3.6.1.2.1.4.20.1.2" + self.ipAdEntNetMask = dp + "1.3.6.1.2.1.4.20.1.3" + + +def decode_hex(hexstring): + + if len(hexstring) < 3: + return hexstring + if hexstring[:2] == "0x": + return to_text(binascii.unhexlify(hexstring[2:])) + else: + return hexstring + + +def decode_mac(hexstring): + + if len(hexstring) != 14: + return hexstring + if hexstring[:2] == "0x": + return hexstring[2:] + else: + return hexstring + + +def lookup_adminstatus(int_adminstatus): + adminstatus_options = { + 1: 'up', + 2: 'down', + 3: 'testing' + } + if int_adminstatus in adminstatus_options: + return adminstatus_options[int_adminstatus] + else: + return "" + + +def lookup_operstatus(int_operstatus): + operstatus_options = { + 1: 'up', + 2: 'down', + 3: 'testing', + 4: 'unknown', + 5: 'dormant', + 6: 'notPresent', + 7: 'lowerLayerDown' + } + if int_operstatus in operstatus_options: + return operstatus_options[int_operstatus] + else: + return "" + + +def main(): + module = AnsibleModule( + argument_spec=dict( + host=dict(type='str', required=True), + version=dict(type='str', required=True, choices=['v2', 'v2c', 'v3']), + community=dict(type='str'), + username=dict(type='str'), + level=dict(type='str', choices=['authNoPriv', 'authPriv']), + integrity=dict(type='str', choices=['md5', 'sha']), + privacy=dict(type='str', choices=['aes', 'des']), + authkey=dict(type='str'), + privkey=dict(type='str'), + ), + required_together=( + ['username', 'level', 'integrity', 'authkey'], + ['privacy', 'privkey'], + ), + supports_check_mode=False, + ) + + m_args = module.params + + if not has_pysnmp: + module.fail_json(msg=missing_required_lib('pysnmp'), exception=PYSNMP_IMP_ERR) 
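+
+    # Overall flow: build an auth object for the requested SNMP version,
+    # fetch the scalar system OIDs with getCmd(), then walk the interface
+    # and address tables with nextCmd() and assemble the results into
+    # ansible_facts.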
+ + cmdGen = cmdgen.CommandGenerator() + + # Verify that we receive a community when using snmp v2 + if m_args['version'] == "v2" or m_args['version'] == "v2c": + if m_args['community'] is None: + module.fail_json(msg='Community not set when using snmp version 2') + + if m_args['version'] == "v3": + if m_args['username'] is None: + module.fail_json(msg='Username not set when using snmp version 3') + + if m_args['level'] == "authPriv" and m_args['privacy'] is None: + module.fail_json(msg='Privacy algorithm not set when using authPriv') + + if m_args['integrity'] == "sha": + integrity_proto = cmdgen.usmHMACSHAAuthProtocol + elif m_args['integrity'] == "md5": + integrity_proto = cmdgen.usmHMACMD5AuthProtocol + + if m_args['privacy'] == "aes": + privacy_proto = cmdgen.usmAesCfb128Protocol + elif m_args['privacy'] == "des": + privacy_proto = cmdgen.usmDESPrivProtocol + + # Use SNMP Version 2 + if m_args['version'] == "v2" or m_args['version'] == "v2c": + snmp_auth = cmdgen.CommunityData(m_args['community']) + + # Use SNMP Version 3 with authNoPriv + elif m_args['level'] == "authNoPriv": + snmp_auth = cmdgen.UsmUserData(m_args['username'], authKey=m_args['authkey'], authProtocol=integrity_proto) + + # Use SNMP Version 3 with authPriv + else: + snmp_auth = cmdgen.UsmUserData(m_args['username'], authKey=m_args['authkey'], privKey=m_args['privkey'], authProtocol=integrity_proto, + privProtocol=privacy_proto) + + # Use p to prefix OIDs with a dot for polling + p = DefineOid(dotprefix=True) + # Use v without a prefix to use with return values + v = DefineOid(dotprefix=False) + + def Tree(): + return defaultdict(Tree) + + results = Tree() + + errorIndication, errorStatus, errorIndex, varBinds = cmdGen.getCmd( + snmp_auth, + cmdgen.UdpTransportTarget((m_args['host'], 161)), + cmdgen.MibVariable(p.sysDescr,), + cmdgen.MibVariable(p.sysObjectId,), + cmdgen.MibVariable(p.sysUpTime,), + cmdgen.MibVariable(p.sysContact,), + cmdgen.MibVariable(p.sysName,), + cmdgen.MibVariable(p.sysLocation,), + lookupMib=False + ) + + if errorIndication: + module.fail_json(msg=str(errorIndication)) + + for oid, val in varBinds: + current_oid = oid.prettyPrint() + current_val = val.prettyPrint() + if current_oid == v.sysDescr: + results['ansible_sysdescr'] = decode_hex(current_val) + elif current_oid == v.sysObjectId: + results['ansible_sysobjectid'] = current_val + elif current_oid == v.sysUpTime: + results['ansible_sysuptime'] = current_val + elif current_oid == v.sysContact: + results['ansible_syscontact'] = current_val + elif current_oid == v.sysName: + results['ansible_sysname'] = current_val + elif current_oid == v.sysLocation: + results['ansible_syslocation'] = current_val + + errorIndication, errorStatus, errorIndex, varTable = cmdGen.nextCmd( + snmp_auth, + cmdgen.UdpTransportTarget((m_args['host'], 161)), + cmdgen.MibVariable(p.ifIndex,), + cmdgen.MibVariable(p.ifDescr,), + cmdgen.MibVariable(p.ifMtu,), + cmdgen.MibVariable(p.ifSpeed,), + cmdgen.MibVariable(p.ifPhysAddress,), + cmdgen.MibVariable(p.ifAdminStatus,), + cmdgen.MibVariable(p.ifOperStatus,), + cmdgen.MibVariable(p.ipAdEntAddr,), + cmdgen.MibVariable(p.ipAdEntIfIndex,), + cmdgen.MibVariable(p.ipAdEntNetMask,), + + cmdgen.MibVariable(p.ifAlias,), + lookupMib=False + ) + + if errorIndication: + module.fail_json(msg=str(errorIndication)) + + interface_indexes = [] + + all_ipv4_addresses = [] + ipv4_networks = Tree() + + for varBinds in varTable: + for oid, val in varBinds: + current_oid = oid.prettyPrint() + current_val = val.prettyPrint() + if v.ifIndex in 
current_oid: + ifIndex = int(current_oid.rsplit('.', 1)[-1]) + results['ansible_interfaces'][ifIndex]['ifindex'] = current_val + interface_indexes.append(ifIndex) + if v.ifDescr in current_oid: + ifIndex = int(current_oid.rsplit('.', 1)[-1]) + results['ansible_interfaces'][ifIndex]['name'] = current_val + if v.ifMtu in current_oid: + ifIndex = int(current_oid.rsplit('.', 1)[-1]) + results['ansible_interfaces'][ifIndex]['mtu'] = current_val + if v.ifSpeed in current_oid: + ifIndex = int(current_oid.rsplit('.', 1)[-1]) + results['ansible_interfaces'][ifIndex]['speed'] = current_val + if v.ifPhysAddress in current_oid: + ifIndex = int(current_oid.rsplit('.', 1)[-1]) + results['ansible_interfaces'][ifIndex]['mac'] = decode_mac(current_val) + if v.ifAdminStatus in current_oid: + ifIndex = int(current_oid.rsplit('.', 1)[-1]) + results['ansible_interfaces'][ifIndex]['adminstatus'] = lookup_adminstatus(int(current_val)) + if v.ifOperStatus in current_oid: + ifIndex = int(current_oid.rsplit('.', 1)[-1]) + results['ansible_interfaces'][ifIndex]['operstatus'] = lookup_operstatus(int(current_val)) + if v.ipAdEntAddr in current_oid: + curIPList = current_oid.rsplit('.', 4)[-4:] + curIP = ".".join(curIPList) + ipv4_networks[curIP]['address'] = current_val + all_ipv4_addresses.append(current_val) + if v.ipAdEntIfIndex in current_oid: + curIPList = current_oid.rsplit('.', 4)[-4:] + curIP = ".".join(curIPList) + ipv4_networks[curIP]['interface'] = current_val + if v.ipAdEntNetMask in current_oid: + curIPList = current_oid.rsplit('.', 4)[-4:] + curIP = ".".join(curIPList) + ipv4_networks[curIP]['netmask'] = current_val + + if v.ifAlias in current_oid: + ifIndex = int(current_oid.rsplit('.', 1)[-1]) + results['ansible_interfaces'][ifIndex]['description'] = current_val + + interface_to_ipv4 = {} + for ipv4_network in ipv4_networks: + current_interface = ipv4_networks[ipv4_network]['interface'] + current_network = { + 'address': ipv4_networks[ipv4_network]['address'], + 'netmask': ipv4_networks[ipv4_network]['netmask'] + } + if current_interface not in interface_to_ipv4: + interface_to_ipv4[current_interface] = [] + interface_to_ipv4[current_interface].append(current_network) + else: + interface_to_ipv4[current_interface].append(current_network) + + for interface in interface_to_ipv4: + results['ansible_interfaces'][int(interface)]['ipv4'] = interface_to_ipv4[interface] + + results['ansible_all_ipv4_addresses'] = all_ipv4_addresses + + module.exit_json(ansible_facts=results) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/a10/a10_server.py b/plugins/modules/network/a10/a10_server.py new file mode 100644 index 0000000000..be24f32bd8 --- /dev/null +++ b/plugins/modules/network/a10/a10_server.py @@ -0,0 +1,285 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2014, Mischa Peters , +# (c) 2016, Eric Chou +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: a10_server +short_description: Manage A10 Networks AX/SoftAX/Thunder/vThunder devices' server object. +description: + - Manage SLB (Server Load Balancer) server objects on A10 Networks devices via aXAPIv2. +author: + - Eric Chou (@ericchou1) + - Mischa Peters (@mischapeters) +notes: + - Requires A10 Networks aXAPI 2.1. 
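+    - See M(a10_server_axapi3) for the variant of this module that targets aXAPIv3.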
+extends_documentation_fragment:
+- community.general.a10
+- url
+
+options:
+  partition:
+    description:
+      - Set the active partition.
+  server_name:
+    description:
+      - The SLB (Server Load Balancer) server name.
+    required: true
+    aliases: ['server']
+  server_ip:
+    description:
+      - The SLB server IPv4 address.
+    aliases: ['ip', 'address']
+  server_status:
+    description:
+      - The SLB server status.
+    default: enabled
+    aliases: ['status']
+    choices: ['enabled', 'disabled']
+  server_ports:
+    description:
+      - A list of ports to create for the server. Each list item should be a
+        dictionary which specifies the C(port:) and C(protocol:), but can also optionally
+        specify the C(status:). See the examples below for details. This parameter is
+        required when C(state) is C(present).
+    aliases: ['port']
+  state:
+    description:
+      - Whether the SLB server should be created/updated (C(present)) or removed (C(absent)).
+    default: present
+    choices: ['present', 'absent']
+  validate_certs:
+    description:
+      - If C(no), SSL certificates will not be validated. This should only be used
+        on personally controlled devices using self-signed certificates.
+    type: bool
+    default: 'yes'
+
+'''
+
+EXAMPLES = '''
+# Create a new server
+- a10_server:
+    host: a10.mydomain.com
+    username: myadmin
+    password: mypassword
+    partition: mypartition
+    server: test
+    server_ip: 1.1.1.100
+    server_ports:
+      - port_num: 8080
+        protocol: tcp
+      - port_num: 8443
+        protocol: TCP
+
+'''
+
+RETURN = '''
+content:
+  description: the full info regarding the slb_server
+  returned: success
+  type: str
+  sample: "mynewserver"
+'''
+import json
+
+from ansible_collections.community.general.plugins.module_utils.network.a10.a10 import (axapi_call, a10_argument_spec, axapi_authenticate, axapi_failure, axapi_get_port_protocol,
+                                                                                        axapi_enabled_disabled, AXAPI_PORT_PROTOCOLS)
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import url_argument_spec
+
+
+VALID_PORT_FIELDS = ['port_num', 'protocol', 'status']
+
+
+def validate_ports(module, ports):
+    for item in ports:
+        for key in item:
+            if key not in VALID_PORT_FIELDS:
+                module.fail_json(msg="invalid port field (%s), must be one of: %s" % (key, ','.join(VALID_PORT_FIELDS)))
+
+        # validate the port number is present and an integer
+        if 'port_num' in item:
+            try:
+                item['port_num'] = int(item['port_num'])
+            except Exception:
+                module.fail_json(msg="port_num entries in the port definitions must be integers")
+        else:
+            module.fail_json(msg="port definitions must define the port_num field")
+
+        # validate the port protocol is present, and convert it to
+        # the internal API integer value (and validate it)
+        if 'protocol' in item:
+            protocol = axapi_get_port_protocol(item['protocol'])
+            if not protocol:
+                module.fail_json(msg="invalid port protocol, must be one of: %s" % ','.join(AXAPI_PORT_PROTOCOLS))
+            else:
+                item['protocol'] = protocol
+        else:
+            module.fail_json(msg="port definitions must define the port protocol (%s)" % ','.join(AXAPI_PORT_PROTOCOLS))
+
+        # convert the status to the internal API integer value
+        if 'status' in item:
+            item['status'] = axapi_enabled_disabled(item['status'])
+        else:
+            item['status'] = 1
+
+
+def main():
+    argument_spec = a10_argument_spec()
+    argument_spec.update(url_argument_spec())
+    argument_spec.update(
+        dict(
+            state=dict(type='str', default='present', choices=['present', 'absent']),
+            server_name=dict(type='str', aliases=['server'], required=True),
+            server_ip=dict(type='str', aliases=['ip', 'address']),
+
server_status=dict(type='str', default='enabled', aliases=['status'], choices=['enabled', 'disabled']), + server_ports=dict(type='list', aliases=['port'], default=[]), + partition=dict(type='str', default=[]), + ) + ) + + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=False + ) + + host = module.params['host'] + partition = module.params['partition'] + username = module.params['username'] + password = module.params['password'] + state = module.params['state'] + write_config = module.params['write_config'] + slb_server = module.params['server_name'] + slb_server_ip = module.params['server_ip'] + slb_server_status = module.params['server_status'] + slb_server_ports = module.params['server_ports'] + + if slb_server is None: + module.fail_json(msg='server_name is required') + + axapi_base_url = 'https://%s/services/rest/V2.1/?format=json' % host + session_url = axapi_authenticate(module, axapi_base_url, username, password) + + # validate the ports data structure + validate_ports(module, slb_server_ports) + + json_post = { + 'server': { + 'name': slb_server, + } + } + + # add optional module parameters + if slb_server_ip: + json_post['server']['host'] = slb_server_ip + + if slb_server_ports: + json_post['server']['port_list'] = slb_server_ports + + if slb_server_status: + json_post['server']['status'] = axapi_enabled_disabled(slb_server_status) + + axapi_call(module, session_url + '&method=system.partition.active', json.dumps({'name': partition})) + + slb_server_data = axapi_call(module, session_url + '&method=slb.server.search', json.dumps({'name': slb_server})) + slb_server_exists = not axapi_failure(slb_server_data) + + changed = False + if state == 'present': + if not slb_server_exists: + if not slb_server_ip: + module.fail_json(msg='you must specify an IP address when creating a server') + + result = axapi_call(module, session_url + '&method=slb.server.create', json.dumps(json_post)) + if axapi_failure(result): + module.fail_json(msg="failed to create the server: %s" % result['response']['err']['msg']) + changed = True + else: + def port_needs_update(src_ports, dst_ports): + ''' + Checks to determine if the port definitions of the src_ports + array are in or different from those in dst_ports. If there is + a difference, this function returns true, otherwise false. + ''' + for src_port in src_ports: + found = False + different = False + for dst_port in dst_ports: + if src_port['port_num'] == dst_port['port_num']: + found = True + for valid_field in VALID_PORT_FIELDS: + if src_port[valid_field] != dst_port[valid_field]: + different = True + break + if found or different: + break + if not found or different: + return True + # every port from the src exists in the dst, and none of them were different + return False + + def status_needs_update(current_status, new_status): + ''' + Check to determine if we want to change the status of a server. + If there is a difference between the current status of the server and + the desired status, return true, otherwise false. 
+                '''
+                if current_status != new_status:
+                    return True
+                return False
+
+            defined_ports = slb_server_data.get('server', {}).get('port_list', [])
+            current_status = slb_server_data.get('server', {}).get('status')
+
+            # we check for a needed update several ways
+            # - in case ports are missing from the ones specified by the user
+            # - in case ports are missing from those on the device
+            # - in case we are changing the status of a server
+            if (port_needs_update(defined_ports, slb_server_ports) or
+                    port_needs_update(slb_server_ports, defined_ports) or
+                    status_needs_update(current_status, axapi_enabled_disabled(slb_server_status))):
+                result = axapi_call(module, session_url + '&method=slb.server.update', json.dumps(json_post))
+                if axapi_failure(result):
+                    module.fail_json(msg="failed to update the server: %s" % result['response']['err']['msg'])
+                changed = True
+
+        # if we changed things, get the full info regarding
+        # the server for the return data below
+        if changed:
+            result = axapi_call(module, session_url + '&method=slb.server.search', json.dumps({'name': slb_server}))
+        else:
+            result = slb_server_data
+    elif state == 'absent':
+        if slb_server_exists:
+            result = axapi_call(module, session_url + '&method=slb.server.delete', json.dumps({'name': slb_server}))
+            changed = True
+        else:
+            result = dict(msg="the server was not present")
+
+    # if the config has changed, save the config unless otherwise requested
+    if changed and write_config:
+        write_result = axapi_call(module, session_url + '&method=system.action.write_memory')
+        if axapi_failure(write_result):
+            module.fail_json(msg="failed to save the configuration: %s" % write_result['response']['err']['msg'])
+
+    # log out of the session nicely and exit
+    axapi_call(module, session_url + '&method=session.close')
+    module.exit_json(changed=changed, content=result)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/network/a10/a10_server_axapi3.py b/plugins/modules/network/a10/a10_server_axapi3.py
new file mode 100644
index 0000000000..cf2eae0614
--- /dev/null
+++ b/plugins/modules/network/a10/a10_server_axapi3.py
@@ -0,0 +1,244 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2014, Mischa Peters
+# Copyright: (c) 2016, Eric Chou
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: a10_server_axapi3
+short_description: Manage A10 Networks AX/SoftAX/Thunder/vThunder devices
+description:
+    - Manage SLB (Server Load Balancer) server objects on A10 Networks devices via aXAPIv3.
+author:
+  - Eric Chou (@ericchou1)
+extends_documentation_fragment:
+- community.general.a10
+- url
+
+options:
+  server_name:
+    description:
+      - The SLB (Server Load Balancer) server name.
+    required: true
+    aliases: ['server']
+  server_ip:
+    description:
+      - The SLB (Server Load Balancer) server IPv4 address.
+    required: true
+    aliases: ['ip', 'address']
+  server_status:
+    description:
+      - The SLB (Server Load Balancer) server status.
+    default: enable
+    aliases: ['action']
+    choices: ['enable', 'disable']
+  server_ports:
+    description:
+      - A list of ports to create for the server. Each list item should be a dictionary which specifies the C(port:)
+        and C(protocol:).
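+      - Each port definition may also optionally specify the C(action:) field. See the examples below for details.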
+    aliases: ['port']
+  operation:
+    description:
+      - Create, update or delete the SLB server. For create and update operations, we use the IP address and server
+        name specified in the POST message. For the delete operation, we use the server name in the request URI.
+    default: create
+    choices: ['create', 'update', 'delete']
+  validate_certs:
+    description:
+      - If C(no), SSL certificates will not be validated. This should only be used
+        on personally controlled devices using self-signed certificates.
+    type: bool
+    default: 'yes'
+
+'''
+
+RETURN = '''
+#
+'''
+
+EXAMPLES = '''
+# Create a new server
+- a10_server_axapi3:
+    host: a10.mydomain.com
+    username: myadmin
+    password: mypassword
+    server: test
+    server_ip: 1.1.1.100
+    validate_certs: false
+    server_status: enable
+    write_config: yes
+    operation: create
+    server_ports:
+      - port-number: 8080
+        protocol: tcp
+        action: enable
+      - port-number: 8443
+        protocol: TCP
+
+'''
+import json
+
+from ansible_collections.community.general.plugins.module_utils.network.a10.a10 import axapi_call_v3, a10_argument_spec, axapi_authenticate_v3, axapi_failure
+from ansible_collections.community.general.plugins.module_utils.network.a10.a10 import AXAPI_PORT_PROTOCOLS
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import url_argument_spec
+
+
+VALID_PORT_FIELDS = ['port-number', 'protocol', 'action']
+
+
+def validate_ports(module, ports):
+    for item in ports:
+        for key in item:
+            if key not in VALID_PORT_FIELDS:
+                module.fail_json(msg="invalid port field (%s), must be one of: %s" % (key, ','.join(VALID_PORT_FIELDS)))
+
+        # validate the port number is present and an integer
+        if 'port-number' in item:
+            try:
+                item['port-number'] = int(item['port-number'])
+            except Exception:
+                module.fail_json(msg="port-number entries in the port definitions must be integers")
+        else:
+            module.fail_json(msg="port definitions must define the port-number field")
+
+        # validate the port protocol is present, no need to convert to the internal API integer value in v3
+        if 'protocol' in item:
+            protocol = item['protocol']
+            if not protocol:
+                module.fail_json(msg="invalid port protocol, must be one of: %s" % ','.join(AXAPI_PORT_PROTOCOLS))
+            else:
+                item['protocol'] = protocol
+        else:
+            module.fail_json(msg="port definitions must define the port protocol (%s)" % ','.join(AXAPI_PORT_PROTOCOLS))
+
+        # 'status' is 'action' in AXAPIv3
+        # no need to convert the status, a.k.a. action, to the internal API integer value in v3
+        # action is either enable or disable
+        if 'action' in item:
+            action = item['action']
+            if action not in ['enable', 'disable']:
+                module.fail_json(msg="server action must be enable or disable")
+        else:
+            item['action'] = 'enable'
+
+
+def main():
+    argument_spec = a10_argument_spec()
+    argument_spec.update(url_argument_spec())
+    argument_spec.update(
+        dict(
+            operation=dict(type='str', default='create', choices=['create', 'update', 'delete']),
+            server_name=dict(type='str', aliases=['server'], required=True),
+            server_ip=dict(type='str', aliases=['ip', 'address'], required=True),
+            server_status=dict(type='str', default='enable', aliases=['action'], choices=['enable', 'disable']),
+            server_ports=dict(type='list', aliases=['port'], default=[]),
+        )
+    )
+
+    module = AnsibleModule(
+        argument_spec=argument_spec,
+        supports_check_mode=False
+    )
+
+    host = module.params['host']
+    username = module.params['username']
+    password = module.params['password']
+    operation = module.params['operation']
+    write_config = module.params['write_config']
+    slb_server = module.params['server_name']
+    slb_server_ip = module.params['server_ip']
+    slb_server_status = module.params['server_status']
+    slb_server_ports = module.params['server_ports']
+
+    axapi_base_url = 'https://{0}/axapi/v3/'.format(host)
+    axapi_auth_url = axapi_base_url + 'auth/'
+    signature = axapi_authenticate_v3(module, axapi_auth_url, username, password)
+
+    # validate the ports data structure
+    validate_ports(module, slb_server_ports)
+
+    json_post = {
+        "server-list": [
+            {
+                "name": slb_server,
+                "host": slb_server_ip
+            }
+        ]
+    }
+
+    # add optional module parameters
+    if slb_server_ports:
+        json_post['server-list'][0]['port-list'] = slb_server_ports
+
+    if slb_server_status:
+        json_post['server-list'][0]['action'] = slb_server_status
+
+    slb_server_data = axapi_call_v3(module, axapi_base_url + 'slb/server/', method='GET', body='', signature=signature)
+
+    # for empty slb server list
+    if axapi_failure(slb_server_data):
+        slb_server_exists = False
+    else:
+        slb_server_list = [server['name'] for server in slb_server_data['server-list']]
+        if slb_server in slb_server_list:
+            slb_server_exists = True
+        else:
+            slb_server_exists = False
+
+    changed = False
+    if operation == 'create':
+        if slb_server_exists is False:
+            result = axapi_call_v3(module, axapi_base_url + 'slb/server/', method='POST', body=json.dumps(json_post), signature=signature)
+            if axapi_failure(result):
+                module.fail_json(msg="failed to create the server: %s" % result['response']['err']['msg'])
+            changed = True
+        else:
+            module.fail_json(msg="server already exists, use operation='update' instead")
+        # if we changed things, get the full info regarding result
+        if changed:
+            result = axapi_call_v3(module, axapi_base_url + 'slb/server/' + slb_server, method='GET', body='', signature=signature)
+        else:
+            result = slb_server_data
+    elif operation == 'delete':
+        if slb_server_exists:
+            result = axapi_call_v3(module, axapi_base_url + 'slb/server/' + slb_server, method='DELETE', body='', signature=signature)
+            if axapi_failure(result):
+                module.fail_json(msg="failed to delete server: %s" % result['response']['err']['msg'])
+            changed = True
+        else:
+            result = dict(msg="the server was not present")
+    elif operation == 'update':
+        if slb_server_exists:
+            result = axapi_call_v3(module, axapi_base_url + 'slb/server/', method='PUT', body=json.dumps(json_post), signature=signature)
+            if axapi_failure(result):
+                module.fail_json(msg="failed to update server: %s" % result['response']['err']['msg'])
+            changed = True
+        else:
+            result = dict(msg="the server was not present")
+
+    # if the config has changed, save the config unless otherwise requested
+    if changed and write_config:
+        write_result = axapi_call_v3(module, axapi_base_url + 'write/memory/', method='POST', body='', signature=signature)
+        if axapi_failure(write_result):
+            module.fail_json(msg="failed to save the configuration: %s" % write_result['response']['err']['msg'])
+
+    # log out gracefully and exit
+    axapi_call_v3(module, axapi_base_url + 'logoff/', method='POST', body='', signature=signature)
+    module.exit_json(changed=changed, content=result)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/network/a10/a10_service_group.py b/plugins/modules/network/a10/a10_service_group.py
new file mode 100644
index 0000000000..bebfc01334
--- /dev/null
+++ b/plugins/modules/network/a10/a10_service_group.py
@@ -0,0 +1,337 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2014, Mischa Peters ,
+# Eric Chou
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: a10_service_group
+short_description: Manage A10 Networks AX/SoftAX/Thunder/vThunder devices' service groups.
+description:
+    - Manage SLB (Server Load Balancing) service-group objects on A10 Networks devices via aXAPIv2.
+author:
+  - Eric Chou (@ericchou1)
+  - Mischa Peters (@mischapeters)
+notes:
+    - Requires A10 Networks aXAPI 2.1.
+    - When a server doesn't exist and is added to the service-group, the server will be created.
+extends_documentation_fragment:
+- community.general.a10
+- url
+
+options:
+  state:
+    description:
+      - If the specified service group should exist.
+    default: present
+    choices: ['present', 'absent']
+  partition:
+    description:
+      - Set the active partition.
+  service_group:
+    description:
+      - The SLB (Server Load Balancing) service-group name.
+    required: true
+    aliases: ['service', 'pool', 'group']
+  service_group_protocol:
+    description:
+      - The SLB service-group protocol of TCP or UDP.
+    default: tcp
+    aliases: ['proto', 'protocol']
+    choices: ['tcp', 'udp']
+  service_group_method:
+    description:
+      - The SLB service-group load balancing method, such as round-robin or weighted-rr.
+    default: round-robin
+    aliases: ['method']
+    choices:
+      - 'round-robin'
+      - 'weighted-rr'
+      - 'least-connection'
+      - 'weighted-least-connection'
+      - 'service-least-connection'
+      - 'service-weighted-least-connection'
+      - 'fastest-response'
+      - 'least-request'
+      - 'round-robin-strict'
+      - 'src-ip-only-hash'
+      - 'src-ip-hash'
+  servers:
+    description:
+      - A list of servers to add to the service group. Each list item should be a
+        dictionary which specifies the C(server:) and C(port:), but can also optionally
+        specify the C(status:). See the examples below for details.
+    aliases: ['server', 'member']
+  validate_certs:
+    description:
+      - If C(no), SSL certificates will not be validated. This should only be used
+        on personally controlled devices using self-signed certificates.
+ type: bool + default: 'yes' + +''' + +EXAMPLES = ''' +# Create a new service-group +- a10_service_group: + host: a10.mydomain.com + username: myadmin + password: mypassword + partition: mypartition + service_group: sg-80-tcp + servers: + - server: foo1.mydomain.com + port: 8080 + - server: foo2.mydomain.com + port: 8080 + - server: foo3.mydomain.com + port: 8080 + - server: foo4.mydomain.com + port: 8080 + status: disabled + +''' + +RETURN = ''' +content: + description: the full info regarding the slb_service_group + returned: success + type: str + sample: "mynewservicegroup" +''' +import json + +from ansible_collections.community.general.plugins.module_utils.network.a10.a10 import (axapi_call, a10_argument_spec, axapi_authenticate, axapi_failure, axapi_enabled_disabled) +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.urls import url_argument_spec + + +VALID_SERVICE_GROUP_FIELDS = ['name', 'protocol', 'lb_method'] +VALID_SERVER_FIELDS = ['server', 'port', 'status'] + + +def validate_servers(module, servers): + for item in servers: + for key in item: + if key not in VALID_SERVER_FIELDS: + module.fail_json(msg="invalid server field (%s), must be one of: %s" % (key, ','.join(VALID_SERVER_FIELDS))) + + # validate the server name is present + if 'server' not in item: + module.fail_json(msg="server definitions must define the server field") + + # validate the port number is present and an integer + if 'port' in item: + try: + item['port'] = int(item['port']) + except Exception: + module.fail_json(msg="server port definitions must be integers") + else: + module.fail_json(msg="server definitions must define the port field") + + # convert the status to the internal API integer value + if 'status' in item: + item['status'] = axapi_enabled_disabled(item['status']) + else: + item['status'] = 1 + + +def main(): + argument_spec = a10_argument_spec() + argument_spec.update(url_argument_spec()) + argument_spec.update( + dict( + state=dict(type='str', default='present', choices=['present', 'absent']), + service_group=dict(type='str', aliases=['service', 'pool', 'group'], required=True), + service_group_protocol=dict(type='str', default='tcp', aliases=['proto', 'protocol'], choices=['tcp', 'udp']), + service_group_method=dict(type='str', default='round-robin', + aliases=['method'], + choices=['round-robin', + 'weighted-rr', + 'least-connection', + 'weighted-least-connection', + 'service-least-connection', + 'service-weighted-least-connection', + 'fastest-response', + 'least-request', + 'round-robin-strict', + 'src-ip-only-hash', + 'src-ip-hash']), + servers=dict(type='list', aliases=['server', 'member'], default=[]), + partition=dict(type='str', default=[]), + ) + ) + + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=False + ) + + host = module.params['host'] + username = module.params['username'] + password = module.params['password'] + partition = module.params['partition'] + state = module.params['state'] + write_config = module.params['write_config'] + slb_service_group = module.params['service_group'] + slb_service_group_proto = module.params['service_group_protocol'] + slb_service_group_method = module.params['service_group_method'] + slb_servers = module.params['servers'] + + if slb_service_group is None: + module.fail_json(msg='service_group is required') + + axapi_base_url = 'https://' + host + '/services/rest/V2.1/?format=json' + load_balancing_methods = {'round-robin': 0, + 'weighted-rr': 1, + 'least-connection': 2, + 
'weighted-least-connection': 3, + 'service-least-connection': 4, + 'service-weighted-least-connection': 5, + 'fastest-response': 6, + 'least-request': 7, + 'round-robin-strict': 8, + 'src-ip-only-hash': 14, + 'src-ip-hash': 15} + + if not slb_service_group_proto or slb_service_group_proto.lower() == 'tcp': + protocol = 2 + else: + protocol = 3 + + # validate the server data list structure + validate_servers(module, slb_servers) + + json_post = { + 'service_group': { + 'name': slb_service_group, + 'protocol': protocol, + 'lb_method': load_balancing_methods[slb_service_group_method], + } + } + + # first we authenticate to get a session id + session_url = axapi_authenticate(module, axapi_base_url, username, password) + # then we select the active-partition + axapi_call(module, session_url + '&method=system.partition.active', json.dumps({'name': partition})) + # then we check to see if the specified group exists + slb_result = axapi_call(module, session_url + '&method=slb.service_group.search', json.dumps({'name': slb_service_group})) + slb_service_group_exist = not axapi_failure(slb_result) + + changed = False + if state == 'present': + # before creating/updating we need to validate that servers + # defined in the servers list exist to prevent errors + checked_servers = [] + for server in slb_servers: + result = axapi_call(module, session_url + '&method=slb.server.search', json.dumps({'name': server['server']})) + if axapi_failure(result): + module.fail_json(msg="the server %s specified in the servers list does not exist" % server['server']) + checked_servers.append(server['server']) + + if not slb_service_group_exist: + result = axapi_call(module, session_url + '&method=slb.service_group.create', json.dumps(json_post)) + if axapi_failure(result): + module.fail_json(msg=result['response']['err']['msg']) + changed = True + else: + # check to see if the service group definition without the + # server members is different, and update that individually + # if it needs it + do_update = False + for field in VALID_SERVICE_GROUP_FIELDS: + if json_post['service_group'][field] != slb_result['service_group'][field]: + do_update = True + break + + if do_update: + result = axapi_call(module, session_url + '&method=slb.service_group.update', json.dumps(json_post)) + if axapi_failure(result): + module.fail_json(msg=result['response']['err']['msg']) + changed = True + + # next we pull the defined list of servers out of the returned + # results to make it a bit easier to iterate over + defined_servers = slb_result.get('service_group', {}).get('member_list', []) + + # next we add/update new member servers from the user-specified + # list if they're different or not on the target device + for server in slb_servers: + found = False + different = False + for def_server in defined_servers: + if server['server'] == def_server['server']: + found = True + for valid_field in VALID_SERVER_FIELDS: + if server[valid_field] != def_server[valid_field]: + different = True + break + if found or different: + break + # add or update as required + server_data = { + "name": slb_service_group, + "member": server, + } + if not found: + result = axapi_call(module, session_url + '&method=slb.service_group.member.create', json.dumps(server_data)) + changed = True + elif different: + result = axapi_call(module, session_url + '&method=slb.service_group.member.update', json.dumps(server_data)) + changed = True + + # finally, remove any servers that are on the target + # device but were not specified in the list given + for server in 
defined_servers:
+            found = False
+            for slb_server in slb_servers:
+                if server['server'] == slb_server['server']:
+                    found = True
+                    break
+            # remove if not found
+            server_data = {
+                "name": slb_service_group,
+                "member": server,
+            }
+            if not found:
+                result = axapi_call(module, session_url + '&method=slb.service_group.member.delete', json.dumps(server_data))
+                changed = True
+
+        # if we changed things, get the full info regarding
+        # the service group for the return data below
+        if changed:
+            result = axapi_call(module, session_url + '&method=slb.service_group.search', json.dumps({'name': slb_service_group}))
+        else:
+            result = slb_result
+    elif state == 'absent':
+        if slb_service_group_exist:
+            result = axapi_call(module, session_url + '&method=slb.service_group.delete', json.dumps({'name': slb_service_group}))
+            changed = True
+        else:
+            result = dict(msg="the service group was not present")
+
+    # if the config has changed, save the config unless otherwise requested
+    if changed and write_config:
+        write_result = axapi_call(module, session_url + '&method=system.action.write_memory')
+        if axapi_failure(write_result):
+            module.fail_json(msg="failed to save the configuration: %s" % write_result['response']['err']['msg'])
+
+    # log out of the session nicely and exit
+    axapi_call(module, session_url + '&method=session.close')
+    module.exit_json(changed=changed, content=result)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/network/a10/a10_virtual_server.py b/plugins/modules/network/a10/a10_virtual_server.py
new file mode 100644
index 0000000000..6e7b7829b1
--- /dev/null
+++ b/plugins/modules/network/a10/a10_virtual_server.py
@@ -0,0 +1,283 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2014, Mischa Peters ,
+# Eric Chou
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: a10_virtual_server
+short_description: Manage A10 Networks AX/SoftAX/Thunder/vThunder devices' virtual servers.
+description:
+    - Manage SLB (Server Load Balancing) virtual server objects on A10 Networks devices via aXAPIv2.
+author:
+  - Eric Chou (@ericchou1)
+  - Mischa Peters (@mischapeters)
+notes:
+    - Requires A10 Networks aXAPI 2.1.
+extends_documentation_fragment:
+- community.general.a10
+- url
+
+options:
+  state:
+    description:
+      - If the specified virtual server should exist.
+    choices: ['present', 'absent']
+    default: present
+  partition:
+    description:
+      - Set the active partition.
+  virtual_server:
+    description:
+      - The SLB (Server Load Balancing) virtual server name.
+    required: true
+    aliases: ['vip', 'virtual']
+  virtual_server_ip:
+    description:
+      - The SLB virtual server IPv4 address.
+    aliases: ['ip', 'address']
+  virtual_server_status:
+    description:
+      - The SLB virtual server status, such as enabled or disabled.
+    default: enabled
+    aliases: ['status']
+    choices: ['enabled', 'disabled']
+  virtual_server_ports:
+    description:
+      - A list of ports to create for the virtual server. Each list item should be a
+        dictionary which specifies the C(port:) and C(protocol:), but can also optionally
+        specify the C(service_group:) as well as the C(status:). See the examples
+        below for details. This parameter is required when C(state) is C(present).
+ validate_certs: + description: + - If C(no), SSL certificates will not be validated. This should only be used + on personally controlled devices using self-signed certificates. + type: bool + default: 'yes' + +''' + + +EXAMPLES = ''' +# Create a new virtual server +- a10_virtual_server: + host: a10.mydomain.com + username: myadmin + password: mypassword + partition: mypartition + virtual_server: vserver1 + virtual_server_ip: 1.1.1.1 + virtual_server_ports: + - port: 80 + protocol: TCP + service_group: sg-80-tcp + - port: 443 + protocol: HTTPS + service_group: sg-443-https + - port: 8080 + protocol: http + status: disabled + +''' + +RETURN = ''' +content: + description: the full info regarding the slb_virtual + returned: success + type: str + sample: "mynewvirtualserver" +''' +import json + +from ansible_collections.community.general.plugins.module_utils.network.a10.a10 import (axapi_call, a10_argument_spec, axapi_authenticate, axapi_failure, + axapi_enabled_disabled, axapi_get_vport_protocol, AXAPI_VPORT_PROTOCOLS) +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.urls import url_argument_spec + + +VALID_PORT_FIELDS = ['port', 'protocol', 'service_group', 'status'] + + +def validate_ports(module, ports): + for item in ports: + for key in item: + if key not in VALID_PORT_FIELDS: + module.fail_json(msg="invalid port field (%s), must be one of: %s" % (key, ','.join(VALID_PORT_FIELDS))) + + # validate the port number is present and an integer + if 'port' in item: + try: + item['port'] = int(item['port']) + except Exception: + module.fail_json(msg="port definitions must be integers") + else: + module.fail_json(msg="port definitions must define the port field") + + # validate the port protocol is present, and convert it to + # the internal API integer value (and validate it) + if 'protocol' in item: + protocol = axapi_get_vport_protocol(item['protocol']) + if not protocol: + module.fail_json(msg="invalid port protocol, must be one of: %s" % ','.join(AXAPI_VPORT_PROTOCOLS)) + else: + item['protocol'] = protocol + else: + module.fail_json(msg="port definitions must define the port protocol (%s)" % ','.join(AXAPI_VPORT_PROTOCOLS)) + + # convert the status to the internal API integer value + if 'status' in item: + item['status'] = axapi_enabled_disabled(item['status']) + else: + item['status'] = 1 + + # ensure the service_group field is at least present + if 'service_group' not in item: + item['service_group'] = '' + + +def main(): + argument_spec = a10_argument_spec() + argument_spec.update(url_argument_spec()) + argument_spec.update( + dict( + state=dict(type='str', default='present', choices=['present', 'absent']), + virtual_server=dict(type='str', aliases=['vip', 'virtual'], required=True), + virtual_server_ip=dict(type='str', aliases=['ip', 'address'], required=True), + virtual_server_status=dict(type='str', default='enabled', aliases=['status'], choices=['enabled', 'disabled']), + virtual_server_ports=dict(type='list', required=True), + partition=dict(type='str', default=[]), + ) + ) + + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=False + ) + + host = module.params['host'] + username = module.params['username'] + password = module.params['password'] + partition = module.params['partition'] + state = module.params['state'] + write_config = module.params['write_config'] + slb_virtual = module.params['virtual_server'] + slb_virtual_ip = module.params['virtual_server_ip'] + slb_virtual_status = module.params['virtual_server_status'] 
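+    # the port definitions below are validated before any API session is opened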
+    slb_virtual_ports = module.params['virtual_server_ports']
+
+    if slb_virtual is None:
+        module.fail_json(msg='virtual_server is required')
+
+    validate_ports(module, slb_virtual_ports)
+
+    axapi_base_url = 'https://%s/services/rest/V2.1/?format=json' % host
+    session_url = axapi_authenticate(module, axapi_base_url, username, password)
+
+    axapi_call(module, session_url + '&method=system.partition.active', json.dumps({'name': partition}))
+    slb_virtual_data = axapi_call(module, session_url + '&method=slb.virtual_server.search', json.dumps({'name': slb_virtual}))
+    slb_virtual_exists = not axapi_failure(slb_virtual_data)
+
+    changed = False
+    if state == 'present':
+        json_post = {
+            'virtual_server': {
+                'name': slb_virtual,
+                'address': slb_virtual_ip,
+                'status': axapi_enabled_disabled(slb_virtual_status),
+                'vport_list': slb_virtual_ports,
+            }
+        }
+
+        # before creating/updating we need to validate that any
+        # service groups defined in the ports list exist, since
+        # the API will still create port definitions for
+        # them while indicating a failure occurred
+        checked_service_groups = []
+        for port in slb_virtual_ports:
+            if 'service_group' in port and port['service_group'] not in checked_service_groups:
+                # skip blank service group entries
+                if port['service_group'] == '':
+                    continue
+                result = axapi_call(module, session_url + '&method=slb.service_group.search', json.dumps({'name': port['service_group']}))
+                if axapi_failure(result):
+                    module.fail_json(msg="the service group %s specified in the ports list does not exist" % port['service_group'])
+                checked_service_groups.append(port['service_group'])
+
+        if not slb_virtual_exists:
+            result = axapi_call(module, session_url + '&method=slb.virtual_server.create', json.dumps(json_post))
+            if axapi_failure(result):
+                module.fail_json(msg="failed to create the virtual server: %s" % result['response']['err']['msg'])
+            changed = True
+        else:
+            def needs_update(src_ports, dst_ports):
+                '''
+                Checks to determine if the port definitions of the src_ports
+                array are missing from or different from those in dst_ports.
+                If there is a difference, this function returns true, otherwise false.
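+                For example, a port present in src_ports but missing from
+                dst_ports causes this function to return true.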
+                '''
+                for src_port in src_ports:
+                    found = False
+                    different = False
+                    for dst_port in dst_ports:
+                        if src_port['port'] == dst_port['port']:
+                            found = True
+                            for valid_field in VALID_PORT_FIELDS:
+                                if src_port[valid_field] != dst_port[valid_field]:
+                                    different = True
+                                    break
+                        if found or different:
+                            break
+                    if not found or different:
+                        return True
+                # every port from the src exists in the dst, and none of them were different
+                return False
+
+            defined_ports = slb_virtual_data.get('virtual_server', {}).get('vport_list', [])
+
+            # we check for a needed update both ways, in case ports
+            # are missing from either the ones specified by the user
+            # or from those on the device
+            if needs_update(defined_ports, slb_virtual_ports) or needs_update(slb_virtual_ports, defined_ports):
+                result = axapi_call(module, session_url + '&method=slb.virtual_server.update', json.dumps(json_post))
+                if axapi_failure(result):
+                    module.fail_json(msg="failed to update the virtual server: %s" % result['response']['err']['msg'])
+                changed = True
+
+        # if we changed things, get the full info regarding
+        # the virtual server for the return data below
+        if changed:
+            result = axapi_call(module, session_url + '&method=slb.virtual_server.search', json.dumps({'name': slb_virtual}))
+        else:
+            result = slb_virtual_data
+    elif state == 'absent':
+        if slb_virtual_exists:
+            result = axapi_call(module, session_url + '&method=slb.virtual_server.delete', json.dumps({'name': slb_virtual}))
+            changed = True
+        else:
+            result = dict(msg="the virtual server was not present")
+
+    # if the config has changed, save the config unless otherwise requested
+    if changed and write_config:
+        write_result = axapi_call(module, session_url + '&method=system.action.write_memory')
+        if axapi_failure(write_result):
+            module.fail_json(msg="failed to save the configuration: %s" % write_result['response']['err']['msg'])
+
+    # log out of the session nicely and exit
+    axapi_call(module, session_url + '&method=session.close')
+    module.exit_json(changed=changed, content=result)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/network/aci/aci_interface_policy_fc.py b/plugins/modules/network/aci/aci_interface_policy_fc.py
new file mode 100644
index 0000000000..bf9128dabf
--- /dev/null
+++ b/plugins/modules/network/aci/aci_interface_policy_fc.py
@@ -0,0 +1,239 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'certified'}
+
+DOCUMENTATION = r'''
+---
+module: aci_interface_policy_fc
+short_description: Manage Fibre Channel interface policies (fc:IfPol)
+description:
+- Manage ACI Fibre Channel interface policies on Cisco ACI fabrics.
+options:
+  fc_policy:
+    description:
+    - The name of the Fibre Channel interface policy.
+    type: str
+    required: yes
+    aliases: [ name ]
+  description:
+    description:
+    - The description of the Fibre Channel interface policy.
+    type: str
+    aliases: [ descr ]
+  port_mode:
+    description:
+    - The Port Mode to use.
+    - The APIC defaults to C(f) when unset during creation.
+    type: str
+    choices: [ f, np ]
+  state:
+    description:
+    - Use C(present) or C(absent) for adding or removing.
+    - Use C(query) for listing an object or multiple objects.
+ type: str + choices: [ absent, present, query ] + default: present + name_alias: + description: + - The alias for the current object. This relates to the nameAlias field in ACI. + type: str +extends_documentation_fragment: +- cisco.aci.aci + +seealso: +- name: APIC Management Information Model reference + description: More information about the internal APIC class B(fc:IfPol). + link: https://developer.cisco.com/docs/apic-mim-ref/ +author: +- Dag Wieers (@dagwieers) +''' + +EXAMPLES = r''' +- aci_interface_policy_fc: + host: '{{ hostname }}' + username: '{{ username }}' + password: '{{ password }}' + fc_policy: '{{ fc_policy }}' + port_mode: '{{ port_mode }}' + description: '{{ description }}' + state: present + delegate_to: localhost +''' + +RETURN = r''' +current: + description: The existing configuration from the APIC after the module has finished + returned: success + type: list + sample: + [ + { + "fvTenant": { + "attributes": { + "descr": "Production environment", + "dn": "uni/tn-production", + "name": "production", + "nameAlias": "", + "ownerKey": "", + "ownerTag": "" + } + } + } + ] +error: + description: The error information as returned from the APIC + returned: failure + type: dict + sample: + { + "code": "122", + "text": "unknown managed object class foo" + } +raw: + description: The raw output returned by the APIC REST API (xml or json) + returned: parse error + type: str + sample: '' +sent: + description: The actual/minimal configuration pushed to the APIC + returned: info + type: list + sample: + { + "fvTenant": { + "attributes": { + "descr": "Production environment" + } + } + } +previous: + description: The original configuration from the APIC before the module has started + returned: info + type: list + sample: + [ + { + "fvTenant": { + "attributes": { + "descr": "Production", + "dn": "uni/tn-production", + "name": "production", + "nameAlias": "", + "ownerKey": "", + "ownerTag": "" + } + } + } + ] +proposed: + description: The assembled configuration from the user-provided parameters + returned: info + type: dict + sample: + { + "fvTenant": { + "attributes": { + "descr": "Production environment", + "name": "production" + } + } + } +filter_string: + description: The filter string used for the request + returned: failure or debug + type: str + sample: ?rsp-prop-include=config-only +method: + description: The HTTP method used for the request to the APIC + returned: failure or debug + type: str + sample: POST +response: + description: The HTTP response from the APIC + returned: failure or debug + type: str + sample: OK (30 bytes) +status: + description: The HTTP status from the APIC + returned: failure or debug + type: int + sample: 200 +url: + description: The HTTP url used for the request to the APIC + returned: failure or debug + type: str + sample: https://10.11.12.13/api/mo/uni/tn-production.json +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.cisco.aci.plugins.module_utils.network.aci.aci import ACIModule, aci_argument_spec + + +def main(): + argument_spec = aci_argument_spec() + argument_spec.update( + fc_policy=dict(type='str', aliases=['name']), # Not required for querying all objects + description=dict(type='str', aliases=['descr']), + port_mode=dict(type='str', choices=['f', 'np']), # No default provided on purpose + state=dict(type='str', default='present', choices=['absent', 'present', 'query']), + name_alias=dict(type='str'), + ) + + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + 
required_if=[ + ['state', 'absent', ['fc_policy']], + ['state', 'present', ['fc_policy']], + ], + ) + + fc_policy = module.params.get('fc_policy') + port_mode = module.params.get('port_mode') + description = module.params.get('description') + state = module.params.get('state') + name_alias = module.params.get('name_alias') + + aci = ACIModule(module) + aci.construct_url( + root_class=dict( + aci_class='fcIfPol', + aci_rn='infra/fcIfPol-{0}'.format(fc_policy), + module_object=fc_policy, + target_filter={'name': fc_policy}, + ), + ) + + aci.get_existing() + + if state == 'present': + aci.payload( + aci_class='fcIfPol', + class_config=dict( + name=fc_policy, + descr=description, + portMode=port_mode, + nameAlias=name_alias, + ), + ) + + aci.get_diff(aci_class='fcIfPol') + + aci.post_config() + + elif state == 'absent': + aci.delete_config() + + aci.exit_json() + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/network/aci/aci_interface_policy_l2.py b/plugins/modules/network/aci/aci_interface_policy_l2.py new file mode 100644 index 0000000000..f5d57e682a --- /dev/null +++ b/plugins/modules/network/aci/aci_interface_policy_l2.py @@ -0,0 +1,264 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'certified'} + +DOCUMENTATION = r''' +--- +module: aci_interface_policy_l2 +short_description: Manage Layer 2 interface policies (l2:IfPol) +description: +- Manage Layer 2 interface policies on Cisco ACI fabrics. +options: + l2_policy: + description: + - The name of the Layer 2 interface policy. + type: str + required: yes + aliases: [ name ] + description: + description: + - The description of the Layer 2 interface policy. + type: str + aliases: [ descr ] + qinq: + description: + - Determines if QinQ is disabled or if the port should be considered a core or edge port. + - The APIC defaults to C(disabled) when unset during creation. + type: str + choices: [ core, disabled, edge ] + vepa: + description: + - Determines if Virtual Ethernet Port Aggregator is disabled or enabled. + - The APIC defaults to C(no) when unset during creation. + type: bool + vlan_scope: + description: + - The scope of the VLAN. + - The APIC defaults to C(global) when unset during creation. + type: str + choices: [ global, portlocal ] + state: + description: + - Use C(present) or C(absent) for adding or removing. + - Use C(query) for listing an object or multiple objects. + type: str + choices: [ absent, present, query ] + default: present + name_alias: + description: + - The alias for the current object. This relates to the nameAlias field in ACI. + type: str +extends_documentation_fragment: +- cisco.aci.aci + +seealso: +- name: APIC Management Information Model reference + description: More information about the internal APIC class B(l2:IfPol). 
+ link: https://developer.cisco.com/docs/apic-mim-ref/ +author: +- Dag Wieers (@dagwieers) +''' + +EXAMPLES = r''' +- aci_interface_policy_l2: + host: '{{ hostname }}' + username: '{{ username }}' + password: '{{ password }}' + l2_policy: '{{ l2_policy }}' + vlan_scope: '{{ vlan_policy }}' + description: '{{ description }}' + delegate_to: localhost +''' + +RETURN = r''' +current: + description: The existing configuration from the APIC after the module has finished + returned: success + type: list + sample: + [ + { + "fvTenant": { + "attributes": { + "descr": "Production environment", + "dn": "uni/tn-production", + "name": "production", + "nameAlias": "", + "ownerKey": "", + "ownerTag": "" + } + } + } + ] +error: + description: The error information as returned from the APIC + returned: failure + type: dict + sample: + { + "code": "122", + "text": "unknown managed object class foo" + } +raw: + description: The raw output returned by the APIC REST API (xml or json) + returned: parse error + type: str + sample: '' +sent: + description: The actual/minimal configuration pushed to the APIC + returned: info + type: list + sample: + { + "fvTenant": { + "attributes": { + "descr": "Production environment" + } + } + } +previous: + description: The original configuration from the APIC before the module has started + returned: info + type: list + sample: + [ + { + "fvTenant": { + "attributes": { + "descr": "Production", + "dn": "uni/tn-production", + "name": "production", + "nameAlias": "", + "ownerKey": "", + "ownerTag": "" + } + } + } + ] +proposed: + description: The assembled configuration from the user-provided parameters + returned: info + type: dict + sample: + { + "fvTenant": { + "attributes": { + "descr": "Production environment", + "name": "production" + } + } + } +filter_string: + description: The filter string used for the request + returned: failure or debug + type: str + sample: ?rsp-prop-include=config-only +method: + description: The HTTP method used for the request to the APIC + returned: failure or debug + type: str + sample: POST +response: + description: The HTTP response from the APIC + returned: failure or debug + type: str + sample: OK (30 bytes) +status: + description: The HTTP status from the APIC + returned: failure or debug + type: int + sample: 200 +url: + description: The HTTP url used for the request to the APIC + returned: failure or debug + type: str + sample: https://10.11.12.13/api/mo/uni/tn-production.json +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.cisco.aci.plugins.module_utils.network.aci.aci import ACIModule, aci_argument_spec + +# Mapping dicts are used to normalize the proposed data to what the APIC expects, which will keep diffs accurate +QINQ_MAPPING = dict( + core='corePort', + disabled='disabled', + edge='edgePort', +) + + +def main(): + argument_spec = aci_argument_spec() + argument_spec.update( + l2_policy=dict(type='str', aliases=['name']), # Not required for querying all policies + description=dict(type='str', aliases=['descr']), + vlan_scope=dict(type='str', choices=['global', 'portlocal']), # No default provided on purpose + qinq=dict(type='str', choices=['core', 'disabled', 'edge']), + vepa=dict(type='bool'), + state=dict(type='str', default='present', choices=['absent', 'present', 'query']), + name_alias=dict(type='str'), + ) + + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + required_if=[ + ['state', 'absent', ['l2_policy']], + ['state', 'present', ['l2_policy']], + ], 
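+        # a specific policy name is required when adding or removing; queries may omit it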
+ ) + + aci = ACIModule(module) + + l2_policy = module.params.get('l2_policy') + vlan_scope = module.params.get('vlan_scope') + qinq = module.params.get('qinq') + if qinq is not None: + qinq = QINQ_MAPPING.get(qinq) + vepa = aci.boolean(module.params.get('vepa'), 'enabled', 'disabled') + description = module.params.get('description') + state = module.params.get('state') + name_alias = module.params.get('name_alias') + + aci.construct_url( + root_class=dict( + aci_class='l2IfPol', + aci_rn='infra/l2IfP-{0}'.format(l2_policy), + module_object=l2_policy, + target_filter={'name': l2_policy}, + ), + ) + + aci.get_existing() + + if state == 'present': + aci.payload( + aci_class='l2IfPol', + class_config=dict( + name=l2_policy, + descr=description, + vlanScope=vlan_scope, + qinq=qinq, vepa=vepa, + nameAlias=name_alias, + ), + ) + + aci.get_diff(aci_class='l2IfPol') + + aci.post_config() + + elif state == 'absent': + aci.delete_config() + + aci.exit_json() + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/network/aci/aci_interface_policy_lldp.py b/plugins/modules/network/aci/aci_interface_policy_lldp.py new file mode 100644 index 0000000000..bc5aff2637 --- /dev/null +++ b/plugins/modules/network/aci/aci_interface_policy_lldp.py @@ -0,0 +1,248 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'certified'} + +DOCUMENTATION = r''' +--- +module: aci_interface_policy_lldp +short_description: Manage LLDP interface policies (lldp:IfPol) +description: +- Manage LLDP interface policies on Cisco ACI fabrics. +options: + lldp_policy: + description: + - The LLDP interface policy name. + type: str + required: yes + aliases: [ name ] + description: + description: + - The description for the LLDP interface policy name. + type: str + aliases: [ descr ] + receive_state: + description: + - Enable or disable Receive state. + - The APIC defaults to C(yes) when unset during creation. + type: bool + transmit_state: + description: + - Enable or Disable Transmit state. + - The APIC defaults to C(yes) when unset during creation. + type: bool + state: + description: + - Use C(present) or C(absent) for adding or removing. + - Use C(query) for listing an object or multiple objects. + type: str + choices: [ absent, present, query ] + default: present + name_alias: + description: + - The alias for the current object. This relates to the nameAlias field in ACI. + type: str +extends_documentation_fragment: +- cisco.aci.aci + +seealso: +- name: APIC Management Information Model reference + description: More information about the internal APIC class B(lldp:IfPol). 
+ link: https://developer.cisco.com/docs/apic-mim-ref/ +author: +- Dag Wieers (@dagwieers) +''' + +# FIXME: Add more, better examples +EXAMPLES = r''' +- aci_interface_policy_lldp: + host: '{{ hostname }}' + username: '{{ username }}' + password: '{{ password }}' + lldp_policy: '{{ lldp_policy }}' + description: '{{ description }}' + receive_state: '{{ receive_state }}' + transmit_state: '{{ transmit_state }}' + delegate_to: localhost +''' + +RETURN = r''' +current: + description: The existing configuration from the APIC after the module has finished + returned: success + type: list + sample: + [ + { + "fvTenant": { + "attributes": { + "descr": "Production environment", + "dn": "uni/tn-production", + "name": "production", + "nameAlias": "", + "ownerKey": "", + "ownerTag": "" + } + } + } + ] +error: + description: The error information as returned from the APIC + returned: failure + type: dict + sample: + { + "code": "122", + "text": "unknown managed object class foo" + } +raw: + description: The raw output returned by the APIC REST API (xml or json) + returned: parse error + type: str + sample: '' +sent: + description: The actual/minimal configuration pushed to the APIC + returned: info + type: list + sample: + { + "fvTenant": { + "attributes": { + "descr": "Production environment" + } + } + } +previous: + description: The original configuration from the APIC before the module has started + returned: info + type: list + sample: + [ + { + "fvTenant": { + "attributes": { + "descr": "Production", + "dn": "uni/tn-production", + "name": "production", + "nameAlias": "", + "ownerKey": "", + "ownerTag": "" + } + } + } + ] +proposed: + description: The assembled configuration from the user-provided parameters + returned: info + type: dict + sample: + { + "fvTenant": { + "attributes": { + "descr": "Production environment", + "name": "production" + } + } + } +filter_string: + description: The filter string used for the request + returned: failure or debug + type: str + sample: ?rsp-prop-include=config-only +method: + description: The HTTP method used for the request to the APIC + returned: failure or debug + type: str + sample: POST +response: + description: The HTTP response from the APIC + returned: failure or debug + type: str + sample: OK (30 bytes) +status: + description: The HTTP status from the APIC + returned: failure or debug + type: int + sample: 200 +url: + description: The HTTP url used for the request to the APIC + returned: failure or debug + type: str + sample: https://10.11.12.13/api/mo/uni/tn-production.json +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.cisco.aci.plugins.module_utils.network.aci.aci import ACIModule, aci_argument_spec + + +def main(): + argument_spec = aci_argument_spec() + argument_spec.update( + lldp_policy=dict(type='str', aliases=['name']), # Not required for querying all objects + description=dict(type='str', aliases=['descr']), + receive_state=dict(type='bool'), + transmit_state=dict(type='bool'), + state=dict(type='str', default='present', choices=['absent', 'present', 'query']), + name_alias=dict(type='str'), + ) + + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + required_if=[ + ['state', 'absent', ['lldp_policy']], + ['state', 'present', ['lldp_policy']], + ], + ) + + aci = ACIModule(module) + + lldp_policy = module.params.get('lldp_policy') + description = module.params.get('description') + receive_state = aci.boolean(module.params.get('receive_state'), 'enabled', 'disabled') + 
transmit_state = aci.boolean(module.params.get('transmit_state'), 'enabled', 'disabled') + state = module.params.get('state') + name_alias = module.params.get('name_alias') + + aci.construct_url( + root_class=dict( + aci_class='lldpIfPol', + aci_rn='infra/lldpIfP-{0}'.format(lldp_policy), + module_object=lldp_policy, + target_filter={'name': lldp_policy}, + ), + ) + + aci.get_existing() + + if state == 'present': + aci.payload( + aci_class='lldpIfPol', + class_config=dict( + name=lldp_policy, + descr=description, + adminRxSt=receive_state, + adminTxSt=transmit_state, + nameAlias=name_alias, + ), + ) + + aci.get_diff(aci_class='lldpIfPol') + + aci.post_config() + + elif state == 'absent': + aci.delete_config() + + aci.exit_json() + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/network/aci/aci_interface_policy_mcp.py b/plugins/modules/network/aci/aci_interface_policy_mcp.py new file mode 100644 index 0000000000..221d4a437b --- /dev/null +++ b/plugins/modules/network/aci/aci_interface_policy_mcp.py @@ -0,0 +1,239 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'certified'} + +DOCUMENTATION = r''' +--- +module: aci_interface_policy_mcp +short_description: Manage MCP interface policies (mcp:IfPol) +description: +- Manage MCP interface policies on Cisco ACI fabrics. +options: + mcp: + description: + - The name of the MCP interface. + type: str + required: yes + aliases: [ mcp_interface, name ] + description: + description: + - The description for the MCP interface. + type: str + aliases: [ descr ] + admin_state: + description: + - Enable or disable admin state. + - The APIC defaults to C(yes) when unset during creation. + type: bool + state: + description: + - Use C(present) or C(absent) for adding or removing. + - Use C(query) for listing an object or multiple objects. + type: str + choices: [ absent, present, query ] + default: present + name_alias: + description: + - The alias for the current object. This relates to the nameAlias field in ACI. + type: str +extends_documentation_fragment: +- cisco.aci.aci + +seealso: +- name: APIC Management Information Model reference + description: More information about the internal APIC class B(mcp:IfPol). 
+ link: https://developer.cisco.com/docs/apic-mim-ref/ +author: +- Dag Wieers (@dagwieers) +''' + +# FIXME: Add more, better examples +EXAMPLES = r''' +- aci_interface_policy_mcp: + host: '{{ hostname }}' + username: '{{ username }}' + password: '{{ password }}' + mcp: '{{ mcp }}' + description: '{{ descr }}' + admin_state: '{{ admin_state }}' + delegate_to: localhost +''' + +RETURN = r''' +current: + description: The existing configuration from the APIC after the module has finished + returned: success + type: list + sample: + [ + { + "fvTenant": { + "attributes": { + "descr": "Production environment", + "dn": "uni/tn-production", + "name": "production", + "nameAlias": "", + "ownerKey": "", + "ownerTag": "" + } + } + } + ] +error: + description: The error information as returned from the APIC + returned: failure + type: dict + sample: + { + "code": "122", + "text": "unknown managed object class foo" + } +raw: + description: The raw output returned by the APIC REST API (xml or json) + returned: parse error + type: str + sample: '' +sent: + description: The actual/minimal configuration pushed to the APIC + returned: info + type: list + sample: + { + "fvTenant": { + "attributes": { + "descr": "Production environment" + } + } + } +previous: + description: The original configuration from the APIC before the module has started + returned: info + type: list + sample: + [ + { + "fvTenant": { + "attributes": { + "descr": "Production", + "dn": "uni/tn-production", + "name": "production", + "nameAlias": "", + "ownerKey": "", + "ownerTag": "" + } + } + } + ] +proposed: + description: The assembled configuration from the user-provided parameters + returned: info + type: dict + sample: + { + "fvTenant": { + "attributes": { + "descr": "Production environment", + "name": "production" + } + } + } +filter_string: + description: The filter string used for the request + returned: failure or debug + type: str + sample: ?rsp-prop-include=config-only +method: + description: The HTTP method used for the request to the APIC + returned: failure or debug + type: str + sample: POST +response: + description: The HTTP response from the APIC + returned: failure or debug + type: str + sample: OK (30 bytes) +status: + description: The HTTP status from the APIC + returned: failure or debug + type: int + sample: 200 +url: + description: The HTTP url used for the request to the APIC + returned: failure or debug + type: str + sample: https://10.11.12.13/api/mo/uni/tn-production.json +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.cisco.aci.plugins.module_utils.network.aci.aci import ACIModule, aci_argument_spec + + +def main(): + argument_spec = aci_argument_spec() + argument_spec.update( + mcp=dict(type='str', aliases=['mcp_interface', 'name']), # Not required for querying all objects + description=dict(type='str', aliases=['descr']), + admin_state=dict(type='bool'), + state=dict(type='str', default='present', choices=['absent', 'present', 'query']), + name_alias=dict(type='str'), + ) + + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + required_if=[ + ['state', 'absent', ['mcp']], + ['state', 'present', ['mcp']], + ], + ) + + aci = ACIModule(module) + + mcp = module.params.get('mcp') + description = module.params.get('description') + admin_state = aci.boolean(module.params.get('admin_state'), 'enabled', 'disabled') + state = module.params.get('state') + name_alias = module.params.get('name_alias') + + aci.construct_url( + root_class=dict( + 
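+            # mcpIfPol is a fabric-level policy that lives directly under
+            # uni/infra, so a single root class is enough to build the URL.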
aci_class='mcpIfPol', + aci_rn='infra/mcpIfP-{0}'.format(mcp), + module_object=mcp, + target_filter={'name': mcp}, + ), + ) + + aci.get_existing() + + if state == 'present': + aci.payload( + aci_class='mcpIfPol', + class_config=dict( + name=mcp, + descr=description, + adminSt=admin_state, + nameAlias=name_alias, + ), + ) + + aci.get_diff(aci_class='mcpIfPol') + + aci.post_config() + + elif state == 'absent': + aci.delete_config() + + aci.exit_json() + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/network/aci/aci_interface_policy_port_channel.py b/plugins/modules/network/aci/aci_interface_policy_port_channel.py new file mode 100644 index 0000000000..e74269aebf --- /dev/null +++ b/plugins/modules/network/aci/aci_interface_policy_port_channel.py @@ -0,0 +1,321 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'certified'} + +DOCUMENTATION = r''' +--- +module: aci_interface_policy_port_channel +short_description: Manage port channel interface policies (lacp:LagPol) +description: +- Manage port channel interface policies on Cisco ACI fabrics. +options: + port_channel: + description: + - Name of the port channel. + type: str + required: yes + aliases: [ name ] + description: + description: + - The description for the port channel. + type: str + aliases: [ descr ] + max_links: + description: + - Maximum links. + - Accepted values range between 1 and 16. + - The APIC defaults to C(16) when unset during creation. + type: int + min_links: + description: + - Minimum links. + - Accepted values range between 1 and 16. + - The APIC defaults to C(1) when unset during creation. + type: int + mode: + description: + - Port channel interface policy mode. + - Determines the LACP method to use for forming port-channels. + - The APIC defaults to C(off) when unset during creation. + type: str + choices: [ active, mac-pin, mac-pin-nicload, 'off', passive ] + fast_select: + description: + - Determines if Fast Select is enabled for Hot Standby Ports. + - This makes up the LACP Policy Control Policy; if one setting is defined, then all other Control Properties + left undefined or set to false will not exist after the task is ran. + - The APIC defaults to C(yes) when unset during creation. + type: bool + graceful_convergence: + description: + - Determines if Graceful Convergence is enabled. + - This makes up the LACP Policy Control Policy; if one setting is defined, then all other Control Properties + left undefined or set to false will not exist after the task is ran. + - The APIC defaults to C(yes) when unset during creation. + type: bool + load_defer: + description: + - Determines if Load Defer is enabled. + - This makes up the LACP Policy Control Policy; if one setting is defined, then all other Control Properties + left undefined or set to false will not exist after the task is ran. + - The APIC defaults to C(no) when unset during creation. + type: bool + suspend_individual: + description: + - Determines if Suspend Individual is enabled. + - This makes up the LACP Policy Control Policy; if one setting is defined, then all other Control Properties + left undefined or set to false will not exist after the task is ran. + - The APIC defaults to C(yes) when unset during creation. 
+ type: bool + symmetric_hash: + description: + - Determines if Symmetric Hashing is enabled. + - This makes up the LACP Policy Control Policy; if one setting is defined, then all other Control Properties + left undefined or set to false will not exist after the task is ran. + - The APIC defaults to C(no) when unset during creation. + type: bool + state: + description: + - Use C(present) or C(absent) for adding or removing. + - Use C(query) for listing an object or multiple objects. + type: str + choices: [ absent, present, query ] + default: present + name_alias: + description: + - The alias for the current object. This relates to the nameAlias field in ACI. + type: str +extends_documentation_fragment: +- cisco.aci.aci + +seealso: +- name: APIC Management Information Model reference + description: More information about the internal APIC class B(lacp:LagPol). + link: https://developer.cisco.com/docs/apic-mim-ref/ +author: +- Dag Wieers (@dagwieers) +''' + +EXAMPLES = r''' +- aci_interface_policy_port_channel: + host: '{{ inventory_hostname }}' + username: '{{ username }}' + password: '{{ password }}' + port_channel: '{{ port_channel }}' + description: '{{ description }}' + min_links: '{{ min_links }}' + max_links: '{{ max_links }}' + mode: '{{ mode }}' + delegate_to: localhost +''' + +RETURN = r''' +current: + description: The existing configuration from the APIC after the module has finished + returned: success + type: list + sample: + [ + { + "fvTenant": { + "attributes": { + "descr": "Production environment", + "dn": "uni/tn-production", + "name": "production", + "nameAlias": "", + "ownerKey": "", + "ownerTag": "" + } + } + } + ] +error: + description: The error information as returned from the APIC + returned: failure + type: dict + sample: + { + "code": "122", + "text": "unknown managed object class foo" + } +raw: + description: The raw output returned by the APIC REST API (xml or json) + returned: parse error + type: str + sample: '' +sent: + description: The actual/minimal configuration pushed to the APIC + returned: info + type: list + sample: + { + "fvTenant": { + "attributes": { + "descr": "Production environment" + } + } + } +previous: + description: The original configuration from the APIC before the module has started + returned: info + type: list + sample: + [ + { + "fvTenant": { + "attributes": { + "descr": "Production", + "dn": "uni/tn-production", + "name": "production", + "nameAlias": "", + "ownerKey": "", + "ownerTag": "" + } + } + } + ] +proposed: + description: The assembled configuration from the user-provided parameters + returned: info + type: dict + sample: + { + "fvTenant": { + "attributes": { + "descr": "Production environment", + "name": "production" + } + } + } +filter_string: + description: The filter string used for the request + returned: failure or debug + type: str + sample: ?rsp-prop-include=config-only +method: + description: The HTTP method used for the request to the APIC + returned: failure or debug + type: str + sample: POST +response: + description: The HTTP response from the APIC + returned: failure or debug + type: str + sample: OK (30 bytes) +status: + description: The HTTP status from the APIC + returned: failure or debug + type: int + sample: 200 +url: + description: The HTTP url used for the request to the APIC + returned: failure or debug + type: str + sample: https://10.11.12.13/api/mo/uni/tn-production.json +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.cisco.aci.plugins.module_utils.network.aci.aci 
import ACIModule, aci_argument_spec + + +def main(): + argument_spec = aci_argument_spec() + argument_spec.update( + port_channel=dict(type='str', aliases=['name']), # Not required for querying all objects + description=dict(type='str', aliases=['descr']), + min_links=dict(type='int'), + max_links=dict(type='int'), + mode=dict(type='str', choices=['active', 'mac-pin', 'mac-pin-nicload', 'off', 'passive']), + fast_select=dict(type='bool'), + graceful_convergence=dict(type='bool'), + load_defer=dict(type='bool'), + suspend_individual=dict(type='bool'), + symmetric_hash=dict(type='bool'), + state=dict(type='str', default='present', choices=['absent', 'present', 'query']), + name_alias=dict(type='str'), + ) + + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + required_if=[ + ['state', 'absent', ['port_channel']], + ['state', 'present', ['port_channel']], + ], + ) + + port_channel = module.params.get('port_channel') + description = module.params.get('description') + min_links = module.params.get('min_links') + if min_links is not None and min_links not in range(1, 17): + module.fail_json(msg='The "min_links" must be a value between 1 and 16') + max_links = module.params.get('max_links') + if max_links is not None and max_links not in range(1, 17): + module.fail_json(msg='The "max_links" must be a value between 1 and 16') + mode = module.params.get('mode') + state = module.params.get('state') + name_alias = module.params.get('name_alias') + + # Build ctrl value for request + ctrl = [] + if module.params.get('fast_select') is True: + ctrl.append('fast-sel-hot-stdby') + if module.params.get('graceful_convergence') is True: + ctrl.append('graceful-conv') + if module.params.get('load_defer') is True: + ctrl.append('load-defer') + if module.params.get('suspend_individual') is True: + ctrl.append('susp-individual') + if module.params.get('symmetric_hash') is True: + ctrl.append('symmetric-hash') + if not ctrl: + ctrl = None + else: + ctrl = ",".join(ctrl) + + aci = ACIModule(module) + aci.construct_url( + root_class=dict( + aci_class='lacpLagPol', + aci_rn='infra/lacplagp-{0}'.format(port_channel), + module_object=port_channel, + target_filter={'name': port_channel}, + ), + ) + + aci.get_existing() + + if state == 'present': + aci.payload( + aci_class='lacpLagPol', + class_config=dict( + name=port_channel, + ctrl=ctrl, + descr=description, + minLinks=min_links, + maxLinks=max_links, + mode=mode, + nameAlias=name_alias, + ), + ) + + aci.get_diff(aci_class='lacpLagPol') + + aci.post_config() + + elif state == 'absent': + aci.delete_config() + + aci.exit_json() + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/network/aci/aci_interface_policy_port_security.py b/plugins/modules/network/aci/aci_interface_policy_port_security.py new file mode 100644 index 0000000000..5caf927644 --- /dev/null +++ b/plugins/modules/network/aci/aci_interface_policy_port_security.py @@ -0,0 +1,252 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'certified'} + +DOCUMENTATION = r''' +--- +module: aci_interface_policy_port_security +short_description: Manage port security (l2:PortSecurityPol) +description: +- Manage port security on Cisco ACI fabrics. 
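+- A port security policy limits how many end points (MAC addresses) can be learned on an interface; see I(max_end_points).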
+options:
+  port_security:
+    description:
+    - The name of the port security policy.
+    type: str
+    required: yes
+    aliases: [ name ]
+  description:
+    description:
+    - The description for the port security policy.
+    type: str
+    aliases: [ descr ]
+  max_end_points:
+    description:
+    - Maximum number of end points.
+    - Accepted values range between C(0) and C(12000).
+    - The APIC defaults to C(0) when unset during creation.
+    type: int
+  port_security_timeout:
+    description:
+    - The delay time in seconds before MAC learning is re-enabled.
+    - Accepted values range between C(60) and C(3600).
+    - The APIC defaults to C(60) when unset during creation.
+    type: int
+  state:
+    description:
+    - Use C(present) or C(absent) for adding or removing.
+    - Use C(query) for listing an object or multiple objects.
+    type: str
+    choices: [ absent, present, query ]
+    default: present
+  name_alias:
+    description:
+    - The alias for the current object. This relates to the nameAlias field in ACI.
+    type: str
+extends_documentation_fragment:
+- cisco.aci.aci
+
+seealso:
+- name: APIC Management Information Model reference
+  description: More information about the internal APIC class B(l2:PortSecurityPol).
+  link: https://developer.cisco.com/docs/apic-mim-ref/
+author:
+- Dag Wieers (@dagwieers)
+'''
+
+# FIXME: Add more, better examples
+EXAMPLES = r'''
+- aci_interface_policy_port_security:
+    host: '{{ inventory_hostname }}'
+    username: '{{ username }}'
+    password: '{{ password }}'
+    port_security: '{{ port_security }}'
+    description: '{{ descr }}'
+    max_end_points: '{{ max_end_points }}'
+    port_security_timeout: '{{ port_security_timeout }}'
+  delegate_to: localhost
+'''
+
+RETURN = r'''
+current:
+  description: The existing configuration from the APIC after the module has finished
+  returned: success
+  type: list
+  sample:
+    [
+        {
+            "fvTenant": {
+                "attributes": {
+                    "descr": "Production environment",
+                    "dn": "uni/tn-production",
+                    "name": "production",
+                    "nameAlias": "",
+                    "ownerKey": "",
+                    "ownerTag": ""
+                }
+            }
+        }
+    ]
+error:
+  description: The error information as returned from the APIC
+  returned: failure
+  type: dict
+  sample:
+    {
+        "code": "122",
+        "text": "unknown managed object class foo"
+    }
+raw:
+  description: The raw output returned by the APIC REST API (xml or json)
+  returned: parse error
+  type: str
+  sample: ''
+sent:
+  description: The actual/minimal configuration pushed to the APIC
+  returned: info
+  type: list
+  sample:
+    {
+        "fvTenant": {
+            "attributes": {
+                "descr": "Production environment"
+            }
+        }
+    }
+previous:
+  description: The original configuration from the APIC before the module has started
+  returned: info
+  type: list
+  sample:
+    [
+        {
+            "fvTenant": {
+                "attributes": {
+                    "descr": "Production",
+                    "dn": "uni/tn-production",
+                    "name": "production",
+                    "nameAlias": "",
+                    "ownerKey": "",
+                    "ownerTag": ""
+                }
+            }
+        }
+    ]
+proposed:
+  description: The assembled configuration from the user-provided parameters
+  returned: info
+  type: dict
+  sample:
+    {
+        "fvTenant": {
+            "attributes": {
+                "descr": "Production environment",
+                "name": "production"
+            }
+        }
+    }
+filter_string:
+  description: The filter string used for the request
+  returned: failure or debug
+  type: str
+  sample: ?rsp-prop-include=config-only
+method:
+  description: The HTTP method used for the request to the APIC
+  returned: failure or debug
+  type: str
+  sample: POST
+response:
+  description: The HTTP response from the APIC
+  returned: failure or debug
+  type: str
+  sample: OK (30 bytes)
+status:
+  description: The HTTP status from the APIC
+  returned: failure or debug
+  type: int
+  sample: 200
+url:
+  description: The HTTP url used for the request to the APIC
+  returned: failure or debug
+  type: str
+  sample: https://10.11.12.13/api/mo/uni/tn-production.json
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.cisco.aci.plugins.module_utils.network.aci.aci import ACIModule, aci_argument_spec
+
+
+def main():
+    argument_spec = aci_argument_spec()
+    argument_spec.update(
+        port_security=dict(type='str', aliases=['name']),  # Not required for querying all objects
+        description=dict(type='str', aliases=['descr']),
+        max_end_points=dict(type='int'),
+        port_security_timeout=dict(type='int'),
+        state=dict(type='str', default='present', choices=['absent', 'present', 'query']),
+        name_alias=dict(type='str'),
+    )
+
+    module = AnsibleModule(
+        argument_spec=argument_spec,
+        supports_check_mode=True,
+        required_if=[
+            ['state', 'absent', ['port_security']],
+            ['state', 'present', ['port_security']],
+        ],
+    )
+
+    port_security = module.params.get('port_security')
+    description = module.params.get('description')
+    max_end_points = module.params.get('max_end_points')
+    port_security_timeout = module.params.get('port_security_timeout')
+    name_alias = module.params.get('name_alias')
+    if max_end_points is not None and max_end_points not in range(12001):
+        module.fail_json(msg='The "max_end_points" must be between 0 and 12000')
+    if port_security_timeout is not None and port_security_timeout not in range(60, 3601):
+        module.fail_json(msg='The "port_security_timeout" must be between 60 and 3600')
+    state = module.params.get('state')
+
+    aci = ACIModule(module)
+    aci.construct_url(
+        root_class=dict(
+            aci_class='l2PortSecurityPol',
+            aci_rn='infra/portsecurityP-{0}'.format(port_security),
+            module_object=port_security,
+            target_filter={'name': port_security},
+        ),
+    )
+
+    aci.get_existing()
+
+    if state == 'present':
+        aci.payload(
+            aci_class='l2PortSecurityPol',
+            class_config=dict(
+                name=port_security,
+                descr=description,
+                maximum=max_end_points,
+                # The validated port_security_timeout was previously never sent;
+                # push it as the l2PortSecurityPol timeout attribute.
+                timeout=port_security_timeout,
+                nameAlias=name_alias,
+            ),
+        )
+
+        aci.get_diff(aci_class='l2PortSecurityPol')
+
+        aci.post_config()
+
+    elif state == 'absent':
+        aci.delete_config()
+
+    aci.exit_json()
+
+
+if __name__ == "__main__":
+    main()
diff --git a/plugins/modules/network/aci/aci_intf_policy_fc.py b/plugins/modules/network/aci/aci_intf_policy_fc.py
new file mode 120000
index 0000000000..4f1118aa6d
--- /dev/null
+++ b/plugins/modules/network/aci/aci_intf_policy_fc.py
@@ -0,0 +1 @@
+aci_interface_policy_fc.py
\ No newline at end of file
diff --git a/plugins/modules/network/aci/aci_intf_policy_l2.py b/plugins/modules/network/aci/aci_intf_policy_l2.py
new file mode 120000
index 0000000000..924390568c
--- /dev/null
+++ b/plugins/modules/network/aci/aci_intf_policy_l2.py
@@ -0,0 +1 @@
+aci_interface_policy_l2.py
\ No newline at end of file
diff --git a/plugins/modules/network/aci/aci_intf_policy_lldp.py b/plugins/modules/network/aci/aci_intf_policy_lldp.py
new file mode 120000
index 0000000000..5d751deab3
--- /dev/null
+++ b/plugins/modules/network/aci/aci_intf_policy_lldp.py
@@ -0,0 +1 @@
+aci_interface_policy_lldp.py
\ No newline at end of file
diff --git a/plugins/modules/network/aci/aci_intf_policy_mcp.py b/plugins/modules/network/aci/aci_intf_policy_mcp.py
new file mode 120000
index 0000000000..89b6309246
--- /dev/null
+++ b/plugins/modules/network/aci/aci_intf_policy_mcp.py
@@ -0,0 +1 @@
+aci_interface_policy_mcp.py
\ No newline at end of file
diff --git a/plugins/modules/network/aci/aci_intf_policy_port_channel.py b/plugins/modules/network/aci/aci_intf_policy_port_channel.py
new file mode 120000
index 0000000000..bed0d32886
--- /dev/null
+++ b/plugins/modules/network/aci/aci_intf_policy_port_channel.py
@@ -0,0 +1 @@
+aci_interface_policy_port_channel.py
\ No newline at end of file
diff --git a/plugins/modules/network/aci/aci_intf_policy_port_security.py b/plugins/modules/network/aci/aci_intf_policy_port_security.py
new file mode 120000
index 0000000000..ff1bc0fdc7
--- /dev/null
+++ b/plugins/modules/network/aci/aci_intf_policy_port_security.py
@@ -0,0 +1 @@
+aci_interface_policy_port_security.py
\ No newline at end of file
diff --git a/plugins/modules/network/aci/mso_schema_template_external_epg_contract.py b/plugins/modules/network/aci/mso_schema_template_external_epg_contract.py
new file mode 100644
index 0000000000..6a5b003605
--- /dev/null
+++ b/plugins/modules/network/aci/mso_schema_template_external_epg_contract.py
@@ -0,0 +1,245 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+DOCUMENTATION = r'''
+---
+module: mso_schema_template_external_epg_contract
+short_description: Manage External EPG contracts in schema templates
+description:
+- Manage External EPG contracts in schema templates on Cisco ACI Multi-Site.
+author:
+- Devarshi Shah (@devarshishah3)
+options:
+  schema:
+    description:
+    - The name of the schema.
+    type: str
+    required: yes
+  template:
+    description:
+    - The name of the template to change.
+    type: str
+    required: yes
+  external_epg:
+    description:
+    - The name of the EPG to manage.
+    type: str
+    required: yes
+  contract:
+    description:
+    - A contract associated to this EPG.
+    type: dict
+    suboptions:
+      name:
+        description:
+        - The name of the Contract to associate with.
+        required: true
+        type: str
+      schema:
+        description:
+        - The schema that defines the referenced contract.
+        - If this parameter is unspecified, it defaults to the current schema.
+        type: str
+      template:
+        description:
+        - The template that defines the referenced contract.
+        type: str
+      type:
+        description:
+        - The type of contract.
+        type: str
+        required: true
+        choices: [ consumer, provider ]
+  state:
+    description:
+    - Use C(present) or C(absent) for adding or removing.
+    - Use C(query) for listing an object or multiple objects.
+    type: str
+    choices: [ absent, present, query ]
+    default: present
+seealso:
+- module: cisco.mso.mso_schema_template_externalepg
+- module: cisco.mso.mso_schema_template_contract_filter
+extends_documentation_fragment:
+- cisco.mso.mso
+
+'''
+
+EXAMPLES = r'''
+- name: Add a contract to an external EPG
+  mso_schema_template_external_epg_contract:
+    host: mso_host
+    username: admin
+    password: SomeSecretPassword
+    schema: Schema 1
+    template: Template 1
+    external_epg: EPG 1
+    contract:
+      name: Contract 1
+      type: consumer
+    state: present
+  delegate_to: localhost
+
+- name: Remove a Contract
+  mso_schema_template_external_epg_contract:
+    host: mso_host
+    username: admin
+    password: SomeSecretPassword
+    schema: Schema 1
+    template: Template 1
+    external_epg: EPG 1
+    contract:
+      name: Contract 1
+    state: absent
+  delegate_to: localhost
+
+- name: Query a specific Contract
+  mso_schema_template_external_epg_contract:
+    host: mso_host
+    username: admin
+    password: SomeSecretPassword
+    schema: Schema 1
+    template: Template 1
+    external_epg: EPG 1
+    contract:
+      name: Contract 1
+    state: query
+  delegate_to: localhost
+  register: query_result
+
+- name: Query all Contracts
+  mso_schema_template_external_epg_contract:
+    host: mso_host
+    username: admin
+    password: SomeSecretPassword
+    schema: Schema 1
+    template: Template 1
+    external_epg: EPG 1
+    state: query
+  delegate_to: localhost
+  register: query_result
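+# NOTE: Additional illustrative example (not part of the original module docs);
+# all schema/template/contract names below are placeholders.
+- name: Add a provider contract defined in another schema
+  mso_schema_template_external_epg_contract:
+    host: mso_host
+    username: admin
+    password: SomeSecretPassword
+    schema: Schema 1
+    template: Template 1
+    external_epg: EPG 1
+    contract:
+      name: Contract 2
+      schema: Schema 2
+      template: Template 2
+      type: provider
+    state: present
+  delegate_to: localhost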
+'''
+
+RETURN = r'''
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.cisco.mso.plugins.module_utils.network.aci.mso import MSOModule, mso_argument_spec, mso_contractref_spec, issubset
+
+
+def main():
+    argument_spec = mso_argument_spec()
+    argument_spec.update(
+        schema=dict(type='str', required=True),
+        template=dict(type='str', required=True),
+        external_epg=dict(type='str', required=True),
+        contract=dict(type='dict', options=mso_contractref_spec()),
+        state=dict(type='str', default='present', choices=['absent', 'present', 'query']),
+    )
+
+    module = AnsibleModule(
+        argument_spec=argument_spec,
+        supports_check_mode=True,
+        required_if=[
+            ['state', 'absent', ['contract']],
+            ['state', 'present', ['contract']],
+        ],
+    )
+
+    schema = module.params['schema']
+    template = module.params['template']
+    external_epg = module.params['external_epg']
+    contract = module.params['contract']
+    state = module.params['state']
+
+    mso = MSOModule(module)
+
+    if contract:
+        if contract.get('schema') is None:
+            contract['schema'] = schema
+        contract['schema_id'] = mso.lookup_schema(contract['schema'])
+        if contract.get('template') is None:
+            contract['template'] = template
+
+    # Get schema_id
+    schema_obj = mso.get_obj('schemas', displayName=schema)
+    if schema_obj:
+        schema_id = schema_obj['id']
+    else:
+        mso.fail_json(msg="Provided schema '{0}' does not exist".format(schema))
+
+    schema_path = 'schemas/{id}'.format(**schema_obj)
+
+    # Get template
+    templates = [t['name'] for t in schema_obj['templates']]
+    if template not in templates:
+        mso.fail_json(msg="Provided template '{0}' does not exist. Existing templates: {1}".format(template, ', '.join(templates)))
+    template_idx = templates.index(template)
+
+    # Get EPG
+    epgs = [e['name'] for e in schema_obj['templates'][template_idx]['externalEpgs']]
+    if external_epg not in epgs:
+        mso.fail_json(msg="Provided epg '{epg}' does not exist. Existing epgs: {epgs}".format(epg=external_epg, epgs=', '.join(epgs)))
+    epg_idx = epgs.index(external_epg)
+
+    # Get Contract
+    if contract:
+        contracts = [(c['contractRef'],
+                      c['relationshipType']) for c in schema_obj['templates'][template_idx]['externalEpgs'][epg_idx]['contractRelationships']]
+        contract_ref = mso.contract_ref(**contract)
+        if (contract_ref, contract['type']) in contracts:
+            contract_idx = contracts.index((contract_ref, contract['type']))
+            # Build the PATCH path from the list index of the existing
+            # relationship, not from the contract dict itself.
+            contract_path = '/templates/{0}/externalEpgs/{1}/contractRelationships/{2}'.format(template, external_epg, contract_idx)
+            mso.existing = schema_obj['templates'][template_idx]['externalEpgs'][epg_idx]['contractRelationships'][contract_idx]
+
+    if state == 'query':
+        if not contract:
+            mso.existing = schema_obj['templates'][template_idx]['externalEpgs'][epg_idx]['contractRelationships']
+        elif not mso.existing:
+            mso.fail_json(msg="Contract '{0}' not found".format(contract_ref))
+        mso.exit_json()
+
+    contracts_path = '/templates/{0}/externalEpgs/{1}/contractRelationships'.format(template, external_epg)
+    ops = []
+
+    mso.previous = mso.existing
+    if state == 'absent':
+        if mso.existing:
+            mso.sent = mso.existing = {}
+            ops.append(dict(op='remove', path=contract_path))
+
+    elif state == 'present':
+        payload = dict(
+            relationshipType=contract['type'],
+            contractRef=dict(
+                contractName=contract['name'],
+                templateName=contract['template'],
+                schemaId=contract['schema_id'],
+            ),
+        )
+
+        mso.sanitize(payload, collate=True)
+
+        if mso.existing:
+            ops.append(dict(op='replace', path=contract_path, value=mso.sent))
+        else:
+            ops.append(dict(op='add', path=contracts_path + '/-', value=mso.sent))
+
+        mso.existing = mso.proposed
+
+    if not module.check_mode:
+        mso.request(schema_path, method='PATCH', data=ops)
+
+    mso.exit_json()
+
+
+if __name__ == "__main__":
+    main()
diff --git a/plugins/modules/network/aci/mso_schema_template_external_epg_subnet.py b/plugins/modules/network/aci/mso_schema_template_external_epg_subnet.py
new file mode 100644
index 0000000000..c914452c22
--- /dev/null
+++ b/plugins/modules/network/aci/mso_schema_template_external_epg_subnet.py
@@ -0,0 +1,219 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+DOCUMENTATION = r'''
+---
+module: mso_schema_template_external_epg_subnet
+short_description: Manage External EPG subnets in schema templates
+description:
+- Manage External EPG subnets in schema templates on Cisco ACI Multi-Site.
+author:
+- Devarshi Shah (@devarshishah3)
+options:
+  schema:
+    description:
+    - The name of the schema.
+    type: str
+    required: yes
+  template:
+    description:
+    - The name of the template to change.
+    type: str
+    required: yes
+  external_epg:
+    description:
+    - The name of the External EPG to manage.
+    type: str
+    required: yes
+  subnet:
+    description:
+    - The IP range in CIDR notation.
+    - Not required when querying all subnets.
+    type: str
+  scope:
+    description:
+    - The scope of the subnet.
+    type: list
+  aggregate:
+    description:
+    - The aggregate option for the subnet.
+    type: list
+  state:
+    description:
+    - Use C(present) or C(absent) for adding or removing.
+    - Use C(query) for listing an object or multiple objects.
+    type: str
+    choices: [ absent, present, query ]
+    default: present
+notes:
+- Due to restrictions of the MSO REST API concurrent modifications to EPG subnets can be dangerous and corrupt data.
+extends_documentation_fragment:
+- cisco.mso.mso
+
+'''
+
+EXAMPLES = r'''
+- name: Add a new subnet to an External EPG
+  mso_schema_template_external_epg_subnet:
+    host: mso_host
+    username: admin
+    password: SomeSecretPassword
+    schema: Schema 1
+    template: Template 1
+    external_epg: EPG 1
+    subnet: 10.0.0.0/24
+    state: present
+  delegate_to: localhost
+
+- name: Remove a subnet from an External EPG
+  mso_schema_template_external_epg_subnet:
+    host: mso_host
+    username: admin
+    password: SomeSecretPassword
+    schema: Schema 1
+    template: Template 1
+    external_epg: EPG 1
+    subnet: 10.0.0.0/24
+    state: absent
+  delegate_to: localhost
+
+- name: Query a specific External EPG subnet
+  mso_schema_template_external_epg_subnet:
+    host: mso_host
+    username: admin
+    password: SomeSecretPassword
+    schema: Schema 1
+    template: Template 1
+    external_epg: EPG 1
+    subnet: 10.0.0.0/24
+    state: query
+  delegate_to: localhost
+  register: query_result
+
+- name: Query all External EPG subnets
+  mso_schema_template_external_epg_subnet:
+    host: mso_host
+    username: admin
+    password: SomeSecretPassword
+    schema: Schema 1
+    template: Template 1
+    state: query
+  delegate_to: localhost
+  register: query_result
+'''
+
+RETURN = r'''
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.cisco.mso.plugins.module_utils.network.aci.mso import MSOModule, mso_argument_spec, mso_reference_spec, mso_subnet_spec
+
+
+def main():
+    argument_spec = mso_argument_spec()
+    argument_spec.update(
+        schema=dict(type='str', required=True),
+        template=dict(type='str', required=True),
+        external_epg=dict(type='str', required=True),
+        state=dict(type='str', default='present', choices=['absent', 'present', 'query']),
+        subnet=dict(type='str'),  # Not required for querying all subnets
+        scope=dict(type='list', default=[]),
+        aggregate=dict(type='list', default=[]),
+    )
+
+    module = AnsibleModule(
+        argument_spec=argument_spec,
+        supports_check_mode=True,
+        required_if=[
+            ['state', 'absent', ['subnet']],
+            ['state', 'present', ['subnet']],
+        ],
+    )
+
+    schema = module.params['schema']
+    template = module.params['template']
+    external_epg = module.params['external_epg']
+    subnet = module.params['subnet']
+    scope = module.params['scope']
+    aggregate = module.params['aggregate']
+    state = module.params['state']
+
+    mso = MSOModule(module)
+
+    # Get schema
+    schema_obj = mso.get_obj('schemas', displayName=schema)
+    if not schema_obj:
+        mso.fail_json(msg="Provided schema '{0}' does not exist".format(schema))
+
+    schema_path = 'schemas/{id}'.format(**schema_obj)
+
+    # Get template
+    templates = [t['name'] for t in schema_obj['templates']]
+    if template not in templates:
+        mso.fail_json(msg="Provided template '{template}' does not exist. Existing templates: {templates}".format(template=template,
+                                                                                                                  templates=', '.join(templates)))
+    template_idx = templates.index(template)
+
+    # Get EPG
+    external_epgs = [e['name'] for e in schema_obj['templates'][template_idx]['externalEpgs']]
+    if external_epg not in external_epgs:
+        mso.fail_json(msg="Provided External EPG '{epg}' does not exist.
Existing epgs: {epgs}".format(epg=external_epg, epgs=', '.join(external_epgs))) + epg_idx = external_epgs.index(external_epg) + + # Get Subnet + subnets = [s['ip'] for s in schema_obj['templates'][template_idx]['externalEpgs'][epg_idx]['subnets']] + if subnet in subnets: + subnet_idx = subnets.index(subnet) + # FIXME: Changes based on index are DANGEROUS + subnet_path = '/templates/{0}/externalEpgs/{1}/subnets/{2}'.format(template, external_epg, subnet_idx) + mso.existing = schema_obj['templates'][template_idx]['externalEpgs'][epg_idx]['subnets'][subnet_idx] + + if state == 'query': + if subnet is None: + mso.existing = schema_obj['templates'][template_idx]['externalEpgs'][epg_idx]['subnets'] + elif not mso.existing: + mso.fail_json(msg="Subnet '{subnet}' not found".format(subnet=subnet)) + mso.exit_json() + + subnets_path = '/templates/{0}/externalEpgs/{1}/subnets'.format(template, external_epg) + ops = [] + + mso.previous = mso.existing + if state == 'absent': + if mso.existing: + mso.existing = {} + ops.append(dict(op='remove', path=subnet_path)) + + elif state == 'present': + payload = dict( + ip=subnet, + scope=scope, + aggregate=aggregate, + ) + + mso.sanitize(payload, collate=True) + + if mso.existing: + ops.append(dict(op='replace', path=subnet_path, value=mso.sent)) + else: + ops.append(dict(op='add', path=subnets_path + '/-', value=mso.sent)) + + mso.existing = mso.proposed + + if not module.check_mode: + mso.request(schema_path, method='PATCH', data=ops) + + mso.exit_json() + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/network/aireos/aireos_command.py b/plugins/modules/network/aireos/aireos_command.py new file mode 100644 index 0000000000..da5da5155e --- /dev/null +++ b/plugins/modules/network/aireos/aireos_command.py @@ -0,0 +1,218 @@ +#!/usr/bin/python +# +# Copyright: Ansible Team +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: aireos_command +author: "James Mighion (@jmighion)" +short_description: Run commands on remote devices running Cisco WLC +description: + - Sends arbitrary commands to an aireos node and returns the results + read from the device. This module includes an + argument that will cause the module to wait for a specific condition + before returning or timing out if the condition is not met. + - Commands run in configuration mode with this module are not + idempotent. Please use M(aireos_config) to configure WLC devices. +extends_documentation_fragment: +- community.general.aireos + +options: + commands: + description: + - List of commands to send to the remote aireos device over the + configured provider. The resulting output from the command + is returned. If the I(wait_for) argument is provided, the + module is not returned until the condition is satisfied or + the number of retries has expired. + required: true + wait_for: + description: + - List of conditions to evaluate against the output of the + command. The task will wait for each condition to be true + before moving forward. If the conditional is not true + within the configured number of retries, the task fails. + See examples. + aliases: ['waitfor'] + match: + description: + - The I(match) argument is used in conjunction with the + I(wait_for) argument to specify the match policy. 
Valid
+        values are C(all) or C(any). If the value is set to C(all)
+        then all conditionals in the wait_for must be satisfied. If
+        the value is set to C(any) then only one of the values must be
+        satisfied.
+    default: all
+    choices: ['any', 'all']
+  retries:
+    description:
+      - Specifies the number of retries a command should be tried
+        before it is considered failed. The command is run on the
+        target device every retry and evaluated against the
+        I(wait_for) conditions.
+    default: 10
+  interval:
+    description:
+      - Configures the interval in seconds to wait between retries
+        of the command. If the command does not pass the specified
+        conditions, the interval indicates how long to wait before
+        trying the command again.
+    default: 1
+'''
+
+EXAMPLES = """
+tasks:
+  - name: run show sysinfo on remote devices
+    aireos_command:
+      commands: show sysinfo
+
+  - name: run show sysinfo and check to see if output contains Cisco Controller
+    aireos_command:
+      commands: show sysinfo
+      wait_for: result[0] contains 'Cisco Controller'
+
+  - name: run multiple commands on remote nodes
+    aireos_command:
+      commands:
+        - show sysinfo
+        - show interface summary
+
+  - name: run multiple commands and evaluate the output
+    aireos_command:
+      commands:
+        - show sysinfo
+        - show interface summary
+      wait_for:
+        - result[0] contains Cisco Controller
+        - result[1] contains Loopback0
+"""
+
+RETURN = """
+stdout:
+  description: The set of responses from the commands
+  returned: always apart from low level errors (such as action plugin)
+  type: list
+  sample: ['...', '...']
+stdout_lines:
+  description: The value of stdout split into a list
+  returned: always apart from low level errors (such as action plugin)
+  type: list
+  sample: [['...', '...'], ['...'], ['...']]
+failed_conditions:
+  description: The list of conditionals that have failed
+  returned: failed
+  type: list
+  sample: ['...', '...']
+"""
+import time
+
+from ansible_collections.community.general.plugins.module_utils.network.aireos.aireos import run_commands
+from ansible_collections.community.general.plugins.module_utils.network.aireos.aireos import aireos_argument_spec, check_args
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import ComplexList
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.parsing import Conditional
+from ansible.module_utils.six import string_types
+from ansible.module_utils._text import to_text
+
+
+def to_lines(stdout):
+    for item in stdout:
+        if isinstance(item, string_types):
+            item = to_text(item, errors='surrogate_then_replace').split('\n')
+        yield item
+
+
+def parse_commands(module, warnings):
+    command = ComplexList(dict(
+        command=dict(key=True),
+        prompt=dict(),
+        answer=dict()
+    ), module)
+    commands = command(module.params['commands'])
+    for index, item in enumerate(commands):
+        if module.check_mode and not item['command'].startswith('show'):
+            warnings.append(
+                'only show commands are supported when using check mode, not '
+                'executing `%s`' % item['command']
+            )
+        elif item['command'].startswith('conf'):
+            warnings.append(
+                'commands run in config mode with aireos_command are not '
+                'idempotent.
Please use aireos_config instead' + ) + return commands + + +def main(): + """main entry point for module execution + """ + argument_spec = dict( + commands=dict(type='list', required=True), + + wait_for=dict(type='list', aliases=['waitfor']), + match=dict(default='all', choices=['all', 'any']), + + retries=dict(default=10, type='int'), + interval=dict(default=1, type='int') + ) + + argument_spec.update(aireos_argument_spec) + + module = AnsibleModule(argument_spec=argument_spec, + supports_check_mode=True) + + result = {'changed': False} + + warnings = list() + check_args(module, warnings) + commands = parse_commands(module, warnings) + result['warnings'] = warnings + + wait_for = module.params['wait_for'] or list() + conditionals = [Conditional(c) for c in wait_for] + + retries = module.params['retries'] + interval = module.params['interval'] + match = module.params['match'] + + while retries > 0: + responses = run_commands(module, commands) + + for item in list(conditionals): + if item(responses): + if match == 'any': + conditionals = list() + break + conditionals.remove(item) + + if not conditionals: + break + + time.sleep(interval) + retries -= 1 + + if conditionals: + failed_conditions = [item.raw for item in conditionals] + msg = 'One or more conditional statements have not been satisfied' + module.fail_json(msg=msg, failed_conditions=failed_conditions) + + result.update({ + 'changed': False, + 'stdout': responses, + 'stdout_lines': list(to_lines(responses)) + }) + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/aireos/aireos_config.py b/plugins/modules/network/aireos/aireos_config.py new file mode 100644 index 0000000000..c2244db774 --- /dev/null +++ b/plugins/modules/network/aireos/aireos_config.py @@ -0,0 +1,357 @@ +#!/usr/bin/python +# +# Copyright: Ansible Team +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: aireos_config +author: "James Mighion (@jmighion)" +short_description: Manage Cisco WLC configurations +description: + - AireOS does not use a block indent file syntax, so there are no sections or parents. + This module provides an implementation for working with AireOS configurations in + a deterministic way. +extends_documentation_fragment: +- community.general.aireos + +options: + lines: + description: + - The ordered set of commands that should be configured. + The commands must be the exact same commands as found + in the device run-config. Be sure to note the configuration + command syntax as some commands are automatically modified by the + device config parser. + aliases: ['commands'] + src: + description: + - Specifies the source path to the file that contains the configuration + or configuration template to load. The path to the source file can + either be the full path on the Ansible control host or a relative + path from the playbook or role root directory. This argument is mutually + exclusive with I(lines). + before: + description: + - The ordered set of commands to push on to the command stack if + a change needs to be made. This allows the playbook designer + the opportunity to perform configuration commands prior to pushing + any changes without affecting how the set of commands are matched + against the system. 
+ after: + description: + - The ordered set of commands to append to the end of the command + stack if a change needs to be made. Just like with I(before) this + allows the playbook designer to append a set of commands to be + executed after the command set. + match: + description: + - Instructs the module on the way to perform the matching of + the set of commands against the current device config. If + match is set to I(line), commands are matched line by line. + If match is set to I(none), the module will not attempt to + compare the source configuration with the running + configuration on the remote device. + default: line + choices: ['line', 'none'] + backup: + description: + - This argument will cause the module to create a full backup of + the current C(running-config) from the remote device before any + changes are made. If the C(backup_options) value is not given, + the backup file is written to the C(backup) folder in the playbook + root directory. If the directory does not exist, it is created. + type: bool + default: 'no' + running_config: + description: + - The module, by default, will connect to the remote device and + retrieve the current running-config to use as a base for comparing + against the contents of source. There are times when it is not + desirable to have the task get the current running-config for + every task in a playbook. The I(running_config) argument allows the + implementer to pass in the configuration to use as the base + config for comparison. + aliases: ['config'] + save: + description: + - The C(save) argument instructs the module to save the + running-config to startup-config. This operation is performed + after any changes are made to the current running config. If + no changes are made, the configuration is still saved to the + startup config. This option will always cause the module to + return changed. This argument is mutually exclusive with I(save_when). + - This option is deprecated as of Ansible 2.7, use C(save_when) + type: bool + default: 'no' + save_when: + description: + - When changes are made to the device running-configuration, the + changes are not copied to non-volatile storage by default. Using + this argument will change that. If the argument is set to + I(always), then the running-config will always be copied to the + startup-config and the module will always return as changed. + If the argument is set to I(never), the running-config will never + be copied to the startup-config. If the argument is set to I(changed), + then the running-config will only be copied to the startup-config if + the task has made a change. + default: never + choices: ['always', 'never', 'changed'] + diff_against: + description: + - When using the C(ansible-playbook --diff) command line argument + the module can generate diffs against different sources. + - When this option is configured as I(intended), the module will + return the diff of the running-config against the configuration + provided in the C(intended_config) argument. + - When this option is configured as I(running), the module will + return the before and after diff of the running-config with respect + to any changes made to the device configuration. + choices: ['intended', 'running'] + diff_ignore_lines: + description: + - Use this argument to specify one or more lines that should be + ignored during the diff. This is used for lines in the configuration + that are automatically updated by the system. This argument takes + a list of regular expressions or exact line matches. 
+  intended_config:
+    description:
+      - The C(intended_config) provides the master configuration that
+        the node should conform to and is used to check the final
+        running-config against. This argument will not modify any settings
+        on the remote device and is used strictly as the base configuration
+        to check the current device's configuration for compliance. When specifying this
+        argument, the task should also modify the C(diff_against) value and
+        set it to I(intended).
+  backup_options:
+    description:
+      - This is a dict object containing configurable options related to backup file path.
+        The value of this option is read only when C(backup) is set to I(yes), if C(backup) is set
+        to I(no) this option will be silently ignored.
+    suboptions:
+      filename:
+        description:
+          - The filename to be used to store the backup configuration. If the filename
+            is not given it will be generated based on the hostname, current time and date
+            in the format C(<hostname>_config.<current-date>@<current-time>).
+      dir_path:
+        description:
+          - This option provides the path ending with directory name in which the backup
+            configuration file will be stored. If the directory does not exist it will be first
+            created and the filename is either the value of C(filename) or default filename
+            as described in C(filename) options description. If the path value is not given
+            in that case a I(backup) directory will be created in the current working directory
+            and backup configuration will be copied in C(filename) within I(backup) directory.
+        type: path
+    type: dict
+'''
+
+EXAMPLES = """
+- name: configure the device sysname
+  aireos_config:
+    lines: sysname testDevice
+
+- name: diff the running-config against a provided config
+  aireos_config:
+    diff_against: intended
+    intended_config: "{{ lookup('file', 'master.cfg') }}"
+
+- name: load new acl into device
+  aireos_config:
+    lines:
+      - acl create testACL
+      - acl rule protocol testACL 1 any
+      - acl rule direction testACL 3 in
+    before: acl delete testACL
+
+- name: configurable backup path
+  aireos_config:
+    backup: yes
+    lines: sysname testDevice
+    backup_options:
+      filename: backup.cfg
+      dir_path: /home/user
+"""
+
+RETURN = """
+commands:
+  description: The set of commands that will be pushed to the remote device
+  returned: always
+  type: list
+  sample: ['hostname foo', 'vlan 1', 'name default']
+updates:
+  description: The set of commands that will be pushed to the remote device
+  returned: always
+  type: list
+  sample: ['hostname foo', 'vlan 1', 'name default']
+backup_path:
+  description: The full path to the backup file
+  returned: when backup is yes
+  type: str
+  sample: /playbooks/ansible/backup/aireos_config.2016-07-16@22:28:34
+"""
+from ansible_collections.community.general.plugins.module_utils.network.aireos.aireos import run_commands, get_config, load_config
+from ansible_collections.community.general.plugins.module_utils.network.aireos.aireos import aireos_argument_spec
+from ansible_collections.community.general.plugins.module_utils.network.aireos.aireos import check_args as aireos_check_args
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.config import NetworkConfig, dumps
+
+
+def get_running_config(module, config=None):
+    contents = module.params['running_config']
+    if not contents:
+        if config:
+            contents = config
+        else:
+            contents = get_config(module)
+    return NetworkConfig(indent=1, contents=contents)
+
+
+def get_candidate(module):
+    candidate = NetworkConfig(indent=1)
+
+    if module.params['src']:
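+        # Build the candidate config from the contents supplied via src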
candidate.load(module.params['src']) + elif module.params['lines']: + candidate.add(module.params['lines']) + return candidate + + +def save_config(module, result): + result['changed'] = True + if not module.check_mode: + command = {"command": "save config", "prompt": "Are you sure you want to save", "answer": "y"} + run_commands(module, command) + else: + module.warn('Skipping command `save config` due to check_mode. Configuration not copied to ' + 'non-volatile storage') + + +def main(): + """ main entry point for module execution + """ + backup_spec = dict( + filename=dict(), + dir_path=dict(type='path') + ) + argument_spec = dict( + src=dict(type='path'), + + lines=dict(aliases=['commands'], type='list'), + + before=dict(type='list'), + after=dict(type='list'), + + match=dict(default='line', choices=['line', 'none']), + + running_config=dict(aliases=['config']), + intended_config=dict(), + + backup=dict(type='bool', default=False), + backup_options=dict(type='dict', options=backup_spec), + + # save is deprecated as of 2.7, use save_when instead + save=dict(type='bool', default=False, removed_in_version='2.11'), + save_when=dict(choices=['always', 'never', 'changed'], default='never'), + + diff_against=dict(choices=['running', 'intended']), + diff_ignore_lines=dict(type='list') + ) + + argument_spec.update(aireos_argument_spec) + + mutually_exclusive = [('lines', 'src'), + ('save', 'save_when')] + + required_if = [('diff_against', 'intended', ['intended_config'])] + + module = AnsibleModule(argument_spec=argument_spec, + mutually_exclusive=mutually_exclusive, + required_if=required_if, + supports_check_mode=True) + + warnings = list() + aireos_check_args(module, warnings) + result = {'changed': False, 'warnings': warnings} + + config = None + + if module.params['backup'] or (module._diff and module.params['diff_against'] == 'running'): + contents = get_config(module) + config = NetworkConfig(indent=1, contents=contents) + if module.params['backup']: + result['__backup__'] = contents + + if any((module.params['src'], module.params['lines'])): + match = module.params['match'] + + candidate = get_candidate(module) + + if match != 'none': + config = get_running_config(module, config) + configobjs = candidate.difference(config, match=match) + else: + configobjs = candidate.items + + if configobjs: + commands = dumps(configobjs, 'commands').split('\n') + + if module.params['before']: + commands[:0] = module.params['before'] + + if module.params['after']: + commands.extend(module.params['after']) + + result['commands'] = commands + result['updates'] = commands + + if not module.check_mode: + load_config(module, commands) + + result['changed'] = True + + diff_ignore_lines = module.params['diff_ignore_lines'] + + if module.params['save_when'] == 'always' or module.params['save']: + save_config(module, result) + elif module.params['save_when'] == 'changed' and result['changed']: + save_config(module, result) + + if module._diff: + output = run_commands(module, 'show run-config commands') + contents = output[0] + + # recreate the object in order to process diff_ignore_lines + running_config = NetworkConfig(indent=1, contents=contents, ignore_lines=diff_ignore_lines) + + if module.params['diff_against'] == 'running': + if module.check_mode: + module.warn("unable to perform diff against running-config due to check mode") + contents = None + else: + contents = config.config_text + elif module.params['diff_against'] == 'intended': + contents = module.params['intended_config'] + + if contents is not 
None:
+            base_config = NetworkConfig(indent=1, contents=contents, ignore_lines=diff_ignore_lines)
+
+            if running_config.sha1 != base_config.sha1:
+                result.update({
+                    'changed': True,
+                    'diff': {'before': str(base_config), 'after': str(running_config)}
+                })
+
+    module.exit_json(**result)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/network/apconos/apconos_command.py b/plugins/modules/network/apconos/apconos_command.py
new file mode 100644
index 0000000000..dc14fa0eaf
--- /dev/null
+++ b/plugins/modules/network/apconos/apconos_command.py
@@ -0,0 +1,200 @@
+#!/usr/bin/python
+#
+# Copyright (C) 2019 APCON.
+#
+# GNU General Public License v3.0+
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+# Module to execute apconos Commands on Apcon Switches.
+# Apcon Networking
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: apconos_command
+author: "David Lee (@davidlee-ap)"
+short_description: Run arbitrary commands on APCON devices
+description:
+  - Sends arbitrary commands to an APCON device and returns the results
+    read from the device. The module includes an argument that will
+    cause the module to wait for a specific condition before returning
+    or timing out if the condition is not met.
+notes:
+  - Tested against apcon iis+ii
+options:
+  commands:
+    description:
+      - List of commands to send to the remote device over the
+        configured provider. The resulting output from the command
+        is returned. If the I(wait_for) argument is provided, the
+        module does not return until the condition is satisfied or
+        the number of retries has expired.
+    required: true
+    type: list
+  wait_for:
+    description:
+      - List of conditions to evaluate against the output of the
+        command. The task will wait for each condition to be true
+        before moving forward. If the conditional is not true
+        within the configured number of retries, the task fails.
+        See examples.
+    type: list
+  match:
+    description:
+      - The I(match) argument is used in conjunction with the
+        I(wait_for) argument to specify the match policy. Valid
+        values are C(all) or C(any). If the value is set to C(all)
+        then all conditionals in the wait_for must be satisfied. If
+        the value is set to C(any) then only one of the values must be
+        satisfied.
+    default: all
+    choices: ['any', 'all']
+    type: str
+  retries:
+    description:
+      - Specifies the number of retries a command should be tried
+        before it is considered failed. The command is run on the
+        target device every retry and evaluated against the
+        I(wait_for) conditions.
+    default: 10
+    type: int
+  interval:
+    description:
+      - Configures the interval in seconds to wait between retries
+        of the command. If the command does not pass the specified
+        conditions, the interval indicates how long to wait before
+        trying the command again.
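+    # Note: given the retry loop in main() below, the worst-case wait before a
+    # failed conditional is reported is roughly retries * interval seconds.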
+ default: 1 + type: int +''' + +EXAMPLES = """ +- name: Basic Configuration + apconos_command: + commands: + - show version + - enable ssh + register: result + +- name: Get output from single command + apconos_command: + commands: ['show version'] + register: result +""" + +RETURN = """ +""" + +import time + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import to_lines +from ansible_collections.community.general.plugins.module_utils.network.apconos.apconos import run_commands +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.parsing import Conditional + + +def parse_commands(module, warnings): + + commands = module.params['commands'] + + if module.check_mode: + for item in list(commands): + if not item.startswith('show'): + warnings.append( + 'Only show commands are supported when using check mode, not ' + 'executing %s' % item + ) + commands.remove(item) + + return commands + + +def main(): + spec = dict( + commands=dict(type='list', required=True), + + wait_for=dict(type='list'), + match=dict(default='all', choices=['all', 'any']), + + retries=dict(default=10, type='int'), + interval=dict(default=1, type='int') + ) + + module = AnsibleModule(argument_spec=spec, supports_check_mode=False) + warnings = list() + result = {'changed': False, 'warnings': warnings} + + wait_for = module.params['wait_for'] or list() + conditionals = [Conditional(c) for c in wait_for] + + commands = parse_commands(module, warnings) + commands = module.params['commands'] + retries = module.params['retries'] + interval = module.params['interval'] + match = module.params['match'] + + while retries > 0: + responses = run_commands(module, commands) + + for item in list(conditionals): + if item(responses): + if match == 'any': + conditionals = list() + break + conditionals.remove(item) + + if not conditionals: + break + + time.sleep(interval) + retries -= 1 + + if conditionals: + failed_conditions = [item.raw for item in conditionals] + msg = 'One or more conditional statements have not been satisfied' + module.fail_json(msg=msg, failed_conditions=failed_conditions) + + for item in responses: + if len(item) == 0: + if module.check_mode: + result.update({ + 'changed': False, + 'stdout': responses, + 'stdout_lines': list(to_lines(responses)) + }) + else: + result.update({ + 'changed': True, + 'stdout': responses, + 'stdout_lines': list(to_lines(responses)) + }) + elif 'ERROR' in item: + result.update({ + 'failed': True, + 'stdout': responses, + 'stdout_lines': list(to_lines(responses)) + }) + else: + result.update({ + 'stdout': item, + 'stdout_lines': list(to_lines(responses)) + }) + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/aruba/aruba_command.py b/plugins/modules/network/aruba/aruba_command.py new file mode 100644 index 0000000000..d59756e2e9 --- /dev/null +++ b/plugins/modules/network/aruba/aruba_command.py @@ -0,0 +1,217 @@ +#!/usr/bin/python +# +# Copyright: Ansible Team +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: aruba_command +author: "James Mighion (@jmighion)" +short_description: Run commands on remote devices running Aruba Mobility Controller 
+description:
+  - Sends arbitrary commands to an Aruba node and returns the results
+    read from the device. This module includes an
+    argument that will cause the module to wait for a specific condition
+    before returning or timing out if the condition is not met.
+  - This module does not support running commands in configuration mode.
+    Please use M(aruba_config) to configure Aruba devices.
+extends_documentation_fragment:
+- community.general.aruba
+
+options:
+  commands:
+    description:
+      - List of commands to send to the remote Aruba device over the
+        configured provider. The resulting output from the command
+        is returned. If the I(wait_for) argument is provided, the
+        module does not return until the condition is satisfied or
+        the number of retries has expired.
+    required: true
+  wait_for:
+    description:
+      - List of conditions to evaluate against the output of the
+        command. The task will wait for each condition to be true
+        before moving forward. If the conditional is not true
+        within the configured number of retries, the task fails.
+        See examples.
+    aliases: ['waitfor']
+  match:
+    description:
+      - The I(match) argument is used in conjunction with the
+        I(wait_for) argument to specify the match policy. Valid
+        values are C(all) or C(any). If the value is set to C(all)
+        then all conditionals in the wait_for must be satisfied. If
+        the value is set to C(any) then only one of the values must be
+        satisfied.
+    default: all
+    choices: ['any', 'all']
+  retries:
+    description:
+      - Specifies the number of retries a command should be tried
+        before it is considered failed. The command is run on the
+        target device every retry and evaluated against the
+        I(wait_for) conditions.
+    default: 10
+  interval:
+    description:
+      - Configures the interval in seconds to wait between retries
+        of the command. If the command does not pass the specified
+        conditions, the interval indicates how long to wait before
+        trying the command again.
+ default: 1 +''' + +EXAMPLES = """ +tasks: + - name: run show version on remote devices + aruba_command: + commands: show version + + - name: run show version and check to see if output contains Aruba + aruba_command: + commands: show version + wait_for: result[0] contains Aruba + + - name: run multiple commands on remote nodes + aruba_command: + commands: + - show version + - show interfaces + + - name: run multiple commands and evaluate the output + aruba_command: + commands: + - show version + - show interfaces + wait_for: + - result[0] contains Aruba + - result[1] contains Loopback0 +""" + +RETURN = """ +stdout: + description: The set of responses from the commands + returned: always + type: list + sample: ['...', '...'] +stdout_lines: + description: The value of stdout split into a list + returned: always + type: list + sample: [['...', '...'], ['...'], ['...']] +failed_conditions: + description: The list of conditionals that have failed + returned: failed + type: list + sample: ['...', '...'] +""" +import time + +from ansible_collections.community.general.plugins.module_utils.network.aruba.aruba import run_commands +from ansible_collections.community.general.plugins.module_utils.network.aruba.aruba import aruba_argument_spec, check_args +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import ComplexList +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.parsing import Conditional +from ansible.module_utils.six import string_types + + +def to_lines(stdout): + for item in stdout: + if isinstance(item, string_types): + item = str(item).split('\n') + yield item + + +def parse_commands(module, warnings): + command = ComplexList(dict( + command=dict(key=True), + prompt=dict(), + answer=dict() + ), module) + commands = command(module.params['commands']) + for index, item in enumerate(commands): + if module.check_mode and not item['command'].startswith('show'): + warnings.append( + 'only show commands are supported when using check mode, not ' + 'executing `%s`' % item['command'] + ) + elif item['command'].startswith('conf'): + module.fail_json( + msg='aruba_command does not support running config mode ' + 'commands. 
Please use aruba_config instead' + ) + return commands + + +def main(): + """main entry point for module execution + """ + argument_spec = dict( + commands=dict(type='list', required=True), + + wait_for=dict(type='list', aliases=['waitfor']), + match=dict(default='all', choices=['all', 'any']), + + retries=dict(default=10, type='int'), + interval=dict(default=1, type='int') + ) + + argument_spec.update(aruba_argument_spec) + + module = AnsibleModule(argument_spec=argument_spec, + supports_check_mode=True) + + result = {'changed': False} + + warnings = list() + check_args(module, warnings) + commands = parse_commands(module, warnings) + result['warnings'] = warnings + + wait_for = module.params['wait_for'] or list() + conditionals = [Conditional(c) for c in wait_for] + + retries = module.params['retries'] + interval = module.params['interval'] + match = module.params['match'] + + while retries > 0: + responses = run_commands(module, commands) + + for item in list(conditionals): + if item(responses): + if match == 'any': + conditionals = list() + break + conditionals.remove(item) + + if not conditionals: + break + + time.sleep(interval) + retries -= 1 + + if conditionals: + failed_conditions = [item.raw for item in conditionals] + msg = 'One or more conditional statements have not been satisfied' + module.fail_json(msg=msg, failed_conditions=failed_conditions) + + result.update({ + 'changed': False, + 'stdout': responses, + 'stdout_lines': list(to_lines(responses)) + }) + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/aruba/aruba_config.py b/plugins/modules/network/aruba/aruba_config.py new file mode 100644 index 0000000000..4511ccd3dc --- /dev/null +++ b/plugins/modules/network/aruba/aruba_config.py @@ -0,0 +1,424 @@ +#!/usr/bin/python +# +# Copyright: Ansible Team +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: aruba_config +author: "James Mighion (@jmighion)" +short_description: Manage Aruba configuration sections +description: + - Aruba configurations use a simple block indent file syntax + for segmenting configuration into sections. This module provides + an implementation for working with Aruba configuration sections in + a deterministic way. +extends_documentation_fragment: +- community.general.aruba + +options: + lines: + description: + - The ordered set of commands that should be configured in the + section. The commands must be the exact same commands as found + in the device running-config. Be sure to note the configuration + command syntax as some commands are automatically modified by the + device config parser. + aliases: ['commands'] + parents: + description: + - The ordered set of parents that uniquely identify the section or hierarchy + the commands should be checked against. If the parents argument + is omitted, the commands are checked against the set of top + level or global commands. + src: + description: + - Specifies the source path to the file that contains the configuration + or configuration template to load. The path to the source file can + either be the full path on the Ansible control host or a relative + path from the playbook or role root directory. This argument is mutually + exclusive with I(lines), I(parents). 
+  before:
+    description:
+      - The ordered set of commands to push on to the command stack if
+        a change needs to be made. This allows the playbook designer
+        the opportunity to perform configuration commands prior to pushing
+        any changes without affecting how the set of commands is matched
+        against the system.
+  after:
+    description:
+      - The ordered set of commands to append to the end of the command
+        stack if a change needs to be made. Just like with I(before) this
+        allows the playbook designer to append a set of commands to be
+        executed after the command set.
+  match:
+    description:
+      - Instructs the module on the way to perform the matching of
+        the set of commands against the current device config. If
+        match is set to I(line), commands are matched line by line. If
+        match is set to I(strict), command lines are matched with respect
+        to position. If match is set to I(exact), command lines
+        must be an equal match. Finally, if match is set to I(none), the
+        module will not attempt to compare the source configuration with
+        the running configuration on the remote device.
+    default: line
+    choices: ['line', 'strict', 'exact', 'none']
+  replace:
+    description:
+      - Instructs the module on the way to perform the configuration
+        on the device. If the replace argument is set to I(line) then
+        the modified lines are pushed to the device in configuration
+        mode. If the replace argument is set to I(block) then the entire
+        command block is pushed to the device in configuration mode if any
+        line is not correct.
+    default: line
+    choices: ['line', 'block']
+  backup:
+    description:
+      - This argument will cause the module to create a full backup of
+        the current C(running-config) from the remote device before any
+        changes are made. If the C(backup_options) value is not given,
+        the backup file is written to the C(backup) folder in the playbook
+        root directory. If the directory does not exist, it is created.
+    type: bool
+    default: 'no'
+  running_config:
+    description:
+      - The module, by default, will connect to the remote device and
+        retrieve the current running-config to use as a base for comparing
+        against the contents of source. There are times when it is not
+        desirable to have the task get the current running-config for
+        every task in a playbook. The I(running_config) argument allows the
+        implementer to pass in the configuration to use as the base
+        config for comparison.
+    aliases: ['config']
+  save_when:
+    description:
+      - When changes are made to the device running-configuration, the
+        changes are not copied to non-volatile storage by default. Using
+        this argument will change that behavior. If the argument is set to
+        I(always), then the running-config will always be copied to the
+        startup configuration and the I(modified) flag will always be set to
+        True. If the argument is set to I(modified), then the running-config
+        will only be copied to the startup configuration if it has changed since
+        the last save to startup configuration. If the argument is set to
+        I(never), the running-config will never be copied to the
+        startup configuration. If the argument is set to I(changed), then the running-config
+        will only be copied to the startup configuration if the task has made a change.
+    default: never
+    choices: ['always', 'never', 'modified', 'changed']
+  diff_against:
+    description:
+      - When using the C(ansible-playbook --diff) command line argument
+        the module can generate diffs against different sources.
+      - When this option is configured as I(startup), the module will return
+        the diff of the running-config against the startup configuration.
+      - When this option is configured as I(intended), the module will
+        return the diff of the running-config against the configuration
+        provided in the C(intended_config) argument.
+      - When this option is configured as I(running), the module will
+        return the before and after diff of the running-config with respect
+        to any changes made to the device configuration.
+    choices: ['startup', 'intended', 'running']
+  diff_ignore_lines:
+    description:
+      - Use this argument to specify one or more lines that should be
+        ignored during the diff. This is used for lines in the configuration
+        that are automatically updated by the system. This argument takes
+        a list of regular expressions or exact line matches.
+  intended_config:
+    description:
+      - The C(intended_config) provides the master configuration that
+        the node should conform to and is used to check the final
+        running-config against. This argument will not modify any settings
+        on the remote device and is strictly used to check compliance
+        of the current device's configuration. When specifying this
+        argument, the task should also modify the C(diff_against) value and
+        set it to I(intended).
+  encrypt:
+    description:
+      - This allows an Aruba controller's passwords and keys to be displayed in plain
+        text when set to I(false) or encrypted when set to I(true).
+        If set to I(false), encryption is re-enabled at the end of the module run.
+        Backups are still encrypted even when set to I(false).
+    type: bool
+    default: 'yes'
+  backup_options:
+    description:
+      - This is a dict object containing configurable options related to the backup file path.
+        The value of this option is read only when C(backup) is set to I(yes); if C(backup) is set
+        to I(no) this option will be silently ignored.
+    suboptions:
+      filename:
+        description:
+          - The filename to be used to store the backup configuration. If the filename
+            is not given it will be generated based on the hostname, current time and date
+            in the format defined by <hostname>_config.<current-date>@<time>
+      dir_path:
+        description:
+          - This option provides the path, ending with a directory name, in which the backup
+            configuration file will be stored. If the directory does not exist it will be
+            created first, and the filename is either the value of C(filename) or the default
+            filename described in the C(filename) option. If no path value is given,
+            a I(backup) directory will be created in the current working directory
+            and the backup configuration will be copied to C(filename) within that directory.
+ type: path + type: dict +''' + +EXAMPLES = """ +- name: configure top level configuration + aruba_config: + lines: hostname {{ inventory_hostname }} + +- name: diff the running-config against a provided config + aruba_config: + diff_against: intended + intended_config: "{{ lookup('file', 'master.cfg') }}" + +- name: configure interface settings + aruba_config: + lines: + - description test interface + - ip access-group 1 in + parents: interface gigabitethernet 0/0/0 + +- name: load new acl into device + aruba_config: + lines: + - permit host 10.10.10.10 + - ipv6 permit host fda9:97d6:32a3:3e59::3333 + parents: ip access-list standard 1 + before: no ip access-list standard 1 + match: exact + +- name: configurable backup path + aruba_config: + backup: yes + lines: hostname {{ inventory_hostname }} + backup_options: + filename: backup.cfg + dir_path: /home/user +""" + +RETURN = """ +commands: + description: The set of commands that will be pushed to the remote device + returned: always + type: list + sample: ['hostname foo', 'vlan 1', 'name default'] +updates: + description: The set of commands that will be pushed to the remote device + returned: always + type: list + sample: ['hostname foo', 'vlan 1', 'name default'] +backup_path: + description: The full path to the backup file + returned: when backup is yes + type: str + sample: /playbooks/ansible/backup/aruba_config.2016-07-16@22:28:34 +""" + + +from ansible_collections.community.general.plugins.module_utils.network.aruba.aruba import run_commands, get_config, load_config +from ansible_collections.community.general.plugins.module_utils.network.aruba.aruba import aruba_argument_spec +from ansible_collections.community.general.plugins.module_utils.network.aruba.aruba import check_args as aruba_check_args +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.config import NetworkConfig, dumps + + +def get_running_config(module, config=None): + contents = module.params['running_config'] + if not contents: + if config: + contents = config + else: + contents = get_config(module) + return NetworkConfig(contents=contents) + + +def get_candidate(module): + candidate = NetworkConfig() + + if module.params['src']: + candidate.load(module.params['src']) + elif module.params['lines']: + parents = module.params['parents'] or list() + candidate.add(module.params['lines'], parents=parents) + return candidate + + +def save_config(module, result): + result['changed'] = True + if not module.check_mode: + run_commands(module, 'write memory') + else: + module.warn('Skipping command `write memory` ' + 'due to check_mode. 
Configuration not copied to ' + 'non-volatile storage') + + +def main(): + """ main entry point for module execution + """ + backup_spec = dict( + filename=dict(), + dir_path=dict(type='path') + ) + argument_spec = dict( + src=dict(type='path'), + + lines=dict(aliases=['commands'], type='list'), + parents=dict(type='list'), + + before=dict(type='list'), + after=dict(type='list'), + + match=dict(default='line', choices=['line', 'strict', 'exact', 'none']), + replace=dict(default='line', choices=['line', 'block']), + + running_config=dict(aliases=['config']), + intended_config=dict(), + + backup=dict(type='bool', default=False), + backup_options=dict(type='dict', options=backup_spec), + + save_when=dict(choices=['always', 'never', 'modified', 'changed'], default='never'), + + diff_against=dict(choices=['running', 'startup', 'intended']), + diff_ignore_lines=dict(type='list'), + + encrypt=dict(type='bool', default=True), + ) + + argument_spec.update(aruba_argument_spec) + + mutually_exclusive = [('lines', 'src'), + ('parents', 'src')] + + required_if = [('match', 'strict', ['lines']), + ('match', 'exact', ['lines']), + ('replace', 'block', ['lines']), + ('diff_against', 'intended', ['intended_config'])] + + module = AnsibleModule(argument_spec=argument_spec, + mutually_exclusive=mutually_exclusive, + required_if=required_if, + supports_check_mode=True) + + warnings = list() + aruba_check_args(module, warnings) + result = {'changed': False, 'warnings': warnings} + + config = None + + if module.params['backup'] or (module._diff and module.params['diff_against'] == 'running'): + contents = get_config(module) + config = NetworkConfig(contents=contents) + if module.params['backup']: + result['__backup__'] = contents + + if not module.params['encrypt']: + run_commands(module, 'encrypt disable') + + if any((module.params['src'], module.params['lines'])): + match = module.params['match'] + replace = module.params['replace'] + + candidate = get_candidate(module) + + if match != 'none': + config = get_running_config(module, config) + path = module.params['parents'] + configobjs = candidate.difference(config, match=match, replace=replace, path=path) + else: + configobjs = candidate.items + + if configobjs: + commands = dumps(configobjs, 'commands').split('\n') + + if module.params['before']: + commands[:0] = module.params['before'] + + if module.params['after']: + commands.extend(module.params['after']) + + result['commands'] = commands + result['updates'] = commands + + if not module.check_mode: + load_config(module, commands) + + result['changed'] = True + + running_config = None + startup_config = None + + diff_ignore_lines = module.params['diff_ignore_lines'] + + if module.params['save_when'] == 'always': + save_config(module, result) + elif module.params['save_when'] == 'modified': + output = run_commands(module, ['show running-config', 'show configuration']) + + running_config = NetworkConfig(contents=output[0], ignore_lines=diff_ignore_lines) + startup_config = NetworkConfig(contents=output[1], ignore_lines=diff_ignore_lines) + + if running_config.sha1 != startup_config.sha1: + save_config(module, result) + elif module.params['save_when'] == 'changed': + if result['changed']: + save_config(module, result) + + if module._diff: + if not running_config: + output = run_commands(module, 'show running-config') + contents = output[0] + else: + contents = running_config.config_text + + # recreate the object in order to process diff_ignore_lines + running_config = NetworkConfig(contents=contents, 
ignore_lines=diff_ignore_lines) + + if module.params['diff_against'] == 'running': + if module.check_mode: + module.warn("unable to perform diff against running-config due to check mode") + contents = None + else: + contents = config.config_text + + elif module.params['diff_against'] == 'startup': + if not startup_config: + output = run_commands(module, 'show configuration') + contents = output[0] + else: + contents = startup_config.config_text + + elif module.params['diff_against'] == 'intended': + contents = module.params['intended_config'] + + if contents is not None: + base_config = NetworkConfig(contents=contents, ignore_lines=diff_ignore_lines) + + if running_config.sha1 != base_config.sha1: + result.update({ + 'changed': True, + 'diff': {'before': str(base_config), 'after': str(running_config)} + }) + + # make sure 'encrypt enable' is applied if it was ever disabled + if not module.params['encrypt']: + run_commands(module, 'encrypt enable') + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/avi/avi_actiongroupconfig.py b/plugins/modules/network/avi/avi_actiongroupconfig.py new file mode 100644 index 0000000000..9374845364 --- /dev/null +++ b/plugins/modules/network/avi/avi_actiongroupconfig.py @@ -0,0 +1,152 @@ +#!/usr/bin/python +# +# @author: Gaurav Rastogi (grastogi@avinetworks.com) +# Eric Anderson (eanderson@avinetworks.com) +# module_check: supported +# +# Copyright: (c) 2017 Gaurav Rastogi, +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: avi_actiongroupconfig +author: Gaurav Rastogi (@grastogi23) + +short_description: Module for setup of ActionGroupConfig Avi RESTful Object +description: + - This module is used to configure ActionGroupConfig object + - more examples at U(https://github.com/avinetworks/devops) +requirements: [ avisdk ] +options: + state: + description: + - The state that should be applied on the entity. + default: present + choices: ["absent", "present"] + avi_api_update_method: + description: + - Default method for object update is HTTP PUT. + - Setting to patch will override that behavior to use HTTP PATCH. + default: put + choices: ["put", "patch"] + avi_api_patch_op: + description: + - Patch operation to use when using avi_api_update_method as patch. + choices: ["add", "replace", "delete"] + action_script_config_ref: + description: + - Reference of the action script configuration to be used. + - It is a reference to an object of type alertscriptconfig. + autoscale_trigger_notification: + description: + - Trigger notification to autoscale manager. + - Default value when not specified in API or module is interpreted by Avi Controller as False. + type: bool + description: + description: + - User defined description for the object. + email_config_ref: + description: + - Select the email notification configuration to use when sending alerts via email. + - It is a reference to an object of type alertemailconfig. + external_only: + description: + - Generate alert only to external destinations. + - Default value when not specified in API or module is interpreted by Avi Controller as False. + required: true + type: bool + level: + description: + - When an alert is generated, mark its priority via the alert level. + - Enum options - ALERT_LOW, ALERT_MEDIUM, ALERT_HIGH. 
+ - Default value when not specified in API or module is interpreted by Avi Controller as ALERT_LOW. + required: true + name: + description: + - Name of the object. + required: true + snmp_trap_profile_ref: + description: + - Select the snmp trap notification to use when sending alerts via snmp trap. + - It is a reference to an object of type snmptrapprofile. + syslog_config_ref: + description: + - Select the syslog notification configuration to use when sending alerts via syslog. + - It is a reference to an object of type alertsyslogconfig. + tenant_ref: + description: + - It is a reference to an object of type tenant. + url: + description: + - Avi controller URL of the object. + uuid: + description: + - Unique object identifier of the object. +extends_documentation_fragment: +- community.general.avi + +''' + +EXAMPLES = """ +- name: Example to create ActionGroupConfig object + avi_actiongroupconfig: + controller: 10.10.25.42 + username: admin + password: something + state: present + name: sample_actiongroupconfig +""" + +RETURN = ''' +obj: + description: ActionGroupConfig (api/actiongroupconfig) object + returned: success, changed + type: dict +''' + +from ansible.module_utils.basic import AnsibleModule +try: + from ansible_collections.community.general.plugins.module_utils.network.avi.avi import ( + avi_common_argument_spec, avi_ansible_api, HAS_AVI) +except ImportError: + HAS_AVI = False + + +def main(): + argument_specs = dict( + state=dict(default='present', + choices=['absent', 'present']), + avi_api_update_method=dict(default='put', + choices=['put', 'patch']), + avi_api_patch_op=dict(choices=['add', 'replace', 'delete']), + action_script_config_ref=dict(type='str',), + autoscale_trigger_notification=dict(type='bool',), + description=dict(type='str',), + email_config_ref=dict(type='str',), + external_only=dict(type='bool', required=True), + level=dict(type='str', required=True), + name=dict(type='str', required=True), + snmp_trap_profile_ref=dict(type='str',), + syslog_config_ref=dict(type='str',), + tenant_ref=dict(type='str',), + url=dict(type='str',), + uuid=dict(type='str',), + ) + argument_specs.update(avi_common_argument_spec()) + module = AnsibleModule( + argument_spec=argument_specs, supports_check_mode=True) + if not HAS_AVI: + return module.fail_json(msg=( + 'Avi python API SDK (avisdk>=17.1) or requests is not installed. 
' + 'For more details visit https://github.com/avinetworks/sdk.')) + return avi_ansible_api(module, 'actiongroupconfig', + set([])) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/avi/avi_alertconfig.py b/plugins/modules/network/avi/avi_alertconfig.py new file mode 100644 index 0000000000..b512195ba9 --- /dev/null +++ b/plugins/modules/network/avi/avi_alertconfig.py @@ -0,0 +1,226 @@ +#!/usr/bin/python +# +# @author: Gaurav Rastogi (grastogi@avinetworks.com) +# Eric Anderson (eanderson@avinetworks.com) +# module_check: supported +# +# Copyright: (c) 2017 Gaurav Rastogi, +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: avi_alertconfig +author: Gaurav Rastogi (@grastogi23) + +short_description: Module for setup of AlertConfig Avi RESTful Object +description: + - This module is used to configure AlertConfig object + - more examples at U(https://github.com/avinetworks/devops) +requirements: [ avisdk ] +options: + state: + description: + - The state that should be applied on the entity. + default: present + choices: ["absent", "present"] + avi_api_update_method: + description: + - Default method for object update is HTTP PUT. + - Setting to patch will override that behavior to use HTTP PATCH. + default: put + choices: ["put", "patch"] + avi_api_patch_op: + description: + - Patch operation to use when using avi_api_update_method as patch. + choices: ["add", "replace", "delete"] + action_group_ref: + description: + - The alert config will trigger the selected alert action, which can send notifications and execute a controlscript. + - It is a reference to an object of type actiongroupconfig. + alert_rule: + description: + - List of filters matching on events or client logs used for triggering alerts. + required: true + autoscale_alert: + description: + - This alert config applies to auto scale alerts. + type: bool + category: + description: + - Determines whether an alert is raised immediately when event occurs (realtime) or after specified number of events occurs within rolling time + - window. + - Enum options - REALTIME, ROLLINGWINDOW, WATERMARK. + - Default value when not specified in API or module is interpreted by Avi Controller as REALTIME. + required: true + description: + description: + - A custom description field. + enabled: + description: + - Enable or disable this alert config from generating new alerts. + - Default value when not specified in API or module is interpreted by Avi Controller as True. + type: bool + expiry_time: + description: + - An alert is expired and deleted after the expiry time has elapsed. + - The original event triggering the alert remains in the event's log. + - Allowed values are 1-31536000. + - Default value when not specified in API or module is interpreted by Avi Controller as 86400. + name: + description: + - Name of the alert configuration. + required: true + obj_uuid: + description: + - Uuid of the resource for which alert was raised. + object_type: + description: + - The object type to which the alert config is associated with. + - Valid object types are - virtual service, pool, service engine. 
+ - Enum options - VIRTUALSERVICE, POOL, HEALTHMONITOR, NETWORKPROFILE, APPLICATIONPROFILE, HTTPPOLICYSET, DNSPOLICY, SECURITYPOLICY, IPADDRGROUP, + - STRINGGROUP, SSLPROFILE, SSLKEYANDCERTIFICATE, NETWORKSECURITYPOLICY, APPLICATIONPERSISTENCEPROFILE, ANALYTICSPROFILE, VSDATASCRIPTSET, TENANT, + - PKIPROFILE, AUTHPROFILE, CLOUD, SERVERAUTOSCALEPOLICY, AUTOSCALELAUNCHCONFIG, MICROSERVICEGROUP, IPAMPROFILE, HARDWARESECURITYMODULEGROUP, + - POOLGROUP, PRIORITYLABELS, POOLGROUPDEPLOYMENTPOLICY, GSLBSERVICE, GSLBSERVICERUNTIME, SCHEDULER, GSLBGEODBPROFILE, + - GSLBAPPLICATIONPERSISTENCEPROFILE, TRAFFICCLONEPROFILE, VSVIP, WAFPOLICY, WAFPROFILE, ERRORPAGEPROFILE, ERRORPAGEBODY, L4POLICYSET, + - GSLBSERVICERUNTIMEBATCH, WAFPOLICYPSMGROUP, PINGACCESSAGENT, SERVICEENGINEPOLICY, NATPOLICY, SSOPOLICY, PROTOCOLPARSER, SERVICEENGINE, + - DEBUGSERVICEENGINE, DEBUGCONTROLLER, DEBUGVIRTUALSERVICE, SERVICEENGINEGROUP, SEPROPERTIES, NETWORK, CONTROLLERNODE, CONTROLLERPROPERTIES, + - SYSTEMCONFIGURATION, VRFCONTEXT, USER, ALERTCONFIG, ALERTSYSLOGCONFIG, ALERTEMAILCONFIG, ALERTTYPECONFIG, APPLICATION, ROLE, CLOUDPROPERTIES, + - SNMPTRAPPROFILE, ACTIONGROUPPROFILE, MICROSERVICE, ALERTPARAMS, ACTIONGROUPCONFIG, CLOUDCONNECTORUSER, GSLB, GSLBDNSUPDATE, GSLBSITEOPS, + - GLBMGRWARMSTART, IPAMDNSRECORD, GSLBDNSGSSTATUS, GSLBDNSGEOFILEOPS, GSLBDNSGEOUPDATE, GSLBDNSGEOCLUSTEROPS, GSLBDNSCLEANUP, GSLBSITEOPSRESYNC, + - IPAMDNSPROVIDERPROFILE, TCPSTATRUNTIME, UDPSTATRUNTIME, IPSTATRUNTIME, ARPSTATRUNTIME, MBSTATRUNTIME, IPSTKQSTATSRUNTIME, MALLOCSTATRUNTIME, + - SHMALLOCSTATRUNTIME, CPUUSAGERUNTIME, L7GLOBALSTATSRUNTIME, L7VIRTUALSERVICESTATSRUNTIME, SEAGENTVNICDBRUNTIME, SEAGENTGRAPHDBRUNTIME, + - SEAGENTSTATERUNTIME, INTERFACERUNTIME, ARPTABLERUNTIME, DISPATCHERSTATRUNTIME, DISPATCHERSTATCLEARRUNTIME, DISPATCHERTABLEDUMPRUNTIME, + - DISPATCHERREMOTETIMERLISTDUMPRUNTIME, METRICSAGENTMESSAGE, HEALTHMONITORSTATRUNTIME, METRICSENTITYRUNTIME, PERSISTENCEINTERNAL, + - HTTPPOLICYSETINTERNAL, DNSPOLICYINTERNAL, CONNECTIONDUMPRUNTIME, SHAREDDBSTATS, SHAREDDBSTATSCLEAR, ICMPSTATRUNTIME, ROUTETABLERUNTIME, + - VIRTUALMACHINE, POOLSERVER, SEVSLIST, MEMINFORUNTIME, RTERINGSTATRUNTIME, ALGOSTATRUNTIME, HEALTHMONITORRUNTIME, CPUSTATRUNTIME, SEVM, HOST, + - PORTGROUP, CLUSTER, DATACENTER, VCENTER, HTTPPOLICYSETSTATS, DNSPOLICYSTATS, METRICSSESTATS, RATELIMITERSTATRUNTIME, NETWORKSECURITYPOLICYSTATS, + - TCPCONNRUNTIME, POOLSTATS, CONNPOOLINTERNAL, CONNPOOLSTATS, VSHASHSHOWRUNTIME, SELOGSTATSRUNTIME, NETWORKSECURITYPOLICYDETAIL, LICENSERUNTIME, + - SERVERRUNTIME, METRICSRUNTIMESUMMARY, METRICSRUNTIMEDETAIL, DISPATCHERSEHMPROBETEMPDISABLERUNTIME, POOLDEBUG, VSLOGMGRMAP, SERUMINSERTIONSTATS, + - HTTPCACHE, HTTPCACHESTATS, SEDOSSTATRUNTIME, VSDOSSTATRUNTIME, SERVERUPDATEREQ, VSSCALEOUTLIST, SEMEMDISTRUNTIME, TCPCONNRUNTIMEDETAIL, + - SEUPGRADESTATUS, SEUPGRADEPREVIEW, SEFAULTINJECTEXHAUSTM, SEFAULTINJECTEXHAUSTMCL, SEFAULTINJECTEXHAUSTMCLSMALL, SEFAULTINJECTEXHAUSTCONN, + - SEHEADLESSONLINEREQ, SEUPGRADE, SEUPGRADESTATUSDETAIL, SERESERVEDVS, SERESERVEDVSCLEAR, VSCANDIDATESEHOSTLIST, SEGROUPUPGRADE, REBALANCE, + - SEGROUPREBALANCE, SEAUTHSTATSRUNTIME, AUTOSCALESTATE, VIRTUALSERVICEAUTHSTATS, NETWORKSECURITYPOLICYDOS, KEYVALINTERNAL, KEYVALSUMMARYINTERNAL, + - SERVERSTATEUPDATEINFO, CLTRACKINTERNAL, CLTRACKSUMMARYINTERNAL, MICROSERVICERUNTIME, SEMICROSERVICE, VIRTUALSERVICEANALYSIS, CLIENTINTERNAL, + - CLIENTSUMMARYINTERNAL, MICROSERVICEGROUPRUNTIME, BGPRUNTIME, REQUESTQUEUERUNTIME, MIGRATEALL, MIGRATEALLSTATUSSUMMARY, 
MIGRATEALLSTATUSDETAIL, + - INTERFACESUMMARYRUNTIME, INTERFACELACPRUNTIME, DNSTABLE, GSLBSERVICEDETAIL, GSLBSERVICEINTERNAL, GSLBSERVICEHMONSTAT, SETROLESREQUEST, + - TRAFFICCLONERUNTIME, GEOLOCATIONINFO, SEVSHBSTATRUNTIME, GEODBINTERNAL, GSLBSITEINTERNAL, WAFSTATS, USERDEFINEDDATASCRIPTCOUNTERS, LLDPRUNTIME, + - VSESSHARINGPOOL, NDTABLERUNTIME, IP6STATRUNTIME, ICMP6STATRUNTIME, SEVSSPLACEMENT, L4POLICYSETSTATS, L4POLICYSETINTERNAL, BGPDEBUGINFO, SHARD, + - CPUSTATRUNTIMEDETAIL, SEASSERTSTATRUNTIME, SEFAULTINJECTINFRA, SEAGENTASSERTSTATRUNTIME, SEDATASTORESTATUS, DIFFQUEUESTATUS, IP6ROUTETABLERUNTIME, + - SECURITYMGRSTATE, VIRTUALSERVICESESCALEOUTSTATUS, SHARDSERVERSTATUS, SEAGENTSHARDCLIENTRESOURCEMAP, SEAGENTCONSISTENTHASH, SEAGENTVNICDBHISTORY, + - SEAGENTSHARDCLIENTAPPMAP, SEAGENTSHARDCLIENTEVENTHISTORY, SENATSTATRUNTIME, SENATFLOWRUNTIME, SERESOURCEPROTO, SECONSUMERPROTO, + - SECREATEPENDINGPROTO, PLACEMENTSTATS, SEVIPPROTO, RMVRFPROTO, VCENTERMAP, VIMGRVCENTERRUNTIME, INTERESTEDVMS, INTERESTEDHOSTS, + - VCENTERSUPPORTEDCOUNTERS, ENTITYCOUNTERS, TRANSACTIONSTATS, SEVMCREATEPROGRESS, PLACEMENTSTATUS, VISUBFOLDERS, VIDATASTORE, VIHOSTRESOURCES, + - CLOUDCONNECTOR, VINETWORKSUBNETVMS, VIDATASTORECONTENTS, VIMGRVCENTERCLOUDRUNTIME, VIVCENTERPORTGROUPS, VIVCENTERDATACENTERS, VIMGRHOSTRUNTIME, + - PLACEMENTGLOBALS, APICCONFIGURATION, CIFTABLE, APICTRANSACTION, VIRTUALSERVICESTATEDBCACHESUMMARY, POOLSTATEDBCACHESUMMARY, + - SERVERSTATEDBCACHESUMMARY, APICAGENTINTERNAL, APICTRANSACTIONFLAP, APICGRAPHINSTANCES, APICEPGS, APICEPGEPS, APICDEVICEPKGVER, APICTENANTS, + - APICVMMDOMAINS, NSXCONFIGURATION, NSXSGTABLE, NSXAGENTINTERNAL, NSXSGINFO, NSXSGIPS, NSXAGENTINTERNALCLI, MAXOBJECTS. + recommendation: + description: + - Recommendation of alertconfig. + rolling_window: + description: + - Only if the number of events is reached or exceeded within the time window will an alert be generated. + - Allowed values are 1-31536000. + - Default value when not specified in API or module is interpreted by Avi Controller as 300. + source: + description: + - Signifies system events or the type of client logsused in this alert configuration. + - Enum options - CONN_LOGS, APP_LOGS, EVENT_LOGS, METRICS. + required: true + summary: + description: + - Summary of reason why alert is generated. + tenant_ref: + description: + - It is a reference to an object of type tenant. + threshold: + description: + - An alert is created only when the number of events meets or exceeds this number within the chosen time frame. + - Allowed values are 1-65536. + - Default value when not specified in API or module is interpreted by Avi Controller as 1. + throttle: + description: + - Alerts are suppressed (throttled) for this duration of time since the last alert was raised for this alert config. + - Allowed values are 0-31536000. + - Default value when not specified in API or module is interpreted by Avi Controller as 600. + url: + description: + - Avi controller URL of the object. + uuid: + description: + - Unique object identifier of the object. 
+extends_documentation_fragment: +- community.general.avi + +''' + +EXAMPLES = """ +- name: Example to create AlertConfig object + avi_alertconfig: + controller: 10.10.25.42 + username: admin + password: something + state: present + name: sample_alertconfig +""" + +RETURN = ''' +obj: + description: AlertConfig (api/alertconfig) object + returned: success, changed + type: dict +''' + +from ansible.module_utils.basic import AnsibleModule +try: + from ansible_collections.community.general.plugins.module_utils.network.avi.avi import ( + avi_common_argument_spec, avi_ansible_api, HAS_AVI) +except ImportError: + HAS_AVI = False + + +def main(): + argument_specs = dict( + state=dict(default='present', + choices=['absent', 'present']), + avi_api_update_method=dict(default='put', + choices=['put', 'patch']), + avi_api_patch_op=dict(choices=['add', 'replace', 'delete']), + action_group_ref=dict(type='str',), + alert_rule=dict(type='dict', required=True), + autoscale_alert=dict(type='bool',), + category=dict(type='str', required=True), + description=dict(type='str',), + enabled=dict(type='bool',), + expiry_time=dict(type='int',), + name=dict(type='str', required=True), + obj_uuid=dict(type='str',), + object_type=dict(type='str',), + recommendation=dict(type='str',), + rolling_window=dict(type='int',), + source=dict(type='str', required=True), + summary=dict(type='str',), + tenant_ref=dict(type='str',), + threshold=dict(type='int',), + throttle=dict(type='int',), + url=dict(type='str',), + uuid=dict(type='str',), + ) + argument_specs.update(avi_common_argument_spec()) + module = AnsibleModule( + argument_spec=argument_specs, supports_check_mode=True) + if not HAS_AVI: + return module.fail_json(msg=( + 'Avi python API SDK (avisdk>=17.1) or requests is not installed. ' + 'For more details visit https://github.com/avinetworks/sdk.')) + return avi_ansible_api(module, 'alertconfig', + set([])) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/avi/avi_alertemailconfig.py b/plugins/modules/network/avi/avi_alertemailconfig.py new file mode 100644 index 0000000000..8ed5db1ad6 --- /dev/null +++ b/plugins/modules/network/avi/avi_alertemailconfig.py @@ -0,0 +1,121 @@ +#!/usr/bin/python +# +# @author: Gaurav Rastogi (grastogi@avinetworks.com) +# Eric Anderson (eanderson@avinetworks.com) +# module_check: supported +# +# Copyright: (c) 2017 Gaurav Rastogi, +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: avi_alertemailconfig +author: Gaurav Rastogi (@grastogi23) + +short_description: Module for setup of AlertEmailConfig Avi RESTful Object +description: + - This module is used to configure AlertEmailConfig object + - more examples at U(https://github.com/avinetworks/devops) +requirements: [ avisdk ] +options: + state: + description: + - The state that should be applied on the entity. + default: present + choices: ["absent", "present"] + avi_api_update_method: + description: + - Default method for object update is HTTP PUT. + - Setting to patch will override that behavior to use HTTP PATCH. + default: put + choices: ["put", "patch"] + avi_api_patch_op: + description: + - Patch operation to use when using avi_api_update_method as patch. + choices: ["add", "replace", "delete"] + cc_emails: + description: + - Alerts are copied to the comma separated list of email recipients. 
+ description: + description: + - User defined description for the object. + name: + description: + - A user-friendly name of the email notification service. + required: true + tenant_ref: + description: + - It is a reference to an object of type tenant. + to_emails: + description: + - Alerts are sent to the comma separated list of email recipients. + required: true + url: + description: + - Avi controller URL of the object. + uuid: + description: + - Unique object identifier of the object. +extends_documentation_fragment: +- community.general.avi + +''' + +EXAMPLES = """ +- name: Example to create AlertEmailConfig object + avi_alertemailconfig: + controller: 10.10.25.42 + username: admin + password: something + state: present + name: sample_alertemailconfig +""" + +RETURN = ''' +obj: + description: AlertEmailConfig (api/alertemailconfig) object + returned: success, changed + type: dict +''' + +from ansible.module_utils.basic import AnsibleModule +try: + from ansible_collections.community.general.plugins.module_utils.network.avi.avi import ( + avi_common_argument_spec, avi_ansible_api, HAS_AVI) +except ImportError: + HAS_AVI = False + + +def main(): + argument_specs = dict( + state=dict(default='present', + choices=['absent', 'present']), + avi_api_update_method=dict(default='put', + choices=['put', 'patch']), + avi_api_patch_op=dict(choices=['add', 'replace', 'delete']), + cc_emails=dict(type='str',), + description=dict(type='str',), + name=dict(type='str', required=True), + tenant_ref=dict(type='str',), + to_emails=dict(type='str', required=True), + url=dict(type='str',), + uuid=dict(type='str',), + ) + argument_specs.update(avi_common_argument_spec()) + module = AnsibleModule( + argument_spec=argument_specs, supports_check_mode=True) + if not HAS_AVI: + return module.fail_json(msg=( + 'Avi python API SDK (avisdk>=17.1) or requests is not installed. ' + 'For more details visit https://github.com/avinetworks/sdk.')) + return avi_ansible_api(module, 'alertemailconfig', + set([])) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/avi/avi_alertscriptconfig.py b/plugins/modules/network/avi/avi_alertscriptconfig.py new file mode 100644 index 0000000000..f616be8d6b --- /dev/null +++ b/plugins/modules/network/avi/avi_alertscriptconfig.py @@ -0,0 +1,114 @@ +#!/usr/bin/python +# +# @author: Gaurav Rastogi (grastogi@avinetworks.com) +# Eric Anderson (eanderson@avinetworks.com) +# module_check: supported +# +# Copyright: (c) 2017 Gaurav Rastogi, +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: avi_alertscriptconfig +author: Gaurav Rastogi (@grastogi23) + +short_description: Module for setup of AlertScriptConfig Avi RESTful Object +description: + - This module is used to configure AlertScriptConfig object + - more examples at U(https://github.com/avinetworks/devops) +requirements: [ avisdk ] +options: + state: + description: + - The state that should be applied on the entity. + default: present + choices: ["absent", "present"] + avi_api_update_method: + description: + - Default method for object update is HTTP PUT. + - Setting to patch will override that behavior to use HTTP PATCH. + default: put + choices: ["put", "patch"] + avi_api_patch_op: + description: + - Patch operation to use when using avi_api_update_method as patch. 
+ choices: ["add", "replace", "delete"] + action_script: + description: + - User defined alert action script. + - Please refer to kb.avinetworks.com for more information. + name: + description: + - A user-friendly name of the script. + required: true + tenant_ref: + description: + - It is a reference to an object of type tenant. + url: + description: + - Avi controller URL of the object. + uuid: + description: + - Unique object identifier of the object. +extends_documentation_fragment: +- community.general.avi + +''' + +EXAMPLES = """ + - name: Create Alert Script to perform AWS server autoscaling + avi_alertscriptconfig: + username: '{{ username }}' + controller: '{{ controller }}' + password: '{{ password }}' + action_script: "echo Hello" + name: AWS-Launch-Script + tenant_ref: Demo +""" + +RETURN = ''' +obj: + description: AlertScriptConfig (api/alertscriptconfig) object + returned: success, changed + type: dict +''' + +from ansible.module_utils.basic import AnsibleModule +try: + from ansible_collections.community.general.plugins.module_utils.network.avi.avi import ( + avi_common_argument_spec, avi_ansible_api, HAS_AVI) +except ImportError: + HAS_AVI = False + + +def main(): + argument_specs = dict( + state=dict(default='present', + choices=['absent', 'present']), + avi_api_update_method=dict(default='put', + choices=['put', 'patch']), + avi_api_patch_op=dict(choices=['add', 'replace', 'delete']), + action_script=dict(type='str',), + name=dict(type='str', required=True), + tenant_ref=dict(type='str',), + url=dict(type='str',), + uuid=dict(type='str',), + ) + argument_specs.update(avi_common_argument_spec()) + module = AnsibleModule( + argument_spec=argument_specs, supports_check_mode=True) + if not HAS_AVI: + return module.fail_json(msg=( + 'Avi python API SDK (avisdk>=17.1) or requests is not installed. ' + 'For more details visit https://github.com/avinetworks/sdk.')) + return avi_ansible_api(module, 'alertscriptconfig', + set([])) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/avi/avi_alertsyslogconfig.py b/plugins/modules/network/avi/avi_alertsyslogconfig.py new file mode 100644 index 0000000000..1a71f3d587 --- /dev/null +++ b/plugins/modules/network/avi/avi_alertsyslogconfig.py @@ -0,0 +1,120 @@ +#!/usr/bin/python +# +# @author: Gaurav Rastogi (grastogi@avinetworks.com) +# Eric Anderson (eanderson@avinetworks.com) +# module_check: supported +# +# Copyright: (c) 2017 Gaurav Rastogi, +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: avi_alertsyslogconfig +author: Gaurav Rastogi (@grastogi23) + +short_description: Module for setup of AlertSyslogConfig Avi RESTful Object +description: + - This module is used to configure AlertSyslogConfig object + - more examples at U(https://github.com/avinetworks/devops) +requirements: [ avisdk ] +options: + state: + description: + - The state that should be applied on the entity. + default: present + choices: ["absent", "present"] + avi_api_update_method: + description: + - Default method for object update is HTTP PUT. + - Setting to patch will override that behavior to use HTTP PATCH. + default: put + choices: ["put", "patch"] + avi_api_patch_op: + description: + - Patch operation to use when using avi_api_update_method as patch. 
+ choices: ["add", "replace", "delete"] + description: + description: + - User defined description for alert syslog config. + name: + description: + - A user-friendly name of the syslog notification. + required: true + syslog_servers: + description: + - The list of syslog servers. + tenant_ref: + description: + - It is a reference to an object of type tenant. + url: + description: + - Avi controller URL of the object. + uuid: + description: + - Unique object identifier of the object. +extends_documentation_fragment: +- community.general.avi + +''' + +EXAMPLES = """ + - name: Create Alert Syslog object to forward all events to external syslog server + avi_alertsyslogconfig: + controller: '{{ controller }}' + name: Roberts-syslog + password: '{{ password }}' + syslog_servers: + - syslog_server: 10.10.0.100 + syslog_server_port: 514 + udp: true + tenant_ref: admin + username: '{{ username }}' +""" + +RETURN = ''' +obj: + description: AlertSyslogConfig (api/alertsyslogconfig) object + returned: success, changed + type: dict +''' + +from ansible.module_utils.basic import AnsibleModule +try: + from ansible_collections.community.general.plugins.module_utils.network.avi.avi import ( + avi_common_argument_spec, avi_ansible_api, HAS_AVI) +except ImportError: + HAS_AVI = False + + +def main(): + argument_specs = dict( + state=dict(default='present', + choices=['absent', 'present']), + avi_api_update_method=dict(default='put', + choices=['put', 'patch']), + avi_api_patch_op=dict(choices=['add', 'replace', 'delete']), + description=dict(type='str',), + name=dict(type='str', required=True), + syslog_servers=dict(type='list',), + tenant_ref=dict(type='str',), + url=dict(type='str',), + uuid=dict(type='str',), + ) + argument_specs.update(avi_common_argument_spec()) + module = AnsibleModule( + argument_spec=argument_specs, supports_check_mode=True) + if not HAS_AVI: + return module.fail_json(msg=( + 'Avi python API SDK (avisdk>=17.1) or requests is not installed. ' + 'For more details visit https://github.com/avinetworks/sdk.')) + return avi_ansible_api(module, 'alertsyslogconfig', + set([])) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/avi/avi_analyticsprofile.py b/plugins/modules/network/avi/avi_analyticsprofile.py new file mode 100644 index 0000000000..866ee9c306 --- /dev/null +++ b/plugins/modules/network/avi/avi_analyticsprofile.py @@ -0,0 +1,611 @@ +#!/usr/bin/python +# +# @author: Gaurav Rastogi (grastogi@avinetworks.com) +# Eric Anderson (eanderson@avinetworks.com) +# module_check: supported +# Avi Version: 17.1.1 +# +# Copyright: (c) 2017 Gaurav Rastogi, +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: avi_analyticsprofile +author: Gaurav Rastogi (@grastogi23) + +short_description: Module for setup of AnalyticsProfile Avi RESTful Object +description: + - This module is used to configure AnalyticsProfile object + - more examples at U(https://github.com/avinetworks/devops) +requirements: [ avisdk ] +options: + state: + description: + - The state that should be applied on the entity. + default: present + choices: ["absent", "present"] + avi_api_update_method: + description: + - Default method for object update is HTTP PUT. + - Setting to patch will override that behavior to use HTTP PATCH. 
+ default: put + choices: ["put", "patch"] + avi_api_patch_op: + description: + - Patch operation to use when using avi_api_update_method as patch. + choices: ["add", "replace", "delete"] + apdex_response_threshold: + description: + - If a client receives an http response in less than the satisfactory latency threshold, the request is considered satisfied. + - It is considered tolerated if it is not satisfied and less than tolerated latency factor multiplied by the satisfactory latency threshold. + - Greater than this number and the client's request is considered frustrated. + - Allowed values are 1-30000. + - Default value when not specified in API or module is interpreted by Avi Controller as 500. + apdex_response_tolerated_factor: + description: + - Client tolerated response latency factor. + - Client must receive a response within this factor times the satisfactory threshold (apdex_response_threshold) to be considered tolerated. + - Allowed values are 1-1000. + - Default value when not specified in API or module is interpreted by Avi Controller as 4.0. + apdex_rtt_threshold: + description: + - Satisfactory client to avi round trip time(rtt). + - Allowed values are 1-2000. + - Default value when not specified in API or module is interpreted by Avi Controller as 250. + apdex_rtt_tolerated_factor: + description: + - Tolerated client to avi round trip time(rtt) factor. + - It is a multiple of apdex_rtt_tolerated_factor. + - Allowed values are 1-1000. + - Default value when not specified in API or module is interpreted by Avi Controller as 4.0. + apdex_rum_threshold: + description: + - If a client is able to load a page in less than the satisfactory latency threshold, the pageload is considered satisfied. + - It is considered tolerated if it is greater than satisfied but less than the tolerated latency multiplied by satisfied latency. + - Greater than this number and the client's request is considered frustrated. + - A pageload includes the time for dns lookup, download of all http objects, and page render time. + - Allowed values are 1-30000. + - Default value when not specified in API or module is interpreted by Avi Controller as 5000. + apdex_rum_tolerated_factor: + description: + - Virtual service threshold factor for tolerated page load time (plt) as multiple of apdex_rum_threshold. + - Allowed values are 1-1000. + - Default value when not specified in API or module is interpreted by Avi Controller as 4.0. + apdex_server_response_threshold: + description: + - A server http response is considered satisfied if latency is less than the satisfactory latency threshold. + - The response is considered tolerated when it is greater than satisfied but less than the tolerated latency factor * s_latency. + - Greater than this number and the server response is considered frustrated. + - Allowed values are 1-30000. + - Default value when not specified in API or module is interpreted by Avi Controller as 400. + apdex_server_response_tolerated_factor: + description: + - Server tolerated response latency factor. + - Servermust response within this factor times the satisfactory threshold (apdex_server_response_threshold) to be considered tolerated. + - Allowed values are 1-1000. + - Default value when not specified in API or module is interpreted by Avi Controller as 4.0. + apdex_server_rtt_threshold: + description: + - Satisfactory client to avi round trip time(rtt). + - Allowed values are 1-2000. + - Default value when not specified in API or module is interpreted by Avi Controller as 125. 
+    apdex_server_rtt_tolerated_factor:
+        description:
+            - Tolerated client to avi round trip time(rtt) factor.
+            - It is a multiple of apdex_server_rtt_threshold.
+            - Allowed values are 1-1000.
+            - Default value when not specified in API or module is interpreted by Avi Controller as 4.0.
+    client_log_config:
+        description:
+            - Configure which logs are sent to the avi controller from the service engines (ses) and how they are processed.
+    client_log_streaming_config:
+        description:
+            - Configure to stream logs to an external server.
+            - Field introduced in 17.1.1.
+    conn_lossy_ooo_threshold:
+        description:
+            - A connection between client and avi is considered lossy when more than this percentage of out of order packets are received.
+            - Allowed values are 1-100.
+            - Default value when not specified in API or module is interpreted by Avi Controller as 50.
+    conn_lossy_timeo_rexmt_threshold:
+        description:
+            - A connection between client and avi is considered lossy when more than this percentage of packets are retransmitted due to timeout.
+            - Allowed values are 1-100.
+            - Default value when not specified in API or module is interpreted by Avi Controller as 20.
+    conn_lossy_total_rexmt_threshold:
+        description:
+            - A connection between client and avi is considered lossy when more than this percentage of packets are retransmitted.
+            - Allowed values are 1-100.
+            - Default value when not specified in API or module is interpreted by Avi Controller as 50.
+    conn_lossy_zero_win_size_event_threshold:
+        description:
+            - A client connection is considered lossy when percentage of times a packet could not be transmitted due to tcp zero window is above this threshold.
+            - Allowed values are 0-100.
+            - Default value when not specified in API or module is interpreted by Avi Controller as 2.
+    conn_server_lossy_ooo_threshold:
+        description:
+            - A connection between avi and server is considered lossy when more than this percentage of out of order packets are received.
+            - Allowed values are 1-100.
+            - Default value when not specified in API or module is interpreted by Avi Controller as 50.
+    conn_server_lossy_timeo_rexmt_threshold:
+        description:
+            - A connection between avi and server is considered lossy when more than this percentage of packets are retransmitted due to timeout.
+            - Allowed values are 1-100.
+            - Default value when not specified in API or module is interpreted by Avi Controller as 20.
+    conn_server_lossy_total_rexmt_threshold:
+        description:
+            - A connection between avi and server is considered lossy when more than this percentage of packets are retransmitted.
+            - Allowed values are 1-100.
+            - Default value when not specified in API or module is interpreted by Avi Controller as 50.
+    conn_server_lossy_zero_win_size_event_threshold:
+        description:
+            - A server connection is considered lossy when percentage of times a packet could not be transmitted due to tcp zero window is above this threshold.
+            - Allowed values are 0-100.
+            - Default value when not specified in API or module is interpreted by Avi Controller as 2.
+    description:
+        description:
+            - User defined description for the object.
+    disable_ondemand_metrics:
+        description:
+            - Virtual service (vs) metrics are processed only when there is live data traffic on the vs.
+            - If the vs is idle for a period of time, as specified by ondemand_metrics_idle_timeout, metrics processing is suspended for that vs.
+            - Field introduced in 18.1.1.
+            - Default value when not specified in API or module is interpreted by Avi Controller as False.
+        type: bool
+    disable_se_analytics:
+        description:
+            - Disable node (service engine) level analytics for vs metrics.
+            - Default value when not specified in API or module is interpreted by Avi Controller as False.
+        type: bool
+    disable_server_analytics:
+        description:
+            - Disable analytics on backend servers.
+            - This may be desired in container environments where there are a large number of ephemeral servers.
+            - Additionally, no healthscore of servers is computed when server analytics is disabled.
+            - Default value when not specified in API or module is interpreted by Avi Controller as False.
+        type: bool
+    disable_vs_analytics:
+        description:
+            - Disable virtualservice (frontend) analytics.
+            - This flag disables metrics and healthscore for virtualservice.
+            - Field introduced in 18.2.1.
+            - Default value when not specified in API or module is interpreted by Avi Controller as False.
+        type: bool
+    enable_advanced_analytics:
+        description:
+            - Enables advanced analytics features like anomaly detection.
+            - If set to false, anomaly computation (and associated rules/events) for vs, pool and server metrics will be disabled.
+            - However, setting it to false reduces cpu and memory requirements for analytics subsystem.
+            - Field introduced in 17.2.13, 18.1.5, 18.2.1.
+            - Default value when not specified in API or module is interpreted by Avi Controller as True.
+        type: bool
+    exclude_client_close_before_request_as_error:
+        description:
+            - Exclude client closed connection before an http request could be completed from being classified as an error.
+            - Default value when not specified in API or module is interpreted by Avi Controller as False.
+        type: bool
+    exclude_dns_policy_drop_as_significant:
+        description:
+            - Exclude dns policy drops from the list of errors.
+            - Field introduced in 17.2.2.
+            - Default value when not specified in API or module is interpreted by Avi Controller as False.
+        type: bool
+    exclude_gs_down_as_error:
+        description:
+            - Exclude queries to gslb services that are operationally down from the list of errors.
+            - Default value when not specified in API or module is interpreted by Avi Controller as False.
+        type: bool
+    exclude_http_error_codes:
+        description:
+            - List of http status codes to be excluded from being classified as an error.
+            - Error connections or responses impact health score, are included as significant logs, and may be classified as part of a dos attack.
+    exclude_invalid_dns_domain_as_error:
+        description:
+            - Exclude dns queries to domains outside the domains configured in the dns application profile from the list of errors.
+            - Default value when not specified in API or module is interpreted by Avi Controller as False.
+        type: bool
+    exclude_invalid_dns_query_as_error:
+        description:
+            - Exclude invalid dns queries from the list of errors.
+            - Default value when not specified in API or module is interpreted by Avi Controller as False.
+        type: bool
+    exclude_no_dns_record_as_error:
+        description:
+            - Exclude queries to domains that did not have configured services/records from the list of errors.
+            - Default value when not specified in API or module is interpreted by Avi Controller as False.
+        type: bool
+    exclude_no_valid_gs_member_as_error:
+        description:
+            - Exclude queries to gslb services that have no available members from the list of errors.
+            - Default value when not specified in API or module is interpreted by Avi Controller as False.
+        type: bool
+    exclude_persistence_change_as_error:
+        description:
+            - Exclude 'persistence server changed while load balancing' from the list of errors.
+            - Default value when not specified in API or module is interpreted by Avi Controller as False.
+        type: bool
+    exclude_server_dns_error_as_error:
+        description:
+            - Exclude server dns error response from the list of errors.
+            - Default value when not specified in API or module is interpreted by Avi Controller as False.
+        type: bool
+    exclude_server_tcp_reset_as_error:
+        description:
+            - Exclude server tcp reset from errors.
+            - It is common for applications like ms exchange.
+            - Default value when not specified in API or module is interpreted by Avi Controller as False.
+        type: bool
+    exclude_sip_error_codes:
+        description:
+            - List of sip status codes to be excluded from being classified as an error.
+            - Field introduced in 17.2.13, 18.1.5, 18.2.1.
+    exclude_syn_retransmit_as_error:
+        description:
+            - Exclude 'server unanswered syns' from the list of errors.
+            - Default value when not specified in API or module is interpreted by Avi Controller as False.
+        type: bool
+    exclude_tcp_reset_as_error:
+        description:
+            - Exclude tcp resets by client from the list of potential errors.
+            - Default value when not specified in API or module is interpreted by Avi Controller as False.
+        type: bool
+    exclude_unsupported_dns_query_as_error:
+        description:
+            - Exclude unsupported dns queries from the list of errors.
+            - Default value when not specified in API or module is interpreted by Avi Controller as False.
+        type: bool
+    healthscore_max_server_limit:
+        description:
+            - Skips health score computation of pool servers when number of servers in a pool is more than this setting.
+            - Allowed values are 0-5000.
+            - Special values are 0 - 'server health score is disabled'.
+            - Field introduced in 17.2.13, 18.1.4.
+            - Default value when not specified in API or module is interpreted by Avi Controller as 20.
+    hs_event_throttle_window:
+        description:
+            - Time window (in secs) within which only unique health change events should occur.
+            - Default value when not specified in API or module is interpreted by Avi Controller as 1209600.
+    hs_max_anomaly_penalty:
+        description:
+            - Maximum penalty that may be deducted from health score for anomalies.
+            - Allowed values are 0-100.
+            - Default value when not specified in API or module is interpreted by Avi Controller as 10.
+    hs_max_resources_penalty:
+        description:
+            - Maximum penalty that may be deducted from health score for high resource utilization.
+            - Allowed values are 0-100.
+            - Default value when not specified in API or module is interpreted by Avi Controller as 25.
+    hs_max_security_penalty:
+        description:
+            - Maximum penalty that may be deducted from health score based on security assessment.
+            - Allowed values are 0-100.
+            - Default value when not specified in API or module is interpreted by Avi Controller as 100.
+    hs_min_dos_rate:
+        description:
+            - Dos connection rate below which the dos security assessment will not kick in.
+            - Default value when not specified in API or module is interpreted by Avi Controller as 1000.
+    hs_performance_boost:
+        description:
+            - Adds free performance score credits to health score.
+            - It can be used for compensating health score for known slow applications.
+            - Allowed values are 0-100.
+            - Default value when not specified in API or module is interpreted by Avi Controller as 0.
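The hs_max_*_penalty options above cap what each factor may subtract from a 100-point health score, while hs_performance_boost adds free credits. The controller's actual formula is not part of this module; the sketch below is only an assumed illustration of how capped penalties and a boost could combine:

# Assumed illustration only -- the real health-score computation happens
# inside the Avi Controller. Penalties are capped by the hs_max_* options
# (defaults shown) and the result is clamped to the 0-100 range.
def health_score(anomaly, resources, security, boost=0,
                 max_anomaly=10, max_resources=25, max_security=100):
    score = 100.0
    score -= min(anomaly, max_anomaly)
    score -= min(resources, max_resources)
    score -= min(security, max_security)
    return max(0.0, min(100.0, score + boost))

print(health_score(anomaly=15, resources=10, security=5, boost=20))  # 95.0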
+    hs_pscore_traffic_threshold_l4_client:
+        description:
+            - Threshold number of connections in 5min, below which apdexr, apdexc, rum_apdex, and other network quality metrics are not computed.
+            - Default value when not specified in API or module is interpreted by Avi Controller as 10.0.
+    hs_pscore_traffic_threshold_l4_server:
+        description:
+            - Threshold number of connections in 5min, below which apdexr, apdexc, rum_apdex, and other network quality metrics are not computed.
+            - Default value when not specified in API or module is interpreted by Avi Controller as 10.0.
+    hs_security_certscore_expired:
+        description:
+            - Score assigned when the certificate has expired.
+            - Allowed values are 0-5.
+            - Default value when not specified in API or module is interpreted by Avi Controller as 0.0.
+    hs_security_certscore_gt30d:
+        description:
+            - Score assigned when the certificate expires in more than 30 days.
+            - Allowed values are 0-5.
+            - Default value when not specified in API or module is interpreted by Avi Controller as 5.0.
+    hs_security_certscore_le07d:
+        description:
+            - Score assigned when the certificate expires in less than or equal to 7 days.
+            - Allowed values are 0-5.
+            - Default value when not specified in API or module is interpreted by Avi Controller as 2.0.
+    hs_security_certscore_le30d:
+        description:
+            - Score assigned when the certificate expires in less than or equal to 30 days.
+            - Allowed values are 0-5.
+            - Default value when not specified in API or module is interpreted by Avi Controller as 4.0.
+    hs_security_chain_invalidity_penalty:
+        description:
+            - Penalty for allowing certificates with invalid chain.
+            - Allowed values are 0-5.
+            - Default value when not specified in API or module is interpreted by Avi Controller as 1.0.
+    hs_security_cipherscore_eq000b:
+        description:
+            - Score assigned when the minimum cipher strength is 0 bits.
+            - Allowed values are 0-5.
+            - Default value when not specified in API or module is interpreted by Avi Controller as 0.0.
+    hs_security_cipherscore_ge128b:
+        description:
+            - Score assigned when the minimum cipher strength is greater than or equal to 128 bits.
+            - Allowed values are 0-5.
+            - Default value when not specified in API or module is interpreted by Avi Controller as 5.0.
+    hs_security_cipherscore_lt128b:
+        description:
+            - Score assigned when the minimum cipher strength is less than 128 bits.
+            - Allowed values are 0-5.
+            - Default value when not specified in API or module is interpreted by Avi Controller as 3.5.
+    hs_security_encalgo_score_none:
+        description:
+            - Score assigned when no algorithm is used for encryption.
+            - Allowed values are 0-5.
+            - Default value when not specified in API or module is interpreted by Avi Controller as 0.0.
+    hs_security_encalgo_score_rc4:
+        description:
+            - Score assigned when rc4 algorithm is used for encryption.
+            - Allowed values are 0-5.
+            - Default value when not specified in API or module is interpreted by Avi Controller as 2.5.
+    hs_security_hsts_penalty:
+        description:
+            - Penalty for not enabling hsts.
+            - Allowed values are 0-5.
+            - Default value when not specified in API or module is interpreted by Avi Controller as 1.0.
+    hs_security_nonpfs_penalty:
+        description:
+            - Penalty for allowing non-pfs handshakes.
+            - Allowed values are 0-5.
+            - Default value when not specified in API or module is interpreted by Avi Controller as 1.0.
+    hs_security_selfsignedcert_penalty:
+        description:
+            - Deprecated.
+            - Allowed values are 0-5.
+ - Default value when not specified in API or module is interpreted by Avi Controller as 1.0. + hs_security_ssl30_score: + description: + - Score assigned when supporting ssl3.0 encryption protocol. + - Allowed values are 0-5. + - Default value when not specified in API or module is interpreted by Avi Controller as 3.5. + hs_security_tls10_score: + description: + - Score assigned when supporting tls1.0 encryption protocol. + - Allowed values are 0-5. + - Default value when not specified in API or module is interpreted by Avi Controller as 5.0. + hs_security_tls11_score: + description: + - Score assigned when supporting tls1.1 encryption protocol. + - Allowed values are 0-5. + - Default value when not specified in API or module is interpreted by Avi Controller as 5.0. + hs_security_tls12_score: + description: + - Score assigned when supporting tls1.2 encryption protocol. + - Allowed values are 0-5. + - Default value when not specified in API or module is interpreted by Avi Controller as 5.0. + hs_security_weak_signature_algo_penalty: + description: + - Penalty for allowing weak signature algorithm(s). + - Allowed values are 0-5. + - Default value when not specified in API or module is interpreted by Avi Controller as 1.0. + name: + description: + - The name of the analytics profile. + required: true + ondemand_metrics_idle_timeout: + description: + - This flag sets the time duration of no live data traffic after which virtual service metrics processing is suspended. + - It is applicable only when disable_ondemand_metrics is set to false. + - Field introduced in 18.1.1. + - Default value when not specified in API or module is interpreted by Avi Controller as 1800. + ranges: + description: + - List of http status code ranges to be excluded from being classified as an error. + resp_code_block: + description: + - Block of http response codes to be excluded from being classified as an error. + - Enum options - AP_HTTP_RSP_4XX, AP_HTTP_RSP_5XX. + sensitive_log_profile: + description: + - Rules applied to the http application log for filtering sensitive information. + - Field introduced in 17.2.10, 18.1.2. + sip_log_depth: + description: + - Maximum number of sip messages added in logs for a sip transaction. + - By default, this value is 20. + - Allowed values are 1-1000. + - Field introduced in 17.2.13, 18.1.5, 18.2.1. + - Default value when not specified in API or module is interpreted by Avi Controller as 20. + tenant_ref: + description: + - It is a reference to an object of type tenant. + url: + description: + - Avi controller URL of the object. + uuid: + description: + - Uuid of the analytics profile. 
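The four hs_security_certscore_* options above tier the certificate contribution to the security score by time to expiry. A small sketch of that tiering with the documented defaults; the mapping itself is performed by the Avi Controller, not by this module:

# Tiering described by hs_security_certscore_expired/le07d/le30d/gt30d
# (defaults shown); illustration only.
def cert_score(days_to_expiry, expired=0.0, le07d=2.0, le30d=4.0, gt30d=5.0):
    if days_to_expiry <= 0:
        return expired
    if days_to_expiry <= 7:
        return le07d
    if days_to_expiry <= 30:
        return le30d
    return gt30d

assert cert_score(-1) == 0.0   # already expired
assert cert_score(5) == 2.0    # expires within 7 days
assert cert_score(20) == 4.0   # expires within 30 days
assert cert_score(90) == 5.0   # more than 30 days out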
+extends_documentation_fragment: +- community.general.avi + +''' + +EXAMPLES = """ + - name: Create a custom Analytics profile object + avi_analyticsprofile: + controller: '{{ controller }}' + username: '{{ username }}' + password: '{{ password }}' + apdex_response_threshold: 500 + apdex_response_tolerated_factor: 4.0 + apdex_rtt_threshold: 250 + apdex_rtt_tolerated_factor: 4.0 + apdex_rum_threshold: 5000 + apdex_rum_tolerated_factor: 4.0 + apdex_server_response_threshold: 400 + apdex_server_response_tolerated_factor: 4.0 + apdex_server_rtt_threshold: 125 + apdex_server_rtt_tolerated_factor: 4.0 + conn_lossy_ooo_threshold: 50 + conn_lossy_timeo_rexmt_threshold: 20 + conn_lossy_total_rexmt_threshold: 50 + conn_lossy_zero_win_size_event_threshold: 2 + conn_server_lossy_ooo_threshold: 50 + conn_server_lossy_timeo_rexmt_threshold: 20 + conn_server_lossy_total_rexmt_threshold: 50 + conn_server_lossy_zero_win_size_event_threshold: 2 + disable_se_analytics: false + disable_server_analytics: false + exclude_client_close_before_request_as_error: false + exclude_persistence_change_as_error: false + exclude_server_tcp_reset_as_error: false + exclude_syn_retransmit_as_error: false + exclude_tcp_reset_as_error: false + hs_event_throttle_window: 1209600 + hs_max_anomaly_penalty: 10 + hs_max_resources_penalty: 25 + hs_max_security_penalty: 100 + hs_min_dos_rate: 1000 + hs_performance_boost: 20 + hs_pscore_traffic_threshold_l4_client: 10.0 + hs_pscore_traffic_threshold_l4_server: 10.0 + hs_security_certscore_expired: 0.0 + hs_security_certscore_gt30d: 5.0 + hs_security_certscore_le07d: 2.0 + hs_security_certscore_le30d: 4.0 + hs_security_chain_invalidity_penalty: 1.0 + hs_security_cipherscore_eq000b: 0.0 + hs_security_cipherscore_ge128b: 5.0 + hs_security_cipherscore_lt128b: 3.5 + hs_security_encalgo_score_none: 0.0 + hs_security_encalgo_score_rc4: 2.5 + hs_security_hsts_penalty: 0.0 + hs_security_nonpfs_penalty: 1.0 + hs_security_selfsignedcert_penalty: 1.0 + hs_security_ssl30_score: 3.5 + hs_security_tls10_score: 5.0 + hs_security_tls11_score: 5.0 + hs_security_tls12_score: 5.0 + hs_security_weak_signature_algo_penalty: 1.0 + name: jason-analytics-profile + tenant_ref: Demo +""" + +RETURN = ''' +obj: + description: AnalyticsProfile (api/analyticsprofile) object + returned: success, changed + type: dict +''' + +from ansible.module_utils.basic import AnsibleModule +try: + from ansible_collections.community.general.plugins.module_utils.network.avi.avi import ( + avi_common_argument_spec, avi_ansible_api, HAS_AVI) +except ImportError: + HAS_AVI = False + + +def main(): + argument_specs = dict( + state=dict(default='present', + choices=['absent', 'present']), + avi_api_update_method=dict(default='put', + choices=['put', 'patch']), + avi_api_patch_op=dict(choices=['add', 'replace', 'delete']), + apdex_response_threshold=dict(type='int',), + apdex_response_tolerated_factor=dict(type='float',), + apdex_rtt_threshold=dict(type='int',), + apdex_rtt_tolerated_factor=dict(type='float',), + apdex_rum_threshold=dict(type='int',), + apdex_rum_tolerated_factor=dict(type='float',), + apdex_server_response_threshold=dict(type='int',), + apdex_server_response_tolerated_factor=dict(type='float',), + apdex_server_rtt_threshold=dict(type='int',), + apdex_server_rtt_tolerated_factor=dict(type='float',), + client_log_config=dict(type='dict',), + client_log_streaming_config=dict(type='dict',), + conn_lossy_ooo_threshold=dict(type='int',), + conn_lossy_timeo_rexmt_threshold=dict(type='int',), + 
conn_lossy_total_rexmt_threshold=dict(type='int',), + conn_lossy_zero_win_size_event_threshold=dict(type='int',), + conn_server_lossy_ooo_threshold=dict(type='int',), + conn_server_lossy_timeo_rexmt_threshold=dict(type='int',), + conn_server_lossy_total_rexmt_threshold=dict(type='int',), + conn_server_lossy_zero_win_size_event_threshold=dict(type='int',), + description=dict(type='str',), + disable_ondemand_metrics=dict(type='bool',), + disable_se_analytics=dict(type='bool',), + disable_server_analytics=dict(type='bool',), + disable_vs_analytics=dict(type='bool',), + enable_advanced_analytics=dict(type='bool',), + exclude_client_close_before_request_as_error=dict(type='bool',), + exclude_dns_policy_drop_as_significant=dict(type='bool',), + exclude_gs_down_as_error=dict(type='bool',), + exclude_http_error_codes=dict(type='list',), + exclude_invalid_dns_domain_as_error=dict(type='bool',), + exclude_invalid_dns_query_as_error=dict(type='bool',), + exclude_no_dns_record_as_error=dict(type='bool',), + exclude_no_valid_gs_member_as_error=dict(type='bool',), + exclude_persistence_change_as_error=dict(type='bool',), + exclude_server_dns_error_as_error=dict(type='bool',), + exclude_server_tcp_reset_as_error=dict(type='bool',), + exclude_sip_error_codes=dict(type='list',), + exclude_syn_retransmit_as_error=dict(type='bool',), + exclude_tcp_reset_as_error=dict(type='bool',), + exclude_unsupported_dns_query_as_error=dict(type='bool',), + healthscore_max_server_limit=dict(type='int',), + hs_event_throttle_window=dict(type='int',), + hs_max_anomaly_penalty=dict(type='int',), + hs_max_resources_penalty=dict(type='int',), + hs_max_security_penalty=dict(type='int',), + hs_min_dos_rate=dict(type='int',), + hs_performance_boost=dict(type='int',), + hs_pscore_traffic_threshold_l4_client=dict(type='float',), + hs_pscore_traffic_threshold_l4_server=dict(type='float',), + hs_security_certscore_expired=dict(type='float',), + hs_security_certscore_gt30d=dict(type='float',), + hs_security_certscore_le07d=dict(type='float',), + hs_security_certscore_le30d=dict(type='float',), + hs_security_chain_invalidity_penalty=dict(type='float',), + hs_security_cipherscore_eq000b=dict(type='float',), + hs_security_cipherscore_ge128b=dict(type='float',), + hs_security_cipherscore_lt128b=dict(type='float',), + hs_security_encalgo_score_none=dict(type='float',), + hs_security_encalgo_score_rc4=dict(type='float',), + hs_security_hsts_penalty=dict(type='float',), + hs_security_nonpfs_penalty=dict(type='float',), + hs_security_selfsignedcert_penalty=dict(type='float',), + hs_security_ssl30_score=dict(type='float',), + hs_security_tls10_score=dict(type='float',), + hs_security_tls11_score=dict(type='float',), + hs_security_tls12_score=dict(type='float',), + hs_security_weak_signature_algo_penalty=dict(type='float',), + name=dict(type='str', required=True), + ondemand_metrics_idle_timeout=dict(type='int',), + ranges=dict(type='list',), + resp_code_block=dict(type='list',), + sensitive_log_profile=dict(type='dict',), + sip_log_depth=dict(type='int',), + tenant_ref=dict(type='str',), + url=dict(type='str',), + uuid=dict(type='str',), + ) + argument_specs.update(avi_common_argument_spec()) + module = AnsibleModule( + argument_spec=argument_specs, supports_check_mode=True) + if not HAS_AVI: + return module.fail_json(msg=( + 'Avi python API SDK (avisdk>=17.1) or requests is not installed. 
'
+        'For more details visit https://github.com/avinetworks/sdk.'))
+    return avi_ansible_api(module, 'analyticsprofile',
+                           set([]))
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/network/avi/avi_api_session.py b/plugins/modules/network/avi/avi_api_session.py
new file mode 100644
index 0000000000..b46bb06fd4
--- /dev/null
+++ b/plugins/modules/network/avi/avi_api_session.py
@@ -0,0 +1,258 @@
+#!/usr/bin/python
+"""
+# Created on Aug 12, 2016
+#
+# @author: Gaurav Rastogi (grastogi@avinetworks.com) GitHub ID: grastogi23
+#
+# module_check: not supported
+#
+# Copyright: (c) 2017 Gaurav Rastogi,
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+"""
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: avi_api_session
+author: Gaurav Rastogi (@grastogi23)
+
+short_description: Avi API Module
+description:
+    - This module can be used for calling any resources defined in Avi REST API. U(https://avinetworks.com/)
+    - This module is useful for invoking HTTP Patch methods and accessing resources that do not have a REST object associated with them.
+requirements: [ avisdk ]
+options:
+    http_method:
+        description:
+            - Allowed HTTP methods for RESTful services that are supported by the Avi Controller.
+        choices: ["get", "put", "post", "patch", "delete"]
+        required: true
+    data:
+        description:
+            - HTTP body in YAML or JSON format.
+    params:
+        description:
+            - Query parameters passed to the HTTP API.
+    path:
+        description:
+            - 'Path for Avi API resource. For example, C(path: virtualservice) will translate to C(api/virtualservice).'
+    timeout:
+        description:
+            - Timeout (in seconds) for Avi API calls.
+        default: 60
+extends_documentation_fragment:
+- community.general.avi
+
+'''
+
+EXAMPLES = '''
+
+  - name: Get Pool Information using avi_api_session
+    avi_api_session:
+      controller: "{{ controller }}"
+      username: "{{ username }}"
+      password: "{{ password }}"
+      http_method: get
+      path: pool
+      params:
+        name: "{{ pool_name }}"
+      api_version: 16.4
+    register: pool_results
+
+  - name: Patch Pool with list of servers
+    avi_api_session:
+      controller: "{{ controller }}"
+      username: "{{ username }}"
+      password: "{{ password }}"
+      http_method: patch
+      path: "{{ pool_path }}"
+      api_version: 16.4
+      data:
+        add:
+          servers:
+            - ip:
+                addr: 10.10.10.10
+                type: V4
+            - ip:
+                addr: 20.20.20.20
+                type: V4
+    register: updated_pool
+
+  - name: Fetch Pool metrics bandwidth and connections rate
+    avi_api_session:
+      controller: "{{ controller }}"
+      username: "{{ username }}"
+      password: "{{ password }}"
+      http_method: get
+      path: analytics/metrics/pool
+      api_version: 16.4
+      params:
+        name: "{{ pool_name }}"
+        metric_id: l4_server.avg_bandwidth,l4_server.avg_complete_conns
+        step: 300
+        limit: 10
+    register: pool_metrics
+
+'''
+
+
+RETURN = '''
+obj:
+    description: Avi REST resource
+    returned: success, changed
+    type: dict
+'''
+
+
+import json
+import time
+from ansible.module_utils.basic import AnsibleModule
+from copy import deepcopy
+
+try:
+    from ansible_collections.community.general.plugins.module_utils.network.avi.avi import (
+        avi_common_argument_spec, ansible_return, avi_obj_cmp,
+        cleanup_absent_fields, HAS_AVI)
+    from ansible_collections.community.general.plugins.module_utils.network.avi.avi_api import (
+        ApiSession, AviCredentials)
+except ImportError:
+    HAS_AVI = False
+
+
+def main():
+    argument_specs = dict(
+        http_method=dict(required=True,
+                         choices=['get', 'put', 'post', 'patch',
+                                  'delete']),
+        path=dict(type='str', required=True),
+        params=dict(type='dict'),
+        data=dict(type='jsonarg'),
+        timeout=dict(type='int', default=60)
+    )
+    argument_specs.update(avi_common_argument_spec())
+    module = AnsibleModule(argument_spec=argument_specs)
+    if not HAS_AVI:
+        return module.fail_json(msg=(
+            'Avi python API SDK (avisdk>=17.1) or requests is not installed. '
+            'For more details visit https://github.com/avinetworks/sdk.'))
+    api_creds = AviCredentials()
+    api_creds.update_from_ansible_module(module)
+    api = ApiSession.get_session(
+        api_creds.controller, api_creds.username, password=api_creds.password,
+        timeout=api_creds.timeout, tenant=api_creds.tenant,
+        tenant_uuid=api_creds.tenant_uuid, token=api_creds.token,
+        port=api_creds.port)
+
+    tenant_uuid = api_creds.tenant_uuid
+    tenant = api_creds.tenant
+    timeout = int(module.params.get('timeout'))
+    # path is a required argument
+    path = module.params.get('path', '')
+    params = module.params.get('params', None)
+    data = module.params.get('data', None)
+    # Get the api_version from module.
+    api_version = api_creds.api_version
+    if data is not None:
+        data = json.loads(data)
+    method = module.params['http_method']
+
+    existing_obj = None
+    # GET never changes state; every other method is assumed to change it
+    # until the comparisons below prove otherwise.
+    changed = method != 'get'
+    gparams = deepcopy(params) if params else {}
+    gparams.update({'include_refs': '', 'include_name': ''})
+
+    # API methods not allowed
+    api_get_not_allowed = ["cluster", "gslbsiteops"]
+    api_post_not_allowed = ["alert", "fileservice"]
+    api_put_not_allowed = ["backup"]
+
+    if method == 'post' and not any(path.startswith(uri) for uri in api_post_not_allowed):
+        # TODO: Above condition should be updated after AV-38981 is fixed
+        # Need to check if the object already exists; in that case the
+        # method is changed to put on the object's uuid, which keeps
+        # repeated runs of the task idempotent.
+        try:
+            using_collection = False
+            if not any(path.startswith(uri) for uri in api_get_not_allowed):
+                if 'name' in data:
+                    gparams['name'] = data['name']
+                using_collection = True
+            if not any(path.startswith(uri) for uri in api_get_not_allowed):
+                rsp = api.get(path, tenant=tenant, tenant_uuid=tenant_uuid,
+                              params=gparams, api_version=api_version)
+                existing_obj = rsp.json()
+                if using_collection:
+                    existing_obj = existing_obj['results'][0]
+        except (IndexError, KeyError):
+            # object is not found
+            pass
+        else:
+            if not any(path.startswith(uri) for uri in api_get_not_allowed):
+                # object is present
+                method = 'put'
+                path += '/' + existing_obj['uuid']
+
+    if method == 'put' and not any(path.startswith(uri) for uri in api_put_not_allowed):
+        # put can happen when a full path is specified or it is put + post
+        if existing_obj is None:
+            using_collection = False
+            if ((len(path.split('/')) == 1) and ('name' in data) and
+                    (not any(path.startswith(uri) for uri in api_get_not_allowed))):
+                gparams['name'] = data['name']
+                using_collection = True
+            rsp = api.get(path, tenant=tenant, tenant_uuid=tenant_uuid,
+                          params=gparams, api_version=api_version)
+            rsp_data = rsp.json()
+            if using_collection:
+                if rsp_data['results']:
+                    existing_obj = rsp_data['results'][0]
+                    path += '/' + existing_obj['uuid']
+                else:
+                    method = 'post'
+            else:
+                if rsp.status_code == 404:
+                    method = 'post'
+                else:
+                    existing_obj = rsp_data
+        if existing_obj:
+            # Only report changed when the desired config differs from the
+            # object already on the controller.
+            changed = not avi_obj_cmp(data, existing_obj)
+            cleanup_absent_fields(data)
+    if method == 'patch':
+        rsp = api.get(path, tenant=tenant, tenant_uuid=tenant_uuid,
+                      params=gparams, api_version=api_version)
+        existing_obj = rsp.json()
+
+    if (method == 'put' and changed) or (method != 'put'):
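+        # Only issue the request when it can change state: a PUT whose
+        # desired config already matches the existing object (changed is
+        # False above) is skipped entirely. getattr() dispatches to
+        # ApiSession.get/put/post/patch/delete by method name.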
+        fn = getattr(api, method)
+        rsp = fn(path, tenant=tenant, tenant_uuid=tenant_uuid, timeout=timeout,
+                 params=params, data=data, api_version=api_version)
+    else:
+        rsp = None
+    if method == 'delete' and rsp.status_code == 404:
+        changed = False
+        rsp.status_code = 200
+    if method == 'patch' and existing_obj and rsp.status_code < 299:
+        # Ideally the comparison should happen with the return values
+        # from the patch API call. However, the Avi API currently returns
+        # a different hostname when GET is used vs PATCH.
+        # tracked as AV-12561
+        if path.startswith('pool'):
+            time.sleep(1)
+        gparams = deepcopy(params) if params else {}
+        gparams.update({'include_refs': '', 'include_name': ''})
+        rsp = api.get(path, tenant=tenant, tenant_uuid=tenant_uuid,
+                      params=gparams, api_version=api_version)
+        new_obj = rsp.json()
+        changed = not avi_obj_cmp(new_obj, existing_obj)
+    if rsp is None:
+        return module.exit_json(changed=changed, obj=existing_obj)
+    return ansible_return(module, rsp, changed, req=data)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/network/avi/avi_api_version.py b/plugins/modules/network/avi/avi_api_version.py
new file mode 100644
index 0000000000..b509483cc2
--- /dev/null
+++ b/plugins/modules/network/avi/avi_api_version.py
@@ -0,0 +1,94 @@
+#!/usr/bin/python
+"""
+# Created on July 24, 2017
+#
+# @author: Vilian Atmadzhov (vilian.atmadzhov@paddypowerbetfair.com) GitHub ID: vivobg
+#
+# module_check: not supported
+#
+# Copyright: (c) 2017 Gaurav Rastogi,
+#            Vilian Atmadzhov,
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+"""
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: avi_api_version
+author: Vilian Atmadzhov (@vivobg)
+
+short_description: Avi API Version Module
+description:
+    - This module can be used to obtain the version of the Avi REST API. U(https://avinetworks.com/)
+requirements: [ avisdk ]
+options: {}
+extends_documentation_fragment:
+- community.general.avi
+
+'''
+
+EXAMPLES = '''
+  - name: Get AVI API version
+    avi_api_version:
+      controller: ""
+      username: ""
+      password: ""
+      tenant: ""
+    register: avi_controller_version
+'''
+
+
+RETURN = '''
+obj:
+    description: Avi REST resource
+    returned: success, changed
+    type: dict
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+
+try:
+    from ansible_collections.community.general.plugins.module_utils.network.avi.avi import (
+        avi_common_argument_spec, ansible_return, HAS_AVI)
+    from ansible_collections.community.general.plugins.module_utils.network.avi.avi_api import (
+        ApiSession, AviCredentials)
+except ImportError:
+    HAS_AVI = False
+
+
+def main():
+    argument_specs = dict()
+    argument_specs.update(avi_common_argument_spec())
+    module = AnsibleModule(argument_spec=argument_specs)
+    if not HAS_AVI:
+        return module.fail_json(msg=(
+            'Avi python API SDK (avisdk>=17.1) or requests is not installed. 
' + 'For more details visit https://github.com/avinetworks/sdk.')) + try: + api_creds = AviCredentials() + api_creds.update_from_ansible_module(module) + api = ApiSession.get_session( + api_creds.controller, api_creds.username, + password=api_creds.password, + timeout=api_creds.timeout, tenant=api_creds.tenant, + tenant_uuid=api_creds.tenant_uuid, token=api_creds.token, + port=api_creds.port) + + remote_api_version = api.remote_api_version + remote = {} + for key in remote_api_version.keys(): + remote[key.lower()] = remote_api_version[key] + api.close() + module.exit_json(changed=False, obj=remote) + except Exception as e: + module.fail_json(msg=("Unable to get an AVI session. %s" % e)) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/avi/avi_applicationpersistenceprofile.py b/plugins/modules/network/avi/avi_applicationpersistenceprofile.py new file mode 100644 index 0000000000..24072d0f4a --- /dev/null +++ b/plugins/modules/network/avi/avi_applicationpersistenceprofile.py @@ -0,0 +1,165 @@ +#!/usr/bin/python +# +# @author: Gaurav Rastogi (grastogi@avinetworks.com) +# Eric Anderson (eanderson@avinetworks.com) +# module_check: supported +# Avi Version: 17.1.1 +# +# Copyright: (c) 2017 Gaurav Rastogi, +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: avi_applicationpersistenceprofile +author: Gaurav Rastogi (@grastogi23) + +short_description: Module for setup of ApplicationPersistenceProfile Avi RESTful Object +description: + - This module is used to configure ApplicationPersistenceProfile object + - more examples at U(https://github.com/avinetworks/devops) +requirements: [ avisdk ] +options: + state: + description: + - The state that should be applied on the entity. + default: present + choices: ["absent", "present"] + avi_api_update_method: + description: + - Default method for object update is HTTP PUT. + - Setting to patch will override that behavior to use HTTP PATCH. + default: put + choices: ["put", "patch"] + avi_api_patch_op: + description: + - Patch operation to use when using avi_api_update_method as patch. + choices: ["add", "replace", "delete"] + app_cookie_persistence_profile: + description: + - Specifies the application cookie persistence profile parameters. + description: + description: + - User defined description for the object. + hdr_persistence_profile: + description: + - Specifies the custom http header persistence profile parameters. + http_cookie_persistence_profile: + description: + - Specifies the http cookie persistence profile parameters. + ip_persistence_profile: + description: + - Specifies the client ip persistence profile parameters. + is_federated: + description: + - This field describes the object's replication scope. + - If the field is set to false, then the object is visible within the controller-cluster and its associated service-engines. + - If the field is set to true, then the object is replicated across the federation. + - Field introduced in 17.1.3. + - Default value when not specified in API or module is interpreted by Avi Controller as False. + type: bool + name: + description: + - A user-friendly name for the persistence profile. + required: true + persistence_type: + description: + - Method used to persist clients to the same server for a duration of time or a session. 
+ - Enum options - PERSISTENCE_TYPE_CLIENT_IP_ADDRESS, PERSISTENCE_TYPE_HTTP_COOKIE, PERSISTENCE_TYPE_TLS, PERSISTENCE_TYPE_CLIENT_IPV6_ADDRESS, + - PERSISTENCE_TYPE_CUSTOM_HTTP_HEADER, PERSISTENCE_TYPE_APP_COOKIE, PERSISTENCE_TYPE_GSLB_SITE. + - Default value when not specified in API or module is interpreted by Avi Controller as PERSISTENCE_TYPE_CLIENT_IP_ADDRESS. + required: true + server_hm_down_recovery: + description: + - Specifies behavior when a persistent server has been marked down by a health monitor. + - Enum options - HM_DOWN_PICK_NEW_SERVER, HM_DOWN_ABORT_CONNECTION, HM_DOWN_CONTINUE_PERSISTENT_SERVER. + - Default value when not specified in API or module is interpreted by Avi Controller as HM_DOWN_PICK_NEW_SERVER. + tenant_ref: + description: + - It is a reference to an object of type tenant. + url: + description: + - Avi controller URL of the object. + uuid: + description: + - Uuid of the persistence profile. +extends_documentation_fragment: +- community.general.avi + +''' + +EXAMPLES = """ + - name: Create an Application Persistence setting using http cookie. + avi_applicationpersistenceprofile: + controller: '{{ controller }}' + username: '{{ username }}' + password: '{{ password }}' + http_cookie_persistence_profile: + always_send_cookie: false + cookie_name: My-HTTP + key: + - aes_key: ShYGZdMks8j6Bpvm2sCvaXWzvXms2Z9ob+TTjRy46lQ= + name: c1276819-550c-4adf-912d-59efa5fd7269 + - aes_key: OGsyVk84VCtyMENFOW0rMnRXVnNrb0RzdG5mT29oamJRb0dlbHZVSjR1az0= + name: a080de57-77c3-4580-a3ea-e7a6493c14fd + - aes_key: UVN0cU9HWmFUM2xOUzBVcmVXaHFXbnBLVUUxMU1VSktSVU5HWjJOWmVFMTBUMUV4UmxsNk4xQmFZejA9 + name: 60478846-33c6-484d-868d-bbc324fce4a5 + timeout: 15 + name: My-HTTP-Cookie + persistence_type: PERSISTENCE_TYPE_HTTP_COOKIE + server_hm_down_recovery: HM_DOWN_PICK_NEW_SERVER + tenant_ref: Demo +""" + +RETURN = ''' +obj: + description: ApplicationPersistenceProfile (api/applicationpersistenceprofile) object + returned: success, changed + type: dict +''' + +from ansible.module_utils.basic import AnsibleModule +try: + from ansible_collections.community.general.plugins.module_utils.network.avi.avi import ( + avi_common_argument_spec, avi_ansible_api, HAS_AVI) +except ImportError: + HAS_AVI = False + + +def main(): + argument_specs = dict( + state=dict(default='present', + choices=['absent', 'present']), + avi_api_update_method=dict(default='put', + choices=['put', 'patch']), + avi_api_patch_op=dict(choices=['add', 'replace', 'delete']), + app_cookie_persistence_profile=dict(type='dict',), + description=dict(type='str',), + hdr_persistence_profile=dict(type='dict',), + http_cookie_persistence_profile=dict(type='dict',), + ip_persistence_profile=dict(type='dict',), + is_federated=dict(type='bool',), + name=dict(type='str', required=True), + persistence_type=dict(type='str', required=True), + server_hm_down_recovery=dict(type='str',), + tenant_ref=dict(type='str',), + url=dict(type='str',), + uuid=dict(type='str',), + ) + argument_specs.update(avi_common_argument_spec()) + module = AnsibleModule( + argument_spec=argument_specs, supports_check_mode=True) + if not HAS_AVI: + return module.fail_json(msg=( + 'Avi python API SDK (avisdk>=17.1) or requests is not installed. 
' + 'For more details visit https://github.com/avinetworks/sdk.')) + return avi_ansible_api(module, 'applicationpersistenceprofile', + set([])) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/avi/avi_applicationprofile.py b/plugins/modules/network/avi/avi_applicationprofile.py new file mode 100644 index 0000000000..10f8b0038f --- /dev/null +++ b/plugins/modules/network/avi/avi_applicationprofile.py @@ -0,0 +1,218 @@ +#!/usr/bin/python +# +# @author: Gaurav Rastogi (grastogi@avinetworks.com) +# Eric Anderson (eanderson@avinetworks.com) +# module_check: supported +# Avi Version: 17.1.1 +# +# Copyright: (c) 2017 Gaurav Rastogi, +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: avi_applicationprofile +author: Gaurav Rastogi (@grastogi23) + +short_description: Module for setup of ApplicationProfile Avi RESTful Object +description: + - This module is used to configure ApplicationProfile object + - more examples at U(https://github.com/avinetworks/devops) +requirements: [ avisdk ] +options: + state: + description: + - The state that should be applied on the entity. + default: present + choices: ["absent", "present"] + avi_api_update_method: + description: + - Default method for object update is HTTP PUT. + - Setting to patch will override that behavior to use HTTP PATCH. + default: put + choices: ["put", "patch"] + avi_api_patch_op: + description: + - Patch operation to use when using avi_api_update_method as patch. + choices: ["add", "replace", "delete"] + cloud_config_cksum: + description: + - Checksum of application profiles. + - Internally set by cloud connector. + - Field introduced in 17.2.14, 18.1.5, 18.2.1. + created_by: + description: + - Name of the application profile creator. + - Field introduced in 17.2.14, 18.1.5, 18.2.1. + description: + description: + - User defined description for the object. + dns_service_profile: + description: + - Specifies various dns service related controls for virtual service. + dos_rl_profile: + description: + - Specifies various security related controls for virtual service. + http_profile: + description: + - Specifies the http application proxy profile parameters. + name: + description: + - The name of the application profile. + required: true + preserve_client_ip: + description: + - Specifies if client ip needs to be preserved for backend connection. + - Not compatible with connection multiplexing. + - Default value when not specified in API or module is interpreted by Avi Controller as False. + type: bool + preserve_client_port: + description: + - Specifies if we need to preserve client port while preserving client ip for backend connections. + - Field introduced in 17.2.7. + - Default value when not specified in API or module is interpreted by Avi Controller as False. + type: bool + sip_service_profile: + description: + - Specifies various sip service related controls for virtual service. + - Field introduced in 17.2.8, 18.1.3, 18.2.1. + tcp_app_profile: + description: + - Specifies the tcp application proxy profile parameters. + tenant_ref: + description: + - It is a reference to an object of type tenant. + type: + description: + - Specifies which application layer proxy is enabled for the virtual service. 
+ - Enum options - APPLICATION_PROFILE_TYPE_L4, APPLICATION_PROFILE_TYPE_HTTP, APPLICATION_PROFILE_TYPE_SYSLOG, APPLICATION_PROFILE_TYPE_DNS, + - APPLICATION_PROFILE_TYPE_SSL, APPLICATION_PROFILE_TYPE_SIP. + required: true + url: + description: + - Avi controller URL of the object. + uuid: + description: + - Uuid of the application profile. +extends_documentation_fragment: +- community.general.avi + +''' + +EXAMPLES = """ + - name: Create an Application Profile for HTTP application enabled for SSL traffic + avi_applicationprofile: + controller: '{{ controller }}' + username: '{{ username }}' + password: '{{ password }}' + http_profile: + cache_config: + age_header: true + aggressive: false + date_header: true + default_expire: 600 + enabled: false + heuristic_expire: false + max_cache_size: 0 + max_object_size: 4194304 + mime_types_group_refs: + - admin:System-Cacheable-Resource-Types + min_object_size: 100 + query_cacheable: false + xcache_header: true + client_body_timeout: 0 + client_header_timeout: 10000 + client_max_body_size: 0 + client_max_header_size: 12 + client_max_request_size: 48 + compression_profile: + compressible_content_ref: admin:System-Compressible-Content-Types + compression: false + remove_accept_encoding_header: true + type: AUTO_COMPRESSION + connection_multiplexing_enabled: true + hsts_enabled: false + hsts_max_age: 365 + http_to_https: false + httponly_enabled: false + keepalive_header: false + keepalive_timeout: 30000 + max_bad_rps_cip: 0 + max_bad_rps_cip_uri: 0 + max_bad_rps_uri: 0 + max_rps_cip: 0 + max_rps_cip_uri: 0 + max_rps_unknown_cip: 0 + max_rps_unknown_uri: 0 + max_rps_uri: 0 + post_accept_timeout: 30000 + secure_cookie_enabled: false + server_side_redirect_to_https: false + spdy_enabled: false + spdy_fwd_proxy_mode: false + ssl_client_certificate_mode: SSL_CLIENT_CERTIFICATE_NONE + ssl_everywhere_enabled: false + websockets_enabled: true + x_forwarded_proto_enabled: false + xff_alternate_name: X-Forwarded-For + xff_enabled: true + name: System-HTTP + tenant_ref: admin + type: APPLICATION_PROFILE_TYPE_HTTP +""" + +RETURN = ''' +obj: + description: ApplicationProfile (api/applicationprofile) object + returned: success, changed + type: dict +''' + +from ansible.module_utils.basic import AnsibleModule +try: + from ansible_collections.community.general.plugins.module_utils.network.avi.avi import ( + avi_common_argument_spec, avi_ansible_api, HAS_AVI) +except ImportError: + HAS_AVI = False + + +def main(): + argument_specs = dict( + state=dict(default='present', + choices=['absent', 'present']), + avi_api_update_method=dict(default='put', + choices=['put', 'patch']), + avi_api_patch_op=dict(choices=['add', 'replace', 'delete']), + cloud_config_cksum=dict(type='str',), + created_by=dict(type='str',), + description=dict(type='str',), + dns_service_profile=dict(type='dict',), + dos_rl_profile=dict(type='dict',), + http_profile=dict(type='dict',), + name=dict(type='str', required=True), + preserve_client_ip=dict(type='bool',), + preserve_client_port=dict(type='bool',), + sip_service_profile=dict(type='dict',), + tcp_app_profile=dict(type='dict',), + tenant_ref=dict(type='str',), + type=dict(type='str', required=True), + url=dict(type='str',), + uuid=dict(type='str',), + ) + argument_specs.update(avi_common_argument_spec()) + module = AnsibleModule( + argument_spec=argument_specs, supports_check_mode=True) + if not HAS_AVI: + return module.fail_json(msg=( + 'Avi python API SDK (avisdk>=17.1) or requests is not installed. 
' + 'For more details visit https://github.com/avinetworks/sdk.')) + return avi_ansible_api(module, 'applicationprofile', + set([])) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/avi/avi_authprofile.py b/plugins/modules/network/avi/avi_authprofile.py new file mode 100644 index 0000000000..dd5b2f3a0d --- /dev/null +++ b/plugins/modules/network/avi/avi_authprofile.py @@ -0,0 +1,165 @@ +#!/usr/bin/python +# +# @author: Gaurav Rastogi (grastogi@avinetworks.com) +# Eric Anderson (eanderson@avinetworks.com) +# module_check: supported +# Avi Version: 17.1.1 +# +# Copyright: (c) 2017 Gaurav Rastogi, +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: avi_authprofile +author: Gaurav Rastogi (@grastogi23) + +short_description: Module for setup of AuthProfile Avi RESTful Object +description: + - This module is used to configure AuthProfile object + - more examples at U(https://github.com/avinetworks/devops) +requirements: [ avisdk ] +options: + state: + description: + - The state that should be applied on the entity. + default: present + choices: ["absent", "present"] + avi_api_update_method: + description: + - Default method for object update is HTTP PUT. + - Setting to patch will override that behavior to use HTTP PATCH. + default: put + choices: ["put", "patch"] + avi_api_patch_op: + description: + - Patch operation to use when using avi_api_update_method as patch. + choices: ["add", "replace", "delete"] + description: + description: + - User defined description for the object. + http: + description: + - Http user authentication params. + ldap: + description: + - Ldap server and directory settings. + name: + description: + - Name of the auth profile. + required: true + pa_agent_ref: + description: + - Pingaccessagent uuid. + - It is a reference to an object of type pingaccessagent. + - Field introduced in 18.2.3. + saml: + description: + - Saml settings. + - Field introduced in 17.2.3. + tacacs_plus: + description: + - Tacacs+ settings. + tenant_ref: + description: + - It is a reference to an object of type tenant. + type: + description: + - Type of the auth profile. + - Enum options - AUTH_PROFILE_LDAP, AUTH_PROFILE_TACACS_PLUS, AUTH_PROFILE_SAML, AUTH_PROFILE_PINGACCESS. + required: true + url: + description: + - Avi controller URL of the object. + uuid: + description: + - Uuid of the auth profile. 
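When any of these modules is invoked with avi_api_update_method: patch, the avi_api_patch_op option above selects the operation, and the changed fields travel under that key in the PATCH body, matching the data: add: ... shape in the avi_api_session example earlier. A hypothetical sketch of building such a payload (the helper name is invented for illustration):

# Hypothetical helper, not part of the collection: nest the fields under
# the chosen patch operation, mirroring the avi_api_session PATCH example.
def build_patch_body(op, fields):
    if op not in ('add', 'replace', 'delete'):
        raise ValueError('unsupported avi_api_patch_op: %s' % op)
    return {op: fields}

print(build_patch_body('add', {'servers': [{'ip': {'addr': '10.10.10.10',
                                                   'type': 'V4'}}]}))
# {'add': {'servers': [{'ip': {'addr': '10.10.10.10', 'type': 'V4'}}]}}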
+extends_documentation_fragment: +- community.general.avi + +''' + +EXAMPLES = """ + - name: Create user authorization profile based on the LDAP + avi_authprofile: + controller: '{{ controller }}' + password: '{{ password }}' + username: '{{ username }}' + http: + cache_expiration_time: 5 + group_member_is_full_dn: false + ldap: + base_dn: dc=avi,dc=local + bind_as_administrator: true + port: 389 + security_mode: AUTH_LDAP_SECURE_NONE + server: + - 10.10.0.100 + settings: + admin_bind_dn: user@avi.local + group_filter: (objectClass=*) + group_member_attribute: member + group_member_is_full_dn: true + group_search_dn: dc=avi,dc=local + group_search_scope: AUTH_LDAP_SCOPE_SUBTREE + ignore_referrals: true + password: password + user_id_attribute: samAccountname + user_search_dn: dc=avi,dc=local + user_search_scope: AUTH_LDAP_SCOPE_ONE + name: ProdAuth + tenant_ref: admin + type: AUTH_PROFILE_LDAP +""" + +RETURN = ''' +obj: + description: AuthProfile (api/authprofile) object + returned: success, changed + type: dict +''' + +from ansible.module_utils.basic import AnsibleModule +try: + from ansible_collections.community.general.plugins.module_utils.network.avi.avi import ( + avi_common_argument_spec, avi_ansible_api, HAS_AVI) +except ImportError: + HAS_AVI = False + + +def main(): + argument_specs = dict( + state=dict(default='present', + choices=['absent', 'present']), + avi_api_update_method=dict(default='put', + choices=['put', 'patch']), + avi_api_patch_op=dict(choices=['add', 'replace', 'delete']), + description=dict(type='str',), + http=dict(type='dict',), + ldap=dict(type='dict',), + name=dict(type='str', required=True), + pa_agent_ref=dict(type='str',), + saml=dict(type='dict',), + tacacs_plus=dict(type='dict',), + tenant_ref=dict(type='str',), + type=dict(type='str', required=True), + url=dict(type='str',), + uuid=dict(type='str',), + ) + argument_specs.update(avi_common_argument_spec()) + module = AnsibleModule( + argument_spec=argument_specs, supports_check_mode=True) + if not HAS_AVI: + return module.fail_json(msg=( + 'Avi python API SDK (avisdk>=17.1) or requests is not installed. ' + 'For more details visit https://github.com/avinetworks/sdk.')) + return avi_ansible_api(module, 'authprofile', + set([])) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/avi/avi_autoscalelaunchconfig.py b/plugins/modules/network/avi/avi_autoscalelaunchconfig.py new file mode 100644 index 0000000000..a1655f617d --- /dev/null +++ b/plugins/modules/network/avi/avi_autoscalelaunchconfig.py @@ -0,0 +1,133 @@ +#!/usr/bin/python +# +# @author: Gaurav Rastogi (grastogi@avinetworks.com) +# Eric Anderson (eanderson@avinetworks.com) +# module_check: supported +# +# Copyright: (c) 2017 Gaurav Rastogi, +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: avi_autoscalelaunchconfig +author: Chaitanya Deshpande (@chaitanyaavi) + +short_description: Module for setup of AutoScaleLaunchConfig Avi RESTful Object +description: + - This module is used to configure AutoScaleLaunchConfig object + - more examples at U(https://github.com/avinetworks/devops) +requirements: [ avisdk ] +options: + state: + description: + - The state that should be applied on the entity. + default: present + choices: ["absent", "present"] + avi_api_update_method: + description: + - Default method for object update is HTTP PUT. 
+ - Setting to patch will override that behavior to use HTTP PATCH. + default: put + choices: ["put", "patch"] + avi_api_patch_op: + description: + - Patch operation to use when using avi_api_update_method as patch. + choices: ["add", "replace", "delete"] + description: + description: + - User defined description for the object. + image_id: + description: + - Unique id of the amazon machine image (ami) or openstack vm id. + mesos: + description: + - Autoscalemesossettings settings for autoscalelaunchconfig. + name: + description: + - Name of the object. + required: true + openstack: + description: + - Autoscaleopenstacksettings settings for autoscalelaunchconfig. + tenant_ref: + description: + - It is a reference to an object of type tenant. + url: + description: + - Avi controller URL of the object. + use_external_asg: + description: + - If set to true, serverautoscalepolicy will use the autoscaling group (external_autoscaling_groups) from pool to perform scale up and scale down. + - Pool should have single autoscaling group configured. + - Field introduced in 17.2.3. + - Default value when not specified in API or module is interpreted by Avi Controller as True. + type: bool + uuid: + description: + - Unique object identifier of the object. +extends_documentation_fragment: +- community.general.avi + +''' + +EXAMPLES = """ + - name: Create an Autoscale Launch configuration. + avi_autoscalelaunchconfig: + controller: '{{ controller }}' + username: '{{ username }}' + password: '{{ password }}' + image_id: default + name: default-autoscalelaunchconfig + tenant_ref: admin +""" + +RETURN = ''' +obj: + description: AutoScaleLaunchConfig (api/autoscalelaunchconfig) object + returned: success, changed + type: dict +''' + +from ansible.module_utils.basic import AnsibleModule +try: + from ansible_collections.community.general.plugins.module_utils.network.avi.avi import ( + avi_common_argument_spec, avi_ansible_api, HAS_AVI) +except ImportError: + HAS_AVI = False + + +def main(): + argument_specs = dict( + state=dict(default='present', + choices=['absent', 'present']), + avi_api_update_method=dict(default='put', + choices=['put', 'patch']), + avi_api_patch_op=dict(choices=['add', 'replace', 'delete']), + description=dict(type='str',), + image_id=dict(type='str',), + mesos=dict(type='dict',), + name=dict(type='str', required=True), + openstack=dict(type='dict',), + tenant_ref=dict(type='str',), + url=dict(type='str',), + use_external_asg=dict(type='bool',), + uuid=dict(type='str',), + ) + argument_specs.update(avi_common_argument_spec()) + module = AnsibleModule( + argument_spec=argument_specs, supports_check_mode=True) + if not HAS_AVI: + return module.fail_json(msg=( + 'Avi python API SDK (avisdk>=17.1) or requests is not installed. 
' + 'For more details visit https://github.com/avinetworks/sdk.')) + return avi_ansible_api(module, 'autoscalelaunchconfig', + set([])) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/avi/avi_backup.py b/plugins/modules/network/avi/avi_backup.py new file mode 100644 index 0000000000..8935563b3a --- /dev/null +++ b/plugins/modules/network/avi/avi_backup.py @@ -0,0 +1,131 @@ +#!/usr/bin/python +# +# @author: Gaurav Rastogi (grastogi@avinetworks.com) +# Eric Anderson (eanderson@avinetworks.com) +# module_check: supported +# Avi Version: 17.1.1 +# +# Copyright: (c) 2017 Gaurav Rastogi, +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: avi_backup +author: Gaurav Rastogi (@grastogi23) + +short_description: Module for setup of Backup Avi RESTful Object +description: + - This module is used to configure Backup object + - more examples at U(https://github.com/avinetworks/devops) +requirements: [ avisdk ] +options: + state: + description: + - The state that should be applied on the entity. + default: present + choices: ["absent", "present"] + avi_api_update_method: + description: + - Default method for object update is HTTP PUT. + - Setting to patch will override that behavior to use HTTP PATCH. + default: put + choices: ["put", "patch"] + avi_api_patch_op: + description: + - Patch operation to use when using avi_api_update_method as patch. + choices: ["add", "replace", "delete"] + backup_config_ref: + description: + - Backupconfiguration information. + - It is a reference to an object of type backupconfiguration. + file_name: + description: + - The file name of backup. + required: true + local_file_url: + description: + - Url to download the backup file. + remote_file_url: + description: + - Url to download the backup file. + scheduler_ref: + description: + - Scheduler information. + - It is a reference to an object of type scheduler. + tenant_ref: + description: + - It is a reference to an object of type tenant. + timestamp: + description: + - Unix timestamp of when the backup file is created. + url: + description: + - Avi controller URL of the object. + uuid: + description: + - Unique object identifier of the object. 
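The options above feed a main() that, like every avi_* module in this commit, is a thin wrapper around two shared helpers. A schematic restatement of that pattern, with 'backup' used as the example resource name and the option list abbreviated:

# Schematic restatement of the wrapper pattern shared by the avi_* modules:
# module-specific options, plus the common connection options, handed off to
# avi_ansible_api for CRUD and idempotency.
from ansible.module_utils.basic import AnsibleModule

try:
    from ansible_collections.community.general.plugins.module_utils.network.avi.avi import (
        avi_common_argument_spec, avi_ansible_api, HAS_AVI)
except ImportError:
    HAS_AVI = False


def main():
    argument_specs = dict(
        state=dict(default='present', choices=['absent', 'present']),
        name=dict(type='str', required=True),
        # ... module-specific options go here ...
    )
    argument_specs.update(avi_common_argument_spec())  # controller, username, ...
    module = AnsibleModule(argument_spec=argument_specs, supports_check_mode=True)
    if not HAS_AVI:
        return module.fail_json(msg='avisdk or requests is not installed.')
    return avi_ansible_api(module, 'backup', set([]))  # resource name per module


if __name__ == '__main__':
    main()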
+extends_documentation_fragment: +- community.general.avi + +''' + +EXAMPLES = """ +- name: Example to create Backup object + avi_backup: + controller: 10.10.25.42 + username: admin + password: something + state: present + name: sample_backup +""" + +RETURN = ''' +obj: + description: Backup (api/backup) object + returned: success, changed + type: dict +''' + +from ansible.module_utils.basic import AnsibleModule +try: + from ansible_collections.community.general.plugins.module_utils.network.avi.avi import ( + avi_common_argument_spec, avi_ansible_api, HAS_AVI) +except ImportError: + HAS_AVI = False + + +def main(): + argument_specs = dict( + state=dict(default='present', + choices=['absent', 'present']), + avi_api_update_method=dict(default='put', + choices=['put', 'patch']), + avi_api_patch_op=dict(choices=['add', 'replace', 'delete']), + backup_config_ref=dict(type='str',), + file_name=dict(type='str', required=True), + local_file_url=dict(type='str',), + remote_file_url=dict(type='str',), + scheduler_ref=dict(type='str',), + tenant_ref=dict(type='str',), + timestamp=dict(type='str',), + url=dict(type='str',), + uuid=dict(type='str',), + ) + argument_specs.update(avi_common_argument_spec()) + module = AnsibleModule( + argument_spec=argument_specs, supports_check_mode=True) + if not HAS_AVI: + return module.fail_json(msg=( + 'Avi python API SDK (avisdk>=17.1) or requests is not installed. ' + 'For more details visit https://github.com/avinetworks/sdk.')) + return avi_ansible_api(module, 'backup', + set([])) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/avi/avi_backupconfiguration.py b/plugins/modules/network/avi/avi_backupconfiguration.py new file mode 100644 index 0000000000..17f7a755fb --- /dev/null +++ b/plugins/modules/network/avi/avi_backupconfiguration.py @@ -0,0 +1,167 @@ +#!/usr/bin/python +# +# @author: Gaurav Rastogi (grastogi@avinetworks.com) +# Eric Anderson (eanderson@avinetworks.com) +# module_check: supported +# +# Copyright: (c) 2017 Gaurav Rastogi, +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: avi_backupconfiguration +author: Gaurav Rastogi (@grastogi23) + +short_description: Module for setup of BackupConfiguration Avi RESTful Object +description: + - This module is used to configure BackupConfiguration object + - more examples at U(https://github.com/avinetworks/devops) +requirements: [ avisdk ] +options: + state: + description: + - The state that should be applied on the entity. + default: present + choices: ["absent", "present"] + avi_api_update_method: + description: + - Default method for object update is HTTP PUT. + - Setting to patch will override that behavior to use HTTP PATCH. + default: put + choices: ["put", "patch"] + avi_api_patch_op: + description: + - Patch operation to use when using avi_api_update_method as patch. + choices: ["add", "replace", "delete"] + aws_access_key: + description: + - Aws access key id. + - Field introduced in 18.2.3. + aws_bucket_id: + description: + - Aws bucket. + - Field introduced in 18.2.3. + aws_secret_access: + description: + - Aws secret access key. + - Field introduced in 18.2.3. + backup_file_prefix: + description: + - Prefix of the exported configuration file. + - Field introduced in 17.1.1. + backup_passphrase: + description: + - Passphrase of backup configuration. 
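+# backup_passphrase, aws_access_key and aws_secret_access are marked no_log
+# in the argument spec below, so they never appear in task output. A minimal
+# sketch of an S3-backed configuration, assuming the bucket name and vaulted
+# variables shown are placeholders:
+#
+#   - name: Ship configuration backups to S3
+#     avi_backupconfiguration:
+#       controller: 10.10.25.42
+#       username: admin
+#       password: something
+#       name: s3-backupconfiguration
+#       upload_to_s3: true
+#       aws_bucket_id: my-avi-backups
+#       aws_access_key: '{{ vault_aws_access_key }}'
+#       aws_secret_access: '{{ vault_aws_secret_key }}'
+#       backup_passphrase: '{{ vault_backup_passphrase }}'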
+ maximum_backups_stored: + description: + - Rotate the backup files based on this count. + - Allowed values are 1-20. + - Default value when not specified in API or module is interpreted by Avi Controller as 4. + name: + description: + - Name of backup configuration. + required: true + remote_directory: + description: + - Directory at remote destination with write permission for ssh user. + remote_hostname: + description: + - Remote destination. + save_local: + description: + - Local backup. + type: bool + ssh_user_ref: + description: + - Access credentials for remote destination. + - It is a reference to an object of type cloudconnectoruser. + tenant_ref: + description: + - It is a reference to an object of type tenant. + upload_to_remote_host: + description: + - Remote backup. + type: bool + upload_to_s3: + description: + - Cloud backup. + - Field introduced in 18.2.3. + type: bool + url: + description: + - Avi controller URL of the object. + uuid: + description: + - Unique object identifier of the object. +extends_documentation_fragment: +- community.general.avi + +''' + +EXAMPLES = """ +- name: Example to create BackupConfiguration object + avi_backupconfiguration: + controller: 10.10.25.42 + username: admin + password: something + state: present + name: sample_backupconfiguration +""" + +RETURN = ''' +obj: + description: BackupConfiguration (api/backupconfiguration) object + returned: success, changed + type: dict +''' + +from ansible.module_utils.basic import AnsibleModule +try: + from ansible_collections.community.general.plugins.module_utils.network.avi.avi import ( + avi_common_argument_spec, avi_ansible_api, HAS_AVI) +except ImportError: + HAS_AVI = False + + +def main(): + argument_specs = dict( + state=dict(default='present', + choices=['absent', 'present']), + avi_api_update_method=dict(default='put', + choices=['put', 'patch']), + avi_api_patch_op=dict(choices=['add', 'replace', 'delete']), + aws_access_key=dict(type='str', no_log=True,), + aws_bucket_id=dict(type='str',), + aws_secret_access=dict(type='str', no_log=True,), + backup_file_prefix=dict(type='str',), + backup_passphrase=dict(type='str', no_log=True,), + maximum_backups_stored=dict(type='int',), + name=dict(type='str', required=True), + remote_directory=dict(type='str',), + remote_hostname=dict(type='str',), + save_local=dict(type='bool',), + ssh_user_ref=dict(type='str',), + tenant_ref=dict(type='str',), + upload_to_remote_host=dict(type='bool',), + upload_to_s3=dict(type='bool',), + url=dict(type='str',), + uuid=dict(type='str',), + ) + argument_specs.update(avi_common_argument_spec()) + module = AnsibleModule( + argument_spec=argument_specs, supports_check_mode=True) + if not HAS_AVI: + return module.fail_json(msg=( + 'Avi python API SDK (avisdk>=17.1) or requests is not installed. 
' + 'For more details visit https://github.com/avinetworks/sdk.')) + return avi_ansible_api(module, 'backupconfiguration', + set(['backup_passphrase', 'aws_access_key', 'aws_secret_access'])) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/avi/avi_certificatemanagementprofile.py b/plugins/modules/network/avi/avi_certificatemanagementprofile.py new file mode 100644 index 0000000000..b704c410cf --- /dev/null +++ b/plugins/modules/network/avi/avi_certificatemanagementprofile.py @@ -0,0 +1,118 @@ +#!/usr/bin/python +# +# @author: Gaurav Rastogi (grastogi@avinetworks.com) +# Eric Anderson (eanderson@avinetworks.com) +# module_check: supported +# Avi Version: 17.1.1 +# +# Copyright: (c) 2017 Gaurav Rastogi, +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: avi_certificatemanagementprofile +author: Gaurav Rastogi (@grastogi23) + +short_description: Module for setup of CertificateManagementProfile Avi RESTful Object +description: + - This module is used to configure CertificateManagementProfile object + - more examples at U(https://github.com/avinetworks/devops) +requirements: [ avisdk ] +options: + state: + description: + - The state that should be applied on the entity. + default: present + choices: ["absent", "present"] + avi_api_update_method: + description: + - Default method for object update is HTTP PUT. + - Setting to patch will override that behavior to use HTTP PATCH. + default: put + choices: ["put", "patch"] + avi_api_patch_op: + description: + - Patch operation to use when using avi_api_update_method as patch. + choices: ["add", "replace", "delete"] + name: + description: + - Name of the pki profile. + required: true + script_params: + description: + - List of customparams. + script_path: + description: + - Script_path of certificatemanagementprofile. + required: true + tenant_ref: + description: + - It is a reference to an object of type tenant. + url: + description: + - Avi controller URL of the object. + uuid: + description: + - Unique object identifier of the object. 
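+# script_path must point at a script already present on the controller, and
+# script_params is a list of name/value pairs handed to it. A minimal sketch,
+# assuming the path and parameter names below (illustrative only, not defaults):
+#
+#   - name: Create a certificate management profile backed by a controller script
+#     avi_certificatemanagementprofile:
+#       controller: 10.10.25.42
+#       username: admin
+#       password: something
+#       name: sample_certificatemanagementprofile
+#       script_path: /var/lib/avi/ca/ca_callout.py
+#       script_params:
+#         - name: ca_endpoint
+#           value: https://ca.example.com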
+extends_documentation_fragment: +- community.general.avi + +''' + +EXAMPLES = """ +- name: Example to create CertificateManagementProfile object + avi_certificatemanagementprofile: + controller: 10.10.25.42 + username: admin + password: something + state: present + name: sample_certificatemanagementprofile +""" + +RETURN = ''' +obj: + description: CertificateManagementProfile (api/certificatemanagementprofile) object + returned: success, changed + type: dict +''' + +from ansible.module_utils.basic import AnsibleModule +try: + from ansible_collections.community.general.plugins.module_utils.network.avi.avi import ( + avi_common_argument_spec, avi_ansible_api, HAS_AVI) +except ImportError: + HAS_AVI = False + + +def main(): + argument_specs = dict( + state=dict(default='present', + choices=['absent', 'present']), + avi_api_update_method=dict(default='put', + choices=['put', 'patch']), + avi_api_patch_op=dict(choices=['add', 'replace', 'delete']), + name=dict(type='str', required=True), + script_params=dict(type='list',), + script_path=dict(type='str', required=True), + tenant_ref=dict(type='str',), + url=dict(type='str',), + uuid=dict(type='str',), + ) + argument_specs.update(avi_common_argument_spec()) + module = AnsibleModule( + argument_spec=argument_specs, supports_check_mode=True) + if not HAS_AVI: + return module.fail_json(msg=( + 'Avi python API SDK (avisdk>=17.1) or requests is not installed. ' + 'For more details visit https://github.com/avinetworks/sdk.')) + return avi_ansible_api(module, 'certificatemanagementprofile', + set([])) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/avi/avi_cloud.py b/plugins/modules/network/avi/avi_cloud.py new file mode 100644 index 0000000000..a46b7e775b --- /dev/null +++ b/plugins/modules/network/avi/avi_cloud.py @@ -0,0 +1,288 @@ +#!/usr/bin/python +# +# @author: Gaurav Rastogi (grastogi@avinetworks.com) +# Eric Anderson (eanderson@avinetworks.com) +# module_check: supported +# Avi Version: 17.1.1 +# +# Copyright: (c) 2017 Gaurav Rastogi, +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: avi_cloud +author: Gaurav Rastogi (@grastogi23) + +short_description: Module for setup of Cloud Avi RESTful Object +description: + - This module is used to configure Cloud object + - more examples at U(https://github.com/avinetworks/devops) +requirements: [ avisdk ] +options: + state: + description: + - The state that should be applied on the entity. + default: present + choices: ["absent", "present"] + avi_api_update_method: + description: + - Default method for object update is HTTP PUT. + - Setting to patch will override that behavior to use HTTP PATCH. + default: put + choices: ["put", "patch"] + avi_api_patch_op: + description: + - Patch operation to use when using avi_api_update_method as patch. + choices: ["add", "replace", "delete"] + apic_configuration: + description: + - Apicconfiguration settings for cloud. + apic_mode: + description: + - Boolean flag to set apic_mode. + - Default value when not specified in API or module is interpreted by Avi Controller as False. + type: bool + autoscale_polling_interval: + description: + - Cloudconnector polling interval for external autoscale groups. + - Field introduced in 18.2.2. + - Default value when not specified in API or module is interpreted by Avi Controller as 60. 
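+# Note the recurring default wording above: when an option is omitted the
+# module sends nothing, and the stated default is applied by the Avi
+# Controller itself, not injected on the Ansible side.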
+    aws_configuration:
+        description:
+            - Awsconfiguration settings for cloud.
+    azure_configuration:
+        description:
+            - Field introduced in 17.2.1.
+    cloudstack_configuration:
+        description:
+            - Cloudstackconfiguration settings for cloud.
+    custom_tags:
+        description:
+            - Custom tags for all avi created resources in the cloud infrastructure.
+            - Field introduced in 17.1.5.
+    dhcp_enabled:
+        description:
+            - Select the ip address management scheme.
+            - Default value when not specified in API or module is interpreted by Avi Controller as False.
+        type: bool
+    dns_provider_ref:
+        description:
+            - Dns profile for the cloud.
+            - It is a reference to an object of type ipamdnsproviderprofile.
+    docker_configuration:
+        description:
+            - Dockerconfiguration settings for cloud.
+    east_west_dns_provider_ref:
+        description:
+            - Dns profile for east-west services.
+            - It is a reference to an object of type ipamdnsproviderprofile.
+    east_west_ipam_provider_ref:
+        description:
+            - Ipam profile for east-west services.
+            - Warning - please use virtual subnets in this ipam profile that do not conflict with the underlay networks or any overlay networks in the cluster.
+            - For example in aws and gcp, 169.254.0.0/16 is used for storing instance metadata.
+            - Hence, it should not be used in this profile.
+            - It is a reference to an object of type ipamdnsproviderprofile.
+    enable_vip_static_routes:
+        description:
+            - Use static routes for vip side network resolution during virtualservice placement.
+            - Default value when not specified in API or module is interpreted by Avi Controller as False.
+        type: bool
+    gcp_configuration:
+        description:
+            - Google cloud platform configuration.
+            - Field introduced in 18.2.1.
+    ip6_autocfg_enabled:
+        description:
+            - Enable ipv6 auto configuration.
+            - Field introduced in 18.1.1.
+            - Default value when not specified in API or module is interpreted by Avi Controller as False.
+        type: bool
+    ipam_provider_ref:
+        description:
+            - Ipam profile for the cloud.
+            - It is a reference to an object of type ipamdnsproviderprofile.
+    license_tier:
+        description:
+            - Specifies the default license tier which would be used by new se groups.
+            - This field by default inherits the value from system configuration.
+            - Enum options - ENTERPRISE_16, ENTERPRISE_18.
+            - Field introduced in 17.2.5.
+    license_type:
+        description:
+            - If no license type is specified then default license enforcement for the cloud type is chosen.
+            - The default mappings are max SEs for container clouds, cores for OpenStack and VMware clouds, and sockets for Linux server clouds.
+            - Enum options - LIC_BACKEND_SERVERS, LIC_SOCKETS, LIC_CORES, LIC_HOSTS, LIC_SE_BANDWIDTH, LIC_METERED_SE_BANDWIDTH.
+    linuxserver_configuration:
+        description:
+            - Linuxserverconfiguration settings for cloud.
+    mesos_configuration:
+        description:
+            - Field deprecated in 18.2.2.
+    mtu:
+        description:
+            - Mtu setting for the cloud.
+            - Default value when not specified in API or module is interpreted by Avi Controller as 1500.
+    name:
+        description:
+            - Name of the object.
+        required: true
+    nsx_configuration:
+        description:
+            - Configuration parameters for nsx manager.
+            - Field introduced in 17.1.1.
+    obj_name_prefix:
+        description:
+            - Default prefix for all automatically created objects in this cloud.
+            - This prefix can be overridden by the se-group template.
+    openstack_configuration:
+        description:
+            - Openstackconfiguration settings for cloud.
+    oshiftk8s_configuration:
+        description:
+            - Oshiftk8sconfiguration settings for cloud.
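+# Each *_configuration option above is a plain dict mirroring the matching
+# Avi API sub-object, and only the block paired with the chosen vtype is
+# consulted. A minimal sketch for an AWS cloud, assuming the region and IAM
+# sub-fields shown (illustrative, not defaults):
+#
+#   - name: Create an AWS cloud
+#     avi_cloud:
+#       controller: '{{ controller }}'
+#       username: '{{ username }}'
+#       password: '{{ password }}'
+#       name: AWS Cloud
+#       vtype: CLOUD_AWS
+#       dhcp_enabled: true
+#       aws_configuration:
+#         region: us-west-2
+#         use_iam_roles: true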
+ prefer_static_routes: + description: + - Prefer static routes over interface routes during virtualservice placement. + - Default value when not specified in API or module is interpreted by Avi Controller as False. + type: bool + proxy_configuration: + description: + - Proxyconfiguration settings for cloud. + rancher_configuration: + description: + - Rancherconfiguration settings for cloud. + state_based_dns_registration: + description: + - Dns records for vips are added/deleted based on the operational state of the vips. + - Field introduced in 17.1.12. + - Default value when not specified in API or module is interpreted by Avi Controller as True. + type: bool + tenant_ref: + description: + - It is a reference to an object of type tenant. + url: + description: + - Avi controller URL of the object. + uuid: + description: + - Unique object identifier of the object. + vca_configuration: + description: + - Vcloudairconfiguration settings for cloud. + vcenter_configuration: + description: + - Vcenterconfiguration settings for cloud. + vtype: + description: + - Cloud type. + - Enum options - CLOUD_NONE, CLOUD_VCENTER, CLOUD_OPENSTACK, CLOUD_AWS, CLOUD_VCA, CLOUD_APIC, CLOUD_MESOS, CLOUD_LINUXSERVER, CLOUD_DOCKER_UCP, + - CLOUD_RANCHER, CLOUD_OSHIFT_K8S, CLOUD_AZURE, CLOUD_GCP. + - Default value when not specified in API or module is interpreted by Avi Controller as CLOUD_NONE. + required: true +extends_documentation_fragment: +- community.general.avi + +''' + +EXAMPLES = """ + - name: Create a VMware cloud with write access mode + avi_cloud: + username: '{{ username }}' + controller: '{{ controller }}' + password: '{{ password }}' + apic_mode: false + dhcp_enabled: true + enable_vip_static_routes: false + license_type: LIC_CORES + mtu: 1500 + name: vCenter Cloud + prefer_static_routes: false + tenant_ref: admin + vcenter_configuration: + datacenter_ref: /api/vimgrdcruntime/datacenter-2-10.10.20.100 + management_network: /api/vimgrnwruntime/dvportgroup-103-10.10.20.100 + password: password + privilege: WRITE_ACCESS + username: user + vcenter_url: 10.10.20.100 + vtype: CLOUD_VCENTER +""" + +RETURN = ''' +obj: + description: Cloud (api/cloud) object + returned: success, changed + type: dict +''' + +from ansible.module_utils.basic import AnsibleModule +try: + from ansible_collections.community.general.plugins.module_utils.network.avi.avi import ( + avi_common_argument_spec, avi_ansible_api, HAS_AVI) +except ImportError: + HAS_AVI = False + + +def main(): + argument_specs = dict( + state=dict(default='present', + choices=['absent', 'present']), + avi_api_update_method=dict(default='put', + choices=['put', 'patch']), + avi_api_patch_op=dict(choices=['add', 'replace', 'delete']), + apic_configuration=dict(type='dict',), + apic_mode=dict(type='bool',), + autoscale_polling_interval=dict(type='int',), + aws_configuration=dict(type='dict',), + azure_configuration=dict(type='dict',), + cloudstack_configuration=dict(type='dict',), + custom_tags=dict(type='list',), + dhcp_enabled=dict(type='bool',), + dns_provider_ref=dict(type='str',), + docker_configuration=dict(type='dict',), + east_west_dns_provider_ref=dict(type='str',), + east_west_ipam_provider_ref=dict(type='str',), + enable_vip_static_routes=dict(type='bool',), + gcp_configuration=dict(type='dict',), + ip6_autocfg_enabled=dict(type='bool',), + ipam_provider_ref=dict(type='str',), + license_tier=dict(type='str',), + license_type=dict(type='str',), + linuxserver_configuration=dict(type='dict',), + mesos_configuration=dict(type='dict',), + 
mtu=dict(type='int',), + name=dict(type='str', required=True), + nsx_configuration=dict(type='dict',), + obj_name_prefix=dict(type='str',), + openstack_configuration=dict(type='dict',), + oshiftk8s_configuration=dict(type='dict',), + prefer_static_routes=dict(type='bool',), + proxy_configuration=dict(type='dict',), + rancher_configuration=dict(type='dict',), + state_based_dns_registration=dict(type='bool',), + tenant_ref=dict(type='str',), + url=dict(type='str',), + uuid=dict(type='str',), + vca_configuration=dict(type='dict',), + vcenter_configuration=dict(type='dict',), + vtype=dict(type='str', required=True), + ) + argument_specs.update(avi_common_argument_spec()) + module = AnsibleModule( + argument_spec=argument_specs, supports_check_mode=True) + if not HAS_AVI: + return module.fail_json(msg=( + 'Avi python API SDK (avisdk>=17.1) or requests is not installed. ' + 'For more details visit https://github.com/avinetworks/sdk.')) + return avi_ansible_api(module, 'cloud', + set([])) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/avi/avi_cloudconnectoruser.py b/plugins/modules/network/avi/avi_cloudconnectoruser.py new file mode 100644 index 0000000000..21082ecf96 --- /dev/null +++ b/plugins/modules/network/avi/avi_cloudconnectoruser.py @@ -0,0 +1,144 @@ +#!/usr/bin/python +# +# @author: Gaurav Rastogi (grastogi@avinetworks.com) +# Eric Anderson (eanderson@avinetworks.com) +# module_check: supported +# Avi Version: 17.1.1 +# +# Copyright: (c) 2017 Gaurav Rastogi, +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: avi_cloudconnectoruser +author: Gaurav Rastogi (@grastogi23) + +short_description: Module for setup of CloudConnectorUser Avi RESTful Object +description: + - This module is used to configure CloudConnectorUser object + - more examples at U(https://github.com/avinetworks/devops) +requirements: [ avisdk ] +options: + state: + description: + - The state that should be applied on the entity. + default: present + choices: ["absent", "present"] + avi_api_update_method: + description: + - Default method for object update is HTTP PUT. + - Setting to patch will override that behavior to use HTTP PATCH. + default: put + choices: ["put", "patch"] + avi_api_patch_op: + description: + - Patch operation to use when using avi_api_update_method as patch. + choices: ["add", "replace", "delete"] + azure_serviceprincipal: + description: + - Field introduced in 17.2.1. + azure_userpass: + description: + - Field introduced in 17.2.1. + gcp_credentials: + description: + - Credentials for google cloud platform. + - Field introduced in 18.2.1. + name: + description: + - Name of the object. + required: true + oci_credentials: + description: + - Credentials for oracle cloud infrastructure. + - Field introduced in 18.2.1,18.1.3. + private_key: + description: + - Private_key of cloudconnectoruser. + public_key: + description: + - Public_key of cloudconnectoruser. + tenant_ref: + description: + - It is a reference to an object of type tenant. + tencent_credentials: + description: + - Credentials for tencent cloud. + - Field introduced in 18.2.3. + url: + description: + - Avi controller URL of the object. + uuid: + description: + - Unique object identifier of the object. 
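+# private_key is declared no_log in the argument spec below. A minimal sketch
+# of supplying an Azure credential instead of an SSH key, assuming the
+# azure_serviceprincipal sub-fields and vault variable names shown:
+#
+#   - name: Create an Azure cloud connector user
+#     avi_cloudconnectoruser:
+#       controller: '{{ controller }}'
+#       username: '{{ username }}'
+#       password: '{{ password }}'
+#       name: azure-svc
+#       azure_serviceprincipal:
+#         application_id: '{{ vault_azure_app_id }}'
+#         authentication_token: '{{ vault_azure_token }}'
+#         tenant_id: '{{ vault_azure_tenant }}'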
+extends_documentation_fragment:
+- community.general.avi
+
+'''
+
+EXAMPLES = """
+  - name: Create a Cloud connector user that is used for integration into cloud platforms
+    avi_cloudconnectoruser:
+      controller: '{{ controller }}'
+      name: root
+      password: '{{ password }}'
+      private_key: |
+        -----BEGIN RSA PRIVATE KEY-----
+        -----END RSA PRIVATE KEY-----
+      public_key: 'ssh-rsa ...'
+      tenant_ref: admin
+      username: '{{ username }}'
+"""
+
+RETURN = '''
+obj:
+    description: CloudConnectorUser (api/cloudconnectoruser) object
+    returned: success, changed
+    type: dict
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+try:
+    from ansible_collections.community.general.plugins.module_utils.network.avi.avi import (
+        avi_common_argument_spec, avi_ansible_api, HAS_AVI)
+except ImportError:
+    HAS_AVI = False
+
+
+def main():
+    argument_specs = dict(
+        state=dict(default='present',
+                   choices=['absent', 'present']),
+        avi_api_update_method=dict(default='put',
+                                   choices=['put', 'patch']),
+        avi_api_patch_op=dict(choices=['add', 'replace', 'delete']),
+        azure_serviceprincipal=dict(type='dict',),
+        azure_userpass=dict(type='dict',),
+        gcp_credentials=dict(type='dict',),
+        name=dict(type='str', required=True),
+        oci_credentials=dict(type='dict',),
+        private_key=dict(type='str', no_log=True,),
+        public_key=dict(type='str',),
+        tenant_ref=dict(type='str',),
+        tencent_credentials=dict(type='dict',),
+        url=dict(type='str',),
+        uuid=dict(type='str',),
+    )
+    argument_specs.update(avi_common_argument_spec())
+    module = AnsibleModule(
+        argument_spec=argument_specs, supports_check_mode=True)
+    if not HAS_AVI:
+        return module.fail_json(msg=(
+            'Avi python API SDK (avisdk>=17.1) or requests is not installed. '
+            'For more details visit https://github.com/avinetworks/sdk.'))
+    return avi_ansible_api(module, 'cloudconnectoruser',
+                           set(['private_key']))
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/network/avi/avi_cloudproperties.py b/plugins/modules/network/avi/avi_cloudproperties.py
new file mode 100644
index 0000000000..636aa23306
--- /dev/null
+++ b/plugins/modules/network/avi/avi_cloudproperties.py
@@ -0,0 +1,118 @@
+#!/usr/bin/python
+#
+# @author: Gaurav Rastogi (grastogi@avinetworks.com)
+#          Eric Anderson (eanderson@avinetworks.com)
+# module_check: supported
+# Avi Version: 17.1.1
+#
+# Copyright: (c) 2017 Gaurav Rastogi,
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+DOCUMENTATION = '''
+---
+module: avi_cloudproperties
+author: Gaurav Rastogi (@grastogi23)
+
+short_description: Module for setup of CloudProperties Avi RESTful Object
+description:
+    - This module is used to configure CloudProperties object
+    - more examples at U(https://github.com/avinetworks/devops)
+requirements: [ avisdk ]
+options:
+    state:
+        description:
+            - The state that should be applied on the entity.
+        default: present
+        choices: ["absent", "present"]
+    avi_api_update_method:
+        description:
+            - Default method for object update is HTTP PUT.
+            - Setting to patch will override that behavior to use HTTP PATCH.
+        default: put
+        choices: ["put", "patch"]
+    avi_api_patch_op:
+        description:
+            - Patch operation to use when using avi_api_update_method as patch.
+        choices: ["add", "replace", "delete"]
+    cc_props:
+        description:
+            - Cloudconnector properties.
+    cc_vtypes:
+        description:
+            - Cloud types supported by cloudconnector.
+ - Enum options - CLOUD_NONE, CLOUD_VCENTER, CLOUD_OPENSTACK, CLOUD_AWS, CLOUD_VCA, CLOUD_APIC, CLOUD_MESOS, CLOUD_LINUXSERVER, CLOUD_DOCKER_UCP, + - CLOUD_RANCHER, CLOUD_OSHIFT_K8S, CLOUD_AZURE, CLOUD_GCP. + hyp_props: + description: + - Hypervisor properties. + info: + description: + - Properties specific to a cloud type. + url: + description: + - Avi controller URL of the object. + uuid: + description: + - Unique object identifier of the object. +extends_documentation_fragment: +- community.general.avi + +''' + +EXAMPLES = """ +- name: Example to create CloudProperties object + avi_cloudproperties: + controller: 10.10.25.42 + username: admin + password: something + state: present + name: sample_cloudproperties +""" + +RETURN = ''' +obj: + description: CloudProperties (api/cloudproperties) object + returned: success, changed + type: dict +''' + +from ansible.module_utils.basic import AnsibleModule +try: + from ansible_collections.community.general.plugins.module_utils.network.avi.avi import ( + avi_common_argument_spec, avi_ansible_api, HAS_AVI) +except ImportError: + HAS_AVI = False + + +def main(): + argument_specs = dict( + state=dict(default='present', + choices=['absent', 'present']), + avi_api_update_method=dict(default='put', + choices=['put', 'patch']), + avi_api_patch_op=dict(choices=['add', 'replace', 'delete']), + cc_props=dict(type='dict',), + cc_vtypes=dict(type='list',), + hyp_props=dict(type='list',), + info=dict(type='list',), + url=dict(type='str',), + uuid=dict(type='str',), + ) + argument_specs.update(avi_common_argument_spec()) + module = AnsibleModule( + argument_spec=argument_specs, supports_check_mode=True) + if not HAS_AVI: + return module.fail_json(msg=( + 'Avi python API SDK (avisdk>=17.1) or requests is not installed. ' + 'For more details visit https://github.com/avinetworks/sdk.')) + return avi_ansible_api(module, 'cloudproperties', + set([])) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/avi/avi_cluster.py b/plugins/modules/network/avi/avi_cluster.py new file mode 100644 index 0000000000..fe983d77ea --- /dev/null +++ b/plugins/modules/network/avi/avi_cluster.py @@ -0,0 +1,123 @@ +#!/usr/bin/python +# +# @author: Gaurav Rastogi (grastogi@avinetworks.com) +# Eric Anderson (eanderson@avinetworks.com) +# module_check: supported +# +# Copyright: (c) 2017 Gaurav Rastogi, +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: avi_cluster +author: Gaurav Rastogi (@grastogi23) + +short_description: Module for setup of Cluster Avi RESTful Object +description: + - This module is used to configure Cluster object + - more examples at U(https://github.com/avinetworks/devops) +requirements: [ avisdk ] +options: + state: + description: + - The state that should be applied on the entity. + default: present + choices: ["absent", "present"] + avi_api_update_method: + description: + - Default method for object update is HTTP PUT. + - Setting to patch will override that behavior to use HTTP PATCH. + default: put + choices: ["put", "patch"] + avi_api_patch_op: + description: + - Patch operation to use when using avi_api_update_method as patch. + choices: ["add", "replace", "delete"] + name: + description: + - Name of the object. + required: true + nodes: + description: + - List of clusternode. 
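+# Each nodes entry is a dict following the Avi ClusterNode schema. A minimal
+# sketch of forming a three-node cluster, assuming the node names, addresses
+# and ip field layout shown:
+#
+#   - name: Form a three node controller cluster
+#     avi_cluster:
+#       controller: 10.10.25.42
+#       username: admin
+#       password: something
+#       name: cluster-0-1
+#       virtual_ip:
+#         type: V4
+#         addr: 10.10.25.40
+#       nodes:
+#         - name: node1
+#           ip: {type: V4, addr: 10.10.25.41}
+#         - name: node2
+#           ip: {type: V4, addr: 10.10.25.42}
+#         - name: node3
+#           ip: {type: V4, addr: 10.10.25.43}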
+    rejoin_nodes_automatically:
+        description:
+            - Re-join cluster nodes automatically in the event one of the nodes is reset to factory settings.
+            - Default value when not specified in API or module is interpreted by Avi Controller as True.
+        type: bool
+    tenant_ref:
+        description:
+            - It is a reference to an object of type tenant.
+    url:
+        description:
+            - Avi controller URL of the object.
+    uuid:
+        description:
+            - Unique object identifier of the object.
+    virtual_ip:
+        description:
+            - A virtual ip address.
+            - This ip address will be dynamically reconfigured so that it is always the ip of the cluster leader.
+extends_documentation_fragment:
+- community.general.avi
+
+'''
+
+EXAMPLES = """
+- name: Example to create Cluster object
+  avi_cluster:
+    controller: 10.10.25.42
+    username: admin
+    password: something
+    state: present
+    name: sample_cluster
+"""
+
+RETURN = '''
+obj:
+    description: Cluster (api/cluster) object
+    returned: success, changed
+    type: dict
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+try:
+    from ansible_collections.community.general.plugins.module_utils.network.avi.avi import (
+        avi_common_argument_spec, avi_ansible_api, HAS_AVI)
+except ImportError:
+    HAS_AVI = False
+
+
+def main():
+    argument_specs = dict(
+        state=dict(default='present',
+                   choices=['absent', 'present']),
+        avi_api_update_method=dict(default='put',
+                                   choices=['put', 'patch']),
+        avi_api_patch_op=dict(choices=['add', 'replace', 'delete']),
+        name=dict(type='str', required=True),
+        nodes=dict(type='list',),
+        rejoin_nodes_automatically=dict(type='bool',),
+        tenant_ref=dict(type='str',),
+        url=dict(type='str',),
+        uuid=dict(type='str',),
+        virtual_ip=dict(type='dict',),
+    )
+    argument_specs.update(avi_common_argument_spec())
+    module = AnsibleModule(
+        argument_spec=argument_specs, supports_check_mode=True)
+    if not HAS_AVI:
+        return module.fail_json(msg=(
+            'Avi python API SDK (avisdk>=17.1) or requests is not installed. '
+            'For more details visit https://github.com/avinetworks/sdk.'))
+    return avi_ansible_api(module, 'cluster',
+                           set([]))
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/network/avi/avi_clusterclouddetails.py b/plugins/modules/network/avi/avi_clusterclouddetails.py
new file mode 100644
index 0000000000..c7692d3181
--- /dev/null
+++ b/plugins/modules/network/avi/avi_clusterclouddetails.py
@@ -0,0 +1,114 @@
+#!/usr/bin/python
+#
+# @author: Gaurav Rastogi (grastogi@avinetworks.com)
+#          Eric Anderson (eanderson@avinetworks.com)
+# module_check: supported
+#
+# Copyright: (c) 2017 Gaurav Rastogi,
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+DOCUMENTATION = '''
+---
+module: avi_clusterclouddetails
+author: Gaurav Rastogi (@grastogi23)
+
+short_description: Module for setup of ClusterCloudDetails Avi RESTful Object
+description:
+    - This module is used to configure ClusterCloudDetails object
+    - more examples at U(https://github.com/avinetworks/devops)
+requirements: [ avisdk ]
+options:
+    state:
+        description:
+            - The state that should be applied on the entity.
+        default: present
+        choices: ["absent", "present"]
+    avi_api_update_method:
+        description:
+            - Default method for object update is HTTP PUT.
+            - Setting to patch will override that behavior to use HTTP PATCH.
+ default: put + choices: ["put", "patch"] + avi_api_patch_op: + description: + - Patch operation to use when using avi_api_update_method as patch. + choices: ["add", "replace", "delete"] + azure_info: + description: + - Azure info to configure cluster_vip on the controller. + - Field introduced in 17.2.5. + name: + description: + - Field introduced in 17.2.5. + required: true + tenant_ref: + description: + - It is a reference to an object of type tenant. + - Field introduced in 17.2.5. + url: + description: + - Avi controller URL of the object. + uuid: + description: + - Field introduced in 17.2.5. +extends_documentation_fragment: +- community.general.avi + +''' + +EXAMPLES = """ +- name: Example to create ClusterCloudDetails object + avi_clusterclouddetails: + controller: 10.10.25.42 + username: admin + password: something + state: present + name: sample_clusterclouddetails +""" + +RETURN = ''' +obj: + description: ClusterCloudDetails (api/clusterclouddetails) object + returned: success, changed + type: dict +''' + +from ansible.module_utils.basic import AnsibleModule +try: + from ansible_collections.community.general.plugins.module_utils.network.avi.avi import ( + avi_common_argument_spec, avi_ansible_api, HAS_AVI) +except ImportError: + HAS_AVI = False + + +def main(): + argument_specs = dict( + state=dict(default='present', + choices=['absent', 'present']), + avi_api_update_method=dict(default='put', + choices=['put', 'patch']), + avi_api_patch_op=dict(choices=['add', 'replace', 'delete']), + azure_info=dict(type='dict',), + name=dict(type='str', required=True), + tenant_ref=dict(type='str',), + url=dict(type='str',), + uuid=dict(type='str',), + ) + argument_specs.update(avi_common_argument_spec()) + module = AnsibleModule( + argument_spec=argument_specs, supports_check_mode=True) + if not HAS_AVI: + return module.fail_json(msg=( + 'Avi python API SDK (avisdk>=17.1) or requests is not installed. ' + 'For more details visit https://github.com/avinetworks/sdk.')) + return avi_ansible_api(module, 'clusterclouddetails', + set([])) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/avi/avi_controllerproperties.py b/plugins/modules/network/avi/avi_controllerproperties.py new file mode 100644 index 0000000000..d5d3b6c42b --- /dev/null +++ b/plugins/modules/network/avi/avi_controllerproperties.py @@ -0,0 +1,421 @@ +#!/usr/bin/python +# +# @author: Gaurav Rastogi (grastogi@avinetworks.com) +# Eric Anderson (eanderson@avinetworks.com) +# module_check: supported +# Avi Version: 17.1.2 +# +# Copyright: (c) 2017 Gaurav Rastogi, +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: avi_controllerproperties +author: Gaurav Rastogi (@grastogi23) + +short_description: Module for setup of ControllerProperties Avi RESTful Object +description: + - This module is used to configure ControllerProperties object + - more examples at U(https://github.com/avinetworks/devops) +requirements: [ avisdk ] +options: + state: + description: + - The state that should be applied on the entity. + default: present + choices: ["absent", "present"] + avi_api_update_method: + description: + - Default method for object update is HTTP PUT. + - Setting to patch will override that behavior to use HTTP PATCH. 
+ default: put + choices: ["put", "patch"] + avi_api_patch_op: + description: + - Patch operation to use when using avi_api_update_method as patch. + choices: ["add", "replace", "delete"] + allow_ip_forwarding: + description: + - Field introduced in 17.1.1. + - Default value when not specified in API or module is interpreted by Avi Controller as False. + type: bool + allow_unauthenticated_apis: + description: + - Allow unauthenticated access for special apis. + - Default value when not specified in API or module is interpreted by Avi Controller as False. + type: bool + allow_unauthenticated_nodes: + description: + - Boolean flag to set allow_unauthenticated_nodes. + - Default value when not specified in API or module is interpreted by Avi Controller as False. + type: bool + api_idle_timeout: + description: + - Allowed values are 0-1440. + - Default value when not specified in API or module is interpreted by Avi Controller as 15. + api_perf_logging_threshold: + description: + - Threshold to log request timing in portal_performance.log and server-timing response header. + - Any stage taking longer than 1% of the threshold will be included in the server-timing header. + - Field introduced in 18.1.4, 18.2.1. + - Default value when not specified in API or module is interpreted by Avi Controller as 10000. + appviewx_compat_mode: + description: + - Export configuration in appviewx compatibility mode. + - Field introduced in 17.1.1. + - Default value when not specified in API or module is interpreted by Avi Controller as False. + type: bool + attach_ip_retry_interval: + description: + - Number of attach_ip_retry_interval. + - Default value when not specified in API or module is interpreted by Avi Controller as 360. + attach_ip_retry_limit: + description: + - Number of attach_ip_retry_limit. + - Default value when not specified in API or module is interpreted by Avi Controller as 4. + bm_use_ansible: + description: + - Use ansible for se creation in baremetal. + - Field introduced in 17.2.2. + - Default value when not specified in API or module is interpreted by Avi Controller as True. + type: bool + cleanup_expired_authtoken_timeout_period: + description: + - Period for auth token cleanup job. + - Field introduced in 18.1.1. + - Default value when not specified in API or module is interpreted by Avi Controller as 60. + cleanup_sessions_timeout_period: + description: + - Period for sessions cleanup job. + - Field introduced in 18.1.1. + - Default value when not specified in API or module is interpreted by Avi Controller as 60. + cloud_reconcile: + description: + - Enable/disable periodic reconcile for all the clouds. + - Field introduced in 17.2.14,18.1.5,18.2.1. + - Default value when not specified in API or module is interpreted by Avi Controller as True. + type: bool + cluster_ip_gratuitous_arp_period: + description: + - Period for cluster ip gratuitous arp job. + - Default value when not specified in API or module is interpreted by Avi Controller as 60. + consistency_check_timeout_period: + description: + - Period for consistency check job. + - Field introduced in 18.1.1. + - Default value when not specified in API or module is interpreted by Avi Controller as 60. + crashed_se_reboot: + description: + - Number of crashed_se_reboot. + - Default value when not specified in API or module is interpreted by Avi Controller as 900. + dead_se_detection_timer: + description: + - Number of dead_se_detection_timer. 
+ - Default value when not specified in API or module is interpreted by Avi Controller as 360. + dns_refresh_period: + description: + - Period for refresh pool and gslb dns job. + - Default value when not specified in API or module is interpreted by Avi Controller as 60. + dummy: + description: + - Number of dummy. + enable_api_sharding: + description: + - This setting enables the controller leader to shard api requests to the followers (if any). + - Field introduced in 18.1.5, 18.2.1. + - Default value when not specified in API or module is interpreted by Avi Controller as True. + type: bool + enable_memory_balancer: + description: + - Enable/disable memory balancer. + - Field introduced in 17.2.8. + - Default value when not specified in API or module is interpreted by Avi Controller as True. + type: bool + fatal_error_lease_time: + description: + - Number of fatal_error_lease_time. + - Default value when not specified in API or module is interpreted by Avi Controller as 120. + max_dead_se_in_grp: + description: + - Number of max_dead_se_in_grp. + - Default value when not specified in API or module is interpreted by Avi Controller as 1. + max_pcap_per_tenant: + description: + - Maximum number of pcap files stored per tenant. + - Default value when not specified in API or module is interpreted by Avi Controller as 4. + max_seq_attach_ip_failures: + description: + - Maximum number of consecutive attach ip failures that halts vs placement. + - Field introduced in 17.2.2. + - Default value when not specified in API or module is interpreted by Avi Controller as 3. + max_seq_vnic_failures: + description: + - Number of max_seq_vnic_failures. + - Default value when not specified in API or module is interpreted by Avi Controller as 3. + persistence_key_rotate_period: + description: + - Period for rotate app persistence keys job. + - Allowed values are 1-1051200. + - Special values are 0 - 'disabled'. + - Default value when not specified in API or module is interpreted by Avi Controller as 0. + portal_token: + description: + - Token used for uploading tech-support to portal. + - Field introduced in 16.4.6,17.1.2. + process_locked_useraccounts_timeout_period: + description: + - Period for process locked user accounts job. + - Field introduced in 18.1.1. + - Default value when not specified in API or module is interpreted by Avi Controller as 1. + process_pki_profile_timeout_period: + description: + - Period for process pki profile job. + - Field introduced in 18.1.1. + - Default value when not specified in API or module is interpreted by Avi Controller as 1440. + query_host_fail: + description: + - Number of query_host_fail. + - Default value when not specified in API or module is interpreted by Avi Controller as 180. + safenet_hsm_version: + description: + - Version of the safenet package installed on the controller. + - Field introduced in 16.5.2,17.2.3. + se_create_timeout: + description: + - Number of se_create_timeout. + - Default value when not specified in API or module is interpreted by Avi Controller as 900. + se_failover_attempt_interval: + description: + - Interval between attempting failovers to an se. + - Default value when not specified in API or module is interpreted by Avi Controller as 300. + se_from_marketplace: + description: + - This setting decides whether se is to be deployed from the cloud marketplace or to be created by the controller. + - The setting is applicable only when byol license is selected. + - Enum options - MARKETPLACE, IMAGE. 
+ - Field introduced in 18.1.4, 18.2.1. + - Default value when not specified in API or module is interpreted by Avi Controller as IMAGE. + se_offline_del: + description: + - Number of se_offline_del. + - Default value when not specified in API or module is interpreted by Avi Controller as 172000. + se_vnic_cooldown: + description: + - Number of se_vnic_cooldown. + - Default value when not specified in API or module is interpreted by Avi Controller as 120. + secure_channel_cleanup_timeout: + description: + - Period for secure channel cleanup job. + - Default value when not specified in API or module is interpreted by Avi Controller as 60. + secure_channel_controller_token_timeout: + description: + - Number of secure_channel_controller_token_timeout. + - Default value when not specified in API or module is interpreted by Avi Controller as 60. + secure_channel_se_token_timeout: + description: + - Number of secure_channel_se_token_timeout. + - Default value when not specified in API or module is interpreted by Avi Controller as 60. + seupgrade_fabric_pool_size: + description: + - Pool size used for all fabric commands during se upgrade. + - Default value when not specified in API or module is interpreted by Avi Controller as 20. + seupgrade_segroup_min_dead_timeout: + description: + - Time to wait before marking segroup upgrade as stuck. + - Default value when not specified in API or module is interpreted by Avi Controller as 360. + ssl_certificate_expiry_warning_days: + description: + - Number of days for ssl certificate expiry warning. + unresponsive_se_reboot: + description: + - Number of unresponsive_se_reboot. + - Default value when not specified in API or module is interpreted by Avi Controller as 300. + upgrade_dns_ttl: + description: + - Time to account for dns ttl during upgrade. + - This is in addition to vs_scalein_timeout_for_upgrade in se_group. + - Field introduced in 17.1.1. + - Default value when not specified in API or module is interpreted by Avi Controller as 5. + upgrade_lease_time: + description: + - Number of upgrade_lease_time. + - Default value when not specified in API or module is interpreted by Avi Controller as 360. + url: + description: + - Avi controller URL of the object. + uuid: + description: + - Unique object identifier of the object. + vnic_op_fail_time: + description: + - Number of vnic_op_fail_time. + - Default value when not specified in API or module is interpreted by Avi Controller as 180. + vs_apic_scaleout_timeout: + description: + - Time to wait for the scaled out se to become ready before marking the scaleout done, applies to apic configuration only. + - Default value when not specified in API or module is interpreted by Avi Controller as 360. + vs_awaiting_se_timeout: + description: + - Number of vs_awaiting_se_timeout. + - Default value when not specified in API or module is interpreted by Avi Controller as 60. + vs_key_rotate_period: + description: + - Period for rotate vs keys job. + - Allowed values are 1-1051200. + - Special values are 0 - 'disabled'. + - Default value when not specified in API or module is interpreted by Avi Controller as 360. + vs_scaleout_ready_check_interval: + description: + - Interval for checking scaleout_ready status while controller is waiting for scaleoutready rpc from the service engine. + - Field introduced in 18.2.2. + - Default value when not specified in API or module is interpreted by Avi Controller as 60. 
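+# ControllerProperties is effectively a cluster-wide singleton, so updates
+# usually patch a field or two rather than resend the whole object. A minimal
+# sketch using the documented special value 0 to disable vs key rotation:
+#
+#   - name: Disable periodic VS key rotation
+#     avi_controllerproperties:
+#       controller: 10.10.25.42
+#       username: admin
+#       password: something
+#       avi_api_update_method: patch
+#       avi_api_patch_op: replace
+#       vs_key_rotate_period: 0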
+ vs_se_attach_ip_fail: + description: + - Time to wait before marking attach ip operation on an se as failed. + - Field introduced in 17.2.2. + - Default value when not specified in API or module is interpreted by Avi Controller as 600. + vs_se_bootup_fail: + description: + - Number of vs_se_bootup_fail. + - Default value when not specified in API or module is interpreted by Avi Controller as 480. + vs_se_create_fail: + description: + - Number of vs_se_create_fail. + - Default value when not specified in API or module is interpreted by Avi Controller as 1500. + vs_se_ping_fail: + description: + - Number of vs_se_ping_fail. + - Default value when not specified in API or module is interpreted by Avi Controller as 60. + vs_se_vnic_fail: + description: + - Number of vs_se_vnic_fail. + - Default value when not specified in API or module is interpreted by Avi Controller as 300. + vs_se_vnic_ip_fail: + description: + - Number of vs_se_vnic_ip_fail. + - Default value when not specified in API or module is interpreted by Avi Controller as 120. + warmstart_se_reconnect_wait_time: + description: + - Number of warmstart_se_reconnect_wait_time. + - Default value when not specified in API or module is interpreted by Avi Controller as 480. + warmstart_vs_resync_wait_time: + description: + - Timeout for warmstart vs resync. + - Field introduced in 18.1.4, 18.2.1. + - Default value when not specified in API or module is interpreted by Avi Controller as 300. +extends_documentation_fragment: +- community.general.avi + +''' + +EXAMPLES = """ +- name: Example to create ControllerProperties object + avi_controllerproperties: + controller: 10.10.25.42 + username: admin + password: something + state: present + name: sample_controllerproperties +""" + +RETURN = ''' +obj: + description: ControllerProperties (api/controllerproperties) object + returned: success, changed + type: dict +''' + +from ansible.module_utils.basic import AnsibleModule +try: + from ansible_collections.community.general.plugins.module_utils.network.avi.avi import ( + avi_common_argument_spec, avi_ansible_api, HAS_AVI) +except ImportError: + HAS_AVI = False + + +def main(): + argument_specs = dict( + state=dict(default='present', + choices=['absent', 'present']), + avi_api_update_method=dict(default='put', + choices=['put', 'patch']), + avi_api_patch_op=dict(choices=['add', 'replace', 'delete']), + allow_ip_forwarding=dict(type='bool',), + allow_unauthenticated_apis=dict(type='bool',), + allow_unauthenticated_nodes=dict(type='bool',), + api_idle_timeout=dict(type='int',), + api_perf_logging_threshold=dict(type='int',), + appviewx_compat_mode=dict(type='bool',), + attach_ip_retry_interval=dict(type='int',), + attach_ip_retry_limit=dict(type='int',), + bm_use_ansible=dict(type='bool',), + cleanup_expired_authtoken_timeout_period=dict(type='int',), + cleanup_sessions_timeout_period=dict(type='int',), + cloud_reconcile=dict(type='bool',), + cluster_ip_gratuitous_arp_period=dict(type='int',), + consistency_check_timeout_period=dict(type='int',), + crashed_se_reboot=dict(type='int',), + dead_se_detection_timer=dict(type='int',), + dns_refresh_period=dict(type='int',), + dummy=dict(type='int',), + enable_api_sharding=dict(type='bool',), + enable_memory_balancer=dict(type='bool',), + fatal_error_lease_time=dict(type='int',), + max_dead_se_in_grp=dict(type='int',), + max_pcap_per_tenant=dict(type='int',), + max_seq_attach_ip_failures=dict(type='int',), + max_seq_vnic_failures=dict(type='int',), + persistence_key_rotate_period=dict(type='int',), + 
portal_token=dict(type='str', no_log=True,), + process_locked_useraccounts_timeout_period=dict(type='int',), + process_pki_profile_timeout_period=dict(type='int',), + query_host_fail=dict(type='int',), + safenet_hsm_version=dict(type='str',), + se_create_timeout=dict(type='int',), + se_failover_attempt_interval=dict(type='int',), + se_from_marketplace=dict(type='str',), + se_offline_del=dict(type='int',), + se_vnic_cooldown=dict(type='int',), + secure_channel_cleanup_timeout=dict(type='int',), + secure_channel_controller_token_timeout=dict(type='int',), + secure_channel_se_token_timeout=dict(type='int',), + seupgrade_fabric_pool_size=dict(type='int',), + seupgrade_segroup_min_dead_timeout=dict(type='int',), + ssl_certificate_expiry_warning_days=dict(type='list',), + unresponsive_se_reboot=dict(type='int',), + upgrade_dns_ttl=dict(type='int',), + upgrade_lease_time=dict(type='int',), + url=dict(type='str',), + uuid=dict(type='str',), + vnic_op_fail_time=dict(type='int',), + vs_apic_scaleout_timeout=dict(type='int',), + vs_awaiting_se_timeout=dict(type='int',), + vs_key_rotate_period=dict(type='int',), + vs_scaleout_ready_check_interval=dict(type='int',), + vs_se_attach_ip_fail=dict(type='int',), + vs_se_bootup_fail=dict(type='int',), + vs_se_create_fail=dict(type='int',), + vs_se_ping_fail=dict(type='int',), + vs_se_vnic_fail=dict(type='int',), + vs_se_vnic_ip_fail=dict(type='int',), + warmstart_se_reconnect_wait_time=dict(type='int',), + warmstart_vs_resync_wait_time=dict(type='int',), + ) + argument_specs.update(avi_common_argument_spec()) + module = AnsibleModule( + argument_spec=argument_specs, supports_check_mode=True) + if not HAS_AVI: + return module.fail_json(msg=( + 'Avi python API SDK (avisdk>=17.1) or requests is not installed. ' + 'For more details visit https://github.com/avinetworks/sdk.')) + return avi_ansible_api(module, 'controllerproperties', + set(['portal_token'])) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/avi/avi_customipamdnsprofile.py b/plugins/modules/network/avi/avi_customipamdnsprofile.py new file mode 100644 index 0000000000..05beed19a0 --- /dev/null +++ b/plugins/modules/network/avi/avi_customipamdnsprofile.py @@ -0,0 +1,121 @@ +#!/usr/bin/python +# +# @author: Gaurav Rastogi (grastogi@avinetworks.com) +# Eric Anderson (eanderson@avinetworks.com) +# module_check: supported +# +# Copyright: (c) 2017 Gaurav Rastogi, +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: avi_customipamdnsprofile +author: Gaurav Rastogi (@grastogi23) + +short_description: Module for setup of CustomIpamDnsProfile Avi RESTful Object +description: + - This module is used to configure CustomIpamDnsProfile object + - more examples at U(https://github.com/avinetworks/devops) +requirements: [ avisdk ] +options: + state: + description: + - The state that should be applied on the entity. + default: present + choices: ["absent", "present"] + avi_api_update_method: + description: + - Default method for object update is HTTP PUT. + - Setting to patch will override that behavior to use HTTP PATCH. + default: put + choices: ["put", "patch"] + avi_api_patch_op: + description: + - Patch operation to use when using avi_api_update_method as patch. + choices: ["add", "replace", "delete"] + name: + description: + - Name of the custom ipam dns profile. + - Field introduced in 17.1.1. 
+ required: true + script_params: + description: + - Parameters that are always passed to the ipam/dns script. + - Field introduced in 17.1.1. + script_uri: + description: + - Script uri of form controller //ipamdnsscripts/. + - Field introduced in 17.1.1. + required: true + tenant_ref: + description: + - It is a reference to an object of type tenant. + - Field introduced in 17.1.1. + url: + description: + - Avi controller URL of the object. + uuid: + description: + - Field introduced in 17.1.1. +extends_documentation_fragment: +- community.general.avi + +''' + +EXAMPLES = """ +- name: Example to create CustomIpamDnsProfile object + avi_customipamdnsprofile: + controller: 10.10.25.42 + username: admin + password: something + state: present + name: sample_customipamdnsprofile +""" + +RETURN = ''' +obj: + description: CustomIpamDnsProfile (api/customipamdnsprofile) object + returned: success, changed + type: dict +''' + +from ansible.module_utils.basic import AnsibleModule +try: + from ansible_collections.community.general.plugins.module_utils.network.avi.avi import ( + avi_common_argument_spec, avi_ansible_api, HAS_AVI) +except ImportError: + HAS_AVI = False + + +def main(): + argument_specs = dict( + state=dict(default='present', + choices=['absent', 'present']), + avi_api_update_method=dict(default='put', + choices=['put', 'patch']), + avi_api_patch_op=dict(choices=['add', 'replace', 'delete']), + name=dict(type='str', required=True), + script_params=dict(type='list',), + script_uri=dict(type='str', required=True), + tenant_ref=dict(type='str',), + url=dict(type='str',), + uuid=dict(type='str',), + ) + argument_specs.update(avi_common_argument_spec()) + module = AnsibleModule( + argument_spec=argument_specs, supports_check_mode=True) + if not HAS_AVI: + return module.fail_json(msg=( + 'Avi python API SDK (avisdk>=17.1) or requests is not installed. ' + 'For more details visit https://github.com/avinetworks/sdk.')) + return avi_ansible_api(module, 'customipamdnsprofile', + set([])) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/avi/avi_dnspolicy.py b/plugins/modules/network/avi/avi_dnspolicy.py new file mode 100644 index 0000000000..79b1bfc184 --- /dev/null +++ b/plugins/modules/network/avi/avi_dnspolicy.py @@ -0,0 +1,126 @@ +#!/usr/bin/python +# +# @author: Gaurav Rastogi (grastogi@avinetworks.com) +# Eric Anderson (eanderson@avinetworks.com) +# module_check: supported +# Avi Version: 17.1.1 +# +# Copyright: (c) 2017 Gaurav Rastogi, +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: avi_dnspolicy +author: Gaurav Rastogi (@grastogi23) + +short_description: Module for setup of DnsPolicy Avi RESTful Object +description: + - This module is used to configure DnsPolicy object + - more examples at U(https://github.com/avinetworks/devops) +requirements: [ avisdk ] +options: + state: + description: + - The state that should be applied on the entity. + default: present + choices: ["absent", "present"] + avi_api_update_method: + description: + - Default method for object update is HTTP PUT. + - Setting to patch will override that behavior to use HTTP PATCH. + default: put + choices: ["put", "patch"] + avi_api_patch_op: + description: + - Patch operation to use when using avi_api_update_method as patch. 
+ choices: ["add", "replace", "delete"] + created_by: + description: + - Creator name. + - Field introduced in 17.1.1. + description: + description: + - Field introduced in 17.1.1. + name: + description: + - Name of the dns policy. + - Field introduced in 17.1.1. + required: true + rule: + description: + - Dns rules. + - Field introduced in 17.1.1. + tenant_ref: + description: + - It is a reference to an object of type tenant. + - Field introduced in 17.1.1. + url: + description: + - Avi controller URL of the object. + uuid: + description: + - Uuid of the dns policy. + - Field introduced in 17.1.1. +extends_documentation_fragment: +- community.general.avi + +''' + +EXAMPLES = """ +- name: Example to create DnsPolicy object + avi_dnspolicy: + controller: 10.10.25.42 + username: admin + password: something + state: present + name: sample_dnspolicy +""" + +RETURN = ''' +obj: + description: DnsPolicy (api/dnspolicy) object + returned: success, changed + type: dict +''' + +from ansible.module_utils.basic import AnsibleModule +try: + from ansible_collections.community.general.plugins.module_utils.network.avi.avi import ( + avi_common_argument_spec, avi_ansible_api, HAS_AVI) +except ImportError: + HAS_AVI = False + + +def main(): + argument_specs = dict( + state=dict(default='present', + choices=['absent', 'present']), + avi_api_update_method=dict(default='put', + choices=['put', 'patch']), + avi_api_patch_op=dict(choices=['add', 'replace', 'delete']), + created_by=dict(type='str',), + description=dict(type='str',), + name=dict(type='str', required=True), + rule=dict(type='list',), + tenant_ref=dict(type='str',), + url=dict(type='str',), + uuid=dict(type='str',), + ) + argument_specs.update(avi_common_argument_spec()) + module = AnsibleModule( + argument_spec=argument_specs, supports_check_mode=True) + if not HAS_AVI: + return module.fail_json(msg=( + 'Avi python API SDK (avisdk>=17.1) or requests is not installed. ' + 'For more details visit https://github.com/avinetworks/sdk.')) + return avi_ansible_api(module, 'dnspolicy', + set([])) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/avi/avi_errorpagebody.py b/plugins/modules/network/avi/avi_errorpagebody.py new file mode 100644 index 0000000000..2404c98a84 --- /dev/null +++ b/plugins/modules/network/avi/avi_errorpagebody.py @@ -0,0 +1,121 @@ +#!/usr/bin/python +# +# @author: Gaurav Rastogi (grastogi@avinetworks.com) +# Eric Anderson (eanderson@avinetworks.com) +# module_check: supported +# +# Copyright: (c) 2017 Gaurav Rastogi, +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: avi_errorpagebody +author: Gaurav Rastogi (@grastogi23) + +short_description: Module for setup of ErrorPageBody Avi RESTful Object +description: + - This module is used to configure ErrorPageBody object + - more examples at U(https://github.com/avinetworks/devops) +requirements: [ avisdk ] +options: + state: + description: + - The state that should be applied on the entity. + default: present + choices: ["absent", "present"] + avi_api_update_method: + description: + - Default method for object update is HTTP PUT. + - Setting to patch will override that behavior to use HTTP PATCH. + default: put + choices: ["put", "patch"] + avi_api_patch_op: + description: + - Patch operation to use when using avi_api_update_method as patch. 
+ choices: ["add", "replace", "delete"] + error_page_body: + description: + - Error page body sent to client when match. + - Field introduced in 17.2.4. + format: + description: + - Format of an error page body html or json. + - Enum options - ERROR_PAGE_FORMAT_HTML, ERROR_PAGE_FORMAT_JSON. + - Field introduced in 18.2.3. + - Default value when not specified in API or module is interpreted by Avi Controller as ERROR_PAGE_FORMAT_HTML. + name: + description: + - Field introduced in 17.2.4. + required: true + tenant_ref: + description: + - It is a reference to an object of type tenant. + - Field introduced in 17.2.4. + url: + description: + - Avi controller URL of the object. + uuid: + description: + - Field introduced in 17.2.4. +extends_documentation_fragment: +- community.general.avi + +''' + +EXAMPLES = """ +- name: Example to create ErrorPageBody object + avi_errorpagebody: + controller: 10.10.25.42 + username: admin + password: something + state: present + name: sample_errorpagebody +""" + +RETURN = ''' +obj: + description: ErrorPageBody (api/errorpagebody) object + returned: success, changed + type: dict +''' + +from ansible.module_utils.basic import AnsibleModule +try: + from ansible_collections.community.general.plugins.module_utils.network.avi.avi import ( + avi_common_argument_spec, avi_ansible_api, HAS_AVI) +except ImportError: + HAS_AVI = False + + +def main(): + argument_specs = dict( + state=dict(default='present', + choices=['absent', 'present']), + avi_api_update_method=dict(default='put', + choices=['put', 'patch']), + avi_api_patch_op=dict(choices=['add', 'replace', 'delete']), + error_page_body=dict(type='str',), + format=dict(type='str',), + name=dict(type='str', required=True), + tenant_ref=dict(type='str',), + url=dict(type='str',), + uuid=dict(type='str',), + ) + argument_specs.update(avi_common_argument_spec()) + module = AnsibleModule( + argument_spec=argument_specs, supports_check_mode=True) + if not HAS_AVI: + return module.fail_json(msg=( + 'Avi python API SDK (avisdk>=17.1) or requests is not installed. ' + 'For more details visit https://github.com/avinetworks/sdk.')) + return avi_ansible_api(module, 'errorpagebody', + set([])) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/avi/avi_errorpageprofile.py b/plugins/modules/network/avi/avi_errorpageprofile.py new file mode 100644 index 0000000000..67cff153f2 --- /dev/null +++ b/plugins/modules/network/avi/avi_errorpageprofile.py @@ -0,0 +1,135 @@ +#!/usr/bin/python +# +# @author: Gaurav Rastogi (grastogi@avinetworks.com) +# Eric Anderson (eanderson@avinetworks.com) +# module_check: supported +# +# Copyright: (c) 2017 Gaurav Rastogi, +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: avi_errorpageprofile +author: Gaurav Rastogi (@grastogi23) + +short_description: Module for setup of ErrorPageProfile Avi RESTful Object +description: + - This module is used to configure ErrorPageProfile object + - more examples at U(https://github.com/avinetworks/devops) +requirements: [ avisdk ] +options: + state: + description: + - The state that should be applied on the entity. + default: present + choices: ["absent", "present"] + avi_api_update_method: + description: + - Default method for object update is HTTP PUT. + - Setting to patch will override that behavior to use HTTP PATCH. 
+ default: put + choices: ["put", "patch"] + avi_api_patch_op: + description: + - Patch operation to use when using avi_api_update_method as patch. + choices: ["add", "replace", "delete"] + app_name: + description: + - Name of the virtual service which generated the error page. + - Field deprecated in 18.1.1. + - Field introduced in 17.2.4. + - Default value when not specified in API or module is interpreted by Avi Controller as VS Name. + company_name: + description: + - Name of the company to show in error page. + - Field deprecated in 18.1.1. + - Field introduced in 17.2.4. + - Default value when not specified in API or module is interpreted by Avi Controller as Avi Networks. + error_pages: + description: + - Defined error pages for http status codes. + - Field introduced in 17.2.4. + host_name: + description: + - Fully qualified domain name for which the error page is generated. + - Field deprecated in 18.1.1. + - Field introduced in 17.2.4. + - Default value when not specified in API or module is interpreted by Avi Controller as Host Header. + name: + description: + - Field introduced in 17.2.4. + required: true + tenant_ref: + description: + - It is a reference to an object of type tenant. + - Field introduced in 17.2.4. + url: + description: + - Avi controller URL of the object. + uuid: + description: + - Field introduced in 17.2.4. +extends_documentation_fragment: +- community.general.avi + +''' + +EXAMPLES = """ +- name: Example to create ErrorPageProfile object + avi_errorpageprofile: + controller: 10.10.25.42 + username: admin + password: something + state: present + name: sample_errorpageprofile +""" + +RETURN = ''' +obj: + description: ErrorPageProfile (api/errorpageprofile) object + returned: success, changed + type: dict +''' + +from ansible.module_utils.basic import AnsibleModule +try: + from ansible_collections.community.general.plugins.module_utils.network.avi.avi import ( + avi_common_argument_spec, avi_ansible_api, HAS_AVI) +except ImportError: + HAS_AVI = False + + +def main(): + argument_specs = dict( + state=dict(default='present', + choices=['absent', 'present']), + avi_api_update_method=dict(default='put', + choices=['put', 'patch']), + avi_api_patch_op=dict(choices=['add', 'replace', 'delete']), + app_name=dict(type='str',), + company_name=dict(type='str',), + error_pages=dict(type='list',), + host_name=dict(type='str',), + name=dict(type='str', required=True), + tenant_ref=dict(type='str',), + url=dict(type='str',), + uuid=dict(type='str',), + ) + argument_specs.update(avi_common_argument_spec()) + module = AnsibleModule( + argument_spec=argument_specs, supports_check_mode=True) + if not HAS_AVI: + return module.fail_json(msg=( + 'Avi python API SDK (avisdk>=17.1) or requests is not installed. 
'
+            'For more details visit https://github.com/avinetworks/sdk.'))
+    return avi_ansible_api(module, 'errorpageprofile',
+                           set([]))
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/network/avi/avi_gslb.py b/plugins/modules/network/avi/avi_gslb.py
new file mode 100644
index 0000000000..186543e9b4
--- /dev/null
+++ b/plugins/modules/network/avi/avi_gslb.py
@@ -0,0 +1,354 @@
+#!/usr/bin/python
+#
+# @author: Gaurav Rastogi (grastogi@avinetworks.com)
+#          Eric Anderson (eanderson@avinetworks.com)
+# module_check: supported
+# Avi Version: 17.1.1
+#
+# Copyright: (c) 2017 Gaurav Rastogi,
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+DOCUMENTATION = '''
+---
+module: avi_gslb
+author: Gaurav Rastogi (@grastogi23)
+
+short_description: Module for setup of Gslb Avi RESTful Object
+description:
+    - This module is used to configure Gslb object
+    - more examples at U(https://github.com/avinetworks/devops)
+requirements: [ avisdk ]
+options:
+    state:
+        description:
+            - The state that should be applied on the entity.
+        default: present
+        choices: ["absent", "present"]
+    avi_api_update_method:
+        description:
+            - Default method for object update is HTTP PUT.
+            - Setting to patch will override that behavior to use HTTP PATCH.
+        default: put
+        choices: ["put", "patch"]
+    avi_api_patch_op:
+        description:
+            - Patch operation to use when using avi_api_update_method as patch.
+        choices: ["add", "replace", "delete"]
+    async_interval:
+        description:
+            - Frequency with which messages are propagated to vs mgr.
+            - Value of 0 disables async behavior and rpc are sent inline.
+            - Allowed values are 0-5.
+            - Field introduced in 18.2.3.
+            - Default value when not specified in API or module is interpreted by Avi Controller as 0.
+    clear_on_max_retries:
+        description:
+            - Max retries after which the remote site is treated as a fresh start.
+            - In fresh start all the configs are downloaded.
+            - Allowed values are 1-1024.
+            - Default value when not specified in API or module is interpreted by Avi Controller as 20.
+    client_ip_addr_group:
+        description:
+            - Group to specify if the client ip addresses are public or private.
+            - Field introduced in 17.1.2.
+    description:
+        description:
+            - User defined description for the object.
+    dns_configs:
+        description:
+            - Sub domain configuration for the gslb.
+            - Gslb service's fqdn must match one of these subdomains.
+    is_federated:
+        description:
+            - This field indicates that this object is replicated across gslb federation.
+            - Field introduced in 17.1.3.
+            - Default value when not specified in API or module is interpreted by Avi Controller as True.
+        type: bool
+    leader_cluster_uuid:
+        description:
+            - Mark this site as leader of gslb configuration.
+            - The leader site must be one of the avi sites.
+        required: true
+    maintenance_mode:
+        description:
+            - This field disables the configuration operations on the leader for all federated objects.
+            - Create, update and delete (cud) operations on gslb, gslbservice, gslbgeodbprofile and other federated objects will be rejected.
+            - The rest-api disabling helps in upgrade scenarios where we don't want configuration sync operations to the gslb member when the member is being upgraded.
+            - This configuration programmatically blocks the leader from accepting new gslb configuration when member sites are undergoing upgrade.
+            - Field introduced in 17.2.1.
+ - Default value when not specified in API or module is interpreted by Avi Controller as False. + type: bool + name: + description: + - Name for the gslb object. + required: true + send_interval: + description: + - Frequency with which group members communicate. + - Allowed values are 1-3600. + - Default value when not specified in API or module is interpreted by Avi Controller as 15. + send_interval_prior_to_maintenance_mode: + description: + - The user can specify a send-interval while entering maintenance mode. + - The validity of this 'maintenance send-interval' is only during maintenance mode. + - When the user leaves maintenance mode, the original send-interval is reinstated. + - This internal variable is used to store the original send-interval. + - Field introduced in 18.2.3. + sites: + description: + - Select avi site member belonging to this gslb. + tenant_ref: + description: + - It is a reference to an object of type tenant. + third_party_sites: + description: + - Third party site member belonging to this gslb. + - Field introduced in 17.1.1. + url: + description: + - Avi controller URL of the object. + uuid: + description: + - Uuid of the gslb object. + view_id: + description: + - The view-id is used in change-leader mode to differentiate partitioned groups while they have the same gslb namespace. + - Each partitioned group will be able to operate independently by using the view-id. + - Default value when not specified in API or module is interpreted by Avi Controller as 0. +extends_documentation_fragment: +- community.general.avi + +''' + +EXAMPLES = """ +- name: Example to create Gslb object + avi_gslb: + name: "test-gslb" + avi_credentials: + username: '{{ username }}' + password: '{{ password }}' + controller: '{{ controller }}' + sites: + - name: "test-site1" + username: "gslb_username" + password: "gslb_password" + ip_addresses: + - type: "V4" + addr: "10.10.28.83" + enabled: True + member_type: "GSLB_ACTIVE_MEMBER" + port: 443 + cluster_uuid: "cluster-d4ee5fcc-3e0a-4d4f-9ae6-4182bc605829" + - name: "test-site2" + username: "gslb_username" + password: "gslb_password" + ip_addresses: + - type: "V4" + addr: "10.10.28.86" + enabled: True + member_type: "GSLB_ACTIVE_MEMBER" + port: 443 + cluster_uuid: "cluster-0c37ae8d-ab62-410c-ad3e-06fa831950b1" + dns_configs: + - domain_name: "test1.com" + - domain_name: "test2.com" + leader_cluster_uuid: "cluster-d4ee5fcc-3e0a-4d4f-9ae6-4182bc605829" + +- name: Update Gslb site's configurations (Patch Add Operation) + avi_gslb: + avi_credentials: + username: '{{ username }}' + password: '{{ password }}' + controller: '{{ controller }}' + avi_api_update_method: patch + avi_api_patch_op: add + leader_cluster_uuid: "cluster-d4ee5fcc-3e0a-4d4f-9ae6-4182bc605829" + name: "test-gslb" + dns_configs: + - domain_name: "temp1.com" + - domain_name: "temp2.com" + gslb_sites_config: + - ip_addr: "10.10.28.83" + dns_vses: + - dns_vs_uuid: "virtualservice-f2a711cd-5e78-473f-8f47-d12de660fd62" + domain_names: + - "test1.com" + - "test2.com" + - ip_addr: "10.10.28.86" + dns_vses: + - dns_vs_uuid: "virtualservice-c1a63a16-f2a1-4f41-aab4-1e90f92a5e49" + domain_names: + - "temp1.com" + - "temp2.com" + +- name: Update Gslb site's configurations (Patch Replace Operation) + avi_gslb: + avi_credentials: + username: "{{ username }}" + password: "{{ password }}" + controller: "{{ controller }}" + # On basis of cluster leader uuid dns_configs is set for that particular leader cluster + leader_cluster_uuid: "cluster-84aa795f-8f09-42bb-97a4-5103f4a53da9" + name: 
"test-gslb" + avi_api_update_method: patch + avi_api_patch_op: replace + dns_configs: + - domain_name: "test3.com" + - domain_name: "temp3.com" + gslb_sites_config: + # Ip address is mapping key for dns_vses field update. For the given IP address, + # dns_vses is updated. + - ip_addr: "10.10.28.83" + dns_vses: + - dns_vs_uuid: "virtualservice-7c947ed4-77f3-4a52-909c-4f12afaf5bb0" + domain_names: + - "test3.com" + - ip_addr: "10.10.28.86" + dns_vses: + - dns_vs_uuid: "virtualservice-799b2c6d-7f2d-4c3f-94c6-6e813b20b674" + domain_names: + - "temp3.com" + +- name: Update Gslb site's configurations (Patch Delete Operation) + avi_gslb: + avi_credentials: + username: "{{ username }}" + password: "{{ password }}" + controller: "{{ controller }}" + # On basis of cluster leader uuid dns_configs is set for that particular leader cluster + leader_cluster_uuid: "cluster-84aa795f-8f09-42bb-97a4-5103f4a53da9" + name: "test-gslb" + avi_api_update_method: patch + avi_api_patch_op: delete + dns_configs: + gslb_sites_config: + - ip_addr: "10.10.28.83" + - ip_addr: "10.10.28.86" +""" + +RETURN = ''' +obj: + description: Gslb (api/gslb) object + returned: success, changed + type: dict +''' + +from ansible.module_utils.basic import AnsibleModule +try: + from ansible_collections.community.general.plugins.module_utils.network.avi.avi import ( + avi_common_argument_spec, avi_ansible_api, HAS_AVI) + from ansible_collections.community.general.plugins.module_utils.network.avi.avi_api import ApiSession, AviCredentials +except ImportError: + HAS_AVI = False + + +def main(): + argument_specs = dict( + state=dict(default='present', + choices=['absent', 'present']), + avi_api_update_method=dict(default='put', + choices=['put', 'patch']), + avi_api_patch_op=dict(choices=['add', 'replace', 'delete']), + async_interval=dict(type='int',), + clear_on_max_retries=dict(type='int',), + client_ip_addr_group=dict(type='dict',), + description=dict(type='str',), + dns_configs=dict(type='list',), + is_federated=dict(type='bool',), + leader_cluster_uuid=dict(type='str', required=True), + maintenance_mode=dict(type='bool',), + name=dict(type='str', required=True), + send_interval=dict(type='int',), + send_interval_prior_to_maintenance_mode=dict(type='int',), + sites=dict(type='list',), + tenant_ref=dict(type='str',), + third_party_sites=dict(type='list',), + url=dict(type='str',), + uuid=dict(type='str',), + view_id=dict(type='int',), + ) + argument_specs.update(avi_common_argument_spec()) + module = AnsibleModule( + argument_spec=argument_specs, supports_check_mode=True) + if not HAS_AVI: + return module.fail_json(msg=( + 'Avi python API SDK (avisdk>=17.1) or requests is not installed. ' + 'For more details visit https://github.com/avinetworks/sdk.')) + api_method = module.params['avi_api_update_method'] + if str(api_method).lower() == 'patch': + patch_op = module.params['avi_api_patch_op'] + # Create controller session + api_creds = AviCredentials() + api_creds.update_from_ansible_module(module) + api = ApiSession.get_session( + api_creds.controller, api_creds.username, password=api_creds.password, + timeout=api_creds.timeout, tenant=api_creds.tenant, + tenant_uuid=api_creds.tenant_uuid, token=api_creds.token, + port=api_creds.port) + # Get existing gslb objects + rsp = api.get('gslb', api_version=api_creds.api_version) + existing_gslb = rsp.json() + gslb = existing_gslb['results'] + sites = module.params['gslb_sites_config'] + for gslb_obj in gslb: + # Update/Delete domain names in dns_configs fields in gslb object. 
+            if 'dns_configs' in module.params:
+                if gslb_obj['leader_cluster_uuid'] == module.params['leader_cluster_uuid']:
+                    if str(patch_op).lower() == 'delete':
+                        gslb_obj['dns_configs'] = []
+                    elif str(patch_op).lower() == 'add':
+                        # Only extend with entries that are not present yet; the
+                        # uni_dns_configs step below drops any remaining duplicates.
+                        for cfg in module.params['dns_configs']:
+                            if cfg not in gslb_obj['dns_configs']:
+                                gslb_obj['dns_configs'].append(cfg)
+                    else:
+                        gslb_obj['dns_configs'] = module.params['dns_configs']
+            # Update/Delete sites configuration
+            if sites:
+                for site_obj in gslb_obj['sites']:
+                    dns_vses = site_obj.get('dns_vses', [])
+                    for obj in sites:
+                        config_for = obj.get('ip_addr', None)
+                        if not config_for:
+                            return module.fail_json(msg=(
+                                "ip_addr of site in a configuration is mandatory. "
+                                "Please provide ip_addr i.e. gslb site's ip."))
+                        if config_for == site_obj['ip_addresses'][0]['addr']:
+                            if str(patch_op).lower() == 'delete':
+                                site_obj['dns_vses'] = []
+                            else:
+                                # Modify existing gslb sites object
+                                for key, val in obj.items():
+                                    if key == 'dns_vses' and str(patch_op).lower() == 'add':
+                                        found = False
+                                        # Skip the patch entry when this dns_vs already
+                                        # exists on the controller
+                                        for v in dns_vses:
+                                            if val[0]['dns_vs_uuid'] == v['dns_vs_uuid']:
+                                                found = True
+                                                break
+                                        if not found:
+                                            dns_vses.extend(val)
+                                    else:
+                                        site_obj[key] = val
+                                if str(patch_op).lower() == 'add':
+                                    site_obj['dns_vses'] = dns_vses
+            uni_dns_configs = [dict(tupleized) for tupleized in set(tuple(item.items())
+                                                                    for item in gslb_obj['dns_configs'])]
+            gslb_obj['dns_configs'] = uni_dns_configs
+            module.params.update(gslb_obj)
+            module.params.update(
+                {
+                    'avi_api_update_method': 'put',
+                    'state': 'present'
+                }
+            )
+    return avi_ansible_api(module, 'gslb',
+                           set([]))
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/network/avi/avi_gslbgeodbprofile.py b/plugins/modules/network/avi/avi_gslbgeodbprofile.py
new file mode 100644
index 0000000000..8db544ff2c
--- /dev/null
+++ b/plugins/modules/network/avi/avi_gslbgeodbprofile.py
@@ -0,0 +1,129 @@
+#!/usr/bin/python
+#
+# @author: Gaurav Rastogi (grastogi@avinetworks.com)
+#          Eric Anderson (eanderson@avinetworks.com)
+# module_check: supported
+# Avi Version: 17.1.2
+#
+# Copyright: (c) 2017 Gaurav Rastogi,
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+DOCUMENTATION = '''
+---
+module: avi_gslbgeodbprofile
+author: Gaurav Rastogi (@grastogi23)
+
+short_description: Module for setup of GslbGeoDbProfile Avi RESTful Object
+description:
+    - This module is used to configure GslbGeoDbProfile object
+    - more examples at U(https://github.com/avinetworks/devops)
+requirements: [ avisdk ]
+options:
+    state:
+        description:
+            - The state that should be applied on the entity.
+        default: present
+        choices: ["absent", "present"]
+    avi_api_update_method:
+        description:
+            - Default method for object update is HTTP PUT.
+            - Setting to patch will override that behavior to use HTTP PATCH.
+        default: put
+        choices: ["put", "patch"]
+    avi_api_patch_op:
+        description:
+            - Patch operation to use when using avi_api_update_method as patch.
+        choices: ["add", "replace", "delete"]
+    description:
+        description:
+            - Field introduced in 17.1.1.
+    entries:
+        description:
+            - List of geodb entries.
+            - An entry can either be a geodb file or an ip address group with geo properties.
+            - Field introduced in 17.1.1.
+    is_federated:
+        description:
+            - This field indicates that this object is replicated across gslb federation.
+ - Field introduced in 17.1.3. + - Default value when not specified in API or module is interpreted by Avi Controller as True. + type: bool + name: + description: + - A user-friendly name for the geodb profile. + - Field introduced in 17.1.1. + required: true + tenant_ref: + description: + - It is a reference to an object of type tenant. + - Field introduced in 17.1.1. + url: + description: + - Avi controller URL of the object. + uuid: + description: + - Uuid of the geodb profile. + - Field introduced in 17.1.1. +extends_documentation_fragment: +- community.general.avi + +''' + +EXAMPLES = """ +- name: Example to create GslbGeoDbProfile object + avi_gslbgeodbprofile: + controller: 10.10.25.42 + username: admin + password: something + state: present + name: sample_gslbgeodbprofile +""" + +RETURN = ''' +obj: + description: GslbGeoDbProfile (api/gslbgeodbprofile) object + returned: success, changed + type: dict +''' + +from ansible.module_utils.basic import AnsibleModule +try: + from ansible_collections.community.general.plugins.module_utils.network.avi.avi import ( + avi_common_argument_spec, avi_ansible_api, HAS_AVI) +except ImportError: + HAS_AVI = False + + +def main(): + argument_specs = dict( + state=dict(default='present', + choices=['absent', 'present']), + avi_api_update_method=dict(default='put', + choices=['put', 'patch']), + avi_api_patch_op=dict(choices=['add', 'replace', 'delete']), + description=dict(type='str',), + entries=dict(type='list',), + is_federated=dict(type='bool',), + name=dict(type='str', required=True), + tenant_ref=dict(type='str',), + url=dict(type='str',), + uuid=dict(type='str',), + ) + argument_specs.update(avi_common_argument_spec()) + module = AnsibleModule( + argument_spec=argument_specs, supports_check_mode=True) + if not HAS_AVI: + return module.fail_json(msg=( + 'Avi python API SDK (avisdk>=17.1) or requests is not installed. ' + 'For more details visit https://github.com/avinetworks/sdk.')) + return avi_ansible_api(module, 'gslbgeodbprofile', + set([])) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/avi/avi_gslbservice.py b/plugins/modules/network/avi/avi_gslbservice.py new file mode 100644 index 0000000000..71de354626 --- /dev/null +++ b/plugins/modules/network/avi/avi_gslbservice.py @@ -0,0 +1,230 @@ +#!/usr/bin/python +# +# @author: Gaurav Rastogi (grastogi@avinetworks.com) +# Eric Anderson (eanderson@avinetworks.com) +# module_check: supported +# Avi Version: 17.1.1 +# +# Copyright: (c) 2017 Gaurav Rastogi, +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: avi_gslbservice +author: Gaurav Rastogi (@grastogi23) + +short_description: Module for setup of GslbService Avi RESTful Object +description: + - This module is used to configure GslbService object + - more examples at U(https://github.com/avinetworks/devops) +requirements: [ avisdk ] +options: + state: + description: + - The state that should be applied on the entity. + default: present + choices: ["absent", "present"] + avi_api_update_method: + description: + - Default method for object update is HTTP PUT. + - Setting to patch will override that behavior to use HTTP PATCH. + default: put + choices: ["put", "patch"] + avi_api_patch_op: + description: + - Patch operation to use when using avi_api_update_method as patch. 
+ choices: ["add", "replace", "delete"] + application_persistence_profile_ref: + description: + - The federated application persistence associated with gslbservice site persistence functionality. + - It is a reference to an object of type applicationpersistenceprofile. + - Field introduced in 17.2.1. + controller_health_status_enabled: + description: + - Gs member's overall health status is derived based on a combination of controller and datapath health-status inputs. + - Note that the datapath status is determined by the association of health monitor profiles. + - Only the controller provided status is determined through this configuration. + - Default value when not specified in API or module is interpreted by Avi Controller as True. + type: bool + created_by: + description: + - Creator name. + - Field introduced in 17.1.2. + description: + description: + - User defined description for the object. + domain_names: + description: + - Fully qualified domain name of the gslb service. + down_response: + description: + - Response to the client query when the gslb service is down. + enabled: + description: + - Enable or disable the gslb service. + - If the gslb service is enabled, then the vips are sent in the dns responses based on reachability and configured algorithm. + - If the gslb service is disabled, then the vips are no longer available in the dns response. + - Default value when not specified in API or module is interpreted by Avi Controller as True. + type: bool + groups: + description: + - Select list of pools belonging to this gslb service. + health_monitor_refs: + description: + - Verify vs health by applying one or more health monitors. + - Active monitors generate synthetic traffic from dns service engine and to mark a vs up or down based on the response. + - It is a reference to an object of type healthmonitor. + health_monitor_scope: + description: + - Health monitor probe can be executed for all the members or it can be executed only for third-party members. + - This operational mode is useful to reduce the number of health monitor probes in case of a hybrid scenario. + - In such a case, avi members can have controller derived status while non-avi members can be probed by via health monitor probes in dataplane. + - Enum options - GSLB_SERVICE_HEALTH_MONITOR_ALL_MEMBERS, GSLB_SERVICE_HEALTH_MONITOR_ONLY_NON_AVI_MEMBERS. + - Default value when not specified in API or module is interpreted by Avi Controller as GSLB_SERVICE_HEALTH_MONITOR_ALL_MEMBERS. + hm_off: + description: + - This field is an internal field and is used in se. + - Field introduced in 18.2.2. + type: bool + is_federated: + description: + - This field indicates that this object is replicated across gslb federation. + - Field introduced in 17.1.3. + - Default value when not specified in API or module is interpreted by Avi Controller as True. + type: bool + min_members: + description: + - The minimum number of members to distribute traffic to. + - Allowed values are 1-65535. + - Special values are 0 - 'disable'. + - Field introduced in 17.2.4. + - Default value when not specified in API or module is interpreted by Avi Controller as 0. + name: + description: + - Name for the gslb service. + required: true + num_dns_ip: + description: + - Number of ip addresses of this gslb service to be returned by the dns service. + - Enter 0 to return all ip addresses. + - Allowed values are 1-20. + - Special values are 0- 'return all ip addresses'. 
+ pool_algorithm: + description: + - The load balancing algorithm will pick a gslb pool within the gslb service list of available pools. + - Enum options - GSLB_SERVICE_ALGORITHM_PRIORITY, GSLB_SERVICE_ALGORITHM_GEO. + - Field introduced in 17.2.3. + - Default value when not specified in API or module is interpreted by Avi Controller as GSLB_SERVICE_ALGORITHM_PRIORITY. + site_persistence_enabled: + description: + - Enable site-persistence for the gslbservice. + - Field introduced in 17.2.1. + - Default value when not specified in API or module is interpreted by Avi Controller as False. + type: bool + tenant_ref: + description: + - It is a reference to an object of type tenant. + ttl: + description: + - Ttl value (in seconds) for records served for this gslb service by the dns service. + - Allowed values are 0-86400. + url: + description: + - Avi controller URL of the object. + use_edns_client_subnet: + description: + - Use the client ip subnet from the edns option as source ipaddress for client geo-location and consistent hash algorithm. + - Default is true. + - Field introduced in 17.1.1. + - Default value when not specified in API or module is interpreted by Avi Controller as True. + type: bool + uuid: + description: + - Uuid of the gslb service. + wildcard_match: + description: + - Enable wild-card match of fqdn if an exact match is not found in the dns table, the longest match is chosen by wild-carding the fqdn in the dns + - request. + - Default is false. + - Field introduced in 17.1.1. + - Default value when not specified in API or module is interpreted by Avi Controller as False. + type: bool +extends_documentation_fragment: +- community.general.avi + +''' + +EXAMPLES = """ +- name: Example to create GslbService object + avi_gslbservice: + controller: 10.10.25.42 + username: admin + password: something + state: present + name: sample_gslbservice +""" + +RETURN = ''' +obj: + description: GslbService (api/gslbservice) object + returned: success, changed + type: dict +''' + +from ansible.module_utils.basic import AnsibleModule +try: + from ansible_collections.community.general.plugins.module_utils.network.avi.avi import ( + avi_common_argument_spec, avi_ansible_api, HAS_AVI) +except ImportError: + HAS_AVI = False + + +def main(): + argument_specs = dict( + state=dict(default='present', + choices=['absent', 'present']), + avi_api_update_method=dict(default='put', + choices=['put', 'patch']), + avi_api_patch_op=dict(choices=['add', 'replace', 'delete']), + application_persistence_profile_ref=dict(type='str',), + controller_health_status_enabled=dict(type='bool',), + created_by=dict(type='str',), + description=dict(type='str',), + domain_names=dict(type='list',), + down_response=dict(type='dict',), + enabled=dict(type='bool',), + groups=dict(type='list',), + health_monitor_refs=dict(type='list',), + health_monitor_scope=dict(type='str',), + hm_off=dict(type='bool',), + is_federated=dict(type='bool',), + min_members=dict(type='int',), + name=dict(type='str', required=True), + num_dns_ip=dict(type='int',), + pool_algorithm=dict(type='str',), + site_persistence_enabled=dict(type='bool',), + tenant_ref=dict(type='str',), + ttl=dict(type='int',), + url=dict(type='str',), + use_edns_client_subnet=dict(type='bool',), + uuid=dict(type='str',), + wildcard_match=dict(type='bool',), + ) + argument_specs.update(avi_common_argument_spec()) + module = AnsibleModule( + argument_spec=argument_specs, supports_check_mode=True) + if not HAS_AVI: + return module.fail_json(msg=( + 'Avi python API SDK 
(avisdk>=17.1) or requests is not installed. ' + 'For more details visit https://github.com/avinetworks/sdk.')) + return avi_ansible_api(module, 'gslbservice', + set([])) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/avi/avi_gslbservice_patch_member.py b/plugins/modules/network/avi/avi_gslbservice_patch_member.py new file mode 100644 index 0000000000..eb23ded793 --- /dev/null +++ b/plugins/modules/network/avi/avi_gslbservice_patch_member.py @@ -0,0 +1,294 @@ +#!/usr/bin/python +""" +# Created on Aug 12, 2016 +# +# @author: Gaurav Rastogi (grastogi@avinetworks.com) GitHub ID: grastogi23 +# +# module_check: supported +# +# Copyright: (c) 2016 Gaurav Rastogi, +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# +""" + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: avi_gslbservice_patch_member +author: Gaurav Rastogi (@grastogi23) + +short_description: Avi API Module +description: + - This module can be used for calling any resources defined in Avi REST API. U(https://avinetworks.com/) + - This module is useful for invoking HTTP Patch methods and accessing resources that do not have an REST object associated with them. +requirements: [ avisdk ] +options: + data: + description: + - HTTP body of GSLB Service Member in YAML or JSON format. + params: + description: + - Query parameters passed to the HTTP API. + name: + description: + - Name of the GSLB Service + required: true + state: + description: + - The state that should be applied to the member. Member is + - identified using field member.ip.addr. + default: present + choices: ["absent","present"] +extends_documentation_fragment: +- community.general.avi + +''' + +EXAMPLES = ''' + - name: Patch GSLB Service to add a new member and group + avi_gslbservice_patch_member: + controller: "{{ controller }}" + username: "{{ username }}" + password: "{{ password }}" + name: gs-3 + api_version: 17.2.1 + data: + group: + name: newfoo + priority: 60 + members: + - enabled: true + ip: + addr: 10.30.10.66 + type: V4 + ratio: 3 + - name: Patch GSLB Service to delete an existing member + avi_gslbservice_patch_member: + controller: "{{ controller }}" + username: "{{ username }}" + password: "{{ password }}" + name: gs-3 + state: absent + api_version: 17.2.1 + data: + group: + name: newfoo + members: + - enabled: true + ip: + addr: 10.30.10.68 + type: V4 + ratio: 3 + - name: Update priority of GSLB Service Pool + avi_gslbservice_patch_member: + controller: "" + username: "" + password: "" + name: gs-3 + state: present + api_version: 17.2.1 + data: + group: + name: newfoo + priority: 42 +''' + + +RETURN = ''' +obj: + description: Avi REST resource + returned: success, changed + type: dict +''' + +import json +import time +from ansible.module_utils.basic import AnsibleModule +from copy import deepcopy + +try: + from ansible_collections.community.general.plugins.module_utils.network.avi.avi import ( + avi_common_argument_spec, avi_obj_cmp, cleanup_absent_fields, + ansible_return, AviCheckModeResponse, HAS_AVI) + from ansible_collections.community.general.plugins.module_utils.network.avi.avi_api import ( + ApiSession, AviCredentials) +except ImportError: + HAS_AVI = False + + +def delete_member(module, check_mode, api, tenant, tenant_uuid, + existing_obj, data, api_version): + members = data.get('group', {}).get('members', []) + patched_member_ids = set([m['ip']['addr'] for m in members if 
'fqdn' not in m])
+    patched_member_fqdns = set([m['fqdn'] for m in members if 'fqdn' in m])
+
+    changed = False
+    rsp = None
+
+    if existing_obj and (patched_member_ids or patched_member_fqdns):
+        groups = [group for group in existing_obj.get('groups', [])
+                  if group['name'] == data['group']['name']]
+        if groups:
+            changed = any(
+                m['ip']['addr'] in patched_member_ids
+                for m in groups[0].get('members', []) if 'fqdn' not in m)
+            changed = changed or any(
+                m['fqdn'] in patched_member_fqdns
+                for m in groups[0].get('members', []) if 'fqdn' in m)
+            if check_mode or not changed:
+                return changed, rsp
+            # A member matched above, so the group is guaranteed to be here.
+            group = groups[0]
+            new_members = []
+            for m in group.get('members', []):
+                if 'fqdn' in m:
+                    if m['fqdn'] not in patched_member_fqdns:
+                        new_members.append(m)
+                elif 'ip' in m:
+                    if m['ip']['addr'] not in patched_member_ids:
+                        new_members.append(m)
+            group['members'] = new_members
+            if not group['members']:
+                # Delete this group from the existing objects if it is empty.
+                # Controller also does not allow empty group.
+                existing_obj['groups'] = [
+                    grp for grp in existing_obj.get('groups', []) if
+                    grp['name'] != data['group']['name']]
+            # Write the object back, minus the removed members, passing the
+            # API version explicitly on the call.
+            rsp = api.put('gslbservice/%s' % existing_obj['uuid'], data=existing_obj,
+                          tenant=tenant, tenant_uuid=tenant_uuid, api_version=api_version)
+    return changed, rsp
+
+
+def add_member(module, check_mode, api, tenant, tenant_uuid,
+               existing_obj, data, name, api_version):
+    rsp = None
+    if not existing_obj:
+        # create the object
+        changed = True
+        if check_mode:
+            rsp = AviCheckModeResponse(obj=None)
+        else:
+            # creates group with single member
+            req = {'name': name,
+                   'groups': [data['group']]
+                   }
+            # Pass the API version explicitly on the create call.
+            rsp = api.post('gslbservice', data=req, tenant=tenant,
+                           tenant_uuid=tenant_uuid, api_version=api_version)
+    else:
+        # found GSLB object
+        req = deepcopy(existing_obj)
+        if 'groups' not in req:
+            req['groups'] = []
+        groups = [group for group in req['groups']
+                  if group['name'] == data['group']['name']]
+        if not groups:
+            # did not find the group
+            req['groups'].append(data['group'])
+        else:
+            # just update the existing group with members
+            group = groups[0]
+            group_info_wo_members = deepcopy(data['group'])
+            group_info_wo_members.pop('members', None)
+            group.update(group_info_wo_members)
+            if 'members' not in group:
+                group['members'] = []
+            new_members = []
+            for patch_member in data['group'].get('members', []):
+                found = False
+                for m in group['members']:
+                    if 'fqdn' in patch_member and m.get('fqdn', '') == patch_member['fqdn']:
+                        found = True
+                        break
+                    # Only compare addresses when both sides carry an ip;
+                    # fqdn-only members would otherwise raise a KeyError.
+                    elif 'ip' in m and 'ip' in patch_member and \
+                            m['ip']['addr'] == patch_member['ip']['addr']:
+                        found = True
+                        break
+                if not found:
+                    new_members.append(patch_member)
+                else:
+                    m.update(patch_member)
+            # add any new members
+            group['members'].extend(new_members)
+        cleanup_absent_fields(req)
+        changed = not avi_obj_cmp(req, existing_obj)
+        if changed and not check_mode:
+            obj_path = '%s/%s' % ('gslbservice', existing_obj['uuid'])
+            # Write the merged object back with an explicit api_version.
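+            # Note: this PUT only runs when avi_obj_cmp() reported a difference
+            # between the merged request and the object on the controller, which
+            # is what keeps repeated runs of this task idempotent.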
+            rsp = api.put(obj_path, data=req,
+                          tenant=tenant, tenant_uuid=tenant_uuid, api_version=api_version)
+    return changed, rsp
+
+
+def main():
+    argument_specs = dict(
+        params=dict(type='dict'),
+        data=dict(type='dict'),
+        name=dict(type='str', required=True),
+        state=dict(default='present',
+                   choices=['absent', 'present'])
+    )
+    argument_specs.update(avi_common_argument_spec())
+    # The helper functions above implement their own check-mode handling, so
+    # advertise check-mode support to Ansible.
+    module = AnsibleModule(argument_spec=argument_specs,
+                           supports_check_mode=True)
+    if not HAS_AVI:
+        return module.fail_json(msg=(
+            'Avi python API SDK (avisdk>=17.1) or ansible>=2.8 is not installed. '
+            'For more details visit https://github.com/avinetworks/sdk.'))
+    api_creds = AviCredentials()
+    api_creds.update_from_ansible_module(module)
+    api = ApiSession.get_session(
+        api_creds.controller, api_creds.username, password=api_creds.password,
+        timeout=api_creds.timeout, tenant=api_creds.tenant,
+        tenant_uuid=api_creds.tenant_uuid, token=api_creds.token,
+        port=api_creds.port)
+
+    tenant = api_creds.tenant
+    tenant_uuid = api_creds.tenant_uuid
+    params = module.params.get('params', None)
+    data = module.params.get('data', None)
+    gparams = deepcopy(params) if params else {}
+    gparams.update({'include_refs': '', 'include_name': ''})
+    name = module.params.get('name', '')
+    state = module.params['state']
+    # Get the api version from module.
+    api_version = api_creds.api_version
+    """
+    state: present
+    1. Check if the GSLB service is present.
+    2. If not, create the GSLB service with the member.
+    3. Check if the group exists.
+    4. If not, create the group with the member.
+    5. Check if the member is present; if not, add the member.
+    state: absent
+    1. Check if the GSLB service is present; if not, exit.
+    2. Check if the group is present; if not, exit.
+    3. Check if the member is present; if present, remove it.
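+
+    In both branches the member to act on is identified by its ip.addr, or by
+    fqdn when one is supplied, so the entries under data.group.members only
+    need those identifying fields plus whatever should change.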
+ """ + obj_type = 'gslbservice' + # Added api version to call + existing_obj = api.get_object_by_name( + obj_type, name, tenant=tenant, tenant_uuid=tenant_uuid, + params={'include_refs': '', 'include_name': ''}, api_version=api_version) + check_mode = module.check_mode + if state == 'absent': + # Added api version to call + changed, rsp = delete_member(module, check_mode, api, tenant, + tenant_uuid, existing_obj, data, api_version) + else: + # Added api version to call + changed, rsp = add_member(module, check_mode, api, tenant, tenant_uuid, + existing_obj, data, name, api_version) + if check_mode or not changed: + return module.exit_json(changed=changed, obj=existing_obj) + return ansible_return(module, rsp, changed, req=data) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/avi/avi_hardwaresecuritymodulegroup.py b/plugins/modules/network/avi/avi_hardwaresecuritymodulegroup.py new file mode 100644 index 0000000000..e8502187bb --- /dev/null +++ b/plugins/modules/network/avi/avi_hardwaresecuritymodulegroup.py @@ -0,0 +1,113 @@ +#!/usr/bin/python +# +# @author: Gaurav Rastogi (grastogi@avinetworks.com) +# Eric Anderson (eanderson@avinetworks.com) +# module_check: supported +# +# Copyright: (c) 2017 Gaurav Rastogi, +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: avi_hardwaresecuritymodulegroup +author: Gaurav Rastogi (@grastogi23) + +short_description: Module for setup of HardwareSecurityModuleGroup Avi RESTful Object +description: + - This module is used to configure HardwareSecurityModuleGroup object + - more examples at U(https://github.com/avinetworks/devops) +requirements: [ avisdk ] +options: + state: + description: + - The state that should be applied on the entity. + default: present + choices: ["absent", "present"] + avi_api_update_method: + description: + - Default method for object update is HTTP PUT. + - Setting to patch will override that behavior to use HTTP PATCH. + default: put + choices: ["put", "patch"] + avi_api_patch_op: + description: + - Patch operation to use when using avi_api_update_method as patch. + choices: ["add", "replace", "delete"] + hsm: + description: + - Hardware security module configuration. + required: true + name: + description: + - Name of the hsm group configuration object. + required: true + tenant_ref: + description: + - It is a reference to an object of type tenant. + url: + description: + - Avi controller URL of the object. + uuid: + description: + - Uuid of the hsm group configuration object. 
+extends_documentation_fragment: +- community.general.avi + +''' + +EXAMPLES = """ +- name: Example to create HardwareSecurityModuleGroup object + avi_hardwaresecuritymodulegroup: + controller: 10.10.25.42 + username: admin + password: something + state: present + name: sample_hardwaresecuritymodulegroup +""" + +RETURN = ''' +obj: + description: HardwareSecurityModuleGroup (api/hardwaresecuritymodulegroup) object + returned: success, changed + type: dict +''' + +from ansible.module_utils.basic import AnsibleModule +try: + from ansible_collections.community.general.plugins.module_utils.network.avi.avi import ( + avi_common_argument_spec, avi_ansible_api, HAS_AVI) +except ImportError: + HAS_AVI = False + + +def main(): + argument_specs = dict( + state=dict(default='present', + choices=['absent', 'present']), + avi_api_update_method=dict(default='put', + choices=['put', 'patch']), + avi_api_patch_op=dict(choices=['add', 'replace', 'delete']), + hsm=dict(type='dict', required=True), + name=dict(type='str', required=True), + tenant_ref=dict(type='str',), + url=dict(type='str',), + uuid=dict(type='str',), + ) + argument_specs.update(avi_common_argument_spec()) + module = AnsibleModule( + argument_spec=argument_specs, supports_check_mode=True) + if not HAS_AVI: + return module.fail_json(msg=( + 'Avi python API SDK (avisdk>=17.1) or requests is not installed. ' + 'For more details visit https://github.com/avinetworks/sdk.')) + return avi_ansible_api(module, 'hardwaresecuritymodulegroup', + set([])) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/avi/avi_healthmonitor.py b/plugins/modules/network/avi/avi_healthmonitor.py new file mode 100644 index 0000000000..de273fe1de --- /dev/null +++ b/plugins/modules/network/avi/avi_healthmonitor.py @@ -0,0 +1,205 @@ +#!/usr/bin/python +# +# @author: Gaurav Rastogi (grastogi@avinetworks.com) +# Eric Anderson (eanderson@avinetworks.com) +# module_check: supported +# Avi Version: 17.1.1 +# +# Copyright: (c) 2017 Gaurav Rastogi, +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: avi_healthmonitor +author: Gaurav Rastogi (@grastogi23) + +short_description: Module for setup of HealthMonitor Avi RESTful Object +description: + - This module is used to configure HealthMonitor object + - more examples at U(https://github.com/avinetworks/devops) +requirements: [ avisdk ] +options: + state: + description: + - The state that should be applied on the entity. + default: present + choices: ["absent", "present"] + avi_api_update_method: + description: + - Default method for object update is HTTP PUT. + - Setting to patch will override that behavior to use HTTP PATCH. + default: put + choices: ["put", "patch"] + avi_api_patch_op: + description: + - Patch operation to use when using avi_api_update_method as patch. + choices: ["add", "replace", "delete"] + description: + description: + - User defined description for the object. + dns_monitor: + description: + - Healthmonitordns settings for healthmonitor. + external_monitor: + description: + - Healthmonitorexternal settings for healthmonitor. + failed_checks: + description: + - Number of continuous failed health checks before the server is marked down. + - Allowed values are 1-50. + - Default value when not specified in API or module is interpreted by Avi Controller as 2. 
+ http_monitor: + description: + - Healthmonitorhttp settings for healthmonitor. + https_monitor: + description: + - Healthmonitorhttp settings for healthmonitor. + is_federated: + description: + - This field describes the object's replication scope. + - If the field is set to false, then the object is visible within the controller-cluster and its associated service-engines. + - If the field is set to true, then the object is replicated across the federation. + - Field introduced in 17.1.3. + - Default value when not specified in API or module is interpreted by Avi Controller as False. + type: bool + monitor_port: + description: + - Use this port instead of the port defined for the server in the pool. + - If the monitor succeeds to this port, the load balanced traffic will still be sent to the port of the server defined within the pool. + - Allowed values are 1-65535. + - Special values are 0 - 'use server port'. + name: + description: + - A user friendly name for this health monitor. + required: true + radius_monitor: + description: + - Health monitor for radius. + - Field introduced in 18.2.3. + receive_timeout: + description: + - A valid response from the server is expected within the receive timeout window. + - This timeout must be less than the send interval. + - If server status is regularly flapping up and down, consider increasing this value. + - Allowed values are 1-2400. + - Default value when not specified in API or module is interpreted by Avi Controller as 4. + send_interval: + description: + - Frequency, in seconds, that monitors are sent to a server. + - Allowed values are 1-3600. + - Default value when not specified in API or module is interpreted by Avi Controller as 10. + sip_monitor: + description: + - Health monitor for sip. + - Field introduced in 17.2.8, 18.1.3, 18.2.1. + successful_checks: + description: + - Number of continuous successful health checks before server is marked up. + - Allowed values are 1-50. + - Default value when not specified in API or module is interpreted by Avi Controller as 2. + tcp_monitor: + description: + - Healthmonitortcp settings for healthmonitor. + tenant_ref: + description: + - It is a reference to an object of type tenant. + type: + description: + - Type of the health monitor. + - Enum options - HEALTH_MONITOR_PING, HEALTH_MONITOR_TCP, HEALTH_MONITOR_HTTP, HEALTH_MONITOR_HTTPS, HEALTH_MONITOR_EXTERNAL, HEALTH_MONITOR_UDP, + - HEALTH_MONITOR_DNS, HEALTH_MONITOR_GSLB, HEALTH_MONITOR_SIP, HEALTH_MONITOR_RADIUS. + required: true + udp_monitor: + description: + - Healthmonitorudp settings for healthmonitor. + url: + description: + - Avi controller URL of the object. + uuid: + description: + - Uuid of the health monitor. +extends_documentation_fragment: +- community.general.avi + +''' + +EXAMPLES = """ +- name: Create a HTTPS health monitor + avi_healthmonitor: + controller: 10.10.27.90 + username: admin + password: AviNetworks123! 
+ https_monitor: + http_request: HEAD / HTTP/1.0 + http_response_code: + - HTTP_2XX + - HTTP_3XX + receive_timeout: 4 + failed_checks: 3 + send_interval: 10 + successful_checks: 3 + type: HEALTH_MONITOR_HTTPS + name: MyWebsite-HTTPS +""" + +RETURN = ''' +obj: + description: HealthMonitor (api/healthmonitor) object + returned: success, changed + type: dict +''' + +from ansible.module_utils.basic import AnsibleModule +try: + from ansible_collections.community.general.plugins.module_utils.network.avi.avi import ( + avi_common_argument_spec, avi_ansible_api, HAS_AVI) +except ImportError: + HAS_AVI = False + + +def main(): + argument_specs = dict( + state=dict(default='present', + choices=['absent', 'present']), + avi_api_update_method=dict(default='put', + choices=['put', 'patch']), + avi_api_patch_op=dict(choices=['add', 'replace', 'delete']), + description=dict(type='str',), + dns_monitor=dict(type='dict',), + external_monitor=dict(type='dict',), + failed_checks=dict(type='int',), + http_monitor=dict(type='dict',), + https_monitor=dict(type='dict',), + is_federated=dict(type='bool',), + monitor_port=dict(type='int',), + name=dict(type='str', required=True), + radius_monitor=dict(type='dict',), + receive_timeout=dict(type='int',), + send_interval=dict(type='int',), + sip_monitor=dict(type='dict',), + successful_checks=dict(type='int',), + tcp_monitor=dict(type='dict',), + tenant_ref=dict(type='str',), + type=dict(type='str', required=True), + udp_monitor=dict(type='dict',), + url=dict(type='str',), + uuid=dict(type='str',), + ) + argument_specs.update(avi_common_argument_spec()) + module = AnsibleModule( + argument_spec=argument_specs, supports_check_mode=True) + if not HAS_AVI: + return module.fail_json(msg=( + 'Avi python API SDK (avisdk>=17.1) or requests is not installed. ' + 'For more details visit https://github.com/avinetworks/sdk.')) + return avi_ansible_api(module, 'healthmonitor', + set([])) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/avi/avi_httppolicyset.py b/plugins/modules/network/avi/avi_httppolicyset.py new file mode 100644 index 0000000000..9bc047927a --- /dev/null +++ b/plugins/modules/network/avi/avi_httppolicyset.py @@ -0,0 +1,169 @@ +#!/usr/bin/python +# +# @author: Gaurav Rastogi (grastogi@avinetworks.com) +# Eric Anderson (eanderson@avinetworks.com) +# module_check: supported +# Avi Version: 17.1.1 +# +# Copyright: (c) 2017 Gaurav Rastogi, +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: avi_httppolicyset +author: Gaurav Rastogi (@grastogi23) + +short_description: Module for setup of HTTPPolicySet Avi RESTful Object +description: + - This module is used to configure HTTPPolicySet object + - more examples at U(https://github.com/avinetworks/devops) +requirements: [ avisdk ] +options: + state: + description: + - The state that should be applied on the entity. + default: present + choices: ["absent", "present"] + avi_api_update_method: + description: + - Default method for object update is HTTP PUT. + - Setting to patch will override that behavior to use HTTP PATCH. + default: put + choices: ["put", "patch"] + avi_api_patch_op: + description: + - Patch operation to use when using avi_api_update_method as patch. + choices: ["add", "replace", "delete"] + cloud_config_cksum: + description: + - Checksum of cloud configuration for pool. 
+            - Internally set by cloud connector.
+    created_by:
+        description:
+            - Creator name.
+    description:
+        description:
+            - User defined description for the object.
+    http_request_policy:
+        description:
+            - Http request policy for the virtual service.
+    http_response_policy:
+        description:
+            - Http response policy for the virtual service.
+    http_security_policy:
+        description:
+            - Http security policy for the virtual service.
+    is_internal_policy:
+        description:
+            - Boolean flag to set is_internal_policy.
+            - Default value when not specified in API or module is interpreted by Avi Controller as False.
+        type: bool
+    name:
+        description:
+            - Name of the http policy set.
+        required: true
+    tenant_ref:
+        description:
+            - It is a reference to an object of type tenant.
+    url:
+        description:
+            - Avi controller URL of the object.
+    uuid:
+        description:
+            - Uuid of the http policy set.
+extends_documentation_fragment:
+- community.general.avi
+
+'''
+
+EXAMPLES = """
+- name: Create an HTTP policy set to switch between testpool1 and testpool2
+  avi_httppolicyset:
+    controller: 10.10.27.90
+    username: admin
+    password: AviNetworks123!
+    name: test-HTTP-Policy-Set
+    tenant_ref: admin
+    http_request_policy:
+      rules:
+        - index: 1
+          enable: true
+          name: test-test1
+          match:
+            path:
+              match_case: INSENSITIVE
+              match_str:
+                - /test1
+              match_criteria: EQUALS
+          switching_action:
+            action: HTTP_SWITCHING_SELECT_POOL
+            status_code: HTTP_LOCAL_RESPONSE_STATUS_CODE_200
+            pool_ref: "/api/pool?name=testpool1"
+        - index: 2
+          enable: true
+          name: test-test2
+          match:
+            path:
+              match_case: INSENSITIVE
+              match_str:
+                - /test2
+              match_criteria: CONTAINS
+          switching_action:
+            action: HTTP_SWITCHING_SELECT_POOL
+            status_code: HTTP_LOCAL_RESPONSE_STATUS_CODE_200
+            pool_ref: "/api/pool?name=testpool2"
+    is_internal_policy: false
+"""
+
+RETURN = '''
+obj:
+    description: HTTPPolicySet (api/httppolicyset) object
+    returned: success, changed
+    type: dict
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+try:
+    from ansible_collections.community.general.plugins.module_utils.network.avi.avi import (
+        avi_common_argument_spec, avi_ansible_api, HAS_AVI)
+except ImportError:
+    HAS_AVI = False
+
+
+def main():
+    argument_specs = dict(
+        state=dict(default='present',
+                   choices=['absent', 'present']),
+        avi_api_update_method=dict(default='put',
+                                   choices=['put', 'patch']),
+        avi_api_patch_op=dict(choices=['add', 'replace', 'delete']),
+        cloud_config_cksum=dict(type='str',),
+        created_by=dict(type='str',),
+        description=dict(type='str',),
+        http_request_policy=dict(type='dict',),
+        http_response_policy=dict(type='dict',),
+        http_security_policy=dict(type='dict',),
+        is_internal_policy=dict(type='bool',),
+        name=dict(type='str', required=True),
+        tenant_ref=dict(type='str',),
+        url=dict(type='str',),
+        uuid=dict(type='str',),
+    )
+    argument_specs.update(avi_common_argument_spec())
+    module = AnsibleModule(
+        argument_spec=argument_specs, supports_check_mode=True)
+    if not HAS_AVI:
+        return module.fail_json(msg=(
+            'Avi python API SDK (avisdk>=17.1) or requests is not installed.
' + 'For more details visit https://github.com/avinetworks/sdk.')) + return avi_ansible_api(module, 'httppolicyset', + set([])) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/avi/avi_ipaddrgroup.py b/plugins/modules/network/avi/avi_ipaddrgroup.py new file mode 100644 index 0000000000..b9f73a9321 --- /dev/null +++ b/plugins/modules/network/avi/avi_ipaddrgroup.py @@ -0,0 +1,159 @@ +#!/usr/bin/python +# +# @author: Gaurav Rastogi (grastogi@avinetworks.com) +# Eric Anderson (eanderson@avinetworks.com) +# module_check: supported +# Avi Version: 17.1.1 +# +# Copyright: (c) 2017 Gaurav Rastogi, +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: avi_ipaddrgroup +author: Gaurav Rastogi (@grastogi23) + +short_description: Module for setup of IpAddrGroup Avi RESTful Object +description: + - This module is used to configure IpAddrGroup object + - more examples at U(https://github.com/avinetworks/devops) +requirements: [ avisdk ] +options: + state: + description: + - The state that should be applied on the entity. + default: present + choices: ["absent", "present"] + avi_api_update_method: + description: + - Default method for object update is HTTP PUT. + - Setting to patch will override that behavior to use HTTP PATCH. + default: put + choices: ["put", "patch"] + avi_api_patch_op: + description: + - Patch operation to use when using avi_api_update_method as patch. + choices: ["add", "replace", "delete"] + addrs: + description: + - Configure ip address(es). + apic_epg_name: + description: + - Populate ip addresses from members of this cisco apic epg. + country_codes: + description: + - Populate the ip address ranges from the geo database for this country. + description: + description: + - User defined description for the object. + ip_ports: + description: + - Configure (ip address, port) tuple(s). + marathon_app_name: + description: + - Populate ip addresses from tasks of this marathon app. + marathon_service_port: + description: + - Task port associated with marathon service port. + - If marathon app has multiple service ports, this is required. + - Else, the first task port is used. + name: + description: + - Name of the ip address group. + required: true + prefixes: + description: + - Configure ip address prefix(es). + ranges: + description: + - Configure ip address range(s). + tenant_ref: + description: + - It is a reference to an object of type tenant. + url: + description: + - Avi controller URL of the object. + uuid: + description: + - Uuid of the ip address group. 
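+# The EXAMPLES block below shows C(prefixes); as a companion, here is a hedged
+# sketch of one C(ranges) entry, mirroring the ip_addr structure used there.
+# The begin/end field names are an assumption about the Avi IpAddrRange
+# object, not taken from these docs:
+#   ranges:
+#     - begin:
+#         addr: 10.10.10.1
+#         type: V4
+#       end:
+#         addr: 10.10.10.50
+#         type: V4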
+extends_documentation_fragment: +- community.general.avi + +''' + +EXAMPLES = """ + - name: Create an IP Address Group configuration + avi_ipaddrgroup: + controller: '{{ controller }}' + username: '{{ username }}' + password: '{{ password }}' + name: Client-Source-Block + prefixes: + - ip_addr: + addr: 10.0.0.0 + type: V4 + mask: 8 + - ip_addr: + addr: 172.16.0.0 + type: V4 + mask: 12 + - ip_addr: + addr: 192.168.0.0 + type: V4 + mask: 16 +""" + +RETURN = ''' +obj: + description: IpAddrGroup (api/ipaddrgroup) object + returned: success, changed + type: dict +''' + +from ansible.module_utils.basic import AnsibleModule +try: + from ansible_collections.community.general.plugins.module_utils.network.avi.avi import ( + avi_common_argument_spec, avi_ansible_api, HAS_AVI) +except ImportError: + HAS_AVI = False + + +def main(): + argument_specs = dict( + state=dict(default='present', + choices=['absent', 'present']), + avi_api_update_method=dict(default='put', + choices=['put', 'patch']), + avi_api_patch_op=dict(choices=['add', 'replace', 'delete']), + addrs=dict(type='list',), + apic_epg_name=dict(type='str',), + country_codes=dict(type='list',), + description=dict(type='str',), + ip_ports=dict(type='list',), + marathon_app_name=dict(type='str',), + marathon_service_port=dict(type='int',), + name=dict(type='str', required=True), + prefixes=dict(type='list',), + ranges=dict(type='list',), + tenant_ref=dict(type='str',), + url=dict(type='str',), + uuid=dict(type='str',), + ) + argument_specs.update(avi_common_argument_spec()) + module = AnsibleModule( + argument_spec=argument_specs, supports_check_mode=True) + if not HAS_AVI: + return module.fail_json(msg=( + 'Avi python API SDK (avisdk>=17.1) or requests is not installed. ' + 'For more details visit https://github.com/avinetworks/sdk.')) + return avi_ansible_api(module, 'ipaddrgroup', + set([])) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/avi/avi_ipamdnsproviderprofile.py b/plugins/modules/network/avi/avi_ipamdnsproviderprofile.py new file mode 100644 index 0000000000..cfa0ad34bb --- /dev/null +++ b/plugins/modules/network/avi/avi_ipamdnsproviderprofile.py @@ -0,0 +1,180 @@ +#!/usr/bin/python +# +# @author: Gaurav Rastogi (grastogi@avinetworks.com) +# Eric Anderson (eanderson@avinetworks.com) +# module_check: supported +# +# Copyright: (c) 2017 Gaurav Rastogi, +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: avi_ipamdnsproviderprofile +author: Gaurav Rastogi (@grastogi23) + +short_description: Module for setup of IpamDnsProviderProfile Avi RESTful Object +description: + - This module is used to configure IpamDnsProviderProfile object + - more examples at U(https://github.com/avinetworks/devops) +requirements: [ avisdk ] +options: + state: + description: + - The state that should be applied on the entity. + default: present + choices: ["absent", "present"] + avi_api_update_method: + description: + - Default method for object update is HTTP PUT. + - Setting to patch will override that behavior to use HTTP PATCH. + default: put + choices: ["put", "patch"] + avi_api_patch_op: + description: + - Patch operation to use when using avi_api_update_method as patch. + choices: ["add", "replace", "delete"] + allocate_ip_in_vrf: + description: + - If this flag is set, only allocate ip from networks in the virtual service vrf. 
+ - Applicable for avi vantage ipam only. + - Field introduced in 17.2.4. + - Default value when not specified in API or module is interpreted by Avi Controller as False. + type: bool + aws_profile: + description: + - Provider details if type is aws. + azure_profile: + description: + - Provider details if type is microsoft azure. + - Field introduced in 17.2.1. + custom_profile: + description: + - Provider details if type is custom. + - Field introduced in 17.1.1. + gcp_profile: + description: + - Provider details if type is google cloud. + infoblox_profile: + description: + - Provider details if type is infoblox. + internal_profile: + description: + - Provider details if type is avi. + name: + description: + - Name for the ipam/dns provider profile. + required: true + oci_profile: + description: + - Provider details for oracle cloud. + - Field introduced in 18.2.1,18.1.3. + openstack_profile: + description: + - Provider details if type is openstack. + proxy_configuration: + description: + - Field introduced in 17.1.1. + tenant_ref: + description: + - It is a reference to an object of type tenant. + tencent_profile: + description: + - Provider details for tencent cloud. + - Field introduced in 18.2.3. + type: + description: + - Provider type for the ipam/dns provider profile. + - Enum options - IPAMDNS_TYPE_INFOBLOX, IPAMDNS_TYPE_AWS, IPAMDNS_TYPE_OPENSTACK, IPAMDNS_TYPE_GCP, IPAMDNS_TYPE_INFOBLOX_DNS, IPAMDNS_TYPE_CUSTOM, + - IPAMDNS_TYPE_CUSTOM_DNS, IPAMDNS_TYPE_AZURE, IPAMDNS_TYPE_OCI, IPAMDNS_TYPE_TENCENT, IPAMDNS_TYPE_INTERNAL, IPAMDNS_TYPE_INTERNAL_DNS, + - IPAMDNS_TYPE_AWS_DNS, IPAMDNS_TYPE_AZURE_DNS. + required: true + url: + description: + - Avi controller URL of the object. + uuid: + description: + - Uuid of the ipam/dns provider profile. 
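+# A hedged sketch of updating an existing profile with HTTP PATCH instead of
+# PUT, using the avi_api_update_method and avi_api_patch_op options documented
+# above. The aws_profile body (region) is an assumption about the
+# IpamDnsAwsProfile object, not taken from these docs:
+#   - name: Patch an AWS region into an existing IPAM/DNS provider profile (sketch)
+#     avi_ipamdnsproviderprofile:
+#       controller: '{{ controller }}'
+#       username: '{{ username }}'
+#       password: '{{ password }}'
+#       avi_api_update_method: patch
+#       avi_api_patch_op: replace
+#       name: My-AWS-IPAM
+#       type: IPAMDNS_TYPE_AWS
+#       aws_profile:
+#         region: us-west-2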
+extends_documentation_fragment: +- community.general.avi + +''' + +EXAMPLES = """ + - name: Create IPAM DNS provider setting + avi_ipamdnsproviderprofile: + controller: '{{ controller }}' + username: '{{ username }}' + password: '{{ password }}' + internal_profile: + dns_service_domain: + - domain_name: ashish.local + num_dns_ip: 1 + pass_through: true + record_ttl: 100 + - domain_name: guru.local + num_dns_ip: 1 + pass_through: true + record_ttl: 200 + ttl: 300 + name: Ashish-DNS + tenant_ref: Demo + type: IPAMDNS_TYPE_INTERNAL +""" + +RETURN = ''' +obj: + description: IpamDnsProviderProfile (api/ipamdnsproviderprofile) object + returned: success, changed + type: dict +''' + +from ansible.module_utils.basic import AnsibleModule +try: + from ansible_collections.community.general.plugins.module_utils.network.avi.avi import ( + avi_common_argument_spec, avi_ansible_api, HAS_AVI) +except ImportError: + HAS_AVI = False + + +def main(): + argument_specs = dict( + state=dict(default='present', + choices=['absent', 'present']), + avi_api_update_method=dict(default='put', + choices=['put', 'patch']), + avi_api_patch_op=dict(choices=['add', 'replace', 'delete']), + allocate_ip_in_vrf=dict(type='bool',), + aws_profile=dict(type='dict',), + azure_profile=dict(type='dict',), + custom_profile=dict(type='dict',), + gcp_profile=dict(type='dict',), + infoblox_profile=dict(type='dict',), + internal_profile=dict(type='dict',), + name=dict(type='str', required=True), + oci_profile=dict(type='dict',), + openstack_profile=dict(type='dict',), + proxy_configuration=dict(type='dict',), + tenant_ref=dict(type='str',), + tencent_profile=dict(type='dict',), + type=dict(type='str', required=True), + url=dict(type='str',), + uuid=dict(type='str',), + ) + argument_specs.update(avi_common_argument_spec()) + module = AnsibleModule( + argument_spec=argument_specs, supports_check_mode=True) + if not HAS_AVI: + return module.fail_json(msg=( + 'Avi python API SDK (avisdk>=17.1) or requests is not installed. ' + 'For more details visit https://github.com/avinetworks/sdk.')) + return avi_ansible_api(module, 'ipamdnsproviderprofile', + set([])) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/avi/avi_l4policyset.py b/plugins/modules/network/avi/avi_l4policyset.py new file mode 100644 index 0000000000..e4d069a97b --- /dev/null +++ b/plugins/modules/network/avi/avi_l4policyset.py @@ -0,0 +1,131 @@ +#!/usr/bin/python +# +# @author: Gaurav Rastogi (grastogi@avinetworks.com) +# Eric Anderson (eanderson@avinetworks.com) +# module_check: supported +# +# Copyright: (c) 2017 Gaurav Rastogi, +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: avi_l4policyset +author: Chaitanya Deshpande (@chaitanyaavi) + +short_description: Module for setup of L4PolicySet Avi RESTful Object +description: + - This module is used to configure L4PolicySet object + - more examples at U(https://github.com/avinetworks/devops) +requirements: [ avisdk ] +options: + state: + description: + - The state that should be applied on the entity. + default: present + choices: ["absent", "present"] + avi_api_update_method: + description: + - Default method for object update is HTTP PUT. + - Setting to patch will override that behavior to use HTTP PATCH. 
+ default: put + choices: ["put", "patch"] + avi_api_patch_op: + description: + - Patch operation to use when using avi_api_update_method as patch. + choices: ["add", "replace", "delete"] + created_by: + description: + - Creator name. + - Field introduced in 17.2.7. + description: + description: + - Field introduced in 17.2.7. + is_internal_policy: + description: + - Field introduced in 17.2.7. + - Default value when not specified in API or module is interpreted by Avi Controller as False. + type: bool + l4_connection_policy: + description: + - Policy to apply when a new transport connection is setup. + - Field introduced in 17.2.7. + name: + description: + - Name of the l4 policy set. + - Field introduced in 17.2.7. + required: true + tenant_ref: + description: + - It is a reference to an object of type tenant. + - Field introduced in 17.2.7. + url: + description: + - Avi controller URL of the object. + uuid: + description: + - Id of the l4 policy set. + - Field introduced in 17.2.7. +extends_documentation_fragment: +- community.general.avi + +''' + +EXAMPLES = """ +- name: Example to create L4PolicySet object + avi_l4policyset: + controller: 10.10.25.42 + username: admin + password: something + state: present + name: sample_l4policyset +""" + +RETURN = ''' +obj: + description: L4PolicySet (api/l4policyset) object + returned: success, changed + type: dict +''' + +from ansible.module_utils.basic import AnsibleModule +try: + from ansible_collections.community.general.plugins.module_utils.network.avi.avi import ( + avi_common_argument_spec, avi_ansible_api, HAS_AVI) +except ImportError: + HAS_AVI = False + + +def main(): + argument_specs = dict( + state=dict(default='present', + choices=['absent', 'present']), + avi_api_update_method=dict(default='put', + choices=['put', 'patch']), + avi_api_patch_op=dict(choices=['add', 'replace', 'delete']), + created_by=dict(type='str',), + description=dict(type='str',), + is_internal_policy=dict(type='bool',), + l4_connection_policy=dict(type='dict',), + name=dict(type='str', required=True), + tenant_ref=dict(type='str',), + url=dict(type='str',), + uuid=dict(type='str',), + ) + argument_specs.update(avi_common_argument_spec()) + module = AnsibleModule( + argument_spec=argument_specs, supports_check_mode=True) + if not HAS_AVI: + return module.fail_json(msg=( + 'Avi python API SDK (avisdk>=17.1) or requests is not installed. 
' + 'For more details visit https://github.com/avinetworks/sdk.')) + return avi_ansible_api(module, 'l4policyset', + set([])) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/avi/avi_microservicegroup.py b/plugins/modules/network/avi/avi_microservicegroup.py new file mode 100644 index 0000000000..c1cb187e2d --- /dev/null +++ b/plugins/modules/network/avi/avi_microservicegroup.py @@ -0,0 +1,122 @@ +#!/usr/bin/python +# +# @author: Gaurav Rastogi (grastogi@avinetworks.com) +# Eric Anderson (eanderson@avinetworks.com) +# module_check: supported +# +# Copyright: (c) 2017 Gaurav Rastogi, +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: avi_microservicegroup +author: Gaurav Rastogi (@grastogi23) + +short_description: Module for setup of MicroServiceGroup Avi RESTful Object +description: + - This module is used to configure MicroServiceGroup object + - more examples at U(https://github.com/avinetworks/devops) +requirements: [ avisdk ] +options: + state: + description: + - The state that should be applied on the entity. + default: present + choices: ["absent", "present"] + avi_api_update_method: + description: + - Default method for object update is HTTP PUT. + - Setting to patch will override that behavior to use HTTP PATCH. + default: put + choices: ["put", "patch"] + avi_api_patch_op: + description: + - Patch operation to use when using avi_api_update_method as patch. + choices: ["add", "replace", "delete"] + created_by: + description: + - Creator name. + description: + description: + - User defined description for the object. + name: + description: + - Name of the microservice group. + required: true + service_refs: + description: + - Configure microservice(es). + - It is a reference to an object of type microservice. + tenant_ref: + description: + - It is a reference to an object of type tenant. + url: + description: + - Avi controller URL of the object. + uuid: + description: + - Uuid of the microservice group. +extends_documentation_fragment: +- community.general.avi + +''' + +EXAMPLES = """ + - name: Create a Microservice Group that can be used for setting up Network security policy + avi_microservicegroup: + controller: '{{ controller }}' + username: '{{ username }}' + password: '{{ password }}' + description: Group created by my Secure My App UI. 
+ name: vs-msg-marketing + tenant_ref: admin +""" + +RETURN = ''' +obj: + description: MicroServiceGroup (api/microservicegroup) object + returned: success, changed + type: dict +''' + +from ansible.module_utils.basic import AnsibleModule +try: + from ansible_collections.community.general.plugins.module_utils.network.avi.avi import ( + avi_common_argument_spec, avi_ansible_api, HAS_AVI) +except ImportError: + HAS_AVI = False + + +def main(): + argument_specs = dict( + state=dict(default='present', + choices=['absent', 'present']), + avi_api_update_method=dict(default='put', + choices=['put', 'patch']), + avi_api_patch_op=dict(choices=['add', 'replace', 'delete']), + created_by=dict(type='str',), + description=dict(type='str',), + name=dict(type='str', required=True), + service_refs=dict(type='list',), + tenant_ref=dict(type='str',), + url=dict(type='str',), + uuid=dict(type='str',), + ) + argument_specs.update(avi_common_argument_spec()) + module = AnsibleModule( + argument_spec=argument_specs, supports_check_mode=True) + if not HAS_AVI: + return module.fail_json(msg=( + 'Avi python API SDK (avisdk>=17.1) or requests is not installed. ' + 'For more details visit https://github.com/avinetworks/sdk.')) + return avi_ansible_api(module, 'microservicegroup', + set([])) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/avi/avi_network.py b/plugins/modules/network/avi/avi_network.py new file mode 100644 index 0000000000..b52d665caf --- /dev/null +++ b/plugins/modules/network/avi/avi_network.py @@ -0,0 +1,156 @@ +#!/usr/bin/python +# +# @author: Gaurav Rastogi (grastogi@avinetworks.com) +# Eric Anderson (eanderson@avinetworks.com) +# module_check: supported +# Avi Version: 17.1.1 +# +# Copyright: (c) 2017 Gaurav Rastogi, +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: avi_network +author: Gaurav Rastogi (@grastogi23) + +short_description: Module for setup of Network Avi RESTful Object +description: + - This module is used to configure Network object + - more examples at U(https://github.com/avinetworks/devops) +requirements: [ avisdk ] +options: + state: + description: + - The state that should be applied on the entity. + default: present + choices: ["absent", "present"] + avi_api_update_method: + description: + - Default method for object update is HTTP PUT. + - Setting to patch will override that behavior to use HTTP PATCH. + default: put + choices: ["put", "patch"] + avi_api_patch_op: + description: + - Patch operation to use when using avi_api_update_method as patch. + choices: ["add", "replace", "delete"] + cloud_ref: + description: + - It is a reference to an object of type cloud. + configured_subnets: + description: + - List of subnet. + dhcp_enabled: + description: + - Select the ip address management scheme for this network. + - Default value when not specified in API or module is interpreted by Avi Controller as True. + type: bool + exclude_discovered_subnets: + description: + - When selected, excludes all discovered subnets in this network from consideration for virtual service placement. + - Default value when not specified in API or module is interpreted by Avi Controller as False. + type: bool + ip6_autocfg_enabled: + description: + - Enable ipv6 auto configuration. + - Field introduced in 18.1.1. 
+ - Default value when not specified in API or module is interpreted by Avi Controller as True. + type: bool + name: + description: + - Name of the object. + required: true + synced_from_se: + description: + - Boolean flag to set synced_from_se. + - Default value when not specified in API or module is interpreted by Avi Controller as False. + type: bool + tenant_ref: + description: + - It is a reference to an object of type tenant. + url: + description: + - Avi controller URL of the object. + uuid: + description: + - Unique object identifier of the object. + vcenter_dvs: + description: + - Boolean flag to set vcenter_dvs. + - Default value when not specified in API or module is interpreted by Avi Controller as True. + type: bool + vimgrnw_ref: + description: + - It is a reference to an object of type vimgrnwruntime. + vrf_context_ref: + description: + - It is a reference to an object of type vrfcontext. +extends_documentation_fragment: +- community.general.avi + +''' + +EXAMPLES = """ +- name: Example to create Network object + avi_network: + controller: 10.10.25.42 + username: admin + password: something + state: present + name: sample_network +""" + +RETURN = ''' +obj: + description: Network (api/network) object + returned: success, changed + type: dict +''' + +from ansible.module_utils.basic import AnsibleModule +try: + from ansible_collections.community.general.plugins.module_utils.network.avi.avi import ( + avi_common_argument_spec, avi_ansible_api, HAS_AVI) +except ImportError: + HAS_AVI = False + + +def main(): + argument_specs = dict( + state=dict(default='present', + choices=['absent', 'present']), + avi_api_update_method=dict(default='put', + choices=['put', 'patch']), + avi_api_patch_op=dict(choices=['add', 'replace', 'delete']), + cloud_ref=dict(type='str',), + configured_subnets=dict(type='list',), + dhcp_enabled=dict(type='bool',), + exclude_discovered_subnets=dict(type='bool',), + ip6_autocfg_enabled=dict(type='bool',), + name=dict(type='str', required=True), + synced_from_se=dict(type='bool',), + tenant_ref=dict(type='str',), + url=dict(type='str',), + uuid=dict(type='str',), + vcenter_dvs=dict(type='bool',), + vimgrnw_ref=dict(type='str',), + vrf_context_ref=dict(type='str',), + ) + argument_specs.update(avi_common_argument_spec()) + module = AnsibleModule( + argument_spec=argument_specs, supports_check_mode=True) + if not HAS_AVI: + return module.fail_json(msg=( + 'Avi python API SDK (avisdk>=17.1) or requests is not installed. 
' + 'For more details visit https://github.com/avinetworks/sdk.')) + return avi_ansible_api(module, 'network', + set([])) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/avi/avi_networkprofile.py b/plugins/modules/network/avi/avi_networkprofile.py new file mode 100644 index 0000000000..b71db84733 --- /dev/null +++ b/plugins/modules/network/avi/avi_networkprofile.py @@ -0,0 +1,132 @@ +#!/usr/bin/python +# +# @author: Gaurav Rastogi (grastogi@avinetworks.com) +# Eric Anderson (eanderson@avinetworks.com) +# module_check: supported +# Avi Version: 17.1.1 +# +# Copyright: (c) 2017 Gaurav Rastogi, +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: avi_networkprofile +author: Gaurav Rastogi (@grastogi23) + +short_description: Module for setup of NetworkProfile Avi RESTful Object +description: + - This module is used to configure NetworkProfile object + - more examples at U(https://github.com/avinetworks/devops) +requirements: [ avisdk ] +options: + state: + description: + - The state that should be applied on the entity. + default: present + choices: ["absent", "present"] + avi_api_update_method: + description: + - Default method for object update is HTTP PUT. + - Setting to patch will override that behavior to use HTTP PATCH. + default: put + choices: ["put", "patch"] + avi_api_patch_op: + description: + - Patch operation to use when using avi_api_update_method as patch. + choices: ["add", "replace", "delete"] + connection_mirror: + description: + - When enabled, avi mirrors all tcp fastpath connections to standby. + - Applicable only in legacy ha mode. + - Field introduced in 18.1.3,18.2.1. + - Default value when not specified in API or module is interpreted by Avi Controller as False. + type: bool + description: + description: + - User defined description for the object. + name: + description: + - The name of the network profile. + required: true + profile: + description: + - Networkprofileunion settings for networkprofile. + required: true + tenant_ref: + description: + - It is a reference to an object of type tenant. + url: + description: + - Avi controller URL of the object. + uuid: + description: + - Uuid of the network profile. 
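+# For contrast with the UDP fast path example below, a hedged sketch of a TCP
+# proxy profile; the type value and the tcp_proxy_profile body (automatic) are
+# assumptions about the Avi NetworkProfileUnion object, not taken from these docs:
+#   profile:
+#     type: PROTOCOL_TYPE_TCP_PROXY
+#     tcp_proxy_profile:
+#       automatic: true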
+extends_documentation_fragment:
+- community.general.avi
+
+'''
+
+EXAMPLES = """
+  - name: Create a network profile for a UDP application
+    avi_networkprofile:
+      controller: '{{ controller }}'
+      username: '{{ username }}'
+      password: '{{ password }}'
+      name: System-UDP-Fast-Path
+      profile:
+        type: PROTOCOL_TYPE_UDP_FAST_PATH
+        udp_fast_path_profile:
+          per_pkt_loadbalance: false
+          session_idle_timeout: 10
+          snat: true
+      tenant_ref: admin
+"""
+
+RETURN = '''
+obj:
+    description: NetworkProfile (api/networkprofile) object
+    returned: success, changed
+    type: dict
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+try:
+    from ansible_collections.community.general.plugins.module_utils.network.avi.avi import (
+        avi_common_argument_spec, avi_ansible_api, HAS_AVI)
+except ImportError:
+    HAS_AVI = False
+
+
+def main():
+    argument_specs = dict(
+        state=dict(default='present',
+                   choices=['absent', 'present']),
+        avi_api_update_method=dict(default='put',
+                                   choices=['put', 'patch']),
+        avi_api_patch_op=dict(choices=['add', 'replace', 'delete']),
+        connection_mirror=dict(type='bool',),
+        description=dict(type='str',),
+        name=dict(type='str', required=True),
+        profile=dict(type='dict', required=True),
+        tenant_ref=dict(type='str',),
+        url=dict(type='str',),
+        uuid=dict(type='str',),
+    )
+    argument_specs.update(avi_common_argument_spec())
+    module = AnsibleModule(
+        argument_spec=argument_specs, supports_check_mode=True)
+    if not HAS_AVI:
+        return module.fail_json(msg=(
+            'Avi python API SDK (avisdk>=17.1) or requests is not installed. '
+            'For more details visit https://github.com/avinetworks/sdk.'))
+    return avi_ansible_api(module, 'networkprofile',
+                           set([]))
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/network/avi/avi_networksecuritypolicy.py b/plugins/modules/network/avi/avi_networksecuritypolicy.py
new file mode 100644
index 0000000000..781e9f6a5c
--- /dev/null
+++ b/plugins/modules/network/avi/avi_networksecuritypolicy.py
@@ -0,0 +1,137 @@
+#!/usr/bin/python
+#
+# @author: Gaurav Rastogi (grastogi@avinetworks.com)
+#          Eric Anderson (eanderson@avinetworks.com)
+# module_check: supported
+# Avi Version: 17.1.1
+#
+# Copyright: (c) 2017 Gaurav Rastogi,
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+DOCUMENTATION = '''
+---
+module: avi_networksecuritypolicy
+author: Gaurav Rastogi (@grastogi23)
+
+short_description: Module for setup of NetworkSecurityPolicy Avi RESTful Object
+description:
+    - This module is used to configure NetworkSecurityPolicy object
+    - more examples at U(https://github.com/avinetworks/devops)
+requirements: [ avisdk ]
+options:
+    state:
+        description:
+            - The state that should be applied on the entity.
+        default: present
+        choices: ["absent", "present"]
+    avi_api_update_method:
+        description:
+            - Default method for object update is HTTP PUT.
+            - Setting to patch will override that behavior to use HTTP PATCH.
+        default: put
+        choices: ["put", "patch"]
+    avi_api_patch_op:
+        description:
+            - Patch operation to use when using avi_api_update_method as patch.
+        choices: ["add", "replace", "delete"]
+    cloud_config_cksum:
+        description:
+            - Checksum of cloud configuration for network sec policy.
+            - Internally set by cloud connector.
+    created_by:
+        description:
+            - Creator name.
+    description:
+        description:
+            - User defined description for the object.
+ name: + description: + - Name of the object. + rules: + description: + - List of networksecurityrule. + tenant_ref: + description: + - It is a reference to an object of type tenant. + url: + description: + - Avi controller URL of the object. + uuid: + description: + - Unique object identifier of the object. +extends_documentation_fragment: +- community.general.avi + +''' + +EXAMPLES = """ + - name: Create a network security policy to block clients represented by ip group known_attackers + avi_networksecuritypolicy: + controller: '{{ controller }}' + username: '{{ username }}' + password: '{{ password }}' + name: vs-gurutest-ns + rules: + - action: NETWORK_SECURITY_POLICY_ACTION_TYPE_DENY + age: 0 + enable: true + index: 1 + log: false + match: + client_ip: + group_refs: + - Demo:known_attackers + match_criteria: IS_IN + name: Rule 1 + tenant_ref: Demo +""" + +RETURN = ''' +obj: + description: NetworkSecurityPolicy (api/networksecuritypolicy) object + returned: success, changed + type: dict +''' + +from ansible.module_utils.basic import AnsibleModule +try: + from ansible_collections.community.general.plugins.module_utils.network.avi.avi import ( + avi_common_argument_spec, avi_ansible_api, HAS_AVI) +except ImportError: + HAS_AVI = False + + +def main(): + argument_specs = dict( + state=dict(default='present', + choices=['absent', 'present']), + avi_api_update_method=dict(default='put', + choices=['put', 'patch']), + avi_api_patch_op=dict(choices=['add', 'replace', 'delete']), + cloud_config_cksum=dict(type='str',), + created_by=dict(type='str',), + description=dict(type='str',), + name=dict(type='str',), + rules=dict(type='list',), + tenant_ref=dict(type='str',), + url=dict(type='str',), + uuid=dict(type='str',), + ) + argument_specs.update(avi_common_argument_spec()) + module = AnsibleModule( + argument_spec=argument_specs, supports_check_mode=True) + if not HAS_AVI: + return module.fail_json(msg=( + 'Avi python API SDK (avisdk>=17.1) or requests is not installed. ' + 'For more details visit https://github.com/avinetworks/sdk.')) + return avi_ansible_api(module, 'networksecuritypolicy', + set([])) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/avi/avi_pkiprofile.py b/plugins/modules/network/avi/avi_pkiprofile.py new file mode 100644 index 0000000000..2b19ebd4d4 --- /dev/null +++ b/plugins/modules/network/avi/avi_pkiprofile.py @@ -0,0 +1,150 @@ +#!/usr/bin/python +# +# @author: Gaurav Rastogi (grastogi@avinetworks.com) +# Eric Anderson (eanderson@avinetworks.com) +# module_check: supported +# Avi Version: 17.1.1 +# +# Copyright: (c) 2017 Gaurav Rastogi, +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: avi_pkiprofile +author: Gaurav Rastogi (@grastogi23) + +short_description: Module for setup of PKIProfile Avi RESTful Object +description: + - This module is used to configure PKIProfile object + - more examples at U(https://github.com/avinetworks/devops) +requirements: [ avisdk ] +options: + state: + description: + - The state that should be applied on the entity. + default: present + choices: ["absent", "present"] + avi_api_update_method: + description: + - Default method for object update is HTTP PUT. + - Setting to patch will override that behavior to use HTTP PATCH. 
+ default: put + choices: ["put", "patch"] + avi_api_patch_op: + description: + - Patch operation to use when using avi_api_update_method as patch. + choices: ["add", "replace", "delete"] + ca_certs: + description: + - List of certificate authorities (root and intermediate) trusted that is used for certificate validation. + created_by: + description: + - Creator name. + crl_check: + description: + - When enabled, avi will verify via crl checks that certificates in the trust chain have not been revoked. + - Default value when not specified in API or module is interpreted by Avi Controller as True. + type: bool + crls: + description: + - Certificate revocation lists. + ignore_peer_chain: + description: + - When enabled, avi will not trust intermediate and root certs presented by a client. + - Instead, only the chain certs configured in the certificate authority section will be used to verify trust of the client's cert. + - Default value when not specified in API or module is interpreted by Avi Controller as False. + type: bool + is_federated: + description: + - This field describes the object's replication scope. + - If the field is set to false, then the object is visible within the controller-cluster and its associated service-engines. + - If the field is set to true, then the object is replicated across the federation. + - Field introduced in 17.1.3. + - Default value when not specified in API or module is interpreted by Avi Controller as False. + type: bool + name: + description: + - Name of the pki profile. + required: true + tenant_ref: + description: + - It is a reference to an object of type tenant. + url: + description: + - Avi controller URL of the object. + uuid: + description: + - Unique object identifier of the object. + validate_only_leaf_crl: + description: + - When enabled, avi will only validate the revocation status of the leaf certificate using crl. + - To enable validation for the entire chain, disable this option and provide all the relevant crls. + - Default value when not specified in API or module is interpreted by Avi Controller as True. 
+ type: bool +extends_documentation_fragment: +- community.general.avi + +''' + +EXAMPLES = """ +- name: Example to create PKIProfile object + avi_pkiprofile: + controller: 10.10.25.42 + username: admin + password: something + state: present + name: sample_pkiprofile +""" + +RETURN = ''' +obj: + description: PKIProfile (api/pkiprofile) object + returned: success, changed + type: dict +''' + +from ansible.module_utils.basic import AnsibleModule +try: + from ansible_collections.community.general.plugins.module_utils.network.avi.avi import ( + avi_common_argument_spec, avi_ansible_api, HAS_AVI) +except ImportError: + HAS_AVI = False + + +def main(): + argument_specs = dict( + state=dict(default='present', + choices=['absent', 'present']), + avi_api_update_method=dict(default='put', + choices=['put', 'patch']), + avi_api_patch_op=dict(choices=['add', 'replace', 'delete']), + ca_certs=dict(type='list',), + created_by=dict(type='str',), + crl_check=dict(type='bool',), + crls=dict(type='list',), + ignore_peer_chain=dict(type='bool',), + is_federated=dict(type='bool',), + name=dict(type='str', required=True), + tenant_ref=dict(type='str',), + url=dict(type='str',), + uuid=dict(type='str',), + validate_only_leaf_crl=dict(type='bool',), + ) + argument_specs.update(avi_common_argument_spec()) + module = AnsibleModule( + argument_spec=argument_specs, supports_check_mode=True) + if not HAS_AVI: + return module.fail_json(msg=( + 'Avi python API SDK (avisdk>=17.1) or requests is not installed. ' + 'For more details visit https://github.com/avinetworks/sdk.')) + return avi_ansible_api(module, 'pkiprofile', + set([])) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/avi/avi_pool.py b/plugins/modules/network/avi/avi_pool.py new file mode 100644 index 0000000000..fb51aed329 --- /dev/null +++ b/plugins/modules/network/avi/avi_pool.py @@ -0,0 +1,498 @@ +#!/usr/bin/python +# +# @author: Gaurav Rastogi (grastogi@avinetworks.com) +# Eric Anderson (eanderson@avinetworks.com) +# module_check: supported +# Avi Version: 17.1.1 +# +# Copyright: (c) 2017 Gaurav Rastogi, +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: avi_pool +author: Gaurav Rastogi (@grastogi23) + +short_description: Module for setup of Pool Avi RESTful Object +description: + - This module is used to configure Pool object + - more examples at U(https://github.com/avinetworks/devops) +requirements: [ avisdk ] +options: + state: + description: + - The state that should be applied on the entity. + default: present + choices: ["absent", "present"] + avi_api_update_method: + description: + - Default method for object update is HTTP PUT. + - Setting to patch will override that behavior to use HTTP PATCH. + default: put + choices: ["put", "patch"] + avi_api_patch_op: + description: + - Patch operation to use when using avi_api_update_method as patch. + choices: ["add", "replace", "delete"] + a_pool: + description: + - Name of container cloud application that constitutes a pool in a a-b pool configuration, if different from vs app. + - Field deprecated in 18.1.2. + ab_pool: + description: + - A/b pool configuration. + - Field deprecated in 18.1.2. + ab_priority: + description: + - Priority of this pool in a a-b pool pair. + - Internally used. + - Field deprecated in 18.1.2. 
+    analytics_policy:
+        description:
+            - Determines analytics settings for the pool.
+            - Field introduced in 18.1.5, 18.2.1.
+    analytics_profile_ref:
+        description:
+            - Specifies settings related to analytics.
+            - It is a reference to an object of type analyticsprofile.
+            - Field introduced in 18.1.4,18.2.1.
+    apic_epg_name:
+        description:
+            - Synchronize cisco apic epg members with pool servers.
+    application_persistence_profile_ref:
+        description:
+            - Persistence will ensure the same user sticks to the same server for a desired duration of time.
+            - It is a reference to an object of type applicationpersistenceprofile.
+    autoscale_launch_config_ref:
+        description:
+            - If configured then avi will trigger orchestration of pool server creation and deletion.
+            - It is only supported for container clouds like mesos, openshift, kubernetes, docker, etc.
+            - It is a reference to an object of type autoscalelaunchconfig.
+    autoscale_networks:
+        description:
+            - Network ids for the launch configuration.
+    autoscale_policy_ref:
+        description:
+            - Reference to server autoscale policy.
+            - It is a reference to an object of type serverautoscalepolicy.
+    capacity_estimation:
+        description:
+            - Inline estimation of capacity of servers.
+            - Default value when not specified in API or module is interpreted by Avi Controller as False.
+        type: bool
+    capacity_estimation_ttfb_thresh:
+        description:
+            - The maximum time-to-first-byte of a server.
+            - Allowed values are 1-5000.
+            - Special values are 0 - 'automatic'.
+            - Default value when not specified in API or module is interpreted by Avi Controller as 0.
+    cloud_config_cksum:
+        description:
+            - Checksum of cloud configuration for pool.
+            - Internally set by cloud connector.
+    cloud_ref:
+        description:
+            - It is a reference to an object of type cloud.
+    conn_pool_properties:
+        description:
+            - Connection pool properties.
+            - Field introduced in 18.2.1.
+    connection_ramp_duration:
+        description:
+            - Duration for which new connections will be gradually ramped up to a server recently brought online.
+            - Useful for lb algorithms that are least connection based.
+            - Allowed values are 1-300.
+            - Special values are 0 - 'immediate'.
+            - Default value when not specified in API or module is interpreted by Avi Controller as 10.
+    created_by:
+        description:
+            - Creator name.
+    default_server_port:
+        description:
+            - Traffic sent to servers will use this destination server port unless overridden by the server's specific port attribute.
+            - The ssl checkbox enables avi to server encryption.
+            - Allowed values are 1-65535.
+            - Default value when not specified in API or module is interpreted by Avi Controller as 80.
+    delete_server_on_dns_refresh:
+        description:
+            - Indicates whether existing ips are disabled (false) or deleted (true) on dns hostname refresh.
+            - On a dns refresh, some ips set on pool may no longer be returned by the resolver.
+            - These ips are deleted from the pool when this knob is set to true.
+            - They are disabled if the knob is set to false.
+            - Field introduced in 18.2.3.
+            - Default value when not specified in API or module is interpreted by Avi Controller as True.
+        type: bool
+    description:
+        description:
+            - A description of the pool.
+    domain_name:
+        description:
+            - Comma separated list of domain names which will be used to verify the common names or subject alternative names presented by server certificates.
+            - It is performed only when common name check host_check_enabled is enabled.
+ east_west: + description: + - Inherited config from virtualservice. + type: bool + enabled: + description: + - Enable or disable the pool. + - Disabling will terminate all open connections and pause health monitors. + - Default value when not specified in API or module is interpreted by Avi Controller as True. + type: bool + external_autoscale_groups: + description: + - Names of external auto-scale groups for pool servers. + - Currently available only for aws and azure. + - Field introduced in 17.1.2. + fail_action: + description: + - Enable an action - close connection, http redirect or local http response - when a pool failure happens. + - By default, a connection will be closed, in case the pool experiences a failure. + fewest_tasks_feedback_delay: + description: + - Periodicity of feedback for fewest tasks server selection algorithm. + - Allowed values are 1-300. + - Default value when not specified in API or module is interpreted by Avi Controller as 10. + graceful_disable_timeout: + description: + - Used to gracefully disable a server. + - Virtual service waits for the specified time before terminating the existing connections to the servers that are disabled. + - Allowed values are 1-7200. + - Special values are 0 - 'immediate', -1 - 'infinite'. + - Default value when not specified in API or module is interpreted by Avi Controller as 1. + gslb_sp_enabled: + description: + - Indicates if the pool is a site-persistence pool. + - Field introduced in 17.2.1. + type: bool + health_monitor_refs: + description: + - Verify server health by applying one or more health monitors. + - Active monitors generate synthetic traffic from each service engine and mark a server up or down based on the response. + - The passive monitor listens only to client to server communication. + - It raises or lowers the ratio of traffic destined to a server based on successful responses. + - It is a reference to an object of type healthmonitor. + host_check_enabled: + description: + - Enable common name check for server certificate. + - If enabled and no explicit domain name is specified, avi will use the incoming host header to do the match. + - Default value when not specified in API or module is interpreted by Avi Controller as False. + type: bool + inline_health_monitor: + description: + - The passive monitor will monitor client to server connections and requests and adjust traffic load to servers based on successful responses. + - This may alter the expected behavior of the lb method, such as round robin. + - Default value when not specified in API or module is interpreted by Avi Controller as True. + type: bool + ipaddrgroup_ref: + description: + - Use list of servers from ip address group. + - It is a reference to an object of type ipaddrgroup. + lb_algorithm: + description: + - The load balancing algorithm will pick a server within the pool's list of available servers. + - Enum options - LB_ALGORITHM_LEAST_CONNECTIONS, LB_ALGORITHM_ROUND_ROBIN, LB_ALGORITHM_FASTEST_RESPONSE, LB_ALGORITHM_CONSISTENT_HASH, + - LB_ALGORITHM_LEAST_LOAD, LB_ALGORITHM_FEWEST_SERVERS, LB_ALGORITHM_RANDOM, LB_ALGORITHM_FEWEST_TASKS, LB_ALGORITHM_NEAREST_SERVER, + - LB_ALGORITHM_CORE_AFFINITY, LB_ALGORITHM_TOPOLOGY. + - Default value when not specified in API or module is interpreted by Avi Controller as LB_ALGORITHM_LEAST_CONNECTIONS. + lb_algorithm_consistent_hash_hdr: + description: + - Http header name to be used for the hash key. 
+    lb_algorithm_core_nonaffinity:
+        description:
+            - Degree of non-affinity for core affinity based server selection.
+            - Allowed values are 1-65535.
+            - Field introduced in 17.1.3.
+            - Default value when not specified in API or module is interpreted by Avi Controller as 2.
+    lb_algorithm_hash:
+        description:
+            - Criteria used as a key for determining the hash between the client and server.
+            - Enum options - LB_ALGORITHM_CONSISTENT_HASH_SOURCE_IP_ADDRESS, LB_ALGORITHM_CONSISTENT_HASH_SOURCE_IP_ADDRESS_AND_PORT,
+            - LB_ALGORITHM_CONSISTENT_HASH_URI, LB_ALGORITHM_CONSISTENT_HASH_CUSTOM_HEADER, LB_ALGORITHM_CONSISTENT_HASH_CUSTOM_STRING,
+            - LB_ALGORITHM_CONSISTENT_HASH_CALLID.
+            - Default value when not specified in API or module is interpreted by Avi Controller as LB_ALGORITHM_CONSISTENT_HASH_SOURCE_IP_ADDRESS.
+    lookup_server_by_name:
+        description:
+            - Allow server lookup by name.
+            - Field introduced in 17.1.11,17.2.4.
+            - Default value when not specified in API or module is interpreted by Avi Controller as False.
+        type: bool
+    max_concurrent_connections_per_server:
+        description:
+            - The maximum number of concurrent connections allowed to each server within the pool.
+            - Note applied value will be no less than the number of service engines that the pool is placed on.
+            - If set to 0, no limit is applied.
+            - Default value when not specified in API or module is interpreted by Avi Controller as 0.
+    max_conn_rate_per_server:
+        description:
+            - Rate limit connections to each server.
+    min_health_monitors_up:
+        description:
+            - Minimum number of health monitors in up state to mark server up.
+            - Field introduced in 18.2.1, 17.2.12.
+    min_servers_up:
+        description:
+            - Minimum number of servers in up state for marking the pool up.
+            - Field introduced in 18.2.1, 17.2.12.
+    name:
+        description:
+            - The name of the pool.
+        required: true
+    networks:
+        description:
+            - (internal-use) networks designated as containing servers for this pool.
+            - The servers may be further narrowed down by a filter.
+            - This field is used internally by avi, not editable by the user.
+    nsx_securitygroup:
+        description:
+            - A list of nsx service groups where the servers for the pool are created.
+            - Field introduced in 17.1.1.
+    pki_profile_ref:
+        description:
+            - Avi will validate the ssl certificate presented by a server against the selected pki profile.
+            - It is a reference to an object of type pkiprofile.
+    placement_networks:
+        description:
+            - Manually select the networks and subnets used to provide reachability to the pool's servers.
+            - Specify the subnet using the following syntax 10-1-1-0/24.
+            - Use static routes in vrf configuration when pool servers are not directly connected but routable from the service engine.
+    prst_hdr_name:
+        description:
+            - Header name for custom header persistence.
+            - Field deprecated in 18.1.2.
+    request_queue_depth:
+        description:
+            - Minimum number of requests to be queued when pool is full.
+            - Default value when not specified in API or module is interpreted by Avi Controller as 128.
+    request_queue_enabled:
+        description:
+            - Enable request queue when pool is full.
+            - Default value when not specified in API or module is interpreted by Avi Controller as False.
+        type: bool
+    rewrite_host_header_to_server_name:
+        description:
+            - Rewrite incoming host header to server name of the server to which the request is proxied.
+            - Enabling this feature rewrites host header for requests to all servers in the pool.
+ - Default value when not specified in API or module is interpreted by Avi Controller as False. + type: bool + rewrite_host_header_to_sni: + description: + - If sni server name is specified, rewrite incoming host header to the sni server name. + - Default value when not specified in API or module is interpreted by Avi Controller as False. + type: bool + server_auto_scale: + description: + - Server autoscale. + - Not used anymore. + - Field deprecated in 18.1.2. + type: bool + server_count: + description: + - Field deprecated in 18.2.1. + server_name: + description: + - Fully qualified dns hostname which will be used in the tls sni extension in server connections if sni is enabled. + - If no value is specified, avi will use the incoming host header instead. + server_reselect: + description: + - Server reselect configuration for http requests. + server_timeout: + description: + - Server timeout value specifies the time within which a server connection needs to be established and a request-response exchange completes + - between avi and the server. + - Value of 0 results in using default timeout of 60 minutes. + - Allowed values are 0-3600000. + - Field introduced in 18.1.5,18.2.1. + - Default value when not specified in API or module is interpreted by Avi Controller as 0. + servers: + description: + - The pool directs load balanced traffic to this list of destination servers. + - The servers can be configured by ip address, name, network or via ip address group. + service_metadata: + description: + - Metadata pertaining to the service provided by this pool. + - In openshift/kubernetes environments, app metadata info is stored. + - Any user input to this field will be overwritten by avi vantage. + - Field introduced in 17.2.14,18.1.5,18.2.1. + sni_enabled: + description: + - Enable tls sni for server connections. + - If disabled, avi will not send the sni extension as part of the handshake. + - Default value when not specified in API or module is interpreted by Avi Controller as True. + type: bool + ssl_key_and_certificate_ref: + description: + - Service engines will present a client ssl certificate to the server. + - It is a reference to an object of type sslkeyandcertificate. + ssl_profile_ref: + description: + - When enabled, avi re-encrypts traffic to the backend servers. + - The specific ssl profile defines which ciphers and ssl versions will be supported. + - It is a reference to an object of type sslprofile. + tenant_ref: + description: + - It is a reference to an object of type tenant. + url: + description: + - Avi controller URL of the object. + use_service_port: + description: + - Do not translate the client's destination port when sending the connection to the server. + - The pool or servers specified service port will still be used for health monitoring. + - Default value when not specified in API or module is interpreted by Avi Controller as False. + type: bool + uuid: + description: + - Uuid of the pool. + vrf_ref: + description: + - Virtual routing context that the pool is bound to. + - This is used to provide the isolation of the set of networks the pool is attached to. + - The pool inherits the virtual routing context of the virtual service, and this field is used only internally, and is set by pb-transform. + - It is a reference to an object of type vrfcontext. 
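+# A sketch combining several of the options documented above into one task;
+# the option names and enum values come from this document, while the pool
+# name, port, hostname and addresses are illustrative:
+#   - name: Round-robin pool that re-encrypts to port 8443 with SNI (sketch)
+#     avi_pool:
+#       controller: '{{ controller }}'
+#       username: '{{ username }}'
+#       password: '{{ password }}'
+#       name: secure-app-pool
+#       lb_algorithm: LB_ALGORITHM_ROUND_ROBIN
+#       default_server_port: 8443
+#       graceful_disable_timeout: 60
+#       sni_enabled: true
+#       server_name: app.example.com
+#       ssl_profile_ref: '/api/sslprofile?name=System-Standard'
+#       servers:
+#         - ip:
+#             addr: 10.10.2.20
+#             type: V4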
+extends_documentation_fragment: +- community.general.avi + +''' + +EXAMPLES = """ +- name: Create a Pool with two servers and HTTP monitor + avi_pool: + controller: 10.10.1.20 + username: avi_user + password: avi_password + name: testpool1 + description: testpool1 + state: present + health_monitor_refs: + - '/api/healthmonitor?name=System-HTTP' + servers: + - ip: + addr: 10.10.2.20 + type: V4 + - ip: + addr: 10.10.2.21 + type: V4 + +- name: Patch pool with a single server using patch op and avi_credentials + avi_pool: + avi_api_update_method: patch + avi_api_patch_op: delete + avi_credentials: "{{avi_credentials}}" + name: test-pool + servers: + - ip: + addr: 10.90.64.13 + type: 'V4' + register: pool + when: + - state | default("present") == "present" +""" + +RETURN = ''' +obj: + description: Pool (api/pool) object + returned: success, changed + type: dict +''' + +from ansible.module_utils.basic import AnsibleModule +try: + from ansible_collections.community.general.plugins.module_utils.network.avi.avi import ( + avi_common_argument_spec, avi_ansible_api, HAS_AVI) +except ImportError: + HAS_AVI = False + + +def main(): + argument_specs = dict( + state=dict(default='present', + choices=['absent', 'present']), + avi_api_update_method=dict(default='put', + choices=['put', 'patch']), + avi_api_patch_op=dict(choices=['add', 'replace', 'delete']), + a_pool=dict(type='str',), + ab_pool=dict(type='dict',), + ab_priority=dict(type='int',), + analytics_policy=dict(type='dict',), + analytics_profile_ref=dict(type='str',), + apic_epg_name=dict(type='str',), + application_persistence_profile_ref=dict(type='str',), + autoscale_launch_config_ref=dict(type='str',), + autoscale_networks=dict(type='list',), + autoscale_policy_ref=dict(type='str',), + capacity_estimation=dict(type='bool',), + capacity_estimation_ttfb_thresh=dict(type='int',), + cloud_config_cksum=dict(type='str',), + cloud_ref=dict(type='str',), + conn_pool_properties=dict(type='dict',), + connection_ramp_duration=dict(type='int',), + created_by=dict(type='str',), + default_server_port=dict(type='int',), + delete_server_on_dns_refresh=dict(type='bool',), + description=dict(type='str',), + domain_name=dict(type='list',), + east_west=dict(type='bool',), + enabled=dict(type='bool',), + external_autoscale_groups=dict(type='list',), + fail_action=dict(type='dict',), + fewest_tasks_feedback_delay=dict(type='int',), + graceful_disable_timeout=dict(type='int',), + gslb_sp_enabled=dict(type='bool',), + health_monitor_refs=dict(type='list',), + host_check_enabled=dict(type='bool',), + inline_health_monitor=dict(type='bool',), + ipaddrgroup_ref=dict(type='str',), + lb_algorithm=dict(type='str',), + lb_algorithm_consistent_hash_hdr=dict(type='str',), + lb_algorithm_core_nonaffinity=dict(type='int',), + lb_algorithm_hash=dict(type='str',), + lookup_server_by_name=dict(type='bool',), + max_concurrent_connections_per_server=dict(type='int',), + max_conn_rate_per_server=dict(type='dict',), + min_health_monitors_up=dict(type='int',), + min_servers_up=dict(type='int',), + name=dict(type='str', required=True), + networks=dict(type='list',), + nsx_securitygroup=dict(type='list',), + pki_profile_ref=dict(type='str',), + placement_networks=dict(type='list',), + prst_hdr_name=dict(type='str',), + request_queue_depth=dict(type='int',), + request_queue_enabled=dict(type='bool',), + rewrite_host_header_to_server_name=dict(type='bool',), + rewrite_host_header_to_sni=dict(type='bool',), + server_auto_scale=dict(type='bool',), + server_count=dict(type='int',), + 
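+        # Each entry in this dict mirrors one option in DOCUMENTATION above;
+        # nested Avi sub-objects (fail_action, servers, conn_pool_properties,
+        # and similar dict/list options) are passed through as-is and are
+        # validated by the Avi Controller rather than by Ansible.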
server_name=dict(type='str',), + server_reselect=dict(type='dict',), + server_timeout=dict(type='int',), + servers=dict(type='list',), + service_metadata=dict(type='str',), + sni_enabled=dict(type='bool',), + ssl_key_and_certificate_ref=dict(type='str',), + ssl_profile_ref=dict(type='str',), + tenant_ref=dict(type='str',), + url=dict(type='str',), + use_service_port=dict(type='bool',), + uuid=dict(type='str',), + vrf_ref=dict(type='str',), + ) + argument_specs.update(avi_common_argument_spec()) + module = AnsibleModule( + argument_spec=argument_specs, supports_check_mode=True) + if not HAS_AVI: + return module.fail_json(msg=( + 'Avi python API SDK (avisdk>=17.1) or requests is not installed. ' + 'For more details visit https://github.com/avinetworks/sdk.')) + return avi_ansible_api(module, 'pool', + set([])) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/avi/avi_poolgroup.py b/plugins/modules/network/avi/avi_poolgroup.py new file mode 100644 index 0000000000..8cf97197fe --- /dev/null +++ b/plugins/modules/network/avi/avi_poolgroup.py @@ -0,0 +1,167 @@ +#!/usr/bin/python +# +# @author: Gaurav Rastogi (grastogi@avinetworks.com) +# Eric Anderson (eanderson@avinetworks.com) +# module_check: supported +# Avi Version: 17.1.1 +# +# Copyright: (c) 2017 Gaurav Rastogi, +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: avi_poolgroup +author: Gaurav Rastogi (@grastogi23) + +short_description: Module for setup of PoolGroup Avi RESTful Object +description: + - This module is used to configure PoolGroup object + - more examples at U(https://github.com/avinetworks/devops) +requirements: [ avisdk ] +options: + state: + description: + - The state that should be applied on the entity. + default: present + choices: ["absent", "present"] + avi_api_update_method: + description: + - Default method for object update is HTTP PUT. + - Setting to patch will override that behavior to use HTTP PATCH. + default: put + choices: ["put", "patch"] + avi_api_patch_op: + description: + - Patch operation to use when using avi_api_update_method as patch. + choices: ["add", "replace", "delete"] + cloud_config_cksum: + description: + - Checksum of cloud configuration for poolgroup. + - Internally set by cloud connector. + cloud_ref: + description: + - It is a reference to an object of type cloud. + created_by: + description: + - Name of the user who created the object. + deployment_policy_ref: + description: + - When setup autoscale manager will automatically promote new pools into production when deployment goals are met. + - It is a reference to an object of type poolgroupdeploymentpolicy. + description: + description: + - Description of pool group. + fail_action: + description: + - Enable an action - close connection, http redirect, or local http response - when a pool group failure happens. + - By default, a connection will be closed, in case the pool group experiences a failure. + implicit_priority_labels: + description: + - Whether an implicit set of priority labels is generated. + - Field introduced in 17.1.9,17.2.3. + - Default value when not specified in API or module is interpreted by Avi Controller as False. + type: bool + members: + description: + - List of pool group members object of type poolgroupmember. + min_servers: + description: + - The minimum number of servers to distribute traffic to. 
+ - Allowed values are 1-65535. + - Special values are 0 - 'disable'. + - Default value when not specified in API or module is interpreted by Avi Controller as 0. + name: + description: + - The name of the pool group. + required: true + priority_labels_ref: + description: + - Uuid of the priority labels. + - If not provided, pool group member priority label will be interpreted as a number with a larger number considered higher priority. + - It is a reference to an object of type prioritylabels. + service_metadata: + description: + - Metadata pertaining to the service provided by this poolgroup. + - In openshift/kubernetes environments, app metadata info is stored. + - Any user input to this field will be overwritten by avi vantage. + - Field introduced in 17.2.14,18.1.5,18.2.1. + tenant_ref: + description: + - It is a reference to an object of type tenant. + url: + description: + - Avi controller URL of the object. + uuid: + description: + - Uuid of the pool group. +extends_documentation_fragment: +- community.general.avi + +''' + +EXAMPLES = """ +- name: Example to create PoolGroup object + avi_poolgroup: + controller: 10.10.25.42 + username: admin + password: something + state: present + name: sample_poolgroup +""" + +RETURN = ''' +obj: + description: PoolGroup (api/poolgroup) object + returned: success, changed + type: dict +''' + +from ansible.module_utils.basic import AnsibleModule +try: + from ansible_collections.community.general.plugins.module_utils.network.avi.avi import ( + avi_common_argument_spec, avi_ansible_api, HAS_AVI) +except ImportError: + HAS_AVI = False + + +def main(): + argument_specs = dict( + state=dict(default='present', + choices=['absent', 'present']), + avi_api_update_method=dict(default='put', + choices=['put', 'patch']), + avi_api_patch_op=dict(choices=['add', 'replace', 'delete']), + cloud_config_cksum=dict(type='str',), + cloud_ref=dict(type='str',), + created_by=dict(type='str',), + deployment_policy_ref=dict(type='str',), + description=dict(type='str',), + fail_action=dict(type='dict',), + implicit_priority_labels=dict(type='bool',), + members=dict(type='list',), + min_servers=dict(type='int',), + name=dict(type='str', required=True), + priority_labels_ref=dict(type='str',), + service_metadata=dict(type='str',), + tenant_ref=dict(type='str',), + url=dict(type='str',), + uuid=dict(type='str',), + ) + argument_specs.update(avi_common_argument_spec()) + module = AnsibleModule( + argument_spec=argument_specs, supports_check_mode=True) + if not HAS_AVI: + return module.fail_json(msg=( + 'Avi python API SDK (avisdk>=17.1) or requests is not installed. 
' + 'For more details visit https://github.com/avinetworks/sdk.')) + return avi_ansible_api(module, 'poolgroup', + set([])) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/avi/avi_poolgroupdeploymentpolicy.py b/plugins/modules/network/avi/avi_poolgroupdeploymentpolicy.py new file mode 100644 index 0000000000..c52400466c --- /dev/null +++ b/plugins/modules/network/avi/avi_poolgroupdeploymentpolicy.py @@ -0,0 +1,154 @@ +#!/usr/bin/python +# +# @author: Gaurav Rastogi (grastogi@avinetworks.com) +# Eric Anderson (eanderson@avinetworks.com) +# module_check: supported +# +# Copyright: (c) 2017 Gaurav Rastogi, +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: avi_poolgroupdeploymentpolicy +author: Gaurav Rastogi (@grastogi23) + +short_description: Module for setup of PoolGroupDeploymentPolicy Avi RESTful Object +description: + - This module is used to configure PoolGroupDeploymentPolicy object + - more examples at U(https://github.com/avinetworks/devops) +requirements: [ avisdk ] +options: + state: + description: + - The state that should be applied on the entity. + default: present + choices: ["absent", "present"] + avi_api_update_method: + description: + - Default method for object update is HTTP PUT. + - Setting to patch will override that behavior to use HTTP PATCH. + default: put + choices: ["put", "patch"] + avi_api_patch_op: + description: + - Patch operation to use when using avi_api_update_method as patch. + choices: ["add", "replace", "delete"] + auto_disable_old_prod_pools: + description: + - It will automatically disable old production pools once there is a new production candidate. + - Default value when not specified in API or module is interpreted by Avi Controller as True. + type: bool + description: + description: + - User defined description for the object. + evaluation_duration: + description: + - Duration of evaluation period for automatic deployment. + - Allowed values are 60-86400. + - Default value when not specified in API or module is interpreted by Avi Controller as 300. + name: + description: + - The name of the pool group deployment policy. + required: true + rules: + description: + - List of pgdeploymentrule. + scheme: + description: + - Deployment scheme. + - Enum options - BLUE_GREEN, CANARY. + - Default value when not specified in API or module is interpreted by Avi Controller as BLUE_GREEN. + target_test_traffic_ratio: + description: + - Target traffic ratio before pool is made production. + - Allowed values are 1-100. + - Default value when not specified in API or module is interpreted by Avi Controller as 100. + tenant_ref: + description: + - It is a reference to an object of type tenant. + test_traffic_ratio_rampup: + description: + - Ratio of the traffic that is sent to the pool under test. + - Test ratio of 100 means blue green. + - Allowed values are 1-100. + - Default value when not specified in API or module is interpreted by Avi Controller as 100. + url: + description: + - Avi controller URL of the object. + uuid: + description: + - Uuid of the pool group deployment policy. + webhook_ref: + description: + - Webhook configured with url that avi controller will pass back information about pool group, old and new pool information and current deployment + - rule results. + - It is a reference to an object of type webhook. 
+ - Field introduced in 17.1.1. +extends_documentation_fragment: +- community.general.avi + +''' + +EXAMPLES = """ +- name: Example to create PoolGroupDeploymentPolicy object + avi_poolgroupdeploymentpolicy: + controller: 10.10.25.42 + username: admin + password: something + state: present + name: sample_poolgroupdeploymentpolicy +""" + +RETURN = ''' +obj: + description: PoolGroupDeploymentPolicy (api/poolgroupdeploymentpolicy) object + returned: success, changed + type: dict +''' + +from ansible.module_utils.basic import AnsibleModule +try: + from ansible_collections.community.general.plugins.module_utils.network.avi.avi import ( + avi_common_argument_spec, avi_ansible_api, HAS_AVI) +except ImportError: + HAS_AVI = False + + +def main(): + argument_specs = dict( + state=dict(default='present', + choices=['absent', 'present']), + avi_api_update_method=dict(default='put', + choices=['put', 'patch']), + avi_api_patch_op=dict(choices=['add', 'replace', 'delete']), + auto_disable_old_prod_pools=dict(type='bool',), + description=dict(type='str',), + evaluation_duration=dict(type='int',), + name=dict(type='str', required=True), + rules=dict(type='list',), + scheme=dict(type='str',), + target_test_traffic_ratio=dict(type='int',), + tenant_ref=dict(type='str',), + test_traffic_ratio_rampup=dict(type='int',), + url=dict(type='str',), + uuid=dict(type='str',), + webhook_ref=dict(type='str',), + ) + argument_specs.update(avi_common_argument_spec()) + module = AnsibleModule( + argument_spec=argument_specs, supports_check_mode=True) + if not HAS_AVI: + return module.fail_json(msg=( + 'Avi python API SDK (avisdk>=17.1) or requests is not installed. ' + 'For more details visit https://github.com/avinetworks/sdk.')) + return avi_ansible_api(module, 'poolgroupdeploymentpolicy', + set([])) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/avi/avi_prioritylabels.py b/plugins/modules/network/avi/avi_prioritylabels.py new file mode 100644 index 0000000000..2ecbaf42c4 --- /dev/null +++ b/plugins/modules/network/avi/avi_prioritylabels.py @@ -0,0 +1,120 @@ +#!/usr/bin/python +# +# @author: Gaurav Rastogi (grastogi@avinetworks.com) +# Eric Anderson (eanderson@avinetworks.com) +# module_check: supported +# +# Copyright: (c) 2017 Gaurav Rastogi, +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: avi_prioritylabels +author: Gaurav Rastogi (@grastogi23) + +short_description: Module for setup of PriorityLabels Avi RESTful Object +description: + - This module is used to configure PriorityLabels object + - more examples at U(https://github.com/avinetworks/devops) +requirements: [ avisdk ] +options: + state: + description: + - The state that should be applied on the entity. + default: present + choices: ["absent", "present"] + avi_api_update_method: + description: + - Default method for object update is HTTP PUT. + - Setting to patch will override that behavior to use HTTP PATCH. + default: put + choices: ["put", "patch"] + avi_api_patch_op: + description: + - Patch operation to use when using avi_api_update_method as patch. + choices: ["add", "replace", "delete"] + cloud_ref: + description: + - It is a reference to an object of type cloud. + description: + description: + - A description of the priority labels. + equivalent_labels: + description: + - Equivalent priority labels in descending order. 
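+    # A minimal usage sketch (hypothetical label values; the avi_credentials
+    # variable and the nested EquivalentLabels structure are assumptions, not
+    # taken from this module's EXAMPLES):
+    #
+    # - name: Create priority labels in descending order
+    #   avi_prioritylabels:
+    #     avi_credentials: "{{ avi_credentials }}"
+    #     state: present
+    #     name: sample_prioritylabels
+    #     equivalent_labels:
+    #       - labels:
+    #           - gold
+    #       - labels:
+    #           - silver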
+ name: + description: + - The name of the priority labels. + required: true + tenant_ref: + description: + - It is a reference to an object of type tenant. + url: + description: + - Avi controller URL of the object. + uuid: + description: + - Uuid of the priority labels. +extends_documentation_fragment: +- community.general.avi + +''' + +EXAMPLES = """ +- name: Example to create PriorityLabels object + avi_prioritylabels: + controller: 10.10.25.42 + username: admin + password: something + state: present + name: sample_prioritylabels +""" + +RETURN = ''' +obj: + description: PriorityLabels (api/prioritylabels) object + returned: success, changed + type: dict +''' + +from ansible.module_utils.basic import AnsibleModule +try: + from ansible_collections.community.general.plugins.module_utils.network.avi.avi import ( + avi_common_argument_spec, avi_ansible_api, HAS_AVI) +except ImportError: + HAS_AVI = False + + +def main(): + argument_specs = dict( + state=dict(default='present', + choices=['absent', 'present']), + avi_api_update_method=dict(default='put', + choices=['put', 'patch']), + avi_api_patch_op=dict(choices=['add', 'replace', 'delete']), + cloud_ref=dict(type='str',), + description=dict(type='str',), + equivalent_labels=dict(type='list',), + name=dict(type='str', required=True), + tenant_ref=dict(type='str',), + url=dict(type='str',), + uuid=dict(type='str',), + ) + argument_specs.update(avi_common_argument_spec()) + module = AnsibleModule( + argument_spec=argument_specs, supports_check_mode=True) + if not HAS_AVI: + return module.fail_json(msg=( + 'Avi python API SDK (avisdk>=17.1) or requests is not installed. ' + 'For more details visit https://github.com/avinetworks/sdk.')) + return avi_ansible_api(module, 'prioritylabels', + set([])) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/avi/avi_role.py b/plugins/modules/network/avi/avi_role.py new file mode 100644 index 0000000000..4184fce498 --- /dev/null +++ b/plugins/modules/network/avi/avi_role.py @@ -0,0 +1,113 @@ +#!/usr/bin/python +# +# @author: Gaurav Rastogi (grastogi@avinetworks.com) +# Eric Anderson (eanderson@avinetworks.com) +# module_check: supported +# Avi Version: 17.1.1 +# +# Copyright: (c) 2017 Gaurav Rastogi, +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: avi_role +author: Gaurav Rastogi (@grastogi23) + +short_description: Module for setup of Role Avi RESTful Object +description: + - This module is used to configure Role object + - more examples at U(https://github.com/avinetworks/devops) +requirements: [ avisdk ] +options: + state: + description: + - The state that should be applied on the entity. + default: present + choices: ["absent", "present"] + avi_api_update_method: + description: + - Default method for object update is HTTP PUT. + - Setting to patch will override that behavior to use HTTP PATCH. + default: put + choices: ["put", "patch"] + avi_api_patch_op: + description: + - Patch operation to use when using avi_api_update_method as patch. + choices: ["add", "replace", "delete"] + name: + description: + - Name of the object. + required: true + privileges: + description: + - List of permission. + tenant_ref: + description: + - It is a reference to an object of type tenant. + url: + description: + - Avi controller URL of the object. 
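+    # A minimal usage sketch (hypothetical values; the privileges entries are
+    # assumed to follow the Avi Permission object, with a resource enum and an
+    # access type):
+    #
+    # - name: Create a role limited to read-only pool access
+    #   avi_role:
+    #     avi_credentials: "{{ avi_credentials }}"
+    #     state: present
+    #     name: sample_readonly_role
+    #     privileges:
+    #       - resource: PERMISSION_POOL
+    #         type: READ_ACCESS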
+ uuid: + description: + - Unique object identifier of the object. +extends_documentation_fragment: +- community.general.avi + +''' + +EXAMPLES = """ +- name: Example to create Role object + avi_role: + controller: 10.10.25.42 + username: admin + password: something + state: present + name: sample_role +""" + +RETURN = ''' +obj: + description: Role (api/role) object + returned: success, changed + type: dict +''' + +from ansible.module_utils.basic import AnsibleModule +try: + from ansible_collections.community.general.plugins.module_utils.network.avi.avi import ( + avi_common_argument_spec, avi_ansible_api, HAS_AVI) +except ImportError: + HAS_AVI = False + + +def main(): + argument_specs = dict( + state=dict(default='present', + choices=['absent', 'present']), + avi_api_update_method=dict(default='put', + choices=['put', 'patch']), + avi_api_patch_op=dict(choices=['add', 'replace', 'delete']), + name=dict(type='str', required=True), + privileges=dict(type='list',), + tenant_ref=dict(type='str',), + url=dict(type='str',), + uuid=dict(type='str',), + ) + argument_specs.update(avi_common_argument_spec()) + module = AnsibleModule( + argument_spec=argument_specs, supports_check_mode=True) + if not HAS_AVI: + return module.fail_json(msg=( + 'Avi python API SDK (avisdk>=17.1) or requests is not installed. ' + 'For more details visit https://github.com/avinetworks/sdk.')) + return avi_ansible_api(module, 'role', + set([])) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/avi/avi_scheduler.py b/plugins/modules/network/avi/avi_scheduler.py new file mode 100644 index 0000000000..c79fabd785 --- /dev/null +++ b/plugins/modules/network/avi/avi_scheduler.py @@ -0,0 +1,154 @@ +#!/usr/bin/python +# +# @author: Gaurav Rastogi (grastogi@avinetworks.com) +# Eric Anderson (eanderson@avinetworks.com) +# module_check: supported +# +# Copyright: (c) 2017 Gaurav Rastogi, +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: avi_scheduler +author: Gaurav Rastogi (@grastogi23) + +short_description: Module for setup of Scheduler Avi RESTful Object +description: + - This module is used to configure Scheduler object + - more examples at U(https://github.com/avinetworks/devops) +requirements: [ avisdk ] +options: + state: + description: + - The state that should be applied on the entity. + default: present + choices: ["absent", "present"] + avi_api_update_method: + description: + - Default method for object update is HTTP PUT. + - Setting to patch will override that behavior to use HTTP PATCH. + default: put + choices: ["put", "patch"] + avi_api_patch_op: + description: + - Patch operation to use when using avi_api_update_method as patch. + choices: ["add", "replace", "delete"] + backup_config_ref: + description: + - Backup configuration to be executed by this scheduler. + - It is a reference to an object of type backupconfiguration. + enabled: + description: + - Boolean flag to set enabled. + - Default value when not specified in API or module is interpreted by Avi Controller as True. + type: bool + end_date_time: + description: + - Scheduler end date and time. + frequency: + description: + - Frequency at which custom scheduler will run. + - Allowed values are 0-60. + frequency_unit: + description: + - Unit at which custom scheduler will run. 
+ - Enum options - SCHEDULER_FREQUENCY_UNIT_MIN, SCHEDULER_FREQUENCY_UNIT_HOUR, SCHEDULER_FREQUENCY_UNIT_DAY, SCHEDULER_FREQUENCY_UNIT_WEEK, + - SCHEDULER_FREQUENCY_UNIT_MONTH. + name: + description: + - Name of scheduler. + required: true + run_mode: + description: + - Scheduler run mode. + - Enum options - RUN_MODE_PERIODIC, RUN_MODE_AT, RUN_MODE_NOW. + run_script_ref: + description: + - Control script to be executed by this scheduler. + - It is a reference to an object of type alertscriptconfig. + scheduler_action: + description: + - Define scheduler action. + - Enum options - SCHEDULER_ACTION_RUN_A_SCRIPT, SCHEDULER_ACTION_BACKUP. + - Default value when not specified in API or module is interpreted by Avi Controller as SCHEDULER_ACTION_BACKUP. + start_date_time: + description: + - Scheduler start date and time. + tenant_ref: + description: + - It is a reference to an object of type tenant. + url: + description: + - Avi controller URL of the object. + uuid: + description: + - Unique object identifier of the object. +extends_documentation_fragment: +- community.general.avi + +''' + +EXAMPLES = """ +- name: Example to create Scheduler object + avi_scheduler: + controller: 10.10.25.42 + username: admin + password: something + state: present + name: sample_scheduler +""" + +RETURN = ''' +obj: + description: Scheduler (api/scheduler) object + returned: success, changed + type: dict +''' + +from ansible.module_utils.basic import AnsibleModule +try: + from ansible_collections.community.general.plugins.module_utils.network.avi.avi import ( + avi_common_argument_spec, avi_ansible_api, HAS_AVI) +except ImportError: + HAS_AVI = False + + +def main(): + argument_specs = dict( + state=dict(default='present', + choices=['absent', 'present']), + avi_api_update_method=dict(default='put', + choices=['put', 'patch']), + avi_api_patch_op=dict(choices=['add', 'replace', 'delete']), + backup_config_ref=dict(type='str',), + enabled=dict(type='bool',), + end_date_time=dict(type='str',), + frequency=dict(type='int',), + frequency_unit=dict(type='str',), + name=dict(type='str', required=True), + run_mode=dict(type='str',), + run_script_ref=dict(type='str',), + scheduler_action=dict(type='str',), + start_date_time=dict(type='str',), + tenant_ref=dict(type='str',), + url=dict(type='str',), + uuid=dict(type='str',), + ) + argument_specs.update(avi_common_argument_spec()) + module = AnsibleModule( + argument_spec=argument_specs, supports_check_mode=True) + if not HAS_AVI: + return module.fail_json(msg=( + 'Avi python API SDK (avisdk>=17.1) or requests is not installed. 
' + 'For more details visit https://github.com/avinetworks/sdk.')) + return avi_ansible_api(module, 'scheduler', + set([])) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/avi/avi_seproperties.py b/plugins/modules/network/avi/avi_seproperties.py new file mode 100644 index 0000000000..0eb92eeec6 --- /dev/null +++ b/plugins/modules/network/avi/avi_seproperties.py @@ -0,0 +1,113 @@ +#!/usr/bin/python +# +# @author: Gaurav Rastogi (grastogi@avinetworks.com) +# Eric Anderson (eanderson@avinetworks.com) +# module_check: supported +# Avi Version: 17.1.1 +# +# Copyright: (c) 2017 Gaurav Rastogi, +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: avi_seproperties +author: Gaurav Rastogi (@grastogi23) + +short_description: Module for setup of SeProperties Avi RESTful Object +description: + - This module is used to configure SeProperties object + - more examples at U(https://github.com/avinetworks/devops) +requirements: [ avisdk ] +options: + state: + description: + - The state that should be applied on the entity. + default: present + choices: ["absent", "present"] + avi_api_update_method: + description: + - Default method for object update is HTTP PUT. + - Setting to patch will override that behavior to use HTTP PATCH. + default: put + choices: ["put", "patch"] + avi_api_patch_op: + description: + - Patch operation to use when using avi_api_update_method as patch. + choices: ["add", "replace", "delete"] + se_agent_properties: + description: + - Seagentproperties settings for seproperties. + se_bootup_properties: + description: + - Sebootupproperties settings for seproperties. + se_runtime_properties: + description: + - Seruntimeproperties settings for seproperties. + url: + description: + - Avi controller URL of the object. + uuid: + description: + - Unique object identifier of the object. + - Default value when not specified in API or module is interpreted by Avi Controller as default. +extends_documentation_fragment: +- community.general.avi + +''' + +EXAMPLES = """ +- name: Example to create SeProperties object + avi_seproperties: + controller: 10.10.25.42 + username: admin + password: something + state: present + name: sample_seproperties +""" + +RETURN = ''' +obj: + description: SeProperties (api/seproperties) object + returned: success, changed + type: dict +''' + +from ansible.module_utils.basic import AnsibleModule +try: + from ansible_collections.community.general.plugins.module_utils.network.avi.avi import ( + avi_common_argument_spec, avi_ansible_api, HAS_AVI) +except ImportError: + HAS_AVI = False + + +def main(): + argument_specs = dict( + state=dict(default='present', + choices=['absent', 'present']), + avi_api_update_method=dict(default='put', + choices=['put', 'patch']), + avi_api_patch_op=dict(choices=['add', 'replace', 'delete']), + se_agent_properties=dict(type='dict',), + se_bootup_properties=dict(type='dict',), + se_runtime_properties=dict(type='dict',), + url=dict(type='str',), + uuid=dict(type='str',), + ) + argument_specs.update(avi_common_argument_spec()) + module = AnsibleModule( + argument_spec=argument_specs, supports_check_mode=True) + if not HAS_AVI: + return module.fail_json(msg=( + 'Avi python API SDK (avisdk>=17.1) or requests is not installed. 
' + 'For more details visit https://github.com/avinetworks/sdk.')) + return avi_ansible_api(module, 'seproperties', + set([])) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/avi/avi_serverautoscalepolicy.py b/plugins/modules/network/avi/avi_serverautoscalepolicy.py new file mode 100644 index 0000000000..640258a3e6 --- /dev/null +++ b/plugins/modules/network/avi/avi_serverautoscalepolicy.py @@ -0,0 +1,180 @@ +#!/usr/bin/python +# +# @author: Gaurav Rastogi (grastogi@avinetworks.com) +# Eric Anderson (eanderson@avinetworks.com) +# module_check: supported +# +# Copyright: (c) 2017 Gaurav Rastogi, +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: avi_serverautoscalepolicy +author: Gaurav Rastogi (@grastogi23) + +short_description: Module for setup of ServerAutoScalePolicy Avi RESTful Object +description: + - This module is used to configure ServerAutoScalePolicy object + - more examples at U(https://github.com/avinetworks/devops) +requirements: [ avisdk ] +options: + state: + description: + - The state that should be applied on the entity. + default: present + choices: ["absent", "present"] + avi_api_update_method: + description: + - Default method for object update is HTTP PUT. + - Setting to patch will override that behavior to use HTTP PATCH. + default: put + choices: ["put", "patch"] + avi_api_patch_op: + description: + - Patch operation to use when using avi_api_update_method as patch. + choices: ["add", "replace", "delete"] + description: + description: + - User defined description for the object. + intelligent_autoscale: + description: + - Use avi intelligent autoscale algorithm where autoscale is performed by comparing load on the pool against estimated capacity of all the servers. + - Default value when not specified in API or module is interpreted by Avi Controller as False. + type: bool + intelligent_scalein_margin: + description: + - Maximum extra capacity as percentage of load used by the intelligent scheme. + - Scalein is triggered when available capacity is more than this margin. + - Allowed values are 1-99. + - Default value when not specified in API or module is interpreted by Avi Controller as 40. + intelligent_scaleout_margin: + description: + - Minimum extra capacity as percentage of load used by the intelligent scheme. + - Scaleout is triggered when available capacity is less than this margin. + - Allowed values are 1-99. + - Default value when not specified in API or module is interpreted by Avi Controller as 20. + max_scalein_adjustment_step: + description: + - Maximum number of servers to scalein simultaneously. + - The actual number of servers to scalein is chosen such that target number of servers is always more than or equal to the min_size. + - Default value when not specified in API or module is interpreted by Avi Controller as 1. + max_scaleout_adjustment_step: + description: + - Maximum number of servers to scaleout simultaneously. + - The actual number of servers to scaleout is chosen such that target number of servers is always less than or equal to the max_size. + - Default value when not specified in API or module is interpreted by Avi Controller as 1. + max_size: + description: + - Maximum number of servers after scaleout. + - Allowed values are 0-400. 
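+    # A minimal usage sketch (hypothetical sizes) combining the intelligent
+    # autoscale and sizing options documented here; avi_credentials is assumed
+    # to be defined elsewhere:
+    #
+    # - name: Autoscale a pool between 2 and 10 servers on estimated capacity
+    #   avi_serverautoscalepolicy:
+    #     avi_credentials: "{{ avi_credentials }}"
+    #     state: present
+    #     name: sample_serverautoscalepolicy
+    #     intelligent_autoscale: true
+    #     min_size: 2
+    #     max_size: 10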
+ min_size: + description: + - No scale-in happens once number of operationally up servers reach min_servers. + - Allowed values are 0-400. + name: + description: + - Name of the object. + required: true + scalein_alertconfig_refs: + description: + - Trigger scalein when alerts due to any of these alert configurations are raised. + - It is a reference to an object of type alertconfig. + scalein_cooldown: + description: + - Cooldown period during which no new scalein is triggered to allow previous scalein to successfully complete. + - Default value when not specified in API or module is interpreted by Avi Controller as 300. + scaleout_alertconfig_refs: + description: + - Trigger scaleout when alerts due to any of these alert configurations are raised. + - It is a reference to an object of type alertconfig. + scaleout_cooldown: + description: + - Cooldown period during which no new scaleout is triggered to allow previous scaleout to successfully complete. + - Default value when not specified in API or module is interpreted by Avi Controller as 300. + tenant_ref: + description: + - It is a reference to an object of type tenant. + url: + description: + - Avi controller URL of the object. + use_predicted_load: + description: + - Use predicted load rather than current load. + - Default value when not specified in API or module is interpreted by Avi Controller as False. + type: bool + uuid: + description: + - Unique object identifier of the object. +extends_documentation_fragment: +- community.general.avi + +''' + +EXAMPLES = """ +- name: Example to create ServerAutoScalePolicy object + avi_serverautoscalepolicy: + controller: 10.10.25.42 + username: admin + password: something + state: present + name: sample_serverautoscalepolicy +""" + +RETURN = ''' +obj: + description: ServerAutoScalePolicy (api/serverautoscalepolicy) object + returned: success, changed + type: dict +''' + +from ansible.module_utils.basic import AnsibleModule +try: + from ansible_collections.community.general.plugins.module_utils.network.avi.avi import ( + avi_common_argument_spec, avi_ansible_api, HAS_AVI) +except ImportError: + HAS_AVI = False + + +def main(): + argument_specs = dict( + state=dict(default='present', + choices=['absent', 'present']), + avi_api_update_method=dict(default='put', + choices=['put', 'patch']), + avi_api_patch_op=dict(choices=['add', 'replace', 'delete']), + description=dict(type='str',), + intelligent_autoscale=dict(type='bool',), + intelligent_scalein_margin=dict(type='int',), + intelligent_scaleout_margin=dict(type='int',), + max_scalein_adjustment_step=dict(type='int',), + max_scaleout_adjustment_step=dict(type='int',), + max_size=dict(type='int',), + min_size=dict(type='int',), + name=dict(type='str', required=True), + scalein_alertconfig_refs=dict(type='list',), + scalein_cooldown=dict(type='int',), + scaleout_alertconfig_refs=dict(type='list',), + scaleout_cooldown=dict(type='int',), + tenant_ref=dict(type='str',), + url=dict(type='str',), + use_predicted_load=dict(type='bool',), + uuid=dict(type='str',), + ) + argument_specs.update(avi_common_argument_spec()) + module = AnsibleModule( + argument_spec=argument_specs, supports_check_mode=True) + if not HAS_AVI: + return module.fail_json(msg=( + 'Avi python API SDK (avisdk>=17.1) or requests is not installed. 
' + 'For more details visit https://github.com/avinetworks/sdk.')) + return avi_ansible_api(module, 'serverautoscalepolicy', + set([])) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/avi/avi_serviceengine.py b/plugins/modules/network/avi/avi_serviceengine.py new file mode 100644 index 0000000000..e70722be81 --- /dev/null +++ b/plugins/modules/network/avi/avi_serviceengine.py @@ -0,0 +1,171 @@ +#!/usr/bin/python +# +# @author: Gaurav Rastogi (grastogi@avinetworks.com) +# Eric Anderson (eanderson@avinetworks.com) +# module_check: supported +# +# Copyright: (c) 2017 Gaurav Rastogi, +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: avi_serviceengine +author: Gaurav Rastogi (@grastogi23) + +short_description: Module for setup of ServiceEngine Avi RESTful Object +description: + - This module is used to configure ServiceEngine object + - more examples at U(https://github.com/avinetworks/devops) +requirements: [ avisdk ] +options: + state: + description: + - The state that should be applied on the entity. + default: present + choices: ["absent", "present"] + avi_api_update_method: + description: + - Default method for object update is HTTP PUT. + - Setting to patch will override that behavior to use HTTP PATCH. + default: put + choices: ["put", "patch"] + avi_api_patch_op: + description: + - Patch operation to use when using avi_api_update_method as patch. + choices: ["add", "replace", "delete"] + availability_zone: + description: + - Availability_zone of serviceengine. + cloud_ref: + description: + - It is a reference to an object of type cloud. + container_mode: + description: + - Boolean flag to set container_mode. + - Default value when not specified in API or module is interpreted by Avi Controller as False. + type: bool + container_type: + description: + - Enum options - container_type_bridge, container_type_host, container_type_host_dpdk. + - Default value when not specified in API or module is interpreted by Avi Controller as CONTAINER_TYPE_HOST. + controller_created: + description: + - Boolean flag to set controller_created. + - Default value when not specified in API or module is interpreted by Avi Controller as False. + type: bool + controller_ip: + description: + - Controller_ip of serviceengine. + data_vnics: + description: + - List of vnic. + enable_state: + description: + - Inorder to disable se set this field appropriately. + - Enum options - SE_STATE_ENABLED, SE_STATE_DISABLED_FOR_PLACEMENT, SE_STATE_DISABLED, SE_STATE_DISABLED_FORCE. + - Default value when not specified in API or module is interpreted by Avi Controller as SE_STATE_ENABLED. + flavor: + description: + - Flavor of serviceengine. + host_ref: + description: + - It is a reference to an object of type vimgrhostruntime. + hypervisor: + description: + - Enum options - default, vmware_esx, kvm, vmware_vsan, xen. + mgmt_vnic: + description: + - Vnic settings for serviceengine. + name: + description: + - Name of the object. + - Default value when not specified in API or module is interpreted by Avi Controller as VM name unknown. + resources: + description: + - Seresources settings for serviceengine. + se_group_ref: + description: + - It is a reference to an object of type serviceenginegroup. + tenant_ref: + description: + - It is a reference to an object of type tenant. 
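+    # A minimal usage sketch (hypothetical SE name): taking an SE out of
+    # placement rotation via enable_state instead of deleting it, using the
+    # SE_STATE_DISABLED_FOR_PLACEMENT enum value listed above:
+    #
+    # - name: Disable a service engine for new placements
+    #   avi_serviceengine:
+    #     avi_credentials: "{{ avi_credentials }}"
+    #     state: present
+    #     name: Avi-se-sample
+    #     enable_state: SE_STATE_DISABLED_FOR_PLACEMENT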
+ url: + description: + - Avi controller URL of the object. + uuid: + description: + - Unique object identifier of the object. +extends_documentation_fragment: +- community.general.avi + +''' + +EXAMPLES = """ +- name: Example to create ServiceEngine object + avi_serviceengine: + controller: 10.10.25.42 + username: admin + password: something + state: present + name: sample_serviceengine +""" + +RETURN = ''' +obj: + description: ServiceEngine (api/serviceengine) object + returned: success, changed + type: dict +''' + +from ansible.module_utils.basic import AnsibleModule +try: + from ansible_collections.community.general.plugins.module_utils.network.avi.avi import ( + avi_common_argument_spec, avi_ansible_api, HAS_AVI) +except ImportError: + HAS_AVI = False + + +def main(): + argument_specs = dict( + state=dict(default='present', + choices=['absent', 'present']), + avi_api_update_method=dict(default='put', + choices=['put', 'patch']), + avi_api_patch_op=dict(choices=['add', 'replace', 'delete']), + availability_zone=dict(type='str',), + cloud_ref=dict(type='str',), + container_mode=dict(type='bool',), + container_type=dict(type='str',), + controller_created=dict(type='bool',), + controller_ip=dict(type='str',), + data_vnics=dict(type='list',), + enable_state=dict(type='str',), + flavor=dict(type='str',), + host_ref=dict(type='str',), + hypervisor=dict(type='str',), + mgmt_vnic=dict(type='dict',), + name=dict(type='str',), + resources=dict(type='dict',), + se_group_ref=dict(type='str',), + tenant_ref=dict(type='str',), + url=dict(type='str',), + uuid=dict(type='str',), + ) + argument_specs.update(avi_common_argument_spec()) + module = AnsibleModule( + argument_spec=argument_specs, supports_check_mode=True) + if not HAS_AVI: + return module.fail_json(msg=( + 'Avi python API SDK (avisdk>=17.1) or requests is not installed. ' + 'For more details visit https://github.com/avinetworks/sdk.')) + return avi_ansible_api(module, 'serviceengine', + set([])) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/avi/avi_serviceenginegroup.py b/plugins/modules/network/avi/avi_serviceenginegroup.py new file mode 100644 index 0000000000..e0739777c5 --- /dev/null +++ b/plugins/modules/network/avi/avi_serviceenginegroup.py @@ -0,0 +1,1076 @@ +#!/usr/bin/python +# +# @author: Gaurav Rastogi (grastogi@avinetworks.com) +# Eric Anderson (eanderson@avinetworks.com) +# module_check: supported +# Avi Version: 17.1.1 +# +# Copyright: (c) 2017 Gaurav Rastogi, +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: avi_serviceenginegroup +author: Gaurav Rastogi (@grastogi23) + +short_description: Module for setup of ServiceEngineGroup Avi RESTful Object +description: + - This module is used to configure ServiceEngineGroup object + - more examples at U(https://github.com/avinetworks/devops) +requirements: [ avisdk ] +options: + state: + description: + - The state that should be applied on the entity. + default: present + choices: ["absent", "present"] + avi_api_update_method: + description: + - Default method for object update is HTTP PUT. + - Setting to patch will override that behavior to use HTTP PATCH. + default: put + choices: ["put", "patch"] + avi_api_patch_op: + description: + - Patch operation to use when using avi_api_update_method as patch. 
+ choices: ["add", "replace", "delete"] + accelerated_networking: + description: + - Enable accelerated networking option for azure se. + - Accelerated networking enables single root i/o virtualization (sr-iov) to a se vm. + - This improves networking performance. + - Field introduced in 17.2.14,18.1.5,18.2.1. + type: bool + active_standby: + description: + - Service engines in active/standby mode for ha failover. + - Default value when not specified in API or module is interpreted by Avi Controller as False. + type: bool + additional_config_memory: + description: + - Indicates the percent of config memory used for config updates. + - Allowed values are 0-90. + - Field deprecated in 18.1.2. + - Field introduced in 18.1.1. + advertise_backend_networks: + description: + - Advertise reach-ability of backend server networks via adc through bgp for default gateway feature. + - Default value when not specified in API or module is interpreted by Avi Controller as False. + type: bool + aggressive_failure_detection: + description: + - Enable aggressive failover configuration for ha. + - Default value when not specified in API or module is interpreted by Avi Controller as False. + type: bool + algo: + description: + - In compact placement, virtual services are placed on existing ses until max_vs_per_se limit is reached. + - Enum options - PLACEMENT_ALGO_PACKED, PLACEMENT_ALGO_DISTRIBUTED. + - Default value when not specified in API or module is interpreted by Avi Controller as PLACEMENT_ALGO_PACKED. + allow_burst: + description: + - Allow ses to be created using burst license. + - Field introduced in 17.2.5. + type: bool + app_cache_percent: + description: + - A percent value of total se memory reserved for application caching. + - This is an se bootup property and requires se restart. + - Allowed values are 0 - 100. + - Special values are 0- 'disable'. + - Field introduced in 18.2.3. + - Default value when not specified in API or module is interpreted by Avi Controller as 0. + app_learning_memory_percent: + description: + - A percent value of total se memory reserved for application learning. + - This is an se bootup property and requires se restart. + - Allowed values are 0 - 10. + - Field introduced in 18.2.3. + - Default value when not specified in API or module is interpreted by Avi Controller as 0. + archive_shm_limit: + description: + - Amount of se memory in gb until which shared memory is collected in core archive. + - Field introduced in 17.1.3. + - Default value when not specified in API or module is interpreted by Avi Controller as 8. + async_ssl: + description: + - Ssl handshakes will be handled by dedicated ssl threads. + - Default value when not specified in API or module is interpreted by Avi Controller as False. + type: bool + async_ssl_threads: + description: + - Number of async ssl threads per se_dp. + - Allowed values are 1-16. + - Default value when not specified in API or module is interpreted by Avi Controller as 1. + auto_rebalance: + description: + - If set, virtual services will be automatically migrated when load on an se is less than minimum or more than maximum thresholds. + - Only alerts are generated when the auto_rebalance is not set. + - Default value when not specified in API or module is interpreted by Avi Controller as False. + type: bool + auto_rebalance_capacity_per_se: + description: + - Capacities of se for auto rebalance for each criteria. + - Field introduced in 17.2.4. + auto_rebalance_criteria: + description: + - Set of criteria for se auto rebalance. 
+ - Enum options - SE_AUTO_REBALANCE_CPU, SE_AUTO_REBALANCE_PPS, SE_AUTO_REBALANCE_MBPS, SE_AUTO_REBALANCE_OPEN_CONNS, SE_AUTO_REBALANCE_CPS. + - Field introduced in 17.2.3. + auto_rebalance_interval: + description: + - Frequency of rebalance, if 'auto rebalance' is enabled. + - Default value when not specified in API or module is interpreted by Avi Controller as 300. + auto_redistribute_active_standby_load: + description: + - Redistribution of virtual services from the takeover se to the replacement se can cause momentary traffic loss. + - If the auto-redistribute load option is left in its default off state, any desired rebalancing requires calls to rest api. + - Default value when not specified in API or module is interpreted by Avi Controller as False. + type: bool + bgp_state_update_interval: + description: + - Bgp peer state update interval. + - Allowed values are 5-100. + - Field introduced in 17.2.14,18.1.5,18.2.1. + - Default value when not specified in API or module is interpreted by Avi Controller as 10. + buffer_se: + description: + - Excess service engine capacity provisioned for ha failover. + - Default value when not specified in API or module is interpreted by Avi Controller as 1. + cloud_ref: + description: + - It is a reference to an object of type cloud. + config_debugs_on_all_cores: + description: + - Enable config debugs on all cores of se. + - Field introduced in 17.2.13,18.1.5,18.2.1. + - Default value when not specified in API or module is interpreted by Avi Controller as False. + type: bool + connection_memory_percentage: + description: + - Percentage of memory for connection state. + - This will come at the expense of memory used for http in-memory cache. + - Allowed values are 10-90. + - Default value when not specified in API or module is interpreted by Avi Controller as 50. + cpu_reserve: + description: + - Boolean flag to set cpu_reserve. + - Default value when not specified in API or module is interpreted by Avi Controller as False. + type: bool + cpu_socket_affinity: + description: + - Allocate all the cpu cores for the service engine virtual machines on the same cpu socket. + - Applicable only for vcenter cloud. + - Default value when not specified in API or module is interpreted by Avi Controller as False. + type: bool + custom_securitygroups_data: + description: + - Custom security groups to be associated with data vnics for se instances in openstack and aws clouds. + - Field introduced in 17.1.3. + custom_securitygroups_mgmt: + description: + - Custom security groups to be associated with management vnic for se instances in openstack and aws clouds. + - Field introduced in 17.1.3. + custom_tag: + description: + - Custom tag will be used to create the tags for se instance in aws. + - Note this is not the same as the prefix for se name. + data_network_id: + description: + - Subnet used to spin up the data nic for service engines, used only for azure cloud. + - Overrides the cloud level setting for service engine subnet. + - Field introduced in 18.2.3. + datascript_timeout: + description: + - Number of instructions before datascript times out. + - Allowed values are 0-100000000. + - Field introduced in 18.2.3. + - Default value when not specified in API or module is interpreted by Avi Controller as 1000000. + dedicated_dispatcher_core: + description: + - Dedicate the core that handles packet receive/transmit from the network to just the dispatching function. + - Don't use it for tcp/ip and ssl functions. 
+ - Default value when not specified in API or module is interpreted by Avi Controller as False. + type: bool + description: + description: + - User defined description for the object. + disable_avi_securitygroups: + description: + - By default, avi creates and manages security groups along with custom sg provided by user. + - Set this to true to disallow avi to create and manage new security groups. + - Avi will only make use of custom security groups provided by user. + - This option is only supported for aws cloud type. + - Field introduced in 17.2.13,18.1.4,18.2.1. + - Default value when not specified in API or module is interpreted by Avi Controller as False. + type: bool + disable_csum_offloads: + description: + - Stop using tcp/udp and ip checksum offload features of nics. + - Field introduced in 17.1.14, 17.2.5, 18.1.1. + - Default value when not specified in API or module is interpreted by Avi Controller as False. + type: bool + disable_gro: + description: + - Disable generic receive offload (gro) in dpdk poll-mode driver packet receive path. + - Gro is on by default on nics that do not support lro (large receive offload) or do not gain performance boost from lro. + - Field introduced in 17.2.5, 18.1.1. + - Default value when not specified in API or module is interpreted by Avi Controller as True. + type: bool + disable_se_memory_check: + description: + - If set, disable the config memory check done in service engine. + - Field introduced in 18.1.2. + - Default value when not specified in API or module is interpreted by Avi Controller as False. + type: bool + disable_tso: + description: + - Disable tcp segmentation offload (tso) in dpdk poll-mode driver packet transmit path. + - Tso is on by default on nics that support it. + - Field introduced in 17.2.5, 18.1.1. + - Default value when not specified in API or module is interpreted by Avi Controller as True. + type: bool + disk_per_se: + description: + - Amount of disk space for each of the service engine virtual machines. + - Default value when not specified in API or module is interpreted by Avi Controller as 10. + distribute_load_active_standby: + description: + - Use both the active and standby service engines for virtual service placement in the legacy active standby ha mode. + - Default value when not specified in API or module is interpreted by Avi Controller as False. + type: bool + distribute_queues: + description: + - Distributes queue ownership among cores so multiple cores handle dispatcher duties. + - Field introduced in 17.2.8. + - Default value when not specified in API or module is interpreted by Avi Controller as False. + type: bool + enable_hsm_priming: + description: + - (this is a beta feature). + - Enable hsm key priming. + - If enabled, key handles on the hsm will be synced to se before processing client connections. + - Field introduced in 17.2.7, 18.1.1. + - Default value when not specified in API or module is interpreted by Avi Controller as False. + type: bool + enable_multi_lb: + description: + - Applicable only for azure cloud with basic sku lb. + - If set, additional azure lbs will be automatically created if resources in existing lb are exhausted. + - Field introduced in 17.2.10, 18.1.2. + - Default value when not specified in API or module is interpreted by Avi Controller as False. + type: bool + enable_routing: + description: + - Enable routing for this serviceenginegroup . + - Default value when not specified in API or module is interpreted by Avi Controller as False. 
+ type: bool + enable_vip_on_all_interfaces: + description: + - Enable vip on all interfaces of se. + - Field introduced in 17.1.1. + - Default value when not specified in API or module is interpreted by Avi Controller as True. + type: bool + enable_vmac: + description: + - Use virtual mac address for interfaces on which floating interface ips are placed. + - Default value when not specified in API or module is interpreted by Avi Controller as False. + type: bool + ephemeral_portrange_end: + description: + - End local ephemeral port number for outbound connections. + - Field introduced in 17.2.13, 18.1.5, 18.2.1. + ephemeral_portrange_start: + description: + - Start local ephemeral port number for outbound connections. + - Field introduced in 17.2.13, 18.1.5, 18.2.1. + extra_config_multiplier: + description: + - Multiplier for extra config to support large vs/pool config. + - Default value when not specified in API or module is interpreted by Avi Controller as 0.0. + extra_shared_config_memory: + description: + - Extra config memory to support large geo db configuration. + - Field introduced in 17.1.1. + - Default value when not specified in API or module is interpreted by Avi Controller as 0. + floating_intf_ip: + description: + - If serviceenginegroup is configured for legacy 1+1 active standby ha mode, floating ip's will be advertised only by the active se in the pair. + - Virtual services in this group must be disabled/enabled for any changes to the floating ip's to take effect. + - Only active se hosting vs tagged with active standby se 1 tag will advertise this floating ip when manual load distribution is enabled. + floating_intf_ip_se_2: + description: + - If serviceenginegroup is configured for legacy 1+1 active standby ha mode, floating ip's will be advertised only by the active se in the pair. + - Virtual services in this group must be disabled/enabled for any changes to the floating ip's to take effect. + - Only active se hosting vs tagged with active standby se 2 tag will advertise this floating ip when manual load distribution is enabled. + flow_table_new_syn_max_entries: + description: + - Maximum number of flow table entries that have not completed tcp three-way handshake yet. + - Field introduced in 17.2.5. + - Default value when not specified in API or module is interpreted by Avi Controller as 0. + free_list_size: + description: + - Number of entries in the free list. + - Field introduced in 17.2.10, 18.1.2. + - Default value when not specified in API or module is interpreted by Avi Controller as 1024. + ha_mode: + description: + - High availability mode for all the virtual services using this service engine group. + - Enum options - HA_MODE_SHARED_PAIR, HA_MODE_SHARED, HA_MODE_LEGACY_ACTIVE_STANDBY. + - Default value when not specified in API or module is interpreted by Avi Controller as HA_MODE_SHARED. + hardwaresecuritymodulegroup_ref: + description: + - It is a reference to an object of type hardwaresecuritymodulegroup. + heap_minimum_config_memory: + description: + - Minimum required heap memory to apply any configuration. + - Allowed values are 0-100. + - Field introduced in 18.1.2. + - Default value when not specified in API or module is interpreted by Avi Controller as 8. + hm_on_standby: + description: + - Enable active health monitoring from the standby se for all placed virtual services. + - Default value when not specified in API or module is interpreted by Avi Controller as True. 
+ type: bool + host_attribute_key: + description: + - Key of a (key, value) pair identifying a label for a set of nodes usually in container clouds. + - Needs to be specified together with host_attribute_value. + - Ses can be configured differently including ha modes across different se groups. + - May also be used for isolation between different classes of virtualservices. + - Virtualservices' se group may be specified via annotations/labels. + - A openshift/kubernetes namespace maybe annotated with a matching se group label as openshift.io/node-selector apptype=prod. + - When multiple se groups are used in a cloud with host attributes specified,just a single se group can exist as a match-all se group without a + - host_attribute_key. + host_attribute_value: + description: + - Value of a (key, value) pair identifying a label for a set of nodes usually in container clouds. + - Needs to be specified together with host_attribute_key. + host_gateway_monitor: + description: + - Enable the host gateway monitor when service engine is deployed as docker container. + - Disabled by default. + - Field introduced in 17.2.4. + - Default value when not specified in API or module is interpreted by Avi Controller as False. + type: bool + hypervisor: + description: + - Override default hypervisor. + - Enum options - DEFAULT, VMWARE_ESX, KVM, VMWARE_VSAN, XEN. + ignore_rtt_threshold: + description: + - Ignore rtt samples if it is above threshold. + - Field introduced in 17.1.6,17.2.2. + - Default value when not specified in API or module is interpreted by Avi Controller as 5000. + ingress_access_data: + description: + - Program se security group ingress rules to allow vip data access from remote cidr type. + - Enum options - SG_INGRESS_ACCESS_NONE, SG_INGRESS_ACCESS_ALL, SG_INGRESS_ACCESS_VPC. + - Field introduced in 17.1.5. + - Default value when not specified in API or module is interpreted by Avi Controller as SG_INGRESS_ACCESS_ALL. + ingress_access_mgmt: + description: + - Program se security group ingress rules to allow ssh/icmp management access from remote cidr type. + - Enum options - SG_INGRESS_ACCESS_NONE, SG_INGRESS_ACCESS_ALL, SG_INGRESS_ACCESS_VPC. + - Field introduced in 17.1.5. + - Default value when not specified in API or module is interpreted by Avi Controller as SG_INGRESS_ACCESS_ALL. + instance_flavor: + description: + - Instance/flavor name for se instance. + iptables: + description: + - Iptables rules. + least_load_core_selection: + description: + - Select core with least load for new flow. + - Default value when not specified in API or module is interpreted by Avi Controller as True. + type: bool + license_tier: + description: + - Specifies the license tier which would be used. + - This field by default inherits the value from cloud. + - Enum options - ENTERPRISE_16, ENTERPRISE_18. + - Field introduced in 17.2.5. + license_type: + description: + - If no license type is specified then default license enforcement for the cloud type is chosen. + - Enum options - LIC_BACKEND_SERVERS, LIC_SOCKETS, LIC_CORES, LIC_HOSTS, LIC_SE_BANDWIDTH, LIC_METERED_SE_BANDWIDTH. + - Field introduced in 17.2.5. + log_disksz: + description: + - Maximum disk capacity (in mb) to be allocated to an se. + - This is exclusively used for debug and log data. + - Default value when not specified in API or module is interpreted by Avi Controller as 10000. 
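+    # A minimal usage sketch (hypothetical values) combining the HA and
+    # rebalancing options documented in this module; avi_credentials is
+    # assumed to be defined elsewhere:
+    #
+    # - name: Shared-pair SE group with CPU-based auto rebalance
+    #   avi_serviceenginegroup:
+    #     avi_credentials: "{{ avi_credentials }}"
+    #     state: present
+    #     name: sample_seg
+    #     ha_mode: HA_MODE_SHARED_PAIR
+    #     auto_rebalance: true
+    #     min_cpu_usage: 30
+    #     max_cpu_usage: 80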
+ max_cpu_usage: + description: + - When cpu usage on an se exceeds this threshold, virtual services hosted on this se may be rebalanced to other ses to reduce load. + - A new se may be created as part of this process. + - Allowed values are 40-90. + - Default value when not specified in API or module is interpreted by Avi Controller as 80. + max_memory_per_mempool: + description: + - Max bytes that can be allocated in a single mempool. + - Field introduced in 18.1.5. + - Default value when not specified in API or module is interpreted by Avi Controller as 64. + max_public_ips_per_lb: + description: + - Applicable to azure platform only. + - Maximum number of public ips per azure lb. + - Field introduced in 17.2.12, 18.1.2. + - Default value when not specified in API or module is interpreted by Avi Controller as 30. + max_rules_per_lb: + description: + - Applicable to azure platform only. + - Maximum number of rules per azure lb. + - Field introduced in 17.2.12, 18.1.2. + - Default value when not specified in API or module is interpreted by Avi Controller as 150. + max_scaleout_per_vs: + description: + - Maximum number of active service engines for the virtual service. + - Allowed values are 1-64. + - Default value when not specified in API or module is interpreted by Avi Controller as 4. + max_se: + description: + - Maximum number of services engines in this group. + - Allowed values are 0-1000. + - Default value when not specified in API or module is interpreted by Avi Controller as 10. + max_vs_per_se: + description: + - Maximum number of virtual services that can be placed on a single service engine. + - East west virtual services are excluded from this limit. + - Allowed values are 1-1000. + - Default value when not specified in API or module is interpreted by Avi Controller as 10. + mem_reserve: + description: + - Boolean flag to set mem_reserve. + - Default value when not specified in API or module is interpreted by Avi Controller as True. + type: bool + memory_for_config_update: + description: + - Indicates the percent of memory reserved for config updates. + - Allowed values are 0-100. + - Field introduced in 18.1.2. + - Default value when not specified in API or module is interpreted by Avi Controller as 15. + memory_per_se: + description: + - Amount of memory for each of the service engine virtual machines. + - Default value when not specified in API or module is interpreted by Avi Controller as 2048. + mgmt_network_ref: + description: + - Management network to use for avi service engines. + - It is a reference to an object of type network. + mgmt_subnet: + description: + - Management subnet to use for avi service engines. + min_cpu_usage: + description: + - When cpu usage on an se falls below the minimum threshold, virtual services hosted on the se may be consolidated onto other underutilized ses. + - After consolidation, unused service engines may then be eligible for deletion. + - Allowed values are 20-60. + - Default value when not specified in API or module is interpreted by Avi Controller as 30. + min_scaleout_per_vs: + description: + - Minimum number of active service engines for the virtual service. + - Allowed values are 1-64. + - Default value when not specified in API or module is interpreted by Avi Controller as 1. + min_se: + description: + - Minimum number of services engines in this group (relevant for se autorebalance only). + - Allowed values are 0-1000. + - Field introduced in 17.2.13,18.1.3,18.2.1. 
+ - Default value when not specified in API or module is interpreted by Avi Controller as 1. + minimum_connection_memory: + description: + - Indicates the percent of memory reserved for connections. + - Allowed values are 0-100. + - Field introduced in 18.1.2. + - Default value when not specified in API or module is interpreted by Avi Controller as 20. + minimum_required_config_memory: + description: + - Required available config memory to apply any configuration. + - Allowed values are 0-90. + - Field deprecated in 18.1.2. + - Field introduced in 18.1.1. + n_log_streaming_threads: + description: + - Number of threads to use for log streaming. + - Allowed values are 1-100. + - Field introduced in 17.2.12, 18.1.2. + - Default value when not specified in API or module is interpreted by Avi Controller as 1. + name: + description: + - Name of the object. + required: true + non_significant_log_throttle: + description: + - This setting limits the number of non-significant logs generated per second per core on this se. + - Default is 100 logs per second. + - Set it to zero (0) to disable throttling. + - Field introduced in 17.1.3. + - Default value when not specified in API or module is interpreted by Avi Controller as 100. + num_dispatcher_cores: + description: + - Number of dispatcher cores (0,1,2,4,8 or 16). + - If set to 0, then number of dispatcher cores is deduced automatically. + - Allowed values are 0,1,2,4,8,16. + - Field introduced in 17.2.12, 18.1.3, 18.2.1. + - Default value when not specified in API or module is interpreted by Avi Controller as 0. + num_flow_cores_sum_changes_to_ignore: + description: + - Number of changes in num flow cores sum to ignore. + - Default value when not specified in API or module is interpreted by Avi Controller as 8. + openstack_availability_zone: + description: + - Field deprecated in 17.1.1. + openstack_availability_zones: + description: + - Field introduced in 17.1.1. + openstack_mgmt_network_name: + description: + - Avi management network name. + openstack_mgmt_network_uuid: + description: + - Management network uuid. + os_reserved_memory: + description: + - Amount of extra memory to be reserved for use by the operating system on a service engine. + - Default value when not specified in API or module is interpreted by Avi Controller as 0. + per_app: + description: + - Per-app se mode is designed for deploying dedicated load balancers per app (vs). + - In this mode, each se is limited to a max of 2 vss. + - Vcpus in per-app ses count towards licensing usage at 25% rate. + - Default value when not specified in API or module is interpreted by Avi Controller as False. + type: bool + placement_mode: + description: + - If placement mode is 'auto', virtual services are automatically placed on service engines. + - Enum options - PLACEMENT_MODE_AUTO. + - Default value when not specified in API or module is interpreted by Avi Controller as PLACEMENT_MODE_AUTO. + realtime_se_metrics: + description: + - Enable or disable real time se metrics. + reboot_on_stop: + description: + - Reboot the system if the se is stopped. + - Field introduced in 17.2.16,18.2.3. + - Default value when not specified in API or module is interpreted by Avi Controller as False. + type: bool + se_bandwidth_type: + description: + - Select the se bandwidth for the bandwidth license. + - Enum options - SE_BANDWIDTH_UNLIMITED, SE_BANDWIDTH_25M, SE_BANDWIDTH_200M, SE_BANDWIDTH_1000M, SE_BANDWIDTH_10000M. + - Field introduced in 17.2.5. 
+    se_deprovision_delay:
+        description:
+            - Duration to preserve unused service engine virtual machines before deleting them.
+            - If traffic to a virtual service were to spike up abruptly, this se would still be available to be utilized again rather than creating a new se.
+            - If this value is set to 0, controller will never delete any ses and administrator has to manually clean up unused ses.
+            - Allowed values are 0-525600.
+            - Default value when not specified in API or module is interpreted by Avi Controller as 120.
+    se_dos_profile:
+        description:
+            - Dosthresholdprofile settings for serviceenginegroup.
+    se_dpdk_pmd:
+        description:
+            - Determines if the dpdk poll mode driver should be used.
+            - 0 - automatically determine based on hypervisor/nic type.
+            - 1 - unconditionally use the dpdk poll mode driver.
+            - 2 - don't use the dpdk poll mode driver.
+            - Allowed values are 0-2.
+            - Field introduced in 18.1.3.
+            - Default value when not specified in API or module is interpreted by Avi Controller as 0.
+    se_flow_probe_retries:
+        description:
+            - Flow probe retry count if no replies are received.
+            - Allowed values are 0-5.
+            - Field introduced in 18.1.4, 18.2.1.
+            - Default value when not specified in API or module is interpreted by Avi Controller as 2.
+    se_flow_probe_timer:
+        description:
+            - Timeout in milliseconds for flow probe entries.
+            - Allowed values are 10-200.
+            - Field introduced in 18.1.4, 18.2.1.
+            - Default value when not specified in API or module is interpreted by Avi Controller as 100.
+    se_ipc_udp_port:
+        description:
+            - Udp port for se_dp ipc in docker bridge mode.
+            - Field introduced in 17.1.2.
+            - Default value when not specified in API or module is interpreted by Avi Controller as 1500.
+    se_name_prefix:
+        description:
+            - Prefix to use for virtual machine name of service engines.
+            - Default value when not specified in API or module is interpreted by Avi Controller as Avi.
+    se_pcap_lookahead:
+        description:
+            - Enables lookahead mode of packet receive in pcap mode.
+            - Introduced to overcome an issue with hv_netvsc driver.
+            - Lookahead mode attempts to ensure that application and kernel's view of the receive rings are consistent.
+            - Field introduced in 18.2.3.
+            - Default value when not specified in API or module is interpreted by Avi Controller as False.
+        type: bool
+    se_pcap_reinit_frequency:
+        description:
+            - Frequency in seconds at which periodically a pcap reinit check is triggered.
+            - May be used in conjunction with the configuration pcap_reinit_threshold.
+            - (valid range 15 mins - 12 hours, 0 - disables).
+            - Allowed values are 900-43200.
+            - Special values are 0- 'disable'.
+            - Field introduced in 17.2.13, 18.1.3, 18.2.1.
+            - Default value when not specified in API or module is interpreted by Avi Controller as 0.
+    se_pcap_reinit_threshold:
+        description:
+            - Threshold for input packet receive errors in pcap mode exceeding which a pcap reinit is triggered.
+            - If not set, an unconditional reinit is performed.
+            - This value is checked every pcap_reinit_frequency interval.
+            - Field introduced in 17.2.13, 18.1.3, 18.2.1.
+            - Default value when not specified in API or module is interpreted by Avi Controller as 0.
+    se_probe_port:
+        description:
+            - Tcp port on se where echo service will be run.
+            - Field introduced in 17.2.2.
+            - Default value when not specified in API or module is interpreted by Avi Controller as 7.
+    se_remote_punt_udp_port:
+        description:
+            - Udp port for punted packets in docker bridge mode.
+            - Field introduced in 17.1.2.
+            - Default value when not specified in API or module is interpreted by Avi Controller as 1501.
+    se_routing:
+        description:
+            - Enable routing via service engine datapath.
+            - When disabled, routing is done by the linux kernel.
+            - Ip routing needs to be enabled in service engine group for se routing to be effective.
+            - Field introduced in 18.2.3.
+            - Default value when not specified in API or module is interpreted by Avi Controller as True.
+        type: bool
+    se_sb_dedicated_core:
+        description:
+            - Sideband traffic will be handled by a dedicated core.
+            - Field introduced in 16.5.2, 17.1.9, 17.2.3.
+            - Default value when not specified in API or module is interpreted by Avi Controller as False.
+        type: bool
+    se_sb_threads:
+        description:
+            - Number of sideband threads per se.
+            - Allowed values are 1-128.
+            - Field introduced in 16.5.2, 17.1.9, 17.2.3.
+            - Default value when not specified in API or module is interpreted by Avi Controller as 1.
+    se_thread_multiplier:
+        description:
+            - Multiplier for se threads based on vcpu.
+            - Allowed values are 1-10.
+            - Default value when not specified in API or module is interpreted by Avi Controller as 1.
+    se_tracert_port_range:
+        description:
+            - Traceroute port range.
+            - Field introduced in 17.2.8.
+    se_tunnel_mode:
+        description:
+            - Determines if dsr from secondary se is active or not.
+            - 0 - automatically determine based on hypervisor type.
+            - 1 - disable dsr unconditionally.
+            - 2 - enable dsr unconditionally.
+            - Allowed values are 0-2.
+            - Field introduced in 17.1.1.
+            - Default value when not specified in API or module is interpreted by Avi Controller as 0.
+    se_tunnel_udp_port:
+        description:
+            - Udp port for tunneled packets from secondary to primary se in docker bridge mode.
+            - Field introduced in 17.1.3.
+            - Default value when not specified in API or module is interpreted by Avi Controller as 1550.
+    se_udp_encap_ipc:
+        description:
+            - Determines if se-se ipc messages are encapsulated in a udp header.
+            - 0 - automatically determine based on hypervisor type.
+            - 1 - use udp encap unconditionally.
+            - Allowed values are 0-1.
+            - Field introduced in 17.1.2.
+            - Default value when not specified in API or module is interpreted by Avi Controller as 0.
+    se_use_dpdk:
+        description:
+            - Determines if the dpdk library should be used.
+            - 0 - automatically determine based on hypervisor type.
+            - 1 - use dpdk if pcap is not enabled.
+            - 2 - don't use dpdk.
+            - Allowed values are 0-2.
+            - Field introduced in 18.1.3.
+            - Default value when not specified in API or module is interpreted by Avi Controller as 0.
+    se_vs_hb_max_pkts_in_batch:
+        description:
+            - Maximum number of aggregated vs heartbeat packets to send in a batch.
+            - Allowed values are 1-256.
+            - Field introduced in 17.1.1.
+            - Default value when not specified in API or module is interpreted by Avi Controller as 64.
+    se_vs_hb_max_vs_in_pkt:
+        description:
+            - Maximum number of virtualservices for which heartbeat messages are aggregated in one packet.
+            - Allowed values are 1-1024.
+            - Field introduced in 17.1.1.
+            - Default value when not specified in API or module is interpreted by Avi Controller as 256.
+    self_se_election:
+        description:
+            - Enable ses to elect a primary amongst themselves in the absence of connectivity to the controller.
+            - Field introduced in 18.1.2.
+            - Default value when not specified in API or module is interpreted by Avi Controller as False.
+        type: bool
+    service_ip6_subnets:
+        description:
+            - Ipv6 subnets assigned to the se group.
+            - Required for vs group placement.
+            - Field introduced in 18.1.1.
+    service_ip_subnets:
+        description:
+            - Subnets assigned to the se group.
+            - Required for vs group placement.
+            - Field introduced in 17.1.1.
+    shm_minimum_config_memory:
+        description:
+            - Minimum required shared memory to apply any configuration.
+            - Allowed values are 0-100.
+            - Field introduced in 18.1.2.
+            - Default value when not specified in API or module is interpreted by Avi Controller as 4.
+    significant_log_throttle:
+        description:
+            - This setting limits the number of significant logs generated per second per core on this se.
+            - Default is 100 logs per second.
+            - Set it to zero (0) to disable throttling.
+            - Field introduced in 17.1.3.
+            - Default value when not specified in API or module is interpreted by Avi Controller as 100.
+    ssl_preprocess_sni_hostname:
+        description:
+            - (beta) preprocess ssl client hello for sni hostname extension. If set to true, this will apply sni child's ssl protocol(s), if they are different
+            - from sni parent's allowed ssl protocol(s).
+            - Field introduced in 17.2.12, 18.1.3.
+            - Default value when not specified in API or module is interpreted by Avi Controller as True.
+        type: bool
+    tenant_ref:
+        description:
+            - It is a reference to an object of type tenant.
+    udf_log_throttle:
+        description:
+            - This setting limits the number of udf logs generated per second per core on this se.
+            - Udf logs are generated due to the configured client log filters or the rules with logging enabled.
+            - Default is 100 logs per second.
+            - Set it to zero (0) to disable throttling.
+            - Field introduced in 17.1.3.
+            - Default value when not specified in API or module is interpreted by Avi Controller as 100.
+    url:
+        description:
+            - Avi controller URL of the object.
+    use_standard_alb:
+        description:
+            - Use standard sku azure load balancer.
+            - By default cloud level flag is set.
+            - If not set, it inherits/uses the use_standard_alb flag from the cloud.
+            - Field introduced in 18.2.3.
+        type: bool
+    uuid:
+        description:
+            - Unique object identifier of the object.
+    vcenter_clusters:
+        description:
+            - Vcenterclusters settings for serviceenginegroup.
+    vcenter_datastore_mode:
+        description:
+            - Enum options - vcenter_datastore_any, vcenter_datastore_local, vcenter_datastore_shared.
+            - Default value when not specified in API or module is interpreted by Avi Controller as VCENTER_DATASTORE_ANY.
+    vcenter_datastores:
+        description:
+            - List of vcenterdatastore.
+    vcenter_datastores_include:
+        description:
+            - Boolean flag to set vcenter_datastores_include.
+            - Default value when not specified in API or module is interpreted by Avi Controller as False.
+        type: bool
+    vcenter_folder:
+        description:
+            - Folder to place all the service engine virtual machines in vcenter.
+            - Default value when not specified in API or module is interpreted by Avi Controller as AviSeFolder.
+    vcenter_hosts:
+        description:
+            - Vcenterhosts settings for serviceenginegroup.
+    vcpus_per_se:
+        description:
+            - Number of vcpus for each of the service engine virtual machines.
+            - Default value when not specified in API or module is interpreted by Avi Controller as 1.
+    vip_asg:
+        description:
+            - When vip_asg is set, vip configuration will be managed by avi. User will be able to configure vip_asg or vips individually at the time of create.
+            - Field introduced in 17.2.12, 18.1.2.
+    vs_host_redundancy:
+        description:
+            - Ensure primary and secondary service engines are deployed on different physical hosts.
+            - Default value when not specified in API or module is interpreted by Avi Controller as True.
+        type: bool
+    vs_scalein_timeout:
+        description:
+            - Time to wait for the scaled in se to drain existing flows before marking the scalein done.
+            - Default value when not specified in API or module is interpreted by Avi Controller as 30.
+    vs_scalein_timeout_for_upgrade:
+        description:
+            - During se upgrade, time to wait for the scaled-in se to drain existing flows before marking the scalein done.
+            - Default value when not specified in API or module is interpreted by Avi Controller as 30.
+    vs_scaleout_timeout:
+        description:
+            - Time to wait for the scaled out se to become ready before marking the scaleout done.
+            - Default value when not specified in API or module is interpreted by Avi Controller as 600.
+    vs_se_scaleout_additional_wait_time:
+        description:
+            - Wait time for sending scaleout ready notification after virtual service is marked up.
+            - In certain deployments, there may be an additional delay to accept traffic.
+            - For example, for bgp, some time is needed for route advertisement.
+            - Allowed values are 0-20.
+            - Field introduced in 18.1.5,18.2.1.
+            - Default value when not specified in API or module is interpreted by Avi Controller as 0.
+    vs_se_scaleout_ready_timeout:
+        description:
+            - Timeout in seconds for service engine to send scaleout ready notification of a virtual service.
+            - Allowed values are 0-60.
+            - Field introduced in 18.1.5,18.2.1.
+            - Default value when not specified in API or module is interpreted by Avi Controller as 25.
+    vs_switchover_timeout:
+        description:
+            - During se upgrade in a legacy active/standby segroup, time to wait for the new primary se to accept flows before marking the switchover done.
+            - Field introduced in 17.2.13,18.1.4,18.2.1.
+            - Default value when not specified in API or module is interpreted by Avi Controller as 300.
+    vss_placement:
+        description:
+            - Parameters to place virtual services on only a subset of the cores of an se.
+            - Field introduced in 17.2.5.
+    vss_placement_enabled:
+        description:
+            - If set, virtual services will be placed on only a subset of the cores of an se.
+            - Field introduced in 18.1.1.
+            - Default value when not specified in API or module is interpreted by Avi Controller as False.
+        type: bool
+    waf_learning_interval:
+        description:
+            - Frequency with which se publishes waf learning.
+            - Allowed values are 1-43200.
+            - Field deprecated in 18.2.3.
+            - Field introduced in 18.1.2.
+            - Default value when not specified in API or module is interpreted by Avi Controller as 10.
+    waf_learning_memory:
+        description:
+            - Amount of memory reserved on se for waf learning.
+            - Cannot exceed 5% of se memory.
+            - Field deprecated in 18.2.3.
+            - Field introduced in 18.1.2.
+            - Default value when not specified in API or module is interpreted by Avi Controller as 0.
+    waf_mempool:
+        description:
+            - Enable memory pool for waf.
+            - Field introduced in 17.2.3.
+            - Default value when not specified in API or module is interpreted by Avi Controller as True.
+        type: bool
+    waf_mempool_size:
+        description:
+            - Memory pool size used for waf.
+            - Field introduced in 17.2.3.
+            - Default value when not specified in API or module is interpreted by Avi Controller as 64.
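Editor's note: the sizing and rebalance options above map one-to-one onto module parameters. As a hedged sketch (the controller address, credentials, and the chosen values are placeholders for illustration, not recommendations), a task that pins scale-out bounds and CPU rebalance thresholds could look like the following; all values sit inside the documented allowed ranges, and anything omitted falls back to the controller-side defaults listed above.

- name: Tune an SE group for autoscaling (illustrative values)
  avi_serviceenginegroup:
    controller: 10.10.25.42
    username: admin
    password: something
    state: present
    name: sample_serviceenginegroup
    max_se: 10
    min_scaleout_per_vs: 2
    max_scaleout_per_vs: 4
    min_cpu_usage: 30
    max_cpu_usage: 80
    se_deprovision_delay: 120
    vcpus_per_se: 2
    memory_per_se: 4096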
+extends_documentation_fragment: +- community.general.avi + +''' + +EXAMPLES = """ +- name: Example to create ServiceEngineGroup object + avi_serviceenginegroup: + controller: 10.10.25.42 + username: admin + password: something + state: present + name: sample_serviceenginegroup +""" + +RETURN = ''' +obj: + description: ServiceEngineGroup (api/serviceenginegroup) object + returned: success, changed + type: dict +''' + +from ansible.module_utils.basic import AnsibleModule +try: + from ansible_collections.community.general.plugins.module_utils.network.avi.avi import ( + avi_common_argument_spec, avi_ansible_api, HAS_AVI) +except ImportError: + HAS_AVI = False + + +def main(): + argument_specs = dict( + state=dict(default='present', + choices=['absent', 'present']), + avi_api_update_method=dict(default='put', + choices=['put', 'patch']), + avi_api_patch_op=dict(choices=['add', 'replace', 'delete']), + accelerated_networking=dict(type='bool',), + active_standby=dict(type='bool',), + additional_config_memory=dict(type='int',), + advertise_backend_networks=dict(type='bool',), + aggressive_failure_detection=dict(type='bool',), + algo=dict(type='str',), + allow_burst=dict(type='bool',), + app_cache_percent=dict(type='int',), + app_learning_memory_percent=dict(type='int',), + archive_shm_limit=dict(type='int',), + async_ssl=dict(type='bool',), + async_ssl_threads=dict(type='int',), + auto_rebalance=dict(type='bool',), + auto_rebalance_capacity_per_se=dict(type='list',), + auto_rebalance_criteria=dict(type='list',), + auto_rebalance_interval=dict(type='int',), + auto_redistribute_active_standby_load=dict(type='bool',), + bgp_state_update_interval=dict(type='int',), + buffer_se=dict(type='int',), + cloud_ref=dict(type='str',), + config_debugs_on_all_cores=dict(type='bool',), + connection_memory_percentage=dict(type='int',), + cpu_reserve=dict(type='bool',), + cpu_socket_affinity=dict(type='bool',), + custom_securitygroups_data=dict(type='list',), + custom_securitygroups_mgmt=dict(type='list',), + custom_tag=dict(type='list',), + data_network_id=dict(type='str',), + datascript_timeout=dict(type='int',), + dedicated_dispatcher_core=dict(type='bool',), + description=dict(type='str',), + disable_avi_securitygroups=dict(type='bool',), + disable_csum_offloads=dict(type='bool',), + disable_gro=dict(type='bool',), + disable_se_memory_check=dict(type='bool',), + disable_tso=dict(type='bool',), + disk_per_se=dict(type='int',), + distribute_load_active_standby=dict(type='bool',), + distribute_queues=dict(type='bool',), + enable_hsm_priming=dict(type='bool',), + enable_multi_lb=dict(type='bool',), + enable_routing=dict(type='bool',), + enable_vip_on_all_interfaces=dict(type='bool',), + enable_vmac=dict(type='bool',), + ephemeral_portrange_end=dict(type='int',), + ephemeral_portrange_start=dict(type='int',), + extra_config_multiplier=dict(type='float',), + extra_shared_config_memory=dict(type='int',), + floating_intf_ip=dict(type='list',), + floating_intf_ip_se_2=dict(type='list',), + flow_table_new_syn_max_entries=dict(type='int',), + free_list_size=dict(type='int',), + ha_mode=dict(type='str',), + hardwaresecuritymodulegroup_ref=dict(type='str',), + heap_minimum_config_memory=dict(type='int',), + hm_on_standby=dict(type='bool',), + host_attribute_key=dict(type='str',), + host_attribute_value=dict(type='str',), + host_gateway_monitor=dict(type='bool',), + hypervisor=dict(type='str',), + ignore_rtt_threshold=dict(type='int',), + ingress_access_data=dict(type='str',), + ingress_access_mgmt=dict(type='str',), + 
instance_flavor=dict(type='str',), + iptables=dict(type='list',), + least_load_core_selection=dict(type='bool',), + license_tier=dict(type='str',), + license_type=dict(type='str',), + log_disksz=dict(type='int',), + max_cpu_usage=dict(type='int',), + max_memory_per_mempool=dict(type='int',), + max_public_ips_per_lb=dict(type='int',), + max_rules_per_lb=dict(type='int',), + max_scaleout_per_vs=dict(type='int',), + max_se=dict(type='int',), + max_vs_per_se=dict(type='int',), + mem_reserve=dict(type='bool',), + memory_for_config_update=dict(type='int',), + memory_per_se=dict(type='int',), + mgmt_network_ref=dict(type='str',), + mgmt_subnet=dict(type='dict',), + min_cpu_usage=dict(type='int',), + min_scaleout_per_vs=dict(type='int',), + min_se=dict(type='int',), + minimum_connection_memory=dict(type='int',), + minimum_required_config_memory=dict(type='int',), + n_log_streaming_threads=dict(type='int',), + name=dict(type='str', required=True), + non_significant_log_throttle=dict(type='int',), + num_dispatcher_cores=dict(type='int',), + num_flow_cores_sum_changes_to_ignore=dict(type='int',), + openstack_availability_zone=dict(type='str',), + openstack_availability_zones=dict(type='list',), + openstack_mgmt_network_name=dict(type='str',), + openstack_mgmt_network_uuid=dict(type='str',), + os_reserved_memory=dict(type='int',), + per_app=dict(type='bool',), + placement_mode=dict(type='str',), + realtime_se_metrics=dict(type='dict',), + reboot_on_stop=dict(type='bool',), + se_bandwidth_type=dict(type='str',), + se_deprovision_delay=dict(type='int',), + se_dos_profile=dict(type='dict',), + se_dpdk_pmd=dict(type='int',), + se_flow_probe_retries=dict(type='int',), + se_flow_probe_timer=dict(type='int',), + se_ipc_udp_port=dict(type='int',), + se_name_prefix=dict(type='str',), + se_pcap_lookahead=dict(type='bool',), + se_pcap_reinit_frequency=dict(type='int',), + se_pcap_reinit_threshold=dict(type='int',), + se_probe_port=dict(type='int',), + se_remote_punt_udp_port=dict(type='int',), + se_routing=dict(type='bool',), + se_sb_dedicated_core=dict(type='bool',), + se_sb_threads=dict(type='int',), + se_thread_multiplier=dict(type='int',), + se_tracert_port_range=dict(type='dict',), + se_tunnel_mode=dict(type='int',), + se_tunnel_udp_port=dict(type='int',), + se_udp_encap_ipc=dict(type='int',), + se_use_dpdk=dict(type='int',), + se_vs_hb_max_pkts_in_batch=dict(type='int',), + se_vs_hb_max_vs_in_pkt=dict(type='int',), + self_se_election=dict(type='bool',), + service_ip6_subnets=dict(type='list',), + service_ip_subnets=dict(type='list',), + shm_minimum_config_memory=dict(type='int',), + significant_log_throttle=dict(type='int',), + ssl_preprocess_sni_hostname=dict(type='bool',), + tenant_ref=dict(type='str',), + udf_log_throttle=dict(type='int',), + url=dict(type='str',), + use_standard_alb=dict(type='bool',), + uuid=dict(type='str',), + vcenter_clusters=dict(type='dict',), + vcenter_datastore_mode=dict(type='str',), + vcenter_datastores=dict(type='list',), + vcenter_datastores_include=dict(type='bool',), + vcenter_folder=dict(type='str',), + vcenter_hosts=dict(type='dict',), + vcpus_per_se=dict(type='int',), + vip_asg=dict(type='dict',), + vs_host_redundancy=dict(type='bool',), + vs_scalein_timeout=dict(type='int',), + vs_scalein_timeout_for_upgrade=dict(type='int',), + vs_scaleout_timeout=dict(type='int',), + vs_se_scaleout_additional_wait_time=dict(type='int',), + vs_se_scaleout_ready_timeout=dict(type='int',), + vs_switchover_timeout=dict(type='int',), + vss_placement=dict(type='dict',), + 
vss_placement_enabled=dict(type='bool',), + waf_learning_interval=dict(type='int',), + waf_learning_memory=dict(type='int',), + waf_mempool=dict(type='bool',), + waf_mempool_size=dict(type='int',), + ) + argument_specs.update(avi_common_argument_spec()) + module = AnsibleModule( + argument_spec=argument_specs, supports_check_mode=True) + if not HAS_AVI: + return module.fail_json(msg=( + 'Avi python API SDK (avisdk>=17.1) or requests is not installed. ' + 'For more details visit https://github.com/avinetworks/sdk.')) + return avi_ansible_api(module, 'serviceenginegroup', + set([])) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/avi/avi_snmptrapprofile.py b/plugins/modules/network/avi/avi_snmptrapprofile.py new file mode 100644 index 0000000000..eb10fe16a1 --- /dev/null +++ b/plugins/modules/network/avi/avi_snmptrapprofile.py @@ -0,0 +1,112 @@ +#!/usr/bin/python +# +# @author: Gaurav Rastogi (grastogi@avinetworks.com) +# Eric Anderson (eanderson@avinetworks.com) +# module_check: supported +# +# Copyright: (c) 2017 Gaurav Rastogi, +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: avi_snmptrapprofile +author: Gaurav Rastogi (@grastogi23) + +short_description: Module for setup of SnmpTrapProfile Avi RESTful Object +description: + - This module is used to configure SnmpTrapProfile object + - more examples at U(https://github.com/avinetworks/devops) +requirements: [ avisdk ] +options: + state: + description: + - The state that should be applied on the entity. + default: present + choices: ["absent", "present"] + avi_api_update_method: + description: + - Default method for object update is HTTP PUT. + - Setting to patch will override that behavior to use HTTP PATCH. + default: put + choices: ["put", "patch"] + avi_api_patch_op: + description: + - Patch operation to use when using avi_api_update_method as patch. + choices: ["add", "replace", "delete"] + name: + description: + - A user-friendly name of the snmp trap configuration. + required: true + tenant_ref: + description: + - It is a reference to an object of type tenant. + trap_servers: + description: + - The ip address or hostname of the snmp trap destination server. + url: + description: + - Avi controller URL of the object. + uuid: + description: + - Uuid of the snmp trap profile object. 
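Editor's note: since trap_servers is a plain list argument, a trap destination can be supplied inline. A minimal sketch; the per-entry fields here assume the Avi SnmpTrapServer schema, and the address, community string, and profile name are placeholders.

- name: Create an SNMP trap profile with one destination
  avi_snmptrapprofile:
    controller: 10.10.25.42
    username: admin
    password: something
    state: present
    name: sample_snmptrapprofile
    trap_servers:
      - ip_addr:
          addr: 10.10.10.10
          type: V4
        version: SNMP_VER2
        community: public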
+extends_documentation_fragment: +- community.general.avi + +''' + +EXAMPLES = """ +- name: Example to create SnmpTrapProfile object + avi_snmptrapprofile: + controller: 10.10.25.42 + username: admin + password: something + state: present + name: sample_snmptrapprofile +""" + +RETURN = ''' +obj: + description: SnmpTrapProfile (api/snmptrapprofile) object + returned: success, changed + type: dict +''' + +from ansible.module_utils.basic import AnsibleModule +try: + from ansible_collections.community.general.plugins.module_utils.network.avi.avi import ( + avi_common_argument_spec, avi_ansible_api, HAS_AVI) +except ImportError: + HAS_AVI = False + + +def main(): + argument_specs = dict( + state=dict(default='present', + choices=['absent', 'present']), + avi_api_update_method=dict(default='put', + choices=['put', 'patch']), + avi_api_patch_op=dict(choices=['add', 'replace', 'delete']), + name=dict(type='str', required=True), + tenant_ref=dict(type='str',), + trap_servers=dict(type='list',), + url=dict(type='str',), + uuid=dict(type='str',), + ) + argument_specs.update(avi_common_argument_spec()) + module = AnsibleModule( + argument_spec=argument_specs, supports_check_mode=True) + if not HAS_AVI: + return module.fail_json(msg=( + 'Avi python API SDK (avisdk>=17.1) or requests is not installed. ' + 'For more details visit https://github.com/avinetworks/sdk.')) + return avi_ansible_api(module, 'snmptrapprofile', + set([])) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/avi/avi_sslkeyandcertificate.py b/plugins/modules/network/avi/avi_sslkeyandcertificate.py new file mode 100644 index 0000000000..6a2dd12c22 --- /dev/null +++ b/plugins/modules/network/avi/avi_sslkeyandcertificate.py @@ -0,0 +1,197 @@ +#!/usr/bin/python +# +# @author: Gaurav Rastogi (grastogi@avinetworks.com) +# Eric Anderson (eanderson@avinetworks.com) +# module_check: supported +# Avi Version: 17.1.1 +# +# Copyright: (c) 2017 Gaurav Rastogi, +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: avi_sslkeyandcertificate +author: Gaurav Rastogi (@grastogi23) + +short_description: Module for setup of SSLKeyAndCertificate Avi RESTful Object +description: + - This module is used to configure SSLKeyAndCertificate object + - more examples at U(https://github.com/avinetworks/devops) +requirements: [ avisdk ] +options: + state: + description: + - The state that should be applied on the entity. + default: present + choices: ["absent", "present"] + avi_api_update_method: + description: + - Default method for object update is HTTP PUT. + - Setting to patch will override that behavior to use HTTP PATCH. + default: put + choices: ["put", "patch"] + avi_api_patch_op: + description: + - Patch operation to use when using avi_api_update_method as patch. + choices: ["add", "replace", "delete"] + ca_certs: + description: + - Ca certificates in certificate chain. + certificate: + description: + - Sslcertificate settings for sslkeyandcertificate. + required: true + certificate_base64: + description: + - States if the certificate is base64 encoded. + - Field introduced in 18.1.2, 18.2.1. + - Default value when not specified in API or module is interpreted by Avi Controller as False. + type: bool + certificate_management_profile_ref: + description: + - It is a reference to an object of type certificatemanagementprofile. 
+ created_by: + description: + - Creator name. + dynamic_params: + description: + - Dynamic parameters needed for certificate management profile. + enckey_base64: + description: + - Encrypted private key corresponding to the private key (e.g. + - Those generated by an hsm such as thales nshield). + enckey_name: + description: + - Name of the encrypted private key (e.g. + - Those generated by an hsm such as thales nshield). + format: + description: + - Format of the key/certificate file. + - Enum options - SSL_PEM, SSL_PKCS12. + - Field introduced in 18.1.2, 18.2.1. + - Default value when not specified in API or module is interpreted by Avi Controller as SSL_PEM. + hardwaresecuritymodulegroup_ref: + description: + - It is a reference to an object of type hardwaresecuritymodulegroup. + key: + description: + - Private key. + key_base64: + description: + - States if the private key is base64 encoded. + - Field introduced in 18.1.2, 18.2.1. + - Default value when not specified in API or module is interpreted by Avi Controller as False. + type: bool + key_params: + description: + - Sslkeyparams settings for sslkeyandcertificate. + key_passphrase: + description: + - Passphrase used to encrypt the private key. + - Field introduced in 18.1.2, 18.2.1. + name: + description: + - Name of the object. + required: true + status: + description: + - Enum options - ssl_certificate_finished, ssl_certificate_pending. + - Default value when not specified in API or module is interpreted by Avi Controller as SSL_CERTIFICATE_FINISHED. + tenant_ref: + description: + - It is a reference to an object of type tenant. + type: + description: + - Enum options - ssl_certificate_type_virtualservice, ssl_certificate_type_system, ssl_certificate_type_ca. + url: + description: + - Avi controller URL of the object. + uuid: + description: + - Unique object identifier of the object. +extends_documentation_fragment: +- community.general.avi + +''' + +EXAMPLES = """ +- name: Create a SSL Key and Certificate + avi_sslkeyandcertificate: + controller: 10.10.27.90 + username: admin + password: AviNetworks123! + key: | + -----BEGIN PRIVATE KEY----- + .... + -----END PRIVATE KEY----- + certificate: + self_signed: true + certificate: | + -----BEGIN CERTIFICATE----- + .... 
+ -----END CERTIFICATE----- + type: SSL_CERTIFICATE_TYPE_VIRTUALSERVICE + name: MyTestCert +""" + +RETURN = ''' +obj: + description: SSLKeyAndCertificate (api/sslkeyandcertificate) object + returned: success, changed + type: dict +''' + +from ansible.module_utils.basic import AnsibleModule +try: + from ansible_collections.community.general.plugins.module_utils.network.avi.avi import ( + avi_common_argument_spec, avi_ansible_api, HAS_AVI) +except ImportError: + HAS_AVI = False + + +def main(): + argument_specs = dict( + state=dict(default='present', + choices=['absent', 'present']), + avi_api_update_method=dict(default='put', + choices=['put', 'patch']), + avi_api_patch_op=dict(choices=['add', 'replace', 'delete']), + ca_certs=dict(type='list',), + certificate=dict(type='dict', required=True), + certificate_base64=dict(type='bool',), + certificate_management_profile_ref=dict(type='str',), + created_by=dict(type='str',), + dynamic_params=dict(type='list',), + enckey_base64=dict(type='str',), + enckey_name=dict(type='str',), + format=dict(type='str',), + hardwaresecuritymodulegroup_ref=dict(type='str',), + key=dict(type='str', no_log=True,), + key_base64=dict(type='bool',), + key_params=dict(type='dict',), + key_passphrase=dict(type='str', no_log=True,), + name=dict(type='str', required=True), + status=dict(type='str',), + tenant_ref=dict(type='str',), + type=dict(type='str',), + url=dict(type='str',), + uuid=dict(type='str',), + ) + argument_specs.update(avi_common_argument_spec()) + module = AnsibleModule( + argument_spec=argument_specs, supports_check_mode=True) + if not HAS_AVI: + return module.fail_json(msg=( + 'Avi python API SDK (avisdk>=17.1) or requests is not installed. ' + 'For more details visit https://github.com/avinetworks/sdk.')) + return avi_ansible_api(module, 'sslkeyandcertificate', + set(['key_passphrase', 'key'])) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/avi/avi_sslprofile.py b/plugins/modules/network/avi/avi_sslprofile.py new file mode 100644 index 0000000000..574d2fa855 --- /dev/null +++ b/plugins/modules/network/avi/avi_sslprofile.py @@ -0,0 +1,209 @@ +#!/usr/bin/python +# +# @author: Gaurav Rastogi (grastogi@avinetworks.com) +# Eric Anderson (eanderson@avinetworks.com) +# module_check: supported +# Avi Version: 17.1.1 +# +# Copyright: (c) 2017 Gaurav Rastogi, +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: avi_sslprofile +author: Gaurav Rastogi (@grastogi23) + +short_description: Module for setup of SSLProfile Avi RESTful Object +description: + - This module is used to configure SSLProfile object + - more examples at U(https://github.com/avinetworks/devops) +requirements: [ avisdk ] +options: + state: + description: + - The state that should be applied on the entity. + default: present + choices: ["absent", "present"] + avi_api_update_method: + description: + - Default method for object update is HTTP PUT. + - Setting to patch will override that behavior to use HTTP PATCH. + default: put + choices: ["put", "patch"] + avi_api_patch_op: + description: + - Patch operation to use when using avi_api_update_method as patch. + choices: ["add", "replace", "delete"] + accepted_ciphers: + description: + - Ciphers suites represented as defined by U(http://www.openssl.org/docs/apps/ciphers.html). 
+ - Default value when not specified in API or module is interpreted by Avi Controller as AES:3DES:RC4. + accepted_versions: + description: + - Set of versions accepted by the server. + cipher_enums: + description: + - Enum options - tls_ecdhe_ecdsa_with_aes_128_gcm_sha256, tls_ecdhe_ecdsa_with_aes_256_gcm_sha384, tls_ecdhe_rsa_with_aes_128_gcm_sha256, + - tls_ecdhe_rsa_with_aes_256_gcm_sha384, tls_ecdhe_ecdsa_with_aes_128_cbc_sha256, tls_ecdhe_ecdsa_with_aes_256_cbc_sha384, + - tls_ecdhe_rsa_with_aes_128_cbc_sha256, tls_ecdhe_rsa_with_aes_256_cbc_sha384, tls_rsa_with_aes_128_gcm_sha256, tls_rsa_with_aes_256_gcm_sha384, + - tls_rsa_with_aes_128_cbc_sha256, tls_rsa_with_aes_256_cbc_sha256, tls_ecdhe_ecdsa_with_aes_128_cbc_sha, tls_ecdhe_ecdsa_with_aes_256_cbc_sha, + - tls_ecdhe_rsa_with_aes_128_cbc_sha, tls_ecdhe_rsa_with_aes_256_cbc_sha, tls_rsa_with_aes_128_cbc_sha, tls_rsa_with_aes_256_cbc_sha, + - tls_rsa_with_3des_ede_cbc_sha, tls_rsa_with_rc4_128_sha. + description: + description: + - User defined description for the object. + dhparam: + description: + - Dh parameters used in ssl. + - At this time, it is not configurable and is set to 2048 bits. + enable_ssl_session_reuse: + description: + - Enable ssl session re-use. + - Default value when not specified in API or module is interpreted by Avi Controller as True. + type: bool + name: + description: + - Name of the object. + required: true + prefer_client_cipher_ordering: + description: + - Prefer the ssl cipher ordering presented by the client during the ssl handshake over the one specified in the ssl profile. + - Default value when not specified in API or module is interpreted by Avi Controller as False. + type: bool + send_close_notify: + description: + - Send 'close notify' alert message for a clean shutdown of the ssl connection. + - Default value when not specified in API or module is interpreted by Avi Controller as True. + type: bool + ssl_rating: + description: + - Sslrating settings for sslprofile. + ssl_session_timeout: + description: + - The amount of time in seconds before an ssl session expires. + - Default value when not specified in API or module is interpreted by Avi Controller as 86400. + tags: + description: + - List of tag. + tenant_ref: + description: + - It is a reference to an object of type tenant. + type: + description: + - Ssl profile type. + - Enum options - SSL_PROFILE_TYPE_APPLICATION, SSL_PROFILE_TYPE_SYSTEM. + - Field introduced in 17.2.8. + - Default value when not specified in API or module is interpreted by Avi Controller as SSL_PROFILE_TYPE_APPLICATION. + url: + description: + - Avi controller URL of the object. + uuid: + description: + - Unique object identifier of the object. 
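Editor's note: because the module supports avi_api_update_method: patch, an existing profile's accepted_versions can be narrowed without resending the whole object, complementing the full creation example below. A hedged sketch; the profile name is reused from that example, and the controller-side semantics of a replace patch on a list field are an assumption.

- name: Patch an SSL profile to drop TLS 1.0
  avi_sslprofile:
    controller: '{{ controller }}'
    username: '{{ username }}'
    password: '{{ password }}'
    state: present
    avi_api_update_method: patch
    avi_api_patch_op: replace
    name: PFS-BOTH-RSA-EC
    accepted_versions:
      - type: SSL_VERSION_TLS1_1
      - type: SSL_VERSION_TLS1_2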
+extends_documentation_fragment: +- community.general.avi + +''' + +EXAMPLES = """ + - name: Create SSL profile with list of allowed ciphers + avi_sslprofile: + controller: '{{ controller }}' + username: '{{ username }}' + password: '{{ password }}' + accepted_ciphers: > + ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES128-SHA:ECDHE-ECDSA-AES256-SHA: + ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-ECDSA-AES128-SHA256:ECDHE-ECDSA-AES256-SHA384: + AES128-GCM-SHA256:AES256-GCM-SHA384:AES128-SHA256:AES256-SHA256:AES128-SHA: + AES256-SHA:DES-CBC3-SHA:ECDHE-RSA-AES128-SHA:ECDHE-RSA-AES256-SHA384: + ECDHE-RSA-AES128-SHA256:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-RSA-AES256-SHA + accepted_versions: + - type: SSL_VERSION_TLS1 + - type: SSL_VERSION_TLS1_1 + - type: SSL_VERSION_TLS1_2 + cipher_enums: + - TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 + - TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA + - TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA + - TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 + - TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256 + - TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384 + - TLS_RSA_WITH_AES_128_GCM_SHA256 + - TLS_RSA_WITH_AES_256_GCM_SHA384 + - TLS_RSA_WITH_AES_128_CBC_SHA256 + - TLS_RSA_WITH_AES_256_CBC_SHA256 + - TLS_RSA_WITH_AES_128_CBC_SHA + - TLS_RSA_WITH_AES_256_CBC_SHA + - TLS_RSA_WITH_3DES_EDE_CBC_SHA + - TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA + - TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384 + - TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256 + - TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 + - TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 + - TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA + name: PFS-BOTH-RSA-EC + send_close_notify: true + ssl_rating: + compatibility_rating: SSL_SCORE_EXCELLENT + performance_rating: SSL_SCORE_EXCELLENT + security_score: '100.0' + tenant_ref: Demo +""" + +RETURN = ''' +obj: + description: SSLProfile (api/sslprofile) object + returned: success, changed + type: dict +''' + +from ansible.module_utils.basic import AnsibleModule +try: + from ansible_collections.community.general.plugins.module_utils.network.avi.avi import ( + avi_common_argument_spec, avi_ansible_api, HAS_AVI) +except ImportError: + HAS_AVI = False + + +def main(): + argument_specs = dict( + state=dict(default='present', + choices=['absent', 'present']), + avi_api_update_method=dict(default='put', + choices=['put', 'patch']), + avi_api_patch_op=dict(choices=['add', 'replace', 'delete']), + accepted_ciphers=dict(type='str',), + accepted_versions=dict(type='list',), + cipher_enums=dict(type='list',), + description=dict(type='str',), + dhparam=dict(type='str',), + enable_ssl_session_reuse=dict(type='bool',), + name=dict(type='str', required=True), + prefer_client_cipher_ordering=dict(type='bool',), + send_close_notify=dict(type='bool',), + ssl_rating=dict(type='dict',), + ssl_session_timeout=dict(type='int',), + tags=dict(type='list',), + tenant_ref=dict(type='str',), + type=dict(type='str',), + url=dict(type='str',), + uuid=dict(type='str',), + ) + argument_specs.update(avi_common_argument_spec()) + module = AnsibleModule( + argument_spec=argument_specs, supports_check_mode=True) + if not HAS_AVI: + return module.fail_json(msg=( + 'Avi python API SDK (avisdk>=17.1) or requests is not installed. 
' + 'For more details visit https://github.com/avinetworks/sdk.')) + return avi_ansible_api(module, 'sslprofile', + set([])) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/avi/avi_stringgroup.py b/plugins/modules/network/avi/avi_stringgroup.py new file mode 100644 index 0000000000..bf494cf841 --- /dev/null +++ b/plugins/modules/network/avi/avi_stringgroup.py @@ -0,0 +1,135 @@ +#!/usr/bin/python +# +# @author: Gaurav Rastogi (grastogi@avinetworks.com) +# Eric Anderson (eanderson@avinetworks.com) +# module_check: supported +# Avi Version: 17.1.1 +# +# Copyright: (c) 2017 Gaurav Rastogi, +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: avi_stringgroup +author: Gaurav Rastogi (@grastogi23) + +short_description: Module for setup of StringGroup Avi RESTful Object +description: + - This module is used to configure StringGroup object + - more examples at U(https://github.com/avinetworks/devops) +requirements: [ avisdk ] +options: + state: + description: + - The state that should be applied on the entity. + default: present + choices: ["absent", "present"] + avi_api_update_method: + description: + - Default method for object update is HTTP PUT. + - Setting to patch will override that behavior to use HTTP PATCH. + default: put + choices: ["put", "patch"] + avi_api_patch_op: + description: + - Patch operation to use when using avi_api_update_method as patch. + choices: ["add", "replace", "delete"] + description: + description: + - User defined description for the object. + kv: + description: + - Configure key value in the string group. + name: + description: + - Name of the string group. + required: true + tenant_ref: + description: + - It is a reference to an object of type tenant. + type: + description: + - Type of stringgroup. + - Enum options - SG_TYPE_STRING, SG_TYPE_KEYVAL. + - Default value when not specified in API or module is interpreted by Avi Controller as SG_TYPE_STRING. + required: true + url: + description: + - Avi controller URL of the object. + uuid: + description: + - Uuid of the string group. 
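Editor's note: in addition to the SG_TYPE_STRING form shown in the module's own example below, kv entries can carry values when type is SG_TYPE_KEYVAL. A minimal sketch; the keys and values are illustrative, and the per-entry value field is an assumption based on the key/value description above.

- name: Create a key/value string group
  avi_stringgroup:
    controller: '{{ controller }}'
    username: '{{ username }}'
    password: '{{ password }}'
    state: present
    name: Sample-KeyVal-Group
    type: SG_TYPE_KEYVAL
    kv:
      - key: env
        value: production
      - key: team
        value: platform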
+extends_documentation_fragment: +- community.general.avi + +''' + +EXAMPLES = """ + - name: Create a string group configuration + avi_stringgroup: + controller: '{{ controller }}' + password: '{{ password }}' + username: '{{ username }}' + kv: + - key: text/html + - key: text/xml + - key: text/plain + - key: text/css + - key: text/javascript + - key: application/javascript + - key: application/x-javascript + - key: application/xml + - key: application/pdf + name: System-Compressible-Content-Types + tenant_ref: admin + type: SG_TYPE_STRING +""" + +RETURN = ''' +obj: + description: StringGroup (api/stringgroup) object + returned: success, changed + type: dict +''' + +from ansible.module_utils.basic import AnsibleModule +try: + from ansible_collections.community.general.plugins.module_utils.network.avi.avi import ( + avi_common_argument_spec, avi_ansible_api, HAS_AVI) +except ImportError: + HAS_AVI = False + + +def main(): + argument_specs = dict( + state=dict(default='present', + choices=['absent', 'present']), + avi_api_update_method=dict(default='put', + choices=['put', 'patch']), + avi_api_patch_op=dict(choices=['add', 'replace', 'delete']), + description=dict(type='str',), + kv=dict(type='list',), + name=dict(type='str', required=True), + tenant_ref=dict(type='str',), + type=dict(type='str', required=True), + url=dict(type='str',), + uuid=dict(type='str',), + ) + argument_specs.update(avi_common_argument_spec()) + module = AnsibleModule( + argument_spec=argument_specs, supports_check_mode=True) + if not HAS_AVI: + return module.fail_json(msg=( + 'Avi python API SDK (avisdk>=17.1) or requests is not installed. ' + 'For more details visit https://github.com/avinetworks/sdk.')) + return avi_ansible_api(module, 'stringgroup', + set([])) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/avi/avi_systemconfiguration.py b/plugins/modules/network/avi/avi_systemconfiguration.py new file mode 100644 index 0000000000..b338720fec --- /dev/null +++ b/plugins/modules/network/avi/avi_systemconfiguration.py @@ -0,0 +1,182 @@ +#!/usr/bin/python +# +# @author: Gaurav Rastogi (grastogi@avinetworks.com) +# Eric Anderson (eanderson@avinetworks.com) +# module_check: supported +# Avi Version: 17.1.1 +# +# Copyright: (c) 2017 Gaurav Rastogi, +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: avi_systemconfiguration +author: Gaurav Rastogi (@grastogi23) + +short_description: Module for setup of SystemConfiguration Avi RESTful Object +description: + - This module is used to configure SystemConfiguration object + - more examples at U(https://github.com/avinetworks/devops) +requirements: [ avisdk ] +options: + state: + description: + - The state that should be applied on the entity. + default: present + choices: ["absent", "present"] + avi_api_update_method: + description: + - Default method for object update is HTTP PUT. + - Setting to patch will override that behavior to use HTTP PATCH. + default: put + choices: ["put", "patch"] + avi_api_patch_op: + description: + - Patch operation to use when using avi_api_update_method as patch. + choices: ["add", "replace", "delete"] + admin_auth_configuration: + description: + - Adminauthconfiguration settings for systemconfiguration. + default_license_tier: + description: + - Specifies the default license tier which would be used by new clouds. 
+ - Enum options - ENTERPRISE_16, ENTERPRISE_18. + - Field introduced in 17.2.5. + - Default value when not specified in API or module is interpreted by Avi Controller as ENTERPRISE_18. + dns_configuration: + description: + - Dnsconfiguration settings for systemconfiguration. + dns_virtualservice_refs: + description: + - Dns virtualservices hosting fqdn records for applications across avi vantage. + - If no virtualservices are provided, avi vantage will provide dns services for configured applications. + - Switching back to avi vantage from dns virtualservices is not allowed. + - It is a reference to an object of type virtualservice. + docker_mode: + description: + - Boolean flag to set docker_mode. + - Default value when not specified in API or module is interpreted by Avi Controller as False. + type: bool + email_configuration: + description: + - Emailconfiguration settings for systemconfiguration. + global_tenant_config: + description: + - Tenantconfiguration settings for systemconfiguration. + linux_configuration: + description: + - Linuxconfiguration settings for systemconfiguration. + mgmt_ip_access_control: + description: + - Configure ip access control for controller to restrict open access. + ntp_configuration: + description: + - Ntpconfiguration settings for systemconfiguration. + portal_configuration: + description: + - Portalconfiguration settings for systemconfiguration. + proxy_configuration: + description: + - Proxyconfiguration settings for systemconfiguration. + secure_channel_configuration: + description: + - Configure secure channel properties. + - Field introduced in 18.1.4, 18.2.1. + snmp_configuration: + description: + - Snmpconfiguration settings for systemconfiguration. + ssh_ciphers: + description: + - Allowed ciphers list for ssh to the management interface on the controller and service engines. + - If this is not specified, all the default ciphers are allowed. + ssh_hmacs: + description: + - Allowed hmac list for ssh to the management interface on the controller and service engines. + - If this is not specified, all the default hmacs are allowed. + url: + description: + - Avi controller URL of the object. + uuid: + description: + - Unique object identifier of the object. + welcome_workflow_complete: + description: + - This flag is set once the initial controller setup workflow is complete. + - Field introduced in 18.2.3. + - Default value when not specified in API or module is interpreted by Avi Controller as False. 
+ type: bool +extends_documentation_fragment: +- community.general.avi + +''' + +EXAMPLES = """ +- name: Example to create SystemConfiguration object + avi_systemconfiguration: + controller: 10.10.25.42 + username: admin + password: something + state: present + name: sample_systemconfiguration +""" + +RETURN = ''' +obj: + description: SystemConfiguration (api/systemconfiguration) object + returned: success, changed + type: dict +''' + +from ansible.module_utils.basic import AnsibleModule +try: + from ansible_collections.community.general.plugins.module_utils.network.avi.avi import ( + avi_common_argument_spec, avi_ansible_api, HAS_AVI) +except ImportError: + HAS_AVI = False + + +def main(): + argument_specs = dict( + state=dict(default='present', + choices=['absent', 'present']), + avi_api_update_method=dict(default='put', + choices=['put', 'patch']), + avi_api_patch_op=dict(choices=['add', 'replace', 'delete']), + admin_auth_configuration=dict(type='dict',), + default_license_tier=dict(type='str',), + dns_configuration=dict(type='dict',), + dns_virtualservice_refs=dict(type='list',), + docker_mode=dict(type='bool',), + email_configuration=dict(type='dict',), + global_tenant_config=dict(type='dict',), + linux_configuration=dict(type='dict',), + mgmt_ip_access_control=dict(type='dict',), + ntp_configuration=dict(type='dict',), + portal_configuration=dict(type='dict',), + proxy_configuration=dict(type='dict',), + secure_channel_configuration=dict(type='dict',), + snmp_configuration=dict(type='dict',), + ssh_ciphers=dict(type='list',), + ssh_hmacs=dict(type='list',), + url=dict(type='str',), + uuid=dict(type='str',), + welcome_workflow_complete=dict(type='bool',), + ) + argument_specs.update(avi_common_argument_spec()) + module = AnsibleModule( + argument_spec=argument_specs, supports_check_mode=True) + if not HAS_AVI: + return module.fail_json(msg=( + 'Avi python API SDK (avisdk>=17.1) or requests is not installed. ' + 'For more details visit https://github.com/avinetworks/sdk.')) + return avi_ansible_api(module, 'systemconfiguration', + set([])) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/avi/avi_tenant.py b/plugins/modules/network/avi/avi_tenant.py new file mode 100644 index 0000000000..3828f09e35 --- /dev/null +++ b/plugins/modules/network/avi/avi_tenant.py @@ -0,0 +1,128 @@ +#!/usr/bin/python +# +# @author: Gaurav Rastogi (grastogi@avinetworks.com) +# Eric Anderson (eanderson@avinetworks.com) +# module_check: supported +# Avi Version: 17.1.1 +# +# Copyright: (c) 2017 Gaurav Rastogi, +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: avi_tenant +author: Gaurav Rastogi (@grastogi23) + +short_description: Module for setup of Tenant Avi RESTful Object +description: + - This module is used to configure Tenant object + - more examples at U(https://github.com/avinetworks/devops) +requirements: [ avisdk ] +options: + state: + description: + - The state that should be applied on the entity. + default: present + choices: ["absent", "present"] + avi_api_update_method: + description: + - Default method for object update is HTTP PUT. + - Setting to patch will override that behavior to use HTTP PATCH. + default: put + choices: ["put", "patch"] + avi_api_patch_op: + description: + - Patch operation to use when using avi_api_update_method as patch. 
+ choices: ["add", "replace", "delete"] + config_settings: + description: + - Tenantconfiguration settings for tenant. + created_by: + description: + - Creator of this tenant. + description: + description: + - User defined description for the object. + local: + description: + - Boolean flag to set local. + - Default value when not specified in API or module is interpreted by Avi Controller as True. + type: bool + name: + description: + - Name of the object. + required: true + url: + description: + - Avi controller URL of the object. + uuid: + description: + - Unique object identifier of the object. +extends_documentation_fragment: +- community.general.avi + +''' + +EXAMPLES = """ + - name: Create Tenant using Service Engines in provider mode + avi_tenant: + controller: '{{ controller }}' + password: '{{ password }}' + username: '{{ username }}' + config_settings: + se_in_provider_context: false + tenant_access_to_provider_se: true + tenant_vrf: false + description: VCenter, Open Stack, AWS Virtual services + local: true + name: Demo +""" + +RETURN = ''' +obj: + description: Tenant (api/tenant) object + returned: success, changed + type: dict +''' + +from ansible.module_utils.basic import AnsibleModule +try: + from ansible_collections.community.general.plugins.module_utils.network.avi.avi import ( + avi_common_argument_spec, avi_ansible_api, HAS_AVI) +except ImportError: + HAS_AVI = False + + +def main(): + argument_specs = dict( + state=dict(default='present', + choices=['absent', 'present']), + avi_api_update_method=dict(default='put', + choices=['put', 'patch']), + avi_api_patch_op=dict(choices=['add', 'replace', 'delete']), + config_settings=dict(type='dict',), + created_by=dict(type='str',), + description=dict(type='str',), + local=dict(type='bool',), + name=dict(type='str', required=True), + url=dict(type='str',), + uuid=dict(type='str',), + ) + argument_specs.update(avi_common_argument_spec()) + module = AnsibleModule( + argument_spec=argument_specs, supports_check_mode=True) + if not HAS_AVI: + return module.fail_json(msg=( + 'Avi python API SDK (avisdk>=17.1) or requests is not installed. ' + 'For more details visit https://github.com/avinetworks/sdk.')) + return avi_ansible_api(module, 'tenant', + set([])) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/avi/avi_trafficcloneprofile.py b/plugins/modules/network/avi/avi_trafficcloneprofile.py new file mode 100644 index 0000000000..10b24c24fc --- /dev/null +++ b/plugins/modules/network/avi/avi_trafficcloneprofile.py @@ -0,0 +1,127 @@ +#!/usr/bin/python +# +# @author: Gaurav Rastogi (grastogi@avinetworks.com) +# Eric Anderson (eanderson@avinetworks.com) +# module_check: supported +# +# Copyright: (c) 2017 Gaurav Rastogi, +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: avi_trafficcloneprofile +author: Gaurav Rastogi (@grastogi23) + +short_description: Module for setup of TrafficCloneProfile Avi RESTful Object +description: + - This module is used to configure TrafficCloneProfile object + - more examples at U(https://github.com/avinetworks/devops) +requirements: [ avisdk ] +options: + state: + description: + - The state that should be applied on the entity. + default: present + choices: ["absent", "present"] + avi_api_update_method: + description: + - Default method for object update is HTTP PUT. 
+ - Setting to patch will override that behavior to use HTTP PATCH. + default: put + choices: ["put", "patch"] + avi_api_patch_op: + description: + - Patch operation to use when using avi_api_update_method as patch. + choices: ["add", "replace", "delete"] + clone_servers: + description: + - Field introduced in 17.1.1. + cloud_ref: + description: + - It is a reference to an object of type cloud. + - Field introduced in 17.1.1. + name: + description: + - Name for the traffic clone profile. + - Field introduced in 17.1.1. + required: true + preserve_client_ip: + description: + - Specifies if client ip needs to be preserved to clone destination. + - Field introduced in 17.1.1. + - Default value when not specified in API or module is interpreted by Avi Controller as False. + type: bool + tenant_ref: + description: + - It is a reference to an object of type tenant. + - Field introduced in 17.1.1. + url: + description: + - Avi controller URL of the object. + uuid: + description: + - Uuid of the traffic clone profile. + - Field introduced in 17.1.1. +extends_documentation_fragment: +- community.general.avi + +''' + +EXAMPLES = """ +- name: Example to create TrafficCloneProfile object + avi_trafficcloneprofile: + controller: 10.10.25.42 + username: admin + password: something + state: present + name: sample_trafficcloneprofile +""" + +RETURN = ''' +obj: + description: TrafficCloneProfile (api/trafficcloneprofile) object + returned: success, changed + type: dict +''' + +from ansible.module_utils.basic import AnsibleModule +try: + from ansible_collections.community.general.plugins.module_utils.network.avi.avi import ( + avi_common_argument_spec, avi_ansible_api, HAS_AVI) +except ImportError: + HAS_AVI = False + + +def main(): + argument_specs = dict( + state=dict(default='present', + choices=['absent', 'present']), + avi_api_update_method=dict(default='put', + choices=['put', 'patch']), + avi_api_patch_op=dict(choices=['add', 'replace', 'delete']), + clone_servers=dict(type='list',), + cloud_ref=dict(type='str',), + name=dict(type='str', required=True), + preserve_client_ip=dict(type='bool',), + tenant_ref=dict(type='str',), + url=dict(type='str',), + uuid=dict(type='str',), + ) + argument_specs.update(avi_common_argument_spec()) + module = AnsibleModule( + argument_spec=argument_specs, supports_check_mode=True) + if not HAS_AVI: + return module.fail_json(msg=( + 'Avi python API SDK (avisdk>=17.1) or requests is not installed. ' + 'For more details visit https://github.com/avinetworks/sdk.')) + return avi_ansible_api(module, 'trafficcloneprofile', + set([])) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/avi/avi_user.py b/plugins/modules/network/avi/avi_user.py new file mode 100644 index 0000000000..f87bb5345f --- /dev/null +++ b/plugins/modules/network/avi/avi_user.py @@ -0,0 +1,193 @@ +#!/usr/bin/python +""" +# Created on Aug 2, 2018 +# +# @author: Shrikant Chaudhari (shrikant.chaudhari@avinetworks.com) GitHub ID: gitshrikant +# +# module_check: supported +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see .
+#
+"""
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+DOCUMENTATION = '''
+---
+module: avi_user
+author: Shrikant Chaudhari (@gitshrikant)
+short_description: Avi User Module
+description:
+    - This module can be used for the creation, update and deletion of a user.
+requirements: [ avisdk ]
+options:
+    state:
+        description:
+            - The state that should be applied on the entity.
+        default: present
+        choices: ["absent", "present"]
+        type: str
+    name:
+        description:
+            - Full name of the user.
+        required: true
+        type: str
+    obj_username:
+        description:
+            - Name that the user will supply when signing into Avi Vantage, such as jdoe or jdoe@avinetworks.com.
+        required: true
+        type: str
+    obj_password:
+        description:
+            - You may enter a case-sensitive password in this field for the new or existing user.
+        required: true
+        type: str
+    email:
+        description:
+            - Email address of the user. This field is used when a user loses their password and requests to have it reset. See Password Recovery.
+        type: str
+    access:
+        description:
+            - Access settings (write, read, or no access) for each type of resource within Vantage.
+        type: list
+    is_superuser:
+        description:
+            - If the user will need to have the same privileges as the admin account, set it to true.
+        type: bool
+    is_active:
+        description:
+            - Activates the current user account.
+        type: bool
+    avi_api_update_method:
+        description:
+            - Default method for object update is HTTP PUT.
+            - Setting to patch will override that behavior to use HTTP PATCH.
+        default: put
+        choices: ["post", "put", "patch"]
+        type: str
+    avi_api_patch_op:
+        description:
+            - Patch operation to use when using avi_api_update_method as patch.
+        choices: ["add", "replace", "delete"]
+        type: str
+    user_profile_ref:
+        description:
+            - Refer to the user profile.
+            - This can also be full URI same as it comes in response payload
+        type: str
+    default_tenant_ref:
+        description:
+            - Default tenant reference.
+ - This can also be full URI same as it comes in response payload + default: /api/tenant?name=admin + type: str + + +extends_documentation_fragment: +- community.general.avi + +''' + +EXAMPLES = ''' + - name: user creation + avi_user: + controller: "" + username: "" + password: "" + api_version: "" + name: "testuser" + obj_username: "testuser" + obj_password: "test123" + email: "test@abc.test" + access: + - role_ref: "/api/role?name=Tenant-Admin" + tenant_ref: "/api/tenant/admin#admin" + user_profile_ref: "/api/useraccountprofile?name=Default-User-Account-Profile" + is_active: true + is_superuser: true + default_tenant_ref: "/api/tenant?name=admin" + + - name: user creation + avi_user: + controller: "" + username: "" + password: "" + api_version: "" + name: "testuser" + obj_username: "testuser2" + obj_password: "password" + email: "testuser2@abc.test" + access: + - role_ref: "https://192.0.2.10/api/role?name=Tenant-Admin" + tenant_ref: "https://192.0.2.10/api/tenant/admin#admin" + user_profile_ref: "https://192.0.2.10/api/useraccountprofile?name=Default-User-Account-Profile" + is_active: true + is_superuser: true + default_tenant_ref: "https://192.0.2.10/api/tenant?name=admin" +''' + +RETURN = ''' +obj: + description: Avi REST resource + returned: success, changed + type: dict +''' + +from ansible.module_utils.basic import AnsibleModule + +try: + from ansible_collections.community.general.plugins.module_utils.network.avi.avi import ( + avi_common_argument_spec, ansible_return, HAS_AVI) + from ansible_collections.community.general.plugins.module_utils.network.avi.ansible_utils import ( + avi_ansible_api) +except ImportError: + HAS_AVI = False + + +def main(): + argument_specs = dict( + state=dict(default='present', + choices=['absent', 'present']), + name=dict(type='str', required=True), + obj_username=dict(type='str', required=True), + obj_password=dict(type='str', required=True, no_log=True), + access=dict(type='list',), + email=dict(type='str',), + is_superuser=dict(type='bool',), + is_active=dict(type='bool',), + avi_api_update_method=dict(default='put', + choices=['post', 'put', 'patch']), + avi_api_patch_op=dict(choices=['add', 'replace', 'delete']), + user_profile_ref=dict(type='str',), + default_tenant_ref=dict(type='str', default='/api/tenant?name=admin'), + ) + argument_specs.update(avi_common_argument_spec()) + module = AnsibleModule(argument_spec=argument_specs, supports_check_mode=True) + if not HAS_AVI: + return module.fail_json(msg=( + 'Avi python API SDK (avisdk>=17.1) or requests is not installed. ' + 'For more details visit https://github.com/avinetworks/sdk.')) + return avi_ansible_api(module, 'user', + set([])) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/avi/avi_useraccount.py b/plugins/modules/network/avi/avi_useraccount.py new file mode 100644 index 0000000000..98cfc68c38 --- /dev/null +++ b/plugins/modules/network/avi/avi_useraccount.py @@ -0,0 +1,152 @@ +#!/usr/bin/python +""" +# Created on Aug 12, 2016 +# +# @author: Gaurav Rastogi (grastogi@avinetworks.com) GitHub ID: grastogi23 +# +# module_check: not supported +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. 
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see .
+#
+"""
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+DOCUMENTATION = '''
+---
+module: avi_useraccount
+author: Chaitanya Deshpande (@chaitanyaavi)
+short_description: Avi UserAccount Module
+description:
+    - This module can be used for updating the password of a user.
+    - This module is useful for setting up the admin password for Controller bootstrap.
+requirements: [ avisdk ]
+options:
+    old_password:
+        description:
+            - Old password when updating the password, or the default password for bootstrap.
+    force_change:
+        description:
+            - If set to true, the old password is tried first when establishing a session with the controller, and the new
+              password is tried next. If this flag is not set, the new password is tried first.
+
+extends_documentation_fragment:
+- community.general.avi
+
+'''
+
+EXAMPLES = '''
+  - name: Update user password
+    avi_useraccount:
+      controller: ""
+      username: ""
+      password: new_password
+      old_password: ""
+      api_version: ""
+      force_change: false
+
+  - name: Update user password using avi_credentials
+    avi_useraccount:
+      avi_credentials: ""
+      old_password: ""
+      force_change: false
+'''
+
+RETURN = '''
+obj:
+    description: Avi REST resource
+    returned: success, changed
+    type: dict
+'''
+
+import json
+import time
+from ansible.module_utils.basic import AnsibleModule
+from copy import deepcopy
+
+try:
+    from ansible_collections.community.general.plugins.module_utils.network.avi.avi import (
+        avi_common_argument_spec, ansible_return, avi_obj_cmp,
+        cleanup_absent_fields, HAS_AVI)
+    from ansible_collections.community.general.plugins.module_utils.network.avi.avi_api import (
+        ApiSession, AviCredentials)
+except ImportError:
+    HAS_AVI = False
+
+
+def main():
+    argument_specs = dict(
+        old_password=dict(type='str', required=True, no_log=True),
+        # Flag to specify priority of old/new password while establishing session with controller.
+        # To handle both Saas and conventional (Entire state in playbook) scenario.
+        force_change=dict(type='bool', default=False)
+    )
+    argument_specs.update(avi_common_argument_spec())
+    module = AnsibleModule(argument_spec=argument_specs)
+    if not HAS_AVI:
+        return module.fail_json(msg=(
+            'Avi python API SDK (avisdk>=17.1) or requests is not installed. '
+            'For more details visit https://github.com/avinetworks/sdk.'))
+    api_creds = AviCredentials()
+    api_creds.update_from_ansible_module(module)
+    old_password = module.params.get('old_password')
+    force_change = module.params.get('force_change', False)
+    data = {
+        'old_password': old_password,
+        'password': api_creds.password
+    }
+    # First try old password if 'force_change' is set to true
+    if force_change:
+        first_pwd = old_password
+        second_pwd = api_creds.password
+    # First try new password if 'force_change' is set to false or not specified in playbook.
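+    # Note: when force_change is false, the first session attempt is only a probe
+    # with the new password (any failure is swallowed); the actual useraccount PUT
+    # is issued afterwards from a session established with the old password.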
+    else:
+        first_pwd = api_creds.password
+        second_pwd = old_password
+    password_changed = False
+    rsp = None  # rsp stays None when no useraccount PUT is attempted
+    try:
+        api = ApiSession.get_session(
+            api_creds.controller, api_creds.username,
+            password=first_pwd, timeout=api_creds.timeout,
+            tenant=api_creds.tenant, tenant_uuid=api_creds.tenant_uuid,
+            token=api_creds.token, port=api_creds.port)
+        if force_change:
+            rsp = api.put('useraccount', data=data)
+            if rsp:
+                password_changed = True
+    except Exception:
+        pass
+    if not password_changed:
+        api = ApiSession.get_session(
+            api_creds.controller, api_creds.username, password=second_pwd,
+            timeout=api_creds.timeout, tenant=api_creds.tenant,
+            tenant_uuid=api_creds.tenant_uuid, token=api_creds.token,
+            port=api_creds.port)
+        if not force_change:
+            rsp = api.put('useraccount', data=data)
+            if rsp:
+                password_changed = True
+    if password_changed:
+        return ansible_return(module, rsp, True, req=data)
+    elif rsp is not None:
+        return ansible_return(module, rsp, False, req=data)
+    # No PUT was attempted: the fallback session with the new password already
+    # works, so the password does not need to be changed.
+    return module.exit_json(changed=False)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/network/avi/avi_useraccountprofile.py b/plugins/modules/network/avi/avi_useraccountprofile.py
new file mode 100644
index 0000000000..6786777945
--- /dev/null
+++ b/plugins/modules/network/avi/avi_useraccountprofile.py
@@ -0,0 +1,135 @@
+#!/usr/bin/python
+#
+# @author: Gaurav Rastogi (grastogi@avinetworks.com)
+#          Eric Anderson (eanderson@avinetworks.com)
+# module_check: supported
+# Avi Version: 17.1.1
+#
+# Copyright: (c) 2017 Gaurav Rastogi,
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+DOCUMENTATION = '''
+---
+module: avi_useraccountprofile
+author: Gaurav Rastogi (@grastogi23)
+
+short_description: Module for setup of UserAccountProfile Avi RESTful Object
+description:
+    - This module is used to configure UserAccountProfile object
+    - more examples at U(https://github.com/avinetworks/devops)
+requirements: [ avisdk ]
+options:
+    state:
+        description:
+            - The state that should be applied on the entity.
+        default: present
+        choices: ["absent", "present"]
+    avi_api_update_method:
+        description:
+            - Default method for object update is HTTP PUT.
+            - Setting to patch will override that behavior to use HTTP PATCH.
+        default: put
+        choices: ["put", "patch"]
+    avi_api_patch_op:
+        description:
+            - Patch operation to use when using avi_api_update_method as patch.
+        choices: ["add", "replace", "delete"]
+    account_lock_timeout:
+        description:
+            - Lock timeout period (in minutes).
+            - Default is 30 minutes.
+            - Default value when not specified in API or module is interpreted by Avi Controller as 30.
+    credentials_timeout_threshold:
+        description:
+            - The time period after which credentials expire.
+            - Default is 180 days.
+            - Default value when not specified in API or module is interpreted by Avi Controller as 180.
+    max_concurrent_sessions:
+        description:
+            - Maximum number of concurrent sessions allowed.
+            - There are unlimited sessions by default.
+            - Default value when not specified in API or module is interpreted by Avi Controller as 0.
+    max_login_failure_count:
+        description:
+            - Number of login attempts before lockout.
+            - Default is 3 attempts.
+            - Default value when not specified in API or module is interpreted by Avi Controller as 3.
+    max_password_history_count:
+        description:
+            - Maximum number of passwords to be maintained in the password history.
+            - Default is 4 passwords.
+ - Default value when not specified in API or module is interpreted by Avi Controller as 4. + name: + description: + - Name of the object. + required: true + url: + description: + - Avi controller URL of the object. + uuid: + description: + - Unique object identifier of the object. +extends_documentation_fragment: +- community.general.avi + +''' + +EXAMPLES = """ +- name: Example to create UserAccountProfile object + avi_useraccountprofile: + controller: 10.10.25.42 + username: admin + password: something + state: present + name: sample_useraccountprofile +""" + +RETURN = ''' +obj: + description: UserAccountProfile (api/useraccountprofile) object + returned: success, changed + type: dict +''' + +from ansible.module_utils.basic import AnsibleModule +try: + from ansible_collections.community.general.plugins.module_utils.network.avi.avi import ( + avi_common_argument_spec, avi_ansible_api, HAS_AVI) +except ImportError: + HAS_AVI = False + + +def main(): + argument_specs = dict( + state=dict(default='present', + choices=['absent', 'present']), + avi_api_update_method=dict(default='put', + choices=['put', 'patch']), + avi_api_patch_op=dict(choices=['add', 'replace', 'delete']), + account_lock_timeout=dict(type='int',), + credentials_timeout_threshold=dict(type='int',), + max_concurrent_sessions=dict(type='int',), + max_login_failure_count=dict(type='int',), + max_password_history_count=dict(type='int',), + name=dict(type='str', required=True), + url=dict(type='str',), + uuid=dict(type='str',), + ) + argument_specs.update(avi_common_argument_spec()) + module = AnsibleModule( + argument_spec=argument_specs, supports_check_mode=True) + if not HAS_AVI: + return module.fail_json(msg=( + 'Avi python API SDK (avisdk>=17.1) or requests is not installed. ' + 'For more details visit https://github.com/avinetworks/sdk.')) + return avi_ansible_api(module, 'useraccountprofile', + set([])) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/avi/avi_virtualservice.py b/plugins/modules/network/avi/avi_virtualservice.py new file mode 100644 index 0000000000..026855920c --- /dev/null +++ b/plugins/modules/network/avi/avi_virtualservice.py @@ -0,0 +1,653 @@ +#!/usr/bin/python +# +# @author: Gaurav Rastogi (grastogi@avinetworks.com) +# Eric Anderson (eanderson@avinetworks.com) +# module_check: supported +# Avi Version: 17.1.1 +# +# Copyright: (c) 2017 Gaurav Rastogi, +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: avi_virtualservice +author: Gaurav Rastogi (@grastogi23) + +short_description: Module for setup of VirtualService Avi RESTful Object +description: + - This module is used to configure VirtualService object + - more examples at U(https://github.com/avinetworks/devops) +requirements: [ avisdk ] +options: + state: + description: + - The state that should be applied on the entity. + default: present + choices: ["absent", "present"] + avi_api_update_method: + description: + - Default method for object update is HTTP PUT. + - Setting to patch will override that behavior to use HTTP PATCH. + default: put + choices: ["put", "patch"] + avi_api_patch_op: + description: + - Patch operation to use when using avi_api_update_method as patch. 
+        choices: ["add", "replace", "delete"]
+    active_standby_se_tag:
+        description:
+            - This configuration only applies if the virtualservice is in legacy active standby ha mode and load distribution among active standby is enabled.
+            - This field is used to tag the virtualservice so that virtualservices with the same tag will share the same active serviceengine.
+            - Virtualservices with different tags will have different active serviceengines.
+            - If one of the serviceengine's in the serviceenginegroup fails, all virtualservices will end up using the same active serviceengine.
+            - Redistribution of the virtualservices can be either manual or automated when the failed serviceengine recovers.
+            - Redistribution is based on the auto redistribute property of the serviceenginegroup.
+            - Enum options - ACTIVE_STANDBY_SE_1, ACTIVE_STANDBY_SE_2.
+            - Default value when not specified in API or module is interpreted by Avi Controller as ACTIVE_STANDBY_SE_1.
+    allow_invalid_client_cert:
+        description:
+            - Process request even if invalid client certificate is presented.
+            - Datascript apis need to be used for processing of such requests.
+            - Field introduced in 18.2.3.
+            - Default value when not specified in API or module is interpreted by Avi Controller as False.
+        type: bool
+    analytics_policy:
+        description:
+            - Determines analytics settings for the application.
+    analytics_profile_ref:
+        description:
+            - Specifies settings related to analytics.
+            - It is a reference to an object of type analyticsprofile.
+    apic_contract_graph:
+        description:
+            - The name of the contract/graph associated with the virtual service.
+            - Should be in the format.
+            - This is applicable only for service integration mode with cisco apic controller.
+            - Field introduced in 17.2.12, 18.1.2.
+    application_profile_ref:
+        description:
+            - Enable application layer specific features for the virtual service.
+            - It is a reference to an object of type applicationprofile.
+    auto_allocate_floating_ip:
+        description:
+            - Auto-allocate floating/elastic ip from the cloud infrastructure.
+            - Field deprecated in 17.1.1.
+        type: bool
+    auto_allocate_ip:
+        description:
+            - Auto-allocate vip from the provided subnet.
+            - Field deprecated in 17.1.1.
+        type: bool
+    availability_zone:
+        description:
+            - Availability-zone to place the virtual service.
+            - Field deprecated in 17.1.1.
+    avi_allocated_fip:
+        description:
+            - (internal-use) fip allocated by avi in the cloud infrastructure.
+            - Field deprecated in 17.1.1.
+        type: bool
+    avi_allocated_vip:
+        description:
+            - (internal-use) vip allocated by avi in the cloud infrastructure.
+            - Field deprecated in 17.1.1.
+        type: bool
+    azure_availability_set:
+        description:
+            - (internal-use) applicable for azure only.
+            - Azure availability set to which this vs is associated.
+            - Internally set by the cloud connector.
+            - Field introduced in 17.2.12, 18.1.2.
+    bulk_sync_kvcache:
+        description:
+            - (this is a beta feature).
+            - Sync key-value cache to the new ses when vs is scaled out.
+            - For example, ssl sessions are stored using vs's key-value cache.
+            - When the vs is scaled out, the ssl session information is synced to the new se, allowing existing ssl sessions to be reused on the new se.
+            - Field introduced in 17.2.7, 18.1.1.
+            - Default value when not specified in API or module is interpreted by Avi Controller as False.
+        type: bool
+    client_auth:
+        description:
+            - Http authentication configuration for protected resources.
+ close_client_conn_on_config_update: + description: + - Close client connection on vs config update. + - Field introduced in 17.2.4. + - Default value when not specified in API or module is interpreted by Avi Controller as False. + type: bool + cloud_config_cksum: + description: + - Checksum of cloud configuration for vs. + - Internally set by cloud connector. + cloud_ref: + description: + - It is a reference to an object of type cloud. + cloud_type: + description: + - Enum options - cloud_none, cloud_vcenter, cloud_openstack, cloud_aws, cloud_vca, cloud_apic, cloud_mesos, cloud_linuxserver, cloud_docker_ucp, + - cloud_rancher, cloud_oshift_k8s, cloud_azure, cloud_gcp. + - Default value when not specified in API or module is interpreted by Avi Controller as CLOUD_NONE. + connections_rate_limit: + description: + - Rate limit the incoming connections to this virtual service. + content_rewrite: + description: + - Profile used to match and rewrite strings in request and/or response body. + created_by: + description: + - Creator name. + delay_fairness: + description: + - Select the algorithm for qos fairness. + - This determines how multiple virtual services sharing the same service engines will prioritize traffic over a congested network. + - Default value when not specified in API or module is interpreted by Avi Controller as False. + type: bool + description: + description: + - User defined description for the object. + discovered_network_ref: + description: + - (internal-use) discovered networks providing reachability for client facing virtual service ip. + - This field is deprecated. + - It is a reference to an object of type network. + - Field deprecated in 17.1.1. + discovered_networks: + description: + - (internal-use) discovered networks providing reachability for client facing virtual service ip. + - This field is used internally by avi, not editable by the user. + - Field deprecated in 17.1.1. + discovered_subnet: + description: + - (internal-use) discovered subnets providing reachability for client facing virtual service ip. + - This field is deprecated. + - Field deprecated in 17.1.1. + dns_info: + description: + - Service discovery specific data including fully qualified domain name, type and time-to-live of the dns record. + - Note that only one of fqdn and dns_info setting is allowed. + dns_policies: + description: + - Dns policies applied on the dns traffic of the virtual service. + - Field introduced in 17.1.1. + east_west_placement: + description: + - Force placement on all se's in service group (mesos mode only). + - Default value when not specified in API or module is interpreted by Avi Controller as False. + type: bool + enable_autogw: + description: + - Response traffic to clients will be sent back to the source mac address of the connection, rather than statically sent to a default gateway. + - Default value when not specified in API or module is interpreted by Avi Controller as True. + type: bool + enable_rhi: + description: + - Enable route health injection using the bgp config in the vrf context. + type: bool + enable_rhi_snat: + description: + - Enable route health injection for source nat'ted floating ip address using the bgp config in the vrf context. + type: bool + enabled: + description: + - Enable or disable the virtual service. + - Default value when not specified in API or module is interpreted by Avi Controller as True. 
+        type: bool
+    error_page_profile_ref:
+        description:
+            - Error page profile to be used for this virtualservice. This profile is used to send the custom error page to the client generated by the proxy.
+            - It is a reference to an object of type errorpageprofile.
+            - Field introduced in 17.2.4.
+    floating_ip:
+        description:
+            - Floating ip to associate with this virtual service.
+            - Field deprecated in 17.1.1.
+    floating_subnet_uuid:
+        description:
+            - If auto_allocate_floating_ip is true and more than one floating-ip subnets exist, then the subnet for the floating ip address allocation.
+            - This field is applicable only if the virtualservice belongs to an openstack or aws cloud.
+            - In openstack or aws cloud it is required when auto_allocate_floating_ip is selected.
+            - Field deprecated in 17.1.1.
+    flow_dist:
+        description:
+            - Criteria for flow distribution among ses.
+            - Enum options - LOAD_AWARE, CONSISTENT_HASH_SOURCE_IP_ADDRESS, CONSISTENT_HASH_SOURCE_IP_ADDRESS_AND_PORT.
+            - Default value when not specified in API or module is interpreted by Avi Controller as LOAD_AWARE.
+    flow_label_type:
+        description:
+            - Criteria for flow labelling.
+            - Enum options - NO_LABEL, APPLICATION_LABEL, SERVICE_LABEL.
+            - Default value when not specified in API or module is interpreted by Avi Controller as NO_LABEL.
+    fqdn:
+        description:
+            - Dns resolvable, fully qualified domain name of the virtualservice.
+            - Only one of 'fqdn' and 'dns_info' configuration is allowed.
+    host_name_xlate:
+        description:
+            - Translate the host name sent to the servers to this value.
+            - Translate the host name sent from servers back to the value used by the client.
+    http_policies:
+        description:
+            - Http policies applied on the data traffic of the virtual service.
+    ign_pool_net_reach:
+        description:
+            - Ignore pool servers network reachability constraints for virtual service placement.
+            - Default value when not specified in API or module is interpreted by Avi Controller as False.
+        type: bool
+    ip_address:
+        description:
+            - Ip address of the virtual service.
+            - Field deprecated in 17.1.1.
+    ipam_network_subnet:
+        description:
+            - Subnet and/or network for allocating virtualservice ip by ipam provider module.
+            - Field deprecated in 17.1.1.
+    l4_policies:
+        description:
+            - L4 policies applied to the data traffic of the virtual service.
+            - Field introduced in 17.2.7.
+    limit_doser:
+        description:
+            - Limit potential dos attackers who exceed max_cps_per_client significantly to a fraction of max_cps_per_client for a while.
+            - Default value when not specified in API or module is interpreted by Avi Controller as False.
+        type: bool
+    max_cps_per_client:
+        description:
+            - Maximum connections per second per client ip.
+            - Allowed values are 10-1000.
+            - Special values are 0 - 'unlimited'.
+            - Default value when not specified in API or module is interpreted by Avi Controller as 0.
+    microservice_ref:
+        description:
+            - Microservice representing the virtual service.
+            - It is a reference to an object of type microservice.
+    min_pools_up:
+        description:
+            - Minimum number of up pools to mark vs up.
+            - Field introduced in 18.2.1, 17.2.12.
+    name:
+        description:
+            - Name for the virtual service.
+        required: true
+    network_profile_ref:
+        description:
+            - Determines network settings such as protocol, tcp or udp, and related options for the protocol.
+            - It is a reference to an object of type networkprofile.
+    network_ref:
+        description:
+            - Manually override the network on which the virtual service is placed.
+            - It is a reference to an object of type network.
+            - Field deprecated in 17.1.1.
+    network_security_policy_ref:
+        description:
+            - Network security policies for the virtual service.
+            - It is a reference to an object of type networksecuritypolicy.
+    nsx_securitygroup:
+        description:
+            - A list of nsx service groups representing the clients which can access the virtual ip of the virtual service.
+            - Field introduced in 17.1.1.
+    performance_limits:
+        description:
+            - Optional settings that determine performance limits like max connections or bandwidth etc.
+    pool_group_ref:
+        description:
+            - The pool group is an object that contains pools.
+            - It is a reference to an object of type poolgroup.
+    pool_ref:
+        description:
+            - The pool is an object that contains destination servers and related attributes such as load-balancing and persistence.
+            - It is a reference to an object of type pool.
+    port_uuid:
+        description:
+            - (internal-use) network port assigned to the virtual service ip address.
+            - Field deprecated in 17.1.1.
+    remove_listening_port_on_vs_down:
+        description:
+            - Remove listening port if virtualservice is down.
+            - Default value when not specified in API or module is interpreted by Avi Controller as False.
+        type: bool
+    requests_rate_limit:
+        description:
+            - Rate limit the incoming requests to this virtual service.
+    saml_sp_config:
+        description:
+            - Application-specific saml config.
+            - Field introduced in 18.2.3.
+    scaleout_ecmp:
+        description:
+            - Disable re-distribution of flows across service engines for a virtual service.
+            - Enable if the network itself performs flow hashing with ecmp in environments such as gcp.
+            - Default value when not specified in API or module is interpreted by Avi Controller as False.
+        type: bool
+    se_group_ref:
+        description:
+            - The service engine group to use for this virtual service.
+            - Moving to a new se group is disruptive to existing connections for this vs.
+            - It is a reference to an object of type serviceenginegroup.
+    security_policy_ref:
+        description:
+            - Security policy applied on the traffic of the virtual service.
+            - This policy is used to perform security actions such as distributed denial of service (ddos) attack mitigation, etc.
+            - It is a reference to an object of type securitypolicy.
+            - Field introduced in 18.2.1.
+    server_network_profile_ref:
+        description:
+            - Determines the network settings profile for the server side of tcp proxied connections.
+            - Leave blank to use the same settings as the client to vs side of the connection.
+            - It is a reference to an object of type networkprofile.
+    service_metadata:
+        description:
+            - Metadata pertaining to the service provided by this virtual service.
+            - In openshift/kubernetes environments, egress pod info is stored.
+            - Any user input to this field will be overwritten by avi vantage.
+    service_pool_select:
+        description:
+            - Select pool based on destination port.
+    services:
+        description:
+            - List of services defined for this virtual service.
+    sideband_profile:
+        description:
+            - Sideband configuration to be used for this virtualservice. It can be used for sending traffic to sideband vips for external inspection etc.
+    snat_ip:
+        description:
+            - Nat'ted floating source ip address(es) for upstream connection to servers.
+    sp_pool_refs:
+        description:
+            - Gslb pools used to manage site-persistence functionality.
+            - Each site-persistence pool contains the virtualservices in all the other sites, that is auto-generated by the gslb manager.
+            - This is a read-only field for the user.
+            - It is a reference to an object of type pool.
+            - Field introduced in 17.2.2.
+    ssl_key_and_certificate_refs:
+        description:
+            - Select or create one or two certificates, ec and/or rsa, that will be presented to ssl/tls terminated connections.
+            - It is a reference to an object of type sslkeyandcertificate.
+    ssl_profile_ref:
+        description:
+            - Determines the set of ssl versions and ciphers to accept for ssl/tls terminated connections.
+            - It is a reference to an object of type sslprofile.
+    ssl_profile_selectors:
+        description:
+            - Select ssl profile based on client ip address match.
+            - Field introduced in 18.2.3.
+    ssl_sess_cache_avg_size:
+        description:
+            - Expected number of ssl session cache entries (may be exceeded).
+            - Allowed values are 1024-16383.
+            - Default value when not specified in API or module is interpreted by Avi Controller as 1024.
+    sso_policy:
+        description:
+            - Client authentication and authorization policy for the virtualservice.
+            - Field deprecated in 18.2.3.
+            - Field introduced in 18.2.1.
+    sso_policy_ref:
+        description:
+            - The sso policy attached to the virtualservice.
+            - It is a reference to an object of type ssopolicy.
+            - Field introduced in 18.2.3.
+    static_dns_records:
+        description:
+            - List of static dns records applied to this virtual service.
+            - These are static entries and no health monitoring is performed against the ip addresses.
+    subnet:
+        description:
+            - Subnet providing reachability for client facing virtual service ip.
+            - Field deprecated in 17.1.1.
+    subnet_uuid:
+        description:
+            - It represents subnet for the virtual service ip address allocation when auto_allocate_ip is true. It is only applicable in openstack or aws cloud.
+            - This field is required if auto_allocate_ip is true.
+            - Field deprecated in 17.1.1.
+    tenant_ref:
+        description:
+            - It is a reference to an object of type tenant.
+    topology_policies:
+        description:
+            - Topology policies applied on the dns traffic of the virtual service based on gslb topology algorithm.
+            - Field introduced in 18.2.3.
+    traffic_clone_profile_ref:
+        description:
+            - Server network or list of servers for cloning traffic.
+            - It is a reference to an object of type trafficcloneprofile.
+            - Field introduced in 17.1.1.
+    traffic_enabled:
+        description:
+            - Knob to enable the virtual service traffic on its assigned service engines.
+            - This setting is effective only when the enabled flag is set to true.
+            - Field introduced in 17.2.8.
+            - Default value when not specified in API or module is interpreted by Avi Controller as True.
+        type: bool
+    type:
+        description:
+            - Specify if this is a normal virtual service, or if it is the parent or child of an sni-enabled virtual hosted virtual service.
+            - Enum options - VS_TYPE_NORMAL, VS_TYPE_VH_PARENT, VS_TYPE_VH_CHILD.
+            - Default value when not specified in API or module is interpreted by Avi Controller as VS_TYPE_NORMAL.
+    url:
+        description:
+            - Avi controller URL of the object.
+    use_bridge_ip_as_vip:
+        description:
+            - Use bridge ip as vip on each host in mesos deployments.
+            - Default value when not specified in API or module is interpreted by Avi Controller as False.
+        type: bool
+    use_vip_as_snat:
+        description:
+            - Use the virtual ip as the snat ip for health monitoring and sending traffic to the backend servers instead of the service engine interface ip.
+            - The caveat of enabling this option is that the virtualservice cannot be configured in an active-active ha mode.
+            - Dns based multi vip solution has to be used for ha & non-disruptive upgrade purposes.
+            - Field introduced in 17.1.9, 17.2.3.
+            - Default value when not specified in API or module is interpreted by Avi Controller as False.
+        type: bool
+    uuid:
+        description:
+            - Uuid of the virtualservice.
+    vh_domain_name:
+        description:
+            - The exact name requested from the client's sni-enabled tls hello domain name field.
+            - If this is a match, the parent vs will forward the connection to this child vs.
+    vh_parent_vs_uuid:
+        description:
+            - Specifies the virtual service acting as virtual hosting (sni) parent.
+    vip:
+        description:
+            - List of virtual service ips.
+            - While creating a 'shared vs', please use vsvip_ref to point to the shared entities.
+            - Field introduced in 17.1.1.
+    vrf_context_ref:
+        description:
+            - Virtual routing context that the virtual service is bound to.
+            - This is used to provide the isolation of the set of networks the application is attached to.
+            - It is a reference to an object of type vrfcontext.
+    vs_datascripts:
+        description:
+            - Datascripts applied on the data traffic of the virtual service.
+    vsvip_cloud_config_cksum:
+        description:
+            - Checksum of cloud configuration for vsvip.
+            - Internally set by cloud connector.
+            - Field introduced in 17.2.9, 18.1.2.
+    vsvip_ref:
+        description:
+            - Mostly used during the creation of shared vs, this field refers to entities that can be shared across virtual services.
+            - It is a reference to an object of type vsvip.
+            - Field introduced in 17.1.1.
+    waf_policy_ref:
+        description:
+            - Waf policy for the virtual service.
+            - It is a reference to an object of type wafpolicy.
+            - Field introduced in 17.2.1.
+    weight:
+        description:
+            - The quality of service weight to assign to traffic transmitted from this virtual service.
+            - A higher weight will prioritize traffic versus other virtual services sharing the same service engines.
+            - Allowed values are 1-128.
+            - Default value when not specified in API or module is interpreted by Avi Controller as 1.
+extends_documentation_fragment:
+- community.general.avi
+
+'''
+
+EXAMPLES = """
+- name: Create SSL Virtual Service using Pool testpool2
+  avi_virtualservice:
+    controller: 10.10.27.90
+    username: admin
+    password: AviNetworks123!
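+    # Note: credentials above are sample values. In the services list below,
+    # enable_ssl is set only for port 443, so port 80 is served as plain HTTP.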
+ name: newtestvs + state: present + performance_limits: + max_concurrent_connections: 1000 + services: + - port: 443 + enable_ssl: true + - port: 80 + ssl_profile_ref: '/api/sslprofile?name=System-Standard' + application_profile_ref: '/api/applicationprofile?name=System-Secure-HTTP' + ssl_key_and_certificate_refs: + - '/api/sslkeyandcertificate?name=System-Default-Cert' + ip_address: + addr: 10.90.131.103 + type: V4 + pool_ref: '/api/pool?name=testpool2' +""" + +RETURN = ''' +obj: + description: VirtualService (api/virtualservice) object + returned: success, changed + type: dict +''' + +from ansible.module_utils.basic import AnsibleModule +try: + from ansible_collections.community.general.plugins.module_utils.network.avi.avi import ( + avi_common_argument_spec, avi_ansible_api, HAS_AVI) +except ImportError: + HAS_AVI = False + + +def main(): + argument_specs = dict( + state=dict(default='present', + choices=['absent', 'present']), + avi_api_update_method=dict(default='put', + choices=['put', 'patch']), + avi_api_patch_op=dict(choices=['add', 'replace', 'delete']), + active_standby_se_tag=dict(type='str',), + allow_invalid_client_cert=dict(type='bool',), + analytics_policy=dict(type='dict',), + analytics_profile_ref=dict(type='str',), + apic_contract_graph=dict(type='str',), + application_profile_ref=dict(type='str',), + auto_allocate_floating_ip=dict(type='bool',), + auto_allocate_ip=dict(type='bool',), + availability_zone=dict(type='str',), + avi_allocated_fip=dict(type='bool',), + avi_allocated_vip=dict(type='bool',), + azure_availability_set=dict(type='str',), + bulk_sync_kvcache=dict(type='bool',), + client_auth=dict(type='dict',), + close_client_conn_on_config_update=dict(type='bool',), + cloud_config_cksum=dict(type='str',), + cloud_ref=dict(type='str',), + cloud_type=dict(type='str',), + connections_rate_limit=dict(type='dict',), + content_rewrite=dict(type='dict',), + created_by=dict(type='str',), + delay_fairness=dict(type='bool',), + description=dict(type='str',), + discovered_network_ref=dict(type='list',), + discovered_networks=dict(type='list',), + discovered_subnet=dict(type='list',), + dns_info=dict(type='list',), + dns_policies=dict(type='list',), + east_west_placement=dict(type='bool',), + enable_autogw=dict(type='bool',), + enable_rhi=dict(type='bool',), + enable_rhi_snat=dict(type='bool',), + enabled=dict(type='bool',), + error_page_profile_ref=dict(type='str',), + floating_ip=dict(type='dict',), + floating_subnet_uuid=dict(type='str',), + flow_dist=dict(type='str',), + flow_label_type=dict(type='str',), + fqdn=dict(type='str',), + host_name_xlate=dict(type='str',), + http_policies=dict(type='list',), + ign_pool_net_reach=dict(type='bool',), + ip_address=dict(type='dict',), + ipam_network_subnet=dict(type='dict',), + l4_policies=dict(type='list',), + limit_doser=dict(type='bool',), + max_cps_per_client=dict(type='int',), + microservice_ref=dict(type='str',), + min_pools_up=dict(type='int',), + name=dict(type='str', required=True), + network_profile_ref=dict(type='str',), + network_ref=dict(type='str',), + network_security_policy_ref=dict(type='str',), + nsx_securitygroup=dict(type='list',), + performance_limits=dict(type='dict',), + pool_group_ref=dict(type='str',), + pool_ref=dict(type='str',), + port_uuid=dict(type='str',), + remove_listening_port_on_vs_down=dict(type='bool',), + requests_rate_limit=dict(type='dict',), + saml_sp_config=dict(type='dict',), + scaleout_ecmp=dict(type='bool',), + se_group_ref=dict(type='str',), + security_policy_ref=dict(type='str',), + 
server_network_profile_ref=dict(type='str',), + service_metadata=dict(type='str',), + service_pool_select=dict(type='list',), + services=dict(type='list',), + sideband_profile=dict(type='dict',), + snat_ip=dict(type='list',), + sp_pool_refs=dict(type='list',), + ssl_key_and_certificate_refs=dict(type='list',), + ssl_profile_ref=dict(type='str',), + ssl_profile_selectors=dict(type='list',), + ssl_sess_cache_avg_size=dict(type='int',), + sso_policy=dict(type='dict',), + sso_policy_ref=dict(type='str',), + static_dns_records=dict(type='list',), + subnet=dict(type='dict',), + subnet_uuid=dict(type='str',), + tenant_ref=dict(type='str',), + topology_policies=dict(type='list',), + traffic_clone_profile_ref=dict(type='str',), + traffic_enabled=dict(type='bool',), + type=dict(type='str',), + url=dict(type='str',), + use_bridge_ip_as_vip=dict(type='bool',), + use_vip_as_snat=dict(type='bool',), + uuid=dict(type='str',), + vh_domain_name=dict(type='list',), + vh_parent_vs_uuid=dict(type='str',), + vip=dict(type='list',), + vrf_context_ref=dict(type='str',), + vs_datascripts=dict(type='list',), + vsvip_cloud_config_cksum=dict(type='str',), + vsvip_ref=dict(type='str',), + waf_policy_ref=dict(type='str',), + weight=dict(type='int',), + ) + argument_specs.update(avi_common_argument_spec()) + module = AnsibleModule( + argument_spec=argument_specs, supports_check_mode=True) + if not HAS_AVI: + return module.fail_json(msg=( + 'Avi python API SDK (avisdk>=17.1) or requests is not installed. ' + 'For more details visit https://github.com/avinetworks/sdk.')) + return avi_ansible_api(module, 'virtualservice', + set([])) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/avi/avi_vrfcontext.py b/plugins/modules/network/avi/avi_vrfcontext.py new file mode 100644 index 0000000000..2f57f0399c --- /dev/null +++ b/plugins/modules/network/avi/avi_vrfcontext.py @@ -0,0 +1,145 @@ +#!/usr/bin/python +# +# @author: Gaurav Rastogi (grastogi@avinetworks.com) +# Eric Anderson (eanderson@avinetworks.com) +# module_check: supported +# Avi Version: 17.1.2 +# +# Copyright: (c) 2017 Gaurav Rastogi, +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: avi_vrfcontext +author: Gaurav Rastogi (@grastogi23) + +short_description: Module for setup of VrfContext Avi RESTful Object +description: + - This module is used to configure VrfContext object + - more examples at U(https://github.com/avinetworks/devops) +requirements: [ avisdk ] +options: + state: + description: + - The state that should be applied on the entity. + default: present + choices: ["absent", "present"] + avi_api_update_method: + description: + - Default method for object update is HTTP PUT. + - Setting to patch will override that behavior to use HTTP PATCH. + default: put + choices: ["put", "patch"] + avi_api_patch_op: + description: + - Patch operation to use when using avi_api_update_method as patch. + choices: ["add", "replace", "delete"] + bgp_profile: + description: + - Bgp local and peer info. + cloud_ref: + description: + - It is a reference to an object of type cloud. + debugvrfcontext: + description: + - Configure debug flags for vrf. + - Field introduced in 17.1.1. + description: + description: + - User defined description for the object. + gateway_mon: + description: + - Configure ping based heartbeat check for gateway in service engines of vrf. 
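+    # Note: gateway_mon targets specific gateways, while internal_gateway_monitor
+    # (below, introduced in 17.1.1) covers all default gateways in the vrf.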
+ internal_gateway_monitor: + description: + - Configure ping based heartbeat check for all default gateways in service engines of vrf. + - Field introduced in 17.1.1. + name: + description: + - Name of the object. + required: true + static_routes: + description: + - List of staticroute. + system_default: + description: + - Boolean flag to set system_default. + - Default value when not specified in API or module is interpreted by Avi Controller as False. + type: bool + tenant_ref: + description: + - It is a reference to an object of type tenant. + url: + description: + - Avi controller URL of the object. + uuid: + description: + - Unique object identifier of the object. +extends_documentation_fragment: +- community.general.avi + +''' + +EXAMPLES = """ +- name: Example to create VrfContext object + avi_vrfcontext: + controller: 10.10.25.42 + username: admin + password: something + state: present + name: sample_vrfcontext +""" + +RETURN = ''' +obj: + description: VrfContext (api/vrfcontext) object + returned: success, changed + type: dict +''' + +from ansible.module_utils.basic import AnsibleModule +try: + from ansible_collections.community.general.plugins.module_utils.network.avi.avi import ( + avi_common_argument_spec, avi_ansible_api, HAS_AVI) +except ImportError: + HAS_AVI = False + + +def main(): + argument_specs = dict( + state=dict(default='present', + choices=['absent', 'present']), + avi_api_update_method=dict(default='put', + choices=['put', 'patch']), + avi_api_patch_op=dict(choices=['add', 'replace', 'delete']), + bgp_profile=dict(type='dict',), + cloud_ref=dict(type='str',), + debugvrfcontext=dict(type='dict',), + description=dict(type='str',), + gateway_mon=dict(type='list',), + internal_gateway_monitor=dict(type='dict',), + name=dict(type='str', required=True), + static_routes=dict(type='list',), + system_default=dict(type='bool',), + tenant_ref=dict(type='str',), + url=dict(type='str',), + uuid=dict(type='str',), + ) + argument_specs.update(avi_common_argument_spec()) + module = AnsibleModule( + argument_spec=argument_specs, supports_check_mode=True) + if not HAS_AVI: + return module.fail_json(msg=( + 'Avi python API SDK (avisdk>=17.1) or requests is not installed. ' + 'For more details visit https://github.com/avinetworks/sdk.')) + return avi_ansible_api(module, 'vrfcontext', + set([])) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/avi/avi_vsdatascriptset.py b/plugins/modules/network/avi/avi_vsdatascriptset.py new file mode 100644 index 0000000000..07115c2c11 --- /dev/null +++ b/plugins/modules/network/avi/avi_vsdatascriptset.py @@ -0,0 +1,148 @@ +#!/usr/bin/python +# +# @author: Gaurav Rastogi (grastogi@avinetworks.com) +# Eric Anderson (eanderson@avinetworks.com) +# module_check: supported +# Avi Version: 17.1.1 +# +# Copyright: (c) 2017 Gaurav Rastogi, +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: avi_vsdatascriptset +author: Gaurav Rastogi (@grastogi23) + +short_description: Module for setup of VSDataScriptSet Avi RESTful Object +description: + - This module is used to configure VSDataScriptSet object + - more examples at U(https://github.com/avinetworks/devops) +requirements: [ avisdk ] +options: + state: + description: + - The state that should be applied on the entity. 
+ default: present + choices: ["absent", "present"] + avi_api_update_method: + description: + - Default method for object update is HTTP PUT. + - Setting to patch will override that behavior to use HTTP PATCH. + default: put + choices: ["put", "patch"] + avi_api_patch_op: + description: + - Patch operation to use when using avi_api_update_method as patch. + choices: ["add", "replace", "delete"] + created_by: + description: + - Creator name. + - Field introduced in 17.1.11,17.2.4. + datascript: + description: + - Datascripts to execute. + description: + description: + - User defined description for the object. + ipgroup_refs: + description: + - Uuid of ip groups that could be referred by vsdatascriptset objects. + - It is a reference to an object of type ipaddrgroup. + name: + description: + - Name for the virtual service datascript collection. + required: true + pool_group_refs: + description: + - Uuid of pool groups that could be referred by vsdatascriptset objects. + - It is a reference to an object of type poolgroup. + pool_refs: + description: + - Uuid of pools that could be referred by vsdatascriptset objects. + - It is a reference to an object of type pool. + protocol_parser_refs: + description: + - List of protocol parsers that could be referred by vsdatascriptset objects. + - It is a reference to an object of type protocolparser. + - Field introduced in 18.2.3. + string_group_refs: + description: + - Uuid of string groups that could be referred by vsdatascriptset objects. + - It is a reference to an object of type stringgroup. + tenant_ref: + description: + - It is a reference to an object of type tenant. + url: + description: + - Avi controller URL of the object. + uuid: + description: + - Uuid of the virtual service datascript collection. +extends_documentation_fragment: +- community.general.avi + +''' + +EXAMPLES = """ +- name: Example to create VSDataScriptSet object + avi_vsdatascriptset: + controller: 10.10.25.42 + username: admin + password: something + state: present + name: sample_vsdatascriptset +""" + +RETURN = ''' +obj: + description: VSDataScriptSet (api/vsdatascriptset) object + returned: success, changed + type: dict +''' + +from ansible.module_utils.basic import AnsibleModule +try: + from ansible_collections.community.general.plugins.module_utils.network.avi.avi import ( + avi_common_argument_spec, avi_ansible_api, HAS_AVI) +except ImportError: + HAS_AVI = False + + +def main(): + argument_specs = dict( + state=dict(default='present', + choices=['absent', 'present']), + avi_api_update_method=dict(default='put', + choices=['put', 'patch']), + avi_api_patch_op=dict(choices=['add', 'replace', 'delete']), + created_by=dict(type='str',), + datascript=dict(type='list',), + description=dict(type='str',), + ipgroup_refs=dict(type='list',), + name=dict(type='str', required=True), + pool_group_refs=dict(type='list',), + pool_refs=dict(type='list',), + protocol_parser_refs=dict(type='list',), + string_group_refs=dict(type='list',), + tenant_ref=dict(type='str',), + url=dict(type='str',), + uuid=dict(type='str',), + ) + argument_specs.update(avi_common_argument_spec()) + module = AnsibleModule( + argument_spec=argument_specs, supports_check_mode=True) + if not HAS_AVI: + return module.fail_json(msg=( + 'Avi python API SDK (avisdk>=17.1) or requests is not installed. 
'
+            'For more details visit https://github.com/avinetworks/sdk.'))
+    return avi_ansible_api(module, 'vsdatascriptset',
+                           set([]))
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/network/avi/avi_vsvip.py b/plugins/modules/network/avi/avi_vsvip.py
new file mode 100644
index 0000000000..fc54b3f11b
--- /dev/null
+++ b/plugins/modules/network/avi/avi_vsvip.py
@@ -0,0 +1,155 @@
+#!/usr/bin/python
+#
+# @author: Gaurav Rastogi (grastogi@avinetworks.com)
+#          Eric Anderson (eanderson@avinetworks.com)
+# module_check: supported
+# Avi Version: 17.1.2
+#
+# Copyright: (c) 2017 Gaurav Rastogi,
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+DOCUMENTATION = '''
+---
+module: avi_vsvip
+author: Gaurav Rastogi (@grastogi23)
+
+short_description: Module for setup of VsVip Avi RESTful Object
+description:
+    - This module is used to configure VsVip object
+    - more examples at U(https://github.com/avinetworks/devops)
+requirements: [ avisdk ]
+options:
+    state:
+        description:
+            - The state that should be applied on the entity.
+        default: present
+        choices: ["absent", "present"]
+    avi_api_update_method:
+        description:
+            - Default method for object update is HTTP PUT.
+            - Setting to patch will override that behavior to use HTTP PATCH.
+        default: put
+        choices: ["put", "patch"]
+    avi_api_patch_op:
+        description:
+            - Patch operation to use when using avi_api_update_method as patch.
+        choices: ["add", "replace", "delete"]
+    cloud_ref:
+        description:
+            - It is a reference to an object of type cloud.
+            - Field introduced in 17.1.1.
+    dns_info:
+        description:
+            - Service discovery specific data including fully qualified domain name, type and time-to-live of the dns record.
+            - Field introduced in 17.1.1.
+    east_west_placement:
+        description:
+            - Force placement on all service engines in the service engine group (container clouds only).
+            - Field introduced in 17.1.1.
+            - Default value when not specified in API or module is interpreted by Avi Controller as False.
+        type: bool
+    name:
+        description:
+            - Name for the vsvip object.
+            - Field introduced in 17.1.1.
+        required: true
+    tenant_ref:
+        description:
+            - It is a reference to an object of type tenant.
+            - Field introduced in 17.1.1.
+    url:
+        description:
+            - Avi controller URL of the object.
+    use_standard_alb:
+        description:
+            - This overrides the cloud level default and needs to match the se group value in which it will be used if the se group use_standard_alb value is set.
+            - This is only used when fip is used for vs on azure cloud.
+            - Field introduced in 18.2.3.
+        type: bool
+    uuid:
+        description:
+            - Uuid of the vsvip object.
+            - Field introduced in 17.1.1.
+    vip:
+        description:
+            - List of virtual service ips and other shareable entities.
+            - Field introduced in 17.1.1.
+    vrf_context_ref:
+        description:
+            - Virtual routing context that the virtual service is bound to.
+            - This is used to provide the isolation of the set of networks the application is attached to.
+            - It is a reference to an object of type vrfcontext.
+            - Field introduced in 17.1.1.
+    vsvip_cloud_config_cksum:
+        description:
+            - Checksum of cloud configuration for vsvip.
+            - Internally set by cloud connector.
+            - Field introduced in 17.2.9, 18.1.2.
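+# Note: VsVip objects are typically consumed by avi_virtualservice via its
+# vsvip_ref option when building shared virtual services.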
+extends_documentation_fragment: +- community.general.avi + +''' + +EXAMPLES = """ +- name: Example to create VsVip object + avi_vsvip: + controller: 10.10.25.42 + username: admin + password: something + state: present + name: sample_vsvip +""" + +RETURN = ''' +obj: + description: VsVip (api/vsvip) object + returned: success, changed + type: dict +''' + +from ansible.module_utils.basic import AnsibleModule +try: + from ansible_collections.community.general.plugins.module_utils.network.avi.avi import ( + avi_common_argument_spec, avi_ansible_api, HAS_AVI) +except ImportError: + HAS_AVI = False + + +def main(): + argument_specs = dict( + state=dict(default='present', + choices=['absent', 'present']), + avi_api_update_method=dict(default='put', + choices=['put', 'patch']), + avi_api_patch_op=dict(choices=['add', 'replace', 'delete']), + cloud_ref=dict(type='str',), + dns_info=dict(type='list',), + east_west_placement=dict(type='bool',), + name=dict(type='str', required=True), + tenant_ref=dict(type='str',), + url=dict(type='str',), + use_standard_alb=dict(type='bool',), + uuid=dict(type='str',), + vip=dict(type='list',), + vrf_context_ref=dict(type='str',), + vsvip_cloud_config_cksum=dict(type='str',), + ) + argument_specs.update(avi_common_argument_spec()) + module = AnsibleModule( + argument_spec=argument_specs, supports_check_mode=True) + if not HAS_AVI: + return module.fail_json(msg=( + 'Avi python API SDK (avisdk>=17.1) or requests is not installed. ' + 'For more details visit https://github.com/avinetworks/sdk.')) + return avi_ansible_api(module, 'vsvip', + set([])) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/avi/avi_webhook.py b/plugins/modules/network/avi/avi_webhook.py new file mode 100644 index 0000000000..e42b8a5cbf --- /dev/null +++ b/plugins/modules/network/avi/avi_webhook.py @@ -0,0 +1,125 @@ +#!/usr/bin/python +# +# @author: Gaurav Rastogi (grastogi@avinetworks.com) +# Eric Anderson (eanderson@avinetworks.com) +# module_check: supported +# +# Copyright: (c) 2017 Gaurav Rastogi, +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: avi_webhook +author: Gaurav Rastogi (@grastogi23) + +short_description: Module for setup of Webhook Avi RESTful Object +description: + - This module is used to configure Webhook object + - more examples at U(https://github.com/avinetworks/devops) +requirements: [ avisdk ] +options: + state: + description: + - The state that should be applied on the entity. + default: present + choices: ["absent", "present"] + avi_api_update_method: + description: + - Default method for object update is HTTP PUT. + - Setting to patch will override that behavior to use HTTP PATCH. + default: put + choices: ["put", "patch"] + avi_api_patch_op: + description: + - Patch operation to use when using avi_api_update_method as patch. + choices: ["add", "replace", "delete"] + callback_url: + description: + - Callback url for the webhook. + - Field introduced in 17.1.1. + description: + description: + - Field introduced in 17.1.1. + name: + description: + - The name of the webhook profile. + - Field introduced in 17.1.1. + required: true + tenant_ref: + description: + - It is a reference to an object of type tenant. + - Field introduced in 17.1.1. + url: + description: + - Avi controller URL of the object. + uuid: + description: + - Uuid of the webhook profile. 
+ - Field introduced in 17.1.1. + verification_token: + description: + - Verification token sent back with the callback as query parameters. + - Field introduced in 17.1.1. +extends_documentation_fragment: +- community.general.avi + +''' + +EXAMPLES = """ +- name: Example to create Webhook object + avi_webhook: + controller: 10.10.25.42 + username: admin + password: something + state: present + name: sample_webhook +""" + +RETURN = ''' +obj: + description: Webhook (api/webhook) object + returned: success, changed + type: dict +''' + +from ansible.module_utils.basic import AnsibleModule +try: + from ansible_collections.community.general.plugins.module_utils.network.avi.avi import ( + avi_common_argument_spec, avi_ansible_api, HAS_AVI) +except ImportError: + HAS_AVI = False + + +def main(): + argument_specs = dict( + state=dict(default='present', + choices=['absent', 'present']), + avi_api_update_method=dict(default='put', + choices=['put', 'patch']), + avi_api_patch_op=dict(choices=['add', 'replace', 'delete']), + callback_url=dict(type='str',), + description=dict(type='str',), + name=dict(type='str', required=True), + tenant_ref=dict(type='str',), + url=dict(type='str',), + uuid=dict(type='str',), + verification_token=dict(type='str',), + ) + argument_specs.update(avi_common_argument_spec()) + module = AnsibleModule( + argument_spec=argument_specs, supports_check_mode=True) + if not HAS_AVI: + return module.fail_json(msg=( + 'Avi python API SDK (avisdk>=17.1) or requests is not installed. ' + 'For more details visit https://github.com/avinetworks/sdk.')) + return avi_ansible_api(module, 'webhook', + set([])) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/bigswitch/bcf_switch.py b/plugins/modules/network/bigswitch/bcf_switch.py new file mode 100644 index 0000000000..dedabc8cce --- /dev/null +++ b/plugins/modules/network/bigswitch/bcf_switch.py @@ -0,0 +1,161 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2017, Ted Elhourani +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: bcf_switch +author: "Ted (@tedelhourani)" +short_description: Create and remove a bcf switch. +description: + - Create and remove a Big Cloud Fabric switch. +options: + name: + description: + - The name of the switch. + required: true + fabric_role: + description: + - Fabric role of the switch. + choices: ['spine', 'leaf'] + required: true + leaf_group: + description: + - The leaf group of the switch if the switch is a leaf. + required: false + mac: + description: + - The MAC address of the switch. + required: true + state: + description: + - Whether the switch should be present or absent. + default: present + choices: ['present', 'absent'] + controller: + description: + - The controller IP address. + required: true + validate_certs: + description: + - If C(false), SSL certificates will not be validated. This should only be used + on personally controlled devices using self-signed certificates. + required: false + default: true + type: bool + access_token: + description: + - Big Cloud Fabric access token. If this isn't set, the environment variable C(BIGSWITCH_ACCESS_TOKEN) is used.
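+ type: str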
+''' + + +EXAMPLES = ''' +- name: bcf leaf switch + bcf_switch: + name: Rack1Leaf1 + fabric_role: leaf + leaf_group: R1 + mac: 00:00:00:02:00:02 + controller: '{{ inventory_hostname }}' + state: present + validate_certs: false +''' + + +RETURN = ''' # ''' + +import os +import traceback + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.network.bigswitch.bigswitch import Rest +from ansible.module_utils._text import to_native + + +def switch(module, check_mode): + try: + access_token = module.params['access_token'] or os.environ['BIGSWITCH_ACCESS_TOKEN'] + except KeyError as e: + module.fail_json(msg='Unable to load %s' % to_native(e), exception=traceback.format_exc()) + + name = module.params['name'] + fabric_role = module.params['fabric_role'] + leaf_group = module.params['leaf_group'] + # The controller identifies a switch by DPID: the MAC address prefixed with two zero octets. + dpid = '00:00:' + module.params['mac'] + state = module.params['state'] + controller = module.params['controller'] + + rest = Rest(module, + {'content-type': 'application/json', 'Cookie': 'session_cookie=' + access_token}, + 'https://' + controller + ':8443/api/v1/data/controller/core') + + response = rest.get('switch-config', data={}) + if response.status_code != 200: + module.fail_json(msg="failed to obtain existing switch config: {0}".format(response.json['description'])) + + config_present = False + for switch in response.json: + if all((switch['name'] == name, + switch['fabric-role'] == fabric_role, + switch['dpid'] == dpid)): + config_present = switch.get('leaf-group', None) == leaf_group + if config_present: + break + + if state == 'present' and config_present: + module.exit_json(changed=False) + + if state == 'absent' and not config_present: + module.exit_json(changed=False) + + if check_mode: + module.exit_json(changed=True) + + if state == 'present': + data = {'name': name, 'fabric-role': fabric_role, 'leaf-group': leaf_group, 'dpid': dpid} + response = rest.put('switch-config[name="%s"]' % name, data) + if response.status_code == 204: + module.exit_json(changed=True) + else: + module.fail_json(msg="error configuring switch '{0}': {1}".format(name, response.json['description'])) + + if state == 'absent': + response = rest.delete('switch-config[name="%s"]' % name, data={}) + if response.status_code == 204: + module.exit_json(changed=True) + else: + module.fail_json(msg="error deleting switch '{0}': {1}".format(name, response.json['description'])) + + +def main(): + module = AnsibleModule( + argument_spec=dict( + name=dict(type='str', required=True), + fabric_role=dict(choices=['spine', 'leaf'], required=True), + leaf_group=dict(type='str', required=False), + mac=dict(type='str', required=True), + controller=dict(type='str', required=True), + state=dict(choices=['present', 'absent'], default='present'), + validate_certs=dict(type='bool', default=True), + access_token=dict(type='str', no_log=True) + ), + supports_check_mode=True, + ) + + try: + switch(module, check_mode=module.check_mode) + except Exception as e: + module.fail_json(msg=to_native(e), exception=traceback.format_exc()) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/bigswitch/bigmon_chain.py b/plugins/modules/network/bigswitch/bigmon_chain.py new file mode 100644 index 0000000000..67428b257c --- /dev/null +++ b/plugins/modules/network/bigswitch/bigmon_chain.py @@ -0,0 +1,137 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2016, Ted Elhourani +# GNU General Public License v3.0+ (see COPYING or
https://www.gnu.org/licenses/gpl-3.0.txt) + +# Ansible module to manage Big Monitoring Fabric service chains + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: bigmon_chain +author: "Ted (@tedelhourani)" +short_description: Create and remove a bigmon inline service chain. +description: + - Create and remove a bigmon inline service chain. +options: + name: + description: + - The name of the chain. + required: true + state: + description: + - Whether the service chain should be present or absent. + default: present + choices: ['present', 'absent'] + controller: + description: + - The controller IP address. + required: true + validate_certs: + description: + - If C(false), SSL certificates will not be validated. This should only be used + on personally controlled devices using self-signed certificates. + required: false + default: true + type: bool + access_token: + description: + - Bigmon access token. If this isn't set, the environment variable C(BIGSWITCH_ACCESS_TOKEN) is used. +''' + + +EXAMPLES = ''' +- name: bigmon inline service chain + bigmon_chain: + name: MyChain + controller: '{{ inventory_hostname }}' + state: present + validate_certs: false +''' + + +RETURN = ''' # ''' + +import os +import traceback + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.network.bigswitch.bigswitch import Rest +from ansible.module_utils._text import to_native + + +def chain(module): + try: + access_token = module.params['access_token'] or os.environ['BIGSWITCH_ACCESS_TOKEN'] + except KeyError as e: + module.fail_json(msg='Unable to load %s' % to_native(e), exception=traceback.format_exc()) + + name = module.params['name'] + state = module.params['state'] + controller = module.params['controller'] + + rest = Rest(module, + {'content-type': 'application/json', 'Cookie': 'session_cookie=' + access_token}, + 'https://' + controller + ':8443/api/v1/data/controller/applications/bigchain') + + if None in (name, state, controller): + module.fail_json(msg='one of the parameters `name`, `state` or `controller` is missing') + + response = rest.get('chain?config=true', data={}) + if response.status_code != 200: + module.fail_json(msg="failed to obtain existing chain config: {0}".format(response.json['description'])) + + config_present = False + matching = [chain for chain in response.json if chain['name'] == name] + if matching: + config_present = True + + if state == 'present' and config_present: + module.exit_json(changed=False) + + if state == 'absent' and not config_present: + module.exit_json(changed=False) + + if state == 'present': + response = rest.put('chain[name="%s"]' % name, data={'name': name}) + if response.status_code == 204: + module.exit_json(changed=True) + else: + module.fail_json(msg="error creating chain '{0}': {1}".format(name, response.json['description'])) + + if state == 'absent': + response = rest.delete('chain[name="%s"]' % name, data={}) + if response.status_code == 204: + module.exit_json(changed=True) + else: + module.fail_json(msg="error deleting chain '{0}': {1}".format(name, response.json['description'])) + + +def main(): + module = AnsibleModule( + argument_spec=dict( + name=dict(type='str', required=True), + controller=dict(type='str', required=True), + state=dict(choices=['present', 'absent'], default='present'), + validate_certs=dict(type='bool',
default=True), + access_token=dict(type='str', no_log=True) + ) + ) + + try: + chain(module) + except Exception as e: + module.fail_json(msg=to_native(e), exception=traceback.format_exc()) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/bigswitch/bigmon_policy.py b/plugins/modules/network/bigswitch/bigmon_policy.py new file mode 100644 index 0000000000..421c76f225 --- /dev/null +++ b/plugins/modules/network/bigswitch/bigmon_policy.py @@ -0,0 +1,188 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2016, Ted Elhourani +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +# Ansible module to manage Big Monitoring Fabric policies + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: bigmon_policy +author: "Ted (@tedelhourani)" +short_description: Create and remove a bigmon out-of-band policy. +description: + - Create and remove a bigmon out-of-band policy. +options: + name: + description: + - The name of the policy. + required: true + policy_description: + description: + - Description of policy. + action: + description: + - C(forward) forwards matching packets to delivery interfaces, C(drop) only measures the rate of matching + packets without forwarding them, C(capture) captures matching packets to a PCAP file, and C(flow-gen) enables NetFlow generation. + default: forward + choices: ['forward', 'drop', 'capture', 'flow-gen'] + priority: + description: + - A priority associated with this policy. The higher priority policy takes precedence over a lower priority one. + default: 100 + duration: + description: + - Run the policy for this duration or until delivery_packet_count packets are delivered, whichever comes first. + default: 0 + start_time: + description: + - Date the policy becomes active. + default: ansible_date_time.iso8601 + delivery_packet_count: + description: + - Run policy until delivery_packet_count packets are delivered. + default: 0 + state: + description: + - Whether the policy should be present or absent. + default: present + choices: ['present', 'absent'] + controller: + description: + - The controller address. + required: true + validate_certs: + description: + - If C(false), SSL certificates will not be validated. This should only be used + on personally controlled devices using self-signed certificates. + required: false + default: true + type: bool + access_token: + description: + - Bigmon access token. If this isn't set, the environment variable C(BIGSWITCH_ACCESS_TOKEN) is used.
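+ type: str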
+ +''' + +EXAMPLES = ''' +- name: policy to aggregate filter and deliver data center (DC) 1 traffic + bigmon_policy: + name: policy1 + policy_description: DC 1 traffic policy + action: drop + controller: '{{ inventory_hostname }}' + state: present + validate_certs: false +''' + +RETURN = ''' # ''' + +import datetime +import os +import traceback + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.network.bigswitch.bigswitch import Rest +from ansible.module_utils._text import to_native + + +def policy(module): + try: + access_token = module.params['access_token'] or os.environ['BIGSWITCH_ACCESS_TOKEN'] + except KeyError as e: + module.fail_json(msg='Unable to load %s' % to_native(e), exception=traceback.format_exc()) + + name = module.params['name'] + policy_description = module.params['policy_description'] + action = module.params['action'] + priority = module.params['priority'] + duration = module.params['duration'] + start_time = module.params['start_time'] + delivery_packet_count = module.params['delivery_packet_count'] + state = module.params['state'] + controller = module.params['controller'] + + rest = Rest(module, + {'content-type': 'application/json', 'Cookie': 'session_cookie=' + access_token}, + 'https://' + controller + ':8443/api/v1/data/controller/applications/bigtap') + + if name is None: + module.fail_json(msg='parameter `name` is missing') + + response = rest.get('policy?config=true', data={}) + if response.status_code != 200: + module.fail_json(msg="failed to obtain existing policy config: {0}".format(response.json['description'])) + + config_present = False + + matching = [policy for policy in response.json + if policy['name'] == name and + policy['duration'] == duration and + policy['delivery-packet-count'] == delivery_packet_count and + policy['policy-description'] == policy_description and + policy['action'] == action and + policy['priority'] == priority] + + if matching: + config_present = True + + if state == 'present' and config_present: + module.exit_json(changed=False) + + if state == 'absent' and not config_present: + module.exit_json(changed=False) + + if state == 'present': + data = {'name': name, 'action': action, 'policy-description': policy_description, + 'priority': priority, 'duration': duration, 'start-time': start_time, + 'delivery-packet-count': delivery_packet_count} + + response = rest.put('policy[name="%s"]' % name, data=data) + if response.status_code == 204: + module.exit_json(changed=True) + else: + module.fail_json(msg="error creating policy '{0}': {1}".format(name, response.json['description'])) + + if state == 'absent': + response = rest.delete('policy[name="%s"]' % name, data={}) + if response.status_code == 204: + module.exit_json(changed=True) + else: + module.fail_json(msg="error deleting policy '{0}': {1}".format(name, response.json['description'])) + + +def main(): + module = AnsibleModule( + argument_spec=dict( + name=dict(type='str', required=True), + policy_description=dict(type='str', default=''), + action=dict(choices=['forward', 'drop', 'capture', 'flow-gen'], default='forward'), + priority=dict(type='int', default=100), + duration=dict(type='int', default=0), + start_time=dict(type='str', default=datetime.datetime.now().isoformat() + '+00:00'), + delivery_packet_count=dict(type='int', default=0), + controller=dict(type='str', required=True), + state=dict(choices=['present', 'absent'], default='present'), + validate_certs=dict(type='bool', default=True), +
access_token=dict(type='str', no_log=True) + ) + ) + + try: + policy(module) + except Exception as e: + module.fail_json(msg=to_native(e), exception=traceback.format_exc()) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/check_point/checkpoint_access_layer_facts.py b/plugins/modules/network/check_point/checkpoint_access_layer_facts.py new file mode 100644 index 0000000000..c18de677e8 --- /dev/null +++ b/plugins/modules/network/check_point/checkpoint_access_layer_facts.py @@ -0,0 +1,101 @@ +#!/usr/bin/python +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'network'} + + +DOCUMENTATION = ''' +--- +module: checkpoint_access_layer_facts +short_description: Get access layer facts on Check Point over Web Services API +description: + - Get access layer facts on Check Point devices. + All operations are performed over Web Services API. +author: "Ansible by Red Hat (@rcarrillocruz)" +options: + uid: + description: + - UID of access layer object. + type: str + name: + description: + - Name of the access layer object. + type: str +''' + +EXAMPLES = """ +- name: Get object facts + checkpoint_access_layer_facts: +""" + +RETURN = """ +ansible_facts: + description: The checkpoint access layer facts. + returned: always. 
+ type: list +""" + + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.connection import Connection + + +def get_access_layer(module, connection): + uid = module.params['uid'] + name = module.params['name'] + + payload = {} + + if uid: + payload = {'uid': uid} + code, result = connection.send_request('/web_api/show-access-layer', payload) + elif name: + payload = {'name': name} + code, result = connection.send_request('/web_api/show-access-layer', payload) + else: + code, result = connection.send_request('/web_api/show-access-layers', payload) + + return code, result + + +def main(): + argument_spec = dict( + uid=dict(type='str', default=None), + name=dict(type='str', default=None) + ) + + module = AnsibleModule(argument_spec=argument_spec) + connection = Connection(module._socket_path) + + code, response = get_access_layer(module, connection) + + if code == 200: + module.exit_json(ansible_facts=dict(checkpoint_access_layers=response)) + else: + module.fail_json(msg='Check Point device returned error {0} with message {1}'.format(code, response)) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/check_point/checkpoint_access_rule.py b/plugins/modules/network/check_point/checkpoint_access_rule.py new file mode 100644 index 0000000000..9aa78a53a9 --- /dev/null +++ b/plugins/modules/network/check_point/checkpoint_access_rule.py @@ -0,0 +1,274 @@ +#!/usr/bin/python +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'network'} + + +DOCUMENTATION = ''' +--- +module: checkpoint_access_rule +short_description: Manages access rules on Check Point over Web Services API +description: + - Manages access rules on Check Point devices including creating, updating and removing access rule objects. + All operations are performed over Web Services API. +author: "Ansible by Red Hat (@rcarrillocruz)" +options: + name: + description: + - Name of the access rule. + type: str + layer: + description: + - Layer to attach the access rule to. + required: True + type: str + position: + description: + - Position of the access rule. + type: str + source: + description: + - Source object of the access rule. + type: str + destination: + description: + - Destination object of the access rule. + type: str + action: + description: + - Action of the access rule (accept, drop, inform, etc.). + type: str + default: drop + enabled: + description: + - Enabled or disabled flag. + type: bool + default: True + state: + description: + - State of the access rule (present or absent). Defaults to present. + type: str + default: present + auto_publish_session: + description: + - Publish the current session if changes have been performed + after the task completes.
+ type: bool + default: 'yes' + auto_install_policy: + description: + - Install the package policy if changes have been performed + after the task completes. + type: bool + default: 'yes' + policy_package: + description: + - Package policy name to be installed. + type: str + default: 'standard' + targets: + description: + - Targets to install the package policy on. + type: list +''' + +EXAMPLES = """ +- name: Create access rule + checkpoint_access_rule: + layer: Network + name: "Drop attacker" + position: top + source: attacker + destination: Any + action: Drop + +- name: Delete access rule + checkpoint_access_rule: + layer: Network + name: "Drop attacker" +""" + +RETURN = """ +checkpoint_access_rules: + description: The checkpoint access rule object created or updated. + returned: always, except when deleting the access rule. + type: list +""" + + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.connection import Connection +from ansible_collections.check_point.mgmt.plugins.module_utils.network.checkpoint.checkpoint import checkpoint_argument_spec, publish, install_policy + + +def get_access_rule(module, connection): + name = module.params['name'] + layer = module.params['layer'] + + payload = {'name': name, 'layer': layer} + + code, response = connection.send_request('/web_api/show-access-rule', payload) + + return code, response + + +def create_access_rule(module, connection): + name = module.params['name'] + layer = module.params['layer'] + position = module.params['position'] + source = module.params['source'] + destination = module.params['destination'] + action = module.params['action'] + + payload = {'name': name, + 'layer': layer, + 'position': position, + 'source': source, + 'destination': destination, + 'action': action} + + code, response = connection.send_request('/web_api/add-access-rule', payload) + + return code, response + + +def update_access_rule(module, connection): + name = module.params['name'] + layer = module.params['layer'] + position = module.params['position'] + source = module.params['source'] + destination = module.params['destination'] + action = module.params['action'] + enabled = module.params['enabled'] + + payload = {'name': name, + 'layer': layer, + 'position': position, + 'source': source, + 'destination': destination, + 'action': action, + 'enabled': enabled} + + code, response = connection.send_request('/web_api/set-access-rule', payload) + + return code, response + + +def delete_access_rule(module, connection): + name = module.params['name'] + layer = module.params['layer'] + + payload = {'name': name, + 'layer': layer, + } + + code, response = connection.send_request('/web_api/delete-access-rule', payload) + + return code, response + + +def needs_update(module, access_rule): + res = False + + if module.params['source'] and module.params['source'] != access_rule['source'][0]['name']: + res = True + if module.params['destination'] and module.params['destination'] != access_rule['destination'][0]['name']: + res = True + if module.params['action'] != access_rule['action']['name']: + res = True + if module.params['enabled'] != access_rule['enabled']: + res = True + + return res + + +def main(): + argument_spec = dict( + name=dict(type='str', required=True), + layer=dict(type='str'), + position=dict(type='str'), + source=dict(type='str'), + destination=dict(type='str'), + action=dict(type='str', default='drop'), + enabled=dict(type='bool', default=True), + state=dict(type='str', default='present') + ) + 
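+ # The branches below follow the usual Check Point pattern: show-access-rule answers 200 when the rule exists and 404 when it does not. + # With state=present the rule is then updated (when needs_update() reports drift) or created, with state=absent it is deleted, and each change can auto-publish the session and install the policy package.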
argument_spec.update(checkpoint_argument_spec) + + required_if = [('state', 'present', ('layer', 'position'))] + module = AnsibleModule(argument_spec=argument_spec, required_if=required_if) + connection = Connection(module._socket_path) + code, response = get_access_rule(module, connection) + result = {'changed': False} + + if module.params['state'] == 'present': + if code == 200: + if needs_update(module, response): + code, response = update_access_rule(module, connection) + if code != 200: + module.fail_json(msg=response) + if module.params['auto_publish_session']: + publish(connection) + + if module.params['auto_install_policy']: + install_policy(connection, module.params['policy_package'], module.params['targets']) + + result['changed'] = True + result['checkpoint_access_rules'] = response + else: + pass + elif code == 404: + code, response = create_access_rule(module, connection) + if code != 200: + module.fail_json(msg=response) + if module.params['auto_publish_session']: + publish(connection) + + if module.params['auto_install_policy']: + install_policy(connection, module.params['policy_package'], module.params['targets']) + + result['changed'] = True + result['checkpoint_access_rules'] = response + else: + if code == 200: + code, response = delete_access_rule(module, connection) + if code != 200: + module.fail_json(msg=response) + if module.params['auto_publish_session']: + publish(connection) + + if module.params['auto_install_policy']: + install_policy(connection, module.params['policy_package'], module.params['targets']) + + result['changed'] = True + result['checkpoint_access_rules'] = response + elif code == 404: + pass + + result['checkpoint_session_uid'] = connection.get_session_uid() + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/check_point/checkpoint_access_rule_facts.py b/plugins/modules/network/check_point/checkpoint_access_rule_facts.py new file mode 100644 index 0000000000..a477bd407e --- /dev/null +++ b/plugins/modules/network/check_point/checkpoint_access_rule_facts.py @@ -0,0 +1,104 @@ +#!/usr/bin/python +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'network'} + + +DOCUMENTATION = ''' +--- +module: checkpoint_access_rule_facts +short_description: Get access rules objects facts on Check Point over Web Services API +description: + - Get access rules objects facts on Check Point devices. + All operations are performed over Web Services API. +author: "Ansible by Red Hat (@rcarrillocruz)" +options: + name: + description: + - Name of the access rule. If not provided, UID is required. + type: str + uid: + description: + - UID of the access rule. If not provided, name is required. 
+ type: str + layer: + description: + - Layer the access rule is attached to. + required: True + type: str +''' + +EXAMPLES = """ +- name: Get access rule facts + checkpoint_access_rule_facts: + layer: Network + name: "Drop attacker" +""" + +RETURN = """ +ansible_facts: + description: The checkpoint access rule object facts. + returned: always. + type: list +""" + + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.connection import Connection + + +def get_access_rule(module, connection): + name = module.params['name'] + uid = module.params['uid'] + layer = module.params['layer'] + + if uid: + payload = {'uid': uid, 'layer': layer} + elif name: + payload = {'name': name, 'layer': layer} + + code, response = connection.send_request('/web_api/show-access-rule', payload) + + return code, response + + +def main(): + argument_spec = dict( + name=dict(type='str'), + uid=dict(type='str'), + layer=dict(type='str', required=True), + ) + + module = AnsibleModule(argument_spec=argument_spec) + connection = Connection(module._socket_path) + code, response = get_access_rule(module, connection) + if code == 200: + module.exit_json(ansible_facts=dict(checkpoint_access_rules=response)) + else: + module.fail_json(msg='Checkpoint device returned error {0} with message {1}'.format(code, response)) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/check_point/checkpoint_host.py b/plugins/modules/network/check_point/checkpoint_host.py new file mode 100644 index 0000000000..0ee8999a6e --- /dev/null +++ b/plugins/modules/network/check_point/checkpoint_host.py @@ -0,0 +1,215 @@ +#!/usr/bin/python +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'network'} + + +DOCUMENTATION = ''' +--- +module: checkpoint_host +short_description: Manages host objects on Check Point over Web Services API +description: + - Manages host objects on Check Point devices including creating, updating and removing host objects. + All operations are performed over Web Services API. +author: "Ansible by Red Hat (@rcarrillocruz)" +options: + name: + description: + - Name of the host object. + type: str + required: True + ip_address: + description: + - IP address of the host object. + type: str + state: + description: + - State of the host object (present or absent). Defaults to present. + type: str + default: present + auto_publish_session: + description: + - Publish the current session if changes have been performed + after the task completes. + type: bool + default: 'yes' + auto_install_policy: + description: + - Install the package policy if changes have been performed + after the task completes. + type: bool + default: 'yes' + policy_package: + description: + - Package policy name to be installed.
+ type: str + default: 'standard' + targets: + description: + - Targets to install the package policy on. + type: list +''' + +EXAMPLES = """ +- name: Create host object + checkpoint_host: + name: attacker + ip_address: 192.168.0.15 + +- name: Delete host object + checkpoint_host: + name: attacker + state: absent +""" + +RETURN = """ +checkpoint_hosts: + description: The checkpoint host object created or updated. + returned: always, except when deleting the host. + type: list +""" + + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.connection import Connection +from ansible_collections.check_point.mgmt.plugins.module_utils.network.checkpoint.checkpoint import checkpoint_argument_spec, publish, install_policy + + +def get_host(module, connection): + name = module.params['name'] + + payload = {'name': name} + + code, response = connection.send_request('/web_api/show-host', payload) + + return code, response + + +def create_host(module, connection): + name = module.params['name'] + ip_address = module.params['ip_address'] + + payload = {'name': name, + 'ip-address': ip_address} + + code, response = connection.send_request('/web_api/add-host', payload) + + return code, response + + +def update_host(module, connection): + name = module.params['name'] + ip_address = module.params['ip_address'] + + payload = {'name': name, + 'ip-address': ip_address} + + code, response = connection.send_request('/web_api/set-host', payload) + + return code, response + + +def delete_host(module, connection): + name = module.params['name'] + + payload = {'name': name} + + code, response = connection.send_request('/web_api/delete-host', payload) + + return code, response + + +def needs_update(module, host): + res = False + + if module.params['ip_address'] != host['ipv4-address']: + res = True + + return res + + +def main(): + argument_spec = dict( + name=dict(type='str', required=True), + ip_address=dict(type='str'), + state=dict(type='str', default='present') + ) + argument_spec.update(checkpoint_argument_spec) + + module = AnsibleModule(argument_spec=argument_spec) + connection = Connection(module._socket_path) + code, response = get_host(module, connection) + result = {'changed': False} + + if module.params['state'] == 'present': + if code == 200: + if needs_update(module, response): + code, response = update_host(module, connection) + if code != 200: + module.fail_json(msg=response) + if module.params['auto_publish_session']: + publish(connection) + + if module.params['auto_install_policy']: + install_policy(connection, module.params['policy_package'], module.params['targets']) + + result['changed'] = True + result['checkpoint_hosts'] = response + else: + pass + elif code == 404: + code, response = create_host(module, connection) + if code != 200: + module.fail_json(msg=response) + if module.params['auto_publish_session']: + publish(connection) + + if module.params['auto_install_policy']: + install_policy(connection, module.params['policy_package'], module.params['targets']) + + result['changed'] = True + result['checkpoint_hosts'] = response + else: + if code == 200: + # Handle deletion + code, response = delete_host(module, connection) + if code != 200: + module.fail_json(msg=response) + if module.params['auto_publish_session']: + publish(connection) + + if module.params['auto_install_policy']: + install_policy(connection, module.params['policy_package'], module.params['targets']) + + result['changed'] = True + result['checkpoint_hosts'] = response + elif code == 404: + pass + 
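+ # The session UID is returned so a later checkpoint_session task can publish or discard this session by UID.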
+ result['checkpoint_session_uid'] = connection.get_session_uid() + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/check_point/checkpoint_host_facts.py b/plugins/modules/network/check_point/checkpoint_host_facts.py new file mode 100644 index 0000000000..c8a8d8aa38 --- /dev/null +++ b/plugins/modules/network/check_point/checkpoint_host_facts.py @@ -0,0 +1,99 @@ +#!/usr/bin/python +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'network'} + + +DOCUMENTATION = ''' +--- +module: checkpoint_host_facts +short_description: Get host objects facts on Check Point over Web Services API +description: + - Get host objects facts on Check Point devices. + All operations are performed over Web Services API. +author: "Ansible by Red Hat (@rcarrillocruz)" +options: + name: + description: + - Name of the host object. If name is not provided, UID is required. + type: str + uid: + description: + - UID of the host object. If UID is not provided, name is required. + type: str +''' + +EXAMPLES = """ +- name: Get host object facts + checkpoint_host_facts: + name: attacker +""" + +RETURN = """ +ansible_facts: + description: The checkpoint host object facts. + returned: always.
+ type: list +""" + + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.connection import Connection + + +def get_host(module, connection): + name = module.params['name'] + uid = module.params['uid'] + + if uid: + payload = {'uid': uid} + elif name: + payload = {'name': name} + + code, result = connection.send_request('/web_api/show-host', payload) + + return code, result + + +def main(): + argument_spec = dict( + name=dict(type='str'), + uid=dict(type='str'), + ) + + required_one_of = [('name', 'uid')] + module = AnsibleModule(argument_spec=argument_spec, required_one_of=required_one_of) + connection = Connection(module._socket_path) + + code, response = get_host(module, connection) + + if code == 200: + module.exit_json(ansible_facts=dict(checkpoint_hosts=response)) + else: + module.fail_json(msg='Checkpoint device returned error {0} with message {1}'.format(code, response)) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/check_point/checkpoint_object_facts.py b/plugins/modules/network/check_point/checkpoint_object_facts.py new file mode 100644 index 0000000000..96d0145126 --- /dev/null +++ b/plugins/modules/network/check_point/checkpoint_object_facts.py @@ -0,0 +1,113 @@ +#!/usr/bin/python +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'network'} + + +DOCUMENTATION = ''' +--- +module: checkpoint_object_facts +short_description: Get object facts on Check Point over Web Services API +description: + - Get object facts on Check Point devices. + All operations are performed over Web Services API. +author: "Ansible by Red Hat (@rcarrillocruz)" +options: + uid: + description: + - UID of the object. If UID is not provided, it will do a full search + which can be filtered with the filter argument. + object_filter: + description: + - Filter expression for search. It accepts AND/OR logical operators and performs a textual + and IP address search. To search only by IP address, set ip_only argument to True. + ip_only: + description: + - Filter only by IP address. + type: bool + default: false + object_type: + description: + - Type of the object to search. Must be a valid API resource name. + type: str +''' + +EXAMPLES = """ +- name: Get object facts + checkpoint_object_facts: + object_filter: 192.168.30.30 + ip_only: yes +""" + +RETURN = """ +ansible_facts: + description: The checkpoint object facts. + returned: always.
+ type: list +""" + + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.connection import Connection + + +def get_object(module, connection): + uid = module.params['uid'] + object_filter = module.params['object_filter'] + ip_only = module.params['ip_only'] + object_type = module.params['object_type'] + + if uid: + payload = {'uid': uid} + code, result = connection.send_request('/web_api/show-object', payload) + else: + payload = {'filter': object_filter, 'ip-only': ip_only, 'type': object_type} + code, result = connection.send_request('/web_api/show-objects', payload) + + return code, result + + +def main(): + argument_spec = dict( + uid=dict(type='str', default=None), + object_filter=dict(type='str', default=None), + ip_only=dict(type='bool', default=False), + object_type=dict(type='str', default=None) + ) + + module = AnsibleModule(argument_spec=argument_spec) + connection = Connection(module._socket_path) + + code, response = get_object(module, connection) + + if code == 200: + module.exit_json(ansible_facts=dict(checkpoint_objects=response)) + else: + module.fail_json(msg='Check Point device returned error {0} with message {1}'.format(code, response)) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/check_point/checkpoint_run_script.py b/plugins/modules/network/check_point/checkpoint_run_script.py new file mode 100644 index 0000000000..abee6b5093 --- /dev/null +++ b/plugins/modules/network/check_point/checkpoint_run_script.py @@ -0,0 +1,110 @@ +#!/usr/bin/python +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'network'} + + +DOCUMENTATION = ''' +--- +module: checkpoint_run_script +short_description: Run scripts on Check Point devices over Web Services API +description: + - Run scripts on Check Point devices. + All operations are performed over Web Services API. +author: "Ansible by Red Hat (@rcarrillocruz)" +options: + script_name: + description: + - Name of the script. + type: str + required: True + script: + description: + - Script body contents. + type: str + required: True + targets: + description: + - Targets the script should be run against. Can reference either name or UID. + type: list + required: True +''' + +EXAMPLES = """ +- name: Run script + checkpoint_run_script: + script_name: "List root" + script: ls -l / + targets: + - mycheckpointgw +""" + +RETURN = """ +checkpoint_run_script: + description: The checkpoint run script output. + returned: always. 
+ type: list +""" + + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.connection import Connection + + +def run_script(module, connection): + script_name = module.params['script_name'] + script = module.params['script'] + targets = module.params['targets'] + + payload = {'script-name': script_name, + 'script': script, + 'targets': targets} + + code, response = connection.send_request('/web_api/run-script', payload) + + return code, response + + +def main(): + argument_spec = dict( + script_name=dict(type='str', required=True), + script=dict(type='str', required=True), + targets=dict(type='list', required=True) + ) + + module = AnsibleModule(argument_spec=argument_spec) + connection = Connection(module._socket_path) + code, response = run_script(module, connection) + result = {'changed': True} + + if code == 200: + result['checkpoint_run_script'] = response + else: + module.fail_json(msg='Checkpoint device returned error {0} with message {1}'.format(code, response)) + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/check_point/checkpoint_session.py b/plugins/modules/network/check_point/checkpoint_session.py new file mode 100644 index 0000000000..e1d45cd03c --- /dev/null +++ b/plugins/modules/network/check_point/checkpoint_session.py @@ -0,0 +1,114 @@ +#!/usr/bin/python +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'network'} + + +DOCUMENTATION = ''' +--- +module: checkpoint_session +short_description: Manages session objects on Check Point over Web Services API +description: + - Manages session objects on Check Point devices performing actions like publish and discard. + All operations are performed over Web Services API. +author: "Ansible by Red Hat (@rcarrillocruz)" +options: + uid: + description: + - UID of the session. + type: str + required: True + state: + description: + - Action to perform on the session object. Valid choices are published and discarded. + type: str + choices: ['published', 'discarded'] + default: published +''' + +EXAMPLES = """ +- name: Publish session + checkpoint_session: + uid: 7a13a360-9b24-40d7-acd3-5b50247be33e + state: published + +- name: Discard session + checkpoint_session: + uid: 7a13a360-9b24-40d7-acd3-5b50247be33e + state: discarded +""" + +RETURN = """ +checkpoint_session: + description: The checkpoint session output per return from API. It will differ depending on action. + returned: always. 
+ type: list +""" + + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.connection import Connection + + +def get_session(module, connection): + payload = {'uid': module.params['uid']} + + code, result = connection.send_request('/web_api/show-session', payload) + + return code, result + + +def main(): + argument_spec = dict( + uid=dict(type='str', default=None), + state=dict(type='str', default='published', choices=['published', 'discarded']) + ) + + module = AnsibleModule(argument_spec=argument_spec) + connection = Connection(module._socket_path) + code, response = get_session(module, connection) + result = {'changed': False} + + if code == 200: + result['changed'] = True + payload = None + + if module.params['uid']: + payload = {'uid': module.params['uid']} + + if module.params['state'] == 'published': + code, response = connection.send_request('/web_api/publish', payload) + else: + code, response = connection.send_request('/web_api/discard', payload) + if code != 200: + module.fail_json(msg=response) + result['checkpoint_session'] = response + else: + module.fail_json(msg='Check Point device returned error {0} with message {1}'.format(code, response)) + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/check_point/checkpoint_task_facts.py b/plugins/modules/network/check_point/checkpoint_task_facts.py new file mode 100644 index 0000000000..632113a534 --- /dev/null +++ b/plugins/modules/network/check_point/checkpoint_task_facts.py @@ -0,0 +1,91 @@ +#!/usr/bin/python +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'network'} + + +DOCUMENTATION = ''' +--- +module: checkpoint_task_facts +short_description: Get task objects facts on Check Point over Web Services API +description: + - Get task objects facts on Check Point devices. + All operations are performed over Web Services API. +author: "Ansible by Red Hat (@rcarrillocruz)" +options: + task_id: + description: + - ID of the task object. + type: str + required: True +''' + +EXAMPLES = """ +- name: Get task facts + checkpoint_task_facts: + task_id: 2eec70e5-78a8-4bdb-9a76-cfb5601d0bcb +""" + +RETURN = """ +ansible_facts: + description: The checkpoint task facts. + returned: always. 
+ type: list +""" + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.connection import Connection + + +def get_session(module, connection): + payload = {'uid': module.params['uid']} + + code, result = connection.send_request('/web_api/show-session', payload) + + return code, result + + +def main(): + argument_spec = dict( + uid=dict(type='str', default=None), + state=dict(type='str', default='published', choices=['published', 'discarded']) + ) + + module = AnsibleModule(argument_spec=argument_spec) + connection = Connection(module._socket_path) + code, response = get_session(module, connection) + result = {'changed': False} + + if code == 200: + result['changed'] = True + payload = None + + if module.params['uid']: + payload = {'uid': module.params['uid']} + + if module.params['state'] == 'published': + code, response = connection.send_request('/web_api/publish', payload) + else: + code, response = connection.send_request('/web_api/discard', payload) + if code != 200: + module.fail_json(msg=response) + result['checkpoint_session'] = response + else: + module.fail_json(msg='Check Point device returned error {0} with message {1}'.format(code, response)) + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/check_point/checkpoint_task_facts.py b/plugins/modules/network/check_point/checkpoint_task_facts.py new file mode 100644 index 0000000000..632113a534 --- /dev/null +++ b/plugins/modules/network/check_point/checkpoint_task_facts.py @@ -0,0 +1,91 @@ +#!/usr/bin/python +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'network'} + + +DOCUMENTATION = ''' +--- +module: checkpoint_task_facts +short_description: Get task objects facts on Check Point over Web Services API +description: + - Get task objects facts on Check Point devices. + All operations are performed over Web Services API. +author: "Ansible by Red Hat (@rcarrillocruz)" +options: + task_id: + description: + - ID of the task object. If not provided, facts about all tasks are returned. + type: str +''' + +EXAMPLES = """ +- name: Get task facts + checkpoint_task_facts: + task_id: 2eec70e5-78a8-4bdb-9a76-cfb5601d0bcb +""" + +RETURN = """ +ansible_facts: + description: The checkpoint task facts. + returned: always.
+ type: dict +""" + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.check_point.mgmt.plugins.module_utils.network.checkpoint.checkpoint import checkpoint_argument_spec_for_commands, api_command + + +def main(): + argument_spec = dict( + uid=dict(type='str') + ) + argument_spec.update(checkpoint_argument_spec_for_commands) + + module = AnsibleModule(argument_spec=argument_spec) + + command = "publish" + + result = api_command(module, command) + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/cloudengine/ce_aaa_server.py b/plugins/modules/network/cloudengine/ce_aaa_server.py new file mode 100644 index 0000000000..c7da3fdeb3 --- /dev/null +++ b/plugins/modules/network/cloudengine/ce_aaa_server.py @@ -0,0 +1,2180 @@ +#!/usr/bin/python +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = r''' +--- +module: ce_aaa_server +short_description: Manages AAA server global configuration on HUAWEI CloudEngine switches. +description: + - Manages AAA server global configuration on HUAWEI CloudEngine switches. +author: + - wangdezhuang (@QijunPan) +notes: + - This module requires the netconf system service be enabled on the remote device being managed. + - Recommended connection is C(netconf). + - This module also works with C(local) connections for legacy playbooks. +options: + state: + description: + - Specify desired state of the resource. + type: str + choices: [ absent, present ] + default: present + authen_scheme_name: + description: + - Name of an authentication scheme. + The value is a string of 1 to 32 characters. + type: str + first_authen_mode: + description: + - Preferred authentication mode. + type: str + choices: ['invalid', 'local', 'hwtacacs', 'radius', 'none'] + default: local + author_scheme_name: + description: + - Name of an authorization scheme. + The value is a string of 1 to 32 characters. + type: str + first_author_mode: + description: + - Preferred authorization mode. + type: str + choices: ['invalid', 'local', 'hwtacacs', 'if-authenticated', 'none'] + default: local + acct_scheme_name: + description: + - Accounting scheme name. + The value is a string of 1 to 32 characters. + type: str + accounting_mode: + description: + - Accounting Mode. + type: str + choices: ['invalid', 'hwtacacs', 'radius', 'none'] + default: none + domain_name: + description: + - Name of a domain. + The value is a string of 1 to 64 characters. + type: str + radius_server_group: + description: + - RADIUS server group's name. + The value is a string of 1 to 32 case-insensitive characters. + type: str + hwtacas_template: + description: + - Name of a HWTACACS template. + The value is a string of 1 to 32 case-insensitive characters. 
+ type: str + local_user_group: + description: + - Name of the user group where the user belongs. The user inherits all the rights of the user group. + The value is a string of 1 to 32 characters. + type: str +''' + +EXAMPLES = r''' + +- name: AAA server test + hosts: cloudengine + connection: local + gather_facts: no + vars: + cli: + host: "{{ inventory_hostname }}" + port: "{{ ansible_ssh_port }}" + username: "{{ username }}" + password: "{{ password }}" + transport: cli + + tasks: + + - name: "Radius authentication Server Basic settings" + ce_aaa_server: + state: present + authen_scheme_name: test1 + first_authen_mode: radius + radius_server_group: test2 + provider: "{{ cli }}" + + - name: "Undo radius authentication Server Basic settings" + ce_aaa_server: + state: absent + authen_scheme_name: test1 + first_authen_mode: radius + radius_server_group: test2 + provider: "{{ cli }}" + + - name: "Hwtacacs accounting Server Basic settings" + ce_aaa_server: + state: present + acct_scheme_name: test1 + accounting_mode: hwtacacs + hwtacas_template: test2 + provider: "{{ cli }}" + + - name: "Undo hwtacacs accounting Server Basic settings" + ce_aaa_server: + state: absent + acct_scheme_name: test1 + accounting_mode: hwtacacs + hwtacas_template: test2 + provider: "{{ cli }}" +''' + +RETURN = ''' +changed: + description: check to see if a change was made on the device + returned: always + type: bool + sample: true +proposed: + description: k/v pairs of parameters passed into module + returned: always + type: dict + sample: {"accounting_mode": "hwtacacs", "acct_scheme_name": "test1", + "hwtacas_template": "test2", "state": "present"} +existing: + description: k/v pairs of existing aaa server + returned: always + type: dict + sample: {"accounting scheme": [["hwtacacs"], ["default"]], + "hwtacacs template": ["huawei"]} +end_state: + description: k/v pairs of aaa params after module execution + returned: always + type: dict + sample: {"accounting scheme": [["hwtacacs", "test1"]], + "hwtacacs template": ["huawei", "test2"]} +updates: + description: command sent to the device + returned: always + type: list + sample: ["accounting-scheme test1", + "accounting-mode hwtacacs", + "hwtacacs server template test2", + "hwtacacs enable"] +''' + +import re +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.network.cloudengine.ce import get_nc_config, set_nc_config, ce_argument_spec + + +SUCCESS = """success""" +FAILED = """failed""" + +INVALID_SCHEME_CHAR = [' ', '/', '\\', ':', '*', '?', '"', '|', '<', '>'] +INVALID_DOMAIN_CHAR = [' ', '*', '?', '"', '\''] +INVALID_GROUP_CHAR = ['/', '\\', ':', '*', '?', '"', '|', '<', '>'] + + +# get authentication scheme +CE_GET_AUTHENTICATION_SCHEME = """ + + + + + + + + + + + +""" + +# merge authentication scheme +CE_MERGE_AUTHENTICATION_SCHEME = """ + + + + + %s + %s + invalid + + + + +""" + +# create authentication scheme +CE_CREATE_AUTHENTICATION_SCHEME = """ + + + + + %s + %s + invalid + + + + +""" + +# delete authentication scheme +CE_DELETE_AUTHENTICATION_SCHEME = """ + + + + + %s + %s + invalid + + + + +""" + +# get authorization scheme +CE_GET_AUTHORIZATION_SCHEME = """ + + + + + + + + + + + +""" + +# merge authorization scheme +CE_MERGE_AUTHORIZATION_SCHEME = """ + + + + + %s + %s + invalid + + + + +""" + +# create authorization scheme +CE_CREATE_AUTHORIZATION_SCHEME = """ + + + + + %s + %s + invalid + + + + +""" + +# delete authorization scheme +CE_DELETE_AUTHORIZATION_SCHEME = """ + + + + + %s + %s 
+ invalid + + + + +""" + +# get accounting scheme +CE_GET_ACCOUNTING_SCHEME = """ + + + + + + + + + + +""" + +# merge accounting scheme +CE_MERGE_ACCOUNTING_SCHEME = """ + + + + + %s + %s + + + + +""" + +# create accounting scheme +CE_CREATE_ACCOUNTING_SCHEME = """ + + + + + %s + %s + + + + +""" + +# delete accounting scheme +CE_DELETE_ACCOUNTING_SCHEME = """ + + + + + %s + %s + + + + +""" + +# get authentication domain +CE_GET_AUTHENTICATION_DOMAIN = """ + + + + + + + + + + +""" + +# merge authentication domain +CE_MERGE_AUTHENTICATION_DOMAIN = """ + + + + + %s + %s + + + + +""" + +# create authentication domain +CE_CREATE_AUTHENTICATION_DOMAIN = """ + + + + + %s + %s + + + + +""" + +# delete authentication domain +CE_DELETE_AUTHENTICATION_DOMAIN = """ + + + + + %s + %s + + + + +""" + +# get authorization domain +CE_GET_AUTHORIZATION_DOMAIN = """ + + + + + + + + + + +""" + +# merge authorization domain +CE_MERGE_AUTHORIZATION_DOMAIN = """ + + + + + %s + %s + + + + +""" + +# create authorization domain +CE_CREATE_AUTHORIZATION_DOMAIN = """ + + + + + %s + %s + + + + +""" + +# delete authorization domain +CE_DELETE_AUTHORIZATION_DOMAIN = """ + + + + + %s + %s + + + + +""" + +# get accounting domain +CE_GET_ACCOUNTING_DOMAIN = """ + + + + + + + + + + +""" + +# merge accounting domain +CE_MERGE_ACCOUNTING_DOMAIN = """ + + + + + %s + %s + + + + +""" + +# create accounting domain +CE_CREATE_ACCOUNTING_DOMAIN = """ + + + + + %s + %s + + + + +""" + +# delete accounting domain +CE_DELETE_ACCOUNTING_DOMAIN = """ + + + + + %s + %s + + + + +""" + +# get radius template +CE_GET_RADIUS_TEMPLATE = """ + + + + + + + + + + + +""" + +# merge radius template +CE_MERGE_RADIUS_TEMPLATE = """ + + + + + %s + 3 + 5 + + + + +""" + +# create radius template +CE_CREATE_RADIUS_TEMPLATE = """ + + + + + %s + 3 + 5 + + + + +""" + +# delete radius template +CE_DELETE_RADIUS_TEMPLATE = """ + + + + + %s + 3 + 5 + + + + +""" + +# get hwtacacs template +CE_GET_HWTACACS_TEMPLATE = """ + + + + + + + + + + + +""" + +# merge hwtacacs template +CE_MERGE_HWTACACS_TEMPLATE = """ + + + + + %s + true + 5 + + + + +""" + +# create hwtacacs template +CE_CREATE_HWTACACS_TEMPLATE = """ + + + + + %s + true + 5 + + + + +""" + +# delete hwtacacs template +CE_DELETE_HWTACACS_TEMPLATE = """ + + + + + %s + + + + +""" + +# get radius client +CE_GET_RADIUS_CLIENT = """ + + + + + + + + + +""" + +# merge radius client +CE_MERGE_RADIUS_CLIENT = """ + + + + %s + + + +""" + +# get hwtacacs global config +CE_GET_HWTACACS_GLOBAL_CFG = """ + + + + + + + + + +""" + +# merge hwtacacs global config +CE_MERGE_HWTACACS_GLOBAL_CFG = """ + + + + %s + + + +""" + +# get local user group +CE_GET_LOCAL_USER_GROUP = """ + + + + + + + + + +""" +# merge local user group +CE_MERGE_LOCAL_USER_GROUP = """ + + + + + %s + + + + +""" +# delete local user group +CE_DELETE_LOCAL_USER_GROUP = """ + + + + + %s + + + + +""" + + +class AaaServer(object): + """ Manages aaa configuration """ + + def netconf_get_config(self, **kwargs): + """ Get configure by netconf """ + + module = kwargs["module"] + conf_str = kwargs["conf_str"] + + xml_str = get_nc_config(module, conf_str) + + return xml_str + + def netconf_set_config(self, **kwargs): + """ Set configure by netconf """ + + module = kwargs["module"] + conf_str = kwargs["conf_str"] + + recv_xml = set_nc_config(module, conf_str) + + return recv_xml + + def get_authentication_scheme(self, **kwargs): + """ Get scheme of authentication """ + + module = kwargs["module"] + conf_str = CE_GET_AUTHENTICATION_SCHEME + + xml_str = 
self.netconf_get_config(module=module, conf_str=conf_str) + result = list() + + if "" in xml_str: + return result + else: + re_find = re.findall( + r'.*(.*).*\s*' + r'(.*).*\s*' + r'(.*).*\s*', xml_str) + + if re_find: + return re_find + else: + return result + + def get_authentication_domain(self, **kwargs): + """ Get domain of authentication """ + + module = kwargs["module"] + conf_str = CE_GET_AUTHENTICATION_DOMAIN + + xml_str = self.netconf_get_config(module=module, conf_str=conf_str) + + result = list() + + if "" in xml_str: + return result + else: + re_find = re.findall( + r'.*(.*).*\s*' + r'(.*).*', xml_str) + + if re_find: + return re_find + else: + return result + + def merge_authentication_scheme(self, **kwargs): + """ Merge scheme of authentication """ + + authen_scheme_name = kwargs["authen_scheme_name"] + first_authen_mode = kwargs["first_authen_mode"] + module = kwargs["module"] + conf_str = CE_MERGE_AUTHENTICATION_SCHEME % ( + authen_scheme_name, first_authen_mode) + + xml = self.netconf_set_config(module=module, conf_str=conf_str) + + if "" not in xml: + module.fail_json(msg='Error: Merge authentication scheme failed.') + + cmds = [] + cmd = "authentication-scheme %s" % authen_scheme_name + cmds.append(cmd) + cmd = "authentication-mode %s" % first_authen_mode + cmds.append(cmd) + + return cmds + + def merge_authentication_domain(self, **kwargs): + """ Merge domain of authentication """ + + domain_name = kwargs["domain_name"] + authen_scheme_name = kwargs["authen_scheme_name"] + module = kwargs["module"] + conf_str = CE_MERGE_AUTHENTICATION_DOMAIN % ( + domain_name, authen_scheme_name) + + xml = self.netconf_set_config(module=module, conf_str=conf_str) + + if "" not in xml: + module.fail_json(msg='Error: Merge authentication domain failed.') + + cmds = [] + cmd = "domain %s" % domain_name + cmds.append(cmd) + cmd = "authentication-scheme %s" % authen_scheme_name + cmds.append(cmd) + + return cmds + + def create_authentication_scheme(self, **kwargs): + """ Create scheme of authentication """ + + authen_scheme_name = kwargs["authen_scheme_name"] + first_authen_mode = kwargs["first_authen_mode"] + module = kwargs["module"] + conf_str = CE_CREATE_AUTHENTICATION_SCHEME % ( + authen_scheme_name, first_authen_mode) + + xml = self.netconf_set_config(module=module, conf_str=conf_str) + + if "" not in xml: + module.fail_json(msg='Error: Create authentication scheme failed.') + + cmds = [] + cmd = "authentication-scheme %s" % authen_scheme_name + cmds.append(cmd) + cmd = "authentication-mode %s" % first_authen_mode + cmds.append(cmd) + + return cmds + + def create_authentication_domain(self, **kwargs): + """ Create domain of authentication """ + + domain_name = kwargs["domain_name"] + authen_scheme_name = kwargs["authen_scheme_name"] + module = kwargs["module"] + conf_str = CE_CREATE_AUTHENTICATION_DOMAIN % ( + domain_name, authen_scheme_name) + + xml = self.netconf_set_config(module=module, conf_str=conf_str) + + if "" not in xml: + module.fail_json(msg='Error: Create authentication domain failed.') + + cmds = [] + cmd = "domain %s" % domain_name + cmds.append(cmd) + cmd = "authentication-scheme %s" % authen_scheme_name + cmds.append(cmd) + + return cmds + + def delete_authentication_scheme(self, **kwargs): + """ Delete scheme of authentication """ + + authen_scheme_name = kwargs["authen_scheme_name"] + first_authen_mode = kwargs["first_authen_mode"] + module = kwargs["module"] + + if authen_scheme_name == "default": + return SUCCESS + + conf_str = CE_DELETE_AUTHENTICATION_SCHEME % ( 
+ authen_scheme_name, first_authen_mode) + + xml = self.netconf_set_config(module=module, conf_str=conf_str) + + if "" not in xml: + module.fail_json(msg='Error: Delete authentication scheme failed.') + + cmds = [] + cmd = "undo authentication-scheme %s" % authen_scheme_name + cmds.append(cmd) + cmd = "authentication-mode none" + cmds.append(cmd) + + return cmds + + def delete_authentication_domain(self, **kwargs): + """ Delete domain of authentication """ + + domain_name = kwargs["domain_name"] + authen_scheme_name = kwargs["authen_scheme_name"] + module = kwargs["module"] + + if domain_name == "default": + return SUCCESS + + conf_str = CE_DELETE_AUTHENTICATION_DOMAIN % ( + domain_name, authen_scheme_name) + + xml = self.netconf_set_config(module=module, conf_str=conf_str) + + if "" not in xml: + module.fail_json(msg='Error: Delete authentication domain failed.') + + cmds = [] + cmd = "undo authentication-scheme" + cmds.append(cmd) + cmd = "undo domain %s" % domain_name + cmds.append(cmd) + + return cmds + + def get_authorization_scheme(self, **kwargs): + """ Get scheme of authorization """ + + module = kwargs["module"] + conf_str = CE_GET_AUTHORIZATION_SCHEME + + xml_str = self.netconf_get_config(module=module, conf_str=conf_str) + result = list() + + if "" in xml_str: + return result + else: + re_find = re.findall( + r'.*(.*).*\s*' + r'(.*).*\s*' + r'(.*).*\s*', xml_str) + + if re_find: + return re_find + else: + return result + + def get_authorization_domain(self, **kwargs): + """ Get domain of authorization """ + + module = kwargs["module"] + conf_str = CE_GET_AUTHORIZATION_DOMAIN + + xml_str = self.netconf_get_config(module=module, conf_str=conf_str) + + result = list() + + if "" in xml_str: + return result + else: + re_find = re.findall( + r'.*(.*).*\s*' + r'(.*).*', xml_str) + + if re_find: + return re_find + else: + return result + + def merge_authorization_scheme(self, **kwargs): + """ Merge scheme of authorization """ + + author_scheme_name = kwargs["author_scheme_name"] + first_author_mode = kwargs["first_author_mode"] + module = kwargs["module"] + conf_str = CE_MERGE_AUTHORIZATION_SCHEME % ( + author_scheme_name, first_author_mode) + + xml = self.netconf_set_config(module=module, conf_str=conf_str) + + if "" not in xml: + module.fail_json(msg='Error: Merge authorization scheme failed.') + + cmds = [] + cmd = "authorization-scheme %s" % author_scheme_name + cmds.append(cmd) + cmd = "authorization-mode %s" % first_author_mode + cmds.append(cmd) + + return cmds + + def merge_authorization_domain(self, **kwargs): + """ Merge domain of authorization """ + + domain_name = kwargs["domain_name"] + author_scheme_name = kwargs["author_scheme_name"] + module = kwargs["module"] + conf_str = CE_MERGE_AUTHORIZATION_DOMAIN % ( + domain_name, author_scheme_name) + + xml = self.netconf_set_config(module=module, conf_str=conf_str) + + if "" not in xml: + module.fail_json(msg='Error: Merge authorization domain failed.') + + cmds = [] + cmd = "domain %s" % domain_name + cmds.append(cmd) + cmd = "authorization-scheme %s" % author_scheme_name + cmds.append(cmd) + + return cmds + + def create_authorization_scheme(self, **kwargs): + """ Create scheme of authorization """ + + author_scheme_name = kwargs["author_scheme_name"] + first_author_mode = kwargs["first_author_mode"] + module = kwargs["module"] + conf_str = CE_CREATE_AUTHORIZATION_SCHEME % ( + author_scheme_name, first_author_mode) + + xml = self.netconf_set_config(module=module, conf_str=conf_str) + + if "" not in xml: + 
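+            # set_nc_config returns the raw rpc-reply; a successful netconf
+            # edit carries an <ok/> element, so its absence is treated as a
+            # failure here, as in the other merge/create/delete helpers.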
module.fail_json(msg='Error: Create authorization scheme failed.') + + cmds = [] + cmd = "authorization-scheme %s" % author_scheme_name + cmds.append(cmd) + cmd = "authorization-mode %s" % first_author_mode + cmds.append(cmd) + + return cmds + + def create_authorization_domain(self, **kwargs): + """ Create domain of authorization """ + + domain_name = kwargs["domain_name"] + author_scheme_name = kwargs["author_scheme_name"] + module = kwargs["module"] + conf_str = CE_CREATE_AUTHORIZATION_DOMAIN % ( + domain_name, author_scheme_name) + + xml = self.netconf_set_config(module=module, conf_str=conf_str) + + if "" not in xml: + module.fail_json(msg='Error: Create authorization domain failed.') + + cmds = [] + cmd = "domain %s" % domain_name + cmds.append(cmd) + cmd = "authorization-scheme %s" % author_scheme_name + cmds.append(cmd) + + return cmds + + def delete_authorization_scheme(self, **kwargs): + """ Delete scheme of authorization """ + + author_scheme_name = kwargs["author_scheme_name"] + first_author_mode = kwargs["first_author_mode"] + module = kwargs["module"] + + if author_scheme_name == "default": + return SUCCESS + + conf_str = CE_DELETE_AUTHORIZATION_SCHEME % ( + author_scheme_name, first_author_mode) + + xml = self.netconf_set_config(module=module, conf_str=conf_str) + + if "" not in xml: + module.fail_json(msg='Error: Delete authorization scheme failed.') + + cmds = [] + cmd = "undo authorization-scheme %s" % author_scheme_name + cmds.append(cmd) + cmd = "authorization-mode none" + cmds.append(cmd) + + return cmds + + def delete_authorization_domain(self, **kwargs): + """ Delete domain of authorization """ + + domain_name = kwargs["domain_name"] + author_scheme_name = kwargs["author_scheme_name"] + module = kwargs["module"] + + if domain_name == "default": + return SUCCESS + + conf_str = CE_DELETE_AUTHORIZATION_DOMAIN % ( + domain_name, author_scheme_name) + + xml = self.netconf_set_config(module=module, conf_str=conf_str) + + if "" not in xml: + module.fail_json(msg='Error: Delete authorization domain failed.') + + cmds = [] + cmd = "undo authorization-scheme" + cmds.append(cmd) + cmd = "undo domain %s" % domain_name + cmds.append(cmd) + + return cmds + + def get_accounting_scheme(self, **kwargs): + """ Get scheme of accounting """ + + module = kwargs["module"] + conf_str = CE_GET_ACCOUNTING_SCHEME + + xml_str = self.netconf_get_config(module=module, conf_str=conf_str) + result = list() + + if "" in xml_str: + return result + else: + re_find = re.findall(r'.*(.*)\s*(.*)', xml_str) + if re_find: + return re_find + else: + return result + + def get_accounting_domain(self, **kwargs): + """ Get domain of accounting """ + + module = kwargs["module"] + conf_str = CE_GET_ACCOUNTING_DOMAIN + + xml_str = self.netconf_get_config(module=module, conf_str=conf_str) + + result = list() + + if "" in xml_str: + return result + else: + re_find = re.findall( + r'.*(.*).*\s*' + r'(.*).*', xml_str) + + if re_find: + return re_find + else: + return result + + def merge_accounting_scheme(self, **kwargs): + """ Merge scheme of accounting """ + + acct_scheme_name = kwargs["acct_scheme_name"] + accounting_mode = kwargs["accounting_mode"] + module = kwargs["module"] + conf_str = CE_MERGE_ACCOUNTING_SCHEME % ( + acct_scheme_name, accounting_mode) + + xml = self.netconf_set_config(module=module, conf_str=conf_str) + + if "" not in xml: + module.fail_json(msg='Error: Merge accounting scheme failed.') + + cmds = [] + cmd = "accounting-scheme %s" % acct_scheme_name + cmds.append(cmd) + cmd = "accounting-mode 
%s" % accounting_mode + cmds.append(cmd) + + return cmds + + def merge_accounting_domain(self, **kwargs): + """ Merge domain of accounting """ + + domain_name = kwargs["domain_name"] + acct_scheme_name = kwargs["acct_scheme_name"] + module = kwargs["module"] + conf_str = CE_MERGE_ACCOUNTING_DOMAIN % (domain_name, acct_scheme_name) + + xml = self.netconf_set_config(module=module, conf_str=conf_str) + + if "" not in xml: + module.fail_json(msg='Error: Merge accounting domain failed.') + + cmds = [] + cmd = "domain %s" % domain_name + cmds.append(cmd) + cmd = "accounting-scheme %s" % acct_scheme_name + cmds.append(cmd) + + return cmds + + def create_accounting_scheme(self, **kwargs): + """ Create scheme of accounting """ + + acct_scheme_name = kwargs["acct_scheme_name"] + accounting_mode = kwargs["accounting_mode"] + module = kwargs["module"] + conf_str = CE_CREATE_ACCOUNTING_SCHEME % ( + acct_scheme_name, accounting_mode) + + xml = self.netconf_set_config(module=module, conf_str=conf_str) + + if "" not in xml: + module.fail_json(msg='Error: Create accounting scheme failed.') + + cmds = [] + cmd = "accounting-scheme %s" % acct_scheme_name + cmds.append(cmd) + cmd = "accounting-mode %s" % accounting_mode + cmds.append(cmd) + + return cmds + + def create_accounting_domain(self, **kwargs): + """ Create domain of accounting """ + + domain_name = kwargs["domain_name"] + acct_scheme_name = kwargs["acct_scheme_name"] + module = kwargs["module"] + conf_str = CE_CREATE_ACCOUNTING_DOMAIN % ( + domain_name, acct_scheme_name) + + xml = self.netconf_set_config(module=module, conf_str=conf_str) + + if "" not in xml: + module.fail_json(msg='Error: Create accounting domain failed.') + + cmds = [] + cmd = "domain %s" % domain_name + cmds.append(cmd) + cmd = "accounting-scheme %s" % acct_scheme_name + cmds.append(cmd) + + return cmds + + def delete_accounting_scheme(self, **kwargs): + """ Delete scheme of accounting """ + + acct_scheme_name = kwargs["acct_scheme_name"] + accounting_mode = kwargs["accounting_mode"] + module = kwargs["module"] + + if acct_scheme_name == "default": + return SUCCESS + + conf_str = CE_DELETE_ACCOUNTING_SCHEME % ( + acct_scheme_name, accounting_mode) + + xml = self.netconf_set_config(module=module, conf_str=conf_str) + + if "" not in xml: + module.fail_json(msg='Error: Delete accounting scheme failed.') + + cmds = [] + cmd = "undo accounting-scheme %s" % acct_scheme_name + cmds.append(cmd) + cmd = "accounting-mode none" + cmds.append(cmd) + + return cmds + + def delete_accounting_domain(self, **kwargs): + """ Delete domain of accounting """ + + domain_name = kwargs["domain_name"] + acct_scheme_name = kwargs["acct_scheme_name"] + module = kwargs["module"] + + if domain_name == "default": + return SUCCESS + + conf_str = CE_DELETE_ACCOUNTING_DOMAIN % ( + domain_name, acct_scheme_name) + + xml = self.netconf_set_config(module=module, conf_str=conf_str) + + if "" not in xml: + module.fail_json(msg='Error: Delete accounting domain failed.') + + cmds = [] + cmd = "undo domain %s" % domain_name + cmds.append(cmd) + cmd = "undo accounting-scheme" + cmds.append(cmd) + + return cmds + + def get_radius_template(self, **kwargs): + """ Get radius template """ + + module = kwargs["module"] + conf_str = CE_GET_RADIUS_TEMPLATE + + xml_str = self.netconf_get_config(module=module, conf_str=conf_str) + + result = list() + + if "" in xml_str: + return result + else: + re_find = re.findall( + r'.*(.*).*', xml_str) + + if re_find: + return re_find + else: + return result + + def 
merge_radius_template(self, **kwargs): + """ Merge radius template """ + + radius_server_group = kwargs["radius_server_group"] + module = kwargs["module"] + conf_str = CE_MERGE_RADIUS_TEMPLATE % radius_server_group + + xml = self.netconf_set_config(module=module, conf_str=conf_str) + + if "" not in xml: + module.fail_json(msg='Error: Merge radius template failed.') + + cmds = [] + cmd = "radius server group %s" % radius_server_group + cmds.append(cmd) + + return cmds + + def create_radius_template(self, **kwargs): + """ Create radius template """ + + radius_server_group = kwargs["radius_server_group"] + module = kwargs["module"] + conf_str = CE_CREATE_RADIUS_TEMPLATE % radius_server_group + + xml = self.netconf_set_config(module=module, conf_str=conf_str) + + if "" not in xml: + module.fail_json(msg='Error: Create radius template failed.') + + cmds = [] + cmd = "radius server group %s" % radius_server_group + cmds.append(cmd) + + return cmds + + def delete_radius_template(self, **kwargs): + """ Delete radius template """ + + radius_server_group = kwargs["radius_server_group"] + module = kwargs["module"] + conf_str = CE_DELETE_RADIUS_TEMPLATE % radius_server_group + + xml = self.netconf_set_config(module=module, conf_str=conf_str) + + if "" not in xml: + module.fail_json(msg='Error: Delete radius template failed.') + + cmds = [] + cmd = "undo radius server group %s" % radius_server_group + cmds.append(cmd) + + return cmds + + def get_radius_client(self, **kwargs): + """ Get radius client """ + + module = kwargs["module"] + conf_str = CE_GET_RADIUS_CLIENT + + xml_str = self.netconf_get_config(module=module, conf_str=conf_str) + + result = list() + + if "" in xml_str: + return result + else: + re_find = re.findall( + r'.*(.*).*', xml_str) + + if re_find: + return re_find + else: + return result + + def merge_radius_client(self, **kwargs): + """ Merge radius client """ + + enable = kwargs["isEnable"] + module = kwargs["module"] + conf_str = CE_MERGE_RADIUS_CLIENT % enable + + xml = self.netconf_set_config(module=module, conf_str=conf_str) + + if "" not in xml: + module.fail_json(msg='Error: Merge radius client failed.') + + cmds = [] + if enable == "true": + cmd = "radius enable" + else: + cmd = "undo radius enable" + cmds.append(cmd) + + return cmds + + def get_hwtacacs_template(self, **kwargs): + """ Get hwtacacs template """ + + module = kwargs["module"] + conf_str = CE_GET_HWTACACS_TEMPLATE + + xml_str = self.netconf_get_config(module=module, conf_str=conf_str) + + result = list() + + if "" in xml_str: + return result + else: + re_find = re.findall( + r'.*(.*).*', xml_str) + + if re_find: + return re_find + else: + return result + + def merge_hwtacacs_template(self, **kwargs): + """ Merge hwtacacs template """ + + hwtacas_template = kwargs["hwtacas_template"] + module = kwargs["module"] + conf_str = CE_MERGE_HWTACACS_TEMPLATE % hwtacas_template + + xml = self.netconf_set_config(module=module, conf_str=conf_str) + + if "" not in xml: + module.fail_json(msg='Error: Merge hwtacacs template failed.') + + cmds = [] + cmd = "hwtacacs server template %s" % hwtacas_template + cmds.append(cmd) + + return cmds + + def create_hwtacacs_template(self, **kwargs): + """ Create hwtacacs template """ + + hwtacas_template = kwargs["hwtacas_template"] + module = kwargs["module"] + conf_str = CE_CREATE_HWTACACS_TEMPLATE % hwtacas_template + + xml = self.netconf_set_config(module=module, conf_str=conf_str) + + if "" not in xml: + module.fail_json(msg='Error: Create hwtacacs template failed.') + + cmds = [] + 
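+        # Besides pushing XML over netconf, each helper records the equivalent
+        # device CLI for the module's 'updates' output; for example, with
+        # hwtacas_template=test2 the command recorded below would be
+        # "hwtacacs server template test2".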
cmd = "hwtacacs server template %s" % hwtacas_template + cmds.append(cmd) + + return cmds + + def delete_hwtacacs_template(self, **kwargs): + """ Delete hwtacacs template """ + + hwtacas_template = kwargs["hwtacas_template"] + module = kwargs["module"] + conf_str = CE_DELETE_HWTACACS_TEMPLATE % hwtacas_template + + xml = self.netconf_set_config(module=module, conf_str=conf_str) + + if "" not in xml: + module.fail_json(msg='Error: Delete hwtacacs template failed.') + + cmds = [] + cmd = "undo hwtacacs server template %s" % hwtacas_template + cmds.append(cmd) + + return cmds + + def get_hwtacacs_global_cfg(self, **kwargs): + """ Get hwtacacs global configure """ + + module = kwargs["module"] + conf_str = CE_GET_HWTACACS_GLOBAL_CFG + + xml_str = self.netconf_get_config(module=module, conf_str=conf_str) + + result = list() + + if "" in xml_str: + return result + else: + re_find = re.findall( + r'.*(.*).*', xml_str) + + if re_find: + return re_find + else: + return result + + def merge_hwtacacs_global_cfg(self, **kwargs): + """ Merge hwtacacs global configure """ + + enable = kwargs["isEnable"] + module = kwargs["module"] + conf_str = CE_MERGE_HWTACACS_GLOBAL_CFG % enable + + xml = self.netconf_set_config(module=module, conf_str=conf_str) + + if "" not in xml: + module.fail_json(msg='Error: Merge hwtacacs global config failed.') + + cmds = [] + + if enable == "true": + cmd = "hwtacacs enable" + else: + cmd = "undo hwtacacs enable" + cmds.append(cmd) + + return cmds + + def get_local_user_group(self, **kwargs): + """ Get local user group """ + + module = kwargs["module"] + conf_str = CE_GET_LOCAL_USER_GROUP + + xml_str = self.netconf_get_config(module=module, conf_str=conf_str) + + result = list() + + if "" in xml_str: + return result + else: + re_find = re.findall( + r'.*(.*).*', xml_str) + + if re_find: + return re_find + else: + return result + + def merge_local_user_group(self, **kwargs): + """ Merge local user group """ + + local_user_group = kwargs["local_user_group"] + module = kwargs["module"] + conf_str = CE_MERGE_LOCAL_USER_GROUP % local_user_group + + xml = self.netconf_set_config(module=module, conf_str=conf_str) + + if "" not in xml: + module.fail_json(msg='Error: Merge local user group failed.') + + cmds = [] + cmd = "user-group %s" % local_user_group + cmds.append(cmd) + + return cmds + + def delete_local_user_group(self, **kwargs): + """ Delete local user group """ + + local_user_group = kwargs["local_user_group"] + module = kwargs["module"] + conf_str = CE_DELETE_LOCAL_USER_GROUP % local_user_group + + xml = self.netconf_set_config(module=module, conf_str=conf_str) + + if "" not in xml: + module.fail_json(msg='Error: Delete local user group failed.') + + cmds = [] + cmd = "undo user-group %s" % local_user_group + cmds.append(cmd) + + return cmds + + +def check_name(**kwargs): + """ Check invalid name """ + + module = kwargs["module"] + name = kwargs["name"] + invalid_char = kwargs["invalid_char"] + + for item in invalid_char: + if item in name: + module.fail_json( + msg='Error: invalid char %s is in the name %s.' 
% (item, name))
+
+
+def check_module_argument(**kwargs):
+    """ Check module argument """
+
+    module = kwargs["module"]
+
+    authen_scheme_name = module.params['authen_scheme_name']
+    author_scheme_name = module.params['author_scheme_name']
+    acct_scheme_name = module.params['acct_scheme_name']
+    domain_name = module.params['domain_name']
+    radius_server_group = module.params['radius_server_group']
+    hwtacas_template = module.params['hwtacas_template']
+    local_user_group = module.params['local_user_group']
+
+    if authen_scheme_name:
+        if len(authen_scheme_name) > 32:
+            module.fail_json(
+                msg='Error: authen_scheme_name %s '
+                    'is longer than 32 characters.' % authen_scheme_name)
+        check_name(module=module, name=authen_scheme_name,
+                   invalid_char=INVALID_SCHEME_CHAR)
+
+    if author_scheme_name:
+        if len(author_scheme_name) > 32:
+            module.fail_json(
+                msg='Error: author_scheme_name %s '
+                    'is longer than 32 characters.' % author_scheme_name)
+        check_name(module=module, name=author_scheme_name,
+                   invalid_char=INVALID_SCHEME_CHAR)
+
+    if acct_scheme_name:
+        if len(acct_scheme_name) > 32:
+            module.fail_json(
+                msg='Error: acct_scheme_name %s '
+                    'is longer than 32 characters.' % acct_scheme_name)
+        check_name(module=module, name=acct_scheme_name,
+                   invalid_char=INVALID_SCHEME_CHAR)
+
+    if domain_name:
+        if len(domain_name) > 64:
+            module.fail_json(
+                msg='Error: domain_name %s '
+                    'is longer than 64 characters.' % domain_name)
+        check_name(module=module, name=domain_name,
+                   invalid_char=INVALID_DOMAIN_CHAR)
+        if domain_name == "-" or domain_name == "--":
+            module.fail_json(msg='Error: domain_name %s '
+                                 'is invalid.' % domain_name)
+
+    if radius_server_group and len(radius_server_group) > 32:
+        module.fail_json(msg='Error: radius_server_group %s '
+                             'is longer than 32 characters.' % radius_server_group)
+
+    if hwtacas_template and len(hwtacas_template) > 32:
+        module.fail_json(
+            msg='Error: hwtacas_template %s '
+                'is longer than 32 characters.' % hwtacas_template)
+
+    if local_user_group:
+        if len(local_user_group) > 32:
+            module.fail_json(
+                msg='Error: local_user_group %s '
+                    'is longer than 32 characters.'
% local_user_group) + check_name(module=module, name=local_user_group, invalid_char=INVALID_GROUP_CHAR) + + +def main(): + """ Module main """ + + argument_spec = dict( + state=dict(choices=['present', 'absent'], default='present'), + authen_scheme_name=dict(type='str'), + first_authen_mode=dict(default='local', choices=['invalid', 'local', 'hwtacacs', 'radius', 'none']), + author_scheme_name=dict(type='str'), + first_author_mode=dict(default='local', choices=['invalid', 'local', 'hwtacacs', 'if-authenticated', 'none']), + acct_scheme_name=dict(type='str'), + accounting_mode=dict(default='none', choices=['invalid', 'hwtacacs', 'radius', 'none']), + domain_name=dict(type='str'), + radius_server_group=dict(type='str'), + hwtacas_template=dict(type='str'), + local_user_group=dict(type='str') + ) + + argument_spec.update(ce_argument_spec) + + module = AnsibleModule(argument_spec=argument_spec, + supports_check_mode=True) + + check_module_argument(module=module) + + changed = False + proposed = dict() + existing = dict() + end_state = dict() + updates = [] + + state = module.params['state'] + authen_scheme_name = module.params['authen_scheme_name'] + first_authen_mode = module.params['first_authen_mode'] + author_scheme_name = module.params['author_scheme_name'] + first_author_mode = module.params['first_author_mode'] + acct_scheme_name = module.params['acct_scheme_name'] + accounting_mode = module.params['accounting_mode'] + domain_name = module.params['domain_name'] + radius_server_group = module.params['radius_server_group'] + hwtacas_template = module.params['hwtacas_template'] + local_user_group = module.params['local_user_group'] + + ce_aaa_server = AaaServer() + + if not ce_aaa_server: + module.fail_json(msg='Error: init module failed.') + + # get proposed + proposed["state"] = state + if authen_scheme_name: + proposed["authen_scheme_name"] = authen_scheme_name + if first_authen_mode: + proposed["first_authen_mode"] = first_authen_mode + if author_scheme_name: + proposed["author_scheme_name"] = author_scheme_name + if first_author_mode: + proposed["first_author_mode"] = first_author_mode + if acct_scheme_name: + proposed["acct_scheme_name"] = acct_scheme_name + if accounting_mode: + proposed["accounting_mode"] = accounting_mode + if domain_name: + proposed["domain_name"] = domain_name + if radius_server_group: + proposed["radius_server_group"] = radius_server_group + if hwtacas_template: + proposed["hwtacas_template"] = hwtacas_template + if local_user_group: + proposed["local_user_group"] = local_user_group + + # authentication + if authen_scheme_name: + + scheme_exist = ce_aaa_server.get_authentication_scheme(module=module) + scheme_new = (authen_scheme_name.lower(), first_authen_mode.lower(), "invalid") + + existing["authentication scheme"] = scheme_exist + + if state == "present": + # present authentication scheme + if len(scheme_exist) == 0: + cmd = ce_aaa_server.create_authentication_scheme( + module=module, + authen_scheme_name=authen_scheme_name, + first_authen_mode=first_authen_mode) + + updates.append(cmd) + changed = True + + elif scheme_new not in scheme_exist: + cmd = ce_aaa_server.merge_authentication_scheme( + module=module, + authen_scheme_name=authen_scheme_name, + first_authen_mode=first_authen_mode) + updates.append(cmd) + changed = True + + # present authentication domain + if domain_name: + domain_exist = ce_aaa_server.get_authentication_domain( + module=module) + domain_new = (domain_name.lower(), authen_scheme_name.lower()) + + if len(domain_exist) == 0: + cmd = 
ce_aaa_server.create_authentication_domain( + module=module, + domain_name=domain_name, + authen_scheme_name=authen_scheme_name) + updates.append(cmd) + changed = True + + elif domain_new not in domain_exist: + cmd = ce_aaa_server.merge_authentication_domain( + module=module, + domain_name=domain_name, + authen_scheme_name=authen_scheme_name) + updates.append(cmd) + changed = True + + else: + # absent authentication scheme + if not domain_name: + if len(scheme_exist) == 0: + pass + elif scheme_new not in scheme_exist: + pass + else: + cmd = ce_aaa_server.delete_authentication_scheme( + module=module, + authen_scheme_name=authen_scheme_name, + first_authen_mode=first_authen_mode) + updates.append(cmd) + changed = True + + # absent authentication domain + else: + domain_exist = ce_aaa_server.get_authentication_domain( + module=module) + domain_new = (domain_name.lower(), authen_scheme_name.lower()) + + if len(domain_exist) == 0: + pass + elif domain_new not in domain_exist: + pass + else: + cmd = ce_aaa_server.delete_authentication_domain( + module=module, + domain_name=domain_name, + authen_scheme_name=authen_scheme_name) + updates.append(cmd) + changed = True + + scheme_end = ce_aaa_server.get_authentication_scheme(module=module) + end_state["authentication scheme"] = scheme_end + + # authorization + if author_scheme_name: + + scheme_exist = ce_aaa_server.get_authorization_scheme(module=module) + scheme_new = (author_scheme_name.lower(), first_author_mode.lower(), "invalid") + + existing["authorization scheme"] = scheme_exist + + if state == "present": + # present authorization scheme + if len(scheme_exist) == 0: + cmd = ce_aaa_server.create_authorization_scheme( + module=module, + author_scheme_name=author_scheme_name, + first_author_mode=first_author_mode) + updates.append(cmd) + changed = True + elif scheme_new not in scheme_exist: + cmd = ce_aaa_server.merge_authorization_scheme( + module=module, + author_scheme_name=author_scheme_name, + first_author_mode=first_author_mode) + updates.append(cmd) + changed = True + + # present authorization domain + if domain_name: + domain_exist = ce_aaa_server.get_authorization_domain( + module=module) + domain_new = (domain_name.lower(), author_scheme_name.lower()) + + if len(domain_exist) == 0: + cmd = ce_aaa_server.create_authorization_domain( + module=module, + domain_name=domain_name, + author_scheme_name=author_scheme_name) + updates.append(cmd) + changed = True + elif domain_new not in domain_exist: + cmd = ce_aaa_server.merge_authorization_domain( + module=module, + domain_name=domain_name, + author_scheme_name=author_scheme_name) + updates.append(cmd) + changed = True + + else: + # absent authorization scheme + if not domain_name: + if len(scheme_exist) == 0: + pass + elif scheme_new not in scheme_exist: + pass + else: + cmd = ce_aaa_server.delete_authorization_scheme( + module=module, + author_scheme_name=author_scheme_name, + first_author_mode=first_author_mode) + updates.append(cmd) + changed = True + + # absent authorization domain + else: + domain_exist = ce_aaa_server.get_authorization_domain( + module=module) + domain_new = (domain_name.lower(), author_scheme_name.lower()) + + if len(domain_exist) == 0: + pass + elif domain_new not in domain_exist: + pass + else: + cmd = ce_aaa_server.delete_authorization_domain( + module=module, + domain_name=domain_name, + author_scheme_name=author_scheme_name) + updates.append(cmd) + changed = True + + scheme_end = ce_aaa_server.get_authorization_scheme(module=module) + end_state["authorization 
scheme"] = scheme_end + + # accounting + if acct_scheme_name: + + scheme_exist = ce_aaa_server.get_accounting_scheme(module=module) + scheme_new = (acct_scheme_name.lower(), accounting_mode.lower()) + + existing["accounting scheme"] = scheme_exist + + if state == "present": + # present accounting scheme + if len(scheme_exist) == 0: + cmd = ce_aaa_server.create_accounting_scheme( + module=module, + acct_scheme_name=acct_scheme_name, + accounting_mode=accounting_mode) + updates.append(cmd) + changed = True + elif scheme_new not in scheme_exist: + cmd = ce_aaa_server.merge_accounting_scheme( + module=module, + acct_scheme_name=acct_scheme_name, + accounting_mode=accounting_mode) + updates.append(cmd) + changed = True + + # present accounting domain + if domain_name: + domain_exist = ce_aaa_server.get_accounting_domain( + module=module) + domain_new = (domain_name.lower(), acct_scheme_name.lower()) + + if len(domain_exist) == 0: + cmd = ce_aaa_server.create_accounting_domain( + module=module, + domain_name=domain_name, + acct_scheme_name=acct_scheme_name) + updates.append(cmd) + changed = True + elif domain_new not in domain_exist: + cmd = ce_aaa_server.merge_accounting_domain( + module=module, + domain_name=domain_name, + acct_scheme_name=acct_scheme_name) + updates.append(cmd) + changed = True + + else: + # absent accounting scheme + if not domain_name: + if len(scheme_exist) == 0: + pass + elif scheme_new not in scheme_exist: + pass + else: + cmd = ce_aaa_server.delete_accounting_scheme( + module=module, + acct_scheme_name=acct_scheme_name, + accounting_mode=accounting_mode) + updates.append(cmd) + changed = True + + # absent accounting domain + else: + domain_exist = ce_aaa_server.get_accounting_domain( + module=module) + domain_new = (domain_name.lower(), acct_scheme_name.lower()) + if len(domain_exist) == 0: + pass + elif domain_new not in domain_exist: + pass + else: + cmd = ce_aaa_server.delete_accounting_domain( + module=module, + domain_name=domain_name, + acct_scheme_name=acct_scheme_name) + updates.append(cmd) + changed = True + + scheme_end = ce_aaa_server.get_accounting_scheme(module=module) + end_state["accounting scheme"] = scheme_end + + # radius group name + if (authen_scheme_name and first_authen_mode.lower() == "radius") \ + or (acct_scheme_name and accounting_mode.lower() == "radius"): + + if not radius_server_group: + module.fail_json(msg='please input radius_server_group when use radius.') + + rds_template_exist = ce_aaa_server.get_radius_template(module=module) + rds_template_new = (radius_server_group) + + rds_enable_exist = ce_aaa_server.get_radius_client(module=module) + + existing["radius template"] = rds_template_exist + existing["radius enable"] = rds_enable_exist + + if state == "present": + # present radius group name + if len(rds_template_exist) == 0: + cmd = ce_aaa_server.create_radius_template( + module=module, radius_server_group=radius_server_group) + updates.append(cmd) + changed = True + elif rds_template_new not in rds_template_exist: + cmd = ce_aaa_server.merge_radius_template( + module=module, radius_server_group=radius_server_group) + updates.append(cmd) + changed = True + + rds_enable_new = ("true") + if rds_enable_new not in rds_enable_exist: + cmd = ce_aaa_server.merge_radius_client( + module=module, isEnable="true") + updates.append(cmd) + changed = True + + else: + # absent radius group name + if len(rds_template_exist) == 0: + pass + elif rds_template_new not in rds_template_exist: + pass + else: + cmd = ce_aaa_server.delete_radius_template( + 
module=module, radius_server_group=radius_server_group) + updates.append(cmd) + changed = True + + rds_enable_new = ("false") + if rds_enable_new not in rds_enable_exist: + cmd = ce_aaa_server.merge_radius_client( + module=module, isEnable="false") + updates.append(cmd) + changed = True + else: + pass + + rds_template_end = ce_aaa_server.get_radius_template(module=module) + end_state["radius template"] = rds_template_end + + rds_enable_end = ce_aaa_server.get_radius_client(module=module) + end_state["radius enable"] = rds_enable_end + + tmp_scheme = author_scheme_name + + # hwtacas template + if (authen_scheme_name and first_authen_mode.lower() == "hwtacacs") \ + or (tmp_scheme and first_author_mode.lower() == "hwtacacs") \ + or (acct_scheme_name and accounting_mode.lower() == "hwtacacs"): + + if not hwtacas_template: + module.fail_json( + msg='please input hwtacas_template when use hwtacas.') + + hwtacacs_exist = ce_aaa_server.get_hwtacacs_template(module=module) + hwtacacs_new = (hwtacas_template) + + hwtacacs_enbale_exist = ce_aaa_server.get_hwtacacs_global_cfg( + module=module) + + existing["hwtacacs template"] = hwtacacs_exist + existing["hwtacacs enable"] = hwtacacs_enbale_exist + + if state == "present": + # present hwtacas template + if len(hwtacacs_exist) == 0: + cmd = ce_aaa_server.create_hwtacacs_template( + module=module, hwtacas_template=hwtacas_template) + updates.append(cmd) + changed = True + elif hwtacacs_new not in hwtacacs_exist: + cmd = ce_aaa_server.merge_hwtacacs_template( + module=module, hwtacas_template=hwtacas_template) + updates.append(cmd) + changed = True + + hwtacacs_enbale_new = ("true") + if hwtacacs_enbale_new not in hwtacacs_enbale_exist: + cmd = ce_aaa_server.merge_hwtacacs_global_cfg( + module=module, isEnable="true") + updates.append(cmd) + changed = True + + else: + # absent hwtacas template + if len(hwtacacs_exist) == 0: + pass + elif hwtacacs_new not in hwtacacs_exist: + pass + else: + cmd = ce_aaa_server.delete_hwtacacs_template( + module=module, hwtacas_template=hwtacas_template) + updates.append(cmd) + changed = True + + hwtacacs_enbale_new = ("false") + if hwtacacs_enbale_new not in hwtacacs_enbale_exist: + cmd = ce_aaa_server.merge_hwtacacs_global_cfg( + module=module, isEnable="false") + updates.append(cmd) + changed = True + else: + pass + + hwtacacs_end = ce_aaa_server.get_hwtacacs_template(module=module) + end_state["hwtacacs template"] = hwtacacs_end + + hwtacacs_enable_end = ce_aaa_server.get_hwtacacs_global_cfg( + module=module) + end_state["hwtacacs enable"] = hwtacacs_enable_end + + # local user group + if local_user_group: + + user_group_exist = ce_aaa_server.get_local_user_group(module=module) + user_group_new = (local_user_group) + + existing["local user group"] = user_group_exist + + if state == "present": + # present local user group + if len(user_group_exist) == 0: + cmd = ce_aaa_server.merge_local_user_group( + module=module, local_user_group=local_user_group) + updates.append(cmd) + changed = True + elif user_group_new not in user_group_exist: + cmd = ce_aaa_server.merge_local_user_group( + module=module, local_user_group=local_user_group) + updates.append(cmd) + changed = True + + else: + # absent local user group + if len(user_group_exist) == 0: + pass + elif user_group_new not in user_group_exist: + pass + else: + cmd = ce_aaa_server.delete_local_user_group( + module=module, local_user_group=local_user_group) + updates.append(cmd) + changed = True + + user_group_end = ce_aaa_server.get_local_user_group(module=module) + 
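+        # Same pattern as every feature block above: re-read the device after
+        # any change so end_state reports the post-change configuration rather
+        # than what the module merely intended to set.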
end_state["local user group"] = user_group_end + + results = dict() + results['proposed'] = proposed + results['existing'] = existing + results['changed'] = changed + results['end_state'] = end_state + results['updates'] = updates + + module.exit_json(**results) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/cloudengine/ce_aaa_server_host.py b/plugins/modules/network/cloudengine/ce_aaa_server_host.py new file mode 100644 index 0000000000..89777c75fd --- /dev/null +++ b/plugins/modules/network/cloudengine/ce_aaa_server_host.py @@ -0,0 +1,2640 @@ +#!/usr/bin/python +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: ce_aaa_server_host +short_description: Manages AAA server host configuration on HUAWEI CloudEngine switches. +description: + - Manages AAA server host configuration on HUAWEI CloudEngine switches. +author: + - wangdezhuang (@QijunPan) +notes: + - This module requires the netconf system service be enabled on the remote device being managed. + - Recommended connection is C(netconf). + - This module also works with C(local) connections for legacy playbooks. +options: + state: + description: + - Specify desired state of the resource. + default: present + choices: ['present', 'absent'] + local_user_name: + description: + - Name of a local user. + The value is a string of 1 to 253 characters. + local_password: + description: + - Login password of a user. The password can contain letters, numbers, and special characters. + The value is a string of 1 to 255 characters. + local_service_type: + description: + - The type of local user login through, such as ftp ssh snmp telnet. + local_ftp_dir: + description: + - FTP user directory. + The value is a string of 1 to 255 characters. + local_user_level: + description: + - Login level of a local user. + The value is an integer ranging from 0 to 15. + local_user_group: + description: + - Name of the user group where the user belongs. The user inherits all the rights of the user group. + The value is a string of 1 to 32 characters. + radius_group_name: + description: + - RADIUS server group's name. + The value is a string of 1 to 32 case-insensitive characters. + radius_server_type: + description: + - Type of Radius Server. + choices: ['Authentication', 'Accounting'] + radius_server_ip: + description: + - IPv4 address of configured server. + The value is a string of 0 to 255 characters, in dotted decimal notation. + radius_server_ipv6: + description: + - IPv6 address of configured server. + The total length is 128 bits. + radius_server_port: + description: + - Configured server port for a particular server. + The value is an integer ranging from 1 to 65535. 
+ radius_server_mode: + description: + - Configured primary or secondary server for a particular server. + choices: ['Secondary-server', 'Primary-server'] + radius_vpn_name: + description: + - Set VPN instance. + The value is a string of 1 to 31 case-sensitive characters. + radius_server_name: + description: + - Hostname of configured server. + The value is a string of 0 to 255 case-sensitive characters. + hwtacacs_template: + description: + - Name of a HWTACACS template. + The value is a string of 1 to 32 case-insensitive characters. + hwtacacs_server_ip: + description: + - Server IPv4 address. Must be a valid unicast IP address. + The value is a string of 0 to 255 characters, in dotted decimal notation. + hwtacacs_server_ipv6: + description: + - Server IPv6 address. Must be a valid unicast IP address. + The total length is 128 bits. + hwtacacs_server_type: + description: + - Hwtacacs server type. + choices: ['Authentication', 'Authorization', 'Accounting', 'Common'] + hwtacacs_is_secondary_server: + description: + - Whether the server is secondary. + type: bool + default: 'no' + hwtacacs_vpn_name: + description: + - VPN instance name. + hwtacacs_is_public_net: + description: + - Set the public-net. + type: bool + default: 'no' + hwtacacs_server_host_name: + description: + - Hwtacacs server host name. +''' + +EXAMPLES = ''' + +- name: AAA server host test + hosts: cloudengine + connection: local + gather_facts: no + vars: + cli: + host: "{{ inventory_hostname }}" + port: "{{ ansible_ssh_port }}" + username: "{{ username }}" + password: "{{ password }}" + transport: cli + + tasks: + + - name: "Config local user when use local scheme" + ce_aaa_server_host: + state: present + local_user_name: user1 + local_password: 123456 + provider: "{{ cli }}" + + - name: "Undo local user when use local scheme" + ce_aaa_server_host: + state: absent + local_user_name: user1 + local_password: 123456 + provider: "{{ cli }}" + + - name: "Config radius server ip" + ce_aaa_server_host: + state: present + radius_group_name: group1 + radius_server_type: Authentication + radius_server_ip: 10.1.10.1 + radius_server_port: 2000 + radius_server_mode: Primary-server + radius_vpn_name: _public_ + provider: "{{ cli }}" + + - name: "Undo radius server ip" + ce_aaa_server_host: + state: absent + radius_group_name: group1 + radius_server_type: Authentication + radius_server_ip: 10.1.10.1 + radius_server_port: 2000 + radius_server_mode: Primary-server + radius_vpn_name: _public_ + provider: "{{ cli }}" + + - name: "Config hwtacacs server ip" + ce_aaa_server_host: + state: present + hwtacacs_template: template + hwtacacs_server_ip: 10.10.10.10 + hwtacacs_server_type: Authorization + hwtacacs_vpn_name: _public_ + provider: "{{ cli }}" + + - name: "Undo hwtacacs server ip" + ce_aaa_server_host: + state: absent + hwtacacs_template: template + hwtacacs_server_ip: 10.10.10.10 + hwtacacs_server_type: Authorization + hwtacacs_vpn_name: _public_ + provider: "{{ cli }}" +''' + +RETURN = ''' +changed: + description: check to see if a change was made on the device + returned: always + type: bool + sample: true +proposed: + description: k/v pairs of parameters passed into module + returned: always + type: dict + sample: {"hwtacacs_is_public_net": "false", + "hwtacacs_is_secondary_server": "false", + "hwtacacs_server_ip": "10.135.182.157", + "hwtacacs_server_type": "Authorization", + "hwtacacs_template": "wdz", + "hwtacacs_vpn_name": "_public_", + "local_password": "******", + "state": "present"} +existing: + description: k/v pairs of 
existing aaa server host + returned: always + type: dict + sample: {"radius server ipv4": []} +end_state: + description: k/v pairs of aaa params after module execution + returned: always + type: dict + sample: {"radius server ipv4": [ + [ + "10.1.10.1", + "Authentication", + "2000", + "Primary-server", + "_public_" + ] + ]} +updates: + description: command sent to the device + returned: always + type: list + sample: ["hwtacacs server template test", + "hwtacacs server authorization 10.135.182.157 vpn-instance test_vpn public-net"] +''' + +from xml.etree import ElementTree +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.network.cloudengine.ce import get_nc_config, set_nc_config, ce_argument_spec, check_ip_addr + +SUCCESS = """success""" +FAILED = """failed""" + +INVALID_USER_NAME_CHAR = [' ', '/', '\\', + ':', '*', '?', '"', '\'', '<', '>', '%'] + +# get local user name +CE_GET_LOCAL_USER_INFO_HEADER = """ + + + + + + + +""" +CE_GET_LOCAL_USER_INFO_TAIL = """ + + + + + +""" + +# merge local user name +CE_MERGE_LOCAL_USER_INFO_HEADER = """ + + + + + + %s +""" +CE_MERGE_LOCAL_USER_INFO_TAIL = """ + + + + + +""" + +# delete local user name +CE_DELETE_LOCAL_USER_INFO_HEADER = """ + + + + + + %s +""" +CE_DELETE_LOCAL_USER_INFO_TAIL = """ + + + + + +""" + +# get radius server config ipv4 +CE_GET_RADIUS_SERVER_CFG_IPV4 = """ + + + + + %s + + + + + + + + + + + + + +""" + +# merge radius server config ipv4 +CE_MERGE_RADIUS_SERVER_CFG_IPV4 = """ + + + + + %s + + + %s + %s + %s + %s + %s + + + + + + +""" + +# delete radius server config ipv4 +CE_DELETE_RADIUS_SERVER_CFG_IPV4 = """ + + + + + %s + + + %s + %s + %s + %s + %s + + + + + + +""" + +# get radius server config ipv6 +CE_GET_RADIUS_SERVER_CFG_IPV6 = """ + + + + + %s + + + + + + + + + + + + +""" + +# merge radius server config ipv6 +CE_MERGE_RADIUS_SERVER_CFG_IPV6 = """ + + + + + %s + + + %s + %s + %s + %s + + + + + + +""" + +# delete radius server config ipv6 +CE_DELETE_RADIUS_SERVER_CFG_IPV6 = """ + + + + + %s + + + %s + %s + %s + %s + + + + + + +""" + +# get radius server name +CE_GET_RADIUS_SERVER_NAME = """ + + + + + %s + + + + + + + + + + + + + +""" + +# merge radius server name +CE_MERGE_RADIUS_SERVER_NAME = """ + + + + + %s + + + %s + %s + %s + %s + %s + + + + + + +""" + +# delete radius server name +CE_DELETE_RADIUS_SERVER_NAME = """ + + + + + %s + + + %s + %s + %s + %s + %s + + + + + + +""" + +# get hwtacacs server config ipv4 +CE_GET_HWTACACS_SERVER_CFG_IPV4 = """ + + + + + %s + + + + + + + + + + + + + +""" + +# merge hwtacacs server config ipv4 +CE_MERGE_HWTACACS_SERVER_CFG_IPV4 = """ + + + + + %s + + + %s + %s + %s + %s + %s + + + + + + +""" + +# delete hwtacacs server config ipv4 +CE_DELETE_HWTACACS_SERVER_CFG_IPV4 = """ + + + + + %s + + + %s + %s + %s + %s + %s + + + + + + +""" + +# get hwtacacs server config ipv6 +CE_GET_HWTACACS_SERVER_CFG_IPV6 = """ + + + + + %s + + + + + + + + + + + + +""" + +# merge hwtacacs server config ipv6 +CE_MERGE_HWTACACS_SERVER_CFG_IPV6 = """ + + + + + %s + + + %s + %s + %s + %s + + + + + + +""" + +# delete hwtacacs server config ipv6 +CE_DELETE_HWTACACS_SERVER_CFG_IPV6 = """ + + + + + %s + + + %s + %s + %s + %s + + + + + + +""" + +# get hwtacacs host server config +CE_GET_HWTACACS_HOST_SERVER_CFG = """ + + + + + %s + + + + + + + + + + + + + +""" + +# merge hwtacacs host server config +CE_MERGE_HWTACACS_HOST_SERVER_CFG = """ + + + + + %s + + + %s + %s + %s + %s + %s + + + + + + +""" + +# delete hwtacacs host server config 
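+# This delete payload mirrors the merge payload above: the same six %s slots
+# are filled in, but the matching host entry is removed instead of created.
+# Illustrative use (the argument order here is assumed from the module
+# parameters, not confirmed by this patch):
+#
+#     conf_str = CE_DELETE_HWTACACS_HOST_SERVER_CFG % (
+#         hwtacacs_template, hwtacacs_server_host_name, hwtacacs_server_type,
+#         hwtacacs_is_secondary_server, hwtacacs_vpn_name,
+#         hwtacacs_is_public_net)
+#     recv_xml = set_nc_config(module, conf_str)
+#     if "<ok/>" not in recv_xml:
+#         module.fail_json(msg='Error: Delete hwtacacs host server config failed.')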
+CE_DELETE_HWTACACS_HOST_SERVER_CFG = """ + + + + + %s + + + %s + %s + %s + %s + %s + + + + + + +""" + + +class AaaServerHost(object): + """ Manages aaa server host configuration """ + + def netconf_get_config(self, **kwargs): + """ Get configure by netconf """ + + module = kwargs["module"] + conf_str = kwargs["conf_str"] + + xml_str = get_nc_config(module, conf_str) + + return xml_str + + def netconf_set_config(self, **kwargs): + """ Set configure by netconf """ + + module = kwargs["module"] + conf_str = kwargs["conf_str"] + + recv_xml = set_nc_config(module, conf_str) + + return recv_xml + + def get_local_user_info(self, **kwargs): + """ Get local user information """ + + module = kwargs["module"] + local_user_name = module.params['local_user_name'] + local_service_type = module.params['local_service_type'] + local_ftp_dir = module.params['local_ftp_dir'] + local_user_level = module.params['local_user_level'] + local_user_group = module.params['local_user_group'] + state = module.params['state'] + + result = dict() + result["local_user_info"] = [] + need_cfg = False + + conf_str = CE_GET_LOCAL_USER_INFO_HEADER + + if local_service_type: + if local_service_type == "none": + conf_str += "" + conf_str += "" + conf_str += "" + conf_str += "" + conf_str += "" + conf_str += "" + elif local_service_type == "dot1x": + conf_str += "" + else: + option = local_service_type.split(" ") + for tmp in option: + if tmp == "dot1x": + module.fail_json( + msg='Error: Do not input dot1x with other service type.') + elif tmp == "none": + module.fail_json( + msg='Error: Do not input none with other service type.') + elif tmp == "ftp": + conf_str += "" + elif tmp == "snmp": + conf_str += "" + elif tmp == "ssh": + conf_str += "" + elif tmp == "telnet": + conf_str += "" + elif tmp == "terminal": + conf_str += "" + else: + module.fail_json( + msg='Error: Do not support the type [%s].' 
% tmp) + + if local_ftp_dir: + conf_str += "" + + if local_user_level: + conf_str += "" + + if local_user_group: + conf_str += "" + + conf_str += CE_GET_LOCAL_USER_INFO_TAIL + + recv_xml = self.netconf_get_config(module=module, conf_str=conf_str) + + if "" in recv_xml: + if state == "present": + need_cfg = True + + else: + xml_str = recv_xml.replace('\r', '').replace('\n', '').\ + replace('xmlns="urn:ietf:params:xml:ns:netconf:base:1.0"', "").\ + replace('xmlns="http://www.huawei.com/netconf/vrp"', "") + + root = ElementTree.fromstring(xml_str) + local_user_info = root.findall("aaa/lam/users/user") + if local_user_info: + for tmp in local_user_info: + tmp_dict = dict() + for site in tmp: + if site.tag in ["userName", "password", "userLevel", "ftpDir", "userGroupName", + "serviceTerminal", "serviceTelnet", "serviceFtp", "serviceSsh", + "serviceSnmp", "serviceDot1x"]: + tmp_dict[site.tag] = site.text + + result["local_user_info"].append(tmp_dict) + + if state == "present": + need_cfg = True + else: + if result["local_user_info"]: + for tmp in result["local_user_info"]: + if "userName" in tmp.keys(): + if tmp["userName"] == local_user_name: + + if not local_service_type and not local_user_level \ + and not local_ftp_dir and not local_user_group: + + need_cfg = True + + if local_service_type: + if local_service_type == "none": + if tmp.get("serviceTerminal") == "true" or \ + tmp.get("serviceTelnet") == "true" or \ + tmp.get("serviceFtp") == "true" or \ + tmp.get("serviceSsh") == "true" or \ + tmp.get("serviceSnmp") == "true" or \ + tmp.get("serviceDot1x") == "true": + need_cfg = True + elif local_service_type == "dot1x": + if tmp.get("serviceDot1x") == "true": + need_cfg = True + elif tmp == "ftp": + if tmp.get("serviceFtp") == "true": + need_cfg = True + elif tmp == "snmp": + if tmp.get("serviceSnmp") == "true": + need_cfg = True + elif tmp == "ssh": + if tmp.get("serviceSsh") == "true": + need_cfg = True + elif tmp == "telnet": + if tmp.get("serviceTelnet") == "true": + need_cfg = True + elif tmp == "terminal": + if tmp.get("serviceTerminal") == "true": + need_cfg = True + + if local_user_level: + if tmp.get("userLevel") == local_user_level: + need_cfg = True + + if local_ftp_dir: + if tmp.get("ftpDir") == local_ftp_dir: + need_cfg = True + + if local_user_group: + if tmp.get("userGroupName") == local_user_group: + need_cfg = True + + break + + result["need_cfg"] = need_cfg + return result + + def merge_local_user_info(self, **kwargs): + """ Merge local user information by netconf """ + + module = kwargs["module"] + local_user_name = module.params['local_user_name'] + local_password = module.params['local_password'] + local_service_type = module.params['local_service_type'] + local_ftp_dir = module.params['local_ftp_dir'] + local_user_level = module.params['local_user_level'] + local_user_group = module.params['local_user_group'] + state = module.params['state'] + + cmds = [] + + conf_str = CE_MERGE_LOCAL_USER_INFO_HEADER % local_user_name + + if local_password: + conf_str += "%s" % local_password + + if state == "present": + cmd = "local-user %s password cipher %s" % ( + local_user_name, local_password) + cmds.append(cmd) + + if local_service_type: + if local_service_type == "none": + conf_str += "false" + conf_str += "false" + conf_str += "false" + conf_str += "false" + conf_str += "false" + conf_str += "false" + + cmd = "local-user %s service-type none" % local_user_name + cmds.append(cmd) + + elif local_service_type == "dot1x": + if state == "present": + conf_str += "true" + cmd = 
"local-user %s service-type dot1x" % local_user_name + else: + conf_str += "false" + cmd = "undo local-user %s service-type" % local_user_name + + cmds.append(cmd) + + else: + option = local_service_type.split(" ") + for tmp in option: + if tmp == "dot1x": + module.fail_json( + msg='Error: Do not input dot1x with other service type.') + if tmp == "none": + module.fail_json( + msg='Error: Do not input none with other service type.') + + if state == "present": + if tmp == "ftp": + conf_str += "true" + cmd = "local-user %s service-type ftp" % local_user_name + elif tmp == "snmp": + conf_str += "true" + cmd = "local-user %s service-type snmp" % local_user_name + elif tmp == "ssh": + conf_str += "true" + cmd = "local-user %s service-type ssh" % local_user_name + elif tmp == "telnet": + conf_str += "true" + cmd = "local-user %s service-type telnet" % local_user_name + elif tmp == "terminal": + conf_str += "true" + cmd = "local-user %s service-type terminal" % local_user_name + + cmds.append(cmd) + + else: + if tmp == "ftp": + conf_str += "false" + elif tmp == "snmp": + conf_str += "false" + elif tmp == "ssh": + conf_str += "false" + elif tmp == "telnet": + conf_str += "false" + elif tmp == "terminal": + conf_str += "false" + + if state == "absent": + cmd = "undo local-user %s service-type" % local_user_name + cmds.append(cmd) + + if local_ftp_dir: + if state == "present": + conf_str += "%s" % local_ftp_dir + cmd = "local-user %s ftp-directory %s" % ( + local_user_name, local_ftp_dir) + cmds.append(cmd) + else: + conf_str += "" + cmd = "undo local-user %s ftp-directory" % local_user_name + cmds.append(cmd) + + if local_user_level: + if state == "present": + conf_str += "%s" % local_user_level + cmd = "local-user %s level %s" % ( + local_user_name, local_user_level) + cmds.append(cmd) + else: + conf_str += "" + cmd = "undo local-user %s level" % local_user_name + cmds.append(cmd) + + if local_user_group: + if state == "present": + conf_str += "%s" % local_user_group + cmd = "local-user %s user-group %s" % ( + local_user_name, local_user_group) + cmds.append(cmd) + else: + conf_str += "" + cmd = "undo local-user %s user-group" % local_user_name + cmds.append(cmd) + + conf_str += CE_MERGE_LOCAL_USER_INFO_TAIL + + recv_xml = self.netconf_set_config(module=module, conf_str=conf_str) + + if "" not in recv_xml: + module.fail_json(msg='Error: Merge local user info failed.') + + return cmds + + def delete_local_user_info(self, **kwargs): + """ Delete local user information by netconf """ + + module = kwargs["module"] + local_user_name = module.params['local_user_name'] + conf_str = CE_DELETE_LOCAL_USER_INFO_HEADER % local_user_name + conf_str += CE_DELETE_LOCAL_USER_INFO_TAIL + + cmds = [] + + recv_xml = self.netconf_set_config(module=module, conf_str=conf_str) + + if "" not in recv_xml: + module.fail_json(msg='Error: Delete local user info failed.') + + cmd = "undo local-user %s" % local_user_name + cmds.append(cmd) + + return cmds + + def get_radius_server_cfg_ipv4(self, **kwargs): + """ Get radius server configure ipv4 """ + + module = kwargs["module"] + radius_group_name = module.params['radius_group_name'] + radius_server_type = module.params['radius_server_type'] + radius_server_ip = module.params['radius_server_ip'] + radius_server_port = module.params['radius_server_port'] + radius_server_mode = module.params['radius_server_mode'] + radius_vpn_name = module.params['radius_vpn_name'] + state = module.params['state'] + + result = dict() + result["radius_server_ip_v4"] = [] + need_cfg = False + + 
conf_str = CE_GET_RADIUS_SERVER_CFG_IPV4 % radius_group_name + + recv_xml = self.netconf_get_config(module=module, conf_str=conf_str) + + if "" in recv_xml: + if state == "present": + need_cfg = True + + else: + xml_str = recv_xml.replace('\r', '').replace('\n', '').\ + replace('xmlns="urn:ietf:params:xml:ns:netconf:base:1.0"', "").\ + replace('xmlns="http://www.huawei.com/netconf/vrp"', "") + + root = ElementTree.fromstring(xml_str) + radius_server_ip_v4 = root.findall( + "radius/rdsTemplates/rdsTemplate/rdsServerIPV4s/rdsServerIPV4") + if radius_server_ip_v4: + for tmp in radius_server_ip_v4: + tmp_dict = dict() + for site in tmp: + if site.tag in ["serverType", "serverIPAddress", "serverPort", "serverMode", "vpnName"]: + tmp_dict[site.tag] = site.text + + result["radius_server_ip_v4"].append(tmp_dict) + + if result["radius_server_ip_v4"]: + cfg = dict() + config_list = list() + if radius_server_type: + cfg["serverType"] = radius_server_type.lower() + if radius_server_ip: + cfg["serverIPAddress"] = radius_server_ip.lower() + if radius_server_port: + cfg["serverPort"] = radius_server_port.lower() + if radius_server_mode: + cfg["serverMode"] = radius_server_mode.lower() + if radius_vpn_name: + cfg["vpnName"] = radius_vpn_name.lower() + + for tmp in result["radius_server_ip_v4"]: + exist_cfg = dict() + if radius_server_type: + exist_cfg["serverType"] = tmp.get("serverType").lower() + if radius_server_ip: + exist_cfg["serverIPAddress"] = tmp.get("serverIPAddress").lower() + if radius_server_port: + exist_cfg["serverPort"] = tmp.get("serverPort").lower() + if radius_server_mode: + exist_cfg["serverMode"] = tmp.get("serverMode").lower() + if radius_vpn_name: + exist_cfg["vpnName"] = tmp.get("vpnName").lower() + config_list.append(exist_cfg) + if cfg in config_list: + if state == "present": + need_cfg = False + else: + need_cfg = True + else: + if state == "present": + need_cfg = True + else: + need_cfg = False + result["need_cfg"] = need_cfg + return result + + def merge_radius_server_cfg_ipv4(self, **kwargs): + """ Merge radius server configure ipv4 """ + + module = kwargs["module"] + radius_group_name = module.params['radius_group_name'] + radius_server_type = module.params['radius_server_type'] + radius_server_ip = module.params['radius_server_ip'] + radius_server_port = module.params['radius_server_port'] + radius_server_mode = module.params['radius_server_mode'] + radius_vpn_name = module.params['radius_vpn_name'] + + conf_str = CE_MERGE_RADIUS_SERVER_CFG_IPV4 % ( + radius_group_name, radius_server_type, + radius_server_ip, radius_server_port, + radius_server_mode, radius_vpn_name) + + recv_xml = self.netconf_set_config(module=module, conf_str=conf_str) + + if "" not in recv_xml: + module.fail_json( + msg='Error: Merge radius server config ipv4 failed.') + + cmds = [] + + cmd = "radius server group %s" % radius_group_name + cmds.append(cmd) + + if radius_server_type == "Authentication": + cmd = "radius server authentication %s %s" % ( + radius_server_ip, radius_server_port) + + if radius_vpn_name and radius_vpn_name != "_public_": + cmd += " vpn-instance %s" % radius_vpn_name + + if radius_server_mode == "Secondary-server": + cmd += " secondary" + else: + cmd = "radius server accounting %s %s" % ( + radius_server_ip, radius_server_port) + + if radius_vpn_name and radius_vpn_name != "_public_": + cmd += " vpn-instance %s" % radius_vpn_name + + if radius_server_mode == "Secondary-server": + cmd += " secondary" + + cmds.append(cmd) + return cmds + + def delete_radius_server_cfg_ipv4(self, 
**kwargs):
+        """ Delete radius server configuration ipv4 """
+
+        module = kwargs["module"]
+        radius_group_name = module.params['radius_group_name']
+        radius_server_type = module.params['radius_server_type']
+        radius_server_ip = module.params['radius_server_ip']
+        radius_server_port = module.params['radius_server_port']
+        radius_server_mode = module.params['radius_server_mode']
+        radius_vpn_name = module.params['radius_vpn_name']
+
+        conf_str = CE_DELETE_RADIUS_SERVER_CFG_IPV4 % (
+            radius_group_name, radius_server_type,
+            radius_server_ip, radius_server_port,
+            radius_server_mode, radius_vpn_name)
+
+        recv_xml = self.netconf_set_config(module=module, conf_str=conf_str)
+
+        if "<ok/>" not in recv_xml:
+            module.fail_json(
+                msg='Error: Delete radius server config ipv4 failed.')
+
+        cmds = []
+
+        cmd = "radius server group %s" % radius_group_name
+        cmds.append(cmd)
+
+        if radius_server_type == "Authentication":
+            cmd = "undo radius server authentication %s %s" % (
+                radius_server_ip, radius_server_port)
+
+            if radius_vpn_name and radius_vpn_name != "_public_":
+                cmd += " vpn-instance %s" % radius_vpn_name
+
+            if radius_server_mode == "Secondary-server":
+                cmd += " secondary"
+        else:
+            cmd = "undo radius server accounting %s %s" % (
+                radius_server_ip, radius_server_port)
+
+            if radius_vpn_name and radius_vpn_name != "_public_":
+                cmd += " vpn-instance %s" % radius_vpn_name
+
+            if radius_server_mode == "Secondary-server":
+                cmd += " secondary"
+
+        cmds.append(cmd)
+        return cmds
+
+    def get_radius_server_cfg_ipv6(self, **kwargs):
+        """ Get radius server configuration ipv6 """
+
+        module = kwargs["module"]
+        radius_group_name = module.params['radius_group_name']
+        radius_server_type = module.params['radius_server_type']
+        radius_server_ipv6 = module.params['radius_server_ipv6']
+        radius_server_port = module.params['radius_server_port']
+        radius_server_mode = module.params['radius_server_mode']
+        state = module.params['state']
+
+        result = dict()
+        result["radius_server_ip_v6"] = []
+        need_cfg = False
+
+        conf_str = CE_GET_RADIUS_SERVER_CFG_IPV6 % radius_group_name
+
+        recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
+
+        if "<data/>" in recv_xml:
+            if state == "present":
+                need_cfg = True
+
+        else:
+            xml_str = recv_xml.replace('\r', '').replace('\n', '').\
+                replace('xmlns="urn:ietf:params:xml:ns:netconf:base:1.0"', "").\
+                replace('xmlns="http://www.huawei.com/netconf/vrp"', "")
+
+            root = ElementTree.fromstring(xml_str)
+            radius_server_ip_v6 = root.findall(
+                "radius/rdsTemplates/rdsTemplate/rdsServerIPV6s/rdsServerIPV6")
+            if radius_server_ip_v6:
+                for tmp in radius_server_ip_v6:
+                    tmp_dict = dict()
+                    for site in tmp:
+                        if site.tag in ["serverType", "serverIPAddress", "serverPort", "serverMode"]:
+                            tmp_dict[site.tag] = site.text
+
+                    result["radius_server_ip_v6"].append(tmp_dict)
+
+            if result["radius_server_ip_v6"]:
+                cfg = dict()
+                config_list = list()
+                if radius_server_type:
+                    cfg["serverType"] = radius_server_type.lower()
+                if radius_server_ipv6:
+                    cfg["serverIPAddress"] = radius_server_ipv6.lower()
+                if radius_server_port:
+                    cfg["serverPort"] = radius_server_port.lower()
+                if radius_server_mode:
+                    cfg["serverMode"] = radius_server_mode.lower()
+
+                for tmp in result["radius_server_ip_v6"]:
+                    exist_cfg = dict()
+                    if radius_server_type:
+                        exist_cfg["serverType"] = tmp.get("serverType").lower()
+                    if radius_server_ipv6:
+                        exist_cfg["serverIPAddress"] = tmp.get("serverIPAddress").lower()
+                    if radius_server_port:
+                        exist_cfg["serverPort"] = tmp.get("serverPort").lower()
+                    if radius_server_mode:
+                        exist_cfg["serverMode"] = tmp.get("serverMode").lower()
+                    config_list.append(exist_cfg)
+                if cfg in config_list:
+                    if state == "present":
+                        need_cfg = False
+                    else:
+                        need_cfg = True
+                else:
+                    if state == "present":
+                        need_cfg = True
+                    else:
+                        need_cfg = False
+
+        result["need_cfg"] = need_cfg
+        return result
+
+    def merge_radius_server_cfg_ipv6(self, **kwargs):
+        """ Merge radius server configuration ipv6 """
+
+        module = kwargs["module"]
+        radius_group_name = module.params['radius_group_name']
+        radius_server_type = module.params['radius_server_type']
+        radius_server_ipv6 = module.params['radius_server_ipv6']
+        radius_server_port = module.params['radius_server_port']
+        radius_server_mode = module.params['radius_server_mode']
+
+        conf_str = CE_MERGE_RADIUS_SERVER_CFG_IPV6 % (
+            radius_group_name, radius_server_type,
+            radius_server_ipv6, radius_server_port,
+            radius_server_mode)
+
+        recv_xml = self.netconf_set_config(module=module, conf_str=conf_str)
+
+        if "<ok/>" not in recv_xml:
+            module.fail_json(
+                msg='Error: Merge radius server config ipv6 failed.')
+
+        cmds = []
+
+        cmd = "radius server group %s" % radius_group_name
+        cmds.append(cmd)
+
+        if radius_server_type == "Authentication":
+            cmd = "radius server authentication %s %s" % (
+                radius_server_ipv6, radius_server_port)
+
+            if radius_server_mode == "Secondary-server":
+                cmd += " secondary"
+        else:
+            cmd = "radius server accounting %s %s" % (
+                radius_server_ipv6, radius_server_port)
+
+            if radius_server_mode == "Secondary-server":
+                cmd += " secondary"
+
+        cmds.append(cmd)
+        return cmds
+
+    def delete_radius_server_cfg_ipv6(self, **kwargs):
+        """ Delete radius server configuration ipv6 """
+
+        module = kwargs["module"]
+        radius_group_name = module.params['radius_group_name']
+        radius_server_type = module.params['radius_server_type']
+        radius_server_ipv6 = module.params['radius_server_ipv6']
+        radius_server_port = module.params['radius_server_port']
+        radius_server_mode = module.params['radius_server_mode']
+
+        conf_str = CE_DELETE_RADIUS_SERVER_CFG_IPV6 % (
+            radius_group_name, radius_server_type,
+            radius_server_ipv6, radius_server_port,
+            radius_server_mode)
+
+        recv_xml = self.netconf_set_config(module=module, conf_str=conf_str)
+
+        if "<ok/>" not in recv_xml:
+            module.fail_json(
+                msg='Error: Delete radius server config ipv6 failed.')
+
+        cmds = []
+
+        cmd = "radius server group %s" % radius_group_name
+        cmds.append(cmd)
+
+        if radius_server_type == "Authentication":
+            cmd = "undo radius server authentication %s %s" % (
+                radius_server_ipv6, radius_server_port)
+
+            if radius_server_mode == "Secondary-server":
+                cmd += " secondary"
+        else:
+            cmd = "undo radius server accounting %s %s" % (
+                radius_server_ipv6, radius_server_port)
+
+            if radius_server_mode == "Secondary-server":
+                cmd += " secondary"
+
+        cmds.append(cmd)
+        return cmds
+
+    def get_radius_server_name(self, **kwargs):
+        """ Get radius server name """
+
+        module = kwargs["module"]
+        radius_group_name = module.params['radius_group_name']
+        radius_server_type = module.params['radius_server_type']
+        radius_server_name = module.params['radius_server_name']
+        radius_server_port = module.params['radius_server_port']
+        radius_server_mode = module.params['radius_server_mode']
+        radius_vpn_name = module.params['radius_vpn_name']
+        state = module.params['state']
+
+        result = dict()
+        result["radius_server_name_cfg"] = []
+        need_cfg = False
+
+        conf_str = CE_GET_RADIUS_SERVER_NAME % radius_group_name
+
+        recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
+
+        if "<data/>" in
recv_xml: + if state == "present": + need_cfg = True + + else: + xml_str = recv_xml.replace('\r', '').replace('\n', '').\ + replace('xmlns="urn:ietf:params:xml:ns:netconf:base:1.0"', "").\ + replace('xmlns="http://www.huawei.com/netconf/vrp"', "") + + root = ElementTree.fromstring(xml_str) + radius_server_name_cfg = root.findall( + "radius/rdsTemplates/rdsTemplate/rdsServerNames/rdsServerName") + if radius_server_name_cfg: + for tmp in radius_server_name_cfg: + tmp_dict = dict() + for site in tmp: + if site.tag in ["serverType", "serverName", "serverPort", "serverMode", "vpnName"]: + tmp_dict[site.tag] = site.text + + result["radius_server_name_cfg"].append(tmp_dict) + + if result["radius_server_name_cfg"]: + cfg = dict() + config_list = list() + if radius_server_type: + cfg["serverType"] = radius_server_type.lower() + if radius_server_name: + cfg["serverName"] = radius_server_name.lower() + if radius_server_port: + cfg["serverPort"] = radius_server_port.lower() + if radius_server_mode: + cfg["serverMode"] = radius_server_mode.lower() + if radius_vpn_name: + cfg["vpnName"] = radius_vpn_name.lower() + + for tmp in result["radius_server_name_cfg"]: + exist_cfg = dict() + if radius_server_type: + exist_cfg["serverType"] = tmp.get("serverType").lower() + if radius_server_name: + exist_cfg["serverName"] = tmp.get("serverName").lower() + if radius_server_port: + exist_cfg["serverPort"] = tmp.get("serverPort").lower() + if radius_server_mode: + exist_cfg["serverMode"] = tmp.get("serverMode").lower() + if radius_vpn_name: + exist_cfg["vpnName"] = tmp.get("vpnName").lower() + config_list.append(exist_cfg) + if cfg in config_list: + if state == "present": + need_cfg = False + else: + need_cfg = True + else: + if state == "present": + need_cfg = True + else: + need_cfg = False + result["need_cfg"] = need_cfg + return result + + def merge_radius_server_name(self, **kwargs): + """ Merge radius server name """ + + module = kwargs["module"] + radius_group_name = module.params['radius_group_name'] + radius_server_type = module.params['radius_server_type'] + radius_server_name = module.params['radius_server_name'] + radius_server_port = module.params['radius_server_port'] + radius_server_mode = module.params['radius_server_mode'] + radius_vpn_name = module.params['radius_vpn_name'] + + conf_str = CE_MERGE_RADIUS_SERVER_NAME % ( + radius_group_name, radius_server_type, + radius_server_name, radius_server_port, + radius_server_mode, radius_vpn_name) + + recv_xml = self.netconf_set_config(module=module, conf_str=conf_str) + + if "" not in recv_xml: + module.fail_json(msg='Error: Merge radius server name failed.') + + cmds = [] + + cmd = "radius server group %s" % radius_group_name + cmds.append(cmd) + + if radius_server_type == "Authentication": + cmd = "radius server authentication hostname %s %s" % ( + radius_server_name, radius_server_port) + + if radius_vpn_name and radius_vpn_name != "_public_": + cmd += " vpn-instance %s" % radius_vpn_name + + if radius_server_mode == "Secondary-server": + cmd += " secondary" + else: + cmd = "radius server accounting hostname %s %s" % ( + radius_server_name, radius_server_port) + + if radius_vpn_name and radius_vpn_name != "_public_": + cmd += " vpn-instance %s" % radius_vpn_name + + if radius_server_mode == "Secondary-server": + cmd += " secondary" + + cmds.append(cmd) + return cmds + + def delete_radius_server_name(self, **kwargs): + """ Delete radius server name """ + + module = kwargs["module"] + radius_group_name = module.params['radius_group_name'] + 
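The merge_radius_server_name and delete_radius_server_name pair here differ only in the "undo " prefix of the recorded CLI line; both share the same optional vpn-instance and secondary suffixes. A compact sketch of that assembly as a single hypothetical helper (not part of the module):

def radius_hostname_cmd(server_type, name, port, vpn_name=None,
                        secondary=False, undo=False):
    """Build the CLI line these methods append to cmds (illustrative only)."""
    kind = "authentication" if server_type == "Authentication" else "accounting"
    cmd = "radius server %s hostname %s %s" % (kind, name, port)
    if vpn_name and vpn_name != "_public_":
        cmd += " vpn-instance %s" % vpn_name
    if secondary:
        cmd += " secondary"
    return "undo " + cmd if undo else cmd

print(radius_hostname_cmd("Authentication", "rds1", "1812",
                          vpn_name="vpn1", secondary=True, undo=True))
# undo radius server authentication hostname rds1 1812 vpn-instance vpn1 secondary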
radius_server_type = module.params['radius_server_type'] + radius_server_name = module.params['radius_server_name'] + radius_server_port = module.params['radius_server_port'] + radius_server_mode = module.params['radius_server_mode'] + radius_vpn_name = module.params['radius_vpn_name'] + + conf_str = CE_DELETE_RADIUS_SERVER_NAME % ( + radius_group_name, radius_server_type, + radius_server_name, radius_server_port, + radius_server_mode, radius_vpn_name) + + recv_xml = self.netconf_set_config(module=module, conf_str=conf_str) + + if "" not in recv_xml: + module.fail_json(msg='Error: delete radius server name failed.') + + cmds = [] + + cmd = "radius server group %s" % radius_group_name + cmds.append(cmd) + + if radius_server_type == "Authentication": + cmd = "undo radius server authentication hostname %s %s" % ( + radius_server_name, radius_server_port) + + if radius_vpn_name and radius_vpn_name != "_public_": + cmd += " vpn-instance %s" % radius_vpn_name + + if radius_server_mode == "Secondary-server": + cmd += " secondary" + else: + cmd = "undo radius server accounting hostname %s %s" % ( + radius_server_name, radius_server_port) + + if radius_vpn_name and radius_vpn_name != "_public_": + cmd += " vpn-instance %s" % radius_vpn_name + + if radius_server_mode == "Secondary-server": + cmd += " secondary" + + cmds.append(cmd) + return cmds + + def get_hwtacacs_server_cfg_ipv4(self, **kwargs): + """ Get hwtacacs server configure ipv4 """ + + module = kwargs["module"] + hwtacacs_template = module.params["hwtacacs_template"] + hwtacacs_server_ip = module.params["hwtacacs_server_ip"] + hwtacacs_server_type = module.params["hwtacacs_server_type"] + hwtacacs_is_secondary_server = module.params[ + "hwtacacs_is_secondary_server"] + hwtacacs_vpn_name = module.params["hwtacacs_vpn_name"] + hwtacacs_is_public_net = module.params["hwtacacs_is_public_net"] + state = module.params["state"] + + result = dict() + result["hwtacacs_server_cfg_ipv4"] = [] + need_cfg = False + + conf_str = CE_GET_HWTACACS_SERVER_CFG_IPV4 % hwtacacs_template + + recv_xml = self.netconf_get_config(module=module, conf_str=conf_str) + + if "" in recv_xml: + if state == "present": + need_cfg = True + + else: + xml_str = recv_xml.replace('\r', '').replace('\n', '').\ + replace('xmlns="urn:ietf:params:xml:ns:netconf:base:1.0"', "").\ + replace('xmlns="http://www.huawei.com/netconf/vrp"', "") + + root = ElementTree.fromstring(xml_str) + hwtacacs_server_cfg_ipv4 = root.findall( + "hwtacacs/hwTacTempCfgs/hwTacTempCfg/hwTacSrvCfgs/hwTacSrvCfg") + if hwtacacs_server_cfg_ipv4: + for tmp in hwtacacs_server_cfg_ipv4: + tmp_dict = dict() + for site in tmp: + if site.tag in ["serverIpAddress", "serverType", "isSecondaryServer", "isPublicNet", "vpnName"]: + tmp_dict[site.tag] = site.text + + result["hwtacacs_server_cfg_ipv4"].append(tmp_dict) + + if result["hwtacacs_server_cfg_ipv4"]: + cfg = dict() + config_list = list() + + if hwtacacs_server_ip: + cfg["serverIpAddress"] = hwtacacs_server_ip.lower() + if hwtacacs_server_type: + cfg["serverType"] = hwtacacs_server_type.lower() + if hwtacacs_is_secondary_server: + cfg["isSecondaryServer"] = str(hwtacacs_is_secondary_server).lower() + if hwtacacs_is_public_net: + cfg["isPublicNet"] = str(hwtacacs_is_public_net).lower() + if hwtacacs_vpn_name: + cfg["vpnName"] = hwtacacs_vpn_name.lower() + + for tmp in result["hwtacacs_server_cfg_ipv4"]: + exist_cfg = dict() + if hwtacacs_server_ip: + exist_cfg["serverIpAddress"] = tmp.get("serverIpAddress").lower() + if hwtacacs_server_type: + 
exist_cfg["serverType"] = tmp.get("serverType").lower() + if hwtacacs_is_secondary_server: + exist_cfg["isSecondaryServer"] = tmp.get("isSecondaryServer").lower() + if hwtacacs_is_public_net: + exist_cfg["isPublicNet"] = tmp.get("isPublicNet").lower() + if hwtacacs_vpn_name: + exist_cfg["vpnName"] = tmp.get("vpnName").lower() + config_list.append(exist_cfg) + if cfg in config_list: + if state == "present": + need_cfg = False + else: + need_cfg = True + else: + if state == "present": + need_cfg = True + else: + need_cfg = False + result["need_cfg"] = need_cfg + return result + + def merge_hwtacacs_server_cfg_ipv4(self, **kwargs): + """ Merge hwtacacs server configure ipv4 """ + + module = kwargs["module"] + hwtacacs_template = module.params["hwtacacs_template"] + hwtacacs_server_ip = module.params["hwtacacs_server_ip"] + hwtacacs_server_type = module.params["hwtacacs_server_type"] + hwtacacs_is_secondary_server = module.params[ + "hwtacacs_is_secondary_server"] + hwtacacs_vpn_name = module.params["hwtacacs_vpn_name"] + hwtacacs_is_public_net = module.params["hwtacacs_is_public_net"] + + conf_str = CE_MERGE_HWTACACS_SERVER_CFG_IPV4 % ( + hwtacacs_template, hwtacacs_server_ip, + hwtacacs_server_type, str(hwtacacs_is_secondary_server).lower(), + hwtacacs_vpn_name, str(hwtacacs_is_public_net).lower()) + + recv_xml = self.netconf_set_config(module=module, conf_str=conf_str) + + if "" not in recv_xml: + module.fail_json( + msg='Error: Merge hwtacacs server config ipv4 failed.') + + cmds = [] + + cmd = "hwtacacs server template %s" % hwtacacs_template + cmds.append(cmd) + + if hwtacacs_server_type == "Authentication": + cmd = "hwtacacs server authentication %s" % hwtacacs_server_ip + if hwtacacs_vpn_name and hwtacacs_vpn_name != "_public_": + cmd += " vpn-instance %s" % hwtacacs_vpn_name + if hwtacacs_is_public_net: + cmd += " public-net" + if hwtacacs_is_secondary_server: + cmd += " secondary" + + elif hwtacacs_server_type == "Authorization": + cmd = "hwtacacs server authorization %s" % hwtacacs_server_ip + if hwtacacs_vpn_name and hwtacacs_vpn_name != "_public_": + cmd += " vpn-instance %s" % hwtacacs_vpn_name + if hwtacacs_is_public_net: + cmd += " public-net" + if hwtacacs_is_secondary_server: + cmd += " secondary" + + elif hwtacacs_server_type == "Accounting": + cmd = "hwtacacs server accounting %s" % hwtacacs_server_ip + if hwtacacs_vpn_name and hwtacacs_vpn_name != "_public_": + cmd += " vpn-instance %s" % hwtacacs_vpn_name + if hwtacacs_is_public_net: + cmd += " public-net" + if hwtacacs_is_secondary_server: + cmd += " secondary" + + elif hwtacacs_server_type == "Common": + cmd = "hwtacacs server %s" % hwtacacs_server_ip + if hwtacacs_vpn_name and hwtacacs_vpn_name != "_public_": + cmd += " vpn-instance %s" % hwtacacs_vpn_name + if hwtacacs_is_public_net: + cmd += " public-net" + if hwtacacs_is_secondary_server: + cmd += " secondary" + + cmds.append(cmd) + return cmds + + def delete_hwtacacs_server_cfg_ipv4(self, **kwargs): + """ Delete hwtacacs server configure ipv4 """ + + module = kwargs["module"] + hwtacacs_template = module.params["hwtacacs_template"] + hwtacacs_server_ip = module.params["hwtacacs_server_ip"] + hwtacacs_server_type = module.params["hwtacacs_server_type"] + hwtacacs_is_secondary_server = module.params[ + "hwtacacs_is_secondary_server"] + hwtacacs_vpn_name = module.params["hwtacacs_vpn_name"] + hwtacacs_is_public_net = module.params["hwtacacs_is_public_net"] + + conf_str = CE_DELETE_HWTACACS_SERVER_CFG_IPV4 % ( + hwtacacs_template, hwtacacs_server_ip, + 
hwtacacs_server_type, str(hwtacacs_is_secondary_server).lower(), + hwtacacs_vpn_name, str(hwtacacs_is_public_net).lower()) + + recv_xml = self.netconf_set_config(module=module, conf_str=conf_str) + + if "" not in recv_xml: + module.fail_json( + msg='Error: Delete hwtacacs server config ipv4 failed.') + + cmds = [] + + cmd = "hwtacacs server template %s" % hwtacacs_template + cmds.append(cmd) + + if hwtacacs_server_type == "Authentication": + cmd = "undo hwtacacs server authentication %s" % hwtacacs_server_ip + if hwtacacs_vpn_name and hwtacacs_vpn_name != "_public_": + cmd += " vpn-instance %s" % hwtacacs_vpn_name + if hwtacacs_is_public_net: + cmd += " public-net" + if hwtacacs_is_secondary_server: + cmd += " secondary" + + elif hwtacacs_server_type == "Authorization": + cmd = "undo hwtacacs server authorization %s" % hwtacacs_server_ip + if hwtacacs_vpn_name and hwtacacs_vpn_name != "_public_": + cmd += " vpn-instance %s" % hwtacacs_vpn_name + if hwtacacs_is_public_net: + cmd += " public-net" + if hwtacacs_is_secondary_server: + cmd += " secondary" + + elif hwtacacs_server_type == "Accounting": + cmd = "undo hwtacacs server accounting %s" % hwtacacs_server_ip + if hwtacacs_vpn_name and hwtacacs_vpn_name != "_public_": + cmd += " vpn-instance %s" % hwtacacs_vpn_name + if hwtacacs_is_public_net: + cmd += " public-net" + if hwtacacs_is_secondary_server: + cmd += " secondary" + + elif hwtacacs_server_type == "Common": + cmd = "undo hwtacacs server %s" % hwtacacs_server_ip + if hwtacacs_vpn_name and hwtacacs_vpn_name != "_public_": + cmd += " vpn-instance %s" % hwtacacs_vpn_name + if hwtacacs_is_public_net: + cmd += " public-net" + if hwtacacs_is_secondary_server: + cmd += " secondary" + + cmds.append(cmd) + return cmds + + def get_hwtacacs_server_cfg_ipv6(self, **kwargs): + """ Get hwtacacs server configure ipv6 """ + + module = kwargs["module"] + hwtacacs_template = module.params["hwtacacs_template"] + hwtacacs_server_ipv6 = module.params["hwtacacs_server_ipv6"] + hwtacacs_server_type = module.params["hwtacacs_server_type"] + hwtacacs_is_secondary_server = module.params[ + "hwtacacs_is_secondary_server"] + hwtacacs_vpn_name = module.params["hwtacacs_vpn_name"] + state = module.params["state"] + + result = dict() + result["hwtacacs_server_cfg_ipv6"] = [] + need_cfg = False + + conf_str = CE_GET_HWTACACS_SERVER_CFG_IPV6 % hwtacacs_template + + recv_xml = self.netconf_get_config(module=module, conf_str=conf_str) + + if "" in recv_xml: + if state == "present": + need_cfg = True + + else: + xml_str = recv_xml.replace('\r', '').replace('\n', '').\ + replace('xmlns="urn:ietf:params:xml:ns:netconf:base:1.0"', "").\ + replace('xmlns="http://www.huawei.com/netconf/vrp"', "") + + root = ElementTree.fromstring(xml_str) + hwtacacs_server_cfg_ipv6 = root.findall( + "hwtacacs/hwTacTempCfgs/hwTacTempCfg/hwTacIpv6SrvCfgs/hwTacIpv6SrvCfg") + if hwtacacs_server_cfg_ipv6: + for tmp in hwtacacs_server_cfg_ipv6: + tmp_dict = dict() + for site in tmp: + if site.tag in ["serverIpAddress", "serverType", "isSecondaryServer", "vpnName"]: + tmp_dict[site.tag] = site.text + + result["hwtacacs_server_cfg_ipv6"].append(tmp_dict) + + if result["hwtacacs_server_cfg_ipv6"]: + cfg = dict() + config_list = list() + + if hwtacacs_server_ipv6: + cfg["serverIpAddress"] = hwtacacs_server_ipv6.lower() + if hwtacacs_server_type: + cfg["serverType"] = hwtacacs_server_type.lower() + if hwtacacs_is_secondary_server: + cfg["isSecondaryServer"] = str(hwtacacs_is_secondary_server).lower() + if hwtacacs_vpn_name: + cfg["vpnName"] = 
hwtacacs_vpn_name.lower() + + for tmp in result["hwtacacs_server_cfg_ipv6"]: + exist_cfg = dict() + if hwtacacs_server_ipv6: + exist_cfg["serverIpAddress"] = tmp.get("serverIpAddress").lower() + if hwtacacs_server_type: + exist_cfg["serverType"] = tmp.get("serverType").lower() + if hwtacacs_is_secondary_server: + exist_cfg["isSecondaryServer"] = tmp.get("isSecondaryServer").lower() + if hwtacacs_vpn_name: + exist_cfg["vpnName"] = tmp.get("vpnName").lower() + config_list.append(exist_cfg) + if cfg in config_list: + if state == "present": + need_cfg = False + else: + need_cfg = True + else: + if state == "present": + need_cfg = True + else: + need_cfg = False + result["need_cfg"] = need_cfg + return result + + def merge_hwtacacs_server_cfg_ipv6(self, **kwargs): + """ Merge hwtacacs server configure ipv6 """ + + module = kwargs["module"] + hwtacacs_template = module.params["hwtacacs_template"] + hwtacacs_server_ipv6 = module.params["hwtacacs_server_ipv6"] + hwtacacs_server_type = module.params["hwtacacs_server_type"] + hwtacacs_is_secondary_server = module.params[ + "hwtacacs_is_secondary_server"] + hwtacacs_vpn_name = module.params["hwtacacs_vpn_name"] + + conf_str = CE_MERGE_HWTACACS_SERVER_CFG_IPV6 % ( + hwtacacs_template, hwtacacs_server_ipv6, + hwtacacs_server_type, str(hwtacacs_is_secondary_server).lower(), + hwtacacs_vpn_name) + + recv_xml = self.netconf_set_config(module=module, conf_str=conf_str) + + if "" not in recv_xml: + module.fail_json( + msg='Error: Merge hwtacacs server config ipv6 failed.') + + cmds = [] + + cmd = "hwtacacs server template %s" % hwtacacs_template + cmds.append(cmd) + + if hwtacacs_server_type == "Authentication": + cmd = "hwtacacs server authentication %s" % hwtacacs_server_ipv6 + if hwtacacs_vpn_name and hwtacacs_vpn_name != "_public_": + cmd += " vpn-instance %s" % hwtacacs_vpn_name + if hwtacacs_is_secondary_server: + cmd += " secondary" + + elif hwtacacs_server_type == "Authorization": + cmd = "hwtacacs server authorization %s" % hwtacacs_server_ipv6 + if hwtacacs_vpn_name and hwtacacs_vpn_name != "_public_": + cmd += " vpn-instance %s" % hwtacacs_vpn_name + if hwtacacs_is_secondary_server: + cmd += " secondary" + + elif hwtacacs_server_type == "Accounting": + cmd = "hwtacacs server accounting %s" % hwtacacs_server_ipv6 + if hwtacacs_vpn_name and hwtacacs_vpn_name != "_public_": + cmd += " vpn-instance %s" % hwtacacs_vpn_name + if hwtacacs_is_secondary_server: + cmd += " secondary" + + elif hwtacacs_server_type == "Common": + cmd = "hwtacacs server %s" % hwtacacs_server_ipv6 + if hwtacacs_vpn_name and hwtacacs_vpn_name != "_public_": + cmd += " vpn-instance %s" % hwtacacs_vpn_name + if hwtacacs_is_secondary_server: + cmd += " secondary" + + cmds.append(cmd) + return cmds + + def delete_hwtacacs_server_cfg_ipv6(self, **kwargs): + """ Delete hwtacacs server configure ipv6 """ + + module = kwargs["module"] + hwtacacs_template = module.params["hwtacacs_template"] + hwtacacs_server_ipv6 = module.params["hwtacacs_server_ipv6"] + hwtacacs_server_type = module.params["hwtacacs_server_type"] + hwtacacs_is_secondary_server = module.params[ + "hwtacacs_is_secondary_server"] + hwtacacs_vpn_name = module.params["hwtacacs_vpn_name"] + + conf_str = CE_DELETE_HWTACACS_SERVER_CFG_IPV6 % ( + hwtacacs_template, hwtacacs_server_ipv6, + hwtacacs_server_type, str(hwtacacs_is_secondary_server).lower(), + hwtacacs_vpn_name) + + recv_xml = self.netconf_set_config(module=module, conf_str=conf_str) + + if "" not in recv_xml: + module.fail_json( + msg='Error: Delete hwtacacs server 
config ipv6 failed.') + + cmds = [] + + cmd = "hwtacacs server template %s" % hwtacacs_template + cmds.append(cmd) + + if hwtacacs_server_type == "Authentication": + cmd = "undo hwtacacs server authentication %s" % hwtacacs_server_ipv6 + if hwtacacs_vpn_name and hwtacacs_vpn_name != "_public_": + cmd += " vpn-instance %s" % hwtacacs_vpn_name + if hwtacacs_is_secondary_server: + cmd += " secondary" + + elif hwtacacs_server_type == "Authorization": + cmd = "undo hwtacacs server authorization %s" % hwtacacs_server_ipv6 + if hwtacacs_vpn_name and hwtacacs_vpn_name != "_public_": + cmd += " vpn-instance %s" % hwtacacs_vpn_name + if hwtacacs_is_secondary_server: + cmd += " secondary" + + elif hwtacacs_server_type == "Accounting": + cmd = "undo hwtacacs server accounting %s" % hwtacacs_server_ipv6 + if hwtacacs_vpn_name and hwtacacs_vpn_name != "_public_": + cmd += " vpn-instance %s" % hwtacacs_vpn_name + if hwtacacs_is_secondary_server: + cmd += " secondary" + + elif hwtacacs_server_type == "Common": + cmd = "undo hwtacacs server %s" % hwtacacs_server_ipv6 + if hwtacacs_vpn_name and hwtacacs_vpn_name != "_public_": + cmd += " vpn-instance %s" % hwtacacs_vpn_name + if hwtacacs_is_secondary_server: + cmd += " secondary" + + cmds.append(cmd) + return cmds + + def get_hwtacacs_host_server_cfg(self, **kwargs): + """ Get hwtacacs host server configure """ + + module = kwargs["module"] + hwtacacs_template = module.params["hwtacacs_template"] + hwtacacs_server_host_name = module.params["hwtacacs_server_host_name"] + hwtacacs_server_type = module.params["hwtacacs_server_type"] + hwtacacs_is_secondary_server = "true" if module.params[ + "hwtacacs_is_secondary_server"] is True else "false" + hwtacacs_vpn_name = module.params["hwtacacs_vpn_name"] + hwtacacs_is_public_net = "true" if module.params[ + "hwtacacs_is_public_net"] is True else "false" + state = module.params["state"] + + result = dict() + result["hwtacacs_server_name_cfg"] = [] + need_cfg = False + + conf_str = CE_GET_HWTACACS_HOST_SERVER_CFG % hwtacacs_template + + recv_xml = self.netconf_get_config(module=module, conf_str=conf_str) + + if "" in recv_xml: + if state == "present": + need_cfg = True + + else: + xml_str = recv_xml.replace('\r', '').replace('\n', '').\ + replace('xmlns="urn:ietf:params:xml:ns:netconf:base:1.0"', "").\ + replace('xmlns="http://www.huawei.com/netconf/vrp"', "") + + root = ElementTree.fromstring(xml_str) + hwtacacs_server_name_cfg = root.findall( + "hwtacacs/hwTacTempCfgs/hwTacTempCfg/hwTacHostSrvCfgs/hwTacHostSrvCfg") + if hwtacacs_server_name_cfg: + for tmp in hwtacacs_server_name_cfg: + tmp_dict = dict() + for site in tmp: + if site.tag in ["serverHostName", "serverType", "isSecondaryServer", "isPublicNet", "vpnName"]: + tmp_dict[site.tag] = site.text + + result["hwtacacs_server_name_cfg"].append(tmp_dict) + + if result["hwtacacs_server_name_cfg"]: + cfg = dict() + config_list = list() + + if hwtacacs_server_host_name: + cfg["serverHostName"] = hwtacacs_server_host_name.lower() + if hwtacacs_server_type: + cfg["serverType"] = hwtacacs_server_type.lower() + if hwtacacs_is_secondary_server: + cfg["isSecondaryServer"] = str(hwtacacs_is_secondary_server).lower() + if hwtacacs_is_public_net: + cfg["isPublicNet"] = str(hwtacacs_is_public_net).lower() + if hwtacacs_vpn_name: + cfg["vpnName"] = hwtacacs_vpn_name.lower() + + for tmp in result["hwtacacs_server_name_cfg"]: + exist_cfg = dict() + if hwtacacs_server_host_name: + exist_cfg["serverHostName"] = tmp.get("serverHostName").lower() + if hwtacacs_server_type: + 
exist_cfg["serverType"] = tmp.get("serverType").lower() + if hwtacacs_is_secondary_server: + exist_cfg["isSecondaryServer"] = tmp.get("isSecondaryServer").lower() + if hwtacacs_is_public_net: + exist_cfg["isPublicNet"] = tmp.get("isPublicNet").lower() + if hwtacacs_vpn_name: + exist_cfg["vpnName"] = tmp.get("vpnName").lower() + config_list.append(exist_cfg) + if cfg in config_list: + if state == "present": + need_cfg = False + else: + need_cfg = True + else: + if state == "present": + need_cfg = True + else: + need_cfg = False + result["need_cfg"] = need_cfg + return result + + def merge_hwtacacs_host_server_cfg(self, **kwargs): + """ Merge hwtacacs host server configure """ + + module = kwargs["module"] + hwtacacs_template = module.params["hwtacacs_template"] + hwtacacs_server_host_name = module.params["hwtacacs_server_host_name"] + hwtacacs_server_type = module.params["hwtacacs_server_type"] + hwtacacs_is_secondary_server = module.params[ + "hwtacacs_is_secondary_server"] + hwtacacs_vpn_name = module.params["hwtacacs_vpn_name"] + hwtacacs_is_public_net = module.params["hwtacacs_is_public_net"] + + conf_str = CE_MERGE_HWTACACS_HOST_SERVER_CFG % ( + hwtacacs_template, hwtacacs_server_host_name, + hwtacacs_server_type, str(hwtacacs_is_secondary_server).lower(), + hwtacacs_vpn_name, str(hwtacacs_is_public_net).lower()) + + recv_xml = self.netconf_set_config(module=module, conf_str=conf_str) + + if "" not in recv_xml: + module.fail_json( + msg='Error: Merge hwtacacs host server config failed.') + + cmds = [] + + if hwtacacs_server_type == "Authentication": + cmd = "hwtacacs server authentication host %s" % hwtacacs_server_host_name + if hwtacacs_vpn_name and hwtacacs_vpn_name != "_public_": + cmd += " vpn-instance %s" % hwtacacs_vpn_name + if hwtacacs_is_public_net: + cmd += " public-net" + if hwtacacs_is_secondary_server: + cmd += " secondary" + + elif hwtacacs_server_type == "Authorization": + cmd = "hwtacacs server authorization host %s" % hwtacacs_server_host_name + if hwtacacs_vpn_name and hwtacacs_vpn_name != "_public_": + cmd += " vpn-instance %s" % hwtacacs_vpn_name + if hwtacacs_is_public_net: + cmd += " public-net" + if hwtacacs_is_secondary_server: + cmd += " secondary" + + elif hwtacacs_server_type == "Accounting": + cmd = "hwtacacs server accounting host %s" % hwtacacs_server_host_name + if hwtacacs_vpn_name and hwtacacs_vpn_name != "_public_": + cmd += " vpn-instance %s" % hwtacacs_vpn_name + if hwtacacs_is_public_net: + cmd += " public-net" + if hwtacacs_is_secondary_server: + cmd += " secondary" + + elif hwtacacs_server_type == "Common": + cmd = "hwtacacs server host host-name %s" % hwtacacs_server_host_name + if hwtacacs_vpn_name and hwtacacs_vpn_name != "_public_": + cmd += " vpn-instance %s" % hwtacacs_vpn_name + if hwtacacs_is_public_net: + cmd += " public-net" + if hwtacacs_is_secondary_server: + cmd += " secondary" + + cmds.append(cmd) + return cmds + + def delete_hwtacacs_host_server_cfg(self, **kwargs): + """ Delete hwtacacs host server configure """ + + module = kwargs["module"] + hwtacacs_template = module.params["hwtacacs_template"] + hwtacacs_server_host_name = module.params["hwtacacs_server_host_name"] + hwtacacs_server_type = module.params["hwtacacs_server_type"] + hwtacacs_is_secondary_server = module.params[ + "hwtacacs_is_secondary_server"] + hwtacacs_vpn_name = module.params["hwtacacs_vpn_name"] + hwtacacs_is_public_net = module.params["hwtacacs_is_public_net"] + + conf_str = CE_DELETE_HWTACACS_HOST_SERVER_CFG % ( + hwtacacs_template, 
hwtacacs_server_host_name,
+            hwtacacs_server_type, str(hwtacacs_is_secondary_server).lower(),
+            hwtacacs_vpn_name, str(hwtacacs_is_public_net).lower())
+
+        recv_xml = self.netconf_set_config(module=module, conf_str=conf_str)
+
+        if "<ok/>" not in recv_xml:
+            module.fail_json(
+                msg='Error: Delete hwtacacs host server config failed.')
+
+        cmds = []
+
+        if hwtacacs_server_type == "Authentication":
+            cmd = "undo hwtacacs server authentication host %s" % hwtacacs_server_host_name
+            if hwtacacs_vpn_name and hwtacacs_vpn_name != "_public_":
+                cmd += " vpn-instance %s" % hwtacacs_vpn_name
+            if hwtacacs_is_public_net:
+                cmd += " public-net"
+            if hwtacacs_is_secondary_server:
+                cmd += " secondary"
+
+        elif hwtacacs_server_type == "Authorization":
+            cmd = "undo hwtacacs server authorization host %s" % hwtacacs_server_host_name
+            if hwtacacs_vpn_name and hwtacacs_vpn_name != "_public_":
+                cmd += " vpn-instance %s" % hwtacacs_vpn_name
+            if hwtacacs_is_public_net:
+                cmd += " public-net"
+            if hwtacacs_is_secondary_server:
+                cmd += " secondary"
+
+        elif hwtacacs_server_type == "Accounting":
+            cmd = "undo hwtacacs server accounting host %s" % hwtacacs_server_host_name
+            if hwtacacs_vpn_name and hwtacacs_vpn_name != "_public_":
+                cmd += " vpn-instance %s" % hwtacacs_vpn_name
+            if hwtacacs_is_public_net:
+                cmd += " public-net"
+            if hwtacacs_is_secondary_server:
+                cmd += " secondary"
+
+        elif hwtacacs_server_type == "Common":
+            cmd = "undo hwtacacs server host %s" % hwtacacs_server_host_name
+            if hwtacacs_vpn_name and hwtacacs_vpn_name != "_public_":
+                cmd += " vpn-instance %s" % hwtacacs_vpn_name
+            if hwtacacs_is_public_net:
+                cmd += " public-net"
+            if hwtacacs_is_secondary_server:
+                cmd += " secondary"
+
+        cmds.append(cmd)
+        return cmds
+
+
+def check_name(**kwargs):
+    """ Check invalid name """
+
+    module = kwargs["module"]
+    name = kwargs["name"]
+    invalid_char = kwargs["invalid_char"]
+
+    for item in invalid_char:
+        if item in name:
+            module.fail_json(
+                msg='Error: Invalid char %s is in the name %s.' % (item, name))
+
+
+def check_module_argument(**kwargs):
+    """ Check module argument """
+
+    module = kwargs["module"]
+
+    # local para
+    local_user_name = module.params['local_user_name']
+    local_password = module.params['local_password']
+    local_ftp_dir = module.params['local_ftp_dir']
+    local_user_level = module.params['local_user_level']
+    local_user_group = module.params['local_user_group']
+
+    # radius para
+    radius_group_name = module.params['radius_group_name']
+    radius_server_ip = module.params['radius_server_ip']
+    radius_server_port = module.params['radius_server_port']
+    radius_vpn_name = module.params['radius_vpn_name']
+    radius_server_name = module.params['radius_server_name']
+
+    # hwtacacs para
+    hwtacacs_template = module.params['hwtacacs_template']
+    hwtacacs_server_ip = module.params['hwtacacs_server_ip']
+    hwtacacs_vpn_name = module.params['hwtacacs_vpn_name']
+    hwtacacs_server_host_name = module.params['hwtacacs_server_host_name']
+
+    if local_user_name:
+        if len(local_user_name) > 253:
+            module.fail_json(
+                msg='Error: The local_user_name %s is longer than 253 characters.' % local_user_name)
+        check_name(module=module, name=local_user_name,
+                   invalid_char=INVALID_USER_NAME_CHAR)
+
+    if local_password and len(local_password) > 255:
+        module.fail_json(
+            msg='Error: The local_password is longer than 255 characters.')
+
+    if local_user_level:
+        if int(local_user_level) > 15 or int(local_user_level) < 0:
+            module.fail_json(
+                msg='Error: The local_user_level %s is out of [0 - 15].' % local_user_level)
+
+    if local_ftp_dir:
+        if len(local_ftp_dir) > 255:
+            module.fail_json(
+                msg='Error: The local_ftp_dir %s is longer than 255 characters.' % local_ftp_dir)
+
+    if local_user_group:
+        if len(local_user_group) > 32 or len(local_user_group) < 1:
+            module.fail_json(
+                msg='Error: The local_user_group %s is out of [1 - 32].' % local_user_group)
+
+    if radius_group_name and len(radius_group_name) > 32:
+        module.fail_json(
+            msg='Error: The radius_group_name %s is longer than 32 characters.' % radius_group_name)
+
+    if radius_server_ip and not check_ip_addr(radius_server_ip):
+        module.fail_json(
+            msg='Error: The radius_server_ip %s is invalid.' % radius_server_ip)
+
+    if radius_server_port and not radius_server_port.isdigit():
+        module.fail_json(
+            msg='Error: The radius_server_port %s is invalid.' % radius_server_port)
+
+    if radius_vpn_name:
+        if len(radius_vpn_name) > 31:
+            module.fail_json(
+                msg='Error: The radius_vpn_name %s is longer than 31 characters.' % radius_vpn_name)
+        if ' ' in radius_vpn_name:
+            module.fail_json(
+                msg='Error: The radius_vpn_name %s contains a space.' % radius_vpn_name)
+
+    if radius_server_name:
+        if len(radius_server_name) > 255:
+            module.fail_json(
+                msg='Error: The radius_server_name %s is longer than 255 characters.' % radius_server_name)
+        if ' ' in radius_server_name:
+            module.fail_json(
+                msg='Error: The radius_server_name %s contains a space.' % radius_server_name)
+
+    if hwtacacs_template and len(hwtacacs_template) > 32:
+        module.fail_json(
+            msg='Error: The hwtacacs_template %s is longer than 32 characters.' % hwtacacs_template)
+
+    if hwtacacs_server_ip and not check_ip_addr(hwtacacs_server_ip):
+        module.fail_json(
+            msg='Error: The hwtacacs_server_ip %s is invalid.' % hwtacacs_server_ip)
+
+    if hwtacacs_vpn_name:
+        if len(hwtacacs_vpn_name) > 31:
+            module.fail_json(
+                msg='Error: The hwtacacs_vpn_name %s is longer than 31 characters.' % hwtacacs_vpn_name)
+        if ' ' in hwtacacs_vpn_name:
+            module.fail_json(
+                msg='Error: The hwtacacs_vpn_name %s contains a space.' % hwtacacs_vpn_name)
+
+    if hwtacacs_server_host_name:
+        if len(hwtacacs_server_host_name) > 255:
+            module.fail_json(
+                msg='Error: The hwtacacs_server_host_name %s is longer than 255 characters.' % hwtacacs_server_host_name)
+        if ' ' in hwtacacs_server_host_name:
+            module.fail_json(
+                msg='Error: The hwtacacs_server_host_name %s contains a space.'
% hwtacacs_server_host_name) + + +def main(): + """ Module main """ + + argument_spec = dict( + state=dict(choices=['present', 'absent'], default='present'), + local_user_name=dict(type='str'), + local_password=dict(type='str', no_log=True), + local_service_type=dict(type='str'), + local_ftp_dir=dict(type='str'), + local_user_level=dict(type='str'), + local_user_group=dict(type='str'), + radius_group_name=dict(type='str'), + radius_server_type=dict(choices=['Authentication', 'Accounting']), + radius_server_ip=dict(type='str'), + radius_server_ipv6=dict(type='str'), + radius_server_port=dict(type='str'), + radius_server_mode=dict( + choices=['Secondary-server', 'Primary-server']), + radius_vpn_name=dict(type='str'), + radius_server_name=dict(type='str'), + hwtacacs_template=dict(type='str'), + hwtacacs_server_ip=dict(type='str'), + hwtacacs_server_ipv6=dict(type='str'), + hwtacacs_server_type=dict( + choices=['Authentication', 'Authorization', 'Accounting', 'Common']), + hwtacacs_is_secondary_server=dict( + required=False, default=False, type='bool'), + hwtacacs_vpn_name=dict(type='str'), + hwtacacs_is_public_net=dict( + required=False, default=False, type='bool'), + hwtacacs_server_host_name=dict(type='str') + ) + + argument_spec.update(ce_argument_spec) + + module = AnsibleModule(argument_spec=argument_spec, + supports_check_mode=True) + + check_module_argument(module=module) + + changed = False + proposed = dict() + existing = dict() + end_state = dict() + updates = [] + + # common para + state = module.params['state'] + + # local para + local_user_name = module.params['local_user_name'] + local_password = module.params['local_password'] + local_service_type = module.params['local_service_type'] + local_ftp_dir = module.params['local_ftp_dir'] + local_user_level = module.params['local_user_level'] + local_user_group = module.params['local_user_group'] + + # radius para + radius_group_name = module.params['radius_group_name'] + radius_server_type = module.params['radius_server_type'] + radius_server_ip = module.params['radius_server_ip'] + radius_server_ipv6 = module.params['radius_server_ipv6'] + radius_server_port = module.params['radius_server_port'] + radius_server_mode = module.params['radius_server_mode'] + radius_vpn_name = module.params['radius_vpn_name'] + radius_server_name = module.params['radius_server_name'] + + # hwtacacs para + hwtacacs_template = module.params['hwtacacs_template'] + hwtacacs_server_ip = module.params['hwtacacs_server_ip'] + hwtacacs_server_ipv6 = module.params['hwtacacs_server_ipv6'] + hwtacacs_server_type = module.params['hwtacacs_server_type'] + hwtacacs_is_secondary_server = module.params[ + 'hwtacacs_is_secondary_server'] + hwtacacs_vpn_name = module.params['hwtacacs_vpn_name'] + hwtacacs_is_public_net = module.params['hwtacacs_is_public_net'] + hwtacacs_server_host_name = module.params['hwtacacs_server_host_name'] + + ce_aaa_server_host = AaaServerHost() + + if not ce_aaa_server_host: + module.fail_json(msg='Error: Construct ce_aaa_server failed.') + + # get proposed + proposed["state"] = state + if local_user_name: + proposed["local_user_name"] = local_user_name + if local_password: + proposed["local_password"] = "******" + if local_service_type: + proposed["local_service_type"] = local_service_type + if local_ftp_dir: + proposed["local_ftp_dir"] = local_ftp_dir + if local_user_level: + proposed["local_user_level"] = local_user_level + if local_user_group: + proposed["local_user_group"] = local_user_group + if radius_group_name: + 
proposed["radius_group_name"] = radius_group_name + if radius_server_type: + proposed["radius_server_type"] = radius_server_type + if radius_server_ip: + proposed["radius_server_ip"] = radius_server_ip + if radius_server_ipv6: + proposed["radius_server_ipv6"] = radius_server_ipv6 + if radius_server_port: + proposed["radius_server_port"] = radius_server_port + if radius_server_mode: + proposed["radius_server_mode"] = radius_server_mode + if radius_vpn_name: + proposed["radius_vpn_name"] = radius_vpn_name + if radius_server_name: + proposed["radius_server_name"] = radius_server_name + if hwtacacs_template: + proposed["hwtacacs_template"] = hwtacacs_template + if hwtacacs_server_ip: + proposed["hwtacacs_server_ip"] = hwtacacs_server_ip + if hwtacacs_server_ipv6: + proposed["hwtacacs_server_ipv6"] = hwtacacs_server_ipv6 + if hwtacacs_server_type: + proposed["hwtacacs_server_type"] = hwtacacs_server_type + proposed["hwtacacs_is_secondary_server"] = hwtacacs_is_secondary_server + if hwtacacs_vpn_name: + proposed["hwtacacs_vpn_name"] = hwtacacs_vpn_name + proposed["hwtacacs_is_public_net"] = hwtacacs_is_public_net + if hwtacacs_server_host_name: + proposed["hwtacacs_server_host_name"] = hwtacacs_server_host_name + + if local_user_name: + + if state == "present" and not local_password: + module.fail_json( + msg='Error: Please input local_password when config local user.') + + local_user_result = ce_aaa_server_host.get_local_user_info( + module=module) + existing["local user name"] = local_user_result["local_user_info"] + + if state == "present": + # present local user + if local_user_result["need_cfg"]: + cmd = ce_aaa_server_host.merge_local_user_info(module=module) + + changed = True + updates.append(cmd) + + else: + # absent local user + if local_user_result["need_cfg"]: + if not local_service_type and not local_ftp_dir and not local_user_level and not local_user_group: + cmd = ce_aaa_server_host.delete_local_user_info( + module=module) + else: + cmd = ce_aaa_server_host.merge_local_user_info( + module=module) + + changed = True + updates.append(cmd) + + local_user_result = ce_aaa_server_host.get_local_user_info( + module=module) + end_state["local user name"] = local_user_result["local_user_info"] + + if radius_group_name: + + if not radius_server_ip and not radius_server_ipv6 and not radius_server_name: + module.fail_json( + msg='Error: Please input radius_server_ip or radius_server_ipv6 or radius_server_name.') + + if radius_server_ip and radius_server_ipv6: + module.fail_json( + msg='Error: Please do not input radius_server_ip and radius_server_ipv6 at the same time.') + + if not radius_server_type or not radius_server_port or not radius_server_mode or not radius_vpn_name: + module.fail_json( + msg='Error: Please input radius_server_type radius_server_port radius_server_mode radius_vpn_name.') + + if radius_server_ip: + rds_server_ipv4_result = ce_aaa_server_host.get_radius_server_cfg_ipv4( + module=module) + if radius_server_ipv6: + rds_server_ipv6_result = ce_aaa_server_host.get_radius_server_cfg_ipv6( + module=module) + if radius_server_name: + rds_server_name_result = ce_aaa_server_host.get_radius_server_name( + module=module) + + if radius_server_ip and rds_server_ipv4_result["radius_server_ip_v4"]: + existing["radius server ipv4"] = rds_server_ipv4_result[ + "radius_server_ip_v4"] + if radius_server_ipv6 and rds_server_ipv6_result["radius_server_ip_v6"]: + existing["radius server ipv6"] = rds_server_ipv6_result[ + "radius_server_ip_v6"] + if radius_server_name and 
rds_server_name_result["radius_server_name_cfg"]: + existing["radius server name cfg"] = rds_server_name_result[ + "radius_server_name_cfg"] + + if state == "present": + if radius_server_ip and rds_server_ipv4_result["need_cfg"]: + cmd = ce_aaa_server_host.merge_radius_server_cfg_ipv4( + module=module) + changed = True + updates.append(cmd) + + if radius_server_ipv6 and rds_server_ipv6_result["need_cfg"]: + cmd = ce_aaa_server_host.merge_radius_server_cfg_ipv6( + module=module) + changed = True + updates.append(cmd) + + if radius_server_name and rds_server_name_result["need_cfg"]: + cmd = ce_aaa_server_host.merge_radius_server_name( + module=module) + changed = True + updates.append(cmd) + else: + if radius_server_ip and rds_server_ipv4_result["need_cfg"]: + cmd = ce_aaa_server_host.delete_radius_server_cfg_ipv4( + module=module) + changed = True + updates.append(cmd) + + if radius_server_ipv6 and rds_server_ipv6_result["need_cfg"]: + cmd = ce_aaa_server_host.delete_radius_server_cfg_ipv6( + module=module) + changed = True + updates.append(cmd) + + if radius_server_name and rds_server_name_result["need_cfg"]: + cmd = ce_aaa_server_host.delete_radius_server_name( + module=module) + changed = True + updates.append(cmd) + + if radius_server_ip: + rds_server_ipv4_result = ce_aaa_server_host.get_radius_server_cfg_ipv4( + module=module) + if radius_server_ipv6: + rds_server_ipv6_result = ce_aaa_server_host.get_radius_server_cfg_ipv6( + module=module) + if radius_server_name: + rds_server_name_result = ce_aaa_server_host.get_radius_server_name( + module=module) + + if radius_server_ip and rds_server_ipv4_result["radius_server_ip_v4"]: + end_state["radius server ipv4"] = rds_server_ipv4_result[ + "radius_server_ip_v4"] + if radius_server_ipv6 and rds_server_ipv6_result["radius_server_ip_v6"]: + end_state["radius server ipv6"] = rds_server_ipv6_result[ + "radius_server_ip_v6"] + if radius_server_name and rds_server_name_result["radius_server_name_cfg"]: + end_state["radius server name cfg"] = rds_server_name_result[ + "radius_server_name_cfg"] + + if hwtacacs_template: + + if not hwtacacs_server_ip and not hwtacacs_server_ipv6 and not hwtacacs_server_host_name: + module.fail_json( + msg='Error: Please input hwtacacs_server_ip or hwtacacs_server_ipv6 or hwtacacs_server_host_name.') + + if not hwtacacs_server_type or not hwtacacs_vpn_name: + module.fail_json( + msg='Error: Please input hwtacacs_server_type hwtacacs_vpn_name.') + + if hwtacacs_server_ip and hwtacacs_server_ipv6: + module.fail_json( + msg='Error: Please do not set hwtacacs_server_ip and hwtacacs_server_ipv6 at the same time.') + + if hwtacacs_vpn_name and hwtacacs_vpn_name != "_public_": + if hwtacacs_is_public_net: + module.fail_json( + msg='Error: Please do not set vpn and public net at the same time.') + + if hwtacacs_server_ip: + hwtacacs_server_ipv4_result = ce_aaa_server_host.get_hwtacacs_server_cfg_ipv4( + module=module) + if hwtacacs_server_ipv6: + hwtacacs_server_ipv6_result = ce_aaa_server_host.get_hwtacacs_server_cfg_ipv6( + module=module) + if hwtacacs_server_host_name: + hwtacacs_host_name_result = ce_aaa_server_host.get_hwtacacs_host_server_cfg( + module=module) + + if hwtacacs_server_ip and hwtacacs_server_ipv4_result["hwtacacs_server_cfg_ipv4"]: + existing["hwtacacs server cfg ipv4"] = hwtacacs_server_ipv4_result[ + "hwtacacs_server_cfg_ipv4"] + if hwtacacs_server_ipv6 and hwtacacs_server_ipv6_result["hwtacacs_server_cfg_ipv6"]: + existing["hwtacacs server cfg ipv6"] = hwtacacs_server_ipv6_result[ + 
"hwtacacs_server_cfg_ipv6"] + if hwtacacs_server_host_name and hwtacacs_host_name_result["hwtacacs_server_name_cfg"]: + existing["hwtacacs server name cfg"] = hwtacacs_host_name_result[ + "hwtacacs_server_name_cfg"] + + if state == "present": + if hwtacacs_server_ip and hwtacacs_server_ipv4_result["need_cfg"]: + cmd = ce_aaa_server_host.merge_hwtacacs_server_cfg_ipv4( + module=module) + changed = True + updates.append(cmd) + + if hwtacacs_server_ipv6 and hwtacacs_server_ipv6_result["need_cfg"]: + cmd = ce_aaa_server_host.merge_hwtacacs_server_cfg_ipv6( + module=module) + changed = True + updates.append(cmd) + + if hwtacacs_server_host_name and hwtacacs_host_name_result["need_cfg"]: + cmd = ce_aaa_server_host.merge_hwtacacs_host_server_cfg( + module=module) + changed = True + updates.append(cmd) + + else: + if hwtacacs_server_ip and hwtacacs_server_ipv4_result["need_cfg"]: + cmd = ce_aaa_server_host.delete_hwtacacs_server_cfg_ipv4( + module=module) + changed = True + updates.append(cmd) + + if hwtacacs_server_ipv6 and hwtacacs_server_ipv6_result["need_cfg"]: + cmd = ce_aaa_server_host.delete_hwtacacs_server_cfg_ipv6( + module=module) + changed = True + updates.append(cmd) + + if hwtacacs_server_host_name and hwtacacs_host_name_result["need_cfg"]: + cmd = ce_aaa_server_host.delete_hwtacacs_host_server_cfg( + module=module) + changed = True + updates.append(cmd) + + if hwtacacs_server_ip: + hwtacacs_server_ipv4_result = ce_aaa_server_host.get_hwtacacs_server_cfg_ipv4( + module=module) + if hwtacacs_server_ipv6: + hwtacacs_server_ipv6_result = ce_aaa_server_host.get_hwtacacs_server_cfg_ipv6( + module=module) + if hwtacacs_server_host_name: + hwtacacs_host_name_result = ce_aaa_server_host.get_hwtacacs_host_server_cfg( + module=module) + + if hwtacacs_server_ip and hwtacacs_server_ipv4_result["hwtacacs_server_cfg_ipv4"]: + end_state["hwtacacs server cfg ipv4"] = hwtacacs_server_ipv4_result[ + "hwtacacs_server_cfg_ipv4"] + if hwtacacs_server_ipv6 and hwtacacs_server_ipv6_result["hwtacacs_server_cfg_ipv6"]: + end_state["hwtacacs server cfg ipv6"] = hwtacacs_server_ipv6_result[ + "hwtacacs_server_cfg_ipv6"] + if hwtacacs_server_host_name and hwtacacs_host_name_result["hwtacacs_server_name_cfg"]: + end_state["hwtacacs server name cfg"] = hwtacacs_host_name_result[ + "hwtacacs_server_name_cfg"] + + results = dict() + results['proposed'] = proposed + results['existing'] = existing + results['changed'] = changed + results['end_state'] = end_state + results['updates'] = updates + + module.exit_json(**results) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/cloudengine/ce_acl.py b/plugins/modules/network/cloudengine/ce_acl.py new file mode 100644 index 0000000000..4b82418bb2 --- /dev/null +++ b/plugins/modules/network/cloudengine/ce_acl.py @@ -0,0 +1,1004 @@ +#!/usr/bin/python +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+#
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+DOCUMENTATION = '''
+---
+module: ce_acl
+short_description: Manages base ACL configuration on HUAWEI CloudEngine switches.
+description:
+    - Manages base ACL configurations on HUAWEI CloudEngine switches.
+author:
+    - wangdezhuang (@QijunPan)
+notes:
+    - This module requires the netconf system service be enabled on the remote device being managed.
+    - Recommended connection is C(netconf).
+    - This module also works with C(local) connections for legacy playbooks.
+options:
+    state:
+        description:
+            - Specify desired state of the resource.
+        default: present
+        choices: ['present', 'absent', 'delete_acl']
+    acl_name:
+        description:
+            - ACL number or name.
+              For a numbered rule group, a value ranging from 2000 to 2999 indicates a basic ACL.
+              For a named rule group, the value is a string of 1 to 32 case-sensitive characters starting
+              with a letter; spaces are not supported.
+        required: true
+    acl_num:
+        description:
+            - ACL number.
+              The value is an integer ranging from 2000 to 2999.
+    acl_step:
+        description:
+            - ACL step.
+              The value is an integer ranging from 1 to 20. The default value is 5.
+    acl_description:
+        description:
+            - ACL description.
+              The value is a string of 1 to 127 characters.
+    rule_name:
+        description:
+            - Name of a basic ACL rule.
+              The value is a string of 1 to 32 characters.
+              The value is case-insensitive, and cannot contain spaces or begin with an underscore (_).
+    rule_id:
+        description:
+            - ID of a basic ACL rule in configuration mode.
+              The value is an integer ranging from 0 to 4294967294.
+    rule_action:
+        description:
+            - Matching mode of basic ACL rules.
+        choices: ['permit', 'deny']
+    source_ip:
+        description:
+            - Source IP address.
+              The value is a string of 0 to 255 characters. The default value is 0.0.0.0.
+              The value is in dotted decimal notation.
+    src_mask:
+        description:
+            - Mask of a source IP address.
+              The value is an integer ranging from 1 to 32.
+    frag_type:
+        description:
+            - Type of packet fragmentation.
+        choices: ['fragment', 'clear_fragment']
+    vrf_name:
+        description:
+            - VPN instance name.
+              The value is a string of 1 to 31 characters. The default value is _public_.
+    time_range:
+        description:
+            - Name of a time range in which an ACL rule takes effect.
+              The value is a string of 1 to 32 characters.
+              The value is case-insensitive, and cannot contain spaces. The name must start with an uppercase
+              or lowercase letter. In addition, the word "all" cannot be specified as a time range name.
+    rule_description:
+        description:
+            - Description about an ACL rule.
+              The value is a string of 1 to 127 characters.
+    log_flag:
+        description:
+            - Flag of logging matched data packets.
+ type: bool + default: 'no' +''' + +EXAMPLES = ''' + +- name: CloudEngine acl test + hosts: cloudengine + connection: local + gather_facts: no + vars: + cli: + host: "{{ inventory_hostname }}" + port: "{{ ansible_ssh_port }}" + username: "{{ username }}" + password: "{{ password }}" + transport: cli + + tasks: + + - name: "Config ACL" + ce_acl: + state: present + acl_name: 2200 + provider: "{{ cli }}" + + - name: "Undo ACL" + ce_acl: + state: delete_acl + acl_name: 2200 + provider: "{{ cli }}" + + - name: "Config ACL base rule" + ce_acl: + state: present + acl_name: 2200 + rule_name: test_rule + rule_id: 111 + rule_action: permit + source_ip: 10.10.10.10 + src_mask: 24 + frag_type: fragment + time_range: wdz_acl_time + provider: "{{ cli }}" + + - name: "undo ACL base rule" + ce_acl: + state: absent + acl_name: 2200 + rule_name: test_rule + rule_id: 111 + rule_action: permit + source_ip: 10.10.10.10 + src_mask: 24 + frag_type: fragment + time_range: wdz_acl_time + provider: "{{ cli }}" +''' + +RETURN = ''' +changed: + description: check to see if a change was made on the device + returned: always + type: bool + sample: true +proposed: + description: k/v pairs of parameters passed into module + returned: always + type: dict + sample: {"acl_name": "test", "state": "delete_acl"} +existing: + description: k/v pairs of existing aaa server + returned: always + type: dict + sample: {"aclNumOrName": "test", "aclType": "Basic"} +end_state: + description: k/v pairs of aaa params after module execution + returned: always + type: dict + sample: {} +updates: + description: command sent to the device + returned: always + type: list + sample: ["undo acl name test"] +''' + +from xml.etree import ElementTree +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.network.cloudengine.ce import get_nc_config, set_nc_config, ce_argument_spec, check_ip_addr + +# get acl +CE_GET_ACL_HEADER = """ + + + + + +""" +CE_GET_ACL_TAIL = """ + + + + +""" +# merge acl +CE_MERGE_ACL_HEADER = """ + + + + + %s +""" +CE_MERGE_ACL_TAIL = """ + + + + +""" +# delete acl +CE_DELETE_ACL_HEADER = """ + + + + + %s +""" +CE_DELETE_ACL_TAIL = """ + + + + +""" + +# get acl base rule +CE_GET_ACL_BASE_RULE_HEADER = """ + + + + + %s + + + +""" +CE_GET_ACL_BASE_RULE_TAIL = """ + + + + + + +""" +# merge acl base rule +CE_MERGE_ACL_BASE_RULE_HEADER = """ + + + + + %s + + + %s +""" +CE_MERGE_ACL_BASE_RULE_TAIL = """ + + + + + + +""" +# delete acl base rule +CE_DELETE_ACL_BASE_RULE_HEADER = """ + + + + + %s + + + %s +""" +CE_DELETE_ACL_BASE_RULE_TAIL = """ + + + + + + +""" + + +class BaseAcl(object): + """ Manages base acl configuration """ + + def __init__(self, **kwargs): + """ Class init """ + + # argument spec + argument_spec = kwargs["argument_spec"] + self.spec = argument_spec + self.module = AnsibleModule(argument_spec=self.spec, supports_check_mode=True) + + # module args + self.state = self.module.params['state'] + self.acl_name = self.module.params['acl_name'] or None + self.acl_num = self.module.params['acl_num'] or None + self.acl_type = None + self.acl_step = self.module.params['acl_step'] or None + self.acl_description = self.module.params['acl_description'] or None + self.rule_name = self.module.params['rule_name'] or None + self.rule_id = self.module.params['rule_id'] or None + self.rule_action = self.module.params['rule_action'] or None + self.source_ip = self.module.params['source_ip'] or None + self.src_mask = self.module.params['src_mask'] or None + 
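# Aside -- an illustrative sketch, not part of the module: the
# get_wildcard_mask() helper defined a little further down converts the
# src_mask prefix length into the wildcard (inverse) mask the device
# expects, e.g. 24 -> 0.0.0.255.  The same conversion, reduced to plain
# bit arithmetic (names local to this example):
def prefix_to_wildcard(length):
    if not 0 < length <= 32:
        raise ValueError('IPv4 prefix length must be in [1, 32]')
    host_bits = (1 << (32 - length)) - 1  # low-order bits not covered by the mask
    return '.'.join(str((host_bits >> shift) & 0xFF) for shift in (24, 16, 8, 0))

assert prefix_to_wildcard(24) == '0.0.0.255'
assert prefix_to_wildcard(8) == '0.255.255.255'
assert prefix_to_wildcard(32) == '0.0.0.0'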
self.src_wild = None + self.frag_type = self.module.params['frag_type'] or None + self.vrf_name = self.module.params['vrf_name'] or None + self.time_range = self.module.params['time_range'] or None + self.rule_description = self.module.params['rule_description'] or None + self.log_flag = self.module.params['log_flag'] + + # cur config + self.cur_acl_cfg = dict() + self.cur_base_rule_cfg = dict() + + # state + self.changed = False + self.updates_cmd = list() + self.results = dict() + self.proposed = dict() + self.existing = dict() + self.end_state = dict() + + def netconf_get_config(self, conf_str): + """ Get configure by netconf """ + + xml_str = get_nc_config(self.module, conf_str) + + return xml_str + + def netconf_set_config(self, conf_str): + """ Set configure by netconf """ + + xml_str = set_nc_config(self.module, conf_str) + + return xml_str + + def get_wildcard_mask(self): + """ convert mask length to ip address wildcard mask, i.e. 24 to 0.0.0.255 """ + + mask_int = ["255"] * 4 + value = int(self.src_mask) + + if value > 32: + self.module.fail_json(msg='Error: IPv4 ipaddress mask length is invalid.') + if value < 8: + mask_int[0] = str(int(~(0xFF << (8 - value % 8)) & 0xFF)) + if value >= 8: + mask_int[0] = '0' + mask_int[1] = str(int(~(0xFF << (16 - (value % 16))) & 0xFF)) + if value >= 16: + mask_int[1] = '0' + mask_int[2] = str(int(~(0xFF << (24 - (value % 24))) & 0xFF)) + if value >= 24: + mask_int[2] = '0' + mask_int[3] = str(int(~(0xFF << (32 - (value % 32))) & 0xFF)) + if value == 32: + mask_int[3] = '0' + + return '.'.join(mask_int) + + def check_acl_args(self): + """ Check acl invalid args """ + + need_cfg = False + find_flag = False + self.cur_acl_cfg["acl_info"] = [] + + if self.acl_name: + + if self.acl_name.isdigit(): + if int(self.acl_name) < 2000 or int(self.acl_name) > 2999: + self.module.fail_json( + msg='Error: The value of acl_name is out of [2000-2999] for base ACL.') + + if self.acl_num: + self.module.fail_json( + msg='Error: The acl_name is digit, so should not input acl_num at the same time.') + else: + + self.acl_type = "Basic" + + if len(self.acl_name) < 1 or len(self.acl_name) > 32: + self.module.fail_json( + msg='Error: The len of acl_name is out of [1 - 32].') + + if self.state == "present": + if not self.acl_num and not self.acl_type and not self.rule_name: + self.module.fail_json( + msg='Error: Please input acl_num or acl_type when config ACL.') + + if self.acl_num: + if self.acl_num.isdigit(): + if int(self.acl_num) < 2000 or int(self.acl_num) > 2999: + self.module.fail_json( + msg='Error: The value of acl_name is out of [2000-2999] for base ACL.') + else: + self.module.fail_json( + msg='Error: The acl_num is not digit.') + + if self.acl_step: + if self.acl_step.isdigit(): + if int(self.acl_step) < 1 or int(self.acl_step) > 20: + self.module.fail_json( + msg='Error: The value of acl_step is out of [1 - 20].') + else: + self.module.fail_json( + msg='Error: The acl_step is not digit.') + + if self.acl_description: + if len(self.acl_description) < 1 or len(self.acl_description) > 127: + self.module.fail_json( + msg='Error: The len of acl_description is out of [1 - 127].') + + conf_str = CE_GET_ACL_HEADER + + if self.acl_type: + conf_str += "" + if self.acl_num or self.acl_name.isdigit(): + conf_str += "" + if self.acl_step: + conf_str += "" + if self.acl_description: + conf_str += "" + + conf_str += CE_GET_ACL_TAIL + recv_xml = self.netconf_get_config(conf_str=conf_str) + + if "" in recv_xml: + find_flag = False + + else: + xml_str = 
recv_xml.replace('\r', '').replace('\n', '').\ + replace('xmlns="urn:ietf:params:xml:ns:netconf:base:1.0"', "").\ + replace('xmlns="http://www.huawei.com/netconf/vrp"', "") + root = ElementTree.fromstring(xml_str) + + # parse acl + acl_info = root.findall( + "acl/aclGroups/aclGroup") + if acl_info: + for tmp in acl_info: + tmp_dict = dict() + for site in tmp: + if site.tag in ["aclNumOrName", "aclType", "aclNumber", "aclStep", "aclDescription"]: + tmp_dict[site.tag] = site.text + + self.cur_acl_cfg["acl_info"].append(tmp_dict) + + if self.cur_acl_cfg["acl_info"]: + find_list = list() + for tmp in self.cur_acl_cfg["acl_info"]: + cur_cfg_dict = dict() + exist_cfg_dict = dict() + if self.acl_name: + if self.acl_name.isdigit() and tmp.get("aclNumber"): + cur_cfg_dict["aclNumber"] = self.acl_name + exist_cfg_dict["aclNumber"] = tmp.get("aclNumber") + else: + cur_cfg_dict["aclNumOrName"] = self.acl_name + exist_cfg_dict["aclNumOrName"] = tmp.get("aclNumOrName") + if self.acl_type: + cur_cfg_dict["aclType"] = self.acl_type + exist_cfg_dict["aclType"] = tmp.get("aclType") + if self.acl_num: + cur_cfg_dict["aclNumber"] = self.acl_num + exist_cfg_dict["aclNumber"] = tmp.get("aclNumber") + if self.acl_step: + cur_cfg_dict["aclStep"] = self.acl_step + exist_cfg_dict["aclStep"] = tmp.get("aclStep") + if self.acl_description: + cur_cfg_dict["aclDescription"] = self.acl_description + exist_cfg_dict["aclDescription"] = tmp.get("aclDescription") + + if cur_cfg_dict == exist_cfg_dict: + find_bool = True + else: + find_bool = False + find_list.append(find_bool) + + for mem in find_list: + if mem: + find_flag = True + break + else: + find_flag = False + + else: + find_flag = False + + if self.state == "present": + need_cfg = bool(not find_flag) + elif self.state == "delete_acl": + need_cfg = bool(find_flag) + else: + need_cfg = False + + self.cur_acl_cfg["need_cfg"] = need_cfg + + def check_base_rule_args(self): + """ Check base rule invalid args """ + + need_cfg = False + find_flag = False + self.cur_base_rule_cfg["base_rule_info"] = [] + + if self.acl_name: + + if self.state == "absent": + if not self.rule_name: + self.module.fail_json( + msg='Error: Please input rule_name when state is absent.') + + # config rule + if self.rule_name: + if len(self.rule_name) < 1 or len(self.rule_name) > 32: + self.module.fail_json( + msg='Error: The len of rule_name is out of [1 - 32].') + + if self.state != "delete_acl" and not self.rule_id: + self.module.fail_json( + msg='Error: Please input rule_id.') + + if self.rule_id: + if self.rule_id.isdigit(): + if int(self.rule_id) < 0 or int(self.rule_id) > 4294967294: + self.module.fail_json( + msg='Error: The value of rule_id is out of [0 - 4294967294].') + else: + self.module.fail_json( + msg='Error: The rule_id is not digit.') + + if self.source_ip: + if not check_ip_addr(self.source_ip): + self.module.fail_json( + msg='Error: The source_ip %s is invalid.' 
% self.source_ip) + if not self.src_mask: + self.module.fail_json( + msg='Error: Please input src_mask.') + + if self.src_mask: + if self.src_mask.isdigit(): + if int(self.src_mask) < 1 or int(self.src_mask) > 32: + self.module.fail_json( + msg='Error: The src_mask is out of [1 - 32].') + self.src_wild = self.get_wildcard_mask() + else: + self.module.fail_json( + msg='Error: The src_mask is not digit.') + + if self.vrf_name: + if len(self.vrf_name) < 1 or len(self.vrf_name) > 31: + self.module.fail_json( + msg='Error: The len of vrf_name is out of [1 - 31].') + + if self.time_range: + if len(self.time_range) < 1 or len(self.time_range) > 32: + self.module.fail_json( + msg='Error: The len of time_range is out of [1 - 32].') + + if self.rule_description: + if len(self.rule_description) < 1 or len(self.rule_description) > 127: + self.module.fail_json( + msg='Error: The len of rule_description is out of [1 - 127].') + + if self.state != "delete_acl" and not self.rule_id: + self.module.fail_json( + msg='Error: Please input rule_id.') + + conf_str = CE_GET_ACL_BASE_RULE_HEADER % self.acl_name + + if self.rule_id: + conf_str += "" + if self.rule_action: + conf_str += "" + if self.source_ip: + conf_str += "" + if self.src_wild: + conf_str += "" + if self.frag_type: + conf_str += "" + if self.vrf_name: + conf_str += "" + if self.time_range: + conf_str += "" + if self.rule_description: + conf_str += "" + conf_str += "" + + conf_str += CE_GET_ACL_BASE_RULE_TAIL + recv_xml = self.netconf_get_config(conf_str=conf_str) + + if "" in recv_xml: + find_flag = False + + else: + xml_str = recv_xml.replace('\r', '').replace('\n', '').\ + replace('xmlns="urn:ietf:params:xml:ns:netconf:base:1.0"', "").\ + replace('xmlns="http://www.huawei.com/netconf/vrp"', "") + + root = ElementTree.fromstring(xml_str) + + # parse base rule + base_rule_info = root.findall( + "acl/aclGroups/aclGroup/aclRuleBas4s/aclRuleBas4") + if base_rule_info: + for tmp in base_rule_info: + tmp_dict = dict() + for site in tmp: + if site.tag in ["aclRuleName", "aclRuleID", "aclAction", "aclSourceIp", "aclSrcWild", + "aclFragType", "vrfName", "aclTimeName", "aclRuleDescription", + "aclLogFlag"]: + tmp_dict[site.tag] = site.text + + self.cur_base_rule_cfg[ + "base_rule_info"].append(tmp_dict) + + if self.cur_base_rule_cfg["base_rule_info"]: + for tmp in self.cur_base_rule_cfg["base_rule_info"]: + find_flag = True + + if self.rule_name and tmp.get("aclRuleName") != self.rule_name: + find_flag = False + if self.rule_id and tmp.get("aclRuleID") != self.rule_id: + find_flag = False + if self.rule_action and tmp.get("aclAction") != self.rule_action: + find_flag = False + if self.source_ip: + tmp_src_ip = self.source_ip.split(".") + tmp_src_wild = self.src_wild.split(".") + tmp_addr_item = [] + for idx in range(4): + item1 = 255 - int(tmp_src_wild[idx]) + item2 = item1 & int(tmp_src_ip[idx]) + tmp_addr_item.append(item2) + tmp_addr = "%s.%s.%s.%s" % (tmp_addr_item[0], tmp_addr_item[1], + tmp_addr_item[2], tmp_addr_item[3]) + if tmp_addr != tmp.get("aclSourceIp"): + find_flag = False + if self.src_wild and tmp.get("aclSrcWild") != self.src_wild: + find_flag = False + frag_type = "clear_fragment" if tmp.get("aclFragType") is None else tmp.get("aclFragType") + if self.frag_type and frag_type != self.frag_type: + find_flag = False + if self.vrf_name and tmp.get("vrfName") != self.vrf_name: + find_flag = False + if self.time_range and tmp.get("aclTimeName") != self.time_range: + find_flag = False + if self.rule_description and tmp.get("aclRuleDescription") 
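# Aside -- an illustrative sketch, not the module's code: the loop above
# compares source addresses by AND-ing each requested IP octet with the
# inverse of the wildcard octet, because the device reports the network
# address rather than the host address the playbook supplied
# (10.10.10.10 with wildcard 0.0.0.255 comes back as 10.10.10.0).
def network_address(ip, wildcard):
    pairs = zip(ip.split('.'), wildcard.split('.'))
    return '.'.join(str((255 - int(wc)) & int(octet)) for octet, wc in pairs)

assert network_address('10.10.10.10', '0.0.0.255') == '10.10.10.0'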
!= self.rule_description: + find_flag = False + if tmp.get("aclLogFlag") != str(self.log_flag).lower(): + find_flag = False + + if find_flag: + break + else: + find_flag = False + + if self.state == "present": + need_cfg = bool(not find_flag) + elif self.state == "absent": + need_cfg = bool(find_flag) + else: + need_cfg = False + + self.cur_base_rule_cfg["need_cfg"] = need_cfg + + def get_proposed(self): + """ Get proposed state """ + + self.proposed["state"] = self.state + + if self.acl_name: + self.proposed["acl_name"] = self.acl_name + if self.acl_num: + self.proposed["acl_num"] = self.acl_num + if self.acl_step: + self.proposed["acl_step"] = self.acl_step + if self.acl_description: + self.proposed["acl_description"] = self.acl_description + if self.rule_name: + self.proposed["rule_name"] = self.rule_name + if self.rule_id: + self.proposed["rule_id"] = self.rule_id + if self.rule_action: + self.proposed["rule_action"] = self.rule_action + if self.source_ip: + self.proposed["source_ip"] = self.source_ip + if self.src_mask: + self.proposed["src_mask"] = self.src_mask + if self.frag_type: + self.proposed["frag_type"] = self.frag_type + if self.vrf_name: + self.proposed["vrf_name"] = self.vrf_name + if self.time_range: + self.proposed["time_range"] = self.time_range + if self.rule_description: + self.proposed["rule_description"] = self.rule_description + if self.log_flag: + self.proposed["log_flag"] = self.log_flag + + def get_existing(self): + """ Get existing state """ + + self.existing["acl_info"] = self.cur_acl_cfg["acl_info"] + self.existing["base_rule_info"] = self.cur_base_rule_cfg[ + "base_rule_info"] + + def get_end_state(self): + """ Get end state """ + + self.check_acl_args() + self.end_state["acl_info"] = self.cur_acl_cfg["acl_info"] + + self.check_base_rule_args() + self.end_state["base_rule_info"] = self.cur_base_rule_cfg[ + "base_rule_info"] + + def merge_acl(self): + """ Merge acl operation """ + + conf_str = CE_MERGE_ACL_HEADER % self.acl_name + + if self.acl_type: + conf_str += "%s" % self.acl_type + if self.acl_num: + conf_str += "%s" % self.acl_num + if self.acl_step: + conf_str += "%s" % self.acl_step + if self.acl_description: + conf_str += "%s" % self.acl_description + + conf_str += CE_MERGE_ACL_TAIL + + recv_xml = self.netconf_set_config(conf_str=conf_str) + + if "" not in recv_xml: + self.module.fail_json(msg='Error: Merge acl failed.') + + if self.acl_name.isdigit(): + cmd = "acl number %s" % self.acl_name + else: + if self.acl_type and not self.acl_num: + cmd = "acl name %s %s" % (self.acl_name, self.acl_type.lower()) + elif self.acl_type and self.acl_num: + cmd = "acl name %s number %s" % (self.acl_name, self.acl_num) + elif not self.acl_type and self.acl_num: + cmd = "acl name %s number %s" % (self.acl_name, self.acl_num) + self.updates_cmd.append(cmd) + + if self.acl_description: + cmd = "description %s" % self.acl_description + self.updates_cmd.append(cmd) + + if self.acl_step: + cmd = "step %s" % self.acl_step + self.updates_cmd.append(cmd) + + self.changed = True + + def delete_acl(self): + """ Delete acl operation """ + + conf_str = CE_DELETE_ACL_HEADER % self.acl_name + + if self.acl_type: + conf_str += "%s" % self.acl_type + if self.acl_num: + conf_str += "%s" % self.acl_num + if self.acl_step: + conf_str += "%s" % self.acl_step + if self.acl_description: + conf_str += "%s" % self.acl_description + + conf_str += CE_DELETE_ACL_TAIL + + recv_xml = self.netconf_set_config(conf_str=conf_str) + + if "" not in recv_xml: + self.module.fail_json(msg='Error: 
Delete acl failed.') + + if self.acl_description: + cmd = "undo description" + self.updates_cmd.append(cmd) + + if self.acl_step: + cmd = "undo step" + self.updates_cmd.append(cmd) + + if self.acl_name.isdigit(): + cmd = "undo acl number %s" % self.acl_name + else: + cmd = "undo acl name %s" % self.acl_name + self.updates_cmd.append(cmd) + + self.changed = True + + def merge_base_rule(self): + """ Merge base rule operation """ + + conf_str = CE_MERGE_ACL_BASE_RULE_HEADER % ( + self.acl_name, self.rule_name) + + if self.rule_id: + conf_str += "%s" % self.rule_id + if self.rule_action: + conf_str += "%s" % self.rule_action + if self.source_ip: + conf_str += "%s" % self.source_ip + if self.src_wild: + conf_str += "%s" % self.src_wild + if self.frag_type: + conf_str += "%s" % self.frag_type + if self.vrf_name: + conf_str += "%s" % self.vrf_name + if self.time_range: + conf_str += "%s" % self.time_range + if self.rule_description: + conf_str += "%s" % self.rule_description + conf_str += "%s" % str(self.log_flag).lower() + + conf_str += CE_MERGE_ACL_BASE_RULE_TAIL + + recv_xml = self.netconf_set_config(conf_str=conf_str) + + if "" not in recv_xml: + self.module.fail_json(msg='Error: Merge acl base rule failed.') + + if self.rule_action: + cmd = "rule" + if self.rule_id: + cmd += " %s" % self.rule_id + cmd += " %s" % self.rule_action + if self.frag_type == "fragment": + cmd += " fragment-type fragment" + if self.source_ip and self.src_wild: + cmd += " source %s %s" % (self.source_ip, self.src_wild) + if self.time_range: + cmd += " time-range %s" % self.time_range + if self.vrf_name: + cmd += " vpn-instance %s" % self.vrf_name + if self.log_flag: + cmd += " logging" + self.updates_cmd.append(cmd) + + if self.rule_description: + cmd = "rule %s description %s" % ( + self.rule_id, self.rule_description) + self.updates_cmd.append(cmd) + + self.changed = True + + def delete_base_rule(self): + """ Delete base rule operation """ + + conf_str = CE_DELETE_ACL_BASE_RULE_HEADER % ( + self.acl_name, self.rule_name) + + if self.rule_id: + conf_str += "%s" % self.rule_id + if self.rule_action: + conf_str += "%s" % self.rule_action + if self.source_ip: + conf_str += "%s" % self.source_ip + if self.src_wild: + conf_str += "%s" % self.src_wild + if self.frag_type: + conf_str += "%s" % self.frag_type + if self.vrf_name: + conf_str += "%s" % self.vrf_name + if self.time_range: + conf_str += "%s" % self.time_range + if self.rule_description: + conf_str += "%s" % self.rule_description + conf_str += "%s" % str(self.log_flag).lower() + + conf_str += CE_DELETE_ACL_BASE_RULE_TAIL + + recv_xml = self.netconf_set_config(conf_str=conf_str) + + if "" not in recv_xml: + self.module.fail_json(msg='Error: Delete acl base rule failed.') + + if self.rule_description: + if self.acl_name.isdigit(): + cmd = "acl number %s" % self.acl_name + else: + cmd = "acl name %s" % self.acl_name + self.updates_cmd.append(cmd) + + cmd = "undo rule %s description" % self.rule_id + self.updates_cmd.append(cmd) + + if self.rule_id: + if self.acl_name.isdigit(): + cmd = "acl number %s" % self.acl_name + else: + cmd = "acl name %s" % self.acl_name + self.updates_cmd.append(cmd) + + cmd = "undo rule %s" % self.rule_id + self.updates_cmd.append(cmd) + elif self.rule_action: + if self.acl_name.isdigit(): + cmd = "acl number %s" % self.acl_name + else: + cmd = "acl name %s" % self.acl_name + self.updates_cmd.append(cmd) + + cmd = "undo rule" + cmd += " %s" % self.rule_action + if self.frag_type == "fragment": + cmd += " fragment-type fragment" + if 
self.source_ip and self.src_wild: + cmd += " source %s %s" % (self.source_ip, self.src_wild) + if self.time_range: + cmd += " time-range %s" % self.time_range + if self.vrf_name: + cmd += " vpn-instance %s" % self.vrf_name + if self.log_flag: + cmd += " logging" + self.updates_cmd.append(cmd) + + self.changed = True + + def work(self): + """ Main work function """ + + self.check_acl_args() + self.check_base_rule_args() + self.get_proposed() + self.get_existing() + + if self.state == "present": + if self.cur_acl_cfg["need_cfg"]: + self.merge_acl() + if self.cur_base_rule_cfg["need_cfg"]: + self.merge_base_rule() + + elif self.state == "absent": + if self.cur_base_rule_cfg["need_cfg"]: + self.delete_base_rule() + + elif self.state == "delete_acl": + if self.cur_acl_cfg["need_cfg"]: + self.delete_acl() + + self.get_end_state() + + self.results['changed'] = self.changed + self.results['proposed'] = self.proposed + self.results['existing'] = self.existing + self.results['end_state'] = self.end_state + self.results['updates'] = self.updates_cmd + + self.module.exit_json(**self.results) + + +def main(): + """ Module main """ + + argument_spec = dict( + state=dict(choices=['present', 'absent', + 'delete_acl'], default='present'), + acl_name=dict(type='str', required=True), + acl_num=dict(type='str'), + acl_step=dict(type='str'), + acl_description=dict(type='str'), + rule_name=dict(type='str'), + rule_id=dict(type='str'), + rule_action=dict(choices=['permit', 'deny']), + source_ip=dict(type='str'), + src_mask=dict(type='str'), + frag_type=dict(choices=['fragment', 'clear_fragment']), + vrf_name=dict(type='str'), + time_range=dict(type='str'), + rule_description=dict(type='str'), + log_flag=dict(required=False, default=False, type='bool') + ) + + argument_spec.update(ce_argument_spec) + module = BaseAcl(argument_spec=argument_spec) + module.work() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/cloudengine/ce_acl_advance.py b/plugins/modules/network/cloudengine/ce_acl_advance.py new file mode 100644 index 0000000000..869963bda0 --- /dev/null +++ b/plugins/modules/network/cloudengine/ce_acl_advance.py @@ -0,0 +1,1750 @@ +#!/usr/bin/python +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: ce_acl_advance +short_description: Manages advanced ACL configuration on HUAWEI CloudEngine switches. +description: + - Manages advanced ACL configurations on HUAWEI CloudEngine switches. +author: + - wangdezhuang (@QijunPan) +notes: + - This module requires the netconf system service be enabled on the remote device being managed. + - Recommended connection is C(netconf). + - This module also works with C(local) connections for legacy playbooks. 
+options:
+    state:
+        description:
+            - Specify desired state of the resource.
+        required: false
+        default: present
+        choices: ['present', 'absent', 'delete_acl']
+    acl_name:
+        description:
+            - ACL number or name.
+              For a numbered rule group, a value ranging from 3000 to 3999 indicates an advanced ACL.
+              For a named rule group, the value is a string of 1 to 32 case-sensitive characters starting
+              with a letter; spaces are not supported.
+        required: true
+    acl_num:
+        description:
+            - ACL number.
+              The value is an integer ranging from 3000 to 3999.
+    acl_step:
+        description:
+            - ACL step.
+              The value is an integer ranging from 1 to 20. The default value is 5.
+    acl_description:
+        description:
+            - ACL description.
+              The value is a string of 1 to 127 characters.
+    rule_name:
+        description:
+            - Name of an advanced ACL rule.
+              The value is a string of 1 to 32 characters.
+    rule_id:
+        description:
+            - ID of an advanced ACL rule in configuration mode.
+              The value is an integer ranging from 0 to 4294967294.
+    rule_action:
+        description:
+            - Matching mode of advanced ACL rules.
+        choices: ['permit', 'deny']
+    protocol:
+        description:
+            - Protocol type.
+        choices: ['ip', 'icmp', 'igmp', 'ipinip', 'tcp', 'udp', 'gre', 'ospf']
+    source_ip:
+        description:
+            - Source IP address.
+              The value is a string of 0 to 255 characters. The default value is 0.0.0.0.
+              The value is in dotted decimal notation.
+    src_mask:
+        description:
+            - Source IP address mask.
+              The value is an integer ranging from 1 to 32.
+    src_pool_name:
+        description:
+            - Name of a source pool.
+              The value is a string of 1 to 32 characters.
+    dest_ip:
+        description:
+            - Destination IP address.
+              The value is a string of 0 to 255 characters. The default value is 0.0.0.0.
+              The value is in dotted decimal notation.
+    dest_mask:
+        description:
+            - Destination IP address mask.
+              The value is an integer ranging from 1 to 32.
+    dest_pool_name:
+        description:
+            - Name of a destination pool.
+              The value is a string of 1 to 32 characters.
+    src_port_op:
+        description:
+            - Range type of the source port.
+        choices: ['lt', 'eq', 'gt', 'range']
+    src_port_begin:
+        description:
+            - Start port number of the source port.
+              The value is an integer ranging from 0 to 65535.
+    src_port_end:
+        description:
+            - End port number of the source port.
+              The value is an integer ranging from 0 to 65535.
+    src_port_pool_name:
+        description:
+            - Name of a source port pool.
+              The value is a string of 1 to 32 characters.
+    dest_port_op:
+        description:
+            - Range type of the destination port.
+        choices: ['lt', 'eq', 'gt', 'range']
+    dest_port_begin:
+        description:
+            - Start port number of the destination port.
+              The value is an integer ranging from 0 to 65535.
+    dest_port_end:
+        description:
+            - End port number of the destination port.
+              The value is an integer ranging from 0 to 65535.
+    dest_port_pool_name:
+        description:
+            - Name of a destination port pool.
+              The value is a string of 1 to 32 characters.
+    frag_type:
+        description:
+            - Type of packet fragmentation.
+        choices: ['fragment', 'clear_fragment']
+    precedence:
+        description:
+            - Data packets can be filtered based on the priority field.
+              The value is an integer ranging from 0 to 7.
+    tos:
+        description:
+            - ToS value on which data packet filtering is based.
+              The value is an integer ranging from 0 to 15.
+    dscp:
+        description:
+            - Differentiated Services Code Point.
+              The value is an integer ranging from 0 to 63.
+    icmp_name:
+        description:
+            - ICMP name.
+        choices: ['unconfiged', 'echo', 'echo-reply', 'fragmentneed-DFset', 'host-redirect',
+                  'host-tos-redirect', 'host-unreachable', 'information-reply', 'information-request',
+                  'net-redirect', 'net-tos-redirect', 'net-unreachable', 'parameter-problem',
+                  'port-unreachable', 'protocol-unreachable', 'reassembly-timeout', 'source-quench',
+                  'source-route-failed', 'timestamp-reply', 'timestamp-request', 'ttl-exceeded',
+                  'address-mask-reply', 'address-mask-request', 'custom']
+    icmp_type:
+        description:
+            - ICMP type. This parameter is available only when the packet protocol is ICMP.
+              The value is an integer ranging from 0 to 255.
+    icmp_code:
+        description:
+            - ICMP message code. Data packets can be filtered based on the ICMP message code.
+              The value is an integer ranging from 0 to 255.
+    ttl_expired:
+        description:
+            - Whether to match packets whose TTL has expired (that is, packets with a TTL value of 1).
+        type: bool
+        default: 'no'
+    vrf_name:
+        description:
+            - VPN instance name.
+              The value is a string of 1 to 31 characters. The default value is _public_.
+    syn_flag:
+        description:
+            - TCP flag value.
+              The value is an integer ranging from 0 to 63.
+    tcp_flag_mask:
+        description:
+            - TCP flag mask value.
+              The value is an integer ranging from 0 to 63.
+    established:
+        description:
+            - Match established connections.
+        type: bool
+        default: 'no'
+    time_range:
+        description:
+            - Name of a time range in which an ACL rule takes effect.
+    rule_description:
+        description:
+            - Description about an ACL rule.
+    igmp_type:
+        description:
+            - Internet Group Management Protocol (IGMP) message type.
+        choices: ['host-query', 'mrouter-adver', 'mrouter-solic', 'mrouter-termi', 'mtrace-resp', 'mtrace-route',
+                  'v1host-report', 'v2host-report', 'v2leave-group', 'v3host-report']
+    log_flag:
+        description:
+            - Flag of logging matched data packets.
+ type: bool + default: 'no' +''' + +EXAMPLES = ''' + +- name: CloudEngine advance acl test + hosts: cloudengine + connection: local + gather_facts: no + vars: + cli: + host: "{{ inventory_hostname }}" + port: "{{ ansible_ssh_port }}" + username: "{{ username }}" + password: "{{ password }}" + transport: cli + + tasks: + + - name: "Config ACL" + ce_acl_advance: + state: present + acl_name: 3200 + provider: "{{ cli }}" + + - name: "Undo ACL" + ce_acl_advance: + state: delete_acl + acl_name: 3200 + provider: "{{ cli }}" + + - name: "Config ACL advance rule" + ce_acl_advance: + state: present + acl_name: test + rule_name: test_rule + rule_id: 111 + rule_action: permit + protocol: tcp + source_ip: 10.10.10.10 + src_mask: 24 + frag_type: fragment + provider: "{{ cli }}" + + - name: "Undo ACL advance rule" + ce_acl_advance: + state: absent + acl_name: test + rule_name: test_rule + rule_id: 111 + rule_action: permit + protocol: tcp + source_ip: 10.10.10.10 + src_mask: 24 + frag_type: fragment + provider: "{{ cli }}" +''' + +RETURN = ''' +changed: + description: check to see if a change was made on the device + returned: always + type: bool + sample: true +proposed: + description: k/v pairs of parameters passed into module + returned: always + type: dict + sample: {"acl_name": "test", "state": "delete_acl"} +existing: + description: k/v pairs of existing aaa server + returned: always + type: dict + sample: {"aclNumOrName": "test", "aclType": "Advance"} +end_state: + description: k/v pairs of aaa params after module execution + returned: always + type: dict + sample: {} +updates: + description: command sent to the device + returned: always + type: list + sample: ["undo acl name test"] +''' + +from xml.etree import ElementTree +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.network.cloudengine.ce import get_nc_config, set_nc_config, ce_argument_spec, check_ip_addr + + +# get acl +CE_GET_ACL_HEADER = """ + + + + + +""" +CE_GET_ACL_TAIL = """ + + + + +""" +# merge acl +CE_MERGE_ACL_HEADER = """ + + + + + %s +""" +CE_MERGE_ACL_TAIL = """ + + + + +""" +# delete acl +CE_DELETE_ACL_HEADER = """ + + + + + %s +""" +CE_DELETE_ACL_TAIL = """ + + + + +""" + +# get acl advance rule +CE_GET_ACL_ADVANCE_RULE_HEADER = """ + + + + + %s + + + +""" +CE_GET_ACL_ADVANCE_RULE_TAIL = """ + + + + + + +""" +# merge acl advance rule +CE_MERGE_ACL_ADVANCE_RULE_HEADER = """ + + + + + %s + + + %s +""" +CE_MERGE_ACL_ADVANCE_RULE_TAIL = """ + + + + + + +""" +# delete acl advance rule +CE_DELETE_ACL_ADVANCE_RULE_HEADER = """ + + + + + %s + + + %s +""" +CE_DELETE_ACL_ADVANCE_RULE_TAIL = """ + + + + + + +""" + + +PROTOCOL_NUM = {"ip": "0", + "icmp": "1", + "igmp": "2", + "ipinip": "4", + "tcp": "6", + "udp": "17", + "gre": "47", + "ospf": "89"} + +IGMP_TYPE_NUM = {"host-query": "17", + "mrouter-adver": "48", + "mrouter-solic": "49", + "mrouter-termi": "50", + "mtrace-resp": "30", + "mtrace-route": "31", + "v1host-report": "18", + "v2host-report": "22", + "v2leave-group": "23", + "v3host-report": "34"} + + +def get_wildcard_mask(mask): + """ convert mask length to ip address wildcard mask, i.e. 
24 to 0.0.0.255 """ + + mask_int = ["255"] * 4 + value = int(mask) + + if value > 32: + return None + if value < 8: + mask_int[0] = str(int(~(0xFF << (8 - value % 8)) & 0xFF)) + if value >= 8: + mask_int[0] = '0' + mask_int[1] = str(int(~(0xFF << (16 - (value % 16))) & 0xFF)) + if value >= 16: + mask_int[1] = '0' + mask_int[2] = str(int(~(0xFF << (24 - (value % 24))) & 0xFF)) + if value >= 24: + mask_int[2] = '0' + mask_int[3] = str(int(~(0xFF << (32 - (value % 32))) & 0xFF)) + if value == 32: + mask_int[3] = '0' + + return '.'.join(mask_int) + + +class AdvanceAcl(object): + """ Manages advance acl configuration """ + + def __init__(self, **kwargs): + """ Class init """ + + # argument spec + argument_spec = kwargs["argument_spec"] + self.spec = argument_spec + self.module = AnsibleModule(argument_spec=self.spec, supports_check_mode=True) + + # module args + self.state = self.module.params['state'] + self.acl_name = self.module.params['acl_name'] or None + self.acl_num = self.module.params['acl_num'] or None + self.acl_type = None + self.acl_step = self.module.params['acl_step'] or None + self.acl_description = self.module.params['acl_description'] or None + self.rule_name = self.module.params['rule_name'] or None + self.rule_id = self.module.params['rule_id'] or None + self.rule_action = self.module.params['rule_action'] or None + self.protocol = self.module.params['protocol'] or None + self.protocol_num = None + self.source_ip = self.module.params['source_ip'] or None + self.src_mask = self.module.params['src_mask'] or None + self.src_wild = None + self.src_pool_name = self.module.params['src_pool_name'] or None + self.dest_ip = self.module.params['dest_ip'] or None + self.dest_mask = self.module.params['dest_mask'] or None + self.dest_wild = None + self.dest_pool_name = self.module.params['dest_pool_name'] or None + self.src_port_op = self.module.params['src_port_op'] or None + self.src_port_begin = self.module.params['src_port_begin'] or None + self.src_port_end = self.module.params['src_port_end'] or None + self.src_port_pool_name = self.module.params[ + 'src_port_pool_name'] or None + self.dest_port_op = self.module.params['dest_port_op'] or None + self.dest_port_begin = self.module.params['dest_port_begin'] or None + self.dest_port_end = self.module.params['dest_port_end'] or None + self.dest_port_pool_name = self.module.params[ + 'dest_port_pool_name'] or None + self.frag_type = self.module.params['frag_type'] or None + self.precedence = self.module.params['precedence'] or None + self.tos = self.module.params['tos'] or None + self.dscp = self.module.params['dscp'] or None + self.icmp_name = self.module.params['icmp_name'] or None + self.icmp_type = self.module.params['icmp_type'] or None + self.icmp_code = self.module.params['icmp_code'] or None + self.ttl_expired = self.module.params['ttl_expired'] + self.vrf_name = self.module.params['vrf_name'] or None + self.syn_flag = self.module.params['syn_flag'] or None + self.tcp_flag_mask = self.module.params['tcp_flag_mask'] or None + self.established = self.module.params['established'] + self.time_range = self.module.params['time_range'] or None + self.rule_description = self.module.params['rule_description'] or None + self.igmp_type = self.module.params['igmp_type'] or None + self.igmp_type_num = None + self.log_flag = self.module.params['log_flag'] + + self.precedence_name = dict() + self.precedence_name["0"] = "routine" + self.precedence_name["1"] = "priority" + self.precedence_name["2"] = "immediate" + self.precedence_name["3"] = 
"flash" + self.precedence_name["4"] = "flash-override" + self.precedence_name["5"] = "critical" + self.precedence_name["6"] = "internet" + self.precedence_name["7"] = "network" + + # cur config + self.cur_acl_cfg = dict() + self.cur_advance_rule_cfg = dict() + + # state + self.changed = False + self.updates_cmd = list() + self.results = dict() + self.proposed = dict() + self.existing = dict() + self.end_state = dict() + + def netconf_get_config(self, conf_str): + """ Get configure by netconf """ + + xml_str = get_nc_config(self.module, conf_str) + + return xml_str + + def netconf_set_config(self, conf_str): + """ Set configure by netconf """ + + xml_str = set_nc_config(self.module, conf_str) + + return xml_str + + def get_protocol_num(self): + """ Get protocol num by name """ + + if self.protocol: + self.protocol_num = PROTOCOL_NUM.get(self.protocol) + + def get_igmp_type_num(self): + """ Get igmp type num by type """ + + if self.igmp_type: + self.igmp_type_num = IGMP_TYPE_NUM.get(self.igmp_type) + + def check_acl_args(self): + """ Check acl invalid args """ + + need_cfg = False + find_flag = False + self.cur_acl_cfg["acl_info"] = [] + + if self.acl_name: + + if self.acl_name.isdigit(): + if int(self.acl_name) < 3000 or int(self.acl_name) > 3999: + self.module.fail_json( + msg='Error: The value of acl_name is out of [3000-3999] for advance ACL.') + + if self.acl_num: + self.module.fail_json( + msg='Error: The acl_name is digit, so should not input acl_num at the same time.') + else: + + self.acl_type = "Advance" + + if len(self.acl_name) < 1 or len(self.acl_name) > 32: + self.module.fail_json( + msg='Error: The len of acl_name is out of [1 - 32].') + + if self.state == "present": + if not self.acl_num and not self.acl_type and not self.rule_name: + self.module.fail_json( + msg='Error: Please input acl_num or acl_type when config ACL.') + + if self.acl_num: + if self.acl_num.isdigit(): + if int(self.acl_num) < 3000 or int(self.acl_num) > 3999: + self.module.fail_json( + msg='Error: The value of acl_name is out of [3000-3999] for advance ACL.') + else: + self.module.fail_json( + msg='Error: The acl_num is not digit.') + + if self.acl_step: + if self.acl_step.isdigit(): + if int(self.acl_step) < 1 or int(self.acl_step) > 20: + self.module.fail_json( + msg='Error: The value of acl_step is out of [1 - 20].') + else: + self.module.fail_json( + msg='Error: The acl_step is not digit.') + + if self.acl_description: + if len(self.acl_description) < 1 or len(self.acl_description) > 127: + self.module.fail_json( + msg='Error: The len of acl_description is out of [1 - 127].') + + conf_str = CE_GET_ACL_HEADER + + if self.acl_type: + conf_str += "" + if self.acl_num or self.acl_name.isdigit(): + conf_str += "" + if self.acl_step: + conf_str += "" + if self.acl_description: + conf_str += "" + + conf_str += CE_GET_ACL_TAIL + recv_xml = self.netconf_get_config(conf_str=conf_str) + + if "" in recv_xml: + find_flag = False + + else: + xml_str = recv_xml.replace('\r', '').replace('\n', '').\ + replace('xmlns="urn:ietf:params:xml:ns:netconf:base:1.0"', "").\ + replace('xmlns="http://www.huawei.com/netconf/vrp"', "") + + root = ElementTree.fromstring(xml_str) + + # parse acl + acl_info = root.findall( + "acl/aclGroups/aclGroup") + if acl_info: + for tmp in acl_info: + tmp_dict = dict() + for site in tmp: + if site.tag in ["aclNumOrName", "aclType", "aclNumber", "aclStep", "aclDescription"]: + tmp_dict[site.tag] = site.text + + self.cur_acl_cfg["acl_info"].append(tmp_dict) + + if self.cur_acl_cfg["acl_info"]: + 
find_list = list() + for tmp in self.cur_acl_cfg["acl_info"]: + cur_cfg_dict = dict() + exist_cfg_dict = dict() + + if self.acl_name: + if self.acl_name.isdigit() and tmp.get("aclNumber"): + cur_cfg_dict["aclNumber"] = self.acl_name + exist_cfg_dict["aclNumber"] = tmp.get("aclNumber") + else: + cur_cfg_dict["aclNumOrName"] = self.acl_name + exist_cfg_dict["aclNumOrName"] = tmp.get("aclNumOrName") + if self.acl_type: + cur_cfg_dict["aclType"] = self.acl_type + exist_cfg_dict["aclType"] = tmp.get("aclType") + if self.acl_num: + cur_cfg_dict["aclNumber"] = self.acl_num + exist_cfg_dict["aclNumber"] = tmp.get("aclNumber") + if self.acl_step: + cur_cfg_dict["aclStep"] = self.acl_step + exist_cfg_dict["aclStep"] = tmp.get("aclStep") + if self.acl_description: + cur_cfg_dict["aclDescription"] = self.acl_description + exist_cfg_dict["aclDescription"] = tmp.get("aclDescription") + + if cur_cfg_dict == exist_cfg_dict: + find_bool = True + else: + find_bool = False + find_list.append(find_bool) + for mem in find_list: + if mem: + find_flag = True + break + else: + find_flag = False + else: + find_flag = False + + if self.state == "present": + need_cfg = bool(not find_flag) + elif self.state == "delete_acl": + need_cfg = bool(find_flag) + else: + need_cfg = False + + self.cur_acl_cfg["need_cfg"] = need_cfg + + def check_advance_rule_args(self): + """ Check advance rule invalid args """ + + need_cfg = False + find_flag = False + self.cur_advance_rule_cfg["adv_rule_info"] = [] + + if self.acl_name: + + if self.state == "absent": + if not self.rule_name: + self.module.fail_json( + msg='Error: Please input rule_name when state is absent.') + + # config rule + if self.rule_name: + if len(self.rule_name) < 1 or len(self.rule_name) > 32: + self.module.fail_json( + msg='Error: The len of rule_name is out of [1 - 32].') + + if self.state != "delete_acl" and not self.rule_id: + self.module.fail_json( + msg='Error: Please input rule_id.') + + if self.rule_id: + if self.rule_id.isdigit(): + if int(self.rule_id) < 0 or int(self.rule_id) > 4294967294: + self.module.fail_json( + msg='Error: The value of rule_id is out of [0 - 4294967294].') + else: + self.module.fail_json( + msg='Error: The rule_id is not digit.') + + if self.rule_action and not self.protocol: + self.module.fail_json( + msg='Error: The rule_action and the protocol must input at the same time.') + + if not self.rule_action and self.protocol: + self.module.fail_json( + msg='Error: The rule_action and the protocol must input at the same time.') + + if self.protocol: + self.get_protocol_num() + + if self.source_ip: + if not check_ip_addr(self.source_ip): + self.module.fail_json( + msg='Error: The source_ip %s is invalid.' % self.source_ip) + if not self.src_mask: + self.module.fail_json( + msg='Error: Please input src_mask.') + + if self.src_mask: + if self.src_mask.isdigit(): + if int(self.src_mask) < 1 or int(self.src_mask) > 32: + self.module.fail_json( + msg='Error: The value of src_mask is out of [1 - 32].') + self.src_wild = get_wildcard_mask(self.src_mask) + else: + self.module.fail_json( + msg='Error: The src_mask is not digit.') + + if self.src_pool_name: + if len(self.src_pool_name) < 1 or len(self.src_pool_name) > 32: + self.module.fail_json( + msg='Error: The len of src_pool_name is out of [1 - 32].') + + if self.dest_ip: + if not check_ip_addr(self.dest_ip): + self.module.fail_json( + msg='Error: The dest_ip %s is invalid.' 
% self.dest_ip) + if not self.dest_mask: + self.module.fail_json( + msg='Error: Please input dest_mask.') + + if self.dest_mask: + if self.dest_mask.isdigit(): + if int(self.dest_mask) < 1 or int(self.dest_mask) > 32: + self.module.fail_json( + msg='Error: The value of dest_mask is out of [1 - 32].') + self.dest_wild = get_wildcard_mask(self.dest_mask) + else: + self.module.fail_json( + msg='Error: The dest_mask is not digit.') + + if self.dest_pool_name: + if len(self.dest_pool_name) < 1 or len(self.dest_pool_name) > 32: + self.module.fail_json( + msg='Error: The len of dest_pool_name is out of [1 - 32].') + + if self.src_port_op: + if self.src_port_op == "lt": + if not self.src_port_end: + self.module.fail_json( + msg='Error: The src_port_end must input.') + if self.src_port_begin: + self.module.fail_json( + msg='Error: The src_port_begin should not input.') + if self.src_port_op == "eq" or self.src_port_op == "gt": + if not self.src_port_begin: + self.module.fail_json( + msg='Error: The src_port_begin must input.') + if self.src_port_end: + self.module.fail_json( + msg='Error: The src_port_end should not input.') + if self.src_port_op == "range": + if not self.src_port_begin or not self.src_port_end: + self.module.fail_json( + msg='Error: The src_port_begin and src_port_end must input.') + + if self.src_port_begin: + if self.src_port_begin.isdigit(): + if int(self.src_port_begin) < 0 or int(self.src_port_begin) > 65535: + self.module.fail_json( + msg='Error: The value of src_port_begin is out of [0 - 65535].') + else: + self.module.fail_json( + msg='Error: The src_port_begin is not digit.') + + if self.src_port_end: + if self.src_port_end.isdigit(): + if int(self.src_port_end) < 0 or int(self.src_port_end) > 65535: + self.module.fail_json( + msg='Error: The value of src_port_end is out of [0 - 65535].') + else: + self.module.fail_json( + msg='Error: The src_port_end is not digit.') + + if self.src_port_pool_name: + if len(self.src_port_pool_name) < 1 or len(self.src_port_pool_name) > 32: + self.module.fail_json( + msg='Error: The len of src_port_pool_name is out of [1 - 32].') + + if self.dest_port_op: + if self.dest_port_op == "lt": + if not self.dest_port_end: + self.module.fail_json( + msg='Error: The dest_port_end must input.') + if self.dest_port_begin: + self.module.fail_json( + msg='Error: The dest_port_begin should not input.') + if self.dest_port_op == "eq" or self.dest_port_op == "gt": + if not self.dest_port_begin: + self.module.fail_json( + msg='Error: The dest_port_begin must input.') + if self.dest_port_end: + self.module.fail_json( + msg='Error: The dest_port_end should not input.') + if self.dest_port_op == "range": + if not self.dest_port_begin or not self.dest_port_end: + self.module.fail_json( + msg='Error: The dest_port_begin and dest_port_end must input.') + + if self.dest_port_begin: + if self.dest_port_begin.isdigit(): + if int(self.dest_port_begin) < 0 or int(self.dest_port_begin) > 65535: + self.module.fail_json( + msg='Error: The value of dest_port_begin is out of [0 - 65535].') + else: + self.module.fail_json( + msg='Error: The dest_port_begin is not digit.') + + if self.dest_port_end: + if self.dest_port_end.isdigit(): + if int(self.dest_port_end) < 0 or int(self.dest_port_end) > 65535: + self.module.fail_json( + msg='Error: The value of dest_port_end is out of [0 - 65535].') + else: + self.module.fail_json( + msg='Error: The dest_port_end is not digit.') + + if self.dest_port_pool_name: + if len(self.dest_port_pool_name) < 1 or len(self.dest_port_pool_name) 
> 32: + self.module.fail_json( + msg='Error: The len of dest_port_pool_name is out of [1 - 32].') + + if self.precedence: + if self.precedence.isdigit(): + if int(self.precedence) < 0 or int(self.precedence) > 7: + self.module.fail_json( + msg='Error: The value of precedence is out of [0 - 7].') + else: + self.module.fail_json( + msg='Error: The precedence is not digit.') + + if self.tos: + if self.tos.isdigit(): + if int(self.tos) < 0 or int(self.tos) > 15: + self.module.fail_json( + msg='Error: The value of tos is out of [0 - 15].') + else: + self.module.fail_json( + msg='Error: The tos is not digit.') + + if self.dscp: + if self.dscp.isdigit(): + if int(self.dscp) < 0 or int(self.dscp) > 63: + self.module.fail_json( + msg='Error: The value of dscp is out of [0 - 63].') + else: + self.module.fail_json( + msg='Error: The dscp is not digit.') + + if self.icmp_type: + if self.icmp_type.isdigit(): + if int(self.icmp_type) < 0 or int(self.icmp_type) > 255: + self.module.fail_json( + msg='Error: The value of icmp_type is out of [0 - 255].') + else: + self.module.fail_json( + msg='Error: The icmp_type is not digit.') + + if self.icmp_code: + if self.icmp_code.isdigit(): + if int(self.icmp_code) < 0 or int(self.icmp_code) > 255: + self.module.fail_json( + msg='Error: The value of icmp_code is out of [0 - 255].') + else: + self.module.fail_json( + msg='Error: The icmp_code is not digit.') + + if self.vrf_name: + if len(self.vrf_name) < 1 or len(self.vrf_name) > 31: + self.module.fail_json( + msg='Error: The len of vrf_name is out of [1 - 31].') + + if self.syn_flag: + if self.syn_flag.isdigit(): + if int(self.syn_flag) < 0 or int(self.syn_flag) > 63: + self.module.fail_json( + msg='Error: The value of syn_flag is out of [0 - 63].') + else: + self.module.fail_json( + msg='Error: The syn_flag is not digit.') + + if self.tcp_flag_mask: + if self.tcp_flag_mask.isdigit(): + if int(self.tcp_flag_mask) < 0 or int(self.tcp_flag_mask) > 63: + self.module.fail_json( + msg='Error: The value of tcp_flag_mask is out of [0 - 63].') + else: + self.module.fail_json( + msg='Error: The tcp_flag_mask is not digit.') + + if self.time_range: + if len(self.time_range) < 1 or len(self.time_range) > 32: + self.module.fail_json( + msg='Error: The len of time_range is out of [1 - 32].') + + if self.rule_description: + if len(self.rule_description) < 1 or len(self.rule_description) > 127: + self.module.fail_json( + msg='Error: The len of rule_description is out of [1 - 127].') + + if self.igmp_type: + self.get_igmp_type_num() + + conf_str = CE_GET_ACL_ADVANCE_RULE_HEADER % self.acl_name + + if self.rule_id: + conf_str += "" + if self.rule_action: + conf_str += "" + if self.protocol: + conf_str += "" + if self.source_ip: + conf_str += "" + if self.src_wild: + conf_str += "" + if self.src_pool_name: + conf_str += "" + if self.dest_ip: + conf_str += "" + if self.dest_wild: + conf_str += "" + if self.dest_pool_name: + conf_str += "" + if self.src_port_op: + conf_str += "" + if self.src_port_begin: + conf_str += "" + if self.src_port_end: + conf_str += "" + if self.src_port_pool_name: + conf_str += "" + if self.dest_port_op: + conf_str += "" + if self.dest_port_begin: + conf_str += "" + if self.dest_port_end: + conf_str += "" + if self.dest_port_pool_name: + conf_str += "" + if self.frag_type: + conf_str += "" + if self.precedence: + conf_str += "" + if self.tos: + conf_str += "" + if self.dscp: + conf_str += "" + if self.icmp_name: + conf_str += "" + if self.icmp_type: + conf_str += "" + if self.icmp_code: + conf_str += "" + 
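# Aside -- an illustrative note, not part of the module: conf_str is being
# assembled here as a NETCONF subtree filter.  Each requested parameter
# contributes one empty XML element pair (tags such as
# <aclRuleID></aclRuleID>; the literal tags do not survive in this
# rendering of the diff, which is why the appended strings look empty),
# so the <get> reply echoes back only the requested leaves.  A generic
# sketch of the same pattern, with hypothetical leaf names:
def build_subtree_filter(leaves):
    return ''.join('<{0}></{0}>'.format(leaf) for leaf in leaves)

assert build_subtree_filter(['aclRuleID', 'aclAction']) == \
    '<aclRuleID></aclRuleID><aclAction></aclAction>'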
conf_str += "" + if self.vrf_name: + conf_str += "" + if self.syn_flag: + conf_str += "" + if self.tcp_flag_mask: + conf_str += "" + conf_str += "" + if self.time_range: + conf_str += "" + if self.rule_description: + conf_str += "" + if self.igmp_type: + conf_str += "" + conf_str += "" + + conf_str += CE_GET_ACL_ADVANCE_RULE_TAIL + recv_xml = self.netconf_get_config(conf_str=conf_str) + + if "" in recv_xml: + find_flag = False + + else: + xml_str = recv_xml.replace('\r', '').replace('\n', '').\ + replace('xmlns="urn:ietf:params:xml:ns:netconf:base:1.0"', "").\ + replace('xmlns="http://www.huawei.com/netconf/vrp"', "") + + root = ElementTree.fromstring(xml_str) + + # parse advance rule + adv_rule_info = root.findall( + "acl/aclGroups/aclGroup/aclRuleAdv4s/aclRuleAdv4") + if adv_rule_info: + for tmp in adv_rule_info: + tmp_dict = dict() + for site in tmp: + if site.tag in ["aclRuleName", "aclRuleID", "aclAction", "aclProtocol", "aclSourceIp", + "aclSrcWild", "aclSPoolName", "aclDestIp", "aclDestWild", + "aclDPoolName", "aclSrcPortOp", "aclSrcPortBegin", "aclSrcPortEnd", + "aclSPortPoolName", "aclDestPortOp", "aclDestPortB", "aclDestPortE", + "aclDPortPoolName", "aclFragType", "aclPrecedence", "aclTos", + "aclDscp", "aclIcmpName", "aclIcmpType", "aclIcmpCode", "aclTtlExpired", + "vrfName", "aclSynFlag", "aclTcpFlagMask", "aclEstablished", + "aclTimeName", "aclRuleDescription", "aclIgmpType", "aclLogFlag"]: + tmp_dict[site.tag] = site.text + + self.cur_advance_rule_cfg[ + "adv_rule_info"].append(tmp_dict) + + if self.cur_advance_rule_cfg["adv_rule_info"]: + for tmp in self.cur_advance_rule_cfg["adv_rule_info"]: + find_flag = True + + if self.rule_name and tmp.get("aclRuleName") != self.rule_name: + find_flag = False + if self.rule_id and tmp.get("aclRuleID") != self.rule_id: + find_flag = False + if self.rule_action and tmp.get("aclAction") != self.rule_action: + find_flag = False + if self.protocol and tmp.get("aclProtocol") != self.protocol_num: + find_flag = False + if self.source_ip: + tmp_src_ip = self.source_ip.split(".") + tmp_src_wild = self.src_wild.split(".") + tmp_addr_item = [] + for idx in range(4): + item1 = 255 - int(tmp_src_wild[idx]) + item2 = item1 & int(tmp_src_ip[idx]) + tmp_addr_item.append(item2) + tmp_addr = "%s.%s.%s.%s" % (tmp_addr_item[0], tmp_addr_item[1], + tmp_addr_item[2], tmp_addr_item[3]) + if tmp_addr != tmp.get("aclSourceIp"): + find_flag = False + if self.src_wild and tmp.get("aclSrcWild") != self.src_wild: + find_flag = False + if self.src_pool_name and tmp.get("aclSPoolName") != self.src_pool_name: + find_flag = False + if self.dest_ip: + tmp_src_ip = self.dest_ip.split(".") + tmp_src_wild = self.dest_wild.split(".") + tmp_addr_item = [] + for idx in range(4): + item1 = 255 - int(tmp_src_wild[idx]) + item2 = item1 & int(tmp_src_ip[idx]) + tmp_addr_item.append(item2) + tmp_addr = "%s.%s.%s.%s" % (tmp_addr_item[0], tmp_addr_item[1], + tmp_addr_item[2], tmp_addr_item[3]) + if tmp_addr != tmp.get("aclDestIp"): + find_flag = False + if self.dest_wild and tmp.get("aclDestWild") != self.dest_wild: + find_flag = False + if self.dest_pool_name and tmp.get("aclDPoolName") != self.dest_pool_name: + find_flag = False + if self.src_port_op and tmp.get("aclSrcPortOp") != self.src_port_op: + find_flag = False + if self.src_port_begin and tmp.get("aclSrcPortBegin") != self.src_port_begin: + find_flag = False + if self.src_port_end and tmp.get("aclSrcPortEnd") != self.src_port_end: + find_flag = False + if self.src_port_pool_name and tmp.get("aclSPortPoolName") != 
self.src_port_pool_name: + find_flag = False + if self.dest_port_op and tmp.get("aclDestPortOp") != self.dest_port_op: + find_flag = False + if self.dest_port_begin and tmp.get("aclDestPortB") != self.dest_port_begin: + find_flag = False + if self.dest_port_end and tmp.get("aclDestPortE") != self.dest_port_end: + find_flag = False + if self.dest_port_pool_name and tmp.get("aclDPortPoolName") != self.dest_port_pool_name: + find_flag = False + frag_type = "clear_fragment" if tmp.get("aclFragType") is None else tmp.get("aclFragType") + if self.frag_type and frag_type != self.frag_type: + find_flag = False + if self.precedence and tmp.get("aclPrecedence") != self.precedence: + find_flag = False + if self.tos and tmp.get("aclTos") != self.tos: + find_flag = False + if self.dscp and tmp.get("aclDscp") != self.dscp: + find_flag = False + if self.icmp_name and tmp.get("aclIcmpName") != self.icmp_name: + find_flag = False + if self.icmp_type and tmp.get("aclIcmpType") != self.icmp_type: + find_flag = False + if self.icmp_code and tmp.get("aclIcmpCode") != self.icmp_code: + find_flag = False + if tmp.get("aclTtlExpired").lower() != str(self.ttl_expired).lower(): + find_flag = False + if self.vrf_name and tmp.get("vrfName") != self.vrf_name: + find_flag = False + if self.syn_flag and tmp.get("aclSynFlag") != self.syn_flag: + find_flag = False + if self.tcp_flag_mask and tmp.get("aclTcpFlagMask") != self.tcp_flag_mask: + find_flag = False + if self.protocol == "tcp" and \ + tmp.get("aclEstablished").lower() != str(self.established).lower(): + find_flag = False + if self.time_range and tmp.get("aclTimeName") != self.time_range: + find_flag = False + if self.rule_description and tmp.get("aclRuleDescription") != self.rule_description: + find_flag = False + if self.igmp_type and tmp.get("aclIgmpType") != self.igmp_type_num: + find_flag = False + if tmp.get("aclLogFlag").lower() != str(self.log_flag).lower(): + find_flag = False + + if find_flag: + break + else: + find_flag = False + + if self.state == "present": + need_cfg = bool(not find_flag) + elif self.state == "absent": + need_cfg = bool(find_flag) + else: + need_cfg = False + + self.cur_advance_rule_cfg["need_cfg"] = need_cfg + + def get_proposed(self): + """ Get proposed state """ + + self.proposed["state"] = self.state + + if self.acl_name: + self.proposed["acl_name"] = self.acl_name + if self.acl_num: + self.proposed["acl_num"] = self.acl_num + if self.acl_step: + self.proposed["acl_step"] = self.acl_step + if self.acl_description: + self.proposed["acl_description"] = self.acl_description + if self.rule_name: + self.proposed["rule_name"] = self.rule_name + if self.rule_id: + self.proposed["rule_id"] = self.rule_id + if self.rule_action: + self.proposed["rule_action"] = self.rule_action + if self.protocol: + self.proposed["protocol"] = self.protocol + if self.source_ip: + self.proposed["source_ip"] = self.source_ip + if self.src_mask: + self.proposed["src_mask"] = self.src_mask + if self.src_pool_name: + self.proposed["src_pool_name"] = self.src_pool_name + if self.dest_ip: + self.proposed["dest_ip"] = self.dest_ip + if self.dest_mask: + self.proposed["dest_mask"] = self.dest_mask + if self.dest_pool_name: + self.proposed["dest_pool_name"] = self.dest_pool_name + if self.src_port_op: + self.proposed["src_port_op"] = self.src_port_op + if self.src_port_begin: + self.proposed["src_port_begin"] = self.src_port_begin + if self.src_port_end: + self.proposed["src_port_end"] = self.src_port_end + if self.src_port_pool_name: + 
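# Aside -- an illustrative sketch, not the module's code: get_proposed()
# copies into self.proposed only the options the task actually supplied
# (booleans such as established and log_flag are recorded unconditionally).
# The string-option part collapses to a dict comprehension:
def build_proposed(params):
    return {key: value for key, value in params.items() if value}

assert build_proposed({'acl_name': 'test', 'acl_num': None,
                       'protocol': 'tcp'}) == {'acl_name': 'test', 'protocol': 'tcp'}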
self.proposed["src_port_pool_name"] = self.src_port_pool_name + if self.dest_port_op: + self.proposed["dest_port_op"] = self.dest_port_op + if self.dest_port_begin: + self.proposed["dest_port_begin"] = self.dest_port_begin + if self.dest_port_end: + self.proposed["dest_port_end"] = self.dest_port_end + if self.dest_port_pool_name: + self.proposed["dest_port_pool_name"] = self.dest_port_pool_name + if self.frag_type: + self.proposed["frag_type"] = self.frag_type + if self.precedence: + self.proposed["precedence"] = self.precedence + if self.tos: + self.proposed["tos"] = self.tos + if self.dscp: + self.proposed["dscp"] = self.dscp + if self.icmp_name: + self.proposed["icmp_name"] = self.icmp_name + if self.icmp_type: + self.proposed["icmp_type"] = self.icmp_type + if self.icmp_code: + self.proposed["icmp_code"] = self.icmp_code + if self.ttl_expired: + self.proposed["ttl_expired"] = self.ttl_expired + if self.vrf_name: + self.proposed["vrf_name"] = self.vrf_name + if self.syn_flag: + self.proposed["syn_flag"] = self.syn_flag + if self.tcp_flag_mask: + self.proposed["tcp_flag_mask"] = self.tcp_flag_mask + self.proposed["established"] = self.established + if self.time_range: + self.proposed["time_range"] = self.time_range + if self.rule_description: + self.proposed["rule_description"] = self.rule_description + if self.igmp_type: + self.proposed["igmp_type"] = self.igmp_type + self.proposed["log_flag"] = self.log_flag + + def get_existing(self): + """ Get existing state """ + + self.existing["acl_info"] = self.cur_acl_cfg["acl_info"] + self.existing["adv_rule_info"] = self.cur_advance_rule_cfg[ + "adv_rule_info"] + + def get_end_state(self): + """ Get end state """ + + self.check_acl_args() + self.end_state["acl_info"] = self.cur_acl_cfg["acl_info"] + + self.check_advance_rule_args() + self.end_state["adv_rule_info"] = self.cur_advance_rule_cfg[ + "adv_rule_info"] + if self.end_state == self.existing: + self.changed = False + self.updates_cmd = list() + + def merge_acl(self): + """ Merge acl operation """ + + conf_str = CE_MERGE_ACL_HEADER % self.acl_name + + if self.acl_type: + conf_str += "%s" % self.acl_type + if self.acl_num: + conf_str += "%s" % self.acl_num + if self.acl_step: + conf_str += "%s" % self.acl_step + if self.acl_description: + conf_str += "%s" % self.acl_description + + conf_str += CE_MERGE_ACL_TAIL + + recv_xml = self.netconf_set_config(conf_str=conf_str) + + if "" not in recv_xml: + self.module.fail_json(msg='Error: Merge acl failed.') + + if self.acl_name.isdigit(): + cmd = "acl number %s" % self.acl_name + else: + if self.acl_type and not self.acl_num: + cmd = "acl name %s %s" % (self.acl_name, self.acl_type.lower()) + elif self.acl_type and self.acl_num: + cmd = "acl name %s number %s" % (self.acl_name, self.acl_num) + elif not self.acl_type and self.acl_num: + cmd = "acl name %s number %s" % (self.acl_name, self.acl_num) + self.updates_cmd.append(cmd) + + if self.acl_description: + cmd = "description %s" % self.acl_description + self.updates_cmd.append(cmd) + + if self.acl_step: + cmd = "step %s" % self.acl_step + self.updates_cmd.append(cmd) + + self.changed = True + + def delete_acl(self): + """ Delete acl operation """ + + conf_str = CE_DELETE_ACL_HEADER % self.acl_name + + if self.acl_type: + conf_str += "%s" % self.acl_type + if self.acl_num: + conf_str += "%s" % self.acl_num + if self.acl_step: + conf_str += "%s" % self.acl_step + if self.acl_description: + conf_str += "%s" % self.acl_description + + conf_str += CE_DELETE_ACL_TAIL + + recv_xml = 
self.netconf_set_config(conf_str=conf_str) + + if "" not in recv_xml: + self.module.fail_json(msg='Error: Delete acl failed.') + + if self.acl_description: + cmd = "undo description" + self.updates_cmd.append(cmd) + + if self.acl_step: + cmd = "undo step" + self.updates_cmd.append(cmd) + + if self.acl_name.isdigit(): + cmd = "undo acl number %s" % self.acl_name + else: + cmd = "undo acl name %s" % self.acl_name + self.updates_cmd.append(cmd) + + self.changed = True + + def merge_adv_rule(self): + """ Merge advance rule operation """ + + conf_str = CE_MERGE_ACL_ADVANCE_RULE_HEADER % ( + self.acl_name, self.rule_name) + + if self.rule_id: + conf_str += "%s" % self.rule_id + if self.rule_action: + conf_str += "%s" % self.rule_action + if self.protocol: + conf_str += "%s" % self.protocol_num + if self.source_ip: + conf_str += "%s" % self.source_ip + if self.src_wild: + conf_str += "%s" % self.src_wild + if self.src_pool_name: + conf_str += "%s" % self.src_pool_name + if self.dest_ip: + conf_str += "%s" % self.dest_ip + if self.dest_wild: + conf_str += "%s" % self.dest_wild + if self.dest_pool_name: + conf_str += "%s" % self.dest_pool_name + if self.src_port_op: + conf_str += "%s" % self.src_port_op + if self.src_port_begin: + conf_str += "%s" % self.src_port_begin + if self.src_port_end: + conf_str += "%s" % self.src_port_end + if self.src_port_pool_name: + conf_str += "%s" % self.src_port_pool_name + if self.dest_port_op: + conf_str += "%s" % self.dest_port_op + if self.dest_port_begin: + conf_str += "%s" % self.dest_port_begin + if self.dest_port_end: + conf_str += "%s" % self.dest_port_end + if self.dest_port_pool_name: + conf_str += "%s" % self.dest_port_pool_name + if self.frag_type: + conf_str += "%s" % self.frag_type + if self.precedence: + conf_str += "%s" % self.precedence + if self.tos: + conf_str += "%s" % self.tos + if self.dscp: + conf_str += "%s" % self.dscp + if self.icmp_name: + conf_str += "%s" % self.icmp_name + if self.icmp_type: + conf_str += "%s" % self.icmp_type + if self.icmp_code: + conf_str += "%s" % self.icmp_code + conf_str += "%s" % str(self.ttl_expired).lower() + if self.vrf_name: + conf_str += "%s" % self.vrf_name + if self.syn_flag: + conf_str += "%s" % self.syn_flag + if self.tcp_flag_mask: + conf_str += "%s" % self.tcp_flag_mask + if self.protocol == "tcp": + conf_str += "%s" % str(self.established).lower() + if self.time_range: + conf_str += "%s" % self.time_range + if self.rule_description: + conf_str += "%s" % self.rule_description + if self.igmp_type: + conf_str += "%s" % self.igmp_type_num + conf_str += "%s" % str(self.log_flag).lower() + + conf_str += CE_MERGE_ACL_ADVANCE_RULE_TAIL + + recv_xml = self.netconf_set_config(conf_str=conf_str) + + if "" not in recv_xml: + self.module.fail_json(msg='Error: Merge acl base rule failed.') + + if self.rule_action and self.protocol: + cmd = "rule" + if self.rule_id: + cmd += " %s" % self.rule_id + cmd += " %s" % self.rule_action + cmd += " %s" % self.protocol + if self.dscp: + cmd += " dscp %s" % self.dscp + if self.tos: + cmd += " tos %s" % self.tos + if self.source_ip and self.src_wild: + cmd += " source %s %s" % (self.source_ip, self.src_wild) + if self.src_pool_name: + cmd += " source-pool %s" % self.src_pool_name + if self.src_port_op: + cmd += " source-port" + if self.src_port_op == "lt": + cmd += " lt %s" % self.src_port_end + elif self.src_port_op == "eq": + cmd += " eq %s" % self.src_port_begin + elif self.src_port_op == "gt": + cmd += " gt %s" % self.src_port_begin + elif self.src_port_op == "range": + cmd 
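# NOTE: self.protocol_num, compared and sent above, is assigned outside this
# excerpt. The device stores the IANA protocol number rather than the keyword,
# so a mapping for the argument_spec's protocol choices would plausibly look
# like the sketch below (an illustrative assumption, not the module's literal table):
PROTOCOL_NUMBERS = {
    "icmp": "1",
    "igmp": "2",
    "ipinip": "4",
    "tcp": "6",
    "udp": "17",
    "gre": "47",
    "ospf": "89",
}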
+= " range %s %s" % (self.src_port_begin, + self.src_port_end) + if self.src_port_pool_name: + cmd += " source-port-pool %s" % self.src_port_pool_name + if self.dest_ip and self.dest_wild: + cmd += " destination %s %s" % (self.dest_ip, self.dest_wild) + if self.dest_pool_name: + cmd += " destination-pool %s" % self.dest_pool_name + if self.dest_port_op: + cmd += " destination-port" + if self.dest_port_op == "lt": + cmd += " lt %s" % self.dest_port_end + elif self.dest_port_op == "eq": + cmd += " eq %s" % self.dest_port_begin + elif self.dest_port_op == "gt": + cmd += " gt %s" % self.dest_port_begin + elif self.dest_port_op == "range": + cmd += " range %s %s" % (self.dest_port_begin, + self.dest_port_end) + if self.dest_port_pool_name: + cmd += " destination-port-pool %s" % self.dest_port_pool_name + if self.frag_type == "fragment": + cmd += " fragment-type fragment" + if self.precedence: + cmd += " precedence %s" % self.precedence_name[self.precedence] + + if self.protocol == "icmp": + if self.icmp_name: + cmd += " icmp-type %s" % self.icmp_name + elif self.icmp_type and self.icmp_code: + cmd += " icmp-type %s %s" % (self.icmp_type, self.icmp_code) + elif self.icmp_type: + cmd += " icmp-type %s" % self.icmp_type + if self.protocol == "tcp": + if self.syn_flag: + cmd += " tcp-flag %s" % self.syn_flag + if self.tcp_flag_mask: + cmd += " mask %s" % self.tcp_flag_mask + if self.established: + cmd += " established" + if self.protocol == "igmp": + if self.igmp_type: + cmd += " igmp-type %s" % self.igmp_type + if self.time_range: + cmd += " time-range %s" % self.time_range + if self.vrf_name: + cmd += " vpn-instance %s" % self.vrf_name + if self.ttl_expired: + cmd += " ttl-expired" + if self.log_flag: + cmd += " logging" + self.updates_cmd.append(cmd) + + if self.rule_description: + cmd = "rule %s description %s" % ( + self.rule_id, self.rule_description) + self.updates_cmd.append(cmd) + + self.changed = True + + def delete_adv_rule(self): + """ Delete advance rule operation """ + + conf_str = CE_DELETE_ACL_ADVANCE_RULE_HEADER % ( + self.acl_name, self.rule_name) + + if self.rule_id: + conf_str += "%s" % self.rule_id + if self.rule_action: + conf_str += "%s" % self.rule_action + if self.protocol: + conf_str += "%s" % self.protocol_num + if self.source_ip: + conf_str += "%s" % self.source_ip + if self.src_wild: + conf_str += "%s" % self.src_wild + if self.src_pool_name: + conf_str += "%s" % self.src_pool_name + if self.dest_ip: + conf_str += "%s" % self.dest_ip + if self.dest_wild: + conf_str += "%s" % self.dest_wild + if self.dest_pool_name: + conf_str += "%s" % self.dest_pool_name + if self.src_port_op: + conf_str += "%s" % self.src_port_op + if self.src_port_begin: + conf_str += "%s" % self.src_port_begin + if self.src_port_end: + conf_str += "%s" % self.src_port_end + if self.src_port_pool_name: + conf_str += "%s" % self.src_port_pool_name + if self.dest_port_op: + conf_str += "%s" % self.dest_port_op + if self.dest_port_begin: + conf_str += "%s" % self.dest_port_begin + if self.dest_port_end: + conf_str += "%s" % self.dest_port_end + if self.dest_port_pool_name: + conf_str += "%s" % self.dest_port_pool_name + if self.frag_type: + conf_str += "%s" % self.frag_type + if self.precedence: + conf_str += "%s" % self.precedence + if self.tos: + conf_str += "%s" % self.tos + if self.dscp: + conf_str += "%s" % self.dscp + if self.icmp_name: + conf_str += "%s" % self.icmp_name + if self.icmp_type: + conf_str += "%s" % self.icmp_type + if self.icmp_code: + conf_str += "%s" % self.icmp_code + conf_str += 
"%s" % str(self.ttl_expired).lower() + if self.vrf_name: + conf_str += "%s" % self.vrf_name + if self.syn_flag: + conf_str += "%s" % self.syn_flag + if self.tcp_flag_mask: + conf_str += "%s" % self.tcp_flag_mask + if self.protocol == "tcp": + conf_str += "%s" % str(self.established).lower() + if self.time_range: + conf_str += "%s" % self.time_range + if self.rule_description: + conf_str += "%s" % self.rule_description + if self.igmp_type: + conf_str += "%s" % self.igmp_type + conf_str += "%s" % str(self.log_flag).lower() + + conf_str += CE_DELETE_ACL_ADVANCE_RULE_TAIL + + recv_xml = self.netconf_set_config(conf_str=conf_str) + + if "" not in recv_xml: + self.module.fail_json(msg='Error: Delete acl base rule failed.') + + if self.rule_description: + if self.acl_name.isdigit(): + cmd = "acl number %s" % self.acl_name + else: + cmd = "acl name %s" % self.acl_name + self.updates_cmd.append(cmd) + + cmd = "undo rule %s description" % self.rule_id + self.updates_cmd.append(cmd) + + if self.rule_id: + if self.acl_name.isdigit(): + cmd = "acl number %s" % self.acl_name + else: + cmd = "acl name %s" % self.acl_name + self.updates_cmd.append(cmd) + + cmd = "undo rule %s" % self.rule_id + self.updates_cmd.append(cmd) + elif self.rule_action and self.protocol: + if self.acl_name.isdigit(): + cmd = "acl number %s" % self.acl_name + else: + cmd = "acl name %s" % self.acl_name + self.updates_cmd.append(cmd) + + cmd = "undo rule" + cmd += " %s" % self.rule_action + cmd += " %s" % self.protocol + if self.dscp: + cmd += " dscp %s" % self.dscp + if self.tos: + cmd += " tos %s" % self.tos + if self.source_ip and self.src_mask: + cmd += " source %s %s" % (self.source_ip, self.src_mask) + if self.src_pool_name: + cmd += " source-pool %s" % self.src_pool_name + if self.src_port_op: + cmd += " source-port" + if self.src_port_op == "lt": + cmd += " lt %s" % self.src_port_end + elif self.src_port_op == "eq": + cmd += " eq %s" % self.src_port_begin + elif self.src_port_op == "gt": + cmd += " gt %s" % self.src_port_begin + elif self.src_port_op == "range": + cmd += " range %s %s" % (self.src_port_begin, + self.src_port_end) + if self.src_port_pool_name: + cmd += " source-port-pool %s" % self.src_port_pool_name + if self.dest_ip and self.dest_mask: + cmd += " destination %s %s" % (self.dest_ip, self.dest_mask) + if self.dest_pool_name: + cmd += " destination-pool %s" % self.dest_pool_name + if self.dest_port_op: + cmd += " destination-port" + if self.dest_port_op == "lt": + cmd += " lt %s" % self.dest_port_end + elif self.dest_port_op == "eq": + cmd += " eq %s" % self.dest_port_begin + elif self.dest_port_op == "gt": + cmd += " gt %s" % self.dest_port_begin + elif self.dest_port_op == "range": + cmd += " range %s %s" % (self.dest_port_begin, + self.dest_port_end) + if self.dest_port_pool_name: + cmd += " destination-port-pool %s" % self.dest_port_pool_name + if self.frag_type == "fragment": + cmd += " fragment-type fragment" + if self.precedence: + cmd += " precedence %s" % self.precedence_name[self.precedence] + if self.time_range: + cmd += " time-range %s" % self.time_range + if self.vrf_name: + cmd += " vpn-instance %s" % self.vrf_name + if self.ttl_expired: + cmd += " ttl-expired" + if self.log_flag: + cmd += " logging" + self.updates_cmd.append(cmd) + + self.changed = True + + def work(self): + """ Main work function """ + + self.check_acl_args() + self.check_advance_rule_args() + self.get_proposed() + self.get_existing() + + if self.state == "present": + if self.cur_acl_cfg["need_cfg"]: + self.merge_acl() + if 
self.cur_advance_rule_cfg["need_cfg"]: + self.merge_adv_rule() + + elif self.state == "absent": + if self.cur_advance_rule_cfg["need_cfg"]: + self.delete_adv_rule() + + elif self.state == "delete_acl": + if self.cur_acl_cfg["need_cfg"]: + self.delete_acl() + + self.get_end_state() + + self.results['changed'] = self.changed + self.results['proposed'] = self.proposed + self.results['existing'] = self.existing + self.results['end_state'] = self.end_state + self.results['updates'] = self.updates_cmd + + self.module.exit_json(**self.results) + + +def main(): + """ Module main """ + + argument_spec = dict( + state=dict(choices=['present', 'absent', + 'delete_acl'], default='present'), + acl_name=dict(type='str', required=True), + acl_num=dict(type='str'), + acl_step=dict(type='str'), + acl_description=dict(type='str'), + rule_name=dict(type='str'), + rule_id=dict(type='str'), + rule_action=dict(choices=['permit', 'deny']), + protocol=dict(choices=['ip', 'icmp', 'igmp', + 'ipinip', 'tcp', 'udp', 'gre', 'ospf']), + source_ip=dict(type='str'), + src_mask=dict(type='str'), + src_pool_name=dict(type='str'), + dest_ip=dict(type='str'), + dest_mask=dict(type='str'), + dest_pool_name=dict(type='str'), + src_port_op=dict(choices=['lt', 'eq', 'gt', 'range']), + src_port_begin=dict(type='str'), + src_port_end=dict(type='str'), + src_port_pool_name=dict(type='str'), + dest_port_op=dict(choices=['lt', 'eq', 'gt', 'range']), + dest_port_begin=dict(type='str'), + dest_port_end=dict(type='str'), + dest_port_pool_name=dict(type='str'), + frag_type=dict(choices=['fragment', 'clear_fragment']), + precedence=dict(type='str'), + tos=dict(type='str'), + dscp=dict(type='str'), + icmp_name=dict(choices=['unconfiged', 'echo', 'echo-reply', 'fragmentneed-DFset', 'host-redirect', + 'host-tos-redirect', 'host-unreachable', 'information-reply', 'information-request', + 'net-redirect', 'net-tos-redirect', 'net-unreachable', 'parameter-problem', + 'port-unreachable', 'protocol-unreachable', 'reassembly-timeout', 'source-quench', + 'source-route-failed', 'timestamp-reply', 'timestamp-request', 'ttl-exceeded', + 'address-mask-reply', 'address-mask-request', 'custom']), + icmp_type=dict(type='str'), + icmp_code=dict(type='str'), + ttl_expired=dict(required=False, default=False, type='bool'), + vrf_name=dict(type='str'), + syn_flag=dict(type='str'), + tcp_flag_mask=dict(type='str'), + established=dict(required=False, default=False, type='bool'), + time_range=dict(type='str'), + rule_description=dict(type='str'), + igmp_type=dict(choices=['host-query', 'mrouter-adver', 'mrouter-solic', 'mrouter-termi', 'mtrace-resp', + 'mtrace-route', 'v1host-report', 'v2host-report', 'v2leave-group', 'v3host-report']), + log_flag=dict(required=False, default=False, type='bool') + ) + + argument_spec.update(ce_argument_spec) + module = AdvanceAcl(argument_spec=argument_spec) + module.work() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/cloudengine/ce_acl_interface.py b/plugins/modules/network/cloudengine/ce_acl_interface.py new file mode 100644 index 0000000000..deb9c22f74 --- /dev/null +++ b/plugins/modules/network/cloudengine/ce_acl_interface.py @@ -0,0 +1,327 @@ +#!/usr/bin/python +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. 
+# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: ce_acl_interface +short_description: Manages applying ACLs to interfaces on HUAWEI CloudEngine switches. +description: + - Manages applying ACLs to interfaces on HUAWEI CloudEngine switches. +author: + - wangdezhuang (@QijunPan) +notes: + - Recommended connection is C(network_cli). + - This module also works with C(local) connections for legacy playbooks. +options: + acl_name: + description: + - ACL number or name. + For a numbered rule group, the value ranging from 2000 to 4999. + For a named rule group, the value is a string of 1 to 32 case-sensitive characters starting + with a letter, spaces not supported. + required: true + interface: + description: + - Interface name. + Only support interface full name, such as "40GE2/0/1". + required: true + direction: + description: + - Direction ACL to be applied in on the interface. + required: true + choices: ['inbound', 'outbound'] + state: + description: + - Determines whether the config should be present or not on the device. + required: false + default: present + choices: ['present', 'absent'] +''' + +EXAMPLES = ''' + +- name: CloudEngine acl interface test + hosts: cloudengine + connection: local + gather_facts: no + vars: + cli: + host: "{{ inventory_hostname }}" + port: "{{ ansible_ssh_port }}" + username: "{{ username }}" + password: "{{ password }}" + transport: cli + + tasks: + + - name: "Apply acl to interface" + ce_acl_interface: + state: present + acl_name: 2000 + interface: 40GE1/0/1 + direction: outbound + provider: "{{ cli }}" + + - name: "Undo acl from interface" + ce_acl_interface: + state: absent + acl_name: 2000 + interface: 40GE1/0/1 + direction: outbound + provider: "{{ cli }}" +''' + +RETURN = ''' +changed: + description: check to see if a change was made on the device + returned: always + type: bool + sample: true +proposed: + description: k/v pairs of parameters passed into module + returned: always + type: dict + sample: {"acl_name": "2000", + "direction": "outbound", + "interface": "40GE2/0/1", + "state": "present"} +existing: + description: k/v pairs of existing aaa server + returned: always + type: dict + sample: {"acl interface": "traffic-filter acl lb inbound"} +end_state: + description: k/v pairs of aaa params after module execution + returned: always + type: dict + sample: {"acl interface": ["traffic-filter acl lb inbound", "traffic-filter acl 2000 outbound"]} +updates: + description: command sent to the device + returned: always + type: list + sample: ["interface 40ge2/0/1", + "traffic-filter acl 2000 outbound"] +''' + + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.network.cloudengine.ce import get_config, exec_command, cli_err_msg +from ansible_collections.community.general.plugins.module_utils.network.cloudengine.ce import ce_argument_spec + + +class AclInterface(object): + """ Manages acl interface configuration """ + + def __init__(self, **kwargs): + """ 
Class init """ + + # argument spec + argument_spec = kwargs["argument_spec"] + self.spec = argument_spec + self.module = AnsibleModule(argument_spec=self.spec, supports_check_mode=True) + + # config + self.cur_cfg = dict() + self.cur_cfg["acl interface"] = [] + + # module args + self.state = self.module.params['state'] + self.acl_name = self.module.params['acl_name'] + self.interface = self.module.params['interface'] + self.direction = self.module.params['direction'] + + # state + self.changed = False + self.updates_cmd = list() + self.results = dict() + self.proposed = dict() + self.existing = dict() + self.end_state = dict() + + def check_args(self): + """ Check args """ + + if self.acl_name: + if self.acl_name.isdigit(): + if int(self.acl_name) < 2000 or int(self.acl_name) > 4999: + self.module.fail_json( + msg='Error: The value of acl_name is out of [2000 - 4999].') + else: + if len(self.acl_name) < 1 or len(self.acl_name) > 32: + self.module.fail_json( + msg='Error: The len of acl_name is out of [1 - 32].') + + if self.interface: + cmd = "display current-configuration | ignore-case section include interface %s" % self.interface + rc, out, err = exec_command(self.module, cmd) + if rc != 0: + self.module.fail_json(msg=err) + result = str(out).strip() + if result: + tmp = result.split('\n') + if "display" in tmp[0]: + tmp.pop(0) + if not tmp: + self.module.fail_json( + msg='Error: The interface %s is not in the device.' % self.interface) + + def get_proposed(self): + """ Get proposed config """ + + self.proposed["state"] = self.state + + if self.acl_name: + self.proposed["acl_name"] = self.acl_name + + if self.interface: + self.proposed["interface"] = self.interface + + if self.direction: + self.proposed["direction"] = self.direction + + def get_existing(self): + """ Get existing config """ + + cmd = "display current-configuration | ignore-case section include interface %s | include traffic-filter" % self.interface + rc, out, err = exec_command(self.module, cmd) + if rc != 0: + self.module.fail_json(msg=err) + result = str(out).strip() + end = [] + if result: + tmp = result.split('\n') + if "display" in tmp[0]: + tmp.pop(0) + for item in tmp: + end.append(item.strip()) + self.cur_cfg["acl interface"] = end + self.existing["acl interface"] = end + + def get_end_state(self): + """ Get config end state """ + + cmd = "display current-configuration | ignore-case section include interface %s | include traffic-filter" % self.interface + rc, out, err = exec_command(self.module, cmd) + if rc != 0: + self.module.fail_json(msg=err) + result = str(out).strip() + end = [] + if result: + tmp = result.split('\n') + if "display" in tmp[0]: + tmp.pop(0) + for item in tmp: + end.append(item.strip()) + self.end_state["acl interface"] = end + + def load_config(self, config): + """Sends configuration commands to the remote device""" + + rc, out, err = exec_command(self.module, 'mmi-mode enable') + if rc != 0: + self.module.fail_json(msg='unable to set mmi-mode enable', output=err) + rc, out, err = exec_command(self.module, 'system-view immediately') + if rc != 0: + self.module.fail_json(msg='unable to enter system-view', output=err) + + for cmd in config: + rc, out, err = exec_command(self.module, cmd) + if rc != 0: + if "unrecognized command found" in err.lower(): + self.module.fail_json(msg="Error:The parameter is incorrect or the interface does not support this parameter.") + else: + self.module.fail_json(msg=cli_err_msg(cmd.strip(), err)) + + exec_command(self.module, 'return') + + def 
cli_load_config(self, commands): + """ Cli method to load config """ + + if not self.module.check_mode: + self.load_config(commands) + + def work(self): + """ Work function """ + + self.check_args() + self.get_proposed() + self.get_existing() + + cmds = list() + tmp_cmd = "traffic-filter acl %s %s" % (self.acl_name, self.direction) + undo_tmp_cmd = "undo traffic-filter acl %s %s" % ( + self.acl_name, self.direction) + + if self.state == "present": + if tmp_cmd not in self.cur_cfg["acl interface"]: + interface_cmd = "interface %s" % self.interface.lower() + cmds.append(interface_cmd) + cmds.append(tmp_cmd) + + self.cli_load_config(cmds) + + self.changed = True + self.updates_cmd.append(interface_cmd) + self.updates_cmd.append(tmp_cmd) + + else: + if tmp_cmd in self.cur_cfg["acl interface"]: + interface_cmd = "interface %s" % self.interface + cmds.append(interface_cmd) + cmds.append(undo_tmp_cmd) + self.cli_load_config(cmds) + + self.changed = True + self.updates_cmd.append(interface_cmd) + self.updates_cmd.append(undo_tmp_cmd) + + self.get_end_state() + + self.results['changed'] = self.changed + self.results['proposed'] = self.proposed + self.results['existing'] = self.existing + self.results['end_state'] = self.end_state + self.results['updates'] = self.updates_cmd + + self.module.exit_json(**self.results) + + +def main(): + """ Module main """ + + argument_spec = dict( + state=dict(choices=['present', 'absent'], default='present'), + acl_name=dict(type='str', required=True), + interface=dict(type='str', required=True), + direction=dict(choices=['inbound', 'outbound'], required=True) + ) + + argument_spec.update(ce_argument_spec) + module = AclInterface(argument_spec=argument_spec) + module.work() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/cloudengine/ce_bfd_global.py b/plugins/modules/network/cloudengine/ce_bfd_global.py new file mode 100644 index 0000000000..924fdbddd0 --- /dev/null +++ b/plugins/modules/network/cloudengine/ce_bfd_global.py @@ -0,0 +1,558 @@ +#!/usr/bin/python +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: ce_bfd_global +short_description: Manages BFD global configuration on HUAWEI CloudEngine devices. +description: + - Manages BFD global configuration on HUAWEI CloudEngine devices. +author: QijunPan (@QijunPan) +notes: + - This module requires the netconf system service be enabled on the remote device being managed. + - Recommended connection is C(netconf). + - This module also works with C(local) connections for legacy playbooks. +options: + bfd_enable: + description: + - Enables the global Bidirectional Forwarding Detection (BFD) function. 
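# NOTE: AclInterface.work() above is idempotent by exact string match -- the
# 'traffic-filter acl <name> <direction>' line is looked up in the parsed
# interface section, and commands are only generated when the desired line is
# missing (present) or still there (absent). In sketch form:
def plan_commands(state, interface_cmd, tmp_cmd, undo_tmp_cmd, current_lines):
    if state == "present" and tmp_cmd not in current_lines:
        return [interface_cmd, tmp_cmd]
    if state == "absent" and tmp_cmd in current_lines:
        return [interface_cmd, undo_tmp_cmd]
    return []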
+ choices: ['enable', 'disable'] + default_ip: + description: + - Specifies the default multicast IP address. + The value ranges from 224.0.0.107 to 224.0.0.250. + tos_exp_dynamic: + description: + - Indicates the priority of BFD control packets for dynamic BFD sessions. + The value is an integer ranging from 0 to 7. + The default priority is 7, which is the highest priority of BFD control packets. + tos_exp_static: + description: + - Indicates the priority of BFD control packets for static BFD sessions. + The value is an integer ranging from 0 to 7. + The default priority is 7, which is the highest priority of BFD control packets. + damp_init_wait_time: + description: + - Specifies an initial flapping suppression time for a BFD session. + The value is an integer ranging from 1 to 3600000, in milliseconds. + The default value is 2000. + damp_max_wait_time: + description: + - Specifies a maximum flapping suppression time for a BFD session. + The value is an integer ranging from 1 to 3600000, in milliseconds. + The default value is 15000. + damp_second_wait_time: + description: + - Specifies a secondary flapping suppression time for a BFD session. + The value is an integer ranging from 1 to 3600000, in milliseconds. + The default value is 5000. + delay_up_time: + description: + - Specifies the delay before a BFD session becomes Up. + The value is an integer ranging from 1 to 600, in seconds. + The default value is 0, indicating that a BFD session immediately becomes Up. + state: + description: + - Determines whether the config should be present or not on the device. + default: present + choices: ['present', 'absent'] +''' + +EXAMPLES = ''' +- name: bfd global module test + hosts: cloudengine + connection: local + gather_facts: no + vars: + cli: + host: "{{ inventory_hostname }}" + port: "{{ ansible_ssh_port }}" + username: "{{ username }}" + password: "{{ password }}" + transport: cli + + tasks: + - name: Enable the global BFD function + ce_bfd_global: + bfd_enable: enable + provider: '{{ cli }}' + + - name: Set the default multicast IP address to 224.0.0.150 + ce_bfd_global: + bfd_enable: enable + default_ip: 224.0.0.150 + state: present + provider: '{{ cli }}' + + - name: Set the priority of BFD control packets for dynamic and static BFD sessions + ce_bfd_global: + bfd_enable: enable + tos_exp_dynamic: 5 + tos_exp_static: 6 + state: present + provider: '{{ cli }}' + + - name: Disable the global BFD function + ce_bfd_global: + bfd_enable: disable + provider: '{{ cli }}' +''' + +RETURN = ''' +proposed: + description: k/v pairs of parameters passed into module + returned: verbose mode + type: dict + sample: { + "bfd_enalbe": "enable", + "damp_init_wait_time": null, + "damp_max_wait_time": null, + "damp_second_wait_time": null, + "default_ip": null, + "delayUpTimer": null, + "state": "present", + "tos_exp_dynamic": null, + "tos_exp_static": null + } +existing: + description: k/v pairs of existing configuration + returned: verbose mode + type: dict + sample: { + "global": { + "bfdEnable": "false", + "dampInitWaitTime": "2000", + "dampMaxWaitTime": "12000", + "dampSecondWaitTime": "5000", + "defaultIp": "224.0.0.184", + "delayUpTimer": null, + "tosExp": "7", + "tosExpStatic": "7" + } + } +end_state: + description: k/v pairs of configuration after module execution + returned: verbose mode + type: dict + sample: { + "global": { + "bfdEnable": "true", + "dampInitWaitTime": "2000", + "dampMaxWaitTime": "12000", + "dampSecondWaitTime": "5000", + "defaultIp": "224.0.0.184", + "delayUpTimer": null, + 
"tosExp": "7", + "tosExpStatic": "7" + } + } +updates: + description: commands sent to the device + returned: always + type: list + sample: [ "bfd" ] +changed: + description: check to see if a change was made on the device + returned: always + type: bool + sample: true +''' + +import sys +import socket +from xml.etree import ElementTree +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.network.cloudengine.ce import get_nc_config, set_nc_config, ce_argument_spec, check_ip_addr + +CE_NC_GET_BFD = """ + + + %s + + +""" + +CE_NC_GET_BFD_GLB = """ + + + + + + + + + + +""" + + +def check_default_ip(ipaddr): + """check the default multicast IP address""" + + # The value ranges from 224.0.0.107 to 224.0.0.250 + if not check_ip_addr(ipaddr): + return False + + if ipaddr.count(".") != 3: + return False + + ips = ipaddr.split(".") + if ips[0] != "224" or ips[1] != "0" or ips[2] != "0": + return False + + if not ips[3].isdigit() or int(ips[3]) < 107 or int(ips[3]) > 250: + return False + + return True + + +class BfdGlobal(object): + """Manages BFD Global""" + + def __init__(self, argument_spec): + self.spec = argument_spec + self.module = None + self.__init_module__() + + # module input info + self.bfd_enable = self.module.params['bfd_enable'] + self.default_ip = self.module.params['default_ip'] + self.tos_exp_dynamic = self.module.params['tos_exp_dynamic'] + self.tos_exp_static = self.module.params['tos_exp_static'] + self.damp_init_wait_time = self.module.params['damp_init_wait_time'] + self.damp_max_wait_time = self.module.params['damp_max_wait_time'] + self.damp_second_wait_time = self.module.params['damp_second_wait_time'] + self.delay_up_time = self.module.params['delay_up_time'] + self.state = self.module.params['state'] + + # host info + self.host = self.module.params['host'] + self.username = self.module.params['username'] + self.port = self.module.params['port'] + + # state + self.changed = False + self.bfd_dict = dict() + self.updates_cmd = list() + self.commands = list() + self.results = dict() + self.proposed = dict() + self.existing = dict() + self.end_state = dict() + + def __init_module__(self): + """init module""" + + required_together = [('damp_init_wait_time', 'damp_max_wait_time', 'damp_second_wait_time')] + self.module = AnsibleModule(argument_spec=self.spec, + required_together=required_together, + supports_check_mode=True) + + def get_bfd_dict(self): + """bfd config dict""" + + bfd_dict = dict() + bfd_dict["global"] = dict() + conf_str = CE_NC_GET_BFD % CE_NC_GET_BFD_GLB + + xml_str = get_nc_config(self.module, conf_str) + if "" in xml_str: + return bfd_dict + + xml_str = xml_str.replace('\r', '').replace('\n', '').\ + replace('xmlns="urn:ietf:params:xml:ns:netconf:base:1.0"', "").\ + replace('xmlns="http://www.huawei.com/netconf/vrp"', "") + root = ElementTree.fromstring(xml_str) + + # get bfd global info + glb = root.find("bfd/bfdSchGlobal") + if glb: + for attr in glb: + if attr.text is not None: + bfd_dict["global"][attr.tag] = attr.text + + return bfd_dict + + def config_global(self): + """configures bfd global params""" + + xml_str = "" + damp_chg = False + + # bfd_enable + if self.bfd_enable: + if bool(self.bfd_dict["global"].get("bfdEnable", "false") == "true") != bool(self.bfd_enable == "enable"): + if self.bfd_enable == "enable": + xml_str = "true" + self.updates_cmd.append("bfd") + else: + xml_str = "false" + self.updates_cmd.append("undo bfd") + + # get bfd end state + bfd_state = "disable" + if 
self.bfd_enable: + bfd_state = self.bfd_enable + elif self.bfd_dict["global"].get("bfdEnable", "false") == "true": + bfd_state = "enable" + + # default_ip + if self.default_ip: + if bfd_state == "enable": + if self.state == "present" and self.default_ip != self.bfd_dict["global"].get("defaultIp"): + xml_str += "%s" % self.default_ip + if "bfd" not in self.updates_cmd: + self.updates_cmd.append("bfd") + self.updates_cmd.append("default-ip-address %s" % self.default_ip) + elif self.state == "absent" and self.default_ip == self.bfd_dict["global"].get("defaultIp"): + xml_str += "" + if "bfd" not in self.updates_cmd: + self.updates_cmd.append("bfd") + self.updates_cmd.append("undo default-ip-address") + + # tos_exp_dynamic + if self.tos_exp_dynamic is not None: + if bfd_state == "enable": + if self.state == "present" and self.tos_exp_dynamic != int(self.bfd_dict["global"].get("tosExp", "7")): + xml_str += "%s" % self.tos_exp_dynamic + if "bfd" not in self.updates_cmd: + self.updates_cmd.append("bfd") + self.updates_cmd.append("tos-exp %s dynamic" % self.tos_exp_dynamic) + elif self.state == "absent" and self.tos_exp_dynamic == int(self.bfd_dict["global"].get("tosExp", "7")): + xml_str += "" + if "bfd" not in self.updates_cmd: + self.updates_cmd.append("bfd") + self.updates_cmd.append("undo tos-exp dynamic") + + # tos_exp_static + if self.tos_exp_static is not None: + if bfd_state == "enable": + if self.state == "present" \ + and self.tos_exp_static != int(self.bfd_dict["global"].get("tosExpStatic", "7")): + xml_str += "%s" % self.tos_exp_static + if "bfd" not in self.updates_cmd: + self.updates_cmd.append("bfd") + self.updates_cmd.append("tos-exp %s static" % self.tos_exp_static) + elif self.state == "absent" \ + and self.tos_exp_static == int(self.bfd_dict["global"].get("tosExpStatic", "7")): + xml_str += "" + if "bfd" not in self.updates_cmd: + self.updates_cmd.append("bfd") + self.updates_cmd.append("undo tos-exp static") + + # delay_up_time + if self.delay_up_time is not None: + if bfd_state == "enable": + delay_time = self.bfd_dict["global"].get("delayUpTimer", "0") + if not delay_time or not delay_time.isdigit(): + delay_time = "0" + if self.state == "present" \ + and self.delay_up_time != int(delay_time): + xml_str += "%s" % self.delay_up_time + if "bfd" not in self.updates_cmd: + self.updates_cmd.append("bfd") + self.updates_cmd.append("delay-up %s" % self.delay_up_time) + elif self.state == "absent" \ + and self.delay_up_time == int(delay_time): + xml_str += "" + if "bfd" not in self.updates_cmd: + self.updates_cmd.append("bfd") + self.updates_cmd.append("undo delay-up") + + # damp_init_wait_time damp_max_wait_time damp_second_wait_time + if self.damp_init_wait_time is not None and self.damp_second_wait_time is not None \ + and self.damp_second_wait_time is not None: + if bfd_state == "enable": + if self.state == "present": + if self.damp_max_wait_time != int(self.bfd_dict["global"].get("dampMaxWaitTime", "2000")): + xml_str += "%s" % self.damp_max_wait_time + damp_chg = True + if self.damp_init_wait_time != int(self.bfd_dict["global"].get("dampInitWaitTime", "12000")): + xml_str += "%s" % self.damp_init_wait_time + damp_chg = True + if self.damp_second_wait_time != int(self.bfd_dict["global"].get("dampSecondWaitTime", "5000")): + xml_str += "%s" % self.damp_second_wait_time + damp_chg = True + if damp_chg: + if "bfd" not in self.updates_cmd: + self.updates_cmd.append("bfd") + self.updates_cmd.append("dampening timer-interval maximum %s initial %s secondary %s" % ( + 
self.damp_max_wait_time, self.damp_init_wait_time, self.damp_second_wait_time)) + else: + damp_chg = True + if self.damp_max_wait_time != int(self.bfd_dict["global"].get("dampMaxWaitTime", "2000")): + damp_chg = False + if self.damp_init_wait_time != int(self.bfd_dict["global"].get("dampInitWaitTime", "12000")): + damp_chg = False + if self.damp_second_wait_time != int(self.bfd_dict["global"].get("dampSecondWaitTime", "5000")): + damp_chg = False + + if damp_chg: + xml_str += "" + if "bfd" not in self.updates_cmd: + self.updates_cmd.append("bfd") + self.updates_cmd.append("undo dampening timer-interval maximum %s initial %s secondary %s" % ( + self.damp_max_wait_time, self.damp_init_wait_time, self.damp_second_wait_time)) + if xml_str: + return '' + xml_str + '' + else: + return "" + + def netconf_load_config(self, xml_str): + """load bfd config by netconf""" + + if not xml_str: + return + + xml_cfg = """ + + + %s + + """ % xml_str + set_nc_config(self.module, xml_cfg) + self.changed = True + + def check_params(self): + """Check all input params""" + + # check default_ip + if self.default_ip: + if not check_default_ip(self.default_ip): + self.module.fail_json(msg="Error: Default ip is invalid.") + + # check tos_exp_dynamic + if self.tos_exp_dynamic is not None: + if self.tos_exp_dynamic < 0 or self.tos_exp_dynamic > 7: + self.module.fail_json(msg="Error: Session tos_exp_dynamic is not ranges from 0 to 7.") + + # check tos_exp_static + if self.tos_exp_static is not None: + if self.tos_exp_static < 0 or self.tos_exp_static > 7: + self.module.fail_json(msg="Error: Session tos_exp_static is not ranges from 0 to 7.") + + # check damp_init_wait_time + if self.damp_init_wait_time is not None: + if self.damp_init_wait_time < 1 or self.damp_init_wait_time > 3600000: + self.module.fail_json(msg="Error: Session damp_init_wait_time is not ranges from 1 to 3600000.") + + # check damp_max_wait_time + if self.damp_max_wait_time is not None: + if self.damp_max_wait_time < 1 or self.damp_max_wait_time > 3600000: + self.module.fail_json(msg="Error: Session damp_max_wait_time is not ranges from 1 to 3600000.") + + # check damp_second_wait_time + if self.damp_second_wait_time is not None: + if self.damp_second_wait_time < 1 or self.damp_second_wait_time > 3600000: + self.module.fail_json(msg="Error: Session damp_second_wait_time is not ranges from 1 to 3600000.") + + # check delay_up_time + if self.delay_up_time is not None: + if self.delay_up_time < 1 or self.delay_up_time > 600: + self.module.fail_json(msg="Error: Session delay_up_time is not ranges from 1 to 600.") + + def get_proposed(self): + """get proposed info""" + + self.proposed["bfd_enalbe"] = self.bfd_enable + self.proposed["default_ip"] = self.default_ip + self.proposed["tos_exp_dynamic"] = self.tos_exp_dynamic + self.proposed["tos_exp_static"] = self.tos_exp_static + self.proposed["damp_init_wait_time"] = self.damp_init_wait_time + self.proposed["damp_max_wait_time"] = self.damp_max_wait_time + self.proposed["damp_second_wait_time"] = self.damp_second_wait_time + self.proposed["delay_up_time"] = self.delay_up_time + self.proposed["state"] = self.state + + def get_existing(self): + """get existing info""" + + if not self.bfd_dict: + return + + self.existing["global"] = self.bfd_dict.get("global") + + def get_end_state(self): + """get end state info""" + + bfd_dict = self.get_bfd_dict() + if not bfd_dict: + return + + self.end_state["global"] = bfd_dict.get("global") + if self.existing == self.end_state: + self.changed = False + + def work(self): 
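# NOTE: check_params() above repeats the same bounds test for six parameters;
# the pattern as a single sketch helper (error wording tidied here):
def check_range(module, name, value, low, high):
    if value is not None and not low <= value <= high:
        module.fail_json(msg="Error: Session %s is not in the range %s to %s."
                         % (name, low, high))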
+ """worker""" + + self.check_params() + self.bfd_dict = self.get_bfd_dict() + self.get_existing() + self.get_proposed() + + # deal present or absent + xml_str = self.config_global() + + # update to device + if xml_str: + self.netconf_load_config(xml_str) + self.changed = True + + self.get_end_state() + self.results['changed'] = self.changed + self.results['proposed'] = self.proposed + self.results['existing'] = self.existing + self.results['end_state'] = self.end_state + if self.changed: + self.results['updates'] = self.updates_cmd + else: + self.results['updates'] = list() + + self.module.exit_json(**self.results) + + +def main(): + """Module main""" + + argument_spec = dict( + bfd_enable=dict(required=False, type='str', choices=['enable', 'disable']), + default_ip=dict(required=False, type='str'), + tos_exp_dynamic=dict(required=False, type='int'), + tos_exp_static=dict(required=False, type='int'), + damp_init_wait_time=dict(required=False, type='int'), + damp_max_wait_time=dict(required=False, type='int'), + damp_second_wait_time=dict(required=False, type='int'), + delay_up_time=dict(required=False, type='int'), + state=dict(required=False, default='present', choices=['present', 'absent']) + ) + + argument_spec.update(ce_argument_spec) + module = BfdGlobal(argument_spec) + module.work() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/cloudengine/ce_bfd_session.py b/plugins/modules/network/cloudengine/ce_bfd_session.py new file mode 100644 index 0000000000..34538f6235 --- /dev/null +++ b/plugins/modules/network/cloudengine/ce_bfd_session.py @@ -0,0 +1,658 @@ +#!/usr/bin/python +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: ce_bfd_session +short_description: Manages BFD session configuration on HUAWEI CloudEngine devices. +description: + - Manages BFD session configuration, creates a BFD session or deletes a specified BFD session + on HUAWEI CloudEngine devices. +author: QijunPan (@QijunPan) +notes: + - This module requires the netconf system service be enabled on the remote device being managed. + - Recommended connection is C(netconf). + - This module also works with C(local) connections for legacy playbooks. +options: + session_name: + description: + - Specifies the name of a BFD session. + The value is a string of 1 to 15 case-sensitive characters without spaces. + required: true + create_type: + description: + - BFD session creation mode, the currently created BFD session + only supports static or static auto-negotiation mode. + choices: ['static', 'auto'] + default: static + addr_type: + description: + - Specifies the peer IP address type. 
+ choices: ['ipv4'] + out_if_name: + description: + - Specifies the type and number of the interface bound to the BFD session. + dest_addr: + description: + - Specifies the peer IP address bound to the BFD session. + src_addr: + description: + - Indicates the source IP address carried in BFD packets. + local_discr: + description: + - The BFD session local identifier does not need to be configured when the mode is auto. + remote_discr: + description: + - The BFD session remote identifier does not need to be configured when the mode is auto. + vrf_name: + description: + - Specifies the name of a Virtual Private Network (VPN) instance that is bound to a BFD session. + The value is a string of 1 to 31 case-sensitive characters, spaces not supported. + When double quotation marks are used around the string, spaces are allowed in the string. + The value _public_ is reserved and cannot be used as the VPN instance name. + use_default_ip: + description: + - Indicates the default multicast IP address that is bound to a BFD session. + By default, BFD uses the multicast IP address 224.0.0.184. + You can set the multicast IP address by running the default-ip-address command. + The value is a bool type. + type: bool + default: 'no' + state: + description: + - Determines whether the config should be present or not on the device. + default: present + choices: ['present', 'absent'] +''' + +EXAMPLES = ''' +- name: bfd session module test + hosts: cloudengine + connection: local + gather_facts: no + vars: + cli: + host: "{{ inventory_hostname }}" + port: "{{ ansible_ssh_port }}" + username: "{{ username }}" + password: "{{ password }}" + transport: cli + + tasks: + - name: Configuring Single-hop BFD for Detecting Faults on a Layer 2 Link + ce_bfd_session: + session_name: bfd_l2link + use_default_ip: true + out_if_name: 10GE1/0/1 + local_discr: 163 + remote_discr: 163 + provider: '{{ cli }}' + + - name: Configuring Single-Hop BFD on a VLANIF Interface + ce_bfd_session: + session_name: bfd_vlanif + dest_addr: 10.1.1.6 + out_if_name: Vlanif100 + local_discr: 163 + remote_discr: 163 + provider: '{{ cli }}' + + - name: Configuring Multi-Hop BFD + ce_bfd_session: + session_name: bfd_multi_hop + dest_addr: 10.1.1.1 + local_discr: 163 + remote_discr: 163 + provider: '{{ cli }}' +''' + +RETURN = ''' +proposed: + description: k/v pairs of parameters passed into module + returned: always + type: dict + sample: { + "addr_type": null, + "create_type": null, + "dest_addr": null, + "out_if_name": "10GE1/0/1", + "session_name": "bfd_l2link", + "src_addr": null, + "state": "present", + "use_default_ip": true, + "vrf_name": null + } +existing: + description: k/v pairs of existing configuration + returned: always + type: dict + sample: { + "session": {} + } +end_state: + description: k/v pairs of configuration after module execution + returned: always + type: dict + sample: { + "session": { + "addrType": "IPV4", + "createType": "SESS_STATIC", + "destAddr": null, + "outIfName": "10GE1/0/1", + "sessName": "bfd_l2link", + "srcAddr": null, + "useDefaultIp": "true", + "vrfName": null + } + } +updates: + description: commands sent to the device + returned: always + type: list + sample: [ + "bfd bfd_l2link bind peer-ip default-ip interface 10ge1/0/1" + ] +changed: + description: check to see if a change was made on the device + returned: always + type: bool + sample: true +''' + +import sys +import socket +from xml.etree import ElementTree +from ansible.module_utils.basic import AnsibleModule +from 
ansible_collections.community.general.plugins.module_utils.network.cloudengine.ce import get_nc_config, set_nc_config, ce_argument_spec, check_ip_addr + + +CE_NC_GET_BFD = """ + + + + + + + + + %s + + + + + + + + + + + + + +""" + + +def is_valid_ip_vpn(vpname): + """check ip vpn""" + + if not vpname: + return False + + if vpname == "_public_": + return False + + if len(vpname) < 1 or len(vpname) > 31: + return False + + return True + + +def check_default_ip(ipaddr): + """check the default multicast IP address""" + + # The value ranges from 224.0.0.107 to 224.0.0.250 + if not check_ip_addr(ipaddr): + return False + + if ipaddr.count(".") != 3: + return False + + ips = ipaddr.split(".") + if ips[0] != "224" or ips[1] != "0" or ips[2] != "0": + return False + + if not ips[3].isdigit() or int(ips[3]) < 107 or int(ips[3]) > 250: + return False + + return True + + +def get_interface_type(interface): + """get the type of interface, such as 10GE, ETH-TRUNK, VLANIF...""" + + if interface is None: + return None + + if interface.upper().startswith('GE'): + iftype = 'ge' + elif interface.upper().startswith('10GE'): + iftype = '10ge' + elif interface.upper().startswith('25GE'): + iftype = '25ge' + elif interface.upper().startswith('4X10GE'): + iftype = '4x10ge' + elif interface.upper().startswith('40GE'): + iftype = '40ge' + elif interface.upper().startswith('100GE'): + iftype = '100ge' + elif interface.upper().startswith('VLANIF'): + iftype = 'vlanif' + elif interface.upper().startswith('LOOPBACK'): + iftype = 'loopback' + elif interface.upper().startswith('METH'): + iftype = 'meth' + elif interface.upper().startswith('ETH-TRUNK'): + iftype = 'eth-trunk' + elif interface.upper().startswith('VBDIF'): + iftype = 'vbdif' + elif interface.upper().startswith('NVE'): + iftype = 'nve' + elif interface.upper().startswith('TUNNEL'): + iftype = 'tunnel' + elif interface.upper().startswith('ETHERNET'): + iftype = 'ethernet' + elif interface.upper().startswith('FCOE-PORT'): + iftype = 'fcoe-port' + elif interface.upper().startswith('FABRIC-PORT'): + iftype = 'fabric-port' + elif interface.upper().startswith('STACK-PORT'): + iftype = 'stack-port' + elif interface.upper().startswith('NULL'): + iftype = 'null' + else: + return None + + return iftype.lower() + + +class BfdSession(object): + """Manages BFD Session""" + + def __init__(self, argument_spec): + self.spec = argument_spec + self.module = None + self.__init_module__() + + # module input info + self.session_name = self.module.params['session_name'] + self.create_type = self.module.params['create_type'] + self.addr_type = self.module.params['addr_type'] + self.out_if_name = self.module.params['out_if_name'] + self.dest_addr = self.module.params['dest_addr'] + self.src_addr = self.module.params['src_addr'] + self.vrf_name = self.module.params['vrf_name'] + self.use_default_ip = self.module.params['use_default_ip'] + self.state = self.module.params['state'] + self.local_discr = self.module.params['local_discr'] + self.remote_discr = self.module.params['remote_discr'] + # host info + self.host = self.module.params['host'] + self.username = self.module.params['username'] + self.port = self.module.params['port'] + + # state + self.changed = False + self.bfd_dict = dict() + self.updates_cmd = list() + self.commands = list() + self.results = dict() + self.proposed = dict() + self.existing = dict() + self.end_state = dict() + + def __init_module__(self): + """init module""" + + mutually_exclusive = [('use_default_ip', 'dest_addr')] + self.module = 
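# NOTE: check_default_ip() above enforces 224.0.0.107-224.0.0.250 with string
# arithmetic; shown for comparison only, the same test with the stdlib
# ipaddress module (Python 3.3+, or the py2 backport) could read:
import ipaddress

def is_valid_default_ip(addr):
    try:
        ip = ipaddress.ip_address(u"%s" % addr)
    except ValueError:
        return False
    if ip.version != 4:
        return False
    return (ipaddress.ip_address(u"224.0.0.107") <= ip
            <= ipaddress.ip_address(u"224.0.0.250"))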
AnsibleModule(argument_spec=self.spec, + mutually_exclusive=mutually_exclusive, + supports_check_mode=True) + + def get_bfd_dict(self): + """bfd config dict""" + + bfd_dict = dict() + bfd_dict["global"] = dict() + bfd_dict["session"] = dict() + conf_str = CE_NC_GET_BFD % self.session_name + + xml_str = get_nc_config(self.module, conf_str) + if "" in xml_str: + return bfd_dict + + xml_str = xml_str.replace('\r', '').replace('\n', '').\ + replace('xmlns="urn:ietf:params:xml:ns:netconf:base:1.0"', "").\ + replace('xmlns="http://www.huawei.com/netconf/vrp"', "") + root = ElementTree.fromstring(xml_str) + + # get bfd global info + glb = root.find("bfd/bfdSchGlobal") + if glb: + for attr in glb: + bfd_dict["global"][attr.tag] = attr.text + + # get bfd session info + sess = root.find("bfd/bfdCfgSessions/bfdCfgSession") + if sess: + for attr in sess: + bfd_dict["session"][attr.tag] = attr.text + + return bfd_dict + + def is_session_match(self): + """is bfd session match""" + + if not self.bfd_dict["session"] or not self.session_name: + return False + + session = self.bfd_dict["session"] + if self.session_name != session.get("sessName", ""): + return False + + if self.create_type and self.create_type.upper() not in session.get("createType", "").upper(): + return False + + if self.addr_type and self.addr_type != session.get("addrType").lower(): + return False + + if self.dest_addr and self.dest_addr != session.get("destAddr"): + return False + + if self.src_addr and self.src_addr != session.get("srcAddr"): + return False + + if self.out_if_name: + if not session.get("outIfName"): + return False + if self.out_if_name.replace(" ", "").lower() != session.get("outIfName").replace(" ", "").lower(): + return False + + if self.vrf_name and self.vrf_name != session.get("vrfName"): + return False + + if str(self.use_default_ip).lower() != session.get("useDefaultIp"): + return False + + if self.create_type == "static" and self.state == "present": + if str(self.local_discr).lower() != session.get("localDiscr", ""): + return False + if str(self.remote_discr).lower() != session.get("remoteDiscr", ""): + return False + + return True + + def config_session(self): + """configures bfd session""" + + xml_str = "" + cmd_list = list() + discr = list() + + if not self.session_name: + return xml_str + + if self.bfd_dict["global"].get("bfdEnable", "false") != "true": + self.module.fail_json(msg="Error: Please enable BFD globally first.") + + xml_str = "%s" % self.session_name + cmd_session = "bfd %s" % self.session_name + + if self.state == "present": + if not self.bfd_dict["session"]: + # Parameter check + if not self.dest_addr and not self.use_default_ip: + self.module.fail_json( + msg="Error: dest_addr or use_default_ip must be set when bfd session is creating.") + + # Creates a BFD session + if self.create_type == "auto": + xml_str += "SESS_%s" % self.create_type.upper() + else: + xml_str += "SESS_STATIC" + xml_str += "IP" + cmd_session += " bind" + if self.addr_type: + xml_str += "%s" % self.addr_type.upper() + else: + xml_str += "IPV4" + if self.dest_addr: + xml_str += "%s" % self.dest_addr + cmd_session += " peer-%s %s" % ("ipv6" if self.addr_type == "ipv6" else "ip", self.dest_addr) + if self.use_default_ip: + xml_str += "%s" % str(self.use_default_ip).lower() + cmd_session += " peer-ip default-ip" + if self.vrf_name: + xml_str += "%s" % self.vrf_name + cmd_session += " vpn-instance %s" % self.vrf_name + if self.out_if_name: + xml_str += "%s" % self.out_if_name + cmd_session += " interface %s" % 
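# NOTE: is_session_match() above compares interface names case- and
# space-insensitively, since the device may echo '10GE1/0/1' for an input like
# '10ge 1/0/1'. That normalisation in isolation:
def same_interface(a, b):
    if not a or not b:
        return False
    return a.replace(" ", "").lower() == b.replace(" ", "").lower()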
self.out_if_name.lower() + if self.src_addr: + xml_str += "%s" % self.src_addr + cmd_session += " source-%s %s" % ("ipv6" if self.addr_type == "ipv6" else "ip", self.src_addr) + + if self.create_type == "auto": + cmd_session += " auto" + else: + xml_str += "%s" % self.local_discr + discr.append("discriminator local %s" % self.local_discr) + xml_str += "%s" % self.remote_discr + discr.append("discriminator remote %s" % self.remote_discr) + + elif not self.is_session_match(): + # Bfd session is not match + self.module.fail_json(msg="Error: The specified BFD configuration view has been created.") + else: + pass + else: # absent + if not self.bfd_dict["session"]: + self.module.fail_json(msg="Error: BFD session is not exist.") + if not self.is_session_match(): + self.module.fail_json(msg="Error: BFD session parameter is invalid.") + + if self.state == "present": + if xml_str.endswith(""): + # no config update + return "" + else: + cmd_list.insert(0, cmd_session) + cmd_list.extend(discr) + self.updates_cmd.extend(cmd_list) + return '' + xml_str\ + + '' + else: # absent + cmd_list.append("undo " + cmd_session) + self.updates_cmd.extend(cmd_list) + return '' + xml_str\ + + '' + + def netconf_load_config(self, xml_str): + """load bfd config by netconf""" + + if not xml_str: + return + + xml_cfg = """ + + + %s + + """ % xml_str + set_nc_config(self.module, xml_cfg) + self.changed = True + + def check_params(self): + """Check all input params""" + + # check session_name + if not self.session_name: + self.module.fail_json(msg="Error: Missing required arguments: session_name.") + + if self.session_name: + if len(self.session_name) < 1 or len(self.session_name) > 15: + self.module.fail_json(msg="Error: Session name is invalid.") + + # check local_discr + # check remote_discr + + if self.local_discr: + if self.local_discr < 1 or self.local_discr > 16384: + self.module.fail_json(msg="Error: Session local_discr is not ranges from 1 to 16384.") + if self.remote_discr: + if self.remote_discr < 1 or self.remote_discr > 4294967295: + self.module.fail_json(msg="Error: Session remote_discr is not ranges from 1 to 4294967295.") + + if self.state == "present" and self.create_type == "static": + if not self.local_discr: + self.module.fail_json(msg="Error: Missing required arguments: local_discr.") + if not self.remote_discr: + self.module.fail_json(msg="Error: Missing required arguments: remote_discr.") + + # check out_if_name + if self.out_if_name: + if not get_interface_type(self.out_if_name): + self.module.fail_json(msg="Error: Session out_if_name is invalid.") + + # check dest_addr + if self.dest_addr: + if not check_ip_addr(self.dest_addr): + self.module.fail_json(msg="Error: Session dest_addr is invalid.") + + # check src_addr + if self.src_addr: + if not check_ip_addr(self.src_addr): + self.module.fail_json(msg="Error: Session src_addr is invalid.") + + # check vrf_name + if self.vrf_name: + if not is_valid_ip_vpn(self.vrf_name): + self.module.fail_json(msg="Error: Session vrf_name is invalid.") + if not self.dest_addr: + self.module.fail_json(msg="Error: vrf_name and dest_addr must set at the same time.") + + # check use_default_ip + if self.use_default_ip and not self.out_if_name: + self.module.fail_json(msg="Error: use_default_ip and out_if_name must set at the same time.") + + def get_proposed(self): + """get proposed info""" + + # base config + self.proposed["session_name"] = self.session_name + self.proposed["create_type"] = self.create_type + self.proposed["addr_type"] = self.addr_type + 
self.proposed["out_if_name"] = self.out_if_name + self.proposed["dest_addr"] = self.dest_addr + self.proposed["src_addr"] = self.src_addr + self.proposed["vrf_name"] = self.vrf_name + self.proposed["use_default_ip"] = self.use_default_ip + self.proposed["state"] = self.state + self.proposed["local_discr"] = self.local_discr + self.proposed["remote_discr"] = self.remote_discr + + def get_existing(self): + """get existing info""" + + if not self.bfd_dict: + return + + self.existing["session"] = self.bfd_dict.get("session") + + def get_end_state(self): + """get end state info""" + + bfd_dict = self.get_bfd_dict() + if not bfd_dict: + return + + self.end_state["session"] = bfd_dict.get("session") + if self.end_state == self.existing: + self.changed = False + + def work(self): + """worker""" + + self.check_params() + self.bfd_dict = self.get_bfd_dict() + self.get_existing() + self.get_proposed() + + # deal present or absent + xml_str = '' + if self.session_name: + xml_str += self.config_session() + + # update to device + if xml_str: + self.netconf_load_config(xml_str) + self.changed = True + + self.get_end_state() + self.results['changed'] = self.changed + self.results['proposed'] = self.proposed + self.results['existing'] = self.existing + self.results['end_state'] = self.end_state + if self.changed: + self.results['updates'] = self.updates_cmd + else: + self.results['updates'] = list() + + self.module.exit_json(**self.results) + + +def main(): + """Module main""" + + argument_spec = dict( + session_name=dict(required=True, type='str'), + create_type=dict(required=False, default='static', type='str', choices=['static', 'auto']), + addr_type=dict(required=False, type='str', choices=['ipv4']), + out_if_name=dict(required=False, type='str'), + dest_addr=dict(required=False, type='str'), + src_addr=dict(required=False, type='str'), + vrf_name=dict(required=False, type='str'), + use_default_ip=dict(required=False, type='bool', default=False), + state=dict(required=False, default='present', choices=['present', 'absent']), + local_discr=dict(required=False, type='int'), + remote_discr=dict(required=False, type='int') + ) + + argument_spec.update(ce_argument_spec) + module = BfdSession(argument_spec) + module.work() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/cloudengine/ce_bfd_view.py b/plugins/modules/network/cloudengine/ce_bfd_view.py new file mode 100644 index 0000000000..6f7fe79807 --- /dev/null +++ b/plugins/modules/network/cloudengine/ce_bfd_view.py @@ -0,0 +1,565 @@ +#!/usr/bin/python +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: ce_bfd_view +short_description: Manages BFD session view configuration on HUAWEI CloudEngine devices. 
+description:
+    - Manages BFD session view configuration on HUAWEI CloudEngine devices.
+author: QijunPan (@QijunPan)
+notes:
+    - This module requires the netconf system service be enabled on the remote device being managed.
+    - Recommended connection is C(netconf).
+    - This module also works with C(local) connections for legacy playbooks.
+options:
+    session_name:
+        description:
+            - Specifies the name of a BFD session.
+              The value is a string of 1 to 15 case-sensitive characters without spaces.
+        required: true
+    local_discr:
+        description:
+            - Specifies the local discriminator of a BFD session.
+              The value is an integer that ranges from 1 to 16384.
+    remote_discr:
+        description:
+            - Specifies the remote discriminator of a BFD session.
+              The value is an integer that ranges from 1 to 4294967295.
+    min_tx_interval:
+        description:
+            - Specifies the minimum interval for sending BFD packets.
+              The value is an integer that ranges from 50 to 1000, in milliseconds.
+    min_rx_interval:
+        description:
+            - Specifies the minimum interval for receiving BFD packets.
+              The value is an integer that ranges from 50 to 1000, in milliseconds.
+    detect_multi:
+        description:
+            - Specifies the local detection multiplier of a BFD session.
+              The value is an integer that ranges from 3 to 50.
+    wtr_interval:
+        description:
+            - Specifies the WTR time of a BFD session.
+              The value is an integer that ranges from 1 to 60, in minutes.
+              The default value is 0.
+    tos_exp:
+        description:
+            - Specifies a priority for BFD control packets.
+              The value is an integer ranging from 0 to 7.
+              The default value is 7, which is the highest priority.
+    admin_down:
+        description:
+            - Enables the BFD session to enter the AdminDown state.
+              By default, a BFD session is enabled.
+        type: bool
+        default: 'no'
+    description:
+        description:
+            - Specifies the description of a BFD session.
+              The value is a string of 1 to 51 case-sensitive characters with spaces.
+    state:
+        description:
+            - Determines whether the config should be present or not on the device.
+ default: present + choices: ['present', 'absent'] +extends_documentation_fragment: +- community.general.ce + +''' + +EXAMPLES = ''' +- name: bfd view module test + hosts: cloudengine + connection: local + gather_facts: no + vars: + cli: + host: "{{ inventory_hostname }}" + port: "{{ ansible_ssh_port }}" + username: "{{ username }}" + password: "{{ password }}" + transport: cli + + tasks: + - name: Set the local discriminator of a BFD session to 80 and the remote discriminator to 800 + ce_bfd_view: + session_name: atob + local_discr: 80 + remote_discr: 800 + state: present + provider: '{{ cli }}' + + - name: Set the minimum interval for receiving BFD packets to 500 ms + ce_bfd_view: + session_name: atob + min_rx_interval: 500 + state: present + provider: '{{ cli }}' +''' + +RETURN = ''' +proposed: + description: k/v pairs of parameters passed into module + returned: always + type: dict + sample: { + "admin_down": false, + "description": null, + "detect_multi": null, + "local_discr": 80, + "min_rx_interval": null, + "min_tx_interval": null, + "remote_discr": 800, + "session_name": "atob", + "state": "present", + "tos_exp": null, + "wtr_interval": null + } +existing: + description: k/v pairs of existing configuration + returned: always + type: dict + sample: { + "session": { + "adminDown": "false", + "createType": "SESS_STATIC", + "description": null, + "detectMulti": "3", + "localDiscr": null, + "minRxInt": null, + "minTxInt": null, + "remoteDiscr": null, + "sessName": "atob", + "tosExp": null, + "wtrTimerInt": null + } + } +end_state: + description: k/v pairs of configuration after module execution + returned: always + type: dict + sample: { + "session": { + "adminDown": "false", + "createType": "SESS_STATIC", + "description": null, + "detectMulti": "3", + "localDiscr": "80", + "minRxInt": null, + "minTxInt": null, + "remoteDiscr": "800", + "sessName": "atob", + "tosExp": null, + "wtrTimerInt": null + } + } +updates: + description: commands sent to the device + returned: always + type: list + sample: [ + "bfd atob", + "discriminator local 80", + "discriminator remote 800" + ] +changed: + description: check to see if a change was made on the device + returned: always + type: bool + sample: true +''' + +import sys +from xml.etree import ElementTree +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.network.cloudengine.ce import get_nc_config, set_nc_config, ce_argument_spec + + +CE_NC_GET_BFD = """ + + + %s + + +""" + +CE_NC_GET_BFD_GLB = """ + + + +""" + +CE_NC_GET_BFD_SESSION = """ + + + %s + + + + + + + + + + + + +""" + + +class BfdView(object): + """Manages BFD View""" + + def __init__(self, argument_spec): + self.spec = argument_spec + self.module = None + self.__init_module__() + + # module input info + self.session_name = self.module.params['session_name'] + self.local_discr = self.module.params['local_discr'] + self.remote_discr = self.module.params['remote_discr'] + self.min_tx_interval = self.module.params['min_tx_interval'] + self.min_rx_interval = self.module.params['min_rx_interval'] + self.detect_multi = self.module.params['detect_multi'] + self.wtr_interval = self.module.params['wtr_interval'] + self.tos_exp = self.module.params['tos_exp'] + self.admin_down = self.module.params['admin_down'] + self.description = self.module.params['description'] + self.state = self.module.params['state'] + + # host info + self.host = self.module.params['host'] + self.username = self.module.params['username'] + self.port = 
self.module.params['port']
+
+        # state
+        self.changed = False
+        self.bfd_dict = dict()
+        self.updates_cmd = list()
+        self.commands = list()
+        self.results = dict()
+        self.proposed = dict()
+        self.existing = dict()
+        self.end_state = dict()
+
+    def __init_module__(self):
+        """init module"""
+
+        self.module = AnsibleModule(argument_spec=self.spec,
+                                    supports_check_mode=True)
+
+    def get_bfd_dict(self):
+        """bfd config dict"""
+
+        bfd_dict = dict()
+        bfd_dict["global"] = dict()
+        bfd_dict["session"] = dict()
+        conf_str = CE_NC_GET_BFD % (CE_NC_GET_BFD_GLB + (CE_NC_GET_BFD_SESSION % self.session_name))
+
+        xml_str = get_nc_config(self.module, conf_str)
+        if "<data/>" in xml_str:
+            return bfd_dict
+
+        xml_str = xml_str.replace('\r', '').replace('\n', '').\
+            replace('xmlns="urn:ietf:params:xml:ns:netconf:base:1.0"', "").\
+            replace('xmlns="http://www.huawei.com/netconf/vrp"', "")
+        root = ElementTree.fromstring(xml_str)
+
+        # get bfd global info
+        glb = root.find("bfd/bfdSchGlobal")
+        if glb:
+            for attr in glb:
+                bfd_dict["global"][attr.tag] = attr.text
+
+        # get bfd session info
+        sess = root.find("bfd/bfdCfgSessions/bfdCfgSession")
+        if sess:
+            for attr in sess:
+                bfd_dict["session"][attr.tag] = attr.text
+
+        return bfd_dict
+
+    def config_session(self):
+        """configures bfd session"""
+
+        xml_str = ""
+        cmd_list = list()
+        cmd_session = ""
+
+        if not self.session_name:
+            return xml_str
+
+        if self.bfd_dict["global"].get("bfdEnable", "false") != "true":
+            self.module.fail_json(msg="Error: Please enable BFD globally first.")
+
+        if not self.bfd_dict["session"]:
+            self.module.fail_json(msg="Error: BFD session does not exist.")
+
+        session = self.bfd_dict["session"]
+        xml_str = "<sessName>%s</sessName>" % self.session_name
+        cmd_session = "bfd %s" % self.session_name
+
+        # BFD session view
+        if self.local_discr is not None:
+            if self.state == "present" and str(self.local_discr) != session.get("localDiscr"):
+                xml_str += "<localDiscr>%s</localDiscr>" % self.local_discr
+                cmd_list.append("discriminator local %s" % self.local_discr)
+            elif self.state == "absent" and str(self.local_discr) == session.get("localDiscr"):
+                xml_str += "<localDiscr/>"
+                cmd_list.append("undo discriminator local")
+
+        if self.remote_discr is not None:
+            if self.state == "present" and str(self.remote_discr) != session.get("remoteDiscr"):
+                xml_str += "<remoteDiscr>%s</remoteDiscr>" % self.remote_discr
+                cmd_list.append("discriminator remote %s" % self.remote_discr)
+            elif self.state == "absent" and str(self.remote_discr) == session.get("remoteDiscr"):
+                xml_str += "<remoteDiscr/>"
+                cmd_list.append("undo discriminator remote")
+
+        if self.min_tx_interval is not None:
+            if self.state == "present" and str(self.min_tx_interval) != session.get("minTxInt"):
+                xml_str += "<minTxInt>%s</minTxInt>" % self.min_tx_interval
+                cmd_list.append("min-tx-interval %s" % self.min_tx_interval)
+            elif self.state == "absent" and str(self.min_tx_interval) == session.get("minTxInt"):
+                xml_str += "<minTxInt/>"
+                cmd_list.append("undo min-tx-interval")
+
+        if self.min_rx_interval is not None:
+            if self.state == "present" and str(self.min_rx_interval) != session.get("minRxInt"):
+                xml_str += "<minRxInt>%s</minRxInt>" % self.min_rx_interval
+                cmd_list.append("min-rx-interval %s" % self.min_rx_interval)
+            elif self.state == "absent" and str(self.min_rx_interval) == session.get("minRxInt"):
+                xml_str += "<minRxInt/>"
+                cmd_list.append("undo min-rx-interval")
+
+        if self.detect_multi is not None:
+            if self.state == "present" and str(self.detect_multi) != session.get("detectMulti"):
+                xml_str += " <detectMulti>%s</detectMulti>" % self.detect_multi
+                cmd_list.append("detect-multiplier %s" % self.detect_multi)
+            elif self.state == "absent" and str(self.detect_multi) == session.get("detectMulti"):
+                xml_str += " <detectMulti/>"
+                cmd_list.append("undo detect-multiplier")
+
+        if self.wtr_interval is not None:
+            if self.state == "present" and str(self.wtr_interval) != session.get("wtrTimerInt"):
+                xml_str += " <wtrTimerInt>%s</wtrTimerInt>" % self.wtr_interval
+                cmd_list.append("wtr %s" % self.wtr_interval)
+            elif self.state == "absent" and str(self.wtr_interval) == session.get("wtrTimerInt"):
+                xml_str += " <wtrTimerInt/>"
+                cmd_list.append("undo wtr")
+
+        if self.tos_exp is not None:
+            if self.state == "present" and str(self.tos_exp) != session.get("tosExp"):
+                xml_str += " <tosExp>%s</tosExp>" % self.tos_exp
+                cmd_list.append("tos-exp %s" % self.tos_exp)
+            elif self.state == "absent" and str(self.tos_exp) == session.get("tosExp"):
+                xml_str += " <tosExp/>"
+                cmd_list.append("undo tos-exp")
+
+        if self.admin_down and session.get("adminDown", "false") == "false":
+            xml_str += " <adminDown>true</adminDown>"
+            cmd_list.append("shutdown")
+        elif not self.admin_down and session.get("adminDown", "false") == "true":
+            xml_str += " <adminDown>false</adminDown>"
+            cmd_list.append("undo shutdown")
+
+        if self.description:
+            if self.state == "present" and self.description != session.get("description"):
+                xml_str += "<description>%s</description>" % self.description
+                cmd_list.append("description %s" % self.description)
+            elif self.state == "absent" and self.description == session.get("description"):
+                xml_str += "<description/>"
+                cmd_list.append("undo description")
+
+        if xml_str.endswith("</sessName>"):
+            # no config update
+            return ""
+        else:
+            cmd_list.insert(0, cmd_session)
+            self.updates_cmd.extend(cmd_list)
+            return '<bfdCfgSessions><bfdCfgSession operation="merge">' + xml_str\
+                   + '</bfdCfgSession></bfdCfgSessions>'
+
+    def netconf_load_config(self, xml_str):
+        """load bfd config by netconf"""
+
+        if not xml_str:
+            return
+
+        xml_cfg = """
+            <config>
+            <bfd xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
+            %s
+            </bfd>
+            </config>""" % xml_str
+
+        set_nc_config(self.module, xml_cfg)
+        self.changed = True
+
+    def check_params(self):
+        """Check all input params"""
+
+        # check session_name
+        if not self.session_name:
+            self.module.fail_json(msg="Error: Missing required arguments: session_name.")
+
+        if self.session_name:
+            if len(self.session_name) < 1 or len(self.session_name) > 15:
+                self.module.fail_json(msg="Error: Session name is invalid.")
+
+        # check local_discr
+        if self.local_discr is not None:
+            if self.local_discr < 1 or self.local_discr > 16384:
+                self.module.fail_json(msg="Error: Session local_discr must range from 1 to 16384.")
+
+        # check remote_discr
+        if self.remote_discr is not None:
+            if self.remote_discr < 1 or self.remote_discr > 4294967295:
+                self.module.fail_json(msg="Error: Session remote_discr must range from 1 to 4294967295.")
+
+        # check min_tx_interval
+        if self.min_tx_interval is not None:
+            if self.min_tx_interval < 50 or self.min_tx_interval > 1000:
+                self.module.fail_json(msg="Error: Session min_tx_interval must range from 50 to 1000.")
+
+        # check min_rx_interval
+        if self.min_rx_interval is not None:
+            if self.min_rx_interval < 50 or self.min_rx_interval > 1000:
+                self.module.fail_json(msg="Error: Session min_rx_interval must range from 50 to 1000.")
+
+        # check detect_multi
+        if self.detect_multi is not None:
+            if self.detect_multi < 3 or self.detect_multi > 50:
+                self.module.fail_json(msg="Error: Session detect_multi must range from 3 to 50.")
+
+        # check wtr_interval
+        if self.wtr_interval is not None:
+            if self.wtr_interval < 1 or self.wtr_interval > 60:
+                self.module.fail_json(msg="Error: Session wtr_interval must range from 1 to 60.")
+
+        # check tos_exp
+        if self.tos_exp is not None:
+            if self.tos_exp < 0 or self.tos_exp > 7:
+                self.module.fail_json(msg="Error: Session tos_exp must range from 0 to
7.") + + # check description + if self.description: + if len(self.description) < 1 or len(self.description) > 51: + self.module.fail_json(msg="Error: Session description is invalid.") + + def get_proposed(self): + """get proposed info""" + + # base config + self.proposed["session_name"] = self.session_name + self.proposed["local_discr"] = self.local_discr + self.proposed["remote_discr"] = self.remote_discr + self.proposed["min_tx_interval"] = self.min_tx_interval + self.proposed["min_rx_interval"] = self.min_rx_interval + self.proposed["detect_multi"] = self.detect_multi + self.proposed["wtr_interval"] = self.wtr_interval + self.proposed["tos_exp"] = self.tos_exp + self.proposed["admin_down"] = self.admin_down + self.proposed["description"] = self.description + self.proposed["state"] = self.state + + def get_existing(self): + """get existing info""" + + if not self.bfd_dict: + return + + self.existing["session"] = self.bfd_dict.get("session") + + def get_end_state(self): + """get end state info""" + + bfd_dict = self.get_bfd_dict() + if not bfd_dict: + return + + self.end_state["session"] = bfd_dict.get("session") + if self.end_state == self.existing: + self.changed = False + + def work(self): + """worker""" + + self.check_params() + self.bfd_dict = self.get_bfd_dict() + self.get_existing() + self.get_proposed() + + # deal present or absent + xml_str = '' + if self.session_name: + xml_str += self.config_session() + + # update to device + if xml_str: + self.netconf_load_config(xml_str) + self.changed = True + + self.get_end_state() + self.results['changed'] = self.changed + self.results['proposed'] = self.proposed + self.results['existing'] = self.existing + self.results['end_state'] = self.end_state + if self.changed: + self.results['updates'] = self.updates_cmd + else: + self.results['updates'] = list() + + self.module.exit_json(**self.results) + + +def main(): + """Module main""" + + argument_spec = dict( + session_name=dict(required=True, type='str'), + local_discr=dict(required=False, type='int'), + remote_discr=dict(required=False, type='int'), + min_tx_interval=dict(required=False, type='int'), + min_rx_interval=dict(required=False, type='int'), + detect_multi=dict(required=False, type='int'), + wtr_interval=dict(required=False, type='int'), + tos_exp=dict(required=False, type='int'), + admin_down=dict(required=False, type='bool', default=False), + description=dict(required=False, type='str'), + state=dict(required=False, default='present', choices=['present', 'absent']) + ) + + argument_spec.update(ce_argument_spec) + module = BfdView(argument_spec) + module.work() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/cloudengine/ce_bgp.py b/plugins/modules/network/cloudengine/ce_bgp.py new file mode 100644 index 0000000000..d4dcc87031 --- /dev/null +++ b/plugins/modules/network/cloudengine/ce_bgp.py @@ -0,0 +1,2331 @@ +#!/usr/bin/python +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. 
If not, see . +# + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: ce_bgp +short_description: Manages BGP configuration on HUAWEI CloudEngine switches. +description: + - Manages BGP configurations on HUAWEI CloudEngine switches. +author: + - wangdezhuang (@QijunPan) +notes: + - This module requires the netconf system service be enabled on the remote device being managed. + - Recommended connection is C(netconf). + - This module also works with C(local) connections for legacy playbooks. +options: + state: + description: + - Specify desired state of the resource. + default: present + choices: ['present','absent'] + as_number: + description: + - Local AS number. + The value is a string of 1 to 11 characters. + graceful_restart: + description: + - Enable GR of the BGP speaker in the specified address family, peer address, or peer group. + default: no_use + choices: ['no_use','true','false'] + time_wait_for_rib: + description: + - Period of waiting for the End-Of-RIB flag. + The value is an integer ranging from 3 to 3000. The default value is 600. + as_path_limit: + description: + - Maximum number of AS numbers in the AS_Path attribute. The default value is 255. + check_first_as: + description: + - Check the first AS in the AS_Path of the update messages from EBGP peers. + default: no_use + choices: ['no_use','true','false'] + confed_id_number: + description: + - Confederation ID. + The value is a string of 1 to 11 characters. + confed_nonstanded: + description: + - Configure the device to be compatible with devices in a nonstandard confederation. + default: no_use + choices: ['no_use','true','false'] + bgp_rid_auto_sel: + description: + - The function to automatically select router IDs for all VPN BGP instances is enabled. + default: no_use + choices: ['no_use','true','false'] + keep_all_routes: + description: + - If the value is true, the system stores all route update messages received from all peers (groups) after + BGP connection setup. + If the value is false, the system stores only BGP update messages that are received from peers and pass + the configured import policy. + default: no_use + choices: ['no_use','true','false'] + memory_limit: + description: + - Support BGP RIB memory protection. + default: no_use + choices: ['no_use','true','false'] + gr_peer_reset: + description: + - Peer disconnection through GR. + default: no_use + choices: ['no_use','true','false'] + is_shutdown: + description: + - Interrupt BGP all neighbor. + default: no_use + choices: ['no_use','true','false'] + suppress_interval: + description: + - Suppress interval. + hold_interval: + description: + - Hold interval. + clear_interval: + description: + - Clear interval. + confed_peer_as_num: + description: + - Confederation AS number, in two-byte or four-byte format. + The value is a string of 1 to 11 characters. + vrf_name: + description: + - Name of a BGP instance. The name is a case-sensitive string of characters. + vrf_rid_auto_sel: + description: + - If the value is true, VPN BGP instances are enabled to automatically select router IDs. + If the value is false, VPN BGP instances are disabled from automatically selecting router IDs. + default: no_use + choices: ['no_use','true','false'] + router_id: + description: + - ID of a router that is in IPv4 address format. 
+ keepalive_time: + description: + - If the value of a timer changes, the BGP peer relationship between the routers is disconnected. + The value is an integer ranging from 0 to 21845. The default value is 60. + hold_time: + description: + - Hold time, in seconds. The value of the hold time can be 0 or range from 3 to 65535. + min_hold_time: + description: + - Min hold time, in seconds. The value of the hold time can be 0 or range from 20 to 65535. + conn_retry_time: + description: + - ConnectRetry interval. The value is an integer, in seconds. The default value is 32s. + ebgp_if_sensitive: + description: + - If the value is true, After the fast EBGP interface awareness function is enabled, EBGP sessions on + an interface are deleted immediately when the interface goes Down. + If the value is false, After the fast EBGP interface awareness function is enabled, EBGP sessions + on an interface are not deleted immediately when the interface goes Down. + default: no_use + choices: ['no_use','true','false'] + default_af_type: + description: + - Type of a created address family, which can be IPv4 unicast or IPv6 unicast. + The default type is IPv4 unicast. + choices: ['ipv4uni','ipv6uni'] +''' + +EXAMPLES = ''' + +- name: CloudEngine BGP test + hosts: cloudengine + connection: local + gather_facts: no + vars: + cli: + host: "{{ inventory_hostname }}" + port: "{{ ansible_ssh_port }}" + username: "{{ username }}" + password: "{{ password }}" + transport: cli + + tasks: + + - name: "Enable BGP" + ce_bgp: + state: present + as_number: 100 + confed_id_number: 250 + provider: "{{ cli }}" + + - name: "Disable BGP" + ce_bgp: + state: absent + as_number: 100 + confed_id_number: 250 + provider: "{{ cli }}" + + - name: "Create confederation peer AS num" + ce_bgp: + state: present + confed_peer_as_num: 260 + provider: "{{ cli }}" +''' + +RETURN = ''' +changed: + description: check to see if a change was made on the device + returned: always + type: bool + sample: true +proposed: + description: k/v pairs of parameters passed into module + returned: always + type: dict + sample: {"as_number": "100", state": "present"} +existing: + description: k/v pairs of existing aaa server + returned: always + type: dict + sample: {"bgp_enable": [["100"], ["true"]]} +end_state: + description: k/v pairs of aaa params after module execution + returned: always + type: dict + sample: {"bgp_enable": [["100"], ["true"]]} +updates: + description: command sent to the device + returned: always + type: list + sample: ["bgp 100"] +''' + +import re +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.network.cloudengine.ce import get_nc_config, set_nc_config, ce_argument_spec + + +SUCCESS = """success""" +FAILED = """failed""" + + +# get bgp enable +CE_GET_BGP_ENABLE = """ + + + + + + + + + + +""" + +CE_GET_BGP_ENABLE_HEADER = """ + + + + +""" + +CE_GET_BGP_ENABLE_TAIL = """ + + + + +""" + +# merge bgp enable +CE_MERGE_BGP_ENABLE_HEADER = """ + + + + +""" +CE_MERGE_BGP_ENABLE_TAIL = """ + + + + +""" + +# get bgp confederation peer as +CE_GET_BGP_CONFED_PEER_AS = """ + + + + + + + + + + + +""" + +# merge bgp confederation peer as +CE_MERGE_BGP_CONFED_PEER_AS = """ + + + + + + %s + + + + + +""" + +# create bgp confederation peer as +CE_CREATE_BGP_CONFED_PEER_AS = """ + + + + + + %s + + + + + +""" + +# delete bgp confederation peer as +CE_DELETE_BGP_CONFED_PEER_AS = """ + + + + + + %s + + + + + +""" + +# get bgp instance +CE_GET_BGP_INSTANCE = """ + + + + + + + + + + + +""" 
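+# Illustrative usage sketch (comments only, not executed by the module): each
+# CE_* fragment above is a NETCONF payload skeleton that is filled in with
+# string formatting and sent through the shared helpers. Assuming a connected
+# module object and a hypothetical peer AS of "64512", a query/merge round
+# trip looks like:
+#
+#     recv_xml = get_nc_config(module, CE_GET_BGP_CONFED_PEER_AS)    # <get> the current state
+#     conf_str = CE_MERGE_BGP_CONFED_PEER_AS % "64512"               # fill in the peer AS
+#     recv_xml = set_nc_config(module, conf_str)                     # <edit-config> merge
+#
+# The check_* methods below parse the reply with re.findall() to decide
+# whether a change is actually needed before any merge is sent.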
+ +# get bgp instance +CE_GET_BGP_INSTANCE_HEADER = """ + + + + + +""" +CE_GET_BGP_INSTANCE_TAIL = """ + + + + + +""" + +# merge bgp instance +CE_MERGE_BGP_INSTANCE_HEADER = """ + + + + + +""" +CE_MERGE_BGP_INSTANCE_TAIL = """ + + + + + +""" + +# create bgp instance +CE_CREATE_BGP_INSTANCE_HEADER = """ + + + + + +""" +CE_CREATE_BGP_INSTANCE_TAIL = """ + + + + + +""" + +# delete bgp instance +CE_DELETE_BGP_INSTANCE_HEADER = """ + + + + + +""" +CE_DELETE_BGP_INSTANCE_TAIL = """ + + + + + +""" + + +def check_ip_addr(**kwargs): + """ check_ip_addr """ + + ipaddr = kwargs["ipaddr"] + + addr = ipaddr.strip().split('.') + + if len(addr) != 4: + return FAILED + + for i in range(4): + addr[i] = int(addr[i]) + + if addr[i] <= 255 and addr[i] >= 0: + pass + else: + return FAILED + return SUCCESS + + +def check_bgp_enable_args(**kwargs): + """ check_bgp_enable_args """ + + module = kwargs["module"] + + need_cfg = False + + as_number = module.params['as_number'] + if as_number: + if len(as_number) > 11 or len(as_number) == 0: + module.fail_json( + msg='Error: The len of as_number %s is out of [1 - 11].' % as_number) + else: + need_cfg = True + + return need_cfg + + +def check_bgp_confed_args(**kwargs): + """ check_bgp_confed_args """ + + module = kwargs["module"] + + need_cfg = False + + confed_peer_as_num = module.params['confed_peer_as_num'] + if confed_peer_as_num: + if len(confed_peer_as_num) > 11 or len(confed_peer_as_num) == 0: + module.fail_json( + msg='Error: The len of confed_peer_as_num %s is out of [1 - 11].' % confed_peer_as_num) + else: + need_cfg = True + + return need_cfg + + +class Bgp(object): + """ Manages BGP configuration """ + + def netconf_get_config(self, **kwargs): + """ netconf_get_config """ + + module = kwargs["module"] + conf_str = kwargs["conf_str"] + + xml_str = get_nc_config(module, conf_str) + + return xml_str + + def netconf_set_config(self, **kwargs): + """ netconf_set_config """ + + module = kwargs["module"] + conf_str = kwargs["conf_str"] + + xml_str = set_nc_config(module, conf_str) + + return xml_str + + def check_bgp_enable_other_args(self, **kwargs): + """ check_bgp_enable_other_args """ + + module = kwargs["module"] + state = module.params['state'] + result = dict() + need_cfg = False + + graceful_restart = module.params['graceful_restart'] + if graceful_restart != 'no_use': + + conf_str = CE_GET_BGP_ENABLE_HEADER + \ + "" + CE_GET_BGP_ENABLE_TAIL + recv_xml = self.netconf_get_config(module=module, conf_str=conf_str) + + if "" in recv_xml: + need_cfg = True + else: + re_find = re.findall( + r'.*(.*).*', recv_xml) + + if re_find: + result["graceful_restart"] = re_find + if re_find[0] != graceful_restart: + need_cfg = True + else: + need_cfg = True + + time_wait_for_rib = module.params['time_wait_for_rib'] + if time_wait_for_rib: + if int(time_wait_for_rib) > 3000 or int(time_wait_for_rib) < 3: + module.fail_json( + msg='Error: The time_wait_for_rib %s is out of [3 - 3000].' 
% time_wait_for_rib) + else: + conf_str = CE_GET_BGP_ENABLE_HEADER + \ + "" + CE_GET_BGP_ENABLE_TAIL + recv_xml = self.netconf_get_config( + module=module, conf_str=conf_str) + + if state == "present": + if "" in recv_xml: + need_cfg = True + else: + re_find = re.findall( + r'.*(.*).*', recv_xml) + + if re_find: + result["time_wait_for_rib"] = re_find + if re_find[0] != time_wait_for_rib: + need_cfg = True + else: + need_cfg = True + else: + if "" in recv_xml: + pass + else: + re_find = re.findall( + r'.*(.*).*', recv_xml) + + if re_find: + result["time_wait_for_rib"] = re_find + if re_find[0] == time_wait_for_rib: + need_cfg = True + + as_path_limit = module.params['as_path_limit'] + if as_path_limit: + if int(as_path_limit) > 2000 or int(as_path_limit) < 1: + module.fail_json( + msg='Error: The as_path_limit %s is out of [1 - 2000].' % as_path_limit) + else: + conf_str = CE_GET_BGP_ENABLE_HEADER + \ + "" + CE_GET_BGP_ENABLE_TAIL + recv_xml = self.netconf_get_config( + module=module, conf_str=conf_str) + + if state == "present": + if "" in recv_xml: + need_cfg = True + else: + re_find = re.findall( + r'.*(.*).*', recv_xml) + + if re_find: + result["as_path_limit"] = re_find + if re_find[0] != as_path_limit: + need_cfg = True + else: + need_cfg = True + else: + if "" in recv_xml: + pass + else: + re_find = re.findall( + r'.*(.*).*', recv_xml) + + if re_find: + result["as_path_limit"] = re_find + if re_find[0] == as_path_limit: + need_cfg = True + + check_first_as = module.params['check_first_as'] + if check_first_as != 'no_use': + conf_str = CE_GET_BGP_ENABLE_HEADER + \ + "" + CE_GET_BGP_ENABLE_TAIL + recv_xml = self.netconf_get_config(module=module, conf_str=conf_str) + + if "" in recv_xml: + need_cfg = True + else: + re_find = re.findall( + r'.*(.*).*', recv_xml) + + if re_find: + result["check_first_as"] = re_find + if re_find[0] != check_first_as: + need_cfg = True + else: + need_cfg = True + + confed_id_number = module.params['confed_id_number'] + if confed_id_number: + if len(confed_id_number) > 11 or len(confed_id_number) == 0: + module.fail_json( + msg='Error: The len of confed_id_number %s is out of [1 - 11].' 
% confed_id_number) + else: + conf_str = CE_GET_BGP_ENABLE_HEADER + \ + "" + CE_GET_BGP_ENABLE_TAIL + recv_xml = self.netconf_get_config( + module=module, conf_str=conf_str) + + if state == "present": + if "" in recv_xml: + need_cfg = True + else: + re_find = re.findall( + r'.*(.*).*', recv_xml) + + if re_find: + result["confed_id_number"] = re_find + if re_find[0] != confed_id_number: + need_cfg = True + else: + need_cfg = True + else: + if "" in recv_xml: + pass + else: + re_find = re.findall( + r'.*(.*).*', recv_xml) + + if re_find: + result["confed_id_number"] = re_find + if re_find[0] == confed_id_number: + need_cfg = True + + confed_nonstanded = module.params['confed_nonstanded'] + if confed_nonstanded != 'no_use': + conf_str = CE_GET_BGP_ENABLE_HEADER + \ + "" + CE_GET_BGP_ENABLE_TAIL + recv_xml = self.netconf_get_config(module=module, conf_str=conf_str) + + if "" in recv_xml: + need_cfg = True + else: + re_find = re.findall( + r'.*(.*).*', recv_xml) + + if re_find: + result["confed_nonstanded"] = re_find + if re_find[0] != confed_nonstanded: + need_cfg = True + else: + need_cfg = True + + bgp_rid_auto_sel = module.params['bgp_rid_auto_sel'] + if bgp_rid_auto_sel != 'no_use': + conf_str = CE_GET_BGP_ENABLE_HEADER + \ + "" + CE_GET_BGP_ENABLE_TAIL + recv_xml = self.netconf_get_config(module=module, conf_str=conf_str) + + if "" in recv_xml: + need_cfg = True + else: + re_find = re.findall( + r'.*(.*).*', recv_xml) + + if re_find: + result["bgp_rid_auto_sel"] = re_find + if re_find[0] != bgp_rid_auto_sel: + need_cfg = True + else: + need_cfg = True + + keep_all_routes = module.params['keep_all_routes'] + if keep_all_routes != 'no_use': + conf_str = CE_GET_BGP_ENABLE_HEADER + \ + "" + CE_GET_BGP_ENABLE_TAIL + recv_xml = self.netconf_get_config(module=module, conf_str=conf_str) + + if "" in recv_xml: + need_cfg = True + else: + re_find = re.findall( + r'.*(.*).*', recv_xml) + + if re_find: + result["keep_all_routes"] = re_find + if re_find[0] != keep_all_routes: + need_cfg = True + else: + need_cfg = True + + memory_limit = module.params['memory_limit'] + if memory_limit != 'no_use': + conf_str = CE_GET_BGP_ENABLE_HEADER + \ + "" + CE_GET_BGP_ENABLE_TAIL + recv_xml = self.netconf_get_config(module=module, conf_str=conf_str) + + if "" in recv_xml: + need_cfg = True + else: + re_find = re.findall( + r'.*(.*).*', recv_xml) + + if re_find: + result["memory_limit"] = re_find + if re_find[0] != memory_limit: + need_cfg = True + else: + need_cfg = True + + gr_peer_reset = module.params['gr_peer_reset'] + if gr_peer_reset != 'no_use': + conf_str = CE_GET_BGP_ENABLE_HEADER + \ + "" + CE_GET_BGP_ENABLE_TAIL + recv_xml = self.netconf_get_config(module=module, conf_str=conf_str) + + if "" in recv_xml: + need_cfg = True + else: + re_find = re.findall( + r'.*(.*).*', recv_xml) + + if re_find: + result["gr_peer_reset"] = re_find + if re_find[0] != gr_peer_reset: + need_cfg = True + else: + need_cfg = True + + is_shutdown = module.params['is_shutdown'] + if is_shutdown != 'no_use': + conf_str = CE_GET_BGP_ENABLE_HEADER + \ + "" + CE_GET_BGP_ENABLE_TAIL + recv_xml = self.netconf_get_config(module=module, conf_str=conf_str) + + if "" in recv_xml: + need_cfg = True + else: + re_find = re.findall( + r'.*(.*).*', recv_xml) + + if re_find: + result["is_shutdown"] = re_find + if re_find[0] != is_shutdown: + need_cfg = True + else: + need_cfg = True + + suppress_interval = module.params['suppress_interval'] + hold_interval = module.params['hold_interval'] + clear_interval = module.params['clear_interval'] + if 
suppress_interval: + + if not hold_interval or not clear_interval: + module.fail_json( + msg='Error: Please input suppress_interval hold_interval clear_interval at the same time.') + + if int(suppress_interval) > 65535 or int(suppress_interval) < 1: + module.fail_json( + msg='Error: The suppress_interval %s is out of [1 - 65535].' % suppress_interval) + else: + conf_str = CE_GET_BGP_ENABLE_HEADER + \ + "" + CE_GET_BGP_ENABLE_TAIL + recv_xml = self.netconf_get_config( + module=module, conf_str=conf_str) + + if state == "present": + if "" in recv_xml: + need_cfg = True + else: + re_find = re.findall( + r'.*(.*).*', recv_xml) + + if re_find: + result["suppress_interval"] = re_find + if re_find[0] != suppress_interval: + need_cfg = True + else: + need_cfg = True + else: + if "" in recv_xml: + pass + else: + re_find = re.findall( + r'.*(.*).*', recv_xml) + + if re_find: + result["suppress_interval"] = re_find + if re_find[0] == suppress_interval: + need_cfg = True + + if hold_interval: + + if not suppress_interval or not clear_interval: + module.fail_json( + msg='Error: Please input suppress_interval hold_interval clear_interval at the same time.') + + if int(hold_interval) > 65535 or int(hold_interval) < 1: + module.fail_json( + msg='Error: The hold_interval %s is out of [1 - 65535].' % hold_interval) + else: + conf_str = CE_GET_BGP_ENABLE_HEADER + \ + "" + CE_GET_BGP_ENABLE_TAIL + recv_xml = self.netconf_get_config( + module=module, conf_str=conf_str) + + if state == "present": + if "" in recv_xml: + need_cfg = True + else: + re_find = re.findall( + r'.*(.*).*', recv_xml) + + if re_find: + result["hold_interval"] = re_find + if re_find[0] != hold_interval: + need_cfg = True + else: + need_cfg = True + else: + if "" in recv_xml: + pass + else: + re_find = re.findall( + r'.*(.*).*', recv_xml) + + if re_find: + result["hold_interval"] = re_find + if re_find[0] == hold_interval: + need_cfg = True + + if clear_interval: + + if not suppress_interval or not hold_interval: + module.fail_json( + msg='Error: Please input suppress_interval hold_interval clear_interval at the same time.') + + if int(clear_interval) > 65535 or int(clear_interval) < 1: + module.fail_json( + msg='Error: The clear_interval %s is out of [1 - 65535].' % clear_interval) + else: + conf_str = CE_GET_BGP_ENABLE_HEADER + \ + "" + CE_GET_BGP_ENABLE_TAIL + recv_xml = self.netconf_get_config( + module=module, conf_str=conf_str) + + if state == "present": + if "" in recv_xml: + need_cfg = True + else: + re_find = re.findall( + r'.*(.*).*', recv_xml) + + if re_find: + result["clear_interval"] = re_find + if re_find[0] != clear_interval: + need_cfg = True + else: + need_cfg = True + else: + if "" in recv_xml: + pass + else: + re_find = re.findall( + r'.*(.*).*', recv_xml) + + if re_find: + result["clear_interval"] = re_find + if re_find[0] == clear_interval: + need_cfg = True + + result["need_cfg"] = need_cfg + return result + + def check_bgp_instance_args(self, **kwargs): + """ check_bgp_instance_args """ + + module = kwargs["module"] + state = module.params['state'] + need_cfg = False + + vrf_name = module.params['vrf_name'] + if vrf_name: + if len(vrf_name) > 31 or len(vrf_name) == 0: + module.fail_json( + msg='the len of vrf_name %s is out of [1 - 31].' 
% vrf_name) + conf_str = CE_GET_BGP_INSTANCE_HEADER + \ + "" + CE_GET_BGP_INSTANCE_TAIL + recv_xml = self.netconf_get_config(module=module, conf_str=conf_str) + + check_vrf_name = vrf_name + + if state == "present": + if "" in recv_xml: + need_cfg = True + else: + re_find = re.findall( + r'.*(.*).*', recv_xml) + + if re_find: + if check_vrf_name not in re_find: + need_cfg = True + else: + need_cfg = True + else: + if "" in recv_xml: + pass + else: + re_find = re.findall( + r'.*(.*).*', recv_xml) + + if re_find: + if check_vrf_name in re_find: + need_cfg = True + + return need_cfg + + def check_bgp_instance_other_args(self, **kwargs): + """ check_bgp_instance_other_args """ + + module = kwargs["module"] + state = module.params['state'] + result = dict() + need_cfg = False + + vrf_name = module.params['vrf_name'] + + router_id = module.params['router_id'] + if router_id: + + if not vrf_name: + module.fail_json( + msg='Error: Please input vrf_name.') + + if check_ip_addr(ipaddr=router_id) == FAILED: + module.fail_json( + msg='Error: The router_id %s is invalid.' % router_id) + + conf_str = CE_GET_BGP_INSTANCE_HEADER + "%s" % vrf_name + \ + "" + CE_GET_BGP_INSTANCE_TAIL + recv_xml = self.netconf_get_config(module=module, conf_str=conf_str) + + if state == "present": + if "" in recv_xml: + need_cfg = True + else: + re_find = re.findall( + r'.*(.*).*', recv_xml) + + if re_find: + result["router_id"] = re_find + if re_find[0] != router_id: + need_cfg = True + else: + need_cfg = True + else: + if "" in recv_xml: + pass + else: + re_find = re.findall( + r'.*(.*).*', recv_xml) + + if re_find: + result["router_id"] = re_find + if re_find[0] == router_id: + need_cfg = True + + vrf_rid_auto_sel = module.params['vrf_rid_auto_sel'] + if vrf_rid_auto_sel != 'no_use': + + if not vrf_name: + module.fail_json( + msg='Error: Please input vrf_name.') + + conf_str = CE_GET_BGP_INSTANCE_HEADER + "%s" % vrf_name + \ + "" + CE_GET_BGP_INSTANCE_TAIL + recv_xml = self.netconf_get_config(module=module, conf_str=conf_str) + + if state == "present": + if "" in recv_xml: + need_cfg = True + else: + re_find = re.findall( + r'.*(.*).*', recv_xml) + + if re_find: + result["vrf_rid_auto_sel"] = re_find + + if re_find[0] != vrf_rid_auto_sel: + need_cfg = True + else: + need_cfg = True + + keepalive_time = module.params['keepalive_time'] + if keepalive_time: + + if not vrf_name: + module.fail_json( + msg='Error: Please input vrf_name.') + + if int(keepalive_time) > 21845 or int(keepalive_time) < 0: + module.fail_json( + msg='keepalive_time %s is out of [0 - 21845].' % keepalive_time) + else: + conf_str = CE_GET_BGP_INSTANCE_HEADER + "%s" % vrf_name + \ + "" + CE_GET_BGP_INSTANCE_TAIL + recv_xml = self.netconf_get_config( + module=module, conf_str=conf_str) + + if state == "present": + if "" in recv_xml: + need_cfg = True + else: + re_find = re.findall( + r'.*(.*).*', recv_xml) + + if re_find: + result["keepalive_time"] = re_find + if re_find[0] != keepalive_time: + need_cfg = True + else: + need_cfg = True + else: + if "" in recv_xml: + pass + else: + re_find = re.findall( + r'.*(.*).*', recv_xml) + + if re_find: + result["keepalive_time"] = re_find + if re_find[0] == keepalive_time: + need_cfg = True + + hold_time = module.params['hold_time'] + if hold_time: + + if not vrf_name: + module.fail_json( + msg='Error: Please input vrf_name.') + + if int(hold_time) > 65535 or int(hold_time) < 3: + module.fail_json( + msg='hold_time %s is out of [3 - 65535].' 
% hold_time) + else: + conf_str = CE_GET_BGP_INSTANCE_HEADER + "%s" % vrf_name + \ + "" + CE_GET_BGP_INSTANCE_TAIL + recv_xml = self.netconf_get_config( + module=module, conf_str=conf_str) + + if state == "present": + if "" in recv_xml: + need_cfg = True + else: + re_find = re.findall( + r'.*(.*).*', recv_xml) + + if re_find: + result["hold_time"] = re_find + if re_find[0] != hold_time: + need_cfg = True + else: + need_cfg = True + else: + if "" in recv_xml: + pass + else: + re_find = re.findall( + r'.*(.*).*', recv_xml) + + if re_find: + result["hold_time"] = re_find + if re_find[0] == hold_time: + need_cfg = True + + min_hold_time = module.params['min_hold_time'] + if min_hold_time: + + if not vrf_name: + module.fail_json( + msg='Error: Please input vrf_name.') + + if int(min_hold_time) != 0 and (int(min_hold_time) > 65535 or int(min_hold_time) < 20): + module.fail_json( + msg='min_hold_time %s is out of [0, or 20 - 65535].' % min_hold_time) + else: + conf_str = CE_GET_BGP_INSTANCE_HEADER + "%s" % vrf_name + \ + "" + CE_GET_BGP_INSTANCE_TAIL + recv_xml = self.netconf_get_config( + module=module, conf_str=conf_str) + + if state == "present": + if "" in recv_xml: + need_cfg = True + else: + re_find = re.findall( + r'.*(.*).*', recv_xml) + + if re_find: + result["min_hold_time"] = re_find + if re_find[0] != min_hold_time: + need_cfg = True + else: + need_cfg = True + else: + if "" in recv_xml: + pass + else: + re_find = re.findall( + r'.*(.*).*', recv_xml) + + if re_find: + result["min_hold_time"] = re_find + if re_find[0] == min_hold_time: + need_cfg = True + + conn_retry_time = module.params['conn_retry_time'] + if conn_retry_time: + + if not vrf_name: + module.fail_json( + msg='Error: Please input vrf_name.') + + if int(conn_retry_time) > 65535 or int(conn_retry_time) < 1: + module.fail_json( + msg='conn_retry_time %s is out of [1 - 65535].' 
% conn_retry_time) + else: + conf_str = CE_GET_BGP_INSTANCE_HEADER + "%s" % vrf_name + \ + "" + CE_GET_BGP_INSTANCE_TAIL + recv_xml = self.netconf_get_config( + module=module, conf_str=conf_str) + + if state == "present": + if "" in recv_xml: + need_cfg = True + else: + re_find = re.findall( + r'.*(.*).*', recv_xml) + + if re_find: + result["conn_retry_time"] = re_find + if re_find[0] != conn_retry_time: + need_cfg = True + else: + need_cfg = True + else: + if "" in recv_xml: + pass + else: + re_find = re.findall( + r'.*(.*).*', recv_xml) + + if re_find: + result["conn_retry_time"] = re_find + if re_find[0] == conn_retry_time: + need_cfg = True + else: + pass + + ebgp_if_sensitive = module.params['ebgp_if_sensitive'] + if ebgp_if_sensitive != 'no_use': + + if not vrf_name: + module.fail_json( + msg='Error: Please input vrf_name.') + + conf_str = CE_GET_BGP_INSTANCE_HEADER + "%s" % vrf_name + \ + "" + CE_GET_BGP_INSTANCE_TAIL + recv_xml = self.netconf_get_config(module=module, conf_str=conf_str) + + if state == "present": + if "" in recv_xml: + need_cfg = True + else: + re_find = re.findall( + r'.*(.*).*', recv_xml) + + if re_find: + result["ebgp_if_sensitive"] = re_find + if re_find[0] != ebgp_if_sensitive: + need_cfg = True + else: + need_cfg = True + else: + if "" in recv_xml: + pass + else: + re_find = re.findall( + r'.*(.*).*', recv_xml) + + if re_find: + result["ebgp_if_sensitive"] = re_find + if re_find[0] == ebgp_if_sensitive: + need_cfg = True + else: + pass + + default_af_type = module.params['default_af_type'] + if default_af_type: + + if not vrf_name: + module.fail_json( + msg='Error: Please input vrf_name.') + + conf_str = CE_GET_BGP_INSTANCE_HEADER + "%s" % vrf_name + \ + "" + CE_GET_BGP_INSTANCE_TAIL + recv_xml = self.netconf_get_config(module=module, conf_str=conf_str) + + if state == "present": + if "" in recv_xml: + need_cfg = True + else: + re_find = re.findall( + r'.*(.*).*', recv_xml) + + if re_find: + result["default_af_type"] = re_find + if re_find[0] != default_af_type: + need_cfg = True + else: + need_cfg = True + else: + if "" in recv_xml: + pass + else: + re_find = re.findall( + r'.*(.*).*', recv_xml) + + if re_find: + result["default_af_type"] = re_find + if re_find[0] == default_af_type: + need_cfg = True + else: + pass + + result["need_cfg"] = need_cfg + return result + + def get_bgp_enable(self, **kwargs): + """ get_bgp_enable """ + + module = kwargs["module"] + + conf_str = CE_GET_BGP_ENABLE + + xml_str = self.netconf_get_config(module=module, conf_str=conf_str) + result = list() + + if "" in xml_str: + return result + else: + re_find = re.findall( + r'.*(.*).*\s*(.*).*', xml_str) + + if re_find: + return re_find + else: + return result + + def merge_bgp_enable(self, **kwargs): + """ merge_bgp_enable """ + + module = kwargs["module"] + conf_str = CE_MERGE_BGP_ENABLE_HEADER + + state = module.params['state'] + + if state == "present": + conf_str += "true" + else: + conf_str += "false" + + as_number = module.params['as_number'] + if as_number: + conf_str += "%s" % as_number + + conf_str += CE_MERGE_BGP_ENABLE_TAIL + + recv_xml = self.netconf_set_config(module=module, conf_str=conf_str) + + if "" not in recv_xml: + module.fail_json(msg='Error: Merge bgp enable failed.') + + cmds = [] + if state == "present": + cmd = "bgp %s" % as_number + else: + cmd = "undo bgp %s" % as_number + cmds.append(cmd) + + return cmds + + def merge_bgp_enable_other(self, **kwargs): + """ merge_bgp_enable_other """ + + module = kwargs["module"] + conf_str = CE_MERGE_BGP_ENABLE_HEADER + 
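+        # Accumulate-and-merge pattern (applies to every parameter below):
+        # append the parameter's XML leaf to conf_str and record the matching
+        # CLI command in cmds for the module's 'updates' output; the combined
+        # payload is sent once at the end via netconf_set_config(), and the
+        # module fails unless the device acknowledges the merge.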
+ cmds = [] + + graceful_restart = module.params['graceful_restart'] + if graceful_restart != 'no_use': + conf_str += "%s" % graceful_restart + + if graceful_restart == "true": + cmd = "graceful-restart" + else: + cmd = "undo graceful-restart" + cmds.append(cmd) + + time_wait_for_rib = module.params['time_wait_for_rib'] + if time_wait_for_rib: + conf_str += "%s" % time_wait_for_rib + + cmd = "graceful-restart timer wait-for-rib %s" % time_wait_for_rib + cmds.append(cmd) + + as_path_limit = module.params['as_path_limit'] + if as_path_limit: + conf_str += "%s" % as_path_limit + + cmd = "as-path-limit %s" % as_path_limit + cmds.append(cmd) + + check_first_as = module.params['check_first_as'] + if check_first_as != 'no_use': + conf_str += "%s" % check_first_as + + if check_first_as == "true": + cmd = "check-first-as" + else: + cmd = "undo check-first-as" + cmds.append(cmd) + + confed_id_number = module.params['confed_id_number'] + if confed_id_number: + conf_str += "%s" % confed_id_number + + cmd = "confederation id %s" % confed_id_number + cmds.append(cmd) + + confed_nonstanded = module.params['confed_nonstanded'] + if confed_nonstanded != 'no_use': + conf_str += "%s" % confed_nonstanded + + if confed_nonstanded == "true": + cmd = "confederation nonstandard" + else: + cmd = "undo confederation nonstandard" + cmds.append(cmd) + + bgp_rid_auto_sel = module.params['bgp_rid_auto_sel'] + if bgp_rid_auto_sel != 'no_use': + conf_str += "%s" % bgp_rid_auto_sel + + if bgp_rid_auto_sel == "true": + cmd = "router-id vpn-instance auto-select" + else: + cmd = "undo router-id" + cmds.append(cmd) + + keep_all_routes = module.params['keep_all_routes'] + if keep_all_routes != 'no_use': + conf_str += "%s" % keep_all_routes + + if keep_all_routes == "true": + cmd = "keep-all-routes" + else: + cmd = "undo keep-all-routes" + cmds.append(cmd) + + memory_limit = module.params['memory_limit'] + if memory_limit != 'no_use': + conf_str += "%s" % memory_limit + + if memory_limit == "true": + cmd = "prefix memory-limit" + else: + cmd = "undo prefix memory-limit" + cmds.append(cmd) + + gr_peer_reset = module.params['gr_peer_reset'] + if gr_peer_reset != 'no_use': + conf_str += "%s" % gr_peer_reset + + if gr_peer_reset == "true": + cmd = "graceful-restart peer-reset" + else: + cmd = "undo graceful-restart peer-reset" + cmds.append(cmd) + + is_shutdown = module.params['is_shutdown'] + if is_shutdown != 'no_use': + conf_str += "%s" % is_shutdown + + if is_shutdown == "true": + cmd = "shutdown" + else: + cmd = "undo shutdown" + cmds.append(cmd) + + suppress_interval = module.params['suppress_interval'] + hold_interval = module.params['hold_interval'] + clear_interval = module.params['clear_interval'] + if suppress_interval: + conf_str += "%s" % suppress_interval + + cmd = "nexthop recursive-lookup restrain suppress-interval %s hold-interval %s " \ + "clear-interval %s" % (suppress_interval, hold_interval, clear_interval) + cmds.append(cmd) + + if hold_interval: + conf_str += "%s" % hold_interval + + if clear_interval: + conf_str += "%s" % clear_interval + + conf_str += CE_MERGE_BGP_ENABLE_TAIL + + recv_xml = self.netconf_set_config(module=module, conf_str=conf_str) + + if "" not in recv_xml: + module.fail_json(msg='Error: Merge bgp enable failed.') + + return cmds + + def delete_bgp_enable_other(self, **kwargs): + """ delete bgp enable other args """ + + module = kwargs["module"] + conf_str = CE_MERGE_BGP_ENABLE_HEADER + + cmds = [] + + graceful_restart = module.params['graceful_restart'] + if graceful_restart != 'no_use': + 
conf_str += "%s" % graceful_restart + + if graceful_restart == "true": + cmd = "graceful-restart" + else: + cmd = "undo graceful-restart" + cmds.append(cmd) + + time_wait_for_rib = module.params['time_wait_for_rib'] + if time_wait_for_rib: + conf_str += "600" + + cmd = "undo graceful-restart timer wait-for-rib" + cmds.append(cmd) + + as_path_limit = module.params['as_path_limit'] + if as_path_limit: + conf_str += "255" + + cmd = "undo as-path-limit" + cmds.append(cmd) + + check_first_as = module.params['check_first_as'] + if check_first_as != 'no_use': + conf_str += "%s" % check_first_as + + if check_first_as == "true": + cmd = "check-first-as" + else: + cmd = "undo check-first-as" + cmds.append(cmd) + + confed_id_number = module.params['confed_id_number'] + confed_peer_as_num = module.params['confed_peer_as_num'] + if confed_id_number and not confed_peer_as_num: + conf_str += "" + + cmd = "undo confederation id" + cmds.append(cmd) + + confed_nonstanded = module.params['confed_nonstanded'] + if confed_nonstanded != 'no_use': + conf_str += "%s" % confed_nonstanded + + if confed_nonstanded == "true": + cmd = "confederation nonstandard" + else: + cmd = "undo confederation nonstandard" + cmds.append(cmd) + + bgp_rid_auto_sel = module.params['bgp_rid_auto_sel'] + if bgp_rid_auto_sel != 'no_use': + conf_str += "%s" % bgp_rid_auto_sel + + if bgp_rid_auto_sel == "true": + cmd = "router-id vpn-instance auto-select" + else: + cmd = "undo router-id" + cmds.append(cmd) + + keep_all_routes = module.params['keep_all_routes'] + if keep_all_routes != 'no_use': + conf_str += "%s" % keep_all_routes + + if keep_all_routes == "true": + cmd = "keep-all-routes" + else: + cmd = "undo keep-all-routes" + cmds.append(cmd) + + memory_limit = module.params['memory_limit'] + if memory_limit != 'no_use': + conf_str += "%s" % memory_limit + + if memory_limit == "true": + cmd = "prefix memory-limit" + else: + cmd = "undo prefix memory-limit" + cmds.append(cmd) + + gr_peer_reset = module.params['gr_peer_reset'] + if gr_peer_reset != 'no_use': + conf_str += "%s" % gr_peer_reset + + if gr_peer_reset == "true": + cmd = "graceful-restart peer-reset" + else: + cmd = "undo graceful-restart peer-reset" + cmds.append(cmd) + + is_shutdown = module.params['is_shutdown'] + if is_shutdown != 'no_use': + conf_str += "%s" % is_shutdown + + if is_shutdown == "true": + cmd = "shutdown" + else: + cmd = "undo shutdown" + cmds.append(cmd) + + suppress_interval = module.params['suppress_interval'] + hold_interval = module.params['hold_interval'] + clear_interval = module.params['clear_interval'] + if suppress_interval: + conf_str += "60" + + cmd = "undo nexthop recursive-lookup restrain suppress-interval hold-interval clear-interval" + cmds.append(cmd) + + if hold_interval: + conf_str += "120" + + if clear_interval: + conf_str += "600" + + conf_str += CE_MERGE_BGP_ENABLE_TAIL + + recv_xml = self.netconf_set_config(module=module, conf_str=conf_str) + + if "" not in recv_xml: + module.fail_json(msg='Error: Delete bgp enable failed.') + + return cmds + + def get_bgp_confed_peer_as(self, **kwargs): + """ get_bgp_confed_peer_as """ + + module = kwargs["module"] + + conf_str = CE_GET_BGP_CONFED_PEER_AS + + xml_str = self.netconf_get_config(module=module, conf_str=conf_str) + result = list() + + if "" in xml_str: + return result + else: + re_find = re.findall( + r'.*(.*).*', xml_str) + + if re_find: + return re_find + else: + return result + + def merge_bgp_confed_peer_as(self, **kwargs): + """ merge_bgp_confed_peer_as """ + + module = 
kwargs["module"] + confed_peer_as_num = module.params['confed_peer_as_num'] + + conf_str = CE_MERGE_BGP_CONFED_PEER_AS % confed_peer_as_num + + recv_xml = self.netconf_set_config(module=module, conf_str=conf_str) + + if "" not in recv_xml: + module.fail_json(msg='Error: Merge bgp confed peer as failed.') + + cmds = [] + cmd = "confederation peer-as %s" % confed_peer_as_num + cmds.append(cmd) + + return cmds + + def create_bgp_confed_peer_as(self, **kwargs): + """ create_bgp_confed_peer_as """ + + module = kwargs["module"] + confed_peer_as_num = module.params['confed_peer_as_num'] + + conf_str = CE_CREATE_BGP_CONFED_PEER_AS % confed_peer_as_num + + recv_xml = self.netconf_set_config(module=module, conf_str=conf_str) + + if "" not in recv_xml: + module.fail_json(msg='Error: Create bgp confed peer as failed.') + + cmds = [] + cmd = "confederation peer-as %s" % confed_peer_as_num + cmds.append(cmd) + + return cmds + + def delete_bgp_confed_peer_as(self, **kwargs): + """ delete_bgp_confed_peer_as """ + + module = kwargs["module"] + confed_peer_as_num = module.params['confed_peer_as_num'] + + conf_str = CE_DELETE_BGP_CONFED_PEER_AS % confed_peer_as_num + + recv_xml = self.netconf_set_config(module=module, conf_str=conf_str) + + if "" not in recv_xml: + module.fail_json(msg='Error: Delete bgp confed peer as failed.') + + cmds = [] + cmd = "undo confederation peer-as %s" % confed_peer_as_num + cmds.append(cmd) + + return cmds + + def get_bgp_instance(self, **kwargs): + """ get_bgp_instance """ + + module = kwargs["module"] + conf_str = CE_GET_BGP_INSTANCE + xml_str = self.netconf_get_config(module=module, conf_str=conf_str) + result = list() + + if "" in xml_str: + return result + else: + re_find = re.findall( + r'.*(.*).*', xml_str) + + if re_find: + return re_find + else: + return result + + def merge_bgp_instance(self, **kwargs): + """ merge_bgp_instance """ + + module = kwargs["module"] + conf_str = CE_MERGE_BGP_INSTANCE_HEADER + + vrf_name = module.params['vrf_name'] + conf_str += "%s" % vrf_name + + conf_str += CE_MERGE_BGP_INSTANCE_TAIL + + recv_xml = self.netconf_set_config(module=module, conf_str=conf_str) + + if "" not in recv_xml: + module.fail_json(msg='Error: Merge bgp instance failed.') + + cmds = [] + + if vrf_name != "_public_": + cmd = "ipv4-family vpn-instance %s" % vrf_name + cmds.append(cmd) + + return cmds + + def create_bgp_instance(self, **kwargs): + """ create_bgp_instance """ + + module = kwargs["module"] + conf_str = CE_CREATE_BGP_INSTANCE_HEADER + + cmds = [] + + vrf_name = module.params['vrf_name'] + if vrf_name: + if vrf_name == "_public_": + return cmds + conf_str += "%s" % vrf_name + + conf_str += CE_CREATE_BGP_INSTANCE_TAIL + + recv_xml = self.netconf_set_config(module=module, conf_str=conf_str) + + if "" not in recv_xml: + module.fail_json(msg='Error: Create bgp instance failed.') + + if vrf_name != "_public_": + cmd = "ipv4-family vpn-instance %s" % vrf_name + cmds.append(cmd) + + return cmds + + def delete_bgp_instance(self, **kwargs): + """ delete_bgp_instance """ + + module = kwargs["module"] + conf_str = CE_DELETE_BGP_INSTANCE_HEADER + + vrf_name = module.params['vrf_name'] + if vrf_name: + conf_str += "%s" % vrf_name + + conf_str += CE_DELETE_BGP_INSTANCE_TAIL + + recv_xml = self.netconf_set_config(module=module, conf_str=conf_str) + + if "" not in recv_xml: + module.fail_json(msg='Error: Delete bgp instance failed.') + + cmds = [] + if vrf_name != "_public_": + cmd = "undo ipv4-family vpn-instance %s" % vrf_name + cmds.append(cmd) + + return cmds + + def 
merge_bgp_instance_other(self, **kwargs): + """ merge_bgp_instance_other """ + + module = kwargs["module"] + conf_str = CE_MERGE_BGP_INSTANCE_HEADER + + vrf_name = module.params['vrf_name'] + conf_str += "%s" % vrf_name + + cmds = [] + + default_af_type = module.params['default_af_type'] + if default_af_type: + conf_str += "%s" % default_af_type + + if vrf_name != "_public_": + if default_af_type == "ipv6uni": + cmd = "ipv6-family vpn-instance %s" % vrf_name + cmds.append(cmd) + + vrf_rid_auto_sel = module.params['vrf_rid_auto_sel'] + if vrf_rid_auto_sel != 'no_use': + conf_str += "%s" % vrf_rid_auto_sel + + if vrf_rid_auto_sel == "true": + cmd = "router-id auto-select" + else: + cmd = "undo router-id auto-select" + cmds.append(cmd) + + router_id = module.params['router_id'] + if router_id: + conf_str += "%s" % router_id + + cmd = "router-id %s" % router_id + cmds.append(cmd) + + keepalive_time = module.params['keepalive_time'] + if keepalive_time: + conf_str += "%s" % keepalive_time + + cmd = "timer keepalive %s" % keepalive_time + cmds.append(cmd) + + hold_time = module.params['hold_time'] + if hold_time: + conf_str += "%s" % hold_time + + cmd = "timer hold %s" % hold_time + cmds.append(cmd) + + min_hold_time = module.params['min_hold_time'] + if min_hold_time: + conf_str += "%s" % min_hold_time + + cmd = "timer min-holdtime %s" % min_hold_time + cmds.append(cmd) + + conn_retry_time = module.params['conn_retry_time'] + if conn_retry_time: + conf_str += "%s" % conn_retry_time + + cmd = "timer connect-retry %s" % conn_retry_time + cmds.append(cmd) + + ebgp_if_sensitive = module.params['ebgp_if_sensitive'] + if ebgp_if_sensitive != 'no_use': + conf_str += "%s" % ebgp_if_sensitive + + if ebgp_if_sensitive == "true": + cmd = "ebgp-interface-sensitive" + else: + cmd = "undo ebgp-interface-sensitive" + cmds.append(cmd) + + conf_str += CE_MERGE_BGP_INSTANCE_TAIL + + recv_xml = self.netconf_set_config(module=module, conf_str=conf_str) + + if "" not in recv_xml: + module.fail_json(msg='Error: Merge bgp instance other failed.') + + return cmds + + def delete_bgp_instance_other_comm(self, **kwargs): + """ delete_bgp_instance_other_comm """ + + module = kwargs["module"] + conf_str = CE_DELETE_BGP_INSTANCE_HEADER + + vrf_name = module.params['vrf_name'] + conf_str += "%s" % vrf_name + + cmds = [] + + router_id = module.params['router_id'] + if router_id: + conf_str += "%s" % router_id + + cmd = "undo router-id" + cmds.append(cmd) + + vrf_rid_auto_sel = module.params['vrf_rid_auto_sel'] + if vrf_rid_auto_sel != 'no_use': + conf_str += "%s" % vrf_rid_auto_sel + + cmd = "undo router-id vpn-instance auto-select" + cmds.append(cmd) + + keepalive_time = module.params['keepalive_time'] + if keepalive_time: + conf_str += "%s" % keepalive_time + + cmd = "undo timer keepalive" + cmds.append(cmd) + + hold_time = module.params['hold_time'] + if hold_time: + conf_str += "%s" % hold_time + + cmd = "undo timer hold" + cmds.append(cmd) + + min_hold_time = module.params['min_hold_time'] + if min_hold_time: + conf_str += "%s" % min_hold_time + + cmd = "undo timer min-holdtime" + cmds.append(cmd) + + conn_retry_time = module.params['conn_retry_time'] + if conn_retry_time: + conf_str += "%s" % conn_retry_time + + cmd = "undo timer connect-retry" + cmds.append(cmd) + + ebgp_if_sensitive = module.params['ebgp_if_sensitive'] + if ebgp_if_sensitive != 'no_use': + conf_str += "%s" % ebgp_if_sensitive + + cmd = "undo ebgp-interface-sensitive" + cmds.append(cmd) + + default_af_type = module.params['default_af_type'] + if 
default_af_type: + conf_str += "%s" % default_af_type + + if vrf_name != "_public_": + if default_af_type == "ipv6uni": + cmd = "undo ipv6-family vpn-instance %s" % vrf_name + cmds.append(cmd) + else: + cmd = "undo ipv4-family vpn-instance %s" % vrf_name + cmds.append(cmd) + else: + if vrf_name != "_public_": + cmd = "undo ipv4-family vpn-instance %s" % vrf_name + cmds.append(cmd) + + conf_str += CE_DELETE_BGP_INSTANCE_TAIL + + recv_xml = self.netconf_set_config(module=module, conf_str=conf_str) + + if "" not in recv_xml: + module.fail_json( + msg='Error: Delete common vpn bgp instance other args failed.') + + return cmds + + def delete_instance_other_public(self, **kwargs): + """ delete_instance_other_public """ + + module = kwargs["module"] + conf_str = CE_MERGE_BGP_INSTANCE_HEADER + + vrf_name = module.params['vrf_name'] + conf_str += "%s" % vrf_name + + cmds = [] + + router_id = module.params['router_id'] + if router_id: + conf_str += "" + + cmd = "undo router-id" + cmds.append(cmd) + + vrf_rid_auto_sel = module.params['vrf_rid_auto_sel'] + if vrf_rid_auto_sel != 'no_use': + conf_str += "%s" % "false" + + cmd = "undo router-id vpn-instance auto-select" + cmds.append(cmd) + + keepalive_time = module.params['keepalive_time'] + if keepalive_time: + conf_str += "%s" % "60" + + cmd = "undo timer keepalive" + cmds.append(cmd) + + hold_time = module.params['hold_time'] + if hold_time: + conf_str += "%s" % "180" + + cmd = "undo timer hold" + cmds.append(cmd) + + min_hold_time = module.params['min_hold_time'] + if min_hold_time: + conf_str += "%s" % "0" + + cmd = "undo timer min-holdtime" + cmds.append(cmd) + + conn_retry_time = module.params['conn_retry_time'] + if conn_retry_time: + conf_str += "%s" % "32" + + cmd = "undo timer connect-retry" + cmds.append(cmd) + + ebgp_if_sensitive = module.params['ebgp_if_sensitive'] + if ebgp_if_sensitive != 'no_use': + conf_str += "%s" % "true" + + cmd = "ebgp-interface-sensitive" + cmds.append(cmd) + + default_af_type = module.params['default_af_type'] + if default_af_type: + conf_str += "%s" % "ipv4uni" + + if vrf_name != "_public_": + if default_af_type == "ipv6uni": + cmd = "undo ipv6-family vpn-instance %s" % vrf_name + cmds.append(cmd) + else: + cmd = "undo ipv4-family vpn-instance %s" % vrf_name + cmds.append(cmd) + else: + if vrf_name != "_public_": + cmd = "undo ipv4-family vpn-instance %s" % vrf_name + cmds.append(cmd) + + conf_str += CE_MERGE_BGP_INSTANCE_TAIL + + recv_xml = self.netconf_set_config(module=module, conf_str=conf_str) + + if "" not in recv_xml: + module.fail_json( + msg='Error: Delete default vpn bgp instance other args failed.') + + return cmds + + +def main(): + """ main """ + + argument_spec = dict( + state=dict(choices=['present', 'absent'], default='present'), + as_number=dict(type='str'), + graceful_restart=dict(type='str', default='no_use', choices=['no_use', 'true', 'false']), + time_wait_for_rib=dict(type='str'), + as_path_limit=dict(type='str'), + check_first_as=dict(type='str', default='no_use', choices=['no_use', 'true', 'false']), + confed_id_number=dict(type='str'), + confed_nonstanded=dict(type='str', default='no_use', choices=['no_use', 'true', 'false']), + bgp_rid_auto_sel=dict(type='str', default='no_use', choices=['no_use', 'true', 'false']), + keep_all_routes=dict(type='str', default='no_use', choices=['no_use', 'true', 'false']), + memory_limit=dict(type='str', default='no_use', choices=['no_use', 'true', 'false']), + gr_peer_reset=dict(type='str', default='no_use', choices=['no_use', 'true', 'false']), + 
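+        # NOTE: boolean-like options in this argument_spec use the tri-state
+        # strings 'no_use'/'true'/'false' instead of type='bool', so that
+        # "not specified in the playbook" ('no_use') can be told apart from
+        # an explicit 'false' when diffing against the device configuration.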
is_shutdown=dict(type='str', default='no_use', choices=['no_use', 'true', 'false']), + suppress_interval=dict(type='str'), + hold_interval=dict(type='str'), + clear_interval=dict(type='str'), + confed_peer_as_num=dict(type='str'), + vrf_name=dict(type='str'), + vrf_rid_auto_sel=dict(type='str', default='no_use', choices=['no_use', 'true', 'false']), + router_id=dict(type='str'), + keepalive_time=dict(type='str'), + hold_time=dict(type='str'), + min_hold_time=dict(type='str'), + conn_retry_time=dict(type='str'), + ebgp_if_sensitive=dict(type='str', default='no_use', choices=['no_use', 'true', 'false']), + default_af_type=dict(type='str', choices=['ipv4uni', 'ipv6uni']) + ) + + argument_spec.update(ce_argument_spec) + module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True) + + changed = False + proposed = dict() + existing = dict() + end_state = dict() + updates = [] + + state = module.params['state'] + as_number = module.params['as_number'] + graceful_restart = module.params['graceful_restart'] + time_wait_for_rib = module.params['time_wait_for_rib'] + as_path_limit = module.params['as_path_limit'] + check_first_as = module.params['check_first_as'] + confed_id_number = module.params['confed_id_number'] + confed_nonstanded = module.params['confed_nonstanded'] + bgp_rid_auto_sel = module.params['bgp_rid_auto_sel'] + keep_all_routes = module.params['keep_all_routes'] + memory_limit = module.params['memory_limit'] + gr_peer_reset = module.params['gr_peer_reset'] + is_shutdown = module.params['is_shutdown'] + suppress_interval = module.params['suppress_interval'] + hold_interval = module.params['hold_interval'] + clear_interval = module.params['clear_interval'] + confed_peer_as_num = module.params['confed_peer_as_num'] + router_id = module.params['router_id'] + vrf_name = module.params['vrf_name'] + vrf_rid_auto_sel = module.params['vrf_rid_auto_sel'] + keepalive_time = module.params['keepalive_time'] + hold_time = module.params['hold_time'] + min_hold_time = module.params['min_hold_time'] + conn_retry_time = module.params['conn_retry_time'] + ebgp_if_sensitive = module.params['ebgp_if_sensitive'] + default_af_type = module.params['default_af_type'] + + ce_bgp_obj = Bgp() + + if not ce_bgp_obj: + module.fail_json(msg='Error: Init module failed.') + + # get proposed + proposed["state"] = state + if as_number: + proposed["as_number"] = as_number + if graceful_restart != 'no_use': + proposed["graceful_restart"] = graceful_restart + if time_wait_for_rib: + proposed["time_wait_for_rib"] = time_wait_for_rib + if as_path_limit: + proposed["as_path_limit"] = as_path_limit + if check_first_as != 'no_use': + proposed["check_first_as"] = check_first_as + if confed_id_number: + proposed["confed_id_number"] = confed_id_number + if confed_nonstanded != 'no_use': + proposed["confed_nonstanded"] = confed_nonstanded + if bgp_rid_auto_sel != 'no_use': + proposed["bgp_rid_auto_sel"] = bgp_rid_auto_sel + if keep_all_routes != 'no_use': + proposed["keep_all_routes"] = keep_all_routes + if memory_limit != 'no_use': + proposed["memory_limit"] = memory_limit + if gr_peer_reset != 'no_use': + proposed["gr_peer_reset"] = gr_peer_reset + if is_shutdown != 'no_use': + proposed["is_shutdown"] = is_shutdown + if suppress_interval: + proposed["suppress_interval"] = suppress_interval + if hold_interval: + proposed["hold_interval"] = hold_interval + if clear_interval: + proposed["clear_interval"] = clear_interval + if confed_peer_as_num: + proposed["confed_peer_as_num"] = confed_peer_as_num + if router_id: 
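+        # like every optional parameter in this block, router_id is echoed
+        # into 'proposed' only when the playbook actually supplied a value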
+ proposed["router_id"] = router_id + if vrf_name: + proposed["vrf_name"] = vrf_name + if vrf_rid_auto_sel != 'no_use': + proposed["vrf_rid_auto_sel"] = vrf_rid_auto_sel + if keepalive_time: + proposed["keepalive_time"] = keepalive_time + if hold_time: + proposed["hold_time"] = hold_time + if min_hold_time: + proposed["min_hold_time"] = min_hold_time + if conn_retry_time: + proposed["conn_retry_time"] = conn_retry_time + if ebgp_if_sensitive != 'no_use': + proposed["ebgp_if_sensitive"] = ebgp_if_sensitive + if default_af_type: + proposed["default_af_type"] = default_af_type + + need_bgp_enable = check_bgp_enable_args(module=module) + need_bgp_enable_other_rst = ce_bgp_obj.check_bgp_enable_other_args( + module=module) + need_bgp_confed = check_bgp_confed_args(module=module) + need_bgp_instance = ce_bgp_obj.check_bgp_instance_args(module=module) + need_bgp_instance_other_rst = ce_bgp_obj.check_bgp_instance_other_args( + module=module) + + router_id_exist = ce_bgp_obj.get_bgp_instance(module=module) + existing["bgp instance"] = router_id_exist + + # bgp enable/disable + if need_bgp_enable: + + bgp_enable_exist = ce_bgp_obj.get_bgp_enable(module=module) + existing["bgp enable"] = bgp_enable_exist + if bgp_enable_exist: + asnumber_exist = bgp_enable_exist[0][1] + bgpenable_exist = bgp_enable_exist[0][0] + else: + asnumber_exist = None + bgpenable_exist = None + + if state == "present": + bgp_enable_new = ("true", as_number) + + if bgp_enable_new in bgp_enable_exist: + pass + elif bgpenable_exist == "true" and asnumber_exist != as_number: + module.fail_json( + msg='Error: BGP is already running. The AS is %s.' % asnumber_exist) + else: + cmd = ce_bgp_obj.merge_bgp_enable(module=module) + changed = True + for item in cmd: + updates.append(item) + + else: + if need_bgp_enable_other_rst["need_cfg"] or need_bgp_confed or \ + need_bgp_instance_other_rst["need_cfg"] or need_bgp_instance: + pass + elif bgpenable_exist == "false": + pass + elif bgpenable_exist == "true" and asnumber_exist == as_number: + cmd = ce_bgp_obj.merge_bgp_enable(module=module) + changed = True + for item in cmd: + updates.append(item) + + else: + module.fail_json( + msg='Error: BGP is already running. The AS is %s.' 
% asnumber_exist) + + bgp_enable_end = ce_bgp_obj.get_bgp_enable(module=module) + end_state["bgp enable"] = bgp_enable_end + + # bgp enable/disable other args + exist_tmp = dict() + for item in need_bgp_enable_other_rst: + if item != "need_cfg": + exist_tmp[item] = need_bgp_enable_other_rst[item] + + if exist_tmp: + existing["bgp enable other"] = exist_tmp + + if need_bgp_enable_other_rst["need_cfg"]: + if state == "present": + cmd = ce_bgp_obj.merge_bgp_enable_other(module=module) + changed = True + for item in cmd: + updates.append(item) + else: + cmd = ce_bgp_obj.delete_bgp_enable_other(module=module) + changed = True + for item in cmd: + updates.append(item) + + need_bgp_enable_other_rst = ce_bgp_obj.check_bgp_enable_other_args( + module=module) + + end_tmp = dict() + for item in need_bgp_enable_other_rst: + if item != "need_cfg": + end_tmp[item] = need_bgp_enable_other_rst[item] + + if end_tmp: + end_state["bgp enable other"] = end_tmp + + # bgp confederation peer as + if need_bgp_confed: + confed_exist = ce_bgp_obj.get_bgp_confed_peer_as(module=module) + existing["confederation peer as"] = confed_exist + confed_new = (confed_peer_as_num) + + if state == "present": + if len(confed_exist) == 0: + cmd = ce_bgp_obj.create_bgp_confed_peer_as(module=module) + changed = True + for item in cmd: + updates.append(item) + + elif confed_new not in confed_exist: + cmd = ce_bgp_obj.merge_bgp_confed_peer_as(module=module) + changed = True + for item in cmd: + updates.append(item) + + else: + if len(confed_exist) == 0: + pass + + elif confed_new not in confed_exist: + pass + + else: + cmd = ce_bgp_obj.delete_bgp_confed_peer_as(module=module) + changed = True + for item in cmd: + updates.append(item) + + confed_end = ce_bgp_obj.get_bgp_confed_peer_as(module=module) + end_state["confederation peer as"] = confed_end + + # bgp instance + if need_bgp_instance and default_af_type != "ipv6uni": + router_id_new = vrf_name + + if state == "present": + if len(router_id_exist) == 0: + cmd = ce_bgp_obj.create_bgp_instance(module=module) + changed = True + updates.extend(cmd) + elif router_id_new not in router_id_exist: + cmd = ce_bgp_obj.merge_bgp_instance(module=module) + changed = True + updates.extend(cmd) + else: + if not need_bgp_instance_other_rst["need_cfg"]: + if vrf_name != "_public_": + if len(router_id_exist) == 0: + pass + elif router_id_new not in router_id_exist: + pass + else: + cmd = ce_bgp_obj.delete_bgp_instance(module=module) + changed = True + for item in cmd: + updates.append(item) + + # bgp instance other + exist_tmp = dict() + for item in need_bgp_instance_other_rst: + if item != "need_cfg": + exist_tmp[item] = need_bgp_instance_other_rst[item] + + if exist_tmp: + existing["bgp instance other"] = exist_tmp + + if need_bgp_instance_other_rst["need_cfg"]: + if state == "present": + cmd = ce_bgp_obj.merge_bgp_instance_other(module=module) + changed = True + for item in cmd: + updates.append(item) + + else: + if vrf_name == "_public_": + cmd = ce_bgp_obj.delete_instance_other_public( + module=module) + changed = True + for item in cmd: + updates.append(item) + else: + cmd = ce_bgp_obj.delete_bgp_instance_other_comm(module=module) + changed = True + for item in cmd: + updates.append(item) + + need_bgp_instance_other_rst = ce_bgp_obj.check_bgp_instance_other_args( + module=module) + + router_id_end = ce_bgp_obj.get_bgp_instance(module=module) + end_state["bgp instance"] = router_id_end + + end_tmp = dict() + for item in need_bgp_instance_other_rst: + if item != "need_cfg": + end_tmp[item] = 
need_bgp_instance_other_rst[item] + + if end_tmp: + end_state["bgp instance other"] = end_tmp + if end_state == existing: + changed = False + updates = list() + results = dict() + results['proposed'] = proposed + results['existing'] = existing + results['changed'] = changed + results['end_state'] = end_state + results['updates'] = updates + + module.exit_json(**results) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/cloudengine/ce_bgp_af.py b/plugins/modules/network/cloudengine/ce_bgp_af.py new file mode 100644 index 0000000000..089b3b3231 --- /dev/null +++ b/plugins/modules/network/cloudengine/ce_bgp_af.py @@ -0,0 +1,3434 @@ +#!/usr/bin/python +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: ce_bgp_af +short_description: Manages BGP Address-family configuration on HUAWEI CloudEngine switches. +description: + - Manages BGP Address-family configurations on HUAWEI CloudEngine switches. +author: + - wangdezhuang (@QijunPan) +notes: + - This module requires the netconf system service be enabled on the remote device being managed. + - Recommended connection is C(netconf). + - This module also works with C(local) connections for legacy playbooks. +options: + state: + description: + - Specify desired state of the resource. + default: present + choices: ['present','absent'] + vrf_name: + description: + - Name of a BGP instance. The name is a case-sensitive string of characters. + The BGP instance can be used only after the corresponding VPN instance is created. + The value is a string of 1 to 31 case-sensitive characters. + required: true + af_type: + description: + - Address family type of a BGP instance. + required: true + choices: ['ipv4uni','ipv4multi', 'ipv4vpn', 'ipv6uni', 'ipv6vpn', 'evpn'] + max_load_ibgp_num: + description: + - Specify the maximum number of equal-cost IBGP routes. + The value is an integer ranging from 1 to 65535. + ibgp_ecmp_nexthop_changed: + description: + - If the value is true, the next hop of an advertised route is changed to the advertiser itself in IBGP + load-balancing scenarios. + If the value is false, the next hop of an advertised route is not changed to the advertiser itself in + IBGP load-balancing scenarios. + choices: ['no_use','true','false'] + default: no_use + max_load_ebgp_num: + description: + - Specify the maximum number of equal-cost EBGP routes. + The value is an integer ranging from 1 to 65535. + ebgp_ecmp_nexthop_changed: + description: + - If the value is true, the next hop of an advertised route is changed to the advertiser itself in EBGP + load-balancing scenarios. + If the value is false, the next hop of an advertised route is not changed to the advertiser itself in + EBGP load-balancing scenarios. 
+ choices: ['no_use','true','false'] + default: no_use + maximum_load_balance: + description: + - Specify the maximum number of equal-cost routes in the BGP routing table. + The value is an integer ranging from 1 to 65535. + ecmp_nexthop_changed: + description: + - If the value is true, the next hop of an advertised route is changed to the advertiser itself in BGP + load-balancing scenarios. + If the value is false, the next hop of an advertised route is not changed to the advertiser itself + in BGP load-balancing scenarios. + choices: ['no_use','true','false'] + default: no_use + default_local_pref: + description: + - Set the Local-Preference attribute. The value is an integer. + The value is an integer ranging from 0 to 4294967295. + default_med: + description: + - Specify the Multi-Exit-Discriminator (MED) of BGP routes. + The value is an integer ranging from 0 to 4294967295. + default_rt_import_enable: + description: + - If the value is true, importing default routes to the BGP routing table is allowed. + If the value is false, importing default routes to the BGP routing table is not allowed. + choices: ['no_use','true','false'] + default: no_use + router_id: + description: + - ID of a router that is in IPv4 address format. + The value is a string of 0 to 255 characters. + The value is in dotted decimal notation. + vrf_rid_auto_sel: + description: + - If the value is true, VPN BGP instances are enabled to automatically select router IDs. + If the value is false, VPN BGP instances are disabled from automatically selecting router IDs. + choices: ['no_use','true','false'] + default: no_use + nexthop_third_party: + description: + - If the value is true, the third-party next hop function is enabled. + If the value is false, the third-party next hop function is disabled. + choices: ['no_use','true','false'] + default: no_use + summary_automatic: + description: + - If the value is true, automatic aggregation is enabled for locally imported routes. + If the value is false, automatic aggregation is disabled for locally imported routes. + choices: ['no_use','true','false'] + default: no_use + auto_frr_enable: + description: + - If the value is true, BGP auto FRR is enabled. + If the value is false, BGP auto FRR is disabled. + choices: ['no_use','true','false'] + default: no_use + load_balancing_as_path_ignore: + description: + - Load balancing as path ignore. + choices: ['no_use','true','false'] + default: no_use + rib_only_enable: + description: + - If the value is true, BGP routes cannot be advertised to the IP routing table. + If the value is false, Routes preferred by BGP are advertised to the IP routing table. + choices: ['no_use','true','false'] + default: no_use + rib_only_policy_name: + description: + - Specify the name of a routing policy. + The value is a string of 1 to 40 characters. + active_route_advertise: + description: + - If the value is true, BGP is enabled to advertise only optimal routes in the RM to peers. + If the value is false, BGP is not enabled to advertise only optimal routes in the RM to peers. + choices: ['no_use','true','false'] + default: no_use + as_path_neglect: + description: + - If the value is true, the AS path attribute is ignored when BGP selects an optimal route. + If the value is false, the AS path attribute is not ignored when BGP selects an optimal route. + An AS path with a smaller length has a higher priority. 
+ choices: ['no_use','true','false'] + default: no_use + med_none_as_maximum: + description: + - If the value is true, when BGP selects an optimal route, the system uses 4294967295 as the + MED value of a route if the route's attribute does not carry a MED value. + If the value is false, the system uses 0 as the MED value of a route if the route's attribute + does not carry a MED value. + choices: ['no_use','true','false'] + default: no_use + router_id_neglect: + description: + - If the value is true, the router ID attribute is ignored when BGP selects the optimal route. + If the value is false, the router ID attribute is not ignored when BGP selects the optimal route. + choices: ['no_use','true','false'] + default: no_use + igp_metric_ignore: + description: + - If the value is true, the metrics of next-hop IGP routes are not compared when BGP selects + an optimal route. + If the value is false, the metrics of next-hop IGP routes are not compared when BGP selects + an optimal route. + A route with a smaller metric has a higher priority. + choices: ['no_use','true','false'] + default: no_use + always_compare_med: + description: + - If the value is true, the MEDs of routes learned from peers in different autonomous systems + are compared when BGP selects an optimal route. + If the value is false, the MEDs of routes learned from peers in different autonomous systems + are not compared when BGP selects an optimal route. + choices: ['no_use','true','false'] + default: no_use + determin_med: + description: + - If the value is true, BGP deterministic-MED is enabled. + If the value is false, BGP deterministic-MED is disabled. + choices: ['no_use','true','false'] + default: no_use + preference_external: + description: + - Set the protocol priority of EBGP routes. + The value is an integer ranging from 1 to 255. + preference_internal: + description: + - Set the protocol priority of IBGP routes. + The value is an integer ranging from 1 to 255. + preference_local: + description: + - Set the protocol priority of a local BGP route. + The value is an integer ranging from 1 to 255. + prefrence_policy_name: + description: + - Set a routing policy to filter routes so that a configured priority is applied to + the routes that match the specified policy. + The value is a string of 1 to 40 characters. + reflect_between_client: + description: + - If the value is true, route reflection is enabled between clients. + If the value is false, route reflection is disabled between clients. + choices: ['no_use','true','false'] + default: no_use + reflector_cluster_id: + description: + - Set a cluster ID. Configuring multiple RRs in a cluster can enhance the stability of the network. + The value is an integer ranging from 1 to 4294967295. + reflector_cluster_ipv4: + description: + - Set a cluster ipv4 address. The value is expressed in the format of an IPv4 address. + rr_filter_number: + description: + - Set the number of the extended community filter supported by an RR group. + The value is a string of 1 to 51 characters. + policy_vpn_target: + description: + - If the value is true, VPN-Target filtering function is performed for received VPN routes. + If the value is false, VPN-Target filtering function is not performed for received VPN routes. + choices: ['no_use','true','false'] + default: no_use + next_hop_sel_depend_type: + description: + - Next hop select depend type. 
+ choices: ['default','dependTunnel', 'dependIp'] + default: default + nhp_relay_route_policy_name: + description: + - Specify the name of a route-policy for route iteration. + The value is a string of 1 to 40 characters. + ebgp_if_sensitive: + description: + - If the value is true, after the fast EBGP interface awareness function is enabled, + EBGP sessions on an interface are deleted immediately when the interface goes Down. + If the value is false, after the fast EBGP interface awareness function is enabled, + EBGP sessions on an interface are not deleted immediately when the interface goes Down. + choices: ['no_use','true','false'] + default: no_use + reflect_chg_path: + description: + - If the value is true, the route reflector is enabled to modify route path attributes + based on an export policy. + If the value is false, the route reflector is disabled from modifying route path attributes + based on an export policy. + choices: ['no_use','true','false'] + default: no_use + add_path_sel_num: + description: + - Number of Add-Path routes. + The value is an integer ranging from 2 to 64. + route_sel_delay: + description: + - Route selection delay. + The value is an integer ranging from 0 to 3600. + allow_invalid_as: + description: + - Allow routes with BGP origin AS validation result Invalid to be selected. + If the value is true, invalid routes can participate in route selection. + If the value is false, invalid routes cannot participate in route selection. + choices: ['no_use','true','false'] + default: no_use + policy_ext_comm_enable: + description: + - If the value is true, modifying extended community attributes is allowed. + If the value is false, modifying extended community attributes is not allowed. + choices: ['no_use','true','false'] + default: no_use + supernet_uni_adv: + description: + - If the value is true, the function to advertise supernetwork unicast routes is enabled. + If the value is false, the function to advertise supernetwork unicast routes is disabled. + choices: ['no_use','true','false'] + default: no_use + supernet_label_adv: + description: + - If the value is true, the function to advertise supernetwork label is enabled. + If the value is false, the function to advertise supernetwork label is disabled. + choices: ['no_use','true','false'] + default: no_use + ingress_lsp_policy_name: + description: + - Ingress lsp policy name. + originator_prior: + description: + - Originator prior. + choices: ['no_use','true','false'] + default: no_use + lowest_priority: + description: + - If the value is true, enable reduce priority to advertise route. + If the value is false, disable reduce priority to advertise route. + choices: ['no_use','true','false'] + default: no_use + relay_delay_enable: + description: + - If the value is true, relay delay enable. + If the value is false, relay delay disable. + choices: ['no_use','true','false'] + default: no_use + import_protocol: + description: + - Routing protocol from which routes can be imported. + choices: ['direct', 'ospf', 'isis', 'static', 'rip', 'ospfv3', 'ripng'] + import_process_id: + description: + - Process ID of an imported routing protocol. + The value is an integer ranging from 0 to 4294967295. + network_address: + description: + - Specify the IP address advertised by BGP. + The value is a string of 0 to 255 characters. + mask_len: + description: + - Specify the mask length of an IP address. + The value is an integer ranging from 0 to 128. 
+''' + +EXAMPLES = ''' +- name: CloudEngine BGP address family test + hosts: cloudengine + connection: local + gather_facts: no + vars: + cli: + host: "{{ inventory_hostname }}" + port: "{{ ansible_ssh_port }}" + username: "{{ username }}" + password: "{{ password }}" + transport: cli + tasks: + - name: "Config BGP Address_Family" + ce_bgp_af: + state: present + vrf_name: js + af_type: ipv4uni + provider: "{{ cli }}" + - name: "Undo BGP Address_Family" + ce_bgp_af: + state: absent + vrf_name: js + af_type: ipv4uni + provider: "{{ cli }}" + - name: "Config import route" + ce_bgp_af: + state: present + vrf_name: js + af_type: ipv4uni + import_protocol: ospf + import_process_id: 123 + provider: "{{ cli }}" + - name: "Undo import route" + ce_bgp_af: + state: absent + vrf_name: js + af_type: ipv4uni + import_protocol: ospf + import_process_id: 123 + provider: "{{ cli }}" + - name: "Config network route" + ce_bgp_af: + state: present + vrf_name: js + af_type: ipv4uni + network_address: 1.1.1.1 + mask_len: 24 + provider: "{{ cli }}" + - name: "Undo network route" + ce_bgp_af: + state: absent + vrf_name: js + af_type: ipv4uni + network_address: 1.1.1.1 + mask_len: 24 + provider: "{{ cli }}" +''' + +RETURN = ''' +changed: + description: check to see if a change was made on the device + returned: always + type: bool + sample: true +proposed: + description: k/v pairs of parameters passed into module + returned: always + type: dict + sample: {"af_type": "ipv4uni", + "state": "present", "vrf_name": "js"} +existing: + description: k/v pairs of existing aaa server + returned: always + type: dict + sample: {} +end_state: + description: k/v pairs of aaa params after module execution + returned: always + type: dict + sample: {"af_type": "ipv4uni", "vrf_name": "js"} +updates: + description: command sent to the device + returned: always + type: list + sample: ["ipv4-family vpn-instance js"] +''' + +import re +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.network.cloudengine.ce import get_nc_config, set_nc_config, ce_argument_spec, check_ip_addr + +# get bgp address family +CE_GET_BGP_ADDRESS_FAMILY_HEADER = """ + + + + + + %s + + + %s +""" +CE_GET_BGP_ADDRESS_FAMILY_TAIL = """ + + + + + + + +""" + +# merge bgp address family +CE_MERGE_BGP_ADDRESS_FAMILY_HEADER = """ + + + + + + %s + + + %s +""" +CE_MERGE_BGP_ADDRESS_FAMILY_TAIL = """ + + + + + + + +""" + +# create bgp address family +CE_CREATE_BGP_ADDRESS_FAMILY_HEADER = """ + + + + + + %s + + + %s +""" +CE_CREATE_BGP_ADDRESS_FAMILY_TAIL = """ + + + + + + + +""" + +# delete bgp address family +CE_DELETE_BGP_ADDRESS_FAMILY_HEADER = """ + + + + + + %s + + + %s +""" +CE_DELETE_BGP_ADDRESS_FAMILY_TAIL = """ + + + + + + + +""" + +# get bgp import route +CE_GET_BGP_IMPORT_AND_NETWORK_ROUTE = """ + + + + + + %s + + + %s + + + + + + + + + + + + + + + + + + + +""" + +# merge bgp import route +CE_MERGE_BGP_IMPORT_ROUTE_HEADER = """ + + + + + + %s + + + %s + + + %s + %s +""" +CE_MERGE_BGP_IMPORT_ROUTE_TAIL = """ + + + + + + + + + +""" + +# create bgp import route +CE_CREATE_BGP_IMPORT_ROUTE = """ + + + + + + %s + + + %s + + + %s + %s + + + + + + + + + +""" + +# delete bgp import route +CE_DELETE_BGP_IMPORT_ROUTE = """ + + + + + + %s + + + %s + + + %s + %s + + + + + + + + + +""" + +# get bgp network route +CE_GET_BGP_NETWORK_ROUTE_HEADER = """ + + + + + + %s + + + %s + + + + +""" +CE_GET_BGP_NETWORK_ROUTE_TAIL = """ + + + + + + + + + +""" + +# merge bgp network route 
+CE_MERGE_BGP_NETWORK_ROUTE_HEADER = """ + + + + + + %s + + + %s + + + %s + %s +""" +CE_MERGE_BGP_NETWORK_ROUTE_TAIL = """ + + + + + + + + + +""" + +# create bgp network route +CE_CREATE_BGP_NETWORK_ROUTE = """ + + + + + + %s + + + %s + + + %s + %s + + + + + + + + + +""" + +# delete bgp network route +CE_DELETE_BGP_NETWORK_ROUTE = """ + + + + + + %s + + + %s + + + %s + %s + + + + + + + + + +""" + +# bgp import and network route header +CE_BGP_IMPORT_NETWORK_ROUTE_HEADER = """ + + + + + + %s + + + %s +""" +CE_BGP_IMPORT_NETWORK_ROUTE_TAIL = """ + + + + + + + +""" +CE_BGP_MERGE_IMPORT_UNIT = """ + + + %s + %s + + +""" +CE_BGP_CREATE_IMPORT_UNIT = """ + + + %s + %s + + +""" +CE_BGP_DELETE_IMPORT_UNIT = """ + + + %s + %s + + +""" +CE_BGP_MERGE_NETWORK_UNIT = """ + + + %s + %s + + +""" +CE_BGP_CREATE_NETWORK_UNIT = """ + + + %s + %s + + +""" +CE_BGP_DELETE_NETWORK_UNIT = """ + + + %s + %s + + +""" + + +class BgpAf(object): + """ Manages BGP Address-family configuration """ + + def netconf_get_config(self, **kwargs): + """ netconf_get_config """ + + module = kwargs["module"] + conf_str = kwargs["conf_str"] + + xml_str = get_nc_config(module, conf_str) + + return xml_str + + def netconf_set_config(self, **kwargs): + """ netconf_set_config """ + + module = kwargs["module"] + conf_str = kwargs["conf_str"] + + xml_str = set_nc_config(module, conf_str) + + return xml_str + + def check_bgp_af_args(self, **kwargs): + """ check_bgp_af_args """ + + module = kwargs["module"] + result = dict() + need_cfg = False + + vrf_name = module.params['vrf_name'] + af_type = module.params['af_type'] + if vrf_name: + if len(vrf_name) > 31 or len(vrf_name) == 0: + module.fail_json( + msg='Error: The len of vrf_name %s is out of [1 - 31].' % vrf_name) + else: + module.fail_json(msg='Error: Please input vrf_name.') + + state = module.params['state'] + af_type = module.params['af_type'] + + conf_str = CE_GET_BGP_ADDRESS_FAMILY_HEADER % (vrf_name, af_type) + \ + CE_GET_BGP_ADDRESS_FAMILY_TAIL + recv_xml = self.netconf_get_config(module=module, conf_str=conf_str) + + if state == "present": + if "" in recv_xml: + need_cfg = True + else: + re_find = re.findall( + r'.*(.*).*', recv_xml) + + if re_find: + result["af_type"] = re_find + result["vrf_name"] = vrf_name + if re_find[0] != af_type: + need_cfg = True + else: + need_cfg = True + else: + if "" in recv_xml: + pass + else: + re_find = re.findall( + r'.*(.*).*', recv_xml) + + if re_find: + result["af_type"] = re_find + result["vrf_name"] = vrf_name + if re_find[0] == af_type: + need_cfg = True + + result["need_cfg"] = need_cfg + return result + + def check_bgp_af_other_can_del(self, **kwargs): + """ check_bgp_af_other_can_del """ + module = kwargs["module"] + result = dict() + need_cfg = False + + state = module.params['state'] + vrf_name = module.params['vrf_name'] + af_type = module.params['af_type'] + + router_id = module.params['router_id'] + if router_id: + if len(router_id) > 255: + module.fail_json( + msg='Error: The len of router_id %s is out of [0 - 255].' 
% router_id) + + conf_str = CE_GET_BGP_ADDRESS_FAMILY_HEADER % (vrf_name, af_type) + \ + "" + CE_GET_BGP_ADDRESS_FAMILY_TAIL + recv_xml = self.netconf_get_config(module=module, conf_str=conf_str) + + if state == "present": + if "" in recv_xml: + need_cfg = True + else: + re_find = re.findall( + r'.*(.*).*', recv_xml) + + if re_find: + if re_find[0] != router_id: + need_cfg = True + else: + need_cfg = True + else: + if "" in recv_xml: + pass + else: + re_find = re.findall( + r'.*(.*).*', recv_xml) + + if re_find: + if re_find[0] == router_id: + need_cfg = True + else: + pass + + determin_med = module.params['determin_med'] + if determin_med != 'no_use': + + conf_str = CE_GET_BGP_ADDRESS_FAMILY_HEADER % (vrf_name, af_type) + \ + "" + CE_GET_BGP_ADDRESS_FAMILY_TAIL + recv_xml = self.netconf_get_config(module=module, conf_str=conf_str) + + if state == "present": + if "" in recv_xml: + need_cfg = True + else: + re_find = re.findall( + r'.*(.*).*', recv_xml) + + if re_find: + if re_find[0] != determin_med: + need_cfg = True + else: + need_cfg = True + else: + if "" in recv_xml: + pass + else: + re_find = re.findall( + r'.*(.*).*', recv_xml) + + if re_find: + if re_find[0] == determin_med: + need_cfg = True + else: + pass + + ebgp_if_sensitive = module.params['ebgp_if_sensitive'] + if ebgp_if_sensitive != 'no_use': + + conf_str = CE_GET_BGP_ADDRESS_FAMILY_HEADER % (vrf_name, af_type) + \ + "" + CE_GET_BGP_ADDRESS_FAMILY_TAIL + recv_xml = self.netconf_get_config(module=module, conf_str=conf_str) + + if state == "present": + if "" in recv_xml: + need_cfg = True + else: + re_find = re.findall( + r'.*(.*).*', recv_xml) + + if re_find: + if re_find[0] != ebgp_if_sensitive: + need_cfg = True + else: + need_cfg = True + else: + if "" in recv_xml: + pass + else: + re_find = re.findall( + r'.*(.*).*', recv_xml) + + if re_find: + if re_find[0] == ebgp_if_sensitive: + need_cfg = True + else: + pass + + relay_delay_enable = module.params['relay_delay_enable'] + if relay_delay_enable != 'no_use': + + conf_str = CE_GET_BGP_ADDRESS_FAMILY_HEADER % (vrf_name, af_type) + \ + "" + CE_GET_BGP_ADDRESS_FAMILY_TAIL + recv_xml = self.netconf_get_config(module=module, conf_str=conf_str) + + if state == "present": + if "" in recv_xml: + need_cfg = True + else: + re_find = re.findall( + r'.*(.*).*', recv_xml) + + if re_find: + if re_find[0] != relay_delay_enable: + need_cfg = True + else: + need_cfg = True + else: + if "" in recv_xml: + pass + else: + re_find = re.findall( + r'.*(.*).*', recv_xml) + + if re_find: + if re_find[0] == relay_delay_enable: + need_cfg = True + else: + pass + + result["need_cfg"] = need_cfg + return result + + def check_bgp_af_other_args(self, **kwargs): + """ check_bgp_af_other_args """ + + module = kwargs["module"] + result = dict() + need_cfg = False + + vrf_name = module.params['vrf_name'] + af_type = module.params['af_type'] + + max_load_ibgp_num = module.params['max_load_ibgp_num'] + if max_load_ibgp_num: + if int(max_load_ibgp_num) > 65535 or int(max_load_ibgp_num) < 1: + module.fail_json( + msg='Error: The value of max_load_ibgp_num %s is out of [1 - 65535].' 
% max_load_ibgp_num) + + conf_str = CE_GET_BGP_ADDRESS_FAMILY_HEADER % (vrf_name, af_type) + \ + "" + CE_GET_BGP_ADDRESS_FAMILY_TAIL + recv_xml = self.netconf_get_config(module=module, conf_str=conf_str) + + if "" in recv_xml: + need_cfg = True + else: + re_find = re.findall( + r'.*(.*).*', recv_xml) + + if re_find: + result["max_load_ibgp_num"] = re_find + result["vrf_name"] = vrf_name + if re_find[0] != max_load_ibgp_num: + need_cfg = True + else: + need_cfg = True + + ibgp_ecmp_nexthop_changed = module.params['ibgp_ecmp_nexthop_changed'] + if ibgp_ecmp_nexthop_changed != 'no_use': + + conf_str = CE_GET_BGP_ADDRESS_FAMILY_HEADER % (vrf_name, af_type) + \ + "" + CE_GET_BGP_ADDRESS_FAMILY_TAIL + recv_xml = self.netconf_get_config(module=module, conf_str=conf_str) + + if "" in recv_xml: + need_cfg = True + else: + re_find = re.findall( + r'.*(.*).*', recv_xml) + + if re_find: + result["ibgp_ecmp_nexthop_changed"] = re_find + result["vrf_name"] = vrf_name + if re_find[0] != ibgp_ecmp_nexthop_changed: + need_cfg = True + else: + need_cfg = True + + max_load_ebgp_num = module.params['max_load_ebgp_num'] + if max_load_ebgp_num: + if int(max_load_ebgp_num) > 65535 or int(max_load_ebgp_num) < 1: + module.fail_json( + msg='Error: The value of max_load_ebgp_num %s is out of [1 - 65535].' % max_load_ebgp_num) + + conf_str = CE_GET_BGP_ADDRESS_FAMILY_HEADER % (vrf_name, af_type) + \ + "" + CE_GET_BGP_ADDRESS_FAMILY_TAIL + recv_xml = self.netconf_get_config(module=module, conf_str=conf_str) + + if "" in recv_xml: + need_cfg = True + else: + re_find = re.findall( + r'.*(.*).*', recv_xml) + + if re_find: + result["max_load_ebgp_num"] = re_find + result["vrf_name"] = vrf_name + if re_find[0] != max_load_ebgp_num: + need_cfg = True + else: + need_cfg = True + + ebgp_ecmp_nexthop_changed = module.params['ebgp_ecmp_nexthop_changed'] + if ebgp_ecmp_nexthop_changed != 'no_use': + + conf_str = CE_GET_BGP_ADDRESS_FAMILY_HEADER % (vrf_name, af_type) + \ + "" + CE_GET_BGP_ADDRESS_FAMILY_TAIL + recv_xml = self.netconf_get_config(module=module, conf_str=conf_str) + + if "" in recv_xml: + need_cfg = True + else: + re_find = re.findall( + r'.*(.*).*', recv_xml) + + if re_find: + result["ebgp_ecmp_nexthop_changed"] = re_find + result["vrf_name"] = vrf_name + if re_find[0] != ebgp_ecmp_nexthop_changed: + need_cfg = True + else: + need_cfg = True + + maximum_load_balance = module.params['maximum_load_balance'] + if maximum_load_balance: + if int(maximum_load_balance) > 65535 or int(maximum_load_balance) < 1: + module.fail_json( + msg='Error: The value of maximum_load_balance %s is out of [1 - 65535].' 
% maximum_load_balance) + + conf_str = CE_GET_BGP_ADDRESS_FAMILY_HEADER % (vrf_name, af_type) + \ + "" + CE_GET_BGP_ADDRESS_FAMILY_TAIL + recv_xml = self.netconf_get_config(module=module, conf_str=conf_str) + + if "" in recv_xml: + need_cfg = True + else: + re_find = re.findall( + r'.*(.*).*', recv_xml) + + if re_find: + result["maximum_load_balance"] = re_find + result["vrf_name"] = vrf_name + if re_find[0] != maximum_load_balance: + need_cfg = True + else: + need_cfg = True + + ecmp_nexthop_changed = module.params['ecmp_nexthop_changed'] + if ecmp_nexthop_changed != 'no_use': + + conf_str = CE_GET_BGP_ADDRESS_FAMILY_HEADER % (vrf_name, af_type) + \ + "" + CE_GET_BGP_ADDRESS_FAMILY_TAIL + recv_xml = self.netconf_get_config(module=module, conf_str=conf_str) + + if "" in recv_xml: + need_cfg = True + else: + re_find = re.findall( + r'.*(.*).*', recv_xml) + + if re_find: + result["ecmp_nexthop_changed"] = re_find + result["vrf_name"] = vrf_name + if re_find[0] != ecmp_nexthop_changed: + need_cfg = True + else: + need_cfg = True + + default_local_pref = module.params['default_local_pref'] + if default_local_pref: + if int(default_local_pref) < 0: + module.fail_json( + msg='Error: The value of default_local_pref %s is out of [0 - 4294967295].' % default_local_pref) + + conf_str = CE_GET_BGP_ADDRESS_FAMILY_HEADER % (vrf_name, af_type) + \ + "" + CE_GET_BGP_ADDRESS_FAMILY_TAIL + recv_xml = self.netconf_get_config(module=module, conf_str=conf_str) + + if "" in recv_xml: + need_cfg = True + else: + re_find = re.findall( + r'.*(.*).*', recv_xml) + + if re_find: + result["default_local_pref"] = re_find + result["vrf_name"] = vrf_name + if re_find[0] != default_local_pref: + need_cfg = True + else: + need_cfg = True + + default_med = module.params['default_med'] + if default_med: + if int(default_med) < 0: + module.fail_json( + msg='Error: The value of default_med %s is out of [0 - 4294967295].' % default_med) + + conf_str = CE_GET_BGP_ADDRESS_FAMILY_HEADER % (vrf_name, af_type) + \ + "" + CE_GET_BGP_ADDRESS_FAMILY_TAIL + recv_xml = self.netconf_get_config(module=module, conf_str=conf_str) + + if "" in recv_xml: + need_cfg = True + else: + re_find = re.findall( + r'.*(.*).*', recv_xml) + + if re_find: + result["default_med"] = re_find + result["vrf_name"] = vrf_name + if re_find[0] != default_med: + need_cfg = True + else: + need_cfg = True + + default_rt_import_enable = module.params['default_rt_import_enable'] + if default_rt_import_enable != 'no_use': + + conf_str = CE_GET_BGP_ADDRESS_FAMILY_HEADER % (vrf_name, af_type) + \ + "" + CE_GET_BGP_ADDRESS_FAMILY_TAIL + recv_xml = self.netconf_get_config(module=module, conf_str=conf_str) + + if "" in recv_xml: + need_cfg = True + else: + re_find = re.findall( + r'.*(.*).*', recv_xml) + + if re_find: + result["default_rt_import_enable"] = re_find + result["vrf_name"] = vrf_name + if re_find[0] != default_rt_import_enable: + need_cfg = True + else: + need_cfg = True + + router_id = module.params['router_id'] + if router_id: + if len(router_id) > 255: + module.fail_json( + msg='Error: The len of router_id %s is out of [0 - 255].' 
% router_id) + + conf_str = CE_GET_BGP_ADDRESS_FAMILY_HEADER % (vrf_name, af_type) + \ + "" + CE_GET_BGP_ADDRESS_FAMILY_TAIL + recv_xml = self.netconf_get_config(module=module, conf_str=conf_str) + + if "" in recv_xml: + need_cfg = True + else: + re_find = re.findall( + r'.*(.*).*', recv_xml) + + if re_find: + result["router_id"] = re_find + result["vrf_name"] = vrf_name + if re_find[0] != router_id: + need_cfg = True + else: + need_cfg = True + + vrf_rid_auto_sel = module.params['vrf_rid_auto_sel'] + if vrf_rid_auto_sel != 'no_use': + + conf_str = CE_GET_BGP_ADDRESS_FAMILY_HEADER % (vrf_name, af_type) + \ + "" + CE_GET_BGP_ADDRESS_FAMILY_TAIL + recv_xml = self.netconf_get_config(module=module, conf_str=conf_str) + + if "" in recv_xml: + need_cfg = True + else: + re_find = re.findall( + r'.*(.*).*', recv_xml) + + if re_find: + result["vrf_rid_auto_sel"] = re_find + result["vrf_name"] = vrf_name + if re_find[0] != vrf_rid_auto_sel: + need_cfg = True + else: + need_cfg = True + + nexthop_third_party = module.params['nexthop_third_party'] + if nexthop_third_party != 'no_use': + + conf_str = CE_GET_BGP_ADDRESS_FAMILY_HEADER % (vrf_name, af_type) + \ + "" + CE_GET_BGP_ADDRESS_FAMILY_TAIL + recv_xml = self.netconf_get_config(module=module, conf_str=conf_str) + + if "" in recv_xml: + need_cfg = True + else: + re_find = re.findall( + r'.*(.*).*', recv_xml) + + if re_find: + result["nexthop_third_party"] = re_find + result["vrf_name"] = vrf_name + if re_find[0] != nexthop_third_party: + need_cfg = True + else: + need_cfg = True + + summary_automatic = module.params['summary_automatic'] + if summary_automatic != 'no_use': + + conf_str = CE_GET_BGP_ADDRESS_FAMILY_HEADER % (vrf_name, af_type) + \ + "" + CE_GET_BGP_ADDRESS_FAMILY_TAIL + recv_xml = self.netconf_get_config(module=module, conf_str=conf_str) + + if "" in recv_xml: + need_cfg = True + else: + re_find = re.findall( + r'.*(.*).*', recv_xml) + + if re_find: + result["summary_automatic"] = re_find + result["vrf_name"] = vrf_name + if re_find[0] != summary_automatic: + need_cfg = True + else: + need_cfg = True + + auto_frr_enable = module.params['auto_frr_enable'] + if auto_frr_enable != 'no_use': + + conf_str = CE_GET_BGP_ADDRESS_FAMILY_HEADER % (vrf_name, af_type) + \ + "" + CE_GET_BGP_ADDRESS_FAMILY_TAIL + recv_xml = self.netconf_get_config(module=module, conf_str=conf_str) + + if "" in recv_xml: + need_cfg = True + else: + re_find = re.findall( + r'.*(.*).*', recv_xml) + + if re_find: + result["auto_frr_enable"] = re_find + result["vrf_name"] = vrf_name + if re_find[0] != auto_frr_enable: + need_cfg = True + else: + need_cfg = True + + load_balancing_as_path_ignore = module.params['load_balancing_as_path_ignore'] + if load_balancing_as_path_ignore != 'no_use': + + conf_str = CE_GET_BGP_ADDRESS_FAMILY_HEADER % (vrf_name, af_type) + \ + "" + \ + CE_GET_BGP_ADDRESS_FAMILY_TAIL + recv_xml = self.netconf_get_config(module=module, conf_str=conf_str) + + if "" in recv_xml: + need_cfg = True + else: + re_find = re.findall( + r'.*(.*).*', recv_xml) + + if re_find: + result["load_balancing_as_path_ignore"] = re_find + result["vrf_name"] = vrf_name + if re_find[0] != load_balancing_as_path_ignore: + need_cfg = True + else: + need_cfg = True + + rib_only_enable = module.params['rib_only_enable'] + if rib_only_enable != 'no_use': + + conf_str = CE_GET_BGP_ADDRESS_FAMILY_HEADER % (vrf_name, af_type) + \ + "" + CE_GET_BGP_ADDRESS_FAMILY_TAIL + recv_xml = self.netconf_get_config(module=module, conf_str=conf_str) + + if "" in recv_xml: + need_cfg = True + 
else: + re_find = re.findall( + r'.*(.*).*', recv_xml) + + if re_find: + result["rib_only_enable"] = re_find + result["vrf_name"] = vrf_name + if re_find[0] != rib_only_enable: + need_cfg = True + else: + need_cfg = True + + rib_only_policy_name = module.params['rib_only_policy_name'] + if rib_only_policy_name: + if len(rib_only_policy_name) > 40 or len(rib_only_policy_name) < 1: + module.fail_json( + msg='Error: The len of rib_only_policy_name %s is out of [1 - 40].' % rib_only_policy_name) + + conf_str = CE_GET_BGP_ADDRESS_FAMILY_HEADER % (vrf_name, af_type) + \ + "" + CE_GET_BGP_ADDRESS_FAMILY_TAIL + recv_xml = self.netconf_get_config(module=module, conf_str=conf_str) + + if "" in recv_xml: + need_cfg = True + else: + re_find = re.findall( + r'.*(.*).*', recv_xml) + + if re_find: + result["rib_only_policy_name"] = re_find + result["vrf_name"] = vrf_name + if re_find[0] != rib_only_policy_name: + need_cfg = True + else: + need_cfg = True + + active_route_advertise = module.params['active_route_advertise'] + if active_route_advertise != 'no_use': + + conf_str = CE_GET_BGP_ADDRESS_FAMILY_HEADER % (vrf_name, af_type) + \ + "" + CE_GET_BGP_ADDRESS_FAMILY_TAIL + recv_xml = self.netconf_get_config(module=module, conf_str=conf_str) + + if "" in recv_xml: + need_cfg = True + else: + re_find = re.findall( + r'.*(.*).*', recv_xml) + + if re_find: + result["active_route_advertise"] = re_find + result["vrf_name"] = vrf_name + if re_find[0] != active_route_advertise: + need_cfg = True + else: + need_cfg = True + + as_path_neglect = module.params['as_path_neglect'] + if as_path_neglect != 'no_use': + + conf_str = CE_GET_BGP_ADDRESS_FAMILY_HEADER % (vrf_name, af_type) + \ + "" + CE_GET_BGP_ADDRESS_FAMILY_TAIL + recv_xml = self.netconf_get_config(module=module, conf_str=conf_str) + + if "" in recv_xml: + need_cfg = True + else: + re_find = re.findall( + r'.*(.*).*', recv_xml) + + if re_find: + result["as_path_neglect"] = re_find + result["vrf_name"] = vrf_name + if re_find[0] != as_path_neglect: + need_cfg = True + else: + need_cfg = True + + med_none_as_maximum = module.params['med_none_as_maximum'] + if med_none_as_maximum != 'no_use': + + conf_str = CE_GET_BGP_ADDRESS_FAMILY_HEADER % (vrf_name, af_type) + \ + "" + CE_GET_BGP_ADDRESS_FAMILY_TAIL + recv_xml = self.netconf_get_config(module=module, conf_str=conf_str) + + if "" in recv_xml: + need_cfg = True + else: + re_find = re.findall( + r'.*(.*).*', recv_xml) + + if re_find: + result["med_none_as_maximum"] = re_find + result["vrf_name"] = vrf_name + if re_find[0] != med_none_as_maximum: + need_cfg = True + else: + need_cfg = True + + router_id_neglect = module.params['router_id_neglect'] + if router_id_neglect != 'no_use': + + conf_str = CE_GET_BGP_ADDRESS_FAMILY_HEADER % (vrf_name, af_type) + \ + "" + CE_GET_BGP_ADDRESS_FAMILY_TAIL + recv_xml = self.netconf_get_config(module=module, conf_str=conf_str) + + if "" in recv_xml: + need_cfg = True + else: + re_find = re.findall( + r'.*(.*).*', recv_xml) + + if re_find: + result["router_id_neglect"] = re_find + result["vrf_name"] = vrf_name + if re_find[0] != router_id_neglect: + need_cfg = True + else: + need_cfg = True + + igp_metric_ignore = module.params['igp_metric_ignore'] + if igp_metric_ignore != 'no_use': + + conf_str = CE_GET_BGP_ADDRESS_FAMILY_HEADER % (vrf_name, af_type) + \ + "" + CE_GET_BGP_ADDRESS_FAMILY_TAIL + recv_xml = self.netconf_get_config(module=module, conf_str=conf_str) + + if "" in recv_xml: + need_cfg = True + else: + re_find = re.findall( + r'.*(.*).*', recv_xml) + + if re_find: 
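+                # same compare-and-flag pattern as the other options in this
+                # method: the device returned a value, so configuration is
+                # needed only when it differs from the requested setting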
+ result["igp_metric_ignore"] = re_find + result["vrf_name"] = vrf_name + if re_find[0] != igp_metric_ignore: + need_cfg = True + else: + need_cfg = True + + always_compare_med = module.params['always_compare_med'] + if always_compare_med != 'no_use': + + conf_str = CE_GET_BGP_ADDRESS_FAMILY_HEADER % (vrf_name, af_type) + \ + "" + CE_GET_BGP_ADDRESS_FAMILY_TAIL + recv_xml = self.netconf_get_config(module=module, conf_str=conf_str) + + if "" in recv_xml: + need_cfg = True + else: + re_find = re.findall( + r'.*(.*).*', recv_xml) + + if re_find: + result["always_compare_med"] = re_find + result["vrf_name"] = vrf_name + if re_find[0] != always_compare_med: + need_cfg = True + else: + need_cfg = True + + determin_med = module.params['determin_med'] + if determin_med != 'no_use': + + conf_str = CE_GET_BGP_ADDRESS_FAMILY_HEADER % (vrf_name, af_type) + \ + "" + CE_GET_BGP_ADDRESS_FAMILY_TAIL + recv_xml = self.netconf_get_config(module=module, conf_str=conf_str) + + if "" in recv_xml: + need_cfg = True + else: + re_find = re.findall( + r'.*(.*).*', recv_xml) + + if re_find: + result["determin_med"] = re_find + result["vrf_name"] = vrf_name + if re_find[0] != determin_med: + need_cfg = True + else: + need_cfg = True + + preference_external = module.params['preference_external'] + if preference_external: + if int(preference_external) > 255 or int(preference_external) < 1: + module.fail_json( + msg='Error: The value of preference_external %s is out of [1 - 255].' % preference_external) + + conf_str = CE_GET_BGP_ADDRESS_FAMILY_HEADER % (vrf_name, af_type) + \ + "" + CE_GET_BGP_ADDRESS_FAMILY_TAIL + recv_xml = self.netconf_get_config(module=module, conf_str=conf_str) + + if "" in recv_xml: + need_cfg = True + else: + re_find = re.findall( + r'.*(.*).*', recv_xml) + + if re_find: + result["preference_external"] = re_find + result["vrf_name"] = vrf_name + if re_find[0] != preference_external: + need_cfg = True + else: + need_cfg = True + + preference_internal = module.params['preference_internal'] + if preference_internal: + if int(preference_internal) > 255 or int(preference_internal) < 1: + module.fail_json( + msg='Error: The value of preference_internal %s is out of [1 - 255].' % preference_internal) + + conf_str = CE_GET_BGP_ADDRESS_FAMILY_HEADER % (vrf_name, af_type) + \ + "" + CE_GET_BGP_ADDRESS_FAMILY_TAIL + recv_xml = self.netconf_get_config(module=module, conf_str=conf_str) + + if "" in recv_xml: + need_cfg = True + else: + re_find = re.findall( + r'.*(.*).*', recv_xml) + + if re_find: + result["preference_internal"] = re_find + result["vrf_name"] = vrf_name + if re_find[0] != preference_internal: + need_cfg = True + else: + need_cfg = True + + preference_local = module.params['preference_local'] + if preference_local: + if int(preference_local) > 255 or int(preference_local) < 1: + module.fail_json( + msg='Error: The value of preference_local %s is out of [1 - 255].' 
% preference_local) + + conf_str = CE_GET_BGP_ADDRESS_FAMILY_HEADER % (vrf_name, af_type) + \ + "" + CE_GET_BGP_ADDRESS_FAMILY_TAIL + recv_xml = self.netconf_get_config(module=module, conf_str=conf_str) + + if "" in recv_xml: + need_cfg = True + else: + re_find = re.findall( + r'.*(.*).*', recv_xml) + + if re_find: + result["preference_local"] = re_find + result["vrf_name"] = vrf_name + if re_find[0] != preference_local: + need_cfg = True + else: + need_cfg = True + + prefrence_policy_name = module.params['prefrence_policy_name'] + if prefrence_policy_name: + if len(prefrence_policy_name) > 40 or len(prefrence_policy_name) < 1: + module.fail_json( + msg='Error: The len of prefrence_policy_name %s is out of [1 - 40].' % prefrence_policy_name) + + conf_str = CE_GET_BGP_ADDRESS_FAMILY_HEADER % (vrf_name, af_type) + \ + "" + CE_GET_BGP_ADDRESS_FAMILY_TAIL + recv_xml = self.netconf_get_config(module=module, conf_str=conf_str) + + if "" in recv_xml: + need_cfg = True + else: + re_find = re.findall( + r'.*(.*).*', recv_xml) + + if re_find: + result["prefrence_policy_name"] = re_find + result["vrf_name"] = vrf_name + if re_find[0] != prefrence_policy_name: + need_cfg = True + else: + need_cfg = True + + reflect_between_client = module.params['reflect_between_client'] + if reflect_between_client != 'no_use': + + conf_str = CE_GET_BGP_ADDRESS_FAMILY_HEADER % (vrf_name, af_type) + \ + "" + CE_GET_BGP_ADDRESS_FAMILY_TAIL + recv_xml = self.netconf_get_config(module=module, conf_str=conf_str) + + if "" in recv_xml: + need_cfg = True + else: + re_find = re.findall( + r'.*(.*).*', recv_xml) + + if re_find: + result["reflect_between_client"] = re_find + result["vrf_name"] = vrf_name + if re_find[0] != reflect_between_client: + need_cfg = True + else: + need_cfg = True + + reflector_cluster_id = module.params['reflector_cluster_id'] + if reflector_cluster_id: + if int(reflector_cluster_id) < 0: + module.fail_json( + msg='Error: The value of reflector_cluster_id %s is out of ' + '[1 - 4294967295].' % reflector_cluster_id) + + conf_str = CE_GET_BGP_ADDRESS_FAMILY_HEADER % (vrf_name, af_type) + \ + "" + CE_GET_BGP_ADDRESS_FAMILY_TAIL + recv_xml = self.netconf_get_config(module=module, conf_str=conf_str) + + if "" in recv_xml: + need_cfg = True + else: + re_find = re.findall( + r'.*(.*).*', recv_xml) + + if re_find: + result["reflector_cluster_id"] = re_find + result["vrf_name"] = vrf_name + if re_find[0] != reflector_cluster_id: + need_cfg = True + else: + need_cfg = True + + reflector_cluster_ipv4 = module.params['reflector_cluster_ipv4'] + if reflector_cluster_ipv4: + if len(reflector_cluster_ipv4) > 255: + module.fail_json( + msg='Error: The len of reflector_cluster_ipv4 %s is out of [0 - 255].' % reflector_cluster_ipv4) + + conf_str = CE_GET_BGP_ADDRESS_FAMILY_HEADER % (vrf_name, af_type) + \ + "" + CE_GET_BGP_ADDRESS_FAMILY_TAIL + recv_xml = self.netconf_get_config(module=module, conf_str=conf_str) + + if "" in recv_xml: + need_cfg = True + else: + re_find = re.findall( + r'.*(.*).*', recv_xml) + + if re_find: + result["reflector_cluster_ipv4"] = re_find + result["vrf_name"] = vrf_name + if re_find[0] != reflector_cluster_ipv4: + need_cfg = True + else: + need_cfg = True + + rr_filter_number = module.params['rr_filter_number'] + if rr_filter_number: + if len(rr_filter_number) > 51 or len(rr_filter_number) < 1: + module.fail_json( + msg='Error: The len of rr_filter_number %s is out of [1 - 51].' 
% rr_filter_number) + + conf_str = CE_GET_BGP_ADDRESS_FAMILY_HEADER % (vrf_name, af_type) + \ + "" + CE_GET_BGP_ADDRESS_FAMILY_TAIL + recv_xml = self.netconf_get_config(module=module, conf_str=conf_str) + + if "" in recv_xml: + need_cfg = True + else: + re_find = re.findall( + r'.*(.*).*', recv_xml) + + if re_find: + result["rr_filter_number"] = re_find + result["vrf_name"] = vrf_name + if re_find[0] != rr_filter_number: + need_cfg = True + else: + need_cfg = True + + policy_vpn_target = module.params['policy_vpn_target'] + if policy_vpn_target != 'no_use': + + conf_str = CE_GET_BGP_ADDRESS_FAMILY_HEADER % (vrf_name, af_type) + \ + "" + CE_GET_BGP_ADDRESS_FAMILY_TAIL + recv_xml = self.netconf_get_config(module=module, conf_str=conf_str) + + if "" in recv_xml: + need_cfg = True + else: + re_find = re.findall( + r'.*(.*).*', recv_xml) + + if re_find: + result["policy_vpn_target"] = re_find + result["vrf_name"] = vrf_name + if re_find[0] != policy_vpn_target: + need_cfg = True + else: + need_cfg = True + + next_hop_sel_depend_type = module.params['next_hop_sel_depend_type'] + if next_hop_sel_depend_type: + + conf_str = CE_GET_BGP_ADDRESS_FAMILY_HEADER % (vrf_name, af_type) + \ + "" + CE_GET_BGP_ADDRESS_FAMILY_TAIL + recv_xml = self.netconf_get_config(module=module, conf_str=conf_str) + + if "" in recv_xml: + need_cfg = True + else: + re_find = re.findall( + r'.*(.*).*', recv_xml) + + if re_find: + result["next_hop_sel_depend_type"] = re_find + result["vrf_name"] = vrf_name + if re_find[0] != next_hop_sel_depend_type: + need_cfg = True + else: + need_cfg = True + + nhp_relay_route_policy_name = module.params[ + 'nhp_relay_route_policy_name'] + if nhp_relay_route_policy_name: + if len(nhp_relay_route_policy_name) > 40 or len(nhp_relay_route_policy_name) < 1: + module.fail_json( + msg='Error: The len of nhp_relay_route_policy_name %s is ' + 'out of [1 - 40].' 
% nhp_relay_route_policy_name) + + conf_str = CE_GET_BGP_ADDRESS_FAMILY_HEADER % (vrf_name, af_type) + \ + "" + \ + CE_GET_BGP_ADDRESS_FAMILY_TAIL + recv_xml = self.netconf_get_config(module=module, conf_str=conf_str) + + if "" in recv_xml: + need_cfg = True + else: + re_find = re.findall( + r'.*(.*).*', recv_xml) + + if re_find: + result["nhp_relay_route_policy_name"] = re_find + result["vrf_name"] = vrf_name + if re_find[0] != nhp_relay_route_policy_name: + need_cfg = True + else: + need_cfg = True + + ebgp_if_sensitive = module.params['ebgp_if_sensitive'] + if ebgp_if_sensitive != 'no_use': + + conf_str = CE_GET_BGP_ADDRESS_FAMILY_HEADER % (vrf_name, af_type) + \ + "" + CE_GET_BGP_ADDRESS_FAMILY_TAIL + recv_xml = self.netconf_get_config(module=module, conf_str=conf_str) + + if "" in recv_xml: + need_cfg = True + else: + re_find = re.findall( + r'.*(.*).*', recv_xml) + + if re_find: + result["ebgp_if_sensitive"] = re_find + result["vrf_name"] = vrf_name + if re_find[0] != ebgp_if_sensitive: + need_cfg = True + else: + need_cfg = True + + reflect_chg_path = module.params['reflect_chg_path'] + if reflect_chg_path != 'no_use': + + conf_str = CE_GET_BGP_ADDRESS_FAMILY_HEADER % (vrf_name, af_type) + \ + "" + CE_GET_BGP_ADDRESS_FAMILY_TAIL + recv_xml = self.netconf_get_config(module=module, conf_str=conf_str) + + if "" in recv_xml: + need_cfg = True + else: + re_find = re.findall( + r'.*(.*).*', recv_xml) + + if re_find: + result["reflect_chg_path"] = re_find + result["vrf_name"] = vrf_name + if re_find[0] != reflect_chg_path: + need_cfg = True + else: + need_cfg = True + + add_path_sel_num = module.params['add_path_sel_num'] + if add_path_sel_num: + if int(add_path_sel_num) > 64 or int(add_path_sel_num) < 2: + module.fail_json( + msg='Error: The value of add_path_sel_num %s is out of [2 - 64].' % add_path_sel_num) + + conf_str = CE_GET_BGP_ADDRESS_FAMILY_HEADER % (vrf_name, af_type) + \ + "" + CE_GET_BGP_ADDRESS_FAMILY_TAIL + recv_xml = self.netconf_get_config(module=module, conf_str=conf_str) + + if "" in recv_xml: + need_cfg = True + else: + re_find = re.findall( + r'.*(.*).*', recv_xml) + + if re_find: + result["add_path_sel_num"] = re_find + result["vrf_name"] = vrf_name + if re_find[0] != add_path_sel_num: + need_cfg = True + else: + need_cfg = True + + route_sel_delay = module.params['route_sel_delay'] + if route_sel_delay: + if int(route_sel_delay) > 3600 or int(route_sel_delay) < 0: + module.fail_json( + msg='Error: The value of route_sel_delay %s is out of [0 - 3600].' 
% route_sel_delay) + + conf_str = CE_GET_BGP_ADDRESS_FAMILY_HEADER % (vrf_name, af_type) + \ + "" + CE_GET_BGP_ADDRESS_FAMILY_TAIL + recv_xml = self.netconf_get_config(module=module, conf_str=conf_str) + + if "" in recv_xml: + need_cfg = True + else: + re_find = re.findall( + r'.*(.*).*', recv_xml) + + if re_find: + result["route_sel_delay"] = re_find + result["vrf_name"] = vrf_name + if re_find[0] != route_sel_delay: + need_cfg = True + else: + need_cfg = True + + allow_invalid_as = module.params['allow_invalid_as'] + if allow_invalid_as != 'no_use': + + conf_str = CE_GET_BGP_ADDRESS_FAMILY_HEADER % (vrf_name, af_type) + \ + "" + CE_GET_BGP_ADDRESS_FAMILY_TAIL + recv_xml = self.netconf_get_config(module=module, conf_str=conf_str) + + if "" in recv_xml: + need_cfg = True + else: + re_find = re.findall( + r'.*(.*).*', recv_xml) + + if re_find: + result["allow_invalid_as"] = re_find + result["vrf_name"] = vrf_name + if re_find[0] != allow_invalid_as: + need_cfg = True + else: + need_cfg = True + + policy_ext_comm_enable = module.params['policy_ext_comm_enable'] + if policy_ext_comm_enable != 'no_use': + + conf_str = CE_GET_BGP_ADDRESS_FAMILY_HEADER % (vrf_name, af_type) + \ + "" + CE_GET_BGP_ADDRESS_FAMILY_TAIL + recv_xml = self.netconf_get_config(module=module, conf_str=conf_str) + + if "" in recv_xml: + need_cfg = True + else: + re_find = re.findall( + r'.*(.*).*', recv_xml) + + if re_find: + result["policy_ext_comm_enable"] = re_find + result["vrf_name"] = vrf_name + if re_find[0] != policy_ext_comm_enable: + need_cfg = True + else: + need_cfg = True + + supernet_uni_adv = module.params['supernet_uni_adv'] + if supernet_uni_adv != 'no_use': + + conf_str = CE_GET_BGP_ADDRESS_FAMILY_HEADER % (vrf_name, af_type) + \ + "" + CE_GET_BGP_ADDRESS_FAMILY_TAIL + recv_xml = self.netconf_get_config(module=module, conf_str=conf_str) + + if "" in recv_xml: + need_cfg = True + else: + re_find = re.findall( + r'.*(.*).*', recv_xml) + + if re_find: + result["supernet_uni_adv"] = re_find + result["vrf_name"] = vrf_name + if re_find[0] != supernet_uni_adv: + need_cfg = True + else: + need_cfg = True + + supernet_label_adv = module.params['supernet_label_adv'] + if supernet_label_adv != 'no_use': + + conf_str = CE_GET_BGP_ADDRESS_FAMILY_HEADER % (vrf_name, af_type) + \ + "" + CE_GET_BGP_ADDRESS_FAMILY_TAIL + recv_xml = self.netconf_get_config(module=module, conf_str=conf_str) + + if "" in recv_xml: + need_cfg = True + else: + re_find = re.findall( + r'.*(.*).*', recv_xml) + + if re_find: + result["supernet_label_adv"] = re_find + result["vrf_name"] = vrf_name + if re_find[0] != supernet_label_adv: + need_cfg = True + else: + need_cfg = True + + ingress_lsp_policy_name = module.params['ingress_lsp_policy_name'] + if ingress_lsp_policy_name: + if len(ingress_lsp_policy_name) > 40 or len(ingress_lsp_policy_name) < 1: + module.fail_json( + msg='Error: The len of ingress_lsp_policy_name %s is out of [1 - 40].' 
% ingress_lsp_policy_name) + + conf_str = CE_GET_BGP_ADDRESS_FAMILY_HEADER % (vrf_name, af_type) + \ + "" + CE_GET_BGP_ADDRESS_FAMILY_TAIL + recv_xml = self.netconf_get_config(module=module, conf_str=conf_str) + + if "" in recv_xml: + need_cfg = True + else: + re_find = re.findall( + r'.*(.*).*', recv_xml) + + if re_find: + result["ingress_lsp_policy_name"] = re_find + result["vrf_name"] = vrf_name + if re_find[0] != ingress_lsp_policy_name: + need_cfg = True + else: + need_cfg = True + + originator_prior = module.params['originator_prior'] + if originator_prior != 'no_use': + + conf_str = CE_GET_BGP_ADDRESS_FAMILY_HEADER % (vrf_name, af_type) + \ + "" + CE_GET_BGP_ADDRESS_FAMILY_TAIL + recv_xml = self.netconf_get_config(module=module, conf_str=conf_str) + + if "" in recv_xml: + need_cfg = True + else: + re_find = re.findall( + r'.*(.*).*', recv_xml) + + if re_find: + result["originator_prior"] = re_find + result["vrf_name"] = vrf_name + if re_find[0] != originator_prior: + need_cfg = True + else: + need_cfg = True + + lowest_priority = module.params['lowest_priority'] + if lowest_priority != 'no_use': + + conf_str = CE_GET_BGP_ADDRESS_FAMILY_HEADER % (vrf_name, af_type) + \ + "" + CE_GET_BGP_ADDRESS_FAMILY_TAIL + recv_xml = self.netconf_get_config(module=module, conf_str=conf_str) + + if "" in recv_xml: + need_cfg = True + else: + re_find = re.findall( + r'.*(.*).*', recv_xml) + + if re_find: + result["lowest_priority"] = re_find + result["vrf_name"] = vrf_name + if re_find[0] != lowest_priority: + need_cfg = True + else: + need_cfg = True + + relay_delay_enable = module.params['relay_delay_enable'] + if relay_delay_enable != 'no_use': + + conf_str = CE_GET_BGP_ADDRESS_FAMILY_HEADER % (vrf_name, af_type) + \ + "" + CE_GET_BGP_ADDRESS_FAMILY_TAIL + recv_xml = self.netconf_get_config(module=module, conf_str=conf_str) + + if "" in recv_xml: + need_cfg = True + else: + re_find = re.findall( + r'.*(.*).*', recv_xml) + + if re_find: + result["relay_delay_enable"] = re_find + result["vrf_name"] = vrf_name + if re_find[0] != relay_delay_enable: + need_cfg = True + else: + need_cfg = True + + result["need_cfg"] = need_cfg + return result + + def check_bgp_import_network_route(self, **kwargs): + """ check_bgp_import_network_route """ + + module = kwargs["module"] + result = dict() + import_need_cfg = False + network_need_cfg = False + + vrf_name = module.params['vrf_name'] + + state = module.params['state'] + af_type = module.params['af_type'] + import_protocol = module.params['import_protocol'] + import_process_id = module.params['import_process_id'] + + if import_protocol and (import_protocol != "direct" and import_protocol != "static"): + if not import_process_id: + module.fail_json( + msg='Error: Please input import_protocol and import_process_id value at the same time.') + else: + if int(import_process_id) < 0: + module.fail_json( + msg='Error: The value of import_process_id %s is out of [0 - 4294967295].' 
% import_process_id) + + if import_process_id: + if not import_protocol: + module.fail_json( + msg='Error: Please input import_protocol and import_process_id value at the same time.') + + network_address = module.params['network_address'] + mask_len = module.params['mask_len'] + + if network_address: + if not mask_len: + module.fail_json( + msg='Error: Please input network_address and mask_len value at the same time.') + if mask_len: + if not network_address: + module.fail_json( + msg='Error: Please input network_address and mask_len value at the same time.') + + conf_str = CE_GET_BGP_IMPORT_AND_NETWORK_ROUTE % (vrf_name, af_type) + recv_xml = self.netconf_get_config(module=module, conf_str=conf_str) + + if import_protocol: + + if import_protocol == "direct" or import_protocol == "static": + import_process_id = "0" + else: + if not import_process_id or import_process_id == "0": + module.fail_json( + msg='Error: Please input import_process_id not 0 when import_protocol is ' + '[ospf, isis, rip, ospfv3, ripng].') + + bgp_import_route_new = (import_protocol, import_process_id) + + if state == "present": + if "" in recv_xml: + import_need_cfg = True + else: + re_find = re.findall( + r'.*(.*).*\s.*(.*).*', + recv_xml) + + if re_find: + result["bgp_import_route"] = re_find + result["vrf_name"] = vrf_name + if bgp_import_route_new not in re_find: + import_need_cfg = True + else: + import_need_cfg = True + else: + if "" in recv_xml: + pass + else: + re_find = re.findall( + r'.*(.*).*\s.*(.*).*', + recv_xml) + + if re_find: + result["bgp_import_route"] = re_find + result["vrf_name"] = vrf_name + if bgp_import_route_new in re_find: + import_need_cfg = True + + if network_address and mask_len: + + bgp_network_route_new = (network_address, mask_len) + + if not check_ip_addr(ipaddr=network_address): + module.fail_json( + msg='Error: The network_address %s is invalid.' % network_address) + + if len(mask_len) > 128: + module.fail_json( + msg='Error: The len of mask_len %s is out of [0 - 128].' 
% mask_len) + + if state == "present": + if "" in recv_xml: + network_need_cfg = True + else: + re_find = re.findall( + r'.*(.*).*\s.*(.*).*', recv_xml) + + if re_find: + result["bgp_network_route"] = re_find + result["vrf_name"] = vrf_name + if bgp_network_route_new not in re_find: + network_need_cfg = True + else: + network_need_cfg = True + else: + if "" in recv_xml: + pass + else: + re_find = re.findall( + r'.*(.*).*\s.*(.*).*', recv_xml) + + if re_find: + result["bgp_network_route"] = re_find + result["vrf_name"] = vrf_name + if bgp_network_route_new in re_find: + network_need_cfg = True + + result["import_need_cfg"] = import_need_cfg + result["network_need_cfg"] = network_need_cfg + return result + + def merge_bgp_af(self, **kwargs): + """ merge_bgp_af """ + + module = kwargs["module"] + + vrf_name = module.params['vrf_name'] + + af_type = module.params['af_type'] + + conf_str = CE_MERGE_BGP_ADDRESS_FAMILY_HEADER % ( + vrf_name, af_type) + CE_MERGE_BGP_ADDRESS_FAMILY_TAIL + + recv_xml = self.netconf_set_config(module=module, conf_str=conf_str) + + if "" not in recv_xml: + module.fail_json(msg='Error: Merge bgp address family failed.') + + cmds = [] + + cmd = "ipv4-family vpn-instance %s" % vrf_name + + if af_type == "ipv4multi": + cmd = "ipv4-family multicast" + elif af_type == "ipv4vpn": + cmd = "ipv4-family vpnv4" + elif af_type == "ipv6uni": + cmd = "ipv6-family vpn-instance %s" % vrf_name + if vrf_name == "_public_": + cmd = "ipv6-family unicast" + elif af_type == "ipv6vpn": + cmd = "ipv6-family vpnv6" + elif af_type == "evpn": + cmd = "l2vpn-family evpn" + cmds.append(cmd) + + return cmds + + def create_bgp_af(self, **kwargs): + """ create_bgp_af """ + + module = kwargs["module"] + + vrf_name = module.params['vrf_name'] + + af_type = module.params['af_type'] + + conf_str = CE_CREATE_BGP_ADDRESS_FAMILY_HEADER % ( + vrf_name, af_type) + CE_CREATE_BGP_ADDRESS_FAMILY_TAIL + + recv_xml = self.netconf_set_config(module=module, conf_str=conf_str) + + if "" not in recv_xml: + module.fail_json(msg='Error: Create bgp address family failed.') + + cmds = [] + + cmd = "ipv4-family vpn-instance %s" % vrf_name + + if af_type == "ipv4multi": + cmd = "ipv4-family multicast" + elif af_type == "ipv4vpn": + cmd = "ipv4-family vpnv4" + elif af_type == "ipv6uni": + cmd = "ipv6-family vpn-instance %s" % vrf_name + if vrf_name == "_public_": + cmd = "ipv6-family unicast" + elif af_type == "ipv6vpn": + cmd = "ipv6-family vpnv6" + elif af_type == "evpn": + cmd = "l2vpn-family evpn" + cmds.append(cmd) + + return cmds + + def delete_bgp_af(self, **kwargs): + """ delete_bgp_af """ + + module = kwargs["module"] + + vrf_name = module.params['vrf_name'] + + af_type = module.params['af_type'] + + conf_str = CE_DELETE_BGP_ADDRESS_FAMILY_HEADER % ( + vrf_name, af_type) + CE_DELETE_BGP_ADDRESS_FAMILY_TAIL + + recv_xml = self.netconf_set_config(module=module, conf_str=conf_str) + + if "" not in recv_xml: + module.fail_json(msg='Error: Delete bgp address family failed.') + + cmds = [] + + cmd = "undo ipv4-family vpn-instance %s" % vrf_name + + if af_type == "ipv4multi": + cmd = "undo ipv4-family multicast" + elif af_type == "ipv4vpn": + cmd = "undo ipv4-family vpnv4" + elif af_type == "ipv6uni": + cmd = "undo ipv6-family vpn-instance %s" % vrf_name + if vrf_name == "_public_": + cmd = "undo ipv6-family unicast" + elif af_type == "ipv6vpn": + cmd = "undo ipv6-family vpnv6" + elif af_type == "evpn": + cmd = "l2vpn-family evpn" + cmds.append(cmd) + + return cmds + + def merge_bgp_af_other(self, **kwargs): + """ 
merge_bgp_af_other """ + + module = kwargs["module"] + vrf_name = module.params['vrf_name'] + af_type = module.params['af_type'] + + conf_str = CE_MERGE_BGP_ADDRESS_FAMILY_HEADER % (vrf_name, af_type) + + cmds = [] + + max_load_ibgp_num = module.params['max_load_ibgp_num'] + if max_load_ibgp_num: + conf_str += "%s" % max_load_ibgp_num + + cmd = "maximum load-balancing ibgp %s" % max_load_ibgp_num + cmds.append(cmd) + + ibgp_ecmp_nexthop_changed = module.params['ibgp_ecmp_nexthop_changed'] + if ibgp_ecmp_nexthop_changed != 'no_use': + conf_str += "%s" % ibgp_ecmp_nexthop_changed + + if ibgp_ecmp_nexthop_changed == "true": + cmd = "maximum load-balancing ibgp %s ecmp-nexthop-changed" % max_load_ibgp_num + cmds.append(cmd) + else: + cmd = "undo maximum load-balancing ibgp %s ecmp-nexthop-changed" % max_load_ibgp_num + cmds.append(cmd) + max_load_ebgp_num = module.params['max_load_ebgp_num'] + if max_load_ebgp_num: + conf_str += "%s" % max_load_ebgp_num + + cmd = "maximum load-balancing ebgp %s" % max_load_ebgp_num + cmds.append(cmd) + + ebgp_ecmp_nexthop_changed = module.params['ebgp_ecmp_nexthop_changed'] + if ebgp_ecmp_nexthop_changed != 'no_use': + conf_str += "%s" % ebgp_ecmp_nexthop_changed + + if ebgp_ecmp_nexthop_changed == "true": + cmd = "maximum load-balancing ebgp %s ecmp-nexthop-changed" % max_load_ebgp_num + else: + cmd = "undo maximum load-balancing ebgp %s ecmp-nexthop-changed" % max_load_ebgp_num + cmds.append(cmd) + + maximum_load_balance = module.params['maximum_load_balance'] + if maximum_load_balance: + conf_str += "%s" % maximum_load_balance + + cmd = "maximum load-balancing %s" % maximum_load_balance + cmds.append(cmd) + + ecmp_nexthop_changed = module.params['ecmp_nexthop_changed'] + if ecmp_nexthop_changed != 'no_use': + conf_str += "%s" % ecmp_nexthop_changed + + if ecmp_nexthop_changed == "true": + cmd = "maximum load-balancing %s ecmp-nexthop-changed" % maximum_load_balance + else: + cmd = "undo maximum load-balancing %s ecmp-nexthop-changed" % maximum_load_balance + cmds.append(cmd) + + default_local_pref = module.params['default_local_pref'] + if default_local_pref: + conf_str += "%s" % default_local_pref + + cmd = "default local-preference %s" % default_local_pref + cmds.append(cmd) + + default_med = module.params['default_med'] + if default_med: + conf_str += "%s" % default_med + + cmd = "default med %s" % default_med + cmds.append(cmd) + + default_rt_import_enable = module.params['default_rt_import_enable'] + if default_rt_import_enable != 'no_use': + conf_str += "%s" % default_rt_import_enable + + if default_rt_import_enable == "true": + cmd = "default-route imported" + else: + cmd = "undo default-route imported" + cmds.append(cmd) + + router_id = module.params['router_id'] + if router_id: + conf_str += "%s" % router_id + + cmd = "router-id %s" % router_id + cmds.append(cmd) + + vrf_rid_auto_sel = module.params['vrf_rid_auto_sel'] + if vrf_rid_auto_sel != 'no_use': + conf_str += "%s" % vrf_rid_auto_sel + family = "ipv4-family" + if af_type == "ipv6uni": + family = "ipv6-family" + if vrf_rid_auto_sel == "true": + cmd = "%s vpn-instance %s" % (family, vrf_name) + cmds.append(cmd) + cmd = "router-id auto-select" + cmds.append(cmd) + else: + cmd = "%s vpn-instance %s" % (family, vrf_name) + cmds.append(cmd) + cmd = "undo router-id auto-select" + cmds.append(cmd) + + nexthop_third_party = module.params['nexthop_third_party'] + if nexthop_third_party != 'no_use': + conf_str += "%s" % nexthop_third_party + + if nexthop_third_party == "true": + cmd = "nexthop 
third-party" + else: + cmd = "undo nexthop third-party" + cmds.append(cmd) + + summary_automatic = module.params['summary_automatic'] + if summary_automatic != 'no_use': + conf_str += "%s" % summary_automatic + + if summary_automatic == "true": + cmd = "summary automatic" + else: + cmd = "undo summary automatic" + cmds.append(cmd) + + auto_frr_enable = module.params['auto_frr_enable'] + if auto_frr_enable != 'no_use': + conf_str += "%s" % auto_frr_enable + + if auto_frr_enable == "true": + cmd = "auto-frr" + else: + cmd = "undo auto-frr" + cmds.append(cmd) + + load_balancing_as_path_ignore = module.params[ + 'load_balancing_as_path_ignore'] + if load_balancing_as_path_ignore != 'no_use': + conf_str += "%s" % load_balancing_as_path_ignore + + if load_balancing_as_path_ignore == "true": + cmd = "load-balancing as-path-ignore" + else: + cmd = "undo load-balancing as-path-ignore" + cmds.append(cmd) + + rib_only_enable = module.params['rib_only_enable'] + if rib_only_enable != 'no_use': + conf_str += "%s" % rib_only_enable + + if rib_only_enable == "true": + cmd = "routing-table rib-only" + else: + cmd = "undo routing-table rib-only" + cmds.append(cmd) + + rib_only_policy_name = module.params['rib_only_policy_name'] + if rib_only_policy_name and rib_only_enable == "true": + conf_str += "%s" % rib_only_policy_name + + cmd = "routing-table rib-only route-policy %s" % rib_only_policy_name + cmds.append(cmd) + + active_route_advertise = module.params['active_route_advertise'] + if active_route_advertise != 'no_use': + conf_str += "%s" % active_route_advertise + + if active_route_advertise == "true": + cmd = "active-route-advertise" + else: + cmd = "undo active-route-advertise" + cmds.append(cmd) + + as_path_neglect = module.params['as_path_neglect'] + if as_path_neglect != 'no_use': + conf_str += "%s" % as_path_neglect + + if as_path_neglect == "true": + cmd = "bestroute as-path-ignore" + else: + cmd = "undo bestroute as-path-ignore" + cmds.append(cmd) + + med_none_as_maximum = module.params['med_none_as_maximum'] + if med_none_as_maximum != 'no_use': + conf_str += "%s" % med_none_as_maximum + + if med_none_as_maximum == "true": + cmd = "bestroute med-none-as-maximum" + else: + cmd = "undo bestroute med-none-as-maximum" + cmds.append(cmd) + + router_id_neglect = module.params['router_id_neglect'] + if router_id_neglect != 'no_use': + conf_str += "%s" % router_id_neglect + + if router_id_neglect == "true": + cmd = "bestroute router-id-ignore" + else: + cmd = "undo bestroute router-id-ignore" + cmds.append(cmd) + + igp_metric_ignore = module.params['igp_metric_ignore'] + if igp_metric_ignore != 'no_use': + conf_str += "%s" % igp_metric_ignore + + if igp_metric_ignore == "true": + cmd = "bestroute igp-metric-ignore" + cmds.append(cmd) + else: + cmd = "undo bestroute igp-metric-ignore" + cmds.append(cmd) + always_compare_med = module.params['always_compare_med'] + if always_compare_med != 'no_use': + conf_str += "%s" % always_compare_med + + if always_compare_med == "true": + cmd = "compare-different-as-med" + cmds.append(cmd) + else: + cmd = "undo compare-different-as-med" + cmds.append(cmd) + determin_med = module.params['determin_med'] + if determin_med != 'no_use': + conf_str += "%s" % determin_med + + if determin_med == "true": + cmd = "deterministic-med" + cmds.append(cmd) + else: + cmd = "undo deterministic-med" + cmds.append(cmd) + + preference_external = module.params['preference_external'] + preference_internal = module.params['preference_internal'] + preference_local = 
module.params['preference_local'] + if any([preference_external, preference_internal, preference_local]): + preference_external = preference_external or "255" + preference_internal = preference_internal or "255" + preference_local = preference_local or "255" + + conf_str += "%s" % preference_external + conf_str += "%s" % preference_internal + conf_str += "%s" % preference_local + + cmd = "preference %s %s %s" % ( + preference_external, preference_internal, preference_local) + cmds.append(cmd) + + prefrence_policy_name = module.params['prefrence_policy_name'] + if prefrence_policy_name: + conf_str += "%s" % prefrence_policy_name + + cmd = "preference route-policy %s" % prefrence_policy_name + cmds.append(cmd) + + reflect_between_client = module.params['reflect_between_client'] + if reflect_between_client != 'no_use': + conf_str += "%s" % reflect_between_client + + if reflect_between_client == "true": + cmd = "reflect between-clients" + else: + cmd = "undo reflect between-clients" + cmds.append(cmd) + + reflector_cluster_id = module.params['reflector_cluster_id'] + if reflector_cluster_id: + conf_str += "%s" % reflector_cluster_id + + cmd = "reflector cluster-id %s" % reflector_cluster_id + cmds.append(cmd) + + reflector_cluster_ipv4 = module.params['reflector_cluster_ipv4'] + if reflector_cluster_ipv4: + conf_str += "%s" % reflector_cluster_ipv4 + + cmd = "reflector cluster-id %s" % reflector_cluster_ipv4 + cmds.append(cmd) + + rr_filter_number = module.params['rr_filter_number'] + if rr_filter_number: + conf_str += "%s" % rr_filter_number + cmd = 'rr-filter %s' % rr_filter_number + cmds.append(cmd) + + policy_vpn_target = module.params['policy_vpn_target'] + if policy_vpn_target != 'no_use': + conf_str += "%s" % policy_vpn_target + if policy_vpn_target == 'true': + cmd = 'policy vpn-target' + else: + cmd = 'undo policy vpn-target' + cmds.append(cmd) + + next_hop_sel_depend_type = module.params['next_hop_sel_depend_type'] + if next_hop_sel_depend_type: + conf_str += "%s" % next_hop_sel_depend_type + + nhp_relay_route_policy_name = module.params[ + 'nhp_relay_route_policy_name'] + if nhp_relay_route_policy_name: + conf_str += "%s" % nhp_relay_route_policy_name + + cmd = "nexthop recursive-lookup route-policy %s" % nhp_relay_route_policy_name + cmds.append(cmd) + + ebgp_if_sensitive = module.params['ebgp_if_sensitive'] + if ebgp_if_sensitive != 'no_use': + conf_str += "%s" % ebgp_if_sensitive + + if ebgp_if_sensitive == "true": + cmd = "ebgp-interface-sensitive" + else: + cmd = "undo ebgp-interface-sensitive" + cmds.append(cmd) + + reflect_chg_path = module.params['reflect_chg_path'] + if reflect_chg_path != 'no_use': + conf_str += "%s" % reflect_chg_path + + if reflect_chg_path == "true": + cmd = "reflect change-path-attribute" + else: + cmd = "undo reflect change-path-attribute" + cmds.append(cmd) + + add_path_sel_num = module.params['add_path_sel_num'] + if add_path_sel_num: + conf_str += "%s" % add_path_sel_num + + cmd = "bestroute add-path path-number %s" % add_path_sel_num + cmds.append(cmd) + + route_sel_delay = module.params['route_sel_delay'] + if route_sel_delay: + conf_str += "%s" % route_sel_delay + + cmd = "route-select delay %s" % route_sel_delay + cmds.append(cmd) + + allow_invalid_as = module.params['allow_invalid_as'] + if allow_invalid_as != 'no_use': + conf_str += "%s" % allow_invalid_as + + policy_ext_comm_enable = module.params['policy_ext_comm_enable'] + if policy_ext_comm_enable != 'no_use': + conf_str += "%s" % policy_ext_comm_enable + + if policy_ext_comm_enable == 
"true": + cmd = "ext-community-change enable" + else: + cmd = "undo ext-community-change enable" + cmds.append(cmd) + + supernet_uni_adv = module.params['supernet_uni_adv'] + if supernet_uni_adv != 'no_use': + conf_str += "%s" % supernet_uni_adv + + if supernet_uni_adv == "true": + cmd = "supernet unicast advertise enable" + else: + cmd = "undo supernet unicast advertise enable" + cmds.append(cmd) + + supernet_label_adv = module.params['supernet_label_adv'] + if supernet_label_adv != 'no_use': + conf_str += "%s" % supernet_label_adv + + if supernet_label_adv == "true": + cmd = "supernet label-route advertise enable" + else: + cmd = "undo supernet label-route advertise enable" + cmds.append(cmd) + + ingress_lsp_policy_name = module.params['ingress_lsp_policy_name'] + if ingress_lsp_policy_name: + conf_str += "%s" % ingress_lsp_policy_name + cmd = "ingress-lsp trigger route-policy %s" % ingress_lsp_policy_name + cmds.append(cmd) + + originator_prior = module.params['originator_prior'] + if originator_prior != 'no_use': + conf_str += "%s" % originator_prior + if originator_prior == "true": + cmd = "bestroute routerid-prior-clusterlist" + else: + cmd = "undo bestroute routerid-prior-clusterlist" + cmds.append(cmd) + + lowest_priority = module.params['lowest_priority'] + if lowest_priority != 'no_use': + conf_str += "%s" % lowest_priority + + if lowest_priority == "true": + cmd = "advertise lowest-priority on-startup" + else: + cmd = "undo advertise lowest-priority on-startup" + cmds.append(cmd) + + relay_delay_enable = module.params['relay_delay_enable'] + if relay_delay_enable != 'no_use': + conf_str += "%s" % relay_delay_enable + + if relay_delay_enable == "true": + cmd = "nexthop recursive-lookup restrain enable" + else: + cmd = "nexthop recursive-lookup restrain disable" + cmds.append(cmd) + conf_str += CE_MERGE_BGP_ADDRESS_FAMILY_TAIL + recv_xml = self.netconf_set_config(module=module, conf_str=conf_str) + + if "" not in recv_xml: + module.fail_json( + msg='Error: Merge bgp address family other agrus failed.') + + return cmds + + def delete_bgp_af_other(self, **kwargs): + """ delete_bgp_af_other """ + + module = kwargs["module"] + vrf_name = module.params['vrf_name'] + af_type = module.params['af_type'] + + conf_str = CE_MERGE_BGP_ADDRESS_FAMILY_HEADER % (vrf_name, af_type) + + cmds = [] + + router_id = module.params['router_id'] + if router_id: + conf_str += "" + + cmd = "undo router-id %s" % router_id + cmds.append(cmd) + + determin_med = module.params['determin_med'] + if determin_med != 'no_use': + conf_str += "" + + cmd = "undo deterministic-med" + cmds.append(cmd) + + ebgp_if_sensitive = module.params['ebgp_if_sensitive'] + if ebgp_if_sensitive != 'no_use': + conf_str += "" + + cmd = "undo ebgp-interface-sensitive" + cmds.append(cmd) + + relay_delay_enable = module.params['relay_delay_enable'] + if relay_delay_enable != 'no_use': + conf_str += "" + + conf_str += CE_MERGE_BGP_ADDRESS_FAMILY_TAIL + recv_xml = self.netconf_set_config(module=module, conf_str=conf_str) + + if "" not in recv_xml: + module.fail_json( + msg='Error: Merge bgp address family other agrus failed.') + + return cmds + + def merge_bgp_import_route(self, **kwargs): + """ merge_bgp_import_route """ + + module = kwargs["module"] + + vrf_name = module.params['vrf_name'] + + af_type = module.params['af_type'] + import_protocol = module.params['import_protocol'] + import_process_id = module.params['import_process_id'] + + if import_protocol == "direct" or import_protocol == "static": + import_process_id = "0" + + 
conf_str = CE_MERGE_BGP_IMPORT_ROUTE_HEADER % (
+            vrf_name, af_type, import_protocol, import_process_id) + CE_MERGE_BGP_IMPORT_ROUTE_TAIL
+
+        recv_xml = self.netconf_set_config(module=module, conf_str=conf_str)
+
+        if "<ok/>" not in recv_xml:
+            module.fail_json(msg='Error: Merge bgp import route failed.')
+
+        cmds = []
+        cmd = "import-route %s %s" % (import_protocol, import_process_id)
+        if import_protocol == "direct" or import_protocol == "static":
+            cmd = "import-route %s" % import_protocol
+        cmds.append(cmd)
+
+        return cmds
+
+    def create_bgp_import_route(self, **kwargs):
+        """ create_bgp_import_route """
+
+        module = kwargs["module"]
+
+        vrf_name = module.params['vrf_name']
+
+        af_type = module.params['af_type']
+        import_protocol = module.params['import_protocol']
+        import_process_id = module.params['import_process_id']
+
+        if import_protocol == "direct" or import_protocol == "static":
+            import_process_id = "0"
+
+        conf_str = CE_CREATE_BGP_IMPORT_ROUTE % (
+            vrf_name, af_type, import_protocol, import_process_id)
+
+        recv_xml = self.netconf_set_config(module=module, conf_str=conf_str)
+
+        if "<ok/>" not in recv_xml:
+            module.fail_json(msg='Error: Create bgp import route failed.')
+
+        cmds = []
+        cmd = "import-route %s %s" % (import_protocol, import_process_id)
+        if import_protocol == "direct" or import_protocol == "static":
+            cmd = "import-route %s" % import_protocol
+        cmds.append(cmd)
+
+        return cmds
+
+    def delete_bgp_import_route(self, **kwargs):
+        """ delete_bgp_import_route """
+
+        module = kwargs["module"]
+
+        vrf_name = module.params['vrf_name']
+
+        af_type = module.params['af_type']
+        import_protocol = module.params['import_protocol']
+        import_process_id = module.params['import_process_id']
+
+        if import_protocol == "direct" or import_protocol == "static":
+            import_process_id = "0"
+
+        conf_str = CE_DELETE_BGP_IMPORT_ROUTE % (
+            vrf_name, af_type, import_protocol, import_process_id)
+
+        recv_xml = self.netconf_set_config(module=module, conf_str=conf_str)
+
+        if "<ok/>" not in recv_xml:
+            module.fail_json(msg='Error: Delete bgp import route failed.')
+
+        cmds = []
+        cmd = "undo import-route %s %s" % (import_protocol, import_process_id)
+        if import_protocol == "direct" or import_protocol == "static":
+            cmd = "undo import-route %s" % import_protocol
+        cmds.append(cmd)
+
+        return cmds
+
+    def merge_bgp_network_route(self, **kwargs):
+        """ merge_bgp_network_route """
+
+        module = kwargs["module"]
+
+        vrf_name = module.params['vrf_name']
+
+        af_type = module.params['af_type']
+        network_address = module.params['network_address']
+        mask_len = module.params['mask_len']
+
+        conf_str = CE_MERGE_BGP_NETWORK_ROUTE_HEADER % (
+            vrf_name, af_type, network_address, mask_len) + CE_MERGE_BGP_NETWORK_ROUTE_TAIL
+
+        recv_xml = self.netconf_set_config(module=module, conf_str=conf_str)
+
+        if "<ok/>" not in recv_xml:
+            module.fail_json(msg='Error: Merge bgp network route failed.')
+
+        cmds = []
+        cmd = "network %s %s" % (network_address, mask_len)
+        cmds.append(cmd)
+
+        return cmds
+
+    def create_bgp_network_route(self, **kwargs):
+        """ create_bgp_network_route """
+
+        module = kwargs["module"]
+
+        vrf_name = module.params['vrf_name']
+
+        af_type = module.params['af_type']
+        network_address = module.params['network_address']
+        mask_len = module.params['mask_len']
+
+        conf_str = CE_CREATE_BGP_NETWORK_ROUTE % (
+            vrf_name, af_type, network_address, mask_len)
+
+        recv_xml = self.netconf_set_config(module=module, conf_str=conf_str)
+
+        if "<ok/>" not in recv_xml:
+            module.fail_json(msg='Error: Create bgp network route failed.')
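+        # On success the module returns the CLI equivalent of the NETCONF
+        # change so it can be reported in the "updates" result; for example,
+        # announcing 10.1.1.0/24 under this address family maps to the
+        # device command "network 10.1.1.0 24".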
+ + cmds = [] + cmd = "network %s %s" % (network_address, mask_len) + cmds.append(cmd) + + return cmds + + def delete_bgp_network_route(self, **kwargs): + """ delete_bgp_network_route """ + + module = kwargs["module"] + + vrf_name = module.params['vrf_name'] + + af_type = module.params['af_type'] + network_address = module.params['network_address'] + mask_len = module.params['mask_len'] + + conf_str = CE_DELETE_BGP_NETWORK_ROUTE % ( + vrf_name, af_type, network_address, mask_len) + + recv_xml = self.netconf_set_config(module=module, conf_str=conf_str) + + if "" not in recv_xml: + module.fail_json(msg='Error: Delete bgp network route failed.') + + cmds = [] + cmd = "undo network %s %s" % (network_address, mask_len) + cmds.append(cmd) + + return cmds + + +def main(): + """ main """ + + argument_spec = dict( + state=dict(choices=['present', 'absent'], default='present'), + vrf_name=dict(type='str', required=True), + af_type=dict(choices=['ipv4uni', 'ipv4multi', 'ipv4vpn', + 'ipv6uni', 'ipv6vpn', 'evpn'], required=True), + max_load_ibgp_num=dict(type='str'), + ibgp_ecmp_nexthop_changed=dict(type='str', default='no_use', choices=['no_use', 'true', 'false']), + max_load_ebgp_num=dict(type='str'), + ebgp_ecmp_nexthop_changed=dict(type='str', default='no_use', choices=['no_use', 'true', 'false']), + maximum_load_balance=dict(type='str'), + ecmp_nexthop_changed=dict(type='str', default='no_use', choices=['no_use', 'true', 'false']), + default_local_pref=dict(type='str'), + default_med=dict(type='str'), + default_rt_import_enable=dict(type='str', default='no_use', choices=['no_use', 'true', 'false']), + router_id=dict(type='str'), + vrf_rid_auto_sel=dict(type='str', default='no_use', choices=['no_use', 'true', 'false']), + nexthop_third_party=dict(type='str', default='no_use', choices=['no_use', 'true', 'false']), + summary_automatic=dict(type='str', default='no_use', choices=['no_use', 'true', 'false']), + auto_frr_enable=dict(type='str', default='no_use', choices=['no_use', 'true', 'false']), + load_balancing_as_path_ignore=dict(type='str', default='no_use', choices=['no_use', 'true', 'false']), + rib_only_enable=dict(type='str', default='no_use', choices=['no_use', 'true', 'false']), + rib_only_policy_name=dict(type='str'), + active_route_advertise=dict(type='str', default='no_use', choices=['no_use', 'true', 'false']), + as_path_neglect=dict(type='str', default='no_use', choices=['no_use', 'true', 'false']), + med_none_as_maximum=dict(type='str', default='no_use', choices=['no_use', 'true', 'false']), + router_id_neglect=dict(type='str', default='no_use', choices=['no_use', 'true', 'false']), + igp_metric_ignore=dict(type='str', default='no_use', choices=['no_use', 'true', 'false']), + always_compare_med=dict(type='str', default='no_use', choices=['no_use', 'true', 'false']), + determin_med=dict(type='str', default='no_use', choices=['no_use', 'true', 'false']), + preference_external=dict(type='str'), + preference_internal=dict(type='str'), + preference_local=dict(type='str'), + prefrence_policy_name=dict(type='str'), + reflect_between_client=dict(type='str', default='no_use', choices=['no_use', 'true', 'false']), + reflector_cluster_id=dict(type='str'), + reflector_cluster_ipv4=dict(type='str'), + rr_filter_number=dict(type='str'), + policy_vpn_target=dict(type='str', default='no_use', choices=['no_use', 'true', 'false']), + next_hop_sel_depend_type=dict( + choices=['default', 'dependTunnel', 'dependIp']), + nhp_relay_route_policy_name=dict(type='str'), + ebgp_if_sensitive=dict(type='str', 
default='no_use', choices=['no_use', 'true', 'false']), + reflect_chg_path=dict(type='str', default='no_use', choices=['no_use', 'true', 'false']), + add_path_sel_num=dict(type='str'), + route_sel_delay=dict(type='str'), + allow_invalid_as=dict(type='str', default='no_use', choices=['no_use', 'true', 'false']), + policy_ext_comm_enable=dict(type='str', default='no_use', choices=['no_use', 'true', 'false']), + supernet_uni_adv=dict(type='str', default='no_use', choices=['no_use', 'true', 'false']), + supernet_label_adv=dict(type='str', default='no_use', choices=['no_use', 'true', 'false']), + ingress_lsp_policy_name=dict(type='str'), + originator_prior=dict(type='str', default='no_use', choices=['no_use', 'true', 'false']), + lowest_priority=dict(type='str', default='no_use', choices=['no_use', 'true', 'false']), + relay_delay_enable=dict(type='str', default='no_use', choices=['no_use', 'true', 'false']), + import_protocol=dict( + choices=['direct', 'ospf', 'isis', 'static', 'rip', 'ospfv3', 'ripng']), + import_process_id=dict(type='str'), + network_address=dict(type='str'), + mask_len=dict(type='str')) + + argument_spec.update(ce_argument_spec) + module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True) + + changed = False + proposed = dict() + existing = dict() + end_state = dict() + updates = [] + + state = module.params['state'] + vrf_name = module.params['vrf_name'] + af_type = module.params['af_type'] + max_load_ibgp_num = module.params['max_load_ibgp_num'] + ibgp_ecmp_nexthop_changed = module.params['ibgp_ecmp_nexthop_changed'] + max_load_ebgp_num = module.params['max_load_ebgp_num'] + ebgp_ecmp_nexthop_changed = module.params['ebgp_ecmp_nexthop_changed'] + maximum_load_balance = module.params['maximum_load_balance'] + ecmp_nexthop_changed = module.params['ecmp_nexthop_changed'] + default_local_pref = module.params['default_local_pref'] + default_med = module.params['default_med'] + default_rt_import_enable = module.params['default_rt_import_enable'] + router_id = module.params['router_id'] + vrf_rid_auto_sel = module.params['vrf_rid_auto_sel'] + nexthop_third_party = module.params['nexthop_third_party'] + summary_automatic = module.params['summary_automatic'] + auto_frr_enable = module.params['auto_frr_enable'] + load_balancing_as_path_ignore = module.params[ + 'load_balancing_as_path_ignore'] + rib_only_enable = module.params['rib_only_enable'] + rib_only_policy_name = module.params['rib_only_policy_name'] + active_route_advertise = module.params['active_route_advertise'] + as_path_neglect = module.params['as_path_neglect'] + med_none_as_maximum = module.params['med_none_as_maximum'] + router_id_neglect = module.params['router_id_neglect'] + igp_metric_ignore = module.params['igp_metric_ignore'] + always_compare_med = module.params['always_compare_med'] + determin_med = module.params['determin_med'] + preference_external = module.params['preference_external'] + preference_internal = module.params['preference_internal'] + preference_local = module.params['preference_local'] + prefrence_policy_name = module.params['prefrence_policy_name'] + reflect_between_client = module.params['reflect_between_client'] + reflector_cluster_id = module.params['reflector_cluster_id'] + reflector_cluster_ipv4 = module.params['reflector_cluster_ipv4'] + rr_filter_number = module.params['rr_filter_number'] + policy_vpn_target = module.params['policy_vpn_target'] + next_hop_sel_depend_type = module.params['next_hop_sel_depend_type'] + nhp_relay_route_policy_name = 
module.params['nhp_relay_route_policy_name'] + ebgp_if_sensitive = module.params['ebgp_if_sensitive'] + reflect_chg_path = module.params['reflect_chg_path'] + add_path_sel_num = module.params['add_path_sel_num'] + route_sel_delay = module.params['route_sel_delay'] + allow_invalid_as = module.params['allow_invalid_as'] + policy_ext_comm_enable = module.params['policy_ext_comm_enable'] + supernet_uni_adv = module.params['supernet_uni_adv'] + supernet_label_adv = module.params['supernet_label_adv'] + ingress_lsp_policy_name = module.params['ingress_lsp_policy_name'] + originator_prior = module.params['originator_prior'] + lowest_priority = module.params['lowest_priority'] + relay_delay_enable = module.params['relay_delay_enable'] + import_protocol = module.params['import_protocol'] + import_process_id = module.params['import_process_id'] + network_address = module.params['network_address'] + mask_len = module.params['mask_len'] + + ce_bgp_af_obj = BgpAf() + + if not ce_bgp_af_obj: + module.fail_json(msg='Error: Init module failed.') + + # get proposed + proposed["state"] = state + if vrf_name: + proposed["vrf_name"] = vrf_name + if af_type: + proposed["af_type"] = af_type + if max_load_ibgp_num: + proposed["max_load_ibgp_num"] = max_load_ibgp_num + if ibgp_ecmp_nexthop_changed != 'no_use': + proposed["ibgp_ecmp_nexthop_changed"] = ibgp_ecmp_nexthop_changed + if max_load_ebgp_num: + proposed["max_load_ebgp_num"] = max_load_ebgp_num + if ebgp_ecmp_nexthop_changed != 'no_use': + proposed["ebgp_ecmp_nexthop_changed"] = ebgp_ecmp_nexthop_changed + if maximum_load_balance: + proposed["maximum_load_balance"] = maximum_load_balance + if ecmp_nexthop_changed != 'no_use': + proposed["ecmp_nexthop_changed"] = ecmp_nexthop_changed + if default_local_pref: + proposed["default_local_pref"] = default_local_pref + if default_med: + proposed["default_med"] = default_med + if default_rt_import_enable != 'no_use': + proposed["default_rt_import_enable"] = default_rt_import_enable + if router_id: + proposed["router_id"] = router_id + if vrf_rid_auto_sel != 'no_use': + proposed["vrf_rid_auto_sel"] = vrf_rid_auto_sel + if nexthop_third_party != 'no_use': + proposed["nexthop_third_party"] = nexthop_third_party + if summary_automatic != 'no_use': + proposed["summary_automatic"] = summary_automatic + if auto_frr_enable != 'no_use': + proposed["auto_frr_enable"] = auto_frr_enable + if load_balancing_as_path_ignore != 'no_use': + proposed["load_balancing_as_path_ignore"] = load_balancing_as_path_ignore + if rib_only_enable != 'no_use': + proposed["rib_only_enable"] = rib_only_enable + if rib_only_policy_name: + proposed["rib_only_policy_name"] = rib_only_policy_name + if active_route_advertise != 'no_use': + proposed["active_route_advertise"] = active_route_advertise + if as_path_neglect != 'no_use': + proposed["as_path_neglect"] = as_path_neglect + if med_none_as_maximum != 'no_use': + proposed["med_none_as_maximum"] = med_none_as_maximum + if router_id_neglect != 'no_use': + proposed["router_id_neglect"] = router_id_neglect + if igp_metric_ignore != 'no_use': + proposed["igp_metric_ignore"] = igp_metric_ignore + if always_compare_med != 'no_use': + proposed["always_compare_med"] = always_compare_med + if determin_med != 'no_use': + proposed["determin_med"] = determin_med + if preference_external: + proposed["preference_external"] = preference_external + if preference_internal: + proposed["preference_internal"] = preference_internal + if preference_local: + proposed["preference_local"] = preference_local + if 
prefrence_policy_name: + proposed["prefrence_policy_name"] = prefrence_policy_name + if reflect_between_client != 'no_use': + proposed["reflect_between_client"] = reflect_between_client + if reflector_cluster_id: + proposed["reflector_cluster_id"] = reflector_cluster_id + if reflector_cluster_ipv4: + proposed["reflector_cluster_ipv4"] = reflector_cluster_ipv4 + if rr_filter_number: + proposed["rr_filter_number"] = rr_filter_number + if policy_vpn_target != 'no_use': + proposed["policy_vpn_target"] = policy_vpn_target + if next_hop_sel_depend_type: + proposed["next_hop_sel_depend_type"] = next_hop_sel_depend_type + if nhp_relay_route_policy_name: + proposed["nhp_relay_route_policy_name"] = nhp_relay_route_policy_name + if ebgp_if_sensitive != 'no_use': + proposed["ebgp_if_sensitive"] = ebgp_if_sensitive + if reflect_chg_path != 'no_use': + proposed["reflect_chg_path"] = reflect_chg_path + if add_path_sel_num: + proposed["add_path_sel_num"] = add_path_sel_num + if route_sel_delay: + proposed["route_sel_delay"] = route_sel_delay + if allow_invalid_as != 'no_use': + proposed["allow_invalid_as"] = allow_invalid_as + if policy_ext_comm_enable != 'no_use': + proposed["policy_ext_comm_enable"] = policy_ext_comm_enable + if supernet_uni_adv != 'no_use': + proposed["supernet_uni_adv"] = supernet_uni_adv + if supernet_label_adv != 'no_use': + proposed["supernet_label_adv"] = supernet_label_adv + if ingress_lsp_policy_name: + proposed["ingress_lsp_policy_name"] = ingress_lsp_policy_name + if originator_prior != 'no_use': + proposed["originator_prior"] = originator_prior + if lowest_priority != 'no_use': + proposed["lowest_priority"] = lowest_priority + if relay_delay_enable != 'no_use': + proposed["relay_delay_enable"] = relay_delay_enable + if import_protocol: + proposed["import_protocol"] = import_protocol + if import_process_id: + proposed["import_process_id"] = import_process_id + if network_address: + proposed["network_address"] = network_address + if mask_len: + proposed["mask_len"] = mask_len + + bgp_af_rst = ce_bgp_af_obj.check_bgp_af_args(module=module) + bgp_af_other_rst = ce_bgp_af_obj.check_bgp_af_other_args(module=module) + bgp_af_other_can_del_rst = ce_bgp_af_obj.check_bgp_af_other_can_del( + module=module) + bgp_import_network_route_rst = ce_bgp_af_obj.check_bgp_import_network_route( + module=module) + + # state exist bgp address family config + exist_tmp = dict() + for item in bgp_af_rst: + if item != "need_cfg": + exist_tmp[item] = bgp_af_rst[item] + + if exist_tmp: + existing["bgp af"] = exist_tmp + # state exist bgp address family other config + exist_tmp = dict() + for item in bgp_af_other_rst: + if item != "need_cfg": + exist_tmp[item] = bgp_af_other_rst[item] + if exist_tmp: + existing["bgp af other"] = exist_tmp + # state exist bgp import route config + exist_tmp = dict() + for item in bgp_import_network_route_rst: + if item != "need_cfg": + exist_tmp[item] = bgp_import_network_route_rst[item] + + if exist_tmp: + existing["bgp import & network route"] = exist_tmp + + if state == "present": + if bgp_af_rst["need_cfg"] and bgp_import_network_route_rst["import_need_cfg"] and \ + bgp_import_network_route_rst["network_need_cfg"]: + changed = True + if "af_type" in bgp_af_rst.keys(): + conf_str = CE_MERGE_BGP_ADDRESS_FAMILY_HEADER % ( + vrf_name, af_type) + else: + conf_str = CE_CREATE_BGP_ADDRESS_FAMILY_HEADER % ( + vrf_name, af_type) + + if "bgp_import_route" in bgp_import_network_route_rst.keys(): + conf_str += CE_BGP_MERGE_IMPORT_UNIT % ( + import_protocol, import_process_id) + 
else: + conf_str += CE_BGP_CREATE_IMPORT_UNIT % ( + import_protocol, import_process_id) + + if "bgp_network_route" in bgp_import_network_route_rst.keys(): + conf_str += CE_BGP_MERGE_NETWORK_UNIT % ( + network_address, mask_len) + else: + conf_str += CE_BGP_CREATE_NETWORK_UNIT % ( + network_address, mask_len) + + conf_str += CE_MERGE_BGP_ADDRESS_FAMILY_TAIL + recv_xml = ce_bgp_af_obj.netconf_set_config( + module=module, conf_str=conf_str) + + if "" not in recv_xml: + module.fail_json( + msg='Error: Present bgp af_type import and network route failed.') + + cmd = "import-route %s %s" % (import_protocol, import_process_id) + updates.append(cmd) + cmd = "network %s %s" % (network_address, mask_len) + updates.append(cmd) + + elif bgp_import_network_route_rst["import_need_cfg"] and bgp_import_network_route_rst["network_need_cfg"]: + changed = True + conf_str = CE_BGP_IMPORT_NETWORK_ROUTE_HEADER % (vrf_name, af_type) + + if "bgp_import_route" in bgp_import_network_route_rst.keys(): + conf_str += CE_BGP_MERGE_IMPORT_UNIT % ( + import_protocol, import_process_id) + else: + conf_str += CE_BGP_CREATE_IMPORT_UNIT % ( + import_protocol, import_process_id) + + if "bgp_network_route" in bgp_import_network_route_rst.keys(): + conf_str += CE_BGP_MERGE_NETWORK_UNIT % ( + network_address, mask_len) + else: + conf_str += CE_BGP_CREATE_NETWORK_UNIT % ( + network_address, mask_len) + + conf_str += CE_BGP_IMPORT_NETWORK_ROUTE_TAIL + recv_xml = ce_bgp_af_obj.netconf_set_config( + module=module, conf_str=conf_str) + + if "" not in recv_xml: + module.fail_json( + msg='Error: Present bgp import and network route failed.') + + cmd = "import-route %s %s" % (import_protocol, import_process_id) + updates.append(cmd) + cmd = "network %s %s" % (network_address, mask_len) + updates.append(cmd) + + else: + if bgp_af_rst["need_cfg"]: + if "af_type" in bgp_af_rst.keys(): + cmd = ce_bgp_af_obj.merge_bgp_af(module=module) + changed = True + for item in cmd: + updates.append(item) + else: + cmd = ce_bgp_af_obj.create_bgp_af(module=module) + changed = True + for item in cmd: + updates.append(item) + + if bgp_af_other_rst["need_cfg"]: + cmd = ce_bgp_af_obj.merge_bgp_af_other(module=module) + changed = True + for item in cmd: + updates.append(item) + + if bgp_import_network_route_rst["import_need_cfg"]: + if "bgp_import_route" in bgp_import_network_route_rst.keys(): + cmd = ce_bgp_af_obj.merge_bgp_import_route(module=module) + changed = True + for item in cmd: + updates.append(item) + else: + cmd = ce_bgp_af_obj.create_bgp_import_route(module=module) + changed = True + for item in cmd: + updates.append(item) + + if bgp_import_network_route_rst["network_need_cfg"]: + if "bgp_network_route" in bgp_import_network_route_rst.keys(): + cmd = ce_bgp_af_obj.merge_bgp_network_route(module=module) + changed = True + for item in cmd: + updates.append(item) + else: + cmd = ce_bgp_af_obj.create_bgp_network_route(module=module) + changed = True + for item in cmd: + updates.append(item) + + else: + if bgp_import_network_route_rst["import_need_cfg"] and bgp_import_network_route_rst["network_need_cfg"]: + changed = True + conf_str = CE_BGP_IMPORT_NETWORK_ROUTE_HEADER % (vrf_name, af_type) + conf_str += CE_BGP_DELETE_IMPORT_UNIT % ( + import_protocol, import_process_id) + conf_str += CE_BGP_DELETE_NETWORK_UNIT % ( + network_address, mask_len) + + conf_str += CE_BGP_IMPORT_NETWORK_ROUTE_TAIL + recv_xml = ce_bgp_af_obj.netconf_set_config( + module=module, conf_str=conf_str) + + if "" not in recv_xml: + module.fail_json( + msg='Error: Absent bgp import 
and network route failed.')
+
+                cmd = "undo import-route %s %s" % (import_protocol,
+                                                   import_process_id)
+                updates.append(cmd)
+                cmd = "undo network %s %s" % (network_address, mask_len)
+                updates.append(cmd)
+
+            else:
+                if bgp_import_network_route_rst["import_need_cfg"]:
+                    cmd = ce_bgp_af_obj.delete_bgp_import_route(module=module)
+                    changed = True
+                    for item in cmd:
+                        updates.append(item)
+
+                if bgp_import_network_route_rst["network_need_cfg"]:
+                    cmd = ce_bgp_af_obj.delete_bgp_network_route(module=module)
+                    changed = True
+                    for item in cmd:
+                        updates.append(item)
+
+        if bgp_af_other_can_del_rst["need_cfg"]:
+            cmd = ce_bgp_af_obj.delete_bgp_af_other(module=module)
+            changed = True
+            for item in cmd:
+                updates.append(item)
+
+        if bgp_af_rst["need_cfg"] and not bgp_af_other_can_del_rst["need_cfg"]:
+            cmd = ce_bgp_af_obj.delete_bgp_af(module=module)
+            changed = True
+            for item in cmd:
+                updates.append(item)
+
+        if bgp_af_other_rst["need_cfg"]:
+            pass
+
+    # state end bgp address family config
+    bgp_af_rst = ce_bgp_af_obj.check_bgp_af_args(module=module)
+    end_tmp = dict()
+    for item in bgp_af_rst:
+        if item != "need_cfg":
+            end_tmp[item] = bgp_af_rst[item]
+    if end_tmp:
+        end_state["bgp af"] = end_tmp
+    # state end bgp address family other config
+    bgp_af_other_rst = ce_bgp_af_obj.check_bgp_af_other_args(module=module)
+    end_tmp = dict()
+    for item in bgp_af_other_rst:
+        if item != "need_cfg":
+            end_tmp[item] = bgp_af_other_rst[item]
+    if end_tmp:
+        end_state["bgp af other"] = end_tmp
+    # state end bgp import route config
+    bgp_import_network_route_rst = ce_bgp_af_obj.check_bgp_import_network_route(
+        module=module)
+    end_tmp = dict()
+    for item in bgp_import_network_route_rst:
+        if item != "need_cfg":
+            end_tmp[item] = bgp_import_network_route_rst[item]
+    if end_tmp:
+        end_state["bgp import & network route"] = end_tmp
+    if end_state == existing:
+        changed = False
+        updates = list()
+
+    results = dict()
+    results['proposed'] = proposed
+    results['existing'] = existing
+    results['changed'] = changed
+    results['end_state'] = end_state
+    results['updates'] = updates
+
+    module.exit_json(**results)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/network/cloudengine/ce_bgp_neighbor.py b/plugins/modules/network/cloudengine/ce_bgp_neighbor.py
new file mode 100644
index 0000000000..a61eaf1a11
--- /dev/null
+++ b/plugins/modules/network/cloudengine/ce_bgp_neighbor.py
@@ -0,0 +1,2051 @@
+#!/usr/bin/python
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+DOCUMENTATION = '''
+---
+module: ce_bgp_neighbor
+short_description: Manages BGP peer configuration on HUAWEI CloudEngine switches.
+description:
+    - Manages BGP peer configurations on HUAWEI CloudEngine switches.
+author:
+    - wangdezhuang (@QijunPan)
+notes:
+    - This module requires the netconf system service be enabled on the remote device being managed.
+    - Recommended connection is C(netconf).
+    - This module also works with C(local) connections for legacy playbooks.
+options:
+    state:
+        description:
+            - Specify desired state of the resource.
+        default: present
+        choices: ['present','absent']
+    vrf_name:
+        description:
+            - Name of a BGP instance. The name is a case-sensitive string of characters.
+              The BGP instance can be used only after the corresponding VPN instance is created.
+        required: true
+    peer_addr:
+        description:
+            - Connection address of a peer, which can be an IPv4 or IPv6 address.
+        required: true
+    remote_as:
+        description:
+            - AS number of a peer.
+              The value is a string of 1 to 11 characters.
+        required: true
+    description:
+        description:
+            - Description of a peer, which can be letters or digits.
+              The value is a string of 1 to 80 characters.
+    fake_as:
+        description:
+            - Fake AS number that is specified for a local peer.
+              The value is a string of 1 to 11 characters.
+    dual_as:
+        description:
+            - If the value is true, the EBGP peer can use either a fake AS number or the actual AS number.
+              If the value is false, the EBGP peer can only use a fake AS number.
+        choices: ['no_use','true','false']
+        default: no_use
+    conventional:
+        description:
+            - If the value is true, the router has all extended capabilities.
+              If the value is false, the router does not have all extended capabilities.
+        choices: ['no_use','true','false']
+        default: no_use
+    route_refresh:
+        description:
+            - If the value is true, BGP is enabled to advertise REFRESH packets.
+              If the value is false, the route refresh function is disabled.
+        choices: ['no_use','true','false']
+        default: no_use
+    four_byte_as:
+        description:
+            - If the value is true, the 4-byte AS number capability is advertised to the peer.
+              If the value is false, the 4-byte AS number capability is not advertised.
+        choices: ['no_use','true','false']
+        default: no_use
+    is_ignore:
+        description:
+            - If the value is true, the session with a specified peer is torn down and all related
+              routing entries are cleared.
+              If the value is false, the session with a specified peer is retained.
+        choices: ['no_use','true','false']
+        default: no_use
+    local_if_name:
+        description:
+            - Name of a source interface that sends BGP packets.
+              The value is a string of 1 to 63 characters.
+    ebgp_max_hop:
+        description:
+            - Maximum number of hops in an indirect EBGP connection.
+              The value is an integer ranging from 1 to 255.
+    valid_ttl_hops:
+        description:
+            - Enable GTSM on a peer or peer group.
+              The valid-TTL-Value parameter is used to specify the number of TTL hops to be detected.
+              The value is an integer ranging from 1 to 255.
+    connect_mode:
+        description:
+            - The value can be Connect-only, Listen-only, or Both.
+    is_log_change:
+        description:
+            - If the value is true, BGP is enabled to record peer session status and event information.
+              If the value is false, BGP is disabled from recording peer session status and event information.
+        choices: ['no_use','true','false']
+        default: no_use
+    pswd_type:
+        description:
+            - Enable BGP peers to establish a TCP connection and perform the Message Digest 5 (MD5)
+              authentication for BGP messages.
+        choices: ['null','cipher','simple']
+    pswd_cipher_text:
+        description:
+            - The password character string; spaces are not supported.
+              The value is a string of 1 to 255 characters.
+    keep_alive_time:
+        description:
+            - Specify the Keepalive time of a peer or peer group.
+              The value is an integer ranging from 0 to 21845. The default value is 60.
+    hold_time:
+        description:
+            - Specify the Hold time of a peer or peer group.
+              The value is 0 or an integer ranging from 3 to 65535.
+    min_hold_time:
+        description:
+            - Specify the Min hold time of a peer or peer group.
+              The value is 0 or an integer ranging from 20 to 65535.
+    key_chain_name:
+        description:
+            - Specify the Keychain authentication name used when BGP peers establish a TCP connection.
+              The value is a string of 1 to 47 case-insensitive characters.
+    conn_retry_time:
+        description:
+            - ConnectRetry interval.
+              The value is an integer ranging from 1 to 65535.
+    tcp_MSS:
+        description:
+            - Maximum TCP MSS value used for TCP connection establishment for a peer.
+              The value is an integer ranging from 176 to 4096.
+    mpls_local_ifnet_disable:
+        description:
+            - If the value is true, creation of an MPLS Local IFNET tunnel to the peer is disabled.
+              If the value is false, creation of an MPLS Local IFNET tunnel to the peer is enabled.
+        choices: ['no_use','true','false']
+        default: no_use
+    prepend_global_as:
+        description:
+            - Add the global AS number to the Update packets to be advertised.
+        choices: ['no_use','true','false']
+        default: no_use
+    prepend_fake_as:
+        description:
+            - Add the Fake AS number to received Update packets.
+        choices: ['no_use','true','false']
+        default: no_use
+    is_bfd_block:
+        description:
+            - If the value is true, peers are enabled to inherit the BFD function from the peer group.
+              If the value is false, peers are prevented from inheriting the BFD function from the peer group.
+        choices: ['no_use','true','false']
+        default: no_use
+    multiplier:
+        description:
+            - Specify the detection multiplier. The default value is 3.
+              The value is an integer ranging from 3 to 50.
+    is_bfd_enable:
+        description:
+            - If the value is true, BFD is enabled.
+              If the value is false, BFD is disabled.
+        choices: ['no_use','true','false']
+        default: no_use
+    rx_interval:
+        description:
+            - Specify the minimum interval at which BFD packets are received.
+              The value is an integer ranging from 50 to 1000, in milliseconds.
+    tx_interval:
+        description:
+            - Specify the minimum interval at which BFD packets are sent.
+              The value is an integer ranging from 50 to 1000, in milliseconds.
+    is_single_hop:
+        description:
+            - If the value is true, the system is enabled to preferentially use the single-hop mode for
+              BFD session setup between IBGP peers.
+              If the value is false, the system is disabled from preferentially using the single-hop
+              mode for BFD session setup between IBGP peers.
+        choices: ['no_use','true','false']
+        default: no_use
+'''
+
+EXAMPLES = '''
+
+- name: CloudEngine BGP neighbor test
+  hosts: cloudengine
+  connection: local
+  gather_facts: no
+  vars:
+    cli:
+      host: "{{ inventory_hostname }}"
+      port: "{{ ansible_ssh_port }}"
+      username: "{{ username }}"
+      password: "{{ password }}"
+      transport: cli
+
+  tasks:
+
+  - name: "Config bgp peer"
+    ce_bgp_neighbor:
+      state: present
+      vrf_name: js
+      peer_addr: 192.168.10.10
+      remote_as: 500
+      provider: "{{ cli }}"
+
+  - name: "Delete bgp peer"
+    ce_bgp_neighbor:
+      state: absent
+      vrf_name: js
+      peer_addr: 192.168.10.10
+      provider: "{{ cli }}"
+'''
+
+RETURN = '''
+changed:
+    description: check to see if a change was made on the device
+    returned: always
+    type: bool
+    sample: true
+proposed:
+    description: k/v pairs of parameters passed into module
+    returned: always
+    type: dict
+    sample: {"peer_addr": "192.168.10.10", "remote_as": "500", "state": "present", "vrf_name": "js"}
+existing:
+    description: k/v pairs of existing BGP peer configuration
+    returned: always
+    type: dict
+    sample: {"bgp peer": []}
+end_state:
+    description: k/v pairs of BGP peer configuration after module execution
+    returned: always
+    type: dict
+    sample: {"bgp peer": [["192.168.10.10", "500"]]}
+updates:
+    description: command sent to the device
+    returned: always
+    type: list
+    sample: ["peer 192.168.10.10 as-number 500"]
+'''
+
+import re
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.network.cloudengine.ce import get_nc_config, set_nc_config, ce_argument_spec, check_ip_addr
+
+
+# get bgp peer
+CE_GET_BGP_PEER_HEADER = """
+    <filter type="subtree">
+      <bgp xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
+        <bgpcomm>
+          <bgpVrfs>
+            <bgpVrf>
+              <vrfName>%s</vrfName>
+              <bgpPeers>
+                <bgpPeer>
+                  <peerAddr>%s</peerAddr>
+"""
+CE_GET_BGP_PEER_TAIL = """
+                </bgpPeer>
+              </bgpPeers>
+            </bgpVrf>
+          </bgpVrfs>
+        </bgpcomm>
+      </bgp>
+    </filter>
+"""
+
+# merge bgp peer
+CE_MERGE_BGP_PEER_HEADER = """
+    <config>
+      <bgp xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
+        <bgpcomm>
+          <bgpVrfs>
+            <bgpVrf>
+              <vrfName>%s</vrfName>
+              <bgpPeers>
+                <bgpPeer operation="merge">
+                  <peerAddr>%s</peerAddr>
+"""
+CE_MERGE_BGP_PEER_TAIL = """
+                </bgpPeer>
+              </bgpPeers>
+            </bgpVrf>
+          </bgpVrfs>
+        </bgpcomm>
+      </bgp>
+    </config>
+"""
+
+# create bgp peer
+CE_CREATE_BGP_PEER_HEADER = """
+    <config>
+      <bgp xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
+        <bgpcomm>
+          <bgpVrfs>
+            <bgpVrf>
+              <vrfName>%s</vrfName>
+              <bgpPeers>
+                <bgpPeer operation="create">
+                  <peerAddr>%s</peerAddr>
+"""
+CE_CREATE_BGP_PEER_TAIL = """
+                </bgpPeer>
+              </bgpPeers>
+            </bgpVrf>
+          </bgpVrfs>
+        </bgpcomm>
+      </bgp>
+    </config>
+"""
+
+# delete bgp peer
+CE_DELETE_BGP_PEER_HEADER = """
+    <config>
+      <bgp xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
+        <bgpcomm>
+          <bgpVrfs>
+            <bgpVrf>
+              <vrfName>%s</vrfName>
+              <bgpPeers>
+                <bgpPeer operation="delete">
+                  <peerAddr>%s</peerAddr>
+"""
+CE_DELETE_BGP_PEER_TAIL = """
+                </bgpPeer>
+              </bgpPeers>
+            </bgpVrf>
+          </bgpVrfs>
+        </bgpcomm>
+      </bgp>
+    </config>
+"""
+
+# get peer bfd
+CE_GET_PEER_BFD_HEADER = """
+    <filter type="subtree">
+      <bgp xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
+        <bgpcomm>
+          <bgpVrfs>
+            <bgpVrf>
+              <vrfName>%s</vrfName>
+              <bgpPeers>
+                <bgpPeer>
+                  <peerAddr>%s</peerAddr>
+                  <peerBfd>
+"""
+CE_GET_PEER_BFD_TAIL = """
+                  </peerBfd>
+                </bgpPeer>
+              </bgpPeers>
+            </bgpVrf>
+          </bgpVrfs>
+        </bgpcomm>
+      </bgp>
+    </filter>
+"""
+
+# merge peer bfd
+CE_MERGE_PEER_BFD_HEADER = """
+    <config>
+      <bgp xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
+        <bgpcomm>
+          <bgpVrfs>
+            <bgpVrf>
+              <vrfName>%s</vrfName>
+              <bgpPeers>
+                <bgpPeer>
+                  <peerAddr>%s</peerAddr>
+                  <peerBfd operation="merge">
+"""
+CE_MERGE_PEER_BFD_TAIL = """
+                  </peerBfd>
+                </bgpPeer>
+              </bgpPeers>
+            </bgpVrf>
+          </bgpVrfs>
+        </bgpcomm>
+      </bgp>
+    </config>
+"""
+
+# delete peer bfd
+CE_DELETE_PEER_BFD_HEADER = """
+    <config>
+      <bgp xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
+        <bgpcomm>
+          <bgpVrfs>
+            <bgpVrf>
+              <vrfName>%s</vrfName>
+              <bgpPeers>
+                <bgpPeer>
+                  <peerAddr>%s</peerAddr>
+                  <peerBfd operation="delete">
+"""
+CE_DELETE_PEER_BFD_TAIL = """
+                  </peerBfd>
+                </bgpPeer>
+              </bgpPeers>
+            </bgpVrf>
+          </bgpVrfs>
+        </bgpcomm>
+      </bgp>
+    </config>
+"""
+
+
+class BgpNeighbor(object):
+    """ Manages BGP peer configuration """
+
+    def netconf_get_config(self, **kwargs):
+        """ netconf_get_config """
+
+        module = kwargs["module"]
+        conf_str = kwargs["conf_str"]
+
+        xml_str = get_nc_config(module, conf_str)
+
+        return xml_str
+
+    def netconf_set_config(self, **kwargs):
+        """ netconf_set_config """
+
+        module = kwargs["module"]
+        conf_str = kwargs["conf_str"]
+
+        xml_str = set_nc_config(module, conf_str)
+
+        return xml_str
+
+    def check_bgp_peer_args(self, **kwargs):
+        """ check_bgp_peer_args """
+
+        module = kwargs["module"]
+        result = dict()
+        need_cfg = False
+
+        vrf_name = module.params['vrf_name']
+        if vrf_name:
+            if len(vrf_name) > 31 or len(vrf_name) == 0:
+                module.fail_json(
+                    msg='Error: The len of vrf_name %s is out of [1 - 31].' % vrf_name)
+
+        peer_addr = module.params['peer_addr']
+        if peer_addr:
+            if not check_ip_addr(ipaddr=peer_addr):
+                module.fail_json(
+                    msg='Error: The peer_addr %s is invalid.'
% peer_addr) + + need_cfg = True + + remote_as = module.params['remote_as'] + if remote_as: + if len(remote_as) > 11 or len(remote_as) < 1: + module.fail_json( + msg='Error: The len of remote_as %s is out of [1 - 11].' % remote_as) + + need_cfg = True + + result["need_cfg"] = need_cfg + return result + + def check_bgp_peer_other_args(self, **kwargs): + """ check_bgp_peer_other_args """ + + module = kwargs["module"] + result = dict() + need_cfg = False + + peerip = module.params['peer_addr'] + vrf_name = module.params['vrf_name'] + if vrf_name: + if len(vrf_name) > 31 or len(vrf_name) == 0: + module.fail_json( + msg='Error: The len of vrf_name %s is out of [1 - 31].' % vrf_name) + + description = module.params['description'] + if description: + if len(description) > 80 or len(description) < 1: + module.fail_json( + msg='Error: The len of description %s is out of [1 - 80].' % description) + + conf_str = CE_GET_BGP_PEER_HEADER % (vrf_name, peerip) + \ + "" + CE_GET_BGP_PEER_TAIL + recv_xml = self.netconf_get_config(module=module, conf_str=conf_str) + + if "" in recv_xml: + need_cfg = True + else: + re_find = re.findall( + r'.*(.*).*', recv_xml) + + if re_find: + result["description"] = re_find + if re_find[0] != description: + need_cfg = True + else: + need_cfg = True + + fake_as = module.params['fake_as'] + if fake_as: + if len(fake_as) > 11 or len(fake_as) < 1: + module.fail_json( + msg='Error: The len of fake_as %s is out of [1 - 11].' % fake_as) + + conf_str = CE_GET_BGP_PEER_HEADER % (vrf_name, peerip) + \ + "" + CE_GET_BGP_PEER_TAIL + recv_xml = self.netconf_get_config(module=module, conf_str=conf_str) + + if "" in recv_xml: + need_cfg = True + else: + re_find = re.findall( + r'.*(.*).*', recv_xml) + + if re_find: + result["fake_as"] = re_find + if re_find[0] != fake_as: + need_cfg = True + else: + need_cfg = True + + dual_as = module.params['dual_as'] + if dual_as != 'no_use': + if not fake_as: + module.fail_json(msg='fake_as must exist.') + conf_str = CE_GET_BGP_PEER_HEADER % (vrf_name, peerip) + \ + "" + CE_GET_BGP_PEER_TAIL + recv_xml = self.netconf_get_config(module=module, conf_str=conf_str) + + if "" in recv_xml: + need_cfg = True + else: + re_find = re.findall( + r'.*(.*).*', recv_xml) + + if re_find: + result["dual_as"] = re_find + if re_find[0] != dual_as: + need_cfg = True + else: + need_cfg = True + + conventional = module.params['conventional'] + if conventional != 'no_use': + + conf_str = CE_GET_BGP_PEER_HEADER % (vrf_name, peerip) + \ + "" + CE_GET_BGP_PEER_TAIL + recv_xml = self.netconf_get_config(module=module, conf_str=conf_str) + if "" in recv_xml: + need_cfg = True + else: + re_find = re.findall( + r'.*(.*).*', recv_xml) + + if re_find: + result["conventional"] = re_find + if re_find[0] != conventional: + need_cfg = True + else: + need_cfg = True + + route_refresh = module.params['route_refresh'] + if route_refresh != 'no_use': + + conf_str = CE_GET_BGP_PEER_HEADER % (vrf_name, peerip) + \ + "" + CE_GET_BGP_PEER_TAIL + recv_xml = self.netconf_get_config(module=module, conf_str=conf_str) + + if "" in recv_xml: + need_cfg = True + else: + re_find = re.findall( + r'.*(.*).*', recv_xml) + + if re_find: + result["route_refresh"] = re_find + if re_find[0] != route_refresh: + need_cfg = True + else: + need_cfg = True + + four_byte_as = module.params['four_byte_as'] + if four_byte_as != 'no_use': + + conf_str = CE_GET_BGP_PEER_HEADER % (vrf_name, peerip) + \ + "" + CE_GET_BGP_PEER_TAIL + recv_xml = self.netconf_get_config(module=module, conf_str=conf_str) + + if "" in 
recv_xml: + need_cfg = True + else: + re_find = re.findall( + r'.*(.*).*', recv_xml) + + if re_find: + result["four_byte_as"] = re_find + if re_find[0] != four_byte_as: + need_cfg = True + else: + need_cfg = True + + is_ignore = module.params['is_ignore'] + if is_ignore != 'no_use': + + conf_str = CE_GET_BGP_PEER_HEADER % (vrf_name, peerip) + \ + "" + CE_GET_BGP_PEER_TAIL + recv_xml = self.netconf_get_config(module=module, conf_str=conf_str) + + if "" in recv_xml: + need_cfg = True + else: + re_find = re.findall( + r'.*(.*).*', recv_xml) + + if re_find: + result["is_ignore"] = re_find + if re_find[0] != is_ignore: + need_cfg = True + else: + need_cfg = True + + local_if_name = module.params['local_if_name'] + if local_if_name: + if len(local_if_name) > 63 or len(local_if_name) < 1: + module.fail_json( + msg='Error: The len of local_if_name %s is out of [1 - 63].' % local_if_name) + + conf_str = CE_GET_BGP_PEER_HEADER % (vrf_name, peerip) + \ + "" + CE_GET_BGP_PEER_TAIL + recv_xml = self.netconf_get_config(module=module, conf_str=conf_str) + + if "" in recv_xml: + need_cfg = True + else: + re_find = re.findall( + r'.*(.*).*', recv_xml) + + if re_find: + result["local_if_name"] = re_find + if re_find[0].lower() != local_if_name.lower(): + need_cfg = True + else: + need_cfg = True + + ebgp_max_hop = module.params['ebgp_max_hop'] + if ebgp_max_hop: + if int(ebgp_max_hop) > 255 or int(ebgp_max_hop) < 1: + module.fail_json( + msg='Error: The value of ebgp_max_hop %s is out of [1 - 255].' % ebgp_max_hop) + + conf_str = CE_GET_BGP_PEER_HEADER % (vrf_name, peerip) + \ + "" + CE_GET_BGP_PEER_TAIL + recv_xml = self.netconf_get_config(module=module, conf_str=conf_str) + + if "" in recv_xml: + need_cfg = True + else: + re_find = re.findall( + r'.*(.*).*', recv_xml) + + if re_find: + result["ebgp_max_hop"] = re_find + if re_find[0] != ebgp_max_hop: + need_cfg = True + else: + need_cfg = True + + valid_ttl_hops = module.params['valid_ttl_hops'] + if valid_ttl_hops: + if int(valid_ttl_hops) > 255 or int(valid_ttl_hops) < 1: + module.fail_json( + msg='Error: The value of valid_ttl_hops %s is out of [1 - 255].' 
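+                    # GTSM note: with valid-ttl-hops set to N, only packets whose TTL falls
+                    # in [255 - N + 1, 255] are accepted, which is why N is limited to 1-255.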
+                    % valid_ttl_hops)
+
+            conf_str = CE_GET_BGP_PEER_HEADER % (vrf_name, peerip) + \
+                "<validTtlHops></validTtlHops>" + CE_GET_BGP_PEER_TAIL
+            recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
+
+            if "<data/>" in recv_xml:
+                need_cfg = True
+            else:
+                re_find = re.findall(
+                    r'.*<validTtlHops>(.*)</validTtlHops>.*', recv_xml)
+
+                if re_find:
+                    result["valid_ttl_hops"] = re_find
+                    if re_find[0] != valid_ttl_hops:
+                        need_cfg = True
+                else:
+                    need_cfg = True
+
+        connect_mode = module.params['connect_mode']
+        if connect_mode:
+
+            conf_str = CE_GET_BGP_PEER_HEADER % (vrf_name, peerip) + \
+                "<connectMode></connectMode>" + CE_GET_BGP_PEER_TAIL
+            recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
+
+            if "<data/>" in recv_xml:
+                need_cfg = True
+            else:
+                re_find = re.findall(
+                    r'.*<connectMode>(.*)</connectMode>.*', recv_xml)
+
+                if re_find:
+                    result["connect_mode"] = re_find
+                    if re_find[0] != connect_mode:
+                        need_cfg = True
+                else:
+                    need_cfg = True
+
+        is_log_change = module.params['is_log_change']
+        if is_log_change != 'no_use':
+
+            conf_str = CE_GET_BGP_PEER_HEADER % (vrf_name, peerip) + \
+                "<isLogChange></isLogChange>" + CE_GET_BGP_PEER_TAIL
+            recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
+
+            if "<data/>" in recv_xml:
+                need_cfg = True
+            else:
+                re_find = re.findall(
+                    r'.*<isLogChange>(.*)</isLogChange>.*', recv_xml)
+
+                if re_find:
+                    result["is_log_change"] = re_find
+                    if re_find[0] != is_log_change:
+                        need_cfg = True
+                else:
+                    need_cfg = True
+
+        pswd_type = module.params['pswd_type']
+        if pswd_type:
+
+            conf_str = CE_GET_BGP_PEER_HEADER % (vrf_name, peerip) + \
+                "<pswdType></pswdType>" + CE_GET_BGP_PEER_TAIL
+            recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
+
+            if "<data/>" in recv_xml:
+                need_cfg = True
+            else:
+                re_find = re.findall(
+                    r'.*<pswdType>(.*)</pswdType>.*', recv_xml)
+
+                if re_find:
+                    result["pswd_type"] = re_find
+                    if re_find[0] != pswd_type:
+                        need_cfg = True
+                else:
+                    need_cfg = True
+
+        pswd_cipher_text = module.params['pswd_cipher_text']
+        if pswd_cipher_text:
+            if len(pswd_cipher_text) > 255 or len(pswd_cipher_text) < 1:
+                module.fail_json(
+                    msg='Error: The len of pswd_cipher_text %s is out of [1 - 255].' % pswd_cipher_text)
+
+            conf_str = CE_GET_BGP_PEER_HEADER % (vrf_name, peerip) + \
+                "<pswdCipherText></pswdCipherText>" + CE_GET_BGP_PEER_TAIL
+            recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
+
+            if "<data/>" in recv_xml:
+                need_cfg = True
+            else:
+                re_find = re.findall(
+                    r'.*<pswdCipherText>(.*)</pswdCipherText>.*', recv_xml)
+
+                if re_find:
+                    result["pswd_cipher_text"] = re_find
+                    if re_find[0] != pswd_cipher_text:
+                        need_cfg = True
+                else:
+                    need_cfg = True
+
+        keep_alive_time = module.params['keep_alive_time']
+        if keep_alive_time:
+            if int(keep_alive_time) > 21845 or int(keep_alive_time) < 0:
+                module.fail_json(
+                    msg='Error: The value of keep_alive_time %s is out of [0 - 21845].' % keep_alive_time)
+
+            conf_str = CE_GET_BGP_PEER_HEADER % (vrf_name, peerip) + \
+                "<keepAliveTime></keepAliveTime>" + CE_GET_BGP_PEER_TAIL
+            recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
+
+            if "<data/>" in recv_xml:
+                need_cfg = True
+            else:
+                re_find = re.findall(
+                    r'.*<keepAliveTime>(.*)</keepAliveTime>.*', recv_xml)
+
+                if re_find:
+                    result["keep_alive_time"] = re_find
+                    if re_find[0] != keep_alive_time:
+                        need_cfg = True
+                else:
+                    need_cfg = True
+
+        hold_time = module.params['hold_time']
+        if hold_time:
+            if int(hold_time) != 0 and (int(hold_time) > 65535 or int(hold_time) < 3):
+                module.fail_json(
+                    msg='Error: The value of hold_time %s is out of [0 or 3 - 65535].'
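+                    # BGP timer note: the keepalive interval is conventionally no more than
+                    # one third of the hold time, hence hold_time of 0 or [3 - 65535].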
% hold_time) + + conf_str = CE_GET_BGP_PEER_HEADER % (vrf_name, peerip) + \ + "" + CE_GET_BGP_PEER_TAIL + recv_xml = self.netconf_get_config(module=module, conf_str=conf_str) + + if "" in recv_xml: + need_cfg = True + else: + re_find = re.findall( + r'.*(.*).*', recv_xml) + + if re_find: + result["hold_time"] = re_find + if re_find[0] != hold_time: + need_cfg = True + else: + need_cfg = True + + min_hold_time = module.params['min_hold_time'] + if min_hold_time: + if int(min_hold_time) != 0 and (int(min_hold_time) > 65535 or int(min_hold_time) < 20): + module.fail_json( + msg='Error: The value of min_hold_time %s is out of [0 or 20 - 65535].' % min_hold_time) + + conf_str = CE_GET_BGP_PEER_HEADER % (vrf_name, peerip) + \ + "" + CE_GET_BGP_PEER_TAIL + recv_xml = self.netconf_get_config(module=module, conf_str=conf_str) + + if "" in recv_xml: + need_cfg = True + else: + re_find = re.findall( + r'.*(.*).*', recv_xml) + + if re_find: + result["min_hold_time"] = re_find + if re_find[0] != min_hold_time: + need_cfg = True + else: + need_cfg = True + + key_chain_name = module.params['key_chain_name'] + if key_chain_name: + if len(key_chain_name) > 47 or len(key_chain_name) < 1: + module.fail_json( + msg='Error: The len of key_chain_name %s is out of [1 - 47].' % key_chain_name) + + conf_str = CE_GET_BGP_PEER_HEADER % (vrf_name, peerip) + \ + "" + CE_GET_BGP_PEER_TAIL + recv_xml = self.netconf_get_config(module=module, conf_str=conf_str) + + if "" in recv_xml: + need_cfg = True + else: + re_find = re.findall( + r'.*(.*).*', recv_xml) + + if re_find: + result["key_chain_name"] = re_find + if re_find[0] != key_chain_name: + need_cfg = True + else: + need_cfg = True + + conn_retry_time = module.params['conn_retry_time'] + if conn_retry_time: + if int(conn_retry_time) > 65535 or int(conn_retry_time) < 1: + module.fail_json( + msg='Error: The value of conn_retry_time %s is out of [1 - 65535].' % conn_retry_time) + + conf_str = CE_GET_BGP_PEER_HEADER % (vrf_name, peerip) + \ + "" + CE_GET_BGP_PEER_TAIL + recv_xml = self.netconf_get_config(module=module, conf_str=conf_str) + + if "" in recv_xml: + need_cfg = True + else: + re_find = re.findall( + r'.*(.*).*', recv_xml) + + if re_find: + result["conn_retry_time"] = re_find + if re_find[0] != conn_retry_time: + need_cfg = True + else: + need_cfg = True + + tcp_mss = module.params['tcp_MSS'] + if tcp_mss: + if int(tcp_mss) > 4096 or int(tcp_mss) < 176: + module.fail_json( + msg='Error: The value of tcp_mss %s is out of [176 - 4096].' 
% tcp_mss) + + conf_str = CE_GET_BGP_PEER_HEADER % (vrf_name, peerip) + \ + "" + CE_GET_BGP_PEER_TAIL + recv_xml = self.netconf_get_config(module=module, conf_str=conf_str) + + if "" in recv_xml: + need_cfg = True + else: + re_find = re.findall( + r'.*(.*).*', recv_xml) + + if re_find: + result["tcp_MSS"] = re_find + if re_find[0] != tcp_mss: + need_cfg = True + else: + need_cfg = True + + mpls_local_ifnet_disable = module.params['mpls_local_ifnet_disable'] + if mpls_local_ifnet_disable != 'no_use': + + conf_str = CE_GET_BGP_PEER_HEADER % (vrf_name, peerip) + \ + "" + CE_GET_BGP_PEER_TAIL + recv_xml = self.netconf_get_config(module=module, conf_str=conf_str) + + if "" in recv_xml: + need_cfg = True + else: + re_find = re.findall( + r'.*(.*).*', recv_xml) + + if re_find: + result["mpls_local_ifnet_disable"] = re_find + if re_find[0] != mpls_local_ifnet_disable: + need_cfg = True + else: + need_cfg = True + + prepend_global_as = module.params['prepend_global_as'] + if prepend_global_as != 'no_use': + if not fake_as: + module.fail_json(msg='fake_as must exist.') + conf_str = CE_GET_BGP_PEER_HEADER % (vrf_name, peerip) + \ + "" + CE_GET_BGP_PEER_TAIL + recv_xml = self.netconf_get_config(module=module, conf_str=conf_str) + + if "" in recv_xml: + need_cfg = True + else: + re_find = re.findall( + r'.*(.*).*', recv_xml) + + if re_find: + result["prepend_global_as"] = re_find + if re_find[0] != prepend_global_as: + need_cfg = True + else: + need_cfg = True + + prepend_fake_as = module.params['prepend_fake_as'] + if prepend_fake_as != 'no_use': + if not fake_as: + module.fail_json(msg='fake_as must exist.') + conf_str = CE_GET_BGP_PEER_HEADER % (vrf_name, peerip) + \ + "" + CE_GET_BGP_PEER_TAIL + recv_xml = self.netconf_get_config(module=module, conf_str=conf_str) + + if "" in recv_xml: + need_cfg = True + else: + re_find = re.findall( + r'.*(.*).*', recv_xml) + + if re_find: + result["prepend_fake_as"] = re_find + if re_find[0] != prepend_fake_as: + need_cfg = True + else: + need_cfg = True + + result["need_cfg"] = need_cfg + return result + + def check_peer_bfd_merge_args(self, **kwargs): + """ check_peer_bfd_merge_args """ + + module = kwargs["module"] + result = dict() + need_cfg = False + + state = module.params['state'] + if state == "absent": + result["need_cfg"] = need_cfg + return result + + vrf_name = module.params['vrf_name'] + if vrf_name: + if len(vrf_name) > 31 or len(vrf_name) == 0: + module.fail_json( + msg='Error: The len of vrf_name %s is out of [1 - 31].' % vrf_name) + + peer_addr = module.params['peer_addr'] + + is_bfd_block = module.params['is_bfd_block'] + if is_bfd_block != 'no_use': + + conf_str = CE_GET_PEER_BFD_HEADER % ( + vrf_name, peer_addr) + "" + CE_GET_PEER_BFD_TAIL + recv_xml = self.netconf_get_config(module=module, conf_str=conf_str) + + if "" in recv_xml: + need_cfg = True + else: + re_find = re.findall( + r'.*(.*).*', recv_xml) + + if re_find: + result["is_bfd_block"] = re_find + if re_find[0] != is_bfd_block: + need_cfg = True + else: + need_cfg = True + + multiplier = module.params['multiplier'] + if multiplier: + if int(multiplier) > 50 or int(multiplier) < 3: + module.fail_json( + msg='Error: The value of multiplier %s is out of [3 - 50].' 
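+                    # BFD note: the session is declared down after 'multiplier' consecutive
+                    # control packets are lost, so larger values tolerate more packet loss.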
% multiplier) + + conf_str = CE_GET_PEER_BFD_HEADER % ( + vrf_name, peer_addr) + "" + CE_GET_PEER_BFD_TAIL + recv_xml = self.netconf_get_config(module=module, conf_str=conf_str) + + if "" in recv_xml: + need_cfg = True + else: + re_find = re.findall( + r'.*(.*).*', recv_xml) + + if re_find: + result["multiplier"] = re_find + if re_find[0] != multiplier: + need_cfg = True + else: + need_cfg = True + + is_bfd_enable = module.params['is_bfd_enable'] + if is_bfd_enable != 'no_use': + + conf_str = CE_GET_PEER_BFD_HEADER % ( + vrf_name, peer_addr) + "" + CE_GET_PEER_BFD_TAIL + recv_xml = self.netconf_get_config(module=module, conf_str=conf_str) + + if "" in recv_xml: + need_cfg = True + else: + re_find = re.findall( + r'.*(.*).*', recv_xml) + + if re_find: + result["is_bfd_enable"] = re_find + if re_find[0] != is_bfd_enable: + need_cfg = True + else: + need_cfg = True + + rx_interval = module.params['rx_interval'] + if rx_interval: + if int(rx_interval) > 1000 or int(rx_interval) < 50: + module.fail_json( + msg='Error: The value of rx_interval %s is out of [50 - 1000].' % rx_interval) + + conf_str = CE_GET_PEER_BFD_HEADER % ( + vrf_name, peer_addr) + "" + CE_GET_PEER_BFD_TAIL + recv_xml = self.netconf_get_config(module=module, conf_str=conf_str) + + if "" in recv_xml: + need_cfg = True + else: + re_find = re.findall( + r'.*(.*).*', recv_xml) + + if re_find: + result["rx_interval"] = re_find + if re_find[0] != rx_interval: + need_cfg = True + else: + need_cfg = True + + tx_interval = module.params['tx_interval'] + if tx_interval: + if int(tx_interval) > 1000 or int(tx_interval) < 50: + module.fail_json( + msg='Error: The value of tx_interval %s is out of [50 - 1000].' % tx_interval) + + conf_str = CE_GET_PEER_BFD_HEADER % ( + vrf_name, peer_addr) + "" + CE_GET_PEER_BFD_TAIL + recv_xml = self.netconf_get_config(module=module, conf_str=conf_str) + + if "" in recv_xml: + need_cfg = True + else: + re_find = re.findall( + r'.*(.*).*', recv_xml) + + if re_find: + result["tx_interval"] = re_find + if re_find[0] != tx_interval: + need_cfg = True + else: + need_cfg = True + + is_single_hop = module.params['is_single_hop'] + if is_single_hop != 'no_use': + + conf_str = CE_GET_PEER_BFD_HEADER % ( + vrf_name, peer_addr) + "" + CE_GET_PEER_BFD_TAIL + recv_xml = self.netconf_get_config(module=module, conf_str=conf_str) + + if "" in recv_xml: + need_cfg = True + else: + re_find = re.findall( + r'.*(.*).*', recv_xml) + + if re_find: + result["is_single_hop"] = re_find + if re_find[0] != is_single_hop: + need_cfg = True + else: + need_cfg = True + + result["need_cfg"] = need_cfg + return result + + def check_peer_bfd_delete_args(self, **kwargs): + """ check_peer_bfd_delete_args """ + + module = kwargs["module"] + result = dict() + need_cfg = False + + state = module.params['state'] + if state == "present": + result["need_cfg"] = need_cfg + return result + + vrf_name = module.params['vrf_name'] + if vrf_name: + if len(vrf_name) > 31 or len(vrf_name) == 0: + module.fail_json( + msg='Error: The len of vrf_name %s is out of [1 - 31].' 
% vrf_name) + + peer_addr = module.params['peer_addr'] + + is_bfd_block = module.params['is_bfd_block'] + if is_bfd_block != 'no_use': + + conf_str = CE_GET_PEER_BFD_HEADER % ( + vrf_name, peer_addr) + "" + CE_GET_PEER_BFD_TAIL + recv_xml = self.netconf_get_config(module=module, conf_str=conf_str) + + if "" in recv_xml: + pass + else: + re_find = re.findall( + r'.*(.*).*', recv_xml) + + if re_find: + result["is_bfd_block"] = re_find + if re_find[0] == is_bfd_block: + need_cfg = True + + multiplier = module.params['multiplier'] + if multiplier: + if int(multiplier) > 50 or int(multiplier) < 3: + module.fail_json( + msg='Error: The value of multiplier %s is out of [3 - 50].' % multiplier) + + conf_str = CE_GET_PEER_BFD_HEADER % ( + vrf_name, peer_addr) + "" + CE_GET_PEER_BFD_TAIL + recv_xml = self.netconf_get_config(module=module, conf_str=conf_str) + + if "" in recv_xml: + pass + else: + re_find = re.findall( + r'.*(.*).*', recv_xml) + + if re_find: + result["multiplier"] = re_find + if re_find[0] == multiplier: + need_cfg = True + + is_bfd_enable = module.params['is_bfd_enable'] + if is_bfd_enable != 'no_use': + + conf_str = CE_GET_PEER_BFD_HEADER % ( + vrf_name, peer_addr) + "" + CE_GET_PEER_BFD_TAIL + recv_xml = self.netconf_get_config(module=module, conf_str=conf_str) + + if "" in recv_xml: + pass + else: + re_find = re.findall( + r'.*(.*).*', recv_xml) + + if re_find: + result["is_bfd_enable"] = re_find + if re_find[0] == is_bfd_enable: + need_cfg = True + + rx_interval = module.params['rx_interval'] + if rx_interval: + if int(rx_interval) > 1000 or int(rx_interval) < 50: + module.fail_json( + msg='Error: The value of rx_interval %s is out of [50 - 1000].' % rx_interval) + + conf_str = CE_GET_PEER_BFD_HEADER % ( + vrf_name, peer_addr) + "" + CE_GET_PEER_BFD_TAIL + recv_xml = self.netconf_get_config(module=module, conf_str=conf_str) + + if "" in recv_xml: + pass + else: + re_find = re.findall( + r'.*(.*).*', recv_xml) + + if re_find: + result["rx_interval"] = re_find + if re_find[0] == rx_interval: + need_cfg = True + + tx_interval = module.params['tx_interval'] + if tx_interval: + if int(tx_interval) > 1000 or int(tx_interval) < 50: + module.fail_json( + msg='Error: The value of tx_interval %s is out of [50 - 1000].' % tx_interval) + + conf_str = CE_GET_PEER_BFD_HEADER % ( + vrf_name, peer_addr) + "" + CE_GET_PEER_BFD_TAIL + recv_xml = self.netconf_get_config(module=module, conf_str=conf_str) + + if "" in recv_xml: + pass + else: + re_find = re.findall( + r'.*(.*).*', recv_xml) + + if re_find: + result["tx_interval"] = re_find + if re_find[0] == tx_interval: + need_cfg = True + + is_single_hop = module.params['is_single_hop'] + if is_single_hop != 'no_use': + + conf_str = CE_GET_PEER_BFD_HEADER % ( + vrf_name, peer_addr) + "" + CE_GET_PEER_BFD_TAIL + recv_xml = self.netconf_get_config(module=module, conf_str=conf_str) + + if "" in recv_xml: + pass + else: + re_find = re.findall( + r'.*(.*).*', recv_xml) + + if re_find: + result["is_single_hop"] = re_find + if re_find[0] == is_single_hop: + need_cfg = True + + result["need_cfg"] = need_cfg + return result + + def get_bgp_peer(self, **kwargs): + """ get_bgp_peer """ + + module = kwargs["module"] + peerip = module.params['peer_addr'] + vrf_name = module.params['vrf_name'] + if vrf_name: + if len(vrf_name) > 31 or len(vrf_name) == 0: + module.fail_json( + msg='Error: The len of vrf_name %s is out of [1 - 31].' 
% vrf_name) + + conf_str = CE_GET_BGP_PEER_HEADER % (vrf_name, peerip) + \ + "" + CE_GET_BGP_PEER_TAIL + + xml_str = self.netconf_get_config(module=module, conf_str=conf_str) + + result = list() + + if "" in xml_str: + return result + else: + re_find = re.findall( + r'.*(.*).*\s.*(.*).*', xml_str) + + if re_find: + return re_find + else: + return result + + def get_bgp_del_peer(self, **kwargs): + """ get_bgp_del_peer """ + + module = kwargs["module"] + peerip = module.params['peer_addr'] + vrf_name = module.params['vrf_name'] + if vrf_name: + if len(vrf_name) > 31 or len(vrf_name) == 0: + module.fail_json( + msg='Error: The len of vrf_name %s is out of [1 - 31].' % vrf_name) + + conf_str = CE_GET_BGP_PEER_HEADER % (vrf_name, peerip) + CE_GET_BGP_PEER_TAIL + + xml_str = self.netconf_get_config(module=module, conf_str=conf_str) + + result = list() + + if "" in xml_str: + return result + else: + re_find = re.findall( + r'.*(.*).*', xml_str) + + if re_find: + return re_find + else: + return result + + def merge_bgp_peer(self, **kwargs): + """ merge_bgp_peer """ + + module = kwargs["module"] + vrf_name = module.params['vrf_name'] + peer_addr = module.params['peer_addr'] + remote_as = module.params['remote_as'] + + conf_str = CE_MERGE_BGP_PEER_HEADER % ( + vrf_name, peer_addr) + "%s" % remote_as + CE_MERGE_BGP_PEER_TAIL + + recv_xml = self.netconf_set_config(module=module, conf_str=conf_str) + + if "" not in recv_xml: + module.fail_json(msg='Error: Merge bgp peer failed.') + + cmds = [] + cmd = "peer %s as-number %s" % (peer_addr, remote_as) + cmds.append(cmd) + + return cmds + + def create_bgp_peer(self, **kwargs): + """ create_bgp_peer """ + + module = kwargs["module"] + + vrf_name = module.params['vrf_name'] + + peer_addr = module.params['peer_addr'] + remote_as = module.params['remote_as'] + + conf_str = CE_CREATE_BGP_PEER_HEADER % ( + vrf_name, peer_addr) + "%s" % remote_as + CE_CREATE_BGP_PEER_TAIL + + recv_xml = self.netconf_set_config(module=module, conf_str=conf_str) + + if "" not in recv_xml: + module.fail_json(msg='Error: Create bgp peer failed.') + + cmds = [] + cmd = "peer %s as-number %s" % (peer_addr, remote_as) + cmds.append(cmd) + + return cmds + + def delete_bgp_peer(self, **kwargs): + """ delete_bgp_peer """ + + module = kwargs["module"] + vrf_name = module.params['vrf_name'] + peer_addr = module.params['peer_addr'] + + conf_str = CE_DELETE_BGP_PEER_HEADER % ( + vrf_name, peer_addr) + CE_DELETE_BGP_PEER_TAIL + + recv_xml = self.netconf_set_config(module=module, conf_str=conf_str) + + if "" not in recv_xml: + module.fail_json(msg='Error: Delete bgp peer failed.') + + cmds = [] + cmd = "undo peer %s" % peer_addr + cmds.append(cmd) + + return cmds + + def merge_bgp_peer_other(self, **kwargs): + """ merge_bgp_peer """ + + module = kwargs["module"] + vrf_name = module.params['vrf_name'] + peer_addr = module.params['peer_addr'] + + conf_str = CE_MERGE_BGP_PEER_HEADER % (vrf_name, peer_addr) + + cmds = [] + + description = module.params['description'] + if description: + conf_str += "%s" % description + + cmd = "peer %s description %s" % (peer_addr, description) + cmds.append(cmd) + + fake_as = module.params['fake_as'] + if fake_as: + conf_str += "%s" % fake_as + + cmd = "peer %s local-as %s" % (peer_addr, fake_as) + cmds.append(cmd) + + dual_as = module.params['dual_as'] + if dual_as != 'no_use': + conf_str += "%s" % dual_as + + if dual_as == "true": + cmd = "peer %s local-as %s dual-as" % (peer_addr, fake_as) + else: + cmd = "peer %s local-as %s" % (peer_addr, fake_as) + 
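+                # dual_as only changes the generated CLI: when true, 'dual-as' is appended to
+                # the 'local-as' command so the peer may use either the real or the fake AS.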
cmds.append(cmd) + + conventional = module.params['conventional'] + if conventional != 'no_use': + conf_str += "%s" % conventional + if conventional == "true": + cmd = "peer %s capability-advertise conventional" % peer_addr + else: + cmd = "undo peer %s capability-advertise conventional" % peer_addr + cmds.append(cmd) + + route_refresh = module.params['route_refresh'] + if route_refresh != 'no_use': + conf_str += "%s" % route_refresh + + if route_refresh == "true": + cmd = "peer %s capability-advertise route-refresh" % peer_addr + else: + cmd = "undo peer %s capability-advertise route-refresh" % peer_addr + cmds.append(cmd) + + four_byte_as = module.params['four_byte_as'] + if four_byte_as != 'no_use': + conf_str += "%s" % four_byte_as + + if four_byte_as == "true": + cmd = "peer %s capability-advertise 4-byte-as" % peer_addr + else: + cmd = "undo peer %s capability-advertise 4-byte-as" % peer_addr + cmds.append(cmd) + + is_ignore = module.params['is_ignore'] + if is_ignore != 'no_use': + conf_str += "%s" % is_ignore + + if is_ignore == "true": + cmd = "peer %s ignore" % peer_addr + else: + cmd = "undo peer %s ignore" % peer_addr + cmds.append(cmd) + + local_if_name = module.params['local_if_name'] + if local_if_name: + conf_str += "%s" % local_if_name + + cmd = "peer %s connect-interface %s" % (peer_addr, local_if_name) + cmds.append(cmd) + + ebgp_max_hop = module.params['ebgp_max_hop'] + if ebgp_max_hop: + conf_str += "%s" % ebgp_max_hop + + cmd = "peer %s ebgp-max-hop %s" % (peer_addr, ebgp_max_hop) + cmds.append(cmd) + + valid_ttl_hops = module.params['valid_ttl_hops'] + if valid_ttl_hops: + conf_str += "%s" % valid_ttl_hops + + cmd = "peer %s valid-ttl-hops %s" % (peer_addr, valid_ttl_hops) + cmds.append(cmd) + + connect_mode = module.params['connect_mode'] + if connect_mode: + + if connect_mode == "listenOnly": + cmd = "peer %s listen-only" % peer_addr + cmds.append(cmd) + elif connect_mode == "connectOnly": + cmd = "peer %s connect-only" % peer_addr + cmds.append(cmd) + elif connect_mode == "both": + connect_mode = "null" + cmd = "peer %s listen-only" % peer_addr + cmds.append(cmd) + cmd = "peer %s connect-only" % peer_addr + cmds.append(cmd) + conf_str += "%s" % connect_mode + + is_log_change = module.params['is_log_change'] + if is_log_change != 'no_use': + conf_str += "%s" % is_log_change + + if is_log_change == "true": + cmd = "peer %s log-change" % peer_addr + else: + cmd = "undo peer %s log-change" % peer_addr + cmds.append(cmd) + + pswd_type = module.params['pswd_type'] + if pswd_type: + conf_str += "%s" % pswd_type + + pswd_cipher_text = module.params['pswd_cipher_text'] + if pswd_cipher_text: + conf_str += "%s" % pswd_cipher_text + + if pswd_type == "cipher": + cmd = "peer %s password cipher %s" % ( + peer_addr, pswd_cipher_text) + elif pswd_type == "simple": + cmd = "peer %s password simple %s" % ( + peer_addr, pswd_cipher_text) + cmds.append(cmd) + + keep_alive_time = module.params['keep_alive_time'] + if keep_alive_time: + conf_str += "%s" % keep_alive_time + + cmd = "peer %s timer keepalive %s" % (peer_addr, keep_alive_time) + cmds.append(cmd) + + hold_time = module.params['hold_time'] + if hold_time: + conf_str += "%s" % hold_time + + cmd = "peer %s timer hold %s" % (peer_addr, hold_time) + cmds.append(cmd) + + min_hold_time = module.params['min_hold_time'] + if min_hold_time: + conf_str += "%s" % min_hold_time + + cmd = "peer %s timer min-holdtime %s" % (peer_addr, min_hold_time) + cmds.append(cmd) + + key_chain_name = module.params['key_chain_name'] + if 
key_chain_name: + conf_str += "%s" % key_chain_name + + cmd = "peer %s keychain %s" % (peer_addr, key_chain_name) + cmds.append(cmd) + + conn_retry_time = module.params['conn_retry_time'] + if conn_retry_time: + conf_str += "%s" % conn_retry_time + + cmd = "peer %s timer connect-retry %s" % ( + peer_addr, conn_retry_time) + cmds.append(cmd) + + tcp_mss = module.params['tcp_MSS'] + if tcp_mss: + conf_str += "%s" % tcp_mss + + cmd = "peer %s tcp-mss %s" % (peer_addr, tcp_mss) + cmds.append(cmd) + + mpls_local_ifnet_disable = module.params['mpls_local_ifnet_disable'] + if mpls_local_ifnet_disable != 'no_use': + conf_str += "%s" % mpls_local_ifnet_disable + + if mpls_local_ifnet_disable == "false": + cmd = "undo peer %s mpls-local-ifnet disable" % peer_addr + else: + cmd = "peer %s mpls-local-ifnet disable" % peer_addr + cmds.append(cmd) + + prepend_global_as = module.params['prepend_global_as'] + if prepend_global_as != 'no_use': + conf_str += "%s" % prepend_global_as + + if prepend_global_as == "true": + cmd = "peer %s local-as %s prepend-global-as" % (peer_addr, fake_as) + else: + cmd = "undo peer %s local-as %s prepend-global-as" % (peer_addr, fake_as) + cmds.append(cmd) + + prepend_fake_as = module.params['prepend_fake_as'] + if prepend_fake_as != 'no_use': + conf_str += "%s" % prepend_fake_as + + if prepend_fake_as == "true": + cmd = "peer %s local-as %s prepend-local-as" % (peer_addr, fake_as) + else: + cmd = "undo peer %s local-as %s prepend-local-as" % (peer_addr, fake_as) + cmds.append(cmd) + + conf_str += CE_MERGE_BGP_PEER_TAIL + + recv_xml = self.netconf_set_config(module=module, conf_str=conf_str) + + if "" not in recv_xml: + module.fail_json(msg='Error: Merge bgp peer other failed.') + + return cmds + + def merge_peer_bfd(self, **kwargs): + """ merge_peer_bfd """ + + module = kwargs["module"] + vrf_name = module.params['vrf_name'] + peer_addr = module.params['peer_addr'] + + conf_str = CE_MERGE_PEER_BFD_HEADER % (vrf_name, peer_addr) + + cmds = [] + + is_bfd_block = module.params['is_bfd_block'] + if is_bfd_block != 'no_use': + conf_str += "%s" % is_bfd_block + + if is_bfd_block == "true": + cmd = "peer %s bfd block" % peer_addr + else: + cmd = "undo peer %s bfd block" % peer_addr + cmds.append(cmd) + + multiplier = module.params['multiplier'] + if multiplier: + conf_str += "%s" % multiplier + + cmd = "peer %s bfd detect-multiplier %s" % (peer_addr, multiplier) + cmds.append(cmd) + + is_bfd_enable = module.params['is_bfd_enable'] + if is_bfd_enable != 'no_use': + conf_str += "%s" % is_bfd_enable + + if is_bfd_enable == "true": + cmd = "peer %s bfd enable" % peer_addr + else: + cmd = "undo peer %s bfd enable" % peer_addr + cmds.append(cmd) + + rx_interval = module.params['rx_interval'] + if rx_interval: + conf_str += "%s" % rx_interval + + cmd = "peer %s bfd min-rx-interval %s" % (peer_addr, rx_interval) + cmds.append(cmd) + + tx_interval = module.params['tx_interval'] + if tx_interval: + conf_str += "%s" % tx_interval + + cmd = "peer %s bfd min-tx-interval %s" % (peer_addr, tx_interval) + cmds.append(cmd) + + is_single_hop = module.params['is_single_hop'] + if is_single_hop != 'no_use': + conf_str += "%s" % is_single_hop + + if is_single_hop == "true": + cmd = "peer %s bfd enable single-hop-prefer" % peer_addr + else: + cmd = "undo peer %s bfd enable single-hop-prefer" % peer_addr + cmds.append(cmd) + + conf_str += CE_MERGE_PEER_BFD_TAIL + + recv_xml = self.netconf_set_config(module=module, conf_str=conf_str) + + if "" not in recv_xml: + module.fail_json(msg='Error: Merge peer 
bfd failed.') + + return cmds + + def delete_peer_bfd(self, **kwargs): + """ delete_peer_bfd """ + + module = kwargs["module"] + vrf_name = module.params['vrf_name'] + peer_addr = module.params['peer_addr'] + + conf_str = CE_DELETE_PEER_BFD_HEADER % (vrf_name, peer_addr) + + cmds = [] + + is_bfd_block = module.params['is_bfd_block'] + if is_bfd_block != 'no_use': + conf_str += "%s" % is_bfd_block + + cmd = "undo peer %s bfd block" % peer_addr + cmds.append(cmd) + + multiplier = module.params['multiplier'] + if multiplier: + conf_str += "%s" % multiplier + + cmd = "undo peer %s bfd detect-multiplier %s" % ( + peer_addr, multiplier) + cmds.append(cmd) + + is_bfd_enable = module.params['is_bfd_enable'] + if is_bfd_enable != 'no_use': + conf_str += "%s" % is_bfd_enable + + cmd = "undo peer %s bfd enable" % peer_addr + cmds.append(cmd) + + rx_interval = module.params['rx_interval'] + if rx_interval: + conf_str += "%s" % rx_interval + + cmd = "undo peer %s bfd min-rx-interval %s" % ( + peer_addr, rx_interval) + cmds.append(cmd) + + tx_interval = module.params['tx_interval'] + if tx_interval: + conf_str += "%s" % tx_interval + + cmd = "undo peer %s bfd min-tx-interval %s" % ( + peer_addr, tx_interval) + cmds.append(cmd) + + is_single_hop = module.params['is_single_hop'] + if is_single_hop != 'no_use': + conf_str += "%s" % is_single_hop + + cmd = "undo peer %s bfd enable single-hop-prefer" % peer_addr + cmds.append(cmd) + + conf_str += CE_DELETE_PEER_BFD_TAIL + + recv_xml = self.netconf_set_config(module=module, conf_str=conf_str) + + if "" not in recv_xml: + module.fail_json(msg='Error: Delete peer bfd failed.') + + return cmds + + +def main(): + """ main """ + + argument_spec = dict( + state=dict(choices=['present', 'absent'], default='present'), + vrf_name=dict(type='str', required=True), + peer_addr=dict(type='str', required=True), + remote_as=dict(type='str', required=True), + description=dict(type='str'), + fake_as=dict(type='str'), + dual_as=dict(type='str', default='no_use', choices=['no_use', 'true', 'false']), + conventional=dict(type='str', default='no_use', choices=['no_use', 'true', 'false']), + route_refresh=dict(type='str', default='no_use', choices=['no_use', 'true', 'false']), + four_byte_as=dict(type='str', default='no_use', choices=['no_use', 'true', 'false']), + is_ignore=dict(type='str', default='no_use', choices=['no_use', 'true', 'false']), + local_if_name=dict(type='str'), + ebgp_max_hop=dict(type='str'), + valid_ttl_hops=dict(type='str'), + connect_mode=dict(choices=['listenOnly', 'connectOnly', 'both']), + is_log_change=dict(type='str', default='no_use', choices=['no_use', 'true', 'false']), + pswd_type=dict(choices=['null', 'cipher', 'simple']), + pswd_cipher_text=dict(type='str', no_log=True), + keep_alive_time=dict(type='str'), + hold_time=dict(type='str'), + min_hold_time=dict(type='str'), + key_chain_name=dict(type='str'), + conn_retry_time=dict(type='str'), + tcp_MSS=dict(type='str'), + mpls_local_ifnet_disable=dict(type='str', default='no_use', choices=['no_use', 'true', 'false']), + prepend_global_as=dict(type='str', default='no_use', choices=['no_use', 'true', 'false']), + prepend_fake_as=dict(type='str', default='no_use', choices=['no_use', 'true', 'false']), + is_bfd_block=dict(type='str', default='no_use', choices=['no_use', 'true', 'false']), + multiplier=dict(type='str'), + is_bfd_enable=dict(type='str', default='no_use', choices=['no_use', 'true', 'false']), + rx_interval=dict(type='str'), + tx_interval=dict(type='str'), + is_single_hop=dict(type='str', 
default='no_use', choices=['no_use', 'true', 'false'])) + + argument_spec.update(ce_argument_spec) + module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True) + + changed = False + proposed = dict() + existing = dict() + end_state = dict() + updates = [] + + state = module.params['state'] + vrf_name = module.params['vrf_name'] + peer_addr = module.params['peer_addr'] + remote_as = module.params['remote_as'] + description = module.params['description'] + fake_as = module.params['fake_as'] + dual_as = module.params['dual_as'] + conventional = module.params['conventional'] + route_refresh = module.params['route_refresh'] + four_byte_as = module.params['four_byte_as'] + is_ignore = module.params['is_ignore'] + local_if_name = module.params['local_if_name'] + ebgp_max_hop = module.params['ebgp_max_hop'] + valid_ttl_hops = module.params['valid_ttl_hops'] + connect_mode = module.params['connect_mode'] + is_log_change = module.params['is_log_change'] + pswd_type = module.params['pswd_type'] + pswd_cipher_text = module.params['pswd_cipher_text'] + keep_alive_time = module.params['keep_alive_time'] + hold_time = module.params['hold_time'] + min_hold_time = module.params['min_hold_time'] + key_chain_name = module.params['key_chain_name'] + conn_retry_time = module.params['conn_retry_time'] + tcp_mss = module.params['tcp_MSS'] + mpls_local_ifnet_disable = module.params['mpls_local_ifnet_disable'] + prepend_global_as = module.params['prepend_global_as'] + prepend_fake_as = module.params['prepend_fake_as'] + is_bfd_block = module.params['is_bfd_block'] + multiplier = module.params['multiplier'] + is_bfd_enable = module.params['is_bfd_enable'] + rx_interval = module.params['rx_interval'] + tx_interval = module.params['tx_interval'] + is_single_hop = module.params['is_single_hop'] + + ce_bgp_peer_obj = BgpNeighbor() + + # get proposed + proposed["state"] = state + if vrf_name: + proposed["vrf_name"] = vrf_name + if peer_addr: + proposed["peer_addr"] = peer_addr + if remote_as: + proposed["remote_as"] = remote_as + if description: + proposed["description"] = description + if fake_as: + proposed["fake_as"] = fake_as + if dual_as != 'no_use': + proposed["dual_as"] = dual_as + if conventional != 'no_use': + proposed["conventional"] = conventional + if route_refresh != 'no_use': + proposed["route_refresh"] = route_refresh + if four_byte_as != 'no_use': + proposed["four_byte_as"] = four_byte_as + if is_ignore != 'no_use': + proposed["is_ignore"] = is_ignore + if local_if_name: + proposed["local_if_name"] = local_if_name + if ebgp_max_hop: + proposed["ebgp_max_hop"] = ebgp_max_hop + if valid_ttl_hops: + proposed["valid_ttl_hops"] = valid_ttl_hops + if connect_mode: + proposed["connect_mode"] = connect_mode + if is_log_change != 'no_use': + proposed["is_log_change"] = is_log_change + if pswd_type: + proposed["pswd_type"] = pswd_type + if pswd_cipher_text: + proposed["pswd_cipher_text"] = pswd_cipher_text + if keep_alive_time: + proposed["keep_alive_time"] = keep_alive_time + if hold_time: + proposed["hold_time"] = hold_time + if min_hold_time: + proposed["min_hold_time"] = min_hold_time + if key_chain_name: + proposed["key_chain_name"] = key_chain_name + if conn_retry_time: + proposed["conn_retry_time"] = conn_retry_time + if tcp_mss: + proposed["tcp_MSS"] = tcp_mss + if mpls_local_ifnet_disable != 'no_use': + proposed["mpls_local_ifnet_disable"] = mpls_local_ifnet_disable + if prepend_global_as != 'no_use': + proposed["prepend_global_as"] = prepend_global_as + if prepend_fake_as != 'no_use': 
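+        # 'no_use' is the tri-state default for boolean-like options: only values the
+        # user explicitly set to 'true' or 'false' are recorded in 'proposed' and compared.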
+ proposed["prepend_fake_as"] = prepend_fake_as + if is_bfd_block != 'no_use': + proposed["is_bfd_block"] = is_bfd_block + if multiplier: + proposed["multiplier"] = multiplier + if is_bfd_enable != 'no_use': + proposed["is_bfd_enable"] = is_bfd_enable + if rx_interval: + proposed["rx_interval"] = rx_interval + if tx_interval: + proposed["tx_interval"] = tx_interval + if is_single_hop != 'no_use': + proposed["is_single_hop"] = is_single_hop + + if not ce_bgp_peer_obj: + module.fail_json(msg='Error: Init module failed.') + + need_bgp_peer_enable = ce_bgp_peer_obj.check_bgp_peer_args(module=module) + need_bgp_peer_other_rst = ce_bgp_peer_obj.check_bgp_peer_other_args( + module=module) + need_peer_bfd_merge_rst = ce_bgp_peer_obj.check_peer_bfd_merge_args( + module=module) + need_peer_bfd_del_rst = ce_bgp_peer_obj.check_peer_bfd_delete_args( + module=module) + + # bgp peer config + if need_bgp_peer_enable["need_cfg"]: + + if state == "present": + + if remote_as: + + bgp_peer_exist = ce_bgp_peer_obj.get_bgp_peer(module=module) + existing["bgp peer"] = bgp_peer_exist + + bgp_peer_new = (peer_addr, remote_as) + if len(bgp_peer_exist) == 0: + cmd = ce_bgp_peer_obj.create_bgp_peer(module=module) + changed = True + for item in cmd: + updates.append(item) + + elif bgp_peer_new in bgp_peer_exist: + pass + + else: + cmd = ce_bgp_peer_obj.merge_bgp_peer(module=module) + changed = True + for item in cmd: + updates.append(item) + + bgp_peer_end = ce_bgp_peer_obj.get_bgp_peer(module=module) + end_state["bgp peer"] = bgp_peer_end + + else: + + bgp_peer_exist = ce_bgp_peer_obj.get_bgp_del_peer(module=module) + existing["bgp peer"] = bgp_peer_exist + + bgp_peer_new = (peer_addr) + + if len(bgp_peer_exist) == 0: + pass + + elif bgp_peer_new in bgp_peer_exist: + cmd = ce_bgp_peer_obj.delete_bgp_peer(module=module) + changed = True + for item in cmd: + updates.append(item) + + bgp_peer_end = ce_bgp_peer_obj.get_bgp_del_peer(module=module) + end_state["bgp peer"] = bgp_peer_end + + # bgp peer other args + exist_tmp = dict() + for item in need_bgp_peer_other_rst: + if item != "need_cfg": + exist_tmp[item] = need_bgp_peer_other_rst[item] + if exist_tmp: + existing["bgp peer other"] = exist_tmp + + if need_bgp_peer_other_rst["need_cfg"]: + + if state == "present": + cmd = ce_bgp_peer_obj.merge_bgp_peer_other(module=module) + changed = True + for item in cmd: + updates.append(item) + + need_bgp_peer_other_rst = ce_bgp_peer_obj.check_bgp_peer_other_args( + module=module) + end_tmp = dict() + for item in need_bgp_peer_other_rst: + if item != "need_cfg": + end_tmp[item] = need_bgp_peer_other_rst[item] + if end_tmp: + end_state["bgp peer other"] = end_tmp + + # peer bfd args + if state == "present": + exist_tmp = dict() + for item in need_peer_bfd_merge_rst: + if item != "need_cfg": + exist_tmp[item] = need_peer_bfd_merge_rst[item] + if exist_tmp: + existing["peer bfd"] = exist_tmp + + if need_peer_bfd_merge_rst["need_cfg"]: + cmd = ce_bgp_peer_obj.merge_peer_bfd(module=module) + changed = True + for item in cmd: + updates.append(item) + + need_peer_bfd_merge_rst = ce_bgp_peer_obj.check_peer_bfd_merge_args( + module=module) + end_tmp = dict() + for item in need_peer_bfd_merge_rst: + if item != "need_cfg": + end_tmp[item] = need_peer_bfd_merge_rst[item] + if end_tmp: + end_state["peer bfd"] = end_tmp + else: + exist_tmp = dict() + for item in need_peer_bfd_del_rst: + if item != "need_cfg": + exist_tmp[item] = need_peer_bfd_del_rst[item] + if exist_tmp: + existing["peer bfd"] = exist_tmp + + # has already delete with bgp 
peer + + need_peer_bfd_del_rst = ce_bgp_peer_obj.check_peer_bfd_delete_args( + module=module) + end_tmp = dict() + for item in need_peer_bfd_del_rst: + if item != "need_cfg": + end_tmp[item] = need_peer_bfd_del_rst[item] + if end_tmp: + end_state["peer bfd"] = end_tmp + + results = dict() + results['proposed'] = proposed + results['existing'] = existing + results['changed'] = changed + results['end_state'] = end_state + results['updates'] = updates + + module.exit_json(**results) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/cloudengine/ce_bgp_neighbor_af.py b/plugins/modules/network/cloudengine/ce_bgp_neighbor_af.py new file mode 100644 index 0000000000..cf9339ef82 --- /dev/null +++ b/plugins/modules/network/cloudengine/ce_bgp_neighbor_af.py @@ -0,0 +1,2679 @@ +#!/usr/bin/python +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: ce_bgp_neighbor_af +short_description: Manages BGP neighbor Address-family configuration on HUAWEI CloudEngine switches. +description: + - Manages BGP neighbor Address-family configurations on HUAWEI CloudEngine switches. +author: + - wangdezhuang (@QijunPan) +notes: + - This module requires the netconf system service be enabled on the remote device being managed. + - Recommended connection is C(netconf). + - This module also works with C(local) connections for legacy playbooks. +options: + vrf_name: + description: + - Name of a BGP instance. The name is a case-sensitive string of characters. + The BGP instance can be used only after the corresponding VPN instance is created. + required: true + af_type: + description: + - Address family type of a BGP instance. + required: true + choices: ['ipv4uni', 'ipv4multi', 'ipv4vpn', 'ipv6uni', 'ipv6vpn', 'evpn'] + remote_address: + description: + - IPv4 or IPv6 peer connection address. + required: true + advertise_irb: + description: + - If the value is true, advertised IRB routes are distinguished. + If the value is false, advertised IRB routes are not distinguished. + default: no_use + choices: ['no_use','true','false'] + advertise_arp: + description: + - If the value is true, advertised ARP routes are distinguished. + If the value is false, advertised ARP routes are not distinguished. + default: no_use + choices: ['no_use','true','false'] + advertise_remote_nexthop: + description: + - If the value is true, the remote next-hop attribute is advertised to peers. + If the value is false, the remote next-hop attribute is not advertised to any peers. + default: no_use + choices: ['no_use','true','false'] + advertise_community: + description: + - If the value is true, the community attribute is advertised to peers. 
+ If the value is false, the community attribute is not advertised to peers. + default: no_use + choices: ['no_use','true','false'] + advertise_ext_community: + description: + - If the value is true, the extended community attribute is advertised to peers. + If the value is false, the extended community attribute is not advertised to peers. + default: no_use + choices: ['no_use','true','false'] + discard_ext_community: + description: + - If the value is true, the extended community attribute in the peer route information is discarded. + If the value is false, the extended community attribute in the peer route information is not discarded. + default: no_use + choices: ['no_use','true','false'] + allow_as_loop_enable: + description: + - If the value is true, repetitive local AS numbers are allowed. + If the value is false, repetitive local AS numbers are not allowed. + default: no_use + choices: ['no_use','true','false'] + allow_as_loop_limit: + description: + - Set the maximum number of repetitive local AS number. + The value is an integer ranging from 1 to 10. + keep_all_routes: + description: + - If the value is true, the system stores all route update messages received from all peers (groups) + after BGP connection setup. + If the value is false, the system stores only BGP update messages that are received from peers + and pass the configured import policy. + default: no_use + choices: ['no_use','true','false'] + nexthop_configure: + description: + - null, The next hop is not changed. + local, The next hop is changed to the local IP address. + invariable, Prevent the device from changing the next hop of each imported IGP route + when advertising it to its BGP peers. + choices: ['null', 'local', 'invariable'] + preferred_value: + description: + - Assign a preferred value for the routes learned from a specified peer. + The value is an integer ranging from 0 to 65535. + public_as_only: + description: + - If the value is true, sent BGP update messages carry only the public AS number but do not carry + private AS numbers. + If the value is false, sent BGP update messages can carry private AS numbers. + default: no_use + choices: ['no_use','true','false'] + public_as_only_force: + description: + - If the value is true, sent BGP update messages carry only the public AS number but do not carry + private AS numbers. + If the value is false, sent BGP update messages can carry private AS numbers. + default: no_use + choices: ['no_use','true','false'] + public_as_only_limited: + description: + - Limited use public as number. + default: no_use + choices: ['no_use','true','false'] + public_as_only_replace: + description: + - Private as replaced by public as number. + default: no_use + choices: ['no_use','true','false'] + public_as_only_skip_peer_as: + description: + - Public as only skip peer as. + default: no_use + choices: ['no_use','true','false'] + route_limit: + description: + - Configure the maximum number of routes that can be accepted from a peer. + The value is an integer ranging from 1 to 4294967295. + route_limit_percent: + description: + - Specify the percentage of routes when a router starts to generate an alarm. + The value is an integer ranging from 1 to 100. + route_limit_type: + description: + - Noparameter, After the number of received routes exceeds the threshold and the timeout + timer expires,no action. + AlertOnly, An alarm is generated and no additional routes will be accepted if the maximum + number of routes allowed have been received. 
+ IdleForever, The connection that is interrupted is not automatically re-established if the + maximum number of routes allowed have been received. + IdleTimeout, After the number of received routes exceeds the threshold and the timeout timer + expires, the connection that is interrupted is automatically re-established. + choices: ['noparameter', 'alertOnly', 'idleForever', 'idleTimeout'] + route_limit_idle_timeout: + description: + - Specify the value of the idle-timeout timer to automatically reestablish the connections after + they are cut off when the number of routes exceeds the set threshold. + The value is an integer ranging from 1 to 1200. + rt_updt_interval: + description: + - Specify the minimum interval at which Update packets are sent. The value is an integer, in seconds. + The value is an integer ranging from 0 to 600. + redirect_ip: + description: + - Redirect ip. + default: no_use + choices: ['no_use','true','false'] + redirect_ip_validation: + description: + - Redirect ip validation. + default: no_use + choices: ['no_use','true','false'] + aliases: ['redirect_ip_vaildation'] + reflect_client: + description: + - If the value is true, the local device functions as the route reflector and a peer functions + as a client of the route reflector. + If the value is false, the route reflector and client functions are not configured. + default: no_use + choices: ['no_use','true','false'] + substitute_as_enable: + description: + - If the value is true, the function to replace a specified peer's AS number in the AS-Path attribute with + the local AS number is enabled. + If the value is false, the function to replace a specified peer's AS number in the AS-Path attribute with + the local AS number is disabled. + default: no_use + choices: ['no_use','true','false'] + import_rt_policy_name: + description: + - Specify the filtering policy applied to the routes learned from a peer. + The value is a string of 1 to 40 characters. + export_rt_policy_name: + description: + - Specify the filtering policy applied to the routes to be advertised to a peer. + The value is a string of 1 to 40 characters. + import_pref_filt_name: + description: + - Specify the IPv4 filtering policy applied to the routes received from a specified peer. + The value is a string of 1 to 169 characters. + export_pref_filt_name: + description: + - Specify the IPv4 filtering policy applied to the routes to be advertised to a specified peer. + The value is a string of 1 to 169 characters. + import_as_path_filter: + description: + - Apply an AS_Path-based filtering policy to the routes received from a specified peer. + The value is an integer ranging from 1 to 256. + export_as_path_filter: + description: + - Apply an AS_Path-based filtering policy to the routes to be advertised to a specified peer. + The value is an integer ranging from 1 to 256. + import_as_path_name_or_num: + description: + - A routing strategy based on the AS path list for routing received by a designated peer. + export_as_path_name_or_num: + description: + - Application of a AS path list based filtering policy to the routing of a specified peer. + import_acl_name_or_num: + description: + - Apply an IPv4 ACL-based filtering policy to the routes received from a specified peer. + The value is a string of 1 to 32 characters. + export_acl_name_or_num: + description: + - Apply an IPv4 ACL-based filtering policy to the routes to be advertised to a specified peer. + The value is a string of 1 to 32 characters. 
+    ipprefix_orf_enable:
+        description:
+            - If the value is true, the address prefix-based Outbound Route Filter (ORF) capability is
+              enabled for peers.
+              If the value is false, the address prefix-based Outbound Route Filter (ORF) capability is
+              disabled for peers.
+        default: no_use
+        choices: ['no_use','true','false']
+    is_nonstd_ipprefix_mod:
+        description:
+            - If the value is true, non-standard capability codes are used during capability negotiation.
+              If the value is false, RFC-defined standard ORF capability codes are used during capability negotiation.
+        default: no_use
+        choices: ['no_use','true','false']
+    orftype:
+        description:
+            - ORF type.
+              The value is an integer ranging from 0 to 65535.
+    orf_mode:
+        description:
+            - ORF mode.
+              null, Default value.
+              receive, ORF for incoming packets.
+              send, ORF for outgoing packets.
+              both, ORF for incoming and outgoing packets.
+        choices: ['null', 'receive', 'send', 'both']
+    soostring:
+        description:
+            - Configure the Site-of-Origin (SoO) extended community attribute.
+              The value is a string of 3 to 21 characters.
+    default_rt_adv_enable:
+        description:
+            - If the value is true, the function to advertise default routes to peers is enabled.
+              If the value is false, the function to advertise default routes to peers is disabled.
+        default: no_use
+        choices: ['no_use','true', 'false']
+    default_rt_adv_policy:
+        description:
+            - Specify the name of the policy to be used.
+              The value is a string of 1 to 40 characters.
+    default_rt_match_mode:
+        description:
+            - null, Null.
+              matchall, Advertise the default route if all matching conditions are met.
+              matchany, Advertise the default route if any matching condition is met.
+        choices: ['null', 'matchall', 'matchany']
+    add_path_mode:
+        description:
+            - null, Null.
+              receive, Support receiving Add-Path routes.
+              send, Support sending Add-Path routes.
+              both, Support receiving and sending Add-Path routes.
+        choices: ['null', 'receive', 'send', 'both']
+    adv_add_path_num:
+        description:
+            - The maximum number of Add-Path routes that can be advertised to a peer.
+              The value is an integer ranging from 2 to 64.
+    origin_as_valid:
+        description:
+            - If the value is true, origin AS validation results are applied when routes are advertised.
+              If the value is false, origin AS validation results are not applied.
+        default: no_use
+        choices: ['no_use','true', 'false']
+    vpls_enable:
+        description:
+            - If the value is true, VPLS is enabled.
+              If the value is false, VPLS is disabled.
+        default: no_use
+        choices: ['no_use','true', 'false']
+    vpls_ad_disable:
+        description:
+            - If the value is true, VPLS-AD is enabled.
+              If the value is false, VPLS-AD is disabled.
+        default: no_use
+        choices: ['no_use','true', 'false']
+    update_pkt_standard_compatible:
+        description:
+            - If the value is true, UPDATE messages exchanged with VPNv4 multicast neighbors carry no label.
+              If the value is false, such messages carry a label.
+        default: no_use
+        choices: ['no_use','true', 'false']
+'''
+
+EXAMPLES = '''
+
+- name: CloudEngine BGP neighbor address family test
+  hosts: cloudengine
+  connection: local
+  gather_facts: no
+  vars:
+    cli:
+      host: "{{ inventory_hostname }}"
+      port: "{{ ansible_ssh_port }}"
+      username: "{{ username }}"
+      password: "{{ password }}"
+      transport: cli
+
+  tasks:
+
+  - name: "Config BGP peer Address_Family"
+    ce_bgp_neighbor_af:
+      state: present
+      vrf_name: js
+      af_type: ipv4uni
+      remote_address: 192.168.10.10
+      nexthop_configure: local
+      provider: "{{ cli }}"
+
+  - name: "Undo BGP peer Address_Family"
+    ce_bgp_neighbor_af:
+      state: absent
+      vrf_name: js
+      af_type: ipv4uni
+      remote_address: 192.168.10.10
+      nexthop_configure: local
+      provider: "{{ cli }}"
+'''
+
+RETURN = '''
+changed:
+    description: check to see if a change was made on the device
+    returned: always
+    type: bool
+    sample: true
+proposed:
+    description: k/v pairs of parameters passed into module
+    returned: always
+    type: dict
+    sample: {"af_type": "ipv4uni", "nexthop_configure": "local",
+             "remote_address": "192.168.10.10",
+             "state": "present", "vrf_name": "js"}
+existing:
+    description: k/v pairs of existing BGP neighbor address family configuration
+    returned: always
+    type: dict
+    sample: {"bgp neighbor af": {"af_type": "ipv4uni", "remote_address": "192.168.10.10",
+             "vrf_name": "js"},
+             "bgp neighbor af other": {"af_type": "ipv4uni", "nexthop_configure": "null",
+             "vrf_name": "js"}}
+end_state:
+    description: k/v pairs of BGP neighbor address family configuration after module execution
+    returned: always
+    type: dict
+    sample: {"bgp neighbor af": {"af_type": "ipv4uni", "remote_address": "192.168.10.10",
+             "vrf_name": "js"},
+             "bgp neighbor af other": {"af_type": "ipv4uni", "nexthop_configure": "local",
+             "vrf_name": "js"}}
+updates:
+    description: commands sent to the device
+    returned: always
+    type: list
+    sample: ["peer 192.168.10.10 next-hop-local"]
+'''
+
+import re
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.network.cloudengine.ce import get_nc_config, set_nc_config, ce_argument_spec, check_ip_addr
+
+# get bgp peer af
+CE_GET_BGP_PEER_AF_HEADER = """
+    <filter type="subtree">
+      <bgp xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
+        <bgpcomm>
+          <bgpVrfs>
+            <bgpVrf>
+              <vrfName>%s</vrfName>
+              <bgpVrfAFs>
+                <bgpVrfAF>
+                  <afType>%s</afType>
+                  <peerAFs>
+                    <peerAF>
+                      <remoteAddress>%s</remoteAddress>
+"""
+CE_GET_BGP_PEER_AF_TAIL = """
+                    </peerAF>
+                  </peerAFs>
+                </bgpVrfAF>
+              </bgpVrfAFs>
+            </bgpVrf>
+          </bgpVrfs>
+        </bgpcomm>
+      </bgp>
+    </filter>
+"""
+
+# merge bgp peer af
+CE_MERGE_BGP_PEER_AF_HEADER = """
+    <config>
+      <bgp xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
+        <bgpcomm>
+          <bgpVrfs>
+            <bgpVrf>
+              <vrfName>%s</vrfName>
+              <bgpVrfAFs>
+                <bgpVrfAF>
+                  <afType>%s</afType>
+                  <peerAFs>
+                    <peerAF operation="merge">
+                      <remoteAddress>%s</remoteAddress>
+"""
+CE_MERGE_BGP_PEER_AF_TAIL = """
+                    </peerAF>
+                  </peerAFs>
+                </bgpVrfAF>
+              </bgpVrfAFs>
+            </bgpVrf>
+          </bgpVrfs>
+        </bgpcomm>
+      </bgp>
+    </config>
+"""
+
+# create bgp peer af
+CE_CREATE_BGP_PEER_AF = """
+    <config>
+      <bgp xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
+        <bgpcomm>
+          <bgpVrfs>
+            <bgpVrf>
+              <vrfName>%s</vrfName>
+              <bgpVrfAFs>
+                <bgpVrfAF>
+                  <afType>%s</afType>
+                  <peerAFs>
+                    <peerAF operation="create">
+                      <remoteAddress>%s</remoteAddress>
+                    </peerAF>
+                  </peerAFs>
+                </bgpVrfAF>
+              </bgpVrfAFs>
+            </bgpVrf>
+          </bgpVrfs>
+        </bgpcomm>
+      </bgp>
+    </config>
+"""
+
+# delete bgp peer af
+CE_DELETE_BGP_PEER_AF = """
+    <config>
+      <bgp xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
+        <bgpcomm>
+          <bgpVrfs>
+            <bgpVrf>
+              <vrfName>%s</vrfName>
+              <bgpVrfAFs>
+                <bgpVrfAF>
+                  <afType>%s</afType>
+                  <peerAFs>
+                    <peerAF operation="delete">
+                      <remoteAddress>%s</remoteAddress>
+                    </peerAF>
+                  </peerAFs>
+                </bgpVrfAF>
+              </bgpVrfAFs>
+            </bgpVrf>
+          </bgpVrfs>
+        </bgpcomm>
+      </bgp>
+    </config>
+"""
+
+
+class BgpNeighborAf(object):
+    """ Manages BGP neighbor Address-family configuration """
+
+    def netconf_get_config(self, **kwargs):
+        """ netconf_get_config """
+
+        module = kwargs["module"]
+        conf_str = kwargs["conf_str"]
+
+        xml_str = get_nc_config(module, conf_str)
+
+        return xml_str
+
+    def netconf_set_config(self, **kwargs):
+        """ netconf_set_config """
+
+        module = kwargs["module"]
+        conf_str = kwargs["conf_str"]
+
+        xml_str = set_nc_config(module, conf_str)
+
+        return xml_str
+
+    def check_bgp_neighbor_af_args(self, **kwargs):
+        """ check_bgp_neighbor_af_args """
+
+        module = kwargs["module"]
+        result = dict()
+        need_cfg = False
+
+        vrf_name = module.params['vrf_name']
+        if vrf_name:
+            if len(vrf_name) > 31 or len(vrf_name) == 0:
+                module.fail_json(
+                    msg='Error: The len of vrf_name %s is out of [1 - 31].'
% vrf_name) + + state = module.params['state'] + af_type = module.params['af_type'] + remote_address = module.params['remote_address'] + + if not check_ip_addr(ipaddr=remote_address): + module.fail_json( + msg='Error: The remote_address %s is invalid.' % remote_address) + + conf_str = CE_GET_BGP_PEER_AF_HEADER % ( + vrf_name, af_type, remote_address) + CE_GET_BGP_PEER_AF_TAIL + recv_xml = self.netconf_get_config(module=module, conf_str=conf_str) + + if state == "present": + if "" in recv_xml: + need_cfg = True + else: + re_find = re.findall( + r'.*(.*).*', recv_xml) + if re_find: + result["remote_address"] = re_find + result["vrf_name"] = vrf_name + result["af_type"] = af_type + if remote_address not in re_find: + need_cfg = True + else: + need_cfg = True + else: + if "" in recv_xml: + pass + else: + re_find = re.findall( + r'.*(.*).*', recv_xml) + + if re_find: + result["remote_address"] = re_find + result["vrf_name"] = vrf_name + result["af_type"] = af_type + if re_find[0] == remote_address: + need_cfg = True + + result["need_cfg"] = need_cfg + return result + + def check_bgp_neighbor_af_other(self, **kwargs): + """ check_bgp_neighbor_af_other """ + + module = kwargs["module"] + result = dict() + need_cfg = False + + state = module.params['state'] + vrf_name = module.params['vrf_name'] + af_type = module.params['af_type'] + remote_address = module.params['remote_address'] + + if state == "absent": + result["need_cfg"] = need_cfg + return result + + advertise_irb = module.params['advertise_irb'] + if advertise_irb != 'no_use': + + conf_str = CE_GET_BGP_PEER_AF_HEADER % ( + vrf_name, af_type, remote_address) + "" + CE_GET_BGP_PEER_AF_TAIL + recv_xml = self.netconf_get_config(module=module, conf_str=conf_str) + + if "" in recv_xml: + need_cfg = True + else: + re_find = re.findall(r'.*%s\s*' + r'(.*).*' % remote_address, recv_xml) + if re_find: + result["advertise_irb"] = re_find + result["vrf_name"] = vrf_name + result["af_type"] = af_type + if re_find[0] != advertise_irb: + need_cfg = True + else: + need_cfg = True + + advertise_arp = module.params['advertise_arp'] + if advertise_arp != 'no_use': + + conf_str = CE_GET_BGP_PEER_AF_HEADER % ( + vrf_name, af_type, remote_address) + "" + CE_GET_BGP_PEER_AF_TAIL + recv_xml = self.netconf_get_config(module=module, conf_str=conf_str) + + if "" in recv_xml: + need_cfg = True + else: + re_find = re.findall(r'.*%s\s*' + r'.*(.*).*' % remote_address, recv_xml) + + if re_find: + result["advertise_arp"] = re_find + result["vrf_name"] = vrf_name + result["af_type"] = af_type + if re_find[0] != advertise_arp: + need_cfg = True + else: + need_cfg = True + + advertise_remote_nexthop = module.params['advertise_remote_nexthop'] + if advertise_remote_nexthop != 'no_use': + + conf_str = CE_GET_BGP_PEER_AF_HEADER % ( + vrf_name, af_type, remote_address) + "" + CE_GET_BGP_PEER_AF_TAIL + recv_xml = self.netconf_get_config(module=module, conf_str=conf_str) + + if "" in recv_xml: + need_cfg = True + else: + re_find = re.findall( + r'.*(.*).*', recv_xml) + + if re_find: + result["advertise_remote_nexthop"] = re_find + result["vrf_name"] = vrf_name + result["af_type"] = af_type + if re_find[0] != advertise_remote_nexthop: + need_cfg = True + else: + need_cfg = True + + advertise_community = module.params['advertise_community'] + if advertise_community != 'no_use': + + conf_str = CE_GET_BGP_PEER_AF_HEADER % ( + vrf_name, af_type, remote_address) + "" + CE_GET_BGP_PEER_AF_TAIL + recv_xml = self.netconf_get_config(module=module, conf_str=conf_str) + + if "" in recv_xml: 
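+                # Each option handled by this method follows the same
+                # idempotency pattern: query the current peerAF over NETCONF,
+                # treat an empty reply (presumably the stripped "<data/>"
+                # literal in this dump) as "leaf not configured yet", and
+                # otherwise regex-extract the on-device value and compare it
+                # with the requested one. An empty reply here means
+                # advertise-community still needs to be configured: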
+ need_cfg = True + else: + re_find = re.findall( + r'.*(.*).*', recv_xml) + + if re_find: + result["advertise_community"] = re_find + result["vrf_name"] = vrf_name + result["af_type"] = af_type + if re_find[0] != advertise_community: + need_cfg = True + else: + need_cfg = True + + advertise_ext_community = module.params['advertise_ext_community'] + if advertise_ext_community != 'no_use': + + conf_str = CE_GET_BGP_PEER_AF_HEADER % ( + vrf_name, af_type, remote_address) + "" + CE_GET_BGP_PEER_AF_TAIL + recv_xml = self.netconf_get_config(module=module, conf_str=conf_str) + if "" in recv_xml: + need_cfg = True + else: + re_find = re.findall( + r'.*(.*).*', recv_xml) + + if re_find: + result["advertise_ext_community"] = re_find + result["vrf_name"] = vrf_name + result["af_type"] = af_type + if re_find[0] != advertise_ext_community: + need_cfg = True + else: + need_cfg = True + + discard_ext_community = module.params['discard_ext_community'] + if discard_ext_community != 'no_use': + + conf_str = CE_GET_BGP_PEER_AF_HEADER % ( + vrf_name, af_type, remote_address) + "" + CE_GET_BGP_PEER_AF_TAIL + recv_xml = self.netconf_get_config(module=module, conf_str=conf_str) + + if "" in recv_xml: + need_cfg = True + else: + re_find = re.findall( + r'.*(.*).*', recv_xml) + + if re_find: + result["discard_ext_community"] = re_find + result["vrf_name"] = vrf_name + result["af_type"] = af_type + if re_find[0] != discard_ext_community: + need_cfg = True + else: + need_cfg = True + + allow_as_loop_enable = module.params['allow_as_loop_enable'] + if allow_as_loop_enable != 'no_use': + + conf_str = CE_GET_BGP_PEER_AF_HEADER % ( + vrf_name, af_type, remote_address) + "" + CE_GET_BGP_PEER_AF_TAIL + recv_xml = self.netconf_get_config(module=module, conf_str=conf_str) + + if "" in recv_xml: + need_cfg = True + else: + re_find = re.findall( + r'.*(.*).*', recv_xml) + + if re_find: + result["allow_as_loop_enable"] = re_find + result["vrf_name"] = vrf_name + result["af_type"] = af_type + if re_find[0] != allow_as_loop_enable: + need_cfg = True + else: + need_cfg = True + + allow_as_loop_limit = module.params['allow_as_loop_limit'] + if allow_as_loop_limit: + if int(allow_as_loop_limit) > 10 or int(allow_as_loop_limit) < 1: + module.fail_json( + msg='the value of allow_as_loop_limit %s is out of [1 - 10].' 
% allow_as_loop_limit) + + conf_str = CE_GET_BGP_PEER_AF_HEADER % ( + vrf_name, af_type, remote_address) + "" + CE_GET_BGP_PEER_AF_TAIL + recv_xml = self.netconf_get_config(module=module, conf_str=conf_str) + + if "" in recv_xml: + need_cfg = True + else: + re_find = re.findall( + r'.*(.*).*', recv_xml) + + if re_find: + result["allow_as_loop_limit"] = re_find + result["vrf_name"] = vrf_name + result["af_type"] = af_type + if re_find[0] != allow_as_loop_limit: + need_cfg = True + else: + need_cfg = True + + keep_all_routes = module.params['keep_all_routes'] + if keep_all_routes != 'no_use': + + conf_str = CE_GET_BGP_PEER_AF_HEADER % ( + vrf_name, af_type, remote_address) + "" + CE_GET_BGP_PEER_AF_TAIL + recv_xml = self.netconf_get_config(module=module, conf_str=conf_str) + + if "" in recv_xml: + need_cfg = True + else: + re_find = re.findall( + r'.*(.*).*', recv_xml) + + if re_find: + result["keep_all_routes"] = re_find + result["vrf_name"] = vrf_name + result["af_type"] = af_type + if re_find[0] != keep_all_routes: + need_cfg = True + else: + need_cfg = True + + nexthop_configure = module.params['nexthop_configure'] + if nexthop_configure: + + conf_str = CE_GET_BGP_PEER_AF_HEADER % ( + vrf_name, af_type, remote_address) + "" + CE_GET_BGP_PEER_AF_TAIL + recv_xml = self.netconf_get_config(module=module, conf_str=conf_str) + self.exist_nexthop_configure = "null" + if "" in recv_xml: + need_cfg = True + else: + re_find = re.findall( + r'.*(.*).*', recv_xml) + + if re_find: + self.exist_nexthop_configure = re_find[0] + result["nexthop_configure"] = re_find + result["vrf_name"] = vrf_name + result["af_type"] = af_type + if re_find[0] != nexthop_configure: + need_cfg = True + else: + need_cfg = True + + preferred_value = module.params['preferred_value'] + if preferred_value: + if int(preferred_value) > 65535 or int(preferred_value) < 0: + module.fail_json( + msg='the value of preferred_value %s is out of [0 - 65535].' 
% preferred_value) + + conf_str = CE_GET_BGP_PEER_AF_HEADER % ( + vrf_name, af_type, remote_address) + "" + CE_GET_BGP_PEER_AF_TAIL + recv_xml = self.netconf_get_config(module=module, conf_str=conf_str) + + if "" in recv_xml: + need_cfg = True + else: + re_find = re.findall( + r'.*(.*).*', recv_xml) + + if re_find: + result["preferred_value"] = re_find + result["vrf_name"] = vrf_name + result["af_type"] = af_type + if re_find[0] != preferred_value: + need_cfg = True + else: + need_cfg = True + + public_as_only = module.params['public_as_only'] + if public_as_only != 'no_use': + + conf_str = CE_GET_BGP_PEER_AF_HEADER % ( + vrf_name, af_type, remote_address) + "" + CE_GET_BGP_PEER_AF_TAIL + recv_xml = self.netconf_get_config(module=module, conf_str=conf_str) + + if "" in recv_xml: + need_cfg = True + else: + re_find = re.findall( + r'.*(.*).*', recv_xml) + + if re_find: + result["public_as_only"] = re_find + result["vrf_name"] = vrf_name + result["af_type"] = af_type + if re_find[0] != public_as_only: + need_cfg = True + else: + need_cfg = True + + public_as_only_force = module.params['public_as_only_force'] + if public_as_only_force != 'no_use': + + conf_str = CE_GET_BGP_PEER_AF_HEADER % ( + vrf_name, af_type, remote_address) + "" + CE_GET_BGP_PEER_AF_TAIL + recv_xml = self.netconf_get_config(module=module, conf_str=conf_str) + + if "" in recv_xml: + need_cfg = True + else: + re_find = re.findall( + r'.*(.*).*', recv_xml) + + if re_find: + result["public_as_only_force"] = re_find + result["vrf_name"] = vrf_name + result["af_type"] = af_type + if re_find[0] != public_as_only_force: + need_cfg = True + else: + need_cfg = True + + public_as_only_limited = module.params['public_as_only_limited'] + if public_as_only_limited != 'no_use': + + conf_str = CE_GET_BGP_PEER_AF_HEADER % ( + vrf_name, af_type, remote_address) + "" + CE_GET_BGP_PEER_AF_TAIL + recv_xml = self.netconf_get_config(module=module, conf_str=conf_str) + + if "" in recv_xml: + need_cfg = True + else: + re_find = re.findall( + r'.*(.*).*', recv_xml) + + if re_find: + result["public_as_only_limited"] = re_find + result["vrf_name"] = vrf_name + result["af_type"] = af_type + if re_find[0] != public_as_only_limited: + need_cfg = True + else: + need_cfg = True + + public_as_only_replace = module.params['public_as_only_replace'] + if public_as_only_replace != 'no_use': + + conf_str = CE_GET_BGP_PEER_AF_HEADER % ( + vrf_name, af_type, remote_address) + "" + CE_GET_BGP_PEER_AF_TAIL + recv_xml = self.netconf_get_config(module=module, conf_str=conf_str) + + if "" in recv_xml: + need_cfg = True + else: + re_find = re.findall( + r'.*(.*).*', recv_xml) + + if re_find: + result["public_as_only_replace"] = re_find + result["vrf_name"] = vrf_name + result["af_type"] = af_type + if re_find[0] != public_as_only_replace: + need_cfg = True + else: + need_cfg = True + + public_as_only_skip_peer_as = module.params[ + 'public_as_only_skip_peer_as'] + if public_as_only_skip_peer_as != 'no_use': + + conf_str = CE_GET_BGP_PEER_AF_HEADER % ( + vrf_name, af_type, remote_address) + "" + CE_GET_BGP_PEER_AF_TAIL + recv_xml = self.netconf_get_config(module=module, conf_str=conf_str) + + if "" in recv_xml: + need_cfg = True + else: + re_find = re.findall( + r'.*(.*).*', recv_xml) + + if re_find: + result["public_as_only_skip_peer_as"] = re_find + result["vrf_name"] = vrf_name + result["af_type"] = af_type + if re_find[0] != public_as_only_skip_peer_as: + need_cfg = True + else: + need_cfg = True + + route_limit = module.params['route_limit'] + if route_limit: + + 
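+            # route_limit caps the number of routes accepted from this peer.
+            # Only the lower bound is range-checked here; the documented
+            # upper bound is 4294967295. Note that for the evpn address
+            # family the generated CLI later uses "mac-limit" instead of
+            # "route-limit" (see merge_bgp_peer_af_other below).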
if int(route_limit) < 1: + module.fail_json( + msg='the value of route_limit %s is out of [1 - 4294967295].' % route_limit) + + conf_str = CE_GET_BGP_PEER_AF_HEADER % ( + vrf_name, af_type, remote_address) + "" + CE_GET_BGP_PEER_AF_TAIL + recv_xml = self.netconf_get_config(module=module, conf_str=conf_str) + + if "" in recv_xml: + need_cfg = True + else: + re_find = re.findall( + r'.*(.*).*', recv_xml) + + if re_find: + result["route_limit"] = re_find + result["vrf_name"] = vrf_name + result["af_type"] = af_type + if re_find[0] != route_limit: + need_cfg = True + else: + need_cfg = True + + route_limit_percent = module.params['route_limit_percent'] + if route_limit_percent: + + if int(route_limit_percent) < 1 or int(route_limit_percent) > 100: + module.fail_json( + msg='Error: The value of route_limit_percent %s is out of [1 - 100].' % route_limit_percent) + + conf_str = CE_GET_BGP_PEER_AF_HEADER % ( + vrf_name, af_type, remote_address) + "" + CE_GET_BGP_PEER_AF_TAIL + recv_xml = self.netconf_get_config(module=module, conf_str=conf_str) + + if "" in recv_xml: + need_cfg = True + else: + re_find = re.findall( + r'.*(.*).*', recv_xml) + + if re_find: + result["route_limit_percent"] = re_find + result["vrf_name"] = vrf_name + result["af_type"] = af_type + if re_find[0] != route_limit_percent: + need_cfg = True + else: + need_cfg = True + + route_limit_type = module.params['route_limit_type'] + if route_limit_type: + + conf_str = CE_GET_BGP_PEER_AF_HEADER % ( + vrf_name, af_type, remote_address) + "" + CE_GET_BGP_PEER_AF_TAIL + recv_xml = self.netconf_get_config(module=module, conf_str=conf_str) + + if "" in recv_xml: + need_cfg = True + else: + re_find = re.findall( + r'.*(.*).*', recv_xml) + + if re_find: + result["route_limit_type"] = re_find + result["vrf_name"] = vrf_name + result["af_type"] = af_type + if re_find[0] != route_limit_type: + need_cfg = True + else: + need_cfg = True + + route_limit_idle_timeout = module.params['route_limit_idle_timeout'] + if route_limit_idle_timeout: + + if int(route_limit_idle_timeout) < 1 or int(route_limit_idle_timeout) > 1200: + module.fail_json( + msg='Error: The value of route_limit_idle_timeout %s is out of ' + '[1 - 1200].' % route_limit_idle_timeout) + + conf_str = CE_GET_BGP_PEER_AF_HEADER % ( + vrf_name, af_type, remote_address) + "" + CE_GET_BGP_PEER_AF_TAIL + recv_xml = self.netconf_get_config(module=module, conf_str=conf_str) + + if "" in recv_xml: + need_cfg = True + else: + re_find = re.findall( + r'.*(.*).*', recv_xml) + + if re_find: + result["route_limit_idle_timeout"] = re_find + result["vrf_name"] = vrf_name + result["af_type"] = af_type + if re_find[0] != route_limit_idle_timeout: + need_cfg = True + else: + need_cfg = True + + rt_updt_interval = module.params['rt_updt_interval'] + if rt_updt_interval: + + if int(rt_updt_interval) < 0 or int(rt_updt_interval) > 600: + module.fail_json( + msg='Error: The value of rt_updt_interval %s is out of [0 - 600].' 
% rt_updt_interval) + + conf_str = CE_GET_BGP_PEER_AF_HEADER % ( + vrf_name, af_type, remote_address) + "" + CE_GET_BGP_PEER_AF_TAIL + recv_xml = self.netconf_get_config(module=module, conf_str=conf_str) + + if "" in recv_xml: + need_cfg = True + else: + re_find = re.findall( + r'.*(.*).*', recv_xml) + + if re_find: + result["rt_updt_interval"] = re_find + result["vrf_name"] = vrf_name + result["af_type"] = af_type + if re_find[0] != rt_updt_interval: + need_cfg = True + else: + need_cfg = True + + redirect_ip = module.params['redirect_ip'] + if redirect_ip != 'no_use': + + conf_str = CE_GET_BGP_PEER_AF_HEADER % ( + vrf_name, af_type, remote_address) + "" + CE_GET_BGP_PEER_AF_TAIL + recv_xml = self.netconf_get_config(module=module, conf_str=conf_str) + + if "" in recv_xml: + need_cfg = True + else: + re_find = re.findall( + r'.*(.*).*', recv_xml) + + if re_find: + result["redirect_ip"] = re_find + result["vrf_name"] = vrf_name + result["af_type"] = af_type + if re_find[0] != redirect_ip: + need_cfg = True + else: + need_cfg = True + + redirect_ip_validation = module.params['redirect_ip_validation'] + if redirect_ip_validation != 'no_use': + + conf_str = CE_GET_BGP_PEER_AF_HEADER % ( + vrf_name, af_type, remote_address) + "" + CE_GET_BGP_PEER_AF_TAIL + recv_xml = self.netconf_get_config(module=module, conf_str=conf_str) + + if "" in recv_xml: + need_cfg = True + else: + re_find = re.findall( + r'.*(.*).*', recv_xml) + + if re_find: + result["redirect_ip_validation"] = re_find + result["vrf_name"] = vrf_name + result["af_type"] = af_type + if re_find[0] != redirect_ip_validation: + need_cfg = True + else: + need_cfg = True + + reflect_client = module.params['reflect_client'] + if reflect_client != 'no_use': + + conf_str = CE_GET_BGP_PEER_AF_HEADER % ( + vrf_name, af_type, remote_address) + "" + CE_GET_BGP_PEER_AF_TAIL + recv_xml = self.netconf_get_config(module=module, conf_str=conf_str) + + if "" in recv_xml: + need_cfg = True + else: + re_find = re.findall( + r'.*(.*).*', recv_xml) + + if re_find: + result["reflect_client"] = re_find + result["vrf_name"] = vrf_name + result["af_type"] = af_type + if re_find[0] != reflect_client: + need_cfg = True + else: + need_cfg = True + + substitute_as_enable = module.params['substitute_as_enable'] + if substitute_as_enable != 'no_use': + + conf_str = CE_GET_BGP_PEER_AF_HEADER % ( + vrf_name, af_type, remote_address) + "" + CE_GET_BGP_PEER_AF_TAIL + recv_xml = self.netconf_get_config(module=module, conf_str=conf_str) + + if "" in recv_xml: + need_cfg = True + else: + re_find = re.findall( + r'.*(.*).*', recv_xml) + + if re_find: + result["substitute_as_enable"] = re_find + result["vrf_name"] = vrf_name + result["af_type"] = af_type + if re_find[0] != substitute_as_enable: + need_cfg = True + else: + need_cfg = True + + import_rt_policy_name = module.params['import_rt_policy_name'] + if import_rt_policy_name: + + if len(import_rt_policy_name) < 1 or len(import_rt_policy_name) > 40: + module.fail_json( + msg='Error: The len of import_rt_policy_name %s is out of [1 - 40].' 
% import_rt_policy_name) + + conf_str = CE_GET_BGP_PEER_AF_HEADER % ( + vrf_name, af_type, remote_address) + "" + CE_GET_BGP_PEER_AF_TAIL + recv_xml = self.netconf_get_config(module=module, conf_str=conf_str) + + if "" in recv_xml: + need_cfg = True + else: + re_find = re.findall( + r'.*(.*).*', recv_xml) + + if re_find: + result["import_rt_policy_name"] = re_find + result["vrf_name"] = vrf_name + result["af_type"] = af_type + if re_find[0] != import_rt_policy_name: + need_cfg = True + else: + need_cfg = True + + export_rt_policy_name = module.params['export_rt_policy_name'] + if export_rt_policy_name: + + if len(export_rt_policy_name) < 1 or len(export_rt_policy_name) > 40: + module.fail_json( + msg='Error: The len of export_rt_policy_name %s is out of [1 - 40].' % export_rt_policy_name) + + conf_str = CE_GET_BGP_PEER_AF_HEADER % ( + vrf_name, af_type, remote_address) + "" + CE_GET_BGP_PEER_AF_TAIL + recv_xml = self.netconf_get_config(module=module, conf_str=conf_str) + + if "" in recv_xml: + need_cfg = True + else: + re_find = re.findall( + r'.*(.*).*', recv_xml) + + if re_find: + result["export_rt_policy_name"] = re_find + result["vrf_name"] = vrf_name + result["af_type"] = af_type + if re_find[0] != export_rt_policy_name: + need_cfg = True + else: + need_cfg = True + + import_pref_filt_name = module.params['import_pref_filt_name'] + if import_pref_filt_name: + + if len(import_pref_filt_name) < 1 or len(import_pref_filt_name) > 169: + module.fail_json( + msg='Error: The len of import_pref_filt_name %s is out of [1 - 169].' % import_pref_filt_name) + + conf_str = CE_GET_BGP_PEER_AF_HEADER % ( + vrf_name, af_type, remote_address) + "" + CE_GET_BGP_PEER_AF_TAIL + recv_xml = self.netconf_get_config(module=module, conf_str=conf_str) + + if "" in recv_xml: + need_cfg = True + else: + re_find = re.findall( + r'.*(.*).*', recv_xml) + + if re_find: + result["import_pref_filt_name"] = re_find + result["vrf_name"] = vrf_name + result["af_type"] = af_type + if re_find[0] != import_pref_filt_name: + need_cfg = True + else: + need_cfg = True + + export_pref_filt_name = module.params['export_pref_filt_name'] + if export_pref_filt_name: + + if len(export_pref_filt_name) < 1 or len(export_pref_filt_name) > 169: + module.fail_json( + msg='Error: The len of export_pref_filt_name %s is out of [1 - 169].' % export_pref_filt_name) + + conf_str = CE_GET_BGP_PEER_AF_HEADER % ( + vrf_name, af_type, remote_address) + "" + CE_GET_BGP_PEER_AF_TAIL + recv_xml = self.netconf_get_config(module=module, conf_str=conf_str) + + if "" in recv_xml: + need_cfg = True + else: + re_find = re.findall( + r'.*(.*).*', recv_xml) + + if re_find: + result["export_pref_filt_name"] = re_find + result["vrf_name"] = vrf_name + result["af_type"] = af_type + if re_find[0] != export_pref_filt_name: + need_cfg = True + else: + need_cfg = True + + import_as_path_filter = module.params['import_as_path_filter'] + if import_as_path_filter: + + if int(import_as_path_filter) < 1 or int(import_as_path_filter) > 256: + module.fail_json( + msg='Error: The value of import_as_path_filter %s is out of [1 - 256].' 
% import_as_path_filter) + + conf_str = CE_GET_BGP_PEER_AF_HEADER % ( + vrf_name, af_type, remote_address) + "" + CE_GET_BGP_PEER_AF_TAIL + recv_xml = self.netconf_get_config(module=module, conf_str=conf_str) + + if "" in recv_xml: + need_cfg = True + else: + re_find = re.findall( + r'.*(.*).*', recv_xml) + + if re_find: + result["import_as_path_filter"] = re_find + result["vrf_name"] = vrf_name + result["af_type"] = af_type + if re_find[0] != import_as_path_filter: + need_cfg = True + else: + need_cfg = True + + export_as_path_filter = module.params['export_as_path_filter'] + if export_as_path_filter: + + if int(export_as_path_filter) < 1 or int(export_as_path_filter) > 256: + module.fail_json( + msg='Error: The value of export_as_path_filter %s is out of [1 - 256].' % export_as_path_filter) + + conf_str = CE_GET_BGP_PEER_AF_HEADER % ( + vrf_name, af_type, remote_address) + "" + CE_GET_BGP_PEER_AF_TAIL + recv_xml = self.netconf_get_config(module=module, conf_str=conf_str) + + if "" in recv_xml: + need_cfg = True + else: + re_find = re.findall( + r'.*(.*).*', recv_xml) + + if re_find: + result["export_as_path_filter"] = re_find + result["vrf_name"] = vrf_name + result["af_type"] = af_type + if re_find[0] != export_as_path_filter: + need_cfg = True + else: + need_cfg = True + + import_as_path_name_or_num = module.params[ + 'import_as_path_name_or_num'] + if import_as_path_name_or_num: + + if len(import_as_path_name_or_num) < 1 or len(import_as_path_name_or_num) > 51: + module.fail_json( + msg='Error: The len of import_as_path_name_or_num %s is out ' + 'of [1 - 51].' % import_as_path_name_or_num) + + conf_str = CE_GET_BGP_PEER_AF_HEADER % ( + vrf_name, af_type, remote_address) + "" + CE_GET_BGP_PEER_AF_TAIL + recv_xml = self.netconf_get_config(module=module, conf_str=conf_str) + + if "" in recv_xml: + need_cfg = True + else: + re_find = re.findall( + r'.*(.*).*', recv_xml) + + if re_find: + result["import_as_path_name_or_num"] = re_find + result["vrf_name"] = vrf_name + result["af_type"] = af_type + if re_find[0] != import_as_path_name_or_num: + need_cfg = True + else: + need_cfg = True + + export_as_path_name_or_num = module.params[ + 'export_as_path_name_or_num'] + if export_as_path_name_or_num: + + if len(export_as_path_name_or_num) < 1 or len(export_as_path_name_or_num) > 51: + module.fail_json( + msg='Error: The len of export_as_path_name_or_num %s is out ' + 'of [1 - 51].' % export_as_path_name_or_num) + + conf_str = CE_GET_BGP_PEER_AF_HEADER % ( + vrf_name, af_type, remote_address) + "" + CE_GET_BGP_PEER_AF_TAIL + recv_xml = self.netconf_get_config(module=module, conf_str=conf_str) + + if "" in recv_xml: + need_cfg = True + else: + re_find = re.findall( + r'.*(.*).*', recv_xml) + + if re_find: + result["export_as_path_name_or_num"] = re_find + result["vrf_name"] = vrf_name + result["af_type"] = af_type + if re_find[0] != export_as_path_name_or_num: + need_cfg = True + else: + need_cfg = True + + import_acl_name_or_num = module.params['import_acl_name_or_num'] + if import_acl_name_or_num: + + if len(import_acl_name_or_num) < 1 or len(import_acl_name_or_num) > 32: + module.fail_json( + msg='Error: The len of import_acl_name_or_num %s is out of [1 - 32].' 
% import_acl_name_or_num) + + conf_str = CE_GET_BGP_PEER_AF_HEADER % ( + vrf_name, af_type, remote_address) + "" + CE_GET_BGP_PEER_AF_TAIL + recv_xml = self.netconf_get_config(module=module, conf_str=conf_str) + + if "" in recv_xml: + need_cfg = True + else: + re_find = re.findall( + r'.*(.*).*', recv_xml) + + if re_find: + result["import_acl_name_or_num"] = re_find + result["vrf_name"] = vrf_name + result["af_type"] = af_type + if re_find[0] != import_acl_name_or_num: + need_cfg = True + else: + need_cfg = True + + export_acl_name_or_num = module.params['export_acl_name_or_num'] + if export_acl_name_or_num: + + if len(export_acl_name_or_num) < 1 or len(export_acl_name_or_num) > 32: + module.fail_json( + msg='Error: The len of export_acl_name_or_num %s is out of [1 - 32].' % export_acl_name_or_num) + + conf_str = CE_GET_BGP_PEER_AF_HEADER % ( + vrf_name, af_type, remote_address) + "" + CE_GET_BGP_PEER_AF_TAIL + recv_xml = self.netconf_get_config(module=module, conf_str=conf_str) + + if "" in recv_xml: + need_cfg = True + else: + re_find = re.findall( + r'.*(.*).*', recv_xml) + + if re_find: + result["export_acl_name_or_num"] = re_find + result["vrf_name"] = vrf_name + result["af_type"] = af_type + if re_find[0] != export_acl_name_or_num: + need_cfg = True + else: + need_cfg = True + + ipprefix_orf_enable = module.params['ipprefix_orf_enable'] + if ipprefix_orf_enable != 'no_use': + conf_str = CE_GET_BGP_PEER_AF_HEADER % ( + vrf_name, af_type, remote_address) + "" + CE_GET_BGP_PEER_AF_TAIL + recv_xml = self.netconf_get_config(module=module, conf_str=conf_str) + + if "" in recv_xml: + need_cfg = True + else: + re_find = re.findall( + r'.*(.*).*', recv_xml) + + if re_find: + result["ipprefix_orf_enable"] = re_find + result["vrf_name"] = vrf_name + result["af_type"] = af_type + if re_find[0] != ipprefix_orf_enable: + need_cfg = True + else: + need_cfg = True + + is_nonstd_ipprefix_mod = module.params['is_nonstd_ipprefix_mod'] + if is_nonstd_ipprefix_mod != 'no_use': + + conf_str = CE_GET_BGP_PEER_AF_HEADER % ( + vrf_name, af_type, remote_address) + "" + CE_GET_BGP_PEER_AF_TAIL + recv_xml = self.netconf_get_config(module=module, conf_str=conf_str) + + if "" in recv_xml: + need_cfg = True + else: + re_find = re.findall( + r'.*(.*).*', recv_xml) + + if re_find: + result["is_nonstd_ipprefix_mod"] = re_find + result["vrf_name"] = vrf_name + result["af_type"] = af_type + if re_find[0] != is_nonstd_ipprefix_mod: + need_cfg = True + else: + need_cfg = True + + orftype = module.params['orftype'] + if orftype: + + if int(orftype) < 0 or int(orftype) > 65535: + module.fail_json( + msg='Error: The value of orftype %s is out of [0 - 65535].' 
% orftype) + + conf_str = CE_GET_BGP_PEER_AF_HEADER % ( + vrf_name, af_type, remote_address) + "" + CE_GET_BGP_PEER_AF_TAIL + recv_xml = self.netconf_get_config(module=module, conf_str=conf_str) + + if "" in recv_xml: + need_cfg = True + else: + re_find = re.findall( + r'.*(.*).*', recv_xml) + + if re_find: + result["orftype"] = re_find + result["vrf_name"] = vrf_name + result["af_type"] = af_type + if re_find[0] != orftype: + need_cfg = True + else: + need_cfg = True + + orf_mode = module.params['orf_mode'] + if orf_mode: + + conf_str = CE_GET_BGP_PEER_AF_HEADER % ( + vrf_name, af_type, remote_address) + "" + CE_GET_BGP_PEER_AF_TAIL + recv_xml = self.netconf_get_config(module=module, conf_str=conf_str) + + if "" in recv_xml: + need_cfg = True + else: + re_find = re.findall( + r'.*(.*).*', recv_xml) + + if re_find: + result["orf_mode"] = re_find + result["vrf_name"] = vrf_name + result["af_type"] = af_type + if re_find[0] != orf_mode: + need_cfg = True + else: + need_cfg = True + + soostring = module.params['soostring'] + if soostring: + + if len(soostring) < 3 or len(soostring) > 21: + module.fail_json( + msg='Error: The len of soostring %s is out of [3 - 21].' % soostring) + + conf_str = CE_GET_BGP_PEER_AF_HEADER % ( + vrf_name, af_type, remote_address) + "" + CE_GET_BGP_PEER_AF_TAIL + recv_xml = self.netconf_get_config(module=module, conf_str=conf_str) + + if "" in recv_xml: + need_cfg = True + else: + re_find = re.findall( + r'.*(.*).*', recv_xml) + + if re_find: + result["soostring"] = re_find + result["vrf_name"] = vrf_name + result["af_type"] = af_type + if re_find[0] != soostring: + need_cfg = True + else: + need_cfg = True + + default_rt_adv_enable = module.params['default_rt_adv_enable'] + if default_rt_adv_enable != 'no_use': + + conf_str = CE_GET_BGP_PEER_AF_HEADER % ( + vrf_name, af_type, remote_address) + "" + CE_GET_BGP_PEER_AF_TAIL + recv_xml = self.netconf_get_config(module=module, conf_str=conf_str) + + if "" in recv_xml: + need_cfg = True + else: + re_find = re.findall( + r'.*(.*).*', recv_xml) + + if re_find: + result["default_rt_adv_enable"] = re_find + result["vrf_name"] = vrf_name + result["af_type"] = af_type + if re_find[0] != default_rt_adv_enable: + need_cfg = True + else: + need_cfg = True + + default_rt_adv_policy = module.params['default_rt_adv_policy'] + if default_rt_adv_policy: + + if len(default_rt_adv_policy) < 1 or len(default_rt_adv_policy) > 40: + module.fail_json( + msg='Error: The len of default_rt_adv_policy %s is out of [1 - 40].' 
% default_rt_adv_policy) + + conf_str = CE_GET_BGP_PEER_AF_HEADER % ( + vrf_name, af_type, remote_address) + "" + CE_GET_BGP_PEER_AF_TAIL + recv_xml = self.netconf_get_config(module=module, conf_str=conf_str) + + if "" in recv_xml: + need_cfg = True + else: + re_find = re.findall( + r'.*(.*).*', recv_xml) + + if re_find: + result["default_rt_adv_policy"] = re_find + result["vrf_name"] = vrf_name + result["af_type"] = af_type + if re_find[0] != default_rt_adv_policy: + need_cfg = True + else: + need_cfg = True + + default_rt_match_mode = module.params['default_rt_match_mode'] + if default_rt_match_mode: + + conf_str = CE_GET_BGP_PEER_AF_HEADER % ( + vrf_name, af_type, remote_address) + "" + CE_GET_BGP_PEER_AF_TAIL + recv_xml = self.netconf_get_config(module=module, conf_str=conf_str) + + if "" in recv_xml: + need_cfg = True + else: + re_find = re.findall( + r'.*(.*).*', recv_xml) + + if re_find: + result["default_rt_match_mode"] = re_find + result["vrf_name"] = vrf_name + result["af_type"] = af_type + if re_find[0] != default_rt_match_mode: + need_cfg = True + else: + need_cfg = True + + add_path_mode = module.params['add_path_mode'] + if add_path_mode: + + conf_str = CE_GET_BGP_PEER_AF_HEADER % ( + vrf_name, af_type, remote_address) + "" + CE_GET_BGP_PEER_AF_TAIL + recv_xml = self.netconf_get_config(module=module, conf_str=conf_str) + + if "" in recv_xml: + need_cfg = True + else: + re_find = re.findall( + r'.*(.*).*', recv_xml) + + if re_find: + result["add_path_mode"] = re_find + result["vrf_name"] = vrf_name + result["af_type"] = af_type + if re_find[0] != add_path_mode: + need_cfg = True + else: + need_cfg = True + + adv_add_path_num = module.params['adv_add_path_num'] + if adv_add_path_num: + + if int(adv_add_path_num) < 2 or int(adv_add_path_num) > 64: + module.fail_json( + msg='Error: The value of adv_add_path_num %s is out of [2 - 64].' 
% adv_add_path_num) + + conf_str = CE_GET_BGP_PEER_AF_HEADER % ( + vrf_name, af_type, remote_address) + "" + CE_GET_BGP_PEER_AF_TAIL + recv_xml = self.netconf_get_config(module=module, conf_str=conf_str) + + if "" in recv_xml: + need_cfg = True + else: + re_find = re.findall( + r'.*(.*).*', recv_xml) + + if re_find: + result["adv_add_path_num"] = re_find + result["vrf_name"] = vrf_name + result["af_type"] = af_type + if re_find[0] != adv_add_path_num: + need_cfg = True + else: + need_cfg = True + + origin_as_valid = module.params['origin_as_valid'] + if origin_as_valid != 'no_use': + + conf_str = CE_GET_BGP_PEER_AF_HEADER % ( + vrf_name, af_type, remote_address) + "" + CE_GET_BGP_PEER_AF_TAIL + recv_xml = self.netconf_get_config(module=module, conf_str=conf_str) + + if "" in recv_xml: + need_cfg = True + else: + re_find = re.findall( + r'.*(.*).*', recv_xml) + + if re_find: + result["origin_as_valid"] = re_find + result["vrf_name"] = vrf_name + result["af_type"] = af_type + if re_find[0] != origin_as_valid: + need_cfg = True + else: + need_cfg = True + + vpls_enable = module.params['vpls_enable'] + if vpls_enable != 'no_use': + + conf_str = CE_GET_BGP_PEER_AF_HEADER % ( + vrf_name, af_type, remote_address) + "" + CE_GET_BGP_PEER_AF_TAIL + recv_xml = self.netconf_get_config(module=module, conf_str=conf_str) + + if "" in recv_xml: + need_cfg = True + else: + re_find = re.findall( + r'.*(.*).*', recv_xml) + + if re_find: + result["vpls_enable"] = re_find + result["vrf_name"] = vrf_name + result["af_type"] = af_type + if re_find[0] != vpls_enable: + need_cfg = True + else: + need_cfg = True + + vpls_ad_disable = module.params['vpls_ad_disable'] + if vpls_ad_disable != 'no_use': + + conf_str = CE_GET_BGP_PEER_AF_HEADER % ( + vrf_name, af_type, remote_address) + "" + CE_GET_BGP_PEER_AF_TAIL + recv_xml = self.netconf_get_config(module=module, conf_str=conf_str) + + if "" in recv_xml: + need_cfg = True + else: + re_find = re.findall( + r'.*(.*).*', recv_xml) + + if re_find: + result["vpls_ad_disable"] = re_find + result["vrf_name"] = vrf_name + result["af_type"] = af_type + if re_find[0] != vpls_ad_disable: + need_cfg = True + else: + need_cfg = True + + update_pkt_standard_compatible = module.params[ + 'update_pkt_standard_compatible'] + if update_pkt_standard_compatible != 'no_use': + + conf_str = CE_GET_BGP_PEER_AF_HEADER % ( + vrf_name, af_type, remote_address) + "" + \ + CE_GET_BGP_PEER_AF_TAIL + recv_xml = self.netconf_get_config(module=module, conf_str=conf_str) + + if "" in recv_xml: + need_cfg = True + else: + re_find = re.findall( + r'.*(.*).*', recv_xml) + + if re_find: + result["update_pkt_standard_compatible"] = re_find + result["vrf_name"] = vrf_name + result["af_type"] = af_type + if re_find[0] != update_pkt_standard_compatible: + need_cfg = True + else: + need_cfg = True + + result["need_cfg"] = need_cfg + return result + + def merge_bgp_peer_af(self, **kwargs): + """ merge_bgp_peer_af """ + + module = kwargs["module"] + + vrf_name = module.params['vrf_name'] + af_type = module.params['af_type'] + remote_address = module.params['remote_address'] + + conf_str = CE_MERGE_BGP_PEER_AF_HEADER % ( + vrf_name, af_type, remote_address) + CE_MERGE_BGP_PEER_AF_TAIL + + recv_xml = self.netconf_set_config(module=module, conf_str=conf_str) + + if "" not in recv_xml: + module.fail_json(msg='Error: Merge bgp peer address family failed.') + + cmds = [] + cmd = af_type + if af_type == "ipv4uni": + if vrf_name == "_public_": + cmd = "ipv4-family unicast" + else: + cmd = "ipv4-family vpn-instance %s" 
% vrf_name + elif af_type == "ipv4multi": + cmd = "ipv4-family multicast" + elif af_type == "ipv6uni": + if vrf_name == "_public_": + cmd = "ipv6-family unicast" + else: + cmd = "ipv6-family vpn-instance %s" % vrf_name + elif af_type == "evpn": + cmd = "l2vpn-family evpn" + elif af_type == "ipv4vpn": + cmd = "ipv4-family vpnv4" + elif af_type == "ipv6vpn": + cmd = "ipv6-family vpnv6" + cmds.append(cmd) + if vrf_name == "_public_": + cmd = "peer %s enable" % remote_address + else: + cmd = "peer %s" % remote_address + cmds.append(cmd) + + return cmds + + def create_bgp_peer_af(self, **kwargs): + """ create_bgp_peer_af """ + + module = kwargs["module"] + + vrf_name = module.params['vrf_name'] + af_type = module.params['af_type'] + remote_address = module.params['remote_address'] + + conf_str = CE_CREATE_BGP_PEER_AF % (vrf_name, af_type, remote_address) + + recv_xml = self.netconf_set_config(module=module, conf_str=conf_str) + + if "" not in recv_xml: + module.fail_json(msg='Error: Create bgp peer address family failed.') + + cmds = [] + cmd = af_type + if af_type == "ipv4uni": + if vrf_name == "_public_": + cmd = "ipv4-family unicast" + else: + cmd = "ipv4-family vpn-instance %s" % vrf_name + elif af_type == "ipv4multi": + cmd = "ipv4-family multicast" + elif af_type == "ipv6uni": + if vrf_name == "_public_": + cmd = "ipv6-family unicast" + else: + cmd = "ipv6-family vpn-instance %s" % vrf_name + elif af_type == "evpn": + cmd = "l2vpn-family evpn" + elif af_type == "ipv4vpn": + cmd = "ipv4-family vpnv4" + elif af_type == "ipv6vpn": + cmd = "ipv6-family vpnv6" + cmds.append(cmd) + if vrf_name == "_public_": + cmd = "peer %s enable" % remote_address + else: + cmd = "peer %s" % remote_address + cmds.append(cmd) + + return cmds + + def delete_bgp_peer_af(self, **kwargs): + """ delete_bgp_peer_af """ + + module = kwargs["module"] + + vrf_name = module.params['vrf_name'] + af_type = module.params['af_type'] + remote_address = module.params['remote_address'] + + conf_str = CE_DELETE_BGP_PEER_AF % (vrf_name, af_type, remote_address) + + recv_xml = self.netconf_set_config(module=module, conf_str=conf_str) + + if "" not in recv_xml: + module.fail_json(msg='Error: Delete bgp peer address family failed.') + + cmds = [] + cmd = af_type + if af_type == "ipv4uni": + if vrf_name == "_public_": + cmd = "ipv4-family unicast" + else: + cmd = "ipv4-family vpn-instance %s" % vrf_name + elif af_type == "ipv4multi": + cmd = "ipv4-family multicast" + elif af_type == "ipv6uni": + if vrf_name == "_public_": + cmd = "ipv6-family unicast" + else: + cmd = "ipv6-family vpn-instance %s" % vrf_name + elif af_type == "evpn": + cmd = "l2vpn-family evpn" + elif af_type == "ipv4vpn": + cmd = "ipv4-family vpnv4" + elif af_type == "ipv6vpn": + cmd = "ipv6-family vpnv6" + cmds.append(cmd) + if vrf_name == "_public_": + cmd = "undo peer %s enable" % remote_address + else: + cmd = "undo peer %s" % remote_address + cmds.append(cmd) + + return cmds + + def merge_bgp_peer_af_other(self, **kwargs): + """ merge_bgp_peer_af_other """ + + module = kwargs["module"] + + vrf_name = module.params['vrf_name'] + af_type = module.params['af_type'] + remote_address = module.params['remote_address'] + + conf_str = CE_MERGE_BGP_PEER_AF_HEADER % ( + vrf_name, af_type, remote_address) + + cmds = [] + + advertise_irb = module.params['advertise_irb'] + if advertise_irb != 'no_use': + conf_str += "%s" % advertise_irb + + if advertise_irb == "true": + cmd = "peer %s advertise irb" % remote_address + else: + cmd = "undo peer %s advertise irb" % 
remote_address + cmds.append(cmd) + + advertise_arp = module.params['advertise_arp'] + if advertise_arp != 'no_use': + conf_str += "%s" % advertise_arp + + if advertise_arp == "true": + cmd = "peer %s advertise arp" % remote_address + else: + cmd = "undo peer %s advertise arp" % remote_address + cmds.append(cmd) + + advertise_remote_nexthop = module.params['advertise_remote_nexthop'] + if advertise_remote_nexthop != 'no_use': + conf_str += "%s" % advertise_remote_nexthop + + if advertise_remote_nexthop == "true": + cmd = "peer %s advertise remote-nexthop" % remote_address + else: + cmd = "undo peer %s advertise remote-nexthop" % remote_address + cmds.append(cmd) + + advertise_community = module.params['advertise_community'] + if advertise_community != 'no_use': + conf_str += "%s" % advertise_community + + if advertise_community == "true": + cmd = "peer %s advertise-community" % remote_address + else: + cmd = "undo peer %s advertise-community" % remote_address + cmds.append(cmd) + + advertise_ext_community = module.params['advertise_ext_community'] + if advertise_ext_community != 'no_use': + conf_str += "%s" % advertise_ext_community + + if advertise_ext_community == "true": + cmd = "peer %s advertise-ext-community" % remote_address + else: + cmd = "undo peer %s advertise-ext-community" % remote_address + cmds.append(cmd) + + discard_ext_community = module.params['discard_ext_community'] + if discard_ext_community != 'no_use': + conf_str += "%s" % discard_ext_community + + if discard_ext_community == "true": + cmd = "peer %s discard-ext-community" % remote_address + else: + cmd = "undo peer %s discard-ext-community" % remote_address + cmds.append(cmd) + + allow_as_loop_enable = module.params['allow_as_loop_enable'] + if allow_as_loop_enable != 'no_use': + conf_str += "%s" % allow_as_loop_enable + + if allow_as_loop_enable == "true": + cmd = "peer %s allow-as-loop" % remote_address + else: + cmd = "undo peer %s allow-as-loop" % remote_address + cmds.append(cmd) + + allow_as_loop_limit = module.params['allow_as_loop_limit'] + if allow_as_loop_limit: + conf_str += "%s" % allow_as_loop_limit + + if allow_as_loop_enable == "true": + cmd = "peer %s allow-as-loop %s" % (remote_address, allow_as_loop_limit) + else: + cmd = "undo peer %s allow-as-loop" % remote_address + cmds.append(cmd) + + keep_all_routes = module.params['keep_all_routes'] + if keep_all_routes != 'no_use': + conf_str += "%s" % keep_all_routes + + if keep_all_routes == "true": + cmd = "peer %s keep-all-routes" % remote_address + else: + cmd = "undo peer %s keep-all-routes" % remote_address + cmds.append(cmd) + + nexthop_configure = module.params['nexthop_configure'] + if nexthop_configure: + conf_str += "%s" % nexthop_configure + + if nexthop_configure == "local": + cmd = "peer %s next-hop-local" % remote_address + cmds.append(cmd) + elif nexthop_configure == "invariable": + cmd = "peer %s next-hop-invariable" % remote_address + cmds.append(cmd) + else: + if self.exist_nexthop_configure != "null": + if self.exist_nexthop_configure == "local": + cmd = "undo peer %s next-hop-local" % remote_address + cmds.append(cmd) + elif self.exist_nexthop_configure == "invariable": + cmd = "undo peer %s next-hop-invariable" % remote_address + cmds.append(cmd) + preferred_value = module.params['preferred_value'] + if preferred_value: + conf_str += "%s" % preferred_value + + cmd = "peer %s preferred-value %s" % (remote_address, preferred_value) + cmds.append(cmd) + + public_as_only = module.params['public_as_only'] + if public_as_only != 'no_use': 
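+            # The public_as_only* flags strip private AS numbers from
+            # outbound UPDATE messages. Each flag maps onto a CLI variant
+            # built below, e.g. (illustrative peer address):
+            #   peer 192.168.10.10 public-as-only
+            #   peer 192.168.10.10 public-as-only force replace
+            #   peer 192.168.10.10 public-as-only limited include-peer-as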
+ conf_str += "%s" % public_as_only + + if public_as_only == "true": + cmd = "peer %s public-as-only" % remote_address + else: + cmd = "undo peer %s public-as-only" % remote_address + cmds.append(cmd) + + public_as_only_force = module.params['public_as_only_force'] + if public_as_only_force != 'no_use': + conf_str += "%s" % public_as_only_force + + if public_as_only_force == "true": + cmd = "peer %s public-as-only force" % remote_address + else: + cmd = "undo peer %s public-as-only force" % remote_address + cmds.append(cmd) + + public_as_only_limited = module.params['public_as_only_limited'] + if public_as_only_limited != 'no_use': + conf_str += "%s" % public_as_only_limited + + if public_as_only_limited == "true": + cmd = "peer %s public-as-only limited" % remote_address + else: + cmd = "undo peer %s public-as-only limited" % remote_address + cmds.append(cmd) + + public_as_only_replace = module.params['public_as_only_replace'] + if public_as_only_replace != 'no_use': + conf_str += "%s" % public_as_only_replace + + if public_as_only_replace == "true": + if public_as_only_force != "no_use": + cmd = "peer %s public-as-only force replace" % remote_address + if public_as_only_limited != "no_use": + cmd = "peer %s public-as-only limited replace" % remote_address + else: + if public_as_only_force != "no_use": + cmd = "undo peer %s public-as-only force replace" % remote_address + if public_as_only_limited != "no_use": + cmd = "undo peer %s public-as-only limited replace" % remote_address + cmds.append(cmd) + + public_as_only_skip_peer_as = module.params[ + 'public_as_only_skip_peer_as'] + if public_as_only_skip_peer_as != 'no_use': + conf_str += "%s" % public_as_only_skip_peer_as + + if public_as_only_skip_peer_as == "true": + if public_as_only_force != "no_use": + cmd = "peer %s public-as-only force include-peer-as" % remote_address + if public_as_only_limited != "no_use": + cmd = "peer %s public-as-only limited include-peer-as" % remote_address + else: + if public_as_only_force != "no_use": + cmd = "undo peer %s public-as-only force include-peer-as" % remote_address + if public_as_only_limited != "no_use": + cmd = "undo peer %s public-as-only limited include-peer-as" % remote_address + cmds.append(cmd) + + route_limit_sign = "route-limit" + if af_type == "evpn": + route_limit_sign = "mac-limit" + route_limit = module.params['route_limit'] + if route_limit: + conf_str += "%s" % route_limit + + cmd = "peer %s %s %s" % (remote_address, route_limit_sign, route_limit) + cmds.append(cmd) + + route_limit_percent = module.params['route_limit_percent'] + if route_limit_percent: + conf_str += "%s" % route_limit_percent + + cmd = "peer %s %s %s %s" % (remote_address, route_limit_sign, route_limit, route_limit_percent) + cmds.append(cmd) + + route_limit_type = module.params['route_limit_type'] + if route_limit_type: + conf_str += "%s" % route_limit_type + + if route_limit_type == "alertOnly": + cmd = "peer %s %s %s %s alert-only" % (remote_address, route_limit_sign, route_limit, route_limit_percent) + cmds.append(cmd) + elif route_limit_type == "idleForever": + cmd = "peer %s %s %s %s idle-forever" % (remote_address, route_limit_sign, route_limit, route_limit_percent) + cmds.append(cmd) + elif route_limit_type == "idleTimeout": + cmd = "peer %s %s %s %s idle-timeout" % (remote_address, route_limit_sign, route_limit, route_limit_percent) + cmds.append(cmd) + + route_limit_idle_timeout = module.params['route_limit_idle_timeout'] + if route_limit_idle_timeout: + conf_str += "%s" % route_limit_idle_timeout + + 
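+            # The idle-timeout form reuses the limit and percentage values
+            # already collected above, so the assembled command looks like
+            # (illustrative values):
+            #   peer 192.168.10.10 route-limit 1000 80 idle-timeout 600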
cmd = "peer %s %s %s %s idle-timeout %s" % (remote_address, route_limit_sign, route_limit, route_limit_percent, route_limit_idle_timeout) + cmds.append(cmd) + + rt_updt_interval = module.params['rt_updt_interval'] + if rt_updt_interval: + conf_str += "%s" % rt_updt_interval + + cmd = "peer %s route-update-interval %s" % (remote_address, rt_updt_interval) + cmds.append(cmd) + + redirect_ip = module.params['redirect_ip'] + if redirect_ip != 'no_use': + conf_str += "%s" % redirect_ip + + redirect_ip_validation = module.params['redirect_ip_validation'] + if redirect_ip_validation != 'no_use': + conf_str += "%s" % redirect_ip_validation + + reflect_client = module.params['reflect_client'] + if reflect_client != 'no_use': + conf_str += "%s" % reflect_client + + if reflect_client == "true": + cmd = "peer %s reflect-client" % remote_address + else: + cmd = "undo peer %s reflect-client" % remote_address + cmds.append(cmd) + + substitute_as_enable = module.params['substitute_as_enable'] + if substitute_as_enable != 'no_use': + conf_str += "%s" % substitute_as_enable + + if substitute_as_enable == "true": + cmd = "peer %s substitute-as" % remote_address + else: + cmd = "undo peer %s substitute-as" % remote_address + cmds.append(cmd) + + import_rt_policy_name = module.params['import_rt_policy_name'] + if import_rt_policy_name: + conf_str += "%s" % import_rt_policy_name + + cmd = "peer %s route-policy %s import" % (remote_address, import_rt_policy_name) + cmds.append(cmd) + + export_rt_policy_name = module.params['export_rt_policy_name'] + if export_rt_policy_name: + conf_str += "%s" % export_rt_policy_name + + cmd = "peer %s route-policy %s export" % (remote_address, export_rt_policy_name) + cmds.append(cmd) + + import_pref_filt_name = module.params['import_pref_filt_name'] + if import_pref_filt_name: + conf_str += "%s" % import_pref_filt_name + + cmd = "peer %s ip-prefix %s import" % (remote_address, import_pref_filt_name) + cmds.append(cmd) + + export_pref_filt_name = module.params['export_pref_filt_name'] + if export_pref_filt_name: + conf_str += "%s" % export_pref_filt_name + + cmd = "peer %s ip-prefix %s export" % (remote_address, export_pref_filt_name) + cmds.append(cmd) + + import_as_path_filter = module.params['import_as_path_filter'] + if import_as_path_filter: + conf_str += "%s" % import_as_path_filter + + cmd = "peer %s as-path-filter %s import" % (remote_address, import_as_path_filter) + cmds.append(cmd) + + export_as_path_filter = module.params['export_as_path_filter'] + if export_as_path_filter: + conf_str += "%s" % export_as_path_filter + + cmd = "peer %s as-path-filter %s export" % (remote_address, export_as_path_filter) + cmds.append(cmd) + + import_as_path_name_or_num = module.params[ + 'import_as_path_name_or_num'] + if import_as_path_name_or_num: + conf_str += "%s" % import_as_path_name_or_num + + cmd = "peer %s as-path-filter %s import" % (remote_address, import_as_path_name_or_num) + cmds.append(cmd) + + export_as_path_name_or_num = module.params[ + 'export_as_path_name_or_num'] + if export_as_path_name_or_num: + conf_str += "%s" % export_as_path_name_or_num + + cmd = "peer %s as-path-filter %s export" % (remote_address, export_as_path_name_or_num) + cmds.append(cmd) + + import_acl_name_or_num = module.params['import_acl_name_or_num'] + if import_acl_name_or_num: + conf_str += "%s" % import_acl_name_or_num + if import_acl_name_or_num.isdigit(): + cmd = "peer %s filter-policy %s import" % (remote_address, import_acl_name_or_num) + else: + cmd = "peer %s filter-policy acl-name %s 
import" % (remote_address, import_acl_name_or_num) + cmds.append(cmd) + + export_acl_name_or_num = module.params['export_acl_name_or_num'] + if export_acl_name_or_num: + conf_str += "%s" % export_acl_name_or_num + if export_acl_name_or_num.isdigit(): + cmd = "peer %s filter-policy %s export" % (remote_address, export_acl_name_or_num) + else: + cmd = "peer %s filter-policy acl-name %s export" % (remote_address, export_acl_name_or_num) + cmds.append(cmd) + + ipprefix_orf_enable = module.params['ipprefix_orf_enable'] + if ipprefix_orf_enable != 'no_use': + conf_str += "%s" % ipprefix_orf_enable + + if ipprefix_orf_enable == "true": + cmd = "peer %s capability-advertise orf ip-prefix" % remote_address + else: + cmd = "undo peer %s capability-advertise orf ip-prefix" % remote_address + cmds.append(cmd) + + is_nonstd_ipprefix_mod = module.params['is_nonstd_ipprefix_mod'] + if is_nonstd_ipprefix_mod != 'no_use': + conf_str += "%s" % is_nonstd_ipprefix_mod + + if is_nonstd_ipprefix_mod == "true": + if ipprefix_orf_enable == "true": + cmd = "peer %s capability-advertise orf non-standard-compatible" % remote_address + else: + cmd = "undo peer %s capability-advertise orf non-standard-compatible" % remote_address + cmds.append(cmd) + else: + if ipprefix_orf_enable == "true": + cmd = "peer %s capability-advertise orf" % remote_address + else: + cmd = "undo peer %s capability-advertise orf" % remote_address + cmds.append(cmd) + + orftype = module.params['orftype'] + if orftype: + conf_str += "%s" % orftype + + orf_mode = module.params['orf_mode'] + if orf_mode: + conf_str += "%s" % orf_mode + + if ipprefix_orf_enable == "true": + cmd = "peer %s capability-advertise orf ip-prefix %s" % (remote_address, orf_mode) + else: + cmd = "undo peer %s capability-advertise orf ip-prefix %s" % (remote_address, orf_mode) + cmds.append(cmd) + + soostring = module.params['soostring'] + if soostring: + conf_str += "%s" % soostring + + cmd = "peer %s soo %s" % (remote_address, soostring) + cmds.append(cmd) + + cmd = "" + default_rt_adv_enable = module.params['default_rt_adv_enable'] + if default_rt_adv_enable != 'no_use': + conf_str += "%s" % default_rt_adv_enable + + if default_rt_adv_enable == "true": + cmd += "peer %s default-route-advertise" % remote_address + else: + cmd += "undo peer %s default-route-advertise" % remote_address + + default_rt_adv_policy = module.params['default_rt_adv_policy'] + if default_rt_adv_policy: + conf_str += "%s" % default_rt_adv_policy + cmd += " route-policy %s" % default_rt_adv_policy + + default_rt_match_mode = module.params['default_rt_match_mode'] + if default_rt_match_mode: + conf_str += "%s" % default_rt_match_mode + + if default_rt_match_mode == "matchall": + cmd += " conditional-route-match-all" + elif default_rt_match_mode == "matchany": + cmd += " conditional-route-match-any" + + if cmd: + cmds.append(cmd) + + add_path_mode = module.params['add_path_mode'] + if add_path_mode: + conf_str += "%s" % add_path_mode + if add_path_mode == "receive": + cmd = "peer %s capability-advertise add-path receive" % remote_address + elif add_path_mode == "send": + cmd = "peer %s capability-advertise add-path send" % remote_address + elif add_path_mode == "both": + cmd = "peer %s capability-advertise add-path both" % remote_address + cmds.append(cmd) + + adv_add_path_num = module.params['adv_add_path_num'] + if adv_add_path_num: + conf_str += "%s" % adv_add_path_num + cmd = "peer %s advertise add-path path-number %s" % (remote_address, adv_add_path_num) + cmds.append(cmd) + origin_as_valid = 
module.params['origin_as_valid'] + if origin_as_valid != 'no_use': + conf_str += "%s" % origin_as_valid + + vpls_enable = module.params['vpls_enable'] + if vpls_enable != 'no_use': + conf_str += "%s" % vpls_enable + + vpls_ad_disable = module.params['vpls_ad_disable'] + if vpls_ad_disable != 'no_use': + conf_str += "%s" % vpls_ad_disable + + update_pkt_standard_compatible = module.params[ + 'update_pkt_standard_compatible'] + if update_pkt_standard_compatible != 'no_use': + conf_str += "%s" % update_pkt_standard_compatible + + conf_str += CE_MERGE_BGP_PEER_AF_TAIL + + recv_xml = self.netconf_set_config(module=module, conf_str=conf_str) + + if "" not in recv_xml: + module.fail_json(msg='Error: Merge bgp peer address family other failed.') + + return cmds + + +def main(): + """ main """ + + argument_spec = dict( + state=dict(choices=['present', 'absent'], default='present'), + vrf_name=dict(type='str', required=True), + af_type=dict(choices=['ipv4uni', 'ipv4multi', 'ipv4vpn', + 'ipv6uni', 'ipv6vpn', 'evpn'], required=True), + remote_address=dict(type='str', required=True), + advertise_irb=dict(type='str', default='no_use', choices=['no_use', 'true', 'false']), + advertise_arp=dict(type='str', default='no_use', choices=['no_use', 'true', 'false']), + advertise_remote_nexthop=dict(type='str', default='no_use', choices=['no_use', 'true', 'false']), + advertise_community=dict(type='str', default='no_use', choices=['no_use', 'true', 'false']), + advertise_ext_community=dict(type='str', default='no_use', choices=['no_use', 'true', 'false']), + discard_ext_community=dict(type='str', default='no_use', choices=['no_use', 'true', 'false']), + allow_as_loop_enable=dict(type='str', default='no_use', choices=['no_use', 'true', 'false']), + allow_as_loop_limit=dict(type='str'), + keep_all_routes=dict(type='str', default='no_use', choices=['no_use', 'true', 'false']), + nexthop_configure=dict(choices=['null', 'local', 'invariable']), + preferred_value=dict(type='str'), + public_as_only=dict(type='str', default='no_use', choices=['no_use', 'true', 'false']), + public_as_only_force=dict(type='str', default='no_use', choices=['no_use', 'true', 'false']), + public_as_only_limited=dict(type='str', default='no_use', choices=['no_use', 'true', 'false']), + public_as_only_replace=dict(type='str', default='no_use', choices=['no_use', 'true', 'false']), + public_as_only_skip_peer_as=dict(type='str', default='no_use', choices=['no_use', 'true', 'false']), + route_limit=dict(type='str'), + route_limit_percent=dict(type='str'), + route_limit_type=dict( + choices=['noparameter', 'alertOnly', 'idleForever', 'idleTimeout']), + route_limit_idle_timeout=dict(type='str'), + rt_updt_interval=dict(type='str'), + redirect_ip=dict(type='str', default='no_use', choices=['no_use', 'true', 'false']), + redirect_ip_validation=dict( + type='str', default='no_use', + choices=['no_use', 'true', 'false'], aliases=['redirect_ip_vaildation']), + reflect_client=dict(type='str', default='no_use', choices=['no_use', 'true', 'false']), + substitute_as_enable=dict(type='str', default='no_use', choices=['no_use', 'true', 'false']), + import_rt_policy_name=dict(type='str'), + export_rt_policy_name=dict(type='str'), + import_pref_filt_name=dict(type='str'), + export_pref_filt_name=dict(type='str'), + import_as_path_filter=dict(type='str'), + export_as_path_filter=dict(type='str'), + import_as_path_name_or_num=dict(type='str'), + export_as_path_name_or_num=dict(type='str'), + import_acl_name_or_num=dict(type='str'), + 
export_acl_name_or_num=dict(type='str'), + ipprefix_orf_enable=dict(type='str', default='no_use', choices=['no_use', 'true', 'false']), + is_nonstd_ipprefix_mod=dict(type='str', default='no_use', choices=['no_use', 'true', 'false']), + orftype=dict(type='str'), + orf_mode=dict(choices=['null', 'receive', 'send', 'both']), + soostring=dict(type='str'), + default_rt_adv_enable=dict(type='str', default='no_use', choices=['no_use', 'true', 'false']), + default_rt_adv_policy=dict(type='str'), + default_rt_match_mode=dict(choices=['null', 'matchall', 'matchany']), + add_path_mode=dict(choices=['null', 'receive', 'send', 'both']), + adv_add_path_num=dict(type='str'), + origin_as_valid=dict(type='str', default='no_use', choices=['no_use', 'true', 'false']), + vpls_enable=dict(type='str', default='no_use', choices=['no_use', 'true', 'false']), + vpls_ad_disable=dict(type='str', default='no_use', choices=['no_use', 'true', 'false']), + update_pkt_standard_compatible=dict(type='str', default='no_use', choices=['no_use', 'true', 'false'])) + + argument_spec.update(ce_argument_spec) + module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True) + + changed = False + proposed = dict() + existing = dict() + end_state = dict() + updates = [] + + state = module.params['state'] + vrf_name = module.params['vrf_name'] + af_type = module.params['af_type'] + remote_address = module.params['remote_address'] + advertise_irb = module.params['advertise_irb'] + advertise_arp = module.params['advertise_arp'] + advertise_remote_nexthop = module.params['advertise_remote_nexthop'] + advertise_community = module.params['advertise_community'] + advertise_ext_community = module.params['advertise_ext_community'] + discard_ext_community = module.params['discard_ext_community'] + allow_as_loop_enable = module.params['allow_as_loop_enable'] + allow_as_loop_limit = module.params['allow_as_loop_limit'] + keep_all_routes = module.params['keep_all_routes'] + nexthop_configure = module.params['nexthop_configure'] + preferred_value = module.params['preferred_value'] + public_as_only = module.params['public_as_only'] + public_as_only_force = module.params['public_as_only_force'] + public_as_only_limited = module.params['public_as_only_limited'] + public_as_only_replace = module.params['public_as_only_replace'] + public_as_only_skip_peer_as = module.params['public_as_only_skip_peer_as'] + route_limit = module.params['route_limit'] + route_limit_percent = module.params['route_limit_percent'] + route_limit_type = module.params['route_limit_type'] + route_limit_idle_timeout = module.params['route_limit_idle_timeout'] + rt_updt_interval = module.params['rt_updt_interval'] + redirect_ip = module.params['redirect_ip'] + redirect_ip_validation = module.params['redirect_ip_validation'] + reflect_client = module.params['reflect_client'] + substitute_as_enable = module.params['substitute_as_enable'] + import_rt_policy_name = module.params['import_rt_policy_name'] + export_rt_policy_name = module.params['export_rt_policy_name'] + import_pref_filt_name = module.params['import_pref_filt_name'] + export_pref_filt_name = module.params['export_pref_filt_name'] + import_as_path_filter = module.params['import_as_path_filter'] + export_as_path_filter = module.params['export_as_path_filter'] + import_as_path_name_or_num = module.params['import_as_path_name_or_num'] + export_as_path_name_or_num = module.params['export_as_path_name_or_num'] + import_acl_name_or_num = module.params['import_acl_name_or_num'] + export_acl_name_or_num = 
module.params['export_acl_name_or_num'] + ipprefix_orf_enable = module.params['ipprefix_orf_enable'] + is_nonstd_ipprefix_mod = module.params['is_nonstd_ipprefix_mod'] + orftype = module.params['orftype'] + orf_mode = module.params['orf_mode'] + soostring = module.params['soostring'] + default_rt_adv_enable = module.params['default_rt_adv_enable'] + default_rt_adv_policy = module.params['default_rt_adv_policy'] + default_rt_match_mode = module.params['default_rt_match_mode'] + add_path_mode = module.params['add_path_mode'] + adv_add_path_num = module.params['adv_add_path_num'] + origin_as_valid = module.params['origin_as_valid'] + vpls_enable = module.params['vpls_enable'] + vpls_ad_disable = module.params['vpls_ad_disable'] + update_pkt_standard_compatible = module.params[ + 'update_pkt_standard_compatible'] + + ce_bgp_peer_af_obj = BgpNeighborAf() + + # get proposed + proposed["state"] = state + if vrf_name: + proposed["vrf_name"] = vrf_name + if af_type: + proposed["af_type"] = af_type + if remote_address: + proposed["remote_address"] = remote_address + if advertise_irb != 'no_use': + proposed["advertise_irb"] = advertise_irb + if advertise_arp != 'no_use': + proposed["advertise_arp"] = advertise_arp + if advertise_remote_nexthop != 'no_use': + proposed["advertise_remote_nexthop"] = advertise_remote_nexthop + if advertise_community != 'no_use': + proposed["advertise_community"] = advertise_community + if advertise_ext_community != 'no_use': + proposed["advertise_ext_community"] = advertise_ext_community + if discard_ext_community != 'no_use': + proposed["discard_ext_community"] = discard_ext_community + if allow_as_loop_enable != 'no_use': + proposed["allow_as_loop_enable"] = allow_as_loop_enable + if allow_as_loop_limit: + proposed["allow_as_loop_limit"] = allow_as_loop_limit + if keep_all_routes != 'no_use': + proposed["keep_all_routes"] = keep_all_routes + if nexthop_configure: + proposed["nexthop_configure"] = nexthop_configure + if preferred_value: + proposed["preferred_value"] = preferred_value + if public_as_only != 'no_use': + proposed["public_as_only"] = public_as_only + if public_as_only_force != 'no_use': + proposed["public_as_only_force"] = public_as_only_force + if public_as_only_limited != 'no_use': + proposed["public_as_only_limited"] = public_as_only_limited + if public_as_only_replace != 'no_use': + proposed["public_as_only_replace"] = public_as_only_replace + if public_as_only_skip_peer_as != 'no_use': + proposed["public_as_only_skip_peer_as"] = public_as_only_skip_peer_as + if route_limit: + proposed["route_limit"] = route_limit + if route_limit_percent: + proposed["route_limit_percent"] = route_limit_percent + if route_limit_type: + proposed["route_limit_type"] = route_limit_type + if route_limit_idle_timeout: + proposed["route_limit_idle_timeout"] = route_limit_idle_timeout + if rt_updt_interval: + proposed["rt_updt_interval"] = rt_updt_interval + if redirect_ip != 'no_use': + proposed["redirect_ip"] = redirect_ip + if redirect_ip_validation != 'no_use': + proposed["redirect_ip_validation"] = redirect_ip_validation + if reflect_client != 'no_use': + proposed["reflect_client"] = reflect_client + if substitute_as_enable != 'no_use': + proposed["substitute_as_enable"] = substitute_as_enable + if import_rt_policy_name: + proposed["import_rt_policy_name"] = import_rt_policy_name + if export_rt_policy_name: + proposed["export_rt_policy_name"] = export_rt_policy_name + if import_pref_filt_name: + proposed["import_pref_filt_name"] = import_pref_filt_name + if 
export_pref_filt_name: + proposed["export_pref_filt_name"] = export_pref_filt_name + if import_as_path_filter: + proposed["import_as_path_filter"] = import_as_path_filter + if export_as_path_filter: + proposed["export_as_path_filter"] = export_as_path_filter + if import_as_path_name_or_num: + proposed["import_as_path_name_or_num"] = import_as_path_name_or_num + if export_as_path_name_or_num: + proposed["export_as_path_name_or_num"] = export_as_path_name_or_num + if import_acl_name_or_num: + proposed["import_acl_name_or_num"] = import_acl_name_or_num + if export_acl_name_or_num: + proposed["export_acl_name_or_num"] = export_acl_name_or_num + if ipprefix_orf_enable != 'no_use': + proposed["ipprefix_orf_enable"] = ipprefix_orf_enable + if is_nonstd_ipprefix_mod != 'no_use': + proposed["is_nonstd_ipprefix_mod"] = is_nonstd_ipprefix_mod + if orftype: + proposed["orftype"] = orftype + if orf_mode: + proposed["orf_mode"] = orf_mode + if soostring: + proposed["soostring"] = soostring + if default_rt_adv_enable != 'no_use': + proposed["default_rt_adv_enable"] = default_rt_adv_enable + if default_rt_adv_policy: + proposed["default_rt_adv_policy"] = default_rt_adv_policy + if default_rt_match_mode: + proposed["default_rt_match_mode"] = default_rt_match_mode + if add_path_mode: + proposed["add_path_mode"] = add_path_mode + if adv_add_path_num: + proposed["adv_add_path_num"] = adv_add_path_num + if origin_as_valid != 'no_use': + proposed["origin_as_valid"] = origin_as_valid + if vpls_enable != 'no_use': + proposed["vpls_enable"] = vpls_enable + if vpls_ad_disable != 'no_use': + proposed["vpls_ad_disable"] = vpls_ad_disable + if update_pkt_standard_compatible != 'no_use': + proposed["update_pkt_standard_compatible"] = update_pkt_standard_compatible + + if not ce_bgp_peer_af_obj: + module.fail_json(msg='Error: Init module failed.') + + bgp_peer_af_rst = ce_bgp_peer_af_obj.check_bgp_neighbor_af_args( + module=module) + bgp_peer_af_other_rst = ce_bgp_peer_af_obj.check_bgp_neighbor_af_other( + module=module) + + # state exist bgp peer address family config + exist_tmp = dict() + for item in bgp_peer_af_rst: + if item != "need_cfg": + exist_tmp[item] = bgp_peer_af_rst[item] + if exist_tmp: + existing["bgp neighbor af"] = exist_tmp + # state exist bgp peer address family other config + exist_tmp = dict() + for item in bgp_peer_af_other_rst: + if item != "need_cfg": + exist_tmp[item] = bgp_peer_af_other_rst[item] + if exist_tmp: + existing["bgp neighbor af other"] = exist_tmp + + if state == "present": + if bgp_peer_af_rst["need_cfg"]: + if "remote_address" in bgp_peer_af_rst.keys(): + cmd = ce_bgp_peer_af_obj.merge_bgp_peer_af(module=module) + changed = True + for item in cmd: + updates.append(item) + else: + cmd = ce_bgp_peer_af_obj.create_bgp_peer_af(module=module) + changed = True + for item in cmd: + updates.append(item) + + if bgp_peer_af_other_rst["need_cfg"]: + cmd = ce_bgp_peer_af_obj.merge_bgp_peer_af_other(module=module) + changed = True + for item in cmd: + updates.append(item) + + else: + if bgp_peer_af_rst["need_cfg"]: + cmd = ce_bgp_peer_af_obj.delete_bgp_peer_af(module=module) + changed = True + for item in cmd: + updates.append(item) + + if bgp_peer_af_other_rst["need_cfg"]: + pass + + # state end bgp peer address family config + bgp_peer_af_rst = ce_bgp_peer_af_obj.check_bgp_neighbor_af_args( + module=module) + end_tmp = dict() + for item in bgp_peer_af_rst: + if item != "need_cfg": + end_tmp[item] = bgp_peer_af_rst[item] + if end_tmp: + end_state["bgp neighbor af"] = end_tmp + # state end 
bgp peer address family other config
+    bgp_peer_af_other_rst = ce_bgp_peer_af_obj.check_bgp_neighbor_af_other(
+        module=module)
+    end_tmp = dict()
+    for item in bgp_peer_af_other_rst:
+        if item != "need_cfg":
+            end_tmp[item] = bgp_peer_af_other_rst[item]
+    if end_tmp:
+        end_state["bgp neighbor af other"] = end_tmp
+    if end_state == existing:
+        changed = False
+        updates = list()
+
+    results = dict()
+    results['proposed'] = proposed
+    results['existing'] = existing
+    results['changed'] = changed
+    results['end_state'] = end_state
+    results['updates'] = updates
+
+    module.exit_json(**results)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/network/cloudengine/ce_command.py b/plugins/modules/network/cloudengine/ce_command.py
new file mode 100644
index 0000000000..c7aaf16919
--- /dev/null
+++ b/plugins/modules/network/cloudengine/ce_command.py
@@ -0,0 +1,263 @@
+#!/usr/bin/python
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+
+module: ce_command
+author: "JackyGao2016 (@CloudEngine-Ansible)"
+short_description: Run arbitrary command on HUAWEI CloudEngine devices.
+description:
+  - Sends an arbitrary command to a HUAWEI CloudEngine node and returns
+    the results read from the device. The ce_command module includes an
+    argument that will cause the module to wait for a specific condition
+    before returning or timing out if the condition is not met.
+notes:
+  - Recommended connection is C(network_cli).
+  - This module also works with C(local) connections for legacy playbooks.
+options:
+  commands:
+    description:
+      - The commands to send to the remote HUAWEI CloudEngine device
+        over the configured provider. The resulting output from the
+        command is returned. If the I(wait_for) argument is provided,
+        the module does not return until the condition is satisfied
+        or the number of I(retries) has been exceeded.
+    required: true
+  wait_for:
+    description:
+      - Specifies what to evaluate from the output of the command
+        and what conditionals to apply. This argument will cause
+        the task to wait for a particular conditional to be true
+        before moving forward. If the conditional is not true
+        by the configured retries, the task fails. See examples.
+  match:
+    description:
+      - The I(match) argument is used in conjunction with the
+        I(wait_for) argument to specify the match policy. Valid
+        values are C(all) or C(any). If the value is set to C(all)
+        then all conditionals in the I(wait_for) must be satisfied. If
+        the value is set to C(any) then only one of the values must be
+        satisfied.
+    default: all
+  retries:
+    description:
+      - Specifies the number of times a command should be tried
+        before it is considered failed. The command is run on the
+        target device every retry and evaluated against the I(wait_for)
+        conditionals.
+    default: 10
+  interval:
+    description:
+      - Configures the interval in seconds to wait between retries
+        of the command. If the command does not pass the specified
+        conditional, the interval indicates how long to wait before
+        trying the command again.
+    default: 1
+'''
+
+EXAMPLES = """
+# Note: examples below use the following provider dict to handle
+# transport and authentication to the node.
+
+- name: CloudEngine command test
+  hosts: cloudengine
+  connection: local
+  gather_facts: no
+  vars:
+    cli:
+      host: "{{ inventory_hostname }}"
+      port: "{{ ansible_ssh_port }}"
+      username: "{{ username }}"
+      password: "{{ password }}"
+      transport: cli
+
+  tasks:
+  - name: "Run display version on remote devices"
+    ce_command:
+      commands: display version
+      provider: "{{ cli }}"
+
+  - name: "Run display version and check to see if output contains HUAWEI"
+    ce_command:
+      commands: display version
+      wait_for: result[0] contains HUAWEI
+      provider: "{{ cli }}"
+
+  - name: "Run multiple commands on remote nodes"
+    ce_command:
+      commands:
+        - display version
+        - display device
+      provider: "{{ cli }}"
+
+  - name: "Run multiple commands and evaluate the output"
+    ce_command:
+      commands:
+        - display version
+        - display device
+      wait_for:
+        - result[0] contains HUAWEI
+        - result[1] contains Device
+      provider: "{{ cli }}"
+"""
+
+RETURN = """
+stdout:
+  description: the set of responses from the commands
+  returned: always
+  type: list
+  sample: ['...', '...']
+
+stdout_lines:
+  description: The value of stdout split into a list
+  returned: always
+  type: list
+  sample: [['...', '...'], ['...'], ['...']]
+
+failed_conditions:
+  description: the conditionals that failed
+  returned: failed
+  type: list
+  sample: ['...', '...']
+"""
+
+
+import time
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.network.cloudengine.ce import ce_argument_spec, check_args
+from ansible_collections.community.general.plugins.module_utils.network.cloudengine.ce import run_commands
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.parsing import Conditional
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import ComplexList
+from ansible.module_utils.six import string_types
+from ansible.module_utils._text import to_native
+
+
+def to_lines(stdout):
+    lines = list()
+    for item in stdout:
+        if isinstance(item, string_types):
+            item = str(item).split('\n')
+        lines.append(item)
+    return lines
+
+
+def parse_commands(module, warnings):
+    transform = ComplexList(dict(
+        command=dict(key=True),
+        output=dict(),
+        prompt=dict(),
+        answer=dict()
+    ), module)
+
+    commands = transform(module.params['commands'])
+
+    for _, item in enumerate(commands):
+        if module.check_mode and not item['command'].startswith('dis'):
+            warnings.append(
+                'Only display commands are supported when using check_mode, not '
+                'executing %s' % item['command']
+            )
+
+    return commands
+
+
+def to_cli(obj):
+    cmd = obj['command']
+    return cmd
+
+
+def main():
+    """entry point for module execution
+    """
+    argument_spec = dict(
+        # { command: <str>, output: <str>, prompt: <str>, response: <str> }
+        commands=dict(type='list', required=True),
+
+        wait_for=dict(type='list'),
+        match=dict(default='all', choices=['any', 'all']),
+
+        retries=dict(default=10, type='int'),
+        interval=dict(default=1, type='int')
+    )
+
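The parse_commands() helper above leans on netcommon's ComplexList transform to turn whatever a playbook supplies for commands into a uniform list of dicts before anything is sent to the device. A minimal pure-Python sketch of that normalization, assuming entries arrive as bare strings or dicts; normalize_commands is an illustrative stand-in, not the netcommon API:

    # Illustrative stand-in for the ComplexList transform used by
    # parse_commands(): bare strings become dicts keyed on 'command',
    # and the optional keys are filled in with None.
    def normalize_commands(entries):
        normalized = []
        for entry in entries:
            if isinstance(entry, str):
                entry = {'command': entry}
            for key in ('output', 'prompt', 'answer'):
                entry.setdefault(key, None)
            normalized.append(entry)
        return normalized

    print(normalize_commands(['display version']))
    # [{'command': 'display version', 'output': None, 'prompt': None, 'answer': None}]

This is why the retry loop further down can index items uniformly: every entry is guaranteed a 'command' key regardless of how the task author wrote it.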
argument_spec.update(ce_argument_spec) + + module = AnsibleModule(argument_spec=argument_spec, + supports_check_mode=True) + + result = {'changed': False} + + warnings = list() + check_args(module, warnings) + commands = parse_commands(module, warnings) + result['warnings'] = warnings + + wait_for = module.params['wait_for'] or list() + + try: + conditionals = [Conditional(c) for c in wait_for] + except AttributeError as exc: + module.fail_json(msg=to_native(exc), exception=traceback.format_exc()) + + retries = module.params['retries'] + interval = module.params['interval'] + match = module.params['match'] + + while retries > 0: + responses = run_commands(module, commands) + + for item in list(conditionals): + if item(responses): + if match == 'any': + conditionals = list() + break + conditionals.remove(item) + + if not conditionals: + break + + time.sleep(interval) + retries -= 1 + + if conditionals: + failed_conditions = [item.raw for item in conditionals] + msg = 'One or more conditional statements have not been satisfied' + module.fail_json(msg=msg, failed_conditions=failed_conditions) + + result.update({ + 'stdout': responses, + 'stdout_lines': to_lines(responses) + }) + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/cloudengine/ce_config.py b/plugins/modules/network/cloudengine/ce_config.py new file mode 100644 index 0000000000..d4b3ab9652 --- /dev/null +++ b/plugins/modules/network/cloudengine/ce_config.py @@ -0,0 +1,496 @@ +#!/usr/bin/python +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: ce_config +author: "QijunPan (@QijunPan)" +short_description: Manage Huawei CloudEngine configuration sections. +description: + - Huawei CloudEngine configurations use a simple block indent file syntax + for segmenting configuration into sections. This module provides + an implementation for working with CloudEngine configuration sections in + a deterministic way. This module works with CLI transports. +notes: + - Recommended connection is C(network_cli). + - This module also works with C(local) connections for legacy playbooks. +options: + lines: + description: + - The ordered set of commands that should be configured in the + section. The commands must be the exact same commands as found + in the device current-configuration. Be sure to note the configuration + command syntax as some commands are automatically modified by the + device config parser. + parents: + description: + - The ordered set of parents that uniquely identify the section or hierarchy + the commands should be checked against. If the parents argument + is omitted, the commands are checked against the set of top + level or global commands. 
+  src:
+    description:
+      - The I(src) argument provides a path to the configuration file
+        to load into the remote system. The path can either be a full
+        system path to the configuration file if the value starts with /
+        or relative to the root of the implemented role or playbook.
+        This argument is mutually exclusive with the I(lines) and
+        I(parents) arguments.
+  before:
+    description:
+      - The ordered set of commands to push on to the command stack if
+        a change needs to be made. This allows the playbook designer
+        the opportunity to perform configuration commands prior to pushing
+        any changes without affecting how the set of commands are matched
+        against the system.
+  after:
+    description:
+      - The ordered set of commands to append to the end of the command
+        stack if a change needs to be made. Just like with I(before) this
+        allows the playbook designer to append a set of commands to be
+        executed after the command set.
+  match:
+    description:
+      - Instructs the module on the way to perform the matching of
+        the set of commands against the current device config. If
+        match is set to I(line), commands are matched line by line. If
+        match is set to I(strict), command lines are matched with respect
+        to position. If match is set to I(exact), command lines
+        must be an equal match. Finally, if match is set to I(none), the
+        module will not attempt to compare the source configuration with
+        the current-configuration on the remote device.
+    default: line
+    choices: ['line', 'strict', 'exact', 'none']
+  replace:
+    description:
+      - Instructs the module on the way to perform the configuration
+        on the device. If the replace argument is set to I(line) then
+        the modified lines are pushed to the device in configuration
+        mode. If the replace argument is set to I(block) then the entire
+        command block is pushed to the device in configuration mode if any
+        line is not correct.
+    default: line
+    choices: ['line', 'block']
+  backup:
+    description:
+      - This argument will cause the module to create a full backup of
+        the current C(current-configuration) from the remote device before any
+        changes are made. If the C(backup_options) value is not given,
+        the backup file is written to the C(backup) folder in the playbook
+        root directory. If the directory does not exist, it is created.
+    type: bool
+    default: 'no'
+  config:
+    description:
+      - The module, by default, will connect to the remote device and
+        retrieve the current current-configuration to use as a base for comparing
+        against the contents of source. There are times when it is not
+        desirable to have the task get the current-configuration for
+        every task in a playbook. The I(config) argument allows the
+        implementer to pass in the configuration to use as the base
+        config for comparison.
+  defaults:
+    description:
+      - The I(defaults) argument will influence how the current-configuration
+        is collected from the device. When the value is set to true,
+        the command used to collect the current-configuration is appended with
+        the all keyword. When the value is set to false, the command
+        is issued without the all keyword.
+    type: bool
+    default: 'no'
+  save:
+    description:
+      - The C(save) argument instructs the module to save the
+        current-configuration to saved-configuration. This operation is performed
+        after any changes are made to the current running config. If
+        no changes are made, the configuration is still saved to the
+        startup config. This option will always cause the module to
+        return changed.
+    type: bool
+    default: 'no'
+  backup_options:
+    description:
+      - This is a dict object containing configurable options related to backup file path.
+        The value of this option is read only when C(backup) is set to I(yes), if C(backup) is set
+        to I(no) this option will be silently ignored.
+    suboptions:
+      filename:
+        description:
+          - The filename to be used to store the backup configuration. If the filename
+            is not given it will be generated based on the hostname, current time and date
+            in format defined by <hostname>_config.<current-date>@<current-time>
+      dir_path:
+        description:
+          - This option provides the path ending with directory name in which the backup
+            configuration file will be stored. If the directory does not exist it will be first
+            created and the filename is either the value of C(filename) or default filename
+            as described in C(filename) options description. If the path value is not given
+            in that case a I(backup) directory will be created in the current working directory
+            and backup configuration will be copied in C(filename) within I(backup) directory.
+        type: path
+    type: dict
+'''
+
+EXAMPLES = """
+# Note: examples below use the following provider dict to handle
+# transport and authentication to the node.
+
+- name: CloudEngine config test
+  hosts: cloudengine
+  connection: local
+  gather_facts: no
+  vars:
+    cli:
+      host: "{{ inventory_hostname }}"
+      port: "{{ ansible_ssh_port }}"
+      username: "{{ username }}"
+      password: "{{ password }}"
+      transport: cli
+
+  tasks:
+  - name: "Configure top level configuration and save it"
+    ce_config:
+      lines: sysname {{ inventory_hostname }}
+      save: yes
+      provider: "{{ cli }}"
+
+  - name: "Configure acl configuration and save it"
+    ce_config:
+      lines:
+        - rule 10 permit source 1.1.1.1 32
+        - rule 20 permit source 2.2.2.2 32
+        - rule 30 permit source 3.3.3.3 32
+        - rule 40 permit source 4.4.4.4 32
+        - rule 50 permit source 5.5.5.5 32
+      parents: acl 2000
+      before: undo acl 2000
+      match: exact
+      provider: "{{ cli }}"
+
+  - name: "Configure acl configuration and save it"
+    ce_config:
+      lines:
+        - rule 10 permit source 1.1.1.1 32
+        - rule 20 permit source 2.2.2.2 32
+        - rule 30 permit source 3.3.3.3 32
+        - rule 40 permit source 4.4.4.4 32
+      parents: acl 2000
+      before: undo acl 2000
+      replace: block
+      provider: "{{ cli }}"
+
+  - name: configurable backup path
+    ce_config:
+      lines: sysname {{ inventory_hostname }}
+      provider: "{{ cli }}"
+      backup: yes
+      backup_options:
+        filename: backup.cfg
+        dir_path: /home/user
+"""
+
+RETURN = """
+updates:
+  description: The set of commands that will be pushed to the remote device
+  returned: Only when lines is specified.
+ type: list + sample: ['...', '...'] +backup_path: + description: The full path to the backup file + returned: when backup is yes + type: str + sample: /playbooks/ansible/backup/ce_config.2016-07-16@22:28:34 +""" +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.connection import ConnectionError, Connection +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.config import NetworkConfig as _NetworkConfig +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.config import dumps, ConfigLine, ignore_line +from ansible_collections.community.general.plugins.module_utils.network.cloudengine.ce import get_config, run_commands, exec_command, cli_err_msg +from ansible_collections.community.general.plugins.module_utils.network.cloudengine.ce import ce_argument_spec, load_config +from ansible_collections.community.general.plugins.module_utils.network.cloudengine.ce import check_args as ce_check_args +import re + + +def check_args(module, warnings): + ce_check_args(module, warnings) + + +def not_user_view(prompt): + return prompt is not None and prompt.strip().startswith("[") + + +def command_level(command): + regex_level = re.search(r"^(\s*)\S+", command) + if regex_level is not None: + level = str(regex_level.group(1)) + return len(level) + return 0 + + +def _load_config(module, config): + """Sends configuration commands to the remote device + """ + connection = Connection(module._socket_path) + rc, out, err = exec_command(module, 'mmi-mode enable') + if rc != 0: + module.fail_json(msg='unable to set mmi-mode enable', output=err) + rc, out, err = exec_command(module, 'system-view immediately') + if rc != 0: + module.fail_json(msg='unable to enter system-view', output=err) + current_view_prompt = system_view_prompt = connection.get_prompt() + + for index, cmd in enumerate(config): + level = command_level(cmd) + current_view_prompt = connection.get_prompt() + rc, out, err = exec_command(module, cmd) + if rc != 0: + print_msg = cli_err_msg(cmd.strip(), err) + # re-try command max 3 times + for i in (1, 2, 3): + current_view_prompt = connection.get_prompt() + if current_view_prompt != system_view_prompt and not_user_view(current_view_prompt): + exec_command(module, "quit") + current_view_prompt = connection.get_prompt() + # if current view is system-view, break. 
+ if current_view_prompt == system_view_prompt and level > 0: + break + elif current_view_prompt == system_view_prompt or not not_user_view(current_view_prompt): + break + rc, out, err = exec_command(module, cmd) + if rc == 0: + print_msg = None + break + if print_msg is not None: + module.fail_json(msg=print_msg) + + +def get_running_config(module): + contents = module.params['config'] + if not contents: + command = "display current-configuration " + if module.params['defaults']: + command += 'include-default' + resp = run_commands(module, command) + contents = resp[0] + return NetworkConfig(indent=1, contents=contents) + + +def get_candidate(module): + candidate = NetworkConfig(indent=1) + if module.params['src']: + config = module.params['src'] + candidate.load(config) + elif module.params['lines']: + parents = module.params['parents'] or list() + candidate.add(module.params['lines'], parents=parents) + return candidate + + +def run(module, result): + match = module.params['match'] + replace = module.params['replace'] + + candidate = get_candidate(module) + + if match != 'none': + before = get_running_config(module) + path = module.params['parents'] + configobjs = candidate.difference(before, match=match, replace=replace, path=path) + else: + configobjs = candidate.items + + if configobjs: + out_type = "commands" + if module.params["src"] is not None: + out_type = "raw" + commands = dumps(configobjs, out_type).split('\n') + + if module.params['lines']: + if module.params['before']: + commands[:0] = module.params['before'] + + if module.params['after']: + commands.extend(module.params['after']) + + command_display = [] + for per_command in commands: + if per_command.strip() not in ['quit', 'return', 'system-view']: + command_display.append(per_command) + + result['commands'] = command_display + result['updates'] = command_display + + if not module.check_mode: + if module.params['parents'] is not None: + load_config(module, commands) + else: + _load_config(module, commands) + if match != "none": + after = get_running_config(module) + path = module.params["parents"] + if path is not None and match != 'line': + before_objs = before.get_block(path) + after_objs = after.get_block(path) + update = [] + if len(before_objs) == len(after_objs): + for b_item, a_item in zip(before_objs, after_objs): + if b_item != a_item: + update.append(a_item.text) + else: + update = [item.text for item in after_objs] + if len(update) == 0: + result["changed"] = False + result['updates'] = [] + else: + result["changed"] = True + result['updates'] = update + else: + configobjs = after.difference(before, match=match, replace=replace, path=path) + if len(configobjs) > 0: + result["changed"] = True + else: + result["changed"] = False + result['updates'] = [] + else: + result['changed'] = True + + +class NetworkConfig(_NetworkConfig): + + def add(self, lines, parents=None): + ancestors = list() + offset = 0 + obj = None + + # global config command + if not parents: + for line in lines: + # handle ignore lines + if ignore_line(line): + continue + + item = ConfigLine(line) + item.raw = line + self.items.append(item) + + else: + for index, p in enumerate(parents): + try: + i = index + 1 + obj = self.get_block(parents[:i])[0] + ancestors.append(obj) + + except ValueError: + # add parent to config + offset = index * self._indent + obj = ConfigLine(p) + obj.raw = p.rjust(len(p) + offset) + if ancestors: + obj._parents = list(ancestors) + ancestors[-1]._children.append(obj) + self.items.append(obj) + ancestors.append(obj) + 
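The padding arithmetic in NetworkConfig.add() above (offset = index * self._indent for each parent, then len(parents) * self._indent for children, applied via str.rjust) is what makes the candidate configuration dump with device-style block indentation. A small self-contained sketch of just that padding logic, assuming the indent of 1 that get_candidate() uses; render_block is a hypothetical helper, not part of the module:

    # Sketch of the rjust-based indentation used by NetworkConfig.add():
    # each parent line is padded by its depth, each child line by the
    # full parent count, mirroring CloudEngine block-indent output.
    def render_block(parents, lines, indent=1):
        rendered = [p.rjust(len(p) + i * indent) for i, p in enumerate(parents)]
        offset = len(parents) * indent
        rendered.extend(line.rjust(len(line) + offset) for line in lines)
        return '\n'.join(rendered)

    print(render_block(['acl 2000'], ['rule 10 permit source 1.1.1.1 32']))
    # acl 2000
    #  rule 10 permit source 1.1.1.1 32

Because the indent is reproduced in the raw text itself, the later difference() comparison against the device's current-configuration can match blocks positionally without re-parsing hierarchy.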
+ # add child objects + for line in lines: + # handle ignore lines + if ignore_line(line): + continue + + # check if child already exists + for child in ancestors[-1]._children: + if child.text == line: + break + else: + offset = len(parents) * self._indent + item = ConfigLine(line) + item.raw = line.rjust(len(line) + offset) + item._parents = ancestors + ancestors[-1]._children.append(item) + self.items.append(item) + + +def main(): + """ main entry point for module execution + """ + backup_spec = dict( + filename=dict(), + dir_path=dict(type='path') + ) + argument_spec = dict( + src=dict(type='path'), + + lines=dict(aliases=['commands'], type='list'), + parents=dict(type='list'), + + before=dict(type='list'), + after=dict(type='list'), + + match=dict(default='line', choices=['line', 'strict', 'exact', 'none']), + replace=dict(default='line', choices=['line', 'block']), + config=dict(), + defaults=dict(type='bool', default=False), + + backup=dict(type='bool', default=False), + backup_options=dict(type='dict', options=backup_spec), + save=dict(type='bool', default=False), + ) + + argument_spec.update(ce_argument_spec) + + mutually_exclusive = [('lines', 'src'), + ('parents', 'src')] + + required_if = [('match', 'strict', ['lines']), + ('match', 'exact', ['lines']), + ('replace', 'block', ['lines'])] + + module = AnsibleModule(argument_spec=argument_spec, + mutually_exclusive=mutually_exclusive, + required_if=required_if, + supports_check_mode=True) + + warnings = list() + check_args(module, warnings) + + result = dict(changed=False, warnings=warnings) + + if module.params['backup']: + result['__backup__'] = get_config(module) + + if any((module.params['src'], module.params['lines'])): + run(module, result) + + if module.params['save']: + if not module.check_mode: + run_commands(module, ['return', 'mmi-mode enable', 'save']) + result["changed"] = True + run_commands(module, ['return', 'undo mmi-mode enable']) + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/cloudengine/ce_dldp.py b/plugins/modules/network/cloudengine/ce_dldp.py new file mode 100644 index 0000000000..926e9e50ba --- /dev/null +++ b/plugins/modules/network/cloudengine/ce_dldp.py @@ -0,0 +1,553 @@ +#!/usr/bin/python +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- + +module: ce_dldp +short_description: Manages global DLDP configuration on HUAWEI CloudEngine switches. +description: + - Manages global DLDP configuration on HUAWEI CloudEngine switches. +author: + - Zhijin Zhou (@QijunPan) +notes: + - The relevant configurations will be deleted if DLDP is disabled using enable=disable. + - When using auth_mode=none, it will restore the default DLDP authentication mode. 
By default,
+    DLDP packets are not authenticated.
+  - By default, the working mode of DLDP is enhance, so you are advised to use work_mode=enhance to restore the
+    default DLDP working mode.
+  - The default interval for sending Advertisement packets is 5 seconds, so you are advised to use time_interval=5 to
+    restore the default DLDP interval.
+  - This module requires the netconf system service be enabled on the remote device being managed.
+  - Recommended connection is C(netconf).
+  - This module also works with C(local) connections for legacy playbooks.
+options:
+  enable:
+    description:
+      - Set global DLDP enable state.
+    choices: ['enable', 'disable']
+  work_mode:
+    description:
+      - Set global DLDP work-mode.
+    choices: ['enhance', 'normal']
+  time_interval:
+    description:
+      - Specifies the interval for sending Advertisement packets.
+        The value is an integer ranging from 1 to 100, in seconds.
+        The default interval for sending Advertisement packets is 5 seconds.
+  auth_mode:
+    description:
+      - Specifies authentication algorithm of DLDP.
+    choices: ['md5', 'simple', 'sha', 'hmac-sha256', 'none']
+  auth_pwd:
+    description:
+      - Specifies authentication password.
+        The value is a string of 1 to 16 case-sensitive plaintexts or 24/32/48/108/128 case-sensitive encrypted
+        characters. The string excludes a question mark (?).
+  reset:
+    description:
+      - Specify whether to reset the DLDP state of disabled interfaces.
+    choices: ['enable', 'disable']
+'''
+
+EXAMPLES = '''
+- name: DLDP test
+  hosts: cloudengine
+  connection: local
+  gather_facts: no
+  vars:
+    cli:
+      host: "{{ inventory_hostname }}"
+      port: "{{ ansible_ssh_port }}"
+      username: "{{ username }}"
+      password: "{{ password }}"
+      transport: cli
+
+  tasks:
+
+  - name: "Configure global DLDP enable state"
+    ce_dldp:
+      enable: enable
+      provider: "{{ cli }}"
+
+  - name: "Configure DLDP work-mode and ensure global DLDP state is already enabled"
+    ce_dldp:
+      enable: enable
+      work_mode: normal
+      provider: "{{ cli }}"
+
+  - name: "Configure advertisement message time interval in seconds and ensure global DLDP state is already enabled"
+    ce_dldp:
+      enable: enable
+      time_interval: 6
+      provider: "{{ cli }}"
+
+  - name: "Configure a DLDP authentication mode and ensure global DLDP state is already enabled"
+    ce_dldp:
+      enable: enable
+      auth_mode: md5
+      auth_pwd: abc
+      provider: "{{ cli }}"
+
+  - name: "Reset DLDP state of disabled interfaces and ensure global DLDP state is already enabled"
+    ce_dldp:
+      enable: enable
+      reset: enable
+      provider: "{{ cli }}"
+'''
+
+RETURN = '''
+proposed:
+    description: k/v pairs of parameters passed into module
+    returned: always
+    type: dict
+    sample: {
+                "enable": "enable",
+                "reset": "enable",
+                "time_interval": "12",
+                "work_mode": "normal"
+            }
+existing:
+    description: k/v pairs of existing global DLDP configuration
+    returned: always
+    type: dict
+    sample: {
+                "enable": "disable",
+                "reset": "disable",
+                "time_interval": "5",
+                "work_mode": "enhance"
+            }
+end_state:
+    description: k/v pairs of global DLDP configuration after module execution
+    returned: always
+    type: dict
+    sample: {
+                "enable": "enable",
+                "reset": "enable",
+                "time_interval": "12",
+                "work_mode": "normal"
+            }
+updates:
+    description: command sent to the device
+    returned: always
+    type: list
+    sample: [
+                "dldp enable",
+                "dldp work-mode normal",
+                "dldp interval 12",
+                "dldp reset"
+            ]
+changed:
+    description: check to see if a change was made on the device
+    returned: always
+    type: bool
+    sample: true
+'''
+
+import copy
+from xml.etree import ElementTree
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.network.cloudengine.ce import ce_argument_spec, set_nc_config, get_nc_config, execute_nc_action
+
+CE_NC_ACTION_RESET_DLDP = """
+
+
+
+
+
+"""
+
+CE_NC_GET_GLOBAL_DLDP_CONFIG = """
+
+
+
+
+
+
+
+
+
+"""
+
+CE_NC_MERGE_DLDP_GLOBAL_CONFIG_HEAD = """
+
+
+
+        %s
+        %s
+        %s
+"""
+
+CE_NC_MERGE_DLDP_GLOBAL_CONFIG_TAIL = """
+
+
+
+"""
+
+
+class Dldp(object):
+    """Manage global dldp configuration"""
+
+    def __init__(self, argument_spec):
+        self.spec = argument_spec
+        self.module = None
+        self.init_module()
+
+        # DLDP global configuration info
+        self.enable = self.module.params['enable'] or None
+        self.work_mode = self.module.params['work_mode'] or None
+        self.internal = self.module.params['time_interval'] or None
+        self.reset = self.module.params['reset'] or None
+        self.auth_mode = self.module.params['auth_mode']
+        self.auth_pwd = self.module.params['auth_pwd']
+
+        self.dldp_conf = dict()
+        self.same_conf = False
+        # state
+        self.changed = False
+        self.updates_cmd = list()
+        self.results = dict()
+        self.proposed = dict()
+        self.existing = list()
+        self.end_state = list()
+
+    def check_config_if_same(self):
+        """Judge whether the current config is the same as what we expected"""
+
+        if self.enable and self.enable != self.dldp_conf['dldpEnable']:
+            return False
+
+        if self.internal and self.internal != self.dldp_conf['dldpInterval']:
+            return False
+
+        work_mode = 'normal'
+        if self.dldp_conf['dldpWorkMode'] == 'dldpEnhance':
+            work_mode = 'enhance'
+        if self.work_mode and self.work_mode != work_mode:
+            return False
+
+        if self.auth_mode:
+            if self.auth_mode != 'none':
+                return False
+
+            if self.auth_mode == 'none' and self.dldp_conf['dldpAuthMode'] != 'dldpAuthNone':
+                return False
+
+        if self.reset and self.reset == 'enable':
+            return False
+
+        return True
+
+    def check_params(self):
+        """Check all input params"""
+
+        if (self.auth_mode and self.auth_mode != 'none' and not self.auth_pwd) \
+                or (self.auth_pwd and not self.auth_mode):
+            self.module.fail_json(msg="Error: auth_mode and auth_pwd must both exist or not exist.")
+
+        if self.dldp_conf['dldpEnable'] == 'disable' and not self.enable:
+            if self.work_mode or self.reset or self.internal or self.auth_mode:
+                self.module.fail_json(msg="Error: when DLDP is already disabled globally, "
+                                          "the work_mode, time_interval, auth_mode and reset "
+                                          "parameters cannot be configured.")
+
+        if self.enable == 'disable' and (self.work_mode or self.internal or self.reset or self.auth_mode):
+            self.module.fail_json(msg="Error: when using enable=disable, the work_mode, "
+                                      "time_interval, auth_mode and reset parameters cannot "
+                                      "be configured.")
+
+        if self.internal:
+            if not self.internal.isdigit():
+                self.module.fail_json(
+                    msg='Error: time_interval must be a digit.')
+
+            if int(self.internal) < 1 or int(self.internal) > 100:
+                self.module.fail_json(
+                    msg='Error: The value of time_interval should be between 1 and 100.')
+
+        if self.auth_pwd:
+            if '?'
in self.auth_pwd: + self.module.fail_json( + msg='Error: The auth_pwd string excludes a question mark (?).') + if (len(self.auth_pwd) != 24) and (len(self.auth_pwd) != 32) and (len(self.auth_pwd) != 48) and \ + (len(self.auth_pwd) != 108) and (len(self.auth_pwd) != 128): + if (len(self.auth_pwd) < 1) or (len(self.auth_pwd) > 16): + self.module.fail_json( + msg='Error: The value is a string of 1 to 16 case-sensitive plaintexts or 24/32/48/108/128 ' + 'case-sensitive encrypted characters.') + + def init_module(self): + """Init module object""" + + self.module = AnsibleModule( + argument_spec=self.spec, supports_check_mode=True) + + def check_response(self, xml_str, xml_name): + """Check if response message is already succeed""" + + if "" not in xml_str: + self.module.fail_json(msg='Error: %s failed.' % xml_name) + + def get_dldp_exist_config(self): + """Get current dldp existed configuration""" + + dldp_conf = dict() + xml_str = CE_NC_GET_GLOBAL_DLDP_CONFIG + con_obj = get_nc_config(self.module, xml_str) + if "" in con_obj: + return dldp_conf + + xml_str = con_obj.replace('\r', '').replace('\n', '').\ + replace('xmlns="urn:ietf:params:xml:ns:netconf:base:1.0"', "").\ + replace('xmlns="http://www.huawei.com/netconf/vrp"', "") + + # get global DLDP info + root = ElementTree.fromstring(xml_str) + topo = root.find("dldp/dldpSys") + if not topo: + self.module.fail_json( + msg="Error: Get current DLDP configuration failed.") + + for eles in topo: + if eles.tag in ["dldpEnable", "dldpInterval", "dldpWorkMode", "dldpAuthMode"]: + if eles.tag == 'dldpEnable': + if eles.text == 'true': + value = 'enable' + else: + value = 'disable' + else: + value = eles.text + dldp_conf[eles.tag] = value + + return dldp_conf + + def config_global_dldp(self): + """Config global dldp""" + + if self.same_conf: + return + + enable = self.enable + if not self.enable: + enable = self.dldp_conf['dldpEnable'] + if enable == 'enable': + enable = 'true' + else: + enable = 'false' + + internal = self.internal + if not self.internal: + internal = self.dldp_conf['dldpInterval'] + + work_mode = self.work_mode + if not self.work_mode: + work_mode = self.dldp_conf['dldpWorkMode'] + + if work_mode == 'enhance' or work_mode == 'dldpEnhance': + work_mode = 'dldpEnhance' + else: + work_mode = 'dldpNormal' + + auth_mode = self.auth_mode + if not self.auth_mode: + auth_mode = self.dldp_conf['dldpAuthMode'] + if auth_mode == 'md5': + auth_mode = 'dldpAuthMD5' + elif auth_mode == 'simple': + auth_mode = 'dldpAuthSimple' + elif auth_mode == 'sha': + auth_mode = 'dldpAuthSHA' + elif auth_mode == 'hmac-sha256': + auth_mode = 'dldpAuthHMAC-SHA256' + elif auth_mode == 'none': + auth_mode = 'dldpAuthNone' + + xml_str = CE_NC_MERGE_DLDP_GLOBAL_CONFIG_HEAD % ( + enable, internal, work_mode) + if self.auth_mode: + if self.auth_mode == 'none': + xml_str += "dldpAuthNone" + else: + xml_str += "%s" % auth_mode + xml_str += "%s" % self.auth_pwd + + xml_str += CE_NC_MERGE_DLDP_GLOBAL_CONFIG_TAIL + ret_xml = set_nc_config(self.module, xml_str) + self.check_response(ret_xml, "MERGE_DLDP_GLOBAL_CONFIG") + + if self.reset == 'enable': + xml_str = CE_NC_ACTION_RESET_DLDP + ret_xml = execute_nc_action(self.module, xml_str) + self.check_response(ret_xml, "ACTION_RESET_DLDP") + + self.changed = True + + def get_existing(self): + """Get existing info""" + + dldp_conf = dict() + + dldp_conf['enable'] = self.dldp_conf.get('dldpEnable', None) + dldp_conf['time_interval'] = self.dldp_conf.get('dldpInterval', None) + work_mode = self.dldp_conf.get('dldpWorkMode', 
None) + if work_mode == 'dldpEnhance': + dldp_conf['work_mode'] = 'enhance' + else: + dldp_conf['work_mode'] = 'normal' + + auth_mode = self.dldp_conf.get('dldpAuthMode', None) + if auth_mode == 'dldpAuthNone': + dldp_conf['auth_mode'] = 'none' + elif auth_mode == 'dldpAuthSimple': + dldp_conf['auth_mode'] = 'simple' + elif auth_mode == 'dldpAuthMD5': + dldp_conf['auth_mode'] = 'md5' + elif auth_mode == 'dldpAuthSHA': + dldp_conf['auth_mode'] = 'sha' + else: + dldp_conf['auth_mode'] = 'hmac-sha256' + + dldp_conf['reset'] = 'disable' + + self.existing = copy.deepcopy(dldp_conf) + + def get_proposed(self): + """Get proposed result""" + + self.proposed = dict(enable=self.enable, work_mode=self.work_mode, + time_interval=self.internal, reset=self.reset, + auth_mode=self.auth_mode, auth_pwd=self.auth_pwd) + + def get_update_cmd(self): + """Get update commands""" + if self.same_conf: + return + + if self.enable and self.enable != self.dldp_conf['dldpEnable']: + if self.enable == 'enable': + self.updates_cmd.append("dldp enable") + elif self.enable == 'disable': + self.updates_cmd.append("undo dldp enable") + return + + work_mode = 'normal' + if self.dldp_conf['dldpWorkMode'] == 'dldpEnhance': + work_mode = 'enhance' + if self.work_mode and self.work_mode != work_mode: + if self.work_mode == 'enhance': + self.updates_cmd.append("dldp work-mode enhance") + else: + self.updates_cmd.append("dldp work-mode normal") + + if self.internal and self.internal != self.dldp_conf['dldpInterval']: + self.updates_cmd.append("dldp interval %s" % self.internal) + + if self.auth_mode: + if self.auth_mode == 'none': + self.updates_cmd.append("undo dldp authentication-mode") + else: + self.updates_cmd.append("dldp authentication-mode %s %s" % (self.auth_mode, self.auth_pwd)) + + if self.reset and self.reset == 'enable': + self.updates_cmd.append('dldp reset') + + def get_end_state(self): + """Get end state info""" + + dldp_conf = dict() + self.dldp_conf = self.get_dldp_exist_config() + + dldp_conf['enable'] = self.dldp_conf.get('dldpEnable', None) + dldp_conf['time_interval'] = self.dldp_conf.get('dldpInterval', None) + work_mode = self.dldp_conf.get('dldpWorkMode', None) + if work_mode == 'dldpEnhance': + dldp_conf['work_mode'] = 'enhance' + else: + dldp_conf['work_mode'] = 'normal' + + auth_mode = self.dldp_conf.get('dldpAuthMode', None) + if auth_mode == 'dldpAuthNone': + dldp_conf['auth_mode'] = 'none' + elif auth_mode == 'dldpAuthSimple': + dldp_conf['auth_mode'] = 'simple' + elif auth_mode == 'dldpAuthMD5': + dldp_conf['auth_mode'] = 'md5' + elif auth_mode == 'dldpAuthSHA': + dldp_conf['auth_mode'] = 'sha' + else: + dldp_conf['auth_mode'] = 'hmac-sha256' + + dldp_conf['reset'] = 'disable' + if self.reset == 'enable': + dldp_conf['reset'] = 'enable' + self.end_state = copy.deepcopy(dldp_conf) + + def show_result(self): + """Show result""" + + self.results['changed'] = self.changed + self.results['proposed'] = self.proposed + self.results['existing'] = self.existing + self.results['end_state'] = self.end_state + if self.changed: + self.results['updates'] = self.updates_cmd + else: + self.results['updates'] = list() + + self.module.exit_json(**self.results) + + def work(self): + """Worker""" + + self.dldp_conf = self.get_dldp_exist_config() + self.check_params() + self.same_conf = self.check_config_if_same() + self.get_existing() + self.get_proposed() + self.config_global_dldp() + self.get_update_cmd() + self.get_end_state() + self.show_result() + + +def main(): + """Main function entry""" + + argument_spec = 
dict( + enable=dict(choices=['enable', 'disable'], type='str'), + work_mode=dict(choices=['enhance', 'normal'], type='str'), + time_interval=dict(type='str'), + reset=dict(choices=['enable', 'disable'], type='str'), + auth_mode=dict(choices=['md5', 'simple', 'sha', 'hmac-sha256', 'none'], type='str'), + auth_pwd=dict(type='str', no_log=True), + ) + argument_spec.update(ce_argument_spec) + dldp_obj = Dldp(argument_spec) + dldp_obj.work() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/cloudengine/ce_dldp_interface.py b/plugins/modules/network/cloudengine/ce_dldp_interface.py new file mode 100644 index 0000000000..3efdd21a3b --- /dev/null +++ b/plugins/modules/network/cloudengine/ce_dldp_interface.py @@ -0,0 +1,662 @@ +#!/usr/bin/python +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- + +module: ce_dldp_interface +short_description: Manages interface DLDP configuration on HUAWEI CloudEngine switches. +description: + - Manages interface DLDP configuration on HUAWEI CloudEngine switches. +author: + - Zhou Zhijin (@QijunPan) +notes: + - If C(state=present, enable=disable), interface DLDP enable will be turned off and + related interface DLDP configuration will be cleared. + - If C(state=absent), only local_mac is supported to configure. + - This module requires the netconf system service be enabled on the remote device being managed. + - Recommended connection is C(netconf). + - This module also works with C(local) connections for legacy playbooks. +options: + interface: + description: + - Must be fully qualified interface name, i.e. GE1/0/1, 10GE1/0/1, 40GE1/0/22, 100GE1/0/1. + required: true + enable: + description: + - Set interface DLDP enable state. + choices: ['enable', 'disable'] + mode_enable: + description: + - Set DLDP compatible-mode enable state. + choices: ['enable', 'disable'] + local_mac: + description: + - Set the source MAC address for DLDP packets sent in the DLDP-compatible mode. + The value of MAC address is in H-H-H format. H contains 1 to 4 hexadecimal digits. + reset: + description: + - Specify whether reseting interface DLDP state. + choices: ['enable', 'disable'] + state: + description: + - Manage the state of the resource. 
+EXAMPLES = ''' +- name: DLDP interface test + hosts: cloudengine + connection: local + gather_facts: no + vars: + cli: + host: "{{ inventory_hostname }}" + port: "{{ ansible_ssh_port }}" + username: "{{ username }}" + password: "{{ password }}" + transport: cli + + tasks: + + - name: "Configure interface DLDP enable state and ensure global dldp enable is turned on" + ce_dldp_interface: + interface: 40GE2/0/1 + enable: enable + provider: "{{ cli }}" + + - name: "Configure interface DLDP compatible-mode enable state and ensure interface DLDP state is already enabled" + ce_dldp_interface: + interface: 40GE2/0/1 + enable: enable + mode_enable: enable + provider: "{{ cli }}" + + - name: "Configure the source MAC address for DLDP packets sent in the DLDP-compatible mode and + ensure interface DLDP state and compatible-mode enable state is already enabled" + ce_dldp_interface: + interface: 40GE2/0/1 + enable: enable + mode_enable: enable + local_mac: aa-aa-aa + provider: "{{ cli }}" + + - name: "Reset DLDP state of specified interface and ensure interface DLDP state is already enabled" + ce_dldp_interface: + interface: 40GE2/0/1 + enable: enable + reset: enable + provider: "{{ cli }}" + + - name: "Unconfigure interface DLDP local mac address when C(state=absent)" + ce_dldp_interface: + interface: 40GE2/0/1 + state: absent + local_mac: aa-aa-aa + provider: "{{ cli }}" +''' + +RETURN = ''' +proposed: + description: k/v pairs of parameters passed into module + returned: always + type: dict + sample: { + "enable": "enable", + "interface": "40GE2/0/22", + "local_mac": "aa-aa-aa", + "mode_enable": "enable", + "reset": "enable" + } +existing: + description: k/v pairs of existing interface DLDP configuration + returned: always + type: dict + sample: { + "enable": "disable", + "interface": "40GE2/0/22", + "local_mac": null, + "mode_enable": null, + "reset": "disable" + } +end_state: + description: k/v pairs of interface DLDP configuration after module execution + returned: always + type: dict + sample: { + "enable": "enable", + "interface": "40GE2/0/22", + "local_mac": "00aa-00aa-00aa", + "mode_enable": "enable", + "reset": "enable" + } +updates: + description: command sent to the device + returned: always + type: list + sample: [ + "dldp enable", + "dldp compatible-mode enable", + "dldp compatible-mode local-mac aa-aa-aa", + "dldp reset" + ] +changed: + description: check to see if a change was made on the device + returned: always + type: bool + sample: true +''' + +import copy +import re +from xml.etree import ElementTree +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.network.cloudengine.ce import ce_argument_spec, set_nc_config, get_nc_config, execute_nc_action + + +CE_NC_ACTION_RESET_INTF_DLDP = """ + + + + %s + + + +""" + +CE_NC_GET_INTF_DLDP_CONFIG = """ + + + + + %s + + + + + + + +""" + +CE_NC_MERGE_DLDP_INTF_CONFIG = """ + + + + + %s + %s + %s + %s + + + + +""" + +CE_NC_CREATE_DLDP_INTF_CONFIG = """ + + + + + %s + %s + %s + %s + + + + +""" + +CE_NC_DELETE_DLDP_INTF_CONFIG = """ + + + + + %s + + + + +""" + + +def judge_is_mac_same(mac1, mac2): + """Judge whether two macs are the same""" + + if mac1 == mac2: + return True + + list1 = re.findall(r'([0-9A-Fa-f]+)', mac1) + list2 = re.findall(r'([0-9A-Fa-f]+)', mac2) + if len(list1) != len(list2): + return False + + for index, value in enumerate(list1, start=0): + if value.lstrip('0').lower() != 
list2[index].lstrip('0').lower(): + return False + + return True + + +def get_interface_type(interface): + """Gets the type of interface, such as 10GE, ETH-TRUNK, VLANIF...""" + + if interface is None: + return None + + iftype = None + + if interface.upper().startswith('GE'): + iftype = 'ge' + elif interface.upper().startswith('10GE'): + iftype = '10ge' + elif interface.upper().startswith('25GE'): + iftype = '25ge' + elif interface.upper().startswith('4X10GE'): + iftype = '4x10ge' + elif interface.upper().startswith('40GE'): + iftype = '40ge' + elif interface.upper().startswith('100GE'): + iftype = '100ge' + elif interface.upper().startswith('VLANIF'): + iftype = 'vlanif' + elif interface.upper().startswith('LOOPBACK'): + iftype = 'loopback' + elif interface.upper().startswith('METH'): + iftype = 'meth' + elif interface.upper().startswith('ETH-TRUNK'): + iftype = 'eth-trunk' + elif interface.upper().startswith('VBDIF'): + iftype = 'vbdif' + elif interface.upper().startswith('NVE'): + iftype = 'nve' + elif interface.upper().startswith('TUNNEL'): + iftype = 'tunnel' + elif interface.upper().startswith('ETHERNET'): + iftype = 'ethernet' + elif interface.upper().startswith('FCOE-PORT'): + iftype = 'fcoe-port' + elif interface.upper().startswith('FABRIC-PORT'): + iftype = 'fabric-port' + elif interface.upper().startswith('STACK-PORT'): + iftype = 'stack-Port' + elif interface.upper().startswith('NULL'): + iftype = 'null' + else: + return None + + return iftype.lower() + + +class DldpInterface(object): + """Manage interface dldp configuration""" + + def __init__(self, argument_spec): + self.spec = argument_spec + self.module = None + self.init_module() + + # DLDP interface configuration info + self.interface = self.module.params['interface'] + self.enable = self.module.params['enable'] or None + self.reset = self.module.params['reset'] or None + self.mode_enable = self.module.params['mode_enable'] or None + self.local_mac = self.module.params['local_mac'] or None + self.state = self.module.params['state'] + + self.dldp_intf_conf = dict() + self.same_conf = False + + # state + self.changed = False + self.updates_cmd = list() + self.results = dict() + self.proposed = dict() + self.existing = list() + self.end_state = list() + + def check_config_if_same(self): + """Judge whether the current config is the same as what we expected""" + + if self.state == 'absent': + return False + else: + if self.enable and self.enable != self.dldp_intf_conf['dldpEnable']: + return False + + if self.mode_enable and self.mode_enable != self.dldp_intf_conf['dldpCompatibleEnable']: + return False + + if self.local_mac: + flag = judge_is_mac_same( + self.local_mac, self.dldp_intf_conf['dldpLocalMac']) + if not flag: + return False + + if self.reset and self.reset == 'enable': + return False + return True + + def check_macaddr(self): + """Check whether the mac-address is valid""" + + valid_char = '0123456789abcdef-' + mac = self.local_mac + + if len(mac) > 16: + return False + + mac_list = re.findall(r'([0-9a-fA-F]+)', mac) + if len(mac_list) != 3: + return False + + if mac.count('-') != 2: + return False + + for _, value in enumerate(mac, start=0): + if value.lower() not in valid_char: + return False + + return True + + def check_params(self): + """Check all input params""" + + if not self.interface: + self.module.fail_json(msg='Error: Interface name cannot be empty.') + + if self.interface: + intf_type = get_interface_type(self.interface) + if not intf_type: + self.module.fail_json( + msg='Error: Interface name of %s ' + 'is invalid.' % self.interface) + + if (self.state == 'absent') and (self.reset or self.mode_enable or self.enable): + self.module.fail_json(msg="Error: Use state=present when " + "configuring or unconfiguring enable, mode_enable " + "or the reset flag. state=absent is only for " + "the local_mac param.") + + if self.state == 'absent' and not self.local_mac: + self.module.fail_json( + msg="Error: Please specify the local_mac parameter.") + + if self.state == 'present': + if (self.dldp_intf_conf['dldpEnable'] == 'disable' and not self.enable and + (self.mode_enable or self.local_mac or self.reset)): + self.module.fail_json(msg="Error: when DLDP is already disabled on this port, " + "the mode_enable, local_mac and reset parameters " + "cannot be configured.") + + if self.enable == 'disable' and (self.mode_enable or self.local_mac or self.reset): + self.module.fail_json(msg="Error: when using enable=disable, " + "the mode_enable, local_mac and reset parameters " + "cannot be configured.") + + if self.local_mac and (self.mode_enable == 'disable' or + (self.dldp_intf_conf['dldpCompatibleEnable'] == 'disable' and self.mode_enable != 'enable')): + self.module.fail_json(msg="Error: when DLDP compatible-mode is disabled on this port, " + "configuring local_mac is not allowed.") + + if self.local_mac: + if not self.check_macaddr(): + self.module.fail_json( + msg="Error: local_mac has invalid value %s." % self.local_mac) + + def init_module(self): + """Init module object""" + + self.module = AnsibleModule( + argument_spec=self.spec, supports_check_mode=True) + + def check_response(self, xml_str, xml_name): + """Check whether the response message indicates success""" + + if "<ok/>" not in xml_str: + self.module.fail_json(msg='Error: %s failed.' % xml_name) + + def get_dldp_intf_exist_config(self): + """Get the current existing DLDP configuration""" + + dldp_conf = dict() + xml_str = CE_NC_GET_INTF_DLDP_CONFIG % self.interface + con_obj = get_nc_config(self.module, xml_str) + if "<data/>" in con_obj: + dldp_conf['dldpEnable'] = 'disable' + dldp_conf['dldpCompatibleEnable'] = "" + dldp_conf['dldpLocalMac'] = "" + return dldp_conf + + xml_str = con_obj.replace('\r', '').replace('\n', '').\ + replace('xmlns="urn:ietf:params:xml:ns:netconf:base:1.0"', "").\ + replace('xmlns="http://www.huawei.com/netconf/vrp"', "") + + # get interface DLDP info + root = ElementTree.fromstring(xml_str) + topo = root.find("dldp/dldpInterfaces/dldpInterface") + if topo is None: + self.module.fail_json( + msg="Error: Get current DLDP configuration failed.") + for eles in topo: + if eles.tag in ["dldpEnable", "dldpCompatibleEnable", "dldpLocalMac"]: + if not eles.text: + dldp_conf[eles.tag] = "" + else: + if eles.tag == "dldpEnable" or eles.tag == "dldpCompatibleEnable": + if eles.text == 'true': + value = 'enable' + else: + value = 'disable' + else: + value = eles.text + dldp_conf[eles.tag] = value + + return dldp_conf + + def config_intf_dldp(self): + """Config interface dldp""" + + if self.same_conf: + return + + if self.state == "present": + enable = self.enable + if not self.enable: + enable = self.dldp_intf_conf['dldpEnable'] + if enable == 'enable': + enable = 'true' + else: + enable = 'false' + + mode_enable = self.mode_enable + if not self.mode_enable: + mode_enable = self.dldp_intf_conf['dldpCompatibleEnable'] + if mode_enable == 'enable': + mode_enable = 'true' + else: + mode_enable = 'false' + + local_mac = self.local_mac + if not self.local_mac: + local_mac = self.dldp_intf_conf['dldpLocalMac'] + + if self.enable == 'disable' 
and self.enable != self.dldp_intf_conf['dldpEnable']: + xml_str = CE_NC_DELETE_DLDP_INTF_CONFIG % self.interface + ret_xml = set_nc_config(self.module, xml_str) + self.check_response(ret_xml, "DELETE_DLDP_INTF_CONFIG") + elif self.dldp_intf_conf['dldpEnable'] == 'disable' and self.enable == 'enable': + xml_str = CE_NC_CREATE_DLDP_INTF_CONFIG % ( + self.interface, 'true', mode_enable, local_mac) + ret_xml = set_nc_config(self.module, xml_str) + self.check_response(ret_xml, "CREATE_DLDP_INTF_CONFIG") + elif self.dldp_intf_conf['dldpEnable'] == 'enable': + if mode_enable == 'false': + local_mac = '' + xml_str = CE_NC_MERGE_DLDP_INTF_CONFIG % ( + self.interface, enable, mode_enable, local_mac) + ret_xml = set_nc_config(self.module, xml_str) + self.check_response(ret_xml, "MERGE_DLDP_INTF_CONFIG") + + if self.reset == 'enable': + xml_str = CE_NC_ACTION_RESET_INTF_DLDP % self.interface + ret_xml = execute_nc_action(self.module, xml_str) + self.check_response(ret_xml, "ACTION_RESET_INTF_DLDP") + + self.changed = True + else: + if self.local_mac and judge_is_mac_same(self.local_mac, self.dldp_intf_conf['dldpLocalMac']): + if self.dldp_intf_conf['dldpEnable'] == 'enable': + dldp_enable = 'true' + else: + dldp_enable = 'false' + if self.dldp_intf_conf['dldpCompatibleEnable'] == 'enable': + dldp_compat_enable = 'true' + else: + dldp_compat_enable = 'false' + xml_str = CE_NC_MERGE_DLDP_INTF_CONFIG % (self.interface, dldp_enable, dldp_compat_enable, '') + ret_xml = set_nc_config(self.module, xml_str) + self.check_response(ret_xml, "UNDO_DLDP_INTF_LOCAL_MAC_CONFIG") + self.changed = True + + def get_existing(self): + """Get existing info""" + + dldp_conf = dict() + + dldp_conf['interface'] = self.interface + dldp_conf['enable'] = self.dldp_intf_conf.get('dldpEnable', None) + dldp_conf['mode_enable'] = self.dldp_intf_conf.get( + 'dldpCompatibleEnable', None) + dldp_conf['local_mac'] = self.dldp_intf_conf.get('dldpLocalMac', None) + dldp_conf['reset'] = 'disable' + + self.existing = copy.deepcopy(dldp_conf) + + def get_proposed(self): + """Get proposed result """ + + self.proposed = dict(interface=self.interface, enable=self.enable, + mode_enable=self.mode_enable, local_mac=self.local_mac, + reset=self.reset, state=self.state) + + def get_update_cmd(self): + """Get updated commands""" + + if self.same_conf: + return + + if self.state == "present": + if self.enable and self.enable != self.dldp_intf_conf['dldpEnable']: + if self.enable == 'enable': + self.updates_cmd.append("dldp enable") + elif self.enable == 'disable': + self.updates_cmd.append("undo dldp enable") + + if self.mode_enable and self.mode_enable != self.dldp_intf_conf['dldpCompatibleEnable']: + if self.mode_enable == 'enable': + self.updates_cmd.append("dldp compatible-mode enable") + else: + self.updates_cmd.append("undo dldp compatible-mode enable") + + if self.local_mac: + flag = judge_is_mac_same( + self.local_mac, self.dldp_intf_conf['dldpLocalMac']) + if not flag: + self.updates_cmd.append( + "dldp compatible-mode local-mac %s" % self.local_mac) + + if self.reset and self.reset == 'enable': + self.updates_cmd.append('dldp reset') + else: + if self.changed: + self.updates_cmd.append("undo dldp compatible-mode local-mac") + + def get_end_state(self): + """Get end state info""" + + dldp_conf = dict() + self.dldp_intf_conf = self.get_dldp_intf_exist_config() + + dldp_conf['interface'] = self.interface + dldp_conf['enable'] = self.dldp_intf_conf.get('dldpEnable', None) + dldp_conf['mode_enable'] = self.dldp_intf_conf.get( + 
'dldpCompatibleEnable', None) + dldp_conf['local_mac'] = self.dldp_intf_conf.get('dldpLocalMac', None) + dldp_conf['reset'] = 'disable' + if self.reset == 'enable': + dldp_conf['reset'] = 'enable' + + self.end_state = copy.deepcopy(dldp_conf) + + def show_result(self): + """Show result""" + + self.results['changed'] = self.changed + self.results['proposed'] = self.proposed + self.results['existing'] = self.existing + self.results['end_state'] = self.end_state + if self.changed: + self.results['updates'] = self.updates_cmd + else: + self.results['updates'] = list() + + self.module.exit_json(**self.results) + + def work(self): + """Execute task""" + + self.dldp_intf_conf = self.get_dldp_intf_exist_config() + self.check_params() + self.same_conf = self.check_config_if_same() + self.get_existing() + self.get_proposed() + self.config_intf_dldp() + self.get_update_cmd() + self.get_end_state() + self.show_result() + + +def main(): + """Main function entry""" + + argument_spec = dict( + interface=dict(required=True, type='str'), + enable=dict(choices=['enable', 'disable'], type='str'), + reset=dict(choices=['enable', 'disable'], type='str'), + mode_enable=dict(choices=['enable', 'disable'], type='str'), + local_mac=dict(type='str'), + state=dict(choices=['absent', 'present'], default='present'), + ) + argument_spec.update(ce_argument_spec) + dldp_intf_obj = DldpInterface(argument_spec) + dldp_intf_obj.work() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/cloudengine/ce_eth_trunk.py b/plugins/modules/network/cloudengine/ce_eth_trunk.py new file mode 100644 index 0000000000..3f2dde7ec4 --- /dev/null +++ b/plugins/modules/network/cloudengine/ce_eth_trunk.py @@ -0,0 +1,676 @@ +#!/usr/bin/python +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: ce_eth_trunk +short_description: Manages Eth-Trunk interfaces on HUAWEI CloudEngine switches. +description: + - Manages Eth-Trunk specific configuration parameters on HUAWEI CloudEngine switches. +author: QijunPan (@QijunPan) +notes: + - C(state=absent) removes the Eth-Trunk config and interface if it + already exists. If members to be removed are not explicitly + passed, all existing members (if any), are removed, + and Eth-Trunk removed. + - Members must be a list. + - This module requires the netconf system service be enabled on the remote device being managed. + - Recommended connection is C(netconf). + - This module also works with C(local) connections for legacy playbooks. +options: + trunk_id: + description: + - Eth-Trunk interface number. + The value is an integer. + The value range depends on the assign forward eth-trunk mode command. + When 256 is specified, the value ranges from 0 to 255. 
+ When 512 is specified, the value ranges from 0 to 511. + When 1024 is specified, the value ranges from 0 to 1023. + required: true + mode: + description: + - Specifies the working mode of an Eth-Trunk interface. + choices: ['manual','lacp-dynamic','lacp-static'] + min_links: + description: + - Specifies the minimum number of Eth-Trunk member links in the Up state. + The value is an integer ranging from 1 to the maximum number of interfaces + that can be added to a Eth-Trunk interface. + hash_type: + description: + - Hash algorithm used for load balancing among Eth-Trunk member interfaces. + choices: ['src-dst-ip', 'src-dst-mac', 'enhanced', 'dst-ip', 'dst-mac', 'src-ip', 'src-mac'] + members: + description: + - List of interfaces that will be managed in a given Eth-Trunk. + The interface name must be full name. + force: + description: + - When true it forces Eth-Trunk members to match what is + declared in the members param. This can be used to remove + members. + type: bool + default: 'no' + state: + description: + - Manage the state of the resource. + default: present + choices: ['present','absent'] +''' +EXAMPLES = ''' +- name: eth_trunk module test + hosts: cloudengine + connection: local + gather_facts: no + vars: + cli: + host: "{{ inventory_hostname }}" + port: "{{ ansible_ssh_port }}" + username: "{{ username }}" + password: "{{ password }}" + transport: cli + + tasks: + - name: Ensure Eth-Trunk100 is created, add two members, and set to mode lacp-static + ce_eth_trunk: + trunk_id: 100 + members: ['10GE1/0/24','10GE1/0/25'] + mode: 'lacp-static' + state: present + provider: '{{ cli }}' +''' + +RETURN = ''' +proposed: + description: k/v pairs of parameters passed into module + returned: always + type: dict + sample: {"trunk_id": "100", "members": ['10GE1/0/24','10GE1/0/25'], "mode": "lacp-static"} +existing: + description: k/v pairs of existing Eth-Trunk + returned: always + type: dict + sample: {"trunk_id": "100", "hash_type": "mac", "members_detail": [ + {"memberIfName": "10GE1/0/25", "memberIfState": "Down"}], + "min_links": "1", "mode": "manual"} +end_state: + description: k/v pairs of Eth-Trunk info after module execution + returned: always + type: dict + sample: {"trunk_id": "100", "hash_type": "mac", "members_detail": [ + {"memberIfName": "10GE1/0/24", "memberIfState": "Down"}, + {"memberIfName": "10GE1/0/25", "memberIfState": "Down"}], + "min_links": "1", "mode": "lacp-static"} +updates: + description: command sent to the device + returned: always + type: list + sample: ["interface Eth-Trunk 100", + "mode lacp-static", + "interface 10GE1/0/25", + "eth-trunk 100"] +changed: + description: check to see if a change was made on the device + returned: always + type: bool + sample: true +''' + +import re +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.network.cloudengine.ce import get_nc_config, set_nc_config, ce_argument_spec + +CE_NC_GET_TRUNK = """ + + + + + Eth-Trunk%s + + + + + + + + + + + + + + + + + +""" + +CE_NC_XML_BUILD_TRUNK_CFG = """ + + + %s + + +""" + +CE_NC_XML_DELETE_TRUNK = """ + + Eth-Trunk%s + +""" + +CE_NC_XML_CREATE_TRUNK = """ + + Eth-Trunk%s + +""" + +CE_NC_XML_MERGE_MINUPNUM = """ + + Eth-Trunk%s + %s + +""" + +CE_NC_XML_MERGE_HASHTYPE = """ + + Eth-Trunk%s + %s + +""" + +CE_NC_XML_MERGE_WORKMODE = """ + + Eth-Trunk%s + %s + +""" + +CE_NC_XML_BUILD_MEMBER_CFG = """ + + Eth-Trunk%s + %s + +""" + +CE_NC_XML_MERGE_MEMBER = """ + + %s + +""" + +CE_NC_XML_DELETE_MEMBER = """ + + %s + +""" + 
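These CE_NC_XML_* fragment templates are composed by plain %-substitution: per-attribute fragments are concatenated, member-interface fragments are wrapped by CE_NC_XML_BUILD_MEMBER_CFG, and the whole body goes through CE_NC_XML_BUILD_TRUNK_CFG before set_nc_config() pushes it over NETCONF. A minimal sketch of the composition pattern used by create_eth_trunk() below, with illustrative values, assuming the template constants above are in scope:

    # Compose an edit-config body for Eth-Trunk 100 with one member and min 2 up links.
    trunk_id = '100'
    xml_str = CE_NC_XML_CREATE_TRUNK % trunk_id                  # create the trunk
    xml_str += CE_NC_XML_MERGE_MINUPNUM % (trunk_id, '2')        # least active-linknumber 2
    mem_xml = CE_NC_XML_MERGE_MEMBER % '10GE1/0/24'              # one member port
    xml_str += CE_NC_XML_BUILD_MEMBER_CFG % (trunk_id, mem_xml)  # wrap the member fragment
    cfg_xml = CE_NC_XML_BUILD_TRUNK_CFG % xml_str                # outer envelope for set_nc_config()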
+MODE_XML2CLI = {"Manual": "manual", "Dynamic": "lacp-dynamic", "Static": "lacp-static"} +MODE_CLI2XML = {"manual": "Manual", "lacp-dynamic": "Dynamic", "lacp-static": "Static"} +HASH_XML2CLI = {"IP": "src-dst-ip", "MAC": "src-dst-mac", "Enhanced": "enhanced", + "Desip": "dst-ip", "Desmac": "dst-mac", "Sourceip": "src-ip", "Sourcemac": "src-mac"} +HASH_CLI2XML = {"src-dst-ip": "IP", "src-dst-mac": "MAC", "enhanced": "Enhanced", + "dst-ip": "Desip", "dst-mac": "Desmac", "src-ip": "Sourceip", "src-mac": "Sourcemac"} + + +def get_interface_type(interface): + """Gets the type of interface, such as 10GE, ETH-TRUNK, VLANIF...""" + + if interface is None: + return None + + iftype = None + + if interface.upper().startswith('GE'): + iftype = 'ge' + elif interface.upper().startswith('10GE'): + iftype = '10ge' + elif interface.upper().startswith('25GE'): + iftype = '25ge' + elif interface.upper().startswith('4X10GE'): + iftype = '4x10ge' + elif interface.upper().startswith('40GE'): + iftype = '40ge' + elif interface.upper().startswith('100GE'): + iftype = '100ge' + elif interface.upper().startswith('VLANIF'): + iftype = 'vlanif' + elif interface.upper().startswith('LOOPBACK'): + iftype = 'loopback' + elif interface.upper().startswith('METH'): + iftype = 'meth' + elif interface.upper().startswith('ETH-TRUNK'): + iftype = 'eth-trunk' + elif interface.upper().startswith('VBDIF'): + iftype = 'vbdif' + elif interface.upper().startswith('NVE'): + iftype = 'nve' + elif interface.upper().startswith('TUNNEL'): + iftype = 'tunnel' + elif interface.upper().startswith('ETHERNET'): + iftype = 'ethernet' + elif interface.upper().startswith('FCOE-PORT'): + iftype = 'fcoe-port' + elif interface.upper().startswith('FABRIC-PORT'): + iftype = 'fabric-port' + elif interface.upper().startswith('STACK-PORT'): + iftype = 'stack-port' + elif interface.upper().startswith('NULL'): + iftype = 'null' + else: + return None + + return iftype.lower() + + +def mode_xml_to_cli_str(mode): + """convert mode to cli format string""" + + if not mode: + return "" + + return MODE_XML2CLI.get(mode) + + +def hash_type_xml_to_cli_str(hash_type): + """convert trunk hash type netconf xml to cli format string""" + + if not hash_type: + return "" + + return HASH_XML2CLI.get(hash_type) + + +class EthTrunk(object): + """ + Manages Eth-Trunk interfaces. + """ + + def __init__(self, argument_spec): + self.spec = argument_spec + self.module = None + self.__init_module__() + + # module input info + self.trunk_id = self.module.params['trunk_id'] + self.mode = self.module.params['mode'] + self.min_links = self.module.params['min_links'] + self.hash_type = self.module.params['hash_type'] + self.members = self.module.params['members'] + self.state = self.module.params['state'] + self.force = self.module.params['force'] + + # state + self.changed = False + self.updates_cmd = list() + self.results = dict() + self.proposed = dict() + self.existing = dict() + self.end_state = dict() + + # interface info + self.trunk_info = dict() + + def __init_module__(self): + """ init module """ + + self.module = AnsibleModule( + argument_spec=self.spec, supports_check_mode=True) + + def netconf_set_config(self, xml_str, xml_name): + """ netconf set config """ + + recv_xml = set_nc_config(self.module, xml_str) + + if "" not in recv_xml: + self.module.fail_json(msg='Error: %s failed.' 
% xml_name) + + def get_trunk_dict(self, trunk_id): + """ get one interface attributes dict.""" + + trunk_info = dict() + conf_str = CE_NC_GET_TRUNK % trunk_id + recv_xml = get_nc_config(self.module, conf_str) + + if "" in recv_xml: + return trunk_info + + # get trunk base info + base = re.findall( + r'.*(.*).*\s*' + r'(.*).*\s*' + r'(.*).*\s*' + r'(.*).*\s*' + r'(.*).*\s*' + r'(.*).*\s*' + r'(.*).*\s*' + r'(.*).*', recv_xml) + + if base: + trunk_info = dict(ifName=base[0][0], + trunkId=base[0][0].lower().replace("eth-trunk", "").replace(" ", ""), + minUpNum=base[0][1], + maxUpNum=base[0][2], + trunkType=base[0][3], + hashType=base[0][4], + workMode=base[0][5], + upMemberIfNum=base[0][6], + memberIfNum=base[0][7]) + + # get trunk member interface info + member = re.findall( + r'.*(.*).*\s*' + r'(.*).*', recv_xml) + trunk_info["TrunkMemberIfs"] = list() + + for mem in member: + trunk_info["TrunkMemberIfs"].append( + dict(memberIfName=mem[0], memberIfState=mem[1])) + + return trunk_info + + def is_member_exist(self, ifname): + """is trunk member exist""" + + if not self.trunk_info["TrunkMemberIfs"]: + return False + + for mem in self.trunk_info["TrunkMemberIfs"]: + if ifname.replace(" ", "").upper() == mem["memberIfName"].replace(" ", "").upper(): + return True + + return False + + def get_mode_xml_str(self): + """trunk mode netconf xml format string""" + + return MODE_CLI2XML.get(self.mode) + + def get_hash_type_xml_str(self): + """trunk hash type netconf xml format string""" + + return HASH_CLI2XML.get(self.hash_type) + + def create_eth_trunk(self): + """Create Eth-Trunk interface""" + + xml_str = CE_NC_XML_CREATE_TRUNK % self.trunk_id + self.updates_cmd.append("interface Eth-Trunk %s" % self.trunk_id) + + if self.hash_type: + self.updates_cmd.append("load-balance %s" % self.hash_type) + xml_str += CE_NC_XML_MERGE_HASHTYPE % (self.trunk_id, self.get_hash_type_xml_str()) + + if self.mode: + self.updates_cmd.append("mode %s" % self.mode) + xml_str += CE_NC_XML_MERGE_WORKMODE % (self.trunk_id, self.get_mode_xml_str()) + + if self.min_links: + self.updates_cmd.append("least active-linknumber %s" % self.min_links) + xml_str += CE_NC_XML_MERGE_MINUPNUM % (self.trunk_id, self.min_links) + + if self.members: + mem_xml = "" + for mem in self.members: + mem_xml += CE_NC_XML_MERGE_MEMBER % mem.upper() + self.updates_cmd.append("interface %s" % mem) + self.updates_cmd.append("eth-trunk %s" % self.trunk_id) + xml_str += CE_NC_XML_BUILD_MEMBER_CFG % (self.trunk_id, mem_xml) + cfg_xml = CE_NC_XML_BUILD_TRUNK_CFG % xml_str + self.netconf_set_config(cfg_xml, "CREATE_TRUNK") + self.changed = True + + def delete_eth_trunk(self): + """Delete Eth-Trunk interface and remove all member""" + + if not self.trunk_info: + return + + xml_str = "" + mem_str = "" + if self.trunk_info["TrunkMemberIfs"]: + for mem in self.trunk_info["TrunkMemberIfs"]: + mem_str += CE_NC_XML_DELETE_MEMBER % mem["memberIfName"] + self.updates_cmd.append("interface %s" % mem["memberIfName"]) + self.updates_cmd.append("undo eth-trunk") + if mem_str: + xml_str += CE_NC_XML_BUILD_MEMBER_CFG % (self.trunk_id, mem_str) + + xml_str += CE_NC_XML_DELETE_TRUNK % self.trunk_id + self.updates_cmd.append("undo interface Eth-Trunk %s" % self.trunk_id) + cfg_xml = CE_NC_XML_BUILD_TRUNK_CFG % xml_str + self.netconf_set_config(cfg_xml, "DELETE_TRUNK") + self.changed = True + + def remove_member(self): + """delete trunk member""" + + if not self.members: + return + + change = False + mem_xml = "" + xml_str = "" + for mem in self.members: + if 
self.is_member_exist(mem): + mem_xml += CE_NC_XML_DELETE_MEMBER % mem.upper() + self.updates_cmd.append("interface %s" % mem) + self.updates_cmd.append("undo eth-trunk") + if mem_xml: + xml_str += CE_NC_XML_BUILD_MEMBER_CFG % (self.trunk_id, mem_xml) + change = True + + if not change: + return + + cfg_xml = CE_NC_XML_BUILD_TRUNK_CFG % xml_str + self.netconf_set_config(cfg_xml, "REMOVE_TRUNK_MEMBER") + self.changed = True + + def merge_eth_trunk(self): + """Create or merge Eth-Trunk""" + + change = False + xml_str = "" + self.updates_cmd.append("interface Eth-Trunk %s" % self.trunk_id) + if self.hash_type and self.get_hash_type_xml_str() != self.trunk_info["hashType"]: + self.updates_cmd.append("load-balance %s" % + self.hash_type) + xml_str += CE_NC_XML_MERGE_HASHTYPE % ( + self.trunk_id, self.get_hash_type_xml_str()) + change = True + if self.min_links and self.min_links != self.trunk_info["minUpNum"]: + self.updates_cmd.append( + "least active-linknumber %s" % self.min_links) + xml_str += CE_NC_XML_MERGE_MINUPNUM % ( + self.trunk_id, self.min_links) + change = True + if self.mode and self.get_mode_xml_str() != self.trunk_info["workMode"]: + self.updates_cmd.append("mode %s" % self.mode) + xml_str += CE_NC_XML_MERGE_WORKMODE % ( + self.trunk_id, self.get_mode_xml_str()) + change = True + + if not change: + self.updates_cmd.pop() # remove 'interface Eth-Trunk' command + + # deal force: + # When true it forces Eth-Trunk members to match + # what is declared in the members param. + if self.force and self.trunk_info["TrunkMemberIfs"]: + mem_xml = "" + for mem in self.trunk_info["TrunkMemberIfs"]: + if not self.members or mem["memberIfName"].replace(" ", "").upper() not in self.members: + mem_xml += CE_NC_XML_DELETE_MEMBER % mem["memberIfName"] + self.updates_cmd.append("interface %s" % mem["memberIfName"]) + self.updates_cmd.append("undo eth-trunk") + if mem_xml: + xml_str += CE_NC_XML_BUILD_MEMBER_CFG % (self.trunk_id, mem_xml) + change = True + + if self.members: + mem_xml = "" + for mem in self.members: + if not self.is_member_exist(mem): + mem_xml += CE_NC_XML_MERGE_MEMBER % mem.upper() + self.updates_cmd.append("interface %s" % mem) + self.updates_cmd.append("eth-trunk %s" % self.trunk_id) + if mem_xml: + xml_str += CE_NC_XML_BUILD_MEMBER_CFG % ( + self.trunk_id, mem_xml) + change = True + + if not change: + return + + cfg_xml = CE_NC_XML_BUILD_TRUNK_CFG % xml_str + self.netconf_set_config(cfg_xml, "MERGE_TRUNK") + self.changed = True + + def check_params(self): + """Check all input params""" + + # trunk_id check + if not self.trunk_id.isdigit(): + self.module.fail_json(msg='The parameter of trunk_id is invalid.') + + # min_links check + if self.min_links and not self.min_links.isdigit(): + self.module.fail_json(msg='The parameter of min_links is invalid.') + + # members check and convert members to upper + if self.members: + for mem in self.members: + if not get_interface_type(mem.replace(" ", "")): + self.module.fail_json( + msg='The parameter of members is invalid.') + + for mem_id in range(len(self.members)): + self.members[mem_id] = self.members[mem_id].replace(" ", "").upper() + + def get_proposed(self): + """get proposed info""" + + self.proposed["trunk_id"] = self.trunk_id + self.proposed["mode"] = self.mode + if self.min_links: + self.proposed["min_links"] = self.min_links + self.proposed["hash_type"] = self.hash_type + if self.members: + self.proposed["members"] = self.members + self.proposed["state"] = self.state + self.proposed["force"] = self.force + + def 
get_existing(self): + """get existing info""" + + if not self.trunk_info: + return + + self.existing["trunk_id"] = self.trunk_info["trunkId"] + self.existing["min_links"] = self.trunk_info["minUpNum"] + self.existing["hash_type"] = hash_type_xml_to_cli_str(self.trunk_info["hashType"]) + self.existing["mode"] = mode_xml_to_cli_str(self.trunk_info["workMode"]) + self.existing["members_detail"] = self.trunk_info["TrunkMemberIfs"] + + def get_end_state(self): + """get end state info""" + + trunk_info = self.get_trunk_dict(self.trunk_id) + if not trunk_info: + return + + self.end_state["trunk_id"] = trunk_info["trunkId"] + self.end_state["min_links"] = trunk_info["minUpNum"] + self.end_state["hash_type"] = hash_type_xml_to_cli_str(trunk_info["hashType"]) + self.end_state["mode"] = mode_xml_to_cli_str(trunk_info["workMode"]) + self.end_state["members_detail"] = trunk_info["TrunkMemberIfs"] + + def work(self): + """worker""" + + self.check_params() + self.trunk_info = self.get_trunk_dict(self.trunk_id) + self.get_existing() + self.get_proposed() + + # deal present or absent + if self.state == "present": + if not self.trunk_info: + # create + self.create_eth_trunk() + else: + # merge trunk + self.merge_eth_trunk() + else: + if self.trunk_info: + if not self.members: + # remove all members and delete trunk + self.delete_eth_trunk() + else: + # remove some trunk members + self.remove_member() + else: + self.module.fail_json(msg='Error: Eth-Trunk does not exist.') + + self.get_end_state() + self.results['changed'] = self.changed + self.results['proposed'] = self.proposed + self.results['existing'] = self.existing + self.results['end_state'] = self.end_state + if self.changed: + self.results['updates'] = self.updates_cmd + else: + self.results['updates'] = list() + + self.module.exit_json(**self.results) + + +def main(): + """Module main""" + + argument_spec = dict( + trunk_id=dict(required=True), + mode=dict(required=False, + choices=['manual', 'lacp-dynamic', 'lacp-static'], + type='str'), + min_links=dict(required=False, type='str'), + hash_type=dict(required=False, + choices=['src-dst-ip', 'src-dst-mac', 'enhanced', + 'dst-ip', 'dst-mac', 'src-ip', 'src-mac'], + type='str'), + members=dict(required=False, default=None, type='list'), + force=dict(required=False, default=False, type='bool'), + state=dict(required=False, default='present', + choices=['present', 'absent']) + ) + + argument_spec.update(ce_argument_spec) + module = EthTrunk(argument_spec) + module.work() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/cloudengine/ce_evpn_bd_vni.py b/plugins/modules/network/cloudengine/ce_evpn_bd_vni.py new file mode 100644 index 0000000000..caf396d912 --- /dev/null +++ b/plugins/modules/network/cloudengine/ce_evpn_bd_vni.py @@ -0,0 +1,1057 @@ +#!/usr/bin/python +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+# + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: ce_evpn_bd_vni +short_description: Manages EVPN VXLAN Network Identifier (VNI) on HUAWEI CloudEngine switches. +description: + - Manages Ethernet Virtual Private Network (EVPN) VXLAN Network + Identifier (VNI) configurations on HUAWEI CloudEngine switches. +author: Zhijin Zhou (@QijunPan) +notes: + - Ensure that EVPN has been configured to serve as the VXLAN control plane when state is present. + - Ensure that the bridge domain (BD) already exists when state is present. + - Ensure that a VNI has been created and associated with the bridge domain (BD) when state is present. + - If you configure evpn:disable to delete an EVPN instance, all configurations in the EVPN instance are deleted. + - After an EVPN instance has been created in the BD view, you can configure an RD using the route_distinguisher + parameter in BD-EVPN instance view. + - Before configuring VPN targets for a BD EVPN instance, ensure that an RD has been configured + for the BD EVPN instance. + - If you unconfigure route_distinguisher, all VPN target attributes for the BD EVPN instance will be removed at the same time. + - When using state:absent, evpn is not supported and it will be ignored. + - When using state:absent to delete VPN target attributes, ensure that the VPN target attributes to be deleted + already exist; otherwise an error is reported. + - This module requires the netconf system service be enabled on the remote device being managed. + - Recommended connection is C(netconf). + - This module also works with C(local) connections for legacy playbooks. +options: + bridge_domain_id: + description: + - Specify an existing bridge domain (BD). The value is an integer ranging from 1 to 16777215. + required: true + evpn: + description: + - Create or delete an EVPN instance for a VXLAN in BD view. + choices: ['enable','disable'] + default: 'enable' + route_distinguisher: + description: + - Configures a route distinguisher (RD) for a BD EVPN instance. + The format of an RD can be as follows + - 1) 2-byte AS number:4-byte user-defined number, for example, 1:3. An AS number is an integer ranging from + 0 to 65535, and a user-defined number is an integer ranging from 0 to 4294967295. The AS and user-defined + numbers cannot be both 0s. This means that an RD cannot be 0:0. + - 2) Integral 4-byte AS number:2-byte user-defined number, for example, 65537:3. An AS number is an integer + ranging from 65536 to 4294967295, and a user-defined number is an integer ranging from 0 to 65535. + - 3) 4-byte AS number in dotted notation:2-byte user-defined number, for example, 0.0:3 or 0.1:0. A 4-byte + AS number in dotted notation is in the format of x.y, where x and y are integers ranging from 0 to 65535. + - 4) A user-defined number is an integer ranging from 0 to 65535. The AS and user-defined numbers cannot be + both 0s. This means that an RD cannot be 0.0:0. + - 5) 32-bit IP address:2-byte user-defined number. For example, 192.168.122.15:1. An IP address ranges from + 0.0.0.0 to 255.255.255.255, and a user-defined number is an integer ranging from 0 to 65535. + - 6) 'auto' specifies the RD that is automatically generated. + vpn_target_both: + description: + - Add VPN targets to both the import and export VPN target lists of a BD EVPN instance. + The format is the same as route_distinguisher (a quick shape check is sketched below).
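The 'left:right' RD/RT shapes listed above can be sanity-checked in a few lines of Python. This is a simplified sketch only; the module's own is_valid_value(), defined later in this file, enforces the full numeric ranges:

    def rd_shape_ok(rd):
        """Loose shape check for the 'left:right' RD/RT forms described above."""
        if rd.lower() == 'auto':                      # auto-generated RD/RT
            return True
        parts = rd.split(':')
        if len(parts) != 2 or not parts[1].isdigit():
            return False
        left = parts[0].split('.')
        # plain AS number, dotted x.y AS notation, or an IPv4 address
        return all(p.isdigit() for p in left) and len(left) in (1, 2, 4)

    assert rd_shape_ok('1:3')                  # 2-byte AS : 4-byte number
    assert rd_shape_ok('65537:3')              # 4-byte AS : 2-byte number
    assert rd_shape_ok('0.1:0')                # dotted AS notation
    assert rd_shape_ok('192.168.122.15:1')     # IPv4 address : number
    assert not rd_shape_ok('1-3')              # missing colon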
+ vpn_target_import: + description: + - Add VPN targets to the import VPN target list of a BD EVPN instance. + The format is the same as route_distinguisher. + required: true + vpn_target_export: + description: + - Add VPN targets to the export VPN target list of a BD EVPN instance. + The format is the same as route_distinguisher. + state: + description: + - Manage the state of the resource. + choices: ['present','absent'] + default: 'present' +''' + +EXAMPLES = ''' +- name: EVPN BD VNI test + hosts: cloudengine + connection: local + gather_facts: no + vars: + cli: + host: "{{ inventory_hostname }}" + port: "{{ ansible_ssh_port }}" + username: "{{ username }}" + password: "{{ password }}" + transport: cli + + tasks: + + - name: "Configure an EVPN instance for a VXLAN in BD view" + ce_evpn_bd_vni: + bridge_domain_id: 20 + evpn: enable + provider: "{{ cli }}" + + - name: "Configure a route distinguisher (RD) for a BD EVPN instance" + ce_evpn_bd_vni: + bridge_domain_id: 20 + route_distinguisher: '22:22' + provider: "{{ cli }}" + + - name: "Configure VPN targets to both the import and export VPN target lists of a BD EVPN instance" + ce_evpn_bd_vni: + bridge_domain_id: 20 + vpn_target_both: 22:100,22:101 + provider: "{{ cli }}" + + - name: "Configure VPN targets to the import VPN target list of a BD EVPN instance" + ce_evpn_bd_vni: + bridge_domain_id: 20 + vpn_target_import: 22:22,22:23 + provider: "{{ cli }}" + + - name: "Configure VPN targets to the export VPN target list of a BD EVPN instance" + ce_evpn_bd_vni: + bridge_domain_id: 20 + vpn_target_export: 22:38,22:39 + provider: "{{ cli }}" + + - name: "Unconfigure VPN targets to both the import and export VPN target lists of a BD EVPN instance" + ce_evpn_bd_vni: + bridge_domain_id: 20 + vpn_target_both: '22:100' + state: absent + provider: "{{ cli }}" + + - name: "Unconfigure VPN targets to the import VPN target list of a BD EVPN instance" + ce_evpn_bd_vni: + bridge_domain_id: 20 + vpn_target_import: '22:22' + state: absent + provider: "{{ cli }}" + + - name: "Unconfigure VPN targets to the export VPN target list of a BD EVPN instance" + ce_evpn_bd_vni: + bridge_domain_id: 20 + vpn_target_export: '22:38' + state: absent + provider: "{{ cli }}" + + - name: "Unconfigure a route distinguisher (RD) of a BD EVPN instance" + ce_evpn_bd_vni: + bridge_domain_id: 20 + route_distinguisher: '22:22' + state: absent + provider: "{{ cli }}" + + - name: "Unconfigure an EVPN instance for a VXLAN in BD view" + ce_evpn_bd_vni: + bridge_domain_id: 20 + evpn: disable + provider: "{{ cli }}" +''' + +RETURN = ''' +proposed: + description: k/v pairs of parameters passed into module + returned: always + type: dict + sample: { + "bridge_domain_id": "2", + "evpn": "enable", + "route_distinguisher": "22:22", + "state": "present", + "vpn_target_both": [ + "22:100", + "22:101" + ], + "vpn_target_export": [ + "22:38", + "22:39" + ], + "vpn_target_import": [ + "22:22", + "22:23" + ] + } +existing: + description: k/v pairs of existing attributes on the device + returned: always + type: dict + sample: { + "bridge_domain_id": "2", + "evpn": "disable", + "route_distinguisher": null, + "vpn_target_both": [], + "vpn_target_export": [], + "vpn_target_import": [] + } +end_state: + description: k/v pairs of end attributes on the device + returned: always + type: dict + sample: { + "bridge_domain_id": "2", + "evpn": "enable", + "route_distinguisher": "22:22", + "vpn_target_both": [ + "22:100", + "22:101" + ], + "vpn_target_export": [ + "22:38", + "22:39" + ], + "vpn_target_import": 
[ + "22:22", + "22:23" + ] + } +updates: + description: command list sent to the device + returned: always + type: list + sample: [ + "bridge-domain 2", + " evpn", + " route-distinguisher 22:22", + " vpn-target 22:38 export-extcommunity", + " vpn-target 22:39 export-extcommunity", + " vpn-target 22:100 export-extcommunity", + " vpn-target 22:101 export-extcommunity", + " vpn-target 22:22 import-extcommunity", + " vpn-target 22:23 import-extcommunity", + " vpn-target 22:100 import-extcommunity", + " vpn-target 22:101 import-extcommunity" + ] +changed: + description: check to see if a change was made on the device + returned: always + type: bool + sample: true +''' + +import re +import copy +from xml.etree import ElementTree +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.network.cloudengine.ce import get_nc_config, set_nc_config, ce_argument_spec + + +CE_NC_GET_VNI_BD = """ + + + + + + + + + + +""" + +CE_NC_GET_EVPN_CONFIG = """ + + + + + %s + %s + + + + + + + + + + + + + + + + + +""" + +CE_NC_DELETE_EVPN_CONFIG = """ + + + + + %s + %s + + + + +""" + +CE_NC_DELETE_EVPN_CONFIG_HEAD = """ + + + + + %s + %s +""" + +CE_NC_MERGE_EVPN_CONFIG_HEAD = """ + + + + + %s + %s +""" + +CE_NC_MERGE_EVPN_AUTORTS_HEAD = """ + +""" + +CE_NC_MERGE_EVPN_AUTORTS_TAIL = """ + +""" + +CE_NC_DELETE_EVPN_AUTORTS_CONTEXT = """ + + %s + +""" + +CE_NC_MERGE_EVPN_AUTORTS_CONTEXT = """ + + %s + +""" + +CE_NC_MERGE_EVPN_RTS_HEAD = """ + +""" + +CE_NC_MERGE_EVPN_RTS_TAIL = """ + +""" + +CE_NC_DELETE_EVPN_RTS_CONTEXT = """ + + %s + %s + +""" + +CE_NC_MERGE_EVPN_RTS_CONTEXT = """ + + %s + %s + +""" + +CE_NC_MERGE_EVPN_CONFIG_TAIL = """ + + + + +""" + + +def is_valid_value(vrf_targe_value): + """check whether VPN target value is valid""" + + each_num = None + if len(vrf_targe_value) > 21 or len(vrf_targe_value) < 3: + return False + if vrf_targe_value.find(':') == -1: + return False + elif vrf_targe_value == '0:0': + return False + elif vrf_targe_value == '0.0:0': + return False + else: + value_list = vrf_targe_value.split(':') + if value_list[0].find('.') != -1: + if not value_list[1].isdigit(): + return False + if int(value_list[1]) > 65535: + return False + value = value_list[0].split('.') + if len(value) == 4: + for each_num in value: + if not each_num.isdigit(): + return False + if int(each_num) > 255: + return False + return True + elif len(value) == 2: + for each_num in value: + if not each_num.isdigit(): + return False + if int(each_num) > 65535: + return False + return True + else: + return False + elif not value_list[0].isdigit(): + return False + elif not value_list[1].isdigit(): + return False + elif int(value_list[0]) < 65536 and int(value_list[1]) < 4294967296: + return True + elif int(value_list[0]) > 65535 and int(value_list[0]) < 4294967296: + return bool(int(value_list[1]) < 65536) + else: + return False + + +class EvpnBd(object): + """Manage evpn instance in BD view""" + + def __init__(self, argument_spec, ): + self.spec = argument_spec + self.module = None + self.__init_module__() + + # EVPN instance info + self.bridge_domain_id = self.module.params['bridge_domain_id'] + self.evpn = self.module.params['evpn'] + self.route_distinguisher = self.module.params['route_distinguisher'] + self.vpn_target_both = self.module.params['vpn_target_both'] or list() + self.vpn_target_import = self.module.params[ + 'vpn_target_import'] or list() + self.vpn_target_export = self.module.params[ + 'vpn_target_export'] or list() + self.state = 
self.module.params['state'] + self.__string_to_lowercase__() + + self.commands = list() + self.evpn_info = dict() + self.conf_exist = False + + # state + self.changed = False + self.updates_cmd = list() + self.results = dict() + self.proposed = dict() + self.existing = dict() + self.end_state = dict() + + def __init_module__(self): + """Init module""" + + self.module = AnsibleModule( + argument_spec=self.spec, supports_check_mode=True) + + def __check_response__(self, xml_str, xml_name): + """Check if response message is already succeed""" + if "" not in xml_str: + self.module.fail_json(msg='Error: %s failed.' % xml_name) + + def __string_to_lowercase__(self): + """Convert string to lowercase""" + + if self.route_distinguisher: + self.route_distinguisher = self.route_distinguisher.lower() + + if self.vpn_target_export: + for index, ele in enumerate(self.vpn_target_export): + self.vpn_target_export[index] = ele.lower() + + if self.vpn_target_import: + for index, ele in enumerate(self.vpn_target_import): + self.vpn_target_import[index] = ele.lower() + + if self.vpn_target_both: + for index, ele in enumerate(self.vpn_target_both): + self.vpn_target_both[index] = ele.lower() + + def get_all_evpn_rts(self, evpn_rts): + """Get all EVPN RTS""" + + rts = evpn_rts.findall("evpnRT") + if not rts: + return + + for ele in rts: + vrf_rttype = ele.find('vrfRTType') + vrf_rtvalue = ele.find('vrfRTValue') + + if vrf_rttype.text == 'export_extcommunity': + self.evpn_info['vpn_target_export'].append(vrf_rtvalue.text) + elif vrf_rttype.text == 'import_extcommunity': + self.evpn_info['vpn_target_import'].append(vrf_rtvalue.text) + + def get_all_evpn_autorts(self, evpn_autorts): + """"Get all EVPN AUTORTS""" + + autorts = evpn_autorts.findall("evpnAutoRT") + if not autorts: + return + + for autort in autorts: + vrf_rttype = autort.find('vrfRTType') + + if vrf_rttype.text == 'export_extcommunity': + self.evpn_info['vpn_target_export'].append('auto') + elif vrf_rttype.text == 'import_extcommunity': + self.evpn_info['vpn_target_import'].append('auto') + + def process_rts_info(self): + """Process RTS information""" + + if not self.evpn_info['vpn_target_export'] or\ + not self.evpn_info['vpn_target_import']: + return + + vpn_target_export = copy.deepcopy(self.evpn_info['vpn_target_export']) + for ele in vpn_target_export: + if ele in self.evpn_info['vpn_target_import']: + self.evpn_info['vpn_target_both'].append(ele) + self.evpn_info['vpn_target_export'].remove(ele) + self.evpn_info['vpn_target_import'].remove(ele) + + def get_evpn_instance_info(self): + """Get current EVPN instance information""" + + if not self.bridge_domain_id: + self.module.fail_json(msg='Error: The value of bridge_domain_id cannot be empty.') + + self.evpn_info['route_distinguisher'] = None + self.evpn_info['vpn_target_import'] = list() + self.evpn_info['vpn_target_export'] = list() + self.evpn_info['vpn_target_both'] = list() + self.evpn_info['evpn_inst'] = 'enable' + + xml_str = CE_NC_GET_EVPN_CONFIG % ( + self.bridge_domain_id, self.bridge_domain_id) + xml_str = get_nc_config(self.module, xml_str) + if "" in xml_str: + self.evpn_info['evpn_inst'] = 'disable' + return + + xml_str = xml_str.replace('\r', '').replace('\n', '').\ + replace('xmlns="urn:ietf:params:xml:ns:netconf:base:1.0"', "").\ + replace('xmlns="http://www.huawei.com/netconf/vrp"', "") + + root = ElementTree.fromstring(xml_str) + evpn_inst = root.find("evpn/evpnInstances/evpnInstance") + if evpn_inst: + for eles in evpn_inst: + if eles.tag in ["evpnAutoRD", "evpnRD", 
"evpnRTs", "evpnAutoRTs"]: + if eles.tag == 'evpnAutoRD' and eles.text == 'true': + self.evpn_info['route_distinguisher'] = 'auto' + elif eles.tag == 'evpnRD' and self.evpn_info['route_distinguisher'] != 'auto': + self.evpn_info['route_distinguisher'] = eles.text + elif eles.tag == 'evpnRTs': + self.get_all_evpn_rts(eles) + elif eles.tag == 'evpnAutoRTs': + self.get_all_evpn_autorts(eles) + self.process_rts_info() + + def get_existing(self): + """Get existing config""" + + self.existing = dict(bridge_domain_id=self.bridge_domain_id, + evpn=self.evpn_info['evpn_inst'], + route_distinguisher=self.evpn_info[ + 'route_distinguisher'], + vpn_target_both=self.evpn_info['vpn_target_both'], + vpn_target_import=self.evpn_info[ + 'vpn_target_import'], + vpn_target_export=self.evpn_info['vpn_target_export']) + + def get_proposed(self): + """Get proposed config""" + + self.proposed = dict(bridge_domain_id=self.bridge_domain_id, + evpn=self.evpn, + route_distinguisher=self.route_distinguisher, + vpn_target_both=self.vpn_target_both, + vpn_target_import=self.vpn_target_import, + vpn_target_export=self.vpn_target_export, + state=self.state) + + def get_end_state(self): + """Get end config""" + + self.get_evpn_instance_info() + self.end_state = dict(bridge_domain_id=self.bridge_domain_id, + evpn=self.evpn_info['evpn_inst'], + route_distinguisher=self.evpn_info[ + 'route_distinguisher'], + vpn_target_both=self.evpn_info[ + 'vpn_target_both'], + vpn_target_import=self.evpn_info[ + 'vpn_target_import'], + vpn_target_export=self.evpn_info['vpn_target_export']) + + def show_result(self): + """Show result""" + + self.results['changed'] = self.changed + self.results['proposed'] = self.proposed + self.results['existing'] = self.existing + self.results['end_state'] = self.end_state + if self.changed: + self.results['updates'] = self.updates_cmd + else: + self.results['updates'] = list() + + self.module.exit_json(**self.results) + + def judge_if_vpn_target_exist(self, vpn_target_type): + """Judge whether proposed vpn target has existed""" + + vpn_target = list() + if vpn_target_type == 'vpn_target_import': + vpn_target.extend(self.existing['vpn_target_both']) + vpn_target.extend(self.existing['vpn_target_import']) + return set(self.proposed['vpn_target_import']).issubset(vpn_target) + elif vpn_target_type == 'vpn_target_export': + vpn_target.extend(self.existing['vpn_target_both']) + vpn_target.extend(self.existing['vpn_target_export']) + return set(self.proposed['vpn_target_export']).issubset(vpn_target) + + return False + + def judge_if_config_exist(self): + """Judge whether configuration has existed""" + + if self.state == 'absent': + if self.route_distinguisher or self.vpn_target_import or self.vpn_target_export or self.vpn_target_both: + return False + else: + return True + + if self.evpn_info['evpn_inst'] != self.evpn: + return False + + if self.evpn == 'disable' and self.evpn_info['evpn_inst'] == 'disable': + return True + + if self.proposed['bridge_domain_id'] != self.existing['bridge_domain_id']: + return False + + if self.proposed['route_distinguisher']: + if self.proposed['route_distinguisher'] != self.existing['route_distinguisher']: + return False + + if self.proposed['vpn_target_both']: + if not self.existing['vpn_target_both']: + return False + if not set(self.proposed['vpn_target_both']).issubset(self.existing['vpn_target_both']): + return False + + if self.proposed['vpn_target_import']: + if not self.judge_if_vpn_target_exist('vpn_target_import'): + return False + + if 
self.proposed['vpn_target_export']: + if not self.judge_if_vpn_target_exist('vpn_target_export'): + return False + + return True + + def check_response(self, xml_str, xml_name): + """Check if response message is already succeed.""" + + if "" not in xml_str: + self.module.fail_json(msg='Error: %s failed.' % xml_name) + + def unconfig_evpn_instance(self): + """Unconfigure EVPN instance""" + + self.updates_cmd.append("bridge-domain %s" % self.bridge_domain_id) + xml_str = CE_NC_MERGE_EVPN_CONFIG_HEAD % ( + self.bridge_domain_id, self.bridge_domain_id) + self.updates_cmd.append(" evpn") + + # unconfigure RD + if self.route_distinguisher: + if self.route_distinguisher.lower() == 'auto': + xml_str += 'false' + self.updates_cmd.append(" undo route-distinguisher auto") + else: + xml_str += '' + self.updates_cmd.append( + " undo route-distinguisher %s" % self.route_distinguisher) + xml_str += CE_NC_MERGE_EVPN_CONFIG_TAIL + recv_xml = set_nc_config(self.module, xml_str) + self.check_response(recv_xml, "UNDO_EVPN_BD_RD") + self.changed = True + return + + # process VPN target list + vpn_target_export = copy.deepcopy(self.vpn_target_export) + vpn_target_import = copy.deepcopy(self.vpn_target_import) + if self.vpn_target_both: + for ele in self.vpn_target_both: + if ele not in vpn_target_export: + vpn_target_export.append(ele) + if ele not in vpn_target_import: + vpn_target_import.append(ele) + + # unconfig EVPN auto RTS + head_flag = False + if vpn_target_export: + for ele in vpn_target_export: + if ele.lower() == 'auto': + if not head_flag: + xml_str += CE_NC_MERGE_EVPN_AUTORTS_HEAD + head_flag = True + xml_str += CE_NC_DELETE_EVPN_AUTORTS_CONTEXT % ( + 'export_extcommunity') + self.updates_cmd.append( + " undo vpn-target auto export-extcommunity") + if vpn_target_import: + for ele in vpn_target_import: + if ele.lower() == 'auto': + if not head_flag: + xml_str += CE_NC_MERGE_EVPN_AUTORTS_HEAD + head_flag = True + xml_str += CE_NC_DELETE_EVPN_AUTORTS_CONTEXT % ( + 'import_extcommunity') + self.updates_cmd.append( + " undo vpn-target auto import-extcommunity") + + if head_flag: + xml_str += CE_NC_MERGE_EVPN_AUTORTS_TAIL + + # unconfig EVPN RTS + head_flag = False + if vpn_target_export: + for ele in vpn_target_export: + if ele.lower() != 'auto': + if not head_flag: + xml_str += CE_NC_MERGE_EVPN_RTS_HEAD + head_flag = True + xml_str += CE_NC_DELETE_EVPN_RTS_CONTEXT % ( + 'export_extcommunity', ele) + self.updates_cmd.append( + " undo vpn-target %s export-extcommunity" % ele) + + if vpn_target_import: + for ele in vpn_target_import: + if ele.lower() != 'auto': + if not head_flag: + xml_str += CE_NC_MERGE_EVPN_RTS_HEAD + head_flag = True + xml_str += CE_NC_DELETE_EVPN_RTS_CONTEXT % ( + 'import_extcommunity', ele) + self.updates_cmd.append( + " undo vpn-target %s import-extcommunity" % ele) + + if head_flag: + xml_str += CE_NC_MERGE_EVPN_RTS_TAIL + + xml_str += CE_NC_MERGE_EVPN_CONFIG_TAIL + recv_xml = set_nc_config(self.module, xml_str) + self.check_response(recv_xml, "MERGE_EVPN_BD_VPN_TARGET_CONFIG") + self.changed = True + + def config_evpn_instance(self): + """Configure EVPN instance""" + + self.updates_cmd.append("bridge-domain %s" % self.bridge_domain_id) + + if self.evpn == 'disable': + xml_str = CE_NC_DELETE_EVPN_CONFIG % ( + self.bridge_domain_id, self.bridge_domain_id) + recv_xml = set_nc_config(self.module, xml_str) + self.check_response(recv_xml, "MERGE_EVPN_BD_CONFIG") + self.updates_cmd.append(" undo evpn") + self.changed = True + return + + xml_str = CE_NC_MERGE_EVPN_CONFIG_HEAD % ( + 
self.bridge_domain_id, self.bridge_domain_id) + self.updates_cmd.append(" evpn") + + # configure RD + if self.route_distinguisher: + if not self.existing['route_distinguisher']: + if self.route_distinguisher.lower() == 'auto': + xml_str += 'true' + self.updates_cmd.append(" route-distinguisher auto") + else: + xml_str += '%s' % self.route_distinguisher + self.updates_cmd.append( + " route-distinguisher %s" % self.route_distinguisher) + + # process VPN target list + vpn_target_export = copy.deepcopy(self.vpn_target_export) + vpn_target_import = copy.deepcopy(self.vpn_target_import) + if self.vpn_target_both: + for ele in self.vpn_target_both: + if ele not in vpn_target_export: + vpn_target_export.append(ele) + if ele not in vpn_target_import: + vpn_target_import.append(ele) + + # config EVPN auto RTS + head_flag = False + if vpn_target_export: + for ele in vpn_target_export: + if ele.lower() == 'auto' and \ + (not self.is_vpn_target_exist('export_extcommunity', ele.lower())): + if not head_flag: + xml_str += CE_NC_MERGE_EVPN_AUTORTS_HEAD + head_flag = True + xml_str += CE_NC_MERGE_EVPN_AUTORTS_CONTEXT % ( + 'export_extcommunity') + self.updates_cmd.append( + " vpn-target auto export-extcommunity") + if vpn_target_import: + for ele in vpn_target_import: + if ele.lower() == 'auto' and \ + (not self.is_vpn_target_exist('import_extcommunity', ele.lower())): + if not head_flag: + xml_str += CE_NC_MERGE_EVPN_AUTORTS_HEAD + head_flag = True + xml_str += CE_NC_MERGE_EVPN_AUTORTS_CONTEXT % ( + 'import_extcommunity') + self.updates_cmd.append( + " vpn-target auto import-extcommunity") + + if head_flag: + xml_str += CE_NC_MERGE_EVPN_AUTORTS_TAIL + + # config EVPN RTS + head_flag = False + if vpn_target_export: + for ele in vpn_target_export: + if ele.lower() != 'auto' and \ + (not self.is_vpn_target_exist('export_extcommunity', ele.lower())): + if not head_flag: + xml_str += CE_NC_MERGE_EVPN_RTS_HEAD + head_flag = True + xml_str += CE_NC_MERGE_EVPN_RTS_CONTEXT % ( + 'export_extcommunity', ele) + self.updates_cmd.append( + " vpn-target %s export-extcommunity" % ele) + + if vpn_target_import: + for ele in vpn_target_import: + if ele.lower() != 'auto' and \ + (not self.is_vpn_target_exist('import_extcommunity', ele.lower())): + if not head_flag: + xml_str += CE_NC_MERGE_EVPN_RTS_HEAD + head_flag = True + xml_str += CE_NC_MERGE_EVPN_RTS_CONTEXT % ( + 'import_extcommunity', ele) + self.updates_cmd.append( + " vpn-target %s import-extcommunity" % ele) + + if head_flag: + xml_str += CE_NC_MERGE_EVPN_RTS_TAIL + + xml_str += CE_NC_MERGE_EVPN_CONFIG_TAIL + recv_xml = set_nc_config(self.module, xml_str) + self.check_response(recv_xml, "MERGE_EVPN_BD_CONFIG") + self.changed = True + + def is_vpn_target_exist(self, target_type, value): + """Judge whether VPN target has existed""" + + if target_type == 'export_extcommunity': + if (value not in self.existing['vpn_target_export']) and\ + (value not in self.existing['vpn_target_both']): + return False + return True + + if target_type == 'import_extcommunity': + if (value not in self.existing['vpn_target_import']) and\ + (value not in self.existing['vpn_target_both']): + return False + return True + + return False + + def config_evnp_bd(self): + """Configure EVPN in BD view""" + + if not self.conf_exist: + if self.state == 'present': + self.config_evpn_instance() + else: + self.unconfig_evpn_instance() + + def process_input_params(self): + """Process input parameters""" + + if self.state == 'absent': + self.evpn = None + else: + if self.evpn == 'disable': + return + + 
if self.vpn_target_both: + for ele in self.vpn_target_both: + if ele in self.vpn_target_export: + self.vpn_target_export.remove(ele) + if ele in self.vpn_target_import: + self.vpn_target_import.remove(ele) + + if self.vpn_target_export and self.vpn_target_import: + vpn_target_export = copy.deepcopy(self.vpn_target_export) + for ele in vpn_target_export: + if ele in self.vpn_target_import: + self.vpn_target_both.append(ele) + self.vpn_target_import.remove(ele) + self.vpn_target_export.remove(ele) + + def check_vpn_target_para(self): + """Check whether VPN target value is valid""" + + if self.route_distinguisher: + if self.route_distinguisher.lower() != 'auto' and\ + not is_valid_value(self.route_distinguisher): + self.module.fail_json( + msg='Error: Route distinguisher has invalid value %s.' % self.route_distinguisher) + + if self.vpn_target_export: + for ele in self.vpn_target_export: + if ele.lower() != 'auto' and not is_valid_value(ele): + self.module.fail_json( + msg='Error: VPN target extended community attribute has invalid value %s.' % ele) + + if self.vpn_target_import: + for ele in self.vpn_target_import: + if ele.lower() != 'auto' and not is_valid_value(ele): + self.module.fail_json( + msg='Error: VPN target extended community attribute has invalid value %s.' % ele) + + if self.vpn_target_both: + for ele in self.vpn_target_both: + if ele.lower() != 'auto' and not is_valid_value(ele): + self.module.fail_json( + msg='Error: VPN target extended community attribute has invalid value %s.' % ele) + + def check_undo_params_if_exist(self): + """Check whether all undo parameters is existed""" + + if self.vpn_target_import: + for ele in self.vpn_target_import: + if ele not in self.evpn_info['vpn_target_import'] and ele not in self.evpn_info['vpn_target_both']: + self.module.fail_json( + msg='Error: VPN target import attribute value %s does not exist.' % ele) + + if self.vpn_target_export: + for ele in self.vpn_target_export: + if ele not in self.evpn_info['vpn_target_export'] and ele not in self.evpn_info['vpn_target_both']: + self.module.fail_json( + msg='Error: VPN target export attribute value %s does not exist.' % ele) + + if self.vpn_target_both: + for ele in self.vpn_target_both: + if ele not in self.evpn_info['vpn_target_both']: + self.module.fail_json( + msg='Error: VPN target export and import attribute value %s does not exist.' % ele) + + def check_params(self): + """Check all input params""" + + # bridge_domain_id check + if self.bridge_domain_id: + if not self.bridge_domain_id.isdigit(): + self.module.fail_json( + msg='Error: The parameter of bridge domain id is invalid.') + if int(self.bridge_domain_id) > 16777215 or int(self.bridge_domain_id) < 1: + self.module.fail_json( + msg='Error: The bridge domain id must be an integer between 1 and 16777215.') + + if self.state == 'absent': + self.check_undo_params_if_exist() + + # check bd whether binding the vxlan vni + self.check_vni_bd() + self.check_vpn_target_para() + + if self.state == 'absent': + if self.route_distinguisher: + if not self.evpn_info['route_distinguisher']: + self.module.fail_json( + msg='Error: Route distinguisher has not been configured.') + else: + if self.route_distinguisher != self.evpn_info['route_distinguisher']: + self.module.fail_json( + msg='Error: Current route distinguisher value is %s.' 
% + self.evpn_info['route_distinguisher']) + + if self.state == 'present': + if self.route_distinguisher: + if self.evpn_info['route_distinguisher'] and\ + self.route_distinguisher != self.evpn_info['route_distinguisher']: + self.module.fail_json( + msg='Error: Route distinguisher has already been configured.') + + def check_vni_bd(self): + """Check whether vxlan vni is configured in BD view""" + + xml_str = CE_NC_GET_VNI_BD + xml_str = get_nc_config(self.module, xml_str) + if "" in xml_str or not re.findall(r'\S+\s+%s' % self.bridge_domain_id, xml_str): + self.module.fail_json( + msg='Error: The vxlan vni is not configured or the bridge domain id is invalid.') + + def work(self): + """Execute task""" + + self.get_evpn_instance_info() + self.process_input_params() + self.check_params() + self.get_existing() + self.get_proposed() + self.conf_exist = self.judge_if_config_exist() + + self.config_evnp_bd() + + self.get_end_state() + self.show_result() + + +def main(): + """Main function entry""" + + argument_spec = dict( + bridge_domain_id=dict(required=True, type='str'), + evpn=dict(required=False, type='str', + default='enable', choices=['enable', 'disable']), + route_distinguisher=dict(required=False, type='str'), + vpn_target_both=dict(required=False, type='list'), + vpn_target_import=dict(required=False, type='list'), + vpn_target_export=dict(required=False, type='list'), + state=dict(required=False, default='present', + choices=['present', 'absent']) + ) + argument_spec.update(ce_argument_spec) + evpn_bd = EvpnBd(argument_spec) + evpn_bd.work() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/cloudengine/ce_evpn_bgp.py b/plugins/modules/network/cloudengine/ce_evpn_bgp.py new file mode 100644 index 0000000000..4789a1594b --- /dev/null +++ b/plugins/modules/network/cloudengine/ce_evpn_bgp.py @@ -0,0 +1,731 @@ +#!/usr/bin/python +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: ce_evpn_bgp +short_description: Manages BGP EVPN configuration on HUAWEI CloudEngine switches. +description: + - This module offers the ability to configure a BGP EVPN peer relationship on HUAWEI CloudEngine switches. +author: + - Li Yanfeng (@QijunPan) +notes: + - Recommended connection is C(network_cli). + - This module also works with C(local) connections for legacy playbooks. +options: + bgp_instance: + description: + - Name of a BGP instance. The value is a string of 1 to 31 case-sensitive characters, spaces not supported. + required: True + as_number: + description: + - Specifies integral AS number. The value is an integer ranging from 1 to 4294967295. + peer_address: + description: + - Specifies the IPv4 address of a BGP EVPN peer. The value is in dotted decimal notation. 
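# A minimal standalone sketch of the bridge_domain_id guard in ce_evpn_bd's
# check_params above (hypothetical function name, same 1..16777215 range check):
def validate_bridge_domain_id(value):
    """Bridge domain id must be a decimal string between 1 and 16777215."""
    if not value.isdigit():
        raise ValueError('bridge domain id is invalid: %r' % value)
    if not 1 <= int(value) <= 16777215:
        raise ValueError('bridge domain id must be between 1 and 16777215')
    return int(value)

print(validate_bridge_domain_id('42'))  # 42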
+ peer_group_name: + description: + - Specify the name of a peer group that BGP peers need to join. + The value is a string of 1 to 47 case-sensitive characters, spaces not supported. + peer_enable: + description: + - Enable or disable a BGP device to exchange routes with a specified peer or peer group in the address + family view. + choices: ['true','false'] + advertise_router_type: + description: + - Configures a device to advertise routes to its BGP EVPN peers. + choices: ['arp','irb'] + vpn_name: + description: + - Associates a specified VPN instance with the IPv4 address family. + The value is a string of 1 to 31 case-sensitive characters, spaces not supported. + advertise_l2vpn_evpn: + description: + - Enable or disable a device to advertise IP routes imported to a VPN instance to its EVPN instance. + choices: ['enable','disable'] + state: + description: + - Manage the state of the resource. + default: present + choices: ['present','absent'] +''' +EXAMPLES = ''' +- name: evpn bgp module test + hosts: cloudengine + connection: local + gather_facts: no + vars: + cli: + host: "{{ inventory_hostname }}" + port: "{{ ansible_ssh_port }}" + username: "{{ username }}" + password: "{{ password }}" + transport: cli + + tasks: + + - name: Enable peer address. + ce_evpn_bgp: + bgp_instance: 100 + peer_address: 1.1.1.1 + as_number: 100 + peer_enable: true + provider: "{{ cli }}" + + - name: Enable peer group arp. + ce_evpn_bgp: + bgp_instance: 100 + peer_group_name: aaa + advertise_router_type: arp + provider: "{{ cli }}" + + - name: Enable advertise l2vpn evpn. + ce_evpn_bgp: + bgp_instance: 100 + vpn_name: aaa + advertise_l2vpn_evpn: enable + provider: "{{ cli }}" +''' + +RETURN = ''' +proposed: + description: k/v pairs of parameters passed into module + returned: always + type: dict + sample: {"advertise_router_type": "arp", "bgp_instance": "100", "peer_group_name": "aaa", "state": "present"} +existing: + description: k/v pairs of existing rollback + returned: always + type: dict + sample: {"bgp_instance": "100", "peer_group_advertise_type": []} + +updates: + description: command sent to the device + returned: always + type: list + sample: ["peer 1.1.1.1 enable", + "peer aaa advertise arp"] +changed: + description: check to see if a change was made on the device + returned: always + type: bool + sample: true +end_state: + description: k/v pairs of configuration after module execution + returned: verbose mode + type: dict + sample: {"advertise_l2vpn_evpn": "enable", "bgp_instance": "100", "vpn_name": "aaa"} +''' + +import re +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.network.cloudengine.ce import exec_command, load_config +from ansible_collections.community.general.plugins.module_utils.network.cloudengine.ce import ce_argument_spec + + +def is_config_exist(cmp_cfg, test_cfg): + """check configuration is exist""" + + if not cmp_cfg or not test_cfg: + return False + + return bool(test_cfg in cmp_cfg) + + +def is_valid_address(address): + """check ip-address is valid""" + + if address.find('.') != -1: + addr_list = address.split('.') + if len(addr_list) != 4: + return False + for each_num in addr_list: + if not each_num.isdigit(): + return False + if int(each_num) > 255: + return False + return True + + return False + + +def is_valid_as_number(as_number): + """check as-number is valid""" + + if as_number.isdigit(): + if int(as_number) > 4294967295 or int(as_number) < 1: + return False + return True + else: + if 
as_number.find('.') != -1: + number_list = as_number.split('.') + if len(number_list) != 2: + return False + if int(number_list[1]) == 0: + return False + for each_num in number_list: + if not each_num.isdigit(): + return False + if int(each_num) > 65535: + return False + return True + + return False + + +class EvpnBgp(object): + """ + Manages evpn bgp configuration. + """ + + def __init__(self, argument_spec): + self.spec = argument_spec + self.module = None + self.netconf = None + self.init_module() + + # module input info + self.as_number = self.module.params['as_number'] + self.bgp_instance = self.module.params['bgp_instance'] + self.peer_address = self.module.params['peer_address'] + self.peer_group_name = self.module.params['peer_group_name'] + self.peer_enable = self.module.params['peer_enable'] + self.advertise_router_type = self.module.params[ + 'advertise_router_type'] + self.vpn_name = self.module.params['vpn_name'] + self.advertise_l2vpn_evpn = self.module.params['advertise_l2vpn_evpn'] + self.state = self.module.params['state'] + + # host info + self.host = self.module.params['host'] + self.username = self.module.params['username'] + self.port = self.module.params['port'] + + # state + self.config = "" + self.config_list = list() + self.l2vpn_evpn_exist = False + self.changed = False + self.updates_cmd = list() + self.commands = list() + self.results = dict() + self.existing = dict() + self.proposed = dict() + self.end_state = dict() + + def init_module(self): + """ init module """ + self.module = AnsibleModule( + argument_spec=self.spec, supports_check_mode=True) + + def get_evpn_overlay_config(self): + """get evpn-overlay enable configuration""" + + cmd = "display current-configuration | include ^evpn-overlay enable" + rc, out, err = exec_command(self.module, cmd) + if rc != 0: + self.module.fail_json(msg=err) + return out + + def get_current_config(self): + """get current configuration""" + + cmd = "display current-configuration | section include bgp %s" % self.bgp_instance + rc, out, err = exec_command(self.module, cmd) + if rc != 0: + self.module.fail_json(msg=err) + return out + + def cli_add_command(self, command, undo=False): + """add command to self.update_cmd and self.commands""" + + if undo and command.lower() not in ["quit", "return"]: + cmd = "undo " + command + else: + cmd = command + + self.commands.append(cmd) # set to device + if command.lower() not in ["quit", "return"]: + self.updates_cmd.append(cmd) # show updates result + + def cli_load_config(self, commands): + """load config by cli""" + + if not self.module.check_mode: + load_config(self.module, commands) + + def check_params(self): + """Check all input params""" + + # required parameter and parameter combination checks + if not self.bgp_instance: + self.module.fail_json( + msg='Error: The bgp_instance can not be none.') + if not self.peer_enable and not self.advertise_router_type and not self.advertise_l2vpn_evpn: + self.module.fail_json( + msg='Error: The peer_enable, advertise_router_type, advertise_l2vpn_evpn ' + 'can not be none at the same time.') + if self.as_number: + if not is_valid_as_number(self.as_number): + self.module.fail_json( + msg='Error: The parameter of as_number %s is invalid.' % self.as_number) + # bgp_instance check + if self.bgp_instance: + if not is_valid_as_number(self.bgp_instance): + self.module.fail_json( + msg='Error: The parameter of bgp_instance %s is invalid.' 
% self.bgp_instance) + + # peer_address check + if self.peer_address: + if not is_valid_address(self.peer_address): + self.module.fail_json( + msg='Error: The %s is not a valid IP address.' % self.peer_address) + + # peer_group_name check + if self.peer_group_name: + if len(self.peer_group_name) > 47 \ + or len(self.peer_group_name.replace(' ', '')) < 1: + self.module.fail_json( + msg='Error: peer group name is not in the range from 1 to 47.') + + # vpn_name check + if self.vpn_name: + if len(self.vpn_name) > 31 \ + or len(self.vpn_name.replace(' ', '')) < 1: + self.module.fail_json( + msg='Error: vpn name is not in the range from 1 to 31.') + + def get_proposed(self): + """get proposed info""" + + if self.as_number: + self.proposed["as_number"] = self.as_number + if self.bgp_instance: + self.proposed["bgp_instance"] = self.bgp_instance + if self.peer_address: + self.proposed["peer_address"] = self.peer_address + if self.peer_group_name: + self.proposed["peer_group_name"] = self.peer_group_name + if self.peer_enable: + self.proposed["peer_enable"] = self.peer_enable + if self.advertise_router_type: + self.proposed["advertise_router_type"] = self.advertise_router_type + if self.vpn_name: + self.proposed["vpn_name"] = self.vpn_name + if self.advertise_l2vpn_evpn: + self.proposed["advertise_l2vpn_evpn"] = self.advertise_l2vpn_evpn + if not self.peer_enable or not self.advertise_l2vpn_evpn: + if self.state: + self.proposed["state"] = self.state + + def get_peers_enable(self): + """get evpn peer address enable list""" + + if len(self.config_list) != 2: + return None + self.config_list = self.config.split('l2vpn-family evpn') + get = re.findall( + r"peer ([0-9]+\.[0-9]+\.[0-9]+\.[0-9]+)\s?as-number\s?(\S*)", self.config_list[0]) + if not get: + return None + else: + peers = list() + for item in get: + cmd = "peer %s enable" % item[0] + exist = is_config_exist(self.config_list[1], cmd) + if exist: + peers.append( + dict(peer_address=item[0], as_number=item[1], peer_enable='true')) + else: + peers.append(dict(peer_address=item[0], as_number=item[1], peer_enable='false')) + return peers + + def get_peers_advertise_type(self): + """get evpn peer address advertise type list""" + + if len(self.config_list) != 2: + return None + self.config_list = self.config.split('l2vpn-family evpn') + get = re.findall( + r"peer ([0-9]+\.[0-9]+\.[0-9]+\.[0-9]+)\s?as-number\s?(\S*)", self.config_list[0]) + if not get: + return None + else: + peers = list() + for item in get: + cmd = "peer %s advertise arp" % item[0] + exist1 = is_config_exist(self.config_list[1], cmd) + cmd = "peer %s advertise irb" % item[0] + exist2 = is_config_exist(self.config_list[1], cmd) + if exist1: + peers.append(dict(peer_address=item[0], as_number=item[1], advertise_type='arp')) + if exist2: + peers.append(dict(peer_address=item[0], as_number=item[1], advertise_type='irb')) + return peers + + def get_peers_group_enable(self): + """get evpn peer group name enable list""" + + if len(self.config_list) != 2: + return None + self.config_list = self.config.split('l2vpn-family evpn') + get1 = re.findall( + r"group (\S+) external", self.config_list[0]) + + get2 = re.findall( + r"group (\S+) internal", self.config_list[0]) + + if not get1 and not get2: + return None + else: + peer_groups = list() + for item in get1: + cmd = "peer %s enable" % item + exist = is_config_exist(self.config_list[1], cmd) + if exist: + peer_groups.append( + dict(peer_group_name=item, peer_enable='true')) + else: + peer_groups.append( + dict(peer_group_name=item, 
peer_enable='false')) + + for item in get2: + cmd = "peer %s enable" % item + exist = is_config_exist(self.config_list[1], cmd) + if exist: + peer_groups.append( + dict(peer_group_name=item, peer_enable='true')) + else: + peer_groups.append( + dict(peer_group_name=item, peer_enable='false')) + return peer_groups + + def get_peer_groups_advertise_type(self): + """get evpn peer group name advertise type list""" + + if len(self.config_list) != 2: + return None + self.config_list = self.config.split('l2vpn-family evpn') + get1 = re.findall( + r"group (\S+) external", self.config_list[0]) + + get2 = re.findall( + r"group (\S+) internal", self.config_list[0]) + if not get1 and not get2: + return None + else: + peer_groups = list() + for item in get1: + cmd = "peer %s advertise arp" % item + exist1 = is_config_exist(self.config_list[1], cmd) + cmd = "peer %s advertise irb" % item + exist2 = is_config_exist(self.config_list[1], cmd) + if exist1: + peer_groups.append( + dict(peer_group_name=item, advertise_type='arp')) + if exist2: + peer_groups.append( + dict(peer_group_name=item, advertise_type='irb')) + + for item in get2: + cmd = "peer %s advertise arp" % item + exist1 = is_config_exist(self.config_list[1], cmd) + cmd = "peer %s advertise irb" % item + exist2 = is_config_exist(self.config_list[1], cmd) + if exist1: + peer_groups.append( + dict(peer_group_name=item, advertise_type='arp')) + if exist2: + peer_groups.append( + dict(peer_group_name=item, advertise_type='irb')) + return peer_groups + + def get_existing(self): + """get existing info""" + + if not self.config: + return + if self.bgp_instance: + self.existing["bgp_instance"] = self.bgp_instance + + if self.peer_address and self.peer_enable: + if self.l2vpn_evpn_exist: + self.existing["peer_address_enable"] = self.get_peers_enable() + + if self.peer_group_name and self.peer_enable: + if self.l2vpn_evpn_exist: + self.existing[ + "peer_group_enable"] = self.get_peers_group_enable() + + if self.peer_address and self.advertise_router_type: + if self.l2vpn_evpn_exist: + self.existing[ + "peer_address_advertise_type"] = self.get_peers_advertise_type() + + if self.peer_group_name and self.advertise_router_type: + if self.l2vpn_evpn_exist: + self.existing[ + "peer_group_advertise_type"] = self.get_peer_groups_advertise_type() + + if self.advertise_l2vpn_evpn and self.vpn_name: + cmd = " ipv4-family vpn-instance %s" % self.vpn_name + exist = is_config_exist(self.config, cmd) + if exist: + self.existing["vpn_name"] = self.vpn_name + l2vpn_cmd = "advertise l2vpn evpn" + l2vpn_exist = is_config_exist(self.config, l2vpn_cmd) + if l2vpn_exist: + self.existing["advertise_l2vpn_evpn"] = 'enable' + else: + self.existing["advertise_l2vpn_evpn"] = 'disable' + + def get_end_state(self): + """get end state info""" + + self.config = self.get_current_config() + if not self.config: + return + + self.config_list = self.config.split('l2vpn-family evpn') + if len(self.config_list) == 2: + self.l2vpn_evpn_exist = True + + if self.bgp_instance: + self.end_state["bgp_instance"] = self.bgp_instance + + if self.peer_address and self.peer_enable: + if self.l2vpn_evpn_exist: + self.end_state["peer_address_enable"] = self.get_peers_enable() + + if self.peer_group_name and self.peer_enable: + if self.l2vpn_evpn_exist: + self.end_state[ + "peer_group_enable"] = self.get_peers_group_enable() + + if self.peer_address and self.advertise_router_type: + if self.l2vpn_evpn_exist: + self.end_state[ + "peer_address_advertise_type"] = self.get_peers_advertise_type() + + if 
self.peer_group_name and self.advertise_router_type: + if self.l2vpn_evpn_exist: + self.end_state[ + "peer_group_advertise_type"] = self.get_peer_groups_advertise_type() + + if self.advertise_l2vpn_evpn and self.vpn_name: + cmd = " ipv4-family vpn-instance %s" % self.vpn_name + exist = is_config_exist(self.config, cmd) + if exist: + self.end_state["vpn_name"] = self.vpn_name + l2vpn_cmd = "advertise l2vpn evpn" + l2vpn_exist = is_config_exist(self.config, l2vpn_cmd) + if l2vpn_exist: + self.end_state["advertise_l2vpn_evpn"] = 'enable' + else: + self.end_state["advertise_l2vpn_evpn"] = 'disable' + + def config_peer(self): + """configure evpn bgp peer command""" + + if self.as_number and self.peer_address: + cmd = "peer %s as-number %s" % (self.peer_address, self.as_number) + exist = is_config_exist(self.config, cmd) + if not exist: + self.module.fail_json( + msg='Error: The peer session %s does not exist or the peer already ' + 'exists in another as-number.' % self.peer_address) + cmd = "bgp %s" % self.bgp_instance + self.cli_add_command(cmd) + cmd = "l2vpn-family evpn" + self.cli_add_command(cmd) + exist_l2vpn = is_config_exist(self.config, cmd) + if self.peer_enable: + cmd = "peer %s enable" % self.peer_address + if exist_l2vpn: + exist = is_config_exist(self.config_list[1], cmd) + if self.peer_enable == "true" and not exist: + self.cli_add_command(cmd) + self.changed = True + elif self.peer_enable == "false" and exist: + self.cli_add_command(cmd, undo=True) + self.changed = True + else: + self.cli_add_command(cmd) + self.changed = True + + if self.advertise_router_type: + cmd = "peer %s advertise %s" % ( + self.peer_address, self.advertise_router_type) + exist = is_config_exist(self.config, cmd) + if self.state == "present" and not exist: + self.cli_add_command(cmd) + self.changed = True + elif self.state == "absent" and exist: + self.cli_add_command(cmd, undo=True) + self.changed = True + elif self.peer_group_name: + cmd_1 = "group %s external" % self.peer_group_name + exist_1 = is_config_exist(self.config, cmd_1) + cmd_2 = "group %s internal" % self.peer_group_name + exist_2 = is_config_exist(self.config, cmd_2) + exist = False + if exist_1: + exist = True + if exist_2: + exist = True + if not exist: + self.module.fail_json( + msg='Error: The peer-group %s does not exist.' 
% self.peer_group_name) + cmd = "bgp %s" % self.bgp_instance + self.cli_add_command(cmd) + cmd = "l2vpn-family evpn" + self.cli_add_command(cmd) + exist_l2vpn = is_config_exist(self.config, cmd) + if self.peer_enable: + cmd = "peer %s enable" % self.peer_group_name + if exist_l2vpn: + exist = is_config_exist(self.config_list[1], cmd) + if self.peer_enable == "true" and not exist: + self.cli_add_command(cmd) + self.changed = True + elif self.peer_enable == "false" and exist: + self.cli_add_command(cmd, undo=True) + self.changed = True + else: + self.cli_add_command(cmd) + self.changed = True + + if self.advertise_router_type: + cmd = "peer %s advertise %s" % ( + self.peer_group_name, self.advertise_router_type) + exist = is_config_exist(self.config, cmd) + if self.state == "present" and not exist: + self.cli_add_command(cmd) + self.changed = True + elif self.state == "absent" and exist: + self.cli_add_command(cmd, undo=True) + self.changed = True + + def config_advertise_l2vpn_evpn(self): + """configure advertise l2vpn evpn""" + + cmd = "ipv4-family vpn-instance %s" % self.vpn_name + exist = is_config_exist(self.config, cmd) + if not exist: + self.module.fail_json( + msg='Error: The VPN instance name %s does not exist.' % self.vpn_name) + config_vpn_list = self.config.split(cmd) + cmd = "ipv4-family vpn-instance" + exist_vpn = is_config_exist(config_vpn_list[1], cmd) + cmd_l2vpn = "advertise l2vpn evpn" + if exist_vpn: + config_vpn = config_vpn_list[1].split('ipv4-family vpn-instance') + exist_l2vpn = is_config_exist(config_vpn[0], cmd_l2vpn) + else: + exist_l2vpn = is_config_exist(config_vpn_list[1], cmd_l2vpn) + cmd = "advertise l2vpn evpn" + if self.advertise_l2vpn_evpn == "enable" and not exist_l2vpn: + cmd = "bgp %s" % self.bgp_instance + self.cli_add_command(cmd) + cmd = "ipv4-family vpn-instance %s" % self.vpn_name + self.cli_add_command(cmd) + cmd = "advertise l2vpn evpn" + self.cli_add_command(cmd) + self.changed = True + elif self.advertise_l2vpn_evpn == "disable" and exist_l2vpn: + cmd = "bgp %s" % self.bgp_instance + self.cli_add_command(cmd) + cmd = "ipv4-family vpn-instance %s" % self.vpn_name + self.cli_add_command(cmd) + cmd = "advertise l2vpn evpn" + self.cli_add_command(cmd, undo=True) + self.changed = True + + def work(self): + """worker""" + + self.check_params() + evpn_config = self.get_evpn_overlay_config() + if not evpn_config: + self.module.fail_json( + msg="Error: evpn-overlay enable is not configured.") + self.config = self.get_current_config() + if not self.config: + self.module.fail_json( + msg="Error: Bgp instance %s does not exist." 
% self.bgp_instance) + + self.config_list = self.config.split('l2vpn-family evpn') + if len(self.config_list) == 2: + self.l2vpn_evpn_exist = True + self.get_existing() + self.get_proposed() + + if self.peer_enable or self.advertise_router_type: + self.config_peer() + + if self.advertise_l2vpn_evpn: + self.config_advertise_l2vpn_evpn() + if self.commands: + self.cli_load_config(self.commands) + self.get_end_state() + self.results['changed'] = self.changed + self.results['proposed'] = self.proposed + self.results['existing'] = self.existing + self.results['end_state'] = self.end_state + if self.changed: + self.results['updates'] = self.updates_cmd + else: + self.results['updates'] = list() + + self.module.exit_json(**self.results) + + +def main(): + """Module main""" + + argument_spec = dict( + bgp_instance=dict(required=True, type='str'), + as_number=dict(required=False, type='str'), + peer_address=dict(required=False, type='str'), + peer_group_name=dict(required=False, type='str'), + peer_enable=dict(required=False, type='str', choices=[ + 'true', 'false']), + advertise_router_type=dict(required=False, type='str', choices=[ + 'arp', 'irb']), + + vpn_name=dict(required=False, type='str'), + advertise_l2vpn_evpn=dict(required=False, type='str', choices=[ + 'enable', 'disable']), + state=dict(required=False, default='present', + choices=['present', 'absent']) + ) + argument_spec.update(ce_argument_spec) + module = EvpnBgp(argument_spec) + module.work() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/cloudengine/ce_evpn_bgp_rr.py b/plugins/modules/network/cloudengine/ce_evpn_bgp_rr.py new file mode 100644 index 0000000000..2e86d0f4e8 --- /dev/null +++ b/plugins/modules/network/cloudengine/ce_evpn_bgp_rr.py @@ -0,0 +1,531 @@ +#!/usr/bin/python +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: ce_evpn_bgp_rr +short_description: Manages RR for the VXLAN Network on HUAWEI CloudEngine switches. +description: + - Configure an RR in BGP-EVPN address family view on HUAWEI CloudEngine switches. +author: Zhijin Zhou (@QijunPan) +notes: + - Ensure that BGP view is existed. + - The peer, peer_type, and reflect_client arguments must all exist or not exist. + - Recommended connection is C(network_cli). + - This module also works with C(local) connections for legacy playbooks. +options: + as_number: + description: + - Specifies the number of the AS, in integer format. + The value is an integer that ranges from 1 to 4294967295. + required: true + bgp_instance: + description: + - Specifies the name of a BGP instance. + The value of instance-name can be an integer 1 or a string of 1 to 31. 
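# Throughout ce_evpn_bgp the device configuration is parsed with a plain
# substring split: text before 'l2vpn-family evpn' is the BGP view, text after
# it is the EVPN address-family view. A minimal sketch against canned CLI
# output (the sample text is hypothetical, not captured from a device):
SAMPLE = """bgp 100
 peer 1.1.1.1 as-number 100
 #
 l2vpn-family evpn
  peer 1.1.1.1 enable
"""

config_list = SAMPLE.split('l2vpn-family evpn')
l2vpn_evpn_exist = len(config_list) == 2  # address-family view present?
peer_enabled = l2vpn_evpn_exist and 'peer 1.1.1.1 enable' in config_list[1]
print(l2vpn_evpn_exist, peer_enabled)  # True True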
+ bgp_evpn_enable: + description: + - Enable or disable the BGP-EVPN address family. + choices: ['enable','disable'] + default: 'enable' + peer_type: + description: + - Specify the peer type. + choices: ['group_name','ipv4_address'] + peer: + description: + - Specifies the IPv4 address or the group name of a peer. + reflect_client: + description: + - Configure the local device as the route reflector and the peer or peer group as the client of the route reflector. + choices: ['enable','disable'] + policy_vpn_target: + description: + - Enable or disable the VPN-Target filtering. + choices: ['enable','disable'] +''' + +EXAMPLES = ''' +- name: BGP RR test + hosts: cloudengine + connection: local + gather_facts: no + vars: + cli: + host: "{{ inventory_hostname }}" + port: "{{ ansible_ssh_port }}" + username: "{{ username }}" + password: "{{ password }}" + transport: cli + + tasks: + + - name: "Configure BGP-EVPN address family view and ensure that BGP view has existed." + ce_evpn_bgp_rr: + as_number: 20 + bgp_evpn_enable: enable + provider: "{{ cli }}" + + - name: "Configure reflect client and ensure peer has existed." + ce_evpn_bgp_rr: + as_number: 20 + peer_type: ipv4_address + peer: 192.8.3.3 + reflect_client: enable + provider: "{{ cli }}" + + - name: "Configure the VPN-Target filtering." + ce_evpn_bgp_rr: + as_number: 20 + policy_vpn_target: enable + provider: "{{ cli }}" + + - name: "Configure an RR in BGP-EVPN address family view." + ce_evpn_bgp_rr: + as_number: 20 + bgp_evpn_enable: enable + peer_type: ipv4_address + peer: 192.8.3.3 + reflect_client: enable + policy_vpn_target: disable + provider: "{{ cli }}" +''' + +RETURN = ''' +proposed: + description: k/v pairs of parameters passed into module + returned: always + type: dict + sample: { + "as_number": "20", + "bgp_evpn_enable": "enable", + "bgp_instance": null, + "peer": "192.8.3.3", + "peer_type": "ipv4_address", + "policy_vpn_target": "disable", + "reflect_client": "enable" + } +existing: + description: k/v pairs of existing attributes on the device + returned: always + type: dict + sample: { + "as_number": "20", + "bgp_evpn_enable": "disable", + "bgp_instance": null, + "peer": null, + "peer_type": null, + "policy_vpn_target": "disable", + "reflect_client": "disable" + } +end_state: + description: k/v pairs of end attributes on the device + returned: always + type: dict + sample: { + "as_number": "20", + "bgp_evpn_enable": "enable", + "bgp_instance": null, + "peer": "192.8.3.3", + "peer_type": "ipv4_address", + "policy_vpn_target": "disable", + "reflect_client": "enable" + } +updates: + description: command list sent to the device + returned: always + type: list + sample: [ + "bgp 20", + " l2vpn-family evpn", + " peer 192.8.3.3 enable", + " peer 192.8.3.3 reflect-client", + " undo policy vpn-target" + ] +changed: + description: check to see if a change was made on the device + returned: always + type: bool + sample: true +''' + +import re +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.network.cloudengine.ce import exec_command, load_config, ce_argument_spec + + +def is_config_exist(cmp_cfg, test_cfg): + """is configuration exist""" + + if not cmp_cfg or not test_cfg: + return False + + return bool(test_cfg in cmp_cfg) + + +class EvpnBgpRr(object): + """Manage RR in BGP-EVPN address family view""" + + def __init__(self, argument_spec): + self.spec = argument_spec + self.module = None + self.__init_module__() + + # RR configuration parameters + self.as_number = 
self.module.params['as_number'] + self.bgp_instance = self.module.params['bgp_instance'] + self.peer_type = self.module.params['peer_type'] + self.peer = self.module.params['peer'] + self.bgp_evpn_enable = self.module.params['bgp_evpn_enable'] + self.reflect_client = self.module.params['reflect_client'] + self.policy_vpn_target = self.module.params['policy_vpn_target'] + + self.commands = list() + self.config = None + self.bgp_evpn_config = "" + self.cur_config = dict() + self.conf_exist = False + + # state + self.changed = False + self.updates_cmd = list() + self.results = dict() + self.proposed = dict() + self.existing = dict() + self.end_state = dict() + + def __init_module__(self): + """Init module""" + + self.module = AnsibleModule( + argument_spec=self.spec, supports_check_mode=True) + + def cli_load_config(self, commands): + """Load config by cli""" + + if not self.module.check_mode: + load_config(self.module, commands) + + def is_bgp_view_exist(self): + """Judge whether BGP view has existed""" + + if self.bgp_instance: + view_cmd = "bgp %s instance %s" % ( + self.as_number, self.bgp_instance) + else: + view_cmd = "bgp %s" % self.as_number + + return is_config_exist(self.config, view_cmd) + + def is_l2vpn_family_evpn_exist(self): + """Judge whether BGP-EVPN address family view has existed""" + + view_cmd = "l2vpn-family evpn" + return is_config_exist(self.config, view_cmd) + + def is_reflect_client_exist(self): + """Judge whether reflect client is configured""" + + view_cmd = "peer %s reflect-client" % self.peer + return is_config_exist(self.bgp_evpn_config, view_cmd) + + def is_policy_vpn_target_exist(self): + """Judge whether the VPN-Target filtering is enabled""" + + view_cmd = "undo policy vpn-target" + if is_config_exist(self.bgp_evpn_config, view_cmd): + return False + else: + return True + + def get_config_in_bgp_view(self): + """Get configuration in BGP view""" + + cmd = "display current-configuration | section include" + if self.as_number: + if self.bgp_instance: + cmd += " bgp %s instance %s" % (self.as_number, + self.bgp_instance) + else: + cmd += " bgp %s" % self.as_number + rc, out, err = exec_command(self.module, cmd) + if rc != 0: + self.module.fail_json(msg=err) + config = out.strip() if out else "" + if cmd == config: + return '' + + return config + + def get_config_in_bgp_evpn_view(self): + """Get configuration in BGP_EVPN view""" + + self.bgp_evpn_config = "" + if not self.config: + return "" + + index = self.config.find("l2vpn-family evpn") + if index == -1: + return "" + + return self.config[index:] + + def get_current_config(self): + """Get current configuration""" + + if not self.as_number: + self.module.fail_json(msg='Error: The value of as-number cannot be empty.') + + self.cur_config['bgp_exist'] = False + self.cur_config['bgp_evpn_enable'] = 'disable' + self.cur_config['reflect_client'] = 'disable' + self.cur_config['policy_vpn_target'] = 'disable' + self.cur_config['peer_type'] = None + self.cur_config['peer'] = None + + self.config = self.get_config_in_bgp_view() + + if not self.is_bgp_view_exist(): + return + self.cur_config['bgp_exist'] = True + + if not self.is_l2vpn_family_evpn_exist(): + return + self.cur_config['bgp_evpn_enable'] = 'enable' + + self.bgp_evpn_config = self.get_config_in_bgp_evpn_view() + if self.is_reflect_client_exist(): + self.cur_config['reflect_client'] = 'enable' + self.cur_config['peer_type'] = self.peer_type + self.cur_config['peer'] = self.peer + + if self.is_policy_vpn_target_exist(): + self.cur_config['policy_vpn_target'] 
= 'enable' + + def get_existing(self): + """Get existing config""" + + self.existing = dict(as_number=self.as_number, + bgp_instance=self.bgp_instance, + peer_type=self.cur_config['peer_type'], + peer=self.cur_config['peer'], + bgp_evpn_enable=self.cur_config[ + 'bgp_evpn_enable'], + reflect_client=self.cur_config['reflect_client'], + policy_vpn_target=self.cur_config[ + 'policy_vpn_target']) + + def get_proposed(self): + """Get proposed config""" + + self.proposed = dict(as_number=self.as_number, + bgp_instance=self.bgp_instance, + peer_type=self.peer_type, + peer=self.peer, + bgp_evpn_enable=self.bgp_evpn_enable, + reflect_client=self.reflect_client, + policy_vpn_target=self.policy_vpn_target) + + def get_end_state(self): + """Get end config""" + + self.get_current_config() + self.end_state = dict(as_number=self.as_number, + bgp_instance=self.bgp_instance, + peer_type=self.cur_config['peer_type'], + peer=self.cur_config['peer'], + bgp_evpn_enable=self.cur_config[ + 'bgp_evpn_enable'], + reflect_client=self.cur_config['reflect_client'], + policy_vpn_target=self.cur_config['policy_vpn_target']) + if self.end_state == self.existing: + self.changed = False + + def show_result(self): + """Show result""" + + self.results['changed'] = self.changed + self.results['proposed'] = self.proposed + self.results['existing'] = self.existing + self.results['end_state'] = self.end_state + if self.changed: + self.results['updates'] = self.updates_cmd + else: + self.results['updates'] = list() + + self.module.exit_json(**self.results) + + def judge_if_config_exist(self): + """Judge whether configuration has existed""" + + if self.bgp_evpn_enable and self.bgp_evpn_enable != self.cur_config['bgp_evpn_enable']: + return False + + if self.bgp_evpn_enable == 'disable' and self.cur_config['bgp_evpn_enable'] == 'disable': + return True + + if self.reflect_client and self.reflect_client == 'enable': + if self.peer_type and self.peer_type != self.cur_config['peer_type']: + return False + if self.peer and self.peer != self.cur_config['peer']: + return False + if self.reflect_client and self.reflect_client != self.cur_config['reflect_client']: + return False + + if self.policy_vpn_target and self.policy_vpn_target != self.cur_config['policy_vpn_target']: + return False + + return True + + def cli_add_command(self, command, undo=False): + """Add command to self.update_cmd and self.commands""" + + if undo and command.lower() not in ["quit", "return"]: + cmd = "undo " + command + else: + cmd = command + + self.commands.append(cmd) # set to device + if command.lower() not in ["quit", "return"]: + self.updates_cmd.append(cmd) # show updates result + + def config_rr(self): + """Configure RR""" + + if self.conf_exist: + return + + if self.bgp_instance: + view_cmd = "bgp %s instance %s" % ( + self.as_number, self.bgp_instance) + else: + view_cmd = "bgp %s" % self.as_number + self.cli_add_command(view_cmd) + + if self.bgp_evpn_enable == 'disable': + self.cli_add_command("undo l2vpn-family evpn") + else: + self.cli_add_command("l2vpn-family evpn") + if self.reflect_client and self.reflect_client != self.cur_config['reflect_client']: + if self.reflect_client == 'enable': + self.cli_add_command("peer %s enable" % self.peer) + self.cli_add_command( + "peer %s reflect-client" % self.peer) + else: + self.cli_add_command( + "undo peer %s reflect-client" % self.peer) + self.cli_add_command("undo peer %s enable" % self.peer) + if self.cur_config['bgp_evpn_enable'] == 'enable': + if self.policy_vpn_target and self.policy_vpn_target != 
self.cur_config['policy_vpn_target']: + if self.policy_vpn_target == 'enable': + self.cli_add_command("policy vpn-target") + else: + self.cli_add_command("undo policy vpn-target") + else: + if self.policy_vpn_target and self.policy_vpn_target == 'disable': + self.cli_add_command("undo policy vpn-target") + + if self.commands: + self.cli_load_config(self.commands) + self.changed = True + + def check_is_ipv4_addr(self): + """Check whether the peer is a valid IPv4 address""" + + rule1 = r'(25[0-5]|2[0-4][0-9]|1[0-9][0-9]|[1-9]?[0-9])\.' + rule2 = r'(25[0-5]|2[0-4][0-9]|1[0-9][0-9]|[1-9]?[0-9])' + ipv4_regex = '%s%s%s%s%s%s' % ('^', rule1, rule1, rule1, rule2, '$') + + return bool(re.match(ipv4_regex, self.peer)) + + def check_params(self): + """Check all input params""" + + if not self.cur_config['bgp_exist']: + self.module.fail_json(msg="Error: BGP view does not exist.") + + if self.bgp_instance: + if len(self.bgp_instance) < 1 or len(self.bgp_instance) > 31: + self.module.fail_json( + msg="Error: The length of BGP instance-name must be between 1 and 31.") + + if self.as_number: + if len(self.as_number) > 11 or len(self.as_number) == 0: + self.module.fail_json( + msg='Error: The length of as_number %s is out of the range [1 - 11].' % self.as_number) + + tmp_dict1 = dict(peer_type=self.peer_type, + peer=self.peer, + reflect_client=self.reflect_client) + tmp_dict2 = dict((k, v) + for k, v in tmp_dict1.items() if v is not None) + if len(tmp_dict2) != 0 and len(tmp_dict2) != 3: + self.module.fail_json( + msg='Error: The peer, peer_type, and reflect_client arguments must all exist or not exist.') + + if self.peer_type: + if self.peer_type == 'ipv4_address' and not self.check_is_ipv4_addr(): + self.module.fail_json(msg='Error: Illegal IPv4 address.') + elif self.peer_type == 'group_name' and self.check_is_ipv4_addr(): + self.module.fail_json( + msg='Error: Ip address cannot be configured as group-name.') + + def work(self): + """Execute task""" + + self.get_current_config() + self.check_params() + self.get_existing() + self.get_proposed() + self.conf_exist = self.judge_if_config_exist() + + self.config_rr() + + self.get_end_state() + self.show_result() + + +def main(): + """Main function entry""" + + argument_spec = dict( + as_number=dict(required=True, type='str'), + bgp_instance=dict(required=False, type='str'), + bgp_evpn_enable=dict(required=False, type='str', + default='enable', choices=['enable', 'disable']), + peer_type=dict(required=False, type='str', choices=[ + 'group_name', 'ipv4_address']), + peer=dict(required=False, type='str'), + reflect_client=dict(required=False, type='str', + choices=['enable', 'disable']), + policy_vpn_target=dict(required=False, type='str', choices=['enable', 'disable']), + ) + argument_spec.update(ce_argument_spec) + evpn_bgp_rr = EvpnBgpRr(argument_spec) + evpn_bgp_rr.work() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/cloudengine/ce_evpn_global.py b/plugins/modules/network/cloudengine/ce_evpn_global.py new file mode 100644 index 0000000000..8f17b2139a --- /dev/null +++ b/plugins/modules/network/cloudengine/ce_evpn_global.py @@ -0,0 +1,240 @@ +#!/usr/bin/python +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. 
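# check_is_ipv4_addr above assembles its dotted-quad pattern from two octet
# rules; pulled out of the class it reads like this (illustrative standalone
# form with a hypothetical name):
import re

RULE1 = r'(25[0-5]|2[0-4][0-9]|1[0-9][0-9]|[1-9]?[0-9])\.'
RULE2 = r'(25[0-5]|2[0-4][0-9]|1[0-9][0-9]|[1-9]?[0-9])'
IPV4_REGEX = '^%s%s%s%s$' % (RULE1, RULE1, RULE1, RULE2)

def is_ipv4(peer):
    """True when peer is a well-formed dotted-quad IPv4 address (octets 0-255)."""
    return bool(re.match(IPV4_REGEX, peer))

print(is_ipv4('192.8.3.3'))    # True
print(is_ipv4('192.8.3.300'))  # False - the module treats this as a group name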
+# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: ce_evpn_global +short_description: Manages global configuration of EVPN on HUAWEI CloudEngine switches. +description: + - Manages global configuration of EVPN on HUAWEI CloudEngine switches. +author: Zhijin Zhou (@QijunPan) +notes: + - Before configuring evpn_overlay_enable=disable, delete other EVPN configurations. + - Recommended connection is C(network_cli). + - This module also works with C(local) connections for legacy playbooks. +options: + evpn_overlay_enable: + description: + - Configure EVPN as the VXLAN control plane. + required: true + choices: ['enable','disable'] +''' + +EXAMPLES = ''' +- name: evpn global module test + hosts: cloudengine + connection: local + gather_facts: no + vars: + cli: + host: "{{ inventory_hostname }}" + port: "{{ ansible_ssh_port }}" + username: "{{ username }}" + password: "{{ password }}" + transport: cli + + tasks: + + - name: Configure EVPN as the VXLAN control plan + ce_evpn_global: + evpn_overlay_enable: enable + provider: "{{ cli }}" + + - name: Undo EVPN as the VXLAN control plan + ce_evpn_global: + evpn_overlay_enable: disable + provider: "{{ cli }}" +''' + +RETURN = ''' +proposed: + description: k/v pairs of parameters passed into module + returned: always + type: dict + sample: { + "evpn_overlay_enable": "enable" + } +existing: + description: k/v pairs of existing attributes on the device + returned: always + type: dict + sample: { + "evpn_overlay_enable": "disable" + } +end_state: + description: k/v pairs of end attributes on the interface + returned: always + type: dict + sample: { + "evpn_overlay_enable": "enable" + } +updates: + description: command list sent to the device + returned: always + type: list + sample: [ + "evpn-overlay enable", + ] +changed: + description: check to see if a change was made on the device + returned: always + type: bool + sample: true +''' + + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.network.cloudengine.ce import exec_command, load_config +from ansible_collections.community.general.plugins.module_utils.network.cloudengine.ce import ce_argument_spec + + +class EvpnGlobal(object): + """Manage global configuration of EVPN""" + + def __init__(self, argument_spec, ): + self.spec = argument_spec + self.module = None + self.init_module() + + # EVPN global configuration parameters + self.overlay_enable = self.module.params['evpn_overlay_enable'] + + self.commands = list() + self.global_info = dict() + self.conf_exist = False + # state + self.changed = False + self.updates_cmd = list() + self.results = dict() + self.proposed = dict() + self.existing = dict() + self.end_state = dict() + + def init_module(self): + """init_module""" + self.module = AnsibleModule( + argument_spec=self.spec, supports_check_mode=True) + + def cli_load_config(self, commands): + """load config by cli""" + if not self.module.check_mode: + load_config(self.module, commands) + + def 
cli_add_command(self, command, undo=False): + """add command to self.update_cmd and self.commands""" + if undo and command.lower() not in ["quit", "return"]: + cmd = "undo " + command + else: + cmd = command + + self.commands.append(cmd) # set to device + if command.lower() not in ["quit", "return"]: + self.updates_cmd.append(cmd) # show updates result + + def get_evpn_global_info(self): + """ get current EVPN global configuration""" + + self.global_info['evpnOverLay'] = 'disable' + cmd = "display current-configuration | include ^evpn-overlay enable" + rc, out, err = exec_command(self.module, cmd) + if rc != 0: + self.module.fail_json(msg=err) + if out: + self.global_info['evpnOverLay'] = 'enable' + + def get_existing(self): + """get existing config""" + self.existing = dict( + evpn_overlay_enable=self.global_info['evpnOverLay']) + + def get_proposed(self): + """get proposed config""" + self.proposed = dict(evpn_overlay_enable=self.overlay_enable) + + def get_end_state(self): + """get end config""" + self.get_evpn_global_info() + self.end_state = dict( + evpn_overlay_enable=self.global_info['evpnOverLay']) + + def show_result(self): + """ show result""" + self.results['changed'] = self.changed + self.results['proposed'] = self.proposed + self.results['existing'] = self.existing + self.results['end_state'] = self.end_state + if self.changed: + self.results['updates'] = self.updates_cmd + else: + self.results['updates'] = list() + + self.module.exit_json(**self.results) + + def judge_if_config_exist(self): + """ judge whether configuration has existed""" + if self.overlay_enable == self.global_info['evpnOverLay']: + return True + + return False + + def config_evnp_global(self): + """ set global EVPN configuration""" + if not self.conf_exist: + if self.overlay_enable == 'enable': + self.cli_add_command('evpn-overlay enable') + else: + self.cli_add_command('evpn-overlay enable', True) + + if self.commands: + self.cli_load_config(self.commands) + self.changed = True + + def work(self): + """execute task""" + self.get_evpn_global_info() + self.get_existing() + self.get_proposed() + self.conf_exist = self.judge_if_config_exist() + + self.config_evnp_global() + + self.get_end_state() + self.show_result() + + +def main(): + """main function entry""" + + argument_spec = dict( + evpn_overlay_enable=dict( + required=True, type='str', choices=['enable', 'disable']), + ) + argument_spec.update(ce_argument_spec) + evpn_global = EvpnGlobal(argument_spec) + evpn_global.work() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/cloudengine/ce_facts.py b/plugins/modules/network/cloudengine/ce_facts.py new file mode 100644 index 0000000000..004bbce294 --- /dev/null +++ b/plugins/modules/network/cloudengine/ce_facts.py @@ -0,0 +1,418 @@ +#!/usr/bin/python +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
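# Every CloudEngine module in this commit repeats the same cli_add_command
# helper shown above: prefix removals with 'undo' unless the command is
# quit/return, and keep quit/return out of the reported updates. A trimmed
# standalone version (hypothetical name) of that behaviour:
def build_command(command, undo=False):
    """Return the CLI line to send; removals become 'undo <command>'."""
    if undo and command.lower() not in ("quit", "return"):
        return "undo " + command
    return command

commands, updates = [], []
for cmd, undo in [("evpn-overlay enable", True), ("quit", False)]:
    line = build_command(cmd, undo)
    commands.append(line)  # everything is sent to the device
    if cmd.lower() not in ("quit", "return"):
        updates.append(line)  # ...but quit/return is not reported as an update

print(commands)  # ['undo evpn-overlay enable', 'quit']
print(updates)   # ['undo evpn-overlay enable']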
+# + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: ce_facts +author: "wangdezhuang (@QijunPan)" +short_description: Gets facts about HUAWEI CloudEngine switches. +description: + - Collects facts from CloudEngine devices running the CloudEngine + operating system. Fact collection is supported over Cli + transport. This module prepends all of the base network fact keys + with C(ansible_net_). The facts module will always collect a + base set of facts from the device and can enable or disable + collection of additional facts. +notes: + - Recommended connection is C(network_cli). + - This module also works with C(local) connections for legacy playbooks. +options: + gather_subset: + description: + - When supplied, this argument will restrict the facts collected + to a given subset. Possible values for this argument include + all, hardware, config, and interfaces. Can specify a + list of values to include a larger subset. Values can also be used + with an initial C(M(!)) to specify that a specific subset should + not be collected. + required: false + default: '!config' +''' + +EXAMPLES = """ +# Note: examples below use the following provider dict to handle +# transport and authentication to the node. + +- name: CloudEngine facts test + hosts: cloudengine + connection: local + gather_facts: no + vars: + cli: + host: "{{ inventory_hostname }}" + port: "{{ ansible_ssh_port }}" + username: "{{ username }}" + password: "{{ password }}" + transport: cli + + tasks: + + - name: "Gather_subset is all" + ce_facts: + gather_subset: all + provider: "{{ cli }}" + + - name: "Collect only the config facts" + ce_facts: + gather_subset: config + provider: "{{ cli }}" + + - name: "Do not collect hardware facts" + ce_facts: + gather_subset: "!hardware" + provider: "{{ cli }}" +""" + +RETURN = """ +gather_subset: + description: The list of fact subsets collected from the device + returned: always + type: list + +# default +BIOS Version: + description: The BIOS version running on the remote device + returned: always + type: str +Board Type: + description: The board type of the remote device + returned: always + type: str +CPLD1 Version: + description: The CPLD1 Version running the remote device + returned: always + type: str +CPLD2 Version: + description: The CPLD2 Version running the remote device + returned: always + type: str +MAB Version: + description: The MAB Version running the remote device + returned: always + type: str +PCB Version: + description: The PCB Version running the remote device + returned: always + type: str +hostname: + description: The hostname of the remote device + returned: always + type: str + +# hardware +FAN: + description: The fan state on the device + returned: when hardware is configured + type: str +PWR: + description: The power state on the device + returned: when hardware is configured + type: str +filesystems: + description: The filesystems on the device + returned: when hardware is configured + type: str +flash_free: + description: The flash free space on the device + returned: when hardware is configured + type: str +flash_total: + description: The flash total space on the device + returned: when hardware is configured + type: str +memory_free: + description: The memory free space on the remote device + returned: when hardware is configured + type: str +memory_total: + description: The memory 
total space on the remote device + returned: when hardware is configured + type: str + +# config +config: + description: The current system configuration on the device + returned: when config is configured + type: str + +# interfaces +all_ipv4_addresses: + description: All IPv4 addresses configured on the device + returned: when interfaces is configured + type: list +interfaces: + description: A hash of all interfaces running on the system + returned: when interfaces is configured + type: dict +neighbors: + description: The list of LLDP neighbors from the remote device + returned: when interfaces is configured + type: dict +""" + +import re + +from ansible_collections.community.general.plugins.module_utils.network.cloudengine.ce import run_commands +from ansible_collections.community.general.plugins.module_utils.network.cloudengine.ce import ce_argument_spec, check_args +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.six import iteritems + + +class FactsBase(object): + + COMMANDS = frozenset() + + def __init__(self, module): + self.module = module + self.facts = dict() + self.responses = None + + def populate(self): + self.responses = run_commands(self.module, list(self.COMMANDS)) + + +class Default(FactsBase): + """ Class default """ + + COMMANDS = [ + 'display version', + 'display current-configuration | include sysname' + ] + + def populate(self): + """ Populate method """ + + super(Default, self).populate() + + data = self.responses[0] + if data: + version = data.split("\n") + for item in version: + if re.findall(r"^\d+\S\s+", item.strip()): + tmp_item = item.split() + tmp_key = tmp_item[1] + " " + tmp_item[2] + if len(tmp_item) > 5: + self.facts[tmp_key] = " ".join(tmp_item[4:]) + else: + self.facts[tmp_key] = tmp_item[4] + + data = self.responses[1] + if data: + tmp_value = re.findall(r'sysname (.*)', data) + self.facts['hostname'] = tmp_value[0] + + +class Config(FactsBase): + """ Class config """ + + COMMANDS = [ + 'display current-configuration configuration system' + ] + + def populate(self): + """ Populate method """ + + super(Config, self).populate() + + data = self.responses[0] + if data: + self.facts['config'] = data.split("\n") + + +class Hardware(FactsBase): + """ Class hardware """ + + COMMANDS = [ + 'dir', + 'display memory', + 'display device' + ] + + def populate(self): + """ Populate method """ + + super(Hardware, self).populate() + + data = self.responses[0] + if data: + self.facts['filesystems'] = re.findall(r'Directory of (.*)/', data)[0] + self.facts['flash_total'] = re.findall(r'(.*) total', data)[0].replace(",", "") + self.facts['flash_free'] = re.findall(r'total \((.*) free\)', data)[0].replace(",", "") + + data = self.responses[1] + if data: + memory_total = re.findall(r'Total Memory Used: (.*) Kbytes', data)[0] + use_percent = re.findall(r'Memory Using Percentage: (.*)%', data)[0] + memory_free = str(int(memory_total) - int(memory_total) * int(use_percent) / 100) + self.facts['memory_total'] = memory_total + " Kb" + self.facts['memory_free'] = memory_free + " Kb" + + data = self.responses[2] + if data: + device_info = data.split("\n") + tmp_device_info = device_info[4:-1] + for item in tmp_device_info: + tmp_item = item.split() + if len(tmp_item) == 8: + self.facts[tmp_item[2]] = tmp_item[6] + elif len(tmp_item) == 7: + self.facts[tmp_item[0]] = tmp_item[5] + + +class Interfaces(FactsBase): + """ Class interfaces """ + + COMMANDS = [ + 'display interface brief', + 'display ip interface brief', + 'display lldp neighbor brief' + 
] + + def populate(self): + """ Populate method""" + + interface_dict = dict() + ipv4_addr_dict = dict() + neighbors_dict = dict() + + super(Interfaces, self).populate() + + data = self.responses[0] + begin = False + if data: + interface_info = data.split("\n") + for item in interface_info: + if begin: + tmp_item = item.split() + interface_dict[tmp_item[0]] = tmp_item[1] + + if re.findall(r"^Interface", item.strip()): + begin = True + + self.facts['interfaces'] = interface_dict + + data = self.responses[1] + if data: + ipv4_addr = data.split("\n") + tmp_ipv4 = ipv4_addr[11:] + for item in tmp_ipv4: + tmp_item = item.split() + ipv4_addr_dict[tmp_item[0]] = tmp_item[1] + self.facts['all_ipv4_addresses'] = ipv4_addr_dict + + data = self.responses[2] + if data: + neighbors = data.split("\n") + tmp_neighbors = neighbors[2:] + for item in tmp_neighbors: + tmp_item = item.split() + if len(tmp_item) > 3: + neighbors_dict[tmp_item[0]] = tmp_item[3] + else: + neighbors_dict[tmp_item[0]] = None + self.facts['neighbors'] = neighbors_dict + + +FACT_SUBSETS = dict( + default=Default, + hardware=Hardware, + interfaces=Interfaces, + config=Config, +) + +VALID_SUBSETS = frozenset(FACT_SUBSETS.keys()) + + +def main(): + """ Module main """ + + spec = dict( + gather_subset=dict(default=['!config'], type='list') + ) + + spec.update(ce_argument_spec) + + module = AnsibleModule(argument_spec=spec, supports_check_mode=True) + + warnings = list() + check_args(module, warnings) + + gather_subset = module.params['gather_subset'] + + runable_subsets = set() + exclude_subsets = set() + + for subset in gather_subset: + if subset == 'all': + runable_subsets.update(VALID_SUBSETS) + continue + + if subset.startswith('!'): + subset = subset[1:] + if subset == 'all': + exclude_subsets.update(VALID_SUBSETS) + continue + exclude = True + else: + exclude = False + + if subset not in VALID_SUBSETS: + module.fail_json(msg='Bad subset') + + if exclude: + exclude_subsets.add(subset) + else: + runable_subsets.add(subset) + + if not runable_subsets: + runable_subsets.update(VALID_SUBSETS) + + runable_subsets.difference_update(exclude_subsets) + runable_subsets.add('default') + + facts = dict() + facts['gather_subset'] = list(runable_subsets) + + instances = list() + for key in runable_subsets: + instances.append(FACT_SUBSETS[key](module)) + + for inst in instances: + inst.populate() + facts.update(inst.facts) + + ansible_facts = dict() + for key, value in iteritems(facts): + # this is to maintain capability with nxos_facts 2.1 + if key.startswith('_'): + ansible_facts[key[1:]] = value + else: + ansible_facts[key] = value + + module.exit_json(ansible_facts=ansible_facts, warnings=warnings) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/cloudengine/ce_file_copy.py b/plugins/modules/network/cloudengine/ce_file_copy.py new file mode 100644 index 0000000000..70e1ae74c0 --- /dev/null +++ b/plugins/modules/network/cloudengine/ce_file_copy.py @@ -0,0 +1,416 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: ce_file_copy +short_description: Copy a file to a remote cloudengine device over SCP on HUAWEI CloudEngine switches. 
+description: + - Copy a file to a remote cloudengine device over SCP on HUAWEI CloudEngine switches. +author: + - Zhou Zhijin (@QijunPan) +notes: + - The feature must be enabled with feature scp-server. + - If the file is already present, no transfer will take place. + - This module requires the netconf system service be enabled on the remote device being managed. + - Recommended connection is C(netconf). + - This module also works with C(local) connections for legacy playbooks. +requirements: + - paramiko +options: + local_file: + description: + - Path to local file. Local directory must exist. + The maximum length of I(local_file) is C(4096). + required: true + remote_file: + description: + - Remote file path of the copy. Remote directories must exist. + If omitted, the name of the local file will be used. + The maximum length of I(remote_file) is C(4096). + file_system: + description: + - The remote file system of the device. If omitted, + devices that support a I(file_system) parameter will use + their default values. + File system indicates the storage medium and can be set to as follows, + 1) C(flash) is root directory of the flash memory on the master MPU. + 2) C(slave#flash) is root directory of the flash memory on the slave MPU. + If no slave MPU exists, this drive is unavailable. + 3) C(chassis ID/slot number#flash) is root directory of the flash memory on + a device in a stack. For example, C(1/5#flash) indicates the flash memory + whose chassis ID is 1 and slot number is 5. + default: 'flash:' +''' + +EXAMPLES = ''' +- name: File copy test + hosts: cloudengine + connection: local + gather_facts: no + vars: + cli: + host: "{{ inventory_hostname }}" + port: "{{ ansible_ssh_port }}" + username: "{{ username }}" + password: "{{ password }}" + transport: cli + + tasks: + + - name: "Copy a local file to remote device" + ce_file_copy: + local_file: /usr/vrpcfg.cfg + remote_file: /vrpcfg.cfg + file_system: 'flash:' + provider: "{{ cli }}" +''' + +RETURN = ''' +changed: + description: check to see if a change was made on the device + returned: always + type: bool + sample: true +transfer_result: + description: information about transfer result. + returned: always + type: str + sample: 'The local file has been successfully transferred to the device.' +local_file: + description: The path of the local file. + returned: always + type: str + sample: '/usr/work/vrpcfg.zip' +remote_file: + description: The path of the remote file. 
+ returned: always + type: str + sample: '/vrpcfg.zip' +''' + +import re +import os +import sys +import time +from xml.etree import ElementTree +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.network.cloudengine.ce import ce_argument_spec, get_nc_config +from ansible.module_utils.connection import ConnectionError +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import validate_ip_v6_address + +try: + import paramiko + HAS_PARAMIKO = True +except ImportError: + HAS_PARAMIKO = False + +try: + from scp import SCPClient + HAS_SCP = True +except ImportError: + HAS_SCP = False + +CE_NC_GET_DISK_INFO = """ + + + + + + + + + + + + +""" + +CE_NC_GET_FILE_INFO = """ + + + + + %s + %s + + + + + +""" + +CE_NC_GET_SCP_ENABLE = """ + + + + +""" + + +def get_cli_exception(exc=None): + """Get cli exception message""" + + msg = list() + if not exc: + exc = sys.exc_info[1] + if exc: + errs = str(exc).split("\r\n") + for err in errs: + if not err: + continue + if "matched error in response:" in err: + continue + if " at '^' position" in err: + err = err.replace(" at '^' position", "") + if err.replace(" ", "") == "^": + continue + if len(err) > 2 and err[0] in ["<", "["] and err[-1] in [">", "]"]: + continue + if err[-1] == ".": + err = err[:-1] + if err.replace(" ", "") == "": + continue + msg.append(err) + else: + msg = ["Error: Fail to get cli exception message."] + + while msg[-1][-1] == ' ': + msg[-1] = msg[-1][:-1] + + if msg[-1][-1] != ".": + msg[-1] += "." + + return ", ".join(msg).capitalize() + + +class FileCopy(object): + """File copy function class""" + + def __init__(self, argument_spec): + self.spec = argument_spec + self.module = None + self.init_module() + + # file copy parameters + self.local_file = self.module.params['local_file'] + self.remote_file = self.module.params['remote_file'] + self.file_system = self.module.params['file_system'] + self.host_is_ipv6 = validate_ip_v6_address(self.module.params['provider']['host']) + + # state + self.transfer_result = None + self.changed = False + + def init_module(self): + """Init module""" + + self.module = AnsibleModule( + argument_spec=self.spec, supports_check_mode=True) + + def remote_file_exists(self, dst, file_system='flash:'): + """Remote file whether exists""" + + full_path = file_system + dst + file_name = os.path.basename(full_path) + file_path = os.path.dirname(full_path) + file_path = file_path + '/' + xml_str = CE_NC_GET_FILE_INFO % (file_name, file_path) + ret_xml = get_nc_config(self.module, xml_str) + if "" in ret_xml: + return False, 0 + + xml_str = ret_xml.replace('\r', '').replace('\n', '').\ + replace('xmlns="urn:ietf:params:xml:ns:netconf:base:1.0"', "").\ + replace('xmlns="http://www.huawei.com/netconf/vrp"', "") + + # get file info + root = ElementTree.fromstring(xml_str) + topo = root.find("vfm/dirs/dir") + if topo is None: + return False, 0 + + for eles in topo: + if eles.tag in ["DirSize"]: + return True, int(eles.text.replace(',', '')) + + return False, 0 + + def local_file_exists(self): + """Local file whether exists""" + + return os.path.isfile(self.local_file) + + def enough_space(self): + """Whether device has enough space""" + + xml_str = CE_NC_GET_DISK_INFO + ret_xml = get_nc_config(self.module, xml_str) + if "" in ret_xml: + return + + xml_str = ret_xml.replace('\r', '').replace('\n', '').\ + replace('xmlns="urn:ietf:params:xml:ns:netconf:base:1.0"', "").\ + 
replace('xmlns="http://www.huawei.com/netconf/vrp"', "") + + root = ElementTree.fromstring(xml_str) + topo = root.find("vfm/dfs/df/freeSize") + kbytes_free = topo.text + + file_size = os.path.getsize(self.local_file) + if int(kbytes_free) * 1024 > file_size: + return True + + return False + + def transfer_file(self, dest): + """Begin to transfer file by scp""" + + if not self.local_file_exists(): + self.module.fail_json( + msg='Could not transfer file. Local file doesn\'t exist.') + + if not self.enough_space(): + self.module.fail_json( + msg='Could not transfer file. Not enough space on device.') + + hostname = self.module.params['provider']['host'] + username = self.module.params['provider']['username'] + password = self.module.params['provider']['password'] + port = self.module.params['provider']['port'] + + ssh = paramiko.SSHClient() + ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy()) + ssh.connect(hostname=hostname, username=username, password=password, port=port) + full_remote_path = '{0}{1}'.format(self.file_system, dest) + scp = SCPClient(ssh.get_transport()) + try: + scp.put(self.local_file, full_remote_path) + except Exception: + time.sleep(10) + file_exists, temp_size = self.remote_file_exists( + dest, self.file_system) + file_size = os.path.getsize(self.local_file) + if file_exists and int(temp_size) == int(file_size): + pass + else: + scp.close() + self.module.fail_json(msg='Could not transfer file. There was an error ' + 'during transfer. Please make sure the format of ' + 'input parameters is right.') + scp.close() + return True + + def get_scp_enable(self): + """Get scp enable state""" + + ret_xml = '' + try: + ret_xml = get_nc_config(self.module, CE_NC_GET_SCP_ENABLE) + except ConnectionError: + self.module.fail_json(msg='Error: The NETCONF API of scp_enable is not supported.') + + if "" in ret_xml: + return False + + xml_str = ret_xml.replace('\r', '').replace('\n', '').\ + replace('xmlns="urn:ietf:params:xml:ns:netconf:base:1.0"', "").\ + replace('xmlns="http://www.huawei.com/netconf/vrp"', "") + + # get file info + root = ElementTree.fromstring(xml_str) + topo1 = root.find("sshs/sshServer/scpEnable") + topo2 = root.find("sshs/sshServerEnable/scpIpv4Enable") + topo3 = root.find("sshs/sshServerEnable/scpIpv6Enable") + if topo1 is not None: + return str(topo1.text).strip().lower() == 'enable' + elif self.host_is_ipv6 and topo3 is not None: + return str(topo3.text).strip().lower() == 'enable' + elif topo2 is not None: + return str(topo2.text).strip().lower() == 'enable' + return False + + def work(self): + """Execute task """ + + if not HAS_SCP: + self.module.fail_json( + msg="'Error: No scp package, please install it.'") + + if not HAS_PARAMIKO: + self.module.fail_json( + msg="'Error: No paramiko package, please install it.'") + + if self.local_file and len(self.local_file) > 4096: + self.module.fail_json( + msg="'Error: The maximum length of local_file is 4096.'") + + if self.remote_file and len(self.remote_file) > 4096: + self.module.fail_json( + msg="'Error: The maximum length of remote_file is 4096.'") + + scp_enable = self.get_scp_enable() + if not scp_enable: + if self.host_is_ipv6: + self.module.fail_json( + msg="'Error: Please ensure ipv6 SCP server are enabled.'") + else: + self.module.fail_json( + msg="'Error: Please ensure ipv4 SCP server are enabled.'") + + if not os.path.isfile(self.local_file): + self.module.fail_json( + msg="Local file {0} not found".format(self.local_file)) + + dest = self.remote_file or ('/' + os.path.basename(self.local_file)) + 
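        # Idempotency check: the remote file is looked up over NETCONF and,
        # when a file of the same size already exists at the destination,
        # the SCP transfer is skipped and the module reports changed=false.
        # Illustrative task (values taken from EXAMPLES above), kept as a
        # comment so it does not alter the module:
        #
        #   - name: Copy config; only the first run reports "changed"
        #     ce_file_copy:
        #       local_file: /usr/vrpcfg.cfg
        #       remote_file: /vrpcfg.cfg
        #       file_system: 'flash:'
        #       provider: "{{ cli }}"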
remote_exists, file_size = self.remote_file_exists( + dest, file_system=self.file_system) + if remote_exists and (os.path.getsize(self.local_file) != file_size): + remote_exists = False + + if not remote_exists: + self.changed = True + file_exists = False + else: + file_exists = True + self.transfer_result = 'The local file already exists on the device.' + + if not file_exists: + self.transfer_file(dest) + self.transfer_result = 'The local file has been successfully transferred to the device.' + + if self.remote_file is None: + self.remote_file = '/' + os.path.basename(self.local_file) + + self.module.exit_json( + changed=self.changed, + transfer_result=self.transfer_result, + local_file=self.local_file, + remote_file=self.remote_file, + file_system=self.file_system) + + +def main(): + """Main function entry""" + + argument_spec = dict( + local_file=dict(required=True), + remote_file=dict(required=False), + file_system=dict(required=False, default='flash:') + ) + argument_spec.update(ce_argument_spec) + filecopy_obj = FileCopy(argument_spec) + filecopy_obj.work() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/cloudengine/ce_info_center_debug.py b/plugins/modules/network/cloudengine/ce_info_center_debug.py new file mode 100644 index 0000000000..57487c3d70 --- /dev/null +++ b/plugins/modules/network/cloudengine/ce_info_center_debug.py @@ -0,0 +1,617 @@ +#!/usr/bin/python +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: ce_info_center_debug +short_description: Manages information center debug configuration on HUAWEI CloudEngine switches. +description: + - Manages information center debug configurations on HUAWEI CloudEngine switches. +author: + - wangdezhuang (@QijunPan) +notes: + - This module requires the netconf system service be enabled on the remote device being managed. + - Recommended connection is C(netconf). + - This module also works with C(local) connections for legacy playbooks. +options: + state: + description: + - Specify desired state of the resource. + default: present + choices: ['present','absent'] + debug_time_stamp: + description: + - Timestamp type of debugging information. + choices: ['date_boot', 'date_second', 'date_tenthsecond', 'date_millisecond', 'shortdate_second', + 'shortdate_tenthsecond', 'shortdate_millisecond', 'formatdate_second', 'formatdate_tenthsecond', + 'formatdate_millisecond'] + module_name: + description: + - Module name of the rule. + The value is a string of 1 to 31 case-insensitive characters. The default value is default. + Please use lower-case letter, such as [aaa, acl, arp, bfd]. + channel_id: + description: + - Number of a channel. + The value is an integer ranging from 0 to 9. The default value is 0. 
+ debug_enable: + description: + - Whether a device is enabled to output debugging information. + default: no_use + choices: ['no_use','true','false'] + debug_level: + description: + - Debug level permitted to output. + choices: ['emergencies', 'alert', 'critical', 'error', 'warning', 'notification', + 'informational', 'debugging'] +''' + +EXAMPLES = ''' + +- name: CloudEngine info center debug test + hosts: cloudengine + connection: local + gather_facts: no + vars: + cli: + host: "{{ inventory_hostname }}" + port: "{{ ansible_ssh_port }}" + username: "{{ username }}" + password: "{{ password }}" + transport: cli + + tasks: + + - name: "Config debug time stamp" + ce_info_center_debug: + state: present + debug_time_stamp: date_boot + provider: "{{ cli }}" + + - name: "Undo debug time stamp" + ce_info_center_debug: + state: absent + debug_time_stamp: date_boot + provider: "{{ cli }}" + + - name: "Config debug module log level" + ce_info_center_debug: + state: present + module_name: aaa + channel_id: 1 + debug_enable: true + debug_level: error + provider: "{{ cli }}" + + - name: "Undo debug module log level" + ce_info_center_debug: + state: absent + module_name: aaa + channel_id: 1 + debug_enable: true + debug_level: error + provider: "{{ cli }}" +''' + +RETURN = ''' +changed: + description: check to see if a change was made on the device + returned: always + type: bool + sample: true +proposed: + description: k/v pairs of parameters passed into module + returned: always + type: dict + sample: {"state": "present", "debug_time_stamp": "date_boot"} +existing: + description: k/v pairs of existing aaa server + returned: always + type: dict + sample: {"debugTimeStamp": "DATE_MILLISECOND"} +end_state: + description: k/v pairs of aaa params after module execution + returned: always + type: dict + sample: {"debugTimeStamp": "DATE_BOOT"} +updates: + description: command sent to the device + returned: always + type: list + sample: ["info-center timestamp debugging boot"] +''' + +from xml.etree import ElementTree +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.network.cloudengine.ce import get_nc_config, set_nc_config, ce_argument_spec + + +# get info center debug global +CE_GET_DEBUG_GLOBAL_HEADER = """ + + + +""" +CE_GET_DEBUG_GLOBAL_TAIL = """ + + + +""" +# merge info center debug global +CE_MERGE_DEBUG_GLOBAL_HEADER = """ + + + +""" +CE_MERGE_DEBUG_GLOBAL_TAIL = """ + + + +""" + +# get info center debug source +CE_GET_DEBUG_SOURCE_HEADER = """ + + + + +""" +CE_GET_DEBUG_SOURCE_TAIL = """ + + + + +""" +# merge info center debug source +CE_MERGE_DEBUG_SOURCE_HEADER = """ + + + + +""" +CE_MERGE_DEBUG_SOURCE_TAIL = """ + + + + +""" +# delete info center debug source +CE_DELETE_DEBUG_SOURCE_HEADER = """ + + + + +""" +CE_DELETE_DEBUG_SOURCE_TAIL = """ + + + + +""" + +TIME_STAMP_DICT = {"date_boot": "boot", + "date_second": "date precision-time second", + "date_tenthsecond": "date precision-time tenth-second", + "date_millisecond": "date precision-time millisecond", + "shortdate_second": "short-date precision-time second", + "shortdate_tenthsecond": "short-date precision-time tenth-second", + "shortdate_millisecond": "short-date precision-time millisecond", + "formatdate_second": "format-date precision-time second", + "formatdate_tenthsecond": "format-date precision-time tenth-second", + "formatdate_millisecond": "format-date precision-time millisecond"} + +CHANNEL_DEFAULT_DBG_STATE = {"0": "true", + "1": "true", + "2": "false", + "3": 
"false", + "4": "false", + "5": "false", + "6": "false", + "7": "false", + "8": "false", + "9": "false"} + +CHANNEL_DEFAULT_DBG_LEVEL = {"0": "debugging", + "1": "debugging", + "2": "debugging", + "3": "debugging", + "4": "debugging", + "5": "debugging", + "6": "debugging", + "7": "debugging", + "8": "debugging", + "9": "debugging"} + + +class InfoCenterDebug(object): + """ Manages info center debug configuration """ + + def __init__(self, **kwargs): + """ Init function """ + + # argument spec + argument_spec = kwargs["argument_spec"] + self.spec = argument_spec + self.module = AnsibleModule(argument_spec=self.spec, supports_check_mode=True) + + # module args + self.state = self.module.params['state'] + self.debug_time_stamp = self.module.params['debug_time_stamp'] or None + self.module_name = self.module.params['module_name'] or None + self.channel_id = self.module.params['channel_id'] or None + self.debug_enable = self.module.params['debug_enable'] + self.debug_level = self.module.params['debug_level'] or None + + # cur config + self.cur_global_cfg = dict() + self.cur_source_cfg = dict() + + # state + self.changed = False + self.updates_cmd = list() + self.results = dict() + self.proposed = dict() + self.existing = dict() + self.end_state = dict() + + def check_global_args(self): + """ Check global args """ + + need_cfg = False + find_flag = False + self.cur_global_cfg["global_cfg"] = [] + + if self.debug_time_stamp: + + conf_str = CE_GET_DEBUG_GLOBAL_HEADER + conf_str += "" + conf_str += CE_GET_DEBUG_GLOBAL_TAIL + + xml_str = get_nc_config(self.module, conf_str) + if "" in xml_str: + find_flag = False + else: + xml_str = xml_str.replace('\r', '').replace('\n', '').\ + replace('xmlns="urn:ietf:params:xml:ns:netconf:base:1.0"', "").\ + replace('xmlns="http://www.huawei.com/netconf/vrp"', "") + + root = ElementTree.fromstring(xml_str) + global_cfg = root.findall("syslog/globalParam") + if global_cfg: + for tmp in global_cfg: + tmp_dict = dict() + for site in tmp: + if site.tag in ["debugTimeStamp"]: + tmp_dict[site.tag] = site.text + + self.cur_global_cfg["global_cfg"].append(tmp_dict) + + if self.cur_global_cfg["global_cfg"]: + for tmp in self.cur_global_cfg["global_cfg"]: + find_flag = True + + if tmp.get("debugTimeStamp").lower() != self.debug_time_stamp: + find_flag = False + + if find_flag: + break + else: + find_flag = False + + if self.state == "present": + need_cfg = bool(not find_flag) + else: + need_cfg = bool(find_flag) + + self.cur_global_cfg["need_cfg"] = need_cfg + + def check_source_args(self): + """ Check source args """ + + need_cfg = False + find_flag = False + self.cur_source_cfg["source_cfg"] = [] + + if self.module_name: + if len(self.module_name) < 1 or len(self.module_name) > 31: + self.module.fail_json( + msg='Error: The module_name is out of [1 - 31].') + + if not self.channel_id: + self.module.fail_json( + msg='Error: Please input channel_id at the same time.') + + if self.channel_id: + if self.channel_id.isdigit(): + if int(self.channel_id) < 0 or int(self.channel_id) > 9: + self.module.fail_json( + msg='Error: The value of channel_id is out of [0 - 9].') + else: + self.module.fail_json( + msg='Error: The channel_id is not digit.') + + conf_str = CE_GET_DEBUG_SOURCE_HEADER + + if self.module_name != "default": + conf_str += "%s" % self.module_name.upper() + else: + conf_str += "default" + + if self.channel_id: + conf_str += "" + if self.debug_enable != 'no_use': + conf_str += "" + if self.debug_level: + conf_str += "" + + conf_str += CE_GET_DEBUG_SOURCE_TAIL + 
xml_str = get_nc_config(self.module, conf_str) + if "" in xml_str: + find_flag = False + else: + xml_str = xml_str.replace('\r', '').replace('\n', '').\ + replace('xmlns="urn:ietf:params:xml:ns:netconf:base:1.0"', "").\ + replace('xmlns="http://www.huawei.com/netconf/vrp"', "") + + root = ElementTree.fromstring(xml_str) + source_cfg = root.findall("syslog/icSources/icSource") + if source_cfg: + for tmp in source_cfg: + tmp_dict = dict() + for site in tmp: + if site.tag in ["moduleName", "icChannelId", "dbgEnFlg", "dbgEnLevel"]: + tmp_dict[site.tag] = site.text + + self.cur_source_cfg["source_cfg"].append(tmp_dict) + + if self.cur_source_cfg["source_cfg"]: + for tmp in self.cur_source_cfg["source_cfg"]: + find_flag = True + + if self.module_name and tmp.get("moduleName").lower() != self.module_name.lower(): + find_flag = False + if self.channel_id and tmp.get("icChannelId") != self.channel_id: + find_flag = False + if self.debug_enable != 'no_use' and tmp.get("dbgEnFlg") != self.debug_enable: + find_flag = False + if self.debug_level and tmp.get("dbgEnLevel") != self.debug_level: + find_flag = False + + if find_flag: + break + else: + find_flag = False + + if self.state == "present": + need_cfg = bool(not find_flag) + else: + need_cfg = bool(find_flag) + + self.cur_source_cfg["need_cfg"] = need_cfg + + def get_proposed(self): + """ Get proposed """ + + self.proposed["state"] = self.state + + if self.debug_time_stamp: + self.proposed["debug_time_stamp"] = self.debug_time_stamp + if self.module_name: + self.proposed["module_name"] = self.module_name + if self.channel_id: + self.proposed["channel_id"] = self.channel_id + if self.debug_enable != 'no_use': + self.proposed["debug_enable"] = self.debug_enable + if self.debug_level: + self.proposed["debug_level"] = self.debug_level + + def get_existing(self): + """ Get existing """ + + if self.cur_global_cfg["global_cfg"]: + self.existing["global_cfg"] = self.cur_global_cfg["global_cfg"] + if self.cur_source_cfg["source_cfg"]: + self.existing["source_cfg"] = self.cur_source_cfg["source_cfg"] + + def get_end_state(self): + """ Get end state """ + + self.check_global_args() + if self.cur_global_cfg["global_cfg"]: + self.end_state["global_cfg"] = self.cur_global_cfg["global_cfg"] + + self.check_source_args() + if self.cur_source_cfg["source_cfg"]: + self.end_state["source_cfg"] = self.cur_source_cfg["source_cfg"] + + def merge_debug_global(self): + """ Merge debug global """ + + conf_str = CE_MERGE_DEBUG_GLOBAL_HEADER + + if self.debug_time_stamp: + conf_str += "%s" % self.debug_time_stamp.upper() + + conf_str += CE_MERGE_DEBUG_GLOBAL_TAIL + + recv_xml = set_nc_config(self.module, conf_str) + if "" not in recv_xml: + self.module.fail_json(msg='Error: Merge debug global failed.') + + if self.debug_time_stamp: + cmd = "info-center timestamp debugging " + TIME_STAMP_DICT.get(self.debug_time_stamp) + self.updates_cmd.append(cmd) + + self.changed = True + + def delete_debug_global(self): + """ Delete debug global """ + + conf_str = CE_MERGE_DEBUG_GLOBAL_HEADER + + if self.debug_time_stamp: + conf_str += "DATE_MILLISECOND" + + conf_str += CE_MERGE_DEBUG_GLOBAL_TAIL + + recv_xml = set_nc_config(self.module, conf_str) + if "" not in recv_xml: + self.module.fail_json(msg='Error: delete debug global failed.') + + if self.debug_time_stamp: + cmd = "undo info-center timestamp debugging" + self.updates_cmd.append(cmd) + + self.changed = True + + def merge_debug_source(self): + """ Merge debug source """ + + conf_str = CE_MERGE_DEBUG_SOURCE_HEADER + + if 
self.module_name: + conf_str += "%s" % self.module_name + if self.channel_id: + conf_str += "%s" % self.channel_id + if self.debug_enable != 'no_use': + conf_str += "%s" % self.debug_enable + if self.debug_level: + conf_str += "%s" % self.debug_level + + conf_str += CE_MERGE_DEBUG_SOURCE_TAIL + + recv_xml = set_nc_config(self.module, conf_str) + if "" not in recv_xml: + self.module.fail_json(msg='Error: Merge debug source failed.') + + cmd = "info-center source" + if self.module_name: + cmd += " %s" % self.module_name + if self.channel_id: + cmd += " channel %s" % self.channel_id + if self.debug_enable != 'no_use': + if self.debug_enable == "true": + cmd += " debug state on" + else: + cmd += " debug state off" + if self.debug_level: + cmd += " level %s" % self.debug_level + + self.updates_cmd.append(cmd) + self.changed = True + + def delete_debug_source(self): + """ Delete debug source """ + + if self.debug_enable == 'no_use' and not self.debug_level: + conf_str = CE_DELETE_DEBUG_SOURCE_HEADER + if self.module_name: + conf_str += "%s" % self.module_name + if self.channel_id: + conf_str += "%s" % self.channel_id + conf_str += CE_DELETE_DEBUG_SOURCE_TAIL + else: + conf_str = CE_MERGE_DEBUG_SOURCE_HEADER + if self.module_name: + conf_str += "%s" % self.module_name + if self.channel_id: + conf_str += "%s" % self.channel_id + if self.debug_enable != 'no_use': + conf_str += "%s" % CHANNEL_DEFAULT_DBG_STATE.get(self.channel_id) + if self.debug_level: + conf_str += "%s" % CHANNEL_DEFAULT_DBG_LEVEL.get(self.channel_id) + conf_str += CE_MERGE_DEBUG_SOURCE_TAIL + + recv_xml = set_nc_config(self.module, conf_str) + if "" not in recv_xml: + self.module.fail_json(msg='Error: Delete debug source failed.') + + cmd = "undo info-center source" + if self.module_name: + cmd += " %s" % self.module_name + if self.channel_id: + cmd += " channel %s" % self.channel_id + if self.debug_enable != 'no_use': + cmd += " debug state" + if self.debug_level: + cmd += " level" + + self.updates_cmd.append(cmd) + self.changed = True + + def work(self): + """ work function """ + + self.check_global_args() + self.check_source_args() + self.get_proposed() + self.get_existing() + + if self.state == "present": + if self.cur_global_cfg["need_cfg"]: + self.merge_debug_global() + if self.cur_source_cfg["need_cfg"]: + self.merge_debug_source() + + else: + if self.cur_global_cfg["need_cfg"]: + self.delete_debug_global() + if self.cur_source_cfg["need_cfg"]: + self.delete_debug_source() + + self.get_end_state() + + self.results['changed'] = self.changed + self.results['proposed'] = self.proposed + self.results['existing'] = self.existing + self.results['end_state'] = self.end_state + self.results['updates'] = self.updates_cmd + + self.module.exit_json(**self.results) + + +def main(): + """ Module main """ + + argument_spec = dict( + state=dict(choices=['present', 'absent'], default='present'), + debug_time_stamp=dict(choices=['date_boot', 'date_second', 'date_tenthsecond', + 'date_millisecond', 'shortdate_second', 'shortdate_tenthsecond', + 'shortdate_millisecond', 'formatdate_second', 'formatdate_tenthsecond', + 'formatdate_millisecond']), + module_name=dict(type='str'), + channel_id=dict(type='str'), + debug_enable=dict(type='str', default='no_use', choices=['no_use', 'true', 'false']), + debug_level=dict(choices=['emergencies', 'alert', 'critical', 'error', 'warning', 'notification', + 'informational', 'debugging']) + ) + + argument_spec.update(ce_argument_spec) + module = InfoCenterDebug(argument_spec=argument_spec) + module.work() 
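    # work() drives the whole flow: the check_*_args() methods read the
    # current debug configuration over NETCONF, state=present merges the
    # requested settings while state=absent restores the channel defaults,
    # and exit_json() reports the proposed/existing/end_state/updates keys
    # documented in RETURN above.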
+ + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/cloudengine/ce_info_center_global.py b/plugins/modules/network/cloudengine/ce_info_center_global.py new file mode 100644 index 0000000000..96355e99ca --- /dev/null +++ b/plugins/modules/network/cloudengine/ce_info_center_global.py @@ -0,0 +1,1725 @@ +#!/usr/bin/python +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: ce_info_center_global +short_description: Manages outputting logs on HUAWEI CloudEngine switches. +description: + - This module offers the ability to be output to the log buffer, log file, console, terminal, or log host on HUAWEI CloudEngine switches. +author: + - Li Yanfeng (@QijunPan) +notes: + - This module requires the netconf system service be enabled on the remote device being managed. + - Recommended connection is C(netconf). + - This module also works with C(local) connections for legacy playbooks. +options: + info_center_enable: + description: + - Whether the info-center function is enabled. The value is of the Boolean type. + choices: ['true','false'] + packet_priority: + description: + - Set the priority of the syslog packet.The value is an integer ranging from 0 to 7. The default value is 0. + suppress_enable: + description: + - Whether a device is enabled to suppress duplicate statistics. The value is of the Boolean type. + choices: [ 'false', 'true' ] + logfile_max_num: + description: + - Maximum number of log files of the same type. The default value is 200. + - The value range for log files is[3, 500], for security files is [1, 3],and for operation files is [1, 7]. + logfile_max_size: + description: + - Maximum size (in MB) of a log file. The default value is 32. + - The value range for log files is [4, 8, 16, 32], for security files is [1, 4], + - and for operation files is [1, 4]. + default: 32 + choices: ['4', '8', '16', '32'] + channel_id: + description: + - Number for channel. The value is an integer ranging from 0 to 9. The default value is 0. + channel_cfg_name: + description: + - Channel name.The value is a string of 1 to 30 case-sensitive characters. The default value is console. + default: console + channel_out_direct: + description: + - Direction of information output. + choices: ['console','monitor','trapbuffer','logbuffer','snmp','logfile'] + filter_feature_name: + description: + - Feature name of the filtered log. The value is a string of 1 to 31 case-insensitive characters. + filter_log_name: + description: + - Name of the filtered log. The value is a string of 1 to 63 case-sensitive characters. + ip_type: + description: + - Log server address type, IPv4 or IPv6. + choices: ['ipv4','ipv6'] + server_ip: + description: + - Log server address, IPv4 or IPv6 type. 
The value is a string of 0 to 255 characters. + The value can be an valid IPv4 or IPv6 address. + server_domain: + description: + - Server name. The value is a string of 1 to 255 case-sensitive characters. + is_default_vpn: + description: + - Use the default VPN or not. + type: bool + default: 'no' + vrf_name: + description: + - VPN name on a log server. The value is a string of 1 to 31 case-sensitive characters. + The default value is _public_. + level: + description: + - Level of logs saved on a log server. + choices: ['emergencies','alert','critical','error','warning','notification','informational','debugging'] + server_port: + description: + - Number of a port sending logs.The value is an integer ranging from 1 to 65535. + For UDP, the default value is 514. For TCP, the default value is 601. For TSL, the default value is 6514. + facility: + description: + - Log record tool. + choices: ['local0','local1','local2','local3','local4','local5','local6','local7'] + channel_name: + description: + - Channel name. The value is a string of 1 to 30 case-sensitive characters. + timestamp: + description: + - Log server timestamp. The value is of the enumerated type and case-sensitive. + choices: ['UTC', 'localtime'] + transport_mode: + description: + - Transport mode. The value is of the enumerated type and case-sensitive. + choices: ['tcp','udp'] + ssl_policy_name: + description: + - SSL policy name. The value is a string of 1 to 23 case-sensitive characters. + source_ip: + description: + - Log source ip address, IPv4 or IPv6 type. The value is a string of 0 to 255. + The value can be an valid IPv4 or IPv6 address. + state: + description: + - Specify desired state of the resource. + default: present + choices: ['present','absent'] +''' +EXAMPLES = ''' +- name: info center global module test + hosts: cloudengine + connection: local + gather_facts: no + vars: + cli: + host: "{{ inventory_hostname }}" + port: "{{ ansible_ssh_port }}" + username: "{{ username }}" + password: "{{ password }}" + transport: cli + + tasks: + + - name: Config info-center enable + ce_info_center_global: + info_center_enable: true + state: present + provider: "{{ cli }}" + + - name: Config statistic-suppress enable + ce_info_center_global: + suppress_enable: true + state: present + provider: "{{ cli }}" + + - name: Config info-center syslog packet-priority 1 + ce_info_center_global: + packet_priority: 2 + state: present + provider: "{{ cli }}" + + - name: Config info-center channel 1 name aaa + ce_info_center_global: + channel_id: 1 + channel_cfg_name: aaa + state: present + provider: "{{ cli }}" + + - name: Config info-center logfile size 10 + ce_info_center_global: + logfile_max_num: 10 + state: present + provider: "{{ cli }}" + + - name: Config info-center console channel 1 + ce_info_center_global: + channel_out_direct: console + channel_id: 1 + state: present + provider: "{{ cli }}" + + - name: Config info-center filter-id bymodule-alias snmp snmp_ipunlock + ce_info_center_global: + filter_feature_name: SNMP + filter_log_name: SNMP_IPLOCK + state: present + provider: "{{ cli }}" + + + - name: Config info-center max-logfile-number 16 + ce_info_center_global: + logfile_max_size: 16 + state: present + provider: "{{ cli }}" + + - name: Config syslog loghost domain. 
+ ce_info_center_global: + server_domain: aaa + vrf_name: aaa + channel_id: 1 + transport_mode: tcp + facility: local4 + server_port: 100 + level: alert + timestamp: UTC + state: present + provider: "{{ cli }}" +''' + +RETURN = ''' +proposed: + description: k/v pairs of parameters passed into module + returned: always + type: dict + sample: {"channel_id": "1", "facility": "local4", "is_default_vpn": True, "level": "alert", "server_domain": "aaa", + "server_port": "100", "state": "present", "timestamp": "localtime", "transport_mode": "tcp"} +existing: + description: k/v pairs of existing rollback + returned: always + type: dict + sample: + "server_domain_info": [ + { + "chnlId": "1", + "chnlName": "monitor", + "facility": "local4", + "isBriefFmt": "false", + "isDefaultVpn": "false", + "level": "alert", + "serverDomain": "aaa", + "serverPort": "100", + "sourceIP": "0.0.0.0", + "sslPolicyName": "gmc", + "timestamp": "UTC", + "transportMode": "tcp", + "vrfName": "aaa" + } + ] +end_state: + description: k/v pairs of aaa params after module execution + returned: always + type: dict + sample: + "server_domain_info": [ + { + "chnlId": "1", + "chnlName": "monitor", + "facility": "local4", + "isBriefFmt": "false", + "isDefaultVpn": "true", + "level": "alert", + "serverDomain": "aaa", + "serverPort": "100", + "sourceIP": "0.0.0.0", + "sslPolicyName": null, + "timestamp": "localtime", + "transportMode": "tcp", + "vrfName": "_public_" + }, + { + "chnlId": "1", + "chnlName": "monitor", + "facility": "local4", + "isBriefFmt": "false", + "isDefaultVpn": "false", + "level": "alert", + "serverDomain": "aaa", + "serverPort": "100", + "sourceIP": "0.0.0.0", + "sslPolicyName": "gmc", + "timestamp": "UTC", + "transportMode": "tcp", + "vrfName": "aaa" + } + ] +updates: + description: command sent to the device + returned: always + type: list + sample: ["info-center loghost domain aaa level alert port 100 facility local4 channel 1 localtime transport tcp"] +changed: + description: check to see if a change was made on the device + returned: always + type: bool + sample: true +''' + +from xml.etree import ElementTree +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.network.cloudengine.ce import ce_argument_spec, get_nc_config, set_nc_config, check_ip_addr + + +CE_NC_GET_CENTER_GLOBAL_INFO_HEADER = """ + + + +""" +CE_NC_GET_CENTER_GLOBAL_INFO_TAIL = """ + + + +""" + +CE_NC_MERGE_CENTER_GLOBAL_INFO_HEADER = """ + + + +""" + +CE_NC_MERGE_CENTER_GLOBAL_INFO_TAIL = """ + + + +""" + +CE_NC_GET_LOG_FILE_INFO_HEADER = """ + + + + +""" +CE_NC_GET_LOG_FILE_INFO_TAIL = """ + + + + +""" + +CE_NC_MERGE_LOG_FILE_INFO_HEADER = """ + + + + +""" + +CE_NC_MERGE_LOG_FILE_INFO_TAIL = """ + + + + +""" + + +CE_NC_GET_CHANNEL_INFO = """ + + + + + %s + + + + + +""" + +CE_NC_MERGE_CHANNEL_INFO_HEADER = """ + + + + +""" +CE_NC_MERGE_CHANNEL_INFO_TAIL = """ + + + + +""" + +CE_NC_GET_CHANNEL_DIRECT_INFO = """ + + + + + %s + + + + + +""" +CE_NC_MERGE_CHANNEL_DIRECT_HEADER = """ + + + + +""" + +CE_NC_MERGE_CHANNEL_DIRECT_TAIL = """ + + + + +""" + +CE_NC_GET_FILTER_INFO = """ + + + + + + + + + + +""" + +CE_NC_CREATE_CHANNEL_FILTER_HEADER = """ + + + + + +""" +CE_NC_CREATE_CHANNEL_FILTER_TAIL = """ + + + + +""" +CE_NC_DELETE_CHANNEL_FILTER_HEADER = """ + + + + + +""" +CE_NC_DELETE_CHANNEL_FILTER_TAIL = """ + + + + +""" + +CE_NC_GET_SERVER_IP_INFO_HEADER = """ + + + + + %s + %s + %s + %s +""" +CE_NC_GET_SERVER_IP_INFO_TAIL = """ + + + + +""" +CE_NC_MERGE_SERVER_IP_INFO_HEADER = 
""" + + + + + %s + %s + %s + %s +""" +CE_NC_MERGE_SERVER_IP_INFO_TAIL = """ + + + + +""" +CE_NC_DELETE_SERVER_IP_INFO_HEADER = """ + + + + + %s + %s + %s + %s +""" +CE_NC_DELETE_SERVER_IP_INFO_TAIL = """ + + + + +""" +CE_NC_GET_SERVER_DNS_INFO_HEADER = """ + + + + +""" + +CE_NC_GET_SERVER_DNS_INFO_TAIL = """ + + + + +""" + +CE_NC_MERGE_SERVER_DNS_INFO_HEADER = """ + + + + + %s + %s + %s +""" +CE_NC_MERGE_SERVER_DNS_INFO_TAIL = """ + + + + +""" + +CE_NC_DELETE_SERVER_DNS_INFO_HEADER = """ + + + + + %s + %s + %s +""" +CE_NC_DELETE_SERVER_DNS_INFO_TAIL = """ + + + + +""" + + +def get_out_direct_default(out_direct): + """get default out direct""" + + outdict = {"console": "1", "monitor": "2", "trapbuffer": "3", + "logbuffer": "4", "snmp": "5", "logfile": "6"} + channel_id_default = outdict.get(out_direct) + return channel_id_default + + +def get_channel_name_default(channel_id): + """get default out direct""" + + channel_dict = {"0": "console", "1": "monitor", "2": "loghost", "3": "trapbuffer", "4": "logbuffer", + "5": "snmpagent", "6": "channel6", "7": "channel7", "8": "channel8", "9": "channel9"} + channel_name_default = channel_dict.get(channel_id) + return channel_name_default + + +class InfoCenterGlobal(object): + """ + Manages info center global configuration. + """ + + def __init__(self, argument_spec): + self.spec = argument_spec + self.module = None + self.init_module() + + # module input info + self.info_center_enable = self.module.params['info_center_enable'] or None + self.packet_priority = self.module.params['packet_priority'] or None + self.suppress_enable = self.module.params['suppress_enable'] or None + self.logfile_max_num = self.module.params['logfile_max_num'] or None + self.logfile_max_size = self.module.params['logfile_max_size'] or None + self.channel_id = self.module.params['channel_id'] or None + self.channel_cfg_name = self.module.params['channel_cfg_name'] or None + self.channel_out_direct = self.module.params['channel_out_direct'] or None + self.filter_feature_name = self.module.params['filter_feature_name'] or None + self.filter_log_name = self.module.params['filter_log_name'] or None + self.ip_type = self.module.params['ip_type'] or None + self.server_ip = self.module.params['server_ip'] or None + self.server_domain = self.module.params['server_domain'] or None + self.is_default_vpn = self.module.params['is_default_vpn'] or None + self.vrf_name = self.module.params['vrf_name'] or None + self.level = self.module.params['level'] or None + self.server_port = self.module.params['server_port'] or None + self.facility = self.module.params['facility'] or None + self.channel_name = self.module.params['channel_name'] or None + self.timestamp = self.module.params['timestamp'] or None + self.transport_mode = self.module.params['transport_mode'] or None + self.ssl_policy_name = self.module.params['ssl_policy_name'] or None + self.source_ip = self.module.params['source_ip'] or None + self.state = self.module.params['state'] or None + + # state + self.changed = False + self.updates_cmd = list() + self.results = dict() + self.existing = dict() + self.proposed = dict() + self.end_state = dict() + + # syslog info + self.cur_global_info = None + self.cur_logfile_info = None + self.channel_info = None + self.channel_direct_info = None + self.filter_info = None + self.server_ip_info = None + self.server_domain_info = None + + def init_module(self): + """ init module """ + + self.module = AnsibleModule( + argument_spec=self.spec, supports_check_mode=True) + + def check_response(self, 
con_obj, xml_name): + """Check if response message is already succeed.""" + + xml_str = con_obj.xml + if "" not in xml_str: + self.module.fail_json(msg='Error: %s failed.' % xml_name) + + def get_channel_dict(self): + """ get channel attributes dict.""" + + channel_info = dict() + # get channel info + conf_str = CE_NC_GET_CHANNEL_INFO % self.channel_id + xml_str = get_nc_config(self.module, conf_str) + if "" in xml_str: + return channel_info + xml_str = xml_str.replace('\r', '').replace('\n', '').\ + replace('xmlns="urn:ietf:params:xml:ns:netconf:base:1.0"', "").\ + replace('xmlns="http://www.huawei.com/netconf/vrp"', "") + root = ElementTree.fromstring(xml_str) + channel_info["channelInfos"] = list() + channels = root.findall("syslog/icChannels/icChannel") + if channels: + for channel in channels: + channel_dict = dict() + for ele in channel: + if ele.tag in ["icChnlId", "icChnlCfgName"]: + channel_dict[ele.tag] = ele.text + channel_info["channelInfos"].append(channel_dict) + return channel_info + + def is_exist_channel_id_name(self, channel_id, channel_name): + """if channel id exist""" + + if not self.channel_info: + return False + + for id2name in self.channel_info["channelInfos"]: + if id2name["icChnlId"] == channel_id and id2name["icChnlCfgName"] == channel_name: + return True + return False + + def config_merge_syslog_channel(self, channel_id, channel_name): + """config channel id""" + + if not self.is_exist_channel_id_name(channel_id, channel_name): + conf_str = CE_NC_MERGE_CHANNEL_INFO_HEADER + if channel_id: + conf_str += "%s" % channel_id + if channel_name: + conf_str += "%s" % channel_name + + conf_str += CE_NC_MERGE_CHANNEL_INFO_TAIL + recv_xml = set_nc_config(self.module, conf_str) + if "" not in recv_xml: + self.module.fail_json( + msg='Error: Merge syslog channel id failed.') + + self.updates_cmd.append( + "info-center channel %s name %s" % (channel_id, channel_name)) + self.changed = True + + def delete_merge_syslog_channel(self, channel_id, channel_name): + """delete channel id""" + + change_flag = False + + if channel_name: + for id2name in self.channel_info["channelInfos"]: + channel_default_name = get_channel_name_default( + id2name["icChnlId"]) + if id2name["icChnlId"] == channel_id and id2name["icChnlCfgName"] == channel_name: + channel_name = channel_default_name + change_flag = True + + if not channel_name: + for id2name in self.channel_info["channelInfos"]: + channel_default_name = get_channel_name_default( + id2name["icChnlId"]) + if id2name["icChnlId"] == channel_id and id2name["icChnlCfgName"] != channel_default_name: + channel_name = channel_default_name + change_flag = True + if change_flag: + conf_str = CE_NC_MERGE_CHANNEL_INFO_HEADER + if channel_id: + conf_str += "%s" % channel_id + if channel_name: + conf_str += "%s" % channel_name + + conf_str += CE_NC_MERGE_CHANNEL_INFO_TAIL + recv_xml = set_nc_config(self.module, conf_str) + if "" not in recv_xml: + self.module.fail_json( + msg='Error: Merge syslog channel id failed.') + + self.updates_cmd.append("undo info-center channel %s" % channel_id) + self.changed = True + + def get_channel_direct_dict(self): + """ get channel direct attributes dict.""" + + channel_direct_info = dict() + # get channel direct info + conf_str = CE_NC_GET_CHANNEL_DIRECT_INFO % self.channel_out_direct + xml_str = get_nc_config(self.module, conf_str) + if "" in xml_str: + return channel_direct_info + xml_str = xml_str.replace('\r', '').replace('\n', '').\ + replace('xmlns="urn:ietf:params:xml:ns:netconf:base:1.0"', "").\ + 
replace('xmlns="http://www.huawei.com/netconf/vrp"', "") + root = ElementTree.fromstring(xml_str) + channel_direct_info["channelDirectInfos"] = list() + dir_channels = root.findall("syslog/icDirChannels/icDirChannel") + if dir_channels: + for ic_dir_channel in dir_channels: + channel_direct_dict = dict() + for ele in ic_dir_channel: + if ele.tag in ["icOutDirect", "icCfgChnlId"]: + channel_direct_dict[ele.tag] = ele.text + channel_direct_info["channelDirectInfos"].append( + channel_direct_dict) + return channel_direct_info + + def is_exist_out_direct(self, out_direct, channel_id): + """if channel out direct exist""" + + if not self.channel_direct_info: + return False + + for id2name in self.channel_direct_info["channelDirectInfos"]: + if id2name["icOutDirect"] == out_direct and id2name["icCfgChnlId"] == channel_id: + return True + return False + + def config_merge_out_direct(self, out_direct, channel_id): + """config out direct""" + + if not self.is_exist_out_direct(out_direct, channel_id): + conf_str = CE_NC_MERGE_CHANNEL_DIRECT_HEADER + if out_direct: + conf_str += "%s" % out_direct + if channel_id: + conf_str += "%s" % channel_id + + conf_str += CE_NC_MERGE_CHANNEL_DIRECT_TAIL + recv_xml = set_nc_config(self.module, conf_str) + if "" not in recv_xml: + self.module.fail_json( + msg='Error: Merge syslog channel out direct failed.') + + self.updates_cmd.append( + "info-center %s channel %s" % (out_direct, channel_id)) + self.changed = True + + def delete_merge_out_direct(self, out_direct, channel_id): + """delete out direct""" + + change_flag = False + channel_id_default = get_out_direct_default(out_direct) + if channel_id: + for id2name in self.channel_direct_info["channelDirectInfos"]: + if id2name["icOutDirect"] == out_direct and id2name["icCfgChnlId"] == channel_id: + if channel_id != channel_id_default: + channel_id = channel_id_default + change_flag = True + + if not channel_id: + for id2name in self.channel_direct_info["channelDirectInfos"]: + if id2name["icOutDirect"] == out_direct and id2name["icCfgChnlId"] != channel_id_default: + channel_id = channel_id_default + change_flag = True + + if change_flag: + conf_str = CE_NC_MERGE_CHANNEL_DIRECT_HEADER + if out_direct: + conf_str += "%s" % out_direct + if channel_id: + conf_str += "%s" % channel_id + + conf_str += CE_NC_MERGE_CHANNEL_DIRECT_TAIL + recv_xml = set_nc_config(self.module, conf_str) + if "" not in recv_xml: + self.module.fail_json( + msg='Error: Merge syslog channel out direct failed.') + + self.updates_cmd.append("undo info-center logfile channel") + self.changed = True + + def get_filter_dict(self): + """ get syslog filter attributes dict.""" + + filter_info = dict() + # get filter info + conf_str = CE_NC_GET_FILTER_INFO + xml_str = get_nc_config(self.module, conf_str) + if "" in xml_str: + return filter_info + xml_str = xml_str.replace('\r', '').replace('\n', '').\ + replace('xmlns="urn:ietf:params:xml:ns:netconf:base:1.0"', "").\ + replace('xmlns="http://www.huawei.com/netconf/vrp"', "") + root = ElementTree.fromstring(xml_str) + filter_info["filterInfos"] = list() + ic_filters = root.findall("syslog/icFilters/icFilter") + if ic_filters: + for ic_filter in ic_filters: + filter_dict = dict() + for ele in ic_filter: + if ele.tag in ["icFeatureName", "icFilterLogName"]: + filter_dict[ele.tag] = ele.text + filter_info["filterInfos"].append(filter_dict) + return filter_info + + def is_exist_filter(self, filter_feature_name, filter_log_name): + """if filter info exist""" + + if not self.filter_info: + return False + for 
id2name in self.filter_info["filterInfos"]: + if id2name["icFeatureName"] == filter_feature_name and id2name["icFilterLogName"] == filter_log_name: + return True + return False + + def config_merge_filter(self, filter_feature_name, filter_log_name): + """config filter""" + + if not self.is_exist_filter(filter_feature_name, filter_log_name): + conf_str = CE_NC_CREATE_CHANNEL_FILTER_HEADER + conf_str += "true" + if filter_feature_name: + conf_str += "%s" % filter_feature_name + if filter_log_name: + conf_str += "%s" % filter_log_name + + conf_str += CE_NC_CREATE_CHANNEL_FILTER_TAIL + recv_xml = set_nc_config(self.module, conf_str) + if "" not in recv_xml: + self.module.fail_json(msg='Error: Merge syslog filter failed.') + + self.updates_cmd.append("info-center filter-id bymodule-alias %s %s" + % (filter_feature_name, filter_log_name)) + self.changed = True + + def delete_merge_filter(self, filter_feature_name, filter_log_name): + """delete filter""" + + change_flag = False + if self.is_exist_filter(filter_feature_name, filter_log_name): + for id2name in self.filter_info["filterInfos"]: + if id2name["icFeatureName"] == filter_feature_name and id2name["icFilterLogName"] == filter_log_name: + change_flag = True + if change_flag: + conf_str = CE_NC_DELETE_CHANNEL_FILTER_HEADER + conf_str += "true" + if filter_feature_name: + conf_str += "%s" % filter_feature_name + if filter_log_name: + conf_str += "%s" % filter_log_name + + conf_str += CE_NC_DELETE_CHANNEL_FILTER_TAIL + recv_xml = set_nc_config(self.module, conf_str) + if "" not in recv_xml: + self.module.fail_json( + msg='Error: Merge syslog channel out direct failed.') + self.updates_cmd.append("undo info-center filter-id bymodule-alias %s %s" + % (filter_feature_name, filter_log_name)) + self.changed = True + + def get_server_ip_dict(self): + """ get server ip attributes dict.""" + + server_ip_info = dict() + # get server ip info + is_default_vpn = "false" + if not self.is_default_vpn: + self.is_default_vpn = False + if self.is_default_vpn is True: + is_default_vpn = "true" + if not self.vrf_name: + self.vrf_name = "_public_" + conf_str = CE_NC_GET_SERVER_IP_INFO_HEADER % ( + self.ip_type, self.server_ip, self.vrf_name, is_default_vpn) + conf_str += CE_NC_GET_SERVER_IP_INFO_TAIL + xml_str = get_nc_config(self.module, conf_str) + if "" in xml_str: + return server_ip_info + xml_str = xml_str.replace('\r', '').replace('\n', '').\ + replace('xmlns="urn:ietf:params:xml:ns:netconf:base:1.0"', "").\ + replace('xmlns="http://www.huawei.com/netconf/vrp"', "") + root = ElementTree.fromstring(xml_str) + server_ip_info["serverIpInfos"] = list() + syslog_servers = root.findall("syslog/syslogServers/syslogServer") + if syslog_servers: + for syslog_server in syslog_servers: + server_dict = dict() + for ele in syslog_server: + if ele.tag in ["ipType", "serverIp", "vrfName", "level", "serverPort", "facility", "chnlId", + "chnlName", "timestamp", "transportMode", "sslPolicyName", "isDefaultVpn", + "sourceIP", "isBriefFmt"]: + server_dict[ele.tag] = ele.text + server_ip_info["serverIpInfos"].append(server_dict) + return server_ip_info + + def config_merge_loghost(self): + """config loghost ip or dns""" + + conf_str = "" + is_default_vpn = "false" + if self.is_default_vpn is True: + is_default_vpn = "true" + if self.ip_type: + conf_str = CE_NC_MERGE_SERVER_IP_INFO_HEADER % (self.ip_type, self.server_ip, self.vrf_name, + is_default_vpn) + elif self.server_domain: + conf_str = CE_NC_MERGE_SERVER_DNS_INFO_HEADER % ( + self.server_domain, self.vrf_name, 
is_default_vpn) + if self.level: + conf_str += "%s" % self.level + if self.server_port: + conf_str += "%s" % self.server_port + if self.facility: + conf_str += "%s" % self.facility + if self.channel_id: + conf_str += "%s" % self.channel_id + if self.channel_name: + conf_str += "%s" % self.channel_name + if self.timestamp: + conf_str += "%s" % self.timestamp + if self.transport_mode: + conf_str += "%s" % self.transport_mode + if self.ssl_policy_name: + conf_str += "%s" % self.ssl_policy_name + if self.source_ip: + conf_str += "%s" % self.source_ip + if self.ip_type: + conf_str += CE_NC_MERGE_SERVER_IP_INFO_TAIL + elif self.server_domain: + conf_str += CE_NC_MERGE_SERVER_DNS_INFO_TAIL + recv_xml = set_nc_config(self.module, conf_str) + if "" not in recv_xml: + self.module.fail_json(msg='Error: Merge server loghost failed.') + + cmd = "info-center loghost" + if self.ip_type == "ipv4" and self.server_ip: + cmd += " %s" % self.server_ip + if self.ip_type == "ipv6" and self.server_ip: + cmd += " ipv6 %s" % self.server_ip + if self.server_domain: + cmd += " domain %s" % self.server_domain + if self.channel_id: + cmd += " channel %s" % self.channel_id + if self.channel_name: + cmd += " channel %s" % self.channel_name + if self.vrf_name: + if self.vrf_name != "_public_": + cmd += " vpn-instance %s" % self.vrf_name + if self.source_ip: + cmd += " source-ip %s" % self.source_ip + if self.facility: + cmd += " facility %s" % self.facility + if self.server_port: + cmd += " port %s" % self.server_port + if self.level: + cmd += " level %s" % self.level + if self.timestamp: + if self.timestamp == "localtime": + cmd += " local-time" + else: + cmd += " utc" + if self.transport_mode: + cmd += " transport %s" % self.transport_mode + if self.ssl_policy_name: + cmd += " ssl-policy %s" % self.ssl_policy_name + self.updates_cmd.append(cmd) + self.changed = True + + def delete_merge_loghost(self): + """delete loghost ip or dns""" + + conf_str = "" + is_default_vpn = "false" + if self.is_default_vpn is True: + is_default_vpn = "true" + if self.ip_type: + conf_str = CE_NC_DELETE_SERVER_IP_INFO_HEADER % (self.ip_type, self.server_ip, self.vrf_name, + is_default_vpn) + elif self.server_domain: + conf_str = CE_NC_DELETE_SERVER_DNS_INFO_HEADER % ( + self.server_domain, self.vrf_name, is_default_vpn) + if self.level: + conf_str += "%s" % self.level + if self.server_port: + conf_str += "%s" % self.server_port + if self.facility: + conf_str += "%s" % self.facility + if self.channel_id: + conf_str += "%s" % self.channel_id + if self.channel_name: + conf_str += "%s" % self.channel_name + if self.timestamp: + conf_str += "%s" % self.timestamp + if self.transport_mode: + conf_str += "%s" % self.transport_mode + if self.ssl_policy_name: + conf_str += "%s" % self.ssl_policy_name + if self.source_ip: + conf_str += "%s" % self.source_ip + if self.ip_type: + conf_str += CE_NC_DELETE_SERVER_IP_INFO_TAIL + elif self.server_domain: + conf_str += CE_NC_DELETE_SERVER_DNS_INFO_TAIL + recv_xml = set_nc_config(self.module, conf_str) + if "" not in recv_xml: + self.module.fail_json(msg='Error: Merge server loghost failed.') + + cmd = "undo info-center loghost" + if self.ip_type == "ipv4" and self.server_ip: + cmd += " %s" % self.server_ip + if self.ip_type == "ipv6" and self.server_ip: + cmd += " ipv6 %s" % self.server_ip + if self.server_domain: + cmd += " domain %s" % self.server_domain + if self.vrf_name: + if self.vrf_name != "_public_": + cmd += " vpn-instance %s" % self.vrf_name + self.updates_cmd.append(cmd) + self.changed = True + + 
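    # Both loghost helpers above mirror the NETCONF edit with the CLI
    # command they record in updates_cmd; for the domain-based task shown
    # in EXAMPLES, the merge path produces roughly:
    #
    #   info-center loghost domain aaa channel 1 vpn-instance aaa
    #       facility local4 port 100 level alert utc transport tcp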
def get_server_domain_dict(self): + """ get server domain attributes dict""" + + server_domain_info = dict() + # get server domain info + if not self.is_default_vpn: + self.is_default_vpn = False + if not self.vrf_name: + self.vrf_name = "_public_" + conf_str = CE_NC_GET_SERVER_DNS_INFO_HEADER + conf_str += CE_NC_GET_SERVER_DNS_INFO_TAIL + xml_str = get_nc_config(self.module, conf_str) + if "" in xml_str: + return server_domain_info + xml_str = xml_str.replace('\r', '').replace('\n', '').\ + replace('xmlns="urn:ietf:params:xml:ns:netconf:base:1.0"', "").\ + replace('xmlns="http://www.huawei.com/netconf/vrp"', "") + root = ElementTree.fromstring(xml_str) + server_domain_info["serverAddressInfos"] = list() + syslog_dnss = root.findall("syslog/syslogDNSs/syslogDNS") + if syslog_dnss: + for syslog_dns in syslog_dnss: + dns_dict = dict() + for ele in syslog_dns: + if ele.tag in ["serverDomain", "vrfName", "level", "serverPort", "facility", "chnlId", + "chnlName", "timestamp", "transportMode", "sslPolicyName", "isDefaultVpn", + "sourceIP", "isBriefFmt"]: + dns_dict[ele.tag] = ele.text + server_domain_info["serverAddressInfos"].append(dns_dict) + + return server_domain_info + + def check_need_loghost_cfg(self): + """ check need cfg""" + + need_cfg = False + find_flag = False + if self.ip_type and self.server_ip: + if self.server_ip_info: + for tmp in self.server_ip_info["serverIpInfos"]: + find_flag = True + if self.ip_type and tmp.get("ipType") != self.ip_type: + find_flag = False + if self.server_ip and tmp.get("serverIp") != self.server_ip: + find_flag = False + if self.vrf_name and tmp.get("vrfName") != self.vrf_name: + find_flag = False + if self.level and tmp.get("level") != self.level: + find_flag = False + if self.server_port and tmp.get("serverPort") != self.server_port: + find_flag = False + if self.facility and tmp.get("facility") != self.facility: + find_flag = False + if self.channel_id and tmp.get("chnlId") != self.channel_id: + find_flag = False + if self.channel_name and tmp.get("chnlName") != self.channel_name: + find_flag = False + if self.timestamp and tmp.get("timestamp") != self.timestamp: + find_flag = False + if self.transport_mode and tmp.get("transportMode") != self.transport_mode: + find_flag = False + if self.ssl_policy_name and tmp.get("sslPolicyName") != self.ssl_policy_name: + find_flag = False + if self.source_ip and tmp.get("sourceIP") != self.source_ip: + find_flag = False + if find_flag: + break + elif self.server_domain: + if self.server_domain_info: + for tmp in self.server_domain_info["serverAddressInfos"]: + find_flag = True + if self.server_domain and tmp.get("serverDomain") != self.server_domain: + find_flag = False + if self.vrf_name and tmp.get("vrfName") != self.vrf_name: + find_flag = False + if self.level and tmp.get("level") != self.level: + find_flag = False + if self.server_port and tmp.get("serverPort") != self.server_port: + find_flag = False + if self.facility and tmp.get("facility") != self.facility: + find_flag = False + if self.channel_id and tmp.get("chnlId") != self.channel_id: + find_flag = False + if self.channel_name and tmp.get("chnlName") != self.channel_name: + find_flag = False + if self.timestamp and tmp.get("timestamp") != self.timestamp: + find_flag = False + if self.transport_mode and tmp.get("transportMode") != self.transport_mode: + find_flag = False + if self.ssl_policy_name and tmp.get("sslPolicyName") != self.ssl_policy_name: + find_flag = False + if self.source_ip and tmp.get("sourceIP") != self.source_ip: + find_flag = 
False + if find_flag: + break + else: + find_flag = False + + if self.state == "present": + need_cfg = bool(not find_flag) + elif self.state == "absent": + need_cfg = bool(find_flag) + return need_cfg + + def get_syslog_global(self): + """get syslog global attributes""" + + cur_global_info = dict() + conf_str = CE_NC_GET_CENTER_GLOBAL_INFO_HEADER + if self.info_center_enable: + conf_str += "" + if self.packet_priority: + conf_str += "" + if self.suppress_enable: + conf_str += "" + conf_str += CE_NC_GET_CENTER_GLOBAL_INFO_TAIL + xml_str = get_nc_config(self.module, conf_str) + if "" in xml_str: + return cur_global_info + else: + xml_str = xml_str.replace('\r', '').replace('\n', '').\ + replace('xmlns="urn:ietf:params:xml:ns:netconf:base:1.0"', "").\ + replace('xmlns="http://www.huawei.com/netconf/vrp"', "") + + root = ElementTree.fromstring(xml_str) + global_info = root.findall( + "syslog/globalParam") + + if global_info: + for tmp in global_info: + for site in tmp: + if site.tag in ["icEnable", "packetPriority", "suppressEnable"]: + cur_global_info[site.tag] = site.text + return cur_global_info + + def merge_syslog_global(self): + """config global""" + + conf_str = CE_NC_MERGE_CENTER_GLOBAL_INFO_HEADER + if self.info_center_enable: + conf_str += "%s" % self.info_center_enable + if self.packet_priority: + if self.state == "present": + packet_priority = self.packet_priority + else: + packet_priority = 0 + conf_str += "%s" % packet_priority + if self.suppress_enable: + conf_str += "%s" % self.suppress_enable + + conf_str += CE_NC_MERGE_CENTER_GLOBAL_INFO_TAIL + + if self.info_center_enable == "true" and self.cur_global_info["icEnable"] != self.info_center_enable: + cmd = "info-center enable" + self.updates_cmd.append(cmd) + self.changed = True + if self.suppress_enable == "true" and self.cur_global_info["suppressEnable"] != self.suppress_enable: + cmd = "info-center statistic-suppress enable" + self.updates_cmd.append(cmd) + self.changed = True + if self.info_center_enable == "false" and self.cur_global_info["icEnable"] != self.info_center_enable: + cmd = "undo info-center enable" + self.updates_cmd.append(cmd) + self.changed = True + if self.suppress_enable == "false" and self.cur_global_info["suppressEnable"] != self.suppress_enable: + cmd = "undo info-center statistic-suppress enable" + self.updates_cmd.append(cmd) + self.changed = True + + if self.state == "present": + if self.packet_priority: + if self.cur_global_info["packetPriority"] != self.packet_priority: + cmd = "info-center syslog packet-priority %s" % self.packet_priority + self.updates_cmd.append(cmd) + self.changed = True + if self.state == "absent": + if self.packet_priority: + if self.cur_global_info["packetPriority"] == self.packet_priority: + cmd = "undo info-center syslog packet-priority %s" % self.packet_priority + self.updates_cmd.append(cmd) + self.changed = True + if self.changed: + recv_xml = set_nc_config(self.module, conf_str) + if "" not in recv_xml: + self.module.fail_json(msg='Error: Merge syslog global failed.') + + def get_syslog_logfile(self): + """get syslog logfile""" + + cur_logfile_info = dict() + conf_str = CE_NC_GET_LOG_FILE_INFO_HEADER + conf_str += "log" + if self.logfile_max_num: + conf_str += "" + if self.logfile_max_size: + conf_str += "" + conf_str += CE_NC_GET_LOG_FILE_INFO_TAIL + xml_str = get_nc_config(self.module, conf_str) + if "" in xml_str: + return cur_logfile_info + else: + xml_str = xml_str.replace('\r', '').replace('\n', '').\ + replace('xmlns="urn:ietf:params:xml:ns:netconf:base:1.0"', 
"").\ + replace('xmlns="http://www.huawei.com/netconf/vrp"', "") + + root = ElementTree.fromstring(xml_str) + logfile_info = root.findall( + "syslog/icLogFileInfos/icLogFileInfo") + if logfile_info: + for tmp in logfile_info: + for site in tmp: + if site.tag in ["maxFileNum", "maxFileSize"]: + cur_logfile_info[site.tag] = site.text + return cur_logfile_info + + def merge_syslog_logfile(self): + """config logfile""" + + logfile_max_num = "200" + conf_str = CE_NC_MERGE_LOG_FILE_INFO_HEADER + if self.logfile_max_num: + if self.state == "present": + logfile_max_num = self.logfile_max_num + else: + if self.logfile_max_num != "200" and self.cur_logfile_info["maxFileNum"] == self.logfile_max_num: + logfile_max_num = "200" + conf_str += "%s" % logfile_max_num + + if self.logfile_max_size: + logfile_max_size = "32" + if self.state == "present": + logfile_max_size = self.logfile_max_size + else: + if self.logfile_max_size != "32" and self.cur_logfile_info["maxFileSize"] == self.logfile_max_size: + logfile_max_size = "32" + conf_str += "%s" % logfile_max_size + + conf_str += "log" + conf_str += CE_NC_MERGE_LOG_FILE_INFO_TAIL + + if self.state == "present": + if self.logfile_max_num: + if self.cur_logfile_info["maxFileNum"] != self.logfile_max_num: + cmd = "info-center max-logfile-number %s" % self.logfile_max_num + self.updates_cmd.append(cmd) + self.changed = True + if self.logfile_max_size: + if self.cur_logfile_info["maxFileSize"] != self.logfile_max_size: + cmd = "info-center logfile size %s" % self.logfile_max_size + self.updates_cmd.append(cmd) + self.changed = True + if self.state == "absent": + if self.logfile_max_num and self.logfile_max_num != "200": + if self.cur_logfile_info["maxFileNum"] == self.logfile_max_num: + cmd = "undo info-center max-logfile-number" + self.updates_cmd.append(cmd) + self.changed = True + if self.logfile_max_size and self.logfile_max_size != "32": + if self.cur_logfile_info["maxFileSize"] == self.logfile_max_size: + cmd = "undo info-center logfile size" + self.updates_cmd.append(cmd) + self.changed = True + + if self.changed: + recv_xml = set_nc_config(self.module, conf_str) + if "" not in recv_xml: + self.module.fail_json( + msg='Error: Merge syslog logfile failed.') + + def check_params(self): + """Check all input params""" + + # packet_priority check + if self.packet_priority: + if not self.packet_priority.isdigit(): + self.module.fail_json( + msg='Error: The parameter of packet priority is invalid.') + if int(self.packet_priority) > 7 or int(self.packet_priority) < 0: + self.module.fail_json( + msg='Error: The packet priority must be an integer between 0 and 7.') + + # logfile_max_num check + if self.logfile_max_num: + if not self.logfile_max_num.isdigit(): + self.module.fail_json( + msg='Error: The parameter of logfile_max_num is invalid.') + if int(self.logfile_max_num) > 500 or int(self.logfile_max_num) < 3: + self.module.fail_json( + msg='Error: The logfile_max_num must be an integer between 3 and 500.') + + # channel_id check + if self.channel_id: + if not self.channel_id.isdigit(): + self.module.fail_json( + msg='Error: The parameter of channel_id is invalid.') + if int(self.channel_id) > 9 or int(self.channel_id) < 0: + self.module.fail_json( + msg='Error: The channel_id must be an integer between 0 and 9.') + + # channel_cfg_name check + if self.channel_cfg_name: + if len(self.channel_cfg_name) > 30 \ + or len(self.channel_cfg_name.replace(' ', '')) < 1: + self.module.fail_json( + msg='Error: channel_cfg_name is not in the range from 1 to 30.') + + # 
filter_feature_name check + if self.filter_feature_name: + if len(self.filter_feature_name) > 31 \ + or len(self.filter_feature_name.replace(' ', '')) < 1: + self.module.fail_json( + msg='Error: filter_feature_name is not in the range from 1 to 31.') + + # filter_log_name check + if self.filter_log_name: + if len(self.filter_log_name) > 63 \ + or len(self.filter_log_name.replace(' ', '')) < 1: + self.module.fail_json( + msg='Error: filter_log_name is not in the range from 1 to 63.') + + # server_ip check + if self.server_ip: + if not check_ip_addr(self.server_ip): + self.module.fail_json( + msg='Error: The %s is not a valid ip address' % self.server_ip) + # source_ip check + if self.source_ip: + if not check_ip_addr(self.source_ip): + self.module.fail_json( + msg='Error: The %s is not a valid ip address' % self.source_ip) + + # server_domain check + if self.server_domain: + if len(self.server_domain) > 255 \ + or len(self.server_domain.replace(' ', '')) < 1: + self.module.fail_json( + msg='Error: server_domain is not in the range from 1 to 255.') + + # vrf_name check + if self.vrf_name: + if len(self.vrf_name) > 31 \ + or len(self.vrf_name.replace(' ', '')) < 1: + self.module.fail_json( + msg='Error: vrf_name is not in the range from 1 to 31.') + + # server_port check + if self.server_port: + if not self.server_port.isdigit(): + self.module.fail_json( + msg='Error: The parameter of server_port is invalid.') + if int(self.server_port) > 65535 or int(self.server_port) < 1: + self.module.fail_json( + msg='Error: The server_port must be an integer between 1 and 65535.') + + # channel_name check + if self.channel_name: + if len(self.channel_name) > 31 \ + or len(self.channel_name.replace(' ', '')) < 1: + self.module.fail_json( + msg='Error: channel_name is not in the range from 1 to 30.') + + # ssl_policy_name check + if self.ssl_policy_name: + if len(self.ssl_policy_name) > 23 \ + or len(self.ssl_policy_name.replace(' ', '')) < 1: + self.module.fail_json( + msg='Error: ssl_policy_name is not in the range from 1 to 23.') + + def get_proposed(self): + """get proposed info""" + + if self.info_center_enable: + self.proposed["info_center_enable"] = self.info_center_enable + if self.packet_priority: + self.proposed["packet_priority"] = self.packet_priority + if self.suppress_enable: + self.proposed["suppress_enable"] = self.suppress_enable + if self.logfile_max_num: + self.proposed["logfile_max_num"] = self.logfile_max_num + if self.logfile_max_size: + self.proposed["logfile_max_size"] = self.logfile_max_size + if self.channel_id: + self.proposed["channel_id"] = self.channel_id + if self.channel_cfg_name: + self.proposed["channel_cfg_name"] = self.channel_cfg_name + if self.channel_out_direct: + self.proposed["channel_out_direct"] = self.channel_out_direct + if self.filter_feature_name: + self.proposed["filter_feature_name"] = self.filter_feature_name + if self.filter_log_name: + self.proposed["filter_log_name"] = self.filter_log_name + if self.ip_type: + self.proposed["ip_type"] = self.ip_type + if self.server_ip: + self.proposed["server_ip"] = self.server_ip + if self.server_domain: + self.proposed["server_domain"] = self.server_domain + if self.vrf_name: + self.proposed["vrf_name"] = self.vrf_name + if self.level: + self.proposed["level"] = self.level + if self.server_port: + self.proposed["server_port"] = self.server_port + if self.facility: + self.proposed["facility"] = self.facility + if self.channel_name: + self.proposed["channel_name"] = self.channel_name + if self.timestamp: + 
self.proposed["timestamp"] = self.timestamp + if self.ssl_policy_name: + self.proposed["ssl_policy_name"] = self.ssl_policy_name + if self.transport_mode: + self.proposed["transport_mode"] = self.transport_mode + if self.is_default_vpn: + self.proposed["is_default_vpn"] = self.is_default_vpn + if self.source_ip: + self.proposed["source_ip"] = self.source_ip + if self.state: + self.proposed["state"] = self.state + + def get_existing(self): + """get existing info""" + + if self.info_center_enable: + self.existing["info_center_enable"] = self.cur_global_info[ + "icEnable"] + if self.packet_priority: + self.existing["packet_priority"] = self.cur_global_info[ + "packetPriority"] + if self.suppress_enable: + self.existing["suppress_enable"] = self.cur_global_info[ + "suppressEnable"] + if self.logfile_max_num: + self.existing["logfile_max_num"] = self.cur_logfile_info[ + "maxFileNum"] + if self.logfile_max_size: + self.existing["logfile_max_size"] = self.cur_logfile_info[ + "maxFileSize"] + + if self.channel_id and self.channel_cfg_name: + if self.channel_info: + self.existing["channel_id_info"] = self.channel_info[ + "channelInfos"] + if self.channel_out_direct and self.channel_id: + if self.channel_direct_info: + self.existing["channel_out_direct_info"] = self.channel_direct_info[ + "channelDirectInfos"] + if self.filter_feature_name and self.filter_log_name: + if self.filter_info: + self.existing["filter_id_info"] = self.filter_info[ + "filterInfos"] + if self.ip_type: + if self.server_ip_info: + self.existing["server_ip_info"] = self.server_ip_info[ + "serverIpInfos"] + + if self.server_domain: + if self.server_domain_info: + self.existing["server_domain_info"] = self.server_domain_info[ + "serverAddressInfos"] + + def get_end_state(self): + """get end state info""" + + if self.info_center_enable or self.packet_priority or self.suppress_enable: + self.cur_global_info = self.get_syslog_global() + if self.logfile_max_num or self.logfile_max_size: + self.cur_logfile_info = self.get_syslog_logfile() + if self.channel_id and self.channel_cfg_name: + self.channel_info = self.get_channel_dict() + if self.channel_out_direct and self.channel_id: + self.channel_direct_info = self.get_channel_direct_dict() + if self.filter_feature_name and self.filter_log_name: + self.filter_info = self.get_filter_dict() + if self.ip_type: + self.server_ip_info = self.get_server_ip_dict() + if self.server_domain: + self.server_domain_info = self.get_server_domain_dict() + + if self.info_center_enable: + self.end_state[ + "info_center_enable"] = self.cur_global_info["icEnable"] + if self.packet_priority: + self.end_state["packet_priority"] = self.cur_global_info[ + "packetPriority"] + if self.suppress_enable: + self.end_state["suppress_enable"] = self.cur_global_info[ + "suppressEnable"] + if self.logfile_max_num: + self.end_state["logfile_max_num"] = self.cur_logfile_info[ + "maxFileNum"] + if self.logfile_max_size: + self.end_state["logfile_max_size"] = self.cur_logfile_info[ + "maxFileSize"] + + if self.channel_id and self.channel_cfg_name: + if self.channel_info: + self.end_state["channel_id_info"] = self.channel_info[ + "channelInfos"] + + if self.channel_out_direct and self.channel_id: + if self.channel_direct_info: + self.end_state["channel_out_direct_info"] = self.channel_direct_info[ + "channelDirectInfos"] + + if self.filter_feature_name and self.filter_log_name: + if self.filter_info: + self.end_state["filter_id_info"] = self.filter_info[ + "filterInfos"] + + if self.ip_type: + if self.server_ip_info: + 
self.end_state["server_ip_info"] = self.server_ip_info[ + "serverIpInfos"] + + if self.server_domain: + if self.server_domain_info: + self.end_state["server_domain_info"] = self.server_domain_info[ + "serverAddressInfos"] + if self.end_state == self.existing: + self.changed = False + + def work(self): + """worker""" + + self.check_params() + if self.info_center_enable or self.packet_priority or self.suppress_enable: + self.cur_global_info = self.get_syslog_global() + if self.logfile_max_num or self.logfile_max_size: + self.cur_logfile_info = self.get_syslog_logfile() + if self.channel_id: + self.channel_info = self.get_channel_dict() + if self.channel_out_direct: + self.channel_direct_info = self.get_channel_direct_dict() + if self.filter_feature_name and self.filter_log_name: + self.filter_info = self.get_filter_dict() + if self.ip_type: + self.server_ip_info = self.get_server_ip_dict() + if self.server_domain: + self.server_domain_info = self.get_server_domain_dict() + self.get_existing() + self.get_proposed() + if self.info_center_enable or self.packet_priority or self.suppress_enable: + self.merge_syslog_global() + + if self.logfile_max_num or self.logfile_max_size: + self.merge_syslog_logfile() + + if self.server_ip: + if not self.ip_type: + self.module.fail_json( + msg='Error: ip_type and server_ip must be exist at the same time.') + if self.ip_type: + if not self.server_ip: + self.module.fail_json( + msg='Error: ip_type and server_ip must be exist at the same time.') + + if self.ip_type or self.server_domain or self.channel_id or self.filter_feature_name: + if self.ip_type and self.server_domain: + self.module.fail_json( + msg='Error: ip_type and server_domain can not be exist at the same time.') + if self.channel_id and self.channel_name: + self.module.fail_json( + msg='Error: channel_id and channel_name can not be exist at the same time.') + if self.ssl_policy_name: + if self.transport_mode == "udp": + self.module.fail_json( + msg='Error: transport_mode: udp does not support ssl_policy.') + if not self.transport_mode: + self.module.fail_json( + msg='Error: transport_mode, ssl_policy_name must be exist at the same time.') + if self.ip_type == "ipv6": + if self.vrf_name and self.vrf_name != "_public_": + self.module.fail_json( + msg='Error: ipType:ipv6 only support default vpn:_public_.') + if self.is_default_vpn is True: + if self.vrf_name: + if self.vrf_name != "_public_": + self.module.fail_json( + msg='Error: vrf_name should be _public_ when is_default_vpn is True.') + else: + self.vrf_name = "_public_" + else: + if self.vrf_name == "_public_": + self.module.fail_json( + msg='Error: The default vpn value is _public_, but is_default_vpn is False.') + if self.state == "present": + # info-center channel channel-number name channel-name + if self.channel_id and self.channel_cfg_name: + self.config_merge_syslog_channel( + self.channel_id, self.channel_cfg_name) + # info-center { console | logfile | monitor | snmp | logbuffer + # | trapbuffer } channel channel-number + if self.channel_out_direct and self.channel_id: + self.config_merge_out_direct( + self.channel_out_direct, self.channel_id) + # info-center filter-id bymodule-alias modname alias + if self.filter_feature_name and self.filter_log_name: + self.config_merge_filter( + self.filter_feature_name, self.filter_log_name) + if self.ip_type and self.server_ip: + if not self.vrf_name: + self.vrf_name = "_public_" + if self.check_need_loghost_cfg(): + self.config_merge_loghost() + if self.server_domain: + if not self.vrf_name: + 
self.vrf_name = "_public_" + if self.check_need_loghost_cfg(): + self.config_merge_loghost() + + elif self.state == "absent": + if self.channel_id: + self.delete_merge_syslog_channel( + self.channel_id, self.channel_cfg_name) + if self.channel_out_direct: + self.delete_merge_out_direct( + self.channel_out_direct, self.channel_id) + if self.filter_feature_name and self.filter_log_name: + self.delete_merge_filter( + self.filter_feature_name, self.filter_log_name) + if self.ip_type and self.server_ip: + if not self.vrf_name: + self.vrf_name = "_public_" + if self.check_need_loghost_cfg(): + self.delete_merge_loghost() + if self.server_domain: + if not self.vrf_name: + self.vrf_name = "_public_" + if self.check_need_loghost_cfg(): + self.delete_merge_loghost() + + self.get_end_state() + + self.results['changed'] = self.changed + self.results['proposed'] = self.proposed + self.results['existing'] = self.existing + self.results['end_state'] = self.end_state + if self.changed: + self.results['updates'] = self.updates_cmd + else: + self.results['updates'] = list() + + self.module.exit_json(**self.results) + + +def main(): + """Module main""" + + argument_spec = dict( + info_center_enable=dict(choices=['true', 'false']), + packet_priority=dict(type='str'), + suppress_enable=dict(choices=['true', 'false']), + logfile_max_num=dict(type='str'), + logfile_max_size=dict(choices=['4', '8', '16', '32']), + channel_id=dict(type='str'), + channel_cfg_name=dict(type='str'), + channel_out_direct=dict(choices=['console', 'monitor', + 'trapbuffer', 'logbuffer', 'snmp', 'logfile']), + filter_feature_name=dict(type='str'), + filter_log_name=dict(type='str'), + ip_type=dict(choices=['ipv4', 'ipv6']), + server_ip=dict(type='str'), + server_domain=dict(type='str'), + is_default_vpn=dict(default=False, type='bool'), + vrf_name=dict(type='str'), + level=dict(choices=['emergencies', 'alert', 'critical', 'error', 'warning', 'notification', + 'informational', 'debugging']), + server_port=dict(type='str'), + facility=dict(choices=['local0', 'local1', 'local2', + 'local3', 'local4', 'local5', 'local6', 'local7']), + channel_name=dict(type='str'), + timestamp=dict(choices=['UTC', 'localtime']), + transport_mode=dict(choices=['tcp', 'udp']), + ssl_policy_name=dict(type='str'), + source_ip=dict(type='str'), + state=dict(choices=['present', 'absent'], default='present') + + ) + argument_spec.update(ce_argument_spec) + module = InfoCenterGlobal(argument_spec) + module.work() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/cloudengine/ce_info_center_log.py b/plugins/modules/network/cloudengine/ce_info_center_log.py new file mode 100644 index 0000000000..fc04cffdfb --- /dev/null +++ b/plugins/modules/network/cloudengine/ce_info_center_log.py @@ -0,0 +1,548 @@ +#!/usr/bin/python +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
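Before the next module: ce_info_center_global above, and the info-center modules that follow, share one control flow — validate parameters, snapshot current state over NETCONF, compute the proposed change, apply it, re-read, and report changed/updates. A condensed, non-authoritative skeleton of that flow; the method names apply_merge/apply_delete are hypothetical stand-ins, not a drop-in for any of these classes:

    class CloudEngineModuleSkeleton(object):
        """Shape shared by the ce_info_center_* modules above and below."""

        def work(self):
            self.check_params()      # range/format validation; fail_json on error
            self.get_existing()      # NETCONF <get> snapshot before the change
            self.get_proposed()      # echo of the requested parameters
            if self.state == "present":
                self.apply_merge()   # hypothetical: build and send an <edit-config> merge
            else:
                self.apply_delete()  # hypothetical: restore defaults or delete the node
            self.get_end_state()     # snapshot after the change
            self.results["changed"] = self.changed
            self.results["updates"] = self.updates_cmd if self.changed else list()
            self.module.exit_json(**self.results)
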
+# + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: ce_info_center_log +short_description: Manages information center log configuration on HUAWEI CloudEngine switches. +description: + - Setting the Timestamp Format of Logs. + Configuring the Device to Output Logs to the Log Buffer. +author: QijunPan (@QijunPan) +notes: + - This module requires the netconf system service be enabled on the remote device being managed. + - Recommended connection is C(netconf). + - This module also works with C(local) connections for legacy playbooks. +options: + log_time_stamp: + description: + - Sets the timestamp format of logs. + choices: ['date_boot', 'date_second', 'date_tenthsecond', 'date_millisecond', + 'shortdate_second', 'shortdate_tenthsecond', 'shortdate_millisecond', + 'formatdate_second', 'formatdate_tenthsecond', 'formatdate_millisecond'] + log_buff_enable: + description: + - Enables the Switch to send logs to the log buffer. + default: no_use + choices: ['no_use','true', 'false'] + log_buff_size: + description: + - Specifies the maximum number of logs in the log buffer. + The value is an integer that ranges from 0 to 10240. If logbuffer-size is 0, logs are not displayed. + module_name: + description: + - Specifies the name of a module. + The value is a module name in registration logs. + channel_id: + description: + - Specifies a channel ID. + The value is an integer ranging from 0 to 9. + log_enable: + description: + - Indicates whether log filtering is enabled. + default: no_use + choices: ['no_use','true', 'false'] + log_level: + description: + - Specifies a log severity. + choices: ['emergencies', 'alert', 'critical', 'error', + 'warning', 'notification', 'informational', 'debugging'] + state: + description: + - Determines whether the config should be present or not + on the device. 
+ default: present + choices: ['present', 'absent'] +''' + +EXAMPLES = ''' + +- name: CloudEngine info center log test + hosts: cloudengine + connection: local + gather_facts: no + vars: + cli: + host: "{{ inventory_hostname }}" + port: "{{ ansible_ssh_port }}" + username: "{{ username }}" + password: "{{ password }}" + transport: cli + + tasks: + + - name: "Setting the timestamp format of logs" + ce_info_center_log: + log_time_stamp: date_tenthsecond + provider: "{{ cli }}" + + - name: "Enabled to output information to the log buffer" + ce_info_center_log: + log_buff_enable: true + provider: "{{ cli }}" + + - name: "Set the maximum number of logs in the log buffer" + ce_info_center_log: + log_buff_size: 100 + provider: "{{ cli }}" + + - name: "Set a rule for outputting logs to a channel" + ce_info_center_log: + module_name: aaa + channel_id: 1 + log_enable: true + log_level: critical + provider: "{{ cli }}" +''' + +RETURN = ''' +proposed: + description: k/v pairs of parameters passed into module + returned: verbose mode + type: dict + sample: {"log_time_stamp": "date_tenthsecond", "state": "present"} +existing: + description: k/v pairs of existing configuration + returned: verbose mode + type: dict + sample: {"log_time_stamp": "date_second"} +end_state: + description: k/v pairs of configuration after module execution + returned: verbose mode + type: dict + sample: {"log_time_stamp": "date_tenthsecond"} +updates: + description: commands sent to the device + returned: always + type: list + sample: ["info-center timestamp log date precision-time tenth-second"] +changed: + description: check to see if a change was made on the device + returned: always + type: bool + sample: true +''' + +from xml.etree import ElementTree +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.network.cloudengine.ce import get_nc_config, set_nc_config, ce_argument_spec + + +CE_NC_GET_LOG = """ + + + + + + + + + + %s + %s + + + + + + + +""" + +CE_NC_GET_LOG_GLOBAL = """ + + + + + + + + + +""" + +TIME_STAMP_DICT = {"date_boot": "boot", + "date_second": "date precision-time second", + "date_tenthsecond": "date precision-time tenth-second", + "date_millisecond": "date precision-time millisecond", + "shortdate_second": "short-date precision-time second", + "shortdate_tenthsecond": "short-date precision-time tenth-second", + "shortdate_millisecond": "short-date precision-time millisecond", + "formatdate_second": "format-date precision-time second", + "formatdate_tenthsecond": "format-date precision-time tenth-second", + "formatdate_millisecond": "format-date precision-time millisecond"} + +CHANNEL_DEFAULT_LOG_STATE = {"0": "true", + "1": "true", + "2": "true", + "3": "false", + "4": "true", + "5": "false", + "6": "true", + "7": "true", + "8": "true", + "9": "true"} + +CHANNEL_DEFAULT_LOG_LEVEL = {"0": "warning", + "1": "warning", + "2": "informational", + "3": "informational", + "4": "warning", + "5": "debugging", + "6": "debugging", + "7": "warning", + "8": "debugging", + "9": "debugging"} + + +class InfoCenterLog(object): + """ + Manages information center log configuration + """ + + def __init__(self, argument_spec): + self.spec = argument_spec + self.module = None + self.init_module() + + # module input info + self.log_time_stamp = self.module.params['log_time_stamp'] + self.log_buff_enable = self.module.params['log_buff_enable'] + self.log_buff_size = self.module.params['log_buff_size'] + self.module_name = self.module.params['module_name'] + 
self.channel_id = self.module.params['channel_id'] + self.log_enable = self.module.params['log_enable'] + self.log_level = self.module.params['log_level'] + self.state = self.module.params['state'] + + # state + self.log_dict = dict() + self.changed = False + self.updates_cmd = list() + self.commands = list() + self.results = dict() + self.proposed = dict() + self.existing = dict() + self.end_state = dict() + + def init_module(self): + """init module""" + + self.module = AnsibleModule(argument_spec=self.spec, supports_check_mode=True) + + def check_response(self, xml_str, xml_name): + """Check if response message is already succeed""" + + if "" not in xml_str: + self.module.fail_json(msg='Error: %s failed.' % xml_name) + + def get_log_dict(self): + """ log config dict""" + + log_dict = dict() + if self.module_name: + if self.module_name.lower() == "default": + conf_str = CE_NC_GET_LOG % (self.module_name.lower(), self.channel_id) + else: + conf_str = CE_NC_GET_LOG % (self.module_name.upper(), self.channel_id) + else: + conf_str = CE_NC_GET_LOG_GLOBAL + + xml_str = get_nc_config(self.module, conf_str) + if "" in xml_str: + return log_dict + + xml_str = xml_str.replace('\r', '').replace('\n', '').\ + replace('xmlns="urn:ietf:params:xml:ns:netconf:base:1.0"', "").\ + replace('xmlns="http://www.huawei.com/netconf/vrp"', "") + root = ElementTree.fromstring(xml_str) + + # get global param info + glb = root.find("syslog/globalParam") + if glb: + for attr in glb: + if attr.tag in ["bufferSize", "logTimeStamp", "icLogBuffEn"]: + log_dict[attr.tag] = attr.text + + # get info-center source info + log_dict["source"] = dict() + src = root.find("syslog/icSources/icSource") + if src: + for attr in src: + if attr.tag in ["moduleName", "icChannelId", "icChannelName", "logEnFlg", "logEnLevel"]: + log_dict["source"][attr.tag] = attr.text + + return log_dict + + def config_log_global(self): + """config log global param""" + + xml_str = '' + if self.log_time_stamp: + if self.state == "present" and self.log_time_stamp.upper() != self.log_dict.get("logTimeStamp"): + xml_str += '%s' % self.log_time_stamp.upper() + self.updates_cmd.append( + "info-center timestamp log %s" % TIME_STAMP_DICT.get(self.log_time_stamp)) + elif self.state == "absent" and self.log_time_stamp.upper() == self.log_dict.get("logTimeStamp"): + xml_str += 'DATE_SECOND' # set default + self.updates_cmd.append("undo info-center timestamp log") + else: + pass + + if self.log_buff_enable != 'no_use': + if self.log_dict.get("icLogBuffEn") != self.log_buff_enable: + xml_str += '%s' % self.log_buff_enable + if self.log_buff_enable == "true": + self.updates_cmd.append("info-center logbuffer") + else: + self.updates_cmd.append("undo info-center logbuffer") + + if self.log_buff_size: + if self.state == "present" and self.log_dict.get("bufferSize") != self.log_buff_size: + xml_str += '%s' % self.log_buff_size + self.updates_cmd.append( + "info-center logbuffer size %s" % self.log_buff_size) + elif self.state == "absent" and self.log_dict.get("bufferSize") == self.log_buff_size: + xml_str += '512' + self.updates_cmd.append("undo info-center logbuffer size") + + if xml_str == '': + return "" + else: + xml_str += '' + return xml_str + + def config_log_soruce(self): + """config info-center sources""" + + xml_str = '' + if not self.module_name or not self.channel_id: + return xml_str + + source = self.log_dict["source"] + if self.state == "present": + xml_str = '' + cmd = 'info-center source %s channel %s log' % ( + self.module_name, self.channel_id) + else: 
+ if not source or self.module_name != source.get("moduleName").lower() or \ + self.channel_id != source.get("icChannelId"): + return '' + + if self.log_enable == 'no_use' and not self.log_level: + xml_str = '' + else: + xml_str = '' + cmd = 'undo info-center source %s channel %s log' % ( + self.module_name, self.channel_id) + + xml_str += '%s%s' % ( + self.module_name, self.channel_id) + + # log_enable + if self.log_enable != 'no_use': + if self.state == "present" and (not source or self.log_enable != source.get("logEnFlg")): + xml_str += '%s' % self.log_enable + if self.log_enable == "true": + cmd += ' state on' + else: + cmd += ' state off' + elif self.state == "absent" and source and self.log_level == source.get("logEnLevel"): + xml_str += '%s' % CHANNEL_DEFAULT_LOG_STATE.get(self.channel_id) + cmd += ' state' + + # log_level + if self.log_level: + if self.state == "present" and (not source or self.log_level != source.get("logEnLevel")): + xml_str += '%s' % self.log_level + cmd += ' level %s' % self.log_level + elif self.state == "absent" and source and self.log_level == source.get("logEnLevel"): + xml_str += '%s' % CHANNEL_DEFAULT_LOG_LEVEL.get(self.channel_id) + cmd += ' level' + + if xml_str.endswith(""): + if self.log_enable == 'no_use' and not self.log_level and self.state == "absent": + xml_str += '' + self.updates_cmd.append(cmd) + return xml_str + else: + return '' + else: + xml_str += '' + self.updates_cmd.append(cmd) + return xml_str + + def netconf_load_config(self, xml_str): + """load log config by netconf""" + + if not xml_str: + return + + xml_cfg = """ + + + %s + + """ % xml_str + + recv_xml = set_nc_config(self.module, xml_cfg) + self.check_response(recv_xml, "SET_LOG") + self.changed = True + + def check_params(self): + """Check all input params""" + + # check log_buff_size ranges from 0 to 10240 + if self.log_buff_size: + if not self.log_buff_size.isdigit(): + self.module.fail_json( + msg="Error: log_buff_size is not digit.") + if int(self.log_buff_size) < 0 or int(self.log_buff_size) > 10240: + self.module.fail_json( + msg="Error: log_buff_size is not ranges from 0 to 10240.") + + # check channel_id ranging from 0 to 9 + if self.channel_id: + if not self.channel_id.isdigit(): + self.module.fail_json(msg="Error: channel_id is not digit.") + if int(self.channel_id) < 0 or int(self.channel_id) > 9: + self.module.fail_json( + msg="Error: channel_id is not ranges from 0 to 9.") + + # module_name and channel_id must be set at the same time + if bool(self.module_name) != bool(self.channel_id): + self.module.fail_json( + msg="Error: module_name and channel_id must be set at the same time.") + + def get_proposed(self): + """get proposed info""" + + if self.log_time_stamp: + self.proposed["log_time_stamp"] = self.log_time_stamp + if self.log_buff_enable != 'no_use': + self.proposed["log_buff_enable"] = self.log_buff_enable + if self.log_buff_size: + self.proposed["log_buff_size"] = self.log_buff_size + if self.module_name: + self.proposed["module_name"] = self.module_name + if self.channel_id: + self.proposed["channel_id"] = self.channel_id + if self.log_enable != 'no_use': + self.proposed["log_enable"] = self.log_enable + if self.log_level: + self.proposed["log_level"] = self.log_level + self.proposed["state"] = self.state + + def get_existing(self): + """get existing info""" + + if not self.log_dict: + return + + if self.log_time_stamp: + self.existing["log_time_stamp"] = self.log_dict.get("logTimeStamp").lower() + if self.log_buff_enable != 'no_use': + 
self.existing["log_buff_enable"] = self.log_dict.get("icLogBuffEn") + if self.log_buff_size: + self.existing["log_buff_size"] = self.log_dict.get("bufferSize") + if self.module_name: + self.existing["source"] = self.log_dict.get("source") + + def get_end_state(self): + """get end state info""" + + log_dict = self.get_log_dict() + if not log_dict: + return + + if self.log_time_stamp: + self.end_state["log_time_stamp"] = log_dict.get("logTimeStamp").lower() + if self.log_buff_enable != 'no_use': + self.end_state["log_buff_enable"] = log_dict.get("icLogBuffEn") + if self.log_buff_size: + self.end_state["log_buff_size"] = log_dict.get("bufferSize") + if self.module_name: + self.end_state["source"] = log_dict.get("source") + + def work(self): + """worker""" + + self.check_params() + self.log_dict = self.get_log_dict() + self.get_existing() + self.get_proposed() + + # deal present or absent + xml_str = '' + if self.log_time_stamp or self.log_buff_enable != 'no_use' or self.log_buff_size: + xml_str += self.config_log_global() + + if self.module_name: + xml_str += self.config_log_soruce() + + if xml_str: + self.netconf_load_config(xml_str) + self.changed = True + + self.get_end_state() + self.results['changed'] = self.changed + self.results['proposed'] = self.proposed + self.results['existing'] = self.existing + self.results['end_state'] = self.end_state + if self.changed: + self.results['updates'] = self.updates_cmd + else: + self.results['updates'] = list() + + self.module.exit_json(**self.results) + + +def main(): + """Module main""" + + argument_spec = dict( + log_time_stamp=dict(required=False, type='str', + choices=['date_boot', 'date_second', 'date_tenthsecond', 'date_millisecond', + 'shortdate_second', 'shortdate_tenthsecond', 'shortdate_millisecond', + 'formatdate_second', 'formatdate_tenthsecond', 'formatdate_millisecond']), + log_buff_enable=dict(type='str', default='no_use', choices=['no_use', 'true', 'false']), + log_buff_size=dict(required=False, type='str'), + module_name=dict(required=False, type='str'), + channel_id=dict(required=False, type='str'), + log_enable=dict(type='str', default='no_use', choices=['no_use', 'true', 'false']), + log_level=dict(required=False, type='str', + choices=['emergencies', 'alert', 'critical', 'error', + 'warning', 'notification', 'informational', 'debugging']), + state=dict(required=False, default='present', + choices=['present', 'absent']) + ) + + argument_spec.update(ce_argument_spec) + module = InfoCenterLog(argument_spec) + module.work() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/cloudengine/ce_info_center_trap.py b/plugins/modules/network/cloudengine/ce_info_center_trap.py new file mode 100644 index 0000000000..31d5276e69 --- /dev/null +++ b/plugins/modules/network/cloudengine/ce_info_center_trap.py @@ -0,0 +1,697 @@ +#!/usr/bin/python +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
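One behavior of config_log_soruce() above worth calling out: with state: absent, a matched icSource is not always deleted — when log_enable or log_level was supplied, the module writes the channel's default value back from the CHANNEL_DEFAULT_LOG_STATE and CHANNEL_DEFAULT_LOG_LEVEL tables instead. A runnable sketch of that fallback, reusing the module's tables; absent_reset itself is hypothetical:

    CHANNEL_DEFAULT_LOG_STATE = {"0": "true", "1": "true", "2": "true", "3": "false",
                                 "4": "true", "5": "false", "6": "true", "7": "true",
                                 "8": "true", "9": "true"}
    CHANNEL_DEFAULT_LOG_LEVEL = {"0": "warning", "1": "warning", "2": "informational",
                                 "3": "informational", "4": "warning", "5": "debugging",
                                 "6": "debugging", "7": "warning", "8": "debugging",
                                 "9": "debugging"}

    def absent_reset(channel_id, log_enable_given, log_level_given):
        """Build the reset fragment state=absent sends for a matched source."""
        xml = ""
        if log_enable_given:
            xml += "<logEnFlg>%s</logEnFlg>" % CHANNEL_DEFAULT_LOG_STATE[channel_id]
        if log_level_given:
            xml += "<logEnLevel>%s</logEnLevel>" % CHANNEL_DEFAULT_LOG_LEVEL[channel_id]
        return xml

    # Channel 3 falls back to state off / level informational:
    assert absent_reset("3", True, True) == \
        "<logEnFlg>false</logEnFlg><logEnLevel>informational</logEnLevel>"
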
+# + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: ce_info_center_trap +short_description: Manages information center trap configuration on HUAWEI CloudEngine switches. +description: + - Manages information center trap configurations on HUAWEI CloudEngine switches. +author: + - wangdezhuang (@QijunPan) +notes: + - This module requires the netconf system service be enabled on the remote device being managed. + - Recommended connection is C(netconf). + - This module also works with C(local) connections for legacy playbooks. +options: + state: + description: + - Specify desired state of the resource. + default: present + choices: ['present','absent'] + trap_time_stamp: + description: + - Timestamp format of alarm information. + choices: ['date_boot', 'date_second', 'date_tenthsecond', 'date_millisecond', 'shortdate_second', + 'shortdate_tenthsecond', 'shortdate_millisecond', 'formatdate_second', 'formatdate_tenthsecond', + 'formatdate_millisecond'] + trap_buff_enable: + description: + - Whether a trap buffer is enabled to output information. + default: no_use + choices: ['no_use','true','false'] + trap_buff_size: + description: + - Size of a trap buffer. + The value is an integer ranging from 0 to 1024. The default value is 256. + module_name: + description: + - Module name of the rule. + The value is a string of 1 to 31 case-insensitive characters. The default value is default. + Please use lower-case letter, such as [aaa, acl, arp, bfd]. + channel_id: + description: + - Number of a channel. + The value is an integer ranging from 0 to 9. The default value is 0. + trap_enable: + description: + - Whether a device is enabled to output alarms. + default: no_use + choices: ['no_use','true','false'] + trap_level: + description: + - Trap level permitted to output. 
+ choices: ['emergencies', 'alert', 'critical', 'error', 'warning', 'notification', + 'informational', 'debugging'] +''' + +EXAMPLES = ''' + +- name: CloudEngine info center trap test + hosts: cloudengine + connection: local + gather_facts: no + vars: + cli: + host: "{{ inventory_hostname }}" + port: "{{ ansible_ssh_port }}" + username: "{{ username }}" + password: "{{ password }}" + transport: cli + + tasks: + + - name: "Config trap buffer" + ce_info_center_trap: + state: present + trap_buff_enable: true + trap_buff_size: 768 + provider: "{{ cli }}" + + - name: "Undo trap buffer" + ce_info_center_trap: + state: absent + trap_buff_enable: true + trap_buff_size: 768 + provider: "{{ cli }}" + + - name: "Config trap module log level" + ce_info_center_trap: + state: present + module_name: aaa + channel_id: 1 + trap_enable: true + trap_level: error + provider: "{{ cli }}" + + - name: "Undo trap module log level" + ce_info_center_trap: + state: absent + module_name: aaa + channel_id: 1 + trap_enable: true + trap_level: error + provider: "{{ cli }}" +''' + +RETURN = ''' +changed: + description: check to see if a change was made on the device + returned: always + type: bool + sample: true +proposed: + description: k/v pairs of parameters passed into module + returned: always + type: dict + sample: {"state": "present", "trap_buff_enable": "true", "trap_buff_size": "768"} +existing: + description: k/v pairs of existing aaa server + returned: always + type: dict + sample: {"icTrapBuffEn": "false", "trapBuffSize": "256"} +end_state: + description: k/v pairs of aaa params after module execution + returned: always + type: dict + sample: {"icTrapBuffEn": "true", "trapBuffSize": "768"} +updates: + description: command sent to the device + returned: always + type: list + sample: ["info-center trapbuffer", "info-center trapbuffer size 768"] +''' + +from xml.etree import ElementTree +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.network.cloudengine.ce import get_nc_config, set_nc_config, ce_argument_spec + + +# get info center trap global +CE_GET_TRAP_GLOBAL_HEADER = """ + + + +""" +CE_GET_TRAP_GLOBAL_TAIL = """ + + + +""" +# merge info center trap global +CE_MERGE_TRAP_GLOBAL_HEADER = """ + + + +""" +CE_MERGE_TRAP_GLOBAL_TAIL = """ + + + +""" + +# get info center trap source +CE_GET_TRAP_SOURCE_HEADER = """ + + + + +""" +CE_GET_TRAP_SOURCE_TAIL = """ + + + + +""" +# merge info center trap source +CE_MERGE_TRAP_SOURCE_HEADER = """ + + + + +""" +CE_MERGE_TRAP_SOURCE_TAIL = """ + + + + +""" +# delete info center trap source +CE_DELETE_TRAP_SOURCE_HEADER = """ + + + + +""" +CE_DELETE_TRAP_SOURCE_TAIL = """ + + + + +""" + +TIME_STAMP_DICT = {"date_boot": "boot", + "date_second": "date precision-time second", + "date_tenthsecond": "date precision-time tenth-second", + "date_millisecond": "date precision-time millisecond", + "shortdate_second": "short-date precision-time second", + "shortdate_tenthsecond": "short-date precision-time tenth-second", + "shortdate_millisecond": "short-date precision-time millisecond", + "formatdate_second": "format-date precision-time second", + "formatdate_tenthsecond": "format-date precision-time tenth-second", + "formatdate_millisecond": "format-date precision-time millisecond"} + +CHANNEL_DEFAULT_TRAP_STATE = {"0": "true", + "1": "true", + "2": "true", + "3": "true", + "4": "false", + "5": "true", + "6": "true", + "7": "true", + "8": "true", + "9": "true"} + +CHANNEL_DEFAULT_TRAP_LEVEL = {"0": "debugging", + 
"1": "debugging", + "2": "debugging", + "3": "debugging", + "4": "debugging", + "5": "debugging", + "6": "debugging", + "7": "debugging", + "8": "debugging", + "9": "debugging"} + + +class InfoCenterTrap(object): + """ Manages info center trap configuration """ + + def __init__(self, **kwargs): + """ Init function """ + + # argument spec + argument_spec = kwargs["argument_spec"] + self.spec = argument_spec + self.module = AnsibleModule(argument_spec=self.spec, supports_check_mode=True) + + # module args + self.state = self.module.params['state'] + self.trap_time_stamp = self.module.params['trap_time_stamp'] or None + self.trap_buff_enable = self.module.params['trap_buff_enable'] + self.trap_buff_size = self.module.params['trap_buff_size'] or None + self.module_name = self.module.params['module_name'] or None + self.channel_id = self.module.params['channel_id'] or None + self.trap_enable = self.module.params['trap_enable'] + self.trap_level = self.module.params['trap_level'] or None + + # cur config + self.cur_global_cfg = dict() + self.cur_source_cfg = dict() + + # state + self.changed = False + self.updates_cmd = list() + self.results = dict() + self.proposed = dict() + self.existing = dict() + self.end_state = dict() + + def netconf_get_config(self, conf_str): + """ Netconf get config """ + + xml_str = get_nc_config(self.module, conf_str) + + return xml_str + + def netconf_set_config(self, conf_str): + """ Netconf set config """ + + xml_str = set_nc_config(self.module, conf_str) + + return xml_str + + def check_global_args(self): + """ Check global args """ + + need_cfg = False + find_flag = False + self.cur_global_cfg["global_cfg"] = [] + + if self.trap_time_stamp or self.trap_buff_enable != 'no_use' or self.trap_buff_size: + if self.trap_buff_size: + if self.trap_buff_size.isdigit(): + if int(self.trap_buff_size) < 0 or int(self.trap_buff_size) > 1024: + self.module.fail_json( + msg='Error: The value of trap_buff_size is out of [0 - 1024].') + else: + self.module.fail_json( + msg='Error: The trap_buff_size is not digit.') + + conf_str = CE_GET_TRAP_GLOBAL_HEADER + + if self.trap_time_stamp: + conf_str += "" + if self.trap_buff_enable != 'no_use': + conf_str += "" + if self.trap_buff_size: + conf_str += "" + + conf_str += CE_GET_TRAP_GLOBAL_TAIL + recv_xml = self.netconf_get_config(conf_str=conf_str) + + if "" in recv_xml: + find_flag = False + else: + xml_str = recv_xml.replace('\r', '').replace('\n', '').\ + replace('xmlns="urn:ietf:params:xml:ns:netconf:base:1.0"', "").\ + replace('xmlns="http://www.huawei.com/netconf/vrp"', "") + + root = ElementTree.fromstring(xml_str) + global_cfg = root.findall("syslog/globalParam") + if global_cfg: + for tmp in global_cfg: + tmp_dict = dict() + for site in tmp: + if site.tag in ["trapTimeStamp", "icTrapBuffEn", "trapBuffSize"]: + tmp_dict[site.tag] = site.text + + self.cur_global_cfg["global_cfg"].append(tmp_dict) + + if self.cur_global_cfg["global_cfg"]: + for tmp in self.cur_global_cfg["global_cfg"]: + find_flag = True + + if self.trap_time_stamp and tmp.get("trapTimeStamp").lower() != self.trap_time_stamp: + find_flag = False + if self.trap_buff_enable != 'no_use' and tmp.get("icTrapBuffEn") != self.trap_buff_enable: + find_flag = False + if self.trap_buff_size and tmp.get("trapBuffSize") != self.trap_buff_size: + find_flag = False + + if find_flag: + break + else: + find_flag = False + + if self.state == "present": + need_cfg = bool(not find_flag) + else: + need_cfg = bool(find_flag) + + self.cur_global_cfg["need_cfg"] = need_cfg + + def 
check_source_args(self): + """ Check source args """ + + need_cfg = False + find_flag = False + self.cur_source_cfg["source_cfg"] = list() + + if self.module_name: + if len(self.module_name) < 1 or len(self.module_name) > 31: + self.module.fail_json( + msg='Error: The module_name is out of [1 - 31].') + + if not self.channel_id: + self.module.fail_json( + msg='Error: Please input channel_id at the same time.') + + if self.channel_id: + if self.channel_id.isdigit(): + if int(self.channel_id) < 0 or int(self.channel_id) > 9: + self.module.fail_json( + msg='Error: The value of channel_id is out of [0 - 9].') + else: + self.module.fail_json( + msg='Error: The channel_id is not digit.') + + conf_str = CE_GET_TRAP_SOURCE_HEADER + + if self.module_name != "default": + conf_str += "%s" % self.module_name.upper() + else: + conf_str += "default" + + if self.channel_id: + conf_str += "" + if self.trap_enable != 'no_use': + conf_str += "" + if self.trap_level: + conf_str += "" + + conf_str += CE_GET_TRAP_SOURCE_TAIL + recv_xml = self.netconf_get_config(conf_str=conf_str) + + if "" in recv_xml: + find_flag = False + else: + xml_str = recv_xml.replace('\r', '').replace('\n', '').\ + replace('xmlns="urn:ietf:params:xml:ns:netconf:base:1.0"', "").\ + replace('xmlns="http://www.huawei.com/netconf/vrp"', "") + + root = ElementTree.fromstring(xml_str) + source_cfg = root.findall("syslog/icSources/icSource") + if source_cfg: + for tmp in source_cfg: + tmp_dict = dict() + for site in tmp: + if site.tag in ["moduleName", "icChannelId", "trapEnFlg", "trapEnLevel"]: + tmp_dict[site.tag] = site.text + + self.cur_source_cfg["source_cfg"].append(tmp_dict) + + if self.cur_source_cfg["source_cfg"]: + for tmp in self.cur_source_cfg["source_cfg"]: + find_flag = True + + if self.module_name and tmp.get("moduleName").lower() != self.module_name.lower(): + find_flag = False + if self.channel_id and tmp.get("icChannelId") != self.channel_id: + find_flag = False + if self.trap_enable != 'no_use' and tmp.get("trapEnFlg") != self.trap_enable: + find_flag = False + if self.trap_level and tmp.get("trapEnLevel") != self.trap_level: + find_flag = False + + if find_flag: + break + else: + find_flag = False + + if self.state == "present": + need_cfg = bool(not find_flag) + else: + need_cfg = bool(find_flag) + + self.cur_source_cfg["need_cfg"] = need_cfg + + def get_proposed(self): + """ Get proposed """ + + self.proposed["state"] = self.state + + if self.trap_time_stamp: + self.proposed["trap_time_stamp"] = self.trap_time_stamp + if self.trap_buff_enable != 'no_use': + self.proposed["trap_buff_enable"] = self.trap_buff_enable + if self.trap_buff_size: + self.proposed["trap_buff_size"] = self.trap_buff_size + if self.module_name: + self.proposed["module_name"] = self.module_name + if self.channel_id: + self.proposed["channel_id"] = self.channel_id + if self.trap_enable != 'no_use': + self.proposed["trap_enable"] = self.trap_enable + if self.trap_level: + self.proposed["trap_level"] = self.trap_level + + def get_existing(self): + """ Get existing """ + + if self.cur_global_cfg["global_cfg"]: + self.existing["global_cfg"] = self.cur_global_cfg["global_cfg"] + if self.cur_source_cfg["source_cfg"]: + self.existing["source_cfg"] = self.cur_source_cfg["source_cfg"] + + def get_end_state(self): + """ Get end state """ + + self.check_global_args() + if self.cur_global_cfg["global_cfg"]: + self.end_state["global_cfg"] = self.cur_global_cfg["global_cfg"] + + self.check_source_args() + if self.cur_source_cfg["source_cfg"]: + 
self.end_state["source_cfg"] = self.cur_source_cfg["source_cfg"] + + def merge_trap_global(self): + """ Merge trap global """ + + conf_str = CE_MERGE_TRAP_GLOBAL_HEADER + + if self.trap_time_stamp: + conf_str += "%s" % self.trap_time_stamp.upper() + if self.trap_buff_enable != 'no_use': + conf_str += "%s" % self.trap_buff_enable + if self.trap_buff_size: + conf_str += "%s" % self.trap_buff_size + + conf_str += CE_MERGE_TRAP_GLOBAL_TAIL + + recv_xml = self.netconf_set_config(conf_str=conf_str) + + if "" not in recv_xml: + self.module.fail_json(msg='Error: Merge trap global failed.') + + if self.trap_time_stamp: + cmd = "info-center timestamp trap " + TIME_STAMP_DICT.get(self.trap_time_stamp) + self.updates_cmd.append(cmd) + if self.trap_buff_enable != 'no_use': + if self.trap_buff_enable == "true": + cmd = "info-center trapbuffer" + else: + cmd = "undo info-center trapbuffer" + self.updates_cmd.append(cmd) + if self.trap_buff_size: + cmd = "info-center trapbuffer size %s" % self.trap_buff_size + self.updates_cmd.append(cmd) + + self.changed = True + + def delete_trap_global(self): + """ Delete trap global """ + + conf_str = CE_MERGE_TRAP_GLOBAL_HEADER + + if self.trap_time_stamp: + conf_str += "DATE_SECOND" + if self.trap_buff_enable != 'no_use': + conf_str += "false" + if self.trap_buff_size: + conf_str += "256" + + conf_str += CE_MERGE_TRAP_GLOBAL_TAIL + + recv_xml = self.netconf_set_config(conf_str=conf_str) + + if "" not in recv_xml: + self.module.fail_json(msg='Error: delete trap global failed.') + + if self.trap_time_stamp: + cmd = "undo info-center timestamp trap" + self.updates_cmd.append(cmd) + if self.trap_buff_enable != 'no_use': + cmd = "undo info-center trapbuffer" + self.updates_cmd.append(cmd) + if self.trap_buff_size: + cmd = "undo info-center trapbuffer size" + self.updates_cmd.append(cmd) + + self.changed = True + + def merge_trap_source(self): + """ Merge trap source """ + + conf_str = CE_MERGE_TRAP_SOURCE_HEADER + + if self.module_name: + conf_str += "%s" % self.module_name + if self.channel_id: + conf_str += "%s" % self.channel_id + if self.trap_enable != 'no_use': + conf_str += "%s" % self.trap_enable + if self.trap_level: + conf_str += "%s" % self.trap_level + + conf_str += CE_MERGE_TRAP_SOURCE_TAIL + + recv_xml = self.netconf_set_config(conf_str=conf_str) + + if "" not in recv_xml: + self.module.fail_json(msg='Error: Merge trap source failed.') + + cmd = "info-center source" + if self.module_name: + cmd += " %s" % self.module_name + if self.channel_id: + cmd += " channel %s" % self.channel_id + if self.trap_enable != 'no_use': + if self.trap_enable == "true": + cmd += " trap state on" + else: + cmd += " trap state off" + if self.trap_level: + cmd += " level %s" % self.trap_level + + self.updates_cmd.append(cmd) + self.changed = True + + def delete_trap_source(self): + """ Delete trap source """ + + if self.trap_enable == 'no_use' and not self.trap_level: + conf_str = CE_DELETE_TRAP_SOURCE_HEADER + if self.module_name: + conf_str += "%s" % self.module_name + if self.channel_id: + conf_str += "%s" % self.channel_id + conf_str += CE_DELETE_TRAP_SOURCE_TAIL + else: + conf_str = CE_MERGE_TRAP_SOURCE_HEADER + if self.module_name: + conf_str += "%s" % self.module_name + if self.channel_id: + conf_str += "%s" % self.channel_id + if self.trap_enable != 'no_use': + conf_str += "%s" % CHANNEL_DEFAULT_TRAP_STATE.get(self.channel_id) + if self.trap_level: + conf_str += "%s" % CHANNEL_DEFAULT_TRAP_LEVEL.get(self.channel_id) + conf_str += CE_MERGE_TRAP_SOURCE_TAIL + + recv_xml = 
self.netconf_set_config(conf_str=conf_str) + + if "" not in recv_xml: + self.module.fail_json(msg='Error: Delete trap source failed.') + + cmd = "undo info-center source" + if self.module_name: + cmd += " %s" % self.module_name + if self.channel_id: + cmd += " channel %s" % self.channel_id + if self.trap_enable != 'no_use': + cmd += " trap state" + if self.trap_level: + cmd += " level" + + self.updates_cmd.append(cmd) + self.changed = True + + def work(self): + """ work function """ + + self.check_global_args() + self.check_source_args() + self.get_proposed() + self.get_existing() + + if self.state == "present": + if self.cur_global_cfg["need_cfg"]: + self.merge_trap_global() + if self.cur_source_cfg["need_cfg"]: + self.merge_trap_source() + + else: + if self.cur_global_cfg["need_cfg"]: + self.delete_trap_global() + if self.cur_source_cfg["need_cfg"]: + self.delete_trap_source() + + self.get_end_state() + + self.results['changed'] = self.changed + self.results['proposed'] = self.proposed + self.results['existing'] = self.existing + self.results['end_state'] = self.end_state + self.results['updates'] = self.updates_cmd + + self.module.exit_json(**self.results) + + +def main(): + """ Module main """ + + argument_spec = dict( + state=dict(choices=['present', 'absent'], default='present'), + trap_time_stamp=dict(choices=['date_boot', 'date_second', 'date_tenthsecond', + 'date_millisecond', 'shortdate_second', 'shortdate_tenthsecond', + 'shortdate_millisecond', 'formatdate_second', 'formatdate_tenthsecond', + 'formatdate_millisecond']), + trap_buff_enable=dict(type='str', default='no_use', choices=['no_use', 'true', 'false']), + trap_buff_size=dict(type='str'), + module_name=dict(type='str'), + channel_id=dict(type='str'), + trap_enable=dict(type='str', default='no_use', choices=['no_use', 'true', 'false']), + trap_level=dict(choices=['emergencies', 'alert', 'critical', 'error', 'warning', 'notification', + 'informational', 'debugging']) + ) + + argument_spec.update(ce_argument_spec) + module = InfoCenterTrap(argument_spec=argument_spec) + module.work() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/cloudengine/ce_interface.py b/plugins/modules/network/cloudengine/ce_interface.py new file mode 100644 index 0000000000..930a27fd0b --- /dev/null +++ b/plugins/modules/network/cloudengine/ce_interface.py @@ -0,0 +1,895 @@ +#!/usr/bin/python +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: ce_interface +short_description: Manages physical attributes of interfaces on HUAWEI CloudEngine switches. +description: + - Manages physical attributes of interfaces on HUAWEI CloudEngine switches. 
+author: QijunPan (@QijunPan) +notes: + - This module is also used to create logical interfaces such as + vlanif and loopbacks. + - This module requires the netconf system service be enabled on the remote device being managed. + - Recommended connection is C(netconf). + - This module also works with C(local) connections for legacy playbooks. +options: + interface: + description: + - Full name of interface, i.e. 40GE1/0/10, Tunnel1. + interface_type: + description: + - Interface type to be configured from the device. + choices: ['ge', '10ge', '25ge', '4x10ge', '40ge', '100ge', 'vlanif', 'loopback', 'meth', + 'eth-trunk', 'nve', 'tunnel', 'ethernet', 'fcoe-port', 'fabric-port', 'stack-port', 'null'] + admin_state: + description: + - Specifies the interface management status. + The value is an enumerated type. + up, An interface is in the administrative Up state. + down, An interface is in the administrative Down state. + choices: ['up', 'down'] + description: + description: + - Specifies an interface description. + The value is a string of 1 to 242 case-sensitive characters, + spaces supported but question marks (?) not supported. + mode: + description: + - Manage Layer 2 or Layer 3 state of the interface. + choices: ['layer2', 'layer3'] + l2sub: + description: + - Specifies whether the interface is a Layer 2 sub-interface. + type: bool + default: 'no' + state: + description: + - Specify desired state of the resource. + default: present + choices: ['present', 'absent', 'default'] +''' + +EXAMPLES = ''' +- name: interface module test + hosts: cloudengine + connection: local + gather_facts: no + vars: + cli: + host: "{{ inventory_hostname }}" + port: "{{ ansible_ssh_port }}" + username: "{{ username }}" + password: "{{ password }}" + transport: cli + + tasks: + - name: Ensure an interface is a Layer 3 port and that it has the proper description + ce_interface: + interface: 10GE1/0/22 + description: 'Configured by Ansible' + mode: layer3 + provider: '{{ cli }}' + + - name: Admin down an interface + ce_interface: + interface: 10GE1/0/22 + admin_state: down + provider: '{{ cli }}' + + - name: Remove all tunnel interfaces + ce_interface: + interface_type: tunnel + state: absent + provider: '{{ cli }}' + + - name: Remove all logical interfaces + ce_interface: + interface_type: '{{ item }}' + state: absent + provider: '{{ cli }}' + with_items: + - loopback + - eth-trunk + - nve + + - name: Admin up all 10GE interfaces + ce_interface: + interface_type: 10GE + admin_state: up + provider: '{{ cli }}' +''' +RETURN = ''' +proposed: + description: k/v pairs of parameters passed into module + returned: always + type: dict + sample: {"interface": "10GE1/0/10", "admin_state": "down"} +existing: + description: k/v pairs of existing switchport + returned: always + type: dict + sample: {"admin_state": "up", "description": "None", + "interface": "10GE1/0/10", "mode": "layer2"} +end_state: + description: k/v pairs of switchport after module execution + returned: always + type: dict + sample: {"admin_state": "down", "description": "None", + "interface": "10GE1/0/10", "mode": "layer2"} +updates: + description: command list sent to the device + returned: always + type: list + sample: ["interface 10GE1/0/10", "shutdown"] +changed: + description: check to see if a change was made on the device + returned: always + type: bool + sample: true +''' + + +import re +from xml.etree import ElementTree +from ansible.module_utils.basic import AnsibleModule +from 
ansible_collections.community.general.plugins.module_utils.network.cloudengine.ce import get_nc_config, set_nc_config, ce_argument_spec + + +CE_NC_GET_INTFS = """ + + + + + + %s + + + + + + + + + +""" + + +CE_NC_GET_INTF = """ + + + + + %s + + + + + + + + + + +""" + +CE_NC_XML_CREATE_INTF = """ + + + + %s + %s + + + +""" + +CE_NC_XML_CREATE_INTF_L2SUB = """ + + + + %s + %s + true + + + +""" + +CE_NC_XML_DELETE_INTF = """ + + + + %s + + + +""" + + +CE_NC_XML_MERGE_INTF_DES = """ + + + + %s + %s + + + +""" +CE_NC_XML_MERGE_INTF_STATUS = """ + + + + %s + %s + + + +""" + +CE_NC_XML_MERGE_INTF_L2ENABLE = """ + + + + %s + %s + + + +""" + +ADMIN_STATE_TYPE = ('ge', '10ge', '25ge', '4x10ge', '40ge', '100ge', + 'vlanif', 'meth', 'eth-trunk', 'vbdif', 'tunnel', + 'ethernet', 'stack-port') + +SWITCH_PORT_TYPE = ('ge', '10ge', '25ge', + '4x10ge', '40ge', '100ge', 'eth-trunk') + + +def get_interface_type(interface): + """Gets the type of interface, such as 10GE, ETH-TRUNK, VLANIF...""" + + if interface is None: + return None + + if interface.upper().startswith('GE'): + return 'ge' + elif interface.upper().startswith('10GE'): + return '10ge' + elif interface.upper().startswith('25GE'): + return '25ge' + elif interface.upper().startswith('4X10GE'): + return '4x10ge' + elif interface.upper().startswith('40GE'): + return '40ge' + elif interface.upper().startswith('100GE'): + return '100ge' + elif interface.upper().startswith('VLANIF'): + return 'vlanif' + elif interface.upper().startswith('LOOPBACK'): + return 'loopback' + elif interface.upper().startswith('METH'): + return 'meth' + elif interface.upper().startswith('ETH-TRUNK'): + return 'eth-trunk' + elif interface.upper().startswith('VBDIF'): + return 'vbdif' + elif interface.upper().startswith('NVE'): + return 'nve' + elif interface.upper().startswith('TUNNEL'): + return 'tunnel' + elif interface.upper().startswith('ETHERNET'): + return 'ethernet' + elif interface.upper().startswith('FCOE-PORT'): + return 'fcoe-port' + elif interface.upper().startswith('FABRIC-PORT'): + return 'fabric-port' + elif interface.upper().startswith('STACK-PORT'): + return 'stack-port' + elif interface.upper().startswith('NULL'): + return 'null' + else: + return None + + +def is_admin_state_enable(iftype): + """admin state disable: loopback nve""" + + return bool(iftype in ADMIN_STATE_TYPE) + + +def is_portswitch_enalbe(iftype): + """"is portswitch? """ + + return bool(iftype in SWITCH_PORT_TYPE) + + +class Interface(object): + """Manages physical attributes of interfaces.""" + + def __init__(self, argument_spec): + self.spec = argument_spec + self.module = None + self.init_module() + + # interface info + self.interface = self.module.params['interface'] + self.interface_type = self.module.params['interface_type'] + self.admin_state = self.module.params['admin_state'] + self.description = self.module.params['description'] + self.mode = self.module.params['mode'] + self.l2sub = self.module.params['l2sub'] + self.state = self.module.params['state'] + + # state + self.changed = False + self.updates_cmd = list() + self.results = dict() + self.proposed = dict() + self.existing = dict() + self.end_state = dict() + self.intfs_info = dict() # all type interface info + self.intf_info = dict() # one interface info + self.intf_type = None # loopback tunnel ... 
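+
+        # Result layout shared by these cloudengine modules:
+        #   proposed    - settings requested by the task parameters
+        #   existing    - interface state read from the device before the change
+        #   end_state   - interface state re-read after the change
+        #   updates_cmd - CLI equivalents of the NETCONF edits, returned as 'updates'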
+ + def init_module(self): + """init_module""" + + self.module = AnsibleModule( + argument_spec=self.spec, supports_check_mode=True) + + def check_response(self, xml_str, xml_name): + """Check if response message is already succeed.""" + + if "" not in xml_str: + self.module.fail_json(msg='Error: %s failed.' % xml_name) + + def get_interfaces_dict(self): + """ get interfaces attributes dict.""" + + intfs_info = dict() + conf_str = CE_NC_GET_INTFS % self.interface_type + recv_xml = get_nc_config(self.module, conf_str) + + if "" in recv_xml: + return intfs_info + + xml_str = recv_xml.replace('\r', '').replace('\n', '').\ + replace('xmlns="urn:ietf:params:xml:ns:netconf:base:1.0"', "").\ + replace('xmlns="http://www.huawei.com/netconf/vrp"', "") + + root = ElementTree.fromstring(xml_str) + intfs = root.findall("ifm/interfaces/") + if intfs: + for intf in intfs: + intf_type = intf.find("ifPhyType").text.lower() + if intf_type: + if not intfs_info.get(intf_type): + intfs_info[intf_type] = list() + intf_info = dict() + for tmp in intf: + intf_info[tmp.tag] = tmp.text + intfs_info[intf_type].append(intf_info) + return intfs_info + + def get_interface_dict(self, ifname): + """ get one interface attributes dict.""" + + intf_info = dict() + conf_str = CE_NC_GET_INTF % ifname + recv_xml = get_nc_config(self.module, conf_str) + + if "" in recv_xml: + return intf_info + xml_str = recv_xml.replace('\r', '').replace('\n', '').\ + replace('xmlns="urn:ietf:params:xml:ns:netconf:base:1.0"', "").\ + replace('xmlns="http://www.huawei.com/netconf/vrp"', "") + + root = ElementTree.fromstring(xml_str) + intfs = root.findall("ifm/interfaces/interface/") + if intfs: + for intf in intfs: + intf_info[intf.tag] = intf.text + return intf_info + + def create_interface(self, ifname, description, admin_state, mode, l2sub): + """Create interface.""" + + if l2sub: + self.updates_cmd.append("interface %s mode l2" % ifname) + else: + self.updates_cmd.append("interface %s" % ifname) + + if not description: + description = '' + else: + self.updates_cmd.append("description %s" % description) + + if l2sub: + xmlstr = CE_NC_XML_CREATE_INTF_L2SUB % (ifname, description) + else: + xmlstr = CE_NC_XML_CREATE_INTF % (ifname, description) + if admin_state and is_admin_state_enable(self.intf_type): + xmlstr += CE_NC_XML_MERGE_INTF_STATUS % (ifname, admin_state) + if admin_state == 'up': + self.updates_cmd.append("undo shutdown") + else: + self.updates_cmd.append("shutdown") + if mode and is_portswitch_enalbe(self.intf_type): + if mode == "layer2": + xmlstr += CE_NC_XML_MERGE_INTF_L2ENABLE % (ifname, 'enable') + self.updates_cmd.append('portswitch') + elif mode == "layer3": + xmlstr += CE_NC_XML_MERGE_INTF_L2ENABLE % (ifname, 'disable') + self.updates_cmd.append('undo portswitch') + + conf_str = ' ' + xmlstr + ' ' + recv_xml = set_nc_config(self.module, conf_str) + self.check_response(recv_xml, "CREATE_INTF") + self.changed = True + + def delete_interface(self, ifname): + """ Delete interface.""" + + xmlstr = CE_NC_XML_DELETE_INTF % ifname + conf_str = ' ' + xmlstr + ' ' + self.updates_cmd.append('undo interface %s' % ifname) + recv_xml = set_nc_config(self.module, conf_str) + self.check_response(recv_xml, "DELETE_INTF") + self.changed = True + + def delete_interfaces(self, iftype): + """ Delete interfaces with type.""" + + xmlstr = '' + intfs_list = self.intfs_info.get(iftype.lower()) + if not intfs_list: + return + + for intf in intfs_list: + xmlstr += CE_NC_XML_DELETE_INTF % intf['ifName'] + self.updates_cmd.append('undo interface %s' 
% intf['ifName']) + + conf_str = ' ' + xmlstr + ' ' + recv_xml = set_nc_config(self.module, conf_str) + self.check_response(recv_xml, "DELETE_INTFS") + self.changed = True + + def merge_interface(self, ifname, description, admin_state, mode): + """ Merge interface attributes.""" + + xmlstr = '' + change = False + self.updates_cmd.append("interface %s" % ifname) + if description and self.intf_info["ifDescr"] != description: + xmlstr += CE_NC_XML_MERGE_INTF_DES % (ifname, description) + self.updates_cmd.append("description %s" % description) + change = True + + if admin_state and is_admin_state_enable(self.intf_type) \ + and self.intf_info["ifAdminStatus"] != admin_state: + xmlstr += CE_NC_XML_MERGE_INTF_STATUS % (ifname, admin_state) + change = True + if admin_state == "up": + self.updates_cmd.append("undo shutdown") + else: + self.updates_cmd.append("shutdown") + + if is_portswitch_enalbe(self.intf_type): + if mode == "layer2" and self.intf_info["isL2SwitchPort"] != "true": + xmlstr += CE_NC_XML_MERGE_INTF_L2ENABLE % (ifname, 'enable') + self.updates_cmd.append("portswitch") + change = True + elif mode == "layer3" \ + and self.intf_info["isL2SwitchPort"] != "false": + xmlstr += CE_NC_XML_MERGE_INTF_L2ENABLE % (ifname, 'disable') + self.updates_cmd.append("undo portswitch") + change = True + + if not change: + return + + conf_str = ' ' + xmlstr + ' ' + recv_xml = set_nc_config(self.module, conf_str) + self.check_response(recv_xml, "MERGE_INTF_ATTR") + self.changed = True + + def merge_interfaces(self, iftype, description, admin_state, mode): + """ Merge interface attributes by type.""" + + xmlstr = '' + change = False + intfs_list = self.intfs_info.get(iftype.lower()) + if not intfs_list: + return + + for intf in intfs_list: + if_change = False + self.updates_cmd.append("interface %s" % intf['ifName']) + if description and intf["ifDescr"] != description: + xmlstr += CE_NC_XML_MERGE_INTF_DES % ( + intf['ifName'], description) + self.updates_cmd.append("description %s" % description) + if_change = True + if admin_state and is_admin_state_enable(self.intf_type)\ + and intf["ifAdminStatus"] != admin_state: + xmlstr += CE_NC_XML_MERGE_INTF_STATUS % ( + intf['ifName'], admin_state) + if_change = True + if admin_state == "up": + self.updates_cmd.append("undo shutdown") + else: + self.updates_cmd.append("shutdown") + + if is_portswitch_enalbe(self.intf_type): + if mode == "layer2" \ + and intf["isL2SwitchPort"] != "true": + xmlstr += CE_NC_XML_MERGE_INTF_L2ENABLE % ( + intf['ifName'], 'enable') + self.updates_cmd.append("portswitch") + if_change = True + elif mode == "layer3" \ + and intf["isL2SwitchPort"] != "false": + xmlstr += CE_NC_XML_MERGE_INTF_L2ENABLE % ( + intf['ifName'], 'disable') + self.updates_cmd.append("undo portswitch") + if_change = True + + if if_change: + change = True + else: + self.updates_cmd.pop() + + if not change: + return + + conf_str = ' ' + xmlstr + ' ' + recv_xml = set_nc_config(self.module, conf_str) + self.check_response(recv_xml, "MERGE_INTFS_ATTR") + self.changed = True + + def default_interface(self, ifname): + """default_interface""" + + change = False + xmlstr = "" + self.updates_cmd.append("interface %s" % ifname) + # set description default + if self.intf_info["ifDescr"]: + xmlstr += CE_NC_XML_MERGE_INTF_DES % (ifname, '') + self.updates_cmd.append("undo description") + change = True + + # set admin_status default + if is_admin_state_enable(self.intf_type) \ + and self.intf_info["ifAdminStatus"] != 'up': + xmlstr += CE_NC_XML_MERGE_INTF_STATUS % (ifname, 'up') 
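+            # 'up' is the factory-default admin state, so defaulting the
+            # interface is expressed on the CLI as 'undo shutdown'.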
+ self.updates_cmd.append("undo shutdown") + change = True + + # set portswitch default + if is_portswitch_enalbe(self.intf_type) \ + and self.intf_info["isL2SwitchPort"] != "true": + xmlstr += CE_NC_XML_MERGE_INTF_L2ENABLE % (ifname, 'enable') + self.updates_cmd.append("portswitch") + change = True + + if not change: + return + + conf_str = ' ' + xmlstr + ' ' + recv_xml = set_nc_config(self.module, conf_str) + self.check_response(recv_xml, "SET_INTF_DEFAULT") + self.changed = True + + def default_interfaces(self, iftype): + """ Set interface config to default by type.""" + + change = False + xmlstr = '' + intfs_list = self.intfs_info.get(iftype.lower()) + if not intfs_list: + return + + for intf in intfs_list: + if_change = False + self.updates_cmd.append("interface %s" % intf['ifName']) + + # set description default + if intf['ifDescr']: + xmlstr += CE_NC_XML_MERGE_INTF_DES % (intf['ifName'], '') + self.updates_cmd.append("undo description") + if_change = True + + # set admin_status default + if is_admin_state_enable(self.intf_type) and intf["ifAdminStatus"] != 'up': + xmlstr += CE_NC_XML_MERGE_INTF_STATUS % (intf['ifName'], 'up') + self.updates_cmd.append("undo shutdown") + if_change = True + + # set portswitch default + if is_portswitch_enalbe(self.intf_type) and intf["isL2SwitchPort"] != "true": + xmlstr += CE_NC_XML_MERGE_INTF_L2ENABLE % (intf['ifName'], 'enable') + self.updates_cmd.append("portswitch") + if_change = True + + if if_change: + change = True + else: + self.updates_cmd.pop() + + if not change: + return + + conf_str = ' ' + xmlstr + ' ' + recv_xml = set_nc_config(self.module, conf_str) + self.check_response(recv_xml, "SET_INTFS_DEFAULT") + self.changed = True + + def check_params(self): + """Check all input params""" + + if not self.interface and not self.interface_type: + self.module.fail_json( + msg='Error: Interface or interface_type must be set.') + if self.interface and self.interface_type: + self.module.fail_json( + msg='Error: Interface or interface_type' + ' can not be set at the same time.') + + # interface type check + if self.interface: + self.intf_type = get_interface_type(self.interface) + if not self.intf_type: + self.module.fail_json( + msg='Error: interface name of %s' + ' is error.' % self.interface) + + elif self.interface_type: + self.intf_type = get_interface_type(self.interface_type) + if not self.intf_type or self.intf_type != self.interface_type.replace(" ", "").lower(): + self.module.fail_json( + msg='Error: interface type of %s' + ' is error.' % self.interface_type) + + if not self.intf_type: + self.module.fail_json( + msg='Error: interface or interface type %s is error.') + + # shutdown check + if not is_admin_state_enable(self.intf_type) \ + and self.state == "present" and self.admin_state == "down": + self.module.fail_json( + msg='Error: The %s interface can not' + ' be shutdown.' % self.intf_type) + + # port switch mode check + if not is_portswitch_enalbe(self.intf_type)\ + and self.mode and self.state == "present": + self.module.fail_json( + msg='Error: The %s interface can not manage' + ' Layer 2 or Layer 3 state.' 
% self.intf_type) + + # check description len + if self.description: + if len(self.description) > 242 \ + or len(self.description.replace(' ', '')) < 1: + self.module.fail_json( + msg='Error: interface description ' + 'is not in the range from 1 to 242.') + # check l2sub flag + if self.l2sub: + if not self.interface: + self.module.fail_json(msg='Error: L2sub flag can not be set when there no interface set with.') + if self.interface.count(".") != 1: + self.module.fail_json(msg='Error: Interface name is invalid, it is not sub-interface.') + + def get_proposed(self): + """get_proposed""" + + self.proposed['state'] = self.state + if self.interface: + self.proposed["interface"] = self.interface + if self.interface_type: + self.proposed["interface_type"] = self.interface_type + + if self.state == 'present': + if self.description: + self.proposed["description"] = self.description + if self.mode: + self.proposed["mode"] = self.mode + if self.admin_state: + self.proposed["admin_state"] = self.admin_state + self.proposed["l2sub"] = self.l2sub + + elif self.state == 'default': + if self.description: + self.proposed["description"] = "" + if is_admin_state_enable(self.intf_type) and self.admin_state: + self.proposed["admin_state"] = self.admin_state + if is_portswitch_enalbe(self.intf_type) and self.mode: + self.proposed["mode"] = self.mode + + def get_existing(self): + """get_existing""" + + if self.intf_info: + self.existing["interface"] = self.intf_info["ifName"] + if is_admin_state_enable(self.intf_type): + self.existing["admin_state"] = self.intf_info["ifAdminStatus"] + self.existing["description"] = self.intf_info["ifDescr"] + if is_portswitch_enalbe(self.intf_type): + if self.intf_info["isL2SwitchPort"] == "true": + self.existing["mode"] = "layer2" + else: + self.existing["mode"] = "layer3" + + if self.intfs_info: + intfs = self.intfs_info.get(self.intf_type.lower()) + for intf in intfs: + intf_para = dict() + if intf["ifAdminStatus"]: + intf_para["admin_state"] = intf["ifAdminStatus"] + intf_para["description"] = intf["ifDescr"] + + if intf["isL2SwitchPort"] == "true": + intf_para["mode"] = "layer2" + else: + intf_para["mode"] = "layer3" + self.existing[intf["ifName"]] = intf_para + + def get_end_state(self): + """get_end_state""" + if self.interface: + end_info = self.get_interface_dict(self.interface) + if end_info: + self.end_state["interface"] = end_info["ifName"] + if is_admin_state_enable(self.intf_type): + self.end_state["admin_state"] = end_info["ifAdminStatus"] + self.end_state["description"] = end_info["ifDescr"] + if is_portswitch_enalbe(self.intf_type): + if end_info["isL2SwitchPort"] == "true": + self.end_state["mode"] = "layer2" + else: + self.end_state["mode"] = "layer3" + + if self.interface_type: + end_info = self.get_interfaces_dict() + intfs = end_info.get(self.intf_type.lower()) + for intf in intfs: + intf_para = dict() + if intf["ifAdminStatus"]: + intf_para["admin_state"] = intf["ifAdminStatus"] + intf_para["description"] = intf["ifDescr"] + + if intf["isL2SwitchPort"] == "true": + intf_para["mode"] = "layer2" + else: + intf_para["mode"] = "layer3" + self.end_state[intf["ifName"]] = intf_para + + def work(self): + """worker""" + + self.check_params() + + # single interface config + if self.interface: + self.intf_info = self.get_interface_dict(self.interface) + self.get_existing() + if self.state == 'present': + if not self.intf_info: + # create interface + self.create_interface(self.interface, + self.description, + self.admin_state, + self.mode, + self.l2sub) + else: + # 
merge interface + if self.description or self.admin_state or self.mode: + self.merge_interface(self.interface, + self.description, + self.admin_state, + self.mode) + + elif self.state == 'absent': + if self.intf_info: + # delete interface + self.delete_interface(self.interface) + else: + # interface does not exist + self.module.fail_json( + msg='Error: interface does not exist.') + + else: # default + if not self.intf_info: + # error, interface does not exist + self.module.fail_json( + msg='Error: interface does not exist.') + else: + self.default_interface(self.interface) + + # interface type config + else: + self.intfs_info = self.get_interfaces_dict() + self.get_existing() + if self.state == 'present': + if self.intfs_info.get(self.intf_type.lower()): + if self.description or self.admin_state or self.mode: + self.merge_interfaces(self.intf_type, + self.description, + self.admin_state, + self.mode) + elif self.state == 'absent': + # delete all interface of this type + if self.intfs_info.get(self.intf_type.lower()): + self.delete_interfaces(self.intf_type) + + else: + # set interfaces config to default + if self.intfs_info.get(self.intf_type.lower()): + self.default_interfaces(self.intf_type) + else: + self.module.fail_json( + msg='Error: no interface in this type.') + + self.get_proposed() + self.get_end_state() + self.results['changed'] = self.changed + self.results['proposed'] = self.proposed + self.results['existing'] = self.existing + self.results['end_state'] = self.end_state + if self.changed: + self.results['updates'] = self.updates_cmd + else: + self.results['updates'] = list() + + self.module.exit_json(**self.results) + + +def main(): + """main""" + + argument_spec = dict( + interface=dict(required=False, type='str'), + admin_state=dict(choices=['up', 'down'], required=False), + description=dict(required=False, default=None), + mode=dict(choices=['layer2', 'layer3'], required=False), + interface_type=dict(required=False), + l2sub=dict(required=False, default=False, type='bool'), + state=dict(choices=['absent', 'present', 'default'], + default='present', required=False), + ) + + argument_spec.update(ce_argument_spec) + interface = Interface(argument_spec) + interface.work() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/cloudengine/ce_interface_ospf.py b/plugins/modules/network/cloudengine/ce_interface_ospf.py new file mode 100644 index 0000000000..7e7e4f92a5 --- /dev/null +++ b/plugins/modules/network/cloudengine/ce_interface_ospf.py @@ -0,0 +1,797 @@ +#!/usr/bin/python +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: ce_interface_ospf +short_description: Manages configuration of an OSPF interface instanceon HUAWEI CloudEngine switches. 
+description:
+    - Manages configuration of an OSPF interface instance on HUAWEI CloudEngine switches.
+author: QijunPan (@QijunPan)
+notes:
+    - This module requires the netconf system service be enabled on the remote device being managed.
+    - Recommended connection is C(netconf).
+    - This module also works with C(local) connections for legacy playbooks.
+options:
+    interface:
+        description:
+            - Full name of interface, i.e. 40GE1/0/10.
+        required: true
+    process_id:
+        description:
+            - Specifies a process ID.
+              The value is an integer ranging from 1 to 4294967295.
+        required: true
+    area:
+        description:
+            - OSPF area associated with this OSPF process.
+              Valid values are a string, formatted as an IP address
+              (i.e. "0.0.0.0") or as an integer between 1 and 4294967295.
+        required: true
+    cost:
+        description:
+            - The cost associated with this interface.
+              Valid values are an integer in the range from 1 to 65535.
+    hello_interval:
+        description:
+            - Time between sending successive hello packets.
+              Valid values are an integer in the range from 1 to 65535.
+    dead_interval:
+        description:
+            - Time interval an OSPF neighbor waits for a hello
+              packet before tearing down adjacencies. Valid values are an
+              integer in the range from 1 to 235926000.
+    silent_interface:
+        description:
+            - Setting to true will prevent this interface from receiving
+              HELLO packets. Valid values are 'true' and 'false'.
+        type: bool
+        default: 'no'
+    auth_mode:
+        description:
+            - Specifies the authentication type.
+        choices: ['none', 'null', 'hmac-sha256', 'md5', 'hmac-md5', 'simple']
+    auth_text_simple:
+        description:
+            - Specifies a password for simple authentication.
+              The value is a string of 1 to 8 characters.
+    auth_key_id:
+        description:
+            - Authentication key id when C(auth_mode) is 'hmac-sha256', 'md5' or 'hmac-md5'.
+              Valid value is an integer in the range from 1 to 255.
+    auth_text_md5:
+        description:
+            - Specifies a password for MD5, HMAC-MD5, or HMAC-SHA256 authentication.
+              The value is a string of 1 to 255 case-sensitive characters, spaces not supported.
+    state:
+        description:
+            - Determines whether the config should be present or not
+              on the device.
+ default: present + choices: ['present','absent'] +''' + +EXAMPLES = ''' +- name: eth_trunk module test + hosts: cloudengine + connection: local + gather_facts: no + vars: + cli: + host: "{{ inventory_hostname }}" + port: "{{ ansible_ssh_port }}" + username: "{{ username }}" + password: "{{ password }}" + transport: cli + + tasks: + - name: Enables OSPF and sets the cost on an interface + ce_interface_ospf: + interface: 10GE1/0/30 + process_id: 1 + area: 100 + cost: 100 + provider: '{{ cli }}' + + - name: Sets the dead interval of the OSPF neighbor + ce_interface_ospf: + interface: 10GE1/0/30 + process_id: 1 + area: 100 + dead_interval: 100 + provider: '{{ cli }}' + + - name: Sets the interval for sending Hello packets on an interface + ce_interface_ospf: + interface: 10GE1/0/30 + process_id: 1 + area: 100 + hello_interval: 2 + provider: '{{ cli }}' + + - name: Disables an interface from receiving and sending OSPF packets + ce_interface_ospf: + interface: 10GE1/0/30 + process_id: 1 + area: 100 + silent_interface: true + provider: '{{ cli }}' +''' + +RETURN = ''' +proposed: + description: k/v pairs of parameters passed into module + returned: verbose mode + type: dict + sample: {"process_id": "1", "area": "0.0.0.100", "interface": "10GE1/0/30", "cost": "100"} +existing: + description: k/v pairs of existing configuration + returned: verbose mode + type: dict + sample: {"process_id": "1", "area": "0.0.0.100"} +end_state: + description: k/v pairs of configuration after module execution + returned: verbose mode + type: dict + sample: {"process_id": "1", "area": "0.0.0.100", "interface": "10GE1/0/30", + "cost": "100", "dead_interval": "40", "hello_interval": "10", + "silent_interface": "false", "auth_mode": "none"} +updates: + description: commands sent to the device + returned: always + type: list + sample: ["interface 10GE1/0/30", + "ospf enable 1 area 0.0.0.100", + "ospf cost 100"] +changed: + description: check to see if a change was made on the device + returned: always + type: bool + sample: true +''' + +from xml.etree import ElementTree +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.network.cloudengine.ce import get_nc_config, set_nc_config, ce_argument_spec + +CE_NC_GET_OSPF = """ + + + + + + %s + + + + + %s + + + %s + + + + + + + + + + + + + + + + + + +""" + +CE_NC_XML_BUILD_PROCESS = """ + + + + + + %s + + + %s + %s + + + + + + + +""" + +CE_NC_XML_BUILD_MERGE_INTF = """ + + + %s + + +""" + +CE_NC_XML_BUILD_DELETE_INTF = """ + + + %s + + +""" +CE_NC_XML_SET_IF_NAME = """ + %s +""" + +CE_NC_XML_SET_HELLO = """ + %s +""" + +CE_NC_XML_SET_DEAD = """ + %s +""" + +CE_NC_XML_SET_SILENT = """ + %s +""" + +CE_NC_XML_SET_COST = """ + %s +""" + +CE_NC_XML_SET_AUTH_MODE = """ + %s +""" + + +CE_NC_XML_SET_AUTH_TEXT_SIMPLE = """ + %s +""" + +CE_NC_XML_SET_AUTH_MD5 = """ + %s + %s +""" + + +def get_interface_type(interface): + """Gets the type of interface, such as 10GE, ETH-TRUNK, VLANIF...""" + + if interface is None: + return None + + if interface.upper().startswith('GE'): + return 'ge' + elif interface.upper().startswith('10GE'): + return '10ge' + elif interface.upper().startswith('25GE'): + return '25ge' + elif interface.upper().startswith('4X10GE'): + return '4x10ge' + elif interface.upper().startswith('40GE'): + return '40ge' + elif interface.upper().startswith('100GE'): + return '100ge' + elif interface.upper().startswith('VLANIF'): + return 'vlanif' + elif interface.upper().startswith('LOOPBACK'): + return 'loopback' + 
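+    # Matching is by name prefix and continues below; for example
+    # (illustrative inputs, not from the module's test data):
+    #   get_interface_type('10GE1/0/30')  -> '10ge'
+    #   get_interface_type('Vlanif100')   -> 'vlanif'
+    #   get_interface_type('Foo1')        -> None (falls through to the final else)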
elif interface.upper().startswith('METH'): + return 'meth' + elif interface.upper().startswith('ETH-TRUNK'): + return 'eth-trunk' + elif interface.upper().startswith('VBDIF'): + return 'vbdif' + elif interface.upper().startswith('NVE'): + return 'nve' + elif interface.upper().startswith('TUNNEL'): + return 'tunnel' + elif interface.upper().startswith('ETHERNET'): + return 'ethernet' + elif interface.upper().startswith('FCOE-PORT'): + return 'fcoe-port' + elif interface.upper().startswith('FABRIC-PORT'): + return 'fabric-port' + elif interface.upper().startswith('STACK-PORT'): + return 'stack-port' + elif interface.upper().startswith('NULL'): + return 'null' + else: + return None + + +def is_valid_v4addr(addr): + """check is ipv4 addr is valid""" + + if not addr: + return False + + if addr.find('.') != -1: + addr_list = addr.split('.') + if len(addr_list) != 4: + return False + for each_num in addr_list: + if not each_num.isdigit(): + return False + if int(each_num) > 255: + return False + return True + + return False + + +class InterfaceOSPF(object): + """ + Manages configuration of an OSPF interface instance. + """ + + def __init__(self, argument_spec): + self.spec = argument_spec + self.module = None + self.init_module() + + # module input info + self.interface = self.module.params['interface'] + self.process_id = self.module.params['process_id'] + self.area = self.module.params['area'] + self.cost = self.module.params['cost'] + self.hello_interval = self.module.params['hello_interval'] + self.dead_interval = self.module.params['dead_interval'] + self.silent_interface = self.module.params['silent_interface'] + self.auth_mode = self.module.params['auth_mode'] + self.auth_text_simple = self.module.params['auth_text_simple'] + self.auth_key_id = self.module.params['auth_key_id'] + self.auth_text_md5 = self.module.params['auth_text_md5'] + self.state = self.module.params['state'] + + # ospf info + self.ospf_info = dict() + + # state + self.changed = False + self.updates_cmd = list() + self.results = dict() + self.proposed = dict() + self.existing = dict() + self.end_state = dict() + + def init_module(self): + """init module""" + + self.module = AnsibleModule( + argument_spec=self.spec, supports_check_mode=True) + + def netconf_set_config(self, xml_str, xml_name): + """netconf set config""" + + rcv_xml = set_nc_config(self.module, xml_str) + if "" not in rcv_xml: + self.module.fail_json(msg='Error: %s failed.' 
% xml_name) + + def get_area_ip(self): + """convert integer to ip address""" + + if not self.area.isdigit(): + return self.area + + addr_int = ['0'] * 4 + addr_int[0] = str(((int(self.area) & 0xFF000000) >> 24) & 0xFF) + addr_int[1] = str(((int(self.area) & 0x00FF0000) >> 16) & 0xFF) + addr_int[2] = str(((int(self.area) & 0x0000FF00) >> 8) & 0XFF) + addr_int[3] = str(int(self.area) & 0xFF) + + return '.'.join(addr_int) + + def get_ospf_dict(self): + """ get one ospf attributes dict.""" + + ospf_info = dict() + conf_str = CE_NC_GET_OSPF % ( + self.process_id, self.get_area_ip(), self.interface) + rcv_xml = get_nc_config(self.module, conf_str) + + if "" in rcv_xml: + return ospf_info + + xml_str = rcv_xml.replace('\r', '').replace('\n', '').\ + replace('xmlns="urn:ietf:params:xml:ns:netconf:base:1.0"', "").\ + replace('xmlns="http://www.huawei.com/netconf/vrp"', "") + + # get process base info + root = ElementTree.fromstring(xml_str) + ospfsite = root.find("ospfv2/ospfv2comm/ospfSites/ospfSite") + if not ospfsite: + self.module.fail_json(msg="Error: ospf process does not exist.") + + for site in ospfsite: + if site.tag in ["processId", "routerId", "vrfName"]: + ospf_info[site.tag] = site.text + + # get areas info + ospf_info["areaId"] = "" + areas = root.find( + "ospfv2/ospfv2comm/ospfSites/ospfSite/areas/area") + if areas: + for area in areas: + if area.tag == "areaId": + ospf_info["areaId"] = area.text + break + + # get interface info + ospf_info["interface"] = dict() + intf = root.find( + "ospfv2/ospfv2comm/ospfSites/ospfSite/areas/area/interfaces/interface") + if intf: + for attr in intf: + if attr.tag in ["ifName", "networkType", + "helloInterval", "deadInterval", + "silentEnable", "configCost", + "authenticationMode", "authTextSimple", + "keyId", "authTextMd5"]: + ospf_info["interface"][attr.tag] = attr.text + + return ospf_info + + def set_ospf_interface(self): + """set interface ospf enable, and set its ospf attributes""" + + xml_intf = CE_NC_XML_SET_IF_NAME % self.interface + + # ospf view + self.updates_cmd.append("ospf %s" % self.process_id) + self.updates_cmd.append("area %s" % self.get_area_ip()) + if self.silent_interface: + xml_intf += CE_NC_XML_SET_SILENT % str(self.silent_interface).lower() + if self.silent_interface: + self.updates_cmd.append("silent-interface %s" % self.interface) + else: + self.updates_cmd.append("undo silent-interface %s" % self.interface) + + # interface view + self.updates_cmd.append("interface %s" % self.interface) + self.updates_cmd.append("ospf enable %s area %s" % ( + self.process_id, self.get_area_ip())) + if self.cost: + xml_intf += CE_NC_XML_SET_COST % self.cost + self.updates_cmd.append("ospf cost %s" % self.cost) + if self.hello_interval: + xml_intf += CE_NC_XML_SET_HELLO % self.hello_interval + self.updates_cmd.append("ospf timer hello %s" % + self.hello_interval) + if self.dead_interval: + xml_intf += CE_NC_XML_SET_DEAD % self.dead_interval + self.updates_cmd.append("ospf timer dead %s" % self.dead_interval) + if self.auth_mode: + xml_intf += CE_NC_XML_SET_AUTH_MODE % self.auth_mode + if self.auth_mode == "none": + self.updates_cmd.append("undo ospf authentication-mode") + else: + self.updates_cmd.append("ospf authentication-mode %s" % self.auth_mode) + if self.auth_mode == "simple" and self.auth_text_simple: + xml_intf += CE_NC_XML_SET_AUTH_TEXT_SIMPLE % self.auth_text_simple + self.updates_cmd.pop() + self.updates_cmd.append("ospf authentication-mode %s %s" + % (self.auth_mode, self.auth_text_simple)) + elif self.auth_mode in 
["hmac-sha256", "md5", "hmac-md5"] and self.auth_key_id: + xml_intf += CE_NC_XML_SET_AUTH_MD5 % ( + self.auth_key_id, self.auth_text_md5) + self.updates_cmd.pop() + self.updates_cmd.append("ospf authentication-mode %s %s %s" + % (self.auth_mode, self.auth_key_id, self.auth_text_md5)) + else: + pass + + xml_str = CE_NC_XML_BUILD_PROCESS % (self.process_id, + self.get_area_ip(), + (CE_NC_XML_BUILD_MERGE_INTF % xml_intf)) + self.netconf_set_config(xml_str, "SET_INTERFACE_OSPF") + self.changed = True + + def merge_ospf_interface(self): + """merge interface ospf attributes""" + + intf_dict = self.ospf_info["interface"] + + # ospf view + xml_ospf = "" + if intf_dict.get("silentEnable") != str(self.silent_interface).lower(): + xml_ospf += CE_NC_XML_SET_SILENT % str(self.silent_interface).lower() + self.updates_cmd.append("ospf %s" % self.process_id) + self.updates_cmd.append("area %s" % self.get_area_ip()) + if self.silent_interface: + self.updates_cmd.append("silent-interface %s" % self.interface) + else: + self.updates_cmd.append("undo silent-interface %s" % self.interface) + + # interface view + xml_intf = "" + self.updates_cmd.append("interface %s" % self.interface) + if self.cost and intf_dict.get("configCost") != self.cost: + xml_intf += CE_NC_XML_SET_COST % self.cost + self.updates_cmd.append("ospf cost %s" % self.cost) + if self.hello_interval and intf_dict.get("helloInterval") != self.hello_interval: + xml_intf += CE_NC_XML_SET_HELLO % self.hello_interval + self.updates_cmd.append("ospf timer hello %s" % + self.hello_interval) + if self.dead_interval and intf_dict.get("deadInterval") != self.dead_interval: + xml_intf += CE_NC_XML_SET_DEAD % self.dead_interval + self.updates_cmd.append("ospf timer dead %s" % self.dead_interval) + if self.auth_mode: + # NOTE: for security, authentication config will always be update + xml_intf += CE_NC_XML_SET_AUTH_MODE % self.auth_mode + if self.auth_mode == "none": + self.updates_cmd.append("undo ospf authentication-mode") + else: + self.updates_cmd.append("ospf authentication-mode %s" % self.auth_mode) + if self.auth_mode == "simple" and self.auth_text_simple: + xml_intf += CE_NC_XML_SET_AUTH_TEXT_SIMPLE % self.auth_text_simple + self.updates_cmd.pop() + self.updates_cmd.append("ospf authentication-mode %s %s" + % (self.auth_mode, self.auth_text_simple)) + elif self.auth_mode in ["hmac-sha256", "md5", "hmac-md5"] and self.auth_key_id: + xml_intf += CE_NC_XML_SET_AUTH_MD5 % ( + self.auth_key_id, self.auth_text_md5) + self.updates_cmd.pop() + self.updates_cmd.append("ospf authentication-mode %s %s %s" + % (self.auth_mode, self.auth_key_id, self.auth_text_md5)) + else: + pass + if not xml_intf: + self.updates_cmd.pop() # remove command: interface + + if not xml_ospf and not xml_intf: + return + + xml_sum = CE_NC_XML_SET_IF_NAME % self.interface + xml_sum += xml_ospf + xml_intf + xml_str = CE_NC_XML_BUILD_PROCESS % (self.process_id, + self.get_area_ip(), + (CE_NC_XML_BUILD_MERGE_INTF % xml_sum)) + self.netconf_set_config(xml_str, "MERGE_INTERFACE_OSPF") + self.changed = True + + def unset_ospf_interface(self): + """set interface ospf disable, and all its ospf attributes will be removed""" + + intf_dict = self.ospf_info["interface"] + xml_sum = "" + xml_intf = CE_NC_XML_SET_IF_NAME % self.interface + if intf_dict.get("silentEnable") == "true": + xml_sum += CE_NC_XML_BUILD_MERGE_INTF % ( + xml_intf + (CE_NC_XML_SET_SILENT % "false")) + self.updates_cmd.append("ospf %s" % self.process_id) + self.updates_cmd.append("area %s" % self.get_area_ip()) + 
self.updates_cmd.append( + "undo silent-interface %s" % self.interface) + + xml_sum += CE_NC_XML_BUILD_DELETE_INTF % xml_intf + xml_str = CE_NC_XML_BUILD_PROCESS % (self.process_id, + self.get_area_ip(), + xml_sum) + self.netconf_set_config(xml_str, "DELETE_INTERFACE_OSPF") + self.updates_cmd.append("undo ospf cost") + self.updates_cmd.append("undo ospf timer hello") + self.updates_cmd.append("undo ospf timer dead") + self.updates_cmd.append("undo ospf authentication-mode") + self.updates_cmd.append("undo ospf enable %s area %s" % ( + self.process_id, self.get_area_ip())) + self.changed = True + + def check_params(self): + """Check all input params""" + + self.interface = self.interface.replace(" ", "").upper() + + # interface check + if not get_interface_type(self.interface): + self.module.fail_json(msg="Error: interface is invalid.") + + # process_id check + if not self.process_id.isdigit(): + self.module.fail_json(msg="Error: process_id is not digit.") + if int(self.process_id) < 1 or int(self.process_id) > 4294967295: + self.module.fail_json(msg="Error: process_id must be an integer between 1 and 4294967295.") + + # area check + if self.area.isdigit(): + if int(self.area) < 0 or int(self.area) > 4294967295: + self.module.fail_json(msg="Error: area id (Integer) must be between 0 and 4294967295.") + else: + if not is_valid_v4addr(self.area): + self.module.fail_json(msg="Error: area id is invalid.") + + # area authentication check + if self.state == "present": + if self.auth_mode: + if self.auth_mode == "simple": + if self.auth_text_simple and len(self.auth_text_simple) > 8: + self.module.fail_json( + msg="Error: auth_text_simple is not in the range from 1 to 8.") + if self.auth_mode in ["hmac-sha256", "hmac-sha256", "md5"]: + if self.auth_key_id and not self.auth_text_md5: + self.module.fail_json( + msg='Error: auth_key_id and auth_text_md5 should be set at the same time.') + if not self.auth_key_id and self.auth_text_md5: + self.module.fail_json( + msg='Error: auth_key_id and auth_text_md5 should be set at the same time.') + if self.auth_key_id: + if not self.auth_key_id.isdigit(): + self.module.fail_json( + msg="Error: auth_key_id is not digit.") + if int(self.auth_key_id) < 1 or int(self.auth_key_id) > 255: + self.module.fail_json( + msg="Error: auth_key_id is not in the range from 1 to 255.") + if self.auth_text_md5 and len(self.auth_text_md5) > 255: + self.module.fail_json( + msg="Error: auth_text_md5 is not in the range from 1 to 255.") + # cost check + if self.cost: + if not self.cost.isdigit(): + self.module.fail_json(msg="Error: cost is not digit.") + if int(self.cost) < 1 or int(self.cost) > 65535: + self.module.fail_json( + msg="Error: cost is not in the range from 1 to 65535") + + # hello_interval check + if self.hello_interval: + if not self.hello_interval.isdigit(): + self.module.fail_json( + msg="Error: hello_interval is not digit.") + if int(self.hello_interval) < 1 or int(self.hello_interval) > 65535: + self.module.fail_json( + msg="Error: hello_interval is not in the range from 1 to 65535") + + # dead_interval check + if self.dead_interval: + if not self.dead_interval.isdigit(): + self.module.fail_json(msg="Error: dead_interval is not digit.") + if int(self.dead_interval) < 1 or int(self.dead_interval) > 235926000: + self.module.fail_json( + msg="Error: dead_interval is not in the range from 1 to 235926000") + + def get_proposed(self): + """get proposed info""" + + self.proposed["interface"] = self.interface + self.proposed["process_id"] = self.process_id + 
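+        # get_area_ip() normalizes an integer area id to dotted-decimal
+        # (e.g. '100' -> '0.0.0.100'), so proposed, existing and end_state
+        # all report the area in the same form.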
self.proposed["area"] = self.get_area_ip() + self.proposed["cost"] = self.cost + self.proposed["hello_interval"] = self.hello_interval + self.proposed["dead_interval"] = self.dead_interval + self.proposed["silent_interface"] = self.silent_interface + if self.auth_mode: + self.proposed["auth_mode"] = self.auth_mode + if self.auth_mode == "simple": + self.proposed["auth_text_simple"] = self.auth_text_simple + if self.auth_mode in ["hmac-sha256", "hmac-sha256", "md5"]: + self.proposed["auth_key_id"] = self.auth_key_id + self.proposed["auth_text_md5"] = self.auth_text_md5 + self.proposed["state"] = self.state + + def get_existing(self): + """get existing info""" + + if not self.ospf_info: + return + + if self.ospf_info["interface"]: + self.existing["interface"] = self.interface + self.existing["cost"] = self.ospf_info["interface"].get("configCost") + self.existing["hello_interval"] = self.ospf_info["interface"].get("helloInterval") + self.existing["dead_interval"] = self.ospf_info["interface"].get("deadInterval") + self.existing["silent_interface"] = self.ospf_info["interface"].get("silentEnable") + self.existing["auth_mode"] = self.ospf_info["interface"].get("authenticationMode") + self.existing["auth_text_simple"] = self.ospf_info["interface"].get("authTextSimple") + self.existing["auth_key_id"] = self.ospf_info["interface"].get("keyId") + self.existing["auth_text_md5"] = self.ospf_info["interface"].get("authTextMd5") + self.existing["process_id"] = self.ospf_info["processId"] + self.existing["area"] = self.ospf_info["areaId"] + + def get_end_state(self): + """get end state info""" + + ospf_info = self.get_ospf_dict() + if not ospf_info: + return + + if ospf_info["interface"]: + self.end_state["interface"] = self.interface + self.end_state["cost"] = ospf_info["interface"].get("configCost") + self.end_state["hello_interval"] = ospf_info["interface"].get("helloInterval") + self.end_state["dead_interval"] = ospf_info["interface"].get("deadInterval") + self.end_state["silent_interface"] = ospf_info["interface"].get("silentEnable") + self.end_state["auth_mode"] = ospf_info["interface"].get("authenticationMode") + self.end_state["auth_text_simple"] = ospf_info["interface"].get("authTextSimple") + self.end_state["auth_key_id"] = ospf_info["interface"].get("keyId") + self.end_state["auth_text_md5"] = ospf_info["interface"].get("authTextMd5") + self.end_state["process_id"] = ospf_info["processId"] + self.end_state["area"] = ospf_info["areaId"] + + def work(self): + """worker""" + + self.check_params() + self.ospf_info = self.get_ospf_dict() + self.get_existing() + self.get_proposed() + + # deal present or absent + if self.state == "present": + if not self.ospf_info or not self.ospf_info["interface"]: + # create ospf area and set interface config + self.set_ospf_interface() + else: + # merge interface ospf area config + self.merge_ospf_interface() + else: + if self.ospf_info and self.ospf_info["interface"]: + # delete interface ospf area config + self.unset_ospf_interface() + + self.get_end_state() + self.results['changed'] = self.changed + self.results['proposed'] = self.proposed + self.results['existing'] = self.existing + self.results['end_state'] = self.end_state + if self.changed: + self.results['updates'] = self.updates_cmd + else: + self.results['updates'] = list() + + self.module.exit_json(**self.results) + + +def main(): + """Module main""" + + argument_spec = dict( + interface=dict(required=True, type='str'), + process_id=dict(required=True, type='str'), + area=dict(required=True, 
type='str'), + cost=dict(required=False, type='str'), + hello_interval=dict(required=False, type='str'), + dead_interval=dict(required=False, type='str'), + silent_interface=dict(required=False, default=False, type='bool'), + auth_mode=dict(required=False, + choices=['none', 'null', 'hmac-sha256', 'md5', 'hmac-md5', 'simple'], type='str'), + auth_text_simple=dict(required=False, type='str', no_log=True), + auth_key_id=dict(required=False, type='str'), + auth_text_md5=dict(required=False, type='str', no_log=True), + state=dict(required=False, default='present', + choices=['present', 'absent']) + ) + + argument_spec.update(ce_argument_spec) + module = InterfaceOSPF(argument_spec) + module.work() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/cloudengine/ce_ip_interface.py b/plugins/modules/network/cloudengine/ce_ip_interface.py new file mode 100644 index 0000000000..b1a9539c8f --- /dev/null +++ b/plugins/modules/network/cloudengine/ce_ip_interface.py @@ -0,0 +1,739 @@ +#!/usr/bin/python +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: ce_ip_interface +short_description: Manages L3 attributes for IPv4 and IPv6 interfaces on HUAWEI CloudEngine switches. +description: + - Manages Layer 3 attributes for IPv4 and IPv6 interfaces on HUAWEI CloudEngine switches. +author: QijunPan (@QijunPan) +notes: + - Interface must already be a L3 port when using this module. + - Logical interfaces (loopback, vlanif) must be created first. + - C(mask) must be inserted in decimal format (i.e. 24) for + both IPv6 and IPv4. + - A single interface can have multiple IPv6 configured. + - This module requires the netconf system service be enabled on the remote device being managed. + - Recommended connection is C(netconf). + - This module also works with C(local) connections for legacy playbooks. +options: + interface: + description: + - Full name of interface, i.e. 40GE1/0/22, vlanif10. + required: true + addr: + description: + - IPv4 or IPv6 Address. + mask: + description: + - Subnet mask for IPv4 or IPv6 Address in decimal format. + version: + description: + - IP address version. + default: v4 + choices: ['v4','v6'] + ipv4_type: + description: + - Specifies an address type. + The value is an enumerated type. + main, primary IP address. + sub, secondary IP address. + default: main + choices: ['main','sub'] + state: + description: + - Specify desired state of the resource. 
+ default: present + choices: ['present','absent'] +''' + +EXAMPLES = ''' +- name: ip_interface module test + hosts: cloudengine + connection: local + gather_facts: no + vars: + cli: + host: "{{ inventory_hostname }}" + port: "{{ ansible_ssh_port }}" + username: "{{ username }}" + password: "{{ password }}" + transport: cli + + tasks: + - name: Ensure ipv4 address is configured on 10GE1/0/22 + ce_ip_interface: + interface: 10GE1/0/22 + version: v4 + state: present + addr: 20.20.20.20 + mask: 24 + provider: '{{ cli }}' + + - name: Ensure ipv4 secondary address is configured on 10GE1/0/22 + ce_ip_interface: + interface: 10GE1/0/22 + version: v4 + state: present + addr: 30.30.30.30 + mask: 24 + ipv4_type: sub + provider: '{{ cli }}' + + - name: Ensure ipv6 is enabled on 10GE1/0/22 + ce_ip_interface: + interface: 10GE1/0/22 + version: v6 + state: present + provider: '{{ cli }}' + + - name: Ensure ipv6 address is configured on 10GE1/0/22 + ce_ip_interface: + interface: 10GE1/0/22 + version: v6 + state: present + addr: 2001::db8:800:200c:cccb + mask: 64 + provider: '{{ cli }}' +''' + +RETURN = ''' +proposed: + description: k/v pairs of parameters passed into module + returned: always + type: dict + sample: {"addr": "20.20.20.20", "interface": "10GE1/0/22", "mask": "24"} +existing: + description: k/v pairs of existing IP attributes on the interface + returned: always + type: dict + sample: {"ipv4": [{"ifIpAddr": "11.11.11.11", "subnetMask": "255.255.0.0", "addrType": "main"}], + "interface": "10GE1/0/22"} +end_state: + description: k/v pairs of IP attributes after module execution + returned: always + type: dict + sample: {"ipv4": [{"ifIpAddr": "20.20.20.20", "subnetMask": "255.255.255.0", "addrType": "main"}], + "interface": "10GE1/0/22"} +updates: + description: commands sent to the device + returned: always + type: list + sample: ["interface 10GE1/0/22", "ip address 20.20.20.20 24"] +changed: + description: check to see if a change was made on the device + returned: always + type: bool + sample: true +''' + +import re +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.network.cloudengine.ce import get_nc_config, set_nc_config, ce_argument_spec + + +CE_NC_GET_INTF = """ + + + + + %s + + + + + + + + + +""" + +CE_NC_ADD_IPV4 = """ + + + + + %s + + + + %s + %s + %s + + + + + + + +""" + +CE_NC_MERGE_IPV4 = """ + + + + + %s + + + + %s + %s + main + + + %s + %s + main + + + + + + + +""" + + +CE_NC_DEL_IPV4 = """ + + + + + %s + + + + %s + %s + %s + + + + + + + +""" + +CE_NC_ADD_IPV6 = """ + + + + + %s + + + + %s + %s + global + + + + + + + +""" + +CE_NC_DEL_IPV6 = """ + + + + + %s + + + + %s + %s + global + + + + + + + +""" + +CE_NC_MERGE_IPV6_ENABLE = """ + + + + + %s + + %s + + + + + +""" + + +def get_interface_type(interface): + """Gets the type of interface, such as 10GE, ETH-TRUNK, VLANIF...""" + + if interface is None: + return None + + if interface.upper().startswith('GE'): + return 'ge' + elif interface.upper().startswith('10GE'): + return '10ge' + elif interface.upper().startswith('25GE'): + return '25ge' + elif interface.upper().startswith('4X10GE'): + return '4x10ge' + elif interface.upper().startswith('40GE'): + return '40ge' + elif interface.upper().startswith('100GE'): + return '100ge' + elif interface.upper().startswith('VLANIF'): + return 'vlanif' + elif interface.upper().startswith('LOOPBACK'): + return 'loopback' + elif interface.upper().startswith('METH'): + return 'meth' + elif 
interface.upper().startswith('ETH-TRUNK'): + return 'eth-trunk' + elif interface.upper().startswith('VBDIF'): + return 'vbdif' + elif interface.upper().startswith('NVE'): + return 'nve' + elif interface.upper().startswith('TUNNEL'): + return 'tunnel' + elif interface.upper().startswith('ETHERNET'): + return 'ethernet' + elif interface.upper().startswith('FCOE-PORT'): + return 'fcoe-port' + elif interface.upper().startswith('FABRIC-PORT'): + return 'fabric-port' + elif interface.upper().startswith('STACK-PORT'): + return 'stack-port' + elif interface.upper().startswith('NULL'): + return 'null' + else: + return None + + +def is_valid_v4addr(addr): + """check is ipv4 addr is valid""" + + if not addr: + return False + + if addr.find('.') != -1: + addr_list = addr.split('.') + if len(addr_list) != 4: + return False + for each_num in addr_list: + if not each_num.isdigit(): + return False + if int(each_num) > 255: + return False + return True + + return False + + +class IpInterface(object): + """ + Manages L3 attributes for IPv4 and IPv6 interfaces. + """ + + def __init__(self, argument_spec): + self.spec = argument_spec + self.module = None + self.__init_module__() + + # module input info] + self.interface = self.module.params['interface'] + self.addr = self.module.params['addr'] + self.mask = self.module.params['mask'] + self.version = self.module.params['version'] + self.ipv4_type = self.module.params['ipv4_type'] + self.state = self.module.params['state'] + + # state + self.changed = False + self.updates_cmd = list() + self.results = dict() + self.proposed = dict() + self.existing = dict() + self.end_state = dict() + # interface info + self.intf_info = dict() + self.intf_type = None + + def __init_module__(self): + """ init module """ + + required_if = [("version", "v4", ("addr", "mask"))] + required_together = [("addr", "mask")] + self.module = AnsibleModule( + argument_spec=self.spec, + required_if=required_if, + required_together=required_together, + supports_check_mode=True + ) + + def netconf_set_config(self, xml_str, xml_name): + """ netconf set config """ + + rcv_xml = set_nc_config(self.module, xml_str) + if "" not in rcv_xml: + self.module.fail_json(msg='Error: %s failed.' % xml_name) + + def get_interface_dict(self, ifname): + """ get one interface attributes dict.""" + + intf_info = dict() + conf_str = CE_NC_GET_INTF % ifname + rcv_xml = get_nc_config(self.module, conf_str) + + if "" in rcv_xml: + return intf_info + + # get interface base info + intf = re.findall( + r'.*(.*).*\s*' + r'(.*).*', rcv_xml) + + if intf: + intf_info = dict(ifName=intf[0][0], + isL2SwitchPort=intf[0][1]) + + # get interface ipv4 address info + ipv4_info = re.findall( + r'.*(.*).*\s*(.*)' + r'.*\s*(.*).*', rcv_xml) + intf_info["am4CfgAddr"] = list() + for info in ipv4_info: + intf_info["am4CfgAddr"].append( + dict(ifIpAddr=info[0], subnetMask=info[1], addrType=info[2])) + + # get interface ipv6 address info + ipv6_info = re.findall( + r'.*.*\s*(.*).*', rcv_xml) + if not ipv6_info: + self.module.fail_json(msg='Error: Fail to get interface %s IPv6 state.' % self.interface) + else: + intf_info["enableFlag"] = ipv6_info[0] + + # get interface ipv6 enable info + ipv6_info = re.findall( + r'.*(.*).*\s*(.*)' + r'.*\s*(.*).*', rcv_xml) + + intf_info["am6CfgAddr"] = list() + for info in ipv6_info: + intf_info["am6CfgAddr"].append( + dict(ifIp6Addr=info[0], addrPrefixLen=info[1], addrType6=info[2])) + + return intf_info + + def convert_len_to_mask(self, masklen): + """convert mask length to ip address mask, i.e. 
+    def convert_len_to_mask(self, masklen):
+        """convert mask length to ip address mask, i.e. 24 to 255.255.255.0"""
+
+        mask_int = ["0"] * 4
+        length = int(masklen)
+
+        if length > 32:
+            self.module.fail_json(msg='Error: IPv4 ipaddress mask length is invalid.')
+        if length < 8:
+            mask_int[0] = str(int((0xFF << (8 - length % 8)) & 0xFF))
+        if length >= 8:
+            mask_int[0] = '255'
+            mask_int[1] = str(int((0xFF << (16 - (length % 16))) & 0xFF))
+        if length >= 16:
+            mask_int[1] = '255'
+            mask_int[2] = str(int((0xFF << (24 - (length % 24))) & 0xFF))
+        if length >= 24:
+            mask_int[2] = '255'
+            mask_int[3] = str(int((0xFF << (32 - (length % 32))) & 0xFF))
+        if length == 32:
+            mask_int[3] = '255'
+
+        return '.'.join(mask_int)
+
+    def is_ipv4_exist(self, addr, maskstr, ipv4_type):
+        """Check whether the given IPv4 address is already configured."""
+
+        addrs = self.intf_info["am4CfgAddr"]
+        if not addrs:
+            return False
+
+        for address in addrs:
+            if address["ifIpAddr"] == addr:
+                return address["subnetMask"] == maskstr and address["addrType"] == ipv4_type
+        return False
+
+    def get_ipv4_main_addr(self):
+        """get IPv4 main address"""
+
+        addrs = self.intf_info["am4CfgAddr"]
+        if not addrs:
+            return None
+
+        for address in addrs:
+            if address["addrType"] == "main":
+                return address
+
+        return None
+
+    def is_ipv6_exist(self, addr, masklen):
+        """Check whether the given IPv6 address is already configured."""
+
+        addrs = self.intf_info["am6CfgAddr"]
+        if not addrs:
+            return False
+
+        for address in addrs:
+            if address["ifIp6Addr"] == addr.upper():
+                if address["addrPrefixLen"] == masklen and address["addrType6"] == "global":
+                    return True
+                else:
+                    self.module.fail_json(
+                        msg="Error: Input IPv6 address or mask is invalid.")
+
+        return False
+
+    def set_ipv4_addr(self, ifname, addr, mask, ipv4_type):
+        """Set interface IPv4 address"""
+
+        if not addr or not mask or not ipv4_type:
+            return
+
+        maskstr = self.convert_len_to_mask(mask)
+        if self.state == "present":
+            if not self.is_ipv4_exist(addr, maskstr, ipv4_type):
+                # primary IP address
+                if ipv4_type == "main":
+                    main_addr = self.get_ipv4_main_addr()
+                    if not main_addr:
+                        # no ipv4 main address in this interface
+                        xml_str = CE_NC_ADD_IPV4 % (ifname, addr, maskstr, ipv4_type)
+                        self.netconf_set_config(xml_str, "ADD_IPV4_ADDR")
+                    else:
+                        # remove old address and set new
+                        xml_str = CE_NC_MERGE_IPV4 % (ifname, main_addr["ifIpAddr"],
+                                                      main_addr["subnetMask"],
+                                                      addr, maskstr)
+                        self.netconf_set_config(xml_str, "MERGE_IPV4_ADDR")
+                # secondary IP address
+                else:
+                    xml_str = CE_NC_ADD_IPV4 % (ifname, addr, maskstr, ipv4_type)
+                    self.netconf_set_config(xml_str, "ADD_IPV4_ADDR")
+
+                self.updates_cmd.append("interface %s" % ifname)
+                if ipv4_type == "main":
+                    self.updates_cmd.append("ip address %s %s" % (addr, maskstr))
+                else:
+                    self.updates_cmd.append("ip address %s %s sub" % (addr, maskstr))
+                self.changed = True
+        else:
+            if self.is_ipv4_exist(addr, maskstr, ipv4_type):
+                xml_str = CE_NC_DEL_IPV4 % (ifname, addr, maskstr, ipv4_type)
+                self.netconf_set_config(xml_str, "DEL_IPV4_ADDR")
+                self.updates_cmd.append("interface %s" % ifname)
+                if ipv4_type == "main":
+                    self.updates_cmd.append("undo ip address %s %s" % (addr, maskstr))
+                else:
+                    self.updates_cmd.append("undo ip address %s %s sub" % (addr, maskstr))
+                self.changed = True
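set_ipv4_addr relies on convert_len_to_mask above, which fills in the four octets with separate shift expressions. A more direct equivalent, shown as a standalone sketch (prefix_to_dotted_mask is an illustrative name, not part of the module):

def prefix_to_dotted_mask(masklen):
    """Convert a prefix length (0-32) to a dotted-quad netmask, e.g. 24 -> 255.255.255.0."""
    if not 0 <= masklen <= 32:
        raise ValueError('IPv4 mask length must be between 0 and 32')
    # Build a 32-bit integer with `masklen` leading ones, then split it into four octets.
    mask = (0xFFFFFFFF << (32 - masklen)) & 0xFFFFFFFF
    return '.'.join(str((mask >> shift) & 0xFF) for shift in (24, 16, 8, 0))

assert prefix_to_dotted_mask(24) == '255.255.255.0'
assert prefix_to_dotted_mask(19) == '255.255.224.0'

Both forms agree on every valid length; the single-shift version also makes the 0 and 32 edge cases easy to verify by hand.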
+    def set_ipv6_addr(self, ifname, addr, mask):
+        """Set interface IPv6 address"""
+
+        if not addr or not mask:
+            return
+
+        if self.state == "present":
+            self.updates_cmd.append("interface %s" % ifname)
+            if self.intf_info["enableFlag"] == "false":
+                xml_str = CE_NC_MERGE_IPV6_ENABLE % (ifname, "true")
+                self.netconf_set_config(xml_str, "SET_IPV6_ENABLE")
+                self.updates_cmd.append("ipv6 enable")
+                self.changed = True
+
+            if not self.is_ipv6_exist(addr, mask):
+                xml_str = CE_NC_ADD_IPV6 % (ifname, addr, mask)
+                self.netconf_set_config(xml_str, "ADD_IPV6_ADDR")
+
+                self.updates_cmd.append("ipv6 address %s %s" % (addr, mask))
+                self.changed = True
+
+            if not self.changed:
+                self.updates_cmd.pop()
+        else:
+            if self.is_ipv6_exist(addr, mask):
+                xml_str = CE_NC_DEL_IPV6 % (ifname, addr, mask)
+                self.netconf_set_config(xml_str, "DEL_IPV6_ADDR")
+                self.updates_cmd.append("interface %s" % ifname)
+                self.updates_cmd.append(
+                    "undo ipv6 address %s %s" % (addr, mask))
+                self.changed = True
+
+    def set_ipv6_enable(self, ifname):
+        """Set interface IPv6 enable"""
+
+        if self.state == "present":
+            if self.intf_info["enableFlag"] == "false":
+                xml_str = CE_NC_MERGE_IPV6_ENABLE % (ifname, "true")
+                self.netconf_set_config(xml_str, "SET_IPV6_ENABLE")
+                self.updates_cmd.append("interface %s" % ifname)
+                self.updates_cmd.append("ipv6 enable")
+                self.changed = True
+        else:
+            if self.intf_info["enableFlag"] == "true":
+                xml_str = CE_NC_MERGE_IPV6_ENABLE % (ifname, "false")
+                self.netconf_set_config(xml_str, "SET_IPV6_DISABLE")
+                self.updates_cmd.append("interface %s" % ifname)
+                self.updates_cmd.append("undo ipv6 enable")
+                self.changed = True
+
+    def check_params(self):
+        """Check all input params"""
+
+        # check interface type
+        if self.interface:
+            self.intf_type = get_interface_type(self.interface)
+            if not self.intf_type:
+                self.module.fail_json(
+                    msg='Error: interface name %s is invalid.' % self.interface)
+
+        # ipv4 addr and mask check
+        if self.version == "v4":
+            if not is_valid_v4addr(self.addr):
+                self.module.fail_json(
+                    msg='Error: %s is not a valid IPv4 address.' % self.addr)
+            if not self.mask.isdigit():
+                self.module.fail_json(msg='Error: mask is invalid.')
+            if int(self.mask) > 32 or int(self.mask) < 1:
+                self.module.fail_json(
+                    msg='Error: mask must be an integer between 1 and 32.')
+
+        # ipv6 mask check
+        if self.version == "v6":
+            if self.addr:
+                if not self.mask.isdigit():
+                    self.module.fail_json(msg='Error: mask is invalid.')
+                if int(self.mask) > 128 or int(self.mask) < 1:
+                    self.module.fail_json(
+                        msg='Error: mask must be an integer between 1 and 128.')
+
+        # interface and layer3 check
+        self.intf_info = self.get_interface_dict(self.interface)
+        if not self.intf_info:
+            self.module.fail_json(msg='Error: interface %s does not exist.' % self.interface)
+
+        if self.intf_info["isL2SwitchPort"] == "true":
+            self.module.fail_json(msg='Error: interface %s is layer2.' % self.interface)
+
+    def get_proposed(self):
+        """get proposed info"""
+
+        self.proposed["state"] = self.state
+        self.proposed["addr"] = self.addr
+        self.proposed["mask"] = self.mask
+        self.proposed["ipv4_type"] = self.ipv4_type
+        self.proposed["version"] = self.version
+        self.proposed["interface"] = self.interface
+
+    def get_existing(self):
+        """get existing info"""
+
+        self.existing["interface"] = self.interface
+        self.existing["ipv4addr"] = self.intf_info["am4CfgAddr"]
+        self.existing["ipv6addr"] = self.intf_info["am6CfgAddr"]
+        self.existing["ipv6enable"] = self.intf_info["enableFlag"]
+
+    def get_end_state(self):
+        """get end state info"""
+
+        intf_info = self.get_interface_dict(self.interface)
+        self.end_state["interface"] = self.interface
+        self.end_state["ipv4addr"] = intf_info["am4CfgAddr"]
+        self.end_state["ipv6addr"] = intf_info["am6CfgAddr"]
+        self.end_state["ipv6enable"] = intf_info["enableFlag"]
+
+    def work(self):
+        """worker"""
+
+        self.check_params()
+        self.get_existing()
+        self.get_proposed()
+
+        # deal present or absent
+        if self.version == "v4":
+            self.set_ipv4_addr(self.interface, self.addr, self.mask, self.ipv4_type)
+        else:
+            if not self.addr and not self.mask:
+                self.set_ipv6_enable(self.interface)
+            else:
+                self.set_ipv6_addr(self.interface, self.addr, self.mask)
+
+        self.get_end_state()
+        self.results['changed'] = self.changed
+        self.results['proposed'] = self.proposed
+        self.results['existing'] = self.existing
+        self.results['end_state'] = self.end_state
+        if self.changed:
+            self.results['updates'] = self.updates_cmd
+        else:
+            self.results['updates'] = list()
+
+        self.module.exit_json(**self.results)
+
+
+def main():
+    """Module main"""
+
+    argument_spec = dict(
+        interface=dict(required=True),
+        addr=dict(required=False),
+        version=dict(required=False, choices=['v4', 'v6'],
+                     default='v4'),
+        mask=dict(type='str', required=False),
+        ipv4_type=dict(required=False, choices=['main', 'sub'], default='main'),
+        state=dict(required=False, default='present',
+                   choices=['present', 'absent'])
+    )
+
+    argument_spec.update(ce_argument_spec)
+    module = IpInterface(argument_spec)
+    module.work()
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/network/cloudengine/ce_is_is_instance.py b/plugins/modules/network/cloudengine/ce_is_is_instance.py
new file mode 100644
index 0000000000..555c5ba778
--- /dev/null
+++ b/plugins/modules/network/cloudengine/ce_is_is_instance.py
@@ -0,0 +1,330 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright 2019 Red Hat
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+DOCUMENTATION = r'''
+---
+module: ce_is_is_instance
+author: xuxiaowei0512 (@CloudEngine-Ansible)
+short_description: Manages IS-IS process id configuration on HUAWEI CloudEngine devices.
+description:
+  - Creates an IS-IS instance with the given process id, or deletes a process id, on HUAWEI CloudEngine devices.
+notes:
+  - This module requires the netconf system service be enabled on the remote device being managed.
+  - This module works with connection C(netconf).
+options:
+  instance_id:
+    description:
+      - Specifies the id of an IS-IS process. The value is an integer from 1 to 4294967295.
+ required: true + type: int + vpn_name: + description: + - VPN Instance, associate the VPN instance with the corresponding IS-IS process. + type: str + state: + description: + - Determines whether the config should be present or not on the device. + default: present + type: str + choices: ['present', 'absent'] +''' + +EXAMPLES = r''' + - name: Set isis process + ce_is_is_instance: + instance_id: 3 + state: present + + - name: Unset isis process + ce_is_is_instance: + instance_id: 3 + state: absent + + - name: check isis process + ce_is_is_instance: + instance_id: 4294967296 + state: present + + - name: Set vpn name + ce_is_is_instance: + instance_id: 22 + vpn_name: vpn1 + state: present + + - name: check vpn name + ce_is_is_instance: + instance_id: 22 + vpn_name: vpn1234567896321452212221556asdasdasdasdsadvdv + state: present +''' + +RETURN = r''' +proposed: + description: k/v pairs of parameters passed into module + returned: always + type: dict + sample: { + "instance_id": 1, + "vpn_name": null + } +existing: + description: k/v pairs of existing configuration + returned: always + type: dict + sample: { + "session": {} + } +end_state: + description: k/v pairs of configuration after module execution + returned: always + type: dict + sample: { + "session": { + "instance_id": 1, + "vpn_name": null + } + } +updates: + description: commands sent to the device + returned: always + type: list + sample: [ + "isis 1" + ] +changed: + description: check to see if a change was made on the device + returned: always + type: bool + sample: true +''' + +from xml.etree import ElementTree +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.network.cloudengine.ce import get_nc_config, set_nc_config + +CE_NC_GET_ISIS = """ + + + %s + + +""" + +CE_NC_GET_ISIS_INSTANCE = """ + + + %s + + + +""" + + +def is_valid_ip_vpn(vpname): + """check ip vpn""" + + if not vpname: + return False + + if vpname == "_public_": + return False + + if len(vpname) < 1 or len(vpname) > 31: + return False + + return True + + +class ISIS_Instance(object): + """Manages ISIS Instance""" + + def __init__(self, argument_spec): + self.spec = argument_spec + self.module = None + self.__init_module__() + + # module input info + self.instance_id = self.module.params['instance_id'] + self.vpn_name = self.module.params['vpn_name'] + self.state = self.module.params['state'] + + # state + self.changed = False + self.isis_dict = dict() + self.updates_cmd = list() + self.commands = list() + self.results = dict() + self.proposed = dict() + self.existing = dict() + self.end_state = dict() + + def __init_module__(self): + """init module""" + self.module = AnsibleModule( + argument_spec=self.spec, supports_check_mode=True) + + def get_isis_dict(self): + """isis config dict""" + isis_dict = dict() + isis_dict["instance"] = dict() + conf_str = CE_NC_GET_ISIS % ( + (CE_NC_GET_ISIS_INSTANCE % self.instance_id)) + + xml_str = get_nc_config(self.module, conf_str) + if "" in xml_str: + return isis_dict + + xml_str = xml_str.replace('\r', '').replace('\n', '').\ + replace('xmlns="urn:ietf:params:xml:ns:netconf:base:1.0"', "").\ + replace('xmlns="http://www.huawei.com/netconf/vrp"', "") + root = ElementTree.fromstring(xml_str) + + # get isis info + glb = root.find("isiscomm/isSites/isSite") + if glb: + for attr in glb: + isis_dict["instance"][attr.tag] = attr.text + + return isis_dict + + def config_session(self): + """configures isis""" + xml_str = "" + instance = self.isis_dict["instance"] + 
if not self.instance_id: + return xml_str + + if self.state == "present": + xml_str = "%s" % self.instance_id + self.updates_cmd.append("isis %s" % self.instance_id) + + if self.vpn_name: + xml_str += "%s" % self.vpn_name + self.updates_cmd.append("vpn-instance %s" % self.vpn_name) + else: + # absent + if self.instance_id and str(self.instance_id) == instance.get("instanceId"): + xml_str = "%s" % self.instance_id + self.updates_cmd.append("undo isis %s" % self.instance_id) + + if self.state == "present": + return '' + xml_str + '' + else: + if xml_str: + return '' + xml_str + '' + + def netconf_load_config(self, xml_str): + """load isis config by netconf""" + + if not xml_str: + return + + xml_cfg = """ + + + %s + + """ % xml_str + set_nc_config(self.module, xml_cfg) + self.changed = True + + def check_params(self): + """Check all input params""" + + # check instance id + if not self.instance_id: + self.module.fail_json(msg="Error: Missing required arguments: instance_id.") + + if self.instance_id: + if self.instance_id < 1 or self.instance_id > 4294967295: + self.module.fail_json(msg="Error: Instance id is not ranges from 1 to 4294967295.") + + # check vpn_name + if self.vpn_name: + if not is_valid_ip_vpn(self.vpn_name): + self.module.fail_json(msg="Error: Session vpn_name is invalid.") + + def get_proposed(self): + """get proposed info""" + # base config + self.proposed["instance_id"] = self.instance_id + self.proposed["vpn_name"] = self.vpn_name + self.proposed["state"] = self.state + + def get_existing(self): + """get existing info""" + + if not self.isis_dict: + self.existing["instance"] = None + + self.existing["instance"] = self.isis_dict.get("instance") + + def get_end_state(self): + """get end state info""" + + isis_dict = self.get_isis_dict() + if not isis_dict: + self.end_state["instance"] = None + + self.end_state["instance"] = isis_dict.get("instance") + + if self.end_state == self.existing: + self.changed = False + + def work(self): + """worker""" + self.check_params() + self.isis_dict = self.get_isis_dict() + self.get_existing() + self.get_proposed() + + # deal present or absent + xml_str = '' + if self.instance_id: + cfg_str = self.config_session() + if cfg_str: + xml_str += cfg_str + + # update to device + if xml_str: + self.netconf_load_config(xml_str) + self.changed = True + + self.get_end_state() + self.results['changed'] = self.changed + self.results['proposed'] = self.proposed + self.results['existing'] = self.existing + self.results['end_state'] = self.end_state + if self.changed: + self.results['updates'] = self.updates_cmd + else: + self.results['updates'] = list() + + self.module.exit_json(**self.results) + + +def main(): + """Module main""" + argument_spec = dict( + instance_id=dict(required=True, type='int'), + vpn_name=dict(required=False, type='str'), + state=dict(required=False, default='present', choices=['present', 'absent']) + ) + + module = ISIS_Instance(argument_spec) + module.work() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/cloudengine/ce_is_is_interface.py b/plugins/modules/network/cloudengine/ce_is_is_interface.py new file mode 100644 index 0000000000..41e47d0243 --- /dev/null +++ b/plugins/modules/network/cloudengine/ce_is_is_interface.py @@ -0,0 +1,788 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright 2019 Red Hat +# GNU General Public License v3.0+ +# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + 
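Like ce_is_is_instance above, the modules below fetch state over NETCONF, strip the namespace declarations with str.replace(), and walk the reply with ElementTree. A self-contained sketch of that parsing step (the reply fragment is hypothetical):

from xml.etree import ElementTree

# Hypothetical NETCONF reply body; a real reply also carries the Huawei vrp
# namespace, which these modules remove with str.replace() before parsing.
reply = ('<data><isiscomm><isSites><isSite>'
         '<instanceId>3</instanceId><vpnName>vpn1</vpnName>'
         '</isSite></isSites></isiscomm></data>')

root = ElementTree.fromstring(reply)
site = root.find('isiscomm/isSites/isSite')
instance = {}
if site is not None:  # truth-testing an Element checks for children, not existence
    instance = {child.tag: child.text for child in site}
assert instance == {'instanceId': '3', 'vpnName': 'vpn1'}

Note the 'is not None' test: get_isis_dict in ce_is_is_instance uses 'if glb:', which is False for a matched element that has no children; ce_is_is_view later in this commit uses the safer 'if glb is not None:'.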
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+DOCUMENTATION = '''
+---
+module: ce_is_is_interface
+author: xuxiaowei0512 (@CloudEngine-Ansible)
+short_description: Manages isis interface configuration on HUAWEI CloudEngine devices.
+description:
+  - Manages IS-IS configuration on an interface, enabling an IS-IS process on it and
+    setting circuit-level parameters, on HUAWEI CloudEngine devices.
+notes:
+  - Interface must already be an L3 port when using this module.
+  - This module requires the netconf system service be enabled on the remote device being managed.
+  - This module works with connection C(netconf).
+options:
+  instance_id:
+    description:
+      - Specifies the id of an IS-IS process.
+        The value is an integer from 1 to 4294967295.
+    required: true
+    type: int
+  ifname:
+    description:
+      - An L3 interface.
+    required: true
+    type: str
+  leveltype:
+    description:
+      - Circuit level type of the interface, one of three levels.
+    type: str
+    choices: ['level_1', 'level_2', 'level_1_2']
+  level1dispriority:
+    description:
+      - DIS priority of the interface at Level-1.
+        The value is an integer from 0 to 127.
+    type: int
+  level2dispriority:
+    description:
+      - DIS priority of the interface at Level-2.
+        The value is an integer from 0 to 127.
+    type: int
+  silentenable:
+    description:
+      - Enable the silent interface function; a silent interface does not send IS-IS packets.
+        The value is a bool type.
+    type: bool
+  silentcost:
+    description:
+      - Specifies whether the routing cost of the silent interface is 0.
+        The value is a bool type.
+    type: bool
+  typep2penable:
+    description:
+      - Simulate the network type of the interface as P2P.
+        The value is a bool type.
+    type: bool
+  snpacheck:
+    description:
+      - Enable SNPA check for LSPs and SNPs.
+        The value is a bool type.
+    type: bool
+  p2pnegotiationmode:
+    description:
+      - Set the P2P neighbor negotiation type.
+    type: str
+    choices: ['2_way', '3_way', '3_wayonly']
+  p2ppeeripignore:
+    description:
+      - When a P2P hello packet is received, no IP address check is performed.
+        The value is a bool type.
+    type: bool
+  ppposicpcheckenable:
+    description:
+      - Check the OSICP negotiation status of the PPP link protocol on the interface.
+        The value is a bool type.
+    type: bool
+  level1cost:
+    description:
+      - Specifies the link cost of the interface when performing Level-1 SPF calculation.
+        The value is an integer from 0 to 16777215.
+    type: int
+  level2cost:
+    description:
+      - Specifies the link cost of the interface when performing Level-2 SPF calculation.
+        The value is an integer from 0 to 16777215.
+    type: int
+  bfdstaticen:
+    description:
+      - Configure static BFD on a specific interface enabled with IS-IS.
+        The value is a bool type.
+    type: bool
+  bfdblocken:
+    description:
+      - Block the interface from dynamically creating BFD sessions.
+        The value is a bool type.
+    type: bool
+  state:
+    description:
+      - Determines whether the config should be present or not on the device.
+ type: str + default: 'present' + choices: ['present', 'absent'] +''' + +EXAMPLES = ''' + - name: "create vlan and config vlanif" + ce_config: + lines: 'vlan {{ test_vlan_id }},quit,interface {{test_intf_vlanif}},ip address {{test_vlanif_ip}} 24' + match: none + + - name: "create eth-trunk and config eth-trunk" + ce_config: + lines: 'interface {{test_intf_trunk}},undo portswitch,ip address {{test_trunk_ip}} 24' + match: none + + - name: "create vpn instance" + ce_config: + lines: 'ip vpn-instance {{test_vpn}},ipv4-family' + match: none + + - name: Set isis circuit-level + ce_is_is_interface: + instance_id: 3 + ifname: Eth-Trunk10 + leveltype: level_1_2 + state: present + + - name: Set isis level1dispriority + ce_is_is_interface: + instance_id: 3 + ifname: Eth-Trunk10 + level1dispriority: 0 + state: present + + - name: Set isis level2dispriority + ce_is_is_interface: + instance_id: 3 + ifname: Eth-Trunk10 + level2dispriority: 0 + state: present + + - name: Set isis silentenable + ce_is_is_interface: + instance_id: 3 + ifname: Eth-Trunk10 + silentenable: true + state: present + + - name: Set vpn name + ce_is_is_instance: + instance_id: 22 + vpn_name: vpn1 + state: present +''' + +RETURN = ''' +proposed: + description: k/v pairs of parameters passed into module + returned: always + type: dict + sample: { + "addr_type": null, + "create_type": null, + "dest_addr": null, + "out_if_name": "10GE1/0/1", + "session_name": "bfd_l2link", + "src_addr": null, + "state": "present", + "use_default_ip": true, + "vrf_name": null + } +existing: + description: k/v pairs of existing configuration + returned: always + type: dict + sample: { + "session": {} + } +end_state: + description: k/v pairs of configuration after module execution + returned: always + type: dict + sample: { + "session": { + "addrType": "IPV4", + "createType": "SESS_STATIC", + "destAddr": null, + "outIfName": "10GE1/0/1", + "sessName": "bfd_l2link", + "srcAddr": null, + "useDefaultIp": "true", + "vrfName": null + } + } +updates: + description: commands sent to the device + returned: always + type: list + sample: [ + "bfd bfd_l2link bind peer-ip default-ip interface 10ge1/0/1" + ] +changed: + description: check to see if a change was made on the device + returned: always + type: bool + sample: true +''' + +import sys +import socket +from xml.etree import ElementTree +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.network.cloudengine.ce import get_nc_config, set_nc_config + +CE_NC_GET_ISIS = """ + + + %s + + +""" + +CE_NC_GET_ISIS_INTERFACE = """ + + + %s + + + + + + + + + + + + + + + + + + + +""" + +CE_NC_MERGE_ISIS_INTERFACE = """ + + + %s + + + %s + + + + +""" + +CE_NC_DELETE_ISIS_INTERFACE = """ + + + %s + + + %s + + + + +""" + +CE_NC_GET_ISIS_BFDINTERFACE = """ + + + %s + + + afIpv4 + 0 + + + + + + + + + + +""" + +CE_NC_MERGE_ISIS_BFDINTERFACE = """ + + + %s + + + afIpv4 + 0 + + + %s + + + + + + +""" + +CE_NC_DELETE_ISIS_BFDINTERFACE = """ + + + %s + + + afIpv4 + 0 + + + %s + + + + + + +""" + + +def is_valid_ip_vpn(vpname): + """check ip vpn""" + + if not vpname: + return False + + if vpname == "_public_": + return False + + if len(vpname) < 1 or len(vpname) > 31: + return False + + return True + + +def check_ip_addr(ipaddr): + """check ip address, Supports IPv4 and IPv6""" + + if not ipaddr or '\x00' in ipaddr: + return False + + try: + res = socket.getaddrinfo(ipaddr, 0, socket.AF_UNSPEC, + socket.SOCK_STREAM, + 0, socket.AI_NUMERICHOST) + return bool(res) + except 
socket.gaierror: + err = sys.exc_info()[1] + if err.args[0] == socket.EAI_NONAME: + return False + raise + + return True + + +def check_default_ip(ipaddr): + """check the default multicast IP address""" + + # The value ranges from 224.0.0.107 to 224.0.0.250 + if not check_ip_addr(ipaddr): + return False + + if ipaddr.count(".") != 3: + return False + + ips = ipaddr.split(".") + if ips[0] != "224" or ips[1] != "0" or ips[2] != "0": + return False + + if not ips[3].isdigit() or int(ips[3]) < 107 or int(ips[3]) > 250: + return False + + return True + + +def get_interface_type(interface): + """get the type of interface, such as 10GE, ETH-TRUNK, VLANIF...""" + + if interface.upper().startswith('GE'): + return 'ge' + elif interface.upper().startswith('10GE'): + return '10ge' + elif interface.upper().startswith('25GE'): + return '25ge' + elif interface.upper().startswith('4X10GE'): + return '4x10ge' + elif interface.upper().startswith('40GE'): + return '40ge' + elif interface.upper().startswith('100GE'): + return '100ge' + elif interface.upper().startswith('VLANIF'): + return 'vlanif' + elif interface.upper().startswith('LOOPBACK'): + return 'loopback' + elif interface.upper().startswith('METH'): + return 'meth' + elif interface.upper().startswith('ETH-TRUNK'): + return 'eth-trunk' + elif interface.upper().startswith('VBDIF'): + return 'vbdif' + elif interface.upper().startswith('NVE'): + return 'nve' + elif interface.upper().startswith('TUNNEL'): + return 'tunnel' + elif interface.upper().startswith('ETHERNET'): + return 'ethernet' + elif interface.upper().startswith('FCOE-PORT'): + return 'fcoe-port' + elif interface.upper().startswith('FABRIC-PORT'): + return 'fabric-port' + elif interface.upper().startswith('STACK-PORT'): + return 'stack-port' + elif interface.upper().startswith('NULL'): + return'null' + else: + return None + + +class ISIS_Instance(object): + """Manages ISIS Instance""" + + def __init__(self, argument_spec): + self.spec = argument_spec + self.module = None + self.__init_module__() + + # module input info + self.instance_id = self.module.params['instance_id'] + self.ifname = self.module.params['ifname'] + self.leveltype = self.module.params['leveltype'] + self.level1dispriority = self.module.params['level1dispriority'] + self.level2dispriority = self.module.params['level2dispriority'] + self.silentenable = self.module.params['silentenable'] + self.silentcost = self.module.params['silentcost'] + self.typep2penable = self.module.params['typep2penable'] + self.snpacheck = self.module.params['snpacheck'] + self.p2pnegotiationmode = self.module.params['p2pnegotiationmode'] + self.p2ppeeripignore = self.module.params['p2ppeeripignore'] + self.ppposicpcheckenable = self.module.params['ppposicpcheckenable'] + self.level1cost = self.module.params['level1cost'] + self.level2cost = self.module.params['level2cost'] + self.bfdstaticen = self.module.params['bfdstaticen'] + self.bfdblocken = self.module.params['bfdblocken'] + self.state = self.module.params['state'] + + # state + self.changed = False + self.isis_dict = dict() + self.updates_cmd = list() + self.commands = list() + self.results = dict() + self.proposed = dict() + self.existing = dict() + self.end_state = dict() + + def __init_module__(self): + """init module""" + mutually_exclusive = [["level1dispriority", "level2dispriority"], + ["level1cost", "level2cost"]] + self.module = AnsibleModule( + argument_spec=self.spec, + mutually_exclusive=mutually_exclusive, + supports_check_mode=True) + + def get_isis_dict(self): + """bfd 
config dict""" + + isis_dict = dict() + isis_dict["instance"] = dict() + conf_str = CE_NC_GET_ISIS % ( + (CE_NC_GET_ISIS_INTERFACE % self.instance_id)) + if self.bfdstaticen or self.bfdblocken: + conf_str = CE_NC_GET_ISIS % ( + (CE_NC_GET_ISIS_BFDINTERFACE % self.instance_id)) + + xml_str = get_nc_config(self.module, conf_str) + if "" in xml_str: + return isis_dict + + xml_str = xml_str.replace('\r', '').replace('\n', '').\ + replace('xmlns="urn:ietf:params:xml:ns:netconf:base:1.0"', "").\ + replace('xmlns="http://www.huawei.com/netconf/vrp"', "") + root = ElementTree.fromstring(xml_str) + + # + glb = root.find("isiscomm/isSites/isSite/isCircuits/isCircuit") + if self.bfdstaticen or self.bfdblocken: + glb = root.find("isiscomm/isSites/isSite/isSiteMTs/isSiteMT/isCircMts/isCircMt") + if glb: + for attr in glb: + isis_dict["instance"][attr.tag] = attr.text + + return isis_dict + + def config_session(self): + """configures bfd session""" + + xml_str = "" + instance = self.isis_dict["instance"] + if not self.instance_id: + return xml_str + if self.ifname: + xml_str = "%s" % self.ifname + self.updates_cmd.append("interface %s" % self.ifname) + if self.state == "present": + self.updates_cmd.append("isis enable %s" % self.instance_id) + + if self.leveltype: + if self.leveltype == "level_1": + xml_str += "level_1" + self.updates_cmd.append("isis circuit-level level-1") + elif self.leveltype == "level_2": + xml_str += "level_2" + self.updates_cmd.append("isis circuit-level level-2") + elif self.leveltype == "level_1_2": + xml_str += "level_1_2" + self.updates_cmd.append("isis circuit-level level-1-2") + if self.level1dispriority is not None: + xml_str += "%s" % self.level1dispriority + self.updates_cmd.append("isis dis-priority %s level-1" % self.level1dispriority) + if self.level2dispriority is not None: + xml_str += "%s" % self.level2dispriority + self.updates_cmd.append("isis dis-priority %s level-2" % self.level2dispriority) + if self.p2pnegotiationmode: + if self.p2pnegotiationmode == "2_way": + xml_str += "2_way" + self.updates_cmd.append("isis ppp-negotiation 2-way") + elif self.p2pnegotiationmode == "3_way": + xml_str += "3_way" + self.updates_cmd.append("isis ppp-negotiation 3-way") + elif self.p2pnegotiationmode == "3_wayonly": + xml_str += "3_wayonly" + self.updates_cmd.append("isis ppp-negotiation only") + if self.level1cost is not None: + xml_str += "%s" % self.level1cost + self.updates_cmd.append("isis cost %s level-1" % self.level1cost) + if self.level2cost is not None: + xml_str += "%s" % self.level2cost + self.updates_cmd.append("isis cost %s level-2" % self.level2cost) + + else: + # absent + self.updates_cmd.append("undo isis enable") + if self.leveltype and self.leveltype == instance.get("circuitLevelType"): + xml_str += "level_1_2" + self.updates_cmd.append("undo isis circuit-level") + if self.level1dispriority is not None and self.level1dispriority == instance.get("level1DisPriority"): + xml_str += "64" + self.updates_cmd.append("undo isis dis-priority %s level-1" % self.level1dispriority) + if self.level2dispriority is not None and self.level2dispriority == instance.get("level2dispriority"): + xml_str += "64" + self.updates_cmd.append("undo isis dis-priority %s level-2" % self.level2dispriority) + if self.p2pnegotiationmode and self.p2pnegotiationmode == instance.get("p2pNegotiationMode"): + xml_str += "" + self.updates_cmd.append("undo isis ppp-negotiation") + if self.level1cost is not None and self.level1cost == instance.get("level1Cost"): + xml_str += "" + 
self.updates_cmd.append("undo isis cost %s level-1" % self.level1cost) + if self.level2cost is not None and self.level2cost == instance.get("level2Cost"): + xml_str += "" + self.updates_cmd.append("undo isis cost %s level-2" % self.level2cost) + + if self.silentenable and instance.get("silentEnable", "false") == "false": + xml_str += "true" + self.updates_cmd.append("isis silent") + elif not self.silentenable and instance.get("silentEnable", "false") == "true": + xml_str += "false" + self.updates_cmd.append("undo isis silent") + + if self.silentcost and instance.get("silentCost", "false") == "false": + xml_str += "true" + self.updates_cmd.append("isis silent advertise-zero-cost") + elif not self.silentcost and instance.get("silentCost", "false") == "true": + xml_str += "false" + + if self.typep2penable and instance.get("typeP2pEnable", "false") == "false": + xml_str += "true" + self.updates_cmd.append("isis circuit-type p2p") + elif not self.typep2penable and instance.get("typeP2pEnable", "false") == "true": + xml_str += "false" + self.updates_cmd.append("undo isis circuit-type") + + if self.snpacheck and instance.get("snpaCheck", "false") == "false": + xml_str += "true" + self.updates_cmd.append("isis circuit-type p2p strict-snpa-check") + elif not self.snpacheck and instance.get("snpaCheck", "false") == "true": + xml_str += "false" + + if self.p2ppeeripignore and instance.get("p2pPeerIPIgnore", "false") == "false": + xml_str += "true" + self.updates_cmd.append("isis peer-ip-ignore") + elif not self.p2ppeeripignore and instance.get("p2pPeerIPIgnore", "false") == "true": + xml_str += "false" + self.updates_cmd.append("undo isis peer-ip-ignore") + + if self.ppposicpcheckenable and instance.get("pPPOsicpCheckEnable", "false") == "false": + xml_str += "true" + self.updates_cmd.append("isis ppp-osicp-check") + elif not self.ppposicpcheckenable and instance.get("pPPOsicpCheckEnable", "false") == "true": + xml_str += "false" + self.updates_cmd.append("undo isis ppp-osicp-check") + if self.bfdstaticen and instance.get("bfdStaticEn", "false") == "false": + xml_str += "true" + self.updates_cmd.append("isis bfd static") + elif not self.bfdstaticen and instance.get("bfdStaticEn", "false") == "true": + xml_str += "false" + self.updates_cmd.append("undo isis bfd static") + if self.bfdblocken and instance.get("bfdBlockEn", "false") == "false": + xml_str += "true" + self.updates_cmd.append("isis bfd block") + elif not self.bfdblocken and instance.get("bfdBlockEn", "false") == "true": + xml_str += "false" + self.updates_cmd.append("undo isis bfd block") + + if self.state == "present": + if self.bfdstaticen is not None or self.bfdblocken is not None: + return CE_NC_MERGE_ISIS_BFDINTERFACE % (self.instance_id, xml_str) + return CE_NC_MERGE_ISIS_INTERFACE % (self.instance_id, xml_str) + else: + if self.bfdstaticen is not None or self.bfdblocken is not None: + return CE_NC_DELETE_ISIS_BFDINTERFACE % (self.instance_id, xml_str) + return CE_NC_DELETE_ISIS_INTERFACE % (self.instance_id, xml_str) + + def netconf_load_config(self, xml_str): + """load bfd config by netconf""" + + if not xml_str: + return + + xml_cfg = """ + + + %s + + """ % xml_str + set_nc_config(self.module, xml_cfg) + self.changed = True + + def check_params(self): + """Check all input params""" + + # check instance id + if not self.instance_id: + self.module.fail_json(msg="Error: Missing required arguments: instance_id.") + + if self.instance_id: + if self.instance_id < 1 or self.instance_id > 4294967295: + self.module.fail_json(msg="Error: 
Instance id is not ranges from 1 to 4294967295.") + + # check level1dispriority + if self.level1dispriority is not None: + if self.level1dispriority < 0 or self.level1dispriority > 127: + self.module.fail_json(msg="Error: level1dispriority is not ranges from 0 to 127.") + + if self.level2dispriority is not None: + if self.level2dispriority < 0 or self.level2dispriority > 127: + self.module.fail_json(msg="Error: level2dispriority is not ranges from 0 to 127.") + + if self.level1cost is not None: + if self.level1cost < 0 or self.level1cost > 16777215: + self.module.fail_json(msg="Error: level1cost is not ranges from 0 to 16777215.") + + if self.level2cost is not None: + if self.level2cost < 0 or self.level2cost > 16777215: + self.module.fail_json(msg="Error: level2cost is not ranges from 0 to 16777215.") + + def get_proposed(self): + """get proposed info""" + self.proposed["instance_id"] = self.instance_id + self.proposed["ifname"] = self.ifname + self.proposed["leveltype"] = self.leveltype + self.proposed["level1dispriority"] = self.level1dispriority + self.proposed["level2dispriority"] = self.level2dispriority + self.proposed["silentenable"] = self.silentenable + self.proposed["silentcost"] = self.silentcost + self.proposed["typep2penable"] = self.typep2penable + self.proposed["snpacheck"] = self.snpacheck + self.proposed["p2pnegotiationmode"] = self.p2pnegotiationmode + self.proposed["p2ppeeripignore"] = self.p2ppeeripignore + self.proposed["ppposicpcheckenable"] = self.ppposicpcheckenable + self.proposed["level1cost"] = self.level1cost + self.proposed["level2cost"] = self.level2cost + self.proposed["bfdstaticen"] = self.bfdstaticen + self.proposed["bfdblocken"] = self.bfdblocken + self.proposed["state"] = self.state + + def get_existing(self): + """get existing info""" + + if not self.isis_dict: + self.existing["instance"] = None + else: + self.existing["instance"] = self.isis_dict.get("instance") + + def get_end_state(self): + """get end state info""" + + isis_dict = self.get_isis_dict() + if not isis_dict: + self.end_state["instance"] = None + else: + self.end_state["instance"] = isis_dict.get("instance") + if self.existing == self.end_state: + self.changed = False + + def work(self): + """worker""" + + self.check_params() + self.isis_dict = self.get_isis_dict() + self.get_existing() + self.get_proposed() + + # deal present or absent + xml_str = '' + if self.instance_id: + xml_str += self.config_session() + + # update to device + if xml_str: + self.netconf_load_config(xml_str) + self.changed = True + + self.get_end_state() + self.results['changed'] = self.changed + self.results['proposed'] = self.proposed + self.results['existing'] = self.existing + self.results['end_state'] = self.end_state + if self.changed: + self.results['updates'] = self.updates_cmd + else: + self.results['updates'] = list() + + self.module.exit_json(**self.results) + + +def main(): + """Module main""" + + argument_spec = dict( + instance_id=dict(required=True, type='int'), + ifname=dict(required=True, type='str'), + leveltype=dict(required=False, type='str', choices=['level_1', 'level_2', 'level_1_2']), + level1dispriority=dict(required=False, type='int'), + level2dispriority=dict(required=False, type='int'), + silentenable=dict(required=False, type='bool'), + silentcost=dict(required=False, type='bool'), + typep2penable=dict(required=False, type='bool'), + snpacheck=dict(required=False, type='bool'), + p2pnegotiationmode=dict(required=False, type='str', choices=['2_way', '3_way', '3_wayonly']), + 
p2ppeeripignore=dict(required=False, type='bool'), + ppposicpcheckenable=dict(required=False, type='bool'), + level1cost=dict(required=False, type='int'), + level2cost=dict(required=False, type='int'), + bfdstaticen=dict(required=False, type='bool'), + bfdblocken=dict(required=False, type='bool'), + state=dict(required=False, default='present', choices=['present', 'absent']) + ) + + module = ISIS_Instance(argument_spec) + module.work() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/cloudengine/ce_is_is_view.py b/plugins/modules/network/cloudengine/ce_is_is_view.py new file mode 100644 index 0000000000..7a8b279ae9 --- /dev/null +++ b/plugins/modules/network/cloudengine/ce_is_is_view.py @@ -0,0 +1,1955 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright 2019 Red Hat +# GNU General Public License v3.0+ +# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: ce_is_is_view +author: xuxiaowei0512 (@CloudEngine-Ansible) +short_description: Manages isis view configuration on HUAWEI CloudEngine devices. +description: + - Manages isis process id, creates a isis instance id or deletes a process id + on HUAWEI CloudEngine devices. +options: + coststyle: + description: + - Specifies the cost style. + type: str + choices: ['narrow', 'wide', 'transition', 'ntransition', 'wtransition'] + cost_type: + description: + - Specifies the cost type. + type: str + choices: ['external', 'internal'] + defaultmode: + description: + - Specifies the default mode. + type: str + choices: ['always', 'matchDefault', 'matchAny'] + export_policytype: + description: + - Specifies the default mode. + type: str + choices: ['aclNumOrName', 'ipPrefix', 'routePolicy'] + export_protocol: + description: + - Specifies the export router protocol. + type: str + choices: ['direct', 'ospf', 'isis', 'static', 'rip', 'bgp', 'ospfv3', 'all'] + impotr_leveltype: + description: + - Specifies the export router protocol. + type: str + choices: ['level_1', 'level_2', 'level_1_2'] + islevel: + description: + - Specifies the isis level. + type: str + choices: ['level_1', 'level_2', 'level_1_2'] + level_type: + description: + - Specifies the isis level type. + type: str + choices: ['level_1', 'level_2', 'level_1_2'] + penetration_direct: + description: + - Specifies the penetration direct. + type: str + choices: ['level2-level1', 'level1-level2'] + protocol: + description: + - Specifies the protocol. + type: str + choices: ['direct', 'ospf', 'isis', 'static', 'rip', 'bgp', 'ospfv3', 'all'] + aclnum_or_name: + description: + - Specifies the acl number or name for isis. + type: str + allow_filter: + description: + - Specifies the alow filter or not. + type: bool + allow_up_down: + description: + - Specifies the alow up or down. + type: bool + autocostenable: + description: + - Specifies the alow auto cost enable. + type: bool + autocostenablecompatible: + description: + - Specifies the alow auto cost enable compatible. + type: bool + avoid_learning: + description: + - Specifies the alow avoid learning. + type: bool + bfd_min_tx: + description: + - Specifies the bfd min sent package. + type: int + bfd_min_rx: + description: + - Specifies the bfd min received package. + type: int + bfd_multiplier_num: + description: + - Specifies the bfd multiplier number. 
+ type: int + cost: + description: + - Specifies the bfd cost. + type: int + description: + description: + - Specifies description of isis. + type: str + enablelevel1tolevel2: + description: + - Enable level1 to level2. + type: bool + export_aclnumorname: + description: + - Specifies export acl number or name. + type: str + export_ipprefix: + description: + - Specifies export ip prefix. + type: str + export_processid: + description: + - Specifies export process id. + type: int + export_routepolicyname: + description: + - Specifies export route policy name. + type: str + import_aclnumorname: + description: + - Specifies import acl number or name. + type: str + import_cost: + description: + - Specifies import cost. + type: int + import_ipprefix: + description: + - Specifies import ip prefix. + type: str + import_route_policy: + description: + - Specifies import route policy. + type: str + import_routepolicy_name: + description: + - Specifies import route policy name. + type: str + import_routepolicyname: + description: + - Specifies import route policy name. + type: str + import_tag: + description: + - Specifies import tag. + type: int + inheritcost: + description: + - Enable inherit cost. + type: bool + instance_id: + description: + - Specifies instance id. + type: int + ip_address: + description: + - Specifies ip address. + type: str + ip_prefix_name: + description: + - Specifies ip prefix name. + type: str + max_load: + description: + - Specifies route max load. + type: int + mode_routepolicyname: + description: + - Specifies the mode of route polic yname. + type: str + mode_tag: + description: + - Specifies the tag of mode. + type: int + netentity: + description: + - Specifies the netentity. + type: str + permitibgp: + description: + - Specifies the permitibgp. + type: bool + processid: + description: + - Specifies the process id. + type: int + relaxspfLimit: + description: + - Specifies enable the relax spf limit. + type: bool + route_policy_name: + description: + - Specifies the route policy name. + type: str + stdbandwidth: + description: + - Specifies the std band width. + type: int + stdlevel1cost: + description: + - Specifies the std level1 cost. + type: int + stdlevel2cost: + description: + - Specifies the std level2 cost. + type: int + tag: + description: + - Specifies the isis tag. + type: int + weight: + description: + - Specifies the isis weight. + type: int + preference_value: + description: + - Specifies the preference value. + type: int + state: + description: + - Determines whether the config should be present or not on the device. + default: present + type: str + choices: ['present', 'absent'] +notes: + - This module requires the netconf system service be enabled on the remote device being managed. + - This module works with connection C(netconf). 
+''' + +EXAMPLES = ''' + - name: Set isis description + ce_is_is_view: + instance_id: 3 + description: abcdeggfs + state: present + + - name: Set isis islevel + ce_is_is_view: + instance_id: 3 + islevel: level_1 + state: present + - name: Set isis coststyle + ce_is_is_view: + instance_id: 3 + coststyle: narrow + state: present + + - name: Set isis stdlevel1cost + ce_is_is_view: + instance_id: 3 + stdlevel1cost: 63 + state: present + + - name: set isis stdlevel2cost + ce_is_is_view: + instance_id: 3 + stdlevel2cost: 63 + state: present + + - name: set isis stdbandwidth + ce_is_is_view: + instance_id: 3 + stdbandwidth: 1 + state: present +''' + +RETURN = ''' +proposed: + description: k/v pairs of parameters passed into module + returned: always + type: dict + sample: { + "state": "present" + } +existing: + description: k/v pairs of existing configuration + returned: always + type: dict + sample: { + "session": {} + } +end_state: + description: k/v pairs of configuration after module execution + returned: always + type: dict + sample: { + "session": { + "addrType": "IPV4", + "createType": "SESS_STATIC", + "destAddr": null, + "outIfName": "10GE1/0/1", + "sessName": "bfd_l2link", + "srcAddr": null, + "useDefaultIp": "true", + "vrfName": null + } + } +updates: + description: commands sent to the device + returned: always + type: list + sample: [ + "bfd bfd_l2link bind peer-ip default-ip interface 10ge1/0/1" + ] +changed: + description: check to see if a change was made on the device + returned: always + type: bool + sample: true +''' + +import sys +import socket +from xml.etree import ElementTree +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.network.cloudengine.ce import get_nc_config, set_nc_config + +CE_NC_GET_ISIS = """ + + + %s + + +""" + +CE_NC_GET_ISIS_INSTANCE = """ + + + %s + + + + + + + + + + + +""" + +CE_NC_GET_ISIS_ENTITY = """ + + + %s + + + + + + + +""" + +CE_NC_CREAT_ISIS_ENTITY = """ + + + %s + + + %s + + + + +""" + +CE_NC_DELATE_ISIS_ENTITY = """ + + + %s + + + %s + + + + +""" + +CE_NC_GET_ISIS_PREFERENCE = """ + + + %s + + + + + + + + + + + + +""" + +CE_NC_MREGE_ISIS_PREFERENCE = """ + + + %s + + + afIpv4 + 0 + + + %s + + + + + + +""" + +CE_NC_DELETE_ISIS_PREFERENCE = """ + + + %s + + + afIpv4 + 0 + + + %s + + + + + + +""" + +CE_NC_GET_ISIS_MAXLOAD = """ + + + %s + + + afIpv4 + 0 + + + + + +""" + +CE_NC_MERGE_ISIS_MAXLOAD = """ + + + %s + + + afIpv4 + 0 + %s + + + + +""" + +CE_NC_DELETE_ISIS_MAXLOAD = """ + + + %s + + + afIpv4 + 0 + 32 + + + + +""" + +CE_NC_GET_ISIS_NEXTHOP = """ + + + %s + + + afIpv4 + 0 + + + + + + + + + + +""" + +CE_NC_MERGE_ISIS_NEXTHOP = """ + + + %s + + + afIpv4 + 0 + + + %s + %s + + + + + + +""" + +CE_NC_DELETE_ISIS_NEXTHOP = """ + + + %s + + + afIpv4 + 0 + + + %s + 1 + + + + + + +""" + +CE_NC_GET_ISIS_LEAKROUTELEVEL2 = """ + + + %s + + + afIpv4 + 0 + + + + + + + + + + + + + + +""" + +CE_NC_MERGE_ISIS_LEAKROUTELEVEL2 = """ + + + %s + + + afIpv4 + 0 + + + %s + + + + + + +""" + +CE_NC_DELETE_ISIS_LEAKROUTELEVEL2 = """ + + + %s + + + afIpv4 + 0 + + + 0 + + + + false + + + + + + +""" + +CE_NC_GET_ISIS_LEAKROUTELEVEL1 = """ + + + %s + + + afIpv4 + 0 + + + + + + + + + + + + + + +""" + +CE_NC_MERGE_ISIS_LEAKROUTELEVEL1 = """ + + + %s + + + afIpv4 + 0 + + + %s + + + + + + +""" + +CE_NC_DELETE_ISIS_LEAKROUTELEVEL1 = """ + + + %s + + + afIpv4 + 0 + + + 0 + + + + false + false + + + + + + +""" + +CE_NC_GET_ISIS_DEFAULTROUTE = """ + + + %s + + + afIpv4 + 0 + + + + + + + + + + + + + + +""" + 
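None of the template constants above and below is used directly; each inner fragment is %-substituted with the instance id and then wrapped by CE_NC_GET_ISIS. A toy sketch of this nested-substitution pattern, with hypothetical tag names since the real templates follow the Huawei isiscomm schema:

# Hypothetical stand-ins for the outer wrapper and an inner fragment;
# the module composes them as CE_NC_GET_ISIS % (CE_NC_GET_ISIS_INSTANCE % id).
OUTER_WRAPPER = '<filter type="subtree">%s</filter>'
INNER_FRAGMENT = '<isSite><instanceId>%s</instanceId></isSite>'

conf_str = OUTER_WRAPPER % (INNER_FRAGMENT % 3)
assert conf_str == '<filter type="subtree"><isSite><instanceId>3</instanceId></isSite></filter>'

Because %s performs no escaping, the module relies on check_params to validate every value before it reaches a template.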
+CE_NC_MERGE_ISIS_DEFAULTROUTE = """ + + + %s + + + afIpv4 + 0 + + + %s + + + + + + +""" + +CE_NC_DELETE_ISIS_DEFAULTROUTE = """ + + + %s + + + afIpv4 + 0 + + + always + 0 + 0 + level_2 + false + + + + + + +""" + +CE_NC_GET_ISIS_IMPORTROUTE = """ + + + %s + + + afIpv4 + 0 + + + + + + + + + + + + + + + + + + +""" + +CE_NC_MERGE_ISIS_IMPORTROUTE = """ + + + %s + + + afIpv4 + 0 + + + %s + + + + + + +""" + +CE_NC_GET_ISIS_EXPORTROUTE = """ + + + %s + + + afIpv4 + 0 + + + + + + + + + + + +""" + +CE_NC_MERGE_ISIS_EXPORTROUTE = """ + + + %s + + + afIpv4 + 0 + + + %s + + + + + + +""" + +CE_NC_GET_ISIS_IMPORTIPROUTE = """ + + + %s + + + afIpv4 + 0 + + + + + + + + + + + + +""" + +CE_NC_MERGE_ISIS_IMPORTIPROUTE = """ + + + %s + + + afIpv4 + 0 + + + %s + + + + + + +""" + +CE_NC_GET_ISIS_BFDLINK = """ + + + %s + + + afIpv4 + 0 + + + + + + + +""" + +CE_NC_MERGE_ISIS_BFDLINK = """ + + + %s + + + afIpv4 + 0 + %s + + + + +""" + +CE_NC_DELETE_ISIS_BFDLINK = """ + + + %s + + + afIpv4 + 0 + 3 + 3 + 3 + + + + +""" + + +def is_valid_ip_vpn(vpname): + """check ip vpn""" + + if not vpname: + return False + + if vpname == "_public_": + return False + + if len(vpname) < 1 or len(vpname) > 31: + return False + + return True + + +def check_ip_addr(ipaddr): + """check ip address, Supports IPv4 and IPv6""" + + if not ipaddr or '\x00' in ipaddr: + return False + + try: + res = socket.getaddrinfo(ipaddr, 0, socket.AF_UNSPEC, + socket.SOCK_STREAM, + 0, socket.AI_NUMERICHOST) + return bool(res) + except socket.gaierror: + err = sys.exc_info()[1] + if err.args[0] == socket.EAI_NONAME: + return False + raise + + return True + + +def check_default_ip(ipaddr): + """check the default multicast IP address""" + + # The value ranges from 224.0.0.107 to 224.0.0.250 + if not check_ip_addr(ipaddr): + return False + + if ipaddr.count(".") != 3: + return False + + ips = ipaddr.split(".") + if ips[0] != "224" or ips[1] != "0" or ips[2] != "0": + return False + + if not ips[3].isdigit() or int(ips[3]) < 107 or int(ips[3]) > 250: + return False + + return True + + +def get_interface_type(interface): + """get the type of interface, such as 10GE, ETH-TRUNK, VLANIF...""" + + if interface is None: + return None + + if interface.upper().startswith('GE'): + iftype = 'ge' + elif interface.upper().startswith('10GE'): + iftype = '10ge' + elif interface.upper().startswith('25GE'): + iftype = '25ge' + elif interface.upper().startswith('4X10GE'): + iftype = '4x10ge' + elif interface.upper().startswith('40GE'): + iftype = '40ge' + elif interface.upper().startswith('100GE'): + iftype = '100ge' + elif interface.upper().startswith('VLANIF'): + iftype = 'vlanif' + elif interface.upper().startswith('LOOPBACK'): + iftype = 'loopback' + elif interface.upper().startswith('METH'): + iftype = 'meth' + elif interface.upper().startswith('ETH-TRUNK'): + iftype = 'eth-trunk' + elif interface.upper().startswith('VBDIF'): + iftype = 'vbdif' + elif interface.upper().startswith('NVE'): + iftype = 'nve' + elif interface.upper().startswith('TUNNEL'): + iftype = 'tunnel' + elif interface.upper().startswith('ETHERNET'): + iftype = 'ethernet' + elif interface.upper().startswith('FCOE-PORT'): + iftype = 'fcoe-port' + elif interface.upper().startswith('FABRIC-PORT'): + iftype = 'fabric-port' + elif interface.upper().startswith('STACK-PORT'): + iftype = 'stack-port' + elif interface.upper().startswith('NULL'): + iftype = 'null' + else: + return None + + return iftype.lower() + + +class ISIS_View(object): + """Manages ISIS Instance""" + + def __init__(self, argument_spec): + 
self.spec = argument_spec + self.module = None + self.__init_module__() + + # module input info + self.instance_id = self.module.params['instance_id'] + self.description = self.module.params['description'] + self.islevel = self.module.params['islevel'] + self.coststyle = self.module.params['coststyle'] + self.relaxspfLimit = self.module.params['relaxspfLimit'] + self.stdlevel1cost = self.module.params['stdlevel1cost'] + self.stdlevel2cost = self.module.params['stdlevel2cost'] + self.stdbandwidth = self.module.params['stdbandwidth'] + self.autocostenable = self.module.params['autocostenable'] + self.autocostenablecompatible = self.module.params['autocostenablecompatible'] + self.netentity = self.module.params['netentity'] + self.preference_value = self.module.params['preference_value'] + self.route_policy_name = self.module.params['route_policy_name'] + self.max_load = self.module.params['max_load'] + self.ip_address = self.module.params['ip_address'] + self.weight = self.module.params['weight'] + self.aclnum_or_name = self.module.params['aclnum_or_name'] + self.ip_prefix_name = self.module.params['ip_prefix_name'] + self.import_routepolicy_name = self.module.params['import_routepolicy_name'] + self.tag = self.module.params['tag'] + self.allow_filter = self.module.params['allow_filter'] + self.allow_up_down = self.module.params['allow_up_down'] + self.penetration_direct = self.module.params['penetration_direct'] + self.enablelevel1tolevel2 = self.module.params['enablelevel1tolevel2'] + self.defaultmode = self.module.params['defaultmode'] + self.mode_routepolicyname = self.module.params['mode_routepolicyname'] + self.cost = self.module.params['cost'] + self.mode_tag = self.module.params['mode_tag'] + self.level_type = self.module.params['level_type'] + self.avoid_learning = self.module.params['avoid_learning'] + self.protocol = self.module.params['protocol'] + self.processid = self.module.params['processid'] + self.cost_type = self.module.params['cost_type'] + self.import_cost = self.module.params['import_cost'] + self.import_tag = self.module.params['import_tag'] + self.impotr_leveltype = self.module.params['impotr_leveltype'] + self.import_route_policy = self.module.params['import_route_policy'] + self.inheritcost = self.module.params['inheritcost'] + self.permitibgp = self.module.params['permitibgp'] + self.avoid_learning = self.module.params['avoid_learning'] + self.export_protocol = self.module.params['export_protocol'] + self.export_policytype = self.module.params['export_policytype'] + self.export_processid = self.module.params['export_processid'] + self.export_aclnumorname = self.module.params['export_aclnumorname'] + self.export_ipprefix = self.module.params['export_ipprefix'] + self.export_routepolicyname = self.module.params['export_routepolicyname'] + self.import_aclnumorname = self.module.params['import_aclnumorname'] + self.import_ipprefix = self.module.params['import_ipprefix'] + self.import_routepolicyname = self.module.params['import_routepolicyname'] + self.bfd_min_rx = self.module.params['bfd_min_rx'] + self.bfd_min_tx = self.module.params['bfd_min_tx'] + self.bfd_multiplier_num = self.module.params['bfd_multiplier_num'] + self.state = self.module.params['state'] + + # state + self.changed = False + self.isis_dict = dict() + self.updates_cmd = list() + self.commands = list() + self.results = dict() + self.proposed = dict() + self.existing = dict() + self.end_state = dict() + + def __init_module__(self): + """init module""" + + mutually_exclusive = [["stdlevel1cost", 
"stdlevel2cost"], + ["aclnum_or_name", "ip_prefix_name", "import_routepolicy_name"], + ["export_aclnumorname", "import_ipprefix", "import_routepolicyname"]] + required_together = [('ip_address', 'weight')] + self.module = AnsibleModule( + argument_spec=self.spec, + mutually_exclusive=mutually_exclusive, + required_together=required_together, + supports_check_mode=True) + + def get_isis_dict(self): + """bfd config dict""" + + isis_dict = dict() + isis_dict["instance"] = dict() + conf_str = CE_NC_GET_ISIS % ( + (CE_NC_GET_ISIS_INSTANCE % self.instance_id)) + + if self.netentity: + conf_str = CE_NC_GET_ISIS % ( + (CE_NC_GET_ISIS_ENTITY % self.instance_id)) + + if self.route_policy_name or self.preference_value: + conf_str = CE_NC_GET_ISIS % ( + (CE_NC_GET_ISIS_PREFERENCE % self.instance_id)) + if self.max_load: + conf_str = CE_NC_GET_ISIS % ( + (CE_NC_GET_ISIS_MAXLOAD % self.instance_id)) + if self.ip_address: + conf_str = CE_NC_GET_ISIS % ( + (CE_NC_GET_ISIS_NEXTHOP % self.instance_id)) + if self.penetration_direct and self.penetration_direct == "level2-level1": + conf_str = CE_NC_GET_ISIS % ( + (CE_NC_GET_ISIS_LEAKROUTELEVEL2 % self.instance_id)) + elif self.penetration_direct and self.penetration_direct == "level1-level2": + conf_str = CE_NC_GET_ISIS % ( + (CE_NC_GET_ISIS_LEAKROUTELEVEL1 % self.instance_id)) + elif self.defaultmode: + conf_str = CE_NC_GET_ISIS % ( + (CE_NC_GET_ISIS_DEFAULTROUTE % self.instance_id)) + elif self.protocol: + conf_str = CE_NC_GET_ISIS % ( + (CE_NC_GET_ISIS_IMPORTROUTE % self.instance_id)) + elif self.export_protocol: + conf_str = CE_NC_GET_ISIS % ( + (CE_NC_GET_ISIS_EXPORTROUTE % self.instance_id)) + elif self.bfd_min_rx or self.bfd_min_tx or self.bfd_multiplier_num: + conf_str = CE_NC_GET_ISIS % ( + (CE_NC_GET_ISIS_BFDLINK % self.instance_id)) + elif self.import_aclnumorname or self.import_ipprefix or self.import_ipprefix: + conf_str = CE_NC_GET_ISIS % ( + (CE_NC_GET_ISIS_IMPORTIPROUTE % self.instance_id)) + xml_str = get_nc_config(self.module, conf_str) + + if "" in xml_str: + return isis_dict + + xml_str = xml_str.replace('\r', '').replace('\n', '').\ + replace('xmlns="urn:ietf:params:xml:ns:netconf:base:1.0"', "").\ + replace('xmlns="http://www.huawei.com/netconf/vrp"', "") + root = ElementTree.fromstring(xml_str) + + # get bfd global info + if self.netentity: + glb = root.find("isiscomm/isSites/isSite/isNetEntitys/isNetEntity") + elif self.route_policy_name or self.preference_value: + glb = root.find("isiscomm/isSites/isSite//isSiteMTs/isSiteMT/isPreferences/isPreference") + elif self.max_load: + glb = root.find("isiscomm/isSites/isSite/isSiteMTs/isSiteMT") + elif self.ip_address: + glb = root.find("isiscomm/isSites/isSite/isSiteMTs/isSiteMT/isNextHopWeights/isNextHopWeight") + elif self.penetration_direct and self.penetration_direct == "level2-level1": + glb = root.find("isiscomm/isSites/isSite/isSiteMTs/isSiteMT/isLeakRouteLevel2ToLevel1s/isLeakRouteLevel2ToLevel1") + elif self.penetration_direct and self.penetration_direct == "level1-level2": + glb = root.find( + "isiscomm/isSites/isSite/isSiteMTs/isSiteMT/isLeakRouteLevel1ToLevel2s/isLeakRouteLevel1ToLevel2") + elif self.defaultmode: + glb = root.find( + "isiscomm/isSites/isSite/isSiteMTs/isSiteMT/isDefaultRoutes/isDefaultRoute") + elif self.protocol: + glb = root.find( + "isiscomm/isSites/isSite/isSiteMTs/isSiteMT/isImportRoutes/isImportRoute") + elif self.export_protocol: + glb = root.find( + "isiscomm/isSites/isSite/isSiteMTs/isSiteMT/isFilterExports/isFilterExport") + elif self.bfd_min_rx or 
self.bfd_min_tx or self.bfd_multiplier_num: + glb = root.find( + "isiscomm/isSites/isSite/isSiteMTs/isSiteMT") + elif self.import_aclnumorname or self.import_ipprefix or self.import_ipprefix: + glb = root.find( + "isiscomm/isSites/isSite/isSiteMTs/isSiteMT/isFilterImports/isFilterImport") + else: + glb = root.find("isiscomm/isSites/isSite") + + if glb is not None: + for attr in glb: + isis_dict["instance"][attr.tag] = attr.text + + return isis_dict + + def config_session(self): + """configures bfd session""" + + xml_str = "" + instance = self.isis_dict["instance"] + if not self.instance_id: + return xml_str + xml_str = "%s" % self.instance_id + self.updates_cmd.append("isis %s" % self.instance_id) + cmd_list = list() + + if self.state == "present": + if self.description and self.description != instance.get("description"): + xml_str += "%s" % self.description + self.updates_cmd.append("description %s" % self.description) + + if self.islevel and self.islevel != instance.get("isLevel"): + xml_str += "%s" % self.islevel + self.updates_cmd.append("is-level %s" % self.islevel) + + if self.coststyle: + if self.coststyle != instance.get("costStyle"): + xml_str += "%s" % self.coststyle + self.updates_cmd.append("cost-style %s" % self.coststyle) + if self.relaxspfLimit and instance.get("relaxSpfLimit", "false") == "false": + xml_str += "true" + self.updates_cmd.append("cost-style %s relax-spf-limit" % self.coststyle) + elif not self.relaxspfLimit and instance.get("relaxSpfLimit", "false") == "true": + xml_str += "false" + self.updates_cmd.append("cost-style %s" % self.coststyle) + + if self.stdlevel1cost and str(self.stdlevel1cost) != instance.get("stdLevel1Cost"): + xml_str += "%s" % self.stdlevel1cost + self.updates_cmd.append("circuit-cost %s level-1" % self.stdlevel1cost) + + if self.stdlevel2cost and str(self.stdlevel2cost) != instance.get("stdLevel2Cost"): + xml_str += "%s" % self.stdlevel2cost + self.updates_cmd.append("circuit-cost %s level-2" % self.stdlevel2cost) + + if self.stdbandwidth and str(self.stdbandwidth) != instance.get("stdbandwidth"): + xml_str += "%s" % self.stdbandwidth + self.updates_cmd.append("bandwidth-reference %s" % self.stdbandwidth) + + if self.netentity and self.netentity != instance.get("netEntity"): + xml_str = CE_NC_CREAT_ISIS_ENTITY % (self.instance_id, self.netentity) + self.updates_cmd.append("network-entity %s" % self.netentity) + + if self.preference_value or self.route_policy_name: + xml_str = "" + cmd_session = "preference" + if self.preference_value and str(self.preference_value) != instance.get("preferenceValue"): + xml_str = "%s" % self.preference_value + cmd_session += " %s" % self.preference_value + if self.route_policy_name and self.route_policy_name != instance.get("routePolicyName"): + xml_str += "%s" % self.route_policy_name + cmd_session += " route-policy %s" % self.route_policy_name + cmd_list.insert(0, cmd_session) + self.updates_cmd.extend(cmd_list) + xml_str = CE_NC_MREGE_ISIS_PREFERENCE % (self.instance_id, xml_str) + + if self.max_load and str(self.max_load) != instance.get("maxLoadBalancing"): + xml_str = CE_NC_MERGE_ISIS_MAXLOAD % (self.instance_id, self.max_load) + self.updates_cmd.append("maximum load-balancing %s" % self.max_load) + + if self.ip_address: + xml_str = CE_NC_MERGE_ISIS_NEXTHOP % (self.instance_id, self.ip_address, self.weight) + self.updates_cmd.append("nexthop %s weight %s" % (self.ip_address, self.weight)) + + if self.penetration_direct: + xml_str = "" + if self.penetration_direct == "level2-level1": + cmd_session = 
"import-route isis level-2 into level-1" + elif self.penetration_direct == "level1-level2": + cmd_session = "import-route isis level-1 into level-2" + if self.aclnum_or_name: + xml_str = "%s" % self.aclnum_or_name + xml_str += "aclNumOrName" + if isinstance(self.aclnum_or_name, int): + cmd_session += " filter-policy %s" % self.aclnum_or_name + elif isinstance(self.aclnum_or_name, str): + cmd_session += " filter-policy acl-name %s" % self.aclnum_or_name + if self.ip_prefix_name: + xml_str = "%s" % self.ip_prefix_name + xml_str += "ipPrefix" + cmd_session += " filter-policy ip-prefix %s" % self.ip_prefix_name + if self.import_routepolicy_name: + xml_str = "%s" % self.import_routepolicy_name + xml_str += "routePolicy" + cmd_session += " filter-policy route-policy %s" % self.import_routepolicy_name + if self.tag: + xml_str += "%s" % self.tag + cmd_session += " tag %s" % self.tag + if self.allow_filter or self.allow_up_down: + cmd_session += " direct" + if self.allow_filter: + xml_str += "true" + cmd_session += " allow-filter-policy" + if self.allow_up_down: + xml_str += "true" + cmd_session += " allow-up-down-bit" + cmd_list.insert(0, cmd_session) + self.updates_cmd.extend(cmd_list) + if self.enablelevel1tolevel2: + xml_str += "true" + self.updates_cmd.append("undo import-route isis level-1 into level-2 disable") + + if self.defaultmode: + cmd_session = "default-route-advertise" + if self.defaultmode == "always": + xml_str = "always" + cmd_session += " always" + elif self.defaultmode == "matchDefault": + xml_str = "matchDefault" + cmd_session += " match default" + elif self.defaultmode == "matchAny": + xml_str = "matchAny" + xml_str += "routePolicy" + xml_str += "%s" % self.mode_routepolicyname + cmd_session += " route-policy %s" % self.mode_routepolicyname + if self.cost is not None: + xml_str += "%s" % self.cost + cmd_session += " cost %s" % self.cost + if self.mode_tag: + xml_str += "%s" % self.mode_tag + cmd_session += " tag %s" % self.mode_tag + if self.level_type: + if self.level_type == "level_1": + xml_str += "level_1" + cmd_session += " level-1" + elif self.level_type == "level_2": + xml_str += "level_2" + cmd_session += " level-2" + elif self.level_type == "level_1_2": + xml_str += "level_1_2" + cmd_session += " level-1-2" + if self.avoid_learning: + xml_str += "true" + cmd_session += " avoid-learning" + elif not self.avoid_learning: + xml_str += "false" + cmd_list.insert(0, cmd_session) + self.updates_cmd.extend(cmd_list) + + if self.protocol: + cmd_session = "import-route" + if self.protocol == "rip": + xml_str = "rip" + cmd_session += " rip" + elif self.protocol == "isis": + xml_str = "isis" + cmd_session += " isis" + elif self.protocol == "ospf": + xml_str = "ospf" + cmd_session += " ospf" + elif self.protocol == "static": + xml_str = "static" + cmd_session += " static" + elif self.protocol == "direct": + xml_str = "direct" + cmd_session += " direct" + elif self.protocol == "bgp": + xml_str = "bgp" + cmd_session += " bgp" + if self.permitibgp: + xml_str += "true" + cmd_session += " permit-ibgp" + if self.protocol == "rip" or self.protocol == "isis" or self.protocol == "ospf": + xml_str += "%s" % self.processid + cmd_session += " %s" % self.processid + if self.inheritcost: + xml_str += "%s" % self.inheritcost + cmd_session += " inherit-cost" + if self.cost_type: + if self.cost_type == "external": + xml_str += "external" + cmd_session += " cost-type external" + elif self.cost_type == "internal": + xml_str += "internal" + cmd_session += " cost-type internal" + if self.import_cost: + 
xml_str += "%s" % self.import_cost + cmd_session += " cost %s" % self.import_cost + if self.import_tag: + xml_str += "%s" % self.import_tag + cmd_session += " tag %s" % self.import_tag + if self.import_route_policy: + xml_str += "routePolicy" + xml_str += "%s" % self.import_route_policy + cmd_session += " route-policy %s" % self.import_route_policy + if self.impotr_leveltype: + if self.impotr_leveltype == "level_1": + cmd_session += " level-1" + elif self.impotr_leveltype == "level_2": + cmd_session += " level-2" + elif self.impotr_leveltype == "level_1_2": + cmd_session += " level-1-2" + cmd_list.insert(0, cmd_session) + self.updates_cmd.extend(cmd_list) + + if self.bfd_min_rx or self.bfd_min_tx or self.bfd_multiplier_num: + xml_str = "" + self.updates_cmd.append("bfd all-interfaces enable") + cmd_session = "bfd all-interfaces" + if self.bfd_min_rx: + xml_str += "%s" % self.bfd_min_rx + cmd_session += " min-rx-interval %s" % self.bfd_min_rx + if self.bfd_min_tx: + xml_str += "%s" % self.bfd_min_tx + cmd_session += " min-tx-interval %s" % self.bfd_min_tx + if self.bfd_multiplier_num: + xml_str += "%s" % self.bfd_multiplier_num + cmd_session += " detect-multiplier %s" % self.bfd_multiplier_num + cmd_list.insert(0, cmd_session) + self.updates_cmd.extend(cmd_list) + + if self.export_protocol: + cmd_session = "filter-policy" + if self.export_aclnumorname: + xml_str = "aclNumOrName" + xml_str += "%s" % self.export_aclnumorname + if isinstance(self.export_aclnumorname, int): + cmd_session += " %s" % self.export_aclnumorname + elif isinstance(self.export_aclnumorname, str): + cmd_session += " acl-name %s" % self.export_aclnumorname + if self.export_ipprefix: + xml_str = "ipPrefix" + xml_str += "%s" % self.export_ipprefix + cmd_session += " ip-prefix %s" % self.export_ipprefix + if self.export_routepolicyname: + xml_str = "routePolicy" + xml_str += "%s" % self.export_routepolicyname + cmd_session += " route-policy %s" % self.export_routepolicyname + xml_str += "%s" % self.export_protocol + cmd_session += " export %s" % self.export_protocol + if self.export_processid is not None: + xml_str += "%s" % self.export_processid + cmd_session += " %s" % self.export_processid + cmd_list.insert(0, cmd_session) + self.updates_cmd.extend(cmd_list) + + if self.import_ipprefix or self.import_aclnumorname or self.import_routepolicyname: + cmd_session = "filter-policy" + if self.import_aclnumorname: + xml_str = "aclNumOrName" + xml_str += "%s" % self.import_aclnumorname + if isinstance(self.import_aclnumorname, int): + cmd_session += " %s" % self.import_aclnumorname + elif isinstance(self.import_aclnumorname, str): + cmd_session += " acl-name %s" % self.import_aclnumorname + if self.import_ipprefix: + xml_str = "ipPrefix" + xml_str += "%s" % self.import_ipprefix + cmd_session += " ip-prefix %s" % self.import_ipprefix + if self.import_routepolicyname: + xml_str = "routePolicy" + xml_str += "%s" % self.import_routepolicyname + cmd_session += " route-policy %s" % self.import_routepolicyname + cmd_session += "import" + cmd_list.insert(0, cmd_session) + self.updates_cmd.extend(cmd_list) + else: + # absent + if self.description and self.description == instance.get("description"): + xml_str += "%s" % self.description + self.updates_cmd.append("undo description") + + if self.islevel and self.islevel == instance.get("isLevel"): + xml_str += "level_1_2" + self.updates_cmd.append("undo is-level") + + if self.coststyle and self.coststyle == instance.get("costStyle"): + xml_str += "%s" % ("narrow") + xml_str += "false" + 
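
The long present/absent chains in config_session all follow one idea: compare each desired parameter against the value read back from the device, and only emit an XML leaf plus the matching CLI line when they differ (for present) or match (for absent). A minimal, self-contained sketch of that compare-then-emit pattern; the function and key names here are illustrative, not part of the module:

```python
# Minimal sketch of the compare-then-emit pattern used throughout
# config_session. `existing` stands in for the dict parsed from the device.

def build_updates(existing, desired, state="present"):
    """Return CLI lines needed to reach `desired`; empty list when converged."""
    updates = []
    for key, value in desired.items():
        if value is None:
            continue
        if state == "present" and str(value) != existing.get(key):
            # value differs from the device: push the new setting
            updates.append("%s %s" % (key.replace("_", "-"), value))
        elif state == "absent" and str(value) == existing.get(key):
            # undo only when the device value matches what the user removes
            updates.append("undo %s" % key.replace("_", "-"))
    return updates

print(build_updates({"description": "old"}, {"description": "new"}))
# ['description new']
print(build_updates({"description": "new"}, {"description": "new"}, "absent"))
# ['undo description']
```
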
self.updates_cmd.append("undo cost-style") + + if self.stdlevel1cost and str(self.stdlevel1cost) == instance.get("stdLevel1Cost"): + xml_str += "%s" % self.stdlevel1cost + self.updates_cmd.append("undo circuit-cost %s level-1" % self.stdlevel1cost) + + if self.stdlevel2cost and str(self.stdlevel2cost) == instance.get("stdLevel2Cost"): + xml_str += "%s" % self.stdlevel2cost + self.updates_cmd.append("undo circuit-cost %s level-2" % self.stdlevel2cost) + + if self.stdbandwidth and str(self.stdbandwidth) == instance.get("stdbandwidth"): + xml_str += "100" + self.updates_cmd.append("undo bandwidth-reference") + + if self.netentity and self.netentity == instance.get("netEntity"): + xml_str = CE_NC_DELATE_ISIS_ENTITY % (self.instance_id, self.netentity) + self.updates_cmd.append("undo network-entity %s" % self.netentity) + + if self.preference_value or self.route_policy_name: + xml_str = "" + if self.preference_value and str(self.preference_value) == instance.get("preferenceValue"): + xml_str = "%s" % self.preference_value + if self.route_policy_name and self.route_policy_name == instance.get("routePolicyName"): + xml_str += "%s" % self.route_policy_name + self.updates_cmd.append("undo preference") + elif not self.preference_value and self.route_policy_name and self.route_policy_name == instance.get("routePolicyName"): + xml_str = "%s" % self.route_policy_name + self.updates_cmd.append("undo preference") + xml_str = CE_NC_DELETE_ISIS_PREFERENCE % (self.instance_id, xml_str) + + if self.max_load and str(self.max_load) == instance.get("maxLoadBalancing"): + xml_str = CE_NC_DELETE_ISIS_MAXLOAD % self.instance_id + self.updates_cmd.append("undo maximum load-balancing") + + if self.ip_address: + xml_str = CE_NC_DELETE_ISIS_NEXTHOP % (self.instance_id, self.ip_address) + self.updates_cmd.append("undo nexthop %s" % self.ip_address) + + if self.penetration_direct: + if self.penetration_direct == "level2-level1": + self.updates_cmd.append("undo import-route isis level-2 into level-1") + elif self.penetration_direct == "level1-level2": + self.updates_cmd.append("undo import-route isis level-1 into level-2") + self.updates_cmd.append("import-route isis level-1 into level-2 disable") + + if self.bfd_min_rx or self.bfd_min_tx or self.bfd_multiplier_num is not None: + xml_str = CE_NC_DELETE_ISIS_BFDLINK % self.instance_id + self.updates_cmd.append("undo bfd all-interfaces enable") + cmd_session = "undo bfd all-interfaces" + if self.bfd_min_rx: + cmd_session += " min-rx-interval %s" % self.bfd_min_rx + if self.bfd_min_tx: + cmd_session += " min-tx-interval %s" % self.bfd_min_tx + if self.bfd_multiplier_num: + cmd_session += " detect-multiplier %s" % self.bfd_multiplier_num + cmd_list.insert(0, cmd_session) + self.updates_cmd.extend(cmd_list) + + if self.defaultmode: + xml_str = CE_NC_DELETE_ISIS_DEFAULTROUTE % self.instance_id + self.updates_cmd.append("undo default-route-advertise") + + if self.protocol: + if self.protocol == "rip" or self.protocol == "isis" or self.protocol == "ospf": + self.updates_cmd.append("undo import-route %s %s" % (self.protocol, self.processid)) + else: + self.updates_cmd.append("undo import-route %s" % self.protocol) + + if self.export_protocol: + cmd_session = "undo filter-policy" + if self.export_aclnumorname: + if isinstance(self.export_aclnumorname, int): + cmd_session += " %s" % self.export_aclnumorname + elif isinstance(self.export_aclnumorname, str): + cmd_session += " acl-name %s" % self.export_aclnumorname + if self.export_ipprefix: + cmd_session += " ip-prefix %s" % 
self.export_ipprefix + if self.export_routepolicyname: + cmd_session += " route-policy %s" % self.export_routepolicyname + cmd_session += " export %s" % self.export_protocol + if self.export_processid is not None: + cmd_session += " %s" % self.export_processid + cmd_list.insert(0, cmd_session) + self.updates_cmd.extend(cmd_list) + if self.import_ipprefix or self.import_aclnumorname or self.import_routepolicyname: + cmd_session = "undo filter-policy" + if self.import_aclnumorname: + if isinstance(self.import_aclnumorname, int): + cmd_session += " %s" % self.import_aclnumorname + elif isinstance(self.import_aclnumorname, str): + cmd_session += " acl-name %s" % self.import_aclnumorname + if self.import_ipprefix: + cmd_session += " ip-prefix %s" % self.import_ipprefix + if self.import_routepolicyname: + cmd_session += " route-policy %s" % self.import_routepolicyname + cmd_session += " import" + cmd_list.insert(0, cmd_session) + self.updates_cmd.extend(cmd_list) + + if self.autocostenable and instance.get("stdAutoCostEnable", "false") == "false": + xml_str += "true" + self.updates_cmd.append("auto-cost enable") + elif not self.autocostenable and instance.get("stdAutoCostEnable", "false") == "true": + xml_str += "false" + xml_str += "false" + self.updates_cmd.append("undo auto-cost enable") + + if self.autocostenable: + if self.autocostenablecompatible and instance.get("stdAutoCostEnableCompatible", "false") == "false": + xml_str += "true" + self.updates_cmd.append("auto-cost enable compatible") + elif not self.autocostenablecompatible and instance.get("stdAutoCostEnableCompatible", "false") == "true": + xml_str += "false" + self.updates_cmd.append("auto-cost enable") + + if self.state == "present": + if self.netentity or self.preference_value or self.route_policy_name or self.max_load or self.ip_address: + return xml_str + elif self.penetration_direct: + if self.penetration_direct == "level2-level1": + return CE_NC_MERGE_ISIS_LEAKROUTELEVEL2 % (self.instance_id, xml_str) + elif self.penetration_direct == "level1-level2": + return CE_NC_MERGE_ISIS_LEAKROUTELEVEL1 % (self.instance_id, xml_str) + elif self.defaultmode: + return CE_NC_MERGE_ISIS_DEFAULTROUTE % (self.instance_id, xml_str) + elif self.protocol: + return CE_NC_MERGE_ISIS_IMPORTROUTE % (self.instance_id, xml_str) + elif self.export_protocol: + return CE_NC_MERGE_ISIS_EXPORTROUTE % (self.instance_id, xml_str) + elif self.import_routepolicyname or self.import_aclnumorname or self.import_ipprefix: + return CE_NC_MERGE_ISIS_IMPORTIPROUTE % (self.instance_id, xml_str) + elif self.bfd_min_rx or self.bfd_min_tx or self.bfd_multiplier_num: + return CE_NC_MERGE_ISIS_BFDLINK % (self.instance_id, xml_str) + else: + return '' + xml_str + '' + else: + if self.netentity or self.preference_value or self.route_policy_name or self.max_load \ + or self.ip_address or self.defaultmode or self.bfd_min_rx or self.bfd_min_tx or self.bfd_multiplier_num is not None: + return xml_str + else: + return '' + xml_str + '' + + def netconf_load_config(self, xml_str): + """load bfd config by netconf""" + + if not xml_str: + return + if xml_str == "%s" % self.instance_id: + pass + else: + xml_cfg = """ + + + %s + + """ % xml_str + set_nc_config(self.module, xml_cfg) + self.changed = True + + def check_params(self): + """Check all input params""" + levelcost = 16777215 + if not self.instance_id: + self.module.fail_json(msg="Error: Missing required arguments: instance_id.") + + if self.instance_id: + if self.instance_id < 1 or self.instance_id > 4294967295: + 
4294967295:
+                self.module.fail_json(msg="Error: instance_id is not in the range from 1 to 4294967295.")
+
+        # check description
+        if self.description:
+            if len(self.description) < 1 or len(self.description) > 80:
+                self.module.fail_json(msg="Error: description is invalid (1 to 80 characters).")
+
+        # check stdbandwidth
+        if self.stdbandwidth:
+            if self.stdbandwidth < 1 or self.stdbandwidth > 2147483648:
+                self.module.fail_json(msg="Error: stdbandwidth is not in the range from 1 to 2147483648.")
+
+        if self.relaxspfLimit is not None and not self.coststyle:
+            self.module.fail_json(msg="Error: relaxspfLimit must be set together with coststyle.")
+
+        if self.coststyle:
+            if self.coststyle != "wide" and self.coststyle != "wtransition":
+                levelcost = 63
+            else:
+                levelcost = 16777215
+        if self.stdlevel1cost:
+            if self.stdlevel1cost < 1 or self.stdlevel1cost > levelcost:
+                self.module.fail_json(msg="Error: stdlevel1cost is not in the range from 1 to %s." % levelcost)
+
+        if self.stdlevel2cost:
+            if self.stdlevel2cost < 1 or self.stdlevel2cost > levelcost:
+                self.module.fail_json(msg="Error: stdlevel2cost is not in the range from 1 to %s." % levelcost)
+
+        if self.coststyle:
+            if self.coststyle != "ntransition" and self.coststyle != "transition":
+                if self.relaxspfLimit:
+                    self.module.fail_json(msg="Error: relaxspfLimit cannot be set unless coststyle is ntransition or transition.")
+
+        if self.autocostenablecompatible:
+            if not self.autocostenable:
+                self.module.fail_json(msg="Error: autocostenable should be enabled first.")
+
+        if self.preference_value:
+            if self.preference_value < 1 or self.preference_value > 255:
+                self.module.fail_json(msg="Error: preference_value is not in the range from 1 to 255.")
+
+        if self.route_policy_name:
+            if len(self.route_policy_name) < 1 or len(self.route_policy_name) > 200:
+                self.module.fail_json(msg="Error: route_policy_name is invalid (1 to 200 characters).")
+
+        if self.max_load:
+            if self.max_load < 1 or self.max_load > 32:
+                self.module.fail_json(msg="Error: max_load is not in the range from 1 to 32.")
+
+        if self.weight:
+            if self.weight < 1 or self.weight > 254:
+                self.module.fail_json(msg="Error: weight is not in the range from 1 to 254.")
+
+        if self.aclnum_or_name:
+            if isinstance(self.aclnum_or_name, int):
+                if self.aclnum_or_name < 2000 or self.aclnum_or_name > 2999:
+                    self.module.fail_json(msg="Error: acl_num is not in the range from 2000 to 2999.")
+            elif isinstance(self.aclnum_or_name, str):
+                if len(self.aclnum_or_name) < 1 or len(self.aclnum_or_name) > 32:
+                    self.module.fail_json(msg="Error: acl_name is invalid (1 to 32 characters).")
+        if self.ip_prefix_name:
+            if len(self.ip_prefix_name) < 1 or len(self.ip_prefix_name) > 169:
+                self.module.fail_json(msg="Error: ip_prefix_name is invalid (1 to 169 characters).")
+        if self.import_routepolicy_name:
+            if len(self.import_routepolicy_name) < 1 or len(self.import_routepolicy_name) > 200:
+                self.module.fail_json(msg="Error: import_routepolicy_name is invalid (1 to 200 characters).")
+        if self.tag:
+            if self.tag < 1 or self.tag > 4294967295:
+                self.module.fail_json(msg="Error: tag is not in the range from 1 to 4294967295.")
+
+        if self.mode_routepolicyname:
+            if len(self.mode_routepolicyname) < 1 or len(self.mode_routepolicyname) > 200:
+                self.module.fail_json(msg="Error: mode_routepolicyname is invalid (1 to 200 characters).")
+        if self.cost is not None:
+            if self.cost < 0 or self.cost > 4261412864:
+                self.module.fail_json(msg="Error: cost is not in the range from 0 to 4261412864.")
+        if self.mode_tag:
+            if self.mode_tag < 1 or self.mode_tag > 4294967295:
+                self.module.fail_json(msg="Error: mode_tag is not in the range from 1 to 4294967295.")
+
+        if self.processid is not None:
+            if self.processid < 0 or self.processid > 4294967295:
+                self.module.fail_json(msg="Error: processid is not in the range from 0 to 4294967295.")
+
+        if self.import_cost is not None:
+            if self.import_cost < 0 or self.import_cost > 4261412864:
+                self.module.fail_json(msg="Error: import_cost is not in the range from 0 to 4261412864.")
+
+        if self.import_tag:
+            if self.import_tag < 1 or self.import_tag > 4294967295:
+                self.module.fail_json(msg="Error: import_tag is not in the range from 1 to 4294967295.")
+
+        if self.export_aclnumorname:
+            if isinstance(self.export_aclnumorname, int):
+                if self.export_aclnumorname < 2000 or self.export_aclnumorname > 2999:
+                    self.module.fail_json(msg="Error: acl_num is not in the range from 2000 to 2999.")
+            elif isinstance(self.export_aclnumorname, str):
+                if len(self.export_aclnumorname) < 1 or len(self.export_aclnumorname) > 32:
+                    self.module.fail_json(msg="Error: acl_name is invalid (1 to 32 characters).")
+
+        if self.export_processid:
+            if self.export_processid < 1 or self.export_processid > 4294967295:
+                self.module.fail_json(msg="Error: export_processid is not in the range from 1 to 4294967295.")
+
+        if self.export_ipprefix:
+            if len(self.export_ipprefix) < 1 or len(self.export_ipprefix) > 169:
+                self.module.fail_json(msg="Error: export_ipprefix is invalid (1 to 169 characters).")
+
+        if self.export_routepolicyname:
+            if len(self.export_routepolicyname) < 1 or len(self.export_routepolicyname) > 200:
+                self.module.fail_json(msg="Error: export_routepolicyname is invalid (1 to 200 characters).")
+
+        if self.bfd_min_rx:
+            if self.bfd_min_rx < 50 or self.bfd_min_rx > 1000:
+                self.module.fail_json(msg="Error: bfd_min_rx is not in the range from 50 to 1000.")
+
+        if self.bfd_min_tx:
+            if self.bfd_min_tx < 50 or self.bfd_min_tx > 1000:
+                self.module.fail_json(msg="Error: bfd_min_tx is not in the range from 50 to 1000.")
+
+        if self.bfd_multiplier_num:
+            if self.bfd_multiplier_num < 3 or self.bfd_multiplier_num > 50:
+                self.module.fail_json(msg="Error: bfd_multiplier_num is not in the range from 3 to 50.")
+
+        if self.import_routepolicyname:
+            if len(self.import_routepolicyname) < 1 or len(self.import_routepolicyname) > 200:
+                self.module.fail_json(msg="Error: import_routepolicyname is invalid (1 to 200 characters).")
+
+        if self.import_aclnumorname:
+            if isinstance(self.import_aclnumorname, int):
+                if self.import_aclnumorname < 2000 or self.import_aclnumorname > 2999:
+                    self.module.fail_json(msg="Error: acl_num is not in the range from 2000 to 2999.")
+            elif isinstance(self.import_aclnumorname, str):
+                if len(self.import_aclnumorname) < 1 or len(self.import_aclnumorname) > 32:
+                    self.module.fail_json(msg="Error: acl_name is invalid (1 to 32 characters).")
+
+    def get_proposed(self):
+        """get proposed info"""
+        # base config
+        self.proposed["instance_id"] = self.instance_id
+        self.proposed["description"] = self.description
+        self.proposed["islevel"] = self.islevel
+        self.proposed["coststyle"] = self.coststyle
+        self.proposed["relaxspfLimit"] = self.relaxspfLimit
+        self.proposed["stdlevel1cost"] = self.stdlevel1cost
+        self.proposed["stdlevel2cost"] = self.stdlevel2cost
+        self.proposed["stdbandwidth"] = self.stdbandwidth
+        self.proposed["autocostenable"] = self.autocostenable
+        self.proposed["autocostenablecompatible"] = self.autocostenablecompatible
+        self.proposed["netentity"] = self.netentity
+        self.proposed["preference_value"] = self.preference_value
+        self.proposed["route_policy_name"] = self.route_policy_name
+        self.proposed["max_load"] = self.max_load
+        self.proposed["ip_address"] = self.ip_address
+        self.proposed["weight"] = self.weight
+        self.proposed["penetration_direct"] = self.penetration_direct
+        self.proposed["aclnum_or_name"] = self.aclnum_or_name
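
check_params above spells each bounds check out by hand. The same rules can also be expressed as data; a hypothetical, standalone condensation (the RANGES table and validate_ranges helper are illustrative, not part of the module):

```python
# Hypothetical condensation of the bounds checks above; values copied from
# the check_params messages.

RANGES = {
    "instance_id": (1, 4294967295),
    "preference_value": (1, 255),
    "max_load": (1, 32),
    "weight": (1, 254),
    "bfd_min_rx": (50, 1000),
    "bfd_min_tx": (50, 1000),
    "bfd_multiplier_num": (3, 50),
}


def validate_ranges(params, fail):
    """Call fail(msg=...) for the first out-of-range parameter."""
    for name, (low, high) in RANGES.items():
        value = params.get(name)
        if value is not None and not low <= value <= high:
            fail(msg="Error: %s is not in the range from %s to %s." % (name, low, high))
            return


validate_ranges({"max_load": 64}, lambda msg: print(msg))
# prints: Error: max_load is not in the range from 1 to 32.
```
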
self.proposed["ip_prefix_name"] = self.ip_prefix_name + self.proposed["import_routepolicy_name"] = self.import_routepolicy_name + self.proposed["tag"] = self.tag + self.proposed["allow_filter"] = self.allow_filter + self.proposed["allow_up_down"] = self.allow_up_down + self.proposed["enablelevel1tolevel2"] = self.enablelevel1tolevel2 + self.proposed["protocol"] = self.protocol + self.proposed["processid"] = self.processid + self.proposed["cost_type"] = self.cost_type + self.proposed["import_cost"] = self.import_cost + self.proposed["import_tag"] = self.import_tag + self.proposed["import_route_policy"] = self.import_route_policy + self.proposed["impotr_leveltype"] = self.impotr_leveltype + self.proposed["inheritcost"] = self.inheritcost + self.proposed["permitibgp"] = self.permitibgp + self.proposed["export_protocol"] = self.export_protocol + self.proposed["export_policytype"] = self.export_policytype + self.proposed["export_processid"] = self.export_processid + self.proposed["export_aclnumorname"] = self.export_aclnumorname + self.proposed["export_ipprefix"] = self.export_ipprefix + self.proposed["export_routepolicyname"] = self.export_routepolicyname + self.proposed["import_aclnumorname"] = self.import_aclnumorname + self.proposed["import_ipprefix"] = self.import_ipprefix + self.proposed["import_routepolicyname"] = self.import_routepolicyname + self.proposed["bfd_min_rx"] = self.bfd_min_rx + self.proposed["bfd_min_tx"] = self.bfd_min_tx + self.proposed["bfd_multiplier_num"] = self.bfd_multiplier_num + self.proposed["state"] = self.state + + def get_existing(self): + """get existing info""" + + if not self.isis_dict: + self.existing["instance"] = None + else: + self.existing["instance"] = self.isis_dict.get("instance") + + def get_end_state(self): + """get end state info""" + + isis_dict = self.get_isis_dict() + if not isis_dict: + self.end_state["instance"] = None + else: + self.end_state["instance"] = isis_dict.get("instance") + if self.end_state == self.existing: + self.changed = False + + def work(self): + """worker""" + + self.check_params() + self.isis_dict = self.get_isis_dict() + self.get_existing() + self.get_proposed() + + # deal present or absent + xml_str = '' + if self.instance_id: + xml_str += self.config_session() + # update to device + if xml_str: + self.netconf_load_config(xml_str) + self.changed = True + + self.get_end_state() + self.results['changed'] = self.changed + self.results['proposed'] = self.proposed + self.results['existing'] = self.existing + self.results['end_state'] = self.end_state + if self.changed: + self.results['updates'] = self.updates_cmd + else: + self.results['updates'] = list() + self.module.exit_json(**self.results) + + +def main(): + """Module main""" + + argument_spec = dict( + instance_id=dict(required=True, type='int'), + description=dict(required=False, type='str'), + islevel=dict(required=False, type='str', choices=['level_1', 'level_2', 'level_1_2']), + coststyle=dict(required=False, type='str', choices=['narrow', 'wide', 'transition', 'ntransition', 'wtransition']), + relaxspfLimit=dict(required=False, type='bool'), + stdlevel1cost=dict(required=False, type='int'), + stdlevel2cost=dict(required=False, type='int'), + stdbandwidth=dict(required=False, type='int'), + autocostenable=dict(required=False, type='bool'), + autocostenablecompatible=dict(required=False, type='bool'), + netentity=dict(required=False, type='str'), + preference_value=dict(required=False, type='int'), + route_policy_name=dict(required=False, type='str'), + 
max_load=dict(required=False, type='int'), + ip_address=dict(required=False, type='str'), + weight=dict(required=False, type='int'), + penetration_direct=dict(required=False, type='str', choices=['level2-level1', 'level1-level2']), + aclnum_or_name=dict(required=False, type='str'), + ip_prefix_name=dict(required=False, type='str'), + import_routepolicy_name=dict(required=False, type='str'), + tag=dict(required=False, type='int'), + allow_filter=dict(required=False, type='bool'), + allow_up_down=dict(required=False, type='bool'), + enablelevel1tolevel2=dict(required=False, type='bool'), + defaultmode=dict(required=False, type='str', choices=['always', 'matchDefault', 'matchAny']), + mode_routepolicyname=dict(required=False, type='str'), + cost=dict(required=False, type='int'), + mode_tag=dict(required=False, type='int'), + level_type=dict(required=False, type='str', choices=['level_1', 'level_2', 'level_1_2']), + avoid_learning=dict(required=False, type='bool'), + protocol=dict(required=False, type='str', choices=['direct', 'ospf', 'isis', 'static', 'rip', 'bgp', 'ospfv3', 'all']), + processid=dict(required=False, type='int'), + cost_type=dict(required=False, type='str', choices=['external', 'internal']), + import_cost=dict(required=False, type='int'), + import_tag=dict(required=False, type='int'), + import_route_policy=dict(required=False, type='str'), + impotr_leveltype=dict(required=False, type='str', choices=['level_1', 'level_2', 'level_1_2']), + inheritcost=dict(required=False, type='bool'), + permitibgp=dict(required=False, type='bool'), + export_protocol=dict(required=False, type='str', choices=['direct', 'ospf', 'isis', 'static', 'rip', 'bgp', 'ospfv3', 'all']), + export_policytype=dict(required=False, type='str', choices=['aclNumOrName', 'ipPrefix', 'routePolicy']), + export_processid=dict(required=False, type='int'), + export_aclnumorname=dict(required=False, type='str'), + export_ipprefix=dict(required=False, type='str'), + export_routepolicyname=dict(required=False, type='str'), + import_aclnumorname=dict(required=False, type='str'), + import_ipprefix=dict(required=False, type='str'), + import_routepolicyname=dict(required=False, type='str'), + bfd_min_rx=dict(required=False, type='int'), + bfd_min_tx=dict(required=False, type='int'), + bfd_multiplier_num=dict(required=False, type='int'), + state=dict(required=False, default='present', choices=['present', 'absent']) + ) + + module = ISIS_View(argument_spec) + module.work() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/cloudengine/ce_lacp.py b/plugins/modules/network/cloudengine/ce_lacp.py new file mode 100644 index 0000000000..769499a8ad --- /dev/null +++ b/plugins/modules/network/cloudengine/ce_lacp.py @@ -0,0 +1,492 @@ +#!/usr/bin/python +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
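
Before the ce_lacp module body begins, it is worth condensing the lifecycle that ISIS_View.work() above (and the classes that follow) all share: validate input, read the existing state over NETCONF, diff it against the proposed state, push an edit-config only when something differs, then re-read the device to report the end state. A runnable toy version with all device I/O stubbed out; the class and attribute names are illustrative only:

```python
# A stripped-down, runnable sketch of the work() lifecycle these CloudEngine
# modules share. Nothing here talks NETCONF; `device` fakes the remote state.

class WorkerSketch(object):
    def __init__(self, desired):
        self.desired = desired
        self.device = {"description": "old"}   # pretend device state
        self.changed = False

    def get_existing(self):
        return dict(self.device)

    def config_session(self, existing):
        # build an edit payload only for values that differ from the device
        return {k: v for k, v in self.desired.items() if existing.get(k) != v}

    def load_config(self, payload):
        self.device.update(payload)            # stands in for <edit-config>
        self.changed = bool(payload)

    def work(self):
        existing = self.get_existing()
        payload = self.config_session(existing)
        if payload:
            self.load_config(payload)
        return {"changed": self.changed,
                "existing": existing,
                "end_state": dict(self.device)}


print(WorkerSketch({"description": "new"}).work())
# {'changed': True, 'existing': {'description': 'old'},
#  'end_state': {'description': 'new'}}
```
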
+#
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+DOCUMENTATION = r'''
+---
+module: ce_lacp
+short_description: Manages Eth-Trunk interfaces on HUAWEI CloudEngine switches
+description:
+    - Manages Eth-Trunk specific configuration parameters on HUAWEI CloudEngine switches.
+author: xuxiaowei0512 (@CloudEngine-Ansible)
+notes:
+  - C(state=absent) removes the Eth-Trunk config and interface if it already exists. If members to be removed are not explicitly
+    passed, all existing members (if any) are removed, and the Eth-Trunk itself is removed.
+  - This module requires the netconf system service be enabled on the remote device being managed.
+  - Recommended connection is C(netconf).
+  - This module also works with C(local) connections for legacy playbooks.
+options:
+  trunk_id:
+    description:
+      - Eth-Trunk interface number.
+        The value is an integer.
+        The value range depends on the assign forward eth-trunk mode command.
+        When 256 is specified, the value ranges from 0 to 255.
+        When 512 is specified, the value ranges from 0 to 511.
+        When 1024 is specified, the value ranges from 0 to 1023.
+    type: int
+  mode:
+    description:
+      - Specifies the working mode of an Eth-Trunk interface.
+    default: null
+    choices: ['Manual','Dynamic','Static']
+    type: str
+  preempt_enable:
+    description:
+      - Specifies lacp preempt enable of Eth-Trunk lacp.
+        The value is a boolean, 'true' or 'false'.
+    type: bool
+  state_flapping:
+    description:
+      - Lacp dampening state-flapping.
+    type: bool
+  port_id_extension_enable:
+    description:
+      - Enable the function of extending the LACP negotiation port number.
+    type: bool
+  unexpected_mac_disable:
+    description:
+      - Lacp dampening unexpected-mac disable.
+    type: bool
+  system_id:
+    description:
+      - Link Aggregation Control Protocol system ID, used in the Eth-Trunk interface view.
+      - The format is 'X-X-X', where each X is 1 to 4 hexadecimal digits (a, aa, aaa, or aaaa).
+    type: str
+  timeout_type:
+    description:
+      - LACP timeout type, either 'Fast' or 'Slow'.
+    choices: ['Slow', 'Fast']
+    type: str
+  fast_timeout:
+    description:
+      - When the LACP timeout type is 'Fast', a user-defined time can be set, as a number in the range from 3 to 90.
+    type: int
+  mixed_rate_link_enable:
+    description:
+      - Enable the function of mixed-rate link aggregation.
+    type: bool
+  preempt_delay:
+    description:
+      - Value of the preemption delay time.
+    type: int
+  collector_delay:
+    description:
+      - Value of the delay time, in units of 10 microseconds.
+    type: int
+  max_active_linknumber:
+    description:
+      - Maximum number of active links in a link aggregation group.
+    type: int
+  select:
+    description:
+      - Select priority or speed to preempt.
+    choices: ['Speed', 'Prority']
+    type: str
+  priority:
+    description:
+      - The priority of an Eth-Trunk member interface.
+    type: int
+  global_priority:
+    description:
+      - Configure the LACP priority in the system view.
+    type: int
+  state:
+    description:
+      - Manage the state of the resource.
+    default: present
+    choices: ['present','absent']
+    type: str
+'''
+EXAMPLES = r'''
+  - name: Ensure Eth-Trunk100 is created, and set to static LACP mode
+    ce_lacp:
+      trunk_id: 100
+      mode: Static
+      state: present
+  - name: Set the global LACP priority to 1231
+    ce_lacp:
+      global_priority: 1231
+      state: present
+  - name: Ensure Eth-Trunk100 is created, and set mode to Dynamic and configure other options
+    ce_lacp:
+      trunk_id: 100
+      mode: Dynamic
+      preempt_enable: True
+      state_flapping: True
+      port_id_extension_enable: True
+      unexpected_mac_disable: True
+      timeout_type: Fast
+      fast_timeout: 123
+      mixed_rate_link_enable: True
+      preempt_delay: 23
+      collector_delay: 33
+      state: present
+'''
+
+RETURN = r'''
+proposed:
+    description: k/v pairs of parameters passed into module
+    returned: always
+    type: dict
+    sample: {"trunk_id": "100", "members": ['10GE1/0/24','10GE1/0/25'], "mode": "lacp-static"}
+existing:
+    description: k/v pairs of existing Eth-Trunk
+    returned: always
+    type: dict
+    sample: {"trunk_id": "100", "hash_type": "mac", "members_detail": [
+            {"memberIfName": "10GE1/0/25", "memberIfState": "Down"}],
+            "min_links": "1", "mode": "manual"}
+end_state:
+    description: k/v pairs of Eth-Trunk info after module execution
+    returned: always
+    type: dict
+    sample: {"trunk_id": "100", "hash_type": "mac", "members_detail": [
+            {"memberIfName": "10GE1/0/24", "memberIfState": "Down"},
+            {"memberIfName": "10GE1/0/25", "memberIfState": "Down"}],
+            "min_links": "1", "mode": "lacp-static"}
+updates:
+    description: command sent to the device
+    returned: always
+    type: list
+    sample: ["interface Eth-Trunk 100",
+             "mode lacp-static",
+             "interface 10GE1/0/25",
+             "eth-trunk 100"]
+'''
+
+import xml.etree.ElementTree as ET
+import re
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+from ansible_collections.community.general.plugins.module_utils.network.cloudengine.ce import get_nc_config, set_nc_config
+
+LACP = {'trunk_id': 'ifName',
+        'mode': 'workMode',
+        'preempt_enable': 'isSupportPrmpt',
+        'state_flapping': 'dampStaFlapEn',
+        'port_id_extension_enable': 'trunkPortIdExt',
+        'unexpected_mac_disable': 'dampUnexpMacEn',
+        'system_id': 'trunkSysMac',
+        'timeout_type': 'rcvTimeoutType',
+        'fast_timeout': 'fastTimeoutUserDefinedValue',
+        'mixed_rate_link_enable': 'mixRateEnable',
+        'preempt_delay': 'promptDelay',
+        'collector_delay': 'collectMaxDelay',
+        'max_active_linknumber': 'maxActiveNum',
+        'select': 'selectPortStd',
+        'weight': 'weight',
+        'priority': 'portPriority',
+        'global_priority': 'priority'
+        }
+
+
+def has_element(parent, xpath):
+    """get or create an element by xpath"""
+    ele = parent.find('./' + xpath)
+    if ele is not None:
+        return ele
+    ele = parent
+    lpath = xpath.split('/')
+    for p in lpath:
+        e = parent.find('.//' + p)
+        if e is None:
+            e = ET.SubElement(ele, p)
+        ele = e
+    return ele
+
+
+def bulid_xml(kwargs, operation='get'):
+    """create an XML tree from a dictionary; operation is one of get, merge or delete"""
+    attrib = {'xmlns': "http://www.huawei.com/netconf/vrp",
+              'content-version': "1.0", 'format-version': "1.0"}
+
+    root = ET.Element('ifmtrunk')
+    for key in kwargs.keys():
+        if key in ('global_priority',):
+            xpath = 'lacpSysInfo'
+        elif key in ('priority',):
+            xpath = 'TrunkIfs/TrunkIf/TrunkMemberIfs/TrunkMemberIf/lacpPortInfo/lacpPort'
+        elif key in ['preempt_enable', 'timeout_type', 'fast_timeout', 'select', 'preempt_delay',
+                     'max_active_linknumber', 'collector_delay', 'mixed_rate_link_enable',
+                     'state_flapping', 'unexpected_mac_disable', 'system_id',
+                     'port_id_extension_enable']:
+            xpath = 'TrunkIfs/TrunkIf/lacpTrunk'
+        elif key in ('trunk_id', 'mode'):
+            xpath = 'TrunkIfs/TrunkIf'
+        if xpath != '':
+            parent = has_element(root, xpath)
+            element = ET.SubElement(parent, LACP[key])
+            if operation == 'merge':
+                parent.attrib = dict(operation=operation)
+            element.text = str(kwargs[key])
+            if key == 'mode':
+                element.text = str(kwargs[key])
+            if key == 'trunk_id':
+                element.text = 'Eth-Trunk' + str(kwargs[key])
+    root.attrib = attrib
+    config = ET.tostring(root)
+    if operation == 'merge' or operation == 'delete':
+        return '<config>%s</config>' % to_native(config)
+    return '<filter type="subtree">%s</filter>' % to_native(config)
+
+
+def check_param(kwargs):
+    """check the args list; boolean and choice values are not checked here,
+    because they are limited by the argument spec in main"""
+
+    for key in kwargs:
+        if kwargs[key] is None:
+            continue
+        if key == 'trunk_id':
+            value = int(kwargs[key])
+            # the maximal value is 1024, although the effective range is
+            # limited by the 'assign forward eth-trunk mode' command
+            if value < 0 or value > 1024:
+                return 'Error: Wrong value of Eth-Trunk interface number'
+        elif key == 'system_id':
+            # X-X-X, where each X is 1 to 4 hexadecimal digits
+            if not re.match(r'[0-9a-f]{1,4}\-[0-9a-f]{1,4}\-[0-9a-f]{1,4}', kwargs[key], re.IGNORECASE):
+                return 'Error: The system-id is invalid.'
+            values = kwargs[key].split('-')
+            flag = 0
+            # a system-id in which every X is all zeros is invalid
+            for v in values:
+                if len(v.strip('0')) < 1:
+                    flag += 1
+            if flag == 3:
+                return 'Error: The system-id is invalid.'
+        elif key == 'timeout_type':
+            # value comes from choices=['Slow', 'Fast'], already checked by AnsibleModule
+            pass
+        elif key == 'fast_timeout':
+            value = int(kwargs[key])
+            if value < 3 or value > 90:
+                return 'Error: Wrong value of timeout, the fast user-defined value ranges from 3 to 90'
+            rtype = str(kwargs.get('timeout_type'))
+            if rtype == 'Slow':
+                return 'Error: A short timeout period for receiving packets is needed when a user-defined time is set.'
+        elif key == 'preempt_delay':
+            value = int(kwargs[key])
+            if value < 0 or value > 180:
+                return 'Error: Value of preemption delay time is from 0 to 180'
+        elif key == 'collector_delay':
+            value = int(kwargs[key])
+            if value < 0 or value > 65535:
+                return 'Error: Value of collector delay time is from 0 to 65535'
+        elif key == 'max_active_linknumber':
+            value = int(kwargs[key])
+            if value < 0 or value > 64:
+                return 'Error: Value of max active-linknumber is from 0 to 64'
+        elif key == 'priority' or key == 'global_priority':
+            value = int(kwargs[key])
+            if value < 0 or value > 65535:
+                return 'Error: Value of priority is from 0 to 65535'
+    return 'ok'
+
+
+def xml_to_dict(args):
+    """transfer an XML string into a dict"""
+    rdict = dict()
+    args = re.sub(r'xmlns=\".+?\"', '', args)
+    root = ET.fromstring(args)
+    ifmtrunk = root.find('.//ifmtrunk')
+    if ifmtrunk is not None:
+        try:
+            ifmtrunk_iter = ET.Element.iter(ifmtrunk)
+        except AttributeError:
+            ifmtrunk_iter = ifmtrunk.getiterator()
+
+        for ele in ifmtrunk_iter:
+            if ele.text is not None and len(ele.text.strip()) > 0:
+                rdict[ele.tag] = ele.text
+    return rdict
+
+
+def compare_config(module, kwarg_exist, kwarg_end):
+    """compare config between exist and end"""
+    dic_command = {'isSupportPrmpt': 'lacp preempt enable',
+                   'rcvTimeoutType': 'lacp timeout',  # lacp timeout fast user-defined 23
+                   'fastTimeoutUserDefinedValue': 'lacp timeout user-defined',
+                   'selectPortStd': 'lacp select',
+                   'promptDelay': 'lacp preempt delay',
+                   'maxActiveNum': 'lacp max active-linknumber',
+                   'collectMaxDelay': 'lacp collector delay',
+                   'mixRateEnable': 'lacp mixed-rate link enable',
+                   'dampStaFlapEn': 'lacp dampening state-flapping',
+                   'dampUnexpMacEn': 'lacp dampening unexpected-mac disable',
+                   'trunkSysMac': 'lacp system-id',
+                   'trunkPortIdExt': 'lacp port-id-extension enable',
+                   'portPriority': 'lacp priority',  # interface 10GE1/0/1
+                   'lacpMlagPriority': 'lacp m-lag priority',
+                   'lacpMlagSysId': 'lacp m-lag system-id',
+                   'priority': 'lacp priority'
+                   }
+    rlist = list()
+    exist = set(kwarg_exist.keys())
+    end = set(kwarg_end.keys())
+    undo = exist - end
+    add = end - exist
+    update = end & exist
+
+    for key in undo:
+        if key in dic_command:
+            rlist.append('undo ' + dic_command[key])
+    for key in add:
+        if key in dic_command:
+            rlist.append(dic_command[key] + ' ' + kwarg_end[key])
+    for key in update:
+        if kwarg_exist[key] != kwarg_end[key] and key in dic_command:
+            if kwarg_exist[key] == 'true' and kwarg_end[key] == 'false':
+                rlist.append('undo ' + dic_command[key])
+            elif kwarg_exist[key] == 'false' and kwarg_end[key] == 'true':
+                rlist.append(dic_command[key])
+            else:
+                rlist.append(dic_command[key] + ' ' + kwarg_end[key].lower())
+    return rlist
+
+
+class Lacp(object):
+    """
+    Manages Eth-Trunk interfaces LACP.
+ """ + + def __init__(self, argument_spec): + self.spec = argument_spec + self.module = None + self.init_module() + + # module input info + self.trunk_id = self.module.params['trunk_id'] + self.mode = self.module.params['mode'] + self.param = dict() + + self.state = self.module.params['state'] + + # state + self.changed = False + self.updates_cmd = list() + self.results = dict() + self.proposed = dict() + self.existing = dict() + self.end_state = dict() + + def init_module(self): + """ init AnsibleModule """ + + self.module = AnsibleModule( + argument_spec=self.spec, + mutually_exclusive=[['trunk_id', 'global_priority']], + required_one_of=[['trunk_id', 'global_priority']], + supports_check_mode=True) + + def check_params(self): + """check module params """ + for key in self.module.params.keys(): + if key in LACP.keys() and self.module.params[key] is not None: + self.param[key] = self.module.params[key] + if isinstance(self.module.params[key], bool): + self.param[key] = str(self.module.params[key]).lower() + msg = check_param(self.param) + if msg != 'ok': + self.module.fail_json(msg=msg) + + def get_existing(self): + """get existing""" + xml_str = bulid_xml(self.param) + xml = get_nc_config(self.module, xml_str) + return xml_to_dict(xml) + + def get_proposed(self): + """get proposed""" + proposed = dict(state=self.state) + proposed.update(self.param) + return proposed + + def get_end_state(self): + """ get end_state""" + xml_str = bulid_xml(self.param) + xml = get_nc_config(self.module, xml_str) + return xml_to_dict(xml) + + def work(self): + """worker""" + + self.check_params() + existing = self.get_existing() + proposed = self.get_proposed() + + # deal present or absent + if self.state == "present": + operation = 'merge' + else: + operation = 'delete' + + xml_str = bulid_xml(self.param, operation=operation) + set_nc_config(self.module, xml_str) + end_state = self.get_end_state() + + self.results['proposed'] = proposed + self.results['existing'] = existing + self.results['end_state'] = end_state + updates_cmd = compare_config(self.module, existing, end_state) + self.results['updates'] = updates_cmd + if updates_cmd: + self.results['changed'] = True + else: + self.results['changed'] = False + + self.module.exit_json(**self.results) + + +def main(): + + argument_spec = dict( + mode=dict(required=False, + choices=['Manual', 'Dynamic', 'Static'], + type='str'), + trunk_id=dict(required=False, type='int'), + preempt_enable=dict(required=False, type='bool'), + state_flapping=dict(required=False, type='bool'), + port_id_extension_enable=dict(required=False, type='bool'), + unexpected_mac_disable=dict(required=False, type='bool'), + system_id=dict(required=False, type='str'), + timeout_type=dict(required=False, type='str', choices=['Slow', 'Fast']), + fast_timeout=dict(required=False, type='int'), + mixed_rate_link_enable=dict(required=False, type='bool'), + preempt_delay=dict(required=False, type='int'), + collector_delay=dict(required=False, type='int'), + max_active_linknumber=dict(required=False, type='int'), + select=dict(required=False, type='str', choices=['Speed', 'Prority']), + priority=dict(required=False, type='int'), + global_priority=dict(required=False, type='int'), + state=dict(required=False, default='present', + choices=['present', 'absent']) + ) + + module = Lacp(argument_spec) + module.work() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/cloudengine/ce_link_status.py b/plugins/modules/network/cloudengine/ce_link_status.py new file mode 100644 
index 0000000000..c1c58e8a93 --- /dev/null +++ b/plugins/modules/network/cloudengine/ce_link_status.py @@ -0,0 +1,568 @@ +#!/usr/bin/python +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- + +module: ce_link_status +short_description: Get interface link status on HUAWEI CloudEngine switches. +description: + - Get interface link status on HUAWEI CloudEngine switches. +author: + - Zhijin Zhou (@QijunPan) +notes: + - Current physical state shows an interface's physical status. + - Current link state shows an interface's link layer protocol status. + - Current IPv4 state shows an interface's IPv4 protocol status. + - Current IPv6 state shows an interface's IPv6 protocol status. + - Inbound octets(bytes) shows the number of bytes that an interface received. + - Inbound unicast(pkts) shows the number of unicast packets that an interface received. + - Inbound multicast(pkts) shows the number of multicast packets that an interface received. + - Inbound broadcast(pkts) shows the number of broadcast packets that an interface received. + - Inbound error(pkts) shows the number of error packets that an interface received. + - Inbound drop(pkts) shows the total number of packets that were sent to the interface but dropped by an interface. + - Inbound rate(byte/sec) shows the rate at which an interface receives bytes within an interval. + - Inbound rate(pkts/sec) shows the rate at which an interface receives packets within an interval. + - Outbound octets(bytes) shows the number of the bytes that an interface sent. + - Outbound unicast(pkts) shows the number of unicast packets that an interface sent. + - Outbound multicast(pkts) shows the number of multicast packets that an interface sent. + - Outbound broadcast(pkts) shows the number of broadcast packets that an interface sent. + - Outbound error(pkts) shows the total number of packets that an interface sent but dropped by the remote interface. + - Outbound drop(pkts) shows the number of dropped packets that an interface sent. + - Outbound rate(byte/sec) shows the rate at which an interface sends bytes within an interval. + - Outbound rate(pkts/sec) shows the rate at which an interface sends packets within an interval. + - Speed shows the rate for an Ethernet interface. + - This module requires the netconf system service be enabled on the remote device being managed. + - Recommended connection is C(netconf). + - This module also works with C(local) connections for legacy playbooks. 
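
The options section below takes a single interface argument; the interesting work happens when the module walks the NETCONF reply and maps element tags onto the human-readable keys listed in the notes above and in RETURN. A self-contained sketch of that mapping, using fabricated sample XML rather than a real device reply; only the tag names mirror the module:

```python
# Sketch of how ce_link_status maps NETCONF reply elements onto result keys.
# SAMPLE is fabricated test data; the tag names follow ifDynamicInfo.
import xml.etree.ElementTree as ElementTree

SAMPLE = """
<interface>
  <ifName>40GE1/0/1</ifName>
  <ifDynamicInfo>
    <ifPhyStatus>up</ifPhyStatus>
    <ifLinkStatus>up</ifLinkStatus>
    <ifV4State>down</ifV4State>
  </ifDynamicInfo>
</interface>
"""

TAG_TO_KEY = {
    "ifPhyStatus": "Current physical state",
    "ifLinkStatus": "Current link state",
    "ifV4State": "Current IPv4 state",
    "ifV6State": "Current IPv6 state",
}

root = ElementTree.fromstring(SAMPLE)
name = root.findtext("ifName").lower()
result = {name: {}}
for ele in root.find("ifDynamicInfo"):
    if ele.tag in TAG_TO_KEY:
        result[name][TAG_TO_KEY[ele.tag]] = ele.text
print(result)
# {'40ge1/0/1': {'Current physical state': 'up',
#                'Current link state': 'up', 'Current IPv4 state': 'down'}}
```
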
+options: + interface: + description: + - For the interface parameter, you can enter C(all) to display information about all interfaces, + an interface type such as C(40GE) to display information about interfaces of the specified type, + or full name of an interface such as C(40GE1/0/22) or C(vlanif10) + to display information about the specific interface. + required: true +''' + +EXAMPLES = ''' + +- name: Link status test + hosts: cloudengine + connection: local + gather_facts: no + vars: + cli: + host: "{{ inventory_hostname }}" + port: "{{ ansible_ssh_port }}" + username: "{{ username }}" + password: "{{ password }}" + transport: cli + + tasks: + + - name: Get specified interface link status information + ce_link_status: + interface: 40GE1/0/1 + provider: "{{ cli }}" + + - name: Get specified interface type link status information + ce_link_status: + interface: 40GE + provider: "{{ cli }}" + + - name: Get all interfaces link status information + ce_link_status: + interface: all + provider: "{{ cli }}" +''' + +RETURN = ''' +result: + description: Interface link status information + returned: always + type: dict + sample: { + "40ge2/0/8": { + "Current IPv4 state": "down", + "Current IPv6 state": "down", + "Current link state": "up", + "Current physical state": "up", + "Inbound broadcast(pkts)": "0", + "Inbound drop(pkts)": "0", + "Inbound error(pkts)": "0", + "Inbound multicast(pkts)": "20151", + "Inbound octets(bytes)": "7314813", + "Inbound rate(byte/sec)": "11", + "Inbound rate(pkts/sec)": "0", + "Inbound unicast(pkts)": "0", + "Outbound broadcast(pkts)": "1", + "Outbound drop(pkts)": "0", + "Outbound error(pkts)": "0", + "Outbound multicast(pkts)": "20152", + "Outbound octets(bytes)": "7235021", + "Outbound rate(byte/sec)": "11", + "Outbound rate(pkts/sec)": "0", + "Outbound unicast(pkts)": "0", + "Speed": "40GE" + } + } +''' + +from xml.etree import ElementTree +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.network.cloudengine.ce import ce_argument_spec, get_nc_config, get_nc_next + +CE_NC_GET_PORT_SPEED = """ + + + + + %s + + + + + + + +""" + +CE_NC_GET_INT_STATISTICS = """ + + + + + %s + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +""" + +INTERFACE_ALL = 1 +INTERFACE_TYPE = 2 +INTERFACE_FULL_NAME = 3 + + +def get_interface_type(interface): + """Gets the type of interface, such as 10GE, ETH-TRUNK, VLANIF...""" + + if interface is None: + return None + + if interface.upper().startswith('GE'): + return 'ge' + elif interface.upper().startswith('10GE'): + return '10ge' + elif interface.upper().startswith('25GE'): + return '25ge' + elif interface.upper().startswith('4X10GE'): + return '4x10ge' + elif interface.upper().startswith('40GE'): + return '40ge' + elif interface.upper().startswith('100GE'): + return '100ge' + elif interface.upper().startswith('VLANIF'): + return 'vlanif' + elif interface.upper().startswith('LOOPBACK'): + return 'loopback' + elif interface.upper().startswith('METH'): + return 'meth' + elif interface.upper().startswith('ETH-TRUNK'): + return 'eth-trunk' + elif interface.upper().startswith('VBDIF'): + return 'vbdif' + elif interface.upper().startswith('NVE'): + return 'nve' + elif interface.upper().startswith('TUNNEL'): + return 'tunnel' + elif interface.upper().startswith('ETHERNET'): + return 'ethernet' + elif interface.upper().startswith('FCOE-PORT'): + return 'fcoe-port' + elif interface.upper().startswith('FABRIC-PORT'): + return 'fabric-port' + elif 
interface.upper().startswith('STACK-PORT'): + return 'stack-Port' + elif interface.upper().startswith('NULL'): + return 'null' + else: + return None + + +def is_ethernet_port(interface): + """Judge whether it is ethernet port""" + + ethernet_port = ['ge', '10ge', '25ge', '4x10ge', '40ge', '100ge', 'meth'] + if_type = get_interface_type(interface) + if if_type in ethernet_port: + return True + return False + + +class LinkStatus(object): + """Get interface link status information""" + + def __init__(self, argument_spec): + self.spec = argument_spec + self.module = None + self.init_module() + + # interface name + self.interface = self.module.params['interface'] + self.interface = self.interface.replace(' ', '').lower() + self.param_type = None + self.if_type = None + + # state + self.results = dict() + self.result = dict() + + def check_params(self): + """Check all input params""" + + if not self.interface: + self.module.fail_json(msg='Error: Interface name cannot be empty.') + + if self.interface and self.interface != 'all': + if not self.if_type: + self.module.fail_json( + msg='Error: Interface name of %s is error.' % self.interface) + + def init_module(self): + """Init module object""" + + self.module = AnsibleModule( + argument_spec=self.spec, supports_check_mode=True) + + def show_result(self): + """Show result""" + + self.results['result'] = self.result + + self.module.exit_json(**self.results) + + def get_intf_dynamic_info(self, dyn_info, intf_name): + """Get interface dynamic information""" + + if not intf_name: + return + + if dyn_info: + for eles in dyn_info: + if eles.tag in ["ifPhyStatus", "ifV4State", "ifV6State", "ifLinkStatus"]: + if eles.tag == "ifPhyStatus": + self.result[intf_name][ + 'Current physical state'] = eles.text + elif eles.tag == "ifLinkStatus": + self.result[intf_name][ + 'Current link state'] = eles.text + elif eles.tag == "ifV4State": + self.result[intf_name][ + 'Current IPv4 state'] = eles.text + elif eles.tag == "ifV6State": + self.result[intf_name][ + 'Current IPv6 state'] = eles.text + + def get_intf_statistics_info(self, stat_info, intf_name): + """Get interface statistics information""" + + if not intf_name: + return + + if_type = get_interface_type(intf_name) + if if_type == 'fcoe-port' or if_type == 'nve' or if_type == 'tunnel' or \ + if_type == 'vbdif' or if_type == 'vlanif': + return + + if stat_info: + for eles in stat_info: + if eles.tag in ["receiveByte", "sendByte", "rcvUniPacket", "rcvMutiPacket", "rcvBroadPacket", + "sendUniPacket", "sendMutiPacket", "sendBroadPacket", "rcvErrorPacket", + "rcvDropPacket", "sendErrorPacket", "sendDropPacket"]: + if eles.tag == "receiveByte": + self.result[intf_name][ + 'Inbound octets(bytes)'] = eles.text + elif eles.tag == "rcvUniPacket": + self.result[intf_name][ + 'Inbound unicast(pkts)'] = eles.text + elif eles.tag == "rcvMutiPacket": + self.result[intf_name][ + 'Inbound multicast(pkts)'] = eles.text + elif eles.tag == "rcvBroadPacket": + self.result[intf_name][ + 'Inbound broadcast(pkts)'] = eles.text + elif eles.tag == "rcvErrorPacket": + self.result[intf_name][ + 'Inbound error(pkts)'] = eles.text + elif eles.tag == "rcvDropPacket": + self.result[intf_name][ + 'Inbound drop(pkts)'] = eles.text + elif eles.tag == "sendByte": + self.result[intf_name][ + 'Outbound octets(bytes)'] = eles.text + elif eles.tag == "sendUniPacket": + self.result[intf_name][ + 'Outbound unicast(pkts)'] = eles.text + elif eles.tag == "sendMutiPacket": + self.result[intf_name][ + 'Outbound multicast(pkts)'] = eles.text + elif 
eles.tag == "sendBroadPacket": + self.result[intf_name][ + 'Outbound broadcast(pkts)'] = eles.text + elif eles.tag == "sendErrorPacket": + self.result[intf_name][ + 'Outbound error(pkts)'] = eles.text + elif eles.tag == "sendDropPacket": + self.result[intf_name][ + 'Outbound drop(pkts)'] = eles.text + + def get_intf_cleared_stat(self, clr_stat, intf_name): + """Get interface cleared state information""" + + if not intf_name: + return + + if_type = get_interface_type(intf_name) + if if_type == 'fcoe-port' or if_type == 'nve' or if_type == 'tunnel' or \ + if_type == 'vbdif' or if_type == 'vlanif': + return + + if clr_stat: + for eles in clr_stat: + if eles.tag in ["inByteRate", "inPacketRate", "outByteRate", "outPacketRate"]: + if eles.tag == "inByteRate": + self.result[intf_name][ + 'Inbound rate(byte/sec)'] = eles.text + elif eles.tag == "inPacketRate": + self.result[intf_name][ + 'Inbound rate(pkts/sec)'] = eles.text + elif eles.tag == "outByteRate": + self.result[intf_name][ + 'Outbound rate(byte/sec)'] = eles.text + elif eles.tag == "outPacketRate": + self.result[intf_name][ + 'Outbound rate(pkts/sec)'] = eles.text + + def get_all_interface_info(self, intf_type=None): + """Get interface information by all or by interface type""" + + xml_str = CE_NC_GET_INT_STATISTICS % '' + con_obj = get_nc_next(self.module, xml_str) + if "" in con_obj: + return + + xml_str = con_obj.replace('\r', '').replace('\n', '').\ + replace('xmlns="urn:ietf:params:xml:ns:netconf:base:1.0"', "").\ + replace('xmlns="http://www.huawei.com/netconf/vrp"', "") + + # get link status information + root = ElementTree.fromstring(xml_str) + intfs_info = root.findall("ifm/interfaces/interface") + if not intfs_info: + return + + intf_name = '' + flag = False + for eles in intfs_info: + if eles.tag == "interface": + for ele in eles: + if ele.tag in ["ifName", "ifDynamicInfo", "ifStatistics", "ifClearedStat"]: + if ele.tag == "ifName": + intf_name = ele.text.lower() + if intf_type: + if get_interface_type(intf_name) != intf_type.lower(): + break + else: + flag = True + self.init_interface_data(intf_name) + if is_ethernet_port(intf_name): + self.get_port_info(intf_name) + if ele.tag == "ifDynamicInfo": + self.get_intf_dynamic_info(ele, intf_name) + elif ele.tag == "ifStatistics": + self.get_intf_statistics_info(ele, intf_name) + elif ele.tag == "ifClearedStat": + self.get_intf_cleared_stat(ele, intf_name) + if intf_type and not flag: + self.module.fail_json( + msg='Error: %s interface type does not exist.' % intf_type.upper()) + + def get_interface_info(self): + """Get interface information""" + + xml_str = CE_NC_GET_INT_STATISTICS % self.interface.upper() + con_obj = get_nc_config(self.module, xml_str) + if "" in con_obj: + self.module.fail_json( + msg='Error: %s interface does not exist.' 
% self.interface.upper()) + return + + xml_str = con_obj.replace('\r', '').replace('\n', '').\ + replace('xmlns="urn:ietf:params:xml:ns:netconf:base:1.0"', "").\ + replace('xmlns="http://www.huawei.com/netconf/vrp"', "") + + # get link status information + root = ElementTree.fromstring(xml_str) + intf_info = root.find("ifm/interfaces/interface") + if intf_info: + for eles in intf_info: + if eles.tag in ["ifDynamicInfo", "ifStatistics", "ifClearedStat"]: + if eles.tag == "ifDynamicInfo": + self.get_intf_dynamic_info(eles, self.interface) + elif eles.tag == "ifStatistics": + self.get_intf_statistics_info(eles, self.interface) + elif eles.tag == "ifClearedStat": + self.get_intf_cleared_stat(eles, self.interface) + + def init_interface_data(self, intf_name): + """Init interface data""" + + # init link status data + self.result[intf_name] = dict() + self.result[intf_name]['Current physical state'] = 'down' + self.result[intf_name]['Current link state'] = 'down' + self.result[intf_name]['Current IPv4 state'] = 'down' + self.result[intf_name]['Current IPv6 state'] = 'down' + self.result[intf_name]['Inbound octets(bytes)'] = '--' + self.result[intf_name]['Inbound unicast(pkts)'] = '--' + self.result[intf_name]['Inbound multicast(pkts)'] = '--' + self.result[intf_name]['Inbound broadcast(pkts)'] = '--' + self.result[intf_name]['Inbound error(pkts)'] = '--' + self.result[intf_name]['Inbound drop(pkts)'] = '--' + self.result[intf_name]['Inbound rate(byte/sec)'] = '--' + self.result[intf_name]['Inbound rate(pkts/sec)'] = '--' + self.result[intf_name]['Outbound octets(bytes)'] = '--' + self.result[intf_name]['Outbound unicast(pkts)'] = '--' + self.result[intf_name]['Outbound multicast(pkts)'] = '--' + self.result[intf_name]['Outbound broadcast(pkts)'] = '--' + self.result[intf_name]['Outbound error(pkts)'] = '--' + self.result[intf_name]['Outbound drop(pkts)'] = '--' + self.result[intf_name]['Outbound rate(byte/sec)'] = '--' + self.result[intf_name]['Outbound rate(pkts/sec)'] = '--' + self.result[intf_name]['Speed'] = '--' + + def get_port_info(self, interface): + """Get port information""" + + if_type = get_interface_type(interface) + if if_type == 'meth': + xml_str = CE_NC_GET_PORT_SPEED % interface.lower().replace('meth', 'MEth') + else: + xml_str = CE_NC_GET_PORT_SPEED % interface.upper() + con_obj = get_nc_config(self.module, xml_str) + if "" in con_obj: + return + + xml_str = con_obj.replace('\r', '').replace('\n', '').\ + replace('xmlns="urn:ietf:params:xml:ns:netconf:base:1.0"', "").\ + replace('xmlns="http://www.huawei.com/netconf/vrp"', "") + + # get link status information + root = ElementTree.fromstring(xml_str) + port_info = root.find("devm/ports/port") + if port_info: + for eles in port_info: + if eles.tag == "ethernetPort": + for ele in eles: + if ele.tag == 'speed': + self.result[interface]['Speed'] = ele.text + + def get_link_status(self): + """Get link status information""" + + if self.param_type == INTERFACE_FULL_NAME: + self.init_interface_data(self.interface) + self.get_interface_info() + if is_ethernet_port(self.interface): + self.get_port_info(self.interface) + elif self.param_type == INTERFACE_TYPE: + self.get_all_interface_info(self.interface) + else: + self.get_all_interface_info() + + def get_intf_param_type(self): + """Get the type of input interface parameter""" + + if self.interface == 'all': + self.param_type = INTERFACE_ALL + return + + if self.if_type == self.interface: + self.param_type = INTERFACE_TYPE + return + + self.param_type = INTERFACE_FULL_NAME + + def 
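
Editor's note: get_intf_param_type() distinguishes the three accepted input forms using the constants and helper defined earlier. A standalone mirror for illustration (classify() is a hypothetical name):

    def classify(interface):
        """How the lower-cased input is classified."""
        if interface == 'all':
            return INTERFACE_ALL                       # e.g. 'all'
        if get_interface_type(interface) == interface:
            return INTERFACE_TYPE                      # e.g. '40ge'
        return INTERFACE_FULL_NAME                     # e.g. '40ge1/0/22'
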
work(self): + """Worker""" + + self.if_type = get_interface_type(self.interface) + self.check_params() + self.get_intf_param_type() + self.get_link_status() + self.show_result() + + +def main(): + """Main function entry""" + + argument_spec = dict( + interface=dict(required=True, type='str'), + ) + argument_spec.update(ce_argument_spec) + linkstatus_obj = LinkStatus(argument_spec) + linkstatus_obj.work() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/cloudengine/ce_lldp.py b/plugins/modules/network/cloudengine/ce_lldp.py new file mode 100644 index 0000000000..3502ed3ee6 --- /dev/null +++ b/plugins/modules/network/cloudengine/ce_lldp.py @@ -0,0 +1,791 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright 2019 Red Hat +# GNU General Public License v3.0+ +# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- + +module: ce_lldp +short_description: Manages LLDP configuration on HUAWEI CloudEngine switches. +description: + - Manages LLDP configuration on HUAWEI CloudEngine switches. +author: + - xuxiaowei0512 (@CloudEngine-Ansible) +notes: + - This module requires the netconf system service be enabled on the remote device being managed. + - Recommended connection is C(netconf). + - This module also works with C(local) connections for legacy playbooks. +options: + lldpenable: + description: + - Set global LLDP enable state. + required: false + choices: ['enabled', 'disabled'] + type: str + mdnstatus: + description: + - Set global MDN enable state. + required: false + choices: ['rxOnly', 'disabled'] + type: str + interval: + description: + - Frequency at which LLDP advertisements are sent (in seconds). + required: false + type: int + hold_multiplier: + description: + - Time multiplier for device information in neighbor devices. + required: false + type: int + restart_delay: + description: + - Specifies the delay time of the interface LLDP module from disabled state to re enable. + required: false + type: int + transmit_delay: + description: + - Delay time for sending LLDP messages. + required: false + type: int + notification_interval: + description: + - Suppression time for sending LLDP alarm. + required: false + type: int + fast_count: + description: + - The number of LLDP messages sent to the neighbor nodes by the specified device. + required: false + type: int + mdn_notification_interval: + description: + - Delay time for sending MDN neighbor information change alarm. + required: false + type: int + management_address: + description: + - The management IP address of LLDP. + required: false + default: null + type: str + bind_name: + description: + - Binding interface name. + required: false + default: null + type: str + state: + description: + - Manage the state of the resource. 
+    required: false
+    default: present
+    type: str
+    choices: ['present', 'absent']
+'''
+
+EXAMPLES = '''
+  - name: "Configure global LLDP enable state"
+    ce_lldp:
+      lldpenable: enabled
+
+  - name: "Configure global MDN enable state"
+    ce_lldp:
+      mdnstatus: rxOnly
+
+  - name: "Configure the LLDP transmit interval (global LLDP must already be enabled)"
+    ce_lldp:
+      lldpenable: enabled
+      interval: 32
+
+  - name: "Configure the LLDP hold multiplier (global LLDP must already be enabled)"
+    ce_lldp:
+      lldpenable: enabled
+      hold_multiplier: 5
+
+  - name: "Configure the delay after which a disabled interface LLDP module is re-enabled"
+    ce_lldp:
+      lldpenable: enabled
+      restart_delay: 3
+
+  - name: "Configure the delay for sending LLDP messages"
+    ce_lldp:
+      lldpenable: enabled
+      transmit_delay: 4
+
+  - name: "Configure the delay for sending the neighbor information change alarm"
+    ce_lldp:
+      lldpenable: enabled
+      notification_interval: 6
+
+  - name: "Configure the number of LLDP messages sent to neighbor nodes"
+    ce_lldp:
+      lldpenable: enabled
+      fast_count: 5
+
+  - name: "Configure the delay for sending the MDN neighbor information change alarm"
+    ce_lldp:
+      lldpenable: enabled
+      mdn_notification_interval: 6
+
+  - name: "Configure the management IP address of LLDP"
+    ce_lldp:
+      lldpenable: enabled
+      management_address: 10.1.0.1
+
+  - name: "Configure LLDP to bind the management address to an interface"
+    ce_lldp:
+      lldpenable: enabled
+      bind_name: LoopBack2
+'''
+
+RETURN = '''
+proposed:
+    description: k/v pairs of parameters passed into module
+    returned: always
+    type: dict
+    sample: {
+                "lldpenable": "enabled",
+                "mdnstatus": "rxOnly",
+                "interval": "32",
+                "hold_multiplier": "5",
+                "restart_delay": "3",
+                "transmit_delay": "4",
+                "notification_interval": "6",
+                "fast_count": "5",
+                "mdn_notification_interval": "6",
+                "management_address": "10.1.0.1",
+                "bind_name": "LoopBack2",
+                "state": "present"
+    }
+existing:
+    description: k/v pairs of existing global LLDP configuration.
+    returned: always
+    type: dict
+    sample: {
+                "lldpenable": "disabled",
+                "mdnstatus": "disabled"
+    }
+end_state:
+    description: k/v pairs of global LLDP configuration after module execution.
+ returned: always + type: dict + sample: { + "lldpenable": "enabled", + "mdnstatus": "rxOnly", + "interval": "32", + "hold_multiplier": "5", + "restart_delay": "3", + "transmit_delay": "4", + "notification_interval": "6", + "fast_count": "5", + "mdn_notification_interval": "6", + "management_address": "10.1.0.1", + "bind_name": "LoopBack2" + } +updates: + description: command sent to the device + returned: always + type: list + sample: [ + "lldp enable", + "lldp mdn enable", + "lldp transmit interval 32", + "lldp transmit multiplier 5", + "lldp restart 3", + "lldp transmit delay 4", + "lldp trap-interval 6", + "lldp fast-count 5", + "lldp mdn trap-interval 6", + "lldp management-address 10.1.0.1", + "lldp management-address bind interface LoopBack 2" + ] +changed: + description: check to see if a change was made on the device + returned: always + type: bool + sample: true +''' + +import copy +import re +from xml.etree import ElementTree +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.network.cloudengine.ce import set_nc_config, get_nc_config + +CE_NC_GET_GLOBAL_LLDPENABLE_CONFIG = """ + + + + + + + + +""" + +CE_NC_MERGE_GLOBA_LLDPENABLE_CONFIG = """ + + + + %s + + + +""" + +CE_NC_MERGE_GLOBA_MDNENABLE_CONFIG = """ + + + + %s + + + +""" + +CE_NC_GET_GLOBAL_LLDP_CONFIG = """ + + + + + + + + + + + + + + + + + + + +""" + +CE_NC_MERGE_GLOBAL_LLDP_CONFIG_HEADER = """ + + + + +""" + +CE_NC_MERGE_GLOBAL_LLDP_CONFIG_INTERVAL = """ + %s +""" + +CE_NC_MERGE_GLOBAL_LLDP_CONFIG_HOLD_MULTIPLIER = """ + %s +""" + +CE_NC_MERGE_GLOBAL_LLDP_CONFIG_RESTART_DELAY = """ + %s +""" + +CE_NC_MERGE_GLOBAL_LLDP_CONFIG_TRANSMIT_DELAY = """ + %s +""" + +CE_NC_MERGE_GLOBAL_LLDP_CONFIG_NOTIFICATION_INTERVAL = """ + %s +""" + +CE_NC_MERGE_GLOBAL_LLDP_CONFIG_FAST_COUNT = """ + %s +""" + +CE_NC_MERGE_GLOBAL_LLDP_CONFIG_MDN_NOTIFICATION_INTERVAL = """ + %s +""" + +CE_NC_MERGE_GLOBAL_LLDP_CONFIG_MANAGEMENT_ADDRESS = """ + %s +""" + +CE_NC_MERGE_GLOBAL_LLDP_CONFIG_BIND_NAME = """ + %s +""" + +CE_NC_MERGE_GLOBAL_LLDP_CONFIG_TAIL = """ + + + + +""" + + +class Lldp(object): + """Manage global lldp enable configuration""" + + def __init__(self, argument_spec): + self.spec = argument_spec + self.module = None + self.init_module() + self.lldpenable = self.module.params['lldpenable'] or None + self.interval = self.module.params['interval'] or None + self.mdnstatus = self.module.params['mdnstatus'] or None + self.hold_multiplier = self.module.params['hold_multiplier'] or None + self.restart_delay = self.module.params['restart_delay'] or None + self.transmit_delay = self.module.params['transmit_delay'] or None + self.notification_interval = self.module.params['notification_interval'] or None + self.fast_count = self.module.params['fast_count'] or None + self.mdn_notification_interval = self.module.params['mdn_notification_interval'] or None + self.management_address = self.module.params['management_address'] + self.bind_name = self.module.params['bind_name'] + self.state = self.module.params['state'] + self.lldp_conf = dict() + self.conf_exsit = False + self.conf_exsit_lldp = False + self.enable_flag = 0 + self.check_params() + self.existing_state_value = dict() + self.existing_end_state_value = dict() + self.changed = False + self.proposed_changed = dict() + self.updates_cmd = list() + self.results = dict() + self.proposed = dict() + self.existing = dict() + self.end_state = dict() + + def is_valid_v4addr(self): + """check if ipv4 addr is valid""" + if 
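
Editor's note: is_valid_v4addr(), which begins here and continues below, validates the address by splitting it and checking each octet by hand. A compact stdlib alternative as a sketch only (looks_like_ipv4 is a hypothetical name; behaviour assumed equivalent for this module's purposes):

    import socket

    def looks_like_ipv4(addr):
        """Hypothetical replacement for the manual octet checks."""
        if addr == '0.0.0.0':
            return False  # the module rejects the all-zeros address
        try:
            socket.inet_aton(addr)
        except (socket.error, OSError):
            return False
        return addr.count('.') == 3  # inet_aton also accepts short forms such as '10.1'
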
self.management_address.find('.') != -1: + addr_list = self.management_address.split('.') + if self.management_address == "0.0.0.0": + self.module.fail_json(msg='Error: The management address is 0.0.0.0 .') + if len(addr_list) != 4: + self.module.fail_json(msg='Error: Invalid IPV4 address.') + for each_num in addr_list: + each_num_tmp = str(each_num) + if not each_num_tmp.isdigit(): + self.module.fail_json(msg='Error: The ip address is not digit.') + if (int(each_num) > 255) or (int(each_num) < 0): + self.module.fail_json( + msg='Error: The value of ip address is out of [0 - 255].') + else: + self.module.fail_json(msg='Error: Invalid IP address.') + + def check_params(self): + """Check all input params""" + + if self.interval: + if int(self.interval) < 5 or int(self.interval) > 32768: + self.module.fail_json( + msg='Error: The value of interval is out of [5 - 32768].') + + if self.hold_multiplier: + if int(self.hold_multiplier) < 2 or int(self.hold_multiplier) > 10: + self.module.fail_json( + msg='Error: The value of hold_multiplier is out of [2 - 10].') + + if self.restart_delay: + if int(self.restart_delay) < 1 or int(self.restart_delay) > 10: + self.module.fail_json( + msg='Error: The value of restart_delay is out of [1 - 10].') + + if self.transmit_delay: + if int(self.transmit_delay) < 1 or int(self.transmit_delay) > 8192: + self.module.fail_json( + msg='Error: The value of transmit_delay is out of [1 - 8192].') + + if self.notification_interval: + if int(self.notification_interval) < 5 or int(self.notification_interval) > 3600: + self.module.fail_json( + msg='Error: The value of notification_interval is out of [5 - 3600].') + + if self.fast_count: + if int(self.fast_count) < 1 or int(self.fast_count) > 8: + self.module.fail_json( + msg='Error: The value of fast_count is out of [1 - 8].') + + if self.mdn_notification_interval: + if int(self.mdn_notification_interval) < 5 or int(self.mdn_notification_interval) > 3600: + self.module.fail_json( + msg='Error: The value of mdn_notification_interval is out of [5 - 3600].') + + if self.management_address: + self.is_valid_v4addr() + + if self.bind_name: + if (len(self.bind_name) < 1) or (len(self.bind_name) > 63): + self.module.fail_json( + msg='Error: Bind_name length is between 1 and 63.') + + def init_module(self): + """Init module object""" + + self.module = AnsibleModule( + argument_spec=self.spec, supports_check_mode=True) + + def check_response(self, xml_str, xml_name): + """Check if response message is already succeed""" + + if "" not in xml_str: + self.module.fail_json(msg='Error: %s failed.' 
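
Editor's note: every range check in check_params() above follows the same pattern. A hypothetical table-driven equivalent (RANGES and check_ranges are invented names; the bounds are copied from the checks above):

    RANGES = {
        'interval': (5, 32768),
        'hold_multiplier': (2, 10),
        'restart_delay': (1, 10),
        'transmit_delay': (1, 8192),
        'notification_interval': (5, 3600),
        'fast_count': (1, 8),
        'mdn_notification_interval': (5, 3600),
    }

    def check_ranges(module):
        for name, (low, high) in RANGES.items():
            value = module.params.get(name)
            if value is not None and not low <= int(value) <= high:
                module.fail_json(
                    msg='Error: The value of %s is out of [%s - %s].' % (name, low, high))
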
% xml_name) + + def config_lldp(self): + """Configure lldp enabled and mdn enabled parameters""" + + if self.state == 'present': + if (self.enable_flag == 1 and self.lldpenable == 'enabled') and not self.conf_exsit: + if self.mdnstatus: + xml_str = CE_NC_MERGE_GLOBA_MDNENABLE_CONFIG % self.mdnstatus + ret_xml = set_nc_config(self.module, xml_str) + self.check_response(ret_xml, "MDN_ENABLE_CONFIG") + + if self.lldpenable == 'enabled' and not self.conf_exsit: + xml_str = CE_NC_MERGE_GLOBA_LLDPENABLE_CONFIG % self.lldpenable + ret_xml = set_nc_config(self.module, xml_str) + self.check_response(ret_xml, "LLDP_ENABLE_CONFIG") + + if self.mdnstatus: + xml_str = CE_NC_MERGE_GLOBA_MDNENABLE_CONFIG % self.mdnstatus + ret_xml = set_nc_config(self.module, xml_str) + self.check_response(ret_xml, "MDN_ENABLE_CONFIG") + + if (self.enable_flag == 1) and not self.conf_exsit: + if self.mdnstatus: + xml_str = CE_NC_MERGE_GLOBA_MDNENABLE_CONFIG % self.mdnstatus + ret_xml = set_nc_config(self.module, xml_str) + self.check_response(ret_xml, "MDN_ENABLE_CONFIG") + + if (self.lldpenable == 'enabled' or self.enable_flag == 1) and not self.conf_exsit_lldp: + if self.hold_multiplier: + xml_str = CE_NC_MERGE_GLOBAL_LLDP_CONFIG_HEADER + \ + (CE_NC_MERGE_GLOBAL_LLDP_CONFIG_HOLD_MULTIPLIER % self.hold_multiplier) + \ + CE_NC_MERGE_GLOBAL_LLDP_CONFIG_TAIL + ret_xml = set_nc_config(self.module, xml_str) + self.check_response(ret_xml, "LLDP_CONFIG_INTERVAL") + + if self.interval: + xml_str = CE_NC_MERGE_GLOBAL_LLDP_CONFIG_HEADER + \ + (CE_NC_MERGE_GLOBAL_LLDP_CONFIG_INTERVAL % self.interval) + \ + CE_NC_MERGE_GLOBAL_LLDP_CONFIG_TAIL + ret_xml = set_nc_config(self.module, xml_str) + self.check_response(ret_xml, "LLDP_CONFIG_INTERVAL") + + if self.restart_delay: + xml_str = CE_NC_MERGE_GLOBAL_LLDP_CONFIG_HEADER + \ + (CE_NC_MERGE_GLOBAL_LLDP_CONFIG_RESTART_DELAY % self.restart_delay) + \ + CE_NC_MERGE_GLOBAL_LLDP_CONFIG_TAIL + ret_xml = set_nc_config(self.module, xml_str) + self.check_response(ret_xml, "LLDP_CONFIG_INTERVAL") + + if self.transmit_delay: + xml_str = CE_NC_MERGE_GLOBAL_LLDP_CONFIG_HEADER + \ + (CE_NC_MERGE_GLOBAL_LLDP_CONFIG_TRANSMIT_DELAY % self.transmit_delay) + \ + CE_NC_MERGE_GLOBAL_LLDP_CONFIG_TAIL + ret_xml = set_nc_config(self.module, xml_str) + self.check_response(ret_xml, "LLDP_CONFIG_INTERVAL") + + if self.notification_interval: + xml_str = CE_NC_MERGE_GLOBAL_LLDP_CONFIG_HEADER + \ + (CE_NC_MERGE_GLOBAL_LLDP_CONFIG_NOTIFICATION_INTERVAL % self.notification_interval) + \ + CE_NC_MERGE_GLOBAL_LLDP_CONFIG_TAIL + ret_xml = set_nc_config(self.module, xml_str) + self.check_response(ret_xml, "LLDP_CONFIG_INTERVAL") + + if self.fast_count: + xml_str = CE_NC_MERGE_GLOBAL_LLDP_CONFIG_HEADER + \ + (CE_NC_MERGE_GLOBAL_LLDP_CONFIG_FAST_COUNT % self.fast_count) + \ + CE_NC_MERGE_GLOBAL_LLDP_CONFIG_TAIL + ret_xml = set_nc_config(self.module, xml_str) + self.check_response(ret_xml, "LLDP_CONFIG_INTERVAL") + + if self.mdn_notification_interval: + xml_str = CE_NC_MERGE_GLOBAL_LLDP_CONFIG_HEADER + \ + (CE_NC_MERGE_GLOBAL_LLDP_CONFIG_MDN_NOTIFICATION_INTERVAL % self.mdn_notification_interval) + \ + CE_NC_MERGE_GLOBAL_LLDP_CONFIG_TAIL + ret_xml = set_nc_config(self.module, xml_str) + self.check_response(ret_xml, "LLDP_CONFIG_INTERVAL") + + if self.management_address: + xml_str = CE_NC_MERGE_GLOBAL_LLDP_CONFIG_HEADER + \ + (CE_NC_MERGE_GLOBAL_LLDP_CONFIG_MANAGEMENT_ADDRESS % self.management_address) + \ + CE_NC_MERGE_GLOBAL_LLDP_CONFIG_TAIL + ret_xml = set_nc_config(self.module, xml_str) + self.check_response(ret_xml, 
"LLDP_CONFIG_INTERVAL") + + if self.bind_name: + xml_str = CE_NC_MERGE_GLOBAL_LLDP_CONFIG_HEADER + \ + (CE_NC_MERGE_GLOBAL_LLDP_CONFIG_BIND_NAME % self.bind_name) + \ + CE_NC_MERGE_GLOBAL_LLDP_CONFIG_TAIL + ret_xml = set_nc_config(self.module, xml_str) + self.check_response(ret_xml, "LLDP_CONFIG_INTERVAL") + + if self.lldpenable == 'disabled' and not self.conf_exsit: + xml_str = CE_NC_MERGE_GLOBA_LLDPENABLE_CONFIG % self.lldpenable + ret_xml = set_nc_config(self.module, xml_str) + self.check_response(ret_xml, "LLDP_DISABLE_CONFIG") + + def show_result(self): + """Show result""" + + self.results['changed'] = self.changed + self.results['proposed'] = self.proposed + self.results['existing'] = self.existing + self.results['end_state'] = self.end_state + if self.changed: + self.results['updates'] = self.updates_cmd + else: + self.results['updates'] = list() + + self.module.exit_json(**self.results) + + def get_lldp_exist_config(self): + """Get lldp existed configure""" + + lldp_config = list() + lldp_dict = dict() + + conf_enable_str = CE_NC_GET_GLOBAL_LLDPENABLE_CONFIG + conf_enable_obj = get_nc_config(self.module, conf_enable_str) + + xml_enable_str = conf_enable_obj.replace('\r', '').replace('\n', '').\ + replace('xmlns="urn:ietf:params:xml:ns:netconf:base:1.0"', "").\ + replace('xmlns="http://www.huawei.com/netconf/vrp"', "") + + # get lldp enable config info + root_enable = ElementTree.fromstring(xml_enable_str) + ntpsite_enable = root_enable.findall("lldp/lldpSys") + for nexthop_enable in ntpsite_enable: + for ele_enable in nexthop_enable: + if ele_enable.tag in ["lldpEnable", "mdnStatus"]: + lldp_dict[ele_enable.tag] = ele_enable.text + + if self.state == "present": + cur_lldp_cfg = dict(lldpenable=lldp_dict['lldpEnable'], mdnstatus=lldp_dict['mdnStatus']) + exp_lldp_cfg = dict(lldpenable=self.lldpenable, mdnstatus=self.mdnstatus) + if lldp_dict['lldpEnable'] == 'enabled': + self.enable_flag = 1 + if cur_lldp_cfg == exp_lldp_cfg: + self.conf_exsit = True + lldp_config.append(dict(lldpenable=lldp_dict['lldpEnable'], mdnstatus=lldp_dict['mdnStatus'])) + + conf_str = CE_NC_GET_GLOBAL_LLDP_CONFIG + conf_obj = get_nc_config(self.module, conf_str) + if "" in conf_obj: + pass + + else: + xml_str = conf_obj.replace('\r', '').replace('\n', '').\ + replace('xmlns="urn:ietf:params:xml:ns:netconf:base:1.0"', "").\ + replace('xmlns="http://www.huawei.com/netconf/vrp"', "") + + # get all ntp config info + root = ElementTree.fromstring(xml_str) + ntpsite = root.findall("lldp/lldpSys/lldpSysParameter") + for nexthop in ntpsite: + for ele in nexthop: + if ele.tag in ["messageTxInterval", "messageTxHoldMultiplier", "reinitDelay", "txDelay", + "notificationInterval", "fastMessageCount", "mdnNotificationInterval", + "configManAddr", "bindifName"]: + lldp_dict[ele.tag] = ele.text + + if self.state == "present": + cur_ntp_cfg = dict(interval=lldp_dict['messageTxInterval'], + hold_multiplier=lldp_dict['messageTxHoldMultiplier'], + restart_delay=lldp_dict['reinitDelay'], + transmit_delay=lldp_dict['txDelay'], + notification_interval=lldp_dict['notificationInterval'], + fast_count=lldp_dict['fastMessageCount'], + mdn_notification_interval=lldp_dict['mdnNotificationInterval'], + management_address=lldp_dict['configManAddr'], + bind_name=lldp_dict['bindifName']) + + exp_ntp_cfg = dict(interval=self.interval, hold_multiplier=self.hold_multiplier, + restart_delay=self.restart_delay, transmit_delay=self.transmit_delay, + notification_interval=self.notification_interval, + fast_count=self.fast_count, 
mdn_notification_interval=self.mdn_notification_interval, + management_address=self.management_address, bind_name=self.bind_name) + + if cur_ntp_cfg == exp_ntp_cfg: + self.conf_exsit_lldp = True + + lldp_config.append(dict(interval=lldp_dict['messageTxInterval'], + hold_multiplier=lldp_dict['messageTxHoldMultiplier'], + restart_delay=lldp_dict['reinitDelay'], transmit_delay=lldp_dict['txDelay'], + notification_interval=lldp_dict['notificationInterval'], + fast_count=lldp_dict['fastMessageCount'], + mdn_notification_interval=lldp_dict['mdnNotificationInterval'], + management_address=lldp_dict['configManAddr'], + bind_name=lldp_dict['bindifName'])) + + tmp_dict = dict() + str_1 = str(lldp_config) + temp_1 = str_1.replace('[', '').replace(']', '').replace('{', '').replace('}', '').replace('\'', '') + if temp_1: + tmp_2 = temp_1.split(',') + for i in tmp_2: + tmp_value = re.match(r'(.*):(.*)', i) + key_tmp = tmp_value.group(1) + key_value = tmp_value.group(2) + tmp_dict[key_tmp] = key_value + return tmp_dict + + def get_existing(self): + """Get existing info""" + + self.existing = self.get_lldp_exist_config() + + def get_proposed(self): + """Get proposed info""" + + if self.enable_flag == 1: + if self.lldpenable == 'enabled': + self.proposed = dict(lldpenable=self.lldpenable) + if self.mdnstatus: + self.proposed = dict(mdnstatus=self.mdnstatus) + elif self.lldpenable == 'disabled': + self.proposed = dict(lldpenable=self.lldpenable) + self.changed = True + else: + if self.mdnstatus: + self.proposed = dict(mdnstatus=self.mdnstatus) + else: + if self.lldpenable == 'enabled': + self.proposed = dict(lldpenable=self.lldpenable) + self.changed = True + if self.mdnstatus: + self.proposed = dict(mdnstatus=self.mdnstatus) + if self.enable_flag == 1 or self.lldpenable == 'enabled': + if self.interval: + self.proposed = dict(interval=self.interval) + if self.hold_multiplier: + self.proposed = dict(hold_multiplier=self.hold_multiplier) + if self.restart_delay: + self.proposed = dict(restart_delay=self.restart_delay) + if self.transmit_delay: + self.proposed = dict(transmit_delay=self.transmit_delay) + if self.notification_interval: + self.proposed = dict(notification_interval=self.notification_interval) + if self.fast_count: + self.proposed = dict(fast_count=self.fast_count) + if self.mdn_notification_interval: + self.proposed = dict(mdn_notification_interval=self.mdn_notification_interval) + if self.management_address: + self.proposed = dict(management_address=self.management_address) + if self.bind_name: + self.proposed = dict(bind_name=self.bind_name) + + def get_end_state(self): + """Get end state info""" + + self.end_state = self.get_lldp_exist_config() + existing_key_list = self.existing.keys() + end_state_key_list = self.end_state.keys() + for i in end_state_key_list: + for j in existing_key_list: + if i == j and self.existing[i] != self.end_state[j]: + self.changed = True + + def get_update_cmd(self): + """Get updated commands""" + + if self.conf_exsit and self.conf_exsit_lldp: + return + + if self.state == "present": + if self.lldpenable == "enabled": + self.updates_cmd.append("lldp enable") + + if self.mdnstatus: + self.updates_cmd.append("lldp mdn enable") + if self.mdnstatus == "rxOnly": + self.updates_cmd.append("lldp mdn enable") + else: + self.updates_cmd.append("undo lldp mdn enable") + if self.interval: + self.updates_cmd.append("lldp transmit interval %s" % self.interval) + if self.hold_multiplier: + self.updates_cmd.append("lldp transmit multiplier %s" % self.hold_multiplier) + if 
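
Editor's note: get_lldp_exist_config() above flattens its list of dicts by stringifying it and re-parsing the string with a regex; merging the dicts directly is equivalent and avoids the round trip. (Separately, note that each "self.proposed = dict(...)" assignment in get_proposed() replaces the previous one, so only the last matching parameter survives; "self.proposed.update(...)" would accumulate.) A minimal sketch with a hypothetical helper name:

    def flatten(lldp_config):
        """Hypothetical replacement for the str()/regex round trip."""
        merged = dict()
        for fragment in lldp_config:  # each entry is a small dict
            merged.update(fragment)
        return merged
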
self.restart_delay: + self.updates_cmd.append("lldp restart %s" % self.restart_delay) + if self.transmit_delay: + self.updates_cmd.append("lldp transmit delay %s" % self.transmit_delay) + if self.notification_interval: + self.updates_cmd.append("lldp trap-interval %s" % self.notification_interval) + if self.fast_count: + self.updates_cmd.append("lldp fast-count %s" % self.fast_count) + if self.mdn_notification_interval: + self.updates_cmd.append("lldp mdn trap-interval %s" % self.mdn_notification_interval) + if self.management_address: + self.updates_cmd.append("lldp management-address %s" % self.management_address) + if self.bind_name: + self.updates_cmd.append("lldp management-address bind interface %s" % self.bind_name) + elif self.lldpenable == "disabled": + self.updates_cmd.append("undo lldp enable") + else: + if self.enable_flag == 1: + if self.mdnstatus: + if self.mdnstatus == "rxOnly": + self.updates_cmd.append("lldp mdn enable") + else: + self.updates_cmd.append("undo lldp mdn enable") + if self.interval: + self.updates_cmd.append("lldp transmit interval %s" % self.interval) + if self.hold_multiplier: + self.updates_cmd.append("lldp transmit multiplier %s" % self.hold_multiplier) + if self.restart_delay: + self.updates_cmd.append("lldp restart %s" % self.restart_delay) + if self.transmit_delay: + self.updates_cmd.append("lldp transmit delay %s" % self.transmit_delay) + if self.notification_interval: + self.updates_cmd.append("lldp trap-interval %s" % self.notification_interval) + if self.fast_count: + self.updates_cmd.append("lldp fast-count %s" % self.fast_count) + if self.mdn_notification_interval: + self.updates_cmd.append("lldp mdn trap-interval %s" % self.mdn_notification_interval) + if self.management_address: + self.updates_cmd.append("lldp management-address %s" % self.management_address) + if self.bind_name: + self.updates_cmd.append("lldp management-address bind interface %s" % self.bind_name) + + def work(self): + """Execute task""" + self.check_params() + self.get_existing() + self.get_proposed() + self.config_lldp() + self.get_update_cmd() + self.get_end_state() + self.show_result() + + +def main(): + """Main function entry""" + + argument_spec = dict( + lldpenable=dict(required=False, choices=['enabled', 'disabled']), + mdnstatus=dict(required=False, choices=['rxOnly', 'disabled']), + interval=dict(required=False, type='int'), + hold_multiplier=dict(required=False, type='int'), + restart_delay=dict(required=False, type='int'), + transmit_delay=dict(required=False, type='int'), + notification_interval=dict(required=False, type='int'), + fast_count=dict(required=False, type='int'), + mdn_notification_interval=dict(required=False, type='int'), + management_address=dict(required=False, type='str'), + bind_name=dict(required=False, type='str'), + state=dict(choices=['absent', 'present'], default='present'), + ) + lldp_obj = Lldp(argument_spec) + lldp_obj.work() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/cloudengine/ce_lldp_interface.py b/plugins/modules/network/cloudengine/ce_lldp_interface.py new file mode 100644 index 0000000000..67a2355880 --- /dev/null +++ b/plugins/modules/network/cloudengine/ce_lldp_interface.py @@ -0,0 +1,1384 @@ +#!/usr/bin/python +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. 
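
Editor's note: for reference, the exit payload that ce_lldp's show_result() assembles has this shape; the values below are the documented samples, not captured output:

    results = {
        'changed': True,
        'proposed': {'lldpenable': 'enabled', 'interval': '32'},
        'existing': {'lldpenable': 'disabled', 'mdnstatus': 'disabled'},
        'end_state': {'lldpenable': 'enabled', 'interval': '32'},
        'updates': ['lldp enable', 'lldp transmit interval 32'],
    }
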
+# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = r''' +--- +module: ce_lldp_interface +short_description: Manages INTERFACE LLDP configuration on HUAWEI CloudEngine switches. +description: + - Manages INTERFACE LLDP configuration on HUAWEI CloudEngine switches. +author: xuxiaowei0512 (@CloudEngine-Ansible) +notes: + - This module requires the netconf system service be enabled on the remote device being managed. + - Recommended connection is C(netconf). + - This module also works with C(local) connections for legacy playbooks. +options: + lldpenable: + description: + - Set global LLDP enable state. + type: str + choices: ['enabled', 'disabled'] + function_lldp_interface_flag: + description: + - Used to distinguish between command line functions. + type: str + choices: ['disableINTERFACE','tlvdisableINTERFACE','tlvenableINTERFACE','intervalINTERFACE'] + type_tlv_disable: + description: + - Used to distinguish between command line functions. + type: str + choices: ['basic_tlv', 'dot3_tlv'] + type_tlv_enable: + description: + - Used to distinguish between command line functions. + type: str + choices: ['dot1_tlv','dcbx'] + lldpadminstatus: + description: + - Set interface lldp enable state. + type: str + choices: ['txOnly', 'rxOnly', 'txAndRx', 'disabled'] + ifname: + description: + - Interface name. + type: str + txinterval: + description: + - LLDP send message interval. + type: int + txprotocolvlanid: + description: + - Set tx protocol vlan id. + type: int + txvlannameid: + description: + - Set tx vlan name id. + type: int + vlannametxenable: + description: + - Set vlan name tx enable or not. + type: bool + manaddrtxenable: + description: + - Make it able to send management address TLV. + type: bool + portdesctxenable: + description: + - Enabling the ability to send a description of TLV. + type: bool + syscaptxenable: + description: + - Enable the ability to send system capabilities TLV. + type: bool + sysdesctxenable: + description: + - Enable the ability to send system description TLV. + type: bool + sysnametxenable: + description: + - Enable the ability to send system name TLV. + type: bool + portvlantxenable: + description: + - Enable port vlan tx. + type: bool + protovlantxenable: + description: + - Enable protocol vlan tx. + type: bool + protoidtxenable: + description: + - Enable the ability to send protocol identity TLV. + type: bool + macphytxenable: + description: + - Enable MAC/PHY configuration and state TLV to be sent. + type: bool + linkaggretxenable: + description: + - Enable the ability to send link aggregation TLV. + type: bool + maxframetxenable: + description: + - Enable the ability to send maximum frame length TLV. + type: bool + eee: + description: + - Enable the ability to send EEE TLV. + type: bool + dcbx: + description: + - Enable the ability to send DCBX TLV. + type: bool + state: + description: + - Manage the state of the resource. 
+    type: str
+    default: present
+    choices: ['present', 'absent']
+'''
+
+EXAMPLES = '''
+  - name: "Configure global LLDP enable state"
+    ce_lldp_interface:
+      lldpenable: enabled
+
+  - name: "Configure interface LLDP enable state"
+    ce_lldp_interface:
+      function_lldp_interface_flag: disableINTERFACE
+      ifname: 10GE1/0/1
+      lldpadminstatus: rxOnly
+
+  - name: "Configure the LLDP transmit interval (global LLDP must already be enabled)"
+    ce_lldp_interface:
+      function_lldp_interface_flag: intervalINTERFACE
+      ifname: 10GE1/0/1
+      txinterval: 4
+
+  - name: "Configure basic-tlv: management-address TLV"
+    ce_lldp_interface:
+      function_lldp_interface_flag: tlvdisableINTERFACE
+      type_tlv_disable: basic_tlv
+      ifname: 10GE1/0/1
+      manaddrtxenable: true
+
+  - name: "Configure basic-tlv: port description TLV"
+    ce_lldp_interface:
+      function_lldp_interface_flag: tlvdisableINTERFACE
+      type_tlv_disable: basic_tlv
+      ifname: 10GE1/0/1
+      portdesctxenable: true
+
+  - name: "Configure basic-tlv: system capabilities TLV"
+    ce_lldp_interface:
+      function_lldp_interface_flag: tlvdisableINTERFACE
+      type_tlv_disable: basic_tlv
+      ifname: 10GE1/0/1
+      syscaptxenable: true
+
+  - name: "Configure basic-tlv: system description TLV"
+    ce_lldp_interface:
+      function_lldp_interface_flag: tlvdisableINTERFACE
+      type_tlv_disable: basic_tlv
+      ifname: 10GE1/0/1
+      sysdesctxenable: true
+
+  - name: "Configure basic-tlv: system name TLV"
+    ce_lldp_interface:
+      function_lldp_interface_flag: tlvdisableINTERFACE
+      type_tlv_disable: basic_tlv
+      ifname: 10GE1/0/1
+      sysnametxenable: true
+
+  - name: "Forbid publishing the link aggregation TLV on the interface"
+    ce_lldp_interface:
+      function_lldp_interface_flag: tlvdisableINTERFACE
+      type_tlv_disable: dot3_tlv
+      ifname: 10GE1/0/1
+      linkaggretxenable: true
+
+  - name: "Forbid publishing the MAC/PHY configuration/status TLV on the interface"
+    ce_lldp_interface:
+      function_lldp_interface_flag: tlvdisableINTERFACE
+      type_tlv_disable: dot3_tlv
+      ifname: 10GE1/0/1
+      macphytxenable: true
+
+  - name: "Forbid publishing the maximum frame size TLV on the interface"
+    ce_lldp_interface:
+      function_lldp_interface_flag: tlvdisableINTERFACE
+      type_tlv_disable: dot3_tlv
+      ifname: 10GE1/0/1
+      maxframetxenable: true
+
+  - name: "Forbid publishing the EEE TLV on the interface"
+    ce_lldp_interface:
+      function_lldp_interface_flag: tlvdisableINTERFACE
+      type_tlv_disable: dot3_tlv
+      ifname: 10GE1/0/1
+      eee: true
+
+  - name: "Configure the interface to publish an optional DCBX TLV type"
+    ce_lldp_interface:
+      function_lldp_interface_flag: tlvenableINTERFACE
+      ifname: 10GE1/0/1
+      type_tlv_enable: dcbx
+'''
+
+RETURN = '''
+proposed:
+    description: k/v pairs of parameters passed into module
+    returned: always
+    type: dict
+    sample: {
+                "lldpenable": "enabled",
+                "lldpadminstatus": "rxOnly",
+                "function_lldp_interface_flag": "tlvenableINTERFACE",
+                "type_tlv_enable": "dot1_tlv",
+                "ifname": "10GE1/0/1",
+                "state": "present"
+    }
+existing:
+    description: k/v pairs of existing global LLDP configuration
+    returned: always
+    type: dict
+    sample: {
+                "lldpenable": "disabled",
+                "ifname": "10GE1/0/1",
+                "lldpadminstatus": "txAndRx"
+    }
+end_state:
+    description: k/v pairs of global LLDP configuration after module execution
+    returned: always
+    type: dict
+    sample: {
+                "lldpenable": "enabled",
+                "lldpadminstatus": "rxOnly",
+                "function_lldp_interface_flag":
"tlvenableINTERFACE", + "type_tlv_enable": "dot1_tlv", + "ifname": "10GE1/0/1" + } +updates: + description: command sent to the device + returned: always + type: list + sample: [ + "lldp enable", + "interface 10ge 1/0/1", + "undo lldp disable", + "lldp tlv-enable dot1-tlv vlan-name 4", + ] +changed: + description: check to see if a change was made on the device + returned: always + type: bool + sample: true +''' + +import copy +import re +from xml.etree import ElementTree +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.network.cloudengine.ce import set_nc_config, get_nc_config + +CE_NC_GET_GLOBAL_LLDPENABLE_CONFIG = """ + + + + + + + +""" + +CE_NC_MERGE_GLOBA_LLDPENABLE_CONFIG = """ + + + + %s + + + +""" + +CE_NC_GET_INTERFACE_LLDP_CONFIG = """ + + + + + + + + + + +""" + +CE_NC_MERGE_INTERFACE_LLDP_CONFIG = """ + + + + + %s + %s + + + + +""" + +CE_NC_GET_INTERFACE_INTERVAl_CONFIG = """ + + + + + + + + + + + + +""" + +CE_NC_MERGE_INTERFACE_INTERVAl_CONFIG = """ + + + + + %s + + %s + + + + + +""" + +CE_NC_GET_INTERFACE_TLV_ENABLE_CONFIG = """ + + + + + + + + + + + + + +""" + +CE_NC_GET_INTERFACE_TLV_DISABLE_CONFIG = """ + + + + + + + + + + + + + + + + + + + + +""" + +CE_NC_MERGE_INTERFACE_TLV_CONFIG_HEADER = """ + + + + + %s + +""" + +CE_NC_MERGE_INTERFACE_TLV_CONFIG_ENABLE_PROTOIDTXENABLE = """ + %s +""" + +CE_NC_MERGE_INTERFACE_TLV_CONFIG_ENABLE_DCBX = """ + %s +""" + +CE_NC_MERGE_INTERFACE_TLV_CONFIG_DISABLE_MANADDRTXENABLE = """ + %s +""" + +CE_NC_MERGE_INTERFACE_TLV_CONFIG_DISABLE_PORTDESCTXENABLE = """ + %s +""" + +CE_NC_MERGE_INTERFACE_TLV_CONFIG_DISABLE_SYSCAPTXENABLE = """ + %s +""" + +CE_NC_MERGE_INTERFACE_TLV_CONFIG_DISABLE_SYSDESCTXENABLE = """ + %s +""" + +CE_NC_MERGE_INTERFACE_TLV_CONFIG_DISABLE_SYSNAMETXENABLE = """ + %s +""" + +CE_NC_MERGE_INTERFACE_TLV_CONFIG_DISABLE_LINKAGGRETXENABLE = """ + %s +""" + +CE_NC_MERGE_INTERFACE_TLV_CONFIG_DISABLE_MACPHYTXENABLE = """ + %s +""" + +CE_NC_MERGE_INTERFACE_TLV_CONFIG_DISABLE_MAXFRAMETXENABLE = """ + %s +""" + +CE_NC_MERGE_INTERFACE_TLV_CONFIG_DISABLE_EEE = """ + %s +""" + +CE_NC_MERGE_INTERFACE_TLV_CONFIG_TAIL = """ + + + + + +""" + +CE_NC_MERGE_GLOBA_LLDPENABLE_CONFIG = """ + + + + %s + + + +""" + + +def get_interface_type(interface): + """Gets the type of interface, such as 10GE""" + + if interface is None: + return None + + iftype = None + + if interface.upper().startswith('GE'): + iftype = 'ge' + elif interface.upper().startswith('10GE'): + iftype = '10ge' + elif interface.upper().startswith('25GE'): + iftype = '25ge' + elif interface.upper().startswith('40GE'): + iftype = '40ge' + elif interface.upper().startswith('100GE'): + iftype = '100ge' + elif interface.upper().startswith('PORT-GROUP'): + iftype = 'stack-Port' + elif interface.upper().startswith('NULL'): + iftype = 'null' + else: + return None + return iftype.lower() + + +class Lldp_interface(object): + """Manage global lldp enable configuration""" + + def __init__(self, argument_spec): + self.spec = argument_spec + self.module = AnsibleModule(argument_spec=self.spec, supports_check_mode=True) + + self.lldpenable = self.module.params['lldpenable'] or None + self.function_lldp_interface_flag = self.module.params['function_lldp_interface_flag'] + self.type_tlv_disable = self.module.params['type_tlv_disable'] + self.type_tlv_enable = self.module.params['type_tlv_enable'] + self.ifname = self.module.params['ifname'] + if self.function_lldp_interface_flag == 'disableINTERFACE': + self.ifname = 
self.module.params['ifname'] + self.lldpadminstatus = self.module.params['lldpadminstatus'] + elif self.function_lldp_interface_flag == 'tlvdisableINTERFACE': + if self.type_tlv_disable == 'basic_tlv': + self.ifname = self.module.params['ifname'] + self.manaddrtxenable = self.module.params['manaddrtxenable'] + self.portdesctxenable = self.module.params['portdesctxenable'] + self.syscaptxenable = self.module.params['syscaptxenable'] + self.sysdesctxenable = self.module.params['sysdesctxenable'] + self.sysnametxenable = self.module.params['sysnametxenable'] + if self.type_tlv_disable == 'dot3_tlv': + self.ifname = self.module.params['ifname'] + self.macphytxenable = self.module.params['macphytxenable'] + self.linkaggretxenable = self.module.params['linkaggretxenable'] + self.maxframetxenable = self.module.params['maxframetxenable'] + self.eee = self.module.params['eee'] + elif self.function_lldp_interface_flag == 'tlvenableINTERFACE': + if self.type_tlv_enable == 'dot1_tlv': + self.ifname = self.module.params['ifname'] + self.protoidtxenable = self.module.params['protoidtxenable'] + if self.type_tlv_enable == 'dcbx': + self.ifname = self.module.params['ifname'] + self.dcbx = self.module.params['dcbx'] + elif self.function_lldp_interface_flag == 'intervalINTERFACE': + self.ifname = self.module.params['ifname'] + self.txinterval = self.module.params['txinterval'] + self.state = self.module.params['state'] + + self.lldp_conf = dict() + self.conf_disable_exsit = False + self.conf_interface_lldp_disable_exsit = False + self.conf_interval_exsit = False + self.conf_tlv_disable_exsit = False + self.conf_tlv_enable_exsit = False + self.enable_flag = 0 + self.check_params() + self.existing_state_value = dict() + self.existing_end_state_value = dict() + self.interface_lldp_info = list() + + # state + self.changed = False + self.proposed_changed = dict() + self.updates_cmd = list() + self.results = dict() + self.proposed = dict() + self.existing = dict() + self.end_state = dict() + + def check_params(self): + """Check all input params""" + + if self.ifname: + intf_type = get_interface_type(self.ifname) + if not intf_type: + self.module.fail_json(msg='Error: ifname name of %s is error.' % self.ifname) + if (len(self.ifname) < 1) or (len(self.ifname) > 63): + self.module.fail_json(msg='Error: Ifname length is beetween 1 and 63.') + + if self.function_lldp_interface_flag == 'intervalINTERFACE': + if self.txinterval: + if int(self.txinterval) < 1 or int(self.txinterval) > 32768: + self.module.fail_json( + msg='Error: The value of txinterval is out of [1 - 32768].') + if self.ifname: + intf_type = get_interface_type(self.ifname) + if not intf_type: + self.module.fail_json( + msg='Error: ifname name of %s ' + 'is error.' % self.ifname) + if (len(self.ifname) < 1) or (len(self.ifname) > 63): + self.module.fail_json( + msg='Error: Ifname length is beetween 1 and 63.') + + if self.function_lldp_interface_flag == 'tlvdisableINTERFACE': + if self.type_tlv_disable == 'dot1_tlv': + if self.ifname: + intf_type = get_interface_type(self.ifname) + if not intf_type: + self.module.fail_json( + msg='Error: ifname name of %s ' + 'is error.' 
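
Editor's note: check_params() repeats the same ifname validation for every flag; a single helper would keep the checks in one place. A sketch (validate_ifname is a hypothetical name; messages copied from the checks above):

    def validate_ifname(module, ifname):
        """Shared ifname validation."""
        if not get_interface_type(ifname):
            module.fail_json(msg='Error: ifname name of %s is error.' % ifname)
        if not 1 <= len(ifname) <= 63:
            module.fail_json(msg='Error: Ifname length is between 1 and 63.')
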
% self.ifname) + if (len(self.ifname) < 1) or (len(self.ifname) > 63): + self.module.fail_json( + msg='Error: Ifname length is beetween 1 and 63.') + + if self.function_lldp_interface_flag == 'tlvenableINTERFACE': + if self.type_tlv_enable == 'dot1_tlv': + if self.ifname: + intf_type = get_interface_type(self.ifname) + if not intf_type: + self.module.fail_json( + msg='Error: ifname name of %s ' + 'is error.' % self.ifname) + if (len(self.ifname) < 1) or (len(self.ifname) > 63): + self.module.fail_json( + msg='Error: Ifname length is beetween 1 and 63.') + + def check_response(self, xml_str, xml_name): + """Check if response message is already OK""" + + if "" not in xml_str: + self.module.fail_json(msg='Error: %s failed.' % xml_name) + + def show_result(self): + """Show result""" + + self.results['changed'] = self.changed + self.results['proposed'] = self.proposed + self.results['existing'] = self.existing + self.results['end_state'] = self.end_state + if self.changed: + self.results['updates'] = self.updates_cmd + else: + self.results['updates'] = list() + + self.module.exit_json(**self.results) + + def get_lldp_enable_pre_config(self): + """Get lldp enable configure""" + + lldp_dict = dict() + lldp_config = list() + conf_enable_str = CE_NC_GET_GLOBAL_LLDPENABLE_CONFIG + conf_enable_obj = get_nc_config(self.module, conf_enable_str) + xml_enable_str = conf_enable_obj.replace('\r', '').replace('\n', '').\ + replace('xmlns="urn:ietf:params:xml:ns:netconf:base:1.0"', "").\ + replace('xmlns="http://www.huawei.com/netconf/vrp"', "") + + # get lldp enable config info + root_enable = ElementTree.fromstring(xml_enable_str) + ntpsite_enable = root_enable.findall("lldp/lldpSys") + for nexthop_enable in ntpsite_enable: + for ele_enable in nexthop_enable: + if ele_enable.tag in ["lldpEnable"]: + lldp_dict[ele_enable.tag] = ele_enable.text + if lldp_dict['lldpEnable'] == 'enabled': + self.enable_flag = 1 + lldp_config.append(dict(lldpenable=lldp_dict['lldpEnable'])) + return lldp_config + + def get_interface_lldp_disable_pre_config(self): + """Get interface undo lldp disable configure""" + lldp_dict = dict() + interface_lldp_disable_dict = dict() + if self.enable_flag == 1: + conf_enable_str = CE_NC_GET_INTERFACE_LLDP_CONFIG + conf_enable_obj = get_nc_config(self.module, conf_enable_str) + if "" in conf_enable_obj: + return + xml_enable_str = conf_enable_obj.replace('\r', '').replace('\n', '').\ + replace('xmlns="urn:ietf:params:xml:ns:netconf:base:1.0"', "").\ + replace('xmlns="http://www.huawei.com/netconf/vrp"', "") + root = ElementTree.fromstring(xml_enable_str) + lldp_disable_enable = root.findall("lldp/lldpInterfaces/lldpInterface") + for nexthop_enable in lldp_disable_enable: + name = nexthop_enable.find("ifName") + status = nexthop_enable.find("lldpAdminStatus") + if name is not None and status is not None: + interface_lldp_disable_dict[name.text] = status.text + return interface_lldp_disable_dict + + def get_interface_lldp_disable_config(self): + lldp_config = list() + interface_lldp_disable_dict_tmp = dict() + if self.state == "present": + if self.ifname: + interface_lldp_disable_dict_tmp = self.get_interface_lldp_disable_pre_config() + key_list = interface_lldp_disable_dict_tmp.keys() + if len(key_list) != 0: + for key in key_list: + if key == self.ifname: + if interface_lldp_disable_dict_tmp[key] != self.lldpadminstatus: + self.conf_interface_lldp_disable_exsit = True + else: + self.conf_interface_lldp_disable_exsit = False + elif self.ifname not in key_list: + 
self.conf_interface_lldp_disable_exsit = True + elif (len(key_list) == 0) and self.ifname and self.lldpadminstatus: + self.conf_interface_lldp_disable_exsit = True + lldp_config.append(interface_lldp_disable_dict_tmp) + return lldp_config + + def get_interface_tlv_disable_config(self): + lldp_config = list() + lldp_dict = dict() + cur_interface_mdn_cfg = dict() + exp_interface_mdn_cfg = dict() + + if self.enable_flag == 1: + conf_str = CE_NC_GET_INTERFACE_TLV_DISABLE_CONFIG + conf_obj = get_nc_config(self.module, conf_str) + if "" in conf_obj: + return lldp_config + xml_str = conf_obj.replace('\r', '').replace('\n', '') + xml_str = xml_str.replace('xmlns="urn:ietf:params:xml:ns:netconf:base:1.0"', "") + xml_str = xml_str.replace('xmlns="http://www.huawei.com/netconf/vrp"', "") + root = ElementTree.fromstring(xml_str) + lldp_tlvdisable_ifname = root.findall("lldp/lldpInterfaces/lldpInterface") + for ele in lldp_tlvdisable_ifname: + ifname_tmp = ele.find("ifName") + manaddrtxenable_tmp = ele.find("tlvTxEnable/manAddrTxEnable") + portdesctxenable_tmp = ele.find("tlvTxEnable/portDescTxEnable") + syscaptxenable_tmp = ele.find("tlvTxEnable/sysCapTxEnable") + sysdesctxenable_tmp = ele.find("tlvTxEnable/sysDescTxEnable") + sysnametxenable_tmp = ele.find("tlvTxEnable/sysNameTxEnable") + linkaggretxenable_tmp = ele.find("tlvTxEnable/linkAggreTxEnable") + macphytxenable_tmp = ele.find("tlvTxEnable/macPhyTxEnable") + maxframetxenable_tmp = ele.find("tlvTxEnable/maxFrameTxEnable") + eee_tmp = ele.find("tlvTxEnable/eee") + if ifname_tmp is not None: + if ifname_tmp.text is not None: + cur_interface_mdn_cfg["ifname"] = ifname_tmp.text + if ifname_tmp is not None and manaddrtxenable_tmp is not None: + if manaddrtxenable_tmp.text is not None: + cur_interface_mdn_cfg["manaddrtxenable"] = manaddrtxenable_tmp.text + if ifname_tmp is not None and portdesctxenable_tmp is not None: + if portdesctxenable_tmp.text is not None: + cur_interface_mdn_cfg['portdesctxenable'] = portdesctxenable_tmp.text + if ifname_tmp is not None and syscaptxenable_tmp is not None: + if syscaptxenable_tmp.text is not None: + cur_interface_mdn_cfg['syscaptxenable'] = syscaptxenable_tmp.text + if ifname_tmp is not None and sysdesctxenable_tmp is not None: + if sysdesctxenable_tmp.text is not None: + cur_interface_mdn_cfg['sysdesctxenable'] = sysdesctxenable_tmp.text + if ifname_tmp is not None and sysnametxenable_tmp is not None: + if sysnametxenable_tmp.text is not None: + cur_interface_mdn_cfg['sysnametxenable'] = sysnametxenable_tmp.text + if ifname_tmp is not None and linkaggretxenable_tmp is not None: + if linkaggretxenable_tmp.text is not None: + cur_interface_mdn_cfg['linkaggretxenable'] = linkaggretxenable_tmp.text + if ifname_tmp is not None and macphytxenable_tmp is not None: + if macphytxenable_tmp.text is not None: + cur_interface_mdn_cfg['macphytxenable'] = macphytxenable_tmp.text + if ifname_tmp is not None and maxframetxenable_tmp is not None: + if maxframetxenable_tmp.text is not None: + cur_interface_mdn_cfg['maxframetxenable'] = maxframetxenable_tmp.text + if ifname_tmp is not None and eee_tmp is not None: + if eee_tmp.text is not None: + cur_interface_mdn_cfg['eee'] = eee_tmp.text + if self.state == "present": + if self.function_lldp_interface_flag == 'tlvdisableINTERFACE': + if self.type_tlv_disable == 'basic_tlv': + if self.ifname: + exp_interface_mdn_cfg['ifname'] = self.ifname + if self.manaddrtxenable: + exp_interface_mdn_cfg['manaddrtxenable'] = self.manaddrtxenable + if self.portdesctxenable: + 
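
Editor's note: the per-field find()/None checks in get_interface_tlv_disable_config() above can be collapsed into a loop, since each result key is just the lower-cased XML leaf name. A hypothetical rewrite of that extraction step:

    TLV_FIELDS = ('manAddrTxEnable', 'portDescTxEnable', 'sysCapTxEnable',
                  'sysDescTxEnable', 'sysNameTxEnable', 'linkAggreTxEnable',
                  'macPhyTxEnable', 'maxFrameTxEnable', 'eee')

    for field in TLV_FIELDS:
        node = ele.find('tlvTxEnable/' + field)
        if node is not None and node.text is not None:
            cur_interface_mdn_cfg[field.lower()] = node.text
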
exp_interface_mdn_cfg['portdesctxenable'] = self.portdesctxenable + if self.syscaptxenable: + exp_interface_mdn_cfg['syscaptxenable'] = self.syscaptxenable + if self.sysdesctxenable: + exp_interface_mdn_cfg['sysdesctxenable'] = self.sysdesctxenable + if self.sysnametxenable: + exp_interface_mdn_cfg['sysnametxenable'] = self.sysnametxenable + if self.ifname == ifname_tmp.text: + key_list = exp_interface_mdn_cfg.keys() + key_list_cur = cur_interface_mdn_cfg.keys() + if len(key_list) != 0: + for key in key_list: + if key == "ifname" and self.ifname == cur_interface_mdn_cfg['ifname']: + lldp_config.append(dict(ifname=cur_interface_mdn_cfg['ifname'])) + if "manaddrtxenable" == key and self.ifname == cur_interface_mdn_cfg['ifname']: + lldp_config.append(dict(manaddrtxenable=cur_interface_mdn_cfg['manaddrtxenable'])) + if "portdesctxenable" == key and self.ifname == cur_interface_mdn_cfg['ifname']: + lldp_config.append(dict(portdesctxenable=cur_interface_mdn_cfg['portdesctxenable'])) + if "syscaptxenable" == key and self.ifname == cur_interface_mdn_cfg['ifname']: + lldp_config.append(dict(syscaptxenable=cur_interface_mdn_cfg['syscaptxenable'])) + if "sysdesctxenable" == key and self.ifname == cur_interface_mdn_cfg['ifname']: + lldp_config.append(dict(sysdesctxenable=cur_interface_mdn_cfg['sysdesctxenable'])) + if "sysnametxenable" == key and self.ifname == cur_interface_mdn_cfg['ifname']: + lldp_config.append(dict(sysnametxenable=cur_interface_mdn_cfg['sysnametxenable'])) + if key in key_list_cur: + if str(exp_interface_mdn_cfg[key]) != str(cur_interface_mdn_cfg[key]): + self.conf_tlv_disable_exsit = True + self.changed = True + return lldp_config + else: + self.conf_tlv_disable_exsit = True + return lldp_config + + if self.type_tlv_disable == 'dot3_tlv': + if self.ifname: + exp_interface_mdn_cfg['ifname'] = self.ifname + if self.linkaggretxenable: + exp_interface_mdn_cfg['linkaggretxenable'] = self.linkaggretxenable + if self.macphytxenable: + exp_interface_mdn_cfg['macphytxenable'] = self.macphytxenable + if self.maxframetxenable: + exp_interface_mdn_cfg['maxframetxenable'] = self.maxframetxenable + if self.eee: + exp_interface_mdn_cfg['eee'] = self.eee + if self.ifname == ifname_tmp.text: + key_list = exp_interface_mdn_cfg.keys() + key_list_cur = cur_interface_mdn_cfg.keys() + if len(key_list) != 0: + for key in key_list: + if key == "ifname" and self.ifname == cur_interface_mdn_cfg['ifname']: + lldp_config.append(dict(ifname=cur_interface_mdn_cfg['ifname'])) + if "linkaggretxenable" == key and self.ifname == cur_interface_mdn_cfg['ifname']: + lldp_config.append(dict(linkaggretxenable=cur_interface_mdn_cfg['linkaggretxenable'])) + if "macphytxenable" == key and self.ifname == cur_interface_mdn_cfg['ifname']: + lldp_config.append(dict(macphytxenable=cur_interface_mdn_cfg['macphytxenable'])) + if "maxframetxenable" == key and self.ifname == cur_interface_mdn_cfg['ifname']: + lldp_config.append(dict(maxframetxenable=cur_interface_mdn_cfg['maxframetxenable'])) + if "eee" == key and self.ifname == cur_interface_mdn_cfg['ifname']: + lldp_config.append(dict(eee=cur_interface_mdn_cfg['eee'])) + if key in key_list_cur: + if str(exp_interface_mdn_cfg[key]) != str(cur_interface_mdn_cfg[key]): + self.conf_tlv_disable_exsit = True + self.changed = True + return lldp_config + else: + self.conf_tlv_disable_exsit = True + return lldp_config + return lldp_config + + def get_interface_tlv_enable_config(self): + lldp_config = list() + lldp_dict = dict() + cur_interface_mdn_cfg = dict() + exp_interface_mdn_cfg = 
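
Editor's note: the expected/current comparison repeated above (set the conf flag when any shared key differs) could be factored out; this sketch covers only the shared-key case, not the fall-through branch that also sets the flag when the interface name does not match:

    def differs(expected, current):
        """True when any key present in both dicts has a different value."""
        return any(str(expected[key]) != str(current[key])
                   for key in expected if key in current)
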
dict() + if self.enable_flag == 1: + conf_str = CE_NC_GET_INTERFACE_TLV_ENABLE_CONFIG + conf_obj = get_nc_config(self.module, conf_str) + if "" in conf_obj: + return lldp_config + xml_str = conf_obj.replace('\r', '').replace('\n', '') + xml_str = xml_str.replace('xmlns="urn:ietf:params:xml:ns:netconf:base:1.0"', "") + xml_str = xml_str.replace('xmlns="http://www.huawei.com/netconf/vrp"', "") + root = ElementTree.fromstring(xml_str) + lldpenablesite = root.findall("lldp/lldpInterfaces/lldpInterface") + for ele in lldpenablesite: + ifname_tmp = ele.find("ifName") + protoidtxenable_tmp = ele.find("tlvTxEnable/protoIdTxEnable") + dcbx_tmp = ele.find("tlvTxEnable/dcbx") + if ifname_tmp is not None: + if ifname_tmp.text is not None: + cur_interface_mdn_cfg["ifname"] = ifname_tmp.text + if ifname_tmp is not None and protoidtxenable_tmp is not None: + if protoidtxenable_tmp.text is not None: + cur_interface_mdn_cfg["protoidtxenable"] = protoidtxenable_tmp.text + if ifname_tmp is not None and dcbx_tmp is not None: + if dcbx_tmp.text is not None: + cur_interface_mdn_cfg['dcbx'] = dcbx_tmp.text + if self.state == "present": + if self.function_lldp_interface_flag == 'tlvenableINTERFACE': + if self.type_tlv_enable == 'dot1_tlv': + if self.ifname: + exp_interface_mdn_cfg['ifname'] = self.ifname + if self.protoidtxenable: + exp_interface_mdn_cfg['protoidtxenable'] = self.protoidtxenable + if self.ifname == ifname_tmp.text: + key_list = exp_interface_mdn_cfg.keys() + key_list_cur = cur_interface_mdn_cfg.keys() + if len(key_list) != 0: + for key in key_list: + if "protoidtxenable" == str(key) and self.ifname == cur_interface_mdn_cfg['ifname']: + lldp_config.append(dict(protoidtxenable=cur_interface_mdn_cfg['protoidtxenable'])) + if key in key_list_cur: + if str(exp_interface_mdn_cfg[key]) != str(cur_interface_mdn_cfg[key]): + self.conf_tlv_enable_exsit = True + self.changed = True + return lldp_config + else: + self.conf_tlv_enable_exsit = True + return lldp_config + if self.type_tlv_enable == 'dcbx': + if self.ifname: + exp_interface_mdn_cfg['ifname'] = self.ifname + if self.dcbx: + exp_interface_mdn_cfg['dcbx'] = self.dcbx + if self.ifname == ifname_tmp.text: + key_list = exp_interface_mdn_cfg.keys() + key_list_cur = cur_interface_mdn_cfg.keys() + if len(key_list) != 0: + for key in key_list: + if "dcbx" == key and self.ifname == cur_interface_mdn_cfg['ifname']: + lldp_config.append(dict(dcbx=cur_interface_mdn_cfg['dcbx'])) + if key in key_list_cur: + if str(exp_interface_mdn_cfg[key]) != str(cur_interface_mdn_cfg[key]): + self.conf_tlv_enable_exsit = True + self.changed = True + return lldp_config + else: + self.conf_tlv_enable_exsit = True + return lldp_config + return lldp_config + + def get_interface_interval_config(self): + lldp_config = list() + lldp_dict = dict() + cur_interface_mdn_cfg = dict() + exp_interface_mdn_cfg = dict() + interface_lldp_disable_dict_tmp2 = self.get_interface_lldp_disable_pre_config() + if self.enable_flag == 1: + if interface_lldp_disable_dict_tmp2[self.ifname] != 'disabled': + conf_str = CE_NC_GET_INTERFACE_INTERVAl_CONFIG + conf_obj = get_nc_config(self.module, conf_str) + if "" in conf_obj: + return lldp_config + xml_str = conf_obj.replace('\r', '').replace('\n', '') + xml_str = xml_str.replace('xmlns="urn:ietf:params:xml:ns:netconf:base:1.0"', "") + xml_str = xml_str.replace('xmlns="http://www.huawei.com/netconf/vrp"', "") + root = ElementTree.fromstring(xml_str) + txintervalsite = root.findall("lldp/lldpInterfaces/lldpInterface") + for ele in txintervalsite: + 
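
Editor's note: get_interface_interval_config() indexes the admin-status dict with self.ifname directly, which raises KeyError when the interface is absent from the reply (and the helper can also return None on error). A defensive sketch under those assumptions:

    status_by_ifname = self.get_interface_lldp_disable_pre_config() or {}
    if self.enable_flag == 1 and status_by_ifname.get(self.ifname) != 'disabled':
        pass  # ...continue with the interval query as above
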
ifname_tmp = ele.find("ifName") + txinterval_tmp = ele.find("msgInterval/txInterval") + if ifname_tmp is not None: + if ifname_tmp.text is not None: + cur_interface_mdn_cfg["ifname"] = ifname_tmp.text + if txinterval_tmp is not None: + if txinterval_tmp.text is not None: + cur_interface_mdn_cfg["txinterval"] = txinterval_tmp.text + if self.state == "present": + if self.ifname: + exp_interface_mdn_cfg["ifname"] = self.ifname + if self.txinterval: + exp_interface_mdn_cfg["txinterval"] = self.txinterval + if self.ifname == ifname_tmp.text: + key_list = exp_interface_mdn_cfg.keys() + key_list_cur = cur_interface_mdn_cfg.keys() + if len(key_list) != 0: + for key in key_list: + if "txinterval" == str(key) and self.ifname == cur_interface_mdn_cfg['ifname']: + lldp_config.append(dict(ifname=cur_interface_mdn_cfg['ifname'], txinterval=exp_interface_mdn_cfg['txinterval'])) + if key in key_list_cur: + if str(exp_interface_mdn_cfg[key]) != str(cur_interface_mdn_cfg[key]): + self.conf_interval_exsit = True + lldp_config.append(cur_interface_mdn_cfg) + return lldp_config + else: + self.conf_interval_exsit = True + return lldp_config + return lldp_config + + def config_global_lldp_enable(self): + if self.state == 'present': + if self.enable_flag == 0 and self.lldpenable == 'enabled': + xml_str = CE_NC_MERGE_GLOBA_LLDPENABLE_CONFIG % self.lldpenable + ret_xml = set_nc_config(self.module, xml_str) + self.check_response(ret_xml, "LLDP_ENABLE_CONFIG") + self.changed = True + elif self.enable_flag == 1 and self.lldpenable == 'disabled': + xml_str = CE_NC_MERGE_GLOBA_LLDPENABLE_CONFIG % self.lldpenable + ret_xml = set_nc_config(self.module, xml_str) + self.check_response(ret_xml, "LLDP_ENABLE_CONFIG") + self.changed = True + + def config_interface_lldp_disable_config(self): + if self.function_lldp_interface_flag == 'disableINTERFACE': + if self.enable_flag == 1 and self.conf_interface_lldp_disable_exsit: + if self.ifname: + xml_str = CE_NC_MERGE_INTERFACE_LLDP_CONFIG % (self.ifname, self.lldpadminstatus) + ret_xml = set_nc_config(self.module, xml_str) + self.check_response(ret_xml, "INTERFACE_LLDP_DISABLE_CONFIG") + self.changed = True + + def config_interface_tlv_disable_config(self): + if self.function_lldp_interface_flag == 'tlvdisableINTERFACE': + if self.enable_flag == 1 and self.conf_tlv_disable_exsit: + if self.type_tlv_disable == 'basic_tlv': + if self.ifname: + if self.portdesctxenable: + xml_str = (CE_NC_MERGE_INTERFACE_TLV_CONFIG_HEADER % self.ifname) + \ + (CE_NC_MERGE_INTERFACE_TLV_CONFIG_DISABLE_PORTDESCTXENABLE % self.portdesctxenable) + \ + CE_NC_MERGE_INTERFACE_TLV_CONFIG_TAIL + ret_xml = set_nc_config(self.module, xml_str) + self.check_response(ret_xml, "TLV_DISABLE_PORTDESCTXENABLE") + self.changed = True + if self.manaddrtxenable: + xml_str = (CE_NC_MERGE_INTERFACE_TLV_CONFIG_HEADER % self.ifname) + \ + (CE_NC_MERGE_INTERFACE_TLV_CONFIG_DISABLE_MANADDRTXENABLE % self.manaddrtxenable) + \ + CE_NC_MERGE_INTERFACE_TLV_CONFIG_TAIL + ret_xml = set_nc_config(self.module, xml_str) + self.check_response(ret_xml, "TLV_DISABLE_MANADDRTXENABLE") + self.changed = True + if self.syscaptxenable: + xml_str = (CE_NC_MERGE_INTERFACE_TLV_CONFIG_HEADER % self.ifname) + \ + (CE_NC_MERGE_INTERFACE_TLV_CONFIG_DISABLE_SYSCAPTXENABLE % self.syscaptxenable) + \ + CE_NC_MERGE_INTERFACE_TLV_CONFIG_TAIL + ret_xml = set_nc_config(self.module, xml_str) + self.check_response(ret_xml, "TLV_DISABLE_SYSCAPTXENABLE") + self.changed = True + if self.sysdesctxenable: + xml_str = (CE_NC_MERGE_INTERFACE_TLV_CONFIG_HEADER % 
self.ifname) + \ + (CE_NC_MERGE_INTERFACE_TLV_CONFIG_DISABLE_SYSDESCTXENABLE % self.sysdesctxenable) + \ + CE_NC_MERGE_INTERFACE_TLV_CONFIG_TAIL + ret_xml = set_nc_config(self.module, xml_str) + self.check_response(ret_xml, "TLV_DISABLE_SYSDESCTXENABLE") + self.changed = True + if self.sysnametxenable: + xml_str = (CE_NC_MERGE_INTERFACE_TLV_CONFIG_HEADER % self.ifname) + \ + (CE_NC_MERGE_INTERFACE_TLV_CONFIG_DISABLE_SYSNAMETXENABLE % self.sysnametxenable) + \ + CE_NC_MERGE_INTERFACE_TLV_CONFIG_TAIL + ret_xml = set_nc_config(self.module, xml_str) + self.check_response(ret_xml, "TLV_DISABLE_SYSNAMETXENABLE") + self.changed = True + if self.type_tlv_disable == 'dot3_tlv': + if self.ifname: + if self.linkaggretxenable: + xml_str = (CE_NC_MERGE_INTERFACE_TLV_CONFIG_HEADER % self.ifname) + \ + (CE_NC_MERGE_INTERFACE_TLV_CONFIG_DISABLE_LINKAGGRETXENABLE % self.linkaggretxenable) + \ + CE_NC_MERGE_INTERFACE_TLV_CONFIG_TAIL + ret_xml = set_nc_config(self.module, xml_str) + self.check_response(ret_xml, "TLV_DISABLE_LINKAGGRETXENABLE") + self.changed = True + if self.macphytxenable: + xml_str = (CE_NC_MERGE_INTERFACE_TLV_CONFIG_HEADER % self.ifname) + \ + (CE_NC_MERGE_INTERFACE_TLV_CONFIG_DISABLE_MACPHYTXENABLE % self.macphytxenable) + \ + CE_NC_MERGE_INTERFACE_TLV_CONFIG_TAIL + ret_xml = set_nc_config(self.module, xml_str) + self.check_response(ret_xml, "TLV_DISABLE_MACPHYTXENABLE") + self.changed = True + if self.maxframetxenable: + xml_str = (CE_NC_MERGE_INTERFACE_TLV_CONFIG_HEADER % self.ifname) + \ + (CE_NC_MERGE_INTERFACE_TLV_CONFIG_DISABLE_MAXFRAMETXENABLE % self.maxframetxenable) + \ + CE_NC_MERGE_INTERFACE_TLV_CONFIG_TAIL + ret_xml = set_nc_config(self.module, xml_str) + self.check_response(ret_xml, "TLV_DISABLE_MAXFRAMETXENABLE") + self.changed = True + if self.eee: + xml_str = (CE_NC_MERGE_INTERFACE_TLV_CONFIG_HEADER % self.ifname) + \ + (CE_NC_MERGE_INTERFACE_TLV_CONFIG_DISABLE_EEE % self.eee) + \ + CE_NC_MERGE_INTERFACE_TLV_CONFIG_TAIL + ret_xml = set_nc_config(self.module, xml_str) + self.check_response(ret_xml, "TLV_DISABLE_EEE") + self.changed = True + + def config_interface_tlv_enable_config(self): + if self.function_lldp_interface_flag == 'tlvenableINTERFACE': + if self.enable_flag == 1 and self.conf_tlv_enable_exsit: + if self.type_tlv_enable == 'dot1_tlv': + if self.ifname: + if self.protoidtxenable: + xml_str = (CE_NC_MERGE_INTERFACE_TLV_CONFIG_HEADER % self.ifname) + \ + (CE_NC_MERGE_INTERFACE_TLV_CONFIG_ENABLE_PROTOIDTXENABLE % self.protoidtxenable) + \ + CE_NC_MERGE_INTERFACE_TLV_CONFIG_TAIL + ret_xml = set_nc_config(self.module, xml_str) + self.check_response(ret_xml, "TLV_ENABLE_DOT1_PORT_VLAN") + self.changed = True + if self.type_tlv_enable == 'dcbx': + if self.ifname: + if self.dcbx: + xml_str = (CE_NC_MERGE_INTERFACE_TLV_CONFIG_HEADER % self.ifname) + \ + (CE_NC_MERGE_INTERFACE_TLV_CONFIG_ENABLE_DCBX % self.dcbx) + \ + CE_NC_MERGE_INTERFACE_TLV_CONFIG_TAIL + ret_xml = set_nc_config(self.module, xml_str) + self.check_response(ret_xml, "TLV_ENABLE_DCBX_VLAN") + self.changed = True + + def config_interface_interval_config(self): + if self.function_lldp_interface_flag == 'intervalINTERFACE': + tmp = self.get_interface_lldp_disable_pre_config() + if self.enable_flag == 1 and self.conf_interval_exsit and tmp[self.ifname] != 'disabled': + if self.ifname: + if self.txinterval: + xml_str = CE_NC_MERGE_INTERFACE_INTERVAl_CONFIG % (self.ifname, self.txinterval) + ret_xml = set_nc_config(self.module, xml_str) + self.check_response(ret_xml, "INTERFACE_INTERVAL_CONFIG") + self.changed 
= True + + def get_existing(self): + """get existing information""" + self.get_lldp_enable_pre_config() + if self.lldpenable: + self.existing['globalLLDPENABLE'] = self.get_lldp_enable_pre_config() + if self.function_lldp_interface_flag == 'disableINTERFACE': + self.existing['disableINTERFACE'] = self.get_interface_lldp_disable_config() + if self.function_lldp_interface_flag == 'tlvdisableINTERFACE': + self.existing['tlvdisableINTERFACE'] = self.get_interface_tlv_disable_config() + if self.function_lldp_interface_flag == 'tlvenableINTERFACE': + self.existing['tlvenableINTERFACE'] = self.get_interface_tlv_enable_config() + if self.function_lldp_interface_flag == 'intervalINTERFACE': + self.existing['intervalINTERFACE'] = self.get_interface_interval_config() + + def get_proposed(self): + """get proposed""" + if self.lldpenable: + self.proposed = dict(lldpenable=self.lldpenable) + if self.function_lldp_interface_flag == 'disableINTERFACE': + if self.enable_flag == 1: + self.proposed = dict(ifname=self.ifname, lldpadminstatus=self.lldpadminstatus) + if self.function_lldp_interface_flag == 'tlvdisableINTERFACE': + if self.enable_flag == 1: + if self.type_tlv_disable == 'basic_tlv': + if self.ifname: + if self.manaddrtxenable: + self.proposed = dict(ifname=self.ifname, manaddrtxenable=self.manaddrtxenable) + if self.portdesctxenable: + self.proposed = dict(ifname=self.ifname, portdesctxenable=self.portdesctxenable) + if self.syscaptxenable: + self.proposed = dict(ifname=self.ifname, syscaptxenable=self.syscaptxenable) + if self.sysdesctxenable: + self.proposed = dict(ifname=self.ifname, sysdesctxenable=self.sysdesctxenable) + if self.sysnametxenable: + self.proposed = dict(ifname=self.ifname, sysnametxenable=self.sysnametxenable) + if self.type_tlv_disable == 'dot3_tlv': + if self.ifname: + if self.linkaggretxenable: + self.proposed = dict(ifname=self.ifname, linkaggretxenable=self.linkaggretxenable) + if self.macphytxenable: + self.proposed = dict(ifname=self.ifname, macphytxenable=self.macphytxenable) + if self.maxframetxenable: + self.proposed = dict(ifname=self.ifname, maxframetxenable=self.maxframetxenable) + if self.eee: + self.proposed = dict(ifname=self.ifname, eee=self.eee) + if self.function_lldp_interface_flag == 'tlvenableINTERFACE': + if self.enable_flag == 1: + if self.type_tlv_enable == 'dot1_tlv': + if self.ifname: + if self.protoidtxenable: + self.proposed = dict(ifname=self.ifname, protoidtxenable=self.protoidtxenable) + if self.type_tlv_enable == 'dcbx': + if self.ifname: + if self.dcbx: + self.proposed = dict(ifname=self.ifname, dcbx=self.dcbx) + if self.function_lldp_interface_flag == 'intervalINTERFACE': + tmp1 = self.get_interface_lldp_disable_pre_config() + if self.enable_flag == 1 and tmp1[self.ifname] != 'disabled': + self.proposed = dict(ifname=self.ifname, txinterval=self.txinterval) + + def config_lldp_interface(self): + """config lldp interface""" + if self.lldpenable: + self.config_global_lldp_enable() + if self.function_lldp_interface_flag == 'disableINTERFACE': + self.config_interface_lldp_disable_config() + elif self.function_lldp_interface_flag == 'tlvdisableINTERFACE': + self.config_interface_tlv_disable_config() + elif self.function_lldp_interface_flag == 'tlvenableINTERFACE': + self.config_interface_tlv_enable_config() + elif self.function_lldp_interface_flag == 'intervalINTERFACE': + self.config_interface_interval_config() + + def get_end_state(self): + """get end_state information""" + self.get_lldp_enable_pre_config() + if self.lldpenable: + 
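+        # end_state mirrors get_existing(): one key per configuration area,
+        # selected by function_lldp_interface_flag. A sketch of the module
+        # output for a TLV-disable run (values illustrative):
+        #   existing:  {"tlvdisableINTERFACE": [...]}
+        #   end_state: {"tlvdisableINTERFACE": [...]}
+        #   updates:   ["interface 10GE1/0/1",
+        #               "lldp tlv-disable basic-tlv port-description"]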
self.end_state['globalLLDPENABLE'] = self.get_lldp_enable_pre_config() + if self.function_lldp_interface_flag == 'disableINTERFACE': + self.end_state['disableINTERFACE'] = self.get_interface_lldp_disable_config() + if self.function_lldp_interface_flag == 'tlvdisableINTERFACE': + self.end_state['tlvdisableINTERFACE'] = self.get_interface_tlv_disable_config() + if self.function_lldp_interface_flag == 'tlvenableINTERFACE': + self.end_state['tlvenableINTERFACE'] = self.get_interface_tlv_enable_config() + if self.function_lldp_interface_flag == 'intervalINTERFACE': + self.end_state['intervalINTERFACE'] = self.get_interface_interval_config() + + def get_update_cmd(self): + """Get updated commands""" + + cmds = [] + if self.state == "present": + if self.lldpenable == "enabled": + cmds.append("lldp enable") + if self.function_lldp_interface_flag == 'disableINTERFACE': + if self.ifname: + cmds.append("%s %s" % ("interface", self.ifname)) + if self.lldpadminstatus == 'disabled': + cmds.append("lldp disable") + else: + cmds.append("undo lldp disable") + elif self.function_lldp_interface_flag == 'tlvdisableINTERFACE': + if self.type_tlv_disable == 'basic_tlv': + if self.ifname: + cmds.append("%s %s" % ("interface", self.ifname)) + if self.manaddrtxenable: + if self.manaddrtxenable == "false": + cmds.append("lldp tlv-disable basic-tlv management-address") + if self.manaddrtxenable == "true": + cmds.append("undo lldp tlv-disable basic-tlv management-address") + if self.portdesctxenable: + if self.portdesctxenable == "false": + cmds.append("lldp tlv-disable basic-tlv port-description") + if self.portdesctxenable == "true": + cmds.append("undo lldp tlv-disable basic-tlv port-description") + if self.syscaptxenable: + if self.syscaptxenable == "false": + cmds.append("lldp tlv-disable basic-tlv system-capability") + if self.syscaptxenable == "true": + cmds.append("undo lldp tlv-disable basic-tlv system-capability") + if self.sysdesctxenable: + if self.sysdesctxenable == "false": + cmds.append("lldp tlv-disable basic-tlv system-description") + if self.sysdesctxenable == "true": + cmds.append("undo lldp tlv-disable basic-tlv system-description") + if self.sysnametxenable: + if self.sysnametxenable == "false": + cmds.append("lldp tlv-disable basic-tlv system-name") + if self.sysnametxenable == "true": + cmds.append("undo lldp tlv-disable basic-tlv system-name") + if self.type_tlv_disable == 'dot3_tlv': + if self.ifname: + cmds.append("%s %s" % ("interface", self.ifname)) + if self.linkaggretxenable: + if self.linkaggretxenable == "false": + cmds.append("lldp tlv-disable dot3-tlv link-aggregation") + if self.linkaggretxenable == "true": + cmds.append("undo lldp tlv-disable dot3-tlv link-aggregation") + if self.macphytxenable: + if self.macphytxenable == "false": + cmds.append("lldp tlv-disable dot3-tlv mac-physic") + if self.macphytxenable == "true": + cmds.append("undo lldp tlv-disable dot3-tlv mac-physic") + if self.maxframetxenable: + if self.maxframetxenable == "false": + cmds.append("lldp tlv-disable dot3-tlv max-frame-size") + if self.maxframetxenable == "true": + cmds.append("undo lldp tlv-disable dot3-tlv max-frame-size") + if self.eee: + if self.eee == "false": + cmds.append("lldp tlv-disable dot3-tlv eee") + if self.eee == "true": + cmds.append("undo lldp tlv-disable dot3-tlv eee") + elif self.function_lldp_interface_flag == 'tlvenableINTERFACE': + if self.type_tlv_enable == 'dot1_tlv': + if self.ifname: + cmds.append("%s %s" % ("interface", self.ifname)) + if self.protoidtxenable: + if 
self.protoidtxenable == "false": + cmds.append("undo lldp tlv-enable dot1-tlv protocol-identity") + if self.protoidtxenable == "true": + cmds.append("lldp tlv-enable dot1-tlv protocol-identity") + if self.type_tlv_enable == 'dcbx': + if self.ifname: + cmds.append("%s %s" % ("interface", self.ifname)) + if self.dcbx: + if self.dcbx == "false": + cmds.append("undo lldp tlv-enable dcbx") + if self.dcbx == "true": + cmds.append("lldp tlv-enable dcbx") + elif self.function_lldp_interface_flag == 'intervalINTERFACE': + if self.ifname: + cmds.append("%s %s" % ("interface", self.ifname)) + if self.txinterval: + cmds.append("lldp transmit fast-mode interval %s" % self.txinterval) + elif self.lldpenable == "disabled": + cmds.append("undo lldp enable") + else: + if self.enable_flag == 1: + if self.function_lldp_interface_flag == 'disableINTERFACE': + if self.ifname: + cmds.append("interface %s" % self.ifname) + if self.lldpadminstatus == 'disabled': + cmds.append("lldp disable") + else: + cmds.append("undo lldp disable") + elif self.function_lldp_interface_flag == 'tlvdisableINTERFACE': + if self.type_tlv_disable == 'basic_tlv': + if self.ifname: + cmds.append("interface %s" % self.ifname) + if self.manaddrtxenable: + if self.manaddrtxenable == "false": + cmds.append("lldp tlv-disable basic-tlv management-address") + if self.manaddrtxenable == "true": + cmds.append("undo lldp tlv-disable basic-tlv management-address") + if self.portdesctxenable: + if self.portdesctxenable == "false": + cmds.append("lldp tlv-disable basic-tlv port-description") + if self.portdesctxenable == "true": + cmds.append("undo lldp tlv-disable basic-tlv port-description") + if self.syscaptxenable: + if self.syscaptxenable == "false": + cmds.append("lldp tlv-disable basic-tlv system-capability") + if self.syscaptxenable == "true": + cmds.append("undo lldp tlv-disable basic-tlv system-capability") + if self.sysdesctxenable: + if self.sysdesctxenable == "false": + cmds.append("lldp tlv-disable basic-tlv system-description") + if self.sysdesctxenable == "true": + cmds.append("undo lldp tlv-disable basic-tlv system-description") + if self.sysnametxenable: + if self.sysnametxenable == "false": + cmds.append("lldp tlv-disable basic-tlv system-name") + if self.sysnametxenable == "true": + cmds.append("undo lldp tlv-disable basic-tlv system-name") + if self.type_tlv_disable == 'dot3_tlv': + if self.ifname: + cmds.append("interface %s" % self.ifname) + if self.linkaggretxenable: + if self.linkaggretxenable == "false": + cmds.append("lldp tlv-disable dot3-tlv link-aggregation") + if self.linkaggretxenable == "true": + cmds.append("undo lldp tlv-disable dot3-tlv link-aggregation") + if self.macphytxenable: + if self.macphytxenable == "false": + cmds.append("lldp tlv-disable dot3-tlv mac-physic") + if self.macphytxenable == "true": + cmds.append("undo lldp tlv-disable dot3-tlv mac-physic") + if self.maxframetxenable: + if self.maxframetxenable == "false": + cmds.append("lldp tlv-disable dot3-tlv max-frame-size") + if self.maxframetxenable == "true": + cmds.append("undo lldp tlv-disable dot3-tlv max-frame-size") + if self.eee: + if self.eee == "false": + cmds.append("lldp tlv-disable dot3-tlv eee") + if self.eee == "true": + cmds.append("undo lldp tlv-disable dot3-tlv eee") + elif self.function_lldp_interface_flag == 'tlvenableINTERFACE': + if self.type_tlv_enable == 'dot1_tlv': + if self.ifname: + cmds.append("interface %s" % self.ifname) + if self.protoidtxenable: + if self.protoidtxenable ==
"false": + cmds.append("undo lldp tlv-enable dot1-tlv protocol-identity") + if self.protoidtxenable == "true": + cmds.append("lldp tlv-enable dot1-tlv protocol-identity") + if self.type_tlv_enable == 'dcbx': + if self.ifname: + cmds.append("interface %s" % self.ifname) + if self.dcbx: + if self.dcbx == "false": + cmds.append("undo lldp tlv-enable dcbx") + if self.dcbx == "true": + cmds.append("lldp tlv-enable dcbx") + elif self.function_lldp_interface_flag == 'intervalINTERFACE': + if self.ifname: + cmds.append("interface %s" % self.ifname) + if self.txinterval: + cmds.append("lldp transmit fast-mode interval %s" % self.txinterval) + self.updates_cmd = cmds + + def work(self): + """Execute task""" + self.check_params() + self.get_existing() + self.get_proposed() + self.config_lldp_interface() + self.get_update_cmd() + self.get_end_state() + self.show_result() + + +def main(): + """Main function""" + + argument_spec = dict( + lldpenable=dict(choices=['enabled', 'disabled']), + function_lldp_interface_flag=dict(choices=['disableINTERFACE', 'tlvdisableINTERFACE', 'tlvenableINTERFACE', 'intervalINTERFACE'], type='str'), + type_tlv_disable=dict(choices=['basic_tlv', 'dot3_tlv'], type='str'), + type_tlv_enable=dict(choices=['dot1_tlv', 'dcbx'], type='str'), + ifname=dict(type='str'), + lldpadminstatus=dict(choices=['txOnly', 'rxOnly', 'txAndRx', 'disabled'], type='str'), + manaddrtxenable=dict(type='bool'), + portdesctxenable=dict(type='bool'), + syscaptxenable=dict(type='bool'), + sysdesctxenable=dict(type='bool'), + sysnametxenable=dict(type='bool'), + portvlantxenable=dict(type='bool'), + protovlantxenable=dict(type='bool'), + txprotocolvlanid=dict(type='int'), + vlannametxenable=dict(type='bool'), + txvlannameid=dict(type='int'), + txinterval=dict(type='int'), + protoidtxenable=dict(type='bool'), + macphytxenable=dict(type='bool'), + linkaggretxenable=dict(type='bool'), + maxframetxenable=dict(type='bool'), + eee=dict(type='bool'), + dcbx=dict(type='bool'), + state=dict(type='str', choices=['absent', 'present'], default='present'), + ) + + lldp_interface_obj = Lldp_interface(argument_spec) + lldp_interface_obj.work() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/cloudengine/ce_mdn_interface.py b/plugins/modules/network/cloudengine/ce_mdn_interface.py new file mode 100644 index 0000000000..583d81504f --- /dev/null +++ b/plugins/modules/network/cloudengine/ce_mdn_interface.py @@ -0,0 +1,402 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright 2019 Red Hat +# GNU General Public License v3.0+ +# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- + +module: ce_mdn_interface +short_description: Manages MDN configuration on HUAWEI CloudEngine switches. +description: + - Manages MDN configuration on HUAWEI CloudEngine switches. +author: xuxiaowei0512 (@CloudEngine-Ansible) +options: + lldpenable: + description: + - Set global LLDP enable state. + type: str + choices: ['enabled', 'disabled'] + mdnstatus: + description: + - Set interface MDN enable state. + type: str + choices: ['rxOnly', 'disabled'] + ifname: + description: + - Interface name. + type: str + state: + description: + - Manage the state of the resource. 
+ default: present + type: str + choices: ['present','absent'] +notes: + - This module requires the netconf system service be enabled on + the remote device being managed. + - This module works with connection C(netconf). +''' + +EXAMPLES = ''' + - name: "Configure global LLDP enable state" + ce_mdn_interface: + lldpenable: enabled + + - name: "Configure interface MDN enable state" + ce_mdn_interface: + ifname: 10GE1/0/1 + mdnstatus: rxOnly +''' + +RETURN = ''' +proposed: + description: k/v pairs of parameters passed into module + returned: always + type: dict + sample: { + "lldpenable": "enabled", + "ifname": "10GE1/0/1", + "mdnstatus": "rxOnly", + "state":"present" + } +existing: + description: k/v pairs of existing global LLDP configration + returned: always + type: dict + sample: { + "lldpenable": "enabled", + "ifname": "10GE1/0/1", + "mdnstatus": "disabled" + } +end_state: + description: k/v pairs of global LLDP configration after module execution + returned: always + type: dict + sample: { + "lldpenable": "enabled", + "ifname": "10GE1/0/1", + "mdnstatus": "rxOnly" + } +updates: + description: command sent to the device + returned: always + type: list + sample: [ + "interface 10ge 1/0/1", + "lldp mdn enable", + ] +changed: + description: check to see if a change was made on the device + returned: always + type: bool + sample: true +''' + +import copy +import re +from xml.etree import ElementTree +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.network.cloudengine.ce import set_nc_config, get_nc_config, execute_nc_action + +CE_NC_GET_GLOBAL_LLDPENABLE_CONFIG = """ + + + + + + + +""" + +CE_NC_MERGE_GLOBA_LLDPENABLE_CONFIG = """ + + + + %s + + + +""" + +CE_NC_GET_INTERFACE_MDNENABLE_CONFIG = """ + + + + + + + + + + +""" + +CE_NC_MERGE_INTERFACE_MDNENABLE_CONFIG = """ + + + + + %s + %s + + + + +""" + + +def get_interface_type(interface): + """Gets the type of interface, such as 10GE, ...""" + + if interface is None: + return None + + iftype = None + + if interface.upper().startswith('GE'): + iftype = 'ge' + elif interface.upper().startswith('10GE'): + iftype = '10ge' + elif interface.upper().startswith('25GE'): + iftype = '25ge' + elif interface.upper().startswith('40GE'): + iftype = '40ge' + elif interface.upper().startswith('100GE'): + iftype = '100ge' + elif interface.upper().startswith('PORT-GROUP'): + iftype = 'stack-Port' + elif interface.upper().startswith('NULL'): + iftype = 'null' + else: + return None + return iftype.lower() + + +class Interface_mdn(object): + """Manage global lldp enable configration""" + + def __init__(self, argument_spec): + self.spec = argument_spec + self.module = None + self.init_module() + + # LLDP global configration info + self.lldpenable = self.module.params['lldpenable'] or None + self.ifname = self.module.params['ifname'] + self.mdnstatus = self.module.params['mdnstatus'] or None + self.state = self.module.params['state'] + self.lldp_conf = dict() + self.conf_exsit = False + self.enable_flag = 0 + self.check_params() + + # state + self.changed = False + self.proposed_changed = dict() + self.updates_cmd = list() + self.results = dict() + self.proposed = dict() + self.existing = dict() + self.end_state = dict() + + def check_params(self): + """Check all input params""" + + if self.ifname: + intf_type = get_interface_type(self.ifname) + if not intf_type: + self.module.fail_json( + msg='Error: ifname name of %s ' + 'is error.' 
% self.ifname) + if (len(self.ifname) < 1) or (len(self.ifname) > 63): + self.module.fail_json( + msg='Error: Ifname length must be between 1 and 63.') + + def init_module(self): + """Init module object""" + + self.module = AnsibleModule( + argument_spec=self.spec, supports_check_mode=True) + + def check_response(self, xml_str, xml_name): + """Check whether the response message indicates success""" + + if "" not in xml_str: + self.module.fail_json(msg='Error: %s failed.' % xml_name) + + def config_interface_mdn(self): + """Configure the global LLDP enable state and the interface MDN state""" + + if self.state == 'present': + if self.enable_flag == 0 and self.lldpenable == 'enabled': + xml_str = CE_NC_MERGE_GLOBA_LLDPENABLE_CONFIG % self.lldpenable + ret_xml = set_nc_config(self.module, xml_str) + self.check_response(ret_xml, "LLDP_ENABLE_CONFIG") + self.changed = True + elif self.enable_flag == 1 and self.lldpenable == 'disabled': + xml_str = CE_NC_MERGE_GLOBA_LLDPENABLE_CONFIG % self.lldpenable + ret_xml = set_nc_config(self.module, xml_str) + self.check_response(ret_xml, "LLDP_ENABLE_CONFIG") + self.changed = True + elif self.enable_flag == 1 and self.conf_exsit: + xml_str = CE_NC_MERGE_INTERFACE_MDNENABLE_CONFIG % (self.ifname, self.mdnstatus) + ret_xml = set_nc_config(self.module, xml_str) + self.check_response(ret_xml, "INTERFACE_MDN_ENABLE_CONFIG") + self.changed = True + + def show_result(self): + """Show result""" + + self.results['changed'] = self.changed + self.results['proposed'] = self.proposed + self.results['existing'] = self.existing + self.results['end_state'] = self.end_state + if self.changed: + self.results['updates'] = self.updates_cmd + else: + self.results['updates'] = list() + + self.module.exit_json(**self.results) + + def get_interface_mdn_exist_config(self): + """Get the existing LLDP and interface MDN configuration""" + + lldp_config = list() + lldp_dict = dict() + conf_enable_str = CE_NC_GET_GLOBAL_LLDPENABLE_CONFIG + conf_enable_obj = get_nc_config(self.module, conf_enable_str) + xml_enable_str = conf_enable_obj.replace('\r', '').replace('\n', '').\ + replace('xmlns="urn:ietf:params:xml:ns:netconf:base:1.0"', "").\ + replace('xmlns="http://www.huawei.com/netconf/vrp"', "") + + # get lldp enable config info + root_enable = ElementTree.fromstring(xml_enable_str) + ntpsite_enable = root_enable.findall("lldp/lldpSys") + for nexthop_enable in ntpsite_enable: + for ele_enable in nexthop_enable: + if ele_enable.tag in ["lldpEnable"]: + lldp_dict[ele_enable.tag] = ele_enable.text + + if self.state == "present": + if lldp_dict['lldpEnable'] == 'enabled': + self.enable_flag = 1 + lldp_config.append(dict(lldpenable=lldp_dict['lldpEnable'])) + + if self.enable_flag == 1: + conf_str = CE_NC_GET_INTERFACE_MDNENABLE_CONFIG + conf_obj = get_nc_config(self.module, conf_str) + if "" in conf_obj: + return lldp_config + xml_str = conf_obj.replace('\r', '').replace('\n', '').\ + replace('xmlns="urn:ietf:params:xml:ns:netconf:base:1.0"', "").\ + replace('xmlns="http://www.huawei.com/netconf/vrp"', "") + # get all interface MDN config info + root = ElementTree.fromstring(xml_str) + ntpsite = root.findall("lldp/mdnInterfaces/mdnInterface") + for nexthop in ntpsite: + for ele in nexthop: + if ele.tag in ["ifName", "mdnStatus"]: + lldp_dict[ele.tag] = ele.text + if self.state == "present": + cur_interface_mdn_cfg = dict(ifname=lldp_dict['ifName'], mdnstatus=lldp_dict['mdnStatus']) + exp_interface_mdn_cfg = dict(ifname=self.ifname, mdnstatus=self.mdnstatus) + if self.ifname == lldp_dict['ifName']: + if cur_interface_mdn_cfg !=
exp_interface_mdn_cfg: + self.conf_exsit = True + lldp_config.append(dict(ifname=lldp_dict['ifName'], mdnstatus=lldp_dict['mdnStatus'])) + return lldp_config + lldp_config.append(dict(ifname=lldp_dict['ifName'], mdnstatus=lldp_dict['mdnStatus'])) + return lldp_config + + def get_existing(self): + """Get existing info""" + + self.existing = self.get_interface_mdn_exist_config() + + def get_proposed(self): + """Get proposed info""" + + if self.lldpenable: + self.proposed = dict(lldpenable=self.lldpenable) + if self.enable_flag == 1: + if self.ifname: + self.proposed = dict(ifname=self.ifname, mdnstatus=self.mdnstatus) + + def get_end_state(self): + """Get end state info""" + + self.end_state = self.get_interface_mdn_exist_config() + + def get_update_cmd(self): + """Get updated commands""" + + update_list = list() + if self.state == "present": + if self.lldpenable == "enabled": + cli_str = "lldp enable" + update_list.append(cli_str) + if self.ifname: + cli_str = "%s %s" % ("interface", self.ifname) + update_list.append(cli_str) + if self.mdnstatus: + if self.mdnstatus == "rxOnly": + cli_str = "lldp mdn enable" + update_list.append(cli_str) + else: + cli_str = "undo lldp mdn enable" + update_list.append(cli_str) + + elif self.lldpenable == "disabled": + cli_str = "undo lldp enable" + update_list.append(cli_str) + else: + if self.enable_flag == 1: + if self.ifname: + cli_str = "%s %s" % ("interface", self.ifname) + update_list.append(cli_str) + if self.mdnstatus: + if self.mdnstatus == "rxOnly": + cli_str = "lldp mdn enable" + update_list.append(cli_str) + else: + cli_str = "undo lldp mdn enable" + update_list.append(cli_str) + + self.updates_cmd.extend(update_list) + + def work(self): + """Execute the task""" + self.check_params() + self.get_existing() + self.get_proposed() + self.config_interface_mdn() + self.get_update_cmd() + self.get_end_state() + self.show_result() + + +def main(): + """Main function entry""" + + argument_spec = dict( + lldpenable=dict(type='str', choices=['enabled', 'disabled']), + mdnstatus=dict(type='str', choices=['rxOnly', 'disabled']), + ifname=dict(type='str'), + state=dict(choices=['absent', 'present'], default='present'), + ) + lldp_obj = Interface_mdn(argument_spec) + lldp_obj.work() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/cloudengine/ce_mlag_config.py b/plugins/modules/network/cloudengine/ce_mlag_config.py new file mode 100644 index 0000000000..1e40ae94a7 --- /dev/null +++ b/plugins/modules/network/cloudengine/ce_mlag_config.py @@ -0,0 +1,916 @@ +#!/usr/bin/python +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: ce_mlag_config +short_description: Manages MLAG configuration on HUAWEI CloudEngine switches.
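+# The module manages two M-LAG building blocks: the DFS group (dfs_group_id
+# plus its nickname/IP attributes) and the peer-link (peer_link_id bound to
+# an Eth-Trunk via eth_trunk_id); the options below map onto those objects.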
+description: + - Manages MLAG configuration on HUAWEI CloudEngine switches. +author: + - Li Yanfeng (@QijunPan) +notes: + - This module requires the netconf system service be enabled on the remote device being managed. + - Recommended connection is C(netconf). + - This module also works with C(local) connections for legacy playbooks. +options: + dfs_group_id: + description: + - ID of a DFS group. The value is 1. + nickname: + description: + - The nickname bound to a DFS group. The value is an integer that ranges from 1 to 65471. + pseudo_nickname: + description: + - A pseudo nickname of a DFS group. The value is an integer that ranges from 1 to 65471. + pseudo_priority: + description: + - The priority of a pseudo nickname. The value is an integer that ranges from 128 to 255. + The default value is 192. A larger value indicates a higher priority. + ip_address: + description: + - IP address bound to the DFS group. The value is in dotted decimal notation. + vpn_instance_name: + description: + - Name of the VPN instance bound to the DFS group. The value is a string of 1 to 31 case-sensitive + characters without spaces. If the character string is quoted by double quotation marks, the character + string can contain spaces. The value _public_ is reserved and cannot be used as the VPN instance name. + priority_id: + description: + - Priority of a DFS group. The value is an integer that ranges from 1 to 254. The default value is 100. + eth_trunk_id: + description: + - Name of the peer-link interface. The value is in the range from 0 to 511. + peer_link_id: + description: + - Number of the peer-link interface. The value is 1. + state: + description: + - Specify desired state of the resource. + default: present + choices: ['present','absent'] +''' + +EXAMPLES = ''' +- name: mlag config module test + hosts: cloudengine + connection: local + gather_facts: no + vars: + cli: + host: "{{ inventory_hostname }}" + port: "{{ ansible_ssh_port }}" + username: "{{ username }}" + password: "{{ password }}" + transport: cli + + tasks: + + - name: Create DFS Group id + ce_mlag_config: + dfs_group_id: 1 + provider: "{{ cli }}" + - name: Set dfs-group priority + ce_mlag_config: + dfs_group_id: 1 + priority_id: 3 + state: present + provider: "{{ cli }}" + - name: Set pseudo nickname + ce_mlag_config: + dfs_group_id: 1 + pseudo_nickname: 3 + pseudo_priority: 130 + state: present + provider: "{{ cli }}" + - name: Set ip + ce_mlag_config: + dfs_group_id: 1 + ip_address: 11.1.1.2 + vpn_instance_name: 6 + provider: "{{ cli }}" + - name: Set peer link + ce_mlag_config: + eth_trunk_id: 3 + peer_link_id: 2 + state: present + provider: "{{ cli }}" +''' + +RETURN = ''' +changed: + description: check to see if a change was made on the device + returned: always + type: bool + sample: true +proposed: + description: k/v pairs of parameters passed into module + returned: always + type: dict + sample: { "eth_trunk_id": "3", + "peer_link_id": "1", + "state": "present"} +existing: + description: k/v pairs of existing MLAG configuration + returned: always + type: dict + sample: { } +end_state: + description: k/v pairs of MLAG configuration after module execution + returned: always + type: dict + sample: { "eth_trunk_id": "Eth-Trunk3", + "peer_link_id": "1"} +updates: + description: command sent to the device + returned: always + type: list + sample: ["peer-link 1"] +''' + +from xml.etree import ElementTree +from ansible.module_utils.basic import AnsibleModule +from
ansible_collections.community.general.plugins.module_utils.network.cloudengine.ce import get_nc_config, set_nc_config, ce_argument_spec + + +CE_NC_GET_DFS_GROUP_INFO = """ + + + + + + + + + + + + + + + + + +""" +CE_NC_GET_PEER_LINK_INFO = """ + + + + + + + + + + + +""" + +CE_NC_CREATE_DFS_GROUP_INFO_HEADER = """ + + + + + %s +""" + +CE_NC_CREATE_DFS_GROUP_INFO_TAIL = """ + + + + +""" + +CE_NC_MERGE_DFS_GROUP_INFO_HEADER = """ + + + + + %s +""" + +CE_NC_MERGE_DFS_GROUP_INFO_TAIL = """ + + + + +""" + +CE_NC_DELETE_DFS_GROUP_ATTRIBUTE_HEADER = """ + + + + + %s +""" + +CE_NC_DELETE_DFS_GROUP_ATTRIBUTE_TAIL = """ + + + + +""" + +CE_NC_DELETE_DFS_GROUP_INFO_HEADER = """ + + + + + %s +""" + +CE_NC_DELETE_DFS_GROUP_INFO_TAIL = """ + + + + +""" + +CE_NC_CREATE_PEER_LINK_INFO = """ + + + + + 1 + %s + %s + + + + +""" + +CE_NC_MERGE_PEER_LINK_INFO = """ + + + + + 1 + %s + %s + + + + +""" +CE_NC_DELETE_PEER_LINK_INFO = """ + + + + + 1 + %s + %s + + + + +""" + + +def is_valid_address(address): + """check ip-address is valid""" + + if address.find('.') != -1: + addr_list = address.split('.') + if len(addr_list) != 4: + return False + for each_num in addr_list: + if not each_num.isdigit(): + return False + if int(each_num) > 255: + return False + return True + + return False + + +class MlagConfig(object): + """ + Manages Manages MLAG config information. + """ + + def __init__(self, argument_spec): + self.spec = argument_spec + self.module = None + self.init_module() + + # module input info + self.dfs_group_id = self.module.params['dfs_group_id'] + self.nickname = self.module.params['nickname'] + self.pseudo_nickname = self.module.params['pseudo_nickname'] + self.pseudo_priority = self.module.params['pseudo_priority'] + self.ip_address = self.module.params['ip_address'] + self.vpn_instance_name = self.module.params['vpn_instance_name'] + self.priority_id = self.module.params['priority_id'] + self.eth_trunk_id = self.module.params['eth_trunk_id'] + self.peer_link_id = self.module.params['peer_link_id'] + self.state = self.module.params['state'] + + # state + self.changed = False + self.updates_cmd = list() + self.results = dict() + self.existing = dict() + self.proposed = dict() + self.end_state = dict() + + self.commands = list() + # DFS group info + self.dfs_group_info = None + # peer link info + self.peer_link_info = None + + def init_module(self): + """ init module """ + + self.module = AnsibleModule( + argument_spec=self.spec, supports_check_mode=True) + + def check_response(self, con_obj, xml_name): + """Check if response message is already succeed.""" + + xml_str = con_obj.xml + if "" not in xml_str: + self.module.fail_json(msg='Error: %s failed.' 
% xml_name) + + def get_dfs_group_info(self): + """ get dfs group attributes info.""" + + dfs_group_info = dict() + conf_str = CE_NC_GET_DFS_GROUP_INFO + xml_str = get_nc_config(self.module, conf_str) + if "" in xml_str: + return dfs_group_info + else: + xml_str = xml_str.replace('\r', '').replace('\n', '').\ + replace('xmlns="urn:ietf:params:xml:ns:netconf:base:1.0"', "").\ + replace('xmlns="http://www.huawei.com/netconf/vrp"', "") + root = ElementTree.fromstring(xml_str) + dfs_info = root.findall( + "dfs/groupInstances/groupInstance") + if dfs_info: + for tmp in dfs_info: + for site in tmp: + if site.tag in ["groupId", "priority", "ipAddress", "srcVpnName"]: + dfs_group_info[site.tag] = site.text + + dfs_nick_info = root.findall( + "dfs/groupInstances/groupInstance/trillType") + + if dfs_nick_info: + for tmp in dfs_nick_info: + for site in tmp: + if site.tag in ["localNickname", "pseudoNickname", "pseudoPriority"]: + dfs_group_info[site.tag] = site.text + return dfs_group_info + + def get_peer_link_info(self): + """ get peer link info.""" + + peer_link_info = dict() + conf_str = CE_NC_GET_PEER_LINK_INFO + xml_str = get_nc_config(self.module, conf_str) + if "" in xml_str: + return peer_link_info + else: + xml_str = xml_str.replace('\r', '').replace('\n', '').\ + replace('xmlns="urn:ietf:params:xml:ns:netconf:base:1.0"', "").\ + replace('xmlns="http://www.huawei.com/netconf/vrp"', "") + + root = ElementTree.fromstring(xml_str) + link_info = root.findall( + "mlag/peerlinks/peerlink") + if link_info: + for tmp in link_info: + for site in tmp: + if site.tag in ["linkId", "portName"]: + peer_link_info[site.tag] = site.text + return peer_link_info + + def is_dfs_group_info_change(self): + """whether dfs group info""" + if not self.dfs_group_info: + return False + + if self.priority_id and self.dfs_group_info["priority"] != self.priority_id: + return True + if self.ip_address and self.dfs_group_info["ipAddress"] != self.ip_address: + return True + if self.vpn_instance_name and self.dfs_group_info["srcVpnName"] != self.vpn_instance_name: + return True + if self.nickname and self.dfs_group_info["localNickname"] != self.nickname: + return True + if self.pseudo_nickname and self.dfs_group_info["pseudoNickname"] != self.pseudo_nickname: + return True + if self.pseudo_priority and self.dfs_group_info["pseudoPriority"] != self.pseudo_priority: + return True + return False + + def check_dfs_group_info_change(self): + """check dfs group info""" + if not self.dfs_group_info: + return True + + if self.priority_id and self.dfs_group_info["priority"] == self.priority_id: + return True + if self.ip_address and self.dfs_group_info["ipAddress"] == self.ip_address: + return True + if self.vpn_instance_name and self.dfs_group_info["srcVpnName"] == self.vpn_instance_name: + return True + if self.nickname and self.dfs_group_info["localNickname"] == self.nickname: + return True + if self.pseudo_nickname and self.dfs_group_info["pseudoNickname"] == self.pseudo_nickname: + return True + if self.pseudo_priority and self.dfs_group_info["pseudoPriority"] == self.pseudo_priority: + return True + return False + + def modify_dfs_group(self): + """modify dfs group info""" + + if self.is_dfs_group_info_change(): + + conf_str = CE_NC_MERGE_DFS_GROUP_INFO_HEADER % self.dfs_group_id + if self.priority_id and self.dfs_group_info["priority"] != self.priority_id: + conf_str += "%s" % self.priority_id + if self.ip_address and self.dfs_group_info["ipAddress"] != self.ip_address: + conf_str += "%s" % self.ip_address + if 
self.vpn_instance_name and self.dfs_group_info["srcVpnName"] != self.vpn_instance_name: + if not self.ip_address: + self.module.fail_json( + msg='Error: ip_address can not be null if vpn_instance_name is exist.') + conf_str += "%s" % self.vpn_instance_name + + if self.nickname or self.pseudo_nickname or self.pseudo_priority: + conf_str += "" + if self.nickname and self.dfs_group_info["localNickname"] != self.nickname: + conf_str += "%s" % self.nickname + if self.pseudo_nickname and self.dfs_group_info["pseudoNickname"] != self.pseudo_nickname: + conf_str += "%s" % self.pseudo_nickname + + if self.pseudo_priority and self.dfs_group_info["pseudoPriority"] != self.pseudo_priority: + if not self.pseudo_nickname: + self.module.fail_json( + msg='Error: pseudo_nickname can not be null if pseudo_priority is exist.') + conf_str += "%s" % self.pseudo_priority + conf_str += "" + + conf_str += CE_NC_MERGE_DFS_GROUP_INFO_TAIL + recv_xml = set_nc_config(self.module, conf_str) + if "" not in recv_xml: + self.module.fail_json( + msg='Error: Merge DFS group info failed.') + + self.updates_cmd.append("dfs-group 1") + if self.priority_id: + self.updates_cmd.append("priority %s" % self.priority_id) + if self.ip_address: + if self.vpn_instance_name: + self.updates_cmd.append( + "source ip %s vpn-instance %s" % (self.ip_address, self.vpn_instance_name)) + else: + self.updates_cmd.append("source ip %s" % self.ip_address) + if self.nickname: + self.updates_cmd.append("source nickname %s" % self.nickname) + if self.pseudo_nickname: + if self.pseudo_priority: + self.updates_cmd.append( + "pseudo-nickname %s priority %s" % (self.pseudo_nickname, self.pseudo_priority)) + else: + self.updates_cmd.append( + "pseudo-nickname %s" % self.pseudo_nickname) + + self.changed = True + + def create_dfs_group(self): + """create dfs group info""" + + conf_str = CE_NC_CREATE_DFS_GROUP_INFO_HEADER % self.dfs_group_id + if self.priority_id and self.priority_id != 100: + conf_str += "%s" % self.priority_id + if self.ip_address: + conf_str += "%s" % self.ip_address + if self.vpn_instance_name: + if not self.ip_address: + self.module.fail_json( + msg='Error: ip_address can not be null if vpn_instance_name is exist.') + conf_str += "%s" % self.vpn_instance_name + + if self.nickname or self.pseudo_nickname or self.pseudo_priority: + conf_str += "" + if self.nickname: + conf_str += "%s" % self.nickname + if self.pseudo_nickname: + conf_str += "%s" % self.pseudo_nickname + if self.pseudo_priority: + if not self.pseudo_nickname: + self.module.fail_json( + msg='Error: pseudo_nickname can not be null if pseudo_priority is exist.') + conf_str += "%s" % self.pseudo_priority + conf_str += "" + + conf_str += CE_NC_CREATE_DFS_GROUP_INFO_TAIL + recv_xml = set_nc_config(self.module, conf_str) + if "" not in recv_xml: + self.module.fail_json( + msg='Error: Merge DFS group info failed.') + + self.updates_cmd.append("dfs-group 1") + if self.priority_id: + self.updates_cmd.append("priority %s" % self.priority_id) + if self.ip_address: + if self.vpn_instance_name: + self.updates_cmd.append( + "source ip %s vpn-instance %s" % (self.ip_address, self.vpn_instance_name)) + else: + self.updates_cmd.append("source ip %s" % self.ip_address) + if self.nickname: + self.updates_cmd.append("source nickname %s" % self.nickname) + if self.pseudo_nickname: + if self.pseudo_priority: + self.updates_cmd.append( + "pseudo-nickname %s priority %s" % (self.pseudo_nickname, self.pseudo_priority)) + else: + self.updates_cmd.append( + "pseudo-nickname %s" % 
self.pseudo_nickname) + + self.changed = True + + def delete_dfs_group(self): + """delete dfg group""" + + conf_str = CE_NC_DELETE_DFS_GROUP_INFO_HEADER % self.dfs_group_id + conf_str += CE_NC_DELETE_DFS_GROUP_INFO_TAIL + + recv_xml = set_nc_config(self.module, conf_str) + if "" not in recv_xml: + self.module.fail_json( + msg='Error: Delete DFS group id failed.') + self.updates_cmd.append("undo dfs-group 1") + self.changed = True + + def delete_dfs_group_attribute(self): + """delete dfg group attribute info""" + + conf_str = CE_NC_DELETE_DFS_GROUP_ATTRIBUTE_HEADER % self.dfs_group_id + change = False + if self.priority_id and self.dfs_group_info["priority"] == self.priority_id: + conf_str += "%s" % self.priority_id + change = True + self.updates_cmd.append("undo priority %s" % self.priority_id) + if self.ip_address and self.dfs_group_info["ipAddress"] == self.ip_address: + if self.vpn_instance_name and self.dfs_group_info["srcVpnName"] == self.vpn_instance_name: + conf_str += "%s" % self.ip_address + conf_str += "%s" % self.vpn_instance_name + self.updates_cmd.append( + "undo source ip %s vpn-instance %s" % (self.ip_address, self.vpn_instance_name)) + else: + conf_str += "%s" % self.ip_address + self.updates_cmd.append("undo source ip %s" % self.ip_address) + change = True + + conf_str += CE_NC_DELETE_DFS_GROUP_ATTRIBUTE_TAIL + + if change: + self.updates_cmd.append("undo dfs-group 1") + recv_xml = set_nc_config(self.module, conf_str) + if "" not in recv_xml: + self.module.fail_json( + msg='Error: Delete DFS group attribute failed.') + self.changed = True + + def delete_dfs_group_nick(self): + + conf_str = CE_NC_DELETE_DFS_GROUP_ATTRIBUTE_HEADER % self.dfs_group_id + conf_str = conf_str.replace('', '') + change = False + + if self.nickname or self.pseudo_nickname: + conf_str += "" + if self.nickname and self.dfs_group_info["localNickname"] == self.nickname: + conf_str += "%s" % self.nickname + change = True + self.updates_cmd.append("undo source nickname %s" % self.nickname) + if self.pseudo_nickname and self.dfs_group_info["pseudoNickname"] == self.pseudo_nickname: + conf_str += "%s" % self.pseudo_nickname + if self.pseudo_priority and self.dfs_group_info["pseudoPriority"] == self.pseudo_priority: + self.updates_cmd.append( + "undo pseudo-nickname %s priority %s" % (self.pseudo_nickname, self.pseudo_priority)) + if not self.pseudo_priority: + self.updates_cmd.append( + "undo pseudo-nickname %s" % self.pseudo_nickname) + change = True + conf_str += "" + + conf_str += CE_NC_DELETE_DFS_GROUP_ATTRIBUTE_TAIL + + if change: + recv_xml = set_nc_config(self.module, conf_str) + if "" not in recv_xml: + self.module.fail_json( + msg='Error: Delete DFS group attribute failed.') + self.changed = True + + def modify_peer_link(self): + """modify peer link info""" + + eth_trunk_id = "Eth-Trunk" + eth_trunk_id += self.eth_trunk_id + if self.eth_trunk_id and eth_trunk_id != self.peer_link_info.get("portName"): + conf_str = CE_NC_MERGE_PEER_LINK_INFO % ( + self.peer_link_id, eth_trunk_id) + recv_xml = set_nc_config(self.module, conf_str) + if "" not in recv_xml: + self.module.fail_json( + msg='Error: Merge peer link failed.') + self.updates_cmd.append("peer-link %s" % self.peer_link_id) + self.changed = True + + def delete_peer_link(self): + """delete peer link info""" + + eth_trunk_id = "Eth-Trunk" + eth_trunk_id += self.eth_trunk_id + if self.eth_trunk_id and eth_trunk_id == self.peer_link_info.get("portName"): + conf_str = CE_NC_DELETE_PEER_LINK_INFO % ( + self.peer_link_id, eth_trunk_id) + recv_xml = 
set_nc_config(self.module, conf_str) + if "" not in recv_xml: + self.module.fail_json( + msg='Error: Delete peer link failed.') + self.updates_cmd.append("undo peer-link %s" % self.peer_link_id) + self.changed = True + + def check_params(self): + """Check all input params""" + + # dfs_group_id check + if self.dfs_group_id: + if self.dfs_group_id != "1": + self.module.fail_json( + msg='Error: The value of dfs_group_id must be 1.') + + # nickname check + if self.nickname: + if not self.nickname.isdigit(): + self.module.fail_json( + msg='Error: The value of nickname is an integer.') + if int(self.nickname) < 1 or int(self.nickname) > 65471: + self.module.fail_json( + msg='Error: The nickname is not in the range from 1 to 65471.') + + # pseudo_nickname check + if self.pseudo_nickname: + if not self.pseudo_nickname.isdigit(): + self.module.fail_json( + msg='Error: The value of pseudo_nickname is an integer.') + if int(self.pseudo_nickname) < 1 or int(self.pseudo_nickname) > 65471: + self.module.fail_json( + msg='Error: The pseudo_nickname is not in the range from 1 to 65471.') + + # pseudo_priority check + if self.pseudo_priority: + if not self.pseudo_priority.isdigit(): + self.module.fail_json( + msg='Error: The value of pseudo_priority is an integer.') + if int(self.pseudo_priority) < 128 or int(self.pseudo_priority) > 255: + self.module.fail_json( + msg='Error: The pseudo_priority is not in the range from 128 to 255.') + + # ip_address check + if self.ip_address: + if not is_valid_address(self.ip_address): + self.module.fail_json( + msg='Error: The %s is not a valid ip address.' % self.ip_address) + + # vpn_instance_name check + if self.vpn_instance_name: + if len(self.vpn_instance_name) > 31 \ + or len(self.vpn_instance_name.replace(' ', '')) < 1: + self.module.fail_json( + msg='Error: The length of vpn_instance_name is not in the range from 1 to 31.') + + # priority_id check + if self.priority_id: + if not self.priority_id.isdigit(): + self.module.fail_json( + msg='Error: The value of priority_id is an integer.') + if int(self.priority_id) < 1 or int(self.priority_id) > 254: + self.module.fail_json( + msg='Error: The priority_id is not in the range from 1 to 254.') + + # peer_link_id check + if self.peer_link_id: + if self.peer_link_id != "1": + self.module.fail_json( + msg='Error: The value of peer_link_id must be 1.') + + # eth_trunk_id check + if self.eth_trunk_id: + if not self.eth_trunk_id.isdigit(): + self.module.fail_json( + msg='Error: The value of eth_trunk_id is an integer.') + if int(self.eth_trunk_id) < 0 or int(self.eth_trunk_id) > 511: + self.module.fail_json( + msg='Error: The value of eth_trunk_id is not in the range from 0 to 511.') + + def get_proposed(self): + """get proposed info""" + + if self.dfs_group_id: + self.proposed["dfs_group_id"] = self.dfs_group_id + if self.nickname: + self.proposed["nickname"] = self.nickname + if self.pseudo_nickname: + self.proposed["pseudo_nickname"] = self.pseudo_nickname + if self.pseudo_priority: + self.proposed["pseudo_priority"] = self.pseudo_priority + if self.ip_address: + self.proposed["ip_address"] = self.ip_address + if self.vpn_instance_name: + self.proposed["vpn_instance_name"] = self.vpn_instance_name + if self.priority_id: + self.proposed["priority_id"] = self.priority_id + if self.eth_trunk_id: + self.proposed["eth_trunk_id"] = self.eth_trunk_id + if self.peer_link_id: + self.proposed["peer_link_id"] = self.peer_link_id + if self.state: + self.proposed["state"] = self.state + + def get_existing(self): + """get existing 
info""" + if self.dfs_group_id: + self.dfs_group_info = self.get_dfs_group_info() + if self.peer_link_id and self.eth_trunk_id: + self.peer_link_info = self.get_peer_link_info() + if self.dfs_group_info: + if self.dfs_group_id: + self.existing["dfs_group_id"] = self.dfs_group_info["groupId"] + if self.nickname: + self.existing["nickname"] = self.dfs_group_info[ + "localNickname"] + if self.pseudo_nickname: + self.existing["pseudo_nickname"] = self.dfs_group_info[ + "pseudoNickname"] + if self.pseudo_priority: + self.existing["pseudo_priority"] = self.dfs_group_info[ + "pseudoPriority"] + if self.ip_address: + self.existing["ip_address"] = self.dfs_group_info["ipAddress"] + if self.vpn_instance_name: + self.existing["vpn_instance_name"] = self.dfs_group_info[ + "srcVpnName"] + if self.priority_id: + self.existing["priority_id"] = self.dfs_group_info["priority"] + if self.peer_link_info: + if self.eth_trunk_id: + self.existing["eth_trunk_id"] = self.peer_link_info["portName"] + if self.peer_link_id: + self.existing["peer_link_id"] = self.peer_link_info["linkId"] + + def get_end_state(self): + """get end state info""" + if self.dfs_group_id: + self.dfs_group_info = self.get_dfs_group_info() + if self.peer_link_id and self.eth_trunk_id: + self.peer_link_info = self.get_peer_link_info() + + if self.dfs_group_info: + if self.dfs_group_id: + self.end_state["dfs_group_id"] = self.dfs_group_info["groupId"] + if self.nickname: + self.end_state["nickname"] = self.dfs_group_info[ + "localNickname"] + if self.pseudo_nickname: + self.end_state["pseudo_nickname"] = self.dfs_group_info[ + "pseudoNickname"] + if self.pseudo_priority: + self.end_state["pseudo_priority"] = self.dfs_group_info[ + "pseudoPriority"] + if self.ip_address: + self.end_state["ip_address"] = self.dfs_group_info["ipAddress"] + if self.vpn_instance_name: + self.end_state["vpn_instance_name"] = self.dfs_group_info[ + "srcVpnName"] + if self.priority_id: + self.end_state["priority_id"] = self.dfs_group_info["priority"] + if self.peer_link_info: + if self.eth_trunk_id: + self.end_state[ + "eth_trunk_id"] = self.peer_link_info["portName"] + if self.peer_link_id: + self.end_state["peer_link_id"] = self.peer_link_info["linkId"] + if self.end_state == self.existing: + self.changed = False + + def work(self): + """worker""" + + self.check_params() + self.get_existing() + self.get_proposed() + if self.dfs_group_id: + if self.state == "present": + if self.dfs_group_info: + if self.nickname or self.pseudo_nickname or self.pseudo_priority or self.priority_id \ + or self.ip_address or self.vpn_instance_name: + if self.nickname: + if self.dfs_group_info["ipAddress"] not in ["0.0.0.0", None]: + self.module.fail_json(msg='Error: nickname and ip_address can not be exist at the ' + 'same time.') + if self.ip_address: + if self.dfs_group_info["localNickname"] not in ["0", None]: + self.module.fail_json(msg='Error: nickname and ip_address can not be exist at the ' + 'same time.') + self.modify_dfs_group() + else: + self.create_dfs_group() + else: + if not self.dfs_group_info: + self.module.fail_json( + msg='Error: DFS Group does not exist.') + if not self.nickname and not self.pseudo_nickname and not self.pseudo_priority and not self.priority_id\ + and not self.ip_address and not self.vpn_instance_name: + self.delete_dfs_group() + else: + self.updates_cmd.append("dfs-group 1") + self.delete_dfs_group_attribute() + self.delete_dfs_group_nick() + if "undo dfs-group 1" in self.updates_cmd: + self.updates_cmd = ["undo dfs-group 1"] + + if self.eth_trunk_id 
and not self.peer_link_id: + self.module.fail_json( + msg='Error: eth_trunk_id and peer_link_id must be configured at the same time.') + if self.peer_link_id and not self.eth_trunk_id: + self.module.fail_json( + msg='Error: eth_trunk_id and peer_link_id must be configured at the same time.') + + if self.eth_trunk_id and self.peer_link_id: + if self.state == "present": + self.modify_peer_link() + else: + if self.peer_link_info: + self.delete_peer_link() + + self.get_end_state() + self.results['changed'] = self.changed + self.results['proposed'] = self.proposed + self.results['existing'] = self.existing + self.results['end_state'] = self.end_state + if self.changed: + self.results['updates'] = self.updates_cmd + else: + self.results['updates'] = list() + + self.module.exit_json(**self.results) + + +def main(): + """ Module main """ + + argument_spec = dict( + dfs_group_id=dict(type='str'), + nickname=dict(type='str'), + pseudo_nickname=dict(type='str'), + pseudo_priority=dict(type='str'), + ip_address=dict(type='str'), + vpn_instance_name=dict(type='str'), + priority_id=dict(type='str'), + eth_trunk_id=dict(type='str'), + peer_link_id=dict(type='str'), + state=dict(type='str', default='present', + choices=['present', 'absent']) + ) + argument_spec.update(ce_argument_spec) + module = MlagConfig(argument_spec=argument_spec) + module.work() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/cloudengine/ce_mlag_interface.py b/plugins/modules/network/cloudengine/ce_mlag_interface.py new file mode 100644 index 0000000000..b5ec70554e --- /dev/null +++ b/plugins/modules/network/cloudengine/ce_mlag_interface.py @@ -0,0 +1,1042 @@ +#!/usr/bin/python +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: ce_mlag_interface +short_description: Manages MLAG interfaces on HUAWEI CloudEngine switches. +description: + - Manages MLAG interface attributes on HUAWEI CloudEngine switches. +author: + - Li Yanfeng (@QijunPan) +notes: + - This module requires the netconf system service be enabled on the remote device being managed. + - Recommended connection is C(netconf). + - This module also works with C(local) connections for legacy playbooks. +options: + eth_trunk_id: + description: + - Name of the local M-LAG interface. The value ranges from 0 to 511. + dfs_group_id: + description: + - ID of a DFS group. The value is 1. + mlag_id: + description: + - ID of the M-LAG. The value is an integer that ranges from 1 to 2048. + mlag_system_id: + description: + - M-LAG global LACP system MAC address. The value is a string of 0 to 255 characters. The default value + is the MAC address of the Ethernet port of MPU.
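+      # e.g. mlag_system_id: 0020-1409-0407 -- the xxxx-xxxx-xxxx MAC
+      # notation used in the EXAMPLES below (illustrative value).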
+ mlag_priority_id: + description: + - M-LAG global LACP system priority. The value is an integer ranging from 0 to 65535. + The default value is 32768. + interface: + description: + - Name of the interface that enters the Error-Down state when the peer-link fails. + The value is a string of 1 to 63 characters. + mlag_error_down: + description: + - Configure the interface on the slave device to enter the Error-Down state. + choices: ['enable','disable'] + state: + description: + - Specify desired state of the resource. + default: present + choices: ['present','absent'] + +''' + +EXAMPLES = ''' +- name: mlag interface module test + hosts: cloudengine + connection: local + gather_facts: no + vars: + cli: + host: "{{ inventory_hostname }}" + port: "{{ ansible_ssh_port }}" + username: "{{ username }}" + password: "{{ password }}" + transport: cli + + tasks: + + - name: Set interface mlag error down + ce_mlag_interface: + interface: 10GE2/0/1 + mlag_error_down: enable + provider: "{{ cli }}" + - name: Create mlag + ce_mlag_interface: + eth_trunk_id: 1 + dfs_group_id: 1 + mlag_id: 4 + provider: "{{ cli }}" + - name: Set mlag global attribute + ce_mlag_interface: + mlag_system_id: 0020-1409-0407 + mlag_priority_id: 5 + provider: "{{ cli }}" + - name: Set mlag interface attribute + ce_mlag_interface: + eth_trunk_id: 1 + mlag_system_id: 0020-1409-0400 + mlag_priority_id: 3 + provider: "{{ cli }}" +''' + +RETURN = ''' +changed: + description: check to see if a change was made on the device + returned: always + type: bool + sample: true +proposed: + description: k/v pairs of parameters passed into module + returned: always + type: dict + sample: { "interface": "eth-trunk1", + "mlag_error_down": "disable", + "state": "present" + } +existing: + description: k/v pairs of existing aaa server + returned: always + type: dict + sample: { "mlagErrorDownInfos": [ + { + "dfsgroupId": "1", + "portName": "Eth-Trunk1" + } + ] + } +end_state: + description: k/v pairs of aaa params after module execution + returned: always + type: dict + sample: {} +updates: + description: command sent to the device + returned: always + type: list + sample: { "interface eth-trunk1", + "undo m-lag unpaired-port suspend"} +''' + +import re +from xml.etree import ElementTree +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.network.cloudengine.ce import load_config +from ansible_collections.community.general.plugins.module_utils.network.cloudengine.ce import get_nc_config, set_nc_config, ce_argument_spec + +CE_NC_GET_MLAG_INFO = """ + + + + + %s + + + + +""" + +CE_NC_CREATE_MLAG_INFO = """ + + + + + %s + %s + %s + + + + +""" + +CE_NC_DELETE_MLAG_INFO = """ + + + + + %s + %s + + + + +""" + +CE_NC_GET_LACP_MLAG_INFO = """ + + + + + %s + + + + + + + + +""" + +CE_NC_SET_LACP_MLAG_INFO_HEAD = """ + + + + + %s + +""" + +CE_NC_SET_LACP_MLAG_INFO_TAIL = """ + + + + + +""" + +CE_NC_GET_GLOBAL_LACP_MLAG_INFO = """ + + + + + + + + + + +""" + +CE_NC_SET_GLOBAL_LACP_MLAG_INFO_HEAD = """ + + + + +""" + +CE_NC_SET_GLOBAL_LACP_MLAG_INFO_TAIL = """ + + + + +""" + +CE_NC_GET_MLAG_ERROR_DOWN_INFO = """ + + + + + + + + + + + + +""" + +CE_NC_CREATE_MLAG_ERROR_DOWN_INFO = """ + + + + + 1 + %s + + + + +""" + +CE_NC_DELETE_MLAG_ERROR_DOWN_INFO = """ + + + + + 1 + %s + + + + + +""" + + +def get_interface_type(interface): + """Gets the type of interface, such as 10GE, ETH-TRUNK, VLANIF...""" + + if interface is None: + return None + + iftype = None + + if interface.upper().startswith('GE'): + 
iftype = 'ge' + elif interface.upper().startswith('10GE'): + iftype = '10ge' + elif interface.upper().startswith('25GE'): + iftype = '25ge' + elif interface.upper().startswith('40GE'): + iftype = '40ge' + elif interface.upper().startswith('100GE'): + iftype = '100ge' + elif interface.upper().startswith('ETH-TRUNK'): + iftype = 'eth-trunk' + elif interface.upper().startswith('NULL'): + iftype = 'null' + else: + return None + + return iftype.lower() + + +class MlagInterface(object): + """ + Manages MLAG interface information. + """ + + def __init__(self, argument_spec): + self.spec = argument_spec + self.module = None + self.init_module() + + # module input info + self.eth_trunk_id = self.module.params['eth_trunk_id'] + self.dfs_group_id = self.module.params['dfs_group_id'] + self.mlag_id = self.module.params['mlag_id'] + self.mlag_system_id = self.module.params['mlag_system_id'] + self.mlag_priority_id = self.module.params['mlag_priority_id'] + self.interface = self.module.params['interface'] + self.mlag_error_down = self.module.params['mlag_error_down'] + self.state = self.module.params['state'] + + # state + self.changed = False + self.updates_cmd = list() + self.results = dict() + self.existing = dict() + self.proposed = dict() + self.end_state = dict() + + # mlag info + self.commands = list() + self.mlag_info = None + self.mlag_global_info = None + self.mlag_error_down_info = None + self.mlag_trunk_attribute_info = None + + def init_module(self): + """ init module """ + + self.module = AnsibleModule( + argument_spec=self.spec, supports_check_mode=True) + + def check_response(self, xml_str, xml_name): + """Check whether the response message indicates success.""" + + if "<ok/>" not in xml_str: + self.module.fail_json(msg='Error: %s failed.' % xml_name) + + def cli_add_command(self, command, undo=False): + """add command to self.update_cmd and self.commands""" + + if undo and command.lower() not in ["quit", "return"]: + cmd = "undo " + command + else: + cmd = command + + self.commands.append(cmd) # set to device + if command.lower() not in ["quit", "return"]: + self.updates_cmd.append(cmd) # show updates result + + def cli_load_config(self, commands): + """load config by cli""" + + if not self.module.check_mode: + load_config(self.module, commands) + + def get_mlag_info(self): + """ get mlag info.""" + + mlag_info = dict() + conf_str = CE_NC_GET_MLAG_INFO % ("Eth-Trunk%s" % self.eth_trunk_id) + xml_str = get_nc_config(self.module, conf_str) + if "<data/>" in xml_str: + return mlag_info + else: + xml_str = xml_str.replace('\r', '').replace('\n', '').\ + replace('xmlns="urn:ietf:params:xml:ns:netconf:base:1.0"', "").\ + replace('xmlns="http://www.huawei.com/netconf/vrp"', "") + + mlag_info["mlagInfos"] = list() + root = ElementTree.fromstring(xml_str) + dfs_mlag_infos = root.findall( + "./mlag/mlagInstances/mlagInstance") + + if dfs_mlag_infos: + for dfs_mlag_info in dfs_mlag_infos: + mlag_dict = dict() + for ele in dfs_mlag_info: + if ele.tag in ["dfsgroupId", "mlagId", "localMlagPort"]: + mlag_dict[ele.tag] = ele.text + mlag_info["mlagInfos"].append(mlag_dict) + return mlag_info + + def get_mlag_global_info(self): + """ get mlag global info.""" + + mlag_global_info = dict() + conf_str = CE_NC_GET_GLOBAL_LACP_MLAG_INFO + xml_str = get_nc_config(self.module, conf_str) + if "<data/>" in xml_str: + return mlag_global_info + else: + xml_str = xml_str.replace('\r', '').replace('\n', '').\ + replace('xmlns="urn:ietf:params:xml:ns:netconf:base:1.0"', "").\ + replace('xmlns="http://www.huawei.com/netconf/vrp"', "") +
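The get_* readers above (and their counterparts in every module below) share one recipe: issue the query with get_nc_config(), return early on an empty <data/> reply, strip the NETCONF base and Huawei VRP namespace declarations with plain str.replace() so that ElementTree paths stay short, then walk the tree with findall(). A minimal standalone sketch of that recipe follows; the sample reply is invented for illustration, not captured from a device:

from xml.etree import ElementTree

# Invented sample reply; a real one comes from get_nc_config(module, conf_str).
SAMPLE_REPLY = (
    '<data xmlns="urn:ietf:params:xml:ns:netconf:base:1.0">'
    '<ifmtrunk xmlns="http://www.huawei.com/netconf/vrp">'
    '<lacpSysInfo><lacpMlagGlobal>'
    '<lacpMlagSysId>0025-9e59-8c01</lacpMlagSysId>'
    '<lacpMlagPriority>32768</lacpMlagPriority>'
    '</lacpMlagGlobal></lacpSysInfo>'
    '</ifmtrunk></data>'
)

def parse_mlag_global(xml_str):
    """Strip namespace declarations, then collect the LACP M-LAG attributes."""
    xml_str = xml_str.replace('\r', '').replace('\n', '') \
        .replace('xmlns="urn:ietf:params:xml:ns:netconf:base:1.0"', '') \
        .replace('xmlns="http://www.huawei.com/netconf/vrp"', '')
    info = dict()
    root = ElementTree.fromstring(xml_str)
    for node in root.findall('./ifmtrunk/lacpSysInfo/lacpMlagGlobal'):
        for site in node:
            if site.tag in ('lacpMlagSysId', 'lacpMlagPriority'):
                info[site.tag] = site.text
    return info

print(parse_mlag_global(SAMPLE_REPLY))
# -> {'lacpMlagSysId': '0025-9e59-8c01', 'lacpMlagPriority': '32768'}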
+ root = ElementTree.fromstring(xml_str) + global_info = root.findall( + "./ifmtrunk/lacpSysInfo/lacpMlagGlobal") + + if global_info: + for tmp in global_info: + for site in tmp: + if site.tag in ["lacpMlagSysId", "lacpMlagPriority"]: + mlag_global_info[site.tag] = site.text + return mlag_global_info + + def get_mlag_trunk_attribute_info(self): + """ get mlag global info.""" + + mlag_trunk_attribute_info = dict() + eth_trunk = "Eth-Trunk" + eth_trunk += self.eth_trunk_id + conf_str = CE_NC_GET_LACP_MLAG_INFO % eth_trunk + xml_str = get_nc_config(self.module, conf_str) + if "" in xml_str: + return mlag_trunk_attribute_info + else: + xml_str = xml_str.replace('\r', '').replace('\n', '').\ + replace('xmlns="urn:ietf:params:xml:ns:netconf:base:1.0"', "").\ + replace('xmlns="http://www.huawei.com/netconf/vrp"', "") + + root = ElementTree.fromstring(xml_str) + global_info = root.findall( + "./ifmtrunk/TrunkIfs/TrunkIf/lacpMlagIf") + + if global_info: + for tmp in global_info: + for site in tmp: + if site.tag in ["lacpMlagSysId", "lacpMlagPriority"]: + mlag_trunk_attribute_info[site.tag] = site.text + return mlag_trunk_attribute_info + + def get_mlag_error_down_info(self): + """ get error down info.""" + + mlag_error_down_info = dict() + conf_str = CE_NC_GET_MLAG_ERROR_DOWN_INFO + xml_str = get_nc_config(self.module, conf_str) + if "" in xml_str: + return mlag_error_down_info + else: + xml_str = xml_str.replace('\r', '').replace('\n', '').\ + replace('xmlns="urn:ietf:params:xml:ns:netconf:base:1.0"', "").\ + replace('xmlns="http://www.huawei.com/netconf/vrp"', "") + + mlag_error_down_info["mlagErrorDownInfos"] = list() + root = ElementTree.fromstring(xml_str) + mlag_error_infos = root.findall( + "./mlag/errordowns/errordown") + + if mlag_error_infos: + for mlag_error_info in mlag_error_infos: + mlag_error_dict = dict() + for ele in mlag_error_info: + if ele.tag in ["dfsgroupId", "portName"]: + mlag_error_dict[ele.tag] = ele.text + mlag_error_down_info[ + "mlagErrorDownInfos"].append(mlag_error_dict) + return mlag_error_down_info + + def check_macaddr(self): + """check mac-address whether valid""" + + valid_char = '0123456789abcdef-' + mac = self.mlag_system_id + + if len(mac) > 16: + return False + + mac_list = re.findall(r'([0-9a-fA-F]+)', mac) + if len(mac_list) != 3: + return False + + if mac.count('-') != 2: + return False + + for _, value in enumerate(mac, start=0): + if value.lower() not in valid_char: + return False + if all((int(mac_list[0], base=16) == 0, int(mac_list[1], base=16) == 0, int(mac_list[2], base=16) == 0)): + return False + a = "000" + mac_list[0] + b = "000" + mac_list[1] + c = "000" + mac_list[2] + self.mlag_system_id = "-".join([a[-4:], b[-4:], c[-4:]]) + return True + + def check_params(self): + """Check all input params""" + + # eth_trunk_id check + if self.eth_trunk_id: + if not self.eth_trunk_id.isdigit(): + self.module.fail_json( + msg='Error: The value of eth_trunk_id is an integer.') + if int(self.eth_trunk_id) < 0 or int(self.eth_trunk_id) > 511: + self.module.fail_json( + msg='Error: The value of eth_trunk_id is not in the range from 0 to 511.') + + # dfs_group_id check + if self.dfs_group_id: + if self.dfs_group_id != "1": + self.module.fail_json( + msg='Error: The value of dfs_group_id must be 1.') + + # mlag_id check + if self.mlag_id: + if not self.mlag_id.isdigit(): + self.module.fail_json( + msg='Error: The value of mlag_id is an integer.') + if int(self.mlag_id) < 1 or int(self.mlag_id) > 2048: + self.module.fail_json( + msg='Error: The value of mlag_id is 
not in the range from 1 to 2048.') + + # mlag_system_id check + if self.mlag_system_id: + if not self.check_macaddr(): + self.module.fail_json( + msg="Error: mlag_system_id has invalid value %s." % self.mlag_system_id) + + # mlag_priority_id check + if self.mlag_priority_id: + if not self.mlag_priority_id.isdigit(): + self.module.fail_json( + msg='Error: The value of mlag_priority_id is an integer.') + if int(self.mlag_priority_id) < 0 or int(self.mlag_priority_id) > 254: + self.module.fail_json( + msg='Error: The value of mlag_priority_id is not in the range from 0 to 254.') + + # interface check + if self.interface: + intf_type = get_interface_type(self.interface) + if not intf_type: + self.module.fail_json( + msg='Error: Interface name of %s ' + 'is error.' % self.interface) + + def is_mlag_info_change(self): + """whether mlag info change""" + + if not self.mlag_info: + return True + + eth_trunk = "Eth-Trunk" + eth_trunk += self.eth_trunk_id + for info in self.mlag_info["mlagInfos"]: + if info["mlagId"] == self.mlag_id and info["localMlagPort"] == eth_trunk: + return False + return True + + def is_mlag_info_exist(self): + """whether mlag info exist""" + + if not self.mlag_info: + return False + + eth_trunk = "Eth-Trunk" + eth_trunk += self.eth_trunk_id + + for info in self.mlag_info["mlagInfos"]: + if info["localMlagPort"] == eth_trunk: + return True + return False + + def is_mlag_error_down_info_change(self): + """whether mlag error down info change""" + + if not self.mlag_error_down_info: + return True + + for info in self.mlag_error_down_info["mlagErrorDownInfos"]: + if info["portName"].upper() == self.interface.upper(): + return False + return True + + def is_mlag_error_down_info_exist(self): + """whether mlag error down info exist""" + + if not self.mlag_error_down_info: + return False + + for info in self.mlag_error_down_info["mlagErrorDownInfos"]: + if info["portName"].upper() == self.interface.upper(): + return True + return False + + def is_mlag_interface_info_change(self): + """whether mlag interface attribute info change""" + + if not self.mlag_trunk_attribute_info: + return True + + if self.mlag_system_id: + if self.mlag_trunk_attribute_info["lacpMlagSysId"] != self.mlag_system_id: + return True + if self.mlag_priority_id: + if self.mlag_trunk_attribute_info["lacpMlagPriority"] != self.mlag_priority_id: + return True + return False + + def is_mlag_interface_info_exist(self): + """whether mlag interface attribute info exist""" + + if not self.mlag_trunk_attribute_info: + return False + + if self.mlag_system_id: + if self.mlag_priority_id: + if self.mlag_trunk_attribute_info["lacpMlagSysId"] == self.mlag_system_id \ + and self.mlag_trunk_attribute_info["lacpMlagPriority"] == self.mlag_priority_id: + return True + else: + if self.mlag_trunk_attribute_info["lacpMlagSysId"] == self.mlag_system_id: + return True + + if self.mlag_priority_id: + if self.mlag_system_id: + if self.mlag_trunk_attribute_info["lacpMlagSysId"] == self.mlag_system_id \ + and self.mlag_trunk_attribute_info["lacpMlagPriority"] == self.mlag_priority_id: + return True + else: + if self.mlag_trunk_attribute_info["lacpMlagPriority"] == self.mlag_priority_id: + return True + + return False + + def is_mlag_global_info_change(self): + """whether mlag global attribute info change""" + + if not self.mlag_global_info: + return True + + if self.mlag_system_id: + if self.mlag_global_info["lacpMlagSysId"] != self.mlag_system_id: + return True + if self.mlag_priority_id: + if self.mlag_global_info["lacpMlagPriority"] != 
self.mlag_priority_id: + return True + return False + + def is_mlag_global_info_exist(self): + """whether mlag global attribute info exist""" + + if not self.mlag_global_info: + return False + + if self.mlag_system_id: + if self.mlag_priority_id: + if self.mlag_global_info["lacpMlagSysId"] == self.mlag_system_id \ + and self.mlag_global_info["lacpMlagPriority"] == self.mlag_priority_id: + return True + else: + if self.mlag_global_info["lacpMlagSysId"] == self.mlag_system_id: + return True + + if self.mlag_priority_id: + if self.mlag_system_id: + if self.mlag_global_info["lacpMlagSysId"] == self.mlag_system_id \ + and self.mlag_global_info["lacpMlagPriority"] == self.mlag_priority_id: + return True + else: + if self.mlag_global_info["lacpMlagPriority"] == self.mlag_priority_id: + return True + + return False + + def create_mlag(self): + """create mlag info""" + + if self.is_mlag_info_change(): + mlag_port = "Eth-Trunk" + mlag_port += self.eth_trunk_id + conf_str = CE_NC_CREATE_MLAG_INFO % ( + self.dfs_group_id, self.mlag_id, mlag_port) + recv_xml = set_nc_config(self.module, conf_str) + if "" not in recv_xml: + self.module.fail_json( + msg='Error: create mlag info failed.') + + self.updates_cmd.append("interface %s" % mlag_port) + self.updates_cmd.append("dfs-group %s m-lag %s" % + (self.dfs_group_id, self.mlag_id)) + self.changed = True + + def delete_mlag(self): + """delete mlag info""" + + if self.is_mlag_info_exist(): + mlag_port = "Eth-Trunk" + mlag_port += self.eth_trunk_id + conf_str = CE_NC_DELETE_MLAG_INFO % ( + self.dfs_group_id, mlag_port) + recv_xml = set_nc_config(self.module, conf_str) + if "" not in recv_xml: + self.module.fail_json( + msg='Error: delete mlag info failed.') + + self.updates_cmd.append("interface %s" % mlag_port) + self.updates_cmd.append( + "undo dfs-group %s m-lag %s" % (self.dfs_group_id, self.mlag_id)) + self.changed = True + + def create_mlag_error_down(self): + """create mlag error down info""" + + if self.is_mlag_error_down_info_change(): + conf_str = CE_NC_CREATE_MLAG_ERROR_DOWN_INFO % self.interface + recv_xml = set_nc_config(self.module, conf_str) + if "" not in recv_xml: + self.module.fail_json( + msg='Error: create mlag error down info failed.') + + self.updates_cmd.append("interface %s" % self.interface) + self.updates_cmd.append("m-lag unpaired-port suspend") + self.changed = True + + def delete_mlag_error_down(self): + """delete mlag error down info""" + + if self.is_mlag_error_down_info_exist(): + + conf_str = CE_NC_DELETE_MLAG_ERROR_DOWN_INFO % self.interface + recv_xml = set_nc_config(self.module, conf_str) + if "" not in recv_xml: + self.module.fail_json( + msg='Error: delete mlag error down info failed.') + + self.updates_cmd.append("interface %s" % self.interface) + self.updates_cmd.append("undo m-lag unpaired-port suspend") + self.changed = True + + def set_mlag_interface(self): + """set mlag interface attribute info""" + + if self.is_mlag_interface_info_change(): + mlag_port = "Eth-Trunk" + mlag_port += self.eth_trunk_id + conf_str = CE_NC_SET_LACP_MLAG_INFO_HEAD % mlag_port + if self.mlag_priority_id: + conf_str += "%s" % self.mlag_priority_id + if self.mlag_system_id: + conf_str += "%s" % self.mlag_system_id + conf_str += CE_NC_SET_LACP_MLAG_INFO_TAIL + recv_xml = set_nc_config(self.module, conf_str) + if "" not in recv_xml: + self.module.fail_json( + msg='Error: set mlag interface attribute info failed.') + + self.updates_cmd.append("interface %s" % mlag_port) + if self.mlag_priority_id: + self.updates_cmd.append( + "lacp m-lag 
priority %s" % self.mlag_priority_id) + + if self.mlag_system_id: + self.updates_cmd.append( + "lacp m-lag system-id %s" % self.mlag_system_id) + self.changed = True + + def delete_mlag_interface(self): + """delete mlag interface attribute info""" + + if self.is_mlag_interface_info_exist(): + mlag_port = "Eth-Trunk" + mlag_port += self.eth_trunk_id + conf_str = CE_NC_SET_LACP_MLAG_INFO_HEAD % mlag_port + cmd = "interface %s" % mlag_port + self.cli_add_command(cmd) + + if self.mlag_priority_id: + cmd = "lacp m-lag priority %s" % self.mlag_priority_id + conf_str += "" + self.cli_add_command(cmd, True) + + if self.mlag_system_id: + cmd = "lacp m-lag system-id %s" % self.mlag_system_id + conf_str += "" + self.cli_add_command(cmd, True) + + if self.commands: + conf_str += CE_NC_SET_LACP_MLAG_INFO_TAIL + recv_xml = set_nc_config(self.module, conf_str) + if "" not in recv_xml: + self.module.fail_json( + msg='Error: set mlag interface atrribute info failed.') + + self.changed = True + + def set_mlag_global(self): + """set mlag global attribute info""" + + if self.is_mlag_global_info_change(): + conf_str = CE_NC_SET_GLOBAL_LACP_MLAG_INFO_HEAD + if self.mlag_priority_id: + conf_str += "%s" % self.mlag_priority_id + if self.mlag_system_id: + conf_str += "%s" % self.mlag_system_id + conf_str += CE_NC_SET_GLOBAL_LACP_MLAG_INFO_TAIL + recv_xml = set_nc_config(self.module, conf_str) + if "" not in recv_xml: + self.module.fail_json( + msg='Error: set mlag interface attribute info failed.') + + if self.mlag_priority_id: + self.updates_cmd.append( + "lacp m-lag priority %s" % self.mlag_priority_id) + + if self.mlag_system_id: + self.updates_cmd.append( + "lacp m-lag system-id %s" % self.mlag_system_id) + self.changed = True + + def delete_mlag_global(self): + """delete mlag global attribute info""" + + xml_str = '' + if self.is_mlag_global_info_exist(): + if self.mlag_priority_id: + cmd = "lacp m-lag priority %s" % self.mlag_priority_id + xml_str += '' + self.cli_add_command(cmd, True) + + if self.mlag_system_id: + cmd = "lacp m-lag system-id %s" % self.mlag_system_id + xml_str += '' + self.cli_add_command(cmd, True) + + if xml_str != '': + conf_str = CE_NC_SET_GLOBAL_LACP_MLAG_INFO_HEAD + xml_str + CE_NC_SET_GLOBAL_LACP_MLAG_INFO_TAIL + recv_xml = set_nc_config(self.module, conf_str) + if "" not in recv_xml: + self.module.fail_json( + msg='Error: set mlag interface atrribute info failed.') + self.changed = True + + def get_proposed(self): + """get proposed info""" + + if self.eth_trunk_id: + self.proposed["eth_trunk_id"] = self.eth_trunk_id + if self.dfs_group_id: + self.proposed["dfs_group_id"] = self.dfs_group_id + if self.mlag_id: + self.proposed["mlag_id"] = self.mlag_id + if self.mlag_system_id: + self.proposed["mlag_system_id"] = self.mlag_system_id + if self.mlag_priority_id: + self.proposed["mlag_priority_id"] = self.mlag_priority_id + if self.interface: + self.proposed["interface"] = self.interface + if self.mlag_error_down: + self.proposed["mlag_error_down"] = self.mlag_error_down + if self.state: + self.proposed["state"] = self.state + + def get_existing(self): + """get existing info""" + + self.mlag_info = self.get_mlag_info() + self.mlag_global_info = self.get_mlag_global_info() + self.mlag_error_down_info = self.get_mlag_error_down_info() + + if self.eth_trunk_id or self.dfs_group_id or self.mlag_id: + if not self.mlag_system_id and not self.mlag_priority_id: + if self.mlag_info: + self.existing["mlagInfos"] = self.mlag_info["mlagInfos"] + + if self.mlag_system_id or self.mlag_priority_id: + 
if self.eth_trunk_id: + if self.mlag_trunk_attribute_info: + if self.mlag_system_id: + self.existing["lacpMlagSysId"] = self.mlag_trunk_attribute_info[ + "lacpMlagSysId"] + if self.mlag_priority_id: + self.existing["lacpMlagPriority"] = self.mlag_trunk_attribute_info[ + "lacpMlagPriority"] + else: + if self.mlag_global_info: + if self.mlag_system_id: + self.existing["lacpMlagSysId"] = self.mlag_global_info[ + "lacpMlagSysId"] + if self.mlag_priority_id: + self.existing["lacpMlagPriority"] = self.mlag_global_info[ + "lacpMlagPriority"] + + if self.interface or self.mlag_error_down: + if self.mlag_error_down_info: + self.existing["mlagErrorDownInfos"] = self.mlag_error_down_info[ + "mlagErrorDownInfos"] + + def get_end_state(self): + """get end state info""" + + if self.eth_trunk_id or self.dfs_group_id or self.mlag_id: + self.mlag_info = self.get_mlag_info() + if not self.mlag_system_id and not self.mlag_priority_id: + if self.mlag_info: + self.end_state["mlagInfos"] = self.mlag_info["mlagInfos"] + + if self.mlag_system_id or self.mlag_priority_id: + if self.eth_trunk_id: + self.mlag_trunk_attribute_info = self.get_mlag_trunk_attribute_info() + if self.mlag_trunk_attribute_info: + if self.mlag_system_id: + self.end_state["lacpMlagSysId"] = self.mlag_trunk_attribute_info[ + "lacpMlagSysId"] + if self.mlag_priority_id: + self.end_state["lacpMlagPriority"] = self.mlag_trunk_attribute_info[ + "lacpMlagPriority"] + else: + self.mlag_global_info = self.get_mlag_global_info() + if self.mlag_global_info: + if self.mlag_system_id: + self.end_state["lacpMlagSysId"] = self.mlag_global_info[ + "lacpMlagSysId"] + if self.mlag_priority_id: + self.end_state["lacpMlagPriority"] = self.mlag_global_info[ + "lacpMlagPriority"] + + if self.interface or self.mlag_error_down: + self.mlag_error_down_info = self.get_mlag_error_down_info() + if self.mlag_error_down_info: + self.end_state["mlagErrorDownInfos"] = self.mlag_error_down_info[ + "mlagErrorDownInfos"] + + def work(self): + """worker""" + + self.check_params() + self.get_proposed() + self.get_existing() + + if self.eth_trunk_id or self.dfs_group_id or self.mlag_id: + self.mlag_info = self.get_mlag_info() + if self.eth_trunk_id and self.dfs_group_id and self.mlag_id: + if self.state == "present": + self.create_mlag() + else: + self.delete_mlag() + else: + if not self.mlag_system_id and not self.mlag_priority_id: + self.module.fail_json( + msg='Error: eth_trunk_id, dfs_group_id, mlag_id must be config at the same time.') + + if self.mlag_system_id or self.mlag_priority_id: + + if self.eth_trunk_id: + self.mlag_trunk_attribute_info = self.get_mlag_trunk_attribute_info() + if self.mlag_system_id or self.mlag_priority_id: + if self.state == "present": + self.set_mlag_interface() + else: + self.delete_mlag_interface() + else: + self.mlag_global_info = self.get_mlag_global_info() + if self.mlag_system_id or self.mlag_priority_id: + if self.state == "present": + self.set_mlag_global() + else: + self.delete_mlag_global() + + if self.interface or self.mlag_error_down: + self.mlag_error_down_info = self.get_mlag_error_down_info() + if self.interface and self.mlag_error_down: + if self.mlag_error_down == "enable": + self.create_mlag_error_down() + else: + self.delete_mlag_error_down() + else: + self.module.fail_json( + msg='Error: interface, mlag_error_down must be config at the same time.') + + self.get_end_state() + if self.existing == self.end_state: + self.changed = False + self.results['changed'] = self.changed + self.results['proposed'] = self.proposed + 
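The assignments being made here follow the result contract that every CloudEngine module in this diff returns: proposed (what the task asked for), existing (device state read before the change), end_state (device state re-read afterwards), and updates (the CLI equivalents, emptied when nothing changed). A toy sketch of the convention with invented sample data:

def build_results(proposed, existing, end_state, updates_cmd):
    # changed is derived from an actual state diff rather than from
    # whether an RPC was attempted, which keeps reruns idempotent.
    changed = existing != end_state
    return {
        'changed': changed,
        'proposed': proposed,
        'existing': existing,
        'end_state': end_state,
        'updates': updates_cmd if changed else [],
    }

results = build_results(
    proposed={'mlag_priority_id': '5', 'state': 'present'},
    existing={'lacpMlagPriority': '32768'},
    end_state={'lacpMlagPriority': '5'},
    updates_cmd=['lacp m-lag priority 5'],
)
assert results['changed'] is True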
self.results['existing'] = self.existing + self.results['end_state'] = self.end_state + if self.changed: + self.results['updates'] = self.updates_cmd + else: + self.results['updates'] = list() + + self.module.exit_json(**self.results) + + +def main(): + """ Module main """ + + argument_spec = dict( + eth_trunk_id=dict(type='str'), + dfs_group_id=dict(type='str'), + mlag_id=dict(type='str'), + mlag_system_id=dict(type='str'), + mlag_priority_id=dict(type='str'), + interface=dict(type='str'), + mlag_error_down=dict(type='str', choices=['enable', 'disable']), + state=dict(type='str', default='present', + choices=['present', 'absent']) + ) + argument_spec.update(ce_argument_spec) + module = MlagInterface(argument_spec=argument_spec) + module.work() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/cloudengine/ce_mtu.py b/plugins/modules/network/cloudengine/ce_mtu.py new file mode 100644 index 0000000000..54d31a664c --- /dev/null +++ b/plugins/modules/network/cloudengine/ce_mtu.py @@ -0,0 +1,585 @@ +#!/usr/bin/python +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <http://www.gnu.org/licenses/>. +# + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: ce_mtu +short_description: Manages MTU settings on HUAWEI CloudEngine switches. +description: + - Manages MTU settings on HUAWEI CloudEngine switches. +author: QijunPan (@QijunPan) +notes: + - Either the C(sysmtu) parameter is required, or both the C(interface) and C(mtu) parameters are required. + - C(state=absent) unconfigures a given MTU if that value is currently present. + - Recommended connection is C(network_cli). + - This module also works with C(local) connections for legacy playbooks. +options: + interface: + description: + - Full name of interface, for example 40GE1/0/22. + mtu: + description: + - MTU for a specific interface. + The value is an integer ranging from 46 to 9600, in bytes. + jumbo_max: + description: + - Maximum frame size. The default value is 9216. + The value is an integer and expressed in bytes. The value range is 1536 to 12224 for the CE12800 + and 1536 to 12288 for ToR switches. + jumbo_min: + description: + - Non-jumbo frame size threshold. The default value is 1518. + The value is an integer that ranges from 1518 to jumbo_max, in bytes. + state: + description: + - Specify desired state of the resource.
+ default: present + choices: ['present','absent'] +''' + +EXAMPLES = ''' +- name: Mtu test + hosts: cloudengine + connection: local + gather_facts: no + vars: + cli: + host: "{{ inventory_hostname }}" + port: "{{ ansible_ssh_port }}" + username: "{{ username }}" + password: "{{ password }}" + transport: cli + + tasks: + + - name: "Config jumboframe on 40GE1/0/22" + ce_mtu: + interface: 40GE1/0/22 + jumbo_max: 9000 + jumbo_min: 8000 + provider: "{{ cli }}" + + - name: "Config mtu on 40GE1/0/22 (routed interface)" + ce_mtu: + interface: 40GE1/0/22 + mtu: 1600 + provider: "{{ cli }}" + + - name: "Config mtu on 40GE1/0/23 (switched interface)" + ce_mtu: + interface: 40GE1/0/22 + mtu: 9216 + provider: "{{ cli }}" + + - name: "Config mtu and jumboframe on 40GE1/0/22 (routed interface)" + ce_mtu: + interface: 40GE1/0/22 + mtu: 1601 + jumbo_max: 9001 + jumbo_min: 8001 + provider: "{{ cli }}" + + - name: "Unconfigure mtu and jumboframe on a given interface" + ce_mtu: + state: absent + interface: 40GE1/0/22 + provider: "{{ cli }}" +''' + +RETURN = ''' +proposed: + description: k/v pairs of parameters passed into module + returned: always + type: dict + sample: {"mtu": "1700", "jumbo_max": "9000", jumbo_min: "8000"} +existing: + description: k/v pairs of existing mtu/sysmtu on the interface/system + returned: always + type: dict + sample: {"mtu": "1600", "jumbo_max": "9216", "jumbo_min": "1518"} +end_state: + description: k/v pairs of mtu/sysmtu values after module execution + returned: always + type: dict + sample: {"mtu": "1700", "jumbo_max": "9000", jumbo_min: "8000"} +updates: + description: command sent to the device + returned: always + type: list + sample: ["interface 40GE1/0/23", "mtu 1700", "jumboframe enable 9000 8000"] +changed: + description: check to see if a change was made on the device + returned: always + type: bool + sample: true +''' + +import re +import copy +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.network.cloudengine.ce import ce_argument_spec, load_config +from ansible.module_utils.connection import exec_command + + +def is_interface_support_setjumboframe(interface): + """is interface support set jumboframe""" + + if interface is None: + return False + support_flag = False + if interface.upper().startswith('GE'): + support_flag = True + elif interface.upper().startswith('10GE'): + support_flag = True + elif interface.upper().startswith('25GE'): + support_flag = True + elif interface.upper().startswith('4X10GE'): + support_flag = True + elif interface.upper().startswith('40GE'): + support_flag = True + elif interface.upper().startswith('100GE'): + support_flag = True + else: + support_flag = False + return support_flag + + +def get_interface_type(interface): + """Gets the type of interface, such as 10GE, ETH-TRUNK, VLANIF...""" + + if interface is None: + return None + + iftype = None + + if interface.upper().startswith('GE'): + iftype = 'ge' + elif interface.upper().startswith('10GE'): + iftype = '10ge' + elif interface.upper().startswith('25GE'): + iftype = '25ge' + elif interface.upper().startswith('4X10GE'): + iftype = '4x10ge' + elif interface.upper().startswith('40GE'): + iftype = '40ge' + elif interface.upper().startswith('100GE'): + iftype = '100ge' + elif interface.upper().startswith('VLANIF'): + iftype = 'vlanif' + elif interface.upper().startswith('LOOPBACK'): + iftype = 'loopback' + elif interface.upper().startswith('METH'): + iftype = 'meth' + elif 
interface.upper().startswith('ETH-TRUNK'): + iftype = 'eth-trunk' + elif interface.upper().startswith('VBDIF'): + iftype = 'vbdif' + elif interface.upper().startswith('NVE'): + iftype = 'nve' + elif interface.upper().startswith('TUNNEL'): + iftype = 'tunnel' + elif interface.upper().startswith('ETHERNET'): + iftype = 'ethernet' + elif interface.upper().startswith('FCOE-PORT'): + iftype = 'fcoe-port' + elif interface.upper().startswith('FABRIC-PORT'): + iftype = 'fabric-port' + elif interface.upper().startswith('STACK-PORT'): + iftype = 'stack-Port' + elif interface.upper().startswith('NULL'): + iftype = 'null' + else: + return None + + return iftype.lower() + + +class Mtu(object): + """set mtu""" + + def __init__(self, argument_spec): + self.spec = argument_spec + self.module = None + self.init_module() + + # interface info + self.interface = self.module.params['interface'] + self.mtu = self.module.params['mtu'] + self.state = self.module.params['state'] + self.jbf_max = self.module.params['jumbo_max'] or None + self.jbf_min = self.module.params['jumbo_min'] or None + self.jbf_config = list() + self.jbf_cli = "" + self.commands = list() + + # state + self.changed = False + self.updates_cmd = list() + self.results = dict() + self.proposed = dict() + self.existing = dict() + self.end_state = dict() + self.intf_info = dict() # one interface info + self.intf_type = None # loopback tunnel ... + + def init_module(self): + """ init_module""" + + self.module = AnsibleModule( + argument_spec=self.spec, supports_check_mode=True) + + def get_config(self, flags=None): + """Retrieves the current config from the device or cache + """ + flags = [] if flags is None else flags + + cmd = 'display current-configuration ' + cmd += ' '.join(flags) + cmd = cmd.strip() + + rc, out, err = exec_command(self.module, cmd) + if rc != 0: + self.module.fail_json(msg=err) + cfg = str(out).strip() + + return cfg + + def get_interface_dict(self, ifname): + """ get one interface attributes dict.""" + intf_info = dict() + + flags = list() + exp = r"| ignore-case section include ^#\s+interface %s\s+" % ifname.replace(" ", "") + flags.append(exp) + output = self.get_config(flags) + output_list = output.split('\n') + if output_list is None: + return intf_info + + mtu = None + for config in output_list: + config = config.strip() + if config.startswith('mtu'): + mtu = re.findall(r'.*mtu\s*([0-9]*)', output)[0] + + intf_info = dict(ifName=ifname, + ifMtu=mtu) + + return intf_info + + def prase_jumboframe_para(self, config_str): + """prase_jumboframe_para""" + + interface_cli = "interface %s" % (self.interface.replace(" ", "").lower()) + if config_str.find(interface_cli) == -1: + self.module.fail_json(msg='Error: Interface does not exist.') + + try: + npos1 = config_str.index('jumboframe enable') + except ValueError: + # return default vale + return [9216, 1518] + try: + npos2 = config_str.index('\n', npos1) + config_str_tmp = config_str[npos1:npos2] + except ValueError: + config_str_tmp = config_str[npos1:] + + return re.findall(r'([0-9]+)', config_str_tmp) + + def cli_load_config(self): + """load config by cli""" + + if not self.module.check_mode: + if len(self.commands) > 1: + load_config(self.module, self.commands) + self.changed = True + + def cli_add_command(self, command, undo=False): + """add command to self.update_cmd and self.commands""" + + if undo and command.lower() not in ["quit", "return"]: + cmd = "undo " + command + else: + cmd = command + + self.commands.append(cmd) # set to device + + def 
get_jumboframe_config(self): + """ get_jumboframe_config""" + + flags = list() + exp = r"| ignore-case section include ^#\s+interface %s\s+" % self.interface.replace(" ", "") + flags.append(exp) + output = self.get_config(flags) + output = output.replace('*', '').lower() + + return self.prase_jumboframe_para(output) + + def set_jumboframe(self): + """ set_jumboframe""" + + if self.state == "present": + if not self.jbf_max and not self.jbf_min: + return + + jbf_value = self.get_jumboframe_config() + self.jbf_config = copy.deepcopy(jbf_value) + if len(jbf_value) == 1: + jbf_value.append("1518") + self.jbf_config.append("1518") + if not self.jbf_max: + return + + if (len(jbf_value) > 2) or (len(jbf_value) == 0): + self.module.fail_json( + msg='Error: Get jubmoframe config value num error.') + if self.jbf_min is None: + if jbf_value[0] == self.jbf_max: + return + else: + if (jbf_value[0] == self.jbf_max) \ + and (jbf_value[1] == self.jbf_min): + return + if jbf_value[0] != self.jbf_max: + jbf_value[0] = self.jbf_max + if (jbf_value[1] != self.jbf_min) and (self.jbf_min is not None): + jbf_value[1] = self.jbf_min + else: + jbf_value.pop(1) + else: + jbf_value = self.get_jumboframe_config() + self.jbf_config = copy.deepcopy(jbf_value) + if (jbf_value == [9216, 1518]): + return + jbf_value = [9216, 1518] + + if len(jbf_value) == 2: + self.jbf_cli = "jumboframe enable %s %s" % ( + jbf_value[0], jbf_value[1]) + else: + self.jbf_cli = "jumboframe enable %s" % (jbf_value[0]) + self.cli_add_command(self.jbf_cli) + + if self.state == "present": + if self.jbf_min: + self.updates_cmd.append( + "jumboframe enable %s %s" % (self.jbf_max, self.jbf_min)) + else: + self.updates_cmd.append("jumboframe enable %s" % (self.jbf_max)) + else: + self.updates_cmd.append("undo jumboframe enable") + + return + + def merge_interface(self, ifname, mtu): + """ Merge interface mtu.""" + + xmlstr = '' + change = False + + command = "interface %s" % ifname + self.cli_add_command(command) + + if self.state == "present": + if mtu and self.intf_info["ifMtu"] != mtu: + command = "mtu %s" % mtu + self.cli_add_command(command) + self.updates_cmd.append("mtu %s" % mtu) + change = True + else: + if self.intf_info["ifMtu"] != '1500' and self.intf_info["ifMtu"]: + command = "mtu 1500" + self.cli_add_command(command) + self.updates_cmd.append("undo mtu") + change = True + + return + + def check_params(self): + """Check all input params""" + + # interface type check + if self.interface: + self.intf_type = get_interface_type(self.interface) + if not self.intf_type: + self.module.fail_json( + msg='Error: Interface name of %s ' + 'is error.' % self.interface) + + if not self.intf_type: + self.module.fail_json( + msg='Error: Interface %s is error.') + + # mtu check mtu + if self.mtu: + if not self.mtu.isdigit(): + self.module.fail_json(msg='Error: Mtu is invalid.') + # check mtu range + if int(self.mtu) < 46 or int(self.mtu) > 9600: + self.module.fail_json( + msg='Error: Mtu is not in the range from 46 to 9600.') + # get interface info + self.intf_info = self.get_interface_dict(self.interface) + if not self.intf_info: + self.module.fail_json(msg='Error: interface does not exist.') + + # check interface can set jumbo frame + if self.state == 'present': + if self.jbf_max: + if not is_interface_support_setjumboframe(self.interface): + self.module.fail_json( + msg='Error: Interface %s does not support jumboframe set.' 
% self.interface) + if not self.jbf_max.isdigit(): + self.module.fail_json( + msg='Error: Max jumboframe is not digit.') + if (int(self.jbf_max) > 12288) or (int(self.jbf_max) < 1536): + self.module.fail_json( + msg='Error: Max jumboframe is between 1536 to 12288.') + + if self.jbf_min: + if not self.jbf_min.isdigit(): + self.module.fail_json( + msg='Error: Min jumboframe is not digit.') + if not self.jbf_max: + self.module.fail_json( + msg='Error: please specify max jumboframe value.') + if (int(self.jbf_min) > int(self.jbf_max)) or (int(self.jbf_min) < 1518): + self.module.fail_json( + msg='Error: Min jumboframe is between ' + '1518 to jumboframe max value.') + + if self.jbf_min is not None: + if self.jbf_max is None: + self.module.fail_json( + msg='Error: please input MAX jumboframe ' + 'value.') + + def get_proposed(self): + """ get_proposed""" + + self.proposed['state'] = self.state + if self.interface: + self.proposed["interface"] = self.interface + + if self.state == 'present': + if self.mtu: + self.proposed["mtu"] = self.mtu + if self.jbf_max: + if self.jbf_min: + self.proposed["jumboframe"] = "jumboframe enable %s %s" % ( + self.jbf_max, self.jbf_min) + else: + self.proposed[ + "jumboframe"] = "jumboframe enable %s %s" % (self.jbf_max, 1518) + + def get_existing(self): + """ get_existing""" + + if self.intf_info: + self.existing["interface"] = self.intf_info["ifName"] + self.existing["mtu"] = self.intf_info["ifMtu"] + + if self.intf_info: + if not self.existing["interface"]: + self.existing["interface"] = self.interface + + if len(self.jbf_config) != 2: + return + + self.existing["jumboframe"] = "jumboframe enable %s %s" % ( + self.jbf_config[0], self.jbf_config[1]) + + def get_end_state(self): + """ get_end_state""" + + if self.intf_info: + end_info = self.get_interface_dict(self.interface) + if end_info: + self.end_state["interface"] = end_info["ifName"] + self.end_state["mtu"] = end_info["ifMtu"] + if self.intf_info: + if not self.end_state["interface"]: + self.end_state["interface"] = self.interface + + if self.state == 'absent': + self.end_state["jumboframe"] = "jumboframe enable %s %s" % ( + 9216, 1518) + elif not self.jbf_max and not self.jbf_min: + if len(self.jbf_config) != 2: + return + self.end_state["jumboframe"] = "jumboframe enable %s %s" % ( + self.jbf_config[0], self.jbf_config[1]) + elif self.jbf_min: + self.end_state["jumboframe"] = "jumboframe enable %s %s" % ( + self.jbf_max, self.jbf_min) + else: + self.end_state[ + "jumboframe"] = "jumboframe enable %s %s" % (self.jbf_max, 1518) + if self.end_state == self.existing: + self.changed = False + + def work(self): + """worker""" + self.check_params() + + self.get_proposed() + + self.merge_interface(self.interface, self.mtu) + self.set_jumboframe() + self.cli_load_config() + + self.get_existing() + self.get_end_state() + self.results['changed'] = self.changed + self.results['proposed'] = self.proposed + self.results['existing'] = self.existing + self.results['end_state'] = self.end_state + if self.changed: + self.results['updates'] = self.updates_cmd + else: + self.results['updates'] = list() + + self.module.exit_json(**self.results) + + +def main(): + """ main""" + + argument_spec = dict( + interface=dict(required=True, type='str'), + mtu=dict(type='str'), + state=dict(choices=['absent', 'present'], + default='present', required=False), + jumbo_max=dict(type='str'), + jumbo_min=dict(type='str'), + ) + argument_spec.update(ce_argument_spec) + interface = Mtu(argument_spec) + interface.work() + + +if __name__ == 
'__main__': + main() diff --git a/plugins/modules/network/cloudengine/ce_multicast_global.py b/plugins/modules/network/cloudengine/ce_multicast_global.py new file mode 100644 index 0000000000..c5cafce220 --- /dev/null +++ b/plugins/modules/network/cloudengine/ce_multicast_global.py @@ -0,0 +1,289 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright 2019 Red Hat +# GNU General Public License v3.0+ +# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: ce_multicast_global +author: xuxiaowei0512 (@xuxiaowei0512) +short_description: Manages multicast global configuration on HUAWEI CloudEngine switches. +description: + - Manages multicast global on HUAWEI CloudEngine switches. +notes: + - If no vrf is supplied, vrf is set to default. + - If I(state=absent), the route will be removed, regardless of the non-required parameters. + - This module requires the netconf system service be enabled on the remote device being managed. + - This module works with connection C(netconf). +options: + aftype: + description: + - Destination ip address family type of static route. + required: true + type: str + choices: ['v4','v6'] + vrf: + description: + - VPN instance of destination ip address. + type: str + state: + description: + - Specify desired state of the resource. + type: str + default: present + choices: ['present','absent'] +''' + +EXAMPLES = ''' +--- + - name: multicast routing-enable + ce_multicast_global: + aftype: v4 + state: absent + provider: "{{ cli }}" + - name: multicast routing-enable + ce_multicast_global: + aftype: v4 + state: present + provider: "{{ cli }}" + - name: multicast routing-enable + ce_multicast_global: + aftype: v4 + vrf: vrf1 + provider: "{{ cli }}" + +''' +RETURN = ''' +proposed: + description: k/v pairs of parameters passed into module + returned: always + type: dict + sample: {"addressFamily": "ipv4unicast", "state": "present", "vrfName": "_public_"} +existing: + description: k/v pairs of existing switchport + returned: always + type: dict + sample: {} +end_state: + description: k/v pairs of switchport after module execution + returned: always + type: dict + sample: {"addressFamily": "ipv4unicast", "state": "present", "vrfName": "_public_"} +updates: + description: command list sent to the device + returned: always + type: list + sample: ["multicast routing-enable"] +changed: + description: check to see if a change was made on the device + returned: always + type: bool + sample: true +''' + +from xml.etree import ElementTree +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.network.cloudengine.ce import get_nc_config, set_nc_config + +CE_NC_GET_MULTICAST_GLOBAL = """ + + + + + %s + %s + + + + +""" +CE_NC_MERGE_MULTICAST_GLOBAL = """ + + + + %s + %s + + + +""" +CE_NC_DELETE_MULTICAST_GLOBAL = """ + + + + %s + %s + + + +""" + + +def build_config_xml(xmlstr): + """build config xml""" + + return ' ' + xmlstr + ' ' + + +class MulticastGlobal(object): + """multicast global module""" + + def __init__(self, argument_spec): + """multicast global info""" + self.spec = argument_spec + self.module = None + self._initmodule_() + + self.aftype = self.module.params['aftype'] + self.state = self.module.params['state'] + if self.aftype == "v4": + self.version = "ipv4unicast" + 
else: + self.version = "ipv6unicast" + # vpn instance info + self.vrf = self.module.params['vrf'] + if self.vrf is None: + self.vrf = "_public_" + # state + self.changed = False + self.updates_cmd = list() + self.results = dict() + self.proposed = dict() + self.existing = dict() + self.end_state = dict() + + self.multicast_global_info = dict() + + def _initmodule_(self): + """init module""" + self.module = AnsibleModule( + argument_spec=self.spec, supports_check_mode=False) + + def _checkresponse_(self, xml_str, xml_name): + """Check whether the response message indicates success.""" + + if "<ok/>" not in xml_str: + self.module.fail_json(msg='Error: %s failed.' % xml_name) + + def set_change_state(self): + """set change state""" + state = self.state + change = False + self.get_multicast_global() + # new or edit + if state == 'present': + if not self.multicast_global_info.get('multicast_global'): + # i.e. self.multicast_global_info['multicast_global'] has no value + change = True + else: + # delete + if self.multicast_global_info.get('multicast_global'): + # i.e. self.multicast_global_info['multicast_global'] has value + change = True + self.changed = change + + def get_multicast_global(self): + """get one data""" + self.multicast_global_info["multicast_global"] = list() + getxmlstr = CE_NC_GET_MULTICAST_GLOBAL % ( + self.version, self.vrf) + xml_str = get_nc_config(self.module, getxmlstr) + if '<data/>' in xml_str: + return + xml_str = xml_str.replace('\r', '').replace('\n', ''). \ + replace('xmlns="urn:ietf:params:xml:ns:netconf:base:1.0"', ""). \ + replace('xmlns="http://www.huawei.com/netconf/vrp"', "") + root = ElementTree.fromstring(xml_str) + mcast_enable = root.findall( + "mcastbase/mcastAfsEnables/mcastAfsEnable") + if mcast_enable: + # i.e. mcast_enable = [{vrfName:11,addressFamily:'xx'},{vrfName:22,addressFamily:'xx'}...] + for mcast_enable_key in mcast_enable: + # i.e. 
mcast_enable_key = {vrfName:11,addressFamily:'xx'} + mcast_info = dict() + for ele in mcast_enable_key: + if ele.tag in ["vrfName", "addressFamily"]: + mcast_info[ele.tag] = ele.text + self.multicast_global_info['multicast_global'].append(mcast_info) + + def get_existing(self): + """get existing information""" + self.set_change_state() + self.existing["multicast_global"] = self.multicast_global_info["multicast_global"] + + def get_proposed(self): + """get proposed information""" + self.proposed['addressFamily'] = self.version + self.proposed['state'] = self.state + self.proposed['vrfName'] = self.vrf + + def set_multicast_global(self): + """set multicast global""" + if not self.changed: + return + version = self.version + state = self.state + if state == "present": + configxmlstr = CE_NC_MERGE_MULTICAST_GLOBAL % (self.vrf, version) + else: + configxmlstr = CE_NC_DELETE_MULTICAST_GLOBAL % (self.vrf, version) + + conf_str = build_config_xml(configxmlstr) + recv_xml = set_nc_config(self.module, conf_str) + self._checkresponse_(recv_xml, "SET_MULTICAST_GLOBAL") + + def set_update_cmd(self): + """set update command""" + if not self.changed: + return + if self.state == "present": + self.updates_cmd.append('multicast routing-enable') + else: + self.updates_cmd.append('undo multicast routing-enable') + + def get_end_state(self): + """get end state information""" + self.get_multicast_global() + self.end_state["multicast_global"] = self.multicast_global_info["multicast_global"] + + def work(self): + """worker""" + self.get_existing() + self.get_proposed() + self.set_multicast_global() + self.set_update_cmd() + self.get_end_state() + self.results['changed'] = self.changed + self.results['existing'] = self.existing + self.results['proposed'] = self.proposed + self.results['end_state'] = self.end_state + if self.changed: + self.results['updates'] = self.updates_cmd + else: + self.results['updates'] = list() + self.module.exit_json(**self.results) + + +def main(): + """main""" + + argument_spec = dict( + aftype=dict(choices=['v4', 'v6'], required=True), + vrf=dict(required=False, type='str'), + state=dict(choices=['absent', 'present'], default='present', required=False), + ) + interface = MulticastGlobal(argument_spec) + interface.work() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/cloudengine/ce_multicast_igmp_enable.py b/plugins/modules/network/cloudengine/ce_multicast_igmp_enable.py new file mode 100644 index 0000000000..9a57337859 --- /dev/null +++ b/plugins/modules/network/cloudengine/ce_multicast_igmp_enable.py @@ -0,0 +1,544 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright 2019 Red Hat +# GNU General Public License v3.0+ +# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: ce_multicast_igmp_enable +author: xuxiaowei0512 (@CloudEngine-Ansible) +short_description: Manages multicast igmp enable configuration on HUAWEI CloudEngine switches. +description: + - Manages multicast igmp on HUAWEI CloudEngine switches. +notes: + - If no vrf is supplied, vrf is set to default. + If I(state=absent), the route will be removed, regardless of the + non-required parameters. + - This module requires the netconf system service be enabled on + the remote device being managed. + - This module works with connection C(netconf). 
+options: + aftype: + description: + - Destination ip address family type of static route. + required: true + type: str + choices: ['v4','v6'] + features: + description: + - Distinguish between Globally Enabled IGMP or + - Enabled IGMP under vlanID. + required: true + type: str + choices: ['global','vlan'] + vlan_id: + description: + - Virtual LAN identity. + type: int + igmp: + description: + - Enable Layer 2 multicast Snooping in a VLAN. + type: bool + version: + description: + - Specifies the IGMP version that can be processed. + default: 2 + type: int + proxy: + description: + - Layer 2 multicast snooping proxy is enabled. + type: bool + state: + description: + - Specify desired state of the resource. + choices: ['present','absent'] + default: present + type: str +''' + +EXAMPLES = ''' + + - name: configure global igmp enable + ce_multicast_igmp_enable: + aftype: v4 + features: 'global' + state: present + + - name: configure global igmp disable + ce_multicast_igmp_enable: + features: 'global' + aftype: v4 + state: absent + + - name: configure vlan igmp enable + ce_multicast_igmp_enable: + features: 'vlan' + aftype: v4 + vlan_id: 1 + igmp: true + + - name: new proxy,igmp,version + ce_multicast_igmp_enable: + features: 'vlan' + aftype: v4 + vlan_id: 1 + proxy: true + igmp: true + version: 1 + + - name: modify proxy,igmp,version + ce_multicast_igmp_enable: + features: 'vlan' + aftype: v4 + vlan_id: 1 + version: 2 + + - name: delete proxy,igmp,version + ce_multicast_igmp_enable: + features: 'vlan' + aftype: v4 + vlan_id: 1 + state: absent +''' +RETURN = ''' +proposed: + description: k/v pairs of parameters passed into module + returned: always + type: dict + sample: {"addrFamily": "ipv4unicast", "features": "vlan", "proxyEnable": "false", + "snoopingEnable": "false", "state": "absent", "version": 2, "vlanId": 1} +existing: + description: k/v pairs of existing switchport + returned: always + type: dict + sample: {} +end_state: + description: k/v pairs of switchport after module execution + returned: always + type: dict + sample: {} +updates: + description: command list sent to the device + returned: always + type: list + sample: ["undo igmp snooping enable", + "undo igmp snooping version", + "undo igmp snooping proxy"] +changed: + description: check if a change was made on the device + returned: always + type: bool + sample: true +''' + +from xml.etree import ElementTree +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.network.cloudengine.ce import get_nc_config, set_nc_config + +CE_NC_GET_IGMP_GLOBAL = """ + + + + + %s + + + + +""" +CE_NC_MERGE_IGMP_SYSVIEW = """ + + + + %s + + + +""" +CE_NC_DELETE_IGMP_SYSVIEW = """ + + + + %s + + + +""" +CE_NC_GET_IGMP_VLAN_INFO = """ + + + + + + %s + %s + + + + + + + + +""" +CE_NC_MERGE_IGMP_VLANVIEW = """ + + + + + %s + %s%s%s%s + + + + +""" +CE_NC_MERGE_IGMP_VLANVIEW_SNOENABLE = """ +%s +""" +CE_NC_MERGE_IGMP_VLANVIEW_VERSION = """ +%s +""" +CE_NC_MERGE_IGMP_VLANVIEW_PROXYENABLE = """ +%s +""" +CE_NC_DELETE_IGMP_VLANVIEW = """ + + + + + %s + %s + + + + +""" + + +def get_xml(xml, value): + """operate xml""" + tempxml = xml % value + return tempxml + + +def build_config_xml(xmlstr): + """build config xml""" + + return ' ' + xmlstr + ' ' + + +class IgmpSnoop(object): + """igmp snooping module""" + + def __init__(self, argument_spec): + """igmp snooping info""" + self.spec = argument_spec + self.module = None + self._initmodule_() + + self.aftype = self.module.params['aftype'] + 
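Further down, set_vlanview_igmp() assembles its edit-config payload by interpolating optional per-parameter fragments (snooping enable, version, proxy) into a per-VLAN merge template via get_xml(). The CE_NC_MERGE_IGMP_VLANVIEW* constants above lost their XML markup in this rendering of the diff, so the sketch below uses stand-in element names; it shows only the assembly pattern, not the device's real l2mc schema:

# Stand-ins for the CE_NC_MERGE_IGMP_VLANVIEW* templates; the element
# names here are assumptions for illustration only.
MERGE_HEAD = '<l2McVlanCfg><addrFamily>%s</addrFamily><vlanId>%s</vlanId>'
SNOOP_FRAG = '<snoopingEnable>%s</snoopingEnable>'
VERSION_FRAG = '<version>%s</version>'
PROXY_FRAG = '<proxyEnable>%s</proxyEnable>'
MERGE_TAIL = '</l2McVlanCfg>'

def build_vlan_igmp_config(addr_family, vlan_id, igmp=None, version=None, proxy=None):
    """Interpolate fragments only for the parameters the caller supplied."""
    body = MERGE_HEAD % (addr_family, vlan_id)
    if igmp is not None:
        body += SNOOP_FRAG % str(igmp).lower()
    if version is not None:
        body += VERSION_FRAG % version
    if proxy is not None:
        body += PROXY_FRAG % str(proxy).lower()
    return body + MERGE_TAIL

print(build_vlan_igmp_config('ipv4unicast', 1, igmp=True, version=2))
# -> <l2McVlanCfg><addrFamily>ipv4unicast</addrFamily><vlanId>1</vlanId>
#    <snoopingEnable>true</snoopingEnable><version>2</version></l2McVlanCfg>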
self.state = self.module.params['state'] + if self.aftype == "v4": + self.addr_family = "ipv4unicast" + else: + self.addr_family = "ipv6unicast" + self.features = self.module.params['features'] + self.vlan_id = self.module.params['vlan_id'] + self.igmp = str(self.module.params['igmp']).lower() + self.version = self.module.params['version'] + if self.version is None: + self.version = 2 + self.proxy = str(self.module.params['proxy']).lower() + # state + self.changed = False + self.updates_cmd = list() + self.results = dict() + self.proposed = dict() + self.existing = dict() + self.end_state = dict() + self.igmp_info_data = dict() + + def _initmodule_(self): + """init module""" + self.module = AnsibleModule( + argument_spec=self.spec, supports_check_mode=False) + + def _checkresponse_(self, xml_str, xml_name): + """check if response message is already succeed.""" + + if "" not in xml_str: + self.module.fail_json(msg='Error: %s failed.' % xml_name) + + def _checkparams_(self): + """check all input params""" + # check vlan id + if self.features == 'vlan': + if not self.vlan_id: + self.module.fail_json(msg='Error: missing required arguments: vlan_id.') + + if self.vlan_id: + if self.vlan_id <= 0 or self.vlan_id > 4094: + self.module.fail_json( + msg='Error: Vlan id is not in the range from 1 to 4094.') + # check version + if self.version: + if self.version <= 0 or self.version > 3: + self.module.fail_json( + msg='Error: Version id is not in the range from 1 to 3.') + + def set_change_state(self): + """set change state""" + state = self.state + change = False + # vlan view igmp + if self.features == 'vlan': + self.get_igmp_vlan() + change = self.compare_data() + else: + # sys view igmp(global) + self.get_igmp_global() + # new or edit + if state == 'present': + if not self.igmp_info_data["igmp_info"]: + # igmp_info_data has not igmp_info value. + change = True + else: + # delete + if self.igmp_info_data["igmp_info"]: + # igmp_info_data has not igmp_info value. + change = True + self.changed = change + + def compare_data(self): + """compare new data and old data""" + state = self.state + change = False + # new or edit + if state == 'present': + # edit + if self.igmp_info_data["igmp_info"]: + for data in self.igmp_info_data["igmp_info"]: + if self.addr_family == data["addrFamily"] and str(self.vlan_id) == data["vlanId"]: + if self.igmp: + if self.igmp != data["snoopingEnable"]: + change = True + if self.version: + if str(self.version) != data["version"]: + change = True + if self.proxy: + if self.proxy != data["proxyEnable"]: + change = True + # new + else: + change = True + else: + # delete + if self.igmp_info_data["igmp_info"]: + change = True + return change + + def get_igmp_vlan(self): + """get igmp vlan info data""" + self.igmp_info_data["igmp_info"] = list() + getxmlstr = CE_NC_GET_IGMP_VLAN_INFO % (self.addr_family, self.vlan_id) + xml_str = get_nc_config(self.module, getxmlstr) + if 'data/' in xml_str: + return + xml_str = xml_str.replace('\r', '').replace('\n', ''). \ + replace('xmlns="urn:ietf:params:xml:ns:netconf:base:1.0"', ""). 
\ + replace('xmlns="http://www.huawei.com/netconf/vrp"', "") + root = ElementTree.fromstring(xml_str) + igmp_enable = root.findall( + "l2mc/vlan/l2McVlanCfgs/l2McVlanCfg") + if igmp_enable: + # igmp_enable = [{addressFamily:'xx'}] + for igmp_enable_key in igmp_enable: + # igmp_enable_key = {addressFamily:'xx'} + igmp_global_info = dict() + for ele in igmp_enable_key: + if ele.tag in ["addrFamily", "vlanId", "snoopingEnable", "version", "proxyEnable"]: + igmp_global_info[ele.tag] = ele.text + self.igmp_info_data["igmp_info"].append(igmp_global_info) + + def get_igmp_global(self): + """get igmp global data""" + self.igmp_info_data["igmp_info"] = list() + getxmlstr = CE_NC_GET_IGMP_GLOBAL % ( + self.addr_family) + xml_str = get_nc_config(self.module, getxmlstr) + if 'data/' in xml_str: + return + xml_str = xml_str.replace('\r', '').replace('\n', ''). \ + replace('xmlns="urn:ietf:params:xml:ns:netconf:base:1.0"', ""). \ + replace('xmlns="http://www.huawei.com/netconf/vrp"', "") + root = ElementTree.fromstring(xml_str) + igmp_enable = root.findall( + 'l2mc/l2McSnpgEnables/l2McSnpgEnable') + if igmp_enable: + # igmp_enable = [{addressFamily:'xx'}] + for igmp_enable_key in igmp_enable: + # igmp_enable_key = {addressFamily:'xx'} + igmp_global_info = dict() + for ele in igmp_enable_key: + if ele.tag in ["addrFamily"]: + igmp_global_info[ele.tag] = ele.text + self.igmp_info_data["igmp_info"].append(igmp_global_info) + + def set_vlanview_igmp(self): + """set igmp of vlanview""" + if not self.changed: + return + addr_family = self.addr_family + state = self.state + igmp_xml = """\n""" + version_xml = """\n""" + proxy_xml = """\n""" + if state == "present": + if self.igmp: + igmp_xml = get_xml(CE_NC_MERGE_IGMP_VLANVIEW_SNOENABLE, self.igmp.lower()) + if str(self.version): + version_xml = get_xml(CE_NC_MERGE_IGMP_VLANVIEW_VERSION, self.version) + if self.proxy: + proxy_xml = get_xml(CE_NC_MERGE_IGMP_VLANVIEW_PROXYENABLE, self.proxy.lower()) + configxmlstr = CE_NC_MERGE_IGMP_VLANVIEW % ( + addr_family, self.vlan_id, igmp_xml, version_xml, proxy_xml) + else: + configxmlstr = CE_NC_DELETE_IGMP_VLANVIEW % (addr_family, self.vlan_id) + conf_str = build_config_xml(configxmlstr) + recv_xml = set_nc_config(self.module, conf_str) + self._checkresponse_(recv_xml, "SET_VLANVIEW_IGMP") + + def set_sysview_igmp(self): + """set igmp of sysview""" + if not self.changed: + return + version = self.addr_family + state = self.state + if state == "present": + configxmlstr = CE_NC_MERGE_IGMP_SYSVIEW % (version) + else: + configxmlstr = CE_NC_DELETE_IGMP_SYSVIEW % (version) + + conf_str = build_config_xml(configxmlstr) + recv_xml = set_nc_config(self.module, conf_str) + self._checkresponse_(recv_xml, "SET_SYSVIEW_IGMP") + + def set_sysview_cmd(self): + """set sysview update command""" + if not self.changed: + return + if self.state == "present": + self.updates_cmd.append('igmp snooping enable') + else: + self.updates_cmd.append('undo igmp snooping enable') + + def set_vlanview_cmd(self): + """set vlanview update command""" + if not self.changed: + return + if self.state == "present": + if self.igmp: + if self.igmp.lower() == 'true': + self.updates_cmd.append('igmp snooping enable') + else: + self.updates_cmd.append('undo igmp snooping enable') + if str(self.version): + self.updates_cmd.append('igmp snooping version %s' % (self.version)) + else: + self.updates_cmd.append('undo igmp snooping version') + if self.proxy: + if self.proxy.lower() == 'true': + self.updates_cmd.append('igmp snooping proxy') + else: + 
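+                    # proxy was passed as false, so revert to the default (no proxy)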
self.updates_cmd.append('undo igmp snooping proxy') + + else: + self.updates_cmd.append('undo igmp snooping enable') + self.updates_cmd.append('undo igmp snooping version') + self.updates_cmd.append('undo igmp snooping proxy') + + def get_existing(self): + """get existing information""" + self.set_change_state() + self.existing["igmp_info"] = self.igmp_info_data["igmp_info"] + + def get_proposed(self): + """get proposed information""" + self.proposed['addrFamily'] = self.addr_family + self.proposed['features'] = self.features + if self.features == 'vlan': + self.proposed['snoopingEnable'] = self.igmp + self.proposed['version'] = self.version + self.proposed['vlanId'] = self.vlan_id + self.proposed['proxyEnable'] = self.proxy + self.proposed['state'] = self.state + + def set_igmp_netconf(self): + """config netconf""" + if self.features == 'vlan': + self.set_vlanview_igmp() + else: + self.set_sysview_igmp() + + def set_update_cmd(self): + """set update command""" + if self.features == 'vlan': + self.set_vlanview_cmd() + else: + self.set_sysview_cmd() + + def get_end_state(self): + """get end state information""" + if self.features == 'vlan': + self.get_igmp_vlan() + else: + # sys view igmp(global) + self.get_igmp_global() + self.end_state["igmp_info"] = self.igmp_info_data["igmp_info"] + + def work(self): + """worker""" + self._checkparams_() + self.get_existing() + self.get_proposed() + self.set_igmp_netconf() + self.set_update_cmd() + self.get_end_state() + self.results['changed'] = self.changed + self.results['existing'] = self.existing + self.results['proposed'] = self.proposed + self.results['end_state'] = self.end_state + if self.changed: + self.results['updates'] = self.updates_cmd + else: + self.results['updates'] = list() + self.module.exit_json(**self.results) + + +def main(): + """main""" + argument_spec = dict( + aftype=dict(choices=['v4', 'v6'], required=True), + features=dict(required=True, choices=['global', 'vlan'], type='str'), + vlan_id=dict(type='int'), + igmp=dict(type='bool', default=False), + version=dict(type='int', default=2), + proxy=dict(type='bool', default=False), + state=dict(choices=['absent', 'present'], default='present'), + ) + interface = IgmpSnoop(argument_spec) + interface.work() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/cloudengine/ce_netconf.py b/plugins/modules/network/cloudengine/ce_netconf.py new file mode 100644 index 0000000000..141731f08c --- /dev/null +++ b/plugins/modules/network/cloudengine/ce_netconf.py @@ -0,0 +1,206 @@ +#!/usr/bin/python +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: ce_netconf +short_description: Run an arbitrary netconf command on HUAWEI CloudEngine switches. 
+description: + - Sends an arbitrary netconf command on HUAWEI CloudEngine switches. +author: + - wangdezhuang (@QijunPan) +notes: + - This module requires the netconf system service be enabled on the remote device being managed. + - Recommended connection is C(netconf). + - This module also works with C(local) connections for legacy playbooks. +options: + rpc: + description: + - The type of rpc. + required: true + choices: ['get', 'edit-config', 'execute-action', 'execute-cli'] + cfg_xml: + description: + - The config xml string. + required: true +''' + +EXAMPLES = ''' + +- name: CloudEngine netconf test + hosts: cloudengine + connection: local + gather_facts: no + vars: + cli: + host: "{{ inventory_hostname }}" + port: "{{ ansible_ssh_port }}" + username: "{{ username }}" + password: "{{ password }}" + transport: cli + + tasks: + + - name: "Netconf get operation" + ce_netconf: + rpc: get + cfg_xml: ' + + + + 10 + + + + + + + + + ' + provider: "{{ cli }}" + + - name: "Netconf edit-config operation" + ce_netconf: + rpc: edit-config + cfg_xml: ' + + + + default_wdz + local + invalid + + + + ' + provider: "{{ cli }}" + + - name: "Netconf execute-action operation" + ce_netconf: + rpc: execute-action + cfg_xml: ' + + + ipv4unicast + + + ' + provider: "{{ cli }}" +''' + +RETURN = ''' +changed: + description: check to see if a change was made on the device + returned: always + type: bool + sample: true +end_state: + description: k/v pairs of aaa params after module execution + returned: always + type: dict + sample: {"result": ["ok"]} +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.network.cloudengine.ce import get_nc_config, set_nc_config +from ansible_collections.community.general.plugins.module_utils.network.cloudengine.ce import execute_nc_action, ce_argument_spec, execute_nc_cli + + +def main(): + """ main """ + + argument_spec = dict( + rpc=dict(choices=['get', 'edit-config', + 'execute-action', 'execute-cli'], required=True), + cfg_xml=dict(required=True) + ) + + argument_spec.update(ce_argument_spec) + module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True) + + rpc = module.params['rpc'] + cfg_xml = module.params['cfg_xml'] + changed = False + end_state = dict() + + if rpc == "get": + + response = get_nc_config(module, cfg_xml) + + if "" in response: + end_state["result"] = "" + else: + tmp1 = response.split(r"") + tmp2 = tmp1[1].split(r"") + result = tmp2[0].split("\n") + + end_state["result"] = result + + elif rpc == "edit-config": + + response = set_nc_config(module, cfg_xml) + + if "" not in response: + module.fail_json(msg='rpc edit-config failed.') + + changed = True + end_state["result"] = "ok" + + elif rpc == "execute-action": + + response = execute_nc_action(module, cfg_xml) + + if "" not in response: + module.fail_json(msg='rpc execute-action failed.') + + changed = True + end_state["result"] = "ok" + + elif rpc == "execute-cli": + + response = execute_nc_cli(module, cfg_xml) + + if "" in response: + end_state["result"] = "" + else: + tmp1 = response.split(r"") + tmp2 = tmp1[1].split(r"") + result = tmp2[0].split("\n") + + end_state["result"] = result + + else: + module.fail_json(msg='please input correct rpc.') + + results = dict() + results['changed'] = changed + results['end_state'] = end_state + + module.exit_json(**results) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/cloudengine/ce_netstream_aging.py 
b/plugins/modules/network/cloudengine/ce_netstream_aging.py
new file mode 100644
index 0000000000..f4e6e87f81
--- /dev/null
+++ b/plugins/modules/network/cloudengine/ce_netstream_aging.py
@@ -0,0 +1,520 @@
+#!/usr/bin/python
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see .
+#
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+DOCUMENTATION = '''
+---
+module: ce_netstream_aging
+short_description: Manages timeout mode of NetStream on HUAWEI CloudEngine switches.
+description:
+    - Manages timeout mode of NetStream on HUAWEI CloudEngine switches.
+author: YangYang (@QijunPan)
+notes:
+    - Recommended connection is C(network_cli).
+    - This module also works with C(local) connections for legacy playbooks.
+options:
+    timeout_interval:
+        description:
+            - Netstream timeout interval.
+              If the timeout type is active, the interval ranges from 1 to 60 (minutes).
+              If the timeout type is inactive, the interval ranges from 5 to 600 (seconds).
+        default: 30
+    type:
+        description:
+            - Specifies the packet type of the netstream timeout active interval.
+        choices: ['ip', 'vxlan']
+    state:
+        description:
+            - Specify desired state of the resource.
+        choices: ['present', 'absent']
+        default: present
+    timeout_type:
+        description:
+            - Netstream timeout type.
+        choices: ['active', 'inactive', 'tcp-session', 'manual']
+    manual_slot:
+        description:
+            - Specifies the slot number of netstream manual timeout.
+'''
+
+EXAMPLES = '''
+- name: netstream aging module test
+  hosts: cloudengine
+  connection: local
+  gather_facts: no
+  vars:
+    cli:
+      host: "{{ inventory_hostname }}"
+      port: "{{ ansible_ssh_port }}"
+      username: "{{ username }}"
+      password: "{{ password }}"
+      transport: cli
+
+  tasks:
+
+  - name: Configure netstream ip timeout active interval, the interval is 40 minutes.
+    ce_netstream_aging:
+      timeout_interval: 40
+      type: ip
+      timeout_type: active
+      state: present
+      provider: "{{ cli }}"
+
+  - name: Configure netstream vxlan timeout active interval, the interval is 40 minutes.
+    ce_netstream_aging:
+      timeout_interval: 40
+      type: vxlan
+      timeout_type: active
+      state: present
+      provider: "{{ cli }}"
+
+  - name: Delete netstream ip timeout active interval, set the ip timeout interval to 30 minutes.
+    ce_netstream_aging:
+      type: ip
+      timeout_type: active
+      state: absent
+      provider: "{{ cli }}"
+
+  - name: Delete netstream vxlan timeout active interval, set the vxlan timeout interval to 30 minutes.
+    ce_netstream_aging:
+      type: vxlan
+      timeout_type: active
+      state: absent
+      provider: "{{ cli }}"
+
+  - name: Enable netstream ip tcp session timeout.
+    ce_netstream_aging:
+      type: ip
+      timeout_type: tcp-session
+      state: present
+      provider: "{{ cli }}"
+
+  - name: Enable netstream vxlan tcp session timeout.
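+    # tcp-session aging takes no interval; it is only enabled (present)
+    # or disabled (absent), which is why these tasks omit timeout_interval.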
+ ce_netstream_aging: + type: vxlan + timeout_type: tcp-session + state: present + provider: "{{ cli }}" + + - name: Disable netstream ip tcp session timeout. + ce_netstream_aging: + type: ip + timeout_type: tcp-session + state: absent + provider: "{{ cli }}" + + - name: Disable netstream vxlan tcp session timeout. + ce_netstream_aging: + type: vxlan + timeout_type: tcp-session + state: absent + provider: "{{ cli }}" +''' + +RETURN = ''' +proposed: + description: k/v pairs of parameters passed into module + returned: verbose mode + type: dict + sample: {"timeout_interval": "40", + "type": "ip", + "state": "absent", + "timeout_type": active} +existing: + description: k/v pairs of existing configuration + returned: verbose mode + type: dict + sample: {"active_timeout": [ + { + "ip": "40", + "vxlan": 30 + } + ], + "inactive_timeout": [ + { + "ip": 30, + "vxlan": 30 + } + ], + "tcp_timeout": [ + { + "ip": "disable", + "vxlan": "disable" + } + ]} +end_state: + description: k/v pairs of configuration after module execution + returned: verbose mode + type: dict + sample: {"active_timeout": [ + { + "ip": 30, + "vxlan": 30 + } + ], + "inactive_timeout": [ + { + "ip": 30, + "vxlan": 30 + } + ], + "tcp_timeout": [ + { + "ip": "disable", + "vxlan": "disable" + } + ]} +updates: + description: commands sent to the device + returned: always + type: list + sample: ["undo netstream timeout ip active 40"] +changed: + description: check to see if a change was made on the device + returned: always + type: bool + sample: true +''' + +import re +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.network.cloudengine.ce import exec_command, load_config +from ansible_collections.community.general.plugins.module_utils.network.cloudengine.ce import ce_argument_spec + + +class NetStreamAging(object): + """ + Manages netstream aging. 
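+    Reads the current netstream timeout configuration over the CLI,
+    compares it with the requested parameters, and generates the matching
+    (undo) netstream timeout commands.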
+ """ + + def __init__(self, argument_spec): + self.spec = argument_spec + self.module = None + self.init_module() + + # module input info + self.timeout_interval = self.module.params['timeout_interval'] + self.type = self.module.params['type'] + self.state = self.module.params['state'] + self.timeout_type = self.module.params['timeout_type'] + self.manual_slot = self.module.params['manual_slot'] + + # host info + self.host = self.module.params['host'] + self.username = self.module.params['username'] + self.port = self.module.params['port'] + + # state + self.changed = False + self.updates_cmd = list() + self.commands = list() + self.results = dict() + self.proposed = dict() + self.existing = dict() + self.end_state = dict() + + # local parameters + self.existing["active_timeout"] = list() + self.existing["inactive_timeout"] = list() + self.existing["tcp_timeout"] = list() + self.end_state["active_timeout"] = list() + self.end_state["inactive_timeout"] = list() + self.end_state["tcp_timeout"] = list() + self.active_changed = False + self.inactive_changed = False + self.tcp_changed = False + + def init_module(self): + """init module""" + + self.module = AnsibleModule(argument_spec=self.spec, supports_check_mode=True) + + def cli_load_config(self, commands): + """load config by cli""" + + if not self.module.check_mode: + load_config(self.module, commands) + + def cli_add_command(self, command, undo=False): + """add command to self.update_cmd and self.commands""" + + if undo and command.lower() not in ["quit", "return"]: + cmd = "undo " + command + else: + cmd = command + + self.commands.append(cmd) + if command.lower() not in ["quit", "return"]: + self.updates_cmd.append(cmd) + + def get_exist_timer_out_para(self): + """Get exist netstream timeout parameters""" + + active_tmp = dict() + inactive_tmp = dict() + tcp_tmp = dict() + active_tmp["ip"] = "30" + active_tmp["vxlan"] = "30" + inactive_tmp["ip"] = "30" + inactive_tmp["vxlan"] = "30" + tcp_tmp["ip"] = "absent" + tcp_tmp["vxlan"] = "absent" + + cmd = "display current-configuration | include ^netstream timeout" + rc, out, err = exec_command(self.module, cmd) + if rc != 0: + self.module.fail_json(msg=err) + config = str(out).strip() + if config: + config = config.lstrip() + config_list = config.split('\n') + for config_mem in config_list: + config_mem = config_mem.lstrip() + config_mem_list = config_mem.split(' ') + if len(config_mem_list) > 4 and config_mem_list[2] == "ip": + if config_mem_list[3] == "active": + active_tmp["ip"] = config_mem_list[4] + if config_mem_list[3] == "inactive": + inactive_tmp["ip"] = config_mem_list[4] + if config_mem_list[3] == "tcp-session": + tcp_tmp["ip"] = "present" + if len(config_mem_list) > 4 and config_mem_list[2] == "vxlan": + if config_mem_list[4] == "active": + active_tmp["vxlan"] = config_mem_list[5] + if config_mem_list[4] == "inactive": + inactive_tmp["vxlan"] = config_mem_list[5] + if config_mem_list[4] == "tcp-session": + tcp_tmp["vxlan"] = "present" + self.existing["active_timeout"].append(active_tmp) + self.existing["inactive_timeout"].append(inactive_tmp) + self.existing["tcp_timeout"].append(tcp_tmp) + + def get_end_timer_out_para(self): + """Get end netstream timeout parameters""" + + active_tmp = dict() + inactive_tmp = dict() + tcp_tmp = dict() + active_tmp["ip"] = "30" + active_tmp["vxlan"] = "30" + inactive_tmp["ip"] = "30" + inactive_tmp["vxlan"] = "30" + tcp_tmp["ip"] = "absent" + tcp_tmp["vxlan"] = "absent" + cmd = "display current-configuration | include ^netstream timeout" + rc, 
out, err = exec_command(self.module, cmd)
+        if rc != 0:
+            self.module.fail_json(msg=err)
+        config = str(out).strip()
+        if config:
+            config = config.lstrip()
+            config_list = config.split('\n')
+            for config_mem in config_list:
+                config_mem = config_mem.lstrip()
+                config_mem_list = config_mem.split(' ')
+                if len(config_mem_list) > 4 and config_mem_list[2] == "ip":
+                    if config_mem_list[3] == "active":
+                        active_tmp["ip"] = config_mem_list[4]
+                    if config_mem_list[3] == "inactive":
+                        inactive_tmp["ip"] = config_mem_list[4]
+                    if config_mem_list[3] == "tcp-session":
+                        tcp_tmp["ip"] = "present"
+                if len(config_mem_list) > 4 and config_mem_list[2] == "vxlan":
+                    if config_mem_list[4] == "active":
+                        active_tmp["vxlan"] = config_mem_list[5]
+                    if config_mem_list[4] == "inactive":
+                        inactive_tmp["vxlan"] = config_mem_list[5]
+                    if config_mem_list[4] == "tcp-session":
+                        tcp_tmp["vxlan"] = "present"
+        self.end_state["active_timeout"].append(active_tmp)
+        self.end_state["inactive_timeout"].append(inactive_tmp)
+        self.end_state["tcp_timeout"].append(tcp_tmp)
+
+    def check_params(self):
+        """Check all input params"""
+
+        # interval check
+        if not str(self.timeout_interval).isdigit():
+            self.module.fail_json(
+                msg='Error: Timeout interval should be numerical.')
+        if self.timeout_type == "active":
+            if int(self.timeout_interval) < 1 or int(self.timeout_interval) > 60:
+                self.module.fail_json(
+                    msg="Error: Active interval should be between 1 and 60 minutes.")
+        if self.timeout_type == "inactive":
+            if int(self.timeout_interval) < 5 or int(self.timeout_interval) > 600:
+                self.module.fail_json(
+                    msg="Error: Inactive interval should be between 5 and 600 seconds.")
+        if self.timeout_type == "manual":
+            if not self.manual_slot:
+                self.module.fail_json(
+                    msg="Error: If manual timeout mode is used, a slot number is needed.")
+            if re.match(r'^\d+(\/\d*)?$', self.manual_slot) is None:
+                self.module.fail_json(
+                    msg='Error: Slot number should be numerical.')
+
+    def get_proposed(self):
+        """get proposed info"""
+
+        if self.timeout_interval:
+            self.proposed["timeout_interval"] = self.timeout_interval
+        if self.timeout_type:
+            self.proposed["timeout_type"] = self.timeout_type
+        if self.type:
+            self.proposed["type"] = self.type
+        if self.state:
+            self.proposed["state"] = self.state
+        if self.manual_slot:
+            self.proposed["manual_slot"] = self.manual_slot
+
+    def get_existing(self):
+        """get existing info"""
+        active_tmp = dict()
+        inactive_tmp = dict()
+        tcp_tmp = dict()
+
+        self.get_exist_timer_out_para()
+
+        if self.timeout_type == "active":
+            for active_tmp in self.existing["active_timeout"]:
+                if self.state == "present":
+                    if str(active_tmp[self.type]) != self.timeout_interval:
+                        self.active_changed = True
+                else:
+                    if self.timeout_interval != "30":
+                        if str(active_tmp[self.type]) != "30":
+                            if str(active_tmp[self.type]) != self.timeout_interval:
+                                self.module.fail_json(
+                                    msg='Error: The specified active interval does not exist.')
+                    if str(active_tmp[self.type]) != "30":
+                        self.timeout_interval = active_tmp[self.type]
+                        self.active_changed = True
+        if self.timeout_type == "inactive":
+            for inactive_tmp in self.existing["inactive_timeout"]:
+                if self.state == "present":
+                    if str(inactive_tmp[self.type]) != self.timeout_interval:
+                        self.inactive_changed = True
+                else:
+                    if self.timeout_interval != "30":
+                        if str(inactive_tmp[self.type]) != "30":
+                            if str(inactive_tmp[self.type]) != self.timeout_interval:
+                                self.module.fail_json(
+                                    msg='Error: The specified inactive interval does not exist.')
+                    if str(inactive_tmp[self.type]) != "30":
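+                        # a non-default interval is configured on the device, so
+                        # reuse that value to build an undo command that matches it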
self.timeout_interval = inactive_tmp[self.type] + self.inactive_changed = True + if self.timeout_type == "tcp-session": + for tcp_tmp in self.existing["tcp_timeout"]: + if str(tcp_tmp[self.type]) != self.state: + self.tcp_changed = True + + def operate_time_out(self): + """configure timeout parameters""" + + cmd = "" + if self.timeout_type == "manual": + if self.type == "ip": + self.cli_add_command("quit") + cmd = "reset netstream cache ip slot %s" % self.manual_slot + self.cli_add_command(cmd) + elif self.type == "vxlan": + self.cli_add_command("quit") + cmd = "reset netstream cache vxlan inner-ip slot %s" % self.manual_slot + self.cli_add_command(cmd) + + if not self.active_changed and not self.inactive_changed and not self.tcp_changed: + if self.commands: + self.cli_load_config(self.commands) + self.changed = True + return + + if self.active_changed or self.inactive_changed: + if self.type == "ip": + cmd = "netstream timeout ip %s %s" % (self.timeout_type, self.timeout_interval) + elif self.type == "vxlan": + cmd = "netstream timeout vxlan inner-ip %s %s" % (self.timeout_type, self.timeout_interval) + if self.state == "absent": + self.cli_add_command(cmd, undo=True) + else: + self.cli_add_command(cmd) + if self.timeout_type == "tcp-session" and self.tcp_changed: + if self.type == "ip": + if self.state == "present": + cmd = "netstream timeout ip tcp-session" + else: + cmd = "undo netstream timeout ip tcp-session" + + elif self.type == "vxlan": + if self.state == "present": + cmd = "netstream timeout vxlan inner-ip tcp-session" + else: + cmd = "undo netstream timeout vxlan inner-ip tcp-session" + self.cli_add_command(cmd) + if self.commands: + self.cli_load_config(self.commands) + self.changed = True + + def get_end_state(self): + """get end state info""" + + self.get_end_timer_out_para() + + def work(self): + """worker""" + + self.check_params() + self.get_existing() + self.get_proposed() + self.operate_time_out() + self.get_end_state() + if self.existing == self.end_state: + self.changed = False + self.results['changed'] = self.changed + self.results['proposed'] = self.proposed + self.results['existing'] = self.existing + self.results['end_state'] = self.end_state + if self.changed: + self.results['updates'] = self.updates_cmd + else: + self.results['updates'] = list() + + self.module.exit_json(**self.results) + + +def main(): + """Module main""" + + argument_spec = dict( + timeout_interval=dict(required=False, type='str', default='30'), + type=dict(required=False, choices=['ip', 'vxlan']), + state=dict(required=False, choices=['present', 'absent'], default='present'), + timeout_type=dict(required=False, choices=['active', 'inactive', 'tcp-session', 'manual']), + manual_slot=dict(required=False, type='str'), + ) + argument_spec.update(ce_argument_spec) + module = NetStreamAging(argument_spec) + module.work() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/cloudengine/ce_netstream_export.py b/plugins/modules/network/cloudengine/ce_netstream_export.py new file mode 100644 index 0000000000..b5c01d05a7 --- /dev/null +++ b/plugins/modules/network/cloudengine/ce_netstream_export.py @@ -0,0 +1,561 @@ +#!/usr/bin/python +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. 
+# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: ce_netstream_export +short_description: Manages netstream export on HUAWEI CloudEngine switches. +description: + - Configure NetStream flow statistics exporting and versions for exported packets on HUAWEI CloudEngine switches. +author: Zhijin Zhou (@QijunPan) +notes: + - Recommended connection is C(network_cli). + - This module also works with C(local) connections for legacy playbooks. +options: + type: + description: + - Specifies NetStream feature. + required: true + choices: ['ip', 'vxlan'] + source_ip: + description: + - Specifies source address which can be IPv6 or IPv4 of the exported NetStream packet. + host_ip: + description: + - Specifies destination address which can be IPv6 or IPv4 of the exported NetStream packet. + host_port: + description: + - Specifies the destination UDP port number of the exported packets. + The value is an integer that ranges from 1 to 65535. + host_vpn: + description: + - Specifies the VPN instance of the exported packets carrying flow statistics. + Ensure the VPN instance has been created on the device. + version: + description: + - Sets the version of exported packets. + choices: ['5', '9'] + as_option: + description: + - Specifies the AS number recorded in the statistics as the original or the peer AS number. + choices: ['origin', 'peer'] + bgp_nexthop: + description: + - Configures the statistics to carry BGP next hop information. Currently, only V9 supports the exported + packets carrying BGP next hop information. + choices: ['enable','disable'] + default: 'disable' + state: + description: + - Manage the state of the resource. + choices: ['present','absent'] + default: present +''' + +EXAMPLES = ''' +- name: netstream export module test + hosts: cloudengine + connection: local + gather_facts: no + vars: + cli: + host: "{{ inventory_hostname }}" + port: "{{ ansible_ssh_port }}" + username: "{{ username }}" + password: "{{ password }}" + transport: cli + + tasks: + + - name: Configures the source address for the exported packets carrying IPv4 flow statistics. + ce_netstream_export: + type: ip + source_ip: 192.8.2.2 + provider: "{{ cli }}" + + - name: Configures the source IP address for the exported packets carrying VXLAN flexible flow statistics. + ce_netstream_export: + type: vxlan + source_ip: 192.8.2.3 + provider: "{{ cli }}" + + - name: Configures the destination IP address and destination UDP port number for the exported packets carrying IPv4 flow statistics. + ce_netstream_export: + type: ip + host_ip: 192.8.2.4 + host_port: 25 + host_vpn: test + provider: "{{ cli }}" + + - name: Configures the destination IP address and destination UDP port number for the exported packets carrying VXLAN flexible flow statistics. + ce_netstream_export: + type: vxlan + host_ip: 192.8.2.5 + host_port: 26 + host_vpn: test + provider: "{{ cli }}" + + - name: Configures the version number of the exported packets carrying IPv4 flow statistics. 
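+    # as_option and bgp_nexthop in this task apply to version 9 exports;
+    # the module rejects bgp_nexthop=enable when version is 5.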
+ ce_netstream_export: + type: ip + version: 9 + as_option: origin + bgp_nexthop: enable + provider: "{{ cli }}" + + - name: Configures the version for the exported packets carrying VXLAN flexible flow statistics. + ce_netstream_export: + type: vxlan + version: 9 + provider: "{{ cli }}" +''' + +RETURN = ''' +proposed: + description: k/v pairs of parameters passed into module + returned: always + type: dict + sample: { + "as_option": "origin", + "bgp_nexthop": "enable", + "host_ip": "192.8.5.6", + "host_port": "26", + "host_vpn": "test", + "source_ip": "192.8.2.5", + "state": "present", + "type": "ip", + "version": "9" + } +existing: + description: k/v pairs of existing attributes on the device + returned: always + type: dict + sample: { + "as_option": null, + "bgp_nexthop": "disable", + "host_ip": null, + "host_port": null, + "host_vpn": null, + "source_ip": null, + "type": "ip", + "version": null + } +end_state: + description: k/v pairs of end attributes on the device + returned: always + type: dict + sample: { + "as_option": "origin", + "bgp_nexthop": "enable", + "host_ip": "192.8.5.6", + "host_port": "26", + "host_vpn": "test", + "source_ip": "192.8.2.5", + "type": "ip", + "version": "9" + } +updates: + description: command list sent to the device + returned: always + type: list + sample: [ + "netstream export ip source 192.8.2.5", + "netstream export ip host 192.8.5.6 26 vpn-instance test", + "netstream export ip version 9 origin-as bgp-nexthop" + ] +changed: + description: check to see if a change was made on the device + returned: always + type: bool + sample: true +''' + +import re +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.network.cloudengine.ce import exec_command, load_config +from ansible_collections.community.general.plugins.module_utils.network.cloudengine.ce import ce_argument_spec + + +def is_ipv4_addr(ip_addr): + """check ipaddress validate""" + + rule1 = r'(25[0-5]|2[0-4][0-9]|1[0-9][0-9]|[1-9]?[0-9])\.' 
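+    # rule1 matches one 0-255 octet followed by a dot; rule2 is the final
+    # octet without the dot, so the full pattern is three of rule1 plus rule2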
+ rule2 = r'(25[0-5]|2[0-4][0-9]|1[0-9][0-9]|[1-9]?[0-9])' + ipv4_regex = '%s%s%s%s%s%s' % ('^', rule1, rule1, rule1, rule2, '$') + + return bool(re.match(ipv4_regex, ip_addr)) + + +def is_config_exist(cmp_cfg, test_cfg): + """is configuration exist""" + + test_cfg_tmp = test_cfg + ' *$' + '|' + test_cfg + ' *\n' + obj = re.compile(test_cfg_tmp) + result = re.findall(obj, cmp_cfg) + if not result: + return False + return True + + +class NetstreamExport(object): + """Manage NetStream export""" + + def __init__(self, argument_spec): + self.spec = argument_spec + self.module = None + self.__init_module__() + + # NetStream export configuration parameters + self.type = self.module.params['type'] + self.source_ip = self.module.params['source_ip'] + self.host_ip = self.module.params['host_ip'] + self.host_port = self.module.params['host_port'] + self.host_vpn = self.module.params['host_vpn'] + self.version = self.module.params['version'] + self.as_option = self.module.params['as_option'] + self.bgp_netxhop = self.module.params['bgp_nexthop'] + self.state = self.module.params['state'] + + self.commands = list() + self.config = None + self.exist_conf = dict() + + # state + self.changed = False + self.updates_cmd = list() + self.results = dict() + self.proposed = dict() + self.existing = dict() + self.end_state = dict() + + def __init_module__(self): + """init module""" + + self.module = AnsibleModule( + argument_spec=self.spec, supports_check_mode=True) + + def cli_load_config(self, commands): + """load config by cli""" + + if not self.module.check_mode: + load_config(self.module, commands) + + def get_netstream_config(self): + """get current netstream configuration""" + + cmd = "display current-configuration | include ^netstream export" + rc, out, err = exec_command(self.module, cmd) + if rc != 0: + self.module.fail_json(msg=err) + config = str(out).strip() + return config + + def get_existing(self): + """get existing config""" + + self.existing = dict(type=self.type, + source_ip=self.exist_conf['source_ip'], + host_ip=self.exist_conf['host_ip'], + host_port=self.exist_conf['host_port'], + host_vpn=self.exist_conf['host_vpn'], + version=self.exist_conf['version'], + as_option=self.exist_conf['as_option'], + bgp_nexthop=self.exist_conf['bgp_netxhop']) + + def get_proposed(self): + """get proposed config""" + + self.proposed = dict(type=self.type, + source_ip=self.source_ip, + host_ip=self.host_ip, + host_port=self.host_port, + host_vpn=self.host_vpn, + version=self.version, + as_option=self.as_option, + bgp_nexthop=self.bgp_netxhop, + state=self.state) + + def get_end_state(self): + """get end config""" + self.get_config_data() + self.end_state = dict(type=self.type, + source_ip=self.exist_conf['source_ip'], + host_ip=self.exist_conf['host_ip'], + host_port=self.exist_conf['host_port'], + host_vpn=self.exist_conf['host_vpn'], + version=self.exist_conf['version'], + as_option=self.exist_conf['as_option'], + bgp_nexthop=self.exist_conf['bgp_netxhop']) + + def show_result(self): + """show result""" + + self.results['changed'] = self.changed + self.results['proposed'] = self.proposed + self.results['existing'] = self.existing + self.results['end_state'] = self.end_state + if self.changed: + self.results['updates'] = self.updates_cmd + else: + self.results['updates'] = list() + + self.module.exit_json(**self.results) + + def cli_add_command(self, command, undo=False): + """add command to self.update_cmd and self.commands""" + + if undo and command.lower() not in ["quit", "return"]: + cmd = "undo " + 
command + else: + cmd = command + + self.commands.append(cmd) # set to device + if command.lower() not in ["quit", "return"]: + if cmd not in self.updates_cmd: + self.updates_cmd.append(cmd) # show updates result + + def config_nets_export_src_addr(self): + """Configures the source address for the exported packets""" + + if is_ipv4_addr(self.source_ip): + if self.type == 'ip': + cmd = "netstream export ip source %s" % self.source_ip + else: + cmd = "netstream export vxlan inner-ip source %s" % self.source_ip + else: + if self.type == 'ip': + cmd = "netstream export ip source ipv6 %s" % self.source_ip + else: + cmd = "netstream export vxlan inner-ip source ipv6 %s" % self.source_ip + + if is_config_exist(self.config, cmd): + self.exist_conf['source_ip'] = self.source_ip + if self.state == 'present': + return + else: + undo = True + else: + if self.state == 'absent': + return + else: + undo = False + + self.cli_add_command(cmd, undo) + + def config_nets_export_host_addr(self): + """Configures the destination IP address and destination UDP port number""" + + if is_ipv4_addr(self.host_ip): + if self.type == 'ip': + cmd = 'netstream export ip host %s %s' % (self.host_ip, self.host_port) + else: + cmd = 'netstream export vxlan inner-ip host %s %s' % (self.host_ip, self.host_port) + else: + if self.type == 'ip': + cmd = 'netstream export ip host ipv6 %s %s' % (self.host_ip, self.host_port) + else: + cmd = 'netstream export vxlan inner-ip host ipv6 %s %s' % (self.host_ip, self.host_port) + + if self.host_vpn: + cmd += " vpn-instance %s" % self.host_vpn + + if is_config_exist(self.config, cmd): + self.exist_conf['host_ip'] = self.host_ip + self.exist_conf['host_port'] = self.host_port + if self.host_vpn: + self.exist_conf['host_vpn'] = self.host_vpn + + if self.state == 'present': + return + else: + undo = True + else: + if self.state == 'absent': + return + else: + undo = False + + self.cli_add_command(cmd, undo) + + def config_nets_export_vxlan_ver(self): + """Configures the version for the exported packets carrying VXLAN flexible flow statistics""" + + cmd = 'netstream export vxlan inner-ip version 9' + + if is_config_exist(self.config, cmd): + self.exist_conf['version'] = self.version + + if self.state == 'present': + return + else: + undo = True + else: + if self.state == 'absent': + return + else: + undo = False + + self.cli_add_command(cmd, undo) + + def config_nets_export_ip_ver(self): + """Configures the version number of the exported packets carrying IPv4 flow statistics""" + + cmd = 'netstream export ip version %s' % self.version + if self.version == '5': + if self.as_option == 'origin': + cmd += ' origin-as' + elif self.as_option == 'peer': + cmd += ' peer-as' + else: + if self.as_option == 'origin': + cmd += ' origin-as' + elif self.as_option == 'peer': + cmd += ' peer-as' + + if self.bgp_netxhop == 'enable': + cmd += ' bgp-nexthop' + + if cmd == 'netstream export ip version 5': + cmd_tmp = "netstream export ip version" + if cmd_tmp in self.config: + if self.state == 'present': + self.cli_add_command(cmd, False) + else: + self.exist_conf['version'] = self.version + return + + if is_config_exist(self.config, cmd): + self.exist_conf['version'] = self.version + self.exist_conf['as_option'] = self.as_option + self.exist_conf['bgp_netxhop'] = self.bgp_netxhop + + if self.state == 'present': + return + else: + undo = True + else: + if self.state == 'absent': + return + else: + undo = False + + self.cli_add_command(cmd, undo) + + def config_netstream_export(self): + """configure netstream 
export""" + + if self.commands: + self.cli_load_config(self.commands) + self.changed = True + + def check_params(self): + """Check all input params""" + + if not self.type: + self.module.fail_json(msg='Error: The value of type cannot be empty.') + + if self.host_port: + if not self.host_port.isdigit(): + self.module.fail_json(msg='Error: Host port is invalid.') + if int(self.host_port) < 1 or int(self.host_port) > 65535: + self.module.fail_json(msg='Error: Host port is not in the range from 1 to 65535.') + + if self.host_vpn: + if self.host_vpn == '_public_': + self.module.fail_json( + msg='Error: The host vpn name _public_ is reserved.') + if len(self.host_vpn) < 1 or len(self.host_vpn) > 31: + self.module.fail_json(msg='Error: The host vpn name length is not in the range from 1 to 31.') + + if self.type == 'vxlan' and self.version == '5': + self.module.fail_json(msg="Error: When type is vxlan, version must be 9.") + + if self.type == 'ip' and self.version == '5' and self.bgp_netxhop == 'enable': + self.module.fail_json(msg="Error: When type=ip and version=5, bgp_netxhop is not supported.") + + if (self.host_ip and not self.host_port) or (self.host_port and not self.host_ip): + self.module.fail_json(msg="Error: host_ip and host_port must both exist or not exist.") + + def get_config_data(self): + """get configuration commands and current configuration""" + + self.exist_conf['type'] = self.type + self.exist_conf['source_ip'] = None + self.exist_conf['host_ip'] = None + self.exist_conf['host_port'] = None + self.exist_conf['host_vpn'] = None + self.exist_conf['version'] = None + self.exist_conf['as_option'] = None + self.exist_conf['bgp_netxhop'] = 'disable' + + self.config = self.get_netstream_config() + + if self.type and self.source_ip: + self.config_nets_export_src_addr() + + if self.type and self.host_ip and self.host_port: + self.config_nets_export_host_addr() + + if self.type == 'vxlan' and self.version == '9': + self.config_nets_export_vxlan_ver() + + if self.type == 'ip' and self.version: + self.config_nets_export_ip_ver() + + def work(self): + """execute task""" + + self.check_params() + self.get_proposed() + self.get_config_data() + self.get_existing() + + self.config_netstream_export() + + self.get_end_state() + self.show_result() + + +def main(): + """main function entry""" + + argument_spec = dict( + type=dict(required=True, type='str', choices=['ip', 'vxlan']), + source_ip=dict(required=False, type='str'), + host_ip=dict(required=False, type='str'), + host_port=dict(required=False, type='str'), + host_vpn=dict(required=False, type='str'), + version=dict(required=False, type='str', choices=['5', '9']), + as_option=dict(required=False, type='str', choices=['origin', 'peer']), + bgp_nexthop=dict(required=False, type='str', choices=['enable', 'disable'], default='disable'), + state=dict(choices=['absent', 'present'], default='present', required=False) + ) + argument_spec.update(ce_argument_spec) + netstream_export = NetstreamExport(argument_spec) + netstream_export.work() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/cloudengine/ce_netstream_global.py b/plugins/modules/network/cloudengine/ce_netstream_global.py new file mode 100644 index 0000000000..ac8c61e06f --- /dev/null +++ b/plugins/modules/network/cloudengine/ce_netstream_global.py @@ -0,0 +1,946 @@ +#!/usr/bin/python +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by 
+# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: ce_netstream_global +short_description: Manages global parameters of NetStream on HUAWEI CloudEngine switches. +description: + - Manages global parameters of NetStream on HUAWEI CloudEngine switches. +author: YangYang (@QijunPan) +notes: + - Recommended connection is C(network_cli). + - This module also works with C(local) connections for legacy playbooks. +options: + type: + description: + - Specifies the type of netstream global. + choices: ['ip', 'vxlan'] + default: 'ip' + state: + description: + - Specify desired state of the resource. + choices: ['present', 'absent'] + default: present + interface: + description: + - Netstream global interface. + required: true + sampler_interval: + description: + - Specifies the netstream sampler interval, length is 1 - 65535. + sampler_direction: + description: + - Specifies the netstream sampler direction. + choices: ['inbound', 'outbound'] + statistics_direction: + description: + - Specifies the netstream statistic direction. + choices: ['inbound', 'outbound'] + statistics_record: + description: + - Specifies the flexible netstream statistic record, length is 1 - 32. + index_switch: + description: + - Specifies the netstream index-switch. + choices: ['16', '32'] + default: '16' +''' + +EXAMPLES = ''' +- name: netstream global module test + hosts: cloudengine + connection: local + gather_facts: no + vars: + cli: + host: "{{ inventory_hostname }}" + port: "{{ ansible_ssh_port }}" + username: "{{ username }}" + password: "{{ password }}" + transport: cli + + tasks: + + - name: Configure a netstream sampler at interface 10ge1/0/2, direction is outbound,interval is 30. + ce_netstream_global: + interface: 10ge1/0/2 + type: ip + sampler_interval: 30 + sampler_direction: outbound + state: present + provider: "{{ cli }}" + - name: Configure a netstream flexible statistic at interface 10ge1/0/2, record is test1, type is ip. + ce_netstream_global: + type: ip + interface: 10ge1/0/2 + statistics_record: test1 + provider: "{{ cli }}" + - name: Set the vxlan index-switch to 32. 
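+    # the index-switch applies beyond a single port, hence interface: all here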
+ ce_netstream_global: + type: vxlan + interface: all + index_switch: 32 + provider: "{{ cli }}" +''' + +RETURN = ''' +proposed: + description: k/v pairs of parameters passed into module + returned: verbose mode + type: dict + sample: {"index_switch": "16", + "interface": "10ge1/0/2", + "state": "present", + "statistics_record": "test", + "type": "vxlan"} +existing: + description: k/v pairs of existing configuration + returned: verbose mode + type: dict + sample: {"flexible_statistic": [ + { + "interface": "10ge1/0/2", + "statistics_record": [], + "type": "ip" + }, + { + "interface": "10ge1/0/2", + "statistics_record": [], + "type": "vxlan" + } + ], + "index-switch": [ + { + "index-switch": "16", + "type": "ip" + }, + { + "index-switch": "16", + "type": "vxlan" + } + ], + "ip_record": [ + "test", + "test1" + ], + "sampler": [ + { + "interface": "all", + "sampler_direction": "null", + "sampler_interval": "null" + } + ], + "statistic": [ + { + "interface": "10ge1/0/2", + "statistics_direction": [], + "type": "null" + } + ], + "vxlan_record": [ + "test" + ]} +end_state: + description: k/v pairs of configuration after module execution + returned: verbose mode + type: dict + sample: {"flexible_statistic": [ + { + "interface": "10ge1/0/2", + "statistics_record": [], + "type": "ip" + }, + { + "interface": "10ge1/0/2", + "statistics_record": [ + "test" + ], + "type": "vxlan" + } + ], + "index-switch": [ + { + "index-switch": "16", + "type": "ip" + }, + { + "index-switch": "16", + "type": "vxlan" + } + ], + "sampler": [ + { + "interface": "all", + "sampler_direction": "null", + "sampler_interval": "null" + } + ], + "statistic": [ + { + "interface": "10ge1/0/2", + "statistics_direction": [], + "type": "null" + } + ]} +updates: + description: commands sent to the device + returned: always + type: list + sample: ["interface 10ge1/0/2", + "netstream record test vxlan inner-ip"] +changed: + description: check to see if a change was made on the device + returned: always + type: bool + sample: true +''' + +import re +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.network.cloudengine.ce import load_config +from ansible_collections.community.general.plugins.module_utils.network.cloudengine.ce import get_connection, rm_config_prefix +from ansible_collections.community.general.plugins.module_utils.network.cloudengine.ce import ce_argument_spec + + +def get_interface_type(interface): + """Gets the type of interface, such as 10GE, ETH-TRUNK...""" + + if interface is None: + return None + + iftype = None + + if interface.upper().startswith('GE'): + iftype = 'ge' + elif interface.upper().startswith('10GE'): + iftype = '10ge' + elif interface.upper().startswith('25GE'): + iftype = '25ge' + elif interface.upper().startswith('4X10GE'): + iftype = '4x10ge' + elif interface.upper().startswith('40GE'): + iftype = '40ge' + elif interface.upper().startswith('100GE'): + iftype = '100ge' + elif interface.upper().startswith('ETH-TRUNK'): + iftype = 'eth-trunk' + elif interface.upper().startswith('ALL'): + iftype = 'all' + else: + return None + + return iftype.lower() + + +def get_config(module, flags): + + """Retrieves the current config from the device or cache + """ + time_stamp_regex = re.compile(r'\s*\d{4}-\d{1,2}-\d{1,2}\s+\d{2}\:\d{2}\:\d{2}\.\d+\s*') + flags = [] if flags is None else flags + if isinstance(flags, str): + flags = [flags] + elif not isinstance(flags, list): + flags = [] + + cmd = 'display current-configuration ' + cmd += ' 
'.join(flags) + cmd = cmd.strip() + conn = get_connection(module) + rc, out, err = conn.exec_command(cmd) + if rc != 0: + module.fail_json(msg=err) + cfg = str(out).strip() + # remove default configuration prefix '~' + for flag in flags: + if "include-default" in flag: + cfg = rm_config_prefix(cfg) + break + lines = cfg.split('\n') + lines = [l for l in lines if time_stamp_regex.match(l) is None] + if cfg.startswith('display'): + if len(lines) > 1: + lines.pop(0) + else: + return '' + return '\n'.join(lines) + + +class NetStreamGlobal(object): + """ + Manages netstream global parameters. + """ + + def __init__(self, argument_spec): + self.spec = argument_spec + self.module = None + self.init_module() + + # module input info + self.type = self.module.params['type'] + self.interface = self.module.params['interface'] + self.sampler_interval = self.module.params['sampler_interval'] + self.sampler_direction = self.module.params['sampler_direction'] + self.statistics_direction = self.module.params['statistics_direction'] + self.statistics_record = self.module.params['statistics_record'] + self.index_switch = self.module.params['index_switch'] + self.state = self.module.params['state'] + + # host info + self.host = self.module.params['host'] + self.username = self.module.params['username'] + self.port = self.module.params['port'] + + # state + self.changed = False + self.updates_cmd = list() + self.commands = list() + self.results = dict() + self.proposed = dict() + self.existing = dict() + self.end_state = dict() + + # local parameters + self.existing["sampler"] = list() + self.existing["statistic"] = list() + self.existing["flexible_statistic"] = list() + self.existing["index-switch"] = list() + self.existing["ip_record"] = list() + self.existing["vxlan_record"] = list() + self.end_state["sampler"] = list() + self.end_state["statistic"] = list() + self.end_state["flexible_statistic"] = list() + self.end_state["index-switch"] = list() + self.sampler_changed = False + self.statistic_changed = False + self.flexible_changed = False + self.index_switch_changed = False + + def init_module(self): + """init module""" + + self.module = AnsibleModule( + argument_spec=self.spec, supports_check_mode=True) + + def cli_load_config(self, commands): + """load config by cli""" + + if not self.module.check_mode: + load_config(self.module, commands) + + def cli_add_command(self, command, undo=False): + """add command to self.update_cmd and self.commands""" + + if undo and command.lower() not in ["quit", "return"]: + cmd = "undo " + command + else: + cmd = command + + self.commands.append(cmd) + if command.lower() not in ["quit", "return"]: + self.updates_cmd.append(cmd) + + def get_exist_sampler_interval(self): + """get exist netstream sampler interval""" + + sampler_tmp = dict() + sampler_tmp1 = dict() + flags = list() + exp = " | ignore-case include ^netstream sampler random-packets" + flags.append(exp) + config = get_config(self.module, flags) + if not config: + sampler_tmp["sampler_interval"] = "null" + sampler_tmp["sampler_direction"] = "null" + sampler_tmp["interface"] = "null" + else: + config_list = config.split(' ') + config_num = len(config_list) + sampler_tmp["sampler_direction"] = config_list[config_num - 1] + sampler_tmp["sampler_interval"] = config_list[config_num - 2] + sampler_tmp["interface"] = "all" + self.existing["sampler"].append(sampler_tmp) + if self.interface != "all": + flags = list() + exp = r" | ignore-case section include ^#\s+interface %s" \ + r" | include netstream sampler 
random-packets" % self.interface + flags.append(exp) + config = get_config(self.module, flags) + if not config: + sampler_tmp1["sampler_interval"] = "null" + sampler_tmp1["sampler_direction"] = "null" + else: + config = config.lstrip() + config_list = config.split('\n') + for config_mem in config_list: + sampler_tmp1 = dict() + config_mem_list = config_mem.split(' ') + config_num = len(config_mem_list) + if config_num > 1: + sampler_tmp1["sampler_direction"] = config_mem_list[ + config_num - 1] + sampler_tmp1["sampler_interval"] = config_mem_list[ + config_num - 2] + sampler_tmp1["interface"] = self.interface + self.existing["sampler"].append(sampler_tmp1) + + def get_exist_statistic_record(self): + """get exist netstream statistic record parameter""" + + if self.statistics_record and self.statistics_direction: + self.module.fail_json( + msg='Error: The statistic direction and record can not exist at the same time.') + statistic_tmp = dict() + statistic_tmp1 = dict() + statistic_tmp["statistics_record"] = list() + statistic_tmp["interface"] = self.interface + statistic_tmp1["statistics_record"] = list() + statistic_tmp1["interface"] = self.interface + flags = list() + exp = r" | ignore-case section include ^#\s+interface %s" \ + r" | include netstream record"\ + % (self.interface) + flags.append(exp) + config = get_config(self.module, flags) + if not config: + statistic_tmp["type"] = "ip" + self.existing["flexible_statistic"].append(statistic_tmp) + statistic_tmp1["type"] = "vxlan" + self.existing["flexible_statistic"].append(statistic_tmp1) + else: + config = config.lstrip() + config_list = config.split('\n') + for config_mem in config_list: + config_mem = config_mem.lstrip() + statistic_tmp["statistics_record"] = list() + config_mem_list = config_mem.split(' ') + if len(config_mem_list) > 3 and str(config_mem_list[3]) == "ip": + statistic_tmp["statistics_record"].append( + str(config_mem_list[2])) + statistic_tmp["type"] = "ip" + self.existing["flexible_statistic"].append(statistic_tmp) + for config_mem in config_list: + statistic_tmp1["statistics_record"] = list() + config_mem = config_mem.lstrip() + config_mem_list = config_mem.split(' ') + if len(config_mem_list) > 3 and str(config_mem_list[3]) == "vxlan": + statistic_tmp1["statistics_record"].append( + str(config_mem_list[2])) + statistic_tmp1["type"] = "vxlan" + self.existing["flexible_statistic"].append(statistic_tmp1) + + def get_exist_interface_statistic(self): + """get exist netstream interface statistic parameter""" + + statistic_tmp1 = dict() + statistic_tmp1["statistics_direction"] = list() + flags = list() + exp = r" | ignore-case section include ^#\s+interface %s" \ + r" | include netstream inbound|outbound"\ + % self.interface + flags.append(exp) + config = get_config(self.module, flags) + if not config: + statistic_tmp1["type"] = "null" + else: + statistic_tmp1["type"] = "ip" + config = config.lstrip() + config_list = config.split('\n') + for config_mem in config_list: + config_mem = config_mem.lstrip() + config_mem_list = config_mem.split(' ') + if len(config_mem_list) > 1: + statistic_tmp1["statistics_direction"].append( + str(config_mem_list[1])) + statistic_tmp1["interface"] = self.interface + self.existing["statistic"].append(statistic_tmp1) + + def get_exist_index_switch(self): + """get exist netstream index-switch""" + + index_switch_tmp = dict() + index_switch_tmp1 = dict() + index_switch_tmp["index-switch"] = "16" + index_switch_tmp["type"] = "ip" + index_switch_tmp1["index-switch"] = "16" + 
index_switch_tmp1["type"] = "vxlan" + flags = list() + exp = " | ignore-case include index-switch" + flags.append(exp) + config = get_config(self.module, flags) + if not config: + self.existing["index-switch"].append(index_switch_tmp) + self.existing["index-switch"].append(index_switch_tmp1) + else: + config = config.lstrip() + config_list = config.split('\n') + for config_mem in config_list: + config_mem_list = config_mem.split(' ') + if len(config_mem_list) > 2 and str(config_mem_list[2]) == "ip": + index_switch_tmp["index-switch"] = "32" + index_switch_tmp["type"] = "ip" + if len(config_mem_list) > 2 and str(config_mem_list[2]) == "vxlan": + index_switch_tmp1["index-switch"] = "32" + index_switch_tmp1["type"] = "vxlan" + self.existing["index-switch"].append(index_switch_tmp) + self.existing["index-switch"].append(index_switch_tmp1) + + def get_exist_record(self): + """get exist netstream record""" + + flags = list() + exp = " | ignore-case include netstream record" + flags.append(exp) + config = get_config(self.module, flags) + if config: + config = config.lstrip() + config_list = config.split('\n') + for config_mem in config_list: + config_mem_list = config_mem.split(' ') + if len(config_mem_list) > 3 and config_mem_list[3] == "ip": + self.existing["ip_record"].append(config_mem_list[2]) + if len(config_mem_list) > 3 and config_mem_list[3] == "vxlan": + self.existing["vxlan_record"].append(config_mem_list[2]) + + def get_end_sampler_interval(self): + """get end netstream sampler interval""" + + sampler_tmp = dict() + sampler_tmp1 = dict() + flags = list() + exp = " | ignore-case include ^netstream sampler random-packets" + flags.append(exp) + config = get_config(self.module, flags) + if not config: + sampler_tmp["sampler_interval"] = "null" + sampler_tmp["sampler_direction"] = "null" + else: + config_list = config.split(' ') + config_num = len(config_list) + if config_num > 1: + sampler_tmp["sampler_direction"] = config_list[config_num - 1] + sampler_tmp["sampler_interval"] = config_list[config_num - 2] + sampler_tmp["interface"] = "all" + self.end_state["sampler"].append(sampler_tmp) + if self.interface != "all": + flags = list() + exp = r" | ignore-case section include ^#\s+interface %s" \ + r" | include netstream sampler random-packets" % self.interface + flags.append(exp) + config = get_config(self.module, flags) + if not config: + sampler_tmp1["sampler_interval"] = "null" + sampler_tmp1["sampler_direction"] = "null" + else: + config = config.lstrip() + config_list = config.split('\n') + for config_mem in config_list: + sampler_tmp1 = dict() + config_mem_list = config_mem.split(' ') + config_num = len(config_mem_list) + if config_num > 1: + sampler_tmp1["sampler_direction"] = config_mem_list[ + config_num - 1] + sampler_tmp1["sampler_interval"] = config_mem_list[ + config_num - 2] + sampler_tmp1["interface"] = self.interface + self.end_state["sampler"].append(sampler_tmp1) + + def get_end_statistic_record(self): + """get end netstream statistic record parameter""" + + if self.statistics_record and self.statistics_direction: + self.module.fail_json( + msg='Error: The statistic direction and record can not exist at the same time.') + statistic_tmp = dict() + statistic_tmp1 = dict() + statistic_tmp["statistics_record"] = list() + statistic_tmp["interface"] = self.interface + statistic_tmp1["statistics_record"] = list() + statistic_tmp1["interface"] = self.interface + flags = list() + exp = r" | ignore-case section include ^#\s+interface %s" \ + r" | include netstream record"\ + % 
(self.interface) + flags.append(exp) + config = get_config(self.module, flags) + if not config: + statistic_tmp["type"] = "ip" + self.end_state["flexible_statistic"].append(statistic_tmp) + statistic_tmp1["type"] = "vxlan" + self.end_state["flexible_statistic"].append(statistic_tmp1) + else: + config = config.lstrip() + config_list = config.split('\n') + for config_mem in config_list: + config_mem = config_mem.lstrip() + statistic_tmp["statistics_record"] = list() + config_mem_list = config_mem.split(' ') + if len(config_mem_list) > 3 and str(config_mem_list[3]) == "ip": + statistic_tmp["statistics_record"].append( + str(config_mem_list[2])) + statistic_tmp["type"] = "ip" + self.end_state["flexible_statistic"].append(statistic_tmp) + for config_mem in config_list: + statistic_tmp1["statistics_record"] = list() + config_mem = config_mem.lstrip() + config_mem_list = config_mem.split(' ') + if len(config_mem_list) > 3 and str(config_mem_list[3]) == "vxlan": + statistic_tmp1["statistics_record"].append( + str(config_mem_list[2])) + statistic_tmp1["type"] = "vxlan" + self.end_state["flexible_statistic"].append(statistic_tmp1) + + def get_end_interface_statistic(self): + """get end netstream interface statistic parameters""" + + statistic_tmp1 = dict() + statistic_tmp1["statistics_direction"] = list() + flags = list() + exp = r" | ignore-case section include ^#\s+interface %s" \ + r" | include netstream inbound|outbound"\ + % self.interface + flags.append(exp) + config = get_config(self.module, flags) + if not config: + statistic_tmp1["type"] = "null" + else: + statistic_tmp1["type"] = "ip" + config = config.lstrip() + config_list = config.split('\n') + for config_mem in config_list: + config_mem = config_mem.lstrip() + config_mem_list = config_mem.split(' ') + if len(config_mem_list) > 1: + statistic_tmp1["statistics_direction"].append( + str(config_mem_list[1])) + statistic_tmp1["interface"] = self.interface + self.end_state["statistic"].append(statistic_tmp1) + + def get_end_index_switch(self): + """get end netstream index switch""" + + index_switch_tmp = dict() + index_switch_tmp1 = dict() + index_switch_tmp["index-switch"] = "16" + index_switch_tmp["type"] = "ip" + index_switch_tmp1["index-switch"] = "16" + index_switch_tmp1["type"] = "vxlan" + flags = list() + exp = " | ignore-case include index-switch" + flags.append(exp) + config = get_config(self.module, flags) + if not config: + self.end_state["index-switch"].append(index_switch_tmp) + self.end_state["index-switch"].append(index_switch_tmp1) + else: + config = config.lstrip() + config_list = config.split('\n') + for config_mem in config_list: + config_mem_list = config_mem.split(' ') + if len(config_mem_list) > 2 and str(config_mem_list[2]) == "ip": + index_switch_tmp["index-switch"] = "32" + index_switch_tmp["type"] = "ip" + if len(config_mem_list) > 2 and str(config_mem_list[2]) == "vxlan": + index_switch_tmp1["index-switch"] = "32" + index_switch_tmp1["type"] = "vxlan" + self.end_state["index-switch"].append(index_switch_tmp) + self.end_state["index-switch"].append(index_switch_tmp1) + + def check_params(self): + """check all input params""" + + # netstream parameters check + if not get_interface_type(self.interface): + self.module.fail_json( + msg='Error: Interface name of %s is error.' 
% self.interface) + if self.sampler_interval: + if not str(self.sampler_interval).isdigit(): + self.module.fail_json( + msg='Error: Active interval should be numerical.') + if int(self.sampler_interval) < 1 or int(self.sampler_interval) > 65535: + self.module.fail_json( + msg="Error: Sampler interval should between 1 - 65535.") + if self.statistics_record: + if len(self.statistics_record) < 1 or len(self.statistics_record) > 32: + self.module.fail_json( + msg="Error: Statistic record length should between 1 - 32.") + if self.interface == "all": + if self.statistics_record or self.statistics_direction: + self.module.fail_json( + msg="Error: Statistic function should be used at interface.") + if self.statistics_direction: + if self.type == "vxlan": + self.module.fail_json( + msg="Error: Vxlan do not support inbound or outbound statistic.") + if (self.sampler_interval and not self.sampler_direction) \ + or (self.sampler_direction and not self.sampler_interval): + self.module.fail_json( + msg="Error: Sampler interval and direction must be set at the same time.") + + if self.statistics_record and not self.type: + self.module.fail_json( + msg="Error: Statistic type and record must be set at the same time.") + + self.get_exist_record() + if self.statistics_record: + if self.type == "ip": + if self.statistics_record not in self.existing["ip_record"]: + self.module.fail_json( + msg="Error: The statistic record is not exist.") + if self.type == "vxlan": + if self.statistics_record not in self.existing["vxlan_record"]: + self.module.fail_json( + msg="Error: The statistic record is not exist.") + + def get_proposed(self): + """get proposed info""" + + if self.type: + self.proposed["type"] = self.type + if self.interface: + self.proposed["interface"] = self.interface + if self.sampler_interval: + self.proposed["sampler_interval"] = self.sampler_interval + if self.sampler_direction: + self.proposed["sampler_direction"] = self.sampler_direction + if self.statistics_direction: + self.proposed["statistics_direction"] = self.statistics_direction + if self.statistics_record: + self.proposed["statistics_record"] = self.statistics_record + if self.index_switch: + self.proposed["index_switch"] = self.index_switch + if self.state: + self.proposed["state"] = self.state + + def get_existing(self): + """get existing info""" + sampler_tmp = dict() + statistic_tmp = dict() + statistic_tmp1 = dict() + index_tmp = dict() + temp = False + + self.get_exist_sampler_interval() + self.get_exist_interface_statistic() + self.get_exist_statistic_record() + self.get_exist_index_switch() + + if self.state == "present": + for sampler_tmp in self.existing["sampler"]: + if self.interface == str(sampler_tmp["interface"]): + temp = True + if (self.sampler_interval and str(sampler_tmp["sampler_interval"]) != self.sampler_interval) \ + or (self.sampler_direction and + str(sampler_tmp["sampler_direction"]) != self.sampler_direction): + self.sampler_changed = True + if not temp: + if self.sampler_direction or self.sampler_interval: + self.sampler_changed = True + for statistic_tmp in self.existing["statistic"]: + if str(statistic_tmp["interface"]) == self.interface and self.interface != "all": + if self.type == "vxlan": + if statistic_tmp["statistics_direction"] \ + and 'outbound' in statistic_tmp["statistics_direction"]: + self.module.fail_json( + msg='Error: The NetStream record vxlan ' + 'cannot be configured because the port has been configured NetStream outbound ip.') + if statistic_tmp["statistics_direction"] and 
self.statistics_direction: + if self.statistics_direction not in statistic_tmp["statistics_direction"]: + self.statistic_changed = True + else: + if self.statistics_direction: + self.statistic_changed = True + for statistic_tmp1 in self.existing["flexible_statistic"]: + if self.interface != "all" \ + and self.type == str(statistic_tmp1["type"]) \ + and self.interface == str(statistic_tmp1["interface"]): + if statistic_tmp1["statistics_record"] and self.statistics_record: + if self.statistics_record not in statistic_tmp1["statistics_record"]: + self.flexible_changed = True + else: + if self.statistics_record: + self.flexible_changed = True + for index_tmp in self.existing["index-switch"]: + if self.type == str(index_tmp["type"]): + if self.index_switch != str(index_tmp["index-switch"]): + self.index_switch_changed = True + else: + for sampler_tmp in self.existing["sampler"]: + if self.interface == str(sampler_tmp["interface"]): + if (self.sampler_interval and str(sampler_tmp["sampler_interval"]) == self.sampler_interval) \ + and (self.sampler_direction and str(sampler_tmp["sampler_direction"]) == self.sampler_direction): + self.sampler_changed = True + for statistic_tmp in self.existing["statistic"]: + if str(statistic_tmp["interface"]) == self.interface and self.interface != "all": + if len(statistic_tmp["statistics_direction"]) and self.statistics_direction: + if self.statistics_direction in statistic_tmp["statistics_direction"]: + self.statistic_changed = True + for statistic_tmp1 in self.existing["flexible_statistic"]: + if self.interface != "all" \ + and self.type == str(statistic_tmp1["type"]) \ + and self.interface == str(statistic_tmp1["interface"]): + if len(statistic_tmp1["statistics_record"]) and self.statistics_record: + if self.statistics_record in statistic_tmp1["statistics_record"]: + self.flexible_changed = True + for index_tmp in self.existing["index-switch"]: + if self.type == str(index_tmp["type"]): + if self.index_switch == str(index_tmp["index-switch"]): + if self.index_switch != "16": + self.index_switch_changed = True + + def operate_ns_gloabl(self): + """configure netstream global parameters""" + + cmd = "" + if not self.sampler_changed and not self.statistic_changed \ + and not self.flexible_changed and not self.index_switch_changed: + self.changed = False + return + + if self.sampler_changed is True: + if self.type == "vxlan": + self.module.fail_json( + msg="Error: Netstream do not support vxlan sampler.") + if self.interface != "all": + cmd = "interface %s" % self.interface + self.cli_add_command(cmd) + cmd = "netstream sampler random-packets %s %s" % ( + self.sampler_interval, self.sampler_direction) + if self.state == "present": + self.cli_add_command(cmd) + else: + self.cli_add_command(cmd, undo=True) + if self.interface != "all": + cmd = "quit" + self.cli_add_command(cmd) + if self.statistic_changed is True: + if self.interface != "all": + cmd = "interface %s" % self.interface + self.cli_add_command(cmd) + cmd = "netstream %s ip" % self.statistics_direction + if self.state == "present": + self.cli_add_command(cmd) + else: + self.cli_add_command(cmd, undo=True) + if self.interface != "all": + cmd = "quit" + self.cli_add_command(cmd) + if self.flexible_changed is True: + if self.interface != "all": + cmd = "interface %s" % self.interface + self.cli_add_command(cmd) + if self.state == "present": + for statistic_tmp in self.existing["flexible_statistic"]: + tmp_list = statistic_tmp["statistics_record"] + if self.type == statistic_tmp["type"]: + if self.type == 
"ip": + if len(tmp_list) > 0: + cmd = "netstream record %s ip" % tmp_list[0] + self.cli_add_command(cmd, undo=True) + cmd = "netstream record %s ip" % self.statistics_record + self.cli_add_command(cmd) + if self.type == "vxlan": + if len(tmp_list) > 0: + cmd = "netstream record %s vxlan inner-ip" % tmp_list[ + 0] + self.cli_add_command(cmd, undo=True) + cmd = "netstream record %s vxlan inner-ip" % self.statistics_record + self.cli_add_command(cmd) + else: + if self.type == "ip": + cmd = "netstream record %s ip" % self.statistics_record + self.cli_add_command(cmd, undo=True) + if self.type == "vxlan": + cmd = "netstream record %s vxlan inner-ip" % self.statistics_record + self.cli_add_command(cmd, undo=True) + if self.interface != "all": + cmd = "quit" + self.cli_add_command(cmd) + if self.index_switch_changed is True: + if self.interface != "all": + self.module.fail_json( + msg="Error: Index-switch function should be used globally.") + if self.type == "ip": + cmd = "netstream export ip index-switch %s" % self.index_switch + else: + cmd = "netstream export vxlan inner-ip index-switch %s" % self.index_switch + if self.state == "present": + self.cli_add_command(cmd) + else: + self.cli_add_command(cmd, undo=True) + + if self.commands: + self.cli_load_config(self.commands) + self.changed = True + + def get_end_state(self): + """get end state info""" + + self.get_end_sampler_interval() + self.get_end_interface_statistic() + self.get_end_statistic_record() + self.get_end_index_switch() + + def work(self): + """worker""" + + self.check_params() + self.get_existing() + self.get_proposed() + self.operate_ns_gloabl() + self.get_end_state() + self.results['changed'] = self.changed + self.results['proposed'] = self.proposed + self.results['existing'] = self.existing + self.results['end_state'] = self.end_state + if self.changed: + self.results['updates'] = self.updates_cmd + else: + self.results['updates'] = list() + + self.module.exit_json(**self.results) + + +def main(): + """Module main""" + + argument_spec = dict( + type=dict(required=False, choices=['ip', 'vxlan'], default='ip'), + interface=dict(required=True, type='str'), + sampler_interval=dict(required=False, type='str'), + sampler_direction=dict(required=False, choices=['inbound', 'outbound']), + statistics_direction=dict(required=False, choices=['inbound', 'outbound']), + statistics_record=dict(required=False, type='str'), + index_switch=dict(required=False, choices=['16', '32'], default='16'), + state=dict(required=False, choices=['present', 'absent'], default='present'), + ) + argument_spec.update(ce_argument_spec) + module = NetStreamGlobal(argument_spec) + module.work() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/cloudengine/ce_netstream_template.py b/plugins/modules/network/cloudengine/ce_netstream_template.py new file mode 100644 index 0000000000..5ea943fa6e --- /dev/null +++ b/plugins/modules/network/cloudengine/ce_netstream_template.py @@ -0,0 +1,498 @@ +#!/usr/bin/python +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. 
+# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: ce_netstream_template +short_description: Manages NetStream template configuration on HUAWEI CloudEngine switches. +description: + - Manages NetStream template configuration on HUAWEI CloudEngine switches. +author: + - wangdezhuang (@QijunPan) +notes: + - Recommended connection is C(network_cli). + - This module also works with C(local) connections for legacy playbooks. +options: + state: + description: + - Specify desired state of the resource. + default: present + choices: ['present', 'absent'] + type: + description: + - Configure the type of netstream record. + required: true + choices: ['ip', 'vxlan'] + record_name: + description: + - Configure the name of netstream record. + The value is a string of 1 to 32 case-insensitive characters. + match: + description: + - Configure flexible flow statistics template keywords. + choices: ['destination-address', 'destination-port', 'tos', 'protocol', 'source-address', 'source-port'] + collect_counter: + description: + - Configure the number of packets and bytes that are included in the flexible flow statistics sent to NSC. + choices: ['bytes', 'packets'] + collect_interface: + description: + - Configure the input or output interface that are included in the flexible flow statistics sent to NSC. + choices: ['input', 'output'] + description: + description: + - Configure the description of netstream record. + The value is a string of 1 to 80 case-insensitive characters. 
+''' + +EXAMPLES = ''' +- name: netstream template module test + hosts: cloudengine + connection: local + gather_facts: no + vars: + cli: + host: "{{ inventory_hostname }}" + port: "{{ ansible_ssh_port }}" + username: "{{ username }}" + password: "{{ password }}" + transport: cli + + tasks: + + - name: Config ipv4 netstream record + ce_netstream_template: + state: present + type: ip + record_name: test + provider: "{{ cli }}" + - name: Undo ipv4 netstream record + ce_netstream_template: + state: absent + type: ip + record_name: test + provider: "{{ cli }}" + - name: Config ipv4 netstream record collect_counter + ce_netstream_template: + state: present + type: ip + record_name: test + collect_counter: bytes + provider: "{{ cli }}" + - name: Undo ipv4 netstream record collect_counter + ce_netstream_template: + state: absent + type: ip + record_name: test + collect_counter: bytes + provider: "{{ cli }}" +''' + +RETURN = ''' +changed: + description: check to see if a change was made on the device + returned: always + type: bool + sample: true +proposed: + description: k/v pairs of parameters passed into module + returned: always + type: dict + sample: {"record_name": "test", + "type": "ip", + "state": "present"} +existing: + description: k/v pairs of existing aaa server + returned: always + type: dict + sample: {} +end_state: + description: k/v pairs of aaa params after module execution + returned: always + type: dict + sample: {"record_name": "test", + "type": "ip"} +updates: + description: command sent to the device + returned: always + type: list + sample: ["netstream record test ip"] +''' + +import re +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.network.cloudengine.ce import load_config +from ansible_collections.community.general.plugins.module_utils.network.cloudengine.ce import get_connection, rm_config_prefix +from ansible_collections.community.general.plugins.module_utils.network.cloudengine.ce import ce_argument_spec + + +def get_config(module, flags): + + """Retrieves the current config from the device or cache + """ + flags = [] if flags is None else flags + if isinstance(flags, str): + flags = [flags] + elif not isinstance(flags, list): + flags = [] + + cmd = 'display current-configuration ' + cmd += ' '.join(flags) + cmd = cmd.strip() + conn = get_connection(module) + rc, out, err = conn.exec_command(cmd) + if rc != 0: + module.fail_json(msg=err) + cfg = str(out).strip() + # remove default configuration prefix '~' + for flag in flags: + if "include-default" in flag: + cfg = rm_config_prefix(cfg) + break + if cfg.startswith('display'): + lines = cfg.split('\n') + if len(lines) > 1: + return '\n'.join(lines[1:]) + else: + return '' + return cfg + + +class NetstreamTemplate(object): + """ Manages netstream template configuration """ + + def __init__(self, **kwargs): + """ Netstream template module init """ + + # module + argument_spec = kwargs["argument_spec"] + self.spec = argument_spec + self.module = AnsibleModule(argument_spec=self.spec, supports_check_mode=True) + + # netstream config + self.netstream_cfg = None + + # module args + self.state = self.module.params['state'] or None + self.type = self.module.params['type'] or None + self.record_name = self.module.params['record_name'] or None + self.match = self.module.params['match'] or None + self.collect_counter = self.module.params['collect_counter'] or None + self.collect_interface = self.module.params['collect_interface'] or None + self.description = 
self.module.params['description'] or None + + # state + self.changed = False + self.updates_cmd = list() + self.results = dict() + self.proposed = dict() + self.existing = dict() + self.end_state = dict() + + def cli_load_config(self, commands): + """ Cli load configuration """ + + if not self.module.check_mode: + load_config(self.module, commands) + + def cli_get_netstream_config(self): + """ Cli get netstream configuration """ + + if self.type == "ip": + cmd = "netstream record %s ip" % self.record_name + else: + cmd = "netstream record %s vxlan inner-ip" % self.record_name + flags = list() + regular = "| section include %s" % cmd + flags.append(regular) + self.netstream_cfg = get_config(self.module, flags) + + def check_args(self): + """ Check module args """ + + if not self.type or not self.record_name: + self.module.fail_json( + msg='Error: Please input type and record_name.') + + if self.record_name: + if len(self.record_name) < 1 or len(self.record_name) > 32: + self.module.fail_json( + msg='Error: The len of record_name is out of [1 - 32].') + + if self.description: + if len(self.description) < 1 or len(self.description) > 80: + self.module.fail_json( + msg='Error: The len of description is out of [1 - 80].') + + def get_proposed(self): + """ Get module proposed """ + + self.proposed["state"] = self.state + + if self.type: + self.proposed["type"] = self.type + if self.record_name: + self.proposed["record_name"] = self.record_name + if self.match: + self.proposed["match"] = self.match + if self.collect_counter: + self.proposed["collect_counter"] = self.collect_counter + if self.collect_interface: + self.proposed["collect_interface"] = self.collect_interface + if self.description: + self.proposed["description"] = self.description + + def get_existing(self): + """ Get existing configuration """ + + self.cli_get_netstream_config() + + if self.netstream_cfg is not None and "netstream record" in self.netstream_cfg: + self.existing["type"] = self.type + self.existing["record_name"] = self.record_name + + if self.description: + tmp_value = re.findall(r'description (.*)', self.netstream_cfg) + if tmp_value is not None and len(tmp_value) > 0: + self.existing["description"] = tmp_value[0] + + if self.match: + if self.type == "ip": + tmp_value = re.findall(r'match ip (.*)', self.netstream_cfg) + else: + tmp_value = re.findall(r'match inner-ip (.*)', self.netstream_cfg) + + if tmp_value: + self.existing["match"] = tmp_value + + if self.collect_counter: + tmp_value = re.findall(r'collect counter (.*)', self.netstream_cfg) + if tmp_value: + self.existing["collect_counter"] = tmp_value + + if self.collect_interface: + tmp_value = re.findall(r'collect interface (.*)', self.netstream_cfg) + if tmp_value: + self.existing["collect_interface"] = tmp_value + + def get_end_state(self): + """ Get end state """ + + self.cli_get_netstream_config() + + if self.netstream_cfg is not None and "netstream record" in self.netstream_cfg: + self.end_state["type"] = self.type + self.end_state["record_name"] = self.record_name + + if self.description: + tmp_value = re.findall(r'description (.*)', self.netstream_cfg) + if tmp_value is not None and len(tmp_value) > 0: + self.end_state["description"] = tmp_value[0] + + if self.match: + if self.type == "ip": + tmp_value = re.findall(r'match ip (.*)', self.netstream_cfg) + else: + tmp_value = re.findall(r'match inner-ip (.*)', self.netstream_cfg) + + if tmp_value: + self.end_state["match"] = tmp_value + + if self.collect_counter: + tmp_value = re.findall(r'collect counter 
(.*)', self.netstream_cfg) + if tmp_value: + self.end_state["collect_counter"] = tmp_value + + if self.collect_interface: + tmp_value = re.findall(r'collect interface (.*)', self.netstream_cfg) + if tmp_value: + self.end_state["collect_interface"] = tmp_value + if self.end_state == self.existing: + self.changed = False + self.updates_cmd = list() + + def present_netstream(self): + """ Present netstream configuration """ + + cmds = list() + need_create_record = False + + if self.type == "ip": + cmd = "netstream record %s ip" % self.record_name + else: + cmd = "netstream record %s vxlan inner-ip" % self.record_name + cmds.append(cmd) + + if self.existing.get('record_name') != self.record_name: + self.updates_cmd.append(cmd) + need_create_record = True + + if self.description: + cmd = "description %s" % self.description.strip() + if need_create_record or not self.netstream_cfg or cmd not in self.netstream_cfg: + cmds.append(cmd) + self.updates_cmd.append(cmd) + + if self.match: + if self.type == "ip": + cmd = "match ip %s" % self.match + cfg = "match ip" + else: + cmd = "match inner-ip %s" % self.match + cfg = "match inner-ip" + + if need_create_record or cfg not in self.netstream_cfg or self.match != self.existing["match"][0]: + cmds.append(cmd) + self.updates_cmd.append(cmd) + + if self.collect_counter: + cmd = "collect counter %s" % self.collect_counter + if need_create_record or cmd not in self.netstream_cfg: + cmds.append(cmd) + self.updates_cmd.append(cmd) + + if self.collect_interface: + cmd = "collect interface %s" % self.collect_interface + if need_create_record or cmd not in self.netstream_cfg: + cmds.append(cmd) + self.updates_cmd.append(cmd) + + if cmds: + self.cli_load_config(cmds) + self.changed = True + + def absent_netstream(self): + """ Absent netstream configuration """ + + cmds = list() + absent_netstream_attr = False + + if not self.netstream_cfg: + return + + if self.description or self.match or self.collect_counter or self.collect_interface: + absent_netstream_attr = True + + if absent_netstream_attr: + if self.type == "ip": + cmd = "netstream record %s ip" % self.record_name + else: + cmd = "netstream record %s vxlan inner-ip" % self.record_name + + cmds.append(cmd) + + if self.description: + cfg = "description %s" % self.description + if self.netstream_cfg and cfg in self.netstream_cfg: + cmd = "undo description %s" % self.description + cmds.append(cmd) + self.updates_cmd.append(cmd) + + if self.match: + if self.type == "ip": + cfg = "match ip %s" % self.match + else: + cfg = "match inner-ip %s" % self.match + if self.netstream_cfg and cfg in self.netstream_cfg: + if self.type == "ip": + cmd = "undo match ip %s" % self.match + else: + cmd = "undo match inner-ip %s" % self.match + cmds.append(cmd) + self.updates_cmd.append(cmd) + + if self.collect_counter: + cfg = "collect counter %s" % self.collect_counter + if self.netstream_cfg and cfg in self.netstream_cfg: + cmd = "undo collect counter %s" % self.collect_counter + cmds.append(cmd) + self.updates_cmd.append(cmd) + + if self.collect_interface: + cfg = "collect interface %s" % self.collect_interface + if self.netstream_cfg and cfg in self.netstream_cfg: + cmd = "undo collect interface %s" % self.collect_interface + cmds.append(cmd) + self.updates_cmd.append(cmd) + + if len(cmds) > 1: + self.cli_load_config(cmds) + self.changed = True + + else: + if self.type == "ip": + cmd = "undo netstream record %s ip" % self.record_name + else: + cmd = "undo netstream record %s vxlan inner-ip" % self.record_name + + 
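# With no individual attributes supplied, the whole record is removed in a
+ # single command instead of attribute by attribute. Reusing record_name
+ # "test" from the EXAMPLES above, the generated CLI would be:
+ #   undo netstream record test ip
+ # or, when type is "vxlan":
+ #   undo netstream record test vxlan inner-ip
+ 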
cmds.append(cmd) + self.updates_cmd.append(cmd) + + self.cli_load_config(cmds) + self.changed = True + + def work(self): + """ Work function """ + + self.check_args() + self.get_proposed() + self.get_existing() + + if self.state == "present": + self.present_netstream() + else: + self.absent_netstream() + + self.get_end_state() + + self.results['changed'] = self.changed + self.results['proposed'] = self.proposed + self.results['existing'] = self.existing + self.results['end_state'] = self.end_state + self.results['updates'] = self.updates_cmd + + self.module.exit_json(**self.results) + + +def main(): + """ Module main """ + + argument_spec = dict( + state=dict(choices=['present', 'absent'], default='present'), + type=dict(choices=['ip', 'vxlan'], required=True), + record_name=dict(type='str'), + match=dict(choices=['destination-address', 'destination-port', + 'tos', 'protocol', 'source-address', 'source-port']), + collect_counter=dict(choices=['bytes', 'packets']), + collect_interface=dict(choices=['input', 'output']), + description=dict(type='str') + ) + argument_spec.update(ce_argument_spec) + module = NetstreamTemplate(argument_spec=argument_spec) + module.work() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/cloudengine/ce_ntp.py b/plugins/modules/network/cloudengine/ce_ntp.py new file mode 100644 index 0000000000..bc4a5a966c --- /dev/null +++ b/plugins/modules/network/cloudengine/ce_ntp.py @@ -0,0 +1,619 @@ +#!/usr/bin/python +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: ce_ntp +short_description: Manages core NTP configuration on HUAWEI CloudEngine switches. +description: + - Manages core NTP configuration on HUAWEI CloudEngine switches. +author: + - Zhijin Zhou (@QijunPan) +notes: + - This module requires the netconf system service be enabled on the remote device being managed. + - Recommended connection is C(netconf). + - This module also works with C(local) connections for legacy playbooks. +options: + server: + description: + - Network address of NTP server. + peer: + description: + - Network address of NTP peer. + key_id: + description: + - Authentication key identifier to use with given NTP server or peer. + is_preferred: + description: + - Makes given NTP server or peer the preferred NTP server or peer for the device. + choices: ['enable', 'disable'] + vpn_name: + description: + - Makes the device communicate with the given + NTP server or peer over a specific vpn. + default: '_public_' + source_int: + description: + - Local source interface from which NTP messages are sent. + Must be fully qualified interface name, i.e. C(40GE1/0/22), C(vlanif10). 
+ Interface types, such as C(10GE), C(40GE), C(100GE), C(Eth-Trunk), C(LoopBack), + C(MEth), C(NULL), C(Tunnel), C(Vlanif). + state: + description: + - Manage the state of the resource. + default: present + choices: ['present','absent'] +''' + +EXAMPLES = ''' +- name: NTP test + hosts: cloudengine + connection: local + gather_facts: no + vars: + cli: + host: "{{ inventory_hostname }}" + port: "{{ ansible_ssh_port }}" + username: "{{ username }}" + password: "{{ password }}" + transport: cli + + tasks: + + - name: "Set NTP Server with parameters" + ce_ntp: + server: 192.8.2.6 + vpn_name: js + source_int: vlanif4001 + is_preferred: enable + key_id: 32 + provider: "{{ cli }}" + + - name: "Set NTP Peer with parameters" + ce_ntp: + peer: 192.8.2.6 + vpn_name: js + source_int: vlanif4001 + is_preferred: enable + key_id: 32 + provider: "{{ cli }}" +''' + +RETURN = ''' +proposed: + description: k/v pairs of parameters passed into module + returned: always + type: dict + sample: {"server": "2.2.2.2", "key_id": "48", + "is_preferred": "enable", "vpn_name":"js", + "source_int": "vlanif4002", "state":"present"} +existing: + description: k/v pairs of existing ntp server/peer + returned: always + type: dict + sample: {"server": "2.2.2.2", "key_id": "32", + "is_preferred": "disable", "vpn_name":"js", + "source_int": "vlanif4002"} +end_state: + description: k/v pairs of ntp info after module execution + returned: always + type: dict + sample: {"server": "2.2.2.2", "key_id": "48", + "is_preferred": "enable", "vpn_name":"js", + "source_int": "vlanif4002"} +updates: + description: command sent to the device + returned: always + type: list + sample: ["ntp server 2.2.2.2 authentication-keyid 48 source-interface vlanif4002 vpn-instance js preferred"] +changed: + description: check to see if a change was made on the device + returned: always + type: bool + sample: true +''' + +import re +from xml.etree import ElementTree +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.network.cloudengine.ce import ce_argument_spec, get_nc_config, set_nc_config + +CE_NC_GET_NTP_CONFIG = """ + + + + + + + + + + + + + + + + +""" + +CE_NC_MERGE_NTP_CONFIG = """ + + + + + %s + %s + %s + %s + %s + %s + %s + %s + 0-0 + + + + +""" + +CE_NC_DELETE_NTP_CONFIG = """ + + + + + %s + %s + %s + %s + %s + 0-0 + + + + +""" + + +def get_interface_type(interface): + """Gets the type of interface, such as 10GE, ETH-TRUNK, VLANIF...""" + + if interface is None: + return None + + iftype = None + + if interface.upper().startswith('GE'): + iftype = 'ge' + elif interface.upper().startswith('10GE'): + iftype = '10ge' + elif interface.upper().startswith('25GE'): + iftype = '25ge' + elif interface.upper().startswith('4X10GE'): + iftype = '4x10ge' + elif interface.upper().startswith('40GE'): + iftype = '40ge' + elif interface.upper().startswith('100GE'): + iftype = '100ge' + elif interface.upper().startswith('VLANIF'): + iftype = 'vlanif' + elif interface.upper().startswith('LOOPBACK'): + iftype = 'loopback' + elif interface.upper().startswith('METH'): + iftype = 'meth' + elif interface.upper().startswith('ETH-TRUNK'): + iftype = 'eth-trunk' + elif interface.upper().startswith('VBDIF'): + iftype = 'vbdif' + elif interface.upper().startswith('NVE'): + iftype = 'nve' + elif interface.upper().startswith('TUNNEL'): + iftype = 'tunnel' + elif interface.upper().startswith('ETHERNET'): + iftype = 'ethernet' + elif interface.upper().startswith('FCOE-PORT'): + iftype = 'fcoe-port' + elif 
interface.upper().startswith('FABRIC-PORT'): + iftype = 'fabric-port' + elif interface.upper().startswith('STACK-PORT'): + iftype = 'stack-Port' + elif interface.upper().startswith('NULL'): + iftype = 'null' + else: + return None + + return iftype.lower() + + +class Ntp(object): + """Ntp class""" + + def __init__(self, argument_spec): + self.spec = argument_spec + self.module = None + self.mutually_exclusive = [('server', 'peer')] + self.init_module() + + # ntp configuration info + self.server = self.module.params['server'] or None + self.peer = self.module.params['peer'] or None + self.key_id = self.module.params['key_id'] + self.is_preferred = self.module.params['is_preferred'] + self.vpn_name = self.module.params['vpn_name'] + self.interface = self.module.params['source_int'] or "" + self.state = self.module.params['state'] + self.ntp_conf = dict() + self.conf_exsit = False + self.ip_ver = 'IPv4' + + if self.server: + self.peer_type = 'Server' + self.address = self.server + elif self.peer: + self.peer_type = 'Peer' + self.address = self.peer + else: + self.peer_type = None + self.address = None + + self.check_params() + + # state + self.changed = False + self.updates_cmd = list() + self.results = dict() + self.proposed = dict() + self.existing = list() + self.end_state = list() + + self.init_data() + + def init_data(self): + """Init data""" + + if self.interface is not None: + self.interface = self.interface.lower() + + if not self.key_id: + self.key_id = "" + + if not self.is_preferred: + self.is_preferred = 'disable' + + def init_module(self): + """Init module""" + + required_one_of = [("server", "peer")] + self.module = AnsibleModule( + argument_spec=self.spec, + supports_check_mode=True, + required_one_of=required_one_of, + mutually_exclusive=self.mutually_exclusive + ) + + def check_ipaddr_validate(self): + """Check ipaddress validate""" + + rule1 = r'(25[0-5]|2[0-4][0-9]|1[0-9][0-9]|[1-9]?[0-9])\.' + rule2 = r'(25[0-5]|2[0-4][0-9]|1[0-9][0-9]|[1-9]?[0-9])' + ipv4_regex = '%s%s%s%s%s%s' % ('^', rule1, rule1, rule1, rule2, '$') + ipv6_regex = '^(?:[a-fA-F0-9]{1,4}:){7}[a-fA-F0-9]{1,4}$' + + flag = False + if bool(re.match(ipv4_regex, self.address)): + flag = True + self.ip_ver = "IPv4" + if not self.ntp_ucast_ipv4_validate(): + flag = False + elif bool(re.match(ipv6_regex, self.address)): + flag = True + self.ip_ver = "IPv6" + else: + flag = True + self.ip_ver = "IPv6" + + if not flag: + if self.peer_type == "Server": + self.module.fail_json(msg='Error: Illegal server ip-address.') + else: + self.module.fail_json(msg='Error: Illegal peer ip-address.') + + def ntp_ucast_ipv4_validate(self): + """Check ntp ucast ipv4 address""" + + addr_list = re.findall(r'(.*)\.(.*)\.(.*)\.(.*)', self.address) + if not addr_list: + self.module.fail_json(msg='Error: Match ip-address fail.') + + value = ((int(addr_list[0][0])) * 0x1000000) + (int(addr_list[0][1]) * 0x10000) + \ + (int(addr_list[0][2]) * 0x100) + (int(addr_list[0][3])) + if (value & (0xff000000) == 0x7f000000) or (value & (0xF0000000) == 0xF0000000) \ + or (value & (0xF0000000) == 0xE0000000) or (value == 0): + return False + return True + + def check_params(self): + """Check all input params""" + + # check interface type + if self.interface: + intf_type = get_interface_type(self.interface) + if not intf_type: + self.module.fail_json( + msg='Error: Interface name of %s ' + 'is error.' 
% self.interface) + + if self.vpn_name: + if (len(self.vpn_name) < 1) or (len(self.vpn_name) > 31): + self.module.fail_json( + msg='Error: VPN name length is between 1 and 31.') + + if self.address: + self.check_ipaddr_validate() + + def check_response(self, xml_str, xml_name): + """Check if response message is already succeed.""" + + if "" not in xml_str: + self.module.fail_json(msg='Error: %s failed.' % xml_name) + + def set_ntp(self, *args): + """Configure ntp parameters""" + + if self.state == 'present': + if self.ip_ver == 'IPv4': + xml_str = CE_NC_MERGE_NTP_CONFIG % ( + args[0], args[1], '::', args[2], args[3], args[4], args[5], args[6]) + elif self.ip_ver == 'IPv6': + xml_str = CE_NC_MERGE_NTP_CONFIG % ( + args[0], '0.0.0.0', args[1], args[2], args[3], args[4], args[5], args[6]) + ret_xml = set_nc_config(self.module, xml_str) + self.check_response(ret_xml, "NTP_CORE_CONFIG") + else: + if self.ip_ver == 'IPv4': + xml_str = CE_NC_DELETE_NTP_CONFIG % ( + args[0], args[1], '::', args[2], args[3]) + elif self.ip_ver == 'IPv6': + xml_str = CE_NC_DELETE_NTP_CONFIG % ( + args[0], '0.0.0.0', args[1], args[2], args[3]) + ret_xml = set_nc_config(self.module, xml_str) + self.check_response(ret_xml, "UNDO_NTP_CORE_CONFIG") + + def config_ntp(self): + """Config ntp""" + + if self.state == "present": + if self.address and not self.conf_exsit: + if self.is_preferred == 'enable': + is_preferred = 'true' + else: + is_preferred = 'false' + self.set_ntp(self.ip_ver, self.address, self.peer_type, + self.vpn_name, self.key_id, is_preferred, self.interface) + self.changed = True + else: + if self.address: + self.set_ntp(self.ip_ver, self.address, + self.peer_type, self.vpn_name, '', '', '') + self.changed = True + + def show_result(self): + """Show result""" + + self.results['changed'] = self.changed + self.results['proposed'] = self.proposed + self.results['existing'] = self.existing + self.results['end_state'] = self.end_state + if self.changed: + self.results['updates'] = self.updates_cmd + else: + self.results['updates'] = list() + + self.module.exit_json(**self.results) + + def get_ntp_exist_config(self): + """Get ntp existed configure""" + + ntp_config = list() + conf_str = CE_NC_GET_NTP_CONFIG + con_obj = get_nc_config(self.module, conf_str) + if "" in con_obj: + return ntp_config + + xml_str = con_obj.replace('\r', '').replace('\n', '').\ + replace('xmlns="urn:ietf:params:xml:ns:netconf:base:1.0"', "").\ + replace('xmlns="http://www.huawei.com/netconf/vrp"', "") + + # get all ntp config info + root = ElementTree.fromstring(xml_str) + ntpsite = root.findall("ntp/ntpUCastCfgs/ntpUCastCfg") + for nexthop in ntpsite: + ntp_dict = dict() + for ele in nexthop: + if ele.tag in ["addrFamily", "vpnName", "ifName", "ipv4Addr", + "ipv6Addr", "type", "isPreferred", "keyId"]: + ntp_dict[ele.tag] = ele.text + + ip_addr = ntp_dict['ipv6Addr'] + if ntp_dict['addrFamily'] == "IPv4": + ip_addr = ntp_dict['ipv4Addr'] + if ntp_dict['ifName'] is None: + ntp_dict['ifName'] = "" + if ntp_dict['isPreferred'] == 'true': + is_preferred = 'enable' + else: + is_preferred = 'disable' + + if self.state == "present": + key_id = ntp_dict['keyId'] or "" + cur_ntp_cfg = dict(vpn_name=ntp_dict['vpnName'], source_int=ntp_dict['ifName'].lower(), address=ip_addr, + peer_type=ntp_dict['type'], prefer=is_preferred, key_id=key_id) + exp_ntp_cfg = dict(vpn_name=self.vpn_name, source_int=self.interface.lower(), address=self.address, + peer_type=self.peer_type, prefer=self.is_preferred, key_id=self.key_id) + if cur_ntp_cfg == exp_ntp_cfg: + 
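# Idempotency check: when every normalized field of the current entry
+ # (vpn name, source interface, address, peer type, preference, key id)
+ # matches the requested configuration, the flag below is raised so that
+ # config_ntp() skips pushing a duplicate NETCONF edit to the device.
+ 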
self.conf_exsit = True + + vpn_name = ntp_dict['vpnName'] + if ntp_dict['vpnName'] == "_public_": + vpn_name = None + + if_name = ntp_dict['ifName'] + if if_name == "": + if_name = None + if self.peer_type == 'Server': + ntp_config.append(dict(vpn_name=vpn_name, + source_int=if_name, server=ip_addr, + is_preferred=is_preferred, key_id=ntp_dict['keyId'])) + else: + ntp_config.append(dict(vpn_name=vpn_name, + source_int=if_name, peer=ip_addr, + is_preferred=is_preferred, key_id=ntp_dict['keyId'])) + + return ntp_config + + def get_existing(self): + """Get existing info""" + + if self.address: + self.existing = self.get_ntp_exist_config() + + def get_proposed(self): + """Get proposed info""" + + if self.address: + vpn_name = self.vpn_name + if vpn_name == "_public_": + vpn_name = None + + if_name = self.interface + if if_name == "": + if_name = None + + key_id = self.key_id + if key_id == "": + key_id = None + if self.peer_type == 'Server': + self.proposed = dict(state=self.state, vpn_name=vpn_name, + source_int=if_name, server=self.address, + is_preferred=self.is_preferred, key_id=key_id) + else: + self.proposed = dict(state=self.state, vpn_name=vpn_name, + source_int=if_name, peer=self.address, + is_preferred=self.is_preferred, key_id=key_id) + + def get_end_state(self): + """Get end state info""" + + if self.address: + self.end_state = self.get_ntp_exist_config() + + def get_update_cmd(self): + """Get updated commands""" + + if self.conf_exsit: + return + + cli_str = "" + if self.state == "present": + if self.address: + if self.peer_type == 'Server': + if self.ip_ver == "IPv4": + cli_str = "%s %s" % ( + "ntp unicast-server", self.address) + else: + cli_str = "%s %s" % ( + "ntp unicast-server ipv6", self.address) + elif self.peer_type == 'Peer': + if self.ip_ver == "IPv4": + cli_str = "%s %s" % ("ntp unicast-peer", self.address) + else: + cli_str = "%s %s" % ( + "ntp unicast-peer ipv6", self.address) + + if self.key_id: + cli_str = "%s %s %s" % ( + cli_str, "authentication-keyid", self.key_id) + if self.interface: + cli_str = "%s %s %s" % ( + cli_str, "source-interface", self.interface) + if (self.vpn_name) and (self.vpn_name != '_public_'): + cli_str = "%s %s %s" % ( + cli_str, "vpn-instance", self.vpn_name) + if self.is_preferred == "enable": + cli_str = "%s %s" % (cli_str, "preferred") + else: + if self.address: + if self.peer_type == 'Server': + if self.ip_ver == "IPv4": + cli_str = "%s %s" % ( + "undo ntp unicast-server", self.address) + else: + cli_str = "%s %s" % ( + "undo ntp unicast-server ipv6", self.address) + elif self.peer_type == 'Peer': + if self.ip_ver == "IPv4": + cli_str = "%s %s" % ( + "undo ntp unicast-peer", self.address) + else: + cli_str = "%s %s" % ( + "undo ntp unicast-peer ipv6", self.address) + if (self.vpn_name) and (self.vpn_name != '_public_'): + cli_str = "%s %s %s" % ( + cli_str, "vpn-instance", self.vpn_name) + + self.updates_cmd.append(cli_str) + + def work(self): + """Execute task""" + + self.get_existing() + self.get_proposed() + + self.config_ntp() + + self.get_update_cmd() + self.get_end_state() + self.show_result() + + +def main(): + """Main function entry""" + + argument_spec = dict( + server=dict(type='str'), + peer=dict(type='str'), + key_id=dict(type='str'), + is_preferred=dict(type='str', choices=['enable', 'disable']), + vpn_name=dict(type='str', default='_public_'), + source_int=dict(type='str'), + state=dict(choices=['absent', 'present'], default='present'), + ) + argument_spec.update(ce_argument_spec) + ntp_obj = Ntp(argument_spec) + 
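# work() drives the full flow: gather existing state, build the proposed
+ # config, apply it over NETCONF, then report. A minimal task from the
+ # EXAMPLES above that exercises this entry point:
+ #   - ce_ntp:
+ #       server: 192.8.2.6
+ #       vpn_name: js
+ #       source_int: vlanif4001
+ #       is_preferred: enable
+ #       key_id: 32
+ #       provider: "{{ cli }}"
+ 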
ntp_obj.work() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/cloudengine/ce_ntp_auth.py b/plugins/modules/network/cloudengine/ce_ntp_auth.py new file mode 100644 index 0000000000..c7aa6ecd07 --- /dev/null +++ b/plugins/modules/network/cloudengine/ce_ntp_auth.py @@ -0,0 +1,520 @@ +#!/usr/bin/python +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- + +module: ce_ntp_auth +short_description: Manages NTP authentication configuration on HUAWEI CloudEngine switches. +description: + - Manages NTP authentication configuration on HUAWEI CloudEngine switches. +author: + - Zhijin Zhou (@QijunPan) +notes: + - If C(state=absent), the module will attempt to remove the given key configuration. + If a matching key configuration isn't found on the device, the module will fail. + - If C(state=absent) and C(authentication=on), authentication will be turned on. + - If C(state=absent) and C(authentication=off), authentication will be turned off. + - Recommended connection is C(network_cli). + - This module also works with C(local) connections for legacy playbooks. +options: + key_id: + description: + - Authentication key identifier (numeric). + required: true + auth_pwd: + description: + - Plain text with length of 1 to 255, encrypted text with length of 20 to 392. + auth_mode: + description: + - Specify authentication algorithm. + choices: ['hmac-sha256', 'md5'] + auth_type: + description: + - Whether the given password is in cleartext or + has been encrypted. If in cleartext, the device + will encrypt it before storing it. + default: encrypt + choices: ['text', 'encrypt'] + trusted_key: + description: + - Whether the given key is required to be supplied by a time source + for the device to synchronize to the time source. + default: 'disable' + choices: ['enable', 'disable'] + authentication: + description: + - Configure ntp authentication enable or unconfigure ntp authentication enable. + choices: ['enable', 'disable'] + state: + description: + - Manage the state of the resource. 
+ default: present + choices: ['present','absent'] +''' + +EXAMPLES = ''' +- name: NTP AUTH test + hosts: cloudengine + connection: local + gather_facts: no + vars: + cli: + host: "{{ inventory_hostname }}" + port: "{{ ansible_ssh_port }}" + username: "{{ username }}" + password: "{{ password }}" + transport: cli + + tasks: + + - name: "Configure ntp authentication key-id" + ce_ntp_auth: + key_id: 32 + auth_mode: md5 + auth_pwd: 11111111111111111111111 + provider: "{{ cli }}" + + - name: "Configure ntp authentication key-id and trusted authentication keyid" + ce_ntp_auth: + key_id: 32 + auth_mode: md5 + auth_pwd: 11111111111111111111111 + trusted_key: enable + provider: "{{ cli }}" + + - name: "Configure ntp authentication key-id and authentication enable" + ce_ntp_auth: + key_id: 32 + auth_mode: md5 + auth_pwd: 11111111111111111111111 + authentication: enable + provider: "{{ cli }}" + + - name: "Unconfigure ntp authentication key-id and trusted authentication keyid" + ce_ntp_auth: + key_id: 32 + state: absent + provider: "{{ cli }}" + + - name: "Unconfigure ntp authentication key-id and authentication enable" + ce_ntp_auth: + key_id: 32 + authentication: enable + state: absent + provider: "{{ cli }}" +''' + +RETURN = ''' +proposed: + description: k/v pairs of parameters passed into module + returned: always + type: dict + sample: { + "auth_type": "text", + "authentication": "enable", + "key_id": "32", + "auth_pwd": "1111", + "auth_mode": "md5", + "trusted_key": "enable", + "state": "present" + } +existing: + description: k/v pairs of existing ntp authentication + returned: always + type: dict + sample: { + "authentication": "off", + "authentication-keyid": [ + { + "auth_mode": "md5", + "key_id": "1", + "trusted_key": "disable" + } + ] + } +end_state: + description: k/v pairs of ntp authentication after module execution + returned: always + type: dict + sample: { + "authentication": "off", + "authentication-keyid": [ + { + "auth_mode": "md5", + "key_id": "1", + "trusted_key": "disable" + }, + { + "auth_mode": "md5", + "key_id": "32", + "trusted_key": "enable" + } + ] + } +state: + description: state as sent in from the playbook + returned: always + type: str + sample: "present" +updates: + description: command sent to the device + returned: always + type: list + sample: [ + "ntp authentication-key 32 md5 1111", + "ntp trusted-key 32", + "ntp authentication enable" + ] +changed: + description: check to see if a change was made on the device + returned: always + type: bool + sample: true +''' + +import copy +import re + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.network.cloudengine.ce import ce_argument_spec, load_config +from ansible.module_utils.connection import exec_command + + +class NtpAuth(object): + """Manage ntp authentication""" + + def __init__(self, argument_spec): + self.spec = argument_spec + self.module = None + self.init_module() + + # ntp_auth configuration info + self.key_id = self.module.params['key_id'] + self.password = self.module.params['auth_pwd'] or None + self.auth_mode = self.module.params['auth_mode'] or None + self.auth_type = self.module.params['auth_type'] + self.trusted_key = self.module.params['trusted_key'] + self.authentication = self.module.params['authentication'] or None + self.state = self.module.params['state'] + self.check_params() + + self.ntp_auth_conf = dict() + self.key_id_exist = False + self.cur_trusted_key = 'disable' + + # state + self.changed = False + 
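# Result bookkeeping, mirroring the other CloudEngine modules in this
+ # collection: proposed, existing and end_state snapshots plus the list of
+ # CLI updates actually sent. Note that get_ntp_auth_exist_config() below
+ # runs once at construction time, so every later step diffs against the
+ # device state as it was before any change.
+ 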
self.updates_cmd = list() + self.results = dict() + self.proposed = dict() + self.existing = list() + self.end_state = list() + + self.get_ntp_auth_exist_config() + + def check_params(self): + """Check all input params""" + + if not self.key_id.isdigit(): + self.module.fail_json( + msg='Error: key_id is not digit.') + + if (int(self.key_id) < 1) or (int(self.key_id) > 4294967295): + self.module.fail_json( + msg='Error: The length of key_id is between 1 and 4294967295.') + if self.state == "present" and not self.password: + self.module.fail_json( + msg='Error: The password cannot be empty.') + if self.state == "present" and self.password: + if (self.auth_type == 'encrypt') and\ + ((len(self.password) < 20) or (len(self.password) > 392)): + self.module.fail_json( + msg='Error: The length of encrypted password is between 20 and 392.') + elif (self.auth_type == 'text') and\ + ((len(self.password) < 1) or (len(self.password) > 255)): + self.module.fail_json( + msg='Error: The length of text password is between 1 and 255.') + + def init_module(self): + """Init module object""" + + required_if = [("state", "present", ("auth_pwd", "auth_mode"))] + self.module = AnsibleModule( + argument_spec=self.spec, + required_if=required_if, + supports_check_mode=True + ) + + def get_config(self, flags=None): + """Retrieves the current config from the device or cache + """ + flags = [] if flags is None else flags + + cmd = 'display current-configuration ' + cmd += ' '.join(flags) + cmd = cmd.strip() + + rc, out, err = exec_command(self.module, cmd) + if rc != 0: + self.module.fail_json(msg=err) + cfg = str(out).strip() + + return cfg + + def get_ntp_auth_enable(self): + """Get ntp authentication enable state""" + + flags = list() + exp = "| exclude undo | include ntp authentication" + flags.append(exp) + config = self.get_config(flags) + auth_en = re.findall( + r'.*ntp\s*authentication\s*enable.*', config) + if auth_en: + self.ntp_auth_conf['authentication'] = 'enable' + else: + self.ntp_auth_conf['authentication'] = 'disable' + + def get_ntp_all_auth_keyid(self): + """Get all authentication keyid info""" + + ntp_auth_conf = list() + + flags = list() + exp = "| include authentication-keyid %s" % self.key_id + flags.append(exp) + config = self.get_config(flags) + ntp_config_list = config.split('\n') + if not ntp_config_list: + self.ntp_auth_conf["authentication-keyid"] = "None" + return ntp_auth_conf + + self.key_id_exist = True + cur_auth_mode = "" + cur_auth_pwd = "" + for ntp_config in ntp_config_list: + ntp_auth_mode = re.findall(r'.*authentication-mode(\s\S*)\s\S*\s(\S*)', ntp_config) + ntp_auth_trust = re.findall(r'.*trusted.*', ntp_config) + if ntp_auth_trust: + self.cur_trusted_key = 'enable' + if ntp_auth_mode: + cur_auth_mode = ntp_auth_mode[0][0].strip() + cur_auth_pwd = ntp_auth_mode[0][1].strip() + ntp_auth_conf.append(dict(key_id=self.key_id, + auth_mode=cur_auth_mode, + auth_pwd=cur_auth_pwd, + trusted_key=self.cur_trusted_key)) + self.ntp_auth_conf["authentication-keyid"] = ntp_auth_conf + + return ntp_auth_conf + + def get_ntp_auth_exist_config(self): + """Get ntp authentication existed configure""" + + self.get_ntp_auth_enable() + self.get_ntp_all_auth_keyid() + + def config_ntp_auth_keyid(self): + """Config ntp authentication keyid""" + + commands = list() + if self.auth_type == 'encrypt': + config_cli = "ntp authentication-keyid %s authentication-mode %s cipher %s" % ( + self.key_id, self.auth_mode, self.password) + else: + config_cli = "ntp authentication-keyid %s authentication-mode %s %s" 
% ( + self.key_id, self.auth_mode, self.password) + + commands.append(config_cli) + + if self.trusted_key != self.cur_trusted_key: + if self.trusted_key == 'enable': + config_cli_trust = "ntp trusted authentication-keyid %s" % (self.key_id) + commands.append(config_cli_trust) + else: + config_cli_trust = "undo ntp trusted authentication-keyid %s" % (self.key_id) + commands.append(config_cli_trust) + + self.cli_load_config(commands) + + def config_ntp_auth_enable(self): + """Config ntp authentication enable""" + + commands = list() + if self.ntp_auth_conf['authentication'] != self.authentication: + if self.authentication == 'enable': + config_cli = "ntp authentication enable" + else: + config_cli = "undo ntp authentication enable" + commands.append(config_cli) + + self.cli_load_config(commands) + + def undo_config_ntp_auth_keyid(self): + """Undo ntp authentication key-id""" + + commands = list() + config_cli = "undo ntp authentication-keyid %s" % self.key_id + commands.append(config_cli) + + self.cli_load_config(commands) + + def cli_load_config(self, commands): + """Load config by cli""" + + if not self.module.check_mode: + load_config(self.module, commands) + + def config_ntp_auth(self): + """Config ntp authentication""" + + if self.state == "present": + self.config_ntp_auth_keyid() + else: + if not self.key_id_exist: + self.module.fail_json( + msg='Error: The Authentication-keyid does not exist.') + self.undo_config_ntp_auth_keyid() + + if self.authentication: + self.config_ntp_auth_enable() + + self.changed = True + + def get_existing(self): + """Get existing info""" + + self.existing = copy.deepcopy(self.ntp_auth_conf) + + def get_proposed(self): + """Get proposed result""" + + auth_type = self.auth_type + trusted_key = self.trusted_key + if self.state == 'absent': + auth_type = None + trusted_key = None + self.proposed = dict(key_id=self.key_id, auth_pwd=self.password, + auth_mode=self.auth_mode, auth_type=auth_type, + trusted_key=trusted_key, authentication=self.authentication, + state=self.state) + + def get_update_cmd(self): + """Get updated commands""" + + cli_str = "" + if self.state == "present": + cli_str = "ntp authentication-keyid %s authentication-mode %s " % ( + self.key_id, self.auth_mode) + if self.auth_type == 'encrypt': + cli_str = "%s cipher %s" % (cli_str, self.password) + else: + cli_str = "%s %s" % (cli_str, self.password) + else: + cli_str = "undo ntp authentication-keyid %s" % self.key_id + + self.updates_cmd.append(cli_str) + + if self.authentication: + cli_str = "" + + if self.ntp_auth_conf['authentication'] != self.authentication: + if self.authentication == 'enable': + cli_str = "ntp authentication enable" + else: + cli_str = "undo ntp authentication enable" + + if cli_str != "": + self.updates_cmd.append(cli_str) + + cli_str = "" + if self.state == "present": + if self.trusted_key != self.cur_trusted_key: + if self.trusted_key == 'enable': + cli_str = "ntp trusted authentication-keyid %s" % self.key_id + else: + cli_str = "undo ntp trusted authentication-keyid %s" % self.key_id + else: + cli_str = "undo ntp trusted authentication-keyid %s" % self.key_id + + if cli_str != "": + self.updates_cmd.append(cli_str) + + def get_end_state(self): + """Get end state info""" + + self.ntp_auth_conf = dict() + self.get_ntp_auth_exist_config() + self.end_state = copy.deepcopy(self.ntp_auth_conf) + if self.end_state == self.existing: + self.changed = False + + def show_result(self): + """Show result""" + + self.results['changed'] = self.changed + self.results['proposed'] = 
self.proposed + self.results['existing'] = self.existing + self.results['end_state'] = self.end_state + if self.changed: + self.results['updates'] = self.updates_cmd + else: + self.results['updates'] = list() + + self.module.exit_json(**self.results) + + def work(self): + """Execute task""" + + self.get_existing() + self.get_proposed() + self.get_update_cmd() + + self.config_ntp_auth() + + self.get_end_state() + self.show_result() + + +def main(): + """Main function entry""" + + argument_spec = dict( + key_id=dict(required=True, type='str'), + auth_pwd=dict(type='str', no_log=True), + auth_mode=dict(choices=['md5', 'hmac-sha256'], type='str'), + auth_type=dict(choices=['text', 'encrypt'], default='encrypt'), + trusted_key=dict(choices=['enable', 'disable'], default='disable'), + authentication=dict(choices=['enable', 'disable']), + state=dict(choices=['absent', 'present'], default='present'), + ) + argument_spec.update(ce_argument_spec) + ntp_auth_obj = NtpAuth(argument_spec) + ntp_auth_obj.work() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/cloudengine/ce_ospf.py b/plugins/modules/network/cloudengine/ce_ospf.py new file mode 100644 index 0000000000..c5af33b219 --- /dev/null +++ b/plugins/modules/network/cloudengine/ce_ospf.py @@ -0,0 +1,972 @@ +#!/usr/bin/python +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: ce_ospf +short_description: Manages configuration of an OSPF instance on HUAWEI CloudEngine switches. +description: + - Manages configuration of an OSPF instance on HUAWEI CloudEngine switches. +author: QijunPan (@QijunPan) +notes: + - This module requires the netconf system service be enabled on the remote device being managed. + - Recommended connection is C(netconf). + - This module also works with C(local) connections for legacy playbooks. +options: + process_id: + description: + - Specifies a process ID. + The value is an integer ranging from 1 to 4294967295. + required: true + area: + description: + - Specifies the area ID. The area with the area-id being 0 is a backbone area. + Valid values are a string, formatted as an IP address + (i.e. "0.0.0.0") or as an integer between 1 and 4294967295. + addr: + description: + - Specifies the address of the network segment where the interface resides. + The value is in dotted decimal notation. + mask: + description: + - IP network wildcard bits in decimal format between 0 and 32. + auth_mode: + description: + - Specifies the authentication type. + choices: ['none', 'hmac-sha256', 'md5', 'hmac-md5', 'simple'] + auth_text_simple: + description: + - Specifies a password for simple authentication. + The value is a string of 1 to 8 characters. 
+ auth_key_id: + description: + - Authentication key id when C(auth_mode) is 'hmac-sha256', 'md5' or 'hmac-md5. + Valid value is an integer is in the range from 1 to 255. + auth_text_md5: + description: + - Specifies a password for MD5, HMAC-MD5, or HMAC-SHA256 authentication. + The value is a string of 1 to 255 case-sensitive characters, spaces not supported. + nexthop_addr: + description: + - IPv4 address for configure next-hop address's weight. + Valid values are a string, formatted as an IP address. + nexthop_weight: + description: + - Indicates the weight of the next hop. + The smaller the value is, the higher the preference of the route is. + It is an integer that ranges from 1 to 254. + max_load_balance: + description: + - The maximum number of paths for forward packets over multiple paths. + Valid value is an integer in the range from 1 to 64. + state: + description: + - Determines whether the config should be present or not + on the device. + default: present + choices: ['present','absent'] +''' + +EXAMPLES = ''' +- name: ospf module test + hosts: cloudengine + connection: local + gather_facts: no + vars: + cli: + host: "{{ inventory_hostname }}" + port: "{{ ansible_ssh_port }}" + username: "{{ username }}" + password: "{{ password }}" + transport: cli + + tasks: + + - name: Configure ospf + ce_ospf: + process_id: 1 + area: 100 + state: present + provider: "{{ cli }}" +''' + +RETURN = ''' +proposed: + description: k/v pairs of parameters passed into module + returned: verbose mode + type: dict + sample: {"process_id": "1", "area": "100"} +existing: + description: k/v pairs of existing configuration + returned: verbose mode + type: dict + sample: {"process_id": "1", "areas": [], "nexthops":[], "max_load_balance": "32"} +end_state: + description: k/v pairs of configuration after module execution + returned: verbose mode + type: dict + sample: {"process_id": "1", + "areas": [{"areaId": "0.0.0.100", "areaType": "Normal"}], + "nexthops":[], "max_load_balance": "32"} +updates: + description: commands sent to the device + returned: always + type: list + sample: ["ospf 1", "area 0.0.0.100"] +changed: + description: check to see if a change was made on the device + returned: always + type: bool + sample: true +''' + +from xml.etree import ElementTree +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.network.cloudengine.ce import get_nc_config, set_nc_config, ce_argument_spec + +CE_NC_GET_OSPF = """ + + + + + + %s + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +""" + +CE_NC_CREATE_PROCESS = """ + + + + + + %s + + + + + +""" + +CE_NC_DELETE_PROCESS = """ + + + + + + %s + + + + + +""" + +CE_NC_XML_BUILD_MERGE_PROCESS = """ + + + + + + %s + %s + + + + + +""" + +CE_NC_XML_BUILD_PROCESS = """ + + + + + + %s + %s + + + + + +""" + +CE_NC_XML_BUILD_MERGE_AREA = """ + + + %s + %s + + +""" + +CE_NC_XML_BUILD_DELETE_AREA = """ + + + %s + %s + + +""" + +CE_NC_XML_BUILD_AREA = """ + + + %s + %s + + +""" + +CE_NC_XML_SET_AUTH_MODE = """ + %s +""" +CE_NC_XML_SET_AUTH_TEXT_SIMPLE = """ + %s +""" + +CE_NC_XML_SET_AUTH_MD5 = """ + %s + %s +""" + + +CE_NC_XML_MERGE_NETWORKS = """ + + + %s + %s + + +""" + +CE_NC_XML_DELETE_NETWORKS = """ + + + %s + %s + + +""" + +CE_NC_XML_SET_LB = """ + %s +""" + + +CE_NC_XML_BUILD_MERGE_TOPO = """ + + + base + %s + + + +""" + +CE_NC_XML_BUILD_TOPO = """ + + + base + %s + + + +""" + +CE_NC_XML_MERGE_NEXTHOP = """ + + + %s + %s + + +""" + +CE_NC_XML_DELETE_NEXTHOP = """ + + + %s + + +""" + + +class 
OSPF(object): + """ + Manages configuration of an ospf instance. + """ + + def __init__(self, argument_spec): + self.spec = argument_spec + self.module = None + self.init_module() + + # module input info + self.process_id = self.module.params['process_id'] + self.area = self.module.params['area'] + self.addr = self.module.params['addr'] + self.mask = self.module.params['mask'] + self.auth_mode = self.module.params['auth_mode'] + self.auth_text_simple = self.module.params['auth_text_simple'] + self.auth_key_id = self.module.params['auth_key_id'] + self.auth_text_md5 = self.module.params['auth_text_md5'] + self.nexthop_addr = self.module.params['nexthop_addr'] + self.nexthop_weight = self.module.params['nexthop_weight'] + self.max_load_balance = self.module.params['max_load_balance'] + self.state = self.module.params['state'] + + # ospf info + self.ospf_info = dict() + + # state + self.changed = False + self.updates_cmd = list() + self.results = dict() + self.proposed = dict() + self.existing = dict() + self.end_state = dict() + + def init_module(self): + """ init module """ + + required_together = [ + ("addr", "mask"), + ("auth_key_id", "auth_text_md5"), + ("nexthop_addr", "nexthop_weight") + ] + self.module = AnsibleModule( + argument_spec=self.spec, required_together=required_together, supports_check_mode=True) + + def check_response(self, xml_str, xml_name): + """Check if response message is already succeed.""" + + if "" not in xml_str: + self.module.fail_json(msg='Error: %s failed.' % xml_name) + + def get_wildcard_mask(self): + """convert mask length to ip address wildcard mask, i.e. 24 to 0.0.0.255""" + + mask_int = ["255"] * 4 + length = int(self.mask) + + if length > 32: + self.module.fail_json(msg='IPv4 ipaddress mask length is invalid') + if length < 8: + mask_int[0] = str(int(~(0xFF << (8 - length % 8)) & 0xFF)) + if length >= 8: + mask_int[0] = '0' + mask_int[1] = str(int(~(0xFF << (16 - (length % 16))) & 0xFF)) + if length >= 16: + mask_int[1] = '0' + mask_int[2] = str(int(~(0xFF << (24 - (length % 24))) & 0xFF)) + if length >= 24: + mask_int[2] = '0' + mask_int[3] = str(int(~(0xFF << (32 - (length % 32))) & 0xFF)) + if length == 32: + mask_int[3] = '0' + + return '.'.join(mask_int) + + def get_area_ip(self): + """convert integer to ip address""" + + if not self.area.isdigit(): + return self.area + + addr_int = ['0'] * 4 + addr_int[0] = str(((int(self.area) & 0xFF000000) >> 24) & 0xFF) + addr_int[1] = str(((int(self.area) & 0x00FF0000) >> 16) & 0xFF) + addr_int[2] = str(((int(self.area) & 0x0000FF00) >> 8) & 0XFF) + addr_int[3] = str(int(self.area) & 0xFF) + + return '.'.join(addr_int) + + def get_ospf_dict(self, process_id): + """ get one ospf attributes dict.""" + + ospf_info = dict() + conf_str = CE_NC_GET_OSPF % process_id + xml_str = get_nc_config(self.module, conf_str) + if "" in xml_str: + return ospf_info + + xml_str = xml_str.replace('\r', '').replace('\n', '').\ + replace('xmlns="urn:ietf:params:xml:ns:netconf:base:1.0"', "").\ + replace('xmlns="http://www.huawei.com/netconf/vrp"', "") + + # get process base info + root = ElementTree.fromstring(xml_str) + ospfsite = root.find("ospfv2/ospfv2comm/ospfSites/ospfSite") + if ospfsite: + for site in ospfsite: + if site.tag in ["processId", "routerId", "vrfName"]: + ospf_info[site.tag] = site.text + + # get Topology info + topo = root.find( + "ospfv2/ospfv2comm/ospfSites/ospfSite/ProcessTopologys/ProcessTopology") + if topo: + for eles in topo: + if eles.tag in ["maxLoadBalancing"]: + ospf_info[eles.tag] = eles.text + + # 
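get_wildcard_mask() and get_area_ip() above do the bit arithmetic by hand (these modules target old Python versions and avoid extra imports). For reference, the same two conversions can be cross-checked with the stdlib; this is a sketch, not the module's code path:

```python
import socket
import struct

def wildcard_mask(prefix_len):
    """Prefix length -> wildcard (inverted) mask, e.g. 24 -> '0.0.0.255'."""
    host_bits = 32 - int(prefix_len)
    return socket.inet_ntoa(struct.pack('>I', (1 << host_bits) - 1))

def area_id_to_ip(area):
    """Decimal OSPF area id -> dotted quad, e.g. '100' -> '0.0.0.100'."""
    if not area.isdigit():
        return area  # already dotted notation, pass through unchanged
    return socket.inet_ntoa(struct.pack('>I', int(area)))
```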
get nexthop info + ospf_info["nexthops"] = list() + nexthops = root.findall( + "ospfv2/ospfv2comm/ospfSites/ospfSite/ProcessTopologys/ProcessTopology/nexthopMTs/nexthopMT") + if nexthops: + for nexthop in nexthops: + nh_dict = dict() + for ele in nexthop: + if ele.tag in ["ipAddress", "weight"]: + nh_dict[ele.tag] = ele.text + ospf_info["nexthops"].append(nh_dict) + + # get areas info + ospf_info["areas"] = list() + areas = root.findall( + "ospfv2/ospfv2comm/ospfSites/ospfSite/areas/area") + if areas: + for area in areas: + area_dict = dict() + for ele in area: + if ele.tag in ["areaId", "authTextSimple", "areaType", + "authenticationMode", "keyId", "authTextMd5"]: + area_dict[ele.tag] = ele.text + if ele.tag == "networks": + # get networks info + area_dict["networks"] = list() + for net in ele: + net_dict = dict() + for net_ele in net: + if net_ele.tag in ["ipAddress", "wildcardMask"]: + net_dict[net_ele.tag] = net_ele.text + area_dict["networks"].append(net_dict) + + ospf_info["areas"].append(area_dict) + return ospf_info + + def is_area_exist(self): + """is ospf area exist""" + if not self.ospf_info: + return False + for area in self.ospf_info["areas"]: + if area["areaId"] == self.get_area_ip(): + return True + + return False + + def is_network_exist(self): + """is ospf area network exist""" + if not self.ospf_info: + return False + + for area in self.ospf_info["areas"]: + if area["areaId"] == self.get_area_ip(): + if not area.get("networks"): + return False + for network in area.get("networks"): + if network["ipAddress"] == self.addr and network["wildcardMask"] == self.get_wildcard_mask(): + return True + return False + + def is_nexthop_exist(self): + """is ospf nexthop exist""" + + if not self.ospf_info: + return False + for nexthop in self.ospf_info["nexthops"]: + if nexthop["ipAddress"] == self.nexthop_addr: + return True + + return False + + def is_nexthop_change(self): + """is ospf nexthop change""" + if not self.ospf_info: + return True + + for nexthop in self.ospf_info["nexthops"]: + if nexthop["ipAddress"] == self.nexthop_addr: + if nexthop["weight"] == self.nexthop_weight: + return False + else: + return True + + return True + + def create_process(self): + """Create ospf process""" + + xml_area = "" + self.updates_cmd.append("ospf %s" % self.process_id) + xml_create = CE_NC_CREATE_PROCESS % self.process_id + set_nc_config(self.module, xml_create) + + # nexthop weight + xml_nh = "" + if self.nexthop_addr: + xml_nh = CE_NC_XML_MERGE_NEXTHOP % ( + self.nexthop_addr, self.nexthop_weight) + self.updates_cmd.append("nexthop %s weight %s" % ( + self.nexthop_addr, self.nexthop_weight)) + + # max load balance + xml_lb = "" + if self.max_load_balance: + xml_lb = CE_NC_XML_SET_LB % self.max_load_balance + self.updates_cmd.append( + "maximum load-balancing %s" % self.max_load_balance) + + xml_topo = "" + if xml_lb or xml_nh: + xml_topo = CE_NC_XML_BUILD_TOPO % (xml_nh + xml_lb) + + if self.area: + self.updates_cmd.append("area %s" % self.get_area_ip()) + xml_auth = "" + xml_network = "" + + # networks + if self.addr and self.mask: + xml_network = CE_NC_XML_MERGE_NETWORKS % ( + self.addr, self.get_wildcard_mask()) + self.updates_cmd.append("network %s %s" % ( + self.addr, self.get_wildcard_mask())) + + # authentication mode + if self.auth_mode: + xml_auth += CE_NC_XML_SET_AUTH_MODE % self.auth_mode + if self.auth_mode == "none": + self.updates_cmd.append("undo authentication-mode") + else: + self.updates_cmd.append( + "authentication-mode %s" % self.auth_mode) + if self.auth_mode == 
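get_ospf_dict() above shows the reply-handling pattern shared by these CloudEngine modules: strip the two default namespace declarations from the NETCONF reply so that plain, un-namespaced path expressions work, then walk the tree with findall(). A minimal sketch of the same pattern, reusing the module's element paths:

```python
from xml.etree import ElementTree

def parse_area_ids(xml_str):
    """Return the <areaId> values from an OSPF <get> reply (sketch)."""
    xml_str = xml_str.replace(
        'xmlns="urn:ietf:params:xml:ns:netconf:base:1.0"', '').replace(
        'xmlns="http://www.huawei.com/netconf/vrp"', '')
    root = ElementTree.fromstring(xml_str)
    return [area.findtext("areaId")
            for area in root.findall(
                "ospfv2/ospfv2comm/ospfSites/ospfSite/areas/area")]
```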
"simple" and self.auth_text_simple: + xml_auth += CE_NC_XML_SET_AUTH_TEXT_SIMPLE % self.auth_text_simple + self.updates_cmd.pop() + self.updates_cmd.append( + "authentication-mode %s %s" % (self.auth_mode, self.auth_text_simple)) + if self.auth_mode in ["hmac-sha256", "hmac-sha256", "md5"]: + if self.auth_key_id and self.auth_text_md5: + xml_auth += CE_NC_XML_SET_AUTH_MD5 % ( + self.auth_key_id, self.auth_text_md5) + self.updates_cmd.pop() + self.updates_cmd.append( + "authentication-mode %s %s %s" % (self.auth_mode, self.auth_key_id, self.auth_text_md5)) + if xml_network or xml_auth or not self.is_area_exist(): + xml_area += CE_NC_XML_BUILD_MERGE_AREA % ( + self.get_area_ip(), xml_network + xml_auth) + + xml_str = CE_NC_XML_BUILD_MERGE_PROCESS % ( + self.process_id, xml_topo + xml_area) + recv_xml = set_nc_config(self.module, xml_str) + self.check_response(recv_xml, "CREATE_PROCESS") + self.changed = True + + def delete_process(self): + """Delete ospf process""" + + xml_str = CE_NC_DELETE_PROCESS % self.process_id + recv_xml = set_nc_config(self.module, xml_str) + self.check_response(recv_xml, "DELETE_PROCESS") + self.updates_cmd.append("undo ospf %s" % self.process_id) + self.changed = True + + def merge_process(self): + """merge ospf process""" + + xml_area = "" + xml_str = "" + self.updates_cmd.append("ospf %s" % self.process_id) + + # nexthop weight + xml_nh = "" + if self.nexthop_addr and self.is_nexthop_change(): + xml_nh = CE_NC_XML_MERGE_NEXTHOP % ( + self.nexthop_addr, self.nexthop_weight) + self.updates_cmd.append("nexthop %s weight %s" % ( + self.nexthop_addr, self.nexthop_weight)) + + # max load balance + xml_lb = "" + if self.max_load_balance and self.ospf_info.get("maxLoadBalancing") != self.max_load_balance: + xml_lb = CE_NC_XML_SET_LB % self.max_load_balance + self.updates_cmd.append( + "maximum load-balancing %s" % self.max_load_balance) + + xml_topo = "" + if xml_lb or xml_nh: + xml_topo = CE_NC_XML_BUILD_MERGE_TOPO % (xml_nh + xml_lb) + + if self.area: + self.updates_cmd.append("area %s" % self.get_area_ip()) + xml_network = "" + xml_auth = "" + if self.addr and self.mask: + if not self.is_network_exist(): + xml_network += CE_NC_XML_MERGE_NETWORKS % ( + self.addr, self.get_wildcard_mask()) + self.updates_cmd.append("network %s %s" % ( + self.addr, self.get_wildcard_mask())) + + # NOTE: for security, authentication config will always be update + if self.auth_mode: + xml_auth += CE_NC_XML_SET_AUTH_MODE % self.auth_mode + if self.auth_mode == "none": + self.updates_cmd.append("undo authentication-mode") + else: + self.updates_cmd.append( + "authentication-mode %s" % self.auth_mode) + if self.auth_mode == "simple" and self.auth_text_simple: + xml_auth += CE_NC_XML_SET_AUTH_TEXT_SIMPLE % self.auth_text_simple + self.updates_cmd.pop() + self.updates_cmd.append( + "authentication-mode %s %s" % (self.auth_mode, self.auth_text_simple)) + if self.auth_mode in ["hmac-sha256", "hmac-sha256", "md5"]: + if self.auth_key_id and self.auth_text_md5: + xml_auth += CE_NC_XML_SET_AUTH_MD5 % ( + self.auth_key_id, self.auth_text_md5) + self.updates_cmd.pop() + self.updates_cmd.append( + "authentication-mode %s %s %s" % (self.auth_mode, self.auth_key_id, self.auth_text_md5)) + if xml_network or xml_auth or not self.is_area_exist(): + xml_area += CE_NC_XML_BUILD_MERGE_AREA % ( + self.get_area_ip(), xml_network + xml_auth) + elif self.is_area_exist(): + self.updates_cmd.pop() # remove command: area + else: + pass + + if xml_area or xml_topo: + xml_str = CE_NC_XML_BUILD_MERGE_PROCESS % ( + 
self.process_id, xml_topo + xml_area) + recv_xml = set_nc_config(self.module, xml_str) + self.check_response(recv_xml, "MERGE_PROCESS") + self.changed = True + + def remove_area_network(self): + """remvoe ospf area network""" + + if not self.is_network_exist(): + return + + xml_network = CE_NC_XML_DELETE_NETWORKS % ( + self.addr, self.get_wildcard_mask()) + xml_area = CE_NC_XML_BUILD_AREA % (self.get_area_ip(), xml_network) + xml_str = CE_NC_XML_BUILD_PROCESS % (self.process_id, xml_area) + recv_xml = set_nc_config(self.module, xml_str) + self.check_response(recv_xml, "DELETE_AREA_NETWORK") + self.updates_cmd.append("ospf %s" % self.process_id) + self.updates_cmd.append("area %s" % self.get_area_ip()) + self.updates_cmd.append("undo network %s %s" % + (self.addr, self.get_wildcard_mask())) + self.changed = True + + def remove_area(self): + """remove ospf area""" + + if not self.is_area_exist(): + return + + xml_area = CE_NC_XML_BUILD_DELETE_AREA % (self.get_area_ip(), "") + xml_str = CE_NC_XML_BUILD_PROCESS % (self.process_id, xml_area) + recv_xml = set_nc_config(self.module, xml_str) + self.check_response(recv_xml, "DELETE_AREA") + self.updates_cmd.append("ospf %s" % self.process_id) + self.updates_cmd.append("undo area %s" % self.get_area_ip()) + self.changed = True + + def remove_nexthop(self): + """remove ospf nexthop weight""" + + if not self.is_nexthop_exist(): + return + + xml_nh = CE_NC_XML_DELETE_NEXTHOP % self.nexthop_addr + xml_topo = CE_NC_XML_BUILD_TOPO % xml_nh + xml_str = CE_NC_XML_BUILD_PROCESS % (self.process_id, xml_topo) + recv_xml = set_nc_config(self.module, xml_str) + self.check_response(recv_xml, "DELETE_NEXTHOP_WEIGHT") + self.updates_cmd.append("ospf %s" % self.process_id) + self.updates_cmd.append("undo nexthop %s" % self.nexthop_addr) + self.changed = True + + def is_valid_v4addr(self, addr): + """check is ipv4 addr is valid""" + + if addr.find('.') != -1: + addr_list = addr.split('.') + if len(addr_list) != 4: + return False + for each_num in addr_list: + if not each_num.isdigit(): + return False + if int(each_num) > 255: + return False + return True + + return False + + def convert_ip_to_network(self): + """convert ip to subnet address""" + + ip_list = self.addr.split('.') + mask_list = self.get_wildcard_mask().split('.') + + for i in range(len(ip_list)): + ip_list[i] = str((int(ip_list[i]) & (~int(mask_list[i]))) & 0xff) + + self.addr = '.'.join(ip_list) + + def check_params(self): + """Check all input params""" + + # process_id check + if not self.process_id.isdigit(): + self.module.fail_json(msg="Error: process_id is not digit.") + if int(self.process_id) < 1 or int(self.process_id) > 4294967295: + self.module.fail_json( + msg="Error: process_id must be an integer between 1 and 4294967295.") + + if self.area: + # area check + if self.area.isdigit(): + if int(self.area) < 0 or int(self.area) > 4294967295: + self.module.fail_json( + msg="Error: area id (Integer) must be between 0 and 4294967295.") + + else: + if not self.is_valid_v4addr(self.area): + self.module.fail_json(msg="Error: area id is invalid.") + + # area network check + if self.addr: + if not self.is_valid_v4addr(self.addr): + self.module.fail_json( + msg="Error: network addr is invalid.") + if not self.mask.isdigit(): + self.module.fail_json( + msg="Error: network mask is not digit.") + if int(self.mask) < 0 or int(self.mask) > 32: + self.module.fail_json( + msg="Error: network mask is invalid.") + + # area authentication check + if self.state == "present" and self.auth_mode: + if self.auth_mode 
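convert_ip_to_network() above normalizes the user-supplied address to its network address before it is compared or sent: each byte is ANDed with the complement of the corresponding wildcard byte. The same step as a standalone sketch:

```python
def to_network(addr, wildcard):
    """('192.168.1.77', '0.0.0.255') -> '192.168.1.0' (sketch)."""
    return '.'.join(str(int(a) & ~int(w) & 0xFF)
                    for a, w in zip(addr.split('.'), wildcard.split('.')))
```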
== "simple": + if self.auth_text_simple and len(self.auth_text_simple) > 8: + self.module.fail_json( + msg="Error: auth_text_simple is not in the range from 1 to 8.") + if self.auth_mode in ["hmac-sha256", "hmac-sha256", "md5"]: + if self.auth_key_id: + if not self.auth_key_id.isdigit(): + self.module.fail_json( + msg="Error: auth_key_id is not digit.") + if int(self.auth_key_id) < 1 or int(self.auth_key_id) > 255: + self.module.fail_json( + msg="Error: auth_key_id is not in the range from 1 to 255.") + if self.auth_text_md5 and len(self.auth_text_md5) > 255: + self.module.fail_json( + msg="Error: auth_text_md5 is not in the range from 1 to 255.") + + # process max load balance check + if self.state == "present" and self.max_load_balance: + if not self.max_load_balance.isdigit(): + self.module.fail_json( + msg="Error: max_load_balance is not digit.") + if int(self.max_load_balance) < 1 or int(self.max_load_balance) > 64: + self.module.fail_json( + msg="Error: max_load_balance is not in the range from 1 to 64.") + + # process nexthop weight check + if self.nexthop_addr: + if not self.is_valid_v4addr(self.nexthop_addr): + self.module.fail_json(msg="Error: nexthop_addr is invalid.") + if not self.nexthop_weight.isdigit(): + self.module.fail_json( + msg="Error: nexthop_weight is not digit.") + if int(self.nexthop_weight) < 1 or int(self.nexthop_weight) > 254: + self.module.fail_json( + msg="Error: nexthop_weight is not in the range from 1 to 254.") + + if self.addr: + self.convert_ip_to_network() + + def get_proposed(self): + """get proposed info""" + + self.proposed["process_id"] = self.process_id + self.proposed["area"] = self.area + if self.area: + self.proposed["addr"] = self.addr + self.proposed["mask"] = self.mask + if self.auth_mode: + self.proposed["auth_mode"] = self.auth_mode + if self.auth_mode == "simple": + self.proposed["auth_text_simple"] = self.auth_text_simple + if self.auth_mode in ["hmac-sha256", "hmac-sha256", "md5"]: + self.proposed["auth_key_id"] = self.auth_key_id + self.proposed["auth_text_md5"] = self.auth_text_md5 + + if self.nexthop_addr: + self.proposed["nexthop_addr"] = self.nexthop_addr + self.proposed["nexthop_weight"] = self.nexthop_weight + self.proposed["max_load_balance"] = self.max_load_balance + self.proposed["state"] = self.state + + def get_existing(self): + """get existing info""" + + if not self.ospf_info: + return + + self.existing["process_id"] = self.process_id + self.existing["areas"] = self.ospf_info["areas"] + self.existing["nexthops"] = self.ospf_info["nexthops"] + self.existing["max_load_balance"] = self.ospf_info.get( + "maxLoadBalancing") + + def get_end_state(self): + """get end state info""" + + ospf_info = self.get_ospf_dict(self.process_id) + + if not ospf_info: + return + + self.end_state["process_id"] = self.process_id + self.end_state["areas"] = ospf_info["areas"] + self.end_state["nexthops"] = ospf_info["nexthops"] + self.end_state["max_load_balance"] = ospf_info.get("maxLoadBalancing") + + if self.end_state == self.existing: + if not self.auth_text_simple and not self.auth_text_md5: + self.changed = False + + def work(self): + """worker""" + + self.check_params() + self.ospf_info = self.get_ospf_dict(self.process_id) + self.get_existing() + self.get_proposed() + + # deal present or absent + if self.state == "present": + if not self.ospf_info: + # create ospf process + self.create_process() + else: + # merge ospf + self.merge_process() + else: + if self.ospf_info: + if self.area: + if self.addr: + # remove ospf area network + 
self.remove_area_network() + else: + # remove ospf area + self.remove_area() + if self.nexthop_addr: + # remove ospf nexthop weight + self.remove_nexthop() + + if not self.area and not self.nexthop_addr: + # remove ospf process + self.delete_process() + else: + self.module.fail_json(msg='Error: ospf process does not exist') + + self.get_end_state() + self.results['changed'] = self.changed + self.results['proposed'] = self.proposed + self.results['existing'] = self.existing + self.results['end_state'] = self.end_state + if self.changed: + self.results['updates'] = self.updates_cmd + else: + self.results['updates'] = list() + + self.module.exit_json(**self.results) + + +def main(): + """Module main""" + + argument_spec = dict( + process_id=dict(required=True, type='str'), + area=dict(required=False, type='str'), + addr=dict(required=False, type='str'), + mask=dict(required=False, type='str'), + auth_mode=dict(required=False, + choices=['none', 'hmac-sha256', 'md5', 'hmac-md5', 'simple'], type='str'), + auth_text_simple=dict(required=False, type='str', no_log=True), + auth_key_id=dict(required=False, type='str'), + auth_text_md5=dict(required=False, type='str', no_log=True), + nexthop_addr=dict(required=False, type='str'), + nexthop_weight=dict(required=False, type='str'), + max_load_balance=dict(required=False, type='str'), + state=dict(required=False, default='present', + choices=['present', 'absent']) + ) + argument_spec.update(ce_argument_spec) + module = OSPF(argument_spec) + module.work() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/cloudengine/ce_ospf_vrf.py b/plugins/modules/network/cloudengine/ce_ospf_vrf.py new file mode 100644 index 0000000000..e1b1b721a8 --- /dev/null +++ b/plugins/modules/network/cloudengine/ce_ospf_vrf.py @@ -0,0 +1,1623 @@ +#!/usr/bin/python +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: ce_ospf_vrf +short_description: Manages configuration of an OSPF VPN instance on HUAWEI CloudEngine switches. +description: + - Manages configuration of an OSPF VPN instance on HUAWEI CloudEngine switches. +author: Yang yang (@QijunPan) +notes: + - This module requires the netconf system service be enabled on the remote device being managed. + - Recommended connection is C(netconf). + - This module also works with C(local) connections for legacy playbooks. +options: + ospf: + description: + - The ID of the ospf process. + Valid values are an integer, 1 - 4294967295, the default value is 1. + required: true + route_id: + description: + - Specifies the ospf private route id,. + Valid values are a string, formatted as an IP address + (i.e. "10.1.1.1") the length is 0 - 20. 
+ vrf: + description: + - Specifies the vpn instance which use ospf,length is 1 - 31. + Valid values are a string. + default: _public_ + description: + description: + - Specifies the description information of ospf process. + bandwidth: + description: + - Specifies the reference bandwidth used to assign ospf cost. + Valid values are an integer, in Mbps, 1 - 2147483648, the default value is 100. + lsaalflag: + description: + - Specifies the mode of timer to calculate interval of arrive LSA. + If set the parameter but not specifies value, the default will be used. + If true use general timer. + If false use intelligent timer. + type: bool + default: 'no' + lsaainterval: + description: + - Specifies the interval of arrive LSA when use the general timer. + Valid value is an integer, in millisecond, from 0 to 10000. + lsaamaxinterval: + description: + - Specifies the max interval of arrive LSA when use the intelligent timer. + Valid value is an integer, in millisecond, from 0 to 10000, the default value is 1000. + lsaastartinterval: + description: + - Specifies the start interval of arrive LSA when use the intelligent timer. + Valid value is an integer, in millisecond, from 0 to 10000, the default value is 500. + lsaaholdinterval: + description: + - Specifies the hold interval of arrive LSA when use the intelligent timer. + Valid value is an integer, in millisecond, from 0 to 10000, the default value is 500. + lsaointervalflag: + description: + - Specifies whether cancel the interval of LSA originate or not. + If set the parameter but noe specifies value, the default will be used. + true:cancel the interval of LSA originate, the interval is 0. + false:do not cancel the interval of LSA originate. + type: bool + default: 'no' + lsaointerval: + description: + - Specifies the interval of originate LSA . + Valid value is an integer, in second, from 0 to 10, the default value is 5. + lsaomaxinterval: + description: + - Specifies the max interval of originate LSA . + Valid value is an integer, in millisecond, from 1 to 10000, the default value is 5000. + lsaostartinterval: + description: + - Specifies the start interval of originate LSA . + Valid value is an integer, in millisecond, from 0 to 1000, the default value is 500. + lsaoholdinterval: + description: + - Specifies the hold interval of originate LSA . + Valid value is an integer, in millisecond, from 0 to 5000, the default value is 1000. + spfintervaltype: + description: + - Specifies the mode of timer which used to calculate SPF. + If set the parameter but noe specifies value, the default will be used. + If is intelligent-timer, then use intelligent timer. + If is timer, then use second level timer. + If is millisecond, then use millisecond level timer. + choices: ['intelligent-timer','timer','millisecond'] + default: intelligent-timer + spfinterval: + description: + - Specifies the interval to calculate SPF when use second level timer. + Valid value is an integer, in second, from 1 to 10. + spfintervalmi: + description: + - Specifies the interval to calculate SPF when use millisecond level timer. + Valid value is an integer, in millisecond, from 1 to 10000. + spfmaxinterval: + description: + - Specifies the max interval to calculate SPF when use intelligent timer. + Valid value is an integer, in millisecond, from 1 to 20000, the default value is 5000. + spfstartinterval: + description: + - Specifies the start interval to calculate SPF when use intelligent timer. 
+ Valid value is an integer, in millisecond, from 1 to 1000, the default value is 50. + spfholdinterval: + description: + - Specifies the hold interval to calculate SPF when use intelligent timer. + Valid value is an integer, in millisecond, from 1 to 5000, the default value is 200. + state: + description: + - Specify desired state of the resource. + choices: ['present', 'absent'] + default: present +''' + +EXAMPLES = ''' +- name: ospf vrf module test + hosts: cloudengine + connection: local + gather_facts: no + vars: + cli: + host: "{{ inventory_hostname }}" + port: "{{ ansible_ssh_port }}" + username: "{{ username }}" + password: "{{ password }}" + transport: cli + + tasks: + + - name: Configure ospf route id + ce_ospf_vrf: + ospf: 2 + route_id: 2.2.2.2 + lsaointervalflag: False + lsaointerval: 2 + provider: "{{ cli }}" +''' + +RETURN = ''' +proposed: + description: k/v pairs of parameters passed into module + returned: verbose mode + type: dict + sample: { + "bandwidth": "100", + "description": null, + "lsaaholdinterval": "500", + "lsaainterval": null, + "lsaamaxinterval": "1000", + "lsaastartinterval": "500", + "lsaalflag": "False", + "lsaoholdinterval": "1000", + "lsaointerval": "2", + "lsaointervalflag": "False", + "lsaomaxinterval": "5000", + "lsaostartinterval": "500", + "process_id": "2", + "route_id": "2.2.2.2", + "spfholdinterval": "1000", + "spfinterval": null, + "spfintervalmi": null, + "spfintervaltype": "intelligent-timer", + "spfmaxinterval": "10000", + "spfstartinterval": "500", + "vrf": "_public_" + } +existing: + description: k/v pairs of existing configuration + returned: verbose mode + type: dict + sample: { + "bandwidthReference": "100", + "description": null, + "lsaArrivalFlag": "false", + "lsaArrivalHoldInterval": "500", + "lsaArrivalInterval": null, + "lsaArrivalMaxInterval": "1000", + "lsaArrivalStartInterval": "500", + "lsaOriginateHoldInterval": "1000", + "lsaOriginateInterval": "2", + "lsaOriginateIntervalFlag": "false", + "lsaOriginateMaxInterval": "5000", + "lsaOriginateStartInterval": "500", + "processId": "2", + "routerId": "2.2.2.2", + "spfScheduleHoldInterval": "1000", + "spfScheduleInterval": null, + "spfScheduleIntervalMillisecond": null, + "spfScheduleIntervalType": "intelligent-timer", + "spfScheduleMaxInterval": "10000", + "spfScheduleStartInterval": "500", + "vrfName": "_public_" + } +end_state: + description: k/v pairs of configuration after module execution + returned: verbose mode + type: dict + sample: { + "bandwidthReference": "100", + "description": null, + "lsaArrivalFlag": "false", + "lsaArrivalHoldInterval": "500", + "lsaArrivalInterval": null, + "lsaArrivalMaxInterval": "1000", + "lsaArrivalStartInterval": "500", + "lsaOriginateHoldInterval": "1000", + "lsaOriginateInterval": "2", + "lsaOriginateIntervalFlag": "false", + "lsaOriginateMaxInterval": "5000", + "lsaOriginateStartInterval": "500", + "processId": "2", + "routerId": "2.2.2.2", + "spfScheduleHoldInterval": "1000", + "spfScheduleInterval": null, + "spfScheduleIntervalMillisecond": null, + "spfScheduleIntervalType": "intelligent-timer", + "spfScheduleMaxInterval": "10000", + "spfScheduleStartInterval": "500", + "vrfName": "_public_" + } +updates: + description: commands sent to the device + returned: always + type: list + sample: ["ospf 2"] +changed: + description: check to see if a change was made on the device + returned: always + type: bool + sample: False +''' + +from xml.etree import ElementTree +from ansible.module_utils.basic import AnsibleModule +from 
ansible_collections.community.general.plugins.module_utils.network.cloudengine.ce import get_nc_config, set_nc_config, ce_argument_spec + +CE_NC_GET_OSPF_VRF = """ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +""" + +CE_NC_CREATE_OSPF_VRF = """ + + + + + %s +%s + %s + %s + %s + %s + %s + %s + %s + %s + %s + %s + %s + %s + %s + %s + %s + %s + %s + %s + %s + + + + +""" +CE_NC_CREATE_ROUTE_ID = """ + %s +""" + +CE_NC_DELETE_OSPF = """ + + + + + %s + %s + %s + + + + +""" + + +def build_config_xml(xmlstr): + """build_config_xml""" + + return ' ' + xmlstr + ' ' + + +class OspfVrf(object): + """ + Manages configuration of an ospf instance. + """ + + def __init__(self, argument_spec): + self.spec = argument_spec + self.module = None + self.init_module() + + # module input info + self.ospf = self.module.params['ospf'] + self.route_id = self.module.params['route_id'] + self.vrf = self.module.params['vrf'] + self.description = self.module.params['description'] + self.bandwidth = self.module.params['bandwidth'] + self.lsaalflag = self.module.params['lsaalflag'] + self.lsaainterval = self.module.params['lsaainterval'] + self.lsaamaxinterval = self.module.params['lsaamaxinterval'] + self.lsaastartinterval = self.module.params['lsaastartinterval'] + self.lsaaholdinterval = self.module.params['lsaaholdinterval'] + self.lsaointervalflag = self.module.params['lsaointervalflag'] + self.lsaointerval = self.module.params['lsaointerval'] + self.lsaomaxinterval = self.module.params['lsaomaxinterval'] + self.lsaostartinterval = self.module.params['lsaostartinterval'] + self.lsaoholdinterval = self.module.params['lsaoholdinterval'] + self.spfintervaltype = self.module.params['spfintervaltype'] + self.spfinterval = self.module.params['spfinterval'] + self.spfintervalmi = self.module.params['spfintervalmi'] + self.spfmaxinterval = self.module.params['spfmaxinterval'] + self.spfstartinterval = self.module.params['spfstartinterval'] + self.spfholdinterval = self.module.params['spfholdinterval'] + self.state = self.module.params['state'] + + # ospf info + self.ospf_info = dict() + + # state + self.changed = False + self.updates_cmd = list() + self.results = dict() + self.proposed = dict() + self.existing = dict() + self.end_state = dict() + self.lsa_arrival_changed = False + self.lsa_originate_changed = False + self.spf_changed = False + self.route_id_changed = False + self.bandwidth_changed = False + self.description_changed = False + self.vrf_changed = False + + def init_module(self): + """" init module """ + + self.module = AnsibleModule( + argument_spec=self.spec, supports_check_mode=True) + + def check_response(self, xml_str, xml_name): + """Check if response message is already succeed.""" + + if "" not in xml_str: + self.module.fail_json(msg='Error: %s failed.' 
% xml_name) + + def is_valid_ospf_process_id(self): + """check whether the input ospf process id is valid""" + + if not self.ospf.isdigit(): + return False + if int(self.ospf) > 4294967295 or int(self.ospf) < 1: + return False + return True + + def is_valid_ospf_route_id(self): + """check is ipv4 addr is valid""" + + if self.route_id.find('.') != -1: + addr_list = self.route_id.split('.') + if len(addr_list) != 4: + return False + for each_num in addr_list: + if not each_num.isdigit(): + return False + if int(each_num) > 255: + return False + return True + return False + + def is_valid_vrf_name(self): + """check whether the input ospf vrf name is valid""" + + if len(self.vrf) > 31 or len(self.vrf) < 1: + return False + if self.vrf.find('?') != -1: + return False + if self.vrf.find(' ') != -1: + return False + return True + + def is_valid_description(self): + """check whether the input ospf description is valid""" + + if len(self.description) > 80 or len(self.description) < 1: + return False + if self.description.find('?') != -1: + return False + return True + + def is_valid_bandwidth(self): + """check whether the input ospf bandwidth reference is valid""" + + if not self.bandwidth.isdigit(): + return False + if int(self.bandwidth) > 2147483648 or int(self.bandwidth) < 1: + return False + return True + + def is_valid_lsa_arrival_interval(self): + """check whether the input ospf lsa arrival interval is valid""" + + if self.lsaainterval is None: + return False + if not self.lsaainterval.isdigit(): + return False + if int(self.lsaainterval) > 10000 or int(self.lsaainterval) < 0: + return False + return True + + def isvalidlsamaxarrivalinterval(self): + """check whether the input ospf lsa max arrival interval is valid""" + + if not self.lsaamaxinterval.isdigit(): + return False + if int(self.lsaamaxinterval) > 10000 or int(self.lsaamaxinterval) < 1: + return False + return True + + def isvalidlsastartarrivalinterval(self): + """check whether the input ospf lsa start arrival interval is valid""" + + if not self.lsaastartinterval.isdigit(): + return False + if int(self.lsaastartinterval) > 1000 or int(self.lsaastartinterval) < 0: + return False + return True + + def isvalidlsaholdarrivalinterval(self): + """check whether the input ospf lsa hold arrival interval is valid""" + + if not self.lsaaholdinterval.isdigit(): + return False + if int(self.lsaaholdinterval) > 5000 or int(self.lsaaholdinterval) < 0: + return False + return True + + def is_valid_lsa_originate_interval(self): + """check whether the input ospf lsa originate interval is valid""" + + if not self.lsaointerval.isdigit(): + return False + if int(self.lsaointerval) > 10 or int(self.lsaointerval) < 0: + return False + return True + + def isvalidlsaoriginatemaxinterval(self): + """check whether the input ospf lsa originate max interval is valid""" + + if not self.lsaomaxinterval.isdigit(): + return False + if int(self.lsaomaxinterval) > 10000 or int(self.lsaomaxinterval) < 1: + return False + return True + + def isvalidlsaostartinterval(self): + """check whether the input ospf lsa originate start interval is valid""" + + if not self.lsaostartinterval.isdigit(): + return False + if int(self.lsaostartinterval) > 1000 or int(self.lsaostartinterval) < 0: + return False + return True + + def isvalidlsaoholdinterval(self): + """check whether the input ospf lsa originate hold interval is valid""" + + if not self.lsaoholdinterval.isdigit(): + return False + if int(self.lsaoholdinterval) > 5000 or int(self.lsaoholdinterval) < 1: + return False + 
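Two small patterns recur throughout these modules: check_response() treats an edit-config as successful only when the NETCONF ok element ("<ok/>") appears in the reply, and the dotted-quad validators accept exactly four all-digit groups in the 0-255 range. Both as compact sketches (function names are illustrative):

```python
def edit_succeeded(reply_xml):
    """True when the NETCONF reply carries the bare <ok/> element."""
    return "<ok/>" in reply_xml

def is_valid_v4(addr):
    """Four all-digit dotted groups, each <= 255 (mirrors the checks above)."""
    parts = addr.split('.')
    return len(parts) == 4 and all(p.isdigit() and int(p) <= 255 for p in parts)
```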
return True + + def is_valid_spf_interval(self): + """check whether the input ospf spf interval is valid""" + + if not self.spfinterval.isdigit(): + return False + if int(self.spfinterval) > 10 or int(self.spfinterval) < 1: + return False + return True + + def is_valid_spf_milli_interval(self): + """check whether the input ospf spf millisecond level interval is valid""" + + if not self.spfintervalmi.isdigit(): + return False + if int(self.spfintervalmi) > 10000 or int(self.spfintervalmi) < 1: + return False + return True + + def is_valid_spf_max_interval(self): + """check whether the input ospf spf intelligent timer max interval is valid""" + + if not self.spfmaxinterval.isdigit(): + return False + if int(self.spfmaxinterval) > 20000 or int(self.spfmaxinterval) < 1: + return False + return True + + def is_valid_spf_start_interval(self): + """check whether the input ospf spf intelligent timer start interval is valid""" + + if not self.spfstartinterval.isdigit(): + return False + if int(self.spfstartinterval) > 1000 or int(self.spfstartinterval) < 1: + return False + return True + + def is_valid_spf_hold_interval(self): + """check whether the input ospf spf intelligent timer hold interval is valid""" + + if not self.spfholdinterval.isdigit(): + return False + if int(self.spfholdinterval) > 5000 or int(self.spfholdinterval) < 1: + return False + return True + + def is_route_id_exist(self): + """is route id exist""" + + if not self.ospf_info: + return False + + for ospf_site in self.ospf_info["ospfsite"]: + if ospf_site["processId"] != self.ospf: + continue + if ospf_site["routerId"] == self.route_id: + return True + else: + continue + return False + + def get_exist_ospf_id(self): + """get exist ospf process id""" + + if not self.ospf_info: + return None + + for ospf_site in self.ospf_info["ospfsite"]: + if ospf_site["processId"] == self.ospf: + return ospf_site["processId"] + else: + continue + return None + + def get_exist_route(self): + """get exist route id""" + + if not self.ospf_info: + return None + + for ospf_site in self.ospf_info["ospfsite"]: + if ospf_site["processId"] == self.ospf: + return ospf_site["routerId"] + else: + continue + return None + + def get_exist_vrf(self): + """get exist vrf""" + + if not self.ospf_info: + return None + + for ospf_site in self.ospf_info["ospfsite"]: + if ospf_site["processId"] == self.ospf: + return ospf_site["vrfName"] + else: + continue + return None + + def get_exist_bandwidth(self): + """get exist bandwidth""" + + if not self.ospf_info: + return None + + for ospf_site in self.ospf_info["ospfsite"]: + if ospf_site["processId"] == self.ospf: + return ospf_site["bandwidthReference"] + else: + continue + return None + + def get_exist_lsa_a_interval(self): + """get exist lsa arrival interval""" + + if not self.ospf_info: + return None + + for ospf_site in self.ospf_info["ospfsite"]: + if ospf_site["processId"] == self.ospf: + return ospf_site["lsaArrivalInterval"] + else: + continue + return None + + def get_exist_lsa_a_interval_flag(self): + """get exist lsa arrival interval flag""" + + if not self.ospf_info: + return None + + for ospf_site in self.ospf_info["ospfsite"]: + if ospf_site["processId"] == self.ospf: + return ospf_site["lsaArrivalFlag"] + else: + continue + return None + + def get_exist_lsa_a_max_interval(self): + """get exist lsa arrival max interval""" + + if not self.ospf_info: + return None + + for ospf_site in self.ospf_info["ospfsite"]: + if ospf_site["processId"] == self.ospf: + return ospf_site["lsaArrivalMaxInterval"] + else: + 
continue + return None + + def get_exist_lsa_a_start_interval(self): + """get exist lsa arrival start interval""" + + if not self.ospf_info: + return None + + for ospf_site in self.ospf_info["ospfsite"]: + if ospf_site["processId"] == self.ospf: + return ospf_site["lsaArrivalStartInterval"] + else: + continue + return None + + def get_exist_lsa_a_hold_interval(self): + """get exist lsa arrival hold interval""" + + if not self.ospf_info: + return None + + for ospf_site in self.ospf_info["ospfsite"]: + if ospf_site["processId"] == self.ospf: + return ospf_site["lsaArrivalHoldInterval"] + else: + continue + return None + + def getexistlsaointerval(self): + """get exist lsa originate interval""" + + if not self.ospf_info: + return None + + for ospf_site in self.ospf_info["ospfsite"]: + if ospf_site["processId"] == self.ospf: + return ospf_site["lsaOriginateInterval"] + else: + continue + return None + + def getexistlsaointerval_flag(self): + """get exist lsa originate interval flag""" + + if not self.ospf_info: + return None + + for ospf_site in self.ospf_info["ospfsite"]: + if ospf_site["processId"] == self.ospf: + return ospf_site["lsaOriginateIntervalFlag"] + else: + continue + return None + + def getexistlsaomaxinterval(self): + """get exist lsa originate max interval""" + + if not self.ospf_info: + return None + + for ospf_site in self.ospf_info["ospfsite"]: + if ospf_site["processId"] == self.ospf: + return ospf_site["lsaOriginateMaxInterval"] + else: + continue + return None + + def getexistlsaostartinterval(self): + """get exist lsa originate start interval""" + + if not self.ospf_info: + return None + + for ospf_site in self.ospf_info["ospfsite"]: + if ospf_site["processId"] == self.ospf: + return ospf_site["lsaOriginateStartInterval"] + else: + continue + return None + + def getexistlsaoholdinterval(self): + """get exist lsa originate hold interval""" + + if not self.ospf_info: + return None + + for ospf_site in self.ospf_info["ospfsite"]: + if ospf_site["processId"] == self.ospf: + return ospf_site["lsaOriginateHoldInterval"] + else: + continue + return None + + def get_exist_spf_interval(self): + """get exist spf second level timer interval""" + + if not self.ospf_info: + return None + + for ospf_site in self.ospf_info["ospfsite"]: + if ospf_site["processId"] == self.ospf: + return ospf_site["spfScheduleInterval"] + else: + continue + return None + + def get_exist_spf_milli_interval(self): + """get exist spf millisecond level timer interval""" + + if not self.ospf_info: + return None + + for ospf_site in self.ospf_info["ospfsite"]: + if ospf_site["processId"] == self.ospf: + return ospf_site["spfScheduleIntervalMillisecond"] + else: + continue + return None + + def get_exist_spf_max_interval(self): + """get exist spf max interval""" + + if not self.ospf_info: + return None + + for ospf_site in self.ospf_info["ospfsite"]: + if ospf_site["processId"] == self.ospf: + return ospf_site["spfScheduleMaxInterval"] + else: + continue + return None + + def get_exist_spf_start_interval(self): + """get exist spf start interval""" + + if not self.ospf_info: + return None + + for ospf_site in self.ospf_info["ospfsite"]: + if ospf_site["processId"] == self.ospf: + return ospf_site["spfScheduleStartInterval"] + else: + continue + return None + + def get_exist_spf_hold_interval(self): + """get exist spf hold interval""" + + if not self.ospf_info: + return None + + for ospf_site in self.ospf_info["ospfsite"]: + if ospf_site["processId"] == self.ospf: + return ospf_site["spfScheduleHoldInterval"] + 
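The long run of get_exist_*() accessors above all walk the same "ospfsite" list looking for the entry whose processId matches, differing only in which tag they return. The shared shape can be expressed once; a refactoring sketch, not part of the module:

```python
def get_site_attr(ospf_info, process_id, tag):
    """Return one attribute of the matching ospfSite entry, or None."""
    for site in ospf_info.get("ospfsite", []):
        if site.get("processId") == process_id:
            return site.get(tag)
    return None

# e.g. get_site_attr(self.ospf_info, self.ospf, "spfScheduleHoldInterval")
```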
else: + continue + return None + + def get_exist_spf_interval_type(self): + """get exist spf hold interval""" + + if not self.ospf_info: + return None + + for ospf_site in self.ospf_info["ospfsite"]: + if ospf_site["processId"] == self.ospf: + return ospf_site["spfScheduleIntervalType"] + else: + continue + return None + + def is_ospf_exist(self): + """is ospf exist""" + + if not self.ospf_info: + return False + + for ospf_site in self.ospf_info["ospfsite"]: + if ospf_site["processId"] == self.ospf: + return True + else: + continue + return False + + def get_exist_description(self): + """is description exist""" + + if not self.ospf_info: + return None + + for ospf_site in self.ospf_info["ospfsite"]: + if ospf_site["processId"] == self.ospf: + return ospf_site["description"] + else: + continue + return None + + def check_params(self): + """Check all input params""" + + if self.ospf == '': + self.module.fail_json( + msg='Error: The ospf process id should not be null.') + if self.ospf: + if not self.is_valid_ospf_process_id(): + self.module.fail_json( + msg='Error: The ospf process id should between 1 - 4294967295.') + if self.route_id == '': + self.module.fail_json( + msg='Error: The ospf route id length should not be null.') + if self.route_id: + if not self.is_valid_ospf_route_id(): + self.module.fail_json( + msg='Error: The ospf route id length should between 0 - 20,i.e.10.1.1.1.') + if self.vrf == '': + self.module.fail_json( + msg='Error: The ospf vpn instance length should not be null.') + if self.vrf: + if not self.is_valid_vrf_name(): + self.module.fail_json( + msg='Error: The ospf vpn instance length should between 0 - 31,but can not contain " " or "?".') + if self.description == '': + self.module.fail_json( + msg='Error: The ospf description should not be null.') + if self.description: + if not self.is_valid_description(): + self.module.fail_json( + msg='Error: The ospf description length should between 1 - 80,but can not contain "?".') + if self.bandwidth == '': + self.module.fail_json( + msg='Error: The ospf bandwidth reference should not be null.') + if self.bandwidth: + if not self.is_valid_bandwidth(): + self.module.fail_json( + msg='Error: The ospf bandwidth reference should between 1 - 2147483648.') + if self.lsaalflag is True: + if not self.is_valid_lsa_arrival_interval(): + self.module.fail_json( + msg='Error: The ospf lsa arrival interval should between 0 - 10000.') + if self.lsaamaxinterval or self.lsaastartinterval or self.lsaaholdinterval: + self.module.fail_json( + msg='Error: Non-Intelligent Timer and Intelligent Timer Interval of ' + 'lsa-arrival-interval can not configured at the same time.') + if self.lsaalflag is False: + if self.lsaainterval: + self.module.fail_json( + msg='Error: The parameter of lsa arrival interval command is invalid, ' + 'because LSA arrival interval can not be config when the LSA arrival flag is not set.') + if self.lsaamaxinterval == '' or self.lsaastartinterval == '' or self.lsaaholdinterval == '': + self.module.fail_json( + msg='Error: The ospf lsa arrival intervals should not be null.') + if self.lsaamaxinterval: + if not self.isvalidlsamaxarrivalinterval(): + self.module.fail_json( + msg='Error: The ospf lsa arrival max interval should between 1 - 10000.') + if self.lsaastartinterval: + if not self.isvalidlsastartarrivalinterval(): + self.module.fail_json( + msg='Error: The ospf lsa arrival start interval should between 1 - 1000.') + if self.lsaaholdinterval: + if not self.isvalidlsaholdarrivalinterval(): + self.module.fail_json( + 
msg='Error: The ospf lsa arrival hold interval should between 1 - 5000.') + if self.lsaointervalflag is True: + if self.lsaointerval or self.lsaomaxinterval \ + or self.lsaostartinterval or self.lsaoholdinterval: + self.module.fail_json( + msg='Error: Interval for other-type and Instantly Flag ' + 'of lsa-originate-interval can not configured at the same time.') + if self.lsaointerval == '': + self.module.fail_json( + msg='Error: The ospf lsa originate interval should should not be null.') + if self.lsaointerval: + if not self.is_valid_lsa_originate_interval(): + self.module.fail_json( + msg='Error: The ospf lsa originate interval should between 0 - 10 s.') + if self.lsaomaxinterval == '' or self.lsaostartinterval == '' or self.lsaoholdinterval == '': + self.module.fail_json( + msg='Error: The ospf lsa originate intelligent intervals should should not be null.') + if self.lsaomaxinterval: + if not self.isvalidlsaoriginatemaxinterval(): + self.module.fail_json( + msg='Error: The ospf lsa originate max interval should between 1 - 10000 ms.') + if self.lsaostartinterval: + if not self.isvalidlsaostartinterval(): + self.module.fail_json( + msg='Error: The ospf lsa originate start interval should between 0 - 1000 ms.') + if self.lsaoholdinterval: + if not self.isvalidlsaoholdinterval(): + self.module.fail_json( + msg='Error: The ospf lsa originate hold interval should between 1 - 5000 ms.') + if self.spfintervaltype == '': + self.module.fail_json( + msg='Error: The ospf spf interval type should should not be null.') + if self.spfintervaltype == 'intelligent-timer': + if self.spfinterval is not None or self.spfintervalmi is not None: + self.module.fail_json( + msg='Error: Interval second and interval millisecond ' + 'of spf-schedule-interval can not configured if use intelligent timer.') + if self.spfmaxinterval == '' or self.spfstartinterval == '' or self.spfholdinterval == '': + self.module.fail_json( + msg='Error: The ospf spf intelligent timer intervals should should not be null.') + if self.spfmaxinterval and not self.is_valid_spf_max_interval(): + self.module.fail_json( + msg='Error: The ospf spf max interval of intelligent timer should between 1 - 20000 ms.') + if self.spfstartinterval and not self.is_valid_spf_start_interval(): + self.module.fail_json( + msg='Error: The ospf spf start interval of intelligent timer should between 1 - 1000 ms.') + if self.spfholdinterval and not self.is_valid_spf_hold_interval(): + self.module.fail_json( + msg='Error: The ospf spf hold interval of intelligent timer should between 1 - 5000 ms.') + if self.spfintervaltype == 'timer': + if self.spfintervalmi is not None: + self.module.fail_json( + msg='Error: Interval second and interval millisecond ' + 'of spf-schedule-interval can not configured at the same time.') + if self.spfmaxinterval or self.spfstartinterval or self.spfholdinterval: + self.module.fail_json( + msg='Error: Interval second and interval intelligent ' + 'of spf-schedule-interval can not configured at the same time.') + if self.spfinterval == '' or self.spfinterval is None: + self.module.fail_json( + msg='Error: The ospf spf timer intervals should should not be null.') + if not self.is_valid_spf_interval(): + self.module.fail_json( + msg='Error: Interval second should between 1 - 10 s.') + if self.spfintervaltype == 'millisecond': + if self.spfinterval is not None: + self.module.fail_json( + msg='Error: Interval millisecond and interval second ' + 'of spf-schedule-interval can not configured at the same time.') + if self.spfmaxinterval or 
self.spfstartinterval or self.spfholdinterval: + self.module.fail_json( + msg='Error: Interval millisecond and interval intelligent ' + 'of spf-schedule-interval can not configured at the same time.') + if self.spfintervalmi == '' or self.spfintervalmi is None: + self.module.fail_json( + msg='Error: The ospf spf millisecond intervals should should not be null.') + if not self.is_valid_spf_milli_interval(): + self.module.fail_json( + msg='Error: Interval millisecond should between 1 - 10000 ms.') + + def get_ospf_info(self): + """ get the detail information of ospf """ + + self.ospf_info["ospfsite"] = list() + + getxmlstr = CE_NC_GET_OSPF_VRF + xml_str = get_nc_config(self.module, getxmlstr) + if 'data/' in xml_str: + return + + xml_str = xml_str.replace('\r', '').replace('\n', '').\ + replace('xmlns="urn:ietf:params:xml:ns:netconf:base:1.0"', "").\ + replace('xmlns="http://www.huawei.com/netconf/vrp"', "") + + root = ElementTree.fromstring(xml_str) + + # get the vpn address family and RD text + ospf_sites = root.findall( + "ospfv2/ospfv2comm/ospfSites/ospfSite") + if ospf_sites: + for ospf_site in ospf_sites: + ospf_ele_info = dict() + for ospf_site_ele in ospf_site: + if ospf_site_ele.tag in ["processId", "routerId", "vrfName", "bandwidthReference", + "description", "lsaArrivalInterval", "lsaArrivalMaxInterval", + "lsaArrivalStartInterval", "lsaArrivalHoldInterval", "lsaArrivalFlag", + "lsaOriginateInterval", "lsaOriginateMaxInterval", + "lsaOriginateStartInterval", "lsaOriginateHoldInterval", + "lsaOriginateIntervalFlag", "spfScheduleInterval", + "spfScheduleIntervalMillisecond", "spfScheduleMaxInterval", + "spfScheduleStartInterval", "spfScheduleHoldInterval", + "spfScheduleIntervalType"]: + ospf_ele_info[ + ospf_site_ele.tag] = ospf_site_ele.text + if ospf_ele_info["processId"] == self.ospf: + self.ospf_info["ospfsite"].append(ospf_ele_info) + + def get_proposed(self): + """get proposed info""" + + self.proposed["process_id"] = self.ospf + self.proposed["route_id"] = self.route_id + self.proposed["vrf"] = self.vrf + self.proposed["description"] = self.description + self.proposed["bandwidth"] = self.bandwidth + self.proposed["lsaalflag"] = self.lsaalflag + self.proposed["lsaainterval"] = self.lsaainterval + self.proposed["lsaamaxinterval"] = self.lsaamaxinterval + self.proposed["lsaastartinterval"] = self.lsaastartinterval + self.proposed["lsaaholdinterval"] = self.lsaaholdinterval + self.proposed["lsaointervalflag"] = self.lsaointervalflag + self.proposed["lsaointerval"] = self.lsaointerval + self.proposed["lsaomaxinterval"] = self.lsaomaxinterval + self.proposed["lsaostartinterval"] = self.lsaostartinterval + self.proposed["lsaoholdinterval"] = self.lsaoholdinterval + self.proposed["spfintervaltype"] = self.spfintervaltype + self.proposed["spfinterval"] = self.spfinterval + self.proposed["spfintervalmi"] = self.spfintervalmi + self.proposed["spfmaxinterval"] = self.spfmaxinterval + self.proposed["spfstartinterval"] = self.spfstartinterval + self.proposed["spfholdinterval"] = self.spfholdinterval + + def operate_ospf_info(self): + """operate ospf info""" + + config_route_id_xml = '' + vrf = self.get_exist_vrf() + if vrf is None: + vrf = '_public_' + description = self.get_exist_description() + if description is None: + description = '' + bandwidth_reference = self.get_exist_bandwidth() + if bandwidth_reference is None: + bandwidth_reference = '100' + lsa_in_interval = self.get_exist_lsa_a_interval() + if lsa_in_interval is None: + lsa_in_interval = '' + lsa_arrival_max_interval = 
+    def operate_ospf_info(self):
+        """operate ospf info"""
+
+        config_route_id_xml = ''
+        vrf = self.get_exist_vrf()
+        if vrf is None:
+            vrf = '_public_'
+        description = self.get_exist_description()
+        if description is None:
+            description = ''
+        bandwidth_reference = self.get_exist_bandwidth()
+        if bandwidth_reference is None:
+            bandwidth_reference = '100'
+        lsa_in_interval = self.get_exist_lsa_a_interval()
+        if lsa_in_interval is None:
+            lsa_in_interval = ''
+        lsa_arrival_max_interval = self.get_exist_lsa_a_max_interval()
+        if lsa_arrival_max_interval is None:
+            lsa_arrival_max_interval = '1000'
+        lsa_arrival_start_interval = self.get_exist_lsa_a_start_interval()
+        if lsa_arrival_start_interval is None:
+            lsa_arrival_start_interval = '500'
+        lsa_arrival_hold_interval = self.get_exist_lsa_a_hold_interval()
+        if lsa_arrival_hold_interval is None:
+            lsa_arrival_hold_interval = '500'
+        lsa_originate_interval = self.getexistlsaointerval()
+        if lsa_originate_interval is None:
+            lsa_originate_interval = '5'
+        lsa_originate_max_interval = self.getexistlsaomaxinterval()
+        if lsa_originate_max_interval is None:
+            lsa_originate_max_interval = '5000'
+        lsa_originate_start_interval = self.getexistlsaostartinterval()
+        if lsa_originate_start_interval is None:
+            lsa_originate_start_interval = '500'
+        lsa_originate_hold_interval = self.getexistlsaoholdinterval()
+        if lsa_originate_hold_interval is None:
+            lsa_originate_hold_interval = '1000'
+        spf_interval = self.get_exist_spf_interval()
+        if spf_interval is None:
+            spf_interval = ''
+        spf_interval_milli = self.get_exist_spf_milli_interval()
+        if spf_interval_milli is None:
+            spf_interval_milli = ''
+        spf_max_interval = self.get_exist_spf_max_interval()
+        if spf_max_interval is None:
+            spf_max_interval = '5000'
+        spf_start_interval = self.get_exist_spf_start_interval()
+        if spf_start_interval is None:
+            spf_start_interval = '50'
+        spf_hold_interval = self.get_exist_spf_hold_interval()
+        if spf_hold_interval is None:
+            spf_hold_interval = '200'
+
+        if self.route_id:
+            if self.state == 'present':
+                if self.route_id != self.get_exist_route():
+                    self.route_id_changed = True
+                    config_route_id_xml = CE_NC_CREATE_ROUTE_ID % self.route_id
+            else:
+                if self.route_id != self.get_exist_route():
+                    self.module.fail_json(
+                        msg='Error: The route id %s does not exist.' % self.route_id)
+                self.route_id_changed = True
+                configxmlstr = CE_NC_DELETE_OSPF % (
+                    self.ospf, self.get_exist_route(), self.get_exist_vrf())
+                conf_str = build_config_xml(configxmlstr)
+
+                recv_xml = set_nc_config(self.module, conf_str)
+                self.check_response(recv_xml, "OPERATE_VRF_AF")
+                self.changed = True
+                return
+        if self.vrf != '_public_':
+            if self.state == 'present':
+                if self.vrf != self.get_exist_vrf():
+                    self.vrf_changed = True
+                    vrf = self.vrf
+            else:
+                if self.vrf != self.get_exist_vrf():
+                    self.module.fail_json(
+                        msg='Error: The vrf %s does not exist.' % self.vrf)
+                self.vrf_changed = True
+                configxmlstr = CE_NC_DELETE_OSPF % (
+                    self.ospf, self.get_exist_route(), self.get_exist_vrf())
+                conf_str = build_config_xml(configxmlstr)
+                recv_xml = set_nc_config(self.module, conf_str)
+                self.check_response(recv_xml, "OPERATE_VRF_AF")
+                self.changed = True
+                return
+        if self.bandwidth:
+            if self.state == 'present':
+                if self.bandwidth != self.get_exist_bandwidth():
+                    self.bandwidth_changed = True
+                    bandwidth_reference = self.bandwidth
+            else:
+                if self.bandwidth != self.get_exist_bandwidth():
+                    self.module.fail_json(
+                        msg='Error: The bandwidth %s does not exist.' % self.bandwidth)
+                if self.get_exist_bandwidth() != '100':
+                    self.bandwidth_changed = True
+                    bandwidth_reference = '100'
+        if self.description:
+            if self.state == 'present':
+                if self.description != self.get_exist_description():
+                    self.description_changed = True
+                    description = self.description
+            else:
+                if self.description != self.get_exist_description():
+                    self.module.fail_json(
+                        msg='Error: The description %s does not exist.'
% self.description) + self.description_changed = True + description = '' + + if self.lsaalflag is False: + lsa_in_interval = '' + if self.state == 'present': + if self.lsaamaxinterval: + if self.lsaamaxinterval != self.get_exist_lsa_a_max_interval(): + self.lsa_arrival_changed = True + lsa_arrival_max_interval = self.lsaamaxinterval + if self.lsaastartinterval: + if self.lsaastartinterval != self.get_exist_lsa_a_start_interval(): + self.lsa_arrival_changed = True + lsa_arrival_start_interval = self.lsaastartinterval + if self.lsaaholdinterval: + if self.lsaaholdinterval != self.get_exist_lsa_a_hold_interval(): + self.lsa_arrival_changed = True + lsa_arrival_hold_interval = self.lsaaholdinterval + else: + if self.lsaamaxinterval: + if self.lsaamaxinterval != self.get_exist_lsa_a_max_interval(): + self.module.fail_json( + msg='Error: The lsaamaxinterval %s is not exist.' % self.lsaamaxinterval) + if self.get_exist_lsa_a_max_interval() != '1000': + lsa_arrival_max_interval = '1000' + self.lsa_arrival_changed = True + if self.lsaastartinterval: + if self.lsaastartinterval != self.get_exist_lsa_a_start_interval(): + self.module.fail_json( + msg='Error: The lsaastartinterval %s is not exist.' % self.lsaastartinterval) + if self.get_exist_lsa_a_start_interval() != '500': + lsa_arrival_start_interval = '500' + self.lsa_arrival_changed = True + if self.lsaaholdinterval: + if self.lsaaholdinterval != self.get_exist_lsa_a_hold_interval(): + self.module.fail_json( + msg='Error: The lsaaholdinterval %s is not exist.' % self.lsaaholdinterval) + if self.get_exist_lsa_a_hold_interval() != '500': + lsa_arrival_hold_interval = '500' + self.lsa_arrival_changed = True + else: + if self.state == 'present': + lsaalflag = "false" + if self.lsaalflag is True: + lsaalflag = "true" + if lsaalflag != self.get_exist_lsa_a_interval_flag(): + self.lsa_arrival_changed = True + if self.lsaainterval is None: + self.module.fail_json( + msg='Error: The lsaainterval is not supplied.') + else: + lsa_in_interval = self.lsaainterval + else: + if self.lsaainterval: + if self.lsaainterval != self.get_exist_lsa_a_interval(): + self.lsa_arrival_changed = True + lsa_in_interval = self.lsaainterval + else: + if self.lsaainterval: + if self.lsaainterval != self.get_exist_lsa_a_interval(): + self.module.fail_json( + msg='Error: The lsaainterval %s is not exist.' % self.lsaainterval) + self.lsaalflag = False + lsa_in_interval = '' + self.lsa_arrival_changed = True + + if self.lsaointervalflag is False: + if self.state == 'present': + if self.lsaomaxinterval: + if self.lsaomaxinterval != self.getexistlsaomaxinterval(): + self.lsa_originate_changed = True + lsa_originate_max_interval = self.lsaomaxinterval + if self.lsaostartinterval: + if self.lsaostartinterval != self.getexistlsaostartinterval(): + self.lsa_originate_changed = True + lsa_originate_start_interval = self.lsaostartinterval + if self.lsaoholdinterval: + if self.lsaoholdinterval != self.getexistlsaoholdinterval(): + self.lsa_originate_changed = True + lsa_originate_hold_interval = self.lsaoholdinterval + if self.lsaointerval: + if self.lsaointerval != self.getexistlsaointerval(): + self.lsa_originate_changed = True + lsa_originate_interval = self.lsaointerval + else: + if self.lsaomaxinterval: + if self.lsaomaxinterval != self.getexistlsaomaxinterval(): + self.module.fail_json( + msg='Error: The lsaomaxinterval %s is not exist.' 
% self.lsaomaxinterval) + if self.getexistlsaomaxinterval() != '5000': + lsa_originate_max_interval = '5000' + self.lsa_originate_changed = True + if self.lsaostartinterval: + if self.lsaostartinterval != self.getexistlsaostartinterval(): + self.module.fail_json( + msg='Error: The lsaostartinterval %s is not exist.' % self.lsaostartinterval) + if self.getexistlsaostartinterval() != '500': + lsa_originate_start_interval = '500' + self.lsa_originate_changed = True + if self.lsaoholdinterval: + if self.lsaoholdinterval != self.getexistlsaoholdinterval(): + self.module.fail_json( + msg='Error: The lsaoholdinterval %s is not exist.' % self.lsaoholdinterval) + if self.getexistlsaoholdinterval() != '1000': + lsa_originate_hold_interval = '1000' + self.lsa_originate_changed = True + if self.lsaointerval: + if self.lsaointerval != self.getexistlsaointerval(): + self.module.fail_json( + msg='Error: The lsaointerval %s is not exist.' % self.lsaointerval) + if self.getexistlsaointerval() != '5': + lsa_originate_interval = '5' + self.lsa_originate_changed = True + else: + if self.state == 'present': + if self.getexistlsaointerval_flag() != 'true': + self.lsa_originate_changed = True + lsa_originate_interval = '5' + lsa_originate_max_interval = '5000' + lsa_originate_start_interval = '500' + lsa_originate_hold_interval = '1000' + else: + if self.getexistlsaointerval_flag() == 'true': + self.lsaointervalflag = False + self.lsa_originate_changed = True + if self.spfintervaltype != self.get_exist_spf_interval_type(): + self.spf_changed = True + if self.spfintervaltype == 'timer': + if self.spfinterval: + if self.state == 'present': + if self.spfinterval != self.get_exist_spf_interval(): + self.spf_changed = True + spf_interval = self.spfinterval + spf_interval_milli = '' + else: + if self.spfinterval != self.get_exist_spf_interval(): + self.module.fail_json( + msg='Error: The spfinterval %s is not exist.' % self.spfinterval) + self.spfintervaltype = 'intelligent-timer' + spf_interval = '' + self.spf_changed = True + if self.spfintervaltype == 'millisecond': + if self.spfintervalmi: + if self.state == 'present': + if self.spfintervalmi != self.get_exist_spf_milli_interval(): + self.spf_changed = True + spf_interval_milli = self.spfintervalmi + spf_interval = '' + else: + if self.spfintervalmi != self.get_exist_spf_milli_interval(): + self.module.fail_json( + msg='Error: The spfintervalmi %s is not exist.' % self.spfintervalmi) + self.spfintervaltype = 'intelligent-timer' + spf_interval_milli = '' + self.spf_changed = True + if self.spfintervaltype == 'intelligent-timer': + spf_interval = '' + spf_interval_milli = '' + if self.spfmaxinterval: + if self.state == 'present': + if self.spfmaxinterval != self.get_exist_spf_max_interval(): + self.spf_changed = True + spf_max_interval = self.spfmaxinterval + else: + if self.spfmaxinterval != self.get_exist_spf_max_interval(): + self.module.fail_json( + msg='Error: The spfmaxinterval %s is not exist.' % self.spfmaxinterval) + if self.get_exist_spf_max_interval() != '5000': + self.spf_changed = True + spf_max_interval = '5000' + if self.spfstartinterval: + if self.state == 'present': + if self.spfstartinterval != self.get_exist_spf_start_interval(): + self.spf_changed = True + spf_start_interval = self.spfstartinterval + else: + if self.spfstartinterval != self.get_exist_spf_start_interval(): + self.module.fail_json( + msg='Error: The spfstartinterval %s is not exist.' 
% self.spfstartinterval) + if self.get_exist_spf_start_interval() != '50': + self.spf_changed = True + spf_start_interval = '50' + if self.spfholdinterval: + if self.state == 'present': + if self.spfholdinterval != self.get_exist_spf_hold_interval(): + self.spf_changed = True + spf_hold_interval = self.spfholdinterval + else: + if self.spfholdinterval != self.get_exist_spf_hold_interval(): + self.module.fail_json( + msg='Error: The spfholdinterval %s is not exist.' % self.spfholdinterval) + if self.get_exist_spf_hold_interval() != '200': + self.spf_changed = True + spf_hold_interval = '200' + + if not self.description_changed and not self.vrf_changed and not self.lsa_arrival_changed \ + and not self.lsa_originate_changed and not self.spf_changed \ + and not self.route_id_changed and not self.bandwidth_changed: + self.changed = False + return + else: + self.changed = True + lsaointervalflag = "false" + lsaalflag = "false" + if self.lsaointervalflag is True: + lsaointervalflag = "true" + if self.lsaalflag is True: + lsaalflag = "true" + configxmlstr = CE_NC_CREATE_OSPF_VRF % ( + self.ospf, config_route_id_xml, vrf, + description, bandwidth_reference, lsaalflag, + lsa_in_interval, lsa_arrival_max_interval, lsa_arrival_start_interval, + lsa_arrival_hold_interval, lsaointervalflag, lsa_originate_interval, + lsa_originate_max_interval, lsa_originate_start_interval, lsa_originate_hold_interval, + self.spfintervaltype, spf_interval, spf_interval_milli, + spf_max_interval, spf_start_interval, spf_hold_interval) + + conf_str = build_config_xml(configxmlstr) + recv_xml = set_nc_config(self.module, conf_str) + self.check_response(recv_xml, "OPERATE_VRF_AF") + + def get_existing(self): + """get existing info""" + + self.get_ospf_info() + self.existing['ospf_info'] = self.ospf_info["ospfsite"] + + def set_update_cmd(self): + """ set update command""" + if not self.changed: + return + + if self.state == 'present': + if self.vrf_changed: + if self.vrf != '_public_': + if self.route_id_changed: + self.updates_cmd.append( + 'ospf %s router-id %s vpn-instance %s' % (self.ospf, self.route_id, self.vrf)) + else: + self.updates_cmd.append( + 'ospf %s vpn-instance %s ' % (self.ospf, self.vrf)) + else: + if self.route_id_changed: + self.updates_cmd.append( + 'ospf %s router-id %s' % (self.ospf, self.route_id)) + else: + if self.route_id_changed: + if self.vrf != '_public_': + self.updates_cmd.append( + 'ospf %s router-id %s vpn-instance %s' % (self.ospf, self.route_id, self.get_exist_vrf())) + else: + self.updates_cmd.append( + 'ospf %s router-id %s' % (self.ospf, self.route_id)) + else: + if self.route_id_changed: + self.updates_cmd.append('undo ospf %s' % self.ospf) + return + + self.updates_cmd.append('ospf %s' % self.ospf) + + if self.description: + if self.state == 'present': + if self.description_changed: + self.updates_cmd.append( + 'description %s' % self.description) + else: + if self.description_changed: + self.updates_cmd.append('undo description') + if self.bandwidth_changed: + if self.state == 'present': + if self.get_exist_bandwidth() != '100': + self.updates_cmd.append( + 'bandwidth-reference %s' % (self.get_exist_bandwidth())) + else: + self.updates_cmd.append('undo bandwidth-reference') + if self.lsaalflag is True: + if self.lsa_arrival_changed: + if self.state == 'present': + self.updates_cmd.append( + 'lsa-arrival-interval %s' % (self.get_exist_lsa_a_interval())) + else: + self.updates_cmd.append( + 'undo lsa-arrival-interval') + + if self.lsaalflag is False: + if self.lsa_arrival_changed: + if 
self.state == 'present': + if self.get_exist_lsa_a_max_interval() != '1000' \ + or self.get_exist_lsa_a_start_interval() != '500'\ + or self.get_exist_lsa_a_hold_interval() != '500': + self.updates_cmd.append('lsa-arrival-interval intelligent-timer %s %s %s' + % (self.get_exist_lsa_a_max_interval(), + self.get_exist_lsa_a_start_interval(), + self.get_exist_lsa_a_hold_interval())) + else: + if self.get_exist_lsa_a_max_interval() == '1000' \ + and self.get_exist_lsa_a_start_interval() == '500'\ + and self.get_exist_lsa_a_hold_interval() == '500': + self.updates_cmd.append( + 'undo lsa-arrival-interval') + if self.lsaointervalflag is False: + if self.lsa_originate_changed: + if self.state == 'present': + if self.getexistlsaointerval() != '5' \ + or self.getexistlsaomaxinterval() != '5000' \ + or self.getexistlsaostartinterval() != '500' \ + or self.getexistlsaoholdinterval() != '1000': + self.updates_cmd.append('lsa-originate-interval other-type %s intelligent-timer %s %s %s' + % (self.getexistlsaointerval(), + self.getexistlsaomaxinterval(), + self.getexistlsaostartinterval(), + self.getexistlsaoholdinterval())) + else: + self.updates_cmd.append( + 'undo lsa-originate-interval') + if self.lsaointervalflag is True: + if self.lsa_originate_changed: + if self.state == 'present': + self.updates_cmd.append('lsa-originate-interval 0 ') + else: + self.updates_cmd.append( + 'undo lsa-originate-interval') + if self.spfintervaltype == 'millisecond': + if self.spf_changed: + if self.state == 'present': + self.updates_cmd.append( + 'spf-schedule-interval millisecond %s' % self.get_exist_spf_milli_interval()) + else: + self.updates_cmd.append( + 'undo spf-schedule-interval') + if self.spfintervaltype == 'timer': + if self.spf_changed: + if self.state == 'present': + self.updates_cmd.append( + 'spf-schedule-interval %s' % self.get_exist_spf_interval()) + else: + self.updates_cmd.append( + 'undo spf-schedule-interval') + if self.spfintervaltype == 'intelligent-timer': + if self.spf_changed: + if self.state == 'present': + if self.get_exist_spf_max_interval() != '5000' \ + or self.get_exist_spf_start_interval() != '50' \ + or self.get_exist_spf_hold_interval() != '200': + self.updates_cmd.append('spf-schedule-interval intelligent-timer %s %s %s' + % (self.get_exist_spf_max_interval(), + self.get_exist_spf_start_interval(), + self.get_exist_spf_hold_interval())) + else: + self.updates_cmd.append( + 'undo spf-schedule-interval') + + def get_end_state(self): + """get end state info""" + + self.get_ospf_info() + self.end_state['ospf_info'] = self.ospf_info["ospfsite"] + + def work(self): + """worker""" + + self.check_params() + self.get_existing() + self.get_proposed() + self.operate_ospf_info() + self.get_end_state() + self.set_update_cmd() + self.results['changed'] = self.changed + self.results['proposed'] = self.proposed + self.results['existing'] = self.existing + self.results['end_state'] = self.end_state + if self.changed: + self.results['updates'] = self.updates_cmd + else: + self.results['updates'] = list() + + self.module.exit_json(**self.results) + + +def main(): + """Module main""" + + argument_spec = dict( + ospf=dict(required=True, type='str'), + route_id=dict(required=False, type='str'), + vrf=dict(required=False, type='str', default='_public_'), + description=dict(required=False, type='str'), + bandwidth=dict(required=False, type='str'), + lsaalflag=dict(type='bool', default=False), + lsaainterval=dict(required=False, type='str'), + lsaamaxinterval=dict(required=False, type='str'), + 
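+        # All interval options below are declared type='str'; numeric range
+        # checking is performed by check_params() rather than by the argument
+        # spec, so the device-side units (s vs ms) stay explicit in messages.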
lsaastartinterval=dict(required=False, type='str'),
+        lsaaholdinterval=dict(required=False, type='str'),
+        lsaointervalflag=dict(type='bool', default=False),
+        lsaointerval=dict(required=False, type='str'),
+        lsaomaxinterval=dict(required=False, type='str'),
+        lsaostartinterval=dict(required=False, type='str'),
+        lsaoholdinterval=dict(required=False, type='str'),
+        spfintervaltype=dict(required=False, default='intelligent-timer',
+                             choices=['intelligent-timer', 'timer', 'millisecond']),
+        spfinterval=dict(required=False, type='str'),
+        spfintervalmi=dict(required=False, type='str'),
+        spfmaxinterval=dict(required=False, type='str'),
+        spfstartinterval=dict(required=False, type='str'),
+        spfholdinterval=dict(required=False, type='str'),
+        state=dict(required=False, choices=['present', 'absent'], default='present'),
+    )
+
+    argument_spec.update(ce_argument_spec)
+    module = OspfVrf(argument_spec)
+    module.work()
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/network/cloudengine/ce_reboot.py b/plugins/modules/network/cloudengine/ce_reboot.py
new file mode 100644
index 0000000000..289c67a43e
--- /dev/null
+++ b/plugins/modules/network/cloudengine/ce_reboot.py
@@ -0,0 +1,169 @@
+#!/usr/bin/python
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+DOCUMENTATION = '''
+---
+module: ce_reboot
+short_description: Reboot HUAWEI CloudEngine switches.
+description:
+    - Reboot a HUAWEI CloudEngine switch.
+author: Gong Jianjun (@QijunPan)
+notes:
+    - This module requires the netconf system service be enabled on the remote device being managed.
+    - Recommended connection is C(netconf).
+    - This module also works with C(local) connections for legacy playbooks.
+requirements: ["ncclient"]
+options:
+    confirm:
+        description:
+            - Safeguard boolean. Set to true if you're sure you want to reboot.
+        type: bool
+        required: true
+    save_config:
+        description:
+            - Flag indicating whether to save the configuration.
+        required: false
+        type: bool
+        default: false
+'''
+
+EXAMPLES = '''
+- name: reboot module test
+  hosts: cloudengine
+  connection: local
+  gather_facts: no
+  vars:
+    cli:
+      host: "{{ inventory_hostname }}"
+      port: "{{ ansible_ssh_port }}"
+      username: "{{ username }}"
+      password: "{{ password }}"
+      transport: cli
+
+  tasks:
+    - name: Reboot the device
+      ce_reboot:
+        confirm: true
+        save_config: true
+        provider: "{{ cli }}"
+'''
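+# With the recommended C(netconf) connection the provider block above is not
+# needed; an equivalent task (illustrative, not from the original docs) is:
+#
+#   - name: Reboot the device over netconf
+#     ce_reboot:
+#       confirm: true
+#       save_config: true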
+
+RETURN = '''
+rebooted:
+    description: Whether the device was instructed to reboot.
+    returned: success
+    type: bool
+    sample: true
+'''
+
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.network.cloudengine.ce import execute_nc_action, ce_argument_spec
+
+try:
+    from ncclient.operations.errors import TimeoutExpiredError
+    HAS_NCCLIENT = True
+except ImportError:
+    HAS_NCCLIENT = False
+
+CE_NC_XML_EXECUTE_REBOOT = """
+    <action>
+      <devm xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
+        <reboot>
+          <saveConfig>%s</saveConfig>
+        </reboot>
+      </devm>
+    </action>
+"""
+
+
+class Reboot(object):
+    """ Reboot a network device """
+
+    def __init__(self, **kwargs):
+        """ __init__ """
+
+        self.network_module = None
+        self.netconf = None
+        self.init_network_module(**kwargs)
+
+        self.confirm = self.network_module.params['confirm']
+        self.save_config = self.network_module.params['save_config']
+
+    def init_network_module(self, **kwargs):
+        """ init network module """
+
+        self.network_module = AnsibleModule(**kwargs)
+
+    def netconf_set_action(self, xml_str):
+        """ netconf execute action """
+
+        try:
+            execute_nc_action(self.network_module, xml_str)
+        except TimeoutExpiredError:
+            pass
+
+    def work(self):
+        """ start to work """
+
+        if not self.confirm:
+            self.network_module.fail_json(
+                msg='Error: Confirm must be set to true for this module to work.')
+
+        xml_str = CE_NC_XML_EXECUTE_REBOOT % str(self.save_config).lower()
+        self.netconf_set_action(xml_str)
+
+
+def main():
+    """ main """
+
+    argument_spec = dict(
+        confirm=dict(required=True, type='bool'),
+        save_config=dict(default=False, type='bool')
+    )
+
+    argument_spec.update(ce_argument_spec)
+    module = Reboot(argument_spec=argument_spec, supports_check_mode=True)
+
+    if not HAS_NCCLIENT:
+        module.network_module.fail_json(msg='Error: The ncclient library is required.')
+
+    changed = False
+    rebooted = False
+
+    module.work()
+
+    changed = True
+    rebooted = True
+
+    results = dict()
+    results['changed'] = changed
+    results['rebooted'] = rebooted
+
+    module.network_module.exit_json(**results)
+
+
+if __name__ == '__main__':
+    main()
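A note on the reboot flow above: execute_nc_action would normally raise when the
session dies, but a switch that is rebooting never answers the <action> RPC, so
netconf_set_action deliberately swallows TimeoutExpiredError and main() then
reports rebooted=true. A minimal sketch of that fire-and-forget pattern (the
helper name and the run_rpc callable are illustrative, not part of the collection):

    from ncclient.operations.errors import TimeoutExpiredError

    def fire_and_forget(run_rpc, xml_str):
        """Send an RPC whose 'success' is signalled by the device going away."""
        try:
            run_rpc(xml_str)      # a rebooting device typically never replies
        except TimeoutExpiredError:
            pass                  # the timeout is the expected outcome here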
diff --git a/plugins/modules/network/cloudengine/ce_rollback.py b/plugins/modules/network/cloudengine/ce_rollback.py
new file mode 100644
index 0000000000..428b32ddf2
--- /dev/null
+++ b/plugins/modules/network/cloudengine/ce_rollback.py
@@ -0,0 +1,453 @@
+#!/usr/bin/python
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+DOCUMENTATION = '''
+---
+module: ce_rollback
+short_description: Set a checkpoint or roll back to a checkpoint on HUAWEI CloudEngine switches.
+description:
+    - This module offers the ability to set a configuration checkpoint
+      file or roll back to a configuration checkpoint file on HUAWEI CloudEngine switches.
+author:
+    - Li Yanfeng (@QijunPan)
+notes:
+    - Recommended connection is C(network_cli).
+    - This module also works with C(local) connections for legacy playbooks.
+options:
+    commit_id:
+        description:
+            - Specifies the label of the configuration rollback point to which system configurations are
+              expected to roll back.
+              The value is an integer that the system generates automatically.
+    label:
+        description:
+            - Specifies a user label for a configuration rollback point.
+              The value is a string of 1 to 256 case-sensitive ASCII characters, spaces not supported.
+              The value must start with a letter and cannot be a single hyphen (-).
+    filename:
+        description:
+            - Specifies a configuration file for configuration rollback.
+              The value is a string of 5 to 64 case-sensitive characters in the format of *.zip, *.cfg, or *.dat,
+              spaces not supported.
+    last:
+        description:
+            - Specifies the number of configuration rollback points.
+              The value is an integer that ranges from 1 to 80.
+    oldest:
+        description:
+            - Specifies the number of configuration rollback points.
+              The value is an integer that ranges from 1 to 80.
+    action:
+        description:
+            - The operation of configuration rollback.
+        required: true
+        choices: ['rollback', 'clear', 'set', 'display', 'commit']
+'''
+EXAMPLES = '''
+- name: rollback module test
+  hosts: cloudengine
+  connection: local
+  gather_facts: no
+  vars:
+    cli:
+      host: "{{ inventory_hostname }}"
+      port: "{{ ansible_ssh_port }}"
+      username: "{{ username }}"
+      password: "{{ password }}"
+      transport: cli
+
+  tasks:
+    - name: Ensure commit_id exists, and roll system configurations back to that rollback point
+      ce_rollback:
+        commit_id: 1000000748
+        action: rollback
+        provider: "{{ cli }}"
+'''
+
+RETURN = '''
+proposed:
+    description: k/v pairs of parameters passed into module
+    returned: sometimes
+    type: dict
+    sample: {"commit_id": "1000000748", "action": "rollback"}
+existing:
+    description: k/v pairs of existing rollback
+    returned: sometimes
+    type: dict
+    sample: {"commitId": "1000000748", "userLabel": "abc"}
+updates:
+    description: command sent to the device
+    returned: always
+    type: list
+    sample: ["rollback configuration to file a.cfg",
+             "set configuration commit 1000000783 label ddd",
+             "clear configuration commit 1000000783 label",
+             "display configuration commit list"]
+changed:
+    description: check to see if a change was made on the device
+    returned: always
+    type: bool
+    sample: true
+end_state:
+    description: k/v pairs of configuration after module execution
+    returned: always
+    type: dict
+    sample: {"commitId": "1000000748", "userLabel": "abc"}
+'''
+
+import re
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.network.cloudengine.ce import ce_argument_spec, exec_command, run_commands
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import ComplexList
+
+
+class RollBack(object):
+    """
+    Manages rolling the system back from the current configuration state to a historical configuration state.
+ """ + + def __init__(self, argument_spec): + self.spec = argument_spec + self.module = AnsibleModule(argument_spec=self.spec, supports_check_mode=True) + self.commands = list() + # module input info + self.commit_id = self.module.params['commit_id'] + self.label = self.module.params['label'] + self.filename = self.module.params['filename'] + self.last = self.module.params['last'] + self.oldest = self.module.params['oldest'] + self.action = self.module.params['action'] + + # state + self.changed = False + self.updates_cmd = list() + self.results = dict() + self.existing = dict() + self.proposed = dict() + self.end_state = dict() + + # configuration rollback points info + self.rollback_info = None + self.init_module() + + def init_module(self): + """ init module """ + + required_if = [('action', 'set', ['commit_id', 'label']), ('action', 'commit', ['label'])] + mutually_exclusive = None + required_one_of = None + if self.action == "rollback": + required_one_of = [['commit_id', 'label', 'filename', 'last']] + elif self.action == "clear": + required_one_of = [['commit_id', 'oldest']] + self.module = AnsibleModule( + argument_spec=self.spec, supports_check_mode=True, required_if=required_if, mutually_exclusive=mutually_exclusive, required_one_of=required_one_of) + + def check_response(self, xml_str, xml_name): + """Check if response message is already succeed.""" + + if "" not in xml_str: + self.module.fail_json(msg='Error: %s failed.' % xml_name) + + def cli_add_command(self, command, undo=False): + """add command to self.update_cmd and self.commands""" + self.commands.append("return") + self.commands.append("mmi-mode enable") + + if self.action == "commit": + self.commands.append("sys") + + self.commands.append(command) + self.updates_cmd.append(command) + + def cli_load_config(self, commands): + """load config by cli""" + + if not self.module.check_mode: + run_commands(self.module, commands) + + def get_config(self, flags=None): + """Retrieves the current config from the device or cache + """ + flags = [] if flags is None else flags + + cmd = 'display configuration ' + cmd += ' '.join(flags) + cmd = cmd.strip() + + rc, out, err = exec_command(self.module, cmd) + if rc != 0: + self.module.fail_json(msg=err) + cfg = str(out).strip() + + return cfg + + def get_rollback_dict(self): + """ get rollback attributes dict.""" + + rollback_info = dict() + rollback_info["RollBackInfos"] = list() + + flags = list() + exp = "commit list" + flags.append(exp) + cfg_info = self.get_config(flags) + if not cfg_info: + return rollback_info + + cfg_line = cfg_info.split("\n") + for cfg in cfg_line: + if re.findall(r'^\d', cfg): + pre_rollback_info = cfg.split() + rollback_info["RollBackInfos"].append(dict(commitId=pre_rollback_info[1], userLabel=pre_rollback_info[2])) + + return rollback_info + + def get_filename_type(self, filename): + """Gets the type of filename, such as cfg, zip, dat...""" + + if filename is None: + return None + if ' ' in filename: + self.module.fail_json( + msg='Error: Configuration file name include spaces.') + + iftype = None + + if filename.endswith('.cfg'): + iftype = 'cfg' + elif filename.endswith('.zip'): + iftype = 'zip' + elif filename.endswith('.dat'): + iftype = 'dat' + else: + return None + return iftype.lower() + + def set_config(self): + + if self.action == "rollback": + if self.commit_id: + cmd = "rollback configuration to commit-id %s" % self.commit_id + self.cli_add_command(cmd) + if self.label: + cmd = "rollback configuration to label %s" % self.label + 
self.cli_add_command(cmd) + if self.filename: + cmd = "rollback configuration to file %s" % self.filename + self.cli_add_command(cmd) + if self.last: + cmd = "rollback configuration last %s" % self.last + self.cli_add_command(cmd) + elif self.action == "set": + if self.commit_id and self.label: + cmd = "set configuration commit %s label %s" % (self.commit_id, self.label) + self.cli_add_command(cmd) + elif self.action == "clear": + if self.commit_id: + cmd = "clear configuration commit %s label" % self.commit_id + self.cli_add_command(cmd) + if self.oldest: + cmd = "clear configuration commit oldest %s" % self.oldest + self.cli_add_command(cmd) + elif self.action == "commit": + if self.label: + cmd = "commit label %s" % self.label + self.cli_add_command(cmd) + + elif self.action == "display": + self.rollback_info = self.get_rollback_dict() + if self.commands: + self.commands.append('return') + self.commands.append('undo mmi-mode enable') + self.cli_load_config(self.commands) + self.changed = True + + def check_params(self): + """Check all input params""" + + # commit_id check + rollback_info = self.rollback_info["RollBackInfos"] + if self.commit_id: + if not self.commit_id.isdigit(): + self.module.fail_json( + msg='Error: The parameter of commit_id is invalid.') + + info_bool = False + for info in rollback_info: + if info.get("commitId") == self.commit_id: + info_bool = True + if not info_bool: + self.module.fail_json( + msg='Error: The parameter of commit_id is not exist.') + + if self.action == "clear": + info_bool = False + for info in rollback_info: + if info.get("commitId") == self.commit_id: + if info.get("userLabel") == "-": + info_bool = True + if info_bool: + self.module.fail_json( + msg='Error: This commit_id does not have a label.') + + # filename check + if self.filename: + if not self.get_filename_type(self.filename): + self.module.fail_json( + msg='Error: Invalid file name or file name extension ( *.cfg, *.zip, *.dat ).') + # last check + if self.last: + if not self.last.isdigit(): + self.module.fail_json( + msg='Error: Number of configuration checkpoints is not digit.') + if int(self.last) <= 0 or int(self.last) > 80: + self.module.fail_json( + msg='Error: Number of configuration checkpoints is not in the range from 1 to 80.') + + # oldest check + if self.oldest: + if not self.oldest.isdigit(): + self.module.fail_json( + msg='Error: Number of configuration checkpoints is not digit.') + if int(self.oldest) <= 0 or int(self.oldest) > 80: + self.module.fail_json( + msg='Error: Number of configuration checkpoints is not in the range from 1 to 80.') + + # label check + if self.label: + if self.label[0].isdigit(): + self.module.fail_json( + msg='Error: Commit label which should not start with a number.') + if len(self.label.replace(' ', '')) == 1: + if self.label == '-': + self.module.fail_json( + msg='Error: Commit label which should not be "-"') + if len(self.label.replace(' ', '')) < 1 or len(self.label) > 256: + self.module.fail_json( + msg='Error: Label of configuration checkpoints is a string of 1 to 256 characters.') + + if self.action == "rollback": + info_bool = False + for info in rollback_info: + if info.get("userLabel") == self.label: + info_bool = True + if not info_bool: + self.module.fail_json( + msg='Error: The parameter of userLabel is not exist.') + + if self.action == "commit": + info_bool = False + for info in rollback_info: + if info.get("userLabel") == self.label: + info_bool = True + if info_bool: + self.module.fail_json( + msg='Error: The parameter of 
userLabel is existing.') + + if self.action == "set": + info_bool = False + for info in rollback_info: + if info.get("commitId") == self.commit_id: + if info.get("userLabel") != "-": + info_bool = True + if info_bool: + self.module.fail_json( + msg='Error: The userLabel of this commitid is present and can be reset after deletion.') + + def get_proposed(self): + """get proposed info""" + + if self.commit_id: + self.proposed["commit_id"] = self.commit_id + if self.label: + self.proposed["label"] = self.label + if self.filename: + self.proposed["filename"] = self.filename + if self.last: + self.proposed["last"] = self.last + if self.oldest: + self.proposed["oldest"] = self.oldest + + def get_existing(self): + """get existing info""" + if not self.rollback_info: + self.existing["RollBackInfos"] = None + else: + self.existing["RollBackInfos"] = self.rollback_info["RollBackInfos"] + + def get_end_state(self): + """get end state info""" + + rollback_info = self.get_rollback_dict() + if not rollback_info: + self.end_state["RollBackInfos"] = None + else: + self.end_state["RollBackInfos"] = rollback_info["RollBackInfos"] + + def work(self): + """worker""" + + self.rollback_info = self.get_rollback_dict() + self.check_params() + self.get_proposed() + + self.set_config() + + self.get_existing() + self.get_end_state() + + self.results['changed'] = self.changed + self.results['proposed'] = self.proposed + self.results['existing'] = self.existing + self.results['end_state'] = self.end_state + if self.changed: + self.results['updates'] = self.updates_cmd + else: + self.results['updates'] = list() + + self.module.exit_json(**self.results) + + +def main(): + """Module main""" + + argument_spec = dict( + commit_id=dict(required=False), + label=dict(required=False, type='str'), + filename=dict(required=False, type='str'), + last=dict(required=False, type='str'), + oldest=dict(required=False, type='str'), + action=dict(required=False, type='str', choices=[ + 'rollback', 'clear', 'set', 'commit', 'display']), + ) + argument_spec.update(ce_argument_spec) + module = RollBack(argument_spec) + module.work() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/cloudengine/ce_sflow.py b/plugins/modules/network/cloudengine/ce_sflow.py new file mode 100644 index 0000000000..ddd179e4bd --- /dev/null +++ b/plugins/modules/network/cloudengine/ce_sflow.py @@ -0,0 +1,1167 @@ +#!/usr/bin/python +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: ce_sflow +short_description: Manages sFlow configuration on HUAWEI CloudEngine switches. 
+description: + - Configure Sampled Flow (sFlow) to monitor traffic on an interface in real time, + detect abnormal traffic, and locate the source of attack traffic, + ensuring stable running of the network. +author: QijunPan (@QijunPan) +notes: + - This module requires the netconf system service be enabled on the remote device being managed. + - Recommended connection is C(netconf). + - This module also works with C(local) connections for legacy playbooks. +options: + agent_ip: + description: + - Specifies the IPv4/IPv6 address of an sFlow agent. + source_ip: + description: + - Specifies the source IPv4/IPv6 address of sFlow packets. + collector_id: + description: + - Specifies the ID of an sFlow collector. This ID is used when you specify + the collector in subsequent sFlow configuration. + choices: ['1', '2'] + collector_ip: + description: + - Specifies the IPv4/IPv6 address of the sFlow collector. + collector_ip_vpn: + description: + - Specifies the name of a VPN instance. + The value is a string of 1 to 31 case-sensitive characters, spaces not supported. + When double quotation marks are used around the string, spaces are allowed in the string. + The value C(_public_) is reserved and cannot be used as the VPN instance name. + collector_datagram_size: + description: + - Specifies the maximum length of sFlow packets sent from an sFlow agent to an sFlow collector. + The value is an integer, in bytes. It ranges from 1024 to 8100. The default value is 1400. + collector_udp_port: + description: + - Specifies the UDP destination port number of sFlow packets. + The value is an integer that ranges from 1 to 65535. The default value is 6343. + collector_meth: + description: + - Configures the device to send sFlow packets through service interfaces, + enhancing the sFlow packet forwarding capability. + The enhanced parameter is optional. No matter whether you configure the enhanced mode, + the switch determines to send sFlow packets through service cards or management port + based on the routing information on the collector. + When the value is meth, the device forwards sFlow packets at the control plane. + When the value is enhanced, the device forwards sFlow packets at the forwarding plane to + enhance the sFlow packet forwarding capacity. + choices: ['meth', 'enhanced'] + collector_description: + description: + - Specifies the description of an sFlow collector. + The value is a string of 1 to 255 case-sensitive characters without spaces. + sflow_interface: + description: + - Full name of interface for Flow Sampling or Counter. + It must be a physical interface, Eth-Trunk, or Layer 2 subinterface. + sample_collector: + description: + - Indicates the ID list of the collector. + sample_rate: + description: + - Specifies the flow sampling rate in the format 1/rate. + The value is an integer and ranges from 1 to 4294967295. The default value is 8192. + sample_length: + description: + - Specifies the maximum length of sampled packets. + The value is an integer and ranges from 18 to 512, in bytes. The default value is 128. + sample_direction: + description: + - Enables flow sampling in the inbound or outbound direction. + choices: ['inbound', 'outbound', 'both'] + counter_collector: + description: + - Indicates the ID list of the counter collector. + counter_interval: + description: + - Indicates the counter sampling interval. + The value is an integer that ranges from 10 to 4294967295, in seconds. The default value is 20. 
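+    # Note: flow sampling (the sample_* options) samples packets with
+    # probability 1/sample_rate, while counter sampling (the counter_*
+    # options) polls interface statistics every counter_interval seconds.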
+ export_route: + description: + - Configures the sFlow packets sent by the switch not to carry routing information. + choices: ['enable', 'disable'] + rate_limit: + description: + - Specifies the rate of sFlow packets sent from a card to the control plane. + The value is an integer that ranges from 100 to 1500, in pps. + type: str + rate_limit_slot: + description: + - Specifies the slot where the rate of output sFlow packets is limited. + If this parameter is not specified, the rate of sFlow packets sent from + all cards to the control plane is limited. + The value is an integer or a string of characters. + type: str + forward_enp_slot: + description: + - Enable the Embedded Network Processor (ENP) chip function. + The switch uses the ENP chip to perform sFlow sampling, + and the maximum sFlow sampling interval is 65535. + If you set the sampling interval to be larger than 65535, + the switch automatically restores it to 65535. + The value is an integer or 'all'. + type: str + state: + description: + - Determines whether the config should be present or not + on the device. + default: present + choices: ['present', 'absent'] +''' + +EXAMPLES = ''' +--- + +- name: sflow module test + hosts: ce128 + connection: local + gather_facts: no + vars: + cli: + host: "{{ inventory_hostname }}" + port: "{{ ansible_ssh_port }}" + username: "{{ username }}" + password: "{{ password }}" + transport: cli + + tasks: + - name: Configuring sFlow Agent + ce_sflow: + agent_ip: 6.6.6.6 + provider: '{{ cli }}' + + - name: Configuring sFlow Collector + ce_sflow: + collector_id: 1 + collector_ip: 7.7.7.7 + collector_ip_vpn: vpn1 + collector_description: Collector1 + provider: '{{ cli }}' + + - name: Configure flow sampling. + ce_sflow: + sflow_interface: 10GE2/0/2 + sample_collector: 1 + sample_direction: inbound + provider: '{{ cli }}' + + - name: Configure counter sampling. 
+ ce_sflow: + sflow_interface: 10GE2/0/2 + counter_collector: 1 + counter_interval: 1000 + provider: '{{ cli }}' +''' + +RETURN = ''' +proposed: + description: k/v pairs of parameters passed into module + returned: verbose mode + type: dict + sample: {"agent_ip": "6.6.6.6", "state": "present"} +existing: + description: k/v pairs of existing configuration + returned: verbose mode + type: dict + sample: {"agent": {}} +end_state: + description: k/v pairs of configuration after module execution + returned: verbose mode + type: dict + sample: {"agent": {"family": "ipv4", "ipv4Addr": "1.2.3.4", "ipv6Addr": null}} +updates: + description: commands sent to the device + returned: always + type: list + sample: ["sflow agent ip 6.6.6.6"] +changed: + description: check to see if a change was made on the device + returned: always + type: bool + sample: true +''' + +import re +from xml.etree import ElementTree +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.network.cloudengine.ce import get_nc_config, set_nc_config, ce_argument_spec, check_ip_addr + +CE_NC_GET_SFLOW = """ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + %s + + + + + + + + + %s + + + + + + + + + + + +""" + + +def is_config_exist(cmp_cfg, test_cfg): + """is configuration exist?""" + + if not cmp_cfg or not test_cfg: + return False + + return bool(test_cfg in cmp_cfg) + + +def is_valid_ip_vpn(vpname): + """check ip vpn""" + + if not vpname: + return False + + if vpname == "_public_": + return False + + if len(vpname) < 1 or len(vpname) > 31: + return False + + return True + + +def get_ip_version(address): + """get ip version fast""" + + if not address: + return None + + if address.count(':') >= 2 and address.count(":") <= 7: + return "ipv6" + elif address.count('.') == 3: + return "ipv4" + else: + return None + + +def get_interface_type(interface): + """get the type of interface, such as 10GE, ETH-TRUNK, VLANIF...""" + + if interface is None: + return None + + if interface.upper().startswith('GE'): + iftype = 'ge' + elif interface.upper().startswith('10GE'): + iftype = '10ge' + elif interface.upper().startswith('25GE'): + iftype = '25ge' + elif interface.upper().startswith('4X10GE'): + iftype = '4x10ge' + elif interface.upper().startswith('40GE'): + iftype = '40ge' + elif interface.upper().startswith('100GE'): + iftype = '100ge' + elif interface.upper().startswith('VLANIF'): + iftype = 'vlanif' + elif interface.upper().startswith('LOOPBACK'): + iftype = 'loopback' + elif interface.upper().startswith('METH'): + iftype = 'meth' + elif interface.upper().startswith('ETH-TRUNK'): + iftype = 'eth-trunk' + elif interface.upper().startswith('VBDIF'): + iftype = 'vbdif' + elif interface.upper().startswith('NVE'): + iftype = 'nve' + elif interface.upper().startswith('TUNNEL'): + iftype = 'tunnel' + elif interface.upper().startswith('ETHERNET'): + iftype = 'ethernet' + elif interface.upper().startswith('FCOE-PORT'): + iftype = 'fcoe-port' + elif interface.upper().startswith('FABRIC-PORT'): + iftype = 'fabric-port' + elif interface.upper().startswith('STACK-PORT'): + iftype = 'stack-port' + elif interface.upper().startswith('NULL'): + iftype = 'null' + else: + return None + + return iftype.lower() + + +class Sflow(object): + """Manages sFlow""" + + def __init__(self, argument_spec): + self.spec = argument_spec + self.module = None + self.__init_module__() + + # module input info + self.agent_ip = self.module.params['agent_ip'] + self.agent_version = None + 
self.source_ip = self.module.params['source_ip'] + self.source_version = None + self.export_route = self.module.params['export_route'] + self.rate_limit = self.module.params['rate_limit'] + self.rate_limit_slot = self.module.params['rate_limit_slot'] + self.forward_enp_slot = self.module.params['forward_enp_slot'] + self.collector_id = self.module.params['collector_id'] + self.collector_ip = self.module.params['collector_ip'] + self.collector_version = None + self.collector_ip_vpn = self.module.params['collector_ip_vpn'] + self.collector_datagram_size = self.module.params['collector_datagram_size'] + self.collector_udp_port = self.module.params['collector_udp_port'] + self.collector_meth = self.module.params['collector_meth'] + self.collector_description = self.module.params['collector_description'] + self.sflow_interface = self.module.params['sflow_interface'] + self.sample_collector = self.module.params['sample_collector'] or list() + self.sample_rate = self.module.params['sample_rate'] + self.sample_length = self.module.params['sample_length'] + self.sample_direction = self.module.params['sample_direction'] + self.counter_collector = self.module.params['counter_collector'] or list() + self.counter_interval = self.module.params['counter_interval'] + self.state = self.module.params['state'] + + # state + self.config = "" # current config + self.sflow_dict = dict() + self.changed = False + self.updates_cmd = list() + self.commands = list() + self.results = dict() + self.proposed = dict() + self.existing = dict() + self.end_state = dict() + + def __init_module__(self): + """init module""" + + required_together = [("collector_id", "collector_ip")] + self.module = AnsibleModule( + argument_spec=self.spec, required_together=required_together, supports_check_mode=True) + + def check_response(self, con_obj, xml_name): + """Check if response message is already succeed""" + + xml_str = con_obj.xml + if "" not in xml_str: + self.module.fail_json(msg='Error: %s failed.' % xml_name) + + def netconf_set_config(self, xml_str, xml_name): + """netconf set config""" + + rcv_xml = set_nc_config(self.module, xml_str) + if "" not in rcv_xml: + self.module.fail_json(msg='Error: %s failed.' 
% xml_name) + + def get_sflow_dict(self): + """ sflow config dict""" + + sflow_dict = dict(source=list(), agent=dict(), collector=list(), + sampling=dict(), counter=dict(), export=dict()) + conf_str = CE_NC_GET_SFLOW % ( + self.sflow_interface, self.sflow_interface) + + if not self.collector_meth: + conf_str = conf_str.replace("", "") + + rcv_xml = get_nc_config(self.module, conf_str) + + if "" in rcv_xml: + return sflow_dict + + xml_str = rcv_xml.replace('\r', '').replace('\n', '').\ + replace('xmlns="urn:ietf:params:xml:ns:netconf:base:1.0"', "").\ + replace('xmlns="http://www.huawei.com/netconf/vrp"', "") + root = ElementTree.fromstring(xml_str) + + # get source info + srcs = root.findall("sflow/sources/source") + if srcs: + for src in srcs: + attrs = dict() + for attr in src: + if attr.tag in ["family", "ipv4Addr", "ipv6Addr"]: + attrs[attr.tag] = attr.text + sflow_dict["source"].append(attrs) + + # get agent info + agent = root.find("sflow/agents/agent") + if agent: + for attr in agent: + if attr.tag in ["family", "ipv4Addr", "ipv6Addr"]: + sflow_dict["agent"][attr.tag] = attr.text + + # get collector info + collectors = root.findall("sflow/collectors/collector") + if collectors: + for collector in collectors: + attrs = dict() + for attr in collector: + if attr.tag in ["collectorID", "family", "ipv4Addr", "ipv6Addr", + "vrfName", "datagramSize", "port", "description", "meth"]: + attrs[attr.tag] = attr.text + sflow_dict["collector"].append(attrs) + + # get sampling info + sample = root.find("sflow/samplings/sampling") + if sample: + for attr in sample: + if attr.tag in ["ifName", "collectorID", "direction", "length", "rate"]: + sflow_dict["sampling"][attr.tag] = attr.text + + # get counter info + counter = root.find("sflow/counters/counter") + if counter: + for attr in counter: + if attr.tag in ["ifName", "collectorID", "interval"]: + sflow_dict["counter"][attr.tag] = attr.text + + # get export info + export = root.find("sflow/exports/export") + if export: + for attr in export: + if attr.tag == "ExportRoute": + sflow_dict["export"][attr.tag] = attr.text + + return sflow_dict + + def config_agent(self): + """configures sFlow agent""" + + xml_str = '' + if not self.agent_ip: + return xml_str + + self.agent_version = get_ip_version(self.agent_ip) + if not self.agent_version: + self.module.fail_json(msg="Error: agent_ip is invalid.") + + if self.state == "present": + if self.agent_ip != self.sflow_dict["agent"].get("ipv4Addr") \ + and self.agent_ip != self.sflow_dict["agent"].get("ipv6Addr"): + xml_str += '' + xml_str += '%s' % self.agent_version + if self.agent_version == "ipv4": + xml_str += '%s' % self.agent_ip + self.updates_cmd.append("sflow agent ip %s" % self.agent_ip) + else: + xml_str += '%s' % self.agent_ip + self.updates_cmd.append("sflow agent ipv6 %s" % self.agent_ip) + xml_str += '' + + else: + if self.agent_ip == self.sflow_dict["agent"].get("ipv4Addr") \ + or self.agent_ip == self.sflow_dict["agent"].get("ipv6Addr"): + xml_str += '' + self.updates_cmd.append("undo sflow agent") + + return xml_str + + def config_source(self): + """configures the source IP address for sFlow packets""" + + xml_str = '' + if not self.source_ip: + return xml_str + + self.source_version = get_ip_version(self.source_ip) + if not self.source_version: + self.module.fail_json(msg="Error: source_ip is invalid.") + + src_dict = dict() + for src in self.sflow_dict["source"]: + if src.get("family") == self.source_version: + src_dict = src + break + + if self.state == "present": + if self.source_ip != 
src_dict.get("ipv4Addr") \ + and self.source_ip != src_dict.get("ipv6Addr"): + xml_str += '' + xml_str += '%s' % self.source_version + if self.source_version == "ipv4": + xml_str += '%s' % self.source_ip + self.updates_cmd.append("sflow source ip %s" % self.source_ip) + else: + xml_str += '%s' % self.source_ip + self.updates_cmd.append( + "sflow source ipv6 %s" % self.source_ip) + xml_str += '' + else: + if self.source_ip == src_dict.get("ipv4Addr"): + xml_str += 'ipv4' + self.updates_cmd.append("undo sflow source ip %s" % self.source_ip) + elif self.source_ip == src_dict.get("ipv6Addr"): + xml_str += 'ipv6' + self.updates_cmd.append("undo sflow source ipv6 %s" % self.source_ip) + + return xml_str + + def config_collector(self): + """creates an sFlow collector and sets or modifies optional parameters for the sFlow collector""" + + xml_str = '' + if not self.collector_id: + return xml_str + + if self.state == "present" and not self.collector_ip: + return xml_str + + if self.collector_ip: + self.collector_version = get_ip_version(self.collector_ip) + if not self.collector_version: + self.module.fail_json(msg="Error: collector_ip is invalid.") + + # get collector dict + exist_dict = dict() + for collector in self.sflow_dict["collector"]: + if collector.get("collectorID") == self.collector_id: + exist_dict = collector + break + + change = False + if self.state == "present": + if not exist_dict: + change = True + elif self.collector_version != exist_dict.get("family"): + change = True + elif self.collector_version == "ipv4" and self.collector_ip != exist_dict.get("ipv4Addr"): + change = True + elif self.collector_version == "ipv6" and self.collector_ip != exist_dict.get("ipv6Addr"): + change = True + elif self.collector_ip_vpn and self.collector_ip_vpn != exist_dict.get("vrfName"): + change = True + elif not self.collector_ip_vpn and exist_dict.get("vrfName") != "_public_": + change = True + elif self.collector_udp_port and self.collector_udp_port != exist_dict.get("port"): + change = True + elif not self.collector_udp_port and exist_dict.get("port") != "6343": + change = True + elif self.collector_datagram_size and self.collector_datagram_size != exist_dict.get("datagramSize"): + change = True + elif not self.collector_datagram_size and exist_dict.get("datagramSize") != "1400": + change = True + elif self.collector_meth and self.collector_meth != exist_dict.get("meth"): + change = True + elif not self.collector_meth and exist_dict.get("meth") and exist_dict.get("meth") != "meth": + change = True + elif self.collector_description and self.collector_description != exist_dict.get("description"): + change = True + elif not self.collector_description and exist_dict.get("description"): + change = True + else: + pass + else: # absent + # collector not exist + if not exist_dict: + return xml_str + if self.collector_version and self.collector_version != exist_dict.get("family"): + return xml_str + if self.collector_version == "ipv4" and self.collector_ip != exist_dict.get("ipv4Addr"): + return xml_str + if self.collector_version == "ipv6" and self.collector_ip != exist_dict.get("ipv6Addr"): + return xml_str + if self.collector_ip_vpn and self.collector_ip_vpn != exist_dict.get("vrfName"): + return xml_str + if self.collector_udp_port and self.collector_udp_port != exist_dict.get("port"): + return xml_str + if self.collector_datagram_size and self.collector_datagram_size != exist_dict.get("datagramSize"): + return xml_str + if self.collector_meth and self.collector_meth != exist_dict.get("meth"): + 
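+                # With state=absent, every attribute the user supplied must
+                # match the existing collector exactly; any mismatch means this
+                # is not the collector being removed, so no delete config is
+                # generated and the method returns early.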
return xml_str + if self.collector_description and self.collector_description != exist_dict.get("description"): + return xml_str + change = True + + if not change: + return xml_str + + # update or delete + if self.state == "absent": + xml_str += '%s' % self.collector_id + self.updates_cmd.append("undo collector %s" % self.collector_id) + else: + xml_str += '%s' % self.collector_id + cmd = "sflow collector %s" % self.collector_id + xml_str += '%s' % self.collector_version + if self.collector_version == "ipv4": + cmd += " ip %s" % self.collector_ip + xml_str += '%s' % self.collector_ip + else: + cmd += " ipv6 %s" % self.collector_ip + xml_str += '%s' % self.collector_ip + if self.collector_ip_vpn: + cmd += " vpn-instance %s" % self.collector_ip_vpn + xml_str += '%s' % self.collector_ip_vpn + if self.collector_datagram_size: + cmd += " length %s" % self.collector_datagram_size + xml_str += '%s' % self.collector_datagram_size + if self.collector_udp_port: + cmd += " udp-port %s" % self.collector_udp_port + xml_str += '%s' % self.collector_udp_port + if self.collector_description: + cmd += " description %s" % self.collector_description + xml_str += '%s' % self.collector_description + else: + xml_str += '' + if self.collector_meth: + if self.collector_meth == "enhanced": + cmd += " enhanced" + xml_str += '%s' % self.collector_meth + self.updates_cmd.append(cmd) + + xml_str += "" + + return xml_str + + def config_sampling(self): + """configure sflow sampling on an interface""" + + xml_str = '' + if not self.sflow_interface: + return xml_str + + if not self.sflow_dict["sampling"] and self.state == "absent": + return xml_str + + self.updates_cmd.append("interface %s" % self.sflow_interface) + if self.state == "present": + xml_str += '%s' % self.sflow_interface + else: + xml_str += '%s' % self.sflow_interface + + # sample_collector + if self.sample_collector: + if self.sflow_dict["sampling"].get("collectorID") \ + and self.sflow_dict["sampling"].get("collectorID") != "invalid": + existing = self.sflow_dict["sampling"].get("collectorID").split(',') + else: + existing = list() + + if self.state == "present": + diff = list(set(self.sample_collector) - set(existing)) + if diff: + self.updates_cmd.append( + "sflow sampling collector %s" % ' '.join(diff)) + new_set = list(self.sample_collector + existing) + xml_str += '%s' % ','.join(list(set(new_set))) + else: + same = list(set(self.sample_collector) & set(existing)) + if same: + self.updates_cmd.append( + "undo sflow sampling collector %s" % ' '.join(same)) + xml_str += '%s' % ','.join(list(set(same))) + + # sample_rate + if self.sample_rate: + exist = bool(self.sample_rate == self.sflow_dict["sampling"].get("rate")) + if self.state == "present" and not exist: + self.updates_cmd.append( + "sflow sampling rate %s" % self.sample_rate) + xml_str += '%s' % self.sample_rate + elif self.state == "absent" and exist: + self.updates_cmd.append( + "undo sflow sampling rate %s" % self.sample_rate) + xml_str += '%s' % self.sample_rate + + # sample_length + if self.sample_length: + exist = bool(self.sample_length == self.sflow_dict["sampling"].get("length")) + if self.state == "present" and not exist: + self.updates_cmd.append( + "sflow sampling length %s" % self.sample_length) + xml_str += '%s' % self.sample_length + elif self.state == "absent" and exist: + self.updates_cmd.append( + "undo sflow sampling length %s" % self.sample_length) + xml_str += '%s' % self.sample_length + + # sample_direction + if self.sample_direction: + direction = list() + if 
self.sample_direction == "both": + direction = ["inbound", "outbound"] + else: + direction.append(self.sample_direction) + existing = list() + if self.sflow_dict["sampling"].get("direction"): + if self.sflow_dict["sampling"].get("direction") == "both": + existing = ["inbound", "outbound"] + else: + existing.append( + self.sflow_dict["sampling"].get("direction")) + + if self.state == "present": + diff = list(set(direction) - set(existing)) + if diff: + new_set = list(set(direction + existing)) + self.updates_cmd.append( + "sflow sampling %s" % ' '.join(diff)) + if len(new_set) > 1: + new_dir = "both" + else: + new_dir = new_set[0] + xml_str += '%s' % new_dir + else: + same = list(set(existing) & set(direction)) + if same: + self.updates_cmd.append("undo sflow sampling %s" % ' '.join(same)) + if len(same) > 1: + del_dir = "both" + else: + del_dir = same[0] + xml_str += '%s' % del_dir + + if xml_str.endswith(""): + self.updates_cmd.pop() + return "" + + xml_str += '' + + return xml_str + + def config_counter(self): + """configures sflow counter on an interface""" + + xml_str = '' + if not self.sflow_interface: + return xml_str + + if not self.sflow_dict["counter"] and self.state == "absent": + return xml_str + + self.updates_cmd.append("interface %s" % self.sflow_interface) + if self.state == "present": + xml_str += '%s' % self.sflow_interface + else: + xml_str += '%s' % self.sflow_interface + + # counter_collector + if self.counter_collector: + if self.sflow_dict["counter"].get("collectorID") \ + and self.sflow_dict["counter"].get("collectorID") != "invalid": + existing = self.sflow_dict["counter"].get("collectorID").split(',') + else: + existing = list() + + if self.state == "present": + diff = list(set(self.counter_collector) - set(existing)) + if diff: + self.updates_cmd.append("sflow counter collector %s" % ' '.join(diff)) + new_set = list(self.counter_collector + existing) + xml_str += '%s' % ','.join(list(set(new_set))) + else: + same = list(set(self.counter_collector) & set(existing)) + if same: + self.updates_cmd.append( + "undo sflow counter collector %s" % ' '.join(same)) + xml_str += '%s' % ','.join(list(set(same))) + + # counter_interval + if self.counter_interval: + exist = bool(self.counter_interval == self.sflow_dict["counter"].get("interval")) + if self.state == "present" and not exist: + self.updates_cmd.append( + "sflow counter interval %s" % self.counter_interval) + xml_str += '%s' % self.counter_interval + elif self.state == "absent" and exist: + self.updates_cmd.append( + "undo sflow counter interval %s" % self.counter_interval) + xml_str += '%s' % self.counter_interval + + if xml_str.endswith(""): + self.updates_cmd.pop() + return "" + + xml_str += '' + + return xml_str + + def config_export(self): + """configure sflow export""" + + xml_str = '' + if not self.export_route: + return xml_str + + if self.export_route == "enable": + if self.sflow_dict["export"] and self.sflow_dict["export"].get("ExportRoute") == "disable": + xml_str = 'disable' + self.updates_cmd.append("undo sflow export extended-route-data disable") + else: # disable + if not self.sflow_dict["export"] or self.sflow_dict["export"].get("ExportRoute") != "disable": + xml_str = 'disable' + self.updates_cmd.append("sflow export extended-route-data disable") + + return xml_str + + def netconf_load_config(self, xml_str): + """load sflow config by netconf""" + + if not xml_str: + return + + xml_cfg = """ + + + %s + + """ % xml_str + + self.netconf_set_config(xml_cfg, "SET_SFLOW") + self.changed = True + + def 
check_params(self):
+        """Check all input params"""
+
+        # check agent_ip
+        if self.agent_ip:
+            self.agent_ip = self.agent_ip.upper()
+            if not check_ip_addr(self.agent_ip):
+                self.module.fail_json(msg="Error: agent_ip is invalid.")
+
+        # check source_ip
+        if self.source_ip:
+            self.source_ip = self.source_ip.upper()
+            if not check_ip_addr(self.source_ip):
+                self.module.fail_json(msg="Error: source_ip is invalid.")
+
+        # check collector
+        if self.collector_id:
+            # check collector_ip and collector_ip_vpn
+            if self.collector_ip:
+                self.collector_ip = self.collector_ip.upper()
+                if not check_ip_addr(self.collector_ip):
+                    self.module.fail_json(
+                        msg="Error: collector_ip is invalid.")
+                if self.collector_ip_vpn and not is_valid_ip_vpn(self.collector_ip_vpn):
+                    self.module.fail_json(
+                        msg="Error: collector_ip_vpn is invalid.")
+
+            # check collector_datagram_size ranges from 1024 to 8100
+            if self.collector_datagram_size:
+                if not self.collector_datagram_size.isdigit():
+                    self.module.fail_json(
+                        msg="Error: collector_datagram_size is not a digit.")
+                if int(self.collector_datagram_size) < 1024 or int(self.collector_datagram_size) > 8100:
+                    self.module.fail_json(
+                        msg="Error: collector_datagram_size is not in the range from 1024 to 8100.")
+
+            # check collector_udp_port ranges from 1 to 65535
+            if self.collector_udp_port:
+                if not self.collector_udp_port.isdigit():
+                    self.module.fail_json(
+                        msg="Error: collector_udp_port is not a digit.")
+                if int(self.collector_udp_port) < 1 or int(self.collector_udp_port) > 65535:
+                    self.module.fail_json(
+                        msg="Error: collector_udp_port is not in the range from 1 to 65535.")
+
+            # check collector_description 1 to 255 case-sensitive characters
+            if self.collector_description:
+                if self.collector_description.count(" "):
+                    self.module.fail_json(
+                        msg="Error: collector_description should not contain spaces.")
+                if len(self.collector_description) < 1 or len(self.collector_description) > 255:
+                    self.module.fail_json(
+                        msg="Error: collector_description is not in the range from 1 to 255 characters.")
+
+        # check sflow_interface
+        if self.sflow_interface:
+            intf_type = get_interface_type(self.sflow_interface)
+            if not intf_type:
+                self.module.fail_json(msg="Error: intf_type is invalid.")
+            if intf_type not in ['ge', '10ge', '25ge', '4x10ge', '40ge', '100ge', 'eth-trunk']:
+                self.module.fail_json(
+                    msg="Error: interface %s does not support sFlow." % self.sflow_interface)
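+
+        # As a rough illustration of the collector-ID checks below (assumed
+        # example values; only collector IDs 1 and 2 exist on the device):
+        #   sample_collector=["2", "1"] -> sorted to ["1", "2"], accepted
+        #   sample_collector=["3"]      -> rejected as invalid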
+
+        # check sample_collector
+        if self.sample_collector:
+            self.sample_collector.sort()
+            if self.sample_collector not in [["1"], ["2"], ["1", "2"]]:
+                self.module.fail_json(
+                    msg="Error: sample_collector is invalid.")
+
+        # check sample_rate ranges from 1 to 4294967295
+        if self.sample_rate:
+            if not self.sample_rate.isdigit():
+                self.module.fail_json(
+                    msg="Error: sample_rate is not a digit.")
+            if int(self.sample_rate) < 1 or int(self.sample_rate) > 4294967295:
+                self.module.fail_json(
+                    msg="Error: sample_rate is not in the range from 1 to 4294967295.")
+
+        # check sample_length ranges from 18 to 512
+        if self.sample_length:
+            if not self.sample_length.isdigit():
+                self.module.fail_json(
+                    msg="Error: sample_length is not a digit.")
+            if int(self.sample_length) < 18 or int(self.sample_length) > 512:
+                self.module.fail_json(
+                    msg="Error: sample_length is not in the range from 18 to 512.")
+
+        # check counter_collector
+        if self.counter_collector:
+            self.counter_collector.sort()
+            if self.counter_collector not in [["1"], ["2"], ["1", "2"]]:
+                self.module.fail_json(
+                    msg="Error: counter_collector is invalid.")
+
+        # counter_interval ranges from 10 to 4294967295
+        if self.counter_interval:
+            if not self.counter_interval.isdigit():
+                self.module.fail_json(
+                    msg="Error: counter_interval is not a digit.")
+            if int(self.counter_interval) < 10 or int(self.counter_interval) > 4294967295:
+                self.module.fail_json(
+                    msg="Error: counter_interval is not in the range from 10 to 4294967295.")
+
+        if self.rate_limit or self.rate_limit_slot or self.forward_enp_slot:
+            self.module.fail_json(msg="Error: The following parameters cannot be configured "
+                                      "because XML mode is not supported: rate_limit, rate_limit_slot, forward_enp_slot.")
+
+    def get_proposed(self):
+        """get proposed info"""
+
+        # base config
+        if self.agent_ip:
+            self.proposed["agent_ip"] = self.agent_ip
+        if self.source_ip:
+            self.proposed["source_ip"] = self.source_ip
+        if self.export_route:
+            self.proposed["export_route"] = self.export_route
+        if self.rate_limit:
+            self.proposed["rate_limit"] = self.rate_limit
+            self.proposed["rate_limit_slot"] = self.rate_limit_slot
+        if self.forward_enp_slot:
+            self.proposed["forward_enp_slot"] = self.forward_enp_slot
+        if self.collector_id:
+            self.proposed["collector_id"] = self.collector_id
+        if self.collector_ip:
+            self.proposed["collector_ip"] = self.collector_ip
+            self.proposed["collector_ip_vpn"] = self.collector_ip_vpn
+        if self.collector_datagram_size:
+            self.proposed["collector_datagram_size"] = self.collector_datagram_size
+        if self.collector_udp_port:
+            self.proposed["collector_udp_port"] = self.collector_udp_port
+        if self.collector_meth:
+            self.proposed["collector_meth"] = self.collector_meth
+        if self.collector_description:
+            self.proposed["collector_description"] = self.collector_description
+
+        # sample and counter config
+        if self.sflow_interface:
+            self.proposed["sflow_interface"] = self.sflow_interface
+            if self.sample_collector:
+                self.proposed["sample_collector"] = self.sample_collector
+            if self.sample_rate:
+                self.proposed["sample_rate"] = self.sample_rate
+            if self.sample_length:
+                self.proposed["sample_length"] = self.sample_length
+            if self.sample_direction:
+                self.proposed["sample_direction"] = self.sample_direction
+            if self.counter_collector:
+                self.proposed["counter_collector"] = self.counter_collector
+            if self.counter_interval:
+                self.proposed["counter_interval"] = self.counter_interval
+
+        self.proposed["state"] = self.state
+
+    def get_existing(self):
+        """get
existing info""" + + if not self.sflow_dict: + return + + if self.agent_ip: + self.existing["agent"] = self.sflow_dict["agent"] + if self.source_ip: + self.existing["source"] = self.sflow_dict["source"] + if self.collector_id: + self.existing["collector"] = self.sflow_dict["collector"] + if self.export_route: + self.existing["export"] = self.sflow_dict["export"] + + if self.sflow_interface: + self.existing["sampling"] = self.sflow_dict["sampling"] + self.existing["counter"] = self.sflow_dict["counter"] + + def get_end_state(self): + """get end state info""" + + sflow_dict = self.get_sflow_dict() + if not sflow_dict: + return + + if self.agent_ip: + self.end_state["agent"] = sflow_dict["agent"] + if self.source_ip: + self.end_state["source"] = sflow_dict["source"] + if self.collector_id: + self.end_state["collector"] = sflow_dict["collector"] + if self.export_route: + self.end_state["export"] = sflow_dict["export"] + + if self.sflow_interface: + self.end_state["sampling"] = sflow_dict["sampling"] + self.end_state["counter"] = sflow_dict["counter"] + if self.existing == self.end_state: + self.changed = False + + def work(self): + """worker""" + + self.check_params() + self.sflow_dict = self.get_sflow_dict() + self.get_existing() + self.get_proposed() + + # deal present or absent + xml_str = '' + if self.export_route: + xml_str += self.config_export() + if self.agent_ip: + xml_str += self.config_agent() + if self.source_ip: + xml_str += self.config_source() + + if self.state == "present": + if self.collector_id and self.collector_ip: + xml_str += self.config_collector() + if self.sflow_interface: + xml_str += self.config_sampling() + xml_str += self.config_counter() + else: + if self.sflow_interface: + xml_str += self.config_sampling() + xml_str += self.config_counter() + if self.collector_id: + xml_str += self.config_collector() + + if xml_str: + self.netconf_load_config(xml_str) + self.changed = True + + self.get_end_state() + self.results['changed'] = self.changed + self.results['proposed'] = self.proposed + self.results['existing'] = self.existing + self.results['end_state'] = self.end_state + if self.changed: + self.results['updates'] = self.updates_cmd + else: + self.results['updates'] = list() + + self.module.exit_json(**self.results) + + +def main(): + """Module main""" + + argument_spec = dict( + agent_ip=dict(required=False, type='str'), + source_ip=dict(required=False, type='str'), + export_route=dict(required=False, type='str', + choices=['enable', 'disable']), + rate_limit=dict(required=False, removed_in_version=2.13, type='str'), + rate_limit_slot=dict(required=False, removed_in_version=2.13, type='str'), + forward_enp_slot=dict(required=False, removed_in_version=2.13, type='str'), + collector_id=dict(required=False, type='str', choices=['1', '2']), + collector_ip=dict(required=False, type='str'), + collector_ip_vpn=dict(required=False, type='str'), + collector_datagram_size=dict(required=False, type='str'), + collector_udp_port=dict(required=False, type='str'), + collector_meth=dict(required=False, type='str', + choices=['meth', 'enhanced']), + collector_description=dict(required=False, type='str'), + sflow_interface=dict(required=False, type='str'), + sample_collector=dict(required=False, type='list'), + sample_rate=dict(required=False, type='str'), + sample_length=dict(required=False, type='str'), + sample_direction=dict(required=False, type='str', + choices=['inbound', 'outbound', 'both']), + counter_collector=dict(required=False, type='list'), + 
counter_interval=dict(required=False, type='str'), + state=dict(required=False, default='present', + choices=['present', 'absent']) + ) + + argument_spec.update(ce_argument_spec) + module = Sflow(argument_spec) + module.work() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/cloudengine/ce_snmp_community.py b/plugins/modules/network/cloudengine/ce_snmp_community.py new file mode 100644 index 0000000000..be0557c41a --- /dev/null +++ b/plugins/modules/network/cloudengine/ce_snmp_community.py @@ -0,0 +1,979 @@ +#!/usr/bin/python +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: ce_snmp_community +short_description: Manages SNMP community configuration on HUAWEI CloudEngine switches. +description: + - Manages SNMP community configuration on HUAWEI CloudEngine switches. +author: + - wangdezhuang (@QijunPan) +notes: + - This module requires the netconf system service be enabled on the remote device being managed. + - Recommended connection is C(netconf). + - This module also works with C(local) connections for legacy playbooks. +options: + acl_number: + description: + - Access control list number. + community_name: + description: + - Unique name to identify the community. + access_right: + description: + - Access right read or write. + choices: ['read','write'] + community_mib_view: + description: + - Mib view name. + group_name: + description: + - Unique name to identify the SNMPv3 group. + security_level: + description: + - Security level indicating whether to use authentication and encryption. + choices: ['noAuthNoPriv', 'authentication', 'privacy'] + read_view: + description: + - Mib view name for read. + write_view: + description: + - Mib view name for write. + notify_view: + description: + - Mib view name for notification. + state: + description: + - Manage the state of the resource. 
+ default: present + choices: ['present','absent'] +''' + +EXAMPLES = ''' + +- name: CloudEngine snmp community test + hosts: cloudengine + connection: local + gather_facts: no + vars: + cli: + host: "{{ inventory_hostname }}" + port: "{{ ansible_ssh_port }}" + username: "{{ username }}" + password: "{{ password }}" + transport: cli + + tasks: + + - name: "Config SNMP community" + ce_snmp_community: + state: present + community_name: Wdz123456789 + access_right: write + provider: "{{ cli }}" + + - name: "Undo SNMP community" + ce_snmp_community: + state: absent + community_name: Wdz123456789 + access_right: write + provider: "{{ cli }}" + + - name: "Config SNMP group" + ce_snmp_community: + state: present + group_name: wdz_group + security_level: noAuthNoPriv + acl_number: 2000 + provider: "{{ cli }}" + + - name: "Undo SNMP group" + ce_snmp_community: + state: absent + group_name: wdz_group + security_level: noAuthNoPriv + acl_number: 2000 + provider: "{{ cli }}" +''' + +RETURN = ''' +changed: + description: check to see if a change was made on the device + returned: always + type: bool + sample: true +proposed: + description: k/v pairs of parameters passed into module + returned: always + type: dict + sample: {"acl_number": "2000", "group_name": "wdz_group", + "security_level": "noAuthNoPriv", "state": "present"} +existing: + description: k/v pairs of existing aaa server + returned: always + type: dict + sample: {} +end_state: + description: k/v pairs of aaa params after module execution + returned: always + type: dict + sample: {"snmp v3 group": {"snmp_group": ["wdz_group", "noAuthNoPriv", "2000"]}} +updates: + description: command sent to the device + returned: always + type: list + sample: ["snmp-agent group v3 wdz_group noauthentication acl 2000"] +''' + +from xml.etree import ElementTree +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.network.cloudengine.ce import get_nc_config, set_nc_config, ce_argument_spec + + +# get snmp community +CE_GET_SNMP_COMMUNITY_HEADER = """ + + + + + + +""" +CE_GET_SNMP_COMMUNITY_TAIL = """ + + + + +""" +# merge snmp community +CE_MERGE_SNMP_COMMUNITY_HEADER = """ + + + + + %s + %s +""" +CE_MERGE_SNMP_COMMUNITY_TAIL = """ + + + + +""" +# create snmp community +CE_CREATE_SNMP_COMMUNITY_HEADER = """ + + + + + %s + %s +""" +CE_CREATE_SNMP_COMMUNITY_TAIL = """ + + + + +""" +# delete snmp community +CE_DELETE_SNMP_COMMUNITY_HEADER = """ + + + + + %s + %s +""" +CE_DELETE_SNMP_COMMUNITY_TAIL = """ + + + + +""" + +# get snmp v3 group +CE_GET_SNMP_V3_GROUP_HEADER = """ + + + + + + +""" +CE_GET_SNMP_V3_GROUP_TAIL = """ + + + + +""" +# merge snmp v3 group +CE_MERGE_SNMP_V3_GROUP_HEADER = """ + + + + + %s + %s +""" +CE_MERGE_SNMP_V3_GROUP_TAIL = """ + + + + +""" +# create snmp v3 group +CE_CREATE_SNMP_V3_GROUP_HEADER = """ + + + + + %s + %s +""" +CE_CREATE_SNMP_V3_GROUP_TAIL = """ + + + + +""" +# delete snmp v3 group +CE_DELETE_SNMP_V3_GROUP_HEADER = """ + + + + + %s + %s +""" +CE_DELETE_SNMP_V3_GROUP_TAIL = """ + + + + +""" + + +class SnmpCommunity(object): + """ Manages SNMP community configuration """ + + def netconf_get_config(self, **kwargs): + """ Get configure through netconf """ + + module = kwargs["module"] + conf_str = kwargs["conf_str"] + + xml_str = get_nc_config(module, conf_str) + + return xml_str + + def netconf_set_config(self, **kwargs): + """ Set configure through netconf """ + + module = kwargs["module"] + conf_str = kwargs["conf_str"] + + xml_str = set_nc_config(module, 
conf_str) + + return xml_str + + def check_snmp_community_args(self, **kwargs): + """ Check snmp community args """ + + module = kwargs["module"] + result = dict() + need_cfg = False + result["community_info"] = [] + state = module.params['state'] + community_name = module.params['community_name'] + access_right = module.params['access_right'] + acl_number = module.params['acl_number'] + community_mib_view = module.params['community_mib_view'] + + if community_name and access_right: + if len(community_name) > 32 or len(community_name) == 0: + module.fail_json( + msg='Error: The len of community_name %s is out of [1 - 32].' % community_name) + + if acl_number: + if acl_number.isdigit(): + if int(acl_number) > 2999 or int(acl_number) < 2000: + module.fail_json( + msg='Error: The value of acl_number %s is out of [2000 - 2999].' % acl_number) + else: + if not acl_number[0].isalpha() or len(acl_number) > 32 or len(acl_number) < 1: + module.fail_json( + msg='Error: The len of acl_number %s is out of [1 - 32] or is invalid.' % acl_number) + + if community_mib_view: + if len(community_mib_view) > 32 or len(community_mib_view) == 0: + module.fail_json( + msg='Error: The len of community_mib_view %s is out of [1 - 32].' % community_mib_view) + + conf_str = CE_GET_SNMP_COMMUNITY_HEADER + if acl_number: + conf_str += "" + if community_mib_view: + conf_str += "" + + conf_str += CE_GET_SNMP_COMMUNITY_TAIL + recv_xml = self.netconf_get_config(module=module, conf_str=conf_str) + + if "" in recv_xml: + if state == "present": + need_cfg = True + else: + xml_str = recv_xml.replace('\r', '').replace('\n', '').\ + replace('xmlns="urn:ietf:params:xml:ns:netconf:base:1.0"', "").\ + replace('xmlns="http://www.huawei.com/netconf/vrp"', "") + + root = ElementTree.fromstring(xml_str) + community_info = root.findall("snmp/communitys/community") + if community_info: + for tmp in community_info: + tmp_dict = dict() + for site in tmp: + if site.tag in ["communityName", "accessRight", "aclNumber", "mibViewName"]: + tmp_dict[site.tag] = site.text + + result["community_info"].append(tmp_dict) + + if result["community_info"]: + community_name_list = list() + for tmp in result["community_info"]: + if "communityName" in tmp.keys(): + community_name_list.append(tmp["communityName"]) + + if community_name not in community_name_list: + need_cfg = True + else: + need_cfg_bool = True + + for tmp in result["community_info"]: + if tmp["communityName"] == community_name: + + cfg_bool_list = list() + + if access_right: + if "accessRight" in tmp.keys(): + need_cfg_access = False + if tmp["accessRight"] != access_right: + need_cfg_access = True + else: + need_cfg_access = True + + cfg_bool_list.append(need_cfg_access) + + if acl_number: + if "aclNumber" in tmp.keys(): + need_cfg_acl = False + if tmp["aclNumber"] != acl_number: + need_cfg_acl = True + else: + need_cfg_acl = True + + cfg_bool_list.append(need_cfg_acl) + + if community_mib_view: + if "mibViewName" in tmp.keys(): + need_cfg_mib = False + if tmp["mibViewName"] != community_mib_view: + need_cfg_mib = True + else: + need_cfg_mib = True + cfg_bool_list.append(need_cfg_mib) + + if True not in cfg_bool_list: + need_cfg_bool = False + + if state == "present": + if not need_cfg_bool: + need_cfg = False + else: + need_cfg = True + else: + if not need_cfg_bool: + need_cfg = True + else: + need_cfg = False + + result["need_cfg"] = need_cfg + return result + + def check_snmp_v3_group_args(self, **kwargs): + """ Check snmp v3 group args """ + + module = kwargs["module"] + result = 
dict() + need_cfg = False + result["group_info"] = [] + state = module.params['state'] + group_name = module.params['group_name'] + security_level = module.params['security_level'] + acl_number = module.params['acl_number'] + read_view = module.params['read_view'] + write_view = module.params['write_view'] + notify_view = module.params['notify_view'] + + community_name = module.params['community_name'] + access_right = module.params['access_right'] + + if group_name and security_level: + + if community_name and access_right: + module.fail_json( + msg='Error: Community is used for v1/v2c, group_name is used for v3, do not ' + 'input at the same time.') + + if len(group_name) > 32 or len(group_name) == 0: + module.fail_json( + msg='Error: The len of group_name %s is out of [1 - 32].' % group_name) + + if acl_number: + if acl_number.isdigit(): + if int(acl_number) > 2999 or int(acl_number) < 2000: + module.fail_json( + msg='Error: The value of acl_number %s is out of [2000 - 2999].' % acl_number) + else: + if not acl_number[0].isalpha() or len(acl_number) > 32 or len(acl_number) < 1: + module.fail_json( + msg='Error: The len of acl_number %s is out of [1 - 32] or is invalid.' % acl_number) + + if read_view: + if len(read_view) > 32 or len(read_view) < 1: + module.fail_json( + msg='Error: The len of read_view %s is out of [1 - 32].' % read_view) + + if write_view: + if len(write_view) > 32 or len(write_view) < 1: + module.fail_json( + msg='Error: The len of write_view %s is out of [1 - 32].' % write_view) + + if notify_view: + if len(notify_view) > 32 or len(notify_view) < 1: + module.fail_json( + msg='Error: The len of notify_view %s is out of [1 - 32].' % notify_view) + + conf_str = CE_GET_SNMP_V3_GROUP_HEADER + if acl_number: + conf_str += "" + if read_view: + conf_str += "" + if write_view: + conf_str += "" + if notify_view: + conf_str += "" + + conf_str += CE_GET_SNMP_V3_GROUP_TAIL + recv_xml = self.netconf_get_config(module=module, conf_str=conf_str) + + if "" in recv_xml: + if state == "present": + need_cfg = True + else: + xml_str = recv_xml.replace('\r', '').replace('\n', '').\ + replace('xmlns="urn:ietf:params:xml:ns:netconf:base:1.0"', "").\ + replace('xmlns="http://www.huawei.com/netconf/vrp"', "") + + root = ElementTree.fromstring(xml_str) + group_info = root.findall("snmp/snmpv3Groups/snmpv3Group") + if group_info: + for tmp in group_info: + tmp_dict = dict() + for site in tmp: + if site.tag in ["groupName", "securityLevel", "readViewName", "writeViewName", + "notifyViewName", "aclNumber"]: + tmp_dict[site.tag] = site.text + + result["group_info"].append(tmp_dict) + + if result["group_info"]: + group_name_list = list() + + for tmp in result["group_info"]: + if "groupName" in tmp.keys(): + group_name_list.append(tmp["groupName"]) + if group_name not in group_name_list: + if state == "present": + need_cfg = True + else: + need_cfg = False + else: + need_cfg_bool = True + for tmp in result["group_info"]: + if tmp["groupName"] == group_name: + + cfg_bool_list = list() + + if security_level: + if "securityLevel" in tmp.keys(): + need_cfg_group = False + if tmp["securityLevel"] != security_level: + need_cfg_group = True + else: + need_cfg_group = True + + cfg_bool_list.append(need_cfg_group) + + if acl_number: + if "aclNumber" in tmp.keys(): + need_cfg_acl = False + if tmp["aclNumber"] != acl_number: + need_cfg_acl = True + else: + need_cfg_acl = True + + cfg_bool_list.append(need_cfg_acl) + + if read_view: + if "readViewName" in tmp.keys(): + need_cfg_read = False + if 
tmp["readViewName"] != read_view:
+                                            need_cfg_read = True
+                                    else:
+                                        need_cfg_read = True
+                                    cfg_bool_list.append(need_cfg_read)
+
+                                if write_view:
+                                    if "writeViewName" in tmp.keys():
+                                        need_cfg_write = False
+                                        if tmp["writeViewName"] != write_view:
+                                            need_cfg_write = True
+                                    else:
+                                        need_cfg_write = True
+                                    cfg_bool_list.append(need_cfg_write)
+
+                                if notify_view:
+                                    if "notifyViewName" in tmp.keys():
+                                        need_cfg_notify = False
+                                        if tmp["notifyViewName"] != notify_view:
+                                            need_cfg_notify = True
+                                    else:
+                                        need_cfg_notify = True
+                                    cfg_bool_list.append(need_cfg_notify)
+
+                                if True not in cfg_bool_list:
+                                    need_cfg_bool = False
+
+                        if state == "present":
+                            if not need_cfg_bool:
+                                need_cfg = False
+                            else:
+                                need_cfg = True
+                        else:
+                            if not need_cfg_bool:
+                                need_cfg = True
+                            else:
+                                need_cfg = False
+
+        result["need_cfg"] = need_cfg
+        return result
+
+    def merge_snmp_community(self, **kwargs):
+        """ Merge snmp community operation """
+
+        module = kwargs["module"]
+        community_name = module.params['community_name']
+        access_right = module.params['access_right']
+        acl_number = module.params['acl_number']
+        community_mib_view = module.params['community_mib_view']
+
+        conf_str = CE_MERGE_SNMP_COMMUNITY_HEADER % (
+            community_name, access_right)
+        if acl_number:
+            conf_str += "%s" % acl_number
+        if community_mib_view:
+            conf_str += "%s" % community_mib_view
+
+        conf_str += CE_MERGE_SNMP_COMMUNITY_TAIL
+
+        recv_xml = self.netconf_set_config(module=module, conf_str=conf_str)
+
+        if "" not in recv_xml:
+            module.fail_json(msg='Error: Merge snmp community failed.')
+
+        community_safe_name = "******"
+
+        cmd = "snmp-agent community %s %s" % (access_right, community_safe_name)
+
+        if acl_number:
+            cmd += " acl %s" % acl_number
+        if community_mib_view:
+            cmd += " mib-view %s" % community_mib_view
+
+        return cmd
+
+    def create_snmp_community(self, **kwargs):
+        """ Create snmp community operation """
+
+        module = kwargs["module"]
+        community_name = module.params['community_name']
+        access_right = module.params['access_right']
+        acl_number = module.params['acl_number']
+        community_mib_view = module.params['community_mib_view']
+
+        conf_str = CE_CREATE_SNMP_COMMUNITY_HEADER % (
+            community_name, access_right)
+        if acl_number:
+            conf_str += "%s" % acl_number
+        if community_mib_view:
+            conf_str += "%s" % community_mib_view
+
+        conf_str += CE_CREATE_SNMP_COMMUNITY_TAIL
+
+        recv_xml = self.netconf_set_config(module=module, conf_str=conf_str)
+
+        if "" not in recv_xml:
+            module.fail_json(msg='Error: Create snmp community failed.')
+
+        community_safe_name = "******"
+
+        cmd = "snmp-agent community %s %s" % (access_right, community_safe_name)
+
+        if acl_number:
+            cmd += " acl %s" % acl_number
+        if community_mib_view:
+            cmd += " mib-view %s" % community_mib_view
+
+        return cmd
+
+    def delete_snmp_community(self, **kwargs):
+        """ Delete snmp community operation """
+
+        module = kwargs["module"]
+        community_name = module.params['community_name']
+        access_right = module.params['access_right']
+        acl_number = module.params['acl_number']
+        community_mib_view = module.params['community_mib_view']
+
+        conf_str = CE_DELETE_SNMP_COMMUNITY_HEADER % (
+            community_name, access_right)
+        if acl_number:
+            conf_str += "%s" % acl_number
+        if community_mib_view:
+            conf_str += "%s" % community_mib_view
+
+        conf_str += CE_DELETE_SNMP_COMMUNITY_TAIL
+
+        recv_xml = self.netconf_set_config(module=module, conf_str=conf_str)
+
+        if "" not in recv_xml:
+            module.fail_json(msg='Error: Delete snmp community failed.')
+
+        community_safe_name = "******"
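+        # The reported CLI command masks the community string as "******"
+        # rather than echoing the real value, consistent with no_log=True on
+        # community_name in the argument spec below.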
+        cmd = "undo snmp-agent community %s %s" % (
+            access_right, community_safe_name)
+
+        return cmd
+
+    def merge_snmp_v3_group(self, **kwargs):
+        """ Merge snmp v3 group operation """
+
+        module = kwargs["module"]
+        group_name = module.params['group_name']
+        security_level = module.params['security_level']
+        acl_number = module.params['acl_number']
+        read_view = module.params['read_view']
+        write_view = module.params['write_view']
+        notify_view = module.params['notify_view']
+
+        conf_str = CE_MERGE_SNMP_V3_GROUP_HEADER % (group_name, security_level)
+        if acl_number:
+            conf_str += "%s" % acl_number
+        if read_view:
+            conf_str += "%s" % read_view
+        if write_view:
+            conf_str += "%s" % write_view
+        if notify_view:
+            conf_str += "%s" % notify_view
+        conf_str += CE_MERGE_SNMP_V3_GROUP_TAIL
+
+        recv_xml = self.netconf_set_config(module=module, conf_str=conf_str)
+
+        if "" not in recv_xml:
+            module.fail_json(msg='Error: Merge snmp v3 group failed.')
+
+        # map the NETCONF security level to the matching CLI keyword
+        if security_level == "noAuthNoPriv":
+            security_level_cli = "noauthentication"
+        elif security_level == "authentication":
+            security_level_cli = "authentication"
+        elif security_level == "privacy":
+            security_level_cli = "privacy"
+
+        cmd = "snmp-agent group v3 %s %s" % (group_name, security_level_cli)
+
+        if read_view:
+            cmd += " read-view %s" % read_view
+        if write_view:
+            cmd += " write-view %s" % write_view
+        if notify_view:
+            cmd += " notify-view %s" % notify_view
+        if acl_number:
+            cmd += " acl %s" % acl_number
+
+        return cmd
+
+    def create_snmp_v3_group(self, **kwargs):
+        """ Create snmp v3 group operation """
+
+        module = kwargs["module"]
+        group_name = module.params['group_name']
+        security_level = module.params['security_level']
+        acl_number = module.params['acl_number']
+        read_view = module.params['read_view']
+        write_view = module.params['write_view']
+        notify_view = module.params['notify_view']
+
+        conf_str = CE_CREATE_SNMP_V3_GROUP_HEADER % (
+            group_name, security_level)
+        if acl_number:
+            conf_str += "%s" % acl_number
+        if read_view:
+            conf_str += "%s" % read_view
+        if write_view:
+            conf_str += "%s" % write_view
+        if notify_view:
+            conf_str += "%s" % notify_view
+        conf_str += CE_CREATE_SNMP_V3_GROUP_TAIL
+
+        recv_xml = self.netconf_set_config(module=module, conf_str=conf_str)
+
+        if "" not in recv_xml:
+            module.fail_json(msg='Error: Create snmp v3 group failed.')
+
+        if security_level == "noAuthNoPriv":
+            security_level_cli = "noauthentication"
+        elif security_level == "authentication":
+            security_level_cli = "authentication"
+        elif security_level == "privacy":
+            security_level_cli = "privacy"
+
+        cmd = "snmp-agent group v3 %s %s" % (group_name, security_level_cli)
+
+        if read_view:
+            cmd += " read-view %s" % read_view
+        if write_view:
+            cmd += " write-view %s" % write_view
+        if notify_view:
+            cmd += " notify-view %s" % notify_view
+        if acl_number:
+            cmd += " acl %s" % acl_number
+
+        return cmd
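+
+    # Illustrative only (assumed values, not device output): a fully specified
+    # v3 group merge is reported as a single CLI line, e.g.
+    #   snmp-agent group v3 wdz_group noauthentication read-view rv write-view wv notify-view nv acl 2001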
+    def delete_snmp_v3_group(self, **kwargs):
+        """ Delete snmp v3 group operation """
+
+        module = kwargs["module"]
+        group_name = module.params['group_name']
+        security_level = module.params['security_level']
+        acl_number = module.params['acl_number']
+        read_view = module.params['read_view']
+        write_view = module.params['write_view']
+        notify_view = module.params['notify_view']
+
+        conf_str = CE_DELETE_SNMP_V3_GROUP_HEADER % (
+            group_name, security_level)
+        if acl_number:
+            conf_str += "%s" % acl_number
+        if read_view:
+            conf_str += "%s" % read_view
+        if write_view:
+            conf_str += "%s" % write_view
+        if notify_view:
+            conf_str += "%s" % notify_view
+        conf_str += CE_DELETE_SNMP_V3_GROUP_TAIL
+
+        recv_xml = self.netconf_set_config(module=module, conf_str=conf_str)
+
+        if "" not in recv_xml:
+            module.fail_json(msg='Error: Delete snmp v3 group failed.')
+
+        if security_level == "noAuthNoPriv":
+            security_level_cli = "noauthentication"
+        elif security_level == "authentication":
+            security_level_cli = "authentication"
+        elif security_level == "privacy":
+            security_level_cli = "privacy"
+
+        cmd = "undo snmp-agent group v3 %s %s" % (
+            group_name, security_level_cli)
+
+        return cmd
+
+
+def main():
+    """ main function """
+
+    argument_spec = dict(
+        state=dict(choices=['present', 'absent'], default='present'),
+        acl_number=dict(type='str'),
+        community_name=dict(type='str', no_log=True),
+        access_right=dict(choices=['read', 'write']),
+        community_mib_view=dict(type='str'),
+        group_name=dict(type='str'),
+        security_level=dict(
+            choices=['noAuthNoPriv', 'authentication', 'privacy']),
+        read_view=dict(type='str'),
+        write_view=dict(type='str'),
+        notify_view=dict(type='str')
+    )
+
+    argument_spec.update(ce_argument_spec)
+    required_together = [("community_name", "access_right"), ("security_level", "group_name")]
+    module = AnsibleModule(
+        argument_spec=argument_spec,
+        required_together=required_together,
+        supports_check_mode=True
+    )
+
+    changed = False
+    proposed = dict()
+    existing = dict()
+    end_state = dict()
+    updates = []
+
+    state = module.params['state']
+    acl_number = module.params['acl_number']
+    community_name = module.params['community_name']
+    community_mib_view = module.params['community_mib_view']
+    access_right = module.params['access_right']
+    group_name = module.params['group_name']
+    security_level = module.params['security_level']
+    read_view = module.params['read_view']
+    write_view = module.params['write_view']
+    notify_view = module.params['notify_view']
+
+    snmp_community_obj = SnmpCommunity()
+
+    if not snmp_community_obj:
+        module.fail_json(msg='Error: Init module failed.')
+
+    snmp_community_rst = snmp_community_obj.check_snmp_community_args(
+        module=module)
+    snmp_v3_group_rst = snmp_community_obj.check_snmp_v3_group_args(
+        module=module)
+
+    # get proposed
+    proposed["state"] = state
+    if acl_number:
+        proposed["acl_number"] = acl_number
+    if community_name:
+        proposed["community_name"] = community_name
+    if community_mib_view:
+        proposed["community_mib_view"] = community_mib_view
+    if access_right:
+        proposed["access_right"] = access_right
+    if group_name:
+        proposed["group_name"] = group_name
+    if security_level:
+        proposed["security_level"] = security_level
+    if read_view:
+        proposed["read_view"] = read_view
+    if write_view:
+        proposed["write_view"] = write_view
+    if notify_view:
+        proposed["notify_view"] = notify_view
+
+    # state exist snmp community config
+    exist_tmp = dict()
+    for item in snmp_community_rst:
+        if item != "need_cfg":
+            exist_tmp[item] = snmp_community_rst[item]
+
+    if exist_tmp:
+        existing["snmp community"] = exist_tmp
+    # state exist snmp v3 group config
+    exist_tmp = dict()
+    for item in snmp_v3_group_rst:
+        if item != "need_cfg":
+            exist_tmp[item] = snmp_v3_group_rst[item]
+
+    if exist_tmp:
+        existing["snmp v3 group"] = exist_tmp
+
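+    # Apply phase: an object that already exists on the device is updated via
+    # merge, a missing one is created; with state=absent the matching
+    # community or v3 group is deleted through the same NETCONF templates.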
+    if state == "present":
+        if snmp_community_rst["need_cfg"]:
+            if len(snmp_community_rst["community_info"]) != 0:
+                cmd = snmp_community_obj.merge_snmp_community(module=module)
+                changed = True
+                updates.append(cmd)
+            else:
+                cmd = snmp_community_obj.create_snmp_community(module=module)
+                changed = True
+                updates.append(cmd)
+
+        if snmp_v3_group_rst["need_cfg"]:
+            if len(snmp_v3_group_rst["group_info"]):
+                cmd = snmp_community_obj.merge_snmp_v3_group(module=module)
+                changed = True
+                updates.append(cmd)
+            else:
+                cmd = snmp_community_obj.create_snmp_v3_group(module=module)
+                changed = True
+                updates.append(cmd)
+
+    else:
+        if snmp_community_rst["need_cfg"]:
+            cmd = snmp_community_obj.delete_snmp_community(module=module)
+            changed = True
+            updates.append(cmd)
+        if snmp_v3_group_rst["need_cfg"]:
+            cmd = snmp_community_obj.delete_snmp_v3_group(module=module)
+            changed = True
+            updates.append(cmd)
+
+    # state end snmp community config
+    snmp_community_rst = snmp_community_obj.check_snmp_community_args(
+        module=module)
+    end_tmp = dict()
+    for item in snmp_community_rst:
+        if item != "need_cfg":
+            end_tmp[item] = snmp_community_rst[item]
+    if end_tmp:
+        end_state["snmp community"] = end_tmp
+    # state end snmp v3 group config
+    snmp_v3_group_rst = snmp_community_obj.check_snmp_v3_group_args(
+        module=module)
+    end_tmp = dict()
+    for item in snmp_v3_group_rst:
+        if item != "need_cfg":
+            end_tmp[item] = snmp_v3_group_rst[item]
+    if end_tmp:
+        end_state["snmp v3 group"] = end_tmp
+
+    results = dict()
+    results['proposed'] = proposed
+    results['existing'] = existing
+    results['changed'] = changed
+    results['end_state'] = end_state
+    results['updates'] = updates
+
+    module.exit_json(**results)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/network/cloudengine/ce_snmp_contact.py b/plugins/modules/network/cloudengine/ce_snmp_contact.py
new file mode 100644
index 0000000000..52f9616230
--- /dev/null
+++ b/plugins/modules/network/cloudengine/ce_snmp_contact.py
@@ -0,0 +1,272 @@
+#!/usr/bin/python
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <https://www.gnu.org/licenses/>.
+#
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+DOCUMENTATION = '''
+---
+module: ce_snmp_contact
+short_description: Manages SNMP contact configuration on HUAWEI CloudEngine switches.
+description:
+    - Manages SNMP contact configurations on HUAWEI CloudEngine switches.
+author:
+    - wangdezhuang (@QijunPan)
+notes:
+    - Recommended connection is C(network_cli).
+    - This module also works with C(local) connections for legacy playbooks.
+options:
+    contact:
+        description:
+            - Contact information.
+        required: true
+    state:
+        description:
+            - Manage the state of the resource.
+ default: present + choices: ['present','absent'] +''' + +EXAMPLES = ''' + +- name: CloudEngine snmp contact test + hosts: cloudengine + connection: local + gather_facts: no + vars: + cli: + host: "{{ inventory_hostname }}" + port: "{{ ansible_ssh_port }}" + username: "{{ username }}" + password: "{{ password }}" + transport: cli + + tasks: + + - name: "Config SNMP contact" + ce_snmp_contact: + state: present + contact: call Operator at 010-99999999 + provider: "{{ cli }}" + + - name: "Undo SNMP contact" + ce_snmp_contact: + state: absent + contact: call Operator at 010-99999999 + provider: "{{ cli }}" +''' + +RETURN = ''' +changed: + description: check to see if a change was made on the device + returned: always + type: bool + sample: true +proposed: + description: k/v pairs of parameters passed into module + returned: always + type: dict + sample: {"contact": "call Operator at 010-99999999", + "state": "present"} +existing: + description: k/v pairs of existing aaa server + returned: always + type: dict + sample: {} +end_state: + description: k/v pairs of aaa params after module execution + returned: always + type: dict + sample: {"contact": "call Operator at 010-99999999"} +updates: + description: command sent to the device + returned: always + type: list + sample: ["snmp-agent sys-info contact call Operator at 010-99999999"] +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.network.cloudengine.ce import exec_command, load_config, ce_argument_spec + + +class SnmpContact(object): + """ Manages SNMP contact configuration """ + + def __init__(self, **kwargs): + """ Class init """ + + # module + argument_spec = kwargs["argument_spec"] + self.spec = argument_spec + self.module = AnsibleModule(argument_spec=self.spec, supports_check_mode=True) + + # config + self.cur_cfg = dict() + + # module args + self.state = self.module.params['state'] + self.contact = self.module.params['contact'] + + # state + self.changed = False + self.updates_cmd = list() + self.results = dict() + self.proposed = dict() + self.existing = dict() + self.end_state = dict() + + def check_args(self): + """ Check invalid args """ + + if self.contact: + if len(self.contact) > 255 or len(self.contact) < 1: + self.module.fail_json( + msg='Error: The len of contact %s is out of [1 - 255].' 
% self.contact) + else: + self.module.fail_json( + msg='Error: The len of contact is 0.') + + def get_config(self, flags=None): + """Retrieves the current config from the device or cache + """ + flags = [] if flags is None else flags + + cmd = 'display current-configuration ' + cmd += ' '.join(flags) + cmd = cmd.strip() + + rc, out, err = exec_command(self.module, cmd) + if rc != 0: + self.module.fail_json(msg=err) + cfg = str(out).strip() + + return cfg + + def get_proposed(self): + """ Get proposed state """ + + self.proposed["state"] = self.state + + if self.contact: + self.proposed["contact"] = self.contact + + def get_existing(self): + """ Get existing state """ + + tmp_cfg = self.cli_get_config() + if tmp_cfg: + temp_data = tmp_cfg.split(r"contact ") + if len(temp_data) > 1: + self.cur_cfg["contact"] = temp_data[1] + self.existing["contact"] = temp_data[1] + + def get_end_state(self): + """ Get end state """ + + tmp_cfg = self.cli_get_config() + if tmp_cfg: + temp_data = tmp_cfg.split(r"contact ") + if len(temp_data) > 1: + self.end_state["contact"] = temp_data[1] + + def cli_load_config(self, commands): + """ Load configure by cli """ + + if not self.module.check_mode: + load_config(self.module, commands) + + def cli_get_config(self): + """ Get configure by cli """ + + regular = "| include snmp | include contact" + flags = list() + flags.append(regular) + tmp_cfg = self.get_config(flags) + + return tmp_cfg + + def set_config(self): + """ Set configure by cli """ + + cmd = "snmp-agent sys-info contact %s" % self.contact + self.updates_cmd.append(cmd) + + cmds = list() + cmds.append(cmd) + + self.cli_load_config(cmds) + self.changed = True + + def undo_config(self): + """ Undo configure by cli """ + + cmd = "undo snmp-agent sys-info contact" + self.updates_cmd.append(cmd) + + cmds = list() + cmds.append(cmd) + + self.cli_load_config(cmds) + self.changed = True + + def work(self): + """ Main work function """ + + self.check_args() + self.get_proposed() + self.get_existing() + + if self.state == "present": + if "contact" in self.cur_cfg.keys() and self.contact == self.cur_cfg["contact"]: + pass + else: + self.set_config() + else: + if "contact" in self.cur_cfg.keys() and self.contact == self.cur_cfg["contact"]: + self.undo_config() + + self.get_end_state() + + self.results['changed'] = self.changed + self.results['proposed'] = self.proposed + self.results['existing'] = self.existing + self.results['end_state'] = self.end_state + self.results['updates'] = self.updates_cmd + + self.module.exit_json(**self.results) + + +def main(): + """ Module main """ + + argument_spec = dict( + state=dict(choices=['present', 'absent'], default='present'), + contact=dict(type='str', required=True) + ) + + argument_spec.update(ce_argument_spec) + module = SnmpContact(argument_spec=argument_spec) + module.work() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/cloudengine/ce_snmp_location.py b/plugins/modules/network/cloudengine/ce_snmp_location.py new file mode 100644 index 0000000000..3187ddeb78 --- /dev/null +++ b/plugins/modules/network/cloudengine/ce_snmp_location.py @@ -0,0 +1,273 @@ +#!/usr/bin/python +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. 
+# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: ce_snmp_location +short_description: Manages SNMP location configuration on HUAWEI CloudEngine switches. +description: + - Manages SNMP location configurations on HUAWEI CloudEngine switches. +author: + - wangdezhuang (@QijunPan) +notes: + - Recommended connection is C(network_cli). + - This module also works with C(local) connections for legacy playbooks. +options: + location: + description: + - Location information. + required: true + state: + description: + - Manage the state of the resource. + default: present + choices: ['present','absent'] +''' + +EXAMPLES = ''' + +- name: CloudEngine snmp location test + hosts: cloudengine + connection: local + gather_facts: no + vars: + cli: + host: "{{ inventory_hostname }}" + port: "{{ ansible_ssh_port }}" + username: "{{ username }}" + password: "{{ password }}" + transport: cli + + tasks: + + - name: "Config SNMP location" + ce_snmp_location: + state: present + location: nanjing China + provider: "{{ cli }}" + + - name: "Remove SNMP location" + ce_snmp_location: + state: absent + location: nanjing China + provider: "{{ cli }}" +''' + +RETURN = ''' +changed: + description: check to see if a change was made on the device + returned: always + type: bool + sample: true +proposed: + description: k/v pairs of parameters passed into module + returned: always + type: dict + sample: {"location": "nanjing China", + "state": "present"} +existing: + description: k/v pairs of existing aaa server + returned: always + type: dict + sample: {} +end_state: + description: k/v pairs of aaa params after module execution + returned: always + type: dict + sample: {"location": "nanjing China"} +updates: + description: command sent to the device + returned: always + type: list + sample: ["snmp-agent sys-info location nanjing China"] +''' + + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.network.cloudengine.ce import exec_command, load_config, ce_argument_spec + + +class SnmpLocation(object): + """ Manages SNMP location configuration """ + + def __init__(self, **kwargs): + """ Class init """ + + # module + argument_spec = kwargs["argument_spec"] + self.spec = argument_spec + self.module = AnsibleModule(argument_spec=self.spec, supports_check_mode=True) + + # config + self.cur_cfg = dict() + + # module args + self.state = self.module.params['state'] + self.location = self.module.params['location'] + + # state + self.changed = False + self.updates_cmd = list() + self.results = dict() + self.proposed = dict() + self.existing = dict() + self.end_state = dict() + + def check_args(self): + """ Check invalid args """ + + if self.location: + if len(self.location) > 255 or len(self.location) < 1: + self.module.fail_json( + msg='Error: The len of location %s is out of [1 - 255].' 
% self.location) + else: + self.module.fail_json( + msg='Error: The len of location is 0.') + + def get_config(self, flags=None): + """Retrieves the current config from the device or cache + """ + flags = [] if flags is None else flags + + cmd = 'display current-configuration ' + cmd += ' '.join(flags) + cmd = cmd.strip() + + rc, out, err = exec_command(self.module, cmd) + if rc != 0: + self.module.fail_json(msg=err) + cfg = str(out).strip() + + return cfg + + def get_proposed(self): + """ Get proposed state """ + + self.proposed["state"] = self.state + + if self.location: + self.proposed["location"] = self.location + + def get_existing(self): + """ Get existing state """ + + tmp_cfg = self.cli_get_config() + if tmp_cfg: + temp_data = tmp_cfg.split(r"location ") + if len(temp_data) > 1: + self.cur_cfg["location"] = temp_data[1] + self.existing["location"] = temp_data[1] + + def get_end_state(self): + """ Get end state """ + + tmp_cfg = self.cli_get_config() + if tmp_cfg: + temp_data = tmp_cfg.split(r"location ") + if len(temp_data) > 1: + self.end_state["location"] = temp_data[1] + + def cli_load_config(self, commands): + """ Load config by cli """ + + if not self.module.check_mode: + load_config(self.module, commands) + + def cli_get_config(self): + """ Get config by cli """ + + regular = "| include snmp | include location" + flags = list() + flags.append(regular) + tmp_cfg = self.get_config(flags) + + return tmp_cfg + + def set_config(self): + """ Set configure by cli """ + + cmd = "snmp-agent sys-info location %s" % self.location + self.updates_cmd.append(cmd) + + cmds = list() + cmds.append(cmd) + + self.cli_load_config(cmds) + self.changed = True + + def undo_config(self): + """ Undo configure by cli """ + + cmd = "undo snmp-agent sys-info location" + self.updates_cmd.append(cmd) + + cmds = list() + cmds.append(cmd) + + self.cli_load_config(cmds) + self.changed = True + + def work(self): + """ Main work function """ + + self.check_args() + self.get_proposed() + self.get_existing() + + if self.state == "present": + if "location" in self.cur_cfg.keys() and self.location == self.cur_cfg["location"]: + pass + else: + self.set_config() + else: + if "location" in self.cur_cfg.keys() and self.location == self.cur_cfg["location"]: + self.undo_config() + + self.get_end_state() + + self.results['changed'] = self.changed + self.results['proposed'] = self.proposed + self.results['existing'] = self.existing + self.results['end_state'] = self.end_state + self.results['updates'] = self.updates_cmd + + self.module.exit_json(**self.results) + + +def main(): + """ Module main """ + + argument_spec = dict( + state=dict(choices=['present', 'absent'], default='present'), + location=dict(type='str', required=True) + ) + + argument_spec.update(ce_argument_spec) + module = SnmpLocation(argument_spec=argument_spec) + module.work() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/cloudengine/ce_snmp_target_host.py b/plugins/modules/network/cloudengine/ce_snmp_target_host.py new file mode 100644 index 0000000000..037cdd54f7 --- /dev/null +++ b/plugins/modules/network/cloudengine/ce_snmp_target_host.py @@ -0,0 +1,944 @@ +#!/usr/bin/python +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. 
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <https://www.gnu.org/licenses/>.
+#
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+DOCUMENTATION = '''
+---
+module: ce_snmp_target_host
+short_description: Manages SNMP target host configuration on HUAWEI CloudEngine switches.
+description:
+    - Manages SNMP target host configurations on HUAWEI CloudEngine switches.
+author:
+    - wangdezhuang (@QijunPan)
+notes:
+    - This module requires the netconf system service to be enabled on the remote device being managed.
+    - Recommended connection is C(netconf).
+    - This module also works with C(local) connections for legacy playbooks.
+options:
+    version:
+        description:
+            - Version(s) supported by the SNMP engine.
+        choices: ['none', 'v1', 'v2c', 'v3', 'v1v2c', 'v1v3', 'v2cv3', 'all']
+    connect_port:
+        description:
+            - UDP port used by the SNMP agent to connect to the network management system.
+    host_name:
+        description:
+            - Unique name to identify the target host entry.
+    address:
+        description:
+            - Network address.
+    notify_type:
+        description:
+            - To configure the notify type as trap or inform.
+        choices: ['trap','inform']
+    vpn_name:
+        description:
+            - VPN instance name.
+    recv_port:
+        description:
+            - UDP port number used by the network management system to receive alarm messages.
+    security_model:
+        description:
+            - Security model.
+        choices: ['v1','v2c', 'v3']
+    security_name:
+        description:
+            - Security name.
+    security_name_v3:
+        description:
+            - Security name V3.
+    security_level:
+        description:
+            - Security level indicating whether to use authentication and encryption.
+        choices: ['noAuthNoPriv','authentication', 'privacy']
+    is_public_net:
+        description:
+            - To enable or disable Public Net-manager for the target host.
+        default: no_use
+        choices: ['no_use','true','false']
+    interface_name:
+        description:
+            - Name of the interface to send the trap message.
+    state:
+        description:
+            - Manage the state of the resource.
+        default: present
+        choices: ['present','absent']
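+# The validation ranges for these options (for example host_name 1-32 chars,
+# vpn_name 1-31 chars, recv_port 0-65535) are enforced in check_netconf_args
+# further down in this module.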
+'''
+
+EXAMPLES = '''
+
+- name: CloudEngine snmp target host test
+  hosts: cloudengine
+  connection: local
+  gather_facts: no
+  vars:
+    cli:
+      host: "{{ inventory_hostname }}"
+      port: "{{ ansible_ssh_port }}"
+      username: "{{ username }}"
+      password: "{{ password }}"
+      transport: cli
+
+  tasks:
+
+  - name: "Config SNMP version"
+    ce_snmp_target_host:
+      state: present
+      version: v2cv3
+      provider: "{{ cli }}"
+
+  - name: "Config SNMP target host"
+    ce_snmp_target_host:
+      state: present
+      host_name: test1
+      address: 1.1.1.1
+      notify_type: trap
+      vpn_name: js
+      security_model: v2c
+      security_name: wdz
+      provider: "{{ cli }}"
+'''

+RETURN = '''
+changed:
+    description: check to see if a change was made on the device
+    returned: always
+    type: bool
+    sample: true
+proposed:
+    description: k/v pairs of parameters passed into module
+    returned: always
+    type: dict
+    sample: {"address": "10.135.182.158", "host_name": "test2",
+             "notify_type": "trap", "security_level": "authentication",
+             "security_model": "v3", "security_name_v3": "wdz",
+             "state": "present", "vpn_name": "js"}
+existing:
+    description: k/v pairs of existing aaa server
+    returned: always
+    type: dict
+    sample: {}
+end_state:
+    description: k/v pairs of aaa params after module execution
+    returned: always
+    type: dict
+    sample: {"target host info": [{"address": "10.135.182.158", "domain": "snmpUDPDomain",
+                                   "nmsName": "test2", "notifyType": "trap",
+                                   "securityLevel": "authentication", "securityModel": "v3",
+                                   "securityNameV3": "wdz", "vpnInstanceName": "js"}]}
+updates:
+    description: command sent to the device
+    returned: always
+    type: list
+    sample: ["snmp-agent target-host host-name test2 trap address udp-domain 10.135.182.158 vpn-instance js params securityname wdz v3 authentication"]
+'''
+
+from xml.etree import ElementTree
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.network.cloudengine.ce import get_nc_config, set_nc_config, \
+    ce_argument_spec, load_config, check_ip_addr
+
+# get snmp version
+CE_GET_SNMP_VERSION = """
+
+
+
+
+
+
+"""
+# merge snmp version
+CE_MERGE_SNMP_VERSION = """
+
+
+
+    %s
+
+
+
+"""
+
+# get snmp target host
+CE_GET_SNMP_TARGET_HOST_HEADER = """
+
+
+
+
+
+"""
+CE_GET_SNMP_TARGET_HOST_TAIL = """
+
+
+
+
+"""
+
+# merge snmp target host
+CE_MERGE_SNMP_TARGET_HOST_HEADER = """
+
+
+
+
+    %s
+"""
+CE_MERGE_SNMP_TARGET_HOST_TAIL = """
+
+
+
+
+"""
+
+# create snmp target host
+CE_CREATE_SNMP_TARGET_HOST_HEADER = """
+
+
+
+
+    %s
+"""
+CE_CREATE_SNMP_TARGET_HOST_TAIL = """
+
+
+
+
+"""
+
+# delete snmp target host
+CE_DELETE_SNMP_TARGET_HOST_HEADER = """
+
+
+
+
+    %s
+"""
+CE_DELETE_SNMP_TARGET_HOST_TAIL = """
+
+
+
+
+"""
+
+# get snmp listen port
+CE_GET_SNMP_PORT = """
+
+
+
+
+
+
+"""
+
+# merge snmp listen port
+CE_MERGE_SNMP_PORT = """
+
+
+
+    %s
+
+
+
+"""
+
+
+INTERFACE_TYPE = ['ethernet', 'eth-trunk', 'tunnel', 'null', 'loopback',
+                  'vlanif', '100ge', '40ge', 'mtunnel', '10ge', 'ge', 'meth', 'vbdif', 'nve']
+
+
+class SnmpTargetHost(object):
+    """ Manages SNMP target host configuration """
+
+    def __init__(self, **kwargs):
+        """ Class init """
+
+        # module
+        argument_spec = kwargs["argument_spec"]
+        self.spec = argument_spec
+        required_together = [("address", "notify_type")]
+        required_if = [
+            ["security_model", "v1", ["security_name"]],
+            ["security_model", "v2c", ["security_name"]],
+            ["security_model", "v3", ["security_name_v3"]]
+        ]
+        self.module = AnsibleModule(
argument_spec=argument_spec, + required_together=required_together, + required_if=required_if, + supports_check_mode=True + ) + + # module args + self.state = self.module.params['state'] + self.version = self.module.params['version'] + self.connect_port = self.module.params['connect_port'] + self.host_name = self.module.params['host_name'] + self.domain = "snmpUDPDomain" + self.address = self.module.params['address'] + self.notify_type = self.module.params['notify_type'] + self.vpn_name = self.module.params['vpn_name'] + self.recv_port = self.module.params['recv_port'] + self.security_model = self.module.params['security_model'] + self.security_name = self.module.params['security_name'] + self.security_name_v3 = self.module.params['security_name_v3'] + self.security_level = self.module.params['security_level'] + self.is_public_net = self.module.params['is_public_net'] + self.interface_name = self.module.params['interface_name'] + + # config + self.cur_cli_cfg = dict() + self.cur_netconf_cfg = dict() + self.end_netconf_cfg = dict() + + # state + self.changed = False + self.updates_cmd = list() + self.results = dict() + self.proposed = dict() + self.existing = dict() + self.end_state = dict() + + def netconf_get_config(self, conf_str): + """ Get configure by netconf """ + + xml_str = get_nc_config(self.module, conf_str) + + return xml_str + + def netconf_set_config(self, conf_str): + """ Set configure by netconf """ + + xml_str = set_nc_config(self.module, conf_str) + + return xml_str + + def check_cli_args(self): + """ Check invalid cli args """ + + if self.connect_port: + if int(self.connect_port) != 161 and (int(self.connect_port) > 65535 or int(self.connect_port) < 1025): + self.module.fail_json( + msg='Error: The value of connect_port %s is out of [161, 1025 - 65535].' % self.connect_port) + + def check_netconf_args(self, result): + """ Check invalid netconf args """ + + need_cfg = True + same_flag = True + delete_flag = False + result["target_host_info"] = [] + + if self.host_name: + + if len(self.host_name) > 32 or len(self.host_name) < 1: + self.module.fail_json( + msg='Error: The len of host_name is out of [1 - 32].') + + if self.vpn_name and self.is_public_net != 'no_use': + if self.is_public_net == "true": + self.module.fail_json( + msg='Error: Do not support vpn_name and is_public_net at the same time.') + + conf_str = CE_GET_SNMP_TARGET_HOST_HEADER + + if self.domain: + conf_str += "" + + if self.address: + if not check_ip_addr(ipaddr=self.address): + self.module.fail_json( + msg='Error: The host address [%s] is invalid.' % self.address) + conf_str += "
" + + if self.notify_type: + conf_str += "" + + if self.vpn_name: + if len(self.vpn_name) > 31 or len(self.vpn_name) < 1: + self.module.fail_json( + msg='Error: The len of vpn_name is out of [1 - 31].') + conf_str += "" + + if self.recv_port: + if int(self.recv_port) > 65535 or int(self.recv_port) < 0: + self.module.fail_json( + msg='Error: The value of recv_port is out of [0 - 65535].') + conf_str += "" + + if self.security_model: + conf_str += "" + + if self.security_name: + if len(self.security_name) > 32 or len(self.security_name) < 1: + self.module.fail_json( + msg='Error: The len of security_name is out of [1 - 32].') + conf_str += "" + + if self.security_name_v3: + if len(self.security_name_v3) > 32 or len(self.security_name_v3) < 1: + self.module.fail_json( + msg='Error: The len of security_name_v3 is out of [1 - 32].') + conf_str += "" + + if self.security_level: + conf_str += "" + + if self.is_public_net != 'no_use': + conf_str += "" + + if self.interface_name: + if len(self.interface_name) > 63 or len(self.interface_name) < 1: + self.module.fail_json( + msg='Error: The len of interface_name is out of [1 - 63].') + + find_flag = False + for item in INTERFACE_TYPE: + if item in self.interface_name.lower(): + find_flag = True + break + if not find_flag: + self.module.fail_json( + msg='Error: Please input full name of interface_name.') + + conf_str += "" + + conf_str += CE_GET_SNMP_TARGET_HOST_TAIL + recv_xml = self.netconf_get_config(conf_str=conf_str) + + if "" in recv_xml: + if self.state == "present": + same_flag = False + else: + delete_flag = False + else: + xml_str = recv_xml.replace('\r', '').replace('\n', '').\ + replace('xmlns="urn:ietf:params:xml:ns:netconf:base:1.0"', "").\ + replace('xmlns="http://www.huawei.com/netconf/vrp"', "") + + root = ElementTree.fromstring(xml_str) + target_host_info = root.findall( + "snmp/targetHosts/targetHost") + if target_host_info: + for tmp in target_host_info: + tmp_dict = dict() + for site in tmp: + if site.tag in ["nmsName", "domain", "address", "notifyType", "vpnInstanceName", + "portNumber", "securityModel", "securityName", "securityNameV3", + "securityLevel", "isPublicNet", "interface-name"]: + tmp_dict[site.tag] = site.text + + result["target_host_info"].append(tmp_dict) + + if result["target_host_info"]: + for tmp in result["target_host_info"]: + + same_flag = True + + if "nmsName" in tmp.keys(): + if tmp["nmsName"] != self.host_name: + same_flag = False + else: + delete_flag = True + + if "domain" in tmp.keys(): + if tmp["domain"] != self.domain: + same_flag = False + + if "address" in tmp.keys(): + if tmp["address"] != self.address: + same_flag = False + + if "notifyType" in tmp.keys(): + if tmp["notifyType"] != self.notify_type: + same_flag = False + + if "vpnInstanceName" in tmp.keys(): + if tmp["vpnInstanceName"] != self.vpn_name: + same_flag = False + + if "portNumber" in tmp.keys(): + if tmp["portNumber"] != self.recv_port: + same_flag = False + + if "securityModel" in tmp.keys(): + if tmp["securityModel"] != self.security_model: + same_flag = False + + if "securityName" in tmp.keys(): + if tmp["securityName"] != self.security_name: + same_flag = False + + if "securityNameV3" in tmp.keys(): + if tmp["securityNameV3"] != self.security_name_v3: + same_flag = False + + if "securityLevel" in tmp.keys(): + if tmp["securityLevel"] != self.security_level: + same_flag = False + + if "isPublicNet" in tmp.keys(): + if tmp["isPublicNet"] != self.is_public_net: + same_flag = False + + if "interface-name" in tmp.keys(): + if 
tmp.get("interface-name") is not None: + if tmp["interface-name"].lower() != self.interface_name.lower(): + same_flag = False + else: + same_flag = False + + if same_flag: + break + + if self.state == "present": + need_cfg = True + if same_flag: + need_cfg = False + else: + need_cfg = False + if delete_flag: + need_cfg = True + + result["need_cfg"] = need_cfg + + def cli_load_config(self, commands): + """ Load configure by cli """ + + if not self.module.check_mode: + load_config(self.module, commands) + + def get_snmp_version(self): + """ Get snmp version """ + + version = None + conf_str = CE_GET_SNMP_VERSION + recv_xml = self.netconf_get_config(conf_str=conf_str) + + if "" in recv_xml: + pass + + else: + xml_str = recv_xml.replace('\r', '').replace('\n', '').\ + replace('xmlns="urn:ietf:params:xml:ns:netconf:base:1.0"', "").\ + replace('xmlns="http://www.huawei.com/netconf/vrp"', "") + + root = ElementTree.fromstring(xml_str) + version_info = root.find("snmp/engine") + if version_info: + for site in version_info: + if site.tag in ["version"]: + version = site.text + + return version + + def xml_get_connect_port(self): + """ Get connect port by xml """ + tmp_cfg = None + conf_str = CE_GET_SNMP_PORT + recv_xml = self.netconf_get_config(conf_str=conf_str) + if "" in recv_xml: + pass + else: + xml_str = recv_xml.replace('\r', '').replace('\n', '').\ + replace('xmlns="urn:ietf:params:xml:ns:netconf:base:1.0"', "").\ + replace('xmlns="http://www.huawei.com/netconf/vrp"', "") + + root = ElementTree.fromstring(xml_str) + snmp_port_info = root.findall("snmp/systemCfg/snmpListenPort") + + if snmp_port_info: + tmp_cfg = snmp_port_info[0].text + return tmp_cfg + + def get_proposed(self): + """ Get proposed state """ + + self.proposed["state"] = self.state + + if self.version: + self.proposed["version"] = self.version + if self.connect_port: + self.proposed["connect_port"] = self.connect_port + if self.host_name: + self.proposed["host_name"] = self.host_name + if self.address: + self.proposed["address"] = self.address + if self.notify_type: + self.proposed["notify_type"] = self.notify_type + if self.vpn_name: + self.proposed["vpn_name"] = self.vpn_name + if self.recv_port: + self.proposed["recv_port"] = self.recv_port + if self.security_model: + self.proposed["security_model"] = self.security_model + if self.security_name: + self.proposed["security_name"] = "******" + if self.security_name_v3: + self.proposed["security_name_v3"] = self.security_name_v3 + if self.security_level: + self.proposed["security_level"] = self.security_level + if self.is_public_net != 'no_use': + self.proposed["is_public_net"] = self.is_public_net + if self.interface_name: + self.proposed["interface_name"] = self.interface_name + + def get_existing(self): + """ Get existing state """ + + if self.version: + version = self.get_snmp_version() + if version: + self.cur_cli_cfg["version"] = version + self.existing["version"] = version + + if self.connect_port: + tmp_cfg = self.xml_get_connect_port() + if tmp_cfg: + self.cur_cli_cfg["connect port"] = tmp_cfg + self.existing["connect port"] = tmp_cfg + + if self.host_name: + self.existing["target host info"] = self.cur_netconf_cfg[ + "target_host_info"] + + def get_end_state(self): + """ Get end state """ + + if self.version: + version = self.get_snmp_version() + if version: + self.end_state["version"] = version + + if self.connect_port: + tmp_cfg = self.xml_get_connect_port() + if tmp_cfg: + self.end_state["connect port"] = tmp_cfg + + if self.host_name: + self.end_state["target 
host info"] = self.end_netconf_cfg[ + "target_host_info"] + if self.existing == self.end_state: + self.changed = False + self.updates_cmd = list() + + def config_version_cli(self): + """ Config version by cli """ + + if "disable" in self.cur_cli_cfg["version"]: + cmd = "snmp-agent sys-info version %s" % self.version + self.updates_cmd.append(cmd) + + cmds = list() + cmds.append(cmd) + + self.cli_load_config(cmds) + self.changed = True + + else: + if self.version != self.cur_cli_cfg["version"]: + cmd = "snmp-agent sys-info version %s disable" % self.cur_cli_cfg[ + "version"] + self.updates_cmd.append(cmd) + cmd = "snmp-agent sys-info version %s" % self.version + self.updates_cmd.append(cmd) + + cmds = list() + cmds.append(cmd) + + self.cli_load_config(cmds) + self.changed = True + + def undo_config_version_cli(self): + """ Undo config version by cli """ + + if "disable" in self.cur_cli_cfg["version"]: + pass + else: + cmd = "snmp-agent sys-info version %s disable" % self.cur_cli_cfg[ + "version"] + + cmds = list() + cmds.append(cmd) + + self.updates_cmd.append(cmd) + self.cli_load_config(cmds) + self.changed = True + + def config_connect_port_xml(self): + """ Config connect port by xml """ + + if "connect port" in self.cur_cli_cfg.keys(): + if self.cur_cli_cfg["connect port"] == self.connect_port: + pass + else: + cmd = "snmp-agent udp-port %s" % self.connect_port + + cmds = list() + cmds.append(cmd) + + self.updates_cmd.append(cmd) + conf_str = CE_MERGE_SNMP_PORT % self.connect_port + self.netconf_set_config(conf_str=conf_str) + self.changed = True + else: + cmd = "snmp-agent udp-port %s" % self.connect_port + + cmds = list() + cmds.append(cmd) + + self.updates_cmd.append(cmd) + conf_str = CE_MERGE_SNMP_PORT % self.connect_port + self.netconf_set_config(conf_str=conf_str) + self.changed = True + + def undo_config_connect_port_cli(self): + """ Undo config connect port by cli """ + + if "connect port" in self.cur_cli_cfg.keys(): + if not self.cur_cli_cfg["connect port"]: + pass + else: + cmd = "undo snmp-agent udp-port" + + cmds = list() + cmds.append(cmd) + + self.updates_cmd.append(cmd) + connect_port = "161" + conf_str = CE_MERGE_SNMP_PORT % connect_port + self.netconf_set_config(conf_str=conf_str) + self.changed = True + + def merge_snmp_target_host(self): + """ Merge snmp target host operation """ + + conf_str = CE_MERGE_SNMP_TARGET_HOST_HEADER % self.host_name + + if self.domain: + conf_str += "%s" % self.domain + if self.address: + conf_str += "
%s
" % self.address + if self.notify_type: + conf_str += "%s" % self.notify_type + if self.vpn_name: + conf_str += "%s" % self.vpn_name + if self.recv_port: + conf_str += "%s" % self.recv_port + if self.security_model: + conf_str += "%s" % self.security_model + if self.security_name: + conf_str += "%s" % self.security_name + if self.security_name_v3: + conf_str += "%s" % self.security_name_v3 + if self.security_level: + conf_str += "%s" % self.security_level + if self.is_public_net != 'no_use': + conf_str += "%s" % self.is_public_net + if self.interface_name: + conf_str += "%s" % self.interface_name + + conf_str += CE_MERGE_SNMP_TARGET_HOST_TAIL + + recv_xml = self.netconf_set_config(conf_str=conf_str) + + if "" not in recv_xml: + self.module.fail_json(msg='Error: Merge snmp target host failed.') + + cmd = "snmp-agent target-host host-name %s " % self.host_name + cmd += "%s " % self.notify_type + cmd += "address udp-domain %s " % self.address + + if self.recv_port: + cmd += "udp-port %s " % self.recv_port + if self.interface_name: + cmd += "source %s " % self.interface_name + if self.vpn_name: + cmd += "vpn-instance %s " % self.vpn_name + if self.is_public_net == "true": + cmd += "public-net " + if self.security_model in ["v1", "v2c"] and self.security_name: + cmd += "params securityname %s %s " % ( + "******", self.security_model) + if self.security_model == "v3" and self.security_name_v3: + cmd += "params securityname %s %s " % ( + self.security_name_v3, self.security_model) + if self.security_level and self.security_level in ["authentication", "privacy"]: + cmd += "%s" % self.security_level + + self.changed = True + self.updates_cmd.append(cmd) + + def delete_snmp_target_host(self): + """ Delete snmp target host operation """ + + conf_str = CE_DELETE_SNMP_TARGET_HOST_HEADER % self.host_name + + if self.domain: + conf_str += "%s" % self.domain + if self.address: + conf_str += "
%s
" % self.address + if self.notify_type: + conf_str += "%s" % self.notify_type + if self.vpn_name: + conf_str += "%s" % self.vpn_name + if self.recv_port: + conf_str += "%s" % self.recv_port + if self.security_model: + conf_str += "%s" % self.security_model + if self.security_name: + conf_str += "%s" % self.security_name + if self.security_name_v3: + conf_str += "%s" % self.security_name_v3 + if self.security_level: + conf_str += "%s" % self.security_level + if self.is_public_net != 'no_use': + conf_str += "%s" % self.is_public_net + if self.interface_name: + conf_str += "%s" % self.interface_name + + conf_str += CE_DELETE_SNMP_TARGET_HOST_TAIL + + recv_xml = self.netconf_set_config(conf_str=conf_str) + + if "" not in recv_xml: + self.module.fail_json(msg='Error: Delete snmp target host failed.') + + if not self.address: + cmd = "undo snmp-agent target-host host-name %s " % self.host_name + else: + if self.notify_type == "trap": + cmd = "undo snmp-agent target-host trap address udp-domain %s " % self.address + else: + cmd = "undo snmp-agent target-host inform address udp-domain %s " % self.address + if self.recv_port: + cmd += "udp-port %s " % self.recv_port + if self.interface_name: + cmd += "source %s " % self.interface_name + if self.vpn_name: + cmd += "vpn-instance %s " % self.vpn_name + if self.is_public_net == "true": + cmd += "public-net " + if self.security_model in ["v1", "v2c"] and self.security_name: + cmd += "params securityname %s" % "******" + if self.security_model == "v3" and self.security_name_v3: + cmd += "params securityname %s" % self.security_name_v3 + + self.changed = True + self.updates_cmd.append(cmd) + + def merge_snmp_version(self): + """ Merge snmp version operation """ + + conf_str = CE_MERGE_SNMP_VERSION % self.version + recv_xml = self.netconf_set_config(conf_str=conf_str) + + if "" not in recv_xml: + self.module.fail_json(msg='Error: Merge snmp version failed.') + + if self.version == "none": + cmd = "snmp-agent sys-info version %s disable" % self.cur_cli_cfg[ + "version"] + self.updates_cmd.append(cmd) + elif self.version == "v1v2c": + cmd = "snmp-agent sys-info version v1" + self.updates_cmd.append(cmd) + cmd = "snmp-agent sys-info version v2c" + self.updates_cmd.append(cmd) + elif self.version == "v1v3": + cmd = "snmp-agent sys-info version v1" + self.updates_cmd.append(cmd) + cmd = "snmp-agent sys-info version v3" + self.updates_cmd.append(cmd) + elif self.version == "v2cv3": + cmd = "snmp-agent sys-info version v2c" + self.updates_cmd.append(cmd) + cmd = "snmp-agent sys-info version v3" + self.updates_cmd.append(cmd) + else: + cmd = "snmp-agent sys-info version %s" % self.version + self.updates_cmd.append(cmd) + + self.changed = True + + def work(self): + """ Main work function """ + + self.check_cli_args() + self.check_netconf_args(self.cur_netconf_cfg) + self.get_proposed() + self.get_existing() + + if self.state == "present": + if self.version: + if self.version != self.cur_cli_cfg["version"]: + self.merge_snmp_version() + if self.connect_port: + self.config_connect_port_xml() + if self.cur_netconf_cfg["need_cfg"]: + self.merge_snmp_target_host() + + else: + if self.connect_port: + self.undo_config_connect_port_cli() + if self.cur_netconf_cfg["need_cfg"]: + self.delete_snmp_target_host() + + self.check_netconf_args(self.end_netconf_cfg) + self.get_end_state() + + self.results['changed'] = self.changed + self.results['proposed'] = self.proposed + self.results['existing'] = self.existing + self.results['end_state'] = self.end_state + 
self.results['updates'] = self.updates_cmd + + self.module.exit_json(**self.results) + + +def main(): + """ Module main """ + + argument_spec = dict( + state=dict(choices=['present', 'absent'], default='present'), + version=dict(choices=['none', 'v1', 'v2c', 'v3', + 'v1v2c', 'v1v3', 'v2cv3', 'all']), + connect_port=dict(type='str'), + host_name=dict(type='str'), + address=dict(type='str'), + notify_type=dict(choices=['trap', 'inform']), + vpn_name=dict(type='str'), + recv_port=dict(type='str'), + security_model=dict(choices=['v1', 'v2c', 'v3']), + security_name=dict(type='str', no_log=True), + security_name_v3=dict(type='str'), + security_level=dict( + choices=['noAuthNoPriv', 'authentication', 'privacy']), + is_public_net=dict(type='str', default='no_use', choices=['no_use', 'true', 'false']), + interface_name=dict(type='str') + ) + + argument_spec.update(ce_argument_spec) + module = SnmpTargetHost(argument_spec=argument_spec) + module.work() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/cloudengine/ce_snmp_traps.py b/plugins/modules/network/cloudengine/ce_snmp_traps.py new file mode 100644 index 0000000000..ec40b8f00c --- /dev/null +++ b/plugins/modules/network/cloudengine/ce_snmp_traps.py @@ -0,0 +1,563 @@ +#!/usr/bin/python +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: ce_snmp_traps +short_description: Manages SNMP traps configuration on HUAWEI CloudEngine switches. +description: + - Manages SNMP traps configurations on HUAWEI CloudEngine switches. +author: + - wangdezhuang (@QijunPan) +notes: + - Recommended connection is C(network_cli). + - This module also works with C(local) connections for legacy playbooks. +options: + feature_name: + description: + - Alarm feature name. + choices: ['aaa', 'arp', 'bfd', 'bgp', 'cfg', 'configuration', 'dad', 'devm', + 'dhcpsnp', 'dldp', 'driver', 'efm', 'erps', 'error-down', 'fcoe', + 'fei', 'fei_comm', 'fm', 'ifnet', 'info', 'ipsg', 'ipv6', 'isis', + 'l3vpn', 'lacp', 'lcs', 'ldm', 'ldp', 'ldt', 'lldp', 'mpls_lspm', + 'msdp', 'mstp', 'nd', 'netconf', 'nqa', 'nvo3', 'openflow', 'ospf', + 'ospfv3', 'pim', 'pim-std', 'qos', 'radius', 'rm', 'rmon', 'securitytrap', + 'smlktrap', 'snmp', 'ssh', 'stackmng', 'sysclock', 'sysom', 'system', + 'tcp', 'telnet', 'trill', 'trunk', 'tty', 'vbst', 'vfs', 'virtual-perception', + 'vrrp', 'vstm', 'all'] + trap_name: + description: + - Alarm trap name. + interface_type: + description: + - Interface type. + choices: ['Ethernet', 'Eth-Trunk', 'Tunnel', 'NULL', 'LoopBack', 'Vlanif', '100GE', + '40GE', 'MTunnel', '10GE', 'GE', 'MEth', 'Vbdif', 'Nve'] + interface_number: + description: + - Interface number. + port_number: + description: + - Source port number. 
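+    state:
+        description:
+            - Specify desired state of the snmp traps configuration.
+        default: present
+        choices: ['present', 'absent']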
+''' + +EXAMPLES = ''' + +- name: CloudEngine snmp traps test + hosts: cloudengine + connection: local + gather_facts: no + vars: + cli: + host: "{{ inventory_hostname }}" + port: "{{ ansible_ssh_port }}" + username: "{{ username }}" + password: "{{ password }}" + transport: cli + + tasks: + + - name: "Config SNMP trap all enable" + ce_snmp_traps: + state: present + feature_name: all + provider: "{{ cli }}" + + - name: "Config SNMP trap interface" + ce_snmp_traps: + state: present + interface_type: 40GE + interface_number: 2/0/1 + provider: "{{ cli }}" + + - name: "Config SNMP trap port" + ce_snmp_traps: + state: present + port_number: 2222 + provider: "{{ cli }}" +''' + +RETURN = ''' +changed: + description: check to see if a change was made on the device + returned: always + type: bool + sample: true +proposed: + description: k/v pairs of parameters passed into module + returned: always + type: dict + sample: {"feature_name": "all", + "state": "present"} +existing: + description: k/v pairs of existing aaa server + returned: always + type: dict + sample: {"snmp-agent trap": [], + "undo snmp-agent trap": []} +end_state: + description: k/v pairs of aaa params after module execution + returned: always + type: dict + sample: {"snmp-agent trap": ["enable"], + "undo snmp-agent trap": []} +updates: + description: command sent to the device + returned: always + type: list + sample: ["snmp-agent trap enable"] +''' + + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.network.cloudengine.ce import load_config, ce_argument_spec, run_commands +from ansible.module_utils.connection import exec_command + + +class SnmpTraps(object): + """ Manages SNMP trap configuration """ + + def __init__(self, **kwargs): + """ Class init """ + + # module + argument_spec = kwargs["argument_spec"] + self.spec = argument_spec + self.module = AnsibleModule( + argument_spec=self.spec, + required_together=[("interface_type", "interface_number")], + supports_check_mode=True + ) + + # config + self.cur_cfg = dict() + self.cur_cfg["snmp-agent trap"] = [] + self.cur_cfg["undo snmp-agent trap"] = [] + + # module args + self.state = self.module.params['state'] + self.feature_name = self.module.params['feature_name'] + self.trap_name = self.module.params['trap_name'] + self.interface_type = self.module.params['interface_type'] + self.interface_number = self.module.params['interface_number'] + self.port_number = self.module.params['port_number'] + + # state + self.changed = False + self.updates_cmd = list() + self.results = dict() + self.proposed = dict() + self.existing = dict() + self.existing["snmp-agent trap"] = [] + self.existing["undo snmp-agent trap"] = [] + self.end_state = dict() + self.end_state["snmp-agent trap"] = [] + self.end_state["undo snmp-agent trap"] = [] + + commands = list() + cmd1 = 'display interface brief' + commands.append(cmd1) + self.interface = run_commands(self.module, commands) + + def get_config(self, flags=None): + """Retrieves the current config from the device or cache + """ + flags = [] if flags is None else flags + + cmd = 'display current-configuration ' + cmd += ' '.join(flags) + cmd = cmd.strip() + + rc, out, err = exec_command(self.module, cmd) + if rc != 0: + self.module.fail_json(msg=err) + cfg = str(out).strip() + + return cfg + + def check_args(self): + """ Check invalid args """ + + if self.port_number: + if self.port_number.isdigit(): + if int(self.port_number) < 1025 or int(self.port_number) > 65535: + 
self.module.fail_json( + msg='Error: The value of port_number is out of [1025 - 65535].') + else: + self.module.fail_json( + msg='Error: The port_number is not digit.') + + if self.interface_type and self.interface_number: + tmp_interface = self.interface_type + self.interface_number + if tmp_interface not in self.interface[0]: + self.module.fail_json( + msg='Error: The interface %s is not in the device.' % tmp_interface) + + def get_proposed(self): + """ Get proposed state """ + + self.proposed["state"] = self.state + + if self.feature_name: + self.proposed["feature_name"] = self.feature_name + + if self.trap_name: + self.proposed["trap_name"] = self.trap_name + + if self.interface_type: + self.proposed["interface_type"] = self.interface_type + + if self.interface_number: + self.proposed["interface_number"] = self.interface_number + + if self.port_number: + self.proposed["port_number"] = self.port_number + + def get_existing(self): + """ Get existing state """ + + tmp_cfg = self.cli_get_config() + if tmp_cfg: + temp_cfg_lower = tmp_cfg.lower() + temp_data = tmp_cfg.split("\n") + temp_data_lower = temp_cfg_lower.split("\n") + + for item in temp_data: + if "snmp-agent trap source-port " in item: + if self.port_number: + item_tmp = item.split("snmp-agent trap source-port ") + self.cur_cfg["trap source-port"] = item_tmp[1] + self.existing["trap source-port"] = item_tmp[1] + elif "snmp-agent trap source " in item: + if self.interface_type: + item_tmp = item.split("snmp-agent trap source ") + self.cur_cfg["trap source interface"] = item_tmp[1] + self.existing["trap source interface"] = item_tmp[1] + + if self.feature_name: + for item in temp_data_lower: + if item == "snmp-agent trap enable": + self.cur_cfg["snmp-agent trap"].append("enable") + self.existing["snmp-agent trap"].append("enable") + elif item == "snmp-agent trap disable": + self.cur_cfg["snmp-agent trap"].append("disable") + self.existing["snmp-agent trap"].append("disable") + elif "undo snmp-agent trap enable " in item: + item_tmp = item.split("undo snmp-agent trap enable ") + self.cur_cfg[ + "undo snmp-agent trap"].append(item_tmp[1]) + self.existing[ + "undo snmp-agent trap"].append(item_tmp[1]) + elif "snmp-agent trap enable " in item: + item_tmp = item.split("snmp-agent trap enable ") + self.cur_cfg["snmp-agent trap"].append(item_tmp[1]) + self.existing["snmp-agent trap"].append(item_tmp[1]) + else: + del self.existing["snmp-agent trap"] + del self.existing["undo snmp-agent trap"] + + def get_end_state(self): + """ Get end_state state """ + + tmp_cfg = self.cli_get_config() + if tmp_cfg: + temp_cfg_lower = tmp_cfg.lower() + temp_data = tmp_cfg.split("\n") + temp_data_lower = temp_cfg_lower.split("\n") + + for item in temp_data: + if "snmp-agent trap source-port " in item: + if self.port_number: + item_tmp = item.split("snmp-agent trap source-port ") + self.end_state["trap source-port"] = item_tmp[1] + elif "snmp-agent trap source " in item: + if self.interface_type: + item_tmp = item.split("snmp-agent trap source ") + self.end_state["trap source interface"] = item_tmp[1] + + if self.feature_name: + for item in temp_data_lower: + if item == "snmp-agent trap enable": + self.end_state["snmp-agent trap"].append("enable") + elif item == "snmp-agent trap disable": + self.end_state["snmp-agent trap"].append("disable") + elif "undo snmp-agent trap enable " in item: + item_tmp = item.split("undo snmp-agent trap enable ") + self.end_state[ + "undo snmp-agent trap"].append(item_tmp[1]) + elif "snmp-agent trap enable " in item: + item_tmp 
= item.split("snmp-agent trap enable ") + self.end_state["snmp-agent trap"].append(item_tmp[1]) + else: + del self.end_state["snmp-agent trap"] + del self.end_state["undo snmp-agent trap"] + if self.end_state == self.existing: + self.changed = False + self.updates_cmd = list() + + def cli_load_config(self, commands): + """ Load configure through cli """ + + if not self.module.check_mode: + load_config(self.module, commands) + + def cli_get_config(self): + """ Get configure through cli """ + + regular = "| include snmp | include trap" + flags = list() + flags.append(regular) + tmp_cfg = self.get_config(flags) + + return tmp_cfg + + def set_trap_feature_name(self): + """ Set feature name for trap """ + + if self.feature_name == "all": + cmd = "snmp-agent trap enable" + else: + cmd = "snmp-agent trap enable feature-name %s" % self.feature_name + if self.trap_name: + cmd += " trap-name %s" % self.trap_name + + self.updates_cmd.append(cmd) + + cmds = list() + cmds.append(cmd) + + self.cli_load_config(cmds) + self.changed = True + + def undo_trap_feature_name(self): + """ Undo feature name for trap """ + + if self.feature_name == "all": + cmd = "undo snmp-agent trap enable" + else: + cmd = "undo snmp-agent trap enable feature-name %s" % self.feature_name + if self.trap_name: + cmd += " trap-name %s" % self.trap_name + + self.updates_cmd.append(cmd) + + cmds = list() + cmds.append(cmd) + + self.cli_load_config(cmds) + self.changed = True + + def set_trap_source_interface(self): + """ Set source interface for trap """ + + cmd = "snmp-agent trap source %s %s" % ( + self.interface_type, self.interface_number) + self.updates_cmd.append(cmd) + + cmds = list() + cmds.append(cmd) + + self.cli_load_config(cmds) + self.changed = True + + def undo_trap_source_interface(self): + """ Undo source interface for trap """ + + cmd = "undo snmp-agent trap source" + self.updates_cmd.append(cmd) + + cmds = list() + cmds.append(cmd) + + self.cli_load_config(cmds) + self.changed = True + + def set_trap_source_port(self): + """ Set source port for trap """ + + cmd = "snmp-agent trap source-port %s" % self.port_number + self.updates_cmd.append(cmd) + + cmds = list() + cmds.append(cmd) + + self.cli_load_config(cmds) + self.changed = True + + def undo_trap_source_port(self): + """ Undo source port for trap """ + + cmd = "undo snmp-agent trap source-port" + self.updates_cmd.append(cmd) + + cmds = list() + cmds.append(cmd) + + self.cli_load_config(cmds) + self.changed = True + + def work(self): + """ The work function """ + + self.check_args() + self.get_proposed() + self.get_existing() + + find_flag = False + find_undo_flag = False + tmp_interface = None + + if self.state == "present": + if self.feature_name: + if self.trap_name: + tmp_cfg = "feature-name %s trap-name %s" % ( + self.feature_name, self.trap_name.lower()) + else: + tmp_cfg = "feature-name %s" % self.feature_name + + find_undo_flag = False + if self.cur_cfg["undo snmp-agent trap"]: + for item in self.cur_cfg["undo snmp-agent trap"]: + if item == tmp_cfg: + find_undo_flag = True + elif tmp_cfg in item: + find_undo_flag = True + elif self.feature_name == "all": + find_undo_flag = True + if find_undo_flag: + self.set_trap_feature_name() + + if not find_undo_flag: + find_flag = False + if self.cur_cfg["snmp-agent trap"]: + for item in self.cur_cfg["snmp-agent trap"]: + if item == "enable": + find_flag = True + elif item == tmp_cfg: + find_flag = True + if not find_flag: + self.set_trap_feature_name() + + if self.interface_type: + find_flag = False + tmp_interface 
= self.interface_type + self.interface_number + + if "trap source interface" in self.cur_cfg.keys(): + if self.cur_cfg["trap source interface"] == tmp_interface: + find_flag = True + + if not find_flag: + self.set_trap_source_interface() + + if self.port_number: + find_flag = False + + if "trap source-port" in self.cur_cfg.keys(): + if self.cur_cfg["trap source-port"] == self.port_number: + find_flag = True + + if not find_flag: + self.set_trap_source_port() + + else: + if self.feature_name: + if self.trap_name: + tmp_cfg = "feature-name %s trap-name %s" % ( + self.feature_name, self.trap_name.lower()) + else: + tmp_cfg = "feature-name %s" % self.feature_name + + find_flag = False + if self.cur_cfg["snmp-agent trap"]: + for item in self.cur_cfg["snmp-agent trap"]: + if item == tmp_cfg: + find_flag = True + elif item == "enable": + find_flag = True + elif tmp_cfg in item: + find_flag = True + else: + find_flag = True + + find_undo_flag = False + if self.cur_cfg["undo snmp-agent trap"]: + for item in self.cur_cfg["undo snmp-agent trap"]: + if item == tmp_cfg: + find_undo_flag = True + elif tmp_cfg in item: + find_undo_flag = True + + if find_undo_flag: + pass + elif find_flag: + self.undo_trap_feature_name() + + if self.interface_type: + if "trap source interface" in self.cur_cfg.keys(): + self.undo_trap_source_interface() + + if self.port_number: + if "trap source-port" in self.cur_cfg.keys(): + self.undo_trap_source_port() + + self.get_end_state() + + self.results['changed'] = self.changed + self.results['proposed'] = self.proposed + self.results['existing'] = self.existing + self.results['end_state'] = self.end_state + self.results['updates'] = self.updates_cmd + + self.module.exit_json(**self.results) + + +def main(): + """ Module main """ + + argument_spec = dict( + state=dict(choices=['present', 'absent'], default='present'), + feature_name=dict(choices=['aaa', 'arp', 'bfd', 'bgp', 'cfg', 'configuration', 'dad', + 'devm', 'dhcpsnp', 'dldp', 'driver', 'efm', 'erps', 'error-down', + 'fcoe', 'fei', 'fei_comm', 'fm', 'ifnet', 'info', 'ipsg', 'ipv6', + 'isis', 'l3vpn', 'lacp', 'lcs', 'ldm', 'ldp', 'ldt', 'lldp', + 'mpls_lspm', 'msdp', 'mstp', 'nd', 'netconf', 'nqa', 'nvo3', + 'openflow', 'ospf', 'ospfv3', 'pim', 'pim-std', 'qos', 'radius', + 'rm', 'rmon', 'securitytrap', 'smlktrap', 'snmp', 'ssh', 'stackmng', + 'sysclock', 'sysom', 'system', 'tcp', 'telnet', 'trill', 'trunk', + 'tty', 'vbst', 'vfs', 'virtual-perception', 'vrrp', 'vstm', 'all']), + trap_name=dict(type='str'), + interface_type=dict(choices=['Ethernet', 'Eth-Trunk', 'Tunnel', 'NULL', 'LoopBack', 'Vlanif', + '100GE', '40GE', 'MTunnel', '10GE', 'GE', 'MEth', 'Vbdif', 'Nve']), + interface_number=dict(type='str'), + port_number=dict(type='str') + ) + + argument_spec.update(ce_argument_spec) + module = SnmpTraps(argument_spec=argument_spec) + module.work() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/cloudengine/ce_snmp_user.py b/plugins/modules/network/cloudengine/ce_snmp_user.py new file mode 100644 index 0000000000..5358b8926f --- /dev/null +++ b/plugins/modules/network/cloudengine/ce_snmp_user.py @@ -0,0 +1,1048 @@ +#!/usr/bin/python +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. 
+# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: ce_snmp_user +short_description: Manages SNMP user configuration on HUAWEI CloudEngine switches. +description: + - Manages SNMP user configurations on CloudEngine switches. +author: + - wangdezhuang (@QijunPan) +notes: + - This module requires the netconf system service be enabled on the remote device being managed. + - Recommended connection is C(netconf). + - This module also works with C(local) connections for legacy playbooks. +options: + acl_number: + description: + - Access control list number. + usm_user_name: + description: + - Unique name to identify the USM user. + aaa_local_user: + description: + - Unique name to identify the local user. + remote_engine_id: + description: + - Remote engine id of the USM user. + user_group: + description: + - Name of the group where user belongs to. + auth_protocol: + description: + - Authentication protocol. + choices: ['noAuth', 'md5', 'sha'] + auth_key: + description: + - The authentication password. Password length, 8-255 characters. + priv_protocol: + description: + - Encryption protocol. + choices: ['noPriv', 'des56', '3des168', 'aes128', 'aes192', 'aes256'] + priv_key: + description: + - The encryption password. Password length 8-255 characters. 
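+    state:
+        description:
+            - Specify desired state of the snmp user.
+        default: present
+        choices: ['present', 'absent']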
+''' + +EXAMPLES = ''' + +- name: CloudEngine snmp user test + hosts: cloudengine + connection: local + gather_facts: no + vars: + cli: + host: "{{ inventory_hostname }}" + port: "{{ ansible_ssh_port }}" + username: "{{ username }}" + password: "{{ password }}" + transport: cli + + tasks: + + - name: "Config SNMP usm user" + ce_snmp_user: + state: present + usm_user_name: wdz_snmp + remote_engine_id: 800007DB03389222111200 + acl_number: 2000 + user_group: wdz_group + provider: "{{ cli }}" + + - name: "Undo SNMP usm user" + ce_snmp_user: + state: absent + usm_user_name: wdz_snmp + remote_engine_id: 800007DB03389222111200 + acl_number: 2000 + user_group: wdz_group + provider: "{{ cli }}" + + - name: "Config SNMP local user" + ce_snmp_user: + state: present + aaa_local_user: wdz_user + auth_protocol: md5 + auth_key: huawei123 + priv_protocol: des56 + priv_key: huawei123 + provider: "{{ cli }}" + + - name: "Config SNMP local user" + ce_snmp_user: + state: absent + aaa_local_user: wdz_user + auth_protocol: md5 + auth_key: huawei123 + priv_protocol: des56 + priv_key: huawei123 + provider: "{{ cli }}" +''' + +RETURN = ''' +changed: + description: check to see if a change was made on the device + returned: always + type: bool + sample: true +proposed: + description: k/v pairs of parameters passed into module + returned: always + type: dict + sample: {"acl_number": "2000", "remote_engine_id": "800007DB03389222111200", + "state": "present", "user_group": "wdz_group", + "usm_user_name": "wdz_snmp"} +existing: + description: k/v pairs of existing aaa server + returned: always + type: dict + sample: {"snmp local user": {"local_user_info": []}, + "snmp usm user": {"usm_user_info": []}} +end_state: + description: k/v pairs of aaa params after module execution + returned: always + type: dict + sample: {"snmp local user": {"local_user_info": []}, + "snmp usm user": {"usm_user_info": [{"aclNumber": "2000", "engineID": "800007DB03389222111200", + "groupName": "wdz_group", "userName": "wdz_snmp"}]}} +updates: + description: command sent to the device + returned: always + type: list + sample: ["snmp-agent remote-engineid 800007DB03389222111200 usm-user v3 wdz_snmp wdz_group acl 2000"] +''' + +from xml.etree import ElementTree +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.network.cloudengine.ce import get_nc_config, set_nc_config +from ansible_collections.community.general.plugins.module_utils.network.cloudengine.ce import ce_argument_spec + +# get snmp v3 USM user +CE_GET_SNMP_V3_USM_USER_HEADER = """ + + + + + + + +""" +CE_GET_SNMP_V3_USM_USER_TAIL = """ + + + + +""" +# merge snmp v3 USM user +CE_MERGE_SNMP_V3_USM_USER_HEADER = """ + + + + + %s + %s + %s +""" +CE_MERGE_SNMP_V3_USM_USER_TAIL = """ + + + + +""" +# create snmp v3 USM user +CE_CREATE_SNMP_V3_USM_USER_HEADER = """ + + + + + %s + %s + %s +""" +CE_CREATE_SNMP_V3_USM_USER_TAIL = """ + + + + +""" +# delete snmp v3 USM user +CE_DELETE_SNMP_V3_USM_USER_HEADER = """ + + + + + %s + %s + %s +""" +CE_DELETE_SNMP_V3_USM_USER_TAIL = """ + + + + +""" + +# get snmp v3 aaa local user +CE_GET_SNMP_V3_LOCAL_USER = """ + + + + + + + + + + + + + +""" +# merge snmp v3 aaa local user +CE_MERGE_SNMP_V3_LOCAL_USER = """ + + + + + %s + %s + %s + %s + %s + + + + +""" +# create snmp v3 aaa local user +CE_CREATE_SNMP_V3_LOCAL_USER = """ + + + + + %s + %s + %s + %s + %s + + + + +""" +# delete snmp v3 aaa local user +CE_DELETE_SNMP_V3_LOCAL_USER = """ + + + + + %s + %s + %s + %s + %s + + + + +""" +# 
display info +GET_SNMP_LOCAL_ENGINE = """ + + + + + + + +""" + + +class SnmpUser(object): + """ Manages SNMP user configuration """ + + def netconf_get_config(self, **kwargs): + """ Get configure by netconf """ + + module = kwargs["module"] + conf_str = kwargs["conf_str"] + + xml_str = get_nc_config(module, conf_str) + + return xml_str + + def netconf_set_config(self, **kwargs): + """ Set configure by netconf """ + + module = kwargs["module"] + conf_str = kwargs["conf_str"] + + xml_str = set_nc_config(module, conf_str) + + return xml_str + + def check_snmp_v3_usm_user_args(self, **kwargs): + """ Check snmp v3 usm user invalid args """ + + module = kwargs["module"] + result = dict() + need_cfg = False + state = module.params['state'] + usm_user_name = module.params['usm_user_name'] + remote_engine_id = module.params['remote_engine_id'] + + acl_number = module.params['acl_number'] + user_group = module.params['user_group'] + auth_protocol = module.params['auth_protocol'] + auth_key = module.params['auth_key'] + priv_protocol = module.params['priv_protocol'] + priv_key = module.params['priv_key'] + + local_user_name = module.params['aaa_local_user'] + + if usm_user_name: + if len(usm_user_name) > 32 or len(usm_user_name) == 0: + module.fail_json( + msg='Error: The length of usm_user_name %s is out of [1 - 32].' % usm_user_name) + if remote_engine_id: + if len(remote_engine_id) > 64 or len(remote_engine_id) < 10: + module.fail_json( + msg='Error: The length of remote_engine_id %s is out of [10 - 64].' % remote_engine_id) + + conf_str = CE_GET_SNMP_V3_USM_USER_HEADER + + if acl_number: + if acl_number.isdigit(): + if int(acl_number) > 2999 or int(acl_number) < 2000: + module.fail_json( + msg='Error: The value of acl_number %s is out of [2000 - 2999].' % acl_number) + else: + if not acl_number[0].isalpha() or len(acl_number) > 32 or len(acl_number) < 1: + module.fail_json( + msg='Error: The length of acl_number %s is out of [1 - 32].' % acl_number) + + conf_str += "" + + if user_group: + if len(user_group) > 32 or len(user_group) == 0: + module.fail_json( + msg='Error: The length of user_group %s is out of [1 - 32].' % user_group) + + conf_str += "" + + if auth_protocol: + conf_str += "" + + if auth_key: + if len(auth_key) > 255 or len(auth_key) == 0: + module.fail_json( + msg='Error: The length of auth_key %s is out of [1 - 255].' % auth_key) + + conf_str += "" + + if priv_protocol: + if not auth_protocol: + module.fail_json( + msg='Error: Please input auth_protocol at the same time.') + + conf_str += "" + + if priv_key: + if len(priv_key) > 255 or len(priv_key) == 0: + module.fail_json( + msg='Error: The length of priv_key %s is out of [1 - 255].' 
% priv_key) + conf_str += "" + + result["usm_user_info"] = [] + + conf_str += CE_GET_SNMP_V3_USM_USER_TAIL + recv_xml = self.netconf_get_config(module=module, conf_str=conf_str) + + if "" in recv_xml: + if state == "present": + need_cfg = True + + else: + xml_str = recv_xml.replace('\r', '').replace('\n', '').\ + replace('xmlns="urn:ietf:params:xml:ns:netconf:base:1.0"', "").\ + replace('xmlns="http://www.huawei.com/netconf/vrp"', "") + + root = ElementTree.fromstring(xml_str) + usm_user_info = root.findall("snmp/usmUsers/usmUser") + if usm_user_info: + for tmp in usm_user_info: + tmp_dict = dict() + tmp_dict["remoteEngineID"] = None + for site in tmp: + if site.tag in ["userName", "remoteEngineID", "engineID", "groupName", "authProtocol", + "authKey", "privProtocol", "privKey", "aclNumber"]: + tmp_dict[site.tag] = site.text + + result["usm_user_info"].append(tmp_dict) + + cur_cfg = dict() + if usm_user_name: + cur_cfg["userName"] = usm_user_name + if user_group: + cur_cfg["groupName"] = user_group + if auth_protocol: + cur_cfg["authProtocol"] = auth_protocol + if auth_key: + cur_cfg["authKey"] = auth_key + if priv_protocol: + cur_cfg["privProtocol"] = priv_protocol + if priv_key: + cur_cfg["privKey"] = priv_key + if acl_number: + cur_cfg["aclNumber"] = acl_number + + if remote_engine_id: + cur_cfg["engineID"] = remote_engine_id + cur_cfg["remoteEngineID"] = "true" + else: + cur_cfg["engineID"] = self.local_engine_id + cur_cfg["remoteEngineID"] = "false" + + if result["usm_user_info"]: + num = 0 + for tmp in result["usm_user_info"]: + if cur_cfg == tmp: + num += 1 + + if num == 0: + if state == "present": + need_cfg = True + else: + need_cfg = False + else: + if state == "present": + need_cfg = False + else: + need_cfg = True + + else: + if state == "present": + need_cfg = True + else: + need_cfg = False + + result["need_cfg"] = need_cfg + return result + + def check_snmp_v3_local_user_args(self, **kwargs): + """ Check snmp v3 local user invalid args """ + + module = kwargs["module"] + result = dict() + + need_cfg = False + state = module.params['state'] + local_user_name = module.params['aaa_local_user'] + auth_protocol = module.params['auth_protocol'] + auth_key = module.params['auth_key'] + priv_protocol = module.params['priv_protocol'] + priv_key = module.params['priv_key'] + + usm_user_name = module.params['usm_user_name'] + + if local_user_name: + + if usm_user_name: + module.fail_json( + msg='Error: Please do not input usm_user_name and local_user_name at the same time.') + + if not auth_protocol or not auth_key or not priv_protocol or not priv_key: + module.fail_json( + msg='Error: Please input auth_protocol auth_key priv_protocol priv_key for local user.') + + if len(local_user_name) > 32 or len(local_user_name) == 0: + module.fail_json( + msg='Error: The length of local_user_name %s is out of [1 - 32].' % local_user_name) + + if len(auth_key) > 255 or len(auth_key) == 0: + module.fail_json( + msg='Error: The length of auth_key %s is out of [1 - 255].' % auth_key) + + if len(priv_key) > 255 or len(priv_key) == 0: + module.fail_json( + msg='Error: The length of priv_key %s is out of [1 - 255].' 
% priv_key) + + result["local_user_info"] = [] + + conf_str = CE_GET_SNMP_V3_LOCAL_USER + recv_xml = self.netconf_get_config(module=module, conf_str=conf_str) + + if "" in recv_xml: + if state == "present": + need_cfg = True + + else: + xml_str = recv_xml.replace('\r', '').replace('\n', '').\ + replace('xmlns="urn:ietf:params:xml:ns:netconf:base:1.0"', "").\ + replace('xmlns="http://www.huawei.com/netconf/vrp"', "") + + root = ElementTree.fromstring(xml_str) + local_user_info = root.findall( + "snmp/localUsers/localUser") + if local_user_info: + for tmp in local_user_info: + tmp_dict = dict() + for site in tmp: + if site.tag in ["userName", "authProtocol", "authKey", "privProtocol", "privKey"]: + tmp_dict[site.tag] = site.text + + result["local_user_info"].append(tmp_dict) + + if result["local_user_info"]: + for tmp in result["local_user_info"]: + if "userName" in tmp.keys(): + if state == "present": + if tmp["userName"] != local_user_name: + need_cfg = True + else: + if tmp["userName"] == local_user_name: + need_cfg = True + if auth_protocol: + if "authProtocol" in tmp.keys(): + if state == "present": + if tmp["authProtocol"] != auth_protocol: + need_cfg = True + else: + if tmp["authProtocol"] == auth_protocol: + need_cfg = True + if auth_key: + if "authKey" in tmp.keys(): + if state == "present": + if tmp["authKey"] != auth_key: + need_cfg = True + else: + if tmp["authKey"] == auth_key: + need_cfg = True + if priv_protocol: + if "privProtocol" in tmp.keys(): + if state == "present": + if tmp["privProtocol"] != priv_protocol: + need_cfg = True + else: + if tmp["privProtocol"] == priv_protocol: + need_cfg = True + if priv_key: + if "privKey" in tmp.keys(): + if state == "present": + if tmp["privKey"] != priv_key: + need_cfg = True + else: + if tmp["privKey"] == priv_key: + need_cfg = True + + result["need_cfg"] = need_cfg + return result + + def merge_snmp_v3_usm_user(self, **kwargs): + """ Merge snmp v3 usm user operation """ + + module = kwargs["module"] + usm_user_name = module.params['usm_user_name'] + remote_engine_id = module.params['remote_engine_id'] + acl_number = module.params['acl_number'] + user_group = module.params['user_group'] + auth_protocol = module.params['auth_protocol'] + auth_key = module.params['auth_key'] + priv_protocol = module.params['priv_protocol'] + priv_key = module.params['priv_key'] + + cmds = [] + + if remote_engine_id: + conf_str = CE_MERGE_SNMP_V3_USM_USER_HEADER % ( + usm_user_name, "true", remote_engine_id) + cmd = "snmp-agent remote-engineid %s usm-user v3 %s" % ( + remote_engine_id, usm_user_name) + else: + if not self.local_engine_id: + module.fail_json( + msg='Error: The local engine id is null, please input remote_engine_id.') + + conf_str = CE_MERGE_SNMP_V3_USM_USER_HEADER % ( + usm_user_name, "false", self.local_engine_id) + cmd = "snmp-agent usm-user v3 %s" % usm_user_name + + if user_group: + conf_str += "%s" % user_group + cmd += " %s" % user_group + + if acl_number: + conf_str += "%s" % acl_number + cmd += " acl %s" % acl_number + + cmds.append(cmd) + + if remote_engine_id: + cmd = "snmp-agent remote-engineid %s usm-user v3 %s" % ( + remote_engine_id, usm_user_name) + else: + cmd = "snmp-agent usm-user v3 %s" % usm_user_name + + if auth_protocol: + conf_str += "%s" % auth_protocol + + if auth_protocol != "noAuth": + cmd += " authentication-mode %s" % auth_protocol + + if auth_key: + conf_str += "%s" % auth_key + + if auth_protocol != "noAuth": + cmd += " cipher %s" % "******" + if auth_protocol or auth_key: + cmds.append(cmd) + + if 
remote_engine_id: + cmd = "snmp-agent remote-engineid %s usm-user v3 %s" % ( + remote_engine_id, usm_user_name) + else: + cmd = "snmp-agent usm-user v3 %s" % usm_user_name + + if priv_protocol: + conf_str += "%s" % priv_protocol + + if auth_protocol != "noAuth" and priv_protocol != "noPriv": + cmd += " privacy-mode %s" % priv_protocol + + if priv_key: + conf_str += "%s" % priv_key + + if auth_protocol != "noAuth" and priv_protocol != "noPriv": + cmd += " cipher %s" % "******" + if priv_key or priv_protocol: + cmds.append(cmd) + + conf_str += CE_MERGE_SNMP_V3_USM_USER_TAIL + recv_xml = self.netconf_set_config(module=module, conf_str=conf_str) + + if "" not in recv_xml: + module.fail_json(msg='Error: Merge snmp v3 usm user failed.') + + return cmds + + def create_snmp_v3_usm_user(self, **kwargs): + """ Create snmp v3 usm user operation """ + + module = kwargs["module"] + usm_user_name = module.params['usm_user_name'] + remote_engine_id = module.params['remote_engine_id'] + acl_number = module.params['acl_number'] + user_group = module.params['user_group'] + auth_protocol = module.params['auth_protocol'] + auth_key = module.params['auth_key'] + priv_protocol = module.params['priv_protocol'] + priv_key = module.params['priv_key'] + + cmds = [] + + if remote_engine_id: + conf_str = CE_CREATE_SNMP_V3_USM_USER_HEADER % ( + usm_user_name, "true", remote_engine_id) + cmd = "snmp-agent remote-engineid %s usm-user v3 %s" % ( + remote_engine_id, usm_user_name) + else: + if not self.local_engine_id: + module.fail_json( + msg='Error: The local engine id is null, please input remote_engine_id.') + + conf_str = CE_CREATE_SNMP_V3_USM_USER_HEADER % ( + usm_user_name, "false", self.local_engine_id) + cmd = "snmp-agent usm-user v3 %s" % usm_user_name + + if user_group: + conf_str += "%s" % user_group + cmd += " %s" % user_group + + if acl_number: + conf_str += "%s" % acl_number + cmd += " acl %s" % acl_number + cmds.append(cmd) + + if remote_engine_id: + cmd = "snmp-agent remote-engineid %s usm-user v3 %s" % ( + remote_engine_id, usm_user_name) + else: + cmd = "snmp-agent usm-user v3 %s" % usm_user_name + + if auth_protocol: + conf_str += "%s" % auth_protocol + + if auth_protocol != "noAuth": + cmd += " authentication-mode %s" % auth_protocol + + if auth_key: + conf_str += "%s" % auth_key + + if auth_protocol != "noAuth": + cmd += " cipher %s" % "******" + + if auth_key or auth_protocol: + cmds.append(cmd) + + if remote_engine_id: + cmd = "snmp-agent remote-engineid %s usm-user v3 %s" % ( + remote_engine_id, usm_user_name) + else: + cmd = "snmp-agent usm-user v3 %s" % usm_user_name + + if priv_protocol: + conf_str += "%s" % priv_protocol + + if auth_protocol != "noAuth" and priv_protocol != "noPriv": + cmd += " privacy-mode %s" % priv_protocol + + if priv_key: + conf_str += "%s" % priv_key + + if auth_protocol != "noAuth" and priv_protocol != "noPriv": + cmd += " cipher %s" % "******" + + if priv_protocol or priv_key: + cmds.append(cmd) + + conf_str += CE_CREATE_SNMP_V3_USM_USER_TAIL + + recv_xml = self.netconf_set_config(module=module, conf_str=conf_str) + + if "" not in recv_xml: + module.fail_json(msg='Error: Create snmp v3 usm user failed.') + + return cmds + + def delete_snmp_v3_usm_user(self, **kwargs): + """ Delete snmp v3 usm user operation """ + + module = kwargs["module"] + usm_user_name = module.params['usm_user_name'] + remote_engine_id = module.params['remote_engine_id'] + acl_number = module.params['acl_number'] + user_group = module.params['user_group'] + auth_protocol = 
module.params['auth_protocol'] + auth_key = module.params['auth_key'] + priv_protocol = module.params['priv_protocol'] + priv_key = module.params['priv_key'] + + if remote_engine_id: + conf_str = CE_DELETE_SNMP_V3_USM_USER_HEADER % ( + usm_user_name, "true", remote_engine_id) + cmd = "undo snmp-agent remote-engineid %s usm-user v3 %s" % ( + remote_engine_id, usm_user_name) + else: + if not self.local_engine_id: + module.fail_json( + msg='Error: The local engine id is null, please input remote_engine_id.') + + conf_str = CE_DELETE_SNMP_V3_USM_USER_HEADER % ( + usm_user_name, "false", self.local_engine_id) + cmd = "undo snmp-agent usm-user v3 %s" % usm_user_name + + if user_group: + conf_str += "%s" % user_group + + if acl_number: + conf_str += "%s" % acl_number + + if auth_protocol: + conf_str += "%s" % auth_protocol + + if auth_key: + conf_str += "%s" % auth_key + + if priv_protocol: + conf_str += "%s" % priv_protocol + + if priv_key: + conf_str += "%s" % priv_key + + conf_str += CE_DELETE_SNMP_V3_USM_USER_TAIL + recv_xml = self.netconf_set_config(module=module, conf_str=conf_str) + + if "" not in recv_xml: + module.fail_json(msg='Error: Delete snmp v3 usm user failed.') + + return cmd + + def merge_snmp_v3_local_user(self, **kwargs): + """ Merge snmp v3 local user operation """ + + module = kwargs["module"] + local_user_name = module.params['aaa_local_user'] + auth_protocol = module.params['auth_protocol'] + auth_key = module.params['auth_key'] + priv_protocol = module.params['priv_protocol'] + priv_key = module.params['priv_key'] + + conf_str = CE_MERGE_SNMP_V3_LOCAL_USER % ( + local_user_name, auth_protocol, auth_key, priv_protocol, priv_key) + recv_xml = self.netconf_set_config(module=module, conf_str=conf_str) + + if "" not in recv_xml: + module.fail_json(msg='Error: Merge snmp v3 local user failed.') + + cmd = "snmp-agent local-user v3 %s " % local_user_name + "authentication-mode %s " % auth_protocol + \ + "cipher ****** " + "privacy-mode %s " % priv_protocol + "cipher ******" + + return cmd + + def create_snmp_v3_local_user(self, **kwargs): + """ Create snmp v3 local user operation """ + + module = kwargs["module"] + local_user_name = module.params['aaa_local_user'] + auth_protocol = module.params['auth_protocol'] + auth_key = module.params['auth_key'] + priv_protocol = module.params['priv_protocol'] + priv_key = module.params['priv_key'] + + conf_str = CE_CREATE_SNMP_V3_LOCAL_USER % ( + local_user_name, auth_protocol, auth_key, priv_protocol, priv_key) + recv_xml = self.netconf_set_config(module=module, conf_str=conf_str) + + if "" not in recv_xml: + module.fail_json(msg='Error: Create snmp v3 local user failed.') + + cmd = "snmp-agent local-user v3 %s " % local_user_name + "authentication-mode %s " % auth_protocol + \ + "cipher ****** " + "privacy-mode %s " % priv_protocol + "cipher ******" + + return cmd + + def delete_snmp_v3_local_user(self, **kwargs): + """ Delete snmp v3 local user operation """ + + module = kwargs["module"] + local_user_name = module.params['aaa_local_user'] + auth_protocol = module.params['auth_protocol'] + auth_key = module.params['auth_key'] + priv_protocol = module.params['priv_protocol'] + priv_key = module.params['priv_key'] + + conf_str = CE_DELETE_SNMP_V3_LOCAL_USER % ( + local_user_name, auth_protocol, auth_key, priv_protocol, priv_key) + recv_xml = self.netconf_set_config(module=module, conf_str=conf_str) + + if "" not in recv_xml: + module.fail_json(msg='Error: Delete snmp v3 local user failed.') + + cmd = "undo snmp-agent local-user v3 %s" % 
local_user_name + + return cmd + + def get_snmp_local_engine(self, **kwargs): + """ Get snmp local engine operation """ + + module = kwargs["module"] + + conf_str = GET_SNMP_LOCAL_ENGINE + recv_xml = self.netconf_get_config(module=module, conf_str=conf_str) + if "" in recv_xml: + xml_str = recv_xml.replace('\r', '').replace('\n', '').\ + replace('xmlns="urn:ietf:params:xml:ns:netconf:base:1.0"', "").\ + replace('xmlns="http://www.huawei.com/netconf/vrp"', "") + + root = ElementTree.fromstring(xml_str) + local_engine_info = root.findall("snmp/engine/engineID") + if local_engine_info: + self.local_engine_id = local_engine_info[0].text + + +def main(): + """ Module main function """ + + argument_spec = dict( + state=dict(choices=['present', 'absent'], default='present'), + acl_number=dict(type='str'), + usm_user_name=dict(type='str'), + remote_engine_id=dict(type='str'), + user_group=dict(type='str'), + auth_protocol=dict(choices=['noAuth', 'md5', 'sha']), + auth_key=dict(type='str', no_log=True), + priv_protocol=dict( + choices=['noPriv', 'des56', '3des168', 'aes128', 'aes192', 'aes256']), + priv_key=dict(type='str', no_log=True), + aaa_local_user=dict(type='str') + ) + + # Note: the mutually exclusive option is aaa_local_user; referencing a + # non-existent option name here would silently disable the check. + mutually_exclusive = [("usm_user_name", "aaa_local_user")] + argument_spec.update(ce_argument_spec) + module = AnsibleModule( + argument_spec=argument_spec, + mutually_exclusive=mutually_exclusive, + supports_check_mode=True + ) + + changed = False + proposed = dict() + existing = dict() + end_state = dict() + updates = [] + + state = module.params['state'] + acl_number = module.params['acl_number'] + usm_user_name = module.params['usm_user_name'] + remote_engine_id = module.params['remote_engine_id'] + user_group = module.params['user_group'] + auth_protocol = module.params['auth_protocol'] + auth_key = module.params['auth_key'] + priv_protocol = module.params['priv_protocol'] + priv_key = module.params['priv_key'] + aaa_local_user = module.params['aaa_local_user'] + + snmp_user_obj = SnmpUser() + + if not snmp_user_obj: + module.fail_json(msg='Error: Init module failed.') + + # get proposed + proposed["state"] = state + if acl_number: + proposed["acl_number"] = acl_number + if usm_user_name: + proposed["usm_user_name"] = usm_user_name + if remote_engine_id: + proposed["remote_engine_id"] = remote_engine_id + if user_group: + proposed["user_group"] = user_group + if auth_protocol: + proposed["auth_protocol"] = auth_protocol + if auth_key: + proposed["auth_key"] = auth_key + if priv_protocol: + proposed["priv_protocol"] = priv_protocol + if priv_key: + proposed["priv_key"] = priv_key + if aaa_local_user: + proposed["aaa_local_user"] = aaa_local_user + + snmp_user_obj.get_snmp_local_engine(module=module) + snmp_v3_usm_user_rst = snmp_user_obj.check_snmp_v3_usm_user_args( + module=module) + snmp_v3_local_user_rst = snmp_user_obj.check_snmp_v3_local_user_args( + module=module) + + # get existing snmp v3 user config + exist_tmp = dict() + for item in snmp_v3_usm_user_rst: + if item != "need_cfg": + exist_tmp[item] = snmp_v3_usm_user_rst[item] + if exist_tmp: + existing["snmp usm user"] = exist_tmp + + exist_tmp = dict() + for item in snmp_v3_local_user_rst: + if item != "need_cfg": + exist_tmp[item] = snmp_v3_local_user_rst[item] + if exist_tmp: + existing["snmp local user"] = exist_tmp + + if state == "present": + if snmp_v3_usm_user_rst["need_cfg"]: + if len(snmp_v3_usm_user_rst["usm_user_info"]) != 0: + cmd = snmp_user_obj.merge_snmp_v3_usm_user(module=module) + changed = True + updates.append(cmd) + else: + cmd = 
snmp_user_obj.create_snmp_v3_usm_user(module=module) + changed = True + updates.append(cmd) + + if snmp_v3_local_user_rst["need_cfg"]: + if len(snmp_v3_local_user_rst["local_user_info"]) != 0: + cmd = snmp_user_obj.merge_snmp_v3_local_user( + module=module) + changed = True + updates.append(cmd) + else: + cmd = snmp_user_obj.create_snmp_v3_local_user( + module=module) + changed = True + updates.append(cmd) + + else: + if snmp_v3_usm_user_rst["need_cfg"]: + cmd = snmp_user_obj.delete_snmp_v3_usm_user(module=module) + changed = True + updates.append(cmd) + if snmp_v3_local_user_rst["need_cfg"]: + cmd = snmp_user_obj.delete_snmp_v3_local_user(module=module) + changed = True + updates.append(cmd) + + # get end state of snmp v3 user config + snmp_v3_usm_user_rst = snmp_user_obj.check_snmp_v3_usm_user_args( + module=module) + end_tmp = dict() + for item in snmp_v3_usm_user_rst: + if item != "need_cfg": + end_tmp[item] = snmp_v3_usm_user_rst[item] + if end_tmp: + end_state["snmp usm user"] = end_tmp + + snmp_v3_local_user_rst = snmp_user_obj.check_snmp_v3_local_user_args( + module=module) + end_tmp = dict() + for item in snmp_v3_local_user_rst: + if item != "need_cfg": + end_tmp[item] = snmp_v3_local_user_rst[item] + if end_tmp: + end_state["snmp local user"] = end_tmp + + results = dict() + results['proposed'] = proposed + results['existing'] = existing + results['changed'] = changed + results['end_state'] = end_state + results['updates'] = updates + + module.exit_json(**results) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/cloudengine/ce_startup.py b/plugins/modules/network/cloudengine/ce_startup.py new file mode 100644 index 0000000000..4311b68bce --- /dev/null +++ b/plugins/modules/network/cloudengine/ce_startup.py @@ -0,0 +1,469 @@ +#!/usr/bin/python +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <http://www.gnu.org/licenses/>. +# + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: ce_startup +short_description: Manages system startup information on HUAWEI CloudEngine switches. +description: + - Manages system startup information on HUAWEI CloudEngine switches. +author: + - Li Yanfeng (@QijunPan) +notes: + - Recommended connection is C(network_cli). + - This module also works with C(local) connections for legacy playbooks. +options: + cfg_file: + description: + - Name of the configuration file that is applied for the next startup. + The value is a string of 5 to 255 characters. + software_file: + description: + - File name of the system software that is applied for the next startup. + The value is a string of 5 to 255 characters. + patch_file: + description: + - Name of the patch file that is applied for the next startup. + slot: + description: + - Position of the device. The value is a string of 1 to 32 characters. 
+ The possible value of slot is all, slave-board, or the specific slotID. + action: + description: + - Display the startup information. + choices: ['display'] + +''' + +EXAMPLES = ''' +- name: startup module test + hosts: cloudengine + connection: local + gather_facts: no + vars: + cli: + host: "{{ inventory_hostname }}" + port: "{{ ansible_ssh_port }}" + username: "{{ username }}" + password: "{{ password }}" + transport: cli + + tasks: + + - name: Display startup information + ce_startup: + action: display + provider: "{{ cli }}" + + - name: Set startup patch file + ce_startup: + patch_file: 2.PAT + slot: all + provider: "{{ cli }}" + + - name: Set startup software file + ce_startup: + software_file: aa.cc + slot: 1 + provider: "{{ cli }}" + + - name: Set startup cfg file + ce_startup: + cfg_file: 2.cfg + slot: 1 + provider: "{{ cli }}" +''' + +RETURN = ''' +changed: + description: check to see if a change was made on the device + returned: always + type: bool + sample: true +proposed: + description: k/v pairs of parameters passed into module + returned: always + type: dict + sample: {"patch_file": "2.PAT", + "slot": "all"} +existing: + description: k/v pairs of existing aaa server + returned: always + type: dict + sample: { + "configSysSoft": "flash:/CE12800-V200R002C20_issuB071.cc", + "curentPatchFile": "NULL", + "curentStartupFile": "NULL", + "curentSysSoft": "flash:/CE12800-V200R002C20_issuB071.cc", + "nextPatchFile": "flash:/1.PAT", + "nextStartupFile": "flash:/1.cfg", + "nextSysSoft": "flash:/CE12800-V200R002C20_issuB071.cc", + "position": "5" + } +end_state: + description: k/v pairs of aaa params after module execution + returned: always + type: dict + sample: {"StartupInfos": null} +updates: + description: command sent to the device + returned: always + type: list + sample: {"startup patch 2.PAT all"} +''' + +import re +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.network.cloudengine.ce import ce_argument_spec, run_commands +from ansible.module_utils.connection import exec_command + + +class StartUp(object): + """ + Manages system startup information. + """ + + def __init__(self, argument_spec): + self.spec = argument_spec + self.module = None + self.init_module() + + # module input info + self.cfg_file = self.module.params['cfg_file'] + self.software_file = self.module.params['software_file'] + self.patch_file = self.module.params['patch_file'] + self.slot = self.module.params['slot'] + self.action = self.module.params['action'] + + # state + self.changed = False + self.updates_cmd = list() + self.results = dict() + self.existing = dict() + self.proposed = dict() + self.end_state = dict() + + # system startup info + self.startup_info = None + + def init_module(self): + """ init module """ + + self.module = AnsibleModule( + argument_spec=self.spec, supports_check_mode=True) + + def check_response(self, xml_str, xml_name): + """Check if response message is already succeed.""" + + if "" not in xml_str: + self.module.fail_json(msg='Error: %s failed.' 
% xml_name) + + def get_startup_dict(self): + """Retrieves the current config from the device or cache + """ + cmd = 'display startup' + rc, out, err = exec_command(self.module, cmd) + if rc != 0: + self.module.fail_json(msg=err) + cfg = str(out).strip() + + startup_info = dict() + startup_info["StartupInfos"] = list() + if not cfg: + return startup_info + else: + re_find = re.findall(r'(.*)\s*' + r'\s*Configured\s*startup\s*system\s*software:\s*(.*)' + r'\s*Startup\s*system\s*software:\s*(.*)' + r'\s*Next\s*startup\s*system\s*software:\s*(.*)' + r'\s*Startup\s*saved-configuration\s*file:\s*(.*)' + r'\s*Next\s*startup\s*saved-configuration\s*file:\s*(.*)' + r'\s*Startup\s*paf\s*file:\s*(.*)' + r'\s*Next\s*startup\s*paf\s*file:\s*(.*)' + r'\s*Startup\s*patch\s*package:\s*(.*)' + r'\s*Next\s*startup\s*patch\s*package:\s*(.*)', cfg) + + if re_find: + for mem in re_find: + startup_info["StartupInfos"].append( + dict(nextStartupFile=mem[5], configSysSoft=mem[1], curentSysSoft=mem[2], + nextSysSoft=mem[3], curentStartupFile=mem[4], curentPatchFile=mem[8], + nextPatchFile=mem[9], position=mem[0])) + return startup_info + return startup_info + + def get_cfg_filename_type(self, filename): + """Gets the type of cfg filename, such as cfg, zip, dat...""" + + if filename is None: + return None + if ' ' in filename: + self.module.fail_json( + msg='Error: Configuration file name includes spaces.') + + iftype = None + + if filename.endswith('.cfg'): + iftype = 'cfg' + elif filename.endswith('.zip'): + iftype = 'zip' + elif filename.endswith('.dat'): + iftype = 'dat' + else: + return None + return iftype.lower() + + def get_pat_filename_type(self, filename): + """Gets the type of patch filename, such as PAT...""" + + if filename is None: + return None + if ' ' in filename: + self.module.fail_json( + msg='Error: Patch file name includes spaces.') + + iftype = None + + if filename.endswith('.PAT'): + iftype = 'PAT' + else: + return None + return iftype.upper() + + def get_software_filename_type(self, filename): + """Gets the type of software filename, such as cc...""" + + if filename is None: + return None + if ' ' in filename: + self.module.fail_json( + msg='Error: Software file name includes spaces.') + + iftype = None + + if filename.endswith('.cc'): + iftype = 'cc' + else: + return None + return iftype.lower() + + def startup_next_cfg_file(self): + """set next cfg file""" + commands = list() + cmd = {'output': None, 'command': ''} + if self.slot: + cmd['command'] = "startup saved-configuration %s slot %s" % ( + self.cfg_file, self.slot) + commands.append(cmd) + self.updates_cmd.append( + "startup saved-configuration %s slot %s" % (self.cfg_file, self.slot)) + run_commands(self.module, commands) + self.changed = True + else: + cmd['command'] = "startup saved-configuration %s" % self.cfg_file + commands.append(cmd) + self.updates_cmd.append( + "startup saved-configuration %s" % self.cfg_file) + run_commands(self.module, commands) + self.changed = True + + def startup_next_software_file(self): + """set next software file""" + commands = list() + cmd = {'output': None, 'command': ''} + if self.slot: + if self.slot == "all" or self.slot == "slave-board": + cmd['command'] = "startup system-software %s %s" % ( + self.software_file, self.slot) + commands.append(cmd) + self.updates_cmd.append( + "startup system-software %s %s" % (self.software_file, self.slot)) + run_commands(self.module, commands) + self.changed = True + else: + cmd['command'] = "startup system-software %s slot %s" % ( 
self.software_file, self.slot) + commands.append(cmd) + self.updates_cmd.append( + "startup system-software %s slot %s" % (self.software_file, self.slot)) + run_commands(self.module, commands) + self.changed = True + + if not self.slot: + cmd['command'] = "startup system-software %s" % self.software_file + commands.append(cmd) + self.updates_cmd.append( + "startup system-software %s" % self.software_file) + run_commands(self.module, commands) + self.changed = True + + def startup_next_pat_file(self): + """set next patch file""" + + commands = list() + cmd = {'output': None, 'command': ''} + if self.slot: + if self.slot == "all": + cmd['command'] = "startup patch %s %s" % ( + self.patch_file, self.slot) + commands.append(cmd) + self.updates_cmd.append( + "startup patch %s %s" % (self.patch_file, self.slot)) + run_commands(self.module, commands) + self.changed = True + else: + cmd['command'] = "startup patch %s slot %s" % ( + self.patch_file, self.slot) + commands.append(cmd) + self.updates_cmd.append( + "startup patch %s slot %s" % (self.patch_file, self.slot)) + run_commands(self.module, commands) + self.changed = True + + if not self.slot: + cmd['command'] = "startup patch %s" % self.patch_file + commands.append(cmd) + self.updates_cmd.append( + "startup patch %s" % self.patch_file) + run_commands(self.module, commands) + self.changed = True + + def check_params(self): + """Check all input params""" + + # cfg_file check + if self.cfg_file: + if not self.get_cfg_filename_type(self.cfg_file): + self.module.fail_json( + msg='Error: Invalid cfg file name or cfg file name extension ( *.cfg, *.zip, *.dat ).') + + # software_file check + if self.software_file: + if not self.get_software_filename_type(self.software_file): + self.module.fail_json( + msg='Error: Invalid software file name or software file name extension ( *.cc).') + + # patch_file check + if self.patch_file: + if not self.get_pat_filename_type(self.patch_file): + self.module.fail_json( + msg='Error: Invalid patch file name or patch file name extension ( *.PAT ).') + + # slot check + if self.slot: + if self.slot.isdigit(): + if int(self.slot) <= 0 or int(self.slot) > 16: + self.module.fail_json( + msg='Error: The number of slot is not in the range from 1 to 16.') + else: + if len(self.slot) <= 0 or len(self.slot) > 32: + self.module.fail_json( + msg='Error: The length of slot is not in the range from 1 to 32.') + + def get_proposed(self): + """get proposed info""" + + if self.cfg_file: + self.proposed["cfg_file"] = self.cfg_file + if self.software_file: + self.proposed["system_file"] = self.software_file + if self.patch_file: + self.proposed["patch_file"] = self.patch_file + if self.slot: + self.proposed["slot"] = self.slot + + def get_existing(self): + """get existing info""" + + if not self.startup_info: + self.existing["StartupInfos"] = None + else: + self.existing["StartupInfos"] = self.startup_info["StartupInfos"] + + def get_end_state(self): + """get end state info""" + if not self.startup_info: + self.end_state["StartupInfos"] = None + else: + self.end_state["StartupInfos"] = self.startup_info["StartupInfos"] + if self.end_state == self.existing: + self.changed = False + + def work(self): + """worker""" + + self.check_params() + self.get_proposed() + + self.startup_info = self.get_startup_dict() + self.get_existing() + + startup_info = self.startup_info["StartupInfos"][0] + if self.cfg_file: + if self.cfg_file != startup_info["nextStartupFile"]: + self.startup_next_cfg_file() + + if self.software_file: + if self.software_file 
!= startup_info["nextSysSoft"]: + self.startup_next_software_file() + if self.patch_file: + if self.patch_file != startup_info["nextPatchFile"]: + self.startup_next_pat_file() + if self.action == "display": + self.startup_info = self.get_startup_dict() + + self.startup_info = self.get_startup_dict() + self.get_end_state() + + self.results['changed'] = self.changed + self.results['proposed'] = self.proposed + self.results['existing'] = self.existing + self.results['end_state'] = self.end_state + if self.changed: + self.results['updates'] = self.updates_cmd + else: + self.results['updates'] = list() + + self.module.exit_json(**self.results) + + +def main(): + """ Module main """ + + argument_spec = dict( + cfg_file=dict(type='str'), + software_file=dict(type='str'), + patch_file=dict(type='str'), + slot=dict(type='str'), + action=dict(type='str', choices=['display']) + ) + argument_spec.update(ce_argument_spec) + module = StartUp(argument_spec=argument_spec) + module.work() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/cloudengine/ce_static_route.py b/plugins/modules/network/cloudengine/ce_static_route.py new file mode 100644 index 0000000000..7a8ea88206 --- /dev/null +++ b/plugins/modules/network/cloudengine/ce_static_route.py @@ -0,0 +1,833 @@ +#!/usr/bin/python + +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: ce_static_route +short_description: Manages static route configuration on HUAWEI CloudEngine switches. +description: + - Manages the static routes on HUAWEI CloudEngine switches. +author: Yang yang (@QijunPan) +notes: + - If no vrf is supplied, vrf is set to default. + - If I(state=absent), the route will be removed, regardless of the non-required parameters. + - This module requires the netconf system service be enabled on the remote device being managed. + - Recommended connection is C(netconf). + - This module also works with C(local) connections for legacy playbooks. +options: + prefix: + description: + - Destination ip address of static route. + required: true + mask: + description: + - Destination ip mask of static route. + required: true + aftype: + description: + - Destination ip address family type of static route. + required: true + choices: ['v4','v6'] + next_hop: + description: + - Next hop address of static route. + nhp_interface: + description: + - Next hop interface full name of static route. + vrf: + description: + - VPN instance of destination ip address. + destvrf: + description: + - VPN instance of next hop ip address. + tag: + description: + - Route tag value (numeric). + description: + description: + - Name of the route. Used with the name parameter on the CLI. + pref: + description: + - Preference or administrative difference of route (range 1-255). + state: + description: + - Specify desired state of the resource. 
+ choices: ['present','absent'] + default: present +''' + +EXAMPLES = ''' +- name: static route module test + hosts: cloudengine + connection: local + gather_facts: no + vars: + cli: + host: "{{ inventory_hostname }}" + port: "{{ ansible_ssh_port }}" + username: "{{ username }}" + password: "{{ password }}" + transport: cli + + tasks: + + - name: Config a ipv4 static route, next hop is an address and that it has the proper description + ce_static_route: + prefix: 2.1.1.2 + mask: 24 + next_hop: 3.1.1.2 + description: 'Configured by Ansible' + aftype: v4 + provider: "{{ cli }}" + - name: Config a ipv4 static route ,next hop is an interface and that it has the proper description + ce_static_route: + prefix: 2.1.1.2 + mask: 24 + next_hop: 10GE1/0/1 + description: 'Configured by Ansible' + aftype: v4 + provider: "{{ cli }}" + - name: Config a ipv6 static route, next hop is an address and that it has the proper description + ce_static_route: + prefix: fc00:0:0:2001::1 + mask: 64 + next_hop: fc00:0:0:2004::1 + description: 'Configured by Ansible' + aftype: v6 + provider: "{{ cli }}" + - name: Config a ipv4 static route, next hop is an interface and that it has the proper description + ce_static_route: + prefix: fc00:0:0:2001::1 + mask: 64 + next_hop: 10GE1/0/1 + description: 'Configured by Ansible' + aftype: v6 + provider: "{{ cli }}" + - name: Config a VRF and set ipv4 static route, next hop is an address and that it has the proper description + ce_static_route: + vrf: vpna + prefix: 2.1.1.2 + mask: 24 + next_hop: 3.1.1.2 + description: 'Configured by Ansible' + aftype: v4 + provider: "{{ cli }}" +''' +RETURN = ''' +proposed: + description: k/v pairs of parameters passed into module + returned: always + type: dict + sample: {"next_hop": "3.3.3.3", "pref": "100", + "prefix": "192.168.20.642", "mask": "24", "description": "testing", + "vrf": "_public_"} +existing: + description: k/v pairs of existing switchport + returned: always + type: dict + sample: {} +end_state: + description: k/v pairs of switchport after module execution + returned: always + type: dict + sample: {"next_hop": "3.3.3.3", "pref": "100", + "prefix": "192.168.20.0", "mask": "24", "description": "testing", + "tag" : "null"} +updates: + description: command list sent to the device + returned: always + type: list + sample: ["ip route-static 192.168.20.0 255.255.255.0 3.3.3.3 preference 100 description testing"] +changed: + description: check to see if a change was made on the device + returned: always + type: bool + sample: true +''' + + +from xml.etree import ElementTree +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.network.cloudengine.ce import get_nc_config, set_nc_config, ce_argument_spec + +CE_NC_GET_STATIC_ROUTE = """ + + + + + + + + + + + + + + + + + + + + + +""" + +CE_NC_GET_STATIC_ROUTE_ABSENT = """ + + + + + + + + + + + + + + + + + + +""" + +CE_NC_SET_STATIC_ROUTE = """ + + + + + %s + %s + base + %s + %s + %s + %s + %s%s%s%s + + + + +""" +CE_NC_SET_DESCRIPTION = """ +%s +""" + +CE_NC_SET_PREFERENCE = """ +%s +""" + +CE_NC_SET_TAG = """ +%s +""" + +CE_NC_DELETE_STATIC_ROUTE = """ + + + + + %s + %s + base + %s + %s + %s + %s + %s + + + + +""" + + +def build_config_xml(xmlstr): + """build config xml""" + + return ' ' + xmlstr + ' ' + + +def is_valid_v4addr(addr): + """check if ipv4 addr is valid""" + if addr.find('.') != -1: + addr_list = addr.split('.') + if len(addr_list) != 4: + return False + for each_num in addr_list: + if not each_num.isdigit(): + 
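+                # Editor's note: a non-numeric octet makes the dotted-quad address invalid.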
return False + if int(each_num) > 255: + return False + return True + return False + + +def is_valid_v6addr(addr): + """check if ipv6 addr is valid""" + if addr.find(':') != -1: + addr_list = addr.split(':') + # The IPv6 binary system has a length of 128 bits and is grouped by 16 bits. + # Each group is separated by a colon ":" and can be divided into 8 groups, each group being represented by 4 hexadecimal + if len(addr_list) > 8: + return False + # You can use a double colon "::" to represent a group of 0 or more consecutive 0s, but only once. + if addr.count('::') > 1: + return False + # if do not use '::', the length of address should not be less than 8. + if addr.count('::') == 0 and len(addr_list) < 8: + return False + for group in addr_list: + if group.strip() == '': + continue + try: + # Each group is represented in 4-digit hexadecimal + int(group, base=16) + except ValueError: + return False + return True + return False + + +def is_valid_tag(tag): + """check if the tag is valid""" + + if not tag.isdigit(): + return False + + if int(tag) < 1 or int(tag) > 4294967295: + return False + + return True + + +def is_valid_preference(pref): + """check if the preference is valid""" + if pref.isdigit(): + return int(pref) > 0 and int(pref) < 256 + else: + return False + + +def is_valid_description(description): + """check if the description is valid""" + if description.find('?') != -1: + return False + if len(description) < 1 or len(description) > 255: + return False + return True + + +class StaticRoute(object): + """static route module""" + + def __init__(self, argument_spec, ): + self.spec = argument_spec + self.module = None + self.init_module() + + # static route info + self.prefix = self.module.params['prefix'] + self.mask = self.module.params['mask'] + self.aftype = self.module.params['aftype'] + self.next_hop = self.module.params['next_hop'] + self.nhp_interface = self.module.params['nhp_interface'] + if self.nhp_interface is None: + self.nhp_interface = "Invalid0" + self.tag = self.module.params['tag'] + self.description = self.module.params['description'] + self.state = self.module.params['state'] + self.pref = self.module.params['pref'] + + # vpn instance info + self.vrf = self.module.params['vrf'] + if self.vrf is None: + self.vrf = "_public_" + self.destvrf = self.module.params['destvrf'] + if self.destvrf is None: + self.destvrf = "_public_" + + # state + self.changed = False + self.updates_cmd = list() + self.results = dict() + self.proposed = dict() + self.existing = dict() + self.end_state = dict() + + self.static_routes_info = dict() + + def init_module(self): + """init module""" + + required_one_of = [["next_hop", "nhp_interface"]] + self.module = AnsibleModule( + argument_spec=self.spec, required_one_of=required_one_of, supports_check_mode=True) + + def check_response(self, xml_str, xml_name): + """check if response message is already succeed.""" + + if "" not in xml_str: + self.module.fail_json(msg='Error: %s failed.' % xml_name) + + def convert_len_to_mask(self, masklen): + """convert mask length to ip address mask, i.e. 
24 to 255.255.255.0""" + + mask_int = ["0"] * 4 + length = int(masklen) + + if length > 32: + self.module.fail_json(msg='IPv4 ipaddress mask length is invalid') + if length < 8: + mask_int[0] = str(int((0xFF << (8 - length % 8)) & 0xFF)) + if length >= 8: + mask_int[0] = '255' + mask_int[1] = str(int((0xFF << (16 - (length % 16))) & 0xFF)) + if length >= 16: + mask_int[1] = '255' + mask_int[2] = str(int((0xFF << (24 - (length % 24))) & 0xFF)) + if length >= 24: + mask_int[2] = '255' + mask_int[3] = str(int((0xFF << (32 - (length % 32))) & 0xFF)) + if length == 32: + mask_int[3] = '255' + + return '.'.join(mask_int) + + def convert_ip_prefix(self): + """convert prefix to real value i.e. 2.2.2.2/24 to 2.2.2.0/24""" + if self.aftype == "v4": + if self.prefix.find('.') == -1: + return False + if self.mask == '32': + return True + if self.mask == '0': + self.prefix = '0.0.0.0' + return True + addr_list = self.prefix.split('.') + length = len(addr_list) + if length > 4: + return False + for each_num in addr_list: + if not each_num.isdigit(): + return False + if int(each_num) > 255: + return False + byte_len = 8 + ip_len = int(self.mask) // byte_len + ip_bit = int(self.mask) % byte_len + else: + if self.prefix.find(':') == -1: + return False + if self.mask == '128': + return True + if self.mask == '0': + self.prefix = '::' + return True + addr_list = self.prefix.split(':') + length = len(addr_list) + if length > 6: + return False + byte_len = 16 + ip_len = int(self.mask) // byte_len + ip_bit = int(self.mask) % byte_len + + if self.aftype == "v4": + for i in range(ip_len + 1, length): + addr_list[i] = 0 + else: + for i in range(length - ip_len, length): + addr_list[i] = 0 + for j in range(0, byte_len - ip_bit): + if self.aftype == "v4": + addr_list[ip_len] = int(addr_list[ip_len]) & (0 << j) + else: + if addr_list[length - ip_len - 1] == "": + continue + addr_list[length - ip_len - + 1] = '0x%s' % addr_list[length - ip_len - 1] + addr_list[length - ip_len - + 1] = int(addr_list[length - ip_len - 1], 16) & (0 << j) + + if self.aftype == "v4": + self.prefix = '%s.%s.%s.%s' % (addr_list[0], addr_list[1], addr_list[2], addr_list[3]) + return True + else: + ipv6_addr_str = "" + for num in range(0, length - ip_len): + ipv6_addr_str += '%s:' % addr_list[num] + self.prefix = ipv6_addr_str + return True + + def set_update_cmd(self): + """set update command""" + if not self.changed: + return + if self.aftype == "v4": + aftype = "ip" + maskstr = self.convert_len_to_mask(self.mask) + else: + aftype = "ipv6" + maskstr = self.mask + if self.next_hop is None: + next_hop = '' + else: + next_hop = self.next_hop + if self.vrf == "_public_": + vrf = '' + else: + vrf = self.vrf + if self.destvrf == "_public_": + destvrf = '' + else: + destvrf = self.destvrf + if self.nhp_interface == "Invalid0": + nhp_interface = '' + else: + nhp_interface = self.nhp_interface + if self.state == "present": + if self.vrf != "_public_": + if self.destvrf != "_public_": + self.updates_cmd.append('%s route-static vpn-instance %s %s %s vpn-instance %s %s' + % (aftype, vrf, self.prefix, maskstr, destvrf, next_hop)) + else: + self.updates_cmd.append('%s route-static vpn-instance %s %s %s %s %s' + % (aftype, vrf, self.prefix, maskstr, nhp_interface, next_hop)) + elif self.destvrf != "_public_": + self.updates_cmd.append('%s route-static %s %s vpn-instance %s %s' + % (aftype, self.prefix, maskstr, self.destvrf, next_hop)) + else: + self.updates_cmd.append('%s route-static %s %s %s %s' + % (aftype, self.prefix, maskstr, nhp_interface, 
next_hop)) + if self.pref: + self.updates_cmd[0] += ' preference %s' % (self.pref) + if self.tag: + self.updates_cmd[0] += ' tag %s' % (self.tag) + if self.description: + self.updates_cmd[0] += ' description %s' % (self.description) + + if self.state == "absent": + if self.vrf != "_public_": + if self.destvrf != "_public_": + self.updates_cmd.append('undo %s route-static vpn-instance %s %s %s vpn-instance %s %s' + % (aftype, vrf, self.prefix, maskstr, destvrf, next_hop)) + else: + self.updates_cmd.append('undo %s route-static vpn-instance %s %s %s %s %s' + % (aftype, vrf, self.prefix, maskstr, nhp_interface, next_hop)) + elif self.destvrf != "_public_": + self.updates_cmd.append('undo %s route-static %s %s vpn-instance %s %s' + % (aftype, self.prefix, maskstr, self.destvrf, next_hop)) + else: + self.updates_cmd.append('undo %s route-static %s %s %s %s' + % (aftype, self.prefix, maskstr, nhp_interface, next_hop)) + + def operate_static_route(self, version, prefix, mask, nhp_interface, next_hop, vrf, destvrf, state): + """operate ipv4 static route""" + + description_xml = """\n""" + preference_xml = """\n""" + tag_xml = """\n""" + if next_hop is None: + next_hop = '0.0.0.0' + if nhp_interface is None: + nhp_interface = "Invalid0" + + if vrf is None: + vpn_instance = "_public_" + else: + vpn_instance = vrf + + if destvrf is None: + dest_vpn_instance = "_public_" + else: + dest_vpn_instance = destvrf + if self.description: + description_xml = CE_NC_SET_DESCRIPTION % self.description + if self.pref: + preference_xml = CE_NC_SET_PREFERENCE % self.pref + if self.tag: + tag_xml = CE_NC_SET_TAG % self.tag + + if state == "present": + configxmlstr = CE_NC_SET_STATIC_ROUTE % ( + vpn_instance, version, prefix, mask, nhp_interface, + dest_vpn_instance, next_hop, description_xml, preference_xml, tag_xml) + else: + configxmlstr = CE_NC_DELETE_STATIC_ROUTE % ( + vpn_instance, version, prefix, mask, nhp_interface, dest_vpn_instance, next_hop) + + conf_str = build_config_xml(configxmlstr) + + recv_xml = set_nc_config(self.module, conf_str) + self.check_response(recv_xml, "OPERATE_STATIC_ROUTE") + + def get_static_route(self, state): + """get ipv4 static route""" + + self.static_routes_info["sroute"] = list() + + if state == 'absent': + getxmlstr = CE_NC_GET_STATIC_ROUTE_ABSENT + else: + getxmlstr = CE_NC_GET_STATIC_ROUTE + + xml_str = get_nc_config(self.module, getxmlstr) + + if "<data/>" in xml_str: + return + xml_str = xml_str.replace('\r', '').replace('\n', '').\ + replace('xmlns="urn:ietf:params:xml:ns:netconf:base:1.0"', "").\ + replace('xmlns="http://www.huawei.com/netconf/vrp"', "") + root = ElementTree.fromstring(xml_str) + static_routes = root.findall( + "staticrt/staticrtbase/srRoutes/srRoute") + + if static_routes: + for static_route in static_routes: + static_info = dict() + for static_ele in static_route: + if static_ele.tag in ["vrfName", "afType", "topologyName", + "prefix", "maskLength", "destVrfName", + "nexthop", "ifName", "preference", "description"]: + static_info[ + static_ele.tag] = static_ele.text + if static_ele.tag == "tag": + if static_ele.text is not None: + static_info["tag"] = static_ele.text + else: + static_info["tag"] = "None" + self.static_routes_info["sroute"].append(static_info) + + def check_params(self): + """check all input params""" + + # check prefix and mask + if not self.mask.isdigit(): + self.module.fail_json(msg='Error: Mask is invalid.') + # ipv4 check + if self.aftype == "v4": + if int(self.mask) > 32 or int(self.mask) < 0: + self.module.fail_json( 
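+                    # Editor's note: mask length 0 (a default route) passes the range check above, hence the 0 - 32 wording.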
msg='Error: Ipv4 mask must be an integer between 0 and 32.') + # next_hop check + if self.next_hop: + if not is_valid_v4addr(self.next_hop): + self.module.fail_json( + msg='Error: The %s is not a valid address' % self.next_hop) + # ipv6 check + if self.aftype == "v6": + if int(self.mask) > 128 or int(self.mask) < 0: + self.module.fail_json( + msg='Error: Ipv6 mask must be an integer between 0 and 128.') + if self.next_hop: + if not is_valid_v6addr(self.next_hop): + self.module.fail_json( + msg='Error: The %s is not a valid address' % self.next_hop) + + # description check + if self.description: + if not is_valid_description(self.description): + self.module.fail_json( + msg='Error: Description length should be 1 - 255, and cannot contain "?".') + # tag check + if self.tag: + if not is_valid_tag(self.tag): + self.module.fail_json( + msg='Error: Tag should be integer 1 - 4294967295.') + # preference check + if self.pref: + if not is_valid_preference(self.pref): + self.module.fail_json( + msg='Error: Preference should be integer 1 - 255.') + if self.nhp_interface != "Invalid0" and self.destvrf != "_public_": + self.module.fail_json( + msg='Error: Destination vrf does not support an interface as the next hop.') + # convert prefix + if not self.convert_ip_prefix(): + self.module.fail_json( + msg='Error: The %s is not a valid address' % self.prefix) + + def set_ip_static_route(self): + """set ip static route""" + if not self.changed: + return + version = None + if self.aftype == "v4": + version = "ipv4unicast" + else: + version = "ipv6unicast" + self.operate_static_route(version, self.prefix, self.mask, self.nhp_interface, + self.next_hop, self.vrf, self.destvrf, self.state) + + def is_prefix_exist(self, static_route, version): + """is prefix mask next_hop exist""" + if static_route is None: + return False + if self.next_hop and self.nhp_interface: + return static_route["prefix"].lower() == self.prefix.lower() \ + and static_route["maskLength"] == self.mask \ + and static_route["afType"] == version \ + and static_route["ifName"].lower() == self.nhp_interface.lower() \ + and static_route["nexthop"].lower() == self.next_hop.lower() + + if self.next_hop and not self.nhp_interface: + return static_route["prefix"].lower() == self.prefix.lower() \ + and static_route["maskLength"] == self.mask \ + and static_route["afType"] == version \ + and static_route["nexthop"].lower() == self.next_hop.lower() + + if not self.next_hop and self.nhp_interface: + return static_route["prefix"].lower() == self.prefix.lower() \ + and static_route["maskLength"] == self.mask \ + and static_route["afType"] == version \ + and static_route["ifName"].lower() == self.nhp_interface.lower() + + def get_ip_static_route(self): + """get ip static route""" + + if self.aftype == "v4": + version = "ipv4unicast" + else: + version = "ipv6unicast" + change = False + self.get_static_route(self.state) + if self.state == 'present': + for static_route in self.static_routes_info["sroute"]: + if self.is_prefix_exist(static_route, version): + if self.vrf: + if static_route["vrfName"] != self.vrf: + change = True + if self.tag: + if static_route["tag"] != self.tag: + change = True + if self.destvrf: + if static_route["destVrfName"] != self.destvrf: + change = True + if self.description: + if static_route["description"] != self.description: + change = True + if self.pref: + if static_route["preference"] != self.pref: + change = True + if self.nhp_interface: + if static_route["ifName"].lower() != self.nhp_interface.lower(): + change = True + if 
self.next_hop: + if static_route["nexthop"].lower() != self.next_hop.lower(): + change = True + return change + else: + continue + change = True + else: + for static_route in self.static_routes_info["sroute"]: + if static_route["nexthop"] and self.next_hop: + if static_route["prefix"].lower() == self.prefix.lower() \ + and static_route["maskLength"] == self.mask \ + and static_route["nexthop"].lower() == self.next_hop.lower() \ + and static_route["afType"] == version: + change = True + return change + if static_route["ifName"] and self.nhp_interface: + if static_route["prefix"].lower() == self.prefix.lower() \ + and static_route["maskLength"] == self.mask \ + and static_route["ifName"].lower() == self.nhp_interface.lower() \ + and static_route["afType"] == version: + change = True + return change + else: + continue + change = False + return change + + def get_proposed(self): + """get proposed information""" + + self.proposed['prefix'] = self.prefix + self.proposed['mask'] = self.mask + self.proposed['afType'] = self.aftype + self.proposed['next_hop'] = self.next_hop + self.proposed['ifName'] = self.nhp_interface + self.proposed['vrfName'] = self.vrf + self.proposed['destVrfName'] = self.destvrf + if self.tag: + self.proposed['tag'] = self.tag + if self.description: + self.proposed['description'] = self.description + if self.pref is None: + self.proposed['preference'] = 60 + else: + self.proposed['preference'] = self.pref + self.proposed['state'] = self.state + + def get_existing(self): + """get existing information""" + + change = self.get_ip_static_route() + self.existing['sroute'] = self.static_routes_info["sroute"] + self.changed = bool(change) + + def get_end_state(self): + """get end state information""" + + self.get_static_route(self.state) + self.end_state['sroute'] = self.static_routes_info["sroute"] + if self.end_state == self.existing: + self.changed = False + + def work(self): + """worker""" + + self.check_params() + self.get_existing() + self.get_proposed() + self.set_ip_static_route() + self.set_update_cmd() + self.get_end_state() + self.results['changed'] = self.changed + self.results['proposed'] = self.proposed + self.results['existing'] = self.existing + self.results['end_state'] = self.end_state + if self.changed: + self.results['updates'] = self.updates_cmd + else: + self.results['updates'] = list() + + self.module.exit_json(**self.results) + + +def main(): + """main""" + + argument_spec = dict( + prefix=dict(required=True, type='str'), + mask=dict(required=True, type='str'), + aftype=dict(choices=['v4', 'v6'], required=True), + next_hop=dict(required=False, type='str'), + nhp_interface=dict(required=False, type='str'), + vrf=dict(required=False, type='str'), + destvrf=dict(required=False, type='str'), + tag=dict(required=False, type='str'), + description=dict(required=False, type='str'), + pref=dict(required=False, type='str'), + state=dict(choices=['absent', 'present'], + default='present', required=False), + ) + argument_spec.update(ce_argument_spec) + interface = StaticRoute(argument_spec) + interface.work() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/cloudengine/ce_static_route_bfd.py b/plugins/modules/network/cloudengine/ce_static_route_bfd.py new file mode 100644 index 0000000000..7cd571a2d3 --- /dev/null +++ b/plugins/modules/network/cloudengine/ce_static_route_bfd.py @@ -0,0 +1,1596 @@ +#!/usr/bin/python +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of 
the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: ce_static_route_bfd +short_description: Manages static route configuration on HUAWEI CloudEngine switches. +description: + - Manages the static routes on HUAWEI CloudEngine switches. +author: xuxiaowei0512 (@CloudEngine-Ansible) +notes: + - This module requires the netconf system service be enabled on the remote device being managed. + - Recommended connection is C(netconf). + - This module also works with C(local) connections for legacy playbooks. + - If no vrf is supplied, vrf is set to default. + - If I(state=absent), the route configuration will be removed, regardless of the non-required parameters. +options: + prefix: + description: + - Destination ip address of static route. + required: true + type: str + mask: + description: + - Destination ip mask of static route. + type: str + aftype: + description: + - Destination ip address family type of static route. + required: true + type: str + choices: ['v4','v6'] + next_hop: + description: + - Next hop address of static route. + type: str + nhp_interface: + description: + - Next hop interface full name of static route. + type: str + vrf: + description: + - VPN instance of destination ip address. + type: str + destvrf: + description: + - VPN instance of next hop ip address. + type: str + tag: + description: + - Route tag value (numeric). + type: int + description: + description: + - Name of the route. Used with the name parameter on the CLI. + type: str + pref: + description: + - Preference or administrative difference of route (range 1-255). + type: int + function_flag: + description: + - Used to distinguish between command line functions. + required: true + choices: ['globalBFD','singleBFD','dynamicBFD','staticBFD'] + type: str + min_tx_interval: + description: + - Set the minimum BFD session sending interval (range 50-1000). + type: int + min_rx_interval: + description: + - Set the minimum BFD receive interval (range 50-1000). + type: int + detect_multiplier: + description: + - Configure the BFD multiplier (range 3-50). + type: int + bfd_session_name: + description: + - bfd name (range 1-15). + type: str + commands: + description: + - Incoming command line is used to send sys,undo ip route-static default-bfd,commit. + type: list + state: + description: + - Specify desired state of the resource. 
+ required: false + choices: ['present','absent'] + type: str + default: present +''' + +EXAMPLES = ''' + #ip route-static bfd interface-type interface-number nexthop-address [ local-address address ] + #[ min-rx-interval min-rx-interval | min-tx-interval min-tx-interval | detect-multiplier multiplier ] + - name: Config an ip route-static bfd 10GE1/0/1 3.3.3.3 min-rx-interval 50 min-tx-interval 50 detect-multiplier 5 + ce_static_route_bfd: + function_flag: 'singleBFD' + nhp_interface: 10GE1/0/1 + next_hop: 3.3.3.3 + min_tx_interval: 50 + min_rx_interval: 50 + detect_multiplier: 5 + aftype: v4 + state: present + + #undo ip route-static bfd [ interface-type interface-number | vpn-instance vpn-instance-name ] nexthop-address + - name: undo ip route-static bfd 10GE1/0/1 3.3.3.4 + ce_static_route_bfd: + function_flag: 'singleBFD' + nhp_interface: 10GE1/0/1 + next_hop: 3.3.3.4 + aftype: v4 + state: absent + + #ip route-static default-bfd { min-rx-interval {min-rx-interval} | min-tx-interval {min-tx-interval} | detect-multiplier {multiplier}} + - name: Config an ip route-static default-bfd min-rx-interval 50 min-tx-interval 50 detect-multiplier 6 + ce_static_route_bfd: + function_flag: 'globalBFD' + min_tx_interval: 50 + min_rx_interval: 50 + detect_multiplier: 6 + aftype: v4 + state: present + + - name: undo ip route-static default-bfd + ce_static_route_bfd: + function_flag: 'globalBFD' + aftype: v4 + state: absent + commands: 'sys,undo ip route-static default-bfd,commit' + + - name: Config an ipv4 static route 2.2.2.0/24 2.2.2.1 preference 1 tag 2 description test for staticBFD + ce_static_route_bfd: + function_flag: 'staticBFD' + prefix: 2.2.2.2 + mask: 24 + next_hop: 2.2.2.1 + tag: 2 + description: test + pref: 1 + aftype: v4 + bfd_session_name: btoa + state: present +''' +RETURN = ''' +proposed: + description: k/v pairs of parameters passed into module + returned: always + type: dict + sample: {"function_flag": "staticBFD", "next_hop": "3.3.3.3", "pref": "100", + "prefix": "192.168.20.642", "mask": "24", "description": "testing", + "vrf": "_public_", "bfd_session_name": "btoa"} +existing: + description: k/v pairs of existing switchport + returned: always + type: dict + sample: {"function_flag": "", "next_hop": "", "pref": "101", + "prefix": "192.168.20.0", "mask": "24", "description": "testing", + "tag" : "null", "bfd_session_name": "btoa"} +end_state: + description: k/v pairs of switchport after module execution + returned: always + type: dict + sample: {"function_flag": "staticBFD", "next_hop": "3.3.3.3", "pref": "100", + "prefix": "192.168.20.0", "mask": "24", "description": "testing", + "tag" : "null", "bfd_session_name": "btoa"} +updates: + description: command list sent to the device + returned: always + type: list + sample: ["ip route-static 192.168.20.0 255.255.255.0 3.3.3.3 preference 100 description testing"] +changed: + description: check to see if a change was made on the device + returned: always + type: bool + sample: true +''' + +from xml.etree import ElementTree +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.six import string_types +from ansible_collections.community.general.plugins.module_utils.network.cloudengine.ce import get_nc_config, set_nc_config + +CE_NC_GET_STATIC_ROUTE_BFD_SESSIONNAME = """ + + + + + + + + + + + + + + + + + + + + + + +""" +# bfd enable +CE_NC_GET_STATIC_ROUTE_BFD_ENABLE = """ + + + + + + + + + + + + + + + + + + + + + + +""" + +CE_NC_GET_STATIC_ROUTE_BFD_ABSENT = """ + + + + + + %s + %s + %s + %s + + + + + +""" + 
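+# --- Editor's illustrative sketch; not part of the original module ----------
+# The _convertlentomask_ helper defined later in this file derives a dotted
+# decimal mask from a prefix length one octet at a time. A minimal,
+# self-contained rendering of that arithmetic is sketched below; the function
+# name is hypothetical and exists only for this example.
+def _demo_len_to_mask(length):
+    """Convert a prefix length to a dotted mask, e.g. 20 -> 255.255.240.0."""
+    octets = []
+    for i in range(4):
+        # Number of mask bits that fall inside octet i, clamped to 0..8.
+        bits = max(0, min(8, length - 8 * i))
+        octets.append(str((0xFF << (8 - bits)) & 0xFF))
+    return '.'.join(octets)
+
+# Under the assumptions above: _demo_len_to_mask(24) -> '255.255.255.0',
+# _demo_len_to_mask(20) -> '255.255.240.0', _demo_len_to_mask(0) -> '0.0.0.0'.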
+CE_NC_GET_STATIC_ROUTE_BFD = """ + + + + + + %s + %s + %s + %s + + + + + + + + + +""" +CE_NC_GET_STATIC_ROUTE_IPV4_GLOBAL_BFD = """ + + + + + + + + + + + +""" +CE_NC_GET_STATIC_ROUTE_ABSENT = """ + + + + + + + + + + + + + + + + + + +""" + +CE_NC_DELETE_STATIC_ROUTE_SINGLEBFD = """ + + + + + %s + %s + %s + %s + + + + +""" +CE_NC_SET_STATIC_ROUTE_SINGLEBFD = """ + + + + + %s + %s + %s + %s%s%s%s%s + + + + + +""" +CE_NC_SET_STATIC_ROUTE_SINGLEBFD_LOCALADRESS = """ +%s +""" +CE_NC_SET_IPV4_STATIC_ROUTE_BFDCOMMON_MINTX = """ +%s +""" +CE_NC_SET_IPV4_STATIC_ROUTE_BFDCOMMON_MINRX = """ +%s +""" +CE_NC_SET_IPV4_STATIC_ROUTE_BFDCOMMON_MUL = """ +%s +""" +CE_NC_SET_IPV4_STATIC_ROUTE_GLOBALBFD = """ + + + + %s%s%s + + + +""" + +CE_NC_SET_STATIC_ROUTE = """ + + + + + %s + %s + base + %s + %s + %s + %s + %s%s%s%s%s + + + + +""" +CE_NC_SET_DESCRIPTION = """ +%s +""" + +CE_NC_SET_PREFERENCE = """ +%s +""" + +CE_NC_SET_TAG = """ +%s +""" +CE_NC_SET_BFDSESSIONNAME = """ +%s +""" +CE_NC_SET_BFDENABLE = """ +true +""" +CE_NC_DELETE_STATIC_ROUTE = """ + + + + + %s + %s + base + %s + %s + %s + %s + %s + + + + +""" + + +def build_config_xml(xmlstr): + """build config xml""" + + return ' ' + xmlstr + ' ' + + +def is_valid_v4addr(addr): + """check if ipv4 addr is valid""" + if addr.find('.') != -1: + addr_list = addr.split('.') + if len(addr_list) != 4: + return False + for each_num in addr_list: + + if not each_num.isdigit(): + return False + if int(each_num) > 255: + return False + return True + return False + + +def is_valid_v6addr(addr): + """check if ipv6 addr is valid""" + if addr.find(':') != -1: + addr_list = addr.split(':') + if len(addr_list) > 6: + return False + if addr_list[1] == "": + return False + return True + return False + + +def is_valid_tag(tag): + """check if the tag is valid""" + + if int(tag) < 1 or int(tag) > 4294967295: + return False + return True + + +def is_valid_bdf_interval(interval): + """check if the min_tx_interva,min-rx-interval is valid""" + + if interval < 50 or interval > 1000: + return False + return True + + +def is_valid_bdf_multiplier(multiplier): + """check if the detect_multiplier is valid""" + + if multiplier < 3 or multiplier > 50: + return False + return True + + +def is_valid_bdf_session_name(session_name): + """check if the bfd_session_name is valid""" + if session_name.find(' ') != -1: + return False + if len(session_name) < 1 or len(session_name) > 15: + return False + return True + + +def is_valid_preference(pref): + """check if the preference is valid""" + + if int(pref) > 0 and int(pref) < 256: + return True + return False + + +def is_valid_description(description): + """check if the description is valid""" + if description.find('?') != -1: + return False + if len(description) < 1 or len(description) > 255: + return False + return True + + +def compare_command(commands): + """check if the commands is valid""" + if len(commands) < 3: + return True + if commands[0] != 'sys' or commands[1] != 'undo ip route-static default-bfd' \ + or commands[2] != 'commit': + return True + + +def get_to_lines(stdout): + """data conversion""" + lines = list() + for item in stdout: + if isinstance(item, string_types): + item = str(item).split('\n') + lines.append(item) + return lines + + +def get_change_state(oldvalue, newvalue, change): + """get change state""" + if newvalue is not None: + if oldvalue != str(newvalue): + change = True + else: + if oldvalue != newvalue: + change = True + return change + + +def get_xml(xml, value): + """operate xml""" + if value is None: + value = 
'' + else: + value = value + tempxml = xml % value + return tempxml + + +class StaticRouteBFD(object): + """static route module""" + + def __init__(self, argument_spec): + self.spec = argument_spec + self.module = None + self._initmodule_() + + # static route info + self.function_flag = self.module.params['function_flag'] + self.aftype = self.module.params['aftype'] + self.state = self.module.params['state'] + if self.aftype == "v4": + self.version = "ipv4unicast" + else: + self.version = "ipv6unicast" + if self.function_flag != 'globalBFD': + self.nhp_interface = self.module.params['nhp_interface'] + if self.nhp_interface is None: + self.nhp_interface = "Invalid0" + + self.destvrf = self.module.params['destvrf'] + if self.destvrf is None: + self.destvrf = "_public_" + + self.next_hop = self.module.params['next_hop'] + self.prefix = self.module.params['prefix'] + + if self.function_flag != 'globalBFD' and self.function_flag != 'singleBFD': + self.mask = self.module.params['mask'] + self.tag = self.module.params['tag'] + self.description = self.module.params['description'] + self.pref = self.module.params['pref'] + if self.pref is None: + self.pref = 60 + # vpn instance info + self.vrf = self.module.params['vrf'] + if self.vrf is None: + self.vrf = "_public_" + # bfd session name + self.bfd_session_name = self.module.params['bfd_session_name'] + + if self.function_flag == 'globalBFD' or self.function_flag == 'singleBFD': + self.min_tx_interval = self.module.params['min_tx_interval'] + self.min_rx_interval = self.module.params['min_rx_interval'] + self.detect_multiplier = self.module.params['detect_multiplier'] + if self.function_flag == 'globalBFD' and self.state == 'absent': + self.commands = self.module.params['commands'] + # state + self.changed = False + self.updates_cmd = list() + self.results = dict() + self.proposed = dict() + self.existing = dict() + self.end_state = dict() + + self.static_routes_info = dict() + + def _initmodule_(self): + """init module""" + + self.module = AnsibleModule( + argument_spec=self.spec, supports_check_mode=False) + + def _checkresponse_(self, xml_str, xml_name): + """check if response message is already succeed.""" + + if "" not in xml_str: + self.module.fail_json(msg='Error: %s failed.' % xml_name) + + def _convertlentomask_(self, masklen): + """convert mask length to ip address mask, i.e. 24 to 255.255.255.0""" + + mask_int = ["0"] * 4 + length = int(masklen) + + if length > 32: + self.module.fail_json(msg='IPv4 ipaddress mask length is invalid') + if length < 8: + mask_int[0] = str(int((0xFF << (8 - length % 8)) & 0xFF)) + if length >= 8: + mask_int[0] = '255' + mask_int[1] = str(int((0xFF << (16 - (length % 16))) & 0xFF)) + if length >= 16: + mask_int[1] = '255' + mask_int[2] = str(int((0xFF << (24 - (length % 24))) & 0xFF)) + if length >= 24: + mask_int[2] = '255' + mask_int[3] = str(int((0xFF << (32 - (length % 32))) & 0xFF)) + if length == 32: + mask_int[3] = '255' + + return '.'.join(mask_int) + + def _convertipprefix_(self): + """convert prefix to real value i.e. 
        2.2.2.2/24 to 2.2.2.0/24"""
+        if self.function_flag == 'singleBFD':
+            if self.aftype == "v4":
+                if self.prefix.find('.') == -1:
+                    return False
+                addr_list = self.prefix.split('.')
+                length = len(addr_list)
+                if length > 4:
+                    return False
+                for each_num in addr_list:
+                    if not each_num.isdigit():
+                        return False
+                    if int(each_num) > 255:
+                        return False
+                return True
+            else:
+                if self.prefix.find(':') == -1:
+                    return False
+        else:
+            if self.aftype == "v4":
+                if self.prefix.find('.') == -1:
+                    return False
+                if self.mask == '32':
+                    return True
+                if self.mask == '0':
+                    self.prefix = '0.0.0.0'
+                    return True
+                addr_list = self.prefix.split('.')
+                length = len(addr_list)
+                if length > 4:
+                    return False
+                for each_num in addr_list:
+                    if not each_num.isdigit():
+                        return False
+                    if int(each_num) > 255:
+                        return False
+                byte_len = 8
+                ip_len = int(self.mask) // byte_len
+                ip_bit = int(self.mask) % byte_len
+            else:
+                if self.prefix.find(':') == -1:
+                    return False
+                if self.mask == '128':
+                    return True
+                if self.mask == '0':
+                    self.prefix = '::'
+                    return True
+                addr_list = self.prefix.split(':')
+                length = len(addr_list)
+                # an IPv6 address has at most eight groups
+                if length > 8:
+                    return False
+                byte_len = 16
+                ip_len = int(self.mask) // byte_len
+                ip_bit = int(self.mask) % byte_len
+
+            if self.aftype == "v4":
+                for i in range(ip_len + 1, length):
+                    addr_list[i] = 0
+            else:
+                for i in range(length - ip_len, length):
+                    addr_list[i] = 0
+            # clear the host bits below the mask boundary, one bit at a time
+            for j in range(0, byte_len - ip_bit):
+                if self.aftype == "v4":
+                    addr_list[ip_len] = int(addr_list[ip_len]) & ~(1 << j)
+                else:
+                    if addr_list[length - ip_len - 1] == "":
+                        continue
+                    addr_list[length - ip_len - 1] = '0x%s' % addr_list[length - ip_len - 1]
+                    addr_list[length - ip_len - 1] = int(addr_list[length - ip_len - 1], 16) & ~(1 << j)
+
+            if self.aftype == "v4":
+                self.prefix = '%s.%s.%s.%s' % (addr_list[0], addr_list[1], addr_list[2], addr_list[3])
+                return True
+            if self.aftype == "v6":
+                ipv6_addr_str = ""
+                for num in range(0, length - ip_len):
+                    ipv6_addr_str += '%s:' % addr_list[num]
+                self.prefix = ipv6_addr_str
+
+                return True
+
+    def set_update_cmd_globalbfd(self):
+        """set globalBFD update command"""
+        if not self.changed:
+            return
+        if self.state == "present":
+            self.updates_cmd.append('ip route-static default-bfd')
+            if self.min_tx_interval:
+                self.updates_cmd.append(' min-tx-interval %s' % (self.min_tx_interval))
+            if self.min_rx_interval:
+                self.updates_cmd.append(' min-rx-interval %s' % (self.min_rx_interval))
+            if self.detect_multiplier:
+                self.updates_cmd.append(' detect-multiplier %s' % (self.detect_multiplier))
+        else:
+            self.updates_cmd.append('undo ip route-static default-bfd')
+
+    def set_update_cmd_singlebfd(self):
+        """set singleBFD update command"""
+        if not self.changed:
+            return
+        if self.next_hop is None:
+            next_hop = ''
+        else:
+            next_hop = self.next_hop
+
+        if self.destvrf == "_public_":
+            destvrf = ''
+        else:
+            destvrf = self.destvrf
+
+        if self.nhp_interface == "Invalid0":
+            nhp_interface = ''
+        else:
+            nhp_interface = self.nhp_interface
+        if self.prefix == "0.0.0.0":
+            prefix = ''
+        else:
+            prefix = self.prefix
+        if self.state == "present":
+            if nhp_interface:
+                self.updates_cmd.append('ip route-static bfd %s %s' % (nhp_interface, next_hop))
+            elif destvrf:
+                self.updates_cmd.append('ip route-static bfd vpn-instance %s %s' % (destvrf, next_hop))
+            else:
+                self.updates_cmd.append('ip route-static bfd %s' % (next_hop))
+            if prefix:
+                self.updates_cmd.append(' local-address %s' % (self.prefix))
+            if self.min_tx_interval:
+                self.updates_cmd.append(' min-tx-interval %s' % (self.min_tx_interval))
+            if self.min_rx_interval:
+                self.updates_cmd.append(' min-rx-interval %s' % (self.min_rx_interval))
+            if self.detect_multiplier:
+                self.updates_cmd.append(' detect-multiplier %s' % (self.detect_multiplier))
+        else:
+            if nhp_interface:
+                self.updates_cmd.append('undo ip route-static bfd %s %s' % (nhp_interface, next_hop))
+            elif destvrf:
+                self.updates_cmd.append('undo ip route-static bfd vpn-instance %s %s' % (destvrf, next_hop))
+            else:
+                self.updates_cmd.append('undo ip route-static bfd %s' % (next_hop))
+
+    def set_update_cmd(self):
+        """set update command"""
+        if not self.changed:
+            return
+
+        if self.aftype == "v4":
+            maskstr = self._convertlentomask_(self.mask)
+        else:
+            maskstr = self.mask
+        static_bfd_flag = True
+        if self.bfd_session_name:
+            static_bfd_flag = False
+        if self.next_hop is None:
+            next_hop = ''
+        else:
+            next_hop = self.next_hop
+        if self.vrf == "_public_":
+            vrf = ''
+        else:
+            vrf = self.vrf
+        if self.destvrf == "_public_":
+            destvrf = ''
+        else:
+            destvrf = self.destvrf
+        if self.nhp_interface == "Invalid0":
+            nhp_interface = ''
+        else:
+            nhp_interface = self.nhp_interface
+        if self.state == "present":
+            if self.vrf != "_public_":
+                if self.destvrf != "_public_":
+                    self.updates_cmd.append('ip route-static vpn-instance %s %s %s vpn-instance %s %s'
+                                            % (vrf, self.prefix, maskstr, destvrf, next_hop))
+                else:
+                    self.updates_cmd.append('ip route-static vpn-instance %s %s %s %s %s'
+                                            % (vrf, self.prefix, maskstr, nhp_interface, next_hop))
+            elif self.destvrf != "_public_":
+                self.updates_cmd.append('ip route-static %s %s vpn-instance %s %s'
+                                        % (self.prefix, maskstr, self.destvrf, next_hop))
+            else:
+                self.updates_cmd.append('ip route-static %s %s %s %s'
+                                        % (self.prefix, maskstr, nhp_interface, next_hop))
+            if self.pref != 60:
+                self.updates_cmd.append(' preference %s' % (self.pref))
+            if self.tag:
+                self.updates_cmd.append(' tag %s' % (self.tag))
+            if not static_bfd_flag:
+                self.updates_cmd.append(' track bfd-session %s' % (self.bfd_session_name))
+            else:
+                self.updates_cmd.append(' bfd enable')
+            if self.description:
+                self.updates_cmd.append(' description %s' % (self.description))
+
+        if self.state == "absent":
+            if self.vrf != "_public_":
+                if self.destvrf != "_public_":
+                    self.updates_cmd.append('undo ip route-static vpn-instance %s %s %s vpn-instance %s %s'
+                                            % (vrf, self.prefix, maskstr, destvrf, next_hop))
+                else:
+                    self.updates_cmd.append('undo ip route-static vpn-instance %s %s %s %s %s'
+                                            % (vrf, self.prefix, maskstr, nhp_interface, next_hop))
+            elif self.destvrf != "_public_":
+                self.updates_cmd.append('undo ip route-static %s %s vpn-instance %s %s'
+                                        % (self.prefix, maskstr, self.destvrf, next_hop))
+            else:
+                self.updates_cmd.append('undo ip route-static %s %s %s %s'
+                                        % (self.prefix, maskstr, nhp_interface, next_hop))
+
+    def operate_static_route_globalbfd(self):
+        """operate ipv4 static route globalBFD"""
+        min_tx_interval = self.min_tx_interval
+        min_rx_interval = self.min_rx_interval
+        multiplier = self.detect_multiplier
+        min_tx_interval_xml = """\n"""
+        min_rx_interval_xml = """\n"""
+        multiplier_xml = """\n"""
+        if self.state == "present":
+            if min_tx_interval is not None:
+                min_tx_interval_xml = CE_NC_SET_IPV4_STATIC_ROUTE_BFDCOMMON_MINTX % min_tx_interval
+            if min_rx_interval is not None:
+                min_rx_interval_xml = CE_NC_SET_IPV4_STATIC_ROUTE_BFDCOMMON_MINRX % min_rx_interval
+            if multiplier is not None:
+                multiplier_xml =
CE_NC_SET_IPV4_STATIC_ROUTE_BFDCOMMON_MUL % multiplier + + configxmlstr = CE_NC_SET_IPV4_STATIC_ROUTE_GLOBALBFD % ( + min_tx_interval_xml, min_rx_interval_xml, multiplier_xml) + conf_str = build_config_xml(configxmlstr) + recv_xml = set_nc_config(self.module, conf_str) + self._checkresponse_(recv_xml, "OPERATE_STATIC_ROUTE_globalBFD") + + if self.state == "absent" and self.commands: + min_tx_interval_xml = CE_NC_SET_IPV4_STATIC_ROUTE_BFDCOMMON_MINTX % 1000 + min_rx_interval_xml = CE_NC_SET_IPV4_STATIC_ROUTE_BFDCOMMON_MINRX % 1000 + multiplier_xml = CE_NC_SET_IPV4_STATIC_ROUTE_BFDCOMMON_MUL % 3 + + configxmlstr = CE_NC_SET_IPV4_STATIC_ROUTE_GLOBALBFD % ( + min_tx_interval_xml, min_rx_interval_xml, multiplier_xml) + conf_str = build_config_xml(configxmlstr) + recv_xml = set_nc_config(self.module, conf_str) + self._checkresponse_(recv_xml, "OPERATE_STATIC_ROUTE_globalBFD") + + def operate_static_route_singlebfd(self, version, prefix, nhp_interface, next_hop, destvrf, state): + """operate ipv4 static route singleBFD""" + min_tx_interval = self.min_tx_interval + min_rx_interval = self.min_rx_interval + multiplier = self.detect_multiplier + min_tx_interval_xml = """\n""" + min_rx_interval_xml = """\n""" + multiplier_xml = """\n""" + local_address_xml = """\n""" + if next_hop is None: + next_hop = '0.0.0.0' + + if destvrf is None: + dest_vpn_instance = "_public_" + else: + dest_vpn_instance = destvrf + + if nhp_interface is None: + nhp_interface = "Invalid0" + + if min_tx_interval is not None: + min_tx_interval_xml = CE_NC_SET_IPV4_STATIC_ROUTE_BFDCOMMON_MINTX % min_tx_interval + if min_rx_interval is not None: + min_rx_interval_xml = CE_NC_SET_IPV4_STATIC_ROUTE_BFDCOMMON_MINRX % min_rx_interval + if multiplier is not None: + multiplier_xml = CE_NC_SET_IPV4_STATIC_ROUTE_BFDCOMMON_MUL % multiplier + + if prefix is not None: + local_address_xml = CE_NC_SET_STATIC_ROUTE_SINGLEBFD_LOCALADRESS % prefix + + if state == "present": + configxmlstr = CE_NC_SET_STATIC_ROUTE_SINGLEBFD % ( + version, nhp_interface, dest_vpn_instance, + next_hop, local_address_xml, min_tx_interval_xml, + min_rx_interval_xml, multiplier_xml) + + else: + configxmlstr = CE_NC_DELETE_STATIC_ROUTE_SINGLEBFD % ( + version, nhp_interface, dest_vpn_instance, next_hop) + + conf_str = build_config_xml(configxmlstr) + + recv_xml = set_nc_config(self.module, conf_str) + self._checkresponse_(recv_xml, "OPERATE_STATIC_ROUTE_singleBFD") + + def operate_static_route(self, version, prefix, mask, nhp_interface, next_hop, vrf, destvrf, state): + """operate ipv4 static route""" + description_xml = """\n""" + preference_xml = """\n""" + tag_xml = """\n""" + bfd_xml = """\n""" + if next_hop is None: + next_hop = '0.0.0.0' + if nhp_interface is None: + nhp_interface = "Invalid0" + + if vrf is None: + vpn_instance = "_public_" + else: + vpn_instance = vrf + + if destvrf is None: + dest_vpn_instance = "_public_" + else: + dest_vpn_instance = destvrf + + description_xml = get_xml(CE_NC_SET_DESCRIPTION, self.description) + + preference_xml = get_xml(CE_NC_SET_PREFERENCE, self.pref) + + tag_xml = get_xml(CE_NC_SET_TAG, self.tag) + + if self.function_flag == 'staticBFD': + if self.bfd_session_name: + bfd_xml = CE_NC_SET_BFDSESSIONNAME % self.bfd_session_name + else: + bfd_xml = CE_NC_SET_BFDENABLE + if state == "present": + configxmlstr = CE_NC_SET_STATIC_ROUTE % ( + vpn_instance, version, prefix, mask, nhp_interface, + dest_vpn_instance, next_hop, description_xml, preference_xml, tag_xml, bfd_xml) + + else: + configxmlstr = CE_NC_DELETE_STATIC_ROUTE % ( 
+ vpn_instance, version, prefix, mask, nhp_interface, dest_vpn_instance, next_hop) + + conf_str = build_config_xml(configxmlstr) + recv_xml = set_nc_config(self.module, conf_str) + self._checkresponse_(recv_xml, "OPERATE_STATIC_ROUTE") + + def get_change_state_global_bfd(self): + """get ipv4 global bfd change state""" + + self.get_global_bfd(self.state) + change = False + if self.state == "present": + if self.static_routes_info["sroute_global_bfd"]: + for static_route in self.static_routes_info["sroute_global_bfd"]: + if static_route is not None: + if self.min_tx_interval is not None: + if int(static_route["minTxInterval"]) != self.min_tx_interval: + change = True + if self.min_rx_interval is not None: + if int(static_route["minRxInterval"]) != self.min_rx_interval: + change = True + if self.detect_multiplier is not None: + if int(static_route["multiplier"]) != self.detect_multiplier: + change = True + return change + else: + continue + else: + change = True + else: + if self.commands: + if self.static_routes_info["sroute_global_bfd"]: + for static_route in self.static_routes_info["sroute_global_bfd"]: + if static_route is not None: + if int(static_route["minTxInterval"]) != 1000 or \ + int(static_route["minRxInterval"]) != 1000 or \ + int(static_route["multiplier"]) != 3: + change = True + return change + + def get_global_bfd(self, state): + """get ipv4 global bfd""" + + self.static_routes_info["sroute_global_bfd"] = list() + + getglobalbfdxmlstr = None + if self.aftype == 'v4': + getglobalbfdxmlstr = CE_NC_GET_STATIC_ROUTE_IPV4_GLOBAL_BFD + + if getglobalbfdxmlstr is not None: + xml_global_bfd_str = get_nc_config(self.module, getglobalbfdxmlstr) + + if 'data/' in xml_global_bfd_str: + return + + xml_global_bfd_str = xml_global_bfd_str.replace('\r', '').replace('\n', ''). \ + replace('xmlns="urn:ietf:params:xml:ns:netconf:base:1.0"', ""). 
\ + replace('xmlns="http://www.huawei.com/netconf/vrp"', "") + root = ElementTree.fromstring(xml_global_bfd_str) + static_routes_global_bfd = root.findall( + "staticrt/staticrtbase/srIPv4StaticSite") + + if static_routes_global_bfd: + for static_route in static_routes_global_bfd: + static_info = dict() + for static_ele in static_route: + if static_ele.tag == "minTxInterval": + if static_ele.text is not None: + static_info["minTxInterval"] = static_ele.text + if static_ele.tag == "minRxInterval": + if static_ele.text is not None: + static_info["minRxInterval"] = static_ele.text + if static_ele.tag == "multiplier": + if static_ele.text is not None: + static_info["multiplier"] = static_ele.text + + self.static_routes_info["sroute_global_bfd"].append(static_info) + + def get_change_state_single_bfd(self): + """get ipv4 single bfd change state""" + + self.get_single_bfd(self.state) + change = False + version = self.version + if self.state == 'present': + if self.static_routes_info["sroute_single_bfd"]: + for static_route in self.static_routes_info["sroute_single_bfd"]: + if static_route is not None and static_route['afType'] == version: + if self.nhp_interface: + if static_route["ifName"].lower() != self.nhp_interface.lower(): + change = True + if self.destvrf: + if static_route["destVrfName"].lower() != self.destvrf.lower(): + change = True + if self.next_hop: + if static_route["nexthop"].lower() != self.next_hop.lower(): + change = True + if self.prefix: + if static_route["localAddress"].lower() != self.prefix.lower(): + change = True + if self.min_tx_interval: + if int(static_route["minTxInterval"]) != self.min_tx_interval: + change = True + if self.min_rx_interval: + if int(static_route["minRxInterval"]) != self.min_rx_interval: + change = True + if self.detect_multiplier: + if int(static_route["multiplier"]) != self.detect_multiplier: + change = True + return change + + else: + continue + else: + change = True + else: + for static_route in self.static_routes_info["sroute_single_bfd"]: + # undo ip route-static bfd [ interface-type interface-number | + # vpn-instance vpn-instance-name ] nexthop-address + + if static_route["ifName"] and self.nhp_interface: + if static_route["ifName"].lower() == self.nhp_interface.lower() \ + and static_route["nexthop"].lower() == self.next_hop.lower() \ + and static_route["afType"] == version: + change = True + return change + + if static_route["destVrfName"] and self.destvrf: + if static_route["destVrfName"].lower() == self.destvrf.lower() \ + and static_route["nexthop"].lower() == self.next_hop.lower() \ + and static_route["afType"] == version: + change = True + return change + + if static_route["nexthop"] and self.next_hop: + if static_route["nexthop"].lower() == self.next_hop.lower() \ + and static_route["afType"] == version: + change = True + return change + else: + continue + change = False + return change + + def get_single_bfd(self, state): + """get ipv4 sigle bfd""" + self.static_routes_info["sroute_single_bfd"] = list() + if self.aftype == "v4": + version = "ipv4unicast" + else: + version = "ipv6unicast" + if state == 'absent': + getbfdxmlstr = CE_NC_GET_STATIC_ROUTE_BFD_ABSENT % ( + version, self.nhp_interface, self.destvrf, self.next_hop) + else: + getbfdxmlstr = CE_NC_GET_STATIC_ROUTE_BFD % ( + version, self.nhp_interface, self.destvrf, self.next_hop) + xml_bfd_str = get_nc_config(self.module, getbfdxmlstr) + + if 'data/' in xml_bfd_str: + return + xml_bfd_str = xml_bfd_str.replace('\r', '').replace('\n', ''). 
\ + replace('xmlns="urn:ietf:params:xml:ns:netconf:base:1.0"', ""). \ + replace('xmlns="http://www.huawei.com/netconf/vrp"', "") + root = ElementTree.fromstring(xml_bfd_str) + static_routes_bfd = root.findall( + "staticrt/staticrtbase/srBfdParas/srBfdPara") + if static_routes_bfd: + for static_route in static_routes_bfd: + static_info = dict() + for static_ele in static_route: + if static_ele.tag in ["afType", "destVrfName", "nexthop", "ifName"]: + static_info[static_ele.tag] = static_ele.text + if static_ele.tag == "localAddress": + if static_ele.text is not None: + static_info["localAddress"] = static_ele.text + else: + static_info["localAddress"] = "None" + if static_ele.tag == "minTxInterval": + if static_ele.text is not None: + static_info["minTxInterval"] = static_ele.text + if static_ele.tag == "minRxInterval": + if static_ele.text is not None: + static_info["minRxInterval"] = static_ele.text + if static_ele.tag == "multiplier": + if static_ele.text is not None: + static_info["multiplier"] = static_ele.text + self.static_routes_info["sroute_single_bfd"].append(static_info) + + def get_static_route(self, state): + """get ipv4 static route about BFD""" + self.static_routes_info["sroute"] = list() + # Increase the parameter used to distinguish whether the incoming bfdSessionName + static_bfd_flag = True + if self.bfd_session_name: + static_bfd_flag = False + + if state == 'absent': + getxmlstr = CE_NC_GET_STATIC_ROUTE_ABSENT + else: + # self.static_bfd_flag is true + if static_bfd_flag: + getxmlstr = CE_NC_GET_STATIC_ROUTE_BFD_ENABLE + + else: + getxmlstr = CE_NC_GET_STATIC_ROUTE_BFD_SESSIONNAME + xml_str = get_nc_config(self.module, getxmlstr) + if 'data/' in xml_str: + return + xml_str = xml_str.replace('\r', '').replace('\n', ''). \ + replace('xmlns="urn:ietf:params:xml:ns:netconf:base:1.0"', ""). 
\
+            replace('xmlns="http://www.huawei.com/netconf/vrp"', "")
+        root = ElementTree.fromstring(xml_str)
+        static_routes = root.findall(
+            "staticrt/staticrtbase/srRoutes/srRoute")
+
+        if static_routes:
+            for static_route in static_routes:
+                static_info = dict()
+                for static_ele in static_route:
+                    if static_ele.tag in ["vrfName", "afType", "topologyName",
+                                          "prefix", "maskLength", "destVrfName",
+                                          "nexthop", "ifName", "preference", "description"]:
+                        static_info[static_ele.tag] = static_ele.text
+                    if static_ele.tag == "tag":
+                        if static_ele.text is not None:
+                            static_info["tag"] = static_ele.text
+                        else:
+                            static_info["tag"] = "None"
+                    if static_bfd_flag:
+                        if static_ele.tag == "bfdEnable":
+                            if static_ele.text is not None:
+                                static_info["bfdEnable"] = static_ele.text
+                            else:
+                                static_info["bfdEnable"] = "None"
+                    else:
+                        if static_ele.tag == "sessionName":
+                            if static_ele.text is not None:
+                                static_info["sessionName"] = static_ele.text
+                            else:
+                                static_info["sessionName"] = "None"
+                self.static_routes_info["sroute"].append(static_info)
+
+    def _checkparams_(self):
+        """check all input params"""
+        if self.function_flag == 'singleBFD':
+            if not self.next_hop:
+                self.module.fail_json(msg='Error: missing required argument: next_hop.')
+            if self.state != 'absent':
+                if self.nhp_interface == "Invalid0" and (not self.prefix or self.prefix == '0.0.0.0'):
+                    self.module.fail_json(msg='Error: If nhp_interface is not configured, '
+                                              'the prefix must be configured.')
+
+        if self.function_flag != 'globalBFD':
+            if self.function_flag == 'dynamicBFD' or self.function_flag == 'staticBFD':
+                if not self.mask:
+                    self.module.fail_json(msg='Error: missing required argument: mask.')
+                # check prefix and mask
+                if not self.mask.isdigit():
+                    self.module.fail_json(msg='Error: Mask is invalid.')
+            if self.function_flag != 'singleBFD' or (self.function_flag == 'singleBFD' and self.destvrf != "_public_"):
+                if not self.prefix:
+                    self.module.fail_json(msg='Error: missing required argument: prefix.')
+                # convert prefix
+                if not self._convertipprefix_():
+                    self.module.fail_json(msg='Error: The %s is not a valid address.' % self.prefix)
+
+            if self.nhp_interface != "Invalid0" and self.destvrf != "_public_":
+                self.module.fail_json(msg='Error: Destination vrf does not support an interface as the next hop.')
+
+            if not self.next_hop and self.nhp_interface == "Invalid0":
+                self.module.fail_json(msg='Error: one of the following is required: next_hop, nhp_interface.')
+
+        if self.function_flag == 'dynamicBFD' or self.function_flag == 'staticBFD':
+            # description check
+            if self.description:
+                if not is_valid_description(self.description):
+                    self.module.fail_json(
+                        msg='Error: Description length should be 1 - 35, and can not contain "?".')
+            # tag check
+            if self.tag is not None:
+                if not is_valid_tag(self.tag):
+                    self.module.fail_json(
+                        msg='Error: Tag should be integer 1 - 4294967295.')
+            # preference check
+            if self.pref is not None:
+                if not is_valid_preference(self.pref):
+                    self.module.fail_json(
+                        msg='Error: Preference should be integer 1 - 255.')
+
+            if self.function_flag == 'staticBFD':
+                if self.bfd_session_name:
+                    if not is_valid_bdf_session_name(self.bfd_session_name):
+                        self.module.fail_json(
+                            msg='Error: bfd_session_name length should be 1 - 15, and can not contain Space.')
+
+        # ipv4 check
+        if self.aftype == "v4":
+            if self.function_flag == 'dynamicBFD' or self.function_flag == 'staticBFD':
+                if int(self.mask) > 32 or int(self.mask) < 0:
+                    self.module.fail_json(
+                        msg='Error: Ipv4 mask must be an integer between 0 and 32.')
+            # next_hop check
+            if self.function_flag != 'globalBFD':
+                if self.next_hop:
+                    if not is_valid_v4addr(self.next_hop):
+                        self.module.fail_json(
+                            msg='Error: The %s is not a valid address.' % self.next_hop)
+        # ipv6 check
+        if self.aftype == "v6":
+            if self.function_flag == 'dynamicBFD' or self.function_flag == 'staticBFD':
+                if int(self.mask) > 128 or int(self.mask) < 0:
+                    self.module.fail_json(
+                        msg='Error: Ipv6 mask must be an integer between 0 and 128.')
+            if self.function_flag != 'globalBFD':
+                if self.next_hop:
+                    if not is_valid_v6addr(self.next_hop):
+                        self.module.fail_json(
+                            msg='Error: The %s is not a valid address.' % self.next_hop)
+
+        if self.function_flag == 'globalBFD' or self.function_flag == 'singleBFD':
+            # BFD params
+            if self.min_tx_interval:
+                if not is_valid_bdf_interval(self.min_tx_interval):
+                    self.module.fail_json(
+                        msg='Error: min_tx_interval should be integer 50 - 1000.')
+            if self.min_rx_interval:
+                if not is_valid_bdf_interval(self.min_rx_interval):
+                    self.module.fail_json(
+                        msg='Error: min_rx_interval should be integer 50 - 1000.')
+            if self.detect_multiplier:
+                if not is_valid_bdf_multiplier(self.detect_multiplier):
+                    self.module.fail_json(
+                        msg='Error: detect_multiplier should be integer 3 - 50.')
+
+        if self.function_flag == 'globalBFD':
+            if self.state != 'absent':
+                if not self.min_tx_interval and not self.min_rx_interval and not self.detect_multiplier:
+                    self.module.fail_json(
+                        msg='Error: one of the following is required: min_tx_interval, '
+                            'detect_multiplier, min_rx_interval.')
+            else:
+                if not self.commands:
+                    self.module.fail_json(
+                        msg='Error: missing required argument: commands.')
+                if compare_command(self.commands):
+                    self.module.fail_json(
+                        msg='Error: The command %s line is incorrect.' % ','.join(self.commands))
+
+    def set_ip_static_route_globalbfd(self):
+        """set ip static route globalBFD"""
+        if not self.changed:
+            return
+        if self.aftype == "v4":
+            self.operate_static_route_globalbfd()
+
+    def set_ip_static_route_singlebfd(self):
+        """set ip static route singleBFD"""
+        if not self.changed:
+            return
+        version = None
+        if self.aftype == "v4":
+            version = "ipv4unicast"
+        else:
+            version = "ipv6unicast"
+        self.operate_static_route_singlebfd(version, self.prefix, self.nhp_interface,
+                                            self.next_hop, self.destvrf, self.state)
+
+    def set_ip_static_route(self):
+        """set ip static route"""
+        if not self.changed:
+            return
+        version = None
+        if self.aftype == "v4":
+            version = "ipv4unicast"
+        else:
+            version = "ipv6unicast"
+        self.operate_static_route(version, self.prefix, self.mask, self.nhp_interface,
+                                  self.next_hop, self.vrf, self.destvrf, self.state)
+
+    def is_prefix_exist(self, static_route, version):
+        """check whether prefix, mask and next_hop already exist"""
+        if static_route is None:
+            return False
+        if self.next_hop and self.nhp_interface:
+            return static_route["prefix"].lower() == self.prefix.lower() \
+                and static_route["maskLength"] == self.mask \
+                and static_route["afType"] == version \
+                and static_route["ifName"].lower() == self.nhp_interface.lower() \
+                and static_route["nexthop"].lower() == self.next_hop.lower()
+
+        if self.next_hop and not self.nhp_interface:
+            return static_route["prefix"].lower() == self.prefix.lower() \
+                and static_route["maskLength"] == self.mask \
+                and static_route["afType"] == version \
+                and static_route["nexthop"].lower() == self.next_hop.lower()
+
+        if not self.next_hop and self.nhp_interface:
+            return static_route["prefix"].lower() == self.prefix.lower() \
+                and static_route["maskLength"] == self.mask \
and static_route["afType"] == version \ + and static_route["ifName"].lower() == self.nhp_interface.lower() + + def get_ip_static_route(self): + """get ip static route""" + change = False + version = self.version + self.get_static_route(self.state) + change_list = list() + if self.state == 'present': + for static_route in self.static_routes_info["sroute"]: + if self.is_prefix_exist(static_route, self.version): + info_dict = dict() + exist_dict = dict() + if self.vrf: + info_dict["vrfName"] = self.vrf + exist_dict["vrfName"] = static_route["vrfName"] + if self.destvrf: + info_dict["destVrfName"] = self.destvrf + exist_dict["destVrfName"] = static_route["destVrfName"] + if self.description: + info_dict["description"] = self.description + exist_dict["description"] = static_route["description"] + if self.tag: + info_dict["tag"] = self.tag + exist_dict["tag"] = static_route["tag"] + if self.pref: + info_dict["preference"] = str(self.pref) + exist_dict["preference"] = static_route["preference"] + if self.nhp_interface: + if self.nhp_interface.lower() == "invalid0": + info_dict["ifName"] = "Invalid0" + else: + info_dict["ifName"] = "Invalid0" + exist_dict["ifName"] = static_route["ifName"] + if self.next_hop: + info_dict["nexthop"] = self.next_hop + exist_dict["nexthop"] = static_route["nexthop"] + + if self.bfd_session_name: + info_dict["bfdEnable"] = 'true' + + else: + info_dict["bfdEnable"] = 'false' + exist_dict["bfdEnable"] = static_route["bfdEnable"] + + if exist_dict != info_dict: + change = True + else: + change = False + change_list.append(change) + + if False in change_list: + change = False + else: + change = True + return change + + else: + for static_route in self.static_routes_info["sroute"]: + if static_route["nexthop"] and self.next_hop: + if static_route["prefix"].lower() == self.prefix.lower() \ + and static_route["maskLength"] == self.mask \ + and static_route["nexthop"].lower() == self.next_hop.lower() \ + and static_route["afType"] == version: + change = True + return change + if static_route["ifName"] and self.nhp_interface: + if static_route["prefix"].lower() == self.prefix.lower() \ + and static_route["maskLength"] == self.mask \ + and static_route["ifName"].lower() == self.nhp_interface.lower() \ + and static_route["afType"] == version: + change = True + return change + else: + continue + change = False + return change + + def get_proposed(self): + """get proposed information""" + self.proposed['afType'] = self.aftype + self.proposed['state'] = self.state + if self.function_flag != 'globalBFD': + self.proposed['ifName'] = self.nhp_interface + self.proposed['destVrfName'] = self.destvrf + self.proposed['next_hop'] = self.next_hop + + if self.function_flag == 'singleBFD': + if self.prefix: + self.proposed['localAddress'] = self.prefix + + if self.function_flag == 'globalBFD' or self.function_flag == 'singleBFD': + self.proposed['minTxInterval'] = self.min_tx_interval + self.proposed['minRxInterval'] = self.min_rx_interval + self.proposed['multiplier'] = self.detect_multiplier + + if self.function_flag != 'globalBFD' and self.function_flag != 'singleBFD': + self.proposed['prefix'] = self.prefix + self.proposed['mask'] = self.mask + self.proposed['vrfName'] = self.vrf + if self.tag: + self.proposed['tag'] = self.tag + if self.description: + self.proposed['description'] = self.description + if self.pref is None: + self.proposed['preference'] = 60 + else: + self.proposed['preference'] = self.pref + + static_bfd_flag = True + if self.bfd_session_name: + static_bfd_flag = False + 
if not static_bfd_flag: + self.proposed['sessionName'] = self.bfd_session_name + else: + self.proposed['bfdEnable'] = 'true' + + def get_existing(self): + """get existing information""" + # globalBFD + if self.function_flag == 'globalBFD': + change = self.get_change_state_global_bfd() + self.existing['sroute_global_bfd'] = self.static_routes_info["sroute_global_bfd"] + # singleBFD + elif self.function_flag == 'singleBFD': + change = self.get_change_state_single_bfd() + self.existing['sroute_single_bfd'] = self.static_routes_info["sroute_single_bfd"] + # dynamicBFD / staticBFD + else: + change = self.get_ip_static_route() + self.existing['static_sroute'] = self.static_routes_info["sroute"] + self.changed = bool(change) + + def get_end_state(self): + """get end state information""" + + # globalBFD + if self.function_flag == 'globalBFD': + self.get_global_bfd(self.state) + self.end_state['sroute_global_bfd'] = self.static_routes_info["sroute_global_bfd"] + # singleBFD + elif self.function_flag == 'singleBFD': + self.static_routes_info["sroute_single_bfd"] = list() + self.get_single_bfd(self.state) + self.end_state['sroute_single_bfd'] = self.static_routes_info["sroute_single_bfd"] + # dynamicBFD / staticBFD + else: + self.get_static_route(self.state) + self.end_state['static_sroute'] = self.static_routes_info["sroute"] + + def work(self): + """worker""" + self._checkparams_() + self.get_existing() + self.get_proposed() + + if self.function_flag == 'globalBFD': + self.set_ip_static_route_globalbfd() + self.set_update_cmd_globalbfd() + elif self.function_flag == 'singleBFD': + self.set_ip_static_route_singlebfd() + self.set_update_cmd_singlebfd() + else: + self.set_ip_static_route() + self.set_update_cmd() + + self.get_end_state() + if self.existing == self.end_state: + self.changed = False + self.results['changed'] = self.changed + self.results['proposed'] = self.proposed + self.results['existing'] = self.existing + self.results['end_state'] = self.end_state + if self.changed: + self.results['updates'] = self.updates_cmd + else: + self.results['updates'] = list() + + self.module.exit_json(**self.results) + + +def main(): + """main""" + + argument_spec = dict( + prefix=dict(type='str'), + mask=dict(type='str'), + aftype=dict(choices=['v4', 'v6'], required=True), + next_hop=dict(type='str'), + nhp_interface=dict(type='str'), + vrf=dict(type='str'), + destvrf=dict(type='str'), + tag=dict(type='int'), + description=dict(type='str'), + pref=dict(type='int'), + # bfd + function_flag=dict(required=True, choices=['globalBFD', 'singleBFD', 'dynamicBFD', 'staticBFD']), + min_tx_interval=dict(type='int'), + min_rx_interval=dict(type='int'), + detect_multiplier=dict(type='int'), + # bfd session name + bfd_session_name=dict(type='str'), + commands=dict(type='list', required=False), + state=dict(choices=['absent', 'present'], default='present', required=False), + ) + interface = StaticRouteBFD(argument_spec) + interface.work() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/cloudengine/ce_stp.py b/plugins/modules/network/cloudengine/ce_stp.py new file mode 100644 index 0000000000..a9c690305d --- /dev/null +++ b/plugins/modules/network/cloudengine/ce_stp.py @@ -0,0 +1,973 @@ +#!/usr/bin/python +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. 
+# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: ce_stp +short_description: Manages STP configuration on HUAWEI CloudEngine switches. +description: + - Manages STP configurations on HUAWEI CloudEngine switches. +author: + - wangdezhuang (@QijunPan) +notes: + - Recommended connection is C(network_cli). + - This module also works with C(local) connections for legacy playbooks. +options: + state: + description: + - Specify desired state of the resource. + default: present + choices: ['present', 'absent'] + stp_mode: + description: + - Set an operation mode for the current MSTP process. + The mode can be STP, RSTP, or MSTP. + choices: ['stp', 'rstp', 'mstp'] + stp_enable: + description: + - Enable or disable STP on a switch. + choices: ['enable', 'disable'] + stp_converge: + description: + - STP convergence mode. + Fast means set STP aging mode to Fast. + Normal means set STP aging mode to Normal. + choices: ['fast', 'normal'] + bpdu_protection: + description: + - Configure BPDU protection on an edge port. + This function prevents network flapping caused by attack packets. + choices: ['enable', 'disable'] + tc_protection: + description: + - Configure the TC BPDU protection function for an MSTP process. + choices: ['enable', 'disable'] + tc_protection_interval: + description: + - Set the time the MSTP device takes to handle the maximum number of TC BPDUs + and immediately refresh forwarding entries. + The value is an integer ranging from 1 to 600, in seconds. + tc_protection_threshold: + description: + - Set the maximum number of TC BPDUs that the MSTP can handle. + The value is an integer ranging from 1 to 255. The default value is 1 on the switch. + interface: + description: + - Interface name. + If the value is C(all), will apply configuration to all interfaces. + if the value is a special name, only support input the full name. + edged_port: + description: + - Set the current port as an edge port. + choices: ['enable', 'disable'] + bpdu_filter: + description: + - Specify a port as a BPDU filter port. + choices: ['enable', 'disable'] + cost: + description: + - Set the path cost of the current port. + The default instance is 0. + root_protection: + description: + - Enable root protection on the current port. + choices: ['enable', 'disable'] + loop_protection: + description: + - Enable loop protection on the current port. 
+ choices: ['enable', 'disable'] +''' + +EXAMPLES = ''' + +- name: CloudEngine stp test + hosts: cloudengine + connection: local + gather_facts: no + vars: + cli: + host: "{{ inventory_hostname }}" + port: "{{ ansible_ssh_port }}" + username: "{{ username }}" + password: "{{ password }}" + transport: cli + + tasks: + + - name: "Config stp mode" + ce_stp: + state: present + stp_mode: stp + provider: "{{ cli }}" + + - name: "Undo stp mode" + ce_stp: + state: absent + stp_mode: stp + provider: "{{ cli }}" + + - name: "Enable bpdu protection" + ce_stp: + state: present + bpdu_protection: enable + provider: "{{ cli }}" + + - name: "Disable bpdu protection" + ce_stp: + state: present + bpdu_protection: disable + provider: "{{ cli }}" +''' + +RETURN = ''' +changed: + description: check to see if a change was made on the device + returned: always + type: bool + sample: true +proposed: + description: k/v pairs of parameters passed into module + returned: always + type: dict + sample: {"bpdu_protection": "enable", + "state": "present"} +existing: + description: k/v pairs of existing aaa server + returned: always + type: dict + sample: {"bpdu_protection": "disable"} +end_state: + description: k/v pairs of aaa params after module execution + returned: always + type: dict + sample: {"bpdu_protection": "enable"} +updates: + description: command sent to the device + returned: always + type: list + sample: ["stp bpdu-protection"] +''' + +import re +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.network.cloudengine.ce import exec_command, load_config, ce_argument_spec + + +def get_config(module, flags): + + """Retrieves the current config from the device or cache""" + + flags = [] if flags is None else flags + + cmd = 'display current-configuration ' + cmd += ' '.join(flags) + cmd = cmd.strip() + + rc, out, err = exec_command(module, cmd) + if rc != 0: + module.fail_json(msg=err) + config = str(out).strip() + if config.startswith("display"): + configs = config.split("\n") + if len(configs) > 1: + return "\n".join(configs[1:]) + else: + return "" + else: + return config + + +class Stp(object): + """ Manages stp/rstp/mstp configuration """ + + def __init__(self, **kwargs): + """ Stp module init """ + + # module + argument_spec = kwargs["argument_spec"] + self.spec = argument_spec + self.module = AnsibleModule(argument_spec=self.spec, supports_check_mode=True) + + # config + self.cur_cfg = dict() + self.stp_cfg = None + self.interface_stp_cfg = None + + # module args + self.state = self.module.params['state'] or None + self.stp_mode = self.module.params['stp_mode'] or None + self.stp_enable = self.module.params['stp_enable'] or None + self.stp_converge = self.module.params['stp_converge'] or None + self.interface = self.module.params['interface'] or None + self.edged_port = self.module.params['edged_port'] or None + self.bpdu_filter = self.module.params['bpdu_filter'] or None + self.cost = self.module.params['cost'] or None + self.bpdu_protection = self.module.params['bpdu_protection'] or None + self.tc_protection = self.module.params['tc_protection'] or None + self.tc_protection_interval = self.module.params['tc_protection_interval'] or None + self.tc_protection_threshold = self.module.params['tc_protection_threshold'] or None + self.root_protection = self.module.params['root_protection'] or None + self.loop_protection = self.module.params['loop_protection'] or None + + # state + self.changed = False + self.updates_cmd = list() + 
        self.results = dict()
+        self.proposed = dict()
+        self.existing = dict()
+        self.end_state = dict()
+
+    def cli_load_config(self, commands):
+        """ Cli load configuration """
+
+        if not self.module.check_mode:
+            load_config(self.module, commands)
+
+    def cli_get_stp_config(self):
+        """ Cli get stp configuration """
+
+        flags = [r"| section include #\s*\n\s*stp", r"| section exclude #\s*\n+\s*stp process \d+"]
+        self.stp_cfg = get_config(self.module, flags)
+
+    def cli_get_interface_stp_config(self):
+        """ Cli get interface's stp configuration """
+
+        if self.interface:
+            regular = r"| ignore-case section include ^#\s+interface %s\s+" % self.interface.replace(" ", "")
+            flags = list()
+            flags.append(regular)
+            tmp_cfg = get_config(self.module, flags)
+
+            if not tmp_cfg:
+                self.module.fail_json(
+                    msg='Error: The interface %s does not exist.' % self.interface)
+
+            if "undo portswitch" in tmp_cfg:
+                self.module.fail_json(
+                    msg='Error: The interface %s is not in switch mode.' % self.interface)
+
+            self.interface_stp_cfg = tmp_cfg
+
+    def check_params(self):
+        """ Check module params """
+
+        if self.cost:
+            if self.cost.isdigit():
+                if int(self.cost) < 1 or int(self.cost) > 200000000:
+                    self.module.fail_json(
+                        msg='Error: The value of cost is out of [1 - 200000000].')
+            else:
+                self.module.fail_json(
+                    msg='Error: The cost is not a digit.')
+
+        if self.tc_protection_interval:
+            if self.tc_protection_interval.isdigit():
+                if int(self.tc_protection_interval) < 1 or int(self.tc_protection_interval) > 600:
+                    self.module.fail_json(
+                        msg='Error: The value of tc_protection_interval is out of [1 - 600].')
+            else:
+                self.module.fail_json(
+                    msg='Error: The tc_protection_interval is not a digit.')
+
+        if self.tc_protection_threshold:
+            if self.tc_protection_threshold.isdigit():
+                if int(self.tc_protection_threshold) < 1 or int(self.tc_protection_threshold) > 255:
+                    self.module.fail_json(
+                        msg='Error: The value of tc_protection_threshold is out of [1 - 255].')
+            else:
+                self.module.fail_json(
+                    msg='Error: The tc_protection_threshold is not a digit.')
+
+        if self.root_protection or self.loop_protection or self.cost:
+            if not self.interface:
+                self.module.fail_json(
+                    msg='Error: Please input interface.')
+            elif self.interface == "all":
+                self.module.fail_json(
+                    msg='Error: Interface can not be all when configuring root_protection, loop_protection or cost.')
+
+        if self.root_protection and self.root_protection == "enable":
+            if self.loop_protection and self.loop_protection == "enable":
+                self.module.fail_json(
+                    msg='Error: Can not enable root_protection and loop_protection on the same interface.')
+
+        if self.edged_port or self.bpdu_filter:
+            if not self.interface:
+                self.module.fail_json(
+                    msg='Error: Please input interface.')
+
+    def get_proposed(self):
+        """ Get module proposed """
+
+        self.proposed["state"] = self.state
+
+        if self.stp_mode:
+            self.proposed["stp_mode"] = self.stp_mode
+        if self.stp_enable:
+            self.proposed["stp_enable"] = self.stp_enable
+        if self.stp_converge:
+            self.proposed["stp_converge"] = self.stp_converge
+        if self.interface:
+            self.proposed["interface"] = self.interface
+        if self.edged_port:
+            self.proposed["edged_port"] = self.edged_port
+        if self.bpdu_filter:
+            self.proposed["bpdu_filter"] = self.bpdu_filter
+        if self.cost:
+            self.proposed["cost"] = self.cost
+        if self.bpdu_protection:
+            self.proposed["bpdu_protection"] = self.bpdu_protection
+        if self.tc_protection:
+            self.proposed["tc_protection"] = self.tc_protection
+        if self.tc_protection_interval:
self.proposed["tc_protection_interval"] = self.tc_protection_interval + if self.tc_protection_threshold: + self.proposed["tc_protection_threshold"] = self.tc_protection_threshold + if self.root_protection: + self.proposed["root_protection"] = self.root_protection + if self.loop_protection: + self.proposed["loop_protection"] = self.loop_protection + + def get_existing(self): + """ Get existing configuration """ + + self.cli_get_stp_config() + if self.interface and self.interface != "all": + self.cli_get_interface_stp_config() + + if self.stp_mode: + if "stp mode stp" in self.stp_cfg: + self.cur_cfg["stp_mode"] = "stp" + self.existing["stp_mode"] = "stp" + elif "stp mode rstp" in self.stp_cfg: + self.cur_cfg["stp_mode"] = "rstp" + self.existing["stp_mode"] = "rstp" + else: + self.cur_cfg["stp_mode"] = "mstp" + self.existing["stp_mode"] = "mstp" + + if self.stp_enable: + if "stp disable" in self.stp_cfg: + self.cur_cfg["stp_enable"] = "disable" + self.existing["stp_enable"] = "disable" + else: + self.cur_cfg["stp_enable"] = "enable" + self.existing["stp_enable"] = "enable" + + if self.stp_converge: + if "stp converge fast" in self.stp_cfg: + self.cur_cfg["stp_converge"] = "fast" + self.existing["stp_converge"] = "fast" + else: + self.cur_cfg["stp_converge"] = "normal" + self.existing["stp_converge"] = "normal" + + if self.edged_port: + if self.interface == "all": + if "stp edged-port default" in self.stp_cfg: + self.cur_cfg["edged_port"] = "enable" + self.existing["edged_port"] = "enable" + else: + self.cur_cfg["edged_port"] = "disable" + self.existing["edged_port"] = "disable" + else: + if "stp edged-port enable" in self.interface_stp_cfg: + self.cur_cfg["edged_port"] = "enable" + self.existing["edged_port"] = "enable" + else: + self.cur_cfg["edged_port"] = "disable" + self.existing["edged_port"] = "disable" + + if self.bpdu_filter: + if self.interface == "all": + if "stp bpdu-filter default" in self.stp_cfg: + self.cur_cfg["bpdu_filter"] = "enable" + self.existing["bpdu_filter"] = "enable" + else: + self.cur_cfg["bpdu_filter"] = "disable" + self.existing["bpdu_filter"] = "disable" + else: + if "stp bpdu-filter enable" in self.interface_stp_cfg: + self.cur_cfg["bpdu_filter"] = "enable" + self.existing["bpdu_filter"] = "enable" + else: + self.cur_cfg["bpdu_filter"] = "disable" + self.existing["bpdu_filter"] = "disable" + + if self.bpdu_protection: + if "stp bpdu-protection" in self.stp_cfg: + self.cur_cfg["bpdu_protection"] = "enable" + self.existing["bpdu_protection"] = "enable" + else: + self.cur_cfg["bpdu_protection"] = "disable" + self.existing["bpdu_protection"] = "disable" + + if self.tc_protection: + pre_cfg = self.stp_cfg.split("\n") + if "stp tc-protection" in pre_cfg: + self.cur_cfg["tc_protection"] = "enable" + self.existing["tc_protection"] = "enable" + else: + self.cur_cfg["tc_protection"] = "disable" + self.existing["tc_protection"] = "disable" + + if self.tc_protection_interval: + if "stp tc-protection interval" in self.stp_cfg: + tmp_value = re.findall(r'stp tc-protection interval (.*)', self.stp_cfg) + if not tmp_value: + self.module.fail_json( + msg='Error: Can not find tc-protection interval on the device.') + self.cur_cfg["tc_protection_interval"] = tmp_value[0] + self.existing["tc_protection_interval"] = tmp_value[0] + else: + self.cur_cfg["tc_protection_interval"] = "null" + self.existing["tc_protection_interval"] = "null" + + if self.tc_protection_threshold: + if "stp tc-protection threshold" in self.stp_cfg: + tmp_value = re.findall(r'stp tc-protection threshold (.*)', 
self.stp_cfg) + if not tmp_value: + self.module.fail_json( + msg='Error: Can not find tc-protection threshold on the device.') + self.cur_cfg["tc_protection_threshold"] = tmp_value[0] + self.existing["tc_protection_threshold"] = tmp_value[0] + else: + self.cur_cfg["tc_protection_threshold"] = "1" + self.existing["tc_protection_threshold"] = "1" + + if self.cost: + tmp_value = re.findall(r'stp instance (.*) cost (.*)', self.interface_stp_cfg) + if not tmp_value: + self.cur_cfg["cost"] = "null" + self.existing["cost"] = "null" + else: + self.cur_cfg["cost"] = tmp_value[0][1] + self.existing["cost"] = tmp_value[0][1] + + # root_protection and loop_protection should get configuration at the same time + if self.root_protection or self.loop_protection: + if "stp root-protection" in self.interface_stp_cfg: + self.cur_cfg["root_protection"] = "enable" + self.existing["root_protection"] = "enable" + else: + self.cur_cfg["root_protection"] = "disable" + self.existing["root_protection"] = "disable" + + if "stp loop-protection" in self.interface_stp_cfg: + self.cur_cfg["loop_protection"] = "enable" + self.existing["loop_protection"] = "enable" + else: + self.cur_cfg["loop_protection"] = "disable" + self.existing["loop_protection"] = "disable" + + def get_end_state(self): + """ Get end state """ + + self.cli_get_stp_config() + if self.interface and self.interface != "all": + self.cli_get_interface_stp_config() + + if self.stp_mode: + if "stp mode stp" in self.stp_cfg: + self.end_state["stp_mode"] = "stp" + elif "stp mode rstp" in self.stp_cfg: + self.end_state["stp_mode"] = "rstp" + else: + self.end_state["stp_mode"] = "mstp" + + if self.stp_enable: + if "stp disable" in self.stp_cfg: + self.end_state["stp_enable"] = "disable" + else: + self.end_state["stp_enable"] = "enable" + + if self.stp_converge: + if "stp converge fast" in self.stp_cfg: + self.end_state["stp_converge"] = "fast" + else: + self.end_state["stp_converge"] = "normal" + + if self.edged_port: + if self.interface == "all": + if "stp edged-port default" in self.stp_cfg: + self.end_state["edged_port"] = "enable" + else: + self.end_state["edged_port"] = "disable" + else: + if "stp edged-port enable" in self.interface_stp_cfg: + self.end_state["edged_port"] = "enable" + else: + self.end_state["edged_port"] = "disable" + + if self.bpdu_filter: + if self.interface == "all": + if "stp bpdu-filter default" in self.stp_cfg: + self.end_state["bpdu_filter"] = "enable" + else: + self.end_state["bpdu_filter"] = "disable" + else: + if "stp bpdu-filter enable" in self.interface_stp_cfg: + self.end_state["bpdu_filter"] = "enable" + else: + self.end_state["bpdu_filter"] = "disable" + + if self.bpdu_protection: + if "stp bpdu-protection" in self.stp_cfg: + self.end_state["bpdu_protection"] = "enable" + else: + self.end_state["bpdu_protection"] = "disable" + + if self.tc_protection: + pre_cfg = self.stp_cfg.split("\n") + if "stp tc-protection" in pre_cfg: + self.end_state["tc_protection"] = "enable" + else: + self.end_state["tc_protection"] = "disable" + + if self.tc_protection_interval: + if "stp tc-protection interval" in self.stp_cfg: + tmp_value = re.findall(r'stp tc-protection interval (.*)', self.stp_cfg) + if not tmp_value: + self.module.fail_json( + msg='Error: Can not find tc-protection interval on the device.') + self.end_state["tc_protection_interval"] = tmp_value[0] + else: + self.end_state["tc_protection_interval"] = "null" + + if self.tc_protection_threshold: + if "stp tc-protection threshold" in self.stp_cfg: + tmp_value = re.findall(r'stp 
tc-protection threshold (.*)', self.stp_cfg) + if not tmp_value: + self.module.fail_json( + msg='Error: Can not find tc-protection threshold on the device.') + self.end_state["tc_protection_threshold"] = tmp_value[0] + else: + self.end_state["tc_protection_threshold"] = "1" + + if self.cost: + tmp_value = re.findall(r'stp instance (.*) cost (.*)', self.interface_stp_cfg) + if not tmp_value: + self.end_state["cost"] = "null" + else: + self.end_state["cost"] = tmp_value[0][1] + + if self.root_protection or self.loop_protection: + if "stp root-protection" in self.interface_stp_cfg: + self.end_state["root_protection"] = "enable" + else: + self.end_state["root_protection"] = "disable" + + if "stp loop-protection" in self.interface_stp_cfg: + self.end_state["loop_protection"] = "enable" + else: + self.end_state["loop_protection"] = "disable" + + if self.existing == self.end_state: + self.changed = False + self.updates_cmd = list() + + def present_stp(self): + """ Present stp configuration """ + + cmds = list() + + # config stp global + if self.stp_mode: + if self.stp_mode != self.cur_cfg["stp_mode"]: + cmd = "stp mode %s" % self.stp_mode + cmds.append(cmd) + self.updates_cmd.append(cmd) + + if self.stp_enable: + if self.stp_enable != self.cur_cfg["stp_enable"]: + cmd = "stp %s" % self.stp_enable + cmds.append(cmd) + self.updates_cmd.append(cmd) + + if self.stp_converge: + if self.stp_converge != self.cur_cfg["stp_converge"]: + cmd = "stp converge %s" % self.stp_converge + cmds.append(cmd) + self.updates_cmd.append(cmd) + + if self.edged_port: + if self.interface == "all": + if self.edged_port != self.cur_cfg["edged_port"]: + if self.edged_port == "enable": + cmd = "stp edged-port default" + cmds.append(cmd) + self.updates_cmd.append(cmd) + else: + cmd = "undo stp edged-port default" + cmds.append(cmd) + self.updates_cmd.append(cmd) + + if self.bpdu_filter: + if self.interface == "all": + if self.bpdu_filter != self.cur_cfg["bpdu_filter"]: + if self.bpdu_filter == "enable": + cmd = "stp bpdu-filter default" + cmds.append(cmd) + self.updates_cmd.append(cmd) + else: + cmd = "undo stp bpdu-filter default" + cmds.append(cmd) + self.updates_cmd.append(cmd) + + if self.bpdu_protection: + if self.bpdu_protection != self.cur_cfg["bpdu_protection"]: + if self.bpdu_protection == "enable": + cmd = "stp bpdu-protection" + cmds.append(cmd) + self.updates_cmd.append(cmd) + else: + cmd = "undo stp bpdu-protection" + cmds.append(cmd) + self.updates_cmd.append(cmd) + + if self.tc_protection: + if self.tc_protection != self.cur_cfg["tc_protection"]: + if self.tc_protection == "enable": + cmd = "stp tc-protection" + cmds.append(cmd) + self.updates_cmd.append(cmd) + else: + cmd = "undo stp tc-protection" + cmds.append(cmd) + self.updates_cmd.append(cmd) + + if self.tc_protection_interval: + if self.tc_protection_interval != self.cur_cfg["tc_protection_interval"]: + cmd = "stp tc-protection interval %s" % self.tc_protection_interval + cmds.append(cmd) + self.updates_cmd.append(cmd) + + if self.tc_protection_threshold: + if self.tc_protection_threshold != self.cur_cfg["tc_protection_threshold"]: + cmd = "stp tc-protection threshold %s" % self.tc_protection_threshold + cmds.append(cmd) + self.updates_cmd.append(cmd) + + # config interface stp + if self.interface and self.interface != "all": + tmp_changed = False + + cmd = "interface %s" % self.interface + cmds.append(cmd) + self.updates_cmd.append(cmd) + + if self.edged_port: + if self.edged_port != self.cur_cfg["edged_port"]: + if self.edged_port == "enable": + cmd = 
"stp edged-port enable" + cmds.append(cmd) + self.updates_cmd.append(cmd) + tmp_changed = True + else: + cmd = "undo stp edged-port" + cmds.append(cmd) + self.updates_cmd.append(cmd) + tmp_changed = True + + if self.bpdu_filter: + if self.bpdu_filter != self.cur_cfg["bpdu_filter"]: + if self.bpdu_filter == "enable": + cmd = "stp bpdu-filter enable" + cmds.append(cmd) + self.updates_cmd.append(cmd) + tmp_changed = True + else: + cmd = "undo stp bpdu-filter" + cmds.append(cmd) + self.updates_cmd.append(cmd) + tmp_changed = True + + if self.root_protection: + if self.root_protection == "enable" and self.cur_cfg["loop_protection"] == "enable": + self.module.fail_json( + msg='Error: The interface has enable loop_protection, can not enable root_protection.') + if self.root_protection != self.cur_cfg["root_protection"]: + if self.root_protection == "enable": + cmd = "stp root-protection" + cmds.append(cmd) + self.updates_cmd.append(cmd) + tmp_changed = True + else: + cmd = "undo stp root-protection" + cmds.append(cmd) + self.updates_cmd.append(cmd) + tmp_changed = True + + if self.loop_protection: + if self.loop_protection == "enable" and self.cur_cfg["root_protection"] == "enable": + self.module.fail_json( + msg='Error: The interface has enable root_protection, can not enable loop_protection.') + if self.loop_protection != self.cur_cfg["loop_protection"]: + if self.loop_protection == "enable": + cmd = "stp loop-protection" + cmds.append(cmd) + self.updates_cmd.append(cmd) + tmp_changed = True + else: + cmd = "undo stp loop-protection" + cmds.append(cmd) + self.updates_cmd.append(cmd) + tmp_changed = True + + if self.cost: + if self.cost != self.cur_cfg["cost"]: + cmd = "stp cost %s" % self.cost + cmds.append(cmd) + self.updates_cmd.append(cmd) + tmp_changed = True + + if not tmp_changed: + cmd = "interface %s" % self.interface + self.updates_cmd.remove(cmd) + cmds.remove(cmd) + + if cmds: + self.cli_load_config(cmds) + self.changed = True + + def absent_stp(self): + """ Absent stp configuration """ + + cmds = list() + + if self.stp_mode: + if self.stp_mode == self.cur_cfg["stp_mode"]: + if self.stp_mode != "mstp": + cmd = "undo stp mode" + cmds.append(cmd) + self.updates_cmd.append(cmd) + self.changed = True + + if self.stp_enable: + if self.stp_enable != self.cur_cfg["stp_enable"]: + cmd = "stp %s" % self.stp_enable + cmds.append(cmd) + self.updates_cmd.append(cmd) + + if self.stp_converge: + if self.stp_converge == self.cur_cfg["stp_converge"]: + cmd = "undo stp converge" + cmds.append(cmd) + self.updates_cmd.append(cmd) + self.changed = True + + if self.edged_port: + if self.interface == "all": + if self.edged_port != self.cur_cfg["edged_port"]: + if self.edged_port == "enable": + cmd = "stp edged-port default" + cmds.append(cmd) + self.updates_cmd.append(cmd) + else: + cmd = "undo stp edged-port default" + cmds.append(cmd) + self.updates_cmd.append(cmd) + + if self.bpdu_filter: + if self.interface == "all": + if self.bpdu_filter != self.cur_cfg["bpdu_filter"]: + if self.bpdu_filter == "enable": + cmd = "stp bpdu-filter default" + cmds.append(cmd) + self.updates_cmd.append(cmd) + else: + cmd = "undo stp bpdu-filter default" + cmds.append(cmd) + self.updates_cmd.append(cmd) + + if self.bpdu_protection: + if self.bpdu_protection != self.cur_cfg["bpdu_protection"]: + if self.bpdu_protection == "enable": + cmd = "stp bpdu-protection" + cmds.append(cmd) + self.updates_cmd.append(cmd) + else: + cmd = "undo stp bpdu-protection" + cmds.append(cmd) + self.updates_cmd.append(cmd) + + if 
self.tc_protection: + if self.tc_protection != self.cur_cfg["tc_protection"]: + if self.tc_protection == "enable": + cmd = "stp tc-protection" + cmds.append(cmd) + self.updates_cmd.append(cmd) + else: + cmd = "undo stp tc-protection" + cmds.append(cmd) + self.updates_cmd.append(cmd) + + if self.tc_protection_interval: + if self.tc_protection_interval == self.cur_cfg["tc_protection_interval"]: + cmd = "undo stp tc-protection interval" + cmds.append(cmd) + self.updates_cmd.append(cmd) + self.changed = True + + if self.tc_protection_threshold: + if self.tc_protection_threshold == self.cur_cfg["tc_protection_threshold"]: + if self.tc_protection_threshold != "1": + cmd = "undo stp tc-protection threshold" + cmds.append(cmd) + self.updates_cmd.append(cmd) + self.changed = True + + # undo interface stp + if self.interface and self.interface != "all": + tmp_changed = False + + cmd = "interface %s" % self.interface + cmds.append(cmd) + self.updates_cmd.append(cmd) + + if self.edged_port: + if self.edged_port != self.cur_cfg["edged_port"]: + if self.edged_port == "enable": + cmd = "stp edged-port enable" + cmds.append(cmd) + self.updates_cmd.append(cmd) + tmp_changed = True + else: + cmd = "undo stp edged-port" + cmds.append(cmd) + self.updates_cmd.append(cmd) + tmp_changed = True + + if self.bpdu_filter: + if self.bpdu_filter != self.cur_cfg["bpdu_filter"]: + if self.bpdu_filter == "enable": + cmd = "stp bpdu-filter enable" + cmds.append(cmd) + self.updates_cmd.append(cmd) + tmp_changed = True + else: + cmd = "undo stp bpdu-filter" + cmds.append(cmd) + self.updates_cmd.append(cmd) + tmp_changed = True + + if self.root_protection: + if self.root_protection == "enable" and self.cur_cfg["loop_protection"] == "enable": + self.module.fail_json( + msg='Error: The interface has enable loop_protection, can not enable root_protection.') + if self.root_protection != self.cur_cfg["root_protection"]: + if self.root_protection == "enable": + cmd = "stp root-protection" + cmds.append(cmd) + self.updates_cmd.append(cmd) + tmp_changed = True + else: + cmd = "undo stp root-protection" + cmds.append(cmd) + self.updates_cmd.append(cmd) + tmp_changed = True + + if self.loop_protection: + if self.loop_protection == "enable" and self.cur_cfg["root_protection"] == "enable": + self.module.fail_json( + msg='Error: The interface has enable root_protection, can not enable loop_protection.') + if self.loop_protection != self.cur_cfg["loop_protection"]: + if self.loop_protection == "enable": + cmd = "stp loop-protection" + cmds.append(cmd) + self.updates_cmd.append(cmd) + tmp_changed = True + else: + cmd = "undo stp loop-protection" + cmds.append(cmd) + self.updates_cmd.append(cmd) + tmp_changed = True + + if self.cost: + if self.cost == self.cur_cfg["cost"]: + cmd = "undo stp cost" + cmds.append(cmd) + self.updates_cmd.append(cmd) + tmp_changed = True + + if not tmp_changed: + cmd = "interface %s" % self.interface + self.updates_cmd.remove(cmd) + cmds.remove(cmd) + + if cmds: + self.cli_load_config(cmds) + self.changed = True + + def work(self): + """ Work function """ + + self.check_params() + self.get_proposed() + self.get_existing() + + if self.state == "present": + self.present_stp() + else: + self.absent_stp() + + self.get_end_state() + + self.results['changed'] = self.changed + self.results['proposed'] = self.proposed + self.results['existing'] = self.existing + self.results['end_state'] = self.end_state + self.results['updates'] = self.updates_cmd + + self.module.exit_json(**self.results) + + +def main(): + """ Module 
main """ + + argument_spec = dict( + state=dict(choices=['present', 'absent'], default='present'), + stp_mode=dict(choices=['stp', 'rstp', 'mstp']), + stp_enable=dict(choices=['enable', 'disable']), + stp_converge=dict(choices=['fast', 'normal']), + bpdu_protection=dict(choices=['enable', 'disable']), + tc_protection=dict(choices=['enable', 'disable']), + tc_protection_interval=dict(type='str'), + tc_protection_threshold=dict(type='str'), + interface=dict(type='str'), + edged_port=dict(choices=['enable', 'disable']), + bpdu_filter=dict(choices=['enable', 'disable']), + cost=dict(type='str'), + root_protection=dict(choices=['enable', 'disable']), + loop_protection=dict(choices=['enable', 'disable']) + ) + + argument_spec.update(ce_argument_spec) + module = Stp(argument_spec=argument_spec) + module.work() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/cloudengine/ce_switchport.py b/plugins/modules/network/cloudengine/ce_switchport.py new file mode 100644 index 0000000000..aeb8c365b9 --- /dev/null +++ b/plugins/modules/network/cloudengine/ce_switchport.py @@ -0,0 +1,1001 @@ +#!/usr/bin/python +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: ce_switchport +short_description: Manages Layer 2 switchport interfaces on HUAWEI CloudEngine switches. +description: + - Manages Layer 2 switchport interfaces on HUAWEI CloudEngine switches. +author: QijunPan (@QijunPan) +notes: + - When C(state=absent), VLANs can be added/removed from trunk links and + the existing access VLAN can be 'unconfigured' to just having VLAN 1 on that interface. + - When working with trunks VLANs the keywords add/remove are always sent + in the C(port trunk allow-pass vlan) command. Use verbose mode to see commands sent. + - When C(state=unconfigured), the interface will result with having a default Layer 2 interface, i.e. vlan 1 in access mode. + - This module requires the netconf system service be enabled on the remote device being managed. + - Recommended connection is C(netconf). + - This module also works with C(local) connections for legacy playbooks. +options: + interface: + description: + - Full name of the interface, i.e. 40GE1/0/22. + required: true + mode: + description: + - The link type of an interface. + choices: ['access','trunk', 'hybrid', 'dot1qtunnel'] + default_vlan: + description: + - If C(mode=access, or mode=dot1qtunnel), used as the access VLAN ID, in the range from 1 to 4094. + pvid_vlan: + description: + - If C(mode=trunk, or mode=hybrid), used as the trunk native VLAN ID, in the range from 1 to 4094. + trunk_vlans: + description: + - If C(mode=trunk), used as the VLAN range to ADD or REMOVE + from the trunk, such as 2-10 or 2,5,10-15, etc. 
+ untagged_vlans: + description: + - If C(mode=hybrid), used as the VLAN range to ADD or REMOVE + from the trunk, such as 2-10 or 2,5,10-15, etc. + tagged_vlans: + description: + - If C(mode=hybrid), used as the VLAN range to ADD or REMOVE + from the trunk, such as 2-10 or 2,5,10-15, etc. + state: + description: + - Manage the state of the resource. + default: present + choices: ['present', 'absent', 'unconfigured'] +''' + +EXAMPLES = ''' +- name: switchport module test + hosts: cloudengine + connection: local + gather_facts: no + vars: + cli: + host: "{{ inventory_hostname }}" + port: "{{ ansible_ssh_port }}" + username: "{{ username }}" + password: "{{ password }}" + transport: cli + + tasks: + - name: Ensure 10GE1/0/22 is in its default switchport state + ce_switchport: + interface: 10GE1/0/22 + state: unconfigured + provider: '{{ cli }}' + + - name: Ensure 10GE1/0/22 is configured for access vlan 20 + ce_switchport: + interface: 10GE1/0/22 + mode: access + default_vlan: 20 + provider: '{{ cli }}' + + - name: Ensure 10GE1/0/22 only has vlans 5-10 as trunk vlans + ce_switchport: + interface: 10GE1/0/22 + mode: trunk + pvid_vlan: 10 + trunk_vlans: 5-10 + provider: '{{ cli }}' + + - name: Ensure 10GE1/0/22 is a trunk port and ensure 2-50 are being tagged (doesn't mean others aren't also being tagged) + ce_switchport: + interface: 10GE1/0/22 + mode: trunk + pvid_vlan: 10 + trunk_vlans: 2-50 + provider: '{{ cli }}' + + - name: Ensure these VLANs are not being tagged on the trunk + ce_switchport: + interface: 10GE1/0/22 + mode: trunk + trunk_vlans: 51-4000 + state: absent + provider: '{{ cli }}' +''' + +RETURN = ''' +proposed: + description: k/v pairs of parameters passed into module + returned: always + type: dict + sample: {"default_vlan": "20", "interface": "10GE1/0/22", "mode": "access"} +existing: + description: k/v pairs of existing switchport + returned: always + type: dict + sample: {"default_vlan": "10", "interface": "10GE1/0/22", + "mode": "access", "switchport": "enable"} +end_state: + description: k/v pairs of switchport after module execution + returned: always + type: dict + sample: {"default_vlan": "20", "interface": "10GE1/0/22", + "mode": "access", "switchport": "enable"} +updates: + description: command string sent to the device + returned: always + type: list + sample: ["10GE1/0/22", "port default vlan 20"] +changed: + description: check to see if a change was made on the device + returned: always + type: bool + sample: true +''' + +import re +from xml.etree import ElementTree as ET +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.network.cloudengine.ce import get_nc_config, set_nc_config, ce_argument_spec + + +CE_NC_GET_PORT_ATTR = """ + + + + + %s + + + + + + + + + + + +""" + +CE_NC_SET_PORT = """ + + + + %s + + %s + %s + %s + %s + + + + +""" + +CE_NC_SET_PORT_MODE = """ + + + + %s + + %s + + + + +""" + +CE_NC_SET_DEFAULT_PORT = """ + + + + + %s + + access + 1 + + + + + + + +""" + + +SWITCH_PORT_TYPE = ('ge', '10ge', '25ge', + '4x10ge', '40ge', '100ge', 'eth-trunk') + + +def get_interface_type(interface): + """Gets the type of interface, such as 10GE, ETH-TRUNK, VLANIF...""" + + if interface is None: + return None + + iftype = None + + if interface.upper().startswith('GE'): + iftype = 'ge' + elif interface.upper().startswith('10GE'): + iftype = '10ge' + elif interface.upper().startswith('25GE'): + iftype = '25ge' + elif interface.upper().startswith('4X10GE'): + iftype = '4x10ge' + elif 
interface.upper().startswith('40GE'): + iftype = '40ge' + elif interface.upper().startswith('100GE'): + iftype = '100ge' + elif interface.upper().startswith('VLANIF'): + iftype = 'vlanif' + elif interface.upper().startswith('LOOPBACK'): + iftype = 'loopback' + elif interface.upper().startswith('METH'): + iftype = 'meth' + elif interface.upper().startswith('ETH-TRUNK'): + iftype = 'eth-trunk' + elif interface.upper().startswith('VBDIF'): + iftype = 'vbdif' + elif interface.upper().startswith('NVE'): + iftype = 'nve' + elif interface.upper().startswith('TUNNEL'): + iftype = 'tunnel' + elif interface.upper().startswith('ETHERNET'): + iftype = 'ethernet' + elif interface.upper().startswith('FCOE-PORT'): + iftype = 'fcoe-port' + elif interface.upper().startswith('FABRIC-PORT'): + iftype = 'fabric-port' + elif interface.upper().startswith('STACK-PORT'): + iftype = 'stack-port' + elif interface.upper().startswith('NULL'): + iftype = 'null' + else: + return None + + return iftype.lower() + + +def is_portswitch_enalbed(iftype): + """"[undo] portswitch""" + + return bool(iftype in SWITCH_PORT_TYPE) + + +def vlan_bitmap_undo(bitmap): + """convert vlan bitmap to undo bitmap""" + + vlan_bit = ['F'] * 1024 + + if not bitmap or len(bitmap) == 0: + return ''.join(vlan_bit) + + bit_len = len(bitmap) + for num in range(bit_len): + undo = (~int(bitmap[num], 16)) & 0xF + vlan_bit[num] = hex(undo)[2] + + return ''.join(vlan_bit) + + +def is_vlan_bitmap_empty(bitmap): + """check vlan bitmap empty""" + + if not bitmap or len(bitmap) == 0: + return True + + bit_len = len(bitmap) + for num in range(bit_len): + if bitmap[num] != '0': + return False + + return True + + +class SwitchPort(object): + """ + Manages Layer 2 switchport interfaces. + """ + + def __init__(self, argument_spec): + self.spec = argument_spec + self.module = None + self.init_module() + + # interface and vlan info + self.interface = self.module.params['interface'] + self.mode = self.module.params['mode'] + self.state = self.module.params['state'] + self.default_vlan = self.module.params['default_vlan'] + self.pvid_vlan = self.module.params['pvid_vlan'] + self.trunk_vlans = self.module.params['trunk_vlans'] + self.untagged_vlans = self.module.params['untagged_vlans'] + self.tagged_vlans = self.module.params['tagged_vlans'] + + # host info + self.host = self.module.params['host'] + self.username = self.module.params['username'] + self.port = self.module.params['port'] + + # state + self.changed = False + self.updates_cmd = list() + self.results = dict() + self.proposed = dict() + self.existing = dict() + self.end_state = dict() + self.intf_info = dict() # interface vlan info + self.intf_type = None # loopback tunnel ... + + def init_module(self): + """ init module """ + + required_if = [('state', 'absent', ['mode']), ('state', 'present', ['mode'])] + mutually_exclusive = [['default_vlan', 'trunk_vlans'], + ['default_vlan', 'pvid_vlan'], + ['default_vlan', 'untagged_vlans'], + ['trunk_vlans', 'untagged_vlans'], + ['trunk_vlans', 'tagged_vlans'], + ['default_vlan', 'tagged_vlans']] + + self.module = AnsibleModule( + argument_spec=self.spec, required_if=required_if, supports_check_mode=True, mutually_exclusive=mutually_exclusive) + + def check_response(self, xml_str, xml_name): + """Check if response message is already succeed.""" + + if "" not in xml_str: + self.module.fail_json(msg='Error: %s failed.' 
% xml_name) + + def get_interface_dict(self, ifname): + """ get one interface attributes dict.""" + + intf_info = dict() + conf_str = CE_NC_GET_PORT_ATTR % ifname + xml_str = get_nc_config(self.module, conf_str) + if "" in xml_str: + return intf_info + xml_str = xml_str.replace('\r', '').replace('\n', '').\ + replace('xmlns="urn:ietf:params:xml:ns:netconf:base:1.0"', "").\ + replace('xmlns="http://www.huawei.com/netconf/vrp"', "") + tree = ET.fromstring(xml_str) + l2Enable = tree.find('ethernet/ethernetIfs/ethernetIf/l2Enable') + intf_info["l2Enable"] = l2Enable.text + port_type = tree.find('ethernet/ethernetIfs/ethernetIf/l2Attribute') + for pre in port_type: + intf_info[pre.tag] = pre.text + intf_info["ifName"] = ifname + if intf_info["trunkVlans"] is None: + intf_info["trunkVlans"] = "" + if intf_info["untagVlans"] is None: + intf_info["untagVlans"] = "" + return intf_info + + def is_l2switchport(self): + """Check layer2 switch port""" + + return bool(self.intf_info["l2Enable"] == "enable") + + def merge_access_vlan(self, ifname, default_vlan): + """Merge access interface vlan""" + + change = False + conf_str = "" + + self.updates_cmd.append("interface %s" % ifname) + if self.state == "present": + if self.intf_info["linkType"] == "access": + if default_vlan and self.intf_info["pvid"] != default_vlan: + self.updates_cmd.append( + "port default vlan %s" % default_vlan) + conf_str = CE_NC_SET_PORT % (ifname, "access", default_vlan, "", "") + change = True + else: # not access + self.updates_cmd.append("port link-type access") + if default_vlan: + self.updates_cmd.append( + "port default vlan %s" % default_vlan) + conf_str = CE_NC_SET_PORT % (ifname, "access", default_vlan, "", "") + else: + conf_str = CE_NC_SET_PORT % (ifname, "access", "1", "", "") + change = True + elif self.state == "absent": + if self.intf_info["linkType"] == "access": + if default_vlan and self.intf_info["pvid"] == default_vlan and default_vlan != "1": + self.updates_cmd.append( + "undo port default vlan %s" % default_vlan) + conf_str = CE_NC_SET_PORT % (ifname, "access", "1", "", "") + change = True + + if not change: + self.updates_cmd.pop() # remove interface + return + conf_str = "" + conf_str + "" + rcv_xml = set_nc_config(self.module, conf_str) + self.check_response(rcv_xml, "MERGE_ACCESS_PORT") + self.changed = True + + def merge_trunk_vlan(self, ifname, pvid_vlan, trunk_vlans): + """Merge trunk interface vlan""" + + change = False + xmlstr = "" + pvid = "" + trunk = "" + self.updates_cmd.append("interface %s" % ifname) + if trunk_vlans: + vlan_list = self.vlan_range_to_list(trunk_vlans) + vlan_map = self.vlan_list_to_bitmap(vlan_list) + if self.state == "present": + if self.intf_info["linkType"] == "trunk": + if pvid_vlan and self.intf_info["pvid"] != pvid_vlan: + self.updates_cmd.append( + "port trunk pvid vlan %s" % pvid_vlan) + pvid = pvid_vlan + change = True + + if trunk_vlans: + add_vlans = self.vlan_bitmap_add( + self.intf_info["trunkVlans"], vlan_map) + if not is_vlan_bitmap_empty(add_vlans): + self.updates_cmd.append( + "port trunk allow-pass %s" + % trunk_vlans.replace(',', ' ').replace('-', ' to ')) + trunk = "%s:%s" % (add_vlans, add_vlans) + change = True + if pvid or trunk: + xmlstr += CE_NC_SET_PORT % (ifname, "trunk", pvid, trunk, "") + if not pvid: + xmlstr = xmlstr.replace("", "") + if not trunk: + xmlstr = xmlstr.replace("", "") + + else: # not trunk + self.updates_cmd.append("port link-type trunk") + change = True + if pvid_vlan: + self.updates_cmd.append( + "port trunk pvid vlan %s" % 
pvid_vlan) + pvid = pvid_vlan + if trunk_vlans: + self.updates_cmd.append( + "port trunk allow-pass %s" + % trunk_vlans.replace(',', ' ').replace('-', ' to ')) + trunk = "%s:%s" % (vlan_map, vlan_map) + if pvid or trunk: + xmlstr += CE_NC_SET_PORT % (ifname, "trunk", pvid, trunk, "") + if not pvid: + xmlstr = xmlstr.replace("", "") + if not trunk: + xmlstr = xmlstr.replace("", "") + + if not pvid_vlan and not trunk_vlans: + xmlstr += CE_NC_SET_PORT_MODE % (ifname, "trunk") + self.updates_cmd.append( + "undo port trunk allow-pass vlan 1") + elif self.state == "absent": + if self.intf_info["linkType"] == "trunk": + if pvid_vlan and self.intf_info["pvid"] == pvid_vlan and pvid_vlan != '1': + self.updates_cmd.append( + "undo port trunk pvid vlan %s" % pvid_vlan) + pvid = "1" + change = True + if trunk_vlans: + del_vlans = self.vlan_bitmap_del( + self.intf_info["trunkVlans"], vlan_map) + if not is_vlan_bitmap_empty(del_vlans): + self.updates_cmd.append( + "undo port trunk allow-pass %s" + % trunk_vlans.replace(',', ' ').replace('-', ' to ')) + undo_map = vlan_bitmap_undo(del_vlans) + trunk = "%s:%s" % (undo_map, del_vlans) + change = True + if pvid or trunk: + xmlstr += CE_NC_SET_PORT % (ifname, "trunk", pvid, trunk, "") + if not pvid: + xmlstr = xmlstr.replace("", "") + if not trunk: + xmlstr = xmlstr.replace("", "") + + if not change: + self.updates_cmd.pop() + return + conf_str = "" + xmlstr + "" + rcv_xml = set_nc_config(self.module, conf_str) + self.check_response(rcv_xml, "MERGE_TRUNK_PORT") + self.changed = True + + def merge_hybrid_vlan(self, ifname, pvid_vlan, tagged_vlans, untagged_vlans): + """Merge hybrid interface vlan""" + + change = False + xmlstr = "" + pvid = "" + tagged = "" + untagged = "" + self.updates_cmd.append("interface %s" % ifname) + if tagged_vlans: + vlan_targed_list = self.vlan_range_to_list(tagged_vlans) + vlan_targed_map = self.vlan_list_to_bitmap(vlan_targed_list) + if untagged_vlans: + vlan_untarged_list = self.vlan_range_to_list(untagged_vlans) + vlan_untarged_map = self.vlan_list_to_bitmap(vlan_untarged_list) + if self.state == "present": + if self.intf_info["linkType"] == "hybrid": + if pvid_vlan and self.intf_info["pvid"] != pvid_vlan: + self.updates_cmd.append( + "port hybrid pvid vlan %s" % pvid_vlan) + pvid = pvid_vlan + change = True + if tagged_vlans: + add_vlans = self.vlan_bitmap_add( + self.intf_info["trunkVlans"], vlan_targed_map) + if not is_vlan_bitmap_empty(add_vlans): + self.updates_cmd.append( + "port hybrid tagged vlan %s" + % tagged_vlans.replace(',', ' ').replace('-', ' to ')) + tagged = "%s:%s" % (add_vlans, add_vlans) + change = True + if untagged_vlans: + add_vlans = self.vlan_bitmap_add( + self.intf_info["untagVlans"], vlan_untarged_map) + if not is_vlan_bitmap_empty(add_vlans): + self.updates_cmd.append( + "port hybrid untagged vlan %s" + % untagged_vlans.replace(',', ' ').replace('-', ' to ')) + untagged = "%s:%s" % (add_vlans, add_vlans) + change = True + if pvid or tagged or untagged: + xmlstr += CE_NC_SET_PORT % (ifname, "hybrid", pvid, tagged, untagged) + if not pvid: + xmlstr = xmlstr.replace("", "") + if not tagged: + xmlstr = xmlstr.replace("", "") + if not untagged: + xmlstr = xmlstr.replace("", "") + else: + self.updates_cmd.append("port link-type hybrid") + change = True + if pvid_vlan: + self.updates_cmd.append( + "port hybrid pvid vlan %s" % pvid_vlan) + pvid = pvid_vlan + if tagged_vlans: + self.updates_cmd.append( + "port hybrid tagged vlan %s" + % tagged_vlans.replace(',', ' ').replace('-', ' to ')) + tagged = "%s:%s" % 
(vlan_targed_map, vlan_targed_map) + if untagged_vlans: + self.updates_cmd.append( + "port hybrid untagged vlan %s" + % untagged_vlans.replace(',', ' ').replace('-', ' to ')) + untagged = "%s:%s" % (vlan_untarged_map, vlan_untarged_map) + if pvid or tagged or untagged: + xmlstr += CE_NC_SET_PORT % (ifname, "hybrid", pvid, tagged, untagged) + if not pvid: + xmlstr = xmlstr.replace("", "") + if not tagged: + xmlstr = xmlstr.replace("", "") + if not untagged: + xmlstr = xmlstr.replace("", "") + if not pvid_vlan and not tagged_vlans and not untagged_vlans: + xmlstr += CE_NC_SET_PORT_MODE % (ifname, "hybrid") + self.updates_cmd.append( + "undo port hybrid untagged vlan 1") + elif self.state == "absent": + if self.intf_info["linkType"] == "hybrid": + if pvid_vlan and self.intf_info["pvid"] == pvid_vlan and pvid_vlan != '1': + self.updates_cmd.append( + "undo port hybrid pvid vlan %s" % pvid_vlan) + pvid = "1" + change = True + if tagged_vlans: + del_vlans = self.vlan_bitmap_del( + self.intf_info["trunkVlans"], vlan_targed_map) + if not is_vlan_bitmap_empty(del_vlans): + self.updates_cmd.append( + "undo port hybrid tagged vlan %s" + % tagged_vlans.replace(',', ' ').replace('-', ' to ')) + undo_map = vlan_bitmap_undo(del_vlans) + tagged = "%s:%s" % (undo_map, del_vlans) + change = True + if untagged_vlans: + del_vlans = self.vlan_bitmap_del( + self.intf_info["untagVlans"], vlan_untarged_map) + if not is_vlan_bitmap_empty(del_vlans): + self.updates_cmd.append( + "undo port hybrid untagged vlan %s" + % untagged_vlans.replace(',', ' ').replace('-', ' to ')) + undo_map = vlan_bitmap_undo(del_vlans) + untagged = "%s:%s" % (undo_map, del_vlans) + change = True + if pvid or tagged or untagged: + xmlstr += CE_NC_SET_PORT % (ifname, "hybrid", pvid, tagged, untagged) + if not pvid: + xmlstr = xmlstr.replace("", "") + if not tagged: + xmlstr = xmlstr.replace("", "") + if not untagged: + xmlstr = xmlstr.replace("", "") + + if not change: + self.updates_cmd.pop() + return + + conf_str = "" + xmlstr + "" + rcv_xml = set_nc_config(self.module, conf_str) + self.check_response(rcv_xml, "MERGE_HYBRID_PORT") + self.changed = True + + def merge_dot1qtunnel_vlan(self, ifname, default_vlan): + """Merge dot1qtunnel""" + + change = False + conf_str = "" + + self.updates_cmd.append("interface %s" % ifname) + if self.state == "present": + if self.intf_info["linkType"] == "dot1qtunnel": + if default_vlan and self.intf_info["pvid"] != default_vlan: + self.updates_cmd.append( + "port default vlan %s" % default_vlan) + conf_str = CE_NC_SET_PORT % (ifname, "dot1qtunnel", default_vlan, "", "") + change = True + else: + self.updates_cmd.append("port link-type dot1qtunnel") + if default_vlan: + self.updates_cmd.append( + "port default vlan %s" % default_vlan) + conf_str = CE_NC_SET_PORT % (ifname, "dot1qtunnel", default_vlan, "", "") + else: + conf_str = CE_NC_SET_PORT % (ifname, "dot1qtunnel", "1", "", "") + change = True + elif self.state == "absent": + if self.intf_info["linkType"] == "dot1qtunnel": + if default_vlan and self.intf_info["pvid"] == default_vlan and default_vlan != "1": + self.updates_cmd.append( + "undo port default vlan %s" % default_vlan) + conf_str = CE_NC_SET_PORT % (ifname, "dot1qtunnel", "1", "", "") + change = True + if not change: + self.updates_cmd.pop() # remove interface + return + conf_str = "" + conf_str + "" + rcv_xml = set_nc_config(self.module, conf_str) + self.check_response(rcv_xml, "MERGE_DOT1QTUNNEL_PORT") + self.changed = True + + def default_switchport(self, ifname): + """Set interface 
default or unconfigured""" + + change = False + if self.intf_info["linkType"] != "access": + self.updates_cmd.append("interface %s" % ifname) + self.updates_cmd.append("port link-type access") + self.updates_cmd.append("port default vlan 1") + change = True + else: + if self.intf_info["pvid"] != "1": + self.updates_cmd.append("interface %s" % ifname) + self.updates_cmd.append("port default vlan 1") + change = True + + if not change: + return + + conf_str = CE_NC_SET_DEFAULT_PORT % ifname + rcv_xml = set_nc_config(self.module, conf_str) + self.check_response(rcv_xml, "DEFAULT_INTF_VLAN") + self.changed = True + + def vlan_series(self, vlanid_s): + """ convert vlan range to vlan list """ + + vlan_list = [] + peerlistlen = len(vlanid_s) + if peerlistlen != 2: + self.module.fail_json(msg='Error: Format of vlanid is invalid.') + for num in range(peerlistlen): + if not vlanid_s[num].isdigit(): + self.module.fail_json( + msg='Error: Format of vlanid is invalid.') + if int(vlanid_s[0]) > int(vlanid_s[1]): + self.module.fail_json(msg='Error: Format of vlanid is invalid.') + elif int(vlanid_s[0]) == int(vlanid_s[1]): + vlan_list.append(str(vlanid_s[0])) + return vlan_list + for num in range(int(vlanid_s[0]), int(vlanid_s[1])): + vlan_list.append(str(num)) + vlan_list.append(vlanid_s[1]) + + return vlan_list + + def vlan_region(self, vlanid_list): + """ convert vlan range to vlan list """ + + vlan_list = [] + peerlistlen = len(vlanid_list) + for num in range(peerlistlen): + if vlanid_list[num].isdigit(): + vlan_list.append(vlanid_list[num]) + else: + vlan_s = self.vlan_series(vlanid_list[num].split('-')) + vlan_list.extend(vlan_s) + + return vlan_list + + def vlan_range_to_list(self, vlan_range): + """ convert vlan range to vlan list """ + + vlan_list = self.vlan_region(vlan_range.split(',')) + + return vlan_list + + def vlan_list_to_bitmap(self, vlanlist): + """ convert vlan list to vlan bitmap """ + + vlan_bit = ['0'] * 1024 + bit_int = [0] * 1024 + + vlan_list_len = len(vlanlist) + for num in range(vlan_list_len): + tagged_vlans = int(vlanlist[num]) + if tagged_vlans <= 0 or tagged_vlans > 4094: + self.module.fail_json( + msg='Error: Vlan id is not in the range from 1 to 4094.') + j = tagged_vlans // 4 + bit_int[j] |= 0x8 >> (tagged_vlans % 4) + vlan_bit[j] = hex(bit_int[j])[2] + + vlan_xml = ''.join(vlan_bit) + + return vlan_xml + + def vlan_bitmap_add(self, oldmap, newmap): + """vlan add bitmap""" + + vlan_bit = ['0'] * 1024 + + if len(newmap) != 1024: + self.module.fail_json(msg='Error: New vlan bitmap is invalid.') + + if len(oldmap) != 1024 and len(oldmap) != 0: + self.module.fail_json(msg='Error: old vlan bitmap is invalid.') + + if len(oldmap) == 0: + return newmap + + for num in range(1024): + new_tmp = int(newmap[num], 16) + old_tmp = int(oldmap[num], 16) + add = (~(new_tmp & old_tmp)) & new_tmp + vlan_bit[num] = hex(add)[2] + + vlan_xml = ''.join(vlan_bit) + + return vlan_xml + + def vlan_bitmap_del(self, oldmap, delmap): + """vlan del bitmap""" + + vlan_bit = ['0'] * 1024 + + if not oldmap or len(oldmap) == 0: + return ''.join(vlan_bit) + + if len(oldmap) != 1024 or len(delmap) != 1024: + self.module.fail_json(msg='Error: vlan bitmap is invalid.') + + for num in range(1024): + tmp = int(delmap[num], 16) & int(oldmap[num], 16) + vlan_bit[num] = hex(tmp)[2] + + vlan_xml = ''.join(vlan_bit) + + return vlan_xml + + def check_params(self): + """Check all input params""" + + # interface type check + if self.interface: + self.intf_type = get_interface_type(self.interface) + if not 
self.intf_type: + self.module.fail_json( + msg='Error: Interface name of %s is error.' % self.interface) + + if not self.intf_type or not is_portswitch_enalbed(self.intf_type): + self.module.fail_json(msg='Error: Interface %s is error.') + + # check default_vlan + if self.default_vlan: + if not self.default_vlan.isdigit(): + self.module.fail_json(msg='Error: Access vlan id is invalid.') + if int(self.default_vlan) <= 0 or int(self.default_vlan) > 4094: + self.module.fail_json( + msg='Error: Access vlan id is not in the range from 1 to 4094.') + + # check pvid_vlan + if self.pvid_vlan: + if not self.pvid_vlan.isdigit(): + self.module.fail_json(msg='Error: Pvid vlan id is invalid.') + if int(self.pvid_vlan) <= 0 or int(self.pvid_vlan) > 4094: + self.module.fail_json( + msg='Error: Pvid vlan id is not in the range from 1 to 4094.') + + # get interface info + self.intf_info = self.get_interface_dict(self.interface) + if not self.intf_info: + self.module.fail_json(msg='Error: Interface does not exist.') + + if not self.is_l2switchport(): + self.module.fail_json( + msg='Error: Interface is not layer2 switch port.') + if self.state == "unconfigured": + if any([self.mode, self.default_vlan, self.pvid_vlan, self.trunk_vlans, self.untagged_vlans, self.tagged_vlans]): + self.module.fail_json( + msg='Error: When state is unconfigured, only interface name exists.') + else: + if self.mode == "access": + if any([self.pvid_vlan, self.trunk_vlans, self.untagged_vlans, self.tagged_vlans]): + self.module.fail_json( + msg='Error: When mode is access, only default_vlan can be supported.') + elif self.mode == "trunk": + if any([self.default_vlan, self.untagged_vlans, self.tagged_vlans]): + self.module.fail_json( + msg='Error: When mode is trunk, only pvid_vlan and trunk_vlans can exist.') + elif self.mode == "hybrid": + if any([self.default_vlan, self.trunk_vlans]): + self.module.fail_json( + msg='Error: When mode is hybrid, default_vlan and trunk_vlans cannot exist') + else: + if any([self.pvid_vlan, self.trunk_vlans, self.untagged_vlans, self.tagged_vlans]): + self.module.fail_json( + msg='Error: When mode is dot1qtunnel, only default_vlan can be supported.') + + def get_proposed(self): + """get proposed info""" + + self.proposed['state'] = self.state + self.proposed['interface'] = self.interface + self.proposed['mode'] = self.mode + if self.mode: + if self.mode == "access": + self.proposed['access_pvid'] = self.default_vlan + elif self.mode == "trunk": + self.proposed['pvid_vlan'] = self.pvid_vlan + self.proposed['trunk_vlans'] = self.trunk_vlans + elif self.mode == "hybrid": + self.proposed['pvid_vlan'] = self.pvid_vlan + self.proposed['untagged_vlans'] = self.untagged_vlans + self.proposed['tagged_vlans'] = self.tagged_vlans + else: + self.proposed['dot1qtunnel_pvid'] = self.default_vlan + + def get_existing(self): + """get existing info""" + + if self.intf_info: + self.existing["interface"] = self.intf_info["ifName"] + self.existing["switchport"] = self.intf_info["l2Enable"] + self.existing["mode"] = self.intf_info["linkType"] + if self.intf_info["linkType"] == "access": + self.existing['access_pvid'] = self.intf_info["pvid"] + elif self.intf_info["linkType"] == "trunk": + self.existing['trunk_pvid'] = self.intf_info["pvid"] + self.existing['trunk_vlans'] = self.intf_info["trunkVlans"] + elif self.intf_info["linkType"] == "hybrid": + self.existing['hybrid_pvid'] = self.intf_info["pvid"] + self.existing['hybrid_untagged_vlans'] = self.intf_info["untagVlans"] + self.existing['hybrid_tagged_vlans'] = 
self.intf_info["trunkVlans"] + else: + self.existing['dot1qtunnel_pvid'] = self.intf_info["pvid"] + + def get_end_state(self): + """get end state info""" + + end_info = self.get_interface_dict(self.interface) + if end_info: + self.end_state["interface"] = end_info["ifName"] + self.end_state["switchport"] = end_info["l2Enable"] + self.end_state["mode"] = end_info["linkType"] + if end_info["linkType"] == "access": + self.end_state['access_pvid'] = end_info["pvid"] + elif end_info["linkType"] == "trunk": + self.end_state['trunk_pvid'] = end_info["pvid"] + self.end_state['trunk_vlans'] = end_info["trunkVlans"] + elif end_info["linkType"] == "hybrid": + self.end_state['hybrid_pvid'] = end_info["pvid"] + self.end_state['hybrid_untagged_vlans'] = end_info["untagVlans"] + self.end_state['hybrid_tagged_vlans'] = end_info["trunkVlans"] + else: + self.end_state['dot1qtunnel_pvid'] = end_info["pvid"] + if self.end_state == self.existing: + self.changed = False + + def work(self): + """worker""" + + self.check_params() + if not self.intf_info: + self.module.fail_json(msg='Error: interface does not exist.') + self.get_existing() + self.get_proposed() + + # present or absent + if self.state == "present" or self.state == "absent": + if self.mode == "access": + self.merge_access_vlan(self.interface, self.default_vlan) + elif self.mode == "trunk": + self.merge_trunk_vlan( + self.interface, self.pvid_vlan, self.trunk_vlans) + elif self.mode == "hybrid": + self.merge_hybrid_vlan(self.interface, self.pvid_vlan, self.tagged_vlans, self.untagged_vlans) + else: + self.merge_dot1qtunnel_vlan(self.interface, self.default_vlan) + + # unconfigured + else: + self.default_switchport(self.interface) + + self.get_end_state() + self.results['changed'] = self.changed + self.results['proposed'] = self.proposed + self.results['existing'] = self.existing + self.results['end_state'] = self.end_state + if self.changed: + self.results['updates'] = self.updates_cmd + else: + self.results['updates'] = list() + + self.module.exit_json(**self.results) + + +def main(): + """Module main""" + + argument_spec = dict( + interface=dict(required=True, type='str'), + mode=dict(choices=['access', 'trunk', 'dot1qtunnel', 'hybrid'], required=False), + default_vlan=dict(type='str', required=False), + pvid_vlan=dict(type='str', required=False), + trunk_vlans=dict(type='str', required=False), + untagged_vlans=dict(type='str', required=False), + tagged_vlans=dict(type='str', required=False), + state=dict(choices=['absent', 'present', 'unconfigured'], + default='present') + ) + + argument_spec.update(ce_argument_spec) + switchport = SwitchPort(argument_spec) + switchport.work() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/cloudengine/ce_vlan.py b/plugins/modules/network/cloudengine/ce_vlan.py new file mode 100644 index 0000000000..05d109456a --- /dev/null +++ b/plugins/modules/network/cloudengine/ce_vlan.py @@ -0,0 +1,691 @@ +#!/usr/bin/python +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. 
+# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: ce_vlan +short_description: Manages VLAN resources and attributes on Huawei CloudEngine switches. +description: + - Manages VLAN configurations on Huawei CloudEngine switches. +author: QijunPan (@QijunPan) +notes: + - This module requires the netconf system service be enabled on the remote device being managed. + - Recommended connection is C(netconf). + - This module also works with C(local) connections for legacy playbooks. +options: + vlan_id: + description: + - Single VLAN ID, in the range from 1 to 4094. + vlan_range: + description: + - Range of VLANs such as C(2-10) or C(2,5,10-15), etc. + name: + description: + - Name of VLAN, minimum of 1 character, maximum of 31 characters. + description: + description: + - Specify VLAN description, minimum of 1 character, maximum of 80 characters. + state: + description: + - Manage the state of the resource. + default: present + choices: ['present','absent'] +''' + +EXAMPLES = ''' +- name: vlan module test + hosts: cloudengine + connection: local + gather_facts: no + vars: + cli: + host: "{{ inventory_hostname }}" + port: "{{ ansible_ssh_port }}" + username: "{{ username }}" + password: "{{ password }}" + transport: cli + + tasks: + + - name: Ensure a range of VLANs are not present on the switch + ce_vlan: + vlan_range: "2-10,20,50,55-60,100-150" + state: absent + provider: "{{ cli }}" + + - name: Ensure VLAN 50 exists with the name WEB + ce_vlan: + vlan_id: 50 + name: WEB + state: absent + provider: "{{ cli }}" + + - name: Ensure VLAN is NOT on the device + ce_vlan: + vlan_id: 50 + state: absent + provider: "{{ cli }}" + +''' + +RETURN = ''' +proposed_vlans_list: + description: list of VLANs being proposed + returned: always + type: list + sample: ["100"] +existing_vlans_list: + description: list of existing VLANs on the switch prior to making changes + returned: always + type: list + sample: ["1", "2", "3", "4", "5", "20"] +end_state_vlans_list: + description: list of VLANs after the module is executed + returned: always + type: list + sample: ["1", "2", "3", "4", "5", "20", "100"] +proposed: + description: k/v pairs of parameters passed into module (does not include + vlan_id or vlan_range) + returned: always + type: dict + sample: {"vlan_id":"20", "name": "VLAN_APP", "description": "vlan for app" } +existing: + description: k/v pairs of existing vlan or null when using vlan_range + returned: always + type: dict + sample: {"vlan_id":"20", "name": "VLAN_APP", "description": "" } +end_state: + description: k/v pairs of the VLAN after executing module or null + when using vlan_range + returned: always + type: dict + sample: {"vlan_id":"20", "name": "VLAN_APP", "description": "vlan for app" } +updates: + description: command string sent to the device + returned: always + type: list + sample: ["vlan 20", "name VLAN20"] +changed: + description: check to see if a change was made on the device + returned: always + type: bool + sample: true +''' + +import re +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.network.cloudengine.ce import get_nc_config, set_nc_config, execute_nc_action, ce_argument_spec + +CE_NC_CREATE_VLAN = """ + + + + + %s + 
%s + %s + + + + + + +""" + +CE_NC_DELETE_VLAN = """ + + + + + %s + + + + +""" + +CE_NC_MERGE_VLAN_DES = """ + + + + + %s + %s + + + + + + +""" + +CE_NC_MERGE_VLAN_NAME = """ + + + + + %s + %s + + + + + + +""" + + +CE_NC_MERGE_VLAN = """ + + + + + %s + %s + %s + + + + + + +""" + +CE_NC_GET_VLAN = """ + + + + + %s + + + + + + +""" + +CE_NC_GET_VLANS = """ + + + + + + + + + + +""" + +CE_NC_CREATE_VLAN_BATCH = """ + + + + %s:%s + + + +""" + +CE_NC_DELETE_VLAN_BATCH = """ + + + + %s:%s + + + +""" + + +class Vlan(object): + """ + Manages VLAN resources and attributes + """ + + def __init__(self, argument_spec): + self.spec = argument_spec + self.module = None + self.init_module() + + # vlan config info + self.vlan_id = self.module.params['vlan_id'] + self.vlan_range = self.module.params['vlan_range'] + self.name = self.module.params['name'] + self.description = self.module.params['description'] + self.state = self.module.params['state'] + + # state + self.changed = False + self.vlan_exist = False + self.vlan_attr_exist = None + self.vlans_list_exist = list() + self.vlans_list_change = list() + self.updates_cmd = list() + self.results = dict() + self.vlan_attr_end = dict() + + def init_module(self): + """ + init ansible NetworkModule. + """ + + required_one_of = [["vlan_id", "vlan_range"]] + mutually_exclusive = [["vlan_id", "vlan_range"]] + + self.module = AnsibleModule( + argument_spec=self.spec, + required_one_of=required_one_of, + mutually_exclusive=mutually_exclusive, + supports_check_mode=True) + + def check_response(self, xml_str, xml_name): + """Check if response message is already succeed.""" + + if "" not in xml_str: + self.module.fail_json(msg='Error: %s failed.' % xml_name) + + def config_vlan(self, vlan_id, name='', description=''): + """Create vlan.""" + + if name is None: + name = '' + if description is None: + description = '' + + conf_str = CE_NC_CREATE_VLAN % (vlan_id, name, description) + recv_xml = set_nc_config(self.module, conf_str) + self.check_response(recv_xml, "CREATE_VLAN") + self.changed = True + + def merge_vlan(self, vlan_id, name, description): + """Merge vlan.""" + + conf_str = None + + if not name and description: + conf_str = CE_NC_MERGE_VLAN_DES % (vlan_id, description) + if not description and name: + conf_str = CE_NC_MERGE_VLAN_NAME % (vlan_id, name) + if description and name: + conf_str = CE_NC_MERGE_VLAN % (vlan_id, name, description) + + if not conf_str: + return + + recv_xml = set_nc_config(self.module, conf_str) + self.check_response(recv_xml, "MERGE_VLAN") + self.changed = True + + def create_vlan_batch(self, vlan_list): + """Create vlan batch.""" + + if not vlan_list: + return + + vlan_bitmap = self.vlan_list_to_bitmap(vlan_list) + xmlstr = CE_NC_CREATE_VLAN_BATCH % (vlan_bitmap, vlan_bitmap) + + recv_xml = execute_nc_action(self.module, xmlstr) + self.check_response(recv_xml, "CREATE_VLAN_BATCH") + self.updates_cmd.append('vlan batch %s' % ( + self.vlan_range.replace(',', ' ').replace('-', ' to '))) + self.changed = True + + def delete_vlan_batch(self, vlan_list): + """Delete vlan batch.""" + + if not vlan_list: + return + + vlan_bitmap = self.vlan_list_to_bitmap(vlan_list) + xmlstr = CE_NC_DELETE_VLAN_BATCH % (vlan_bitmap, vlan_bitmap) + + recv_xml = execute_nc_action(self.module, xmlstr) + self.check_response(recv_xml, "DELETE_VLAN_BATCH") + self.updates_cmd.append('undo vlan batch %s' % ( + self.vlan_range.replace(',', ' ').replace('-', ' to '))) + self.changed = True + + def undo_config_vlan(self, vlanid): + """Delete vlan.""" + + conf_str = 
CE_NC_DELETE_VLAN % vlanid + recv_xml = set_nc_config(self.module, conf_str) + self.check_response(recv_xml, "DELETE_VLAN") + self.changed = True + self.updates_cmd.append('undo vlan %s' % self.vlan_id) + + def get_vlan_attr(self, vlan_id): + """ get vlan attributes.""" + + conf_str = CE_NC_GET_VLAN % vlan_id + xml_str = get_nc_config(self.module, conf_str) + attr = dict() + + if "" in xml_str: + return attr + else: + re_find_id = re.findall(r'.*(.*).*\s*', xml_str) + re_find_name = re.findall(r'.*(.*).*\s*', xml_str) + re_find_desc = re.findall(r'.*(.*).*\s*', xml_str) + + if re_find_id: + if re_find_name: + attr = dict(vlan_id=re_find_id[0], name=re_find_name[0], + description=re_find_desc[0]) + else: + attr = dict(vlan_id=re_find_id[0], name=None, + description=re_find_desc[0]) + return attr + + def get_vlans_name(self): + """ get all vlan vid and its name list, + sample: [ ("20", "VLAN_NAME_20"), ("30", "VLAN_NAME_30") ]""" + + conf_str = CE_NC_GET_VLANS + xml_str = get_nc_config(self.module, conf_str) + vlan_list = list() + + if "" in xml_str: + return vlan_list + else: + vlan_list = re.findall( + r'.*(.*).*\s*(.*).*', xml_str) + return vlan_list + + def get_vlans_list(self): + """ get all vlan vid list, sample: [ "20", "30", "31" ]""" + + conf_str = CE_NC_GET_VLANS + xml_str = get_nc_config(self.module, conf_str) + vlan_list = list() + + if "" in xml_str: + return vlan_list + else: + vlan_list = re.findall( + r'.*(.*).*', xml_str) + return vlan_list + + def vlan_series(self, vlanid_s): + """ convert vlan range to list """ + + vlan_list = [] + peerlistlen = len(vlanid_s) + if peerlistlen != 2: + self.module.fail_json(msg='Error: Format of vlanid is invalid.') + for num in range(peerlistlen): + if not vlanid_s[num].isdigit(): + self.module.fail_json( + msg='Error: Format of vlanid is invalid.') + if int(vlanid_s[0]) > int(vlanid_s[1]): + self.module.fail_json(msg='Error: Format of vlanid is invalid.') + elif int(vlanid_s[0]) == int(vlanid_s[1]): + vlan_list.append(str(vlanid_s[0])) + return vlan_list + for num in range(int(vlanid_s[0]), int(vlanid_s[1])): + vlan_list.append(str(num)) + vlan_list.append(vlanid_s[1]) + + return vlan_list + + def vlan_region(self, vlanid_list): + """ convert vlan range to vlan list """ + + vlan_list = [] + peerlistlen = len(vlanid_list) + for num in range(peerlistlen): + if vlanid_list[num].isdigit(): + vlan_list.append(vlanid_list[num]) + else: + vlan_s = self.vlan_series(vlanid_list[num].split('-')) + vlan_list.extend(vlan_s) + + return vlan_list + + def vlan_range_to_list(self, vlan_range): + """ convert vlan range to vlan list """ + + vlan_list = self.vlan_region(vlan_range.split(',')) + + return vlan_list + + def vlan_list_to_bitmap(self, vlanlist): + """ convert vlan list to vlan bitmap """ + + vlan_bit = ['0'] * 1024 + bit_int = [0] * 1024 + + vlan_list_len = len(vlanlist) + for num in range(vlan_list_len): + tagged_vlans = int(vlanlist[num]) + if tagged_vlans <= 0 or tagged_vlans > 4094: + self.module.fail_json( + msg='Error: Vlan id is not in the range from 1 to 4094.') + j = tagged_vlans // 4 + bit_int[j] |= 0x8 >> (tagged_vlans % 4) + vlan_bit[j] = hex(bit_int[j])[2] + + vlan_xml = ''.join(vlan_bit) + + return vlan_xml + + def check_params(self): + """Check all input params""" + + if not self.vlan_id and self.description: + self.module.fail_json( + msg='Error: Vlan description could be set only at one vlan.') + + if not self.vlan_id and self.name: + self.module.fail_json( + msg='Error: Vlan name could be set only at one vlan.') + + # check vlan 
id + if self.vlan_id: + if not self.vlan_id.isdigit(): + self.module.fail_json( + msg='Error: Vlan id is not digit.') + if int(self.vlan_id) <= 0 or int(self.vlan_id) > 4094: + self.module.fail_json( + msg='Error: Vlan id is not in the range from 1 to 4094.') + + # check vlan description + if self.description: + if len(self.description) > 81 or len(self.description.replace(' ', '')) < 1: + self.module.fail_json( + msg='Error: vlan description is not in the range from 1 to 80.') + + # check vlan name + if self.name: + if len(self.name) > 31 or len(self.name.replace(' ', '')) < 1: + self.module.fail_json( + msg='Error: Vlan name is not in the range from 1 to 31.') + + def get_proposed(self): + """ + get proposed config. + """ + + if self.vlans_list_change: + if self.state == 'present': + proposed_vlans_tmp = list(self.vlans_list_change) + proposed_vlans_tmp.extend(self.vlans_list_exist) + self.results['proposed_vlans_list'] = list( + set(proposed_vlans_tmp)) + else: + self.results['proposed_vlans_list'] = list( + set(self.vlans_list_exist) - set(self.vlans_list_change)) + self.results['proposed_vlans_list'].sort() + else: + self.results['proposed_vlans_list'] = self.vlans_list_exist + + if self.vlan_id: + if self.state == "present": + self.results['proposed'] = dict( + vlan_id=self.vlan_id, + name=self.name, + description=self.description + ) + else: + self.results['proposed'] = None + else: + self.results['proposed'] = None + + def get_existing(self): + """ + get existing config. + """ + + self.results['existing_vlans_list'] = self.vlans_list_exist + + if self.vlan_id: + if self.vlan_attr_exist: + self.results['existing'] = dict( + vlan_id=self.vlan_attr_exist['vlan_id'], + name=self.vlan_attr_exist['name'], + description=self.vlan_attr_exist['description'] + ) + else: + self.results['existing'] = None + else: + self.results['existing'] = None + + def get_end_state(self): + """ + get end state config. + """ + + self.results['end_state_vlans_list'] = self.get_vlans_list() + + if self.vlan_id: + if self.vlan_attr_end: + self.results['end_state'] = dict( + vlan_id=self.vlan_attr_end['vlan_id'], + name=self.vlan_attr_end['name'], + description=self.vlan_attr_end['description'] + ) + else: + self.results['end_state'] = None + + else: + self.results['end_state'] = None + + def work(self): + """ + worker. 
+ """ + + # check param + self.check_params() + + # get all vlan info + self.vlans_list_exist = self.get_vlans_list() + + # get vlan attributes + if self.vlan_id: + self.vlans_list_change.append(self.vlan_id) + self.vlan_attr_exist = self.get_vlan_attr(self.vlan_id) + if self.vlan_attr_exist: + self.vlan_exist = True + + if self.vlan_range: + new_vlans_tmp = self.vlan_range_to_list(self.vlan_range) + if self.state == 'present': + self.vlans_list_change = list( + set(new_vlans_tmp) - set(self.vlans_list_exist)) + else: + self.vlans_list_change = [ + val for val in new_vlans_tmp if val in self.vlans_list_exist] + + if self.state == 'present': + if self.vlan_id: + if not self.vlan_exist: + # create a new vlan + self.config_vlan(self.vlan_id, self.name, self.description) + elif self.description and self.description != self.vlan_attr_exist['description']: + # merge vlan description + self.merge_vlan(self.vlan_id, self.name, self.description) + elif self.name and self.name != self.vlan_attr_exist['name']: + # merge vlan name + self.merge_vlan(self.vlan_id, self.name, self.description) + + # update command for results + if self.changed: + self.updates_cmd.append('vlan %s' % self.vlan_id) + if self.name: + self.updates_cmd.append('name %s' % self.name) + if self.description: + self.updates_cmd.append( + 'description %s' % self.description) + elif self.vlan_range and self.vlans_list_change: + self.create_vlan_batch(self.vlans_list_change) + else: # absent + if self.vlan_id: + if self.vlan_exist: + # delete the vlan + self.undo_config_vlan(self.vlan_id) + elif self.vlan_range and self.vlans_list_change: + self.delete_vlan_batch(self.vlans_list_change) + + # result + if self.vlan_id: + self.vlan_attr_end = self.get_vlan_attr(self.vlan_id) + + self.get_existing() + self.get_proposed() + self.get_end_state() + + self.results['changed'] = self.changed + if self.changed: + self.results['updates'] = self.updates_cmd + else: + self.results['updates'] = list() + + self.module.exit_json(**self.results) + + +def main(): + """ module main """ + + argument_spec = dict( + vlan_id=dict(required=False), + vlan_range=dict(required=False, type='str'), + name=dict(required=False, type='str'), + description=dict(required=False, type='str'), + state=dict(choices=['absent', 'present'], + default='present', required=False), + ) + + argument_spec.update(ce_argument_spec) + vlancfg = Vlan(argument_spec) + vlancfg.work() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/cloudengine/ce_vrf.py b/plugins/modules/network/cloudengine/ce_vrf.py new file mode 100644 index 0000000000..7586e03292 --- /dev/null +++ b/plugins/modules/network/cloudengine/ce_vrf.py @@ -0,0 +1,356 @@ +#!/usr/bin/python +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+# + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: ce_vrf +short_description: Manages VPN instance on HUAWEI CloudEngine switches. +description: + - Manages VPN instance of HUAWEI CloudEngine switches. +author: Yang yang (@QijunPan) +notes: + - If I(state=absent), the route will be removed, regardless of the non-required options. + - This module requires the netconf system service be enabled on the remote device being managed. + - Recommended connection is C(netconf). + - This module also works with C(local) connections for legacy playbooks. +options: + vrf: + description: + - VPN instance, the length of vrf name is 1 - 31, i.e. "test", but can not be C(_public_). + required: true + description: + description: + - Description of the vrf, the string length is 1 - 242 . + state: + description: + - Manage the state of the resource. + choices: ['present','absent'] + default: present +''' + +EXAMPLES = ''' +- name: vrf module test + hosts: cloudengine + connection: local + gather_facts: no + vars: + cli: + host: "{{ inventory_hostname }}" + port: "{{ ansible_ssh_port }}" + username: "{{ username }}" + password: "{{ password }}" + transport: cli + + tasks: + + - name: Config a vpn install named vpna, description is test + ce_vrf: + vrf: vpna + description: test + state: present + provider: "{{ cli }}" + - name: Delete a vpn install named vpna + ce_vrf: + vrf: vpna + state: absent + provider: "{{ cli }}" +''' +RETURN = ''' +proposed: + description: k/v pairs of parameters passed into module + returned: always + type: dict + sample: {"vrf": "vpna", + "description": "test", + "state": "present"} +existing: + description: k/v pairs of existing switchport + returned: always + type: dict + sample: {} +end_state: + description: k/v pairs of switchport after module execution + returned: always + type: dict + sample: {"vrf": "vpna", + "description": "test", + "present": "present"} +updates: + description: command list sent to the device + returned: always + type: list + sample: ["ip vpn-instance vpna", + "description test"] +changed: + description: check to see if a change was made on the device + returned: always + type: bool + sample: true +''' + +from xml.etree import ElementTree +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.network.cloudengine.ce import get_nc_config, set_nc_config, ce_argument_spec + + +CE_NC_GET_VRF = """ + + + + + + + + + + + + +""" + +CE_NC_CREATE_VRF = """ + + + + + %s + %s + + + + +""" + +CE_NC_DELETE_VRF = """ + + + + + %s + %s + + + + +""" + + +def build_config_xml(xmlstr): + """build_config_xml""" + + return ' ' + xmlstr + ' ' + + +class Vrf(object): + """Manage vpn instance""" + + def __init__(self, argument_spec, ): + self.spec = argument_spec + self.module = None + self.init_module() + + # vpn instance info + self.vrf = self.module.params['vrf'] + self.description = self.module.params['description'] + self.state = self.module.params['state'] + + # state + self.changed = False + self.updates_cmd = list() + self.results = dict() + self.proposed = dict() + self.existing = dict() + self.end_state = dict() + + def init_module(self): + """init_module""" + + self.module = AnsibleModule( + argument_spec=self.spec, supports_check_mode=True) + + def check_response(self, xml_str, xml_name): + """Check if response message is 
already successful."""
+
+        if "<ok/>" not in xml_str:
+            self.module.fail_json(msg='Error: %s failed.' % xml_name)
+
+    def set_update_cmd(self):
+        """ set update command"""
+        if not self.changed:
+            return
+        if self.state == "present":
+            self.updates_cmd.append('ip vpn-instance %s' % (self.vrf))
+            if self.description:
+                self.updates_cmd.append('description %s' % (self.description))
+        else:
+            self.updates_cmd.append('undo ip vpn-instance %s' % (self.vrf))
+
+    def get_vrf(self):
+        """ check whether the vrf needs to be changed"""
+
+        getxmlstr = CE_NC_GET_VRF
+        xml_str = get_nc_config(self.module, getxmlstr)
+        xml_str = xml_str.replace('\r', '').replace('\n', '').\
+            replace('xmlns="urn:ietf:params:xml:ns:netconf:base:1.0"', "").\
+            replace('xmlns="http://www.huawei.com/netconf/vrp"', "")
+
+        root = ElementTree.fromstring(xml_str)
+        vpn_instances = root.findall(
+            "l3vpn/l3vpncomm/l3vpnInstances/l3vpnInstance")
+        if vpn_instances:
+            for vpn_instance in vpn_instances:
+                if vpn_instance.find('vrfName').text == self.vrf:
+                    if vpn_instance.find('vrfDescription').text == self.description:
+                        if self.state == "present":
+                            return False
+                        else:
+                            return True
+                    else:
+                        return True
+            return self.state == "present"
+        else:
+            return self.state == "present"
+
+    def check_params(self):
+        """Check all input params"""
+
+        # vrf and description check
+        if self.vrf == '_public_':
+            self.module.fail_json(
+                msg='Error: The vrf name _public_ is reserved.')
+        if len(self.vrf) < 1 or len(self.vrf) > 31:
+            self.module.fail_json(
+                msg='Error: The vrf name length must be between 1 and 31.')
+        if self.description:
+            if len(self.description) < 1 or len(self.description) > 242:
+                self.module.fail_json(
+                    msg='Error: The vrf description length must be between 1 and 242.')
+
+    def operate_vrf(self):
+        """config/delete vrf"""
+        if not self.changed:
+            return
+        if self.state == "present":
+            if self.description is None:
+                configxmlstr = CE_NC_CREATE_VRF % (self.vrf, '')
+            else:
+                configxmlstr = CE_NC_CREATE_VRF % (self.vrf, self.description)
+        else:
+            configxmlstr = CE_NC_DELETE_VRF % (self.vrf, self.description)
+
+        conf_str = build_config_xml(configxmlstr)
+
+        recv_xml = set_nc_config(self.module, conf_str)
+        self.check_response(recv_xml, "OPERATE_VRF")
+
+    def get_proposed(self):
+        """get_proposed"""
+
+        if self.state == 'present':
+            self.proposed['vrf'] = self.vrf
+            if self.description:
+                self.proposed['description'] = self.description
+
+        else:
+            self.proposed = dict()
+        self.proposed['state'] = self.state
+
+    def get_existing(self):
+        """get_existing"""
+
+        change = self.get_vrf()
+        if change:
+            if self.state == 'present':
+                self.existing = dict()
+            else:
+                self.existing['vrf'] = self.vrf
+                if self.description:
+                    self.existing['description'] = self.description
+            self.changed = True
+        else:
+            if self.state == 'absent':
+                self.existing = dict()
+            else:
+                self.existing['vrf'] = self.vrf
+                if self.description:
+                    self.existing['description'] = self.description
+            self.changed = False
+
+    def get_end_state(self):
+        """get_end_state"""
+
+        change = self.get_vrf()
+        if not change:
+            if self.state == 'present':
+                self.end_state['vrf'] = self.vrf
+                if self.description:
+                    self.end_state['description'] = self.description
+            else:
+                self.end_state = dict()
+        else:
+            if self.state == 'present':
+                self.end_state = dict()
+            else:
+                self.end_state['vrf'] = self.vrf
+                if self.description:
+                    self.end_state['description'] = self.description
+
+    def work(self):
+        """worker"""
+
+        self.check_params()
+        self.get_existing()
+        self.get_proposed()
+        self.operate_vrf()
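# Editorial note, not part of the original module: by this point get_existing()
# has already run get_vrf() and set self.changed, so the operate_vrf() call
# above is a no-op unless a real difference exists. set_update_cmd() below only
# records the CLI equivalent of what was pushed, e.g.
#     ['ip vpn-instance vpna', 'description test']
# for a newly created instance; the device itself is configured over NETCONF,
# and the 'updates' result key is purely informational.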
self.set_update_cmd() + self.get_end_state() + self.results['changed'] = self.changed + self.results['proposed'] = self.proposed + self.results['existing'] = self.existing + self.results['end_state'] = self.end_state + if self.changed: + self.results['updates'] = self.updates_cmd + else: + self.results['updates'] = list() + + self.module.exit_json(**self.results) + + +def main(): + """main""" + + argument_spec = dict( + vrf=dict(required=True, type='str'), + description=dict(required=False, type='str'), + state=dict(choices=['absent', 'present'], + default='present', required=False), + ) + argument_spec.update(ce_argument_spec) + interface = Vrf(argument_spec) + interface.work() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/cloudengine/ce_vrf_af.py b/plugins/modules/network/cloudengine/ce_vrf_af.py new file mode 100644 index 0000000000..562ffd800a --- /dev/null +++ b/plugins/modules/network/cloudengine/ce_vrf_af.py @@ -0,0 +1,851 @@ +#!/usr/bin/python +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} +DOCUMENTATION = ''' +--- +module: ce_vrf_af +short_description: Manages VPN instance address family on HUAWEI CloudEngine switches. +description: + - Manages VPN instance address family of HUAWEI CloudEngine switches. +author: Yang yang (@QijunPan) +notes: + - If I(state=absent), the vrf will be removed, regardless of the non-required parameters. + - This module requires the netconf system service be enabled on the remote device being managed. + - Recommended connection is C(netconf). + - This module also works with C(local) connections for legacy playbooks. +options: + vrf: + description: + - VPN instance. + required: true + vrf_aftype: + description: + - VPN instance address family. + choices: ['v4','v6'] + default: v4 + route_distinguisher: + description: + - VPN instance route distinguisher,the RD used to distinguish same route prefix from different vpn. + The RD must be setted before setting vpn_target_value. + vpn_target_state: + description: + - Manage the state of the vpn target. + choices: ['present','absent'] + vpn_target_type: + description: + - VPN instance vpn target type. + choices: ['export_extcommunity', 'import_extcommunity'] + vpn_target_value: + description: + - VPN instance target value. Such as X.X.X.X:number<0-65535> or number<0-65535>:number<0-4294967295> + or number<0-65535>.number<0-65535>:number<0-65535> or number<65536-4294967295>:number<0-65535> + but not support 0:0 and 0.0:0. + evpn: + description: + - Is extend vpn or normal vpn. + type: bool + default: 'no' + state: + description: + - Manage the state of the af. 
+ choices: ['present','absent'] + default: present +''' + +EXAMPLES = ''' +- name: vrf af module test + hosts: cloudengine + connection: local + gather_facts: no + vars: + cli: + host: "{{ inventory_hostname }}" + port: "{{ ansible_ssh_port }}" + username: "{{ username }}" + password: "{{ password }}" + transport: cli + + tasks: + + - name: Config vpna, set address family is ipv4 + ce_vrf_af: + vrf: vpna + vrf_aftype: v4 + state: present + provider: "{{ cli }}" + - name: Config vpna, delete address family is ipv4 + ce_vrf_af: + vrf: vpna + vrf_aftype: v4 + state: absent + provider: "{{ cli }}" + - name: Config vpna, set address family is ipv4,rd=1:1,set vpn_target_type=export_extcommunity,vpn_target_value=2:2 + ce_vrf_af: + vrf: vpna + vrf_aftype: v4 + route_distinguisher: 1:1 + vpn_target_type: export_extcommunity + vpn_target_value: 2:2 + vpn_target_state: present + state: present + provider: "{{ cli }}" + - name: Config vpna, set address family is ipv4,rd=1:1,delete vpn_target_type=export_extcommunity,vpn_target_value=2:2 + ce_vrf_af: + vrf: vpna + vrf_aftype: v4 + route_distinguisher: 1:1 + vpn_target_type: export_extcommunity + vpn_target_value: 2:2 + vpn_target_state: absent + state: present + provider: "{{ cli }}" +''' +RETURN = ''' +proposed: + description: k/v pairs of parameters passed into module + returned: always + type: dict + sample: {"vrf": "vpna", + "vrf_aftype": "v4", + "state": "present", + "vpn_targe_state":"absent", + "evpn": "none", + "vpn_target_type": "none", + "vpn_target_value": "none"} +existing: + description: k/v pairs of existing switchport + returned: always + type: dict + sample: { + "route_distinguisher": [ + "1:1", + "2:2" + ], + "vpn_target_type": [], + "vpn_target_value": [], + "vrf": "vpna", + "vrf_aftype": [ + "ipv4uni", + "ipv6uni" + ] + } +end_state: + description: k/v pairs of switchport after module execution + returned: always + type: dict + sample: { + "route_distinguisher": [ + "1:1", + "2:2" + ], + "vpn_target_type": [ + "import_extcommunity", + "3:3" + ], + "vpn_target_value": [], + "vrf": "vpna", + "vrf_aftype": [ + "ipv4uni", + "ipv6uni" + ] + } +updates: + description: command list sent to the device + returned: always + type: list + sample: [ + "ip vpn-instance vpna", + "vpn-target 3:3 import_extcommunity" + ] +changed: + description: check to see if a change was made on the device + returned: always + type: bool + sample: true +''' + + +import re +from xml.etree import ElementTree +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.network.cloudengine.ce import get_nc_config, set_nc_config, ce_argument_spec + +CE_NC_GET_VRF = """ + + + + + + + + + + + + +""" + +CE_NC_GET_VRF_AF = """ + + + + + + %s + + + + %s + + + + + + + +""" + +CE_NC_DELETE_VRF_AF = """ + + + + + %s + + + %s + + + + + + +""" + +CE_NC_CREATE_VRF_AF = """ + + + + + %s + + + %s + %s%s + + + + + +""" +CE_NC_CREATE_VRF_TARGET = """ + + + %s + %s + + +""" + +CE_NC_DELETE_VRF_TARGET = """ + + + %s + %s + + +""" + +CE_NC_GET_VRF_TARGET = """ + + + + + + +""" + +CE_NC_CREATE_EXTEND_VRF_TARGET = """ + + + %s + %s + evpn + + +""" + +CE_NC_DELETE_EXTEND_VRF_TARGET = """ + + + %s + %s + evpn + + +""" + +CE_NC_GET_EXTEND_VRF_TARGET = """ + + + + + + + +""" + + +def build_config_xml(xmlstr): + """build_config_xml""" + + return ' ' + xmlstr + ' ' + + +def is_valid_value(vrf_targe_value): + """check if the vrf target value is valid""" + + each_num = None + if len(vrf_targe_value) > 21 or len(vrf_targe_value) < 3: + 
return False + if vrf_targe_value.find(':') == -1: + return False + elif vrf_targe_value == '0:0': + return False + elif vrf_targe_value == '0.0:0': + return False + else: + value_list = vrf_targe_value.split(':') + if value_list[0].find('.') != -1: + if not value_list[1].isdigit(): + return False + if int(value_list[1]) > 65535: + return False + value = value_list[0].split('.') + if len(value) == 4: + for each_num in value: + if not each_num.isdigit(): + return False + if int(each_num) > 255: + return False + return True + elif len(value) == 2: + for each_num in value: + if not each_num.isdigit(): + return False + if int(each_num) > 65535: + return False + return True + else: + return False + elif not value_list[0].isdigit(): + return False + elif not value_list[1].isdigit(): + return False + elif int(value_list[0]) < 65536 and int(value_list[1]) < 4294967296: + return True + elif int(value_list[0]) > 65535 and int(value_list[0]) < 4294967296: + return bool(int(value_list[1]) < 65536) + else: + return False + + +class VrfAf(object): + """manage the vrf address family and export/import target""" + + def __init__(self, argument_spec, ): + self.spec = argument_spec + self.module = None + self.init_module() + + # vpn instance info + self.vrf = self.module.params['vrf'] + self.vrf_aftype = self.module.params['vrf_aftype'] + if self.vrf_aftype == 'v4': + self.vrf_aftype = 'ipv4uni' + else: + self.vrf_aftype = 'ipv6uni' + self.route_distinguisher = self.module.params['route_distinguisher'] + self.evpn = self.module.params['evpn'] + self.vpn_target_type = self.module.params['vpn_target_type'] + self.vpn_target_value = self.module.params['vpn_target_value'] + self.vpn_target_state = self.module.params['vpn_target_state'] + self.state = self.module.params['state'] + + # state + self.changed = False + self.updates_cmd = list() + self.results = dict() + self.proposed = dict() + self.existing = dict() + self.end_state = dict() + + self.vpn_target_changed = False + self.vrf_af_type_changed = False + self.vrf_rd_changed = False + self.vrf_af_info = dict() + + def init_module(self): + """init_module""" + + self.module = AnsibleModule( + argument_spec=self.spec, supports_check_mode=True) + + def check_response(self, xml_str, xml_name): + """Check if response message is already succeed.""" + + if "" not in xml_str: + self.module.fail_json(msg='Error: %s failed.' 
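# Context for the success check above: a successful NETCONF <edit-config>
# reply is an <rpc-reply> carrying an <ok/> element (RFC 6241), e.g.
#   <rpc-reply message-id="101"><ok/></rpc-reply>
# which is the tag these CloudEngine modules look for in the device response.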
% xml_name) + + def is_vrf_af_exist(self): + """is vrf address family exist""" + + if not self.vrf_af_info: + return False + + for vrf_af_ele in self.vrf_af_info["vpnInstAF"]: + if vrf_af_ele["afType"] == self.vrf_aftype: + return True + else: + continue + return False + + def get_exist_rd(self): + """get exist route distinguisher """ + + if not self.vrf_af_info: + return None + + for vrf_af_ele in self.vrf_af_info["vpnInstAF"]: + if vrf_af_ele["afType"] == self.vrf_aftype: + if vrf_af_ele["vrfRD"] is None: + return None + else: + return vrf_af_ele["vrfRD"] + else: + continue + return None + + def is_vrf_rd_exist(self): + """is vrf route distinguisher exist""" + + if not self.vrf_af_info: + return False + + for vrf_af_ele in self.vrf_af_info["vpnInstAF"]: + if vrf_af_ele["afType"] == self.vrf_aftype: + if vrf_af_ele["vrfRD"] is None: + return False + if self.route_distinguisher is not None: + return bool(vrf_af_ele["vrfRD"] == self.route_distinguisher) + else: + return True + else: + continue + return False + + def is_vrf_rt_exist(self): + """is vpn target exist""" + + if not self.vrf_af_info: + return False + + for vrf_af_ele in self.vrf_af_info["vpnInstAF"]: + if vrf_af_ele["afType"] == self.vrf_aftype: + if self.evpn is False: + if not vrf_af_ele.get("vpnTargets"): + return False + for vpn_target in vrf_af_ele.get("vpnTargets"): + if vpn_target["vrfRTType"] == self.vpn_target_type \ + and vpn_target["vrfRTValue"] == self.vpn_target_value: + return True + else: + continue + else: + if not vrf_af_ele.get("evpnTargets"): + return False + for evpn_target in vrf_af_ele.get("evpnTargets"): + if evpn_target["vrfRTType"] == self.vpn_target_type \ + and evpn_target["vrfRTValue"] == self.vpn_target_value: + return True + else: + continue + else: + continue + return False + + def set_update_cmd(self): + """ set update command""" + if not self.changed: + return + if self.vpn_target_type: + if self.vpn_target_type == "export_extcommunity": + vpn_target_type = "export-extcommunity" + else: + vpn_target_type = "import-extcommunity" + if self.state == "present": + self.updates_cmd.append('ip vpn-instance %s' % (self.vrf)) + if self.vrf_aftype == 'ipv4uni': + self.updates_cmd.append('ipv4-family') + elif self.vrf_aftype == 'ipv6uni': + self.updates_cmd.append('ipv6-family') + if self.route_distinguisher: + if not self.is_vrf_rd_exist(): + self.updates_cmd.append( + 'route-distinguisher %s' % self.route_distinguisher) + else: + if self.get_exist_rd() is not None: + self.updates_cmd.append( + 'undo route-distinguisher %s' % self.get_exist_rd()) + if self.vpn_target_state == "present": + if not self.is_vrf_rt_exist(): + if self.evpn is False: + self.updates_cmd.append( + 'vpn-target %s %s' % (self.vpn_target_value, vpn_target_type)) + else: + self.updates_cmd.append( + 'vpn-target %s %s evpn' % (self.vpn_target_value, vpn_target_type)) + elif self.vpn_target_state == "absent": + if self.is_vrf_rt_exist(): + if self.evpn is False: + self.updates_cmd.append( + 'undo vpn-target %s %s' % (self.vpn_target_value, vpn_target_type)) + else: + self.updates_cmd.append( + 'undo vpn-target %s %s evpn' % (self.vpn_target_value, vpn_target_type)) + else: + self.updates_cmd.append('ip vpn-instance %s' % (self.vrf)) + if self.vrf_aftype == 'ipv4uni': + self.updates_cmd.append('undo ipv4-family') + elif self.vrf_aftype == 'ipv6uni': + self.updates_cmd.append('undo ipv6-family') + + def get_vrf(self): + """ check if vrf is need to change""" + + getxmlstr = CE_NC_GET_VRF + xmlstr_new_1 = (self.vrf.lower()) + + xml_str = 
get_nc_config(self.module, getxmlstr) + re_find_1 = re.findall( + r'.*(.*).*', xml_str.lower()) + + if re_find_1 is None: + return False + + return xmlstr_new_1 in re_find_1 + + def get_vrf_af(self): + """ check if vrf is need to change""" + + self.vrf_af_info["vpnInstAF"] = list() + if self.evpn is True: + getxmlstr = CE_NC_GET_VRF_AF % ( + self.vrf, CE_NC_GET_EXTEND_VRF_TARGET) + else: + getxmlstr = CE_NC_GET_VRF_AF % (self.vrf, CE_NC_GET_VRF_TARGET) + + xml_str = get_nc_config(self.module, getxmlstr) + + if 'data/' in xml_str: + return self.state == 'present' + xml_str = xml_str.replace('\r', '').replace('\n', '').\ + replace('xmlns="urn:ietf:params:xml:ns:netconf:base:1.0"', "").\ + replace('xmlns="http://www.huawei.com/netconf/vrp"', "") + + root = ElementTree.fromstring(xml_str) + + # get the vpn address family and RD text + vrf_addr_types = root.findall( + "l3vpn/l3vpncomm/l3vpnInstances/l3vpnInstance/vpnInstAFs/vpnInstAF") + if vrf_addr_types: + for vrf_addr_type in vrf_addr_types: + vrf_af_info = dict() + for vrf_addr_type_ele in vrf_addr_type: + if vrf_addr_type_ele.tag in ["vrfName", "afType", "vrfRD"]: + vrf_af_info[vrf_addr_type_ele.tag] = vrf_addr_type_ele.text + if vrf_addr_type_ele.tag == 'vpnTargets': + vrf_af_info["vpnTargets"] = list() + for rtargets in vrf_addr_type_ele: + rt_dict = dict() + for rtarget in rtargets: + if rtarget.tag in ["vrfRTValue", "vrfRTType"]: + rt_dict[rtarget.tag] = rtarget.text + vrf_af_info["vpnTargets"].append(rt_dict) + if vrf_addr_type_ele.tag == 'exVpnTargets': + vrf_af_info["evpnTargets"] = list() + for rtargets in vrf_addr_type_ele: + rt_dict = dict() + for rtarget in rtargets: + if rtarget.tag in ["vrfRTValue", "vrfRTType"]: + rt_dict[rtarget.tag] = rtarget.text + vrf_af_info["evpnTargets"].append(rt_dict) + self.vrf_af_info["vpnInstAF"].append(vrf_af_info) + + def check_params(self): + """Check all input params""" + + # vrf and description check + if self.vrf == '_public_': + self.module.fail_json( + msg='Error: The vrf name _public_ is reserved.') + if not self.get_vrf(): + self.module.fail_json( + msg='Error: The vrf name do not exist.') + if self.state == 'present': + if self.route_distinguisher: + if not is_valid_value(self.route_distinguisher): + self.module.fail_json(msg='Error:The vrf route distinguisher length must between 3 ~ 21,' + 'i.e. X.X.X.X:number<0-65535> or number<0-65535>:number<0-4294967295>' + 'or number<0-65535>.number<0-65535>:number<0-65535>' + 'or number<65536-4294967295>:number<0-65535>' + ' but not be 0:0 or 0.0:0.') + if not self.vpn_target_state: + if self.vpn_target_value or self.vpn_target_type: + self.module.fail_json( + msg='Error: The vpn target state should be exist.') + if self.vpn_target_state: + if not self.vpn_target_value or not self.vpn_target_type: + self.module.fail_json( + msg='Error: The vpn target value and type should be exist.') + if self.vpn_target_value: + if not is_valid_value(self.vpn_target_value): + self.module.fail_json(msg='Error:The vrf target value length must between 3 ~ 21,' + 'i.e. 
X.X.X.X:number<0-65535> or number<0-65535>:number<0-4294967295>' + 'or number<0-65535>.number<0-65535>:number<0-65535>' + 'or number<65536-4294967295>:number<0-65535>' + ' but not be 0:0 or 0.0:0.') + + def operate_vrf_af(self): + """config/delete vrf""" + + vrf_target_operate = '' + if self.route_distinguisher is None: + route_d = '' + else: + route_d = self.route_distinguisher + + if self.state == 'present': + if self.vrf_aftype: + if self.is_vrf_af_exist(): + self.vrf_af_type_changed = False + else: + self.vrf_af_type_changed = True + configxmlstr = CE_NC_CREATE_VRF_AF % ( + self.vrf, self.vrf_aftype, route_d, vrf_target_operate) + else: + self.vrf_af_type_changed = bool(self.is_vrf_af_exist()) + + if self.vpn_target_state == 'present': + if self.evpn is False and not self.is_vrf_rt_exist(): + vrf_target_operate = CE_NC_CREATE_VRF_TARGET % ( + self.vpn_target_type, self.vpn_target_value) + configxmlstr = CE_NC_CREATE_VRF_AF % ( + self.vrf, self.vrf_aftype, route_d, vrf_target_operate) + self.vpn_target_changed = True + if self.evpn is True and not self.is_vrf_rt_exist(): + vrf_target_operate = CE_NC_CREATE_EXTEND_VRF_TARGET % ( + self.vpn_target_type, self.vpn_target_value) + configxmlstr = CE_NC_CREATE_VRF_AF % ( + self.vrf, self.vrf_aftype, route_d, vrf_target_operate) + self.vpn_target_changed = True + elif self.vpn_target_state == 'absent': + if self.evpn is False and self.is_vrf_rt_exist(): + vrf_target_operate = CE_NC_DELETE_VRF_TARGET % ( + self.vpn_target_type, self.vpn_target_value) + configxmlstr = CE_NC_CREATE_VRF_AF % ( + self.vrf, self.vrf_aftype, route_d, vrf_target_operate) + self.vpn_target_changed = True + if self.evpn is True and self.is_vrf_rt_exist(): + vrf_target_operate = CE_NC_DELETE_EXTEND_VRF_TARGET % ( + self.vpn_target_type, self.vpn_target_value) + configxmlstr = CE_NC_CREATE_VRF_AF % ( + self.vrf, self.vrf_aftype, route_d, vrf_target_operate) + self.vpn_target_changed = True + else: + if self.route_distinguisher: + if not self.is_vrf_rd_exist(): + configxmlstr = CE_NC_CREATE_VRF_AF % ( + self.vrf, self.vrf_aftype, route_d, vrf_target_operate) + self.vrf_rd_changed = True + else: + self.vrf_rd_changed = False + else: + if self.is_vrf_rd_exist(): + configxmlstr = CE_NC_CREATE_VRF_AF % ( + self.vrf, self.vrf_aftype, route_d, vrf_target_operate) + self.vrf_rd_changed = True + else: + self.vrf_rd_changed = False + if not self.vrf_rd_changed and not self.vrf_af_type_changed and not self.vpn_target_changed: + self.changed = False + else: + self.changed = True + else: + if self.is_vrf_af_exist(): + configxmlstr = CE_NC_DELETE_VRF_AF % ( + self.vrf, self.vrf_aftype) + self.changed = True + else: + self.changed = False + + if not self.changed: + return + + conf_str = build_config_xml(configxmlstr) + + recv_xml = set_nc_config(self.module, conf_str) + self.check_response(recv_xml, "OPERATE_VRF_AF") + + def get_proposed(self): + """get_proposed""" + + if self.state == 'present': + self.proposed['vrf'] = self.vrf + if self.vrf_aftype is None: + self.proposed['vrf_aftype'] = 'ipv4uni' + else: + self.proposed['vrf_aftype'] = self.vrf_aftype + if self.route_distinguisher is not None: + self.proposed['route_distinguisher'] = self.route_distinguisher + else: + self.proposed['route_distinguisher'] = list() + if self.vpn_target_state == 'present': + self.proposed['evpn'] = self.evpn + self.proposed['vpn_target_type'] = self.vpn_target_type + self.proposed['vpn_target_value'] = self.vpn_target_value + else: + self.proposed['vpn_target_type'] = list() + 
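# For illustration of what this method assembles (hypothetical run):
# state=present, vrf=vpna, vrf_aftype=v4 (mapped to 'ipv4uni' in __init__),
# route_distinguisher=1:1, vpn_target_state=present,
# vpn_target_type=export_extcommunity and vpn_target_value=2:2 leave
# proposed as roughly:
#   {'vrf': 'vpna', 'vrf_aftype': 'ipv4uni', 'route_distinguisher': '1:1',
#    'evpn': False, 'vpn_target_type': 'export_extcommunity',
#    'vpn_target_value': '2:2', 'state': 'present'}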
self.proposed['vpn_target_value'] = list() + else: + self.proposed = dict() + self.proposed['state'] = self.state + self.proposed['vrf'] = self.vrf + self.proposed['vrf_aftype'] = list() + self.proposed['route_distinguisher'] = list() + self.proposed['vpn_target_value'] = list() + self.proposed['vpn_target_type'] = list() + + def get_existing(self): + """get_existing""" + + self.get_vrf_af() + self.existing['vrf'] = self.vrf + self.existing['vrf_aftype'] = list() + self.existing['route_distinguisher'] = list() + self.existing['vpn_target_value'] = list() + self.existing['vpn_target_type'] = list() + self.existing['evpn_target_value'] = list() + self.existing['evpn_target_type'] = list() + if self.vrf_af_info["vpnInstAF"] is None: + return + for vrf_af_ele in self.vrf_af_info["vpnInstAF"]: + self.existing['vrf_aftype'].append(vrf_af_ele["afType"]) + self.existing['route_distinguisher'].append( + vrf_af_ele["vrfRD"]) + if vrf_af_ele.get("vpnTargets"): + for vpn_target in vrf_af_ele.get("vpnTargets"): + self.existing['vpn_target_type'].append( + vpn_target["vrfRTType"]) + self.existing['vpn_target_value'].append( + vpn_target["vrfRTValue"]) + if vrf_af_ele.get("evpnTargets"): + for evpn_target in vrf_af_ele.get("evpnTargets"): + self.existing['evpn_target_type'].append( + evpn_target["vrfRTType"]) + self.existing['evpn_target_value'].append( + evpn_target["vrfRTValue"]) + + def get_end_state(self): + """get_end_state""" + + self.get_vrf_af() + self.end_state['vrf'] = self.vrf + self.end_state['vrf_aftype'] = list() + self.end_state['route_distinguisher'] = list() + self.end_state['vpn_target_value'] = list() + self.end_state['vpn_target_type'] = list() + self.end_state['evpn_target_value'] = list() + self.end_state['evpn_target_type'] = list() + if self.vrf_af_info["vpnInstAF"] is None: + return + for vrf_af_ele in self.vrf_af_info["vpnInstAF"]: + self.end_state['vrf_aftype'].append(vrf_af_ele["afType"]) + self.end_state['route_distinguisher'].append(vrf_af_ele["vrfRD"]) + if vrf_af_ele.get("vpnTargets"): + for vpn_target in vrf_af_ele.get("vpnTargets"): + self.end_state['vpn_target_type'].append( + vpn_target["vrfRTType"]) + self.end_state['vpn_target_value'].append( + vpn_target["vrfRTValue"]) + if vrf_af_ele.get("evpnTargets"): + for evpn_target in vrf_af_ele.get("evpnTargets"): + self.end_state['evpn_target_type'].append( + evpn_target["vrfRTType"]) + self.end_state['evpn_target_value'].append( + evpn_target["vrfRTValue"]) + + def work(self): + """worker""" + + self.check_params() + self.get_existing() + self.get_proposed() + self.operate_vrf_af() + self.set_update_cmd() + self.get_end_state() + self.results['changed'] = self.changed + self.results['proposed'] = self.proposed + self.results['existing'] = self.existing + self.results['end_state'] = self.end_state + if self.changed: + self.results['updates'] = self.updates_cmd + else: + self.results['updates'] = list() + + self.module.exit_json(**self.results) + + +def main(): + """main""" + + argument_spec = dict( + vrf=dict(required=True, type='str'), + vrf_aftype=dict(choices=['v4', 'v6'], + default='v4', required=False), + route_distinguisher=dict(required=False, type='str'), + evpn=dict(type='bool', default=False), + vpn_target_type=dict( + choices=['export_extcommunity', 'import_extcommunity'], required=False), + vpn_target_value=dict(required=False, type='str'), + vpn_target_state=dict(choices=['absent', 'present'], required=False), + state=dict(choices=['absent', 'present'], + default='present', required=False), + ) + 
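# ce_argument_spec (imported from the cloudengine module_utils above) carries
# the shared provider/transport options; merging it below is what lets the
# EXAMPLES pass provider: "{{ cli }}" to this module.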
+    argument_spec.update(ce_argument_spec)
+    interface = VrfAf(argument_spec)
+    interface.work()
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/network/cloudengine/ce_vrf_interface.py b/plugins/modules/network/cloudengine/ce_vrf_interface.py
new file mode 100644
index 0000000000..0eff09eee5
--- /dev/null
+++ b/plugins/modules/network/cloudengine/ce_vrf_interface.py
@@ -0,0 +1,521 @@
+#!/usr/bin/python
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+DOCUMENTATION = '''
+---
+module: ce_vrf_interface
+short_description: Manages interface specific VPN configuration on HUAWEI CloudEngine switches.
+description:
+    - Manages interface specific VPN configuration of HUAWEI CloudEngine switches.
+author: Zhijin Zhou (@QijunPan)
+notes:
+    - Ensure that a VPN instance has been created and the IPv4 address family has been enabled for the VPN instance.
+    - This module requires the netconf system service be enabled on the remote device being managed.
+    - Recommended connection is C(netconf).
+    - This module also works with C(local) connections for legacy playbooks.
+options:
+    vrf:
+        description:
+            - VPN instance. The name is a string of 1 to 31 characters, e.g. "test", and cannot be C(_public_).
+        required: true
+    vpn_interface:
+        description:
+            - An interface that a VPN instance can be bound to, e.g. 40GE1/0/22, Vlanif10.
+              Must be a fully qualified interface name.
+              Interface types include 10GE, 40GE, 100GE, LoopBack, MEth, Tunnel, Vlanif and so on.
+        required: true
+    state:
+        description:
+            - Manage the state of the resource.
+ required: false + choices: ['present','absent'] + default: present +''' + +EXAMPLES = ''' +- name: VRF interface test + hosts: cloudengine + connection: local + gather_facts: no + vars: + cli: + host: "{{ inventory_hostname }}" + port: "{{ ansible_ssh_port }}" + username: "{{ username }}" + password: "{{ password }}" + transport: cli + + tasks: + + - name: "Configure a VPN instance for the interface" + ce_vrf_interface: + vpn_interface: 40GE1/0/2 + vrf: test + state: present + provider: "{{ cli }}" + + - name: "Disable the association between a VPN instance and an interface" + ce_vrf_interface: + vpn_interface: 40GE1/0/2 + vrf: test + state: absent + provider: "{{ cli }}" +''' + +RETURN = ''' +proposed: + description: k/v pairs of parameters passed into module + returned: verbose mode + type: dict + sample: { + "state": "present", + "vpn_interface": "40GE2/0/17", + "vrf": "jss" + } +existing: + description: k/v pairs of existing attributes on the interface + returned: verbose mode + type: dict + sample: { + "vpn_interface": "40GE2/0/17", + "vrf": null + } +end_state: + description: k/v pairs of end attributes on the interface + returned: verbose mode + type: dict + sample: { + "vpn_interface": "40GE2/0/17", + "vrf": "jss" + } +updates: + description: command list sent to the device + returned: always + type: list + sample: [ + "ip binding vpn-instance jss", + ] +changed: + description: check to see if a change was made on the device + returned: always + type: bool + sample: true +''' + + +from xml.etree import ElementTree +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.network.cloudengine.ce import ce_argument_spec, get_nc_config, set_nc_config + +CE_NC_GET_VRF = """ + + + + + + %s + + + + + +""" + +CE_NC_GET_VRF_INTERFACE = """ + + + + + + + + + + + + + + + + +""" + +CE_NC_MERGE_VRF_INTERFACE = """ + + + + + + %s + + + %s + + + + + + + +""" + +CE_NC_GET_INTF = """ + + + + + %s + + + + + +""" + +CE_NC_DEL_INTF_VPN = """ + + + + + + %s + + + %s + + + + + + + +""" + + +def get_interface_type(interface): + """Gets the type of interface, such as 10GE, ETH-TRUNK, VLANIF...""" + + if interface is None: + return None + + iftype = None + + if interface.upper().startswith('GE'): + iftype = 'ge' + elif interface.upper().startswith('10GE'): + iftype = '10ge' + elif interface.upper().startswith('25GE'): + iftype = '25ge' + elif interface.upper().startswith('4X10GE'): + iftype = '4x10ge' + elif interface.upper().startswith('40GE'): + iftype = '40ge' + elif interface.upper().startswith('100GE'): + iftype = '100ge' + elif interface.upper().startswith('VLANIF'): + iftype = 'vlanif' + elif interface.upper().startswith('LOOPBACK'): + iftype = 'loopback' + elif interface.upper().startswith('METH'): + iftype = 'meth' + elif interface.upper().startswith('ETH-TRUNK'): + iftype = 'eth-trunk' + elif interface.upper().startswith('VBDIF'): + iftype = 'vbdif' + elif interface.upper().startswith('NVE'): + iftype = 'nve' + elif interface.upper().startswith('TUNNEL'): + iftype = 'tunnel' + elif interface.upper().startswith('ETHERNET'): + iftype = 'ethernet' + elif interface.upper().startswith('FCOE-PORT'): + iftype = 'fcoe-port' + elif interface.upper().startswith('FABRIC-PORT'): + iftype = 'fabric-port' + elif interface.upper().startswith('STACK-PORT'): + iftype = 'stack-Port' + elif interface.upper().startswith('NULL'): + iftype = 'null' + else: + return None + + return iftype.lower() + + +class VrfInterface(object): + """Manage vpn instance""" 
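# Expected behaviour of get_interface_type() above for a few hypothetical
# interface names:
#
#   get_interface_type('40GE1/0/22')  -> '40ge'
#   get_interface_type('Vlanif10')    -> 'vlanif'
#   get_interface_type('Eth-Trunk20') -> 'eth-trunk'
#   get_interface_type('Foo1/0/1')    -> None  (unrecognised prefix)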
+ + def __init__(self, argument_spec): + self.spec = argument_spec + self.module = None + self.init_module() + + # vpn instance info + self.vrf = self.module.params['vrf'] + self.vpn_interface = self.module.params['vpn_interface'] + self.vpn_interface = self.vpn_interface.upper().replace(' ', '') + self.state = self.module.params['state'] + self.intf_info = dict() + self.intf_info['isL2SwitchPort'] = None + self.intf_info['vrfName'] = None + self.conf_exist = False + + # state + self.changed = False + self.updates_cmd = list() + self.results = dict() + self.proposed = dict() + self.existing = dict() + self.end_state = dict() + + def init_module(self): + """init_module""" + + required_one_of = [("vrf", "vpn_interface")] + self.module = AnsibleModule( + argument_spec=self.spec, required_one_of=required_one_of, supports_check_mode=True) + + def check_response(self, xml_str, xml_name): + """Check if response message is already succeed.""" + + if "" not in xml_str: + self.module.fail_json(msg='Error: %s failed.' % xml_name) + + def get_update_cmd(self): + """ get updated command""" + + if self.conf_exist: + return + + if self.state == 'absent': + self.updates_cmd.append( + "undo ip binding vpn-instance %s" % self.vrf) + return + + if self.vrf != self.intf_info['vrfName']: + self.updates_cmd.append("ip binding vpn-instance %s" % self.vrf) + + return + + def check_params(self): + """Check all input params""" + + if not self.is_vrf_exist(): + self.module.fail_json( + msg='Error: The VPN instance is not existed.') + + if self.state == 'absent': + if self.vrf != self.intf_info['vrfName']: + self.module.fail_json( + msg='Error: The VPN instance is not bound to the interface.') + + if self.intf_info['isL2SwitchPort'] == 'true': + self.module.fail_json( + msg='Error: L2Switch Port can not binding a VPN instance.') + + # interface type check + if self.vpn_interface: + intf_type = get_interface_type(self.vpn_interface) + if not intf_type: + self.module.fail_json( + msg='Error: interface name of %s' + ' is error.' 
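# Worked example for judge_if_config_exist() defined further below
# (hypothetical values): when the binding is already in place,
#   proposed = {'vrf': 'test', 'vpn_interface': '40GE1/0/2', 'state': 'present'}
#   existing = {'vrf': 'test', 'vpn_interface': '40GE1/0/2'}
#   set(proposed.items()).difference(existing.items()) -> {('state', 'present')}
# i.e. only 'state' differs, so no configuration change needs to be pushed.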
% self.vpn_interface) + + # vrf check + if self.vrf == '_public_': + self.module.fail_json( + msg='Error: The vrf name _public_ is reserved.') + if len(self.vrf) < 1 or len(self.vrf) > 31: + self.module.fail_json( + msg='Error: The vrf name length must be between 1 and 31.') + + def get_interface_vpn_name(self, vpninfo, vpn_name): + """ get vpn instance name""" + + l3vpn_if = vpninfo.findall("l3vpnIf") + for l3vpn_ifinfo in l3vpn_if: + for ele in l3vpn_ifinfo: + if ele.tag in ['ifName']: + if ele.text.lower() == self.vpn_interface.lower(): + self.intf_info['vrfName'] = vpn_name + + def get_interface_vpn(self): + """ get the VPN instance associated with the interface""" + + xml_str = CE_NC_GET_VRF_INTERFACE + con_obj = get_nc_config(self.module, xml_str) + if "" in con_obj: + return + + xml_str = con_obj.replace('\r', '').replace('\n', '').\ + replace('xmlns="urn:ietf:params:xml:ns:netconf:base:1.0"', "").\ + replace('xmlns="http://www.huawei.com/netconf/vrp"', "") + + # get global vrf interface info + root = ElementTree.fromstring(xml_str) + vpns = root.findall( + "l3vpn/l3vpncomm/l3vpnInstances/l3vpnInstance") + if vpns: + for vpnele in vpns: + vpn_name = None + for vpninfo in vpnele: + if vpninfo.tag == 'vrfName': + vpn_name = vpninfo.text + if vpninfo.tag == 'l3vpnIfs': + self.get_interface_vpn_name(vpninfo, vpn_name) + + return + + def is_vrf_exist(self): + """ judge whether the VPN instance is existed""" + + conf_str = CE_NC_GET_VRF % self.vrf + con_obj = get_nc_config(self.module, conf_str) + if "" in con_obj: + return False + + return True + + def get_intf_conf_info(self): + """ get related configuration of the interface""" + + conf_str = CE_NC_GET_INTF % self.vpn_interface + con_obj = get_nc_config(self.module, conf_str) + if "" in con_obj: + return + + # get interface base info + xml_str = con_obj.replace('\r', '').replace('\n', '').\ + replace('xmlns="urn:ietf:params:xml:ns:netconf:base:1.0"', "").\ + replace('xmlns="http://www.huawei.com/netconf/vrp"', "") + + root = ElementTree.fromstring(xml_str) + interface = root.find("ifm/interfaces/interface") + if interface: + for eles in interface: + if eles.tag in ["isL2SwitchPort"]: + self.intf_info[eles.tag] = eles.text + + self.get_interface_vpn() + return + + def get_existing(self): + """get existing config""" + + self.existing = dict(vrf=self.intf_info['vrfName'], + vpn_interface=self.vpn_interface) + + def get_proposed(self): + """get_proposed""" + + self.proposed = dict(vrf=self.vrf, + vpn_interface=self.vpn_interface, + state=self.state) + + def get_end_state(self): + """get_end_state""" + + self.intf_info['vrfName'] = None + self.get_intf_conf_info() + + self.end_state = dict(vrf=self.intf_info['vrfName'], + vpn_interface=self.vpn_interface) + + def show_result(self): + """ show result""" + + self.results['changed'] = self.changed + self.results['proposed'] = self.proposed + self.results['existing'] = self.existing + self.results['end_state'] = self.end_state + if self.changed: + self.results['updates'] = self.updates_cmd + else: + self.results['updates'] = list() + + self.module.exit_json(**self.results) + + def judge_if_config_exist(self): + """ judge whether configuration has existed""" + + if self.state == 'absent': + return False + + delta = set(self.proposed.items()).difference( + self.existing.items()) + delta = dict(delta) + if len(delta) == 1 and delta['state']: + return True + + return False + + def config_interface_vrf(self): + """ configure VPN instance of the interface""" + + if not self.conf_exist and self.state 
== 'present': + + xml_str = CE_NC_MERGE_VRF_INTERFACE % ( + self.vrf, self.vpn_interface) + ret_xml = set_nc_config(self.module, xml_str) + self.check_response(ret_xml, "VRF_INTERFACE_CONFIG") + self.changed = True + elif self.state == 'absent': + xml_str = CE_NC_DEL_INTF_VPN % (self.vrf, self.vpn_interface) + ret_xml = set_nc_config(self.module, xml_str) + self.check_response(ret_xml, "DEL_VRF_INTERFACE_CONFIG") + self.changed = True + + def work(self): + """execute task""" + + self.get_intf_conf_info() + self.check_params() + self.get_existing() + self.get_proposed() + self.conf_exist = self.judge_if_config_exist() + + self.config_interface_vrf() + + self.get_update_cmd() + self.get_end_state() + self.show_result() + + +def main(): + """main""" + + argument_spec = dict( + vrf=dict(required=True, type='str'), + vpn_interface=dict(required=True, type='str'), + state=dict(choices=['absent', 'present'], + default='present', required=False), + ) + argument_spec.update(ce_argument_spec) + vrf_intf = VrfInterface(argument_spec) + vrf_intf.work() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/cloudengine/ce_vrrp.py b/plugins/modules/network/cloudengine/ce_vrrp.py new file mode 100644 index 0000000000..6e3e5e1c26 --- /dev/null +++ b/plugins/modules/network/cloudengine/ce_vrrp.py @@ -0,0 +1,1331 @@ +#!/usr/bin/python +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: ce_vrrp +short_description: Manages VRRP interfaces on HUAWEI CloudEngine devices. +description: + - Manages VRRP interface attributes on HUAWEI CloudEngine devices. +author: + - Li Yanfeng (@numone213) +notes: + - This module requires the netconf system service be enabled on the remote device being managed. + - Recommended connection is C(netconf). + - This module also works with C(local) connections for legacy playbooks. +options: + interface: + description: + - Name of an interface. The value is a string of 1 to 63 characters. + vrid: + description: + - VRRP backup group ID. + The value is an integer ranging from 1 to 255. + default: present + virtual_ip : + description: + - Virtual IP address. The value is a string of 0 to 255 characters. + vrrp_type: + description: + - Type of a VRRP backup group. + type: str + choices: ['normal', 'member', 'admin'] + admin_ignore_if_down: + description: + - mVRRP ignores an interface Down event. + type: bool + default: 'false' + admin_vrid: + description: + - Tracked mVRRP ID. The value is an integer ranging from 1 to 255. + admin_interface: + description: + - Tracked mVRRP interface name. The value is a string of 1 to 63 characters. + admin_flowdown: + description: + - Disable the flowdown function for service VRRP. 
+        type: bool
+        default: 'false'
+    priority:
+        description:
+            - Configured VRRP priority.
+              The value ranges from 1 to 254. The default value is 100. A larger value indicates a higher priority.
+    version:
+        description:
+            - VRRP version. The default version is v2.
+        type: str
+        choices: ['v2','v3']
+    advertise_interval:
+        description:
+            - Configured interval between sending advertisements, in milliseconds.
+              Only the master router sends VRRP advertisements. The default value is 1000 milliseconds.
+    preempt_timer_delay:
+        description:
+            - Preemption delay.
+              The value is an integer ranging from 0 to 3600. The default value is 0.
+    gratuitous_arp_interval:
+        description:
+            - Interval at which gratuitous ARP packets are sent, in seconds.
+              The value ranges from 30 to 1200. The default value is 300.
+    recover_delay:
+        description:
+            - Delay in recovering after an interface goes Up.
+              The delay is used for interface flapping suppression.
+              The value is an integer ranging from 0 to 3600.
+              The default value is 0 seconds.
+    holding_multiplier:
+        description:
+            - The configured holdMultiplier. The value is an integer ranging from 3 to 10. The default value is 3.
+    auth_mode:
+        description:
+            - Authentication type used for VRRP packet exchanges between virtual routers.
+              The values are noAuthentication, simpleTextPassword, md5Authentication.
+              The default value is noAuthentication.
+        type: str
+        choices: ['simple','md5','none']
+    is_plain:
+        description:
+            - Select the display mode of an authentication key.
+              By default, an authentication key is displayed in ciphertext.
+        type: bool
+        default: 'false'
+    auth_key:
+        description:
+            - This object is set based on the authentication type.
+              When noAuthentication is specified, the value is empty.
+              When simpleTextPassword or md5Authentication is specified, the value is a string of 1 to 8 characters
+              in plaintext and displayed as a blank text for security.
+    fast_resume:
+        description:
+            - mVRRP's fast resume mode.
+        type: str
+        choices: ['enable','disable']
+    state:
+        description:
+            - Specify the desired state of the resource.
+ type: str + default: present + choices: ['present','absent'] +''' + +EXAMPLES = ''' +- name: vrrp module test + hosts: cloudengine + connection: local + gather_facts: no + vars: + cli: + host: "{{ inventory_hostname }}" + port: "{{ ansible_ssh_port }}" + username: "{{ username }}" + password: "{{ password }}" + transport: cli + tasks: + - name: Set vrrp version + ce_vrrp: + version: v3 + provider: "{{ cli }}" + - name: Set vrrp gratuitous-arp interval + ce_vrrp: + gratuitous_arp_interval: 40 + mlag_id: 4 + provider: "{{ cli }}" + - name: Set vrrp recover-delay + ce_vrrp: + recover_delay: 10 + provider: "{{ cli }}" + - name: Set vrrp vrid virtual-ip + ce_vrrp: + interface: 40GE2/0/8 + vrid: 1 + virtual_ip: 10.14.2.7 + provider: "{{ cli }}" + - name: Set vrrp vrid admin + ce_vrrp: + interface: 40GE2/0/8 + vrid: 1 + vrrp_type: admin + provider: "{{ cli }}" + - name: Set vrrp vrid fast_resume + ce_vrrp: + interface: 40GE2/0/8 + vrid: 1 + fast_resume: enable + provider: "{{ cli }}" + - name: Set vrrp vrid holding-multiplier + ce_vrrp: + interface: 40GE2/0/8 + vrid: 1 + holding_multiplier: 4 + provider: "{{ cli }}" + - name: Set vrrp vrid preempt timer delay + ce_vrrp: + interface: 40GE2/0/8 + vrid: 1 + preempt_timer_delay: 10 + provider: "{{ cli }}" + - name: Set vrrp vrid admin-vrrp + ce_vrrp: + interface: 40GE2/0/8 + vrid: 1 + admin_interface: 40GE2/0/9 + admin_vrid: 2 + vrrp_type: member + provider: "{{ cli }}" + - name: Set vrrp vrid authentication-mode + ce_vrrp: + interface: 40GE2/0/8 + vrid: 1 + is_plain: true + auth_mode: simple + auth_key: aaa + provider: "{{ cli }}" +''' + +RETURN = ''' +changed: + description: check to see if a change was made on the device + returned: always + type: bool + sample: true +proposed: + description: k/v pairs of parameters passed into module + returned: always + type: dict + sample: { + "auth_key": "aaa", + "auth_mode": "simple", + "interface": "40GE2/0/8", + "is_plain": true, + "state": "present", + "vrid": "1" + } +existing: + description: k/v pairs of existing aaa server + returned: always + type: dict + sample: { + "auth_mode": "none", + "interface": "40GE2/0/8", + "is_plain": "false", + "vrid": "1", + "vrrp_type": "normal" + } +end_state: + description: k/v pairs of aaa params after module execution + returned: always + type: dict + sample: { + "auth_mode": "simple", + "interface": "40GE2/0/8", + "is_plain": "true", + "vrid": "1", + "vrrp_type": "normal" + } +updates: + description: command sent to the device + returned: always + type: list + sample: { "interface 40GE2/0/8", + "vrrp vrid 1 authentication-mode simple plain aaa"} +''' + +from xml.etree import ElementTree +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.network.cloudengine.ce import get_nc_config, set_nc_config, ce_argument_spec + + +CE_NC_GET_VRRP_GROUP_INFO = """ + + + + + %s + %s + + + + +""" + +CE_NC_SET_VRRP_GROUP_INFO_HEAD = """ + + + + + %s + %s +""" +CE_NC_SET_VRRP_GROUP_INFO_TAIL = """ + + + + +""" +CE_NC_GET_VRRP_GLOBAL_INFO = """ + + + + + + + + + + +""" + +CE_NC_SET_VRRP_GLOBAL_HEAD = """ + + + +""" +CE_NC_SET_VRRP_GLOBAL_TAIL = """ + + + +""" + +CE_NC_GET_VRRP_VIRTUAL_IP_INFO = """ + + + + + %s + %s + + + + + + + + + +""" +CE_NC_CREATE_VRRP_VIRTUAL_IP_INFO = """ + + + + + %s + %s + + + %s + + + + + + +""" +CE_NC_DELETE_VRRP_VIRTUAL_IP_INFO = """ + + + + + %s + %s + + + %s + + + + + + +""" + + +def is_valid_address(address): + """check ip-address is valid""" + + if address.find('.') != -1: + addr_list = 
address.split('.') + if len(addr_list) != 4: + return False + for each_num in addr_list: + if not each_num.isdigit(): + return False + if int(each_num) > 255: + return False + return True + + return False + + +def get_interface_type(interface): + """Gets the type of interface, such as 10GE, ETH-TRUNK, VLANIF...""" + + if interface is None: + return None + + iftype = None + + if interface.upper().startswith('GE'): + iftype = 'ge' + elif interface.upper().startswith('10GE'): + iftype = '10ge' + elif interface.upper().startswith('25GE'): + iftype = '25ge' + elif interface.upper().startswith('40GE'): + iftype = '40ge' + elif interface.upper().startswith('100GE'): + iftype = '100ge' + elif interface.upper().startswith('ETH-TRUNK'): + iftype = 'eth-trunk' + elif interface.upper().startswith('NULL'): + iftype = 'null' + elif interface.upper().startswith('VLANIF'): + iftype = 'vlanif' + else: + return None + + return iftype.lower() + + +class Vrrp(object): + """ + Manages Manages vrrp information. + """ + + def __init__(self, argument_spec): + self.spec = argument_spec + self.module = None + self.init_module() + + # module input info + self.interface = self.module.params['interface'] + self.vrid = self.module.params['vrid'] + self.virtual_ip = self.module.params['virtual_ip'] + self.vrrp_type = self.module.params['vrrp_type'] + self.admin_ignore_if_down = 'false' if self.module.params['admin_ignore_if_down'] is False else 'true' + self.admin_vrid = self.module.params['admin_vrid'] + self.admin_interface = self.module.params['admin_interface'] + self.admin_flowdown = 'false' if self.module.params['admin_flowdown'] is False else 'true' + self.priority = self.module.params['priority'] + self.version = self.module.params['version'] + self.advertise_interval = self.module.params['advertise_interval'] + self.preempt_timer_delay = self.module.params['preempt_timer_delay'] + self.gratuitous_arp_interval = self.module.params[ + 'gratuitous_arp_interval'] + self.recover_delay = self.module.params['recover_delay'] + self.holding_multiplier = self.module.params['holding_multiplier'] + self.auth_mode = self.module.params['auth_mode'] + self.is_plain = 'false' if self.module.params['is_plain'] is False else 'true' + self.auth_key = self.module.params['auth_key'] + self.fast_resume = self.module.params['fast_resume'] + self.state = self.module.params['state'] + + # vrrp info + self.vrrp_global_info = None + self.virtual_ip_info = None + self.vrrp_group_info = None + + # state + self.changed = False + self.updates_cmd = list() + self.results = dict() + self.existing = dict() + self.proposed = dict() + self.end_state = dict() + + def init_module(self): + """ init module """ + + self.module = AnsibleModule( + argument_spec=self.spec, supports_check_mode=True) + + def get_virtual_ip_info(self): + """ get vrrp virtual ip info.""" + virtual_ip_info = dict() + conf_str = CE_NC_GET_VRRP_VIRTUAL_IP_INFO % (self.vrid, self.interface) + xml_str = get_nc_config(self.module, conf_str) + if "" in xml_str: + return virtual_ip_info + else: + xml_str = xml_str.replace('\r', '').replace('\n', '').\ + replace('xmlns="urn:ietf:params:xml:ns:netconf:base:1.0"', "").\ + replace('xmlns="http://www.huawei.com/netconf/vrp"', "") + virtual_ip_info["vrrpVirtualIpInfos"] = list() + root = ElementTree.fromstring(xml_str) + vrrp_virtual_ip_infos = root.findall( + "vrrp/vrrpGroups/vrrpGroup/virtualIps/virtualIp") + if vrrp_virtual_ip_infos: + for vrrp_virtual_ip_info in vrrp_virtual_ip_infos: + virtual_ip_dict = dict() + for ele in 
vrrp_virtual_ip_info: + if ele.tag in ["virtualIpAddress"]: + virtual_ip_dict[ele.tag] = ele.text + virtual_ip_info["vrrpVirtualIpInfos"].append( + virtual_ip_dict) + return virtual_ip_info + + def get_vrrp_global_info(self): + """ get vrrp global info.""" + + vrrp_global_info = dict() + conf_str = CE_NC_GET_VRRP_GLOBAL_INFO + xml_str = get_nc_config(self.module, conf_str) + if "" in xml_str: + return vrrp_global_info + else: + xml_str = xml_str.replace('\r', '').replace('\n', '').\ + replace('xmlns="urn:ietf:params:xml:ns:netconf:base:1.0"', "").\ + replace('xmlns="http://www.huawei.com/netconf/vrp"', "") + + root = ElementTree.fromstring(xml_str) + global_info = root.findall( + "vrrp/vrrpGlobalCfg") + + if global_info: + for tmp in global_info: + for site in tmp: + if site.tag in ["gratuitousArpTimeOut", "gratuitousArpFlag", "recoverDelay", "version"]: + vrrp_global_info[site.tag] = site.text + return vrrp_global_info + + def get_vrrp_group_info(self): + """ get vrrp group info.""" + + vrrp_group_info = dict() + conf_str = CE_NC_GET_VRRP_GROUP_INFO % (self.interface, self.vrid) + xml_str = get_nc_config(self.module, conf_str) + if "" in xml_str: + return vrrp_group_info + else: + xml_str = xml_str.replace('\r', '').replace('\n', '').\ + replace('xmlns="urn:ietf:params:xml:ns:netconf:base:1.0"', "").\ + replace('xmlns="http://www.huawei.com/netconf/vrp"', "") + + root = ElementTree.fromstring(xml_str) + global_info = root.findall( + "vrrp/vrrpGroups/vrrpGroup") + + if global_info: + for tmp in global_info: + for site in tmp: + if site.tag in ["ifName", "vrrpId", "priority", "advertiseInterval", "preemptMode", "delayTime", + "authenticationMode", "authenticationKey", "vrrpType", "adminVrrpId", + "adminIfName", "adminIgnoreIfDown", "isPlain", "unflowdown", "fastResume", + "holdMultiplier"]: + vrrp_group_info[site.tag] = site.text + return vrrp_group_info + + def check_params(self): + """Check all input params""" + + # interface check + if self.interface: + intf_type = get_interface_type(self.interface) + if not intf_type: + self.module.fail_json( + msg='Error: Interface name of %s ' + 'is error.' % self.interface) + + # vrid check + if self.vrid: + if not self.vrid.isdigit(): + self.module.fail_json( + msg='Error: The value of vrid is an integer.') + if int(self.vrid) < 1 or int(self.vrid) > 255: + self.module.fail_json( + msg='Error: The value of vrid ranges from 1 to 255.') + + # virtual_ip check + if self.virtual_ip: + if not is_valid_address(self.virtual_ip): + self.module.fail_json( + msg='Error: The %s is not a valid ip address.' % self.virtual_ip) + + # admin_vrid check + if self.admin_vrid: + if not self.admin_vrid.isdigit(): + self.module.fail_json( + msg='Error: The value of admin_vrid is an integer.') + if int(self.admin_vrid) < 1 or int(self.admin_vrid) > 255: + self.module.fail_json( + msg='Error: The value of admin_vrid ranges from 1 to 255.') + + # admin_interface check + if self.admin_interface: + intf_type = get_interface_type(self.admin_interface) + if not intf_type: + self.module.fail_json( + msg='Error: Admin interface name of %s ' + 'is error.' % self.admin_interface) + + # priority check + if self.priority: + if not self.priority.isdigit(): + self.module.fail_json( + msg='Error: The value of priority is an integer.') + if int(self.priority) < 1 or int(self.priority) > 254: + self.module.fail_json( + msg='Error: The value of priority ranges from 1 to 254. 
The default value is 100.') + + # advertise_interval check + if self.advertise_interval: + if not self.advertise_interval.isdigit(): + self.module.fail_json( + msg='Error: The value of advertise_interval is an integer.') + if int(self.advertise_interval) < 1000 or int(self.advertise_interval) > 255000: + self.module.fail_json( + msg='Error: The value of advertise_interval ranges from 1000 to 255000 milliseconds. The default value is 1000 milliseconds.') + if int(self.advertise_interval) % 1000 != 0: + self.module.fail_json( + msg='Error: The advertisement interval value of VRRP must be a multiple of 1000 milliseconds.') + # preempt_timer_delay check + if self.preempt_timer_delay: + if not self.preempt_timer_delay.isdigit(): + self.module.fail_json( + msg='Error: The value of preempt_timer_delay is an integer.') + if int(self.preempt_timer_delay) < 1 or int(self.preempt_timer_delay) > 3600: + self.module.fail_json( + msg='Error: The value of preempt_timer_delay ranges from 1 to 3600. The default value is 0.') + + # holding_multiplier check + if self.holding_multiplier: + if not self.holding_multiplier.isdigit(): + self.module.fail_json( + msg='Error: The value of holding_multiplier is an integer.') + if int(self.holding_multiplier) < 3 or int(self.holding_multiplier) > 10: + self.module.fail_json( + msg='Error: The value of holding_multiplier ranges from 3 to 10. The default value is 3.') + + # auth_key check + if self.auth_key: + if len(self.auth_key) > 16 \ + or len(self.auth_key.replace(' ', '')) < 1: + self.module.fail_json( + msg='Error: The length of auth_key is not in the range from 1 to 16.') + + def is_virtual_ip_change(self): + """whether virtual ip change""" + + if not self.virtual_ip_info: + return True + + for info in self.virtual_ip_info["vrrpVirtualIpInfos"]: + if info["virtualIpAddress"] == self.virtual_ip: + return False + return True + + def is_virtual_ip_exist(self): + """whether virtual ip info exist""" + + if not self.virtual_ip_info: + return False + + for info in self.virtual_ip_info["vrrpVirtualIpInfos"]: + if info["virtualIpAddress"] == self.virtual_ip: + return True + return False + + def is_vrrp_global_info_change(self): + """whether vrrp global attribute info change""" + + if not self.vrrp_global_info: + return True + + if self.gratuitous_arp_interval: + if self.vrrp_global_info["gratuitousArpFlag"] == "false": + self.module.fail_json(msg="Error: gratuitousArpFlag is false.") + if self.vrrp_global_info["gratuitousArpTimeOut"] != self.gratuitous_arp_interval: + return True + if self.recover_delay: + if self.vrrp_global_info["recoverDelay"] != self.recover_delay: + return True + if self.version: + if self.vrrp_global_info["version"] != self.version: + return True + return False + + def is_vrrp_global_info_exist(self): + """whether vrrp global attribute info exist""" + + if self.gratuitous_arp_interval or self.recover_delay or self.version: + if self.gratuitous_arp_interval: + if self.vrrp_global_info["gratuitousArpFlag"] == "false": + self.module.fail_json( + msg="Error: gratuitousArpFlag is false.") + if self.vrrp_global_info["gratuitousArpTimeOut"] != self.gratuitous_arp_interval: + return False + if self.recover_delay: + if self.vrrp_global_info["recoverDelay"] != self.recover_delay: + return False + if self.version: + if self.vrrp_global_info["version"] != self.version: + return False + return True + + return False + + def is_vrrp_group_info_change(self): + """whether vrrp group attribute info change""" + if self.vrrp_type: + if self.vrrp_group_info["vrrpType"] 
!= self.vrrp_type: + return True + if self.admin_ignore_if_down: + if self.vrrp_group_info["adminIgnoreIfDown"] != self.admin_ignore_if_down: + return True + if self.admin_vrid: + if self.vrrp_group_info["adminVrrpId"] != self.admin_vrid: + return True + if self.admin_interface: + if self.vrrp_group_info["adminIfName"] != self.admin_interface: + return True + if self.admin_flowdown: + if self.vrrp_group_info["unflowdown"] != self.admin_flowdown: + return True + if self.priority: + if self.vrrp_group_info["priority"] != self.priority: + return True + if self.fast_resume: + fast_resume = "false" + if self.fast_resume == "enable": + fast_resume = "true" + if self.vrrp_group_info["fastResume"] != fast_resume: + return True + if self.advertise_interval: + if self.vrrp_group_info["advertiseInterval"] != self.advertise_interval: + return True + if self.preempt_timer_delay: + if self.vrrp_group_info["delayTime"] != self.preempt_timer_delay: + return True + if self.holding_multiplier: + if self.vrrp_group_info["holdMultiplier"] != self.holding_multiplier: + return True + if self.auth_mode: + if self.vrrp_group_info["authenticationMode"] != self.auth_mode: + return True + if self.auth_key: + return True + if self.is_plain: + if self.vrrp_group_info["isPlain"] != self.is_plain: + return True + + return False + + def is_vrrp_group_info_exist(self): + """whether vrrp group attribute info exist""" + + if self.vrrp_type: + if self.vrrp_group_info["vrrpType"] != self.vrrp_type: + return False + if self.admin_ignore_if_down: + if self.vrrp_group_info["adminIgnoreIfDown"] != self.admin_ignore_if_down: + return False + if self.admin_vrid: + if self.vrrp_group_info["adminVrrpId"] != self.admin_vrid: + return False + if self.admin_interface: + if self.vrrp_group_info["adminIfName"] != self.admin_interface: + return False + if self.admin_flowdown: + if self.vrrp_group_info["unflowdown"] != self.admin_flowdown: + return False + if self.priority: + if self.vrrp_group_info["priority"] != self.priority: + return False + if self.fast_resume: + fast_resume = "false" + if self.fast_resume == "enable": + fast_resume = "true" + if self.vrrp_group_info["fastResume"] != fast_resume: + return False + if self.advertise_interval: + if self.vrrp_group_info["advertiseInterval"] != self.advertise_interval: + return False + if self.preempt_timer_delay: + if self.vrrp_group_info["delayTime"] != self.preempt_timer_delay: + return False + if self.holding_multiplier: + if self.vrrp_group_info["holdMultiplier"] != self.holding_multiplier: + return False + if self.auth_mode: + if self.vrrp_group_info["authenticationMode"] != self.auth_mode: + return False + if self.is_plain: + if self.vrrp_group_info["isPlain"] != self.is_plain: + return False + return True + + def create_virtual_ip(self): + """create virtual ip info""" + + if self.is_virtual_ip_change(): + conf_str = CE_NC_CREATE_VRRP_VIRTUAL_IP_INFO % ( + self.vrid, self.interface, self.virtual_ip) + recv_xml = set_nc_config(self.module, conf_str) + if "" not in recv_xml: + self.module.fail_json( + msg='Error: create virtual ip info failed.') + + self.updates_cmd.append("interface %s" % self.interface) + self.updates_cmd.append( + "vrrp vrid %s virtual-ip %s" % (self.vrid, self.virtual_ip)) + self.changed = True + + def delete_virtual_ip(self): + """delete virtual ip info""" + + if self.is_virtual_ip_exist(): + conf_str = CE_NC_DELETE_VRRP_VIRTUAL_IP_INFO % ( + self.vrid, self.interface, self.virtual_ip) + recv_xml = set_nc_config(self.module, conf_str) + if "" not in recv_xml: + 
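# Illustration (hypothetical values): deleting virtual IP 10.14.2.7 from
# vrid 1 on 40GE2/0/8 records the equivalent CLI below in 'updates':
#   interface 40GE2/0/8
#   undo vrrp vrid 1 virtual-ip 10.14.2.7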
self.module.fail_json( + msg='Error: delete virtual ip info failed.') + + self.updates_cmd.append("interface %s" % self.interface) + self.updates_cmd.append( + "undo vrrp vrid %s virtual-ip %s " % (self.vrid, self.virtual_ip)) + self.changed = True + + def set_vrrp_global(self): + """set vrrp global attribute info""" + + if self.is_vrrp_global_info_change(): + conf_str = CE_NC_SET_VRRP_GLOBAL_HEAD + if self.gratuitous_arp_interval: + conf_str += "%s" % self.gratuitous_arp_interval + if self.recover_delay: + conf_str += "%s" % self.recover_delay + if self.version: + conf_str += "%s" % self.version + conf_str += CE_NC_SET_VRRP_GLOBAL_TAIL + recv_xml = set_nc_config(self.module, conf_str) + if "" not in recv_xml: + self.module.fail_json( + msg='Error: set vrrp global attribute info failed.') + + if self.gratuitous_arp_interval: + self.updates_cmd.append( + "vrrp gratuitous-arp interval %s" % self.gratuitous_arp_interval) + + if self.recover_delay: + self.updates_cmd.append( + "vrrp recover-delay %s" % self.recover_delay) + + if self.version: + version = "3" + if self.version == "v2": + version = "2" + self.updates_cmd.append("vrrp version %s" % version) + self.changed = True + + def delete_vrrp_global(self): + """delete vrrp global attribute info""" + + if self.is_vrrp_global_info_exist(): + conf_str = CE_NC_SET_VRRP_GLOBAL_HEAD + if self.gratuitous_arp_interval: + if self.gratuitous_arp_interval == "120": + self.module.fail_json( + msg='Error: The default value of gratuitous_arp_interval is 120.') + gratuitous_arp_interval = "120" + conf_str += "%s" % gratuitous_arp_interval + if self.recover_delay: + if self.recover_delay == "0": + self.module.fail_json( + msg='Error: The default value of recover_delay is 0.') + recover_delay = "0" + conf_str += "%s" % recover_delay + if self.version: + if self.version == "v2": + self.module.fail_json( + msg='Error: The default value of version is v2.') + version = "v2" + conf_str += "%s" % version + conf_str += CE_NC_SET_VRRP_GLOBAL_TAIL + recv_xml = set_nc_config(self.module, conf_str) + if "" not in recv_xml: + self.module.fail_json( + msg='Error: set vrrp global attribute info failed.') + if self.gratuitous_arp_interval: + self.updates_cmd.append("undo vrrp gratuitous-arp interval") + + if self.recover_delay: + self.updates_cmd.append("undo vrrp recover-delay") + + if self.version == "v3": + self.updates_cmd.append("undo vrrp version") + self.changed = True + + def set_vrrp_group(self): + """set vrrp group attribute info""" + + if self.is_vrrp_group_info_change(): + conf_str = CE_NC_SET_VRRP_GROUP_INFO_HEAD % ( + self.interface, self.vrid) + if self.vrrp_type: + conf_str += "%s" % self.vrrp_type + if self.admin_vrid: + conf_str += "%s" % self.admin_vrid + if self.admin_interface: + conf_str += "%s" % self.admin_interface + if self.admin_flowdown: + conf_str += "%s" % self.admin_flowdown + if self.priority: + conf_str += "%s" % self.priority + if self.vrrp_type == "admin": + if self.admin_ignore_if_down: + conf_str += "%s" % self.admin_ignore_if_down + if self.fast_resume: + fast_resume = "false" + if self.fast_resume == "enable": + fast_resume = "true" + conf_str += "%s" % fast_resume + if self.advertise_interval: + conf_str += "%s" % self.advertise_interval + if self.preempt_timer_delay: + conf_str += "%s" % self.preempt_timer_delay + if self.holding_multiplier: + conf_str += "%s" % self.holding_multiplier + if self.auth_mode: + conf_str += "%s" % self.auth_mode + if self.auth_key: + conf_str += "%s" % self.auth_key + if self.auth_mode == "simple": + 
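# CLI equivalent of the authentication settings assembled above (hypothetical
# values): auth_mode=simple, is_plain=true, auth_key=aaa on vrid 1 records
#   vrrp vrid 1 authentication-mode simple plain aaa
# in 'updates', as shown further below.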
conf_str += "%s" % self.is_plain + + conf_str += CE_NC_SET_VRRP_GROUP_INFO_TAIL + recv_xml = set_nc_config(self.module, conf_str) + if "" not in recv_xml: + self.module.fail_json( + msg='Error: set vrrp group attribute info failed.') + if self.interface and self.vrid: + self.updates_cmd.append("interface %s" % self.interface) + if self.vrrp_type == "admin": + if self.admin_ignore_if_down == "true": + self.updates_cmd.append( + "vrrp vrid %s admin ignore-if-down" % self.vrid) + else: + self.updates_cmd.append( + "vrrp vrid %s admin" % self.vrid) + + if self.priority: + self.updates_cmd.append( + "vrrp vrid %s priority %s" % (self.vrid, self.priority)) + + if self.fast_resume == "enable": + self.updates_cmd.append( + "vrrp vrid %s fast-resume" % self.vrid) + if self.fast_resume == "disable": + self.updates_cmd.append( + "undo vrrp vrid %s fast-resume" % self.vrid) + + if self.advertise_interval: + advertise_interval = int(self.advertise_interval) / 1000 + self.updates_cmd.append("vrrp vrid %s timer advertise %s" % ( + self.vrid, int(advertise_interval))) + + if self.preempt_timer_delay: + self.updates_cmd.append("vrrp vrid %s preempt timer delay %s" % (self.vrid, + self.preempt_timer_delay)) + + if self.holding_multiplier: + self.updates_cmd.append( + "vrrp vrid %s holding-multiplier %s" % (self.vrid, self.holding_multiplier)) + + if self.admin_vrid and self.admin_interface: + if self.admin_flowdown == "true": + self.updates_cmd.append("vrrp vrid %s track admin-vrrp interface %s vrid %s unflowdown" % + (self.vrid, self.admin_interface, self.admin_vrid)) + else: + self.updates_cmd.append("vrrp vrid %s track admin-vrrp interface %s vrid %s" % + (self.vrid, self.admin_interface, self.admin_vrid)) + + if self.auth_mode and self.auth_key: + if self.auth_mode == "simple": + if self.is_plain == "true": + self.updates_cmd.append("vrrp vrid %s authentication-mode simple plain %s" % + (self.vrid, self.auth_key)) + else: + self.updates_cmd.append("vrrp vrid %s authentication-mode simple cipher %s" % + (self.vrid, self.auth_key)) + if self.auth_mode == "md5": + self.updates_cmd.append( + "vrrp vrid %s authentication-mode md5 %s" % (self.vrid, self.auth_key)) + self.changed = True + + def delete_vrrp_group(self): + """delete vrrp group attribute info""" + + if self.is_vrrp_group_info_exist(): + conf_str = CE_NC_SET_VRRP_GROUP_INFO_HEAD % ( + self.interface, self.vrid) + if self.vrrp_type: + vrrp_type = self.vrrp_type + if self.vrrp_type == "admin": + vrrp_type = "normal" + if self.vrrp_type == "member" and self.admin_vrid and self.admin_interface: + vrrp_type = "normal" + conf_str += "%s" % vrrp_type + if self.priority: + if self.priority == "100": + self.module.fail_json( + msg='Error: The default value of priority is 100.') + priority = "100" + conf_str += "%s" % priority + + if self.fast_resume: + fast_resume = "false" + if self.fast_resume == "enable": + fast_resume = "true" + conf_str += "%s" % fast_resume + if self.advertise_interval: + if self.advertise_interval == "1000": + self.module.fail_json( + msg='Error: The default value of advertise_interval is 1000.') + advertise_interval = "1000" + conf_str += "%s" % advertise_interval + if self.preempt_timer_delay: + if self.preempt_timer_delay == "0": + self.module.fail_json( + msg='Error: The default value of preempt_timer_delay is 0.') + preempt_timer_delay = "0" + conf_str += "%s" % preempt_timer_delay + if self.holding_multiplier: + if self.holding_multiplier == "0": + self.module.fail_json( + msg='Error: The default value of holding_multiplier is 
3.') + holding_multiplier = "3" + conf_str += "%s" % holding_multiplier + if self.auth_mode: + auth_mode = self.auth_mode + if self.auth_mode == "md5" or self.auth_mode == "simple": + auth_mode = "none" + conf_str += "%s" % auth_mode + + conf_str += CE_NC_SET_VRRP_GROUP_INFO_TAIL + recv_xml = set_nc_config(self.module, conf_str) + if "" not in recv_xml: + self.module.fail_json( + msg='Error: set vrrp global attribute info failed.') + if self.interface and self.vrid: + self.updates_cmd.append("interface %s" % self.interface) + if self.vrrp_type == "admin": + self.updates_cmd.append( + "undo vrrp vrid %s admin" % self.vrid) + + if self.priority: + self.updates_cmd.append( + "undo vrrp vrid %s priority" % self.vrid) + + if self.fast_resume: + self.updates_cmd.append( + "undo vrrp vrid %s fast-resume" % self.vrid) + + if self.advertise_interval: + self.updates_cmd.append( + "undo vrrp vrid %s timer advertise" % self.vrid) + + if self.preempt_timer_delay: + self.updates_cmd.append( + "undo vrrp vrid %s preempt timer delay" % self.vrid) + + if self.holding_multiplier: + self.updates_cmd.append( + "undo vrrp vrid %s holding-multiplier" % self.vrid) + + if self.admin_vrid and self.admin_interface: + self.updates_cmd.append( + "undo vrrp vrid %s track admin-vrrp" % self.vrid) + + if self.auth_mode: + self.updates_cmd.append( + "undo vrrp vrid %s authentication-mode" % self.vrid) + self.changed = True + + def get_proposed(self): + """get proposed info""" + + if self.interface: + self.proposed["interface"] = self.interface + if self.vrid: + self.proposed["vrid"] = self.vrid + if self.virtual_ip: + self.proposed["virtual_ip"] = self.virtual_ip + if self.vrrp_type: + self.proposed["vrrp_type"] = self.vrrp_type + if self.admin_vrid: + self.proposed["admin_vrid"] = self.admin_vrid + if self.admin_interface: + self.proposed["admin_interface"] = self.admin_interface + if self.admin_flowdown: + self.proposed["unflowdown"] = self.admin_flowdown + if self.admin_ignore_if_down: + self.proposed["admin_ignore_if_down"] = self.admin_ignore_if_down + if self.priority: + self.proposed["priority"] = self.priority + if self.version: + self.proposed["version"] = self.version + if self.advertise_interval: + self.proposed["advertise_interval"] = self.advertise_interval + if self.preempt_timer_delay: + self.proposed["preempt_timer_delay"] = self.preempt_timer_delay + if self.gratuitous_arp_interval: + self.proposed[ + "gratuitous_arp_interval"] = self.gratuitous_arp_interval + if self.recover_delay: + self.proposed["recover_delay"] = self.recover_delay + if self.holding_multiplier: + self.proposed["holding_multiplier"] = self.holding_multiplier + if self.auth_mode: + self.proposed["auth_mode"] = self.auth_mode + if self.is_plain: + self.proposed["is_plain"] = self.is_plain + if self.auth_key: + self.proposed["auth_key"] = self.auth_key + if self.fast_resume: + self.proposed["fast_resume"] = self.fast_resume + if self.state: + self.proposed["state"] = self.state + + def get_existing(self): + """get existing info""" + + if self.gratuitous_arp_interval: + self.existing["gratuitous_arp_interval"] = self.vrrp_global_info[ + "gratuitousArpTimeOut"] + if self.version: + self.existing["version"] = self.vrrp_global_info["version"] + if self.recover_delay: + self.existing["recover_delay"] = self.vrrp_global_info[ + "recoverDelay"] + + if self.virtual_ip: + if self.virtual_ip_info: + self.existing["interface"] = self.interface + self.existing["vrid"] = self.vrid + self.existing["virtual_ip_info"] = self.virtual_ip_info[ + 
"vrrpVirtualIpInfos"] + + if self.vrrp_group_info: + self.existing["interface"] = self.vrrp_group_info["ifName"] + self.existing["vrid"] = self.vrrp_group_info["vrrpId"] + self.existing["vrrp_type"] = self.vrrp_group_info["vrrpType"] + if self.vrrp_type == "admin": + self.existing["admin_ignore_if_down"] = self.vrrp_group_info[ + "adminIgnoreIfDown"] + if self.admin_vrid and self.admin_interface: + self.existing["admin_vrid"] = self.vrrp_group_info[ + "adminVrrpId"] + self.existing["admin_interface"] = self.vrrp_group_info[ + "adminIfName"] + self.existing["admin_flowdown"] = self.vrrp_group_info[ + "unflowdown"] + if self.priority: + self.existing["priority"] = self.vrrp_group_info["priority"] + if self.advertise_interval: + self.existing["advertise_interval"] = self.vrrp_group_info[ + "advertiseInterval"] + if self.preempt_timer_delay: + self.existing["preempt_timer_delay"] = self.vrrp_group_info[ + "delayTime"] + if self.holding_multiplier: + self.existing["holding_multiplier"] = self.vrrp_group_info[ + "holdMultiplier"] + if self.fast_resume: + fast_resume_exist = "disable" + fast_resume = self.vrrp_group_info["fastResume"] + if fast_resume == "true": + fast_resume_exist = "enable" + self.existing["fast_resume"] = fast_resume_exist + if self.auth_mode: + self.existing["auth_mode"] = self.vrrp_group_info[ + "authenticationMode"] + self.existing["is_plain"] = self.vrrp_group_info["isPlain"] + + def get_end_state(self): + """get end state info""" + + if self.gratuitous_arp_interval or self.version or self.recover_delay: + self.vrrp_global_info = self.get_vrrp_global_info() + if self.interface and self.vrid: + if self.virtual_ip: + self.virtual_ip_info = self.get_virtual_ip_info() + if self.virtual_ip_info: + self.vrrp_group_info = self.get_vrrp_group_info() + + if self.gratuitous_arp_interval: + self.end_state["gratuitous_arp_interval"] = self.vrrp_global_info[ + "gratuitousArpTimeOut"] + if self.version: + self.end_state["version"] = self.vrrp_global_info["version"] + if self.recover_delay: + self.end_state["recover_delay"] = self.vrrp_global_info[ + "recoverDelay"] + + if self.virtual_ip: + if self.virtual_ip_info: + self.end_state["interface"] = self.interface + self.end_state["vrid"] = self.vrid + self.end_state["virtual_ip_info"] = self.virtual_ip_info[ + "vrrpVirtualIpInfos"] + + if self.vrrp_group_info: + self.end_state["interface"] = self.vrrp_group_info["ifName"] + self.end_state["vrid"] = self.vrrp_group_info["vrrpId"] + self.end_state["vrrp_type"] = self.vrrp_group_info["vrrpType"] + if self.vrrp_type == "admin": + self.end_state["admin_ignore_if_down"] = self.vrrp_group_info[ + "adminIgnoreIfDown"] + if self.admin_vrid and self.admin_interface: + self.end_state["admin_vrid"] = self.vrrp_group_info[ + "adminVrrpId"] + self.end_state["admin_interface"] = self.vrrp_group_info[ + "adminIfName"] + self.end_state["admin_flowdown"] = self.vrrp_group_info[ + "unflowdown"] + if self.priority: + self.end_state["priority"] = self.vrrp_group_info["priority"] + if self.advertise_interval: + self.end_state["advertise_interval"] = self.vrrp_group_info[ + "advertiseInterval"] + if self.preempt_timer_delay: + self.end_state["preempt_timer_delay"] = self.vrrp_group_info[ + "delayTime"] + if self.holding_multiplier: + self.end_state["holding_multiplier"] = self.vrrp_group_info[ + "holdMultiplier"] + if self.fast_resume: + fast_resume_end = "disable" + fast_resume = self.vrrp_group_info["fastResume"] + if fast_resume == "true": + fast_resume_end = "enable" + self.end_state["fast_resume"] = 
fast_resume_end + if self.auth_mode: + self.end_state["auth_mode"] = self.vrrp_group_info[ + "authenticationMode"] + self.end_state["is_plain"] = self.vrrp_group_info["isPlain"] + if self.existing == self.end_state: + self.changed = False + + def work(self): + """worker""" + + self.check_params() + if self.gratuitous_arp_interval or self.version or self.recover_delay: + self.vrrp_global_info = self.get_vrrp_global_info() + if self.interface and self.vrid: + self.virtual_ip_info = self.get_virtual_ip_info() + if self.virtual_ip_info: + self.vrrp_group_info = self.get_vrrp_group_info() + self.get_proposed() + self.get_existing() + + if self.gratuitous_arp_interval or self.version or self.recover_delay: + if self.state == "present": + self.set_vrrp_global() + else: + self.delete_vrrp_global() + else: + if not self.interface or not self.vrid: + self.module.fail_json( + msg='Error: interface, vrid must be config at the same time.') + + if self.interface and self.vrid: + if self.virtual_ip: + if self.state == "present": + self.create_virtual_ip() + else: + self.delete_virtual_ip() + else: + if not self.vrrp_group_info: + self.module.fail_json( + msg='Error: The VRRP group does not exist.') + if self.admin_ignore_if_down == "true": + if self.vrrp_type != "admin": + self.module.fail_json( + msg='Error: vrrpType must be admin when admin_ignore_if_down is true.') + if self.admin_interface or self.admin_vrid: + if self.vrrp_type != "member": + self.module.fail_json( + msg='Error: it binds a VRRP group to an mVRRP group, vrrp_type must be "member".') + if not self.vrrp_type or not self.interface or not self.vrid: + self.module.fail_json( + msg='Error: admin_interface admin_vrid vrrp_type interface vrid must ' + 'be config at the same time.') + if self.auth_mode == "md5" and self.is_plain == "true": + self.module.fail_json( + msg='Error: is_plain can not be True when auth_mode is md5.') + + if self.state == "present": + self.set_vrrp_group() + else: + self.delete_vrrp_group() + + self.get_end_state() + self.results['changed'] = self.changed + self.results['proposed'] = self.proposed + self.results['existing'] = self.existing + self.results['end_state'] = self.end_state + if self.changed: + self.results['updates'] = self.updates_cmd + else: + self.results['updates'] = list() + + self.module.exit_json(**self.results) + + +def main(): + """ Module main """ + + argument_spec = dict( + interface=dict(type='str'), + vrid=dict(type='str'), + virtual_ip=dict(type='str'), + vrrp_type=dict(type='str', choices=['normal', 'member', 'admin']), + admin_ignore_if_down=dict(type='bool', default=False), + admin_vrid=dict(type='str'), + admin_interface=dict(type='str'), + admin_flowdown=dict(type='bool', default=False), + priority=dict(type='str'), + version=dict(type='str', choices=['v2', 'v3']), + advertise_interval=dict(type='str'), + preempt_timer_delay=dict(type='str'), + gratuitous_arp_interval=dict(type='str'), + recover_delay=dict(type='str'), + holding_multiplier=dict(type='str'), + auth_mode=dict(type='str', choices=['simple', 'md5', 'none']), + is_plain=dict(type='bool', default=False), + auth_key=dict(type='str'), + fast_resume=dict(type='str', choices=['enable', 'disable']), + state=dict(type='str', default='present', + choices=['present', 'absent']) + ) + + argument_spec.update(ce_argument_spec) + module = Vrrp(argument_spec=argument_spec) + module.work() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/cloudengine/ce_vxlan_arp.py 
b/plugins/modules/network/cloudengine/ce_vxlan_arp.py new file mode 100644 index 0000000000..7198877371 --- /dev/null +++ b/plugins/modules/network/cloudengine/ce_vxlan_arp.py @@ -0,0 +1,692 @@ +#!/usr/bin/python +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: ce_vxlan_arp +short_description: Manages ARP attributes of VXLAN on HUAWEI CloudEngine devices. +description: + - Manages ARP attributes of VXLAN on HUAWEI CloudEngine devices. +author: QijunPan (@QijunPan) +notes: + - Recommended connection is C(network_cli). + - This module also works with C(local) connections for legacy playbooks. +options: + evn_bgp: + description: + - Enables EVN BGP. + choices: ['enable', 'disable'] + evn_source_ip: + description: + - Specifies the source address of an EVN BGP peer. + The value is in dotted decimal notation. + evn_peer_ip: + description: + - Specifies the IP address of an EVN BGP peer. + The value is in dotted decimal notation. + evn_server: + description: + - Configures the local device as the router reflector (RR) on the EVN network. + choices: ['enable', 'disable'] + evn_reflect_client: + description: + - Configures the local device as the route reflector (RR) and its peer as the client. + choices: ['enable', 'disable'] + vbdif_name: + description: + - Full name of VBDIF interface, i.e. Vbdif100. + arp_collect_host: + description: + - Enables EVN BGP or BGP EVPN to collect host information. + choices: ['enable', 'disable'] + host_collect_protocol: + description: + - Enables EVN BGP or BGP EVPN to advertise host information. + choices: ['bgp','none'] + bridge_domain_id: + description: + - Specifies a BD(bridge domain) ID. + The value is an integer ranging from 1 to 16777215. + arp_suppress: + description: + - Enables ARP broadcast suppression in a BD. + choices: ['enable', 'disable'] + state: + description: + - Determines whether the config should be present or not + on the device. + default: present + choices: ['present', 'absent'] +''' + +EXAMPLES = ''' +- name: vxlan arp module test + hosts: ce128 + connection: local + gather_facts: no + vars: + cli: + host: "{{ inventory_hostname }}" + port: "{{ ansible_ssh_port }}" + username: "{{ username }}" + password: "{{ password }}" + transport: cli + + tasks: + + - name: Configure EVN BGP on Layer 2 and Layer 3 VXLAN gateways to establish EVN BGP peer relationships. + ce_vxlan_arp: + evn_bgp: enable + evn_source_ip: 6.6.6.6 + evn_peer_ip: 7.7.7.7 + provider: "{{ cli }}" + - name: Configure a Layer 3 VXLAN gateway as a BGP RR. + ce_vxlan_arp: + evn_bgp: enable + evn_server: enable + provider: "{{ cli }}" + - name: Enable EVN BGP on a Layer 3 VXLAN gateway to collect host information. 
+ ce_vxlan_arp: + vbdif_name: Vbdif100 + arp_collect_host: enable + provider: "{{ cli }}" + - name: Enable Layer 2 and Layer 3 VXLAN gateways to use EVN BGP to advertise host information. + ce_vxlan_arp: + host_collect_protocol: bgp + provider: "{{ cli }}" + - name: Enable ARP broadcast suppression on a Layer 2 VXLAN gateway. + ce_vxlan_arp: + bridge_domain_id: 100 + arp_suppress: enable + provider: "{{ cli }}" +''' + +RETURN = ''' +proposed: + description: k/v pairs of parameters passed into module + returned: verbose mode + type: dict + sample: {"evn_bgp": "enable", "evn_source_ip": "6.6.6.6", "evn_peer_ip":"7.7.7.7", state: "present"} +existing: + description: k/v pairs of existing configuration + returned: verbose mode + type: dict + sample: {"evn_bgp": "disable", "evn_source_ip": null, "evn_peer_ip": []} +end_state: + description: k/v pairs of configuration after module execution + returned: verbose mode + type: dict + sample: {"evn_bgp": "enable", "evn_source_ip": "6.6.6.6", "evn_peer_ip": ["7.7.7.7"]} +updates: + description: commands sent to the device + returned: always + type: list + sample: ["evn bgp", + "source-address 6.6.6.6", + "peer 7.7.7.7"] +changed: + description: check to see if a change was made on the device + returned: always + type: bool + sample: true +''' + +import re +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.network.cloudengine.ce import load_config +from ansible_collections.community.general.plugins.module_utils.network.cloudengine.ce import ce_argument_spec +from ansible.module_utils.connection import exec_command + + +def is_config_exist(cmp_cfg, test_cfg): + """is configuration exist""" + + if not cmp_cfg or not test_cfg: + return False + + return bool(test_cfg in cmp_cfg) + + +def is_valid_v4addr(addr): + """check is ipv4 addr is valid""" + + if addr.count('.') == 3: + addr_list = addr.split('.') + if len(addr_list) != 4: + return False + for each_num in addr_list: + if not each_num.isdigit(): + return False + if int(each_num) > 255: + return False + return True + + return False + + +def get_evn_peers(config): + """get evn peer ip list""" + + get = re.findall(r"peer ([0-9]+.[0-9]+.[0-9]+.[0-9]+)", config) + if not get: + return None + else: + return list(set(get)) + + +def get_evn_srouce(config): + """get evn peer ip list""" + + get = re.findall( + r"source-address ([0-9]+.[0-9]+.[0-9]+.[0-9]+)", config) + if not get: + return None + else: + return get[0] + + +def get_evn_reflect_client(config): + """get evn reflect client list""" + + get = re.findall( + r"peer ([0-9]+.[0-9]+.[0-9]+.[0-9]+)\s*reflect-client", config) + if not get: + return None + else: + return list(get) + + +class VxlanArp(object): + """ + Manages arp attributes of VXLAN. 
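+    The workflow, visible in the methods below, is: read the running
+    configuration with "display current-configuration", build the
+    proposed/existing/end_state dictionaries, queue CLI commands through
+    cli_add_command(), and apply them with load_config() unless the module
+    runs in check mode.
+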
+ """ + + def __init__(self, argument_spec): + self.spec = argument_spec + self.module = None + self.init_module() + + # module input info + self.evn_bgp = self.module.params['evn_bgp'] + self.evn_source_ip = self.module.params['evn_source_ip'] + self.evn_peer_ip = self.module.params['evn_peer_ip'] + self.evn_server = self.module.params['evn_server'] + self.evn_reflect_client = self.module.params['evn_reflect_client'] + self.vbdif_name = self.module.params['vbdif_name'] + self.arp_collect_host = self.module.params['arp_collect_host'] + self.host_collect_protocol = self.module.params[ + 'host_collect_protocol'] + self.bridge_domain_id = self.module.params['bridge_domain_id'] + self.arp_suppress = self.module.params['arp_suppress'] + self.state = self.module.params['state'] + + # host info + self.host = self.module.params['host'] + self.username = self.module.params['username'] + self.port = self.module.params['port'] + + # state + self.config = "" # current config + self.changed = False + self.updates_cmd = list() + self.commands = list() + self.results = dict() + self.proposed = dict() + self.existing = dict() + self.end_state = dict() + + def init_module(self): + """init module""" + + required_together = [("vbdif_name", "arp_collect_host"), ("bridge_domain_id", "arp_suppress")] + self.module = AnsibleModule(argument_spec=self.spec, + required_together=required_together, + supports_check_mode=True) + + def cli_load_config(self, commands): + """load config by cli""" + + if not self.module.check_mode: + load_config(self.module, commands) + + def get_config(self, flags=None): + """Retrieves the current config from the device or cache + """ + flags = [] if flags is None else flags + + cmd = 'display current-configuration ' + cmd += ' '.join(flags) + cmd = cmd.strip() + + rc, out, err = exec_command(self.module, cmd) + if rc != 0: + self.module.fail_json(msg=err) + cfg = str(out).strip() + + return cfg + + def get_current_config(self): + """get current configuration""" + + flags = list() + exp = r"| ignore-case section include evn bgp|host collect protocol bgp" + if self.vbdif_name: + exp += r"|^#\s+interface %s\s+" % self.vbdif_name.lower().capitalize().replace(" ", "") + + if self.bridge_domain_id: + exp += r"|^#\s+bridge-domain %s\s+" % self.bridge_domain_id + + flags.append(exp) + cfg_str = self.get_config(flags) + config = cfg_str.split("\n") + + exist_config = "" + for cfg in config: + if not cfg.startswith("display"): + exist_config += cfg + return exist_config + + def cli_add_command(self, command, undo=False): + """add command to self.update_cmd and self.commands""" + + if undo and command.lower() not in ["quit", "return"]: + cmd = "undo " + command + else: + cmd = command + + self.commands.append(cmd) # set to device + if command.lower() not in ["quit", "return"]: + self.updates_cmd.append(cmd) # show updates result + + def config_bridge_domain(self): + """manage bridge domain configuration""" + + if not self.bridge_domain_id: + return + + # bridge-domain bd-id + # [undo] arp broadcast-suppress enable + + cmd = "bridge-domain %s" % self.bridge_domain_id + if not is_config_exist(self.config, cmd): + self.module.fail_json(msg="Error: Bridge domain %s is not exist." 
% self.bridge_domain_id) + + cmd = "arp broadcast-suppress enable" + exist = is_config_exist(self.config, cmd) + if self.arp_suppress == "enable" and not exist: + self.cli_add_command("bridge-domain %s" % self.bridge_domain_id) + self.cli_add_command(cmd) + self.cli_add_command("quit") + elif self.arp_suppress == "disable" and exist: + self.cli_add_command("bridge-domain %s" % self.bridge_domain_id) + self.cli_add_command(cmd, undo=True) + self.cli_add_command("quit") + + def config_evn_bgp(self): + """enables EVN BGP and configure evn bgp command""" + + evn_bgp_view = False + evn_bgp_enable = False + + cmd = "evn bgp" + exist = is_config_exist(self.config, cmd) + if self.evn_bgp == "enable" or exist: + evn_bgp_enable = True + + # [undo] evn bgp + if self.evn_bgp: + if self.evn_bgp == "enable" and not exist: + self.cli_add_command(cmd) + evn_bgp_view = True + elif self.evn_bgp == "disable" and exist: + self.cli_add_command(cmd, undo=True) + return + + # [undo] source-address ip-address + if evn_bgp_enable and self.evn_source_ip: + cmd = "source-address %s" % self.evn_source_ip + exist = is_config_exist(self.config, cmd) + if self.state == "present" and not exist: + if not evn_bgp_view: + self.cli_add_command("evn bgp") + evn_bgp_view = True + self.cli_add_command(cmd) + elif self.state == "absent" and exist: + if not evn_bgp_view: + self.cli_add_command("evn bgp") + evn_bgp_view = True + self.cli_add_command(cmd, undo=True) + + # [undo] peer ip-address + # [undo] peer ipv4-address reflect-client + if evn_bgp_enable and self.evn_peer_ip: + cmd = "peer %s" % self.evn_peer_ip + exist = is_config_exist(self.config, cmd) + if self.state == "present": + if not exist: + if not evn_bgp_view: + self.cli_add_command("evn bgp") + evn_bgp_view = True + self.cli_add_command(cmd) + if self.evn_reflect_client == "enable": + self.cli_add_command( + "peer %s reflect-client" % self.evn_peer_ip) + else: + if self.evn_reflect_client: + cmd = "peer %s reflect-client" % self.evn_peer_ip + exist = is_config_exist(self.config, cmd) + if self.evn_reflect_client == "enable" and not exist: + if not evn_bgp_view: + self.cli_add_command("evn bgp") + evn_bgp_view = True + self.cli_add_command(cmd) + elif self.evn_reflect_client == "disable" and exist: + if not evn_bgp_view: + self.cli_add_command("evn bgp") + evn_bgp_view = True + self.cli_add_command(cmd, undo=True) + else: + if exist: + if not evn_bgp_view: + self.cli_add_command("evn bgp") + evn_bgp_view = True + self.cli_add_command(cmd, undo=True) + + # [undo] server enable + if evn_bgp_enable and self.evn_server: + cmd = "server enable" + exist = is_config_exist(self.config, cmd) + if self.evn_server == "enable" and not exist: + if not evn_bgp_view: + self.cli_add_command("evn bgp") + evn_bgp_view = True + self.cli_add_command(cmd) + elif self.evn_server == "disable" and exist: + if not evn_bgp_view: + self.cli_add_command("evn bgp") + evn_bgp_view = True + self.cli_add_command(cmd, undo=True) + + if evn_bgp_view: + self.cli_add_command("quit") + + def config_vbdif(self): + """configure command at the VBDIF interface view""" + + # interface vbdif bd-id + # [undo] arp collect host enable + + cmd = "interface %s" % self.vbdif_name.lower().capitalize() + exist = is_config_exist(self.config, cmd) + + if not exist: + self.module.fail_json( + msg="Error: Interface %s does not exist." 
% self.vbdif_name)
+
+        cmd = "arp collect host enable"
+        exist = is_config_exist(self.config, cmd)
+        if self.arp_collect_host == "enable" and not exist:
+            self.cli_add_command("interface %s" %
+                                 self.vbdif_name.lower().capitalize())
+            self.cli_add_command(cmd)
+            self.cli_add_command("quit")
+        elif self.arp_collect_host == "disable" and exist:
+            self.cli_add_command("interface %s" %
+                                 self.vbdif_name.lower().capitalize())
+            self.cli_add_command(cmd, undo=True)
+            self.cli_add_command("quit")
+
+    def config_host_collect_protocal(self):
+        """Enable EVN BGP or BGP EVPN to advertise host information"""
+
+        # [undo] host collect protocol bgp
+        # This command is configured globally; no interface or BD view is
+        # entered before it.
+        cmd = "host collect protocol bgp"
+        exist = is_config_exist(self.config, cmd)
+
+        if self.state == "present":
+            if self.host_collect_protocol == "bgp" and not exist:
+                self.cli_add_command(cmd)
+            elif self.host_collect_protocol == "none" and exist:
+                self.cli_add_command(cmd, undo=True)
+        else:
+            if self.host_collect_protocol == "bgp" and exist:
+                self.cli_add_command(cmd, undo=True)
+
+    def is_valid_vbdif(self, ifname):
+        """check whether the VBDIF interface name is valid"""
+
+        if not ifname.upper().startswith('VBDIF'):
+            return False
+        bdid = self.vbdif_name.replace(" ", "").upper().replace("VBDIF", "")
+        if not bdid.isdigit():
+            return False
+        if int(bdid) < 1 or int(bdid) > 16777215:
+            return False
+
+        return True
+
+    def check_params(self):
+        """Check all input params"""
+
+        # bridge domain id check
+        if self.bridge_domain_id:
+            if not self.bridge_domain_id.isdigit():
+                self.module.fail_json(
+                    msg="Error: Bridge domain id is not a digit.")
+            if int(self.bridge_domain_id) < 1 or int(self.bridge_domain_id) > 16777215:
+                self.module.fail_json(
+                    msg="Error: Bridge domain id is not in the range from 1 to 16777215.")
+
+        # evn_source_ip check
+        if self.evn_source_ip:
+            if not is_valid_v4addr(self.evn_source_ip):
+                self.module.fail_json(msg="Error: evn_source_ip is invalid.")
+
+        # evn_peer_ip check
+        if self.evn_peer_ip:
+            if not is_valid_v4addr(self.evn_peer_ip):
+                self.module.fail_json(msg="Error: evn_peer_ip is invalid.")
+
+        # vbdif_name check
+        if self.vbdif_name:
+            self.vbdif_name = self.vbdif_name.replace(
+                " ", "").lower().capitalize()
+            if not self.is_valid_vbdif(self.vbdif_name):
+                self.module.fail_json(msg="Error: vbdif_name is invalid.")
+
+        # evn_reflect_client and evn_peer_ip must be set at the same time
+        if self.evn_reflect_client and not self.evn_peer_ip:
+            self.module.fail_json(
+                msg="Error: evn_reflect_client and evn_peer_ip must be set at the same time.")
+
+        # evn_server and evn_reflect_client cannot be set at the same time
+        if self.evn_server == "enable" and self.evn_reflect_client == "enable":
+            self.module.fail_json(
+                msg="Error: evn_server and evn_reflect_client cannot be set at the same time.")
+
+    def get_proposed(self):
+        """get proposed info"""
+
+        if self.evn_bgp:
+            self.proposed["evn_bgp"] = self.evn_bgp
+        if self.evn_source_ip:
+            self.proposed["evn_source_ip"] = self.evn_source_ip
+        if self.evn_peer_ip:
+            self.proposed["evn_peer_ip"] = self.evn_peer_ip
+        if self.evn_server:
+            self.proposed["evn_server"] = self.evn_server
+        if self.evn_reflect_client:
+            self.proposed["evn_reflect_client"] = self.evn_reflect_client
+        if self.arp_collect_host:
+            self.proposed["arp_collect_host"] = self.arp_collect_host
+        if self.host_collect_protocol:
+            self.proposed["host_collect_protocol"] = self.host_collect_protocol
+        if self.arp_suppress:
+            self.proposed["arp_suppress"] = self.arp_suppress
+        if self.vbdif_name:
+            self.proposed["vbdif_name"] = self.vbdif_name
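+        # For example, a task with evn_bgp=enable, evn_source_ip=6.6.6.6 and
+        # evn_peer_ip=7.7.7.7 yields proposed={"evn_bgp": "enable",
+        # "evn_source_ip": "6.6.6.6", "evn_peer_ip": "7.7.7.7",
+        # "state": "present"}, matching the RETURN sample above.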
+ if self.bridge_domain_id: + self.proposed["bridge_domain_id"] = self.bridge_domain_id + self.proposed["state"] = self.state + + def get_existing(self): + """get existing info""" + + evn_bgp_exist = is_config_exist(self.config, "evn bgp") + if evn_bgp_exist: + self.existing["evn_bgp"] = "enable" + else: + self.existing["evn_bgp"] = "disable" + + if evn_bgp_exist: + if is_config_exist(self.config, "server enable"): + self.existing["evn_server"] = "enable" + else: + self.existing["evn_server"] = "disable" + + self.existing["evn_source_ip"] = get_evn_srouce(self.config) + self.existing["evn_peer_ip"] = get_evn_peers(self.config) + self.existing["evn_reflect_client"] = get_evn_reflect_client( + self.config) + + if is_config_exist(self.config, "arp collect host enable"): + self.existing["host_collect_protocol"] = "enable" + else: + self.existing["host_collect_protocol"] = "disable" + + if is_config_exist(self.config, "host collect protocol bgp"): + self.existing["host_collect_protocol"] = "bgp" + else: + self.existing["host_collect_protocol"] = None + + if is_config_exist(self.config, "arp broadcast-suppress enable"): + self.existing["arp_suppress"] = "enable" + else: + self.existing["arp_suppress"] = "disable" + + def get_end_state(self): + """get end state info""" + + config = self.get_current_config() + evn_bgp_exist = is_config_exist(config, "evn bgp") + if evn_bgp_exist: + self.end_state["evn_bgp"] = "enable" + else: + self.end_state["evn_bgp"] = "disable" + + if evn_bgp_exist: + if is_config_exist(config, "server enable"): + self.end_state["evn_server"] = "enable" + else: + self.end_state["evn_server"] = "disable" + + self.end_state["evn_source_ip"] = get_evn_srouce(config) + self.end_state["evn_peer_ip"] = get_evn_peers(config) + self.end_state[ + "evn_reflect_client"] = get_evn_reflect_client(config) + + if is_config_exist(config, "arp collect host enable"): + self.end_state["host_collect_protocol"] = "enable" + else: + self.end_state["host_collect_protocol"] = "disable" + + if is_config_exist(config, "host collect protocol bgp"): + self.end_state["host_collect_protocol"] = "bgp" + else: + self.end_state["host_collect_protocol"] = None + + if is_config_exist(config, "arp broadcast-suppress enable"): + self.end_state["arp_suppress"] = "enable" + else: + self.end_state["arp_suppress"] = "disable" + + def work(self): + """worker""" + + self.check_params() + self.config = self.get_current_config() + self.get_existing() + self.get_proposed() + + # deal present or absent + if self.evn_bgp or self.evn_server or self.evn_peer_ip or self.evn_source_ip: + self.config_evn_bgp() + + if self.vbdif_name and self.arp_collect_host: + self.config_vbdif() + + if self.host_collect_protocol: + self.config_host_collect_protocal() + + if self.bridge_domain_id and self.arp_suppress: + self.config_bridge_domain() + + if self.commands: + self.cli_load_config(self.commands) + self.changed = True + + self.get_end_state() + self.results['changed'] = self.changed + self.results['proposed'] = self.proposed + self.results['existing'] = self.existing + self.results['end_state'] = self.end_state + if self.changed: + self.results['updates'] = self.updates_cmd + else: + self.results['updates'] = list() + + self.module.exit_json(**self.results) + + +def main(): + """Module main""" + + argument_spec = dict( + evn_bgp=dict(required=False, type='str', + choices=['enable', 'disable']), + evn_source_ip=dict(required=False, type='str'), + evn_peer_ip=dict(required=False, type='str'), + evn_server=dict(required=False, 
type='str', + choices=['enable', 'disable']), + evn_reflect_client=dict( + required=False, type='str', choices=['enable', 'disable']), + vbdif_name=dict(required=False, type='str'), + arp_collect_host=dict(required=False, type='str', + choices=['enable', 'disable']), + host_collect_protocol=dict( + required=False, type='str', choices=['bgp', 'none']), + bridge_domain_id=dict(required=False, type='str'), + arp_suppress=dict(required=False, type='str', + choices=['enable', 'disable']), + state=dict(required=False, default='present', + choices=['present', 'absent']) + ) + argument_spec.update(ce_argument_spec) + module = VxlanArp(argument_spec) + module.work() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/cloudengine/ce_vxlan_gateway.py b/plugins/modules/network/cloudengine/ce_vxlan_gateway.py new file mode 100644 index 0000000000..da8e9a23fc --- /dev/null +++ b/plugins/modules/network/cloudengine/ce_vxlan_gateway.py @@ -0,0 +1,940 @@ +#!/usr/bin/python +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: ce_vxlan_gateway +short_description: Manages gateway for the VXLAN network on HUAWEI CloudEngine devices. +description: + - Configuring Centralized All-Active Gateways or Distributed Gateway for + the VXLAN Network on HUAWEI CloudEngine devices. +author: QijunPan (@QijunPan) +notes: + - Ensure All-Active Gateways or Distributed Gateway for the VXLAN Network can not configure at the same time. + - Recommended connection is C(network_cli). + - This module also works with C(local) connections for legacy playbooks. +options: + dfs_id: + description: + - Specifies the ID of a DFS group. + The value must be 1. + dfs_source_ip: + description: + - Specifies the IPv4 address bound to a DFS group. + The value is in dotted decimal notation. + dfs_source_vpn: + description: + - Specifies the name of a VPN instance bound to a DFS group. + The value is a string of 1 to 31 case-sensitive characters without spaces. + If the character string is quoted by double quotation marks, the character string can contain spaces. + The value C(_public_) is reserved and cannot be used as the VPN instance name. + dfs_udp_port: + description: + - Specifies the UDP port number of the DFS group. + The value is an integer that ranges from 1025 to 65535. + dfs_all_active: + description: + - Creates all-active gateways. + choices: ['enable', 'disable'] + dfs_peer_ip: + description: + - Configure the IP address of an all-active gateway peer. + The value is in dotted decimal notation. + dfs_peer_vpn: + description: + - Specifies the name of the VPN instance that is associated with all-active gateway peer. + The value is a string of 1 to 31 case-sensitive characters, spaces not supported. 
+ When double quotation marks are used around the string, spaces are allowed in the string. + The value C(_public_) is reserved and cannot be used as the VPN instance name. + vpn_instance: + description: + - Specifies the name of a VPN instance. + The value is a string of 1 to 31 case-sensitive characters, spaces not supported. + When double quotation marks are used around the string, spaces are allowed in the string. + The value C(_public_) is reserved and cannot be used as the VPN instance name. + vpn_vni: + description: + - Specifies a VNI ID. + Binds a VXLAN network identifier (VNI) to a virtual private network (VPN) instance. + The value is an integer ranging from 1 to 16000000. + vbdif_name: + description: + - Full name of VBDIF interface, i.e. Vbdif100. + vbdif_bind_vpn: + description: + - Specifies the name of the VPN instance that is associated with the interface. + The value is a string of 1 to 31 case-sensitive characters, spaces not supported. + When double quotation marks are used around the string, spaces are allowed in the string. + The value C(_public_) is reserved and cannot be used as the VPN instance name. + vbdif_mac: + description: + - Specifies a MAC address for a VBDIF interface. + The value is in the format of H-H-H. Each H is a 4-digit hexadecimal number, such as C(00e0) or C(fc01). + If an H contains less than four digits, 0s are added ahead. For example, C(e0) is equal to C(00e0). + A MAC address cannot be all 0s or 1s or a multicast MAC address. + arp_distribute_gateway: + description: + - Enable the distributed gateway function on VBDIF interface. + choices: ['enable','disable'] + arp_direct_route: + description: + - Enable VLINK direct route on VBDIF interface. + choices: ['enable','disable'] + state: + description: + - Determines whether the config should be present or not + on the device. + default: present + choices: ['present', 'absent'] +''' + +EXAMPLES = ''' +- name: vxlan gateway module test + hosts: ce128 + connection: local + gather_facts: no + vars: + cli: + host: "{{ inventory_hostname }}" + port: "{{ ansible_ssh_port }}" + username: "{{ username }}" + password: "{{ password }}" + transport: cli + + tasks: + + - name: Configuring Centralized All-Active Gateways for the VXLAN Network + ce_vxlan_gateway: + dfs_id: 1 + dfs_source_ip: 6.6.6.6 + dfs_all_active: enable + dfs_peer_ip: 7.7.7.7 + provider: "{{ cli }}" + - name: Bind the VPN instance to a Layer 3 gateway, enable distributed gateway, and configure host route advertisement. + ce_vxlan_gateway: + vbdif_name: Vbdif100 + vbdif_bind_vpn: vpn1 + arp_distribute_gateway: enable + arp_direct_route: enable + provider: "{{ cli }}" + - name: Assign a VNI to a VPN instance. 
+ ce_vxlan_gateway: + vpn_instance: vpn1 + vpn_vni: 100 + provider: "{{ cli }}" +''' + +RETURN = ''' +proposed: + description: k/v pairs of parameters passed into module + returned: verbose mode + type: dict + sample: {"dfs_id": "1", "dfs_source_ip": "6.6.6.6", "dfs_all_active":"enable", "dfs_peer_ip": "7.7.7.7"} +existing: + description: k/v pairs of existing configuration + returned: verbose mode + type: dict + sample: {"dfs_id": "1", "dfs_source_ip": null, "evn_peer_ip": [], "dfs_all_active": "disable"} +end_state: + description: k/v pairs of configuration after module execution + returned: verbose mode + type: dict + sample: {"dfs_id": "1", "evn_source_ip": "6.6.6.6", "evn_source_vpn": null, + "evn_peers": [{"ip": "7.7.7.7", "vpn": ""}], "dfs_all_active": "enable"} +updates: + description: commands sent to the device + returned: always + type: list + sample: ["dfs-group 1", + "source ip 6.6.6.6", + "active-active-gateway", + "peer 7.7.7.7"] +changed: + description: check to see if a change was made on the device + returned: always + type: bool + sample: true +''' + +import re +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.network.cloudengine.ce import load_config +from ansible_collections.community.general.plugins.module_utils.network.cloudengine.ce import ce_argument_spec +from ansible.module_utils.connection import exec_command + + +def is_config_exist(cmp_cfg, test_cfg): + """is configuration exist?""" + + if not cmp_cfg or not test_cfg: + return False + + return bool(test_cfg in cmp_cfg) + + +def is_valid_v4addr(addr): + """check is ipv4 addr""" + + if not addr: + return False + + if addr.count('.') == 3: + addr_list = addr.split('.') + if len(addr_list) != 4: + return False + for each_num in addr_list: + if not each_num.isdigit(): + return False + if int(each_num) > 255: + return False + return True + + return False + + +def mac_format(mac): + """convert mac format to xxxx-xxxx-xxxx""" + + if not mac: + return None + + if mac.count("-") != 2: + return None + + addrs = mac.split("-") + for i in range(3): + if not addrs[i] or not addrs[i].isalnum(): + return None + if len(addrs[i]) < 1 or len(addrs[i]) > 4: + return None + try: + addrs[i] = int(addrs[i], 16) + except ValueError: + return None + + try: + return "%04x-%04x-%04x" % (addrs[0], addrs[1], addrs[2]) + except ValueError: + return None + except TypeError: + return None + + +def get_dfs_source_ip(config): + """get dfs source ip address""" + + get = re.findall(r"source ip ([0-9]+.[0-9]+.[0-9]+.[0-9]+)", config) + if not get: + return None + else: + return get[0] + + +def get_dfs_source_vpn(config): + """get dfs source ip vpn instance name""" + + get = re.findall( + r"source ip [0-9]+.[0-9]+.[0-9]+.[0-9]+ vpn-instance (\S+)", config) + if not get: + return None + else: + return get[0] + + +def get_dfs_udp_port(config): + """get dfs udp port""" + + get = re.findall(r"udp port (\d+)", config) + if not get: + return None + else: + return get[0] + + +def get_dfs_peers(config): + """get evn peer ip list""" + + get = re.findall( + r"peer ([0-9]+.[0-9]+.[0-9]+.[0-9]+)\s?(vpn-instance)?\s?(\S*)", config) + if not get: + return None + else: + peers = list() + for item in get: + peers.append(dict(ip=item[0], vpn=item[2])) + return peers + + +def get_ip_vpn(config): + """get ip vpn instance""" + + get = re.findall(r"ip vpn-instance (\S+)", config) + if not get: + return None + else: + return get[0] + + +def get_ip_vpn_vni(config): + """get ip vpn vxlan vni""" + + get 
= re.findall(r"vxlan vni (\d+)", config) + if not get: + return None + else: + return get[0] + + +def get_vbdif_vpn(config): + """get ip vpn name of interface vbdif""" + + get = re.findall(r"ip binding vpn-instance (\S+)", config) + if not get: + return None + else: + return get[0] + + +def get_vbdif_mac(config): + """get mac address of interface vbdif""" + + get = re.findall( + r" mac-address ([0-9a-fA-F]{1,4}-[0-9a-fA-F]{1,4}-[0-9a-fA-F]{1,4})", config) + if not get: + return None + else: + return get[0] + + +class VxlanGateway(object): + """ + Manages Gateway for the VXLAN Network. + """ + + def __init__(self, argument_spec): + self.spec = argument_spec + self.module = None + self.init_module() + + # module input info + self.dfs_id = self.module.params['dfs_id'] + self.dfs_source_ip = self.module.params['dfs_source_ip'] + self.dfs_source_vpn = self.module.params['dfs_source_vpn'] + self.dfs_udp_port = self.module.params['dfs_udp_port'] + self.dfs_all_active = self.module.params['dfs_all_active'] + self.dfs_peer_ip = self.module.params['dfs_peer_ip'] + self.dfs_peer_vpn = self.module.params['dfs_peer_vpn'] + self.vpn_instance = self.module.params['vpn_instance'] + self.vpn_vni = self.module.params['vpn_vni'] + self.vbdif_name = self.module.params['vbdif_name'] + self.vbdif_mac = self.module.params['vbdif_mac'] + self.vbdif_bind_vpn = self.module.params['vbdif_bind_vpn'] + self.arp_distribute_gateway = self.module.params['arp_distribute_gateway'] + self.arp_direct_route = self.module.params['arp_direct_route'] + self.state = self.module.params['state'] + + # host info + self.host = self.module.params['host'] + self.username = self.module.params['username'] + self.port = self.module.params['port'] + + # state + self.config = "" # current config + self.changed = False + self.updates_cmd = list() + self.commands = list() + self.results = dict() + self.proposed = dict() + self.existing = dict() + self.end_state = dict() + + def init_module(self): + """init module""" + + self.module = AnsibleModule( + argument_spec=self.spec, supports_check_mode=True) + + def cli_load_config(self, commands): + """load config by cli""" + + if not self.module.check_mode: + load_config(self.module, commands) + + def get_config(self, flags=None): + """Retrieves the current config from the device or cache + """ + flags = [] if flags is None else flags + + cmd = 'display current-configuration ' + cmd += ' '.join(flags) + cmd = cmd.strip() + + rc, out, err = exec_command(self.module, cmd) + if rc != 0: + self.module.fail_json(msg=err) + cfg = str(out).strip() + + return cfg + + def get_current_config(self): + """get current configuration""" + + flags = list() + exp = r" | ignore-case section include ^#\s+dfs-group" + if self.vpn_instance: + exp += r"|^#\s+ip vpn-instance %s" % self.vpn_instance + if self.vbdif_name: + exp += r"|^#\s+interface %s" % self.vbdif_name + flags.append(exp) + return self.get_config(flags) + + def cli_add_command(self, command, undo=False): + """add command to self.update_cmd and self.commands""" + + if undo and command.lower() not in ["quit", "return"]: + cmd = "undo " + command + else: + cmd = command + + self.commands.append(cmd) # set to device + if command.lower() not in ["quit", "return"]: + self.updates_cmd.append(cmd) # show updates result + + def config_dfs_group(self): + """manage Dynamic Fabric Service (DFS) group configuration""" + + if not self.dfs_id: + return + + dfs_view = False + view_cmd = "dfs-group %s" % self.dfs_id + exist = is_config_exist(self.config, view_cmd) + if 
self.state == "present" and not exist: + self.cli_add_command(view_cmd) + dfs_view = True + + # undo dfs-group dfs-group-id + if self.state == "absent" and exist: + if not self.dfs_source_ip and not self.dfs_udp_port and not self.dfs_all_active and not self.dfs_peer_ip: + self.cli_add_command(view_cmd, undo=True) + return + + # [undo] source ip ip-address [ vpn-instance vpn-instance-name ] + if self.dfs_source_ip: + cmd = "source ip %s" % self.dfs_source_ip + if self.dfs_source_vpn: + cmd += " vpn-instance %s" % self.dfs_source_vpn + exist = is_config_exist(self.config, cmd) + if self.state == "present" and not exist: + if not dfs_view: + self.cli_add_command(view_cmd) + dfs_view = True + self.cli_add_command(cmd) + if self.state == "absent" and exist: + if not dfs_view: + self.cli_add_command(view_cmd) + dfs_view = True + self.cli_add_command(cmd, undo=True) + + # [undo] udp port port-number + if self.dfs_udp_port: + cmd = "udp port %s" % self.dfs_udp_port + exist = is_config_exist(self.config, cmd) + if self.state == "present" and not exist: + if not dfs_view: + self.cli_add_command(view_cmd) + dfs_view = True + self.cli_add_command(cmd) + elif self.state == "absent" and exist: + if not dfs_view: + self.cli_add_command(view_cmd) + dfs_view = True + self.cli_add_command(cmd, undo=True) + + # [undo] active-active-gateway + # [undo]peer[ vpn-instance vpn-instance-name ] + aa_cmd = "active-active-gateway" + aa_exist = is_config_exist(self.config, aa_cmd) + aa_view = False + if self.dfs_all_active == "disable": + if aa_exist: + cmd = "peer %s" % self.dfs_peer_ip + if self.dfs_source_vpn: + cmd += " vpn-instance %s" % self.dfs_peer_vpn + exist = is_config_exist(self.config, cmd) + if self.state == "absent" and exist: + if not dfs_view: + self.cli_add_command(view_cmd) + dfs_view = True + self.cli_add_command(aa_cmd) + self.cli_add_command(cmd, undo=True) + self.cli_add_command("quit") + if not dfs_view: + self.cli_add_command(view_cmd) + dfs_view = True + self.cli_add_command(aa_cmd, undo=True) + elif self.dfs_all_active == "enable": + if not aa_exist: + if not dfs_view: + self.cli_add_command(view_cmd) + dfs_view = True + self.cli_add_command(aa_cmd) + aa_view = True + + if self.dfs_peer_ip: + cmd = "peer %s" % self.dfs_peer_ip + if self.dfs_peer_vpn: + cmd += " vpn-instance %s" % self.dfs_peer_vpn + exist = is_config_exist(self.config, cmd) + + if self.state == "present" and not exist: + if not dfs_view: + self.cli_add_command(view_cmd) + dfs_view = True + if not aa_view: + self.cli_add_command(aa_cmd) + self.cli_add_command(cmd) + self.cli_add_command("quit") + elif self.state == "absent" and exist: + if not dfs_view: + self.cli_add_command(view_cmd) + dfs_view = True + if not aa_view: + self.cli_add_command(aa_cmd) + self.cli_add_command(cmd, undo=True) + self.cli_add_command("quit") + else: # not input dfs_all_active + if aa_exist and self.dfs_peer_ip: + cmd = "peer %s" % self.dfs_peer_ip + if self.dfs_peer_vpn: + cmd += " vpn-instance %s" % self.dfs_peer_vpn + exist = is_config_exist(self.config, cmd) + if self.state == "present" and not exist: + if not dfs_view: + self.cli_add_command(view_cmd) + dfs_view = True + self.cli_add_command(aa_cmd) + self.cli_add_command(cmd) + self.cli_add_command("quit") + elif self.state == "absent" and exist: + if not dfs_view: + self.cli_add_command(view_cmd) + dfs_view = True + self.cli_add_command(aa_cmd) + self.cli_add_command(cmd, undo=True) + self.cli_add_command("quit") + else: + pass + elif not aa_exist and self.dfs_peer_ip and self.state == 
"present": + self.module.fail_json( + msg="Error: All-active gateways is not enable.") + else: + pass + + if dfs_view: + self.cli_add_command("quit") + + def config_ip_vpn(self): + """configure command at the ip vpn view""" + + if not self.vpn_instance or not self.vpn_vni: + return + + # ip vpn-instance vpn-instance-name + view_cmd = "ip vpn-instance %s" % self.vpn_instance + exist = is_config_exist(self.config, view_cmd) + if not exist: + self.module.fail_json( + msg="Error: ip vpn instance %s is not exist." % self.vpn_instance) + + # [undo] vxlan vni vni-id + cmd = "vxlan vni %s" % self.vpn_vni + exist = is_config_exist(self.config, cmd) + if self.state == "present" and not exist: + self.cli_add_command(view_cmd) + self.cli_add_command(cmd) + self.cli_add_command("quit") + elif self.state == "absent" and exist: + self.cli_add_command(view_cmd) + self.cli_add_command(cmd, undo=True) + self.cli_add_command("quit") + + def config_vbdif(self): + """configure command at the VBDIF interface view""" + + if not self.vbdif_name: + return + + vbdif_cmd = "interface %s" % self.vbdif_name.lower().capitalize() + exist = is_config_exist(self.config, vbdif_cmd) + + if not exist: + self.module.fail_json( + msg="Error: Interface %s is not exist." % self.vbdif_name) + + # interface vbdif bd-id + # [undo] ip binding vpn-instance vpn-instance-name + vbdif_view = False + if self.vbdif_bind_vpn: + cmd = "ip binding vpn-instance %s" % self.vbdif_bind_vpn + exist = is_config_exist(self.config, cmd) + + if self.state == "present" and not exist: + if not vbdif_view: + self.cli_add_command(vbdif_cmd) + vbdif_view = True + self.cli_add_command(cmd) + elif self.state == "absent" and exist: + if not vbdif_view: + self.cli_add_command(vbdif_cmd) + vbdif_view = True + self.cli_add_command(cmd, undo=True) + + # [undo] arp distribute-gateway enable + if self.arp_distribute_gateway: + cmd = "arp distribute-gateway enable" + exist = is_config_exist(self.config, cmd) + if self.arp_distribute_gateway == "enable" and not exist: + if not vbdif_view: + self.cli_add_command(vbdif_cmd) + vbdif_view = True + self.cli_add_command(cmd) + elif self.arp_distribute_gateway == "disable" and exist: + if not vbdif_view: + self.cli_add_command(vbdif_cmd) + vbdif_view = True + self.cli_add_command(cmd, undo=True) + + # [undo] arp direct-route enable + if self.arp_direct_route: + cmd = "arp direct-route enable" + exist = is_config_exist(self.config, cmd) + if self.arp_direct_route == "enable" and not exist: + if not vbdif_view: + self.cli_add_command(vbdif_cmd) + vbdif_view = True + self.cli_add_command(cmd) + elif self.arp_direct_route == "disable" and exist: + if not vbdif_view: + self.cli_add_command(vbdif_cmd) + vbdif_view = True + self.cli_add_command(cmd, undo=True) + + # mac-address mac-address + # undo mac-address + if self.vbdif_mac: + cmd = "mac-address %s" % self.vbdif_mac + exist = is_config_exist(self.config, cmd) + if self.state == "present" and not exist: + if not vbdif_view: + self.cli_add_command(vbdif_cmd) + vbdif_view = True + self.cli_add_command(cmd) + elif self.state == "absent" and exist: + if not vbdif_view: + self.cli_add_command(vbdif_cmd) + vbdif_view = True + self.cli_add_command("undo mac-address") + + # quit + if vbdif_view: + self.cli_add_command("quit") + + def is_valid_vbdif(self, ifname): + """check is interface vbdif""" + + if not ifname.upper().startswith('VBDIF'): + return False + bdid = self.vbdif_name.replace(" ", "").upper().replace("VBDIF", "") + if not bdid.isdigit(): + return False + if int(bdid) < 
1 or int(bdid) > 16777215:
+            return False
+
+        return True
+
+    def is_valid_ip_vpn(self, vpname):
+        """check whether the VPN instance name is valid"""
+
+        if not vpname:
+            return False
+
+        if vpname == "_public_":
+            self.module.fail_json(
+                msg="Error: The value C(_public_) is reserved and cannot be used as the VPN instance name.")
+
+        if len(vpname) < 1 or len(vpname) > 31:
+            self.module.fail_json(
+                msg="Error: IP vpn name length is not in the range from 1 to 31.")
+
+        return True
+
+    def check_params(self):
+        """Check all input params"""
+
+        # dfs id check
+        if self.dfs_id:
+            if not self.dfs_id.isdigit():
+                self.module.fail_json(msg="Error: DFS id is not a digit.")
+            if int(self.dfs_id) != 1:
+                self.module.fail_json(msg="Error: DFS id must be 1.")
+
+        # dfs_source_ip check
+        if self.dfs_source_ip:
+            if not is_valid_v4addr(self.dfs_source_ip):
+                self.module.fail_json(msg="Error: dfs_source_ip is invalid.")
+            # dfs_source_vpn check
+            if self.dfs_source_vpn and not self.is_valid_ip_vpn(self.dfs_source_vpn):
+                self.module.fail_json(msg="Error: dfs_source_vpn is invalid.")
+
+        # dfs_source_vpn and dfs_source_ip must be set at the same time
+        if self.dfs_source_vpn and not self.dfs_source_ip:
+            self.module.fail_json(
+                msg="Error: dfs_source_vpn and dfs_source_ip must be set at the same time.")
+
+        # dfs_udp_port check
+        if self.dfs_udp_port:
+            if not self.dfs_udp_port.isdigit():
+                self.module.fail_json(
+                    msg="Error: dfs_udp_port id is not a digit.")
+            if int(self.dfs_udp_port) < 1025 or int(self.dfs_udp_port) > 65535:
+                self.module.fail_json(
+                    msg="Error: dfs_udp_port is not in the range from 1025 to 65535.")
+
+        # dfs_peer_ip check
+        if self.dfs_peer_ip:
+            if not is_valid_v4addr(self.dfs_peer_ip):
+                self.module.fail_json(msg="Error: dfs_peer_ip is invalid.")
+            # dfs_peer_vpn check
+            if self.dfs_peer_vpn and not self.is_valid_ip_vpn(self.dfs_peer_vpn):
+                self.module.fail_json(msg="Error: dfs_peer_vpn is invalid.")
+
+        # dfs_peer_vpn and dfs_peer_ip must be set at the same time
+        if self.dfs_peer_vpn and not self.dfs_peer_ip:
+            self.module.fail_json(
+                msg="Error: dfs_peer_vpn and dfs_peer_ip must be set at the same time.")
+
+        # vpn_instance check
+        if self.vpn_instance and not self.is_valid_ip_vpn(self.vpn_instance):
+            self.module.fail_json(msg="Error: vpn_instance is invalid.")
+
+        # vpn_vni check
+        if self.vpn_vni:
+            if not self.vpn_vni.isdigit():
+                self.module.fail_json(msg="Error: vpn_vni id is not a digit.")
+            if int(self.vpn_vni) < 1 or int(self.vpn_vni) > 16000000:
+                self.module.fail_json(
+                    msg="Error: vpn_vni is not in the range from 1 to 16000000.")
+
+        # vpn_instance and vpn_vni must be set at the same time
+        if bool(self.vpn_instance) != bool(self.vpn_vni):
+            self.module.fail_json(
+                msg="Error: vpn_instance and vpn_vni must be set at the same time.")
+
+        # vbdif_name check
+        if self.vbdif_name:
+            self.vbdif_name = self.vbdif_name.replace(" ", "").lower().capitalize()
+            if not self.is_valid_vbdif(self.vbdif_name):
+                self.module.fail_json(msg="Error: vbdif_name is invalid.")
+
+        # vbdif_mac check
+        if self.vbdif_mac:
+            mac = mac_format(self.vbdif_mac)
+            if not mac:
+                self.module.fail_json(msg="Error: vbdif_mac is invalid.")
+            self.vbdif_mac = mac
+
+        # vbdif_bind_vpn check
+        if self.vbdif_bind_vpn and not self.is_valid_ip_vpn(self.vbdif_bind_vpn):
+            self.module.fail_json(msg="Error: vbdif_bind_vpn is invalid.")
+
+        # All-Active Gateways or Distributed Gateway config cannot be set at
+        # the same time.
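+        # dfs_id selects the centralized all-active gateway model, while
+        # vpn_vni and arp_distribute_gateway belong to the distributed
+        # gateway model; per the module notes the two cannot coexist.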
+ if self.dfs_id: + if self.vpn_vni or self.arp_distribute_gateway == "enable": + self.module.fail_json(msg="Error: All-Active Gateways or Distributed Gateway config " + "can not set at the same time.") + + def get_proposed(self): + """get proposed info""" + + if self.dfs_id: + self.proposed["dfs_id"] = self.dfs_id + self.proposed["dfs_source_ip"] = self.dfs_source_ip + self.proposed["dfs_source_vpn"] = self.dfs_source_vpn + self.proposed["dfs_udp_port"] = self.dfs_udp_port + self.proposed["dfs_all_active"] = self.dfs_all_active + self.proposed["dfs_peer_ip"] = self.dfs_peer_ip + self.proposed["dfs_peer_vpn"] = self.dfs_peer_vpn + + if self.vpn_instance: + self.proposed["vpn_instance"] = self.vpn_instance + self.proposed["vpn_vni"] = self.vpn_vni + + if self.vbdif_name: + self.proposed["vbdif_name"] = self.vbdif_name + self.proposed["vbdif_mac"] = self.vbdif_mac + self.proposed["vbdif_bind_vpn"] = self.vbdif_bind_vpn + self.proposed[ + "arp_distribute_gateway"] = self.arp_distribute_gateway + self.proposed["arp_direct_route"] = self.arp_direct_route + + self.proposed["state"] = self.state + + def get_existing(self): + """get existing info""" + + if not self.config: + return + + if is_config_exist(self.config, "dfs-group 1"): + self.existing["dfs_id"] = "1" + self.existing["dfs_source_ip"] = get_dfs_source_ip(self.config) + self.existing["dfs_source_vpn"] = get_dfs_source_vpn(self.config) + self.existing["dfs_udp_port"] = get_dfs_udp_port(self.config) + if is_config_exist(self.config, "active-active-gateway"): + self.existing["dfs_all_active"] = "enable" + self.existing["dfs_peers"] = get_dfs_peers(self.config) + else: + self.existing["dfs_all_active"] = "disable" + + if self.vpn_instance: + self.existing["vpn_instance"] = get_ip_vpn(self.config) + self.existing["vpn_vni"] = get_ip_vpn_vni(self.config) + + if self.vbdif_name: + self.existing["vbdif_name"] = self.vbdif_name + self.existing["vbdif_mac"] = get_vbdif_mac(self.config) + self.existing["vbdif_bind_vpn"] = get_vbdif_vpn(self.config) + if is_config_exist(self.config, "arp distribute-gateway enable"): + self.existing["arp_distribute_gateway"] = "enable" + else: + self.existing["arp_distribute_gateway"] = "disable" + if is_config_exist(self.config, "arp direct-route enable"): + self.existing["arp_direct_route"] = "enable" + else: + self.existing["arp_direct_route"] = "disable" + + def get_end_state(self): + """get end state info""" + + config = self.get_current_config() + if not config: + return + + if is_config_exist(config, "dfs-group 1"): + self.end_state["dfs_id"] = "1" + self.end_state["dfs_source_ip"] = get_dfs_source_ip(config) + self.end_state["dfs_source_vpn"] = get_dfs_source_vpn(config) + self.end_state["dfs_udp_port"] = get_dfs_udp_port(config) + if is_config_exist(config, "active-active-gateway"): + self.end_state["dfs_all_active"] = "enable" + self.end_state["dfs_peers"] = get_dfs_peers(config) + else: + self.end_state["dfs_all_active"] = "disable" + + if self.vpn_instance: + self.end_state["vpn_instance"] = get_ip_vpn(config) + self.end_state["vpn_vni"] = get_ip_vpn_vni(config) + + if self.vbdif_name: + self.end_state["vbdif_name"] = self.vbdif_name + self.end_state["vbdif_mac"] = get_vbdif_mac(config) + self.end_state["vbdif_bind_vpn"] = get_vbdif_vpn(config) + if is_config_exist(config, "arp distribute-gateway enable"): + self.end_state["arp_distribute_gateway"] = "enable" + else: + self.end_state["arp_distribute_gateway"] = "disable" + if is_config_exist(config, "arp direct-route enable"): + 
self.end_state["arp_direct_route"] = "enable" + else: + self.end_state["arp_direct_route"] = "disable" + + def work(self): + """worker""" + + self.check_params() + self.config = self.get_current_config() + self.get_existing() + self.get_proposed() + + # deal present or absent + if self.dfs_id: + self.config_dfs_group() + + if self.vpn_instance: + self.config_ip_vpn() + + if self.vbdif_name: + self.config_vbdif() + + if self.commands: + self.cli_load_config(self.commands) + self.changed = True + + self.get_end_state() + self.results['changed'] = self.changed + self.results['proposed'] = self.proposed + self.results['existing'] = self.existing + self.results['end_state'] = self.end_state + if self.changed: + self.results['updates'] = self.updates_cmd + else: + self.results['updates'] = list() + + self.module.exit_json(**self.results) + + +def main(): + """Module main""" + + argument_spec = dict( + dfs_id=dict(required=False, type='str'), + dfs_source_ip=dict(required=False, type='str'), + dfs_source_vpn=dict(required=False, type='str'), + dfs_udp_port=dict(required=False, type='str'), + dfs_all_active=dict(required=False, type='str', + choices=['enable', 'disable']), + dfs_peer_ip=dict(required=False, type='str'), + dfs_peer_vpn=dict(required=False, type='str'), + vpn_instance=dict(required=False, type='str'), + vpn_vni=dict(required=False, type='str'), + vbdif_name=dict(required=False, type='str'), + vbdif_mac=dict(required=False, type='str'), + vbdif_bind_vpn=dict(required=False, type='str'), + arp_distribute_gateway=dict( + required=False, type='str', choices=['enable', 'disable']), + arp_direct_route=dict(required=False, type='str', + choices=['enable', 'disable']), + state=dict(required=False, default='present', + choices=['present', 'absent']) + ) + argument_spec.update(ce_argument_spec) + module = VxlanGateway(argument_spec) + module.work() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/cloudengine/ce_vxlan_global.py b/plugins/modules/network/cloudengine/ce_vxlan_global.py new file mode 100644 index 0000000000..954c5bd7e3 --- /dev/null +++ b/plugins/modules/network/cloudengine/ce_vxlan_global.py @@ -0,0 +1,543 @@ +#!/usr/bin/python +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: ce_vxlan_global +short_description: Manages global attributes of VXLAN and bridge domain on HUAWEI CloudEngine devices. +description: + - Manages global attributes of VXLAN and bridge domain on HUAWEI CloudEngine devices. +author: QijunPan (@QijunPan) +notes: + - Recommended connection is C(network_cli). + - This module also works with C(local) connections for legacy playbooks. +options: + bridge_domain_id: + description: + - Specifies a bridge domain ID. 
+              The value is an integer ranging from 1 to 16777215.
+    tunnel_mode_vxlan:
+        description:
+            - Set the tunnel mode to VXLAN when configuring the VXLAN feature.
+        choices: ['enable', 'disable']
+    nvo3_prevent_loops:
+        description:
+            - Loop prevention of VXLAN traffic in non-enhanced mode.
+              When the device works in non-enhanced mode,
+              inter-card forwarding of VXLAN traffic may result in loops.
+        choices: ['enable', 'disable']
+    nvo3_acl_extend:
+        description:
+            - Enabling or disabling the VXLAN ACL extension function.
+        choices: ['enable', 'disable']
+    nvo3_gw_enhanced:
+        description:
+            - Configuring the Layer 3 VXLAN gateway to work in non-loopback mode.
+        choices: ['l2', 'l3']
+    nvo3_service_extend:
+        description:
+            - Enabling or disabling the VXLAN service extension function.
+        choices: ['enable', 'disable']
+    nvo3_eth_trunk_hash:
+        description:
+            - Eth-Trunk load balancing of VXLAN packets in optimized mode.
+        choices: ['enable', 'disable']
+    nvo3_ecmp_hash:
+        description:
+            - Load balancing of VXLAN packets through ECMP in optimized mode.
+        choices: ['enable', 'disable']
+    state:
+        description:
+            - Determines whether the config should be present or not
+              on the device.
+        default: present
+        choices: ['present', 'absent']
+'''
+
+EXAMPLES = '''
+- name: vxlan global module test
+  hosts: ce128
+  connection: local
+  gather_facts: no
+  vars:
+    cli:
+      host: "{{ inventory_hostname }}"
+      port: "{{ ansible_ssh_port }}"
+      username: "{{ username }}"
+      password: "{{ password }}"
+      transport: cli
+
+  tasks:
+
+  - name: Create bridge domain and set tunnel mode to VXLAN
+    ce_vxlan_global:
+      bridge_domain_id: 100
+      nvo3_acl_extend: enable
+      provider: "{{ cli }}"
+'''
+
+RETURN = '''
+proposed:
+    description: k/v pairs of parameters passed into module
+    returned: verbose mode
+    type: dict
+    sample: {"bridge_domain_id": "100", "nvo3_acl_extend": "enable", "state": "present"}
+existing:
+    description: k/v pairs of existing configuration
+    returned: verbose mode
+    type: dict
+    sample: {"bridge_domain": ["80", "90"], "nvo3_acl_extend": "disable"}
+end_state:
+    description: k/v pairs of configuration after module execution
+    returned: verbose mode
+    type: dict
+    sample: {"bridge_domain": ["80", "90", "100"], "nvo3_acl_extend": "enable"}
+updates:
+    description: commands sent to the device
+    returned: always
+    type: list
+    sample: ["bridge-domain 100",
+             "ip tunnel mode vxlan"]
+changed:
+    description: check to see if a change was made on the device
+    returned: always
+    type: bool
+    sample: true
+'''
+
+import re
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.network.cloudengine.ce import load_config
+from ansible_collections.community.general.plugins.module_utils.network.cloudengine.ce import ce_argument_spec
+from ansible.module_utils.connection import exec_command
+
+
+def is_config_exist(cmp_cfg, test_cfg):
+    """check whether a configuration line exists"""
+
+    if not cmp_cfg or not test_cfg:
+        return False
+
+    return bool(test_cfg in cmp_cfg)
+
+
+def get_nvo3_gw_enhanced(cmp_cfg):
+    """get the working mode (l2 or l3) of the Layer 3 VXLAN gateway"""
+
+    get = re.findall(
+        r"assign forward nvo3-gateway enhanced (l[23])", cmp_cfg)
+    if not get:
+        return None
+    else:
+        return get[0]
+
+
+class VxlanGlobal(object):
+    """
+    Manages global attributes of VXLAN and bridge domain.
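+    The flow mirrors the other CloudEngine CLI modules in this commit:
+    check_params() validates input, get_existing() and get_proposed() record
+    the before state, the config_*() helpers queue CLI commands which
+    cli_load_config() applies, and get_end_state() re-reads the device to
+    report the after state.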
+ """ + + def __init__(self, argument_spec): + self.spec = argument_spec + self.module = None + self.init_module() + + # module input info + self.tunnel_mode_vxlan = self.module.params['tunnel_mode_vxlan'] + self.nvo3_prevent_loops = self.module.params['nvo3_prevent_loops'] + self.nvo3_acl_extend = self.module.params['nvo3_acl_extend'] + self.nvo3_gw_enhanced = self.module.params['nvo3_gw_enhanced'] + self.nvo3_service_extend = self.module.params['nvo3_service_extend'] + self.nvo3_eth_trunk_hash = self.module.params['nvo3_eth_trunk_hash'] + self.nvo3_ecmp_hash = self.module.params['nvo3_ecmp_hash'] + self.bridge_domain_id = self.module.params['bridge_domain_id'] + self.state = self.module.params['state'] + + # state + self.config = "" # current config + self.bd_info = list() + self.changed = False + self.updates_cmd = list() + self.commands = list() + self.results = dict() + self.proposed = dict() + self.existing = dict() + self.end_state = dict() + + def init_module(self): + """init module""" + + self.module = AnsibleModule( + argument_spec=self.spec, supports_check_mode=True) + + def cli_load_config(self, commands): + """load config by cli""" + + if not self.module.check_mode: + load_config(self.module, commands) + + def get_config(self, flags=None): + """Retrieves the current config from the device or cache + """ + flags = [] if flags is None else flags + + cmd = 'display current-configuration ' + cmd += ' '.join(flags) + cmd = cmd.strip() + + rc, out, err = exec_command(self.module, cmd) + if rc != 0: + self.module.fail_json(msg=err) + cfg = str(out).strip() + + return cfg + + def get_current_config(self): + """get current configuration""" + + flags = list() + exp = " include-default | include vxlan|assign | exclude undo" + flags.append(exp) + return self.get_config(flags) + + def cli_add_command(self, command, undo=False): + """add command to self.update_cmd and self.commands""" + + if undo and command.lower() not in ["quit", "return"]: + cmd = "undo " + command + else: + cmd = command + + self.commands.append(cmd) # set to device + if command.lower() not in ["quit", "return"]: + self.updates_cmd.append(cmd) # show updates result + + def get_bd_list(self): + """get bridge domain list""" + flags = list() + bd_info = list() + exp = " include-default | include bridge-domain | exclude undo" + flags.append(exp) + bd_str = self.get_config(flags) + if not bd_str: + return bd_info + bd_num = re.findall(r'bridge-domain\s*([0-9]+)', bd_str) + bd_info.extend(bd_num) + return bd_info + + def config_bridge_domain(self): + """manage bridge domain""" + + if not self.bridge_domain_id: + return + + cmd = "bridge-domain %s" % self.bridge_domain_id + exist = self.bridge_domain_id in self.bd_info + if self.state == "present": + if not exist: + self.cli_add_command(cmd) + self.cli_add_command("quit") + else: + if exist: + self.cli_add_command(cmd, undo=True) + + def config_tunnel_mode(self): + """config tunnel mode vxlan""" + + # ip tunnel mode vxlan + if self.tunnel_mode_vxlan: + cmd = "ip tunnel mode vxlan" + exist = is_config_exist(self.config, cmd) + if self.tunnel_mode_vxlan == "enable": + if not exist: + self.cli_add_command(cmd) + else: + if exist: + self.cli_add_command(cmd, undo=True) + + def config_assign_forward(self): + """config assign forward command""" + + # [undo] assign forward nvo3-gateway enhanced {l2|l3) + if self.nvo3_gw_enhanced: + cmd = "assign forward nvo3-gateway enhanced %s" % self.nvo3_gw_enhanced + exist = is_config_exist(self.config, cmd) + if self.state == "present": + if not 
exist: + self.cli_add_command(cmd) + else: + if exist: + self.cli_add_command(cmd, undo=True) + + # [undo] assign forward nvo3 f-linecard compatibility enable + if self.nvo3_prevent_loops: + cmd = "assign forward nvo3 f-linecard compatibility enable" + exist = is_config_exist(self.config, cmd) + if self.nvo3_prevent_loops == "enable": + if not exist: + self.cli_add_command(cmd) + else: + if exist: + self.cli_add_command(cmd, undo=True) + + # [undo] assign forward nvo3 acl extend enable + if self.nvo3_acl_extend: + cmd = "assign forward nvo3 acl extend enable" + exist = is_config_exist(self.config, cmd) + if self.nvo3_acl_extend == "enable": + if not exist: + self.cli_add_command(cmd) + else: + if exist: + self.cli_add_command(cmd, undo=True) + + # [undo] assign forward nvo3 service extend enable + if self.nvo3_service_extend: + cmd = "assign forward nvo3 service extend enable" + exist = is_config_exist(self.config, cmd) + if self.nvo3_service_extend == "enable": + if not exist: + self.cli_add_command(cmd) + else: + if exist: + self.cli_add_command(cmd, undo=True) + + # assign forward nvo3 eth-trunk hash {enable|disable} + if self.nvo3_eth_trunk_hash: + cmd = "assign forward nvo3 eth-trunk hash enable" + exist = is_config_exist(self.config, cmd) + if self.nvo3_eth_trunk_hash == "enable": + if not exist: + self.cli_add_command(cmd) + else: + if exist: + self.cli_add_command(cmd, undo=True) + + # [undo] assign forward nvo3 ecmp hash enable + if self.nvo3_ecmp_hash: + cmd = "assign forward nvo3 ecmp hash enable" + exist = is_config_exist(self.config, cmd) + if self.nvo3_ecmp_hash == "enable": + if not exist: + self.cli_add_command(cmd) + else: + if exist: + self.cli_add_command(cmd, undo=True) + + def check_params(self): + """Check all input params""" + + # bridge domain id check + if self.bridge_domain_id: + if not self.bridge_domain_id.isdigit(): + self.module.fail_json( + msg="Error: bridge domain id is not digit.") + if int(self.bridge_domain_id) < 1 or int(self.bridge_domain_id) > 16777215: + self.module.fail_json( + msg="Error: bridge domain id is not in the range from 1 to 16777215.") + + def get_proposed(self): + """get proposed info""" + + if self.tunnel_mode_vxlan: + self.proposed["tunnel_mode_vxlan"] = self.tunnel_mode_vxlan + if self.nvo3_prevent_loops: + self.proposed["nvo3_prevent_loops"] = self.nvo3_prevent_loops + if self.nvo3_acl_extend: + self.proposed["nvo3_acl_extend"] = self.nvo3_acl_extend + if self.nvo3_gw_enhanced: + self.proposed["nvo3_gw_enhanced"] = self.nvo3_gw_enhanced + if self.nvo3_service_extend: + self.proposed["nvo3_service_extend"] = self.nvo3_service_extend + if self.nvo3_eth_trunk_hash: + self.proposed["nvo3_eth_trunk_hash"] = self.nvo3_eth_trunk_hash + if self.nvo3_ecmp_hash: + self.proposed["nvo3_ecmp_hash"] = self.nvo3_ecmp_hash + if self.bridge_domain_id: + self.proposed["bridge_domain_id"] = self.bridge_domain_id + self.proposed["state"] = self.state + + def get_existing(self): + """get existing info""" + + self.existing["bridge_domain"] = self.bd_info + + cmd = "ip tunnel mode vxlan" + exist = is_config_exist(self.config, cmd) + if exist: + self.existing["tunnel_mode_vxlan"] = "enable" + else: + self.existing["tunnel_mode_vxlan"] = "disable" + + cmd = "assign forward nvo3 f-linecard compatibility enable" + exist = is_config_exist(self.config, cmd) + if exist: + self.existing["nvo3_prevent_loops"] = "enable" + else: + self.existing["nvo3_prevent_loops"] = "disable" + + cmd = "assign forward nvo3 acl extend enable" + exist = is_config_exist(self.config, 
cmd)
+        if exist:
+            self.existing["nvo3_acl_extend"] = "enable"
+        else:
+            self.existing["nvo3_acl_extend"] = "disable"
+
+        self.existing["nvo3_gw_enhanced"] = get_nvo3_gw_enhanced(
+            self.config)
+
+        cmd = "assign forward nvo3 service extend enable"
+        exist = is_config_exist(self.config, cmd)
+        if exist:
+            self.existing["nvo3_service_extend"] = "enable"
+        else:
+            self.existing["nvo3_service_extend"] = "disable"
+
+        cmd = "assign forward nvo3 eth-trunk hash enable"
+        exist = is_config_exist(self.config, cmd)
+        if exist:
+            self.existing["nvo3_eth_trunk_hash"] = "enable"
+        else:
+            self.existing["nvo3_eth_trunk_hash"] = "disable"
+
+        cmd = "assign forward nvo3 ecmp hash enable"
+        exist = is_config_exist(self.config, cmd)
+        if exist:
+            self.existing["nvo3_ecmp_hash"] = "enable"
+        else:
+            self.existing["nvo3_ecmp_hash"] = "disable"
+
+    def get_end_state(self):
+        """get end state info"""
+
+        config = self.get_current_config()
+
+        self.end_state["bridge_domain"] = self.get_bd_list()
+
+        cmd = "ip tunnel mode vxlan"
+        exist = is_config_exist(config, cmd)
+        if exist:
+            self.end_state["tunnel_mode_vxlan"] = "enable"
+        else:
+            self.end_state["tunnel_mode_vxlan"] = "disable"
+
+        cmd = "assign forward nvo3 f-linecard compatibility enable"
+        exist = is_config_exist(config, cmd)
+        if exist:
+            self.end_state["nvo3_prevent_loops"] = "enable"
+        else:
+            self.end_state["nvo3_prevent_loops"] = "disable"
+
+        cmd = "assign forward nvo3 acl extend enable"
+        exist = is_config_exist(config, cmd)
+        if exist:
+            self.end_state["nvo3_acl_extend"] = "enable"
+        else:
+            self.end_state["nvo3_acl_extend"] = "disable"
+
+        self.end_state["nvo3_gw_enhanced"] = get_nvo3_gw_enhanced(config)
+
+        cmd = "assign forward nvo3 service extend enable"
+        exist = is_config_exist(config, cmd)
+        if exist:
+            self.end_state["nvo3_service_extend"] = "enable"
+        else:
+            self.end_state["nvo3_service_extend"] = "disable"
+
+        cmd = "assign forward nvo3 eth-trunk hash enable"
+        exist = is_config_exist(config, cmd)
+        if exist:
+            self.end_state["nvo3_eth_trunk_hash"] = "enable"
+        else:
+            self.end_state["nvo3_eth_trunk_hash"] = "disable"
+
+        cmd = "assign forward nvo3 ecmp hash enable"
+        exist = is_config_exist(config, cmd)
+        if exist:
+            self.end_state["nvo3_ecmp_hash"] = "enable"
+        else:
+            self.end_state["nvo3_ecmp_hash"] = "disable"
+
+        # if the device ends up exactly where it started, nothing changed
+        if self.existing == self.end_state:
+            self.changed = False
+
+    def work(self):
+        """worker"""
+
+        self.check_params()
+        self.config = self.get_current_config()
+        self.bd_info = self.get_bd_list()
+        self.get_existing()
+        self.get_proposed()
+
+        # deal present or absent
+        self.config_bridge_domain()
+        self.config_tunnel_mode()
+        self.config_assign_forward()
+        if self.commands:
+            self.cli_load_config(self.commands)
+            self.changed = True
+
+        self.get_end_state()
+        self.results['changed'] = self.changed
+        self.results['proposed'] = self.proposed
+        self.results['existing'] = self.existing
+        self.results['end_state'] = self.end_state
+        if self.changed:
+            self.results['updates'] = self.updates_cmd
+        else:
+            self.results['updates'] = list()
+
+        self.module.exit_json(**self.results)
+
+
+def main():
+    """Module main"""
+
+    argument_spec = dict(
+        tunnel_mode_vxlan=dict(required=False, type='str',
+                               choices=['enable', 'disable']),
+        nvo3_prevent_loops=dict(required=False, type='str',
+                                choices=['enable', 'disable']),
+        nvo3_acl_extend=dict(required=False, type='str',
+                             choices=['enable', 'disable']),
+        nvo3_gw_enhanced=dict(required=False, type='str',
+                              choices=['l2', 'l3']),
+        nvo3_service_extend=dict(required=False, type='str',
choices=['enable', 'disable']), + nvo3_eth_trunk_hash=dict(required=False, type='str', + choices=['enable', 'disable']), + nvo3_ecmp_hash=dict(required=False, type='str', + choices=['enable', 'disable']), + bridge_domain_id=dict(required=False, type='str'), + state=dict(required=False, default='present', + choices=['present', 'absent']) + ) + argument_spec.update(ce_argument_spec) + module = VxlanGlobal(argument_spec) + module.work() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/cloudengine/ce_vxlan_tunnel.py b/plugins/modules/network/cloudengine/ce_vxlan_tunnel.py new file mode 100644 index 0000000000..0ca5c15656 --- /dev/null +++ b/plugins/modules/network/cloudengine/ce_vxlan_tunnel.py @@ -0,0 +1,944 @@ +#!/usr/bin/python +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: ce_vxlan_tunnel +short_description: Manages VXLAN tunnel configuration on HUAWEI CloudEngine devices. +description: + - This module offers the ability to set the VNI and mapped to the BD, + and configure an ingress replication list on HUAWEI CloudEngine devices. +author: + - Li Yanfeng (@QijunPan) +notes: + - This module requires the netconf system service be enabled on the remote device being managed. + - Recommended connection is C(netconf). + - This module also works with C(local) connections for legacy playbooks. +options: + bridge_domain_id: + description: + - Specifies a bridge domain ID. The value is an integer ranging from 1 to 16777215. + vni_id: + description: + - Specifies a VXLAN network identifier (VNI) ID. The value is an integer ranging from 1 to 16000000. + nve_name: + description: + - Specifies the number of an NVE interface. The value ranges from 1 to 2. + nve_mode: + description: + - Specifies the working mode of an NVE interface. + choices: ['mode-l2','mode-l3'] + peer_list_ip: + description: + - Specifies the IP address of a remote VXLAN tunnel endpoints (VTEP). + The value is in dotted decimal notation. + protocol_type: + description: + - The operation type of routing protocol. + choices: ['bgp','null'] + source_ip: + description: + - Specifies an IP address for a source VTEP. The value is in dotted decimal notation. + state: + description: + - Manage the state of the resource. + default: present + choices: ['present','absent'] +''' +EXAMPLES = ''' +- name: vxlan tunnel module test + hosts: ce128 + connection: local + gather_facts: no + vars: + cli: + host: "{{ inventory_hostname }}" + port: "{{ ansible_ssh_port }}" + username: "{{ username }}" + password: "{{ password }}" + transport: cli + + tasks: + + - name: Make sure nve_name is exist, ensure vni_id and protocol_type is configured on Nve1 interface. 
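+    # Legacy C(local)/provider connection style; as noted above, C(netconf)
+    # is the recommended connection for this module.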
+ ce_vxlan_tunnel: + nve_name: Nve1 + vni_id: 100 + protocol_type: bgp + state: present + provider: "{{ cli }}" +''' + +RETURN = ''' +proposed: + description: k/v pairs of parameters passed into module + returned: always + type: dict + sample: {nve_interface_name": "Nve1", nve_mode": "mode-l2", "source_ip": "0.0.0.0"} +existing: + description: + - k/v pairs of existing rollback + returned: always + type: dict + sample: {nve_interface_name": "Nve1", nve_mode": "mode-l3", "source_ip": "0.0.0.0"} + +updates: + description: command sent to the device + returned: always + type: list + sample: ["interface Nve1", + "mode l3"] +changed: + description: check to see if a change was made on the device + returned: always + type: bool + sample: true +end_state: + description: k/v pairs of configuration after module execution + returned: always + type: dict + sample: {nve_interface_name": "Nve1", nve_mode": "mode-l3", "source_ip": "0.0.0.0"} +''' +import re +from xml.etree import ElementTree +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.network.cloudengine.ce import get_nc_config, set_nc_config, ce_argument_spec + +CE_NC_GET_VNI_BD_INFO = """ + + + + + + + + + + +""" + +CE_NC_GET_NVE_INFO = """ + + + + + %s + + + + +""" + +CE_NC_MERGE_VNI_BD_ID = """ + + + + + %s + %s + + + + +""" + +CE_NC_DELETE_VNI_BD_ID = """ + + + + + %s + %s + + + + +""" + +CE_NC_MERGE_NVE_MODE = """ + + + + + %s + %s + + + + +""" + +CE_NC_MERGE_NVE_SOURCE_IP_PROTOCOL = """ + + + + + %s + %s + + + + +""" + +CE_NC_MERGE_VNI_PEER_ADDRESS_IP_HEAD = """ + + + + + %s + + + %s +""" + +CE_NC_MERGE_VNI_PEER_ADDRESS_IP_END = """ + + + + + + +""" +CE_NC_MERGE_VNI_PEER_ADDRESS_IP_MERGE = """ + + + %s + + +""" + +CE_NC_DELETE_VNI_PEER_ADDRESS_IP_HEAD = """ + + + + + %s + + + %s +""" +CE_NC_DELETE_VNI_PEER_ADDRESS_IP_END = """ + + + + + + +""" +CE_NC_DELETE_VNI_PEER_ADDRESS_IP_DELETE = """ + + + %s + + +""" + +CE_NC_DELETE_PEER_ADDRESS_IP_HEAD = """ + + + + + %s + + + %s +""" +CE_NC_DELETE_PEER_ADDRESS_IP_END = """ + + + + + + +""" +CE_NC_MERGE_VNI_PROTOCOL = """ + + + + + %s + + + %s + %s + + + + + + +""" + +CE_NC_DELETE_VNI_PROTOCOL = """ + + + + + %s + + + %s + %s + + + + + + +""" + + +def is_valid_address(address): + """check ip-address is valid""" + + if address.find('.') != -1: + addr_list = address.split('.') + if len(addr_list) != 4: + return False + for each_num in addr_list: + if not each_num.isdigit(): + return False + if int(each_num) > 255: + return False + return True + + return False + + +class VxlanTunnel(object): + """ + Manages vxlan tunnel configuration. 
+ """ + + def __init__(self, argument_spec): + self.spec = argument_spec + self.module = None + self.init_module() + + # module input info + self.bridge_domain_id = self.module.params['bridge_domain_id'] + self.vni_id = self.module.params['vni_id'] + self.nve_name = self.module.params['nve_name'] + self.nve_mode = self.module.params['nve_mode'] + self.peer_list_ip = self.module.params['peer_list_ip'] + self.protocol_type = self.module.params['protocol_type'] + self.source_ip = self.module.params['source_ip'] + self.state = self.module.params['state'] + + # state + self.changed = False + self.updates_cmd = list() + self.results = dict() + self.existing = dict() + self.proposed = dict() + self.end_state = dict() + + # configuration nve info + self.vni2bd_info = None + self.nve_info = None + + def init_module(self): + """ init module """ + + self.module = AnsibleModule( + argument_spec=self.spec, supports_check_mode=True) + + def check_response(self, xml_str, xml_name): + """Check if response message is already succeed.""" + + if "" not in xml_str: + self.module.fail_json(msg='Error: %s failed.' % xml_name) + + def get_vni2bd_dict(self): + """ get vni2bd attributes dict.""" + + vni2bd_info = dict() + # get vni bd info + conf_str = CE_NC_GET_VNI_BD_INFO + xml_str = get_nc_config(self.module, conf_str) + if "" in xml_str: + return vni2bd_info + xml_str = xml_str.replace('\r', '').replace('\n', '').\ + replace('xmlns="urn:ietf:params:xml:ns:netconf:base:1.0"', "").\ + replace('xmlns="http://www.huawei.com/netconf/vrp"', "") + # get vni to bridge domain id info + root = ElementTree.fromstring(xml_str) + vni2bd_info["vni2BdInfos"] = list() + vni2bds = root.findall("nvo3/nvo3Vni2Bds/nvo3Vni2Bd") + + if vni2bds: + for vni2bd in vni2bds: + vni_dict = dict() + for ele in vni2bd: + if ele.tag in ["vniId", "bdId"]: + vni_dict[ele.tag] = ele.text + vni2bd_info["vni2BdInfos"].append(vni_dict) + + return vni2bd_info + + def check_nve_interface(self, nve_name): + """is nve interface exist""" + + if not self.nve_info: + return False + + if self.nve_info["ifName"] == nve_name: + return True + return False + + def get_nve_dict(self, nve_name): + """ get nve interface attributes dict.""" + + nve_info = dict() + # get nve info + conf_str = CE_NC_GET_NVE_INFO % nve_name + xml_str = get_nc_config(self.module, conf_str) + if "" in xml_str: + return nve_info + xml_str = xml_str.replace('\r', '').replace('\n', '').\ + replace('xmlns="urn:ietf:params:xml:ns:netconf:base:1.0"', "").\ + replace('xmlns="http://www.huawei.com/netconf/vrp"', "") + + # get nve info + root = ElementTree.fromstring(xml_str) + + nvo3 = root.find("nvo3/nvo3Nves/nvo3Nve") + if nvo3: + for nve in nvo3: + if nve.tag in ["srcAddr", "ifName", "nveType"]: + nve_info[nve.tag] = nve.text + + # get nve vni info + nve_info["vni_peer_protocols"] = list() + + vni_members = root.findall( + "nvo3/nvo3Nves/nvo3Nve/vniMembers/vniMember") + if vni_members: + for member in vni_members: + vni_dict = dict() + for ele in member: + if ele.tag in ["vniId", "protocol"]: + vni_dict[ele.tag] = ele.text + nve_info["vni_peer_protocols"].append(vni_dict) + + # get vni peer address ip info + nve_info["vni_peer_ips"] = list() + + re_find = re.findall(r'(.*?)\s*' + r'(.*?)\s*' + r'(.*?)', xml_str) + + if re_find: + for vni_peers in re_find: + vni_info = dict() + vni_peer = re.findall(r'(.*?)', vni_peers[2]) + if vni_peer: + vni_info["vniId"] = vni_peers[0] + vni_peer_list = list() + for peer in vni_peer: + vni_peer_list.append(peer) + vni_info["peerAddr"] = vni_peer_list + 
nve_info["vni_peer_ips"].append(vni_info) + + return nve_info + + def check_nve_name(self): + """Gets Nve interface name""" + + if self.nve_name is None: + return False + if self.nve_name in ["Nve1", "Nve2"]: + return True + return False + + def is_vni_bd_exist(self, vni_id, bd_id): + """is vni to bridge-domain-id exist""" + + if not self.vni2bd_info: + return False + + for vni2bd in self.vni2bd_info["vni2BdInfos"]: + if vni2bd["vniId"] == vni_id and vni2bd["bdId"] == bd_id: + return True + return False + + def is_vni_bd_change(self, vni_id, bd_id): + """is vni to bridge-domain-id change""" + + if not self.vni2bd_info: + return True + + for vni2bd in self.vni2bd_info["vni2BdInfos"]: + if vni2bd["vniId"] == vni_id and vni2bd["bdId"] == bd_id: + return False + return True + + def is_nve_mode_exist(self, nve_name, mode): + """is nve interface mode exist""" + + if not self.nve_info: + return False + + if self.nve_info["ifName"] == nve_name and self.nve_info["nveType"] == mode: + return True + return False + + def is_nve_mode_change(self, nve_name, mode): + """is nve interface mode change""" + + if not self.nve_info: + return True + + if self.nve_info["ifName"] == nve_name and self.nve_info["nveType"] == mode: + return False + return True + + def is_nve_source_ip_exist(self, nve_name, source_ip): + """is vni to bridge-domain-id exist""" + + if not self.nve_info: + return False + + if self.nve_info["ifName"] == nve_name and self.nve_info["srcAddr"] == source_ip: + return True + return False + + def is_nve_source_ip_change(self, nve_name, source_ip): + """is vni to bridge-domain-id change""" + + if not self.nve_info: + return True + + if self.nve_info["ifName"] == nve_name and self.nve_info["srcAddr"] == source_ip: + return False + return True + + def is_vni_protocol_exist(self, nve_name, vni_id, protocol_type): + """is vni protocol exist""" + + if not self.nve_info: + return False + if self.nve_info["ifName"] == nve_name: + for member in self.nve_info["vni_peer_protocols"]: + if member["vniId"] == vni_id and member["protocol"] == protocol_type: + return True + return False + + def is_vni_protocol_change(self, nve_name, vni_id, protocol_type): + """is vni protocol change""" + + if not self.nve_info: + return True + if self.nve_info["ifName"] == nve_name: + for member in self.nve_info["vni_peer_protocols"]: + if member["vniId"] == vni_id and member["protocol"] == protocol_type: + return False + return True + + def is_vni_peer_list_exist(self, nve_name, vni_id, peer_ip): + """is vni peer list exist""" + + if not self.nve_info: + return False + if self.nve_info["ifName"] == nve_name: + for member in self.nve_info["vni_peer_ips"]: + if member["vniId"] == vni_id and peer_ip in member["peerAddr"]: + return True + return False + + def is_vni_peer_list_change(self, nve_name, vni_id, peer_ip_list): + """is vni peer list change""" + + if not self.nve_info: + return True + + if self.nve_info["ifName"] == nve_name: + if not self.nve_info["vni_peer_ips"]: + return True + + nve_peer_info = list() + for nve_peer in self.nve_info["vni_peer_ips"]: + if nve_peer["vniId"] == vni_id: + nve_peer_info.append(nve_peer) + + if not nve_peer_info: + return True + + nve_peer_list = nve_peer_info[0]["peerAddr"] + for peer in peer_ip_list: + if peer not in nve_peer_list: + return True + + return False + + def config_merge_vni2bd(self, bd_id, vni_id): + """config vni to bd id""" + + if self.is_vni_bd_change(vni_id, bd_id): + cfg_xml = CE_NC_MERGE_VNI_BD_ID % (vni_id, bd_id) + recv_xml = set_nc_config(self.module, cfg_xml) + 
self.check_response(recv_xml, "MERGE_VNI_BD") + self.updates_cmd.append("bridge-domain %s" % bd_id) + self.updates_cmd.append("vxlan vni %s" % vni_id) + self.changed = True + + def config_merge_mode(self, nve_name, mode): + """config nve mode""" + + if self.is_nve_mode_change(nve_name, mode): + cfg_xml = CE_NC_MERGE_NVE_MODE % (nve_name, mode) + recv_xml = set_nc_config(self.module, cfg_xml) + self.check_response(recv_xml, "MERGE_MODE") + self.updates_cmd.append("interface %s" % nve_name) + if mode == "mode-l3": + self.updates_cmd.append("mode l3") + else: + self.updates_cmd.append("undo mode l3") + self.changed = True + + def config_merge_source_ip(self, nve_name, source_ip): + """config nve source ip""" + + if self.is_nve_source_ip_change(nve_name, source_ip): + cfg_xml = CE_NC_MERGE_NVE_SOURCE_IP_PROTOCOL % ( + nve_name, source_ip) + recv_xml = set_nc_config(self.module, cfg_xml) + self.check_response(recv_xml, "MERGE_SOURCE_IP") + self.updates_cmd.append("interface %s" % nve_name) + self.updates_cmd.append("source %s" % source_ip) + self.changed = True + + def config_merge_vni_peer_ip(self, nve_name, vni_id, peer_ip_list): + """config vni peer ip""" + + if self.is_vni_peer_list_change(nve_name, vni_id, peer_ip_list): + cfg_xml = CE_NC_MERGE_VNI_PEER_ADDRESS_IP_HEAD % ( + nve_name, vni_id) + for peer_ip in peer_ip_list: + cfg_xml += CE_NC_MERGE_VNI_PEER_ADDRESS_IP_MERGE % peer_ip + cfg_xml += CE_NC_MERGE_VNI_PEER_ADDRESS_IP_END + recv_xml = set_nc_config(self.module, cfg_xml) + self.check_response(recv_xml, "MERGE_VNI_PEER_IP") + self.updates_cmd.append("interface %s" % nve_name) + + for peer_ip in peer_ip_list: + cmd_output = "vni %s head-end peer-list %s" % (vni_id, peer_ip) + self.updates_cmd.append(cmd_output) + self.changed = True + + def config_merge_vni_protocol_type(self, nve_name, vni_id, protocol_type): + """config vni protocol type""" + + if self.is_vni_protocol_change(nve_name, vni_id, protocol_type): + cfg_xml = CE_NC_MERGE_VNI_PROTOCOL % ( + nve_name, vni_id, protocol_type) + recv_xml = set_nc_config(self.module, cfg_xml) + self.check_response(recv_xml, "MERGE_VNI_PEER_PROTOCOL") + self.updates_cmd.append("interface %s" % nve_name) + + if protocol_type == "bgp": + self.updates_cmd.append( + "vni %s head-end peer-list protocol %s" % (vni_id, protocol_type)) + else: + self.updates_cmd.append( + "undo vni %s head-end peer-list protocol bgp" % vni_id) + self.changed = True + + def config_delete_vni2bd(self, bd_id, vni_id): + """remove vni to bd id""" + + if not self.is_vni_bd_exist(vni_id, bd_id): + return + cfg_xml = CE_NC_DELETE_VNI_BD_ID % (vni_id, bd_id) + recv_xml = set_nc_config(self.module, cfg_xml) + self.check_response(recv_xml, "DELETE_VNI_BD") + self.updates_cmd.append( + "bridge-domain %s" % bd_id) + self.updates_cmd.append( + "undo vxlan vni %s" % vni_id) + + self.changed = True + + def config_delete_mode(self, nve_name, mode): + """nve mode""" + + if mode == "mode-l3": + if not self.is_nve_mode_exist(nve_name, mode): + return + cfg_xml = CE_NC_MERGE_NVE_MODE % (nve_name, "mode-l2") + + recv_xml = set_nc_config(self.module, cfg_xml) + self.check_response(recv_xml, "DELETE_MODE") + self.updates_cmd.append("interface %s" % nve_name) + self.updates_cmd.append("undo mode l3") + self.changed = True + else: + self.module.fail_json( + msg='Error: Can not configure undo mode l2.') + + def config_delete_source_ip(self, nve_name, source_ip): + """nve source ip""" + + if not self.is_nve_source_ip_exist(nve_name, source_ip): + return + ipaddr = "0.0.0.0" + cfg_xml = 
CE_NC_MERGE_NVE_SOURCE_IP_PROTOCOL % (
+            nve_name, ipaddr)
+        recv_xml = set_nc_config(self.module, cfg_xml)
+        self.check_response(recv_xml, "DELETE_SOURCE_IP")
+        self.updates_cmd.append("interface %s" % nve_name)
+        self.updates_cmd.append("undo source %s" % source_ip)
+        self.changed = True
+
+    def config_delete_vni_peer_ip(self, nve_name, vni_id, peer_ip_list):
+        """remove vni peer ip"""
+
+        for peer_ip in peer_ip_list:
+            if not self.is_vni_peer_list_exist(nve_name, vni_id, peer_ip):
+                self.module.fail_json(msg='Error: The peer ip %s does not exist.' % peer_ip)
+
+        config = False
+
+        nve_peer_info = list()
+        for nve_peer in self.nve_info["vni_peer_ips"]:
+            if nve_peer["vniId"] == vni_id:
+                nve_peer_info = nve_peer.get("peerAddr")
+
+        for peer in nve_peer_info:
+            if peer not in peer_ip_list:
+                config = True
+
+        if not config:
+            cfg_xml = CE_NC_DELETE_VNI_PEER_ADDRESS_IP_HEAD % (
+                nve_name, vni_id)
+            for peer_ip in peer_ip_list:
+                cfg_xml += CE_NC_DELETE_VNI_PEER_ADDRESS_IP_DELETE % peer_ip
+            cfg_xml += CE_NC_DELETE_VNI_PEER_ADDRESS_IP_END
+        else:
+            cfg_xml = CE_NC_DELETE_PEER_ADDRESS_IP_HEAD % (
+                nve_name, vni_id)
+            for peer_ip in peer_ip_list:
+                cfg_xml += CE_NC_DELETE_VNI_PEER_ADDRESS_IP_DELETE % peer_ip
+            cfg_xml += CE_NC_DELETE_PEER_ADDRESS_IP_END
+
+        recv_xml = set_nc_config(self.module, cfg_xml)
+        self.check_response(recv_xml, "DELETE_VNI_PEER_IP")
+        self.updates_cmd.append("interface %s" % nve_name)
+
+        for peer_ip in peer_ip_list:
+            cmd_output = "undo vni %s head-end peer-list %s" % (vni_id, peer_ip)
+            self.updates_cmd.append(cmd_output)
+
+        self.changed = True
+
+    def config_delete_vni_protocol_type(self, nve_name, vni_id, protocol_type):
+        """remove vni protocol type"""
+
+        if not self.is_vni_protocol_exist(nve_name, vni_id, protocol_type):
+            return
+
+        cfg_xml = CE_NC_DELETE_VNI_PROTOCOL % (nve_name, vni_id, protocol_type)
+        recv_xml = set_nc_config(self.module, cfg_xml)
+        self.check_response(recv_xml, "DELETE_VNI_PEER_PROTOCOL")
+        self.updates_cmd.append("interface %s" % nve_name)
+        self.updates_cmd.append(
+            "undo vni %s head-end peer-list protocol bgp" % vni_id)
+        self.changed = True
+
+    def check_params(self):
+        """Check all input params"""
+
+        # bridge_domain_id check
+        if self.bridge_domain_id:
+            if not self.bridge_domain_id.isdigit():
+                self.module.fail_json(
+                    msg='Error: The parameter of bridge domain id is invalid.')
+            if int(self.bridge_domain_id) > 16777215 or int(self.bridge_domain_id) < 1:
+                self.module.fail_json(
+                    msg='Error: The bridge domain id must be an integer between 1 and 16777215.')
+        # vni_id check
+        if self.vni_id:
+            if not self.vni_id.isdigit():
+                self.module.fail_json(
+                    msg='Error: The parameter of vni id is invalid.')
+            if int(self.vni_id) > 16000000 or int(self.vni_id) < 1:
+                self.module.fail_json(
+                    msg='Error: The vni id must be an integer between 1 and 16000000.')
+
+        # nve_name check
+        if self.nve_name:
+            if not self.check_nve_name():
+                self.module.fail_json(
+                    msg='Error: NVE interface %s is invalid.' % self.nve_name)
+
+        # peer_list_ip check
+        if self.peer_list_ip:
+            for peer_ip in self.peer_list_ip:
+                if not is_valid_address(peer_ip):
+                    self.module.fail_json(
+                        msg='Error: The ip address %s is invalid.' % peer_ip)
+        # source_ip check
+        if self.source_ip:
+            if not is_valid_address(self.source_ip):
+                self.module.fail_json(
+                    msg='Error: The ip address %s is invalid.'
% self.source_ip) + + def get_proposed(self): + """get proposed info""" + + if self.bridge_domain_id: + self.proposed["bridge_domain_id"] = self.bridge_domain_id + if self.vni_id: + self.proposed["vni_id"] = self.vni_id + if self.nve_name: + self.proposed["nve_name"] = self.nve_name + if self.nve_mode: + self.proposed["nve_mode"] = self.nve_mode + if self.peer_list_ip: + self.proposed["peer_list_ip"] = self.peer_list_ip + if self.source_ip: + self.proposed["source_ip"] = self.source_ip + if self.state: + self.proposed["state"] = self.state + + def get_existing(self): + """get existing info""" + + if self.vni2bd_info: + self.existing["vni_to_bridge_domain"] = self.vni2bd_info[ + "vni2BdInfos"] + + if self.nve_info: + self.existing["nve_interface_name"] = self.nve_info["ifName"] + self.existing["source_ip"] = self.nve_info["srcAddr"] + self.existing["nve_mode"] = self.nve_info["nveType"] + self.existing["vni_peer_list_ip"] = self.nve_info[ + "vni_peer_ips"] + self.existing["vni_peer_list_protocol"] = self.nve_info[ + "vni_peer_protocols"] + + def get_end_state(self): + """get end state info""" + + vni2bd_info = self.get_vni2bd_dict() + if vni2bd_info: + self.end_state["vni_to_bridge_domain"] = vni2bd_info["vni2BdInfos"] + + nve_info = self.get_nve_dict(self.nve_name) + if nve_info: + self.end_state["nve_interface_name"] = nve_info["ifName"] + self.end_state["source_ip"] = nve_info["srcAddr"] + self.end_state["nve_mode"] = nve_info["nveType"] + self.end_state["vni_peer_list_ip"] = nve_info[ + "vni_peer_ips"] + self.end_state["vni_peer_list_protocol"] = nve_info[ + "vni_peer_protocols"] + + def work(self): + """worker""" + + self.check_params() + self.vni2bd_info = self.get_vni2bd_dict() + if self.nve_name: + self.nve_info = self.get_nve_dict(self.nve_name) + self.get_existing() + self.get_proposed() + # deal present or absent + if self.state == "present": + if self.bridge_domain_id and self.vni_id: + self.config_merge_vni2bd(self.bridge_domain_id, self.vni_id) + if self.nve_name: + if self.check_nve_interface(self.nve_name): + if self.nve_mode: + self.config_merge_mode(self.nve_name, self.nve_mode) + if self.source_ip: + self.config_merge_source_ip( + self.nve_name, self.source_ip) + if self.vni_id and self.peer_list_ip: + self.config_merge_vni_peer_ip( + self.nve_name, self.vni_id, self.peer_list_ip) + if self.vni_id and self.protocol_type: + self.config_merge_vni_protocol_type( + self.nve_name, self.vni_id, self.protocol_type) + else: + self.module.fail_json( + msg='Error: Nve interface %s does not exist.' % self.nve_name) + + else: + if self.bridge_domain_id and self.vni_id: + self.config_delete_vni2bd(self.bridge_domain_id, self.vni_id) + if self.nve_name: + if self.check_nve_interface(self.nve_name): + if self.nve_mode: + self.config_delete_mode(self.nve_name, self.nve_mode) + if self.source_ip: + self.config_delete_source_ip( + self.nve_name, self.source_ip) + if self.vni_id and self.peer_list_ip: + self.config_delete_vni_peer_ip( + self.nve_name, self.vni_id, self.peer_list_ip) + if self.vni_id and self.protocol_type: + self.config_delete_vni_protocol_type( + self.nve_name, self.vni_id, self.protocol_type) + else: + self.module.fail_json( + msg='Error: Nve interface %s does not exist.' 
% self.nve_name) + + self.get_end_state() + self.results['changed'] = self.changed + self.results['proposed'] = self.proposed + self.results['existing'] = self.existing + self.results['end_state'] = self.end_state + if self.changed: + self.results['updates'] = self.updates_cmd + else: + self.results['updates'] = list() + + self.module.exit_json(**self.results) + + +def main(): + """Module main""" + + argument_spec = dict( + bridge_domain_id=dict(required=False), + vni_id=dict(required=False, type='str'), + nve_name=dict(required=False, type='str'), + nve_mode=dict(required=False, choices=['mode-l2', 'mode-l3']), + peer_list_ip=dict(required=False, type='list'), + protocol_type=dict(required=False, type='str', choices=[ + 'bgp', 'null']), + + source_ip=dict(required=False), + state=dict(required=False, default='present', + choices=['present', 'absent']) + ) + argument_spec.update(ce_argument_spec) + module = VxlanTunnel(argument_spec) + module.work() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/cloudengine/ce_vxlan_vap.py b/plugins/modules/network/cloudengine/ce_vxlan_vap.py new file mode 100644 index 0000000000..a25ee3d8c5 --- /dev/null +++ b/plugins/modules/network/cloudengine/ce_vxlan_vap.py @@ -0,0 +1,937 @@ +#!/usr/bin/python +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: ce_vxlan_vap +short_description: Manages VXLAN virtual access point on HUAWEI CloudEngine Devices. +description: + - Manages VXLAN Virtual access point on HUAWEI CloudEngine Devices. +author: QijunPan (@QijunPan) +notes: + - This module requires the netconf system service be enabled on the remote device being managed. + - Recommended connection is C(netconf). + - This module also works with C(local) connections for legacy playbooks. +options: + bridge_domain_id: + description: + - Specifies a bridge domain ID. + The value is an integer ranging from 1 to 16777215. + bind_vlan_id: + description: + - Specifies the VLAN binding to a BD(Bridge Domain). + The value is an integer ranging ranging from 1 to 4094. + l2_sub_interface: + description: + - Specifies an Sub-Interface full name, i.e. "10GE1/0/41.1". + The value is a string of 1 to 63 case-insensitive characters, spaces supported. + encapsulation: + description: + - Specifies an encapsulation type of packets allowed to pass through a Layer 2 sub-interface. + choices: ['dot1q', 'default', 'untag', 'qinq', 'none'] + ce_vid: + description: + - When I(encapsulation) is 'dot1q', specifies a VLAN ID in the outer VLAN tag. + When I(encapsulation) is 'qinq', specifies an outer VLAN ID for + double-tagged packets to be received by a Layer 2 sub-interface. + The value is an integer ranging from 1 to 4094. 
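+    # For qinq, ce_vid (the outer tag) is paired with pe_vid (the inner tag)
+    # below; check_params() enforces that the two are supplied together.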
+ pe_vid: + description: + - When I(encapsulation) is 'qinq', specifies an inner VLAN ID for + double-tagged packets to be received by a Layer 2 sub-interface. + The value is an integer ranging from 1 to 4094. + state: + description: + - Determines whether the config should be present or not + on the device. + default: present + choices: ['present', 'absent'] +''' + +EXAMPLES = ''' +- name: vxlan vap module test + hosts: ce128 + connection: local + gather_facts: no + vars: + cli: + host: "{{ inventory_hostname }}" + port: "{{ ansible_ssh_port }}" + username: "{{ username }}" + password: "{{ password }}" + transport: cli + + tasks: + + - name: Create a mapping between a VLAN and a BD + ce_vxlan_vap: + bridge_domain_id: 100 + bind_vlan_id: 99 + provider: "{{ cli }}" + + - name: Bind a Layer 2 sub-interface to a BD + ce_vxlan_vap: + bridge_domain_id: 100 + l2_sub_interface: 10GE2/0/20.1 + provider: "{{ cli }}" + + - name: Configure an encapsulation type on a Layer 2 sub-interface + ce_vxlan_vap: + l2_sub_interface: 10GE2/0/20.1 + encapsulation: dot1q + provider: "{{ cli }}" +''' + +RETURN = ''' +proposed: + description: k/v pairs of parameters passed into module + returned: verbose mode + type: dict + sample: {"bridge_domain_id": "100", "bind_vlan_id": "99", state="present"} +existing: + description: k/v pairs of existing configuration + returned: verbose mode + type: dict + sample: {"bridge_domain_id": "100", "bind_intf_list": ["10GE2/0/20.1", "10GE2/0/20.2"], + "bind_vlan_list": []} +end_state: + description: k/v pairs of configuration after module execution + returned: verbose mode + type: dict + sample: {"bridge_domain_id": "100", "bind_intf_list": ["110GE2/0/20.1", "10GE2/0/20.2"], + "bind_vlan_list": ["99"]} +updates: + description: commands sent to the device + returned: always + type: list + sample: ["bridge-domain 100", + "l2 binding vlan 99"] +changed: + description: check to see if a change was made on the device + returned: always + type: bool + sample: true +''' + +from xml.etree import ElementTree +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.network.cloudengine.ce import get_nc_config, set_nc_config, ce_argument_spec + +CE_NC_GET_BD_VAP = """ + + + + + %s + + + + + + + + + + + + +""" + +CE_NC_MERGE_BD_VLAN = """ + + + + + %s + + %s:%s + + + + + +""" + +CE_NC_MERGE_BD_INTF = """ + + + + + %s + + + %s + + + + + + +""" + +CE_NC_DELETE_BD_INTF = """ + + + + + %s + + + %s + + + + + + +""" + +CE_NC_GET_ENCAP = """ + + + + + %s + + + + + + + + + + + + + + +""" + +CE_NC_SET_ENCAP = """ + + + + + %s + %s + + + + +""" + +CE_NC_UNSET_ENCAP = """ + + + + + %s + none + + + + +""" + +CE_NC_SET_ENCAP_DOT1Q = """ + + + + + %s + dot1q + + %s:%s + + + + + +""" + +CE_NC_SET_ENCAP_QINQ = """ + + + + + %s + qinq + + + %s + %s:%s + + + + + + +""" + + +def vlan_vid_to_bitmap(vid): + """convert VLAN list to VLAN bitmap""" + + vlan_bit = ['0'] * 1024 + int_vid = int(vid) + j = int_vid // 4 + bit_int = 0x8 >> (int_vid % 4) + vlan_bit[j] = str(hex(bit_int))[2] + + return ''.join(vlan_bit) + + +def bitmap_to_vlan_list(bitmap): + """convert VLAN bitmap to VLAN list""" + + tmp = list() + if not bitmap: + return tmp + + bit_len = len(bitmap) + for i in range(bit_len): + if bitmap[i] == "0": + continue + bit = int(bitmap[i]) + if bit & 0x8: + tmp.append(str(i * 4)) + if bit & 0x4: + tmp.append(str(i * 4 + 1)) + if bit & 0x2: + tmp.append(str(i * 4 + 2)) + if bit & 0x1: + tmp.append(str(i * 4 + 3)) + + return tmp + + +def 
is_vlan_bitmap_empty(bitmap):
+    """check whether a VLAN bitmap is empty"""
+
+    if not bitmap or len(bitmap) == 0:
+        return True
+
+    for bit in bitmap:
+        if bit != '0':
+            return False
+
+    return True
+
+
+def is_vlan_in_bitmap(vid, bitmap):
+    """check whether a VLAN id is set in a VLAN bitmap"""
+
+    if is_vlan_bitmap_empty(bitmap):
+        return False
+
+    # each hex digit of the bitmap covers four VLAN ids; e.g. vid "5" gives
+    # i == 1 and mask 0x8 >> 1 == 0x4, so a bitmap starting with "04"
+    # (as produced by vlan_vid_to_bitmap("5")) matches
+    i = int(vid) // 4
+    if i >= len(bitmap):
+        return False
+
+    if int(bitmap[i], 16) & (0x8 >> (int(vid) % 4)):
+        return True
+
+    return False
+
+
+def get_interface_type(interface):
+    """Gets the type of interface, such as 10GE, ETH-TRUNK, VLANIF..."""
+
+    if interface is None:
+        return None
+
+    iftype = None
+
+    if interface.upper().startswith('GE'):
+        iftype = 'ge'
+    elif interface.upper().startswith('10GE'):
+        iftype = '10ge'
+    elif interface.upper().startswith('25GE'):
+        iftype = '25ge'
+    elif interface.upper().startswith('4X10GE'):
+        iftype = '4x10ge'
+    elif interface.upper().startswith('40GE'):
+        iftype = '40ge'
+    elif interface.upper().startswith('100GE'):
+        iftype = '100ge'
+    elif interface.upper().startswith('VLANIF'):
+        iftype = 'vlanif'
+    elif interface.upper().startswith('LOOPBACK'):
+        iftype = 'loopback'
+    elif interface.upper().startswith('METH'):
+        iftype = 'meth'
+    elif interface.upper().startswith('ETH-TRUNK'):
+        iftype = 'eth-trunk'
+    elif interface.upper().startswith('VBDIF'):
+        iftype = 'vbdif'
+    elif interface.upper().startswith('NVE'):
+        iftype = 'nve'
+    elif interface.upper().startswith('TUNNEL'):
+        iftype = 'tunnel'
+    elif interface.upper().startswith('ETHERNET'):
+        iftype = 'ethernet'
+    elif interface.upper().startswith('FCOE-PORT'):
+        iftype = 'fcoe-port'
+    elif interface.upper().startswith('FABRIC-PORT'):
+        iftype = 'fabric-port'
+    elif interface.upper().startswith('STACK-PORT'):
+        iftype = 'stack-port'
+    elif interface.upper().startswith('NULL'):
+        iftype = 'null'
+    else:
+        return None
+
+    return iftype.lower()
+
+
+class VxlanVap(object):
+    """
+    Manages VXLAN virtual access point.
+    """
+
+    def __init__(self, argument_spec):
+        self.spec = argument_spec
+        self.module = None
+        self.__init_module__()
+
+        # module input info
+        self.bridge_domain_id = self.module.params['bridge_domain_id']
+        self.bind_vlan_id = self.module.params['bind_vlan_id']
+        self.l2_sub_interface = self.module.params['l2_sub_interface']
+        self.ce_vid = self.module.params['ce_vid']
+        self.pe_vid = self.module.params['pe_vid']
+        self.encapsulation = self.module.params['encapsulation']
+        self.state = self.module.params['state']
+
+        # state
+        self.vap_info = dict()
+        self.l2sub_info = dict()
+        self.changed = False
+        self.updates_cmd = list()
+        self.commands = list()
+        self.results = dict()
+        self.proposed = dict()
+        self.existing = dict()
+        self.end_state = dict()
+
+    def __init_module__(self):
+        """init module"""
+
+        self.module = AnsibleModule(
+            argument_spec=self.spec, supports_check_mode=True)
+
+    def check_response(self, xml_str, xml_name):
+        """Check whether the response message indicates success."""
+
+        if "<ok/>" not in xml_str:
+            self.module.fail_json(msg='Error: %s failed.'
% xml_name) + + def get_bd_vap_dict(self): + """get virtual access point info""" + + vap_info = dict() + conf_str = CE_NC_GET_BD_VAP % self.bridge_domain_id + xml_str = get_nc_config(self.module, conf_str) + + xml_str = xml_str.replace('\r', '').replace('\n', '').\ + replace('xmlns="urn:ietf:params:xml:ns:netconf:base:1.0"', "").\ + replace('xmlns="http://www.huawei.com/netconf/vrp"', "") + + # get vap: VLAN + vap_info["bdId"] = self.bridge_domain_id + root = ElementTree.fromstring(xml_str) + vap_info["vlanList"] = "" + vap_vlan = root.find("evc/bds/bd/bdBindVlan") + if vap_vlan: + for ele in vap_vlan: + if ele.tag == "vlanList": + vap_info["vlanList"] = ele.text + + # get vap: l2 su-interface + vap_ifs = root.findall( + "evc/bds/bd/servicePoints/servicePoint/ifName") + if_list = list() + if vap_ifs: + for vap_if in vap_ifs: + if vap_if.tag == "ifName": + if_list.append(vap_if.text) + vap_info["intfList"] = if_list + + return vap_info + + def get_l2_sub_intf_dict(self, ifname): + """get l2 sub-interface info""" + + intf_info = dict() + if not ifname: + return intf_info + + conf_str = CE_NC_GET_ENCAP % ifname + xml_str = get_nc_config(self.module, conf_str) + + if "" in xml_str: + return intf_info + + xml_str = xml_str.replace('\r', '').replace('\n', '').\ + replace('xmlns="urn:ietf:params:xml:ns:netconf:base:1.0"', "").\ + replace('xmlns="http://www.huawei.com/netconf/vrp"', "") + + # get l2 sub interface encapsulation info + root = ElementTree.fromstring(xml_str) + bds = root.find("ethernet/servicePoints/servicePoint") + if not bds: + return intf_info + + for ele in bds: + if ele.tag in ["ifName", "flowType"]: + intf_info[ele.tag] = ele.text.lower() + + if intf_info.get("flowType") == "dot1q": + ce_vid = root.find( + "ethernet/servicePoints/servicePoint/flowDot1qs") + intf_info["dot1qVids"] = "" + if ce_vid: + for ele in ce_vid: + if ele.tag == "dot1qVids": + intf_info["dot1qVids"] = ele.text + elif intf_info.get("flowType") == "qinq": + vids = root.find( + "ethernet/servicePoints/servicePoint/flowQinqs/flowQinq") + if vids: + for ele in vids: + if ele.tag in ["peVlanId", "ceVids"]: + intf_info[ele.tag] = ele.text + + return intf_info + + def config_traffic_encap_dot1q(self): + """configure traffic encapsulation type dot1q""" + + xml_str = "" + self.updates_cmd.append("interface %s" % self.l2_sub_interface) + if self.state == "present": + if self.encapsulation != self.l2sub_info.get("flowType"): + if self.ce_vid: + vlan_bitmap = vlan_vid_to_bitmap(self.ce_vid) + xml_str = CE_NC_SET_ENCAP_DOT1Q % ( + self.l2_sub_interface, vlan_bitmap, vlan_bitmap) + self.updates_cmd.append("encapsulation %s vid %s" % ( + self.encapsulation, self.ce_vid)) + else: + xml_str = CE_NC_SET_ENCAP % ( + self.l2_sub_interface, self.encapsulation) + self.updates_cmd.append( + "encapsulation %s" % self.encapsulation) + else: + if self.ce_vid and not is_vlan_in_bitmap( + self.ce_vid, self.l2sub_info.get("dot1qVids")): + vlan_bitmap = vlan_vid_to_bitmap(self.ce_vid) + xml_str = CE_NC_SET_ENCAP_DOT1Q % ( + self.l2_sub_interface, vlan_bitmap, vlan_bitmap) + self.updates_cmd.append("encapsulation %s vid %s" % ( + self.encapsulation, self.ce_vid)) + else: + if self.encapsulation == self.l2sub_info.get("flowType"): + if self.ce_vid: + if is_vlan_in_bitmap(self.ce_vid, self.l2sub_info.get("dot1qVids")): + xml_str = CE_NC_UNSET_ENCAP % self.l2_sub_interface + self.updates_cmd.append("undo encapsulation %s vid %s" % ( + self.encapsulation, self.ce_vid)) + else: + xml_str = CE_NC_UNSET_ENCAP % self.l2_sub_interface + 
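+                    # state=absent with no ce_vid given: remove the dot1q
+                    # encapsulation from the sub-interface entirely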
self.updates_cmd.append( + "undo encapsulation %s" % self.encapsulation) + + if not xml_str: + self.updates_cmd.pop() + return + recv_xml = set_nc_config(self.module, xml_str) + self.check_response(recv_xml, "CONFIG_INTF_ENCAP_DOT1Q") + self.changed = True + + def config_traffic_encap_qinq(self): + """configure traffic encapsulation type qinq""" + + xml_str = "" + self.updates_cmd.append("interface %s" % self.l2_sub_interface) + if self.state == "present": + if self.encapsulation != self.l2sub_info.get("flowType"): + if self.ce_vid: + vlan_bitmap = vlan_vid_to_bitmap(self.ce_vid) + xml_str = CE_NC_SET_ENCAP_QINQ % (self.l2_sub_interface, + self.pe_vid, + vlan_bitmap, + vlan_bitmap) + self.updates_cmd.append( + "encapsulation %s vid %s ce-vid %s" % (self.encapsulation, + self.pe_vid, + self.ce_vid)) + else: + xml_str = CE_NC_SET_ENCAP % ( + self.l2_sub_interface, self.encapsulation) + self.updates_cmd.append( + "encapsulation %s" % self.encapsulation) + else: + if self.ce_vid: + if not is_vlan_in_bitmap(self.ce_vid, self.l2sub_info.get("ceVids")) \ + or self.pe_vid != self.l2sub_info.get("peVlanId"): + vlan_bitmap = vlan_vid_to_bitmap(self.ce_vid) + xml_str = CE_NC_SET_ENCAP_QINQ % (self.l2_sub_interface, + self.pe_vid, + vlan_bitmap, + vlan_bitmap) + self.updates_cmd.append( + "encapsulation %s vid %s ce-vid %s" % (self.encapsulation, + self.pe_vid, + self.ce_vid)) + else: + if self.encapsulation == self.l2sub_info.get("flowType"): + if self.ce_vid: + if is_vlan_in_bitmap(self.ce_vid, self.l2sub_info.get("ceVids")) \ + and self.pe_vid == self.l2sub_info.get("peVlanId"): + xml_str = CE_NC_UNSET_ENCAP % self.l2_sub_interface + self.updates_cmd.append( + "undo encapsulation %s vid %s ce-vid %s" % (self.encapsulation, + self.pe_vid, + self.ce_vid)) + else: + xml_str = CE_NC_UNSET_ENCAP % self.l2_sub_interface + self.updates_cmd.append( + "undo encapsulation %s" % self.encapsulation) + + if not xml_str: + self.updates_cmd.pop() + return + recv_xml = set_nc_config(self.module, xml_str) + self.check_response(recv_xml, "CONFIG_INTF_ENCAP_QINQ") + self.changed = True + + def config_traffic_encap(self): + """configure traffic encapsulation types""" + + if not self.l2sub_info: + self.module.fail_json(msg="Error: Interface %s does not exist." 
% self.l2_sub_interface) + + if not self.encapsulation: + return + + xml_str = "" + if self.encapsulation in ["default", "untag"]: + if self.state == "present": + if self.encapsulation != self.l2sub_info.get("flowType"): + xml_str = CE_NC_SET_ENCAP % ( + self.l2_sub_interface, self.encapsulation) + self.updates_cmd.append( + "interface %s" % self.l2_sub_interface) + self.updates_cmd.append( + "encapsulation %s" % self.encapsulation) + else: + if self.encapsulation == self.l2sub_info.get("flowType"): + xml_str = CE_NC_UNSET_ENCAP % self.l2_sub_interface + self.updates_cmd.append( + "interface %s" % self.l2_sub_interface) + self.updates_cmd.append( + "undo encapsulation %s" % self.encapsulation) + elif self.encapsulation == "none": + if self.state == "present": + if self.encapsulation != self.l2sub_info.get("flowType"): + xml_str = CE_NC_UNSET_ENCAP % self.l2_sub_interface + self.updates_cmd.append( + "interface %s" % self.l2_sub_interface) + self.updates_cmd.append( + "undo encapsulation %s" % self.l2sub_info.get("flowType")) + elif self.encapsulation == "dot1q": + self.config_traffic_encap_dot1q() + return + elif self.encapsulation == "qinq": + self.config_traffic_encap_qinq() + return + else: + pass + + if not xml_str: + return + recv_xml = set_nc_config(self.module, xml_str) + self.check_response(recv_xml, "CONFIG_INTF_ENCAP") + self.changed = True + + def config_vap_sub_intf(self): + """configure a Layer 2 sub-interface as a service access point""" + + if not self.vap_info: + self.module.fail_json(msg="Error: Bridge domain %s does not exist." % self.bridge_domain_id) + + xml_str = "" + if self.state == "present": + if self.l2_sub_interface not in self.vap_info["intfList"]: + self.updates_cmd.append("interface %s" % self.l2_sub_interface) + self.updates_cmd.append("bridge-domain %s" % + self.bridge_domain_id) + xml_str = CE_NC_MERGE_BD_INTF % ( + self.bridge_domain_id, self.l2_sub_interface) + else: + if self.l2_sub_interface in self.vap_info["intfList"]: + self.updates_cmd.append("interface %s" % self.l2_sub_interface) + self.updates_cmd.append( + "undo bridge-domain %s" % self.bridge_domain_id) + xml_str = CE_NC_DELETE_BD_INTF % ( + self.bridge_domain_id, self.l2_sub_interface) + + if not xml_str: + return + recv_xml = set_nc_config(self.module, xml_str) + self.check_response(recv_xml, "CONFIG_VAP_SUB_INTERFACE") + self.changed = True + + def config_vap_vlan(self): + """configure a VLAN as a service access point""" + + xml_str = "" + if self.state == "present": + if not is_vlan_in_bitmap(self.bind_vlan_id, self.vap_info["vlanList"]): + self.updates_cmd.append("bridge-domain %s" % + self.bridge_domain_id) + self.updates_cmd.append( + "l2 binding vlan %s" % self.bind_vlan_id) + vlan_bitmap = vlan_vid_to_bitmap(self.bind_vlan_id) + xml_str = CE_NC_MERGE_BD_VLAN % ( + self.bridge_domain_id, vlan_bitmap, vlan_bitmap) + else: + if is_vlan_in_bitmap(self.bind_vlan_id, self.vap_info["vlanList"]): + self.updates_cmd.append("bridge-domain %s" % + self.bridge_domain_id) + self.updates_cmd.append( + "undo l2 binding vlan %s" % self.bind_vlan_id) + vlan_bitmap = vlan_vid_to_bitmap(self.bind_vlan_id) + xml_str = CE_NC_MERGE_BD_VLAN % ( + self.bridge_domain_id, "0" * 1024, vlan_bitmap) + + if not xml_str: + return + recv_xml = set_nc_config(self.module, xml_str) + self.check_response(recv_xml, "CONFIG_VAP_VLAN") + self.changed = True + + def is_vlan_valid(self, vid, name): + """check VLAN id""" + + if not vid: + return + + if not vid.isdigit(): + self.module.fail_json(msg="Error: %s is not digit." 
% name)
+
+        if int(vid) < 1 or int(vid) > 4094:
+            self.module.fail_json(
+                msg="Error: %s is not in the range from 1 to 4094." % name)
+
+    def is_l2_sub_intf_valid(self, ifname):
+        """check that a l2 sub-interface name is valid"""
+
+        if ifname.count('.') != 1:
+            return False
+
+        if_num = ifname.split('.')[1]
+        if not if_num.isdigit():
+            return False
+
+        if int(if_num) < 1 or int(if_num) > 4096:
+            self.module.fail_json(
+                msg="Error: Sub-interface number is not in the range from 1 to 4096.")
+            return False
+
+        if not get_interface_type(ifname):
+            return False
+
+        return True
+
+    def check_params(self):
+        """Check all input params"""
+
+        # bridge domain id check
+        if self.bridge_domain_id:
+            if not self.bridge_domain_id.isdigit():
+                self.module.fail_json(
+                    msg="Error: Bridge domain id is not a digit.")
+            if int(self.bridge_domain_id) < 1 or int(self.bridge_domain_id) > 16777215:
+                self.module.fail_json(
+                    msg="Error: Bridge domain id is not in the range from 1 to 16777215.")
+
+        # check bind_vlan_id
+        if self.bind_vlan_id:
+            self.is_vlan_valid(self.bind_vlan_id, "bind_vlan_id")
+
+        # check l2_sub_interface
+        if self.l2_sub_interface and not self.is_l2_sub_intf_valid(self.l2_sub_interface):
+            self.module.fail_json(msg="Error: l2_sub_interface is invalid.")
+
+        # check ce_vid
+        if self.ce_vid:
+            self.is_vlan_valid(self.ce_vid, "ce_vid")
+            if not self.encapsulation or self.encapsulation not in ["dot1q", "qinq"]:
+                self.module.fail_json(msg="Error: ce_vid cannot be set "
+                                          "when encapsulation is '%s'." % self.encapsulation)
+            if self.encapsulation == "qinq" and not self.pe_vid:
+                self.module.fail_json(msg="Error: ce_vid and pe_vid must be set at the same time "
+                                          "when encapsulation is '%s'." % self.encapsulation)
+        # check pe_vid
+        if self.pe_vid:
+            self.is_vlan_valid(self.pe_vid, "pe_vid")
+            if not self.encapsulation or self.encapsulation != "qinq":
+                self.module.fail_json(msg="Error: pe_vid cannot be set "
+                                          "when encapsulation is '%s'." % self.encapsulation)
+            if not self.ce_vid:
+                self.module.fail_json(msg="Error: ce_vid and pe_vid must be set at the same time "
+                                          "when encapsulation is '%s'."
+
% self.encapsulation) + + def get_proposed(self): + """get proposed info""" + + if self.bridge_domain_id: + self.proposed["bridge_domain_id"] = self.bridge_domain_id + if self.bind_vlan_id: + self.proposed["bind_vlan_id"] = self.bind_vlan_id + if self.l2_sub_interface: + self.proposed["l2_sub_interface"] = self.l2_sub_interface + if self.encapsulation: + self.proposed["encapsulation"] = self.encapsulation + if self.ce_vid: + self.proposed["ce_vid"] = self.ce_vid + if self.pe_vid: + self.proposed["pe_vid"] = self.pe_vid + self.proposed["state"] = self.state + + def get_existing(self): + """get existing info""" + + if self.bridge_domain_id: + if self.bind_vlan_id or self.l2_sub_interface: + self.existing["bridge_domain_id"] = self.bridge_domain_id + self.existing["bind_vlan_list"] = bitmap_to_vlan_list( + self.vap_info.get("vlanList")) + self.existing["bind_intf_list"] = self.vap_info.get("intfList") + + if self.encapsulation and self.l2_sub_interface: + self.existing["l2_sub_interface"] = self.l2_sub_interface + self.existing["encapsulation"] = self.l2sub_info.get("flowType") + if self.existing["encapsulation"] == "dot1q": + self.existing["ce_vid"] = bitmap_to_vlan_list( + self.l2sub_info.get("dot1qVids")) + if self.existing["encapsulation"] == "qinq": + self.existing["ce_vid"] = bitmap_to_vlan_list( + self.l2sub_info.get("ceVids")) + self.existing["pe_vid"] = self.l2sub_info.get("peVlanId") + + def get_end_state(self): + """get end state info""" + + if self.bridge_domain_id: + if self.bind_vlan_id or self.l2_sub_interface: + vap_info = self.get_bd_vap_dict() + self.end_state["bridge_domain_id"] = self.bridge_domain_id + self.end_state["bind_vlan_list"] = bitmap_to_vlan_list( + vap_info.get("vlanList")) + self.end_state["bind_intf_list"] = vap_info.get("intfList") + + if self.encapsulation and self.l2_sub_interface: + l2sub_info = self.get_l2_sub_intf_dict(self.l2_sub_interface) + self.end_state["l2_sub_interface"] = self.l2_sub_interface + self.end_state["encapsulation"] = l2sub_info.get("flowType") + if self.end_state["encapsulation"] == "dot1q": + self.end_state["ce_vid"] = bitmap_to_vlan_list( + l2sub_info.get("dot1qVids")) + if self.end_state["encapsulation"] == "qinq": + self.end_state["ce_vid"] = bitmap_to_vlan_list( + l2sub_info.get("ceVids")) + self.end_state["pe_vid"] = l2sub_info.get("peVlanId") + + def data_init(self): + """data init""" + if self.l2_sub_interface: + self.l2_sub_interface = self.l2_sub_interface.replace( + " ", "").upper() + if self.encapsulation and self.l2_sub_interface: + self.l2sub_info = self.get_l2_sub_intf_dict(self.l2_sub_interface) + if self.bridge_domain_id: + if self.bind_vlan_id or self.l2_sub_interface: + self.vap_info = self.get_bd_vap_dict() + + def work(self): + """worker""" + + self.check_params() + self.data_init() + self.get_existing() + self.get_proposed() + + # Traffic encapsulation types + if self.encapsulation and self.l2_sub_interface: + self.config_traffic_encap() + + # A VXLAN service access point can be a Layer 2 sub-interface or VLAN + if self.bridge_domain_id: + if self.l2_sub_interface: + # configure a Layer 2 sub-interface as a service access point + self.config_vap_sub_intf() + + if self.bind_vlan_id: + # configure a VLAN as a service access point + self.config_vap_vlan() + self.get_end_state() + self.results['changed'] = self.changed + self.results['proposed'] = self.proposed + self.results['existing'] = self.existing + self.results['end_state'] = self.end_state + if self.changed: + self.results['updates'] = self.updates_cmd + 
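        # 'updates' mirrors the CLI view of the changes that were applied;
+        # the else branch below resets it to an empty list when nothing
+        # changed, so consumers can rely on the key being present either way.
+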
else:
+            self.results['updates'] = list()
+        self.module.exit_json(**self.results)
+
+
+def main():
+    """Module main"""
+
+    argument_spec = dict(
+        bridge_domain_id=dict(required=False, type='str'),
+        bind_vlan_id=dict(required=False, type='str'),
+        l2_sub_interface=dict(required=False, type='str'),
+        encapsulation=dict(required=False, type='str',
+                           choices=['dot1q', 'default', 'untag', 'qinq', 'none']),
+        ce_vid=dict(required=False, type='str'),
+        pe_vid=dict(required=False, type='str'),
+        state=dict(required=False, default='present',
+                   choices=['present', 'absent'])
+    )
+    argument_spec.update(ce_argument_spec)
+    module = VxlanVap(argument_spec)
+    module.work()
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/network/cloudvision/cv_server_provision.py b/plugins/modules/network/cloudvision/cv_server_provision.py
new file mode 100644
index 0000000000..24f4a46f91
--- /dev/null
+++ b/plugins/modules/network/cloudvision/cv_server_provision.py
@@ -0,0 +1,642 @@
+#!/usr/bin/python
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+DOCUMENTATION = '''
+---
+module: cv_server_provision
+author: "EOS+ CS (ansible-dev@arista.com) (@mharista)"
+short_description:
+    Provision server port by applying or removing template configuration to an
+    Arista CloudVision Portal configlet that is applied to a switch.
+description:
+  - This module allows a server team to provision server network ports for
+    new servers without having to access Arista CVP or asking the network team
+    to do it for them. Provide the information for connecting to CVP, switch
+    rack, port the new server is connected to, optional vlan, and an action,
+    and the module will apply the configuration to the switch port via CVP.
+    Actions are add (applies template config to port),
+    remove (defaults the interface config) and
+    show (returns the current port config).
+options:
+  host:
+    description:
+      - The hostname or IP address of the CVP node being connected to.
+    required: true
+  port:
+    description:
+      - The port number to use when making API calls to the CVP node. This
+        will default to the default port for the specified protocol. Port 80
+        for http and port 443 for https.
+  protocol:
+    description:
+      - The protocol to use when making API calls to CVP. CVP defaults to https
+        and newer versions of CVP no longer support http.
+    default: https
+    choices: [https, http]
+  username:
+    description:
+      - The user that will be used to connect to CVP for making API calls.
+    required: true
+  password:
+    description:
+      - The password of the user that will be used to connect to CVP for API
+        calls.
+    required: true
+  server_name:
+    description:
+      - The hostname or identifier for the server that is having its switch
+        port provisioned.
+    required: true
+  switch_name:
+    description:
+      - The hostname of the switch that is being configured for the server
+        being provisioned.
+    required: true
+  switch_port:
+    description:
+      - The physical port number on the switch that the new server is
+        connected to.
+    required: true
+  port_vlan:
+    description:
+      - The vlan that should be applied to the port for this server.
+        This parameter is dependent on a proper template that supports single
+        vlan provisioning with it. If a port vlan is specified but the
+        template specified does not support it, the module will exit out with
+        no changes. If a template is specified that requires a port vlan but
+        no port vlan is specified, the module will exit out with no changes.
+  template:
+    description:
+      - A path to a Jinja formatted template file that contains the
+        configuration block that will be applied to the specified switch port.
+        This template will have variable fields replaced by the module before
+        being applied to the switch configuration.
+    required: true
+  action:
+    description:
+      - The action for the module to take. The actions are add, which applies
+        the specified template config to port, remove, which defaults the
+        specified interface configuration, and show, which will return the
+        current port configuration with no changes.
+    default: show
+    choices: [show, add, remove]
+  auto_run:
+    description:
+      - Flag that determines whether or not the module will execute the CVP
+        task spawned as a result of changes to a switch configlet. When an
+        add or remove action is taken which results in a change to a switch
+        configlet, CVP will spawn a task that needs to be executed for the
+        configuration to be applied to the switch. If this option is True then
+        the module will determine the task number created by the configuration
+        change, execute it and wait for the task to complete. If the option
+        is False then the task will remain in the Pending state in CVP for
+        a network administrator to review and execute.
+    type: bool
+    default: 'no'
+requirements: [Jinja2, cvprac >= 0.7.0]
+'''
+
+EXAMPLES = '''
+- name: Get current configuration for interface Ethernet2
+  cv_server_provision:
+    host: cvp_node
+    username: cvp_user
+    password: cvp_pass
+    protocol: https
+    server_name: new_server
+    switch_name: eos_switch_1
+    switch_port: 2
+    template: template_file.j2
+    action: show
+
+- name: Remove existing configuration from interface Ethernet2. Run task.
+  cv_server_provision:
+    host: cvp_node
+    username: cvp_user
+    password: cvp_pass
+    protocol: https
+    server_name: new_server
+    switch_name: eos_switch_1
+    switch_port: 2
+    template: template_file.j2
+    action: remove
+    auto_run: True
+
+- name: Add template configuration to interface Ethernet2. No VLAN. Run task.
+  cv_server_provision:
+    host: cvp_node
+    username: cvp_user
+    password: cvp_pass
+    protocol: https
+    server_name: new_server
+    switch_name: eos_switch_1
+    switch_port: 2
+    template: single_attached_trunk.j2
+    action: add
+    auto_run: True
+
+- name: Add template with VLAN configuration to interface Ethernet2. Run task.
+ cv_server_provision: + host: cvp_node + username: cvp_user + password: cvp_pass + protocol: https + server_name: new_server + switch_name: eos_switch_1 + switch_port: 2 + port_vlan: 22 + template: single_attached_vlan.j2 + action: add + auto_run: True +''' + +RETURN = ''' +changed: + description: Signifies if a change was made to the configlet + returned: success + type: bool + sample: true +currentConfigBlock: + description: The current config block for the user specified interface + returned: when action = show + type: str + sample: | + interface Ethernet4 + ! +newConfigBlock: + description: The new config block for the user specified interface + returned: when action = add or remove + type: str + sample: | + interface Ethernet3 + description example + no switchport + ! +oldConfigBlock: + description: The current config block for the user specified interface + before any changes are made + returned: when action = add or remove + type: str + sample: | + interface Ethernet3 + ! +fullConfig: + description: The full config of the configlet after being updated + returned: when action = add or remove + type: str + sample: | + ! + interface Ethernet3 + ! + interface Ethernet4 + ! +updateConfigletResponse: + description: Response returned from CVP when configlet update is triggered + returned: when action = add or remove and configuration changes + type: str + sample: "Configlet veos1-server successfully updated and task initiated." +portConfigurable: + description: Signifies if the user specified port has an entry in the + configlet that Ansible has access to + returned: success + type: bool + sample: true +switchConfigurable: + description: Signifies if the user specified switch has a configlet + applied to it that CVP is allowed to edit + returned: success + type: bool + sample: true +switchInfo: + description: Information from CVP describing the switch being configured + returned: success + type: dict + sample: {"architecture": "i386", + "bootupTimeStamp": 1491264298.21, + "complianceCode": "0000", + "complianceIndication": "NONE", + "deviceInfo": "Registered", + "deviceStatus": "Registered", + "fqdn": "veos1", + "hardwareRevision": "", + "internalBuildId": "12-12", + "internalVersion": "4.17.1F-11111.4171F", + "ipAddress": "192.168.1.20", + "isDANZEnabled": "no", + "isMLAGEnabled": "no", + "key": "00:50:56:5d:e5:e0", + "lastSyncUp": 1496432895799, + "memFree": 472976, + "memTotal": 1893460, + "modelName": "vEOS", + "parentContainerId": "container_13_5776759195930", + "serialNumber": "", + "systemMacAddress": "00:50:56:5d:e5:e0", + "taskIdList": [], + "tempAction": null, + "type": "netelement", + "unAuthorized": false, + "version": "4.17.1F", + "ztpMode": "false"} +taskCompleted: + description: Signifies if the task created and executed has completed successfully + returned: when action = add or remove, and auto_run = true, + and configuration changes + type: bool + sample: true +taskCreated: + description: Signifies if a task was created due to configlet changes + returned: when action = add or remove, and auto_run = true or false, + and configuration changes + type: bool + sample: true +taskExecuted: + description: Signifies if the automation executed the spawned task + returned: when action = add or remove, and auto_run = true, + and configuration changes + type: bool + sample: true +taskId: + description: The task ID created by CVP because of changes to configlet + returned: when action = add or remove, and auto_run = true or false, + and configuration changes + type: str + sample: 
"500" +''' + +import re +import time +from ansible.module_utils.basic import AnsibleModule +try: + import jinja2 + from jinja2 import meta + HAS_JINJA2 = True +except ImportError: + HAS_JINJA2 = False +try: + from cvprac.cvp_client import CvpClient + from cvprac.cvp_client_errors import CvpLoginError, CvpApiError + HAS_CVPRAC = True +except ImportError: + HAS_CVPRAC = False + + +def connect(module): + ''' Connects to CVP device using user provided credentials from playbook. + + :param module: Ansible module with parameters and client connection. + :return: CvpClient object with connection instantiated. + ''' + client = CvpClient() + try: + client.connect([module.params['host']], + module.params['username'], + module.params['password'], + protocol=module.params['protocol'], + port=module.params['port']) + except CvpLoginError as e: + module.fail_json(msg=str(e)) + return client + + +def switch_info(module): + ''' Get dictionary of switch info from CVP. + + :param module: Ansible module with parameters and client connection. + :return: Dict of switch info from CVP or exit with failure if no + info for device is found. + ''' + switch_name = module.params['switch_name'] + switch_info = module.client.api.get_device_by_name(switch_name) + if not switch_info: + module.fail_json(msg=str("Device with name '%s' does not exist." + % switch_name)) + return switch_info + + +def switch_in_compliance(module, sw_info): + ''' Check if switch is currently in compliance. + + :param module: Ansible module with parameters and client connection. + :param sw_info: Dict of switch info. + :return: Nothing or exit with failure if device is not in compliance. + ''' + compliance = module.client.api.check_compliance(sw_info['key'], + sw_info['type']) + if compliance['complianceCode'] != '0000': + module.fail_json(msg=str('Switch %s is not in compliance. Returned' + ' compliance code %s.' + % (sw_info['fqdn'], + compliance['complianceCode']))) + + +def server_configurable_configlet(module, sw_info): + ''' Check CVP that the user specified switch has a configlet assigned to + it that Ansible is allowed to edit. + + :param module: Ansible module with parameters and client connection. + :param sw_info: Dict of switch info. + :return: Dict of configlet information or None. + ''' + configurable_configlet = None + configlet_name = module.params['switch_name'] + '-server' + switch_configlets = module.client.api.get_configlets_by_device_id( + sw_info['key']) + for configlet in switch_configlets: + if configlet['name'] == configlet_name: + configurable_configlet = configlet + return configurable_configlet + + +def port_configurable(module, configlet): + ''' Check configlet if the user specified port has a configuration entry + in the configlet to determine if Ansible is allowed to configure the + port on this switch. + + :param module: Ansible module with parameters and client connection. + :param configlet: Dict of configlet info. + :return: True or False. + ''' + configurable = False + regex = r'^interface Ethernet%s' % module.params['switch_port'] + for config_line in configlet['config'].split('\n'): + if re.match(regex, config_line): + configurable = True + return configurable + + +def configlet_action(module, configlet): + ''' Take appropriate action based on current state of device and user + requested action. + + Return current config block for specified port if action is show. + + If action is add or remove make the appropriate changes to the + configlet and return the associated information. 
+ + :param module: Ansible module with parameters and client connection. + :param configlet: Dict of configlet info. + :return: Dict of information to updated results with. + ''' + result = dict() + existing_config = current_config(module, configlet['config']) + if module.params['action'] == 'show': + result['currentConfigBlock'] = existing_config + return result + elif module.params['action'] == 'add': + result['newConfigBlock'] = config_from_template(module) + elif module.params['action'] == 'remove': + result['newConfigBlock'] = ('interface Ethernet%s\n!' + % module.params['switch_port']) + result['oldConfigBlock'] = existing_config + result['fullConfig'] = updated_configlet_content(module, + configlet['config'], + result['newConfigBlock']) + resp = module.client.api.update_configlet(result['fullConfig'], + configlet['key'], + configlet['name']) + if 'data' in resp: + result['updateConfigletResponse'] = resp['data'] + if 'task' in resp['data']: + result['changed'] = True + result['taskCreated'] = True + return result + + +def current_config(module, config): + ''' Parse the full port configuration for the user specified port out of + the full configlet configuration and return as a string. + + :param module: Ansible module with parameters and client connection. + :param config: Full config to parse specific port config from. + :return: String of current config block for user specified port. + ''' + regex = r'^interface Ethernet%s' % module.params['switch_port'] + match = re.search(regex, config, re.M) + if not match: + module.fail_json(msg=str('interface section not found - %s' + % config)) + block_start, line_end = match.regs[0] + + match = re.search(r'!', config[line_end:], re.M) + if not match: + return config[block_start:] + _, block_end = match.regs[0] + + block_end = line_end + block_end + return config[block_start:block_end] + + +def valid_template(port, template): + ''' Test if the user provided Jinja template is valid. + + :param port: User specified port. + :param template: Contents of Jinja template. + :return: True or False + ''' + valid = True + regex = r'^interface Ethernet%s' % port + match = re.match(regex, template, re.M) + if not match: + valid = False + return valid + + +def config_from_template(module): + ''' Load the Jinja template and apply user provided parameters in necessary + places. Fail if template is not found. Fail if rendered template does + not reference the correct port. Fail if the template requires a VLAN + but the user did not provide one with the port_vlan parameter. + + :param module: Ansible module with parameters and client connection. + :return: String of Jinja template rendered with parameters or exit with + failure. + ''' + template_loader = jinja2.FileSystemLoader('./templates') + env = jinja2.Environment(loader=template_loader, + undefined=jinja2.DebugUndefined) + template = env.get_template(module.params['template']) + if not template: + module.fail_json(msg=str('Could not find template - %s' + % module.params['template'])) + + data = {'switch_port': module.params['switch_port'], + 'server_name': module.params['server_name']} + + temp_source = env.loader.get_source(env, module.params['template'])[0] + parsed_content = env.parse(temp_source) + temp_vars = list(meta.find_undeclared_variables(parsed_content)) + if 'port_vlan' in temp_vars: + if module.params['port_vlan']: + data['port_vlan'] = module.params['port_vlan'] + else: + module.fail_json(msg=str('Template %s requires a vlan. Please' + ' re-run with vlan number provided.' 
+ % module.params['template'])) + + template = template.render(data) + if not valid_template(module.params['switch_port'], template): + module.fail_json(msg=str('Template content does not configure proper' + ' interface - %s' % template)) + return template + + +def updated_configlet_content(module, existing_config, new_config): + ''' Update the configlet configuration with the new section for the port + specified by the user. + + :param module: Ansible module with parameters and client connection. + :param existing_config: String of current configlet configuration. + :param new_config: String of configuration for user specified port to + replace in the existing config. + :return: String of the full updated configuration. + ''' + regex = r'^interface Ethernet%s' % module.params['switch_port'] + match = re.search(regex, existing_config, re.M) + if not match: + module.fail_json(msg=str('interface section not found - %s' + % existing_config)) + block_start, line_end = match.regs[0] + + updated_config = existing_config[:block_start] + new_config + match = re.search(r'!\n', existing_config[line_end:], re.M) + if match: + _, block_end = match.regs[0] + block_end = line_end + block_end + updated_config += '\n%s' % existing_config[block_end:] + return updated_config + + +def configlet_update_task(module): + ''' Poll device info of switch from CVP up to three times to see if the + configlet updates have spawned a task. It sometimes takes a second for + the task to be spawned after configlet updates. If a task is found + return the task ID. Otherwise return None. + + :param module: Ansible module with parameters and client connection. + :return: Task ID or None. + ''' + for num in range(3): + device_info = switch_info(module) + if (('taskIdList' in device_info) and + (len(device_info['taskIdList']) > 0)): + for task in device_info['taskIdList']: + if ('Configlet Assign' in task['description'] and + task['data']['WORKFLOW_ACTION'] == 'Configlet Push'): + return task['workOrderId'] + time.sleep(1) + return None + + +def wait_for_task_completion(module, task): + ''' Poll CVP for the executed task to complete. There is currently no + timeout. Exits with failure if task status is Failed or Cancelled. + + :param module: Ansible module with parameters and client connection. + :param task: Task ID to poll for completion. + :return: True or exit with failure if task is cancelled or fails. + ''' + task_complete = False + while not task_complete: + task_info = module.client.api.get_task_by_id(task) + task_status = task_info['workOrderUserDefinedStatus'] + if task_status == 'Completed': + return True + elif task_status in ['Failed', 'Cancelled']: + module.fail_json(msg=str('Task %s has reported status %s. Please' + ' consult the CVP admins for more' + ' information.' 
% (task, task_status)))
+        time.sleep(2)
+
+
+def main():
+    """ main entry point for module execution
+    """
+    argument_spec = dict(
+        host=dict(required=True),
+        port=dict(required=False, default=None),
+        protocol=dict(default='https', choices=['http', 'https']),
+        username=dict(required=True),
+        password=dict(required=True, no_log=True),
+        server_name=dict(required=True),
+        switch_name=dict(required=True),
+        switch_port=dict(required=True),
+        port_vlan=dict(required=False, default=None),
+        template=dict(required=True),
+        action=dict(default='show', choices=['show', 'add', 'remove']),
+        auto_run=dict(type='bool', default=False))
+
+    module = AnsibleModule(argument_spec=argument_spec,
+                           supports_check_mode=False)
+    if not HAS_JINJA2:
+        module.fail_json(msg='The Jinja2 python module is required.')
+    if not HAS_CVPRAC:
+        module.fail_json(msg='The cvprac python module is required.')
+    result = dict(changed=False)
+    module.client = connect(module)
+
+    try:
+        result['switchInfo'] = switch_info(module)
+        if module.params['action'] in ['add', 'remove']:
+            switch_in_compliance(module, result['switchInfo'])
+        switch_configlet = server_configurable_configlet(module,
+                                                         result['switchInfo'])
+        if not switch_configlet:
+            module.fail_json(msg=str('Switch %s has no configurable server'
+                                     ' ports.' % module.params['switch_name']))
+        result['switchConfigurable'] = True
+        if not port_configurable(module, switch_configlet):
+            module.fail_json(msg=str('Port %s is not configurable as a server'
+                                     ' port on switch %s.'
+                                     % (module.params['switch_port'],
+                                        module.params['switch_name'])))
+        result['portConfigurable'] = True
+        result['taskCreated'] = False
+        result['taskExecuted'] = False
+        result['taskCompleted'] = False
+        result.update(configlet_action(module, switch_configlet))
+        if module.params['auto_run'] and module.params['action'] != 'show':
+            task_id = configlet_update_task(module)
+            if task_id:
+                result['taskId'] = task_id
+                note = ('Update config on %s with %s action from Ansible.'
+                        % (module.params['switch_name'],
+                           module.params['action']))
+                module.client.api.add_note_to_task(task_id, note)
+                module.client.api.execute_task(task_id)
+                result['taskExecuted'] = True
+                task_completed = wait_for_task_completion(module, task_id)
+                if task_completed:
+                    result['taskCompleted'] = True
+            else:
+                result['taskCreated'] = False
+    except CvpApiError as e:
+        module.fail_json(msg=str(e))
+
+    module.exit_json(**result)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/network/cnos/cnos_backup.py b/plugins/modules/network/cnos/cnos_backup.py
new file mode 100644
index 0000000000..366906f902
--- /dev/null
+++ b/plugins/modules/network/cnos/cnos_backup.py
@@ -0,0 +1,276 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+#
+# Copyright (C) 2017 Lenovo, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+# Module to Backup Config to Lenovo Switches
+# Lenovo Networking
+#
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: cnos_backup
+author: "Anil Kumar Muraleedharan (@amuraleedhar)"
+short_description: Backup the current running or startup configuration to a
+                   remote server on devices running Lenovo CNOS
+description:
+  - This module allows you to work with switch configurations. It provides a
+    way to back up the running or startup configurations of a switch to a
+    remote server. This is achieved by periodically saving a copy of the
+    startup or running configuration of the network device to a remote server
+    using FTP, SFTP, TFTP, or SCP. The first step is to create a directory from
+    where the remote server can be reached. The next step is to provide the
+    full file path of the location where the configuration will be backed up.
+    Authentication details required by the remote server must be provided as
+    well. This module uses SSH to manage network device configuration.
+    The results of the operation will be placed in a directory named 'results'
+    that must be created by the user in their local directory to where the
+    playbook is run.
+extends_documentation_fragment:
+- community.general.cnos
+
+options:
+  configType:
+    description:
+      - This specifies what type of configuration will be backed up. The
+        choices are the running or startup configurations. There is no
+        default value, so it will result in an error if the input is
+        incorrect.
+    required: Yes
+    default: Null
+    choices: [running-config, startup-config]
+  protocol:
+    description:
+      - This refers to the protocol used by the network device to
+        interact with the remote server to which the backup
+        configuration is uploaded. The choices are FTP, SFTP, TFTP, or SCP.
+        Any other protocol will result in an error. If this parameter is
+        not specified, there is no default value to be used.
+    required: Yes
+    default: Null
+    choices: [SFTP, SCP, FTP, TFTP]
+  serverip:
+    description:
+      - This specifies the IP Address of the remote server to where the
+        configuration will be backed up.
+    required: Yes
+    default: Null
+  rcpath:
+    description:
+      - This specifies the full file path where the configuration file
+        will be copied on the remote server. In case a relative path is
+        used as the variable value, the root folder for the user of the
+        server needs to be specified.
+    required: Yes
+    default: Null
+  serverusername:
+    description:
+      - Specify the username for the server relating to the protocol
+        used.
+    required: Yes
+    default: Null
+  serverpassword:
+    description:
+      - Specify the password for the server relating to the protocol
+        used.
+    required: Yes
+    default: Null
+'''
+EXAMPLES = '''
+Tasks: The following are examples of using the module cnos_backup.
+  These are written in the main.yml file of the tasks directory.
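+
+A note on the local 'results' directory: the module appends its status text to
+the given outputfile and will create missing path components on the fly, but
+an explicit preparatory task keeps that setup visible in the play. A minimal
+sketch (illustrative, using the stock file module) could look like this:
+
+- name: Ensure the local results directory exists
+  file:
+    path: ./results
+    state: directory
+  delegate_to: localhost
+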
+--- +- name: Test Running Config Backup + cnos_backup: + deviceType: "{{ hostvars[inventory_hostname]['deviceType'] }}" + outputfile: "./results/test_backup_{{ inventory_hostname }}_output.txt" + configType: running-config + protocol: "sftp" + serverip: "10.241.106.118" + rcpath: "/root/cnos/G8272-running-config.txt" + serverusername: "root" + serverpassword: "root123" + +- name: Test Startup Config Backup + cnos_backup: + deviceType: "{{ hostvars[inventory_hostname]['deviceType'] }}" + outputfile: "./results/test_backup_{{ inventory_hostname }}_output.txt" + configType: startup-config + protocol: "sftp" + serverip: "10.241.106.118" + rcpath: "/root/cnos/G8272-startup-config.txt" + serverusername: "root" + serverpassword: "root123" + +- name: Test Running Config Backup -TFTP + cnos_backup: + deviceType: "{{ hostvars[inventory_hostname]['deviceType'] }}" + outputfile: "./results/test_backup_{{ inventory_hostname }}_output.txt" + configType: running-config + protocol: "tftp" + serverip: "10.241.106.118" + rcpath: "/anil/G8272-running-config.txt" + serverusername: "root" + serverpassword: "root123" + +- name: Test Startup Config Backup - TFTP + cnos_backup: + deviceType: "{{ hostvars[inventory_hostname]['deviceType'] }}" + outputfile: "./results/test_backup_{{ inventory_hostname }}_output.txt" + configType: startup-config + protocol: "tftp" + serverip: "10.241.106.118" + rcpath: "/anil/G8272-startup-config.txt" + serverusername: "root" + serverpassword: "root123" + +''' +RETURN = ''' +msg: + description: Success or failure message + returned: always + type: str + sample: "Config file transferred to server" +''' + +import sys +import time +import socket +import array +import json +import time +import re +import os +try: + from ansible_collections.community.general.plugins.module_utils.network.cnos import cnos + HAS_LIB = True +except Exception: + HAS_LIB = False +from ansible.module_utils.basic import AnsibleModule +from collections import defaultdict + + +# Utility Method to back up the running config or start up config +# This method supports only SCP or SFTP or FTP or TFTP +# Tuning of timeout parameter is pending +def doConfigBackUp(module, prompt, answer): + host = module.params['host'] + server = module.params['serverip'] + username = module.params['serverusername'] + password = module.params['serverpassword'] + protocol = module.params['protocol'].lower() + rcPath = module.params['rcpath'] + configType = module.params['configType'] + confPath = rcPath + host + '_' + configType + '.txt' + + retVal = '' + + # config backup command happens here + command = "copy " + configType + " " + protocol + " " + protocol + "://" + command = command + username + "@" + server + "/" + confPath + command = command + " vrf management\n" + cnos.debugOutput(command + "\n") + # cnos.checkForFirstTimeAccess(module, command, 'yes/no', 'yes') + cmd = [] + if(protocol == "scp"): + scp_cmd1 = [{'command': command, 'prompt': 'timeout:', 'answer': '0'}] + scp_cmd2 = [{'command': '\n', 'prompt': 'Password:', + 'answer': password}] + cmd.extend(scp_cmd1) + cmd.extend(scp_cmd2) + retVal = retVal + str(cnos.run_cnos_commands(module, cmd)) + elif(protocol == "sftp"): + sftp_cmd = [{'command': command, 'prompt': 'Password:', + 'answer': password}] + cmd.extend(sftp_cmd) + retVal = retVal + str(cnos.run_cnos_commands(module, cmd)) + elif(protocol == "ftp"): + ftp_cmd = [{'command': command, 'prompt': 'Password:', + 'answer': password}] + cmd.extend(ftp_cmd) + retVal = retVal + str(cnos.run_cnos_commands(module, cmd)) + 
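    # For SCP, SFTP and FTP, the assembled CLI looks like (illustrative
+    # values, not from the source):
+    #   copy running-config sftp sftp://root@10.241.106.118/<rcpath><host>_running-config.txt vrf management
+    # TFTP is handled separately below: its URL carries no credentials, so
+    # the command is rebuilt without the "user@" part and needs no prompts.
+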
elif(protocol == "tftp"):
+        command = "copy " + configType + " " + protocol + " " + protocol
+        command = command + "://" + server + "/" + confPath
+        command = command + " vrf management\n"
+        # cnos.debugOutput(command)
+        tftp_cmd = [{'command': command, 'prompt': None, 'answer': None}]
+        cmd.extend(tftp_cmd)
+        retVal = retVal + str(cnos.run_cnos_commands(module, cmd))
+    else:
+        return "Error-110"
+
+    return retVal
+# EOM
+
+
+def main():
+
+    module = AnsibleModule(
+        argument_spec=dict(
+            outputfile=dict(required=True),
+            host=dict(required=False),
+            username=dict(required=False),
+            password=dict(required=False, no_log=True),
+            enablePassword=dict(required=False, no_log=True),
+            deviceType=dict(required=True),
+            configType=dict(required=True),
+            protocol=dict(required=True),
+            serverip=dict(required=True),
+            rcpath=dict(required=True),
+            serverusername=dict(required=False),
+            serverpassword=dict(required=False, no_log=True),),
+        supports_check_mode=False)
+
+    outputfile = module.params['outputfile']
+    protocol = module.params['protocol'].lower()
+    output = ''
+    if(protocol == "tftp" or protocol == "ftp" or
+       protocol == "sftp" or protocol == "scp"):
+        transfer_status = doConfigBackUp(module, None, None)
+    else:
+        transfer_status = "Invalid Protocol option"
+
+    output = output + "\n Config Back Up status \n" + transfer_status
+
+    # Save it into the file
+    path = outputfile.rsplit('/', 1)
+    # cnos.debugOutput(path[0])
+    if not os.path.exists(path[0]):
+        os.makedirs(path[0])
+    file = open(outputfile, "a")
+    file.write(output)
+    file.close()
+
+    # Logic to check when changes occur or not
+    errorMsg = cnos.checkOutputForError(output)
+    if(errorMsg is None):
+        module.exit_json(changed=True, msg="Config file transferred to server")
+    else:
+        module.fail_json(msg=errorMsg)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/network/cnos/cnos_banner.py b/plugins/modules/network/cnos/cnos_banner.py
new file mode 100644
index 0000000000..c78b37ce6f
--- /dev/null
+++ b/plugins/modules/network/cnos/cnos_banner.py
@@ -0,0 +1,263 @@
+#!/usr/bin/python
+#
+# Copyright (C) 2017 Lenovo, Inc.
+# (c) 2017, Ansible by Red Hat, inc
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+# Module to send banner commands to Lenovo Switches
+# Two types of banners are supported: login and motd
+# Lenovo Networking
+#
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: cnos_banner
+author: "Anil Kumar Muraleedharan (@amuraleedhar)"
+short_description: Manage multiline banners on Lenovo CNOS devices
+description:
+  - This will configure both login and motd banners on remote devices
+    running Lenovo CNOS. It allows playbooks to add or remove
+    banner text from the active running configuration.
+notes: + - Tested against CNOS 10.8.1 +options: + banner: + description: + - Specifies which banner should be configured on the remote device. + In Ansible 2.8 and earlier only I(login) and I(motd) were supported. + required: true + choices: ['login', 'motd'] + text: + description: + - The banner text that should be + present in the remote device running configuration. This argument + accepts a multiline string, with no empty lines. Requires + I(state=present). + state: + description: + - Specifies whether or not the configuration is + present in the current devices active running configuration. + default: present + choices: ['present', 'absent'] + provider: + description: + - B(Deprecated) + - "Starting with Ansible 2.5 we recommend using + C(connection: network_cli)." + - For more information please see the + L(CNOS Platform Options guide, ../network/user_guide/platform_cnos.html). + - HORIZONTALLINE + - A dict object containing connection details. + suboptions: + host: + description: + - Specifies the DNS host name or address for connecting to the remote + device over the specified transport. The value of host is used as + the destination address for the transport. + required: true + port: + description: + - Specifies the port to use when building the connection to the + remote device. + default: 22 + username: + description: + - Configures the username to use to authenticate the connection to + the remote device. This value is used to authenticate + the SSH session. If the value is not specified in the task, the + value of environment variable C(ANSIBLE_NET_USERNAME) will be used + instead. + password: + description: + - Specifies the password to use to authenticate the connection to + the remote device. This value is used to authenticate + the SSH session. If the value is not specified in the task, the + value of environment variable C(ANSIBLE_NET_PASSWORD) will be used + instead. + timeout: + description: + - Specifies the timeout in seconds for communicating with the network + device for either connecting or sending commands. If the timeout + is exceeded before the operation is completed, the module will + error. + default: 10 + ssh_keyfile: + description: + - Specifies the SSH key to use to authenticate the connection to + the remote device. This value is the path to the + key used to authenticate the SSH session. If the value is not + specified in the task, the value of environment variable + C(ANSIBLE_NET_SSH_KEYFILE)will be used instead. + authorize: + description: + - Instructs the module to enter privileged mode on the remote device + before sending any commands. If not specified, the device will + attempt to execute all commands in non-privileged mode. If the + value is not specified in the task, the value of environment + variable C(ANSIBLE_NET_AUTHORIZE) will be used instead. + type: bool + default: 'no' + auth_pass: + description: + - Specifies the password to use if required to enter privileged mode + on the remote device. If I(authorize) is false, then this argument + does nothing. If the value is not specified in the task, the value + of environment variable C(ANSIBLE_NET_AUTH_PASS) will be used + instead. 
+''' + +EXAMPLES = """ +- name: configure the login banner + cnos_banner: + banner: login + text: | + this is my login banner + that contains a multiline + string + state: present + +- name: remove the motd banner + cnos_banner: + banner: motd + state: absent + +- name: Configure banner from file + cnos_banner: + banner: motd + text: "{{ lookup('file', './config_partial/raw_banner.cfg') }}" + state: present + +""" + +RETURN = """ +commands: + description: The list of configuration mode commands to send to the device + returned: always + type: list + sample: + - banner login + - this is my login banner + - that contains a multiline + - string +""" +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.connection import exec_command +from ansible_collections.community.general.plugins.module_utils.network.cnos.cnos import load_config, run_commands +from ansible_collections.community.general.plugins.module_utils.network.cnos.cnos import check_args +from ansible_collections.community.general.plugins.module_utils.network.cnos.cnos import cnos_argument_spec +from ansible.module_utils._text import to_text +import re + + +def map_obj_to_commands(updates, module): + commands = list() + want, have = updates + state = module.params['state'] + + if state == 'absent' and 'text' in have.keys() and have['text']: + commands.append('no banner %s' % module.params['banner']) + + elif state == 'present': + if want['text'] and (want['text'] != have.get('text')): + banner_cmd = 'banner %s ' % module.params['banner'] + for bline in want['text'].strip().splitlines(): + final_cmd = banner_cmd + bline.strip() + commands.append(final_cmd) + + return commands + + +def map_config_to_obj(module): + rc, out, err = exec_command(module, + 'show banner %s' % module.params['banner']) + if rc == 0: + output = out + else: + rc, out, err = exec_command(module, + 'show running-config | include banner %s' + % module.params['banner']) + if out: + output = re.search(r'\^C(.*)\^C', out, re.S).group(1).strip() + else: + output = None + obj = {'banner': module.params['banner'], 'state': 'absent'} + if output: + obj['text'] = output + obj['state'] = 'present' + return obj + + +def map_params_to_obj(module): + text = module.params['text'] + if text: + text = to_text(text).strip() + + return { + 'banner': module.params['banner'], + 'text': text, + 'state': module.params['state'] + } + + +def main(): + """ main entry point for module execution + """ + argument_spec = dict( + banner=dict(required=True, choices=['login', 'motd']), + text=dict(), + state=dict(default='present', choices=['present', 'absent']) + ) + + argument_spec.update(cnos_argument_spec) + + required_if = [('state', 'present', ('text',))] + + module = AnsibleModule(argument_spec=argument_spec, + required_if=required_if, + supports_check_mode=True) + + warnings = list() + check_args(module, warnings) + + result = {'changed': False} + if warnings: + result['warnings'] = warnings + want = map_params_to_obj(module) + have = map_config_to_obj(module) + + commands = map_obj_to_commands((want, have), module) + result['commands'] = commands + + if commands: + if not module.check_mode: + response = load_config(module, commands) + + result['changed'] = True + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/cnos/cnos_bgp.py b/plugins/modules/network/cnos/cnos_bgp.py new file mode 100644 index 0000000000..a6ce89e3e7 --- /dev/null +++ b/plugins/modules/network/cnos/cnos_bgp.py @@ -0,0 +1,1180 @@ 
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+#
+# Copyright (C) 2017 Lenovo, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+# Module to send BGP commands to Lenovo Switches
+# Lenovo Networking
+#
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+DOCUMENTATION = '''
+---
+module: cnos_bgp
+author: "Anil Kumar Muraleedharan (@amuraleedhar)"
+short_description: Manage BGP resources and attributes on devices running CNOS
+description:
+  - This module allows you to work with Border Gateway Protocol (BGP) related
+    configurations. The operators used are overloaded to ensure control over
+    switch BGP configurations. This module is invoked using a method with
+    asNum as one of its arguments. The first level of the BGP configuration
+    allows you to set up an AS number, with the following attributes going
+    into various configuration operations under the context of BGP.
+    After passing this level, there are eight BGP arguments that will perform
+    further configurations. They are bgpArg1, bgpArg2, bgpArg3, bgpArg4,
+    bgpArg5, bgpArg6, bgpArg7, and bgpArg8. For more details on how to use
+    these arguments, see [Overloaded Variables].
+    This module uses SSH to manage network device configuration.
+    The results of the operation will be placed in a directory named 'results'
+    that must be created by the user in their local directory to where the
+    playbook is run.
+extends_documentation_fragment:
+- community.general.cnos
+
+options:
+  asNum:
+    description:
+      - AS number
+    required: Yes
+    default: Null
+  bgpArg1:
+    description:
+      - This is an overloaded bgp first argument. Usage of this argument
+        can be found in the User Guide referenced above.
+    required: Yes
+    default: Null
+    choices: [address-family,bestpath,bgp,cluster-id,confederation,
+              enforce-first-as,fast-external-failover,graceful-restart,
+              graceful-restart-helper,log-neighbor-changes,
+              maxas-limit,neighbor,router-id,shutdown,synchronization,
+              timers,vrf]
+  bgpArg2:
+    description:
+      - This is an overloaded bgp second argument. Usage of this argument
+        can be found in the User Guide referenced above.
+    required: No
+    default: Null
+    choices: [ipv4 or ipv6, always-compare-med,compare-confed-aspath,
+              compare-routerid,dont-compare-originator-id,tie-break-on-age,
+              as-path,med,identifier,peers]
+  bgpArg3:
+    description:
+      - This is an overloaded bgp third argument. Usage of this argument
+        can be found in the User Guide referenced above.
+    required: No
+    default: Null
+    choices: [aggregate-address,client-to-client,dampening,distance,
+              maximum-paths,network,nexthop,redistribute,save,
+              synchronization,ignore or multipath-relax,
+              confed or missing-as-worst or non-deterministic or
+              remove-recv-med or remove-send-med]
+  bgpArg4:
+    description:
+      - This is an overloaded bgp fourth argument. Usage of this argument
+        can be found in the User Guide referenced above.
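+    # Illustrative (assumed) expansion: the first neighbor example under
+    # EXAMPLES below, asNum=33, bgpArg1=neighbor, bgpArg2=10.241.107.40,
+    # bgpArg3=13, roughly corresponds to the CLI sequence
+    #   router bgp 33
+    #     neighbor 10.241.107.40 remote-as 13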
+    required: No
+    default: Null
+    choices: [Aggregate prefix, Reachability Half-life time,route-map,
+              Distance for routes ext,ebgp or ibgp,IP prefix <network>,
+              IP prefix <network>/<length>, synchronization,
+              Delay value, direct, ospf, static, memory]
+  bgpArg5:
+    description:
+      - This is an overloaded bgp fifth argument. Usage of this argument
+        can be found in the User Guide referenced above.
+    required: No
+    default: Null
+    choices: [as-set, summary-only, Value to start reusing a route,
+              Distance for routes internal, Supported multipath numbers,
+              backdoor, map, route-map]
+  bgpArg6:
+    description:
+      - This is an overloaded bgp sixth argument. Usage of this argument
+        can be found in the User Guide referenced above.
+    required: No
+    default: Null
+    choices: [summary-only,as-set, route-map name,
+              Value to start suppressing a route, Distance local routes,
+              Network mask, Pointer to route-map entries]
+  bgpArg7:
+    description:
+      - This is an overloaded bgp seventh argument. Usage of this argument
+        can be found in the User Guide referenced above.
+    required: No
+    default: Null
+    choices: [Maximum duration to suppress a stable route(minutes),
+              backdoor,route-map, Name of the route map]
+  bgpArg8:
+    description:
+      - This is an overloaded bgp eighth argument. Usage of this argument
+        can be found in the User Guide referenced above.
+    required: No
+    default: Null
+    choices: [Un-reachability Half-life time for the penalty(minutes),
+              backdoor]
+'''
+EXAMPLES = '''
+Tasks: The following are examples of using the module cnos_bgp. These are
+  written in the main.yml file of the tasks directory.
+---
+- name: Test BGP - neighbor
+  cnos_bgp:
+    deviceType: "{{ hostvars[inventory_hostname]['deviceType'] }}"
+    outputfile: "./results/test_bgp_{{ inventory_hostname }}_output.txt"
+    asNum: 33
+    bgpArg1: "neighbor"
+    bgpArg2: "10.241.107.40"
+    bgpArg3: 13
+    bgpArg4: "address-family"
+    bgpArg5: "ipv4"
+    bgpArg6: "next-hop-self"
+
+- name: Test BGP - BFD
+  cnos_bgp:
+    deviceType: "{{ hostvars[inventory_hostname]['deviceType'] }}"
+    outputfile: "./results/test_bgp_{{ inventory_hostname }}_output.txt"
+    asNum: 33
+    bgpArg1: "neighbor"
+    bgpArg2: "10.241.107.40"
+    bgpArg3: 13
+    bgpArg4: "bfd"
+
+- name: Test BGP - address-family - dampening
+  cnos_bgp:
+    deviceType: "{{ hostvars[inventory_hostname]['deviceType'] }}"
+    outputfile: "./results/test_bgp_{{ inventory_hostname }}_output.txt"
+    asNum: 33
+    bgpArg1: "address-family"
+    bgpArg2: "ipv4"
+    bgpArg3: "dampening"
+    bgpArg4: 13
+    bgpArg5: 233
+    bgpArg6: 333
+    bgpArg7: 15
+    bgpArg8: 33
+
+- name: Test BGP - address-family - network
+  cnos_bgp:
+    deviceType: "{{ hostvars[inventory_hostname]['deviceType'] }}"
+    outputfile: "./results/test_bgp_{{ inventory_hostname }}_output.txt"
+    asNum: 33
+    bgpArg1: "address-family"
+    bgpArg2: "ipv4"
+    bgpArg3: "network"
+    bgpArg4: "1.2.3.4/5"
+    bgpArg5: "backdoor"
+
+- name: Test BGP - bestpath - always-compare-med
+  cnos_bgp:
+    deviceType: "{{ hostvars[inventory_hostname]['deviceType'] }}"
+    outputfile: "./results/test_bgp_{{ inventory_hostname }}_output.txt"
+    asNum: 33
+    bgpArg1: "bestpath"
+    bgpArg2: "always-compare-med"
+
+- name: Test BGP - bestpath - compare-confed-aspath
+  cnos_bgp:
+    deviceType: "{{ hostvars[inventory_hostname]['deviceType'] }}"
+    outputfile: "./results/test_bgp_{{ inventory_hostname }}_output.txt"
+    asNum: 33
+    bgpArg1: "bestpath"
+    bgpArg2: "compare-confed-aspath"
+
+- name: Test BGP - bgp
+  cnos_bgp:
+    deviceType: "{{ 
hostvars[inventory_hostname]['deviceType'] }}" + outputfile: "./results/test_bgp_{{ inventory_hostname }}_output.txt" + asNum: 33 + bgpArg1: "bgp" + bgpArg2: 33 + +- name: Test BGP - cluster-id + cnos_bgp: + deviceType: "{{ hostvars[inventory_hostname]['deviceType'] }}" + outputfile: "./results/test_bgp_{{ inventory_hostname }}_output.txt" + asNum: 33 + bgpArg1: "cluster-id" + bgpArg2: "1.2.3.4" + +- name: Test BGP - confederation-identifier + cnos_bgp: + deviceType: "{{ hostvars[inventory_hostname]['deviceType'] }}" + outputfile: "./results/test_bgp_{{ inventory_hostname }}_output.txt" + asNum: 33 + bgpArg1: "confederation" + bgpArg2: "identifier" + bgpArg3: 333 + +- name: Test BGP - enforce-first-as + cnos_bgp: + deviceType: "{{ hostvars[inventory_hostname]['deviceType'] }}" + outputfile: "./results/test_bgp_{{ inventory_hostname }}_output.txt" + asNum: 33 + bgpArg1: "enforce-first-as" + +- name: Test BGP - fast-external-failover + cnos_bgp: + deviceType: "{{ hostvars[inventory_hostname]['deviceType'] }}" + outputfile: "./results/test_bgp_{{ inventory_hostname }}_output.txt" + asNum: 33 + bgpArg1: "fast-external-failover" + +- name: Test BGP - graceful-restart + cnos_bgp: + deviceType: "{{ hostvars[inventory_hostname]['deviceType'] }}" + outputfile: "./results/test_bgp_{{ inventory_hostname }}_output.txt" + asNum: 33 + bgpArg1: "graceful-restart" + bgpArg2: 333 + +- name: Test BGP - graceful-restart-helper + cnos_bgp: + deviceType: "{{ hostvars[inventory_hostname]['deviceType'] }}" + outputfile: "./results/test_bgp_{{ inventory_hostname }}_output.txt" + asNum: 33 + bgpArg1: "graceful-restart-helper" + +- name: Test BGP - maxas-limit + cnos_bgp: + deviceType: "{{ hostvars[inventory_hostname]['deviceType'] }}" + outputfile: "./results/test_bgp_{{ inventory_hostname }}_output.txt" + asNum: 33 + bgpArg1: "maxas-limit" + bgpArg2: 333 + +- name: Test BGP - neighbor + cnos_bgp: + deviceType: "{{ hostvars[inventory_hostname]['deviceType'] }}" + outputfile: "./results/test_bgp_{{ inventory_hostname }}_output.txt" + asNum: 33 + bgpArg1: "neighbor" + bgpArg2: "10.241.107.40" + bgpArg3: 13 + bgpArg4: "address-family" + bgpArg5: "ipv4" + bgpArg6: "next-hop-self" + +- name: Test BGP - router-id + cnos_bgp: + deviceType: "{{ hostvars[inventory_hostname]['deviceType'] }}" + outputfile: "./results/test_bgp_{{ inventory_hostname }}_output.txt" + asNum: 33 + bgpArg1: "router-id" + bgpArg2: "1.2.3.4" + +- name: Test BGP - synchronization + cnos_bgp: + deviceType: "{{ hostvars[inventory_hostname]['deviceType'] }}" + outputfile: "./results/test_bgp_{{ inventory_hostname }}_output.txt" + asNum: 33 + bgpArg1: "synchronization" + +- name: Test BGP - timers + cnos_bgp: + deviceType: "{{ hostvars[inventory_hostname]['deviceType'] }}" + outputfile: "./results/test_bgp_{{ inventory_hostname }}_output.txt" + asNum: 33 + bgpArg1: "timers" + bgpArg2: 333 + bgpArg3: 3333 + +- name: Test BGP - vrf + cnos_bgp: + deviceType: "{{ hostvars[inventory_hostname]['deviceType'] }}" + outputfile: "./results/test_bgp_{{ inventory_hostname }}_output.txt" + asNum: 33 + bgpArg1: "vrf" + +''' +RETURN = ''' +msg: + description: Success or failure message. Upon any failure, the method returns + an error display string. 
+  returned: always
+  type: str
+'''
+
+import sys
+import time
+import socket
+import array
+import json
+import re
+try:
+    from ansible_collections.community.general.plugins.module_utils.network.cnos import cnos
+    HAS_LIB = True
+except Exception:
+    HAS_LIB = False
+from ansible.module_utils.basic import AnsibleModule
+from collections import defaultdict
+
+
+def bgpNeighborConfig(module, cmd, prompt, answer):
+    retVal = ''
+    command = ''
+    bgpNeighborArg1 = module.params['bgpArg4']
+    bgpNeighborArg2 = module.params['bgpArg5']
+    bgpNeighborArg3 = module.params['bgpArg6']
+    bgpNeighborArg4 = module.params['bgpArg7']
+    bgpNeighborArg5 = module.params['bgpArg8']
+    deviceType = module.params['deviceType']
+
+    if(bgpNeighborArg1 == "address-family"):
+        command = command + bgpNeighborArg1 + " "
+        value = cnos.checkSanityofVariable(
+            deviceType, "bgp_neighbor_address_family", bgpNeighborArg2)
+        if(value == "ok"):
+            command = command + bgpNeighborArg2 + " unicast"
+            # debugOutput(command)
+            inner_cmd = [{'command': command, 'prompt': None, 'answer': None}]
+            cmd.extend(inner_cmd)
+            retVal = retVal + bgpNeighborAFConfig(
+                module, cmd, '(config-router-neighbor-af)#', answer)
+            return retVal
+        else:
+            retVal = "Error-316"
+            return retVal
+
+    elif(bgpNeighborArg1 == "advertisement-interval"):
+        command = command + bgpNeighborArg1
+
+    elif(bgpNeighborArg1 == "bfd"):
+        command = command + bgpNeighborArg1 + " "
+        # "multihop" is the bfd keyword ("mutihop" was a typo)
+        if(bgpNeighborArg2 is not None and bgpNeighborArg2 == "multihop"):
+            command = command + bgpNeighborArg2
+
+    elif(bgpNeighborArg1 == "connection-retry-time"):
+        command = command + bgpNeighborArg1 + " "
+        value = cnos.checkSanityofVariable(
+            deviceType, "bgp_neighbor_connection_retrytime", bgpNeighborArg2)
+        if(value == "ok"):
+            command = command + bgpNeighborArg2
+        else:
+            retVal = "Error-315"
+            return retVal
+
+    elif(bgpNeighborArg1 == "description"):
+        command = command + bgpNeighborArg1 + " "
+        value = cnos.checkSanityofVariable(
+            deviceType, "bgp_neighbor_description", bgpNeighborArg2)
+        if(value == "ok"):
+            command = command + bgpNeighborArg2
+        else:
+            retVal = "Error-314"
+            return retVal
+
+    elif(bgpNeighborArg1 == "disallow-infinite-holdtime"):
+        command = command + bgpNeighborArg1
+
+    elif(bgpNeighborArg1 == "dont-capability-negotiate"):
+        command = command + bgpNeighborArg1
+
+    elif(bgpNeighborArg1 == "dynamic-capability"):
+        command = command + bgpNeighborArg1
+
+    elif(bgpNeighborArg1 == "ebgp-multihop"):
+        command = command + bgpNeighborArg1 + " "
+        value = cnos.checkSanityofVariable(
+            deviceType, "bgp_neighbor_maxhopcount", bgpNeighborArg2)
+        if(value == "ok"):
+            command = command + bgpNeighborArg2
+        else:
+            retVal = "Error-313"
+            return retVal
+
+    elif(bgpNeighborArg1 == "interface"):
+        command = command + bgpNeighborArg1 + " "
+        # TBD
+
+    elif(bgpNeighborArg1 == "local-as"):
+        command = command + bgpNeighborArg1 + " "
+        value = cnos.checkSanityofVariable(
+            deviceType, "bgp_neighbor_local_as", bgpNeighborArg2)
+        if(value == "ok"):
+            command = command + bgpNeighborArg2 + " "
+            if(bgpNeighborArg3 is not None and
+                    bgpNeighborArg3 == "no-prepend"):
+                command = command + bgpNeighborArg3 + " "
+                if(bgpNeighborArg4 is not None and
+                        bgpNeighborArg4 == "replace-as"):
+                    command = command + bgpNeighborArg4 + " "
+                    if(bgpNeighborArg5 is not None and
+                            bgpNeighborArg5 == "dual-as"):
+                        command = command + bgpNeighborArg5
+                    else:
+                        command = command.strip()
+                else:
+                    command = command.strip()
+            else:
+                command = command.strip()
+        else:
+            retVal = "Error-312"
+            return retVal
+
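+    # Illustration (hypothetical values): with bgpArg4=local-as,
+    # bgpArg5=64512, bgpArg6=no-prepend, bgpArg7=replace-as and
+    # bgpArg8=dual-as, the branch above assembles the CLI string
+    #     local-as 64512 no-prepend replace-as dual-as
+    # and the common tail below queues it behind the "router bgp <asNum>"
+    # and "neighbor <ip> remote-as <as>" context entries already in cmd.
+
+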
elif(bgpNeighborArg1 == "maximum-peers"): + command = command + bgpNeighborArg1 + " " + value = cnos.checkSanityofVariable( + deviceType, "bgp_neighbor_maxpeers", bgpNeighborArg2) + if(value == "ok"): + command = command + bgpNeighborArg2 + else: + retVal = "Error-311" + return retVal + + elif(bgpNeighborArg1 == "password"): + command = command + bgpNeighborArg1 + " " + value = cnos.checkSanityofVariable( + deviceType, "bgp_neighbor_password", bgpNeighborArg2) + if(value == "ok"): + command = command + bgpNeighborArg2 + else: + retVal = "Error-310" + return retVal + + elif(bgpNeighborArg1 == "remove-private-AS"): + command = command + bgpNeighborArg1 + + elif(bgpNeighborArg1 == "timers"): + command = command + bgpNeighborArg1 + " " + value = cnos.checkSanityofVariable( + deviceType, "bgp_neighbor_timers_Keepalive", bgpNeighborArg2) + if(value == "ok"): + command = command + bgpNeighborArg2 + " " + value = cnos.checkSanityofVariable( + deviceType, "bgp_neighbor_timers_holdtime", bgpNeighborArg3) + if(value == "ok"): + command = command + bgpNeighborArg3 + else: + retVal = "Error-309" + return retVal + else: + retVal = "Error-308" + return retVal + + elif(bgpNeighborArg1 == "transport"): + command = command + bgpNeighborArg1 + " connection-mode passive " + + elif(bgpNeighborArg1 == "ttl-security"): + command = command + bgpNeighborArg1 + " hops " + value = cnos.checkSanityofVariable( + deviceType, "bgp_neighbor_ttl_hops", bgpNeighborArg2) + if(value == "ok"): + command = command + bgpNeighborArg2 + else: + retVal = "Error-307" + return retVal + + elif(bgpNeighborArg1 == "update-source"): + command = command + bgpNeighborArg1 + " " + if(bgpNeighborArg2 is not None): + value = cnos.checkSanityofVariable( + deviceType, "bgp_neighbor_update_options", bgpNeighborArg2) + if(value == "ok"): + command = command + bgpNeighborArg2 + " " + if(bgpNeighborArg2 == "ethernet"): + value = cnos.checkSanityofVariable( + deviceType, "bgp_neighbor_update_ethernet", + bgpNeighborArg3) + if(value == "ok"): + command = command + bgpNeighborArg3 + else: + retVal = "Error-304" + return retVal + elif(bgpNeighborArg2 == "loopback"): + value = cnos.checkSanityofVariable( + deviceType, "bgp_neighbor_update_loopback", + bgpNeighborArg3) + if(value == "ok"): + command = command + bgpNeighborArg3 + else: + retVal = "Error-305" + return retVal + else: + value = cnos.checkSanityofVariable( + deviceType, "bgp_neighbor_update_vlan", + bgpNeighborArg3) + if(value == "ok"): + command = command + bgpNeighborArg3 + else: + retVal = "Error-306" + return retVal + else: + command = command + bgpNeighborArg2 + else: + retVal = "Error-303" + return retVal + + elif(bgpNeighborArg1 == "weight"): + command = command + bgpNeighborArg1 + " " + value = cnos.checkSanityofVariable( + deviceType, "bgp_neighbor_weight", bgpNeighborArg2) + if(value == "ok"): + command = command + bgpNeighborArg2 + else: + retVal = "Error-302" + return retVal + + else: + retVal = "Error-301" + return retVal + + # debugOutput(command) + inner_cmd = [{'command': command, 'prompt': None, 'answer': None}] + cmd.extend(inner_cmd) + retVal = retVal + str(cnos.run_cnos_commands(module, cmd)) + command = "exit \n" + return retVal +# EOM + + +def bgpNeighborAFConfig(module, cmd, prompt, answer): + retVal = '' + command = '' + bgpNeighborAFArg1 = module.params['bgpArg6'] + bgpNeighborAFArg2 = module.params['bgpArg7'] + bgpNeighborAFArg3 = module.params['bgpArg8'] + deviceType = module.params['deviceType'] + if(bgpNeighborAFArg1 == "allowas-in"): + command = command + 
bgpNeighborAFArg1 + " "
+        if(bgpNeighborAFArg2 is not None):
+            value = cnos.checkSanityofVariable(
+                deviceType, "bgp_neighbor_af_occurances", bgpNeighborAFArg2)
+            if(value == "ok"):
+                command = command + bgpNeighborAFArg2
+            else:
+                retVal = "Error-325"
+                return retVal
+
+    elif(bgpNeighborAFArg1 == "default-originate"):
+        command = command + bgpNeighborAFArg1 + " "
+        if(bgpNeighborAFArg2 is not None and bgpNeighborAFArg2 == "route-map"):
+            command = command + bgpNeighborAFArg2 + " "
+            # validate the route-map name (bgpArg8) that gets appended,
+            # not the literal "route-map" keyword tested just above
+            value = cnos.checkSanityofVariable(
+                deviceType, "bgp_neighbor_af_routemap", bgpNeighborAFArg3)
+            if(value == "ok"):
+                command = command + bgpNeighborAFArg3
+            else:
+                retVal = "Error-324"
+                return retVal
+
+    elif(bgpNeighborAFArg1 == "filter-list"):
+        command = command + bgpNeighborAFArg1 + " "
+        value = cnos.checkSanityofVariable(
+            deviceType, "bgp_neighbor_af_filtername", bgpNeighborAFArg2)
+        if(value == "ok"):
+            command = command + bgpNeighborAFArg2 + " "
+            if(bgpNeighborAFArg3 == "in" or bgpNeighborAFArg3 == "out"):
+                command = command + bgpNeighborAFArg3
+            else:
+                retVal = "Error-323"
+                return retVal
+        else:
+            retVal = "Error-322"
+            return retVal
+
+    elif(bgpNeighborAFArg1 == "maximum-prefix"):
+        command = command + bgpNeighborAFArg1 + " "
+        value = cnos.checkSanityofVariable(
+            deviceType, "bgp_neighbor_af_maxprefix", bgpNeighborAFArg2)
+        if(value == "ok"):
+            command = command + bgpNeighborAFArg2 + " "
+            if(bgpNeighborAFArg3 is not None):
+                command = command + bgpNeighborAFArg3
+            else:
+                command = command.strip()
+        else:
+            retVal = "Error-326"
+            return retVal
+
+    elif(bgpNeighborAFArg1 == "next-hop-self"):
+        command = command + bgpNeighborAFArg1
+
+    elif(bgpNeighborAFArg1 == "prefix-list"):
+        command = command + bgpNeighborAFArg1 + " "
+        value = cnos.checkSanityofVariable(
+            deviceType, "bgp_neighbor_af_prefixname", bgpNeighborAFArg2)
+        if(value == "ok"):
+            command = command + bgpNeighborAFArg2 + " "
+            if(bgpNeighborAFArg3 == "in" or bgpNeighborAFArg3 == "out"):
+                command = command + bgpNeighborAFArg3
+            else:
+                retVal = "Error-321"
+                return retVal
+        else:
+            retVal = "Error-320"
+            return retVal
+
+    elif(bgpNeighborAFArg1 == "route-map"):
+        command = command + bgpNeighborAFArg1 + " "
+        value = cnos.checkSanityofVariable(
+            deviceType, "bgp_neighbor_af_routemap", bgpNeighborAFArg2)
+        if(value == "ok"):
+            command = command + bgpNeighborAFArg2
+        else:
+            retVal = "Error-319"
+            return retVal
+
+    elif(bgpNeighborAFArg1 == "route-reflector-client"):
+        command = command + bgpNeighborAFArg1
+
+    elif(bgpNeighborAFArg1 == "send-community"):
+        command = command + bgpNeighborAFArg1 + " "
+        if(bgpNeighborAFArg2 is not None and bgpNeighborAFArg2 == "extended"):
+            command = command + bgpNeighborAFArg2
+
+    elif(bgpNeighborAFArg1 == "soft-reconfiguration"):
+        command = command + bgpNeighborAFArg1 + " inbound"
+
+    elif(bgpNeighborAFArg1 == "unsuppress-map"):
+        command = command + bgpNeighborAFArg1 + " "
+        value = cnos.checkSanityofVariable(
+            deviceType, "bgp_neighbor_af_routemap", bgpNeighborAFArg2)
+        if(value == "ok"):
+            command = command + bgpNeighborAFArg2
+        else:
+            retVal = "Error-318"
+            return retVal
+
+    else:
+        retVal = "Error-317"
+        return retVal
+
+    # debugOutput(command)
+    inner_cmd = [{'command': command, 'prompt': None, 'answer': None}]
+    cmd.extend(inner_cmd)
+    retVal = retVal + str(cnos.run_cnos_commands(module, cmd))
+    return retVal
+# EOM
+
+
+def bgpAFConfig(module, cmd, prompt, answer):
+    retVal = ''
+    command = ''
+    bgpAFArg1 = module.params['bgpArg3']
+    bgpAFArg2 = module.params['bgpArg4']
+    bgpAFArg3 =
module.params['bgpArg5'] + bgpAFArg4 = module.params['bgpArg6'] + bgpAFArg5 = module.params['bgpArg7'] + bgpAFArg6 = module.params['bgpArg8'] + deviceType = module.params['deviceType'] + if(bgpAFArg1 == "aggregate-address"): + command = command + bgpAFArg1 + " " + value = cnos.checkSanityofVariable( + deviceType, "bgp_aggregate_prefix", bgpAFArg2) + if(value == "ok"): + if(bgpAFArg2 is None): + command = command.strip() + elif(bgpAFArg2 == "as-set" or bgpAFArg2 == "summary-only"): + command = command + bgpAFArg2 + " " + if((bgpAFArg3 is not None) and (bgpAFArg2 == "as-set")): + command = command + "summary-only" + else: + retVal = "Error-297" + return retVal + else: + retVal = "Error-296" + return retVal + + elif(bgpAFArg1 == "client-to-client"): + command = command + bgpAFArg1 + " reflection " + + elif(bgpAFArg1 == "dampening"): + command = command + bgpAFArg1 + " " + if(bgpAFArg2 == "route-map"): + command = command + bgpAFArg2 + " " + value = cnos.checkSanityofVariable( + deviceType, "addrfamily_routemap_name", bgpAFArg3) + if(value == "ok"): + command = command + bgpAFArg3 + else: + retVal = "Error-196" + return retVal + elif(bgpAFArg2 is not None): + value = cnos.checkSanityofVariable( + deviceType, "reachability_half_life", bgpAFArg2) + if(value == "ok"): + command = command + bgpAFArg2 + " " + if(bgpAFArg3 is not None): + value1 = cnos.checkSanityofVariable( + deviceType, "start_reuse_route_value", bgpAFArg3) + value2 = cnos.checkSanityofVariable( + deviceType, "start_suppress_route_value", bgpAFArg4) + value3 = cnos.checkSanityofVariable( + deviceType, "max_duration_to_suppress_route", + bgpAFArg5) + if(value1 == "ok" and value2 == "ok" and value3 == "ok"): + command = command + bgpAFArg3 + " " + bgpAFArg4 + \ + " " + bgpAFArg5 + " " + if(bgpAFArg6 is not None): + value = cnos.checkSanityofVariable( + deviceType, + "unreachability_halftime_for_penalty", + bgpAFArg6) + if(value == "ok"): + command = command + bgpAFArg6 + else: + retVal = "Error-295" + return retVal + else: + command = command.strip() + else: + retVal = "Error-294" + return retVal + + elif(bgpAFArg1 == "distance"): + command = command + bgpAFArg1 + " " + value = cnos.checkSanityofVariable( + deviceType, "distance_external_AS", bgpAFArg2) + if(value == "ok"): + command = command + bgpAFArg2 + " " + value = cnos.checkSanityofVariable( + deviceType, "distance_internal_AS", bgpAFArg3) + if(value == "ok"): + command = command + bgpAFArg3 + " " + value = cnos.checkSanityofVariable( + deviceType, "distance_local_routes", bgpAFArg4) + if(value == "ok"): + command = command + bgpAFArg4 + else: + retVal = "Error-291" + return retVal + else: + retVal = "Error-292" + return retVal + else: + retVal = "Error-293" + return retVal + + elif(bgpAFArg1 == "maximum-paths"): + command = command + bgpAFArg1 + " " + value = cnos.checkSanityofVariable(deviceType, "maxpath_option", bgpAFArg2) + if(value == "ok"): + command = command + bgpAFArg2 + " " + value = cnos.checkSanityofVariable( + deviceType, "maxpath_numbers", bgpAFArg3) + if(value == "ok"): + command = command + bgpAFArg3 + else: + retVal = "Error-199" + return retVal + else: + retVal = "Error-290" + return retVal + + elif(bgpAFArg1 == "network"): + command = command + bgpAFArg1 + " " + if(bgpAFArg2 == "synchronization"): + command = command + bgpAFArg2 + else: + value = cnos.checkSanityofVariable( + deviceType, "network_ip_prefix_with_mask", bgpAFArg2) + if(value == "ok"): + command = command + bgpAFArg2 + " " + if(bgpAFArg3 is not None and bgpAFArg3 == "backdoor"): + command = 
command + bgpAFArg3 + elif(bgpAFArg3 is not None and bgpAFArg3 == "route-map"): + command = command + bgpAFArg3 + value = cnos.checkSanityofVariable( + deviceType, "addrfamily_routemap_name", bgpAFArg4) + if(value == "ok"): + command = command + bgpAFArg4 + " " + if(bgpAFArg5 is not None and bgpAFArg5 == "backdoor"): + command = command + bgpAFArg5 + else: + retVal = "Error-298" + return retVal + else: + retVal = "Error-196" + return retVal + else: + command = command.strip() + else: + value = cnos.checkSanityofVariable( + deviceType, "network_ip_prefix_value", bgpAFArg2) + if(value == "ok"): + command = command + bgpAFArg2 + " " + if(bgpAFArg3 is not None and bgpAFArg3 == "backdoor"): + command = command + bgpAFArg3 + elif(bgpAFArg3 is not None and bgpAFArg3 == "route-map"): + command = command + bgpAFArg3 + value = cnos.checkSanityofVariable( + deviceType, "addrfamily_routemap_name", bgpAFArg4) + if(value == "ok"): + command = command + bgpAFArg4 + " " + if(bgpAFArg5 is not None and + bgpAFArg5 == "backdoor"): + command = command + bgpAFArg5 + else: + retVal = "Error-298" + return retVal + else: + retVal = "Error-196" + return retVal + elif(bgpAFArg3 is not None and bgpAFArg3 == "mask"): + command = command + bgpAFArg3 + value = cnos.checkSanityofVariable( + deviceType, "network_ip_prefix_mask", bgpAFArg4) + if(value == "ok"): + command = command + bgpAFArg4 + " " + else: + retVal = "Error-299" + return retVal + else: + command = command.strip() + else: + retVal = "Error-300" + return retVal + + elif(bgpAFArg1 == "nexthop"): + command = command + bgpAFArg1 + " trigger-delay critical " + value = cnos.checkSanityofVariable( + deviceType, "nexthop_crtitical_delay", bgpAFArg2) + if(value == "ok"): + command = command + bgpAFArg2 + " " + value = cnos.checkSanityofVariable( + deviceType, "nexthop_noncrtitical_delay", bgpAFArg3) + if(value == "ok"): + command = command + bgpAFArg3 + " " + else: + retVal = "Error-198" + return retVal + else: + retVal = "Error-197" + return retVal + + elif(bgpAFArg1 == "redistribute"): + command = command + bgpAFArg1 + " " + value = cnos.checkSanityofVariable( + deviceType, "addrfamily_redistribute_option", bgpAFArg2) + if(value == "ok"): + if(bgpAFArg2 is not None): + command = command + bgpAFArg2 + " " + "route-map " + value = cnos.checkSanityofVariable( + deviceType, "addrfamily_routemap_name", bgpAFArg3) + if(value == "ok"): + command = command + bgpAFArg3 + else: + retVal = "Error-196" + return retVal + else: + retVal = "Error-195" + return retVal + + elif(bgpAFArg1 == "save" or bgpAFArg1 == "synchronization"): + command = command + bgpAFArg1 + + else: + retVal = "Error-194" + return retVal + # debugOutput(command) + inner_cmd = [{'command': command, 'prompt': None, 'answer': None}] + cmd.extend(inner_cmd) + retVal = retVal + str(cnos.run_cnos_commands(module, cmd)) + command = "exit \n" + return retVal +# EOM + + +def bgpConfig(module, cmd, prompt, answer): + retVal = '' + command = '' + bgpArg1 = module.params['bgpArg1'] + bgpArg2 = module.params['bgpArg2'] + bgpArg3 = module.params['bgpArg3'] + bgpArg4 = module.params['bgpArg4'] + bgpArg5 = module.params['bgpArg5'] + bgpArg6 = module.params['bgpArg6'] + bgpArg7 = module.params['bgpArg7'] + bgpArg8 = module.params['bgpArg8'] + asNum = module.params['asNum'] + deviceType = module.params['deviceType'] + # cnos.debugOutput(bgpArg1) + if(bgpArg1 == "address-family"): + # debugOutput(bgpArg1) + command = command + bgpArg1 + " " + value = cnos.checkSanityofVariable( + deviceType, "bgp_address_family", bgpArg2) + 
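# Note: cmd already carries the "router bgp <asNum>" entry queued by
+        # main(); each helper extends this list and the whole batch is sent
+        # once via cnos.run_cnos_commands(module, cmd).
+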
if(value == "ok"): + command = command + bgpArg2 + " " + "unicast \n" + # debugOutput(command) + inner_cmd = [{'command': command, 'prompt': None, 'answer': None}] + cmd.extend(inner_cmd) + retVal = retVal + bgpAFConfig(module, cmd, prompt, answer) + return retVal + else: + retVal = "Error-178" + return retVal + + elif(bgpArg1 == "bestpath"): + # debugOutput(bgpArg1) + command = command + bgpArg1 + " " + if(bgpArg2 == "always-compare-med"): + # debugOutput(bgpArg2) + command = command + bgpArg2 + elif(bgpArg2 == "compare-confed-aspath"): + # debugOutput(bgpArg2) + command = command + bgpArg2 + elif(bgpArg2 == "compare-routerid"): + # debugOutput(bgpArg2) + command = command + bgpArg2 + elif(bgpArg2 == "dont-compare-originator-id"): + # debugOutput(bgpArg2) + command = command + bgpArg2 + elif(bgpArg2 == "tie-break-on-age"): + # debugOutput(bgpArg2) + command = command + bgpArg2 + elif(bgpArg2 == "as-path"): + # debugOutput(bgpArg2) + command = command + bgpArg2 + " " + if(bgpArg3 == "ignore" or bgpArg3 == "multipath-relax"): + command = command + bgpArg3 + else: + retVal = "Error-179" + return retVal + elif(bgpArg2 == "med"): + # debugOutput(bgpArg2) + command = command + bgpArg2 + " " + if(bgpArg3 == "confed" or + bgpArg3 == "missing-as-worst" or + bgpArg3 == "non-deterministic" or + bgpArg3 == "remove-recv-med" or + bgpArg3 == "remove-send-med"): + command = command + bgpArg3 + else: + retVal = "Error-180" + return retVal + else: + retVal = "Error-181" + return retVal + + elif(bgpArg1 == "bgp"): + # debugOutput(bgpArg1) + command = command + bgpArg1 + " as-local-count " + value = cnos.checkSanityofVariable( + deviceType, "bgp_bgp_local_count", bgpArg2) + if(value == "ok"): + command = command + bgpArg2 + else: + retVal = "Error-182" + return retVal + + elif(bgpArg1 == "cluster-id"): + # debugOutput(bgpArg1) + command = command + bgpArg1 + " " + value = cnos.checkSanityofVariable(deviceType, "cluster_id_as_ip", bgpArg2) + if(value == "ok"): + command = command + bgpArg2 + else: + value = cnos.checkSanityofVariable( + deviceType, "cluster_id_as_number", bgpArg2) + if(value == "ok"): + command = command + bgpArg2 + else: + retVal = "Error-183" + return retVal + + elif(bgpArg1 == "confederation"): + # debugOutput(bgpArg1) + command = command + bgpArg1 + " " + if(bgpArg2 == "identifier"): + value = cnos.checkSanityofVariable( + deviceType, "confederation_identifier", bgpArg3) + if(value == "ok"): + command = command + bgpArg2 + " " + bgpArg3 + "\n" + else: + retVal = "Error-184" + return retVal + elif(bgpArg2 == "peers"): + value = cnos.checkSanityofVariable( + deviceType, "confederation_peers_as", bgpArg3) + if(value == "ok"): + command = command + bgpArg2 + " " + bgpArg3 + else: + retVal = "Error-185" + return retVal + else: + retVal = "Error-186" + return retVal + + elif(bgpArg1 == "enforce-first-as"): + # debugOutput(bgpArg1) + command = command + bgpArg1 + + elif(bgpArg1 == "fast-external-failover"): + # debugOutput(bgpArg1) + command = command + bgpArg1 + + elif(bgpArg1 == "graceful-restart"): + # debugOutput(bgpArg1) + command = command + bgpArg1 + " stalepath-time " + value = cnos.checkSanityofVariable( + deviceType, "stalepath_delay_value", bgpArg2) + if(value == "ok"): + command = command + bgpArg2 + else: + retVal = "Error-187" + return retVal + + elif(bgpArg1 == "graceful-restart-helper"): + # debugOutput(bgpArg1) + command = command + bgpArg1 + + elif(bgpArg1 == "log-neighbor-changes"): + # debugOutput(bgpArg1) + command = command + bgpArg1 + + elif(bgpArg1 == "maxas-limit"): + # 
debugOutput(bgpArg1) + command = command + bgpArg1 + " " + value = cnos.checkSanityofVariable(deviceType, "maxas_limit_as", bgpArg2) + if(value == "ok"): + command = command + bgpArg2 + else: + retVal = "Error-188" + return retVal + + elif(bgpArg1 == "neighbor"): + # debugOutput(bgpArg1) + command = command + bgpArg1 + " " + value = cnos.checkSanityofVariable( + deviceType, "neighbor_ipaddress", bgpArg2) + if(value == "ok"): + command = command + bgpArg2 + if(bgpArg3 is not None): + command = command + " remote-as " + value = cnos.checkSanityofVariable( + deviceType, "neighbor_as", bgpArg3) + if(value == "ok"): + # debugOutput(command) + command = command + bgpArg3 + inner_cmd = [{'command': command, 'prompt': None, 'answer': None}] + cmd.extend(inner_cmd) + retVal = retVal + bgpNeighborConfig(module, cmd, prompt, answer) + return retVal + else: + retVal = "Error-189" + return retVal + + elif(bgpArg1 == "router-id"): + # debugOutput(bgpArg1) + command = command + bgpArg1 + " " + value = cnos.checkSanityofVariable(deviceType, "router_id", bgpArg2) + if(value == "ok"): + command = command + bgpArg2 + else: + retVal = "Error-190" + return retVal + + elif(bgpArg1 == "shutdown"): + # debugOutput(bgpArg1) + command = command + bgpArg1 + + elif(bgpArg1 == "synchronization"): + # debugOutput(bgpArg1) + command = command + bgpArg1 + + elif(bgpArg1 == "timers"): + # cnos.debugOutput(bgpArg3) + command = command + bgpArg1 + " bgp " + value = cnos.checkSanityofVariable( + deviceType, "bgp_keepalive_interval", bgpArg2) + if(value == "ok"): + command = command + bgpArg2 + else: + retVal = "Error-191" + return retVal + if(bgpArg3 is not None): + value = cnos.checkSanityofVariable(deviceType, "bgp_holdtime", bgpArg3) + if(value == "ok"): + command = command + " " + bgpArg3 + else: + retVal = "Error-192" + return retVal + else: + retVal = "Error-192" + return retVal + + elif(bgpArg1 == "vrf"): + # debugOutput(bgpArg1) + command = command + bgpArg1 + " default" + else: + # debugOutput(bgpArg1) + retVal = "Error-192" + return retVal + # debugOutput(command) + inner_cmd = [{'command': command, 'prompt': None, 'answer': None}] + cmd.extend(inner_cmd) + retVal = retVal + str(cnos.run_cnos_commands(module, cmd)) + command = "exit \n" + # debugOutput(command) + return retVal +# EOM + + +def main(): + module = AnsibleModule( + argument_spec=dict( + outputfile=dict(required=True), + host=dict(required=False), + username=dict(required=False), + password=dict(required=False, no_log=True), + enablePassword=dict(required=False, no_log=True), + deviceType=dict(required=True), + bgpArg1=dict(required=True), + bgpArg2=dict(required=False), + bgpArg3=dict(required=False), + bgpArg4=dict(required=False), + bgpArg5=dict(required=False), + bgpArg6=dict(required=False), + bgpArg7=dict(required=False), + bgpArg8=dict(required=False), + asNum=dict(required=True),), + supports_check_mode=False) + + asNum = module.params['asNum'] + outputfile = module.params['outputfile'] + deviceType = module.params['deviceType'] + output = '' + command = 'router bgp ' + value = cnos.checkSanityofVariable(deviceType, "bgp_as_number", asNum) + if(value == "ok"): + # BGP command happens here. 
It creates the router bgp context if it is not already present.
+        command = command + asNum
+        cmd = [{'command': command, 'prompt': None, 'answer': None}]
+        output = output + bgpConfig(module, cmd, '(config)#', None)
+    else:
+        output = "Error-176"
+    # Save it into the file
+    file = open(outputfile, "a")
+    file.write(output)
+    file.close()
+
+    # Logic to check when changes occur or not
+    errorMsg = cnos.checkOutputForError(output)
+    if(errorMsg is None):
+        module.exit_json(changed=True, msg="BGP configurations accomplished")
+    else:
+        module.fail_json(msg=errorMsg)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/network/cnos/cnos_command.py b/plugins/modules/network/cnos/cnos_command.py
new file mode 100644
index 0000000000..b81812b415
--- /dev/null
+++ b/plugins/modules/network/cnos/cnos_command.py
@@ -0,0 +1,208 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (C) 2017 Red Hat Inc.
+# Copyright (C) 2017 Lenovo.
+#
+# GNU General Public License v3.0+
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+# Module to execute CNOS Commands on Lenovo Switches.
+# Lenovo Networking
+#
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: cnos_command
+author: "Anil Kumar Muraleedharan (@amuraleedhar)"
+short_description: Run arbitrary commands on Lenovo CNOS devices
+description:
+  - Sends arbitrary commands to a CNOS node and returns the results
+    read from the device. The C(cnos_command) module includes an
+    argument that will cause the module to wait for a specific condition
+    before returning or timing out if the condition is not met.
+options:
+  commands:
+    description:
+      - List of commands to send to the remote device.
+        The resulting output from the command is returned.
+        If the I(wait_for) argument is provided, the module does not
+        return until the condition is satisfied or the number of
+        retries has expired.
+    required: true
+  wait_for:
+    description:
+      - List of conditions to evaluate against the output of the
+        command. The task will wait for each condition to be true
+        before moving forward. If the conditional is not true
+        within the configured number of retries, the task fails.
+        See examples.
+  match:
+    description:
+      - The I(match) argument is used in conjunction with the
+        I(wait_for) argument to specify the match policy. Valid
+        values are C(all) or C(any). If the value is set to C(all)
+        then all conditionals in the wait_for must be satisfied. If
+        the value is set to C(any) then only one of the values must be
+        satisfied.
+    default: all
+    choices: ['any', 'all']
+  retries:
+    description:
+      - Specifies the number of retries a command should be tried
+        before it is considered failed. The command is run on the
+        target device every retry and evaluated against the
+        I(wait_for) conditions.
+    default: 10
+  interval:
+    description:
+      - Configures the interval in seconds to wait between retries
+        of the command. If the command does not pass the specified
+        conditions, the interval indicates how long to wait before
+        trying the command again.
+ default: 1 +''' + +EXAMPLES = """ +--- +- name: test contains operator + cnos_command: + commands: + - show version + - show system memory + wait_for: + - "result[0] contains 'Lenovo'" + - "result[1] contains 'MemFree'" + register: result + +- assert: + that: + - "result.changed == false" + - "result.stdout is defined" + +- name: get output for single command + cnos_command: + commands: ['show version'] + register: result + +- assert: + that: + - "result.changed == false" + - "result.stdout is defined" + +- name: get output for multiple commands + cnos_command: + commands: + - show version + - show interface information + register: result + +- assert: + that: + - "result.changed == false" + - "result.stdout is defined" + - "result.stdout | length == 2" +""" + +RETURN = """ +stdout: + description: the set of responses from the commands + returned: always + type: list + sample: ['...', '...'] +stdout_lines: + description: The value of stdout split into a list + returned: always + type: list + sample: [['...', '...'], ['...'], ['...']] +failed_conditions: + description: the conditionals that failed + returned: failed + type: list + sample: ['...', '...'] +""" + +import time + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.network.cnos.cnos import run_commands, check_args +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.parsing import Conditional +from ansible.module_utils.six import string_types + + +def to_lines(stdout): + for item in stdout: + if isinstance(item, string_types): + item = str(item).split('\n') + yield item + + +def main(): + spec = dict( + # { command: , prompt: , response: } + commands=dict(type='list', required=True), + + wait_for=dict(type='list'), + match=dict(default='all', choices=['all', 'any']), + + retries=dict(default=10, type='int'), + interval=dict(default=1, type='int') + ) + + module = AnsibleModule(argument_spec=spec, supports_check_mode=True) + result = {'changed': False} + + wait_for = module.params['wait_for'] or list() + conditionals = [Conditional(c) for c in wait_for] + + commands = module.params['commands'] + retries = module.params['retries'] + interval = module.params['interval'] + match = module.params['match'] + + while retries > 0: + responses = run_commands(module, commands) + + for item in list(conditionals): + if item(responses): + if match == 'any': + conditionals = list() + break + conditionals.remove(item) + + if not conditionals: + break + + time.sleep(interval) + retries -= 1 + + if conditionals: + failed_conditions = [item.raw for item in conditionals] + msg = 'One or more conditional statements have not been satisfied' + module.fail_json(msg=msg, failed_conditions=failed_conditions) + + result.update({ + 'changed': False, + 'stdout': responses, + 'stdout_lines': list(to_lines(responses)) + }) + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/cnos/cnos_conditional_command.py b/plugins/modules/network/cnos/cnos_conditional_command.py new file mode 100644 index 0000000000..4caee7a95f --- /dev/null +++ b/plugins/modules/network/cnos/cnos_conditional_command.py @@ -0,0 +1,168 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type +# +# Copyright (C) 2017 Lenovo, Inc. 
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see .
+#
+# Module to send Conditional CLI commands to Lenovo Switches
+# Lenovo Networking
+#
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: cnos_conditional_command
+author: "Anil Kumar Muraleedharan (@amuraleedhar)"
+short_description: Execute a single command based on condition on devices
+  running Lenovo CNOS
+description:
+  - This module allows you to modify the running configuration of a switch. It
+    provides a way to execute a single CNOS command on a network device by
+    evaluating the current running configuration and executing the command
+    only if the specific settings have not already been configured.
+    The CNOS command is passed as an argument of the method.
+    This module functions the same as the cnos_command module.
+    The only exception is that the following inventory variable can be
+    specified ["condition = "]
+    When this inventory variable is specified as the variable of a task, the
+    command is executed for the network element that matches the flag string.
+    Usually, commands are executed across a group of network devices. When
+    there is a requirement to skip the execution of the command on one or
+    more devices, it is recommended to use this module. This module uses SSH
+    to manage network device configuration.
+extends_documentation_fragment:
+- community.general.cnos
+
+options:
+  clicommand:
+    description:
+      - This specifies the CLI command as an attribute to this method.
+        The command is passed using double quotes. The variables can be
+        placed directly on to the CLI commands or can be invoked
+        from the vars directory.
+    required: true
+    default: Null
+  condition:
+    description:
+      - If you specify condition=false in the inventory file against any
+        device, the command execution is skipped for that device.
+    required: true
+    default: Null
+  flag:
+    description:
+      - If a task needs to be executed, you have to set the flag the same
+        as it is specified in the inventory for that device.
+    required: true
+    default: Null
+
+'''
+EXAMPLES = '''
+Tasks: The following are examples of using the module
+  cnos_conditional_command. These are written in the main.yml file of the
+  tasks directory.
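+  As an illustration only (hypothetical inventory values), a host entry such
+  as "10.240.178.74 condition=leaf_switch2 deviceType=g8272_cnos" makes the
+  task below run only on hosts whose condition value matches the flag.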
+---
+- name: Applying CLI command on VLAG Tier1 Leaf Switch2
+  cnos_conditional_command:
+    deviceType: "{{ hostvars[inventory_hostname]['deviceType'] }}"
+    outputfile: "./results/test_conditional_command_
+                {{ inventory_hostname }}_output.txt"
+    condition: "{{ hostvars[inventory_hostname]['condition']}}"
+    flag: leaf_switch2
+    clicommand: "spanning-tree mode enable"
+
+'''
+RETURN = '''
+msg:
+  description: Success or failure message
+  returned: always
+  type: str
+  sample: "Command Applied"
+'''
+
+import sys
+import time
+import socket
+import array
+import json
+import re
+import os
+try:
+    from ansible_collections.community.general.plugins.module_utils.network.cnos import cnos
+    HAS_LIB = True
+except Exception:
+    HAS_LIB = False
+from ansible.module_utils.basic import AnsibleModule
+from collections import defaultdict
+
+
+def main():
+    module = AnsibleModule(
+        argument_spec=dict(
+            clicommand=dict(required=True),
+            outputfile=dict(required=True),
+            condition=dict(required=True),
+            flag=dict(required=True),
+            host=dict(required=False),
+            deviceType=dict(required=True),
+            username=dict(required=False),
+            password=dict(required=False, no_log=True),
+            enablePassword=dict(required=False,
+                                no_log=True), ), supports_check_mode=False)
+
+    condition = module.params['condition']
+    flag = module.params['flag']
+    cliCommand = module.params['clicommand']
+    outputfile = module.params['outputfile']
+    output = ''
+    if (condition is None or condition != flag):
+        module.exit_json(changed=True, msg="Command Skipped for this switch")
+        return ''
+    # Send the CLI command and the startup-config save as one batch, so the
+    # command itself is not executed a second time alongside the save
+    cmd = [{'command': cliCommand, 'prompt': None, 'answer': None}]
+    save_cmd = [{'command': 'save', 'prompt': None, 'answer': None}]
+    cmd.extend(save_cmd)
+    output = output + str(cnos.run_cnos_commands(module, cmd))
+
+    # Save it into the file
+    path = outputfile.rsplit('/', 1)
+    # cnos.debugOutput(path[0])
+    if not os.path.exists(path[0]):
+        os.makedirs(path[0])
+    file = open(outputfile, "a")
+    file.write(output)
+    file.close()
+
+    # Logic to check when changes occur or not
+    errorMsg = cnos.checkOutputForError(output)
+    if(errorMsg is None):
+        module.exit_json(changed=True,
+                         msg="CLI Command executed and results saved in file ")
+    else:
+        module.fail_json(msg=errorMsg)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/network/cnos/cnos_conditional_template.py b/plugins/modules/network/cnos/cnos_conditional_template.py
new file mode 100644
index 0000000000..244c4fe8de
--- /dev/null
+++ b/plugins/modules/network/cnos/cnos_conditional_template.py
@@ -0,0 +1,188 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+#
+# Copyright (C) 2017 Lenovo, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see .
+# +# Module to send conditional template to Lenovo Switches +# Lenovo Networking +# + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: cnos_conditional_template +author: "Anil Kumar Muraleedharan (@amuraleedhar)" +short_description: Manage switch configuration using templates based on + condition on devices running Lenovo CNOS +description: + - This module allows you to work with the running configuration of a + switch. It provides a way to execute a set of CNOS commands on a switch by + evaluating the current running configuration and executing the commands + only if the specific settings have not been already configured. + The configuration source can be a set of commands or a template written in + the Jinja2 templating language. This module functions the same as the + cnos_template module. The only exception is that the following inventory + variable can be specified. + ["condition = "] + When this inventory variable is specified as the variable of a task, the + template is executed for the network element that matches the flag string. + Usually, templates are used when commands are the same across a group of + network devices. When there is a requirement to skip the execution of the + template on one or more devices, it is recommended to use this module. + This module uses SSH to manage network device configuration. +extends_documentation_fragment: +- community.general.cnos + +options: + commandfile: + description: + - This specifies the path to the CNOS command file which needs to + be applied. This usually comes from the commands folder. Generally + this file is the output of the variables applied on a template + file. So this command is preceded by a template module. The + command file must contain the Ansible keyword + {{ inventory_hostname }} and the condition flag in its filename to + ensure that the command file is unique for each switch and + condition. If this is omitted, the command file will be + overwritten during iteration. For example, + commandfile=./commands/clos_leaf_bgp_ + {{ inventory_hostname }}_LP21_commands.txt + required: true + default: Null + condition: + description: + - If you specify condition= in the inventory file + against any device, the template execution is done for that device + in case it matches the flag setting for that task. + required: true + default: Null + flag: + description: + - If a task needs to be executed, you have to set the flag the same + as it is specified in the inventory for that device. + required: true + default: Null +''' +EXAMPLES = ''' +Tasks : The following are examples of using the module + cnos_conditional_template. These are written in the main.yml file of the + tasks directory. +--- +- name: Applying CLI template on VLAG Tier1 Leaf Switch1 + cnos_conditional_template: + deviceType: "{{ hostvars[inventory_hostname]['deviceType'] }}" + outputfile: "./results/vlag_1tier_leaf_switch1_ + {{ inventory_hostname }}_output.txt" + condition: "{{ hostvars[inventory_hostname]['condition']}}" + flag: "leaf_switch1" + commandfile: "./commands/vlag_1tier_leaf_switch1_ + {{ inventory_hostname }}_commands.txt" + stp_mode1: "disable" + port_range1: "17,18,29,30" + portchannel_interface_number1: 1001 + portchannel_mode1: active + slot_chassis_number1: 1/48 + switchport_mode1: trunk +''' +RETURN = ''' +msg: + description: Success or failure message + returned: always + type: str + sample: "Template Applied." 
+''' + +import sys +import time +import socket +import array +import json +import time +import re +import os +try: + from ansible_collections.community.general.plugins.module_utils.network.cnos import cnos + HAS_LIB = True +except Exception: + HAS_LIB = False +from ansible.module_utils.basic import AnsibleModule +from collections import defaultdict + + +def main(): + module = AnsibleModule( + argument_spec=dict( + commandfile=dict(required=True), + outputfile=dict(required=True), + condition=dict(required=True), + flag=dict(required=True), + host=dict(required=False), + deviceType=dict(required=True), + username=dict(required=False), + password=dict(required=False, no_log=True), + enablePassword=dict(required=False, no_log=True),), + supports_check_mode=False) + + condition = module.params['condition'] + flag = module.params['flag'] + commandfile = module.params['commandfile'] + outputfile = module.params['outputfile'] + + output = '' + if (condition is None or condition != flag): + module.exit_json(changed=True, msg="Template Skipped for this switch") + return " " + # Send commands one by one + f = open(commandfile, "r") + cmd = [] + for line in f: + # Omit the comment lines in template file + if not line.startswith("#"): + # cnos.debugOutput(line) + command = line.strip() + inner_cmd = [{'command': command, 'prompt': None, 'answer': None}] + cmd.extend(inner_cmd) + # Write to memory + save_cmd = [{'command': 'save', 'prompt': None, 'answer': None}] + cmd.extend(save_cmd) + output = output + str(cnos.run_cnos_commands(module, cmd)) + # Write output to file + path = outputfile.rsplit('/', 1) + # cnos.debugOutput(path[0]) + if not os.path.exists(path[0]): + os.makedirs(path[0]) + file = open(outputfile, "a") + file.write(output) + file.close() + + # Logic to check when changes occur or not + errorMsg = cnos.checkOutputForError(output) + if(errorMsg is None): + module.exit_json(changed=True, msg="Template Applied") + else: + module.fail_json(msg=errorMsg) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/cnos/cnos_config.py b/plugins/modules/network/cnos/cnos_config.py new file mode 100644 index 0000000000..abbf95c04d --- /dev/null +++ b/plugins/modules/network/cnos/cnos_config.py @@ -0,0 +1,307 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# (C) 2017 Red Hat Inc. +# Copyright (C) 2017 Lenovo. +# +# GNU General Public License v3.0+ +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# +# Module to configure Lenovo Switches. +# Lenovo Networking +# +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: cnos_config +author: "Anil Kumar Muraleedharan (@amuraleedhar)" +short_description: Manage Lenovo CNOS configuration sections +description: + - Lenovo CNOS configurations use a simple block indent file syntax + for segmenting configuration into sections. This module provides + an implementation for working with CNOS configuration sections in + a deterministic way. +notes: + - Tested against CNOS 10.9.1 +options: + lines: + description: + - The ordered set of commands that should be configured in the + section. 
The commands must be the exact same commands as found + in the device running-config. Be sure to note the configuration + command syntax as some commands are automatically modified by the + device config parser. + aliases: ['commands'] + parents: + description: + - The ordered set of parents that uniquely identify the section + the commands should be checked against. If the parents argument + is omitted, the commands are checked against the set of top + level or global commands. + src: + description: + - Specifies the source path to the file that contains the configuration + or configuration template to load. The path to the source file can + either be the full path on the Ansible control host or a relative + path from the playbook or role root directory. This argument is + mutually exclusive with I(lines), I(parents). + before: + description: + - The ordered set of commands to push on to the command stack if + a change needs to be made. This allows the playbook designer + the opportunity to perform configuration commands prior to pushing + any changes without affecting how the set of commands are matched + against the system. + after: + description: + - The ordered set of commands to append to the end of the command + stack if a change needs to be made. Just like with I(before) this + allows the playbook designer to append a set of commands to be + executed after the command set. + match: + description: + - Instructs the module on the way to perform the matching of + the set of commands against the current device config. If + match is set to I(line), commands are matched line by line. If + match is set to I(strict), command lines are matched with respect + to position. If match is set to I(exact), command lines + must be an equal match. Finally, if match is set to I(none), the + module will not attempt to compare the source configuration with + the running configuration on the remote device. + default: line + choices: ['line', 'strict', 'exact', 'none'] + replace: + description: + - Instructs the module on the way to perform the configuration + on the device. If the replace argument is set to I(line) then + the modified lines are pushed to the device in configuration + mode. If the replace argument is set to I(block) then the entire + command block is pushed to the device in configuration mode if any + line is not correct. + default: line + choices: ['line', 'block', 'config'] + config: + description: + - The module, by default, will connect to the remote device and + retrieve the current running-config to use as a base for comparing + against the contents of source. There are times when it is not + desirable to have the task get the current running-config for + every task in a playbook. The I(config) argument allows the + implementer to pass in the configuration to use as the base + config for comparison. + backup: + description: + - This argument will cause the module to create a full backup of + the current C(running-config) from the remote device before any + changes are made. If the C(backup_options) value is not given, + the backup file is written to the C(backup) folder in the playbook + root directory. If the directory does not exist, it is created. + type: bool + default: 'no' + comment: + description: + - Allows a commit description to be specified to be included + when the configuration is committed. If the configuration is + not changed or committed, this argument is ignored. 
+    default: 'configured by cnos_config'
+  admin:
+    description:
+      - Enters into administration configuration mode for making config
+        changes to the device.
+    type: bool
+    default: 'no'
+  backup_options:
+    description:
+      - This is a dict object containing configurable options related to
+        the backup file path. The value of this option is read only when
+        C(backup) is set to I(yes); if C(backup) is set to I(no), this
+        option is silently ignored.
+    suboptions:
+      filename:
+        description:
+          - The filename to be used to store the backup configuration. If
+            the filename is not given it will be generated based on the
+            hostname, current time and date in format defined by
+            _config.@
+      dir_path:
+        description:
+          - This option provides the path ending with directory name in
+            which the backup configuration file will be stored. If the
+            directory does not exist it will be created first, and the
+            filename is either the value of C(filename) or the default
+            filename as described in the C(filename) option description.
+            If the path value is not given, a I(backup) directory will be
+            created in the current working directory and the backup
+            configuration will be copied to C(filename) within the
+            I(backup) directory.
+        type: path
+    type: dict
+'''
+
+EXAMPLES = """
+Tasks: The following are examples of using the module cnos_config.
+---
+- name: configure top level configuration
+  cnos_config:
+    lines: hostname {{ inventory_hostname }}
+
+- name: configure interface settings
+  cnos_config:
+    lines:
+      - enable
+      - ip ospf enable
+    parents: interface ip 13
+
+- name: load a config from disk and replace the current config
+  cnos_config:
+    src: config.cfg
+    backup: yes
+
+- name: configurable backup path
+  cnos_config:
+    src: config.cfg
+    backup: yes
+    backup_options:
+      filename: backup.cfg
+      dir_path: /home/user
+"""
+
+RETURN = """
+updates:
+  description: The set of commands that will be pushed to the remote device
+  returned: Only when lines is specified.
+ type: list + sample: ['...', '...'] +backup_path: + description: The full path to the backup file + returned: when backup is yes + type: str + sample: /playbooks/ansible/backup/cnos01.2016-07-16@22:28:34 +""" +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.network.cnos.cnos import load_config, get_config +from ansible_collections.community.general.plugins.module_utils.network.cnos.cnos import check_args +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.config import NetworkConfig, dumps + + +DEFAULT_COMMIT_COMMENT = 'configured by cnos_config' + + +def get_running_config(module): + contents = module.params['config'] + if not contents: + contents = get_config(module) + return NetworkConfig(indent=1, contents=contents) + + +def get_candidate(module): + candidate = NetworkConfig(indent=1) + if module.params['src']: + candidate.load(module.params['src']) + elif module.params['lines']: + parents = module.params['parents'] or list() + candidate.add(module.params['lines'], parents=parents) + return candidate + + +def run(module, result): + match = module.params['match'] + replace = module.params['replace'] + replace_config = replace == 'config' + path = module.params['parents'] + comment = module.params['comment'] + admin = module.params['admin'] + check_mode = module.check_mode + + candidate = get_candidate(module) + + if match != 'none' and replace != 'config': + contents = get_running_config(module) + configobj = NetworkConfig(contents=contents, indent=1) + commands = candidate.difference(configobj, path=path, match=match, + replace=replace) + else: + commands = candidate.items + + if commands: + commands = dumps(commands, 'commands').split('\n') + + if any((module.params['lines'], module.params['src'])): + if module.params['before']: + commands[:0] = module.params['before'] + + if module.params['after']: + commands.extend(module.params['after']) + + result['commands'] = commands + + diff = load_config(module, commands) + if diff: + result['diff'] = dict(prepared=diff) + result['changed'] = True + + +def main(): + """main entry point for module execution + """ + backup_spec = dict( + filename=dict(), + dir_path=dict(type='path') + ) + argument_spec = dict( + src=dict(type='path'), + + lines=dict(aliases=['commands'], type='list'), + parents=dict(type='list'), + + before=dict(type='list'), + after=dict(type='list'), + + match=dict(default='line', choices=['line', 'strict', + 'exact', 'none']), + replace=dict(default='line', choices=['line', 'block', 'config']), + + config=dict(), + backup=dict(type='bool', default=False), + backup_options=dict(type='dict', options=backup_spec), + comment=dict(default=DEFAULT_COMMIT_COMMENT), + admin=dict(type='bool', default=False) + ) + + mutually_exclusive = [('lines', 'src'), + ('parents', 'src')] + + required_if = [('match', 'strict', ['lines']), + ('match', 'exact', ['lines']), + ('replace', 'block', ['lines']), + ('replace', 'config', ['src'])] + + module = AnsibleModule(argument_spec=argument_spec, + mutually_exclusive=mutually_exclusive, + required_if=required_if, + supports_check_mode=True) + + warnings = list() + check_args(module, warnings) + + result = dict(changed=False, warnings=warnings) + + if module.params['backup']: + result['__backup__'] = get_config(module) + + run(module, result) + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/cnos/cnos_factory.py 
b/plugins/modules/network/cnos/cnos_factory.py
new file mode 100644
index 0000000000..7aa0ab712a
--- /dev/null
+++ b/plugins/modules/network/cnos/cnos_factory.py
@@ -0,0 +1,116 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+#
+# Copyright (C) 2017 Lenovo, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see .
+#
+# Module to reset Lenovo Switches to factory settings
+# Lenovo Networking
+#
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: cnos_factory
+author: "Anil Kumar Muraleedharan (@amuraleedhar)"
+short_description: Reset the switch startup configuration to default (factory)
+  on devices running Lenovo CNOS.
+description:
+  - This module allows you to reset a switch's startup configuration. The
+    method provides a way to reset the startup configuration to its factory
+    settings. This is helpful when you want to move the switch to another
+    topology as a new network device. This module uses SSH to manage network
+    device configuration. The result of the operation can be viewed in the
+    results directory.
+extends_documentation_fragment:
+- community.general.cnos
+
+options: {}
+
+'''
+EXAMPLES = '''
+Tasks: The following are examples of using the module cnos_factory. These are
+  written in the main.yml file of the tasks directory.
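+  Note: the task below sends "write erase" to the switch and answers "y" at
+  the confirmation prompt, so the startup configuration is cleared on the
+  next reload.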
+--- +- name: Test Reset to factory + cnos_factory: + deviceType: "{{ hostvars[inventory_hostname]['deviceType'] }}" + outputfile: "./results/test_factory_{{ inventory_hostname }}_output.txt" + +''' +RETURN = ''' +msg: + description: Success or failure message + returned: always + type: str + sample: "Switch Startup Config is Reset to factory settings" +''' + +import sys +import time +import socket +import array +import json +import time +import re +try: + from ansible_collections.community.general.plugins.module_utils.network.cnos import cnos + HAS_LIB = True +except Exception: + HAS_LIB = False +from ansible.module_utils.basic import AnsibleModule +from collections import defaultdict + + +def main(): + module = AnsibleModule( + argument_spec=dict( + outputfile=dict(required=True), + host=dict(required=False), + username=dict(required=False), + password=dict(required=False, no_log=True), + enablePassword=dict(required=False, no_log=True), + deviceType=dict(required=True),), + supports_check_mode=False) + + command = 'write erase' + outputfile = module.params['outputfile'] + output = '' + cmd = [{'command': command, 'prompt': '[n]', 'answer': 'y'}] + output = output + str(cnos.run_cnos_commands(module, cmd)) + + # Save it into the file + file = open(outputfile, "a") + file.write(output) + file.close() + + errorMsg = cnos.checkOutputForError(output) + if(errorMsg is None): + module.exit_json(changed=True, + msg="Switch Startup Config is Reset to Factory settings") + else: + module.fail_json(msg=errorMsg) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/cnos/cnos_facts.py b/plugins/modules/network/cnos/cnos_facts.py new file mode 100644 index 0000000000..a3efab537f --- /dev/null +++ b/plugins/modules/network/cnos/cnos_facts.py @@ -0,0 +1,539 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# (C) 2019 Red Hat Inc. +# Copyright (C) 2019 Lenovo. +# +# GNU General Public License v3.0+ +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Module to Collect facts from Lenovo Switches running Lenovo CNOS commands +# Lenovo Networking +# +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: cnos_facts +author: "Anil Kumar Muraleedharan (@amuraleedhar)" +short_description: Collect facts from remote devices running Lenovo CNOS +description: + - Collects a base set of device facts from a remote Lenovo device + running on CNOS. This module prepends all of the + base network fact keys with C(ansible_net_). The facts + module will always collect a base set of facts from the device + and can enable or disable collection of additional facts. +notes: + - Tested against CNOS 10.8.1 +options: + authorize: + description: + - Instructs the module to enter privileged mode on the remote device + before sending any commands. If not specified, the device will + attempt to execute all commands in non-privileged mode. If the value + is not specified in the task, the value of environment variable + C(ANSIBLE_NET_AUTHORIZE) will be used instead. 
+  auth_pass:
+    description:
+      - Specifies the password to use if required to enter privileged mode
+        on the remote device. If I(authorize) is false, then this argument
+        does nothing. If the value is not specified in the task, the value of
+        environment variable C(ANSIBLE_NET_AUTH_PASS) will be used instead.
+  gather_subset:
+    description:
+      - When supplied, this argument restricts the facts collected
+        to the given subset. Possible values for this argument include
+        all, hardware, config, and interfaces. You can specify a list of
+        values to include a larger subset. Values can also be given
+        with an initial C(!) to specify that a specific subset should
+        not be collected.
+    required: false
+    default: '!config'
+'''
+EXAMPLES = '''
+Tasks: The following are examples of using the module cnos_facts.
+---
+- name: Test cnos Facts
+  cnos_facts:
+
+---
+# Collect all facts from the device
+- cnos_facts:
+    gather_subset: all
+
+# Collect only the config and default facts
+- cnos_facts:
+    gather_subset:
+      - config
+
+# Do not collect hardware facts
+- cnos_facts:
+    gather_subset:
+      - "!hardware"
+'''
+RETURN = '''
+  ansible_net_gather_subset:
+    description: The list of fact subsets collected from the device
+    returned: always
+    type: list
+# default
+  ansible_net_model:
+    description: The model name returned from the Lenovo CNOS device
+    returned: always
+    type: str
+  ansible_net_serialnum:
+    description: The serial number of the Lenovo CNOS device
+    returned: always
+    type: str
+  ansible_net_version:
+    description: The CNOS operating system version running on the remote device
+    returned: always
+    type: str
+  ansible_net_hostname:
+    description: The configured hostname of the device
+    returned: always
+    type: str
+  ansible_net_image:
+    description: Indicates the active image for the device
+    returned: always
+    type: str
+# hardware
+  ansible_net_memfree_mb:
+    description: The available free memory on the remote device in MB
+    returned: when hardware is configured
+    type: int
+# config
+  ansible_net_config:
+    description: The current active config from the device
+    returned: when config is configured
+    type: str
+# interfaces
+  ansible_net_all_ipv4_addresses:
+    description: All IPv4 addresses configured on the device
+    returned: when interfaces is configured
+    type: list
+  ansible_net_all_ipv6_addresses:
+    description: All IPv6 addresses configured on the device
+    returned: when interfaces is configured
+    type: list
+  ansible_net_interfaces:
+    description: A hash of all interfaces running on the system.
+ This gives information on description, mac address, mtu, speed, + duplex and operstatus + returned: when interfaces is configured + type: dict + ansible_net_neighbors: + description: The list of LLDP neighbors from the remote device + returned: when interfaces is configured + type: dict +''' + +import re + +from ansible_collections.community.general.plugins.module_utils.network.cnos.cnos import run_commands +from ansible_collections.community.general.plugins.module_utils.network.cnos.cnos import check_args +from ansible.module_utils._text import to_text +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.six import iteritems +from ansible.module_utils.six.moves import zip + + +class FactsBase(object): + + COMMANDS = list() + + def __init__(self, module): + self.module = module + self.facts = dict() + self.responses = None + self.PERSISTENT_COMMAND_TIMEOUT = 60 + + def populate(self): + self.responses = run_commands(self.module, self.COMMANDS, + check_rc=False) + + def run(self, cmd): + return run_commands(self.module, cmd, check_rc=False) + + +class Default(FactsBase): + + COMMANDS = ['show sys-info', 'show running-config'] + + def populate(self): + super(Default, self).populate() + data = self.responses[0] + data_run = self.responses[1] + if data: + self.facts['version'] = self.parse_version(data) + self.facts['serialnum'] = self.parse_serialnum(data) + self.facts['model'] = self.parse_model(data) + self.facts['image'] = self.parse_image(data) + if data_run: + self.facts['hostname'] = self.parse_hostname(data_run) + + def parse_version(self, data): + for line in data.split('\n'): + line = line.strip() + match = re.match(r'System Software Revision (.*?)', + line, re.M | re.I) + if match: + vers = line.split(':') + ver = vers[1].strip() + return ver + return "NA" + + def parse_hostname(self, data_run): + for line in data_run.split('\n'): + line = line.strip() + match = re.match(r'hostname (.*?)', line, re.M | re.I) + if match: + hosts = line.split() + hostname = hosts[1].strip('\"') + return hostname + return "NA" + + def parse_model(self, data): + for line in data.split('\n'): + line = line.strip() + match = re.match(r'System Model (.*?)', line, re.M | re.I) + if match: + mdls = line.split(':') + mdl = mdls[1].strip() + return mdl + return "NA" + + def parse_image(self, data): + match = re.search(r'(.*) image(.*)', data, re.M | re.I) + if match: + return "Image1" + else: + return "Image2" + + def parse_serialnum(self, data): + for line in data.split('\n'): + line = line.strip() + match = re.match(r'System Serial Number (.*?)', line, re.M | re.I) + if match: + serNums = line.split(':') + ser = serNums[1].strip() + return ser + return "NA" + + +class Hardware(FactsBase): + + COMMANDS = [ + 'show running-config' + ] + + def populate(self): + super(Hardware, self).populate() + data = self.run(['show process memory']) + data = to_text(data, errors='surrogate_or_strict').strip() + data = data.replace(r"\n", "\n") + if data: + for line in data.split('\n'): + line = line.strip() + match = re.match(r'Mem: (.*?)', line, re.M | re.I) + if match: + memline = line.split(':') + mems = memline[1].strip().split() + self.facts['memtotal_mb'] = int(mems[0]) / 1024 + self.facts['memused_mb'] = int(mems[1]) / 1024 + self.facts['memfree_mb'] = int(mems[2]) / 1024 + self.facts['memshared_mb'] = int(mems[3]) / 1024 + self.facts['memavailable_mb'] = int(mems[5]) / 1024 + + def parse_memtotal(self, data): + match = re.search(r'^MemTotal:\s*(.*) kB', data, re.M | re.I) + if match: + 
return int(match.group(1)) / 1024 + + def parse_memfree(self, data): + match = re.search(r'^MemFree:\s*(.*) kB', data, re.M | re.I) + if match: + return int(match.group(1)) / 1024 + + +class Config(FactsBase): + + COMMANDS = ['show running-config'] + + def populate(self): + super(Config, self).populate() + data = self.responses[0] + if data: + self.facts['config'] = data + + +class Interfaces(FactsBase): + + COMMANDS = ['show interface brief'] + + def populate(self): + super(Interfaces, self).populate() + + self.facts['all_ipv4_addresses'] = list() + self.facts['all_ipv6_addresses'] = list() + + data1 = self.run(['show interface status']) + data1 = to_text(data1, errors='surrogate_or_strict').strip() + data1 = data1.replace(r"\n", "\n") + data2 = self.run(['show interface mac-address']) + data2 = to_text(data2, errors='surrogate_or_strict').strip() + data2 = data2.replace(r"\n", "\n") + lines1 = None + lines2 = None + if data1: + lines1 = self.parse_interfaces(data1) + if data2: + lines2 = self.parse_interfaces(data2) + if lines1 is not None and lines2 is not None: + self.facts['interfaces'] = self.populate_interfaces(lines1, lines2) + data3 = self.run(['show lldp neighbors']) + data3 = to_text(data3, errors='surrogate_or_strict').strip() + data3 = data3.replace(r"\n", "\n") + if data3: + lines3 = self.parse_neighbors(data3) + if lines3 is not None: + self.facts['neighbors'] = self.populate_neighbors(lines3) + + data4 = self.run(['show ip interface brief vrf all']) + data5 = self.run(['show ipv6 interface brief vrf all']) + data4 = to_text(data4, errors='surrogate_or_strict').strip() + data4 = data4.replace(r"\n", "\n") + data5 = to_text(data5, errors='surrogate_or_strict').strip() + data5 = data5.replace(r"\n", "\n") + lines4 = None + lines5 = None + if data4: + lines4 = self.parse_ipaddresses(data4) + ipv4_interfaces = self.set_ip_interfaces(lines4) + self.facts['all_ipv4_addresses'] = ipv4_interfaces + if data5: + lines5 = self.parse_ipaddresses(data5) + ipv6_interfaces = self.set_ipv6_interfaces(lines5) + self.facts['all_ipv6_addresses'] = ipv6_interfaces + + def parse_ipaddresses(self, data): + parsed = list() + for line in data.split('\n'): + if len(line) == 0: + continue + else: + line = line.strip() + match = re.match(r'^(Ethernet+)', line) + if match: + key = match.group(1) + parsed.append(line) + match = re.match(r'^(po+)', line) + if match: + key = match.group(1) + parsed.append(line) + match = re.match(r'^(mgmt+)', line) + if match: + key = match.group(1) + parsed.append(line) + match = re.match(r'^(loopback+)', line) + if match: + key = match.group(1) + parsed.append(line) + return parsed + + def populate_interfaces(self, lines1, lines2): + interfaces = dict() + for line1, line2 in zip(lines1, lines2): + line = line1 + " " + line2 + intfSplit = line.split() + innerData = dict() + innerData['description'] = intfSplit[1].strip() + innerData['macaddress'] = intfSplit[8].strip() + innerData['type'] = intfSplit[6].strip() + innerData['speed'] = intfSplit[5].strip() + innerData['duplex'] = intfSplit[4].strip() + innerData['operstatus'] = intfSplit[2].strip() + interfaces[intfSplit[0].strip()] = innerData + return interfaces + + def parse_interfaces(self, data): + parsed = list() + for line in data.split('\n'): + if len(line) == 0: + continue + else: + line = line.strip() + match = re.match(r'^(Ethernet+)', line) + if match: + key = match.group(1) + parsed.append(line) + match = re.match(r'^(po+)', line) + if match: + key = match.group(1) + parsed.append(line) + match = 
re.match(r'^(mgmt+)', line) + if match: + key = match.group(1) + parsed.append(line) + return parsed + + def set_ip_interfaces(self, line4): + ipv4_addresses = list() + for line in line4: + ipv4Split = line.split() + if 'Ethernet' in ipv4Split[0]: + ipv4_addresses.append(ipv4Split[1]) + if 'mgmt' in ipv4Split[0]: + ipv4_addresses.append(ipv4Split[1]) + if 'po' in ipv4Split[0]: + ipv4_addresses.append(ipv4Split[1]) + if 'loopback' in ipv4Split[0]: + ipv4_addresses.append(ipv4Split[1]) + return ipv4_addresses + + def set_ipv6_interfaces(self, line4): + ipv6_addresses = list() + for line in line4: + ipv6Split = line.split() + if 'Ethernet' in ipv6Split[0]: + ipv6_addresses.append(ipv6Split[1]) + if 'mgmt' in ipv6Split[0]: + ipv6_addresses.append(ipv6Split[1]) + if 'po' in ipv6Split[0]: + ipv6_addresses.append(ipv6Split[1]) + if 'loopback' in ipv6Split[0]: + ipv6_addresses.append(ipv6Split[1]) + return ipv6_addresses + + def populate_neighbors(self, lines3): + neighbors = dict() + device_name = '' + for line in lines3: + neighborSplit = line.split() + innerData = dict() + count = len(neighborSplit) + if count == 5: + local_interface = neighborSplit[1].strip() + innerData['Device Name'] = neighborSplit[0].strip() + innerData['Hold Time'] = neighborSplit[2].strip() + innerData['Capability'] = neighborSplit[3].strip() + innerData['Remote Port'] = neighborSplit[4].strip() + neighbors[local_interface] = innerData + elif count == 4: + local_interface = neighborSplit[0].strip() + innerData['Hold Time'] = neighborSplit[1].strip() + innerData['Capability'] = neighborSplit[2].strip() + innerData['Remote Port'] = neighborSplit[3].strip() + neighbors[local_interface] = innerData + return neighbors + + def parse_neighbors(self, neighbors): + parsed = list() + for line in neighbors.split('\n'): + if len(line) == 0: + continue + else: + line = line.strip() + if 'Ethernet' in line: + parsed.append(line) + if 'mgmt' in line: + parsed.append(line) + if 'po' in line: + parsed.append(line) + if 'loopback' in line: + parsed.append(line) + return parsed + + +FACT_SUBSETS = dict( + default=Default, + hardware=Hardware, + interfaces=Interfaces, + config=Config, +) + +VALID_SUBSETS = frozenset(FACT_SUBSETS.keys()) + +PERSISTENT_COMMAND_TIMEOUT = 60 + + +def main(): + """main entry point for module execution + """ + argument_spec = dict( + gather_subset=dict(default=['!config'], type='list') + ) + + module = AnsibleModule(argument_spec=argument_spec, + supports_check_mode=True) + + gather_subset = module.params['gather_subset'] + + runable_subsets = set() + exclude_subsets = set() + + for subset in gather_subset: + if subset == 'all': + runable_subsets.update(VALID_SUBSETS) + continue + + if subset.startswith('!'): + subset = subset[1:] + if subset == 'all': + exclude_subsets.update(VALID_SUBSETS) + continue + exclude = True + else: + exclude = False + + if subset not in VALID_SUBSETS: + module.fail_json(msg='Bad subset') + + if exclude: + exclude_subsets.add(subset) + else: + runable_subsets.add(subset) + + if not runable_subsets: + runable_subsets.update(VALID_SUBSETS) + + runable_subsets.difference_update(exclude_subsets) + runable_subsets.add('default') + + facts = dict() + facts['gather_subset'] = list(runable_subsets) + + instances = list() + for key in runable_subsets: + instances.append(FACT_SUBSETS[key](module)) + + for inst in instances: + inst.populate() + facts.update(inst.facts) + + ansible_facts = dict() + for key, value in iteritems(facts): + key = 'ansible_net_%s' % key + ansible_facts[key] = value + + 
warnings = list() + check_args(module, warnings) + + module.exit_json(ansible_facts=ansible_facts, warnings=warnings) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/cnos/cnos_image.py b/plugins/modules/network/cnos/cnos_image.py new file mode 100644 index 0000000000..0ab3d809a5 --- /dev/null +++ b/plugins/modules/network/cnos/cnos_image.py @@ -0,0 +1,241 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type +# +# Copyright (C) 2017 Lenovo, Inc. +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# +# Module to download new image to Lenovo Switches +# Lenovo Networking +# + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: cnos_image +author: "Anil Kumar Muraleedharan (@amuraleedhar)" +short_description: Perform firmware upgrade/download from a remote server on + devices running Lenovo CNOS +description: + - This module allows you to work with switch firmware images. It provides a + way to download a firmware image to a network device from a remote server + using FTP, SFTP, TFTP, or SCP. The first step is to create a directory + from where the remote server can be reached. The next step is to provide + the full file path of the image's location. Authentication details + required by the remote server must be provided as well. By default, this + method makes the newly downloaded firmware image the active image, which + will be used by the switch during the next restart. + This module uses SSH to manage network device configuration. + The results of the operation will be placed in a directory named 'results' + that must be created by the user in their local directory to where the + playbook is run. +extends_documentation_fragment: +- community.general.cnos + +options: + protocol: + description: + - This refers to the protocol used by the network device to + interact with the remote server from where to download the + firmware image. The choices are FTP, SFTP, TFTP, or SCP. Any other + protocols will result in error. If this parameter is not specified + there is no default value to be used. + required: true + choices: [SFTP, SCP, FTP, TFTP] + serverip: + description: + - This specifies the IP Address of the remote server from where the + software image will be downloaded. + required: true + imgpath: + description: + - This specifies the full file path of the image located on the + remote server. In case the relative path is used as the variable + value, the root folder for the user of the server needs to be + specified. 
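A note on how these options are consumed: doImageDownload() later in this
file concatenates them into a single CNOS copy command and pairs it with a
protocol-specific prompt/answer chain. A sketch of the command string it
builds (the server address, path, and credentials here are placeholders, not
defaults):

    # Sketch of the device command assembled in doImageDownload() below.
    protocol = 'sftp'                        # module.params['protocol'].lower()
    username = 'admin'                       # placeholder credentials
    server = '192.0.2.10'                    # placeholder server address
    imgPath = 'cnos_images/G8272-10.1.0.112.img'
    imgType = 'os'

    command = ('copy %s %s://%s@%s/%s system-image %s vrf management'
               % (protocol, protocol, username, server, imgPath, imgType))
    # -> 'copy sftp sftp://admin@192.0.2.10/cnos_images/G8272-10.1.0.112.img
    #     system-image os vrf management'

TFTP omits the username@ part, as the tftp branch of doImageDownload() shows.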
+ required: true + imgtype: + description: + - This specifies the firmware image type to be downloaded + required: true + choices: [all, boot, os, onie] + serverusername: + description: + - Specify the username for the server relating to the protocol used + required: true + serverpassword: + description: + - Specify the password for the server relating to the protocol used +''' +EXAMPLES = ''' +Tasks : The following are examples of using the module cnos_image. These are + written in the main.yml file of the tasks directory. +--- +- name: Test Image transfer + cnos_image: + deviceType: "{{ hostvars[inventory_hostname]['deviceType'] }}" + outputfile: "./results/test_image_{{ inventory_hostname }}_output.txt" + protocol: "sftp" + serverip: "10.241.106.118" + imgpath: "/root/cnos_images/G8272-10.1.0.112.img" + imgtype: "os" + serverusername: "root" + serverpassword: "root123" + +- name: Test Image tftp + cnos_image: + deviceType: "{{ hostvars[inventory_hostname]['deviceType'] }}" + outputfile: "./results/test_image_{{ inventory_hostname }}_output.txt" + protocol: "tftp" + serverip: "10.241.106.118" + imgpath: "/anil/G8272-10.2.0.34.img" + imgtype: "os" + serverusername: "root" + serverpassword: "root123" +''' +RETURN = ''' +msg: + description: Success or failure message + returned: always + type: str + sample: "Image file transferred to device" +''' + +import sys +import time +import socket +import array +import json +import time +import re +import os +try: + from ansible_collections.community.general.plugins.module_utils.network.cnos import cnos + HAS_LIB = True +except Exception: + HAS_LIB = False +from ansible.module_utils.basic import AnsibleModule +from collections import defaultdict + + +def doImageDownload(module, prompt, answer): + protocol = module.params['protocol'].lower() + server = module.params['serverip'] + imgPath = module.params['imgpath'] + imgType = module.params['imgtype'] + username = module.params['serverusername'] + password = module.params['serverpassword'] + retVal = '' + command = "copy " + protocol + " " + protocol + "://" + username + "@" + command = command + server + "/" + imgPath + " system-image " + command = command + imgType + " vrf management" + cmd = [] + if(protocol == "scp"): + prompt = ['timeout', 'Confirm download operation', 'Password', + 'Do you want to change that to the standby image'] + answer = ['240', 'y', password, 'y'] + scp_cmd = [{'command': command, 'prompt': prompt, 'answer': answer, + 'check_all': True}] + cmd.extend(scp_cmd) + retVal = retVal + str(cnos.run_cnos_commands(module, cmd)) + elif(protocol == "sftp"): + prompt = ['Confirm download operation', 'Password', + 'Do you want to change that to the standby image'] + answer = ['y', password, 'y'] + sftp_cmd = [{'command': command, 'prompt': prompt, 'answer': answer, + 'check_all': True}] + cmd.extend(sftp_cmd) + retVal = retVal + str(cnos.run_cnos_commands(module, cmd)) + elif(protocol == "ftp"): + prompt = ['Confirm download operation', 'Password', + 'Do you want to change that to the standby image'] + answer = ['y', password, 'y'] + ftp_cmd = [{'command': command, 'prompt': prompt, 'answer': answer, + 'check_all': True}] + cmd.extend(ftp_cmd) + retVal = retVal + str(cnos.run_cnos_commands(module, cmd)) + elif(protocol == "tftp"): + command = "copy " + protocol + " " + protocol + "://" + server + command = command + "/" + imgPath + " system-image " + imgType + command = command + " vrf management" + prompt = ['Confirm download operation', + 'Do you want to change that to the standby 
image'] + answer = ['y', 'y'] + tftp_cmd = [{'command': command, 'prompt': prompt, 'answer': answer, + 'check_all': True}] + cmd.extend(tftp_cmd) + retVal = retVal + str(cnos.run_cnos_commands(module, cmd)) + else: + return "Error-110" + + return retVal +# EOM + + +def main(): + module = AnsibleModule( + argument_spec=dict( + outputfile=dict(required=True), + host=dict(required=False), + username=dict(required=False), + password=dict(required=False, no_log=True), + enablePassword=dict(required=False, no_log=True), + deviceType=dict(required=True), + protocol=dict(required=True), + serverip=dict(required=True), + imgpath=dict(required=True), + imgtype=dict(required=True), + serverusername=dict(required=False), + serverpassword=dict(required=False, no_log=True),), + supports_check_mode=False) + + outputfile = module.params['outputfile'] + protocol = module.params['protocol'].lower() + output = '' + + # Invoke method for image transfer from server + if(protocol == "tftp" or protocol == "ftp" or protocol == "sftp" or + protocol == "scp"): + transfer_status = doImageDownload(module, None, None) + else: + transfer_status = "Invalid Protocol option" + + output = output + "\n Image Transfer status \n" + transfer_status + + # Save it into the file + path = outputfile.rsplit('/', 1) + if not os.path.exists(path[0]): + os.makedirs(path[0]) + file = open(outputfile, "a") + file.write(output) + file.close() + + # Logic to check when changes occur or not + errorMsg = cnos.checkOutputForError(output) + if(errorMsg is None): + module.exit_json(changed=True, msg="Image file transferred to device") + else: + module.fail_json(msg=errorMsg) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/cnos/cnos_interface.py b/plugins/modules/network/cnos/cnos_interface.py new file mode 100644 index 0000000000..7d32044cb6 --- /dev/null +++ b/plugins/modules/network/cnos/cnos_interface.py @@ -0,0 +1,555 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (C) 2017 Lenovo, Inc. +# (c) 2017, Ansible by Red Hat, inc +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# +# Module to work on Interfaces with Lenovo Switches +# Lenovo Networking +# + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: cnos_interface +author: "Anil Kumar Muraleedharan(@amuraleedhar)" +short_description: Manage Interface on Lenovo CNOS network devices +description: + - This module provides declarative management of Interfaces + on Lenovo CNOS network devices. +notes: + - Tested against CNOS 10.8.1 +options: + name: + description: + - Name of the Interface. + required: true + description: + description: + - Description of Interface. + enabled: + description: + - Interface link status. 
+ type: bool + default: True + speed: + description: + - Interface link speed. + mtu: + description: + - Maximum size of transmit packet. + duplex: + description: + - Interface link status + default: auto + choices: ['full', 'half', 'auto'] + tx_rate: + description: + - Transmit rate in bits per second (bps). + - This is state check parameter only. + - Supports conditionals, see L(Conditionals in Networking Modules, + ../network/user_guide/network_working_with_command_output.html) + rx_rate: + description: + - Receiver rate in bits per second (bps). + - This is state check parameter only. + - Supports conditionals, see L(Conditionals in Networking Modules, + ../network/user_guide/network_working_with_command_output.html) + neighbors: + description: + - Check operational state of given interface C(name) for LLDP neighbor. + - The following suboptions are available. + suboptions: + host: + description: + - "LLDP neighbor host for given interface C(name)." + port: + description: + - "LLDP neighbor port to which interface C(name) is connected." + aggregate: + description: List of Interfaces definitions. + delay: + description: + - Time in seconds to wait before checking for the operational state on + remote device. This wait is applicable for operational state argument + which are I(state) with values C(up)/C(down), I(tx_rate) and I(rx_rate) + default: 20 + state: + description: + - State of the Interface configuration, C(up) means present and + operationally up and C(down) means present and operationally C(down) + default: present + choices: ['present', 'absent', 'up', 'down'] + provider: + description: + - B(Deprecated) + - "Starting with Ansible 2.5 we recommend using C(connection: network_cli)." + - For more information please see the L(CNOS Platform Options guide, ../network/user_guide/platform_cnos.html). + - HORIZONTALLINE + - A dict object containing connection details. + suboptions: + host: + description: + - Specifies the DNS host name or address for connecting to the remote + device over the specified transport. The value of host is used as + the destination address for the transport. + required: true + port: + description: + - Specifies the port to use when building the connection to the remote device. + default: 22 + username: + description: + - Configures the username to use to authenticate the connection to + the remote device. This value is used to authenticate + the SSH session. If the value is not specified in the task, the + value of environment variable C(ANSIBLE_NET_USERNAME) will be used instead. + password: + description: + - Specifies the password to use to authenticate the connection to + the remote device. This value is used to authenticate + the SSH session. If the value is not specified in the task, the + value of environment variable C(ANSIBLE_NET_PASSWORD) will be used instead. + timeout: + description: + - Specifies the timeout in seconds for communicating with the network device + for either connecting or sending commands. If the timeout is + exceeded before the operation is completed, the module will error. + default: 10 + ssh_keyfile: + description: + - Specifies the SSH key to use to authenticate the connection to + the remote device. This value is the path to the + key used to authenticate the SSH session. If the value is not specified + in the task, the value of environment variable C(ANSIBLE_NET_SSH_KEYFILE) + will be used instead. 
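The tx_rate and rx_rate options above take conditional expressions such as
ge(0) or le(0); check_declarative_intent_params() later in this module hands
them to the netcommon conditional() helper together with the rate parsed from
`show interface`. A toy model of the comparison semantics, for intuition only
(netcommon's real implementation differs):

    # Toy model of intent conditionals like 'tx_rate: ge(0)' -- illustrative only.
    import operator
    import re

    OPS = {'eq': operator.eq, 'neq': operator.ne, 'gt': operator.gt,
           'ge': operator.ge, 'lt': operator.lt, 'le': operator.le}

    def check(expr, value):
        name, operand = re.match(r'(\w+)\((.+)\)$', expr).groups()
        return OPS[name](int(value), int(operand))

    check('ge(0)', '1500')   # True: the interface is transmitting
    check('le(0)', '1500')   # False: reported back as a failed condition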
+ authorize: + description: + - Instructs the module to enter privileged mode on the remote device + before sending any commands. If not specified, the device will + attempt to execute all commands in non-privileged mode. If the value + is not specified in the task, the value of environment variable + C(ANSIBLE_NET_AUTHORIZE) will be used instead. + type: bool + default: 'no' + auth_pass: + description: + - Specifies the password to use if required to enter privileged mode + on the remote device. If I(authorize) is false, then this argument + does nothing. If the value is not specified in the task, the value of + environment variable C(ANSIBLE_NET_AUTH_PASS) will be used instead. +''' + +EXAMPLES = """ +- name: configure interface + cnos_interface: + name: Ethernet1/33 + description: test-interface + speed: 100 + duplex: half + mtu: 999 + +- name: remove interface + cnos_interface: + name: loopback3 + state: absent + +- name: make interface up + cnos_interface: + name: Ethernet1/33 + enabled: True + +- name: make interface down + cnos_interface: + name: Ethernet1/33 + enabled: False + +- name: Check intent arguments + cnos_interface: + name: Ethernet1/33 + state: up + tx_rate: ge(0) + rx_rate: le(0) + +- name: Check neighbors intent arguments + cnos_interface: + name: Ethernet1/33 + neighbors: + - port: eth0 + host: netdev + +- name: Config + intent + cnos_interface: + name: Ethernet1/33 + enabled: False + state: down + +- name: Add interface using aggregate + cnos_interface: + aggregate: + - { name: Ethernet1/33, mtu: 256, description: test-interface-1 } + - { name: Ethernet1/44, mtu: 516, description: test-interface-2 } + duplex: full + speed: 100 + state: present + +- name: Delete interface using aggregate + cnos_interface: + aggregate: + - name: loopback3 + - name: loopback6 + state: absent +""" + +RETURN = """ +commands: + description: The list of configuration mode commands to send to the device. + returned: always, except for the platforms that use Netconf transport to + manage the device. 
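For context on where this list comes from: map_params_to_obj() below builds
the desired (want) state, map_config_to_obj() parses the running config into
have, and map_obj_to_commands() emits only the settings that differ. A
stripped-down sketch of that diff step (the full version also handles
shutdown/no shutdown and absent interfaces):

    # Stripped-down sketch of the want/have diff in map_obj_to_commands() below.
    def diff_to_commands(want, have, attrs=('speed', 'description', 'duplex', 'mtu')):
        commands = []
        for attr in attrs:
            desired = want.get(attr)
            if desired is not None and desired != have.get(attr):
                commands.append('%s %s' % (attr, desired))
        if commands:                       # only touch the interface if needed
            commands.insert(0, 'interface %s' % want['name'])
        return commands

    diff_to_commands({'name': 'Ethernet1/33', 'mtu': '1500'},
                     {'name': 'Ethernet1/33', 'mtu': '9216'})
    # -> ['interface Ethernet1/33', 'mtu 1500']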
+ type: list + sample: + - interface Ethernet1/33 + - description test-interface + - duplex half + - mtu 512 +""" +import re + +from copy import deepcopy +from time import sleep + +from ansible.module_utils._text import to_text +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.connection import exec_command +from ansible_collections.community.general.plugins.module_utils.network.cnos.cnos import get_config, load_config +from ansible_collections.community.general.plugins.module_utils.network.cnos.cnos import cnos_argument_spec +from ansible_collections.community.general.plugins.module_utils.network.cnos.cnos import debugOutput, check_args +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.config import NetworkConfig +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import conditional +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import remove_default_spec + + +def validate_mtu(value, module): + if value and not 64 <= int(value) <= 9216: + module.fail_json(msg='mtu must be between 64 and 9216') + + +def validate_param_values(module, obj, param=None): + if param is None: + param = module.params + for key in obj: + # validate the param value (if validator func exists) + validator = globals().get('validate_%s' % key) + if callable(validator): + validator(param.get(key), module) + + +def parse_shutdown(configobj, name): + cfg = configobj['interface %s' % name] + cfg = '\n'.join(cfg.children) + match = re.search(r'^shutdown', cfg, re.M) + if match: + return True + else: + return False + + +def parse_config_argument(configobj, name, arg=None): + cfg = configobj['interface %s' % name] + cfg = '\n'.join(cfg.children) + match = re.search(r'%s (.+)$' % arg, cfg, re.M) + if match: + return match.group(1) + + +def search_obj_in_list(name, lst): + for o in lst: + if o['name'] == name: + return o + + return None + + +def add_command_to_interface(interface, cmd, commands): + if interface not in commands: + commands.append(interface) + commands.append(cmd) + + +def map_config_to_obj(module): + config = get_config(module) + configobj = NetworkConfig(indent=1, contents=config) + + match = re.findall(r'^interface (\S+)', config, re.M) + if not match: + return list() + + instances = list() + + for item in set(match): + obj = { + 'name': item, + 'description': parse_config_argument(configobj, item, 'description'), + 'speed': parse_config_argument(configobj, item, 'speed'), + 'duplex': parse_config_argument(configobj, item, 'duplex'), + 'mtu': parse_config_argument(configobj, item, 'mtu'), + 'disable': True if parse_shutdown(configobj, item) else False, + 'state': 'present' + } + instances.append(obj) + return instances + + +def map_params_to_obj(module): + obj = [] + aggregate = module.params.get('aggregate') + if aggregate: + for item in aggregate: + for key in item: + if item.get(key) is None: + item[key] = module.params[key] + + validate_param_values(module, item, item) + d = item.copy() + + if d['enabled']: + d['disable'] = False + else: + d['disable'] = True + + obj.append(d) + + else: + params = { + 'name': module.params['name'], + 'description': module.params['description'], + 'speed': module.params['speed'], + 'mtu': module.params['mtu'], + 'duplex': module.params['duplex'], + 'state': module.params['state'], + 'delay': module.params['delay'], + 'tx_rate': module.params['tx_rate'], + 'rx_rate': module.params['rx_rate'], + 'neighbors': module.params['neighbors'] + } + + 
validate_param_values(module, params) + if module.params['enabled']: + params.update({'disable': False}) + else: + params.update({'disable': True}) + + obj.append(params) + return obj + + +def map_obj_to_commands(updates): + commands = list() + want, have = updates + + args = ('speed', 'description', 'duplex', 'mtu') + for w in want: + name = w['name'] + disable = w['disable'] + state = w['state'] + + obj_in_have = search_obj_in_list(name, have) + interface = 'interface ' + name + if state == 'absent' and obj_in_have: + commands.append('no ' + interface) + elif state in ('present', 'up', 'down'): + if obj_in_have: + for item in args: + candidate = w.get(item) + running = obj_in_have.get(item) + if candidate != running: + if candidate: + cmd = item + ' ' + str(candidate) + add_command_to_interface(interface, cmd, commands) + + if disable and not obj_in_have.get('disable', False): + add_command_to_interface(interface, 'shutdown', commands) + elif not disable and obj_in_have.get('disable', False): + add_command_to_interface(interface, 'no shutdown', commands) + else: + commands.append(interface) + for item in args: + value = w.get(item) + if value: + commands.append(item + ' ' + str(value)) + + if disable: + commands.append('no shutdown') + return commands + + +def check_declarative_intent_params(module, want, result): + failed_conditions = [] + have_neighbors_lldp = None + for w in want: + want_state = w.get('state') + want_tx_rate = w.get('tx_rate') + want_rx_rate = w.get('rx_rate') + want_neighbors = w.get('neighbors') + + if want_state not in ('up', 'down') and not want_tx_rate and not want_rx_rate and not want_neighbors: + continue + + if result['changed']: + sleep(w['delay']) + + command = 'show interface %s brief' % w['name'] + rc, out, err = exec_command(module, command) + if rc != 0: + module.fail_json(msg=to_text(err, errors='surrogate_then_replace'), command=command, rc=rc) + if want_state in ('up', 'down'): + state_data = out.strip().lower().split(w['name']) + have_state = None + have_state = state_data[1].split()[3] + if have_state is None or not conditional(want_state, have_state.strip()): + failed_conditions.append('state ' + 'eq(%s)' % want_state) + + command = 'show interface %s' % w['name'] + rc, out, err = exec_command(module, command) + have_tx_rate = None + have_rx_rate = None + rates = out.splitlines() + for s in rates: + s = s.strip() + if 'output rate' in s and 'input rate' in s: + sub = s.split() + if want_tx_rate: + have_tx_rate = sub[8] + if have_tx_rate is None or not conditional(want_tx_rate, have_tx_rate.strip(), cast=int): + failed_conditions.append('tx_rate ' + want_tx_rate) + if want_rx_rate: + have_rx_rate = sub[2] + if have_rx_rate is None or not conditional(want_rx_rate, have_rx_rate.strip(), cast=int): + failed_conditions.append('rx_rate ' + want_rx_rate) + if want_neighbors: + have_host = [] + have_port = [] + + # Process LLDP neighbors + if have_neighbors_lldp is None: + rc, have_neighbors_lldp, err = exec_command(module, 'show lldp neighbors detail') + if rc != 0: + module.fail_json(msg=to_text(err, + errors='surrogate_then_replace'), + command=command, rc=rc) + + if have_neighbors_lldp: + lines = have_neighbors_lldp.strip().split('Local Port ID: ') + for line in lines: + field = line.split('\n') + if field[0].strip() == w['name']: + for item in field: + if item.startswith('System Name:'): + have_host.append(item.split(':')[1].strip()) + if item.startswith('Port Description:'): + have_port.append(item.split(':')[1].strip()) + + for item in 
want_neighbors: + host = item.get('host') + port = item.get('port') + if host and host not in have_host: + failed_conditions.append('host ' + host) + if port and port not in have_port: + failed_conditions.append('port ' + port) + return failed_conditions + + +def main(): + """ main entry point for module execution + """ + neighbors_spec = dict( + host=dict(), + port=dict() + ) + + element_spec = dict( + name=dict(), + description=dict(), + speed=dict(), + mtu=dict(), + duplex=dict(default='auto', choices=['full', 'half', 'auto']), + enabled=dict(default=True, type='bool'), + tx_rate=dict(), + rx_rate=dict(), + neighbors=dict(type='list', elements='dict', options=neighbors_spec), + delay=dict(default=20, type='int'), + state=dict(default='present', + choices=['present', 'absent', 'up', 'down']) + ) + + aggregate_spec = deepcopy(element_spec) + aggregate_spec['name'] = dict(required=True) + + # remove default in aggregate spec, to handle common arguments + remove_default_spec(aggregate_spec) + + argument_spec = dict( + aggregate=dict(type='list', elements='dict', options=aggregate_spec), + ) + + argument_spec.update(element_spec) + argument_spec.update(cnos_argument_spec) + + required_one_of = [['name', 'aggregate']] + mutually_exclusive = [['name', 'aggregate']] + + module = AnsibleModule(argument_spec=argument_spec, + required_one_of=required_one_of, + mutually_exclusive=mutually_exclusive, + supports_check_mode=True) + warnings = list() + check_args(module, warnings) + + result = {'changed': False} + if warnings: + result['warnings'] = warnings + + want = map_params_to_obj(module) + have = map_config_to_obj(module) + + commands = map_obj_to_commands((want, have)) + result['commands'] = commands + + if commands: + if not module.check_mode: + load_config(module, commands) + result['changed'] = True + + failed_conditions = check_declarative_intent_params(module, want, result) + + if failed_conditions: + msg = 'One or more conditional statements have not been satisfied' + module.fail_json(msg=msg, failed_conditions=failed_conditions) + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/cnos/cnos_l2_interface.py b/plugins/modules/network/cnos/cnos_l2_interface.py new file mode 100644 index 0000000000..a6decc39b9 --- /dev/null +++ b/plugins/modules/network/cnos/cnos_l2_interface.py @@ -0,0 +1,598 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (C) 2017 Lenovo, Inc. +# (c) 2017, Ansible by Red Hat, inc +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
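An orientation note before the module body: cnos_l2_interface only operates
on Ethernet and port-channel switchports, which it detects with a prefix test
over the interface name. Condensed from get_interface_type() further down in
this file:

    # Condensed from get_interface_type() below: classify by name prefix.
    def get_interface_type(interface):
        name = interface.upper()
        if name[:2] in ('ET', 'GI', 'FA', 'TE', 'FO', 'HU', 'TWE'):
            return 'ethernet'
        if name.startswith('VL'):
            return 'svi'
        if name.startswith('LO'):
            return 'loopback'
        if name[:2] in ('MG', 'MA'):
            return 'management'
        if name.startswith('PO'):
            return 'portchannel'
        if name.startswith('NV'):
            return 'nve'
        return 'unknown'

    get_interface_type('Ethernet1/5')   # -> 'ethernet'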
+# +# Module to send banner commands to Lenovo Switches +# Two types of banners are supported login and motd +# Lenovo Networking +# + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: cnos_l2_interface +short_description: Manage Layer-2 interface on Lenovo CNOS devices. +description: + - This module provides declarative management of Layer-2 interfaces on + Lenovo CNOS devices. +author: + - Anil Kumar Muraleedharan (@amuraleedhar) +options: + name: + description: + - Full name of the interface excluding any logical + unit number, i.e. Ethernet1/3. + required: true + aliases: ['interface'] + mode: + description: + - Mode in which interface needs to be configured. + default: access + choices: ['access', 'trunk'] + access_vlan: + description: + - Configure given VLAN in access port. + If C(mode=access), used as the access VLAN ID. + trunk_vlans: + description: + - List of VLANs to be configured in trunk port. + If C(mode=trunk), used as the VLAN range to ADD or REMOVE + from the trunk. + native_vlan: + description: + - Native VLAN to be configured in trunk port. + If C(mode=trunk), used as the trunk native VLAN ID. + trunk_allowed_vlans: + description: + - List of allowed VLANs in a given trunk port. + If C(mode=trunk), these are the only VLANs that will be + configured on the trunk, i.e. "2-10,15". + aggregate: + description: + - List of Layer-2 interface definitions. + state: + description: + - Manage the state of the Layer-2 Interface configuration. + default: present + choices: ['present','absent', 'unconfigured'] + provider: + description: + - B(Deprecated) + - "Starting with Ansible 2.5 we recommend using + C(connection: network_cli)." + - For more information please see the + L(CNOS Platform Options guide, ../network/user_guide/platform_cnos.html). + - HORIZONTALLINE + - A dict object containing connection details. + suboptions: + host: + description: + - Specifies the DNS host name or address for connecting to the remote + device over the specified transport. The value of host is used as + the destination address for the transport. + required: true + port: + description: + - Specifies the port to use when building the connection to the + remote device. + default: 22 + username: + description: + - Configures the username to use to authenticate the connection to + the remote device. This value is used to authenticate + the SSH session. If the value is not specified in the task, the + value of environment variable C(ANSIBLE_NET_USERNAME) will be used + instead. + password: + description: + - Specifies the password to use to authenticate the connection to + the remote device. This value is used to authenticate + the SSH session. If the value is not specified in the task, the + value of environment variable C(ANSIBLE_NET_PASSWORD) will be used + instead. + timeout: + description: + - Specifies the timeout in seconds for communicating with the network + device for either connecting or sending commands. If the timeout + is exceeded before the operation is completed, the module will + error. + default: 10 + ssh_keyfile: + description: + - Specifies the SSH key to use to authenticate the connection to + the remote device. This value is the path to the + key used to authenticate the SSH session. 
If the value is not + specified in the task, the value of environment variable + C(ANSIBLE_NET_SSH_KEYFILE)will be used instead. + authorize: + description: + - Instructs the module to enter privileged mode on the remote device + before sending any commands. If not specified, the device will + attempt to execute all commands in non-privileged mode. If the + value is not specified in the task, the value of environment + variable C(ANSIBLE_NET_AUTHORIZE) will be used instead. + type: bool + default: 'no' + auth_pass: + description: + - Specifies the password to use if required to enter privileged mode + on the remote device. If I(authorize) is false, then this argument + does nothing. If the value is not specified in the task, the value + of environment variable C(ANSIBLE_NET_AUTH_PASS) will be used + instead. +''' + +EXAMPLES = """ +- name: Ensure Ethernet1/5 is in its default l2 interface state + cnos_l2_interface: + name: Ethernet1/5 + state: unconfigured + +- name: Ensure Ethernet1/5 is configured for access vlan 20 + cnos_l2_interface: + name: Ethernet1/5 + mode: access + access_vlan: 20 + +- name: Ensure Ethernet1/5 only has vlans 5-10 as trunk vlans + cnos_l2_interface: + name: Ethernet1/5 + mode: trunk + native_vlan: 10 + trunk_vlans: 5-10 + +- name: Ensure Ethernet1/5 is a trunk port and ensure 2-50 are being tagged + (doesn't mean others aren't also being tagged) + cnos_l2_interface: + name: Ethernet1/5 + mode: trunk + native_vlan: 10 + trunk_vlans: 2-50 + +- name: Ensure these VLANs are not being tagged on the trunk + cnos_l2_interface: + name: Ethernet1/5 + mode: trunk + trunk_vlans: 51-4094 + state: absent +""" + +RETURN = """ +commands: + description: The list of configuration mode commands to send to the device + returned: always, except for the platforms that use Netconf transport to + manage the device. 
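The commands above are derived from screen-scraped device state:
get_switchport() below runs `show interface <name> switchport` and picks
fields out with anchored regexes. A sketch against an invented output
fragment (the field labels mirror the regexes in the module; the sample text
itself is hypothetical):

    # Sketch of get_switchport()'s parsing; SAMPLE is invented for illustration.
    import re

    SAMPLE = ('Switchport : enabled\n'
              'Switchport mode : access\n'
              'Configured Vlans : 20\n'
              'Default/Native Vlan : 1\n'
              'Enabled Vlans : ALL\n')

    mode = re.search(r'Switchport mode : (?:.* )?(\w+)$', SAMPLE, re.M).group(1)  # 'access'
    access = re.search(r'Configured Vlans : (\d+)', SAMPLE).group(1)              # '20'
    trunk = re.search(r'Enabled Vlans : (.+)$', SAMPLE, re.M).group(1)            # 'ALL'
    trunk = '1-4094' if trunk == 'ALL' else trunk   # the module expands ALL this way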
+ type: list + sample: + - interface Ethernet1/5 + - switchport access vlan 20 +""" + +import re +from copy import deepcopy + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import remove_default_spec +from ansible_collections.community.general.plugins.module_utils.network.cnos.cnos import get_config, load_config +from ansible_collections.community.general.plugins.module_utils.network.cnos.cnos import cnos_argument_spec +from ansible_collections.community.general.plugins.module_utils.network.cnos.cnos import run_commands + + +def get_interface_type(interface): + intf_type = 'unknown' + if interface.upper()[:2] in ('ET', 'GI', 'FA', 'TE', 'FO', 'HU', 'TWE'): + intf_type = 'ethernet' + elif interface.upper().startswith('VL'): + intf_type = 'svi' + elif interface.upper().startswith('LO'): + intf_type = 'loopback' + elif interface.upper()[:2] in ('MG', 'MA'): + intf_type = 'management' + elif interface.upper().startswith('PO'): + intf_type = 'portchannel' + elif interface.upper().startswith('NV'): + intf_type = 'nve' + + return intf_type + + +def is_switchport(name, module): + intf_type = get_interface_type(name) + + if intf_type in ('ethernet', 'portchannel'): + config = run_commands(module, + ['show interface {0} switchport'.format(name)])[0] + match = re.search(r'Switchport : enabled', config) + return bool(match) + return False + + +def interface_is_portchannel(name, module): + if get_interface_type(name) == 'ethernet': + config = run_commands(module, ['show run interface {0}'.format(name)])[0] + if any(c in config for c in ['channel group', 'channel-group']): + return True + return False + + +def get_switchport(name, module): + config = run_commands(module, + ['show interface {0} switchport'.format(name)])[0] + mode = re.search(r'Switchport mode : (?:.* )?(\w+)$', config, re.M) + access = re.search(r'Configured Vlans : (\d+)', config) + native = re.search(r'Default/Native Vlan : (\d+)', config) + trunk = re.search(r'Enabled Vlans : (.+)$', config, re.M) + if mode: + mode = mode.group(1) + if access: + access = access.group(1) + if native: + native = native.group(1) + if trunk: + trunk = trunk.group(1) + if trunk == 'ALL': + trunk = '1-4094' + + switchport_config = { + "interface": name, + "mode": mode, + "access_vlan": access, + "native_vlan": native, + "trunk_vlans": trunk, + } + + return switchport_config + + +def remove_switchport_config_commands(name, existing, proposed, module): + mode = proposed.get('mode') + commands = [] + command = None + + if mode == 'access': + av_check = existing.get('access_vlan') == proposed.get('access_vlan') + if av_check: + command = 'no switchport access vlan' + commands.append(command) + + elif mode == 'trunk': + # Supported Remove Scenarios for trunk_vlans_list + # 1) Existing: 1,2,3 Proposed: 1,2,3 - Remove all + # 2) Existing: 1,2,3 Proposed: 1,2 - Remove 1,2 Leave 3 + # 3) Existing: 1,2,3 Proposed: 2,3 - Remove 2,3 Leave 1 + # 4) Existing: 1,2,3 Proposed: 4,5,6 - None removed. + # 5) Existing: None Proposed: 1,2,3 - None removed. 
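+        # In set terms: vlans_to_remove = proposed & existing, so only VLANs
+        # that are both currently enabled and named in the task are removed.
+        # e.g. existing {1, 2, 3}, proposed {2, 3} -> remove {2, 3}; proposed
+        # {4, 5, 6} shares nothing with existing, so no remove command is sent.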
+ existing_vlans = existing.get('trunk_vlans_list') + proposed_vlans = proposed.get('trunk_vlans_list') + vlans_to_remove = set(proposed_vlans).intersection(existing_vlans) + + if vlans_to_remove: + proposed_allowed_vlans = proposed.get('trunk_allowed_vlans') + remove_trunk_allowed_vlans = proposed.get('trunk_vlans', + proposed_allowed_vlans) + command = 'switchport trunk allowed vlan remove {0}' + command = command.format(remove_trunk_allowed_vlans) + commands.append(command) + + native_check = existing.get('native_vlan') == proposed.get('native_vlan') + if native_check and proposed.get('native_vlan'): + command = 'no switchport trunk native vlan' + commands.append(command) + + if commands: + commands.insert(0, 'interface ' + name) + return commands + + +def get_switchport_config_commands(name, existing, proposed, module): + """Gets commands required to config a given switchport interface + """ + + proposed_mode = proposed.get('mode') + existing_mode = existing.get('mode') + commands = [] + command = None + + if proposed_mode != existing_mode: + if proposed_mode == 'trunk': + command = 'switchport mode trunk' + elif proposed_mode == 'access': + command = 'switchport mode access' + + if command: + commands.append(command) + + if proposed_mode == 'access': + av_check = str(existing.get('access_vlan')) == str(proposed.get('access_vlan')) + if not av_check: + command = 'switchport access vlan {0}'.format(proposed.get('access_vlan')) + commands.append(command) + + elif proposed_mode == 'trunk': + tv_check = existing.get('trunk_vlans_list') == proposed.get('trunk_vlans_list') + + if not tv_check: + if proposed.get('allowed'): + command = 'switchport trunk allowed vlan {0}' + command = command.format(proposed.get('trunk_allowed_vlans')) + commands.append(command) + + else: + existing_vlans = existing.get('trunk_vlans_list') + proposed_vlans = proposed.get('trunk_vlans_list') + vlans_to_add = set(proposed_vlans).difference(existing_vlans) + if vlans_to_add: + command = 'switchport trunk allowed vlan add {0}' + command = command.format(proposed.get('trunk_vlans')) + commands.append(command) + + native_check = str(existing.get('native_vlan')) == str(proposed.get('native_vlan')) + if not native_check and proposed.get('native_vlan'): + command = 'switchport trunk native vlan {0}' + command = command.format(proposed.get('native_vlan')) + commands.append(command) + + if commands: + commands.insert(0, 'interface ' + name) + return commands + + +def is_switchport_default(existing): + """Determines if switchport has a default config based on mode + Args: + existing (dict): existing switchport configuration from Ansible mod + Returns: + boolean: True if switchport has OOB Layer 2 config, i.e. 
+          vlan 1 and trunk all and mode is access
+    """
+
+    c1 = str(existing['access_vlan']) == '1'
+    c2 = str(existing['native_vlan']) == '1'
+    c3 = existing['trunk_vlans'] == '1-4094'
+    c4 = existing['mode'] == 'access'
+
+    default = c1 and c2 and c3 and c4
+
+    return default
+
+
+def default_switchport_config(name):
+    commands = []
+    commands.append('interface ' + name)
+    commands.append('switchport mode access')
+    commands.append('switchport access vlan 1')
+    commands.append('switchport trunk native vlan 1')
+    commands.append('switchport trunk allowed vlan all')
+    return commands
+
+
+def vlan_range_to_list(vlans):
+    result = []
+    if vlans:
+        for part in vlans.split(','):
+            if part.lower() == 'none':
+                break
+            if part:
+                if '-' in part:
+                    start, stop = (int(i) for i in part.split('-'))
+                    result.extend(range(start, stop + 1))
+                else:
+                    result.append(int(part))
+    return sorted(result)
+
+
+def get_list_of_vlans(module):
+    config = run_commands(module, ['show vlan'])[0]
+    vlans = set()
+
+    lines = config.strip().splitlines()
+    for line in lines:
+        line_parts = line.split()
+        if line_parts:
+            try:
+                int(line_parts[0])
+            except ValueError:
+                continue
+            vlans.add(line_parts[0])
+
+    return list(vlans)
+
+
+def flatten_list(commands):
+    flat_list = []
+    for command in commands:
+        if isinstance(command, list):
+            flat_list.extend(command)
+        else:
+            flat_list.append(command)
+    return flat_list
+
+
+def map_params_to_obj(module):
+    obj = []
+
+    aggregate = module.params.get('aggregate')
+    if aggregate:
+        for item in aggregate:
+            for key in item:
+                if item.get(key) is None:
+                    item[key] = module.params[key]
+
+            obj.append(item.copy())
+    else:
+        obj.append({
+            'name': module.params['name'],
+            'mode': module.params['mode'],
+            'access_vlan': module.params['access_vlan'],
+            'native_vlan': module.params['native_vlan'],
+            'trunk_vlans': module.params['trunk_vlans'],
+            'trunk_allowed_vlans': module.params['trunk_allowed_vlans'],
+            'state': module.params['state']
+        })
+
+    return obj
+
+
+def main():
+    """ main entry point for module execution
+    """
+    element_spec = dict(
+        name=dict(type='str', aliases=['interface']),
+        mode=dict(choices=['access', 'trunk'], default='access'),
+        access_vlan=dict(type='str'),
+        native_vlan=dict(type='str'),
+        trunk_vlans=dict(type='str'),
+        trunk_allowed_vlans=dict(type='str'),
+        state=dict(choices=['absent', 'present', 'unconfigured'],
+                   default='present')
+    )
+
+    aggregate_spec = deepcopy(element_spec)
+
+    # remove default in aggregate spec, to handle common arguments
+    remove_default_spec(aggregate_spec)
+
+    argument_spec = dict(
+        aggregate=dict(type='list', elements='dict', options=aggregate_spec),
+    )
+
+    argument_spec.update(element_spec)
+    argument_spec.update(cnos_argument_spec)
+
+    module = AnsibleModule(argument_spec=argument_spec,
+                           mutually_exclusive=[['access_vlan', 'trunk_vlans'],
+                                               ['access_vlan', 'native_vlan'],
+                                               ['access_vlan', 'trunk_allowed_vlans']],
+                           supports_check_mode=True)
+
+    warnings = list()
+    commands = []
+    result = {'changed': False, 'warnings': warnings}
+
+    want = map_params_to_obj(module)
+    for w in want:
+        name = w['name']
+        mode = w['mode']
+        access_vlan = w['access_vlan']
+        state = w['state']
+        trunk_vlans = w['trunk_vlans']
+        native_vlan = w['native_vlan']
+        trunk_allowed_vlans = w['trunk_allowed_vlans']
+
+        args = dict(name=name, mode=mode, access_vlan=access_vlan,
+                    native_vlan=native_vlan, trunk_vlans=trunk_vlans,
+                    trunk_allowed_vlans=trunk_allowed_vlans)
+
+        proposed = dict((k, v) for k, v in args.items() if v is not None)
+
+        name = 
name.lower() + + if mode == 'access' and state == 'present' and not access_vlan: + msg = 'access_vlan param required for mode=access && state=present' + module.fail_json(msg=msg) + + if mode == 'trunk' and access_vlan: + msg = 'access_vlan param not supported when using mode=trunk' + module.fail_json(msg=msg) + + if not is_switchport(name, module): + module.fail_json(msg='Ensure interface is configured to be a L2' + '\nport first before using this module. You can use' + '\nthe cnos_interface module for this.') + + if interface_is_portchannel(name, module): + module.fail_json(msg='Cannot change L2 config on physical ' + '\nport because it is in a portchannel. ' + '\nYou should update the portchannel config.') + + # existing will never be null for Eth intfs as there is always a default + existing = get_switchport(name, module) + + # Safeguard check + # If there isn't an existing, something is wrong per previous comment + if not existing: + msg = 'Make sure you are using the FULL interface name' + module.fail_json(msg=msg) + + if trunk_vlans or trunk_allowed_vlans: + if trunk_vlans: + trunk_vlans_list = vlan_range_to_list(trunk_vlans) + elif trunk_allowed_vlans: + trunk_vlans_list = vlan_range_to_list(trunk_allowed_vlans) + proposed['allowed'] = True + + existing_trunks_list = vlan_range_to_list((existing['trunk_vlans'])) + + existing['trunk_vlans_list'] = existing_trunks_list + proposed['trunk_vlans_list'] = trunk_vlans_list + + current_vlans = get_list_of_vlans(module) + + if state == 'present': + if access_vlan and access_vlan not in current_vlans: + module.fail_json(msg='You are trying to configure a VLAN' + ' on an interface that\ndoes not exist on the ' + ' switch yet!', vlan=access_vlan) + elif native_vlan and native_vlan not in current_vlans: + module.fail_json(msg='You are trying to configure a VLAN on' + ' an interface that\ndoes not exist on the ' + ' switch yet!', vlan=native_vlan) + else: + command = get_switchport_config_commands(name, existing, + proposed, module) + commands.append(command) + elif state == 'unconfigured': + is_default = is_switchport_default(existing) + if not is_default: + command = default_switchport_config(name) + commands.append(command) + elif state == 'absent': + command = remove_switchport_config_commands(name, existing, + proposed, module) + commands.append(command) + + if trunk_vlans or trunk_allowed_vlans: + existing.pop('trunk_vlans_list') + proposed.pop('trunk_vlans_list') + + cmds = flatten_list(commands) + if cmds: + if module.check_mode: + module.exit_json(changed=True, commands=cmds) + else: + result['changed'] = True + load_config(module, cmds) + if 'configure' in cmds: + cmds.pop(0) + + result['commands'] = cmds + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/cnos/cnos_l3_interface.py b/plugins/modules/network/cnos/cnos_l3_interface.py new file mode 100644 index 0000000000..cf68a237f6 --- /dev/null +++ b/plugins/modules/network/cnos/cnos_l3_interface.py @@ -0,0 +1,461 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type +# +# Copyright (C) 2019 Lenovo, Inc. +# (c) 2019, Ansible by Red Hat, inc +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. 
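A pattern note before the module body: like cnos_interface and
cnos_l2_interface above, this module accepts either a single interface or an
aggregate list, and its map_params_to_obj() back-fills each aggregate entry
with the top-level parameter values. A minimal sketch of that back-fill,
mirroring the shape used across these cnos modules:

    # Minimal sketch of the aggregate back-fill used by these cnos modules.
    def expand_aggregate(params, keys):
        items = params.get('aggregate') or [dict((k, params[k]) for k in keys)]
        want = []
        for item in items:
            merged = dict(item)
            for key in keys:
                if merged.get(key) is None:    # fall back to the top-level value
                    merged[key] = params.get(key)
            want.append(merged)
        return want

    expand_aggregate({'aggregate': [{'name': 'Ethernet1/33'}],
                      'name': None, 'ipv4': '10.241.107.1/24'},
                     keys=('name', 'ipv4'))
    # -> [{'name': 'Ethernet1/33', 'ipv4': '10.241.107.1/24'}]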
diff --git a/plugins/modules/network/cnos/cnos_l3_interface.py b/plugins/modules/network/cnos/cnos_l3_interface.py
new file mode 100644
index 0000000000..cf68a237f6
--- /dev/null
+++ b/plugins/modules/network/cnos/cnos_l3_interface.py
@@ -0,0 +1,461 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+#
+# Copyright (C) 2019 Lenovo, Inc.
+# (c) 2019, Ansible by Red Hat, inc
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+# Module to work on Layer-3 interfaces with Lenovo Switches
+# Lenovo Networking
+#
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+DOCUMENTATION = '''
+---
+module: cnos_l3_interface
+author: "Anil Kumar Muraleedharan (@amuraleedhar)"
+short_description: Manage Layer-3 interfaces on Lenovo CNOS network devices.
+description:
+  - This module provides declarative management of Layer-3 interfaces
+    on CNOS network devices.
+notes:
+  - Tested against CNOS 10.8.1
+options:
+  name:
+    description:
+      - Name of the Layer-3 interface to be configured, e.g. Ethernet1/2.
+  ipv4:
+    description:
+      - IPv4 address to be set for the Layer-3 interface mentioned in I(name)
+        option. The address format is <ipv4 address>/<mask>, where the mask is
+        a number in the range 0-32, e.g. 10.241.107.1/24.
+  ipv6:
+    description:
+      - IPv6 address to be set for the Layer-3 interface mentioned in I(name)
+        option. The address format is <ipv6 address>/<mask>, where the mask is
+        a number in the range 0-128, e.g. fd5d:12c9:2201:1::1/64.
+  aggregate:
+    description:
+      - List of Layer-3 interface definitions. Each entry in the aggregate
+        list should define the interface name C(name) and an optional
+        C(ipv4) or C(ipv6) address.
+  state:
+    description:
+      - State of the Layer-3 interface configuration. It indicates if the
+        configuration should be present or absent on the remote device.
+    default: present
+    choices: ['present', 'absent']
+  provider:
+    description:
+      - B(Deprecated)
+      - "Starting with Ansible 2.5 we recommend using
+        C(connection: network_cli)."
+      - For more information please see the
+        L(CNOS Platform Options guide, ../network/user_guide/platform_cnos.html).
+      - HORIZONTALLINE
+      - A dict object containing connection details.
+    suboptions:
+      host:
+        description:
+          - Specifies the DNS host name or address for connecting to the remote
+            device over the specified transport. The value of host is used as
+            the destination address for the transport.
+        required: true
+      port:
+        description:
+          - Specifies the port to use when building the connection to the
+            remote device.
+        default: 22
+      username:
+        description:
+          - Configures the username to use to authenticate the connection to
+            the remote device. This value is used to authenticate
+            the SSH session. If the value is not specified in the task, the
+            value of environment variable C(ANSIBLE_NET_USERNAME) will be used
+            instead.
+      password:
+        description:
+          - Specifies the password to use to authenticate the connection to
+            the remote device. This value is used to authenticate
+            the SSH session. If the value is not specified in the task, the
+            value of environment variable C(ANSIBLE_NET_PASSWORD) will be used
+            instead.
+      timeout:
+        description:
+          - Specifies the timeout in seconds for communicating with the network
+            device for either connecting or sending commands. If the timeout
+            is exceeded before the operation is completed, the module will
+            error.
+        default: 10
+      ssh_keyfile:
+        description:
+          - Specifies the SSH key to use to authenticate the connection to
+            the remote device. This value is the path to the
+            key used to authenticate the SSH session. If the value is not
+            specified in the task, the value of environment variable
+            C(ANSIBLE_NET_SSH_KEYFILE) will be used instead.
+      authorize:
+        description:
+          - Instructs the module to enter privileged mode on the remote device
+            before sending any commands. If not specified, the device will
+            attempt to execute all commands in non-privileged mode. If the
+            value is not specified in the task, the value of environment
+            variable C(ANSIBLE_NET_AUTHORIZE) will be used instead.
+        type: bool
+        default: 'no'
+      auth_pass:
+        description:
+          - Specifies the password to use if required to enter privileged mode
+            on the remote device. If I(authorize) is false, then this argument
+            does nothing. If the value is not specified in the task, the value
+            of environment variable C(ANSIBLE_NET_AUTH_PASS) will be used
+            instead.
+'''
+
+EXAMPLES = """
+- name: Remove Ethernet1/33 IPv4 and IPv6 address
+  cnos_l3_interface:
+    name: Ethernet1/33
+    state: absent
+
+- name: Set Ethernet1/33 IPv4 address
+  cnos_l3_interface:
+    name: Ethernet1/33
+    ipv4: 10.241.107.1/24
+
+- name: Set Ethernet1/33 IPv6 address
+  cnos_l3_interface:
+    name: Ethernet1/33
+    ipv6: "fd5d:12c9:2201:1::1/64"
+
+- name: Set Ethernet1/33 in dhcp
+  cnos_l3_interface:
+    name: Ethernet1/33
+    ipv4: dhcp
+    ipv6: dhcp
+
+- name: Set interface Vlan1 (SVI) IPv4 address
+  cnos_l3_interface:
+    name: Vlan1
+    ipv4: 192.168.0.5/24
+
+- name: Set IP addresses on aggregate
+  cnos_l3_interface:
+    aggregate:
+      - { name: Ethernet1/33, ipv4: 10.241.107.1/24 }
+      - { name: Ethernet1/44, ipv4: 10.240.106.1/24,
+          ipv6: "fd5d:12c9:2201:1::1/64" }
+
+- name: Remove IP addresses on aggregate
+  cnos_l3_interface:
+    aggregate:
+      - { name: Ethernet1/33, ipv4: 10.241.107.1/24 }
+      - { name: Ethernet1/44, ipv4: 10.240.106.1/24,
+          ipv6: "fd5d:12c9:2201:1::1/64" }
+    state: absent
+"""
+
+RETURN = """
+commands:
+  description: The list of configuration mode commands to send to the device
+  returned: always, except for the platforms that use Netconf transport to
+            manage the device.
+  type: list
+  sample:
+    - interface Ethernet1/33
+    - ip address 10.241.107.1 255.255.255.0
+    - ipv6 address fd5d:12c9:2201:1::1/64
+"""
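+# Informal sketch of the netcommon mask helpers imported below (the values
+# shown are what these utilities are expected to return; doctest-style,
+# not executed here):
+#
+#     >>> to_netmask('24')
+#     '255.255.255.0'
+#     >>> to_masklen('255.255.255.0')
+#     24
+#     >>> is_masklen('24'), is_masklen('33')
+#     (True, False)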
+import re
+
+from copy import deepcopy
+
+from ansible.module_utils._text import to_text
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.network.cnos.cnos import get_config, load_config
+from ansible_collections.community.general.plugins.module_utils.network.cnos.cnos import cnos_argument_spec
+from ansible_collections.community.general.plugins.module_utils.network.cnos.cnos import run_commands
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.config import NetworkConfig
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import remove_default_spec
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import is_netmask, is_masklen
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import to_netmask, to_masklen
+
+
+def validate_ipv4(value, module):
+    if value:
+        address = value.split('/')
+        if len(address) != 2:
+            module.fail_json(
+                msg='address format is <ipv4 address>/<mask>, got invalid format %s' % value)
+        if not is_masklen(address[1]):
+            module.fail_json(
+                msg='invalid value for mask: %s, mask should be in range 0-32' % address[1])
+
+
+def validate_ipv6(value, module):
+    if value:
+        address = value.split('/')
+        if len(address) != 2:
+            module.fail_json(
+                msg='address format is <ipv6 address>/<mask>, got invalid format %s' % value)
+        else:
+            if not 0 <= int(address[1]) <= 128:
+                module.fail_json(
+                    msg='invalid value for mask: %s, mask should be in range 0-128' % address[1])
+
+
+def validate_param_values(module, obj, param=None):
+    if param is None:
+        param = module.params
+    for key in obj:
+        # validate the param value (if validator func exists)
+        validator = globals().get('validate_%s' % key)
+        if callable(validator):
+            validator(param.get(key), module)
+
+
+def parse_config_argument(configobj, name, arg=None):
+    cfg = configobj['interface %s' % name]
+    cfg = '\n'.join(cfg.children)
+
+    values = []
+    matches = re.finditer(r'%s (.+)$' % arg, cfg, re.M)
+    for match in matches:
+        match_str = match.group(1).strip()
+        if arg == 'ipv6 address':
+            values.append(match_str)
+        else:
+            values = match_str
+            break
+
+    return values or None
+
+
+def search_obj_in_list(name, lst):
+    for o in lst:
+        if o['name'].lower() == name.lower():
+            return o
+
+    return None
+
+
+def get_interface_type(interface):
+    intf_type = 'unknown'
+    if interface.upper()[:2] in ('ET', 'GI', 'FA', 'TE', 'FO', 'HU', 'TWE'):
+        intf_type = 'ethernet'
+    elif interface.upper().startswith('VL'):
+        intf_type = 'svi'
+    elif interface.upper().startswith('LO'):
+        intf_type = 'loopback'
+    elif interface.upper()[:2] in ('MG', 'MA'):
+        intf_type = 'management'
+    elif interface.upper().startswith('PO'):
+        intf_type = 'portchannel'
+    elif interface.upper().startswith('NV'):
+        intf_type = 'nve'
+
+    return intf_type
+
+
+def is_switchport(name, module):
+    intf_type = get_interface_type(name)
+
+    if intf_type in ('ethernet', 'portchannel'):
+        config = run_commands(module,
+                              ['show interface {0} switchport'.format(name)])[0]
+        match = re.search(r'Switchport : enabled', config)
+        return bool(match)
+    return False
+
+
+def map_obj_to_commands(updates, module):
+    commands = list()
+    want, have = updates
+    for w in want:
+        name = w['name']
+        ipv4 = w['ipv4']
+        ipv6 = w['ipv6']
+        state = w['state']
+
+        interface = 'interface ' + name
commands.append(interface) + + obj_in_have = search_obj_in_list(name, have) + if state == 'absent' and obj_in_have: + if obj_in_have['ipv4']: + if ipv4: + address = ipv4.split('/') + if len(address) == 2: + ipv4 = '{0} {1}'.format( + address[0], to_netmask(address[1])) + commands.append('no ip address %s' % ipv4) + else: + commands.append('no ip address') + if obj_in_have['ipv6']: + if ipv6: + commands.append('no ipv6 address %s' % ipv6) + else: + commands.append('no ipv6 address') + if 'dhcp' in obj_in_have['ipv6']: + commands.append('no ipv6 address dhcp') + + elif state == 'present': + if ipv4: + if obj_in_have is None or obj_in_have.get('ipv4') is None or ipv4 != obj_in_have['ipv4']: + address = ipv4.split('/') + if len(address) == 2: + ipv4 = '{0} {1}'.format( + address[0], to_netmask(address[1])) + commands.append('ip address %s' % ipv4) + + if ipv6: + if obj_in_have is None or obj_in_have.get('ipv6') is None or ipv6.lower() not in [addr.lower() for addr in obj_in_have['ipv6']]: + commands.append('ipv6 address %s' % ipv6) + if commands[-1] == interface: + commands.pop(-1) + + return commands + + +def map_config_to_obj(module): + config = get_config(module) + configobj = NetworkConfig(indent=1, contents=config) + + match = re.findall(r'^interface (\S+)', config, re.M) + if not match: + return list() + + instances = list() + + for item in set(match): + ipv4 = parse_config_argument(configobj, item, 'ip address') + if ipv4: + # eg. 192.168.2.10 255.255.255.0 -> 192.168.2.10/24 + address = ipv4.strip().split(' ') + if len(address) == 2 and is_netmask(address[1]): + ipv4 = '{0}/{1}'.format(address[0], to_text(to_masklen(address[1]))) + + obj = { + 'name': item, + 'ipv4': ipv4, + 'ipv6': parse_config_argument(configobj, item, 'ipv6 address'), + 'state': 'present' + } + instances.append(obj) + + return instances + + +def map_params_to_obj(module): + obj = [] + + aggregate = module.params.get('aggregate') + if aggregate: + for item in aggregate: + for key in item: + if item.get(key) is None: + item[key] = module.params[key] + + validate_param_values(module, item, item) + obj.append(item.copy()) + else: + obj.append({ + 'name': module.params['name'], + 'ipv4': module.params['ipv4'], + 'ipv6': module.params['ipv6'], + 'state': module.params['state'] + }) + + validate_param_values(module, obj) + + return obj + + +def main(): + """ main entry point for module execution + """ + element_spec = dict( + name=dict(), + ipv4=dict(), + ipv6=dict(), + state=dict(default='present', + choices=['present', 'absent']) + ) + + aggregate_spec = deepcopy(element_spec) + aggregate_spec['name'] = dict(required=True) + + # remove default in aggregate spec, to handle common arguments + remove_default_spec(aggregate_spec) + + argument_spec = dict( + aggregate=dict(type='list', elements='dict', options=aggregate_spec), + ) + + argument_spec.update(element_spec) + argument_spec.update(cnos_argument_spec) + + required_one_of = [['name', 'aggregate']] + mutually_exclusive = [['name', 'aggregate']] + module = AnsibleModule(argument_spec=argument_spec, + required_one_of=required_one_of, + mutually_exclusive=mutually_exclusive, + supports_check_mode=True) + + warnings = list() + + result = {'changed': False} + + want = map_params_to_obj(module) + for w in want: + name = w['name'] + name = name.lower() + if is_switchport(name, module): + module.fail_json(msg='Ensure interface is configured to be a L3' + '\nport first before using this module. 
You can use' + '\nthe cnos_interface module for this.') + + have = map_config_to_obj(module) + + commands = map_obj_to_commands((want, have), module) + result['commands'] = commands + + if commands: + if not module.check_mode: + resp = load_config(module, commands) + if resp is not None: + warnings.extend((out for out in resp if out)) + + result['changed'] = True + + if warnings: + result['warnings'] = warnings + if 'overlaps with address configured on' in warnings[0]: + result['failed'] = True + result['msg'] = warnings[0] + if 'Cannot set overlapping address' in warnings[0]: + result['failed'] = True + result['msg'] = warnings[0] + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/cnos/cnos_linkagg.py b/plugins/modules/network/cnos/cnos_linkagg.py new file mode 100644 index 0000000000..1725bafa82 --- /dev/null +++ b/plugins/modules/network/cnos/cnos_linkagg.py @@ -0,0 +1,391 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type +# +# Copyright (C) 2017 Lenovo, Inc. +# (c) 2017, Ansible by Red Hat, inc +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# +# Module to work on Link Aggregation with Lenovo Switches +# Lenovo Networking +# +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: cnos_linkagg +author: "Anil Kumar Muraleedharan (@auraleedhar)" +short_description: Manage link aggregation groups on Lenovo CNOS devices +description: + - This module provides declarative management of link aggregation groups + on Lenovo CNOS network devices. +notes: + - Tested against CNOS 10.8.1 +options: + group: + description: + - Channel-group number for the port-channel + Link aggregation group. Range 1-255. + mode: + description: + - Mode of the link aggregation group. + choices: ['active', 'on', 'passive'] + members: + description: + - List of members of the link aggregation group. + aggregate: + description: List of link aggregation definitions. + state: + description: + - State of the link aggregation group. + default: present + choices: ['present', 'absent'] + purge: + description: + - Purge links not defined in the I(aggregate) parameter. + type: bool + default: no + provider: + description: + - B(Deprecated) + - "Starting with Ansible 2.5 we recommend using C(connection: network_cli)." + - For more information please see the L(CNOS Platform Options guide, ../network/user_guide/platform_cnos.html). + - HORIZONTALLINE + - A dict object containing connection details. + suboptions: + host: + description: + - Specifies the DNS host name or address for connecting to the remote + device over the specified transport. The value of host is used as + the destination address for the transport. 
+ required: true + port: + description: + - Specifies the port to use when building the connection to the remote device. + default: 22 + username: + description: + - Configures the username to use to authenticate the connection to + the remote device. This value is used to authenticate + the SSH session. If the value is not specified in the task, the + value of environment variable C(ANSIBLE_NET_USERNAME) will be used instead. + password: + description: + - Specifies the password to use to authenticate the connection to + the remote device. This value is used to authenticate + the SSH session. If the value is not specified in the task, the + value of environment variable C(ANSIBLE_NET_PASSWORD) will be used instead. + timeout: + description: + - Specifies the timeout in seconds for communicating with the network device + for either connecting or sending commands. If the timeout is + exceeded before the operation is completed, the module will error. + default: 10 + ssh_keyfile: + description: + - Specifies the SSH key to use to authenticate the connection to + the remote device. This value is the path to the + key used to authenticate the SSH session. If the value is not specified + in the task, the value of environment variable C(ANSIBLE_NET_SSH_KEYFILE) + will be used instead. + authorize: + description: + - Instructs the module to enter privileged mode on the remote device + before sending any commands. If not specified, the device will + attempt to execute all commands in non-privileged mode. If the value + is not specified in the task, the value of environment variable + C(ANSIBLE_NET_AUTHORIZE) will be used instead. + type: bool + default: 'no' + auth_pass: + description: + - Specifies the password to use if required to enter privileged mode + on the remote device. If I(authorize) is false, then this argument + does nothing. If the value is not specified in the task, the value of + environment variable C(ANSIBLE_NET_AUTH_PASS) will be used instead. +''' + +EXAMPLES = """ +- name: create link aggregation group + cnos_linkagg: + group: 10 + state: present + +- name: delete link aggregation group + cnos_linkagg: + group: 10 + state: absent + +- name: set link aggregation group to members + cnos_linkagg: + group: 200 + mode: active + members: + - Ethernet1/33 + - Ethernet1/44 + +- name: remove link aggregation group from GigabitEthernet0/0 + cnos_linkagg: + group: 200 + mode: active + members: + - Ethernet1/33 + +- name: Create aggregate of linkagg definitions + cnos_linkagg: + aggregate: + - { group: 3, mode: on, members: [Ethernet1/33] } + - { group: 100, mode: passive, members: [Ethernet1/44] } +""" + +RETURN = """ +commands: + description: The list of configuration mode commands to send to the device + returned: always, except for the platforms that use Netconf transport to + manage the device. 
+ type: list + sample: + - interface port-channel 30 + - interface Ethernet1/33 + - channel-group 30 mode on + - no interface port-channel 30 +""" + +import re +from copy import deepcopy + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.config import CustomNetworkConfig +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import remove_default_spec +from ansible_collections.community.general.plugins.module_utils.network.cnos.cnos import get_config, load_config +from ansible_collections.community.general.plugins.module_utils.network.cnos.cnos import cnos_argument_spec + + +def search_obj_in_list(group, lst): + for o in lst: + if o['group'] == group: + return o + + +def map_obj_to_commands(updates, module): + commands = list() + want, have = updates + purge = module.params['purge'] + + for w in want: + group = w['group'] + mode = w['mode'] + members = w.get('members') or [] + state = w['state'] + del w['state'] + + obj_in_have = search_obj_in_list(group, have) + + if state == 'absent': + if obj_in_have: + commands.append('no interface port-channel {0}'.format(group)) + + elif state == 'present': + cmd = ['interface port-channel {0}'.format(group), + 'exit'] + if not obj_in_have: + if not group: + module.fail_json(msg='group is a required option') + commands.extend(cmd) + + if members: + for m in members: + commands.append('interface {0}'.format(m)) + commands.append('channel-group {0} mode {1}'.format(group, mode)) + + else: + if members: + if 'members' not in obj_in_have.keys(): + for m in members: + commands.extend(cmd) + commands.append('interface {0}'.format(m)) + commands.append('channel-group {0} mode {1}'.format(group, mode)) + + elif set(members) != set(obj_in_have['members']): + missing_members = list(set(members) - set(obj_in_have['members'])) + for m in missing_members: + commands.extend(cmd) + commands.append('interface {0}'.format(m)) + commands.append('channel-group {0} mode {1}'.format(group, mode)) + + superfluous_members = list(set(obj_in_have['members']) - set(members)) + for m in superfluous_members: + commands.extend(cmd) + commands.append('interface {0}'.format(m)) + commands.append('no channel-group') + + if purge: + for h in have: + obj_in_want = search_obj_in_list(h['group'], want) + if not obj_in_want: + commands.append('no interface port-channel {0}'.format(h['group'])) + + return commands + + +def map_params_to_obj(module): + obj = [] + + aggregate = module.params.get('aggregate') + if aggregate: + for item in aggregate: + for key in item: + if item.get(key) is None: + item[key] = module.params[key] + + d = item.copy() + d['group'] = str(d['group']) + + obj.append(d) + else: + obj.append({ + 'group': str(module.params['group']), + 'mode': module.params['mode'], + 'members': module.params['members'], + 'state': module.params['state'] + }) + + return obj + + +def parse_mode(module, config, group, member): + mode = None + netcfg = CustomNetworkConfig(indent=1, contents=config) + parents = ['interface {0}'.format(member)] + body = netcfg.get_section(parents) + + match_int = re.findall(r'interface {0}\n'.format(member), body, re.M) + if match_int: + match = re.search(r'channel-group {0} mode (\S+)'.format(group), + body, re.M) + if match: + mode = match.group(1) + + return mode + + +def parse_members(module, config, group): + members = [] + + for line in config.strip().split('!'): + l = line.strip() + if l.startswith('interface'): + match_group = 
re.findall(r'channel-group {0} mode'.format(group), l, re.M) + if match_group: + match = re.search(r'interface (\S+)', l, re.M) + if match: + members.append(match.group(1)) + + return members + + +def get_channel(module, config, group): + match = re.findall(r'^interface (\S+)', config, re.M) + + if not match: + return {} + + channel = {} + for item in set(match): + member = item + channel['mode'] = parse_mode(module, config, group, member) + channel['members'] = parse_members(module, config, group) + + return channel + + +def map_config_to_obj(module): + objs = list() + config = get_config(module) + + for line in config.split('\n'): + l = line.strip() + match = re.search(r'interface port-channel(\S+)', l, re.M) + if match: + obj = {} + group = match.group(1) + obj['group'] = group + obj.update(get_channel(module, config, group)) + objs.append(obj) + + return objs + + +def main(): + """ main entry point for module execution + """ + element_spec = dict( + group=dict(type='int'), + mode=dict(choices=['active', 'on', 'passive']), + members=dict(type='list'), + state=dict(default='present', + choices=['present', 'absent']) + ) + + aggregate_spec = deepcopy(element_spec) + aggregate_spec['group'] = dict(required=True) + + required_one_of = [['group', 'aggregate']] + required_together = [['members', 'mode']] + mutually_exclusive = [['group', 'aggregate']] + + # remove default in aggregate spec, to handle common arguments + remove_default_spec(aggregate_spec) + + argument_spec = dict( + aggregate=dict(type='list', elements='dict', options=aggregate_spec, + required_together=required_together), + purge=dict(default=False, type='bool') + ) + + argument_spec.update(element_spec) + argument_spec.update(cnos_argument_spec) + + module = AnsibleModule(argument_spec=argument_spec, + required_one_of=required_one_of, + required_together=required_together, + mutually_exclusive=mutually_exclusive, + supports_check_mode=True) + + warnings = list() + result = {'changed': False} + if warnings: + result['warnings'] = warnings + + want = map_params_to_obj(module) + have = map_config_to_obj(module) + + commands = map_obj_to_commands((want, have), module) + result['commands'] = commands + + if commands: + if not module.check_mode: + load_config(module, commands) + result['changed'] = True + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/cnos/cnos_lldp.py b/plugins/modules/network/cnos/cnos_lldp.py new file mode 100644 index 0000000000..d9853de516 --- /dev/null +++ b/plugins/modules/network/cnos/cnos_lldp.py @@ -0,0 +1,139 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type +# +# Copyright (C) 2019 Lenovo. +# (c) 2017, Ansible by Red Hat, inc +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+#
+# Module to work on LLDP with Lenovo Switches
+# Lenovo Networking
+#
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+DOCUMENTATION = '''
+---
+module: cnos_lldp
+author: "Anil Kumar Muraleedharan (@amuraleedhar)"
+short_description: Manage LLDP configuration on Lenovo CNOS network devices.
+description:
+  - This module provides declarative management of the LLDP service
+    on Lenovo CNOS network devices.
+notes:
+  - Tested against CNOS 10.9.1
+options:
+  state:
+    description:
+      - State of the LLDP configuration. If the value is I(present), LLDP
+        will be enabled; if it is I(absent), LLDP will be disabled.
+    default: present
+    choices: ['present', 'absent']
+'''
+
+EXAMPLES = """
+- name: Enable LLDP service
+  cnos_lldp:
+    state: present
+
+- name: Disable LLDP service
+  cnos_lldp:
+    state: absent
+"""
+
+RETURN = """
+commands:
+  description: The list of configuration mode commands to send to the device
+  returned: always, except for the platforms that use Netconf transport to
+            manage the device.
+  type: list
+  sample:
+    - lldp timer 1024
+    - lldp trap-interval 330
+"""
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.network.cnos.cnos import get_config, load_config
+from ansible_collections.community.general.plugins.module_utils.network.cnos.cnos import cnos_argument_spec
+from ansible_collections.community.general.plugins.module_utils.network.cnos.cnos import debugOutput, run_commands
+from ansible.module_utils.connection import exec_command
+
+
+def get_ethernet_range(module):
+    output = run_commands(module, ['show interface brief'])[0].split('\n')
+    maxport = None
+    last_interface = None
+    for line in output:
+        if line.startswith('Ethernet1/'):
+            last_interface = line.split(' ')[0]
+    if last_interface is not None:
+        eths = last_interface.split('/')
+        maxport = eths[1]
+    return maxport
+
+
+def main():
+    """ main entry point for module execution
+    """
+    argument_spec = dict(
+        state=dict(default='present',
+                   choices=['present', 'absent'])
+    )
+
+    module = AnsibleModule(argument_spec=argument_spec,
+                           supports_check_mode=True)
+    warnings = list()
+    result = {'changed': False}
+    if warnings:
+        result['warnings'] = warnings
+
+    maxport = get_ethernet_range(module)
+    # Guard against empty 'show interface brief' output, which would leave
+    # maxport as None and break the range command below.
+    if maxport is None:
+        module.fail_json(msg='Could not determine the Ethernet port range '
+                             'from "show interface brief" output')
+    commands = []
+    prime_cmd = 'interface ethernet 1/1-' + maxport
+
+    if module.params['state'] == 'absent':
+        commands.append(prime_cmd)
+        commands.append('no lldp receive')
+        commands.append('no lldp transmit')
+        commands.append('exit')
+        commands.append('interface mgmt 0')
+        commands.append('no lldp receive')
+        commands.append('no lldp transmit')
+        commands.append('exit')
+    elif module.params['state'] == 'present':
+        commands.append(prime_cmd)
+        commands.append('lldp receive')
+        commands.append('lldp transmit')
+        commands.append('exit')
+        commands.append('interface mgmt 0')
+        commands.append('lldp receive')
+        commands.append('lldp transmit')
+        commands.append('exit')
+
+    result['commands'] = commands
+
+    if commands:
+        if not module.check_mode:
+            load_config(module, commands)
+
+        result['changed'] = True
+
+    module.exit_json(**result)
+
+
+if __name__ == '__main__':
+    main()
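get_ethernet_range() in cnos_lldp above walks the 'show interface brief' output
and keeps the port number of the last Ethernet1/N line, so LLDP can be toggled
across the whole front-panel range in one command. A self-contained sketch of
that parsing, using an invented three-port sample output (real CNOS output has
more columns):

    sample = """Ethernet1/1   1  eth  access  up  none  10G(D)  --
    Ethernet1/2   1  eth  access  up  none  10G(D)  --
    Ethernet1/54  1  eth  access  up  none  10G(D)  --"""

    maxport = None
    for line in sample.split('\n'):
        if line.startswith('Ethernet1/'):
            # Keep the last matching interface; its port number wins.
            maxport = line.split(' ')[0].split('/')[1]

    print('interface ethernet 1/1-' + maxport)  # -> interface ethernet 1/1-54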
diff --git a/plugins/modules/network/cnos/cnos_logging.py b/plugins/modules/network/cnos/cnos_logging.py
new file mode 100644
index 0000000000..e78174b4e6
--- /dev/null
+++ b/plugins/modules/network/cnos/cnos_logging.py
@@ -0,0 +1,425 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+#
+# Copyright (C) 2019 Lenovo, Inc.
+# (c) 2017, Ansible by Red Hat, inc
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+# Module to work on logging with Lenovo Switches
+# Lenovo Networking
+#
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+DOCUMENTATION = '''
+---
+module: cnos_logging
+author: "Anil Kumar Muraleedharan (@amuraleedhar)"
+short_description: Manage logging on network devices
+description:
+  - This module provides declarative management of logging
+    on Lenovo CNOS devices.
+notes:
+  - Tested against CNOS 10.9.1
+options:
+  dest:
+    description:
+      - Destination of the logs. Lenovo uses the term server instead of host in
+        its CLI.
+    choices: ['server', 'console', 'monitor', 'logfile']
+  name:
+    description:
+      - If the value of C(dest) is I(logfile), it indicates the file name,
+        and for I(server) it indicates the server name to be notified.
+  size:
+    description:
+      - Size of buffer. The acceptable value is in range from 4096 to
+        4294967295 bytes.
+    default: 10485760
+  facility:
+    description:
+      - Set logging facility. This is applicable only for server logging
+  level:
+    description:
+      - Set logging severity levels. 0-emerg;1-alert;2-crit;3-err;4-warn;
+        5-notif;6-inform;7-debug
+    default: 5
+  aggregate:
+    description: List of logging definitions.
+  state:
+    description:
+      - State of the logging configuration.
+    default: present
+    choices: ['present', 'absent']
+'''
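+
+# Rough shape of the CLI commands this module emits (an informal sketch,
+# derived from map_obj_to_commands() below; names and values are examples):
+#     dest=console, level=7                        -> logging console 7
+#     dest=server, name=10.0.0.1, level=6,
+#         facility=local7                          -> logging server 10.0.0.1 6 facility local7
+#     dest=logfile, name=flog, level=5, size=8192  -> logging logfile flog 5 size 8192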
+
+EXAMPLES = """
+- name: configure server logging
+  cnos_logging:
+    dest: server
+    name: 10.241.107.224
+    facility: local7
+    state: present
+
+- name: remove server logging configuration
+  cnos_logging:
+    dest: server
+    name: 10.241.107.224
+    state: absent
+
+- name: configure console logging level and facility
+  cnos_logging:
+    dest: console
+    level: 7
+    state: present
+
+- name: configure buffer size
+  cnos_logging:
+    dest: logfile
+    level: 5
+    name: testfile
+    size: 5000
+
+- name: Configure logging using aggregate
+  cnos_logging:
+    aggregate:
+      - { dest: console, level: 6 }
+      - { dest: logfile, size: 9000 }
+
+- name: remove logging using aggregate
+  cnos_logging:
+    aggregate:
+      - { dest: console, level: 6 }
+      - { dest: logfile, name: anil, size: 9000 }
+    state: absent
+"""
+
+RETURN = """
+commands:
+  description: The list of configuration mode commands to send to the device
+  returned: always
+  type: list
+  sample:
+    - logging console 7
+    - logging server 10.241.107.224
+"""
+
+import re
+
+from copy import deepcopy
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import validate_ip_address
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import remove_default_spec
+from ansible_collections.community.general.plugins.module_utils.network.cnos.cnos import get_config, load_config
+from ansible_collections.community.general.plugins.module_utils.network.cnos.cnos import get_capabilities
+from ansible_collections.community.general.plugins.module_utils.network.cnos.cnos import check_args
+from ansible_collections.community.general.plugins.module_utils.network.cnos.cnos import cnos_argument_spec
+
+
+def validate_size(value, module):
+    if value:
+        if not int(4096) <= int(value) <= int(4294967295):
+            module.fail_json(msg='size must be between 4096 and 4294967295')
+        else:
+            return value
+
+
+def map_obj_to_commands(updates, module):
+    dest_group = ('console', 'monitor', 'logfile', 'server')
+    commands = list()
+    want, have = updates
+    for w in want:
+        dest = w['dest']
+        name = w['name']
+        size = w['size']
+        facility = w['facility']
+        level = w['level']
+        state = w['state']
+        del w['state']
+
+        if state == 'absent':
+            if dest:
+
+                if dest == 'server':
+                    commands.append('no logging server {0}'.format(name))
+
+                elif dest in dest_group:
+                    commands.append('no logging {0}'.format(dest))
+
+                else:
+                    module.fail_json(msg='dest must be among console, monitor, logfile, server')
+
+        if state == 'present' and w not in have:
+            if dest == 'server':
+                cmd_str = 'logging server {0}'.format(name)
+                # level is a string in the argument spec; compare numerically
+                if level is not None and 0 < int(level) < 8:
+                    cmd_str = cmd_str + ' ' + level
+                if facility is not None:
+                    cmd_str = cmd_str + ' facility ' + facility
+                commands.append(cmd_str)
+
+            elif dest == 'logfile' and size:
+                present = False
+
+                for entry in have:
+                    if entry['dest'] == 'logfile' and entry['size'] == size and entry['level'] == level:
+                        present = True
+
+                if not present:
+                    cmd_str = 'logging logfile '
+                    if name is not None:
+                        cmd_str = cmd_str + name
+                        if level and level != '7':
+                            cmd_str = cmd_str + ' ' + level
+                        else:
+                            cmd_str = cmd_str + ' 7'
+                        if size is not None:
+                            cmd_str = cmd_str + ' size ' + size
+                        commands.append(cmd_str)
+                    else:
+                        module.fail_json(msg='Name of the logfile is a mandatory parameter')
+
+            else:
+                if dest:
+                    dest_cmd = 'logging {0}'.format(dest)
+                    if level:
+                        dest_cmd += ' {0}'.format(level)
commands.append(dest_cmd) + return commands + + +def parse_facility(line, dest): + facility = None + if dest == 'server': + result = line.split() + i = 0 + for x in result: + if x == 'facility': + return result[i + 1] + i = i + 1 + return facility + + +def parse_size(line, dest): + size = None + if dest == 'logfile': + if 'logging logfile' in line: + result = line.split() + i = 0 + for x in result: + if x == 'size': + return result[i + 1] + i = i + 1 + return '10485760' + return size + + +def parse_name(line, dest): + name = None + if dest == 'server': + if 'logging server' in line: + result = line.split() + i = 0 + for x in result: + if x == 'server': + name = result[i + 1] + elif dest == 'logfile': + if 'logging logfile' in line: + result = line.split() + i = 0 + for x in result: + if x == 'logfile': + name = result[i + 1] + else: + name = None + return name + + +def parse_level(line, dest): + level_group = ('0', '1', '2', '3', '4', '5', '6', '7') + level = '7' + if dest == 'server': + if 'logging server' in line: + result = line.split() + if(len(result) > 3): + if result[3].isdigit(): + level = result[3] + else: + if dest == 'logfile': + if 'logging logfile' in line: + result = line.split() + if result[3].isdigit(): + level = result[3] + else: + match = re.search(r'logging {0} (\S+)'.format(dest), line, re.M) + + return level + + +def map_config_to_obj(module): + obj = [] + dest_group = ('console', 'server', 'monitor', 'logfile') + data = get_config(module, flags=['| include logging']) + index = 0 + for line in data.split('\n'): + logs = line.split() + index = len(logs) + if index == 0 or index == 1: + continue + if logs[0] != 'logging': + continue + if logs[1] == 'monitor' or logs[1] == 'console': + obj.append({'dest': logs[1], 'level': logs[2]}) + elif logs[1] == 'logfile': + level = '5' + if index > 3 and logs[3].isdigit(): + level = logs[3] + size = '10485760' + if len(logs) > 4: + size = logs[5] + obj.append({'dest': logs[1], 'name': logs[2], 'size': size, 'level': level}) + elif logs[1] == 'server': + level = '5' + facility = None + + if index > 3 and logs[3].isdigit(): + level = logs[3] + if index > 3 and logs[3] == 'facility': + facility = logs[4] + if index > 4 and logs[4] == 'facility': + facility = logs[5] + obj.append({'dest': logs[1], 'name': logs[2], 'facility': facility, 'level': level}) + else: + continue + return obj + + +def map_params_to_obj(module, required_if=None): + obj = [] + aggregate = module.params.get('aggregate') + + if aggregate: + for item in aggregate: + for key in item: + if item.get(key) is None: + item[key] = module.params[key] + + module._check_required_if(required_if, item) + + d = item.copy() + if d['dest'] != 'server' and d['dest'] != 'logfile': + d['name'] = None + + if d['dest'] == 'logfile': + if 'size' in d: + d['size'] = str(validate_size(d['size'], module)) + elif 'size' not in d: + d['size'] = str(10485760) + else: + pass + + if d['dest'] != 'logfile': + d['size'] = None + + obj.append(d) + + else: + if module.params['dest'] != 'server' and module.params['dest'] != 'logfile': + module.params['name'] = None + + if module.params['dest'] == 'logfile': + if not module.params['size']: + module.params['size'] = str(10485760) + else: + module.params['size'] = None + + if module.params['size'] is None: + obj.append({ + 'dest': module.params['dest'], + 'name': module.params['name'], + 'size': module.params['size'], + 'facility': module.params['facility'], + 'level': module.params['level'], + 'state': module.params['state'] + }) + + else: + 
obj.append({ + 'dest': module.params['dest'], + 'name': module.params['name'], + 'size': str(validate_size(module.params['size'], module)), + 'facility': module.params['facility'], + 'level': module.params['level'], + 'state': module.params['state'] + }) + return obj + + +def main(): + """ main entry point for module execution + """ + element_spec = dict( + dest=dict(type='str', + choices=['server', 'console', 'monitor', 'logfile']), + name=dict(type='str'), + size=dict(type='int', default=10485760), + facility=dict(type='str'), + level=dict(type='str', default='5'), + state=dict(default='present', choices=['present', 'absent']), + ) + + aggregate_spec = deepcopy(element_spec) + + # remove default in aggregate spec, to handle common arguments + remove_default_spec(aggregate_spec) + + argument_spec = dict( + aggregate=dict(type='list', elements='dict', options=aggregate_spec), + ) + + argument_spec.update(element_spec) + + required_if = [('dest', 'server', ['name'])] + + module = AnsibleModule(argument_spec=argument_spec, + required_if=required_if, + supports_check_mode=True) + warnings = list() + check_args(module, warnings) + + result = {'changed': False} + if warnings: + result['warnings'] = warnings + + want = map_params_to_obj(module, required_if=required_if) + have = map_config_to_obj(module) + + commands = map_obj_to_commands((want, have), module) + result['commands'] = commands + + if commands: + if not module.check_mode: + load_config(module, commands) + result['changed'] = True + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/cnos/cnos_reload.py b/plugins/modules/network/cnos/cnos_reload.py new file mode 100644 index 0000000000..0d456e9028 --- /dev/null +++ b/plugins/modules/network/cnos/cnos_reload.py @@ -0,0 +1,114 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type +# +# Copyright (C) 2017 Lenovo, Inc. +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# +# Module to reload Lenovo Switches +# Lenovo Networking +# + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: cnos_reload +author: "Anil Kumar Muraleedharan (@amuraleedhar)" +short_description: Perform switch restart on devices running Lenovo CNOS +description: + - This module allows you to restart the switch using the current startup + configuration. The module is usually invoked after the running + configuration has been saved over the startup configuration. + This module uses SSH to manage network device configuration. + The results of the operation can be viewed in results directory. +extends_documentation_fragment: +- community.general.cnos + +options: {} + +''' +EXAMPLES = ''' +Tasks : The following are examples of using the module cnos_reload. These are + written in the main.yml file of the tasks directory. 
+---
+- name: Test Reload
+  cnos_reload:
+    deviceType: "{{ hostvars[inventory_hostname]['deviceType'] }}"
+    outputfile: "./results/test_reload_{{ inventory_hostname }}_output.txt"
+'''
+RETURN = '''
+msg:
+  description: Success or failure message
+  returned: always
+  type: str
+  sample: "Device is Reloading. Please wait..."
+'''
+
+import sys
+import time
+import socket
+import array
+import json
+import re
+try:
+    from ansible_collections.community.general.plugins.module_utils.network.cnos import cnos
+    HAS_LIB = True
+except Exception:
+    HAS_LIB = False
+from ansible.module_utils.basic import AnsibleModule
+from collections import defaultdict
+
+
+def main():
+    module = AnsibleModule(
+        argument_spec=dict(
+            outputfile=dict(required=True),
+            host=dict(required=False),
+            username=dict(required=False),
+            password=dict(required=False, no_log=True),
+            enablePassword=dict(required=False, no_log=True),
+            deviceType=dict(required=True),),
+        supports_check_mode=False)
+
+    command = 'reload'
+    outputfile = module.params['outputfile']
+    output = ''
+    cmd = [{'command': command, 'prompt': 'reboot system? (y/n): ',
+            'answer': 'y'}]
+    output = output + str(cnos.run_cnos_commands(module, cmd))
+
+    # Save it into the file
+    file = open(outputfile, "a")
+    file.write(output)
+    file.close()
+
+    errorMsg = cnos.checkOutputForError(output)
+    # A reload normally drops the SSH session, which surfaces as a
+    # "Device Response Timed out" error; treat that (or no error) as success.
+    if errorMsg is None or 'Device Response Timed out' in errorMsg:
+        module.exit_json(changed=True,
+                         msg="Device is Reloading. Please wait...")
+    else:
+        module.fail_json(msg=errorMsg)
+
+
+if __name__ == '__main__':
+    main()
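cnos_reload above, like the other SSH-driven cnos modules, expresses an
interactive CLI exchange as a list of command/prompt/answer dicts that
run_cnos_commands() (in the collection's module_utils, not shown in this
patch) replays against the device. A minimal sketch of that shape, reusing
the reload exchange verbatim:

    cmd = [{'command': 'reload',
            'prompt': 'reboot system? (y/n): ',
            'answer': 'y'}]

    for step in cmd:
        # Each step sends a command, waits for the prompt, then replies.
        print('send: {command!r}  expect: {prompt!r}  reply: {answer!r}'.format(**step))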
diff --git a/plugins/modules/network/cnos/cnos_rollback.py b/plugins/modules/network/cnos/cnos_rollback.py
new file mode 100644
index 0000000000..f8a2e1179c
--- /dev/null
+++ b/plugins/modules/network/cnos/cnos_rollback.py
@@ -0,0 +1,285 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+#
+# Copyright (C) 2017 Lenovo, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+# Module to Rollback Config back to Lenovo Switches
+#
+# Lenovo Networking
+#
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: cnos_rollback
+author: "Anil Kumar Muraleedharan (@amuraleedhar)"
+short_description: Roll back the running or startup configuration from a remote
+                   server on devices running Lenovo CNOS
+description:
+  - This module allows you to work with switch configurations. It provides a
+    way to roll back configurations of a switch from a remote server. This is
+    achieved by using startup or running configurations of the target device
+    that were previously backed up to a remote server using FTP, SFTP, TFTP,
+    or SCP. The first step is to create a directory from where the remote
+    server can be reached. The next step is to provide the full file path of
+    the backup configuration's location. Authentication details required by the
+    remote server must be provided as well.
+    By default, this method overwrites the switch's configuration file with
+    the newly downloaded file. This module uses SSH to manage network device
+    configuration. The results of the operation will be placed in a directory
+    named 'results' that must be created by the user in their local directory
+    to where the playbook is run.
+extends_documentation_fragment:
+- community.general.cnos
+
+options:
+  configType:
+    description:
+      - This refers to the type of configuration which will be used for
+        the rolling back process. The choices are the running or startup
+        configurations. There is no default value, so it will result
+        in an error if the input is incorrect.
+    required: Yes
+    default: Null
+    choices: [running-config, startup-config]
+  protocol:
+    description:
+      - This refers to the protocol used by the network device to
+        interact with the remote server from where to download the backup
+        configuration. The choices are FTP, SFTP, TFTP, or SCP. Any other
+        protocol will result in an error. If this parameter is not
+        specified, there is no default value to be used.
+    required: Yes
+    default: Null
+    choices: [SFTP, SCP, FTP, TFTP]
+  serverip:
+    description:
+      - This specifies the IP Address of the remote server from where the
+        backup configuration will be downloaded.
+    required: Yes
+    default: Null
+  rcpath:
+    description:
+      - This specifies the full file path of the configuration file
+        located on the remote server. In case the relative path is used as
+        the variable value, the root folder for the user of the server
+        needs to be specified.
+    required: Yes
+    default: Null
+  serverusername:
+    description:
+      - Specify username for the server relating to the protocol used.
+    required: Yes
+    default: Null
+  serverpassword:
+    description:
+      - Specify password for the server relating to the protocol used.
+    required: Yes
+    default: Null
+'''
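+# Shape of the copy command that doConfigRollBack() below builds (an informal
+# sketch; server, user and path are placeholders, and rcpath is given without
+# a leading slash here):
+#     copy sftp sftp://root@10.241.106.118/cnos/backup.txt running-config vrf management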
+EXAMPLES = '''
+Tasks : The following are examples of using the module cnos_rollback.
+        These are written in the main.yml file of the tasks directory.
+---
+
+- name: Test Rollback of config - Running config
+  cnos_rollback:
+    deviceType: "{{ hostvars[inventory_hostname]['deviceType'] }}"
+    outputfile: "./results/test_rollback_{{ inventory_hostname }}_output.txt"
+    configType: running-config
+    protocol: "sftp"
+    serverip: "10.241.106.118"
+    rcpath: "/root/cnos/G8272-running-config.txt"
+    serverusername: "root"
+    serverpassword: "root123"
+
+- name: Test Rollback of config - Startup config
+  cnos_rollback:
+    deviceType: "{{ hostvars[inventory_hostname]['deviceType'] }}"
+    outputfile: "./results/test_rollback_{{ inventory_hostname }}_output.txt"
+    configType: startup-config
+    protocol: "sftp"
+    serverip: "10.241.106.118"
+    rcpath: "/root/cnos/G8272-startup-config.txt"
+    serverusername: "root"
+    serverpassword: "root123"
+
+- name: Test Rollback of config - Running config - TFTP
+  cnos_rollback:
+    deviceType: "{{ hostvars[inventory_hostname]['deviceType'] }}"
+    outputfile: "./results/test_rollback_{{ inventory_hostname }}_output.txt"
+    configType: running-config
+    protocol: "tftp"
+    serverip: "10.241.106.118"
+    rcpath: "/anil/G8272-running-config.txt"
+    serverusername: "root"
+    serverpassword: "root123"
+
+- name: Test Rollback of config - Startup config - TFTP
+  cnos_rollback:
+    deviceType: "{{ hostvars[inventory_hostname]['deviceType'] }}"
+    outputfile: "./results/test_rollback_{{ inventory_hostname }}_output.txt"
+    configType: startup-config
+    protocol: "tftp"
+    serverip: "10.241.106.118"
+    rcpath: "/anil/G8272-startup-config.txt"
+    serverusername: "root"
+    serverpassword: "root123"
+
+'''
+RETURN = '''
+msg:
+  description: Success or failure message
+  returned: always
+  type: str
+  sample: "Config file transferred to Device"
+'''
+
+import sys
+import time
+import socket
+import array
+import json
+import re
+import os
+try:
+    from ansible_collections.community.general.plugins.module_utils.network.cnos import cnos
+    HAS_LIB = True
+except Exception:
+    HAS_LIB = False
+from ansible.module_utils.basic import AnsibleModule
+from collections import defaultdict
+
+
+# Utility Method to rollback the running config or start up config
+# This method supports only SCP or SFTP or FTP or TFTP
+def doConfigRollBack(module, prompt, answer):
+    host = module.params['host']
+    server = module.params['serverip']
+    username = module.params['serverusername']
+    password = module.params['serverpassword']
+    protocol = module.params['protocol'].lower()
+    rcPath = module.params['rcpath']
+    configType = module.params['configType']
+    confPath = rcPath
+    retVal = ''
+
+    command = "copy " + protocol + " " + protocol + "://"
+    command = command + username + "@" + server + "/" + confPath
+    command = command + " " + configType + " vrf management\n"
+    cnos.debugOutput(command + "\n")
+    # cnos.checkForFirstTimeAccess(module, command, 'yes/no', 'yes')
+    cmd = []
+    if(protocol == "scp"):
+        scp_cmd1 = [{'command': command, 'prompt': 'timeout:', 'answer': '0'}]
+        scp_cmd2 = [{'command': '\n', 'prompt': 'Password:',
+                     'answer': password}]
+        cmd.extend(scp_cmd1)
+        cmd.extend(scp_cmd2)
+        if(configType == 'startup-config'):
+            scp_cmd3 = [{'command': 'y', 'prompt': None, 'answer': None}]
+            cmd.extend(scp_cmd3)
+        retVal = retVal + str(cnos.run_cnos_commands(module, cmd))
+    elif(protocol == "sftp"):
+        sftp_cmd = [{'command': command, 'prompt': 'Password:',
+                     'answer': password}]
+        cmd.extend(sftp_cmd)
+        # cnos.debugOutput(configType + "\n")
+        if(configType == 'startup-config'):
+            sftp_cmd2 = [{'command': 'y', 'prompt': None, 'answer': None}]
+            cmd.extend(sftp_cmd2)
retVal = retVal + str(cnos.run_cnos_commands(module, cmd)) + elif(protocol == "ftp"): + ftp_cmd = [{'command': command, 'prompt': 'Password:', + 'answer': password}] + cmd.extend(ftp_cmd) + if(configType == 'startup-config'): + ftp_cmd2 = [{'command': 'y', 'prompt': None, 'answer': None}] + cmd.extend(ftp_cmd2) + retVal = retVal + str(cnos.run_cnos_commands(module, cmd)) + elif(protocol == "tftp"): + command = "copy " + protocol + " " + protocol + command = command + "://" + server + "/" + confPath + command = command + " " + configType + " vrf management\n" + cnos.debugOutput(command) + tftp_cmd = [{'command': command, 'prompt': None, 'answer': None}] + cmd.extend(tftp_cmd) + if(configType == 'startup-config'): + tftp_cmd2 = [{'command': 'y', 'prompt': None, 'answer': None}] + cmd.extend(tftp_cmd2) + retVal = retVal + str(cnos.run_cnos_commands(module, cmd)) + else: + return "Error-110" + + return retVal +# EOM + + +def main(): + module = AnsibleModule( + argument_spec=dict( + outputfile=dict(required=True), + host=dict(required=False), + username=dict(required=False), + password=dict(required=False, no_log=True), + enablePassword=dict(required=False, no_log=True), + deviceType=dict(required=True), + configType=dict(required=True), + protocol=dict(required=True), + serverip=dict(required=True), + rcpath=dict(required=True), + serverusername=dict(required=False), + serverpassword=dict(required=False, no_log=True),), + supports_check_mode=False) + + outputfile = module.params['outputfile'] + protocol = module.params['protocol'].lower() + output = '' + if protocol in ('tftp', 'ftp', 'sftp', 'scp'): + transfer_status = doConfigRollBack(module, None, None) + else: + transfer_status = 'Invalid Protocol option' + output = output + "\n Config Transfer status \n" + transfer_status + + # Save it into the file + if '/' in outputfile: + path = outputfile.rsplit('/', 1) + # cnos.debugOutput(path[0]) + if not os.path.exists(path[0]): + os.makedirs(path[0]) + file = open(outputfile, "a") + file.write(output) + file.close() + + # need to add logic to check when changes occur or not + errorMsg = cnos.checkOutputForError(output) + if(errorMsg is None): + module.exit_json(changed=True, msg="Config file transferred to Device") + else: + module.fail_json(msg=errorMsg) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/cnos/cnos_save.py b/plugins/modules/network/cnos/cnos_save.py new file mode 100644 index 0000000000..2d03be1fb8 --- /dev/null +++ b/plugins/modules/network/cnos/cnos_save.py @@ -0,0 +1,116 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type +# +# Copyright (C) 2017 Lenovo, Inc. +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+# +# Module to save running config to start up config to Lenovo Switches +# Lenovo Networking +# + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: cnos_save +author: "Anil Kumar Muraleedharan (@amuraleedhar)" +short_description: Save the running configuration as the startup configuration + on devices running Lenovo CNOS +description: + - This module allows you to copy the running configuration of a switch over + its startup configuration. It is recommended to use this module shortly + after any major configuration changes so they persist after a switch + restart. This module uses SSH to manage network device configuration. + The results of the operation will be placed in a directory named 'results' + that must be created by the user in their local directory to where the + playbook is run. +extends_documentation_fragment: +- community.general.cnos + +options: {} + +''' +EXAMPLES = ''' +Tasks : The following are examples of using the module cnos_save. These are + written in the main.yml file of the tasks directory. +--- +- name: Test Save + cnos_save: + deviceType: "{{ hostvars[inventory_hostname]['deviceType'] }}" + outputfile: "./results/test_save_{{ inventory_hostname }}_output.txt" +''' +RETURN = ''' +msg: + description: Success or failure message + returned: always + type: str + sample: "Switch Running Config is Saved to Startup Config" +''' + +import sys +import time +import socket +import array +import json +import time +import re +try: + from ansible_collections.community.general.plugins.module_utils.network.cnos import cnos + HAS_LIB = True +except Exception: + HAS_LIB = False +from ansible.module_utils.basic import AnsibleModule +from collections import defaultdict + + +def main(): + module = AnsibleModule( + argument_spec=dict( + outputfile=dict(required=True), + host=dict(required=False), + username=dict(required=False), + password=dict(required=False, no_log=True), + enablePassword=dict(required=False, no_log=True), + deviceType=dict(required=True),), + supports_check_mode=False) + + command = 'write memory' + outputfile = module.params['outputfile'] + output = '' + cmd = [{'command': command, 'prompt': None, 'answer': None}] + output = output + str(cnos.run_cnos_commands(module, cmd)) + + # Save it into the file + file = open(outputfile, "a") + file.write(output) + file.close() + + errorMsg = cnos.checkOutputForError(output) + if(errorMsg is None): + module.exit_json(changed=True, + msg="Switch Running Config is Saved to Startup Config ") + else: + module.fail_json(msg=errorMsg) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/cnos/cnos_showrun.py b/plugins/modules/network/cnos/cnos_showrun.py new file mode 100644 index 0000000000..3f161f5028 --- /dev/null +++ b/plugins/modules/network/cnos/cnos_showrun.py @@ -0,0 +1,114 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type +# +# Copyright (C) 2017 Lenovo, Inc. +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+# Module to display the running config of Lenovo Switches
+# Lenovo Networking
+#
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: cnos_showrun
+author: "Anil Kumar Muraleedharan (@amuraleedhar)"
+short_description: Collect the current running configuration on devices running
+                   Lenovo CNOS
+description:
+    - This module allows you to view the switch running configuration. It
+      executes the C(show running-config) CLI command on a switch and returns a
+      file containing the current running configuration of the target network
+      device. This module uses SSH to manage network device configuration.
+      The results of the operation will be placed in a directory named 'results'
+      that must be created by the user in the local directory from which the
+      playbook is run.
+extends_documentation_fragment:
+- community.general.cnos
+
+options: {}
+
+'''
+EXAMPLES = '''
+Tasks : The following are examples of using the module cnos_showrun. These are
+        written in the main.yml file of the tasks directory.
+---
+- name: Run show running-config
+  cnos_showrun:
+    deviceType: "{{ hostvars[inventory_hostname]['deviceType'] }}"
+    outputfile: "./results/test_showrun_{{ inventory_hostname }}_output.txt"
+
+'''
+RETURN = '''
+msg:
+  description: Success or failure message
+  returned: always
+  type: str
+  sample: "Running Configuration saved in file"
+'''
+
+import sys
+import time
+import socket
+import array
+import json
+import re
+try:
+    from ansible_collections.community.general.plugins.module_utils.network.cnos import cnos
+    HAS_LIB = True
+except Exception:
+    HAS_LIB = False
+from ansible.module_utils.basic import AnsibleModule
+from collections import defaultdict
+
+
+def main():
+    module = AnsibleModule(
+        argument_spec=dict(
+            outputfile=dict(required=True),
+            host=dict(required=False),
+            username=dict(required=False),
+            password=dict(required=False, no_log=True),
+            enablePassword=dict(required=False, no_log=True),),
+        supports_check_mode=False)
+
+    command = 'show running-config'
+    outputfile = module.params['outputfile']
+    output = ''
+    cmd = [{'command': command, 'prompt': None, 'answer': None}]
+    output = output + str(cnos.run_cnos_commands(module, cmd))
+
+    # Append the command output to the user-supplied results file
+    with open(outputfile, "a") as f:
+        f.write(output)
+
+    errorMsg = cnos.checkOutputForError(output)
+    if errorMsg is None:
+        module.exit_json(changed=True,
+                         msg="Running Configuration saved in file")
+    else:
+        module.fail_json(msg=errorMsg)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/network/cnos/cnos_static_route.py b/plugins/modules/network/cnos/cnos_static_route.py
new file mode 100644
index 0000000000..0195831c5b
--- /dev/null
+++ b/plugins/modules/network/cnos/cnos_static_route.py
@@ -0,0 +1,288 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+#
+# Copyright (C) 2019 Lenovo, Inc.
+# (c) 2017, Ansible by Red Hat, inc
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
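The two modules just shown, cnos_save and cnos_showrun, share one skeleton: build a one-element command list, hand it to cnos.run_cnos_commands(), append the output to the user-created results file, and gate the exit status on cnos.checkOutputForError(). A minimal, hedged sketch of that run-and-save pattern, with a stubbed runner standing in for the real SSH-backed helper:

```python
# Sketch of the run-and-save pattern used by cnos_save/cnos_showrun above.
# `runner` stands in for cnos.run_cnos_commands(module, cmd); the real helper
# talks to the switch over SSH.
def run_and_save(runner, command, outputfile):
    cmd = [{'command': command, 'prompt': None, 'answer': None}]
    output = str(runner(cmd))
    with open(outputfile, 'a') as f:  # the modules append, never truncate
        f.write(output)
    return output


# Usage with a dummy runner that simply echoes its commands:
def echo(cmd):
    return [c['command'] for c in cmd]


print(run_and_save(echo, 'show running-config', '/tmp/showrun.txt'))
```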
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+# Module to manage static IP routes on Lenovo Switches
+# Lenovo Networking
+#
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+DOCUMENTATION = '''
+---
+module: cnos_static_route
+author: "Anil Kumar Muraleedharan (@amuraleedhar)"
+short_description: Manage static IP routes on Lenovo CNOS network devices
+description:
+  - This module provides declarative management of static
+    IP routes on Lenovo CNOS network devices.
+notes:
+  - Tested against CNOS 10.10.1
+options:
+  prefix:
+    description:
+      - Network prefix of the static route.
+  mask:
+    description:
+      - Network prefix mask of the static route.
+  next_hop:
+    description:
+      - Next hop IP of the static route.
+  interface:
+    description:
+      - Interface of the static route.
+  description:
+    description:
+      - Name of the static route.
+  admin_distance:
+    description:
+      - Admin distance of the static route.
+    default: 1
+  tag:
+    description:
+      - Set tag of the static route.
+  aggregate:
+    description: List of static route definitions.
+  state:
+    description:
+      - State of the static route configuration.
+    default: present
+    choices: ['present', 'absent']
+'''
+
+EXAMPLES = """
+- name: configure static route
+  cnos_static_route:
+    prefix: 10.241.107.0
+    mask: 255.255.255.0
+    next_hop: 10.241.106.1
+
+- name: configure ultimate route with name and tag
+  cnos_static_route:
+    prefix: 10.241.107.0
+    mask: 255.255.255.0
+    interface: Ethernet1/13
+    description: hello world
+    tag: 100
+
+- name: remove configuration
+  cnos_static_route:
+    prefix: 10.241.107.0
+    mask: 255.255.255.0
+    next_hop: 10.241.106.0
+    state: absent
+
+- name: Add static route aggregates
+  cnos_static_route:
+    aggregate:
+      - { prefix: 10.241.107.0, mask: 255.255.255.0, next_hop: 10.241.105.0 }
+      - { prefix: 10.241.106.0, mask: 255.255.255.0, next_hop: 10.241.104.0 }
+
+- name: Remove static route aggregates
+  cnos_static_route:
+    aggregate:
+      - { prefix: 10.241.107.0, mask: 255.255.255.0, next_hop: 10.241.105.0 }
+      - { prefix: 10.241.106.0, mask: 255.255.255.0, next_hop: 10.241.104.0 }
+    state: absent
+"""
+
+RETURN = """
+commands:
+  description: The list of configuration mode commands to send to the device
+  returned: always
+  type: list
+  sample:
+    - ip route 10.241.107.0 255.255.255.0 10.241.106.0
+"""
+from copy import deepcopy
+from re import findall
+from ansible_collections.ansible.netcommon.plugins.module_utils.compat import ipaddress
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import validate_ip_address
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import remove_default_spec
+from ansible_collections.community.general.plugins.module_utils.network.cnos.cnos import get_config, load_config
+from ansible_collections.community.general.plugins.module_utils.network.cnos.cnos import check_args
+from ansible_collections.community.general.plugins.module_utils.network.cnos.cnos import cnos_argument_spec
+
+
+def map_obj_to_commands(want, have):
+    commands = list()
+
+    for w in want:
+        state = w['state']
+        command = 'ip route'
+        prefix = w['prefix']
+        mask = w['mask']
+        command = ' '.join((command, prefix, mask))
+
+        for key in ['interface', 'next_hop', 'admin_distance', 'tag',
+                    'description']:
+            if w.get(key):
+                if key == 'description' and len(w.get(key).split()) > 1:
+                    # name with multiple words needs to be quoted
+                    command = ' '.join((command, key, '"%s"' % w.get(key)))
+                elif key in ('description', 'tag'):
+                    command = ' '.join((command, key, w.get(key)))
+                else:
+                    command = ' '.join((command, w.get(key)))
+
+        if state == 'absent':
+            commands.append('no %s' % command)
+        elif state == 'present':
+            commands.append(command)
+
+    return commands
+
+
+def map_config_to_obj(module):
+    obj = []
+
+    out = get_config(module, flags='| include ip route')
+    for line in out.splitlines():
+        # Split by whitespace but do not split quotes, needed for description
+        splitted_line = findall(r'[^"\s]\S*|".+?"', line)
+        route = {}
+        prefix_with_mask = splitted_line[2]
+        prefix = None
+        mask = None
+        iface = None
+        nhop = None
+        if validate_ip_address(prefix_with_mask) is True:
+            my_net = ipaddress.ip_network(prefix_with_mask)
+            prefix = str(my_net.network_address)
+            mask = str(my_net.netmask)
+            route.update({'prefix': prefix,
+                          'mask': mask, 'admin_distance': '1'})
+
+        if splitted_line[3] is not None:
+            if validate_ip_address(splitted_line[3]) is False:
+                iface = str(splitted_line[3])
+                route.update(interface=iface)
+                if validate_ip_address(splitted_line[4]) is True:
+                    nhop = str(splitted_line[4])
+                    route.update(next_hop=nhop)
+                    if splitted_line[5].isdigit():
+                        route.update(admin_distance=str(splitted_line[5]))
+                elif splitted_line[4].isdigit():
+                    route.update(admin_distance=str(splitted_line[4]))
+                else:
+                    if splitted_line[6] is not None and splitted_line[6].isdigit():
+                        route.update(admin_distance=str(splitted_line[6]))
+            else:
+                nhop = str(splitted_line[3])
+                route.update(next_hop=nhop)
+                if splitted_line[4].isdigit():
+                    route.update(admin_distance=str(splitted_line[4]))
+
+        index = 0
+        for word in splitted_line:
+            if word in ('tag', 'description'):
+                # Use the matched keyword itself as the dict key,
+                # not the literal string 'word'
+                route.update({word: splitted_line[index + 1]})
+            index = index + 1
+        obj.append(route)
+
+    return obj
+
+
+def map_params_to_obj(module, required_together=None):
+    keys = ['prefix', 'mask', 'state', 'next_hop', 'interface', 'description',
+            'admin_distance', 'tag']
+    obj = []
+
+    aggregate = module.params.get('aggregate')
+    if aggregate:
+        for item in aggregate:
+            route = item.copy()
+            for key in keys:
+                if route.get(key) is None:
+                    route[key] = module.params.get(key)
+
+            route = dict((k, v) for k, v in route.items() if v is not None)
+            module._check_required_together(required_together, route)
+            obj.append(route)
+    else:
+        module._check_required_together(required_together, module.params)
+        route = dict()
+        for key in keys:
+            if module.params.get(key) is not None:
+                route[key] = module.params.get(key)
+        obj.append(route)
+
+    return obj
+
+
+def main():
+    """ main entry point for module execution
+    """
+    element_spec = dict(
+        prefix=dict(type='str'),
+        mask=dict(type='str'),
+        next_hop=dict(type='str'),
+        interface=dict(type='str'),
+        description=dict(type='str'),
+        admin_distance=dict(type='str', default='1'),
+        tag=dict(type='str'),
+        state=dict(default='present', choices=['present', 'absent'])
+    )
+
+    aggregate_spec = deepcopy(element_spec)
+    aggregate_spec['prefix'] = dict(required=True)
+
+    # remove default in aggregate spec, to handle common arguments
+    remove_default_spec(aggregate_spec)
+
+    argument_spec = dict(
+        aggregate=dict(type='list', elements='dict',
options=aggregate_spec), + ) + argument_spec.update(element_spec) + + required_one_of = [['aggregate', 'prefix']] + required_together = [['prefix', 'mask']] + mutually_exclusive = [['aggregate', 'prefix']] + + module = AnsibleModule(argument_spec=argument_spec, + required_one_of=required_one_of, + mutually_exclusive=mutually_exclusive, + supports_check_mode=True) + + warnings = list() + check_args(module, warnings) + + result = {'changed': False} + if warnings: + result['warnings'] = warnings + want = map_params_to_obj(module, required_together=required_together) + have = map_config_to_obj(module) + + commands = map_obj_to_commands(want, have) + result['commands'] = commands + if commands: + if not module.check_mode: + load_config(module, commands) + + result['changed'] = True + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/cnos/cnos_system.py b/plugins/modules/network/cnos/cnos_system.py new file mode 100644 index 0000000000..6c72c44a43 --- /dev/null +++ b/plugins/modules/network/cnos/cnos_system.py @@ -0,0 +1,387 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type +# +# Copyright (C) 2019 Lenovo. +# (c) 2017, Ansible by Red Hat, inc +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# +# Module to work on System Configuration with Lenovo Switches +# Lenovo Networking +# +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: cnos_system +author: "Anil Kumar Muraleedharan (@amuraleedhar)" +short_description: Manage the system attributes on Lenovo CNOS devices +description: + - This module provides declarative management of node system attributes + on Lenovo CNOS devices. It provides an option to configure host system + parameters or remove those parameters from the device active + configuration. +options: + hostname: + description: + - Configure the device hostname parameter. This option takes an + ASCII string value or keyword 'default' + domain_name: + description: + - Configures the default domain + name suffix to be used when referencing this node by its + FQDN. This argument accepts either a list of domain names or + a list of dicts that configure the domain name and VRF name or + keyword 'default'. See examples. + lookup_enabled: + description: + - Administrative control for enabling or disabling DNS lookups. + When this argument is set to True, lookups are performed and + when it is set to False, lookups are not performed. + type: bool + domain_search: + description: + - Configures a list of domain + name suffixes to search when performing DNS name resolution. + This argument accepts either a list of domain names or + a list of dicts that configure the domain name and VRF name or + keyword 'default'. See examples. 
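Stepping back to cnos_static_route above: the heart of its map_obj_to_commands() is string assembly, with prefix and mask first, each optional field after, and multi-word descriptions quoted. A self-contained re-statement of that assembly (field names mirror the module; the function name is illustrative):

```python
# Illustrative re-statement of the command assembly in
# cnos_static_route.map_obj_to_commands() above.
def build_route_command(want):
    command = ' '.join(('ip route', want['prefix'], want['mask']))
    for key in ('interface', 'next_hop', 'admin_distance', 'tag', 'description'):
        value = want.get(key)
        if not value:
            continue
        if key == 'description' and len(value.split()) > 1:
            command = ' '.join((command, key, '"%s"' % value))  # quote multi-word names
        elif key in ('description', 'tag'):
            command = ' '.join((command, key, value))
        else:
            command = ' '.join((command, value))  # positional fields carry no keyword
    return command


print(build_route_command({'prefix': '10.241.107.0', 'mask': '255.255.255.0',
                           'next_hop': '10.241.106.1', 'tag': '100'}))
# -> ip route 10.241.107.0 255.255.255.0 10.241.106.1 tag 100
```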
+  name_servers:
+    description:
+      - List of DNS name servers by IP address to use to perform name resolution
+        lookups. This argument accepts either a list of DNS servers or
+        a list of hashes that configure the name server and VRF name or
+        keyword 'default'. See examples.
+  lookup_source:
+    description:
+      - Provides one or more source interfaces to use for performing DNS
+        lookups. The interface must be a valid interface configured
+        on the device.
+  state:
+    description:
+      - State of the configuration
+        values in the device's current active configuration. When set
+        to I(present), the values should be configured in the device active
+        configuration and when set to I(absent) the values should not be
+        in the device active configuration.
+    default: present
+    choices: ['present', 'absent']
+'''
+
+EXAMPLES = """
+- name: configure hostname and domain-name
+  cnos_system:
+    hostname: cnos01
+    domain_name: test.example.com
+
+- name: remove configuration
+  cnos_system:
+    state: absent
+
+- name: configure name servers
+  cnos_system:
+    name_servers:
+      - 8.8.8.8
+      - 8.8.4.4
+
+- name: configure DNS Lookup sources
+  cnos_system:
+    lookup_source: MgmtEth0/0/CPU0/0
+    lookup_enabled: yes
+
+- name: configure name servers with VRF support
+  cnos_system:
+    name_servers:
+      - { server: 8.8.8.8, vrf: mgmt }
+      - { server: 8.8.4.4, vrf: mgmt }
+"""
+
+RETURN = """
+commands:
+  description: The list of configuration mode commands to send to the device
+  returned: always
+  type: list
+  sample:
+    - hostname cnos01
+    - ip domain-name test.example.com vrf default
+"""
+import re
+
+from ansible_collections.community.general.plugins.module_utils.network.cnos.cnos import get_config, load_config
+from ansible_collections.community.general.plugins.module_utils.network.cnos.cnos import cnos_argument_spec
+from ansible_collections.community.general.plugins.module_utils.network.cnos.cnos import check_args, debugOutput
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six import iteritems
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.config import NetworkConfig
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import ComplexList
+
+_CONFIGURED_VRFS = None
+
+
+def map_obj_to_commands(want, have, module):
+    commands = list()
+    state = module.params['state']
+
+    def needs_update(x):
+        return want.get(x) and (want.get(x) != have.get(x))
+
+    def difference(x, y, z):
+        return [item for item in x[z] if item not in y[z]]
+
+    if state == 'absent':
+        if have['hostname']:
+            commands.append('no hostname')
+
+        for item in have['domain_name']:
+            my_vrf = 'default'
+            if item['vrf'] is not None:
+                my_vrf = item['vrf']
+            cmd = 'no ip domain-name {0} vrf {1}'.format(item['name'], my_vrf)
+            commands.append(cmd)
+
+        for item in have['domain_search']:
+            my_vrf = 'default'
+            if item['vrf'] is not None:
+                my_vrf = item['vrf']
+            cmd = 'no ip domain-list {0} vrf {1}'.format(item['name'], my_vrf)
+            commands.append(cmd)
+
+        for item in have['name_servers']:
+            my_vrf = 'default'
+            if item['vrf'] is not None:
+                my_vrf = item['vrf']
+            cmd = 'no ip name-server {0} vrf {1}'.format(item['server'], my_vrf)
+            commands.append(cmd)
+
+    if state == 'present':
+        if needs_update('hostname'):
+            if want['hostname'] == 'default':
+                if have['hostname']:
+                    commands.append('no hostname')
+            else:
+                commands.append('hostname %s' % want['hostname'])
+
+        if want.get('lookup_enabled') is not None:
+            if have.get('lookup_enabled') != want.get('lookup_enabled'):
+                cmd =
'ip domain-lookup' + if want['lookup_enabled'] is False: + cmd = 'no %s' % cmd + commands.append(cmd) + + if want['domain_name']: + if want.get('domain_name')[0]['name'] == 'default': + if have['domain_name']: + for item in have['domain_name']: + my_vrf = 'default' + if item['vrf'] is not None: + my_vrf = item['vrf'] + cmd = 'no ip domain-name {0} vrf {1}'.format(item['name'], my_vrf) + commands.append(cmd) + else: + for item in difference(have, want, 'domain_name'): + my_vrf = 'default' + if item['vrf'] is not None: + my_vrf = item['vrf'] + cmd = 'no ip domain-name {0} vrf {1}'.format(item['name'], my_vrf) + commands.append(cmd) + for item in difference(want, have, 'domain_name'): + my_vrf = 'default' + if item['vrf'] is not None: + my_vrf = item['vrf'] + cmd = 'ip domain-name {0} vrf {1}'.format(item['name'], my_vrf) + commands.append(cmd) + + if want['domain_search']: + if want.get('domain_search')[0]['name'] == 'default': + if have['domain_search']: + for item in have['domain_search']: + my_vrf = 'default' + if item['vrf'] is not None: + my_vrf = item['vrf'] + cmd = 'no ip domain-list {0} vrf {1}'.format(item['name'], my_vrf) + commands.append(cmd) + else: + for item in difference(have, want, 'domain_search'): + my_vrf = 'default' + if item['vrf'] is not None: + my_vrf = item['vrf'] + cmd = 'no ip domain-list {0} vrf {1}'.format(item['name'], my_vrf) + commands.append(cmd) + for item in difference(want, have, 'domain_search'): + my_vrf = 'default' + if item['vrf'] is not None: + my_vrf = item['vrf'] + cmd = 'ip domain-list {0} vrf {1}'.format(item['name'], my_vrf) + commands.append(cmd) + + if want['name_servers']: + if want.get('name_servers')[0]['server'] == 'default': + if have['name_servers']: + for item in have['name_servers']: + my_vrf = 'default' + if item['vrf'] is not None: + my_vrf = item['vrf'] + cmd = 'no ip name-server {0} vrf {1}'.format(item['server'], my_vrf) + commands.append(cmd) + else: + for item in difference(have, want, 'name_servers'): + my_vrf = 'default' + if item['vrf'] is not None: + my_vrf = item['vrf'] + cmd = 'no ip name-server {0} vrf {1}'.format(item['server'], my_vrf) + commands.append(cmd) + for item in difference(want, have, 'name_servers'): + my_vrf = 'default' + if item['vrf'] is not None: + my_vrf = item['vrf'] + cmd = 'ip name-server {0} vrf {1}'.format(item['server'], my_vrf) + commands.append(cmd) + + return commands + + +def parse_hostname(config): + match = re.search(r'^hostname (\S+)', config, re.M) + if match: + return match.group(1) + + +def parse_domain_name(config): + objects = list() + myconf = config.splitlines() + for line in myconf: + if 'ip domain-name' in line: + datas = line.split() + objects.append({'name': datas[2], 'vrf': datas[4]}) + + return objects + + +def parse_domain_search(config): + objects = list() + myconf = config.splitlines() + for line in myconf: + if 'ip domain-list' in line: + datas = line.split() + objects.append({'name': datas[2], 'vrf': datas[4]}) + + return objects + + +def parse_name_servers(config): + objects = list() + myconf = config.splitlines() + for line in myconf: + if 'ip name-server' in line: + datas = line.split() + objects.append({'server': datas[2], 'vrf': datas[4]}) + + return objects + + +def map_config_to_obj(module): + config = get_config(module) + configobj = NetworkConfig(indent=2, contents=config) + + return { + 'hostname': parse_hostname(config), + 'lookup_enabled': 'no ip domain-lookup' not in config, + 'domain_name': parse_domain_name(config), + 'domain_search': 
parse_domain_search(config), + 'name_servers': parse_name_servers(config), + } + + +def map_params_to_obj(module): + obj = { + 'hostname': module.params['hostname'], + 'lookup_enabled': module.params['lookup_enabled'], + } + + domain_name = ComplexList(dict( + name=dict(key=True), + vrf=dict() + ), module) + + domain_search = ComplexList(dict( + name=dict(key=True), + vrf=dict() + ), module) + + name_servers = ComplexList(dict( + server=dict(key=True), + vrf=dict() + ), module) + + for arg, cast in [('domain_name', domain_name), + ('domain_search', domain_search), + ('name_servers', name_servers)]: + if module.params[arg] is not None: + obj[arg] = cast(module.params[arg]) + else: + obj[arg] = None + + return obj + + +def main(): + """ main entry point for module execution + """ + argument_spec = dict( + hostname=dict(), + lookup_enabled=dict(type='bool'), + + # { name: , vrf: } + domain_name=dict(type='list'), + + # {name: , vrf: } + domain_search=dict(type='list'), + + # { server: ; vrf: } + name_servers=dict(type='list'), + + lookup_source=dict(type='str'), + state=dict(default='present', choices=['present', 'absent']) + ) + + module = AnsibleModule(argument_spec=argument_spec, + supports_check_mode=True) + + warnings = list() + check_args(module, warnings) + + result = {'changed': False} + if warnings: + result['warnings'] = warnings + + want = map_params_to_obj(module) + have = map_config_to_obj(module) + + commands = map_obj_to_commands(want, have, module) + result['commands'] = commands + if commands: + if not module.check_mode: + load_config(module, commands) + result['changed'] = True + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/cnos/cnos_template.py b/plugins/modules/network/cnos/cnos_template.py new file mode 100644 index 0000000000..1e1bc2dd61 --- /dev/null +++ b/plugins/modules/network/cnos/cnos_template.py @@ -0,0 +1,151 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type +# +# Copyright (C) 2017 Lenovo, Inc. +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# +# Module to send CLI templates to Lenovo Switches +# Lenovo Networking +# + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: cnos_template +author: "Anil Kumar Muraleedharan (@amuraleedhar)" +short_description: Manage switch configuration using templates on devices running Lenovo CNOS +description: + - This module allows you to work with the running configuration of a switch. It provides a way + to execute a set of CNOS commands on a switch by evaluating the current running configuration + and executing the commands only if the specific settings have not been already configured. + The configuration source can be a set of commands or a template written in the Jinja2 templating language. 
+      This module uses SSH to manage network device configuration.
+      The results of the operation will be placed in a directory named 'results'
+      that must be created by the user in the local directory from which the
+      playbook is run.
+extends_documentation_fragment:
+- community.general.cnos
+
+options:
+    commandfile:
+        description:
+            - This specifies the path to the CNOS command file which needs to be
+              applied. This usually comes from the commands folder. Generally this
+              file is the output of the variables applied on a template file, so
+              this task is usually preceded by a C(template) task.
+              Note that the command file must contain the Ansible keyword
+              {{ inventory_hostname }} in its filename to ensure that the command
+              file is unique for each switch and condition. If this is omitted,
+              the command file will be overwritten during iteration. For example,
+              commandfile=./commands/clos_leaf_bgp_{{ inventory_hostname }}_commands.txt
+        required: true
+        default: Null
+'''
+EXAMPLES = '''
+Tasks : The following are examples of using the module cnos_template. These are
+        written in the main.yml file of the tasks directory.
+---
+- name: Replace Config CLI command template with values
+  template:
+    src: demo_template.j2
+    dest: "./commands/demo_template_{{ inventory_hostname }}_commands.txt"
+    vlanid1: 13
+    slot_chassis_number1: "1/2"
+    portchannel_interface_number1: 100
+    portchannel_mode1: "active"
+
+- name: Applying CLI commands on Switches
+  cnos_template:
+    deviceType: "{{ hostvars[inventory_hostname]['deviceType'] }}"
+    commandfile: "./commands/demo_template_{{ inventory_hostname }}_commands.txt"
+    outputfile: "./results/demo_template_command_{{ inventory_hostname }}_output.txt"
+
+'''
+RETURN = '''
+msg:
+  description: Success or failure message
+  returned: always
+  type: str
+  sample: "Template Applied"
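The first task above renders demo_template.j2 into a per-host command file before cnos_template pushes it to the switch. A standalone sketch of that rendering step using the jinja2 library directly (the variable names come from the example; the template body here is a made-up stand-in for demo_template.j2):

```python
# Standalone sketch of the rendering the `template:` task performs.
from jinja2 import Template

demo = Template(
    "vlan {{ vlanid1 }}\n"
    "interface port-channel {{ portchannel_interface_number1 }}\n"
    " mode {{ portchannel_mode1 }}\n"
)
print(demo.render(vlanid1=13,
                  portchannel_interface_number1=100,
                  portchannel_mode1='active'))
```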
+'''
+
+import sys
+import time
+import socket
+import array
+import json
+import re
+import os
+try:
+    from ansible_collections.community.general.plugins.module_utils.network.cnos import cnos
+    HAS_LIB = True
+except Exception:
+    HAS_LIB = False
+from ansible.module_utils.basic import AnsibleModule
+from collections import defaultdict
+
+
+def main():
+    module = AnsibleModule(
+        argument_spec=dict(
+            commandfile=dict(required=True),
+            outputfile=dict(required=True),
+            host=dict(required=False),
+            deviceType=dict(required=True),
+            username=dict(required=False),
+            password=dict(required=False, no_log=True),
+            enablePassword=dict(required=False, no_log=True),),
+        supports_check_mode=False)
+    commandfile = module.params['commandfile']
+    outputfile = module.params['outputfile']
+    output = ''
+
+    # Send commands one by one to the device
+    cmd = []
+    with open(commandfile, "r") as f:
+        for line in f:
+            # Omit the comment lines in the template file
+            if not line.startswith("#"):
+                command = line.strip()
+                cmd.append({'command': command, 'prompt': None, 'answer': None})
+    # Write to memory
+    save_cmd = [{'command': 'save', 'prompt': None, 'answer': None}]
+    cmd.extend(save_cmd)
+    output = output + str(cnos.run_cnos_commands(module, cmd))
+    # Write output to file
+    path = outputfile.rsplit('/', 1)
+    # cnos.debugOutput(path[0])
+    if not os.path.exists(path[0]):
+        os.makedirs(path[0])
+    with open(outputfile, "a") as out:
+        out.write(output)
+
+    # Logic to check when changes occur or not
+    errorMsg = cnos.checkOutputForError(output)
+    if errorMsg is None:
+        module.exit_json(changed=True, msg="Template Applied")
+    else:
+        module.fail_json(msg=errorMsg)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/network/cnos/cnos_user.py b/plugins/modules/network/cnos/cnos_user.py
new file mode 100644
index 0000000000..98a4552c83
--- /dev/null
+++ b/plugins/modules/network/cnos/cnos_user.py
@@ -0,0 +1,390 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+#
+# Copyright (C) 2019 Lenovo.
+# (c) 2017, Ansible by Red Hat, inc
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+# Module to work on management of local users on Lenovo CNOS Switches
+# Lenovo Networking
+#
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+DOCUMENTATION = '''
+---
+module: cnos_user
+author: "Anil Kumar Muraleedharan (@amuraleedhar)"
+short_description: Manage the collection of local users on Lenovo CNOS devices
+description:
+  - This module provides declarative management of the local usernames
+    configured on Lenovo CNOS devices. It allows playbooks to manage
+    either individual usernames or the collection of usernames in the
+    current running config. It also supports purging usernames from the
+    configuration that are not explicitly defined.
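Looking back at cnos_template's main() above: the rendered command file is turned into the cmd list one line at a time, comment lines are dropped, and a trailing save persists the result. A minimal re-statement of that loop:

```python
# Minimal re-statement of the command-file parsing in cnos_template.main().
def build_cmd_list(lines):
    cmd = [{'command': line.strip(), 'prompt': None, 'answer': None}
           for line in lines if not line.startswith('#')]
    # The module always appends a final 'save' so the changes persist.
    cmd.append({'command': 'save', 'prompt': None, 'answer': None})
    return cmd


print(build_cmd_list(['# demo vlan\n', 'vlan 13\n', 'name demo\n']))
```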
+options:
+  aggregate:
+    description:
+      - The set of username objects to be configured on the remote
+        Lenovo CNOS device. The list entries can either be the username
+        or a hash of username and properties. This argument is mutually
+        exclusive with the C(name) argument.
+    aliases: ['users', 'collection']
+  name:
+    description:
+      - The username to be configured on the remote Lenovo CNOS
+        device. This argument accepts a string value and is mutually
+        exclusive with the C(aggregate) argument.
+  configured_password:
+    description:
+      - The password to be configured on the network device. The
+        password needs to be provided in cleartext and it will be encrypted
+        on the device.
+        Please note that this option is not the same as C(provider password).
+  update_password:
+    description:
+      - Since passwords are encrypted in the device running config, this
+        argument will instruct the module when to change the password. When
+        set to C(always), the password will always be updated in the device
+        and when set to C(on_create) the password will be updated only if
+        the username is created.
+    default: always
+    choices: ['on_create', 'always']
+  role:
+    description:
+      - The C(role) argument configures the role for the username in the
+        device running configuration. The argument accepts a string value
+        defining the role name. This argument does not check if the role
+        has been configured on the device.
+    aliases: ['roles']
+  sshkey:
+    description:
+      - The C(sshkey) argument defines the SSH public key to configure
+        for the username. This argument accepts a valid SSH key value.
+  purge:
+    description:
+      - The C(purge) argument instructs the module to consider the
+        resource definition absolute. It will remove any previously
+        configured usernames on the device with the exception of the
+        C(admin) user, which cannot be deleted per CNOS constraints.
+    type: bool
+    default: 'no'
+  state:
+    description:
+      - The C(state) argument configures the state of the username definition
+        as it relates to the device operational configuration.
When set
+        to I(present), the username(s) should be configured in the device active
+        configuration and when set to I(absent) the username(s) should not be
+        in the device active configuration.
+    default: present
+    choices: ['present', 'absent']
+'''
+
+EXAMPLES = """
+- name: create a new user
+  cnos_user:
+    name: ansible
+    sshkey: "{{ lookup('file', '~/.ssh/id_rsa.pub') }}"
+    state: present
+
+- name: remove all users except admin
+  cnos_user:
+    purge: yes
+
+- name: set role for multiple users
+  cnos_user:
+    aggregate:
+      - name: netop
+      - name: netend
+    role: network-operator
+    state: present
+"""
+
+RETURN = """
+commands:
+  description: The list of configuration mode commands to send to the device
+  returned: always
+  type: list
+  sample:
+    - name ansible
+    - name ansible password password
+start:
+  description: The time the job started
+  returned: always
+  type: str
+  sample: "2016-11-16 10:38:15.126146"
+end:
+  description: The time the job ended
+  returned: always
+  type: str
+  sample: "2016-11-16 10:38:25.595612"
+delta:
+  description: The time elapsed to perform all operations
+  returned: always
+  type: str
+  sample: "0:00:10.469466"
+"""
+import re
+
+from copy import deepcopy
+from functools import partial
+
+from ansible_collections.community.general.plugins.module_utils.network.cnos.cnos import run_commands, load_config
+from ansible_collections.community.general.plugins.module_utils.network.cnos.cnos import get_config
+from ansible_collections.community.general.plugins.module_utils.network.cnos.cnos import cnos_argument_spec
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six import string_types, iteritems
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import to_list
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import remove_default_spec
+from ansible_collections.community.general.plugins.module_utils.network.cnos.cnos import get_user_roles
+
+
+def validate_roles(value, module):
+    for item in value:
+        if item not in get_user_roles():
+            module.fail_json(msg='invalid role specified')
+
+
+def map_obj_to_commands(updates, module):
+    commands = list()
+    state = module.params['state']
+    update_password = module.params['update_password']
+
+    for update in updates:
+        want, have = update
+
+        def needs_update(x):
+            return want.get(x) and (want.get(x) != have.get(x))
+
+        def add(x):
+            return commands.append('username %s %s' % (want['name'], x))
+
+        def remove(x):
+            return commands.append('no username %s %s' % (want['name'], x))
+
+        if want['state'] == 'absent':
+            commands.append('no username %s' % want['name'])
+            continue
+
+        if want['state'] == 'present' and not have:
+            commands.append('username %s' % want['name'])
+
+        if needs_update('configured_password'):
+            if update_password == 'always' or not have:
+                add('password %s' % want['configured_password'])
+
+        if needs_update('sshkey'):
+            add('sshkey %s' % want['sshkey'])
+
+        if want['roles']:
+            if have:
+                for item in set(have['roles']).difference(want['roles']):
+                    remove('role %s' % item)
+
+                for item in set(want['roles']).difference(have['roles']):
+                    add('role %s' % item)
+            else:
+                for item in want['roles']:
+                    add('role %s' % item)
+
+    return commands
+
+
+def parse_password(data):
+    if 'no password set' in data:
+        return None
+    return ''
+
+
+def parse_roles(data):
+    roles = list()
+    if 'role:' in data:
+        items = data.split()
+        my_item = items[items.index('role:') + 1]
+        roles.append(my_item)
+    return roles
+
+
+def parse_username(data):
+    name = data.split(' ', 1)[0]
+    username = name[1:]
+    return username
+
+
+def parse_sshkey(data):
+    key = None
+    if 'sshkey:' in data:
+        items = data.split()
+        key = items[items.index('sshkey:') + 1]
+    return key
+
+
+def map_config_to_obj(module):
+    out = run_commands(module, ['show user-account'])
+    data = out[0]
+    objects = list()
+    datum = data.split('User')
+
+    for item in datum:
+        objects.append({
+            'name': parse_username(item),
+            'configured_password': parse_password(item),
+            'sshkey': parse_sshkey(item),
+            'roles': parse_roles(item),
+            'state': 'present'
+        })
+    return objects
+
+
+def get_param_value(key, item, module):
+    # if key doesn't exist in the item, get it from module.params
+    if not item.get(key):
+        value = module.params[key]
+
+    # if key does exist, do a type check on it to validate it
+    else:
+        value_type = module.argument_spec[key].get('type', 'str')
+        type_checker = module._CHECK_ARGUMENT_TYPES_DISPATCHER[value_type]
+        type_checker(item[key])
+        value = item[key]
+
+    return value
+
+
+def map_params_to_obj(module):
+    aggregate = module.params['aggregate']
+    if not aggregate:
+        if not module.params['name'] and module.params['purge']:
+            return list()
+        elif not module.params['name']:
+            module.fail_json(msg='username is required')
+        else:
+            collection = [{'name': module.params['name']}]
+    else:
+        collection = list()
+        for item in aggregate:
+            if not isinstance(item, dict):
+                collection.append({'name': item})
+            elif 'name' not in item:
+                module.fail_json(msg='name is required')
+            else:
+                collection.append(item)
+
+    objects = list()
+
+    for item in collection:
+        get_value = partial(get_param_value, item=item, module=module)
+        item.update({
+            'configured_password': get_value('configured_password'),
+            'sshkey': get_value('sshkey'),
+            'roles': get_value('roles'),
+            'state': get_value('state')
+        })
+
+        for key, value in iteritems(item):
+            if value:
+                # validate the param value (if validator func exists)
+                validator = globals().get('validate_%s' % key)
+                if all((value, validator)):
+                    validator(value, module)
+
+        objects.append(item)
+
+    return objects
+
+
+def update_objects(want, have):
+    updates = list()
+    for entry in want:
+        item = next((i for i in have if i['name'] == entry['name']), None)
+        if all((item is None, entry['state'] == 'present')):
+            updates.append((entry, {}))
+        elif item:
+            for key, value in iteritems(entry):
+                if value and value != item[key]:
+                    updates.append((entry, item))
+    return updates
+
+
+def main():
+    """ main entry point for module execution
+    """
+    element_spec = dict(
+        name=dict(),
+        configured_password=dict(no_log=True),
+        update_password=dict(default='always', choices=['on_create', 'always']),
+        roles=dict(type='list', aliases=['role']),
+        sshkey=dict(),
+        state=dict(default='present', choices=['present', 'absent'])
+    )
+
+    aggregate_spec = deepcopy(element_spec)
+
+    # remove default in aggregate spec, to handle common arguments
+    remove_default_spec(aggregate_spec)
+
+    argument_spec = dict(
+        aggregate=dict(type='list', elements='dict',
+                       options=aggregate_spec, aliases=['collection', 'users']),
+        purge=dict(type='bool', default=False)
+    )
+
+    argument_spec.update(element_spec)
+
+    mutually_exclusive = [('name', 'aggregate')]
+
+    module = AnsibleModule(argument_spec=argument_spec,
+                           mutually_exclusive=mutually_exclusive,
+                           supports_check_mode=True)
+
+    warnings = list()
+
+    result = {'changed': False}
+    result['warnings'] = warnings
+
+    want = map_params_to_obj(module)
+    have = map_config_to_obj(module)
+
+    commands =
map_obj_to_commands(update_objects(want, have), module)
+
+    if module.params['purge']:
+        want_users = [x['name'] for x in want]
+        have_users = [x['name'] for x in have]
+        for item in set(have_users).difference(want_users):
+            if item != 'admin':
+                if not item.strip():
+                    continue
+                item = item.replace("\\", "\\\\")
+                commands.append('no username %s' % item)
+
+    result['commands'] = commands
+
+    # The CNOS CLI prevents this by rule, but guard against it anyway
+    if 'no username admin' in commands:
+        module.fail_json(msg='Cannot delete the `admin` account')
+
+    if commands:
+        if not module.check_mode:
+            load_config(module, commands)
+        result['changed'] = True
+
+    module.exit_json(**result)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/network/cnos/cnos_vlag.py b/plugins/modules/network/cnos/cnos_vlag.py
new file mode 100644
index 0000000000..8568f7fb42
--- /dev/null
+++ b/plugins/modules/network/cnos/cnos_vlag.py
@@ -0,0 +1,446 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+#
+# Copyright (C) 2017 Lenovo, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+# Module to send VLAG commands to Lenovo Switches
+# Lenovo Networking
+#
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: cnos_vlag
+author: "Anil Kumar Muraleedharan (@amuraleedhar)"
+short_description: Manage VLAG resources and attributes on devices running
+                   Lenovo CNOS
+description:
+    - This module allows you to work with virtual Link Aggregation Groups
+      (vLAG) related configurations. The operators used are overloaded to ensure
+      control over switch vLAG configurations. Apart from the regular device
+      connection related attributes, there are four vLAG arguments which are
+      overloaded variables that will perform further configurations. They are
+      vlagArg1, vlagArg2, vlagArg3, and vlagArg4. For more details on how to use
+      these arguments, see [Overloaded Variables].
+      This module uses SSH to manage network device configuration.
+      The results of the operation will be placed in a directory named 'results'
+      that must be created by the user in the local directory from which the
+      playbook is run.
+extends_documentation_fragment:
+- community.general.cnos
+
+options:
+    vlagArg1:
+        description:
+            - This is an overloaded vlag first argument. Usage of this argument
+              can be found in the User Guide referenced above.
+        required: true
+        default: Null
+        choices: [enable, auto-recovery,config-consistency,isl,mac-address-table,
+                  peer-gateway,priority,startup-delay,tier-id,vrrp,instance,hlthchk]
+    vlagArg2:
+        description:
+            - This is an overloaded vlag second argument. Usage of this argument
+              can be found in the User Guide referenced above.
+        required: false
+        default: Null
+        choices: [Interval in seconds,disable or strict,Port Aggregation Number,
+                  VLAG priority,Delay time in seconds,VLAG tier-id value,
+                  VLAG instance number,keepalive-attempts,keepalive-interval,
+                  retry-interval,peer-ip]
+    vlagArg3:
+        description:
+            - This is an overloaded vlag third argument. Usage of this argument
+              can be found in the User Guide referenced above.
+        required: false
+        default: Null
+        choices: [enable or port-aggregation,Number of keepalive attempts,
+                  Interval in seconds,Interval in seconds,
+                  VLAG health check peer IP4 address]
+    vlagArg4:
+        description:
+            - This is an overloaded vlag fourth argument. Usage of this argument
+              can be found in the User Guide referenced above.
+        required: false
+        default: Null
+        choices: [Port Aggregation Number,default or management]
+
+'''
+EXAMPLES = '''
+
+Tasks : The following are examples of using the module cnos_vlag. These are
+        written in the main.yml file of the tasks directory.
+---
+- name: Test Vlag - enable
+  cnos_vlag:
+    deviceType: "{{ hostvars[inventory_hostname]['deviceType']}}"
+    outputfile: "./results/cnos_vlag_{{ inventory_hostname }}_output.txt"
+    vlagArg1: "enable"
+
+- name: Test Vlag - autorecovery
+  cnos_vlag:
+    deviceType: "{{ hostvars[inventory_hostname]['deviceType']}}"
+    outputfile: "./results/cnos_vlag_{{ inventory_hostname }}_output.txt"
+    vlagArg1: "auto-recovery"
+    vlagArg2: 266
+
+- name: Test Vlag - config-consistency
+  cnos_vlag:
+    deviceType: "{{ hostvars[inventory_hostname]['deviceType']}}"
+    outputfile: "./results/cnos_vlag_{{ inventory_hostname }}_output.txt"
+    vlagArg1: "config-consistency"
+    vlagArg2: "strict"
+
+- name: Test Vlag - isl
+  cnos_vlag:
+    deviceType: "{{ hostvars[inventory_hostname]['deviceType']}}"
+    outputfile: "./results/cnos_vlag_{{ inventory_hostname }}_output.txt"
+    vlagArg1: "isl"
+    vlagArg2: 23
+
+- name: Test Vlag - mac-address-table
+  cnos_vlag:
+    deviceType: "{{ hostvars[inventory_hostname]['deviceType']}}"
+    outputfile: "./results/cnos_vlag_{{ inventory_hostname }}_output.txt"
+    vlagArg1: "mac-address-table"
+
+- name: Test Vlag - peer-gateway
+  cnos_vlag:
+    deviceType: "{{ hostvars[inventory_hostname]['deviceType']}}"
+    outputfile: "./results/cnos_vlag_{{ inventory_hostname }}_output.txt"
+    vlagArg1: "peer-gateway"
+
+- name: Test Vlag - priority
+  cnos_vlag:
+    deviceType: "{{ hostvars[inventory_hostname]['deviceType']}}"
+    outputfile: "./results/cnos_vlag_{{ inventory_hostname }}_output.txt"
+    vlagArg1: "priority"
+    vlagArg2: 1313
+
+- name: Test Vlag - startup-delay
+  cnos_vlag:
+    deviceType: "{{ hostvars[inventory_hostname]['deviceType']}}"
+    outputfile: "./results/cnos_vlag_{{ inventory_hostname }}_output.txt"
+    vlagArg1: "startup-delay"
+    vlagArg2: 323
+
+- name: Test Vlag - tier-id
+  cnos_vlag:
+    deviceType: "{{ hostvars[inventory_hostname]['deviceType']}}"
+    outputfile: "./results/cnos_vlag_{{ inventory_hostname }}_output.txt"
+    vlagArg1: "tier-id"
+    vlagArg2: 313
+
+- name: Test Vlag - vrrp
+  cnos_vlag:
+    deviceType: "{{ hostvars[inventory_hostname]['deviceType']}}"
+    outputfile: "./results/cnos_vlag_{{ inventory_hostname }}_output.txt"
+    vlagArg1: "vrrp"
+
+- name: Test Vlag - instance
+  cnos_vlag:
+    deviceType: "{{ hostvars[inventory_hostname]['deviceType']}}"
+    outputfile: "./results/cnos_vlag_{{ inventory_hostname }}_output.txt"
+    vlagArg1: "instance"
+    vlagArg2: 33
+    vlagArg3: 333
+
+- name: Test Vlag - instance2
+  cnos_vlag:
+    deviceType: "{{ hostvars[inventory_hostname]['deviceType']}}"
+    outputfile: "./results/cnos_vlag_{{
inventory_hostname }}_output.txt" + vlagArg1: "instance" + vlagArg2: "33" + +- name: Test Vlag - keepalive-attempts + cnos_vlag: + deviceType: "{{ hostvars[inventory_hostname]['deviceType']}}" + outputfile: "./results/cnos_vlag_{{ inventory_hostname }}_output.txt" + vlagArg1: "hlthchk" + vlagArg2: "keepalive-attempts" + vlagArg3: 13 + +- name: Test Vlag - keepalive-interval + cnos_vlag: + deviceType: "{{ hostvars[inventory_hostname]['deviceType']}}" + outputfile: "./results/cnos_vlag_{{ inventory_hostname }}_output.txt" + vlagArg1: "hlthchk" + vlagArg2: "keepalive-interval" + vlagArg3: 131 + +- name: Test Vlag - retry-interval + cnos_vlag: + deviceType: "{{ hostvars[inventory_hostname]['deviceType']}}" + outputfile: "./results/cnos_vlag_{{ inventory_hostname }}_output.txt" + vlagArg1: "hlthchk" + vlagArg2: "retry-interval" + vlagArg3: 133 + +- name: Test Vlag - peer ip + cnos_vlag: + deviceType: "{{ hostvars[inventory_hostname]['deviceType']}}" + outputfile: "./results/cnos_vlag_{{ inventory_hostname }}_output.txt" + vlagArg1: "hlthchk" + vlagArg2: "peer-ip" + vlagArg3: "1.2.3.4" + +''' +RETURN = ''' +msg: + description: Success or failure message + returned: always + type: str + sample: "vLAG configurations accomplished" +''' + +import sys +import time +import socket +import array +import json +import time +import re +try: + from ansible_collections.community.general.plugins.module_utils.network.cnos import cnos + HAS_LIB = True +except Exception: + HAS_LIB = False + +from ansible.module_utils.basic import AnsibleModule +from collections import defaultdict + + +def vlagConfig(module, prompt, answer): + + retVal = '' + # vlag config command happens here. + command = 'vlag ' + + vlagArg1 = module.params['vlagArg1'] + vlagArg2 = module.params['vlagArg2'] + vlagArg3 = module.params['vlagArg3'] + vlagArg4 = module.params['vlagArg4'] + deviceType = module.params['deviceType'] + + if(vlagArg1 == "enable"): + # debugOutput("enable") + command = command + vlagArg1 + " " + + elif(vlagArg1 == "auto-recovery"): + # debugOutput("auto-recovery") + command = command + vlagArg1 + " " + value = cnos.checkSanityofVariable( + deviceType, "vlag_auto_recovery", vlagArg2) + if(value == "ok"): + command = command + vlagArg2 + else: + retVal = "Error-160" + return retVal + + elif(vlagArg1 == "config-consistency"): + # debugOutput("config-consistency") + command = command + vlagArg1 + " " + value = cnos.checkSanityofVariable( + deviceType, "vlag_config_consistency", vlagArg2) + if(value == "ok"): + command = command + vlagArg2 + else: + retVal = "Error-161" + return retVal + + elif(vlagArg1 == "isl"): + # debugOutput("isl") + command = command + vlagArg1 + " port-channel " + value = cnos.checkSanityofVariable( + deviceType, "vlag_port_aggregation", vlagArg2) + if(value == "ok"): + command = command + vlagArg2 + else: + retVal = "Error-162" + return retVal + + elif(vlagArg1 == "mac-address-table"): + # debugOutput("mac-address-table") + command = command + vlagArg1 + " refresh" + + elif(vlagArg1 == "peer-gateway"): + # debugOutput("peer-gateway") + command = command + vlagArg1 + " " + + elif(vlagArg1 == "priority"): + # debugOutput("priority") + command = command + vlagArg1 + " " + value = cnos.checkSanityofVariable(deviceType, "vlag_priority", + vlagArg2) + if(value == "ok"): + command = command + vlagArg2 + else: + retVal = "Error-163" + return retVal + + elif(vlagArg1 == "startup-delay"): + # debugOutput("startup-delay") + command = command + vlagArg1 + " " + value = cnos.checkSanityofVariable( + deviceType, 
"vlag_startup_delay", vlagArg2) + if(value == "ok"): + command = command + vlagArg2 + else: + retVal = "Error-164" + return retVal + + elif(vlagArg1 == "tier-id"): + # debugOutput("tier-id") + command = command + vlagArg1 + " " + value = cnos.checkSanityofVariable(deviceType, "vlag_tier_id", vlagArg2) + if(value == "ok"): + command = command + vlagArg2 + else: + retVal = "Error-165" + return retVal + + elif(vlagArg1 == "vrrp"): + # debugOutput("vrrp") + command = command + vlagArg1 + " active" + + elif(vlagArg1 == "instance"): + # debugOutput("instance") + command = command + vlagArg1 + " " + value = cnos.checkSanityofVariable(deviceType, "vlag_instance", + vlagArg2) + if(value == "ok"): + command = command + vlagArg2 + if(vlagArg3 is not None): + command = command + " port-channel " + value = cnos.checkSanityofVariable( + deviceType, "vlag_port_aggregation", vlagArg3) + if(value == "ok"): + command = command + vlagArg3 + else: + retVal = "Error-162" + return retVal + else: + command = command + " enable " + else: + retVal = "Error-166" + return retVal + + elif(vlagArg1 == "hlthchk"): + # debugOutput("hlthchk") + command = command + vlagArg1 + " " + value = cnos.checkSanityofVariable( + deviceType, "vlag_hlthchk_options", vlagArg2) + if(value == "ok"): + if(vlagArg2 == "keepalive-attempts"): + value = cnos.checkSanityofVariable( + deviceType, "vlag_keepalive_attempts", vlagArg3) + if(value == "ok"): + command = command + vlagArg2 + " " + vlagArg3 + else: + retVal = "Error-167" + return retVal + elif(vlagArg2 == "keepalive-interval"): + value = cnos.checkSanityofVariable( + deviceType, "vlag_keepalive_interval", vlagArg3) + if(value == "ok"): + command = command + vlagArg2 + " " + vlagArg3 + else: + retVal = "Error-168" + return retVal + elif(vlagArg2 == "retry-interval"): + value = cnos.checkSanityofVariable( + deviceType, "vlag_retry_interval", vlagArg3) + if(value == "ok"): + command = command + vlagArg2 + " " + vlagArg3 + else: + retVal = "Error-169" + return retVal + elif(vlagArg2 == "peer-ip"): + # Here I am not taking care of IPV6 option. 
+ value = cnos.checkSanityofVariable( + deviceType, "vlag_peerip", vlagArg3) + if(value == "ok"): + command = command + vlagArg2 + " " + vlagArg3 + if(vlagArg4 is not None): + value = cnos.checkSanityofVariable( + deviceType, "vlag_peerip_vrf", vlagArg4) + if(value == "ok"): + command = command + " vrf " + vlagArg4 + else: + retVal = "Error-170" + return retVal + else: + retVal = "Error-171" + return retVal + + else: + retVal = "Error-172" + return retVal + + # debugOutput(command) + cmd = [{'command': command, 'prompt': None, 'answer': None}] + retVal = retVal + str(cnos.run_cnos_commands(module, cmd)) + return retVal +# EOM + + +def main(): + # + # Define parameters for vlag creation entry + # + module = AnsibleModule( + argument_spec=dict( + outputfile=dict(required=True), + host=dict(required=False), + username=dict(required=False), + password=dict(required=False, no_log=True), + enablePassword=dict(required=False, no_log=True), + deviceType=dict(required=True), + vlagArg1=dict(required=True), + vlagArg2=dict(required=False), + vlagArg3=dict(required=False), + vlagArg4=dict(required=False),), + supports_check_mode=False) + + outputfile = module.params['outputfile'] + output = "" + + # Send the CLi command + output = output + str(vlagConfig(module, '(config)#', None)) + + # Save it into the file + file = open(outputfile, "a") + file.write(output) + file.close() + + # need to add logic to check when changes occur or not + errorMsg = cnos.checkOutputForError(output) + if(errorMsg is None): + module.exit_json(changed=True, msg="VLAG configurations accomplished") + else: + module.fail_json(msg=errorMsg) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/cnos/cnos_vlan.py b/plugins/modules/network/cnos/cnos_vlan.py new file mode 100644 index 0000000000..ce336c2ccb --- /dev/null +++ b/plugins/modules/network/cnos/cnos_vlan.py @@ -0,0 +1,409 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (C) 2017 Lenovo, Inc. +# (c) 2017, Ansible by Red Hat, inc +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# +# Module to send VLAN commands to Lenovo Switches +# Overloading aspect of vlan creation in a range is pending +# Lenovo Networking + + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: cnos_vlan +author: "Anil Kumar Mureleedharan(@amuraleedhar)" +short_description: Manage VLANs on CNOS network devices +description: + - This module provides declarative management of VLANs + on Lenovo CNOS network devices. +notes: + - Tested against CNOS 10.8.1 +options: + name: + description: + - Name of the VLAN. + vlan_id: + description: + - ID of the VLAN. Range 1-4094. + required: true + interfaces: + description: + - List of interfaces that should be associated to the VLAN. 
+    required: true
+  associated_interfaces:
+    description:
+      - This is an intent option and checks the operational state of the given
+        VLAN C(name) for the associated interfaces. If the value in
+        C(associated_interfaces) does not match the operational state of the
+        VLAN's interfaces on the device, the module fails.
+  delay:
+    description:
+      - Delay in seconds the play should wait to check for declarative intent
+        params values.
+    default: 10
+  aggregate:
+    description: List of VLAN definitions.
+  purge:
+    description:
+      - Purge VLANs not defined in the I(aggregate) parameter.
+    default: no
+    type: bool
+  state:
+    description:
+      - State of the VLAN configuration.
+    default: present
+    choices: ['present', 'absent', 'active', 'suspend']
+  provider:
+    description:
+      - B(Deprecated)
+      - "Starting with Ansible 2.5 we recommend using C(connection: network_cli)."
+      - For more information please see the L(CNOS Platform Options guide, ../network/user_guide/platform_cnos.html).
+      - HORIZONTALLINE
+      - A dict object containing connection details.
+    suboptions:
+      host:
+        description:
+          - Specifies the DNS host name or address for connecting to the remote
+            device over the specified transport. The value of host is used as
+            the destination address for the transport.
+        required: true
+      port:
+        description:
+          - Specifies the port to use when building the connection to the remote device.
+        default: 22
+      username:
+        description:
+          - Configures the username to use to authenticate the connection to
+            the remote device. This value is used to authenticate
+            the SSH session. If the value is not specified in the task, the
+            value of environment variable C(ANSIBLE_NET_USERNAME) will be used instead.
+      password:
+        description:
+          - Specifies the password to use to authenticate the connection to
+            the remote device. This value is used to authenticate
+            the SSH session. If the value is not specified in the task, the
+            value of environment variable C(ANSIBLE_NET_PASSWORD) will be used instead.
+      timeout:
+        description:
+          - Specifies the timeout in seconds for communicating with the network device
+            for either connecting or sending commands. If the timeout is
+            exceeded before the operation is completed, the module will error.
+        default: 10
+      ssh_keyfile:
+        description:
+          - Specifies the SSH key to use to authenticate the connection to
+            the remote device. This value is the path to the
+            key used to authenticate the SSH session. If the value is not specified
+            in the task, the value of environment variable C(ANSIBLE_NET_SSH_KEYFILE)
+            will be used instead.
+      authorize:
+        description:
+          - Instructs the module to enter privileged mode on the remote device
+            before sending any commands. If not specified, the device will
+            attempt to execute all commands in non-privileged mode. If the value
+            is not specified in the task, the value of environment variable
+            C(ANSIBLE_NET_AUTHORIZE) will be used instead.
+        type: bool
+        default: 'no'
+      auth_pass:
+        description:
+          - Specifies the password to use if required to enter privileged mode
+            on the remote device. If I(authorize) is false, then this argument
+            does nothing. If the value is not specified in the task, the value of
+            environment variable C(ANSIBLE_NET_AUTH_PASS) will be used instead.
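The interface reconciliation described for C(interfaces) above comes down to two set differences in map_obj_to_commands() below: members present only in the desired list are assigned to the VLAN, and members present only on the device are unassigned. In sketch form (interface names are examples):

```python
# Sketch of the set arithmetic behind cnos_vlan's interface reconciliation.
want = {'Ethernet1/33', 'Ethernet1/44'}   # desired members
have = {'Ethernet1/44', 'Ethernet1/55'}   # members reported by the device

missing = sorted(want - have)       # each gets 'switchport access vlan <id>'
superfluous = sorted(have - want)   # each gets 'no switchport access vlan'
print(missing, superfluous)         # ['Ethernet1/33'] ['Ethernet1/55']
```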
+'''
+
+EXAMPLES = """
+- name: Create vlan
+  cnos_vlan:
+    vlan_id: 100
+    name: test-vlan
+    state: present
+
+- name: Add interfaces to VLAN
+  cnos_vlan:
+    vlan_id: 100
+    interfaces:
+      - Ethernet1/33
+      - Ethernet1/44
+
+- name: Check if interfaces are assigned to the VLAN
+  cnos_vlan:
+    vlan_id: 100
+    associated_interfaces:
+      - Ethernet1/33
+      - Ethernet1/44
+
+- name: Delete vlan
+  cnos_vlan:
+    vlan_id: 100
+    state: absent
+"""
+
+RETURN = """
+commands:
+  description: The list of configuration mode commands to send to the device
+  returned: always
+  type: list
+  sample:
+    - vlan 100
+    - name test-vlan
+"""
+
+import re
+import time
+
+from copy import deepcopy
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import remove_default_spec
+from ansible_collections.community.general.plugins.module_utils.network.cnos.cnos import load_config, run_commands
+from ansible_collections.community.general.plugins.module_utils.network.cnos.cnos import debugOutput, check_args
+from ansible_collections.community.general.plugins.module_utils.network.cnos.cnos import cnos_argument_spec
+from ansible.module_utils._text import to_text
+
+
+def search_obj_in_list(vlan_id, lst):
+    for o in lst:
+        if o['vlan_id'] == vlan_id:
+            return o
+
+
+def map_obj_to_commands(updates, module):
+    commands = list()
+    want, have = updates
+    purge = module.params['purge']
+
+    for w in want:
+        vlan_id = w['vlan_id']
+        name = w['name']
+        interfaces = w['interfaces']
+        state = w['state']
+
+        obj_in_have = search_obj_in_list(vlan_id, have)
+
+        if state == 'absent':
+            if obj_in_have:
+                commands.append('no vlan {0}'.format(vlan_id))
+
+        elif state == 'present':
+            if not obj_in_have:
+                commands.append('vlan {0}'.format(vlan_id))
+                if name:
+                    commands.append('name {0}'.format(name))
+
+                if interfaces:
+                    for i in interfaces:
+                        commands.append('interface {0}'.format(i))
+                        commands.append('switchport mode access')
+                        commands.append('switchport access vlan {0}'.format(vlan_id))
+
+            else:
+                if name:
+                    if name != obj_in_have['name']:
+                        commands.append('vlan {0}'.format(vlan_id))
+                        commands.append('name {0}'.format(name))
+
+                if interfaces:
+                    if not obj_in_have['interfaces']:
+                        for i in interfaces:
+                            commands.append('vlan {0}'.format(vlan_id))
+                            commands.append('interface {0}'.format(i))
+                            commands.append('switchport mode access')
+                            commands.append('switchport access vlan {0}'.format(vlan_id))
+
+                    elif set(interfaces) != set(obj_in_have['interfaces']):
+                        missing_interfaces = list(set(interfaces) - set(obj_in_have['interfaces']))
+                        for i in missing_interfaces:
+                            commands.append('vlan {0}'.format(vlan_id))
+                            commands.append('interface {0}'.format(i))
+                            commands.append('switchport mode access')
+                            commands.append('switchport access vlan {0}'.format(vlan_id))
+
+                        superfluous_interfaces = list(set(obj_in_have['interfaces']) - set(interfaces))
+                        for i in superfluous_interfaces:
+                            commands.append('vlan {0}'.format(vlan_id))
+                            commands.append('interface {0}'.format(i))
+                            commands.append('switchport mode access')
+                            commands.append('no switchport access vlan')
+        else:
+            commands.append('vlan {0}'.format(vlan_id))
+            if name:
+                commands.append('name {0}'.format(name))
+            commands.append('state {0}'.format(state))
+
+    if purge:
+        for h in have:
+            obj_in_want = search_obj_in_list(h['vlan_id'], want)
+            if not obj_in_want and h['vlan_id'] != '1':
+                commands.append('no vlan {0}'.format(h['vlan_id']))
+
+    return commands
+
+
+def map_params_to_obj(module):
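+    # Build the list of desired VLAN objects either from each entry of the
+    # 'aggregate' parameter (backfilling missing keys from the top-level
+    # parameters) or from the top-level parameters alone. vlan_id is
+    # stringified so it compares cleanly against the parsed output of
+    # 'show vlan brief'.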
+    obj = []
+    aggregate = module.params.get('aggregate')
+    if aggregate:
+        for item in aggregate:
+            for key in item:
+                if item.get(key) is None:
+                    item[key] = module.params[key]
+
+            d = item.copy()
+            d['vlan_id'] = str(d['vlan_id'])
+
+            obj.append(d)
+    else:
+        obj.append({
+            'vlan_id': str(module.params['vlan_id']),
+            'name': module.params['name'],
+            'interfaces': module.params['interfaces'],
+            'associated_interfaces': module.params['associated_interfaces'],
+            'state': module.params['state']
+        })
+
+    return obj
+
+
+def parse_to_logical_rows(out):
+    relevant_data = False
+    cur_row = []
+    for line in out.splitlines():
+        if not line:
+            # Skip empty lines.
+            continue
+        if '0' < line[0] <= '9':
+            # A line starting with a number begins a new logical row.
+            if len(cur_row) > 0:
+                yield cur_row
+            cur_row = []  # Reset it to hold a next chunk
+            relevant_data = True
+        if relevant_data:
+            data = line.strip().split('(')
+            cur_row.append(data[0])
+    yield cur_row
+
+
+def parse_to_obj(logical_rows):
+    first_row = logical_rows[0]
+    rest_rows = logical_rows[1:]
+    vlan_data = first_row.split()
+    obj = {}
+    obj['vlan_id'] = vlan_data[0]
+    obj['name'] = vlan_data[1]
+    obj['state'] = vlan_data[2]
+    obj['interfaces'] = rest_rows
+    return obj
+
+
+def parse_vlan_brief(vlan_out):
+    return [parse_to_obj(r) for r in parse_to_logical_rows(vlan_out)]
+
+
+def map_config_to_obj(module):
+    return parse_vlan_brief(run_commands(module, ['show vlan brief'])[0])
+
+
+def check_declarative_intent_params(want, module, result):
+
+    have = None
+    is_delay = False
+
+    for w in want:
+        if w.get('associated_interfaces') is None:
+            continue
+
+        if result['changed'] and not is_delay:
+            time.sleep(module.params['delay'])
+            is_delay = True
+
+        if have is None:
+            have = map_config_to_obj(module)
+
+        for i in w['associated_interfaces']:
+            obj_in_have = search_obj_in_list(w['vlan_id'], have)
+            if obj_in_have and 'interfaces' in obj_in_have and i not in obj_in_have['interfaces']:
+                module.fail_json(msg="Interface %s not configured on vlan %s" % (i, w['vlan_id']))
+
+
+def main():
+    """ main entry point for module execution
+    """
+    element_spec = dict(
+        vlan_id=dict(type='int'),
+        name=dict(),
+        interfaces=dict(type='list'),
+        associated_interfaces=dict(type='list'),
+        delay=dict(default=10, type='int'),
+        state=dict(default='present',
+                   choices=['present', 'absent', 'active', 'suspend'])
+    )
+
+    aggregate_spec = deepcopy(element_spec)
+    aggregate_spec['vlan_id'] = dict(required=True)
+
+    # remove default in aggregate spec, to handle common arguments
+    remove_default_spec(aggregate_spec)
+
+    argument_spec = dict(
+        aggregate=dict(type='list', elements='dict', options=aggregate_spec),
+        purge=dict(default=False, type='bool')
+    )
+
+    argument_spec.update(element_spec)
+    argument_spec.update(cnos_argument_spec)
+
+    required_one_of = [['vlan_id', 'aggregate']]
+    mutually_exclusive = [['vlan_id', 'aggregate']]
+
+    module = AnsibleModule(argument_spec=argument_spec,
+                           required_one_of=required_one_of,
+                           mutually_exclusive=mutually_exclusive,
+                           supports_check_mode=True)
+    warnings = list()
+    result = {'changed': False}
+    if warnings:
+        result['warnings'] = warnings
+
+    want = map_params_to_obj(module)
+    have = map_config_to_obj(module)
+    commands = map_obj_to_commands((want, have), module)
+    result['commands'] = commands
+
+    if commands:
+        if not module.check_mode:
+            load_config(module, commands)
+        result['changed'] = True
+
+    check_declarative_intent_params(want, module, result)
+
+    module.exit_json(**result)
+
+
+if __name__ == '__main__':
+    main()
diff --git
a/plugins/modules/network/cnos/cnos_vrf.py b/plugins/modules/network/cnos/cnos_vrf.py
new file mode 100644
index 0000000000..1d8ffc5d6b
--- /dev/null
+++ b/plugins/modules/network/cnos/cnos_vrf.py
@@ -0,0 +1,369 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+#
+# Copyright (C) 2019 Lenovo.
+# (c) 2017, Ansible by Red Hat, inc
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+# Module to work on management of VRFs on Lenovo CNOS Switches
+# Lenovo Networking
+#
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+DOCUMENTATION = '''
+---
+module: cnos_vrf
+author: "Anil Kumar Muraleedharan (@amuraleedhar)"
+short_description: Manage VRFs on Lenovo CNOS network devices
+description:
+  - This module provides declarative management of VRFs
+    on Lenovo CNOS network devices.
+notes:
+  - Tested against CNOS 10.9.1
+options:
+  name:
+    description:
+      - Name of the VRF.
+    required: true
+  rd:
+    description:
+      - Route distinguisher of the VRF.
+  interfaces:
+    description:
+      - Identifies the set of interfaces that
+        should be configured in the VRF. Interfaces must be routed
+        interfaces in order to be placed into a VRF. The name of the interface
+        should be in expanded format and not abbreviated.
+  associated_interfaces:
+    description:
+      - This is an intent option that checks the operational state of the
+        given VRF C(name) for the associated interfaces. If the value in
+        C(associated_interfaces) does not match the operational state of the
+        VRF interfaces on the device, the module will fail.
+  aggregate:
+    description: List of VRF contexts.
+  purge:
+    description:
+      - Purge VRFs not defined in the I(aggregate) parameter.
+    default: no
+    type: bool
+  delay:
+    description:
+      - Time in seconds to wait before checking for the operational state on
+        remote device. This wait is applicable for operational state arguments.
+    default: 10
+  state:
+    description:
+      - State of the VRF configuration.
+    default: present
+    choices: ['present', 'absent']
+'''
+
+EXAMPLES = """
+- name: Create vrf
+  cnos_vrf:
+    name: test
+    rd: 1:200
+    interfaces:
+      - Ethernet1/33
+    state: present
+
+- name: Delete VRFs
+  cnos_vrf:
+    name: test
+    state: absent
+
+- name: Create aggregate of VRFs with purge
+  cnos_vrf:
+    aggregate:
+      - { name: test4, rd: "1:204" }
+      - { name: test5, rd: "1:205" }
+    state: present
+    purge: yes
+
+- name: Delete aggregate of VRFs
+  cnos_vrf:
+    aggregate:
+      - name: test2
+      - name: test3
+      - name: test4
+      - name: test5
+    state: absent
+"""

+RETURN = """
+commands:
+  description: The list of configuration mode commands to send to the device
+  returned: always
+  type: list
+  sample:
+    - vrf context test
+    - rd 1:100
+    - interface Ethernet1/44
+    - vrf member test
+"""
+import re
+import time
+
+from copy import deepcopy
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import remove_default_spec
+from ansible_collections.community.general.plugins.module_utils.network.cnos.cnos import load_config, run_commands
+from ansible_collections.community.general.plugins.module_utils.network.cnos.cnos import cnos_argument_spec, check_args
+
+
+def search_obj_in_list(name, lst):
+    for o in lst:
+        if o['name'] == name:
+            return o
+
+
+def get_interface_type(interface):
+    intf_type = 'unknown'
+    if interface.upper()[:2] in ('ET', 'GI', 'FA', 'TE', 'FO', 'HU', 'TWE'):
+        intf_type = 'ethernet'
+    elif interface.upper().startswith('VL'):
+        intf_type = 'svi'
+    elif interface.upper().startswith('LO'):
+        intf_type = 'loopback'
+    elif interface.upper()[:2] in ('MG', 'MA'):
+        intf_type = 'management'
+    elif interface.upper().startswith('PO'):
+        intf_type = 'portchannel'
+    elif interface.upper().startswith('NV'):
+        intf_type = 'nve'
+
+    return intf_type
+
+
+def is_switchport(name, module):
+    intf_type = get_interface_type(name)
+
+    if intf_type in ('ethernet', 'portchannel'):
+        config = run_commands(module,
+                              ['show interface {0} switchport'.format(name)])[0]
+        match = re.search(r'Switchport : enabled', config)
+        return bool(match)
+    return False
+
+
+def map_obj_to_commands(updates, module):
+    commands = list()
+    want, have = updates
+    state = module.params['state']
+    purge = module.params['purge']
+
+    for w in want:
+        name = w['name']
+        rd = w['rd']
+        interfaces = w['interfaces']
+
+        obj_in_have = search_obj_in_list(name, have)
+
+        if name == 'default':
+            module.fail_json(msg='VRF context default is reserved')
+        elif len(name) > 63:
+            module.fail_json(msg='VRF name is too long')
+        if state == 'absent':
+            if name == 'management':
+                module.fail_json(msg='Management VRF context cannot be deleted')
+            if obj_in_have:
+                commands.append('no vrf context %s' % name)
+        elif state == 'present':
+            if not obj_in_have:
+                commands.append('vrf context %s' % name)
+
+                if rd is not None:
+                    commands.append('rd %s' % rd)
+
+                if w['interfaces']:
+                    for i in w['interfaces']:
+                        commands.append('interface %s' % i)
+                        commands.append('vrf member %s' % w['name'])
+            else:
+                if w['rd'] is not None and w['rd'] != obj_in_have['rd']:
+                    commands.append('vrf context %s' % w['name'])
+                    commands.append('rd %s' % w['rd'])
+
+                if w['interfaces']:
+                    if not obj_in_have['interfaces']:
+                        for i in w['interfaces']:
+                            commands.append('interface %s' % i)
+                            commands.append('vrf member %s' % w['name'])
+                    elif set(w['interfaces']) != set(obj_in_have['interfaces']):
+                        missing_interfaces = list(set(w['interfaces']) - set(obj_in_have['interfaces']))
+
+                        for i in
missing_interfaces: + commands.append('interface %s' % i) + commands.append('vrf member %s' % w['name']) + + if purge: + for h in have: + obj_in_want = search_obj_in_list(h['name'], want) + if not obj_in_want: + commands.append('no vrf context %s' % h['name']) + + return commands + + +def map_config_to_obj(module): + objs = [] + output = run_commands(module, {'command': 'show vrf'}) + if output is not None: + vrfText = output[0].strip() + vrfList = vrfText.split('VRF') + for vrfItem in vrfList: + if 'FIB ID' in vrfItem: + obj = dict() + list_of_words = vrfItem.split() + vrfName = list_of_words[0] + obj['name'] = vrfName[:-1] + obj['rd'] = list_of_words[list_of_words.index('RD') + 1] + start = False + obj['interfaces'] = [] + for intName in list_of_words: + if 'Interfaces' in intName: + start = True + if start is True: + if '!' not in intName and 'Interfaces' not in intName: + obj['interfaces'].append(intName.strip().lower()) + objs.append(obj) + else: + module.fail_json(msg='Could not fetch VRF details from device') + return objs + + +def map_params_to_obj(module): + obj = [] + aggregate = module.params.get('aggregate') + if aggregate: + for item in aggregate: + for key in item: + if item.get(key) is None: + item[key] = module.params[key] + + if item.get('interfaces'): + item['interfaces'] = [intf.replace(" ", "").lower() for intf in item.get('interfaces') if intf] + + if item.get('associated_interfaces'): + item['associated_interfaces'] = [intf.replace(" ", "").lower() for intf in item.get('associated_interfaces') if intf] + + obj.append(item.copy()) + else: + obj.append({ + 'name': module.params['name'], + 'state': module.params['state'], + 'rd': module.params['rd'], + 'interfaces': [intf.replace(" ", "").lower() for intf in module.params['interfaces']] if module.params['interfaces'] else [], + 'associated_interfaces': [intf.replace(" ", "").lower() for intf in + module.params['associated_interfaces']] if module.params['associated_interfaces'] else [] + + }) + + return obj + + +def check_declarative_intent_params(want, module, result): + have = None + is_delay = False + + for w in want: + if w.get('associated_interfaces') is None: + continue + + if result['changed'] and not is_delay: + time.sleep(module.params['delay']) + is_delay = True + + if have is None: + have = map_config_to_obj(module) + + for i in w['associated_interfaces']: + obj_in_have = search_obj_in_list(w['name'], have) + + if obj_in_have: + interfaces = obj_in_have.get('interfaces') + if interfaces is not None and i not in interfaces: + module.fail_json(msg="Interface %s not configured on vrf %s" % (i, w['name'])) + + +def main(): + """ main entry point for module execution + """ + element_spec = dict( + name=dict(), + interfaces=dict(type='list'), + associated_interfaces=dict(type='list'), + delay=dict(default=10, type='int'), + rd=dict(), + state=dict(default='present', choices=['present', 'absent']) + ) + + aggregate_spec = deepcopy(element_spec) + + # remove default in aggregate spec, to handle common arguments + remove_default_spec(aggregate_spec) + + argument_spec = dict( + aggregate=dict(type='list', elements='dict', options=aggregate_spec), + purge=dict(default=False, type='bool') + ) + + argument_spec.update(element_spec) + + required_one_of = [['name', 'aggregate']] + mutually_exclusive = [['name', 'aggregate']] + module = AnsibleModule(argument_spec=argument_spec, + required_one_of=required_one_of, + mutually_exclusive=mutually_exclusive, + supports_check_mode=True) + + warnings = list() + check_args(module, 
warnings)
+
+    result = {'changed': False}
+
+    if warnings:
+        result['warnings'] = warnings
+
+    want = map_params_to_obj(module)
+    for w in want:
+        name = w['name']
+        name = name.lower()
+        if is_switchport(name, module):
+            module.fail_json(msg='Ensure interface is configured to be an L3'
+                             '\nport first before using this module. You can use'
+                             '\nthe cnos_interface module for this.')
+    have = map_config_to_obj(module)
+
+    commands = map_obj_to_commands((want, have), module)
+    result['commands'] = commands
+
+    if commands:
+        if not module.check_mode:
+            load_config(module, commands)
+        result['changed'] = True
+    check_declarative_intent_params(want, module, result)
+    module.exit_json(**result)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/network/cumulus/nclu.py b/plugins/modules/network/cumulus/nclu.py
new file mode 100644
index 0000000000..85a1af6f89
--- /dev/null
+++ b/plugins/modules/network/cumulus/nclu.py
@@ -0,0 +1,254 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2016-2018, Cumulus Networks
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+DOCUMENTATION = '''
+---
+module: nclu
+author: "Cumulus Networks (@isharacomix)"
+short_description: Configure network interfaces using NCLU
+description:
+  - Interface to the Network Command Line Utility, developed to make it easier
+    to configure operating systems running ifupdown2 and Quagga, such as
+    Cumulus Linux. Command documentation is available at
+    U(https://docs.cumulusnetworks.com/cumulus-linux/System-Configuration/Network-Command-Line-Utility-NCLU/)
+options:
+  commands:
+    description:
+      - A list of strings containing the net commands to run. Mutually
+        exclusive with I(template).
+  template:
+    description:
+      - A single, multi-line string with jinja2 formatting. This string
+        will be broken by lines, and each line will be run through net.
+        Mutually exclusive with I(commands).
+  commit:
+    description:
+      - When true, performs a 'net commit' at the end of the block.
+        Mutually exclusive with I(atomic).
+    default: false
+    type: bool
+  abort:
+    description:
+      - When true, performs a 'net abort' before the block.
+        This cleans out any uncommitted changes in the buffer.
+        Mutually exclusive with I(atomic).
+    default: false
+    type: bool
+  atomic:
+    description:
+      - When true, equivalent to both I(commit) and I(abort) being true.
+        Mutually exclusive with I(commit) and I(abort).
+    default: false
+    type: bool
+  description:
+    description:
+      - Commit description that will be recorded to the commit log if
+        I(commit) or I(atomic) are true.
+    default: "Ansible-originated commit"
+'''
+
+EXAMPLES = '''
+
+- name: Add two interfaces without committing any changes
+  nclu:
+    commands:
+      - add int swp1
+      - add int swp2
+
+- name: Modify hostname to Cumulus-1 and commit the change
+  nclu:
+    commands:
+      - add hostname Cumulus-1
+    commit: true
+
+- name: Add 48 interfaces and commit the change.
+  nclu:
+    template: |
+      {% for iface in range(1,49) %}
+      add int swp{{iface}}
+      {% endfor %}
+    commit: true
+    description: "Ansible - add swps1-48"
+
+- name: Fetch Status Of Interface
+  nclu:
+    commands:
+      - show interface swp1
+  register: output
+
+- name: Print Status Of Interface
+  debug:
+    var: output
+
+- name: Fetch Details From All Interfaces In JSON Format
+  nclu:
+    commands:
+      - show interface json
+  register: output
+
+- name: Print Interface Details
+  debug:
+    var: output["msg"]
+
+- name: Atomically add an interface
+  nclu:
+    commands:
+      - add int swp1
+    atomic: true
+    description: "Ansible - add swp1"
+
+- name: Remove IP address from interface swp1
+  nclu:
+    commands:
+      - del int swp1 ip address 1.1.1.1/24
+
+- name: Configure BGP AS and add 2 EBGP neighbors using BGP Unnumbered
+  nclu:
+    commands:
+      - add bgp autonomous-system 65000
+      - add bgp neighbor swp51 interface remote-as external
+      - add bgp neighbor swp52 interface remote-as external
+    commit: true
+
+- name: Configure BGP AS and Add 2 EBGP neighbors Using BGP Unnumbered via Template
+  nclu:
+    template: |
+      {% for neighbor in range(51,53) %}
+      add bgp neighbor swp{{neighbor}} interface remote-as external
+      add bgp autonomous-system 65000
+      {% endfor %}
+    atomic: true
+
+- name: Check BGP Status
+  nclu:
+    commands:
+      - show bgp summary json
+  register: output
+
+- name: Print BGP Status In JSON
+  debug:
+    var: output["msg"]
+'''
+
+RETURN = '''
+changed:
+  description: whether the interface was changed
+  returned: changed
+  type: bool
+  sample: True
+msg:
+  description: human-readable report of success or failure
+  returned: always
+  type: str
+  sample: "interface bond0 config updated"
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def command_helper(module, command, errmsg=None):
+    """Run a command, catch any nclu errors"""
+    (_rc, output, _err) = module.run_command("/usr/bin/net %s" % command)
+    if _rc or 'ERROR' in output or 'ERROR' in _err:
+        module.fail_json(msg=errmsg or output)
+    return str(output)
+
+
+def check_pending(module):
+    """Check the pending diff of the nclu buffer."""
+    pending = command_helper(module, "pending", "Error in pending config. You may want to view `net pending` on this target.")
+
+    delimiter1 = "net add/del commands since the last 'net commit'"
+    color1 = '\x1b[94m'
+    if delimiter1 in pending:
+        pending = pending.split(delimiter1)[0]
+        pending = pending.replace(color1, '')
+    return pending.strip()
+
+
+def run_nclu(module, command_list, command_string, commit, atomic, abort, description):
+    _changed = False
+
+    commands = []
+    if command_list:
+        commands = command_list
+    elif command_string:
+        commands = command_string.splitlines()
+
+    do_commit = False
+    do_abort = abort
+    if commit or atomic:
+        do_commit = True
+        if atomic:
+            do_abort = True
+
+    if do_abort:
+        command_helper(module, "abort")
+
+    # First, look at the staged commands.
+    before = check_pending(module)
+    # Run all of the net commands
+    output_lines = []
+    for line in commands:
+        if line.strip():
+            output_lines += [command_helper(module, line.strip(), "Failed on line %s" % line)]
+    output = "\n".join(output_lines)
+
+    # If pending changes changed, report a change.
+    after = check_pending(module)
+    if before == after:
+        _changed = False
+    else:
+        _changed = True
+
+    # Do the commit.
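+    # 'net commit' answers with "commit ignored" when the buffer holds no
+    # changes; in that case the abort below merely clears the empty buffer.
+    # An empty 'show commit last' likewise means nothing was actually
+    # committed, so both paths report no change.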
+    if do_commit:
+        result = command_helper(module, "commit description '%s'" % description)
+        if "commit ignored" in result:
+            _changed = False
+            command_helper(module, "abort")
+        elif command_helper(module, "show commit last") == "":
+            _changed = False
+
+    return _changed, output
+
+
+def main(testing=False):
+    module = AnsibleModule(argument_spec=dict(
+        commands=dict(required=False, type='list'),
+        template=dict(required=False, type='str'),
+        description=dict(required=False, type='str', default="Ansible-originated commit"),
+        abort=dict(required=False, type='bool', default=False),
+        commit=dict(required=False, type='bool', default=False),
+        atomic=dict(required=False, type='bool', default=False)),
+        mutually_exclusive=[('commands', 'template'),
+                            ('commit', 'atomic'),
+                            ('abort', 'atomic')]
+    )
+    command_list = module.params.get('commands', None)
+    command_string = module.params.get('template', None)
+    commit = module.params.get('commit')
+    atomic = module.params.get('atomic')
+    abort = module.params.get('abort')
+    description = module.params.get('description')
+
+    _changed, output = run_nclu(module, command_list, command_string, commit, atomic, abort, description)
+    if not testing:
+        module.exit_json(changed=_changed, msg=output)
+    elif testing:
+        return {"changed": _changed, "msg": output}
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/network/edgeos/edgeos_command.py b/plugins/modules/network/edgeos/edgeos_command.py
new file mode 100644
index 0000000000..944be74108
--- /dev/null
+++ b/plugins/modules/network/edgeos/edgeos_command.py
@@ -0,0 +1,176 @@
+#!/usr/bin/python
+
+# Copyright: (c) 2018, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+DOCUMENTATION = '''
+module: edgeos_command
+author:
+  - Chad Norgan (@beardymcbeards)
+  - Sam Doran (@samdoran)
+short_description: Run one or more commands on EdgeOS devices
+description:
+  - This command module allows running one or more commands on a remote
+    device running EdgeOS, such as the Ubiquiti EdgeRouter.
+  - This module does not support running commands in configuration mode.
+  - Certain C(show) commands in EdgeOS produce many lines of output and
+    use a custom pager that can cause this module to hang. If the
+    value of the environment variable C(ANSIBLE_EDGEOS_TERMINAL_LENGTH)
+    is not set, the default number of 10000 is used.
+  - "This is a network module and requires C(connection: network_cli)
+    in order to work properly."
+  - For more information please see the L(Network Guide,../network/getting_started/index.html).
+options:
+  commands:
+    description:
+      - The commands or ordered set of commands that should be run against the
+        remote device. The output of the command is returned to the playbook.
+        If the C(wait_for) argument is provided, the module does not return
+        until the condition is met or the number of retries is exceeded.
+    required: True
+  wait_for:
+    description:
+      - Causes the task to wait for a specific condition to be met before
+        moving forward. If the condition is not met before the specified
+        number of retries is exceeded, the task will fail.
+    required: False
+  match:
+    description:
+      - Used in conjunction with C(wait_for) to create match policy. If set to
If set to + C(any), then only one condition must match. + required: False + default: 'all' + choices: ['any', 'all'] + retries: + description: + - Number of times a command should be tried before it is considered failed. + The command is run on the target device and evaluated against the + C(wait_for) conditionals. + required: False + default: 10 + interval: + description: + - The number of seconds to wait between C(retries) of the command. + required: False + default: 1 + +notes: + - Tested against EdgeOS 1.9.7 + - Running C(show system boot-messages all) will cause the module to hang since + EdgeOS is using a custom pager setting to display the output of that command. +''' + +EXAMPLES = """ +tasks: + - name: Reboot the device + edgeos_command: + commands: reboot now + + - name: Show the configuration for eth0 and eth1 + edgeos_command: + commands: show interfaces ethernet {{ item }} + loop: + - eth0 + - eth1 +""" + +RETURN = """ +stdout: + description: The set of responses from the commands + returned: always apart from low level errors (such as action plugin) + type: list + sample: ['...', '...'] +stdout_lines: + description: The value of stdout split into a list + returned: always + type: list + sample: [['...', '...'], ['...'], ['...']] +""" +import time + +from ansible.module_utils._text import to_text +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.parsing import Conditional +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import transform_commands, to_lines +from ansible_collections.community.general.plugins.module_utils.network.edgeos.edgeos import run_commands + + +def parse_commands(module, warnings): + commands = transform_commands(module) + + if module.check_mode: + for item in list(commands): + if not item['command'].startswith('show'): + warnings.append( + 'Only show commands are supported when using check mode, not ' + 'executing %s' % item['command'] + ) + commands.remove(item) + + return commands + + +def main(): + spec = dict( + commands=dict(type='list', required=True), + wait_for=dict(type='list'), + match=dict(default='all', choices=['all', 'any']), + retries=dict(default=10, type='int'), + interval=dict(default=1, type='int') + ) + + module = AnsibleModule(argument_spec=spec, supports_check_mode=True) + + warnings = list() + result = {'changed': False, 'warnings': warnings} + commands = parse_commands(module, warnings) + wait_for = module.params['wait_for'] or list() + + try: + conditionals = [Conditional(c) for c in wait_for] + except AttributeError as exc: + module.fail_json(msg=to_text(exc)) + + retries = module.params['retries'] + interval = module.params['interval'] + match = module.params['match'] + + while retries > 0: + responses = run_commands(module, commands) + + for item in list(conditionals): + if item(responses): + if match == 'any': + conditionals = list() + break + conditionals.remove(item) + + if not conditionals: + break + + time.sleep(interval) + retries -= 1 + + if conditionals: + failed_conditions = [item.raw for item in conditionals] + msg = 'One or more conditional statements have not been satisfied' + module.fail_json(msg=msg, failed_conditions=failed_conditions) + + result.update({ + 'stdout': responses, + 'stdout_lines': list(to_lines(responses)), + }) + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/edgeos/edgeos_config.py 
b/plugins/modules/network/edgeos/edgeos_config.py new file mode 100644 index 0000000000..cc3b4f17aa --- /dev/null +++ b/plugins/modules/network/edgeos/edgeos_config.py @@ -0,0 +1,317 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright (c) 2018 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: edgeos_config +author: + - "Nathaniel Case (@Qalthos)" + - "Sam Doran (@samdoran)" +short_description: Manage EdgeOS configuration on remote device +description: + - This module provides configuration file management of EdgeOS + devices. It provides arguments for managing both the + configuration file and state of the active configuration. All + configuration statements are based on `set` and `delete` commands + in the device configuration. + - "This is a network module and requires the C(connection: network_cli) in order + to work properly." + - For more information please see the L(Network Guide,../network/getting_started/index.html). +notes: + - Tested against EdgeOS 1.9.7 + - Setting C(ANSIBLE_PERSISTENT_COMMAND_TIMEOUT) to 30 is recommended since + the save command can take longer than the default of 10 seconds on + some EdgeOS hardware. +options: + lines: + description: + - The ordered set of configuration lines to be managed and + compared with the existing configuration on the remote + device. + src: + description: + - The C(src) argument specifies the path to the source config + file to load. The source config file can either be in + bracket format or set format. The source file can include + Jinja2 template variables. + match: + description: + - The C(match) argument controls the method used to match + against the current active configuration. By default, the + desired config is matched against the active config and the + deltas are loaded. If the C(match) argument is set to C(none) + the active configuration is ignored and the configuration is + always loaded. + default: line + choices: ['line', 'none'] + backup: + description: + - The C(backup) argument will backup the current device's active + configuration to the Ansible control host prior to making any + changes. If the C(backup_options) value is not given, the backup + file will be located in the backup folder in the playbook root + directory or role root directory if the playbook is part of an + ansible role. If the directory does not exist, it is created. + type: bool + default: 'no' + comment: + description: + - Allows a commit description to be specified to be included + when the configuration is committed. If the configuration is + not changed or committed, this argument is ignored. + default: 'configured by edgeos_config' + config: + description: + - The C(config) argument specifies the base configuration to use + to compare against the desired configuration. If this value + is not specified, the module will automatically retrieve the + current active configuration from the remote device. + save: + description: + - The C(save) argument controls whether or not changes made + to the active configuration are saved to disk. This is + independent of committing the config. When set to C(True), the + active configuration is saved. 
+    type: bool
+    default: 'no'
+  backup_options:
+    description:
+      - This is a dict object containing configurable options related to backup file path.
+        The value of this option is read only when C(backup) is set to I(yes), if C(backup) is set
+        to I(no) this option will be silently ignored.
+    suboptions:
+      filename:
+        description:
+          - The filename to be used to store the backup configuration. If the filename
+            is not given it will be generated based on the hostname, current time and date
+            in the format defined by <hostname>_config.<current-date>@<current-time>
+      dir_path:
+        description:
+          - This option provides the path ending with directory name in which the backup
+            configuration file will be stored. If the directory does not exist it will be first
+            created and the filename is either the value of C(filename) or default filename
+            as described in C(filename) options description. If the path value is not given
+            in that case a I(backup) directory will be created in the current working directory
+            and backup configuration will be copied in C(filename) within I(backup) directory.
+        type: path
+    type: dict
+'''
+
+EXAMPLES = """
+- name: configure the remote device
+  edgeos_config:
+    lines:
+      - set system host-name {{ inventory_hostname }}
+      - set service lldp
+      - delete service dhcp-server
+
+- name: backup and load from file
+  edgeos_config:
+    src: edgeos.cfg
+    backup: yes
+
+- name: configurable backup path
+  edgeos_config:
+    src: edgeos.cfg
+    backup: yes
+    backup_options:
+      filename: backup.cfg
+      dir_path: /home/user
+"""
+
+RETURN = """
+commands:
+  description: The list of configuration commands sent to the device
+  returned: always
+  type: list
+  sample: ['...', '...']
+backup_path:
+  description: The full path to the backup file
+  returned: when backup is yes
+  type: str
+  sample: /playbooks/ansible/backup/edgeos_config.2016-07-16@22:28:34
+"""
+
+import re
+
+from ansible.module_utils._text import to_native
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.config import NetworkConfig
+from ansible_collections.community.general.plugins.module_utils.network.edgeos.edgeos import load_config, get_config, run_commands
+
+
+DEFAULT_COMMENT = 'configured by edgeos_config'
+
+
+def config_to_commands(config):
+    set_format = config.startswith('set') or config.startswith('delete')
+    candidate = NetworkConfig(indent=4, contents=config)
+    if not set_format:
+        candidate = [c.line for c in candidate.items]
+        commands = list()
+        # this filters out less specific lines
+        for item in candidate:
+            for index, entry in enumerate(commands):
+                if item.startswith(entry):
+                    del commands[index]
+                    break
+            commands.append(item)
+
+        commands = ['set %s' % cmd.replace(' {', '') for cmd in commands]
+
+    else:
+        commands = to_native(candidate).split('\n')
+
+    return commands
+
+
+def get_candidate(module):
+    contents = module.params['src'] or module.params['lines']
+
+    if module.params['lines']:
+        contents = '\n'.join(contents)
+
+    return config_to_commands(contents)
+
+
+def check_command(module, command):
+    """Tests whether a command line is valid; otherwise fails the module.
+
+    Errors on an uneven number of single quotes, which would otherwise break
+    Ansible by leaving it waiting for further input. Commands with an even
+    number of single quotes are handled correctly.
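+
+    For example (an illustrative command, not taken from a real device
+    config), the line "set system login banner pre-login 'warning" contains
+    an unmatched single quote and would leave the CLI waiting for more
+    input, so it is rejected here.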
+ + :param command: the command line from current or new config + :type command: string + :raises ValueError: + * if contains odd number of single quotes + :return: command string unchanged + :rtype: string + """ + if command.count("'") % 2 != 0: + module.fail_json(msg="Unmatched single (') quote found in command: " + command) + + return command + + +def diff_config(module, commands, config): + config = [to_native(check_command(module, c)) for c in config.splitlines()] + + updates = list() + visited = set() + delete_commands = [line for line in commands if line.startswith('delete')] + + for line in commands: + item = to_native(check_command(module, line)) + + if not item.startswith('set') and not item.startswith('delete'): + raise ValueError('line must start with either `set` or `delete`') + + elif item.startswith('set'): + + if item not in config: + updates.append(line) + + # If there is a corresponding delete command in the desired config, make sure to append + # the set command even though it already exists in the running config + else: + ditem = re.sub('set', 'delete', item) + for line in delete_commands: + if ditem.startswith(line): + updates.append(item) + + elif item.startswith('delete'): + if not config: + updates.append(line) + else: + item = re.sub(r'delete', 'set', item) + for entry in config: + if entry.startswith(item) and line not in visited: + updates.append(line) + visited.add(line) + + return list(updates) + + +def run(module, result): + # get the current active config from the node or passed in via + # the config param + config = module.params['config'] or get_config(module) + + # create the candidate config object from the arguments + candidate = get_candidate(module) + + # create loadable config that includes only the configuration updates + commands = diff_config(module, candidate, config) + + result['commands'] = commands + + commit = not module.check_mode + comment = module.params['comment'] + + if commands: + load_config(module, commands, commit=commit, comment=comment) + + result['changed'] = True + + +def main(): + + backup_spec = dict( + filename=dict(), + dir_path=dict(type='path') + ) + spec = dict( + src=dict(type='path'), + lines=dict(type='list'), + + match=dict(default='line', choices=['line', 'none']), + + comment=dict(default=DEFAULT_COMMENT), + + config=dict(), + + backup=dict(type='bool', default=False), + backup_options=dict(type='dict', options=backup_spec), + save=dict(type='bool', default=False), + ) + + mutually_exclusive = [('lines', 'src')] + + module = AnsibleModule( + argument_spec=spec, + mutually_exclusive=mutually_exclusive, + supports_check_mode=True + ) + + warnings = list() + + result = dict(changed=False, warnings=warnings) + + if module.params['backup']: + result['__backup__'] = get_config(module=module) + + if any((module.params['src'], module.params['lines'])): + run(module, result) + + if module.params['save']: + diff = run_commands(module, commands=['configure', 'compare saved'])[1] + if diff != '[edit]': + run_commands(module, commands=['save']) + result['changed'] = True + run_commands(module, commands=['exit']) + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/edgeos/edgeos_facts.py b/plugins/modules/network/edgeos/edgeos_facts.py new file mode 100644 index 0000000000..104061829a --- /dev/null +++ b/plugins/modules/network/edgeos/edgeos_facts.py @@ -0,0 +1,310 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright (c) 2018 Ansible Project +# GNU General Public 
License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: edgeos_facts
+author:
+  - Nathaniel Case (@Qalthos)
+  - Sam Doran (@samdoran)
+short_description: Collect facts from remote devices running EdgeOS
+description:
+  - Collects a base set of device facts from a remote device that
+    is running EdgeOS. This module prepends all of the
+    base network fact keys with C(ansible_net_). The facts
+    module will always collect a base set of facts from the device
+    and can enable or disable collection of additional facts.
+notes:
+  - Tested against EdgeOS 1.9.7
+options:
+  gather_subset:
+    description:
+      - When supplied, this argument will restrict the facts collected
+        to a given subset. Possible values for this argument include
+        all, default, config, and neighbors. Can specify a list of
+        values to include a larger subset. Values can also be used
+        with an initial C(!) to specify that a specific subset should
+        not be collected.
+    required: false
+    default: "!config"
+'''
+
+EXAMPLES = """
+- name: collect all facts from the device
+  edgeos_facts:
+    gather_subset: all
+
+- name: collect only the config and default facts
+  edgeos_facts:
+    gather_subset: config
+
+- name: collect everything except the config
+  edgeos_facts:
+    gather_subset: "!config"
+"""
+
+RETURN = """
+ansible_net_config:
+  description: The running-config from the device
+  returned: when config is configured
+  type: str
+ansible_net_commits:
+  description: The set of available configuration revisions
+  returned: when present
+  type: list
+ansible_net_hostname:
+  description: The configured system hostname
+  returned: always
+  type: str
+ansible_net_model:
+  description: The device model string
+  returned: always
+  type: str
+ansible_net_serialnum:
+  description: The serial number of the device
+  returned: always
+  type: str
+ansible_net_version:
+  description: The version of the software running
+  returned: always
+  type: str
+ansible_net_neighbors:
+  description: The set of LLDP neighbors
+  returned: when interface is configured
+  type: list
+ansible_net_gather_subset:
+  description: The list of subsets gathered by the module
+  returned: always
+  type: list
+"""
+
+import re
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six import iteritems
+from ansible_collections.community.general.plugins.module_utils.network.edgeos.edgeos import run_commands
+
+
+class FactsBase(object):
+
+    COMMANDS = frozenset()
+
+    def __init__(self, module):
+        self.module = module
+        self.facts = dict()
+        self.responses = None
+
+    def populate(self):
+        self.responses = run_commands(self.module, list(self.COMMANDS))
+
+
+class Default(FactsBase):
+
+    COMMANDS = [
+        'show version',
+        'show host name',
+    ]
+
+    def populate(self):
+        super(Default, self).populate()
+        data = self.responses[0]
+
+        self.facts['version'] = self.parse_version(data)
+        self.facts['serialnum'] = self.parse_serialnum(data)
+        self.facts['model'] = self.parse_model(data)
+
+        self.facts['hostname'] = self.responses[1]
+
+    def parse_version(self, data):
+        match = re.search(r'Version:\s*v(\S+)', data)
+        if match:
+            return match.group(1)
+
+    def parse_model(self, data):
+        match = re.search(r'HW model:\s*([A-Za-z0-9- ]+)', data)
+        if match:
+            return match.group(1)
+
+    def parse_serialnum(self, data):
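+        # 'show version' reports the serial as e.g. "HW S/N: <value>";
+        # capture the first non-whitespace token after the label.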
+ match = re.search(r'HW S/N:\s+(\S+)', data) + if match: + return match.group(1) + + +class Config(FactsBase): + + COMMANDS = [ + 'show configuration commands', + 'show system commit', + ] + + def populate(self): + super(Config, self).populate() + + self.facts['config'] = self.responses + + commits = self.responses[1] + entries = list() + entry = None + + for line in commits.split('\n'): + match = re.match(r'(\d+)\s+(.+)by(.+)via(.+)', line) + if match: + if entry: + entries.append(entry) + + entry = dict(revision=match.group(1), + datetime=match.group(2), + by=str(match.group(3)).strip(), + via=str(match.group(4)).strip(), + comment=None) + elif entry: + entry['comment'] = line.strip() + + self.facts['commits'] = entries + + +class Neighbors(FactsBase): + + COMMANDS = [ + 'show lldp neighbors', + 'show lldp neighbors detail', + ] + + def populate(self): + super(Neighbors, self).populate() + + all_neighbors = self.responses[0] + if 'LLDP not configured' not in all_neighbors: + neighbors = self.parse( + self.responses[1] + ) + self.facts['neighbors'] = self.parse_neighbors(neighbors) + + def parse(self, data): + parsed = list() + values = None + for line in data.split('\n'): + if not line: + continue + elif line[0] == ' ': + values += '\n%s' % line + elif line.startswith('Interface'): + if values: + parsed.append(values) + values = line + if values: + parsed.append(values) + return parsed + + def parse_neighbors(self, data): + facts = dict() + for item in data: + interface = self.parse_interface(item) + host = self.parse_host(item) + port = self.parse_port(item) + if interface not in facts: + facts[interface] = list() + facts[interface].append(dict(host=host, port=port)) + return facts + + def parse_interface(self, data): + match = re.search(r'^Interface:\s+(\S+),', data) + return match.group(1) + + def parse_host(self, data): + match = re.search(r'SysName:\s+(.+)$', data, re.M) + if match: + return match.group(1) + + def parse_port(self, data): + match = re.search(r'PortDescr:\s+(.+)$', data, re.M) + if match: + return match.group(1) + + +FACT_SUBSETS = dict( + default=Default, + neighbors=Neighbors, + config=Config +) + +VALID_SUBSETS = frozenset(FACT_SUBSETS.keys()) + + +def main(): + spec = dict( + gather_subset=dict(default=['!config'], type='list') + ) + + module = AnsibleModule(argument_spec=spec, + supports_check_mode=True) + + warnings = list() + + gather_subset = module.params['gather_subset'] + + runable_subsets = set() + exclude_subsets = set() + + for subset in gather_subset: + if subset == 'all': + runable_subsets.update(VALID_SUBSETS) + continue + + if subset.startswith('!'): + subset = subset[1:] + if subset == 'all': + exclude_subsets.update(VALID_SUBSETS) + continue + exclude = True + else: + exclude = False + + if subset not in VALID_SUBSETS: + module.fail_json(msg='Subset must be one of [%s], got %s' % + (', '.join(VALID_SUBSETS), subset)) + + if exclude: + exclude_subsets.add(subset) + else: + runable_subsets.add(subset) + + if not runable_subsets: + runable_subsets.update(VALID_SUBSETS) + + runable_subsets.difference_update(exclude_subsets) + runable_subsets.add('default') + + facts = dict() + facts['gather_subset'] = list(runable_subsets) + + instances = list() + for key in runable_subsets: + instances.append(FACT_SUBSETS[key](module)) + + for inst in instances: + inst.populate() + facts.update(inst.facts) + + ansible_facts = dict() + for key, value in iteritems(facts): + key = 'ansible_net_%s' % key + ansible_facts[key] = value + + 
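+    # Expose every collected fact under the ansible_net_ prefix so the
+    # values merge into host facts without clobbering unrelated variables.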
    module.exit_json(ansible_facts=ansible_facts, warnings=warnings)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/network/edgeswitch/edgeswitch_facts.py b/plugins/modules/network/edgeswitch/edgeswitch_facts.py
new file mode 100644
index 0000000000..9d6969ecb6
--- /dev/null
+++ b/plugins/modules/network/edgeswitch/edgeswitch_facts.py
@@ -0,0 +1,270 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright (c) 2018 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: edgeswitch_facts
+author: "Frederic Bor (@f-bor)"
+short_description: Collect facts from remote devices running Edgeswitch
+description:
+  - Collects a base set of device facts from a remote device that
+    is running Ubiquiti Edgeswitch. This module prepends all of the
+    base network fact keys with C(ansible_net_). The facts
+    module will always collect a base set of facts from the device
+    and can enable or disable collection of additional facts.
+notes:
+  - Tested against Edgeswitch 1.7.4
+options:
+  gather_subset:
+    description:
+      - When supplied, this argument will restrict the facts collected
+        to a given subset. Possible values for this argument include
+        all, config, and interfaces. Can specify a list of
+        values to include a larger subset. Values can also be used
+        with an initial C(!) to specify that a specific subset should
+        not be collected.
+    required: false
+    default: '!config'
+'''
+
+EXAMPLES = """
+# Collect all facts from the device
+- edgeswitch_facts:
+    gather_subset: all
+
+# Collect only the config and default facts
+- edgeswitch_facts:
+    gather_subset:
+      - config
+
+"""
+
+RETURN = """
+ansible_net_gather_subset:
+  description: The list of fact subsets collected from the device
+  returned: always
+  type: list
+
+# default
+ansible_net_model:
+  description: The model name returned from the device
+  returned: always
+  type: str
+ansible_net_serialnum:
+  description: The serial number of the remote device
+  returned: always
+  type: str
+ansible_net_version:
+  description: The operating system version running on the remote device
+  returned: always
+  type: str
+ansible_net_hostname:
+  description: The configured hostname of the device
+  returned: always
+  type: str
+
+# config
+ansible_net_config:
+  description: The current active config from the device
+  returned: when config is configured
+  type: str
+
+# interfaces
+ansible_net_interfaces:
+  description: A hash of all interfaces running on the system
+  returned: when interfaces is configured
+  type: dict
+"""
+import re
+
+from ansible_collections.community.general.plugins.module_utils.network.edgeswitch.edgeswitch import run_commands
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six import iteritems
+
+
+class FactsBase(object):
+
+    COMMANDS = list()
+
+    def __init__(self, module):
+        self.module = module
+        self.facts = dict()
+        self.responses = None
+
+    def populate(self):
+        self.responses = run_commands(self.module, commands=self.COMMANDS, check_rc=False)
+
+    def run(self, cmd):
+        return run_commands(self.module, commands=cmd, check_rc=False)
+
+
+class Default(FactsBase):
+
+    COMMANDS = ['show version', 'show sysinfo']
+
+    def populate(self):
+        super(Default, self).populate()
+        data = self.responses[0]
+        if
data: + self.facts['version'] = self.parse_version(data) + self.facts['serialnum'] = self.parse_serialnum(data) + self.facts['model'] = self.parse_model(data) + self.facts['hostname'] = self.parse_hostname(self.responses[1]) + + def parse_version(self, data): + match = re.search(r'Software Version\.+ (.*)', data) + if match: + return match.group(1) + + def parse_hostname(self, data): + match = re.search(r'System Name\.+ (.*)', data) + if match: + return match.group(1) + + def parse_model(self, data): + match = re.search(r'Machine Model\.+ (.*)', data) + if match: + return match.group(1) + + def parse_serialnum(self, data): + match = re.search(r'Serial Number\.+ (.*)', data) + if match: + return match.group(1) + + +class Config(FactsBase): + + COMMANDS = ['show running-config'] + + def populate(self): + super(Config, self).populate() + data = self.responses[0] + if data: + self.facts['config'] = data + + +class Interfaces(FactsBase): + + COMMANDS = [ + 'show interfaces description', + 'show interfaces status all' + ] + + def populate(self): + super(Interfaces, self).populate() + + interfaces = {} + + data = self.responses[0] + self.parse_interfaces_description(data, interfaces) + + data = self.responses[1] + self.parse_interfaces_status(data, interfaces) + + self.facts['interfaces'] = interfaces + + def parse_interfaces_description(self, data, interfaces): + for line in data.split('\n'): + match = re.match(r'(\d\/\d+)\s+(\w+)\s+(\w+)', line) + if match: + name = match.group(1) + interface = {} + interface['operstatus'] = match.group(2) + interface['lineprotocol'] = match.group(3) + interface['description'] = line[30:] + interfaces[name] = interface + + def parse_interfaces_status(self, data, interfaces): + for line in data.split('\n'): + match = re.match(r'(\d\/\d+)', line) + if match: + name = match.group(1) + interface = interfaces[name] + interface['physicalstatus'] = line[61:71].strip() + interface['mediatype'] = line[73:91].strip() + + +FACT_SUBSETS = dict( + default=Default, + config=Config, + interfaces=Interfaces, +) + +VALID_SUBSETS = frozenset(FACT_SUBSETS.keys()) + + +def main(): + """main entry point for module execution + """ + argument_spec = dict( + gather_subset=dict(default=['!config'], type='list') + ) + + module = AnsibleModule(argument_spec=argument_spec, + supports_check_mode=True) + + gather_subset = module.params['gather_subset'] + + runable_subsets = set() + exclude_subsets = set() + + for subset in gather_subset: + if subset == 'all': + runable_subsets.update(VALID_SUBSETS) + continue + + if subset.startswith('!'): + subset = subset[1:] + if subset == 'all': + exclude_subsets.update(VALID_SUBSETS) + continue + exclude = True + else: + exclude = False + + if subset not in VALID_SUBSETS: + module.fail_json(msg='Bad subset') + + if exclude: + exclude_subsets.add(subset) + else: + runable_subsets.add(subset) + + if not runable_subsets: + runable_subsets.update(VALID_SUBSETS) + + runable_subsets.difference_update(exclude_subsets) + runable_subsets.add('default') + + facts = dict() + facts['gather_subset'] = list(runable_subsets) + + instances = list() + for key in runable_subsets: + instances.append(FACT_SUBSETS[key](module)) + + for inst in instances: + inst.populate() + facts.update(inst.facts) + + ansible_facts = dict() + for key, value in iteritems(facts): + key = 'ansible_net_%s' % key + ansible_facts[key] = value + + module.exit_json(ansible_facts=ansible_facts) + + +if __name__ == '__main__': + main() diff --git 
a/plugins/modules/network/edgeswitch/edgeswitch_vlan.py b/plugins/modules/network/edgeswitch/edgeswitch_vlan.py
new file mode 100644
index 0000000000..a367ffd3d7
--- /dev/null
+++ b/plugins/modules/network/edgeswitch/edgeswitch_vlan.py
@@ -0,0 +1,497 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2018, Ansible by Red Hat, inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+DOCUMENTATION = '''
+---
+module: edgeswitch_vlan
+author: "Frederic Bor (@f-bor)"
+short_description: Manage VLANs on Ubiquiti Edgeswitch network devices
+description:
+  - This module provides declarative management of VLANs
+    on Ubiquiti Edgeswitch network devices.
+notes:
+  - Tested against edgeswitch 1.7.4
+  - This module uses the native Ubiquiti VLAN syntax and does not support the
+    switchport compatibility syntax. For clarity, it is strongly advised not to
+    use both syntaxes on the same interface.
+  - Edgeswitch does not support deleting or changing the name of VLAN 1.
+  - Because auto_tag, auto_untag and auto_exclude act as a default setting for
+    all interfaces, they are mutually exclusive.
+
+options:
+  name:
+    description:
+      - Name of the VLAN.
+  vlan_id:
+    description:
+      - ID of the VLAN. Range 1-4093.
+  tagged_interfaces:
+    description:
+      - List of interfaces that should accept and transmit tagged frames for the VLAN.
+        Accepts ranges of interfaces.
+  untagged_interfaces:
+    description:
+      - List of interfaces that should accept untagged frames and transmit them tagged for the VLAN.
+        Accepts ranges of interfaces.
+  excluded_interfaces:
+    description:
+      - List of interfaces that should be excluded from the VLAN.
+        Accepts ranges of interfaces.
+  auto_tag:
+    description:
+      - Each of the switch interfaces will be set to accept and transmit
+        tagged frames for I(vlan_id) unless defined in I(*_interfaces).
+        This is a default setting for all switch interfaces.
+    type: bool
+  auto_untag:
+    description:
+      - Each of the switch interfaces will be set to accept untagged frames and
+        transmit them tagged for I(vlan_id) unless defined in I(*_interfaces).
+        This is a default setting for all switch interfaces.
+    type: bool
+  auto_exclude:
+    description:
+      - Each of the switch interfaces will be excluded from I(vlan_id)
+        unless defined in I(*_interfaces).
+        This is a default setting for all switch interfaces.
+    type: bool
+  aggregate:
+    description: List of VLAN definitions.
+  purge:
+    description:
+      - Purge VLANs not defined in the I(aggregate) parameter.
+    default: no
+    type: bool
+  state:
+    description:
+      - Action on the VLAN configuration.
+    default: present
+    choices: ['present', 'absent']
+'''
+
+EXAMPLES = """
+- name: Create vlan
+  edgeswitch_vlan:
+    vlan_id: 100
+    name: voice
+    state: present
+
+- name: Add interfaces to VLAN
+  edgeswitch_vlan:
+    vlan_id: 100
+    tagged_interfaces:
+      - 0/1
+      - 0/4-0/6
+
+- name: Setup three vlans and delete the rest
+  edgeswitch_vlan:
+    purge: true
+    aggregate:
+      - { vlan_id: 1, name: default, auto_untag: true, excluded_interfaces: 0/45-0/48 }
+      - { vlan_id: 100, name: voice, auto_tag: true }
+      - { vlan_id: 200, name: video, auto_exclude: true, untagged_interfaces: 0/45-0/48, tagged_interfaces: 0/49 }
+
+- name: Delete vlan
+  edgeswitch_vlan:
+    vlan_id: 100
+    state: absent
+"""
+
+RETURN = """
+commands:
+  description: The list of configuration mode commands to send to the device
+  returned: always
+  type: list
+  sample:
+    - vlan database
+    - vlan 100
+    - vlan name 100 "test vlan"
+    - exit
+    - interface 0/1
+    - vlan pvid 50
+    - vlan participation include 50,100
+    - vlan tagging 100
+    - vlan participation exclude 200
+    - no vlan tagging 200
+"""
+
+import re
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.network.edgeswitch.edgeswitch import load_config, run_commands
+from ansible_collections.community.general.plugins.module_utils.network.edgeswitch.edgeswitch import build_aggregate_spec, map_params_to_obj
+from ansible_collections.community.general.plugins.module_utils.network.edgeswitch.edgeswitch_interface import InterfaceConfiguration, merge_interfaces
+
+
+def search_obj_in_list(vlan_id, lst):
+    for o in lst:
+        if o['vlan_id'] == vlan_id:
+            return o
+
+
+def map_vlans_to_commands(want, have, module):
+    commands = []
+    vlans_added = []
+    vlans_removed = []
+    vlans_names = []
+
+    for w in want:
+        vlan_id = w['vlan_id']
+        name = w['name']
+        state = w['state']
+
+        obj_in_have = search_obj_in_list(vlan_id, have)
+
+        if state == 'absent':
+            if obj_in_have:
+                vlans_removed.append(vlan_id)
+
+        elif state == 'present':
+            if not obj_in_have:
+                vlans_added.append(vlan_id)
+                if name:
+                    vlans_names.append('vlan name {0} "{1}"'.format(vlan_id, name))
+            else:
+                if name:
+                    if name != obj_in_have['name']:
+                        vlans_names.append('vlan name {0} "{1}"'.format(vlan_id, name))
+
+    if module.params['purge']:
+        for h in have:
+            obj_in_want = search_obj_in_list(h['vlan_id'], want)
+            # you can't delete vlan 1 on Edgeswitch
+            if not obj_in_want and h['vlan_id'] != '1':
+                vlans_removed.append(h['vlan_id'])
+
+    if vlans_removed:
+        commands.append('no vlan {0}'.format(','.join(vlans_removed)))
+
+    if vlans_added:
+        commands.append('vlan {0}'.format(','.join(vlans_added)))
+
+    if vlans_names:
+        commands.extend(vlans_names)
+
+    if commands:
+        commands.insert(0, 'vlan database')
+        commands.append('exit')
+
+    return commands
+
+
+class VlanInterfaceConfiguration(InterfaceConfiguration):
+    """ class holding vlan definitions for a given interface
+    """
+    def __init__(self):
+        InterfaceConfiguration.__init__(self)
+        self.tagged = []
+        self.untagged = []
+        self.excluded = []
+
+    def set_vlan(self, vlan_id, type):
+        try:
+            self.tagged.remove(vlan_id)
+        except ValueError:
+            pass
+
+        try:
+            self.untagged.remove(vlan_id)
+        except ValueError:
+            pass
+
+        try:
+            self.excluded.remove(vlan_id)
+        except ValueError:
+            pass
+
+        f = getattr(self, type)
+        f.append(vlan_id)
+
+    def gen_commands(self, port, module):
+        """ to reduce the number of commands generated by this module,
+        we group vlan changes so that at most 5 vlan commands are issued per interface
+        """
+        exclude = []
+        include = []
+        tag = []
+        untag = []
+        pvid = []
+
+        for vlan_id in self.excluded:
+            if vlan_id not in port['forbidden_vlans']:
+                exclude.append(vlan_id)
+
+            if vlan_id in port['tagged_vlans']:
+                untag.append(vlan_id)
+
+        for vlan_id in self.untagged:
+            if vlan_id in port['forbidden_vlans'] or vlan_id not in port['untagged_vlans'] and vlan_id not in port['tagged_vlans']:
+                include.append(vlan_id)
+
+            if vlan_id in port['tagged_vlans']:
+                untag.append(vlan_id)
+
+            if vlan_id != port['pvid_mode']:
+                pvid.append(vlan_id)
+
+        for vlan_id in self.tagged:
+            if vlan_id not in port['tagged_vlans']:
+                tag.append(vlan_id)
+                include.append(vlan_id)
+
+        if include:
+            self.commands.append('vlan participation include {0}'.format(','.join(include)))
+
+        if pvid:
+            if len(pvid) > 1:
+                module.fail_json(msg='{0} can\'t have more than one untagged vlan'.format(port['interface']))
+                return
+            self.commands.append('vlan pvid {0}'.format(pvid[0]))
+
+        if untag:
+            self.commands.append('no vlan tagging {0}'.format(','.join(untag)))
+
+        if tag:
+            self.commands.append('vlan tagging {0}'.format(','.join(tag)))
+
+        if exclude:
+            self.commands.append('vlan participation exclude {0}'.format(','.join(exclude)))
+
+
+def set_interfaces_vlan(interfaces_param, interfaces, vlan_id, type):
+    """ set vlan_id type for each interface in interfaces_param on interfaces
+    unrange interfaces_param if needed
+    """
+    if interfaces_param:
+        for i in interfaces_param:
+            match = re.search(r'(\d+)\/(\d+)-(\d+)\/(\d+)', i)
+            if match:
+                group = match.group(1)
+                start = int(match.group(2))
+                end = int(match.group(4))
+                for x in range(start, end + 1):
+                    key = '{0}/{1}'.format(group, x)
+                    interfaces[key].set_vlan(vlan_id, type)
+            else:
+                interfaces[i].set_vlan(vlan_id, type)
+
+
+def map_interfaces_to_commands(want, ports, module):
+    commands = list()
+
+    # generate a configuration for each interface
+    interfaces = {}
+    for key, value in ports.items():
+        interfaces[key] = VlanInterfaceConfiguration()
+
+    for w in want:
+        state = w['state']
+        if state != 'present':
+            continue
+
+        auto_tag = w['auto_tag']
+        auto_untag = w['auto_untag']
+        auto_exclude = w['auto_exclude']
+        vlan_id = w['vlan_id']
+        tagged_interfaces = w['tagged_interfaces']
+        untagged_interfaces = w['untagged_interfaces']
+        excluded_interfaces = w['excluded_interfaces']
+
+        # set the default type, if any
+        for key, value in ports.items():
+            if auto_tag:
+                interfaces[key].tagged.append(vlan_id)
+            elif auto_exclude:
+                interfaces[key].excluded.append(vlan_id)
+            elif auto_untag:
+                interfaces[key].untagged.append(vlan_id)
+
+        # set explicit definitions
+        set_interfaces_vlan(tagged_interfaces, interfaces, vlan_id, 'tagged')
+        set_interfaces_vlan(untagged_interfaces, interfaces, vlan_id, 'untagged')
+        set_interfaces_vlan(excluded_interfaces, interfaces, vlan_id, 'excluded')
+
+    # generate commands for each interface
+    for i, interface in interfaces.items():
+        port = ports[i]
+        interface.gen_commands(port, module)
+
+    # reduce them using range syntax when possible
+    interfaces = merge_interfaces(interfaces)
+
+    # final output
+    for i, interface in interfaces.items():
+        if len(interface.commands) > 0:
+            commands.append('interface {0}'.format(i))
+            commands.extend(interface.commands)
+
+    return commands
+
+
+def parse_vlan_brief(vlan_out):
+    have = []
+    for line in vlan_out.split('\n'):
+        obj = re.match(r'(?P<vlan_id>\d+)\s+(?P<name>[^\s]+)\s+', line)
+        if obj:
+            have.append(obj.groupdict())
+    return have
+
+
+def unrange(vlans):
+    res = []
+    for vlan in vlans:
+        match = re.match(r'(\d+)-(\d+)', vlan)
+        if match:
+            start = int(match.group(1))
+            end = int(match.group(2))
+            for vlan_id in range(start, end + 1):
+                res.append(str(vlan_id))
+        else:
+            res.append(vlan)
+    return res
+
+
+def parse_interfaces_switchport(cmd_out):
+    ports = dict()
+    objs = re.findall(
+        r'Port: (\d+\/\d+)\n'
+        'VLAN Membership Mode:(.*)\n'
+        'Access Mode VLAN:(.*)\n'
+        'General Mode PVID:(.*)\n'
+        'General Mode Ingress Filtering:(.*)\n'
+        'General Mode Acceptable Frame Type:(.*)\n'
+        'General Mode Dynamically Added VLANs:(.*)\n'
+        'General Mode Untagged VLANs:(.*)\n'
+        'General Mode Tagged VLANs:(.*)\n'
+        'General Mode Forbidden VLANs:(.*)\n', cmd_out)
+    for o in objs:
+        port = {
+            'interface': o[0],
+            'pvid_mode': o[3].replace("(default)", "").strip(),
+            'untagged_vlans': unrange(o[7].strip().split(',')),
+            'tagged_vlans': unrange(o[8].strip().split(',')),
+            'forbidden_vlans': unrange(o[9].strip().split(','))
+        }
+        ports[port['interface']] = port
+    return ports
+
+
+def map_ports_to_obj(module):
+    return parse_interfaces_switchport(run_commands(module, ['show interfaces switchport'])[0])
+
+
+def map_config_to_obj(module):
+    return parse_vlan_brief(run_commands(module, ['show vlan brief'])[0])
+
+
+def check_params(module, want):
+    """ Deeper checks on parameters
+    """
+    def check_params_interface(interfaces):
+        if interfaces:
+            for i in interfaces:
+                match = re.search(r'(\d+)\/(\d+)-(\d+)\/(\d+)', i)
+                if match:
+                    if match.group(1) != match.group(3):
+                        module.fail_json(msg="interface range must be within same group: " + i)
+                else:
+                    match = re.search(r'(\d+)\/(\d+)', i)
+                    if not match:
+                        module.fail_json(msg="wrong interface format: " + i)
+
+    for w in want:
+        auto_tag = w['auto_tag']
+        auto_untag = w['auto_untag']
+        auto_exclude = w['auto_exclude']
+
+        c = 0
+        if auto_tag:
+            c = c + 1
+
+        if auto_untag:
+            c = c + 1
+
+        if auto_exclude:
+            c = c + 1
+
+        if c > 1:
+            module.fail_json(msg="parameters are mutually exclusive: auto_tag, auto_untag, auto_exclude")
+            return
+
+        check_params_interface(w['tagged_interfaces'])
+        check_params_interface(w['untagged_interfaces'])
+        check_params_interface(w['excluded_interfaces'])
+        w['vlan_id'] = str(w['vlan_id'])
+
+
+def main():
+    """ main entry point for module execution
+    """
+    element_spec = dict(
+        vlan_id=dict(type='int'),
+        name=dict(),
+        tagged_interfaces=dict(type='list'),
+        untagged_interfaces=dict(type='list'),
+        excluded_interfaces=dict(type='list'),
+        auto_tag=dict(type='bool'),
+        auto_exclude=dict(type='bool'),
+        auto_untag=dict(type='bool'),
+        state=dict(default='present',
+                   choices=['present', 'absent'])
+    )
+
+    argument_spec = build_aggregate_spec(
+        element_spec,
+        ['vlan_id'],
+        dict(purge=dict(default=False, type='bool'))
+    )
+
+    required_one_of = [['vlan_id', 'aggregate']]
+    mutually_exclusive = [
+        ['vlan_id', 'aggregate'],
+        ['auto_tag', 'auto_untag', 'auto_exclude']]
+
+    module = AnsibleModule(argument_spec=argument_spec,
+                           required_one_of=required_one_of,
+                           mutually_exclusive=mutually_exclusive,
+                           supports_check_mode=True)
+    result = {'changed': False}
+
+    want = map_params_to_obj(module)
+    have = map_config_to_obj(module)
+
+    check_params(module, want)
+
+    # vlans are not created/deleted in configure mode
+    commands = map_vlans_to_commands(want, have, module)
+    result['commands'] = commands
+
+    if commands:
+        if not module.check_mode:
+            run_commands(module, commands, check_rc=False)
+        result['changed'] = True
+
+    ports = map_ports_to_obj(module)
+
+    # interfaces vlan are set in configure mode
+    commands = map_interfaces_to_commands(want, ports, module)
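+    # map_interfaces_to_commands() returns per-interface blocks such as
+    # ['interface 0/1', 'vlan participation include 100', 'vlan tagging 100'],
+    # already range-merged by merge_interfaces(); unlike the 'vlan database'
+    # commands above, these must run in configure mode, so they are applied
+    # through load_config() below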
+    result['commands'].extend(commands)
+
+    if commands:
+        if not module.check_mode:
+            load_config(module, commands)
+        result['changed'] = True
+
+    module.exit_json(**result)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/network/enos/enos_command.py b/plugins/modules/network/enos/enos_command.py
new file mode 100644
index 0000000000..2da3ebe868
--- /dev/null
+++ b/plugins/modules/network/enos/enos_command.py
@@ -0,0 +1,228 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (C) 2017 Red Hat Inc.
+# Copyright (C) 2017 Lenovo.
+#
+# GNU General Public License v3.0+
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+# Module to execute ENOS Commands on Lenovo Switches.
+# Lenovo Networking
+#
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: enos_command
+author: "Anil Kumar Muraleedharan (@amuraleedhar)"
+short_description: Run arbitrary commands on Lenovo ENOS devices
+description:
+  - Sends arbitrary commands to an ENOS node and returns the results
+    read from the device. The C(enos_command) module includes an
+    argument that will cause the module to wait for a specific condition
+    before returning or timing out if the condition is not met.
+extends_documentation_fragment:
+- community.general.enos
+
+options:
+  commands:
+    description:
+      - List of commands to send to the remote device over the
+        configured provider. The resulting output from the command
+        is returned. If the I(wait_for) argument is provided, the
+        module is not returned until the condition is satisfied or
+        the number of retries has expired.
+    required: true
+  wait_for:
+    description:
+      - List of conditions to evaluate against the output of the
+        command. The task will wait for each condition to be true
+        before moving forward. If the conditional is not true
+        within the configured number of retries, the task fails.
+        See examples.
+  match:
+    description:
+      - The I(match) argument is used in conjunction with the
+        I(wait_for) argument to specify the match policy. Valid
+        values are C(all) or C(any). If the value is set to C(all)
+        then all conditionals in the wait_for must be satisfied. If
+        the value is set to C(any) then only one of the values must be
+        satisfied.
+    default: all
+    choices: ['any', 'all']
+  retries:
+    description:
+      - Specifies the number of retries a command should be tried
+        before it is considered failed. The command is run on the
+        target device every retry and evaluated against the
+        I(wait_for) conditions.
+    default: 10
+  interval:
+    description:
+      - Configures the interval in seconds to wait between retries
+        of the command. If the command does not pass the specified
+        conditions, the interval indicates how long to wait before
+        trying the command again.
+    default: 1
+'''
+
+EXAMPLES = """
+# Note: examples below use the following provider dict to handle
+# transport and authentication to the node.
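+# The dict is referenced by each task below as provider: "{{ cli }}".
+# wait_for conditionals index the command outputs in order, so result[0]
+# refers to the output of the first entry in commands.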
+---
+vars:
+  cli:
+    host: "{{ inventory_hostname }}"
+    port: 22
+    username: admin
+    password: admin
+    timeout: 30
+
+---
+- name: test contains operator
+  enos_command:
+    commands:
+      - show version
+      - show system memory
+    wait_for:
+      - "result[0] contains 'Lenovo'"
+      - "result[1] contains 'MemFree'"
+    provider: "{{ cli }}"
+  register: result
+
+- assert:
+    that:
+      - "result.changed == false"
+      - "result.stdout is defined"
+
+- name: get output for single command
+  enos_command:
+    commands: ['show version']
+    provider: "{{ cli }}"
+  register: result
+
+- assert:
+    that:
+      - "result.changed == false"
+      - "result.stdout is defined"
+
+- name: get output for multiple commands
+  enos_command:
+    commands:
+      - show version
+      - show interface information
+    provider: "{{ cli }}"
+  register: result
+
+- assert:
+    that:
+      - "result.changed == false"
+      - "result.stdout is defined"
+      - "result.stdout | length == 2"
+"""
+
+RETURN = """
+stdout:
+  description: the set of responses from the commands
+  returned: always
+  type: list
+  sample: ['...', '...']
+stdout_lines:
+  description: The value of stdout split into a list
+  returned: always
+  type: list
+  sample: [['...', '...'], ['...'], ['...']]
+failed_conditions:
+  description: the conditionals that failed
+  returned: failed
+  type: list
+  sample: ['...', '...']
+"""
+
+import time
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.network.enos.enos import run_commands, check_args
+from ansible_collections.community.general.plugins.module_utils.network.enos.enos import enos_argument_spec
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.parsing import Conditional
+from ansible.module_utils.six import string_types
+
+
+def to_lines(stdout):
+    for item in stdout:
+        if isinstance(item, string_types):
+            item = str(item).split('\n')
+        yield item
+
+
+def main():
+    spec = dict(
+        # { command: <str>, prompt: <str>, response: <str> }
+        commands=dict(type='list', required=True),
+
+        wait_for=dict(type='list'),
+        match=dict(default='all', choices=['all', 'any']),
+
+        retries=dict(default=10, type='int'),
+        interval=dict(default=1, type='int')
+    )
+
+    spec.update(enos_argument_spec)
+
+    module = AnsibleModule(argument_spec=spec, supports_check_mode=True)
+    result = {'changed': False}
+
+    wait_for = module.params['wait_for'] or list()
+    conditionals = [Conditional(c) for c in wait_for]
+
+    commands = module.params['commands']
+    retries = module.params['retries']
+    interval = module.params['interval']
+    match = module.params['match']
+
+    while retries > 0:
+        responses = run_commands(module, commands)
+
+        for item in list(conditionals):
+            if item(responses):
+                if match == 'any':
+                    conditionals = list()
+                    break
+                conditionals.remove(item)
+
+        if not conditionals:
+            break
+
+        time.sleep(interval)
+        retries -= 1
+
+    if conditionals:
+        failed_conditions = [item.raw for item in conditionals]
+        msg = 'One or more conditional statements have not been satisfied'
+        module.fail_json(msg=msg, failed_conditions=failed_conditions)
+
+    result.update({
+        'changed': False,
+        'stdout': responses,
+        'stdout_lines': list(to_lines(responses))
+    })
+
+    module.exit_json(**result)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/network/enos/enos_config.py b/plugins/modules/network/enos/enos_config.py
new file mode 100644
index 0000000000..3aecf6b7c5
--- /dev/null
+++ b/plugins/modules/network/enos/enos_config.py
@@ -0,0 +1,310 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+#
(C) 2017 Red Hat Inc. +# Copyright (C) 2017 Lenovo. +# +# GNU General Public License v3.0+ +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# +# Module to configure Lenovo Switches. +# Lenovo Networking +# +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: enos_config +author: "Anil Kumar Muraleedharan (@amuraleedhar)" +short_description: Manage Lenovo ENOS configuration sections +description: + - Lenovo ENOS configurations use a simple block indent file syntax + for segmenting configuration into sections. This module provides + an implementation for working with ENOS configuration sections in + a deterministic way. +extends_documentation_fragment: +- community.general.enos + +notes: + - Tested against ENOS 8.4.1 +options: + lines: + description: + - The ordered set of commands that should be configured in the + section. The commands must be the exact same commands as found + in the device running-config. Be sure to note the configuration + command syntax as some commands are automatically modified by the + device config parser. + aliases: ['commands'] + parents: + description: + - The ordered set of parents that uniquely identify the section + the commands should be checked against. If the parents argument + is omitted, the commands are checked against the set of top + level or global commands. + src: + description: + - Specifies the source path to the file that contains the configuration + or configuration template to load. The path to the source file can + either be the full path on the Ansible control host or a relative + path from the playbook or role root directory. This argument is + mutually exclusive with I(lines), I(parents). + before: + description: + - The ordered set of commands to push on to the command stack if + a change needs to be made. This allows the playbook designer + the opportunity to perform configuration commands prior to pushing + any changes without affecting how the set of commands are matched + against the system. + after: + description: + - The ordered set of commands to append to the end of the command + stack if a change needs to be made. Just like with I(before) this + allows the playbook designer to append a set of commands to be + executed after the command set. + match: + description: + - Instructs the module on the way to perform the matching of + the set of commands against the current device config. If + match is set to I(line), commands are matched line by line. If + match is set to I(strict), command lines are matched with respect + to position. If match is set to I(exact), command lines + must be an equal match. Finally, if match is set to I(none), the + module will not attempt to compare the source configuration with + the running configuration on the remote device. + default: line + choices: ['line', 'strict', 'exact', 'none'] + replace: + description: + - Instructs the module on the way to perform the configuration + on the device. If the replace argument is set to I(line) then + the modified lines are pushed to the device in configuration + mode. 
If the replace argument is set to I(block) then the entire
+        command block is pushed to the device in configuration mode if any
+        line is not correct.
+    default: line
+    choices: ['line', 'block', 'config']
+  config:
+    description:
+      - The module, by default, will connect to the remote device and
+        retrieve the current running-config to use as a base for comparing
+        against the contents of source. There are times when it is not
+        desirable to have the task get the current running-config for
+        every task in a playbook. The I(config) argument allows the
+        implementer to pass in the configuration to use as the base
+        config for comparison.
+  backup:
+    description:
+      - This argument will cause the module to create a full backup of
+        the current C(running-config) from the remote device before any
+        changes are made. If the C(backup_options) value is not given,
+        the backup file is written to the C(backup) folder in the playbook
+        root directory. If the directory does not exist, it is created.
+    type: bool
+    default: 'no'
+  comment:
+    description:
+      - Allows a commit description to be specified to be included
+        when the configuration is committed. If the configuration is
+        not changed or committed, this argument is ignored.
+    default: 'configured by enos_config'
+  admin:
+    description:
+      - Enters into administration configuration mode for making config
+        changes to the device.
+    type: bool
+    default: 'no'
+  backup_options:
+    description:
+      - This is a dict object containing configurable options related to backup file path.
+        The value of this option is read only when C(backup) is set to I(yes), if C(backup) is set
+        to I(no) this option will be silently ignored.
+    suboptions:
+      filename:
+        description:
+          - The filename to be used to store the backup configuration. If the filename
+            is not given it will be generated based on the hostname, current time and date
+            in format defined by <hostname>_config.<current-date>@<current-time>
+      dir_path:
+        description:
+          - This option provides the path ending with directory name in which the backup
+            configuration file will be stored. If the directory does not exist it will be first
+            created and the filename is either the value of C(filename) or default filename
+            as described in C(filename) options description. If the path value is not given
+            in that case a I(backup) directory will be created in the current working directory
+            and backup configuration will be copied in C(filename) within I(backup) directory.
+        type: path
+    type: dict
+'''
+
+EXAMPLES = """
+- name: configure top level configuration
+  enos_config:
+    lines: hostname {{ inventory_hostname }}
+
+- name: configure interface settings
+  enos_config:
+    lines:
+      - enable
+      - ip ospf enable
+    parents: interface ip 13
+
+- name: load a config from disk and replace the current config
+  enos_config:
+    src: config.cfg
+    backup: yes
+
+- name: configurable backup path
+  enos_config:
+    src: config.cfg
+    backup: yes
+    backup_options:
+      filename: backup.cfg
+      dir_path: /home/user
+"""
+
+RETURN = """
+updates:
+  description: The set of commands that will be pushed to the remote device
+  returned: Only when lines is specified.
+ type: list + sample: ['...', '...'] +backup_path: + description: The full path to the backup file + returned: when backup is yes + type: str + sample: /playbooks/ansible/backup/enos01.2016-07-16@22:28:34 +""" +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.network.enos.enos import load_config, get_config +from ansible_collections.community.general.plugins.module_utils.network.enos.enos import enos_argument_spec +from ansible_collections.community.general.plugins.module_utils.network.enos.enos import check_args +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.config import NetworkConfig, dumps + + +DEFAULT_COMMIT_COMMENT = 'configured by enos_config' + + +def get_running_config(module): + contents = module.params['config'] + if not contents: + contents = get_config(module) + return NetworkConfig(indent=1, contents=contents) + + +def get_candidate(module): + candidate = NetworkConfig(indent=1) + if module.params['src']: + candidate.load(module.params['src']) + elif module.params['lines']: + parents = module.params['parents'] or list() + candidate.add(module.params['lines'], parents=parents) + return candidate + + +def run(module, result): + match = module.params['match'] + replace = module.params['replace'] + replace_config = replace == 'config' + path = module.params['parents'] + comment = module.params['comment'] + admin = module.params['admin'] + check_mode = module.check_mode + + candidate = get_candidate(module) + + if match != 'none' and replace != 'config': + contents = get_running_config(module) + configobj = NetworkConfig(contents=contents, indent=1) + commands = candidate.difference(configobj, path=path, match=match, + replace=replace) + else: + commands = candidate.items + + if commands: + commands = dumps(commands, 'commands').split('\n') + + if any((module.params['lines'], module.params['src'])): + if module.params['before']: + commands[:0] = module.params['before'] + + if module.params['after']: + commands.extend(module.params['after']) + + result['commands'] = commands + + diff = load_config(module, commands) + if diff: + result['diff'] = dict(prepared=diff) + result['changed'] = True + + +def main(): + """main entry point for module execution + """ + backup_spec = dict( + filename=dict(), + dir_path=dict(type='path') + ) + argument_spec = dict( + src=dict(type='path'), + + lines=dict(aliases=['commands'], type='list'), + parents=dict(type='list'), + + before=dict(type='list'), + after=dict(type='list'), + + match=dict(default='line', choices=['line', 'strict', 'exact', 'none']), + replace=dict(default='line', choices=['line', 'block', 'config']), + + config=dict(), + backup=dict(type='bool', default=False), + backup_options=dict(type='dict', options=backup_spec), + comment=dict(default=DEFAULT_COMMIT_COMMENT), + admin=dict(type='bool', default=False) + ) + + argument_spec.update(enos_argument_spec) + + mutually_exclusive = [('lines', 'src'), + ('parents', 'src')] + + required_if = [('match', 'strict', ['lines']), + ('match', 'exact', ['lines']), + ('replace', 'block', ['lines']), + ('replace', 'config', ['src'])] + + module = AnsibleModule(argument_spec=argument_spec, + mutually_exclusive=mutually_exclusive, + required_if=required_if, + supports_check_mode=True) + + warnings = list() + check_args(module, warnings) + + result = dict(changed=False, warnings=warnings) + + if module.params['backup']: + result['__backup__'] = get_config(module) + + run(module, result) + + 
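+    # run() computed the line diff between the candidate and running
+    # configurations and pushed any required commands via load_config(),
+    # recording them in result['commands'] and flagging result['changed']
+    # when the device reported a diff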
module.exit_json(**result)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/network/enos/enos_facts.py b/plugins/modules/network/enos/enos_facts.py
new file mode 100644
index 0000000000..dda2653101
--- /dev/null
+++ b/plugins/modules/network/enos/enos_facts.py
@@ -0,0 +1,507 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (C) 2017 Red Hat Inc.
+# Copyright (C) 2017 Lenovo.
+#
+# GNU General Public License v3.0+
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+# Module to Collect facts from Lenovo Switches running Lenovo ENOS commands
+# Lenovo Networking
+#
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+DOCUMENTATION = '''
+---
+module: enos_facts
+author: "Anil Kumar Muraleedharan (@amuraleedhar)"
+short_description: Collect facts from remote devices running Lenovo ENOS
+description:
+  - Collects a base set of device facts from a remote Lenovo device
+    running on ENOS. This module prepends all of the
+    base network fact keys with C(ansible_net_). The facts
+    module will always collect a base set of facts from the device
+    and can enable or disable collection of additional facts.
+extends_documentation_fragment:
+- community.general.enos
+
+notes:
+  - Tested against ENOS 8.4.1
+options:
+  gather_subset:
+    description:
+      - When supplied, this argument will restrict the facts collected
+        to a given subset. Possible values for this argument include
+        all, hardware, config, and interfaces. Can specify a list of
+        values to include a larger subset. Values can also be used
+        with an initial C(!) to specify that a specific subset should
+        not be collected.
+    required: false
+    default: '!config'
+'''
+EXAMPLES = '''
+Tasks: The following are examples of using the module enos_facts.
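+# Note: gather_subset defaults to '!config', i.e. every subset except
+# config is collected unless the default is overridden as shown below.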
+--- +- name: Test Enos Facts + enos_facts: + provider={{ cli }} + + vars: + cli: + host: "{{ inventory_hostname }}" + port: 22 + username: admin + password: admin + transport: cli + timeout: 30 + authorize: True + auth_pass: + +--- +# Collect all facts from the device +- enos_facts: + gather_subset: all + provider: "{{ cli }}" + +# Collect only the config and default facts +- enos_facts: + gather_subset: + - config + provider: "{{ cli }}" + +# Do not collect hardware facts +- enos_facts: + gather_subset: + - "!hardware" + provider: "{{ cli }}" + +''' +RETURN = ''' + ansible_net_gather_subset: + description: The list of fact subsets collected from the device + returned: always + type: list +# default + ansible_net_model: + description: The model name returned from the Lenovo ENOS device + returned: always + type: str + ansible_net_serialnum: + description: The serial number of the Lenovo ENOS device + returned: always + type: str + ansible_net_version: + description: The ENOS operating system version running on the remote device + returned: always + type: str + ansible_net_hostname: + description: The configured hostname of the device + returned: always + type: str + ansible_net_image: + description: Indicates the active image for the device + returned: always + type: str +# hardware + ansible_net_memfree_mb: + description: The available free memory on the remote device in MB + returned: when hardware is configured + type: int +# config + ansible_net_config: + description: The current active config from the device + returned: when config is configured + type: str +# interfaces + ansible_net_all_ipv4_addresses: + description: All IPv4 addresses configured on the device + returned: when interfaces is configured + type: list + ansible_net_all_ipv6_addresses: + description: All IPv6 addresses configured on the device + returned: when interfaces is configured + type: list + ansible_net_interfaces: + description: A hash of all interfaces running on the system. 
+ This gives information on description, mac address, mtu, speed, + duplex and operstatus + returned: when interfaces is configured + type: dict + ansible_net_neighbors: + description: The list of LLDP neighbors from the remote device + returned: when interfaces is configured + type: dict +''' + +import re + +from ansible_collections.community.general.plugins.module_utils.network.enos.enos import run_commands, enos_argument_spec, check_args +from ansible.module_utils._text import to_text +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.six import iteritems +from ansible.module_utils.six.moves import zip + + +class FactsBase(object): + + COMMANDS = list() + + def __init__(self, module): + self.module = module + self.facts = dict() + self.responses = None + self.PERSISTENT_COMMAND_TIMEOUT = 60 + + def populate(self): + self.responses = run_commands(self.module, self.COMMANDS, + check_rc=False) + + def run(self, cmd): + return run_commands(self.module, cmd, check_rc=False) + + +class Default(FactsBase): + + COMMANDS = ['show version', 'show run'] + + def populate(self): + super(Default, self).populate() + data = self.responses[0] + data_run = self.responses[1] + if data: + self.facts['version'] = self.parse_version(data) + self.facts['serialnum'] = self.parse_serialnum(data) + self.facts['model'] = self.parse_model(data) + self.facts['image'] = self.parse_image(data) + if data_run: + self.facts['hostname'] = self.parse_hostname(data_run) + + def parse_version(self, data): + match = re.search(r'^Software Version (.*?) ', data, re.M | re.I) + if match: + return match.group(1) + + def parse_hostname(self, data_run): + for line in data_run.split('\n'): + line = line.strip() + match = re.match(r'hostname (.*?)', line, re.M | re.I) + if match: + hosts = line.split() + hostname = hosts[1].strip('\"') + return hostname + return "NA" + + def parse_model(self, data): + match = re.search(r'^Lenovo RackSwitch (\S+)', data, re.M | re.I) + if match: + return match.group(1) + + def parse_image(self, data): + match = re.search(r'(.*) image1(.*)', data, re.M | re.I) + if match: + return "Image1" + else: + return "Image2" + + def parse_serialnum(self, data): + match = re.search(r'^Switch Serial No: (\S+)', data, re.M | re.I) + if match: + return match.group(1) + + +class Hardware(FactsBase): + + COMMANDS = [ + 'show system memory' + ] + + def populate(self): + super(Hardware, self).populate() + data = self.run(['show system memory']) + data = to_text(data, errors='surrogate_or_strict').strip() + data = data.replace(r"\n", "\n") + if data: + self.facts['memtotal_mb'] = self.parse_memtotal(data) + self.facts['memfree_mb'] = self.parse_memfree(data) + + def parse_memtotal(self, data): + match = re.search(r'^MemTotal:\s*(.*) kB', data, re.M | re.I) + if match: + return int(match.group(1)) / 1024 + + def parse_memfree(self, data): + match = re.search(r'^MemFree:\s*(.*) kB', data, re.M | re.I) + if match: + return int(match.group(1)) / 1024 + + +class Config(FactsBase): + + COMMANDS = ['show running-config'] + + def populate(self): + super(Config, self).populate() + data = self.responses[0] + if data: + self.facts['config'] = data + + +class Interfaces(FactsBase): + + COMMANDS = ['show interface status'] + + def populate(self): + super(Interfaces, self).populate() + + self.facts['all_ipv4_addresses'] = list() + self.facts['all_ipv6_addresses'] = list() + + data1 = self.run(['show interface status']) + data1 = to_text(data1, errors='surrogate_or_strict').strip() + data1 = 
data1.replace(r"\n", "\n") + data2 = self.run(['show lldp port']) + data2 = to_text(data2, errors='surrogate_or_strict').strip() + data2 = data2.replace(r"\n", "\n") + lines1 = None + lines2 = None + if data1: + lines1 = self.parse_interfaces(data1) + if data2: + lines2 = self.parse_interfaces(data2) + if lines1 is not None and lines2 is not None: + self.facts['interfaces'] = self.populate_interfaces(lines1, lines2) + data3 = self.run(['show lldp remote-device port']) + data3 = to_text(data3, errors='surrogate_or_strict').strip() + data3 = data3.replace(r"\n", "\n") + + lines3 = None + if data3: + lines3 = self.parse_neighbors(data3) + if lines3 is not None: + self.facts['neighbors'] = self.populate_neighbors(lines3) + + data4 = self.run(['show interface ip']) + data4 = data4[0].split('\n') + lines4 = None + if data4: + lines4 = self.parse_ipaddresses(data4) + ipv4_interfaces = self.set_ipv4_interfaces(lines4) + self.facts['all_ipv4_addresses'] = ipv4_interfaces + ipv6_interfaces = self.set_ipv6_interfaces(lines4) + self.facts['all_ipv6_addresses'] = ipv6_interfaces + + def parse_ipaddresses(self, data4): + parsed = list() + for line in data4: + if len(line) == 0: + continue + else: + line = line.strip() + if len(line) == 0: + continue + match = re.search(r'IP4', line, re.M | re.I) + if match: + key = match.group() + parsed.append(line) + match = re.search(r'IP6', line, re.M | re.I) + if match: + key = match.group() + parsed.append(line) + return parsed + + def set_ipv4_interfaces(self, line4): + ipv4_addresses = list() + for line in line4: + ipv4Split = line.split() + if ipv4Split[1] == "IP4": + ipv4_addresses.append(ipv4Split[2]) + return ipv4_addresses + + def set_ipv6_interfaces(self, line4): + ipv6_addresses = list() + for line in line4: + ipv6Split = line.split() + if ipv6Split[1] == "IP6": + ipv6_addresses.append(ipv6Split[2]) + return ipv6_addresses + + def populate_neighbors(self, lines3): + neighbors = dict() + for line in lines3: + neighborSplit = line.split("|") + innerData = dict() + innerData['Remote Chassis ID'] = neighborSplit[2].strip() + innerData['Remote Port'] = neighborSplit[3].strip() + sysName = neighborSplit[4].strip() + if sysName is not None: + innerData['Remote System Name'] = neighborSplit[4].strip() + else: + innerData['Remote System Name'] = "NA" + neighbors[neighborSplit[0].strip()] = innerData + return neighbors + + def populate_interfaces(self, lines1, lines2): + interfaces = dict() + for line1, line2 in zip(lines1, lines2): + line = line1 + " " + line2 + intfSplit = line.split() + innerData = dict() + innerData['description'] = intfSplit[6].strip() + innerData['macaddress'] = intfSplit[8].strip() + innerData['mtu'] = intfSplit[9].strip() + innerData['speed'] = intfSplit[1].strip() + innerData['duplex'] = intfSplit[2].strip() + innerData['operstatus'] = intfSplit[5].strip() + if("up" not in intfSplit[5].strip()) and ("down" not in intfSplit[5].strip()): + innerData['description'] = intfSplit[7].strip() + innerData['macaddress'] = intfSplit[9].strip() + innerData['mtu'] = intfSplit[10].strip() + innerData['operstatus'] = intfSplit[6].strip() + interfaces[intfSplit[0].strip()] = innerData + return interfaces + + def parse_neighbors(self, neighbors): + parsed = list() + for line in neighbors.split('\n'): + if len(line) == 0: + continue + else: + line = line.strip() + match = re.match(r'^([0-9]+)', line) + if match: + key = match.group(1) + parsed.append(line) + match = re.match(r'^(INT+)', line) + if match: + key = match.group(1) + parsed.append(line) + match 
= re.match(r'^(EXT+)', line) + if match: + key = match.group(1) + parsed.append(line) + match = re.match(r'^(MGT+)', line) + if match: + key = match.group(1) + parsed.append(line) + return parsed + + def parse_interfaces(self, data): + parsed = list() + for line in data.split('\n'): + if len(line) == 0: + continue + else: + line = line.strip() + match = re.match(r'^([0-9]+)', line) + if match: + key = match.group(1) + parsed.append(line) + match = re.match(r'^(INT+)', line) + if match: + key = match.group(1) + parsed.append(line) + match = re.match(r'^(EXT+)', line) + if match: + key = match.group(1) + parsed.append(line) + match = re.match(r'^(MGT+)', line) + if match: + key = match.group(1) + parsed.append(line) + return parsed + + +FACT_SUBSETS = dict( + default=Default, + hardware=Hardware, + interfaces=Interfaces, + config=Config, +) + +VALID_SUBSETS = frozenset(FACT_SUBSETS.keys()) + +PERSISTENT_COMMAND_TIMEOUT = 60 + + +def main(): + """main entry point for module execution + """ + argument_spec = dict( + gather_subset=dict(default=['!config'], type='list') + ) + + argument_spec.update(enos_argument_spec) + + module = AnsibleModule(argument_spec=argument_spec, + supports_check_mode=True) + + gather_subset = module.params['gather_subset'] + + runable_subsets = set() + exclude_subsets = set() + + for subset in gather_subset: + if subset == 'all': + runable_subsets.update(VALID_SUBSETS) + continue + + if subset.startswith('!'): + subset = subset[1:] + if subset == 'all': + exclude_subsets.update(VALID_SUBSETS) + continue + exclude = True + else: + exclude = False + + if subset not in VALID_SUBSETS: + module.fail_json(msg='Bad subset') + + if exclude: + exclude_subsets.add(subset) + else: + runable_subsets.add(subset) + + if not runable_subsets: + runable_subsets.update(VALID_SUBSETS) + + runable_subsets.difference_update(exclude_subsets) + runable_subsets.add('default') + + facts = dict() + facts['gather_subset'] = list(runable_subsets) + + instances = list() + for key in runable_subsets: + instances.append(FACT_SUBSETS[key](module)) + + for inst in instances: + inst.populate() + facts.update(inst.facts) + + ansible_facts = dict() + for key, value in iteritems(facts): + key = 'ansible_net_%s' % key + ansible_facts[key] = value + + warnings = list() + check_args(module, warnings) + + module.exit_json(ansible_facts=ansible_facts, warnings=warnings) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/eric_eccli/eric_eccli_command.py b/plugins/modules/network/eric_eccli/eric_eccli_command.py new file mode 100644 index 0000000000..445b9a85f1 --- /dev/null +++ b/plugins/modules/network/eric_eccli/eric_eccli_command.py @@ -0,0 +1,213 @@ +#!/usr/bin/python +# +# Copyright (c) 2019 Ericsson AB. +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: eric_eccli_command +author: Ericsson IPOS OAM team (@itercheng) +short_description: Run commands on remote devices running ERICSSON ECCLI +description: + - Sends arbitrary commands to an ERICSSON eccli node and returns the results + read from the device. This module includes an + argument that will cause the module to wait for a specific condition + before returning or timing out if the condition is not met. 
+  - This module also supports running commands in configuration mode
+    in raw command style.
+options:
+  commands:
+    description:
+      - List of commands to send to the remote ECCLI device over the
+        configured provider. The resulting output from the command
+        is returned. If the I(wait_for) argument is provided, the
+        module is not returned until the condition is satisfied or
+        the number of retries has expired. If a command sent to the
+        device requires answering a prompt, it is possible to pass
+        a dict containing I(command), I(answer) and I(prompt).
+        Common answers are 'y' or "\r" (carriage return, must be
+        double quotes). See examples.
+    type: list
+    required: true
+  wait_for:
+    description:
+      - List of conditions to evaluate against the output of the
+        command. The task will wait for each condition to be true
+        before moving forward. If the conditional is not true
+        within the configured number of retries, the task fails.
+        See examples.
+    type: list
+    aliases: ['waitfor']
+  match:
+    description:
+      - The I(match) argument is used in conjunction with the
+        I(wait_for) argument to specify the match policy. Valid
+        values are C(all) or C(any). If the value is set to C(all)
+        then all conditionals in the wait_for must be satisfied. If
+        the value is set to C(any) then only one of the values must be
+        satisfied.
+    type: str
+    default: all
+    choices: ['any', 'all']
+  retries:
+    description:
+      - Specifies the number of retries a command should be tried
+        before it is considered failed. The command is run on the
+        target device every retry and evaluated against the
+        I(wait_for) conditions.
+    type: int
+    default: 10
+  interval:
+    description:
+      - Configures the interval in seconds to wait between retries
+        of the command. If the command does not pass the specified
+        conditions, the interval indicates how long to wait before
+        trying the command again.
+    type: int
+    default: 1
+notes:
+  - Tested against IPOS 19.3
+  - For more information on using Ansible to manage network devices see the :ref:`Ansible Network Guide <network_guide>`
+  - For more information on using Ansible to manage Ericsson devices see the Ericsson documents.
+  - "Starting with Ansible 2.5 we recommend using C(connection: network_cli)."
+  - For more information please see the L(ERIC_ECCLI Platform Options guide,../network/user_guide/platform_eric_eccli.html).
+''' + +EXAMPLES = r""" +tasks: + - name: run show version on remote devices + eric_eccli_command: + commands: show version + + - name: run show version and check to see if output contains IPOS + eric_eccli_command: + commands: show version + wait_for: result[0] contains IPOS + + - name: run multiple commands on remote nodes + eric_eccli_command: + commands: + - show version + - show running-config interfaces + + - name: run multiple commands and evaluate the output + eric_eccli_command: + commands: + - show version + - show running-config interfaces + wait_for: + - result[0] contains IPOS + - result[1] contains management +""" + +RETURN = """ +stdout: + description: The set of responses from the commands + returned: always apart from low level errors (such as action plugin) + type: list + sample: ['...', '...'] +stdout_lines: + description: The value of stdout split into a list + returned: always apart from low level errors (such as action plugin) + type: list + sample: [['...', '...'], ['...'], ['...']] +failed_conditions: + description: The list of conditionals that have failed + returned: failed + type: list + sample: ['...', '...'] +""" +import re +import time + +from ansible_collections.community.general.plugins.module_utils.network.eric_eccli.eric_eccli import run_commands +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import transform_commands +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.parsing import Conditional +from ansible.module_utils.six import string_types + + +def parse_commands(module, warnings): + commands = transform_commands(module) + + for item in list(commands): + if module.check_mode: + if item['command'].startswith('conf'): + warnings.append( + 'only non-config commands are supported when using check mode, not ' + 'executing %s' % item['command'] + ) + commands.remove(item) + return commands + + +def main(): + """main entry point for module execution + """ + argument_spec = dict( + commands=dict(type='list', required=True), + + wait_for=dict(type='list', aliases=['waitfor']), + match=dict(default='all', choices=['all', 'any']), + + retries=dict(default=10, type='int'), + interval=dict(default=1, type='int') + ) + + module = AnsibleModule(argument_spec=argument_spec, + supports_check_mode=True) + + result = {'changed': False} + + warnings = list() + commands = parse_commands(module, warnings) + result['warnings'] = warnings + + wait_for = module.params['wait_for'] or list() + conditionals = [Conditional(c) for c in wait_for] + + retries = module.params['retries'] + interval = module.params['interval'] + match = module.params['match'] + + while retries > 0: + responses = run_commands(module, commands) + + for item in list(conditionals): + if item(responses): + if match == 'any': + conditionals = list() + break + conditionals.remove(item) + + if not conditionals: + break + + time.sleep(interval) + retries -= 1 + + if conditionals: + failed_conditions = [item.raw for item in conditionals] + msg = 'One or more conditional statements have not been satisfied' + module.fail_json(msg=msg, failed_conditions=failed_conditions) + + result.update({ + 'changed': False, + 'stdout': responses, + 'stdout_lines': list() + }) + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/exos/exos_command.py b/plugins/modules/network/exos/exos_command.py new file mode 100644 index 0000000000..3282c512a7 --- 
/dev/null
+++ b/plugins/modules/network/exos/exos_command.py
@@ -0,0 +1,220 @@
+#!/usr/bin/python
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: exos_command
+author: "Rafael D. Vencioneck (@rdvencioneck)"
+short_description: Run commands on remote devices running Extreme EXOS
+description:
+  - Sends arbitrary commands to an Extreme EXOS device and returns the results
+    read from the device. This module includes an argument that will cause the
+    module to wait for a specific condition before returning or timing out if
+    the condition is not met.
+  - This module does not support running configuration commands.
+    Please use M(exos_config) to configure EXOS devices.
+notes:
+  - If a command sent to the device requires answering a prompt, it is possible
+    to pass a dict containing I(command), I(answer) and I(prompt). See examples.
+options:
+  commands:
+    description:
+      - List of commands to send to the remote EXOS device over the
+        configured provider. The resulting output from the command
+        is returned. If the I(wait_for) argument is provided, the
+        module is not returned until the condition is satisfied or
+        the number of retries has expired.
+    required: true
+  wait_for:
+    description:
+      - List of conditions to evaluate against the output of the
+        command. The task will wait for each condition to be true
+        before moving forward. If the conditional is not true
+        within the configured number of retries, the task fails.
+        See examples.
+  match:
+    description:
+      - The I(match) argument is used in conjunction with the
+        I(wait_for) argument to specify the match policy. Valid
+        values are C(all) or C(any). If the value is set to C(all)
+        then all conditionals in the wait_for must be satisfied. If
+        the value is set to C(any) then only one of the values must be
+        satisfied.
+    default: all
+    choices: ['any', 'all']
+  retries:
+    description:
+      - Specifies the number of retries a command should be tried
+        before it is considered failed. The command is run on the
+        target device every retry and evaluated against the
+        I(wait_for) conditions.
+    default: 10
+  interval:
+    description:
+      - Configures the interval in seconds to wait between retries
+        of the command. If the command does not pass the specified
+        conditions, the interval indicates how long to wait before
+        trying the command again.
+    default: 1
+'''
+
+EXAMPLES = """
+tasks:
+  - name: run show version on remote devices
+    exos_command:
+      commands: show version
+  - name: run show version and check to see if output contains ExtremeXOS
+    exos_command:
+      commands: show version
+      wait_for: result[0] contains ExtremeXOS
+  - name: run multiple commands on remote nodes
+    exos_command:
+      commands:
+        - show version
+        - show ports no-refresh
+  - name: run multiple commands and evaluate the output
+    exos_command:
+      commands:
+        - show version
+        - show ports no-refresh
+      wait_for:
+        - result[0] contains ExtremeXOS
+        - result[1] contains 20
+  - name: run command that requires answering a prompt
+    exos_command:
+      commands:
+        - command: 'clear license-info'
+          prompt: 'Are you sure.*'
+          answer: 'Yes'
+"""
+
+RETURN = """
+stdout:
+  description: The set of responses from the commands
+  returned: always apart from low level errors (such as action plugin)
+  type: list
+  sample: ['...', '...']
+stdout_lines:
+  description: The value of stdout split into a list
+  returned: always apart from low level errors (such as action plugin)
+  type: list
+  sample: [['...', '...'], ['...'], ['...']]
+failed_conditions:
+  description: The list of conditionals that have failed
+  returned: failed
+  type: list
+  sample: ['...', '...']
+"""
+import re
+import time
+
+from ansible_collections.community.general.plugins.module_utils.network.exos.exos import run_commands
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import ComplexList
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.parsing import Conditional
+from ansible.module_utils.six import string_types
+
+
+def to_lines(stdout):
+    for item in stdout:
+        if isinstance(item, string_types):
+            item = str(item).split('\n')
+        yield item
+
+
+def parse_commands(module, warnings):
+    command = ComplexList(dict(
+        command=dict(key=True),
+        prompt=dict(),
+        answer=dict()
+    ), module)
+    commands = command(module.params['commands'])
+    for item in list(commands):
+        command_split = re.match(r'^(\w*)(.*)$', item['command'])
+        if module.check_mode and not item['command'].startswith('show'):
+            warnings.append(
+                'only show commands are supported when using check mode, not '
+                'executing `%s`' % item['command']
+            )
+            commands.remove(item)
+        elif command_split and command_split.group(1) not in ('check', 'clear', 'debug', 'history',
+                                                              'ls', 'mrinfo', 'mtrace', 'nslookup',
+                                                              'ping', 'rtlookup', 'show', 'traceroute'):
+            module.fail_json(
+                msg='some commands were not recognized. exos_command can only run read-only '
For configuration commands, please use exos_config instead' + ) + return commands + + +def main(): + """main entry point for module execution + """ + argument_spec = dict( + commands=dict(type='list', required=True), + + wait_for=dict(type='list'), + match=dict(default='all', choices=['all', 'any']), + + retries=dict(default=10, type='int'), + interval=dict(default=1, type='int') + ) + + module = AnsibleModule(argument_spec=argument_spec, + supports_check_mode=True) + + result = {'changed': False} + + warnings = list() + commands = parse_commands(module, warnings) + result['warnings'] = warnings + + wait_for = module.params['wait_for'] or list() + conditionals = [Conditional(c) for c in wait_for] + + retries = module.params['retries'] + interval = module.params['interval'] + match = module.params['match'] + + while retries > 0: + responses = run_commands(module, commands) + + for item in list(conditionals): + if item(responses): + if match == 'any': + conditionals = list() + break + conditionals.remove(item) + + if not conditionals: + break + + time.sleep(interval) + retries -= 1 + + if conditionals: + failed_conditions = [item.raw for item in conditionals] + msg = 'One or more conditional statements have not be satisfied' + module.fail_json(msg=msg, failed_conditions=failed_conditions) + + result.update({ + 'changed': False, + 'stdout': responses, + 'stdout_lines': list(to_lines(responses)) + }) + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/exos/exos_config.py b/plugins/modules/network/exos/exos_config.py new file mode 100644 index 0000000000..d9ab02fd30 --- /dev/null +++ b/plugins/modules/network/exos/exos_config.py @@ -0,0 +1,436 @@ +#!/usr/bin/python + +# Copyright: (c) 2018, Extreme Networks Inc. +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: exos_config +author: "Lance Richardson (@hlrichardson)" +short_description: Manage Extreme Networks EXOS configuration sections +description: + - Extreme EXOS configurations use a simple flat text file syntax. + This module provides an implementation for working with EXOS + configuration lines in a deterministic way. +notes: + - Tested against EXOS version 22.6.0b19 +options: + lines: + description: + - The ordered set of commands that should be configured in the + section. The commands must be the exact same commands as found + in the device running-config. Be sure to note the configuration + command syntax as some commands are automatically modified by the + device config parser. + aliases: ['commands'] + src: + description: + - Specifies the source path to the file that contains the configuration + or configuration template to load. The path to the source file can + either be the full path on the Ansible control host or a relative + path from the playbook or role root directory. This argument is mutually + exclusive with I(lines). + before: + description: + - The ordered set of commands to push on to the command stack if + a change needs to be made. This allows the playbook designer + the opportunity to perform configuration commands prior to pushing + any changes without affecting how the set of commands are matched + against the system. 
+  after:
+    description:
+      - The ordered set of commands to append to the end of the command
+        stack if a change needs to be made. Just like with I(before) this
+        allows the playbook designer to append a set of commands to be
+        executed after the command set.
+  match:
+    description:
+      - Instructs the module on the way to perform the matching of
+        the set of commands against the current device config. If
+        match is set to I(line), commands are matched line by line. If
+        match is set to I(strict), command lines are matched with respect
+        to position. If match is set to I(exact), command lines
+        must be an equal match. Finally, if match is set to I(none), the
+        module will not attempt to compare the source configuration with
+        the running configuration on the remote device.
+    default: line
+    choices: ['line', 'strict', 'exact', 'none']
+  replace:
+    description:
+      - Instructs the module on the way to perform the configuration
+        on the device. If the replace argument is set to I(line) then
+        the modified lines are pushed to the device in configuration
+        mode. If the replace argument is set to I(block) then the entire
+        command block is pushed to the device in configuration mode if any
+        line is not correct.
+    default: line
+    choices: ['line', 'block']
+  backup:
+    description:
+      - This argument will cause the module to create a full backup of
+        the current C(running-config) from the remote device before any
+        changes are made. If the C(backup_options) value is not given,
+        the backup file is written to the C(backup) folder in the playbook
+        root directory. If the directory does not exist, it is created.
+    type: bool
+    default: 'no'
+  running_config:
+    description:
+      - The module, by default, will connect to the remote device and
+        retrieve the current running-config to use as a base for comparing
+        against the contents of source. There are times when it is not
+        desirable to have the task get the current running-config for
+        every task in a playbook. The I(running_config) argument allows the
+        implementer to pass in the configuration to use as the base
+        config for comparison.
+    aliases: ['config']
+  defaults:
+    description:
+      - This argument specifies whether or not to collect all defaults
+        when getting the remote device running config. When enabled,
+        the module will get the current config by issuing the command
+        C(show running-config all).
+    type: bool
+    default: 'no'
+  save_when:
+    description:
+      - When changes are made to the device running-configuration, the
+        changes are not copied to non-volatile storage by default. Using
+        this argument will change that behavior. If the argument is set to
+        I(always), then the running-config will always be copied to the
+        startup-config and the I(modified) flag will always be set to
+        True. If the argument is set to I(modified), then the running-config
+        will only be copied to the startup-config if it has changed since
+        the last save to startup-config. If the argument is set to
+        I(never), the running-config will never be copied to the
+        startup-config. If the argument is set to I(changed), then the
+        running-config will only be copied to the startup-config if the
+        task has made a change.
+    default: never
+    choices: ['always', 'never', 'modified', 'changed']
+  diff_against:
+    description:
+      - When using the C(ansible-playbook --diff) command line argument
+        the module can generate diffs against different sources.
+      - When this option is configured as I(startup), the module will return
+        the diff of the running-config against the startup-config.
+      - When this option is configured as I(intended), the module will
+        return the diff of the running-config against the configuration
+        provided in the C(intended_config) argument.
+      - When this option is configured as I(running), the module will
+        return the before and after diff of the running-config with respect
+        to any changes made to the device configuration.
+    default: running
+    choices: ['running', 'startup', 'intended']
+  diff_ignore_lines:
+    description:
+      - Use this argument to specify one or more lines that should be
+        ignored during the diff. This is used for lines in the configuration
+        that are automatically updated by the system. This argument takes
+        a list of regular expressions or exact line matches.
+  intended_config:
+    description:
+      - The C(intended_config) provides the master configuration that
+        the node should conform to and is used to check the final
+        running-config against. This argument will not modify any settings
+        on the remote device and is used strictly to check the compliance
+        of the current device's configuration against it. When specifying this
+        argument, the task should also modify the C(diff_against) value and
+        set it to I(intended).
+  backup_options:
+    description:
+      - This is a dict object containing configurable options related to the
+        backup file path. The value of this option is read only when C(backup)
+        is set to I(yes); if C(backup) is set to I(no), this option is silently
+        ignored.
+    suboptions:
+      filename:
+        description:
+          - The filename to be used to store the backup configuration. If the
+            filename is not given, it will be generated from the hostname and
+            the current date and time, in the format
+            C(<hostname>_config.<date>@<time>).
+      dir_path:
+        description:
+          - This option provides the path, ending with a directory name, in
+            which the backup configuration file will be stored. If the
+            directory does not exist it will be created first, and the filename
+            is either the value of C(filename) or the default filename described
+            in the C(filename) option. If no path value is given, a I(backup)
+            directory is created in the current working directory and the backup
+            configuration is copied to C(filename) within that I(backup)
+            directory.
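Since the default backup filename is only described in prose above, here is a tiny, hypothetical helper showing the shape it takes; the name default_backup_filename is invented for illustration, and the format is inferred from the C(backup_path) sample in this module's RETURN block (C(/playbooks/ansible/backup/x870_config.2018-08-08@15:00:21)).

from datetime import datetime

def default_backup_filename(hostname, now=None):
    # Illustrative only: <hostname>_config.<date>@<time>, as the
    # backup_options documentation describes.
    now = now or datetime.now()
    return '%s_config.%s@%s' % (hostname,
                                now.strftime('%Y-%m-%d'),
                                now.strftime('%H:%M:%S'))

print(default_backup_filename('x870', datetime(2018, 8, 8, 15, 0, 21)))
# -> x870_config.2018-08-08@15:00:21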
+ type: path + type: dict +''' + +EXAMPLES = """ +- name: configure SNMP system name + exos_config: + lines: configure snmp sysName "{{ inventory_hostname }}" + +- name: configure interface settings + exos_config: + lines: + - configure ports 2 description-string "Master Uplink" + backup: yes + +- name: check the running-config against master config + exos_config: + diff_against: intended + intended_config: "{{ lookup('file', 'master.cfg') }}" + +- name: check the startup-config against the running-config + exos_config: + diff_against: startup + diff_ignore_lines: + - ntp clock .* + +- name: save running to startup when modified + exos_config: + save_when: modified + +- name: configurable backup path + exos_config: + lines: + - configure ports 2 description-string "Master Uplink" + backup: yes + backup_options: + filename: backup.cfg + dir_path: /home/user +""" + +RETURN = """ +updates: + description: The set of commands that will be pushed to the remote device + returned: always + type: list + sample: ['switch-attributes hostname foo', 'router ospf', 'area 0'] +commands: + description: The set of commands that will be pushed to the remote device + returned: always + type: list + sample: ['create vlan "foo"', 'configure snmp sysName "x620-red"'] +backup_path: + description: The full path to the backup file + returned: when backup is yes + type: str + sample: /playbooks/ansible/backup/x870_config.2018-08-08@15:00:21 + +""" +import re + +from ansible_collections.community.general.plugins.module_utils.network.exos.exos import run_commands, get_config, load_config, get_diff +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.config import NetworkConfig, dumps +from ansible.module_utils._text import to_text +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import to_list + +__metaclass__ = type + + +def get_running_config(module, current_config=None, flags=None): + contents = module.params['running_config'] + if not contents: + if current_config: + contents = current_config.config_text + else: + contents = get_config(module, flags=flags) + return contents + + +def get_startup_config(module, flags=None): + reply = run_commands(module, {'command': 'show switch', 'output': 'text'}) + match = re.search(r'Config Selected: +(\S+)\.cfg', to_text(reply, errors='surrogate_or_strict').strip(), re.MULTILINE) + if match: + cfgname = match.group(1).strip() + command = ' '.join(['debug cfgmgr show configuration file', cfgname]) + if flags: + command += ' '.join(to_list(flags)).strip() + reply = run_commands(module, {'command': command, 'output': 'text'}) + data = reply[0] + else: + data = '' + return data + + +def get_candidate(module): + candidate = NetworkConfig(indent=1) + + if module.params['src']: + candidate.load(module.params['src']) + elif module.params['lines']: + candidate.add(module.params['lines']) + candidate = dumps(candidate, 'raw') + return candidate + + +def save_config(module, result): + result['changed'] = True + if not module.check_mode: + command = {"command": "save configuration", + "prompt": "Do you want to save configuration", "answer": "y"} + run_commands(module, command) + else: + module.warn('Skipping command `save configuration` ' + 'due to check_mode. 
Configuration not copied to ' + 'non-volatile storage') + + +def main(): + """ main entry point for module execution + """ + backup_spec = dict( + filename=dict(), + dir_path=dict(type='path') + ) + argument_spec = dict( + src=dict(type='path'), + + lines=dict(aliases=['commands'], type='list'), + + before=dict(type='list'), + after=dict(type='list'), + + match=dict(default='line', choices=['line', 'strict', 'exact', 'none']), + replace=dict(default='line', choices=['line', 'block']), + + running_config=dict(aliases=['config']), + intended_config=dict(), + + defaults=dict(type='bool', default=False), + backup=dict(type='bool', default=False), + backup_options=dict(type='dict', options=backup_spec), + + save_when=dict(choices=['always', 'never', 'modified', 'changed'], default='never'), + + diff_against=dict(choices=['startup', 'intended', 'running'], default='running'), + diff_ignore_lines=dict(type='list'), + ) + + mutually_exclusive = [('lines', 'src')] + + required_if = [('match', 'strict', ['lines']), + ('match', 'exact', ['lines']), + ('replace', 'block', ['lines']), + ('diff_against', 'intended', ['intended_config'])] + + module = AnsibleModule(argument_spec=argument_spec, + mutually_exclusive=mutually_exclusive, + required_if=required_if, + supports_check_mode=True) + + result = {'changed': False} + + warnings = list() + if warnings: + result['warnings'] = warnings + + config = None + flags = ['detail'] if module.params['defaults'] else [] + diff_ignore_lines = module.params['diff_ignore_lines'] + + if module.params['backup'] or (module._diff and module.params['diff_against'] == 'running'): + contents = get_config(module, flags=flags) + config = NetworkConfig(indent=1, contents=contents) + if module.params['backup']: + result['__backup__'] = contents + + if any((module.params['lines'], module.params['src'])): + match = module.params['match'] + replace = module.params['replace'] + + candidate = get_candidate(module) + running = get_running_config(module, config) + + try: + response = get_diff(module, candidate=candidate, running=running, diff_match=match, diff_ignore_lines=diff_ignore_lines, diff_replace=replace) + except ConnectionError as exc: + module.fail_json(msg=to_text(exc, errors='surrogate_then_replace')) + + config_diff = response.get('config_diff') + + if config_diff: + commands = config_diff.split('\n') + + if module.params['before']: + commands[:0] = module.params['before'] + + if module.params['after']: + commands.extend(module.params['after']) + + result['commands'] = commands + result['updates'] = commands + + # send the configuration commands to the device and merge + # them with the current running config + if not module.check_mode: + if commands: + load_config(module, commands) + + result['changed'] = True + + running_config = None + startup_config = None + + if module.params['save_when'] == 'always': + save_config(module, result) + elif module.params['save_when'] == 'modified': + running = get_running_config(module) + startup = get_startup_config(module) + + running_config = NetworkConfig(indent=1, contents=running, ignore_lines=diff_ignore_lines) + startup_config = NetworkConfig(indent=1, contents=startup, ignore_lines=diff_ignore_lines) + + if running_config.sha1 != startup_config.sha1: + save_config(module, result) + elif module.params['save_when'] == 'changed' and result['changed']: + save_config(module, result) + + if module._diff: + if not running_config: + contents = get_running_config(module) + else: + contents = running_config.config_text + + # recreate 
the object in order to process diff_ignore_lines + running_config = NetworkConfig(indent=1, contents=contents, ignore_lines=diff_ignore_lines) + + if module.params['diff_against'] == 'running': + if module.check_mode: + module.warn("unable to perform diff against running-config due to check mode") + contents = None + else: + contents = config.config_text + + elif module.params['diff_against'] == 'startup': + if not startup_config: + contents = get_startup_config(module) + else: + contents = startup_config.config_text + + elif module.params['diff_against'] == 'intended': + contents = module.params['intended_config'] + + if contents is not None: + base_config = NetworkConfig(indent=1, contents=contents, ignore_lines=diff_ignore_lines) + + if running_config.sha1 != base_config.sha1: + if module.params['diff_against'] == 'intended': + before = running_config + after = base_config + elif module.params['diff_against'] in ('startup', 'running'): + before = base_config + after = running_config + + result.update({ + 'changed': True, + 'diff': {'before': str(before), 'after': str(after)} + }) + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/exos/exos_facts.py b/plugins/modules/network/exos/exos_facts.py new file mode 100644 index 0000000000..b5f79708d8 --- /dev/null +++ b/plugins/modules/network/exos/exos_facts.py @@ -0,0 +1,189 @@ +#!/usr/bin/python +# +# (c) 2018 Extreme Networks Inc. +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: exos_facts +author: + - "Lance Richardson (@hlrichardson)" + - "Ujwal Koamrla (@ujwalkomarla)" +short_description: Collect facts from devices running Extreme EXOS +description: + - Collects a base set of device facts from a remote device that + is running EXOS. This module prepends all of the base network + fact keys with C(ansible_net_). The facts module will + always collect a base set of facts from the device and can + enable or disable collection of additional facts. +notes: + - Tested against EXOS 22.5.1.7 +options: + gather_subset: + description: + - When supplied, this argument will restrict the facts collected + to a given subset. Possible values for this argument include + all, hardware, config, and interfaces. Can specify a list of + values to include a larger subset. Values can also be used + with an initial C(M(!)) to specify that a specific subset should + not be collected. + required: false + type: list + default: ['!config'] + gather_network_resources: + description: + - When supplied, this argument will restrict the facts collected + to a given subset. Possible values for this argument include + all and the resources like interfaces, vlans etc. 
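The C(save_when=modified) branch in exos_config's main() above decides whether to save by comparing SHA1 digests of the running and startup configurations after C(diff_ignore_lines) filtering. A simplified sketch of that idea, assuming a plain line filter in place of NetworkConfig's real normalisation:

import hashlib

def config_sha1(text, ignore_lines=()):
    # Hash the config after dropping blank and ignored lines, so two
    # configs compare equal when only ignorable content differs.
    # NetworkConfig does the real parsing; this stand-in only strips
    # exact-match lines.
    kept = [line.strip() for line in text.splitlines()
            if line.strip() and line.strip() not in ignore_lines]
    return hashlib.sha1('\n'.join(kept).encode('utf-8')).hexdigest()

running = 'create vlan "foo"\nconfigure snmp sysName "x620-red"\n'
startup = 'create vlan "foo"\n'
if config_sha1(running) != config_sha1(startup):
    print('running-config differs from startup-config: save configuration')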
+ Can specify a list of values to include a larger subset. + Values can also be used with an initial C(M(!)) to specify that + a specific subset should not be collected. + Valid subsets are 'all', 'lldp_global'. + type: list +''' + +EXAMPLES = """ + - name: Gather all legacy facts + exos_facts: + gather_subset: all + + - name: Gather only the config and default facts + exos_facts: + gather_subset: config + + - name: do not gather hardware facts + exos_facts: + gather_subset: "!hardware" + + - name: Gather legacy and resource facts + exos_facts: + gather_subset: all + gather_network_resources: all + + - name: Gather only the lldp global resource facts and no legacy facts + exos_facts: + gather_subset: + - '!all' + - '!min' + gather_network_resource: + - lldp_global + + - name: Gather lldp global resource and minimal legacy facts + exos_facts: + gather_subset: min + gather_network_resource: lldp_global +""" + +RETURN = """ +ansible_net_gather_subset: + description: The list of fact subsets collected from the device + returned: always + type: list + +ansible_net_gather_network_resources: + description: The list of fact for network resource subsets collected from the device + returned: when the resource is configured + type: list + +# default +ansible_net_model: + description: The model name returned from the device + returned: always + type: str +ansible_net_serialnum: + description: The serial number of the remote device + returned: always + type: str +ansible_net_version: + description: The operating system version running on the remote device + returned: always + type: str +ansible_net_hostname: + description: The configured hostname of the device + returned: always + type: str + +# hardware +ansible_net_memfree_mb: + description: The available free memory on the remote device in Mb + returned: when hardware is configured + type: int +ansible_net_memtotal_mb: + description: The total memory on the remote device in Mb + returned: when hardware is configured + type: int + +# config +ansible_net_config: + description: The current active config from the device + returned: when config is configured + type: str + +# interfaces +ansible_net_all_ipv4_addresses: + description: All IPv4 addresses configured on the device + returned: when interfaces is configured + type: list +ansible_net_all_ipv6_addresses: + description: All Primary IPv6 addresses configured on the device + returned: when interfaces is configured + type: list +ansible_net_interfaces: + description: A hash of all interfaces running on the system + returned: when interfaces is configured + type: dict +ansible_net_neighbors: + description: The list of LLDP neighbors from the remote device + returned: when interfaces is configured + type: dict +""" + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.network.exos.argspec.facts.facts import FactsArgs +from ansible_collections.community.general.plugins.module_utils.network.exos.facts.facts import Facts + + +def main(): + """Main entry point for AnsibleModule + """ + argument_spec = FactsArgs.argument_spec + + module = AnsibleModule(argument_spec=argument_spec, + supports_check_mode=True) + + warnings = ['default value for `gather_subset` ' + 'will be changed to `min` from `!config` v2.11 onwards'] + + result = Facts(module).get_facts() + + ansible_facts, additional_warnings = result + warnings.extend(additional_warnings) + + module.exit_json(ansible_facts=ansible_facts, warnings=warnings) + + +if __name__ == '__main__': 
+ main() diff --git a/plugins/modules/network/exos/exos_l2_interfaces.py b/plugins/modules/network/exos/exos_l2_interfaces.py new file mode 100644 index 0000000000..199e53f38f --- /dev/null +++ b/plugins/modules/network/exos/exos_l2_interfaces.py @@ -0,0 +1,1136 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright 2019 Red Hat +# GNU General Public License v3.0+ +# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +############################################# +# WARNING # +############################################# +# +# This file is auto generated by the resource +# module builder playbook. +# +# Do not edit this file manually. +# +# Changes to this file will be over written +# by the resource module builder. +# +# Changes should be made in the model used to +# generate this file or in the resource module +# builder template. +# +############################################# +""" +The module file for exos_l2_interfaces +""" + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = { + 'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community' +} + +DOCUMENTATION = ''' +--- +module: exos_l2_interfaces +short_description: Manage L2 interfaces on Extreme Networks EXOS devices. +description: This module provides declarative management of L2 interfaces on Extreme Networks EXOS network devices. +author: Jayalakshmi Viswanathan (@jayalakshmiV) +notes: + - Tested against EXOS 30.2.1.8 + - This module works with connection C(httpapi). + See L(EXOS Platform Options,../network/user_guide/platform_exos.html) +options: + config: + description: A dictionary of L2 interfaces options + type: list + elements: dict + suboptions: + name: + description: + - Name of the interface + type: str + required: True + access: + description: + - Switchport mode access command to configure the interface as a layer 2 access. + type: dict + suboptions: + vlan: + description: + - Configure given VLAN in access port. It's used as the access VLAN ID. + type: int + trunk: + description: + - Switchport mode trunk command to configure the interface as a Layer 2 trunk. + type: dict + suboptions: + native_vlan: + description: + - Native VLAN to be configured in trunk port. It's used as the trunk native VLAN ID. + type: int + trunk_allowed_vlans: + description: + - List of allowed VLANs in a given trunk port. These are the only VLANs that will be configured on the trunk. 
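Back in exos_facts above, the C(gather_subset) option follows the usual facts convention: bare names opt subsets in, a leading C(!) opts them out, and C(!config) is the default. A simplified resolver illustrating those documented semantics (the real resolution happens inside the shared Facts machinery; edge cases such as C(min) are omitted here):

def resolve_gather_subset(requested, valid=('hardware', 'config', 'interfaces')):
    # Plain names opt subsets in, '!name' opts them out, and 'all'
    # expands to every valid subset.  When nothing is opted in, start
    # from the full set, then apply the exclusions.
    include, exclude = set(), set()
    for item in requested:
        if item == 'all':
            include.update(valid)
        elif item.startswith('!'):
            exclude.update(valid if item == '!all' else [item[1:]])
        else:
            include.add(item)
    if not include:
        include.update(valid)
    return sorted(include - exclude)

print(resolve_gather_subset(['!config']))           # ['hardware', 'interfaces']
print(resolve_gather_subset(['all', '!hardware']))  # ['config', 'interfaces']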
+ type: list + state: + description: + - The state the configuration should be left in + type: str + choices: + - merged + - replaced + - overridden + - deleted + default: merged +''' +EXAMPLES = """ +# Using deleted + +# Before state: +# ------------- +# path: /rest/restconf/data/openconfig-interfaces:interfaces/ +# method: GET +# data: +# { +# "openconfig-interfaces:interfaces": { +# "interface": [ +# { +# "name": "1", +# "openconfig-if-ethernet:ethernet": { +# "openconfig-vlan:switched-vlan": { +# "config": { +# "interface-mode": "ACCESS", +# "access-vlan": 10 +# } +# } +# } +# }, +# { +# "name": "2", +# "openconfig-if-ethernet:ethernet": { +# "openconfig-vlan:switched-vlan": { +# "config": { +# "interface-mode": "TRUNK", +# "native-vlan": 1, +# "trunk-vlans": [ +# 10 +# ] +# } +# } +# } +# }, +# { +# "name": "3", +# "openconfig-if-ethernet:ethernet": { +# "openconfig-vlan:switched-vlan": { +# "config": { +# "interface-mode": "TRUNK", +# "native-vlan": 10, +# "trunk-vlans": [ +# 20, +# 30 +# ] +# } +# } +# } +# } +# ] +# } +# } + +- name: Delete L2 interface configuration for the given arguments + exos_l2_interfaces: + config: + - name: '3' + state: deleted + +# Module Execution Results: +# ------------------------- +# +# "before": [ +# { +# "access": { +# "vlan": 10 +# }, +# "name": "1", +# "trunk": null +# }, +# { +# "access": null, +# "name": "2", +# "trunk": { +# "native_vlan": 1, +# "trunk_allowed_vlans": [ +# 10 +# ] +# } +# }, +# { +# "access": null, +# "name": "3", +# "trunk": { +# "native_vlan": 10, +# "trunk_allowed_vlans": [ +# 20, +# 30 +# ] +# } +# } +# ], +# +# "requests": [ +# { +# "data": { +# "openconfig-vlan:config": { +# "access-vlan": 1, +# "interface-mode": "ACCESS" +# } +# } +# "method": "PATCH", +# "path": "rest/restconf/data/openconfig-interfaces:interfaces/interface=3/openconfig-if-ethernet:ethernet/openconfig-vlan:switched-vlan/config" +# } +# ], +# +# "after": [ +# { +# "access": { +# "vlan": 10 +# }, +# "name": "1", +# "trunk": null +# }, +# { +# "access": null, +# "name": "2", +# "trunk": { +# "native_vlan": 1, +# "trunk_allowed_vlans": [ +# 10 +# ] +# } +# }, +# { +# "access": { +# "vlan": 1 +# }, +# "name": "3", +# "trunk": null +# } +# ] +# +# After state: +# ------------- +# +# path: /rest/restconf/data/openconfig-interfaces:interfaces/ +# method: GET +# data: +# { +# "openconfig-interfaces:interfaces": { +# "interface": [ +# { +# "name": "1", +# "openconfig-if-ethernet:ethernet": { +# "openconfig-vlan:switched-vlan": { +# "config": { +# "interface-mode": "ACCESS", +# "access-vlan": 10 +# } +# } +# } +# }, +# { +# "name": "2", +# "openconfig-if-ethernet:ethernet": { +# "openconfig-vlan:switched-vlan": { +# "config": { +# "interface-mode": "TRUNK", +# "native-vlan": 1, +# "trunk-vlans": [ +# 10 +# ] +# } +# } +# } +# }, +# { +# "name": "3", +# "openconfig-if-ethernet:ethernet": { +# "openconfig-vlan:switched-vlan": { +# "config": { +# "interface-mode": "ACCESS", +# "access-vlan": 1 +# } +# } +# } +# } +# ] +# } +# } + + +# Using deleted without any config passed +#"(NOTE: This will delete all of configured resource module attributes from each configured interface)" + +# Before state: +# ------------- +# path: /rest/restconf/data/openconfig-interfaces:interfaces/ +# method: GET +# data: +# { +# "openconfig-interfaces:interfaces": { +# "interface": [ +# { +# "name": "1", +# "openconfig-if-ethernet:ethernet": { +# "openconfig-vlan:switched-vlan": { +# "config": { +# "interface-mode": "ACCESS", +# "access-vlan": 10 +# } +# } +# } +# }, +# { +# "name": "2", +# 
"openconfig-if-ethernet:ethernet": { +# "openconfig-vlan:switched-vlan": { +# "config": { +# "interface-mode": "TRUNK", +# "native-vlan": 1, +# "trunk-vlans": [ +# 10 +# ] +# } +# } +# } +# }, +# { +# "name": "3", +# "openconfig-if-ethernet:ethernet": { +# "openconfig-vlan:switched-vlan": { +# "config": { +# "interface-mode": "TRUNK", +# "native-vlan": 10, +# "trunk-vlans": [ +# 20, +# 30 +# ] +# } +# } +# } +# } +# ] +# } +# } + +- name: Delete L2 interface configuration for the given arguments + exos_l2_interfaces: + state: deleted + +# Module Execution Results: +# ------------------------- +# +# "before": [ +# { +# "access": { +# "vlan": 10 +# }, +# "name": "1", +# "trunk": null +# }, +# { +# "access": null, +# "name": "2", +# "trunk": { +# "native_vlan": 1, +# "trunk_allowed_vlans": [ +# 10 +# ] +# } +# }, +# { +# "access": null, +# "name": "3", +# "trunk": { +# "native_vlan": 10, +# "trunk_allowed_vlans": [ +# 20, +# 30 +# ] +# } +# } +# ], +# +# "requests": [ +# { +# "data": { +# "openconfig-vlan:config": { +# "access-vlan": 1, +# "interface-mode": "ACCESS" +# } +# } +# "method": "PATCH", +# "path": "rest/restconf/data/openconfig-interfaces:interfaces/interface=1/openconfig-if-ethernet:ethernet/openconfig-vlan:switched-vlan/config" +# }, +# { +# "data": { +# "openconfig-vlan:config": { +# "access-vlan": 1, +# "interface-mode": "ACCESS" +# } +# } +# "method": "PATCH", +# "path": "rest/restconf/data/openconfig-interfaces:interfaces/interface=2/openconfig-if-ethernet:ethernet/openconfig-vlan:switched-vlan/config" +# }, +# { +# "data": { +# "openconfig-vlan:config": { +# "access-vlan": 1, +# "interface-mode": "ACCESS" +# } +# } +# "method": "PATCH", +# "path": "rest/restconf/data/openconfig-interfaces:interfaces/interface=3/openconfig-if-ethernet:ethernet/openconfig-vlan:switched-vlan/config" +# } +# ], +# +# "after": [ +# { +# "access": { +# "vlan": 1 +# }, +# "name": "1", +# "trunk": null +# }, +# { +# "access": { +# "vlan": 1 +# }, +# "name": "2", +# "trunk": null +# }, +# { +# "access": { +# "vlan": 1 +# }, +# "name": "3", +# "trunk": null +# } +# ] +# +# After state: +# ------------- +# +# path: /rest/restconf/data/openconfig-interfaces:interfaces/ +# method: GET +# data: +# { +# "openconfig-interfaces:interfaces": { +# "interface": [ +# { +# "name": "1", +# "openconfig-if-ethernet:ethernet": { +# "openconfig-vlan:switched-vlan": { +# "config": { +# "interface-mode": "ACCESS", +# "access-vlan": 1 +# } +# } +# } +# }, +# { +# "name": "2", +# "openconfig-if-ethernet:ethernet": { +# "openconfig-vlan:switched-vlan": { +# "config": { +# "interface-mode": "ACCESS", +# "access-vlan": 1 +# } +# } +# } +# }, +# { +# "name": "3", +# "openconfig-if-ethernet:ethernet": { +# "openconfig-vlan:switched-vlan": { +# "config": { +# "interface-mode": "ACCESS", +# "access-vlan": 1 +# } +# } +# } +# } +# ] +# } +# } + + +# Using merged + +# Before state: +# ------------- +# path: /rest/restconf/data/openconfig-interfaces:interfaces/ +# method: GET +# data: +# { +# "openconfig-interfaces:interfaces": { +# "interface": [ +# { +# "name": "1", +# "openconfig-if-ethernet:ethernet": { +# "openconfig-vlan:switched-vlan": { +# "config": { +# "interface-mode": "ACCESS", +# "access-vlan": 1 +# }, +# } +# } +# }, +# { +# "name": "2", +# "openconfig-if-ethernet:ethernet": { +# "openconfig-vlan:switched-vlan": { +# "config": { +# "interface-mode": "ACCESS", +# "access-vlan": 1 +# }, +# } +# } +# }, +# { +# "name": "3", +# "openconfig-if-ethernet:ethernet": { +# "openconfig-vlan:switched-vlan": { +# "config": { +# 
"interface-mode": "ACCESS", +# "access-vlan": 1 +# }, +# } +# } +# }, +# ] +# } +# } + +- name: Merge provided configuration with device configuration + exos_l2_interfaces: + config: + - access: + vlan: 10 + name: '1' + - name: '2' + trunk: + trunk_allowed_vlans: 10 + - name: '3' + trunk: + native_vlan: 10 + trunk_allowed_vlans: 20 + state: merged + +# Module Execution Results: +# ------------------------- +# +# "before": [ +# { +# "access": { +# "vlan": 1 +# }, +# "name": "1", +# "trunk": null +# }, +# { +# "access": { +# "vlan": 1 +# }, +# "name": "2", +# "trunk": null +# }, +# { +# "access": { +# "vlan": 1 +# }, +# "name": "3", +# "trunk": null +# } +# ], +# +# "requests": [ +# { +# "data": { +# "openconfig-vlan:config": { +# "access-vlan": 10, +# "interface-mode": "ACCESS" +# } +# } +# "method": "PATCH", +# "path": "rest/restconf/data/openconfig-interfaces:interfaces/interface=1/openconfig-if-ethernet:ethernet/openconfig-vlan:switched-vlan/config" +# }, +# { +# "data": { +# "openconfig-vlan:config": { +# "trunk-vlans": [10], +# "interface-mode": "TRUNK" +# } +# } +# "method": "PATCH", +# "path": "rest/restconf/data/openconfig-interfaces:interfaces/interface=2/openconfig-if-ethernet:ethernet/openconfig-vlan:switched-vlan/config" +# }, +# { +# "data": { +# "openconfig-vlan:config": { +# "native-vlan": 10, +# "trunk-vlans": [20], +# "interface-mode": "TRUNK" +# } +# } +# "method": "PATCH", +# "path": "rest/restconf/data/openconfig-interfaces:interfaces/interface=3/openconfig-if-ethernet:ethernet/openconfig-vlan:switched-vlan/config" +# } +# ], +# +# "after": [ +# { +# "access": { +# "vlan": 10 +# }, +# "name": "1", +# "trunk": null +# }, +# { +# "access": null, +# "name": "2", +# "trunk": { +# "native_vlan": 1, +# "trunk_allowed_vlans": [ +# 10 +# ] +# } +# }, +# { +# "access": null, +# "name": "3", +# "trunk": { +# "native_vlan": 10, +# "trunk_allowed_vlans": [ +# 20 +# ] +# } +# } +# ] +# +# After state: +# ------------- +# +# path: /rest/restconf/data/openconfig-interfaces:interfaces/ +# method: GET +# data: +# { +# "openconfig-interfaces:interfaces": { +# "interface": [ +# { +# "name": "1", +# "openconfig-if-ethernet:ethernet": { +# "openconfig-vlan:switched-vlan": { +# "config": { +# "interface-mode": "ACCESS", +# "access-vlan": 10 +# } +# } +# } +# }, +# { +# "name": "2", +# "openconfig-if-ethernet:ethernet": { +# "openconfig-vlan:switched-vlan": { +# "config": { +# "interface-mode": "TRUNK", +# "native-vlan": 1, +# "trunk-vlans": [ +# 10 +# ] +# } +# } +# } +# }, +# { +# "name": "3", +# "openconfig-if-ethernet:ethernet": { +# "openconfig-vlan:switched-vlan": { +# "config": { +# "interface-mode": "TRUNK", +# "native-vlan": 10, +# "trunk-vlans": [ +# 20 +# ] +# } +# } +# } +# }, +# ] +# } +# } + + +# Using overridden + +# Before state: +# ------------- +# path: /rest/restconf/data/openconfig-interfaces:interfaces/ +# method: GET +# data: +# { +# "openconfig-interfaces:interfaces": { +# "interface": [ +# { +# "name": "1", +# "openconfig-if-ethernet:ethernet": { +# "openconfig-vlan:switched-vlan": { +# "config": { +# "interface-mode": "ACCESS", +# "access-vlan": 10 +# } +# } +# } +# }, +# { +# "name": "2", +# "openconfig-if-ethernet:ethernet": { +# "openconfig-vlan:switched-vlan": { +# "config": { +# "interface-mode": "TRUNK", +# "native-vlan": 1, +# "trunk-vlans": [ +# 10 +# ] +# } +# } +# } +# }, +# { +# "name": "3", +# "openconfig-if-ethernet:ethernet": { +# "openconfig-vlan:switched-vlan": { +# "config": { +# "interface-mode": "TRUNK", +# "native-vlan": 10, +# "trunk-vlans": [ +# 
20, +# 30 +# ] +# } +# } +# } +# } +# ] +# } +# } + +- name: Overrride device configuration of all L2 interfaces with provided configuration + exos_l2_interfaces: + config: + - access: + vlan: 10 + name: '2' + state: overridden + +# Module Execution Results: +# ------------------------- +# +# "before": [ +# { +# "access": { +# "vlan": 10 +# }, +# "name": "1", +# "trunk": null +# }, +# { +# "access": null, +# "name": "2", +# "trunk": { +# "native_vlan": 1, +# "trunk_allowed_vlans": [ +# 10 +# ] +# } +# }, +# { +# "access": null, +# "name": "3", +# "trunk": { +# "native_vlan": 10, +# "trunk_allowed_vlans": [ +# 20, +# 30 +# ] +# } +# } +# ], +# +# "requests": [ +# { +# "data": { +# "openconfig-vlan:config": { +# "access-vlan": 1, +# "interface-mode": "ACCESS" +# } +# } +# "method": "PATCH", +# "path": "rest/restconf/data/openconfig-interfaces:interfaces/interface=1/openconfig-if-ethernet:ethernet/openconfig-vlan:switched-vlan/config" +# }, +# { +# "data": { +# "openconfig-vlan:config": { +# "access-vlan": 10, +# "interface-mode": "ACCESS" +# } +# } +# "method": "PATCH", +# "path": "rest/restconf/data/openconfig-interfaces:interfaces/interface=2/openconfig-if-ethernet:ethernet/openconfig-vlan:switched-vlan/config" +# } +# { +# "data": { +# "openconfig-vlan:config": { +# "access-vlan": 1, +# "interface-mode": "ACCESS" +# } +# } +# "method": "PATCH", +# "path": "rest/restconf/data/openconfig-interfaces:interfaces/interface=3/openconfig-if-ethernet:ethernet/openconfig-vlan:switched-vlan/config" +# } +# ], +# +# "after": [ +# { +# "access": { +# "vlan": 1 +# }, +# "name": "1", +# "trunk": null +# }, +# { +# "access": { +# "vlan": 10 +# }, +# "name": "2", +# "trunk": null +# }, +# { +# "access": { +# "vlan": 1 +# }, +# "name": "3", +# "trunk": null +# } +# ] +# +# After state: +# ------------- +# +# path: /rest/restconf/data/openconfig-interfaces:interfaces/ +# method: GET +# data: +# { +# "openconfig-interfaces:interfaces": { +# "interface": [ +# { +# "name": "1", +# "openconfig-if-ethernet:ethernet": { +# "openconfig-vlan:switched-vlan": { +# "config": { +# "interface-mode": "ACCESS", +# "access-vlan": 1 +# } +# } +# } +# }, +# { +# "name": "2", +# "openconfig-if-ethernet:ethernet": { +# "openconfig-vlan:switched-vlan": { +# "config": { +# "interface-mode": "ACCESS", +# "access-vlan": 10 +# } +# } +# } +# }, +# { +# "name": "3", +# "openconfig-if-ethernet:ethernet": { +# "openconfig-vlan:switched-vlan": { +# "config": { +# "interface-mode": "ACCESS", +# "access-vlan": 1 +# } +# } +# } +# } +# ] +# } +# } + + +# Using replaced + +# Before state: +# ------------- +# path: /rest/restconf/data/openconfig-interfaces:interfaces/ +# method: GET +# data: +# { +# "openconfig-interfaces:interfaces": { +# "interface": [ +# { +# "name": "1", +# "openconfig-if-ethernet:ethernet": { +# "openconfig-vlan:switched-vlan": { +# "config": { +# "interface-mode": "ACCESS", +# "access-vlan": 10 +# } +# } +# } +# }, +# { +# "name": "2", +# "openconfig-if-ethernet:ethernet": { +# "openconfig-vlan:switched-vlan": { +# "config": { +# "interface-mode": "ACCESS", +# "access-vlan": 20 +# } +# } +# } +# }, +# { +# "name": "3", +# "openconfig-if-ethernet:ethernet": { +# "openconfig-vlan:switched-vlan": { +# "config": { +# "interface-mode": "TRUNK", +# "native-vlan": 1, +# "trunk-vlans": [ +# 10 +# ] +# } +# } +# } +# } +# ] +# } +# } + +- name: Replace device configuration of listed L2 interfaces with provided configuration + exos_l2_interfaces: + config: + - access: + vlan: 20 + name: '1' + - name: '2' + trunk: + 
trunk_allowed_vlans: 10 + - name: '3' + trunk: + native_vlan: 10 + trunk_allowed_vlan: 20,30 + state: replaced + +# Module Execution Results: +# ------------------------- +# +# "before": [ +# { +# "access": { +# "vlan": 10 +# }, +# "name": "1", +# "trunk": null +# }, +# { +# "access": { +# "vlan": 20 +# }, +# "name": "2", +# "trunk": null +# }, +# { +# "access": null, +# "name": "3", +# "trunk": { +# "native_vlan": 1, +# "trunk_allowed_vlans": [ +# 10 +# ] +# } +# } +# ], +# +# "requests": [ +# { +# "data": { +# "openconfig-vlan:config": { +# "access-vlan": 20, +# "interface-mode": "ACCESS" +# } +# } +# "method": "PATCH", +# "path": "rest/restconf/data/openconfig-interfaces:interfaces/interface=1/openconfig-if-ethernet:ethernet/openconfig-vlan:switched-vlan/config" +# }, +# { +# "data": { +# "openconfig-vlan:config": { +# "trunk-vlans": [10], +# "interface-mode": "TRUNK" +# } +# } +# "method": "PATCH", +# "path": "rest/restconf/data/openconfig-interfaces:interfaces/interface=2/openconfig-if-ethernet:ethernet/openconfig-vlan:switched-vlan/config" +# }, +# { +# "data": { +# "openconfig-vlan:config": { +# "native-vlan": 10, +# "trunk-vlans": [20, 30] +# "interface-mode": "TRUNK" +# } +# } +# "method": "PATCH", +# "path": "rest/restconf/data/openconfig-interfaces:interfaces/interface=3/openconfig-if-ethernet:ethernet/openconfig-vlan:switched-vlan/config" +# } +# ], +# +# "after": [ +# { +# "access": { +# "vlan": 20 +# }, +# "name": "1", +# "trunk": null +# }, +# { +# "access": null, +# "name": "2", +# "trunk": { +# "native_vlan": null, +# "trunk_allowed_vlans": [ +# 10 +# ] +# } +# }, +# { +# "access": null, +# "name": "3", +# "trunk": { +# "native_vlan": 10, +# "trunk_allowed_vlans": [ +# 20, +# 30 +# ] +# } +# } +# ] +# +# After state: +# ------------- +# +# path: /rest/restconf/data/openconfig-interfaces:interfaces/ +# method: GET +# data: +# { +# "openconfig-interfaces:interfaces": { +# "interface": [ +# { +# "name": "1", +# "openconfig-if-ethernet:ethernet": { +# "openconfig-vlan:switched-vlan": { +# "config": { +# "interface-mode": "ACCESS", +# "access-vlan": 20 +# } +# } +# } +# }, +# { +# "name": "2", +# "openconfig-if-ethernet:ethernet": { +# "openconfig-vlan:switched-vlan": { +# "config": { +# "interface-mode": "TRUNK", +# "trunk-vlans": [ +# 10 +# ] +# } +# } +# } +# }, +# { +# "name": "3", +# "openconfig-if-ethernet:ethernet": { +# "openconfig-vlan:switched-vlan": { +# "config": { +# "interface-mode": "TRUNK", +# "native-vlan": 10, +# "trunk-vlans": [ +# 20, +# 30 +# ] +# } +# } +# } +# } +# ] +# } +# } + + +""" +RETURN = """ +before: + description: The configuration prior to the model invocation. + returned: always + sample: > + The configuration returned will always be in the same format + of the parameters above. + type: list +after: + description: The resulting configuration model invocation. + returned: when changed + sample: > + The configuration returned will always be in the same format + of the parameters above. + type: list +requests: + description: The set of requests pushed to the remote device. 
+ returned: always + type: list + sample: [{"data": "...", "method": "...", "path": "..."}, {"data": "...", "method": "...", "path": "..."}, {"data": "...", "method": "...", "path": "..."}] +""" + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.network.exos.argspec.l2_interfaces.l2_interfaces import L2_interfacesArgs +from ansible_collections.community.general.plugins.module_utils.network.exos.config.l2_interfaces.l2_interfaces import L2_interfaces + + +def main(): + """ + Main entry point for module execution + + :returns: the result form module invocation + """ + required_if = [('state', 'merged', ('config', )), + ('state', 'replaced', ('config', ))] + module = AnsibleModule(argument_spec=L2_interfacesArgs.argument_spec, required_if=required_if, + supports_check_mode=True) + + result = L2_interfaces(module).execute_module() + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/exos/exos_lldp_global.py b/plugins/modules/network/exos/exos_lldp_global.py new file mode 100644 index 0000000000..8ed421aa2b --- /dev/null +++ b/plugins/modules/network/exos/exos_lldp_global.py @@ -0,0 +1,429 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright 2019 Red Hat +# GNU General Public License v3.0+ +# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +############################################# +# WARNING # +############################################# +# +# This file is auto generated by the resource +# module builder playbook. +# +# Do not edit this file manually. +# +# Changes to this file will be over written +# by the resource module builder. +# +# Changes should be made in the model used to +# generate this file or in the resource module +# builder template. +# +############################################# + +""" +The module file for exos_lldp_global +""" + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = { + 'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community' +} + +DOCUMENTATION = ''' +--- +module: exos_lldp_global +short_description: Configure and manage Link Layer Discovery Protocol(LLDP) attributes on EXOS platforms. +description: This module configures and manages the Link Layer Discovery Protocol(LLDP) attributes on Extreme Networks EXOS platforms. +author: Ujwal Komarla (@ujwalkomarla) +notes: +- Tested against Extreme Networks EXOS version 30.2.1.8 on x460g2. +- This module works with connection C(httpapi). + See L(EXOS Platform Options,../network/user_guide/platform_exos.html) +options: + config: + description: A dictionary of LLDP options + type: dict + suboptions: + interval: + description: + - Frequency at which LLDP advertisements are sent (in seconds). By default - 30 seconds. + type: int + default: 30 + tlv_select: + description: + - This attribute can be used to specify the TLVs that need to be sent in the LLDP packets. 
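The exos_l2_interfaces examples above all report the same RESTCONF request shape: a PATCH against the interface's openconfig C(switched-vlan/config) node carrying either an access or a trunk payload. A small sketch that mirrors that documented shape (the helper name is invented; the real payloads are built by the module's L2_interfaces config class):

def switched_vlan_request(name, access_vlan=None, native_vlan=None, trunk_vlans=None):
    # Build one PATCH request of the form shown in the module examples.
    if access_vlan is not None:
        config = {'interface-mode': 'ACCESS', 'access-vlan': access_vlan}
    else:
        config = {'interface-mode': 'TRUNK', 'trunk-vlans': list(trunk_vlans or [])}
        if native_vlan is not None:
            config['native-vlan'] = native_vlan
    return {
        'method': 'PATCH',
        'path': 'rest/restconf/data/openconfig-interfaces:interfaces/'
                'interface=%s/openconfig-if-ethernet:ethernet/'
                'openconfig-vlan:switched-vlan/config' % name,
        'data': {'openconfig-vlan:config': config},
    }

print(switched_vlan_request('3', native_vlan=10, trunk_vlans=[20, 30]))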
By default, only system name and system description is sent + type: dict + suboptions: + management_address: + description: + - Used to specify the management address in TLV messages + type: bool + port_description: + description: + - Used to specify the port description TLV + type: bool + system_capabilities: + description: + - Used to specify the system capabilities TLV + type: bool + system_description: + description: + - Used to specify the system description TLV + type: bool + default: true + system_name: + description: + - Used to specify the system name TLV + type: bool + default: true + + state: + description: + - The state of the configuration after module completion. + type: str + choices: + - merged + - replaced + - deleted + default: merged +''' +EXAMPLES = """ +# Using merged + + +# Before state: +# ------------- +# path: /rest/restconf/data/openconfig_lldp:lldp/config +# method: GET +# data: +# { +# "openconfig_lldp:config": { +# "enabled": true, +# "hello-timer": 30, +# "suppress-tlv-advertisement": [ +# "PORT_DESCRIPTION", +# "SYSTEM_CAPABILITIES", +# "MANAGEMENT_ADDRESS" +# ], +# "system-description": "ExtremeXOS (X460G2-24t-10G4) version 30.2.1.8" +# "system-name": "X460G2-24t-10G4" +# } +# } + +- name: Merge provided LLDP configuration with device configuration + exos_lldp_global: + config: + interval: 10000 + tlv_select: + system_capabilities: true + state: merged + +# Module Execution Results: +# ------------------------- +# +# "before": [ +# { +# "interval": 30, +# "tlv_select": { +# "system_name": true, +# "system_description": true +# "port_description": false, +# "management_address": false, +# "system_capabilities": false +# } +# } +# ] +# +# "requests": [ +# { +# "data": { +# "openconfig_lldp:config": { +# "hello-timer": 10000, +# "suppress-tlv-advertisement": [ +# "PORT_DESCRIPTION", +# "MANAGEMENT_ADDRESS" +# ] +# } +# }, +# "method": "PATCH", +# "path": "/rest/restconf/data/openconfig_lldp:lldp/config" +# } +# ] +# +# "after": [ +# { +# "interval": 10000, +# "tlv_select": { +# "system_name": true, +# "system_description": true, +# "port_description": false, +# "management_address": false, +# "system_capabilities": true +# } +# } +# ] + + +# After state: +# ------------- +# path: /rest/restconf/data/openconfig_lldp:lldp/config +# method: GET +# data: +# { +# "openconfig_lldp:config": { +# "enabled": true, +# "hello-timer": 10000, +# "suppress-tlv-advertisement": [ +# "PORT_DESCRIPTION", +# "MANAGEMENT_ADDRESS" +# ], +# "system-description": "ExtremeXOS (X460G2-24t-10G4) version 30.2.1.8" +# "system-name": "X460G2-24t-10G4" +# } +# } + + +# Using replaced + + +# Before state: +# ------------- +# path: /rest/restconf/data/openconfig_lldp:lldp/config +# method: GET +# data: +# { +# "openconfig_lldp:config": { +# "enabled": true, +# "hello-timer": 30, +# "suppress-tlv-advertisement": [ +# "PORT_DESCRIPTION", +# "SYSTEM_CAPABILITIES", +# "MANAGEMENT_ADDRESS" +# ], +# "system-description": "ExtremeXOS (X460G2-24t-10G4) version 30.2.1.8" +# "system-name": "X460G2-24t-10G4" +# } +# } + +- name: Replace device configuration with provided LLDP configuration + exos_lldp_global: + config: + interval: 10000 + tlv_select: + system_capabilities: true + state: replaced + +# Module Execution Results: +# ------------------------- +# +# "before": [ +# { +# "interval": 30, +# "tlv_select": { +# "system_name": true, +# "system_description": true +# "port_description": false, +# "management_address": false, +# "system_capabilities": false +# } +# } +# ] +# +# "requests": [ +# { +# 
"data": { +# "openconfig_lldp:config": { +# "hello-timer": 10000, +# "suppress-tlv-advertisement": [ +# "SYSTEM_NAME", +# "SYSTEM_DESCRIPTION", +# "PORT_DESCRIPTION", +# "MANAGEMENT_ADDRESS" +# ] +# } +# }, +# "method": "PATCH", +# "path": "/rest/restconf/data/openconfig_lldp:lldp/config" +# } +# ] +# +# "after": [ +# { +# "interval": 10000, +# "tlv_select": { +# "system_name": false, +# "system_description": false, +# "port_description": false, +# "management_address": false, +# "system_capabilities": true +# } +# } +# ] + + +# After state: +# ------------- +# path: /rest/restconf/data/openconfig_lldp:lldp/config +# method: GET +# data: +# { +# "openconfig_lldp:config": { +# "enabled": true, +# "hello-timer": 10000, +# "suppress-tlv-advertisement": [ +# "SYSTEM_NAME", +# "SYSTEM_DESCRIPTION", +# "PORT_DESCRIPTION", +# "MANAGEMENT_ADDRESS" +# ], +# "system-description": "ExtremeXOS (X460G2-24t-10G4) version 30.2.1.8" +# "system-name": "X460G2-24t-10G4" +# } +# } + + +# Using deleted + + +# Before state: +# ------------- +# path: /rest/restconf/data/openconfig_lldp:lldp/config +# method: GET +# data: +# { +# "openconfig_lldp:config": { +# "enabled": true, +# "hello-timer": 10000, +# "suppress-tlv-advertisement": [ +# "SYSTEM_CAPABILITIES", +# "MANAGEMENT_ADDRESS" +# ], +# "system-description": "ExtremeXOS (X460G2-24t-10G4) version 30.2.1.8" +# "system-name": "X460G2-24t-10G4" +# } +# } + +- name: Delete attributes of given LLDP service (This won't delete the LLDP service itself) + exos_lldp_global: + config: + state: deleted + +# Module Execution Results: +# ------------------------- +# +# "before": [ +# { +# "interval": 10000, +# "tlv_select": { +# "system_name": true, +# "system_description": true, +# "port_description": true, +# "management_address": false, +# "system_capabilities": false +# } +# } +# ] +# +# "requests": [ +# { +# "data": { +# "openconfig_lldp:config": { +# "hello-timer": 30, +# "suppress-tlv-advertisement": [ +# "SYSTEM_CAPABILITIES", +# "PORT_DESCRIPTION", +# "MANAGEMENT_ADDRESS" +# ] +# } +# }, +# "method": "PATCH", +# "path": "/rest/restconf/data/openconfig_lldp:lldp/config" +# } +# ] +# +# "after": [ +# { +# "interval": 30, +# "tlv_select": { +# "system_name": true, +# "system_description": true, +# "port_description": false, +# "management_address": false, +# "system_capabilities": false +# } +# } +# ] + + +# After state: +# ------------- +# path: /rest/restconf/data/openconfig_lldp:lldp/config +# method: GET +# data: +# { +# "openconfig_lldp:config": { +# "enabled": true, +# "hello-timer": 30, +# "suppress-tlv-advertisement": [ +# "SYSTEM_CAPABILITIES", +# "PORT_DESCRIPTION", +# "MANAGEMENT_ADDRESS" +# ], +# "system-description": "ExtremeXOS (X460G2-24t-10G4) version 30.2.1.8" +# "system-name": "X460G2-24t-10G4" +# } +# } + + +""" +RETURN = """ +before: + description: The configuration as structured data prior to module invocation. + returned: always + sample: > + The configuration returned will always be in the same format + of the parameters above. + type: list +after: + description: The configuration as structured data after module completion. + returned: when changed + sample: > + The configuration returned will always be in the same format + of the parameters above. + type: list +requests: + description: The set of requests pushed to the remote device. 
+ returned: always + type: list + sample: [{"data": "...", "method": "...", "path": "..."}, {"data": "...", "method": "...", "path": "..."}, {"data": "...", "method": "...", "path": "..."}] +""" + + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.network.exos.argspec.lldp_global.lldp_global import Lldp_globalArgs +from ansible_collections.community.general.plugins.module_utils.network.exos.config.lldp_global.lldp_global import Lldp_global + + +def main(): + """ + Main entry point for module execution + + :returns: the result form module invocation + """ + required_if = [('state', 'merged', ('config',)), + ('state', 'replaced', ('config',))] + module = AnsibleModule(argument_spec=Lldp_globalArgs.argument_spec, required_if=required_if, + supports_check_mode=True) + + result = Lldp_global(module).execute_module() + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/exos/exos_lldp_interfaces.py b/plugins/modules/network/exos/exos_lldp_interfaces.py new file mode 100644 index 0000000000..744f76c24b --- /dev/null +++ b/plugins/modules/network/exos/exos_lldp_interfaces.py @@ -0,0 +1,679 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright 2019 Red Hat +# GNU General Public License v3.0+ +# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +############################################# +# WARNING # +############################################# +# +# This file is auto generated by the resource +# module builder playbook. +# +# Do not edit this file manually. +# +# Changes to this file will be over written +# by the resource module builder. +# +# Changes should be made in the model used to +# generate this file or in the resource module +# builder template. +# +############################################# +""" +The module file for exos_lldp_interfaces +""" + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = { + 'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community' +} + +DOCUMENTATION = ''' +--- +module: exos_lldp_interfaces +short_description: Manage link layer discovery protocol (LLDP) attributes of interfaces on EXOS platforms. +description: + - This module manages link layer discovery protocol (LLDP) attributes of interfaces on Extreme Networks EXOS platforms. +author: Jayalakshmi Viswanathan (@JayalakshmiV) +options: + config: + description: The list of link layer discovery protocol interface attribute configurations + type: list + elements: dict + suboptions: + name: + description: + - Name of the interface LLDP needs to be configured on. + type: str + required: True + enabled: + description: + - This is a boolean value to control disabling of LLDP on the interface C(name) + type: bool + state: + description: + - The state the configuration should be left in. 
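In the exos_lldp_global examples further above, each C(tlv_select) flag maps onto openconfig's C(suppress-tlv-advertisement) list: a TLV toggled off appears in the suppression list, and one toggled on is removed from it. A one-function sketch of that documented mapping (the real translation is done by the Lldp_global config class):

def suppressed_tlvs(tlv_select):
    # Every tlv_select flag that is False lands in the openconfig
    # suppress-tlv-advertisement list, upper-cased to match the REST
    # payloads shown in the examples.
    return sorted(name.upper() for name, wanted in tlv_select.items() if not wanted)

print(suppressed_tlvs({
    'system_name': True,
    'system_description': True,
    'port_description': False,
    'management_address': False,
    'system_capabilities': True,
}))
# -> ['MANAGEMENT_ADDRESS', 'PORT_DESCRIPTION']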
+ type: str + choices: + - merged + - replaced + - overridden + - deleted + default: merged +''' +EXAMPLES = """ +# Using merged + +# Before state: +# ------------- +# +# path: /rest/restconf/data/openconfig-lldp:lldp/interfaces?depth=4 +# method: GET +# data: +# { +# "openconfig-lldp:interfaces": { +# "interface": [ +# { +# "config": { +# "enabled": true, +# "name": "1" +# } +# }, +# { +# "config": { +# "enabled": true, +# "name": "2" +# } +# }, +# { +# "config": { +# "enabled": false, +# "name": "3" +# } +# }, +# { +# "config": { +# "enabled": true, +# "name": "4" +# } +# }, +# { +# "config": { +# "enabled": false, +# "name": "5" +# } +# } +# ] +# } +# } + +- name: Merge provided configuration with device configuration + exos_lldp_interfaces: + config: + - name: '2' + enabled: false + - name: '5' + enabled: true + state: merged + +# Module Execution Results: +# ------------------------- +# +# "before": +# - name: '1' +# enabled: True +# - name: '2' +# enabled: True +# - name: '3' +# enabled: False +# - name: '4' +# enabled: True +# - name: '5' +# enabled: False +# +# "requests": [ +# { +# "data": | +# { +# "openconfig-lldp:config": { +# "enabled": false, +# "name": "2" +# } +# } +# "method": "PATCH", +# "path": "/rest/restconf/data/openconfig-lldp:lldp/interfaces/interface=2/config" +# }, +# { +# "data": | +# { +# "openconfig-lldp:config": { +# "enabled": true, +# "name": "5" +# } +# } +# "method": "PATCH", +# "path": "/rest/restconf/data/openconfig-lldp:lldp/interfaces/interface=5/config" +# } +# ] +# +# "after": +# - name: '1' +# enabled: True +# - name: '2' +# enabled: False +# - name: '3' +# enabled: False +# - name: '4' +# enabled: True +# - name: '5' +# enabled: True + +# After state: +# ------------- +# +# path: /rest/restconf/data/openconfig-lldp:lldp/interfaces?depth=4 +# method: GET +# data: +# { +# "openconfig-lldp:interfaces": { +# "interface": [ +# { +# "config": { +# "enabled": true, +# "name": "1" +# } +# }, +# { +# "config": { +# "enabled": false, +# "name": "2" +# } +# }, +# { +# "config": { +# "enabled": false, +# "name": "3" +# } +# }, +# { +# "config": { +# "enabled": true, +# "name": "4" +# } +# }, +# { +# "config": { +# "enabled": true, +# "name": "5" +# } +# } +# ] +# } +# } + + +# Using replaced + +# Before state: +# ------------- +# +# path: /rest/restconf/data/openconfig-lldp:lldp/interfaces?depth=4 +# method: GET +# data: +# { +# "openconfig-lldp:interfaces": { +# "interface": [ +# { +# "config": { +# "enabled": true, +# "name": "1" +# } +# }, +# { +# "config": { +# "enabled": true, +# "name": "2" +# } +# }, +# { +# "config": { +# "enabled": false, +# "name": "3" +# } +# }, +# { +# "config": { +# "enabled": true, +# "name": "4" +# } +# }, +# { +# "config": { +# "enabled": false, +# "name": "5" +# } +# } +# ] +# } +# } + +- name: Replaces device configuration of listed lldp_interfaces with provided configuration + exos_lldp_interfaces: + config: + - name: '1' + enabled: false + - name: '3' + enabled: true + state: merged + +# Module Execution Results: +# ------------------------- +# +# "before": +# - name: '1' +# enabled: True +# - name: '2' +# enabled: True +# - name: '3' +# enabled: False +# - name: '4' +# enabled: True +# - name: '5' +# enabled: False +# +# "requests": [ +# { +# "data": | +# { +# "openconfig-lldp:config": { +# "enabled": false, +# "name": "1" +# } +# } +# "method": "PATCH", +# "path": "/rest/restconf/data/openconfig-lldp:lldp/interfaces/interface=1/config" +# }, +# { +# "data": | +# { +# "openconfig-lldp:config": { +# "enabled": true, +# 
"name": "3" +# } +# } +# "method": "PATCH", +# "path": "/rest/restconf/data/openconfig-lldp:lldp/interfaces/interface=3/config" +# } +# ] +# +# "after": +# - name: '1' +# enabled: False +# - name: '2' +# enabled: True +# - name: '3' +# enabled: True +# - name: '4' +# enabled: True +# - name: '5' +# enabled: False + +# After state: +# ------------- +# +# path: /rest/restconf/data/openconfig-lldp:lldp/interfaces?depth=4 +# method: GET +# data: +# { +# "openconfig-lldp:interfaces": { +# "interface": [ +# { +# "config": { +# "enabled": false, +# "name": "1" +# } +# }, +# { +# "config": { +# "enabled": true, +# "name": "2" +# } +# }, +# { +# "config": { +# "enabled": true, +# "name": "3" +# } +# }, +# { +# "config": { +# "enabled": true, +# "name": "4" +# } +# }, +# { +# "config": { +# "enabled": false, +# "name": "5" +# } +# } +# ] +# } +# } + + +# Using deleted + +# Before state: +# ------------- +# +# path: /rest/restconf/data/openconfig-lldp:lldp/interfaces?depth=4 +# method: GET +# data: +# { +# "openconfig-lldp:interfaces": { +# "interface": [ +# { +# "config": { +# "enabled": false, +# "name": "1" +# }, +# }, +# { +# "config": { +# "enabled": false, +# "name": "2" +# }, +# }, +# { +# "config": { +# "enabled": false, +# "name": "3" +# }, +# } +# ] +# } +# } + +- name: Delete lldp interface configuration (this will not delete other lldp configuration) + exos_lldp_interfaces: + config: + - name: '1' + - name: '3' + state: deleted + +# Module Execution Results: +# ------------------------- +# +# "before": +# - name: '1' +# enabled: False +# - name: '2' +# enabled: False +# - name: '3' +# enabled: False +# +# "requests": [ +# { +# "data": | +# { +# "openconfig-lldp:config": { +# "enabled": true, +# "name": "1" +# } +# } +# "method": "PATCH", +# "path": "/rest/restconf/data/openconfig-lldp:lldp/interfaces/interface=1/config" +# }, +# { +# "data": | +# { +# "openconfig-lldp:config": { +# "enabled": true, +# "name": "3" +# } +# } +# "method": "PATCH", +# "path": "/rest/restconf/data/openconfig-lldp:lldp/interfaces/interface=3/config" +# } +# ] +# +# "after": +# - name: '1' +# enabled: True +# - name: '2' +# enabled: False +# - name: '3' +# enabled: True +# +# After state: +# ------------- +# path: /rest/restconf/data/openconfig-lldp:lldp/interfaces?depth=4 +# method: GET +# data: +# { +# "openconfig-lldp:interfaces": { +# "interface": [ +# { +# "config": { +# "enabled": true, +# "name": "1" +# }, +# }, +# { +# "config": { +# "enabled": false, +# "name": "2" +# }, +# }, +# { +# "config": { +# "enabled": true, +# "name": "3" +# }, +# } +# ] +# } +# } + + +# Using overridden + +# Before state: +# ------------- +# +# path: /rest/restconf/data/openconfig-lldp:lldp/interfaces?depth=4 +# method: GET +# data: +# { +# "openconfig-lldp:interfaces": { +# "interface": [ +# { +# "config": { +# "enabled": true, +# "name": "1" +# } +# }, +# { +# "config": { +# "enabled": true, +# "name": "2" +# } +# }, +# { +# "config": { +# "enabled": false, +# "name": "3" +# } +# }, +# { +# "config": { +# "enabled": true, +# "name": "4" +# } +# }, +# { +# "config": { +# "enabled": false, +# "name": "5" +# } +# } +# ] +# } +# } + +- name: Override device configuration of all lldp_interfaces with provided configuration + exos_lldp_interfaces: + config: + - name: '3' + enabled: true + state: overridden + +# Module Execution Results: +# ------------------------- +# +# "before": +# - name: '1' +# enabled: True +# - name: '2' +# enabled: True +# - name: '3' +# enabled: False +# - name: '4' +# enabled: True +# - name: '5' +# 
enabled: False +# +# "requests": [ +# { +# "data": | +# { +# "openconfig-lldp:config": { +# "enabled": true, +# "name": "5" +# } +# } +# "method": "PATCH", +# "path": "/rest/restconf/data/openconfig-lldp:lldp/interfaces/interface=5/config" +# }, +# { +# "data": | +# { +# "openconfig-lldp:config": { +# "enabled": true, +# "name": "3" +# } +# } +# "method": "PATCH", +# "path": "/rest/restconf/data/openconfig-lldp:lldp/interfaces/interface=3/config" +# } +# ] +# +# "after": +# - name: '1' +# enabled: True +# - name: '2' +# enabled: True +# - name: '3' +# enabled: True +# - name: '4' +# enabled: True +# - name: '5' +# enabled: True + +# After state: +# ------------- +# +# path: /rest/restconf/data/openconfig-lldp:lldp/interfaces?depth=4 +# method: GET +# data: +# { +# "openconfig-lldp:interfaces": { +# "interface": [ +# { +# "config": { +# "enabled": true, +# "name": "1" +# } +# }, +# { +# "config": { +# "enabled": true, +# "name": "2" +# } +# }, +# { +# "config": { +# "enabled": true, +# "name": "3" +# } +# }, +# { +# "config": { +# "enabled": true, +# "name": "4" +# } +# }, +# { +# "config": { +# "enabled": true, +# "name": "5" +# } +# } +# ] +# } +# } + + +""" +RETURN = """ +before: + description: The configuration prior to the model invocation. + returned: always + sample: > + The configuration returned will always be in the same format + of the parameters above. + type: list +after: + description: The resulting configuration model invocation. + returned: when changed + sample: > + The configuration returned will always be in the same format + of the parameters above. + type: list +requests: + description: The set of requests pushed to the remote device. + returned: always + type: list + sample: [{"data": "...", "method": "...", "path": "..."}, {"data": "...", "method": "...", "path": "..."}, {"data": "...", "method": "...", "path": "..."}] +""" + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.network.exos.argspec.lldp_interfaces.lldp_interfaces import Lldp_interfacesArgs +from ansible_collections.community.general.plugins.module_utils.network.exos.config.lldp_interfaces.lldp_interfaces import Lldp_interfaces + + +def main(): + """ + Main entry point for module execution + + :returns: the result form module invocation + """ + required_if = [('state', 'merged', ('config', )), + ('state', 'replaced', ('config', ))] + module = AnsibleModule(argument_spec=Lldp_interfacesArgs.argument_spec, + required_if=required_if, + supports_check_mode=True) + + result = Lldp_interfaces(module).execute_module() + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/exos/exos_vlans.py b/plugins/modules/network/exos/exos_vlans.py new file mode 100644 index 0000000000..d828c7cd70 --- /dev/null +++ b/plugins/modules/network/exos/exos_vlans.py @@ -0,0 +1,758 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright 2019 Red Hat +# GNU General Public License v3.0+ +# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +############################################# +# WARNING # +############################################# +# +# This file is auto generated by the resource +# module builder playbook. +# +# Do not edit this file manually. +# +# Changes to this file will be over written +# by the resource module builder. +# +# Changes should be made in the model used to +# generate this file or in the resource module +# builder template. 
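#
# As a sketch of the wiring this generator emits (using only names that
# appear later in this file), every generated resource module pairs an
# argspec class with a config class and hands control to execute_module(),
# exactly as main() at the bottom of this file does:
#
#     module = AnsibleModule(argument_spec=VlansArgs.argument_spec,
#                            supports_check_mode=True)
#     result = Vlans(module).execute_module()
#     module.exit_json(**result)
#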
+# +############################################# + +""" +The module file for exos_vlans +""" + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = { + 'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community' +} + +DOCUMENTATION = ''' +--- +module: exos_vlans +short_description: Manage VLANs on Extreme Networks EXOS devices. +description: This module provides declarative management of VLANs on Extreme Networks EXOS network devices. +author: Jayalakshmi Viswanathan (@jayalakshmiV) +notes: + - Tested against EXOS 30.2.1.8 + - This module works with connection C(httpapi). + See L(EXOS Platform Options,../network/user_guide/platform_exos.html) +options: + config: + description: A dictionary of VLANs options + type: list + elements: dict + suboptions: + name: + description: + - Ascii name of the VLAN. + type: str + vlan_id: + description: + - ID of the VLAN. Range 1-4094 + type: int + required: True + state: + description: + - Operational state of the VLAN + type: str + choices: + - active + - suspend + default: active + state: + description: + - The state the configuration should be left in + type: str + choices: + - merged + - replaced + - overridden + - deleted + default: merged +''' +EXAMPLES = """ +# Using deleted + +# Before state: +# ------------- +# +# path: /rest/restconf/data/openconfig-vlan:vlans/ +# method: GET +# data: +# { +# "openconfig-vlan:vlans": { +# "vlan": [ +# { +# "config": { +# "name": "Default", +# "status": "ACTIVE", +# "tpid": "oc-vlan-types:TPID_0x8100", +# "vlan-id": 1 +# }, +# }, +# { +# "config": { +# "name": "vlan_10", +# "status": "ACTIVE", +# "tpid": "oc-vlan-types:TPID_0x8100", +# "vlan-id": 10 +# }, +# }, +# { +# "config": { +# "name": "vlan_20", +# "status": "ACTIVE", +# "tpid": "oc-vlan-types:TPID_0x8100", +# "vlan-id": 20 +# }, +# }, +# { +# "config": { +# "name": "vlan_30", +# "status": "ACTIVE", +# "tpid": "oc-vlan-types:TPID_0x8100", +# "vlan-id": 30 +# }, +# } +# ] +# } +# } + +- name: Delete attributes of given VLANs + exos_vlans: + config: + - vlan_id: 10 + - vlan_id: 20 + - vlan_id: 30 + state: deleted + +# Module Execution Results: +# ------------------------- +# +# "after": [ +# { +# "name": "Default", +# "state": "active", +# "vlan_id": 1 +# } +# ], +# +# "before": [ +# { +# "name": "Default", +# "state": "active", +# "vlan_id": 1 +# }, +# { +# "name": "vlan_10", +# "state": "active", +# "vlan_id": 10 +# }, +# { +# "name": "vlan_20", +# "state": "active", +# "vlan_id": 20 +# } +# { +# "name": "vlan_30", +# "state": "active", +# "vlan_id": 30 +# } +# ], +# +# "requests": [ +# { +# "data": null, +# "method": "DELETE", +# "path": "/rest/restconf/data/openconfig-vlan:vlans/vlan=10" +# }, +# { +# "data": null, +# "method": "DELETE", +# "path": "/rest/restconf/data/openconfig-vlan:vlans/vlan=20" +# }, +# { +# "data": null, +# "method": "DELETE", +# "path": "/rest/restconf/data/openconfig-vlan:vlans/vlan=30" +# } +# ] +# +# +# After state: +# ------------- +# +# path: /rest/restconf/data/openconfig-vlan:vlans/ +# method: GET +# data: +# { +# "openconfig-vlan:vlans": { +# "vlan": [ +# { +# "config": { +# "name": "Default", +# "status": "ACTIVE", +# "tpid": "oc-vlan-types:TPID_0x8100", +# "vlan-id": 1 +# }, +# } +# ] +# } +# } + + +# Using merged + +# Before state: +# ------------- +# path: /rest/restconf/data/openconfig-vlan:vlans/ +# method: GET +# data: +# { +# "openconfig-vlan:vlans": { +# "vlan": [ +# { +# "config": { +# "name": "Default", +# "status": "ACTIVE", +# 
"tpid": "oc-vlan-types:TPID_0x8100", +# "vlan-id": 1 +# }, +# } +# ] +# } +# } + +- name: Merge provided configuration with device configuration + exos_vlans: + config: + - name: vlan_10 + vlan_id: 10 + state: active + - name: vlan_20 + vlan_id: 20 + state: active + - name: vlan_30 + vlan_id: 30 + state: active + state: merged + +# Module Execution Results: +# ------------------------- +# +# "after": [ +# { +# "name": "Default", +# "state": "active", +# "vlan_id": 1 +# }, +# { +# "name": "vlan_10", +# "state": "active", +# "vlan_id": 10 +# }, +# { +# "name": "vlan_20", +# "state": "active", +# "vlan_id": 20 +# }, +# { +# "name": "vlan_30", +# "state": "active", +# "vlan_id": 30 +# } +# ], +# +# "before": [ +# { +# "name": "Default", +# "state": "active", +# "vlan_id": 1 +# } +# ], +# +# "requests": [ +# { +# "data": { +# "openconfig-vlan:vlan": [ +# { +# "config": { +# "name": "vlan_10", +# "status": "ACTIVE", +# "tpid": "oc-vlan-types:TPID_0x8100", +# "vlan-id": 10 +# } +# } +# ] +# }, +# "method": "POST", +# "path": "/rest/restconf/data/openconfig-vlan:vlans/" +# }, +# { +# "data": { +# "openconfig-vlan:vlan": [ +# { +# "config": { +# "name": "vlan_20", +# "status": "ACTIVE", +# "tpid": "oc-vlan-types:TPID_0x8100", +# "vlan-id": 20 +# } +# } +# ] +# }, +# "method": "POST", +# "path": "/rest/restconf/data/openconfig-vlan:vlans/" +# }, +# "data": { +# "openconfig-vlan:vlan": [ +# { +# "config": { +# "name": "vlan_30", +# "status": "ACTIVE", +# "tpid": "oc-vlan-types:TPID_0x8100", +# "vlan-id": 30 +# } +# } +# ] +# }, +# "method": "POST", +# "path": "/rest/restconf/data/openconfig-vlan:vlans/" +# } +# ] +# +# +# After state: +# ------------- +# +# path: /rest/restconf/data/openconfig-vlan:vlans/ +# method: GET +# data: +# { +# "openconfig-vlan:vlans": { +# "vlan": [ +# { +# "config": { +# "name": "Default", +# "status": "ACTIVE", +# "tpid": "oc-vlan-types:TPID_0x8100", +# "vlan-id": 1 +# }, +# }, +# { +# "config": { +# "name": "vlan_10", +# "status": "ACTIVE", +# "tpid": "oc-vlan-types:TPID_0x8100", +# "vlan-id": 10 +# }, +# }, +# { +# "config": { +# "name": "vlan_20", +# "status": "ACTIVE", +# "tpid": "oc-vlan-types:TPID_0x8100", +# "vlan-id": 20 +# }, +# }, +# { +# "config": { +# "name": "vlan_30", +# "status": "ACTIVE", +# "tpid": "oc-vlan-types:TPID_0x8100", +# "vlan-id": 30 +# }, +# } +# ] +# } +# } + + +# Using overridden + +# Before state: +# ------------- +# +# path: /rest/restconf/data/openconfig-vlan:vlans/ +# method: GET +# data: +# { +# "openconfig-vlan:vlans": { +# "vlan": [ +# { +# "config": { +# "name": "Default", +# "status": "ACTIVE", +# "tpid": "oc-vlan-types:TPID_0x8100", +# "vlan-id": 1 +# }, +# }, +# { +# "config": { +# "name": "vlan_10", +# "status": "ACTIVE", +# "tpid": "oc-vlan-types:TPID_0x8100", +# "vlan-id": 10 +# }, +# }, +# { +# "config": { +# "name": "vlan_20", +# "status": "ACTIVE", +# "tpid": "oc-vlan-types:TPID_0x8100", +# "vlan-id": 20 +# }, +# }, +# { +# "config": { +# "name": "vlan_30", +# "status": "ACTIVE", +# "tpid": "oc-vlan-types:TPID_0x8100", +# "vlan-id": 30 +# }, +# } +# ] +# } +# } + +- name: Override device configuration of all VLANs with provided configuration + exos_vlans: + config: + - name: TEST_VLAN10 + vlan_id: 10 + state: overridden + +# Module Execution Results: +# ------------------------- +# +# "after": [ +# { +# "name": "Default", +# "state": "active", +# "vlan_id": 1 +# }, +# { +# "name": "TEST_VLAN10", +# "state": "active", +# "vlan_id": 10 +# }, +# ], +# +# "before": [ +# { +# "name": "Default", +# "state": "active", +# "vlan_id": 
1 +# }, +# { +# "name": "vlan_10", +# "state": "active", +# "vlan_id": 10 +# }, +# { +# "name": "vlan_20", +# "state": "active", +# "vlan_id": 20 +# }, +# { +# "name": "vlan_30", +# "state": "active", +# "vlan_id": 30 +# } +# ], +# +# "requests": [ +# { +# "data": { +# "openconfig-vlan:vlan": { +# "vlan": [ +# { +# "config": { +# "name": "TEST_VLAN10", +# "status": "ACTIVE", +# "tpid": "oc-vlan-types:TPID_0x8100", +# "vlan-id": 10 +# } +# } +# ] +# } +# } +# }, +# "method": "PATCH", +# "path": "/rest/restconf/data/openconfig-vlan:vlans/" +# }, +# { +# "data": null, +# "method": "DELETE", +# "path": "/rest/restconf/data/openconfig-vlan:vlans/vlan=20" +# }, +# { +# "data": null, +# "method": "DELETE", +# "path": "/rest/restconf/data/openconfig-vlan:vlans/vlan=30" +# } +# ] +# +# +# After state: +# ------------- +# +# path: /rest/restconf/data/openconfig-vlan:vlans/ +# method: GET +# data: +# { +# "openconfig-vlan:vlans": { +# "vlan": [ +# { +# "config": { +# "name": "Default", +# "status": "ACTIVE", +# "tpid": "oc-vlan-types:TPID_0x8100", +# "vlan-id": 1 +# }, +# }, +# { +# "config": { +# "name": "TEST_VLAN10", +# "status": "ACTIVE", +# "tpid": "oc-vlan-types:TPID_0x8100", +# "vlan-id": 10 +# }, +# } +# ] +# } +# } + + +# Using replaced + +# Before state: +# ------------- +# +# path: /rest/restconf/data/openconfig-vlan:vlans/ +# method: GET +# data: +# { +# "openconfig-vlan:vlans": { +# "vlan": [ +# { +# "config": { +# "name": "Default", +# "status": "ACTIVE", +# "tpid": "oc-vlan-types:TPID_0x8100", +# "vlan-id": 1 +# }, +# }, +# { +# "config": { +# "name": "vlan_10", +# "status": "ACTIVE", +# "tpid": "oc-vlan-types:TPID_0x8100", +# "vlan-id": 10 +# }, +# }, +# { +# "config": { +# "name": "vlan_20", +# "status": "ACTIVE", +# "tpid": "oc-vlan-types:TPID_0x8100", +# "vlan-id": 20 +# }, +# }, +# { +# "config": { +# "name": "vlan_30", +# "status": "ACTIVE", +# "tpid": "oc-vlan-types:TPID_0x8100", +# "vlan-id": 30 +# }, +# } +# ] +# } +# } + +- name: Replaces device configuration of listed VLANs with provided configuration + exos_vlans: + config: + - name: Test_VLAN20 + vlan_id: 20 + - name: Test_VLAN30 + vlan_id: 30 + state: replaced + +# Module Execution Results: +# ------------------------- +# +# "after": [ +# { +# "name": "Default", +# "state": "active", +# "vlan_id": 1 +# }, +# { +# "name": "vlan_10", +# "state": "active", +# "vlan_id": 10 +# }, +# { +# "name": "TEST_VLAN20", +# "state": "active", +# "vlan_id": 20 +# }, +# { +# "name": "TEST_VLAN30", +# "state": "active", +# "vlan_id": 30 +# } +# ], +# +# "before": [ +# { +# "name": "Default", +# "state": "active", +# "vlan_id": 1 +# }, +# { +# "name": "vlan_10", +# "state": "active", +# "vlan_id": 10 +# }, +# { +# "name": "vlan_20", +# "state": "active", +# "vlan_id": 20 +# }, +# { +# "name": "vlan_30", +# "state": "active", +# "vlan_id": 30 +# } +# ], +# +# "requests": [ +# { +# "data": { +# "openconfig-vlan:vlan": { +# "vlan": [ +# { +# "config": { +# "name": "TEST_VLAN20", +# "status": "ACTIVE", +# "tpid": "oc-vlan-types:TPID_0x8100", +# "vlan-id": 20 +# } +# "config": { +# "name": "TEST_VLAN30", +# "status": "ACTIVE", +# "tpid": "oc-vlan-types:TPID_0x8100", +# "vlan-id": 30 +# } +# } +# ] +# }, +# "method": "PATCH", +# "path": "/rest/restconf/data/openconfig-vlan:vlans/" +# } +# ] +# +# After state: +# ------------- +# +# path: /rest/restconf/data/openconfig-vlan:vlans/ +# method: GET +# data: +# { +# "openconfig-vlan:vlans": { +# "vlan": [ +# { +# "config": { +# "name": "Default", +# "status": "ACTIVE", +# "tpid": 
"oc-vlan-types:TPID_0x8100", +# "vlan-id": 1 +# }, +# }, +# { +# "config": { +# "name": "vlan_10", +# "status": "ACTIVE", +# "tpid": "oc-vlan-types:TPID_0x8100", +# "vlan-id": 10 +# }, +# }, +# { +# "config": { +# "name": "TEST_VLAN20", +# "status": "ACTIVE", +# "tpid": "oc-vlan-types:TPID_0x8100", +# "vlan-id": 20 +# }, +# }, +# { +# "config": { +# "name": "TEST_VLAN30", +# "status": "ACTIVE", +# "tpid": "oc-vlan-types:TPID_0x8100", +# "vlan-id": 30 +# }, +# } +# ] +# } +# } + + +""" +RETURN = """ +before: + description: The configuration prior to the model invocation. + returned: always + sample: > + The configuration returned will always be in the same format + of the parameters above. + type: list +after: + description: The resulting configuration model invocation. + returned: when changed + sample: > + The configuration returned will always be in the same format + of the parameters above. + type: list +requests: + description: The set of requests pushed to the remote device. + returned: always + type: list + sample: [{"data": "...", "method": "...", "path": "..."}, {"data": "...", "method": "...", "path": "..."}, {"data": "...", "method": "...", "path": "..."}] +""" + + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.network.exos.argspec.vlans.vlans import VlansArgs +from ansible_collections.community.general.plugins.module_utils.network.exos.config.vlans.vlans import Vlans + + +def main(): + """ + Main entry point for module execution + + :returns: the result form module invocation + """ + required_if = [('state', 'merged', ('config',)), + ('state', 'replaced', ('config',))] + module = AnsibleModule(argument_spec=VlansArgs.argument_spec, required_if=required_if, + supports_check_mode=True) + + result = Vlans(module).execute_module() + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/f5/bigip_asm_policy.py b/plugins/modules/network/f5/bigip_asm_policy.py new file mode 100644 index 0000000000..e15bf813a5 --- /dev/null +++ b/plugins/modules/network/f5/bigip_asm_policy.py @@ -0,0 +1,1062 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright: (c) 2017, F5 Networks Inc. +# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['deprecated'], + 'supported_by': 'certified'} + +DOCUMENTATION = r''' +--- +module: bigip_asm_policy +short_description: Manage BIG-IP ASM policies +description: + - Manage BIG-IP ASM policies. +deprecated: + removed_in: '2.12' + alternative: bigip_asm_policy_manage + why: > + The bigip_asm_policy module has been split into three new modules to handle import, export and general policy + management. This will allow scalability of the asm policy management as well as ease of maintenance. + Additionally to further reduce the burden of having multiple smaller module F5 has created asm_policy + role in Ansible Galaxy for a more declarative way of ASM policy management. +options: + active: + description: + - If C(yes) will apply and activate existing inactive policy. If C(no), it will + deactivate existing active policy. Generally should be C(yes) only in cases where + you want to activate new or existing policy. + default: no + type: bool + name: + description: + - The ASM policy to manage or create. 
+ required: True + state: + description: + - When C(state) is C(present), and C(file) or C(template) parameter is provided, + new ASM policy is imported and created with the given C(name). + - When C(state) is present and no C(file) or C(template) parameter is provided + new blank ASM policy is created with the given C(name). + - When C(state) is C(absent), ensures that the policy is removed, even if it is + currently active. + choices: + - present + - absent + default: present + file: + description: + - Full path to a policy file to be imported into the BIG-IP ASM. + - Policy files exported from newer versions of BIG-IP cannot be imported into older + versions of BIG-IP. The opposite, however, is true; you can import older into + newer. + template: + description: + - An ASM policy built-in template. If the template does not exist we will raise an error. + - Once the policy has been created, this value cannot change. + - The C(Comprehensive), C(Drupal), C(Fundamental), C(Joomla), + C(Vulnerability Assessment Baseline), and C(Wordpress) templates are only available + on BIG-IP versions >= 13. + choices: + - ActiveSync v1.0 v2.0 (http) + - ActiveSync v1.0 v2.0 (https) + - Comprehensive + - Drupal + - Fundamental + - Joomla + - LotusDomino 6.5 (http) + - LotusDomino 6.5 (https) + - OWA Exchange 2003 (http) + - OWA Exchange 2003 (https) + - OWA Exchange 2003 with ActiveSync (http) + - OWA Exchange 2003 with ActiveSync (https) + - OWA Exchange 2007 (http) + - OWA Exchange 2007 (https) + - OWA Exchange 2007 with ActiveSync (http) + - OWA Exchange 2007 with ActiveSync (https) + - OWA Exchange 2010 (http) + - OWA Exchange 2010 (https) + - Oracle 10g Portal (http) + - Oracle 10g Portal (https) + - Oracle Applications 11i (http) + - Oracle Applications 11i (https) + - PeopleSoft Portal 9 (http) + - PeopleSoft Portal 9 (https) + - Rapid Deployment Policy + - SAP NetWeaver 7 (http) + - SAP NetWeaver 7 (https) + - SharePoint 2003 (http) + - SharePoint 2003 (https) + - SharePoint 2007 (http) + - SharePoint 2007 (https) + - SharePoint 2010 (http) + - SharePoint 2010 (https) + - Vulnerability Assessment Baseline + - Wordpress + partition: + description: + - Device partition to manage resources on. 
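    # Usage note (playbook name is hypothetical): the argument spec below
    # also reads this value from the F5_PARTITION environment variable, e.g.
    #
    #   F5_PARTITION=Common ansible-playbook manage_asm_policy.yml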
+ default: Common +extends_documentation_fragment: +- f5networks.f5_modules.f5 + +author: + - Wojciech Wypior (@wojtek0806) + - Tim Rupp (@caphrim007) +''' + +EXAMPLES = r''' +- name: Import and activate ASM policy + bigip_asm_policy: + name: new_asm_policy + file: /root/asm_policy.xml + active: yes + state: present + provider: + server: lb.mydomain.com + user: admin + password: secret + delegate_to: localhost + +- name: Import ASM policy from template + bigip_asm_policy: + name: new_sharepoint_policy + template: SharePoint 2007 (http) + state: present + provider: + server: lb.mydomain.com + user: admin + password: secret + delegate_to: localhost + +- name: Create blank ASM policy + bigip_asm_policy: + name: new_blank_policy + state: present + provider: + server: lb.mydomain.com + user: admin + password: secret + delegate_to: localhost + +- name: Create blank ASM policy and activate + bigip_asm_policy: + name: new_blank_policy + active: yes + state: present + provider: + server: lb.mydomain.com + user: admin + password: secret + delegate_to: localhost + +- name: Activate ASM policy + bigip_asm_policy: + name: inactive_policy + active: yes + state: present + provider: + server: lb.mydomain.com + user: admin + password: secret + delegate_to: localhost + +- name: Deactivate ASM policy + bigip_asm_policy: + name: active_policy + state: present + provider: + server: lb.mydomain.com + user: admin + password: secret + delegate_to: localhost + +- name: Import and activate ASM policy in Role + bigip_asm_policy: + name: new_asm_policy + file: "{{ role_path }}/files/asm_policy.xml" + active: yes + state: present + provider: + server: lb.mydomain.com + user: admin + password: secret + delegate_to: localhost + +- name: Import ASM binary policy + bigip_asm_policy: + name: new_asm_policy + file: "/root/asm_policy.plc" + active: yes + state: present + provider: + server: lb.mydomain.com + user: admin + password: secret + delegate_to: localhost +''' + +RETURN = r''' +active: + description: Set when activating/deactivating ASM policy + returned: changed + type: bool + sample: yes +state: + description: Action performed on the target device. + returned: changed + type: str + sample: absent +file: + description: Local path to ASM policy file. 
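  # Illustrative only: any of these return values can be captured with
  # register and inspected later, e.g.
  #
  #   - bigip_asm_policy:
  #       name: new_asm_policy
  #       file: /root/asm_policy.xml
  #       state: present
  #     register: policy_result
  #
  #   - debug:
  #       var: policy_result.file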
+ returned: changed + type: str + sample: /root/some_policy.xml +template: + description: Name of the built-in ASM policy template + returned: changed + type: str + sample: OWA Exchange 2007 (https) +name: + description: Name of the ASM policy to be managed/created + returned: changed + type: str + sample: Asm_APP1_Transparent +''' + +import os +import time + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.basic import env_fallback +from distutils.version import LooseVersion + + +try: + from library.module_utils.network.f5.bigip import F5RestClient + from library.module_utils.network.f5.common import F5ModuleError + from library.module_utils.network.f5.common import AnsibleF5Parameters + from library.module_utils.network.f5.common import fq_name + from library.module_utils.network.f5.common import f5_argument_spec + from library.module_utils.network.f5.icontrol import upload_file + from library.module_utils.network.f5.icontrol import tmos_version + from library.module_utils.network.f5.icontrol import module_provisioned +except ImportError: + from ansible_collections.f5networks.f5_modules.plugins.module_utils.network.f5.bigip import F5RestClient + from ansible_collections.f5networks.f5_modules.plugins.module_utils.network.f5.common import F5ModuleError + from ansible_collections.f5networks.f5_modules.plugins.module_utils.network.f5.common import AnsibleF5Parameters + from ansible_collections.f5networks.f5_modules.plugins.module_utils.network.f5.common import fq_name + from ansible_collections.f5networks.f5_modules.plugins.module_utils.network.f5.common import f5_argument_spec + from ansible_collections.f5networks.f5_modules.plugins.module_utils.network.f5.icontrol import upload_file + from ansible_collections.f5networks.f5_modules.plugins.module_utils.network.f5.icontrol import tmos_version + from ansible_collections.f5networks.f5_modules.plugins.module_utils.network.f5.icontrol import module_provisioned + + +class Parameters(AnsibleF5Parameters): + updatables = [ + 'active', + ] + + returnables = [ + 'name', + 'template', + 'file', + 'active', + ] + + api_attributes = [ + 'name', + 'file', + 'active', + ] + api_map = { + 'filename': 'file', + } + + @property + def template_link(self): + if self._values['template_link'] is not None: + return self._values['template_link'] + collection = self._templates_from_device() + for resource in collection['items']: + if resource['name'] == self.template.upper(): + return dict(link=resource['selfLink']) + return None + + @property + def full_path(self): + return fq_name(self.name) + + def _templates_from_device(self): + uri = "https://{0}:{1}/mgmt/tm/asm/policy-templates/".format( + self.client.provider['server'], + self.client.provider['server_port'], + ) + resp = self.client.api.get(uri) + + try: + response = resp.json() + except ValueError as ex: + raise F5ModuleError(str(ex)) + + if 'code' in response and response['code'] == 400: + if 'message' in response: + raise F5ModuleError(response['message']) + else: + raise F5ModuleError(resp.content) + return response + + def to_return(self): + result = {} + for returnable in self.returnables: + result[returnable] = getattr(self, returnable) + result = self._filter_params(result) + return result + + +class V1Parameters(Parameters): + @property + def template(self): + if self._values['template'] is None: + return None + template_map = { + 'ActiveSync v1.0 v2.0 (http)': 'POLICY_TEMPLATE_ACTIVESYNC_V1_0_V2_0_HTTP', + 'ActiveSync v1.0 v2.0 (https)': 
'POLICY_TEMPLATE_ACTIVESYNC_V1_0_V2_0_HTTPS', + 'LotusDomino 6.5 (http)': 'POLICY_TEMPLATE_LOTUSDOMINO_6_5_HTTP', + 'LotusDomino 6.5 (https)': 'POLICY_TEMPLATE_LOTUSDOMINO_6_5_HTTPS', + 'OWA Exchange 2003 (http)': 'POLICY_TEMPLATE_OWA_EXCHANGE_2003_HTTP', + 'OWA Exchange 2003 (https)': 'POLICY_TEMPLATE_OWA_EXCHANGE_2003_HTTPS', + 'OWA Exchange 2003 with ActiveSync (http)': 'POLICY_TEMPLATE_OWA_EXCHANGE_2003_WITH_ACTIVESYNC_HTTP', + 'OWA Exchange 2003 with ActiveSync (https)': 'POLICY_TEMPLATE_OWA_EXCHANGE_2003_WITH_ACTIVESYNC_HTTPS', + 'OWA Exchange 2007 (http)': 'POLICY_TEMPLATE_OWA_EXCHANGE_2007_HTTP', + 'OWA Exchange 2007 (https)': 'POLICY_TEMPLATE_OWA_EXCHANGE_2007_HTTPS', + 'OWA Exchange 2007 with ActiveSync (http)': 'POLICY_TEMPLATE_OWA_EXCHANGE_2007_WITH_ACTIVESYNC_HTTP', + 'OWA Exchange 2007 with ActiveSync (https)': 'POLICY_TEMPLATE_OWA_EXCHANGE_2007_WITH_ACTIVESYNC_HTTPS', + 'OWA Exchange 2010 (http)': 'POLICY_TEMPLATE_OWA_EXCHANGE_2010_HTTP', + 'OWA Exchange 2010 (https)': 'POLICY_TEMPLATE_OWA_EXCHANGE_2010_HTTPS', + 'Oracle 10g Portal (http)': 'POLICY_TEMPLATE_ORACLE_10G_PORTAL_HTTP', + 'Oracle 10g Portal (https)': 'POLICY_TEMPLATE_ORACLE_10G_PORTAL_HTTPS', + 'Oracle Applications 11i (http)': 'POLICY_TEMPLATE_ORACLE_APPLICATIONS_11I_HTTP', + 'Oracle Applications 11i (https)': 'POLICY_TEMPLATE_ORACLE_APPLICATIONS_11I_HTTPS', + 'PeopleSoft Portal 9 (http)': 'POLICY_TEMPLATE_PEOPLESOFT_PORTAL_9_HTTP', + 'PeopleSoft Portal 9 (https)': 'POLICY_TEMPLATE_PEOPLESOFT_PORTAL_9_HTTPS', + 'Rapid Deployment Policy': 'POLICY_TEMPLATE_RAPID_DEPLOYMENT', + 'SAP NetWeaver 7 (http)': 'POLICY_TEMPLATE_SAP_NETWEAVER_7_HTTP', + 'SAP NetWeaver 7 (https)': 'POLICY_TEMPLATE_SAP_NETWEAVER_7_HTTPS', + 'SharePoint 2003 (http)': 'POLICY_TEMPLATE_SHAREPOINT_2003_HTTP', + 'SharePoint 2003 (https)': 'POLICY_TEMPLATE_SHAREPOINT_2003_HTTPS', + 'SharePoint 2007 (http)': 'POLICY_TEMPLATE_SHAREPOINT_2007_HTTP', + 'SharePoint 2007 (https)': 'POLICY_TEMPLATE_SHAREPOINT_2007_HTTPS', + 'SharePoint 2010 (http)': 'POLICY_TEMPLATE_SHAREPOINT_2010_HTTP', + 'SharePoint 2010 (https)': 'POLICY_TEMPLATE_SHAREPOINT_2010_HTTPS' + } + if self._values['template'] in template_map: + return template_map[self._values['template']] + else: + raise F5ModuleError( + "The specified template is not valid for this version of BIG-IP." 
+ ) + + +class V2Parameters(Parameters): + @property + def template(self): + if self._values['template'] is None: + return None + template_map = { + 'ActiveSync v1.0 v2.0 (http)': 'POLICY_TEMPLATE_ACTIVESYNC_V1_0_V2_0_HTTP', + 'ActiveSync v1.0 v2.0 (https)': 'POLICY_TEMPLATE_ACTIVESYNC_V1_0_V2_0_HTTPS', + 'Comprehensive': 'POLICY_TEMPLATE_COMPREHENSIVE', # v13 + 'Drupal': 'POLICY_TEMPLATE_DRUPAL', # v13 + 'Fundamental': 'POLICY_TEMPLATE_FUNDAMENTAL', # v13 + 'Joomla': 'POLICY_TEMPLATE_JOOMLA', # v13 + 'LotusDomino 6.5 (http)': 'POLICY_TEMPLATE_LOTUSDOMINO_6_5_HTTP', + 'LotusDomino 6.5 (https)': 'POLICY_TEMPLATE_LOTUSDOMINO_6_5_HTTPS', + 'OWA Exchange 2003 (http)': 'POLICY_TEMPLATE_OWA_EXCHANGE_2003_HTTP', + 'OWA Exchange 2003 (https)': 'POLICY_TEMPLATE_OWA_EXCHANGE_2003_HTTPS', + 'OWA Exchange 2003 with ActiveSync (http)': 'POLICY_TEMPLATE_OWA_EXCHANGE_2003_WITH_ACTIVESYNC_HTTP', + 'OWA Exchange 2003 with ActiveSync (https)': 'POLICY_TEMPLATE_OWA_EXCHANGE_2003_WITH_ACTIVESYNC_HTTPS', + 'OWA Exchange 2007 (http)': 'POLICY_TEMPLATE_OWA_EXCHANGE_2007_HTTP', + 'OWA Exchange 2007 (https)': 'POLICY_TEMPLATE_OWA_EXCHANGE_2007_HTTPS', + 'OWA Exchange 2007 with ActiveSync (http)': 'POLICY_TEMPLATE_OWA_EXCHANGE_2007_WITH_ACTIVESYNC_HTTP', + 'OWA Exchange 2007 with ActiveSync (https)': 'POLICY_TEMPLATE_OWA_EXCHANGE_2007_WITH_ACTIVESYNC_HTTPS', + 'OWA Exchange 2010 (http)': 'POLICY_TEMPLATE_OWA_EXCHANGE_2010_HTTP', + 'OWA Exchange 2010 (https)': 'POLICY_TEMPLATE_OWA_EXCHANGE_2010_HTTPS', + 'Oracle 10g Portal (http)': 'POLICY_TEMPLATE_ORACLE_10G_PORTAL_HTTP', + 'Oracle 10g Portal (https)': 'POLICY_TEMPLATE_ORACLE_10G_PORTAL_HTTPS', + 'Oracle Applications 11i (http)': 'POLICY_TEMPLATE_ORACLE_APPLICATIONS_11I_HTTP', + 'Oracle Applications 11i (https)': 'POLICY_TEMPLATE_ORACLE_APPLICATIONS_11I_HTTPS', + 'PeopleSoft Portal 9 (http)': 'POLICY_TEMPLATE_PEOPLESOFT_PORTAL_9_HTTP', + 'PeopleSoft Portal 9 (https)': 'POLICY_TEMPLATE_PEOPLESOFT_PORTAL_9_HTTPS', + 'Rapid Deployment Policy': 'POLICY_TEMPLATE_RAPID_DEPLOYMENT', + 'SAP NetWeaver 7 (http)': 'POLICY_TEMPLATE_SAP_NETWEAVER_7_HTTP', + 'SAP NetWeaver 7 (https)': 'POLICY_TEMPLATE_SAP_NETWEAVER_7_HTTPS', + 'SharePoint 2003 (http)': 'POLICY_TEMPLATE_SHAREPOINT_2003_HTTP', + 'SharePoint 2003 (https)': 'POLICY_TEMPLATE_SHAREPOINT_2003_HTTPS', + 'SharePoint 2007 (http)': 'POLICY_TEMPLATE_SHAREPOINT_2007_HTTP', + 'SharePoint 2007 (https)': 'POLICY_TEMPLATE_SHAREPOINT_2007_HTTPS', + 'SharePoint 2010 (http)': 'POLICY_TEMPLATE_SHAREPOINT_2010_HTTP', + 'SharePoint 2010 (https)': 'POLICY_TEMPLATE_SHAREPOINT_2010_HTTPS', + 'Vulnerability Assessment Baseline': 'POLICY_TEMPLATE_VULNERABILITY_ASSESSMENT', # v13 + 'Wordpress': 'POLICY_TEMPLATE_WORDPRESS' # v13 + } + return template_map[self._values['template']] + + +class Changes(Parameters): + @property + def template(self): + if self._values['template'] is None: + return None + template_map = { + 'POLICY_TEMPLATE_ACTIVESYNC_V1_0_V2_0_HTTP': 'ActiveSync v1.0 v2.0 (http)', + 'POLICY_TEMPLATE_ACTIVESYNC_V1_0_V2_0_HTTPS': 'ActiveSync v1.0 v2.0 (https)', + 'POLICY_TEMPLATE_COMPREHENSIVE': 'Comprehensive', + 'POLICY_TEMPLATE_DRUPAL': 'Drupal', + 'POLICY_TEMPLATE_FUNDAMENTAL': 'Fundamental', + 'POLICY_TEMPLATE_JOOMLA': 'Joomla', + 'POLICY_TEMPLATE_LOTUSDOMINO_6_5_HTTP': 'LotusDomino 6.5 (http)', + 'POLICY_TEMPLATE_LOTUSDOMINO_6_5_HTTPS': 'LotusDomino 6.5 (https)', + 'POLICY_TEMPLATE_OWA_EXCHANGE_2003_HTTP': 'OWA Exchange 2003 (http)', + 'POLICY_TEMPLATE_OWA_EXCHANGE_2003_HTTPS': 'OWA Exchange 2003 (https)', + 
'POLICY_TEMPLATE_OWA_EXCHANGE_2003_WITH_ACTIVESYNC_HTTP': 'OWA Exchange 2003 with ActiveSync (http)', + 'POLICY_TEMPLATE_OWA_EXCHANGE_2003_WITH_ACTIVESYNC_HTTPS': 'OWA Exchange 2003 with ActiveSync (https)', + 'POLICY_TEMPLATE_OWA_EXCHANGE_2007_HTTP': 'OWA Exchange 2007 (http)', + 'POLICY_TEMPLATE_OWA_EXCHANGE_2007_HTTPS': 'OWA Exchange 2007 (https)', + 'POLICY_TEMPLATE_OWA_EXCHANGE_2007_WITH_ACTIVESYNC_HTTP': 'OWA Exchange 2007 with ActiveSync (http)', + 'POLICY_TEMPLATE_OWA_EXCHANGE_2007_WITH_ACTIVESYNC_HTTPS': 'OWA Exchange 2007 with ActiveSync (https)', + 'POLICY_TEMPLATE_OWA_EXCHANGE_2010_HTTP': 'OWA Exchange 2010 (http)', + 'POLICY_TEMPLATE_OWA_EXCHANGE_2010_HTTPS': 'OWA Exchange 2010 (https)', + 'POLICY_TEMPLATE_ORACLE_10G_PORTAL_HTTP': 'Oracle 10g Portal (http)', + 'POLICY_TEMPLATE_ORACLE_10G_PORTAL_HTTPS': 'Oracle 10g Portal (https)', + 'POLICY_TEMPLATE_ORACLE_APPLICATIONS_11I_HTTP': 'Oracle Applications 11i (http)', + 'POLICY_TEMPLATE_ORACLE_APPLICATIONS_11I_HTTPS': 'Oracle Applications 11i (https)', + 'POLICY_TEMPLATE_PEOPLESOFT_PORTAL_9_HTTP': 'PeopleSoft Portal 9 (http)', + 'POLICY_TEMPLATE_PEOPLESOFT_PORTAL_9_HTTPS': 'PeopleSoft Portal 9 (https)', + 'POLICY_TEMPLATE_RAPID_DEPLOYMENT': 'Rapid Deployment Policy', + 'POLICY_TEMPLATE_SAP_NETWEAVER_7_HTTP': 'SAP NetWeaver 7 (http)', + 'POLICY_TEMPLATE_SAP_NETWEAVER_7_HTTPS': 'SAP NetWeaver 7 (https)', + 'POLICY_TEMPLATE_SHAREPOINT_2003_HTTP': 'SharePoint 2003 (http)', + 'POLICY_TEMPLATE_SHAREPOINT_2003_HTTPS': 'SharePoint 2003 (https)', + 'POLICY_TEMPLATE_SHAREPOINT_2007_HTTP': 'SharePoint 2007 (http)', + 'POLICY_TEMPLATE_SHAREPOINT_2007_HTTPS': 'SharePoint 2007 (https)', + 'POLICY_TEMPLATE_SHAREPOINT_2010_HTTP': 'SharePoint 2010 (http)', + 'POLICY_TEMPLATE_SHAREPOINT_2010_HTTPS': 'SharePoint 2010 (https)', + 'POLICY_TEMPLATE_VULNERABILITY_ASSESSMENT': 'Vulnerability Assessment Baseline', + 'POLICY_TEMPLATE_WORDPRESS': 'Wordpress', + } + return template_map[self._values['template']] + + +class Difference(object): + def __init__(self, want, have=None): + self.want = want + self.have = have + + def compare(self, param): + try: + result = getattr(self, param) + return result + except AttributeError: + return self.__default(param) + + def __default(self, param): + attr1 = getattr(self.want, param) + try: + attr2 = getattr(self.have, param) + if attr1 != attr2: + return attr1 + except AttributeError: + return attr1 + + @property + def active(self): + if self.want.active is True and self.have.active is False: + return True + if self.want.active is False and self.have.active is True: + return False + + +class BaseManager(object): + def __init__(self, *args, **kwargs): + self.client = kwargs.get('client', None) + self.module = kwargs.get('module', None) + self.have = None + self.changes = Changes() + + def exec_module(self): + changed = False + result = dict() + state = self.want.state + + if state == "present": + changed = self.present() + elif state == "absent": + changed = self.absent() + + changes = self.changes.to_return() + result.update(**changes) + result.update(dict(changed=changed)) + self._announce_deprecations(result) + return result + + def _announce_deprecations(self, result): + warnings = result.pop('__warnings', []) + for warning in warnings: + self.client.module.deprecate( + msg=warning['msg'], + version=warning['version'] + ) + + def _set_changed_options(self): + changed = {} + for key in Parameters.returnables: + if getattr(self.want, key) is not None: + changed[key] = getattr(self.want, key) + if changed: + 
self.changes = Changes(params=changed) + + def should_update(self): + result = self._update_changed_options() + if result: + return True + return False + + def _update_changed_options(self): + diff = Difference(self.want, self.have) + updatables = Parameters.updatables + changed = dict() + for k in updatables: + change = diff.compare(k) + if change is None: + continue + else: + if isinstance(change, dict): + changed.update(change) + else: + changed[k] = change + if changed: + self.changes = Changes(params=changed) + return True + return False + + def present(self): + if self.exists(): + return self.update() + else: + return self.create() + + def absent(self): + if not self.exists(): + return False + else: + return self.remove() + + def exists(self): + uri = "https://{0}:{1}/mgmt/tm/asm/policies/".format( + self.client.provider['server'], + self.client.provider['server_port'], + ) + resp = self.client.api.get(uri) + + try: + response = resp.json() + except ValueError as ex: + raise F5ModuleError(str(ex)) + if any(p['name'] == self.want.name and p['partition'] == self.want.partition for p in response['items']): + return True + return False + + def _file_is_missing(self): + if self.want.template and self.want.file is None: + return False + if self.want.template is None and self.want.file is None: + return False + if not os.path.exists(self.want.file): + return True + return False + + def create(self): + if self.want.active is None: + self.want.update(dict(active=False)) + if self._file_is_missing(): + raise F5ModuleError( + "The specified ASM policy file does not exist" + ) + self._set_changed_options() + if self.module.check_mode: + return True + + if self.want.template is None and self.want.file is None: + self.create_blank() + else: + if self.want.template is not None: + self.create_from_template() + elif self.want.file is not None: + self.create_from_file() + + if self.want.active: + self.activate() + return True + else: + return True + + def update(self): + self.have = self.read_current_from_device() + if not self.should_update(): + return False + if self.module.check_mode: + return True + self.update_on_device() + if self.changes.active: + self.activate() + return True + + def activate(self): + self.have = self.read_current_from_device() + task_id = self.apply_on_device() + if self.wait_for_task(task_id, 'apply'): + return True + else: + raise F5ModuleError('Apply policy task failed.') + + def wait_for_task(self, task_id, task): + uri = '' + if task == 'apply': + uri = "https://{0}:{1}/mgmt/tm/asm/tasks/apply-policy/{2}".format( + self.client.provider['server'], + self.client.provider['server_port'], + task_id + ) + elif task == 'import': + uri = "https://{0}:{1}/mgmt/tm/asm/tasks/import-policy/{2}".format( + self.client.provider['server'], + self.client.provider['server_port'], + task_id + ) + while True: + resp = self.client.api.get(uri) + + try: + response = resp.json() + except ValueError as ex: + raise F5ModuleError(str(ex)) + + if 'code' in response and response['code'] == 400: + if 'message' in response: + raise F5ModuleError(response['message']) + else: + raise F5ModuleError(resp.content) + + if response['status'] in ['COMPLETED', 'FAILURE']: + break + time.sleep(1) + + if response['status'] == 'FAILURE': + return False + if response['status'] == 'COMPLETED': + return True + + def _get_policy_id(self): + name = self.want.name + partition = self.want.partition + uri = "https://{0}:{1}/mgmt/tm/asm/policies/".format( + self.client.provider['server'], + 
self.client.provider['server_port'], + ) + resp = self.client.api.get(uri) + + try: + response = resp.json() + except ValueError as ex: + raise F5ModuleError(str(ex)) + + policy_id = next( + (p['id'] for p in response['items'] if p['name'] == name and p['partition'] == partition), None + ) + + if not policy_id: + raise F5ModuleError("The policy was not found") + return policy_id + + def update_on_device(self): + params = self.changes.api_params() + policy_id = self._get_policy_id() + uri = "https://{0}:{1}/mgmt/tm/asm/policies/{2}".format( + self.client.provider['server'], + self.client.provider['server_port'], + policy_id + ) + if not params['active']: + resp = self.client.api.patch(uri, json=params) + + try: + response = resp.json() + except ValueError as ex: + raise F5ModuleError(str(ex)) + + if 'code' in response and response['code'] == 400: + if 'message' in response: + raise F5ModuleError(response['message']) + else: + raise F5ModuleError(resp.content) + + def create_blank(self): + self.create_on_device() + if self.exists(): + return True + else: + raise F5ModuleError( + 'Failed to create ASM policy: {0}'.format(self.want.name) + ) + + def remove(self): + if self.module.check_mode: + return True + self.remove_from_device() + if self.exists(): + raise F5ModuleError( + 'Failed to delete ASM policy: {0}'.format(self.want.name) + ) + return True + + def is_activated(self): + if self.want.active is True: + return True + else: + return False + + def read_current_from_device(self): + policy_id = self._get_policy_id() + uri = "https://{0}:{1}/mgmt/tm/asm/policies/{2}".format( + self.client.provider['server'], + self.client.provider['server_port'], + policy_id + ) + resp = self.client.api.get(uri) + + try: + response = resp.json() + except ValueError as ex: + raise F5ModuleError(str(ex)) + + if 'code' in response and response['code'] == 400: + if 'message' in response: + raise F5ModuleError(response['message']) + else: + raise F5ModuleError(resp.content) + + response.update((dict(self_link=response['selfLink']))) + + return Parameters(params=response) + + def upload_file_to_device(self, content, name): + url = 'https://{0}:{1}/mgmt/shared/file-transfer/uploads'.format( + self.client.provider['server'], + self.client.provider['server_port'] + ) + try: + upload_file(self.client, url, content, name) + except F5ModuleError: + raise F5ModuleError( + "Failed to upload the file." 
+ ) + + def import_to_device(self): + name = os.path.split(self.want.file)[1] + self.upload_file_to_device(self.want.file, name) + time.sleep(2) + + full_name = fq_name(self.want.partition, self.want.name) + cmd = 'tmsh load asm policy {0} file /var/config/rest/downloads/{1}'.format(full_name, name) + + uri = "https://{0}:{1}/mgmt/tm/util/bash/".format( + self.client.provider['server'], + self.client.provider['server_port'], + ) + args = dict( + command='run', + utilCmdArgs='-c "{0}"'.format(cmd) + ) + resp = self.client.api.post(uri, json=args) + + try: + response = resp.json() + if 'commandResult' in response: + if 'Unexpected Error' in response['commandResult']: + raise F5ModuleError(response['commandResult']) + except ValueError as ex: + raise F5ModuleError(str(ex)) + + if 'code' in response and response['code'] == 400: + if 'message' in response: + raise F5ModuleError(response['message']) + else: + raise F5ModuleError(resp.content) + return True + + def remove_temp_policy_from_device(self): + name = os.path.split(self.want.file)[1] + tpath_name = '/var/config/rest/downloads/{0}'.format(name) + uri = "https://{0}:{1}/mgmt/tm/util/unix-rm/".format( + self.client.provider['server'], + self.client.provider['server_port'], + ) + args = dict( + command='run', + utilCmdArgs=tpath_name + ) + resp = self.client.api.post(uri, json=args) + try: + response = resp.json() + except ValueError as ex: + raise F5ModuleError(str(ex)) + if 'code' in response and response['code'] == 400: + if 'message' in response: + raise F5ModuleError(response['message']) + else: + raise F5ModuleError(resp.content) + + def apply_on_device(self): + uri = "https://{0}:{1}/mgmt/tm/asm/tasks/apply-policy/".format( + self.client.provider['server'], + self.client.provider['server_port'], + ) + params = dict(policyReference={'link': self.have.self_link}) + resp = self.client.api.post(uri, json=params) + + try: + response = resp.json() + except ValueError as ex: + raise F5ModuleError(str(ex)) + + if 'code' in response and response['code'] in [400, 403]: + if 'message' in response: + raise F5ModuleError(response['message']) + else: + raise F5ModuleError(resp.content) + return response['id'] + + def create_from_template_on_device(self): + full_name = fq_name(self.want.partition, self.want.name) + cmd = 'tmsh create asm policy {0} policy-template {1}'.format(full_name, self.want.template) + uri = "https://{0}:{1}/mgmt/tm/util/bash/".format( + self.client.provider['server'], + self.client.provider['server_port'], + ) + args = dict( + command='run', + utilCmdArgs='-c "{0}"'.format(cmd) + ) + resp = self.client.api.post(uri, json=args) + + try: + response = resp.json() + if 'commandResult' in response: + if 'Unexpected Error' in response['commandResult']: + raise F5ModuleError(response['commandResult']) + except ValueError as ex: + raise F5ModuleError(str(ex)) + + if 'code' in response and response['code'] == 400: + if 'message' in response: + raise F5ModuleError(response['message']) + else: + raise F5ModuleError(resp.content) + + def create_on_device(self): + params = self.changes.api_params() + params['name'] = self.want.name + params['partition'] = self.want.partition + # we need to remove active from params as API will raise an error if the active is set to True, + # policies can only be activated via apply-policy task endpoint. 
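        # For reference, a sketch of the resulting two-step flow using
        # methods defined in this class (create() above does the real
        # sequencing):
        #
        #     self.create_on_device()               # POST without 'active'
        #     task_id = self.apply_on_device()      # POST .../tasks/apply-policy/
        #     self.wait_for_task(task_id, 'apply')  # poll until COMPLETED/FAILURE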
+ params.pop('active') + uri = "https://{0}:{1}/mgmt/tm/asm/policies/".format( + self.client.provider['server'], + self.client.provider['server_port'], + ) + resp = self.client.api.post(uri, json=params) + + try: + response = resp.json() + except ValueError as ex: + raise F5ModuleError(str(ex)) + + if 'code' in response and response['code'] in [400, 401, 403]: + if 'message' in response: + raise F5ModuleError(response['message']) + else: + raise F5ModuleError(resp.content) + time.sleep(2) + return response['selfLink'] + + def remove_from_device(self): + policy_id = self._get_policy_id() + uri = "https://{0}:{1}/mgmt/tm/asm/policies/{2}".format( + self.client.provider['server'], + self.client.provider['server_port'], + policy_id + ) + response = self.client.api.delete(uri) + if response.status in [200, 201]: + return True + raise F5ModuleError(response.content) + + +class ModuleManager(object): + def __init__(self, *args, **kwargs): + self.module = kwargs.get('module', None) + self.client = F5RestClient(**self.module.params) + self.kwargs = kwargs + + def exec_module(self): + if not module_provisioned(self.client, 'asm'): + raise F5ModuleError( + "ASM must be provisioned to use this module." + ) + if self.version_is_less_than_13(): + manager = self.get_manager('v1') + else: + manager = self.get_manager('v2') + return manager.exec_module() + + def get_manager(self, type): + if type == 'v1': + return V1Manager(**self.kwargs) + elif type == 'v2': + return V2Manager(**self.kwargs) + + def version_is_less_than_13(self): + version = tmos_version(self.client) + if LooseVersion(version) < LooseVersion('13.0.0'): + return True + else: + return False + + +class V1Manager(BaseManager): + def __init__(self, *args, **kwargs): + module = kwargs.get('module', None) + client = F5RestClient(**module.params) + super(V1Manager, self).__init__(client=client, module=module) + self.want = V1Parameters(params=module.params, client=client) + + def create_from_file(self): + self.import_to_device() + self.remove_temp_policy_from_device() + + def create_from_template(self): + self.create_from_template_on_device() + + +class V2Manager(BaseManager): + def __init__(self, *args, **kwargs): + module = kwargs.get('module', None) + client = F5RestClient(**module.params) + super(V2Manager, self).__init__(client=client, module=module) + self.want = V2Parameters(params=module.params, client=client) + + def create_from_template(self): + if not self.create_from_template_on_device(): + return False + + def create_from_file(self): + if not self.import_to_device(): + return False + self.remove_temp_policy_from_device() + + +class ArgumentSpec(object): + def __init__(self): + self.template_map = [ + 'ActiveSync v1.0 v2.0 (http)', + 'ActiveSync v1.0 v2.0 (https)', + 'Comprehensive', + 'Drupal', + 'Fundamental', + 'Joomla', + 'LotusDomino 6.5 (http)', + 'LotusDomino 6.5 (https)', + 'OWA Exchange 2003 (http)', + 'OWA Exchange 2003 (https)', + 'OWA Exchange 2003 with ActiveSync (http)', + 'OWA Exchange 2003 with ActiveSync (https)', + 'OWA Exchange 2007 (http)', + 'OWA Exchange 2007 (https)', + 'OWA Exchange 2007 with ActiveSync (http)', + 'OWA Exchange 2007 with ActiveSync (https)', + 'OWA Exchange 2010 (http)', + 'OWA Exchange 2010 (https)', + 'Oracle 10g Portal (http)', + 'Oracle 10g Portal (https)', + 'Oracle Applications 11i (http)', + 'Oracle Applications 11i (https)', + 'PeopleSoft Portal 9 (http)', + 'PeopleSoft Portal 9 (https)', + 'Rapid Deployment Policy', + 'SAP NetWeaver 7 (http)', + 'SAP NetWeaver 7 (https)', + 'SharePoint 
2003 (http)', + 'SharePoint 2003 (https)', + 'SharePoint 2007 (http)', + 'SharePoint 2007 (https)', + 'SharePoint 2010 (http)', + 'SharePoint 2010 (https)', + 'Vulnerability Assessment Baseline', + 'Wordpress', + ] + self.supports_check_mode = True + argument_spec = dict( + name=dict( + required=True, + ), + file=dict(type='path'), + template=dict( + choices=self.template_map + ), + active=dict( + type='bool' + ), + state=dict( + default='present', + choices=['present', 'absent'] + ), + partition=dict( + default='Common', + fallback=(env_fallback, ['F5_PARTITION']) + ) + ) + self.argument_spec = {} + self.argument_spec.update(f5_argument_spec) + self.argument_spec.update(argument_spec) + + +def main(): + spec = ArgumentSpec() + + module = AnsibleModule( + argument_spec=spec.argument_spec, + supports_check_mode=spec.supports_check_mode, + mutually_exclusive=[ + ['file', 'template'] + ] + ) + + client = F5RestClient(**module.params) + + try: + mm = ModuleManager(module=module) + results = mm.exec_module() + module.exit_json(**results) + except F5ModuleError as ex: + module.fail_json(msg=str(ex)) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/f5/bigip_device_facts.py b/plugins/modules/network/f5/bigip_device_facts.py new file mode 120000 index 0000000000..0b7da78370 --- /dev/null +++ b/plugins/modules/network/f5/bigip_device_facts.py @@ -0,0 +1 @@ +bigip_device_info.py \ No newline at end of file diff --git a/plugins/modules/network/f5/bigip_device_info.py b/plugins/modules/network/f5/bigip_device_info.py new file mode 100644 index 0000000000..d48a9200a7 --- /dev/null +++ b/plugins/modules/network/f5/bigip_device_info.py @@ -0,0 +1,16267 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright: (c) 2017, F5 Networks Inc. +# Copyright: (c) 2013, Matt Hite +# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'certified'} + +DOCUMENTATION = r''' +--- +module: bigip_device_info +short_description: Collect information from F5 BIG-IP devices +description: + - Collect information from F5 BIG-IP devices. + - This module was called C(bigip_device_facts) before Ansible 2.9. The usage did not change. +options: + gather_subset: + description: + - When supplied, this argument will restrict the information returned to a given subset. + - Can specify a list of values to include a larger subset. + - Values can also be used with an initial C(!) to specify that a specific subset + should not be collected. 
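  # For example (illustrative), to gather everything except trunks and
  # vlans:
  #
  #   gather_subset:
  #     - all
  #     - "!trunks"
  #     - "!vlans"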
+ type: list + required: True + choices: + - all + - monitors + - profiles + - asm-policy-stats + - asm-policies + - asm-server-technologies + - asm-signature-sets + - client-ssl-profiles + - devices + - device-groups + - external-monitors + - fasthttp-profiles + - fastl4-profiles + - gateway-icmp-monitors + - gtm-pools + - gtm-servers + - gtm-wide-ips + - gtm-a-pools + - gtm-a-wide-ips + - gtm-aaaa-pools + - gtm-aaaa-wide-ips + - gtm-cname-pools + - gtm-cname-wide-ips + - gtm-mx-pools + - gtm-mx-wide-ips + - gtm-naptr-pools + - gtm-naptr-wide-ips + - gtm-srv-pools + - gtm-srv-wide-ips + - http-monitors + - https-monitors + - http-profiles + - iapp-services + - iapplx-packages + - icmp-monitors + - interfaces + - internal-data-groups + - irules + - ltm-pools + - ltm-policies + - nodes + - oneconnect-profiles + - partitions + - provision-info + - self-ips + - server-ssl-profiles + - software-volumes + - software-images + - software-hotfixes + - ssl-certs + - ssl-keys + - system-db + - system-info + - tcp-monitors + - tcp-half-open-monitors + - tcp-profiles + - traffic-groups + - trunks + - udp-profiles + - users + - vcmp-guests + - virtual-addresses + - virtual-servers + - vlans + - "!all" + - "!monitors" + - "!profiles" + - "!asm-policy-stats" + - "!asm-policies" + - "!asm-server-technologies" + - "!asm-signature-sets" + - "!client-ssl-profiles" + - "!devices" + - "!device-groups" + - "!external-monitors" + - "!fasthttp-profiles" + - "!fastl4-profiles" + - "!gateway-icmp-monitors" + - "!gtm-pools" + - "!gtm-servers" + - "!gtm-wide-ips" + - "!gtm-a-pools" + - "!gtm-a-wide-ips" + - "!gtm-aaaa-pools" + - "!gtm-aaaa-wide-ips" + - "!gtm-cname-pools" + - "!gtm-cname-wide-ips" + - "!gtm-mx-pools" + - "!gtm-mx-wide-ips" + - "!gtm-naptr-pools" + - "!gtm-naptr-wide-ips" + - "!gtm-srv-pools" + - "!gtm-srv-wide-ips" + - "!http-monitors" + - "!https-monitors" + - "!http-profiles" + - "!iapp-services" + - "!iapplx-packages" + - "!icmp-monitors" + - "!interfaces" + - "!internal-data-groups" + - "!irules" + - "!ltm-pools" + - "!ltm-policies" + - "!nodes" + - "!oneconnect-profiles" + - "!partitions" + - "!provision-info" + - "!self-ips" + - "!server-ssl-profiles" + - "!software-volumes" + - "!software-images" + - "!software-hotfixes" + - "!ssl-certs" + - "!ssl-keys" + - "!system-db" + - "!system-info" + - "!tcp-monitors" + - "!tcp-half-open-monitors" + - "!tcp-profiles" + - "!traffic-groups" + - "!trunks" + - "!udp-profiles" + - "!users" + - "!vcmp-guests" + - "!virtual-addresses" + - "!virtual-servers" + - "!vlans" + aliases: ['include'] +extends_documentation_fragment: +- f5networks.f5_modules.f5 + +author: + - Tim Rupp (@caphrim007) + - Wojciech Wypior (@wojtek0806) +''' + +EXAMPLES = r''' +- name: Collect BIG-IP information + bigip_device_info: + gather_subset: + - interfaces + - vlans + provider: + server: lb.mydomain.com + user: admin + password: secret + delegate_to: localhost + +- name: Collect all BIG-IP information + bigip_device_info: + gather_subset: + - all + provider: + server: lb.mydomain.com + user: admin + password: secret + delegate_to: localhost + +- name: Collect all BIG-IP information except trunks + bigip_device_info: + gather_subset: + - all + - "!trunks" + provider: + server: lb.mydomain.com + user: admin + password: secret + delegate_to: localhost +''' + +RETURN = r''' +asm_policy_stats: + description: Miscellaneous ASM policy related information. + returned: When C(asm-policy-stats) is specified in C(gather_subset). 
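  # Illustrative access pattern, assuming the task result was registered
  # as 'f5_info':
  #
  #   - debug:
  #       msg: "{{ f5_info.asm_policy_stats.policies_active }}"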
+ type: complex + contains: + policies: + description: + - The total number of ASM policies on the device. + returned: queried + type: int + sample: 3 + policies_active: + description: + - The number of ASM policies that are marked as active. + returned: queried + type: int + sample: 3 + policies_attached: + description: + - The number of ASM policies that are attached to virtual servers. + returned: queried + type: int + sample: 1 + policies_inactive: + description: + - The number of ASM policies that are marked as inactive. + returned: queried + type: int + sample: 0 + policies_unattached: + description: + - The number of ASM policies that are not attached to a virtual server. + returned: queried + type: int + sample: 3 + sample: hash/dictionary of values +asm_policies: + description: Detailed information for ASM policies present on device. + returned: When C(asm-policies) is specified in C(gather_subset). + type: complex + contains: + full_path: + description: + - Full name of the resource as known to BIG-IP. + returned: queried + type: str + sample: /Common/foo_policy + name: + description: + - Relative name of the resource in BIG-IP. + returned: queried + type: str + sample: foo_policy + policy_id: + description: + - Generated ID of the ASM policy resource. + returned: queried + type: str + sample: l0Ckxe-7yHsXp8U5tTgbFQ + active: + description: + - Indicates if an ASM policy is active. + returned: queried + type: bool + sample: yes + protocol_independent: + description: + - Indicates if the ASM policy differentiates between HTTP/WS and HTTPS/WSS URLs. + returned: queried + type: bool + sample: no + has_parent: + description: + - Indicates if the ASM policy is a child of another ASM policy. + returned: queried + type: bool + sample: no + type: + description: + - The type of policy, can be C(Security) or C(Parent). + returned: queried + type: str + sample: security + virtual_servers: + description: + - Virtual server or servers which have this policy assigned to them. + returned: queried + type: list + sample: ['/Common/foo_VS/'] + allowed_response_codes: + description: + - Lists the response status codes between 400 and 599 that the security profile considers legal. + returned: queried + type: list + sample: ['400', '404'] + description: + description: + - Description of the resource. + returned: queried + type: str + sample: Significant Policy Description + learning_mode: + description: + - Determine how the policy is built. + returned: queried + type: str + sample: manual + enforcement_mode: + description: + - Specifies whether blocking is active or inactive for the ASM policy. + returned: queried + type: str + sample: blocking + trust_xff: + description: + - Indicates the system has confidence in an XFF (X-Forwarded-For) header in the request. + returned: queried + type: bool + sample: yes + custom_xff_headers: + description: + - List of custom XFF headers trusted by the system. + returned: queried + type: str + sample: asm-proxy1 + case_insensitive: + description: + - Indicates if the ASM policy treats file types, URLs, and parameters as case sensitive. + returned: queried + type: bool + sample: yes + signature_staging: + description: + - Specifies if the staging feature is active on the ASM policy. + returned: queried + type: bool + sample: yes + place_signatures_in_staging: + description: + - Specifies if the system places new or updated signatures in staging + for the number of days specified in the enforcement readiness period. 
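      # Put differently: with staging enabled, a new or updated signature is
      # observed but not enforced for enforcement_readiness_period days
      # before the system suggests enforcing it.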
+      returned: queried
+      type: bool
+      sample: no
+    enforcement_readiness_period:
+      description:
+        - Period in days both security policy entities and attack signatures
+          remain in staging mode before the system suggests enforcing them.
+      returned: queried
+      type: int
+      sample: 8
+    path_parameter_handling:
+      description:
+        - Specifies how the system handles path parameters that are attached to path segments in URIs.
+      returned: queried
+      type: str
+      sample: ignore
+    trigger_asm_irule_event:
+      description:
+        - Indicates if the iRule event is enabled.
+      returned: queried
+      type: str
+      sample: disabled
+    inspect_http_uploads:
+      description:
+        - Specifies if the system should inspect all HTTP uploads.
+      returned: queried
+      type: bool
+      sample: yes
+    mask_credit_card_numbers_in_request:
+      description:
+        - Indicates if the system masks credit card numbers.
+      returned: queried
+      type: bool
+      sample: no
+    maximum_http_header_length:
+      description:
+        - Maximum length of an HTTP header name and value that the system processes.
+      returned: queried
+      type: int
+      sample: 8192
+    use_dynamic_session_id_in_url:
+      description:
+        - Specifies how the security policy processes URLs that use dynamic sessions.
+      returned: queried
+      type: bool
+      sample: no
+    maximum_cookie_header_length:
+      description:
+        - Maximum length of a cookie header name and value that the system processes.
+      returned: queried
+      type: int
+      sample: 8192
+    application_language:
+      description:
+        - The language encoding for the web application.
+      returned: queried
+      type: str
+      sample: utf-8
+    disallowed_geolocations:
+      description:
+        - Displays countries that may not access the web application.
+      returned: queried
+      type: str
+      sample: Argentina
+    csrf_protection_enabled:
+      description:
+        - Specifies if CSRF protection is active on the ASM policy.
+      returned: queried
+      type: bool
+      sample: yes
+    csrf_protection_ssl_only:
+      description:
+        - Specifies that only HTTPS URLs will be checked for CSRF protection.
+      returned: queried
+      type: bool
+      sample: yes
+    csrf_protection_expiration_time_in_seconds:
+      description:
+        - Specifies how long, in seconds, a configured CSRF token is valid before it expires.
+      returned: queried
+      type: int
+      sample: 600
+    csrf_urls:
+      description:
+        - Specifies a list of URLs for CSRF token verification.
+        - In version 13.0.0 and above this has become a sub-collection and a list of dictionaries.
+        - In version 12.x this is a list of simple strings.
+      returned: queried
+      type: complex
+      contains:
+        csrf_url_required_parameters:
+          description:
+            - Indicates whether to ignore or require that one of the specified parameters
+              is present in a request when checking if the URL entry matches the request.
+          returned: queried
+          type: str
+          sample: ignore
+        csrf_url_parameters_list:
+          description:
+            - List of parameters to look for in a request when checking if the URL entry matches the request.
+          returned: queried
+          type: list
+          sample: ['fooparam']
+        csrf_url:
+          description:
+            - Specifies a URL to protect.
+          returned: queried
+          type: str
+          sample: ['/foo.html']
+        csrf_url_method:
+          description:
+            - Method for the specified URL.
+          returned: queried
+          type: str
+          sample: POST
+        csrf_url_enforcement_action:
+          description:
+            - Indicates the action specified for the system to take when the URL entry matches.
+          returned: queried
+          type: str
+          sample: none
+        csrf_url_id:
+          description:
+            - Specifies the generated ID for the configured CSRF URL resource.
+          returned: queried
+          type: str
+          sample: l0Ckxe-7yHsXp8U5tTgbFQ
+        csrf_url_wildcard_order:
+          description:
+            - Specifies the order in which the wildcard URLs are enforced.
+          returned: queried
+          type: str
+          sample: 1
+  sample: hash/dictionary of values
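+# An illustrative, hedged sketch of the version-dependent shape of the csrf_urls
+# value documented above (the values shown are assumed examples, not real output):
+#   on 12.x:   csrf_urls: ['/foo.html']
+#   on 13.0+:  csrf_urls:
+#                - csrf_url: /foo.html
+#                  csrf_url_method: POST
+#                  csrf_url_enforcement_action: none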
+asm_server_technologies:
+  description: Detailed information for ASM server technologies present on the device.
+  returned: When C(asm-server-technologies) is specified in C(gather_subset).
+  type: complex
+  contains:
+    id:
+      description:
+        - Displays the generated ID for the server technology resource.
+      returned: queried
+      type: str
+      sample: l0Ckxe-7yHsXp8U5tTgbFQ
+    server_technology_name:
+      description:
+        - Human friendly name of the server technology resource.
+      returned: queried
+      type: str
+      sample: Wordpress
+    server_technology_references:
+      description:
+        - List of dictionaries containing API self links of the associated technology resources.
+      returned: queried
+      type: complex
+      contains:
+        link:
+          description:
+            - A self link to an associated server technology.
+          returned: queried
+          type: str
+          sample: https://localhost/mgmt/tm/asm/server-technologies/NQG7CT02OBC2cQWbnP7T-A?ver=13.1.0
+  sample: hash/dictionary of values
+asm_signature_sets:
+  description: Detailed information for ASM signature sets present on the device.
+  returned: When C(asm-signature-sets) is specified in C(gather_subset).
+  type: complex
+  contains:
+    name:
+      description:
+        - Name of the signature set.
+      returned: queried
+      type: str
+      sample: WebSphere signatures
+    id:
+      description:
+        - Displays the generated ID for the signature set resource.
+      returned: queried
+      type: str
+      sample: l0Ckxe-7yHsXp8U5tTgbFQ
+    type:
+      description:
+        - The method used to select signatures to be a part of the signature set.
+      returned: queried
+      type: str
+      sample: filter-based
+    category:
+      description:
+        - Displays the category of the signature set.
+      returned: queried
+      type: str
+      sample: filter-based
+    is_user_defined:
+      description:
+        - Specifies that this signature set was added by a user.
+      returned: queried
+      type: bool
+      sample: no
+    assign_to_policy_by_default:
+      description:
+        - Indicates whether the system assigns this signature set to a newly created security policy by default.
+      returned: queried
+      type: bool
+      sample: yes
+    default_alarm:
+      description:
+        - Displays whether the security policy logs the request data in the Statistics
+          screen if a request matches a signature that is included in the signature set.
+      returned: queried
+      type: bool
+      sample: yes
+    default_block:
+      description:
+        - Displays, when the security policy's enforcement mode is Blocking,
+          how the system treats requests that match a signature included in the signature set.
+      returned: queried
+      type: bool
+      sample: yes
+    default_learn:
+      description:
+        - Displays whether the security policy learns all requests that match a signature
+          that is included in the signature set.
+      returned: queried
+      type: bool
+      sample: yes
+  sample: hash/dictionary of values
+client_ssl_profiles:
+  description: Client SSL Profile related information.
+  returned: When C(client-ssl-profiles) is specified in C(gather_subset).
+  type: complex
+  contains:
+    full_path:
+      description:
+        - Full name of the resource as known to BIG-IP.
+      returned: queried
+      type: str
+      sample: /Common/bigip02.internal
+    name:
+      description:
+        - Relative name of the resource in BIG-IP.
+ returned: queried + type: str + sample: bigip02.internal + alert_timeout: + description: + - Maximum time period in seconds to keep the SSL session active after alert + message is sent, or indefinite. + returned: queried + type: int + sample: 0 + allow_non_ssl: + description: + - Enables or disables non-SSL connections. + returned: queried + type: bool + sample: yes + authenticate_depth: + description: + - Specifies the authenticate depth. This is the client certificate chain maximum traversal depth. + returned: queried + type: int + sample: 9 + authenticate_frequency: + description: + - Specifies how often the system authenticates a user. + returned: queried + type: str + sample: once + ca_file: + description: + - Specifies the certificate authority (CA) file name. + returned: queried + type: str + sample: /Common/default-ca.crt + cache_size: + description: + - Specifies the SSL session cache size. + returned: queried + type: int + sample: 262144 + cache_timeout: + description: + - Specifies the SSL session cache timeout value. + returned: queried + type: int + sample: 3600 + certificate_file: + description: + - Specifies the name of the certificate installed on the traffic + management system for the purpose of terminating or initiating + an SSL connection. + returned: queried + type: str + sample: /Common/default.crt + chain_file: + description: + - Specifies or builds a certificate chain file that a client can + use to authenticate the profile. + returned: queried + type: str + sample: /Common/ca-chain.crt + ciphers: + description: + - Specifies a list of cipher names. + returned: queried + type: str + sample: ['DEFAULT'] + crl_file: + description: + - Specifies the certificate revocation list file name. + returned: queried + type: str + sample: /Common/default.crl + parent: + description: + - Parent of the profile + returned: queried + type: str + sample: /Common/clientssl + description: + description: + - Description of the profile. + returned: queried + type: str + sample: My profile + modssl_methods: + description: + - Enables or disables ModSSL method emulation. + returned: queried + type: bool + sample: no + peer_certification_mode: + description: + - Specifies the peer certificate mode. + returned: queried + type: str + sample: ignore + sni_require: + description: + - When this option is C(yes), a client connection that does not + specify a known server name or does not support SNI extension will + be rejected. + returned: queried + type: bool + sample: no + sni_default: + description: + - When C(yes), this profile is the default SSL profile when the server + name in a client connection does not match any configured server + names, or a client connection does not specify any server name at + all. + returned: queried + type: bool + sample: yes + strict_resume: + description: + - Enables or disables strict-resume. + returned: queried + type: bool + sample: yes + profile_mode_enabled: + description: + - Specifies the profile mode, which enables or disables SSL + processing. + returned: queried + type: bool + sample: yes + renegotiation_maximum_record_delay: + description: + - Maximum number of SSL records that the traffic + management system can receive before it renegotiates an SSL + session. + returned: queried + type: int + sample: 0 + renegotiation_period: + description: + - Number of seconds required to renegotiate an SSL + session. + returned: queried + type: int + sample: 0 + renegotiation: + description: + - Specifies whether renegotiations are enabled. 
+      returned: queried
+      type: bool
+      sample: yes
+    server_name:
+      description:
+        - Specifies the server names to be matched with SNI (server name
+          indication) extension information in ClientHello from a client
+          connection.
+      returned: queried
+      type: str
+      sample: bigip01
+    session_ticket:
+      description:
+        - Enables or disables session-ticket.
+      returned: queried
+      type: bool
+      sample: no
+    unclean_shutdown:
+      description:
+        - Whether or not to force the SSL profile to perform a clean shutdown
+          of all SSL connections.
+      returned: queried
+      type: bool
+      sample: no
+    retain_certificate:
+      description:
+        - The APM module requires storing the certificate in the SSL session. When
+          C(no), the certificate is not stored in the SSL session.
+      returned: queried
+      type: bool
+      sample: yes
+    secure_renegotiation_mode:
+      description:
+        - Specifies the secure renegotiation mode.
+      returned: queried
+      type: str
+      sample: require
+    handshake_timeout:
+      description:
+        - Specifies the handshake timeout in seconds.
+      returned: queried
+      type: int
+      sample: 10
+    forward_proxy_certificate_extension_include:
+      description:
+        - Specifies the extensions of the web server certificates to be
+          included in the generated certificates using SSL Forward Proxy.
+      returned: queried
+      type: list
+      sample: ["basic-constraints", "subject-alternative-name"]
+    forward_proxy_certificate_lifespan:
+      description:
+        - Specifies the lifespan of the certificate generated using the SSL
+          forward proxy feature.
+      returned: queried
+      type: int
+      sample: 30
+    forward_proxy_lookup_by_ipaddr_port:
+      description:
+        - Specifies whether to perform certificate look up by IP address and
+          port number.
+      returned: queried
+      type: bool
+      sample: no
+    forward_proxy_enabled:
+      description:
+        - Enables or disables the SSL forward proxy feature.
+      returned: queried
+      type: bool
+      sample: yes
+    forward_proxy_ca_passphrase:
+      description:
+        - Specifies the passphrase of the key file that is used as the
+          certification authority key when the SSL forward proxy feature is
+          enabled.
+      returned: queried
+      type: str
+    forward_proxy_ca_certificate_file:
+      description:
+        - Specifies the name of the certificate file that is used as the
+          certification authority certificate when the SSL forward proxy
+          feature is enabled.
+      returned: queried
+      type: str
+    forward_proxy_ca_key_file:
+      description:
+        - Specifies the name of the key file that is used as the
+          certification authority key when the SSL forward proxy feature is
+          enabled.
+      returned: queried
+      type: str
+  sample: hash/dictionary of values
+devices:
+  description: Device related information.
+  returned: When C(devices) is specified in C(gather_subset).
+  type: complex
+  contains:
+    full_path:
+      description:
+        - Full name of the resource as known to BIG-IP.
+      returned: queried
+      type: str
+      sample: /Common/bigip02.internal
+    name:
+      description:
+        - Relative name of the resource in BIG-IP.
+      returned: queried
+      type: str
+      sample: bigip02.internal
+    active_modules:
+      description:
+        - The currently licensed and provisioned modules on the device.
+      returned: queried
+      type: list
+      sample: ["DNS Services (LAB)", "PSM, VE"]
+    base_mac_address:
+      description:
+        - Media Access Control address (MAC address) of the device.
+      returned: queried
+      type: str
+      sample: "fa:16:3e:c3:42:6f"
+    build:
+      description:
+        - The minor version information of the total product version.
+      returned: queried
+      type: str
+      sample: 0.0.1
+    chassis_id:
+      description:
+        - Serial number of the device.
+ returned: queried + type: str + sample: 11111111-2222-3333-444444444444 + chassis_type: + description: + - Displays the chassis type. The possible values are C(individual) and C(viprion). + returned: queried + type: str + sample: individual + comment: + description: + - User comments about the device. + returned: queried + type: str + sample: My device + configsync_address: + description: + - IP address used for configuration synchronization. + returned: queried + type: str + sample: 10.10.10.10 + contact: + description: + - Administrator contact information. + returned: queried + type: str + sample: The User + description: + description: + - Description of the device. + returned: queried + type: str + sample: My device + edition: + description: + - Displays the software edition. + returned: queried + type: str + sample: Point Release 7 + failover_state: + description: + - Device failover state. + returned: queried + type: str + sample: active + hostname: + description: + - Device hostname + returned: queried + type: str + sample: bigip02.internal + location: + description: + - Specifies the physical location of the device. + returned: queried + type: str + sample: London + management_address: + description: + - IP address of the management interface. + returned: queried + type: str + sample: 3.3.3.3 + marketing_name: + description: + - Marketing name of the device platform. + returned: queried + type: str + sample: BIG-IP Virtual Edition + multicast_address: + description: + - Specifies the multicast IP address used for failover. + returned: queried + type: str + sample: 4.4.4.4 + optional_modules: + description: + - Modules that are available for the current platform, but are not currently licensed. + returned: queried + type: list + sample: ["App Mode (TMSH Only, No Root/Bash)", "BIG-IP VE, Multicast Routing"] + platform_id: + description: + - Displays the device platform identifier. + returned: queried + type: str + sample: Z100 + primary_mirror_address: + description: + - Specifies the IP address used for state mirroring. + returned: queried + type: str + sample: 5.5.5.5 + product: + description: + - Displays the software product name. + returned: queried + type: str + sample: BIG-IP + secondary_mirror_address: + description: + - Secondary IP address used for state mirroring. + returned: queried + type: str + sample: 2.2.2.2 + self: + description: + - Whether this device is the one that was queried for information, or not. + returned: queried + type: bool + sample: yes + software_version: + description: + - Displays the software version number. + returned: queried + type: str + sample: 13.1.0.7 + timelimited_modules: + description: + - Displays the licensed modules that are time-limited. + returned: queried + type: list + sample: ["IP Intelligence, 3Yr, ...", "PEM URL Filtering, 3Yr, ..."] + timezone: + description: + - Displays the time zone configured on the device. + returned: queried + type: str + sample: UTC + unicast_addresses: + description: + - Specifies the entire set of unicast addresses used for failover. + returned: queried + type: complex + contains: + effective_ip: + description: + - The IP address that peers can use to reach this unicast address IP. + returned: queried + type: str + sample: 5.4.3.5 + effective_port: + description: + - The port that peers can use to reach this unicast address. + returned: queried + type: int + sample: 1026 + ip: + description: + - The IP address that the failover daemon will listen on for packets from its peers. 
+ returned: queried + type: str + sample: 5.4.3.5 + port: + description: + - The IP port that the failover daemon uses to accept packets from its peers. + returned: queried + type: int + sample: 1026 + sample: hash/dictionary of values +device_groups: + description: Device group related information. + returned: When C(device-groups) is specified in C(gather_subset). + type: complex + contains: + full_path: + description: + - Full name of the resource as known to BIG-IP. + returned: queried + type: str + sample: /Common/fasthttp + name: + description: + - Relative name of the resource in BIG-IP. + returned: queried + type: str + sample: fasthttp + autosync_enabled: + description: + - Whether the device group automatically synchronizes configuration data to its members. + returned: queried + type: bool + sample: no + description: + description: + - Description of the device group. + returned: queried + type: str + sample: My device group + devices: + description: + - List of devices that are in the group. Devices are listed by their C(full_path). + returned: queried + type: list + sample: [/Common/bigip02.internal] + full_load_on_sync: + description: + - Specifies that the entire configuration for a device group is sent when configuration + synchronization is performed. + returned: queried + type: bool + sample: yes + incremental_config_sync_size_maximum: + description: + - Specifies the maximum size (in KB) to devote to incremental config sync cached transactions. + returned: queried + type: int + sample: 1024 + network_failover_enabled: + description: + - Specifies whether network failover is used. + returned: queried + type: bool + sample: yes + type: + description: + - Specifies the type of device group. + returned: queried + type: str + sample: sync-only + asm_sync_enabled: + description: + - Specifies whether to synchronize ASM configurations of device group members. + returned: queried + type: bool + sample: yes + sample: hash/dictionary of values +external_monitors: + description: External monitor related information. + returned: When C(external-monitors) is specified in C(gather_subset). + type: complex + contains: + full_path: + description: + - Full name of the resource as known to BIG-IP. + returned: queried + type: str + sample: /Common/external + name: + description: + - Relative name of the resource in BIG-IP. + returned: queried + type: str + sample: external + parent: + description: + - Profile from which this profile inherits settings. + returned: queried + type: str + sample: external + description: + description: + - Description of the resource. + returned: queried + type: str + sample: My monitor + destination: + description: + - Specifies the IP address and service port of the resource that is + the destination of this monitor. + returned: queried + type: str + sample: "*:*" + args: + description: + - Specifies any command-line arguments that the script requires. + returned: queried + type: str + sample: arg1 arg2 arg3 + external_program: + description: + - Specifies the name of the file for the monitor to use. + returned: queried + type: str + sample: /Common/arg_example + variables: + description: + - Specifies any variables that the script requires. + type: complex + sample: { "key1": "val", "key_2": "val 2" } + interval: + description: + - Specifies, in seconds, the frequency at which the system issues + the monitor check when either the resource is down or the status + of the resource is unknown. 
+      returned: queried
+      type: int
+      sample: 5
+    manual_resume:
+      description:
+        - Specifies whether the system automatically changes the status of a
+          resource to up at the next successful monitor check.
+      returned: queried
+      type: bool
+      sample: yes
+    time_until_up:
+      description:
+        - Specifies the amount of time, in seconds, after the first
+          successful response before a node is marked up.
+      returned: queried
+      type: int
+      sample: 0
+    timeout:
+      description:
+        - Specifies the number of seconds the target has in which to respond
+          to the monitor request.
+      returned: queried
+      type: int
+      sample: 16
+    up_interval:
+      description:
+        - Specifies, in seconds, the frequency at which the system issues
+          the monitor check when the resource is up.
+      returned: queried
+      type: int
+      sample: 0
+  sample: hash/dictionary of values
+fasthttp_profiles:
+  description: FastHTTP profile related information.
+  returned: When C(fasthttp-profiles) is specified in C(gather_subset).
+  type: complex
+  contains:
+    full_path:
+      description:
+        - Full name of the resource as known to BIG-IP.
+      returned: queried
+      type: str
+      sample: /Common/fasthttp
+    name:
+      description:
+        - Relative name of the resource in BIG-IP.
+      returned: queried
+      type: str
+      sample: fasthttp
+    client_close_timeout:
+      description:
+        - Number of seconds after which the system closes a client connection, when
+          the system either receives a client FIN packet or sends a FIN packet to the client.
+      returned: queried
+      type: int
+      sample: 5
+    oneconnect_idle_timeout_override:
+      description:
+        - Number of seconds after which a server-side connection in a OneConnect pool
+          is eligible for deletion, when the connection has no traffic.
+      returned: queried
+      type: int
+      sample: 0
+    oneconnect_maximum_reuse:
+      description:
+        - Maximum number of times that the system can re-use a current connection.
+      returned: queried
+      type: int
+      sample: 0
+    oneconnect_maximum_pool_size:
+      description:
+        - Maximum number of connections to a load balancing pool.
+      returned: queried
+      type: int
+      sample: 2048
+    oneconnect_minimum_pool_size:
+      description:
+        - Minimum number of connections to a load balancing pool.
+      returned: queried
+      type: int
+      sample: 0
+    oneconnect_replenish:
+      description:
+        - Specifies, when C(yes), that the system will not keep a steady-state maximum of
+          connections to the back-end unless the number of connections to the pool has
+          dropped beneath the C(minimum_pool_size) specified in the profile.
+      returned: queried
+      type: bool
+      sample: yes
+    oneconnect_ramp_up_increment:
+      description:
+        - The increment in which the system makes additional connections available, when
+          all available connections are in use.
+      returned: queried
+      type: int
+      sample: 4
+    parent:
+      description:
+        - Profile from which this profile inherits settings.
+      returned: queried
+      type: str
+      sample: fasthttp
+    description:
+      description:
+        - Description of the resource.
+      returned: queried
+      type: str
+      sample: My profile
+    force_http_1_0_response:
+      description:
+        - Specifies, when C(yes), that the server sends responses to clients in the HTTP/1.0
+          format.
+      returned: queried
+      type: bool
+      sample: no
+    request_header_insert:
+      description:
+        - A string that the system inserts as a header in an HTTP request. If the header
+          exists already, the system does not replace it.
+      returned: queried
+      type: str
+      sample: "X-F5-Authentication: foo"
+    http_1_1_close_workarounds:
+      description:
+        - Specifies, when C(yes), that the server uses workarounds for HTTP 1.1 close issues.
+ returned: queried + type: bool + sample: no + idle_timeout: + description: + - Length of time that a connection is idle (has no traffic) before the connection + is eligible for deletion. + returned: queried + type: int + sample: 300 + insert_x_forwarded_for: + description: + - Whether the system inserts the X-Forwarded-For header in an HTTP request with the + client IP address, to use with connection pooling. + returned: queried + type: bool + sample: no + maximum_header_size: + description: + - Maximum amount of HTTP header data that the system buffers before making a load + balancing decision. + returned: queried + type: int + sample: 32768 + maximum_requests: + description: + - Maximum number of requests that the system can receive on a client-side connection, + before the system closes the connection. + returned: queried + type: int + sample: 0 + maximum_segment_size_override: + description: + - Maximum segment size (MSS) override for server-side connections. + returned: queried + type: int + sample: 0 + receive_window_size: + description: + - Amount of data the BIG-IP system can accept without acknowledging the server. + returned: queried + type: int + sample: 0 + reset_on_timeout: + description: + - Specifies, when C(yes), that the system sends a reset packet (RST) in addition to + deleting the connection, when a connection exceeds the idle timeout value. + returned: queried + type: bool + sample: yes + server_close_timeout: + description: + - Number of seconds after which the system closes a client connection, when the system + either receives a server FIN packet or sends a FIN packet to the server. + returned: queried + type: int + sample: 5 + server_sack: + description: + - Whether the BIG-IP system processes Selective ACK (Sack) packets in cookie responses + from the server. + returned: queried + type: bool + sample: no + server_timestamp: + description: + - Whether the BIG-IP system processes timestamp request packets in cookie responses + from the server. + returned: queried + type: bool + sample: no + unclean_shutdown: + description: + - How the system handles closing connections. Values provided may be C(enabled), C(disabled), + or C(fast). + returned: queried + type: str + sample: enabled + sample: hash/dictionary of values +fastl4_profiles: + description: FastL4 profile related information. + returned: When C(fastl4-profiles) is specified in C(gather_subset). + type: complex + contains: + full_path: + description: + - Full name of the resource as known to BIG-IP. + returned: queried + type: str + sample: /Common/fastl4 + name: + description: + - Relative name of the resource in BIG-IP. + returned: queried + type: str + sample: fastl4 + client_timeout: + description: + - Specifies late binding client timeout in seconds. + - This is the number of seconds allowed for a client to transmit enough data to + select a server pool. + - If this timeout expires, the timeout-recovery option dictates whether + to drop the connection or fallback to the normal FastL4 load-balancing method + to pick a server pool. + returned: queried + type: int + sample: 30 + parent: + description: + - Profile from which this profile inherits settings. + returned: queried + type: str + sample: fastl4 + description: + description: + - Description of the resource. + returned: queried + type: str + sample: My profile + explicit_flow_migration: + description: + - Specifies whether to have the iRule code determine exactly when + the FIX stream drops down to the ePVA hardware. 
+      returned: queried
+      type: bool
+      sample: yes
+    hardware_syn_cookie:
+      description:
+        - Enables or disables hardware SYN cookie support when PVA10 is present on the system.
+        - This option is deprecated in version 13.0.0 and is replaced by C(syn-cookie-enable).
+      returned: queried
+      type: bool
+      sample: no
+    idle_timeout:
+      description:
+        - Specifies the number of seconds that a connection is idle before the connection is
+          eligible for deletion.
+        - Values will be in the range of 0 to 4294967295 (inclusive).
+        - C(0) is equivalent to the TMUI value "immediate".
+        - C(4294967295) is equivalent to the TMUI value "indefinite".
+      returned: queried
+      type: int
+      sample: 300
+    dont_fragment_flag:
+      description:
+        - Describes the Don't Fragment (DF) bit setting in the IP Header of
+          the outgoing TCP packet.
+        - When C(pmtu), sets the outgoing IP Header DF bit based on the IP pmtu
+          setting (tm.pathmtudiscovery).
+        - When C(preserve), sets the outgoing packet's IP Header DF bit to be the
+          same as the incoming IP Header DF bit.
+        - When C(set), sets the outgoing packet's IP Header DF bit.
+        - When C(clear), clears the outgoing packet's IP Header DF bit.
+      returned: queried
+      type: str
+      sample: pmtu
+    ip_tos_to_client:
+      description:
+        - Specifies an IP Type of Service (ToS) number for the client side.
+        - This option specifies the ToS level that the traffic management
+          system assigns to IP packets when sending them to clients.
+      returned: queried
+      type: str or int
+      sample: 200
+    ip_tos_to_server:
+      description:
+        - Specifies an IP ToS number for the server side.
+        - This option specifies the ToS level that the traffic management system assigns
+          to IP packets when sending them to servers.
+      returned: queried
+      type: str or int
+      sample: pass-through
+    ttl_mode:
+      description:
+        - Describes the outgoing TCP packet's IP Header TTL mode.
+        - When C(proxy), sets the outgoing IP Header TTL value to 255/64 for IPv4/IPv6,
+          respectively.
+        - When C(preserve), sets the outgoing IP Header TTL value to be the same as the
+          incoming IP Header TTL value.
+        - When C(decrement), sets the outgoing IP Header TTL value to be one less than
+          the incoming TTL value.
+        - When C(set), sets the outgoing IP Header TTL value to a specific value (as
+          specified by C(ttl_v4) or C(ttl_v6)).
+      returned: queried
+      type: str
+      sample: preserve
+    ttl_v4:
+      description:
+        - Specifies the outgoing packet's IP Header TTL value for IPv4 traffic.
+        - Maximum value that can be specified is 255.
+      returned: queried
+      type: int
+      sample: 200
+    ttl_v6:
+      description:
+        - Specifies the outgoing packet's IP Header TTL value for IPv6 traffic.
+        - Maximum value that can be specified is 255.
+      returned: queried
+      type: int
+      sample: 300
+    keep_alive_interval:
+      description:
+        - Specifies the keep-alive probe interval, in seconds.
+        - A value of 0 indicates keep-alive is disabled.
+      returned: queried
+      type: int
+      sample: 10
+    late_binding:
+      description:
+        - Specifies whether to enable or disable intelligent selection of a
+          back-end server pool.
+      returned: queried
+      type: bool
+      sample: yes
+    link_qos_to_client:
+      description:
+        - Specifies a Link Quality of Service (QoS) (VLAN priority) number
+          for the client side.
+        - This option specifies the QoS level that the system assigns to packets
+          when sending them to clients.
+      returned: queried
+      type: int or string
+      sample: 7
+    link_qos_to_server:
+      description:
+        - Specifies a Link QoS (VLAN priority) number for the server side.
+ - This option specifies the QoS level that the system assigns to + packets when sending them to servers. + returned: queried + type: int or string + sample: 5 + loose_close: + description: + - Specifies that the system closes a loosely-initiated connection + when the system receives the first FIN packet from either the + client or the server. + returned: queried + type: bool + sample: no + loose_init: + description: + - Specifies that the system initializes a connection when it + receives any Transmission Control Protocol (TCP) packet, rather + than requiring a SYN packet for connection initiation. + returned: queried + type: bool + sample: yes + mss_override: + description: + - Specifies a maximum segment size (MSS) override for server + connections. Note that this is also the MSS advertised to a client + when a client first connects. + - C(0) (zero), means the option is disabled. Otherwise, the value will be + between 256 and 9162. + returned: queried + type: int + sample: 500 + priority_to_client: + description: + - Specifies internal packet priority for the client side. + - This option specifies the internal packet priority that the system + assigns to packets when sending them to clients. + returned: queried + type: int or string + sample: 300 + priority_to_server: + description: + - Specifies internal packet priority for the server side. + - This option specifies the internal packet priority that the system + assigns to packets when sending them to servers. + returned: queried + type: int or string + sample: 200 + pva_acceleration: + description: + - Specifies the Packet Velocity(r) ASIC acceleration policy. + returned: queried + type: str + sample: full + pva_dynamic_client_packets: + description: + - Specifies the number of client packets before dynamic ePVA + hardware re-offloading occurs. + - Values will be between 0 and 10. + returned: queried + type: int + sample: 8 + pva_dynamic_server_packets: + description: + - Specifies the number of server packets before dynamic ePVA + hardware re-offloading occurs. + - Values will be between 0 and 10. + returned: queried + type: int + sample: 5 + pva_flow_aging: + description: + - Specifies if automatic aging from ePVA flow cache is enabled or not. + returned: queried + type: bool + sample: yes + pva_flow_evict: + description: + - Specifies if this flow can be evicted upon hash collision with a + new flow learn snoop request. + returned: queried + type: bool + sample: no + pva_offload_dynamic: + description: + - Specifies whether PVA flow dynamic offloading is enabled or not. + returned: queried + type: bool + sample: yes + pva_offload_state: + description: + - Specifies at what stage the ePVA performs hardware offload. + - When C(embryonic), implies at TCP CSYN or the first client UDP packet. + - When C(establish), implies TCP 3WAY handshaking or UDP CS round trip are + confirmed. + returned: queried + type: str + sample: embryonic + reassemble_fragments: + description: + - Specifies whether to reassemble fragments. + returned: queried + type: bool + sample: yes + receive_window: + description: + - Specifies the window size to use, in bytes. + - The maximum is 2^31 for window scale enabling. + returned: queried + type: int + sample: 1000 + reset_on_timeout: + description: + - Specifies whether you want to reset connections on timeout. + returned: queried + type: bool + sample: yes + rtt_from_client: + description: + - Enables or disables the TCP timestamp options to measure the round + trip time to the client. 
+ returned: queried + type: bool + sample: no + rtt_from_server: + description: + - Enables or disables the TCP timestamp options to measure the round + trip time to the server. + returned: queried + type: bool + sample: yes + server_sack: + description: + - Specifies whether to support server sack option in cookie response + by default. + returned: queried + type: bool + sample: no + server_timestamp: + description: + - Specifies whether to support server timestamp option in cookie + response by default. + returned: queried + type: bool + sample: yes + software_syn_cookie: + description: + - Enables or disables software SYN cookie support when PVA10 is not present + on the system. + - This option is deprecated in version 13.0.0 and is replaced by + C(syn_cookie_enabled). + returned: queried + type: bool + sample: yes + syn_cookie_enabled: + description: + - Enables syn-cookies capability on this virtual server. + returned: queried + type: bool + sample: no + syn_cookie_mss: + description: + - Specifies a maximum segment size (MSS) for server connections when + SYN Cookie is enabled. + returned: queried + type: int + sample: 2000 + syn_cookie_whitelist: + description: + - Specifies whether or not to use a SYN Cookie WhiteList when doing + software SYN Cookies. + returned: queried + type: bool + sample: no + tcp_close_timeout: + description: + - Specifies a TCP close timeout in seconds. + returned: queried + type: int + sample: 100 + generate_init_seq_number: + description: + - Specifies whether you want to generate TCP sequence numbers on all + SYNs that conform with RFC1948, and allow timestamp recycling. + returned: queried + type: bool + sample: yes + tcp_handshake_timeout: + description: + - Specifies a TCP handshake timeout in seconds. + returned: queried + type: int + sample: 5 + strip_sack: + description: + - Specifies whether you want to block the TCP SackOK option from + passing to the server on an initiating SYN. + returned: queried + type: bool + sample: yes + tcp_time_wait_timeout: + description: + - Specifies a TCP time_wait timeout in milliseconds. + returned: queried + type: int + sample: 60 + tcp_timestamp_mode: + description: + - Specifies how you want to handle the TCP timestamp. + returned: queried + type: str + sample: preserve + tcp_window_scale_mode: + description: + - Specifies how you want to handle the TCP window scale. + returned: queried + type: str + sample: preserve + timeout_recovery: + description: + - Specifies late binding timeout recovery mode. This is the action + to take when late binding timeout occurs on a connection. + - When C(disconnect), only the L7 iRule actions are acceptable to + pick a server. + - When C(fallback), the normal FastL4 load-balancing methods are acceptable + to pick a server. + returned: queried + type: str + sample: fallback + sample: hash/dictionary of values +gateway_icmp_monitors: + description: Gateway ICMP monitor related information. + returned: When C(gateway-icmp-monitors) is specified in C(gather_subset). + type: complex + contains: + full_path: + description: + - Full name of the resource as known to BIG-IP. + returned: queried + type: str + sample: /Common/gateway_icmp + name: + description: + - Relative name of the resource in BIG-IP. + returned: queried + type: str + sample: gateway_icmp + parent: + description: + - Profile from which this profile inherits settings. + returned: queried + type: str + sample: gateway_icmp + description: + description: + - Description of the resource. 
+ returned: queried + type: str + sample: My monitor + adaptive: + description: + - Whether adaptive response time monitoring is enabled for this monitor. + returned: queried + type: bool + sample: no + adaptive_divergence_type: + description: + - Specifies whether the adaptive-divergence-value is C(relative) or + C(absolute). + returned: queried + type: str + sample: relative + adaptive_divergence_value: + description: + - Specifies how far from mean latency each monitor probe is allowed + to be. + returned: queried + type: int + sample: 25 + adaptive_limit: + description: + - Specifies the hard limit, in milliseconds, which the probe is not + allowed to exceed, regardless of the divergence value. + returned: queried + type: int + sample: 200 + adaptive_sampling_timespan: + description: + - Specifies the size of the sliding window, in seconds, which + records probe history. + returned: queried + type: int + sample: 300 + destination: + description: + - Specifies the IP address and service port of the resource that is + the destination of this monitor. + returned: queried + type: str + sample: "*:*" + interval: + description: + - Specifies, in seconds, the frequency at which the system issues + the monitor check when either the resource is down or the status + of the resource is unknown. + returned: queried + type: int + sample: 5 + manual_resume: + description: + - Specifies whether the system automatically changes the status of a + resource to up at the next successful monitor check. + returned: queried + type: bool + sample: yes + time_until_up: + description: + - Specifies the amount of time, in seconds, after the first + successful response before a node is marked up. + returned: queried + type: int + sample: 0 + timeout: + description: + - Specifies the number of seconds the target has in which to respond + to the monitor request. + returned: queried + type: int + sample: 16 + transparent: + description: + - Specifies whether the monitor operates in transparent mode. + returned: queried + type: bool + sample: no + up_interval: + description: + - Specifies, in seconds, the frequency at which the system issues + the monitor check when the resource is up. + returned: queried + type: int + sample: 0 + sample: hash/dictionary of values +gtm_pools: + description: + - GTM pool related information. + - Every "type" of pool has the exact same list of possible information. Therefore, + the list of information here is presented once instead of 6 times. + returned: When any of C(gtm-pools) or C(gtm-*-pools) is specified in C(gather_subset). + type: complex + contains: + full_path: + description: + - Full name of the resource as known to BIG-IP. + returned: queried + type: str + sample: /Common/pool1 + name: + description: + - Relative name of the resource in BIG-IP. + returned: queried + type: str + sample: pool1 + alternate_mode: + description: + - The load balancing mode that the system uses to load balance name resolution + requests among the members of the pool. + returned: queried + type: str + sample: drop-packet + dynamic_ratio: + description: + - Whether or not the dynamic ratio load balancing algorithm is enabled for this + pool. + returned: queried + type: bool + sample: yes + enabled: + description: + - Is the pool enabled. + returned: queried + type: bool + disabled: + description: + - Is the pool disabled. 
+      returned: queried
+      type: bool
+    fallback_mode:
+      description:
+        - Specifies the load balancing mode that the system uses to load balance
+          name resolution amongst the pool members if the preferred and alternate
+          modes are unsuccessful in picking a pool.
+      returned: queried
+      type: str
+    load_balancing_mode:
+      description:
+        - Specifies the preferred load balancing mode that the system uses to load
+          balance requests across pool members.
+      returned: queried
+      type: str
+    manual_resume:
+      description:
+        - Whether manual resume is enabled for this pool.
+      returned: queried
+      type: bool
+    max_answers_returned:
+      description:
+        - Maximum number of available virtual servers that the system lists in a
+          response.
+      returned: queried
+      type: int
+    members:
+      description:
+        - List of members (and their configurations) in the pool.
+      returned: queried
+      type: complex
+    partition:
+      description:
+        - Partition the pool exists on.
+      returned: queried
+      type: str
+    qos_hit_ratio:
+      description:
+        - Weight of the Hit Ratio performance factor for the QoS dynamic load
+          balancing method.
+      returned: queried
+      type: int
+    qos_hops:
+      description:
+        - Weight of the Hops performance factor when load balancing mode or fallback mode
+          is QoS.
+      returned: queried
+      type: int
+    qos_kilobytes_second:
+      description:
+        - Weight assigned to the Kilobytes per Second performance factor when load balancing
+          option is QoS.
+      returned: queried
+      type: int
+    qos_lcs:
+      description:
+        - Weight assigned to the Link Capacity performance factor when load balancing option
+          is QoS.
+      returned: queried
+      type: int
+    qos_packet_rate:
+      description:
+        - Weight assigned to the Packet Rate performance factor when load balancing option
+          is QoS.
+      returned: queried
+      type: int
+    qos_rtt:
+      description:
+        - Weight assigned to the Round Trip Time performance factor when load balancing option
+          is QoS.
+      returned: queried
+      type: int
+    qos_topology:
+      description:
+        - Weight assigned to the Topology performance factor when load balancing option
+          is QoS.
+      returned: queried
+      type: int
+    qos_vs_capacity:
+      description:
+        - Weight assigned to the Virtual Server performance factor when load balancing option
+          is QoS.
+      returned: queried
+      type: int
+    qos_vs_score:
+      description:
+        - Weight assigned to the Virtual Server Score performance factor when load balancing
+          option is QoS.
+      returned: queried
+      type: int
+    ttl:
+      description:
+        - Number of seconds that the IP address, once found, is valid.
+      returned: queried
+      type: int
+    verify_member_availability:
+      description:
+        - Whether or not the system verifies the availability of the members before
+          sending a connection to them.
+      returned: queried
+      type: bool
+  sample: hash/dictionary of values
+gtm_servers:
+  description:
+    - GTM server related information.
+  returned: When C(gtm-servers) is specified in C(gather_subset).
+  type: complex
+  contains:
+    full_path:
+      description:
+        - Full name of the resource as known to BIG-IP.
+      returned: queried
+      type: str
+      sample: /Common/server1
+    name:
+      description:
+        - Relative name of the resource in BIG-IP.
+      returned: queried
+      type: str
+      sample: server1
+    datacenter:
+      description:
+        - Full name of the datacenter this server belongs to.
+      returned: queried
+      type: str
+    enabled:
+      description:
+        - Whether the server is enabled.
+      returned: queried
+      type: bool
+    disabled:
+      description:
+        - Whether the server is disabled.
+      returned: queried
+      type: bool
+    expose_route_domains:
+      description:
+        - Allows the GTM server to auto-discover the LTM virtual servers from all
+          route domains.
+      returned: queried
+      type: bool
+    iq_allow_path:
+      description:
+        - Whether the GTM uses this BIG-IP system to conduct a path probe before
+          delegating traffic to it.
+      returned: queried
+      type: bool
+    iq_allow_service_check:
+      description:
+        - Whether the GTM uses this BIG-IP system to conduct a service check probe
+          before delegating traffic to it.
+      returned: queried
+      type: bool
+    iq_allow_snmp:
+      description:
+        - Whether the GTM uses this BIG-IP system to conduct an SNMP probe
+          before delegating traffic to it.
+      returned: queried
+      type: bool
+    limit_cpu_usage:
+      description:
+        - For a server configured as a generic host, specifies the percent of CPU
+          usage, otherwise has no effect.
+      returned: queried
+      type: int
+    limit_cpu_usage_status:
+      description:
+        - Whether C(limit_cpu_usage) is enabled for this server.
+      returned: queried
+      type: bool
+    limit_max_bps:
+      description:
+        - Maximum allowable data throughput rate in bits per second for this server.
+      returned: queried
+      type: int
+    limit_max_bps_status:
+      description:
+        - Whether C(limit_max_bps) is enabled for this server.
+      returned: queried
+      type: bool
+    limit_max_connections:
+      description:
+        - Maximum number of concurrent connections, combined, for this server.
+      returned: queried
+      type: int
+    limit_max_connections_status:
+      description:
+        - Whether C(limit_max_connections) is enabled for this server.
+      returned: queried
+      type: bool
+    limit_max_pps:
+      description:
+        - Maximum allowable data transfer rate, in packets per second, for this server.
+      returned: queried
+      type: int
+    limit_max_pps_status:
+      description:
+        - Whether C(limit_max_pps) is enabled for this server.
+      returned: queried
+      type: bool
+    limit_mem_available:
+      description:
+        - For a server configured as a generic host, specifies the available memory
+          required by the virtual servers on the server.
+        - If available memory falls below this limit, the system marks the server as
+          unavailable.
+      returned: queried
+      type: int
+    limit_mem_available_status:
+      description:
+        - Whether C(limit_mem_available) is enabled for this server.
+      returned: queried
+      type: bool
+    link_discovery:
+      description:
+        - Specifies whether the system auto-discovers the links for this server.
+      returned: queried
+      type: str
+    monitors:
+      description:
+        - Specifies health monitors that the system uses to determine whether this
+          server is available for load balancing.
+      returned: queried
+      type: list
+      sample: ['/Common/https_443', '/Common/icmp']
+    monitor_type:
+      description:
+        - Whether one or more monitors need to pass, or all monitors need to pass.
+      returned: queried
+      type: str
+      sample: and_list
+    product:
+      description:
+        - Specifies the server type.
+      returned: queried
+      type: str
+    prober_fallback:
+      description:
+        - The type of prober to use to monitor this server's resources when the
+          preferred type is not available.
+      returned: queried
+      type: str
+    prober_preference:
+      description:
+        - Specifies the type of prober to use to monitor this server's resources.
+      returned: queried
+      type: str
+    virtual_server_discovery:
+      description:
+        - Whether the system auto-discovers the virtual servers for this server.
+      returned: queried
+      type: str
+    addresses:
+      description:
+        - Specifies the server IP addresses for the server.
+      returned: queried
+      type: complex
+    devices:
+      description:
+        - Specifies the names of the devices that represent this server.
+      returned: queried
+      type: complex
+    virtual_servers:
+      description:
+        - Virtual servers that are resources for this server.
+      returned: queried
+      type: complex
+  sample: hash/dictionary of values
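+# A hedged usage sketch (illustrative only): listing GTM servers that are
+# currently disabled from the documented gtm_servers key; 'provider' and
+# 'f5_info' are assumed names.
+#   - bigip_device_info:
+#       gather_subset:
+#         - gtm-servers
+#       provider: "{{ provider }}"
+#     register: f5_info
+#     delegate_to: localhost
+#   - debug:
+#       msg: "{{ f5_info.gtm_servers | selectattr('disabled') | map(attribute='full_path') | list }}"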
+gtm_wide_ips:
+  description:
+    - GTM Wide IP related information.
+    - Every "type" of wide-ip has the exact same list of possible information. Therefore,
+      the list of information here is presented once instead of 6 times.
+  returned: When any of C(gtm-wide-ips) or C(gtm-*-wide-ips) is specified in C(gather_subset).
+  type: complex
+  contains:
+    full_path:
+      description:
+        - Full name of the resource as known to BIG-IP.
+      returned: queried
+      type: str
+      sample: /Common/wide1
+    name:
+      description:
+        - Relative name of the resource in BIG-IP.
+      returned: queried
+      type: str
+      sample: wide1
+    description:
+      description:
+        - Description of the Wide IP.
+      returned: queried
+      type: str
+    enabled:
+      description:
+        - Whether the Wide IP is enabled.
+      returned: queried
+      type: bool
+    disabled:
+      description:
+        - Whether the Wide IP is disabled.
+      returned: queried
+      type: bool
+    failure_rcode:
+      description:
+        - Specifies the DNS RCODE used when C(failure_rcode_response) is C(yes).
+      returned: queried
+      type: int
+    failure_rcode_response:
+      description:
+        - When C(yes), specifies that the system returns an RCODE response to
+          Wide IP requests after exhausting all load-balancing methods.
+      returned: queried
+      type: bool
+    failure_rcode_ttl:
+      description:
+        - Specifies the negative caching TTL of the SOA for the RCODE response.
+      returned: queried
+      type: int
+    last_resort_pool:
+      description:
+        - Specifies which pool, as listed in the Pool List, the system uses as
+          the last resort pool for the Wide IP.
+      returned: queried
+      type: str
+    minimal_response:
+      description:
+        - Specifies that the system forms the smallest allowable DNS response to
+          a query.
+      returned: queried
+      type: str
+    persist_cidr_ipv4:
+      description:
+        - Specifies the number of bits the system uses to identify IPv4 addresses
+          when persistence is enabled.
+      returned: queried
+      type: int
+    persist_cidr_ipv6:
+      description:
+        - Specifies the number of bits the system uses to identify IPv6 addresses
+          when persistence is enabled.
+      returned: queried
+      type: int
+    pool_lb_mode:
+      description:
+        - Specifies the load balancing method used to select a pool in this Wide IP.
+      returned: queried
+      type: str
+    ttl_persistence:
+      description:
+        - Specifies, in seconds, the length of time for which the persistence
+          entry is valid.
+      returned: queried
+      type: int
+    pools:
+      description:
+        - Specifies the pools that this Wide IP uses for load balancing.
+      returned: queried
+      type: complex
+  sample: hash/dictionary of values
+http_monitors:
+  description: HTTP monitor related information.
+  returned: When C(http-monitors) is specified in C(gather_subset).
+  type: complex
+  contains:
+    full_path:
+      description:
+        - Full name of the resource as known to BIG-IP.
+      returned: queried
+      type: str
+      sample: /Common/http
+    name:
+      description:
+        - Relative name of the resource in BIG-IP.
+      returned: queried
+      type: str
+      sample: http
+    parent:
+      description:
+        - Profile from which this profile inherits settings.
+      returned: queried
+      type: str
+      sample: http
+    description:
+      description:
+        - Description of the resource.
+ returned: queried + type: str + sample: My monitor + adaptive: + description: + - Whether adaptive response time monitoring is enabled for this monitor. + returned: queried + type: bool + sample: no + adaptive_divergence_type: + description: + - Specifies whether the adaptive-divergence-value is C(relative) or + C(absolute). + returned: queried + type: str + sample: relative + adaptive_divergence_value: + description: + - Specifies how far from mean latency each monitor probe is allowed + to be. + returned: queried + type: int + sample: 25 + adaptive_limit: + description: + - Specifies the hard limit, in milliseconds, which the probe is not + allowed to exceed, regardless of the divergence value. + returned: queried + type: int + sample: 200 + adaptive_sampling_timespan: + description: + - Specifies the size of the sliding window, in seconds, which + records probe history. + returned: queried + type: int + sample: 300 + destination: + description: + - Specifies the IP address and service port of the resource that is + the destination of this monitor. + returned: queried + type: str + sample: "*:*" + interval: + description: + - Specifies, in seconds, the frequency at which the system issues + the monitor check when either the resource is down or the status + of the resource is unknown. + returned: queried + type: int + sample: 5 + ip_dscp: + description: + - Specifies the differentiated services code point (DSCP). + returned: queried + type: int + sample: 0 + manual_resume: + description: + - Specifies whether the system automatically changes the status of a + resource to up at the next successful monitor check. + returned: queried + type: bool + sample: yes + receive_string: + description: + - Specifies the text string that the monitor looks for in the + returned resource. + returned: queried + type: str + sample: check string + receive_disable_string: + description: + - Specifies a text string that the monitor looks for in the returned + resource. If the text string is matched in the returned resource, + the corresponding node or pool member is marked session disabled. + returned: queried + type: str + sample: check disable string + reverse: + description: + - Specifies whether the monitor operates in reverse mode. When the + monitor is in reverse mode, a successful check marks the monitored + object down instead of up. + returned: queried + type: bool + sample: no + send_string: + description: + - Specifies the text string that the monitor sends to the target + object. + returned: queried + type: str + sample: "GET /\\r\\n" + time_until_up: + description: + - Specifies the amount of time, in seconds, after the first + successful response before a node is marked up. + returned: queried + type: int + sample: 0 + timeout: + description: + - Specifies the number of seconds the target has in which to respond + to the monitor request. + returned: queried + type: int + sample: 16 + transparent: + description: + - Specifies whether the monitor operates in transparent mode. + returned: queried + type: bool + sample: no + up_interval: + description: + - Specifies, in seconds, the frequency at which the system issues + the monitor check when the resource is up. + returned: queried + type: int + sample: 0 + username: + description: + - Specifies the username, if the monitored target requires + authentication. + returned: queried + type: str + sample: user1 + sample: hash/dictionary of values +https_monitors: + description: HTTPS monitor related information. 
+ returned: When C(https-monitors) is specified in C(gather_subset). + type: complex + contains: + full_path: + description: + - Full name of the resource as known to BIG-IP. + returned: queried + type: str + sample: /Common/http + name: + description: + - Relative name of the resource in BIG-IP. + returned: queried + type: str + sample: http + parent: + description: + - Profile from which this profile inherits settings. + returned: queried + type: str + sample: http + description: + description: + - Description of the resource. + returned: queried + type: str + sample: My monitor + adaptive: + description: + - Whether adaptive response time monitoring is enabled for this monitor. + returned: queried + type: bool + sample: no + adaptive_divergence_type: + description: + - Specifies whether the adaptive-divergence-value is C(relative) or + C(absolute). + returned: queried + type: str + sample: relative + adaptive_divergence_value: + description: + - Specifies how far from mean latency each monitor probe is allowed + to be. + returned: queried + type: int + sample: 25 + adaptive_limit: + description: + - Specifies the hard limit, in milliseconds, which the probe is not + allowed to exceed, regardless of the divergence value. + returned: queried + type: int + sample: 200 + adaptive_sampling_timespan: + description: + - Specifies the size of the sliding window, in seconds, which + records probe history. + returned: queried + type: int + sample: 300 + destination: + description: + - Specifies the IP address and service port of the resource that is + the destination of this monitor. + returned: queried + type: str + sample: "*:*" + interval: + description: + - Specifies, in seconds, the frequency at which the system issues + the monitor check when either the resource is down or the status + of the resource is unknown. + returned: queried + type: int + sample: 5 + ip_dscp: + description: + - Specifies the differentiated services code point (DSCP). + returned: queried + type: int + sample: 0 + manual_resume: + description: + - Specifies whether the system automatically changes the status of a + resource to up at the next successful monitor check. + returned: queried + type: bool + sample: yes + receive_string: + description: + - Specifies the text string that the monitor looks for in the + returned resource. + returned: queried + type: str + sample: check string + receive_disable_string: + description: + - Specifies a text string that the monitor looks for in the returned + resource. If the text string is matched in the returned resource, + the corresponding node or pool member is marked session disabled. + returned: queried + type: str + sample: check disable string + reverse: + description: + - Specifies whether the monitor operates in reverse mode. When the + monitor is in reverse mode, a successful check marks the monitored + object down instead of up. + returned: queried + type: bool + sample: no + send_string: + description: + - Specifies the text string that the monitor sends to the target + object. + returned: queried + type: str + sample: "GET /\\r\\n" + ssl_profile: + description: + - Specifies the SSL profile to use for the HTTPS monitor. + returned: queried + type: str + sample: /Common/serverssl + time_until_up: + description: + - Specifies the amount of time, in seconds, after the first + successful response before a node is marked up. 
+      returned: queried
+      type: int
+      sample: 0
+    timeout:
+      description:
+        - Specifies the number of seconds the target has in which to respond
+          to the monitor request.
+      returned: queried
+      type: int
+      sample: 16
+    transparent:
+      description:
+        - Specifies whether the monitor operates in transparent mode.
+      returned: queried
+      type: bool
+      sample: no
+    up_interval:
+      description:
+        - Specifies, in seconds, the frequency at which the system issues
+          the monitor check when the resource is up.
+      returned: queried
+      type: int
+      sample: 0
+    username:
+      description:
+        - Specifies the username, if the monitored target requires
+          authentication.
+      returned: queried
+      type: str
+      sample: user1
+  sample: hash/dictionary of values
+http_profiles:
+  description: HTTP profile related information.
+  returned: When C(http-profiles) is specified in C(gather_subset).
+  type: complex
+  contains:
+    full_path:
+      description:
+        - Full name of the resource as known to BIG-IP.
+      returned: queried
+      type: str
+      sample: /Common/http
+    name:
+      description:
+        - Relative name of the resource in BIG-IP.
+      returned: queried
+      type: str
+      sample: http
+    parent:
+      description:
+        - Profile from which this profile inherits settings.
+      returned: queried
+      type: str
+      sample: http
+    description:
+      description:
+        - Description of the resource.
+      returned: queried
+      type: str
+      sample: My profile
+    accept_xff:
+      description:
+        - Enables or disables trusting the client IP address, and statistics
+          from the client IP address, based on the request's X-Forwarded-For
+          (XFF) headers, if they exist.
+      returned: queried
+      type: bool
+      sample: yes
+    allow_truncated_redirects:
+      description:
+        - Specifies the pass-through behavior when a redirect lacking the
+          trailing carriage-return and line feed pair at the end of the headers
+          is parsed.
+        - When C(no), the system silently drops the invalid HTTP traffic.
+      returned: queried
+      type: bool
+      sample: no
+    excess_client_headers:
+      description:
+        - Specifies the pass-through behavior when the C(max_header_count) value is
+          exceeded by the client.
+        - When C(reject), rejects the connection.
+      returned: queried
+      type: str
+      sample: reject
+    excess_server_headers:
+      description:
+        - Specifies the pass-through behavior when the C(max_header_count) value is
+          exceeded by the server.
+        - When C(reject), rejects the connection.
+      returned: queried
+      type: str
+      sample: reject
+    known_methods:
+      description:
+        - Optimizes the behavior of a known HTTP method in the list.
+        - By default, the list includes the HTTP/1.1 methods CONNECT,
+          DELETE, GET, HEAD, LOCK, OPTIONS, POST, PROPFIND, PUT, TRACE, and UNLOCK.
+        - If a known method is deleted from the C(known_methods) list, the
+          BIG-IP system applies the C(unknown_method) setting to manage that traffic.
+      returned: queried
+      type: list
+      sample: ['CONNECT', 'DELETE', ...]
+    max_header_count:
+      description:
+        - Specifies the maximum number of headers the system supports.
+      returned: queried
+      type: int
+      sample: 64
+    max_header_size:
+      description:
+        - Specifies the maximum size in bytes the system allows for all HTTP
+          request headers combined, including the request line.
+      returned: queried
+      type: int
+      sample: 32768
+    max_requests:
+      description:
+        - Specifies the number of requests that the system accepts on a per-connection
+          basis.
+      returned: queried
+      type: int
+      sample: 0
+    oversize_client_headers:
+      description:
+        - Specifies the pass-through behavior when the C(max_header_size) value
+          is exceeded by the client.
+      returned: queried
+      type: str
+      sample: reject
+    oversize_server_headers:
+      description:
+        - Specifies the pass-through behavior when the C(max_header_size) value
+          is exceeded by the server.
+      returned: queried
+      type: str
+      sample: reject
+    pipeline_action:
+      description:
+        - Enables or disables HTTP/1.1 pipelining.
+      returned: queried
+      type: str
+      sample: allow
+    unknown_method:
+      description:
+        - Specifies the behavior (allow, reject, or pass through) when an unknown
+          HTTP method is parsed.
+      returned: queried
+      type: str
+      sample: allow
+    default_connect_handling:
+      description:
+        - Specifies the behavior of the proxy service when handling outbound requests.
+      returned: queried
+      type: str
+      sample: deny
+    hsts_include_subdomains:
+      description:
+        - When C(yes), applies the HSTS policy to the HSTS host and its subdomains.
+      returned: queried
+      type: bool
+      sample: yes
+    hsts_enabled:
+      description:
+        - When C(yes), enables the HTTP Strict Transport Security settings.
+      returned: queried
+      type: bool
+      sample: yes
+    insert_x_forwarded_for:
+      description:
+        - When C(yes), specifies that the system inserts an X-Forwarded-For header in
+          an HTTP request with the client IP address, to use with connection pooling.
+      returned: queried
+      type: bool
+      sample: no
+    lws_max_columns:
+      description:
+        - Specifies the maximum column width for any given line, when inserting an HTTP
+          header in an HTTP request.
+      returned: queried
+      type: int
+      sample: 80
+    onconnect_transformations:
+      description:
+        - When C(yes), specifies that the system performs HTTP header transformations
+          for the purpose of keeping connections open.
+      returned: queried
+      type: bool
+      sample: yes
+    proxy_mode:
+      description:
+        - Specifies the proxy mode for this profile, either reverse, explicit, or transparent.
+      returned: queried
+      type: str
+      sample: reverse
+    redirect_rewrite:
+      description:
+        - Specifies whether the system rewrites the URIs that are part of HTTP
+          redirect (3XX) responses.
+      returned: queried
+      type: str
+      sample: none
+    request_chunking:
+      description:
+        - Specifies how the system handles HTTP content that is chunked by a client.
+      returned: queried
+      type: str
+      sample: preserve
+    response_chunking:
+      description:
+        - Specifies how the system handles HTTP content that is chunked by a server.
+      returned: queried
+      type: str
+      sample: selective
+    server_agent_name:
+      description:
+        - Specifies the string used as the server name in traffic generated by LTM.
+      returned: queried
+      type: str
+      sample: BigIP
+    sflow_poll_interval:
+      description:
+        - The maximum interval, in seconds, between two polls.
+      returned: queried
+      type: int
+      sample: 0
+    sflow_sampling_rate:
+      description:
+        - Specifies the ratio of packets observed to the samples generated.
+      returned: queried
+      type: int
+      sample: 0
+    via_request:
+      description:
+        - Specifies whether to Remove, Preserve, or Append Via headers included in
+          a client request to an origin web server.
+      returned: queried
+      type: str
+      sample: preserve
+    via_response:
+      description:
+        - Specifies whether to Remove, Preserve, or Append Via headers included in
+          an origin web server response to a client.
+      returned: queried
+      type: str
+      sample: preserve
+  sample: hash/dictionary of values
+iapp_services:
+  description: iApp v1 service related information.
+  returned: When C(iapp-services) is specified in C(gather_subset).
+  type: complex
+  contains:
+    full_path:
+      description:
+        - Full name of the resource as known to BIG-IP.
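+# Illustrative usage sketch; assumes the module is named bigip_device_info
+# (an assumption, adjust to the actual module name). This filters the
+# http_profiles list documented above for profiles that do not insert
+# X-Forwarded-For headers.
+#
+#   - name: Collect HTTP profile facts
+#     bigip_device_info:
+#       gather_subset:
+#         - http-profiles
+#     register: f5_info
+#
+#   - name: List HTTP profiles without X-Forwarded-For insertion
+#     debug:
+#       msg: "{{ f5_info.http_profiles | rejectattr('insert_x_forwarded_for') | map(attribute='full_path') | list }}"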
+      returned: queried
+      type: str
+      sample: /Common/service1
+    name:
+      description:
+        - Relative name of the resource in BIG-IP.
+      returned: queried
+      type: str
+      sample: service1
+    device_group:
+      description:
+        - The device group the iApp service is part of.
+      returned: queried
+      type: str
+      sample: /Common/dg1
+    inherited_device_group:
+      description:
+        - Whether the device group is inherited or not.
+      returned: queried
+      type: bool
+      sample: yes
+    inherited_traffic_group:
+      description:
+        - Whether the traffic group is inherited or not.
+      returned: queried
+      type: bool
+      sample: yes
+    strict_updates:
+      description:
+        - Whether strict updates are enabled or not.
+      returned: queried
+      type: bool
+      sample: yes
+    template_modified:
+      description:
+        - Whether the template that the service is based on has been modified
+          from its default value, or not.
+      returned: queried
+      type: bool
+      sample: yes
+    traffic_group:
+      description:
+        - Traffic group the service is a part of.
+      returned: queried
+      type: str
+      sample: /Common/tg
+    tables:
+      description:
+        - List of the tabular data used to create the service.
+      returned: queried
+      type: complex
+      sample: [{"name": "basic__snatpool_members"},...]
+    variables:
+      description:
+        - List of the variable data used to create the service.
+      returned: queried
+      type: complex
+      sample: [{"name": "afm__policy"},{"encrypted": "no"},{"value": "/#no_not_use#"},...]
+    metadata:
+      description:
+        - List of the metadata used to create the service.
+      returned: queried
+      type: complex
+      sample: [{"name": "var1"},{"persist": "true"},...]
+    lists:
+      description:
+        - List of the list data used to create the service.
+      returned: queried
+      type: complex
+      sample: [{"name": "irules__irules"},{"value": []},...]
+    description:
+      description:
+        - Description of the service.
+      returned: queried
+      type: str
+      sample: My service
+  sample: hash/dictionary of values
+icmp_monitors:
+  description: ICMP monitor related information.
+  returned: When C(icmp-monitors) is specified in C(gather_subset).
+  type: complex
+  contains:
+    full_path:
+      description:
+        - Full name of the resource as known to BIG-IP.
+      returned: queried
+      type: str
+      sample: /Common/icmp
+    name:
+      description:
+        - Relative name of the resource in BIG-IP.
+      returned: queried
+      type: str
+      sample: icmp
+    parent:
+      description:
+        - Profile from which this profile inherits settings.
+      returned: queried
+      type: str
+      sample: icmp
+    description:
+      description:
+        - Description of the resource.
+      returned: queried
+      type: str
+      sample: My monitor
+    adaptive:
+      description:
+        - Whether adaptive response time monitoring is enabled for this monitor.
+      returned: queried
+      type: bool
+      sample: no
+    adaptive_divergence_type:
+      description:
+        - Specifies whether the adaptive-divergence-value is C(relative) or
+          C(absolute).
+      returned: queried
+      type: str
+      sample: relative
+    adaptive_divergence_value:
+      description:
+        - Specifies how far from mean latency each monitor probe is allowed
+          to be.
+      returned: queried
+      type: int
+      sample: 25
+    adaptive_limit:
+      description:
+        - Specifies the hard limit, in milliseconds, which the probe is not
+          allowed to exceed, regardless of the divergence value.
+      returned: queried
+      type: int
+      sample: 200
+    adaptive_sampling_timespan:
+      description:
+        - Specifies the size of the sliding window, in seconds, which
+          records probe history.
+      returned: queried
+      type: int
+      sample: 300
+    destination:
+      description:
+        - Specifies the IP address and service port of the resource that is
+          the destination of this monitor.
+      returned: queried
+      type: str
+      sample: "*:*"
+    interval:
+      description:
+        - Specifies, in seconds, the frequency at which the system issues
+          the monitor check when either the resource is down or the status
+          of the resource is unknown.
+      returned: queried
+      type: int
+      sample: 5
+    manual_resume:
+      description:
+        - Specifies whether the system automatically changes the status of a
+          resource to up at the next successful monitor check.
+      returned: queried
+      type: bool
+      sample: yes
+    time_until_up:
+      description:
+        - Specifies the amount of time, in seconds, after the first
+          successful response before a node is marked up.
+      returned: queried
+      type: int
+      sample: 0
+    timeout:
+      description:
+        - Specifies the number of seconds the target has in which to respond
+          to the monitor request.
+      returned: queried
+      type: int
+      sample: 16
+    transparent:
+      description:
+        - Specifies whether the monitor operates in transparent mode.
+      returned: queried
+      type: bool
+      sample: no
+    up_interval:
+      description:
+        - Specifies, in seconds, the frequency at which the system issues
+          the monitor check when the resource is up.
+      returned: queried
+      type: int
+      sample: 0
+  sample: hash/dictionary of values
+interfaces:
+  description: Interface related information.
+  returned: When C(interfaces) is specified in C(gather_subset).
+  type: complex
+  contains:
+    full_path:
+      description:
+        - Full name of the resource as known to BIG-IP.
+      returned: queried
+      type: str
+      sample: /Common/interface1
+    name:
+      description:
+        - Relative name of the resource in BIG-IP.
+      returned: queried
+      type: str
+      sample: interface1
+    active_media_type:
+      description:
+        - Displays the current media setting for the interface.
+      returned: queried
+      type: str
+      sample: 100TX-FD
+    flow_control:
+      description:
+        - Specifies how the system controls the sending of PAUSE frames for
+          flow control.
+      returned: queried
+      type: str
+      sample: tx-rx
+    description:
+      description:
+        - Description of the interface.
+      returned: queried
+      type: str
+      sample: My interface
+    bundle:
+      description:
+        - The bundle capability on the port.
+      returned: queried
+      type: str
+      sample: not-supported
+    bundle_speed:
+      description:
+        - The bundle-speed on the port when bundle capability is
+          enabled.
+      returned: queried
+      type: str
+      sample: 100G
+    enabled:
+      description:
+        - Whether the interface is enabled or not.
+      returned: queried
+      type: bool
+      sample: yes
+    if_index:
+      description:
+        - The index assigned to this interface.
+      returned: queried
+      type: int
+      sample: 32
+    mac_address:
+      description:
+        - Displays the 6-byte ethernet address in non-case-sensitive
+          hexadecimal colon notation.
+      returned: queried
+      type: str
+      sample: "00:0b:09:88:00:9a"
+    media_sfp:
+      description:
+        - The settings for an SFP (pluggable) interface.
+      returned: queried
+      type: str
+      sample: auto
+    lldp_admin:
+      description:
+        - Sets the sending or receiving of LLDP packets on that interface.
+          Should be one of C(disable), C(txonly), C(rxonly), or C(txrx).
+      returned: queried
+      type: str
+      sample: txonly
+    mtu:
+      description:
+        - Displays the Maximum Transmission Unit (MTU) of the interface,
+          which is the maximum number of bytes in a frame without IP
+          fragmentation.
+      returned: queried
+      type: int
+      sample: 1500
+    prefer_port:
+      description:
+        - Indicates which side of a combo port the interface uses, if both
+          sides of the port have the potential for external links.
+      returned: queried
+      type: str
+      sample: sfp
+    sflow_poll_interval:
+      description:
+        - Specifies the maximum interval, in seconds, between two
+          polls.
+      returned: queried
+      type: int
+      sample: 0
+    sflow_poll_interval_global:
+      description:
+        - Specifies whether the global interface poll-interval setting
+          overrides the object-level poll-interval setting.
+      returned: queried
+      type: bool
+      sample: yes
+    stp_auto_edge_port:
+      description:
+        - STP edge port detection.
+      returned: queried
+      type: bool
+      sample: yes
+    stp_enabled:
+      description:
+        - Whether STP is enabled or not.
+      returned: queried
+      type: bool
+      sample: no
+    stp_link_type:
+      description:
+        - Specifies the STP link type for the interface.
+      returned: queried
+      type: str
+      sample: auto
+  sample: hash/dictionary of values
+irules:
+  description: iRule related information.
+  returned: When C(irules) is specified in C(gather_subset).
+  type: complex
+  contains:
+    full_path:
+      description:
+        - Full name of the resource as known to BIG-IP.
+      returned: queried
+      type: str
+      sample: /Common/irule1
+    name:
+      description:
+        - Relative name of the resource in BIG-IP.
+      returned: queried
+      type: str
+      sample: irule1
+    ignore_verification:
+      description:
+        - Whether the verification of the iRule should be ignored or not.
+      returned: queried
+      type: bool
+      sample: no
+    checksum:
+      description:
+        - Checksum of the iRule as calculated by BIG-IP.
+      returned: queried
+      type: str
+      sample: d41d8cd98f00b204e9800998ecf8427e
+    definition:
+      description:
+        - The actual definition of the iRule.
+      returned: queried
+      type: str
+      sample: when HTTP_REQUEST ...
+    signature:
+      description:
+        - The calculated signature of the iRule.
+      returned: queried
+      type: str
+      sample: WsYy2M6xMqvosIKIEH/FSsvhtWMe6xKOA6i7f...
+  sample: hash/dictionary of values
+ltm_pools:
+  description: List of LTM (Local Traffic Manager) pools.
+  returned: When C(ltm-pools) is specified in C(gather_subset).
+  type: complex
+  contains:
+    active_member_count:
+      description:
+        - The number of active pool members in the pool.
+      returned: queried
+      type: int
+      sample: 3
+    all_avg_queue_entry_age:
+      description:
+        - Average queue entry age, for both the pool and its members.
+      returned: queried
+      type: int
+      sample: 5
+    all_max_queue_entry_age_ever:
+      description:
+        - Maximum queue entry age ever, for both the pool and its members.
+      returned: queried
+      type: int
+      sample: 2
+    all_max_queue_entry_age_recently:
+      description:
+        - Maximum queue entry age recently, for both the pool and its members.
+      returned: queried
+      type: int
+      sample: 5
+    all_num_connections_queued_now:
+      description:
+        - Number of connections queued now, for both the pool and its members.
+      returned: queried
+      type: int
+      sample: 20
+    all_num_connections_serviced:
+      description:
+        - Number of connections serviced, for both the pool and its members.
+      returned: queried
+      type: int
+      sample: 15
+    all_queue_head_entry_age:
+      description:
+        - Queue head entry age, for both the pool and its members.
+      returned: queried
+      type: int
+      sample: 4
+    available_member_count:
+      description:
+        - The number of available pool members in the pool.
+      returned: queried
+      type: int
+      sample: 4
+    availability_status:
+      description:
+        - The availability of the pool.
+ returned: queried + type: str + sample: offline + allow_nat: + description: + - Whether NATs are automatically enabled or disabled for any connections using this pool. + returned: queried + type: bool + sample: yes + allow_snat: + description: + - Whether SNATs are automatically enabled or disabled for any connections using this pool. + returned: queried + type: bool + sample: yes + client_ip_tos: + description: + - Whether the system sets a Type of Service (ToS) level within a packet sent to the client, + based on the targeted pool. + - Values can range from C(0) to C(255), or be set to C(pass-through) or C(mimic). + returned: queried + type: str + sample: pass-through + client_link_qos: + description: + - Whether the system sets a Quality of Service (QoS) level within a packet sent to the client, + based on the targeted pool. + - Values can range from C(0) to C(7), or be set to C(pass-through). + returned: queried + type: str + sample: pass-through + current_sessions: + description: + - Current sessions. + returned: queried + type: int + sample: 2 + description: + description: + - Description of the pool. + returned: queried + type: str + sample: my pool + enabled_status: + description: + - The enabled-ness of the pool. + returned: queried + type: str + sample: enabled + full_path: + description: + - Full name of the resource as known to BIG-IP. + returned: queried + type: str + sample: /Common/pool1 + ignore_persisted_weight: + description: + - Do not count the weight of persisted connections on pool members when making load balancing decisions. + returned: queried + type: bool + sample: no + lb_method: + description: + - Load balancing method used by the pool. + returned: queried + type: str + sample: round-robin + member_count: + description: + - Total number of members in the pool. + returned: queried + type: int + sample: 50 + metadata: + description: + - Dictionary of arbitrary key/value pairs set on the pool. + returned: queried + type: complex + sample: hash/dictionary of values + minimum_active_members: + description: + - Whether the system load balances traffic according to the priority number assigned to the pool member. + - This parameter is identical to C(priority_group_activation) and is just an alias for it. + returned: queried + type: int + sample: 2 + minimum_up_members: + description: + - The minimum number of pool members that must be up. + returned: queried + type: int + sample: 1 + minimum_up_members_action: + description: + - The action to take if the C(minimum_up_members_checking) is enabled and the number of active pool + members falls below the number specified in C(minimum_up_members). + returned: queried + type: str + sample: failover + minimum_up_members_checking: + description: + - Enables or disables the C(minimum_up_members) feature. + returned: queried + type: bool + sample: no + name: + description: + - Relative name of the resource in BIG-IP. + returned: queried + type: str + sample: pool1 + pool_avg_queue_entry_age: + description: + - Average queue entry age, for the pool only. + returned: queried + type: int + sample: 5 + pool_max_queue_entry_age_ever: + description: + - Maximum queue entry age ever, for the pool only. + returned: queried + type: int + sample: 2 + pool_max_queue_entry_age_recently: + description: + - Maximum queue entry age recently, for the pool only. + returned: queried + type: int + sample: 5 + pool_num_connections_queued_now: + description: + - Number of connections queued now, for the pool only. 
+ returned: queried + type: int + sample: 20 + pool_num_connections_serviced: + description: + - Number of connections serviced, for the pool only. + returned: queried + type: int + sample: 15 + pool_queue_head_entry_age: + description: + - Queue head entry age, for the pool only. + returned: queried + type: int + sample: 4 + priority_group_activation: + description: + - Whether the system load balances traffic according to the priority number assigned to the pool member. + - This parameter is identical to C(minimum_active_members) and is just an alias for it. + returned: queried + type: int + sample: 2 + queue_depth_limit: + description: + - The maximum number of connections that may simultaneously be queued to go to any member of this pool. + returned: queried + type: int + sample: 3 + queue_on_connection_limit: + description: + - Enable or disable queuing connections when pool member or node connection limits are reached. + returned: queried + type: bool + sample: yes + queue_time_limit: + description: + - Specifies the maximum time, in milliseconds, a connection will remain enqueued. + returned: queried + type: int + sample: 0 + real_session: + description: + - The actual REST API value for the C(session) attribute. + - This is different from the C(state) return value, insofar as the return value + can be considered a generalization of all available sessions, instead of the + specific value of the session. + returned: queried + type: str + sample: monitor-enabled + real_state: + description: + - The actual REST API value for the C(state) attribute. + - This is different from the C(state) return value, insofar as the return value + can be considered a generalization of all available states, instead of the + specific value of the state. + returned: queried + type: str + sample: up + reselect_tries: + description: + - The number of times the system tries to contact a pool member after a passive failure. + returned: queried + type: int + sample: 0 + server_ip_tos: + description: + - The Type of Service (ToS) level to use when sending packets to a server. + returned: queried + type: str + sample: pass-through + server_link_qos: + description: + - The Quality of Service (QoS) level to use when sending packets to a server. + returned: queried + type: str + sample: pass-through + service_down_action: + description: + - The action to take if the service specified in the pool is marked down. + returned: queried + type: str + sample: none + server_side_bits_in: + description: + - Number of server-side ingress bits. + returned: queried + type: int + sample: 1000 + server_side_bits_out: + description: + - Number of server-side egress bits. + returned: queried + type: int + sample: 200 + server_side_current_connections: + description: + - Number of current connections server-side. + returned: queried + type: int + sample: 300 + server_side_max_connections: + description: + - Maximum number of connections server-side. + returned: queried + type: int + sample: 40 + server_side_pkts_in: + description: + - Number of server-side ingress packets. + returned: queried + type: int + sample: 1098384 + server_side_pkts_out: + description: + - Number of server-side egress packets. + returned: queried + type: int + sample: 3484734 + server_side_total_connections: + description: + - Total number of connections. + returned: queried + type: int + sample: 24 + slow_ramp_time: + description: + - The ramp time for the pool. 
+        - This provides the ability to cause a pool member that has just been enabled,
+          or marked up, to receive proportionally less traffic than other members in the pool.
+      returned: queried
+      type: int
+      sample: 10
+    status_reason:
+      description:
+        - If there is a problem with the status of the pool, that problem is reported here.
+      returned: queried
+      type: str
+      sample: The children pool member(s) are down.
+    members:
+      description: List of members that belong to the pool.
+      returned: when members exist in the pool.
+      type: complex
+      contains:
+        address:
+          description: IP address of the pool member.
+          returned: queried
+          type: str
+          sample: 1.1.1.1
+        connection_limit:
+          description: The maximum number of concurrent connections allowed for a pool member.
+          returned: queried
+          type: int
+          sample: 0
+        description:
+          description: The description of the pool member.
+          returned: queried
+          type: str
+          sample: pool member 1
+        dynamic_ratio:
+          description:
+            - A range of numbers that you want the system to use in conjunction with the ratio load balancing method.
+          returned: queried
+          type: int
+          sample: 1
+        ephemeral:
+          description:
+            - Whether the node backing the pool member is ephemeral or not.
+          returned: queried
+          type: bool
+          sample: yes
+        fqdn_autopopulate:
+          description:
+            - Whether the node should scale to the IP address set returned by DNS.
+          returned: queried
+          type: bool
+          sample: yes
+        full_path:
+          description:
+            - Full name of the resource as known to BIG-IP.
+            - Includes the port in the name.
+          returned: queried
+          type: str
+          sample: "/Common/member:80"
+        inherit_profile:
+          description:
+            - Whether the pool member inherits the encapsulation profile from the parent pool.
+          returned: queried
+          type: bool
+          sample: no
+        logging:
+          description:
+            - Whether the monitor applied should log its actions.
+          returned: queried
+          type: bool
+          sample: no
+        monitors:
+          description:
+            - Monitors active on the pool member. Monitor names are in their "full_path" form.
+          returned: queried
+          type: list
+          sample: ['/Common/http']
+        name:
+          description:
+            - Relative name of the resource in BIG-IP.
+          returned: queried
+          type: str
+          sample: "member:80"
+        partition:
+          description:
+            - Partition that the member exists on.
+          returned: queried
+          type: str
+          sample: Common
+        priority_group:
+          description:
+            - The priority group within the pool for this pool member.
+          returned: queried
+          type: int
+          sample: 0
+        encapsulation_profile:
+          description:
+            - The encapsulation profile to use for the pool member.
+          returned: queried
+          type: str
+          sample: ip4ip4
+        rate_limit:
+          description:
+            - The maximum number of connections per second allowed for a pool member.
+          returned: queried
+          type: bool
+          sample: no
+        ratio:
+          description:
+            - The weight of the pool member for load balancing purposes.
+          returned: queried
+          type: int
+          sample: 1
+        session:
+          description:
+            - Enables or disables the pool member for new sessions.
+          returned: queried
+          type: str
+          sample: monitor-enabled
+        state:
+          description:
+            - Controls the state of the pool member, overriding any monitors.
+          returned: queried
+          type: str
+          sample: down
+        total_requests:
+          description:
+            - Total requests.
+          returned: queried
+          type: int
+          sample: 8
+  sample: hash/dictionary of values
+ltm_policies:
+  description: List of LTM (Local Traffic Manager) policies.
+  returned: When C(ltm-policies) is specified in C(gather_subset).
+  type: complex
+  contains:
+    name:
+      description:
+        - Relative name of the resource in BIG-IP.
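+# Illustrative usage sketch; assumes the module is named bigip_device_info
+# (an assumption). Each entry in ltm_pools carries a nested members list, so
+# finding members that are forced down is a two-level walk:
+#
+#   - name: Collect LTM pool facts
+#     bigip_device_info:
+#       gather_subset:
+#         - ltm-pools
+#     register: f5_info
+#
+#   - name: Show members forced down, per pool
+#     debug:
+#       msg: "{{ item.full_path }} -> {{ item.members | default([]) | selectattr('state', 'eq', 'down') | map(attribute='name') | list }}"
+#     loop: "{{ f5_info.ltm_pools }}"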
+      returned: queried
+      type: str
+      sample: policy1
+    full_path:
+      description:
+        - Full name of the resource as known to BIG-IP.
+      returned: queried
+      type: str
+      sample: /Common/policy1
+    description:
+      description:
+        - Description of the policy.
+      returned: queried
+      type: str
+      sample: My policy
+    strategy:
+      description:
+        - The match strategy for the policy.
+      returned: queried
+      type: str
+      sample: /Common/first-match
+    requires:
+      description:
+        - Aspects of the system required by this policy.
+      returned: queried
+      type: list
+      sample: ['http']
+    controls:
+      description:
+        - Aspects of the system controlled by this policy.
+      returned: queried
+      type: list
+      sample: ['forwarding']
+    status:
+      description:
+        - Indicates published or draft policy status.
+      returned: queried
+      type: str
+      sample: draft
+    rules:
+      description:
+        - List of LTM (Local Traffic Manager) policy rules.
+      returned: when rules are defined in the policy.
+      type: complex
+      contains:
+        actions:
+          description:
+            - The actions the policy will take when a match is encountered.
+          returned: when actions are defined in the rule.
+          type: complex
+          contains:
+            http_reply:
+              description:
+                - Indicates whether the action affects a reply to a given HTTP request.
+              returned: when defined in the action.
+              type: bool
+              sample: yes
+            redirect:
+              description:
+                - This action will redirect a request.
+              returned: when defined in the action.
+              type: bool
+              sample: no
+            request:
+              description:
+                - This policy action is performed on connection requests.
+              returned: when defined in the action.
+              type: bool
+              sample: no
+            location:
+              description:
+                - This action will come from the given location.
+              returned: when defined in the action.
+              type: str
+              sample: "tcl:https://[getfield [HTTP::host] \\\":\\\" 1][HTTP::uri]"
+          sample: hash/dictionary of values
+        conditions:
+          description:
+            - The conditions that a policy will match on.
+          returned: when conditions are defined in the rule.
+          type: complex
+          contains:
+            case_insensitive:
+              description:
+                - The value matched on is case insensitive.
+              returned: when defined in the condition.
+              type: bool
+              sample: no
+            case_sensitive:
+              description:
+                - The value matched on is case sensitive.
+              returned: when defined in the condition.
+              type: bool
+              sample: yes
+            contains_string:
+              description:
+                - The value matches if it contains a certain string.
+              returned: when defined in the condition.
+              type: bool
+              sample: yes
+            external:
+              description:
+                - The value matched on is from the external side of a connection.
+              returned: when defined in the condition.
+              type: bool
+              sample: yes
+            http_basic_auth:
+              description:
+                - This condition matches on basic HTTP authorization.
+              returned: when defined in the condition.
+              type: bool
+              sample: no
+            http_host:
+              description:
+                - This condition matches on an HTTP host.
+              returned: when defined in the condition.
+              type: bool
+              sample: yes
+            http_uri:
+              description:
+                - This condition matches on an HTTP URI.
+              returned: when defined in the condition.
+              type: bool
+              sample: no
+            request:
+              description:
+                - This policy will match on a request.
+              returned: when defined in the condition.
+              type: bool
+              sample: yes
+            username:
+              description:
+                - Matches on a username.
+              returned: when defined in the condition.
+              type: bool
+              sample: yes
+            all:
+              description:
+                - Matches all.
+              returned: when defined in the condition.
+              type: bool
+              sample: yes
+            values:
+              description:
+                - The specified values will be matched on.
+              returned: when defined in the condition.
+              type: list
+              sample: ['foo.bar.com', 'baz.cool.com']
+          sample: hash/dictionary of values
+      sample: hash/dictionary of values
+  sample: hash/dictionary of values
+nodes:
+  description: Node related information.
+  returned: When C(nodes) is specified in C(gather_subset).
+  type: complex
+  contains:
+    full_path:
+      description:
+        - Full name of the resource as known to BIG-IP.
+      returned: queried
+      type: str
+      sample: /Common/5.6.7.8
+    name:
+      description:
+        - Relative name of the resource in BIG-IP.
+      returned: queried
+      type: str
+      sample: 5.6.7.8
+    ratio:
+      description:
+        - Fixed size ratio used for node during C(Ratio) load balancing.
+      returned: queried
+      type: int
+      sample: 10
+    description:
+      description:
+        - Description of the node.
+      returned: queried
+      type: str
+      sample: My node
+    connection_limit:
+      description:
+        - Maximum number of connections that the node can handle.
+      returned: queried
+      type: int
+      sample: 100
+    address:
+      description:
+        - IP address of the node.
+      returned: queried
+      type: str
+      sample: 2.3.4.5
+    dynamic_ratio:
+      description:
+        - Dynamic ratio number for the node used when doing C(Dynamic Ratio) load balancing.
+      returned: queried
+      type: int
+      sample: 200
+    rate_limit:
+      description:
+        - Maximum number of connections per second allowed for the node.
+      returned: queried
+      type: int
+      sample: 1000
+    monitor_status:
+      description:
+        - Status of the node as reported by the monitor(s) associated with it.
+        - This value is also used in determining node C(state).
+      returned: queried
+      type: str
+      sample: down
+    session_status:
+      description:
+        - The session status of the node.
+        - This value is also used in determining node C(state).
+      returned: queried
+      type: str
+      sample: enabled
+    availability_status:
+      description:
+        - The availability of the node.
+      returned: queried
+      type: str
+      sample: offline
+    enabled_status:
+      description:
+        - The enabled-ness of the node.
+      returned: queried
+      type: str
+      sample: enabled
+    status_reason:
+      description:
+        - If there is a problem with the status of the node, that problem is reported here.
+      returned: queried
+      type: str
+      sample: /Common/https_443 No successful responses received...
+    monitor_rule:
+      description:
+        - A string representation of the full monitor rule.
+      returned: queried
+      type: str
+      sample: /Common/https_443 and /Common/icmp
+    monitors:
+      description:
+        - A list of the monitors identified in the C(monitor_rule).
+      returned: queried
+      type: list
+      sample: ['/Common/https_443', '/Common/icmp']
+    monitor_type:
+      description:
+        - The C(monitor_type) field related to the C(bigip_node) module, for this node's
+          monitors.
+      returned: queried
+      type: str
+      sample: and_list
+  sample: hash/dictionary of values
+oneconnect_profiles:
+  description: OneConnect profile related information.
+  returned: When C(oneconnect-profiles) is specified in C(gather_subset).
+  type: complex
+  contains:
+    full_path:
+      description:
+        - Full name of the resource as known to BIG-IP.
+      returned: queried
+      type: str
+      sample: /Common/oneconnect
+    name:
+      description:
+        - Relative name of the resource in BIG-IP.
+      returned: queried
+      type: str
+      sample: oneconnect
+    parent:
+      description:
+        - Profile from which this profile inherits settings.
+      returned: queried
+      type: str
+      sample: oneconnect
+    description:
+      description:
+        - Description of the resource.
+      returned: queried
+      type: str
+      sample: My profile
+    idle_timeout_override:
+      description:
+        - Specifies the number of seconds that a connection is idle before
+          the connection flow is eligible for deletion.
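+# Illustrative usage sketch; assumes the module is named bigip_device_info
+# (an assumption). The nodes facts above expose both monitor_status and
+# availability_status, which makes unhealthy-node reporting a simple filter:
+#
+#   - name: Collect node facts
+#     bigip_device_info:
+#       gather_subset:
+#         - nodes
+#     register: f5_info
+#
+#   - name: Report nodes whose monitors consider them down
+#     debug:
+#       msg: "{{ f5_info.nodes | selectattr('monitor_status', 'eq', 'down') | map(attribute='full_path') | list }}"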
+ returned: queried + type: int + sample: 1000 + limit_type: + description: + - When C(none), simultaneous in-flight requests and responses over TCP + connections to a pool member are counted toward the limit. + - When C(idle), idle connections will be dropped as the TCP connection + limit is reached. + - When C(strict), the TCP connection limit is honored with no + exceptions. This means that idle connections will prevent new TCP + connections from being made until they expire, even if they could + otherwise be reused. + returned: queried + type: str + sample: idle + max_age: + description: + - Specifies the maximum age, in number of seconds, of a connection + in the connection reuse pool. + returned: queried + type: int + sample: 100 + max_reuse: + description: + - Specifies the maximum number of times that a server connection can + be reused. + returned: queried + type: int + sample: 1000 + max_size: + description: + - Specifies the maximum number of connections that the system holds + in the connection reuse pool. + - If the pool is already full, then the server connection closes after + the response is completed. + returned: queried + type: int + sample: 1000 + share_pools: + description: + - Indicates that connections may be shared not only within a virtual + server, but also among similar virtual servers. + returned: queried + type: bool + sample: yes + source_mask: + description: + - Specifies a source IP mask. + - If no mask is provided, the value C(any6) is used. + returned: queried + type: str + sample: 255.255.255.0 + sample: hash/dictionary of values +partitions: + description: Partition related information. + returned: When C(partitions) is specified in C(gather_subset). + type: complex + contains: + full_path: + description: + - Full name of the resource as known to BIG-IP. + returned: queried + type: str + sample: Common + name: + description: + - Relative name of the resource in BIG-IP. + returned: queried + type: str + sample: Common + description: + description: + - Description of the partition. + returned: queried + type: str + sample: Tenant 1 + default_route_domain: + description: + - ID of the route domain that is associated with the IP addresses that reside + in the partition. + returned: queried + type: int + sample: 0 + sample: hash/dictionary of values +provision_info: + description: Module provisioning related information. + returned: When C(provision-info) is specified in C(gather_subset). + type: complex + contains: + full_path: + description: + - Full name of the resource as known to BIG-IP. + returned: queried + type: str + sample: asm + name: + description: + - Relative name of the resource in BIG-IP. + returned: queried + type: str + sample: asm + cpu_ratio: + description: + - Ratio of CPU allocated to this module. + - Only relevant if C(level) was specified as C(custom). Otherwise, this value + will be reported as C(0). + returned: queried + type: int + sample: 0 + disk_ratio: + description: + - Ratio of disk allocated to this module. + - Only relevant if C(level) was specified as C(custom). Otherwise, this value + will be reported as C(0). + returned: queried + type: int + sample: 0 + memory_ratio: + description: + - Ratio of memory allocated to this module. + - Only relevant if C(level) was specified as C(custom). Otherwise, this value + will be reported as C(0). + returned: queried + type: int + sample: 0 + level: + description: + - Provisioned level of the module on BIG-IP. 
+        - Valid return values can include C(none), C(minimum), C(nominal), C(dedicated),
+          and C(custom).
+      returned: queried
+      type: str
+      sample: none
+  sample: hash/dictionary of values
+self_ips:
+  description: Self-IP related information.
+  returned: When C(self-ips) is specified in C(gather_subset).
+  type: complex
+  contains:
+    full_path:
+      description:
+        - Full name of the resource as known to BIG-IP.
+      returned: queried
+      type: str
+      sample: /Common/self1
+    name:
+      description:
+        - Relative name of the resource in BIG-IP.
+      returned: queried
+      type: str
+      sample: self1
+    description:
+      description:
+        - Description of the Self-IP.
+      returned: queried
+      type: str
+      sample: My self-ip
+    netmask:
+      description:
+        - Netmask portion of the IP address, in dotted notation.
+      returned: queried
+      type: str
+      sample: 255.255.255.0
+    netmask_cidr:
+      description:
+        - Netmask portion of the IP address, in CIDR notation.
+      returned: queried
+      type: int
+      sample: 24
+    floating:
+      description:
+        - Whether the Self-IP is a floating address or not.
+      returned: queried
+      type: bool
+      sample: yes
+    traffic_group:
+      description:
+        - Traffic group the Self-IP is associated with.
+      returned: queried
+      type: str
+      sample: /Common/traffic-group-local-only
+    service_policy:
+      description:
+        - Service policy assigned to the Self-IP.
+      returned: queried
+      type: str
+      sample: /Common/service1
+    vlan:
+      description:
+        - VLAN associated with the Self-IP.
+      returned: queried
+      type: str
+      sample: /Common/vlan1
+    allow_access_list:
+      description:
+        - List of protocols, and optionally their ports, that are allowed to access the
+          Self-IP. Also known as port-lockdown in the web interface.
+        - Items in the list are in the format of "protocol:port". Some items may not
+          have a port associated with them and in those cases the port is C(0).
+      returned: queried
+      type: list
+      sample: ['tcp:80', 'egp:0']
+    traffic_group_inherited:
+      description:
+        - Whether or not the traffic group is inherited.
+      returned: queried
+      type: bool
+      sample: no
+  sample: hash/dictionary of values
+server_ssl_profiles:
+  description: Server SSL related information.
+  returned: When C(server-ssl-profiles) is specified in C(gather_subset).
+  type: complex
+  contains:
+    full_path:
+      description:
+        - Full name of the resource as known to BIG-IP.
+      returned: queried
+      type: str
+      sample: serverssl
+    name:
+      description:
+        - Relative name of the resource in BIG-IP.
+      returned: queried
+      type: str
+      sample: serverssl
+    description:
+      description:
+        - Description of the resource.
+      returned: queried
+      type: str
+      sample: My profile
+    parent:
+      description:
+        - Profile from which this profile inherits settings.
+      returned: queried
+      type: str
+      sample: serverssl
+    alert_timeout:
+      description:
+        - Maximum time period, in seconds, to keep the SSL
+          session active after an alert message is sent, or indefinite.
+      returned: queried
+      type: str
+      sample: 100
+    allow_expired_crl:
+      description:
+        - Use the specified CRL file even if it has expired.
+      returned: queried
+      type: bool
+      sample: yes
+    authentication_frequency:
+      description:
+        - Specifies the frequency of authentication.
+      returned: queried
+      type: str
+      sample: once
+    authenticate_depth:
+      description:
+        - The client certificate chain maximum traversal depth.
+      returned: queried
+      type: int
+      sample: 9
+    authenticate_name:
+      description:
+        - Common Name (CN) that is embedded in a server certificate.
+        - The system authenticates a server based on the specified CN.
+      returned: queried
+      type: str
+      sample: foo
+    bypass_on_client_cert_fail:
+      description:
+        - Enables or disables SSL forward proxy bypass on failing to get the
+          client certificate that the server asks for.
+      returned: queried
+      type: bool
+      sample: yes
+    bypass_on_handshake_alert:
+      description:
+        - Enables or disables SSL forward proxy bypass on receiving
+          handshake_failure, protocol_version or unsupported_extension alert
+          message during the server-side SSL handshake.
+      returned: queried
+      type: bool
+      sample: no
+    c3d_ca_cert:
+      description:
+        - Name of the certificate file that is used as the
+          certification authority certificate when SSL client certificate
+          constrained delegation is enabled.
+      returned: queried
+      type: str
+      sample: /Common/cacert.crt
+    c3d_ca_key:
+      description:
+        - Name of the key file that is used as the
+          certification authority key when SSL client certificate
+          constrained delegation is enabled.
+      returned: queried
+      type: str
+      sample: /Common/default.key
+    c3d_cert_extension_includes:
+      description:
+        - Extensions of the client certificates to be included
+          in the generated certificates using SSL client certificate
+          constrained delegation.
+      returned: queried
+      type: list
+      sample: [ "basic-constraints", "extended-key-usage", ... ]
+    c3d_cert_lifespan:
+      description:
+        - Lifespan of the certificate generated using the SSL
+          client certificate constrained delegation.
+      returned: queried
+      type: int
+      sample: 24
+    ca_file:
+      description:
+        - Certificate authority file name.
+      returned: queried
+      type: str
+      sample: default.crt
+    cache_size:
+      description:
+        - The SSL session cache size.
+      returned: queried
+      type: int
+      sample: 262144
+    cache_timeout:
+      description:
+        - The SSL session cache timeout value, which is the usable
+          lifetime, in seconds, of negotiated SSL session IDs.
+      returned: queried
+      type: int
+      sample: 86400
+    cert:
+      description:
+        - The name of the certificate installed on the traffic
+          management system for the purpose of terminating or initiating an
+          SSL connection.
+      returned: queried
+      type: str
+      sample: /Common/default.crt
+    chain:
+      description:
+        - Specifies or builds a certificate chain file that a client can use
+          to authenticate the profile.
+      returned: queried
+      type: str
+      sample: /Common/default.crt
+    cipher_group:
+      description:
+        - Specifies a cipher group.
+      returned: queried
+      type: str
+    ciphers:
+      description:
+        - Specifies a cipher name.
+      returned: queried
+      type: str
+      sample: DEFAULT
+    crl_file:
+      description:
+        - Specifies the certificate revocation list file name.
+      returned: queried
+      type: str
+    expire_cert_response_control:
+      description:
+        - Specifies the BIG-IP action when the server certificate has
+          expired.
+      returned: queried
+      type: str
+      sample: drop
+    handshake_timeout:
+      description:
+        - Specifies the handshake timeout in seconds.
+      returned: queried
+      type: str
+      sample: 10
+    key:
+      description:
+        - Specifies the key file name. Specifies the name of the key
+          installed on the traffic management system for the purpose of
+          terminating or initiating an SSL connection.
+      returned: queried
+      type: str
+      sample: /Common/default.key
+    max_active_handshakes:
+      description:
+        - Specifies the maximum number of allowed active SSL handshakes.
+      returned: queried
+      type: str
+      sample: 100
+    mod_ssl_methods:
+      description:
+        - Enables or disables ModSSL methods.
+      returned: queried
+      type: bool
+      sample: yes
+    mode:
+      description:
+        - Enables or disables SSL processing.
+      returned: queried
+      type: bool
+      sample: no
+    ocsp:
+      description:
+        - Specifies the name of the OCSP profile used to validate the
+          status of the server certificate.
+      returned: queried
+      type: str
+    options:
+      description:
+        - Enables options, including some industry-related workarounds.
+      returned: queried
+      type: list
+      sample: [ "netscape-reuse-cipher-change-bug", "dont-insert-empty-fragments" ]
+    peer_cert_mode:
+      description:
+        - Specifies the peer certificate mode.
+      returned: queried
+      type: str
+      sample: ignore
+    proxy_ssl:
+      description:
+        - Allows further modification of application traffic within
+          an SSL tunnel while still allowing the server to perform necessary
+          authorization, authentication, and auditing steps.
+      returned: queried
+      type: bool
+      sample: yes
+    proxy_ssl_passthrough:
+      description:
+        - Allows Proxy SSL to pass through the traffic when the ciphersuite negotiated
+          between the client and server is not supported.
+      returned: queried
+      type: bool
+      sample: yes
+    renegotiate_period:
+      description:
+        - Number of seconds from the initial connect time
+          after which the system renegotiates an SSL session.
+      returned: queried
+      type: str
+      sample: indefinite
+    renegotiate_size:
+      description:
+        - Specifies a throughput size, in megabytes, of SSL renegotiation.
+      returned: queried
+      type: str
+      sample: indefinite
+    renegotiation:
+      description:
+        - Whether renegotiations are enabled.
+      returned: queried
+      type: bool
+      sample: yes
+    retain_certificate:
+      description:
+        - The APM module requires the certificate to be stored in the SSL session.
+          When C(no), the certificate is not stored in the SSL session.
+      returned: queried
+      type: bool
+      sample: no
+    generic_alert:
+      description:
+        - Enables or disables generic-alert.
+      returned: queried
+      type: bool
+      sample: yes
+    secure_renegotiation:
+      description:
+        - Specifies the secure renegotiation mode.
+      returned: queried
+      type: str
+      sample: require
+    server_name:
+      description:
+        - Server name to be included in the SNI (server name
+          indication) extension during SSL handshake in ClientHello.
+      returned: queried
+      type: str
+    session_mirroring:
+      description:
+        - Enables or disables the mirroring of sessions to the high availability
+          peer.
+      returned: queried
+      type: bool
+      sample: yes
+    session_ticket:
+      description:
+        - Enables or disables session-ticket.
+      returned: queried
+      type: bool
+      sample: no
+    sni_default:
+      description:
+        - When C(yes), this profile is the default SSL profile when the server
+          name in a client connection does not match any configured server
+          names, or a client connection does not specify any server name at
+          all.
+      returned: queried
+      type: bool
+      sample: yes
+    sni_require:
+      description:
+        - When C(yes), connections to a server that does not support the SNI
+          extension will be rejected.
+      returned: queried
+      type: bool
+      sample: no
+    ssl_c3d:
+      description:
+        - Enables or disables SSL client certificate constrained delegation.
+      returned: queried
+      type: bool
+      sample: yes
+    ssl_forward_proxy_enabled:
+      description:
+        - Enables or disables the ssl-forward-proxy feature.
+      returned: queried
+      type: bool
+      sample: no
+    ssl_sign_hash:
+      description:
+        - Specifies the SSL sign hash algorithm which is used to sign and verify
+          SSL Server Key Exchange and Certificate Verify messages for the
+          specified SSL profiles.
+      returned: queried
+      type: str
+      sample: sha1
+    ssl_forward_proxy_bypass:
+      description:
+        - Enables or disables the ssl-forward-proxy-bypass feature.
+      returned: queried
+      type: bool
+      sample: yes
+    strict_resume:
+      description:
+        - Enables or disables the resumption of SSL sessions after an
+          unclean shutdown.
+      returned: queried
+      type: bool
+      sample: no
+    unclean_shutdown:
+      description:
+        - Specifies, when C(yes), that the SSL profile performs unclean
+          shutdowns of all SSL connections, which means that underlying TCP
+          connections are closed without exchanging the required SSL
+          shutdown alerts.
+      returned: queried
+      type: bool
+      sample: yes
+    untrusted_cert_response_control:
+      description:
+        - Specifies the BIG-IP action when the server certificate has
+          an untrusted CA.
+      returned: queried
+      type: str
+      sample: drop
+  sample: hash/dictionary of values
+software_hotfixes:
+  description: List of software hotfixes.
+  returned: When C(software-hotfixes) is specified in C(gather_subset).
+  type: complex
+  contains:
+    name:
+      description:
+        - Name of the image.
+      returned: queried
+      type: str
+      sample: Hotfix-BIGIP-13.0.0.3.0.1679-HF3.iso
+    full_path:
+      description:
+        - Full name of the resource as known to BIG-IP.
+      returned: queried
+      type: str
+      sample: Hotfix-BIGIP-13.0.0.3.0.1679-HF3.iso
+    build:
+      description:
+        - Build number of the image.
+        - This is usually a sub-string of the C(name).
+      returned: queried
+      type: str
+      sample: 3.0.1679
+    checksum:
+      description:
+        - MD5 checksum of the image.
+        - Note that this is the checksum that is stored inside the ISO. It is not
+          the actual checksum of the ISO.
+      returned: queried
+      type: str
+      sample: df1ec715d2089d0fa54c0c4284656a98
+    product:
+      description:
+        - Product contained in the ISO.
+      returned: queried
+      type: str
+      sample: BIG-IP
+    id:
+      description:
+        - ID component of the image.
+        - This is usually a sub-string of the C(name).
+      returned: queried
+      type: str
+      sample: HF3
+    title:
+      description:
+        - Human friendly name of the image.
+      returned: queried
+      type: str
+      sample: Hotfix Version 3.0.1679
+    verified:
+      description:
+        - Whether or not the system has verified this image.
+      returned: queried
+      type: bool
+      sample: yes
+    version:
+      description:
+        - Version of software contained in the image.
+        - This is a sub-string of the C(name).
+      returned: queried
+      type: str
+      sample: 13.0.0
+  sample: hash/dictionary of values
+software_images:
+  description: List of software images.
+  returned: When C(software-images) is specified in C(gather_subset).
+  type: complex
+  contains:
+    name:
+      description:
+        - Name of the image.
+      returned: queried
+      type: str
+      sample: BIGIP-13.1.0.7-0.0.1.iso
+    full_path:
+      description:
+        - Full name of the resource as known to BIG-IP.
+      returned: queried
+      type: str
+      sample: BIGIP-13.1.0.7-0.0.1.iso
+    build:
+      description:
+        - Build number of the image.
+        - This is usually a sub-string of the C(name).
+      returned: queried
+      type: str
+      sample: 0.0.1
+    build_date:
+      description:
+        - Date of the build.
+      returned: queried
+      type: str
+      sample: "2018-05-05T15:26:30"
+    checksum:
+      description:
+        - MD5 checksum of the image.
+        - Note that this is the checksum that is stored inside the ISO. It is not
+          the actual checksum of the ISO.
+      returned: queried
+      type: str
+      sample: df1ec715d2089d0fa54c0c4284656a98
+    file_size:
+      description:
+        - Size, in megabytes, of the image.
+      returned: queried
+      type: int
+      sample: 1938
+    last_modified:
+      description:
+        - Last modified date of the ISO.
+      returned: queried
+      type: str
+      sample: "2018-05-05T15:26:30"
+    product:
+      description:
+        - Product contained in the ISO.
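+# Illustrative usage sketch; assumes the module is named bigip_device_info
+# (an assumption). The verified flag documented for software_hotfixes and
+# software_images can gate an upgrade play before any installation step:
+#
+#   - name: Collect software image facts
+#     bigip_device_info:
+#       gather_subset:
+#         - software-images
+#     register: f5_info
+#
+#   - name: Fail if the target image has not been verified by the system
+#     assert:
+#       that:
+#         - f5_info.software_images | selectattr('name', 'eq', target_iso) | map(attribute='verified') | first
+#     vars:
+#       target_iso: BIGIP-13.1.0.7-0.0.1.iso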
+      returned: queried
+      type: str
+      sample: BIG-IP
+    verified:
+      description:
+        - Whether or not the system has verified this image.
+      returned: queried
+      type: bool
+      sample: yes
+    version:
+      description:
+        - Version of software contained in the image.
+        - This is a sub-string of the C(name).
+      returned: queried
+      type: str
+      sample: 13.1.0.7
+  sample: hash/dictionary of values
+software_volumes:
+  description: List of software volumes.
+  returned: When C(software-volumes) is specified in C(gather_subset).
+  type: complex
+  contains:
+    active:
+      description:
+        - Whether the volume is currently active or not.
+        - An active volume contains the currently running version of software.
+      returned: queried
+      type: bool
+      sample: yes
+    base_build:
+      description:
+        - Base build version of the software installed in the volume.
+        - When a hotfix is installed, this refers to the base version of software
+          that the hotfix requires.
+      returned: queried
+      type: str
+      sample: 0.0.6
+    build:
+      description:
+        - Build version of the software installed in the volume.
+      returned: queried
+      type: str
+      sample: 0.0.6
+    full_path:
+      description:
+        - Full name of the resource as known to BIG-IP.
+      returned: queried
+      type: str
+      sample: HD1.1
+    default_boot_location:
+      description:
+        - Whether this volume is the default boot location or not.
+      returned: queried
+      type: bool
+      sample: yes
+    name:
+      description:
+        - Relative name of the resource in BIG-IP.
+        - This usually matches the C(full_path).
+      returned: queried
+      type: str
+      sample: HD1.1
+    product:
+      description:
+        - The F5 product installed in this slot.
+        - This should always be BIG-IP.
+      returned: queried
+      type: str
+      sample: BIG-IP
+    status:
+      description:
+        - Status of the software installed, or being installed, in the volume.
+        - When C(complete), indicates that the software has completed installing.
+      returned: queried
+      type: str
+      sample: complete
+    version:
+      description:
+        - Version of software installed in the volume, excluding the C(build) number.
+      returned: queried
+      type: str
+      sample: 13.1.0.4
+  sample: hash/dictionary of values
+ssl_certs:
+  description: SSL certificate related information.
+  returned: When C(ssl-certs) is specified in C(gather_subset).
+  type: complex
+  contains:
+    full_path:
+      description:
+        - Full name of the resource as known to BIG-IP.
+      returned: queried
+      type: str
+      sample: /Common/cert1
+    name:
+      description:
+        - Relative name of the resource in BIG-IP.
+      returned: queried
+      type: str
+      sample: cert1
+    key_type:
+      description:
+        - Specifies the type of cryptographic key associated with this certificate.
+      returned: queried
+      type: str
+      sample: rsa-private
+    key_size:
+      description:
+        - Specifies the size (in bytes) of the file associated with this file object.
+      returned: queried
+      type: int
+      sample: 2048
+    system_path:
+      description:
+        - Path on the BIG-IP where the cert can be found.
+      returned: queried
+      type: str
+      sample: /config/ssl/ssl.crt/f5-irule.crt
+    sha1_checksum:
+      description:
+        - SHA1 checksum of the certificate.
+      returned: queried
+      type: str
+      sample: 1306e84e1e6a2da53816cefe1f684b80d6be1e3e
+    subject:
+      description:
+        - Specifies X509 information of the certificate's subject.
+      returned: queried
+      type: str
+      sample: "emailAddress=support@f5.com,CN=..."
+    last_update_time:
+      description:
+        - Specifies the last time at which the file-object was
+          updated/modified.
+      returned: queried
+      type: str
+      sample: "2018-05-15T21:11:15Z"
+    issuer:
+      description:
+        - Specifies X509 information of the certificate's issuer.
+      returned: queried
+      type: str
+      sample: "emailAddress=support@f5.com,...CN=support.f5.com,"
+    is_bundle:
+      description:
+        - Specifies whether the certificate file is a bundle (that is,
+          whether it contains more than one certificate).
+      returned: queried
+      type: bool
+      sample: no
+    fingerprint:
+      description:
+        - Displays the SHA-256 fingerprint of the certificate.
+      returned: queried
+      type: str
+      sample: "SHA256/88:A3:05:...:59:01:EA:5D:B0"
+    expiration_date:
+      description:
+        - Specifies a string representation of the expiration date of the
+          certificate.
+      returned: queried
+      type: str
+      sample: "Aug 13 21:21:29 2031 GMT"
+    expiration_timestamp:
+      description:
+        - Specifies the date at which this certificate expires. Stored as a
+          POSIX time.
+      returned: queried
+      type: int
+      sample: 1944422489
+    create_time:
+      description:
+        - Specifies the time at which the file-object was created.
+      returned: queried
+      type: str
+      sample: "2018-05-15T21:11:15Z"
+  sample: hash/dictionary of values
+ssl_keys:
+  description: SSL key related information.
+  returned: When C(ssl-keys) is specified in C(gather_subset).
+  type: complex
+  contains:
+    full_path:
+      description:
+        - Full name of the resource as known to BIG-IP.
+      returned: queried
+      type: str
+      sample: /Common/key1
+    name:
+      description:
+        - Relative name of the resource in BIG-IP.
+      returned: queried
+      type: str
+      sample: key1
+    key_type:
+      description:
+        - Specifies the cryptographic type of the key in question. That is,
+          which algorithm this key is compatible with.
+      returned: queried
+      type: str
+      sample: rsa-private
+    key_size:
+      description:
+        - Specifies the size of the cryptographic key associated with this
+          file object, in bits.
+      returned: queried
+      type: int
+      sample: 2048
+    security_type:
+      description:
+        - Specifies the type of security used to handle or store the key.
+      returned: queried
+      type: str
+      sample: normal
+    system_path:
+      description:
+        - The path on the filesystem where the key is stored.
+      returned: queried
+      type: str
+      sample: /config/ssl/ssl.key/default.key
+    sha1_checksum:
+      description:
+        - The SHA1 checksum of the key.
+      returned: queried
+      type: str
+      sample: 1fcf7de3dd8e834d613099d8e10b2060cd9ecc9f
+  sample: hash/dictionary of values
+system_db:
+  description: System DB related information.
+  returned: When C(system-db) is specified in C(gather_subset).
+  type: complex
+  contains:
+    full_path:
+      description:
+        - Full name of the resource as known to BIG-IP.
+      returned: queried
+      type: str
+      sample: vendor.wwwurl
+    name:
+      description:
+        - Relative name of the resource in BIG-IP.
+      returned: queried
+      type: str
+      sample: vendor.wwwurl
+    default:
+      description:
+        - Default value of the key.
+      returned: queried
+      type: str
+      sample: www.f5.com
+    scf_config:
+      description:
+        - Whether the database key would be found in an SCF config or not.
+      returned: queried
+      type: str
+      sample: false
+    value:
+      description:
+        - The value of the key.
+      returned: queried
+      type: str
+      sample: www.f5.com
+    value_range:
+      description:
+        - The accepted range of values for the key.
+      returned: queried
+      type: str
+      sample: string
+  sample: hash/dictionary of values
+system_info:
+  description: System related information.
+  returned: When C(system-info) is specified in C(gather_subset).
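+# Illustrative usage sketch; assumes the module is named bigip_device_info
+# (an assumption). Because expiration_timestamp in the ssl_certs facts above
+# is a POSIX time, comparing it against ansible_date_time.epoch gives a simple
+# expiring-certificate report:
+#
+#   - name: Collect SSL certificate facts
+#     bigip_device_info:
+#       gather_subset:
+#         - ssl-certs
+#     register: f5_info
+#
+#   - name: List certificates expiring within 30 days
+#     debug:
+#       msg: "{{ f5_info.ssl_certs | selectattr('expiration_timestamp', 'lt', (ansible_date_time.epoch | int) + 2592000) | map(attribute='full_path') | list }}"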
+ type: complex + contains: + base_mac_address: + description: + - Media Access Control address (MAC address) of the device. + returned: queried + type: str + sample: "fa:16:3e:c3:42:6f" + marketing_name: + description: + - Marketing name of the device platform. + returned: queried + type: str + sample: BIG-IP Virtual Edition + time: + description: + - Mapping of the current time information to specific time-named keys. + returned: queried + type: complex + contains: + day: + description: + - The current day of the month, in numeric form. + returned: queried + type: int + sample: 7 + hour: + description: + - The current hour of the day in 24-hour form. + returned: queried + type: int + sample: 18 + minute: + description: + - The current minute of the hour. + returned: queried + type: int + sample: 16 + month: + description: + - The current month, in numeric form. + returned: queried + type: int + sample: 6 + second: + description: + - The current second of the minute. + returned: queried + type: int + sample: 51 + year: + description: + - The current year in 4-digit form. + returned: queried + type: int + sample: 2018 + hardware_information: + description: + - Information related to the hardware (drives and CPUs) of the system. + type: complex + returned: queried + contains: + model: + description: + - The model of the hardware. + returned: queried + type: str + sample: Virtual Disk + name: + description: + - The name of the hardware. + returned: queried + type: str + sample: HD1 + type: + description: + - The type of hardware. + returned: queried + type: str + sample: physical-disk + versions: + description: + - Hardware specific properties. + returned: queried + type: complex + contains: + name: + description: + - Name of the property. + returned: queried + type: str + sample: Size + version: + description: + - Value of the property. + returned: queried + type: str + sample: 154.00G + package_edition: + description: + - Displays the software edition. + returned: queried + type: str + sample: Point Release 7 + package_version: + description: + - A string combining the C(product_build) and C(product_build_date). + returned: queried + type: str + sample: "Build 0.0.1 - Tue May 15 15:26:30 PDT 2018" + product_code: + description: + - Code identifying the product. + returned: queried + type: str + sample: BIG-IP + product_build: + description: + - Build version of the release version. + returned: queried + type: str + sample: 0.0.1 + product_version: + description: + - Major product version of the running software. + returned: queried + type: str + sample: 13.1.0.7 + product_built: + description: + - Unix timestamp of when the product was built. + returned: queried + type: int + sample: 180515152630 + product_build_date: + description: + - Human readable build date. + returned: queried + type: str + sample: "Tue May 15 15:26:30 PDT 2018" + product_changelist: + description: + - Changelist that product branches from. + returned: queried + type: int + sample: 2557198 + product_jobid: + description: + - ID of the job that built the product version. + returned: queried + type: int + sample: 1012030 + chassis_serial: + description: + - Serial of the chassis. + returned: queried + type: str + sample: 11111111-2222-3333-444444444444 + host_board_part_revision: + description: + - Revision of the host board. + returned: queried + type: str + host_board_serial: + description: + - Serial of the host board. + returned: queried + type: str + platform: + description: + - Platform identifier. 
+ returned: queried + type: str + sample: Z100 + switch_board_part_revision: + description: + - Switch board revision. + returned: queried + type: str + switch_board_serial: + description: + - Serial of the switch board. + returned: queried + type: str + uptime: + description: + - Time, in seconds, since the system booted. + returned: queried + type: int + sample: 603202 + sample: hash/dictionary of values +tcp_monitors: + description: TCP monitor related information. + returned: When C(tcp-monitors) is specified in C(gather_subset). + type: complex + contains: + full_path: + description: + - Full name of the resource as known to BIG-IP. + returned: queried + type: str + sample: /Common/tcp + name: + description: + - Relative name of the resource in BIG-IP. + returned: queried + type: str + sample: tcp + parent: + description: + - Profile from which this profile inherits settings. + returned: queried + type: str + sample: tcp + description: + description: + - Description of the resource. + returned: queried + type: str + sample: My monitor + adaptive: + description: + - Whether adaptive response time monitoring is enabled for this monitor. + returned: queried + type: bool + sample: no + adaptive_divergence_type: + description: + - Specifies whether the adaptive-divergence-value is C(relative) or + C(absolute). + returned: queried + type: str + sample: relative + adaptive_divergence_value: + description: + - Specifies how far from mean latency each monitor probe is allowed + to be. + returned: queried + type: int + sample: 25 + adaptive_limit: + description: + - Specifies the hard limit, in milliseconds, which the probe is not + allowed to exceed, regardless of the divergence value. + returned: queried + type: int + sample: 200 + adaptive_sampling_timespan: + description: + - Specifies the size of the sliding window, in seconds, which + records probe history. + returned: queried + type: int + sample: 300 + destination: + description: + - Specifies the IP address and service port of the resource that is + the destination of this monitor. + returned: queried + type: str + sample: "*:*" + interval: + description: + - Specifies, in seconds, the frequency at which the system issues + the monitor check when either the resource is down or the status + of the resource is unknown. + returned: queried + type: int + sample: 5 + ip_dscp: + description: + - Specifies the differentiated services code point (DSCP). + returned: queried + type: int + sample: 0 + manual_resume: + description: + - Specifies whether the system automatically changes the status of a + resource to up at the next successful monitor check. + returned: queried + type: bool + sample: yes + reverse: + description: + - Specifies whether the monitor operates in reverse mode. When the + monitor is in reverse mode, a successful check marks the monitored + object down instead of up. + returned: queried + type: bool + sample: no + time_until_up: + description: + - Specifies the amount of time, in seconds, after the first + successful response before a node is marked up. + returned: queried + type: int + sample: 0 + timeout: + description: + - Specifies the number of seconds the target has in which to respond + to the monitor request. + returned: queried + type: int + sample: 16 + transparent: + description: + - Specifies whether the monitor operates in transparent mode. 
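+ # Editorial note: the sample values above follow the common F5 monitor
+ # guideline of timeout = (3 * interval) + 1; here 16 = (3 * 5) + 1.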
+ returned: queried
+ type: bool
+ sample: no
+ up_interval:
+ description:
+ - Specifies, in seconds, the frequency at which the system issues
+ the monitor check when the resource is up.
+ returned: queried
+ type: int
+ sample: 0
+ sample: hash/dictionary of values
+tcp_half_open_monitors:
+ description: TCP Half-open monitor related information.
+ returned: When C(tcp-half-open-monitors) is specified in C(gather_subset).
+ type: complex
+ contains:
+ full_path:
+ description:
+ - Full name of the resource as known to BIG-IP.
+ returned: queried
+ type: str
+ sample: /Common/tcp
+ name:
+ description:
+ - Relative name of the resource in BIG-IP.
+ returned: queried
+ type: str
+ sample: tcp
+ parent:
+ description:
+ - Profile from which this profile inherits settings.
+ returned: queried
+ type: str
+ sample: tcp
+ description:
+ description:
+ - Description of the resource.
+ returned: queried
+ type: str
+ sample: My monitor
+ destination:
+ description:
+ - Specifies the IP address and service port of the resource that is
+ the destination of this monitor.
+ returned: queried
+ type: str
+ sample: "*:*"
+ interval:
+ description:
+ - Specifies, in seconds, the frequency at which the system issues
+ the monitor check when either the resource is down or the status
+ of the resource is unknown.
+ returned: queried
+ type: int
+ sample: 5
+ manual_resume:
+ description:
+ - Specifies whether the system automatically changes the status of a
+ resource to up at the next successful monitor check.
+ returned: queried
+ type: bool
+ sample: yes
+ time_until_up:
+ description:
+ - Specifies the amount of time, in seconds, after the first
+ successful response before a node is marked up.
+ returned: queried
+ type: int
+ sample: 0
+ timeout:
+ description:
+ - Specifies the number of seconds the target has in which to respond
+ to the monitor request.
+ returned: queried
+ type: int
+ sample: 16
+ transparent:
+ description:
+ - Specifies whether the monitor operates in transparent mode.
+ returned: queried
+ type: bool
+ sample: no
+ up_interval:
+ description:
+ - Specifies, in seconds, the frequency at which the system issues
+ the monitor check when the resource is up.
+ returned: queried
+ type: int
+ sample: 0
+ sample: hash/dictionary of values
+tcp_profiles:
+ description: TCP profile related information.
+ returned: When C(tcp-profiles) is specified in C(gather_subset).
+ type: complex
+ contains:
+ full_path:
+ description:
+ - Full name of the resource as known to BIG-IP.
+ returned: queried
+ type: str
+ sample: /Common/tcp
+ name:
+ description:
+ - Relative name of the resource in BIG-IP.
+ returned: queried
+ type: str
+ sample: tcp
+ parent:
+ description:
+ - Profile from which this profile inherits settings.
+ returned: queried
+ type: str
+ sample: tcp
+ description:
+ description:
+ - Description of the resource.
+ returned: queried
+ type: str
+ sample: My profile
+ abc:
+ description:
+ - Appropriate Byte Counting (RFC 3465).
+ - When C(yes), increases the congestion window by basing the increase
+ amount on the number of previously unacknowledged bytes that each ACK covers.
+ returned: queried
+ type: bool
+ sample: yes
+ ack_on_push:
+ description:
+ - Specifies, when C(yes), that the system significantly improves performance
+ for Microsoft Windows and macOS peers that write to a very small send buffer.
+ returned: queried
+ type: bool
+ sample: no
+ auto_proxy_buffer:
+ description:
+ - Specifies, when C(yes), that the system uses the network measurements to set
+ the optimal proxy buffer size.
+ returned: queried
+ type: bool
+ sample: yes
+ auto_receive_window:
+ description:
+ - Specifies, when C(yes), that the system uses the network measurements to
+ set the optimal receive window size.
+ returned: queried
+ type: bool
+ sample: no
+ auto_send_buffer:
+ description:
+ - Specifies, when C(yes), that the system uses the network measurements to
+ set the optimal send buffer size.
+ returned: queried
+ type: bool
+ sample: yes
+ close_wait:
+ description:
+ - Specifies the length of time that a TCP connection remains in the LAST-ACK
+ state before quitting.
+ - In addition to a numeric value, the value of this fact may also be one of
+ C(immediate) or C(indefinite).
+ - When C(immediate), specifies that the TCP connection closes immediately
+ after entering the LAST-ACK state.
+ - When C(indefinite), specifies that TCP connections in the LAST-ACK state
+ do not close until they meet the maximum retransmissions timeout.
+ returned: queried
+ type: str
+ sample: indefinite
+ congestion_metrics_cache:
+ description:
+ - Specifies, when C(yes), that the system uses a cache for storing congestion
+ metrics.
+ - Subsequently, because these metrics are already known and cached, the initial
+ slow-start ramp for previously-encountered peers improves.
+ returned: queried
+ type: bool
+ sample: yes
+ congestion_metrics_cache_timeout:
+ description:
+ - Specifies the number of seconds for which entries in the congestion metrics
+ cache are valid.
+ returned: queried
+ type: int
+ sample: 0
+ congestion_control:
+ description:
+ - Specifies the algorithm to use to share network resources among competing
+ users to reduce congestion.
+ - Return values may include C(high-speed), C(cdg), C(chd), C(none), C(cubic),
+ C(illinois), C(new-reno), C(reno), C(scalable), C(vegas), C(westwood), and
+ C(woodside).
+ returned: queried
+ type: str
+ sample: high-speed
+ deferred_accept:
+ description:
+ - Specifies, when C(yes), that the system defers allocation of the connection
+ chain context until the system has received the payload from the client.
+ - Enabling this setting is useful in dealing with 3-way handshake denial-of-service
+ attacks.
+ returned: queried
+ type: bool
+ sample: yes
+ delay_window_control:
+ description:
+ - Specifies that the system uses an estimate of queuing delay as a measure of
+ congestion to control, in addition to the normal loss-based control, the amount
+ of data sent.
+ returned: queried
+ type: bool
+ sample: yes
+ delayed_acks:
+ description:
+ - Specifies, when checked (enabled), that the system can send fewer than one ACK
+ (acknowledgment) segment per data segment received.
+ returned: queried
+ type: bool
+ sample: yes
+ dsack:
+ description:
+ - D-SACK (RFC 2883).
+ - Specifies, when C(yes), the use of the selective ACK (SACK) option to acknowledge
+ duplicate segments.
+ returned: queried
+ type: bool
+ sample: yes
+ early_retransmit:
+ description:
+ - Specifies, when C(yes), that the system uses early retransmit (as specified in
+ RFC 5827) to reduce the recovery time for connections that are receive-buffer
+ or user-data limited.
+ returned: queried
+ type: bool
+ sample: yes
+ explicit_congestion_notification:
+ description:
+ - Specifies, when C(yes), that the system uses the TCP flags CWR (congestion window
+ reduction) and ECE (ECN-Echo) to notify its peer of congestion and congestion
+ counter-measures.
+ returned: queried
+ type: bool
+ sample: yes
+ enhanced_loss_recovery:
+ description:
+ - Specifies whether the system uses enhanced loss recovery to recover from random
+ packet losses more effectively.
+ returned: queried
+ type: bool
+ sample: yes
+ fast_open:
+ description:
+ - Specifies, when C(yes), that the system supports TCP Fast Open, which reduces
+ latency by allowing a client to include the first packet of data with the SYN packet.
+ returned: queried
+ type: bool
+ sample: yes
+ fast_open_cookie_expiration:
+ description:
+ - Specifies the number of seconds that a Fast Open Cookie delivered to a client
+ is valid for SYN packets from that client.
+ returned: queried
+ type: int
+ sample: 1000
+ fin_wait_1:
+ description:
+ - Specifies the length of time that a TCP connection is in the FIN-WAIT-1 or
+ CLOSING state before quitting.
+ returned: queried
+ type: str
+ sample: indefinite
+ fin_wait_2:
+ description:
+ - Specifies the length of time that a TCP connection is in the FIN-WAIT-2 state
+ before quitting.
+ returned: queried
+ type: str
+ sample: 100
+ idle_timeout:
+ description:
+ - Specifies the length of time that a connection is idle (has no traffic) before
+ the connection is eligible for deletion.
+ returned: queried
+ type: str
+ sample: 300
+ initial_congestion_window_size:
+ description:
+ - Specifies the initial congestion window size for connections to this destination.
+ returned: queried
+ type: int
+ sample: 3
+ initial_receive_window_size:
+ description:
+ - Specifies the initial receive window size for connections to this destination.
+ returned: queried
+ type: int
+ sample: 5
+ dont_fragment_flag:
+ description:
+ - Specifies the Don't Fragment (DF) bit setting in the IP Header of the outgoing
+ TCP packet.
+ returned: queried
+ type: str
+ sample: pmtu
+ ip_tos:
+ description:
+ - Specifies the L3 Type of Service (ToS) level that the system inserts in TCP
+ packets destined for clients.
+ returned: queried
+ type: str
+ sample: mimic
+ time_to_live:
+ description:
+ - Specifies the outgoing TCP packet's IP Header TTL mode.
+ returned: queried
+ type: str
+ sample: proxy
+ time_to_live_v4:
+ description:
+ - Specifies the outgoing packet's IP Header TTL value for IPv4 traffic.
+ returned: queried
+ type: int
+ sample: 255
+ time_to_live_v6:
+ description:
+ - Specifies the outgoing packet's IP Header TTL value for IPv6 traffic.
+ returned: queried
+ type: int
+ sample: 64
+ keep_alive_interval:
+ description:
+ - Specifies how frequently the system sends data over an idle TCP
+ connection, to determine whether the connection is still valid.
+ returned: queried
+ type: str
+ sample: 50
+ limited_transmit_recovery:
+ description:
+ - Specifies, when C(yes), that the system uses limited transmit recovery
+ revisions for fast retransmits (as specified in RFC 3042) to reduce
+ the recovery time for connections on a lossy network.
+ returned: queried
+ type: bool
+ sample: yes
+ link_qos:
+ description:
+ - Specifies the L2 Quality of Service (QoS) level that the system inserts
+ in TCP packets destined for clients.
+ returned: queried
+ type: str
+ sample: 200
+ max_segment_retrans:
+ description:
+ - Specifies the maximum number of times that the system resends data segments.
+ returned: queried + type: int + sample: 8 + max_syn_retrans: + description: + - Specifies the maximum number of times that the system resends a SYN + packet when it does not receive a corresponding SYN-ACK. + returned: queried + type: int + sample: 3 + max_segment_size: + description: + - Specifies the largest amount of data that the system can receive in a + single TCP segment, not including the TCP and IP headers. + returned: queried + type: int + sample: 1460 + md5_signature: + description: + - Specifies, when C(yes), to use RFC2385 TCP-MD5 signatures to protect + TCP traffic against intermediate tampering. + returned: queried + type: bool + sample: yes + minimum_rto: + description: + - Specifies the minimum length of time the system waits for + acknowledgements of data sent before resending the data. + returned: queried + type: int + sample: 1000 + multipath_tcp: + description: + - Specifies, when C(yes), that the system accepts Multipath TCP (MPTCP) + connections, which allow multiple client-side flows to connect to a + single server-side flow. + returned: queried + type: bool + sample: yes + mptcp_checksum: + description: + - Specifies, when C(yes), that the system calculates the checksum for + MPTCP connections. + returned: queried + type: bool + sample: no + mptcp_checksum_verify: + description: + - Specifies, when C(yes), that the system verifies the checksum for + MPTCP connections. + returned: queried + type: bool + sample: no + mptcp_fallback: + description: + - Specifies an action on fallback, that is, when MPTCP transitions + to regular TCP, because something prevents MPTCP from working correctly. + returned: queried + type: str + sample: reset + mptcp_fast_join: + description: + - Specifies, when C(yes), a FAST join, allowing data to be sent on the + MP_JOIN_SYN, which can allow a server response to occur in parallel + with the JOIN. + returned: queried + type: bool + sample: no + mptcp_idle_timeout: + description: + - Specifies the number of seconds that an MPTCP connection is idle + before the connection is eligible for deletion. + returned: queried + type: int + sample: 300 + mptcp_join_max: + description: + - Specifies the highest number of MPTCP connections that can join to + a given connection. + returned: queried + type: int + sample: 5 + mptcp_make_after_break: + description: + - Specifies that make-after-break functionality is supported, allowing + for long-lived MPTCP sessions. + returned: queried + type: bool + sample: no + mptcp_no_join_dss_ack: + description: + - Specifies, when checked (enabled), that no DSS option is sent on the + JOIN ACK. + returned: queried + type: bool + sample: no + mptcp_rto_max: + description: + - Specifies the number of RTOs (retransmission timeouts) before declaring + the subflow dead. + returned: queried + type: int + sample: 5 + mptcp_retransmit_min: + description: + - Specifies the minimum value (in msec) of the retransmission timer for + these MPTCP flows. + returned: queried + type: int + sample: 1000 + mptcp_subflow_max: + description: + - Specifies the maximum number of MPTCP subflows for a single flow. + returned: queried + type: int + sample: 6 + mptcp_timeout: + description: + - Specifies, in seconds, the timeout value to discard long-lived sessions + that do not have an active flow. + returned: queried + type: int + sample: 3600 + nagle_algorithm: + description: + - Specifies whether the system applies Nagle's algorithm to reduce the + number of short segments on the network. 
+ returned: queried + type: bool + sample: no + pkt_loss_ignore_burst: + description: + - Specifies the probability of performing congestion control when + multiple packets are lost, even if the Packet Loss Ignore Rate was + not exceeded. + returned: queried + type: int + sample: 0 + pkt_loss_ignore_rate: + description: + - Specifies the threshold of packets lost per million at which the + system performs congestion control. + returned: queried + type: int + sample: 0 + proxy_buffer_high: + description: + - Specifies the proxy buffer level, in bytes, at which the receive window + is closed. + returned: queried + type: int + sample: 49152 + proxy_buffer_low: + description: + - Specifies the proxy buffer level, in bytes, at which the receive window + is opened. + returned: queried + type: int + sample: 32768 + proxy_max_segment: + description: + - Specifies, when C(yes), that the system attempts to advertise the same + maximum segment size (MSS) to the server-side connection as that of the + client-side connection. + returned: queried + type: bool + sample: yes + proxy_options: + description: + - Specifies, when C(yes), that the system advertises an option (such as + time stamps) to the server only when the option is negotiated with the + client. + returned: queried + type: bool + sample: no + push_flag: + description: + - Specifies how the BIG-IP system receives ACKs. + returned: queried + type: str + sample: default + rate_pace: + description: + - Specifies, when C(yes), that the system paces the egress packets to + avoid dropping packets, allowing for optimum goodput. + returned: queried + type: bool + sample: yes + rate_pace_max_rate: + description: + - Specifies the maximum rate in bytes per second to which the system + paces TCP data transmission. + returned: queried + type: int + sample: 0 + receive_window: + description: + - Specifies the maximum advertised RECEIVE window size. + returned: queried + type: int + sample: 65535 + reset_on_timeout: + description: + - Specifies, when C(yes), that the system sends a reset packet (RST) + in addition to deleting the connection, when a connection exceeds + the idle timeout value. + returned: queried + type: bool + sample: yes + retransmit_threshold: + description: + - Specifies the number of duplicate ACKs (retransmit threshold) to start + fast recovery. + returned: queried + type: int + sample: 3 + selective_acks: + description: + - Specifies, when C(yes), that the system processes data using + selective ACKs (SACKs) whenever possible, to improve system performance. + returned: queried + type: bool + sample: yes + selective_nack: + description: + - Specifies, when C(yes), that the system processes data using a selective + negative acknowledgment (SNACK) whenever possible, to improve system + performance. + returned: queried + type: bool + sample: yes + send_buffer: + description: + - Specifies the SEND window size. + returned: queried + type: int + sample: 65535 + slow_start: + description: + - Specifies, when C(yes), that the system uses Slow-Start Congestion + Avoidance as described in RFC3390 in order to ramp up traffic without + causing excessive congestion on the link. + returned: queried + type: bool + sample: yes + syn_cookie_enable: + description: + - Specifies the default (if no DoS profile is associated) number of + embryonic connections that are allowed on any virtual server, + before SYN Cookie challenges are enabled for that virtual server. 
+ returned: queried + type: bool + sample: yes + syn_cookie_white_list: + description: + - Specifies whether or not to use a SYN Cookie WhiteList when doing + software SYN Cookies. + returned: queried + type: bool + sample: no + syn_retrans_to_base: + description: + - Specifies the initial RTO (Retransmission TimeOut) base multiplier + for SYN retransmissions. + returned: queried + type: int + sample: 3000 + tail_loss_probe: + description: + - Specifies, when C(yes), that the system uses Tail Loss Probe to + reduce the number of retransmission timeouts. + returned: queried + type: bool + sample: yes + time_wait_recycle: + description: + - Specifies, when C(yes), that connections in a TIME-WAIT state are + reused when the system receives a SYN packet, indicating a request + for a new connection. + returned: queried + type: bool + sample: yes + time_wait: + description: + - Specifies the length of time that a TCP connection remains in the + TIME-WAIT state before entering the CLOSED state. + returned: queried + type: str + sample: 2000 + timestamps: + description: + - Specifies, when C(yes), that the system uses the timestamps extension + for TCP (as specified in RFC 1323) to enhance high-speed network performance. + returned: queried + type: bool + sample: yes + verified_accept: + description: + - Specifies, when C(yes), that the system can actually communicate with + the server before establishing a client connection. + returned: queried + type: bool + sample: yes + zero_window_timeout: + description: + - Specifies the timeout in milliseconds for terminating a connection + with an effective zero length TCP transmit window. + returned: queried + type: str + sample: 2000 + sample: hash/dictionary of values +traffic_groups: + description: Traffic group related information. + returned: When C(traffic-groups) is specified in C(gather_subset). + type: complex + contains: + full_path: + description: + - Full name of the resource as known to BIG-IP. + returned: queried + type: str + sample: /Common/tg1 + name: + description: + - Relative name of the resource in BIG-IP. + returned: queried + type: str + sample: tg1 + description: + description: + - Description of the traffic group. + returned: queried + type: str + sample: My traffic group + auto_failback_enabled: + description: + - Specifies whether the traffic group fails back to the default + device. + returned: queried + type: bool + sample: yes + auto_failback_time: + description: + - Specifies the time required to fail back. + returned: queried + type: int + sample: 60 + ha_load_factor: + description: + - Specifies a number for this traffic group that represents the load + this traffic group presents to the system relative to other + traffic groups. + returned: queried + type: int + sample: 1 + ha_order: + description: + - This list of devices specifies the order in which the devices will + become active for the traffic group when a failure occurs. + returned: queried + type: list + sample: ['/Common/device1', '/Common/device2'] + is_floating: + description: + - Indicates whether the traffic group can fail over to other devices + in the device group. + returned: queried + type: bool + sample: no + mac_masquerade_address: + description: + - Specifies a MAC address for the traffic group. + returned: queried + type: str + sample: "00:98:76:54:32:10" + sample: hash/dictionary of values +trunks: + description: Trunk related information. + returned: When C(trunks) is specified in C(gather_subset). 
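+ # Illustrative usage (editorial; the task shape and the registered variable
+ # name C(device_info) are assumptions, not part of the module docs):
+ #   - name: Show each trunk with its member interfaces
+ #     debug:
+ #       msg: "{{ item.name }}: {{ item.interfaces | join(', ') }}"
+ #     loop: "{{ device_info.trunks }}"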
+ type: complex
+ contains:
+ full_path:
+ description:
+ - Full name of the resource as known to BIG-IP.
+ returned: queried
+ type: str
+ sample: /Common/trunk1
+ name:
+ description:
+ - Relative name of the resource in BIG-IP.
+ returned: queried
+ type: str
+ sample: trunk1
+ description:
+ description:
+ - Description of the Trunk.
+ returned: queried
+ type: str
+ sample: My trunk
+ media_speed:
+ description:
+ - Speed of the media attached to the trunk.
+ returned: queried
+ type: int
+ sample: 10000
+ lacp_mode:
+ description:
+ - The operation mode for LACP.
+ returned: queried
+ type: str
+ sample: passive
+ lacp_enabled:
+ description:
+ - Whether LACP is enabled or not.
+ returned: queried
+ type: bool
+ sample: yes
+ stp_enabled:
+ description:
+ - Whether Spanning Tree Protocol (STP) is enabled or not.
+ returned: queried
+ type: bool
+ sample: yes
+ operational_member_count:
+ description:
+ - Number of working members associated with the trunk.
+ returned: queried
+ type: int
+ sample: 1
+ media_status:
+ description:
+ - Whether the media that is part of the trunk is up or not.
+ returned: queried
+ type: bool
+ sample: yes
+ link_selection_policy:
+ description:
+ - The LACP policy that the trunk uses to determine which member link can handle
+ new traffic.
+ returned: queried
+ type: str
+ sample: maximum-bandwidth
+ lacp_timeout:
+ description:
+ - The rate at which the system sends the LACP control packets.
+ returned: queried
+ type: int
+ sample: 10
+ interfaces:
+ description:
+ - The list of interfaces that are part of the trunk.
+ returned: queried
+ type: list
+ sample: ['1.2', '1.3']
+ distribution_hash:
+ description:
+ - The basis for the hash that the system uses as the frame distribution algorithm.
+ - The system uses this hash to determine which interface to use for forwarding
+ traffic.
+ returned: queried
+ type: str
+ sample: src-dst-ipport
+ configured_member_count:
+ description:
+ - The number of configured members that are associated with the trunk.
+ returned: queried
+ type: int
+ sample: 1
+ sample: hash/dictionary of values
+udp_profiles:
+ description: UDP profile related information.
+ returned: When C(udp-profiles) is specified in C(gather_subset).
+ type: complex
+ contains:
+ full_path:
+ description:
+ - Full name of the resource as known to BIG-IP.
+ returned: queried
+ type: str
+ sample: /Common/udp
+ name:
+ description:
+ - Relative name of the resource in BIG-IP.
+ returned: queried
+ type: str
+ sample: udp
+ parent:
+ description:
+ - Profile from which this profile inherits settings.
+ returned: queried
+ type: str
+ sample: udp
+ description:
+ description:
+ - Description of the resource.
+ returned: queried
+ type: str
+ sample: My profile
+ allow_no_payload:
+ description:
+ - Allows the passage of datagrams that contain header information, but no essential data.
+ returned: queried
+ type: bool
+ sample: yes
+ buffer_max_bytes:
+ description:
+ - Ingress buffer byte limit. Maximum allowed value is 16777215.
+ returned: queried
+ type: int
+ sample: 655350
+ buffer_max_packets:
+ description:
+ - Ingress buffer packet limit. Maximum allowed value is 255.
+ returned: queried
+ type: int
+ sample: 0
+ datagram_load_balancing:
+ description:
+ - Load balances UDP traffic datagram by datagram.
+ returned: queried
+ type: bool
+ sample: yes
+ idle_timeout:
+ description:
+ - Number of seconds that a connection is idle before
+ the connection is eligible for deletion.
+ - In addition to a number, may be one of the values C(indefinite) or
+ C(immediate).
+ returned: queried
+ type: str
+ sample: 200
+ ip_df_mode:
+ description:
+ - Describes the Don't Fragment (DF) bit setting in the outgoing UDP
+ packet.
+ - May be one of C(pmtu), C(preserve), C(set), or C(clear).
+ - When C(pmtu), sets the outgoing UDP packet DF bit based on the IP
+ PMTU setting.
+ - When C(preserve), preserves the incoming UDP packet Don't Fragment bit.
+ - When C(set), sets the outgoing UDP packet DF bit.
+ - When C(clear), clears the outgoing UDP packet DF bit.
+ returned: queried
+ type: str
+ sample: pmtu
+ ip_tos_to_client:
+ description:
+ - The Type of Service level that the traffic management
+ system assigns to UDP packets when sending them to clients.
+ - May be numeric, or the values C(pass-through) or C(mimic).
+ returned: queried
+ type: str
+ sample: mimic
+ ip_ttl_mode:
+ description:
+ - The outgoing UDP packet's TTL mode.
+ - Valid modes are C(proxy), C(preserve), C(decrement), and C(set).
+ - When C(proxy), sets the IP TTL of IPv4 to the default value of 255 and
+ IPv6 to the default value of 64.
+ - When C(preserve), sets the IP TTL to the original packet TTL value.
+ - When C(decrement), sets the IP TTL to the original packet TTL value minus 1.
+ - When C(set), sets the IP TTL to the values specified in C(ip_ttl_v4) and
+ C(ip_ttl_v6) in the same profile.
+ returned: queried
+ type: str
+ sample: proxy
+ ip_ttl_v4:
+ description:
+ - IPv4 TTL.
+ returned: queried
+ type: int
+ sample: 10
+ ip_ttl_v6:
+ description:
+ - IPv6 TTL.
+ returned: queried
+ type: int
+ sample: 100
+ link_qos_to_client:
+ description:
+ - The Quality of Service level that the system assigns to
+ UDP packets when sending them to clients.
+ - May be either numeric, or the value C(pass-through).
+ returned: queried
+ type: str
+ sample: pass-through
+ no_checksum:
+ description:
+ - Whether the checksum processing is enabled or disabled.
+ - Note that if the datagram is IPv6, the system always performs
+ checksum processing.
+ returned: queried
+ type: bool
+ sample: yes
+ proxy_mss:
+ description:
+ - When C(yes), specifies that the system advertises the same MSS
+ to the server as was negotiated with the client.
+ returned: queried
+ type: bool
+ sample: yes
+ sample: hash/dictionary of values
+users:
+ description: Details of the users on the system.
+ returned: When C(users) is specified in C(gather_subset).
+ type: complex
+ contains:
+ description:
+ description:
+ - Description of the resource.
+ returned: queried
+ type: str
+ sample: Admin user
+ full_path:
+ description:
+ - Full name of the resource as known to BIG-IP.
+ returned: queried
+ type: str
+ sample: admin
+ name:
+ description:
+ - Relative name of the resource in BIG-IP.
+ returned: queried
+ type: str
+ sample: admin
+ partition_access:
+ description:
+ - Partition that the user has access to, including the user role.
+ returned: queried
+ type: complex
+ contains:
+ name:
+ description:
+ - Name of the partition.
+ returned: queried
+ type: str
+ sample: all-partitions
+ role:
+ description:
+ - Role allowed to the user on the partition.
+ returned: queried
+ type: str
+ sample: auditor
+ shell:
+ description:
+ - The shell assigned to the user account.
+ returned: queried
+ type: str
+ sample: tmsh
+vcmp_guests:
+ description: vCMP related information.
+ returned: When C(vcmp-guests) is specified in C(gather_subset).
+ type: complex
+ contains:
+ full_path:
+ description:
+ - Full name of the resource as known to BIG-IP.
+ returned: queried + type: str + sample: guest1 + name: + description: + - Relative name of the resource in BIG-IP. + returned: queried + type: str + sample: guest1 + allowed_slots: + description: + - List of slots that the guest is allowed to be assigned to. + returned: queried + type: list + sample: [0, 1, 3] + assigned_slots: + description: + - Slots that the guest is assigned to. + returned: queried + type: list + sample: [0] + boot_priority: + description: + - Specifies boot priority of the guest. Lower number means earlier to boot. + returned: queried + type: int + sample: 65535 + cores_per_slot: + description: + - Number of cores that the system allocates to the guest. + returned: queried + type: int + sample: 2 + hostname: + description: + - FQDN assigned to the guest. + returned: queried + type: str + sample: guest1.localdomain + hotfix_image: + description: + - hotfix image to install onto any of this guest's newly created virtual disks. + returned: queried + type: str + sample: Hotfix-BIGIP-12.1.3.4-0.0.2-hf1.iso + initial_image: + description: + - Software image to install onto any of this guest's newly created virtual disks. + returned: queried + type: str + sample: BIGIP-12.1.3.4-0.0.2.iso + mgmt_route: + description: + - Management gateway IP address for the guest. + returned: queried + type: str + sample: 2.2.2.1 + mgmt_address: + description: + - Management IP address configuration for the guest. + returned: queried + type: str + sample: 2.3.2.3 + mgmt_network: + description: + - Accessibility of this vCMP guest's management network. + returned: queried + type: str + sample: bridged + vlans: + description: + - List of VLANs on which the guest is either enabled or disabled. + returned: queried + type: list + sample: ['/Common/vlan1', '/Common/vlan2'] + min_number_of_slots: + description: + - Specifies the minimum number of slots that the guest must be assigned to. + returned: queried + type: int + sample: 2 + number_of_slots: + description: + - Specifies the number of slots the guest should be assigned to. + - This number is always greater than, or equal to, C(min_number_of_slots). + returned: queried + type: int + sample: 2 + ssl_mode: + description: + - The SSL hardware allocation mode for the guest. + returned: queried + type: str + sample: shared + state: + description: + - Specifies the state of the guest. + - May be one of C(configured), C(provisioned), or C(deployed). + - Each state implies the actions of all states before it. + returned: queried + type: str + sample: provisioned + virtual_disk: + description: + - The filename of the virtual disk to use for this guest. + returned: queried + type: str + sample: guest1.img + sample: hash/dictionary of values +virtual_addresses: + description: Virtual address related information. + returned: When C(virtual-addresses) is specified in C(gather_subset). + type: complex + contains: + full_path: + description: + - Full name of the resource as known to BIG-IP. + returned: queried + type: str + sample: /Common/2.3.4.5 + name: + description: + - Relative name of the resource in BIG-IP. + returned: queried + type: str + sample: 2.3.4.5 + address: + description: + - The virtual IP address. + returned: queried + type: str + sample: 2.3.4.5 + arp_enabled: + description: + - Whether or not ARP is enabled for the specified virtual address. 
+ returned: queried
+ type: bool
+ sample: yes
+ auto_delete_enabled:
+ description:
+ - Indicates if the virtual address will be deleted automatically on
+ deletion of the last associated virtual server or not.
+ returned: queried
+ type: bool
+ sample: no
+ connection_limit:
+ description:
+ - Concurrent connection limit for one or more virtual
+ servers.
+ returned: queried
+ type: int
+ sample: 0
+ description:
+ description:
+ - The description of the virtual address.
+ returned: queried
+ type: str
+ sample: My virtual address
+ enabled:
+ description:
+ - Whether the virtual address is enabled or not.
+ returned: queried
+ type: bool
+ sample: yes
+ icmp_echo:
+ description:
+ - Whether the virtual address should reply to ICMP echo requests.
+ returned: queried
+ type: bool
+ sample: yes
+ floating:
+ description:
+ - Property derived from traffic-group. A floating virtual
+ address is a virtual address for a VLAN that serves as a shared
+ address by all devices of a BIG-IP traffic-group.
+ returned: queried
+ type: bool
+ sample: yes
+ netmask:
+ description:
+ - Netmask of the virtual address.
+ returned: queried
+ type: str
+ sample: 255.255.255.255
+ route_advertisement:
+ description:
+ - Specifies the route advertisement setting for the virtual address.
+ returned: queried
+ type: bool
+ sample: no
+ traffic_group:
+ description:
+ - Traffic group on which the virtual address is active.
+ returned: queried
+ type: str
+ sample: /Common/traffic-group-1
+ spanning:
+ description:
+ - Whether or not spanning is enabled for the specified virtual address.
+ returned: queried
+ type: bool
+ sample: no
+ inherited_traffic_group:
+ description:
+ - Indicates if the traffic-group is inherited from the parent folder.
+ returned: queried
+ type: bool
+ sample: no
+ sample: hash/dictionary of values
+virtual_servers:
+ description: Virtual server related information.
+ returned: When C(virtual-servers) is specified in C(gather_subset).
+ type: complex
+ contains:
+ availability_status:
+ description:
+ - The availability of the virtual server.
+ returned: queried
+ type: str
+ sample: offline
+ full_path:
+ description:
+ - Full name of the resource as known to BIG-IP.
+ returned: queried
+ type: str
+ sample: /Common/2.3.4.5
+ name:
+ description:
+ - Relative name of the resource in BIG-IP.
+ returned: queried
+ type: str
+ sample: 2.3.4.5
+ auto_lasthop:
+ description:
+ - When enabled, allows the system to send return traffic to the MAC address
+ that transmitted the request, even if the routing table points to a different
+ network or interface.
+ returned: queried
+ type: str
+ sample: default
+ bw_controller_policy:
+ description:
+ - The bandwidth controller for the system to use to enforce a throughput policy
+ for incoming network traffic.
+ returned: queried
+ type: str
+ sample: /Common/bw1
+ client_side_bits_in:
+ description:
+ - Number of client-side ingress bits.
+ returned: queried
+ type: int
+ sample: 1000
+ client_side_bits_out:
+ description:
+ - Number of client-side egress bits.
+ returned: queried
+ type: int
+ sample: 200
+ client_side_current_connections:
+ description:
+ - Number of current connections client-side.
+ returned: queried
+ type: int
+ sample: 300
+ client_side_evicted_connections:
+ description:
+ - Number of evicted connections client-side.
+ returned: queried
+ type: int
+ sample: 100
+ client_side_max_connections:
+ description:
+ - Maximum number of connections client-side.
+ returned: queried + type: int + sample: 40 + client_side_pkts_in: + description: + - Number of client-side ingress packets. + returned: queried + type: int + sample: 1098384 + client_side_pkts_out: + description: + - Number of client-side egress packets. + returned: queried + type: int + sample: 3484734 + client_side_slow_killed: + description: + - Number of slow connections killed, client-side. + returned: queried + type: int + sample: 234 + client_side_total_connections: + description: + - Total number of connections. + returned: queried + type: int + sample: 24 + cmp_enabled: + description: + - Whether or not clustered multi-processor (CMP) acceleration is enabled. + returned: queried + type: bool + sample: yes + cmp_mode: + description: + - The clustered-multiprocessing mode. + returned: queried + type: str + sample: all-cpus + connection_limit: + description: + - Maximum number of concurrent connections you want to allow for the virtual server. + returned: queried + type: int + sample: 100 + description: + description: + - The description of the virtual server. + returned: queried + type: str + sample: My virtual + enabled: + description: + - Whether or not the virtual is enabled. + returned: queried + type: bool + sample: yes + ephemeral_bits_in: + description: + - Number of ephemeral ingress bits. + returned: queried + type: int + sample: 1000 + ephemeral_bits_out: + description: + - Number of ephemeral egress bits. + returned: queried + type: int + sample: 200 + ephemeral_current_connections: + description: + - Number of ephemeral current connections. + returned: queried + type: int + sample: 300 + ephemeral_evicted_connections: + description: + - Number of ephemeral evicted connections. + returned: queried + type: int + sample: 100 + ephemeral_max_connections: + description: + - Maximum number of ephemeral connections. + returned: queried + type: int + sample: 40 + ephemeral_pkts_in: + description: + - Number of ephemeral ingress packets. + returned: queried + type: int + sample: 1098384 + ephemeral_pkts_out: + description: + - Number of ephemeral egress packets. + returned: queried + type: int + sample: 3484734 + ephemeral_slow_killed: + description: + - Number of ephemeral slow connections killed. + returned: queried + type: int + sample: 234 + ephemeral_total_connections: + description: + - Total number of ephemeral connections. + returned: queried + type: int + sample: 24 + total_software_accepted_syn_cookies: + description: + - SYN Cookies Total Software Accepted. + returned: queried + type: int + sample: 0 + total_hardware_accepted_syn_cookies: + description: + - SYN Cookies Total Hardware Accepted. 
+ returned: queried + type: int + sample: 0 + total_hardware_syn_cookies: + description: + - SYN Cookies Total Hardware + returned: queried + type: int + sample: 0 + hardware_syn_cookie_instances: + description: + - Hardware SYN Cookie Instances + returned: queried + type: int + sample: 0 + total_software_rejected_syn_cookies: + description: + - Total Software Rejected + returned: queried + type: int + sample: 0 + software_syn_cookie_instances: + description: + - Software SYN Cookie Instances + returned: queried + type: int + sample: 0 + current_syn_cache: + description: + - Current SYN Cache + returned: queried + type: int + sample: 0 + max_conn_duration: + description: + - Max Conn Duration/msec + returned: queried + type: int + sample: 0 + mean_conn_duration: + description: + - Mean Conn Duration/msec + returned: queried + type: int + sample: 0 + min_conn_duration: + description: + - Min Conn Duration/msec + returned: queried + type: int + sample: 0 + cpu_usage_ratio_last_5_min: + description: + - CPU Usage Ratio (%) Last 5 Minutes + returned: queried + type: int + sample: 0 + cpu_usage_ratio_last_5_sec: + description: + - CPU Usage Ratio (%) Last 5 Seconds + returned: queried + type: int + sample: 0 + cpu_usage_ratio_last_1_min: + description: + - CPU Usage Ratio (%) Last 1 Minute + returned: queried + type: int + sample: 0 + syn_cache_overflow: + description: + - SYN Cache Overflow + returned: queried + type: int + sample: 0 + total_software_syn_cookies: + description: + - Total Software + returned: queried + type: int + sample: 0 + syn_cookies_status: + description: + - SYN Cookies Status + returned: queried + type: str + sample: not-activated + fallback_persistence_profile: + description: + - Fallback persistence profile for the virtual server to use + when the default persistence profile is not available. + returned: queried + type: str + sample: /Common/fallback1 + persistence_profile: + description: + - The persistence profile you want the system to use as the default + for this virtual server. + returned: queried + type: str + sample: /Common/persist1 + translate_port: + description: + - Enables or disables port translation. + returned: queried + type: bool + sample: yes + translate_address: + description: + - Enables or disables address translation for the virtual server. + returned: queried + type: bool + sample: yes + vlans: + description: + - List of VLANs on which the virtual server is either enabled or disabled. + returned: queried + type: list + sample: ['/Common/vlan1', '/Common/vlan2'] + destination: + description: + - Name of the virtual address and service on which the virtual server + listens for connections. + returned: queried + type: str + sample: /Common/2.2.3.3%1:76 + last_hop_pool: + description: + - Name of the last hop pool that you want the virtual + server to use to direct reply traffic to the last hop router. + returned: queried + type: str + sample: /Common/pool1 + nat64_enabled: + description: + - Whether or not NAT64 is enabled. + returned: queried + type: bool + sample: yes + source_port_behavior: + description: + - Specifies whether the system preserves the source port of the connection. + returned: queried + type: str + sample: preserve + ip_intelligence_policy: + description: + - IP Intelligence policy assigned to the virtual + returned: queried + type: str + sample: /Common/ip1 + protocol: + description: + - IP protocol for which you want the virtual server to direct traffic. 
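+ # Illustrative usage (editorial; the registered variable name C(device_info)
+ # is an assumption): selecting the names of enabled virtual servers from the
+ # gathered facts.
+ #   "{{ device_info.virtual_servers | selectattr('enabled') | map(attribute='name') | list }}"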
+ returned: queried
+ type: str
+ sample: tcp
+ default_pool:
+ description:
+ - Pool name that you want the virtual server to use as the default pool.
+ returned: queried
+ type: str
+ sample: /Common/pool1
+ rate_limit_mode:
+ description:
+ - Indicates whether the rate limit is applied per virtual object,
+ per source address, per destination address, or some combination
+ thereof.
+ returned: queried
+ type: str
+ sample: object
+ rate_limit_source_mask:
+ description:
+ - Specifies a mask, in bits, to be applied to the source address as
+ part of the rate limiting.
+ returned: queried
+ type: int
+ sample: 0
+ rate_limit:
+ description:
+ - Maximum number of connections per second allowed for a virtual server.
+ returned: queried
+ type: int
+ sample: 34
+ snat_type:
+ description:
+ - Specifies the type of source address translation associated
+ with the specified virtual server.
+ returned: queried
+ type: str
+ sample: none
+ snat_pool:
+ description:
+ - Specifies the name of an LSN or SNAT pool used by the specified virtual server.
+ returned: queried
+ type: str
+ sample: /Common/pool1
+ status_reason:
+ description:
+ - If there is a problem with the status of the virtual, that problem is reported here.
+ returned: queried
+ type: str
+ sample: The children pool member(s) either don't have service checking...
+ gtm_score:
+ description:
+ - Specifies a score that is associated with the virtual server.
+ returned: queried
+ type: int
+ sample: 0
+ rate_class:
+ description:
+ - Name of an existing rate class that you want the
+ virtual server to use to enforce a throughput policy for incoming
+ network traffic.
+ returned: queried
+ type: str
+ rate_limit_destination_mask:
+ description:
+ - Specifies a mask, in bits, to be applied to the destination
+ address as part of the rate limiting.
+ returned: queried
+ type: int
+ sample: 32
+ source_address:
+ description:
+ - Specifies an IP address or network from which the virtual server
+ will accept traffic.
+ returned: queried
+ type: str
+ sample: 0.0.0.0/0
+ authentication_profile:
+ description:
+ - Specifies a list of authentication profile names, separated by
+ spaces, that the virtual server uses to manage authentication.
+ returned: queried
+ type: list
+ sample: ['/Common/ssl_drldp']
+ connection_mirror_enabled:
+ description:
+ - Whether or not connection mirroring is enabled.
+ returned: queried
+ type: bool
+ sample: yes
+ irules:
+ description:
+ - List of iRules that customize the virtual server to direct and manage traffic.
+ returned: queried
+ type: list
+ sample: ['/Common/rule1', '/Common/rule2']
+ security_log_profiles:
+ description:
+ - Specifies the log profile applied to the virtual server.
+ returned: queried
+ type: list
+ sample: ['/Common/global-network', '/Common/local-dos']
+ type:
+ description:
+ - Virtual server type.
+ returned: queried
+ type: str
+ sample: standard
+ destination_address:
+ description:
+ - Address portion of the C(destination).
+ returned: queried
+ type: str
+ sample: 2.3.3.2
+ destination_port:
+ description:
+ - Port portion of the C(destination).
+ returned: queried
+ type: int
+ sample: 80
+ profiles:
+ description:
+ - List of the profiles attached to the virtual.
+ type: complex
+ contains:
+ context:
+ description:
+ - Which side of the connection the profile affects; either C(all),
+ C(client-side) or C(server-side).
+ returned: queried
+ type: str
+ sample: client-side
+ full_path:
+ description:
+ - Full name of the resource as known to BIG-IP.
+ returned: queried
+ type: str
+ sample: /Common/tcp
+ name:
+ description:
+ - Relative name of the resource in BIG-IP.
+ returned: queried
+ type: str
+ sample: tcp
+ total_requests:
+ description:
+ - Total requests.
+ returned: queried
+ type: int
+ sample: 8
+ sample: hash/dictionary of values
+vlans:
+ description: List of VLAN information.
+ returned: When C(vlans) is specified in C(gather_subset).
+ type: complex
+ contains:
+ auto_lasthop:
+ description:
+ - Allows the system to send return traffic to the MAC address that transmitted the
+ request, even if the routing table points to a different network or interface.
+ returned: queried
+ type: str
+ sample: enabled
+ cmp_hash_algorithm:
+ description:
+ - Specifies how the traffic on the VLAN will be disaggregated.
+ returned: queried
+ type: str
+ sample: default
+ description:
+ description:
+ - Description of the VLAN.
+ returned: queried
+ type: str
+ sample: My vlan
+ failsafe_action:
+ description:
+ - Action for the system to take when the fail-safe mechanism is triggered.
+ returned: queried
+ type: str
+ sample: reboot
+ failsafe_enabled:
+ description:
+ - Whether failsafe is enabled or not.
+ returned: queried
+ type: bool
+ sample: yes
+ failsafe_timeout:
+ description:
+ - Number of seconds that an active unit can run without detecting network traffic
+ on this VLAN before it starts a failover.
+ returned: queried
+ type: int
+ sample: 90
+ if_index:
+ description:
+ - Index assigned to this VLAN. It is a unique identifier assigned for all objects
+ displayed in the SNMP IF-MIB.
+ returned: queried
+ type: int
+ sample: 176
+ learning_mode:
+ description:
+ - Whether switch ports placed in the VLAN are configured for switch learning,
+ forwarding only, or dropped.
+ returned: queried
+ type: str
+ sample: enable-forward
+ interfaces:
+ description:
+ - List of tagged or untagged interfaces and trunks that you want to configure for the VLAN.
+ returned: queried
+ type: complex
+ contains:
+ full_path:
+ description:
+ - Full name of the resource as known to BIG-IP.
+ returned: queried
+ type: str
+ sample: 1.3
+ name:
+ description:
+ - Relative name of the resource in BIG-IP.
+ returned: queried
+ type: str
+ sample: 1.3
+ tagged:
+ description:
+ - Whether the interface is tagged or not.
+ returned: queried
+ type: bool
+ sample: no
+ mtu:
+ description:
+ - Specific maximum transmission unit (MTU) for the VLAN.
+ returned: queried
+ type: int
+ sample: 1500
+ sflow_poll_interval:
+ description:
+ - Maximum interval in seconds between two pollings.
+ returned: queried
+ type: int
+ sample: 0
+ sflow_poll_interval_global:
+ description:
+ - Whether the global VLAN poll-interval setting overrides the object-level
+ poll-interval setting.
+ returned: queried
+ type: bool
+ sample: no
+ sflow_sampling_rate:
+ description:
+ - Ratio of packets observed to the samples generated.
+ returned: queried
+ type: int
+ sample: 0
+ sflow_sampling_rate_global:
+ description:
+ - Whether the global VLAN sampling-rate setting overrides the object-level
+ sampling-rate setting.
+ returned: queried
+ type: bool
+ sample: yes
+ source_check_enabled:
+ description:
+ - Specifies that only connections that have a return route in the routing table are accepted.
+ returned: queried
+ type: bool
+ sample: yes
+ true_mac_address:
+ description:
+ - Media access control (MAC) address for the lowest-numbered interface assigned to this VLAN.
+ returned: queried
+ type: str
+ sample: "fa:16:3e:10:da:ff"
+ tag:
+ description:
+ - Tag number for the VLAN.
+ returned: queried
+ type: int
+ sample: 30
+ sample: hash/dictionary of values
+'''
+
+import datetime
+import math
+import re
+import time
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import to_netmask
+from ansible.module_utils.parsing.convert_bool import BOOLEANS_TRUE
+from ansible.module_utils.six import iteritems
+from ansible.module_utils.six import string_types
+from collections import namedtuple
+from distutils.version import LooseVersion
+
+try:
+ from library.module_utils.network.f5.bigip import F5RestClient
+ from library.module_utils.network.f5.common import F5ModuleError
+ from library.module_utils.network.f5.common import AnsibleF5Parameters
+ from library.module_utils.network.f5.common import f5_argument_spec
+ from library.module_utils.network.f5.common import fq_name
+ from library.module_utils.network.f5.common import flatten_boolean
+ from library.module_utils.network.f5.common import transform_name
+ from library.module_utils.network.f5.ipaddress import is_valid_ip
+ from library.module_utils.network.f5.icontrol import modules_provisioned
+ from library.module_utils.network.f5.icontrol import tmos_version
+ from library.module_utils.network.f5.urls import parseStats
+except ImportError:
+ from ansible_collections.community.general.plugins.module_utils.network.f5.bigip import F5RestClient
+ from ansible_collections.community.general.plugins.module_utils.network.f5.common import F5ModuleError
+ from ansible_collections.community.general.plugins.module_utils.network.f5.common import AnsibleF5Parameters
+ from ansible_collections.community.general.plugins.module_utils.network.f5.common import f5_argument_spec
+ from ansible_collections.community.general.plugins.module_utils.network.f5.common import fq_name
+ from ansible_collections.community.general.plugins.module_utils.network.f5.common import flatten_boolean
+ from ansible_collections.community.general.plugins.module_utils.network.f5.common import transform_name
+ from ansible_collections.community.general.plugins.module_utils.network.f5.ipaddress import is_valid_ip
+ from ansible_collections.community.general.plugins.module_utils.network.f5.icontrol import modules_provisioned
+ from ansible_collections.community.general.plugins.module_utils.network.f5.icontrol import tmos_version
+ from ansible_collections.community.general.plugins.module_utils.network.f5.urls import parseStats
+
+
+class BaseManager(object):
+ def __init__(self, *args, **kwargs):
+ self.module = kwargs.get('module', None)
+ self.client = kwargs.get('client', None)
+ self.kwargs = kwargs
+
+ # A list of modules currently provisioned on the device.
+ #
+ # This list is used by different fact managers to check to see
+ # if they should even attempt to gather information. If the module is
+ # not provisioned, then it is likely that the REST API will not
+ # return valid data.
+ #
+ # For example, ASM (at the time of this writing 13.x/14.x) will
+ # raise an exception if you attempt to query its APIs when it is
+ # not provisioned. An example error message is shown below.
+
+
+class BaseManager(object):
+    def __init__(self, *args, **kwargs):
+        self.module = kwargs.get('module', None)
+        self.client = kwargs.get('client', None)
+        self.kwargs = kwargs
+
+        # A list of modules currently provisioned on the device.
+        #
+        # This list is used by the different fact managers to check whether
+        # they should even attempt to gather information. If a module is
+        # not provisioned, then it is likely that the REST API will not
+        # return valid data.
+        #
+        # For example, ASM (at the time of this writing 13.x/14.x) will
+        # raise an exception if you attempt to query its APIs while it is
+        # not provisioned. An example error message is shown below.
+        #
+        # {
+        #   "code": 400,
+        #   "message": "java.net.ConnectException: Connection refused (Connection refused)",
+        #   "referer": "172.18.43.40",
+        #   "restOperationId": 18164160,
+        #   "kind": ":resterrorresponse"
+        # }
+        #
+        # This list is provided to the specific fact manager by the
+        # master ModuleManager of this module.
+        self.provisioned_modules = []
+
+    def exec_module(self):
+        results = []
+        facts = self.read_facts()
+        for item in facts:
+            attrs = item.to_return()
+            results.append(attrs)
+        return results
+
+
+class Parameters(AnsibleF5Parameters):
+    @property
+    def gather_subset(self):
+        if isinstance(self._values['gather_subset'], string_types):
+            self._values['gather_subset'] = [self._values['gather_subset']]
+        elif not isinstance(self._values['gather_subset'], list):
+            raise F5ModuleError(
+                "The specified gather_subset must be a list."
+            )
+        tmp = list(set(self._values['gather_subset']))
+        tmp.sort()
+        self._values['gather_subset'] = tmp
+
+        return self._values['gather_subset']
+
+
+class BaseParameters(Parameters):
+    @property
+    def enabled(self):
+        return flatten_boolean(self._values['enabled'])
+
+    @property
+    def disabled(self):
+        return flatten_boolean(self._values['disabled'])
+
+    def _remove_internal_keywords(self, resource):
+        resource.pop('kind', None)
+        resource.pop('generation', None)
+        resource.pop('selfLink', None)
+        resource.pop('isSubcollection', None)
+        resource.pop('fullPath', None)
+
+    def to_return(self):
+        result = {}
+        for returnable in self.returnables:
+            result[returnable] = getattr(self, returnable)
+        result = self._filter_params(result)
+        return result
+
+
+class AsmPolicyStatsParameters(BaseParameters):
+    api_map = {}
+
+    returnables = [
+        'policies',
+        'policies_active',
+        'policies_attached',
+        'policies_inactive',
+        'policies_unattached',
+    ]
+
+    @property
+    def policies(self):
+        if self._values['policies'] is None or len(self._values['policies']) == 0:
+            return None
+        return len(self._values['policies'])
+
+    @property
+    def policies_active(self):
+        if self._values['policies'] is None or len(self._values['policies']) == 0:
+            return None
+        return len([x for x in self._values['policies'] if x['active'] is True])
+
+    @property
+    def policies_inactive(self):
+        if self._values['policies'] is None or len(self._values['policies']) == 0:
+            return None
+        return len([x for x in self._values['policies'] if x['active'] is not True])
+
+    @property
+    def policies_attached(self):
+        if self._values['policies'] is None or len(self._values['policies']) == 0:
+            return None
+        return len([x for x in self._values['policies'] if x['active'] is True and len(x['virtualServers']) > 0])
+
+    @property
+    def policies_unattached(self):
+        if self._values['policies'] is None or len(self._values['policies']) == 0:
+            return None
+        return len([x for x in self._values['policies'] if x['active'] is True and len(x['virtualServers']) == 0])
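BaseParameters.to_return() above drives the whole fact pipeline: every name listed in a subclass's returnables is resolved through a property (or the raw value) and collected into one flat dictionary. A self-contained toy model of that contract (DemoParameters is illustrative only, not part of the module):

class DemoParameters(object):
    # Toy stand-in for a BaseParameters subclass.
    returnables = ['name', 'policy_count']

    def __init__(self, values):
        self._values = values

    @property
    def name(self):
        return self._values.get('name')

    @property
    def policy_count(self):
        items = self._values.get('policies')
        if not items:
            return None
        return len(items)

    def to_return(self):
        # Resolve each returnable through its property, as to_return() does above.
        return dict((key, getattr(self, key)) for key in self.returnables)


# DemoParameters({'name': 'asm', 'policies': [1, 2]}).to_return()
# -> {'name': 'asm', 'policy_count': 2}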
+
+
+class AsmPolicyStatsFactManager(BaseManager):
+    def __init__(self, *args, **kwargs):
+        self.client = kwargs.get('client', None)
+        self.module = kwargs.get('module', None)
+        super(AsmPolicyStatsFactManager, self).__init__(**kwargs)
+        self.want = AsmPolicyStatsParameters(params=self.module.params)
+
+    def exec_module(self):
+        facts = self._exec_module()
+        result = dict(asm_policy_stats=facts)
+        return result
+
+    def _exec_module(self):
+        if 'asm' not in self.provisioned_modules:
+            return []
+        facts = self.read_facts()
+        results = facts.to_return()
+        return results
+
+    def read_facts(self):
+        collection = self.read_collection_from_device()
+        params = AsmPolicyStatsParameters(params=collection)
+        return params
+
+    def read_collection_from_device(self):
+        uri = "https://{0}:{1}/mgmt/tm/asm/policies".format(
+            self.client.provider['server'],
+            self.client.provider['server_port'],
+        )
+        resp = self.client.api.get(uri)
+        try:
+            response = resp.json()
+        except ValueError as ex:
+            raise F5ModuleError(str(ex))
+        if 'code' in response and response['code'] == 400:
+            if 'message' in response:
+                raise F5ModuleError(response['message'])
+            else:
+                raise F5ModuleError(resp.content)
+        return dict(
+            policies=response['items']
+        )
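The decode-and-raise sequence in read_collection_from_device() above is repeated nearly verbatim by every fact manager in this file. A hedged sketch of how it could be centralized; parse_f5_response is not part of the module, and it reuses the F5ModuleError imported earlier:

def parse_f5_response(resp):
    # Decode the REST payload, turning JSON decode failures and BIG-IP
    # 400-style error bodies into F5ModuleError, as the managers above do.
    try:
        response = resp.json()
    except ValueError as ex:
        raise F5ModuleError(str(ex))
    if 'code' in response and response['code'] == 400:
        # Prefer the server-supplied message; fall back to the raw body.
        raise F5ModuleError(response.get('message', resp.content))
    return response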
+
+
+class AsmPolicyFactParameters(BaseParameters):
+    api_map = {
+        'hasParent': 'has_parent',
+        'protocolIndependent': 'protocol_independent',
+        'virtualServers': 'virtual_servers',
+        'allowedResponseCodes': 'allowed_response_codes',
+        'learningMode': 'learning_mode',
+        'enforcementMode': 'enforcement_mode',
+        'customXffHeaders': 'custom_xff_headers',
+        'caseInsensitive': 'case_insensitive',
+        'stagingSettings': 'staging_settings',
+        'applicationLanguage': 'application_language',
+        'trustXff': 'trust_xff',
+        'geolocation-enforcement': 'geolocation_enforcement',
+        'disallowedLocations': 'disallowed_locations',
+        'signature-settings': 'signature_settings',
+        'header-settings': 'header_settings',
+        'cookie-settings': 'cookie_settings',
+        'policy-builder': 'policy_builder',
+        'disallowed-geolocations': 'disallowed_geolocations',
+        'whitelist-ips': 'whitelist_ips',
+        'fullPath': 'full_path',
+        'csrf-protection': 'csrf_protection',
+    }
+
+    returnables = [
+        'full_path',
+        'name',
+        'policy_id',
+        'active',
+        'protocol_independent',
+        'has_parent',
+        'type',
+        'virtual_servers',
+        'allowed_response_codes',
+        'description',
+        'learning_mode',
+        'enforcement_mode',
+        'custom_xff_headers',
+        'case_insensitive',
+        'signature_staging',
+        'place_signatures_in_staging',
+        'enforcement_readiness_period',
+        'path_parameter_handling',
+        'trigger_asm_irule_event',
+        'inspect_http_uploads',
+        'mask_credit_card_numbers_in_request',
+        'maximum_http_header_length',
+        'use_dynamic_session_id_in_url',
+        'maximum_cookie_header_length',
+        'application_language',
+        'trust_xff',
+        'disallowed_geolocations',
+        'csrf_urls',
+        'csrf_protection_enabled',
+        'csrf_protection_ssl_only',
+        'csrf_protection_expiration_time_in_seconds',
+    ]
+
+    def _morph_keys(self, key_map, item):
+        for k, v in iteritems(key_map):
+            item[v] = item.pop(k, None)
+        result = self._filter_params(item)
+        return result
+
+    @property
+    def active(self):
+        return flatten_boolean(self._values['active'])
+
+    @property
+    def case_insensitive(self):
+        return flatten_boolean(self._values['case_insensitive'])
+
+    @property
+    def has_parent(self):
+        return flatten_boolean(self._values['has_parent'])
+
+    @property
+    def policy_id(self):
+        if self._values['id'] is None:
+            return None
+        return self._values['id']
+
+    @property
+    def signature_staging(self):
+        if 'staging_settings' in self._values:
+            if self._values['staging_settings'] is None:
+                return None
+            if 'signatureStaging' in self._values['staging_settings']:
+                return flatten_boolean(self._values['staging_settings']['signatureStaging'])
+        if 'signature_settings' in self._values:
+            if self._values['signature_settings'] is None:
+                return None
+            if 'signatureStaging' in self._values['signature_settings']:
+                return flatten_boolean(self._values['signature_settings']['signatureStaging'])
+
+    @property
+    def place_signatures_in_staging(self):
+        if 'staging_settings' in self._values:
+            if self._values['staging_settings'] is None:
+                return None
+            if 'placeSignaturesInStaging' in self._values['staging_settings']:
+                return flatten_boolean(self._values['staging_settings']['placeSignaturesInStaging'])
+        if 'signature_settings' in self._values:
+            if self._values['signature_settings'] is None:
+                return None
+            # Check the same key that is returned; the previous check against
+            # 'signatureStaging' could raise KeyError on the lookup below.
+            if 'placeSignaturesInStaging' in self._values['signature_settings']:
+                return flatten_boolean(self._values['signature_settings']['placeSignaturesInStaging'])
+
+    @property
+    def enforcement_readiness_period(self):
+        if 'staging_settings' in self._values:
+            if self._values['staging_settings'] is None:
+                return None
+            if 'enforcementReadinessPeriod' in self._values['staging_settings']:
+                return self._values['staging_settings']['enforcementReadinessPeriod']
+        if 'general' in self._values:
+            if self._values['general'] is None:
+                return None
+            # As above, test for the key that is actually returned.
+            if 'enforcementReadinessPeriod' in self._values['general']:
+                return self._values['general']['enforcementReadinessPeriod']
+
+    @property
+    def path_parameter_handling(self):
+        if 'attributes' in self._values:
+            if self._values['attributes'] is None:
+                return None
+            if 'pathParameterHandling' in self._values['attributes']:
+                return self._values['attributes']['pathParameterHandling']
+        if 'general' in self._values:
+            if self._values['general'] is None:
+                return None
+            if 'pathParameterHandling' in self._values['general']:
+                return self._values['general']['pathParameterHandling']
+
+    @property
+    def trigger_asm_irule_event(self):
+        if 'attributes' in self._values:
+            if self._values['attributes'] is None:
+                return None
+            if 'triggerAsmIruleEvent' in self._values['attributes']:
+                return self._values['attributes']['triggerAsmIruleEvent']
+        if 'general' in self._values:
+            if self._values['general'] is None:
+                return None
+            if 'triggerAsmIruleEvent' in self._values['general']:
+                return self._values['general']['triggerAsmIruleEvent']
+
+    @property
+    def inspect_http_uploads(self):
+        if 'attributes' in self._values:
+            if self._values['attributes'] is None:
+                return None
+            if 'inspectHttpUploads' in self._values['attributes']:
+                return flatten_boolean(self._values['attributes']['inspectHttpUploads'])
+        if 'antivirus' in self._values:
+            if self._values['antivirus'] is None:
+                return None
+            if 'inspectHttpUploads' in self._values['antivirus']:
+                return flatten_boolean(self._values['antivirus']['inspectHttpUploads'])
+
+    @property
+    def mask_credit_card_numbers_in_request(self):
+        if 'attributes' in self._values:
+            if self._values['attributes'] is None:
+                return None
+            if 'maskCreditCardNumbersInRequest' in self._values['attributes']:
+                return flatten_boolean(self._values['attributes']['maskCreditCardNumbersInRequest'])
+        if 'general' in self._values:
+            if self._values['general'] is None:
+                return None
+            if 'maskCreditCardNumbersInRequest' in self._values['general']:
+                return flatten_boolean(self._values['general']['maskCreditCardNumbersInRequest'])
+
+    @property
+    def maximum_http_header_length(self):
+        if 'attributes' in self._values:
+            if self._values['attributes'] is None:
+                return None
+            if 'maximumHttpHeaderLength' in self._values['attributes']:
+                if self._values['attributes']['maximumHttpHeaderLength'] == 'any':
+                    return 'any'
+                return int(self._values['attributes']['maximumHttpHeaderLength'])
+
+        if 'header_settings' in self._values:
+            if self._values['header_settings'] is None:
+                return None
+            if 'maximumHttpHeaderLength' in self._values['header_settings']:
+                if self._values['header_settings']['maximumHttpHeaderLength'] == 'any':
+                    return 'any'
+                return int(self._values['header_settings']['maximumHttpHeaderLength'])
+
@property + def use_dynamic_session_id_in_url(self): + if 'attributes' in self._values: + if self._values['attributes'] is None: + return None + if 'useDynamicSessionIdInUrl' in self._values['attributes']: + return flatten_boolean(self._values['attributes']['useDynamicSessionIdInUrl']) + if 'general' in self._values: + if self._values['general'] is None: + return None + if 'useDynamicSessionIdInUrl' in self._values['general']: + return flatten_boolean(self._values['general']['useDynamicSessionIdInUrl']) + + @property + def maximum_cookie_header_length(self): + if 'attributes' in self._values: + if self._values['attributes'] is None: + return None + if 'maximumCookieHeaderLength' in self._values['attributes']: + if self._values['attributes']['maximumCookieHeaderLength'] == 'any': + return 'any' + return int(self._values['attributes']['maximumCookieHeaderLength']) + if 'cookie_settings' in self._values: + if self._values['cookie_settings'] is None: + return None + if 'maximumCookieHeaderLength' in self._values['cookie_settings']: + if self._values['cookie_settings']['maximumCookieHeaderLength'] == 'any': + return 'any' + return int(self._values['cookie_settings']['maximumCookieHeaderLength']) + + @property + def trust_xff(self): + if 'trust_xff' in self._values: + if self._values['trust_xff'] is None: + return None + return flatten_boolean(self._values['trust_xff']) + if 'general' in self._values: + if self._values['general'] is None: + return None + if 'trustXff' in self._values['general']: + return flatten_boolean(self._values['general']['trustXff']) + + @property + def custom_xff_headers(self): + if 'custom_xff_headers' in self._values: + if self._values['custom_xff_headers'] is None: + return None + return self._values['custom_xff_headers'] + if 'general' in self._values: + if self._values['general'] is None: + return None + if 'customXffHeaders' in self._values['general']: + return self._values['general']['customXffHeaders'] + + @property + def allowed_response_codes(self): + if 'allowed_response_codes' in self._values: + if self._values['allowed_response_codes'] is None: + return None + return self._values['allowed_response_codes'] + if 'general' in self._values: + if self._values['general'] is None: + return None + if 'allowedResponseCodes' in self._values['general']: + return self._values['general']['allowedResponseCodes'] + + @property + def learning_mode(self): + if 'policy_builder' in self._values: + if self._values['policy_builder'] is None: + return None + if 'learningMode' in self._values['policy_builder']: + return self._values['policy_builder']['learningMode'] + + @property + def disallowed_locations(self): + if 'geolocation_enforcement' in self._values: + if self._values['geolocation_enforcement'] is None: + return None + return self._values['geolocation_enforcement']['disallowedLocations'] + + @property + def disallowed_geolocations(self): + if 'disallowed_geolocations' in self._values: + if self._values['disallowed_geolocations'] is None: + return None + return self._values['disallowed_geolocations'] + + @property + def csrf_protection_enabled(self): + if 'csrf_protection' in self._values: + return flatten_boolean(self._values['csrf_protection']['enabled']) + + @property + def csrf_protection_ssl_only(self): + if 'csrf_protection' in self._values: + if 'sslOnly' in self._values['csrf_protection']: + return flatten_boolean(self._values['csrf_protection']['sslOnly']) + + @property + def csrf_protection_expiration_time_in_seconds(self): + if 'csrf_protection' in self._values: 
+            if 'expirationTimeInSeconds' in self._values['csrf_protection']:
+                if self._values['csrf_protection']['expirationTimeInSeconds'] is None:
+                    return None
+                if self._values['csrf_protection']['expirationTimeInSeconds'] == 'disabled':
+                    return 'disabled'
+                return int(self._values['csrf_protection']['expirationTimeInSeconds'])
+
+    def format_csrf_collection(self, items):
+        result = list()
+        key_map = {
+            'requiredParameters': 'csrf_url_required_parameters',
+            'url': 'csrf_url',
+            'method': 'csrf_url_method',
+            'enforcementAction': 'csrf_url_enforcement_action',
+            'id': 'csrf_url_id',
+            'wildcardOrder': 'csrf_url_wildcard_order',
+            'parametersList': 'csrf_url_parameters_list'
+        }
+        for item in items:
+            self._remove_internal_keywords(item)
+            # Use a default so a missing key cannot raise KeyError.
+            item.pop('lastUpdateMicros', None)
+            output = self._morph_keys(key_map, item)
+            result.append(output)
+        return result
+
+    @property
+    def csrf_urls(self):
+        if 'csrfUrls' in self._values:
+            if self._values['csrfUrls'] is None:
+                return None
+            return self._values['csrfUrls']
+        if 'csrf-urls' in self._values:
+            if self._values['csrf-urls'] is None:
+                return None
+            return self.format_csrf_collection(self._values['csrf-urls'])
+
+    @property
+    def protocol_independent(self):
+        return flatten_boolean(self._values['protocol_independent'])
+
+
+# TODO include: web-scraping,ip-intelligence,session-tracking,
+# TODO login-enforcement,data-guard,redirection-protection,vulnerability-assessment, parentPolicyReference
+
+
+class AsmPolicyFactManager(BaseManager):
+    def __init__(self, *args, **kwargs):
+        self.client = kwargs.get('client', None)
+        self.module = kwargs.get('module', None)
+        super(AsmPolicyFactManager, self).__init__(**kwargs)
+        self.want = AsmPolicyFactParameters(params=self.module.params)
+
+    def exec_module(self):
+        facts = self._exec_module()
+        result = dict(asm_policies=facts)
+        return result
+
+    def _exec_module(self):
+        if 'asm' not in self.provisioned_modules:
+            return []
+        manager = self.get_manager()
+        return manager._exec_module()
+
+    def get_manager(self):
+        if self.version_is_less_than_13():
+            return AsmPolicyFactManagerV12(**self.kwargs)
+        else:
+            return AsmPolicyFactManagerV13(**self.kwargs)
+
+    def version_is_less_than_13(self):
+        version = tmos_version(self.client)
+        if LooseVersion(version) < LooseVersion('13.0.0'):
+            return True
+        else:
+            return False
+
+    def read_facts(self):
+        results = []
+        collection = self.increment_read()
+        for resource in collection:
+            params = AsmPolicyFactParameters(params=resource)
+            results.append(params)
+        return results
+
+    def increment_read(self):
+        n = 0
+        result = []
+        while True:
+            items = self.read_collection_from_device(skip=n)
+            if not items:
+                break
+            result.extend(items)
+            n = n + 10
+        return result
+
+
+class AsmPolicyFactManagerV12(AsmPolicyFactManager):
+    def _exec_module(self):
+        results = []
+        facts = self.read_facts()
+        for item in facts:
+            attrs = item.to_return()
+            results.append(attrs)
+        results = sorted(results, key=lambda k: k['full_path'])
+        return results
+
+    def read_collection_from_device(self, skip=0):
+        uri = "https://{0}:{1}/mgmt/tm/asm/policies".format(
+            self.client.provider['server'],
+            self.client.provider['server_port'],
+        )
+
+        to_expand = 'policy-builder,geolocation-enforcement,csrf-protection'
+        query = '?$top=10&$skip={0}&$expand={1}'.format(skip, to_expand)
+
+        resp = self.client.api.get(uri + query)
+
+        try:
+            response = resp.json()
+        except ValueError as ex:
+            raise F5ModuleError(str(ex))
+
+        if 'code' in response and response['code'] == 400:
+            if 'message' in response:
raise F5ModuleError(response['message']) + else: + raise F5ModuleError(resp.content) + + if 'items' not in response: + return None + return response['items'] + + +class AsmPolicyFactManagerV13(AsmPolicyFactManager): + def _exec_module(self): + results = [] + facts = self.read_facts() + for item in facts: + attrs = item.to_return() + results.append(attrs) + results = sorted(results, key=lambda k: k['full_path']) + return results + + def read_collection_from_device(self, skip=0): + uri = "https://{0}:{1}/mgmt/tm/asm/policies".format( + self.client.provider['server'], + self.client.provider['server_port'], + ) + to_expand = 'general,signature-settings,header-settings,cookie-settings,antivirus,' \ + 'policy-builder,csrf-protection,csrf-urls' + query = '?$top=10&$skip={0}&$expand={1}'.format(skip, to_expand) + resp = self.client.api.get(uri + query) + + try: + response = resp.json() + except ValueError as ex: + raise F5ModuleError(str(ex)) + + if 'code' in response and response['code'] == 400: + if 'message' in response: + raise F5ModuleError(response['message']) + else: + raise F5ModuleError(resp.content) + + if 'items' not in response: + return None + + return response['items'] + + +class AsmServerTechnologyFactParameters(BaseParameters): + api_map = { + 'serverTechnologyName': 'server_technology_name', + 'serverTechnologyReferences': 'server_technology_references', + } + + returnables = [ + 'id', + 'server_technology_name', + 'server_technology_references', + ] + + +class AsmServerTechnologyFactManager(BaseManager): + def __init__(self, *args, **kwargs): + self.client = kwargs.get('client', None) + self.module = kwargs.get('module', None) + super(AsmServerTechnologyFactManager, self).__init__(**kwargs) + self.want = AsmServerTechnologyFactParameters(params=self.module.params) + + def exec_module(self): + facts = self._exec_module() + result = dict(asm_server_technologies=facts) + return result + + def _exec_module(self): + results = [] + if 'asm' not in self.provisioned_modules: + return results + if self.version_is_less_than_13(): + return results + facts = self.read_facts() + for item in facts: + attrs = item.to_return() + results.append(attrs) + results = sorted(results, key=lambda k: k['server_technology_name']) + return results + + def version_is_less_than_13(self): + version = tmos_version(self.client) + if LooseVersion(version) < LooseVersion('13.0.0'): + return True + else: + return False + + def read_facts(self): + results = [] + collection = self.read_collection_from_device() + for resource in collection: + params = AsmServerTechnologyFactParameters(params=resource) + results.append(params) + return results + + def read_collection_from_device(self): + uri = "https://{0}:{1}/mgmt/tm/asm/server-technologies".format( + self.client.provider['server'], + self.client.provider['server_port'], + ) + resp = self.client.api.get(uri) + try: + response = resp.json() + except ValueError as ex: + raise F5ModuleError(str(ex)) + if 'code' in response and response['code'] == 400: + if 'message' in response: + raise F5ModuleError(response['message']) + else: + raise F5ModuleError(resp.content) + if 'items' not in response: + return [] + result = response['items'] + return result + + +class AsmSignatureSetsFactParameters(BaseParameters): + api_map = { + 'isUserDefined': 'is_user_defined', + 'assignToPolicyByDefault': 'assign_to_policy_by_default', + 'defaultAlarm': 'default_alarm', + 'defaultBlock': 'default_block', + 'defaultLearn': 'default_learn', + } + + returnables = [ + 'name', + 'id', + 
'type', + 'category', + 'is_user_defined', + 'assign_to_policy_by_default', + 'default_alarm', + 'default_block', + 'default_learn', + ] + + @property + def is_user_defined(self): + return flatten_boolean(self._values['is_user_defined']) + + @property + def assign_to_policy_by_default(self): + return flatten_boolean(self._values['assign_to_policy_by_default']) + + @property + def default_alarm(self): + return flatten_boolean(self._values['default_alarm']) + + @property + def default_block(self): + return flatten_boolean(self._values['default_block']) + + @property + def default_learn(self): + return flatten_boolean(self._values['default_learn']) + +# TODO: add the following: filter, systems, signatureReferences + + +class AsmSignatureSetsFactManager(BaseManager): + def __init__(self, *args, **kwargs): + self.client = kwargs.get('client', None) + self.module = kwargs.get('module', None) + super(AsmSignatureSetsFactManager, self).__init__(**kwargs) + self.want = AsmSignatureSetsFactParameters(params=self.module.params) + + def exec_module(self): + facts = self._exec_module() + result = dict(asm_signature_sets=facts) + return result + + def _exec_module(self): + results = [] + if 'asm' not in self.provisioned_modules: + return results + facts = self.read_facts() + for item in facts: + attrs = item.to_return() + results.append(attrs) + results = sorted(results, key=lambda k: k['name']) + return results + + def read_facts(self): + results = [] + collection = self.increment_read() + for resource in collection: + params = AsmSignatureSetsFactParameters(params=resource) + results.append(params) + return results + + def increment_read(self): + n = 0 + result = [] + while True: + items = self.read_collection_from_device(skip=n) + if not items: + break + result.extend(items) + n = n + 5 + return result + + def read_collection_from_device(self, skip=0): + uri = "https://{0}:{1}/mgmt/tm/asm/signature-sets".format( + self.client.provider['server'], + self.client.provider['server_port'], + ) + query = '?$top=5&$skip={0}'.format(skip) + resp = self.client.api.get(uri + query) + + try: + response = resp.json() + except ValueError as ex: + raise F5ModuleError(str(ex)) + + if 'code' in response and response['code'] == 400: + if 'message' in response: + raise F5ModuleError(response['message']) + else: + raise F5ModuleError(resp.content) + + if 'items' not in response: + return None + + return response['items'] + + +class ClientSslProfilesParameters(BaseParameters): + api_map = { + 'fullPath': 'full_path', + 'alertTimeout': 'alert_timeout', + 'allowNonSsl': 'allow_non_ssl', + 'authenticateDepth': 'authenticate_depth', + 'authenticate': 'authenticate_frequency', + 'caFile': 'ca_file', + 'cacheSize': 'cache_size', + 'cacheTimeout': 'cache_timeout', + 'cert': 'certificate_file', + 'chain': 'chain_file', + 'crlFile': 'crl_file', + 'defaultsFrom': 'parent', + 'modSslMethods': 'modssl_methods', + 'peerCertMode': 'peer_certification_mode', + 'sniRequire': 'sni_require', + 'strictResume': 'strict_resume', + 'mode': 'profile_mode_enabled', + 'renegotiateMaxRecordDelay': 'renegotiation_maximum_record_delay', + 'renegotiatePeriod': 'renegotiation_period', + 'serverName': 'server_name', + 'sessionTicket': 'session_ticket', + 'sniDefault': 'sni_default', + 'uncleanShutdown': 'unclean_shutdown', + 'retainCertificate': 'retain_certificate', + 'secureRenegotiation': 'secure_renegotiation_mode', + 'handshakeTimeout': 'handshake_timeout', + 'certExtensionIncludes': 'forward_proxy_certificate_extension_include', + 
'certLifespan': 'forward_proxy_certificate_lifespan', + 'certLookupByIpaddrPort': 'forward_proxy_lookup_by_ipaddr_port', + 'sslForwardProxy': 'forward_proxy_enabled', + 'proxyCaPassphrase': 'forward_proxy_ca_passphrase', + 'proxyCaCert': 'forward_proxy_ca_certificate_file', + 'proxyCaKey': 'forward_proxy_ca_key_file' + } + + returnables = [ + 'full_path', + 'name', + 'alert_timeout', + 'allow_non_ssl', + 'authenticate_depth', + 'authenticate_frequency', + 'ca_file', + 'cache_size', + 'cache_timeout', + 'certificate_file', + 'chain_file', + 'ciphers', + 'crl_file', + 'parent', + 'description', + 'modssl_methods', + 'peer_certification_mode', + 'sni_require', + 'sni_default', + 'strict_resume', + 'profile_mode_enabled', + 'renegotiation_maximum_record_delay', + 'renegotiation_period', + 'renegotiation', + 'server_name', + 'session_ticket', + 'unclean_shutdown', + 'retain_certificate', + 'secure_renegotiation_mode', + 'handshake_timeout', + 'forward_proxy_certificate_extension_include', + 'forward_proxy_certificate_lifespan', + 'forward_proxy_lookup_by_ipaddr_port', + 'forward_proxy_enabled', + 'forward_proxy_ca_passphrase', + 'forward_proxy_ca_certificate_file', + 'forward_proxy_ca_key_file' + ] + + @property + def alert_timeout(self): + if self._values['alert_timeout'] is None: + return None + if self._values['alert_timeout'] == 'indefinite': + return 0 + return int(self._values['alert_timeout']) + + @property + def renegotiation_maximum_record_delay(self): + if self._values['renegotiation_maximum_record_delay'] is None: + return None + if self._values['renegotiation_maximum_record_delay'] == 'indefinite': + return 0 + return int(self._values['renegotiation_maximum_record_delay']) + + @property + def renegotiation_period(self): + if self._values['renegotiation_period'] is None: + return None + if self._values['renegotiation_period'] == 'indefinite': + return 0 + return int(self._values['renegotiation_period']) + + @property + def handshake_timeout(self): + if self._values['handshake_timeout'] is None: + return None + if self._values['handshake_timeout'] == 'indefinite': + return 0 + return int(self._values['handshake_timeout']) + + @property + def allow_non_ssl(self): + if self._values['allow_non_ssl'] is None: + return None + if self._values['allow_non_ssl'] == 'disabled': + return 'no' + return 'yes' + + @property + def forward_proxy_enabled(self): + if self._values['forward_proxy_enabled'] is None: + return None + if self._values['forward_proxy_enabled'] == 'disabled': + return 'no' + return 'yes' + + @property + def renegotiation(self): + if self._values['renegotiation'] is None: + return None + if self._values['renegotiation'] == 'disabled': + return 'no' + return 'yes' + + @property + def forward_proxy_lookup_by_ipaddr_port(self): + if self._values['forward_proxy_lookup_by_ipaddr_port'] is None: + return None + if self._values['forward_proxy_lookup_by_ipaddr_port'] == 'disabled': + return 'no' + return 'yes' + + @property + def unclean_shutdown(self): + if self._values['unclean_shutdown'] is None: + return None + if self._values['unclean_shutdown'] == 'disabled': + return 'no' + return 'yes' + + @property + def session_ticket(self): + if self._values['session_ticket'] is None: + return None + if self._values['session_ticket'] == 'disabled': + return 'no' + return 'yes' + + @property + def retain_certificate(self): + if self._values['retain_certificate'] is None: + return None + if self._values['retain_certificate'] == 'true': + return 'yes' + return 'no' + + @property + def 
server_name(self): + if self._values['server_name'] in [None, 'none']: + return None + return self._values['server_name'] + + @property + def forward_proxy_ca_certificate_file(self): + if self._values['forward_proxy_ca_certificate_file'] in [None, 'none']: + return None + return self._values['forward_proxy_ca_certificate_file'] + + @property + def forward_proxy_ca_key_file(self): + if self._values['forward_proxy_ca_key_file'] in [None, 'none']: + return None + return self._values['forward_proxy_ca_key_file'] + + @property + def authenticate_frequency(self): + if self._values['authenticate_frequency'] is None: + return None + return self._values['authenticate_frequency'] + + @property + def ca_file(self): + if self._values['ca_file'] in [None, 'none']: + return None + return self._values['ca_file'] + + @property + def certificate_file(self): + if self._values['certificate_file'] in [None, 'none']: + return None + return self._values['certificate_file'] + + @property + def chain_file(self): + if self._values['chain_file'] in [None, 'none']: + return None + return self._values['chain_file'] + + @property + def crl_file(self): + if self._values['crl_file'] in [None, 'none']: + return None + return self._values['crl_file'] + + @property + def ciphers(self): + if self._values['ciphers'] in [None, 'none']: + return None + return self._values['ciphers'].split(' ') + + @property + def modssl_methods(self): + if self._values['modssl_methods'] is None: + return None + if self._values['modssl_methods'] == 'disabled': + return 'no' + return 'yes' + + @property + def strict_resume(self): + if self._values['strict_resume'] is None: + return None + if self._values['strict_resume'] == 'disabled': + return 'no' + return 'yes' + + @property + def profile_mode_enabled(self): + if self._values['profile_mode_enabled'] is None: + return None + if self._values['profile_mode_enabled'] == 'disabled': + return 'no' + return 'yes' + + @property + def sni_require(self): + if self._values['sni_require'] is None: + return None + if self._values['sni_require'] == 'false': + return 'no' + return 'yes' + + @property + def sni_default(self): + if self._values['sni_default'] is None: + return None + if self._values['sni_default'] == 'false': + return 'no' + return 'yes' + + +class ClientSslProfilesFactManager(BaseManager): + def __init__(self, *args, **kwargs): + self.client = kwargs.get('client', None) + self.module = kwargs.get('module', None) + super(ClientSslProfilesFactManager, self).__init__(**kwargs) + self.want = ClientSslProfilesParameters(params=self.module.params) + + def exec_module(self): + facts = self._exec_module() + result = dict(client_ssl_profiles=facts) + return result + + def _exec_module(self): + results = [] + facts = self.read_facts() + for item in facts: + attrs = item.to_return() + results.append(attrs) + results = sorted(results, key=lambda k: k['full_path']) + return results + + def read_facts(self): + results = [] + collection = self.read_collection_from_device() + for resource in collection: + params = ClientSslProfilesParameters(params=resource) + results.append(params) + return results + + def read_collection_from_device(self): + uri = "https://{0}:{1}/mgmt/tm/ltm/profile/client-ssl".format( + self.client.provider['server'], + self.client.provider['server_port'], + ) + resp = self.client.api.get(uri) + try: + response = resp.json() + except ValueError as ex: + raise F5ModuleError(str(ex)) + if 'code' in response and response['code'] == 400: + if 'message' in response: + raise 
F5ModuleError(response['message']) + else: + raise F5ModuleError(resp.content) + if 'items' not in response: + return [] + result = response['items'] + return result + + +class DeviceGroupsParameters(BaseParameters): + api_map = { + 'fullPath': 'full_path', + 'autoSync': 'autosync_enabled', + 'asmSync': 'asm_sync_enabled', + 'devicesReference': 'devices', + 'fullLoadOnSync': 'full_load_on_sync', + 'incrementalConfigSyncSizeMax': 'incremental_config_sync_size_maximum', + 'networkFailover': 'network_failover_enabled' + } + + returnables = [ + 'full_path', + 'name', + 'autosync_enabled', + 'description', + 'devices', + 'full_load_on_sync', + 'incremental_config_sync_size_maximum', + 'network_failover_enabled', + 'type', + 'asm_sync_enabled' + ] + + @property + def network_failover_enabled(self): + if self._values['network_failover_enabled'] is None: + return None + if self._values['network_failover_enabled'] == 'enabled': + return 'yes' + return 'no' + + @property + def asm_sync_enabled(self): + if self._values['asm_sync_enabled'] is None: + return None + if self._values['asm_sync_enabled'] == 'disabled': + return 'no' + return 'yes' + + @property + def autosync_enabled(self): + if self._values['autosync_enabled'] is None: + return None + if self._values['autosync_enabled'] == 'disabled': + return 'no' + return 'yes' + + @property + def full_load_on_sync(self): + if self._values['full_load_on_sync'] is None: + return None + if self._values['full_load_on_sync'] == 'true': + return 'yes' + return 'no' + + @property + def devices(self): + if self._values['devices'] is None or 'items' not in self._values['devices']: + return None + result = [x['fullPath'] for x in self._values['devices']['items']] + result.sort() + return result + + +class DeviceGroupsFactManager(BaseManager): + def __init__(self, *args, **kwargs): + self.client = kwargs.get('client', None) + self.module = kwargs.get('module', None) + super(DeviceGroupsFactManager, self).__init__(**kwargs) + self.want = DeviceGroupsParameters(params=self.module.params) + + def exec_module(self): + facts = self._exec_module() + result = dict(device_groups=facts) + return result + + def _exec_module(self): + results = [] + facts = self.read_facts() + for item in facts: + attrs = item.to_return() + results.append(attrs) + results = sorted(results, key=lambda k: k['full_path']) + return results + + def read_facts(self): + results = [] + collection = self.read_collection_from_device() + for resource in collection: + params = DeviceGroupsParameters(params=resource) + results.append(params) + return results + + def read_collection_from_device(self): + uri = "https://{0}:{1}/mgmt/tm/cm/device-group/?expandSubcollections=true".format( + self.client.provider['server'], + self.client.provider['server_port'], + ) + resp = self.client.api.get(uri) + try: + response = resp.json() + except ValueError as ex: + raise F5ModuleError(str(ex)) + if 'code' in response and response['code'] == 400: + if 'message' in response: + raise F5ModuleError(response['message']) + else: + raise F5ModuleError(resp.content) + if 'items' not in response: + return [] + result = response['items'] + return result + + +class DevicesParameters(BaseParameters): + api_map = { + 'fullPath': 'full_path', + 'activeModules': 'active_modules', + 'baseMac': 'base_mac_address', + 'chassisId': 'chassis_id', + 'chassisType': 'chassis_type', + 'configsyncIp': 'configsync_address', + 'failoverState': 'failover_state', + 'managementIp': 'management_address', + 'marketingName': 'marketing_name', + 
'multicastIp': 'multicast_address', + 'optionalModules': 'optional_modules', + 'platformId': 'platform_id', + 'mirrorIp': 'primary_mirror_address', + 'mirrorSecondaryIp': 'secondary_mirror_address', + 'version': 'software_version', + 'timeLimitedModules': 'timelimited_modules', + 'timeZone': 'timezone', + 'unicastAddress': 'unicast_addresses', + 'selfDevice': 'self' + } + + returnables = [ + 'full_path', + 'name', + 'active_modules', + 'base_mac_address', + 'build', + 'chassis_id', + 'chassis_type', + 'comment', + 'configsync_address', + 'contact', + 'description', + 'edition', + 'failover_state', + 'hostname', + 'location', + 'management_address', + 'marketing_name', + 'multicast_address', + 'optional_modules', + 'platform_id', + 'primary_mirror_address', + 'product', + 'secondary_mirror_address', + 'self', + 'software_version', + 'timelimited_modules', + 'timezone', + 'unicast_addresses', + ] + + @property + def active_modules(self): + if self._values['active_modules'] is None: + return None + result = [] + for x in self._values['active_modules']: + parts = x.split('|') + result += parts[2:] + return list(set(result)) + + @property + def self(self): + result = flatten_boolean(self._values['self']) + return result + + @property + def configsync_address(self): + if self._values['configsync_address'] in [None, 'none']: + return None + return self._values['configsync_address'] + + @property + def primary_mirror_address(self): + if self._values['primary_mirror_address'] in [None, 'any6']: + return None + return self._values['primary_mirror_address'] + + @property + def secondary_mirror_address(self): + if self._values['secondary_mirror_address'] in [None, 'any6']: + return None + return self._values['secondary_mirror_address'] + + @property + def unicast_addresses(self): + if self._values['unicast_addresses'] is None: + return None + result = [] + + for addr in self._values['unicast_addresses']: + tmp = {} + for key in ['effectiveIp', 'effectivePort', 'ip', 'port']: + if key in addr: + renamed_key = self.convert(key) + tmp[renamed_key] = addr.get(key, None) + if tmp: + result.append(tmp) + if result: + return result + + def convert(self, name): + s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', name) + return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower() + + +class DevicesFactManager(BaseManager): + def __init__(self, *args, **kwargs): + self.client = kwargs.get('client', None) + self.module = kwargs.get('module', None) + super(DevicesFactManager, self).__init__(**kwargs) + self.want = DevicesParameters(params=self.module.params) + + def exec_module(self): + facts = self._exec_module() + result = dict(devices=facts) + return result + + def _exec_module(self): + results = [] + facts = self.read_facts() + for item in facts: + attrs = item.to_return() + results.append(attrs) + results = sorted(results, key=lambda k: k['full_path']) + return results + + def read_facts(self): + results = [] + collection = self.read_collection_from_device() + for resource in collection: + params = DevicesParameters(params=resource) + results.append(params) + return results + + def read_collection_from_device(self): + uri = "https://{0}:{1}/mgmt/tm/cm/device".format( + self.client.provider['server'], + self.client.provider['server_port'], + ) + resp = self.client.api.get(uri) + try: + response = resp.json() + except ValueError as ex: + raise F5ModuleError(str(ex)) + if 'code' in response and response['code'] == 400: + if 'message' in response: + raise F5ModuleError(response['message']) + else: + raise 
F5ModuleError(resp.content) + if 'items' not in response: + return [] + result = response['items'] + return result + + +class ExternalMonitorsParameters(BaseParameters): + api_map = { + 'fullPath': 'full_path', + 'defaultsFrom': 'parent', + 'adaptiveDivergenceType': 'adaptive_divergence_type', + 'adaptiveDivergenceValue': 'adaptive_divergence_value', + 'adaptiveLimit': 'adaptive_limit', + 'adaptiveSamplingTimespan': 'adaptive_sampling_timespan', + 'manualResume': 'manual_resume', + 'timeUntilUp': 'time_until_up', + 'upInterval': 'up_interval', + 'run': 'external_program', + 'apiRawValues': 'variables', + } + + returnables = [ + 'full_path', + 'name', + 'parent', + 'description', + 'args', + 'destination', + 'external_program', + 'interval', + 'manual_resume', + 'time_until_up', + 'timeout', + 'up_interval', + 'variables', + ] + + @property + def description(self): + if self._values['description'] in [None, 'none']: + return None + return self._values['description'] + + @property + def manual_resume(self): + return flatten_boolean(self._values['manual_resume']) + + @property + def variables(self): + if self._values['variables'] is None: + return None + result = {} + for k, v in iteritems(self._values['variables']): + k = k.replace('userDefined ', '').strip() + result[k] = v + return result + + +class ExternalMonitorsFactManager(BaseManager): + def __init__(self, *args, **kwargs): + self.client = kwargs.get('client', None) + self.module = kwargs.get('module', None) + super(ExternalMonitorsFactManager, self).__init__(**kwargs) + self.want = ExternalMonitorsParameters(params=self.module.params) + + def exec_module(self): + facts = self._exec_module() + result = dict(external_monitors=facts) + return result + + def _exec_module(self): + results = [] + facts = self.read_facts() + for item in facts: + attrs = item.to_return() + results.append(attrs) + results = sorted(results, key=lambda k: k['full_path']) + return results + + def read_facts(self): + results = [] + collection = self.read_collection_from_device() + for resource in collection: + params = ExternalMonitorsParameters(params=resource) + results.append(params) + return results + + def read_collection_from_device(self): + uri = "https://{0}:{1}/mgmt/tm/ltm/monitor/external".format( + self.client.provider['server'], + self.client.provider['server_port'], + ) + resp = self.client.api.get(uri) + try: + response = resp.json() + except ValueError as ex: + raise F5ModuleError(str(ex)) + if 'code' in response and response['code'] == 400: + if 'message' in response: + raise F5ModuleError(response['message']) + else: + raise F5ModuleError(resp.content) + if 'items' not in response: + return [] + result = response['items'] + return result + + +class FastHttpProfilesParameters(BaseParameters): + api_map = { + 'fullPath': 'full_path', + 'clientCloseTimeout': 'client_close_timeout', + 'connpoolIdleTimeoutOverride': 'oneconnect_idle_timeout_override', + 'connpoolMaxReuse': 'oneconnect_maximum_reuse', + 'connpoolMaxSize': 'oneconnect_maximum_pool_size', + 'connpoolMinSize': 'oneconnect_minimum_pool_size', + 'connpoolReplenish': 'oneconnect_replenish', + 'connpoolStep': 'oneconnect_ramp_up_increment', + 'defaultsFrom': 'parent', + 'forceHttp_10Response': 'force_http_1_0_response', + 'headerInsert': 'request_header_insert', + 'http_11CloseWorkarounds': 'http_1_1_close_workarounds', + 'idleTimeout': 'idle_timeout', + 'insertXforwardedFor': 'insert_x_forwarded_for', + 'maxHeaderSize': 'maximum_header_size', + 'maxRequests': 'maximum_requests', + 
'mssOverride': 'maximum_segment_size_override', + 'receiveWindowSize': 'receive_window_size', + 'resetOnTimeout': 'reset_on_timeout', + 'serverCloseTimeout': 'server_close_timeout', + 'serverSack': 'server_sack', + 'serverTimestamp': 'server_timestamp', + 'uncleanShutdown': 'unclean_shutdown' + } + + returnables = [ + 'full_path', + 'name', + 'client_close_timeout', + 'oneconnect_idle_timeout_override', + 'oneconnect_maximum_reuse', + 'oneconnect_maximum_pool_size', + 'oneconnect_minimum_pool_size', + 'oneconnect_replenish', + 'oneconnect_ramp_up_increment', + 'parent', + 'description', + 'force_http_1_0_response', + 'request_header_insert', + 'http_1_1_close_workarounds', + 'idle_timeout', + 'insert_x_forwarded_for', + 'maximum_header_size', + 'maximum_requests', + 'maximum_segment_size_override', + 'receive_window_size', + 'reset_on_timeout', + 'server_close_timeout', + 'server_sack', + 'server_timestamp', + 'unclean_shutdown' + ] + + @property + def request_header_insert(self): + if self._values['request_header_insert'] in [None, 'none']: + return None + return self._values['request_header_insert'] + + @property + def server_timestamp(self): + return flatten_boolean(self._values['server_timestamp']) + + @property + def server_sack(self): + return flatten_boolean(self._values['server_sack']) + + @property + def reset_on_timeout(self): + return flatten_boolean(self._values['reset_on_timeout']) + + @property + def insert_x_forwarded_for(self): + return flatten_boolean(self._values['insert_x_forwarded_for']) + + @property + def http_1_1_close_workarounds(self): + return flatten_boolean(self._values['http_1_1_close_workarounds']) + + @property + def force_http_1_0_response(self): + return flatten_boolean(self._values['force_http_1_0_response']) + + @property + def oneconnect_replenish(self): + return flatten_boolean(self._values['oneconnect_replenish']) + + @property + def idle_timeout(self): + if self._values['idle_timeout'] is None: + return None + elif self._values['idle_timeout'] == 'immediate': + return 0 + elif self._values['idle_timeout'] == 'indefinite': + return 4294967295 + return int(self._values['idle_timeout']) + + +class FastHttpProfilesFactManager(BaseManager): + def __init__(self, *args, **kwargs): + self.client = kwargs.get('client', None) + self.module = kwargs.get('module', None) + super(FastHttpProfilesFactManager, self).__init__(**kwargs) + self.want = FastHttpProfilesParameters(params=self.module.params) + + def exec_module(self): + facts = self._exec_module() + result = dict(fasthttp_profiles=facts) + return result + + def _exec_module(self): + results = [] + facts = self.read_facts() + for item in facts: + attrs = item.to_return() + results.append(attrs) + results = sorted(results, key=lambda k: k['full_path']) + return results + + def read_facts(self): + results = [] + collection = self.read_collection_from_device() + for resource in collection: + params = FastHttpProfilesParameters(params=resource) + results.append(params) + return results + + def read_collection_from_device(self): + uri = "https://{0}:{1}/mgmt/tm/ltm/profile/fasthttp".format( + self.client.provider['server'], + self.client.provider['server_port'], + ) + resp = self.client.api.get(uri) + try: + response = resp.json() + except ValueError as ex: + raise F5ModuleError(str(ex)) + if 'code' in response and response['code'] == 400: + if 'message' in response: + raise F5ModuleError(response['message']) + else: + raise F5ModuleError(resp.content) + if 'items' not in response: + return [] + result = 
response['items'] + return result + + +class FastL4ProfilesParameters(BaseParameters): + api_map = { + 'fullPath': 'full_path', + 'clientTimeout': 'client_timeout', + 'defaultsFrom': 'parent', + 'explicitFlowMigration': 'explicit_flow_migration', + 'hardwareSynCookie': 'hardware_syn_cookie', + 'idleTimeout': 'idle_timeout', + 'ipDfMode': 'dont_fragment_flag', + 'ipTosToClient': 'ip_tos_to_client', + 'ipTosToServer': 'ip_tos_to_server', + 'ipTtlMode': 'ttl_mode', + 'ipTtlV4': 'ttl_v4', + 'ipTtlV6': 'ttl_v6', + 'keepAliveInterval': 'keep_alive_interval', + 'lateBinding': 'late_binding', + 'linkQosToClient': 'link_qos_to_client', + 'linkQosToServer': 'link_qos_to_server', + 'looseClose': 'loose_close', + 'looseInitialization': 'loose_init', + 'mssOverride': 'mss_override', + 'priorityToClient': 'priority_to_client', + 'priorityToServer': 'priority_to_server', + 'pvaAcceleration': 'pva_acceleration', + 'pvaDynamicClientPackets': 'pva_dynamic_client_packets', + 'pvaDynamicServerPackets': 'pva_dynamic_server_packets', + 'pvaFlowAging': 'pva_flow_aging', + 'pvaFlowEvict': 'pva_flow_evict', + 'pvaOffloadDynamic': 'pva_offload_dynamic', + 'pvaOffloadState': 'pva_offload_state', + 'reassembleFragments': 'reassemble_fragments', + 'receiveWindowSize': 'receive_window', + 'resetOnTimeout': 'reset_on_timeout', + 'rttFromClient': 'rtt_from_client', + 'rttFromServer': 'rtt_from_server', + 'serverSack': 'server_sack', + 'serverTimestamp': 'server_timestamp', + 'softwareSynCookie': 'software_syn_cookie', + 'synCookieEnable': 'syn_cookie_enabled', + 'synCookieMss': 'syn_cookie_mss', + 'synCookieWhitelist': 'syn_cookie_whitelist', + 'tcpCloseTimeout': 'tcp_close_timeout', + 'tcpGenerateIsn': 'generate_init_seq_number', + 'tcpHandshakeTimeout': 'tcp_handshake_timeout', + 'tcpStripSack': 'strip_sack', + 'tcpTimeWaitTimeout': 'tcp_time_wait_timeout', + 'tcpTimestampMode': 'tcp_timestamp_mode', + 'tcpWscaleMode': 'tcp_window_scale_mode', + 'timeoutRecovery': 'timeout_recovery', + } + + returnables = [ + 'full_path', + 'name', + 'client_timeout', + 'parent', + 'description', + 'explicit_flow_migration', + 'hardware_syn_cookie', + 'idle_timeout', + 'dont_fragment_flag', + 'ip_tos_to_client', + 'ip_tos_to_server', + 'ttl_mode', + 'ttl_v4', + 'ttl_v6', + 'keep_alive_interval', + 'late_binding', + 'link_qos_to_client', + 'link_qos_to_server', + 'loose_close', + 'loose_init', + 'mss_override', # Maximum Segment Size Override + 'priority_to_client', + 'priority_to_server', + 'pva_acceleration', + 'pva_dynamic_client_packets', + 'pva_dynamic_server_packets', + 'pva_flow_aging', + 'pva_flow_evict', + 'pva_offload_dynamic', + 'pva_offload_state', + 'reassemble_fragments', + 'receive_window', + 'reset_on_timeout', + 'rtt_from_client', + 'rtt_from_server', + 'server_sack', + 'server_timestamp', + 'software_syn_cookie', + 'syn_cookie_enabled', + 'syn_cookie_mss', + 'syn_cookie_whitelist', + 'tcp_close_timeout', + 'generate_init_seq_number', + 'tcp_handshake_timeout', + 'strip_sack', + 'tcp_time_wait_timeout', + 'tcp_timestamp_mode', + 'tcp_window_scale_mode', + 'timeout_recovery', + ] + + @property + def description(self): + if self._values['description'] in [None, 'none']: + return None + return self._values['description'] + + @property + def strip_sack(self): + return flatten_boolean(self._values['strip_sack']) + + @property + def generate_init_seq_number(self): + return flatten_boolean(self._values['generate_init_seq_number']) + + @property + def syn_cookie_whitelist(self): + return 
flatten_boolean(self._values['syn_cookie_whitelist']) + + @property + def syn_cookie_enabled(self): + return flatten_boolean(self._values['syn_cookie_enabled']) + + @property + def software_syn_cookie(self): + return flatten_boolean(self._values['software_syn_cookie']) + + @property + def server_timestamp(self): + return flatten_boolean(self._values['server_timestamp']) + + @property + def server_sack(self): + return flatten_boolean(self._values['server_sack']) + + @property + def rtt_from_server(self): + return flatten_boolean(self._values['rtt_from_server']) + + @property + def rtt_from_client(self): + return flatten_boolean(self._values['rtt_from_client']) + + @property + def reset_on_timeout(self): + return flatten_boolean(self._values['reset_on_timeout']) + + @property + def explicit_flow_migration(self): + return flatten_boolean(self._values['explicit_flow_migration']) + + @property + def reassemble_fragments(self): + return flatten_boolean(self._values['reassemble_fragments']) + + @property + def pva_flow_aging(self): + return flatten_boolean(self._values['pva_flow_aging']) + + @property + def pva_flow_evict(self): + return flatten_boolean(self._values['pva_flow_evict']) + + @property + def pva_offload_dynamic(self): + return flatten_boolean(self._values['pva_offload_dynamic']) + + @property + def hardware_syn_cookie(self): + return flatten_boolean(self._values['hardware_syn_cookie']) + + @property + def loose_close(self): + return flatten_boolean(self._values['loose_close']) + + @property + def loose_init(self): + return flatten_boolean(self._values['loose_init']) + + @property + def late_binding(self): + return flatten_boolean(self._values['late_binding']) + + @property + def tcp_handshake_timeout(self): + if self._values['tcp_handshake_timeout'] is None: + return None + elif self._values['tcp_handshake_timeout'] == 'immediate': + return 0 + elif self._values['tcp_handshake_timeout'] == 'indefinite': + return 4294967295 + return int(self._values['tcp_handshake_timeout']) + + @property + def idle_timeout(self): + if self._values['idle_timeout'] is None: + return None + elif self._values['idle_timeout'] == 'immediate': + return 0 + elif self._values['idle_timeout'] == 'indefinite': + return 4294967295 + return int(self._values['idle_timeout']) + + @property + def tcp_close_timeout(self): + if self._values['tcp_close_timeout'] is None: + return None + elif self._values['tcp_close_timeout'] == 'immediate': + return 0 + elif self._values['tcp_close_timeout'] == 'indefinite': + return 4294967295 + return int(self._values['tcp_close_timeout']) + + @property + def keep_alive_interval(self): + if self._values['keep_alive_interval'] is None: + return None + elif self._values['keep_alive_interval'] == 'disabled': + return 0 + return int(self._values['keep_alive_interval']) + + @property + def ip_tos_to_client(self): + if self._values['ip_tos_to_client'] is None: + return None + try: + return int(self._values['ip_tos_to_client']) + except ValueError: + return self._values['ip_tos_to_client'] + + @property + def ip_tos_to_server(self): + if self._values['ip_tos_to_server'] is None: + return None + try: + return int(self._values['ip_tos_to_server']) + except ValueError: + return self._values['ip_tos_to_server'] + + @property + def link_qos_to_client(self): + if self._values['link_qos_to_client'] is None: + return None + try: + return int(self._values['link_qos_to_client']) + except ValueError: + return self._values['link_qos_to_client'] + + @property + def link_qos_to_server(self): + if 
self._values['link_qos_to_server'] is None: + return None + try: + return int(self._values['link_qos_to_server']) + except ValueError: + return self._values['link_qos_to_server'] + + @property + def priority_to_client(self): + if self._values['priority_to_client'] is None: + return None + try: + return int(self._values['priority_to_client']) + except ValueError: + return self._values['priority_to_client'] + + @property + def priority_to_server(self): + if self._values['priority_to_server'] is None: + return None + try: + return int(self._values['priority_to_server']) + except ValueError: + return self._values['priority_to_server'] + + +class FastL4ProfilesFactManager(BaseManager): + def __init__(self, *args, **kwargs): + self.client = kwargs.get('client', None) + self.module = kwargs.get('module', None) + super(FastL4ProfilesFactManager, self).__init__(**kwargs) + self.want = FastL4ProfilesParameters(params=self.module.params) + + def exec_module(self): + facts = self._exec_module() + result = dict(fastl4_profiles=facts) + return result + + def _exec_module(self): + results = [] + facts = self.read_facts() + for item in facts: + attrs = item.to_return() + results.append(attrs) + results = sorted(results, key=lambda k: k['full_path']) + return results + + def read_facts(self): + results = [] + collection = self.read_collection_from_device() + for resource in collection: + params = FastL4ProfilesParameters(params=resource) + results.append(params) + return results + + def read_collection_from_device(self): + uri = "https://{0}:{1}/mgmt/tm/ltm/profile/fastl4".format( + self.client.provider['server'], + self.client.provider['server_port'], + ) + resp = self.client.api.get(uri) + try: + response = resp.json() + except ValueError as ex: + raise F5ModuleError(str(ex)) + if 'code' in response and response['code'] == 400: + if 'message' in response: + raise F5ModuleError(response['message']) + else: + raise F5ModuleError(resp.content) + if 'items' not in response: + return [] + result = response['items'] + return result + + +class GatewayIcmpMonitorsParameters(BaseParameters): + api_map = { + 'fullPath': 'full_path', + 'defaultsFrom': 'parent', + 'adaptiveDivergenceType': 'adaptive_divergence_type', + 'adaptiveDivergenceValue': 'adaptive_divergence_value', + 'adaptiveLimit': 'adaptive_limit', + 'adaptiveSamplingTimespan': 'adaptive_sampling_timespan', + 'manualResume': 'manual_resume', + 'timeUntilUp': 'time_until_up', + 'upInterval': 'up_interval', + } + + returnables = [ + 'full_path', + 'name', + 'parent', + 'description', + 'adaptive', + 'adaptive_divergence_type', + 'adaptive_divergence_value', + 'adaptive_limit', + 'adaptive_sampling_timespan', + 'destination', + 'interval', + 'manual_resume', + 'time_until_up', + 'timeout', + 'transparent', + 'up_interval', + ] + + @property + def description(self): + if self._values['description'] in [None, 'none']: + return None + return self._values['description'] + + @property + def transparent(self): + return flatten_boolean(self._values['transparent']) + + @property + def manual_resume(self): + return flatten_boolean(self._values['manual_resume']) + + @property + def adaptive(self): + return flatten_boolean(self._values['adaptive']) + + +class GatewayIcmpMonitorsFactManager(BaseManager): + def __init__(self, *args, **kwargs): + self.client = kwargs.get('client', None) + self.module = kwargs.get('module', None) + super(GatewayIcmpMonitorsFactManager, self).__init__(**kwargs) + self.want = GatewayIcmpMonitorsParameters(params=self.module.params) + + def 
exec_module(self):
+        facts = self._exec_module()
+        result = dict(gateway_icmp_monitors=facts)
+        return result
+
+    def _exec_module(self):
+        results = []
+        facts = self.read_facts()
+        for item in facts:
+            attrs = item.to_return()
+            results.append(attrs)
+        results = sorted(results, key=lambda k: k['full_path'])
+        return results
+
+    def read_facts(self):
+        results = []
+        collection = self.read_collection_from_device()
+        for resource in collection:
+            params = GatewayIcmpMonitorsParameters(params=resource)
+            results.append(params)
+        return results
+
+    def read_collection_from_device(self):
+        uri = "https://{0}:{1}/mgmt/tm/ltm/monitor/gateway-icmp".format(
+            self.client.provider['server'],
+            self.client.provider['server_port'],
+        )
+        resp = self.client.api.get(uri)
+        try:
+            response = resp.json()
+        except ValueError as ex:
+            raise F5ModuleError(str(ex))
+        if 'code' in response and response['code'] == 400:
+            if 'message' in response:
+                raise F5ModuleError(response['message'])
+            else:
+                raise F5ModuleError(resp.content)
+        if 'items' not in response:
+            return []
+        result = response['items']
+        return result
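The fact managers in this file all repeat the same skeleton: read a REST collection, wrap each item in a Parameters class, call to_return(), and sort by full_path. A hedged sketch of a generic variant that parameterizes the three points of variation (GenericListFactManager does not exist in the module; the hook values in the comments are examples only):

class GenericListFactManager(BaseManager):
    # Hypothetical hooks a concrete manager would set.
    fact_key = None          # e.g. 'gtm_a_pools'
    parameters_class = None  # e.g. GtmXPoolsParameters
    uri_path = None          # e.g. '/mgmt/tm/gtm/pool/a?expandSubcollections=true'

    def exec_module(self):
        results = [
            self.parameters_class(params=resource).to_return()
            for resource in self.read_collection_from_device()
        ]
        results.sort(key=lambda k: k['full_path'])
        return {self.fact_key: results}

    def read_collection_from_device(self):
        uri = "https://{0}:{1}{2}".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
            self.uri_path,
        )
        resp = self.client.api.get(uri)
        try:
            response = resp.json()
        except ValueError as ex:
            raise F5ModuleError(str(ex))
        if 'code' in response and response['code'] == 400:
            # Prefer the server-supplied message; fall back to the raw body.
            raise F5ModuleError(response.get('message', resp.content))
        return response.get('items', [])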
None: + return None + return int(self._values['qos_hit_ratio']) + + @property + def qos_hops(self): + if self._values['qos_hops'] is None: + return None + return int(self._values['qos_hops']) + + @property + def qos_kilobytes_second(self): + if self._values['qos_kilobytes_second'] is None: + return None + return int(self._values['qos_kilobytes_second']) + + @property + def qos_lcs(self): + if self._values['qos_lcs'] is None: + return None + return int(self._values['qos_lcs']) + + @property + def qos_packet_rate(self): + if self._values['qos_packet_rate'] is None: + return None + return int(self._values['qos_packet_rate']) + + @property + def qos_rtt(self): + if self._values['qos_rtt'] is None: + return None + return int(self._values['qos_rtt']) + + @property + def qos_topology(self): + if self._values['qos_topology'] is None: + return None + return int(self._values['qos_topology']) + + @property + def qos_vs_capacity(self): + if self._values['qos_vs_capacity'] is None: + return None + return int(self._values['qos_vs_capacity']) + + @property + def qos_vs_score(self): + if self._values['qos_vs_score'] is None: + return None + return int(self._values['qos_vs_score']) + + @property + def availability_state(self): + if self._values['stats'] is None: + return None + try: + result = self._values['stats']['status']['availabilityState'] + return result['description'] + except AttributeError: + return None + + @property + def enabled_state(self): + if self._values['stats'] is None: + return None + try: + result = self._values['stats']['status']['enabledState'] + return result['description'] + except AttributeError: + return None + + @property + def availability_status(self): + # This fact is a combination of the availability_state and enabled_state + # + # The purpose of the fact is to give a higher-level view of the availability + # of the pool, that can be used in playbooks. If you need further detail, + # consider using the following facts together. 
+ # + # - availability_state + # - enabled_state + # + if self.enabled_state == 'enabled': + if self.availability_state == 'offline': + return 'red' + elif self.availability_state == 'available': + return 'green' + elif self.availability_state == 'unknown': + return 'blue' + else: + return 'none' + else: + # disabled + return 'black' + + @property + def manual_resume(self): + return flatten_boolean(self._values['manual_resume']) + + +class GtmAPoolsFactManager(BaseManager): + def __init__(self, *args, **kwargs): + self.client = kwargs.get('client', None) + self.module = kwargs.get('module', None) + super(GtmAPoolsFactManager, self).__init__(**kwargs) + self.want = GtmXPoolsParameters(params=self.module.params) + + def exec_module(self): + facts = self._exec_module() + result = dict(gtm_a_pools=facts) + return result + + def _exec_module(self): + if 'gtm' not in self.provisioned_modules: + return [] + results = [] + facts = self.read_facts() + for item in facts: + attrs = item.to_return() + results.append(attrs) + results = sorted(results, key=lambda k: k['full_path']) + return results + + def read_facts(self): + results = [] + collection = self.read_collection_from_device() + for resource in collection: + params = GtmXPoolsParameters(params=resource) + results.append(params) + return results + + def read_collection_from_device(self): + uri = "https://{0}:{1}/mgmt/tm/gtm/pool/a".format( + self.client.provider['server'], + self.client.provider['server_port'], + ) + query = "?expandSubcollections=true" + resp = self.client.api.get(uri + query) + try: + response = resp.json() + except ValueError as ex: + raise F5ModuleError(str(ex)) + if 'code' in response and response['code'] == 400: + if 'message' in response: + raise F5ModuleError(response['message']) + else: + raise F5ModuleError(resp.content) + if 'items' not in response: + return [] + result = response['items'] + return result + + +class GtmAaaaPoolsFactManager(BaseManager): + def __init__(self, *args, **kwargs): + self.client = kwargs.get('client', None) + self.module = kwargs.get('module', None) + super(GtmAaaaPoolsFactManager, self).__init__(**kwargs) + self.want = GtmXPoolsParameters(params=self.module.params) + + def exec_module(self): + facts = self._exec_module() + result = dict(gtm_aaaa_pools=facts) + return result + + def _exec_module(self): + if 'gtm' not in self.provisioned_modules: + return [] + results = [] + facts = self.read_facts() + for item in facts: + attrs = item.to_return() + results.append(attrs) + results = sorted(results, key=lambda k: k['full_path']) + return results + + def read_facts(self): + results = [] + collection = self.read_collection_from_device() + for resource in collection: + params = GtmXPoolsParameters(params=resource) + results.append(params) + return results + + def read_collection_from_device(self): + uri = "https://{0}:{1}/mgmt/tm/gtm/pool/aaaa".format( + self.client.provider['server'], + self.client.provider['server_port'], + ) + query = "?expandSubcollections=true" + resp = self.client.api.get(uri + query) + try: + response = resp.json() + except ValueError as ex: + raise F5ModuleError(str(ex)) + if 'code' in response and response['code'] == 400: + if 'message' in response: + raise F5ModuleError(response['message']) + else: + raise F5ModuleError(resp.content) + if 'items' not in response: + return [] + result = response['items'] + return result + + +class GtmCnamePoolsFactManager(BaseManager): + def __init__(self, *args, **kwargs): + self.client = kwargs.get('client', None) + self.module = 
kwargs.get('module', None) + super(GtmCnamePoolsFactManager, self).__init__(**kwargs) + self.want = GtmXPoolsParameters(params=self.module.params) + + def exec_module(self): + facts = self._exec_module() + result = dict(gtm_cname_pools=facts) + return result + + def _exec_module(self): + if 'gtm' not in self.provisioned_modules: + return [] + results = [] + facts = self.read_facts() + for item in facts: + attrs = item.to_return() + results.append(attrs) + results = sorted(results, key=lambda k: k['full_path']) + return results + + def read_facts(self): + results = [] + collection = self.read_collection_from_device() + for resource in collection: + params = GtmXPoolsParameters(params=resource) + results.append(params) + return results + + def read_collection_from_device(self): + uri = "https://{0}:{1}/mgmt/tm/gtm/pool/cname".format( + self.client.provider['server'], + self.client.provider['server_port'], + ) + query = "?expandSubcollections=true" + resp = self.client.api.get(uri + query) + try: + response = resp.json() + except ValueError as ex: + raise F5ModuleError(str(ex)) + if 'code' in response and response['code'] == 400: + if 'message' in response: + raise F5ModuleError(response['message']) + else: + raise F5ModuleError(resp.content) + if 'items' not in response: + return [] + result = response['items'] + return result + + +class GtmMxPoolsFactManager(BaseManager): + def __init__(self, *args, **kwargs): + self.client = kwargs.get('client', None) + self.module = kwargs.get('module', None) + super(GtmMxPoolsFactManager, self).__init__(**kwargs) + self.want = GtmXPoolsParameters(params=self.module.params) + + def exec_module(self): + facts = self._exec_module() + result = dict(gtm_mx_pools=facts) + return result + + def _exec_module(self): + if 'gtm' not in self.provisioned_modules: + return [] + results = [] + facts = self.read_facts() + for item in facts: + attrs = item.to_return() + results.append(attrs) + results = sorted(results, key=lambda k: k['full_path']) + return results + + def read_facts(self): + results = [] + collection = self.read_collection_from_device() + for resource in collection: + params = GtmXPoolsParameters(params=resource) + results.append(params) + return results + + def read_collection_from_device(self): + uri = "https://{0}:{1}/mgmt/tm/gtm/pool/mx".format( + self.client.provider['server'], + self.client.provider['server_port'], + ) + query = "?expandSubcollections=true" + resp = self.client.api.get(uri + query) + try: + response = resp.json() + except ValueError as ex: + raise F5ModuleError(str(ex)) + if 'code' in response and response['code'] == 400: + if 'message' in response: + raise F5ModuleError(response['message']) + else: + raise F5ModuleError(resp.content) + if 'items' not in response: + return [] + result = response['items'] + return result + + +class GtmNaptrPoolsFactManager(BaseManager): + def __init__(self, *args, **kwargs): + self.client = kwargs.get('client', None) + self.module = kwargs.get('module', None) + super(GtmNaptrPoolsFactManager, self).__init__(**kwargs) + self.want = GtmXPoolsParameters(params=self.module.params) + + def exec_module(self): + facts = self._exec_module() + result = dict(gtm_naptr_pools=facts) + return result + + def _exec_module(self): + if 'gtm' not in self.provisioned_modules: + return [] + results = [] + facts = self.read_facts() + for item in facts: + attrs = item.to_return() + results.append(attrs) + results = sorted(results, key=lambda k: k['full_path']) + return results + + def read_facts(self): + results = [] + 
collection = self.read_collection_from_device() + for resource in collection: + params = GtmXPoolsParameters(params=resource) + results.append(params) + return results + + def read_collection_from_device(self): + uri = "https://{0}:{1}/mgmt/tm/gtm/pool/naptr".format( + self.client.provider['server'], + self.client.provider['server_port'], + ) + query = "?expandSubcollections=true" + resp = self.client.api.get(uri + query) + try: + response = resp.json() + except ValueError as ex: + raise F5ModuleError(str(ex)) + if 'code' in response and response['code'] == 400: + if 'message' in response: + raise F5ModuleError(response['message']) + else: + raise F5ModuleError(resp.content) + if 'items' not in response: + return [] + result = response['items'] + return result + + +class GtmSrvPoolsFactManager(BaseManager): + def __init__(self, *args, **kwargs): + self.client = kwargs.get('client', None) + self.module = kwargs.get('module', None) + super(GtmSrvPoolsFactManager, self).__init__(**kwargs) + self.want = GtmXPoolsParameters(params=self.module.params) + + def exec_module(self): + facts = self._exec_module() + result = dict(gtm_srv_pools=facts) + return result + + def _exec_module(self): + if 'gtm' not in self.provisioned_modules: + return [] + results = [] + facts = self.read_facts() + for item in facts: + attrs = item.to_return() + results.append(attrs) + results = sorted(results, key=lambda k: k['full_path']) + return results + + def read_facts(self): + results = [] + collection = self.read_collection_from_device() + for resource in collection: + params = GtmXPoolsParameters(params=resource) + results.append(params) + return results + + def read_collection_from_device(self): + uri = "https://{0}:{1}/mgmt/tm/gtm/pool/srv".format( + self.client.provider['server'], + self.client.provider['server_port'], + ) + query = "?expandSubcollections=true" + resp = self.client.api.get(uri + query) + try: + response = resp.json() + except ValueError as ex: + raise F5ModuleError(str(ex)) + if 'code' in response and response['code'] == 400: + if 'message' in response: + raise F5ModuleError(response['message']) + else: + raise F5ModuleError(resp.content) + if 'items' not in response: + return [] + result = response['items'] + return result + + +class GtmServersParameters(BaseParameters): + api_map = { + 'fullPath': 'full_path', + 'exposeRouteDomains': 'expose_route_domains', + 'iqAllowPath': 'iq_allow_path', + 'iqAllowServiceCheck': 'iq_allow_service_check', + 'iqAllowSnmp': 'iq_allow_snmp', + 'limitCpuUsage': 'limit_cpu_usage', + 'limitCpuUsageStatus': 'limit_cpu_usage_status', + 'limitMaxBps': 'limit_max_bps', + 'limitMaxBpsStatus': 'limit_max_bps_status', + 'limitMaxConnections': 'limit_max_connections', + 'limitMaxConnectionsStatus': 'limit_max_connections_status', + 'limitMaxPps': 'limit_max_pps', + 'limitMaxPpsStatus': 'limit_max_pps_status', + 'limitMemAvail': 'limit_mem_available', + 'limitMemAvailStatus': 'limit_mem_available_status', + 'linkDiscovery': 'link_discovery', + 'proberFallback': 'prober_fallback', + 'proberPreference': 'prober_preference', + 'virtualServerDiscovery': 'virtual_server_discovery', + 'devicesReference': 'devices', + 'virtualServersReference': 'virtual_servers', + 'monitor': 'monitors', + } + + returnables = [ + 'datacenter', + 'enabled', + 'disabled', + 'expose_route_domains', + 'iq_allow_path', + 'full_path', + 'iq_allow_service_check', + 'iq_allow_snmp', + 'limit_cpu_usage', + 'limit_cpu_usage_status', + 'limit_max_bps', + 'limit_max_bps_status', + 'limit_max_connections', + 
'limit_max_connections_status', + 'limit_max_pps', + 'limit_max_pps_status', + 'limit_mem_available', + 'limit_mem_available_status', + 'link_discovery', + 'monitors', + 'monitor_type', + 'name', + 'product', + 'prober_fallback', + 'prober_preference', + 'virtual_server_discovery', + 'addresses', + 'devices', + 'virtual_servers', + ] + + @property + def monitors(self): + if self._values['monitors'] is None: + return [] + try: + result = re.findall(r'/\w+/[^\s}]+', self._values['monitors']) + return result + except Exception: + return [self._values['monitors']] + + @property + def monitor_type(self): + if self._values['monitors'] is None: + return None + pattern = r'min\s+\d+\s+of' + matches = re.search(pattern, self._values['monitors']) + if matches: + return 'm_of_n' + else: + return 'and_list' + + @property + def limit_mem_available_status(self): + return flatten_boolean(self._values['limit_mem_available_status']) + + @property + def limit_max_pps_status(self): + return flatten_boolean(self._values['limit_max_pps_status']) + + @property + def limit_max_connections_status(self): + return flatten_boolean(self._values['limit_max_connections_status']) + + @property + def limit_max_bps_status(self): + return flatten_boolean(self._values['limit_max_bps_status']) + + @property + def limit_cpu_usage_status(self): + return flatten_boolean(self._values['limit_cpu_usage_status']) + + @property + def iq_allow_service_check(self): + return flatten_boolean(self._values['iq_allow_service_check']) + + @property + def iq_allow_snmp(self): + return flatten_boolean(self._values['iq_allow_snmp']) + + @property + def expose_route_domains(self): + return flatten_boolean(self._values['expose_route_domains']) + + @property + def iq_allow_path(self): + return flatten_boolean(self._values['iq_allow_path']) + + @property + def product(self): + if self._values['product'] is None: + return None + if self._values['product'] in ['single-bigip', 'redundant-bigip']: + return 'bigip' + return self._values['product'] + + @property + def devices(self): + result = [] + if self._values['devices'] is None or 'items' not in self._values['devices']: + return result + for item in self._values['devices']['items']: + self._remove_internal_keywords(item) + if 'fullPath' in item: + item['full_path'] = item.pop('fullPath') + result.append(item) + return result + + @property + def virtual_servers(self): + result = [] + if self._values['virtual_servers'] is None or 'items' not in self._values['virtual_servers']: + return result + for item in self._values['virtual_servers']['items']: + self._remove_internal_keywords(item) + if 'disabled' in item: + if item['disabled'] in BOOLEANS_TRUE: + item['disabled'] = flatten_boolean(item['disabled']) + item['enabled'] = flatten_boolean(not item['disabled']) + if 'enabled' in item: + if item['enabled'] in BOOLEANS_TRUE: + item['enabled'] = flatten_boolean(item['enabled']) + item['disabled'] = flatten_boolean(not item['enabled']) + if 'fullPath' in item: + item['full_path'] = item.pop('fullPath') + if 'limitMaxBps' in item: + item['limit_max_bps'] = int(item.pop('limitMaxBps')) + if 'limitMaxBpsStatus' in item: + item['limit_max_bps_status'] = item.pop('limitMaxBpsStatus') + if 'limitMaxConnections' in item: + item['limit_max_connections'] = int(item.pop('limitMaxConnections')) + if 'limitMaxConnectionsStatus' in item: + item['limit_max_connections_status'] = item.pop('limitMaxConnectionsStatus') + if 'limitMaxPps' in item: + item['limit_max_pps'] = int(item.pop('limitMaxPps')) + if 
'limitMaxPpsStatus' in item: + item['limit_max_pps_status'] = item.pop('limitMaxPpsStatus') + if 'translationAddress' in item: + item['translation_address'] = item.pop('translationAddress') + if 'translationPort' in item: + item['translation_port'] = int(item.pop('translationPort')) + result.append(item) + return result + + @property + def limit_cpu_usage(self): + if self._values['limit_cpu_usage'] is None: + return None + return int(self._values['limit_cpu_usage']) + + @property + def limit_max_bps(self): + if self._values['limit_max_bps'] is None: + return None + return int(self._values['limit_max_bps']) + + @property + def limit_max_connections(self): + if self._values['limit_max_connections'] is None: + return None + return int(self._values['limit_max_connections']) + + @property + def limit_max_pps(self): + if self._values['limit_max_pps'] is None: + return None + return int(self._values['limit_max_pps']) + + @property + def limit_mem_available(self): + if self._values['limit_mem_available'] is None: + return None + return int(self._values['limit_mem_available']) + + +class GtmServersFactManager(BaseManager): + def __init__(self, *args, **kwargs): + self.client = kwargs.get('client', None) + self.module = kwargs.get('module', None) + super(GtmServersFactManager, self).__init__(**kwargs) + self.want = GtmServersParameters(params=self.module.params) + + def exec_module(self): + facts = self._exec_module() + result = dict(gtm_servers=facts) + return result + + def _exec_module(self): + if 'gtm' not in self.provisioned_modules: + return [] + results = [] + facts = self.read_facts() + for item in facts: + attrs = item.to_return() + results.append(attrs) + results = sorted(results, key=lambda k: k['full_path']) + return results + + def read_facts(self): + results = [] + collection = self.read_collection_from_device() + for resource in collection: + params = GtmServersParameters(params=resource) + results.append(params) + return results + + def read_collection_from_device(self): + uri = "https://{0}:{1}/mgmt/tm/gtm/server".format( + self.client.provider['server'], + self.client.provider['server_port'], + ) + resp = self.client.api.get(uri) + try: + response = resp.json() + except ValueError as ex: + raise F5ModuleError(str(ex)) + if 'code' in response and response['code'] == 400: + if 'message' in response: + raise F5ModuleError(response['message']) + else: + raise F5ModuleError(resp.content) + if 'items' not in response: + return [] + result = response['items'] + return result + + +class GtmXWideIpsParameters(BaseParameters): + api_map = { + 'fullPath': 'full_path', + 'failureRcode': 'failure_rcode', + 'failureRcodeResponse': 'failure_rcode_response', + 'failureRcodeTtl': 'failure_rcode_ttl', + 'lastResortPool': 'last_resort_pool', + 'minimalResponse': 'minimal_response', + 'persistCidrIpv4': 'persist_cidr_ipv4', + 'persistCidrIpv6': 'persist_cidr_ipv6', + 'poolLbMode': 'pool_lb_mode', + 'ttlPersistence': 'ttl_persistence' + } + + returnables = [ + 'full_path', + 'description', + 'enabled', + 'disabled', + 'failure_rcode', + 'failure_rcode_response', + 'failure_rcode_ttl', + 'last_resort_pool', + 'minimal_response', + 'name', + 'persist_cidr_ipv4', + 'persist_cidr_ipv6', + 'pool_lb_mode', + 'ttl_persistence', + 'pools', + ] + + @property + def pools(self): + result = [] + if self._values['pools'] is None: + return [] + for pool in self._values['pools']: + del pool['nameReference'] + for x in ['order', 'ratio']: + if x in pool: + pool[x] = int(pool[x]) + result.append(pool) + return result + 
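Editor's note: the properties below (and throughout these Parameters classes) normalize BIG-IP's mixed boolean spellings through flatten_boolean, which this module imports from module_utils earlier in the file. A minimal sketch of the behavior these facts rely on — an illustrative assumption, not the shipped helper:

def flatten_boolean(value):
    # BIG-IP's REST API mixes 'true'/'false', 'enabled'/'disabled',
    # 1/0 and real booleans; the facts report a uniform 'yes'/'no'.
    if value is None:
        return None
    if value in ('true', 'yes', 'on', '1', 'enabled', 1, True):
        return 'yes'
    if value in ('false', 'no', 'off', '0', 'disabled', 0, False):
        return 'no'

# e.g. flatten_boolean('enabled') -> 'yes'; flatten_boolean(0) -> 'no'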
+ @property + def failure_rcode_response(self): + return flatten_boolean(self._values['failure_rcode_response']) + + @property + def failure_rcode_ttl(self): + if self._values['failure_rcode_ttl'] is None: + return None + return int(self._values['failure_rcode_ttl']) + + @property + def persist_cidr_ipv4(self): + if self._values['persist_cidr_ipv4'] is None: + return None + return int(self._values['persist_cidr_ipv4']) + + @property + def persist_cidr_ipv6(self): + if self._values['persist_cidr_ipv6'] is None: + return None + return int(self._values['persist_cidr_ipv6']) + + @property + def ttl_persistence(self): + if self._values['ttl_persistence'] is None: + return None + return int(self._values['ttl_persistence']) + + +class GtmAWideIpsFactManager(BaseManager): + def __init__(self, *args, **kwargs): + self.client = kwargs.get('client', None) + self.module = kwargs.get('module', None) + super(GtmAWideIpsFactManager, self).__init__(**kwargs) + self.want = GtmXWideIpsParameters(params=self.module.params) + + def exec_module(self): + facts = self._exec_module() + result = dict(gtm_a_wide_ips=facts) + return result + + def _exec_module(self): + if 'gtm' not in self.provisioned_modules: + return [] + results = [] + facts = self.read_facts() + for item in facts: + attrs = item.to_return() + results.append(attrs) + results = sorted(results, key=lambda k: k['full_path']) + return results + + def read_facts(self): + results = [] + collection = self.read_collection_from_device() + for resource in collection: + params = GtmXWideIpsParameters(params=resource) + results.append(params) + return results + + def read_collection_from_device(self): + uri = "https://{0}:{1}/mgmt/tm/gtm/wideip/a".format( + self.client.provider['server'], + self.client.provider['server_port'], + ) + resp = self.client.api.get(uri) + try: + response = resp.json() + except ValueError as ex: + raise F5ModuleError(str(ex)) + if 'code' in response and response['code'] == 400: + if 'message' in response: + raise F5ModuleError(response['message']) + else: + raise F5ModuleError(resp.content) + if 'items' not in response: + return [] + result = response['items'] + return result + + +class GtmAaaaWideIpsFactManager(BaseManager): + def __init__(self, *args, **kwargs): + self.client = kwargs.get('client', None) + self.module = kwargs.get('module', None) + super(GtmAaaaWideIpsFactManager, self).__init__(**kwargs) + self.want = GtmXWideIpsParameters(params=self.module.params) + + def exec_module(self): + facts = self._exec_module() + result = dict(gtm_aaaa_wide_ips=facts) + return result + + def _exec_module(self): + if 'gtm' not in self.provisioned_modules: + return [] + results = [] + facts = self.read_facts() + for item in facts: + attrs = item.to_return() + results.append(attrs) + results = sorted(results, key=lambda k: k['full_path']) + return results + + def read_facts(self): + results = [] + collection = self.read_collection_from_device() + for resource in collection: + params = GtmXWideIpsParameters(params=resource) + results.append(params) + return results + + def read_collection_from_device(self): + uri = "https://{0}:{1}/mgmt/tm/gtm/wideip/aaaa".format( + self.client.provider['server'], + self.client.provider['server_port'], + ) + resp = self.client.api.get(uri) + try: + response = resp.json() + except ValueError as ex: + raise F5ModuleError(str(ex)) + if 'code' in response and response['code'] == 400: + if 'message' in response: + raise F5ModuleError(response['message']) + else: + raise F5ModuleError(resp.content) + if 'items' 
not in response: + return [] + result = response['items'] + return result + + +class GtmCnameWideIpsFactManager(BaseManager): + def __init__(self, *args, **kwargs): + self.client = kwargs.get('client', None) + self.module = kwargs.get('module', None) + super(GtmCnameWideIpsFactManager, self).__init__(**kwargs) + self.want = GtmXWideIpsParameters(params=self.module.params) + + def exec_module(self): + facts = self._exec_module() + result = dict(gtm_cname_wide_ips=facts) + return result + + def _exec_module(self): + if 'gtm' not in self.provisioned_modules: + return [] + results = [] + facts = self.read_facts() + for item in facts: + attrs = item.to_return() + results.append(attrs) + results = sorted(results, key=lambda k: k['full_path']) + return results + + def read_facts(self): + results = [] + collection = self.read_collection_from_device() + for resource in collection: + params = GtmXWideIpsParameters(params=resource) + results.append(params) + return results + + def read_collection_from_device(self): + uri = "https://{0}:{1}/mgmt/tm/gtm/wideip/cname".format( + self.client.provider['server'], + self.client.provider['server_port'], + ) + resp = self.client.api.get(uri) + try: + response = resp.json() + except ValueError as ex: + raise F5ModuleError(str(ex)) + if 'code' in response and response['code'] == 400: + if 'message' in response: + raise F5ModuleError(response['message']) + else: + raise F5ModuleError(resp.content) + if 'items' not in response: + return [] + result = response['items'] + return result + + +class GtmMxWideIpsFactManager(BaseManager): + def __init__(self, *args, **kwargs): + self.client = kwargs.get('client', None) + self.module = kwargs.get('module', None) + super(GtmMxWideIpsFactManager, self).__init__(**kwargs) + self.want = GtmXWideIpsParameters(params=self.module.params) + + def exec_module(self): + facts = self._exec_module() + result = dict(gtm_mx_wide_ips=facts) + return result + + def _exec_module(self): + if 'gtm' not in self.provisioned_modules: + return [] + results = [] + facts = self.read_facts() + for item in facts: + attrs = item.to_return() + results.append(attrs) + results = sorted(results, key=lambda k: k['full_path']) + return results + + def read_facts(self): + results = [] + collection = self.read_collection_from_device() + for resource in collection: + params = GtmXWideIpsParameters(params=resource) + results.append(params) + return results + + def read_collection_from_device(self): + uri = "https://{0}:{1}/mgmt/tm/gtm/wideip/mx".format( + self.client.provider['server'], + self.client.provider['server_port'], + ) + resp = self.client.api.get(uri) + try: + response = resp.json() + except ValueError as ex: + raise F5ModuleError(str(ex)) + if 'code' in response and response['code'] == 400: + if 'message' in response: + raise F5ModuleError(response['message']) + else: + raise F5ModuleError(resp.content) + if 'items' not in response: + return [] + result = response['items'] + return result + + +class GtmNaptrWideIpsFactManager(BaseManager): + def __init__(self, *args, **kwargs): + self.client = kwargs.get('client', None) + self.module = kwargs.get('module', None) + super(GtmNaptrWideIpsFactManager, self).__init__(**kwargs) + self.want = GtmXWideIpsParameters(params=self.module.params) + + def exec_module(self): + facts = self._exec_module() + result = dict(gtm_naptr_wide_ips=facts) + return result + + def _exec_module(self): + results = [] + if 'gtm' not in self.provisioned_modules: + return [] + facts = self.read_facts() + for item in facts: + 
attrs = item.to_return() + results.append(attrs) + results = sorted(results, key=lambda k: k['full_path']) + return results + + def read_facts(self): + results = [] + collection = self.read_collection_from_device() + for resource in collection: + params = GtmXWideIpsParameters(params=resource) + results.append(params) + return results + + def read_collection_from_device(self): + uri = "https://{0}:{1}/mgmt/tm/gtm/wideip/naptr".format( + self.client.provider['server'], + self.client.provider['server_port'], + ) + resp = self.client.api.get(uri) + try: + response = resp.json() + except ValueError as ex: + raise F5ModuleError(str(ex)) + if 'code' in response and response['code'] == 400: + if 'message' in response: + raise F5ModuleError(response['message']) + else: + raise F5ModuleError(resp.content) + if 'items' not in response: + return [] + result = response['items'] + return result + + +class GtmSrvWideIpsFactManager(BaseManager): + def __init__(self, *args, **kwargs): + self.client = kwargs.get('client', None) + self.module = kwargs.get('module', None) + super(GtmSrvWideIpsFactManager, self).__init__(**kwargs) + self.want = GtmXWideIpsParameters(params=self.module.params) + + def exec_module(self): + facts = self._exec_module() + result = dict(gtm_srv_wide_ips=facts) + return result + + def _exec_module(self): + if 'gtm' not in self.provisioned_modules: + return [] + results = [] + facts = self.read_facts() + for item in facts: + attrs = item.to_return() + results.append(attrs) + results = sorted(results, key=lambda k: k['full_path']) + return results + + def read_facts(self): + results = [] + collection = self.read_collection_from_device() + for resource in collection: + params = GtmXWideIpsParameters(params=resource) + results.append(params) + return results + + def read_collection_from_device(self): + uri = "https://{0}:{1}/mgmt/tm/gtm/wideip/srv".format( + self.client.provider['server'], + self.client.provider['server_port'], + ) + resp = self.client.api.get(uri) + try: + response = resp.json() + except ValueError as ex: + raise F5ModuleError(str(ex)) + if 'code' in response and response['code'] == 400: + if 'message' in response: + raise F5ModuleError(response['message']) + else: + raise F5ModuleError(resp.content) + if 'items' not in response: + return [] + result = response['items'] + return result + + +class HttpMonitorsParameters(BaseParameters): + api_map = { + 'fullPath': 'full_path', + 'defaultsFrom': 'parent', + 'adaptiveDivergenceType': 'adaptive_divergence_type', + 'adaptiveDivergenceValue': 'adaptive_divergence_value', + 'adaptiveLimit': 'adaptive_limit', + 'adaptiveSamplingTimespan': 'adaptive_sampling_timespan', + 'ipDscp': 'ip_dscp', + 'manualResume': 'manual_resume', + 'recv': 'receive_string', + 'recvDisable': 'receive_disable_string', + 'send': 'send_string', + 'timeUntilUp': 'time_until_up', + 'upInterval': 'up_interval', + } + + returnables = [ + 'full_path', + 'name', + 'parent', + 'description', + 'adaptive', + 'adaptive_divergence_type', + 'adaptive_divergence_value', + 'adaptive_limit', + 'adaptive_sampling_timespan', + 'destination', + 'interval', + 'ip_dscp', + 'manual_resume', + 'receive_string', + 'receive_disable_string', + 'reverse', + 'send_string', + 'time_until_up', + 'timeout', + 'transparent', + 'up_interval', + 'username', + ] + + @property + def description(self): + if self._values['description'] in [None, 'none']: + return None + return self._values['description'] + + @property + def transparent(self): + return 
flatten_boolean(self._values['transparent']) + + @property + def reverse(self): + return flatten_boolean(self._values['reverse']) + + @property + def manual_resume(self): + return flatten_boolean(self._values['manual_resume']) + + @property + def adaptive(self): + return flatten_boolean(self._values['adaptive']) + + +class HttpMonitorsFactManager(BaseManager): + def __init__(self, *args, **kwargs): + self.client = kwargs.get('client', None) + self.module = kwargs.get('module', None) + super(HttpMonitorsFactManager, self).__init__(**kwargs) + self.want = HttpMonitorsParameters(params=self.module.params) + + def exec_module(self): + facts = self._exec_module() + result = dict(http_monitors=facts) + return result + + def _exec_module(self): + results = [] + facts = self.read_facts() + for item in facts: + attrs = item.to_return() + results.append(attrs) + results = sorted(results, key=lambda k: k['full_path']) + return results + + def read_facts(self): + results = [] + collection = self.read_collection_from_device() + for resource in collection: + params = HttpMonitorsParameters(params=resource) + results.append(params) + return results + + def read_collection_from_device(self): + uri = "https://{0}:{1}/mgmt/tm/ltm/monitor/http".format( + self.client.provider['server'], + self.client.provider['server_port'], + ) + resp = self.client.api.get(uri) + try: + response = resp.json() + except ValueError as ex: + raise F5ModuleError(str(ex)) + if 'code' in response and response['code'] == 400: + if 'message' in response: + raise F5ModuleError(response['message']) + else: + raise F5ModuleError(resp.content) + if 'items' not in response: + return [] + result = response['items'] + return result + + +class HttpsMonitorsParameters(BaseParameters): + api_map = { + 'fullPath': 'full_path', + 'defaultsFrom': 'parent', + 'adaptiveDivergenceType': 'adaptive_divergence_type', + 'adaptiveDivergenceValue': 'adaptive_divergence_value', + 'adaptiveLimit': 'adaptive_limit', + 'adaptiveSamplingTimespan': 'adaptive_sampling_timespan', + 'ipDscp': 'ip_dscp', + 'manualResume': 'manual_resume', + 'recv': 'receive_string', + 'recvDisable': 'receive_disable_string', + 'send': 'send_string', + 'sslProfile': 'ssl_profile', + 'timeUntilUp': 'time_until_up', + 'upInterval': 'up_interval', + } + + returnables = [ + 'full_path', + 'name', + 'parent', + 'description', + 'adaptive', + 'adaptive_divergence_type', + 'adaptive_divergence_value', + 'adaptive_limit', + 'adaptive_sampling_timespan', + 'destination', + 'interval', + 'ip_dscp', + 'manual_resume', + 'receive_string', + 'receive_disable_string', + 'reverse', + 'send_string', + 'ssl_profile', + 'time_until_up', + 'timeout', + 'transparent', + 'up_interval', + 'username', + ] + + @property + def description(self): + if self._values['description'] in [None, 'none']: + return None + return self._values['description'] + + @property + def transparent(self): + return flatten_boolean(self._values['transparent']) + + @property + def reverse(self): + return flatten_boolean(self._values['reverse']) + + @property + def manual_resume(self): + return flatten_boolean(self._values['manual_resume']) + + @property + def adaptive(self): + return flatten_boolean(self._values['adaptive'])
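Editor's note: every Parameters class here pairs api_map (REST camelCase keys renamed to snake_case) with returnables (the keys to_return() emits); both are consumed by BaseParameters, defined earlier in this module. A rough, simplified sketch of that contract — hypothetical stand-in code for orientation, not the real base class:

class ParametersSketch(object):
    # api_map renames incoming REST keys; returnables selects what
    # to_return() emits; a subclass @property with the same name wins,
    # which is where coercion (int casts, flatten_boolean) happens.
    api_map = {}
    returnables = []

    def __init__(self, params=None):
        self._values = dict(
            (self.api_map.get(k, k), v) for k, v in (params or {}).items()
        )

    def to_return(self):
        result = {}
        for name in self.returnables:
            if hasattr(type(self), name):
                result[name] = getattr(self, name)
            else:
                result[name] = self._values.get(name)
        return result


class ExampleParams(ParametersSketch):
    api_map = {'manualResume': 'manual_resume'}
    returnables = ['manual_resume']

    @property
    def manual_resume(self):
        # coercion step, mirroring the real classes
        return 'yes' if self._values.get('manual_resume') in ('enabled', True) else 'no'


print(ExampleParams(params={'manualResume': 'enabled'}).to_return())  # {'manual_resume': 'yes'}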
+ + +class HttpsMonitorsFactManager(BaseManager): + def __init__(self, *args, **kwargs): + self.client = kwargs.get('client', None) + self.module = kwargs.get('module', None) + super(HttpsMonitorsFactManager, self).__init__(**kwargs) + self.want = HttpsMonitorsParameters(params=self.module.params) + + def exec_module(self): + facts = self._exec_module() + result = dict(https_monitors=facts) + return result + + def _exec_module(self): + results = [] + facts = self.read_facts() + for item in facts: + attrs = item.to_return() + results.append(attrs) + results = sorted(results, key=lambda k: k['full_path']) + return results + + def read_facts(self): + results = [] + collection = self.read_collection_from_device() + for resource in collection: + params = HttpsMonitorsParameters(params=resource) + results.append(params) + return results + + def read_collection_from_device(self): + uri = "https://{0}:{1}/mgmt/tm/ltm/monitor/https".format( + self.client.provider['server'], + self.client.provider['server_port'], + ) + resp = self.client.api.get(uri) + try: + response = resp.json() + except ValueError as ex: + raise F5ModuleError(str(ex)) + if 'code' in response and response['code'] == 400: + if 'message' in response: + raise F5ModuleError(response['message']) + else: + raise F5ModuleError(resp.content) + if 'items' not in response: + return [] + result = response['items'] + return result + + +class HttpProfilesParameters(BaseParameters): + api_map = { + 'fullPath': 'full_path', + 'defaultsFrom': 'parent', + 'acceptXff': 'accept_xff', + 'explicitProxy': 'explicit_proxy', + 'insertXforwardedFor': 'insert_x_forwarded_for', + 'lwsWidth': 'lws_max_columns', + 'oneconnectTransformations': 'onconnect_transformations', + 'proxyType': 'proxy_mode', + 'redirectRewrite': 'redirect_rewrite', + 'requestChunking': 'request_chunking', + 'responseChunking': 'response_chunking', + 'serverAgentName': 'server_agent_name', + 'viaRequest': 'via_request', + 'viaResponse': 'via_response', + 'pipeline': 'pipeline_action', + } + + returnables = [ + 'full_path', + 'name', + 'parent', + 'description', + 'accept_xff', + 'allow_truncated_redirects', + 'excess_client_headers', + 'excess_server_headers', + 'known_methods', + 'max_header_count', + 'max_header_size', + 'max_requests', + 'oversize_client_headers', + 'oversize_server_headers', + 'pipeline_action', + 'unknown_method', + 'default_connect_handling', + 'hsts_include_subdomains', + 'hsts_enabled', + 'insert_x_forwarded_for', + 'lws_max_columns', + 'onconnect_transformations', + 'proxy_mode', + 'redirect_rewrite', + 'request_chunking', + 'response_chunking', + 'server_agent_name', + 'sflow_poll_interval', + 'sflow_sampling_rate', + 'via_request', + 'via_response', + ] + + @property + def description(self): + if self._values['description'] in [None, 'none']: + return None + return self._values['description'] + + @property + def accept_xff(self): + return flatten_boolean(self._values['accept_xff']) + + @property + def excess_client_headers(self): + if self._values['enforcement'] is None: + return None + if self._values['enforcement']['excessClientHeaders'] is None: + return None + return self._values['enforcement']['excessClientHeaders'] + + @property + def excess_server_headers(self): + if self._values['enforcement'] is None: + return None + if self._values['enforcement']['excessServerHeaders'] is None: + return None + return self._values['enforcement']['excessServerHeaders'] + + @property + def known_methods(self): + if self._values['enforcement'] is None: + return None + if self._values['enforcement']['knownMethods'] is None: + return None + return self._values['enforcement']['knownMethods'] + + @property + def max_header_count(self): + if self._values['enforcement'] is None: + return None + if self._values['enforcement']['maxHeaderCount'] is None: + return None + return self._values['enforcement']['maxHeaderCount'] + + @property + def 
max_header_size(self): + if self._values['enforcement'] is None: + return None + if self._values['enforcement']['maxHeaderSize'] is None: + return None + return self._values['enforcement']['maxHeaderSize'] + + @property + def max_requests(self): + if self._values['enforcement'] is None: + return None + if self._values['enforcement']['maxRequests'] is None: + return None + return self._values['enforcement']['maxRequests'] + + @property + def oversize_client_headers(self): + if self._values['enforcement'] is None: + return None + if self._values['enforcement']['oversizeClientHeaders'] is None: + return None + return self._values['enforcement']['oversizeClientHeaders'] + + @property + def oversize_server_headers(self): + if self._values['enforcement'] is None: + return None + if self._values['enforcement']['oversizeServerHeaders'] is None: + return None + return self._values['enforcement']['oversizeServerHeaders'] + + @property + def allow_truncated_redirects(self): + if self._values['enforcement'] is None: + return None + if self._values['enforcement']['truncatedRedirects'] is None: + return None + return flatten_boolean(self._values['enforcement']['truncatedRedirects']) + + @property + def unknown_method(self): + if self._values['enforcement'] is None: + return None + if self._values['enforcement']['unknownMethod'] is None: + return None + return self._values['enforcement']['unknownMethod'] + + @property + def default_connect_handling(self): + if self._values['explicit_proxy'] is None: + return None + if self._values['explicit_proxy']['defaultConnectHandling'] is None: + return None + return self._values['explicit_proxy']['defaultConnectHandling'] + + @property + def hsts_include_subdomains(self): + if self._values['hsts'] is None: + return None + if self._values['hsts']['includeSubdomains'] is None: + return None + return flatten_boolean(self._values['hsts']['includeSubdomains']) + + @property + def hsts_enabled(self): + if self._values['hsts'] is None: + return None + if self._values['hsts']['mode'] is None: + return None + return flatten_boolean(self._values['hsts']['mode']) + + @property + def hsts_max_age(self): + if self._values['hsts'] is None: + return None + if self._values['hsts']['mode'] is None: + return None + return self._values['hsts']['maximumAge'] + + @property + def insert_x_forwarded_for(self): + if self._values['insert_x_forwarded_for'] is None: + return None + return flatten_boolean(self._values['insert_x_forwarded_for']) + + @property + def onconnect_transformations(self): + if self._values['onconnect_transformations'] is None: + return None + return flatten_boolean(self._values['onconnect_transformations']) + + @property + def sflow_poll_interval(self): + if self._values['sflow'] is None: + return None + if self._values['sflow']['pollInterval'] is None: + return None + return self._values['sflow']['pollInterval'] + + @property + def sflow_sampling_rate(self): + if self._values['sflow'] is None: + return None + if self._values['sflow']['samplingRate'] is None: + return None + return self._values['sflow']['samplingRate'] + + +class HttpProfilesFactManager(BaseManager): + def __init__(self, *args, **kwargs): + self.client = kwargs.get('client', None) + self.module = kwargs.get('module', None) + super(HttpProfilesFactManager, self).__init__(**kwargs) + self.want = HttpProfilesParameters(params=self.module.params) + + def exec_module(self): + facts = self._exec_module() + result = dict(http_profiles=facts) + return result + + def _exec_module(self): + results = [] + facts 
= self.read_facts() + for item in facts: + attrs = item.to_return() + results.append(attrs) + results = sorted(results, key=lambda k: k['full_path']) + return results + + def read_facts(self): + results = [] + collection = self.read_collection_from_device() + for resource in collection: + params = HttpProfilesParameters(params=resource) + results.append(params) + return results + + def read_collection_from_device(self): + uri = "https://{0}:{1}/mgmt/tm/ltm/profile/http".format( + self.client.provider['server'], + self.client.provider['server_port'], + ) + resp = self.client.api.get(uri) + try: + response = resp.json() + except ValueError as ex: + raise F5ModuleError(str(ex)) + if 'code' in response and response['code'] == 400: + if 'message' in response: + raise F5ModuleError(response['message']) + else: + raise F5ModuleError(resp.content) + if 'items' not in response: + return [] + result = response['items'] + return result + + +class IappServicesParameters(BaseParameters): + api_map = { + 'fullPath': 'full_path', + 'deviceGroup': 'device_group', + 'inheritedDevicegroup': 'inherited_device_group', + 'inheritedTrafficGroup': 'inherited_traffic_group', + 'strictUpdates': 'strict_updates', + 'templateModified': 'template_modified', + 'trafficGroup': 'traffic_group', + } + + returnables = [ + 'full_path', + 'name', + 'device_group', + 'inherited_device_group', + 'inherited_traffic_group', + 'strict_updates', + 'template_modified', + 'traffic_group', + 'tables', + 'variables', + 'metadata', + 'lists', + 'description', + ] + + @property + def description(self): + if self._values['description'] in [None, 'none']: + return None + return self._values['description'] + + @property + def inherited_device_group(self): + return flatten_boolean(self._values['inherited_device_group']) + + @property + def inherited_traffic_group(self): + return flatten_boolean(self._values['inherited_traffic_group']) + + @property + def strict_updates(self): + return flatten_boolean(self._values['strict_updates']) + + @property + def template_modified(self): + return flatten_boolean(self._values['template_modified']) + + +class IappServicesFactManager(BaseManager): + def __init__(self, *args, **kwargs): + self.client = kwargs.get('client', None) + self.module = kwargs.get('module', None) + super(IappServicesFactManager, self).__init__(**kwargs) + self.want = IappServicesParameters(params=self.module.params) + + def exec_module(self): + facts = self._exec_module() + result = dict(iapp_services=facts) + return result + + def _exec_module(self): + results = [] + facts = self.read_facts() + for item in facts: + attrs = item.to_return() + results.append(attrs) + results = sorted(results, key=lambda k: k['full_path']) + return results + + def read_facts(self): + results = [] + collection = self.read_collection_from_device() + for resource in collection: + params = IappServicesParameters(params=resource) + results.append(params) + return results + + def read_collection_from_device(self): + uri = "https://{0}:{1}/mgmt/tm/sys/application/service".format( + self.client.provider['server'], + self.client.provider['server_port'], + ) + resp = self.client.api.get(uri) + try: + response = resp.json() + except ValueError as ex: + raise F5ModuleError(str(ex)) + if 'code' in response and response['code'] == 400: + if 'message' in response: + raise F5ModuleError(response['message']) + else: + raise F5ModuleError(resp.content) + if 'items' not in response: + return [] + result = response['items'] + return result + + +class 
IapplxPackagesParameters(BaseParameters): + api_map = { + 'packageName': 'package_name', + } + + returnables = [ + 'name', + 'version', + 'release', + 'arch', + 'package_name', + 'tags', + ] + + +class IapplxPackagesFactManager(BaseManager): + def __init__(self, *args, **kwargs): + self.client = kwargs.get('client', None) + self.module = kwargs.get('module', None) + super(IapplxPackagesFactManager, self).__init__(**kwargs) + self.want = IapplxPackagesParameters(params=self.module.params) + + def exec_module(self): + facts = self._exec_module() + result = dict(iapplx_packages=facts) + return result + + def _exec_module(self): + results = [] + facts = self.read_facts() + for item in facts: + attrs = item.to_return() + results.append(attrs) + results = sorted(results, key=lambda k: k['name']) + return results + + def read_facts(self): + results = [] + collection = self.read_collection_from_device() + for resource in collection: + params = IapplxPackagesParameters(params=resource) + results.append(params) + return results + + def read_collection_from_device(self): + params = dict(operation='QUERY') + uri = "https://{0}:{1}/mgmt/shared/iapp/package-management-tasks".format( + self.client.provider['server'], + self.client.provider['server_port'], + ) + resp = self.client.api.post(uri, json=params) + try: + response = resp.json() + except ValueError as ex: + raise F5ModuleError(str(ex)) + if 'code' in response and response['code'] == 400: + if 'message' in response: + raise F5ModuleError(response['message']) + else: + raise F5ModuleError(resp.content) + + status = self.wait_for_task(response['id']) + if status == 'FINISHED': + uri = "https://{0}:{1}/mgmt/shared/iapp/package-management-tasks/{2}".format( + self.client.provider['server'], + self.client.provider['server_port'], + response['id'] + ) + resp = self.client.api.get(uri) + try: + response = resp.json() + except ValueError as ex: + raise F5ModuleError(str(ex)) + if 'code' in response and response['code'] == 400: + if 'message' in response: + raise F5ModuleError(response['message']) + else: + raise F5ModuleError(resp.content) + else: + raise F5ModuleError( + "An error occurred querying iAppLX packages." 
+ ) + result = response['queryResponse'] + return result + + def wait_for_task(self, task_id): + uri = "https://{0}:{1}/mgmt/shared/iapp/package-management-tasks/{2}".format( + self.client.provider['server'], + self.client.provider['server_port'], + task_id + ) + for x in range(0, 60): + resp = self.client.api.get(uri) + try: + response = resp.json() + except ValueError as ex: + raise F5ModuleError(str(ex)) + if 'code' in response and response['code'] == 400: + if 'message' in response: + raise F5ModuleError(response['message']) + else: + raise F5ModuleError(resp.content) + if response['status'] in ['FINISHED', 'FAILED']: + return response['status'] + time.sleep(1) + return response['status'] + + +class IcmpMonitorsParameters(BaseParameters): + api_map = { + 'fullPath': 'full_path', + 'defaultsFrom': 'parent', + 'adaptiveDivergenceType': 'adaptive_divergence_type', + 'adaptiveDivergenceValue': 'adaptive_divergence_value', + 'adaptiveLimit': 'adaptive_limit', + 'adaptiveSamplingTimespan': 'adaptive_sampling_timespan', + 'manualResume': 'manual_resume', + 'timeUntilUp': 'time_until_up', + 'upInterval': 'up_interval', + } + + returnables = [ + 'full_path', + 'name', + 'parent', + 'description', + 'adaptive', + 'adaptive_divergence_type', + 'adaptive_divergence_value', + 'adaptive_limit', + 'adaptive_sampling_timespan', + 'destination', + 'interval', + 'manual_resume', + 'time_until_up', + 'timeout', + 'transparent', + 'up_interval', + ] + + @property + def description(self): + if self._values['description'] in [None, 'none']: + return None + return self._values['description'] + + @property + def transparent(self): + return flatten_boolean(self._values['transparent']) + + @property + def manual_resume(self): + return flatten_boolean(self._values['manual_resume']) + + @property + def adaptive(self): + return flatten_boolean(self._values['adaptive']) + + +class IcmpMonitorsFactManager(BaseManager): + def __init__(self, *args, **kwargs): + self.client = kwargs.get('client', None) + self.module = kwargs.get('module', None) + super(IcmpMonitorsFactManager, self).__init__(**kwargs) + self.want = IcmpMonitorsParameters(params=self.module.params) + + def exec_module(self): + facts = self._exec_module() + result = dict(icmp_monitors=facts) + return result + + def _exec_module(self): + results = [] + facts = self.read_facts() + for item in facts: + attrs = item.to_return() + results.append(attrs) + results = sorted(results, key=lambda k: k['full_path']) + return results + + def read_facts(self): + results = [] + collection = self.read_collection_from_device() + for resource in collection: + params = IcmpMonitorsParameters(params=resource) + results.append(params) + return results + + def read_collection_from_device(self): + uri = "https://{0}:{1}/mgmt/tm/ltm/monitor/icmp".format( + self.client.provider['server'], + self.client.provider['server_port'], + ) + resp = self.client.api.get(uri) + try: + response = resp.json() + except ValueError as ex: + raise F5ModuleError(str(ex)) + if 'code' in response and response['code'] == 400: + if 'message' in response: + raise F5ModuleError(response['message']) + else: + raise F5ModuleError(resp.content) + if 'items' not in response: + return [] + result = response['items'] + return result + + +class InterfacesParameters(BaseParameters): + api_map = { + 'fullPath': 'full_path', + 'mediaActive': 'active_media_type', + 'flowControl': 'flow_control', + 'bundleSpeed': 'bundle_speed', + 'ifIndex': 'if_index', + 'macAddress': 'mac_address', + 'mediaSfp': 'media_sfp', + 
'lldpAdmin': 'lldp_admin', + 'preferPort': 'prefer_port', + 'stpAutoEdgePort': 'stp_auto_edge_port', + 'stp': 'stp_enabled', + 'stpLinkType': 'stp_link_type' + } + + returnables = [ + 'full_path', + 'name', + 'active_media_type', + 'flow_control', + 'description', + 'bundle', + 'bundle_speed', + 'enabled', + 'if_index', + 'mac_address', + 'media_sfp', + 'lldp_admin', + 'mtu', + 'prefer_port', + 'sflow_poll_interval', + 'sflow_poll_interval_global', + 'stp_auto_edge_port', + 'stp_enabled', + 'stp_link_type' + ] + + @property + def stp_auto_edge_port(self): + return flatten_boolean(self._values['stp_auto_edge_port']) + + @property + def stp_enabled(self): + return flatten_boolean(self._values['stp_enabled']) + + @property + def sflow_poll_interval_global(self): + if self._values['sflow'] is None: + return None + if 'pollIntervalGlobal' in self._values['sflow']: + return self._values['sflow']['pollIntervalGlobal'] + + @property + def sflow_poll_interval(self): + if self._values['sflow'] is None: + return None + if 'pollInterval' in self._values['sflow']: + return self._values['sflow']['pollInterval'] + + @property + def mac_address(self): + if self._values['mac_address'] in [None, 'none']: + return None + return self._values['mac_address'] + + @property + def enabled(self): + return flatten_boolean(self._values['enabled']) + + +class InterfacesFactManager(BaseManager): + def __init__(self, *args, **kwargs): + self.client = kwargs.get('client', None) + self.module = kwargs.get('module', None) + super(InterfacesFactManager, self).__init__(**kwargs) + self.want = InterfacesParameters(params=self.module.params) + + def exec_module(self): + facts = self._exec_module() + result = dict(interfaces=facts) + return result + + def _exec_module(self): + results = [] + facts = self.read_facts() + for item in facts: + attrs = item.to_return() + results.append(attrs) + results = sorted(results, key=lambda k: k['full_path']) + return results + + def read_facts(self): + results = [] + collection = self.read_collection_from_device() + for resource in collection: + params = InterfacesParameters(params=resource) + results.append(params) + return results + + def read_collection_from_device(self): + uri = "https://{0}:{1}/mgmt/tm/net/interface".format( + self.client.provider['server'], + self.client.provider['server_port'], + ) + resp = self.client.api.get(uri) + try: + response = resp.json() + except ValueError as ex: + raise F5ModuleError(str(ex)) + if 'code' in response and response['code'] == 400: + if 'message' in response: + raise F5ModuleError(response['message']) + else: + raise F5ModuleError(resp.content) + if 'items' not in response: + return [] + result = response['items'] + return result + + +class InternalDataGroupsParameters(BaseParameters): + api_map = { + 'fullPath': 'full_path' + } + + returnables = [ + 'full_path', + 'name', + 'type', + 'records' + ] + + +class InternalDataGroupsFactManager(BaseManager): + def __init__(self, *args, **kwargs): + self.client = kwargs.get('client', None) + self.module = kwargs.get('module', None) + super(InternalDataGroupsFactManager, self).__init__(**kwargs) + self.want = InternalDataGroupsParameters(params=self.module.params) + + def exec_module(self): + facts = self._exec_module() + result = dict(internal_data_groups=facts) + return result + + def _exec_module(self): + results = [] + facts = self.read_facts() + for item in facts: + attrs = item.to_return() + results.append(attrs) + results = sorted(results, key=lambda k: k['full_path']) + return results + + def 
read_facts(self): + results = [] + collection = self.read_collection_from_device() + for resource in collection: + params = InternalDataGroupsParameters(params=resource) + results.append(params) + return results + + def read_collection_from_device(self): + uri = "https://{0}:{1}/mgmt/tm/ltm/data-group/internal".format( + self.client.provider['server'], + self.client.provider['server_port'], + ) + resp = self.client.api.get(uri) + try: + response = resp.json() + except ValueError as ex: + raise F5ModuleError(str(ex)) + if 'code' in response and response['code'] == 400: + if 'message' in response: + raise F5ModuleError(response['message']) + else: + raise F5ModuleError(resp.content) + if 'items' not in response: + return [] + result = response['items'] + return result + + +class IrulesParameters(BaseParameters): + api_map = { + 'fullPath': 'full_path', + 'ignoreVerification': 'ignore_verification', + } + + returnables = [ + 'full_path', + 'name', + 'ignore_verification', + 'checksum', + 'definition', + 'signature' + ] + + @property + def checksum(self): + if self._values['apiAnonymous'] is None: + return None + pattern = r'definition-checksum\s(?P<checksum>\w+)' + matches = re.search(pattern, self._values['apiAnonymous']) + if matches: + return matches.group('checksum') + + @property + def definition(self): + if self._values['apiAnonymous'] is None: + return None + pattern = r'(definition-(checksum|signature)\s[\w=\/+]+)' + result = re.sub(pattern, '', self._values['apiAnonymous']).strip() + if result: + return result + + @property + def signature(self): + if self._values['apiAnonymous'] is None: + return None + pattern = r'definition-signature\s(?P<signature>[\w=\/+]+)' + matches = re.search(pattern, self._values['apiAnonymous']) + if matches: + return matches.group('signature') + + @property + def ignore_verification(self): + if self._values['ignore_verification'] is None: + return 'no' + return 'yes' + + +class IrulesFactManager(BaseManager): + def __init__(self, *args, **kwargs): + self.client = kwargs.get('client', None) + self.module = kwargs.get('module', None) + super(IrulesFactManager, self).__init__(**kwargs) + self.want = IrulesParameters(params=self.module.params) + + def exec_module(self): + facts = self._exec_module() + result = dict(irules=facts) + return result + + def _exec_module(self): + results = [] + facts = self.read_facts() + for item in facts: + attrs = item.to_return() + results.append(attrs) + results = sorted(results, key=lambda k: k['full_path']) + return results + + def read_facts(self): + results = [] + collection = self.read_collection_from_device() + for resource in collection: + params = IrulesParameters(params=resource) + results.append(params) + return results + + def read_collection_from_device(self): + uri = "https://{0}:{1}/mgmt/tm/ltm/rule".format( + self.client.provider['server'], + self.client.provider['server_port'], + ) + resp = self.client.api.get(uri) + try: + response = resp.json() + except ValueError as ex: + raise F5ModuleError(str(ex)) + if 'code' in response and response['code'] == 400: + if 'message' in response: + raise F5ModuleError(response['message']) + else: + raise F5ModuleError(resp.content) + if 'items' not in response: + return [] + result = response['items'] + return result
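Editor's note: the checksum/definition/signature properties above carve a verified iRule's apiAnonymous text into its three parts. A standalone illustration of those regexes; the sample text is fabricated for the example:

import re

# Fabricated sample of what apiAnonymous can look like for a signed iRule.
api_anonymous = (
    "when HTTP_REQUEST { HTTP::respond 200 }\n"
    "definition-checksum 0abc123\n"
    "definition-signature dGVzdA=="
)

checksum = re.search(r'definition-checksum\s(?P<checksum>\w+)', api_anonymous)
signature = re.search(r'definition-signature\s(?P<signature>[\w=\/+]+)', api_anonymous)
definition = re.sub(
    r'(definition-(checksum|signature)\s[\w=\/+]+)', '', api_anonymous
).strip()

print(checksum.group('checksum'))    # 0abc123
print(signature.group('signature'))  # dGVzdA==
print(definition)                    # the bare iRule source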
'linkQosToClient': 'client_link_qos', + 'linkQosToServer': 'server_link_qos', + 'loadBalancingMode': 'lb_method', + 'minActiveMembers': 'minimum_active_members', + 'minUpMembers': 'minimum_up_members', + 'minUpMembersAction': 'minimum_up_members_action', + 'minUpMembersChecking': 'minimum_up_members_checking', + 'queueDepthLimit': 'queue_depth_limit', + 'queueOnConnectionLimit': 'queue_on_connection_limit', + 'queueTimeLimit': 'queue_time_limit', + 'reselectTries': 'reselect_tries', + 'serviceDownAction': 'service_down_action', + 'slowRampTime': 'slow_ramp_time', + 'monitor': 'monitors', + } + + returnables = [ + 'full_path', + 'name', + 'allow_nat', + 'allow_snat', + 'description', + 'ignore_persisted_weight', + 'client_ip_tos', + 'server_ip_tos', + 'client_link_qos', + 'server_link_qos', + 'lb_method', + 'minimum_active_members', + 'minimum_up_members', + 'minimum_up_members_action', + 'minimum_up_members_checking', + 'monitors', + 'queue_depth_limit', + 'queue_on_connection_limit', + 'queue_time_limit', + 'reselect_tries', + 'service_down_action', + 'slow_ramp_time', + 'priority_group_activation', + 'members', + 'metadata', + 'active_member_count', + 'available_member_count', + 'availability_status', + 'enabled_status', + 'status_reason', + 'all_max_queue_entry_age_ever', + 'all_avg_queue_entry_age', + 'all_queue_head_entry_age', + 'all_max_queue_entry_age_recently', + 'all_num_connections_queued_now', + 'all_num_connections_serviced', + 'pool_max_queue_entry_age_ever', + 'pool_avg_queue_entry_age', + 'pool_queue_head_entry_age', + 'pool_max_queue_entry_age_recently', + 'pool_num_connections_queued_now', + 'pool_num_connections_serviced', + 'current_sessions', + 'member_count', + 'total_requests', + 'server_side_bits_in', + 'server_side_bits_out', + 'server_side_current_connections', + 'server_side_max_connections', + 'server_side_pkts_in', + 'server_side_pkts_out', + 'server_side_total_connections', + ] + + @property + def active_member_count(self): + if 'availableMemberCnt' in self._values['stats']: + return int(self._values['stats']['activeMemberCnt']) + return None + + @property + def available_member_count(self): + if 'availableMemberCnt' in self._values['stats']: + return int(self._values['stats']['availableMemberCnt']) + return None + + @property + def all_max_queue_entry_age_ever(self): + return self._values['stats']['connqAll']['ageEdm'] + + @property + def all_avg_queue_entry_age(self): + return self._values['stats']['connqAll']['ageEma'] + + @property + def all_queue_head_entry_age(self): + return self._values['stats']['connqAll']['ageHead'] + + @property + def all_max_queue_entry_age_recently(self): + return self._values['stats']['connqAll']['ageMax'] + + @property + def all_num_connections_queued_now(self): + return self._values['stats']['connqAll']['depth'] + + @property + def all_num_connections_serviced(self): + return self._values['stats']['connqAll']['serviced'] + + @property + def availability_status(self): + return self._values['stats']['status']['availabilityState'] + + @property + def enabled_status(self): + return self._values['stats']['status']['enabledState'] + + @property + def status_reason(self): + return self._values['stats']['status']['statusReason'] + + @property + def pool_max_queue_entry_age_ever(self): + return self._values['stats']['connq']['ageEdm'] + + @property + def pool_avg_queue_entry_age(self): + return self._values['stats']['connq']['ageEma'] + + @property + def pool_queue_head_entry_age(self): + return self._values['stats']['connq']['ageHead'] 
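+
+    # The stats-backed properties in this class index into the ``stats`` dict
+    # that ``LtmPoolsFactManager.read_stats_from_device`` attaches to each
+    # pool. A sketch of the nesting these lookups assume, with illustrative
+    # values rather than captured device output:
+    #
+    #   stats = {
+    #       'activeMemberCnt': 1,
+    #       'availableMemberCnt': 2,
+    #       'curSessions': 0,
+    #       'memberCnt': 2,
+    #       'totRequests': 0,
+    #       'connqAll': {'ageEdm': 0, 'ageEma': 0, 'ageHead': 0, 'ageMax': 0,
+    #                    'depth': 0, 'serviced': 0},
+    #       'connq': {'ageEdm': 0, 'ageEma': 0, 'ageHead': 0, 'ageMax': 0,
+    #                 'depth': 0, 'serviced': 0},
+    #       'status': {'availabilityState': 'available',
+    #                  'enabledState': 'enabled',
+    #                  'statusReason': 'The pool is available'},
+    #       'serverside': {'bitsIn': 0, 'bitsOut': 0, 'curConns': 0,
+    #                      'maxConns': 0, 'pktsIn': 0, 'pktsOut': 0,
+    #                      'totConns': 0},
+    #   }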
+
+    @property
+    def pool_max_queue_entry_age_recently(self):
+        return self._values['stats']['connq']['ageMax']
+
+    @property
+    def pool_num_connections_queued_now(self):
+        return self._values['stats']['connq']['depth']
+
+    @property
+    def pool_num_connections_serviced(self):
+        return self._values['stats']['connq']['serviced']
+
+    @property
+    def current_sessions(self):
+        return self._values['stats']['curSessions']
+
+    @property
+    def member_count(self):
+        if 'memberCnt' in self._values['stats']:
+            return self._values['stats']['memberCnt']
+        return None
+
+    @property
+    def total_requests(self):
+        return self._values['stats']['totRequests']
+
+    @property
+    def server_side_bits_in(self):
+        return self._values['stats']['serverside']['bitsIn']
+
+    @property
+    def server_side_bits_out(self):
+        return self._values['stats']['serverside']['bitsOut']
+
+    @property
+    def server_side_current_connections(self):
+        return self._values['stats']['serverside']['curConns']
+
+    @property
+    def server_side_max_connections(self):
+        return self._values['stats']['serverside']['maxConns']
+
+    @property
+    def server_side_pkts_in(self):
+        return self._values['stats']['serverside']['pktsIn']
+
+    @property
+    def server_side_pkts_out(self):
+        return self._values['stats']['serverside']['pktsOut']
+
+    @property
+    def server_side_total_connections(self):
+        return self._values['stats']['serverside']['totConns']
+
+    @property
+    def ignore_persisted_weight(self):
+        return flatten_boolean(self._values['ignore_persisted_weight'])
+
+    @property
+    def minimum_up_members_checking(self):
+        return flatten_boolean(self._values['minimum_up_members_checking'])
+
+    @property
+    def queue_on_connection_limit(self):
+        return flatten_boolean(self._values['queue_on_connection_limit'])
+
+    @property
+    def priority_group_activation(self):
+        """Returns the TMUI value for "Priority Group Activation"
+
+        This value is identified as ``minActiveMembers`` in the REST API, so this
+        is just a convenience key for users of Ansible (where the ``bigip_pool``
+        parameter is called ``priority_group_activation``).
+
+        Returns:
+            int: Priority number assigned to the pool members.
+        """
+        return self._values['minimum_active_members']
+
+    @property
+    def metadata(self):
+        """Returns metadata associated with a pool
+
+        An arbitrary amount of metadata may be associated with a pool. You typically
+        see this used in situations where the user wants to annotate a resource, maybe
+        in cases where an automation system is responsible for creating the resource.
+
+        The metadata in the API is always stored as a list of dictionaries. We change
+        this to be a simple dictionary before it is returned to the user.
+
+        Returns:
+            dict: A dictionary of key/value pairs where the key is the metadata name
+                and the value is the metadata value.
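+
+            For example (an illustrative payload, not captured from a device),
+            the API value ``[{'name': 'owner', 'value': 'team-a'}]`` is
+            returned to the user as ``{'owner': 'team-a'}``.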
+ """ + if self._values['metadata'] is None: + return None + result = dict([(k['name'], k['value']) for k in self._values['metadata']]) + return result + + @property + def members(self): + if not self._values['members']: + return None + result = [] + for member in self._values['members']: + member['connection_limit'] = member.pop('connectionLimit', None) + member['dynamic_ratio'] = member.pop('dynamicRatio', None) + member['full_path'] = member.pop('fullPath', None) + member['inherit_profile'] = member.pop('inheritProfile', None) + member['priority_group'] = member.pop('priorityGroup', None) + member['rate_limit'] = member.pop('rateLimit', None) + + if 'fqdn' in member and 'autopopulate' in member['fqdn']: + if member['fqdn']['autopopulate'] == 'enabled': + member['fqdn_autopopulate'] = 'yes' + elif member['fqdn']['autopopulate'] == 'disabled': + member['fqdn_autopopulate'] = 'no' + del member['fqdn'] + + for key in ['ephemeral', 'inherit_profile', 'logging', 'rate_limit']: + tmp = flatten_boolean(member[key]) + member[key] = tmp + + if 'profiles' in member: + # Even though the ``profiles`` is a list, there is only ever 1 + member['encapsulation_profile'] = [x['name'] for x in member['profiles']][0] + del member['profiles'] + + if 'monitor' in member: + monitors = member.pop('monitor') + if monitors is not None: + try: + member['monitors'] = re.findall(r'/[\w-]+/[^\s}]+', monitors) + except Exception: + member['monitors'] = [monitors.strip()] + + session = member.pop('session') + state = member.pop('state') + + member['real_session'] = session + member['real_state'] = state + + if state in ['user-up', 'unchecked', 'fqdn-up-no-addr', 'fqdn-up'] and session in ['user-enabled']: + member['state'] = 'present' + elif state in ['user-down'] and session in ['user-disabled']: + member['state'] = 'forced_offline' + elif state in ['up', 'checking'] and session in ['monitor-enabled']: + member['state'] = 'present' + elif state in ['down'] and session in ['monitor-enabled']: + member['state'] = 'offline' + else: + member['state'] = 'disabled' + self._remove_internal_keywords(member) + member = dict([(k, v) for k, v in iteritems(member) if v is not None]) + result.append(member) + return result + + @property + def monitors(self): + if self._values['monitors'] is None: + return None + try: + result = re.findall(r'/[\w-]+/[^\s}]+', self._values['monitors']) + return result + except Exception: + return [self._values['monitors'].strip()] + + +class LtmPoolsFactManager(BaseManager): + def __init__(self, *args, **kwargs): + self.client = kwargs.get('client', None) + self.module = kwargs.get('module', None) + super(LtmPoolsFactManager, self).__init__(**kwargs) + self.want = LtmPoolsParameters(params=self.module.params) + + def exec_module(self): + facts = self._exec_module() + result = dict(ltm_pools=facts) + return result + + def _exec_module(self): + results = [] + facts = self.read_facts() + for item in facts: + attrs = item.to_return() + results.append(attrs) + results = sorted(results, key=lambda k: k['full_path']) + return results + + def read_facts(self): + results = [] + collection = self.read_collection_from_device() + for resource in collection: + attrs = resource + members = self.read_member_from_device(attrs['fullPath']) + attrs['members'] = members + attrs['stats'] = self.read_stats_from_device(attrs['fullPath']) + params = LtmPoolsParameters(params=attrs) + results.append(params) + return results + + def read_collection_from_device(self): + """Read the LTM pools collection from the device + + 
Note that sub-collection expansion does not work with LTM pools. Therefore,
+        one needs to query the ``members`` endpoint separately and add that to the
+        list of ``attrs`` before the full set of attributes is sent to the ``Parameters``
+        class.
+
+        Returns:
+            list: List of ``Pool`` objects
+        """
+        uri = "https://{0}:{1}/mgmt/tm/ltm/pool".format(
+            self.client.provider['server'],
+            self.client.provider['server_port'],
+        )
+        resp = self.client.api.get(uri)
+        try:
+            response = resp.json()
+        except ValueError as ex:
+            raise F5ModuleError(str(ex))
+        if 'code' in response and response['code'] == 400:
+            if 'message' in response:
+                raise F5ModuleError(response['message'])
+            else:
+                raise F5ModuleError(resp.content)
+        if 'items' not in response:
+            return []
+        result = response['items']
+        return result
+
+    def read_member_from_device(self, full_path):
+        uri = "https://{0}:{1}/mgmt/tm/ltm/pool/{2}/members".format(
+            self.client.provider['server'],
+            self.client.provider['server_port'],
+            transform_name(name=full_path)
+        )
+        resp = self.client.api.get(uri)
+        try:
+            response = resp.json()
+        except ValueError as ex:
+            raise F5ModuleError(str(ex))
+        if 'code' in response and response['code'] == 400:
+            if 'message' in response:
+                raise F5ModuleError(response['message'])
+            else:
+                raise F5ModuleError(resp.content)
+        if 'items' not in response:
+            return []
+        result = response['items']
+        return result
+
+    def read_stats_from_device(self, full_path):
+        uri = "https://{0}:{1}/mgmt/tm/ltm/pool/{2}/stats".format(
+            self.client.provider['server'],
+            self.client.provider['server_port'],
+            transform_name(name=full_path)
+        )
+        resp = self.client.api.get(uri)
+        try:
+            response = resp.json()
+        except ValueError as ex:
+            raise F5ModuleError(str(ex))
+        if 'code' in response and response['code'] == 400:
+            if 'message' in response:
+                raise F5ModuleError(response['message'])
+            else:
+                raise F5ModuleError(resp.content)
+        result = parseStats(response)
+        try:
+            return result['stats']
+        except KeyError:
+            return {}
+
+
+class LtmPolicyParameters(BaseParameters):
+    api_map = {
+        'fullPath': 'full_path',
+        'rulesReference': 'rules',
+    }
+
+    returnables = [
+        'full_path',
+        'name',
+        'status',
+        'description',
+        'strategy',
+        'rules',
+        'requires',
+        'controls',
+    ]
+
+    def _handle_conditions(self, conditions):
+        result = []
+        if conditions is None or 'items' not in conditions:
+            return result
+        for condition in conditions['items']:
+            tmp = dict()
+            tmp['case_insensitive'] = flatten_boolean(condition.pop('caseInsensitive', None))
+            tmp['case_sensitive'] = flatten_boolean(condition.pop('caseSensitive', None))
+            tmp['contains_string'] = flatten_boolean(condition.pop('contains', None))
+            tmp['external'] = flatten_boolean(condition.pop('external', None))
+            tmp['http_basic_auth'] = flatten_boolean(condition.pop('httpBasicAuth', None))
+            tmp['http_host'] = flatten_boolean(condition.pop('httpHost', None))
+            tmp['http_uri'] = flatten_boolean(condition.pop('httpUri', None))
+            tmp['request'] = flatten_boolean(condition.pop('request', None))
+            tmp['username'] = flatten_boolean(condition.pop('username', None))
+            tmp['values'] = condition.pop('values', None)
+            tmp['all'] = flatten_boolean(condition.pop('all', None))
+            result.append(self._filter_params(tmp))
+        return result
+
+    def _handle_actions(self, actions):
+        result = []
+        if actions is None or 'items' not in actions:
+            return result
+        for action in actions['items']:
+            tmp = dict()
+            tmp['http_reply'] = flatten_boolean(action.pop('httpReply', None))
+            tmp['redirect'] = flatten_boolean(action.pop('redirect', None))
+            tmp['request'] = flatten_boolean(action.pop('request', None))
+            tmp['location'] = action.pop('location', None)
+            result.append(self._filter_params(tmp))
+        return result
+
+    @property
+    def rules(self):
+        result = []
+        if self._values['rules'] is None or 'items' not in self._values['rules']:
+            return result
+        for item in self._values['rules']['items']:
+            self._remove_internal_keywords(item)
+            item['conditions'] = self._handle_conditions(item.pop('conditionsReference', None))
+            item['actions'] = self._handle_actions(item.pop('actionsReference', None))
+            result.append(item)
+        return result
+
+
+class LtmPolicyFactManager(BaseManager):
+    def __init__(self, *args, **kwargs):
+        self.client = kwargs.get('client', None)
+        self.module = kwargs.get('module', None)
+        super(LtmPolicyFactManager, self).__init__(**kwargs)
+        self.want = LtmPolicyParameters(params=self.module.params)
+
+    def exec_module(self):
+        facts = self._exec_module()
+        result = dict(ltm_policies=facts)
+        return result
+
+    def _exec_module(self):
+        results = []
+        facts = self.read_facts()
+        for item in facts:
+            attrs = item.to_return()
+            results.append(attrs)
+        results = sorted(results, key=lambda k: k['full_path'])
+        return results
+
+    def read_facts(self):
+        results = []
+        collection = self.read_collection_from_device()
+        for resource in collection:
+            params = LtmPolicyParameters(params=resource)
+            results.append(params)
+        return results
+
+    def read_collection_from_device(self):
+        uri = "https://{0}:{1}/mgmt/tm/ltm/policy/".format(
+            self.client.provider['server'],
+            self.client.provider['server_port'],
+        )
+        query = "?expandSubcollections=true"
+        resp = self.client.api.get(uri + query)
+        try:
+            response = resp.json()
+        except ValueError as ex:
+            raise F5ModuleError(str(ex))
+        if 'code' in response and response['code'] == 400:
+            if 'message' in response:
+                raise F5ModuleError(response['message'])
+            else:
+                raise F5ModuleError(resp.content)
+        if 'items' not in response:
+            return []
+        result = response['items']
+        return result
+
+
+class NodesParameters(BaseParameters):
+    api_map = {
+        'fullPath': 'full_path',
+        'connectionLimit': 'connection_limit',
+        'dynamicRatio': 'dynamic_ratio',
+        'rateLimit': 'rate_limit',
+        'monitor': 'monitors'
+    }
+
+    returnables = [
+        'full_path',
+        'name',
+        'ratio',
+        'description',
+        'connection_limit',
+        'address',
+        'dynamic_ratio',
+        'rate_limit',
+        'monitor_status',
+        'session_status',
+        'availability_status',
+        'enabled_status',
+        'status_reason',
+        'monitor_rule',
+        'monitors',
+        'monitor_type'
+    ]
+
+    @property
+    def monitors(self):
+        if self._values['monitors'] is None:
+            return []
+        try:
+            result = re.findall(r'/\w+/[^\s}]+', self._values['monitors'])
+            return result
+        except Exception:
+            return [self._values['monitors']]
+
+    @property
+    def monitor_type(self):
+        if self._values['monitors'] is None:
+            return None
+        pattern = r'min\s+\d+\s+of'
+        matches = re.search(pattern, self._values['monitors'])
+        if matches:
+            return 'm_of_n'
+        else:
+            return 'and_list'
+
+    @property
+    def rate_limit(self):
+        if self._values['rate_limit'] is None:
+            return None
+        elif self._values['rate_limit'] == 'disabled':
+            return 0
+        else:
+            return int(self._values['rate_limit'])
+
+    @property
+    def monitor_status(self):
+        return self._values['stats']['monitorStatus']
+
+    @property
+    def session_status(self):
+        return self._values['stats']['sessionStatus']
+
+    @property
+    def availability_status(self):
return self._values['stats']['status']['availabilityState'] + + @property + def enabled_status(self): + return self._values['stats']['status']['enabledState'] + + @property + def status_reason(self): + return self._values['stats']['status']['statusReason'] + + @property + def monitor_rule(self): + return self._values['stats']['monitorRule'] + + +class NodesFactManager(BaseManager): + def __init__(self, *args, **kwargs): + self.client = kwargs.get('client', None) + self.module = kwargs.get('module', None) + super(NodesFactManager, self).__init__(**kwargs) + self.want = NodesParameters(params=self.module.params) + + def exec_module(self): + facts = self._exec_module() + result = dict(nodes=facts) + return result + + def _exec_module(self): + results = [] + facts = self.read_facts() + for item in facts: + attrs = item.to_return() + results.append(attrs) + results = sorted(results, key=lambda k: k['full_path']) + return results + + def read_facts(self): + results = [] + collection = self.read_collection_from_device() + for resource in collection: + attrs = resource + attrs['stats'] = self.read_stats_from_device(attrs['fullPath']) + params = NodesParameters(params=attrs) + results.append(params) + return results + + def read_collection_from_device(self): + uri = "https://{0}:{1}/mgmt/tm/ltm/node".format( + self.client.provider['server'], + self.client.provider['server_port'], + ) + resp = self.client.api.get(uri) + try: + response = resp.json() + except ValueError as ex: + raise F5ModuleError(str(ex)) + if 'code' in response and response['code'] == 400: + if 'message' in response: + raise F5ModuleError(response['message']) + else: + raise F5ModuleError(resp.content) + if 'items' not in response: + return [] + result = response['items'] + return result + + def read_stats_from_device(self, full_path): + uri = "https://{0}:{1}/mgmt/tm/ltm/node/{2}/stats".format( + self.client.provider['server'], + self.client.provider['server_port'], + transform_name(name=full_path) + ) + resp = self.client.api.get(uri) + try: + response = resp.json() + except ValueError as ex: + raise F5ModuleError(str(ex)) + if 'code' in response and response['code'] == 400: + if 'message' in response: + raise F5ModuleError(response['message']) + else: + raise F5ModuleError(resp.content) + result = parseStats(response) + try: + return result['stats'] + except KeyError: + return {} + + +class OneConnectProfilesParameters(BaseParameters): + api_map = { + 'fullPath': 'full_path', + 'clientTimeout': 'client_timeout', + 'defaultsFrom': 'parent', + 'idleTimeoutOverride': 'idle_timeout_override', + 'limitType': 'limit_type', + 'maxAge': 'max_age', + 'maxReuse': 'max_reuse', + 'maxSize': 'max_size', + 'sharePools': 'share_pools', + 'sourceMask': 'source_mask', + } + + returnables = [ + 'full_path', + 'name', + 'parent', + 'description', + 'idle_timeout_override', + 'limit_type', + 'max_age', + 'max_reuse', + 'max_size', + 'share_pools', + 'source_mask', + ] + + @property + def description(self): + if self._values['description'] in [None, 'none']: + return None + return self._values['description'] + + @property + def idle_timeout_override(self): + if self._values['idle_timeout_override'] is None: + return None + elif self._values['idle_timeout_override'] == 'disabled': + return 0 + elif self._values['idle_timeout_override'] == 'indefinite': + return 4294967295 + return int(self._values['idle_timeout_override']) + + @property + def share_pools(self): + return flatten_boolean(self._values['share_pools']) + + +class 
OneConnectProfilesFactManager(BaseManager): + def __init__(self, *args, **kwargs): + self.client = kwargs.get('client', None) + self.module = kwargs.get('module', None) + super(OneConnectProfilesFactManager, self).__init__(**kwargs) + self.want = OneConnectProfilesParameters(params=self.module.params) + + def exec_module(self): + facts = self._exec_module() + result = dict(oneconnect_profiles=facts) + return result + + def _exec_module(self): + results = [] + facts = self.read_facts() + for item in facts: + attrs = item.to_return() + results.append(attrs) + results = sorted(results, key=lambda k: k['full_path']) + return results + + def read_facts(self): + results = [] + collection = self.read_collection_from_device() + for resource in collection: + params = OneConnectProfilesParameters(params=resource) + results.append(params) + return results + + def read_collection_from_device(self): + uri = "https://{0}:{1}/mgmt/tm/ltm/profile/one-connect".format( + self.client.provider['server'], + self.client.provider['server_port'], + ) + resp = self.client.api.get(uri) + try: + response = resp.json() + except ValueError as ex: + raise F5ModuleError(str(ex)) + if 'code' in response and response['code'] == 400: + if 'message' in response: + raise F5ModuleError(response['message']) + else: + raise F5ModuleError(resp.content) + if 'items' not in response: + return [] + result = response['items'] + return result + + +class PartitionParameters(BaseParameters): + api_map = { + 'defaultRouteDomain': 'default_route_domain', + 'fullPath': 'full_path', + } + + returnables = [ + 'name', + 'full_path', + 'description', + 'default_route_domain' + ] + + +class PartitionFactManager(BaseManager): + def __init__(self, *args, **kwargs): + self.client = kwargs.get('client', None) + self.module = kwargs.get('module', None) + super(PartitionFactManager, self).__init__(**kwargs) + self.want = PartitionParameters(params=self.module.params) + + def exec_module(self): + facts = self._exec_module() + result = dict(partitions=facts) + return result + + def _exec_module(self): + results = [] + facts = self.read_facts() + for item in facts: + attrs = item.to_return() + results.append(attrs) + results = sorted(results, key=lambda k: k['full_path']) + return results + + def read_facts(self): + results = [] + collection = self.read_collection_from_device() + for resource in collection: + params = PartitionParameters(params=resource) + results.append(params) + return results + + def read_collection_from_device(self): + uri = "https://{0}:{1}/mgmt/tm/auth/partition".format( + self.client.provider['server'], + self.client.provider['server_port'], + ) + resp = self.client.api.get(uri) + try: + response = resp.json() + except ValueError as ex: + raise F5ModuleError(str(ex)) + if 'code' in response and response['code'] == 400: + if 'message' in response: + raise F5ModuleError(response['message']) + else: + raise F5ModuleError(resp.content) + if 'items' not in response: + return [] + result = response['items'] + return result + + +class ProvisionInfoParameters(BaseParameters): + api_map = { + 'fullPath': 'full_path', + 'cpuRatio': 'cpu_ratio', + 'diskRatio': 'disk_ratio', + 'memoryRatio': 'memory_ratio', + } + + returnables = [ + 'full_path', + 'name', + 'cpu_ratio', + 'disk_ratio', + 'memory_ratio', + 'level' + ] + + +class ProvisionInfoFactManager(BaseManager): + def __init__(self, *args, **kwargs): + self.client = kwargs.get('client', None) + self.module = kwargs.get('module', None) + super(ProvisionInfoFactManager, 
self).__init__(**kwargs) + self.want = ProvisionInfoParameters(params=self.module.params) + + def exec_module(self): + facts = self._exec_module() + result = dict(provision_info=facts) + return result + + def _exec_module(self): + results = [] + facts = self.read_facts() + for item in facts: + attrs = item.to_return() + results.append(attrs) + results = sorted(results, key=lambda k: k['full_path']) + return results + + def read_facts(self): + results = [] + collection = self.read_collection_from_device() + for resource in collection: + params = ProvisionInfoParameters(params=resource) + results.append(params) + return results + + def read_collection_from_device(self): + uri = "https://{0}:{1}/mgmt/tm/sys/provision".format( + self.client.provider['server'], + self.client.provider['server_port'], + ) + resp = self.client.api.get(uri) + try: + response = resp.json() + except ValueError as ex: + raise F5ModuleError(str(ex)) + if 'code' in response and response['code'] == 400: + if 'message' in response: + raise F5ModuleError(response['message']) + else: + raise F5ModuleError(resp.content) + if 'items' not in response: + return [] + result = response['items'] + return result + + +class RouteDomainParameters(BaseParameters): + api_map = { + 'fullPath': 'full_path', + 'bwcPolicy': 'bandwidth_controller_policy', + 'connectionLimit': 'connection_limit', + 'flowEvictionPolicy': 'flow_eviction_policy', + 'servicePolicy': 'service_policy', + 'routingProtocol': 'routing_protocol' + } + + returnables = [ + 'name', + 'id', + 'full_path', + 'parent', + 'bandwidth_controller_policy', + 'connection_limit', + 'description', + 'flow_eviction_policy', + 'service_policy', + 'strict', + 'routing_protocol', + 'vlans' + ] + + @property + def strict(self): + return flatten_boolean(self._values['strict']) + + @property + def connection_limit(self): + if self._values['connection_limit'] is None: + return None + return int(self._values['connection_limit']) + + +class RouteDomainFactManager(BaseManager): + def __init__(self, *args, **kwargs): + self.client = kwargs.get('client', None) + self.module = kwargs.get('module', None) + super(RouteDomainFactManager, self).__init__(**kwargs) + self.want = RouteDomainParameters(params=self.module.params) + + def exec_module(self): + facts = self._exec_module() + result = dict(route_domains=facts) + return result + + def _exec_module(self): + results = [] + facts = self.read_facts() + for item in facts: + attrs = item.to_return() + results.append(attrs) + results = sorted(results, key=lambda k: k['full_path']) + return results + + def read_facts(self): + results = [] + collection = self.read_collection_from_device() + for resource in collection: + params = RouteDomainParameters(params=resource) + results.append(params) + return results + + def read_collection_from_device(self): + uri = "https://{0}:{1}/mgmt/tm/net/route-domain".format( + self.client.provider['server'], + self.client.provider['server_port'], + ) + resp = self.client.api.get(uri) + try: + response = resp.json() + except ValueError as ex: + raise F5ModuleError(str(ex)) + if 'code' in response and response['code'] == 400: + if 'message' in response: + raise F5ModuleError(response['message']) + else: + raise F5ModuleError(resp.content) + if 'items' not in response: + return [] + result = response['items'] + return result + + +class SelfIpsParameters(BaseParameters): + api_map = { + 'fullPath': 'full_path', + 'trafficGroup': 'traffic_group', + 'servicePolicy': 'service_policy', + 'allowService': 'allow_access_list', + 
'inheritedTrafficGroup': 'traffic_group_inherited' + } + + returnables = [ + 'full_path', + 'name', + 'address', + 'description', + 'netmask', + 'netmask_cidr', + 'floating', + 'traffic_group', + 'service_policy', + 'vlan', + 'allow_access_list', + 'traffic_group_inherited' + ] + + @property + def address(self): + parts = self._values['address'].split('/') + return parts[0] + + @property + def netmask(self): + parts = self._values['address'].split('/') + return to_netmask(parts[1]) + + @property + def netmask_cidr(self): + parts = self._values['address'].split('/') + return int(parts[1]) + + @property + def traffic_group_inherited(self): + if self._values['traffic_group_inherited'] is None: + return None + elif self._values['traffic_group_inherited'] in [False, 'false']: + # BIG-IP appears to store this as a string. This is a bug, so we handle both + # cases here. + return 'no' + else: + return 'yes' + + @property + def floating(self): + if self._values['floating'] is None: + return None + elif self._values['floating'] == 'disabled': + return 'no' + else: + return 'yes' + + +class SelfIpsFactManager(BaseManager): + def __init__(self, *args, **kwargs): + self.client = kwargs.get('client', None) + self.module = kwargs.get('module', None) + super(SelfIpsFactManager, self).__init__(**kwargs) + self.want = SelfIpsParameters(params=self.module.params) + + def exec_module(self): + facts = self._exec_module() + result = dict(self_ips=facts) + return result + + def _exec_module(self): + results = [] + facts = self.read_facts() + for item in facts: + attrs = item.to_return() + results.append(attrs) + results = sorted(results, key=lambda k: k['full_path']) + return results + + def read_facts(self): + results = [] + collection = self.read_collection_from_device() + for resource in collection: + params = SelfIpsParameters(params=resource) + results.append(params) + return results + + def read_collection_from_device(self): + uri = "https://{0}:{1}/mgmt/tm/net/self".format( + self.client.provider['server'], + self.client.provider['server_port'], + ) + resp = self.client.api.get(uri) + try: + response = resp.json() + except ValueError as ex: + raise F5ModuleError(str(ex)) + if 'code' in response and response['code'] == 400: + if 'message' in response: + raise F5ModuleError(response['message']) + else: + raise F5ModuleError(resp.content) + if 'items' not in response: + return [] + result = response['items'] + return result + + +class ServerSslProfilesParameters(BaseParameters): + api_map = { + 'fullPath': 'full_path', + 'alertTimeout': 'alert_timeout', + 'allowExpiredCrl': 'allow_expired_crl', + 'authenticate': 'authentication_frequency', + 'authenticateDepth': 'authenticate_depth', + 'authenticateName': 'authenticate_name', + 'bypassOnClientCertFail': 'bypass_on_client_cert_fail', + 'bypassOnHandshakeAlert': 'bypass_on_handshake_alert', + 'c3dCaCert': 'c3d_ca_cert', + 'c3dCaKey': 'c3d_ca_key', + 'c3dCertExtensionIncludes': 'c3d_cert_extension_includes', + 'c3dCertLifespan': 'c3d_cert_lifespan', + 'caFile': 'ca_file', + 'cacheSize': 'cache_size', + 'cacheTimeout': 'cache_timeout', + 'cipherGroup': 'cipher_group', + 'crlFile': 'crl_file', + 'expireCertResponseControl': 'expire_cert_response_control', + 'genericAlert': 'generic_alert', + 'handshakeTimeout': 'handshake_timeout', + 'maxActiveHandshakes': 'max_active_handshakes', + 'modSslMethods': 'mod_ssl_methods', + 'tmOptions': 'options', + 'peerCertMode': 'peer_cert_mode', + 'proxySsl': 'proxy_ssl', + 'proxySslPassthrough': 'proxy_ssl_passthrough', + 
'renegotiatePeriod': 'renegotiate_period', + 'renegotiateSize': 'renegotiate_size', + 'retainCertificate': 'retain_certificate', + 'secureRenegotiation': 'secure_renegotiation', + 'serverName': 'server_name', + 'sessionMirroring': 'session_mirroring', + 'sessionTicket': 'session_ticket', + 'sniDefault': 'sni_default', + 'sniRequire': 'sni_require', + 'sslC3d': 'ssl_c3d', + 'sslForwardProxy': 'ssl_forward_proxy_enabled', + 'sslForwardProxyBypass': 'ssl_forward_proxy_bypass', + 'sslSignHash': 'ssl_sign_hash', + 'strictResume': 'strict_resume', + 'uncleanShutdown': 'unclean_shutdown', + 'untrustedCertResponseControl': 'untrusted_cert_response_control' + } + + returnables = [ + 'full_path', + 'name', + 'parent', + 'description', + 'unclean_shutdown', + 'strict_resume', + 'ssl_forward_proxy_enabled', + 'ssl_forward_proxy_bypass', + 'sni_default', + 'sni_require', + 'ssl_c3d', + 'session_mirroring', + 'session_ticket', + 'mod_ssl_methods', + 'allow_expired_crl', + 'retain_certificate', + 'mode', + 'bypass_on_client_cert_fail', + 'bypass_on_handshake_alert', + 'generic_alert', + 'renegotiation', + 'proxy_ssl', + 'proxy_ssl_passthrough', + 'peer_cert_mode', + 'untrusted_cert_response_control', + 'ssl_sign_hash', + 'server_name', + 'secure_renegotiation', + 'renegotiate_size', + 'renegotiate_period', + 'options', + 'ocsp', + 'max_active_handshakes', + 'key', + 'handshake_timeout', + 'expire_cert_response_control', + 'cert', + 'chain', + 'authentication_frequency', + 'ciphers', + 'cipher_group', + 'crl_file', + 'cache_timeout', + 'cache_size', + 'ca_file', + 'c3d_cert_lifespan', + 'alert_timeout', + 'c3d_ca_key', + 'authenticate_depth', + 'authenticate_name', + 'c3d_ca_cert', + 'c3d_cert_extension_includes', + ] + + @property + def c3d_cert_extension_includes(self): + if self._values['c3d_cert_extension_includes'] is None: + return None + if len(self._values['c3d_cert_extension_includes']) == 0: + return None + self._values['c3d_cert_extension_includes'].sort() + return self._values['c3d_cert_extension_includes'] + + @property + def options(self): + if self._values['options'] is None: + return None + if len(self._values['options']) == 0: + return None + self._values['options'].sort() + return self._values['options'] + + @property + def c3d_ca_cert(self): + if self._values['c3d_ca_cert'] in [None, 'none']: + return None + return self._values['c3d_ca_cert'] + + @property + def ocsp(self): + if self._values['ocsp'] in [None, 'none']: + return None + return self._values['ocsp'] + + @property + def server_name(self): + if self._values['server_name'] in [None, 'none']: + return None + return self._values['server_name'] + + @property + def cipher_group(self): + if self._values['cipher_group'] in [None, 'none']: + return None + return self._values['cipher_group'] + + @property + def authenticate_name(self): + if self._values['authenticate_name'] in [None, 'none']: + return None + return self._values['authenticate_name'] + + @property + def c3d_ca_key(self): + if self._values['c3d_ca_key'] in [None, 'none']: + return None + return self._values['c3d_ca_key'] + + @property + def ca_file(self): + if self._values['ca_file'] in [None, 'none']: + return None + return self._values['ca_file'] + + @property + def crl_file(self): + if self._values['crl_file'] in [None, 'none']: + return None + return self._values['crl_file'] + + @property + def authentication_frequency(self): + if self._values['authentication_frequency'] in [None, 'none']: + return None + return self._values['authentication_frequency'] + + @property + 
def description(self): + if self._values['description'] in [None, 'none']: + return None + return self._values['description'] + + @property + def proxy_ssl_passthrough(self): + return flatten_boolean(self._values['proxy_ssl_passthrough']) + + @property + def proxy_ssl(self): + return flatten_boolean(self._values['proxy_ssl']) + + @property + def generic_alert(self): + return flatten_boolean(self._values['generic_alert']) + + @property + def renegotiation(self): + return flatten_boolean(self._values['renegotiation']) + + @property + def bypass_on_handshake_alert(self): + return flatten_boolean(self._values['bypass_on_handshake_alert']) + + @property + def bypass_on_client_cert_fail(self): + return flatten_boolean(self._values['bypass_on_client_cert_fail']) + + @property + def mode(self): + return flatten_boolean(self._values['mode']) + + @property + def retain_certificate(self): + return flatten_boolean(self._values['retain_certificate']) + + @property + def allow_expired_crl(self): + return flatten_boolean(self._values['allow_expired_crl']) + + @property + def mod_ssl_methods(self): + return flatten_boolean(self._values['mod_ssl_methods']) + + @property + def session_ticket(self): + return flatten_boolean(self._values['session_ticket']) + + @property + def session_mirroring(self): + return flatten_boolean(self._values['session_mirroring']) + + @property + def unclean_shutdown(self): + return flatten_boolean(self._values['unclean_shutdown']) + + @property + def strict_resume(self): + return flatten_boolean(self._values['strict_resume']) + + @property + def ssl_forward_proxy_enabled(self): + return flatten_boolean(self._values['ssl_forward_proxy_enabled']) + + @property + def ssl_forward_proxy_bypass(self): + return flatten_boolean(self._values['ssl_forward_proxy_bypass']) + + @property + def sni_default(self): + return flatten_boolean(self._values['sni_default']) + + @property + def sni_require(self): + return flatten_boolean(self._values['sni_require']) + + @property + def ssl_c3d(self): + return flatten_boolean(self._values['ssl_c3d']) + + +class ServerSslProfilesFactManager(BaseManager): + def __init__(self, *args, **kwargs): + self.client = kwargs.get('client', None) + self.module = kwargs.get('module', None) + super(ServerSslProfilesFactManager, self).__init__(**kwargs) + self.want = ServerSslProfilesParameters(params=self.module.params) + + def exec_module(self): + facts = self._exec_module() + result = dict(server_ssl_profiles=facts) + return result + + def _exec_module(self): + results = [] + facts = self.read_facts() + for item in facts: + attrs = item.to_return() + results.append(attrs) + results = sorted(results, key=lambda k: k['full_path']) + return results + + def read_facts(self): + results = [] + collection = self.read_collection_from_device() + for resource in collection: + params = ServerSslProfilesParameters(params=resource) + results.append(params) + return results + + def read_collection_from_device(self): + uri = "https://{0}:{1}/mgmt/tm/ltm/profile/server-ssl".format( + self.client.provider['server'], + self.client.provider['server_port'], + ) + resp = self.client.api.get(uri) + try: + response = resp.json() + except ValueError as ex: + raise F5ModuleError(str(ex)) + if 'code' in response and response['code'] == 400: + if 'message' in response: + raise F5ModuleError(response['message']) + else: + raise F5ModuleError(resp.content) + if 'items' not in response: + return [] + result = response['items'] + return result + + +class SoftwareVolumesParameters(BaseParameters): 
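+    # ``install_volume`` and ``default_boot_location`` below are read from the
+    # nested ``media`` dict that the software-volume endpoint returns. A sketch
+    # of the shape those lookups assume (illustrative values; 'HD1.1' is just a
+    # typical volume name, not captured device output):
+    #
+    #   {'media': {'name': 'HD1.1', 'defaultBootLocation': True}}
+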
+ api_map = { + 'fullPath': 'full_path', + 'basebuild': 'base_build', + } + + returnables = [ + 'full_path', + 'name', + 'active', + 'base_build', + 'build', + 'product', + 'status', + 'version', + 'install_volume', + 'default_boot_location' + ] + + @property + def install_volume(self): + if self._values['media'] is None: + return None + return self._values['media'].get('name', None) + + @property + def default_boot_location(self): + if self._values['media'] is None: + return None + return flatten_boolean(self._values['media'].get('defaultBootLocation', None)) + + @property + def active(self): + if self._values['active'] is True: + return 'yes' + return 'no' + + +class SoftwareVolumesFactManager(BaseManager): + def __init__(self, *args, **kwargs): + self.client = kwargs.get('client', None) + self.module = kwargs.get('module', None) + super(SoftwareVolumesFactManager, self).__init__(**kwargs) + self.want = SoftwareVolumesParameters(params=self.module.params) + + def exec_module(self): + facts = self._exec_module() + result = dict(software_volumes=facts) + return result + + def _exec_module(self): + results = [] + facts = self.read_facts() + for item in facts: + attrs = item.to_return() + results.append(attrs) + results = sorted(results, key=lambda k: k['full_path']) + return results + + def read_facts(self): + results = [] + collection = self.read_collection_from_device() + for resource in collection: + params = SoftwareVolumesParameters(params=resource) + results.append(params) + return results + + def read_collection_from_device(self): + uri = "https://{0}:{1}/mgmt/tm/sys/software/volume".format( + self.client.provider['server'], + self.client.provider['server_port'], + ) + resp = self.client.api.get(uri) + try: + response = resp.json() + except ValueError as ex: + raise F5ModuleError(str(ex)) + if 'code' in response and response['code'] == 400: + if 'message' in response: + raise F5ModuleError(response['message']) + else: + raise F5ModuleError(resp.content) + if 'items' not in response: + return [] + result = response['items'] + return result + + +class SoftwareHotfixesParameters(BaseParameters): + api_map = { + 'fullPath': 'full_path', + } + + returnables = [ + 'name', + 'full_path', + 'build', + 'checksum', + 'id', + 'product', + 'title', + 'verified', + 'version', + ] + + +class SoftwareHotfixesFactManager(BaseManager): + def __init__(self, *args, **kwargs): + self.client = kwargs.get('client', None) + self.module = kwargs.get('module', None) + super(SoftwareHotfixesFactManager, self).__init__(**kwargs) + self.want = SoftwareHotfixesParameters(params=self.module.params) + + def exec_module(self): + facts = self._exec_module() + result = dict(software_hotfixes=facts) + return result + + def _exec_module(self): + results = [] + facts = self.read_facts() + for item in facts: + attrs = item.to_return() + results.append(attrs) + results = sorted(results, key=lambda k: k['full_path']) + return results + + def read_facts(self): + results = [] + collection = self.read_collection_from_device() + for resource in collection: + params = SoftwareHotfixesParameters(params=resource) + results.append(params) + return results + + def read_collection_from_device(self): + uri = "https://{0}:{1}/mgmt/tm/sys/software/hotfix".format( + self.client.provider['server'], + self.client.provider['server_port'], + ) + resp = self.client.api.get(uri) + try: + response = resp.json() + except ValueError as ex: + raise F5ModuleError(str(ex)) + if 'code' in response and response['code'] == 400: + if 'message' in 
response:
+                raise F5ModuleError(response['message'])
+            else:
+                raise F5ModuleError(resp.content)
+        if 'items' not in response:
+            return []
+        result = response['items']
+        return result
+
+
+class SoftwareImagesParameters(BaseParameters):
+    api_map = {
+        'fullPath': 'full_path',
+        'buildDate': 'build_date',
+        'fileSize': 'file_size',
+        'lastModified': 'last_modified',
+    }
+
+    returnables = [
+        'name',
+        'full_path',
+        'build',
+        'build_date',
+        'checksum',
+        'file_size',
+        'last_modified',
+        'product',
+        'verified',
+        'version',
+    ]
+
+    @property
+    def file_size(self):
+        if self._values['file_size'] is None:
+            return None
+        matches = re.match(r'\d+', self._values['file_size'])
+        if matches:
+            return int(matches.group(0))
+
+    @property
+    def build_date(self):
+        """Normalizes the build_date string
+
+        The ISOs usually ship with a broken format
+
+        ex: Tue May 15 15 26 30 PDT 2018
+
+        This will re-format that time so that it looks like ISO 8601 without
+        microseconds
+
+        ex: 2018-05-15T15:26:30
+
+        :return:
+        """
+        if self._values['build_date'] is None:
+            return None
+
+        d = self._values['build_date'].split(' ')
+
+        # This removes the timezone portion from the string. This is done
+        # because Python has awful tz parsing and strptime doesn't work with
+        # all timezones in %Z; it only uses the timezones found in time.tzname
+        d.pop(6)
+
+        result = datetime.datetime.strptime(' '.join(d), '%a %b %d %H %M %S %Y').isoformat()
+        return result
+
+    @property
+    def last_modified(self):
+        """Normalizes the last_modified string
+
+        The strings that the system reports look like the following
+
+        ex: Tue May 15 15:26:30 2018
+
+        This property normalizes this value to be isoformat
+
+        ex: 2018-05-15T15:26:30
+
+        :return:
+        """
+        if self._values['last_modified'] is None:
+            return None
+        result = datetime.datetime.strptime(self._values['last_modified'], '%a %b %d %H:%M:%S %Y').isoformat()
+        return result
+
+
+class SoftwareImagesFactManager(BaseManager):
+    def __init__(self, *args, **kwargs):
+        self.client = kwargs.get('client', None)
+        self.module = kwargs.get('module', None)
+        super(SoftwareImagesFactManager, self).__init__(**kwargs)
+        self.want = SoftwareImagesParameters(params=self.module.params)
+
+    def exec_module(self):
+        facts = self._exec_module()
+        result = dict(software_images=facts)
+        return result
+
+    def _exec_module(self):
+        results = []
+        facts = self.read_facts()
+        for item in facts:
+            attrs = item.to_return()
+            results.append(attrs)
+        results = sorted(results, key=lambda k: k['full_path'])
+        return results
+
+    def read_facts(self):
+        results = []
+        collection = self.read_collection_from_device()
+        for resource in collection:
+            params = SoftwareImagesParameters(params=resource)
+            results.append(params)
+        return results
+
+    def read_collection_from_device(self):
+        uri = "https://{0}:{1}/mgmt/tm/sys/software/image".format(
+            self.client.provider['server'],
+            self.client.provider['server_port'],
+        )
+        resp = self.client.api.get(uri)
+        try:
+            response = resp.json()
+        except ValueError as ex:
+            raise F5ModuleError(str(ex))
+        if 'code' in response and response['code'] == 400:
+            if 'message' in response:
+                raise F5ModuleError(response['message'])
+            else:
+                raise F5ModuleError(resp.content)
+        if 'items' not in response:
+            return []
+        result = response['items']
+        return result
+
+
+class SslCertificatesParameters(BaseParameters):
+    api_map = {
+        'fullPath': 'full_path',
+        'keyType': 'key_type',
+        'certificateKeySize': 'key_size',
+        'systemPath': 'system_path',
+        'checksum': 'sha1_checksum',
'lastUpdateTime': 'last_update_time', + 'isBundle': 'is_bundle', + 'expirationString': 'expiration_date', + 'expirationDate': 'expiration_timestamp', + 'createTime': 'create_time' + } + + returnables = [ + 'full_path', + 'name', + 'key_type', + 'key_size', + 'system_path', + 'sha1_checksum', + 'subject', + 'last_update_time', + 'issuer', + 'is_bundle', + 'fingerprint', + 'expiration_date', + 'expiration_timestamp', + 'create_time', + ] + + @property + def sha1_checksum(self): + if self._values['sha1_checksum'] is None: + return None + parts = self._values['sha1_checksum'].split(':') + return parts[2] + + @property + def is_bundle(self): + if self._values['sha1_checksum'] is None: + return None + if self._values['is_bundle'] in BOOLEANS_TRUE: + return 'yes' + return 'no' + + +class SslCertificatesFactManager(BaseManager): + def __init__(self, *args, **kwargs): + self.client = kwargs.get('client', None) + self.module = kwargs.get('module', None) + super(SslCertificatesFactManager, self).__init__(**kwargs) + self.want = SslCertificatesParameters(params=self.module.params) + + def exec_module(self): + facts = self._exec_module() + result = dict(ssl_certs=facts) + return result + + def _exec_module(self): + results = [] + facts = self.read_facts() + for item in facts: + attrs = item.to_return() + results.append(attrs) + results = sorted(results, key=lambda k: k['full_path']) + return results + + def read_facts(self): + results = [] + collection = self.read_collection_from_device() + for resource in collection: + params = SslCertificatesParameters(params=resource) + results.append(params) + return results + + def read_collection_from_device(self): + uri = "https://{0}:{1}/mgmt/tm/sys/file/ssl-cert".format( + self.client.provider['server'], + self.client.provider['server_port'], + ) + resp = self.client.api.get(uri) + try: + response = resp.json() + except ValueError as ex: + raise F5ModuleError(str(ex)) + if 'code' in response and response['code'] == 400: + if 'message' in response: + raise F5ModuleError(response['message']) + else: + raise F5ModuleError(resp.content) + if 'items' not in response: + return [] + result = response['items'] + return result + + +class SslKeysParameters(BaseParameters): + api_map = { + 'fullPath': 'full_path', + 'keyType': 'key_type', + 'keySize': 'key_size', + 'securityType': 'security_type', + 'systemPath': 'system_path', + 'checksum': 'sha1_checksum' + } + + returnables = [ + 'full_path', + 'name', + 'key_type', + 'key_size', + 'security_type', + 'system_path', + 'sha1_checksum' + ] + + @property + def sha1_checksum(self): + if self._values['sha1_checksum'] is None: + return None + parts = self._values['sha1_checksum'].split(':') + return parts[2] + + +class SslKeysFactManager(BaseManager): + def __init__(self, *args, **kwargs): + self.client = kwargs.get('client', None) + self.module = kwargs.get('module', None) + super(SslKeysFactManager, self).__init__(**kwargs) + self.want = SslKeysParameters(params=self.module.params) + + def exec_module(self): + facts = self._exec_module() + result = dict(ssl_keys=facts) + return result + + def _exec_module(self): + results = [] + facts = self.read_facts() + for item in facts: + attrs = item.to_return() + results.append(attrs) + results = sorted(results, key=lambda k: k['full_path']) + return results + + def read_facts(self): + results = [] + collection = self.read_collection_from_device() + for resource in collection: + params = SslKeysParameters(params=resource) + results.append(params) + return results + + def 
read_collection_from_device(self): + uri = "https://{0}:{1}/mgmt/tm/sys/file/ssl-key".format( + self.client.provider['server'], + self.client.provider['server_port'], + ) + resp = self.client.api.get(uri) + try: + response = resp.json() + except ValueError as ex: + raise F5ModuleError(str(ex)) + if 'code' in response and response['code'] == 400: + if 'message' in response: + raise F5ModuleError(response['message']) + else: + raise F5ModuleError(resp.content) + if 'items' not in response: + return [] + result = response['items'] + return result + + +class SystemDbParameters(BaseParameters): + api_map = { + 'fullPath': 'full_path', + 'defaultValue': 'default', + 'scfConfig': 'scf_config', + 'valueRange': 'value_range' + } + + returnables = [ + 'name', + 'full_path', + 'default', + 'scf_config', + 'value', + 'value_range' + ] + + +class SystemDbFactManager(BaseManager): + def __init__(self, *args, **kwargs): + self.client = kwargs.get('client', None) + self.module = kwargs.get('module', None) + super(SystemDbFactManager, self).__init__(**kwargs) + self.want = SystemInfoParameters(params=self.module.params) + + def exec_module(self): + facts = self._exec_module() + result = dict(system_db=facts) + return result + + def _exec_module(self): + results = [] + facts = self.read_facts() + for item in facts: + attrs = item.to_return() + results.append(attrs) + results = sorted(results, key=lambda k: k['full_path']) + return results + + def read_facts(self): + results = [] + collection = self.read_collection_from_device() + for resource in collection: + params = SystemDbParameters(params=resource) + results.append(params) + return results + + def read_collection_from_device(self): + uri = "https://{0}:{1}/mgmt/tm/sys/db".format( + self.client.provider['server'], + self.client.provider['server_port'], + ) + resp = self.client.api.get(uri) + try: + response = resp.json() + except ValueError as ex: + raise F5ModuleError(str(ex)) + if 'code' in response and response['code'] == 400: + if 'message' in response: + raise F5ModuleError(response['message']) + else: + raise F5ModuleError(resp.content) + if 'items' not in response: + return [] + result = response['items'] + return result + + +class SystemInfoParameters(BaseParameters): + api_map = { + + } + + returnables = [ + 'base_mac_address', + 'marketing_name', + 'time', + 'hardware_information', + 'product_information', + 'package_edition', + 'package_version', + 'product_code', + 'product_build', + 'product_built', + 'product_build_date', + 'product_changelist', + 'product_jobid', + 'product_version', + 'uptime', + 'chassis_serial', + 'host_board_part_revision', + 'host_board_serial', + 'platform', + 'switch_board_part_revision', + 'switch_board_serial' + ] + + @property + def chassis_serial(self): + if self._values['system-info'] is None: + return None + if 'bigipChassisSerialNum' not in self._values['system-info'][0]: + return None + return self._values['system-info'][0]['bigipChassisSerialNum'] + + @property + def switch_board_serial(self): + if self._values['system-info'] is None: + return None + if 'switchBoardSerialNum' not in self._values['system-info'][0]: + return None + if self._values['system-info'][0]['switchBoardSerialNum'].strip() == '': + return None + return self._values['system-info'][0]['switchBoardSerialNum'] + + @property + def switch_board_part_revision(self): + if self._values['system-info'] is None: + return None + if 'switchBoardPartRevNum' not in self._values['system-info'][0]: + return None + if 
self._values['system-info'][0]['switchBoardPartRevNum'].strip() == '': + return None + return self._values['system-info'][0]['switchBoardPartRevNum'] + + @property + def platform(self): + if self._values['system-info'] is None: + return None + return self._values['system-info'][0]['platform'] + + @property + def host_board_serial(self): + if self._values['system-info'] is None: + return None + if 'hostBoardSerialNum' not in self._values['system-info'][0]: + return None + if self._values['system-info'][0]['hostBoardSerialNum'].strip() == '': + return None + return self._values['system-info'][0]['hostBoardSerialNum'] + + @property + def host_board_part_revision(self): + if self._values['system-info'] is None: + return None + if 'hostBoardPartRevNum' not in self._values['system-info'][0]: + return None + if self._values['system-info'][0]['hostBoardPartRevNum'].strip() == '': + return None + return self._values['system-info'][0]['hostBoardPartRevNum'] + + @property + def package_edition(self): + return self._values['Edition'] + + @property + def package_version(self): + return 'Build {0} - {1}'.format(self._values['Build'], self._values['Date']) + + @property + def product_build(self): + return self._values['Build'] + + @property + def product_build_date(self): + return self._values['Date'] + + @property + def product_built(self): + if 'Built' in self._values['version_info']: + return int(self._values['version_info']['Built']) + + @property + def product_changelist(self): + if 'Changelist' in self._values['version_info']: + return int(self._values['version_info']['Changelist']) + + @property + def product_jobid(self): + if 'JobID' in self._values['version_info']: + return int(self._values['version_info']['JobID']) + + @property + def product_code(self): + return self._values['Product'] + + @property + def product_version(self): + return self._values['Version'] + + @property + def hardware_information(self): + if self._values['hardware-version'] is None: + return None + self._transform_name_attribute(self._values['hardware-version']) + result = [v for k, v in iteritems(self._values['hardware-version'])] + return result + + def _transform_name_attribute(self, entry): + if isinstance(entry, dict): + for k, v in iteritems(entry): + if k == 'tmName': + entry['name'] = entry.pop('tmName') + self._transform_name_attribute(v) + elif isinstance(entry, list): + for k in entry: + if k == 'tmName': + entry['name'] = entry.pop('tmName') + self._transform_name_attribute(k) + else: + return + + @property + def time(self): + if self._values['fullDate'] is None: + return None + date = datetime.datetime.strptime(self._values['fullDate'], "%Y-%m-%dT%H:%M:%SZ") + result = dict( + day=date.day, + hour=date.hour, + minute=date.minute, + month=date.month, + second=date.second, + year=date.year + ) + return result + + @property + def marketing_name(self): + if self._values['platform'] is None: + return None + return self._values['platform'][0]['marketingName'] + + @property + def base_mac_address(self): + if self._values['platform'] is None: + return None + return self._values['platform'][0]['baseMac'] + + +class SystemInfoFactManager(BaseManager): + def __init__(self, *args, **kwargs): + self.client = kwargs.get('client', None) + self.module = kwargs.get('module', None) + super(SystemInfoFactManager, self).__init__(**kwargs) + self.want = SystemInfoParameters(params=self.module.params) + + def exec_module(self): + facts = self._exec_module() + result = dict(system_info=facts) + return result + + def 
_exec_module(self):
+        facts = self.read_facts()
+        results = facts.to_return()
+        return results
+
+    def read_facts(self):
+        collection = self.read_collection_from_device()
+        params = SystemInfoParameters(params=collection)
+        return params
+
+    def read_collection_from_device(self):
+        result = dict()
+        tmp = self.read_hardware_info_from_device()
+        if tmp:
+            result.update(tmp)
+
+        tmp = self.read_clock_info_from_device()
+        if tmp:
+            result.update(tmp)
+
+        tmp = self.read_version_info_from_device()
+        if tmp:
+            result.update(tmp)
+
+        tmp = self.read_uptime_info_from_device()
+        if tmp:
+            result.update(tmp)
+
+        tmp = self.read_version_file_info_from_device()
+        if tmp:
+            result.update(tmp)
+
+        return result
+
+    def read_version_file_info_from_device(self):
+        uri = "https://{0}:{1}/mgmt/tm/util/bash".format(
+            self.client.provider['server'],
+            self.client.provider['server_port'],
+        )
+        args = dict(
+            command='run',
+            utilCmdArgs='-c "cat /VERSION"'
+        )
+        resp = self.client.api.post(uri, json=args)
+        try:
+            response = resp.json()
+        except ValueError as ex:
+            raise F5ModuleError(str(ex))
+        if 'code' in response and response['code'] == 400:
+            if 'message' in response:
+                raise F5ModuleError(response['message'])
+            else:
+                raise F5ModuleError(resp.content)
+        try:
+            pattern = r'^(?P<key>(Product|Build|Sequence|BaseBuild|Edition|Date|Built|Changelist|JobID))\:(?P<value>.*)'
+            result = response['commandResult'].strip()
+        except KeyError:
+            return None
+
+        if 'No such file or directory' in result:
+            return None
+
+        lines = response['commandResult'].split("\n")
+        result = dict()
+        for line in lines:
+            if not line:
+                continue
+            matches = re.match(pattern, line)
+            if matches:
+                result[matches.group('key')] = matches.group('value').strip()
+
+        if result:
+            return dict(
+                version_info=result
+            )
+
+    def read_uptime_info_from_device(self):
+        uri = "https://{0}:{1}/mgmt/tm/util/bash".format(
+            self.client.provider['server'],
+            self.client.provider['server_port'],
+        )
+        args = dict(
+            command='run',
+            utilCmdArgs='-c "cat /proc/uptime"'
+        )
+        resp = self.client.api.post(uri, json=args)
+        try:
+            response = resp.json()
+        except ValueError as ex:
+            raise F5ModuleError(str(ex))
+        if 'code' in response and response['code'] == 400:
+            if 'message' in response:
+                raise F5ModuleError(response['message'])
+            else:
+                raise F5ModuleError(resp.content)
+        try:
+            parts = response['commandResult'].strip().split(' ')
+            return dict(
+                uptime=math.floor(float(parts[0]))
+            )
+        except KeyError:
+            pass
+
+    def read_hardware_info_from_device(self):
+        uri = "https://{0}:{1}/mgmt/tm/sys/hardware".format(
+            self.client.provider['server'],
+            self.client.provider['server_port'],
+        )
+        resp = self.client.api.get(uri)
+        try:
+            response = resp.json()
+        except ValueError as ex:
+            raise F5ModuleError(str(ex))
+        if 'code' in response and response['code'] == 400:
+            if 'message' in response:
+                raise F5ModuleError(response['message'])
+            else:
+                raise F5ModuleError(resp.content)
+        result = parseStats(response)
+        return result
+
+    def read_clock_info_from_device(self):
+        """Parses clock info from the REST API
+
+        The clock stat returned from the REST API (at the time of 13.1.0.7)
+        is similar to the following.
+ + { + "kind": "tm:sys:clock:clockstats", + "selfLink": "https://localhost/mgmt/tm/sys/clock?ver=13.1.0.4", + "entries": { + "https://localhost/mgmt/tm/sys/clock/0": { + "nestedStats": { + "entries": { + "fullDate": { + "description": "2018-06-05T13:38:33Z" + } + } + } + } + } + } + + Parsing this data with the ``parseStats`` method yields a list of + the clock stats in a format resembling the one below. + + [{'fullDate': '2018-06-05T13:41:05Z'}] + + This method therefore cherry-picks the first entry from this list + and returns it; the list never contains more than one item. + + Returns: + A dict mapping keys to the corresponding clock stats. For + example: + + {'fullDate': '2018-06-05T13:41:05Z'} + + A clock stat should always be present, unless it is removed + from the API in the future or moved to a different + API endpoint. + + Raises: + F5ModuleError: A non-successful HTTP code was returned or a JSON + response was not found. + """ + uri = "https://{0}:{1}/mgmt/tm/sys/clock".format( + self.client.provider['server'], + self.client.provider['server_port'], + ) + resp = self.client.api.get(uri) + try: + response = resp.json() + except ValueError as ex: + raise F5ModuleError(str(ex)) + if 'code' in response and response['code'] == 400: + if 'message' in response: + raise F5ModuleError(response['message']) + else: + raise F5ModuleError(resp.content) + result = parseStats(response) + return result[0] + + def read_version_info_from_device(self): + """Parses version info from the REST API + + The version stat returned from the REST API (at the time of 13.1.0.7) + is similar to the following. + + { + "kind": "tm:sys:version:versionstats", + "selfLink": "https://localhost/mgmt/tm/sys/version?ver=13.1.0.4", + "entries": { + "https://localhost/mgmt/tm/sys/version/0": { + "nestedStats": { + "entries": { + "Build": { + "description": "0.0.6" + }, + "Date": { + "description": "Tue Mar 13 20:10:42 PDT 2018" + }, + "Edition": { + "description": "Point Release 4" + }, + "Product": { + "description": "BIG-IP" + }, + "Title": { + "description": "Main Package" + }, + "Version": { + "description": "13.1.0.4" + } + } + } + } + } + } + + Parsing this data with the ``parseStats`` method yields a list of + the version stats in a format resembling the one below. + + [{'Build': '0.0.6', 'Date': 'Tue Mar 13 20:10:42 PDT 2018', + 'Edition': 'Point Release 4', 'Product': 'BIG-IP', 'Title': 'Main Package', + 'Version': '13.1.0.4'}] + + This method therefore cherry-picks the first entry from this list + and returns it; the list never contains more than one item. + + Returns: + A dict mapping keys to the corresponding version stats. For + example: + + {'Build': '0.0.6', 'Date': 'Tue Mar 13 20:10:42 PDT 2018', + 'Edition': 'Point Release 4', 'Product': 'BIG-IP', 'Title': 'Main Package', + 'Version': '13.1.0.4'} + + A version stat should always be present, unless it is removed + from the API in the future or moved to a different + API endpoint. + + Raises: + F5ModuleError: A non-successful HTTP code was returned or a JSON + response was not found.
+ """ + uri = "https://{0}:{1}/mgmt/tm/sys/version".format( + self.client.provider['server'], + self.client.provider['server_port'], + ) + resp = self.client.api.get(uri) + try: + response = resp.json() + except ValueError as ex: + raise F5ModuleError(str(ex)) + if 'code' in response and response['code'] == 400: + if 'message' in response: + raise F5ModuleError(response['message']) + else: + raise F5ModuleError(resp.content) + result = parseStats(response) + return result[0] + + +class TcpMonitorsParameters(BaseParameters): + api_map = { + 'fullPath': 'full_path', + 'defaultsFrom': 'parent', + 'adaptiveDivergenceType': 'adaptive_divergence_type', + 'adaptiveDivergenceValue': 'adaptive_divergence_value', + 'adaptiveLimit': 'adaptive_limit', + 'adaptiveSamplingTimespan': 'adaptive_sampling_timespan', + 'ipDscp': 'ip_dscp', + 'manualResume': 'manual_resume', + 'timeUntilUp': 'time_until_up', + 'upInterval': 'up_interval', + } + + returnables = [ + 'full_path', + 'name', + 'parent', + 'description', + 'adaptive', + 'adaptive_divergence_type', + 'adaptive_divergence_value', + 'adaptive_limit', + 'adaptive_sampling_timespan', + 'destination', + 'interval', + 'ip_dscp', + 'manual_resume', + 'reverse', + 'time_until_up', + 'timeout', + 'transparent', + 'up_interval', + ] + + @property + def description(self): + if self._values['description'] in [None, 'none']: + return None + return self._values['description'] + + @property + def transparent(self): + return flatten_boolean(self._values['transparent']) + + @property + def manual_resume(self): + return flatten_boolean(self._values['manual_resume']) + + @property + def adaptive(self): + return flatten_boolean(self._values['adaptive']) + + @property + def reverse(self): + return flatten_boolean(self._values['reverse']) + + +class TcpMonitorsFactManager(BaseManager): + def __init__(self, *args, **kwargs): + self.client = kwargs.get('client', None) + self.module = kwargs.get('module', None) + super(TcpMonitorsFactManager, self).__init__(**kwargs) + self.want = TcpMonitorsParameters(params=self.module.params) + + def exec_module(self): + facts = self._exec_module() + result = dict(tcp_monitors=facts) + return result + + def _exec_module(self): + results = [] + facts = self.read_facts() + for item in facts: + attrs = item.to_return() + results.append(attrs) + results = sorted(results, key=lambda k: k['full_path']) + return results + + def read_facts(self): + results = [] + collection = self.read_collection_from_device() + for resource in collection: + params = TcpMonitorsParameters(params=resource) + results.append(params) + return results + + def read_collection_from_device(self): + uri = "https://{0}:{1}/mgmt/tm/ltm/monitor/tcp".format( + self.client.provider['server'], + self.client.provider['server_port'], + ) + resp = self.client.api.get(uri) + try: + response = resp.json() + except ValueError as ex: + raise F5ModuleError(str(ex)) + if 'code' in response and response['code'] == 400: + if 'message' in response: + raise F5ModuleError(response['message']) + else: + raise F5ModuleError(resp.content) + if 'items' not in response: + return [] + result = response['items'] + return result + + +class TcpHalfOpenMonitorsParameters(BaseParameters): + api_map = { + 'fullPath': 'full_path', + 'defaultsFrom': 'parent', + 'manualResume': 'manual_resume', + 'timeUntilUp': 'time_until_up', + 'upInterval': 'up_interval', + } + + returnables = [ + 'full_path', + 'name', + 'parent', + 'description', + 'destination', + 'interval', + 'manual_resume', + 'time_until_up', + 
'timeout', + 'transparent', + 'up_interval', + ] + + @property + def description(self): + if self._values['description'] in [None, 'none']: + return None + return self._values['description'] + + @property + def transparent(self): + return flatten_boolean(self._values['transparent']) + + @property + def manual_resume(self): + return flatten_boolean(self._values['manual_resume']) + + +class TcpHalfOpenMonitorsFactManager(BaseManager): + def __init__(self, *args, **kwargs): + self.client = kwargs.get('client', None) + self.module = kwargs.get('module', None) + super(TcpHalfOpenMonitorsFactManager, self).__init__(**kwargs) + self.want = TcpHalfOpenMonitorsParameters(params=self.module.params) + + def exec_module(self): + facts = self._exec_module() + result = dict(tcp_half_open_monitors=facts) + return result + + def _exec_module(self): + results = [] + facts = self.read_facts() + for item in facts: + attrs = item.to_return() + results.append(attrs) + results = sorted(results, key=lambda k: k['full_path']) + return results + + def read_facts(self): + results = [] + collection = self.read_collection_from_device() + for resource in collection: + params = TcpHalfOpenMonitorsParameters(params=resource) + results.append(params) + return results + + def read_collection_from_device(self): + uri = "https://{0}:{1}/mgmt/tm/ltm/monitor/tcp-half-open".format( + self.client.provider['server'], + self.client.provider['server_port'], + ) + resp = self.client.api.get(uri) + try: + response = resp.json() + except ValueError as ex: + raise F5ModuleError(str(ex)) + if 'code' in response and response['code'] == 400: + if 'message' in response: + raise F5ModuleError(response['message']) + else: + raise F5ModuleError(resp.content) + if 'items' not in response: + return [] + result = response['items'] + return result + + +class TcpProfilesParameters(BaseParameters): + api_map = { + 'fullPath': 'full_path', + 'defaultsFrom': 'parent', + 'ackOnPush': 'ack_on_push', + 'autoProxyBufferSize': 'auto_proxy_buffer', + 'autoReceiveWindowSize': 'auto_receive_window', + 'autoSendBufferSize': 'auto_send_buffer', + 'closeWaitTimeout': 'close_wait', + 'cmetricsCache': 'congestion_metrics_cache', + 'cmetricsCacheTimeout': 'congestion_metrics_cache_timeout', + 'congestionControl': 'congestion_control', + 'deferredAccept': 'deferred_accept', + 'delayWindowControl': 'delay_window_control', + 'delayedAcks': 'delayed_acks', + 'earlyRetransmit': 'early_retransmit', + 'ecn': 'explicit_congestion_notification', + 'enhancedLossRecovery': 'enhanced_loss_recovery', + 'fastOpen': 'fast_open', + 'fastOpenCookieExpiration': 'fast_open_cookie_expiration', + 'finWaitTimeout': 'fin_wait_1', + 'finWait_2Timeout': 'fin_wait_2', + 'idleTimeout': 'idle_timeout', + 'initCwnd': 'initial_congestion_window_size', + 'initRwnd': 'initial_receive_window_size', + 'ipDfMode': 'dont_fragment_flag', + 'ipTosToClient': 'ip_tos', + 'ipTtlMode': 'time_to_live', + 'ipTtlV4': 'time_to_live_v4', + 'ipTtlV6': 'time_to_live_v6', + 'keepAliveInterval': 'keep_alive_interval', + 'limitedTransmit': 'limited_transmit_recovery', + 'linkQosToClient': 'link_qos', + 'maxRetrans': 'max_segment_retrans', + 'synMaxRetrans': 'max_syn_retrans', + 'rexmtThresh': 'retransmit_threshold', + 'maxSegmentSize': 'max_segment_size', + 'md5Signature': 'md5_signature', + 'minimumRto': 'minimum_rto', + 'mptcp': 'multipath_tcp', + 'mptcpCsum': 'mptcp_checksum', + 'mptcpCsumVerify': 'mptcp_checksum_verify', + 'mptcpFallback': 'mptcp_fallback', + 'mptcpFastjoin': 'mptcp_fast_join', + 
'mptcpIdleTimeout': 'mptcp_idle_timeout', + 'mptcpJoinMax': 'mptcp_join_max', + 'mptcpMakeafterbreak': 'mptcp_make_after_break', + 'mptcpNojoindssack': 'mptcp_no_join_dss_ack', + 'mptcpRtomax': 'mptcp_rto_max', + 'mptcpRxmitmin': 'mptcp_retransmit_min', + 'mptcpSubflowmax': 'mptcp_subflow_max', + 'mptcpTimeout': 'mptcp_timeout', + 'nagle': 'nagle_algorithm', + 'pktLossIgnoreBurst': 'pkt_loss_ignore_burst', + 'pktLossIgnoreRate': 'pkt_loss_ignore_rate', + 'proxyBufferHigh': 'proxy_buffer_high', + 'proxyBufferLow': 'proxy_buffer_low', + 'proxyMss': 'proxy_max_segment', + 'proxyOptions': 'proxy_options', + 'pushFlag': 'push_flag', + 'ratePace': 'rate_pace', + 'ratePaceMaxRate': 'rate_pace_max_rate', + 'receiveWindowSize': 'receive_window', + 'resetOnTimeout': 'reset_on_timeout', + 'selectiveAcks': 'selective_acks', + 'selectiveNack': 'selective_nack', + 'sendBufferSize': 'send_buffer', + 'slowStart': 'slow_start', + 'synCookieEnable': 'syn_cookie_enable', + 'synCookieWhitelist': 'syn_cookie_white_list', + 'synRtoBase': 'syn_retrans_to_base', + 'tailLossProbe': 'tail_loss_probe', + 'timeWaitRecycle': 'time_wait_recycle', + 'timeWaitTimeout': 'time_wait', + 'verifiedAccept': 'verified_accept', + 'zeroWindowTimeout': 'zero_window_timeout', + } + + returnables = [ + 'full_path', + 'name', + 'parent', + 'description', + 'abc', + 'ack_on_push', + 'auto_proxy_buffer', + 'auto_receive_window', + 'auto_send_buffer', + 'close_wait', + 'congestion_metrics_cache', + 'congestion_metrics_cache_timeout', + 'congestion_control', + 'deferred_accept', + 'delay_window_control', + 'delayed_acks', + 'dsack', + 'early_retransmit', + 'explicit_congestion_notification', + 'enhanced_loss_recovery', + 'fast_open', + 'fast_open_cookie_expiration', + 'fin_wait_1', + 'fin_wait_2', + 'idle_timeout', + 'initial_congestion_window_size', + 'initial_receive_window_size', + 'dont_fragment_flag', + 'ip_tos', + 'time_to_live', + 'time_to_live_v4', + 'time_to_live_v6', + 'keep_alive_interval', + 'limited_transmit_recovery', + 'link_qos', + 'max_segment_retrans', + 'max_syn_retrans', + 'max_segment_size', + 'md5_signature', + 'minimum_rto', + 'multipath_tcp', + 'mptcp_checksum', + 'mptcp_checksum_verify', + 'mptcp_fallback', + 'mptcp_fast_join', + 'mptcp_idle_timeout', + 'mptcp_join_max', + 'mptcp_make_after_break', + 'mptcp_no_join_dss_ack', + 'mptcp_rto_max', + 'mptcp_retransmit_min', + 'mptcp_subflow_max', + 'mptcp_timeout', + 'nagle_algorithm', + 'pkt_loss_ignore_burst', + 'pkt_loss_ignore_rate', + 'proxy_buffer_high', + 'proxy_buffer_low', + 'proxy_max_segment', + 'proxy_options', + 'push_flag', + 'rate_pace', + 'rate_pace_max_rate', + 'receive_window', + 'reset_on_timeout', + 'retransmit_threshold', + 'selective_acks', + 'selective_nack', + 'send_buffer', + 'slow_start', + 'syn_cookie_enable', + 'syn_cookie_white_list', + 'syn_retrans_to_base', + 'tail_loss_probe', + 'time_wait_recycle', + 'time_wait', + 'timestamps', + 'verified_accept', + 'zero_window_timeout', + ] + + @property + def description(self): + if self._values['description'] in [None, 'none']: + return None + return self._values['description'] + + @property + def time_wait(self): + if self._values['time_wait'] is None: + return None + if self._values['time_wait'] == 0: + return "immediate" + if self._values['time_wait'] == 4294967295: + return 'indefinite' + return self._values['time_wait'] + + @property + def close_wait(self): + if self._values['close_wait'] is None: + return None + if self._values['close_wait'] == 0: + return "immediate" + if 
self._values['close_wait'] == 4294967295: + return 'indefinite' + return self._values['close_wait'] + + @property + def fin_wait_1(self): + if self._values['fin_wait_1'] is None: + return None + if self._values['fin_wait_1'] == 0: + return "immediate" + if self._values['fin_wait_1'] == 4294967295: + return 'indefinite' + return self._values['fin_wait_1'] + + @property + def fin_wait_2(self): + if self._values['fin_wait_2'] is None: + return None + if self._values['fin_wait_2'] == 0: + return "immediate" + if self._values['fin_wait_2'] == 4294967295: + return 'indefinite' + return self._values['fin_wait_2'] + + @property + def zero_window_timeout(self): + if self._values['zero_window_timeout'] is None: + return None + if self._values['zero_window_timeout'] == 4294967295: + return 'indefinite' + return self._values['zero_window_timeout'] + + @property + def idle_timeout(self): + if self._values['idle_timeout'] is None: + return None + if self._values['idle_timeout'] == 4294967295: + return 'indefinite' + return self._values['idle_timeout'] + + @property + def keep_alive_interval(self): + if self._values['keep_alive_interval'] is None: + return None + if self._values['keep_alive_interval'] == 4294967295: + return 'indefinite' + return self._values['keep_alive_interval'] + + @property + def verified_accept(self): + return flatten_boolean(self._values['verified_accept']) + + @property + def timestamps(self): + return flatten_boolean(self._values['timestamps']) + + @property + def time_wait_recycle(self): + return flatten_boolean(self._values['time_wait_recycle']) + + @property + def tail_loss_probe(self): + return flatten_boolean(self._values['tail_loss_probe']) + + @property + def syn_cookie_white_list(self): + return flatten_boolean(self._values['syn_cookie_white_list']) + + @property + def syn_cookie_enable(self): + return flatten_boolean(self._values['syn_cookie_enable']) + + @property + def slow_start(self): + return flatten_boolean(self._values['slow_start']) + + @property + def selective_nack(self): + return flatten_boolean(self._values['selective_nack']) + + @property + def selective_acks(self): + return flatten_boolean(self._values['selective_acks']) + + @property + def reset_on_timeout(self): + return flatten_boolean(self._values['reset_on_timeout']) + + @property + def rate_pace(self): + return flatten_boolean(self._values['rate_pace']) + + @property + def proxy_options(self): + return flatten_boolean(self._values['proxy_options']) + + @property + def proxy_max_segment(self): + return flatten_boolean(self._values['proxy_max_segment']) + + @property + def nagle_algorithm(self): + return flatten_boolean(self._values['nagle_algorithm']) + + @property + def mptcp_no_join_dss_ack(self): + return flatten_boolean(self._values['mptcp_no_join_dss_ack']) + + @property + def mptcp_make_after_break(self): + return flatten_boolean(self._values['mptcp_make_after_break']) + + @property + def mptcp_fast_join(self): + return flatten_boolean(self._values['mptcp_fast_join']) + + @property + def mptcp_checksum_verify(self): + return flatten_boolean(self._values['mptcp_checksum_verify']) + + @property + def mptcp_checksum(self): + return flatten_boolean(self._values['mptcp_checksum']) + + @property + def multipath_tcp(self): + return flatten_boolean(self._values['multipath_tcp']) + + @property + def md5_signature(self): + return flatten_boolean(self._values['md5_signature']) + + @property + def limited_transmit_recovery(self): + return flatten_boolean(self._values['limited_transmit_recovery']) + + 
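# The yes/no facts in this class delegate to flatten_boolean(), which + # normalizes the API's 'enabled'/'disabled' style values to 'yes'/'no'. + +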
@property + def fast_open(self): + return flatten_boolean(self._values['fast_open']) + + @property + def enhanced_loss_recovery(self): + return flatten_boolean(self._values['enhanced_loss_recovery']) + + @property + def explicit_congestion_notification(self): + return flatten_boolean(self._values['explicit_congestion_notification']) + + @property + def early_retransmit(self): + return flatten_boolean(self._values['early_retransmit']) + + @property + def dsack(self): + return flatten_boolean(self._values['dsack']) + + @property + def delayed_acks(self): + return flatten_boolean(self._values['delayed_acks']) + + @property + def delay_window_control(self): + return flatten_boolean(self._values['delay_window_control']) + + @property + def deferred_accept(self): + return flatten_boolean(self._values['deferred_accept']) + + @property + def congestion_metrics_cache(self): + return flatten_boolean(self._values['congestion_metrics_cache']) + + @property + def auto_send_buffer(self): + return flatten_boolean(self._values['auto_send_buffer']) + + @property + def auto_receive_window(self): + return flatten_boolean(self._values['auto_receive_window']) + + @property + def auto_proxy_buffer(self): + return flatten_boolean(self._values['auto_proxy_buffer']) + + @property + def abc(self): + return flatten_boolean(self._values['abc']) + + @property + def ack_on_push(self): + return flatten_boolean(self._values['ack_on_push']) + + +class TcpProfilesFactManager(BaseManager): + def __init__(self, *args, **kwargs): + self.client = kwargs.get('client', None) + self.module = kwargs.get('module', None) + super(TcpProfilesFactManager, self).__init__(**kwargs) + self.want = TcpProfilesParameters(params=self.module.params) + + def exec_module(self): + facts = self._exec_module() + result = dict(tcp_profiles=facts) + return result + + def _exec_module(self): + results = [] + facts = self.read_facts() + for item in facts: + attrs = item.to_return() + results.append(attrs) + results = sorted(results, key=lambda k: k['full_path']) + return results + + def read_facts(self): + results = [] + collection = self.read_collection_from_device() + for resource in collection: + params = TcpProfilesParameters(params=resource) + results.append(params) + return results + + def read_collection_from_device(self): + uri = "https://{0}:{1}/mgmt/tm/ltm/profile/tcp".format( + self.client.provider['server'], + self.client.provider['server_port'], + ) + resp = self.client.api.get(uri) + try: + response = resp.json() + except ValueError as ex: + raise F5ModuleError(str(ex)) + if 'code' in response and response['code'] == 400: + if 'message' in response: + raise F5ModuleError(response['message']) + else: + raise F5ModuleError(resp.content) + if 'items' not in response: + return [] + result = response['items'] + return result + + +class TrafficGroupsParameters(BaseParameters): + api_map = { + 'fullPath': 'full_path', + 'autoFailbackEnabled': 'auto_failback_enabled', + 'autoFailbackTime': 'auto_failback_time', + 'haLoadFactor': 'ha_load_factor', + 'haOrder': 'ha_order', + 'isFloating': 'is_floating', + 'mac': 'mac_masquerade_address' + } + + returnables = [ + 'full_path', + 'name', + 'description', + 'auto_failback_enabled', + 'auto_failback_time', + 'ha_load_factor', + 'ha_order', + 'is_floating', + 'mac_masquerade_address' + ] + + @property + def auto_failback_time(self): + if self._values['auto_failback_time'] is None: + return None + return int(self._values['auto_failback_time']) + + @property + def auto_failback_enabled(self): + if 
self._values['auto_failback_enabled'] is None: + return None + elif self._values['auto_failback_enabled'] == 'false': + # Yes, the REST API stores this as a string + return 'no' + return 'yes' + + @property + def is_floating(self): + if self._values['is_floating'] is None: + return None + elif self._values['is_floating'] == 'true': + # Yes, the REST API stores this as a string + return 'yes' + return 'no' + + @property + def mac_masquerade_address(self): + if self._values['mac_masquerade_address'] in [None, 'none']: + return None + return self._values['mac_masquerade_address'] + + +class TrafficGroupsFactManager(BaseManager): + def __init__(self, *args, **kwargs): + self.client = kwargs.get('client', None) + self.module = kwargs.get('module', None) + super(TrafficGroupsFactManager, self).__init__(**kwargs) + self.want = TrafficGroupsParameters(params=self.module.params) + + def exec_module(self): + facts = self._exec_module() + result = dict(traffic_groups=facts) + return result + + def _exec_module(self): + results = [] + facts = self.read_facts() + for item in facts: + attrs = item.to_return() + results.append(attrs) + results = sorted(results, key=lambda k: k['full_path']) + return results + + def read_facts(self): + results = [] + collection = self.read_collection_from_device() + for resource in collection: + attrs = resource + attrs['stats'] = self.read_stats_from_device(attrs['fullPath']) + params = TrafficGroupsParameters(params=attrs) + results.append(params) + return results + + def read_collection_from_device(self): + uri = "https://{0}:{1}/mgmt/tm/cm/traffic-group".format( + self.client.provider['server'], + self.client.provider['server_port'], + ) + resp = self.client.api.get(uri) + try: + response = resp.json() + except ValueError as ex: + raise F5ModuleError(str(ex)) + if 'code' in response and response['code'] == 400: + if 'message' in response: + raise F5ModuleError(response['message']) + else: + raise F5ModuleError(resp.content) + if 'items' not in response: + return [] + result = response['items'] + return result + + def read_stats_from_device(self, full_path): + uri = "https://{0}:{1}/mgmt/tm/cm/traffic-group/{2}/stats".format( + self.client.provider['server'], + self.client.provider['server_port'], + transform_name(name=full_path) + ) + resp = self.client.api.get(uri) + try: + response = resp.json() + except ValueError as ex: + raise F5ModuleError(str(ex)) + if 'code' in response and response['code'] == 400: + if 'message' in response: + raise F5ModuleError(response['message']) + else: + raise F5ModuleError(resp.content) + result = parseStats(response) + try: + return result['stats'] + except KeyError: + return {} + + +class TrunksParameters(BaseParameters): + api_map = { + 'fullPath': 'full_path', + 'media': 'media_speed', + 'lacpMode': 'lacp_mode', + 'lacp': 'lacp_state', + 'lacpTimeout': 'lacp_timeout', + 'stp': 'stp_enabled', + 'workingMbrCount': 'operational_member_count', + 'linkSelectPolicy': 'link_selection_policy', + 'distributionHash': 'distribution_hash', + 'cfgMbrCount': 'configured_member_count' + } + + returnables = [ + 'full_path', + 'name', + 'description', + 'media_speed', + 'lacp_mode', # 'active' or 'passive' + 'lacp_enabled', + 'stp_enabled', + 'operational_member_count', + 'media_status', + 'link_selection_policy', + 'lacp_timeout', + 'interfaces', + 'distribution_hash', + 'configured_member_count' + ] + + @property + def lacp_enabled(self): + if self._values['lacp_enabled'] is None: + return None + elif self._values['lacp_enabled'] == 'disabled': + 
return 'no' + return 'yes' + + @property + def stp_enabled(self): + if self._values['stp_enabled'] is None: + return None + elif self._values['stp_enabled'] == 'disabled': + return 'no' + return 'yes' + + @property + def media_status(self): + return self._values['stats']['status'] + + +class TrunksFactManager(BaseManager): + def __init__(self, *args, **kwargs): + self.client = kwargs.get('client', None) + self.module = kwargs.get('module', None) + super(TrunksFactManager, self).__init__(**kwargs) + self.want = TrunksParameters(params=self.module.params) + + def exec_module(self): + facts = self._exec_module() + result = dict(trunks=facts) + return result + + def _exec_module(self): + results = [] + facts = self.read_facts() + for item in facts: + attrs = item.to_return() + results.append(attrs) + results = sorted(results, key=lambda k: k['full_path']) + return results + + def read_facts(self): + results = [] + collection = self.read_collection_from_device() + for resource in collection: + attrs = resource + attrs['stats'] = self.read_stats_from_device(attrs['fullPath']) + params = TrunksParameters(params=attrs) + results.append(params) + return results + + def read_collection_from_device(self): + uri = "https://{0}:{1}/mgmt/tm/net/trunk".format( + self.client.provider['server'], + self.client.provider['server_port'], + ) + resp = self.client.api.get(uri) + try: + response = resp.json() + except ValueError as ex: + raise F5ModuleError(str(ex)) + if 'code' in response and response['code'] == 400: + if 'message' in response: + raise F5ModuleError(response['message']) + else: + raise F5ModuleError(resp.content) + if 'items' not in response: + return [] + result = response['items'] + return result + + def read_stats_from_device(self, full_path): + uri = "https://{0}:{1}/mgmt/tm/net/trunk/{2}/stats".format( + self.client.provider['server'], + self.client.provider['server_port'], + transform_name(name=full_path) + ) + resp = self.client.api.get(uri) + try: + response = resp.json() + except ValueError as ex: + raise F5ModuleError(str(ex)) + if 'code' in response and response['code'] == 400: + if 'message' in response: + raise F5ModuleError(response['message']) + else: + raise F5ModuleError(resp.content) + result = parseStats(response) + try: + return result['stats'] + except KeyError: + return {} + + +class UsersParameters(BaseParameters): + api_map = { + 'fullPath': 'full_path', + 'partitionAccess': 'partition_access', + } + + returnables = [ + 'full_path', + 'name', + 'description', + 'partition_access', + 'shell', + ] + + @property + def partition_access(self): + result = [] + if self._values['partition_access'] is None: + return [] + for partition in self._values['partition_access']: + del partition['nameReference'] + result.append(partition) + return result + + @property + def shell(self): + if self._values['shell'] in [None, 'none']: + return None + return self._values['shell'] + + +class UsersFactManager(BaseManager): + def __init__(self, *args, **kwargs): + self.client = kwargs.get('client', None) + self.module = kwargs.get('module', None) + super(UsersFactManager, self).__init__(**kwargs) + self.want = UsersParameters(params=self.module.params) + + def exec_module(self): + facts = self._exec_module() + result = dict(users=facts) + return result + + def _exec_module(self): + results = [] + facts = self.read_facts() + for item in facts: + attrs = item.to_return() + results.append(attrs) + results = sorted(results, key=lambda k: k['full_path']) + return results + + def read_facts(self): + 
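# Wrap each raw REST resource in UsersParameters so that api_map renaming + # and the computed properties apply when to_return() runs in _exec_module(). +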
results = [] + collection = self.read_collection_from_device() + for resource in collection: + attrs = resource + params = UsersParameters(params=attrs) + results.append(params) + return results + + def read_collection_from_device(self): + uri = "https://{0}:{1}/mgmt/tm/auth/user".format( + self.client.provider['server'], + self.client.provider['server_port'], + ) + resp = self.client.api.get(uri) + try: + response = resp.json() + except ValueError as ex: + raise F5ModuleError(str(ex)) + if 'code' in response and response['code'] == 400: + if 'message' in response: + raise F5ModuleError(response['message']) + else: + raise F5ModuleError(resp.content) + if 'items' not in response: + return [] + result = response['items'] + return result + + +class UdpProfilesParameters(BaseParameters): + api_map = { + 'fullPath': 'full_path', + 'allowNoPayload': 'allow_no_payload', + 'bufferMaxBytes': 'buffer_max_bytes', + 'bufferMaxPackets': 'buffer_max_packets', + 'datagramLoadBalancing': 'datagram_load_balancing', + 'defaultsFrom': 'parent', + 'idleTimeout': 'idle_timeout', + 'ipDfMode': 'ip_df_mode', + 'ipTosToClient': 'ip_tos_to_client', + 'ipTtlMode': 'ip_ttl_mode', + 'ipTtlV4': 'ip_ttl_v4', + 'ipTtlV6': 'ip_ttl_v6', + 'linkQosToClient': 'link_qos_to_client', + 'noChecksum': 'no_checksum', + 'proxyMss': 'proxy_mss', + } + + returnables = [ + 'full_path', + 'name', + 'parent', + 'description', + 'allow_no_payload', + 'buffer_max_bytes', + 'buffer_max_packets', + 'datagram_load_balancing', + 'idle_timeout', + 'ip_df_mode', + 'ip_tos_to_client', + 'ip_ttl_mode', + 'ip_ttl_v4', + 'ip_ttl_v6', + 'link_qos_to_client', + 'no_checksum', + 'proxy_mss', + ] + + @property + def description(self): + if self._values['description'] in [None, 'none']: + return None + return self._values['description'] + + @property + def allow_no_payload(self): + return flatten_boolean(self._values['allow_no_payload']) + + @property + def datagram_load_balancing(self): + return flatten_boolean(self._values['datagram_load_balancing']) + + @property + def proxy_mss(self): + return flatten_boolean(self._values['proxy_mss']) + + @property + def no_checksum(self): + return flatten_boolean(self._values['no_checksum']) + + +class UdpProfilesFactManager(BaseManager): + def __init__(self, *args, **kwargs): + self.client = kwargs.get('client', None) + self.module = kwargs.get('module', None) + super(UdpProfilesFactManager, self).__init__(**kwargs) + self.want = UdpProfilesParameters(params=self.module.params) + + def exec_module(self): + facts = self._exec_module() + result = dict(udp_profiles=facts) + return result + + def _exec_module(self): + results = [] + facts = self.read_facts() + for item in facts: + attrs = item.to_return() + results.append(attrs) + results = sorted(results, key=lambda k: k['full_path']) + return results + + def read_facts(self): + results = [] + collection = self.read_collection_from_device() + for resource in collection: + params = UdpProfilesParameters(params=resource) + results.append(params) + return results + + def read_collection_from_device(self): + uri = "https://{0}:{1}/mgmt/tm/ltm/profile/udp".format( + self.client.provider['server'], + self.client.provider['server_port'], + ) + resp = self.client.api.get(uri) + try: + response = resp.json() + except ValueError as ex: + raise F5ModuleError(str(ex)) + if 'code' in response and response['code'] == 400: + if 'message' in response: + raise F5ModuleError(response['message']) + else: + raise F5ModuleError(resp.content) + if 'items' not in response: + return [] + 
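# The 'items' key holds the collection members; its absence means the device + # returned an empty collection, which is reported as [] above, not an error. +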
result = response['items'] + return result + + +class VcmpGuestsParameters(BaseParameters): + api_map = { + 'fullPath': 'full_path', + 'allowedSlots': 'allowed_slots', + 'assignedSlots': 'assigned_slots', + 'bootPriority': 'boot_priority', + 'coresPerSlot': 'cores_per_slot', + 'initialImage': 'initial_image', + 'initialHotfix': 'hotfix_image', + 'managementGw': 'mgmt_route', + 'managementIp': 'mgmt_address', + 'managementNetwork': 'mgmt_network', + 'minSlots': 'min_number_of_slots', + 'slots': 'number_of_slots', + 'sslMode': 'ssl_mode', + 'virtualDisk': 'virtual_disk' + } + + returnables = [ + 'name', + 'full_path', + 'allowed_slots', + 'assigned_slots', + 'boot_priority', + 'cores_per_slot', + 'hostname', + 'hotfix_image', + 'initial_image', + 'mgmt_route', + 'mgmt_address', + 'mgmt_network', + 'vlans', + 'min_number_of_slots', + 'number_of_slots', + 'ssl_mode', + 'state', + 'virtual_disk', + ] + + +class VcmpGuestsFactManager(BaseManager): + def __init__(self, *args, **kwargs): + self.client = kwargs.get('client', None) + self.module = kwargs.get('module', None) + super(VcmpGuestsFactManager, self).__init__(**kwargs) + self.want = VcmpGuestsParameters(params=self.module.params) + + def exec_module(self): + facts = self._exec_module() + result = dict(vcmp_guests=facts) + return result + + def _exec_module(self): + if 'vcmp' not in self.provisioned_modules: + return [] + results = [] + facts = self.read_facts() + for item in facts: + attrs = item.to_return() + results.append(attrs) + results = sorted(results, key=lambda k: k['full_path']) + return results + + def read_facts(self): + results = [] + collection = self.read_collection_from_device() + for resource in collection: + params = VcmpGuestsParameters(params=resource) + results.append(params) + return results + + def read_collection_from_device(self): + uri = "https://{0}:{1}/mgmt/tm/vcmp/guest".format( + self.client.provider['server'], + self.client.provider['server_port'], + ) + resp = self.client.api.get(uri) + try: + response = resp.json() + except ValueError as ex: + raise F5ModuleError(str(ex)) + if 'code' in response and response['code'] == 400: + if 'message' in response: + raise F5ModuleError(response['message']) + else: + raise F5ModuleError(resp.content) + if 'items' not in response: + return [] + result = response['items'] + return result + + +class VirtualAddressesParameters(BaseParameters): + api_map = { + 'fullPath': 'full_path', + 'arp': 'arp_enabled', + 'autoDelete': 'auto_delete_enabled', + 'connectionLimit': 'connection_limit', + 'icmpEcho': 'icmp_echo', + 'mask': 'netmask', + 'routeAdvertisement': 'route_advertisement', + 'trafficGroup': 'traffic_group', + 'inheritedTrafficGroup': 'inherited_traffic_group' + } + + returnables = [ + 'full_path', + 'name', + 'address', + 'arp_enabled', + 'auto_delete_enabled', + 'connection_limit', + 'description', + 'enabled', + 'icmp_echo', + 'floating', + 'netmask', + 'route_advertisement', + 'traffic_group', + 'spanning', + 'inherited_traffic_group' + ] + + @property + def spanning(self): + return flatten_boolean(self._values['spanning']) + + @property + def arp_enabled(self): + return flatten_boolean(self._values['arp_enabled']) + + @property + def route_advertisement(self): + return flatten_boolean(self._values['route_advertisement']) + + @property + def auto_delete_enabled(self): + return flatten_boolean(self._values['auto_delete_enabled']) + + @property + def inherited_traffic_group(self): + return flatten_boolean(self._values['inherited_traffic_group']) + + @property + def 
icmp_echo(self): + return flatten_boolean(self._values['icmp_echo']) + + @property + def floating(self): + return flatten_boolean(self._values['floating']) + + @property + def enabled(self): + return flatten_boolean(self._values['enabled']) + + +class VirtualAddressesFactManager(BaseManager): + def __init__(self, *args, **kwargs): + self.client = kwargs.get('client', None) + self.module = kwargs.get('module', None) + super(VirtualAddressesFactManager, self).__init__(**kwargs) + self.want = VirtualAddressesParameters(params=self.module.params) + + def exec_module(self): + facts = self._exec_module() + result = dict(virtual_addresses=facts) + return result + + def _exec_module(self): + results = [] + facts = self.read_facts() + for item in facts: + attrs = item.to_return() + results.append(attrs) + results = sorted(results, key=lambda k: k['full_path']) + return results + + def read_facts(self): + results = [] + collection = self.read_collection_from_device() + for resource in collection: + params = VirtualAddressesParameters(params=resource) + results.append(params) + return results + + def read_collection_from_device(self): + uri = "https://{0}:{1}/mgmt/tm/ltm/virtual-address".format( + self.client.provider['server'], + self.client.provider['server_port'], + ) + resp = self.client.api.get(uri) + try: + response = resp.json() + except ValueError as ex: + raise F5ModuleError(str(ex)) + if 'code' in response and response['code'] == 400: + if 'message' in response: + raise F5ModuleError(response['message']) + else: + raise F5ModuleError(resp.content) + if 'items' not in response: + return [] + result = response['items'] + return result + + +class VirtualServersParameters(BaseParameters): + api_map = { + 'fullPath': 'full_path', + 'autoLasthop': 'auto_lasthop', + 'bwcPolicy': 'bw_controller_policy', + 'cmpEnabled': 'cmp_enabled', + 'connectionLimit': 'connection_limit', + 'fallbackPersistence': 'fallback_persistence_profile', + 'persist': 'persistence_profile', + 'translatePort': 'translate_port', + 'translateAddress': 'translate_address', + 'lastHopPool': 'last_hop_pool', + 'nat64': 'nat64_enabled', + 'sourcePort': 'source_port_behavior', + 'ipIntelligencePolicy': 'ip_intelligence_policy', + 'ipProtocol': 'protocol', + 'pool': 'default_pool', + 'rateLimitMode': 'rate_limit_mode', + 'rateLimitSrcMask': 'rate_limit_source_mask', + 'rateLimitDstMask': 'rate_limit_destination_mask', + 'rateLimit': 'rate_limit', + 'sourceAddressTranslation': 'snat_type', + 'gtmScore': 'gtm_score', + 'rateClass': 'rate_class', + 'source': 'source_address', + 'auth': 'authentication_profile', + 'mirror': 'connection_mirror_enabled', + 'rules': 'irules', + 'securityLogProfiles': 'security_log_profiles', + 'profilesReference': 'profiles' + } + + returnables = [ + 'full_path', + 'name', + 'auto_lasthop', + 'bw_controller_policy', + 'cmp_enabled', + 'connection_limit', + 'description', + 'enabled', + 'fallback_persistence_profile', + 'persistence_profile', + 'translate_port', + 'translate_address', + 'vlans', + 'destination', + 'last_hop_pool', + 'nat64_enabled', + 'source_port_behavior', + 'ip_intelligence_policy', + 'protocol', + 'default_pool', + 'rate_limit_mode', + 'rate_limit_source_mask', + 'rate_limit', + 'snat_type', + 'snat_pool', + 'gtm_score', + 'rate_class', + 'rate_limit_destination_mask', + 'source_address', + 'authentication_profile', + 'connection_mirror_enabled', + 'irules', + 'security_log_profiles', + 'type', + 'profiles', + 'destination_address', + 'destination_port', + 'availability_status', + 
'status_reason', + 'total_requests', + 'client_side_bits_in', + 'client_side_bits_out', + 'client_side_current_connections', + 'client_side_evicted_connections', + 'client_side_max_connections', + 'client_side_pkts_in', + 'client_side_pkts_out', + 'client_side_slow_killed', + 'client_side_total_connections', + 'cmp_mode', + 'ephemeral_bits_in', + 'ephemeral_bits_out', + 'ephemeral_current_connections', + 'ephemeral_evicted_connections', + 'ephemeral_max_connections', + 'ephemeral_pkts_in', + 'ephemeral_pkts_out', + 'ephemeral_slow_killed', + 'ephemeral_total_connections', + 'total_software_accepted_syn_cookies', + 'total_hardware_accepted_syn_cookies', + 'total_hardware_syn_cookies', + 'hardware_syn_cookie_instances', + 'total_software_rejected_syn_cookies', + 'software_syn_cookie_instances', + 'current_syn_cache', + 'syn_cache_overflow', + 'total_software_syn_cookies', + 'syn_cookies_status', + 'max_conn_duration', + 'mean_conn_duration', + 'min_conn_duration', + 'cpu_usage_ratio_last_5_min', + 'cpu_usage_ratio_last_5_sec', + 'cpu_usage_ratio_last_1_min', + ] + + @property + def max_conn_duration(self): + return self._values['stats']['csMaxConnDur'] + + @property + def mean_conn_duration(self): + return self._values['stats']['csMeanConnDur'] + + @property + def min_conn_duration(self): + return self._values['stats']['csMinConnDur'] + + @property + def cpu_usage_ratio_last_5_min(self): + return self._values['stats']['fiveMinAvgUsageRatio'] + + @property + def cpu_usage_ratio_last_5_sec(self): + return self._values['stats']['fiveSecAvgUsageRatio'] + + @property + def cpu_usage_ratio_last_1_min(self): + return self._values['stats']['oneMinAvgUsageRatio'] + + @property + def cmp_mode(self): + return self._values['stats']['cmpEnableMode'] + + @property + def availability_status(self): + return self._values['stats']['status']['availabilityState'] + + @property + def status_reason(self): + return self._values['stats']['status']['statusReason'] + + @property + def total_requests(self): + return self._values['stats']['totRequests'] + + @property + def ephemeral_bits_in(self): + return self._values['stats']['ephemeral']['bitsIn'] + + @property + def ephemeral_bits_out(self): + return self._values['stats']['ephemeral']['bitsOut'] + + @property + def ephemeral_current_connections(self): + return self._values['stats']['ephemeral']['curConns'] + + @property + def ephemeral_evicted_connections(self): + return self._values['stats']['ephemeral']['evictedConns'] + + @property + def ephemeral_max_connections(self): + return self._values['stats']['ephemeral']['maxConns'] + + @property + def ephemeral_pkts_in(self): + return self._values['stats']['ephemeral']['pktsIn'] + + @property + def ephemeral_pkts_out(self): + return self._values['stats']['ephemeral']['pktsOut'] + + @property + def ephemeral_slow_killed(self): + return self._values['stats']['ephemeral']['slowKilled'] + + @property + def ephemeral_total_connections(self): + return self._values['stats']['ephemeral']['totConns'] + + @property + def client_side_bits_in(self): + return self._values['stats']['clientside']['bitsIn'] + + @property + def client_side_bits_out(self): + return self._values['stats']['clientside']['bitsOut'] + + @property + def client_side_current_connections(self): + return self._values['stats']['clientside']['curConns'] + + @property + def client_side_evicted_connections(self): + return self._values['stats']['clientside']['evictedConns'] + + @property + def client_side_max_connections(self): + return 
self._values['stats']['clientside']['maxConns'] + + @property + def client_side_pkts_in(self): + return self._values['stats']['clientside']['pktsIn'] + + @property + def client_side_pkts_out(self): + return self._values['stats']['clientside']['pktsOut'] + + @property + def client_side_slow_killed(self): + return self._values['stats']['clientside']['slowKilled'] + + @property + def client_side_total_connections(self): + return self._values['stats']['clientside']['totConns'] + + @property + def total_software_accepted_syn_cookies(self): + return self._values['stats']['syncookie']['accepts'] + + @property + def total_hardware_accepted_syn_cookies(self): + return self._values['stats']['syncookie']['hwAccepts'] + + @property + def total_hardware_syn_cookies(self): + return self._values['stats']['syncookie']['hwSyncookies'] + + @property + def hardware_syn_cookie_instances(self): + return self._values['stats']['syncookie']['hwsyncookieInstance'] + + @property + def total_software_rejected_syn_cookies(self): + return self._values['stats']['syncookie']['rejects'] + + @property + def software_syn_cookie_instances(self): + return self._values['stats']['syncookie']['swsyncookieInstance'] + + @property + def current_syn_cache(self): + return self._values['stats']['syncookie']['syncacheCurr'] + + @property + def syn_cache_overflow(self): + return self._values['stats']['syncookie']['syncacheOver'] + + @property + def total_software_syn_cookies(self): + return self._values['stats']['syncookie']['syncookies'] + + @property + def syn_cookies_status(self): + return self._values['stats']['syncookieStatus'] + + @property + def destination_address(self): + if self._values['destination'] is None: + return None + tup = self.destination_tuple + return tup.ip + + @property + def destination_port(self): + if self._values['destination'] is None: + return None + tup = self.destination_tuple + return tup.port + + @property + def type(self): + """Attempt to determine the current server type + + This check is very unscientific. It turns out that this information is not + exactly available anywhere on a BIG-IP. Instead, we rely on a semi-reliable + means for determining what the type of the virtual server is. Hopefully it + always works. + + There are a handful of attributes that can be used to determine a specific + type. There are some types though that can only be determined by looking at + the profiles that are assigned to them. We follow that method for those + complicated types; message-routing, fasthttp, and fastl4. + + Because type determination is an expensive operation, we cache the result + from the operation. + + Returns: + string: The server type. + """ + if self._values['l2Forward'] is True: + result = 'forwarding-l2' + elif self._values['ipForward'] is True: + result = 'forwarding-ip' + elif self._values['stateless'] is True: + result = 'stateless' + elif self._values['reject'] is True: + result = 'reject' + elif self._values['dhcpRelay'] is True: + result = 'dhcp' + elif self._values['internal'] is True: + result = 'internal' + elif self.has_fasthttp_profiles: + result = 'performance-http' + elif self.has_fastl4_profiles: + result = 'performance-l4' + elif self.has_message_routing_profiles: + result = 'message-routing' + else: + result = 'standard' + return result + + @property + def profiles(self): + """Returns a list of profiles from the API + + The profiles are formatted so that they are usable in this module and + are able to be compared by the Difference engine. 
+ + Returns: + list (:obj:`list` of :obj:`dict`): List of profiles. + + Each dictionary in the list contains the following three (3) keys. + + * name + * context + * full_path + + Raises: + F5ModuleError: If the specified context is a value other than + ``all``, ``server-side``, or ``client-side``. + """ + if 'items' not in self._values['profiles']: + return None + result = [] + for item in self._values['profiles']['items']: + context = item['context'] + if context == 'serverside': + context = 'server-side' + elif context == 'clientside': + context = 'client-side' + name = item['name'] + if context in ['all', 'server-side', 'client-side']: + result.append(dict(name=name, context=context, full_path=item['fullPath'])) + else: + raise F5ModuleError( + "Unknown profile context found: '{0}'".format(context) + ) + return result + + @property + def has_message_routing_profiles(self): + if self.profiles is None: + return None + current = self._read_current_message_routing_profiles_from_device() + result = [x['name'] for x in self.profiles if x['name'] in current] + if len(result) > 0: + return True + return False + + @property + def has_fastl4_profiles(self): + if self.profiles is None: + return None + current = self._read_current_fastl4_profiles_from_device() + result = [x['name'] for x in self.profiles if x['name'] in current] + if len(result) > 0: + return True + return False + + @property + def has_fasthttp_profiles(self): + """Check if ``fasthttp`` profile is in API profiles + + This method is used to determine the server type when doing comparisons + in the Difference class. + + Returns: + bool: True if server has ``fasthttp`` profiles. False otherwise. + """ + if self.profiles is None: + return None + current = self._read_current_fasthttp_profiles_from_device() + result = [x['name'] for x in self.profiles if x['name'] in current] + if len(result) > 0: + return True + return False + + def _read_current_message_routing_profiles_from_device(self): + result = [] + result += self._read_diameter_profiles_from_device() + result += self._read_sip_profiles_from_device() + return result + + def _read_diameter_profiles_from_device(self): + uri = "https://{0}:{1}/mgmt/tm/ltm/profile/diameter/".format( + self.client.provider['server'], + self.client.provider['server_port'], + ) + resp = self.client.api.get(uri) + try: + response = resp.json() + except ValueError as ex: + raise F5ModuleError(str(ex)) + + if 'code' in response and response['code'] == 400: + if 'message' in response: + raise F5ModuleError(response['message']) + else: + raise F5ModuleError(resp.content) + result = [x['name'] for x in response['items']] + return result + + def _read_sip_profiles_from_device(self): + uri = "https://{0}:{1}/mgmt/tm/ltm/profile/sip/".format( + self.client.provider['server'], + self.client.provider['server_port'], + ) + resp = self.client.api.get(uri) + try: + response = resp.json() + except ValueError as ex: + raise F5ModuleError(str(ex)) + + if 'code' in response and response['code'] == 400: + if 'message' in response: + raise F5ModuleError(response['message']) + else: + raise F5ModuleError(resp.content) + result = [x['name'] for x in response['items']] + return result + + def _read_current_fastl4_profiles_from_device(self): + uri = "https://{0}:{1}/mgmt/tm/ltm/profile/fastl4/".format( + self.client.provider['server'], + self.client.provider['server_port'], + ) + resp = self.client.api.get(uri) + try: + response = resp.json() + except ValueError as ex: + raise F5ModuleError(str(ex)) + + if 'code' in response 
and response['code'] == 400: + if 'message' in response: + raise F5ModuleError(response['message']) + else: + raise F5ModuleError(resp.content) + result = [x['name'] for x in response['items']] + return result + + def _read_current_fasthttp_profiles_from_device(self): + uri = "https://{0}:{1}/mgmt/tm/ltm/profile/fasthttp/".format( + self.client.provider['server'], + self.client.provider['server_port'], + ) + resp = self.client.api.get(uri) + try: + response = resp.json() + except ValueError as ex: + raise F5ModuleError(str(ex)) + + if 'code' in response and response['code'] == 400: + if 'message' in response: + raise F5ModuleError(response['message']) + else: + raise F5ModuleError(resp.content) + result = [x['name'] for x in response['items']] + return result + + @property + def security_log_profiles(self): + if self._values['security_log_profiles'] is None: + return None + result = list(set([x.strip('"') for x in self._values['security_log_profiles']])) + result.sort() + return result + + @property + def snat_type(self): + if self._values['snat_type'] is None: + return None + if 'type' in self._values['snat_type']: + if self._values['snat_type']['type'] == 'automap': + return 'automap' + elif self._values['snat_type']['type'] == 'none': + return 'none' + elif self._values['snat_type']['type'] == 'pool': + return 'snat' + + @property + def connection_mirror_enabled(self): + if self._values['connection_mirror_enabled'] is None: + return None + elif self._values['connection_mirror_enabled'] == 'enabled': + return 'yes' + return 'no' + + @property + def rate_limit(self): + if self._values['rate_limit'] is None: + return None + elif self._values['rate_limit'] == 'disabled': + return -1 + return int(self._values['rate_limit']) + + @property + def nat64_enabled(self): + if self._values['nat64_enabled'] is None: + return None + elif self._values['nat64_enabled'] == 'enabled': + return 'yes' + return 'no' + + @property + def enabled(self): + if self._values['enabled'] is None: + return 'no' + elif self._values['enabled'] is True: + return 'yes' + return 'no' + + @property + def translate_port(self): + if self._values['translate_port'] is None: + return None + elif self._values['translate_port'] == 'enabled': + return 'yes' + return 'no' + + @property + def translate_address(self): + if self._values['translate_address'] is None: + return None + elif self._values['translate_address'] == 'enabled': + return 'yes' + return 'no' + + @property + def persistence_profile(self): + """Return persistence profile in a consumable form + + I don't know why the persistence profile is stored this way, but below is the + general format of it. + + "persist": [ + { + "name": "msrdp", + "partition": "Common", + "tmDefault": "yes", + "nameReference": { + "link": "https://localhost/mgmt/tm/ltm/persistence/msrdp/~Common~msrdp?ver=13.1.0.4" + } + } + ], + + As you can see, this is quite different from something like the fallback + persistence profile which is just simply + + /Common/fallback1 + + This method makes the persistence profile look like the fallback profile. + + Returns: + string: The persistence profile configured on the virtual. 
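+ + For example, the ``persist`` sample shown above comes back from this + property as ``/Common/msrdp``.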
+ """ + if self._values['persistence_profile'] is None: + return None + profile = self._values['persistence_profile'][0] + result = fq_name(profile['partition'], profile['name']) + return result + + @property + def destination_tuple(self): + Destination = namedtuple('Destination', ['ip', 'port', 'route_domain']) + + # Remove the partition + if self._values['destination'] is None: + result = Destination(ip=None, port=None, route_domain=None) + return result + destination = re.sub(r'^/[a-zA-Z0-9_.-]+/', '', self._values['destination']) + + if is_valid_ip(destination): + result = Destination( + ip=destination, + port=None, + route_domain=None + ) + return result + + # Covers the following examples + # + # /Common/2700:bc00:1f10:101::6%2.80 + # 2700:bc00:1f10:101::6%2.80 + # 1.1.1.1%2:80 + # /Common/1.1.1.1%2:80 + # /Common/2700:bc00:1f10:101::6%2.any + # + pattern = r'(?P<ip>[^%]+)%(?P<route_domain>[0-9]+)[:.](?P<port>[0-9]+|any)' + matches = re.search(pattern, destination) + if matches: + try: + port = int(matches.group('port')) + except ValueError: + # Can be a port of "any". This only happens with IPv6 + port = matches.group('port') + if port == 'any': + port = 0 + ip = matches.group('ip') + if not is_valid_ip(ip): + raise F5ModuleError( + "The provided destination is not a valid IP address" + ) + result = Destination( + ip=matches.group('ip'), + port=port, + route_domain=int(matches.group('route_domain')) + ) + return result + + pattern = r'(?P<ip>[^%]+)%(?P<route_domain>[0-9]+)' + matches = re.search(pattern, destination) + if matches: + ip = matches.group('ip') + if not is_valid_ip(ip): + raise F5ModuleError( + "The provided destination is not a valid IP address" + ) + result = Destination( + ip=matches.group('ip'), + port=None, + route_domain=int(matches.group('route_domain')) + ) + return result + + parts = destination.split('.') + if len(parts) == 4: + # IPv4 + ip, port = destination.split(':') + if not is_valid_ip(ip): + raise F5ModuleError( + "The provided destination is not a valid IP address" + ) + result = Destination( + ip=ip, + port=int(port), + route_domain=None + ) + return result + elif len(parts) == 2: + # IPv6 + ip, port = destination.split('.') + try: + port = int(port) + except ValueError: + # Can be a port of "any". 
This only happens with IPv6 + if port == 'any': + port = 0 + if not is_valid_ip(ip): + raise F5ModuleError( + "The provided destination is not a valid IP address" + ) + result = Destination( + ip=ip, + port=port, + route_domain=None + ) + return result + else: + result = Destination(ip=None, port=None, route_domain=None) + return result + + +class VirtualServersFactManager(BaseManager): + def __init__(self, *args, **kwargs): + self.client = kwargs.get('client', None) + self.module = kwargs.get('module', None) + super(VirtualServersFactManager, self).__init__(**kwargs) + self.want = VirtualServersParameters(client=self.client, params=self.module.params) + + def exec_module(self): + facts = self._exec_module() + result = dict(virtual_servers=facts) + return result + + def _exec_module(self): + results = [] + facts = self.read_facts() + for item in facts: + attrs = item.to_return() + results.append(attrs) + results = sorted(results, key=lambda k: k['full_path']) + return results + + def read_facts(self): + results = [] + collection = self.read_collection_from_device() + for resource in collection: + attrs = resource + attrs['stats'] = self.read_stats_from_device(attrs['fullPath']) + params = VirtualServersParameters(params=attrs) + results.append(params) + return results + + def read_collection_from_device(self): + uri = "https://{0}:{1}/mgmt/tm/ltm/virtual?expandSubcollections=true".format( + self.client.provider['server'], + self.client.provider['server_port'], + ) + resp = self.client.api.get(uri) + try: + response = resp.json() + except ValueError as ex: + raise F5ModuleError(str(ex)) + if 'code' in response and response['code'] == 400: + if 'message' in response: + raise F5ModuleError(response['message']) + else: + raise F5ModuleError(resp.content) + if 'items' not in response: + return [] + result = response['items'] + return result + + def read_stats_from_device(self, full_path): + uri = "https://{0}:{1}/mgmt/tm/ltm/virtual/{2}/stats".format( + self.client.provider['server'], + self.client.provider['server_port'], + transform_name(name=full_path) + ) + resp = self.client.api.get(uri) + try: + response = resp.json() + except ValueError as ex: + raise F5ModuleError(str(ex)) + if 'code' in response and response['code'] == 400: + if 'message' in response: + raise F5ModuleError(response['message']) + else: + raise F5ModuleError(resp.content) + result = parseStats(response) + try: + return result['stats'] + except KeyError: + return {} + + +class VlansParameters(BaseParameters): + api_map = { + 'autoLasthop': 'auto_lasthop', + 'cmpHash': 'cmp_hash_algorithm', + 'failsafeAction': 'failsafe_action', + 'failsafe': 'failsafe_enabled', + 'failsafeTimeout': 'failsafe_timeout', + 'ifIndex': 'if_index', + 'learning': 'learning_mode', + 'interfacesReference': 'interfaces', + 'sourceChecking': 'source_check_enabled', + 'fullPath': 'full_path' + } + + returnables = [ + 'full_path', + 'name', + 'auto_lasthop', + 'cmp_hash_algorithm', + 'description', + 'failsafe_action', + 'failsafe_enabled', + 'failsafe_timeout', + 'if_index', + 'learning_mode', + 'interfaces', + 'mtu', + 'sflow_poll_interval', + 'sflow_poll_interval_global', + 'sflow_sampling_rate', + 'sflow_sampling_rate_global', + 'source_check_enabled', + 'true_mac_address', + 'tag', + ] + + @property + def interfaces(self): + if self._values['interfaces'] is None: + return None + if 'items' not in self._values['interfaces']: + return None + result = [] + for item in self._values['interfaces']['items']: + tmp = dict( + name=item['name'], + 
+
+    @property
+    def sflow_poll_interval(self):
+        return int(self._values['sflow']['pollInterval'])
+
+    @property
+    def sflow_poll_interval_global(self):
+        return flatten_boolean(self._values['sflow']['pollIntervalGlobal'])
+
+    @property
+    def sflow_sampling_rate(self):
+        return int(self._values['sflow']['samplingRate'])
+
+    @property
+    def sflow_sampling_rate_global(self):
+        return flatten_boolean(self._values['sflow']['samplingRateGlobal'])
+
+    @property
+    def source_check_enabled(self):
+        return flatten_boolean(self._values['source_check_enabled'])
+
+    @property
+    def true_mac_address(self):
+        # The REST API only exposes this value in the stats output, as 'macTrue'.
+        return self._values['stats']['macTrue']
+
+    @property
+    def tag(self):
+        # The VLAN tag is exposed in the stats output under the 'id' field.
+        return self._values['stats']['id']
+
+    @property
+    def failsafe_enabled(self):
+        return flatten_boolean(self._values['failsafe_enabled'])
+
+
+class VlansFactManager(BaseManager):
+    def __init__(self, *args, **kwargs):
+        self.client = kwargs.get('client', None)
+        self.module = kwargs.get('module', None)
+        super(VlansFactManager, self).__init__(**kwargs)
+        self.want = VlansParameters(params=self.module.params)
+
+    def exec_module(self):
+        facts = self._exec_module()
+        result = dict(vlans=facts)
+        return result
+
+    def _exec_module(self):
+        results = []
+        facts = self.read_facts()
+        for item in facts:
+            attrs = item.to_return()
+            results.append(attrs)
+        results = sorted(results, key=lambda k: k['full_path'])
+        return results
+
+    def read_facts(self):
+        results = []
+        collection = self.read_collection_from_device()
+        for resource in collection:
+            attrs = resource
+            attrs['stats'] = self.read_stats_from_device(attrs['fullPath'])
+            params = VlansParameters(params=attrs)
+            results.append(params)
+        return results
+
+    def read_collection_from_device(self):
+        uri = "https://{0}:{1}/mgmt/tm/net/vlan?expandSubcollections=true".format(
+            self.client.provider['server'],
+            self.client.provider['server_port'],
+        )
+        resp = self.client.api.get(uri)
+        try:
+            response = resp.json()
+        except ValueError as ex:
+            raise F5ModuleError(str(ex))
+        if 'code' in response and response['code'] == 400:
+            if 'message' in response:
+                raise F5ModuleError(response['message'])
+            else:
+                raise F5ModuleError(resp.content)
+        if 'items' not in response:
+            return []
+        result = response['items']
+        return result
+
+    def read_stats_from_device(self, full_path):
+        uri = "https://{0}:{1}/mgmt/tm/net/vlan/{2}/stats".format(
+            self.client.provider['server'],
+            self.client.provider['server_port'],
+            transform_name(name=full_path)
+        )
+        resp = self.client.api.get(uri)
+        try:
+            response = resp.json()
+        except ValueError as ex:
+            raise F5ModuleError(str(ex))
+        if 'code' in response and response['code'] == 400:
+            if 'message' in response:
+                raise F5ModuleError(response['message'])
+            else:
+                raise F5ModuleError(resp.content)
+        result = parseStats(response)
+        try:
+            return result['stats']
+        except KeyError:
+            return {}
+
+
+class ModuleManager(object):
+    def __init__(self, *args, **kwargs):
+        self.module = kwargs.get('module', None)
+        self.client = kwargs.get('client', None)
+        self.kwargs = kwargs
+        self.want = Parameters(params=self.module.params)
+        self.managers = {
+            'asm-policy-stats': AsmPolicyStatsFactManager,
+            'asm-policies': AsmPolicyFactManager,
+            'asm-server-technologies': AsmServerTechnologyFactManager,
+            'asm-signature-sets':
AsmSignatureSetsFactManager, + 'client-ssl-profiles': ClientSslProfilesFactManager, + 'devices': DevicesFactManager, + 'device-groups': DeviceGroupsFactManager, + 'external-monitors': ExternalMonitorsFactManager, + 'fasthttp-profiles': FastHttpProfilesFactManager, + 'fastl4-profiles': FastL4ProfilesFactManager, + 'gateway-icmp-monitors': GatewayIcmpMonitorsFactManager, + 'gtm-a-pools': GtmAPoolsFactManager, + 'gtm-servers': GtmServersFactManager, + 'gtm-a-wide-ips': GtmAWideIpsFactManager, + 'gtm-aaaa-pools': GtmAaaaPoolsFactManager, + 'gtm-aaaa-wide-ips': GtmAaaaWideIpsFactManager, + 'gtm-cname-pools': GtmCnamePoolsFactManager, + 'gtm-cname-wide-ips': GtmCnameWideIpsFactManager, + 'gtm-mx-pools': GtmMxPoolsFactManager, + 'gtm-mx-wide-ips': GtmMxWideIpsFactManager, + 'gtm-naptr-pools': GtmNaptrPoolsFactManager, + 'gtm-naptr-wide-ips': GtmNaptrWideIpsFactManager, + 'gtm-srv-pools': GtmSrvPoolsFactManager, + 'gtm-srv-wide-ips': GtmSrvWideIpsFactManager, + 'http-monitors': HttpMonitorsFactManager, + 'https-monitors': HttpsMonitorsFactManager, + 'http-profiles': HttpProfilesFactManager, + 'iapp-services': IappServicesFactManager, + 'iapplx-packages': IapplxPackagesFactManager, + 'icmp-monitors': IcmpMonitorsFactManager, + 'interfaces': InterfacesFactManager, + 'internal-data-groups': InternalDataGroupsFactManager, + 'irules': IrulesFactManager, + 'ltm-pools': LtmPoolsFactManager, + 'ltm-policies': LtmPolicyFactManager, + 'nodes': NodesFactManager, + 'oneconnect-profiles': OneConnectProfilesFactManager, + 'partitions': PartitionFactManager, + 'provision-info': ProvisionInfoFactManager, + 'route-domains': RouteDomainFactManager, + 'self-ips': SelfIpsFactManager, + 'server-ssl-profiles': ServerSslProfilesFactManager, + 'software-volumes': SoftwareVolumesFactManager, + 'software-images': SoftwareImagesFactManager, + 'software-hotfixes': SoftwareHotfixesFactManager, + 'ssl-certs': SslCertificatesFactManager, + 'ssl-keys': SslKeysFactManager, + 'system-db': SystemDbFactManager, + 'system-info': SystemInfoFactManager, + 'tcp-monitors': TcpMonitorsFactManager, + 'tcp-half-open-monitors': TcpHalfOpenMonitorsFactManager, + 'tcp-profiles': TcpProfilesFactManager, + 'traffic-groups': TrafficGroupsFactManager, + 'trunks': TrunksFactManager, + 'udp-profiles': UdpProfilesFactManager, + 'users': UsersFactManager, + 'vcmp-guests': VcmpGuestsFactManager, + 'virtual-addresses': VirtualAddressesFactManager, + 'virtual-servers': VirtualServersFactManager, + 'vlans': VlansFactManager, + } + + def exec_module(self): + self.handle_all_keyword() + self.handle_profiles_keyword() + self.handle_monitors_keyword() + self.handle_gtm_pools_keyword() + self.handle_gtm_wide_ips_keyword() + res = self.check_valid_gather_subset(self.want.gather_subset) + if res: + invalid = ','.join(res) + raise F5ModuleError( + "The specified 'gather_subset' options are invalid: {0}".format(invalid) + ) + result = self.filter_excluded_facts() + + managers = [] + for name in result: + manager = self.get_manager(name) + if manager: + managers.append(manager) + + if not managers: + result = dict( + queried=False + ) + return result + + result = self.execute_managers(managers) + if result: + result['queried'] = True + else: + result['queried'] = False + return result + + def filter_excluded_facts(self): + # Remove the excluded entries from the list of possible facts + exclude = [x[1:] for x in self.want.gather_subset if x[0] == '!'] + include = [x for x in self.want.gather_subset if x[0] != '!'] + result = [x for x in include if x not in exclude] 
+ return result + + def handle_all_keyword(self): + if 'all' not in self.want.gather_subset: + return + managers = list(self.managers.keys()) + self.want.gather_subset + managers.remove('all') + self.want.update({'gather_subset': managers}) + + def handle_profiles_keyword(self): + if 'profiles' not in self.want.gather_subset: + return + managers = [x for x in self.managers.keys() if '-profiles' in x] + self.want.gather_subset + managers.remove('profiles') + self.want.update({'gather_subset': managers}) + + def handle_monitors_keyword(self): + if 'monitors' not in self.want.gather_subset: + return + managers = [x for x in self.managers.keys() if '-monitors' in x] + self.want.gather_subset + managers.remove('monitors') + self.want.update({'gather_subset': managers}) + + def handle_gtm_pools_keyword(self): + if 'gtm-pools' not in self.want.gather_subset: + return + keys = self.managers.keys() + managers = [x for x in keys if x.startswith('gtm-') and x.endswith('-pools')] + managers += self.want.gather_subset + managers.remove('gtm-pools') + self.want.update({'gather_subset': managers}) + + def handle_gtm_wide_ips_keyword(self): + if 'gtm-wide-ips' not in self.want.gather_subset: + return + keys = self.managers.keys() + managers = [x for x in keys if x.startswith('gtm-') and x.endswith('-wide-ips')] + managers += self.want.gather_subset + managers.remove('gtm-wide-ips') + self.want.update({'gather_subset': managers}) + + def check_valid_gather_subset(self, includes): + """Check that the specified subset is valid + + The ``gather_subset`` parameter is specified as a "raw" field which means that + any Python type could technically be provided + + :param includes: + :return: + """ + keys = self.managers.keys() + result = [] + for x in includes: + if x not in keys: + if x[0] == '!': + if x[1:] not in keys: + result.append(x) + else: + result.append(x) + return result + + def execute_managers(self, managers): + results = dict() + client = F5RestClient(**self.module.params) + prov = modules_provisioned(client) + for manager in managers: + manager.provisioned_modules = prov + result = manager.exec_module() + results.update(result) + return results + + def get_manager(self, which): + result = {} + manager = self.managers.get(which, None) + if not manager: + return result + kwargs = dict() + kwargs.update(self.kwargs) + + kwargs['client'] = F5RestClient(**self.module.params) + result = manager(**kwargs) + return result + + +class ArgumentSpec(object): + def __init__(self): + self.supports_check_mode = False + argument_spec = dict( + gather_subset=dict( + type='list', + required=True, + aliases=['include'], + choices=[ + # Meta choices + 'all', + 'monitors', + 'profiles', + 'gtm-pools', + 'gtm-wide-ips', + + # Non-meta choices + 'asm-policies', + 'asm-policy-stats', + 'asm-server-technologies', + 'asm-signature-sets', + 'client-ssl-profiles', + 'devices', + 'device-groups', + 'external-monitors', + 'fasthttp-profiles', + 'fastl4-profiles', + 'gateway-icmp-monitors', + 'gtm-a-pools', + 'gtm-servers', + 'gtm-a-wide-ips', + 'gtm-aaaa-pools', + 'gtm-aaaa-wide-ips', + 'gtm-cname-pools', + 'gtm-cname-wide-ips', + 'gtm-mx-pools', + 'gtm-mx-wide-ips', + 'gtm-naptr-pools', + 'gtm-naptr-wide-ips', + 'gtm-srv-pools', + 'gtm-srv-wide-ips', + 'http-profiles', + 'http-monitors', + 'https-monitors', + 'iapp-services', + 'iapplx-packages', + 'icmp-monitors', + 'interfaces', + 'internal-data-groups', + 'irules', + 'ltm-pools', + 'ltm-policies', + 'nodes', + 'oneconnect-profiles', + 'partitions', + 'provision-info', + 
'route-domains',
+                    'self-ips',
+                    'server-ssl-profiles',
+                    'software-volumes',
+                    'software-images',
+                    'software-hotfixes',
+                    'ssl-certs',
+                    'ssl-keys',
+                    'system-db',
+                    'system-info',
+                    'tcp-monitors',
+                    'tcp-half-open-monitors',
+                    'tcp-profiles',
+                    'traffic-groups',
+                    'trunks',
+                    'udp-profiles',
+                    'users',
+                    'vcmp-guests',
+                    'virtual-addresses',
+                    'virtual-servers',
+                    'vlans',
+
+                    # Negations of meta choices
+                    '!all',
+                    '!monitors',
+                    '!profiles',
+                    '!gtm-pools',
+                    '!gtm-wide-ips',
+
+                    # Negations of non-meta-choices
+                    '!asm-policy-stats',
+                    '!asm-policies',
+                    '!asm-server-technologies',
+                    '!asm-signature-sets',
+                    '!client-ssl-profiles',
+                    '!devices',
+                    '!device-groups',
+                    '!external-monitors',
+                    '!fasthttp-profiles',
+                    '!fastl4-profiles',
+                    '!gateway-icmp-monitors',
+                    '!gtm-a-pools',
+                    '!gtm-servers',
+                    '!gtm-a-wide-ips',
+                    '!gtm-aaaa-pools',
+                    '!gtm-aaaa-wide-ips',
+                    '!gtm-cname-pools',
+                    '!gtm-cname-wide-ips',
+                    '!gtm-mx-pools',
+                    '!gtm-mx-wide-ips',
+                    '!gtm-naptr-pools',
+                    '!gtm-naptr-wide-ips',
+                    '!gtm-srv-pools',
+                    '!gtm-srv-wide-ips',
+                    '!http-profiles',
+                    '!http-monitors',
+                    '!https-monitors',
+                    '!iapp-services',
+                    '!iapplx-packages',
+                    '!icmp-monitors',
+                    '!interfaces',
+                    '!internal-data-groups',
+                    '!irules',
+                    '!ltm-pools',
+                    '!ltm-policies',
+                    '!nodes',
+                    '!oneconnect-profiles',
+                    '!partitions',
+                    '!provision-info',
+                    '!route-domains',
+                    '!self-ips',
+                    '!server-ssl-profiles',
+                    '!software-volumes',
+                    '!software-images',
+                    '!software-hotfixes',
+                    '!ssl-certs',
+                    '!ssl-keys',
+                    '!system-db',
+                    '!system-info',
+                    '!tcp-monitors',
+                    '!tcp-half-open-monitors',
+                    '!tcp-profiles',
+                    '!traffic-groups',
+                    '!trunks',
+                    '!udp-profiles',
+                    '!users',
+                    '!vcmp-guests',
+                    '!virtual-addresses',
+                    '!virtual-servers',
+                    '!vlans',
+                ]
+            ),
+        )
+        self.argument_spec = {}
+        self.argument_spec.update(f5_argument_spec)
+        self.argument_spec.update(argument_spec)
+
+
+def main():
+    spec = ArgumentSpec()
+
+    module = AnsibleModule(
+        argument_spec=spec.argument_spec,
+        supports_check_mode=spec.supports_check_mode
+    )
+    if module._name == 'bigip_device_facts':
+        module.deprecate("The 'bigip_device_facts' module has been renamed to 'bigip_device_info'", version='2.13')
+
+    try:
+        mm = ModuleManager(module=module)
+        results = mm.exec_module()
+        module.exit_json(**results)
+    except F5ModuleError as ex:
+        module.fail_json(msg=str(ex))
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/network/f5/bigip_device_traffic_group.py b/plugins/modules/network/f5/bigip_device_traffic_group.py
new file mode 100644
index 0000000000..95388c4e93
--- /dev/null
+++ b/plugins/modules/network/f5/bigip_device_traffic_group.py
@@ -0,0 +1,667 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2017, F5 Networks Inc.
+# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'certified'}
+
+DOCUMENTATION = r'''
+---
+module: bigip_device_traffic_group
+short_description: Manages traffic groups on BIG-IP
+description:
+  - Supports managing traffic groups and their attributes on a BIG-IP.
+options:
+  name:
+    description:
+      - The name of the traffic group.
+    type: str
+    required: True
+  mac_address:
+    description:
+      - Specifies the floating Media Access Control (MAC) address associated with the floating IP addresses
+        defined for a traffic group.
+      - Primarily, a MAC masquerade address minimizes ARP communications or dropped packets as a result of failover.
+      - A MAC masquerade address ensures that any traffic destined for a specific traffic group reaches an available
+        device after failover, because the MAC masquerade address floats to the available device along with the
+        traffic group.
+      - Without a MAC masquerade address, the sending host must learn the MAC address for a newly-active device,
+        either by sending an ARP request or by relying on the gratuitous ARP from the newly-active device.
+      - To unset the MAC address, specify an empty value (C("")) for this parameter.
+    type: str
+  ha_order:
+    description:
+      - Specifies the order in which you would like to assign devices for failover.
+      - If you configure this setting, you must configure it on every traffic group in the device group.
+      - The values should be the device names of the devices that belong to the failover group, configured beforehand.
+      - The order in which the devices are passed to this parameter determines their HA order on the device;
+        in other words, reordering the same elements changes the configuration on the unit.
+      - To disable the HA order failover method, specify an empty string value (C("")) for this parameter.
+      - Disabling HA order reverts the device to the default C(Load Aware) failover method,
+        unless the C(ha_group) setting is also configured.
+      - The module prepends the partition to device names, so you can provide either the full path format
+        name C(/Common/bigip1) or just the name string C(bigip1).
+    type: list
+  ha_group:
+    description:
+      - Specifies a configured C(HA group) to be associated with the traffic group.
+      - Once you create an HA group on a device and associate the HA group with a traffic group,
+        you must create an HA group and associate it with that same traffic group on every device in the device group.
+      - To disable the HA group failover method, specify an empty string value (C("")) for this parameter.
+      - Disabling HA group reverts the device to the default C(Load Aware) failover method,
+        unless the C(ha_order) setting is also configured.
+      - The C(auto_failback) and C(auto_failback_time) options are not compatible with C(ha_group).
+    type: str
+  ha_load_factor:
+    description:
+      - The value of the load the traffic group presents to the system, relative to other traffic groups.
+      - This parameter only takes effect when the C(Load Aware) failover method is in use.
+      - The correct value range is C(1 - 1000) inclusive.
+    type: int
+  auto_failback:
+    description:
+      - Specifies whether the traffic group fails back to the initial device specified in C(ha_order).
+    type: bool
+  auto_failback_time:
+    description:
+      - Specifies the number of seconds the system delays before failing back to the initial device
+        specified in C(ha_order).
+      - The correct value range is C(0 - 300) inclusive.
+    type: int
+  partition:
+    description:
+      - Device partition to manage resources on.
+    type: str
+    default: Common
+  state:
+    description:
+      - When C(present), ensures that the traffic group exists.
+      - When C(absent), ensures the traffic group is removed.
+    type: str
+    choices:
+      - present
+      - absent
+    default: present
+extends_documentation_fragment:
+- f5networks.f5_modules.f5
+
+author:
+  - Tim Rupp (@caphrim007)
+  - Wojciech Wypior (@wojtek0806)
+'''
+
+EXAMPLES = r'''
+- name: Create a traffic group
+  bigip_device_traffic_group:
+    name: foo1
+    state: present
+    provider:
+      user: admin
+      password: secret
+      server: lb.mydomain.com
+  delegate_to: localhost
+
+- name: Create a traffic group with ha_group failover
+  bigip_device_traffic_group:
+    name: foo2
+    state: present
+    ha_group: foo_HA_grp
+    provider:
+      user: admin
+      password: secret
+      server: lb.mydomain.com
+  delegate_to: localhost
+
+- name: Create a traffic group with ha_order failover
+  bigip_device_traffic_group:
+    name: foo3
+    state: present
+    ha_order:
+      - /Common/bigip1.lab.local
+      - /Common/bigip2.lab.local
+    auto_failback: yes
+    auto_failback_time: 40
+    provider:
+      user: admin
+      password: secret
+      server: lb.mydomain.com
+  delegate_to: localhost
+
+- name: Change traffic group ha_order to ha_group
+  bigip_device_traffic_group:
+    name: foo3
+    state: present
+    ha_group: foo_HA_grp
+    ha_order: ""
+    auto_failback: no
+    provider:
+      user: admin
+      password: secret
+      server: lb.mydomain.com
+  delegate_to: localhost
+
+- name: Remove traffic group
+  bigip_device_traffic_group:
+    name: foo
+    state: absent
+    provider:
+      user: admin
+      password: secret
+      server: lb.mydomain.com
+  delegate_to: localhost
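+
+# An illustrative, hedged example (not from the original module docs); the
+# mac_address and ha_load_factor values mirror the documented samples, while
+# the name and provider credentials are placeholders.
+- name: Create a traffic group with a MAC masquerade address and load factor
+  bigip_device_traffic_group:
+    name: foo4
+    state: present
+    mac_address: "02:01:d7:93:35:08"
+    ha_load_factor: 20
+    provider:
+      user: admin
+      password: secret
+      server: lb.mydomain.com
+  delegate_to: localhost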
+'''
+
+RETURN = r'''
+mac_address:
+  description: The MAC masquerade address.
+  returned: changed
+  type: str
+  sample: "02:01:d7:93:35:08"
+ha_group:
+  description: The configured HA group associated with the traffic group.
+  returned: changed
+  type: str
+  sample: foo_HA_grp
+ha_order:
+  description: The order in which the devices will fail over.
+  returned: changed
+  type: list
+  sample: ['/Common/bigip1', '/Common/bigip2']
+ha_load_factor:
+  description: The value of the load the traffic group presents to the system, relative to other traffic groups.
+  returned: changed
+  type: int
+  sample: 20
+auto_failback:
+  description: Whether the traffic group fails back to the initial device specified in ha_order.
+  returned: changed
+  type: bool
+  sample: yes
+auto_failback_time:
+  description: The number of seconds the system delays before failing back.
+  returned: changed
+  type: int
+  sample: 60
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.basic import env_fallback
+
+try:
+    from library.module_utils.network.f5.bigip import F5RestClient
+    from library.module_utils.network.f5.common import F5ModuleError
+    from library.module_utils.network.f5.common import AnsibleF5Parameters
+    from library.module_utils.network.f5.common import f5_argument_spec
+    from library.module_utils.network.f5.common import fq_name
+    from library.module_utils.network.f5.common import transform_name
+    from library.module_utils.network.f5.common import flatten_boolean
+except ImportError:
+    from ansible_collections.f5networks.f5_modules.plugins.module_utils.network.f5.bigip import F5RestClient
+    from ansible_collections.f5networks.f5_modules.plugins.module_utils.network.f5.common import F5ModuleError
+    from ansible_collections.f5networks.f5_modules.plugins.module_utils.network.f5.common import AnsibleF5Parameters
+    from ansible_collections.f5networks.f5_modules.plugins.module_utils.network.f5.common import f5_argument_spec
+    from ansible_collections.f5networks.f5_modules.plugins.module_utils.network.f5.common import fq_name
+    from ansible_collections.f5networks.f5_modules.plugins.module_utils.network.f5.common import transform_name
+    from ansible_collections.f5networks.f5_modules.plugins.module_utils.network.f5.common import flatten_boolean
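+
+
+# Illustrative note, not part of the original module: api_map on the
+# Parameters class below translates camelCase REST attribute names into the
+# module's snake_case options, so a device payload of {'haLoadFactor': 20}
+# surfaces on a Parameters object as ha_load_factor == 20.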
+class Parameters(AnsibleF5Parameters):
+    api_map = {
+        'mac': 'mac_address',
+        'haGroup': 'ha_group',
+        'haOrder': 'ha_order',
+        'haLoadFactor': 'ha_load_factor',
+        'autoFailbackTime': 'auto_failback_time',
+        'autoFailbackEnabled': 'auto_failback',
+    }
+
+    api_attributes = [
+        'mac',
+        'haGroup',
+        'haOrder',
+        'haLoadFactor',
+        'autoFailbackTime',
+        'autoFailbackEnabled',
+    ]
+
+    returnables = [
+        'mac_address',
+        'ha_group',
+        'ha_order',
+        'ha_load_factor',
+        'auto_failback_time',
+        'auto_failback',
+    ]
+
+    updatables = [
+        'mac_address',
+        'ha_group',
+        'ha_order',
+        'ha_load_factor',
+        'auto_failback_time',
+        'auto_failback',
+    ]
+
+
+class ApiParameters(Parameters):
+    pass
+
+
+class ModuleParameters(Parameters):
+    @property
+    def mac_address(self):
+        if self._values['mac_address'] is None:
+            return None
+        if self._values['mac_address'] == '':
+            return 'none'
+        return self._values['mac_address']
+
+    @property
+    def ha_group(self):
+        if self._values['ha_group'] is None:
+            return None
+        if self._values['ha_group'] == '':
+            return 'none'
+        if self.auto_failback == 'true':
+            raise F5ModuleError(
+                "auto_failback cannot be enabled when ha_group is specified."
+            )
+        return self._values['ha_group']
+
+    @property
+    def ha_load_factor(self):
+        if self._values['ha_load_factor'] is None:
+            return None
+        value = self._values['ha_load_factor']
+        if value < 1 or value > 1000:
+            raise F5ModuleError(
+                "Invalid ha_load_factor value, correct range is 1 - 1000, specified value: {0}.".format(value))
+        return value
+
+    @property
+    def auto_failback_time(self):
+        if self._values['auto_failback_time'] is None:
+            return None
+        value = self._values['auto_failback_time']
+        if value < 0 or value > 300:
+            raise F5ModuleError(
+                "Invalid auto_failback_time value, correct range is 0 - 300, specified value: {0}.".format(value))
+        return value
+
+    @property
+    def auto_failback(self):
+        result = flatten_boolean(self._values['auto_failback'])
+        if result == 'yes':
+            return 'true'
+        if result == 'no':
+            return 'false'
+        return None
+
+    @property
+    def ha_order(self):
+        if self._values['ha_order'] is None:
+            return None
+        if len(self._values['ha_order']) == 1 and self._values['ha_order'][0] == '':
+            if self.auto_failback == 'true':
+                raise F5ModuleError(
+                    'Cannot enable auto failback when the HA order list is empty; at least one device must be specified.'
+                )
+            return 'none'
+        result = [fq_name(self.partition, value) for value in self._values['ha_order']]
+        return result
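+
+    # Illustrative note, not part of the original module: with
+    # partition='Common' and ha_order=['bigip1', '/Common/bigip2'], the
+    # fq_name() call above normalises the list to
+    # ['/Common/bigip1', '/Common/bigip2'].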
+
+
+class Changes(Parameters):
+    def to_return(self):
+        result = {}
+        for returnable in self.returnables:
+            result[returnable] = getattr(self, returnable)
+        result = self._filter_params(result)
+        return result
+
+
+class UsableChanges(Changes):
+    pass
+
+
+class ReportableChanges(Changes):
+
+    @property
+    def mac_address(self):
+        if self._values['mac_address'] is None:
+            return None
+        if self._values['mac_address'] == 'none':
+            return ''
+        return self._values['mac_address']
+
+    @property
+    def ha_group(self):
+        if self._values['ha_group'] is None:
+            return None
+        if self._values['ha_group'] == 'none':
+            return ''
+        return self._values['ha_group']
+
+    @property
+    def auto_failback(self):
+        result = self._values['auto_failback']
+        if result == 'true':
+            return 'yes'
+        if result == 'false':
+            return 'no'
+        return None
+
+    @property
+    def ha_order(self):
+        if self._values['ha_order'] is None:
+            return None
+        if self._values['ha_order'] == 'none':
+            return ''
+        return self._values['ha_order']
+
+
+class Difference(object):
+    def __init__(self, want, have=None):
+        self.want = want
+        self.have = have
+
+    def compare(self, param):
+        try:
+            result = getattr(self, param)
+            return result
+        except AttributeError:
+            return self.__default(param)
+
+    def __default(self, param):
+        attr1 = getattr(self.want, param)
+        try:
+            attr2 = getattr(self.have, param)
+            if attr1 != attr2:
+                return attr1
+        except AttributeError:
+            return attr1
+
+    @property
+    def ha_group(self):
+        if self.want.ha_group is None:
+            return None
+        if self.have.ha_group is None and self.want.ha_group == 'none':
+            return None
+        if self.want.ha_group != self.have.ha_group:
+            if self.have.auto_failback == 'true' and self.want.auto_failback != 'false':
+                raise F5ModuleError(
+                    "The auto_failback parameter on the device must be disabled to use the ha_group failover method."
+                )
+            return self.want.ha_group
+
+    @property
+    def ha_order(self):
+        # The device order is literally derived from the order in the array,
+        # so lists with the same elements in a different order are not equal;
+        # the cmp_simple_list function will not work here.
+        if self.want.ha_order is None:
+            return None
+        if self.have.ha_order is None and self.want.ha_order == 'none':
+            return None
+        if self.want.ha_order != self.have.ha_order:
+            return self.want.ha_order
+
+    @property
+    def partition(self):
+        raise F5ModuleError(
+            "Partition cannot be changed for a traffic group. Only /Common is allowed."
+ ) + + +class ModuleManager(object): + def __init__(self, *args, **kwargs): + self.module = kwargs.get('module', None) + self.client = F5RestClient(**self.module.params) + self.have = None + self.want = ModuleParameters(params=self.module.params) + self.changes = UsableChanges() + + def _set_changed_options(self): + changed = {} + for key in Parameters.returnables: + if getattr(self.want, key) is not None: + changed[key] = getattr(self.want, key) + if changed: + self.changes = UsableChanges(params=changed) + + def _update_changed_options(self): + diff = Difference(self.want, self.have) + updatables = Parameters.updatables + changed = dict() + for k in updatables: + change = diff.compare(k) + if change is None: + continue + else: + if isinstance(change, dict): + changed.update(change) + else: + changed[k] = change + if changed: + self.changes = UsableChanges(params=changed) + return True + return False + + def _announce_deprecations(self, result): + warnings = result.pop('__warnings', []) + for warning in warnings: + self.module.deprecate( + msg=warning['msg'], + version=warning['version'] + ) + + def exec_module(self): + changed = False + result = dict() + state = self.want.state + + if state == "present": + changed = self.present() + elif state == "absent": + changed = self.absent() + + reportable = ReportableChanges(params=self.changes.to_return()) + changes = reportable.to_return() + result.update(**changes) + result.update(dict(changed=changed)) + self._announce_deprecations(result) + return result + + def present(self): + if self.exists(): + return self.update() + else: + return self.create() + + def absent(self): + if self.exists(): + return self.remove() + return False + + def should_update(self): + result = self._update_changed_options() + if result: + return True + return False + + def update(self): + self.have = self.read_current_from_device() + if not self.should_update(): + return False + if self.module.check_mode: + return True + self.update_on_device() + return True + + def remove(self): + if self.module.check_mode: + return True + self.remove_from_device() + if self.exists(): + raise F5ModuleError("Failed to delete the resource.") + return True + + def create(self): + self._set_changed_options() + if self.want.partition.lower().strip('/') != 'common': + raise F5ModuleError( + "Traffic groups can only be created in the /Common partition" + ) + if self.module.check_mode: + return True + self.create_on_device() + return True + + def exists(self): + uri = "https://{0}:{1}/mgmt/tm/cm/traffic-group/{2}".format( + self.client.provider['server'], + self.client.provider['server_port'], + transform_name(self.want.partition, self.want.name) + ) + resp = self.client.api.get(uri) + try: + response = resp.json() + except ValueError: + return False + if resp.status == 404 or 'code' in response and response['code'] == 404: + return False + return True + + def create_on_device(self): + params = self.changes.api_params() + params['name'] = self.want.name + params['partition'] = self.want.partition + uri = "https://{0}:{1}/mgmt/tm/cm/traffic-group/".format( + self.client.provider['server'], + self.client.provider['server_port'], + ) + resp = self.client.api.post(uri, json=params) + try: + response = resp.json() + except ValueError as ex: + raise F5ModuleError(str(ex)) + + if 'code' in response and response['code'] in [400, 403]: + if 'message' in response: + raise F5ModuleError(response['message']) + else: + raise F5ModuleError(resp.content) + return response['selfLink'] + + def 
update_on_device(self): + params = self.changes.api_params() + uri = "https://{0}:{1}/mgmt/tm/cm/traffic-group/{2}".format( + self.client.provider['server'], + self.client.provider['server_port'], + transform_name(self.want.partition, self.want.name) + ) + resp = self.client.api.patch(uri, json=params) + try: + response = resp.json() + except ValueError as ex: + raise F5ModuleError(str(ex)) + + if 'code' in response and response['code'] == 400: + if 'message' in response: + raise F5ModuleError(response['message']) + else: + raise F5ModuleError(resp.content) + + def read_current_from_device(self): + uri = "https://{0}:{1}/mgmt/tm/cm/traffic-group/{2}".format( + self.client.provider['server'], + self.client.provider['server_port'], + transform_name(self.want.partition, self.want.name) + ) + + resp = self.client.api.get(uri) + + try: + response = resp.json() + except ValueError as ex: + raise F5ModuleError(str(ex)) + + if 'code' in response and response['code'] == 400: + if 'message' in response: + raise F5ModuleError(response['message']) + else: + raise F5ModuleError(resp.content) + return ApiParameters(params=response) + + def remove_from_device(self): + uri = "https://{0}:{1}/mgmt/tm/cm/traffic-group/{2}".format( + self.client.provider['server'], + self.client.provider['server_port'], + transform_name(self.want.partition, self.want.name) + ) + response = self.client.api.delete(uri) + if response.status == 200: + return True + raise F5ModuleError(response.content) + + +class ArgumentSpec(object): + def __init__(self): + self.supports_check_mode = True + argument_spec = dict( + name=dict(required=True), + mac_address=dict(), + ha_order=dict( + type='list' + ), + ha_group=dict(), + ha_load_factor=dict( + type='int' + ), + auto_failback=dict( + type='bool', + ), + auto_failback_time=dict( + type='int' + ), + state=dict( + default='present', + choices=['absent', 'present'] + ), + partition=dict( + default='Common', + fallback=(env_fallback, ['F5_PARTITION']) + ), + + ) + self.argument_spec = {} + self.argument_spec.update(f5_argument_spec) + self.argument_spec.update(argument_spec) + + +def main(): + spec = ArgumentSpec() + + module = AnsibleModule( + argument_spec=spec.argument_spec, + supports_check_mode=spec.supports_check_mode, + ) + + try: + mm = ModuleManager(module=module) + results = mm.exec_module() + module.exit_json(**results) + except F5ModuleError as ex: + module.fail_json(msg=str(ex)) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/f5/bigip_facts.py b/plugins/modules/network/f5/bigip_facts.py new file mode 100644 index 0000000000..94eb93547e --- /dev/null +++ b/plugins/modules/network/f5/bigip_facts.py @@ -0,0 +1,1803 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (c) 2017 F5 Networks Inc. 
+# Copyright (c) 2013 Matt Hite +# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['deprecated'], + 'supported_by': 'certified'} + +DOCUMENTATION = r''' +--- +module: bigip_facts +short_description: Collect facts from F5 BIG-IP devices +description: + - Collect facts from F5 BIG-IP devices via iControl SOAP API +author: + - Matt Hite (@mhite) + - Tim Rupp (@caphrim007) +notes: + - Requires BIG-IP software version >= 11.4 + - F5 developed module 'bigsuds' required (see http://devcentral.f5.com) + - Best run as a local_action in your playbook + - Tested with manager and above account privilege level + - C(provision) facts were added in 2.2 + - This module is deprecated. Use the C(bigip_device_info) module instead. +deprecated: + removed_in: '2.11' + alternative: bigip_device_info + why: > + The bigip_facts module relies on SOAP to communicate with the BIG-IP, + and has a large amount of code that does not conform to existing F5 standards. + The M(bigip_device_info) module is easier to maintain and use. +requirements: + - bigsuds +options: + session: + description: + - BIG-IP session support; may be useful to avoid concurrency + issues in certain circumstances. + default: no + type: bool + include: + description: + - Fact category or list of categories to collect + required: True + choices: + - address_class + - certificate + - client_ssl_profile + - device + - device_group + - interface + - key + - node + - pool + - provision + - rule + - self_ip + - software + - system_info + - traffic_group + - trunk + - virtual_address + - virtual_server + - vlan + filter: + description: + - Shell-style glob matching string used to filter fact keys. Not + applicable for software, provision, and system_info fact categories. +extends_documentation_fragment: +- f5networks.f5_modules.f5 + +''' + +EXAMPLES = r''' +- name: Collect BIG-IP facts + bigip_facts: + server: lb.mydomain.com + user: admin + password: secret + include: + - interface + - vlan + delegate_to: localhost +''' + +import fnmatch +import re +import traceback + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.six import string_types +from ansible.module_utils.six.moves import zip + +try: + from library.module_utils.network.f5.legacy import bigip_api, bigsuds_found + from library.module_utils.network.f5.common import f5_argument_spec + from library.module_utils.network.f5.common import F5BaseClient +except ImportError: + from ansible_collections.community.general.plugins.module_utils.network.f5.legacy import bigip_api, bigsuds_found + from ansible_collections.f5networks.f5_modules.plugins.module_utils.network.f5.common import f5_argument_spec + from ansible_collections.f5networks.f5_modules.plugins.module_utils.network.f5.common import F5BaseClient + +try: + from suds import MethodNotFound, WebFault +except ImportError: + pass # Handle via f5_utils.bigsuds_found + + +class F5(object): + """F5 iControl class. + + F5 BIG-IP iControl API class. + + Attributes: + api: iControl API instance. 
+ """ + + def __init__(self, host, user, password, session=False, validate_certs=True, port=443): + self.api = bigip_api(host, user, password, validate_certs, port) + if session: + self.start_session() + + def start_session(self): + self.api = self.api.with_session_id() + + def get_api(self): + return self.api + + def set_recursive_query_state(self, state): + self.api.System.Session.set_recursive_query_state(state) + + def get_recursive_query_state(self): + return self.api.System.Session.get_recursive_query_state() + + def enable_recursive_query_state(self): + self.set_recursive_query_state('STATE_ENABLED') + + def disable_recursive_query_state(self): + self.set_recursive_query_state('STATE_DISABLED') + + def set_active_folder(self, folder): + self.api.System.Session.set_active_folder(folder=folder) + + def get_active_folder(self): + return self.api.System.Session.get_active_folder() + + +class Interfaces(object): + """Interfaces class. + + F5 BIG-IP interfaces class. + + Attributes: + api: iControl API instance. + interfaces: A list of BIG-IP interface names. + """ + + def __init__(self, api, regex=None): + self.api = api + self.interfaces = api.Networking.Interfaces.get_list() + if regex: + re_filter = re.compile(regex) + self.interfaces = filter(re_filter.search, self.interfaces) + + def get_list(self): + return self.interfaces + + def get_active_media(self): + return self.api.Networking.Interfaces.get_active_media(self.interfaces) + + def get_actual_flow_control(self): + return self.api.Networking.Interfaces.get_actual_flow_control(self.interfaces) + + def get_bundle_state(self): + return self.api.Networking.Interfaces.get_bundle_state(self.interfaces) + + def get_description(self): + return self.api.Networking.Interfaces.get_description(self.interfaces) + + def get_dual_media_state(self): + return self.api.Networking.Interfaces.get_dual_media_state(self.interfaces) + + def get_enabled_state(self): + return self.api.Networking.Interfaces.get_enabled_state(self.interfaces) + + def get_if_index(self): + return self.api.Networking.Interfaces.get_if_index(self.interfaces) + + def get_learning_mode(self): + return self.api.Networking.Interfaces.get_learning_mode(self.interfaces) + + def get_lldp_admin_status(self): + return self.api.Networking.Interfaces.get_lldp_admin_status(self.interfaces) + + def get_lldp_tlvmap(self): + return self.api.Networking.Interfaces.get_lldp_tlvmap(self.interfaces) + + def get_mac_address(self): + return self.api.Networking.Interfaces.get_mac_address(self.interfaces) + + def get_media(self): + return self.api.Networking.Interfaces.get_media(self.interfaces) + + def get_media_option(self): + return self.api.Networking.Interfaces.get_media_option(self.interfaces) + + def get_media_option_sfp(self): + return self.api.Networking.Interfaces.get_media_option_sfp(self.interfaces) + + def get_media_sfp(self): + return self.api.Networking.Interfaces.get_media_sfp(self.interfaces) + + def get_media_speed(self): + return self.api.Networking.Interfaces.get_media_speed(self.interfaces) + + def get_media_status(self): + return self.api.Networking.Interfaces.get_media_status(self.interfaces) + + def get_mtu(self): + return self.api.Networking.Interfaces.get_mtu(self.interfaces) + + def get_phy_master_slave_mode(self): + return self.api.Networking.Interfaces.get_phy_master_slave_mode(self.interfaces) + + def get_prefer_sfp_state(self): + return self.api.Networking.Interfaces.get_prefer_sfp_state(self.interfaces) + + def get_flow_control(self): + return 
self.api.Networking.Interfaces.get_requested_flow_control(self.interfaces) + + def get_sflow_poll_interval(self): + return self.api.Networking.Interfaces.get_sflow_poll_interval(self.interfaces) + + def get_sflow_poll_interval_global(self): + return self.api.Networking.Interfaces.get_sflow_poll_interval_global(self.interfaces) + + def get_sfp_media_state(self): + return self.api.Networking.Interfaces.get_sfp_media_state(self.interfaces) + + def get_stp_active_edge_port_state(self): + return self.api.Networking.Interfaces.get_stp_active_edge_port_state(self.interfaces) + + def get_stp_enabled_state(self): + return self.api.Networking.Interfaces.get_stp_enabled_state(self.interfaces) + + def get_stp_link_type(self): + return self.api.Networking.Interfaces.get_stp_link_type(self.interfaces) + + def get_stp_protocol_detection_reset_state(self): + return self.api.Networking.Interfaces.get_stp_protocol_detection_reset_state(self.interfaces) + + +class SelfIPs(object): + """Self IPs class. + + F5 BIG-IP Self IPs class. + + Attributes: + api: iControl API instance. + self_ips: List of self IPs. + """ + + def __init__(self, api, regex=None): + self.api = api + self.self_ips = api.Networking.SelfIPV2.get_list() + if regex: + re_filter = re.compile(regex) + self.self_ips = filter(re_filter.search, self.self_ips) + + def get_list(self): + return self.self_ips + + def get_address(self): + return self.api.Networking.SelfIPV2.get_address(self.self_ips) + + def get_allow_access_list(self): + return self.api.Networking.SelfIPV2.get_allow_access_list(self.self_ips) + + def get_description(self): + return self.api.Networking.SelfIPV2.get_description(self.self_ips) + + def get_enforced_firewall_policy(self): + return self.api.Networking.SelfIPV2.get_enforced_firewall_policy(self.self_ips) + + def get_floating_state(self): + return self.api.Networking.SelfIPV2.get_floating_state(self.self_ips) + + def get_fw_rule(self): + return self.api.Networking.SelfIPV2.get_fw_rule(self.self_ips) + + def get_netmask(self): + return self.api.Networking.SelfIPV2.get_netmask(self.self_ips) + + def get_staged_firewall_policy(self): + return self.api.Networking.SelfIPV2.get_staged_firewall_policy(self.self_ips) + + def get_traffic_group(self): + return self.api.Networking.SelfIPV2.get_traffic_group(self.self_ips) + + def get_vlan(self): + return self.api.Networking.SelfIPV2.get_vlan(self.self_ips) + + def get_is_traffic_group_inherited(self): + return self.api.Networking.SelfIPV2.is_traffic_group_inherited(self.self_ips) + + +class Trunks(object): + """Trunks class. + + F5 BIG-IP trunks class. + + Attributes: + api: iControl API instance. + trunks: List of trunks. 
+ """ + + def __init__(self, api, regex=None): + self.api = api + self.trunks = api.Networking.Trunk.get_list() + if regex: + re_filter = re.compile(regex) + self.trunks = filter(re_filter.search, self.trunks) + + def get_list(self): + return self.trunks + + def get_active_lacp_state(self): + return self.api.Networking.Trunk.get_active_lacp_state(self.trunks) + + def get_configured_member_count(self): + return self.api.Networking.Trunk.get_configured_member_count(self.trunks) + + def get_description(self): + return self.api.Networking.Trunk.get_description(self.trunks) + + def get_distribution_hash_option(self): + return self.api.Networking.Trunk.get_distribution_hash_option(self.trunks) + + def get_interface(self): + return self.api.Networking.Trunk.get_interface(self.trunks) + + def get_lacp_enabled_state(self): + return self.api.Networking.Trunk.get_lacp_enabled_state(self.trunks) + + def get_lacp_timeout_option(self): + return self.api.Networking.Trunk.get_lacp_timeout_option(self.trunks) + + def get_link_selection_policy(self): + return self.api.Networking.Trunk.get_link_selection_policy(self.trunks) + + def get_media_speed(self): + return self.api.Networking.Trunk.get_media_speed(self.trunks) + + def get_media_status(self): + return self.api.Networking.Trunk.get_media_status(self.trunks) + + def get_operational_member_count(self): + return self.api.Networking.Trunk.get_operational_member_count(self.trunks) + + def get_stp_enabled_state(self): + return self.api.Networking.Trunk.get_stp_enabled_state(self.trunks) + + def get_stp_protocol_detection_reset_state(self): + return self.api.Networking.Trunk.get_stp_protocol_detection_reset_state(self.trunks) + + +class Vlans(object): + """Vlans class. + + F5 BIG-IP Vlans class. + + Attributes: + api: iControl API instance. + vlans: List of VLANs. 
+ """ + + def __init__(self, api, regex=None): + self.api = api + self.vlans = api.Networking.VLAN.get_list() + if regex: + re_filter = re.compile(regex) + self.vlans = filter(re_filter.search, self.vlans) + + def get_list(self): + return self.vlans + + def get_auto_lasthop(self): + return self.api.Networking.VLAN.get_auto_lasthop(self.vlans) + + def get_cmp_hash_algorithm(self): + return self.api.Networking.VLAN.get_cmp_hash_algorithm(self.vlans) + + def get_description(self): + return self.api.Networking.VLAN.get_description(self.vlans) + + def get_dynamic_forwarding(self): + return self.api.Networking.VLAN.get_dynamic_forwarding(self.vlans) + + def get_failsafe_action(self): + return self.api.Networking.VLAN.get_failsafe_action(self.vlans) + + def get_failsafe_state(self): + return self.api.Networking.VLAN.get_failsafe_state(self.vlans) + + def get_failsafe_timeout(self): + return self.api.Networking.VLAN.get_failsafe_timeout(self.vlans) + + def get_if_index(self): + return self.api.Networking.VLAN.get_if_index(self.vlans) + + def get_learning_mode(self): + return self.api.Networking.VLAN.get_learning_mode(self.vlans) + + def get_mac_masquerade_address(self): + return self.api.Networking.VLAN.get_mac_masquerade_address(self.vlans) + + def get_member(self): + return self.api.Networking.VLAN.get_member(self.vlans) + + def get_mtu(self): + return self.api.Networking.VLAN.get_mtu(self.vlans) + + def get_sflow_poll_interval(self): + return self.api.Networking.VLAN.get_sflow_poll_interval(self.vlans) + + def get_sflow_poll_interval_global(self): + return self.api.Networking.VLAN.get_sflow_poll_interval_global(self.vlans) + + def get_sflow_sampling_rate(self): + return self.api.Networking.VLAN.get_sflow_sampling_rate(self.vlans) + + def get_sflow_sampling_rate_global(self): + return self.api.Networking.VLAN.get_sflow_sampling_rate_global(self.vlans) + + def get_source_check_state(self): + return self.api.Networking.VLAN.get_source_check_state(self.vlans) + + def get_true_mac_address(self): + return self.api.Networking.VLAN.get_true_mac_address(self.vlans) + + def get_vlan_id(self): + return self.api.Networking.VLAN.get_vlan_id(self.vlans) + + +class Software(object): + """Software class. + + F5 BIG-IP software class. + + Attributes: + api: iControl API instance. + """ + + def __init__(self, api): + self.api = api + + def get_all_software_status(self): + return self.api.System.SoftwareManagement.get_all_software_status() + + +class VirtualServers(object): + """Virtual servers class. + + F5 BIG-IP virtual servers class. + + Attributes: + api: iControl API instance. + virtual_servers: List of virtual servers. 
+ """ + + def __init__(self, api, regex=None): + self.api = api + self.virtual_servers = api.LocalLB.VirtualServer.get_list() + if regex: + re_filter = re.compile(regex) + self.virtual_servers = filter(re_filter.search, self.virtual_servers) + + def get_list(self): + return self.virtual_servers + + def get_name(self): + return [x[x.rfind('/') + 1:] for x in self.virtual_servers] + + def get_actual_hardware_acceleration(self): + return self.api.LocalLB.VirtualServer.get_actual_hardware_acceleration(self.virtual_servers) + + def get_authentication_profile(self): + return self.api.LocalLB.VirtualServer.get_authentication_profile(self.virtual_servers) + + def get_auto_lasthop(self): + return self.api.LocalLB.VirtualServer.get_auto_lasthop(self.virtual_servers) + + def get_bw_controller_policy(self): + return self.api.LocalLB.VirtualServer.get_bw_controller_policy(self.virtual_servers) + + def get_clone_pool(self): + return self.api.LocalLB.VirtualServer.get_clone_pool(self.virtual_servers) + + def get_cmp_enable_mode(self): + return self.api.LocalLB.VirtualServer.get_cmp_enable_mode(self.virtual_servers) + + def get_connection_limit(self): + return self.api.LocalLB.VirtualServer.get_connection_limit(self.virtual_servers) + + def get_connection_mirror_state(self): + return self.api.LocalLB.VirtualServer.get_connection_mirror_state(self.virtual_servers) + + def get_default_pool_name(self): + return self.api.LocalLB.VirtualServer.get_default_pool_name(self.virtual_servers) + + def get_description(self): + return self.api.LocalLB.VirtualServer.get_description(self.virtual_servers) + + def get_destination(self): + return self.api.LocalLB.VirtualServer.get_destination_v2(self.virtual_servers) + + def get_enabled_state(self): + return self.api.LocalLB.VirtualServer.get_enabled_state(self.virtual_servers) + + def get_enforced_firewall_policy(self): + return self.api.LocalLB.VirtualServer.get_enforced_firewall_policy(self.virtual_servers) + + def get_fallback_persistence_profile(self): + return self.api.LocalLB.VirtualServer.get_fallback_persistence_profile(self.virtual_servers) + + def get_fw_rule(self): + return self.api.LocalLB.VirtualServer.get_fw_rule(self.virtual_servers) + + def get_gtm_score(self): + return self.api.LocalLB.VirtualServer.get_gtm_score(self.virtual_servers) + + def get_last_hop_pool(self): + return self.api.LocalLB.VirtualServer.get_last_hop_pool(self.virtual_servers) + + def get_nat64_state(self): + return self.api.LocalLB.VirtualServer.get_nat64_state(self.virtual_servers) + + def get_object_status(self): + return self.api.LocalLB.VirtualServer.get_object_status(self.virtual_servers) + + def get_persistence_profile(self): + return self.api.LocalLB.VirtualServer.get_persistence_profile(self.virtual_servers) + + def get_profile(self): + return self.api.LocalLB.VirtualServer.get_profile(self.virtual_servers) + + def get_protocol(self): + return self.api.LocalLB.VirtualServer.get_protocol(self.virtual_servers) + + def get_rate_class(self): + return self.api.LocalLB.VirtualServer.get_rate_class(self.virtual_servers) + + def get_rate_limit(self): + return self.api.LocalLB.VirtualServer.get_rate_limit(self.virtual_servers) + + def get_rate_limit_destination_mask(self): + return self.api.LocalLB.VirtualServer.get_rate_limit_destination_mask(self.virtual_servers) + + def get_rate_limit_mode(self): + return self.api.LocalLB.VirtualServer.get_rate_limit_mode(self.virtual_servers) + + def get_rate_limit_source_mask(self): + return 
self.api.LocalLB.VirtualServer.get_rate_limit_source_mask(self.virtual_servers) + + def get_related_rule(self): + return self.api.LocalLB.VirtualServer.get_related_rule(self.virtual_servers) + + def get_rule(self): + return self.api.LocalLB.VirtualServer.get_rule(self.virtual_servers) + + def get_security_log_profile(self): + return self.api.LocalLB.VirtualServer.get_security_log_profile(self.virtual_servers) + + def get_snat_pool(self): + return self.api.LocalLB.VirtualServer.get_snat_pool(self.virtual_servers) + + def get_snat_type(self): + return self.api.LocalLB.VirtualServer.get_snat_type(self.virtual_servers) + + def get_source_address(self): + return self.api.LocalLB.VirtualServer.get_source_address(self.virtual_servers) + + def get_source_address_translation_lsn_pool(self): + return self.api.LocalLB.VirtualServer.get_source_address_translation_lsn_pool(self.virtual_servers) + + def get_source_address_translation_snat_pool(self): + return self.api.LocalLB.VirtualServer.get_source_address_translation_snat_pool(self.virtual_servers) + + def get_source_address_translation_type(self): + return self.api.LocalLB.VirtualServer.get_source_address_translation_type(self.virtual_servers) + + def get_source_port_behavior(self): + return self.api.LocalLB.VirtualServer.get_source_port_behavior(self.virtual_servers) + + def get_staged_firewall_policy(self): + return self.api.LocalLB.VirtualServer.get_staged_firewall_policy(self.virtual_servers) + + def get_translate_address_state(self): + return self.api.LocalLB.VirtualServer.get_translate_address_state(self.virtual_servers) + + def get_translate_port_state(self): + return self.api.LocalLB.VirtualServer.get_translate_port_state(self.virtual_servers) + + def get_type(self): + return self.api.LocalLB.VirtualServer.get_type(self.virtual_servers) + + def get_vlan(self): + return self.api.LocalLB.VirtualServer.get_vlan(self.virtual_servers) + + def get_wildmask(self): + return self.api.LocalLB.VirtualServer.get_wildmask(self.virtual_servers) + + +class Pools(object): + """Pools class. + + F5 BIG-IP pools class. + + Attributes: + api: iControl API instance. + pool_names: List of pool names. 
+ """ + + def __init__(self, api, regex=None): + self.api = api + self.pool_names = api.LocalLB.Pool.get_list() + if regex: + re_filter = re.compile(regex) + self.pool_names = filter(re_filter.search, self.pool_names) + + def get_list(self): + return self.pool_names + + def get_name(self): + return [x[x.rfind('/') + 1:] for x in self.pool_names] + + def get_action_on_service_down(self): + return self.api.LocalLB.Pool.get_action_on_service_down(self.pool_names) + + def get_active_member_count(self): + return self.api.LocalLB.Pool.get_active_member_count(self.pool_names) + + def get_aggregate_dynamic_ratio(self): + return self.api.LocalLB.Pool.get_aggregate_dynamic_ratio(self.pool_names) + + def get_allow_nat_state(self): + return self.api.LocalLB.Pool.get_allow_nat_state(self.pool_names) + + def get_allow_snat_state(self): + return self.api.LocalLB.Pool.get_allow_snat_state(self.pool_names) + + def get_client_ip_tos(self): + return self.api.LocalLB.Pool.get_client_ip_tos(self.pool_names) + + def get_client_link_qos(self): + return self.api.LocalLB.Pool.get_client_link_qos(self.pool_names) + + def get_description(self): + return self.api.LocalLB.Pool.get_description(self.pool_names) + + def get_gateway_failsafe_device(self): + return self.api.LocalLB.Pool.get_gateway_failsafe_device(self.pool_names) + + def get_ignore_persisted_weight_state(self): + return self.api.LocalLB.Pool.get_ignore_persisted_weight_state(self.pool_names) + + def get_lb_method(self): + result = [] + lb_choice = dict( + LB_METHOD_DYNAMIC_RATIO_MEMBER='dynamic-ratio-member', + LB_METHOD_DYNAMIC_RATIO='dynamic-ratio-node', + LB_METHOD_FASTEST_APP_RESPONSE='fastest-app-response', + LB_METHOD_FASTEST_NODE_ADDRESS='fastest-node', + LB_METHOD_LEAST_CONNECTION_MEMBER='least-connections-member', + LB_METHOD_LEAST_CONNECTION_NODE_ADDRESS='least-connections-node', + LB_METHOD_LEAST_SESSIONS='least-sessions', + LB_METHOD_OBSERVED_MEMBER='observed-member', + LB_METHOD_OBSERVED_NODE_ADDRESS='observed-node', + LB_METHOD_PREDICTIVE_MEMBER='predictive-member', + LB_METHOD_PREDICTIVE_NODE_ADDRESS='predictive-node', + LB_METHOD_RATIO_LEAST_CONNECTION_MEMBER='ratio-least-connections-member', + LB_METHOD_RATIO_LEAST_CONNECTION_NODE_ADDRESS='ratio-least-connections-node', + LB_METHOD_RATIO_MEMBER='ratio-member', + LB_METHOD_RATIO_NODE_ADDRESS='ratio-node', + LB_METHOD_RATIO_SESSION='ratio-session', + LB_METHOD_ROUND_ROBIN='round-robin', + LB_METHOD_WEIGHTED_LEAST_CONNECTION_MEMBER='weighted-least-connections-member', + LB_METHOD_WEIGHTED_LEAST_CONNECTION_NODE_ADDRESS='weighted-least-connections-node' + ) + methods = self.api.LocalLB.Pool.get_lb_method(self.pool_names) + for method in methods: + result.append(lb_choice.get(method, method)) + return result + + def get_member(self): + return self.api.LocalLB.Pool.get_member_v2(self.pool_names) + + def get_minimum_active_member(self): + return self.api.LocalLB.Pool.get_minimum_active_member(self.pool_names) + + def get_minimum_up_member(self): + return self.api.LocalLB.Pool.get_minimum_up_member(self.pool_names) + + def get_minimum_up_member_action(self): + return self.api.LocalLB.Pool.get_minimum_up_member_action(self.pool_names) + + def get_minimum_up_member_enabled_state(self): + return self.api.LocalLB.Pool.get_minimum_up_member_enabled_state(self.pool_names) + + def get_monitor_association(self): + return self.api.LocalLB.Pool.get_monitor_association(self.pool_names) + + def get_monitor_instance(self): + return self.api.LocalLB.Pool.get_monitor_instance(self.pool_names) + + def 
get_object_status(self): + return self.api.LocalLB.Pool.get_object_status(self.pool_names) + + def get_profile(self): + return self.api.LocalLB.Pool.get_profile(self.pool_names) + + def get_queue_depth_limit(self): + return self.api.LocalLB.Pool.get_queue_depth_limit(self.pool_names) + + def get_queue_on_connection_limit_state(self): + return self.api.LocalLB.Pool.get_queue_on_connection_limit_state(self.pool_names) + + def get_queue_time_limit(self): + return self.api.LocalLB.Pool.get_queue_time_limit(self.pool_names) + + def get_reselect_tries(self): + return self.api.LocalLB.Pool.get_reselect_tries(self.pool_names) + + def get_server_ip_tos(self): + return self.api.LocalLB.Pool.get_server_ip_tos(self.pool_names) + + def get_server_link_qos(self): + return self.api.LocalLB.Pool.get_server_link_qos(self.pool_names) + + def get_simple_timeout(self): + return self.api.LocalLB.Pool.get_simple_timeout(self.pool_names) + + def get_slow_ramp_time(self): + return self.api.LocalLB.Pool.get_slow_ramp_time(self.pool_names) + + +class Devices(object): + """Devices class. + + F5 BIG-IP devices class. + + Attributes: + api: iControl API instance. + devices: List of devices. + """ + + def __init__(self, api, regex=None): + self.api = api + self.devices = api.Management.Device.get_list() + if regex: + re_filter = re.compile(regex) + self.devices = filter(re_filter.search, self.devices) + + def get_list(self): + return self.devices + + def get_active_modules(self): + return self.api.Management.Device.get_active_modules(self.devices) + + def get_base_mac_address(self): + return self.api.Management.Device.get_base_mac_address(self.devices) + + def get_blade_addresses(self): + return self.api.Management.Device.get_blade_addresses(self.devices) + + def get_build(self): + return self.api.Management.Device.get_build(self.devices) + + def get_chassis_id(self): + return self.api.Management.Device.get_chassis_id(self.devices) + + def get_chassis_type(self): + return self.api.Management.Device.get_chassis_type(self.devices) + + def get_comment(self): + return self.api.Management.Device.get_comment(self.devices) + + def get_configsync_address(self): + return self.api.Management.Device.get_configsync_address(self.devices) + + def get_contact(self): + return self.api.Management.Device.get_contact(self.devices) + + def get_description(self): + return self.api.Management.Device.get_description(self.devices) + + def get_edition(self): + return self.api.Management.Device.get_edition(self.devices) + + def get_failover_state(self): + return self.api.Management.Device.get_failover_state(self.devices) + + def get_local_device(self): + return self.api.Management.Device.get_local_device() + + def get_hostname(self): + return self.api.Management.Device.get_hostname(self.devices) + + def get_inactive_modules(self): + return self.api.Management.Device.get_inactive_modules(self.devices) + + def get_location(self): + return self.api.Management.Device.get_location(self.devices) + + def get_management_address(self): + return self.api.Management.Device.get_management_address(self.devices) + + def get_marketing_name(self): + return self.api.Management.Device.get_marketing_name(self.devices) + + def get_multicast_address(self): + return self.api.Management.Device.get_multicast_address(self.devices) + + def get_optional_modules(self): + return self.api.Management.Device.get_optional_modules(self.devices) + + def get_platform_id(self): + return self.api.Management.Device.get_platform_id(self.devices) + + def 
get_primary_mirror_address(self): + return self.api.Management.Device.get_primary_mirror_address(self.devices) + + def get_product(self): + return self.api.Management.Device.get_product(self.devices) + + def get_secondary_mirror_address(self): + return self.api.Management.Device.get_secondary_mirror_address(self.devices) + + def get_software_version(self): + return self.api.Management.Device.get_software_version(self.devices) + + def get_timelimited_modules(self): + return self.api.Management.Device.get_timelimited_modules(self.devices) + + def get_timezone(self): + return self.api.Management.Device.get_timezone(self.devices) + + def get_unicast_addresses(self): + return self.api.Management.Device.get_unicast_addresses(self.devices) + + +class DeviceGroups(object): + """Device groups class. + + F5 BIG-IP device groups class. + + Attributes: + api: iControl API instance. + device_groups: List of device groups. + """ + + def __init__(self, api, regex=None): + self.api = api + self.device_groups = api.Management.DeviceGroup.get_list() + if regex: + re_filter = re.compile(regex) + self.device_groups = filter(re_filter.search, self.device_groups) + + def get_list(self): + return self.device_groups + + def get_all_preferred_active(self): + return self.api.Management.DeviceGroup.get_all_preferred_active(self.device_groups) + + def get_autosync_enabled_state(self): + return self.api.Management.DeviceGroup.get_autosync_enabled_state(self.device_groups) + + def get_description(self): + return self.api.Management.DeviceGroup.get_description(self.device_groups) + + def get_device(self): + return self.api.Management.DeviceGroup.get_device(self.device_groups) + + def get_full_load_on_sync_state(self): + return self.api.Management.DeviceGroup.get_full_load_on_sync_state(self.device_groups) + + def get_incremental_config_sync_size_maximum(self): + return self.api.Management.DeviceGroup.get_incremental_config_sync_size_maximum(self.device_groups) + + def get_network_failover_enabled_state(self): + return self.api.Management.DeviceGroup.get_network_failover_enabled_state(self.device_groups) + + def get_sync_status(self): + return self.api.Management.DeviceGroup.get_sync_status(self.device_groups) + + def get_type(self): + return self.api.Management.DeviceGroup.get_type(self.device_groups) + + +class TrafficGroups(object): + """Traffic groups class. + + F5 BIG-IP traffic groups class. + + Attributes: + api: iControl API instance. + traffic_groups: List of traffic groups. 
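+
+    Illustrative usage sketch (assumes ``api`` is a connected bigsuds
+    instance obtained via ``F5(...).get_api()``, as in main() below):
+
+        groups = TrafficGroups(api, regex=r'traffic-group-1')
+        # Each getter returns a list parallel to get_list(), so values
+        # can be zipped back onto the group names.
+        floating = dict(zip(groups.get_list(), groups.get_is_floating()))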
+ """ + + def __init__(self, api, regex=None): + self.api = api + self.traffic_groups = api.Management.TrafficGroup.get_list() + if regex: + re_filter = re.compile(regex) + self.traffic_groups = filter(re_filter.search, self.traffic_groups) + + def get_list(self): + return self.traffic_groups + + def get_auto_failback_enabled_state(self): + return self.api.Management.TrafficGroup.get_auto_failback_enabled_state(self.traffic_groups) + + def get_auto_failback_time(self): + return self.api.Management.TrafficGroup.get_auto_failback_time(self.traffic_groups) + + def get_default_device(self): + return self.api.Management.TrafficGroup.get_default_device(self.traffic_groups) + + def get_description(self): + return self.api.Management.TrafficGroup.get_description(self.traffic_groups) + + def get_ha_load_factor(self): + return self.api.Management.TrafficGroup.get_ha_load_factor(self.traffic_groups) + + def get_ha_order(self): + return self.api.Management.TrafficGroup.get_ha_order(self.traffic_groups) + + def get_is_floating(self): + return self.api.Management.TrafficGroup.get_is_floating(self.traffic_groups) + + def get_mac_masquerade_address(self): + return self.api.Management.TrafficGroup.get_mac_masquerade_address(self.traffic_groups) + + def get_unit_id(self): + return self.api.Management.TrafficGroup.get_unit_id(self.traffic_groups) + + +class Rules(object): + """Rules class. + + F5 BIG-IP iRules class. + + Attributes: + api: iControl API instance. + rules: List of iRules. + """ + + def __init__(self, api, regex=None): + self.api = api + self.rules = api.LocalLB.Rule.get_list() + if regex: + re_filter = re.compile(regex) + self.traffic_groups = filter(re_filter.search, self.rules) + + def get_list(self): + return self.rules + + def get_description(self): + return self.api.LocalLB.Rule.get_description(rule_names=self.rules) + + def get_ignore_vertification(self): + return self.api.LocalLB.Rule.get_ignore_vertification(rule_names=self.rules) + + def get_verification_status(self): + return self.api.LocalLB.Rule.get_verification_status_v2(rule_names=self.rules) + + def get_definition(self): + return [x['rule_definition'] for x in self.api.LocalLB.Rule.query_rule(rule_names=self.rules)] + + +class Nodes(object): + """Nodes class. + + F5 BIG-IP nodes class. + + Attributes: + api: iControl API instance. + nodes: List of nodes. 
+ """ + + def __init__(self, api, regex=None): + self.api = api + self.nodes = api.LocalLB.NodeAddressV2.get_list() + if regex: + re_filter = re.compile(regex) + self.nodes = filter(re_filter.search, self.nodes) + + def get_list(self): + return self.nodes + + def get_address(self): + return self.api.LocalLB.NodeAddressV2.get_address(nodes=self.nodes) + + def get_name(self): + return [x[x.rfind('/') + 1:] for x in self.nodes] + + def get_connection_limit(self): + return self.api.LocalLB.NodeAddressV2.get_connection_limit(nodes=self.nodes) + + def get_description(self): + return self.api.LocalLB.NodeAddressV2.get_description(nodes=self.nodes) + + def get_dynamic_ratio(self): + return self.api.LocalLB.NodeAddressV2.get_dynamic_ratio_v2(nodes=self.nodes) + + def get_monitor_instance(self): + return self.api.LocalLB.NodeAddressV2.get_monitor_instance(nodes=self.nodes) + + def get_monitor_rule(self): + return self.api.LocalLB.NodeAddressV2.get_monitor_rule(nodes=self.nodes) + + def get_monitor_status(self): + return self.api.LocalLB.NodeAddressV2.get_monitor_status(nodes=self.nodes) + + def get_object_status(self): + return self.api.LocalLB.NodeAddressV2.get_object_status(nodes=self.nodes) + + def get_rate_limit(self): + return self.api.LocalLB.NodeAddressV2.get_rate_limit(nodes=self.nodes) + + def get_ratio(self): + return self.api.LocalLB.NodeAddressV2.get_ratio(nodes=self.nodes) + + def get_session_status(self): + return self.api.LocalLB.NodeAddressV2.get_session_status(nodes=self.nodes) + + +class VirtualAddresses(object): + """Virtual addresses class. + + F5 BIG-IP virtual addresses class. + + Attributes: + api: iControl API instance. + virtual_addresses: List of virtual addresses. + """ + + def __init__(self, api, regex=None): + self.api = api + self.virtual_addresses = api.LocalLB.VirtualAddressV2.get_list() + if regex: + re_filter = re.compile(regex) + self.virtual_addresses = filter(re_filter.search, self.virtual_addresses) + + def get_list(self): + return self.virtual_addresses + + def get_address(self): + return self.api.LocalLB.VirtualAddressV2.get_address(self.virtual_addresses) + + def get_arp_state(self): + return self.api.LocalLB.VirtualAddressV2.get_arp_state(self.virtual_addresses) + + def get_auto_delete_state(self): + return self.api.LocalLB.VirtualAddressV2.get_auto_delete_state(self.virtual_addresses) + + def get_connection_limit(self): + return self.api.LocalLB.VirtualAddressV2.get_connection_limit(self.virtual_addresses) + + def get_description(self): + return self.api.LocalLB.VirtualAddressV2.get_description(self.virtual_addresses) + + def get_enabled_state(self): + return self.api.LocalLB.VirtualAddressV2.get_enabled_state(self.virtual_addresses) + + def get_icmp_echo_state(self): + return self.api.LocalLB.VirtualAddressV2.get_icmp_echo_state(self.virtual_addresses) + + def get_is_floating_state(self): + return self.api.LocalLB.VirtualAddressV2.get_is_floating_state(self.virtual_addresses) + + def get_netmask(self): + return self.api.LocalLB.VirtualAddressV2.get_netmask(self.virtual_addresses) + + def get_object_status(self): + return self.api.LocalLB.VirtualAddressV2.get_object_status(self.virtual_addresses) + + def get_route_advertisement_state(self): + return self.api.LocalLB.VirtualAddressV2.get_route_advertisement_state(self.virtual_addresses) + + def get_traffic_group(self): + return self.api.LocalLB.VirtualAddressV2.get_traffic_group(self.virtual_addresses) + + +class AddressClasses(object): + """Address group/class class. + + F5 BIG-IP address group/class class. 
+ + In TMUI these things are known as Address Data Groups. Examples that ship with the + box include /Common/aol and /Common/private_net + + Attributes: + api: iControl API instance. + address_classes: List of address classes. + """ + + def __init__(self, api, regex=None): + self.api = api + self.address_classes = api.LocalLB.Class.get_address_class_list() + if regex: + re_filter = re.compile(regex) + self.address_classes = filter(re_filter.search, self.address_classes) + + def get_list(self): + return self.address_classes + + def get_address_class(self): + key = self.api.LocalLB.Class.get_address_class(self.address_classes) + value = self.api.LocalLB.Class.get_address_class_member_data_value(key) + + result = [] + for idx, v in enumerate(key): + for idx2, member in enumerate(v['members']): + dg_value = dict( + value=value[idx][idx2] + ) + dg_value.update(member) + result.append(dg_value) + return result + + def get_description(self): + return self.api.LocalLB.Class.get_description(self.address_classes) + + +class Certificates(object): + """Certificates class. + + F5 BIG-IP certificates class. + + Attributes: + api: iControl API instance. + certificates: List of certificate identifiers. + certificate_list: List of certificate information structures. + """ + + def __init__(self, api, regex=None, mode="MANAGEMENT_MODE_DEFAULT"): + self.api = api + self.certificate_list = api.Management.KeyCertificate.get_certificate_list(mode=mode) + self.certificates = [x['certificate']['cert_info']['id'] for x in self.certificate_list] + if regex: + re_filter = re.compile(regex) + self.certificates = filter(re_filter.search, self.certificates) + self.certificate_list = [x for x in self.certificate_list if x['certificate']['cert_info']['id'] in self.certificates] + + def get_list(self): + return self.certificates + + def get_certificate_list(self): + return self.certificate_list + + +class Keys(object): + """Keys class. + + F5 BIG-IP keys class. + + Attributes: + api: iControl API instance. + keys: List of key identifiers. + key_list: List of key information structures. + """ + + def __init__(self, api, regex=None, mode="MANAGEMENT_MODE_DEFAULT"): + self.api = api + self.key_list = api.Management.KeyCertificate.get_key_list(mode=mode) + self.keys = [x['key_info']['id'] for x in self.key_list] + if regex: + re_filter = re.compile(regex) + self.keys = filter(re_filter.search, self.keys) + self.key_list = [x for x in self.key_list if x['key_info']['id'] in self.keys] + + def get_list(self): + return self.keys + + def get_key_list(self): + return self.key_list + + +class ProfileClientSSL(object): + """Client SSL profiles class. + + F5 BIG-IP client SSL profiles class. + + Attributes: + api: iControl API instance. + profiles: List of client SSL profiles. 
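+
+    Illustrative usage sketch (assumes ``api`` is a connected bigsuds
+    instance obtained via ``F5(...).get_api()``):
+
+        profiles = ProfileClientSSL(api, regex=r'clientssl')
+        # The file getters wrap the *_v2 iControl calls and return paths
+        # in parallel with get_list().
+        certs = dict(zip(profiles.get_list(), profiles.get_certificate_file()))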
+ """ + + def __init__(self, api, regex=None): + self.api = api + self.profiles = api.LocalLB.ProfileClientSSL.get_list() + if regex: + re_filter = re.compile(regex) + self.profiles = filter(re_filter.search, self.profiles) + + def get_list(self): + return self.profiles + + def get_alert_timeout(self): + return self.api.LocalLB.ProfileClientSSL.get_alert_timeout(self.profiles) + + def get_allow_nonssl_state(self): + return self.api.LocalLB.ProfileClientSSL.get_allow_nonssl_state(self.profiles) + + def get_authenticate_depth(self): + return self.api.LocalLB.ProfileClientSSL.get_authenticate_depth(self.profiles) + + def get_authenticate_once_state(self): + return self.api.LocalLB.ProfileClientSSL.get_authenticate_once_state(self.profiles) + + def get_ca_file(self): + return self.api.LocalLB.ProfileClientSSL.get_ca_file_v2(self.profiles) + + def get_cache_size(self): + return self.api.LocalLB.ProfileClientSSL.get_cache_size(self.profiles) + + def get_cache_timeout(self): + return self.api.LocalLB.ProfileClientSSL.get_cache_timeout(self.profiles) + + def get_certificate_file(self): + return self.api.LocalLB.ProfileClientSSL.get_certificate_file_v2(self.profiles) + + def get_chain_file(self): + return self.api.LocalLB.ProfileClientSSL.get_chain_file_v2(self.profiles) + + def get_cipher_list(self): + return self.api.LocalLB.ProfileClientSSL.get_cipher_list(self.profiles) + + def get_client_certificate_ca_file(self): + return self.api.LocalLB.ProfileClientSSL.get_client_certificate_ca_file_v2(self.profiles) + + def get_crl_file(self): + return self.api.LocalLB.ProfileClientSSL.get_crl_file_v2(self.profiles) + + def get_default_profile(self): + return self.api.LocalLB.ProfileClientSSL.get_default_profile(self.profiles) + + def get_description(self): + return self.api.LocalLB.ProfileClientSSL.get_description(self.profiles) + + def get_forward_proxy_ca_certificate_file(self): + return self.api.LocalLB.ProfileClientSSL.get_forward_proxy_ca_certificate_file(self.profiles) + + def get_forward_proxy_ca_key_file(self): + return self.api.LocalLB.ProfileClientSSL.get_forward_proxy_ca_key_file(self.profiles) + + def get_forward_proxy_ca_passphrase(self): + return self.api.LocalLB.ProfileClientSSL.get_forward_proxy_ca_passphrase(self.profiles) + + def get_forward_proxy_certificate_extension_include(self): + return self.api.LocalLB.ProfileClientSSL.get_forward_proxy_certificate_extension_include(self.profiles) + + def get_forward_proxy_certificate_lifespan(self): + return self.api.LocalLB.ProfileClientSSL.get_forward_proxy_certificate_lifespan(self.profiles) + + def get_forward_proxy_enabled_state(self): + return self.api.LocalLB.ProfileClientSSL.get_forward_proxy_enabled_state(self.profiles) + + def get_forward_proxy_lookup_by_ipaddr_port_state(self): + return self.api.LocalLB.ProfileClientSSL.get_forward_proxy_lookup_by_ipaddr_port_state(self.profiles) + + def get_handshake_timeout(self): + return self.api.LocalLB.ProfileClientSSL.get_handshake_timeout(self.profiles) + + def get_key_file(self): + return self.api.LocalLB.ProfileClientSSL.get_key_file_v2(self.profiles) + + def get_modssl_emulation_state(self): + return self.api.LocalLB.ProfileClientSSL.get_modssl_emulation_state(self.profiles) + + def get_passphrase(self): + return self.api.LocalLB.ProfileClientSSL.get_passphrase(self.profiles) + + def get_peer_certification_mode(self): + return self.api.LocalLB.ProfileClientSSL.get_peer_certification_mode(self.profiles) + + def get_profile_mode(self): + return 
self.api.LocalLB.ProfileClientSSL.get_profile_mode(self.profiles) + + def get_renegotiation_maximum_record_delay(self): + return self.api.LocalLB.ProfileClientSSL.get_renegotiation_maximum_record_delay(self.profiles) + + def get_renegotiation_period(self): + return self.api.LocalLB.ProfileClientSSL.get_renegotiation_period(self.profiles) + + def get_renegotiation_state(self): + return self.api.LocalLB.ProfileClientSSL.get_renegotiation_state(self.profiles) + + def get_renegotiation_throughput(self): + return self.api.LocalLB.ProfileClientSSL.get_renegotiation_throughput(self.profiles) + + def get_retain_certificate_state(self): + return self.api.LocalLB.ProfileClientSSL.get_retain_certificate_state(self.profiles) + + def get_secure_renegotiation_mode(self): + return self.api.LocalLB.ProfileClientSSL.get_secure_renegotiation_mode(self.profiles) + + def get_server_name(self): + return self.api.LocalLB.ProfileClientSSL.get_server_name(self.profiles) + + def get_session_ticket_state(self): + return self.api.LocalLB.ProfileClientSSL.get_session_ticket_state(self.profiles) + + def get_sni_default_state(self): + return self.api.LocalLB.ProfileClientSSL.get_sni_default_state(self.profiles) + + def get_sni_require_state(self): + return self.api.LocalLB.ProfileClientSSL.get_sni_require_state(self.profiles) + + def get_ssl_option(self): + return self.api.LocalLB.ProfileClientSSL.get_ssl_option(self.profiles) + + def get_strict_resume_state(self): + return self.api.LocalLB.ProfileClientSSL.get_strict_resume_state(self.profiles) + + def get_unclean_shutdown_state(self): + return self.api.LocalLB.ProfileClientSSL.get_unclean_shutdown_state(self.profiles) + + def get_is_base_profile(self): + return self.api.LocalLB.ProfileClientSSL.is_base_profile(self.profiles) + + def get_is_system_profile(self): + return self.api.LocalLB.ProfileClientSSL.is_system_profile(self.profiles) + + +class SystemInfo(object): + """System information class. + + F5 BIG-IP system information class. + + Attributes: + api: iControl API instance. + """ + + def __init__(self, api): + self.api = api + + def get_base_mac_address(self): + return self.api.System.SystemInfo.get_base_mac_address() + + def get_blade_temperature(self): + return self.api.System.SystemInfo.get_blade_temperature() + + def get_chassis_slot_information(self): + return self.api.System.SystemInfo.get_chassis_slot_information() + + def get_globally_unique_identifier(self): + return self.api.System.SystemInfo.get_globally_unique_identifier() + + def get_group_id(self): + return self.api.System.SystemInfo.get_group_id() + + def get_hardware_information(self): + return self.api.System.SystemInfo.get_hardware_information() + + def get_marketing_name(self): + return self.api.System.SystemInfo.get_marketing_name() + + def get_product_information(self): + return self.api.System.SystemInfo.get_product_information() + + def get_pva_version(self): + return self.api.System.SystemInfo.get_pva_version() + + def get_system_id(self): + return self.api.System.SystemInfo.get_system_id() + + def get_system_information(self): + return self.api.System.SystemInfo.get_system_information() + + def get_time(self): + return self.api.System.SystemInfo.get_time() + + def get_time_zone(self): + return self.api.System.SystemInfo.get_time_zone() + + def get_uptime(self): + return self.api.System.SystemInfo.get_uptime() + + +class ProvisionInfo(object): + """Provision information class. + + F5 BIG-IP provision information class. + + Attributes: + api: iControl API instance. 
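+
+    Sketch of the name normalization performed by the getters below; the
+    iControl API returns TMOS_MODULE_* constants, which are lowercased and
+    stripped of their prefix:
+
+        'TMOS_MODULE_LTM' -> 'ltm'
+        'TMOS_MODULE_AFM' -> 'afm'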
+ """ + + def __init__(self, api): + self.api = api + + def get_list(self): + result = [] + list = self.api.Management.Provision.get_list() + for item in list: + item = item.lower().replace('tmos_module_', '') + result.append(item) + return result + + def get_provisioned_list(self): + result = [] + list = self.api.Management.Provision.get_provisioned_list() + for item in list: + item = item.lower().replace('tmos_module_', '') + result.append(item) + return result + + +def generate_dict(api_obj, fields): + result_dict = {} + lists = [] + supported_fields = [] + if api_obj.get_list(): + for field in fields: + try: + api_response = getattr(api_obj, "get_" + field)() + except (MethodNotFound, WebFault): + pass + else: + lists.append(api_response) + supported_fields.append(field) + for i, j in enumerate(api_obj.get_list()): + temp = {} + temp.update([(item[0], item[1][i]) for item in zip(supported_fields, lists)]) + result_dict[j] = temp + return result_dict + + +def generate_simple_dict(api_obj, fields): + result_dict = {} + for field in fields: + try: + api_response = getattr(api_obj, "get_" + field)() + except (MethodNotFound, WebFault): + pass + else: + result_dict[field] = api_response + return result_dict + + +def generate_interface_dict(f5, regex): + interfaces = Interfaces(f5.get_api(), regex) + fields = ['active_media', 'actual_flow_control', 'bundle_state', + 'description', 'dual_media_state', 'enabled_state', 'if_index', + 'learning_mode', 'lldp_admin_status', 'lldp_tlvmap', + 'mac_address', 'media', 'media_option', 'media_option_sfp', + 'media_sfp', 'media_speed', 'media_status', 'mtu', + 'phy_master_slave_mode', 'prefer_sfp_state', 'flow_control', + 'sflow_poll_interval', 'sflow_poll_interval_global', + 'sfp_media_state', 'stp_active_edge_port_state', + 'stp_enabled_state', 'stp_link_type', + 'stp_protocol_detection_reset_state'] + return generate_dict(interfaces, fields) + + +def generate_self_ip_dict(f5, regex): + self_ips = SelfIPs(f5.get_api(), regex) + fields = ['address', 'allow_access_list', 'description', + 'enforced_firewall_policy', 'floating_state', 'fw_rule', + 'netmask', 'staged_firewall_policy', 'traffic_group', + 'vlan', 'is_traffic_group_inherited'] + return generate_dict(self_ips, fields) + + +def generate_trunk_dict(f5, regex): + trunks = Trunks(f5.get_api(), regex) + fields = ['active_lacp_state', 'configured_member_count', 'description', + 'distribution_hash_option', 'interface', 'lacp_enabled_state', + 'lacp_timeout_option', 'link_selection_policy', 'media_speed', + 'media_status', 'operational_member_count', 'stp_enabled_state', + 'stp_protocol_detection_reset_state'] + return generate_dict(trunks, fields) + + +def generate_vlan_dict(f5, regex): + vlans = Vlans(f5.get_api(), regex) + fields = ['auto_lasthop', 'cmp_hash_algorithm', 'description', + 'dynamic_forwarding', 'failsafe_action', 'failsafe_state', + 'failsafe_timeout', 'if_index', 'learning_mode', + 'mac_masquerade_address', 'member', 'mtu', + 'sflow_poll_interval', 'sflow_poll_interval_global', + 'sflow_sampling_rate', 'sflow_sampling_rate_global', + 'source_check_state', 'true_mac_address', 'vlan_id'] + return generate_dict(vlans, fields) + + +def generate_vs_dict(f5, regex): + virtual_servers = VirtualServers(f5.get_api(), regex) + fields = ['actual_hardware_acceleration', 'authentication_profile', + 'auto_lasthop', 'bw_controller_policy', 'clone_pool', + 'cmp_enable_mode', 'connection_limit', 'connection_mirror_state', + 'default_pool_name', 'description', 'destination', + 'enabled_state', 
'enforced_firewall_policy', + 'fallback_persistence_profile', 'fw_rule', 'gtm_score', + 'last_hop_pool', 'nat64_state', 'object_status', + 'persistence_profile', 'profile', 'protocol', + 'rate_class', 'rate_limit', 'rate_limit_destination_mask', + 'rate_limit_mode', 'rate_limit_source_mask', 'related_rule', + 'rule', 'security_log_profile', 'snat_pool', 'snat_type', + 'source_address', 'source_address_translation_lsn_pool', + 'source_address_translation_snat_pool', + 'source_address_translation_type', 'source_port_behavior', + 'staged_firewall_policy', 'translate_address_state', + 'translate_port_state', 'type', 'vlan', 'wildmask', + 'name'] + return generate_dict(virtual_servers, fields) + + +def generate_pool_dict(f5, regex): + pools = Pools(f5.get_api(), regex) + fields = ['action_on_service_down', 'active_member_count', + 'aggregate_dynamic_ratio', 'allow_nat_state', + 'allow_snat_state', 'client_ip_tos', 'client_link_qos', + 'description', 'gateway_failsafe_device', + 'ignore_persisted_weight_state', 'lb_method', 'member', + 'minimum_active_member', 'minimum_up_member', + 'minimum_up_member_action', 'minimum_up_member_enabled_state', + 'monitor_association', 'monitor_instance', 'object_status', + 'profile', 'queue_depth_limit', + 'queue_on_connection_limit_state', 'queue_time_limit', + 'reselect_tries', 'server_ip_tos', 'server_link_qos', + 'simple_timeout', 'slow_ramp_time', 'name'] + return generate_dict(pools, fields) + + +def generate_device_dict(f5, regex): + devices = Devices(f5.get_api(), regex) + fields = ['active_modules', 'base_mac_address', 'blade_addresses', + 'build', 'chassis_id', 'chassis_type', 'comment', + 'configsync_address', 'contact', 'description', 'edition', + 'failover_state', 'hostname', 'inactive_modules', 'location', + 'management_address', 'marketing_name', 'multicast_address', + 'optional_modules', 'platform_id', 'primary_mirror_address', + 'product', 'secondary_mirror_address', 'software_version', + 'timelimited_modules', 'timezone', 'unicast_addresses'] + return generate_dict(devices, fields) + + +def generate_device_group_dict(f5, regex): + device_groups = DeviceGroups(f5.get_api(), regex) + fields = ['all_preferred_active', 'autosync_enabled_state', 'description', + 'device', 'full_load_on_sync_state', + 'incremental_config_sync_size_maximum', + 'network_failover_enabled_state', 'sync_status', 'type'] + return generate_dict(device_groups, fields) + + +def generate_traffic_group_dict(f5, regex): + traffic_groups = TrafficGroups(f5.get_api(), regex) + fields = ['auto_failback_enabled_state', 'auto_failback_time', + 'default_device', 'description', 'ha_load_factor', + 'ha_order', 'is_floating', 'mac_masquerade_address', + 'unit_id'] + return generate_dict(traffic_groups, fields) + + +def generate_rule_dict(f5, regex): + rules = Rules(f5.get_api(), regex) + fields = ['definition', 'description', 'ignore_vertification', + 'verification_status'] + return generate_dict(rules, fields) + + +def generate_node_dict(f5, regex): + nodes = Nodes(f5.get_api(), regex) + fields = ['name', 'address', 'connection_limit', 'description', 'dynamic_ratio', + 'monitor_instance', 'monitor_rule', 'monitor_status', + 'object_status', 'rate_limit', 'ratio', 'session_status'] + return generate_dict(nodes, fields) + + +def generate_virtual_address_dict(f5, regex): + virtual_addresses = VirtualAddresses(f5.get_api(), regex) + fields = ['address', 'arp_state', 'auto_delete_state', 'connection_limit', + 'description', 'enabled_state', 'icmp_echo_state', + 'is_floating_state', 
'netmask', 'object_status', + 'route_advertisement_state', 'traffic_group'] + return generate_dict(virtual_addresses, fields) + + +def generate_address_class_dict(f5, regex): + address_classes = AddressClasses(f5.get_api(), regex) + fields = ['address_class', 'description'] + return generate_dict(address_classes, fields) + + +def generate_certificate_dict(f5, regex): + certificates = Certificates(f5.get_api(), regex) + return dict(zip(certificates.get_list(), certificates.get_certificate_list())) + + +def generate_key_dict(f5, regex): + keys = Keys(f5.get_api(), regex) + return dict(zip(keys.get_list(), keys.get_key_list())) + + +def generate_client_ssl_profile_dict(f5, regex): + profiles = ProfileClientSSL(f5.get_api(), regex) + fields = ['alert_timeout', 'allow_nonssl_state', 'authenticate_depth', + 'authenticate_once_state', 'ca_file', 'cache_size', + 'cache_timeout', 'certificate_file', 'chain_file', + 'cipher_list', 'client_certificate_ca_file', 'crl_file', + 'default_profile', 'description', + 'forward_proxy_ca_certificate_file', 'forward_proxy_ca_key_file', + 'forward_proxy_ca_passphrase', + 'forward_proxy_certificate_extension_include', + 'forward_proxy_certificate_lifespan', + 'forward_proxy_enabled_state', + 'forward_proxy_lookup_by_ipaddr_port_state', 'handshake_timeout', + 'key_file', 'modssl_emulation_state', 'passphrase', + 'peer_certification_mode', 'profile_mode', + 'renegotiation_maximum_record_delay', 'renegotiation_period', + 'renegotiation_state', 'renegotiation_throughput', + 'retain_certificate_state', 'secure_renegotiation_mode', + 'server_name', 'session_ticket_state', 'sni_default_state', + 'sni_require_state', 'ssl_option', 'strict_resume_state', + 'unclean_shutdown_state', 'is_base_profile', 'is_system_profile'] + return generate_dict(profiles, fields) + + +def generate_system_info_dict(f5): + system_info = SystemInfo(f5.get_api()) + fields = ['base_mac_address', + 'blade_temperature', 'chassis_slot_information', + 'globally_unique_identifier', 'group_id', + 'hardware_information', + 'marketing_name', + 'product_information', 'pva_version', 'system_id', + 'system_information', 'time', + 'time_zone', 'uptime'] + return generate_simple_dict(system_info, fields) + + +def generate_software_list(f5): + software = Software(f5.get_api()) + software_list = software.get_all_software_status() + return software_list + + +def generate_provision_dict(f5): + provisioned = ProvisionInfo(f5.get_api()) + fields = ['list', 'provisioned_list'] + return generate_simple_dict(provisioned, fields) + + +class ArgumentSpec(object): + def __init__(self): + self.supports_check_mode = False + argument_spec = dict( + session=dict(type='bool', default='no'), + include=dict( + type='raw', + required=True, + choices=[ + 'address_class', 'certificate', 'client_ssl_profile', 'device', + 'device_group', 'interface', 'key', 'node', 'pool', 'provision', + 'rule', 'self_ip', 'software', 'system_info', 'traffic_group', + 'trunk', 'virtual_address', 'virtual_server', 'vlan' + ] + ), + filter=dict(type='str'), + ) + self.argument_spec = {} + self.argument_spec.update(f5_argument_spec) + self.argument_spec.update(argument_spec) + + +def main(): + spec = ArgumentSpec() + + module = AnsibleModule( + argument_spec=spec.argument_spec + ) + client = F5BaseClient(**module.params) + provider = client.merge_provider_params() + + if not bigsuds_found: + module.fail_json(msg="the python suds and bigsuds modules are required") + + server = provider['server'] + server_port = provider['server_port'] + user = 
provider['user'] + password = provider['password'] + validate_certs = provider['validate_certs'] + + session = module.params['session'] + fact_filter = module.params['filter'] + + if validate_certs: + import ssl + if not hasattr(ssl, 'SSLContext'): + module.fail_json( + msg='bigsuds does not support verifying certificates with python < 2.7.9. Either update python or set validate_certs=False on the task' + ) + + if fact_filter: + regex = fnmatch.translate(fact_filter) + else: + regex = None + if isinstance(module.params['include'], string_types): + includes = module.params['include'].split(',') + else: + includes = module.params['include'] + include = [x.lower() for x in includes] + valid_includes = ('address_class', 'certificate', 'client_ssl_profile', + 'device', 'device_group', 'interface', 'key', 'node', + 'pool', 'provision', 'rule', 'self_ip', 'software', + 'system_info', 'traffic_group', 'trunk', + 'virtual_address', 'virtual_server', 'vlan') + include_test = (x in valid_includes for x in include) + if not all(include_test): + module.fail_json(msg="Value of include must be one or more of: %s, got: %s" % (",".join(valid_includes), ",".join(include))) + + try: + facts = {} + + if len(include) > 0: + f5 = F5(server, user, password, session, validate_certs, server_port) + saved_active_folder = f5.get_active_folder() + saved_recursive_query_state = f5.get_recursive_query_state() + if saved_active_folder != "/": + f5.set_active_folder("/") + if saved_recursive_query_state != "STATE_ENABLED": + f5.enable_recursive_query_state() + + if 'interface' in include: + facts['interface'] = generate_interface_dict(f5, regex) + if 'self_ip' in include: + facts['self_ip'] = generate_self_ip_dict(f5, regex) + if 'trunk' in include: + facts['trunk'] = generate_trunk_dict(f5, regex) + if 'vlan' in include: + facts['vlan'] = generate_vlan_dict(f5, regex) + if 'virtual_server' in include: + facts['virtual_server'] = generate_vs_dict(f5, regex) + if 'pool' in include: + facts['pool'] = generate_pool_dict(f5, regex) + if 'provision' in include: + facts['provision'] = generate_provision_dict(f5) + if 'device' in include: + facts['device'] = generate_device_dict(f5, regex) + if 'device_group' in include: + facts['device_group'] = generate_device_group_dict(f5, regex) + if 'traffic_group' in include: + facts['traffic_group'] = generate_traffic_group_dict(f5, regex) + if 'rule' in include: + facts['rule'] = generate_rule_dict(f5, regex) + if 'node' in include: + facts['node'] = generate_node_dict(f5, regex) + if 'virtual_address' in include: + facts['virtual_address'] = generate_virtual_address_dict(f5, regex) + if 'address_class' in include: + facts['address_class'] = generate_address_class_dict(f5, regex) + if 'software' in include: + facts['software'] = generate_software_list(f5) + if 'certificate' in include: + facts['certificate'] = generate_certificate_dict(f5, regex) + if 'key' in include: + facts['key'] = generate_key_dict(f5, regex) + if 'client_ssl_profile' in include: + facts['client_ssl_profile'] = generate_client_ssl_profile_dict(f5, regex) + if 'system_info' in include: + facts['system_info'] = generate_system_info_dict(f5) + + # restore saved state + if saved_active_folder and saved_active_folder != "/": + f5.set_active_folder(saved_active_folder) + if saved_recursive_query_state and \ + saved_recursive_query_state != "STATE_ENABLED": + f5.set_recursive_query_state(saved_recursive_query_state) + + result = dict( + ansible_facts=facts, + ) + result.update(**facts) + + except Exception as e: + 
module.fail_json(msg="received exception: %s\ntraceback: %s" % (e, traceback.format_exc()))
+
+    module.exit_json(**result)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/network/f5/bigip_firewall_address_list.py b/plugins/modules/network/f5/bigip_firewall_address_list.py
new file mode 100644
index 0000000000..22e175c23e
--- /dev/null
+++ b/plugins/modules/network/f5/bigip_firewall_address_list.py
@@ -0,0 +1,979 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright: (c) 2017, F5 Networks Inc.
+# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'certified'}
+
+DOCUMENTATION = r'''
+---
+module: bigip_firewall_address_list
+short_description: Manage address lists on BIG-IP AFM
+description:
+  - Manages the AFM address lists on a BIG-IP. This module can be used to add
+    and remove address list entries.
+options:
+  name:
+    description:
+      - Specifies the name of the address list.
+    type: str
+    required: True
+  partition:
+    description:
+      - Device partition to manage resources on.
+    type: str
+    default: Common
+  description:
+    description:
+      - Description of the address list.
+    type: str
+  geo_locations:
+    description:
+      - List of geolocations specified by their C(country) and C(region).
+    suboptions:
+      country:
+        description:
+          - The country name, or code, of the geolocation to use.
+          - In addition to the country full names, you may also specify their abbreviated
+            form, such as C(US) instead of C(United States).
+          - Valid country codes can be found at https://countrycode.org/.
+        type: str
+        required: True
+        choices:
+          - Any valid 2 character ISO country code.
+          - Any valid country name.
+      region:
+        description:
+          - Region name of the country to use.
+        type: str
+    type: list
+  addresses:
+    description:
+      - Individual addresses that you want to add to the list. These addresses differ
+        from ranges and from lists of lists, which can be specified in C(address_ranges)
+        and C(address_lists) respectively.
+      - This list can also include networks that have CIDR notation.
+    type: list
+  address_ranges:
+    description:
+      - A list of address ranges where the range starts with an IP address, is followed
+        by a dash (-), and then a second IP address.
+      - If the first address is greater than the second, the addresses will be reversed
+        so as to be properly formatted. For example, C(2.2.2.2-1.1.1.1) would become
+        C(1.1.1.1-2.2.2.2).
+    type: list
+  address_lists:
+    description:
+      - Simple list of existing address lists to add to this list. Address lists can be
+        specified in either their fully qualified name (/Common/foo) or their short
+        name (foo). If a short name is used, the C(partition) argument will automatically
+        be prepended to the short name.
+    type: list
+  fqdns:
+    description:
+      - A list of fully qualified domain names (FQDNs).
+      - An FQDN has at least one decimal point in it, separating the host from the domain.
+      - Adding FQDNs to a list requires that a global FQDN resolver be configured.
+        At the moment, this must be done either via C(bigip_command) or in the GUI
+        of BIG-IP. If using C(bigip_command), this can be done with C(tmsh modify security
+        firewall global-fqdn-policy FOO) where C(FOO) is a DNS resolver configured
+        at C(tmsh create net dns-resolver FOO).
+    type: list
+  state:
+    description:
+      - When C(present), ensures that the address list and entries exist.
+      - When C(absent), ensures the address list is removed.
+    type: str
+    choices:
+      - present
+      - absent
+    default: present
+extends_documentation_fragment:
+- f5networks.f5_modules.f5
+
+author:
+  - Tim Rupp (@caphrim007)
+  - Wojciech Wypior (@wojtek0806)
+'''
+
+EXAMPLES = r'''
+- name: Create an address list
+  bigip_firewall_address_list:
+    name: foo
+    addresses:
+      - 3.3.3.3
+      - 4.4.4.4
+      - 5.5.5.5
+    provider:
+      password: secret
+      server: lb.mydomain.com
+      user: admin
+  delegate_to: localhost
+'''
+
+RETURN = r'''
+description:
+  description: The new description of the address list.
+  returned: changed
+  type: str
+  sample: My address list
+addresses:
+  description: The new list of addresses applied to the address list.
+  returned: changed
+  type: list
+  sample: [1.1.1.1, 2.2.2.2]
+address_ranges:
+  description: The new list of address ranges applied to the address list.
+  returned: changed
+  type: list
+  sample: [1.1.1.1-2.2.2.2, 3.3.3.3-4.4.4.4]
+address_lists:
+  description: The new list of address list names applied to the address list.
+  returned: changed
+  type: list
+  sample: [/Common/list1, /Common/list2]
+fqdns:
+  description: The new list of FQDN names applied to the address list.
+  returned: changed
+  type: list
+  sample: [google.com, mit.edu]
+geo_locations:
+  description: The new list of geolocations applied to the address list.
+  returned: changed
+  type: complex
+  contains:
+    country:
+      description: Country of the geolocation.
+      returned: changed
+      type: str
+      sample: US
+    region:
+      description: Region of the geolocation.
+      returned: changed
+      type: str
+      sample: California
+'''
+
+import re
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.basic import env_fallback
+
+try:
+    from library.module_utils.network.f5.bigip import F5RestClient
+    from library.module_utils.network.f5.common import F5ModuleError
+    from library.module_utils.network.f5.common import AnsibleF5Parameters
+    from library.module_utils.network.f5.common import fq_name
+    from library.module_utils.network.f5.common import f5_argument_spec
+    from library.module_utils.network.f5.common import transform_name
+    from library.module_utils.compat.ipaddress import ip_address
+    from library.module_utils.compat.ipaddress import ip_interface
+    from library.module_utils.network.f5.ipaddress import is_valid_ip
+    from library.module_utils.network.f5.ipaddress import is_valid_ip_interface
+except ImportError:
+    from ansible_collections.f5networks.f5_modules.plugins.module_utils.network.f5.bigip import F5RestClient
+    from ansible_collections.f5networks.f5_modules.plugins.module_utils.network.f5.common import F5ModuleError
+    from ansible_collections.f5networks.f5_modules.plugins.module_utils.network.f5.common import AnsibleF5Parameters
+    from ansible_collections.f5networks.f5_modules.plugins.module_utils.network.f5.common import fq_name
+    from ansible_collections.f5networks.f5_modules.plugins.module_utils.network.f5.common import f5_argument_spec
+    from ansible_collections.f5networks.f5_modules.plugins.module_utils.network.f5.common import transform_name
+    from ansible_collections.ansible.netcommon.plugins.module_utils.compat.ipaddress import ip_address
+    from ansible_collections.ansible.netcommon.plugins.module_utils.compat.ipaddress import ip_interface
+    from ansible_collections.f5networks.f5_modules.plugins.module_utils.network.f5.ipaddress import is_valid_ip
+    from ansible_collections.f5networks.f5_modules.plugins.module_utils.network.f5.ipaddress import is_valid_ip_interface
+
+
+class Parameters(AnsibleF5Parameters):
+    api_map = {
+        'addressLists': 'address_lists',
+        'geo': 'geo_locations',
+    }
+
+    api_attributes = [
+        'addressLists',
+        'addresses',
+        'description',
+        'fqdns',
+        'geo',
+    ]
+
+    returnables = [
+        'addresses',
+        'address_ranges',
+        'address_lists',
+        'description',
+        'fqdns',
+        'geo_locations',
+    ]
+
+    updatables = [
+        'addresses',
+        'address_ranges',
+        'address_lists',
+        'description',
+        'fqdns',
+        'geo_locations',
+    ]
+
+
+class ApiParameters(Parameters):
+    @property
+    def address_ranges(self):
+        if self._values['addresses'] is None:
+            return None
+        result = []
+        for address_range in self._values['addresses']:
+            if '-' not in address_range['name']:
+                continue
+            result.append(address_range['name'].strip())
+        result = sorted(result)
+        return result
+
+    @property
+    def address_lists(self):
+        if self._values['address_lists'] is None:
+            return None
+        result = []
+        for x in self._values['address_lists']:
+            item = '/{0}/{1}'.format(x['partition'], x['name'])
+            result.append(item)
+        result = sorted(result)
+        return result
+
+    @property
+    def addresses(self):
+        if self._values['addresses'] is None:
+            return None
+        result = [x['name'] for x in self._values['addresses'] if '-' not in x['name']]
+        result = sorted(result)
+        return result
+
+    @property
+    def fqdns(self):
+        if self._values['fqdns'] is None:
+            return None
+        result = [str(x['name']) for x in self._values['fqdns']]
+        result = sorted(result)
+        return result
+
+    @property
+    def geo_locations(self):
+        if self._values['geo_locations'] is None:
+            return None
+        result = [str(x['name']) for x in self._values['geo_locations']]
+        result = sorted(result)
+        return result
+
+
+class ModuleParameters(Parameters):
+    def __init__(self, params=None):
+        super(ModuleParameters, self).__init__(params=params)
+        self.country_iso_map = {
+            'Afghanistan': 'AF',
+            'Albania': 'AL',
+            'Algeria': 'DZ',
+            'American Samoa': 'AS',
+            'Andorra': 'AD',
+            'Angola': 'AO',
+            'Anguilla': 'AI',
+            'Antarctica': 'AQ',
+            'Antigua and Barbuda': 'AG',
+            'Argentina': 'AR',
+            'Armenia': 'AM',
+            'Aruba': 'AW',
+            'Australia': 'AU',
+            'Austria': 'AT',
+            'Azerbaijan': 'AZ',
+            'Bahamas': 'BS',
+            'Bahrain': 'BH',
+            'Bangladesh': 'BD',
+            'Barbados': 'BB',
+            'Belarus': 'BY',
+            'Belgium': 'BE',
+            'Belize': 'BZ',
+            'Benin': 'BJ',
+            'Bermuda': 'BM',
+            'Bhutan': 'BT',
+            'Bolivia': 'BO',
+            'Bosnia and Herzegovina': 'BA',
+            'Botswana': 'BW',
+            'Brazil': 'BR',
+            'Brunei': 'BN',
+            'Bulgaria': 'BG',
+            'Burkina Faso': 'BF',
+            'Burundi': 'BI',
+            'Cameroon': 'CM',
+            'Canada': 'CA',
+            'Cape Verde': 'CV',
+            'Central African Republic': 'CF',
+            'Chile': 'CL',
+            'China': 'CN',
+            'Christmas Island': 'CX',
+            'Cocos Islands': 'CC',
+            'Colombia': 'CO',
+            'Cook Islands': 'CK',
+            'Costa Rica': 'CR',
+            'Cuba': 'CU',
+            'Curacao': 'CW',
+            'Cyprus': 'CY',
+            'Czech Republic': 'CZ',
+            'Democratic Republic of the Congo': 'CD',
+            'Denmark': 'DK',
+            'Djibouti': 'DJ',
+            'Dominica': 'DM',
+            'Dominican Republic': 'DO',
+            'Ecuador': 'EC',
+            'Egypt': 'EG',
+            'Eritrea': 'ER',
+            'Estonia': 'EE',
+            'Ethiopia': 'ET',
+            'Falkland Islands': 'FK',
+            'Faroe Islands': 'FO',
+            'Fiji': 'FJ',
+            'Finland': 'FI',
+            'France': 'FR',
+            'French Polynesia': 'PF',
+            'Gabon': 'GA',
+            'Gambia': 'GM',
+            'Georgia': 'GE',
+            'Germany': 'DE',
+            'Ghana': 'GH',
+            'Gibraltar': 'GI',
+            'Greece': 'GR',
+            'Greenland': 'GL',
+            'Grenada': 'GD',
+            'Guam': 'GU',
+            'Guatemala': 'GT',
+            'Guernsey': 'GG',
+            'Guinea': 'GN',
+            'Guinea-Bissau': 'GW',
+            'Guyana': 'GY',
+            'Haiti': 'HT',
+            'Honduras': 'HN',
+            'Hong Kong': 'HK',
+            'Hungary': 'HU',
+            'Iceland': 'IS',
+            'India': 'IN',
+            'Indonesia': 'ID',
+            'Iran': 'IR',
+            'Iraq': 'IQ',
+            'Ireland': 'IE',
+            'Isle of Man': 'IM',
+            'Israel': 'IL',
+            'Italy': 'IT',
+            'Ivory Coast': 'CI',
+            'Jamaica': 'JM',
+            'Japan': 'JP',
+            'Jersey': 'JE',
+            'Jordan': 'JO',
+            'Kazakhstan': 'KZ',
+            'Laos': 'LA',
+            'Latvia': 'LV',
+            'Lebanon': 'LB',
+            'Lesotho': 'LS',
+            'Liberia': 'LR',
+            'Libya': 'LY',
+            'Liechtenstein': 'LI',
+            'Lithuania': 'LT',
+            'Luxembourg': 'LU',
+            'Macau': 'MO',
+            'Macedonia': 'MK',
+            'Madagascar': 'MG',
+            'Malawi': 'MW',
+            'Malaysia': 'MY',
+            'Maldives': 'MV',
+            'Mali': 'ML',
+            'Malta': 'MT',
+            'Marshall Islands': 'MH',
+            'Mauritania': 'MR',
+            'Mauritius': 'MU',
+            'Mayotte': 'YT',
+            'Mexico': 'MX',
+            'Micronesia': 'FM',
+            'Moldova': 'MD',
+            'Monaco': 'MC',
+            'Mongolia': 'MN',
+            'Montenegro': 'ME',
+            'Montserrat': 'MS',
+            'Morocco': 'MA',
+            'Mozambique': 'MZ',
+            'Myanmar': 'MM',
+            'Namibia': 'NA',
+            'Nauru': 'NR',
+            'Nepal': 'NP',
+            'Netherlands': 'NL',
+            'Netherlands Antilles': 'AN',
+            'New Caledonia': 'NC',
+            'New Zealand': 'NZ',
+            'Nicaragua': 'NI',
+            'Niger': 'NE',
+            'Nigeria': 'NG',
+            'Niue': 'NU',
+            'North Korea': 'KP',
+            'Northern Mariana Islands': 'MP',
+            'Norway': 'NO',
+            'Oman': 'OM',
+            'Pakistan': 'PK',
+            'Palau': 'PW',
+            'Palestine': 'PS',
+            'Panama': 'PA',
+            'Papua New Guinea': 'PG',
+            'Paraguay': 'PY',
+            'Peru': 'PE',
+            'Philippines': 'PH',
+            'Pitcairn': 'PN',
+            'Poland': 'PL',
+            'Portugal': 'PT',
+            'Puerto Rico': 'PR',
+            'Qatar': 'QA',
+            'Republic of the Congo': 'CG',
+            'Reunion': 'RE',
+            'Romania': 'RO',
+            'Russia': 'RU',
+            'Rwanda': 'RW',
+            'Saint Barthelemy': 'BL',
+            'Saint Helena': 'SH',
+            'Saint Kitts and Nevis': 'KN',
+            'Saint Lucia': 'LC',
+            'Saint Martin': 'MF',
+            'Saint Pierre and Miquelon': 'PM',
+            'Saint Vincent and the Grenadines': 'VC',
+            'Samoa': 'WS',
+            'San Marino': 'SM',
+            'Sao Tome and Principe': 'ST',
+            'Saudi Arabia': 'SA',
+            'Senegal': 'SN',
+            'Serbia': 'RS',
+            'Seychelles': 'SC',
+            'Sierra Leone': 'SL',
+            'Singapore': 'SG',
+            'Sint Maarten': 'SX',
+            'Slovakia': 'SK',
+            'Slovenia': 'SI',
+            'Solomon Islands': 'SB',
+            'Somalia': 'SO',
+            'South Africa': 'ZA',
+            'South Korea': 'KR',
+            'South Sudan': 'SS',
+            'Spain': 'ES',
+            'Sri Lanka': 'LK',
+            'Sudan': 'SD',
+            'Suriname': 'SR',
+            'Svalbard and Jan Mayen': 'SJ',
+            'Swaziland': 'SZ',
+            'Sweden': 'SE',
+            'Switzerland': 'CH',
+            'Syria': 'SY',
+            'Taiwan': 'TW',
+            'Tajikistan': 'TJ',
+            'Tanzania': 'TZ',
+            'Thailand': 'TH',
+            'Togo': 'TG',
+            'Tokelau': 'TK',
+            'Tonga': 'TO',
+            'Trinidad and Tobago': 'TT',
+            'Tunisia': 'TN',
+            'Turkey': 'TR',
+            'Turkmenistan': 'TM',
+            'Turks and Caicos Islands': 'TC',
+            'Tuvalu': 'TV',
+            'U.S. Virgin Islands': 'VI',
+            'Uganda': 'UG',
+            'Ukraine': 'UA',
+            'United Arab Emirates': 'AE',
+            'United Kingdom': 'GB',
+            'United States': 'US',
+            'Uruguay': 'UY',
+            'Uzbekistan': 'UZ',
+            'Vanuatu': 'VU',
+            'Vatican': 'VA',
+            'Venezuela': 'VE',
+            'Vietnam': 'VN',
+            'Wallis and Futuna': 'WF',
+            'Western Sahara': 'EH',
+            'Yemen': 'YE',
+            'Zambia': 'ZM',
+            'Zimbabwe': 'ZW'
+        }
+        self.choices_iso_codes = self.country_iso_map.values()
+
+    def is_valid_hostname(self, host):
+        """Reasonable attempt at validating a hostname
+
+        Compiled from various paragraphs outlined here
+        https://tools.ietf.org/html/rfc3696#section-2
+        https://tools.ietf.org/html/rfc1123
+
+        Notably,
+        * Host software MUST handle host names of up to 63 characters and
+          SHOULD handle host names of up to 255 characters.
+        * The "LDH rule", after the characters that it permits. (letters, digits, hyphen)
+        * If the hyphen is used, it is not permitted to appear at
+          either the beginning or end of a label
+
+        :param host:
+        :return:
+        """
+        if len(host) > 255:
+            return False
+        host = host.rstrip(".")
+        allowed = re.compile(r'(?!-)[A-Z0-9-]{1,63}(?<!-)$', re.IGNORECASE)
+        return all(allowed.match(x) for x in host.split("."))
+
+    @property
+    def addresses(self):
+        if self._values['addresses'] is None:
+            return None
+        result = []
+        for x in self._values['addresses']:
+            if is_valid_ip(x):
+                result.append(str(x))
+            elif is_valid_ip_interface(x):
+                result.append(str(ip_interface(u'{0}'.format(x))))
+            else:
+                raise F5ModuleError(
+                    "Address {0} must be either an IPv4 or IPv6 address or network.".format(x)
+                )
+        result = sorted(result)
+        return result
+
+    @property
+    def address_ranges(self):
+        if self._values['address_ranges'] is None:
+            return None
+        result = []
+        for address_range in self._values['address_ranges']:
+            start, stop = address_range.split('-')
+            start = start.strip()
+            stop = stop.strip()
+            start = ip_address(u'{0}'.format(start))
+            stop = ip_address(u'{0}'.format(stop))
+            if start.version != stop.version:
+                raise F5ModuleError(
+                    "When specifying a range, IP addresses must be of the same type; IPv4 or IPv6."
+                )
+            if int(start) > int(stop):
+                stop, start = start, stop
+            item = '{0}-{1}'.format(str(start), str(stop))
+            result.append(item)
+        result = sorted(result)
+        return result
+
+    @property
+    def address_lists(self):
+        if self._values['address_lists'] is None:
+            return None
+        result = []
+        for x in self._values['address_lists']:
+            item = fq_name(self.partition, x)
+            result.append(item)
+        result = sorted(result)
+        return result
+
+    @property
+    def fqdns(self):
+        if self._values['fqdns'] is None:
+            return None
+        result = []
+        for x in self._values['fqdns']:
+            if self.is_valid_hostname(x):
+                result.append(x)
+            else:
+                raise F5ModuleError(
+                    "The hostname '{0}' looks invalid.".format(x)
+                )
+        result = sorted(result)
+        return result
+
+    @property
+    def geo_locations(self):
+        if self._values['geo_locations'] is None:
+            return None
+        result = []
+        for x in self._values['geo_locations']:
+            if x['region'] is not None and x['region'].strip() != '':
+                tmp = '{0}:{1}'.format(x['country'], x['region'])
+            else:
+                tmp = x['country']
+            result.append(tmp)
+        result = sorted(result)
+        return result
+
+
+class Changes(Parameters):
+    def to_return(self):
+        result = {}
+        try:
+            for returnable in self.returnables:
+                result[returnable] = getattr(self, returnable)
+            result = self._filter_params(result)
+        except Exception:
+            pass
+        return result
+
+
+class ReportableChanges(Changes):
+    @property
+    def addresses(self):
+        result = []
+        for item in self._values['addresses']:
+            if '-' in item['name']:
+                continue
+            result.append(item['name'])
+        return result
+
+    @property
+    def address_ranges(self):
+        result = []
+        for item in self._values['addresses']:
+            if '-' not in item['name']:
+                continue
+            start, stop = item['name'].split('-')
+            start = start.strip()
+            stop = stop.strip()
+
+            start = ip_address(u'{0}'.format(start))
+            stop = ip_address(u'{0}'.format(stop))
+            if start.version != stop.version:
+                raise F5ModuleError(
+                    "When specifying a range, IP addresses must be of the same type; IPv4 or IPv6."
+ ) + if int(start) > int(stop): + stop, start = start, stop + item = '{0}-{1}'.format(str(start), str(stop)) + result.append(item) + result = sorted(result) + return result + + @property + def address_lists(self): + result = [] + for x in self._values['address_lists']: + item = '/{0}/{1}'.format(x['partition'], x['name']) + result.append(item) + result = sorted(result) + return result + + +class UsableChanges(Changes): + @property + def addresses(self): + if self._values['addresses'] is None and self._values['address_ranges'] is None: + return None + result = [] + if self._values['addresses']: + result += [dict(name=str(x)) for x in self._values['addresses']] + if self._values['address_ranges']: + result += [dict(name=str(x)) for x in self._values['address_ranges']] + return result + + @property + def address_lists(self): + if self._values['address_lists'] is None: + return None + result = [] + for x in self._values['address_lists']: + partition, name = x.split('/')[1:] + result.append(dict( + name=name, + partition=partition + )) + return result + + +class Difference(object): + def __init__(self, want, have=None): + self.want = want + self.have = have + + def compare(self, param): + try: + result = getattr(self, param) + return result + except AttributeError: + return self.__default(param) + + def __default(self, param): + attr1 = getattr(self.want, param) + try: + attr2 = getattr(self.have, param) + if attr1 != attr2: + return attr1 + except AttributeError: + return attr1 + + @property + def addresses(self): + if self.want.addresses is None: + return None + elif self.have.addresses is None: + return self.want.addresses + if sorted(self.want.addresses) != sorted(self.have.addresses): + return self.want.addresses + + @property + def address_lists(self): + if self.want.address_lists is None: + return None + elif self.have.address_lists is None: + return self.want.address_lists + if sorted(self.want.address_lists) != sorted(self.have.address_lists): + return self.want.address_lists + + @property + def address_ranges(self): + if self.want.address_ranges is None: + return None + elif self.have.address_ranges is None: + return self.want.address_ranges + if sorted(self.want.address_ranges) != sorted(self.have.address_ranges): + return self.want.address_ranges + + @property + def fqdns(self): + if self.want.fqdns is None: + return None + elif self.have.fqdns is None: + return self.want.fqdns + if sorted(self.want.fqdns) != sorted(self.have.fqdns): + return self.want.fqdns + + +class ModuleManager(object): + def __init__(self, *args, **kwargs): + self.module = kwargs.get('module', None) + self.client = F5RestClient(**self.module.params) + self.want = ModuleParameters(params=self.module.params) + self.have = ApiParameters() + self.changes = UsableChanges() + + def _update_changed_options(self): + diff = Difference(self.want, self.have) + updatables = Parameters.updatables + changed = dict() + for k in updatables: + change = diff.compare(k) + if change is None: + continue + else: + if isinstance(change, dict): + changed.update(change) + else: + changed[k] = change + if changed: + self.changes = UsableChanges(params=changed) + return True + return False + + def should_update(self): + result = self._update_changed_options() + if result: + return True + return False + + def exec_module(self): + changed = False + result = dict() + state = self.want.state + + if state == "present": + changed = self.present() + elif state == "absent": + changed = self.absent() + + reportable = 
ReportableChanges(params=self.changes.to_return()) + changes = reportable.to_return() + result.update(**changes) + result.update(dict(changed=changed)) + self._announce_deprecations(result) + return result + + def _announce_deprecations(self, result): + warnings = result.pop('__warnings', []) + for warning in warnings: + self.module.deprecate( + msg=warning['msg'], + version=warning['version'] + ) + + def present(self): + if self.exists(): + return self.update() + else: + return self.create() + + def absent(self): + if self.exists(): + return self.remove() + return False + + def update(self): + self.have = self.read_current_from_device() + if not self.should_update(): + return False + if self.module.check_mode: + return True + self.update_on_device() + return True + + def remove(self): + if self.module.check_mode: + return True + self.remove_from_device() + if self.exists(): + raise F5ModuleError("Failed to delete the resource.") + return True + + def create(self): + self.have = ApiParameters() + self._update_changed_options() + if self.module.check_mode: + return True + self.create_on_device() + return True + + def exists(self): + uri = "https://{0}:{1}/mgmt/tm/security/firewall/address-list/{2}".format( + self.client.provider['server'], + self.client.provider['server_port'], + transform_name(self.want.partition, self.want.name) + ) + resp = self.client.api.get(uri) + try: + response = resp.json() + except ValueError: + return False + if resp.status == 404 or 'code' in response and response['code'] == 404: + return False + return True + + def create_on_device(self): + params = self.changes.api_params() + params['name'] = self.want.name + params['partition'] = self.want.partition + uri = "https://{0}:{1}/mgmt/tm/security/firewall/address-list/".format( + self.client.provider['server'], + self.client.provider['server_port'] + ) + resp = self.client.api.post(uri, json=params) + try: + response = resp.json() + except ValueError as ex: + raise F5ModuleError(str(ex)) + + if 'code' in response and response['code'] in [400, 403]: + if 'message' in response: + raise F5ModuleError(response['message']) + else: + raise F5ModuleError(resp.content) + + def update_on_device(self): + params = self.changes.api_params() + uri = "https://{0}:{1}/mgmt/tm/security/firewall/address-list/{2}".format( + self.client.provider['server'], + self.client.provider['server_port'], + transform_name(self.want.partition, self.want.name) + ) + resp = self.client.api.patch(uri, json=params) + try: + response = resp.json() + except ValueError as ex: + raise F5ModuleError(str(ex)) + + if 'code' in response and response['code'] == 400: + if 'message' in response: + raise F5ModuleError(response['message']) + else: + raise F5ModuleError(resp.content) + + def remove_from_device(self): + uri = "https://{0}:{1}/mgmt/tm/security/firewall/address-list/{2}".format( + self.client.provider['server'], + self.client.provider['server_port'], + transform_name(self.want.partition, self.want.name) + ) + resp = self.client.api.delete(uri) + if resp.status == 200: + return True + raise F5ModuleError(resp.content) + + def read_current_from_device(self): + uri = "https://{0}:{1}/mgmt/tm/security/firewall/address-list/{2}".format( + self.client.provider['server'], + self.client.provider['server_port'], + transform_name(self.want.partition, self.want.name) + ) + resp = self.client.api.get(uri) + try: + response = resp.json() + except ValueError as ex: + raise F5ModuleError(str(ex)) + + if 'code' in response and response['code'] == 400: + if 
'message' in response:
+                raise F5ModuleError(response['message'])
+            else:
+                raise F5ModuleError(resp.content)
+        return ApiParameters(params=response)
+
+
+class ArgumentSpec(object):
+    def __init__(self):
+        self.supports_check_mode = True
+        argument_spec = dict(
+            description=dict(),
+            name=dict(required=True),
+            addresses=dict(type='list'),
+            address_ranges=dict(type='list'),
+            address_lists=dict(type='list'),
+            geo_locations=dict(
+                type='list',
+                elements='dict',
+                options=dict(
+                    country=dict(
+                        required=True,
+                    ),
+                    region=dict()
+                )
+            ),
+            fqdns=dict(type='list'),
+            partition=dict(
+                default='Common',
+                fallback=(env_fallback, ['F5_PARTITION'])
+            ),
+            state=dict(
+                default='present',
+                choices=['present', 'absent']
+            )
+        )
+        self.argument_spec = {}
+        self.argument_spec.update(f5_argument_spec)
+        self.argument_spec.update(argument_spec)
+
+
+def main():
+    spec = ArgumentSpec()
+
+    module = AnsibleModule(
+        argument_spec=spec.argument_spec,
+        supports_check_mode=spec.supports_check_mode
+    )
+
+    try:
+        mm = ModuleManager(module=module)
+        results = mm.exec_module()
+        module.exit_json(**results)
+    except F5ModuleError as ex:
+        module.fail_json(msg=str(ex))
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/network/f5/bigip_firewall_port_list.py b/plugins/modules/network/f5/bigip_firewall_port_list.py
new file mode 100644
index 0000000000..5c54370836
--- /dev/null
+++ b/plugins/modules/network/f5/bigip_firewall_port_list.py
@@ -0,0 +1,646 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright: (c) 2017, F5 Networks Inc.
+# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'certified'}
+
+DOCUMENTATION = r'''
+---
+module: bigip_firewall_port_list
+short_description: Manage port lists on BIG-IP AFM
+description:
+  - Manages the AFM port lists on a BIG-IP. This module can be used to add
+    and remove port list entries.
+options:
+  name:
+    description:
+      - Specifies the name of the port list.
+    type: str
+    required: True
+  partition:
+    description:
+      - Device partition to manage resources on.
+    type: str
+    default: Common
+  description:
+    description:
+      - Description of the port list.
+    type: str
+  ports:
+    description:
+      - Simple list of port values to add to the list.
+    type: list
+  port_ranges:
+    description:
+      - A list of port ranges where the range starts with a port number, is followed
+        by a dash (-) and then a second number.
+      - If the first number is greater than the second number, the numbers will be
+        reversed so as to be properly formatted. For example, 90-78 would become 78-90.
+    type: list
+  port_lists:
+    description:
+      - Simple list of existing port lists to add to this list. Port lists can be
+        specified in either their fully qualified name (/Common/foo) or their short
+        name (foo). If a short name is used, the C(partition) argument will automatically
+        be prepended to the short name.
+    type: list
+  state:
+    description:
+      - When C(present), ensures that the port list and entries exist.
+      - When C(absent), ensures the port list is removed.
+ type: str + choices: + - present + - absent + default: present +extends_documentation_fragment: +- f5networks.f5_modules.f5 + +author: + - Tim Rupp (@caphrim007) + - Wojciech Wypior (@wojtek0806) +''' + +EXAMPLES = r''' +- name: Create a simple port list + bigip_firewall_port_list: + name: foo + ports: + - 80 + - 443 + state: present + provider: + password: secret + server: lb.mydomain.com + user: admin + delegate_to: localhost + +- name: Override the above list of ports with a new list + bigip_firewall_port_list: + name: foo + ports: + - 3389 + - 8080 + - 25 + state: present + provider: + password: secret + server: lb.mydomain.com + user: admin + delegate_to: localhost + +- name: Create port list with series of ranges + bigip_firewall_port_list: + name: foo + port_ranges: + - 25-30 + - 80-500 + - 50-78 + state: present + provider: + password: secret + server: lb.mydomain.com + user: admin + delegate_to: localhost + +- name: Use multiple types of port arguments + bigip_firewall_port_list: + name: foo + port_ranges: + - 25-30 + - 80-500 + - 50-78 + ports: + - 8080 + - 443 + state: present + provider: + password: secret + server: lb.mydomain.com + user: admin + delegate_to: localhost + +- name: Remove port list + bigip_firewall_port_list: + name: foo + state: absent + provider: + password: secret + server: lb.mydomain.com + user: admin + delegate_to: localhost + +- name: Create port list from a file with one port per line + bigip_firewall_port_list: + name: lot-of-ports + ports: "{{ lookup('file', 'my-large-port-list.txt').split('\n') }}" + state: present + provider: + password: secret + server: lb.mydomain.com + user: admin + delegate_to: localhost +''' + +RETURN = r''' +description: + description: The new description of the port list. + returned: changed + type: str + sample: My port list +ports: + description: The new list of ports applied to the port list. + returned: changed + type: list + sample: [80, 443] +port_ranges: + description: The new list of port ranges applied to the port list. + returned: changed + type: list + sample: [80-100, 200-8080] +port_lists: + description: The new list of port list names applied to the port list. 
+  returned: changed
+  type: list
+  sample: [/Common/list1, /Common/list2]
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.basic import env_fallback
+
+try:
+    from library.module_utils.network.f5.bigip import F5RestClient
+    from library.module_utils.network.f5.common import F5ModuleError
+    from library.module_utils.network.f5.common import AnsibleF5Parameters
+    from library.module_utils.network.f5.common import fq_name
+    from library.module_utils.network.f5.common import f5_argument_spec
+    from library.module_utils.network.f5.common import transform_name
+    from library.module_utils.network.f5.icontrol import module_provisioned
+except ImportError:
+    from ansible_collections.f5networks.f5_modules.plugins.module_utils.network.f5.bigip import F5RestClient
+    from ansible_collections.f5networks.f5_modules.plugins.module_utils.network.f5.common import F5ModuleError
+    from ansible_collections.f5networks.f5_modules.plugins.module_utils.network.f5.common import AnsibleF5Parameters
+    from ansible_collections.f5networks.f5_modules.plugins.module_utils.network.f5.common import fq_name
+    from ansible_collections.f5networks.f5_modules.plugins.module_utils.network.f5.common import f5_argument_spec
+    from ansible_collections.f5networks.f5_modules.plugins.module_utils.network.f5.common import transform_name
+    from ansible_collections.f5networks.f5_modules.plugins.module_utils.network.f5.icontrol import module_provisioned
+
+
+class Parameters(AnsibleF5Parameters):
+    api_map = {
+        'portLists': 'port_lists',
+    }
+
+    api_attributes = [
+        'portLists', 'ports', 'description',
+    ]
+
+    returnables = [
+        'ports', 'port_ranges', 'port_lists', 'description',
+    ]
+
+    updatables = [
+        'description', 'ports', 'port_ranges', 'port_lists',
+    ]
+
+
+class ApiParameters(Parameters):
+    @property
+    def port_ranges(self):
+        if self._values['ports'] is None:
+            return None
+        result = []
+        for port_range in self._values['ports']:
+            if '-' not in port_range['name']:
+                continue
+            start, stop = port_range['name'].split('-')
+            start = int(start.strip())
+            stop = int(stop.strip())
+            if start > stop:
+                stop, start = start, stop
+            item = '{0}-{1}'.format(start, stop)
+            result.append(item)
+        return result
+
+    @property
+    def port_lists(self):
+        if self._values['port_lists'] is None:
+            return None
+        result = []
+        for x in self._values['port_lists']:
+            item = '/{0}/{1}'.format(x['partition'], x['name'])
+            result.append(item)
+        return result
+
+    @property
+    def ports(self):
+        if self._values['ports'] is None:
+            return None
+        result = [int(x['name']) for x in self._values['ports'] if '-' not in x['name']]
+        return result
+
+
+class ModuleParameters(Parameters):
+    @property
+    def ports(self):
+        if self._values['ports'] is None:
+            return None
+        # Range syntax (e.g. 80-90) is not valid here; ranges belong in the
+        # port_ranges parameter instead.
+        if any(x for x in self._values['ports'] if '-' in str(x)):
+            raise F5ModuleError(
+                "Ports must be whole numbers between 0 and 65,535"
+            )
+        if any(x for x in self._values['ports'] if int(x) < 0 or int(x) > 65535):
+            raise F5ModuleError(
+                "Ports must be whole numbers between 0 and 65,535"
+            )
+        result = [int(x) for x in self._values['ports']]
+        return result
+
+    @property
+    def port_ranges(self):
+        if self._values['port_ranges'] is None:
+            return None
+        result = []
+        for port_range in self._values['port_ranges']:
+            if '-' not in port_range:
+                continue
+            start, stop = port_range.split('-')
+            start = int(start.strip())
+            stop = int(stop.strip())
+            if start > stop:
+                stop, start = start, stop
+            if start < 0 or start > 65535 or stop < 0 or stop > 65535:
+                raise F5ModuleError(
+                    "Ports must be 
whole numbers between 0 and 65,535" + ) + item = '{0}-{1}'.format(start, stop) + result.append(item) + return result + + @property + def port_lists(self): + if self._values['port_lists'] is None: + return None + result = [] + for x in self._values['port_lists']: + item = fq_name(self.partition, x) + result.append(item) + return result + + +class Changes(Parameters): + def to_return(self): + result = {} + try: + for returnable in self.returnables: + result[returnable] = getattr(self, returnable) + result = self._filter_params(result) + except Exception: + pass + return result + + +class ReportableChanges(Changes): + @property + def ports(self): + result = [] + for item in self._values['ports']: + if '-' in item['name']: + continue + result.append(item['name']) + return result + + @property + def port_ranges(self): + result = [] + for item in self._values['ports']: + if '-' not in item['name']: + continue + result.append(item['name']) + return result + + +class UsableChanges(Changes): + @property + def ports(self): + if self._values['ports'] is None and self._values['port_ranges'] is None: + return None + result = [] + if self._values['ports']: + # The values of the 'key' index literally need to be string values. + # If they are not, on BIG-IP 12.1.0 they will raise this REST exception. + # + # { + # "code": 400, + # "message": "one or more configuration identifiers must be provided", + # "errorStack": [], + # "apiError": 26214401 + # } + result += [dict(name=str(x)) for x in self._values['ports']] + if self._values['port_ranges']: + result += [dict(name=str(x)) for x in self._values['port_ranges']] + return result + + @property + def port_lists(self): + if self._values['port_lists'] is None: + return None + result = [] + for x in self._values['port_lists']: + partition, name = x.split('/')[1:] + result.append(dict( + name=name, + partition=partition + )) + return result + + +class Difference(object): + def __init__(self, want, have=None): + self.want = want + self.have = have + + def compare(self, param): + try: + result = getattr(self, param) + return result + except AttributeError: + return self.__default(param) + + def __default(self, param): + attr1 = getattr(self.want, param) + try: + attr2 = getattr(self.have, param) + if attr1 != attr2: + return attr1 + except AttributeError: + return attr1 + + @property + def ports(self): + if self.want.ports is None: + return None + elif self.have.ports is None: + return self.want.ports + if sorted(self.want.ports) != sorted(self.have.ports): + return self.want.ports + + @property + def port_lists(self): + if self.want.port_lists is None: + return None + elif self.have.port_lists is None: + return self.want.port_lists + if sorted(self.want.port_lists) != sorted(self.have.port_lists): + return self.want.port_lists + + @property + def port_ranges(self): + if self.want.port_ranges is None: + return None + elif self.have.port_ranges is None: + return self.want.port_ranges + if sorted(self.want.port_ranges) != sorted(self.have.port_ranges): + return self.want.port_ranges + + +class ModuleManager(object): + def __init__(self, *args, **kwargs): + self.module = kwargs.get('module', None) + self.client = F5RestClient(**self.module.params) + self.want = ModuleParameters(params=self.module.params) + self.have = ApiParameters() + self.changes = UsableChanges() + + def _set_changed_options(self): + changed = {} + for key in Parameters.returnables: + if getattr(self.want, key) is not None: + changed[key] = getattr(self.want, key) + if changed: + self.changes = 
UsableChanges(params=changed) + + def _update_changed_options(self): + diff = Difference(self.want, self.have) + updatables = Parameters.updatables + changed = dict() + for k in updatables: + change = diff.compare(k) + if change is None: + continue + else: + if isinstance(change, dict): + changed.update(change) + else: + changed[k] = change + if changed: + self.changes = UsableChanges(params=changed) + return True + return False + + def should_update(self): + result = self._update_changed_options() + if result: + return True + return False + + def exec_module(self): + if not module_provisioned(self.client, 'afm'): + raise F5ModuleError( + "AFM must be provisioned to use this module." + ) + changed = False + result = dict() + state = self.want.state + + if state == "present": + changed = self.present() + elif state == "absent": + changed = self.absent() + + reportable = ReportableChanges(params=self.changes.to_return()) + changes = reportable.to_return() + result.update(**changes) + result.update(dict(changed=changed)) + self._announce_deprecations(result) + return result + + def _announce_deprecations(self, result): + warnings = result.pop('__warnings', []) + for warning in warnings: + self.module.deprecate( + msg=warning['msg'], + version=warning['version'] + ) + + def present(self): + if self.exists(): + return self.update() + else: + return self.create() + + def absent(self): + if self.exists(): + return self.remove() + return False + + def update(self): + self.have = self.read_current_from_device() + if not self.should_update(): + return False + if self.module.check_mode: + return True + self.update_on_device() + return True + + def remove(self): + if self.module.check_mode: + return True + self.remove_from_device() + if self.exists(): + raise F5ModuleError("Failed to delete the resource.") + return True + + def create(self): + self._set_changed_options() + if self.module.check_mode: + return True + self.create_on_device() + return True + + def exists(self): + uri = "https://{0}:{1}/mgmt/tm/security/firewall/port-list/{2}".format( + self.client.provider['server'], + self.client.provider['server_port'], + transform_name(self.want.partition, self.want.name) + ) + resp = self.client.api.get(uri) + try: + response = resp.json() + except ValueError: + return False + if resp.status == 404 or 'code' in response and response['code'] == 404: + return False + return True + + def update_on_device(self): + params = self.changes.api_params() + uri = "https://{0}:{1}/mgmt/tm/security/firewall/port-list/{2}".format( + self.client.provider['server'], + self.client.provider['server_port'], + transform_name(self.want.partition, self.want.name) + ) + resp = self.client.api.patch(uri, json=params) + try: + response = resp.json() + except ValueError as ex: + raise F5ModuleError(str(ex)) + + if 'code' in response and response['code'] == 400: + if 'message' in response: + raise F5ModuleError(response['message']) + else: + raise F5ModuleError(resp.content) + + def read_current_from_device(self): + uri = "https://{0}:{1}/mgmt/tm/security/firewall/port-list/{2}".format( + self.client.provider['server'], + self.client.provider['server_port'], + transform_name(self.want.partition, self.want.name) + ) + resp = self.client.api.get(uri) + + try: + response = resp.json() + except ValueError as ex: + raise F5ModuleError(str(ex)) + + if 'code' in response and response['code'] == 400: + if 'message' in response: + raise F5ModuleError(response['message']) + else: + raise F5ModuleError(resp.content) + return 
ApiParameters(params=response) + + def create_on_device(self): + params = self.changes.api_params() + params['name'] = self.want.name + params['partition'] = self.want.partition + uri = "https://{0}:{1}/mgmt/tm/security/firewall/port-list/".format( + self.client.provider['server'], + self.client.provider['server_port'], + ) + resp = self.client.api.post(uri, json=params) + try: + response = resp.json() + except ValueError as ex: + raise F5ModuleError(str(ex)) + + if 'code' in response and response['code'] in [400, 403]: + if 'message' in response: + raise F5ModuleError(response['message']) + else: + raise F5ModuleError(resp.content) + return response['selfLink'] + + def remove_from_device(self): + uri = "https://{0}:{1}/mgmt/tm/security/firewall/port-list/{2}".format( + self.client.provider['server'], + self.client.provider['server_port'], + transform_name(self.want.partition, self.want.name) + ) + response = self.client.api.delete(uri) + if response.status == 200: + return True + raise F5ModuleError(response.content) + + +class ArgumentSpec(object): + def __init__(self): + self.supports_check_mode = True + argument_spec = dict( + name=dict(required=True), + description=dict(), + ports=dict(type='list'), + port_ranges=dict(type='list'), + port_lists=dict(type='list'), + partition=dict( + default='Common', + fallback=(env_fallback, ['F5_PARTITION']) + ), + state=dict( + default='present', + choices=['present', 'absent'] + ) + ) + self.argument_spec = {} + self.argument_spec.update(f5_argument_spec) + self.argument_spec.update(argument_spec) + + +def main(): + spec = ArgumentSpec() + + module = AnsibleModule( + argument_spec=spec.argument_spec, + supports_check_mode=spec.supports_check_mode + ) + + try: + mm = ModuleManager(module=module) + results = mm.exec_module() + module.exit_json(**results) + except F5ModuleError as ex: + module.fail_json(msg=str(ex)) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/f5/bigip_gtm_facts.py b/plugins/modules/network/f5/bigip_gtm_facts.py new file mode 100644 index 0000000000..b08b3dc4d2 --- /dev/null +++ b/plugins/modules/network/f5/bigip_gtm_facts.py @@ -0,0 +1,986 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright: (c) 2017, F5 Networks Inc. +# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['deprecated'], + 'supported_by': 'certified'} + +DOCUMENTATION = r''' +--- +module: bigip_gtm_facts +short_description: Collect facts from F5 BIG-IP GTM devices +description: + - Collect facts from F5 BIG-IP GTM devices. +options: + include: + description: + - Fact category to collect. + required: True + choices: + - pool + - wide_ip + - server + filter: + description: + - Perform regex filter of response. Filtering is done on the name of + the resource. Valid filters are anything that can be provided to + Python's C(re) module. +deprecated: + removed_in: '2.11' + alternative: bigip_device_info + why: > + The bigip_gtm_facts module is an outlier as all facts are being collected + in the bigip_device_info module. Additionally, the M(bigip_device_info) + module is easier to maintain and use. +extends_documentation_fragment: +- f5networks.f5_modules.f5 + +notes: + - This module is deprecated. Use the C(bigip_device_info) module instead. 
+author: + - Tim Rupp (@caphrim007) +''' + +EXAMPLES = r''' +- name: Get pool facts + bigip_gtm_facts: + server: lb.mydomain.com + user: admin + password: secret + include: pool + filter: my_pool + delegate_to: localhost +''' + +RETURN = r''' +wide_ip: + description: + Contains the lb method for the wide ip and the pools that are within the wide ip. + returned: changed + type: list + sample: + wide_ip: + - enabled: True + failure_rcode: noerror + failure_rcode_response: disabled + failure_rcode_ttl: 0 + full_path: /Common/foo.ok.com + last_resort_pool: "" + minimal_response: enabled + name: foo.ok.com + partition: Common + persist_cidr_ipv4: 32 + persist_cidr_ipv6: 128 + persistence: disabled + pool_lb_mode: round-robin + pools: + - name: d3qw + order: 0 + partition: Common + ratio: 1 + ttl_persistence: 3600 + type: naptr +pool: + description: Contains the pool object status and enabled status. + returned: changed + type: list + sample: + pool: + - alternate_mode: round-robin + dynamic_ratio: disabled + enabled: True + fallback_mode: return-to-dns + full_path: /Common/d3qw + load_balancing_mode: round-robin + manual_resume: disabled + max_answers_returned: 1 + members: + - disabled: True + flags: a + full_path: ok3.com + member_order: 0 + name: ok3.com + order: 10 + preference: 10 + ratio: 1 + service: 80 + name: d3qw + partition: Common + qos_hit_ratio: 5 + qos_hops: 0 + qos_kilobytes_second: 3 + qos_lcs: 30 + qos_packet_rate: 1 + qos_rtt: 50 + qos_topology: 0 + qos_vs_capacity: 0 + qos_vs_score: 0 + availability_state: offline + enabled_state: disabled + ttl: 30 + type: naptr + verify_member_availability: disabled +server: + description: + Contains the virtual server enabled and availability status, and address. + returned: changed + type: list + sample: + server: + - addresses: + - device_name: /Common/qweqwe + name: 10.10.10.10 + translation: none + datacenter: /Common/xfxgh + enabled: True + expose_route_domains: no + full_path: /Common/qweqwe + iq_allow_path: yes + iq_allow_service_check: yes + iq_allow_snmp: yes + limit_cpu_usage: 0 + limit_cpu_usage_status: disabled + limit_max_bps: 0 + limit_max_bps_status: disabled + limit_max_connections: 0 + limit_max_connections_status: disabled + limit_max_pps: 0 + limit_max_pps_status: disabled + limit_mem_avail: 0 + limit_mem_avail_status: disabled + link_discovery: disabled + monitor: /Common/bigip + name: qweqwe + partition: Common + product: single-bigip + virtual_server_discovery: disabled + virtual_servers: + - destination: 10.10.10.10:0 + enabled: True + full_path: jsdfhsd + limit_max_bps: 0 + limit_max_bps_status: disabled + limit_max_connections: 0 + limit_max_connections_status: disabled + limit_max_pps: 0 + limit_max_pps_status: disabled + name: jsdfhsd + translation_address: none + translation_port: 0 +''' + +import re + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.six import iteritems +from ansible.module_utils.parsing.convert_bool import BOOLEANS_TRUE +from distutils.version import LooseVersion + +try: + from f5.bigip import ManagementRoot + from icontrol.exceptions import iControlUnexpectedHTTPError + from f5.utils.responses.handlers import Stats + HAS_F5SDK = True +except ImportError: + HAS_F5SDK = False + +try: + from library.module_utils.network.f5.common import F5BaseClient +except ImportError: + from ansible_collections.f5networks.f5_modules.plugins.module_utils.network.f5.common import F5BaseClient + +try: + from library.module_utils.network.f5.common import F5ModuleError + from 
library.module_utils.network.f5.common import AnsibleF5Parameters + from library.module_utils.network.f5.common import f5_argument_spec +except ImportError: + from ansible_collections.f5networks.f5_modules.plugins.module_utils.network.f5.common import F5ModuleError + from ansible_collections.f5networks.f5_modules.plugins.module_utils.network.f5.common import AnsibleF5Parameters + from ansible_collections.f5networks.f5_modules.plugins.module_utils.network.f5.common import f5_argument_spec + + +class F5Client(F5BaseClient): + def __init__(self, *args, **kwargs): + super(F5Client, self).__init__(*args, **kwargs) + self.provider = self.merge_provider_params() + + @property + def api(self): + if self._client: + return self._client + + try: + result = ManagementRoot( + self.provider['server'], + self.provider['user'], + self.provider['password'], + port=self.provider['server_port'], + verify=self.provider['validate_certs'], + token='tmos' + ) + self._client = result + return self._client + except Exception as ex: + error = 'Unable to connect to {0} on port {1}. The reported error was "{2}".'.format( + self.provider['server'], self.provider['server_port'], str(ex) + ) + raise F5ModuleError(error) + + +class BaseManager(object): + def __init__(self, *args, **kwargs): + self.module = kwargs.get('module', None) + self.client = kwargs.get('client', None) + self.kwargs = kwargs + + self.types = dict( + a_s='a', + aaaas='aaaa', + cnames='cname', + mxs='mx', + naptrs='naptr', + srvs='srv' + ) + + def filter_matches_name(self, name): + if self.want.filter is None: + return True + matches = re.match(self.want.filter, str(name)) + if matches: + return True + else: + return False + + def version_is_less_than_12(self): + version = self.client.api.tmos_version + if LooseVersion(version) < LooseVersion('12.0.0'): + return True + else: + return False + + def get_facts_from_collection(self, collection, collection_type=None): + results = [] + for item in collection: + if not self.filter_matches_name(item.name): + continue + facts = self.format_facts(item, collection_type) + results.append(facts) + return results + + def read_stats_from_device(self, resource): + stats = Stats(resource.stats.load()) + return stats.stat + + +class UntypedManager(BaseManager): + def exec_module(self): + results = [] + facts = self.read_facts() + for item in facts: + attrs = item.to_return() + filtered = [(k, v) for k, v in iteritems(attrs) if self.filter_matches_name(k)] + if filtered: + results.append(dict(filtered)) + return results + + +class TypedManager(BaseManager): + def exec_module(self): + results = [] + for collection, type in iteritems(self.types): + facts = self.read_facts(collection) + if not facts: + continue + for x in facts: + x.update({'type': type}) + for item in facts: + attrs = item.to_return() + filtered = [(k, v) for k, v in iteritems(attrs) if self.filter_matches_name(k)] + if filtered: + results.append(dict(filtered)) + return results + + +class Parameters(AnsibleF5Parameters): + @property + def include(self): + requested = self._values['include'] + valid = ['pool', 'wide_ip', 'server', 'all'] + + if any(x for x in requested if x not in valid): + raise F5ModuleError( + "The valid 'include' choices are {0}".format(', '.join(valid)) + ) + + if 'all' in requested: + return ['all'] + else: + return requested + + +class BaseParameters(Parameters): + @property + def enabled(self): + if self._values['enabled'] is None: + return None + elif self._values['enabled'] in BOOLEANS_TRUE: + return True + else: + return False 
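+    # A minimal illustrative sketch, not part of the original module: it assumes
+    # the iControl REST payload carries these flags as strings (e.g. 'true'),
+    # which is why the property checks BOOLEANS_TRUE instead of calling bool():
+    #
+    #   BaseParameters(params={'enabled': 'true'}).enabled   # -> True
+    #   BaseParameters(params={'enabled': 'false'}).enabled  # -> False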
+ + @property + def disabled(self): + if self._values['disabled'] is None: + return None + elif self._values['disabled'] in BOOLEANS_TRUE: + return True + else: + return False + + def _remove_internal_keywords(self, resource): + resource.pop('kind', None) + resource.pop('generation', None) + resource.pop('selfLink', None) + resource.pop('isSubcollection', None) + resource.pop('fullPath', None) + + def to_return(self): + result = {} + for returnable in self.returnables: + result[returnable] = getattr(self, returnable) + result = self._filter_params(result) + return result + + +class PoolParameters(BaseParameters): + api_map = { + 'alternateMode': 'alternate_mode', + 'dynamicRatio': 'dynamic_ratio', + 'fallbackMode': 'fallback_mode', + 'fullPath': 'full_path', + 'loadBalancingMode': 'load_balancing_mode', + 'manualResume': 'manual_resume', + 'maxAnswersReturned': 'max_answers_returned', + 'qosHitRatio': 'qos_hit_ratio', + 'qosHops': 'qos_hops', + 'qosKilobytesSecond': 'qos_kilobytes_second', + 'qosLcs': 'qos_lcs', + 'qosPacketRate': 'qos_packet_rate', + 'qosRtt': 'qos_rtt', + 'qosTopology': 'qos_topology', + 'qosVsCapacity': 'qos_vs_capacity', + 'qosVsScore': 'qos_vs_score', + 'verifyMemberAvailability': 'verify_member_availability', + 'membersReference': 'members' + } + + returnables = [ + 'alternate_mode', 'dynamic_ratio', 'enabled', 'disabled', 'fallback_mode', + 'load_balancing_mode', 'manual_resume', 'max_answers_returned', 'members', + 'name', 'partition', 'qos_hit_ratio', 'qos_hops', 'qos_kilobytes_second', + 'qos_lcs', 'qos_packet_rate', 'qos_rtt', 'qos_topology', 'qos_vs_capacity', + 'qos_vs_score', 'ttl', 'type', 'full_path', 'availability_state', + 'enabled_state', 'availability_status' + ] + + @property + def max_answers_returned(self): + if self._values['max_answers_returned'] is None: + return None + return int(self._values['max_answers_returned']) + + @property + def members(self): + result = [] + if self._values['members'] is None or 'items' not in self._values['members']: + return result + for item in self._values['members']['items']: + self._remove_internal_keywords(item) + if 'disabled' in item: + if item['disabled'] in BOOLEANS_TRUE: + item['disabled'] = True + else: + item['disabled'] = False + if 'enabled' in item: + if item['enabled'] in BOOLEANS_TRUE: + item['enabled'] = True + else: + item['enabled'] = False + if 'fullPath' in item: + item['full_path'] = item.pop('fullPath') + if 'memberOrder' in item: + item['member_order'] = int(item.pop('memberOrder')) + # Cast some attributes to integer + for x in ['order', 'preference', 'ratio', 'service']: + if x in item: + item[x] = int(item[x]) + result.append(item) + return result + + @property + def qos_hit_ratio(self): + if self._values['qos_hit_ratio'] is None: + return None + return int(self._values['qos_hit_ratio']) + + @property + def qos_hops(self): + if self._values['qos_hops'] is None: + return None + return int(self._values['qos_hops']) + + @property + def qos_kilobytes_second(self): + if self._values['qos_kilobytes_second'] is None: + return None + return int(self._values['qos_kilobytes_second']) + + @property + def qos_lcs(self): + if self._values['qos_lcs'] is None: + return None + return int(self._values['qos_lcs']) + + @property + def qos_packet_rate(self): + if self._values['qos_packet_rate'] is None: + return None + return int(self._values['qos_packet_rate']) + + @property + def qos_rtt(self): + if self._values['qos_rtt'] is None: + return None + return int(self._values['qos_rtt']) + + @property + def 
qos_topology(self): + if self._values['qos_topology'] is None: + return None + return int(self._values['qos_topology']) + + @property + def qos_vs_capacity(self): + if self._values['qos_vs_capacity'] is None: + return None + return int(self._values['qos_vs_capacity']) + + @property + def qos_vs_score(self): + if self._values['qos_vs_score'] is None: + return None + return int(self._values['qos_vs_score']) + + @property + def availability_state(self): + if self._values['stats'] is None: + return None + try: + result = self._values['stats']['status_availabilityState'] + return result['description'] + except AttributeError: + return None + + @property + def enabled_state(self): + if self._values['stats'] is None: + return None + try: + result = self._values['stats']['status_enabledState'] + return result['description'] + except AttributeError: + return None + + @property + def availability_status(self): + # This fact is a combination of the availability_state and enabled_state + # + # The purpose of the fact is to give a higher-level view of the availability + # of the pool, that can be used in playbooks. If you need further detail, + # consider using the following facts together. + # + # - availability_state + # - enabled_state + # + if self.enabled_state == 'enabled': + if self.availability_state == 'offline': + return 'red' + elif self.availability_state == 'available': + return 'green' + elif self.availability_state == 'unknown': + return 'blue' + else: + return 'none' + else: + # disabled + return 'black' + + +class WideIpParameters(BaseParameters): + api_map = { + 'fullPath': 'full_path', + 'failureRcode': 'failure_return_code', + 'failureRcodeResponse': 'failure_return_code_response', + 'failureRcodeTtl': 'failure_return_code_ttl', + 'lastResortPool': 'last_resort_pool', + 'minimalResponse': 'minimal_response', + 'persistCidrIpv4': 'persist_cidr_ipv4', + 'persistCidrIpv6': 'persist_cidr_ipv6', + 'poolLbMode': 'pool_lb_mode', + 'ttlPersistence': 'ttl_persistence' + } + + returnables = [ + 'full_path', 'description', 'enabled', 'disabled', 'failure_return_code', + 'failure_return_code_response', 'failure_return_code_ttl', 'last_resort_pool', + 'minimal_response', 'persist_cidr_ipv4', 'persist_cidr_ipv6', 'pool_lb_mode', + 'ttl_persistence', 'pools' + ] + + @property + def pools(self): + result = [] + if self._values['pools'] is None: + return [] + for pool in self._values['pools']: + del pool['nameReference'] + for x in ['order', 'ratio']: + if x in pool: + pool[x] = int(pool[x]) + result.append(pool) + return result + + @property + def failure_return_code_ttl(self): + if self._values['failure_return_code_ttl'] is None: + return None + return int(self._values['failure_return_code_ttl']) + + @property + def persist_cidr_ipv4(self): + if self._values['persist_cidr_ipv4'] is None: + return None + return int(self._values['persist_cidr_ipv4']) + + @property + def persist_cidr_ipv6(self): + if self._values['persist_cidr_ipv6'] is None: + return None + return int(self._values['persist_cidr_ipv6']) + + @property + def ttl_persistence(self): + if self._values['ttl_persistence'] is None: + return None + return int(self._values['ttl_persistence']) + + +class ServerParameters(BaseParameters): + api_map = { + 'fullPath': 'full_path', + 'exposeRouteDomains': 'expose_route_domains', + 'iqAllowPath': 'iq_allow_path', + 'iqAllowServiceCheck': 'iq_allow_service_check', + 'iqAllowSnmp': 'iq_allow_snmp', + 'limitCpuUsage': 'limit_cpu_usage', + 'limitCpuUsageStatus': 'limit_cpu_usage_status', + 
'limitMaxBps': 'limit_max_bps', + 'limitMaxBpsStatus': 'limit_max_bps_status', + 'limitMaxConnections': 'limit_max_connections', + 'limitMaxConnectionsStatus': 'limit_max_connections_status', + 'limitMaxPps': 'limit_max_pps', + 'limitMaxPpsStatus': 'limit_max_pps_status', + 'limitMemAvail': 'limit_mem_available', + 'limitMemAvailStatus': 'limit_mem_available_status', + 'linkDiscovery': 'link_discovery', + 'proberFallback': 'prober_fallback', + 'proberPreference': 'prober_preference', + 'virtualServerDiscovery': 'virtual_server_discovery', + 'devicesReference': 'devices', + 'virtualServersReference': 'virtual_servers' + } + + returnables = [ + 'datacenter', 'enabled', 'disabled', 'expose_route_domains', 'iq_allow_path', + 'full_path', 'iq_allow_service_check', 'iq_allow_snmp', 'limit_cpu_usage', + 'limit_cpu_usage_status', 'limit_max_bps', 'limit_max_bps_status', + 'limit_max_connections', 'limit_max_connections_status', 'limit_max_pps', + 'limit_max_pps_status', 'limit_mem_available', 'limit_mem_available_status', + 'link_discovery', 'monitor', 'product', 'prober_fallback', 'prober_preference', + 'virtual_server_discovery', 'addresses', 'devices', 'virtual_servers' + ] + + @property + def product(self): + if self._values['product'] is None: + return None + if self._values['product'] in ['single-bigip', 'redundant-bigip']: + return 'bigip' + return self._values['product'] + + @property + def devices(self): + result = [] + if self._values['devices'] is None or 'items' not in self._values['devices']: + return result + for item in self._values['devices']['items']: + self._remove_internal_keywords(item) + if 'fullPath' in item: + item['full_path'] = item.pop('fullPath') + result.append(item) + return result + + @property + def virtual_servers(self): + result = [] + if self._values['virtual_servers'] is None or 'items' not in self._values['virtual_servers']: + return result + for item in self._values['virtual_servers']['items']: + self._remove_internal_keywords(item) + if 'disabled' in item: + if item['disabled'] in BOOLEANS_TRUE: + item['disabled'] = True + else: + item['disabled'] = False + if 'enabled' in item: + if item['enabled'] in BOOLEANS_TRUE: + item['enabled'] = True + else: + item['enabled'] = False + if 'fullPath' in item: + item['full_path'] = item.pop('fullPath') + if 'limitMaxBps' in item: + item['limit_max_bps'] = int(item.pop('limitMaxBps')) + if 'limitMaxBpsStatus' in item: + item['limit_max_bps_status'] = item.pop('limitMaxBpsStatus') + if 'limitMaxConnections' in item: + item['limit_max_connections'] = int(item.pop('limitMaxConnections')) + if 'limitMaxConnectionsStatus' in item: + item['limit_max_connections_status'] = item.pop('limitMaxConnectionsStatus') + if 'limitMaxPps' in item: + item['limit_max_pps'] = int(item.pop('limitMaxPps')) + if 'limitMaxPpsStatus' in item: + item['limit_max_pps_status'] = item.pop('limitMaxPpsStatus') + if 'translationAddress' in item: + item['translation_address'] = item.pop('translationAddress') + if 'translationPort' in item: + item['translation_port'] = int(item.pop('translationPort')) + result.append(item) + return result + + @property + def limit_cpu_usage(self): + if self._values['limit_cpu_usage'] is None: + return None + return int(self._values['limit_cpu_usage']) + + @property + def limit_max_bps(self): + if self._values['limit_max_bps'] is None: + return None + return int(self._values['limit_max_bps']) + + @property + def limit_max_connections(self): + if self._values['limit_max_connections'] is None: + return None + return 
int(self._values['limit_max_connections']) + + @property + def limit_max_pps(self): + if self._values['limit_max_pps'] is None: + return None + return int(self._values['limit_max_pps']) + + @property + def limit_mem_available(self): + if self._values['limit_mem_available'] is None: + return None + return int(self._values['limit_mem_available']) + + +class PoolFactManager(BaseManager): + def __init__(self, *args, **kwargs): + self.module = kwargs.get('module', None) + self.client = kwargs.get('client', None) + super(PoolFactManager, self).__init__(**kwargs) + self.kwargs = kwargs + + def exec_module(self): + if self.version_is_less_than_12(): + manager = self.get_manager('untyped') + else: + manager = self.get_manager('typed') + facts = manager.exec_module() + result = dict(pool=facts) + return result + + def get_manager(self, type): + if type == 'typed': + return TypedPoolFactManager(**self.kwargs) + elif type == 'untyped': + return UntypedPoolFactManager(**self.kwargs) + + +class TypedPoolFactManager(TypedManager): + def __init__(self, *args, **kwargs): + self.module = kwargs.get('module', None) + self.client = kwargs.get('client', None) + super(TypedPoolFactManager, self).__init__(**kwargs) + self.want = PoolParameters(params=self.module.params) + + def read_facts(self, collection): + results = [] + collection = self.read_collection_from_device(collection) + for resource in collection: + attrs = resource.attrs + attrs['stats'] = self.read_stats_from_device(resource) + params = PoolParameters(params=attrs) + results.append(params) + return results + + def read_collection_from_device(self, collection_name): + pools = self.client.api.tm.gtm.pools + collection = getattr(pools, collection_name) + result = collection.get_collection( + requests_params=dict( + params='expandSubcollections=true' + ) + ) + return result + + +class UntypedPoolFactManager(UntypedManager): + def __init__(self, *args, **kwargs): + self.client = kwargs.get('client', None) + self.module = kwargs.get('module', None) + super(UntypedPoolFactManager, self).__init__(**kwargs) + self.want = PoolParameters(params=self.module.params) + + def read_facts(self): + results = [] + collection = self.read_collection_from_device() + for resource in collection: + attrs = resource.attrs + attrs['stats'] = self.read_stats_from_device(resource) + params = PoolParameters(params=attrs) + results.append(params) + return results + + def read_collection_from_device(self): + result = self.client.api.tm.gtm.pools.get_collection( + requests_params=dict( + params='expandSubcollections=true' + ) + ) + return result + + +class WideIpFactManager(BaseManager): + def exec_module(self): + if self.version_is_less_than_12(): + manager = self.get_manager('untyped') + else: + manager = self.get_manager('typed') + facts = manager.exec_module() + result = dict(wide_ip=facts) + return result + + def get_manager(self, type): + if type == 'typed': + return TypedWideIpFactManager(**self.kwargs) + elif type == 'untyped': + return UntypedWideIpFactManager(**self.kwargs) + + +class TypedWideIpFactManager(TypedManager): + def __init__(self, *args, **kwargs): + self.client = kwargs.get('client', None) + self.module = kwargs.get('module', None) + super(TypedWideIpFactManager, self).__init__(**kwargs) + self.want = WideIpParameters(params=self.module.params) + + def read_facts(self, collection): + results = [] + collection = self.read_collection_from_device(collection) + for resource in collection: + attrs = resource.attrs + params = WideIpParameters(params=attrs) + 
results.append(params) + return results + + def read_collection_from_device(self, collection_name): + wideips = self.client.api.tm.gtm.wideips + collection = getattr(wideips, collection_name) + result = collection.get_collection( + requests_params=dict( + params='expandSubcollections=true' + ) + ) + return result + + +class UntypedWideIpFactManager(UntypedManager): + def __init__(self, *args, **kwargs): + self.client = kwargs.get('client', None) + self.module = kwargs.get('module', None) + super(UntypedWideIpFactManager, self).__init__(**kwargs) + self.want = WideIpParameters(params=self.module.params) + + def read_facts(self): + results = [] + collection = self.read_collection_from_device() + for resource in collection: + attrs = resource.attrs + params = WideIpParameters(params=attrs) + results.append(params) + return results + + def read_collection_from_device(self): + result = self.client.api.tm.gtm.wideips.get_collection( + requests_params=dict( + params='expandSubcollections=true' + ) + ) + return result + + +class ServerFactManager(UntypedManager): + def __init__(self, *args, **kwargs): + self.client = kwargs.get('client', None) + self.module = kwargs.get('module', None) + super(ServerFactManager, self).__init__(**kwargs) + self.want = ServerParameters(params=self.module.params) + + def exec_module(self): + facts = super(ServerFactManager, self).exec_module() + result = dict(server=facts) + return result + + def read_facts(self): + results = [] + collection = self.read_collection_from_device() + for resource in collection: + attrs = resource.attrs + params = ServerParameters(params=attrs) + results.append(params) + return results + + def read_collection_from_device(self): + result = self.client.api.tm.gtm.servers.get_collection( + requests_params=dict( + params='expandSubcollections=true' + ) + ) + return result + + +class ModuleManager(object): + def __init__(self, *args, **kwargs): + self.module = kwargs.get('module', None) + self.client = kwargs.get('client', None) + self.kwargs = kwargs + self.want = Parameters(params=self.module.params) + + def exec_module(self): + if not self.gtm_provisioned(): + raise F5ModuleError( + "GTM must be provisioned to use this module." 
+ ) + + if 'all' in self.want.include: + names = ['pool', 'wide_ip', 'server'] + else: + names = self.want.include + managers = [self.get_manager(name) for name in names] + result = self.execute_managers(managers) + if result: + result['changed'] = True + else: + result['changed'] = False + self._announce_deprecations() + return result + + def _announce_deprecations(self): + warnings = [] + if self.want: + warnings += self.want._values.get('__warnings', []) + for warning in warnings: + self.module.deprecate( + msg=warning['msg'], + version=warning['version'] + ) + + def execute_managers(self, managers): + results = dict() + for manager in managers: + result = manager.exec_module() + results.update(result) + return results + + def get_manager(self, which): + if 'pool' == which: + return PoolFactManager(**self.kwargs) + if 'wide_ip' == which: + return WideIpFactManager(**self.kwargs) + if 'server' == which: + return ServerFactManager(**self.kwargs) + + def gtm_provisioned(self): + resource = self.client.api.tm.sys.dbs.db.load( + name='provisioned.cpu.gtm' + ) + if int(resource.value) == 0: + return False + return True + + +class ArgumentSpec(object): + def __init__(self): + self.supports_check_mode = False + argument_spec = dict( + include=dict( + type='list', + choices=[ + 'pool', + 'wide_ip', + 'server', + ], + required=True + ), + filter=dict() + ) + self.argument_spec = {} + self.argument_spec.update(f5_argument_spec) + self.argument_spec.update(argument_spec) + + +def main(): + spec = ArgumentSpec() + + module = AnsibleModule( + argument_spec=spec.argument_spec, + supports_check_mode=spec.supports_check_mode + ) + if not HAS_F5SDK: + module.fail_json(msg="The python f5-sdk module is required") + + client = F5Client(**module.params) + + try: + mm = ModuleManager(module=module, client=client) + results = mm.exec_module() + module.exit_json(**results) + except F5ModuleError as ex: + module.fail_json(msg=str(ex)) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/f5/bigip_iapplx_package.py b/plugins/modules/network/f5/bigip_iapplx_package.py new file mode 120000 index 0000000000..45a77871e2 --- /dev/null +++ b/plugins/modules/network/f5/bigip_iapplx_package.py @@ -0,0 +1 @@ +bigip_lx_package.py \ No newline at end of file diff --git a/plugins/modules/network/f5/bigip_lx_package.py b/plugins/modules/network/f5/bigip_lx_package.py new file mode 100644 index 0000000000..1c5b5d8451 --- /dev/null +++ b/plugins/modules/network/f5/bigip_lx_package.py @@ -0,0 +1,481 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright: (c) 2017, F5 Networks Inc. +# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'certified'} + +DOCUMENTATION = r''' +--- +module: bigip_lx_package +short_description: Manages Javascript LX packages on a BIG-IP +description: + - Manages Javascript LX packages on a BIG-IP. This module will allow + you to deploy LX packages to the BIG-IP and manage their lifecycle. +options: + package: + description: + - The LX package that you want to upload or remove. When C(state) is C(present), + and you intend to use this module in a C(role), it is recommended that you use + the C({{ role_path }}) variable. An example is provided in the C(EXAMPLES) section. 
+      - When C(state) is C(absent), it is not necessary for the package to exist on the
+        Ansible controller. If the full path to the package is provided, the filename will
+        specifically be cherry-picked from it to properly remove the package.
+    type: path
+  state:
+    description:
+      - Whether the LX package should exist or not.
+    type: str
+    default: present
+    choices:
+      - present
+      - absent
+notes:
+  - Requires the rpm tool be installed on the Ansible controller. This can be accomplished through
+    different ways on each platform. On Debian-based systems with C(apt):
+    C(apt-get install rpm). On Mac with C(brew): C(brew install rpm).
+    This command is already present on RedHat-based systems.
+  - Requires BIG-IP >= 12.1.0 because the required functionality is missing
+    on versions earlier than that.
+  - The module name C(bigip_iapplx_package) has been deprecated in favor of C(bigip_lx_package).
+requirements:
+  - Requires BIG-IP >= 12.1.0
+  - The 'rpm' tool installed on the Ansible controller
+extends_documentation_fragment:
+- f5networks.f5_modules.f5
+
+author:
+  - Tim Rupp (@caphrim007)
+  - Wojciech Wypior (@wojtek0806)
+'''
+
+EXAMPLES = r'''
+- name: Install AS3
+  bigip_lx_package:
+    package: f5-appsvcs-3.5.0-3.noarch.rpm
+    provider:
+      password: secret
+      server: lb.mydomain.com
+      user: admin
+  delegate_to: localhost
+
+- name: Add an LX package stored in a role
+  bigip_lx_package:
+    package: "{{ role_path }}/files/MyApp-0.1.0-0001.noarch.rpm"
+    provider:
+      password: secret
+      server: lb.mydomain.com
+      user: admin
+  delegate_to: localhost
+
+- name: Remove an LX package
+  bigip_lx_package:
+    package: MyApp-0.1.0-0001.noarch.rpm
+    state: absent
+    provider:
+      password: secret
+      server: lb.mydomain.com
+      user: admin
+  delegate_to: localhost
+'''
+
+RETURN = r'''
+# only common fields returned
+'''
+
+import os
+import time
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import urlparse
+from distutils.version import LooseVersion
+
+try:
+    from library.module_utils.network.f5.bigip import F5RestClient
+    from library.module_utils.network.f5.common import F5ModuleError
+    from library.module_utils.network.f5.common import AnsibleF5Parameters
+    from library.module_utils.network.f5.common import f5_argument_spec
+    from library.module_utils.network.f5.icontrol import tmos_version
+    from library.module_utils.network.f5.icontrol import upload_file
+except ImportError:
+    from ansible_collections.f5networks.f5_modules.plugins.module_utils.network.f5.bigip import F5RestClient
+    from ansible_collections.f5networks.f5_modules.plugins.module_utils.network.f5.common import F5ModuleError
+    from ansible_collections.f5networks.f5_modules.plugins.module_utils.network.f5.common import AnsibleF5Parameters
+    from ansible_collections.f5networks.f5_modules.plugins.module_utils.network.f5.common import f5_argument_spec
+    from ansible_collections.f5networks.f5_modules.plugins.module_utils.network.f5.icontrol import tmos_version
+    from ansible_collections.f5networks.f5_modules.plugins.module_utils.network.f5.icontrol import upload_file
+
+
+class Parameters(AnsibleF5Parameters):
+    api_attributes = []
+    returnables = []
+
+    @property
+    def package(self):
+        if self._values['package'] is None:
+            return None
+        return self._values['package']
+
+    @property
+    def package_file(self):
+        if self._values['package'] is None:
+            return None
+        return os.path.basename(self._values['package'])
+
+    @property
+    def package_name(self):
+        """Return a valid name for the package
+
+        BIG-IP determines the package name by the content of the RPM info.
+        It does not use the filename. Therefore, we do the same. This method
+        is only used when the file actually exists on your Ansible
+        controller.
+
+        If the package does not exist, then we instead use the filename
+        portion of the 'package' argument that is provided.
+
+        Non-existence typically occurs when using 'state' = 'absent'.
+
+        :return:
+        """
+        cmd = ['rpm', '-qp', '--queryformat', '%{NAME}-%{VERSION}-%{RELEASE}.%{ARCH}', self.package]
+        rc, out, err = self._module.run_command(cmd)
+        if not out:
+            return str(self.package_file)
+        return out
+
+    @property
+    def package_root(self):
+        if self._values['package'] is None:
+            return None
+        base = os.path.basename(self._values['package'])
+        result = os.path.splitext(base)
+        return result[0]
+
+
+class ApiParameters(Parameters):
+    pass
+
+
+class ModuleParameters(Parameters):
+    pass
+
+
+class Changes(Parameters):
+    def to_return(self):
+        result = {}
+        try:
+            for returnable in self.returnables:
+                result[returnable] = getattr(self, returnable)
+            result = self._filter_params(result)
+        except Exception:
+            pass
+        return result
+
+
+class UsableChanges(Changes):
+    pass
+
+
+class ReportableChanges(Changes):
+    pass
+
+
+class ModuleManager(object):
+    def __init__(self, *args, **kwargs):
+        self.module = kwargs.get('module', None)
+        self.client = F5RestClient(**self.module.params)
+        self.want = ModuleParameters(module=self.module, params=self.module.params)
+        self.changes = UsableChanges()
+
+    def exec_module(self):
+        result = dict()
+        changed = False
+        state = self.want.state
+
+        version = tmos_version(self.client)
+        if LooseVersion(version) <= LooseVersion('12.0.0'):
+            raise F5ModuleError(
+                "This version of BIG-IP is not supported."
+            )
+
+        if state == "present":
+            changed = self.present()
+        elif state == "absent":
+            changed = self.absent()
+
+        changes = self.changes.to_return()
+        result.update(**changes)
+        result.update(dict(changed=changed))
+        return result
+
+    def present(self):
+        if self.exists():
+            return False
+        else:
+            return self.create()
+
+    def absent(self):
+        changed = False
+        if self.exists():
+            changed = self.remove()
+        return changed
+
+    def remove(self):
+        if self.module.check_mode:
+            return True
+        self.remove_from_device()
+        if self.exists():
+            raise F5ModuleError("Failed to delete the LX package.")
+        return True
+
+    def create(self):
+        if self.module.check_mode:
+            return True
+        if not os.path.exists(self.want.package):
+            if self.want.package.startswith('/'):
+                raise F5ModuleError(
+                    "The specified LX package was not found at {0}.".format(self.want.package)
+                )
+            else:
+                raise F5ModuleError(
+                    "The specified LX package was not found in {0}.".format(os.getcwd())
+                )
+        self.upload_to_device()
+        self.create_on_device()
+        self.enable_iapplx_on_device()
+        self.remove_package_file_from_device()
+        if self.exists():
+            return True
+        else:
+            raise F5ModuleError("Failed to install LX package.")
+
+    def exists(self):
+        exists = False
+        packages = self.get_installed_packages_on_device()
+        if os.path.exists(self.want.package):
+            exists = True
+        for package in packages:
+            if exists:
+                if self.want.package_name == package['packageName']:
+                    return True
+            else:
+                if self.want.package_root == package['packageName']:
+                    return True
+        return False
+
+    def get_installed_packages_on_device(self):
+        uri = "https://{0}:{1}/mgmt/shared/iapp/package-management-tasks".format(
+            self.client.provider['server'],
+            self.client.provider['server_port']
+        )
+        params = dict(operation='QUERY')
+        resp = self.client.api.post(uri, 
json=params) + + try: + response = resp.json() + except ValueError as ex: + raise F5ModuleError(str(ex)) + + if 'code' in response and response['code'] in [400, 403]: + if 'message' in response: + raise F5ModuleError(response['message']) + else: + raise F5ModuleError(resp.content) + + path = urlparse(response["selfLink"]).path + task = self._wait_for_task(path) + + if task['status'] == 'FINISHED': + return task['queryResponse'] + raise F5ModuleError( + "Failed to find the installed packages on the device." + ) + + def _wait_for_task(self, path): + task = None + for x in range(0, 60): + task = self.check_task_on_device(path) + if task['status'] in ['FINISHED', 'FAILED']: + return task + time.sleep(1) + return task + + def check_task_on_device(self, path): + uri = "https://{0}:{1}{2}".format( + self.client.provider['server'], + self.client.provider['server_port'], + path + ) + resp = self.client.api.get(uri) + + try: + response = resp.json() + except ValueError as ex: + raise F5ModuleError(str(ex)) + + if 'code' in response and response['code'] == 400: + if 'message' in response: + raise F5ModuleError(response['message']) + else: + raise F5ModuleError(resp.content) + return response + + def upload_to_device(self): + url = 'https://{0}:{1}/mgmt/shared/file-transfer/uploads'.format( + self.client.provider['server'], + self.client.provider['server_port'] + ) + try: + upload_file(self.client, url, self.want.package) + except F5ModuleError: + raise F5ModuleError( + "Failed to upload the file." + ) + + def remove_package_file_from_device(self): + params = dict( + command="run", + utilCmdArgs="/var/config/rest/downloads/{0}".format(self.want.package_file) + ) + uri = "https://{0}:{1}/mgmt/tm/util/unix-rm".format( + self.client.provider['server'], + self.client.provider['server_port'] + ) + resp = self.client.api.post(uri, json=params) + try: + response = resp.json() + except ValueError as ex: + raise F5ModuleError(str(ex)) + if 'code' in response and response['code'] in [400, 403]: + if 'message' in response: + raise F5ModuleError(response['message']) + else: + raise F5ModuleError(resp.content) + + def create_on_device(self): + remote_path = "/var/config/rest/downloads/{0}".format(self.want.package_file) + params = dict( + operation='INSTALL', packageFilePath=remote_path + ) + uri = "https://{0}:{1}/mgmt/shared/iapp/package-management-tasks".format( + self.client.provider['server'], + self.client.provider['server_port'] + ) + + resp = self.client.api.post(uri, json=params) + + try: + response = resp.json() + except ValueError as ex: + raise F5ModuleError(str(ex)) + + if 'code' in response and response['code'] in [400, 403]: + if 'message' in response: + raise F5ModuleError(response['message']) + else: + raise F5ModuleError(resp.content) + + path = urlparse(response["selfLink"]).path + task = self._wait_for_task(path) + + if task['status'] == 'FINISHED': + return True + else: + raise F5ModuleError(task['errorMessage']) + + def remove_from_device(self): + params = dict( + operation='UNINSTALL', + packageName=self.want.package_root + ) + uri = "https://{0}:{1}/mgmt/shared/iapp/package-management-tasks".format( + self.client.provider['server'], + self.client.provider['server_port'] + ) + + resp = self.client.api.post(uri, json=params) + + try: + response = resp.json() + except ValueError as ex: + raise F5ModuleError(str(ex)) + + if 'code' in response and response['code'] in [400, 403]: + if 'message' in response: + raise F5ModuleError(response['message']) + else: + raise F5ModuleError(resp.content) + + 
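+        # Note: the package-management-tasks endpoint is asynchronous. The POST
+        # above only queues the UNINSTALL operation; the selfLink returned in
+        # the response is polled below through _wait_for_task until the task
+        # reports FINISHED or FAILED.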
path = urlparse(response["selfLink"]).path + task = self._wait_for_task(path) + + if task['status'] == 'FINISHED': + return True + return False + + def enable_iapplx_on_device(self): + params = dict( + command="run", + utilCmdArgs='-c "touch /var/config/rest/iapps/enable"' + ) + uri = "https://{0}:{1}/mgmt/tm/util/bash".format( + self.client.provider['server'], + self.client.provider['server_port'] + ) + resp = self.client.api.post(uri, json=params) + try: + response = resp.json() + except ValueError as ex: + raise F5ModuleError(str(ex)) + if 'code' in response and response['code'] in [400, 403]: + if 'message' in response: + raise F5ModuleError(response['message']) + else: + raise F5ModuleError(resp.content) + + +class ArgumentSpec(object): + def __init__(self): + self.supports_check_mode = True + argument_spec = dict( + state=dict( + default='present', + choices=['present', 'absent'] + ), + package=dict(type='path') + ) + self.argument_spec = {} + self.argument_spec.update(f5_argument_spec) + self.argument_spec.update(argument_spec) + self.required_if = [ + ['state', 'present', ['package']] + ] + + +def main(): + spec = ArgumentSpec() + + module = AnsibleModule( + argument_spec=spec.argument_spec, + supports_check_mode=spec.supports_check_mode, + required_if=spec.required_if + ) + + try: + mm = ModuleManager(module=module) + results = mm.exec_module() + module.exit_json(**results) + except F5ModuleError as ex: + module.fail_json(msg=str(ex)) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/f5/bigip_security_address_list.py b/plugins/modules/network/f5/bigip_security_address_list.py new file mode 120000 index 0000000000..6bef19600f --- /dev/null +++ b/plugins/modules/network/f5/bigip_security_address_list.py @@ -0,0 +1 @@ +bigip_firewall_address_list.py \ No newline at end of file diff --git a/plugins/modules/network/f5/bigip_security_port_list.py b/plugins/modules/network/f5/bigip_security_port_list.py new file mode 120000 index 0000000000..a78176b5bf --- /dev/null +++ b/plugins/modules/network/f5/bigip_security_port_list.py @@ -0,0 +1 @@ +bigip_firewall_port_list.py \ No newline at end of file diff --git a/plugins/modules/network/f5/bigip_traffic_group.py b/plugins/modules/network/f5/bigip_traffic_group.py new file mode 120000 index 0000000000..7caabb783b --- /dev/null +++ b/plugins/modules/network/f5/bigip_traffic_group.py @@ -0,0 +1 @@ +bigip_device_traffic_group.py \ No newline at end of file diff --git a/plugins/modules/network/f5/bigiq_device_facts.py b/plugins/modules/network/f5/bigiq_device_facts.py new file mode 120000 index 0000000000..e6543531fc --- /dev/null +++ b/plugins/modules/network/f5/bigiq_device_facts.py @@ -0,0 +1 @@ +bigiq_device_info.py \ No newline at end of file diff --git a/plugins/modules/network/f5/bigiq_device_info.py b/plugins/modules/network/f5/bigiq_device_info.py new file mode 100644 index 0000000000..5adcdac6c7 --- /dev/null +++ b/plugins/modules/network/f5/bigiq_device_info.py @@ -0,0 +1,2314 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (c) 2018 F5 Networks Inc. 
+# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'certified'}
+
+DOCUMENTATION = r'''
+---
+module: bigiq_device_info
+short_description: Collect information from F5 BIG-IQ devices
+description:
+  - Collect information from F5 BIG-IQ devices.
+  - This module was called C(bigiq_device_facts) before Ansible 2.9. The usage did not change.
+options:
+  gather_subset:
+    description:
+      - When supplied, this argument will restrict the information returned to a given subset.
+      - Can specify a list of values to include a larger subset.
+      - Values can also be used with an initial C(!) to specify that a specific subset
+        should not be collected.
+    type: list
+    required: True
+    choices:
+      - all
+      - applications
+      - managed-devices
+      - purchased-pool-licenses
+      - regkey-pools
+      - system-info
+      - vlans
+      - "!all"
+      - "!applications"
+      - "!managed-devices"
+      - "!purchased-pool-licenses"
+      - "!regkey-pools"
+      - "!system-info"
+      - "!vlans"
+extends_documentation_fragment:
+- f5networks.f5_modules.f5
+
+author:
+  - Tim Rupp (@caphrim007)
+'''
+
+EXAMPLES = r'''
+- name: Collect BIG-IQ information
+  bigiq_device_info:
+    gather_subset:
+      - system-info
+      - vlans
+    provider:
+      server: lb.mydomain.com
+      user: admin
+      password: secret
+  delegate_to: localhost
+
+- name: Collect all BIG-IQ information
+  bigiq_device_info:
+    gather_subset:
+      - all
+    provider:
+      server: lb.mydomain.com
+      user: admin
+      password: secret
+  delegate_to: localhost
+
+- name: Collect all BIG-IQ information except VLANs
+  bigiq_device_info:
+    gather_subset:
+      - all
+      - "!vlans"
+    provider:
+      server: lb.mydomain.com
+      user: admin
+      password: secret
+  delegate_to: localhost
+'''
+
+RETURN = r'''
+applications:
+  description: Application related information.
+  returned: When C(applications) is specified in C(gather_subset).
+  type: complex
+  contains:
+    protection_mode:
+      description:
+        - The type of F5 Web Application Security Service protection on the application.
+      returned: changed
+      type: str
+      sample: Not Protected
+    id:
+      description:
+        - ID of the application as known to the BIG-IQ.
+      returned: changed
+      type: str
+      sample: 996baae8-5d1d-3662-8a2d-3612fa2aceae
+    name:
+      description:
+        - Name of the application.
+      returned: changed
+      type: str
+      sample: site12http.example.com
+    status:
+      description:
+        - Current state of the application.
+      returned: changed
+      type: str
+      sample: DEPLOYED
+    transactions_per_second:
+      description:
+        - Current measurement of transactions per second being handled by the application.
+      returned: changed
+      type: float
+      sample: 0.87
+    connections:
+      description:
+        - Current number of connections established to the application.
+      returned: changed
+      type: float
+      sample: 3.06
+    new_connections:
+      description:
+        - Number of new connections being established per second.
+      returned: changed
+      type: float
+      sample: 0.35
+    response_time:
+      description:
+        - Measured response time of the application in milliseconds.
+      returned: changed
+      type: float
+      sample: 0.02
+    health:
+      description:
+        - Health of the application.
+      returned: changed
+      type: str
+      sample: Good
+    active_alerts:
+      description:
+        - Number of alerts active on the application.
+      returned: changed
+      type: int
+      sample: 0
+    bad_traffic:
+      description:
+        - Percent of traffic to application that is determined to be 'bad'.
+ - This value is dependent on C(protection_mode) being enabled. + returned: changed + type: float + sample: 1.7498 + enhanced_analytics: + description: + - Whether enhanced analytics is enabled for the application or not. + returned: changed + type: bool + sample: yes + bad_traffic_growth: + description: + - Whether or not Bad Traffic Growth alerts are configured to be triggered or not. + returned: changed + type: bool + sample: no + sample: hash/dictionary of values +managed_devices: + description: Managed device related information. + returned: When C(managed-devices) is specified in C(gather_subset). + type: complex + contains: + address: + description: + - Address where the device was discovered. + returned: changed + type: str + sample: 10.10.10.10 + build: + description: + - Build of the version. + returned: changed + type: str + sample: 0.0.4 + device_uri: + description: + - URI to reach the management interface of the device. + returned: changed + type: str + sample: "https://10.10.10.10:443" + edition: + description: + - Edition string of the product version. + returned: changed + type: str + sample: Final + group_name: + description: + - BIG-IQ group that the device is a member of. + returned: changed + type: str + sample: cm-bigip-allBigIpDevices + hostname: + description: + - Discovered hostname of the device. + returned: changed + type: str + sample: tier2labB1.lab.fp.foo.com + https_port: + description: + - HTTPS port available on the management interface of the device. + returned: changed + type: int + sample: 443 + is_clustered: + description: + - Whether the device is clustered or not. + returned: changed + type: bool + sample: no + is_license_expired: + description: + - Whether the license on the device is expired or not. + returned: changed + type: bool + sample: yes + is_virtual: + description: + - Whether the device is a virtual edition or not. + returned: changed + type: bool + sample: yes + machine_id: + description: + - Machine specific ID assigned to this device by BIG-IQ. + returned: changed + type: str + sample: c141bc88-f734-4434-be64-a3e9ea98356e + management_address: + description: + - IP address of the management interface on the device. + returned: changed + type: str + sample: 10.10.10.10 + mcp_device_name: + description: + - Device name as known by MCPD on the BIG-IP. + returned: changed + type: str + sample: /Common/tier2labB1.lab.fp.foo.com + product: + description: + - Product that the managed device is identified as. + returned: changed + type: str + sample: BIG-IP + rest_framework_version: + description: + - REST framework version running on the device + returned: changed + type: str + sample: 13.1.1-0.0.4 + self_link: + description: + - Internal reference to the managed device in BIG-IQ. + returned: changed + type: str + sample: "https://localhost/mgmt/shared/resolver/device-groups/cm-bigip-allBigIpDevices/devices/c141bc88-f734-4434-be64-a3e9ea98356e" + slots: + description: + - Volumes on the device and versions of software installed in those volumes. + returned: changed + type: complex + sample: {"volume": "HD1.1", "product": "BIG-IP", "version": "13.1.1", "build": "0.0.4", "isActive": "yes"} + state: + description: + - State of the device. + returned: changed + type: str + sample: ACTIVE + tags: + description: + - Misc tags that are assigned to the device. 
+ returned: changed + type: complex + sample: {'BIGIQ_tier_2_device': '2018-08-22T13:30:47.693-07:00', 'BIGIQ_SSG_name': 'tim-ssg'} + trust_domain_guid: + description: + - GUID of the trust domain the device is part of. + returned: changed + type: str + sample: 40ddf541-e604-4905-bde3005056813e36 + uuid: + description: + - UUID of the device in BIG-IQ. + returned: changed + type: str + sample: c141bc88-f734-4434-be64-a3e9ea98356e + version: + description: + - Version of TMOS installed on the device. + returned: changed + type: str + sample: 13.1.1 + sample: hash/dictionary of values +purchased_pool_licenses: + description: Purchased Pool License related information. + returned: When C(purchased-pool-licenses) is specified in C(gather_subset). + type: complex + contains: + base_reg_key: + description: + - Base registration key of the purchased pool + returned: changed + type: str + sample: XXXXX-XXXXX-XXXXX-XXXXX-XXXXXXX + dossier: + description: + - Dossier of the purchased pool license + returned: changed + type: str + sample: d6bd4b8ba5...e9a1a1199b73af9932948a + free_device_licenses: + description: + - Number of free licenses remaining. + returned: changed + type: int + sample: 34 + name: + description: + - Name of the purchased pool + returned: changed + type: str + sample: my-pool1 + state: + description: + - State of the purchased pool license + returned: changed + type: str + sample: LICENSED + total_device_licenses: + description: + - Total number of licenses in the pool. + returned: changed + type: int + sample: 40 + uuid: + description: + - UUID of the purchased pool license + returned: changed + type: str + sample: b2112329-cba7-4f1f-9a26-fab9be416d60 + vendor: + description: + - Vendor who provided the license + returned: changed + type: str + sample: F5 Networks, Inc + licensed_date_time: + description: + - Timestamp that the pool was licensed. + returned: changed + type: str + sample: "2018-09-10T00:00:00-07:00" + licensed_version: + description: + - Version of BIG-IQ that is licensed. + returned: changed + type: str + sample: 6.0.1 + evaluation_start_date_time: + description: + - Date that evaluation license starts. + returned: changed + type: str + sample: "2018-09-09T00:00:00-07:00" + evaluation_end_date_time: + description: + - Date that evaluation license ends. + returned: changed + type: str + sample: "2018-10-11T00:00:00-07:00" + license_end_date_time: + description: + - Date that the license expires. + returned: changed + type: str + sample: "2018-10-11T00:00:00-07:00" + license_start_date_time: + description: + - Date that the license starts. + returned: changed + type: str + sample: "2018-09-09T00:00:00-07:00" + registration_key: + description: + - Purchased pool license key. + returned: changed + type: str + sample: XXXXX-XXXXX-XXXXX-XXXXX-XXXXXXX + sample: hash/dictionary of values +regkey_pools: + description: Regkey Pool related information. + returned: When C(regkey-pools) is specified in C(gather_subset). + type: complex + contains: + name: + description: + - Name of the regkey pool. + returned: changed + type: str + sample: pool1 + id: + description: + - ID of the regkey pool. + returned: changed + type: str + sample: 4f9b565c-0831-4657-b6c2-6dde6182a502 + total_offerings: + description: + - Total number of offerings in the pool + returned: changed + type: int + sample: 10 + offerings: + description: List of the offerings in the pool. + type: complex + contains: + dossier: + description: + - Dossier of the license. 
+ returned: changed + type: str + sample: d6bd4b8ba5...e9a1a1199b73af9932948a + name: + description: + - Name of the regkey. + returned: changed + type: str + sample: regkey1 + state: + description: + - State of the regkey license + returned: changed + type: str + sample: LICENSED + licensed_date_time: + description: + - Timestamp that the regkey was licensed. + returned: changed + type: str + sample: "2018-09-10T00:00:00-07:00" + licensed_version: + description: + - Version of BIG-IQ that is licensed. + returned: changed + type: str + sample: 6.0.1 + evaluation_start_date_time: + description: + - Date that evaluation license starts. + returned: changed + type: str + sample: "2018-09-09T00:00:00-07:00" + evaluation_end_date_time: + description: + - Date that evaluation license ends. + returned: changed + type: str + sample: "2018-10-11T00:00:00-07:00" + license_end_date_time: + description: + - Date that the license expires. + returned: changed + type: str + sample: "2018-10-11T00:00:00-07:00" + license_start_date_time: + description: + - Date that the license starts. + returned: changed + type: str + sample: "2018-09-09T00:00:00-07:00" + registration_key: + description: + - Registration license key. + returned: changed + type: str + sample: XXXXX-XXXXX-XXXXX-XXXXX-XXXXXXX + sample: hash/dictionary of values + sample: hash/dictionary of values +system_info: + description: System info related information. + returned: When C(system-info) is specified in C(gather_subset). + type: complex + contains: + base_mac_address: + description: + - Media Access Control address (MAC address) of the device. + returned: changed + type: str + sample: "fa:16:3e:c3:42:6f" + marketing_name: + description: + - Marketing name of the device platform. + returned: changed + type: str + sample: BIG-IQ Virtual Edition + time: + description: + - Mapping of the current time information to specific time-named keys. + returned: changed + type: complex + contains: + day: + description: + - The current day of the month, in numeric form. + returned: changed + type: int + sample: 7 + hour: + description: + - The current hour of the day in 24-hour form. + returned: changed + type: int + sample: 18 + minute: + description: + - The current minute of the hour. + returned: changed + type: int + sample: 16 + month: + description: + - The current month, in numeric form. + returned: changed + type: int + sample: 6 + second: + description: + - The current second of the minute. + returned: changed + type: int + sample: 51 + year: + description: + - The current year in 4-digit form. + returned: changed + type: int + sample: 2018 + hardware_information: + description: + - Information related to the hardware (drives and CPUs) of the system. + type: complex + returned: changed + contains: + model: + description: + - The model of the hardware. + type: str + sample: Virtual Disk + name: + description: + - The name of the hardware. + type: str + sample: HD1 + type: + description: + - The type of hardware. + type: str + sample: physical-disk + versions: + description: + - Hardware specific properties + type: complex + contains: + name: + description: + - Name of the property + type: str + sample: Size + version: + description: + - Value of the property + type: str + sample: 154.00G + is_admin_password_changed: + description: + - Whether the admin password was changed from its default or not. 
+ returned: changed + type: bool + sample: yes + is_root_password_changed: + description: + - Whether the root password was changed from its default or not. + returned: changed + type: bool + sample: no + is_system_setup: + description: + - Whether the system has been setup or not. + returned: changed + type: bool + sample: yes + package_edition: + description: + - Displays the software edition. + returned: changed + type: str + sample: Point Release 7 + package_version: + description: + - A string combining the C(product_build) and C(product_build_date). + type: str + sample: "Build 0.0.1 - Tue May 15 15:26:30 PDT 2018" + product_code: + description: + - Code identifying the product. + type: str + sample: BIG-IQ + product_build: + description: + - Build version of the release version. + type: str + sample: 0.0.1 + product_version: + description: + - Major product version of the running software. + type: str + sample: 6.0.0 + product_built: + description: + - Unix timestamp of when the product was built. + type: int + sample: 180515152630 + product_build_date: + description: + - Human readable build date. + type: str + sample: "Tue May 15 15:26:30 PDT 2018" + product_changelist: + description: + - Changelist that product branches from. + type: int + sample: 2557198 + product_jobid: + description: + - ID of the job that built the product version. + type: int + sample: 1012030 + chassis_serial: + description: + - Serial of the chassis + type: str + sample: 11111111-2222-3333-444444444444 + host_board_part_revision: + description: + - Revision of the host board. + type: str + host_board_serial: + description: + - Serial of the host board. + type: str + platform: + description: + - Platform identifier. + type: str + sample: Z100 + switch_board_part_revision: + description: + - Switch board revision. + type: str + switch_board_serial: + description: + - Serial of the switch board. + type: str + uptime: + description: + - Time, in seconds, since the system booted. + type: int + sample: 603202 + sample: hash/dictionary of values +vlans: + description: List of VLAN information. + returned: When C(vlans) is specified in C(gather_subset). + type: complex + contains: + auto_lasthop: + description: + - Allows the system to send return traffic to the MAC address that transmitted the + request, even if the routing table points to a different network or interface. + returned: changed + type: str + sample: enabled + cmp_hash_algorithm: + description: + - Specifies how the traffic on the VLAN will be disaggregated. + returned: changed + type: str + sample: default + description: + description: + - Description of the VLAN. + returned: changed + type: str + sample: My vlan + failsafe_action: + description: + - Action for the system to take when the fail-safe mechanism is triggered. + returned: changed + type: str + sample: reboot + failsafe_enabled: + description: + - Whether failsafe is enabled or not. + returned: changed + type: bool + sample: yes + failsafe_timeout: + description: + - Number of seconds that an active unit can run without detecting network traffic + on this VLAN before it starts a failover. + returned: changed + type: int + sample: 90 + if_index: + description: + - Index assigned to this VLAN. It is a unique identifier assigned for all objects + displayed in the SNMP IF-MIB. + returned: changed + type: int + sample: 176 + learning_mode: + description: + - Whether switch ports placed in the VLAN are configured for switch learning, + forwarding only, or dropped. 
+      returned: changed
+      type: str
+      sample: enable-forward
+    interfaces:
+      description:
+        - List of tagged or untagged interfaces and trunks that you want to configure for the VLAN.
+      returned: changed
+      type: complex
+      contains:
+        full_path:
+          description:
+            - Full name of the resource as known to BIG-IP.
+          returned: changed
+          type: str
+          sample: 1.3
+        name:
+          description:
+            - Relative name of the resource in BIG-IP.
+          returned: changed
+          type: str
+          sample: 1.3
+        tagged:
+          description:
+            - Whether the interface is tagged or not.
+          returned: changed
+          type: bool
+          sample: no
+    mtu:
+      description:
+        - Specific maximum transmission unit (MTU) for the VLAN.
+      returned: changed
+      type: int
+      sample: 1500
+    sflow_poll_interval:
+      description:
+        - Maximum interval in seconds between two polls.
+      returned: changed
+      type: int
+      sample: 0
+    sflow_poll_interval_global:
+      description:
+        - Whether the global VLAN poll-interval setting overrides the object-level
+          poll-interval setting.
+      returned: changed
+      type: bool
+      sample: no
+    sflow_sampling_rate:
+      description:
+        - Ratio of packets observed to the samples generated.
+      returned: changed
+      type: int
+      sample: 0
+    sflow_sampling_rate_global:
+      description:
+        - Whether the global VLAN sampling-rate setting overrides the object-level
+          sampling-rate setting.
+      returned: changed
+      type: bool
+      sample: yes
+    source_check_enabled:
+      description:
+        - Specifies that only connections that have a return route in the routing table are accepted.
+      returned: changed
+      type: bool
+      sample: yes
+    true_mac_address:
+      description:
+        - Media access control (MAC) address for the lowest-numbered interface assigned to this VLAN.
+      returned: changed
+      type: str
+      sample: "fa:16:3e:10:da:ff"
+    tag:
+      description:
+        - Tag number for the VLAN.
+      returned: changed
+      type: int
+      sample: 30
+  sample: hash/dictionary of values
+'''
+
+import datetime
+import math
+import re
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six import iteritems
+from ansible.module_utils.six import string_types
+
+try:
+    from library.module_utils.network.f5.bigiq import F5RestClient
+    from library.module_utils.network.f5.common import F5ModuleError
+    from library.module_utils.network.f5.common import AnsibleF5Parameters
+    from library.module_utils.network.f5.common import f5_argument_spec
+    from library.module_utils.network.f5.common import fq_name
+    from library.module_utils.network.f5.common import flatten_boolean
+    from library.module_utils.network.f5.ipaddress import is_valid_ip
+    from library.module_utils.network.f5.common import transform_name
+except ImportError:
+    from ansible_collections.f5networks.f5_modules.plugins.module_utils.network.f5.bigiq import F5RestClient
+    from ansible_collections.f5networks.f5_modules.plugins.module_utils.network.f5.common import F5ModuleError
+    from ansible_collections.f5networks.f5_modules.plugins.module_utils.network.f5.common import AnsibleF5Parameters
+    from ansible_collections.f5networks.f5_modules.plugins.module_utils.network.f5.common import f5_argument_spec
+    from ansible_collections.f5networks.f5_modules.plugins.module_utils.network.f5.common import fq_name
+    from ansible_collections.f5networks.f5_modules.plugins.module_utils.network.f5.common import flatten_boolean
+    from ansible_collections.f5networks.f5_modules.plugins.module_utils.network.f5.ipaddress import is_valid_ip
+    from ansible_collections.f5networks.f5_modules.plugins.module_utils.network.f5.common import transform_name
+
+
+def parseStats(entry):
+    # Recursively flatten the nested 'entries'/'nestedStats' structure that the
+    # iControl REST stats endpoints return into plain lists and dictionaries.
+    if 'description' in entry:
+        return entry['description']
+    elif 'value' in entry:
+        return entry['value']
+    elif 'entries' in entry or ('nestedStats' in entry and 'entries' in entry['nestedStats']):
+        if 'entries' in entry:
+            entries = entry['entries']
+        else:
+            entries = entry['nestedStats']['entries']
+        result = None
+
+        for name in entries:
+            entry = entries[name]
+            if 'https://localhost' in name:
+                name = name.split('/')
+                name = name[-1]
+                if result and isinstance(result, list):
+                    result.append(parseStats(entry))
+                elif result and isinstance(result, dict):
+                    result[name] = parseStats(entry)
+                else:
+                    # A numeric key means the collection is list-like; anything
+                    # else becomes a dictionary key.
+                    try:
+                        int(name)
+                        result = list()
+                        result.append(parseStats(entry))
+                    except ValueError:
+                        result = dict()
+                        result[name] = parseStats(entry)
+            else:
+                if '.' in name:
+                    names = name.split('.')
+                    key = names[0]
+                    value = names[1]
+                    # 'result' may still be None and 'key' may be unseen at
+                    # this point; guard both before assigning the nested value.
+                    if result is None:
+                        result = dict()
+                    if key not in result or not result[key]:
+                        result[key] = {}
+                    result[key][value] = parseStats(entry)
+                else:
+                    if result and isinstance(result, list):
+                        result.append(parseStats(entry))
+                    elif result and isinstance(result, dict):
+                        result[name] = parseStats(entry)
+                    else:
+                        try:
+                            int(name)
+                            result = list()
+                            result.append(parseStats(entry))
+                        except ValueError:
+                            result = dict()
+                            result[name] = parseStats(entry)
+        return result
+
+
+class BaseManager(object):
+    def __init__(self, *args, **kwargs):
+        self.module = kwargs.get('module', None)
+        self.client = kwargs.get('client', None)
+        self.kwargs = kwargs
+
+    def exec_module(self):
+        results = []
+        facts = self.read_facts()
+        for item in facts:
+            attrs = item.to_return()
+            results.append(attrs)
+        return results
+
+
+class Parameters(AnsibleF5Parameters):
+    @property
+    def gather_subset(self):
+        if isinstance(self._values['gather_subset'], string_types):
+            self._values['gather_subset'] = [self._values['gather_subset']]
+        elif not isinstance(self._values['gather_subset'], list):
+            raise F5ModuleError(
+                "The specified gather_subset must be a list."
+            )
+        tmp = list(set(self._values['gather_subset']))
+        tmp.sort()
+        self._values['gather_subset'] = tmp
+
+        return self._values['gather_subset']
+
+
+class BaseParameters(Parameters):
+    @property
+    def enabled(self):
+        return flatten_boolean(self._values['enabled'])
+
+    @property
+    def disabled(self):
+        return flatten_boolean(self._values['disabled'])
+
+    def _remove_internal_keywords(self, resource):
+        resource.pop('kind', None)
+        resource.pop('generation', None)
+        resource.pop('selfLink', None)
+        resource.pop('isSubcollection', None)
+        resource.pop('fullPath', None)
+
+    def to_return(self):
+        result = {}
+        for returnable in self.returnables:
+            result[returnable] = getattr(self, returnable)
+        result = self._filter_params(result)
+        return result
+
+
+class ApplicationsParameters(BaseParameters):
+    api_map = {
+        'protectionMode': 'protection_mode',
+        'transactionsPerSecond': 'transactions_per_second',
+        'newConnections': 'new_connections',
+        'responseTime': 'response_time',
+        'activeAlerts': 'active_alerts',
+        'badTraffic': 'bad_traffic',
+        'enhancedAnalytics': 'enhanced_analytics',
+        'badTrafficGrowth': 'bad_traffic_growth'
+    }
+
+    returnables = [
+        'protection_mode',
+        'id',
+        'name',
+        'status',
+        'transactions_per_second',
+        'connections',
+        'new_connections',
+        'response_time',
+        'health',
+        'active_alerts',
+        'bad_traffic',
+        'enhanced_analytics',
+        'bad_traffic_growth',
+    ]
+
+    @property
+    def enhanced_analytics(self):
+        return flatten_boolean(self._values['enhanced_analytics'])
+
+    @property
+    def bad_traffic_growth(self):
+        return flatten_boolean(self._values['bad_traffic_growth'])
+
+
+class ApplicationsFactManager(BaseManager):
+    def __init__(self, *args, **kwargs):
+        self.client = kwargs.get('client', None)
+        self.module = kwargs.get('module', None)
+        super(ApplicationsFactManager, self).__init__(**kwargs)
+        self.want = ApplicationsParameters(params=self.module.params)
+
+    def exec_module(self):
+        facts = self._exec_module()
+        result = dict(applications=facts)
+        return result
+
+    def _exec_module(self):
+        results = []
+        facts = self.read_facts()
+        for item in facts:
+            attrs = item.to_return()
+            results.append(attrs)
+        results = sorted(results, key=lambda k: k['name'])
+        return results
+
+    def read_facts(self):
+        results = []
+        collection = self.read_collection_from_device()
+        for resource in collection:
+            params = 
ApplicationsParameters(params=resource) + results.append(params) + return results + + def read_collection_from_device(self): + uri = "https://{0}:{1}/mgmt/ap/query/v1/tenants/default/reports/AllApplicationsList".format( + self.client.provider['server'], + self.client.provider['server_port'], + ) + resp = self.client.api.get(uri) + try: + response = resp.json() + except ValueError as ex: + raise F5ModuleError(str(ex)) + + if 'code' in response and response['code'] == 400: + if 'message' in response: + raise F5ModuleError(response['message']) + else: + raise F5ModuleError(resp.content) + try: + return response['result']['items'] + except KeyError: + return [] + + +class ManagedDevicesParameters(BaseParameters): + api_map = { + 'deviceUri': 'device_uri', + 'groupName': 'group_name', + 'httpsPort': 'https_port', + 'isClustered': 'is_clustered', + 'isLicenseExpired': 'is_license_expired', + 'isVirtual': 'is_virtual', + 'machineId': 'machine_id', + 'managementAddress': 'management_address', + 'mcpDeviceName': 'mcp_device_name', + 'restFrameworkVersion': 'rest_framework_version', + 'selfLink': 'self_link', + 'trustDomainGuid': 'trust_domain_guid', + } + + returnables = [ + 'address', + 'build', + 'device_uri', + 'edition', + 'group_name', + 'hostname', + 'https_port', + 'is_clustered', + 'is_license_expired', + 'is_virtual', + 'machine_id', + 'management_address', + 'mcp_device_name', + 'product', + 'rest_framework_version', + 'self_link', + 'slots', + 'state', + 'tags', + 'trust_domain_guid', + 'uuid', + 'version', + ] + + @property + def slots(self): + result = [] + if self._values['slots'] is None: + return None + for x in self._values['slots']: + x['is_active'] = flatten_boolean(x.pop('isActive', False)) + result.append(x) + return result + + @property + def tags(self): + if self._values['tags'] is None: + return None + result = dict((x['name'], x['value']) for x in self._values['tags']) + return result + + @property + def https_port(self): + return int(self._values['https_port']) + + @property + def is_clustered(self): + return flatten_boolean(self._values['is_clustered']) + + @property + def is_license_expired(self): + return flatten_boolean(self._values['is_license_expired']) + + @property + def is_virtual(self): + return flatten_boolean(self._values['is_virtual']) + + +class ManagedDevicesFactManager(BaseManager): + def __init__(self, *args, **kwargs): + self.client = kwargs.get('client', None) + self.module = kwargs.get('module', None) + super(ManagedDevicesFactManager, self).__init__(**kwargs) + self.want = ManagedDevicesParameters(params=self.module.params) + + def exec_module(self): + facts = self._exec_module() + result = dict(managed_devices=facts) + return result + + def _exec_module(self): + results = [] + facts = self.read_facts() + for item in facts: + attrs = item.to_return() + results.append(attrs) + results = sorted(results, key=lambda k: k['hostname']) + return results + + def read_facts(self): + results = [] + collection = self.read_collection_from_device() + for resource in collection: + params = ManagedDevicesParameters(params=resource) + results.append(params) + return results + + def read_collection_from_device(self): + uri = "https://{0}:{1}/mgmt/shared/resolver/device-groups/cm-bigip-allBigIpDevices/devices".format( + self.client.provider['server'], + self.client.provider['server_port'], + ) + resp = self.client.api.get(uri) + try: + response = resp.json() + except ValueError as ex: + raise F5ModuleError(str(ex)) + + if 'code' in response and response['code'] == 400: 
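+            # Error payloads from the REST API carry a numeric 'code'; prefer
+            # the human-readable 'message' when one is present, otherwise fall
+            # back to the raw response body.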
+ if 'message' in response: + raise F5ModuleError(response['message']) + else: + raise F5ModuleError(resp.content) + if 'items' not in response: + return [] + result = response['items'] + return result + + +class PurchasedPoolLicensesParameters(BaseParameters): + api_map = { + 'baseRegKey': 'base_reg_key', + 'freeDeviceLicenses': 'free_device_licenses', + 'licenseState': 'license_state', + 'totalDeviceLicenses': 'total_device_licenses', + } + + returnables = [ + 'base_reg_key', + 'dossier', + 'free_device_licenses', + 'name', + 'state', + 'total_device_licenses', + 'uuid', + + # license_state facts + 'vendor', + 'licensed_date_time', + 'licensed_version', + 'evaluation_start_date_time', + 'evaluation_end_date_time', + 'license_end_date_time', + 'license_start_date_time', + 'registration_key', + ] + + @property + def registration_key(self): + try: + return self._values['license_state']['registrationKey'] + except KeyError: + return None + + @property + def license_start_date_time(self): + try: + return self._values['license_state']['licenseStartDateTime'] + except KeyError: + return None + + @property + def license_end_date_time(self): + try: + return self._values['license_state']['licenseEndDateTime'] + except KeyError: + return None + + @property + def evaluation_end_date_time(self): + try: + return self._values['license_state']['evaluationEndDateTime'] + except KeyError: + return None + + @property + def evaluation_start_date_time(self): + try: + return self._values['license_state']['evaluationStartDateTime'] + except KeyError: + return None + + @property + def licensed_version(self): + try: + return self._values['license_state']['licensedVersion'] + except KeyError: + return None + + @property + def licensed_date_time(self): + try: + return self._values['license_state']['licensedDateTime'] + except KeyError: + return None + + @property + def vendor(self): + try: + return self._values['license_state']['vendor'] + except KeyError: + return None + + +class PurchasedPoolLicensesFactManager(BaseManager): + def __init__(self, *args, **kwargs): + self.client = kwargs.get('client', None) + self.module = kwargs.get('module', None) + super(PurchasedPoolLicensesFactManager, self).__init__(**kwargs) + self.want = PurchasedPoolLicensesParameters(params=self.module.params) + + def exec_module(self): + facts = self._exec_module() + result = dict(purchased_pool_licenses=facts) + return result + + def _exec_module(self): + results = [] + facts = self.read_facts() + for item in facts: + attrs = item.to_return() + results.append(attrs) + results = sorted(results, key=lambda k: k['name']) + return results + + def read_facts(self): + results = [] + collection = self.read_collection_from_device() + for resource in collection: + params = PurchasedPoolLicensesParameters(params=resource) + results.append(params) + return results + + def read_collection_from_device(self): + uri = "https://{0}:{1}/mgmt/cm/device/licensing/pool/purchased-pool/licenses".format( + self.client.provider['server'], + self.client.provider['server_port'], + ) + resp = self.client.api.get(uri) + try: + response = resp.json() + except ValueError as ex: + raise F5ModuleError(str(ex)) + + if 'code' in response and response['code'] == 400: + if 'message' in response: + raise F5ModuleError(response['message']) + else: + raise F5ModuleError(resp.content) + try: + return response['items'] + except KeyError: + return [] + + +class RegkeyPoolsParameters(BaseParameters): + api_map = { + + } + + returnables = [ + 'name', + 'id', + 'offerings', + 
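+        # Note: 'offerings' and 'total_offerings' are not fields of the pool
+        # resource itself; RegkeyPoolsFactManager.read_facts() below fills them
+        # in from the per-pool offerings sub-collection.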
'total_offerings', + ] + + +class RegkeyPoolsOfferingParameters(BaseParameters): + api_map = { + 'regKey': 'registration_key', + 'licenseState': 'license_state', + 'status': 'state', + } + + returnables = [ + 'name', + 'dossier', + 'state', + + # license_state facts + 'licensed_date_time', + 'licensed_version', + 'evaluation_start_date_time', + 'evaluation_end_date_time', + 'license_end_date_time', + 'license_start_date_time', + 'registration_key', + ] + + @property + def registration_key(self): + try: + return self._values['license_state']['registrationKey'] + except KeyError: + return None + + @property + def license_start_date_time(self): + try: + return self._values['license_state']['licenseStartDateTime'] + except KeyError: + return None + + @property + def license_end_date_time(self): + try: + return self._values['license_state']['licenseEndDateTime'] + except KeyError: + return None + + @property + def evaluation_end_date_time(self): + try: + return self._values['license_state']['evaluationEndDateTime'] + except KeyError: + return None + + @property + def evaluation_start_date_time(self): + try: + return self._values['license_state']['evaluationStartDateTime'] + except KeyError: + return None + + @property + def licensed_version(self): + try: + return self._values['license_state']['licensedVersion'] + except KeyError: + return None + + @property + def licensed_date_time(self): + try: + return self._values['license_state']['licensedDateTime'] + except KeyError: + return None + + @property + def vendor(self): + try: + return self._values['license_state']['vendor'] + except KeyError: + return None + + +class RegkeyPoolsFactManager(BaseManager): + def __init__(self, *args, **kwargs): + self.client = kwargs.get('client', None) + self.module = kwargs.get('module', None) + super(RegkeyPoolsFactManager, self).__init__(**kwargs) + self.want = RegkeyPoolsParameters(params=self.module.params) + + def exec_module(self): + facts = self._exec_module() + result = dict(regkey_pools=facts) + return result + + def _exec_module(self): + results = [] + facts = self.read_facts() + for item in facts: + attrs = item.to_return() + results.append(attrs) + results = sorted(results, key=lambda k: k['name']) + return results + + def read_facts(self): + results = [] + collection = self.read_collection_from_device() + for resource in collection: + params = RegkeyPoolsParameters(params=resource) + offerings = self.read_offerings_from_device(resource['id']) + params.update({'total_offerings': len(offerings)}) + for offering in offerings: + params2 = RegkeyPoolsOfferingParameters(params=offering) + params.update({'offerings': params2.to_return()}) + results.append(params) + return results + + def read_collection_from_device(self): + uri = "https://{0}:{1}/mgmt/cm/device/licensing/pool/regkey/licenses".format( + self.client.provider['server'], + self.client.provider['server_port'], + ) + resp = self.client.api.get(uri) + try: + response = resp.json() + except ValueError as ex: + raise F5ModuleError(str(ex)) + + if 'code' in response and response['code'] == 400: + if 'message' in response: + raise F5ModuleError(response['message']) + else: + raise F5ModuleError(resp.content) + try: + return response['items'] + except KeyError: + return [] + + def read_offerings_from_device(self, license): + uri = "https://{0}:{1}/mgmt/cm/device/licensing/pool/regkey/licenses/{2}/offerings".format( + self.client.provider['server'], + self.client.provider['server_port'], + license, + ) + resp = self.client.api.get(uri) + try: + 
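+            # Deserialize the REST payload; a body that is not valid JSON is
+            # surfaced as an F5ModuleError instead of a raw traceback.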
response = resp.json() + except ValueError as ex: + raise F5ModuleError(str(ex)) + + if 'code' in response and response['code'] == 400: + if 'message' in response: + raise F5ModuleError(response['message']) + else: + raise F5ModuleError(resp.content) + try: + return response['items'] + except KeyError: + return [] + + +class SystemInfoParameters(BaseParameters): + api_map = { + 'isSystemSetup': 'is_system_setup', + 'isAdminPasswordChanged': 'is_admin_password_changed', + 'isRootPasswordChanged': 'is_root_password_changed' + } + + returnables = [ + 'base_mac_address', + 'chassis_serial', + 'hardware_information', + 'host_board_part_revision', + 'host_board_serial', + 'is_admin_password_changed', + 'is_root_password_changed', + 'is_system_setup', + 'marketing_name', + 'package_edition', + 'package_version', + 'platform', + 'product_build', + 'product_build_date', + 'product_built', + 'product_changelist', + 'product_code', + 'product_information', + 'product_jobid', + 'product_version', + 'switch_board_part_revision', + 'switch_board_serial', + 'time', + 'uptime', + ] + + @property + def is_admin_password_changed(self): + return flatten_boolean(self._values['is_admin_password_changed']) + + @property + def is_root_password_changed(self): + return flatten_boolean(self._values['is_root_password_changed']) + + @property + def is_system_setup(self): + if self._values['is_system_setup'] is None: + return 'no' + return flatten_boolean(self._values['is_system_setup']) + + @property + def chassis_serial(self): + if self._values['system-info'] is None: + return None + + # Yes, this is still called "bigip" even though this is querying the BIG-IQ + # product. This is likely due to BIG-IQ inheriting TMOS. + if 'bigipChassisSerialNum' not in self._values['system-info'][0]: + return None + return self._values['system-info'][0]['bigipChassisSerialNum'] + + @property + def switch_board_serial(self): + if self._values['system-info'] is None: + return None + if 'switchBoardSerialNum' not in self._values['system-info'][0]: + return None + if self._values['system-info'][0]['switchBoardSerialNum'].strip() == '': + return None + return self._values['system-info'][0]['switchBoardSerialNum'] + + @property + def switch_board_part_revision(self): + if self._values['system-info'] is None: + return None + if 'switchBoardPartRevNum' not in self._values['system-info'][0]: + return None + if self._values['system-info'][0]['switchBoardPartRevNum'].strip() == '': + return None + return self._values['system-info'][0]['switchBoardPartRevNum'] + + @property + def platform(self): + if self._values['system-info'] is None: + return None + return self._values['system-info'][0]['platform'] + + @property + def host_board_serial(self): + if self._values['system-info'] is None: + return None + if 'hostBoardSerialNum' not in self._values['system-info'][0]: + return None + if self._values['system-info'][0]['hostBoardSerialNum'].strip() == '': + return None + return self._values['system-info'][0]['hostBoardSerialNum'] + + @property + def host_board_part_revision(self): + if self._values['system-info'] is None: + return None + if 'hostBoardPartRevNum' not in self._values['system-info'][0]: + return None + if self._values['system-info'][0]['hostBoardPartRevNum'].strip() == '': + return None + return self._values['system-info'][0]['hostBoardPartRevNum'] + + @property + def package_edition(self): + return self._values['Edition'] + + @property + def package_version(self): + return 'Build {0} - {1}'.format(self._values['Build'], 
self._values['Date'])
+
+    @property
+    def product_build(self):
+        return self._values['Build']
+
+    @property
+    def product_build_date(self):
+        return self._values['Date']
+
+    @property
+    def product_built(self):
+        if 'version_info' not in self._values:
+            return None
+        if 'Built' in self._values['version_info']:
+            return int(self._values['version_info']['Built'])
+
+    @property
+    def product_changelist(self):
+        if 'version_info' not in self._values:
+            return None
+        if 'Changelist' in self._values['version_info']:
+            return int(self._values['version_info']['Changelist'])
+
+    @property
+    def product_jobid(self):
+        if 'version_info' not in self._values:
+            return None
+        if 'JobID' in self._values['version_info']:
+            return int(self._values['version_info']['JobID'])
+
+    @property
+    def product_code(self):
+        return self._values['Product']
+
+    @property
+    def product_version(self):
+        return self._values['Version']
+
+    @property
+    def hardware_information(self):
+        if self._values['hardware-version'] is None:
+            return None
+        self._transform_name_attribute(self._values['hardware-version'])
+        result = [v for k, v in iteritems(self._values['hardware-version'])]
+        return result
+
+    def _transform_name_attribute(self, entry):
+        # Recursively rename 'tmName' keys to 'name' throughout the structure.
+        if isinstance(entry, dict):
+            # Iterate over a snapshot of the items, because the rename mutates
+            # the dict while it is being walked.
+            for k, v in list(iteritems(entry)):
+                if k == 'tmName':
+                    entry['name'] = entry.pop('tmName')
+                self._transform_name_attribute(v)
+        elif isinstance(entry, list):
+            for item in entry:
+                self._transform_name_attribute(item)
+        else:
+            return
+
+    @property
+    def time(self):
+        if self._values['fullDate'] is None:
+            return None
+        date = datetime.datetime.strptime(self._values['fullDate'], "%Y-%m-%dT%H:%M:%SZ")
+        result = dict(
+            day=date.day,
+            hour=date.hour,
+            minute=date.minute,
+            month=date.month,
+            second=date.second,
+            year=date.year
+        )
+        return result
+
+    @property
+    def marketing_name(self):
+        if self._values['platform'] is None:
+            return None
+        return self._values['platform'][0]['marketingName']
+
+    @property
+    def base_mac_address(self):
+        if self._values['platform'] is None:
+            return None
+        return self._values['platform'][0]['baseMac']
+
+
+class SystemInfoFactManager(BaseManager):
+    def __init__(self, *args, **kwargs):
+        self.client = kwargs.get('client', None)
+        self.module = kwargs.get('module', None)
+        super(SystemInfoFactManager, self).__init__(**kwargs)
+        self.want = SystemInfoParameters(params=self.module.params)
+
+    def exec_module(self):
+        facts = self._exec_module()
+        result = dict(system_info=facts)
+        return result
+
+    def _exec_module(self):
+        facts = self.read_facts()
+        results = facts.to_return()
+        return results
+
+    def read_facts(self):
+        collection = self.read_collection_from_device()
+        params = SystemInfoParameters(params=collection)
+        return params
+
+    def read_collection_from_device(self):
+        result = dict()
+        tmp = self.read_hardware_info_from_device()
+        if tmp:
+            result.update(tmp)
+
+        tmp = self.read_system_setup_from_device()
+        if tmp:
+            result.update(tmp)
+
+        tmp = self.read_clock_info_from_device()
+        if tmp:
+            result.update(tmp)
+
+        tmp = self.read_version_info_from_device()
+        if tmp:
+            result.update(tmp)
+
+        tmp = self.read_uptime_info_from_device()
+        if tmp:
+            result.update(tmp)
+
+        tmp = self.read_version_file_info_from_device()
+        if tmp:
+            result.update(tmp)
+
+        return result
+
+    def read_system_setup_from_device(self):
+        uri = "https://{0}:{1}/mgmt/shared/system/setup".format(
+            self.client.provider['server'],
+            self.client.provider['server_port'],
+        )
+        resp = 
self.client.api.get(uri)
+        try:
+            response = resp.json()
+        except ValueError as ex:
+            raise F5ModuleError(str(ex))
+        if 'code' in response and response['code'] == 400:
+            if 'message' in response:
+                raise F5ModuleError(response['message'])
+            else:
+                raise F5ModuleError(resp.content)
+
+        return response
+
+    def read_version_file_info_from_device(self):
+        uri = "https://{0}:{1}/mgmt/tm/util/bash".format(
+            self.client.provider['server'],
+            self.client.provider['server_port'],
+        )
+        args = dict(
+            command='run',
+            utilCmdArgs='-c "cat /VERSION"'
+        )
+        resp = self.client.api.post(uri, json=args)
+        try:
+            response = resp.json()
+        except ValueError as ex:
+            raise F5ModuleError(str(ex))
+        if 'code' in response and response['code'] == 400:
+            if 'message' in response:
+                raise F5ModuleError(response['message'])
+            else:
+                raise F5ModuleError(resp.content)
+        try:
+            pattern = r'^(?P<key>(Product|Build|Sequence|BaseBuild|Edition|Date|Built|Changelist|JobID))\:(?P<value>.*)'
+            result = response['commandResult'].strip()
+        except KeyError:
+            return None
+
+        if 'No such file or directory' in result:
+            return None
+
+        lines = response['commandResult'].split("\n")
+        result = dict()
+        for line in lines:
+            if not line:
+                continue
+            matches = re.match(pattern, line)
+            if matches:
+                result[matches.group('key')] = matches.group('value').strip()
+
+        if result:
+            return dict(
+                version_info=result
+            )
+
+    def read_uptime_info_from_device(self):
+        uri = "https://{0}:{1}/mgmt/tm/util/bash".format(
+            self.client.provider['server'],
+            self.client.provider['server_port'],
+        )
+        args = dict(
+            command='run',
+            utilCmdArgs='-c "cat /proc/uptime"'
+        )
+        resp = self.client.api.post(uri, json=args)
+        try:
+            response = resp.json()
+        except ValueError as ex:
+            raise F5ModuleError(str(ex))
+        if 'code' in response and response['code'] == 400:
+            if 'message' in response:
+                raise F5ModuleError(response['message'])
+            else:
+                raise F5ModuleError(resp.content)
+        try:
+            parts = response['commandResult'].strip().split(' ')
+            return dict(
+                uptime=math.floor(float(parts[0]))
+            )
+        except KeyError:
+            pass
+
+    def read_hardware_info_from_device(self):
+        uri = "https://{0}:{1}/mgmt/tm/sys/hardware".format(
+            self.client.provider['server'],
+            self.client.provider['server_port'],
+        )
+        resp = self.client.api.get(uri)
+        try:
+            response = resp.json()
+        except ValueError as ex:
+            raise F5ModuleError(str(ex))
+        if 'code' in response and response['code'] == 400:
+            if 'message' in response:
+                raise F5ModuleError(response['message'])
+            else:
+                raise F5ModuleError(resp.content)
+        result = parseStats(response)
+        return result
+
+    def read_clock_info_from_device(self):
+        """Parses clock info from the REST API
+
+        The clock stat returned from the REST API (at the time of 13.1.0.7)
+        is similar to the following.
+
+        {
+            "kind": "tm:sys:clock:clockstats",
+            "selfLink": "https://localhost/mgmt/tm/sys/clock?ver=13.1.0.4",
+            "entries": {
+                "https://localhost/mgmt/tm/sys/clock/0": {
+                    "nestedStats": {
+                        "entries": {
+                            "fullDate": {
+                                "description": "2018-06-05T13:38:33Z"
+                            }
+                        }
+                    }
+                }
+            }
+        }
+
+        Parsing this data using the ``parseStats`` method yields a list of
+        the clock stats in a format resembling that below.
+
+        [{'fullDate': '2018-06-05T13:41:05Z'}]
+
+        Therefore, this method cherry-picks the first entry from this list
+        and returns it. There can be no other items in this list.
+
+        Returns:
+            A dict mapping keys to the corresponding clock stats.
For + example: + + {'fullDate': '2018-06-05T13:41:05Z'} + + There should never not be a clock stat, unless by chance it + is removed from the API in the future, or changed to a different + API endpoint. + + Raises: + F5ModuleError: A non-successful HTTP code was returned or a JSON + response was not found. + """ + uri = "https://{0}:{1}/mgmt/tm/sys/clock".format( + self.client.provider['server'], + self.client.provider['server_port'], + ) + resp = self.client.api.get(uri) + try: + response = resp.json() + except ValueError as ex: + raise F5ModuleError(str(ex)) + if 'code' in response and response['code'] == 400: + if 'message' in response: + raise F5ModuleError(response['message']) + else: + raise F5ModuleError(resp.content) + result = parseStats(response) + if result is None: + return None + return result[0] + + def read_version_info_from_device(self): + """Parses version info from the REST API + + The version stat returned from the REST API (at the time of 13.1.0.7) + is similar to the following. + + { + "kind": "tm:sys:version:versionstats", + "selfLink": "https://localhost/mgmt/tm/sys/version?ver=13.1.0.4", + "entries": { + "https://localhost/mgmt/tm/sys/version/0": { + "nestedStats": { + "entries": { + "Build": { + "description": "0.0.6" + }, + "Date": { + "description": "Tue Mar 13 20:10:42 PDT 2018" + }, + "Edition": { + "description": "Point Release 4" + }, + "Product": { + "description": "BIG-IP" + }, + "Title": { + "description": "Main Package" + }, + "Version": { + "description": "13.1.0.4" + } + } + } + } + } + } + + Parsing this data using the ``parseStats`` method, yields a list of + the clock stats in a format resembling that below. + + [{'Build': '0.0.6', 'Date': 'Tue Mar 13 20:10:42 PDT 2018', + 'Edition': 'Point Release 4', 'Product': 'BIG-IP', 'Title': 'Main Package', + 'Version': '13.1.0.4'}] + + Therefore, this method cherry-picks the first entry from this list + and returns it. There can be no other items in this list. + + Returns: + A dict mapping keys to the corresponding clock stats. For + example: + + {'Build': '0.0.6', 'Date': 'Tue Mar 13 20:10:42 PDT 2018', + 'Edition': 'Point Release 4', 'Product': 'BIG-IP', 'Title': 'Main Package', + 'Version': '13.1.0.4'} + + There should never not be a version stat, unless by chance it + is removed from the API in the future, or changed to a different + API endpoint. + + Raises: + F5ModuleError: A non-successful HTTP code was returned or a JSON + response was not found. 
+ """ + uri = "https://{0}:{1}/mgmt/tm/sys/version".format( + self.client.provider['server'], + self.client.provider['server_port'], + ) + resp = self.client.api.get(uri) + try: + response = resp.json() + except ValueError as ex: + raise F5ModuleError(str(ex)) + if 'code' in response and response['code'] == 400: + if 'message' in response: + raise F5ModuleError(response['message']) + else: + raise F5ModuleError(resp.content) + result = parseStats(response) + if result is None: + return None + return result[0] + + +class VlansParameters(BaseParameters): + api_map = { + 'autoLasthop': 'auto_lasthop', + 'cmpHash': 'cmp_hash_algorithm', + 'failsafeAction': 'failsafe_action', + 'failsafe': 'failsafe_enabled', + 'failsafeTimeout': 'failsafe_timeout', + 'ifIndex': 'if_index', + 'learning': 'learning_mode', + 'interfacesReference': 'interfaces', + 'sourceChecking': 'source_check_enabled', + 'fullPath': 'full_path' + } + + returnables = [ + 'full_path', + 'name', + 'auto_lasthop', + 'cmp_hash_algorithm', + 'description', + 'failsafe_action', + 'failsafe_enabled', + 'failsafe_timeout', + 'if_index', + 'learning_mode', + 'interfaces', + 'mtu', + 'sflow_poll_interval', + 'sflow_poll_interval_global', + 'sflow_sampling_rate', + 'sflow_sampling_rate_global', + 'source_check_enabled', + 'true_mac_address', + 'tag', + ] + + @property + def interfaces(self): + if self._values['interfaces'] is None: + return None + if 'items' not in self._values['interfaces']: + return None + result = [] + for item in self._values['interfaces']['items']: + tmp = dict( + name=item['name'], + full_path=item['fullPath'] + ) + if 'tagged' in item: + tmp['tagged'] = 'yes' + else: + tmp['tagged'] = 'no' + result.append(tmp) + return result + + @property + def sflow_poll_interval(self): + return int(self._values['sflow']['pollInterval']) + + @property + def sflow_poll_interval_global(self): + return flatten_boolean(self._values['sflow']['pollIntervalGlobal']) + + @property + def sflow_sampling_rate(self): + return int(self._values['sflow']['samplingRate']) + + @property + def sflow_sampling_rate_global(self): + return flatten_boolean(self._values['sflow']['samplingRateGlobal']) + + @property + def source_check_state(self): + return flatten_boolean(self._values['source_check_state']) + + @property + def true_mac_address(self): + if self._values['stats']['macTrue'] in [None, 'none']: + return None + return self._values['stats']['macTrue'] + + @property + def tag(self): + return self._values['stats']['id'] + + @property + def failsafe_enabled(self): + return flatten_boolean(self._values['failsafe_enabled']) + + +class VlansFactManager(BaseManager): + def __init__(self, *args, **kwargs): + self.client = kwargs.get('client', None) + self.module = kwargs.get('module', None) + super(VlansFactManager, self).__init__(**kwargs) + self.want = VlansParameters(params=self.module.params) + + def exec_module(self): + facts = self._exec_module() + result = dict(vlans=facts) + return result + + def _exec_module(self): + results = [] + facts = self.read_facts() + for item in facts: + attrs = item.to_return() + results.append(attrs) + results = sorted(results, key=lambda k: k['full_path']) + return results + + def read_facts(self): + results = [] + collection = self.read_collection_from_device() + for resource in collection: + resource.update(self.read_stats(resource['fullPath'])) + params = VlansParameters(params=resource) + results.append(params) + return results + + def read_stats(self, resource): + uri = 
"https://{0}:{1}/mgmt/tm/net/vlan/{2}/stats".format( + self.client.provider['server'], + self.client.provider['server_port'], + transform_name(name=resource) + + ) + resp = self.client.api.get(uri) + try: + response = resp.json() + except ValueError as ex: + raise F5ModuleError(str(ex)) + if 'code' in response and response['code'] == 400: + if 'message' in response: + raise F5ModuleError(response['message']) + else: + raise F5ModuleError(resp.content) + result = parseStats(response) + return result + + def read_collection_from_device(self): + uri = "https://{0}:{1}/mgmt/tm/net/vlan/?expandSubcollections=true".format( + self.client.provider['server'], + self.client.provider['server_port'], + ) + resp = self.client.api.get(uri) + try: + response = resp.json() + except ValueError as ex: + raise F5ModuleError(str(ex)) + + if 'code' in response and response['code'] == 400: + if 'message' in response: + raise F5ModuleError(response['message']) + else: + raise F5ModuleError(resp.content) + if 'items' not in response: + return [] + result = response['items'] + return result + + +class ModuleManager(object): + def __init__(self, *args, **kwargs): + self.module = kwargs.get('module', None) + self.client = kwargs.get('client', None) + self.kwargs = kwargs + self.want = Parameters(params=self.module.params) + self.managers = { + 'applications': dict( + manager=ApplicationsFactManager, + client=F5RestClient, + ), + 'managed-devices': dict( + manager=ManagedDevicesFactManager, + client=F5RestClient, + ), + 'purchased-pool-licenses': dict( + manager=PurchasedPoolLicensesFactManager, + client=F5RestClient, + ), + 'regkey-pools': dict( + manager=RegkeyPoolsFactManager, + client=F5RestClient, + ), + 'system-info': dict( + manager=SystemInfoFactManager, + client=F5RestClient, + ), + 'vlans': dict( + manager=VlansFactManager, + client=F5RestClient, + ), + } + + def exec_module(self): + self.handle_all_keyword() + res = self.check_valid_gather_subset(self.want.gather_subset) + if res: + invalid = ','.join(res) + raise F5ModuleError( + "The specified 'gather_subset' options are invalid: {0}".format(invalid) + ) + result = self.filter_excluded_facts() + + managers = [] + for name in result: + manager = self.get_manager(name) + if manager: + managers.append(manager) + + if not managers: + result = dict( + changed=False + ) + return result + + result = self.execute_managers(managers) + if result: + result['changed'] = True + else: + result['changed'] = False + return result + + def filter_excluded_facts(self): + # Remove the excluded entries from the list of possible facts + exclude = [x[1:] for x in self.want.gather_subset if x[0] == '!'] + include = [x for x in self.want.gather_subset if x[0] != '!'] + result = [x for x in include if x not in exclude] + return result + + def handle_all_keyword(self): + if 'all' not in self.want.gather_subset: + return + managers = list(self.managers.keys()) + self.want.gather_subset + managers.remove('all') + self.want.update({'gather_subset': managers}) + + def check_valid_gather_subset(self, includes): + """Check that the specified subset is valid + + The ``gather_subset`` parameter is specified as a "raw" field which means that + any Python type could technically be provided + + :param includes: + :return: + """ + keys = self.managers.keys() + result = [] + for x in includes: + if x not in keys: + if x[0] == '!': + if x[1:] not in keys: + result.append(x) + else: + result.append(x) + return result + + def execute_managers(self, managers): + results = dict() + for manager in 
managers: + result = manager.exec_module() + results.update(result) + return results + + def get_manager(self, which): + result = {} + info = self.managers.get(which, None) + if not info: + return result + kwargs = dict() + kwargs.update(self.kwargs) + + manager = info.get('manager', None) + client = info.get('client', None) + kwargs['client'] = client(**self.module.params) + result = manager(**kwargs) + return result + + +class ArgumentSpec(object): + def __init__(self): + self.supports_check_mode = False + argument_spec = dict( + gather_subset=dict( + type='list', + required=True, + choices=[ + # Meta choices + 'all', + + # Non-meta choices + 'applications', + 'managed-devices', + 'purchased-pool-licenses', + 'regkey-pools', + 'system-info', + 'vlans', + + # Negations of meta choices + '!all', + + # Negations of non-meta-choices + '!applications', + '!managed-devices', + '!purchased-pool-licenses', + '!regkey-pools', + '!system-info', + '!vlans', + ] + ), + ) + self.argument_spec = {} + self.argument_spec.update(f5_argument_spec) + self.argument_spec.update(argument_spec) + + +def main(): + spec = ArgumentSpec() + + module = AnsibleModule( + argument_spec=spec.argument_spec, + supports_check_mode=spec.supports_check_mode + ) + if module._name == 'bigiq_device_facts': + module.deprecate("The 'bigiq_device_facts' module has been renamed to 'bigiq_device_info'", version='2.13') + + try: + mm = ModuleManager(module=module) + results = mm.exec_module() + module.exit_json(**results) + except F5ModuleError as ex: + module.fail_json(msg=str(ex)) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/fortianalyzer/faz_device.py b/plugins/modules/network/fortianalyzer/faz_device.py new file mode 100644 index 0000000000..08a3615751 --- /dev/null +++ b/plugins/modules/network/fortianalyzer/faz_device.py @@ -0,0 +1,438 @@ +#!/usr/bin/python +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +ANSIBLE_METADATA = { + "metadata_version": "1.1", + "status": ["preview"], + "supported_by": "community" +} + +DOCUMENTATION = ''' +--- +module: faz_device +author: Luke Weighall (@lweighall) +short_description: Add or remove device +description: + - Add or remove a device or list of devices to FortiAnalyzer Device Manager. ADOM Capable. + +options: + adom: + description: + - The ADOM the configuration should belong to. + required: true + default: root + type: str + + mode: + description: + - Add or delete devices. Or promote unregistered devices that are in the FortiAnalyzer "waiting pool" + required: false + default: add + choices: ["add", "delete", "promote"] + type: str + + device_username: + description: + - The username of the device being added to FortiAnalyzer. + required: false + type: str + + device_password: + description: + - The password of the device being added to FortiAnalyzer. 
diff --git a/plugins/modules/network/fortianalyzer/faz_device.py b/plugins/modules/network/fortianalyzer/faz_device.py
new file mode 100644
index 0000000000..08a3615751
--- /dev/null
+++ b/plugins/modules/network/fortianalyzer/faz_device.py
@@ -0,0 +1,438 @@
+#!/usr/bin/python
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+    "metadata_version": "1.1",
+    "status": ["preview"],
+    "supported_by": "community"
+}
+
+DOCUMENTATION = '''
+---
+module: faz_device
+author: Luke Weighall (@lweighall)
+short_description: Add or remove device
+description:
+  - Add or remove a device or list of devices from FortiAnalyzer Device Manager. ADOM capable.
+
+options:
+  adom:
+    description:
+      - The ADOM the configuration should belong to.
+    required: false
+    default: root
+    type: str
+
+  mode:
+    description:
+      - Add or delete devices, or promote unregistered devices that are in the FortiAnalyzer "waiting pool".
+    required: false
+    default: add
+    choices: ["add", "delete", "promote"]
+    type: str
+
+  device_username:
+    description:
+      - The username of the device being added to FortiAnalyzer.
+    required: false
+    type: str
+
+  device_password:
+    description:
+      - The password of the device being added to FortiAnalyzer.
+    required: false
+    type: str
+
+  device_ip:
+    description:
+      - The IP of the device being added to FortiAnalyzer.
+    required: false
+    type: str
+
+  device_unique_name:
+    description:
+      - The desired "friendly" name of the device being added to FortiAnalyzer.
+    required: false
+    type: str
+
+  device_serial:
+    description:
+      - The serial number of the device being added to FortiAnalyzer.
+    required: false
+    type: str
+
+  os_type:
+    description:
+      - The OS type of the device being added.
+    required: false
+    choices: ["unknown", "fos", "fsw", "foc", "fml", "faz", "fwb", "fch", "fct", "log", "fmg", "fsa", "fdd", "fac"]
+    type: str
+
+  mgmt_mode:
+    description:
+      - Management Mode of the device you are adding.
+    choices: ["unreg", "fmg", "faz", "fmgfaz"]
+    required: false
+    type: str
+
+  os_minor_vers:
+    description:
+      - Minor OS rev of the device.
+    required: false
+    type: str
+
+  os_ver:
+    description:
+      - Major OS rev of the device.
+    required: false
+    choices: ["unknown", "0.0", "1.0", "2.0", "3.0", "4.0", "5.0", "6.0"]
+    type: str
+
+  platform_str:
+    description:
+      - Required to determine the platform for VM platforms, e.g. FortiGate-VM64.
+    required: false
+    type: str
+
+  faz_quota:
+    description:
+      - Specifies the quota for the device in FortiAnalyzer.
+    required: false
+    type: str
+'''
+
+EXAMPLES = '''
+- name: DISCOVER AND ADD A PHYSICAL FORTIGATE
+  faz_device:
+    adom: "root"
+    device_username: "admin"
+    device_password: "admin"
+    device_ip: "10.10.24.201"
+    device_unique_name: "FGT1"
+    device_serial: "FGVM000000117994"
+    mode: "add"
+    mgmt_mode: "faz"
+    os_type: "fos"
+    os_ver: "5.0"
+    os_minor_vers: "6"
+
+
+- name: DISCOVER AND ADD A VIRTUAL FORTIGATE
+  faz_device:
+    adom: "root"
+    device_username: "admin"
+    device_password: "admin"
+    device_ip: "10.10.24.202"
+    device_unique_name: "FGT2"
+    mgmt_mode: "faz"
+    os_type: "fos"
+    os_ver: "5.0"
+    os_minor_vers: "6"
+    mode: "add"
+    platform_str: "FortiGate-VM64"
+
+- name: DELETE DEVICE FGT01
+  faz_device:
+    adom: "root"
+    device_unique_name: "ansible-fgt01"
+    mode: "delete"
+
+- name: DELETE DEVICE FGT02
+  faz_device:
+    adom: "root"
+    device_unique_name: "ansible-fgt02"
+    mode: "delete"
+
+- name: PROMOTE FGT01 IN FAZ BY IP
+  faz_device:
+    adom: "root"
+    device_password: "fortinet"
+    device_ip: "10.7.220.151"
+    device_username: "ansible"
+    mgmt_mode: "faz"
+    mode: "promote"
+
+
+- name: PROMOTE FGT02 IN FAZ
+  faz_device:
+    adom: "root"
+    device_password: "fortinet"
+    device_unique_name: "ansible-fgt02"
+    device_username: "ansible"
+    mgmt_mode: "faz"
+    mode: "promote"
+
+'''
+
+RETURN = """
+api_result:
+  description: full API response, includes status code and message
+  returned: always
+  type: str
+"""
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.connection import Connection
+from ansible_collections.community.general.plugins.module_utils.network.fortianalyzer.fortianalyzer import FortiAnalyzerHandler
+from ansible_collections.community.general.plugins.module_utils.network.fortianalyzer.common import FAZBaseException
+from ansible_collections.community.general.plugins.module_utils.network.fortianalyzer.common import FAZCommon
+from ansible_collections.community.general.plugins.module_utils.network.fortianalyzer.common import FAZMethods
+from ansible_collections.community.general.plugins.module_utils.network.fortianalyzer.common import DEFAULT_RESULT_OBJ
+from ansible_collections.community.general.plugins.module_utils.network.fortianalyzer.common import FAIL_SOCKET_MSG
+
+
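A reading aid for the functions that follow: main() repacks module.params into a "paramgram" dict whose keys do not always match the option names (device_ip becomes ip, device_serial becomes sn, faz_quota becomes faz.quota). A minimal sketch of that repacking, worth keeping in mind when the helpers index paramgram:

    # Sketch: option name -> paramgram key renames performed in main() below.
    RENAMES = {'device_ip': 'ip', 'device_serial': 'sn', 'faz_quota': 'faz.quota'}

    def build_paramgram(params):
        # Copy params, applying the renames; all other keys pass through unchanged.
        return {RENAMES.get(key, key): value for key, value in params.items()}

    paramgram = build_paramgram({'device_ip': '10.0.0.1', 'device_serial': 'FGVM000000117994', 'adom': 'root'})
    assert paramgram['ip'] == '10.0.0.1' and paramgram['sn'] == 'FGVM000000117994'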
+def faz_add_device(faz, paramgram):
+    """
+    This method is used to add devices to the FortiAnalyzer.
+    """
+
+    datagram = {
+        "adom": paramgram["adom"],
+        "device": {"adm_usr": paramgram["device_username"], "adm_pass": paramgram["device_password"],
+                   "ip": paramgram["ip"], "name": paramgram["device_unique_name"],
+                   "mgmt_mode": paramgram["mgmt_mode"], "os_type": paramgram["os_type"],
+                   "mr": paramgram["os_minor_vers"]}
+    }
+
+    if paramgram["platform_str"] is not None:
+        datagram["device"]["platform_str"] = paramgram["platform_str"]
+
+    if paramgram["sn"] is not None:
+        datagram["device"]["sn"] = paramgram["sn"]
+
+    if paramgram["device_action"] is not None:
+        datagram["device"]["device_action"] = paramgram["device_action"]
+
+    if paramgram["faz.quota"] is not None:
+        datagram["device"]["faz.quota"] = paramgram["faz.quota"]
+
+    url = '/dvm/cmd/add/device/'
+    response = faz.process_request(url, datagram, FAZMethods.EXEC)
+    return response
+
+
+def faz_delete_device(faz, paramgram):
+    """
+    This method deletes a device from the FAZ
+    """
+    datagram = {
+        "adom": paramgram["adom"],
+        "device": paramgram["device_unique_name"],
+    }
+
+    url = '/dvm/cmd/del/device/'
+    response = faz.process_request(url, datagram, FAZMethods.EXEC)
+    return response
+
+
+def faz_get_unknown_devices(faz):
+    """
+    This method gets devices with an unknown management type field
+    """
+
+    faz_filter = ["mgmt_mode", "==", "0"]
+
+    datagram = {
+        "filter": faz_filter
+    }
+
+    url = "/dvmdb/device"
+    response = faz.process_request(url, datagram, FAZMethods.GET)
+
+    return response
+
+
+def faz_approve_unregistered_device_by_ip(faz, paramgram):
+    """
+    This method approves unregistered devices by ip.
+    """
+    # TRY TO FIND DETAILS ON THIS UNREGISTERED DEVICE
+    unknown_devices = faz_get_unknown_devices(faz)
+    target_device = None
+    if unknown_devices[0] == 0:
+        for device in unknown_devices[1]:
+            if device["ip"] == paramgram["ip"]:
+                target_device = device
+    else:
+        return "No devices are waiting to be registered!"
+
+    # now that we have the target device details...fill out the datagram and make the call to promote it
+    if target_device is not None:
+        target_device_paramgram = {
+            "adom": paramgram["adom"],
+            "ip": target_device["ip"],
+            "device_username": paramgram["device_username"],
+            "device_password": paramgram["device_password"],
+            "device_unique_name": paramgram["device_unique_name"],
+            "sn": target_device["sn"],
+            "os_type": target_device["os_type"],
+            "mgmt_mode": paramgram["mgmt_mode"],
+            "os_minor_vers": target_device["mr"],
+            "os_ver": target_device["os_ver"],
+            "platform_str": target_device["platform_str"],
+            "faz.quota": target_device["faz.quota"],
+            "device_action": paramgram["device_action"]
+        }
+
+        add_device = faz_add_device(faz, target_device_paramgram)
+        return add_device
+
+    return str("Couldn't find the desired device with ip: " + str(paramgram["ip"]))
+
+
+def faz_approve_unregistered_device_by_name(faz, paramgram):
+    # TRY TO FIND DETAILS ON THIS UNREGISTERED DEVICE
+    unknown_devices = faz_get_unknown_devices(faz)
+    target_device = None
+    if unknown_devices[0] == 0:
+        for device in unknown_devices[1]:
+            if device["name"] == paramgram["device_unique_name"]:
+                target_device = device
+    else:
+        return "No devices are waiting to be registered!"
+ + # now that we have the target device details...fill out the datagram and make the call to promote it + if target_device is not None: + target_device_paramgram = { + "adom": paramgram["adom"], + "ip": target_device["ip"], + "device_username": paramgram["device_username"], + "device_password": paramgram["device_password"], + "device_unique_name": paramgram["device_unique_name"], + "sn": target_device["sn"], + "os_type": target_device["os_type"], + "mgmt_mode": paramgram["mgmt_mode"], + "os_minor_vers": target_device["mr"], + "os_ver": target_device["os_ver"], + "platform_str": target_device["platform_str"], + "faz.quota": target_device["faz.quota"], + "device_action": paramgram["device_action"] + } + + add_device = faz_add_device(faz, target_device_paramgram) + return add_device + + return str("Couldn't find the desired device with name: " + str(paramgram["device_unique_name"])) + + +def main(): + argument_spec = dict( + adom=dict(required=False, type="str", default="root"), + mode=dict(choices=["add", "delete", "promote"], type="str", default="add"), + + device_ip=dict(required=False, type="str"), + device_username=dict(required=False, type="str"), + device_password=dict(required=False, type="str", no_log=True), + device_unique_name=dict(required=False, type="str"), + device_serial=dict(required=False, type="str"), + + os_type=dict(required=False, type="str", choices=["unknown", "fos", "fsw", "foc", "fml", + "faz", "fwb", "fch", "fct", "log", "fmg", + "fsa", "fdd", "fac"]), + mgmt_mode=dict(required=False, type="str", choices=["unreg", "fmg", "faz", "fmgfaz"]), + os_minor_vers=dict(required=False, type="str"), + os_ver=dict(required=False, type="str", choices=["unknown", "0.0", "1.0", "2.0", "3.0", "4.0", "5.0", "6.0"]), + platform_str=dict(required=False, type="str"), + faz_quota=dict(required=False, type="str") + ) + + required_if = [ + ['mode', 'delete', ['device_unique_name']], + ['mode', 'add', ['device_serial', 'device_username', + 'device_password', 'device_unique_name', 'device_ip', 'mgmt_mode', 'platform_str']] + + ] + + module = AnsibleModule(argument_spec, supports_check_mode=True, required_if=required_if, ) + + # START SESSION LOGIC + paramgram = { + "adom": module.params["adom"], + "mode": module.params["mode"], + "ip": module.params["device_ip"], + "device_username": module.params["device_username"], + "device_password": module.params["device_password"], + "device_unique_name": module.params["device_unique_name"], + "sn": module.params["device_serial"], + "os_type": module.params["os_type"], + "mgmt_mode": module.params["mgmt_mode"], + "os_minor_vers": module.params["os_minor_vers"], + "os_ver": module.params["os_ver"], + "platform_str": module.params["platform_str"], + "faz.quota": module.params["faz_quota"], + "device_action": None + } + # INSERT THE PARAMGRAM INTO THE MODULE SO WHEN WE PASS IT TO MOD_UTILS.FortiManagerHandler IT HAS THAT INFO + + if paramgram["mode"] == "add": + paramgram["device_action"] = "add_model" + elif paramgram["mode"] == "promote": + paramgram["device_action"] = "promote_unreg" + module.paramgram = paramgram + + # TRY TO INIT THE CONNECTION SOCKET PATH AND FortiManagerHandler OBJECT AND TOOLS + faz = None + if module._socket_path: + connection = Connection(module._socket_path) + faz = FortiAnalyzerHandler(connection, module) + faz.tools = FAZCommon() + else: + module.fail_json(**FAIL_SOCKET_MSG) + + # BEGIN MODULE-SPECIFIC LOGIC -- THINGS NEED TO HAPPEN DEPENDING ON THE ENDPOINT AND OPERATION + results = DEFAULT_RESULT_OBJ + + try: + if 
paramgram["mode"] == "add": + results = faz_add_device(faz, paramgram) + except BaseException as err: + raise FAZBaseException(msg="An error occurred trying to add the device. Error: " + str(err)) + + try: + if paramgram["mode"] == "promote": + if paramgram["ip"] is not None: + results = faz_approve_unregistered_device_by_ip(faz, paramgram) + elif paramgram["device_unique_name"] is not None: + results = faz_approve_unregistered_device_by_name(faz, paramgram) + except BaseException as err: + raise FAZBaseException(msg="An error occurred trying to promote the device. Error: " + str(err)) + + try: + if paramgram["mode"] == "delete": + results = faz_delete_device(faz, paramgram) + except BaseException as err: + raise FAZBaseException(msg="An error occurred trying to delete the device. Error: " + str(err)) + + # PROCESS RESULTS + try: + faz.govern_response(module=module, results=results, + ansible_facts=faz.construct_ansible_facts(results, module.params, paramgram)) + except BaseException as err: + raise FAZBaseException(msg="An error occurred with govern_response(). Error: " + str(err)) + + # This should only be hit if faz.govern_response is missed or failed somehow. In fact. It should never be hit. + # But it's here JIC. + return module.exit_json(**results[1]) + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/network/fortimanager/fmgr_device.py b/plugins/modules/network/fortimanager/fmgr_device.py new file mode 100644 index 0000000000..b1a0afad28 --- /dev/null +++ b/plugins/modules/network/fortimanager/fmgr_device.py @@ -0,0 +1,302 @@ +#!/usr/bin/python +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +ANSIBLE_METADATA = { + "metadata_version": "1.1", + "status": ["preview"], + "supported_by": "community" +} + +DOCUMENTATION = ''' +--- +module: fmgr_device +notes: + - Full Documentation at U(https://ftnt-ansible-docs.readthedocs.io/en/latest/). +author: + - Luke Weighall (@lweighall) + - Andrew Welsh (@Ghilli3) + - Jim Huber (@p4r4n0y1ng) +short_description: Add or remove device from FortiManager. +description: + - Add or remove a device or list of devices from FortiManager Device Manager using JSON RPC API. + +options: + adom: + description: + - The ADOM the configuration should belong to. + required: true + default: root + + mode: + description: + - The desired mode of the specified object. + required: false + default: add + choices: ["add", "delete"] + + blind_add: + description: + - When adding a device, module will check if it exists, and skip if it does. + - If enabled, this option will stop the module from checking if it already exists, and blindly add the device. + required: false + default: "disable" + choices: ["enable", "disable"] + + device_username: + description: + - The username of the device being added to FortiManager. 
+    required: false
+
+  device_password:
+    description:
+      - The password of the device being added to FortiManager.
+    required: false
+
+  device_ip:
+    description:
+      - The IP of the device being added to FortiManager. Supports both IPv4 and IPv6.
+    required: false
+
+  device_unique_name:
+    description:
+      - The desired "friendly" name of the device being added to FortiManager.
+    required: false
+
+  device_serial:
+    description:
+      - The serial number of the device being added to FortiManager.
+    required: false
+'''
+
+EXAMPLES = '''
+- name: DISCOVER AND ADD DEVICE FGT1
+  fmgr_device:
+    adom: "root"
+    device_username: "admin"
+    device_password: "admin"
+    device_ip: "10.10.24.201"
+    device_unique_name: "FGT1"
+    device_serial: "FGVM000000117994"
+    mode: "add"
+    blind_add: "enable"
+
+- name: DELETE DEVICE FGT2
+  fmgr_device:
+    adom: "root"
+    device_username: "admin"
+    device_password: "admin"
+    device_ip: "10.10.24.202"
+    device_unique_name: "FGT2"
+    device_serial: "FGVM000000117992"
+    mode: "delete"
+'''
+
+RETURN = """
+api_result:
+  description: full API response, includes status code and message
+  returned: always
+  type: str
+"""
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.connection import Connection
+from ansible_collections.fortinet.fortios.plugins.module_utils.network.fortimanager.fortimanager import FortiManagerHandler
+from ansible_collections.fortinet.fortios.plugins.module_utils.network.fortimanager.common import FMGBaseException
+from ansible_collections.fortinet.fortios.plugins.module_utils.network.fortimanager.common import FMGRCommon
+from ansible_collections.fortinet.fortios.plugins.module_utils.network.fortimanager.common import FMGRMethods
+from ansible_collections.fortinet.fortios.plugins.module_utils.network.fortimanager.common import DEFAULT_RESULT_OBJ
+from ansible_collections.fortinet.fortios.plugins.module_utils.network.fortimanager.common import FAIL_SOCKET_MSG
+
+
+def discover_device(fmgr, paramgram):
+    """
+    This method is used to discover devices before adding them to FMGR
+
+    :param fmgr: The fmgr object instance from fmgr_utils.py
+    :type fmgr: class object
+    :param paramgram: The formatted dictionary of options to process
+    :type paramgram: dict
+
+    :return: The response from the FortiManager
+    :rtype: dict
+    """
+
+    datagram = {
+        "odd_request_form": "True",
+        "device": {"adm_usr": paramgram["device_username"],
+                   "adm_pass": paramgram["device_password"],
+                   "ip": paramgram["device_ip"]}
+    }
+
+    url = '/dvm/cmd/discover/device/'
+
+    response = fmgr.process_request(url, datagram, FMGRMethods.EXEC)
+    return response
+
+
+def add_device(fmgr, paramgram):
+    """
+    This method is used to add devices to the FMGR
+
+    :param fmgr: The fmgr object instance from fmgr_utils.py
+    :type fmgr: class object
+    :param paramgram: The formatted dictionary of options to process
+    :type paramgram: dict
+
+    :return: The response from the FortiManager
+    :rtype: dict
+    """
+
+    datagram = {
+        "adom": paramgram["adom"],
+        "flags": ["create_task", "nonblocking"],
+        "odd_request_form": "True",
+        "device": {"adm_usr": paramgram["device_username"], "adm_pass": paramgram["device_password"],
+                   "ip": paramgram["device_ip"], "name": paramgram["device_unique_name"],
+                   "sn": paramgram["device_serial"], "mgmt_mode": "fmgfaz", "flags": 24}
+    }
+
+    url = '/dvm/cmd/add/device/'
+    response = fmgr.process_request(url, datagram, FMGRMethods.EXEC)
+    return response
+
+
+def delete_device(fmgr, paramgram):
+    """
+    This method deletes a device from the FMGR
+
:param fmgr: The fmgr object instance from fmgr_utils.py + :type fmgr: class object + :param paramgram: The formatted dictionary of options to process + :type paramgram: dict + + :return: The response from the FortiManager + :rtype: dict + """ + datagram = { + "adom": paramgram["adom"], + "flags": ["create_task", "nonblocking"], + "device": paramgram["device_unique_name"], + } + + url = '/dvm/cmd/del/device/' + response = fmgr.process_request(url, datagram, FMGRMethods.EXEC) + return response + + +def get_device(fmgr, paramgram): + """ + This method attempts to find the firewall on FortiManager to see if it already exists. + + :param fmgr: The fmgr object instance from fmgr_utils.py + :type fmgr: class object + :param paramgram: The formatted dictionary of options to process + :type paramgram: dict + + :return: The response from the FortiManager + :rtype: dict + """ + datagram = { + "adom": paramgram["adom"], + "filter": ["name", "==", paramgram["device_unique_name"]], + } + + url = '/dvmdb/adom/{adom}/device/{name}'.format(adom=paramgram["adom"], + name=paramgram["device_unique_name"]) + response = fmgr.process_request(url, datagram, FMGRMethods.GET) + return response + + +def main(): + argument_spec = dict( + adom=dict(required=False, type="str", default="root"), + mode=dict(choices=["add", "delete"], type="str", default="add"), + blind_add=dict(choices=["enable", "disable"], type="str", default="disable"), + device_ip=dict(required=False, type="str"), + device_username=dict(required=False, type="str"), + device_password=dict(required=False, type="str", no_log=True), + device_unique_name=dict(required=True, type="str"), + device_serial=dict(required=False, type="str") + ) + + # BUILD MODULE OBJECT SO WE CAN BUILD THE PARAMGRAM + module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False, ) + + # BUILD THE PARAMGRAM + paramgram = { + "device_ip": module.params["device_ip"], + "device_username": module.params["device_username"], + "device_password": module.params["device_password"], + "device_unique_name": module.params["device_unique_name"], + "device_serial": module.params["device_serial"], + "adom": module.params["adom"], + "mode": module.params["mode"] + } + + # INSERT THE PARAMGRAM INTO THE MODULE SO WHEN WE PASS IT TO MOD_UTILS.FortiManagerHandler IT HAS THAT INFO + module.paramgram = paramgram + + # TRY TO INIT THE CONNECTION SOCKET PATH AND FortiManagerHandler OBJECT AND TOOLS + fmgr = None + if module._socket_path: + connection = Connection(module._socket_path) + fmgr = FortiManagerHandler(connection, module) + fmgr.tools = FMGRCommon() + else: + module.fail_json(**FAIL_SOCKET_MSG) + + # BEGIN MODULE-SPECIFIC LOGIC -- THINGS NEED TO HAPPEN DEPENDING ON THE ENDPOINT AND OPERATION + results = DEFAULT_RESULT_OBJ + try: + if paramgram["mode"] == "add": + # CHECK IF DEVICE EXISTS + if module.params["blind_add"] == "disable": + exists_results = get_device(fmgr, paramgram) + fmgr.govern_response(module=module, results=exists_results, good_codes=(0, -3), changed=False, + ansible_facts=fmgr.construct_ansible_facts(exists_results, + module.params, paramgram)) + + discover_results = discover_device(fmgr, paramgram) + fmgr.govern_response(module=module, results=discover_results, stop_on_success=False, + ansible_facts=fmgr.construct_ansible_facts(discover_results, + module.params, paramgram)) + + if discover_results[0] == 0: + results = add_device(fmgr, paramgram) + fmgr.govern_response(module=module, results=discover_results, stop_on_success=True, + changed_if_success=True, 
+ ansible_facts=fmgr.construct_ansible_facts(discover_results, + module.params, paramgram)) + + if paramgram["mode"] == "delete": + results = delete_device(fmgr, paramgram) + fmgr.govern_response(module=module, results=results, + ansible_facts=fmgr.construct_ansible_facts(results, module.params, paramgram)) + + except Exception as err: + raise FMGBaseException(err) + + return module.exit_json(**results[1]) + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/network/fortimanager/fmgr_device_config.py b/plugins/modules/network/fortimanager/fmgr_device_config.py new file mode 100644 index 0000000000..d61004bae7 --- /dev/null +++ b/plugins/modules/network/fortimanager/fmgr_device_config.py @@ -0,0 +1,237 @@ +#!/usr/bin/python +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = { + "metadata_version": "1.1", + "status": ["preview"], + "supported_by": "community" +} + +DOCUMENTATION = ''' +--- +module: fmgr_device_config +notes: + - Full Documentation at U(https://ftnt-ansible-docs.readthedocs.io/en/latest/). +author: + - Luke Weighall (@lweighall) + - Andrew Welsh (@Ghilli3) + - Jim Huber (@p4r4n0y1ng) +short_description: Edit device configurations +description: + - Edit device configurations from FortiManager Device Manager using JSON RPC API. + +options: + adom: + description: + - The ADOM the configuration should belong to. + required: false + default: root + + device_unique_name: + description: + - The unique device's name that you are editing. A.K.A. Friendly name of the device in FortiManager. + required: True + + device_hostname: + description: + - The device's new hostname. + required: false + + install_config: + description: + - Tells FMGR to attempt to install the config after making it. + required: false + default: disable + + interface: + description: + - The interface/port number you are editing. + required: false + + interface_ip: + description: + - The IP and subnet of the interface/port you are editing. + required: false + + interface_allow_access: + description: + - Specify what protocols are allowed on the interface, comma-separated list (see examples). 
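The add path of fmgr_device above only probes for an existing device when blind_add is disabled, and the existence check calls govern_response with good_codes=(0, -3), which suggests -3 is the "object not found" code (an assumption here, inferred from that call). A standalone sketch of that check-before-add flow, with stand-in callables for the module helpers:

    # Sketch of the blind_add / check-before-add flow; get_device and add_device
    # stand in for the module helpers, and -3 meaning "not found" is an assumption
    # based on good_codes=(0, -3) above.
    def ensure_device(get_device, add_device, blind_add=False):
        if not blind_add:
            code, _ = get_device()
            if code == 0:
                return 'already-present'  # device exists, nothing to add
            if code != -3:
                raise RuntimeError('unexpected code {0}'.format(code))
        return add_device()

    print(ensure_device(lambda: (-3, {}), lambda: 'added'))  # -> 'added'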
+ required: false +''' + +EXAMPLES = ''' +- name: CHANGE HOSTNAME + fmgr_device_config: + device_hostname: "ChangedbyAnsible" + device_unique_name: "FGT1" + +- name: EDIT INTERFACE INFORMATION + fmgr_device_config: + adom: "root" + device_unique_name: "FGT2" + interface: "port3" + interface_ip: "10.1.1.1/24" + interface_allow_access: "ping, telnet, https" + +- name: INSTALL CONFIG + fmgr_device_config: + adom: "root" + device_unique_name: "FGT1" + install_config: "enable" +''' + +RETURN = """ +api_result: + description: full API response, includes status code and message + returned: always + type: str +""" + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.connection import Connection +from ansible_collections.fortinet.fortios.plugins.module_utils.network.fortimanager.fortimanager import FortiManagerHandler +from ansible_collections.fortinet.fortios.plugins.module_utils.network.fortimanager.common import FMGBaseException +from ansible_collections.fortinet.fortios.plugins.module_utils.network.fortimanager.common import FMGRCommon +from ansible_collections.fortinet.fortios.plugins.module_utils.network.fortimanager.common import DEFAULT_RESULT_OBJ +from ansible_collections.fortinet.fortios.plugins.module_utils.network.fortimanager.common import FAIL_SOCKET_MSG +from ansible_collections.fortinet.fortios.plugins.module_utils.network.fortimanager.common import FMGRMethods + + +def update_device_hostname(fmgr, paramgram): + """ + :param fmgr: The fmgr object instance from fortimanager.py + :type fmgr: class object + :param paramgram: The formatted dictionary of options to process + :type paramgram: dict + :return: The response from the FortiManager + :rtype: dict + """ + datagram = { + "hostname": paramgram["device_hostname"] + } + + url = "pm/config/device/{device_name}/global/system/global".format(device_name=paramgram["device_unique_name"]) + response = fmgr.process_request(url, datagram, FMGRMethods.UPDATE) + return response + + +def update_device_interface(fmgr, paramgram): + """ + :param fmgr: The fmgr object instance from fortimanager.py + :type fmgr: class object + :param paramgram: The formatted dictionary of options to process + :type paramgram: dict + :return: The response from the FortiManager + :rtype: dict + """ + access_list = list() + allow_access_list = paramgram["interface_allow_access"].replace(' ', '') + access_list = allow_access_list.split(',') + + datagram = { + "allowaccess": access_list, + "ip": paramgram["interface_ip"] + } + + url = "/pm/config/device/{device_name}/global/system/interface" \ + "/{interface}".format(device_name=paramgram["device_unique_name"], interface=paramgram["interface"]) + response = fmgr.process_request(url, datagram, FMGRMethods.UPDATE) + return response + + +def exec_config(fmgr, paramgram): + """ + :param fmgr: The fmgr object instance from fortimanager.py + :type fmgr: class object + :param paramgram: The formatted dictionary of options to process + :type paramgram: dict + :return: The response from the FortiManager + :rtype: dict + """ + datagram = { + "scope": { + "name": paramgram["device_unique_name"] + }, + "adom": paramgram["adom"], + "flags": "none" + } + + url = "/securityconsole/install/device" + response = fmgr.process_request(url, datagram, FMGRMethods.EXEC) + return response + + +def main(): + argument_spec = dict( + adom=dict(required=False, type="str", default="root"), + device_unique_name=dict(required=True, type="str"), + device_hostname=dict(required=False, type="str"), + 
interface=dict(required=False, type="str"), + interface_ip=dict(required=False, type="str"), + interface_allow_access=dict(required=False, type="str"), + install_config=dict(required=False, type="str", default="disable"), + ) + + module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False, ) + paramgram = { + "device_unique_name": module.params["device_unique_name"], + "device_hostname": module.params["device_hostname"], + "interface": module.params["interface"], + "interface_ip": module.params["interface_ip"], + "interface_allow_access": module.params["interface_allow_access"], + "install_config": module.params["install_config"], + "adom": module.params["adom"] + } + module.paramgram = paramgram + fmgr = None + if module._socket_path: + connection = Connection(module._socket_path) + fmgr = FortiManagerHandler(connection, module) + fmgr.tools = FMGRCommon() + else: + module.fail_json(**FAIL_SOCKET_MSG) + + # BEGIN MODULE-SPECIFIC LOGIC -- THINGS NEED TO HAPPEN DEPENDING ON THE ENDPOINT AND OPERATION + results = DEFAULT_RESULT_OBJ + try: + if paramgram["device_hostname"] is not None: + results = update_device_hostname(fmgr, paramgram) + fmgr.govern_response(module=module, results=results, + ansible_facts=fmgr.construct_ansible_facts(results, module.params, paramgram)) + + if paramgram["interface_ip"] is not None or paramgram["interface_allow_access"] is not None: + results = update_device_interface(fmgr, paramgram) + fmgr.govern_response(module=module, results=results, + ansible_facts=fmgr.construct_ansible_facts(results, module.params, paramgram)) + + if paramgram["install_config"] == "enable": + results = exec_config(fmgr, paramgram) + fmgr.govern_response(module=module, results=results, + ansible_facts=fmgr.construct_ansible_facts(results, module.params, paramgram)) + + except Exception as err: + raise FMGBaseException(err) + + return module.exit_json(**results[1]) + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/network/fortimanager/fmgr_device_group.py b/plugins/modules/network/fortimanager/fmgr_device_group.py new file mode 100644 index 0000000000..7a6bf74d2b --- /dev/null +++ b/plugins/modules/network/fortimanager/fmgr_device_group.py @@ -0,0 +1,329 @@ +#!/usr/bin/python +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = { + "metadata_version": "1.1", + "status": ["preview"], + "supported_by": "community" +} + +DOCUMENTATION = ''' +--- +module: fmgr_device_group +notes: + - Full Documentation at U(https://ftnt-ansible-docs.readthedocs.io/en/latest/). +author: + - Luke Weighall (@lweighall) + - Andrew Welsh (@Ghilli3) + - Jim Huber (@p4r4n0y1ng) +short_description: Alter FortiManager device groups. +description: + - Add or edit device groups and assign devices to device groups FortiManager Device Manager using JSON RPC API. 
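update_device_interface in fmgr_device_config above turns the comma-separated interface_allow_access string ("ping, telnet, https") into the list the API expects by stripping spaces and splitting on commas. The same transformation as a standalone sketch:

    # Sketch of the allowaccess parsing used by update_device_interface above.
    def parse_allow_access(raw):
        # "ping, telnet, https" -> ["ping", "telnet", "https"]
        return raw.replace(' ', '').split(',')

    assert parse_allow_access('ping, telnet, https') == ['ping', 'telnet', 'https']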
+
+options:
+  adom:
+    description:
+      - The ADOM the configuration should belong to.
+    required: false
+    default: root
+
+  vdom:
+    description:
+      - The VDOM of the Fortigate you want to add; it must match the device in FMGR. Usually root.
+    required: false
+    default: root
+
+  mode:
+    description:
+      - Sets one of four modes for managing the object.
+      - Allows use of soft-adds instead of overwriting existing values.
+    choices: ['add', 'set', 'delete', 'update']
+    required: false
+    default: add
+
+  grp_name:
+    description:
+      - The name of the device group.
+    required: true
+
+  grp_desc:
+    description:
+      - The description of the device group.
+    required: false
+
+  grp_members:
+    description:
+      - A comma-separated list of device names or device groups to be added as members to the device group.
+      - If group members are defined and mode="delete", only the group members will be removed.
+      - To delete the group itself, omit this parameter from the task in the playbook.
+    required: false
+
+'''
+
+
+EXAMPLES = '''
+- name: CREATE DEVICE GROUP
+  fmgr_device_group:
+    grp_name: "TestGroup"
+    grp_desc: "CreatedbyAnsible"
+    adom: "ansible"
+    mode: "add"
+
+- name: CREATE DEVICE GROUP 2
+  fmgr_device_group:
+    grp_name: "AnsibleGroup"
+    grp_desc: "CreatedbyAnsible"
+    adom: "ansible"
+    mode: "add"
+
+- name: ADD DEVICES TO DEVICE GROUP
+  fmgr_device_group:
+    mode: "add"
+    grp_name: "TestGroup"
+    grp_members: "FGT1,FGT2"
+    adom: "ansible"
+    vdom: "root"
+
+- name: REMOVE DEVICES FROM DEVICE GROUP
+  fmgr_device_group:
+    mode: "delete"
+    grp_name: "TestGroup"
+    grp_members: "FGT1,FGT2"
+    adom: "ansible"
+
+- name: DELETE DEVICE GROUP
+  fmgr_device_group:
+    grp_name: "AnsibleGroup"
+    grp_desc: "CreatedbyAnsible"
+    mode: "delete"
+    adom: "ansible"
+'''
+
+RETURN = """
+api_result:
+  description: full API response, includes status code and message
+  returned: always
+  type: str
+"""
+
+from ansible.module_utils.basic import AnsibleModule, env_fallback
+from ansible.module_utils.connection import Connection
+from ansible_collections.fortinet.fortios.plugins.module_utils.network.fortimanager.fortimanager import FortiManagerHandler
+from ansible_collections.fortinet.fortios.plugins.module_utils.network.fortimanager.common import FMGBaseException
+from ansible_collections.fortinet.fortios.plugins.module_utils.network.fortimanager.common import FMGRCommon
+from ansible_collections.fortinet.fortios.plugins.module_utils.network.fortimanager.common import FMGRMethods
+from ansible_collections.fortinet.fortios.plugins.module_utils.network.fortimanager.common import DEFAULT_RESULT_OBJ
+from ansible_collections.fortinet.fortios.plugins.module_utils.network.fortimanager.common import FAIL_SOCKET_MSG
+
+
+def get_groups(fmgr, paramgram):
+    """
+    :param fmgr: The fmgr object instance from fortimanager.py
+    :type fmgr: class object
+    :param paramgram: The formatted dictionary of options to process
+    :type paramgram: dict
+    :return: The response from the FortiManager
+    :rtype: dict
+    """
+
+    datagram = {
+        "method": "get"
+    }
+
+    url = '/dvmdb/adom/{adom}/group'.format(adom=paramgram["adom"])
+    response = fmgr.process_request(url, datagram, FMGRMethods.GET)
+    return response
+
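The add_device_group function that follows selects the API method from the module's mode with an if/elif chain over set, update, and add. A table-driven sketch of the same dispatch, with a stub transport in place of fmgr.process_request (the method names are plain strings, consistent with set_devprof later in this patch passing paramgram["mode"] straight to process_request):

    # Sketch: table-driven equivalent of the mode -> API-method dispatch below.
    MODE_TO_METHOD = {'add': 'add', 'set': 'set', 'update': 'update'}

    def dispatch(mode, process_request, url, datagram):
        method = MODE_TO_METHOD.get(mode)
        if method is None:
            raise ValueError('unsupported mode: {0}'.format(mode))
        return process_request(url, datagram, method)

    # Stub transport that just echoes what it was asked to do:
    echo = lambda url, datagram, method: (method, url, datagram)
    print(dispatch('add', echo, '/dvmdb/adom/root/group', {'name': 'TestGroup'}))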
+ url = "" + mode = paramgram["mode"] + + datagram = { + "name": paramgram["grp_name"], + "desc": paramgram["grp_desc"], + "os_type": "fos" + } + + url = '/dvmdb/adom/{adom}/group'.format(adom=paramgram["adom"]) + + # IF MODE = SET -- USE THE 'SET' API CALL MODE + if mode == "set": + response = fmgr.process_request(url, datagram, FMGRMethods.SET) + # IF MODE = UPDATE -- USER THE 'UPDATE' API CALL MODE + elif mode == "update": + response = fmgr.process_request(url, datagram, FMGRMethods.UPDATE) + # IF MODE = ADD -- USE THE 'ADD' API CALL MODE + elif mode == "add": + response = fmgr.process_request(url, datagram, FMGRMethods.ADD) + + return response + + +def delete_device_group(fmgr, paramgram): + """ + :param fmgr: The fmgr object instance from fortimanager.py + :type fmgr: class object + :param paramgram: The formatted dictionary of options to process + :type paramgram: dict + :return: The response from the FortiManager + :rtype: dict + """ + # INIT A BASIC OBJECTS + response = DEFAULT_RESULT_OBJ + url = "" + + datagram = { + "adom": paramgram["adom"], + "name": paramgram["grp_name"] + } + + url = '/dvmdb/adom/{adom}/group/{grp_name}'.format(adom=paramgram["adom"], grp_name=paramgram["grp_name"]) + response = fmgr.process_request(url, datagram, FMGRMethods.DELETE) + return response + + +def add_group_member(fmgr, paramgram): + """ + :param fmgr: The fmgr object instance from fortimanager.py + :type fmgr: class object + :param paramgram: The formatted dictionary of options to process + :type paramgram: dict + :return: The response from the FortiManager + :rtype: dict + """ + # INIT A BASIC OBJECTS + response = DEFAULT_RESULT_OBJ + url = "" + device_member_list = paramgram["grp_members"].replace(' ', '') + device_member_list = device_member_list.split(',') + + for dev_name in device_member_list: + datagram = {'name': dev_name, 'vdom': paramgram["vdom"]} + + url = '/dvmdb/adom/{adom}/group/{grp_name}/object member'.format(adom=paramgram["adom"], + grp_name=paramgram["grp_name"]) + response = fmgr.process_request(url, datagram, FMGRMethods.ADD) + + return response + + +def delete_group_member(fmgr, paramgram): + """ + :param fmgr: The fmgr object instance from fortimanager.py + :type fmgr: class object + :param paramgram: The formatted dictionary of options to process + :type paramgram: dict + :return: The response from the FortiManager + :rtype: dict + """ + # INIT A BASIC OBJECTS + response = DEFAULT_RESULT_OBJ + url = "" + device_member_list = paramgram["grp_members"].replace(' ', '') + device_member_list = device_member_list.split(',') + + for dev_name in device_member_list: + datagram = {'name': dev_name, 'vdom': paramgram["vdom"]} + + url = '/dvmdb/adom/{adom}/group/{grp_name}/object member'.format(adom=paramgram["adom"], + grp_name=paramgram["grp_name"]) + response = fmgr.process_request(url, datagram, FMGRMethods.DELETE) + + return response + + +def main(): + argument_spec = dict( + adom=dict(required=False, type="str", default="root"), + vdom=dict(required=False, type="str", default="root"), + mode=dict(choices=["add", "set", "delete", "update"], type="str", default="add"), + grp_desc=dict(required=False, type="str"), + grp_name=dict(required=True, type="str"), + grp_members=dict(required=False, type="str"), + ) + + module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False, ) + paramgram = { + "mode": module.params["mode"], + "grp_name": module.params["grp_name"], + "grp_desc": module.params["grp_desc"], + "grp_members": module.params["grp_members"], + "adom": 
module.params["adom"], + "vdom": module.params["vdom"] + } + module.paramgram = paramgram + fmgr = None + if module._socket_path: + connection = Connection(module._socket_path) + fmgr = FortiManagerHandler(connection, module) + fmgr.tools = FMGRCommon() + else: + module.fail_json(**FAIL_SOCKET_MSG) + + # BEGIN MODULE-SPECIFIC LOGIC -- THINGS NEED TO HAPPEN DEPENDING ON THE ENDPOINT AND OPERATION + results = DEFAULT_RESULT_OBJ + try: + # PROCESS THE GROUP ADDS FIRST + if paramgram["grp_name"] is not None and paramgram["mode"] in ["add", "set", "update"]: + # add device group + results = add_device_group(fmgr, paramgram) + fmgr.govern_response(module=module, results=results, + ansible_facts=fmgr.construct_ansible_facts(results, module.params, paramgram)) + + # PROCESS THE GROUP MEMBER ADDS + if paramgram["grp_members"] is not None and paramgram["mode"] in ["add", "set", "update"]: + # assign devices to device group + results = add_group_member(fmgr, paramgram) + fmgr.govern_response(module=module, results=results, + ansible_facts=fmgr.construct_ansible_facts(results, module.params, paramgram)) + + # PROCESS THE GROUP MEMBER DELETES + if paramgram["grp_members"] is not None and paramgram["mode"] == "delete": + # remove devices grom a group + results = delete_group_member(fmgr, paramgram) + fmgr.govern_response(module=module, results=results, + ansible_facts=fmgr.construct_ansible_facts(results, module.params, paramgram)) + + # PROCESS THE GROUP DELETES, ONLY IF GRP_MEMBERS IS NOT NULL TOO + if paramgram["grp_name"] is not None and paramgram["mode"] == "delete" and paramgram["grp_members"] is None: + # delete device group + results = delete_device_group(fmgr, paramgram) + fmgr.govern_response(module=module, results=results, + ansible_facts=fmgr.construct_ansible_facts(results, module.params, paramgram)) + except Exception as err: + raise FMGBaseException(err) + + return module.exit_json(**results[1]) + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/network/fortimanager/fmgr_device_provision_template.py b/plugins/modules/network/fortimanager/fmgr_device_provision_template.py new file mode 100644 index 0000000000..c043f06bef --- /dev/null +++ b/plugins/modules/network/fortimanager/fmgr_device_provision_template.py @@ -0,0 +1,1552 @@ +#!/usr/bin/python +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +ANSIBLE_METADATA = { + "metadata_version": "1.1", + "status": ["preview"], + "supported_by": "community" +} + +DOCUMENTATION = ''' +--- +module: fmgr_device_provision_template +notes: + - Full Documentation at U(https://ftnt-ansible-docs.readthedocs.io/en/latest/). +author: + - Luke Weighall (@lweighall) + - Andrew Welsh (@Ghilli3) + - Jim Huber (@p4r4n0y1ng) +short_description: Manages Device Provisioning Templates in FortiManager. 
+description: + - Allows the editing and assignment of device provisioning templates in FortiManager. + +options: + adom: + description: + - The ADOM the configuration should belong to. + required: true + + mode: + description: + - Sets one of three modes for managing the object. + - Allows use of soft-adds instead of overwriting existing values. + choices: ['add', 'set', 'delete', 'update'] + required: false + default: add + + device_unique_name: + description: + - The unique device's name that you are editing. + required: True + + provisioning_template: + description: + - The provisioning template you want to apply (default = default). + required: True + + provision_targets: + description: + - The friendly names of devices in FortiManager to assign the provisioning template to. CSV separated list. + required: True + + snmp_status: + description: + - Enables or disables SNMP globally. + required: False + choices: ["enable", "disable"] + + snmp_v2c_query_port: + description: + - Sets the snmp v2c community query port. + required: False + + snmp_v2c_trap_port: + description: + - Sets the snmp v2c community trap port. + required: False + + snmp_v2c_status: + description: + - Enables or disables the v2c community specified. + required: False + choices: ["enable", "disable"] + + snmp_v2c_trap_status: + description: + - Enables or disables the v2c community specified for traps. + required: False + choices: ["enable", "disable"] + + snmp_v2c_query_status: + description: + - Enables or disables the v2c community specified for queries. + required: False + choices: ["enable", "disable"] + + snmp_v2c_name: + description: + - Specifies the v2c community name. + required: False + + snmp_v2c_id: + description: + - Primary key for the snmp community. this must be unique! + required: False + + snmp_v2c_trap_src_ipv4: + description: + - Source ip the traps should come from IPv4. + required: False + + snmp_v2c_trap_hosts_ipv4: + description: > + - IPv4 addresses of the hosts that should get SNMP v2c traps, comma separated, must include mask + ("10.7.220.59 255.255.255.255, 10.7.220.60 255.255.255.255"). + required: False + + snmp_v2c_query_hosts_ipv4: + description: > + - IPv4 addresses or subnets that are allowed to query SNMP v2c, comma separated + ("10.7.220.59 255.255.255.0, 10.7.220.0 255.255.255.0"). + required: False + + snmpv3_auth_proto: + description: + - SNMPv3 auth protocol. + required: False + choices: ["md5", "sha"] + + snmpv3_auth_pwd: + description: + - SNMPv3 auth pwd __ currently not encrypted! ensure this file is locked down permissions wise! + required: False + + snmpv3_name: + description: + - SNMPv3 user name. + required: False + + snmpv3_notify_hosts: + description: + - List of ipv4 hosts to send snmpv3 traps to. Comma separated IPv4 list. + required: False + + snmpv3_priv_proto: + description: + - SNMPv3 priv protocol. + required: False + choices: ["aes", "des", "aes256", "aes256cisco"] + + snmpv3_priv_pwd: + description: + - SNMPv3 priv pwd currently not encrypted! ensure this file is locked down permissions wise! + required: False + + snmpv3_queries: + description: + - Allow snmpv3_queries. + required: False + choices: ["enable", "disable"] + + snmpv3_query_port: + description: + - SNMPv3 query port. + required: False + + snmpv3_security_level: + description: + - SNMPv3 security level. + required: False + choices: ["no-auth-no-priv", "auth-no-priv", "auth-priv"] + + snmpv3_source_ip: + description: + - SNMPv3 source ipv4 address for traps. 
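The snmp_v2c_*_hosts_ipv4 options above take a comma-separated list of "IP mask" pairs; set_devprof_snmp_v2c further below splits each entry on whitespace so the API receives the address and mask as a two-element list. A standalone sketch of that parsing (parse_hosts is an illustrative name):

    # Sketch of the "IP mask, IP mask" parsing used by set_devprof_snmp_v2c below.
    def parse_hosts(raw, host_type):
        hosts = []
        for idx, entry in enumerate(raw.strip().split(','), start=1):
            ip_and_mask = entry.strip().split()  # ["10.7.220.59", "255.255.255.255"]
            hosts.append({'id': idx, 'host-type': host_type, 'ip': ip_and_mask})
        return hosts

    print(parse_hosts('10.7.220.59 255.255.255.255, 10.7.220.0 255.255.255.0', 'query'))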
+ required: False + + snmpv3_status: + description: + - SNMPv3 user is enabled or disabled. + required: False + choices: ["enable", "disable"] + + snmpv3_trap_rport: + description: + - SNMPv3 trap remote port. + required: False + + snmpv3_trap_status: + description: + - SNMPv3 traps is enabled or disabled. + required: False + choices: ["enable", "disable"] + + syslog_port: + description: + - Syslog port that will be set. + required: False + + syslog_server: + description: + - Server the syslogs will be sent to. + required: False + + syslog_status: + description: + - Enables or disables syslogs. + required: False + choices: ["enable", "disable"] + + syslog_mode: + description: + - Remote syslog logging over UDP/Reliable TCP. + - choice | udp | Enable syslogging over UDP. + - choice | legacy-reliable | Enable legacy reliable syslogging by RFC3195 (Reliable Delivery for Syslog). + - choice | reliable | Enable reliable syslogging by RFC6587 (Transmission of Syslog Messages over TCP). + required: false + choices: ["udp", "legacy-reliable", "reliable"] + default: "udp" + + syslog_filter: + description: + - Sets the logging level for syslog. + required: False + choices: ["emergency", "alert", "critical", "error", "warning", "notification", "information", "debug"] + + syslog_facility: + description: + - Remote syslog facility. + - choice | kernel | Kernel messages. + - choice | user | Random user-level messages. + - choice | mail | Mail system. + - choice | daemon | System daemons. + - choice | auth | Security/authorization messages. + - choice | syslog | Messages generated internally by syslog. + - choice | lpr | Line printer subsystem. + - choice | news | Network news subsystem. + - choice | uucp | Network news subsystem. + - choice | cron | Clock daemon. + - choice | authpriv | Security/authorization messages (private). + - choice | ftp | FTP daemon. + - choice | ntp | NTP daemon. + - choice | audit | Log audit. + - choice | alert | Log alert. + - choice | clock | Clock daemon. + - choice | local0 | Reserved for local use. + - choice | local1 | Reserved for local use. + - choice | local2 | Reserved for local use. + - choice | local3 | Reserved for local use. + - choice | local4 | Reserved for local use. + - choice | local5 | Reserved for local use. + - choice | local6 | Reserved for local use. + - choice | local7 | Reserved for local use. + required: false + choices: ["kernel", "user", "mail", "daemon", "auth", "syslog", + "lpr", "news", "uucp", "cron", "authpriv", "ftp", "ntp", "audit", + "alert", "clock", "local0", "local1", "local2", "local3", "local4", "local5", "local6", "local7"] + default: "syslog" + + syslog_enc_algorithm: + description: + - Enable/disable reliable syslogging with TLS encryption. + - choice | high | SSL communication with high encryption algorithms. + - choice | low | SSL communication with low encryption algorithms. + - choice | disable | Disable SSL communication. + - choice | high-medium | SSL communication with high and medium encryption algorithms. + required: false + choices: ["high", "low", "disable", "high-medium"] + default: "disable" + + syslog_certificate: + description: + - Certificate used to communicate with Syslog server if encryption on. + required: false + + ntp_status: + description: + - Enables or disables ntp. + required: False + choices: ["enable", "disable"] + + ntp_sync_interval: + description: + - Sets the interval in minutes for ntp sync. 
+ required: False + + ntp_type: + description: + - Enables fortiguard servers or custom servers are the ntp source. + required: False + choices: ["fortiguard", "custom"] + + ntp_server: + description: + - Only used with custom ntp_type -- specifies IP of server to sync to -- comma separated ip addresses for multiples. + required: False + + ntp_auth: + description: + - Enables or disables ntp authentication. + required: False + choices: ["enable", "disable"] + + ntp_auth_pwd: + description: + - Sets the ntp auth password. + required: False + + ntp_v3: + description: + - Enables or disables ntpv3 (default is ntpv4). + required: False + choices: ["enable", "disable"] + + admin_https_redirect: + description: + - Enables or disables https redirect from http. + required: False + choices: ["enable", "disable"] + + admin_https_port: + description: + - SSL admin gui port number. + required: False + + admin_http_port: + description: + - Non-SSL admin gui port number. + required: False + + admin_timeout: + description: + - Admin timeout in minutes. + required: False + + admin_language: + description: + - Sets the admin gui language. + required: False + choices: ["english", "simch", "japanese", "korean", "spanish", "trach", "french", "portuguese"] + + admin_switch_controller: + description: + - Enables or disables the switch controller. + required: False + choices: ["enable", "disable"] + + admin_gui_theme: + description: + - Changes the admin gui theme. + required: False + choices: ["green", "red", "blue", "melongene", "mariner"] + + admin_enable_fortiguard: + description: + - Enables FortiGuard security updates to their default settings. + required: False + choices: ["none", "direct", "this-fmg"] + + admin_fortianalyzer_target: + description: + - Configures faz target. + required: False + + admin_fortiguard_target: + description: + - Configures fortiguard target. + - admin_enable_fortiguard must be set to "direct". + required: False + + smtp_username: + description: + - SMTP auth username. + required: False + + smtp_password: + description: + - SMTP password. + required: False + + smtp_port: + description: + - SMTP port number. + required: False + + smtp_replyto: + description: + - SMTP reply to address. + required: False + + smtp_conn_sec: + description: + - defines the ssl level for smtp. + required: False + choices: ["none", "starttls", "smtps"] + + smtp_server: + description: + - SMTP server ipv4 address. + required: False + + smtp_source_ipv4: + description: + - SMTP source ip address. + required: False + + smtp_validate_cert: + description: + - Enables or disables valid certificate checking for smtp. + required: False + choices: ["enable", "disable"] + + dns_suffix: + description: + - Sets the local dns domain suffix. + required: False + + dns_primary_ipv4: + description: + - primary ipv4 dns forwarder. + required: False + + dns_secondary_ipv4: + description: + - secondary ipv4 dns forwarder. + required: False + + delete_provisioning_template: + description: + - If specified, all other options are ignored. The specified provisioning template will be deleted. 
+    required: False
+
+'''
+
+
+EXAMPLES = '''
+- name: SET SNMP SYSTEM INFO
+  fmgr_device_provision_template:
+    provisioning_template: "default"
+    snmp_status: "enable"
+    mode: "set"
+
+- name: SET SNMP SYSTEM INFO ANSIBLE ADOM
+  fmgr_device_provision_template:
+    provisioning_template: "default"
+    snmp_status: "enable"
+    mode: "set"
+    adom: "ansible"
+
+- name: SET SNMP SYSTEM INFO different template (SNMPv2)
+  fmgr_device_provision_template:
+    provisioning_template: "ansibleTest"
+    snmp_status: "enable"
+    mode: "set"
+    adom: "ansible"
+    snmp_v2c_query_port: "162"
+    snmp_v2c_trap_port: "161"
+    snmp_v2c_status: "enable"
+    snmp_v2c_trap_status: "enable"
+    snmp_v2c_query_status: "enable"
+    snmp_v2c_name: "ansibleV2c"
+    snmp_v2c_id: "1"
+    snmp_v2c_trap_src_ipv4: "10.7.220.41"
+    snmp_v2c_trap_hosts_ipv4: "10.7.220.59 255.255.255.255, 10.7.220.60 255.255.255.255"
+    snmp_v2c_query_hosts_ipv4: "10.7.220.59 255.255.255.255, 10.7.220.0 255.255.255.0"
+
+- name: SET SNMP SYSTEM INFO different template (SNMPv3)
+  fmgr_device_provision_template:
+    provisioning_template: "ansibleTest"
+    snmp_status: "enable"
+    mode: "set"
+    adom: "ansible"
+    snmpv3_auth_proto: "sha"
+    snmpv3_auth_pwd: "fortinet"
+    snmpv3_name: "ansibleSNMPv3"
+    snmpv3_notify_hosts: "10.7.220.59,10.7.220.60"
+    snmpv3_priv_proto: "aes256"
+    snmpv3_priv_pwd: "fortinet"
+    snmpv3_queries: "enable"
+    snmpv3_query_port: "161"
+    snmpv3_security_level: "auth-priv"
+    snmpv3_source_ip: "0.0.0.0"
+    snmpv3_status: "enable"
+    snmpv3_trap_rport: "162"
+    snmpv3_trap_status: "enable"
+
+- name: SET SYSLOG INFO
+  fmgr_device_provision_template:
+    provisioning_template: "ansibleTest"
+    mode: "set"
+    adom: "ansible"
+    syslog_server: "10.7.220.59"
+    syslog_port: "514"
+    syslog_mode: "udp"
+    syslog_status: "enable"
+    syslog_filter: "information"
+
+- name: SET NTP TO FORTIGUARD
+  fmgr_device_provision_template:
+    provisioning_template: "ansibleTest"
+    mode: "set"
+    adom: "ansible"
+    ntp_status: "enable"
+    ntp_sync_interval: "60"
+    ntp_type: "fortiguard"
+
+- name: SET NTP TO CUSTOM SERVER
+  fmgr_device_provision_template:
+    provisioning_template: "ansibleTest"
+    mode: "set"
+    adom: "ansible"
+    ntp_status: "enable"
+    ntp_sync_interval: "60"
+    ntp_type: "custom"
+    ntp_server: "10.7.220.32,10.7.220.1"
+    ntp_auth: "enable"
+    ntp_auth_pwd: "fortinet"
+    ntp_v3: "disable"
+
+- name: SET ADMIN GLOBAL SETTINGS
+  fmgr_device_provision_template:
+    provisioning_template: "ansibleTest"
+    mode: "set"
+    adom: "ansible"
+    admin_https_redirect: "enable"
+    admin_https_port: "4433"
+    admin_http_port: "8080"
+    admin_timeout: "30"
+    admin_language: "english"
+    admin_switch_controller: "enable"
+    admin_gui_theme: "blue"
+    admin_enable_fortiguard: "direct"
+    admin_fortiguard_target: "10.7.220.128"
+    admin_fortianalyzer_target: "10.7.220.61"
+
+- name: SET CUSTOM SMTP SERVER
+  fmgr_device_provision_template:
+    provisioning_template: "ansibleTest"
+    mode: "set"
+    adom: "ansible"
+    smtp_username: "ansible"
+    smtp_password: "fortinet"
+    smtp_port: "25"
+    smtp_replyto: "ansible@do-not-reply.com"
+    smtp_conn_sec: "starttls"
+    smtp_server: "10.7.220.32"
+    smtp_source_ipv4: "0.0.0.0"
+    smtp_validate_cert: "disable"
+
+- name: SET DNS SERVERS
+  fmgr_device_provision_template:
+    provisioning_template: "ansibleTest"
+    mode: "set"
+    adom: "ansible"
+    dns_suffix: "ansible.local"
+    dns_primary_ipv4: "8.8.8.8"
+    dns_secondary_ipv4: "4.4.4.4"
+
+- name: SET PROVISIONING TEMPLATE DEVICE TARGETS IN FORTIMANAGER
+  fmgr_device_provision_template:
+    provisioning_template: "ansibleTest"
+    mode: "set"
adom: "ansible" + provision_targets: "FGT1, FGT2" + +- name: DELETE ENTIRE PROVISIONING TEMPLATE + fmgr_device_provision_template: + delete_provisioning_template: "ansibleTest" + mode: "delete" + adom: "ansible" + +''' +RETURN = """ +api_result: + description: full API response, includes status code and message + returned: always + type: str +""" + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.connection import Connection +from ansible_collections.fortinet.fortios.plugins.module_utils.network.fortimanager.fortimanager import FortiManagerHandler +from ansible_collections.fortinet.fortios.plugins.module_utils.network.fortimanager.common import FMGBaseException +from ansible_collections.fortinet.fortios.plugins.module_utils.network.fortimanager.common import FMGRCommon +from ansible_collections.fortinet.fortios.plugins.module_utils.network.fortimanager.common import FMGRMethods +from ansible_collections.fortinet.fortios.plugins.module_utils.network.fortimanager.common import DEFAULT_RESULT_OBJ +from ansible_collections.fortinet.fortios.plugins.module_utils.network.fortimanager.common import FAIL_SOCKET_MSG + + +def get_devprof(fmgr, paramgram): + """ + :param fmgr: The fmgr object instance from fortimanager.py + :type fmgr: class object + :param paramgram: The formatted dictionary of options to process + :type paramgram: dict + :return: The response from the FortiManager + :rtype: dict + """ + + response = DEFAULT_RESULT_OBJ + datagram = {} + + url = "/pm/devprof/adom/{adom}/{name}".format(adom=paramgram["adom"], name=paramgram["provisioning_template"]) + response = fmgr.process_request(url, datagram, FMGRMethods.GET) + + return response + + +def set_devprof(fmgr, paramgram): + """ + :param fmgr: The fmgr object instance from fortimanager.py + :type fmgr: class object + :param paramgram: The formatted dictionary of options to process + :type paramgram: dict + :return: The response from the FortiManager + :rtype: dict + """ + + response = DEFAULT_RESULT_OBJ + if paramgram["mode"] in ['set', 'add', 'update']: + datagram = { + "name": paramgram["provisioning_template"], + "type": "devprof", + "description": "CreatedByAnsible", + } + url = "/pm/devprof/adom/{adom}".format(adom=paramgram["adom"]) + + elif paramgram["mode"] == "delete": + datagram = {} + + url = "/pm/devprof/adom/{adom}/{name}".format(adom=paramgram["adom"], + name=paramgram["delete_provisioning_template"]) + + response = fmgr.process_request(url, datagram, paramgram["mode"]) + return response + + +def get_devprof_scope(fmgr, paramgram): + """ + :param fmgr: The fmgr object instance from fortimanager.py + :type fmgr: class object + :param paramgram: The formatted dictionary of options to process + :type paramgram: dict + :return: The response from the FortiManager + :rtype: dict + """ + + response = DEFAULT_RESULT_OBJ + datagram = { + "name": paramgram["provisioning_template"], + "type": "devprof", + "description": "CreatedByAnsible", + } + + url = "/pm/devprof/adom/{adom}".format(adom=paramgram["adom"]) + response = fmgr.process_request(url, datagram, FMGRMethods.GET) + + return response + + +def set_devprof_scope(fmgr, paramgram): + """ + :param fmgr: The fmgr object instance from fortimanager.py + :type fmgr: class object + :param paramgram: The formatted dictionary of options to process + :type paramgram: dict + :return: The response from the FortiManager + :rtype: dict + """ + + response = DEFAULT_RESULT_OBJ + if paramgram["mode"] in ['set', 'add', 'update']: + datagram = { + "name": 
paramgram["provisioning_template"], + "type": "devprof", + "description": "CreatedByAnsible", + } + + targets = [] + for target in paramgram["provision_targets"].split(","): + # split the host on the space to get the mask out + new_target = {"name": target.strip()} + targets.append(new_target) + + datagram["scope member"] = targets + + url = "/pm/devprof/adom/{adom}".format(adom=paramgram["adom"]) + + elif paramgram["mode"] == "delete": + datagram = { + "name": paramgram["provisioning_template"], + "type": "devprof", + "description": "CreatedByAnsible", + "scope member": paramgram["targets_to_add"] + } + + url = "/pm/devprof/adom/{adom}".format(adom=paramgram["adom"]) + + response = fmgr.process_request(url, datagram, FMGRMethods.SET) + return response + + +def set_devprof_snmp(fmgr, paramgram): + """ + :param fmgr: The fmgr object instance from fortimanager.py + :type fmgr: class object + :param paramgram: The formatted dictionary of options to process + :type paramgram: dict + :return: The response from the FortiManager + :rtype: dict + """ + paramgram["mode"] = paramgram["mode"] + adom = paramgram["adom"] + + response = DEFAULT_RESULT_OBJ + datagram = { + "status": paramgram["snmp_status"] + } + url = "/pm/config/adom/{adom}/devprof/" \ + "{provisioning_template}/system/snmp/sysinfo".format(adom=adom, + provisioning_template=paramgram["provisioning_template"]) + + response = fmgr.process_request(url, datagram, FMGRMethods.SET) + return response + + +def set_devprof_snmp_v2c(fmgr, paramgram): + """ + :param fmgr: The fmgr object instance from fortimanager.py + :type fmgr: class object + :param paramgram: The formatted dictionary of options to process + :type paramgram: dict + :return: The response from the FortiManager + :rtype: dict + """ + paramgram["mode"] = paramgram["mode"] + adom = paramgram["adom"] + + response = DEFAULT_RESULT_OBJ + if paramgram["mode"] in ['set', 'add', 'update']: + datagram = { + "query-v2c-port": paramgram["snmp_v2c_query_port"], + "trap-v2c-rport": paramgram["snmp_v2c_trap_port"], + "status": paramgram["snmp_v2c_status"], + "trap-v2c-status": paramgram["snmp_v2c_trap_status"], + "query-v2c-status": paramgram["snmp_v2c_query_status"], + "name": paramgram["snmp_v2c_name"], + "id": paramgram["snmp_v2c_id"], + "meta fields": dict(), + "hosts": list(), + "events": 411578417151, + "query-v1-status": 0, + "query-v1-port": 161, + "trap-v1-status": 0, + "trap-v1-lport": 162, + "trap-v1-rport": 162, + "trap-v2c-lport": 162, + } + + # BUILD THE HOST STRINGS + id_counter = 1 + if paramgram["snmp_v2c_trap_hosts_ipv4"] or paramgram["snmp_v2c_query_hosts_ipv4"]: + hosts = [] + if paramgram["snmp_v2c_query_hosts_ipv4"]: + for ipv4_host in paramgram["snmp_v2c_query_hosts_ipv4"].strip().split(","): + # split the host on the space to get the mask out + new_ipv4_host = {"ha-direct": "enable", + "host-type": "query", + "id": id_counter, + "ip": ipv4_host.strip().split(), + "meta fields": {}, + "source-ip": "0.0.0.0"} + hosts.append(new_ipv4_host) + id_counter += 1 + + if paramgram["snmp_v2c_trap_hosts_ipv4"]: + for ipv4_host in paramgram["snmp_v2c_trap_hosts_ipv4"].strip().split(","): + # split the host on the space to get the mask out + new_ipv4_host = {"ha-direct": "enable", + "host-type": "trap", + "id": id_counter, + "ip": ipv4_host.strip().split(), + "meta fields": {}, + "source-ip": paramgram["snmp_v2c_trap_src_ipv4"]} + hosts.append(new_ipv4_host) + id_counter += 1 + datagram["hosts"] = hosts + + url = "/pm/config/adom/{adom}/devprof/" \ + 
"{provisioning_template}/system/snmp/community".format(adom=adom, + provisioning_template=paramgram[ + "provisioning_template"]) + elif paramgram["mode"] == "delete": + datagram = { + "confirm": 1 + } + + url = "/pm/config/adom/{adom}/" \ + "devprof/{provisioning_template}/" \ + "system/snmp/community/{snmp_v2c_id}".format(adom=adom, + provisioning_template=paramgram["provisioning_template"], + snmp_v2c_id=paramgram["snmp_v2c_id"]) + + response = fmgr.process_request(url, datagram, paramgram["mode"]) + return response + + +def set_devprof_snmp_v3(fmgr, paramgram): + """ + :param fmgr: The fmgr object instance from fortimanager.py + :type fmgr: class object + :param paramgram: The formatted dictionary of options to process + :type paramgram: dict + :return: The response from the FortiManager + :rtype: dict + """ + paramgram["mode"] = paramgram["mode"] + adom = paramgram["adom"] + + response = DEFAULT_RESULT_OBJ + if paramgram["mode"] in ['set', 'add', 'update']: + datagram = {} + datagram["auth-pwd"] = paramgram["snmpv3_auth_pwd"] + datagram["priv-pwd"] = paramgram["snmpv3_priv_pwd"] + datagram["trap-rport"] = paramgram["snmpv3_trap_rport"] + datagram["query-port"] = paramgram["snmpv3_query_port"] + datagram["name"] = paramgram["snmpv3_name"] + datagram["notify-hosts"] = paramgram["snmpv3_notify_hosts"].strip().split(",") + datagram["events"] = 1647387997183 + datagram["trap-lport"] = 162 + + datagram["source-ip"] = paramgram["snmpv3_source_ip"] + datagram["ha-direct"] = 0 + + url = "/pm/config/adom/{adom}/" \ + "devprof/{provisioning_template}/" \ + "system/snmp/user".format(adom=adom, + provisioning_template=paramgram["provisioning_template"]) + elif paramgram["mode"] == "delete": + datagram = { + "confirm": 1 + } + + url = "/pm/config/adom/{adom}/devprof/" \ + "{provisioning_template}/system/snmp" \ + "/user/{snmpv3_name}".format(adom=adom, + provisioning_template=paramgram["provisioning_template"], + snmpv3_name=paramgram["snmpv3_name"]) + + response = fmgr.process_request(url, datagram, paramgram["mode"]) + return response + + +def set_devprof_syslog(fmgr, paramgram): + """ + :param fmgr: The fmgr object instance from fortimanager.py + :type fmgr: class object + :param paramgram: The formatted dictionary of options to process + :type paramgram: dict + :return: The response from the FortiManager + :rtype: dict + """ + paramgram["mode"] = paramgram["mode"] + adom = paramgram["adom"] + + response = DEFAULT_RESULT_OBJ + + datagram = { + "status": paramgram["syslog_status"], + "port": paramgram["syslog_port"], + "server": paramgram["syslog_server"], + "mode": paramgram["syslog_mode"], + "facility": paramgram["syslog_facility"] + } + + if paramgram["mode"] in ['set', 'add', 'update']: + if paramgram["syslog_enc_algorithm"] in ["high", "low", "high-medium"] \ + and paramgram["syslog_certificate"] is not None: + datagram["certificate"] = paramgram["certificate"] + datagram["enc-algorithm"] = paramgram["syslog_enc_algorithm"] + + url = "/pm/config/adom/{adom}/" \ + "devprof/{provisioning_template}/" \ + "log/syslogd/setting".format(adom=adom, + provisioning_template=paramgram["provisioning_template"]) + elif paramgram["mode"] == "delete": + url = "/pm/config/adom/{adom}/" \ + "devprof/{provisioning_template}/" \ + "log/syslogd/setting".format(adom=adom, + provisioning_template=paramgram["provisioning_template"]) + + response = fmgr.process_request(url, datagram, paramgram["mode"]) + return response + + +def set_devprof_syslog_filter(fmgr, paramgram): + """ + :param fmgr: The fmgr object 
instance from fortimanager.py + :type fmgr: class object + :param paramgram: The formatted dictionary of options to process + :type paramgram: dict + :return: The response from the FortiManager + :rtype: dict + """ + paramgram["mode"] = paramgram["mode"] + adom = paramgram["adom"] + datagram = { + "severity": paramgram["syslog_filter"] + } + response = DEFAULT_RESULT_OBJ + + url = "/pm/config/adom/{adom}" \ + "/devprof/{provisioning_template}" \ + "/log/syslogd/filter".format(adom=adom, + provisioning_template=paramgram["provisioning_template"]) + + response = fmgr.process_request(url, datagram, paramgram["mode"]) + return response + + +def set_devprof_ntp(fmgr, paramgram): + """ + :param fmgr: The fmgr object instance from fortimanager.py + :type fmgr: class object + :param paramgram: The formatted dictionary of options to process + :type paramgram: dict + :return: The response from the FortiManager + :rtype: dict + """ + paramgram["mode"] = paramgram["mode"] + adom = paramgram["adom"] + + response = DEFAULT_RESULT_OBJ + + # IF SET TO FORTIGUARD, BUILD A STRING SPECIFIC TO THAT + if paramgram["ntp_type"] == "fortiguard": + datagram = {} + if paramgram["ntp_status"] == "enable": + datagram["ntpsync"] = 1 + if paramgram["ntp_status"] == "disable": + datagram["ntpsync"] = 0 + if paramgram["ntp_sync_interval"] is None: + datagram["syncinterval"] = 1 + else: + datagram["syncinterval"] = paramgram["ntp_sync_interval"] + + datagram["type"] = 0 + + # IF THE NTP TYPE IS CUSTOM BUILD THE SERVER LIST + if paramgram["ntp_type"] == "custom": + id_counter = 0 + key_counter = 0 + ntpservers = [] + datagram = {} + if paramgram["ntp_status"] == "enable": + datagram["ntpsync"] = 1 + if paramgram["ntp_status"] == "disable": + datagram["ntpsync"] = 0 + try: + datagram["syncinterval"] = paramgram["ntp_sync_interval"] + except BaseException: + datagram["syncinterval"] = 1 + datagram["type"] = 1 + + for server in paramgram["ntp_server"].strip().split(","): + id_counter += 1 + server_fields = dict() + + key_counter += 1 + if paramgram["ntp_auth"] == "enable": + server_fields["authentication"] = 1 + server_fields["key"] = paramgram["ntp_auth_pwd"] + server_fields["key-id"] = key_counter + else: + server_fields["authentication"] = 0 + server_fields["key"] = "" + server_fields["key-id"] = key_counter + + if paramgram["ntp_v3"] == "enable": + server_fields["ntp_v3"] = 1 + else: + server_fields["ntp_v3"] = 0 + + # split the host on the space to get the mask out + new_ntp_server = {"authentication": server_fields["authentication"], + "id": id_counter, "key": server_fields["key"], + "key-id": id_counter, "ntpv3": server_fields["ntp_v3"], + "server": server} + ntpservers.append(new_ntp_server) + datagram["ntpserver"] = ntpservers + + url = "/pm/config/adom/{adom}" \ + "/devprof/{provisioning_template}" \ + "/system/ntp".format(adom=adom, + provisioning_template=paramgram["provisioning_template"]) + response = fmgr.process_request(url, datagram, paramgram["mode"]) + return response + + +def set_devprof_admin(fmgr, paramgram): + """ + :param fmgr: The fmgr object instance from fortimanager.py + :type fmgr: class object + :param paramgram: The formatted dictionary of options to process + :type paramgram: dict + :return: The response from the FortiManager + :rtype: dict + """ + paramgram["mode"] = paramgram["mode"] + adom = paramgram["adom"] + + response = DEFAULT_RESULT_OBJ + datagram = { + "admin-https-redirect": paramgram["admin_https_redirect"], + "admin-port": paramgram["admin_http_port"], + "admin-sport": 
paramgram["admin_https_port"], + "admintimeout": paramgram["admin_timeout"], + "language": paramgram["admin_language"], + "gui-theme": paramgram["admin_gui_theme"], + "switch-controller": paramgram["admin_switch_controller"], + } + url = "/pm/config/adom/{adom}" \ + "/devprof/{provisioning_template}" \ + "/system/global".format(adom=adom, + provisioning_template=paramgram["provisioning_template"]) + + response = fmgr.process_request(url, datagram, paramgram["mode"]) + return response + + +def set_devprof_smtp(fmgr, paramgram): + """ + :param fmgr: The fmgr object instance from fortimanager.py + :type fmgr: class object + :param paramgram: The formatted dictionary of options to process + :type paramgram: dict + :return: The response from the FortiManager + :rtype: dict + """ + paramgram["mode"] = paramgram["mode"] + adom = paramgram["adom"] + + response = DEFAULT_RESULT_OBJ + datagram = { + "port": paramgram["smtp_port"], + "reply-to": paramgram["smtp_replyto"], + "server": paramgram["smtp_server"], + "source-ip": paramgram["smtp_source_ipv4"] + } + + if paramgram["smtp_username"]: + datagram["authenticate"] = 1 + datagram["username"] = paramgram["smtp_username"] + datagram["password"] = paramgram["smtp_password"] + + if paramgram["smtp_conn_sec"] == "none": + datagram["security"] = 0 + if paramgram["smtp_conn_sec"] == "starttls": + datagram["security"] = 1 + if paramgram["smtp_conn_sec"] == "smtps": + datagram["security"] = 2 + + if paramgram["smtp_validate_cert"] == "enable": + datagram["validate-server"] = 1 + else: + datagram["validate-server"] = 0 + + url = "/pm/config/adom/{adom}" \ + "/devprof/{provisioning_template}" \ + "/system/email-server".format(adom=adom, + provisioning_template=paramgram["provisioning_template"]) + + response = fmgr.process_request(url, datagram, paramgram["mode"]) + return response + + +def set_devprof_dns(fmgr, paramgram): + """ + :param fmgr: The fmgr object instance from fortimanager.py + :type fmgr: class object + :param paramgram: The formatted dictionary of options to process + :type paramgram: dict + :return: The response from the FortiManager + :rtype: dict + """ + paramgram["mode"] = paramgram["mode"] + adom = paramgram["adom"] + + response = DEFAULT_RESULT_OBJ + datagram = { + "domain": paramgram["dns_suffix"], + "primary": paramgram["dns_primary_ipv4"], + "secondary": paramgram["dns_secondary_ipv4"], + } + url = "/pm/config/adom/{adom}" \ + "/devprof/{provisioning_template}" \ + "/system/dns".format(adom=adom, + provisioning_template=paramgram["provisioning_template"]) + + response = fmgr.process_request(url, datagram, paramgram["mode"]) + return response + + +def set_devprof_toggle_fg(fmgr, paramgram): + """ + :param fmgr: The fmgr object instance from fortimanager.py + :type fmgr: class object + :param paramgram: The formatted dictionary of options to process + :type paramgram: dict + :return: The response from the FortiManager + :rtype: dict + """ + paramgram["mode"] = paramgram["mode"] + adom = paramgram["adom"] + response = DEFAULT_RESULT_OBJ + datagram = {} + if paramgram["admin_enable_fortiguard"] in ["direct", "this-fmg"]: + datagram["include-default-servers"] = "enable" + elif paramgram["admin_enable_fortiguard"] == "none": + datagram["include-default-servers"] = "disable" + + datagram["server-list"] = list() + + url = "/pm/config/adom/{adom}" \ + "/devprof/{provisioning_template}" \ + "/system/central-management".format(adom=adom, + provisioning_template=paramgram["provisioning_template"]) + response = fmgr.process_request(url, datagram, 
FMGRMethods.SET) + + return response + + +def set_devprof_fg(fmgr, paramgram): + """ + :param fmgr: The fmgr object instance from fortimanager.py + :type fmgr: class object + :param paramgram: The formatted dictionary of options to process + :type paramgram: dict + :return: The response from the FortiManager + :rtype: dict + """ + paramgram["mode"] = paramgram["mode"] + adom = paramgram["adom"] + + response = DEFAULT_RESULT_OBJ + datagram = { + "target": paramgram["admin_enable_fortiguard"], + "target-ip": None + } + + if paramgram["mode"] in ['set', 'add', 'update']: + if paramgram["admin_fortiguard_target"] is not None and datagram["target"] == "direct": + datagram["target-ip"] = paramgram["admin_fortiguard_target"] + + url = "/pm/config/adom/{adom}" \ + "/devprof/{provisioning_template}" \ + "/device/profile/fortiguard".format(adom=adom, + provisioning_template=paramgram["provisioning_template"]) + + response = fmgr.process_request(url, datagram, paramgram["mode"]) + return response + + +def set_devprof_faz(fmgr, paramgram): + """ + :param fmgr: The fmgr object instance from fortimanager.py + :type fmgr: class object + :param paramgram: The formatted dictionary of options to process + :type paramgram: dict + :return: The response from the FortiManager + :rtype: dict + """ + paramgram["mode"] = paramgram["mode"] + adom = paramgram["adom"] + response = DEFAULT_RESULT_OBJ + datagram = { + "target-ip": paramgram["admin_fortianalyzer_target"], + "target": "others", + } + url = "/pm/config/adom/{adom}" \ + "/devprof/{provisioning_template}" \ + "/device/profile/fortianalyzer".format(adom=adom, + provisioning_template=paramgram["provisioning_template"]) + if paramgram["mode"] == "delete": + datagram["hastarget"] = "False" + + response = fmgr.process_request(url, datagram, paramgram["mode"]) + return response + + +def main(): + argument_spec = dict( + adom=dict(required=False, type="str"), + mode=dict(choices=["add", "set", "delete", "update"], type="str", default="add"), + + provisioning_template=dict(required=False, type="str"), + provision_targets=dict(required=False, type="str"), + + device_unique_name=dict(required=False, type="str"), + snmp_status=dict(required=False, type="str", choices=["enable", "disable"]), + snmp_v2c_query_port=dict(required=False, type="int"), + snmp_v2c_trap_port=dict(required=False, type="int"), + snmp_v2c_status=dict(required=False, type="str", choices=["enable", "disable"]), + snmp_v2c_trap_status=dict(required=False, type="str", choices=["enable", "disable"]), + snmp_v2c_query_status=dict(required=False, type="str", choices=["enable", "disable"]), + snmp_v2c_name=dict(required=False, type="str", no_log=True), + snmp_v2c_id=dict(required=False, type="int"), + snmp_v2c_trap_src_ipv4=dict(required=False, type="str"), + snmp_v2c_trap_hosts_ipv4=dict(required=False, type="str"), + snmp_v2c_query_hosts_ipv4=dict(required=False, type="str"), + + snmpv3_auth_proto=dict(required=False, type="str", choices=["md5", "sha"]), + snmpv3_auth_pwd=dict(required=False, type="str", no_log=True), + snmpv3_name=dict(required=False, type="str"), + snmpv3_notify_hosts=dict(required=False, type="str"), + snmpv3_priv_proto=dict(required=False, type="str", choices=["aes", "des", "aes256", "aes256cisco"]), + snmpv3_priv_pwd=dict(required=False, type="str", no_log=True), + snmpv3_queries=dict(required=False, type="str", choices=["enable", "disable"]), + snmpv3_query_port=dict(required=False, type="int"), + snmpv3_security_level=dict(required=False, type="str", + 
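+
+
+# A quick orientation before main(): each set_devprof_* helper above follows the
+# same basic pattern -- translate the ansible-style paramgram keys into the
+# FortiManager JSON API field names, build a datagram and an endpoint URL
+# (usually under /pm/config/adom/<adom>/devprof/<template>/...), and hand both
+# to fmgr.process_request() along with the requested method.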
choices=["no-auth-no-priv", "auth-no-priv", "auth-priv"]), + snmpv3_source_ip=dict(required=False, type="str"), + snmpv3_status=dict(required=False, type="str", choices=["enable", "disable"]), + snmpv3_trap_rport=dict(required=False, type="int"), + snmpv3_trap_status=dict(required=False, type="str", choices=["enable", "disable"]), + + syslog_port=dict(required=False, type="int"), + syslog_server=dict(required=False, type="str"), + syslog_mode=dict(required=False, type="str", choices=["udp", "legacy-reliable", "reliable"], default="udp"), + syslog_status=dict(required=False, type="str", choices=["enable", "disable"]), + syslog_filter=dict(required=False, type="str", choices=["emergency", "alert", "critical", "error", + "warning", "notification", "information", "debug"]), + syslog_enc_algorithm=dict(required=False, type="str", choices=["high", "low", "disable", "high-medium"], + default="disable"), + syslog_facility=dict(required=False, type="str", choices=["kernel", "user", "mail", "daemon", "auth", + "syslog", "lpr", "news", "uucp", "cron", "authpriv", + "ftp", "ntp", "audit", "alert", "clock", "local0", + "local1", "local2", "local3", "local4", "local5", + "local6", "local7"], default="syslog"), + syslog_certificate=dict(required=False, type="str"), + + ntp_status=dict(required=False, type="str", choices=["enable", "disable"]), + ntp_sync_interval=dict(required=False, type="int"), + ntp_type=dict(required=False, type="str", choices=["fortiguard", "custom"]), + ntp_server=dict(required=False, type="str"), + ntp_auth=dict(required=False, type="str", choices=["enable", "disable"]), + ntp_auth_pwd=dict(required=False, type="str", no_log=True), + ntp_v3=dict(required=False, type="str", choices=["enable", "disable"]), + + admin_https_redirect=dict(required=False, type="str", choices=["enable", "disable"]), + admin_https_port=dict(required=False, type="int"), + admin_http_port=dict(required=False, type="int"), + admin_timeout=dict(required=False, type="int"), + admin_language=dict(required=False, type="str", + choices=["english", "simch", "japanese", "korean", + "spanish", "trach", "french", "portuguese"]), + admin_switch_controller=dict(required=False, type="str", choices=["enable", "disable"]), + admin_gui_theme=dict(required=False, type="str", choices=["green", "red", "blue", "melongene", "mariner"]), + admin_enable_fortiguard=dict(required=False, type="str", choices=["none", "direct", "this-fmg"]), + admin_fortianalyzer_target=dict(required=False, type="str"), + admin_fortiguard_target=dict(required=False, type="str"), + + smtp_username=dict(required=False, type="str"), + smtp_password=dict(required=False, type="str", no_log=True), + smtp_port=dict(required=False, type="int"), + smtp_replyto=dict(required=False, type="str"), + smtp_conn_sec=dict(required=False, type="str", choices=["none", "starttls", "smtps"]), + smtp_server=dict(required=False, type="str"), + smtp_source_ipv4=dict(required=False, type="str"), + smtp_validate_cert=dict(required=False, type="str", choices=["enable", "disable"]), + + dns_suffix=dict(required=False, type="str"), + dns_primary_ipv4=dict(required=False, type="str"), + dns_secondary_ipv4=dict(required=False, type="str"), + delete_provisioning_template=dict(required=False, type="str") + ) + + module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False, ) + paramgram = { + "adom": module.params["adom"], + "mode": module.params["mode"], + "provision_targets": module.params["provision_targets"], + "provisioning_template": 
module.params["provisioning_template"], + + "snmp_status": module.params["snmp_status"], + "snmp_v2c_query_port": module.params["snmp_v2c_query_port"], + "snmp_v2c_trap_port": module.params["snmp_v2c_trap_port"], + "snmp_v2c_status": module.params["snmp_v2c_status"], + "snmp_v2c_trap_status": module.params["snmp_v2c_trap_status"], + "snmp_v2c_query_status": module.params["snmp_v2c_query_status"], + "snmp_v2c_name": module.params["snmp_v2c_name"], + "snmp_v2c_id": module.params["snmp_v2c_id"], + "snmp_v2c_trap_src_ipv4": module.params["snmp_v2c_trap_src_ipv4"], + "snmp_v2c_trap_hosts_ipv4": module.params["snmp_v2c_trap_hosts_ipv4"], + "snmp_v2c_query_hosts_ipv4": module.params["snmp_v2c_query_hosts_ipv4"], + + "snmpv3_auth_proto": module.params["snmpv3_auth_proto"], + "snmpv3_auth_pwd": module.params["snmpv3_auth_pwd"], + "snmpv3_name": module.params["snmpv3_name"], + "snmpv3_notify_hosts": module.params["snmpv3_notify_hosts"], + "snmpv3_priv_proto": module.params["snmpv3_priv_proto"], + "snmpv3_priv_pwd": module.params["snmpv3_priv_pwd"], + "snmpv3_queries": module.params["snmpv3_queries"], + "snmpv3_query_port": module.params["snmpv3_query_port"], + "snmpv3_security_level": module.params["snmpv3_security_level"], + "snmpv3_source_ip": module.params["snmpv3_source_ip"], + "snmpv3_status": module.params["snmpv3_status"], + "snmpv3_trap_rport": module.params["snmpv3_trap_rport"], + "snmpv3_trap_status": module.params["snmpv3_trap_status"], + + "syslog_port": module.params["syslog_port"], + "syslog_server": module.params["syslog_server"], + "syslog_mode": module.params["syslog_mode"], + "syslog_status": module.params["syslog_status"], + "syslog_filter": module.params["syslog_filter"], + "syslog_facility": module.params["syslog_facility"], + "syslog_enc_algorithm": module.params["syslog_enc_algorithm"], + "syslog_certificate": module.params["syslog_certificate"], + + "ntp_status": module.params["ntp_status"], + "ntp_sync_interval": module.params["ntp_sync_interval"], + "ntp_type": module.params["ntp_type"], + "ntp_server": module.params["ntp_server"], + "ntp_auth": module.params["ntp_auth"], + "ntp_auth_pwd": module.params["ntp_auth_pwd"], + "ntp_v3": module.params["ntp_v3"], + + "admin_https_redirect": module.params["admin_https_redirect"], + "admin_https_port": module.params["admin_https_port"], + "admin_http_port": module.params["admin_http_port"], + "admin_timeout": module.params["admin_timeout"], + "admin_language": module.params["admin_language"], + "admin_switch_controller": module.params["admin_switch_controller"], + "admin_gui_theme": module.params["admin_gui_theme"], + "admin_enable_fortiguard": module.params["admin_enable_fortiguard"], + "admin_fortianalyzer_target": module.params["admin_fortianalyzer_target"], + "admin_fortiguard_target": module.params["admin_fortiguard_target"], + + "smtp_username": module.params["smtp_username"], + "smtp_password": module.params["smtp_password"], + "smtp_port": module.params["smtp_port"], + "smtp_replyto": module.params["smtp_replyto"], + "smtp_conn_sec": module.params["smtp_conn_sec"], + "smtp_server": module.params["smtp_server"], + "smtp_source_ipv4": module.params["smtp_source_ipv4"], + "smtp_validate_cert": module.params["smtp_validate_cert"], + + "dns_suffix": module.params["dns_suffix"], + "dns_primary_ipv4": module.params["dns_primary_ipv4"], + "dns_secondary_ipv4": module.params["dns_secondary_ipv4"], + "delete_provisioning_template": module.params["delete_provisioning_template"] + } + module.paramgram = paramgram + fmgr = None + if 
module._socket_path: + connection = Connection(module._socket_path) + fmgr = FortiManagerHandler(connection, module) + fmgr.tools = FMGRCommon() + else: + module.fail_json(**FAIL_SOCKET_MSG) + + results = DEFAULT_RESULT_OBJ + try: + # CHECK IF WE ARE DELETING AN ENTIRE TEMPLATE. IF THAT'S THE CASE DO IT FIRST AND IGNORE THE REST. + if paramgram["delete_provisioning_template"] is not None: + results = set_devprof(fmgr, paramgram) + fmgr.govern_response(module=module, results=results, good_codes=[0, -10, -1], + ansible_facts=fmgr.construct_ansible_facts(results, module.params, paramgram), + stop_on_success=True) + except Exception as err: + raise FMGBaseException(err) + + try: + # CHECK TO SEE IF THE DEVPROF TEMPLATE EXISTS + devprof = get_devprof(fmgr, paramgram) + if devprof[0] != 0: + results = set_devprof(fmgr, paramgram) + fmgr.govern_response(module=module, results=results, good_codes=[0, -2], stop_on_success=False, + ansible_facts=fmgr.construct_ansible_facts(results, module.params, paramgram)) + except Exception as err: + raise FMGBaseException(err) + + try: + # PROCESS THE SNMP SETTINGS IF THE SNMP_STATUS VARIABLE IS SET + if paramgram["snmp_status"] is not None: + results = set_devprof_snmp(fmgr, paramgram) + fmgr.govern_response(module=module, results=results, good_codes=[0], stop_on_success=False, + ansible_facts=fmgr.construct_ansible_facts(results, module.params, paramgram)) + + # PROCESS THE SNMP V2C COMMUNITY SETTINGS IF THEY ARE ALL HERE + if all(v is not None for v in (paramgram["snmp_v2c_query_port"], paramgram["snmp_v2c_trap_port"], + paramgram["snmp_v2c_status"], paramgram["snmp_v2c_trap_status"], + paramgram["snmp_v2c_query_status"], paramgram["snmp_v2c_name"], + paramgram["snmp_v2c_id"])): + results = set_devprof_snmp_v2c(fmgr, paramgram) + fmgr.govern_response(module=module, results=results, good_codes=[0, -10033], stop_on_success=True, + ansible_facts=fmgr.construct_ansible_facts(results, module.params, paramgram)) + + # PROCESS THE SNMPV3 USER IF THERE + if all(v is not None for v in ( + [paramgram["snmpv3_auth_proto"], paramgram["snmpv3_auth_pwd"], paramgram["snmpv3_name"], + paramgram["snmpv3_notify_hosts"], paramgram["snmpv3_priv_proto"], + paramgram["snmpv3_priv_pwd"], + paramgram["snmpv3_queries"], + paramgram["snmpv3_query_port"], paramgram["snmpv3_security_level"], + paramgram["snmpv3_source_ip"], + paramgram["snmpv3_status"], paramgram["snmpv3_trap_rport"], paramgram["snmpv3_trap_status"]])): + + results = set_devprof_snmp_v3(fmgr, paramgram) + fmgr.govern_response(module=module, results=results, good_codes=[0, -10033, -10000, -3], + stop_on_success=True, + ansible_facts=fmgr.construct_ansible_facts(results, module.params, paramgram)) + except Exception as err: + raise FMGBaseException(err) + + try: + # PROCESS THE SYSLOG SETTINGS IF THE ALL THE NEEDED SYSLOG VARIABLES ARE PRESENT + if all(v is not None for v in [paramgram["syslog_port"], paramgram["syslog_mode"], + paramgram["syslog_server"], paramgram["syslog_status"]]): + # enable syslog in the devprof template + results = set_devprof_syslog(fmgr, paramgram) + fmgr.govern_response(module=module, results=results, good_codes=[0, -10033, -10000, -3], + ansible_facts=fmgr.construct_ansible_facts(results, module.params, paramgram)) + except Exception as err: + raise FMGBaseException(err) + + try: + # IF THE SYSLOG FILTER IS PRESENT THEN RUN THAT + if paramgram["syslog_filter"] is not None: + results = set_devprof_syslog_filter(fmgr, paramgram) + fmgr.govern_response(module=module, results=results, 
good_codes=[0], + ansible_facts=fmgr.construct_ansible_facts(results, module.params, paramgram)) + except Exception as err: + raise FMGBaseException(err) + + try: + # PROCESS NTP OPTIONS + if paramgram["ntp_status"]: + # VALIDATE INPUT + if paramgram["ntp_type"] == "custom" and paramgram["ntp_server"] is None: + module.exit_json(msg="You requested custom NTP type but did not provide ntp_server parameter.") + if paramgram["ntp_auth"] == "enable" and paramgram["ntp_auth_pwd"] is None: + module.exit_json( + msg="You requested NTP Authentication but did not provide ntp_auth_pwd parameter.") + + results = set_devprof_ntp(fmgr, paramgram) + fmgr.govern_response(module=module, results=results, good_codes=[0], + ansible_facts=fmgr.construct_ansible_facts(results, module.params, paramgram)) + except Exception as err: + raise FMGBaseException(err) + try: + # PROCESS THE ADMIN OPTIONS + if any(v is not None for v in ( + paramgram["admin_https_redirect"], paramgram["admin_https_port"], paramgram["admin_http_port"], + paramgram["admin_timeout"], + paramgram["admin_language"], paramgram["admin_switch_controller"], + paramgram["admin_gui_theme"])): + + results = set_devprof_admin(fmgr, paramgram) + fmgr.govern_response(module=module, results=results, good_codes=[0], stop_on_success=False, + ansible_facts=fmgr.construct_ansible_facts(results, module.params, paramgram)) + except Exception as err: + raise FMGBaseException(err) + + try: + # PROCESS FORTIGUARD OPTIONS + if paramgram["admin_enable_fortiguard"] is not None: + + results = set_devprof_toggle_fg(fmgr, paramgram) + fmgr.govern_response(module=module, results=results, good_codes=[0], stop_on_success=False, + ansible_facts=fmgr.construct_ansible_facts(results, module.params, paramgram)) + results = set_devprof_fg(fmgr, paramgram) + fmgr.govern_response(module=module, results=results, good_codes=[0], stop_on_success=False, + ansible_facts=fmgr.construct_ansible_facts(results, module.params, paramgram)) + except Exception as err: + raise FMGBaseException(err) + + try: + # PROCESS THE SMTP OPTIONS + if all(v is not None for v in ( + paramgram["smtp_username"], paramgram["smtp_password"], paramgram["smtp_port"], + paramgram["smtp_replyto"], + paramgram["smtp_conn_sec"], paramgram["smtp_server"], + paramgram["smtp_source_ipv4"], paramgram["smtp_validate_cert"])): + + results = set_devprof_smtp(fmgr, paramgram) + fmgr.govern_response(module=module, results=results, good_codes=[0], stop_on_success=False, + ansible_facts=fmgr.construct_ansible_facts(results, module.params, paramgram)) + except Exception as err: + raise FMGBaseException(err) + + try: + # PROCESS THE DNS OPTIONS + if any(v is not None for v in + (paramgram["dns_suffix"], paramgram["dns_primary_ipv4"], paramgram["dns_secondary_ipv4"])): + results = set_devprof_dns(fmgr, paramgram) + fmgr.govern_response(module=module, results=results, good_codes=[0], stop_on_success=False, + ansible_facts=fmgr.construct_ansible_facts(results, module.params, paramgram)) + except Exception as err: + raise FMGBaseException(err) + + try: + # PROCESS THE admin_fortianalyzer_target OPTIONS + if paramgram["admin_fortianalyzer_target"] is not None: + + results = set_devprof_faz(fmgr, paramgram) + fmgr.govern_response(module=module, results=results, good_codes=[0], stop_on_success=False, + ansible_facts=fmgr.construct_ansible_facts(results, module.params, paramgram)) + except Exception as err: + raise FMGBaseException(err) + + try: + # PROCESS THE PROVISIONING TEMPLATE TARGET PARAMETER + if 
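+    # Illustration of the delete flow handled below: with provision_targets="FGT1"
+    # and mode="delete", if the template currently scopes FGT1 and FGT2, the
+    # current scope is read back, rebuilt as [{"name": "FGT2"}], and re-written
+    # via set_devprof_scope(), which effectively removes FGT1 from the template.
+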
paramgram["provision_targets"] is not None: + if paramgram["mode"] != "delete": + results = set_devprof_scope(fmgr, paramgram) + fmgr.govern_response(module=module, results=results, good_codes=[0], stop_on_success=False, + ansible_facts=fmgr.construct_ansible_facts(results, module.params, paramgram)) + + if paramgram["mode"] == "delete": + # WE NEED TO FIGURE OUT WHAT'S THERE FIRST, BEFORE WE CAN RUN THIS + targets_to_add = list() + try: + current_scope = get_devprof_scope(fmgr, paramgram) + targets_to_remove = paramgram["provision_targets"].strip().split(",") + targets = current_scope[1][1]["scope member"] + for target in targets: + if target["name"] not in targets_to_remove: + target_append = {"name": target["name"]} + targets_to_add.append(target_append) + except BaseException: + pass + paramgram["targets_to_add"] = targets_to_add + results = set_devprof_scope(fmgr, paramgram) + fmgr.govern_response(module=module, results=results, good_codes=[0, -10033, -10000, -3], + ansible_facts=fmgr.construct_ansible_facts(results, module.params, paramgram)) + except Exception as err: + raise FMGBaseException(err) + + return module.exit_json(**results[1]) + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/network/fortimanager/fmgr_fwobj_address.py b/plugins/modules/network/fortimanager/fmgr_fwobj_address.py new file mode 100644 index 0000000000..fe3a9afbd5 --- /dev/null +++ b/plugins/modules/network/fortimanager/fmgr_fwobj_address.py @@ -0,0 +1,667 @@ +#!/usr/bin/python +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = { + "metadata_version": "1.1", + "status": ["preview"], + "supported_by": "community" +} + +DOCUMENTATION = ''' +--- +module: fmgr_fwobj_address +notes: + - Full Documentation at U(https://ftnt-ansible-docs.readthedocs.io/en/latest/). +author: + - Luke Weighall (@lweighall) + - Andrew Welsh (@Ghilli3) + - Jim Huber (@p4r4n0y1ng) +short_description: Allows the management of firewall objects in FortiManager +description: + - Allows for the management of IPv4, IPv6, and multicast address objects within FortiManager. + +options: + adom: + description: + - The ADOM the configuration should belong to. + required: false + default: root + + allow_routing: + description: + - Enable/disable use of this address in the static route configuration. + choices: ['enable', 'disable'] + default: 'disable' + + associated_interface: + description: + - Associated interface name. + + cache_ttl: + description: + - Minimal TTL of individual IP addresses in FQDN cache. Only applies when type = wildcard-fqdn. + + color: + description: + - Color of the object in FortiManager GUI. + - Takes integers 1-32 + default: 22 + + comment: + description: + - Comment for the object in FortiManager. + + country: + description: + - Country name. Required if type = geographic. + + end_ip: + description: + - End IP. 
Only used when ipv4 = iprange. + + group_members: + description: + - Address group member. If this is defined w/out group_name, the operation will fail. + + group_name: + description: + - Address group name. If this is defined in playbook task, all other options are ignored. + + ipv4: + description: + - Type of IPv4 Object. + - Must not be specified with either multicast or IPv6 parameters. + choices: ['ipmask', 'iprange', 'fqdn', 'wildcard', 'geography', 'wildcard-fqdn', 'group'] + + ipv4addr: + description: + - IP and network mask. If only defining one IP use this parameter. (i.e. 10.7.220.30/255.255.255.255) + - Can also define subnets (i.e. 10.7.220.0/255.255.255.0) + - Also accepts CIDR (i.e. 10.7.220.0/24) + - If Netmask is omitted after IP address, /32 is assumed. + - When multicast is set to Broadcast Subnet the ipv4addr parameter is used to specify the subnet. + + ipv6: + description: + - Puts module into IPv6 mode. + - Must not be specified with either ipv4 or multicast parameters. + choices: ['ip', 'iprange', 'group'] + + ipv6addr: + description: + - IPv6 address in full. (i.e. 2001:0db8:85a3:0000:0000:8a2e:0370:7334) + + fqdn: + description: + - Fully qualified domain name. + + mode: + description: + - Sets one of three modes for managing the object. + choices: ['add', 'set', 'delete'] + default: add + + multicast: + description: + - Manages Multicast Address Objects. + - Sets either a Multicast IP Range or a Broadcast Subnet. + - Must not be specified with either ipv4 or ipv6 parameters. + - When set to Broadcast Subnet the ipv4addr parameter is used to specify the subnet. + - Can create IPv4 Multicast Objects (multicastrange and broadcastmask options -- uses start/end-ip and ipv4addr). + choices: ['multicastrange', 'broadcastmask', 'ip6'] + + name: + description: + - Friendly Name Address object name in FortiManager. + + obj_id: + description: + - Object ID for NSX. + + start_ip: + description: + - Start IP. Only used when ipv4 = iprange. + + visibility: + description: + - Enable/disable address visibility. + choices: ['enable', 'disable'] + default: 'enable' + + wildcard: + description: + - IP address and wildcard netmask. Required if ipv4 = wildcard. + + wildcard_fqdn: + description: + - Wildcard FQDN. Required if ipv4 = wildcard-fqdn. 
+'''
+
+EXAMPLES = '''
+- name: ADD IPv4 IP ADDRESS OBJECT
+  fmgr_fwobj_address:
+    ipv4: "ipmask"
+    ipv4addr: "10.7.220.30/32"
+    name: "ansible_v4Obj"
+    comment: "Created by Ansible"
+    color: "6"
+
+- name: ADD IPv4 IP ADDRESS OBJECT MORE OPTIONS
+  fmgr_fwobj_address:
+    ipv4: "ipmask"
+    ipv4addr: "10.7.220.34/32"
+    name: "ansible_v4Obj_MORE"
+    comment: "Created by Ansible"
+    color: "6"
+    allow_routing: "enable"
+    cache_ttl: "180"
+    associated_interface: "port1"
+    obj_id: "123"
+
+- name: ADD IPv4 IP ADDRESS SUBNET OBJECT
+  fmgr_fwobj_address:
+    ipv4: "ipmask"
+    ipv4addr: "10.7.220.0/255.255.255.128"
+    name: "ansible_subnet"
+    comment: "Created by Ansible"
+    mode: "set"
+
+- name: ADD IPv4 IP ADDRESS RANGE OBJECT
+  fmgr_fwobj_address:
+    ipv4: "iprange"
+    start_ip: "10.7.220.1"
+    end_ip: "10.7.220.125"
+    name: "ansible_range"
+    comment: "Created by Ansible"
+
+- name: ADD IPv4 IP ADDRESS WILDCARD OBJECT
+  fmgr_fwobj_address:
+    ipv4: "wildcard"
+    wildcard: "10.7.220.30/255.255.255.255"
+    name: "ansible_wildcard"
+    comment: "Created by Ansible"
+
+- name: ADD IPv4 IP ADDRESS WILDCARD FQDN OBJECT
+  fmgr_fwobj_address:
+    ipv4: "wildcard-fqdn"
+    wildcard_fqdn: "*.myds.com"
+    name: "Synology myds DDNS service"
+    comment: "Created by Ansible"
+
+- name: ADD IPv4 IP ADDRESS FQDN OBJECT
+  fmgr_fwobj_address:
+    ipv4: "fqdn"
+    fqdn: "ansible.com"
+    name: "ansible_fqdn"
+    comment: "Created by Ansible"
+
+- name: ADD IPv4 IP ADDRESS GEO OBJECT
+  fmgr_fwobj_address:
+    ipv4: "geography"
+    country: "usa"
+    name: "ansible_geo"
+    comment: "Created by Ansible"
+
+- name: ADD IPv6 ADDRESS
+  fmgr_fwobj_address:
+    ipv6: "ip"
+    ipv6addr: "2001:0db8:85a3:0000:0000:8a2e:0370:7334"
+    name: "ansible_v6Obj"
+    comment: "Created by Ansible"
+
+- name: ADD IPv6 ADDRESS RANGE
+  fmgr_fwobj_address:
+    ipv6: "iprange"
+    start_ip: "2001:0db8:85a3:0000:0000:8a2e:0370:7334"
+    end_ip: "2001:0db8:85a3:0000:0000:8a2e:0370:7446"
+    name: "ansible_v6range"
+    comment: "Created by Ansible"
+
+- name: ADD IPv4 IP ADDRESS GROUP
+  fmgr_fwobj_address:
+    ipv4: "group"
+    group_name: "ansibleIPv4Group"
+    group_members: "ansible_fqdn, ansible_wildcard, ansible_range"
+
+- name: ADD IPv6 IP ADDRESS GROUP
+  fmgr_fwobj_address:
+    ipv6: "group"
+    group_name: "ansibleIPv6Group"
+    group_members: "ansible_v6Obj, ansible_v6range"
+
+- name: ADD MULTICAST RANGE
+  fmgr_fwobj_address:
+    multicast: "multicastrange"
+    start_ip: "224.0.0.251"
+    end_ip: "224.0.0.251"
+    name: "ansible_multicastrange"
+    comment: "Created by Ansible"
+
+- name: ADD BROADCAST SUBNET
+  fmgr_fwobj_address:
+    multicast: "broadcastmask"
+    ipv4addr: "10.7.220.0/24"
+    name: "ansible_broadcastSubnet"
+    comment: "Created by Ansible"
+'''
+
+RETURN = """
+api_result:
+  description: full API response, includes status code and message
+  returned: always
+  type: str
+"""
+
+
+import re
+from ansible.module_utils.basic import AnsibleModule, env_fallback
+from ansible.module_utils.connection import Connection
+from ansible_collections.fortinet.fortios.plugins.module_utils.network.fortimanager.fortimanager import FortiManagerHandler
+from ansible_collections.fortinet.fortios.plugins.module_utils.network.fortimanager.common import FMGBaseException
+from ansible_collections.fortinet.fortios.plugins.module_utils.network.fortimanager.common import FMGRCommon
+from ansible_collections.fortinet.fortios.plugins.module_utils.network.fortimanager.common import DEFAULT_RESULT_OBJ
+from ansible_collections.fortinet.fortios.plugins.module_utils.network.fortimanager.common import FAIL_SOCKET_MSG
+
+
+def fmgr_fwobj_ipv4(fmgr, paramgram):
+    """
+    :param fmgr: The fmgr object instance from fortimanager.py
+    :type fmgr: class object
+    :param paramgram: The formatted dictionary of options to process
+    :type paramgram: dict
+    :return: The response from the FortiManager
+    :rtype: dict
+    """
+    # EVAL THE MODE PARAMETER FOR SET OR ADD
+    if paramgram["mode"] in ['set', 'add']:
+        # CREATE THE DATAGRAM DICTIONARY
+        # ENSURE THE DATAGRAM KEYS MATCH THE JSON API GUIDE ATTRIBUTES, NOT WHAT IS IN ANSIBLE
+        # SOME PARAMETERS SHOWN IN THIS DICTIONARY WE DON'T EVEN ASK THE USER FOR IN PLAYBOOKS BUT ARE REQUIRED
+        datagram = {
+            "comment": paramgram["comment"],
+            "associated-interface": paramgram["associated-interface"],
+            "cache-ttl": paramgram["cache-ttl"],
+            "name": paramgram["name"],
+            "allow-routing": paramgram["allow-routing"],
+            "color": paramgram["color"],
+            "meta fields": {},
+            "dynamic_mapping": [],
+            "visibility": paramgram["visibility"],
+            "type": paramgram["ipv4"],
+        }
+
+        # SET THE CORRECT URL BASED ON THE TYPE (WE'RE DOING GROUPS IN THIS METHOD, TOO)
+        if datagram["type"] == "group":
+            url = '/pm/config/adom/{adom}/obj/firewall/addrgrp'.format(adom=paramgram["adom"])
+        else:
+            url = '/pm/config/adom/{adom}/obj/firewall/address'.format(adom=paramgram["adom"])
+
+        #########################
+        # IF type = 'ipmask'
+        #########################
+        if datagram["type"] == "ipmask":
+            # CREATE THE SUBNET LIST OBJECT
+            subnet = []
+            # EVAL THE IPV4ADDR INPUT AND SPLIT THE IP ADDRESS FROM THE MASK AND APPEND THEM TO THE SUBNET LIST
+            for subnets in paramgram["ipv4addr"].split("/"):
+                subnet.append(subnets)
+
+            # CHECK THAT THE SECOND ENTRY IN THE SUBNET LIST (WHAT WAS TO THE RIGHT OF THE / CHARACTER)
+            # IS IN SUBNET MASK FORMAT AND NOT CIDR FORMAT.
+            # IF IT IS IN CIDR FORMAT, WE NEED TO CONVERT IT TO SUBNET BIT MASK FORMAT FOR THE JSON API
+            if not re.match(r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}', subnet[1]):
+                # IF THE SUBNET PARAMETER INPUT DIDN'T LOOK LIKE xxx.xxx.xxx.xxx TO REGEX...
+                # ... RUN IT THROUGH THE CIDR_TO_NETMASK() FUNCTION
+                mask = fmgr._tools.cidr_to_netmask(subnet[1])
+                # AND THEN UPDATE THE SUBNET LIST OBJECT
+                subnet[1] = mask
+
+            # INCLUDE THE SUBNET LIST OBJECT IN THE DATAGRAM DICTIONARY TO BE SUBMITTED
+            datagram["subnet"] = subnet
+
+        #########################
+        # IF type = 'iprange'
+        #########################
+        if datagram["type"] == "iprange":
+            datagram["start-ip"] = paramgram["start-ip"]
+            datagram["end-ip"] = paramgram["end-ip"]
+            datagram["subnet"] = ["0.0.0.0", "0.0.0.0"]
+
+        #########################
+        # IF type = 'geography'
+        #########################
+        if datagram["type"] == "geography":
+            datagram["country"] = paramgram["country"]
+
+        #########################
+        # IF type = 'wildcard'
+        #########################
+        if datagram["type"] == "wildcard":
+
+            subnet = []
+            for subnets in paramgram["wildcard"].split("/"):
+                subnet.append(subnets)
+
+            if not re.match(r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}', subnet[1]):
+                mask = fmgr._tools.cidr_to_netmask(subnet[1])
+                subnet[1] = mask
+
+            datagram["wildcard"] = subnet
+
+        #########################
+        # IF type = 'wildcard-fqdn'
+        #########################
+        if datagram["type"] == "wildcard-fqdn":
+            datagram["wildcard-fqdn"] = paramgram["wildcard-fqdn"]
+
+        #########################
+        # IF type = 'fqdn'
+        #########################
+        if datagram["type"] == "fqdn":
+            datagram["fqdn"] = paramgram["fqdn"]
+
+        #########################
+        # IF type = 'group'
+        #########################
+        if datagram["type"] == "group":
+            datagram = {
+                "comment": paramgram["comment"],
+                "name": paramgram["group_name"],
+                "color": paramgram["color"],
+                "meta fields": {},
+                "dynamic_mapping": [],
+                "visibility": paramgram["visibility"]
+            }
+
+            members = []
+            group_members = paramgram["group_members"].replace(" ", "")
+            try:
+                for member in group_members.split(","):
+                    members.append(member)
+            except Exception:
+                pass
+
+            datagram["member"] = members
+
+    # EVAL THE MODE PARAMETER FOR DELETE
+    if paramgram["mode"] == "delete":
+        # IF A GROUP, SET THE CORRECT NAME AND URL FOR THE GROUP ENDPOINT
+        if paramgram["ipv4"] == "group":
+            datagram = {}
+            url = '/pm/config/adom/{adom}/obj/firewall/addrgrp/{name}'.format(adom=paramgram["adom"],
+                                                                              name=paramgram["group_name"])
+        # OTHERWISE WE'RE JUST GOING TO USE THE ADDRESS ENDPOINT
+        else:
+            datagram = {}
+            url = '/pm/config/adom/{adom}/obj/firewall/address/{name}'.format(adom=paramgram["adom"],
+                                                                              name=paramgram["name"])
+
+    response = fmgr.process_request(url, datagram, paramgram["mode"])
+    return response
+
+
+def fmgr_fwobj_ipv6(fmgr, paramgram):
+    """
+    :param fmgr: The fmgr object instance from fortimanager.py
+    :type fmgr: class object
+    :param paramgram: The formatted dictionary of options to process
+    :type paramgram: dict
+    :return: The response from the FortiManager
+    :rtype: dict
+    """
+    # EVAL THE MODE PARAMETER FOR SET OR ADD
+    if paramgram["mode"] in ['set', 'add']:
+        # CREATE THE DATAGRAM DICTIONARY
+        # ENSURE THE DATAGRAM KEYS MATCH THE JSON API GUIDE ATTRIBUTES, NOT WHAT IS IN ANSIBLE
+        # SOME PARAMETERS SHOWN IN THIS DICTIONARY WE DON'T EVEN ASK THE USER FOR IN PLAYBOOKS BUT ARE REQUIRED
+        datagram = {
+            "comment": paramgram["comment"],
+            "name": paramgram["name"],
+            "color": paramgram["color"],
+            "dynamic_mapping": [],
+            "visibility": paramgram["visibility"],
+            "type": paramgram["ipv6"]
+        }
+
+        # SET THE CORRECT URL BASED ON THE TYPE (WE'RE DOING GROUPS IN THIS METHOD, TOO)
+        if datagram["type"] == "group":
+            url = '/pm/config/adom/{adom}/obj/firewall/addrgrp6'.format(adom=paramgram["adom"])
+        else:
+            url = '/pm/config/adom/{adom}/obj/firewall/address6'.format(adom=paramgram["adom"])
+
+        #########################
+        # IF type = 'ip'
+        #########################
+        if datagram["type"] == "ip":
+            datagram["type"] = "ipprefix"
+            datagram["ip6"] = paramgram["ipv6addr"]
+
+        #########################
+        # IF type = 'iprange'
+        #########################
+        if datagram["type"] == "iprange":
+            datagram["start-ip"] = paramgram["start-ip"]
+            datagram["end-ip"] = paramgram["end-ip"]
+
+        #########################
+        # IF type = 'group'
+        #########################
+        if datagram["type"] == "group":
+            datagram = {
+                "comment": paramgram["comment"],
+                "name": paramgram["group_name"],
+                "color": paramgram["color"],
+                "visibility": paramgram["visibility"]
+            }
+
+            members = []
+            group_members = paramgram["group_members"].replace(" ", "")
+            try:
+                for member in group_members.split(","):
+                    members.append(member)
+            except Exception:
+                pass
+
+            datagram["member"] = members
+
+    # EVAL THE MODE PARAMETER FOR DELETE
+    if paramgram["mode"] == "delete":
+        # IF A GROUP, SET THE CORRECT NAME AND URL FOR THE GROUP ENDPOINT
+        if paramgram["ipv6"] == "group":
+            datagram = {}
+            url = '/pm/config/adom/{adom}/obj/firewall/addrgrp6/{name}'.format(adom=paramgram["adom"],
+                                                                               name=paramgram["group_name"])
+        # OTHERWISE WE'RE JUST GOING TO USE THE ADDRESS ENDPOINT
+        else:
+            datagram = {}
+            url = '/pm/config/adom/{adom}/obj/firewall/address6/{name}'.format(adom=paramgram["adom"],
+                                                                               name=paramgram["name"])
+
+    response = fmgr.process_request(url, datagram, paramgram["mode"])
+    return response
+
+
+def fmgr_fwobj_multicast(fmgr, paramgram):
+    """
+    :param fmgr: The fmgr object instance from fortimanager.py
+    :type fmgr: class object
+    :param paramgram: The formatted dictionary of options to process
+    :type paramgram: dict
+    :return: The response from the FortiManager
+    :rtype: dict
+    """
+    # EVAL THE MODE PARAMETER FOR SET OR ADD
+    if paramgram["mode"] in ['set', 'add']:
+        # CREATE THE DATAGRAM DICTIONARY
+        # ENSURE THE DATAGRAM KEYS MATCH THE JSON API GUIDE ATTRIBUTES, NOT WHAT IS IN ANSIBLE
+        # SOME PARAMETERS SHOWN IN THIS DICTIONARY WE DON'T EVEN ASK THE USER FOR IN PLAYBOOKS BUT ARE REQUIRED
+        datagram = {
+            "associated-interface": paramgram["associated-interface"],
+            "comment": paramgram["comment"],
+            "name": paramgram["name"],
+            "color": paramgram["color"],
+            "type": paramgram["multicast"],
+            "visibility": paramgram["visibility"],
+        }
+
+        # SET THE CORRECT URL
+        url = '/pm/config/adom/{adom}/obj/firewall/multicast-address'.format(adom=paramgram["adom"])
+
+        #########################
+        # IF type = 'multicastrange'
+        #########################
+        if paramgram["multicast"] == "multicastrange":
+            datagram["start-ip"] = paramgram["start-ip"]
+            datagram["end-ip"] = paramgram["end-ip"]
+            datagram["subnet"] = ["0.0.0.0", "0.0.0.0"]
+
+        #########################
+        # IF type = 'broadcastmask'
+        #########################
+        if paramgram["multicast"] == "broadcastmask":
+            # EVAL THE IPV4ADDR INPUT AND SPLIT THE IP ADDRESS FROM THE MASK AND APPEND THEM TO THE SUBNET LIST
+            subnet = []
+            for subnets in paramgram["ipv4addr"].split("/"):
+                subnet.append(subnets)
+            # CHECK THAT THE SECOND ENTRY IN THE SUBNET LIST (WHAT WAS TO THE RIGHT OF THE / CHARACTER)
+            # IS IN SUBNET MASK FORMAT AND NOT CIDR FORMAT.
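+            # (e.g. "10.7.220.0/24" splits into ["10.7.220.0", "24"]; "24" fails the
+            #  dotted-quad check below, so -- assuming cidr_to_netmask() converts a
+            #  prefix length the way its name suggests -- it becomes "255.255.255.0")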
+            # IF IT IS IN CIDR FORMAT, WE NEED TO CONVERT IT TO SUBNET BIT MASK FORMAT FOR THE JSON API
+            if not re.match(r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}', subnet[1]):
+                # IF THE SUBNET PARAMETER INPUT DIDN'T LOOK LIKE 255.255.255.255 TO REGEX...
+                # ... RUN IT THROUGH THE CIDR_TO_NETMASK() FUNCTION
+                mask = fmgr._tools.cidr_to_netmask(subnet[1])
+                # AND THEN UPDATE THE SUBNET LIST OBJECT
+                subnet[1] = mask
+
+            # INCLUDE THE SUBNET LIST OBJECT IN THE DATAGRAM DICTIONARY TO BE SUBMITTED
+            datagram["subnet"] = subnet
+
+    # EVAL THE MODE PARAMETER FOR DELETE
+    if paramgram["mode"] == "delete":
+        datagram = {
+            "name": paramgram["name"]
+        }
+        # SET THE CORRECT URL FOR DELETE
+        url = '/pm/config/adom/{adom}/obj/firewall/multicast-address/{name}'.format(adom=paramgram["adom"],
+                                                                                    name=paramgram["name"])
+
+    response = fmgr.process_request(url, datagram, paramgram["mode"])
+    return response
+
+
+def main():
+    argument_spec = dict(
+        adom=dict(required=False, type="str", default="root"),
+        mode=dict(choices=["add", "set", "delete"], type="str", default="add"),
+
+        allow_routing=dict(required=False, type="str", choices=['enable', 'disable'], default="disable"),
+        associated_interface=dict(required=False, type="str"),
+        cache_ttl=dict(required=False, type="str"),
+        color=dict(required=False, type="str", default="22"),
+        comment=dict(required=False, type="str"),
+        country=dict(required=False, type="str"),
+        fqdn=dict(required=False, type="str"),
+        name=dict(required=False, type="str"),
+        start_ip=dict(required=False, type="str"),
+        end_ip=dict(required=False, type="str"),
+        ipv4=dict(required=False, type="str", choices=['ipmask', 'iprange', 'fqdn', 'wildcard',
+                                                       'geography', 'wildcard-fqdn', 'group']),
+        visibility=dict(required=False, type="str", choices=['enable', 'disable'], default="enable"),
+        wildcard=dict(required=False, type="str"),
+        wildcard_fqdn=dict(required=False, type="str"),
+        ipv6=dict(required=False, type="str", choices=['ip', 'iprange', 'group']),
+        group_members=dict(required=False, type="str"),
+        group_name=dict(required=False, type="str"),
+        ipv4addr=dict(required=False, type="str"),
+        ipv6addr=dict(required=False, type="str"),
+        multicast=dict(required=False, type="str", choices=['multicastrange', 'broadcastmask', 'ip6']),
+        obj_id=dict(required=False, type="str"),
+
+    )
+
+    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False,
+                           mutually_exclusive=[
+                               ['ipv4', 'ipv6'],
+                               ['ipv4', 'multicast'],
+                               ['ipv6', 'multicast']
+                           ])
+    paramgram = {
+        "adom": module.params["adom"],
+        "allow-routing": module.params["allow_routing"],
+        "associated-interface": module.params["associated_interface"],
+        "cache-ttl": module.params["cache_ttl"],
+        "color": module.params["color"],
+        "comment": module.params["comment"],
+        "country": module.params["country"],
+        "end-ip": module.params["end_ip"],
+        "fqdn": module.params["fqdn"],
+        "name": module.params["name"],
+        "start-ip": module.params["start_ip"],
+        "visibility": module.params["visibility"],
+        "wildcard": module.params["wildcard"],
+        "wildcard-fqdn": module.params["wildcard_fqdn"],
+        "ipv6": module.params["ipv6"],
+        "ipv4": module.params["ipv4"],
+        "group_members": module.params["group_members"],
+        "group_name": module.params["group_name"],
+        "ipv4addr": module.params["ipv4addr"],
+        "ipv6addr": module.params["ipv6addr"],
+        "multicast": module.params["multicast"],
+        "mode": module.params["mode"],
+        "obj-id": module.params["obj_id"],
+    }
+
+    module.paramgram = paramgram
+    fmgr = None
+    if module._socket_path:
+        connection = Connection(module._socket_path)
+        fmgr = FortiManagerHandler(connection, module)
+        fmgr._tools = FMGRCommon()
+    else:
+        module.fail_json(**FAIL_SOCKET_MSG)
+
+    results = DEFAULT_RESULT_OBJ
+    try:
+        if paramgram["ipv4"]:
+            results = fmgr_fwobj_ipv4(fmgr, paramgram)
+
+        elif paramgram["ipv6"]:
+            results = fmgr_fwobj_ipv6(fmgr, paramgram)
+
+        elif paramgram["multicast"]:
+            results = fmgr_fwobj_multicast(fmgr, paramgram)
+
+        fmgr.govern_response(module=module, results=results,
+                             ansible_facts=fmgr.construct_ansible_facts(results, module.params, paramgram))
+
+    except Exception as err:
+        raise FMGBaseException(err)
+
+    if results is not None:
+        return module.exit_json(**results[1])
+    else:
+        return module.exit_json(msg="Couldn't find a proper ipv4 or ipv6 or multicast parameter "
+                                    "to run in the logic tree. Exiting...")
+
+
+if __name__ == "__main__":
+    main()
diff --git a/plugins/modules/network/fortimanager/fmgr_fwobj_ippool.py b/plugins/modules/network/fortimanager/fmgr_fwobj_ippool.py
new file mode 100644
index 0000000000..cb06bc47a6
--- /dev/null
+++ b/plugins/modules/network/fortimanager/fmgr_fwobj_ippool.py
@@ -0,0 +1,446 @@
+#!/usr/bin/python
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <https://www.gnu.org/licenses/>.
+#
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'status': ['preview'],
+                    'supported_by': 'community',
+                    'metadata_version': '1.1'}
+
+DOCUMENTATION = '''
+---
+module: fmgr_fwobj_ippool
+notes:
+    - Full Documentation at U(https://ftnt-ansible-docs.readthedocs.io/en/latest/).
+author:
+    - Luke Weighall (@lweighall)
+    - Andrew Welsh (@Ghilli3)
+    - Jim Huber (@p4r4n0y1ng)
+short_description: Allows the editing of IP Pool Objects within FortiManager.
+description:
+    - Allows users to add/edit/delete IP Pool Objects.
+
+options:
+    adom:
+        description:
+            - The ADOM the configuration should belong to.
+        required: false
+        default: root
+
+    mode:
+        description:
+            - Sets one of four modes for managing the object.
+            - Allows use of soft-adds instead of overwriting existing values
+        choices: ['add', 'set', 'delete', 'update']
+        required: false
+        default: add
+
+    type:
+        description:
+            - IP pool type (overload, one-to-one, fixed port range, or port block allocation).
+            - choice | overload | IP addresses in the IP pool can be shared by clients.
+            - choice | one-to-one | One to one mapping.
+            - choice | fixed-port-range | Fixed port range.
+            - choice | port-block-allocation | Port block allocation.
+        required: false
+        choices: ["overload", "one-to-one", "fixed-port-range", "port-block-allocation"]
+
+    startip:
+        description:
+            - First IPv4 address (inclusive) in the range for the address pool (format xxx.xxx.xxx.xxx, Default| 0.0.0.0).
+        required: false
+
+    source_startip:
+        description:
+            - First IPv4 address (inclusive) in the range of the source addresses to be translated (format xxx.xxx.xxx.xxx,
+              Default| 0.0.0.0).
+ required: false
+
+ source_endip:
+ description:
+ - Final IPv4 address (inclusive) in the range of the source addresses to be translated (format xxx.xxx.xxx.xxx,
+ Default| 0.0.0.0).
+ required: false
+
+ permit_any_host:
+ description:
+ - Enable/disable full cone NAT.
+ - choice | disable | Disable full cone NAT.
+ - choice | enable | Enable full cone NAT.
+ required: false
+ choices: ["disable", "enable"]
+
+ pba_timeout:
+ description:
+ - Port block allocation timeout (seconds).
+ required: false
+
+ num_blocks_per_user:
+ description:
+ - Number of address blocks that can be used by a user (1 to 128, default = 8).
+ required: false
+
+ name:
+ description:
+ - IP pool name.
+ required: false
+
+ endip:
+ description:
+ - Final IPv4 address (inclusive) in the range for the address pool (format xxx.xxx.xxx.xxx, Default| 0.0.0.0).
+ required: false
+
+ comments:
+ description:
+ - Comment.
+ required: false
+
+ block_size:
+ description:
+ - Number of addresses in a block (64 to 4096, default = 128).
+ required: false
+
+ associated_interface:
+ description:
+ - Associated interface name.
+ required: false
+
+ arp_reply:
+ description:
+ - Enable/disable replying to ARP requests when an IP Pool is added to a policy (default = enable).
+ - choice | disable | Disable ARP reply.
+ - choice | enable | Enable ARP reply.
+ required: false
+ choices: ["disable", "enable"]
+
+ arp_intf:
+ description:
+ - Select an interface from available options that will reply to ARP requests. (If blank, any is selected).
+ required: false
+
+ dynamic_mapping:
+ description:
+ - EXPERTS ONLY! KNOWLEDGE OF FMGR JSON API IS REQUIRED!
+ - List of multiple child objects to be added. Expects a list of dictionaries.
+ - Dictionaries must use FortiManager API parameters, not the ansible ones listed below.
+ - If submitted, all other prefixed sub-parameters ARE IGNORED.
+ - This object is MUTUALLY EXCLUSIVE with its options.
+ - We expect that you know what you are doing with these list parameters, and are leveraging the JSON API Guide.
+ - WHEN IN DOUBT, USE THE SUB OPTIONS BELOW INSTEAD TO CREATE OBJECTS WITH MULTIPLE TASKS
+ - A sketch of the raw list format is shown below.
+ required: false
+
+ dynamic_mapping_arp_intf:
+ description:
+ - Dynamic Mapping clone of original suffixed parameter.
+ required: false
+
+ dynamic_mapping_arp_reply:
+ description:
+ - Dynamic Mapping clone of original suffixed parameter.
+ required: false
+ choices: ["disable", "enable"]
+
+ dynamic_mapping_associated_interface:
+ description:
+ - Dynamic Mapping clone of original suffixed parameter.
+ required: false
+
+ dynamic_mapping_block_size:
+ description:
+ - Dynamic Mapping clone of original suffixed parameter.
+ required: false
+
+ dynamic_mapping_comments:
+ description:
+ - Dynamic Mapping clone of original suffixed parameter.
+ required: false
+
+ dynamic_mapping_endip:
+ description:
+ - Dynamic Mapping clone of original suffixed parameter.
+ required: false
+
+ dynamic_mapping_num_blocks_per_user:
+ description:
+ - Dynamic Mapping clone of original suffixed parameter.
+ required: false
+
+ dynamic_mapping_pba_timeout:
+ description:
+ - Dynamic Mapping clone of original suffixed parameter.
+ required: false
+
+ dynamic_mapping_permit_any_host:
+ description:
+ - Dynamic Mapping clone of original suffixed parameter.
+ required: false
+ choices: ["disable", "enable"]
+
+ dynamic_mapping_source_endip:
+ description:
+ - Dynamic Mapping clone of original suffixed parameter.
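A minimal sketch of the raw dynamic_mapping list described above (the addresses are hypothetical; the dictionary keys are the hyphenated FortiManager API names, per the JSON API Guide):

```yaml
# Hedged sketch only: dynamic_mapping bypasses the dynamic_mapping_* options
# above and is handed to the API largely as-is.
- name: ADD IPPOOL WITH A RAW DYNAMIC MAPPING LIST
  fmgr_fwobj_ippool:
    mode: "add"
    adom: "ansible"
    name: "Ansible_pool4_dynamic"
    type: "overload"
    startip: "10.10.50.10"
    endip: "10.10.50.100"
    dynamic_mapping:
      - startip: "10.10.51.10"
        endip: "10.10.51.100"
        arp-reply: "enable"
```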
+ required: false + + dynamic_mapping_source_startip: + description: + - Dynamic Mapping clone of original suffixed parameter. + required: false + + dynamic_mapping_startip: + description: + - Dynamic Mapping clone of original suffixed parameter. + required: false + + dynamic_mapping_type: + description: + - Dynamic Mapping clone of original suffixed parameter. + required: false + choices: ["overload", "one-to-one", "fixed-port-range", "port-block-allocation"] + + +''' + +EXAMPLES = ''' +- name: ADD FMGR_FIREWALL_IPPOOL Overload + fmgr_fwobj_ippool: + mode: "add" + adom: "ansible" + name: "Ansible_pool4_overload" + comments: "Created by ansible" + type: "overload" + + # OPTIONS FOR ALL MODES + startip: "10.10.10.10" + endip: "10.10.10.100" + arp_reply: "enable" + +- name: ADD FMGR_FIREWALL_IPPOOL one-to-one + fmgr_fwobj_ippool: + mode: "add" + adom: "ansible" + name: "Ansible_pool4_121" + comments: "Created by ansible" + type: "one-to-one" + + # OPTIONS FOR ALL MODES + startip: "10.10.20.10" + endip: "10.10.20.100" + arp_reply: "enable" + +- name: ADD FMGR_FIREWALL_IPPOOL FIXED PORT RANGE + fmgr_fwobj_ippool: + mode: "add" + adom: "ansible" + name: "Ansible_pool4_fixed_port" + comments: "Created by ansible" + type: "fixed-port-range" + + # OPTIONS FOR ALL MODES + startip: "10.10.40.10" + endip: "10.10.40.100" + arp_reply: "enable" + # FIXED PORT RANGE OPTIONS + source_startip: "192.168.20.1" + source_endip: "192.168.20.20" + +- name: ADD FMGR_FIREWALL_IPPOOL PORT BLOCK ALLOCATION + fmgr_fwobj_ippool: + mode: "add" + adom: "ansible" + name: "Ansible_pool4_port_block_allocation" + comments: "Created by ansible" + type: "port-block-allocation" + + # OPTIONS FOR ALL MODES + startip: "10.10.30.10" + endip: "10.10.30.100" + arp_reply: "enable" + # PORT BLOCK ALLOCATION OPTIONS + block_size: "128" + num_blocks_per_user: "1" +''' + +RETURN = """ +api_result: + description: full API response, includes status code and message + returned: always + type: str +""" + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.connection import Connection +from ansible_collections.fortinet.fortios.plugins.module_utils.network.fortimanager.fortimanager import FortiManagerHandler +from ansible_collections.fortinet.fortios.plugins.module_utils.network.fortimanager.common import FMGBaseException +from ansible_collections.fortinet.fortios.plugins.module_utils.network.fortimanager.common import FMGRCommon +from ansible_collections.fortinet.fortios.plugins.module_utils.network.fortimanager.common import DEFAULT_RESULT_OBJ +from ansible_collections.fortinet.fortios.plugins.module_utils.network.fortimanager.common import FAIL_SOCKET_MSG +from ansible_collections.fortinet.fortios.plugins.module_utils.network.fortimanager.common import prepare_dict +from ansible_collections.fortinet.fortios.plugins.module_utils.network.fortimanager.common import scrub_dict + + +############### +# START METHODS +############### + + +def fmgr_fwobj_ippool_modify(fmgr, paramgram): + """ + :param fmgr: The fmgr object instance from fortimanager.py + :type fmgr: class object + :param paramgram: The formatted dictionary of options to process + :type paramgram: dict + :return: The response from the FortiManager + :rtype: dict + """ + + mode = paramgram["mode"] + adom = paramgram["adom"] + # INIT A BASIC OBJECTS + response = DEFAULT_RESULT_OBJ + url = "" + datagram = {} + + # EVAL THE MODE PARAMETER FOR SET OR ADD + if mode in ['set', 'add', 'update']: + url = 
'/pm/config/adom/{adom}/obj/firewall/ippool'.format(adom=adom) + datagram = scrub_dict(prepare_dict(paramgram)) + + # EVAL THE MODE PARAMETER FOR DELETE + elif mode == "delete": + # SET THE CORRECT URL FOR DELETE + url = '/pm/config/adom/{adom}/obj/firewall/ippool/{name}'.format(adom=adom, name=paramgram["name"]) + datagram = {} + + response = fmgr.process_request(url, datagram, paramgram["mode"]) + + return response + + +############# +# END METHODS +############# + + +def main(): + argument_spec = dict( + adom=dict(type="str", default="root"), + mode=dict(choices=["add", "set", "delete", "update"], type="str", default="add"), + + type=dict(required=False, type="str", choices=["overload", + "one-to-one", + "fixed-port-range", + "port-block-allocation"]), + startip=dict(required=False, type="str"), + source_startip=dict(required=False, type="str"), + source_endip=dict(required=False, type="str"), + permit_any_host=dict(required=False, type="str", choices=["disable", "enable"]), + pba_timeout=dict(required=False, type="int"), + num_blocks_per_user=dict(required=False, type="int"), + name=dict(required=False, type="str"), + endip=dict(required=False, type="str"), + comments=dict(required=False, type="str"), + block_size=dict(required=False, type="int"), + associated_interface=dict(required=False, type="str"), + arp_reply=dict(required=False, type="str", choices=["disable", "enable"]), + arp_intf=dict(required=False, type="str"), + dynamic_mapping=dict(required=False, type="list"), + dynamic_mapping_arp_intf=dict(required=False, type="str"), + dynamic_mapping_arp_reply=dict(required=False, type="str", choices=["disable", "enable"]), + dynamic_mapping_associated_interface=dict(required=False, type="str"), + dynamic_mapping_block_size=dict(required=False, type="int"), + dynamic_mapping_comments=dict(required=False, type="str"), + dynamic_mapping_endip=dict(required=False, type="str"), + dynamic_mapping_num_blocks_per_user=dict(required=False, type="int"), + dynamic_mapping_pba_timeout=dict(required=False, type="int"), + dynamic_mapping_permit_any_host=dict(required=False, type="str", choices=["disable", "enable"]), + dynamic_mapping_source_endip=dict(required=False, type="str"), + dynamic_mapping_source_startip=dict(required=False, type="str"), + dynamic_mapping_startip=dict(required=False, type="str"), + dynamic_mapping_type=dict(required=False, type="str", choices=["overload", + "one-to-one", + "fixed-port-range", + "port-block-allocation"]), + + ) + + module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False, ) + # MODULE PARAMGRAM + paramgram = { + "mode": module.params["mode"], + "adom": module.params["adom"], + "type": module.params["type"], + "startip": module.params["startip"], + "source-startip": module.params["source_startip"], + "source-endip": module.params["source_endip"], + "permit-any-host": module.params["permit_any_host"], + "pba-timeout": module.params["pba_timeout"], + "num-blocks-per-user": module.params["num_blocks_per_user"], + "name": module.params["name"], + "endip": module.params["endip"], + "comments": module.params["comments"], + "block-size": module.params["block_size"], + "associated-interface": module.params["associated_interface"], + "arp-reply": module.params["arp_reply"], + "arp-intf": module.params["arp_intf"], + "dynamic_mapping": { + "arp-intf": module.params["dynamic_mapping_arp_intf"], + "arp-reply": module.params["dynamic_mapping_arp_reply"], + "associated-interface": module.params["dynamic_mapping_associated_interface"], + 
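# NOTE: the paramgram maps the module's snake_case options onto the hyphenated
+ # FortiManager API field names (e.g. dynamic_mapping_block_size -> "block-size"),
+ # so prepare_dict()/scrub_dict() can submit the datagram keys as-is.
+ 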
"block-size": module.params["dynamic_mapping_block_size"], + "comments": module.params["dynamic_mapping_comments"], + "endip": module.params["dynamic_mapping_endip"], + "num-blocks-per-user": module.params["dynamic_mapping_num_blocks_per_user"], + "pba-timeout": module.params["dynamic_mapping_pba_timeout"], + "permit-any-host": module.params["dynamic_mapping_permit_any_host"], + "source-endip": module.params["dynamic_mapping_source_endip"], + "source-startip": module.params["dynamic_mapping_source_startip"], + "startip": module.params["dynamic_mapping_startip"], + "type": module.params["dynamic_mapping_type"], + } + } + + module.paramgram = paramgram + fmgr = None + if module._socket_path: + connection = Connection(module._socket_path) + fmgr = FortiManagerHandler(connection, module) + fmgr.tools = FMGRCommon() + else: + module.fail_json(**FAIL_SOCKET_MSG) + + list_overrides = ['dynamic_mapping'] + paramgram = fmgr.tools.paramgram_child_list_override(list_overrides=list_overrides, + paramgram=paramgram, module=module) + # UPDATE THE CHANGED PARAMGRAM + module.paramgram = paramgram + + results = DEFAULT_RESULT_OBJ + try: + results = fmgr_fwobj_ippool_modify(fmgr, paramgram) + fmgr.govern_response(module=module, results=results, + ansible_facts=fmgr.construct_ansible_facts(results, module.params, paramgram)) + + except Exception as err: + raise FMGBaseException(err) + + return module.exit_json(**results[1]) + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/network/fortimanager/fmgr_fwobj_ippool6.py b/plugins/modules/network/fortimanager/fmgr_fwobj_ippool6.py new file mode 100644 index 0000000000..05165419f9 --- /dev/null +++ b/plugins/modules/network/fortimanager/fmgr_fwobj_ippool6.py @@ -0,0 +1,227 @@ +#!/usr/bin/python +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'metadata_version': '1.1'} + +DOCUMENTATION = ''' +--- +module: fmgr_fwobj_ippool6 +notes: + - Full Documentation at U(https://ftnt-ansible-docs.readthedocs.io/en/latest/). +author: + - Luke Weighall (@lweighall) + - Andrew Welsh (@Ghilli3) + - Jim Huber (@p4r4n0y1ng) +short_description: Allows the editing of IP Pool Objects within FortiManager. +description: + - Allows users to add/edit/delete IPv6 Pool Objects. + +options: + adom: + description: + - The ADOM the configuration should belong to. + required: false + default: root + + mode: + description: + - Sets one of three modes for managing the object. + - Allows use of soft-adds instead of overwriting existing values + choices: ['add', 'set', 'delete', 'update'] + required: false + default: add + + startip: + description: + - First IPv6 address (inclusive) in the range for the address pool. + required: false + + name: + description: + - IPv6 IP pool name. 
+ required: false
+
+ endip:
+ description:
+ - Final IPv6 address (inclusive) in the range for the address pool.
+ required: false
+
+ comments:
+ description:
+ - Comment.
+ required: false
+
+ dynamic_mapping:
+ description:
+ - EXPERTS ONLY! KNOWLEDGE OF FMGR JSON API IS REQUIRED!
+ - List of multiple child objects to be added. Expects a list of dictionaries.
+ - Dictionaries must use FortiManager API parameters, not the ansible ones listed below.
+ - If submitted, all other prefixed sub-parameters ARE IGNORED.
+ - This object is MUTUALLY EXCLUSIVE with its options.
+ - We expect that you know what you are doing with these list parameters, and are leveraging the JSON API Guide.
+ - WHEN IN DOUBT, USE THE SUB OPTIONS BELOW INSTEAD TO CREATE OBJECTS WITH MULTIPLE TASKS
+ required: false
+
+ dynamic_mapping_comments:
+ description:
+ - Dynamic Mapping clone of original suffixed parameter.
+ required: false
+
+ dynamic_mapping_endip:
+ description:
+ - Dynamic Mapping clone of original suffixed parameter.
+ required: false
+
+ dynamic_mapping_startip:
+ description:
+ - Dynamic Mapping clone of original suffixed parameter.
+ required: false
+
+
+'''
+
+EXAMPLES = '''
+- name: ADD FMGR_FIREWALL_IPPOOL6
+ fmgr_fwobj_ippool6:
+ mode: "add"
+ adom: "ansible"
+ startip:
+ name: "IPv6 IPPool"
+ endip:
+ comments: "Created by Ansible"
+
+- name: DELETE FMGR_FIREWALL_IPPOOL6
+ fmgr_fwobj_ippool6:
+ mode: "delete"
+ adom: "ansible"
+ name: "IPv6 IPPool"
+'''
+
+RETURN = """
+api_result:
+ description: full API response, includes status code and message
+ returned: always
+ type: str
+"""
+
+from ansible.module_utils.basic import AnsibleModule, env_fallback
+from ansible.module_utils.connection import Connection
+from ansible_collections.fortinet.fortios.plugins.module_utils.network.fortimanager.fortimanager import FortiManagerHandler
+from ansible_collections.fortinet.fortios.plugins.module_utils.network.fortimanager.common import FMGBaseException
+from ansible_collections.fortinet.fortios.plugins.module_utils.network.fortimanager.common import FMGRCommon
+from ansible_collections.fortinet.fortios.plugins.module_utils.network.fortimanager.common import DEFAULT_RESULT_OBJ
+from ansible_collections.fortinet.fortios.plugins.module_utils.network.fortimanager.common import FAIL_SOCKET_MSG
+from ansible_collections.fortinet.fortios.plugins.module_utils.network.fortimanager.common import prepare_dict
+from ansible_collections.fortinet.fortios.plugins.module_utils.network.fortimanager.common import scrub_dict
+
+
+def fmgr_fwobj_ippool6_modify(fmgr, paramgram):
+ """
+ :param fmgr: The fmgr object instance from fortimanager.py
+ :type fmgr: class object
+ :param paramgram: The formatted dictionary of options to process
+ :type paramgram: dict
+ :return: The response from the FortiManager
+ :rtype: dict
+ """
+
+ mode = paramgram["mode"]
+ adom = paramgram["adom"]
+ # INIT A BASIC OBJECTS
+ response = DEFAULT_RESULT_OBJ
+ url = ""
+ datagram = {}
+
+ # EVAL THE MODE PARAMETER FOR SET OR ADD
+ if mode in ['set', 'add', 'update']:
+ url = '/pm/config/adom/{adom}/obj/firewall/ippool6'.format(adom=adom)
+ datagram = scrub_dict(prepare_dict(paramgram))
+
+ # EVAL THE MODE PARAMETER FOR DELETE
+ elif mode == "delete":
+ # SET THE CORRECT URL FOR DELETE
+ url = '/pm/config/adom/{adom}/obj/firewall/ippool6/{name}'.format(adom=adom, name=paramgram["name"])
+ datagram = {}
+
+ response = fmgr.process_request(url, datagram, paramgram["mode"])
+ return response
+
+
+def main():
+ argument_spec = dict(
+ adom=dict(type="str", default="root"),
+ mode=dict(choices=["add", "set", "delete", "update"], type="str", default="add"),
+ startip=dict(required=False, type="str"),
+ name=dict(required=False, type="str"),
+ endip=dict(required=False, type="str"),
+ comments=dict(required=False, type="str"),
+ dynamic_mapping=dict(required=False, type="list"),
+ dynamic_mapping_comments=dict(required=False, type="str"),
+ dynamic_mapping_endip=dict(required=False, type="str"),
+ dynamic_mapping_startip=dict(required=False, type="str"),
+
+ )
+
+ module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False, )
+ # MODULE PARAMGRAM
+ paramgram = {
+ "mode": module.params["mode"],
+ "adom": module.params["adom"],
+ "startip": module.params["startip"],
+ "name": module.params["name"],
+ "endip": module.params["endip"],
+ "comments": module.params["comments"],
+ "dynamic_mapping": {
+ "comments": module.params["dynamic_mapping_comments"],
+ "endip": module.params["dynamic_mapping_endip"],
+ "startip": module.params["dynamic_mapping_startip"],
+ }
+ }
+ module.paramgram = paramgram
+ fmgr = None
+ if module._socket_path:
+ connection = Connection(module._socket_path)
+ fmgr = FortiManagerHandler(connection, module)
+ fmgr.tools = FMGRCommon()
+ else:
+ module.fail_json(**FAIL_SOCKET_MSG)
+
+ list_overrides = ['dynamic_mapping']
+ paramgram = fmgr.tools.paramgram_child_list_override(list_overrides=list_overrides,
+ paramgram=paramgram, module=module)
+
+ results = DEFAULT_RESULT_OBJ
+
+ try:
+ results = fmgr_fwobj_ippool6_modify(fmgr, paramgram)
+ fmgr.govern_response(module=module, results=results,
+ ansible_facts=fmgr.construct_ansible_facts(results, module.params, paramgram))
+
+ except Exception as err:
+ raise FMGBaseException(err)
+
+ return module.exit_json(**results[1])
+
+
+if __name__ == "__main__":
+ main()
diff --git a/plugins/modules/network/fortimanager/fmgr_fwobj_service.py b/plugins/modules/network/fortimanager/fmgr_fwobj_service.py
new file mode 100644
index 0000000000..7e4b518053
--- /dev/null
+++ b/plugins/modules/network/fortimanager/fmgr_fwobj_service.py
@@ -0,0 +1,623 @@
+#!/usr/bin/python
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see .
+#
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community"
+}
+
+DOCUMENTATION = '''
+---
+module: fmgr_fwobj_service
+notes:
+ - Full Documentation at U(https://ftnt-ansible-docs.readthedocs.io/en/latest/).
+author:
+ - Luke Weighall (@lweighall)
+ - Andrew Welsh (@Ghilli3)
+ - Jim Huber (@p4r4n0y1ng)
+short_description: Manages FortiManager Firewall Service Objects.
+description:
+ - Manages FortiManager Firewall Service Objects.
+
+options:
+ adom:
+ description:
+ - The ADOM the configuration should belong to.
+ required: false
+ default: root
+
+ app_category:
+ description:
+ - Application category ID.
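Since the EXAMPLES further down cover only custom services, here is a hedged sketch of a service-group task using the group options documented below (object and member names are hypothetical):

```yaml
# Hypothetical values; group_member is a comma-separated string of existing
# service object names, split apart by fmgr_fwobj_service_group().
- name: ADD A SERVICE GROUP
  fmgr_fwobj_service:
    adom: "ansible"
    object_type: "group"
    group_name: "ansible_service_group"
    group_member: "ansible_custom_service, HTTPS"
```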
+ required: false
+
+ app_service_type:
+ description:
+ - Application service type.
+ required: false
+
+ application:
+ description:
+ - Application ID.
+ required: false
+
+ category:
+ description:
+ - Service category.
+ required: false
+
+ check_reset_range:
+ description:
+ - Enable/disable RST check.
+ required: false
+
+ color:
+ description:
+ - GUI icon color.
+ required: false
+ default: 22
+
+ comment:
+ description:
+ - Comment.
+ required: false
+
+ custom_type:
+ description:
+ - Tells the module what kind of custom service is to be added.
+ choices: ['tcp_udp_sctp', 'icmp', 'icmp6', 'ip', 'http', 'ftp', 'connect', 'socks_tcp', 'socks_udp', 'all']
+ default: all
+ required: false
+
+ explicit_proxy:
+ description:
+ - Enable/disable explicit web proxy service.
+ choices: ['enable', 'disable']
+ default: 'disable'
+ required: false
+
+ fqdn:
+ description:
+ - Fully qualified domain name.
+ required: false
+ default: ""
+
+ group_name:
+ description:
+ - Name of the Service Group.
+ required: false
+
+ group_member:
+ description:
+ - Comma-separated list of members' names.
+ required: false
+
+ icmp_code:
+ description:
+ - ICMP code.
+ required: false
+
+ icmp_type:
+ description:
+ - ICMP type.
+ required: false
+
+ iprange:
+ description:
+ - Start IP-End IP.
+ required: false
+ default: "0.0.0.0"
+
+ name:
+ description:
+ - Custom service name.
+ required: false
+
+ mode:
+ description:
+ - Sets one of three modes for managing the object.
+ choices: ['add', 'set', 'delete']
+ default: add
+ required: false
+
+ object_type:
+ description:
+ - Tells the module if we are adding a custom service, category, or group.
+ choices: ['custom', 'group', 'category']
+ required: false
+
+ protocol:
+ description:
+ - Protocol type.
+ required: false
+
+ protocol_number:
+ description:
+ - IP protocol number.
+ required: false
+
+ sctp_portrange:
+ description:
+ - Multiple SCTP port ranges. Comma separated list of destination ports to add (i.e. '443,80').
+ - Syntax is destPort:sourcePort.
+ - If no sourcePort is defined, it assumes all of them.
+ - Ranges can be defined with a hyphen (-).
+ - Examples -- '443' (destPort 443 only) '443:1000-2000' (destPort 443 from source ports 1000-2000).
+ - String multiples together in the same quotes, comma separated. ('443:1000-2000, 80:1000-2000').
+ required: false
+
+ session_ttl:
+ description:
+ - Session TTL (300 - 604800, 0 = default).
+ required: false
+ default: 0
+
+ tcp_halfclose_timer:
+ description:
+ - TCP half close timeout (1 - 86400 sec, 0 = default).
+ required: false
+ default: 0
+
+ tcp_halfopen_timer:
+ description:
+ - TCP half open timeout (1 - 86400 sec, 0 = default).
+ required: false
+ default: 0
+
+ tcp_portrange:
+ description:
+ - Comma separated list of destination ports to add (i.e. '443,80').
+ - Syntax is destPort:sourcePort.
+ - If no sourcePort is defined, it assumes all of them.
+ - Ranges can be defined with a hyphen (-).
+ - Examples -- '443' (destPort 443 only) '443:1000-2000' (destPort 443 from source ports 1000-2000).
+ - String multiples together in the same quotes, comma separated. ('443:1000-2000, 80:1000-2000').
+ required: false
+
+ tcp_timewait_timer:
+ description:
+ - TCP time wait timeout (1 - 300 sec, 0 = default).
+ required: false
+ default: 0
+
+ udp_idle_timer:
+ description:
+ - UDP idle timeout (0 - 86400 sec, 0 = default).
+ required: false
+ default: 0
+
+ udp_portrange:
+ description:
+ - Comma separated list of destination ports to add (i.e. '443,80').
+ - Syntax is destPort:sourcePort.
+ - If no sourcePort is defined, it assumes all of them.
+ - Ranges can be defined with a hyphen (-).
+ - Examples -- '443' (destPort 443 only) '443:1000-2000' (destPort 443 from source ports 1000-2000).
+ - String multiples together in the same quotes, comma separated. ('443:1000-2000, 80:1000-2000').
+ required: false
+
+ visibility:
+ description:
+ - Enable/disable service visibility.
+ required: false
+ choices: ["enable", "disable"]
+ default: "enable"
+
+'''
+
+EXAMPLES = '''
+- name: ADD A CUSTOM SERVICE FOR TCP/UDP/SCTP
+ fmgr_fwobj_service:
+ adom: "ansible"
+ name: "ansible_custom_service"
+ object_type: "custom"
+ custom_type: "tcp_udp_sctp"
+ tcp_portrange: "443"
+ udp_portrange: "51"
+ sctp_portrange: "100"
+
+- name: ADD A CUSTOM SERVICE FOR TCP/UDP/SCTP WITH SOURCE RANGES AND MULTIPLES
+ fmgr_fwobj_service:
+ adom: "ansible"
+ name: "ansible_custom_serviceWithSource"
+ object_type: "custom"
+ custom_type: "tcp_udp_sctp"
+ tcp_portrange: "443:2000-1000,80-82:10000-20000"
+ udp_portrange: "51:100-200,162:200-400"
+ sctp_portrange: "100:2000-2500"
+
+- name: ADD A CUSTOM SERVICE FOR ICMP
+ fmgr_fwobj_service:
+ adom: "ansible"
+ name: "ansible_custom_icmp"
+ object_type: "custom"
+ custom_type: "icmp"
+ icmp_type: "8"
+ icmp_code: "3"
+
+- name: ADD A CUSTOM SERVICE FOR ICMP6
+ fmgr_fwobj_service:
+ adom: "ansible"
+ name: "ansible_custom_icmp6"
+ object_type: "custom"
+ custom_type: "icmp6"
+ icmp_type: "5"
+ icmp_code: "1"
+
+- name: ADD A CUSTOM SERVICE FOR IP - GRE
+ fmgr_fwobj_service:
+ adom: "ansible"
+ name: "ansible_custom_gre"
+ object_type: "custom"
+ custom_type: "ip"
+ protocol_number: "47"
+
+- name: ADD A CUSTOM PROXY FOR ALL WITH SOURCE RANGES AND MULTIPLES
+ fmgr_fwobj_service:
+ adom: "ansible"
+ name: "ansible_custom_proxy_all"
+ object_type: "custom"
+ custom_type: "all"
+ explicit_proxy: "enable"
+ tcp_portrange: "443:2000-1000,80-82:10000-20000"
+ iprange: "www.ansible.com"
+'''
+
+RETURN = """
+api_result:
+ description: full API response, includes status code and message
+ returned: always
+ type: str
+"""
+
+from ansible.module_utils.basic import AnsibleModule, env_fallback
+from ansible.module_utils.connection import Connection
+from ansible_collections.fortinet.fortios.plugins.module_utils.network.fortimanager.fortimanager import FortiManagerHandler
+from ansible_collections.fortinet.fortios.plugins.module_utils.network.fortimanager.common import FMGBaseException
+from ansible_collections.fortinet.fortios.plugins.module_utils.network.fortimanager.common import FMGRCommon
+from ansible_collections.fortinet.fortios.plugins.module_utils.network.fortimanager.common import DEFAULT_RESULT_OBJ
+from ansible_collections.fortinet.fortios.plugins.module_utils.network.fortimanager.common import FAIL_SOCKET_MSG
+from ansible_collections.fortinet.fortios.plugins.module_utils.network.fortimanager.common import scrub_dict
+
+
+def fmgr_fwobj_service_custom(fmgr, paramgram):
+ """
+ description:
+ - the tcp and udp-portrange parameters are in a list when there are multiple; they are not in a list when
+ singular or by themselves (only 1 was listed)
+ - the syntax for this is (destPort:sourcePort). Ranges are (xxxx-xxxx) i.e. 443:443, or 443:1000-2000.
+ - if you leave out the second field after the colon (source port) it assumes any source port (which is usual)
+ - multiples would look like ['443:1000-2000','80']
+ - a single one would look like "443:1000-2000" without the list around it (a string!)
+ - the protocol parameter is the protocol NUMBER, not the string of it.
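+ - worked example of the splitting below: a tcp-portrange string of "443:1000-2000, 80"
+ is split on commas and stripped to ['443:1000-2000', '80'] before it is submitted.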
+ :param fmgr: The fmgr object instance from fortimanager.py + :type fmgr: class object + :param paramgram: The formatted dictionary of options to process + :type paramgram: dict + :return: The response from the FortiManager + :rtype: dict + """ + response = DEFAULT_RESULT_OBJ + if paramgram["mode"] in ['set', 'add']: + # SET THE URL FOR ADD / SET + url = '/pm/config/adom/{adom}/obj/firewall/service/custom'.format(adom=paramgram["adom"]) + # BUILD THE DEFAULT DATAGRAM + datagram = { + # ADVANCED OPTIONS + "app-category": paramgram["app-category"], + "app-service-type": paramgram["app-service-type"], + "application": paramgram["application"], + "category": paramgram["category"], + "check-reset-range": paramgram["check-reset-range"], + "color": paramgram["color"], + "session-ttl": paramgram["session-ttl"], + "tcp-halfclose-timer": paramgram["tcp-halfclose-timer"], + "tcp-halfopen-timer": paramgram["tcp-halfopen-timer"], + "tcp-timewait-timer": paramgram["tcp-timewait-timer"], + "udp-idle-timer": paramgram["udp-idle-timer"], + "visibility": paramgram["visibility"], + "comment": paramgram["comment"], + "proxy": paramgram["explicit-proxy"], + "name": paramgram["name"] + } + + if datagram["proxy"] == "disable": + ####################################### + # object-type = "TCP/UDP/SCTP" + ####################################### + if paramgram["custom_type"] == "tcp_udp_sctp": + datagram["protocol"] = "TCP/UDP/SCTP" + # PROCESS PORT RANGES TO PUT INTO THE PROPER SYNTAX + if paramgram["tcp-portrange"] is not None: + tcp_list = [] + for tcp in paramgram["tcp-portrange"].split(","): + tcp = tcp.strip() + tcp_list.append(tcp) + datagram["tcp-portrange"] = tcp_list + + if paramgram["udp-portrange"] is not None: + udp_list = [] + for udp in paramgram["udp-portrange"].split(","): + udp = udp.strip() + udp_list.append(udp) + datagram["udp-portrange"] = udp_list + + if paramgram["sctp-portrange"] is not None: + sctp_list = [] + for sctp in paramgram["sctp-portrange"].split(","): + sctp = sctp.strip() + sctp_list.append(sctp) + datagram["sctp-portrange"] = sctp_list + + ####################################### + # object-type = "ICMP" + ####################################### + if paramgram["custom_type"] == "icmp": + datagram["icmpcode"] = paramgram["icmp_code"] + datagram["icmptype"] = paramgram["icmp_type"] + datagram["protocol"] = "ICMP" + + ####################################### + # object-type = "ICMP6" + ####################################### + if paramgram["custom_type"] == "icmp6": + datagram["icmpcode"] = paramgram["icmp_code"] + datagram["icmptype"] = paramgram["icmp_type"] + datagram["protocol"] = "ICMP6" + + ####################################### + # object-type = "IP" + ####################################### + if paramgram["custom_type"] == "ip": + datagram["protocol"] = "IP" + datagram["protocol-number"] = paramgram["protocol-number"] + + ####################################### + # object-type in any of the explicit proxy options + ####################################### + if datagram["proxy"] == "enable": + datagram["protocol"] = paramgram["custom_type"].upper() + datagram["iprange"] = paramgram["iprange"] + + # PROCESS PROXY TCP PORT RANGES TO PUT INTO THE PROPER SYNTAX + if paramgram["tcp-portrange"] is not None: + tcp_list = [] + for tcp in paramgram["tcp-portrange"].split(","): + tcp = tcp.strip() + tcp_list.append(tcp) + datagram["tcp-portrange"] = tcp_list + + if paramgram["mode"] == "delete": + datagram = { + "name": paramgram["name"] + } + # SET DELETE URL + url = 
'/pm/config/adom/{adom}/obj/firewall/service/custom' \
+ '/{name}'.format(adom=paramgram["adom"], name=paramgram["name"])
+
+ datagram = scrub_dict(datagram)
+ response = fmgr.process_request(url, datagram, paramgram["mode"])
+ return response
+
+
+def fmgr_fwobj_service_group(fmgr, paramgram):
+ """
+ :param fmgr: The fmgr object instance from fortimanager.py
+ :type fmgr: class object
+ :param paramgram: The formatted dictionary of options to process
+ :type paramgram: dict
+ :return: The response from the FortiManager
+ :rtype: dict
+ """
+ response = DEFAULT_RESULT_OBJ
+ if paramgram["mode"] in ['set', 'add']:
+ url = '/pm/config/adom/{adom}/obj/firewall/service/group'.format(adom=paramgram["adom"])
+ datagram = {
+ "name": paramgram["group-name"],
+ "comment": paramgram["comment"],
+ "proxy": paramgram["explicit-proxy"],
+ "color": paramgram["color"]
+ }
+
+ members = paramgram["group-member"]
+ member = []
+ for obj in members.split(","):
+ member.append(obj.strip())
+ datagram["member"] = member
+
+ if paramgram["mode"] == "delete":
+ datagram = {
+ "name": paramgram["group-name"]
+ }
+ # SET DELETE URL
+ url = '/pm/config/adom/{adom}/obj/firewall/service/group' \
+ '/{name}'.format(adom=paramgram["adom"], name=paramgram["group-name"])
+
+ datagram = scrub_dict(datagram)
+ response = fmgr.process_request(url, datagram, paramgram["mode"])
+ return response
+
+
+def fmgr_fwobj_service_category(fmgr, paramgram):
+ """
+ :param fmgr: The fmgr object instance from fortimanager.py
+ :type fmgr: class object
+ :param paramgram: The formatted dictionary of options to process
+ :type paramgram: dict
+ :return: The response from the FortiManager
+ :rtype: dict
+ """
+ response = DEFAULT_RESULT_OBJ
+ if paramgram["mode"] in ['set', 'add']:
+ url = '/pm/config/adom/{adom}/obj/firewall/service/category'.format(adom=paramgram["adom"])
+ # GET RID OF ANY WHITESPACE
+ category = paramgram["category"]
+ category = category.strip()
+
+ datagram = {
+ "name": category,
+ "comment": "Created by Ansible"
+ }
+
+ # IF MODE = DELETE
+ if paramgram["mode"] == "delete":
+ datagram = {
+ "name": paramgram["category"]
+ }
+ # SET DELETE URL
+ url = '/pm/config/adom/{adom}/obj/firewall/service/category' \
+ '/{name}'.format(adom=paramgram["adom"], name=paramgram["category"])
+
+ datagram = scrub_dict(datagram)
+ response = fmgr.process_request(url, datagram, paramgram["mode"])
+ return response
+
+
+def main():
+ argument_spec = dict(
+ adom=dict(required=False, type="str", default="root"),
+ mode=dict(required=False, type="str", choices=['add', 'set', 'delete'], default="add"),
+ app_category=dict(required=False, type="str"),
+ app_service_type=dict(required=False, type="str"),
+ application=dict(required=False, type="str"),
+ category=dict(required=False, type="str"),
+ check_reset_range=dict(required=False, type="str"),
+ color=dict(required=False, type="int", default=22),
+ comment=dict(required=False, type="str"),
+ custom_type=dict(required=False, type="str", choices=['tcp_udp_sctp', 'icmp', 'icmp6', 'ip', 'http', 'ftp',
+ 'connect', 'socks_tcp', 'socks_udp', 'all'],
+ default="all"),
+ explicit_proxy=dict(required=False, type="str", choices=['enable', 'disable'], default="disable"),
+ fqdn=dict(required=False, type="str", default=""),
+ group_name=dict(required=False, type="str"),
+ group_member=dict(required=False, type="str"),
+ icmp_code=dict(required=False, type="int"),
+ icmp_type=dict(required=False, type="int"),
+ iprange=dict(required=False, type="str", default="0.0.0.0"),
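+ # object_type selects the helper that runs below: "custom" ->
+ # fmgr_fwobj_service_custom(), "group" -> fmgr_fwobj_service_group(),
+ # "category" -> fmgr_fwobj_service_category().
+ 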
name=dict(required=False, type="str"), + protocol=dict(required=False, type="str"), + protocol_number=dict(required=False, type="int"), + sctp_portrange=dict(required=False, type="str"), + session_ttl=dict(required=False, type="int", default=0), + object_type=dict(required=False, type="str", choices=['custom', 'group', 'category']), + tcp_halfclose_timer=dict(required=False, type="int", default=0), + tcp_halfopen_timer=dict(required=False, type="int", default=0), + tcp_portrange=dict(required=False, type="str"), + tcp_timewait_timer=dict(required=False, type="int", default=0), + udp_idle_timer=dict(required=False, type="int", default=0), + udp_portrange=dict(required=False, type="str"), + visibility=dict(required=False, type="str", default="enable", choices=["enable", "disable"]), + + ) + + module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False, ) + # MODULE DATAGRAM + paramgram = { + "adom": module.params["adom"], + "app-category": module.params["app_category"], + "app-service-type": module.params["app_service_type"], + "application": module.params["application"], + "category": module.params["category"], + "check-reset-range": module.params["check_reset_range"], + "color": module.params["color"], + "comment": module.params["comment"], + "custom_type": module.params["custom_type"], + "explicit-proxy": module.params["explicit_proxy"], + "fqdn": module.params["fqdn"], + "group-name": module.params["group_name"], + "group-member": module.params["group_member"], + "icmp_code": module.params["icmp_code"], + "icmp_type": module.params["icmp_type"], + "iprange": module.params["iprange"], + "name": module.params["name"], + "mode": module.params["mode"], + "protocol": module.params["protocol"], + "protocol-number": module.params["protocol_number"], + "sctp-portrange": module.params["sctp_portrange"], + "object_type": module.params["object_type"], + "session-ttl": module.params["session_ttl"], + "tcp-halfclose-timer": module.params["tcp_halfclose_timer"], + "tcp-halfopen-timer": module.params["tcp_halfopen_timer"], + "tcp-portrange": module.params["tcp_portrange"], + "tcp-timewait-timer": module.params["tcp_timewait_timer"], + "udp-idle-timer": module.params["udp_idle_timer"], + "udp-portrange": module.params["udp_portrange"], + "visibility": module.params["visibility"], + } + module.paramgram = paramgram + fmgr = None + if module._socket_path: + connection = Connection(module._socket_path) + fmgr = FortiManagerHandler(connection, module) + fmgr.tools = FMGRCommon() + else: + module.fail_json(**FAIL_SOCKET_MSG) + + results = DEFAULT_RESULT_OBJ + + try: + # CHECK FOR CATEGORIES TO ADD + # THIS IS ONLY WHEN OBJECT_TYPE ISN'T SPECIFICALLY ADDING A CATEGORY! + # WE NEED TO ADD THE CATEGORY BEFORE ADDING THE OBJECT + # IF ANY category ARE DEFINED AND MODE IS ADD OR SET LETS ADD THOSE + # THIS IS A "BLIND ADD" AND THE EXIT CODE FOR OBJECT ALREADY EXISTS IS TREATED AS A PASS + if paramgram["category"] is not None and paramgram["mode"] in ['add', 'set'] \ + and paramgram["object_type"] != "category": + category_add = fmgr_fwobj_service_category(fmgr, paramgram) + fmgr.govern_response(module=module, results=category_add, + ansible_facts=fmgr.construct_ansible_facts(category_add, module.params, paramgram)) + except Exception as err: + raise FMGBaseException(err) + + try: + # IF OBJECT_TYPE IS CATEGORY... 
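+ # good_codes=[0, -2, -3] below extends the blind-add idea: the non-zero codes
+ # that (per the note above) amount to "object already exists" count as a pass.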
+ if paramgram["object_type"] == 'category':
+ results = fmgr_fwobj_service_category(fmgr, paramgram)
+ fmgr.govern_response(module=module, results=results, good_codes=[0, -2, -3],
+ ansible_facts=fmgr.construct_ansible_facts(results, module.params, paramgram))
+ except Exception as err:
+ raise FMGBaseException(err)
+
+ try:
+ # IF OBJECT_TYPE IS CUSTOM...
+ if paramgram["object_type"] == 'custom':
+ results = fmgr_fwobj_service_custom(fmgr, paramgram)
+ fmgr.govern_response(module=module, results=results, good_codes=[0, -2, -3],
+ ansible_facts=fmgr.construct_ansible_facts(results, module.params, paramgram))
+ except Exception as err:
+ raise FMGBaseException(err)
+
+ try:
+ # IF OBJECT_TYPE IS GROUP...
+ if paramgram["object_type"] == 'group':
+ results = fmgr_fwobj_service_group(fmgr, paramgram)
+ fmgr.govern_response(module=module, results=results, good_codes=[0, -2, -3],
+ ansible_facts=fmgr.construct_ansible_facts(results, module.params, paramgram))
+ except Exception as err:
+ raise FMGBaseException(err)
+
+ return module.exit_json(**results[1])
+
+
+if __name__ == "__main__":
+ main()
diff --git a/plugins/modules/network/fortimanager/fmgr_fwobj_vip.py b/plugins/modules/network/fortimanager/fmgr_fwobj_vip.py
new file mode 100644
index 0000000000..bf20660062
--- /dev/null
+++ b/plugins/modules/network/fortimanager/fmgr_fwobj_vip.py
@@ -0,0 +1,2428 @@
+#!/usr/bin/python
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see .
+#
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'metadata_version': '1.1'}
+
+DOCUMENTATION = '''
+---
+module: fmgr_fwobj_vip
+notes:
+ - Full Documentation at U(https://ftnt-ansible-docs.readthedocs.io/en/latest/).
+author:
+ - Luke Weighall (@lweighall)
+ - Andrew Welsh (@Ghilli3)
+ - Jim Huber (@p4r4n0y1ng)
+short_description: Manages Virtual IP objects in FortiManager
+description:
+ - Manages Virtual IP objects in FortiManager for IPv4
+
+options:
+ adom:
+ description:
+ - The ADOM the configuration should belong to.
+ required: false
+ default: root
+
+ mode:
+ description:
+ - Sets one of four modes for managing the object.
+ - Allows use of soft-adds instead of overwriting existing values.
+ choices: ['add', 'set', 'delete', 'update']
+ required: false
+ default: add
+
+ websphere_server:
+ description:
+ - Enable to add an HTTP header to indicate SSL offloading for a WebSphere server.
+ - choice | disable | Do not add HTTP header indicating SSL offload for WebSphere server.
+ - choice | enable | Add HTTP header indicating SSL offload for WebSphere server.
+ required: false
+ choices: ["disable", "enable"]
+
+ weblogic_server:
+ description:
+ - Enable to add an HTTP header to indicate SSL offloading for a WebLogic server.
+ - choice | disable | Do not add HTTP header indicating SSL offload for WebLogic server.
+ - choice | enable | Add HTTP header indicating SSL offload for WebLogic server. + required: false + choices: ["disable", "enable"] + + type: + description: + - Configure a static NAT, load balance, server load balance, DNS translation, or FQDN VIP. + - choice | static-nat | Static NAT. + - choice | load-balance | Load balance. + - choice | server-load-balance | Server load balance. + - choice | dns-translation | DNS translation. + - choice | fqdn | FQDN Translation + required: false + choices: ["static-nat", "load-balance", "server-load-balance", "dns-translation", "fqdn"] + + ssl_server_session_state_type: + description: + - How to expire SSL sessions for the segment of the SSL connection between the server and the FortiGate. + - choice | disable | Do not keep session states. + - choice | time | Expire session states after this many minutes. + - choice | count | Expire session states when this maximum is reached. + - choice | both | Expire session states based on time or count, whichever occurs first. + required: false + choices: ["disable", "time", "count", "both"] + + ssl_server_session_state_timeout: + description: + - Number of minutes to keep FortiGate to Server SSL session state. + required: false + + ssl_server_session_state_max: + description: + - Maximum number of FortiGate to Server SSL session states to keep. + required: false + + ssl_server_min_version: + description: + - Lowest SSL/TLS version acceptable from a server. Use the client setting by default. + - choice | ssl-3.0 | SSL 3.0. + - choice | tls-1.0 | TLS 1.0. + - choice | tls-1.1 | TLS 1.1. + - choice | tls-1.2 | TLS 1.2. + - choice | client | Use same value as client configuration. + required: false + choices: ["ssl-3.0", "tls-1.0", "tls-1.1", "tls-1.2", "client"] + + ssl_server_max_version: + description: + - Highest SSL/TLS version acceptable from a server. Use the client setting by default. + - choice | ssl-3.0 | SSL 3.0. + - choice | tls-1.0 | TLS 1.0. + - choice | tls-1.1 | TLS 1.1. + - choice | tls-1.2 | TLS 1.2. + - choice | client | Use same value as client configuration. + required: false + choices: ["ssl-3.0", "tls-1.0", "tls-1.1", "tls-1.2", "client"] + + ssl_server_algorithm: + description: + - Permitted encryption algorithms for the server side of SSL full mode sessions according to encryption strength + - choice | high | High encryption. Allow only AES and ChaCha. + - choice | low | Low encryption. Allow AES, ChaCha, 3DES, RC4, and DES. + - choice | medium | Medium encryption. Allow AES, ChaCha, 3DES, and RC4. + - choice | custom | Custom encryption. Use ssl-server-cipher-suites to select the cipher suites that are allowed. + - choice | client | Use the same encryption algorithms for both client and server sessions. + required: false + choices: ["high", "low", "medium", "custom", "client"] + + ssl_send_empty_frags: + description: + - Enable/disable sending empty fragments to avoid CBC IV attacks (SSL 3.0 & TLS 1.0 only). + - choice | disable | Do not send empty fragments. + - choice | enable | Send empty fragments. + required: false + choices: ["disable", "enable"] + + ssl_pfs: + description: + - Select the cipher suites that can be used for SSL perfect forward secrecy (PFS). + - choice | require | Allow only Diffie-Hellman cipher-suites, so PFS is applied. + - choice | deny | Allow only non-Diffie-Hellman cipher-suites, so PFS is not applied. 
+ - choice | allow | Allow use of any cipher suite so PFS may or may not be used depending on the cipher suite.
+ required: false
+ choices: ["require", "deny", "allow"]
+
+ ssl_mode:
+ description:
+ - Apply SSL offloading mode.
+ - choice | half | Client to FortiGate SSL.
+ - choice | full | Client to FortiGate and FortiGate to Server SSL.
+ required: false
+ choices: ["half", "full"]
+
+ ssl_min_version:
+ description:
+ - Lowest SSL/TLS version acceptable from a client.
+ - choice | ssl-3.0 | SSL 3.0.
+ - choice | tls-1.0 | TLS 1.0.
+ - choice | tls-1.1 | TLS 1.1.
+ - choice | tls-1.2 | TLS 1.2.
+ required: false
+ choices: ["ssl-3.0", "tls-1.0", "tls-1.1", "tls-1.2"]
+
+ ssl_max_version:
+ description:
+ - Highest SSL/TLS version acceptable from a client.
+ - choice | ssl-3.0 | SSL 3.0.
+ - choice | tls-1.0 | TLS 1.0.
+ - choice | tls-1.1 | TLS 1.1.
+ - choice | tls-1.2 | TLS 1.2.
+ required: false
+ choices: ["ssl-3.0", "tls-1.0", "tls-1.1", "tls-1.2"]
+
+ ssl_http_match_host:
+ description:
+ - Enable/disable HTTP host matching for location conversion.
+ - choice | disable | Do not match HTTP host.
+ - choice | enable | Match HTTP host in response header.
+ required: false
+ choices: ["disable", "enable"]
+
+ ssl_http_location_conversion:
+ description:
+ - Enable to replace HTTP with HTTPS in the reply's Location HTTP header field.
+ - choice | disable | Disable HTTP location conversion.
+ - choice | enable | Enable HTTP location conversion.
+ required: false
+ choices: ["disable", "enable"]
+
+ ssl_hsts_include_subdomains:
+ description:
+ - Indicate that HSTS header applies to all subdomains.
+ - choice | disable | HSTS header does not apply to subdomains.
+ - choice | enable | HSTS header applies to subdomains.
+ required: false
+ choices: ["disable", "enable"]
+
+ ssl_hsts_age:
+ description:
+ - Number of seconds the client should honour the HSTS setting.
+ required: false
+
+ ssl_hsts:
+ description:
+ - Enable/disable including HSTS header in response.
+ - choice | disable | Do not add an HSTS header to each HTTP response.
+ - choice | enable | Add an HSTS header to each HTTP response.
+ required: false
+ choices: ["disable", "enable"]
+
+ ssl_hpkp_report_uri:
+ description:
+ - URL to report HPKP violations to.
+ required: false
+
+ ssl_hpkp_primary:
+ description:
+ - Certificate to generate primary HPKP pin from.
+ required: false
+
+ ssl_hpkp_include_subdomains:
+ description:
+ - Indicate that HPKP header applies to all subdomains.
+ - choice | disable | HPKP header does not apply to subdomains.
+ - choice | enable | HPKP header applies to subdomains.
+ required: false
+ choices: ["disable", "enable"]
+
+ ssl_hpkp_backup:
+ description:
+ - Certificate to generate backup HPKP pin from.
+ required: false
+
+ ssl_hpkp_age:
+ description:
+ - Number of seconds the client should honour the HPKP setting.
+ required: false
+
+ ssl_hpkp:
+ description:
+ - Enable/disable including HPKP header in response.
+ - choice | disable | Do not add an HPKP header to each HTTP response.
+ - choice | enable | Add an HPKP header to each HTTP response.
+ - choice | report-only | Add an HPKP Report-Only header to each HTTP response.
+ required: false
+ choices: ["disable", "enable", "report-only"]
+
+ ssl_dh_bits:
+ description:
+ - Number of bits to use in the Diffie-Hellman exchange for RSA encryption of SSL sessions.
+ - choice | 768 | 768-bit Diffie-Hellman prime.
+ - choice | 1024 | 1024-bit Diffie-Hellman prime.
+ - choice | 1536 | 1536-bit Diffie-Hellman prime.
+ - choice | 2048 | 2048-bit Diffie-Hellman prime. + - choice | 3072 | 3072-bit Diffie-Hellman prime. + - choice | 4096 | 4096-bit Diffie-Hellman prime. + required: false + choices: ["768", "1024", "1536", "2048", "3072", "4096"] + + ssl_client_session_state_type: + description: + - How to expire SSL sessions for the segment of the SSL connection between the client and the FortiGate. + - choice | disable | Do not keep session states. + - choice | time | Expire session states after this many minutes. + - choice | count | Expire session states when this maximum is reached. + - choice | both | Expire session states based on time or count, whichever occurs first. + required: false + choices: ["disable", "time", "count", "both"] + + ssl_client_session_state_timeout: + description: + - Number of minutes to keep client to FortiGate SSL session state. + required: false + + ssl_client_session_state_max: + description: + - Maximum number of client to FortiGate SSL session states to keep. + required: false + + ssl_client_renegotiation: + description: + - Allow, deny, or require secure renegotiation of client sessions to comply with RFC 5746. + - choice | deny | Abort any client initiated SSL re-negotiation attempt. + - choice | allow | Allow a SSL client to renegotiate. + - choice | secure | Abort any client initiated SSL re-negotiation attempt that does not use RFC 5746. + required: false + choices: ["deny", "allow", "secure"] + + ssl_client_fallback: + description: + - Enable/disable support for preventing Downgrade Attacks on client connections (RFC 7507). + - choice | disable | Disable. + - choice | enable | Enable. + required: false + choices: ["disable", "enable"] + + ssl_certificate: + description: + - The name of the SSL certificate to use for SSL acceleration. + required: false + + ssl_algorithm: + description: + - Permitted encryption algorithms for SSL sessions according to encryption strength. + - choice | high | High encryption. Allow only AES and ChaCha. + - choice | medium | Medium encryption. Allow AES, ChaCha, 3DES, and RC4. + - choice | low | Low encryption. Allow AES, ChaCha, 3DES, RC4, and DES. + - choice | custom | Custom encryption. Use config ssl-cipher-suites to select the cipher suites that are allowed. + required: false + choices: ["high", "medium", "low", "custom"] + + srcintf_filter: + description: + - Interfaces to which the VIP applies. Separate the names with spaces. + required: false + + src_filter: + description: + - Source address filter. Each address must be either an IP/subnet (x.x.x.x/n) or a range (x.x.x.x-y.y.y.y). + - Separate addresses with spaces. + required: false + + service: + description: + - Service name. + required: false + + server_type: + description: + - Protocol to be load balanced by the virtual server (also called the server load balance virtual IP). + - choice | http | HTTP + - choice | https | HTTPS + - choice | ssl | SSL + - choice | tcp | TCP + - choice | udp | UDP + - choice | ip | IP + - choice | imaps | IMAPS + - choice | pop3s | POP3S + - choice | smtps | SMTPS + required: false + choices: ["http", "https", "ssl", "tcp", "udp", "ip", "imaps", "pop3s", "smtps"] + + protocol: + description: + - Protocol to use when forwarding packets. + - choice | tcp | TCP. + - choice | udp | UDP. + - choice | sctp | SCTP. + - choice | icmp | ICMP. + required: false + choices: ["tcp", "udp", "sctp", "icmp"] + + portmapping_type: + description: + - Port mapping type. + - choice | 1-to-1 | One to one. + - choice | m-to-n | Many to many. 
+ required: false
+ choices: ["1-to-1", "m-to-n"]
+
+ portforward:
+ description:
+ - Enable/disable port forwarding.
+ - choice | disable | Disable port forward.
+ - choice | enable | Enable port forward.
+ required: false
+ choices: ["disable", "enable"]
+
+ persistence:
+ description:
+ - Configure how to make sure that clients connect to the same server every time they make a request that is part
+ - of the same session.
+ - choice | none | None.
+ - choice | http-cookie | HTTP cookie.
+ - choice | ssl-session-id | SSL session ID.
+ required: false
+ choices: ["none", "http-cookie", "ssl-session-id"]
+
+ outlook_web_access:
+ description:
+ - Enable to add the Front-End-Https header for Microsoft Outlook Web Access.
+ - choice | disable | Disable Outlook Web Access support.
+ - choice | enable | Enable Outlook Web Access support.
+ required: false
+ choices: ["disable", "enable"]
+
+ nat_source_vip:
+ description:
+ - Enable to prevent unintended servers from using a virtual IP.
+ - Disable to use the actual IP address of the server as the source address.
+ - choice | disable | Do not force to NAT as VIP.
+ - choice | enable | Force to NAT as VIP.
+ required: false
+ choices: ["disable", "enable"]
+
+ name:
+ description:
+ - Virtual IP name.
+ required: false
+
+ monitor:
+ description:
+ - Name of the health check monitor to use when polling to determine a virtual server's connectivity status.
+ required: false
+
+ max_embryonic_connections:
+ description:
+ - Maximum number of incomplete connections.
+ required: false
+
+ mappedport:
+ description:
+ - Port number range on the destination network to which the external port number range is mapped.
+ required: false
+
+ mappedip:
+ description:
+ - IP address or address range on the destination network to which the external IP address is mapped.
+ required: false
+
+ mapped_addr:
+ description:
+ - Mapped FQDN address name.
+ required: false
+
+ ldb_method:
+ description:
+ - Method used to distribute sessions to real servers.
+ - choice | static | Distribute to server based on source IP.
+ - choice | round-robin | Distribute to server based on round robin order.
+ - choice | weighted | Distribute to server based on weight.
+ - choice | least-session | Distribute to server with lowest session count.
+ - choice | least-rtt | Distribute to server with lowest Round-Trip-Time.
+ - choice | first-alive | Distribute to the first server that is alive.
+ - choice | http-host | Distribute to server based on host field in HTTP header.
+ required: false
+ choices: ["static", "round-robin", "weighted", "least-session", "least-rtt", "first-alive", "http-host"]
+
+ https_cookie_secure:
+ description:
+ - Enable/disable verification that inserted HTTPS cookies are secure.
+ - choice | disable | Do not mark cookie as secure, allow sharing between an HTTP and HTTPS connection.
+ - choice | enable | Mark inserted cookie as secure, cookie can only be used for an HTTPS connection.
+ required: false
+ choices: ["disable", "enable"]
+
+ http_multiplex:
+ description:
+ - Enable/disable HTTP multiplexing.
+ - choice | disable | Disable HTTP session multiplexing.
+ - choice | enable | Enable HTTP session multiplexing.
+ required: false
+ choices: ["disable", "enable"]
+
+ http_ip_header_name:
+ description:
+ - For HTTP multiplexing, enter a custom HTTPS header name. The original client IP address is added to this header.
+ - If empty, X-Forwarded-For is used.
+ required: false
+
+ http_ip_header:
+ description:
+ - For HTTP multiplexing, enable to add the original client IP address in the X-Forwarded-For HTTP header.
+ - choice | disable | Disable adding HTTP header.
+ - choice | enable | Enable adding HTTP header.
+ required: false
+ choices: ["disable", "enable"]
+
+ http_cookie_share:
+ description:
+ - Control sharing of cookies across virtual servers. same-ip means a cookie from one virtual server can be used
+ - by another. Disable stops cookie sharing.
+ - choice | disable | Only allow HTTP cookie to match this virtual server.
+ - choice | same-ip | Allow HTTP cookie to match any virtual server with same IP.
+ required: false
+ choices: ["disable", "same-ip"]
+
+ http_cookie_path:
+ description:
+ - Limit HTTP cookie persistence to the specified path.
+ required: false
+
+ http_cookie_generation:
+ description:
+ - Generation of HTTP cookie to be accepted. Changing invalidates all existing cookies.
+ required: false
+
+ http_cookie_domain_from_host:
+ description:
+ - Enable/disable use of HTTP cookie domain from host field in HTTP.
+ - choice | disable | Disable use of HTTP cookie domain from host field in HTTP (use the http-cookie-domain setting).
+ - choice | enable | Enable use of HTTP cookie domain from host field in HTTP.
+ required: false
+ choices: ["disable", "enable"]
+
+ http_cookie_domain:
+ description:
+ - Domain that HTTP cookie persistence should apply to.
+ required: false
+
+ http_cookie_age:
+ description:
+ - Time in minutes that client web browsers should keep a cookie. Default is 60 minutes. 0 = no time limit.
+ required: false
+
+ gratuitous_arp_interval:
+ description:
+ - Enable to have the VIP send gratuitous ARPs. 0=disabled. Set from 5 up to 8640000 seconds to enable.
+ required: false
+
+ extport:
+ description:
+ - Incoming port number range that you want to map to a port number range on the destination network.
+ required: false
+
+ extip:
+ description:
+ - IP address or address range on the external interface that you want to map to an address or address range on
+ - the destination network.
+ required: false
+
+ extintf:
+ description:
+ - Interface connected to the source network that receives the packets that will be forwarded to the destination
+ - network.
+ required: false
+
+ extaddr:
+ description:
+ - External FQDN address name.
+ required: false
+
+ dns_mapping_ttl:
+ description:
+ - DNS mapping TTL (Set to zero to use TTL in DNS response, default = 0).
+ required: false
+
+ comment:
+ description:
+ - Comment.
+ required: false
+
+ color:
+ description:
+ - Color of icon on the GUI.
+ required: false
+
+ arp_reply:
+ description:
+ - Enable to respond to ARP requests for this virtual IP address. Enabled by default.
+ - choice | disable | Disable ARP reply.
+ - choice | enable | Enable ARP reply.
+ required: false
+ choices: ["disable", "enable"]
+
+ dynamic_mapping:
+ description:
+ - EXPERTS ONLY! KNOWLEDGE OF FMGR JSON API IS REQUIRED!
+ - List of multiple child objects to be added. Expects a list of dictionaries.
+ - Dictionaries must use FortiManager API parameters, not the ansible ones listed below.
+ - If submitted, all other prefixed sub-parameters ARE IGNORED.
+ - This object is MUTUALLY EXCLUSIVE with its options.
+ - We expect that you know what you are doing with these list parameters, and are leveraging the JSON API Guide.
+ - WHEN IN DOUBT, USE THE SUB OPTIONS BELOW INSTEAD TO CREATE OBJECTS WITH MULTIPLE TASKS + required: false + + dynamic_mapping_arp_reply: + description: + - Dynamic Mapping Version of Suffixed Option Name. Sub-Table. Same Descriptions as Parent. + - choice | disable | + - choice | enable | + required: false + choices: ["disable", "enable"] + + dynamic_mapping_color: + description: + - Dynamic Mapping Version of Suffixed Option Name. Sub-Table. Same Descriptions as Parent. + required: false + + dynamic_mapping_comment: + description: + - Dynamic Mapping Version of Suffixed Option Name. Sub-Table. Same Descriptions as Parent. + required: false + + dynamic_mapping_dns_mapping_ttl: + description: + - Dynamic Mapping Version of Suffixed Option Name. Sub-Table. Same Descriptions as Parent. + required: false + + dynamic_mapping_extaddr: + description: + - Dynamic Mapping Version of Suffixed Option Name. Sub-Table. Same Descriptions as Parent. + required: false + + dynamic_mapping_extintf: + description: + - Dynamic Mapping Version of Suffixed Option Name. Sub-Table. Same Descriptions as Parent. + required: false + + dynamic_mapping_extip: + description: + - Dynamic Mapping Version of Suffixed Option Name. Sub-Table. Same Descriptions as Parent. + required: false + + dynamic_mapping_extport: + description: + - Dynamic Mapping Version of Suffixed Option Name. Sub-Table. Same Descriptions as Parent. + required: false + + dynamic_mapping_gratuitous_arp_interval: + description: + - Dynamic Mapping Version of Suffixed Option Name. Sub-Table. Same Descriptions as Parent. + required: false + + dynamic_mapping_http_cookie_age: + description: + - Dynamic Mapping Version of Suffixed Option Name. Sub-Table. Same Descriptions as Parent. + required: false + + dynamic_mapping_http_cookie_domain: + description: + - Dynamic Mapping Version of Suffixed Option Name. Sub-Table. Same Descriptions as Parent. + required: false + + dynamic_mapping_http_cookie_domain_from_host: + description: + - Dynamic Mapping Version of Suffixed Option Name. Sub-Table. Same Descriptions as Parent. + - choice | disable | + - choice | enable | + required: false + choices: ["disable", "enable"] + + dynamic_mapping_http_cookie_generation: + description: + - Dynamic Mapping Version of Suffixed Option Name. Sub-Table. Same Descriptions as Parent. + required: false + + dynamic_mapping_http_cookie_path: + description: + - Dynamic Mapping Version of Suffixed Option Name. Sub-Table. Same Descriptions as Parent. + required: false + + dynamic_mapping_http_cookie_share: + description: + - Dynamic Mapping Version of Suffixed Option Name. Sub-Table. Same Descriptions as Parent. + - choice | disable | + - choice | same-ip | + required: false + choices: ["disable", "same-ip"] + + dynamic_mapping_http_ip_header: + description: + - Dynamic Mapping Version of Suffixed Option Name. Sub-Table. Same Descriptions as Parent. + - choice | disable | + - choice | enable | + required: false + choices: ["disable", "enable"] + + dynamic_mapping_http_ip_header_name: + description: + - Dynamic Mapping Version of Suffixed Option Name. Sub-Table. Same Descriptions as Parent. + required: false + + dynamic_mapping_http_multiplex: + description: + - Dynamic Mapping Version of Suffixed Option Name. Sub-Table. Same Descriptions as Parent. + - choice | disable | + - choice | enable | + required: false + choices: ["disable", "enable"] + + dynamic_mapping_https_cookie_secure: + description: + - Dynamic Mapping Version of Suffixed Option Name. Sub-Table. 
Same Descriptions as Parent. + - choice | disable | + - choice | enable | + required: false + choices: ["disable", "enable"] + + dynamic_mapping_ldb_method: + description: + - Dynamic Mapping Version of Suffixed Option Name. Sub-Table. Same Descriptions as Parent. + - choice | static | + - choice | round-robin | + - choice | weighted | + - choice | least-session | + - choice | least-rtt | + - choice | first-alive | + - choice | http-host | + required: false + choices: ["static", "round-robin", "weighted", "least-session", "least-rtt", "first-alive", "http-host"] + + dynamic_mapping_mapped_addr: + description: + - Dynamic Mapping Version of Suffixed Option Name. Sub-Table. Same Descriptions as Parent. + required: false + + dynamic_mapping_mappedip: + description: + - Dynamic Mapping Version of Suffixed Option Name. Sub-Table. Same Descriptions as Parent. + required: false + + dynamic_mapping_mappedport: + description: + - Dynamic Mapping Version of Suffixed Option Name. Sub-Table. Same Descriptions as Parent. + required: false + + dynamic_mapping_max_embryonic_connections: + description: + - Dynamic Mapping Version of Suffixed Option Name. Sub-Table. Same Descriptions as Parent. + required: false + + dynamic_mapping_monitor: + description: + - Dynamic Mapping Version of Suffixed Option Name. Sub-Table. Same Descriptions as Parent. + required: false + + dynamic_mapping_nat_source_vip: + description: + - Dynamic Mapping Version of Suffixed Option Name. Sub-Table. Same Descriptions as Parent. + - choice | disable | + - choice | enable | + required: false + choices: ["disable", "enable"] + + dynamic_mapping_outlook_web_access: + description: + - Dynamic Mapping Version of Suffixed Option Name. Sub-Table. Same Descriptions as Parent. + - choice | disable | + - choice | enable | + required: false + choices: ["disable", "enable"] + + dynamic_mapping_persistence: + description: + - Dynamic Mapping Version of Suffixed Option Name. Sub-Table. Same Descriptions as Parent. + - choice | none | + - choice | http-cookie | + - choice | ssl-session-id | + required: false + choices: ["none", "http-cookie", "ssl-session-id"] + + dynamic_mapping_portforward: + description: + - Dynamic Mapping Version of Suffixed Option Name. Sub-Table. Same Descriptions as Parent. + - choice | disable | + - choice | enable | + required: false + choices: ["disable", "enable"] + + dynamic_mapping_portmapping_type: + description: + - Dynamic Mapping Version of Suffixed Option Name. Sub-Table. Same Descriptions as Parent. + - choice | 1-to-1 | + - choice | m-to-n | + required: false + choices: ["1-to-1", "m-to-n"] + + dynamic_mapping_protocol: + description: + - Dynamic Mapping Version of Suffixed Option Name. Sub-Table. Same Descriptions as Parent. + - choice | tcp | + - choice | udp | + - choice | sctp | + - choice | icmp | + required: false + choices: ["tcp", "udp", "sctp", "icmp"] + + dynamic_mapping_server_type: + description: + - Dynamic Mapping Version of Suffixed Option Name. Sub-Table. Same Descriptions as Parent. + - choice | http | + - choice | https | + - choice | ssl | + - choice | tcp | + - choice | udp | + - choice | ip | + - choice | imaps | + - choice | pop3s | + - choice | smtps | + required: false + choices: ["http", "https", "ssl", "tcp", "udp", "ip", "imaps", "pop3s", "smtps"] + + dynamic_mapping_service: + description: + - Dynamic Mapping Version of Suffixed Option Name. Sub-Table. Same Descriptions as Parent. 
+ required: false + + dynamic_mapping_src_filter: + description: + - Dynamic Mapping Version of Suffixed Option Name. Sub-Table. Same Descriptions as Parent. + required: false + + dynamic_mapping_srcintf_filter: + description: + - Dynamic Mapping Version of Suffixed Option Name. Sub-Table. Same Descriptions as Parent. + required: false + + dynamic_mapping_ssl_algorithm: + description: + - Dynamic Mapping Version of Suffixed Option Name. Sub-Table. Same Descriptions as Parent. + - choice | high | + - choice | medium | + - choice | low | + - choice | custom | + required: false + choices: ["high", "medium", "low", "custom"] + + dynamic_mapping_ssl_certificate: + description: + - Dynamic Mapping Version of Suffixed Option Name. Sub-Table. Same Descriptions as Parent. + required: false + + dynamic_mapping_ssl_client_fallback: + description: + - Dynamic Mapping Version of Suffixed Option Name. Sub-Table. Same Descriptions as Parent. + - choice | disable | + - choice | enable | + required: false + choices: ["disable", "enable"] + + dynamic_mapping_ssl_client_renegotiation: + description: + - Dynamic Mapping Version of Suffixed Option Name. Sub-Table. Same Descriptions as Parent. + - choice | deny | + - choice | allow | + - choice | secure | + required: false + choices: ["deny", "allow", "secure"] + + dynamic_mapping_ssl_client_session_state_max: + description: + - Dynamic Mapping Version of Suffixed Option Name. Sub-Table. Same Descriptions as Parent. + required: false + + dynamic_mapping_ssl_client_session_state_timeout: + description: + - Dynamic Mapping Version of Suffixed Option Name. Sub-Table. Same Descriptions as Parent. + required: false + + dynamic_mapping_ssl_client_session_state_type: + description: + - Dynamic Mapping Version of Suffixed Option Name. Sub-Table. Same Descriptions as Parent. + - choice | disable | + - choice | time | + - choice | count | + - choice | both | + required: false + choices: ["disable", "time", "count", "both"] + + dynamic_mapping_ssl_dh_bits: + description: + - Dynamic Mapping Version of Suffixed Option Name. Sub-Table. Same Descriptions as Parent. + - choice | 768 | + - choice | 1024 | + - choice | 1536 | + - choice | 2048 | + - choice | 3072 | + - choice | 4096 | + required: false + choices: ["768", "1024", "1536", "2048", "3072", "4096"] + + dynamic_mapping_ssl_hpkp: + description: + - Dynamic Mapping Version of Suffixed Option Name. Sub-Table. Same Descriptions as Parent. + - choice | disable | + - choice | enable | + - choice | report-only | + required: false + choices: ["disable", "enable", "report-only"] + + dynamic_mapping_ssl_hpkp_age: + description: + - Dynamic Mapping Version of Suffixed Option Name. Sub-Table. Same Descriptions as Parent. + required: false + + dynamic_mapping_ssl_hpkp_backup: + description: + - Dynamic Mapping Version of Suffixed Option Name. Sub-Table. Same Descriptions as Parent. + required: false + + dynamic_mapping_ssl_hpkp_include_subdomains: + description: + - Dynamic Mapping Version of Suffixed Option Name. Sub-Table. Same Descriptions as Parent. + - choice | disable | + - choice | enable | + required: false + choices: ["disable", "enable"] + + dynamic_mapping_ssl_hpkp_primary: + description: + - Dynamic Mapping Version of Suffixed Option Name. Sub-Table. Same Descriptions as Parent. + required: false + + dynamic_mapping_ssl_hpkp_report_uri: + description: + - Dynamic Mapping Version of Suffixed Option Name. Sub-Table. Same Descriptions as Parent. 
+ required: false + + dynamic_mapping_ssl_hsts: + description: + - Dynamic Mapping Version of Suffixed Option Name. Sub-Table. Same Descriptions as Parent. + - choice | disable | + - choice | enable | + required: false + choices: ["disable", "enable"] + + dynamic_mapping_ssl_hsts_age: + description: + - Dynamic Mapping Version of Suffixed Option Name. Sub-Table. Same Descriptions as Parent. + required: false + + dynamic_mapping_ssl_hsts_include_subdomains: + description: + - Dynamic Mapping Version of Suffixed Option Name. Sub-Table. Same Descriptions as Parent. + - choice | disable | + - choice | enable | + required: false + choices: ["disable", "enable"] + + dynamic_mapping_ssl_http_location_conversion: + description: + - Dynamic Mapping Version of Suffixed Option Name. Sub-Table. Same Descriptions as Parent. + - choice | disable | + - choice | enable | + required: false + choices: ["disable", "enable"] + + dynamic_mapping_ssl_http_match_host: + description: + - Dynamic Mapping Version of Suffixed Option Name. Sub-Table. Same Descriptions as Parent. + - choice | disable | + - choice | enable | + required: false + choices: ["disable", "enable"] + + dynamic_mapping_ssl_max_version: + description: + - Dynamic Mapping Version of Suffixed Option Name. Sub-Table. Same Descriptions as Parent. + - choice | ssl-3.0 | + - choice | tls-1.0 | + - choice | tls-1.1 | + - choice | tls-1.2 | + required: false + choices: ["ssl-3.0", "tls-1.0", "tls-1.1", "tls-1.2"] + + dynamic_mapping_ssl_min_version: + description: + - Dynamic Mapping Version of Suffixed Option Name. Sub-Table. Same Descriptions as Parent. + - choice | ssl-3.0 | + - choice | tls-1.0 | + - choice | tls-1.1 | + - choice | tls-1.2 | + required: false + choices: ["ssl-3.0", "tls-1.0", "tls-1.1", "tls-1.2"] + + dynamic_mapping_ssl_mode: + description: + - Dynamic Mapping Version of Suffixed Option Name. Sub-Table. Same Descriptions as Parent. + - choice | half | + - choice | full | + required: false + choices: ["half", "full"] + + dynamic_mapping_ssl_pfs: + description: + - Dynamic Mapping Version of Suffixed Option Name. Sub-Table. Same Descriptions as Parent. + - choice | require | + - choice | deny | + - choice | allow | + required: false + choices: ["require", "deny", "allow"] + + dynamic_mapping_ssl_send_empty_frags: + description: + - Dynamic Mapping Version of Suffixed Option Name. Sub-Table. Same Descriptions as Parent. + - choice | disable | + - choice | enable | + required: false + choices: ["disable", "enable"] + + dynamic_mapping_ssl_server_algorithm: + description: + - Dynamic Mapping Version of Suffixed Option Name. Sub-Table. Same Descriptions as Parent. + - choice | high | + - choice | low | + - choice | medium | + - choice | custom | + - choice | client | + required: false + choices: ["high", "low", "medium", "custom", "client"] + + dynamic_mapping_ssl_server_max_version: + description: + - Dynamic Mapping Version of Suffixed Option Name. Sub-Table. Same Descriptions as Parent. + - choice | ssl-3.0 | + - choice | tls-1.0 | + - choice | tls-1.1 | + - choice | tls-1.2 | + - choice | client | + required: false + choices: ["ssl-3.0", "tls-1.0", "tls-1.1", "tls-1.2", "client"] + + dynamic_mapping_ssl_server_min_version: + description: + - Dynamic Mapping Version of Suffixed Option Name. Sub-Table. Same Descriptions as Parent. 
+ - choice | ssl-3.0 | + - choice | tls-1.0 | + - choice | tls-1.1 | + - choice | tls-1.2 | + - choice | client | + required: false + choices: ["ssl-3.0", "tls-1.0", "tls-1.1", "tls-1.2", "client"] + + dynamic_mapping_ssl_server_session_state_max: + description: + - Dynamic Mapping Version of Suffixed Option Name. Sub-Table. Same Descriptions as Parent. + required: false + + dynamic_mapping_ssl_server_session_state_timeout: + description: + - Dynamic Mapping Version of Suffixed Option Name. Sub-Table. Same Descriptions as Parent. + required: false + + dynamic_mapping_ssl_server_session_state_type: + description: + - Dynamic Mapping Version of Suffixed Option Name. Sub-Table. Same Descriptions as Parent. + - choice | disable | + - choice | time | + - choice | count | + - choice | both | + required: false + choices: ["disable", "time", "count", "both"] + + dynamic_mapping_type: + description: + - Dynamic Mapping Version of Suffixed Option Name. Sub-Table. Same Descriptions as Parent. + - choice | static-nat | + - choice | load-balance | + - choice | server-load-balance | + - choice | dns-translation | + - choice | fqdn | + required: false + choices: ["static-nat", "load-balance", "server-load-balance", "dns-translation", "fqdn"] + + dynamic_mapping_weblogic_server: + description: + - Dynamic Mapping Version of Suffixed Option Name. Sub-Table. Same Descriptions as Parent. + - choice | disable | + - choice | enable | + required: false + choices: ["disable", "enable"] + + dynamic_mapping_websphere_server: + description: + - Dynamic Mapping Version of Suffixed Option Name. Sub-Table. Same Descriptions as Parent. + - choice | disable | + - choice | enable | + required: false + choices: ["disable", "enable"] + + dynamic_mapping_realservers_client_ip: + description: + - Dynamic Mapping Version of Suffixed Option Name. Sub-Table. Same Descriptions as Parent. + required: false + + dynamic_mapping_realservers_healthcheck: + description: + - Dynamic Mapping Version of Suffixed Option Name. Sub-Table. Same Descriptions as Parent. + - choice | disable | + - choice | enable | + - choice | vip | + required: false + choices: ["disable", "enable", "vip"] + + dynamic_mapping_realservers_holddown_interval: + description: + - Dynamic Mapping Version of Suffixed Option Name. Sub-Table. Same Descriptions as Parent. + required: false + + dynamic_mapping_realservers_http_host: + description: + - Dynamic Mapping Version of Suffixed Option Name. Sub-Table. Same Descriptions as Parent. + required: false + + dynamic_mapping_realservers_ip: + description: + - Dynamic Mapping Version of Suffixed Option Name. Sub-Table. Same Descriptions as Parent. + required: false + + dynamic_mapping_realservers_max_connections: + description: + - Dynamic Mapping Version of Suffixed Option Name. Sub-Table. Same Descriptions as Parent. + required: false + + dynamic_mapping_realservers_monitor: + description: + - Dynamic Mapping Version of Suffixed Option Name. Sub-Table. Same Descriptions as Parent. + required: false + + dynamic_mapping_realservers_port: + description: + - Dynamic Mapping Version of Suffixed Option Name. Sub-Table. Same Descriptions as Parent. + required: false + + dynamic_mapping_realservers_seq: + description: + - Dynamic Mapping Version of Suffixed Option Name. Sub-Table. Same Descriptions as Parent. + required: false + + dynamic_mapping_realservers_status: + description: + - Dynamic Mapping Version of Suffixed Option Name. Sub-Table. Same Descriptions as Parent. 
+ - choice | active | + - choice | standby | + - choice | disable | + required: false + choices: ["active", "standby", "disable"] + + dynamic_mapping_realservers_weight: + description: + - Dynamic Mapping Version of Suffixed Option Name. Sub-Table. Same Descriptions as Parent. + required: false + + dynamic_mapping_ssl_cipher_suites_cipher: + description: + - Dynamic Mapping Version of Suffixed Option Name. Sub-Table. Same Descriptions as Parent. + - choice | TLS-RSA-WITH-RC4-128-MD5 | + - choice | TLS-RSA-WITH-RC4-128-SHA | + - choice | TLS-RSA-WITH-DES-CBC-SHA | + - choice | TLS-RSA-WITH-3DES-EDE-CBC-SHA | + - choice | TLS-RSA-WITH-AES-128-CBC-SHA | + - choice | TLS-RSA-WITH-AES-256-CBC-SHA | + - choice | TLS-RSA-WITH-AES-128-CBC-SHA256 | + - choice | TLS-RSA-WITH-AES-256-CBC-SHA256 | + - choice | TLS-RSA-WITH-CAMELLIA-128-CBC-SHA | + - choice | TLS-RSA-WITH-CAMELLIA-256-CBC-SHA | + - choice | TLS-RSA-WITH-CAMELLIA-128-CBC-SHA256 | + - choice | TLS-RSA-WITH-CAMELLIA-256-CBC-SHA256 | + - choice | TLS-RSA-WITH-SEED-CBC-SHA | + - choice | TLS-RSA-WITH-ARIA-128-CBC-SHA256 | + - choice | TLS-RSA-WITH-ARIA-256-CBC-SHA384 | + - choice | TLS-DHE-RSA-WITH-DES-CBC-SHA | + - choice | TLS-DHE-RSA-WITH-3DES-EDE-CBC-SHA | + - choice | TLS-DHE-RSA-WITH-AES-128-CBC-SHA | + - choice | TLS-DHE-RSA-WITH-AES-256-CBC-SHA | + - choice | TLS-DHE-RSA-WITH-AES-128-CBC-SHA256 | + - choice | TLS-DHE-RSA-WITH-AES-256-CBC-SHA256 | + - choice | TLS-DHE-RSA-WITH-CAMELLIA-128-CBC-SHA | + - choice | TLS-DHE-RSA-WITH-CAMELLIA-256-CBC-SHA | + - choice | TLS-DHE-RSA-WITH-CAMELLIA-128-CBC-SHA256 | + - choice | TLS-DHE-RSA-WITH-CAMELLIA-256-CBC-SHA256 | + - choice | TLS-DHE-RSA-WITH-SEED-CBC-SHA | + - choice | TLS-DHE-RSA-WITH-ARIA-128-CBC-SHA256 | + - choice | TLS-DHE-RSA-WITH-ARIA-256-CBC-SHA384 | + - choice | TLS-ECDHE-RSA-WITH-RC4-128-SHA | + - choice | TLS-ECDHE-RSA-WITH-3DES-EDE-CBC-SHA | + - choice | TLS-ECDHE-RSA-WITH-AES-128-CBC-SHA | + - choice | TLS-ECDHE-RSA-WITH-AES-256-CBC-SHA | + - choice | TLS-ECDHE-RSA-WITH-CHACHA20-POLY1305-SHA256 | + - choice | TLS-ECDHE-ECDSA-WITH-CHACHA20-POLY1305-SHA256 | + - choice | TLS-DHE-RSA-WITH-CHACHA20-POLY1305-SHA256 | + - choice | TLS-DHE-RSA-WITH-AES-128-GCM-SHA256 | + - choice | TLS-DHE-RSA-WITH-AES-256-GCM-SHA384 | + - choice | TLS-DHE-DSS-WITH-AES-128-CBC-SHA | + - choice | TLS-DHE-DSS-WITH-AES-256-CBC-SHA | + - choice | TLS-DHE-DSS-WITH-AES-128-CBC-SHA256 | + - choice | TLS-DHE-DSS-WITH-AES-128-GCM-SHA256 | + - choice | TLS-DHE-DSS-WITH-AES-256-CBC-SHA256 | + - choice | TLS-DHE-DSS-WITH-AES-256-GCM-SHA384 | + - choice | TLS-ECDHE-RSA-WITH-AES-128-CBC-SHA256 | + - choice | TLS-ECDHE-RSA-WITH-AES-128-GCM-SHA256 | + - choice | TLS-ECDHE-RSA-WITH-AES-256-CBC-SHA384 | + - choice | TLS-ECDHE-RSA-WITH-AES-256-GCM-SHA384 | + - choice | TLS-ECDHE-ECDSA-WITH-AES-128-CBC-SHA | + - choice | TLS-ECDHE-ECDSA-WITH-AES-128-CBC-SHA256 | + - choice | TLS-ECDHE-ECDSA-WITH-AES-128-GCM-SHA256 | + - choice | TLS-ECDHE-ECDSA-WITH-AES-256-CBC-SHA384 | + - choice | TLS-ECDHE-ECDSA-WITH-AES-256-GCM-SHA384 | + - choice | TLS-RSA-WITH-AES-128-GCM-SHA256 | + - choice | TLS-RSA-WITH-AES-256-GCM-SHA384 | + - choice | TLS-DHE-DSS-WITH-CAMELLIA-128-CBC-SHA | + - choice | TLS-DHE-DSS-WITH-CAMELLIA-256-CBC-SHA | + - choice | TLS-DHE-DSS-WITH-CAMELLIA-128-CBC-SHA256 | + - choice | TLS-DHE-DSS-WITH-CAMELLIA-256-CBC-SHA256 | + - choice | TLS-DHE-DSS-WITH-SEED-CBC-SHA | + - choice | TLS-DHE-DSS-WITH-ARIA-128-CBC-SHA256 | + - choice | TLS-DHE-DSS-WITH-ARIA-256-CBC-SHA384 | + - choice | 
TLS-ECDHE-RSA-WITH-ARIA-128-CBC-SHA256 | + - choice | TLS-ECDHE-RSA-WITH-ARIA-256-CBC-SHA384 | + - choice | TLS-ECDHE-ECDSA-WITH-ARIA-128-CBC-SHA256 | + - choice | TLS-ECDHE-ECDSA-WITH-ARIA-256-CBC-SHA384 | + - choice | TLS-DHE-DSS-WITH-3DES-EDE-CBC-SHA | + - choice | TLS-DHE-DSS-WITH-DES-CBC-SHA | + required: false + choices: ["TLS-RSA-WITH-RC4-128-MD5", + "TLS-RSA-WITH-RC4-128-SHA", + "TLS-RSA-WITH-DES-CBC-SHA", + "TLS-RSA-WITH-3DES-EDE-CBC-SHA", + "TLS-RSA-WITH-AES-128-CBC-SHA", + "TLS-RSA-WITH-AES-256-CBC-SHA", + "TLS-RSA-WITH-AES-128-CBC-SHA256", + "TLS-RSA-WITH-AES-256-CBC-SHA256", + "TLS-RSA-WITH-CAMELLIA-128-CBC-SHA", + "TLS-RSA-WITH-CAMELLIA-256-CBC-SHA", + "TLS-RSA-WITH-CAMELLIA-128-CBC-SHA256", + "TLS-RSA-WITH-CAMELLIA-256-CBC-SHA256", + "TLS-RSA-WITH-SEED-CBC-SHA", + "TLS-RSA-WITH-ARIA-128-CBC-SHA256", + "TLS-RSA-WITH-ARIA-256-CBC-SHA384", + "TLS-DHE-RSA-WITH-DES-CBC-SHA", + "TLS-DHE-RSA-WITH-3DES-EDE-CBC-SHA", + "TLS-DHE-RSA-WITH-AES-128-CBC-SHA", + "TLS-DHE-RSA-WITH-AES-256-CBC-SHA", + "TLS-DHE-RSA-WITH-AES-128-CBC-SHA256", + "TLS-DHE-RSA-WITH-AES-256-CBC-SHA256", + "TLS-DHE-RSA-WITH-CAMELLIA-128-CBC-SHA", + "TLS-DHE-RSA-WITH-CAMELLIA-256-CBC-SHA", + "TLS-DHE-RSA-WITH-CAMELLIA-128-CBC-SHA256", + "TLS-DHE-RSA-WITH-CAMELLIA-256-CBC-SHA256", + "TLS-DHE-RSA-WITH-SEED-CBC-SHA", + "TLS-DHE-RSA-WITH-ARIA-128-CBC-SHA256", + "TLS-DHE-RSA-WITH-ARIA-256-CBC-SHA384", + "TLS-ECDHE-RSA-WITH-RC4-128-SHA", + "TLS-ECDHE-RSA-WITH-3DES-EDE-CBC-SHA", + "TLS-ECDHE-RSA-WITH-AES-128-CBC-SHA", + "TLS-ECDHE-RSA-WITH-AES-256-CBC-SHA", + "TLS-ECDHE-RSA-WITH-CHACHA20-POLY1305-SHA256", + "TLS-ECDHE-ECDSA-WITH-CHACHA20-POLY1305-SHA256", + "TLS-DHE-RSA-WITH-CHACHA20-POLY1305-SHA256", + "TLS-DHE-RSA-WITH-AES-128-GCM-SHA256", + "TLS-DHE-RSA-WITH-AES-256-GCM-SHA384", + "TLS-DHE-DSS-WITH-AES-128-CBC-SHA", + "TLS-DHE-DSS-WITH-AES-256-CBC-SHA", + "TLS-DHE-DSS-WITH-AES-128-CBC-SHA256", + "TLS-DHE-DSS-WITH-AES-128-GCM-SHA256", + "TLS-DHE-DSS-WITH-AES-256-CBC-SHA256", + "TLS-DHE-DSS-WITH-AES-256-GCM-SHA384", + "TLS-ECDHE-RSA-WITH-AES-128-CBC-SHA256", + "TLS-ECDHE-RSA-WITH-AES-128-GCM-SHA256", + "TLS-ECDHE-RSA-WITH-AES-256-CBC-SHA384", + "TLS-ECDHE-RSA-WITH-AES-256-GCM-SHA384", + "TLS-ECDHE-ECDSA-WITH-AES-128-CBC-SHA", + "TLS-ECDHE-ECDSA-WITH-AES-128-CBC-SHA256", + "TLS-ECDHE-ECDSA-WITH-AES-128-GCM-SHA256", + "TLS-ECDHE-ECDSA-WITH-AES-256-CBC-SHA384", + "TLS-ECDHE-ECDSA-WITH-AES-256-GCM-SHA384", + "TLS-RSA-WITH-AES-128-GCM-SHA256", + "TLS-RSA-WITH-AES-256-GCM-SHA384", + "TLS-DHE-DSS-WITH-CAMELLIA-128-CBC-SHA", + "TLS-DHE-DSS-WITH-CAMELLIA-256-CBC-SHA", + "TLS-DHE-DSS-WITH-CAMELLIA-128-CBC-SHA256", + "TLS-DHE-DSS-WITH-CAMELLIA-256-CBC-SHA256", + "TLS-DHE-DSS-WITH-SEED-CBC-SHA", + "TLS-DHE-DSS-WITH-ARIA-128-CBC-SHA256", + "TLS-DHE-DSS-WITH-ARIA-256-CBC-SHA384", + "TLS-ECDHE-RSA-WITH-ARIA-128-CBC-SHA256", + "TLS-ECDHE-RSA-WITH-ARIA-256-CBC-SHA384", + "TLS-ECDHE-ECDSA-WITH-ARIA-128-CBC-SHA256", + "TLS-ECDHE-ECDSA-WITH-ARIA-256-CBC-SHA384", + "TLS-DHE-DSS-WITH-3DES-EDE-CBC-SHA", + "TLS-DHE-DSS-WITH-DES-CBC-SHA"] + + dynamic_mapping_ssl_cipher_suites_versions: + description: + - Dynamic Mapping Version of Suffixed Option Name. Sub-Table. Same Descriptions as Parent. + - FLAG Based Options. Specify multiple in list form. + - flag | ssl-3.0 | + - flag | tls-1.0 | + - flag | tls-1.1 | + - flag | tls-1.2 | + required: false + choices: ["ssl-3.0", "tls-1.0", "tls-1.1", "tls-1.2"] + + realservers: + description: + - EXPERTS ONLY! KNOWLEDGE OF FMGR JSON API IS REQUIRED! + - List of multiple child objects to be added. 
Expects a list of dictionaries.
+      - Dictionaries must use FortiManager API parameters, not the ansible ones listed below.
+      - If submitted, all other prefixed sub-parameters ARE IGNORED.
+      - This object is MUTUALLY EXCLUSIVE with its options.
+      - We expect that you know what you are doing with these list parameters, and are leveraging the JSON API Guide.
+      - WHEN IN DOUBT, USE THE SUB OPTIONS BELOW INSTEAD TO CREATE OBJECTS WITH MULTIPLE TASKS
+    required: false
+
+  realservers_client_ip:
+    description:
+      - Only clients in this IP range can connect to this real server.
+    required: false
+
+  realservers_healthcheck:
+    description:
+      - Enable to check the responsiveness of the real server before forwarding traffic.
+      - choice | disable | Disable per server health check.
+      - choice | enable | Enable per server health check.
+      - choice | vip | Use health check defined in VIP.
+    required: false
+    choices: ["disable", "enable", "vip"]
+
+  realservers_holddown_interval:
+    description:
+      - Time in seconds that the health check monitor continues to monitor an unresponsive server that should be active.
+    required: false
+
+  realservers_http_host:
+    description:
+      - HTTP server domain name in HTTP header.
+    required: false
+
+  realservers_ip:
+    description:
+      - IP address of the real server.
+    required: false
+
+  realservers_max_connections:
+    description:
+      - Max number of active connections that can be directed to the real server. When reached, sessions are sent to
+      - other real servers.
+    required: false
+
+  realservers_monitor:
+    description:
+      - Name of the health check monitor to use when polling to determine a virtual server's connectivity status.
+    required: false
+
+  realservers_port:
+    description:
+      - Port for communicating with the real server. Required if port forwarding is enabled.
+    required: false
+
+  realservers_seq:
+    description:
+      - Real Server Sequence Number.
+    required: false
+
+  realservers_status:
+    description:
+      - Set the status of the real server to active so that it can accept traffic.
+      - Or set it to standby or disable so that no traffic is sent.
+      - choice | active | Server status active.
+      - choice | standby | Server status standby.
+      - choice | disable | Server status disable.
+    required: false
+    choices: ["active", "standby", "disable"]
+
+  realservers_weight:
+    description:
+      - Weight of the real server. If weighted load balancing is enabled, the server with the highest weight gets more
+      - connections.
+    required: false
+
+  ssl_cipher_suites:
+    description:
+      - EXPERTS ONLY! KNOWLEDGE OF FMGR JSON API IS REQUIRED!
+      - List of multiple child objects to be added. Expects a list of dictionaries.
+      - Dictionaries must use FortiManager API parameters, not the ansible ones listed below.
+      - If submitted, all other prefixed sub-parameters ARE IGNORED.
+      - This object is MUTUALLY EXCLUSIVE with its options.
+      - We expect that you know what you are doing with these list parameters, and are leveraging the JSON API Guide.
+      - WHEN IN DOUBT, USE THE SUB OPTIONS BELOW INSTEAD TO CREATE OBJECTS WITH MULTIPLE TASKS
+    required: false
+
+  ssl_cipher_suites_cipher:
+    description:
+      - Cipher suite name.
+      - choice | TLS-RSA-WITH-RC4-128-MD5 | Cipher suite TLS-RSA-WITH-RC4-128-MD5.
+      - choice | TLS-RSA-WITH-RC4-128-SHA | Cipher suite TLS-RSA-WITH-RC4-128-SHA.
+      - choice | TLS-RSA-WITH-DES-CBC-SHA | Cipher suite TLS-RSA-WITH-DES-CBC-SHA.
+      - choice | TLS-RSA-WITH-3DES-EDE-CBC-SHA | Cipher suite TLS-RSA-WITH-3DES-EDE-CBC-SHA.
+ - choice | TLS-RSA-WITH-AES-128-CBC-SHA | Cipher suite TLS-RSA-WITH-AES-128-CBC-SHA. + - choice | TLS-RSA-WITH-AES-256-CBC-SHA | Cipher suite TLS-RSA-WITH-AES-256-CBC-SHA. + - choice | TLS-RSA-WITH-AES-128-CBC-SHA256 | Cipher suite TLS-RSA-WITH-AES-128-CBC-SHA256. + - choice | TLS-RSA-WITH-AES-256-CBC-SHA256 | Cipher suite TLS-RSA-WITH-AES-256-CBC-SHA256. + - choice | TLS-RSA-WITH-CAMELLIA-128-CBC-SHA | Cipher suite TLS-RSA-WITH-CAMELLIA-128-CBC-SHA. + - choice | TLS-RSA-WITH-CAMELLIA-256-CBC-SHA | Cipher suite TLS-RSA-WITH-CAMELLIA-256-CBC-SHA. + - choice | TLS-RSA-WITH-CAMELLIA-128-CBC-SHA256 | Cipher suite TLS-RSA-WITH-CAMELLIA-128-CBC-SHA256. + - choice | TLS-RSA-WITH-CAMELLIA-256-CBC-SHA256 | Cipher suite TLS-RSA-WITH-CAMELLIA-256-CBC-SHA256. + - choice | TLS-RSA-WITH-SEED-CBC-SHA | Cipher suite TLS-RSA-WITH-SEED-CBC-SHA. + - choice | TLS-RSA-WITH-ARIA-128-CBC-SHA256 | Cipher suite TLS-RSA-WITH-ARIA-128-CBC-SHA256. + - choice | TLS-RSA-WITH-ARIA-256-CBC-SHA384 | Cipher suite TLS-RSA-WITH-ARIA-256-CBC-SHA384. + - choice | TLS-DHE-RSA-WITH-DES-CBC-SHA | Cipher suite TLS-DHE-RSA-WITH-DES-CBC-SHA. + - choice | TLS-DHE-RSA-WITH-3DES-EDE-CBC-SHA | Cipher suite TLS-DHE-RSA-WITH-3DES-EDE-CBC-SHA. + - choice | TLS-DHE-RSA-WITH-AES-128-CBC-SHA | Cipher suite TLS-DHE-RSA-WITH-AES-128-CBC-SHA. + - choice | TLS-DHE-RSA-WITH-AES-256-CBC-SHA | Cipher suite TLS-DHE-RSA-WITH-AES-256-CBC-SHA. + - choice | TLS-DHE-RSA-WITH-AES-128-CBC-SHA256 | Cipher suite TLS-DHE-RSA-WITH-AES-128-CBC-SHA256. + - choice | TLS-DHE-RSA-WITH-AES-256-CBC-SHA256 | Cipher suite TLS-DHE-RSA-WITH-AES-256-CBC-SHA256. + - choice | TLS-DHE-RSA-WITH-CAMELLIA-128-CBC-SHA | Cipher suite TLS-DHE-RSA-WITH-CAMELLIA-128-CBC-SHA. + - choice | TLS-DHE-RSA-WITH-CAMELLIA-256-CBC-SHA | Cipher suite TLS-DHE-RSA-WITH-CAMELLIA-256-CBC-SHA. + - choice | TLS-DHE-RSA-WITH-CAMELLIA-128-CBC-SHA256 | Cipher suite TLS-DHE-RSA-WITH-CAMELLIA-128-CBC-SHA256. + - choice | TLS-DHE-RSA-WITH-CAMELLIA-256-CBC-SHA256 | Cipher suite TLS-DHE-RSA-WITH-CAMELLIA-256-CBC-SHA256. + - choice | TLS-DHE-RSA-WITH-SEED-CBC-SHA | Cipher suite TLS-DHE-RSA-WITH-SEED-CBC-SHA. + - choice | TLS-DHE-RSA-WITH-ARIA-128-CBC-SHA256 | Cipher suite TLS-DHE-RSA-WITH-ARIA-128-CBC-SHA256. + - choice | TLS-DHE-RSA-WITH-ARIA-256-CBC-SHA384 | Cipher suite TLS-DHE-RSA-WITH-ARIA-256-CBC-SHA384. + - choice | TLS-ECDHE-RSA-WITH-RC4-128-SHA | Cipher suite TLS-ECDHE-RSA-WITH-RC4-128-SHA. + - choice | TLS-ECDHE-RSA-WITH-3DES-EDE-CBC-SHA | Cipher suite TLS-ECDHE-RSA-WITH-3DES-EDE-CBC-SHA. + - choice | TLS-ECDHE-RSA-WITH-AES-128-CBC-SHA | Cipher suite TLS-ECDHE-RSA-WITH-AES-128-CBC-SHA. + - choice | TLS-ECDHE-RSA-WITH-AES-256-CBC-SHA | Cipher suite TLS-ECDHE-RSA-WITH-AES-256-CBC-SHA. + - choice | TLS-ECDHE-RSA-WITH-CHACHA20-POLY1305-SHA256 | Cipher suite TLS-ECDHE-RSA-WITH-CHACHA20-POLY1305-SHA256. + - choice | TLS-ECDHE-ECDSA-WITH-CHACHA20-POLY1305-SHA256 | Cipher suite TLS-ECDHE-ECDSA-WITH-CHACHA20-POLY1305-SHA256. + - choice | TLS-DHE-RSA-WITH-CHACHA20-POLY1305-SHA256 | Cipher suite TLS-DHE-RSA-WITH-CHACHA20-POLY1305-SHA256. + - choice | TLS-DHE-RSA-WITH-AES-128-GCM-SHA256 | Cipher suite TLS-DHE-RSA-WITH-AES-128-GCM-SHA256. + - choice | TLS-DHE-RSA-WITH-AES-256-GCM-SHA384 | Cipher suite TLS-DHE-RSA-WITH-AES-256-GCM-SHA384. + - choice | TLS-DHE-DSS-WITH-AES-128-CBC-SHA | Cipher suite TLS-DHE-DSS-WITH-AES-128-CBC-SHA. + - choice | TLS-DHE-DSS-WITH-AES-256-CBC-SHA | Cipher suite TLS-DHE-DSS-WITH-AES-256-CBC-SHA. 
+      - choice | TLS-DHE-DSS-WITH-AES-128-CBC-SHA256 | Cipher suite TLS-DHE-DSS-WITH-AES-128-CBC-SHA256.
+      - choice | TLS-DHE-DSS-WITH-AES-128-GCM-SHA256 | Cipher suite TLS-DHE-DSS-WITH-AES-128-GCM-SHA256.
+      - choice | TLS-DHE-DSS-WITH-AES-256-CBC-SHA256 | Cipher suite TLS-DHE-DSS-WITH-AES-256-CBC-SHA256.
+      - choice | TLS-DHE-DSS-WITH-AES-256-GCM-SHA384 | Cipher suite TLS-DHE-DSS-WITH-AES-256-GCM-SHA384.
+      - choice | TLS-ECDHE-RSA-WITH-AES-128-CBC-SHA256 | Cipher suite TLS-ECDHE-RSA-WITH-AES-128-CBC-SHA256.
+      - choice | TLS-ECDHE-RSA-WITH-AES-128-GCM-SHA256 | Cipher suite TLS-ECDHE-RSA-WITH-AES-128-GCM-SHA256.
+      - choice | TLS-ECDHE-RSA-WITH-AES-256-CBC-SHA384 | Cipher suite TLS-ECDHE-RSA-WITH-AES-256-CBC-SHA384.
+      - choice | TLS-ECDHE-RSA-WITH-AES-256-GCM-SHA384 | Cipher suite TLS-ECDHE-RSA-WITH-AES-256-GCM-SHA384.
+      - choice | TLS-ECDHE-ECDSA-WITH-AES-128-CBC-SHA | Cipher suite TLS-ECDHE-ECDSA-WITH-AES-128-CBC-SHA.
+      - choice | TLS-ECDHE-ECDSA-WITH-AES-128-CBC-SHA256 | Cipher suite TLS-ECDHE-ECDSA-WITH-AES-128-CBC-SHA256.
+      - choice | TLS-ECDHE-ECDSA-WITH-AES-128-GCM-SHA256 | Cipher suite TLS-ECDHE-ECDSA-WITH-AES-128-GCM-SHA256.
+      - choice | TLS-ECDHE-ECDSA-WITH-AES-256-CBC-SHA384 | Cipher suite TLS-ECDHE-ECDSA-WITH-AES-256-CBC-SHA384.
+      - choice | TLS-ECDHE-ECDSA-WITH-AES-256-GCM-SHA384 | Cipher suite TLS-ECDHE-ECDSA-WITH-AES-256-GCM-SHA384.
+      - choice | TLS-RSA-WITH-AES-128-GCM-SHA256 | Cipher suite TLS-RSA-WITH-AES-128-GCM-SHA256.
+      - choice | TLS-RSA-WITH-AES-256-GCM-SHA384 | Cipher suite TLS-RSA-WITH-AES-256-GCM-SHA384.
+      - choice | TLS-DHE-DSS-WITH-CAMELLIA-128-CBC-SHA | Cipher suite TLS-DHE-DSS-WITH-CAMELLIA-128-CBC-SHA.
+      - choice | TLS-DHE-DSS-WITH-CAMELLIA-256-CBC-SHA | Cipher suite TLS-DHE-DSS-WITH-CAMELLIA-256-CBC-SHA.
+      - choice | TLS-DHE-DSS-WITH-CAMELLIA-128-CBC-SHA256 | Cipher suite TLS-DHE-DSS-WITH-CAMELLIA-128-CBC-SHA256.
+      - choice | TLS-DHE-DSS-WITH-CAMELLIA-256-CBC-SHA256 | Cipher suite TLS-DHE-DSS-WITH-CAMELLIA-256-CBC-SHA256.
+      - choice | TLS-DHE-DSS-WITH-SEED-CBC-SHA | Cipher suite TLS-DHE-DSS-WITH-SEED-CBC-SHA.
+      - choice | TLS-DHE-DSS-WITH-ARIA-128-CBC-SHA256 | Cipher suite TLS-DHE-DSS-WITH-ARIA-128-CBC-SHA256.
+      - choice | TLS-DHE-DSS-WITH-ARIA-256-CBC-SHA384 | Cipher suite TLS-DHE-DSS-WITH-ARIA-256-CBC-SHA384.
+      - choice | TLS-ECDHE-RSA-WITH-ARIA-128-CBC-SHA256 | Cipher suite TLS-ECDHE-RSA-WITH-ARIA-128-CBC-SHA256.
+      - choice | TLS-ECDHE-RSA-WITH-ARIA-256-CBC-SHA384 | Cipher suite TLS-ECDHE-RSA-WITH-ARIA-256-CBC-SHA384.
+      - choice | TLS-ECDHE-ECDSA-WITH-ARIA-128-CBC-SHA256 | Cipher suite TLS-ECDHE-ECDSA-WITH-ARIA-128-CBC-SHA256.
+      - choice | TLS-ECDHE-ECDSA-WITH-ARIA-256-CBC-SHA384 | Cipher suite TLS-ECDHE-ECDSA-WITH-ARIA-256-CBC-SHA384.
+      - choice | TLS-DHE-DSS-WITH-3DES-EDE-CBC-SHA | Cipher suite TLS-DHE-DSS-WITH-3DES-EDE-CBC-SHA.
+      - choice | TLS-DHE-DSS-WITH-DES-CBC-SHA | Cipher suite TLS-DHE-DSS-WITH-DES-CBC-SHA.
+ required: false + choices: ["TLS-RSA-WITH-RC4-128-MD5", + "TLS-RSA-WITH-RC4-128-SHA", + "TLS-RSA-WITH-DES-CBC-SHA", + "TLS-RSA-WITH-3DES-EDE-CBC-SHA", + "TLS-RSA-WITH-AES-128-CBC-SHA", + "TLS-RSA-WITH-AES-256-CBC-SHA", + "TLS-RSA-WITH-AES-128-CBC-SHA256", + "TLS-RSA-WITH-AES-256-CBC-SHA256", + "TLS-RSA-WITH-CAMELLIA-128-CBC-SHA", + "TLS-RSA-WITH-CAMELLIA-256-CBC-SHA", + "TLS-RSA-WITH-CAMELLIA-128-CBC-SHA256", + "TLS-RSA-WITH-CAMELLIA-256-CBC-SHA256", + "TLS-RSA-WITH-SEED-CBC-SHA", + "TLS-RSA-WITH-ARIA-128-CBC-SHA256", + "TLS-RSA-WITH-ARIA-256-CBC-SHA384", + "TLS-DHE-RSA-WITH-DES-CBC-SHA", + "TLS-DHE-RSA-WITH-3DES-EDE-CBC-SHA", + "TLS-DHE-RSA-WITH-AES-128-CBC-SHA", + "TLS-DHE-RSA-WITH-AES-256-CBC-SHA", + "TLS-DHE-RSA-WITH-AES-128-CBC-SHA256", + "TLS-DHE-RSA-WITH-AES-256-CBC-SHA256", + "TLS-DHE-RSA-WITH-CAMELLIA-128-CBC-SHA", + "TLS-DHE-RSA-WITH-CAMELLIA-256-CBC-SHA", + "TLS-DHE-RSA-WITH-CAMELLIA-128-CBC-SHA256", + "TLS-DHE-RSA-WITH-CAMELLIA-256-CBC-SHA256", + "TLS-DHE-RSA-WITH-SEED-CBC-SHA", + "TLS-DHE-RSA-WITH-ARIA-128-CBC-SHA256", + "TLS-DHE-RSA-WITH-ARIA-256-CBC-SHA384", + "TLS-ECDHE-RSA-WITH-RC4-128-SHA", + "TLS-ECDHE-RSA-WITH-3DES-EDE-CBC-SHA", + "TLS-ECDHE-RSA-WITH-AES-128-CBC-SHA", + "TLS-ECDHE-RSA-WITH-AES-256-CBC-SHA", + "TLS-ECDHE-RSA-WITH-CHACHA20-POLY1305-SHA256", + "TLS-ECDHE-ECDSA-WITH-CHACHA20-POLY1305-SHA256", + "TLS-DHE-RSA-WITH-CHACHA20-POLY1305-SHA256", + "TLS-DHE-RSA-WITH-AES-128-GCM-SHA256", + "TLS-DHE-RSA-WITH-AES-256-GCM-SHA384", + "TLS-DHE-DSS-WITH-AES-128-CBC-SHA", + "TLS-DHE-DSS-WITH-AES-256-CBC-SHA", + "TLS-DHE-DSS-WITH-AES-128-CBC-SHA256", + "TLS-DHE-DSS-WITH-AES-128-GCM-SHA256", + "TLS-DHE-DSS-WITH-AES-256-CBC-SHA256", + "TLS-DHE-DSS-WITH-AES-256-GCM-SHA384", + "TLS-ECDHE-RSA-WITH-AES-128-CBC-SHA256", + "TLS-ECDHE-RSA-WITH-AES-128-GCM-SHA256", + "TLS-ECDHE-RSA-WITH-AES-256-CBC-SHA384", + "TLS-ECDHE-RSA-WITH-AES-256-GCM-SHA384", + "TLS-ECDHE-ECDSA-WITH-AES-128-CBC-SHA", + "TLS-ECDHE-ECDSA-WITH-AES-128-CBC-SHA256", + "TLS-ECDHE-ECDSA-WITH-AES-128-GCM-SHA256", + "TLS-ECDHE-ECDSA-WITH-AES-256-CBC-SHA384", + "TLS-ECDHE-ECDSA-WITH-AES-256-GCM-SHA384", + "TLS-RSA-WITH-AES-128-GCM-SHA256", + "TLS-RSA-WITH-AES-256-GCM-SHA384", + "TLS-DHE-DSS-WITH-CAMELLIA-128-CBC-SHA", + "TLS-DHE-DSS-WITH-CAMELLIA-256-CBC-SHA", + "TLS-DHE-DSS-WITH-CAMELLIA-128-CBC-SHA256", + "TLS-DHE-DSS-WITH-CAMELLIA-256-CBC-SHA256", + "TLS-DHE-DSS-WITH-SEED-CBC-SHA", + "TLS-DHE-DSS-WITH-ARIA-128-CBC-SHA256", + "TLS-DHE-DSS-WITH-ARIA-256-CBC-SHA384", + "TLS-ECDHE-RSA-WITH-ARIA-128-CBC-SHA256", + "TLS-ECDHE-RSA-WITH-ARIA-256-CBC-SHA384", + "TLS-ECDHE-ECDSA-WITH-ARIA-128-CBC-SHA256", + "TLS-ECDHE-ECDSA-WITH-ARIA-256-CBC-SHA384", + "TLS-DHE-DSS-WITH-3DES-EDE-CBC-SHA", + "TLS-DHE-DSS-WITH-DES-CBC-SHA"] + + ssl_cipher_suites_versions: + description: + - SSL/TLS versions that the cipher suite can be used with. + - FLAG Based Options. Specify multiple in list form. + - flag | ssl-3.0 | SSL 3.0. + - flag | tls-1.0 | TLS 1.0. + - flag | tls-1.1 | TLS 1.1. + - flag | tls-1.2 | TLS 1.2. + required: false + choices: ["ssl-3.0", "tls-1.0", "tls-1.1", "tls-1.2"] + + ssl_server_cipher_suites: + description: + - EXPERTS ONLY! KNOWLEDGE OF FMGR JSON API IS REQUIRED! + - List of multiple child objects to be added. Expects a list of dictionaries. + - Dictionaries must use FortiManager API parameters, not the ansible ones listed below. + - If submitted, all other prefixed sub-parameters ARE IGNORED. + - This object is MUTUALLY EXCLUSIVE with its options. 
+ - We expect that you know what you are doing with these list parameters, and are leveraging the JSON API Guide. + - WHEN IN DOUBT, USE THE SUB OPTIONS BELOW INSTEAD TO CREATE OBJECTS WITH MULTIPLE TASKS + required: false + + ssl_server_cipher_suites_cipher: + description: + - Cipher suite name. + - choice | TLS-RSA-WITH-RC4-128-MD5 | Cipher suite TLS-RSA-WITH-RC4-128-MD5. + - choice | TLS-RSA-WITH-RC4-128-SHA | Cipher suite TLS-RSA-WITH-RC4-128-SHA. + - choice | TLS-RSA-WITH-DES-CBC-SHA | Cipher suite TLS-RSA-WITH-DES-CBC-SHA. + - choice | TLS-RSA-WITH-3DES-EDE-CBC-SHA | Cipher suite TLS-RSA-WITH-3DES-EDE-CBC-SHA. + - choice | TLS-RSA-WITH-AES-128-CBC-SHA | Cipher suite TLS-RSA-WITH-AES-128-CBC-SHA. + - choice | TLS-RSA-WITH-AES-256-CBC-SHA | Cipher suite TLS-RSA-WITH-AES-256-CBC-SHA. + - choice | TLS-RSA-WITH-AES-128-CBC-SHA256 | Cipher suite TLS-RSA-WITH-AES-128-CBC-SHA256. + - choice | TLS-RSA-WITH-AES-256-CBC-SHA256 | Cipher suite TLS-RSA-WITH-AES-256-CBC-SHA256. + - choice | TLS-RSA-WITH-CAMELLIA-128-CBC-SHA | Cipher suite TLS-RSA-WITH-CAMELLIA-128-CBC-SHA. + - choice | TLS-RSA-WITH-CAMELLIA-256-CBC-SHA | Cipher suite TLS-RSA-WITH-CAMELLIA-256-CBC-SHA. + - choice | TLS-RSA-WITH-CAMELLIA-128-CBC-SHA256 | Cipher suite TLS-RSA-WITH-CAMELLIA-128-CBC-SHA256. + - choice | TLS-RSA-WITH-CAMELLIA-256-CBC-SHA256 | Cipher suite TLS-RSA-WITH-CAMELLIA-256-CBC-SHA256. + - choice | TLS-RSA-WITH-SEED-CBC-SHA | Cipher suite TLS-RSA-WITH-SEED-CBC-SHA. + - choice | TLS-RSA-WITH-ARIA-128-CBC-SHA256 | Cipher suite TLS-RSA-WITH-ARIA-128-CBC-SHA256. + - choice | TLS-RSA-WITH-ARIA-256-CBC-SHA384 | Cipher suite TLS-RSA-WITH-ARIA-256-CBC-SHA384. + - choice | TLS-DHE-RSA-WITH-DES-CBC-SHA | Cipher suite TLS-DHE-RSA-WITH-DES-CBC-SHA. + - choice | TLS-DHE-RSA-WITH-3DES-EDE-CBC-SHA | Cipher suite TLS-DHE-RSA-WITH-3DES-EDE-CBC-SHA. + - choice | TLS-DHE-RSA-WITH-AES-128-CBC-SHA | Cipher suite TLS-DHE-RSA-WITH-AES-128-CBC-SHA. + - choice | TLS-DHE-RSA-WITH-AES-256-CBC-SHA | Cipher suite TLS-DHE-RSA-WITH-AES-256-CBC-SHA. + - choice | TLS-DHE-RSA-WITH-AES-128-CBC-SHA256 | Cipher suite TLS-DHE-RSA-WITH-AES-128-CBC-SHA256. + - choice | TLS-DHE-RSA-WITH-AES-256-CBC-SHA256 | Cipher suite TLS-DHE-RSA-WITH-AES-256-CBC-SHA256. + - choice | TLS-DHE-RSA-WITH-CAMELLIA-128-CBC-SHA | Cipher suite TLS-DHE-RSA-WITH-CAMELLIA-128-CBC-SHA. + - choice | TLS-DHE-RSA-WITH-CAMELLIA-256-CBC-SHA | Cipher suite TLS-DHE-RSA-WITH-CAMELLIA-256-CBC-SHA. + - choice | TLS-DHE-RSA-WITH-CAMELLIA-128-CBC-SHA256 | Cipher suite TLS-DHE-RSA-WITH-CAMELLIA-128-CBC-SHA256. + - choice | TLS-DHE-RSA-WITH-CAMELLIA-256-CBC-SHA256 | Cipher suite TLS-DHE-RSA-WITH-CAMELLIA-256-CBC-SHA256. + - choice | TLS-DHE-RSA-WITH-SEED-CBC-SHA | Cipher suite TLS-DHE-RSA-WITH-SEED-CBC-SHA. + - choice | TLS-DHE-RSA-WITH-ARIA-128-CBC-SHA256 | Cipher suite TLS-DHE-RSA-WITH-ARIA-128-CBC-SHA256. + - choice | TLS-DHE-RSA-WITH-ARIA-256-CBC-SHA384 | Cipher suite TLS-DHE-RSA-WITH-ARIA-256-CBC-SHA384. + - choice | TLS-ECDHE-RSA-WITH-RC4-128-SHA | Cipher suite TLS-ECDHE-RSA-WITH-RC4-128-SHA. + - choice | TLS-ECDHE-RSA-WITH-3DES-EDE-CBC-SHA | Cipher suite TLS-ECDHE-RSA-WITH-3DES-EDE-CBC-SHA. + - choice | TLS-ECDHE-RSA-WITH-AES-128-CBC-SHA | Cipher suite TLS-ECDHE-RSA-WITH-AES-128-CBC-SHA. + - choice | TLS-ECDHE-RSA-WITH-AES-256-CBC-SHA | Cipher suite TLS-ECDHE-RSA-WITH-AES-256-CBC-SHA. + - choice | TLS-ECDHE-RSA-WITH-CHACHA20-POLY1305-SHA256 | Cipher suite TLS-ECDHE-RSA-WITH-CHACHA20-POLY1305-SHA256. 
+      - choice | TLS-ECDHE-ECDSA-WITH-CHACHA20-POLY1305-SHA256 | Cipher suite TLS-ECDHE-ECDSA-WITH-CHACHA20-POLY1305-SHA256.
+      - choice | TLS-DHE-RSA-WITH-CHACHA20-POLY1305-SHA256 | Cipher suite TLS-DHE-RSA-WITH-CHACHA20-POLY1305-SHA256.
+      - choice | TLS-DHE-RSA-WITH-AES-128-GCM-SHA256 | Cipher suite TLS-DHE-RSA-WITH-AES-128-GCM-SHA256.
+      - choice | TLS-DHE-RSA-WITH-AES-256-GCM-SHA384 | Cipher suite TLS-DHE-RSA-WITH-AES-256-GCM-SHA384.
+      - choice | TLS-DHE-DSS-WITH-AES-128-CBC-SHA | Cipher suite TLS-DHE-DSS-WITH-AES-128-CBC-SHA.
+      - choice | TLS-DHE-DSS-WITH-AES-256-CBC-SHA | Cipher suite TLS-DHE-DSS-WITH-AES-256-CBC-SHA.
+      - choice | TLS-DHE-DSS-WITH-AES-128-CBC-SHA256 | Cipher suite TLS-DHE-DSS-WITH-AES-128-CBC-SHA256.
+      - choice | TLS-DHE-DSS-WITH-AES-128-GCM-SHA256 | Cipher suite TLS-DHE-DSS-WITH-AES-128-GCM-SHA256.
+      - choice | TLS-DHE-DSS-WITH-AES-256-CBC-SHA256 | Cipher suite TLS-DHE-DSS-WITH-AES-256-CBC-SHA256.
+      - choice | TLS-DHE-DSS-WITH-AES-256-GCM-SHA384 | Cipher suite TLS-DHE-DSS-WITH-AES-256-GCM-SHA384.
+      - choice | TLS-ECDHE-RSA-WITH-AES-128-CBC-SHA256 | Cipher suite TLS-ECDHE-RSA-WITH-AES-128-CBC-SHA256.
+      - choice | TLS-ECDHE-RSA-WITH-AES-128-GCM-SHA256 | Cipher suite TLS-ECDHE-RSA-WITH-AES-128-GCM-SHA256.
+      - choice | TLS-ECDHE-RSA-WITH-AES-256-CBC-SHA384 | Cipher suite TLS-ECDHE-RSA-WITH-AES-256-CBC-SHA384.
+      - choice | TLS-ECDHE-RSA-WITH-AES-256-GCM-SHA384 | Cipher suite TLS-ECDHE-RSA-WITH-AES-256-GCM-SHA384.
+      - choice | TLS-ECDHE-ECDSA-WITH-AES-128-CBC-SHA | Cipher suite TLS-ECDHE-ECDSA-WITH-AES-128-CBC-SHA.
+      - choice | TLS-ECDHE-ECDSA-WITH-AES-128-CBC-SHA256 | Cipher suite TLS-ECDHE-ECDSA-WITH-AES-128-CBC-SHA256.
+      - choice | TLS-ECDHE-ECDSA-WITH-AES-128-GCM-SHA256 | Cipher suite TLS-ECDHE-ECDSA-WITH-AES-128-GCM-SHA256.
+      - choice | TLS-ECDHE-ECDSA-WITH-AES-256-CBC-SHA384 | Cipher suite TLS-ECDHE-ECDSA-WITH-AES-256-CBC-SHA384.
+      - choice | TLS-ECDHE-ECDSA-WITH-AES-256-GCM-SHA384 | Cipher suite TLS-ECDHE-ECDSA-WITH-AES-256-GCM-SHA384.
+      - choice | TLS-RSA-WITH-AES-128-GCM-SHA256 | Cipher suite TLS-RSA-WITH-AES-128-GCM-SHA256.
+      - choice | TLS-RSA-WITH-AES-256-GCM-SHA384 | Cipher suite TLS-RSA-WITH-AES-256-GCM-SHA384.
+      - choice | TLS-DHE-DSS-WITH-CAMELLIA-128-CBC-SHA | Cipher suite TLS-DHE-DSS-WITH-CAMELLIA-128-CBC-SHA.
+      - choice | TLS-DHE-DSS-WITH-CAMELLIA-256-CBC-SHA | Cipher suite TLS-DHE-DSS-WITH-CAMELLIA-256-CBC-SHA.
+      - choice | TLS-DHE-DSS-WITH-CAMELLIA-128-CBC-SHA256 | Cipher suite TLS-DHE-DSS-WITH-CAMELLIA-128-CBC-SHA256.
+      - choice | TLS-DHE-DSS-WITH-CAMELLIA-256-CBC-SHA256 | Cipher suite TLS-DHE-DSS-WITH-CAMELLIA-256-CBC-SHA256.
+      - choice | TLS-DHE-DSS-WITH-SEED-CBC-SHA | Cipher suite TLS-DHE-DSS-WITH-SEED-CBC-SHA.
+      - choice | TLS-DHE-DSS-WITH-ARIA-128-CBC-SHA256 | Cipher suite TLS-DHE-DSS-WITH-ARIA-128-CBC-SHA256.
+      - choice | TLS-DHE-DSS-WITH-ARIA-256-CBC-SHA384 | Cipher suite TLS-DHE-DSS-WITH-ARIA-256-CBC-SHA384.
+      - choice | TLS-ECDHE-RSA-WITH-ARIA-128-CBC-SHA256 | Cipher suite TLS-ECDHE-RSA-WITH-ARIA-128-CBC-SHA256.
+      - choice | TLS-ECDHE-RSA-WITH-ARIA-256-CBC-SHA384 | Cipher suite TLS-ECDHE-RSA-WITH-ARIA-256-CBC-SHA384.
+      - choice | TLS-ECDHE-ECDSA-WITH-ARIA-128-CBC-SHA256 | Cipher suite TLS-ECDHE-ECDSA-WITH-ARIA-128-CBC-SHA256.
+      - choice | TLS-ECDHE-ECDSA-WITH-ARIA-256-CBC-SHA384 | Cipher suite TLS-ECDHE-ECDSA-WITH-ARIA-256-CBC-SHA384.
+      - choice | TLS-DHE-DSS-WITH-3DES-EDE-CBC-SHA | Cipher suite TLS-DHE-DSS-WITH-3DES-EDE-CBC-SHA.
+      - choice | TLS-DHE-DSS-WITH-DES-CBC-SHA | Cipher suite TLS-DHE-DSS-WITH-DES-CBC-SHA.
+ required: false + choices: ["TLS-RSA-WITH-RC4-128-MD5", + "TLS-RSA-WITH-RC4-128-SHA", + "TLS-RSA-WITH-DES-CBC-SHA", + "TLS-RSA-WITH-3DES-EDE-CBC-SHA", + "TLS-RSA-WITH-AES-128-CBC-SHA", + "TLS-RSA-WITH-AES-256-CBC-SHA", + "TLS-RSA-WITH-AES-128-CBC-SHA256", + "TLS-RSA-WITH-AES-256-CBC-SHA256", + "TLS-RSA-WITH-CAMELLIA-128-CBC-SHA", + "TLS-RSA-WITH-CAMELLIA-256-CBC-SHA", + "TLS-RSA-WITH-CAMELLIA-128-CBC-SHA256", + "TLS-RSA-WITH-CAMELLIA-256-CBC-SHA256", + "TLS-RSA-WITH-SEED-CBC-SHA", + "TLS-RSA-WITH-ARIA-128-CBC-SHA256", + "TLS-RSA-WITH-ARIA-256-CBC-SHA384", + "TLS-DHE-RSA-WITH-DES-CBC-SHA", + "TLS-DHE-RSA-WITH-3DES-EDE-CBC-SHA", + "TLS-DHE-RSA-WITH-AES-128-CBC-SHA", + "TLS-DHE-RSA-WITH-AES-256-CBC-SHA", + "TLS-DHE-RSA-WITH-AES-128-CBC-SHA256", + "TLS-DHE-RSA-WITH-AES-256-CBC-SHA256", + "TLS-DHE-RSA-WITH-CAMELLIA-128-CBC-SHA", + "TLS-DHE-RSA-WITH-CAMELLIA-256-CBC-SHA", + "TLS-DHE-RSA-WITH-CAMELLIA-128-CBC-SHA256", + "TLS-DHE-RSA-WITH-CAMELLIA-256-CBC-SHA256", + "TLS-DHE-RSA-WITH-SEED-CBC-SHA", + "TLS-DHE-RSA-WITH-ARIA-128-CBC-SHA256", + "TLS-DHE-RSA-WITH-ARIA-256-CBC-SHA384", + "TLS-ECDHE-RSA-WITH-RC4-128-SHA", + "TLS-ECDHE-RSA-WITH-3DES-EDE-CBC-SHA", + "TLS-ECDHE-RSA-WITH-AES-128-CBC-SHA", + "TLS-ECDHE-RSA-WITH-AES-256-CBC-SHA", + "TLS-ECDHE-RSA-WITH-CHACHA20-POLY1305-SHA256", + "TLS-ECDHE-ECDSA-WITH-CHACHA20-POLY1305-SHA256", + "TLS-DHE-RSA-WITH-CHACHA20-POLY1305-SHA256", + "TLS-DHE-RSA-WITH-AES-128-GCM-SHA256", + "TLS-DHE-RSA-WITH-AES-256-GCM-SHA384", + "TLS-DHE-DSS-WITH-AES-128-CBC-SHA", + "TLS-DHE-DSS-WITH-AES-256-CBC-SHA", + "TLS-DHE-DSS-WITH-AES-128-CBC-SHA256", + "TLS-DHE-DSS-WITH-AES-128-GCM-SHA256", + "TLS-DHE-DSS-WITH-AES-256-CBC-SHA256", + "TLS-DHE-DSS-WITH-AES-256-GCM-SHA384", + "TLS-ECDHE-RSA-WITH-AES-128-CBC-SHA256", + "TLS-ECDHE-RSA-WITH-AES-128-GCM-SHA256", + "TLS-ECDHE-RSA-WITH-AES-256-CBC-SHA384", + "TLS-ECDHE-RSA-WITH-AES-256-GCM-SHA384", + "TLS-ECDHE-ECDSA-WITH-AES-128-CBC-SHA", + "TLS-ECDHE-ECDSA-WITH-AES-128-CBC-SHA256", + "TLS-ECDHE-ECDSA-WITH-AES-128-GCM-SHA256", + "TLS-ECDHE-ECDSA-WITH-AES-256-CBC-SHA384", + "TLS-ECDHE-ECDSA-WITH-AES-256-GCM-SHA384", + "TLS-RSA-WITH-AES-128-GCM-SHA256", + "TLS-RSA-WITH-AES-256-GCM-SHA384", + "TLS-DHE-DSS-WITH-CAMELLIA-128-CBC-SHA", + "TLS-DHE-DSS-WITH-CAMELLIA-256-CBC-SHA", + "TLS-DHE-DSS-WITH-CAMELLIA-128-CBC-SHA256", + "TLS-DHE-DSS-WITH-CAMELLIA-256-CBC-SHA256", + "TLS-DHE-DSS-WITH-SEED-CBC-SHA", + "TLS-DHE-DSS-WITH-ARIA-128-CBC-SHA256", + "TLS-DHE-DSS-WITH-ARIA-256-CBC-SHA384", + "TLS-ECDHE-RSA-WITH-ARIA-128-CBC-SHA256", + "TLS-ECDHE-RSA-WITH-ARIA-256-CBC-SHA384", + "TLS-ECDHE-ECDSA-WITH-ARIA-128-CBC-SHA256", + "TLS-ECDHE-ECDSA-WITH-ARIA-256-CBC-SHA384", + "TLS-DHE-DSS-WITH-3DES-EDE-CBC-SHA", + "TLS-DHE-DSS-WITH-DES-CBC-SHA"] + + ssl_server_cipher_suites_priority: + description: + - SSL/TLS cipher suites priority. + required: false + + ssl_server_cipher_suites_versions: + description: + - SSL/TLS versions that the cipher suite can be used with. + - FLAG Based Options. Specify multiple in list form. + - flag | ssl-3.0 | SSL 3.0. + - flag | tls-1.0 | TLS 1.0. + - flag | tls-1.1 | TLS 1.1. + - flag | tls-1.2 | TLS 1.2. 
+    required: false
+    choices: ["ssl-3.0", "tls-1.0", "tls-1.1", "tls-1.2"]
+
+
+'''
+
+EXAMPLES = '''
+# BASIC FULL STATIC NAT MAPPING
+- name: EDIT FMGR_FIREWALL_VIP SNAT
+  fmgr_fwobj_vip:
+    name: "Basic StaticNAT Map"
+    mode: "set"
+    adom: "ansible"
+    type: "static-nat"
+    extip: "82.72.192.185"
+    extintf: "any"
+    mappedip: "10.7.220.25"
+    comment: "Created by Ansible"
+    color: "17"
+
+# BASIC PORT PNAT MAPPING
+- name: EDIT FMGR_FIREWALL_VIP PNAT
+  fmgr_fwobj_vip:
+    name: "Basic PNAT Map Port 10443"
+    mode: "set"
+    adom: "ansible"
+    type: "static-nat"
+    extip: "82.72.192.185"
+    extport: "10443"
+    extintf: "any"
+    portforward: "enable"
+    protocol: "tcp"
+    mappedip: "10.7.220.25"
+    mappedport: "443"
+    comment: "Created by Ansible"
+    color: "17"
+
+# BASIC DNS TRANSLATION NAT
+- name: EDIT FMGR_FIREWALL_DNST
+  fmgr_fwobj_vip:
+    name: "Basic DNS Translation"
+    mode: "set"
+    adom: "ansible"
+    type: "dns-translation"
+    extip: "192.168.0.1-192.168.0.100"
+    extintf: "dmz"
+    mappedip: "3.3.3.0/24, 4.0.0.0/24"
+    comment: "Created by Ansible"
+    color: "12"
+
+# BASIC FQDN NAT
+- name: EDIT FMGR_FIREWALL_FQDN
+  fmgr_fwobj_vip:
+    name: "Basic FQDN Translation"
+    mode: "set"
+    adom: "ansible"
+    type: "fqdn"
+    mapped_addr: "google-play"
+    comment: "Created by Ansible"
+    color: "5"
+
+# DELETE AN ENTRY
+- name: DELETE FMGR_FIREWALL_VIP PNAT
+  fmgr_fwobj_vip:
+    name: "Basic PNAT Map Port 10443"
+    mode: "delete"
+    adom: "ansible"
+'''
+
+RETURN = """
+api_result:
+  description: Full API response, including status code and message.
+  returned: always
+  type: str
+"""
+
+from ansible.module_utils.basic import AnsibleModule, env_fallback
+from ansible.module_utils.connection import Connection
+from ansible_collections.fortinet.fortios.plugins.module_utils.network.fortimanager.fortimanager import FortiManagerHandler
+from ansible_collections.fortinet.fortios.plugins.module_utils.network.fortimanager.common import FMGBaseException
+from ansible_collections.fortinet.fortios.plugins.module_utils.network.fortimanager.common import FMGRCommon
+from ansible_collections.fortinet.fortios.plugins.module_utils.network.fortimanager.common import FMGRMethods
+from ansible_collections.fortinet.fortios.plugins.module_utils.network.fortimanager.common import DEFAULT_RESULT_OBJ
+from ansible_collections.fortinet.fortios.plugins.module_utils.network.fortimanager.common import FAIL_SOCKET_MSG
+from ansible_collections.fortinet.fortios.plugins.module_utils.network.fortimanager.common import prepare_dict
+from ansible_collections.fortinet.fortios.plugins.module_utils.network.fortimanager.common import scrub_dict
+
+
+def fmgr_firewall_vip_modify(fmgr, paramgram):
+    """
+    :param fmgr: The fmgr object instance from fortimanager.py
+    :type fmgr: class object
+    :param paramgram: The formatted dictionary of options to process
+    :type paramgram: dict
+    :return: The response from the FortiManager
+    :rtype: dict
+    """
+
+    mode = paramgram["mode"]
+    adom = paramgram["adom"]
+    # INIT A BASIC OBJECT
+    response = DEFAULT_RESULT_OBJ
+    url = ""
+    datagram = {}
+
+    # EVAL THE MODE PARAMETER FOR SET OR ADD
+    if mode in ['set', 'add', 'update']:
+        url = '/pm/config/adom/{adom}/obj/firewall/vip'.format(adom=adom)
+        datagram = scrub_dict(prepare_dict(paramgram))
+
+    # EVAL THE MODE PARAMETER FOR DELETE
+    elif mode == "delete":
+        # SET THE CORRECT URL FOR DELETE
+        url = '/pm/config/adom/{adom}/obj/firewall/vip/{name}'.format(adom=adom, name=paramgram["name"])
+        datagram = {}
+
+    response = fmgr.process_request(url, datagram, paramgram["mode"])
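+    # NOTE: a brief sketch of the call above, based on how this function builds
+    # its inputs. process_request() submits the prepared request to FortiManager:
+    # for set/add/update the datagram is the scrubbed paramgram posted to the VIP
+    # table URL, while for delete the datagram is empty and the target object is
+    # addressed by its "name" in the per-object URL. The raw API response
+    # (status code and message) is passed back to the caller unchanged.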
return response + + +############# +# END METHODS +############# + + +def main(): + argument_spec = dict( + adom=dict(type="str", default="root"), + mode=dict(choices=["add", "set", "delete", "update"], type="str", default="add"), + + websphere_server=dict(required=False, type="str", choices=["disable", "enable"]), + weblogic_server=dict(required=False, type="str", choices=["disable", "enable"]), + type=dict(required=False, type="str", + choices=["static-nat", "load-balance", "server-load-balance", "dns-translation", "fqdn"]), + ssl_server_session_state_type=dict(required=False, type="str", choices=["disable", "time", "count", "both"]), + ssl_server_session_state_timeout=dict(required=False, type="int"), + ssl_server_session_state_max=dict(required=False, type="int"), + ssl_server_min_version=dict(required=False, type="str", + choices=["ssl-3.0", "tls-1.0", "tls-1.1", "tls-1.2", "client"]), + ssl_server_max_version=dict(required=False, type="str", + choices=["ssl-3.0", "tls-1.0", "tls-1.1", "tls-1.2", "client"]), + ssl_server_algorithm=dict(required=False, type="str", choices=["high", "low", "medium", "custom", "client"]), + ssl_send_empty_frags=dict(required=False, type="str", choices=["disable", "enable"]), + ssl_pfs=dict(required=False, type="str", choices=["require", "deny", "allow"]), + ssl_mode=dict(required=False, type="str", choices=["half", "full"]), + ssl_min_version=dict(required=False, type="str", choices=["ssl-3.0", "tls-1.0", "tls-1.1", "tls-1.2"]), + ssl_max_version=dict(required=False, type="str", choices=["ssl-3.0", "tls-1.0", "tls-1.1", "tls-1.2"]), + ssl_http_match_host=dict(required=False, type="str", choices=["disable", "enable"]), + ssl_http_location_conversion=dict(required=False, type="str", choices=["disable", "enable"]), + ssl_hsts_include_subdomains=dict(required=False, type="str", choices=["disable", "enable"]), + ssl_hsts_age=dict(required=False, type="int"), + ssl_hsts=dict(required=False, type="str", choices=["disable", "enable"]), + ssl_hpkp_report_uri=dict(required=False, type="str"), + ssl_hpkp_primary=dict(required=False, type="str"), + ssl_hpkp_include_subdomains=dict(required=False, type="str", choices=["disable", "enable"]), + ssl_hpkp_backup=dict(required=False, type="str"), + ssl_hpkp_age=dict(required=False, type="int"), + ssl_hpkp=dict(required=False, type="str", choices=["disable", "enable", "report-only"]), + ssl_dh_bits=dict(required=False, type="str", choices=["768", "1024", "1536", "2048", "3072", "4096"]), + ssl_client_session_state_type=dict(required=False, type="str", choices=["disable", "time", "count", "both"]), + ssl_client_session_state_timeout=dict(required=False, type="int"), + ssl_client_session_state_max=dict(required=False, type="int"), + ssl_client_renegotiation=dict(required=False, type="str", choices=["deny", "allow", "secure"]), + ssl_client_fallback=dict(required=False, type="str", choices=["disable", "enable"]), + ssl_certificate=dict(required=False, type="str"), + ssl_algorithm=dict(required=False, type="str", choices=["high", "medium", "low", "custom"]), + srcintf_filter=dict(required=False, type="str"), + src_filter=dict(required=False, type="str"), + service=dict(required=False, type="str"), + server_type=dict(required=False, type="str", + choices=["http", "https", "ssl", "tcp", "udp", "ip", "imaps", "pop3s", "smtps"]), + protocol=dict(required=False, type="str", choices=["tcp", "udp", "sctp", "icmp"]), + portmapping_type=dict(required=False, type="str", choices=["1-to-1", "m-to-n"]), + portforward=dict(required=False, 
type="str", choices=["disable", "enable"]), + persistence=dict(required=False, type="str", choices=["none", "http-cookie", "ssl-session-id"]), + outlook_web_access=dict(required=False, type="str", choices=["disable", "enable"]), + nat_source_vip=dict(required=False, type="str", choices=["disable", "enable"]), + name=dict(required=False, type="str"), + monitor=dict(required=False, type="str"), + max_embryonic_connections=dict(required=False, type="int"), + mappedport=dict(required=False, type="str"), + mappedip=dict(required=False, type="str"), + mapped_addr=dict(required=False, type="str"), + ldb_method=dict(required=False, type="str", + choices=["static", "round-robin", "weighted", "least-session", "least-rtt", "first-alive", + "http-host"]), + https_cookie_secure=dict(required=False, type="str", choices=["disable", "enable"]), + http_multiplex=dict(required=False, type="str", choices=["disable", "enable"]), + http_ip_header_name=dict(required=False, type="str"), + http_ip_header=dict(required=False, type="str", choices=["disable", "enable"]), + http_cookie_share=dict(required=False, type="str", choices=["disable", "same-ip"]), + http_cookie_path=dict(required=False, type="str"), + http_cookie_generation=dict(required=False, type="int"), + http_cookie_domain_from_host=dict(required=False, type="str", choices=["disable", "enable"]), + http_cookie_domain=dict(required=False, type="str"), + http_cookie_age=dict(required=False, type="int"), + gratuitous_arp_interval=dict(required=False, type="int"), + extport=dict(required=False, type="str"), + extip=dict(required=False, type="str"), + extintf=dict(required=False, type="str"), + extaddr=dict(required=False, type="str"), + dns_mapping_ttl=dict(required=False, type="int"), + comment=dict(required=False, type="str"), + color=dict(required=False, type="int"), + arp_reply=dict(required=False, type="str", choices=["disable", "enable"]), + dynamic_mapping=dict(required=False, type="list"), + dynamic_mapping_arp_reply=dict(required=False, type="str", choices=["disable", "enable"]), + dynamic_mapping_color=dict(required=False, type="int"), + dynamic_mapping_comment=dict(required=False, type="str"), + dynamic_mapping_dns_mapping_ttl=dict(required=False, type="int"), + dynamic_mapping_extaddr=dict(required=False, type="str"), + dynamic_mapping_extintf=dict(required=False, type="str"), + dynamic_mapping_extip=dict(required=False, type="str"), + dynamic_mapping_extport=dict(required=False, type="str"), + dynamic_mapping_gratuitous_arp_interval=dict(required=False, type="int"), + dynamic_mapping_http_cookie_age=dict(required=False, type="int"), + dynamic_mapping_http_cookie_domain=dict(required=False, type="str"), + dynamic_mapping_http_cookie_domain_from_host=dict(required=False, type="str", choices=["disable", "enable"]), + dynamic_mapping_http_cookie_generation=dict(required=False, type="int"), + dynamic_mapping_http_cookie_path=dict(required=False, type="str"), + dynamic_mapping_http_cookie_share=dict(required=False, type="str", choices=["disable", "same-ip"]), + dynamic_mapping_http_ip_header=dict(required=False, type="str", choices=["disable", "enable"]), + dynamic_mapping_http_ip_header_name=dict(required=False, type="str"), + dynamic_mapping_http_multiplex=dict(required=False, type="str", choices=["disable", "enable"]), + dynamic_mapping_https_cookie_secure=dict(required=False, type="str", choices=["disable", "enable"]), + dynamic_mapping_ldb_method=dict(required=False, type="str", choices=["static", + "round-robin", + "weighted", + 
"least-session", + "least-rtt", + "first-alive", + "http-host"]), + dynamic_mapping_mapped_addr=dict(required=False, type="str"), + dynamic_mapping_mappedip=dict(required=False, type="str"), + dynamic_mapping_mappedport=dict(required=False, type="str"), + dynamic_mapping_max_embryonic_connections=dict(required=False, type="int"), + dynamic_mapping_monitor=dict(required=False, type="str"), + dynamic_mapping_nat_source_vip=dict(required=False, type="str", choices=["disable", "enable"]), + dynamic_mapping_outlook_web_access=dict(required=False, type="str", choices=["disable", "enable"]), + dynamic_mapping_persistence=dict(required=False, type="str", choices=["none", "http-cookie", "ssl-session-id"]), + dynamic_mapping_portforward=dict(required=False, type="str", choices=["disable", "enable"]), + dynamic_mapping_portmapping_type=dict(required=False, type="str", choices=["1-to-1", "m-to-n"]), + dynamic_mapping_protocol=dict(required=False, type="str", choices=["tcp", "udp", "sctp", "icmp"]), + dynamic_mapping_server_type=dict(required=False, type="str", + choices=["http", "https", "ssl", "tcp", "udp", "ip", "imaps", "pop3s", + "smtps"]), + dynamic_mapping_service=dict(required=False, type="str"), + dynamic_mapping_src_filter=dict(required=False, type="str"), + dynamic_mapping_srcintf_filter=dict(required=False, type="str"), + dynamic_mapping_ssl_algorithm=dict(required=False, type="str", choices=["high", "medium", "low", "custom"]), + dynamic_mapping_ssl_certificate=dict(required=False, type="str"), + dynamic_mapping_ssl_client_fallback=dict(required=False, type="str", choices=["disable", "enable"]), + dynamic_mapping_ssl_client_renegotiation=dict(required=False, type="str", choices=["deny", "allow", "secure"]), + dynamic_mapping_ssl_client_session_state_max=dict(required=False, type="int"), + dynamic_mapping_ssl_client_session_state_timeout=dict(required=False, type="int"), + dynamic_mapping_ssl_client_session_state_type=dict(required=False, type="str", + choices=["disable", "time", "count", "both"]), + dynamic_mapping_ssl_dh_bits=dict(required=False, type="str", + choices=["768", "1024", "1536", "2048", "3072", "4096"]), + dynamic_mapping_ssl_hpkp=dict(required=False, type="str", choices=["disable", "enable", "report-only"]), + dynamic_mapping_ssl_hpkp_age=dict(required=False, type="int"), + dynamic_mapping_ssl_hpkp_backup=dict(required=False, type="str"), + dynamic_mapping_ssl_hpkp_include_subdomains=dict(required=False, type="str", choices=["disable", "enable"]), + dynamic_mapping_ssl_hpkp_primary=dict(required=False, type="str"), + dynamic_mapping_ssl_hpkp_report_uri=dict(required=False, type="str"), + dynamic_mapping_ssl_hsts=dict(required=False, type="str", choices=["disable", "enable"]), + dynamic_mapping_ssl_hsts_age=dict(required=False, type="int"), + dynamic_mapping_ssl_hsts_include_subdomains=dict(required=False, type="str", choices=["disable", "enable"]), + dynamic_mapping_ssl_http_location_conversion=dict(required=False, type="str", choices=["disable", "enable"]), + dynamic_mapping_ssl_http_match_host=dict(required=False, type="str", choices=["disable", "enable"]), + dynamic_mapping_ssl_max_version=dict(required=False, type="str", + choices=["ssl-3.0", "tls-1.0", "tls-1.1", "tls-1.2"]), + dynamic_mapping_ssl_min_version=dict(required=False, type="str", + choices=["ssl-3.0", "tls-1.0", "tls-1.1", "tls-1.2"]), + dynamic_mapping_ssl_mode=dict(required=False, type="str", choices=["half", "full"]), + dynamic_mapping_ssl_pfs=dict(required=False, type="str", choices=["require", "deny", 
"allow"]), + dynamic_mapping_ssl_send_empty_frags=dict(required=False, type="str", choices=["disable", "enable"]), + dynamic_mapping_ssl_server_algorithm=dict(required=False, type="str", + choices=["high", "low", "medium", "custom", "client"]), + dynamic_mapping_ssl_server_max_version=dict(required=False, type="str", + choices=["ssl-3.0", "tls-1.0", "tls-1.1", "tls-1.2", "client"]), + dynamic_mapping_ssl_server_min_version=dict(required=False, type="str", + choices=["ssl-3.0", "tls-1.0", "tls-1.1", "tls-1.2", "client"]), + dynamic_mapping_ssl_server_session_state_max=dict(required=False, type="int"), + dynamic_mapping_ssl_server_session_state_timeout=dict(required=False, type="int"), + dynamic_mapping_ssl_server_session_state_type=dict(required=False, type="str", + choices=["disable", "time", "count", "both"]), + dynamic_mapping_type=dict(required=False, type="str", + choices=["static-nat", "load-balance", "server-load-balance", "dns-translation", + "fqdn"]), + dynamic_mapping_weblogic_server=dict(required=False, type="str", choices=["disable", "enable"]), + dynamic_mapping_websphere_server=dict(required=False, type="str", choices=["disable", "enable"]), + + dynamic_mapping_realservers_client_ip=dict(required=False, type="str"), + dynamic_mapping_realservers_healthcheck=dict(required=False, type="str", choices=["disable", "enable", "vip"]), + dynamic_mapping_realservers_holddown_interval=dict(required=False, type="int"), + dynamic_mapping_realservers_http_host=dict(required=False, type="str"), + dynamic_mapping_realservers_ip=dict(required=False, type="str"), + dynamic_mapping_realservers_max_connections=dict(required=False, type="int"), + dynamic_mapping_realservers_monitor=dict(required=False, type="str"), + dynamic_mapping_realservers_port=dict(required=False, type="int"), + dynamic_mapping_realservers_seq=dict(required=False, type="str"), + dynamic_mapping_realservers_status=dict(required=False, type="str", choices=["active", "standby", "disable"]), + dynamic_mapping_realservers_weight=dict(required=False, type="int"), + + dynamic_mapping_ssl_cipher_suites_cipher=dict(required=False, + type="str", + choices=["TLS-RSA-WITH-RC4-128-MD5", + "TLS-RSA-WITH-RC4-128-SHA", + "TLS-RSA-WITH-DES-CBC-SHA", + "TLS-RSA-WITH-3DES-EDE-CBC-SHA", + "TLS-RSA-WITH-AES-128-CBC-SHA", + "TLS-RSA-WITH-AES-256-CBC-SHA", + "TLS-RSA-WITH-AES-128-CBC-SHA256", + "TLS-RSA-WITH-AES-256-CBC-SHA256", + "TLS-RSA-WITH-CAMELLIA-128-CBC-SHA", + "TLS-RSA-WITH-CAMELLIA-256-CBC-SHA", + "TLS-RSA-WITH-CAMELLIA-128-CBC-SHA256", + "TLS-RSA-WITH-CAMELLIA-256-CBC-SHA256", + "TLS-RSA-WITH-SEED-CBC-SHA", + "TLS-RSA-WITH-ARIA-128-CBC-SHA256", + "TLS-RSA-WITH-ARIA-256-CBC-SHA384", + "TLS-DHE-RSA-WITH-DES-CBC-SHA", + "TLS-DHE-RSA-WITH-3DES-EDE-CBC-SHA", + "TLS-DHE-RSA-WITH-AES-128-CBC-SHA", + "TLS-DHE-RSA-WITH-AES-256-CBC-SHA", + "TLS-DHE-RSA-WITH-AES-128-CBC-SHA256", + "TLS-DHE-RSA-WITH-AES-256-CBC-SHA256", + "TLS-DHE-RSA-WITH-CAMELLIA-128-CBC-SHA", + "TLS-DHE-RSA-WITH-CAMELLIA-256-CBC-SHA", + "TLS-DHE-RSA-WITH-CAMELLIA-128-CBC-SHA256", + "TLS-DHE-RSA-WITH-CAMELLIA-256-CBC-SHA256", + "TLS-DHE-RSA-WITH-SEED-CBC-SHA", + "TLS-DHE-RSA-WITH-ARIA-128-CBC-SHA256", + "TLS-DHE-RSA-WITH-ARIA-256-CBC-SHA384", + "TLS-ECDHE-RSA-WITH-RC4-128-SHA", + "TLS-ECDHE-RSA-WITH-3DES-EDE-CBC-SHA", + "TLS-ECDHE-RSA-WITH-AES-128-CBC-SHA", + "TLS-ECDHE-RSA-WITH-AES-256-CBC-SHA", + "TLS-ECDHE-RSA-WITH-CHACHA20-POLY1305-SHA256", + "TLS-ECDHE-ECDSA-WITH-CHACHA20-POLY1305-SHA256", + "TLS-DHE-RSA-WITH-CHACHA20-POLY1305-SHA256", + 
"TLS-DHE-RSA-WITH-AES-128-GCM-SHA256", + "TLS-DHE-RSA-WITH-AES-256-GCM-SHA384", + "TLS-DHE-DSS-WITH-AES-128-CBC-SHA", + "TLS-DHE-DSS-WITH-AES-256-CBC-SHA", + "TLS-DHE-DSS-WITH-AES-128-CBC-SHA256", + "TLS-DHE-DSS-WITH-AES-128-GCM-SHA256", + "TLS-DHE-DSS-WITH-AES-256-CBC-SHA256", + "TLS-DHE-DSS-WITH-AES-256-GCM-SHA384", + "TLS-ECDHE-RSA-WITH-AES-128-CBC-SHA256", + "TLS-ECDHE-RSA-WITH-AES-128-GCM-SHA256", + "TLS-ECDHE-RSA-WITH-AES-256-CBC-SHA384", + "TLS-ECDHE-RSA-WITH-AES-256-GCM-SHA384", + "TLS-ECDHE-ECDSA-WITH-AES-128-CBC-SHA", + "TLS-ECDHE-ECDSA-WITH-AES-128-CBC-SHA256", + "TLS-ECDHE-ECDSA-WITH-AES-128-GCM-SHA256", + "TLS-ECDHE-ECDSA-WITH-AES-256-CBC-SHA384", + "TLS-ECDHE-ECDSA-WITH-AES-256-GCM-SHA384", + "TLS-RSA-WITH-AES-128-GCM-SHA256", + "TLS-RSA-WITH-AES-256-GCM-SHA384", + "TLS-DHE-DSS-WITH-CAMELLIA-128-CBC-SHA", + "TLS-DHE-DSS-WITH-CAMELLIA-256-CBC-SHA", + "TLS-DHE-DSS-WITH-CAMELLIA-128-CBC-SHA256", + "TLS-DHE-DSS-WITH-CAMELLIA-256-CBC-SHA256", + "TLS-DHE-DSS-WITH-SEED-CBC-SHA", + "TLS-DHE-DSS-WITH-ARIA-128-CBC-SHA256", + "TLS-DHE-DSS-WITH-ARIA-256-CBC-SHA384", + "TLS-ECDHE-RSA-WITH-ARIA-128-CBC-SHA256", + "TLS-ECDHE-RSA-WITH-ARIA-256-CBC-SHA384", + "TLS-ECDHE-ECDSA-WITH-ARIA-128-CBC-SHA256", + "TLS-ECDHE-ECDSA-WITH-ARIA-256-CBC-SHA384", + "TLS-DHE-DSS-WITH-3DES-EDE-CBC-SHA", + "TLS-DHE-DSS-WITH-DES-CBC-SHA"]), + dynamic_mapping_ssl_cipher_suites_versions=dict(required=False, type="str", + choices=["ssl-3.0", "tls-1.0", "tls-1.1", "tls-1.2"]), + realservers=dict(required=False, type="list"), + realservers_client_ip=dict(required=False, type="str"), + realservers_healthcheck=dict(required=False, type="str", choices=["disable", "enable", "vip"]), + realservers_holddown_interval=dict(required=False, type="int"), + realservers_http_host=dict(required=False, type="str"), + realservers_ip=dict(required=False, type="str"), + realservers_max_connections=dict(required=False, type="int"), + realservers_monitor=dict(required=False, type="str"), + realservers_port=dict(required=False, type="int"), + realservers_seq=dict(required=False, type="str"), + realservers_status=dict(required=False, type="str", choices=["active", "standby", "disable"]), + realservers_weight=dict(required=False, type="int"), + ssl_cipher_suites=dict(required=False, type="list"), + ssl_cipher_suites_cipher=dict(required=False, + type="str", + choices=["TLS-RSA-WITH-RC4-128-MD5", + "TLS-RSA-WITH-RC4-128-SHA", + "TLS-RSA-WITH-DES-CBC-SHA", + "TLS-RSA-WITH-3DES-EDE-CBC-SHA", + "TLS-RSA-WITH-AES-128-CBC-SHA", + "TLS-RSA-WITH-AES-256-CBC-SHA", + "TLS-RSA-WITH-AES-128-CBC-SHA256", + "TLS-RSA-WITH-AES-256-CBC-SHA256", + "TLS-RSA-WITH-CAMELLIA-128-CBC-SHA", + "TLS-RSA-WITH-CAMELLIA-256-CBC-SHA", + "TLS-RSA-WITH-CAMELLIA-128-CBC-SHA256", + "TLS-RSA-WITH-CAMELLIA-256-CBC-SHA256", + "TLS-RSA-WITH-SEED-CBC-SHA", + "TLS-RSA-WITH-ARIA-128-CBC-SHA256", + "TLS-RSA-WITH-ARIA-256-CBC-SHA384", + "TLS-DHE-RSA-WITH-DES-CBC-SHA", + "TLS-DHE-RSA-WITH-3DES-EDE-CBC-SHA", + "TLS-DHE-RSA-WITH-AES-128-CBC-SHA", + "TLS-DHE-RSA-WITH-AES-256-CBC-SHA", + "TLS-DHE-RSA-WITH-AES-128-CBC-SHA256", + "TLS-DHE-RSA-WITH-AES-256-CBC-SHA256", + "TLS-DHE-RSA-WITH-CAMELLIA-128-CBC-SHA", + "TLS-DHE-RSA-WITH-CAMELLIA-256-CBC-SHA", + "TLS-DHE-RSA-WITH-CAMELLIA-128-CBC-SHA256", + "TLS-DHE-RSA-WITH-CAMELLIA-256-CBC-SHA256", + "TLS-DHE-RSA-WITH-SEED-CBC-SHA", + "TLS-DHE-RSA-WITH-ARIA-128-CBC-SHA256", + "TLS-DHE-RSA-WITH-ARIA-256-CBC-SHA384", + "TLS-ECDHE-RSA-WITH-RC4-128-SHA", + "TLS-ECDHE-RSA-WITH-3DES-EDE-CBC-SHA", + "TLS-ECDHE-RSA-WITH-AES-128-CBC-SHA", + 
"TLS-ECDHE-RSA-WITH-AES-256-CBC-SHA", + "TLS-ECDHE-RSA-WITH-CHACHA20-POLY1305-SHA256", + "TLS-ECDHE-ECDSA-WITH-CHACHA20-POLY1305-SHA256", + "TLS-DHE-RSA-WITH-CHACHA20-POLY1305-SHA256", + "TLS-DHE-RSA-WITH-AES-128-GCM-SHA256", + "TLS-DHE-RSA-WITH-AES-256-GCM-SHA384", + "TLS-DHE-DSS-WITH-AES-128-CBC-SHA", + "TLS-DHE-DSS-WITH-AES-256-CBC-SHA", + "TLS-DHE-DSS-WITH-AES-128-CBC-SHA256", + "TLS-DHE-DSS-WITH-AES-128-GCM-SHA256", + "TLS-DHE-DSS-WITH-AES-256-CBC-SHA256", + "TLS-DHE-DSS-WITH-AES-256-GCM-SHA384", + "TLS-ECDHE-RSA-WITH-AES-128-CBC-SHA256", + "TLS-ECDHE-RSA-WITH-AES-128-GCM-SHA256", + "TLS-ECDHE-RSA-WITH-AES-256-CBC-SHA384", + "TLS-ECDHE-RSA-WITH-AES-256-GCM-SHA384", + "TLS-ECDHE-ECDSA-WITH-AES-128-CBC-SHA", + "TLS-ECDHE-ECDSA-WITH-AES-128-CBC-SHA256", + "TLS-ECDHE-ECDSA-WITH-AES-128-GCM-SHA256", + "TLS-ECDHE-ECDSA-WITH-AES-256-CBC-SHA384", + "TLS-ECDHE-ECDSA-WITH-AES-256-GCM-SHA384", + "TLS-RSA-WITH-AES-128-GCM-SHA256", + "TLS-RSA-WITH-AES-256-GCM-SHA384", + "TLS-DHE-DSS-WITH-CAMELLIA-128-CBC-SHA", + "TLS-DHE-DSS-WITH-CAMELLIA-256-CBC-SHA", + "TLS-DHE-DSS-WITH-CAMELLIA-128-CBC-SHA256", + "TLS-DHE-DSS-WITH-CAMELLIA-256-CBC-SHA256", + "TLS-DHE-DSS-WITH-SEED-CBC-SHA", + "TLS-DHE-DSS-WITH-ARIA-128-CBC-SHA256", + "TLS-DHE-DSS-WITH-ARIA-256-CBC-SHA384", + "TLS-ECDHE-RSA-WITH-ARIA-128-CBC-SHA256", + "TLS-ECDHE-RSA-WITH-ARIA-256-CBC-SHA384", + "TLS-ECDHE-ECDSA-WITH-ARIA-128-CBC-SHA256", + "TLS-ECDHE-ECDSA-WITH-ARIA-256-CBC-SHA384", + "TLS-DHE-DSS-WITH-3DES-EDE-CBC-SHA", + "TLS-DHE-DSS-WITH-DES-CBC-SHA"]), + ssl_cipher_suites_versions=dict(required=False, type="str", + choices=["ssl-3.0", "tls-1.0", "tls-1.1", "tls-1.2"]), + ssl_server_cipher_suites=dict(required=False, type="list"), + ssl_server_cipher_suites_cipher=dict(required=False, + type="str", + choices=["TLS-RSA-WITH-RC4-128-MD5", + "TLS-RSA-WITH-RC4-128-SHA", + "TLS-RSA-WITH-DES-CBC-SHA", + "TLS-RSA-WITH-3DES-EDE-CBC-SHA", + "TLS-RSA-WITH-AES-128-CBC-SHA", + "TLS-RSA-WITH-AES-256-CBC-SHA", + "TLS-RSA-WITH-AES-128-CBC-SHA256", + "TLS-RSA-WITH-AES-256-CBC-SHA256", + "TLS-RSA-WITH-CAMELLIA-128-CBC-SHA", + "TLS-RSA-WITH-CAMELLIA-256-CBC-SHA", + "TLS-RSA-WITH-CAMELLIA-128-CBC-SHA256", + "TLS-RSA-WITH-CAMELLIA-256-CBC-SHA256", + "TLS-RSA-WITH-SEED-CBC-SHA", + "TLS-RSA-WITH-ARIA-128-CBC-SHA256", + "TLS-RSA-WITH-ARIA-256-CBC-SHA384", + "TLS-DHE-RSA-WITH-DES-CBC-SHA", + "TLS-DHE-RSA-WITH-3DES-EDE-CBC-SHA", + "TLS-DHE-RSA-WITH-AES-128-CBC-SHA", + "TLS-DHE-RSA-WITH-AES-256-CBC-SHA", + "TLS-DHE-RSA-WITH-AES-128-CBC-SHA256", + "TLS-DHE-RSA-WITH-AES-256-CBC-SHA256", + "TLS-DHE-RSA-WITH-CAMELLIA-128-CBC-SHA", + "TLS-DHE-RSA-WITH-CAMELLIA-256-CBC-SHA", + "TLS-DHE-RSA-WITH-CAMELLIA-128-CBC-SHA256", + "TLS-DHE-RSA-WITH-CAMELLIA-256-CBC-SHA256", + "TLS-DHE-RSA-WITH-SEED-CBC-SHA", + "TLS-DHE-RSA-WITH-ARIA-128-CBC-SHA256", + "TLS-DHE-RSA-WITH-ARIA-256-CBC-SHA384", + "TLS-ECDHE-RSA-WITH-RC4-128-SHA", + "TLS-ECDHE-RSA-WITH-3DES-EDE-CBC-SHA", + "TLS-ECDHE-RSA-WITH-AES-128-CBC-SHA", + "TLS-ECDHE-RSA-WITH-AES-256-CBC-SHA", + "TLS-ECDHE-RSA-WITH-CHACHA20-POLY1305-SHA256", + "TLS-ECDHE-ECDSA-WITH-CHACHA20-POLY1305-SHA256", + "TLS-DHE-RSA-WITH-CHACHA20-POLY1305-SHA256", + "TLS-DHE-RSA-WITH-AES-128-GCM-SHA256", + "TLS-DHE-RSA-WITH-AES-256-GCM-SHA384", + "TLS-DHE-DSS-WITH-AES-128-CBC-SHA", + "TLS-DHE-DSS-WITH-AES-256-CBC-SHA", + "TLS-DHE-DSS-WITH-AES-128-CBC-SHA256", + "TLS-DHE-DSS-WITH-AES-128-GCM-SHA256", + "TLS-DHE-DSS-WITH-AES-256-CBC-SHA256", + "TLS-DHE-DSS-WITH-AES-256-GCM-SHA384", + "TLS-ECDHE-RSA-WITH-AES-128-CBC-SHA256", + 
"TLS-ECDHE-RSA-WITH-AES-128-GCM-SHA256", + "TLS-ECDHE-RSA-WITH-AES-256-CBC-SHA384", + "TLS-ECDHE-RSA-WITH-AES-256-GCM-SHA384", + "TLS-ECDHE-ECDSA-WITH-AES-128-CBC-SHA", + "TLS-ECDHE-ECDSA-WITH-AES-128-CBC-SHA256", + "TLS-ECDHE-ECDSA-WITH-AES-128-GCM-SHA256", + "TLS-ECDHE-ECDSA-WITH-AES-256-CBC-SHA384", + "TLS-ECDHE-ECDSA-WITH-AES-256-GCM-SHA384", + "TLS-RSA-WITH-AES-128-GCM-SHA256", + "TLS-RSA-WITH-AES-256-GCM-SHA384", + "TLS-DHE-DSS-WITH-CAMELLIA-128-CBC-SHA", + "TLS-DHE-DSS-WITH-CAMELLIA-256-CBC-SHA", + "TLS-DHE-DSS-WITH-CAMELLIA-128-CBC-SHA256", + "TLS-DHE-DSS-WITH-CAMELLIA-256-CBC-SHA256", + "TLS-DHE-DSS-WITH-SEED-CBC-SHA", + "TLS-DHE-DSS-WITH-ARIA-128-CBC-SHA256", + "TLS-DHE-DSS-WITH-ARIA-256-CBC-SHA384", + "TLS-ECDHE-RSA-WITH-ARIA-128-CBC-SHA256", + "TLS-ECDHE-RSA-WITH-ARIA-256-CBC-SHA384", + "TLS-ECDHE-ECDSA-WITH-ARIA-128-CBC-SHA256", + "TLS-ECDHE-ECDSA-WITH-ARIA-256-CBC-SHA384", + "TLS-DHE-DSS-WITH-3DES-EDE-CBC-SHA", + "TLS-DHE-DSS-WITH-DES-CBC-SHA"]), + ssl_server_cipher_suites_priority=dict(required=False, type="str"), + ssl_server_cipher_suites_versions=dict(required=False, type="str", + choices=["ssl-3.0", "tls-1.0", "tls-1.1", "tls-1.2"]), + + ) + + module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False, ) + # MODULE PARAMGRAM + paramgram = { + "mode": module.params["mode"], + "adom": module.params["adom"], + "websphere-server": module.params["websphere_server"], + "weblogic-server": module.params["weblogic_server"], + "type": module.params["type"], + "ssl-server-session-state-type": module.params["ssl_server_session_state_type"], + "ssl-server-session-state-timeout": module.params["ssl_server_session_state_timeout"], + "ssl-server-session-state-max": module.params["ssl_server_session_state_max"], + "ssl-server-min-version": module.params["ssl_server_min_version"], + "ssl-server-max-version": module.params["ssl_server_max_version"], + "ssl-server-algorithm": module.params["ssl_server_algorithm"], + "ssl-send-empty-frags": module.params["ssl_send_empty_frags"], + "ssl-pfs": module.params["ssl_pfs"], + "ssl-mode": module.params["ssl_mode"], + "ssl-min-version": module.params["ssl_min_version"], + "ssl-max-version": module.params["ssl_max_version"], + "ssl-http-match-host": module.params["ssl_http_match_host"], + "ssl-http-location-conversion": module.params["ssl_http_location_conversion"], + "ssl-hsts-include-subdomains": module.params["ssl_hsts_include_subdomains"], + "ssl-hsts-age": module.params["ssl_hsts_age"], + "ssl-hsts": module.params["ssl_hsts"], + "ssl-hpkp-report-uri": module.params["ssl_hpkp_report_uri"], + "ssl-hpkp-primary": module.params["ssl_hpkp_primary"], + "ssl-hpkp-include-subdomains": module.params["ssl_hpkp_include_subdomains"], + "ssl-hpkp-backup": module.params["ssl_hpkp_backup"], + "ssl-hpkp-age": module.params["ssl_hpkp_age"], + "ssl-hpkp": module.params["ssl_hpkp"], + "ssl-dh-bits": module.params["ssl_dh_bits"], + "ssl-client-session-state-type": module.params["ssl_client_session_state_type"], + "ssl-client-session-state-timeout": module.params["ssl_client_session_state_timeout"], + "ssl-client-session-state-max": module.params["ssl_client_session_state_max"], + "ssl-client-renegotiation": module.params["ssl_client_renegotiation"], + "ssl-client-fallback": module.params["ssl_client_fallback"], + "ssl-certificate": module.params["ssl_certificate"], + "ssl-algorithm": module.params["ssl_algorithm"], + "srcintf-filter": module.params["srcintf_filter"], + "src-filter": module.params["src_filter"], + "service": 
module.params["service"], + "server-type": module.params["server_type"], + "protocol": module.params["protocol"], + "portmapping-type": module.params["portmapping_type"], + "portforward": module.params["portforward"], + "persistence": module.params["persistence"], + "outlook-web-access": module.params["outlook_web_access"], + "nat-source-vip": module.params["nat_source_vip"], + "name": module.params["name"], + "monitor": module.params["monitor"], + "max-embryonic-connections": module.params["max_embryonic_connections"], + "mappedport": module.params["mappedport"], + "mappedip": module.params["mappedip"], + "mapped-addr": module.params["mapped_addr"], + "ldb-method": module.params["ldb_method"], + "https-cookie-secure": module.params["https_cookie_secure"], + "http-multiplex": module.params["http_multiplex"], + "http-ip-header-name": module.params["http_ip_header_name"], + "http-ip-header": module.params["http_ip_header"], + "http-cookie-share": module.params["http_cookie_share"], + "http-cookie-path": module.params["http_cookie_path"], + "http-cookie-generation": module.params["http_cookie_generation"], + "http-cookie-domain-from-host": module.params["http_cookie_domain_from_host"], + "http-cookie-domain": module.params["http_cookie_domain"], + "http-cookie-age": module.params["http_cookie_age"], + "gratuitous-arp-interval": module.params["gratuitous_arp_interval"], + "extport": module.params["extport"], + "extip": module.params["extip"], + "extintf": module.params["extintf"], + "extaddr": module.params["extaddr"], + "dns-mapping-ttl": module.params["dns_mapping_ttl"], + "comment": module.params["comment"], + "color": module.params["color"], + "arp-reply": module.params["arp_reply"], + "dynamic_mapping": { + "arp-reply": module.params["dynamic_mapping_arp_reply"], + "color": module.params["dynamic_mapping_color"], + "comment": module.params["dynamic_mapping_comment"], + "dns-mapping-ttl": module.params["dynamic_mapping_dns_mapping_ttl"], + "extaddr": module.params["dynamic_mapping_extaddr"], + "extintf": module.params["dynamic_mapping_extintf"], + "extip": module.params["dynamic_mapping_extip"], + "extport": module.params["dynamic_mapping_extport"], + "gratuitous-arp-interval": module.params["dynamic_mapping_gratuitous_arp_interval"], + "http-cookie-age": module.params["dynamic_mapping_http_cookie_age"], + "http-cookie-domain": module.params["dynamic_mapping_http_cookie_domain"], + "http-cookie-domain-from-host": module.params["dynamic_mapping_http_cookie_domain_from_host"], + "http-cookie-generation": module.params["dynamic_mapping_http_cookie_generation"], + "http-cookie-path": module.params["dynamic_mapping_http_cookie_path"], + "http-cookie-share": module.params["dynamic_mapping_http_cookie_share"], + "http-ip-header": module.params["dynamic_mapping_http_ip_header"], + "http-ip-header-name": module.params["dynamic_mapping_http_ip_header_name"], + "http-multiplex": module.params["dynamic_mapping_http_multiplex"], + "https-cookie-secure": module.params["dynamic_mapping_https_cookie_secure"], + "ldb-method": module.params["dynamic_mapping_ldb_method"], + "mapped-addr": module.params["dynamic_mapping_mapped_addr"], + "mappedip": module.params["dynamic_mapping_mappedip"], + "mappedport": module.params["dynamic_mapping_mappedport"], + "max-embryonic-connections": module.params["dynamic_mapping_max_embryonic_connections"], + "monitor": module.params["dynamic_mapping_monitor"], + "nat-source-vip": module.params["dynamic_mapping_nat_source_vip"], + "outlook-web-access": 
module.params["dynamic_mapping_outlook_web_access"], + "persistence": module.params["dynamic_mapping_persistence"], + "portforward": module.params["dynamic_mapping_portforward"], + "portmapping-type": module.params["dynamic_mapping_portmapping_type"], + "protocol": module.params["dynamic_mapping_protocol"], + "server-type": module.params["dynamic_mapping_server_type"], + "service": module.params["dynamic_mapping_service"], + "src-filter": module.params["dynamic_mapping_src_filter"], + "srcintf-filter": module.params["dynamic_mapping_srcintf_filter"], + "ssl-algorithm": module.params["dynamic_mapping_ssl_algorithm"], + "ssl-certificate": module.params["dynamic_mapping_ssl_certificate"], + "ssl-client-fallback": module.params["dynamic_mapping_ssl_client_fallback"], + "ssl-client-renegotiation": module.params["dynamic_mapping_ssl_client_renegotiation"], + "ssl-client-session-state-max": module.params["dynamic_mapping_ssl_client_session_state_max"], + "ssl-client-session-state-timeout": module.params["dynamic_mapping_ssl_client_session_state_timeout"], + "ssl-client-session-state-type": module.params["dynamic_mapping_ssl_client_session_state_type"], + "ssl-dh-bits": module.params["dynamic_mapping_ssl_dh_bits"], + "ssl-hpkp": module.params["dynamic_mapping_ssl_hpkp"], + "ssl-hpkp-age": module.params["dynamic_mapping_ssl_hpkp_age"], + "ssl-hpkp-backup": module.params["dynamic_mapping_ssl_hpkp_backup"], + "ssl-hpkp-include-subdomains": module.params["dynamic_mapping_ssl_hpkp_include_subdomains"], + "ssl-hpkp-primary": module.params["dynamic_mapping_ssl_hpkp_primary"], + "ssl-hpkp-report-uri": module.params["dynamic_mapping_ssl_hpkp_report_uri"], + "ssl-hsts": module.params["dynamic_mapping_ssl_hsts"], + "ssl-hsts-age": module.params["dynamic_mapping_ssl_hsts_age"], + "ssl-hsts-include-subdomains": module.params["dynamic_mapping_ssl_hsts_include_subdomains"], + "ssl-http-location-conversion": module.params["dynamic_mapping_ssl_http_location_conversion"], + "ssl-http-match-host": module.params["dynamic_mapping_ssl_http_match_host"], + "ssl-max-version": module.params["dynamic_mapping_ssl_max_version"], + "ssl-min-version": module.params["dynamic_mapping_ssl_min_version"], + "ssl-mode": module.params["dynamic_mapping_ssl_mode"], + "ssl-pfs": module.params["dynamic_mapping_ssl_pfs"], + "ssl-send-empty-frags": module.params["dynamic_mapping_ssl_send_empty_frags"], + "ssl-server-algorithm": module.params["dynamic_mapping_ssl_server_algorithm"], + "ssl-server-max-version": module.params["dynamic_mapping_ssl_server_max_version"], + "ssl-server-min-version": module.params["dynamic_mapping_ssl_server_min_version"], + "ssl-server-session-state-max": module.params["dynamic_mapping_ssl_server_session_state_max"], + "ssl-server-session-state-timeout": module.params["dynamic_mapping_ssl_server_session_state_timeout"], + "ssl-server-session-state-type": module.params["dynamic_mapping_ssl_server_session_state_type"], + "type": module.params["dynamic_mapping_type"], + "weblogic-server": module.params["dynamic_mapping_weblogic_server"], + "websphere-server": module.params["dynamic_mapping_websphere_server"], + "realservers": { + "client-ip": module.params["dynamic_mapping_realservers_client_ip"], + "healthcheck": module.params["dynamic_mapping_realservers_healthcheck"], + "holddown-interval": module.params["dynamic_mapping_realservers_holddown_interval"], + "http-host": module.params["dynamic_mapping_realservers_http_host"], + "ip": module.params["dynamic_mapping_realservers_ip"], + "max-connections": 
module.params["dynamic_mapping_realservers_max_connections"], + "monitor": module.params["dynamic_mapping_realservers_monitor"], + "port": module.params["dynamic_mapping_realservers_port"], + "seq": module.params["dynamic_mapping_realservers_seq"], + "status": module.params["dynamic_mapping_realservers_status"], + "weight": module.params["dynamic_mapping_realservers_weight"], + }, + "ssl-cipher-suites": { + "cipher": module.params["dynamic_mapping_ssl_cipher_suites_cipher"], + "versions": module.params["dynamic_mapping_ssl_cipher_suites_versions"], + }, + }, + "realservers": { + "client-ip": module.params["realservers_client_ip"], + "healthcheck": module.params["realservers_healthcheck"], + "holddown-interval": module.params["realservers_holddown_interval"], + "http-host": module.params["realservers_http_host"], + "ip": module.params["realservers_ip"], + "max-connections": module.params["realservers_max_connections"], + "monitor": module.params["realservers_monitor"], + "port": module.params["realservers_port"], + "seq": module.params["realservers_seq"], + "status": module.params["realservers_status"], + "weight": module.params["realservers_weight"], + }, + "ssl-cipher-suites": { + "cipher": module.params["ssl_cipher_suites_cipher"], + "versions": module.params["ssl_cipher_suites_versions"], + }, + "ssl-server-cipher-suites": { + "cipher": module.params["ssl_server_cipher_suites_cipher"], + "priority": module.params["ssl_server_cipher_suites_priority"], + "versions": module.params["ssl_server_cipher_suites_versions"], + } + } + module.paramgram = paramgram + fmgr = None + if module._socket_path: + connection = Connection(module._socket_path) + fmgr = FortiManagerHandler(connection, module) + fmgr.tools = FMGRCommon() + else: + module.fail_json(**FAIL_SOCKET_MSG) + + list_overrides = ['dynamic_mapping', 'realservers', 'ssl-cipher-suites', 'ssl-server-cipher-suites'] + paramgram = fmgr.tools.paramgram_child_list_override(list_overrides=list_overrides, + paramgram=paramgram, module=module) + + results = DEFAULT_RESULT_OBJ + try: + results = fmgr_firewall_vip_modify(fmgr, paramgram) + fmgr.govern_response(module=module, results=results, + ansible_facts=fmgr.construct_ansible_facts(results, module.params, paramgram)) + + except Exception as err: + raise FMGBaseException(err) + + return module.exit_json(**results[1]) + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/network/fortimanager/fmgr_fwpol_ipv4.py b/plugins/modules/network/fortimanager/fmgr_fwpol_ipv4.py new file mode 100644 index 0000000000..387652fa22 --- /dev/null +++ b/plugins/modules/network/fortimanager/fmgr_fwpol_ipv4.py @@ -0,0 +1,1359 @@ +#!/usr/bin/python +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+#
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'status': ['preview'],
+                    'supported_by': 'community',
+                    'metadata_version': '1.1'}
+
+DOCUMENTATION = '''
+---
+module: fmgr_fwpol_ipv4
+notes:
+  - Full Documentation at U(https://ftnt-ansible-docs.readthedocs.io/en/latest/).
+author:
+  - Luke Weighall (@lweighall)
+  - Andrew Welsh (@Ghilli3)
+  - Jim Huber (@p4r4n0y1ng)
+short_description: Allows the add/delete of Firewall Policies on Packages in FortiManager.
+description:
+  - Allows the add/delete of Firewall Policies on Packages in FortiManager.
+
+options:
+  adom:
+    description:
+      - The ADOM the configuration should belong to.
+    required: false
+    default: root
+
+  mode:
+    description:
+      - Sets one of three modes for managing the object.
+      - Allows use of soft-adds instead of overwriting existing values.
+    choices: ['add', 'set', 'delete', 'update']
+    required: false
+    default: add
+
+  package_name:
+    description:
+      - The policy package you want to modify.
+    required: false
+    default: "default"
+
+  fail_on_missing_dependency:
+    description:
+      - Normal behavior is to "skip" tasks that fail dependency checks, so other tasks can run.
+      - If set to "enable", a failed dependency check causes Ansible to exit with a failure instead of skipping the task.
+    required: false
+    default: "disable"
+    choices: ["enable", "disable"]
+
+  wsso:
+    description:
+      - Enable/disable WiFi Single Sign On (WSSO).
+      - choice | disable | Disable setting.
+      - choice | enable | Enable setting.
+    required: false
+    choices: ["disable", "enable"]
+
+  webfilter_profile:
+    description:
+      - Name of an existing Web filter profile.
+    required: false
+
+  webcache_https:
+    description:
+      - Enable/disable web cache for HTTPS.
+      - choice | disable | Disable web cache for HTTPS.
+      - choice | enable | Enable web cache for HTTPS.
+    required: false
+    choices: ["disable", "enable"]
+
+  webcache:
+    description:
+      - Enable/disable web cache.
+      - choice | disable | Disable setting.
+      - choice | enable | Enable setting.
+    required: false
+    choices: ["disable", "enable"]
+
+  wccp:
+    description:
+      - Enable/disable forwarding traffic matching this policy to a configured WCCP server.
+      - choice | disable | Disable WCCP setting.
+      - choice | enable | Enable WCCP setting.
+    required: false
+    choices: ["disable", "enable"]
+
+  wanopt_profile:
+    description:
+      - WAN optimization profile.
+    required: false
+
+  wanopt_peer:
+    description:
+      - WAN optimization peer.
+    required: false
+
+  wanopt_passive_opt:
+    description:
+      - WAN optimization passive mode options. This option decides what IP address will be used to connect to the server.
+      - choice | default | Allow client side WAN opt peer to decide.
+      - choice | transparent | Use address of client to connect to server.
+      - choice | non-transparent | Use local FortiGate address to connect to server.
+    required: false
+    choices: ["default", "transparent", "non-transparent"]
+
+  wanopt_detection:
+    description:
+      - WAN optimization auto-detection mode.
+      - choice | active | Active WAN optimization peer auto-detection.
+      - choice | passive | Passive WAN optimization peer auto-detection.
+      - choice | off | Turn off WAN optimization peer auto-detection.
+    required: false
+    choices: ["active", "passive", "off"]
+
+  wanopt:
+    description:
+      - Enable/disable WAN optimization.
+      - choice | disable | Disable setting.
+      - choice | enable | Enable setting.
+    required: false
+    choices: ["disable", "enable"]
+
+  waf_profile:
+    description:
+      - Name of an existing Web application firewall profile.
+    required: false
+
+  vpntunnel:
+    description:
+      - Policy-based IPsec VPN | name of the IPsec VPN Phase 1.
+    required: false
+
+  voip_profile:
+    description:
+      - Name of an existing VoIP profile.
+    required: false
+
+  vlan_filter:
+    description:
+      - Set VLAN filters.
+    required: false
+
+  vlan_cos_rev:
+    description:
+      - VLAN reverse direction user priority | 255 passthrough, 0 lowest, 7 highest.
+    required: false
+
+  vlan_cos_fwd:
+    description:
+      - VLAN forward direction user priority | 255 passthrough, 0 lowest, 7 highest.
+    required: false
+
+  utm_status:
+    description:
+      - Enable to add one or more security profiles (AV, IPS, etc.) to the firewall policy.
+      - choice | disable | Disable setting.
+      - choice | enable | Enable setting.
+    required: false
+    choices: ["disable", "enable"]
+
+  users:
+    description:
+      - Names of individual users that can authenticate with this policy.
+    required: false
+
+  url_category:
+    description:
+      - URL category ID list.
+    required: false
+
+  traffic_shaper_reverse:
+    description:
+      - Reverse traffic shaper.
+    required: false
+
+  traffic_shaper:
+    description:
+      - Traffic shaper.
+    required: false
+
+  timeout_send_rst:
+    description:
+      - Enable/disable sending RST packets when TCP sessions expire.
+      - choice | disable | Disable sending of RST packet upon TCP session expiration.
+      - choice | enable | Enable sending of RST packet upon TCP session expiration.
+    required: false
+    choices: ["disable", "enable"]
+
+  tcp_session_without_syn:
+    description:
+      - Enable/disable creation of TCP session without SYN flag.
+      - choice | all | Enable TCP session without SYN.
+      - choice | data-only | Enable TCP session data only.
+      - choice | disable | Disable TCP session without SYN.
+    required: false
+    choices: ["all", "data-only", "disable"]
+
+  tcp_mss_sender:
+    description:
+      - Sender TCP maximum segment size (MSS).
+    required: false
+
+  tcp_mss_receiver:
+    description:
+      - Receiver TCP maximum segment size (MSS).
+    required: false
+
+  status:
+    description:
+      - Enable or disable this policy.
+      - choice | disable | Disable setting.
+      - choice | enable | Enable setting.
+    required: false
+    choices: ["disable", "enable"]
+
+  ssl_ssh_profile:
+    description:
+      - Name of an existing SSL SSH profile.
+    required: false
+
+  ssl_mirror_intf:
+    description:
+      - SSL mirror interface name.
+    required: false
+
+  ssl_mirror:
+    description:
+      - Enable to copy decrypted SSL traffic to a FortiGate interface (called SSL mirroring).
+      - choice | disable | Disable SSL mirror.
+      - choice | enable | Enable SSL mirror.
+    required: false
+    choices: ["disable", "enable"]
+
+  ssh_filter_profile:
+    description:
+      - Name of an existing SSH filter profile.
+    required: false
+
+  srcintf:
+    description:
+      - Incoming (ingress) interface.
+    required: false
+
+  srcaddr_negate:
+    description:
+      - When enabled srcaddr specifies what the source address must NOT be.
+      - choice | disable | Disable source address negate.
+      - choice | enable | Enable source address negate.
+    required: false
+    choices: ["disable", "enable"]
+
+  srcaddr:
+    description:
+      - Source address and address group names.
+    required: false
+
+  spamfilter_profile:
+    description:
+      - Name of an existing Spam filter profile.
+    required: false
+
+  session_ttl:
+    description:
+      - TTL in seconds for sessions accepted by this policy (0 means use the system default session TTL).
+ required: false + + service_negate: + description: + - When enabled service specifies what the service must NOT be. + - choice | disable | Disable negated service match. + - choice | enable | Enable negated service match. + required: false + choices: ["disable", "enable"] + + service: + description: + - Service and service group names. + required: false + + send_deny_packet: + description: + - Enable to send a reply when a session is denied or blocked by a firewall policy. + - choice | disable | Disable deny-packet sending. + - choice | enable | Enable deny-packet sending. + required: false + choices: ["disable", "enable"] + + schedule_timeout: + description: + - Enable to force current sessions to end when the schedule object times out. + - choice | disable | Disable schedule timeout. + - choice | enable | Enable schedule timeout. + required: false + choices: ["disable", "enable"] + + schedule: + description: + - Schedule name. + required: false + + scan_botnet_connections: + description: + - Block or monitor connections to Botnet servers or disable Botnet scanning. + - choice | disable | Do not scan connections to botnet servers. + - choice | block | Block connections to botnet servers. + - choice | monitor | Log connections to botnet servers. + required: false + choices: ["disable", "block", "monitor"] + + rtp_nat: + description: + - Enable Real Time Protocol (RTP) NAT. + - choice | disable | Disable setting. + - choice | enable | Enable setting. + required: false + choices: ["disable", "enable"] + + rtp_addr: + description: + - Address names if this is an RTP NAT policy. + required: false + + rsso: + description: + - Enable/disable RADIUS single sign-on (RSSO). + - choice | disable | Disable setting. + - choice | enable | Enable setting. + required: false + choices: ["disable", "enable"] + + replacemsg_override_group: + description: + - Override the default replacement message group for this policy. + required: false + + redirect_url: + description: + - URL users are directed to after seeing and accepting the disclaimer or authenticating. + required: false + + radius_mac_auth_bypass: + description: + - Enable MAC authentication bypass. The bypassed MAC address must be received from RADIUS server. + - choice | disable | Disable MAC authentication bypass. + - choice | enable | Enable MAC authentication bypass. + required: false + choices: ["disable", "enable"] + + profile_type: + description: + - Determine whether the firewall policy allows security profile groups or single profiles only. + - choice | single | Do not allow security profile groups. + - choice | group | Allow security profile groups. + required: false + choices: ["single", "group"] + + profile_protocol_options: + description: + - Name of an existing Protocol options profile. + required: false + + profile_group: + description: + - Name of profile group. + required: false + + poolname: + description: + - IP Pool names. + required: false + + policyid: + description: + - Policy ID. + required: false + + permit_stun_host: + description: + - Accept UDP packets from any Session Traversal Utilities for NAT (STUN) host. + - choice | disable | Disable setting. + - choice | enable | Enable setting. + required: false + choices: ["disable", "enable"] + + permit_any_host: + description: + - Accept UDP packets from any host. + - choice | disable | Disable setting. + - choice | enable | Enable setting. + required: false + choices: ["disable", "enable"] + + per_ip_shaper: + description: + - Per-IP traffic shaper. 
+ required: false + + outbound: + description: + - Policy-based IPsec VPN | only traffic from the internal network can initiate a VPN. + - choice | disable | Disable setting. + - choice | enable | Enable setting. + required: false + choices: ["disable", "enable"] + + ntlm_guest: + description: + - Enable/disable NTLM guest user access. + - choice | disable | Disable setting. + - choice | enable | Enable setting. + required: false + choices: ["disable", "enable"] + + ntlm_enabled_browsers: + description: + - HTTP-User-Agent value of supported browsers. + required: false + + ntlm: + description: + - Enable/disable NTLM authentication. + - choice | disable | Disable setting. + - choice | enable | Enable setting. + required: false + choices: ["disable", "enable"] + + np_acceleration: + description: + - Enable/disable UTM Network Processor acceleration. + - choice | disable | Disable UTM Network Processor acceleration. + - choice | enable | Enable UTM Network Processor acceleration. + required: false + choices: ["disable", "enable"] + + natoutbound: + description: + - Policy-based IPsec VPN | apply source NAT to outbound traffic. + - choice | disable | Disable setting. + - choice | enable | Enable setting. + required: false + choices: ["disable", "enable"] + + natip: + description: + - Policy-based IPsec VPN | source NAT IP address for outgoing traffic. + required: false + + natinbound: + description: + - Policy-based IPsec VPN | apply destination NAT to inbound traffic. + - choice | disable | Disable setting. + - choice | enable | Enable setting. + required: false + choices: ["disable", "enable"] + + nat: + description: + - Enable/disable source NAT. + - choice | disable | Disable setting. + - choice | enable | Enable setting. + required: false + choices: ["disable", "enable"] + + name: + description: + - Policy name. + required: false + + mms_profile: + description: + - Name of an existing MMS profile. + required: false + + match_vip: + description: + - Enable to match packets that have had their destination addresses changed by a VIP. + - choice | disable | Do not match DNATed packet. + - choice | enable | Match DNATed packet. + required: false + choices: ["disable", "enable"] + + logtraffic_start: + description: + - Record logs when a session starts and ends. + - choice | disable | Disable setting. + - choice | enable | Enable setting. + required: false + choices: ["disable", "enable"] + + logtraffic: + description: + - Enable or disable logging. Log all sessions or security profile sessions. + - choice | disable | Disable all logging for this policy. + - choice | all | Log all sessions accepted or denied by this policy. + - choice | utm | Log traffic that has a security profile applied to it. + required: false + choices: ["disable", "all", "utm"] + + learning_mode: + description: + - Enable to allow everything, but log all of the meaningful data for security information gathering. + - choice | disable | Disable learning mode in firewall policy. + - choice | enable | Enable learning mode in firewall policy. + required: false + choices: ["disable", "enable"] + + label: + description: + - Label for the policy that appears when the GUI is in Section View mode. + required: false + + ips_sensor: + description: + - Name of an existing IPS sensor. + required: false + + ippool: + description: + - Enable to use IP Pools for source NAT. + - choice | disable | Disable setting. + - choice | enable | Enable setting. 
+ required: false + choices: ["disable", "enable"] + + internet_service_src_negate: + description: + - When enabled internet-service-src specifies what the service must NOT be. + - choice | disable | Disable negated Internet Service source match. + - choice | enable | Enable negated Internet Service source match. + required: false + choices: ["disable", "enable"] + + internet_service_src_id: + description: + - Internet Service source ID. + required: false + + internet_service_src_custom: + description: + - Custom Internet Service source name. + required: false + + internet_service_src: + description: + - Enable/disable use of Internet Services in source for this policy. If enabled, source address is not used. + - choice | disable | Disable use of Internet Services source in policy. + - choice | enable | Enable use of Internet Services source in policy. + required: false + choices: ["disable", "enable"] + + internet_service_negate: + description: + - When enabled internet-service specifies what the service must NOT be. + - choice | disable | Disable negated Internet Service match. + - choice | enable | Enable negated Internet Service match. + required: false + choices: ["disable", "enable"] + + internet_service_id: + description: + - Internet Service ID. + required: false + + internet_service_custom: + description: + - Custom Internet Service name. + required: false + + internet_service: + description: + - Enable/disable use of Internet Services for this policy. If enabled, dstaddr and service are not used. + - choice | disable | Disable use of Internet Services in policy. + - choice | enable | Enable use of Internet Services in policy. + required: false + choices: ["disable", "enable"] + + inbound: + description: + - Policy-based IPsec VPN | only traffic from the remote network can initiate a VPN. + - choice | disable | Disable setting. + - choice | enable | Enable setting. + required: false + choices: ["disable", "enable"] + + identity_based_route: + description: + - Name of identity-based routing rule. + required: false + + icap_profile: + description: + - Name of an existing ICAP profile. + required: false + + gtp_profile: + description: + - GTP profile. + required: false + + groups: + description: + - Names of user groups that can authenticate with this policy. + required: false + + global_label: + description: + - Label for the policy that appears when the GUI is in Global View mode. + required: false + + fsso_agent_for_ntlm: + description: + - FSSO agent to use for NTLM authentication. + required: false + + fsso: + description: + - Enable/disable Fortinet Single Sign-On. + - choice | disable | Disable setting. + - choice | enable | Enable setting. + required: false + choices: ["disable", "enable"] + + fixedport: + description: + - Enable to prevent source NAT from changing a session's source port. + - choice | disable | Disable setting. + - choice | enable | Enable setting. + required: false + choices: ["disable", "enable"] + + firewall_session_dirty: + description: + - How to handle sessions if the configuration of this firewall policy changes. + - choice | check-all | Flush all current sessions accepted by this policy. + - choice | check-new | Continue to allow sessions already accepted by this policy. + required: false + choices: ["check-all", "check-new"] + + dstintf: + description: + - Outgoing (egress) interface. + required: false + + dstaddr_negate: + description: + - When enabled dstaddr specifies what the destination address must NOT be. 
+      - choice | disable | Disable destination address negate.
+      - choice | enable | Enable destination address negate.
+    required: false
+    choices: ["disable", "enable"]
+
+  dstaddr:
+    description:
+      - Destination address and address group names.
+    required: false
+
+  dsri:
+    description:
+      - Enable DSRI to ignore HTTP server responses.
+      - choice | disable | Disable DSRI.
+      - choice | enable | Enable DSRI.
+    required: false
+    choices: ["disable", "enable"]
+
+  dscp_value:
+    description:
+      - DSCP value.
+    required: false
+
+  dscp_negate:
+    description:
+      - Enable negated DSCP match.
+      - choice | disable | Disable DSCP negate.
+      - choice | enable | Enable DSCP negate.
+    required: false
+    choices: ["disable", "enable"]
+
+  dscp_match:
+    description:
+      - Enable DSCP check.
+      - choice | disable | Disable DSCP check.
+      - choice | enable | Enable DSCP check.
+    required: false
+    choices: ["disable", "enable"]
+
+  dnsfilter_profile:
+    description:
+      - Name of an existing DNS filter profile.
+    required: false
+
+  dlp_sensor:
+    description:
+      - Name of an existing DLP sensor.
+    required: false
+
+  disclaimer:
+    description:
+      - Enable/disable user authentication disclaimer.
+      - choice | disable | Disable user authentication disclaimer.
+      - choice | enable | Enable user authentication disclaimer.
+    required: false
+    choices: ["disable", "enable"]
+
+  diffservcode_rev:
+    description:
+      - Change packet's reverse (reply) DiffServ to this value.
+    required: false
+
+  diffservcode_forward:
+    description:
+      - Change packet's DiffServ to this value.
+    required: false
+
+  diffserv_reverse:
+    description:
+      - Enable to change packet's reverse (reply) DiffServ values to the specified diffservcode-rev value.
+      - choice | disable | Disable setting.
+      - choice | enable | Enable setting.
+    required: false
+    choices: ["disable", "enable"]
+
+  diffserv_forward:
+    description:
+      - Enable to change packet's DiffServ values to the specified diffservcode-forward value.
+      - choice | disable | Disable setting.
+      - choice | enable | Enable setting.
+    required: false
+    choices: ["disable", "enable"]
+
+  devices:
+    description:
+      - Names of devices or device groups that can be matched by the policy.
+    required: false
+
+  delay_tcp_npu_session:
+    description:
+      - Enable TCP NPU session delay to guarantee packet order of 3-way handshake.
+      - choice | disable | Disable TCP NPU session delay in order to guarantee packet order of 3-way handshake.
+      - choice | enable | Enable TCP NPU session delay in order to guarantee packet order of 3-way handshake.
+    required: false
+    choices: ["disable", "enable"]
+
+  custom_log_fields:
+    description:
+      - Custom fields to append to log messages for this policy.
+    required: false
+
+  comments:
+    description:
+      - Comment.
+    required: false
+
+  capture_packet:
+    description:
+      - Enable/disable capture packets.
+      - choice | disable | Disable capture packets.
+      - choice | enable | Enable capture packets.
+    required: false
+    choices: ["disable", "enable"]
+
+  captive_portal_exempt:
+    description:
+      - Enable to exempt some users from the captive portal.
+      - choice | disable | Disable exemption of captive portal.
+      - choice | enable | Enable exemption of captive portal.
+    required: false
+    choices: ["disable", "enable"]
+
+  block_notification:
+    description:
+      - Enable/disable block notification.
+      - choice | disable | Disable setting.
+      - choice | enable | Enable setting.
+    required: false
+    choices: ["disable", "enable"]
+
+  av_profile:
+    description:
+      - Name of an existing Antivirus profile.
+    required: false
+
+  auto_asic_offload:
+    description:
+      - Enable/disable offloading security profile processing to CP processors.
+      - choice | disable | Disable ASIC offloading.
+      - choice | enable | Enable auto ASIC offloading.
+    required: false
+    choices: ["disable", "enable"]
+
+  auth_redirect_addr:
+    description:
+      - HTTP-to-HTTPS redirect address for firewall authentication.
+    required: false
+
+  auth_path:
+    description:
+      - Enable/disable authentication-based routing.
+      - choice | disable | Disable authentication-based routing.
+      - choice | enable | Enable authentication-based routing.
+    required: false
+    choices: ["disable", "enable"]
+
+  auth_cert:
+    description:
+      - HTTPS server certificate for policy authentication.
+    required: false
+
+  application_list:
+    description:
+      - Name of an existing Application list.
+    required: false
+
+  application:
+    description:
+      - Application ID list.
+    required: false
+
+  app_group:
+    description:
+      - Application group names.
+    required: false
+
+  app_category:
+    description:
+      - Application category ID list.
+    required: false
+
+  action:
+    description:
+      - Policy action (allow/deny/ipsec).
+      - choice | deny | Blocks sessions that match the firewall policy.
+      - choice | accept | Allows sessions that match the firewall policy.
+      - choice | ipsec | Firewall policy becomes a policy-based IPsec VPN policy.
+    required: false
+    choices: ["deny", "accept", "ipsec"]
+
+  vpn_dst_node:
+    description:
+      - EXPERTS ONLY! KNOWLEDGE OF FMGR JSON API IS REQUIRED!
+      - List of multiple child objects to be added. Expects a list of dictionaries.
+      - Dictionaries must use FortiManager API parameters, not the ansible ones listed below.
+      - If submitted, all other prefixed sub-parameters ARE IGNORED. This object is MUTUALLY EXCLUSIVE with its options.
+      - We expect that you know what you are doing with these list parameters, and are leveraging the JSON API Guide.
+    required: false
+
+  vpn_dst_node_host:
+    description:
+      - VPN Destination Node Host.
+    required: false
+
+  vpn_dst_node_seq:
+    description:
+      - VPN Destination Node Seq.
+    required: false
+
+  vpn_dst_node_subnet:
+    description:
+      - VPN Destination Node Subnet.
+    required: false
+
+  vpn_src_node:
+    description:
+      - EXPERTS ONLY! KNOWLEDGE OF FMGR JSON API IS REQUIRED!
+      - List of multiple child objects to be added. Expects a list of dictionaries.
+      - Dictionaries must use FortiManager API parameters, not the ansible ones listed below.
+      - If submitted, all other prefixed sub-parameters ARE IGNORED. This object is MUTUALLY EXCLUSIVE with its options.
+      - We expect that you know what you are doing with these list parameters, and are leveraging the JSON API Guide.
+    required: false
+
+  vpn_src_node_host:
+    description:
+      - VPN Source Node Host.
+    required: false
+
+  vpn_src_node_seq:
+    description:
+      - VPN Source Node Seq.
+    required: false
+
+  vpn_src_node_subnet:
+    description:
+      - VPN Source Node Subnet.
+    required: false
+
+
+'''
+
+EXAMPLES = '''
+- name: ADD VERY BASIC IPV4 POLICY WITH NO NAT (WIDE OPEN)
+  fmgr_fwpol_ipv4:
+    mode: "set"
+    adom: "ansible"
+    package_name: "default"
+    name: "Basic_IPv4_Policy"
+    comments: "Created by Ansible"
+    action: "accept"
+    dstaddr: "all"
+    srcaddr: "all"
+    dstintf: "any"
+    srcintf: "any"
+    logtraffic: "utm"
+    service: "ALL"
+    schedule: "always"
+
+- name: ADD VERY BASIC IPV4 POLICY WITH NAT AND MULTIPLE ENTRIES
+  fmgr_fwpol_ipv4:
+    mode: "set"
+    adom: "ansible"
+    package_name: "default"
+    name: "Basic_IPv4_Policy_2"
+    comments: "Created by Ansible"
+    action: "accept"
+    dstaddr: "google-play"
+    srcaddr: "all"
+    dstintf: "any"
+    srcintf: "any"
+    logtraffic: "utm"
+    service: "HTTP, HTTPS"
+    schedule: "always"
+    nat: "enable"
+    users: "karen, kevin"
+
+- name: ADD VERY BASIC IPV4 POLICY WITH NAT AND MULTIPLE ENTRIES AND SEC PROFILES
+  fmgr_fwpol_ipv4:
+    mode: "set"
+    adom: "ansible"
+    package_name: "default"
+    name: "Basic_IPv4_Policy_3"
+    comments: "Created by Ansible"
+    action: "accept"
+    dstaddr: "google-play, autoupdate.opera.com"
+    srcaddr: "corp_internal"
+    dstintf: "zone_wan1, zone_wan2"
+    srcintf: "zone_int1"
+    logtraffic: "utm"
+    service: "HTTP, HTTPS"
+    schedule: "always"
+    nat: "enable"
+    users: "karen, kevin"
+    av_profile: "sniffer-profile"
+    ips_sensor: "default"
+
+'''
+
+RETURN = """
+api_result:
+  description: full API response, includes status code and message
+  returned: always
+  type: str
+"""
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.connection import Connection
+from ansible_collections.fortinet.fortios.plugins.module_utils.network.fortimanager.fortimanager import FortiManagerHandler
+from ansible_collections.fortinet.fortios.plugins.module_utils.network.fortimanager.common import FMGBaseException
+from ansible_collections.fortinet.fortios.plugins.module_utils.network.fortimanager.common import FMGRCommon
+from ansible_collections.fortinet.fortios.plugins.module_utils.network.fortimanager.common import FMGRMethods
+from ansible_collections.fortinet.fortios.plugins.module_utils.network.fortimanager.common import DEFAULT_RESULT_OBJ
+from ansible_collections.fortinet.fortios.plugins.module_utils.network.fortimanager.common import FAIL_SOCKET_MSG
+from ansible_collections.fortinet.fortios.plugins.module_utils.network.fortimanager.common import prepare_dict
+from ansible_collections.fortinet.fortios.plugins.module_utils.network.fortimanager.common import scrub_dict
+
+
+def fmgr_firewall_policy_modify(fmgr, paramgram):
+    """
+    fmgr_firewall_policy -- Adds/Sets/Deletes Firewall Policy Objects defined in the "paramgram"
+
+    :param fmgr: The fmgr object instance from fmgr_utils.py
+    :type fmgr: class object
+    :param paramgram: The formatted dictionary of options to process
+    :type paramgram: dict
+
+    :return: The response from the FortiManager
+    :rtype: dict
+    """
+
+    mode = paramgram["mode"]
+    adom = paramgram["adom"]
+    # INIT BASIC OBJECTS
+    response = DEFAULT_RESULT_OBJ
+    url = ""
+    datagram = {}
+
+    # EVAL THE MODE PARAMETER FOR SET OR ADD
+    if mode in ['set', 'add', 'update']:
+        url = '/pm/config/adom/{adom}/pkg/{pkg}/firewall/policy'.format(adom=adom, pkg=paramgram["package_name"])
+        datagram = scrub_dict(prepare_dict(paramgram))
+        del datagram["package_name"]
+        datagram = fmgr._tools.split_comma_strings_into_lists(datagram)
+
+    # EVAL THE MODE PARAMETER FOR DELETE
+    elif mode == "delete":
+        url = '/pm/config/adom/{adom}/pkg/{pkg}/firewall' \
'/policy/{policyid}'.format(adom=paramgram["adom"], + pkg=paramgram["package_name"], + policyid=paramgram["policyid"]) + datagram = { + "policyid": paramgram["policyid"] + } + + response = fmgr.process_request(url, datagram, paramgram["mode"]) + return response + + +############# +# END METHODS +############# + + +def main(): + argument_spec = dict( + adom=dict(type="str", default="root"), + mode=dict(choices=["add", "set", "delete", "update"], type="str", default="add"), + package_name=dict(type="str", required=False, default="default"), + fail_on_missing_dependency=dict(type="str", required=False, default="disable", choices=["enable", + "disable"]), + wsso=dict(required=False, type="str", choices=["disable", "enable"]), + webfilter_profile=dict(required=False, type="str"), + webcache_https=dict(required=False, type="str", choices=["disable", "enable"]), + webcache=dict(required=False, type="str", choices=["disable", "enable"]), + wccp=dict(required=False, type="str", choices=["disable", "enable"]), + wanopt_profile=dict(required=False, type="str"), + wanopt_peer=dict(required=False, type="str"), + wanopt_passive_opt=dict(required=False, type="str", choices=["default", "transparent", "non-transparent"]), + wanopt_detection=dict(required=False, type="str", choices=["active", "passive", "off"]), + wanopt=dict(required=False, type="str", choices=["disable", "enable"]), + waf_profile=dict(required=False, type="str"), + vpntunnel=dict(required=False, type="str"), + voip_profile=dict(required=False, type="str"), + vlan_filter=dict(required=False, type="str"), + vlan_cos_rev=dict(required=False, type="int"), + vlan_cos_fwd=dict(required=False, type="int"), + utm_status=dict(required=False, type="str", choices=["disable", "enable"]), + users=dict(required=False, type="str"), + url_category=dict(required=False, type="str"), + traffic_shaper_reverse=dict(required=False, type="str"), + traffic_shaper=dict(required=False, type="str"), + timeout_send_rst=dict(required=False, type="str", choices=["disable", "enable"]), + tcp_session_without_syn=dict(required=False, type="str", choices=["all", "data-only", "disable"]), + tcp_mss_sender=dict(required=False, type="int"), + tcp_mss_receiver=dict(required=False, type="int"), + status=dict(required=False, type="str", choices=["disable", "enable"]), + ssl_ssh_profile=dict(required=False, type="str"), + ssl_mirror_intf=dict(required=False, type="str"), + ssl_mirror=dict(required=False, type="str", choices=["disable", "enable"]), + ssh_filter_profile=dict(required=False, type="str"), + srcintf=dict(required=False, type="str"), + srcaddr_negate=dict(required=False, type="str", choices=["disable", "enable"]), + srcaddr=dict(required=False, type="str"), + spamfilter_profile=dict(required=False, type="str"), + session_ttl=dict(required=False, type="int"), + service_negate=dict(required=False, type="str", choices=["disable", "enable"]), + service=dict(required=False, type="str"), + send_deny_packet=dict(required=False, type="str", choices=["disable", "enable"]), + schedule_timeout=dict(required=False, type="str", choices=["disable", "enable"]), + schedule=dict(required=False, type="str"), + scan_botnet_connections=dict(required=False, type="str", choices=["disable", "block", "monitor"]), + rtp_nat=dict(required=False, type="str", choices=["disable", "enable"]), + rtp_addr=dict(required=False, type="str"), + rsso=dict(required=False, type="str", choices=["disable", "enable"]), + replacemsg_override_group=dict(required=False, type="str"), + 
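+        # NOTE: each snake_case option in this argument_spec is remapped to its
+        # hyphenated FortiManager API field name in the paramgram dictionary
+        # built in main() below (for example, redirect_url is sent to the API
+        # as "redirect-url"). Ansible option names avoid hyphens, which is why
+        # the module carries both spellings.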
redirect_url=dict(required=False, type="str"), + radius_mac_auth_bypass=dict(required=False, type="str", choices=["disable", "enable"]), + profile_type=dict(required=False, type="str", choices=["single", "group"]), + profile_protocol_options=dict(required=False, type="str"), + profile_group=dict(required=False, type="str"), + poolname=dict(required=False, type="str"), + policyid=dict(required=False, type="str"), + permit_stun_host=dict(required=False, type="str", choices=["disable", "enable"]), + permit_any_host=dict(required=False, type="str", choices=["disable", "enable"]), + per_ip_shaper=dict(required=False, type="str"), + outbound=dict(required=False, type="str", choices=["disable", "enable"]), + ntlm_guest=dict(required=False, type="str", choices=["disable", "enable"]), + ntlm_enabled_browsers=dict(required=False, type="str"), + ntlm=dict(required=False, type="str", choices=["disable", "enable"]), + np_acceleration=dict(required=False, type="str", choices=["disable", "enable"]), + natoutbound=dict(required=False, type="str", choices=["disable", "enable"]), + natip=dict(required=False, type="str"), + natinbound=dict(required=False, type="str", choices=["disable", "enable"]), + nat=dict(required=False, type="str", choices=["disable", "enable"]), + name=dict(required=False, type="str"), + mms_profile=dict(required=False, type="str"), + match_vip=dict(required=False, type="str", choices=["disable", "enable"]), + logtraffic_start=dict(required=False, type="str", choices=["disable", "enable"]), + logtraffic=dict(required=False, type="str", choices=["disable", "all", "utm"]), + learning_mode=dict(required=False, type="str", choices=["disable", "enable"]), + label=dict(required=False, type="str"), + ips_sensor=dict(required=False, type="str"), + ippool=dict(required=False, type="str", choices=["disable", "enable"]), + internet_service_src_negate=dict(required=False, type="str", choices=["disable", "enable"]), + internet_service_src_id=dict(required=False, type="str"), + internet_service_src_custom=dict(required=False, type="str"), + internet_service_src=dict(required=False, type="str", choices=["disable", "enable"]), + internet_service_negate=dict(required=False, type="str", choices=["disable", "enable"]), + internet_service_id=dict(required=False, type="str"), + internet_service_custom=dict(required=False, type="str"), + internet_service=dict(required=False, type="str", choices=["disable", "enable"]), + inbound=dict(required=False, type="str", choices=["disable", "enable"]), + identity_based_route=dict(required=False, type="str"), + icap_profile=dict(required=False, type="str"), + gtp_profile=dict(required=False, type="str"), + groups=dict(required=False, type="str"), + global_label=dict(required=False, type="str"), + fsso_agent_for_ntlm=dict(required=False, type="str"), + fsso=dict(required=False, type="str", choices=["disable", "enable"]), + fixedport=dict(required=False, type="str", choices=["disable", "enable"]), + firewall_session_dirty=dict(required=False, type="str", choices=["check-all", "check-new"]), + dstintf=dict(required=False, type="str"), + dstaddr_negate=dict(required=False, type="str", choices=["disable", "enable"]), + dstaddr=dict(required=False, type="str"), + dsri=dict(required=False, type="str", choices=["disable", "enable"]), + dscp_value=dict(required=False, type="str"), + dscp_negate=dict(required=False, type="str", choices=["disable", "enable"]), + dscp_match=dict(required=False, type="str", choices=["disable", "enable"]), + dnsfilter_profile=dict(required=False, 
type="str"), + dlp_sensor=dict(required=False, type="str"), + disclaimer=dict(required=False, type="str", choices=["disable", "enable"]), + diffservcode_rev=dict(required=False, type="str"), + diffservcode_forward=dict(required=False, type="str"), + diffserv_reverse=dict(required=False, type="str", choices=["disable", "enable"]), + diffserv_forward=dict(required=False, type="str", choices=["disable", "enable"]), + devices=dict(required=False, type="str"), + delay_tcp_npu_session=dict(required=False, type="str", choices=["disable", "enable"]), + custom_log_fields=dict(required=False, type="str"), + comments=dict(required=False, type="str"), + capture_packet=dict(required=False, type="str", choices=["disable", "enable"]), + captive_portal_exempt=dict(required=False, type="str", choices=["disable", "enable"]), + block_notification=dict(required=False, type="str", choices=["disable", "enable"]), + av_profile=dict(required=False, type="str"), + auto_asic_offload=dict(required=False, type="str", choices=["disable", "enable"]), + auth_redirect_addr=dict(required=False, type="str"), + auth_path=dict(required=False, type="str", choices=["disable", "enable"]), + auth_cert=dict(required=False, type="str"), + application_list=dict(required=False, type="str"), + application=dict(required=False, type="str"), + app_group=dict(required=False, type="str"), + app_category=dict(required=False, type="str"), + action=dict(required=False, type="str", choices=["deny", "accept", "ipsec"]), + vpn_dst_node=dict(required=False, type="list"), + vpn_dst_node_host=dict(required=False, type="str"), + vpn_dst_node_seq=dict(required=False, type="str"), + vpn_dst_node_subnet=dict(required=False, type="str"), + vpn_src_node=dict(required=False, type="list"), + vpn_src_node_host=dict(required=False, type="str"), + vpn_src_node_seq=dict(required=False, type="str"), + vpn_src_node_subnet=dict(required=False, type="str"), + + ) + + module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False, ) + # MODULE PARAMGRAM + paramgram = { + "mode": module.params["mode"], + "adom": module.params["adom"], + "package_name": module.params["package_name"], + "wsso": module.params["wsso"], + "webfilter-profile": module.params["webfilter_profile"], + "webcache-https": module.params["webcache_https"], + "webcache": module.params["webcache"], + "wccp": module.params["wccp"], + "wanopt-profile": module.params["wanopt_profile"], + "wanopt-peer": module.params["wanopt_peer"], + "wanopt-passive-opt": module.params["wanopt_passive_opt"], + "wanopt-detection": module.params["wanopt_detection"], + "wanopt": module.params["wanopt"], + "waf-profile": module.params["waf_profile"], + "vpntunnel": module.params["vpntunnel"], + "voip-profile": module.params["voip_profile"], + "vlan-filter": module.params["vlan_filter"], + "vlan-cos-rev": module.params["vlan_cos_rev"], + "vlan-cos-fwd": module.params["vlan_cos_fwd"], + "utm-status": module.params["utm_status"], + "users": module.params["users"], + "url-category": module.params["url_category"], + "traffic-shaper-reverse": module.params["traffic_shaper_reverse"], + "traffic-shaper": module.params["traffic_shaper"], + "timeout-send-rst": module.params["timeout_send_rst"], + "tcp-session-without-syn": module.params["tcp_session_without_syn"], + "tcp-mss-sender": module.params["tcp_mss_sender"], + "tcp-mss-receiver": module.params["tcp_mss_receiver"], + "status": module.params["status"], + "ssl-ssh-profile": module.params["ssl_ssh_profile"], + "ssl-mirror-intf": module.params["ssl_mirror_intf"], 
+ "ssl-mirror": module.params["ssl_mirror"], + "ssh-filter-profile": module.params["ssh_filter_profile"], + "srcintf": module.params["srcintf"], + "srcaddr-negate": module.params["srcaddr_negate"], + "srcaddr": module.params["srcaddr"], + "spamfilter-profile": module.params["spamfilter_profile"], + "session-ttl": module.params["session_ttl"], + "service-negate": module.params["service_negate"], + "service": module.params["service"], + "send-deny-packet": module.params["send_deny_packet"], + "schedule-timeout": module.params["schedule_timeout"], + "schedule": module.params["schedule"], + "scan-botnet-connections": module.params["scan_botnet_connections"], + "rtp-nat": module.params["rtp_nat"], + "rtp-addr": module.params["rtp_addr"], + "rsso": module.params["rsso"], + "replacemsg-override-group": module.params["replacemsg_override_group"], + "redirect-url": module.params["redirect_url"], + "radius-mac-auth-bypass": module.params["radius_mac_auth_bypass"], + "profile-type": module.params["profile_type"], + "profile-protocol-options": module.params["profile_protocol_options"], + "profile-group": module.params["profile_group"], + "poolname": module.params["poolname"], + "policyid": module.params["policyid"], + "permit-stun-host": module.params["permit_stun_host"], + "permit-any-host": module.params["permit_any_host"], + "per-ip-shaper": module.params["per_ip_shaper"], + "outbound": module.params["outbound"], + "ntlm-guest": module.params["ntlm_guest"], + "ntlm-enabled-browsers": module.params["ntlm_enabled_browsers"], + "ntlm": module.params["ntlm"], + "np-acceleration": module.params["np_acceleration"], + "natoutbound": module.params["natoutbound"], + "natip": module.params["natip"], + "natinbound": module.params["natinbound"], + "nat": module.params["nat"], + "name": module.params["name"], + "mms-profile": module.params["mms_profile"], + "match-vip": module.params["match_vip"], + "logtraffic-start": module.params["logtraffic_start"], + "logtraffic": module.params["logtraffic"], + "learning-mode": module.params["learning_mode"], + "label": module.params["label"], + "ips-sensor": module.params["ips_sensor"], + "ippool": module.params["ippool"], + "internet-service-src-negate": module.params["internet_service_src_negate"], + "internet-service-src-id": module.params["internet_service_src_id"], + "internet-service-src-custom": module.params["internet_service_src_custom"], + "internet-service-src": module.params["internet_service_src"], + "internet-service-negate": module.params["internet_service_negate"], + "internet-service-id": module.params["internet_service_id"], + "internet-service-custom": module.params["internet_service_custom"], + "internet-service": module.params["internet_service"], + "inbound": module.params["inbound"], + "identity-based-route": module.params["identity_based_route"], + "icap-profile": module.params["icap_profile"], + "gtp-profile": module.params["gtp_profile"], + "groups": module.params["groups"], + "global-label": module.params["global_label"], + "fsso-agent-for-ntlm": module.params["fsso_agent_for_ntlm"], + "fsso": module.params["fsso"], + "fixedport": module.params["fixedport"], + "firewall-session-dirty": module.params["firewall_session_dirty"], + "dstintf": module.params["dstintf"], + "dstaddr-negate": module.params["dstaddr_negate"], + "dstaddr": module.params["dstaddr"], + "dsri": module.params["dsri"], + "dscp-value": module.params["dscp_value"], + "dscp-negate": module.params["dscp_negate"], + "dscp-match": module.params["dscp_match"], + "dnsfilter-profile": 
module.params["dnsfilter_profile"], + "dlp-sensor": module.params["dlp_sensor"], + "disclaimer": module.params["disclaimer"], + "diffservcode-rev": module.params["diffservcode_rev"], + "diffservcode-forward": module.params["diffservcode_forward"], + "diffserv-reverse": module.params["diffserv_reverse"], + "diffserv-forward": module.params["diffserv_forward"], + "devices": module.params["devices"], + "delay-tcp-npu-session": module.params["delay_tcp_npu_session"], + "custom-log-fields": module.params["custom_log_fields"], + "comments": module.params["comments"], + "capture-packet": module.params["capture_packet"], + "captive-portal-exempt": module.params["captive_portal_exempt"], + "block-notification": module.params["block_notification"], + "av-profile": module.params["av_profile"], + "auto-asic-offload": module.params["auto_asic_offload"], + "auth-redirect-addr": module.params["auth_redirect_addr"], + "auth-path": module.params["auth_path"], + "auth-cert": module.params["auth_cert"], + "application-list": module.params["application_list"], + "application": module.params["application"], + "app-group": module.params["app_group"], + "app-category": module.params["app_category"], + "action": module.params["action"], + "vpn_dst_node": { + "host": module.params["vpn_dst_node_host"], + "seq": module.params["vpn_dst_node_seq"], + "subnet": module.params["vpn_dst_node_subnet"], + }, + "vpn_src_node": { + "host": module.params["vpn_src_node_host"], + "seq": module.params["vpn_src_node_seq"], + "subnet": module.params["vpn_src_node_subnet"], + } + } + module.paramgram = paramgram + fmgr = None + if module._socket_path: + connection = Connection(module._socket_path) + fmgr = FortiManagerHandler(connection, module) + fmgr.tools = FMGRCommon() + else: + module.fail_json(**FAIL_SOCKET_MSG) + + list_overrides = ['vpn_dst_node', 'vpn_src_node'] + paramgram = fmgr.tools.paramgram_child_list_override(list_overrides=list_overrides, + paramgram=paramgram, module=module) + + # BEGIN MODULE-SPECIFIC LOGIC -- THINGS NEED TO HAPPEN DEPENDING ON THE ENDPOINT AND OPERATION + results = DEFAULT_RESULT_OBJ + try: + if paramgram["mode"] == "delete": + # WE NEED TO GET THE POLICY ID FROM THE NAME OF THE POLICY TO DELETE IT + url = '/pm/config/adom/{adom}/pkg/{pkg}/firewall' \ + '/policy/'.format(adom=paramgram["adom"], + pkg=paramgram["package_name"]) + datagram = { + "filter": ["name", "==", paramgram["name"]] + } + response = fmgr.process_request(url, datagram, FMGRMethods.GET) + try: + if response[1][0]["policyid"]: + policy_id = response[1][0]["policyid"] + paramgram["policyid"] = policy_id + except BaseException: + fmgr.return_response(module=module, results=response, good_codes=[0, ], stop_on_success=True, + ansible_facts=fmgr.construct_ansible_facts(results, module.params, paramgram), + msg="Couldn't find policy ID number for policy name specified.") + except Exception as err: + raise FMGBaseException(err) + + try: + results = fmgr_firewall_policy_modify(fmgr, paramgram) + if module.params["fail_on_missing_dependency"] == "disable": + fmgr.govern_response(module=module, results=results, good_codes=[0, -9998], + ansible_facts=fmgr.construct_ansible_facts(results, module.params, paramgram)) + if module.params["fail_on_missing_dependency"] == "enable" and results[0] == -10131: + fmgr.govern_response(module=module, results=results, good_codes=[0, ], failed=True, skipped=False, + ansible_facts=fmgr.construct_ansible_facts(results, module.params, paramgram)) + except Exception as err: + raise FMGBaseException(err) + + 
return module.exit_json(**results[1])
+
+
+if __name__ == "__main__":
+    main()
diff --git a/plugins/modules/network/fortimanager/fmgr_fwpol_package.py b/plugins/modules/network/fortimanager/fmgr_fwpol_package.py
new file mode 100644
index 0000000000..6a9e18a77c
--- /dev/null
+++ b/plugins/modules/network/fortimanager/fmgr_fwpol_package.py
@@ -0,0 +1,485 @@
+#!/usr/bin/python
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see .
+#
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+    "metadata_version": "1.1",
+    "status": ["preview"],
+    "supported_by": "community"
+}
+
+DOCUMENTATION = '''
+---
+module: fmgr_fwpol_package
+notes:
+    - Full Documentation at U(https://ftnt-ansible-docs.readthedocs.io/en/latest/).
+author:
+    - Luke Weighall (@lweighall)
+    - Andrew Welsh (@Ghilli3)
+    - Jim Huber (@p4r4n0y1ng)
+short_description: Manages FortiManager Firewall Policy Packages.
+description:
+  - Manages FortiManager Firewall Policy Packages. Policy Packages contain one or more Firewall Policies/Rules and
+    are distributed via FortiManager to FortiGates.
+  - This module controls the creation/edit/delete/assign of these packages.
+
+options:
+  adom:
+    description:
+      - The ADOM the configuration should belong to.
+    required: false
+    default: root
+
+  mode:
+    description:
+      - Sets one of three modes for managing the object.
+    choices: ['add', 'set', 'delete']
+    default: add
+
+  name:
+    description:
+      - Name of the FortiManager package or folder.
+    required: True
+
+  object_type:
+    description:
+      - Are we managing packages or folders, or installing packages?
+    required: True
+    choices: ['pkg','folder','install']
+
+  package_folder:
+    description:
+      - Name of the folder you want to put the package into.
+    required: false
+
+  central_nat:
+    description:
+      - Central NAT setting.
+    required: false
+    choices: ['enable', 'disable']
+    default: disable
+
+  fwpolicy_implicit_log:
+    description:
+      - Implicit Log setting for all IPv4 policies in package.
+    required: false
+    choices: ['enable', 'disable']
+    default: disable
+
+  fwpolicy6_implicit_log:
+    description:
+      - Implicit Log setting for all IPv6 policies in package.
+    required: false
+    choices: ['enable', 'disable']
+    default: disable
+
+  inspection_mode:
+    description:
+      - Inspection mode setting for the policies (flow or proxy).
+    required: false
+    choices: ['flow', 'proxy']
+    default: flow
+
+  ngfw_mode:
+    description:
+      - NGFW mode setting for the policies (profile-based or policy-based).
+    required: false
+    choices: ['profile-based', 'policy-based']
+    default: profile-based
+
+  ssl_ssh_profile:
+    description:
+      - If policy-based ngfw-mode, refer to firewall ssl-ssh-profile.
+    required: false
+
+  scope_members:
+    description:
+      - The devices or scope that you want to assign this policy package to.
+    required: false
+
+  scope_members_vdom:
+    description:
+      - The members VDOM you want to assign the package to.
+ required: false + default: root + + parent_folder: + description: + - The parent folder name you want to add this object under. + required: false + +''' + + +EXAMPLES = ''' +- name: CREATE BASIC POLICY PACKAGE + fmgr_fwpol_package: + adom: "ansible" + mode: "add" + name: "testPackage" + object_type: "pkg" + +- name: ADD PACKAGE WITH TARGETS + fmgr_fwpol_package: + mode: "add" + adom: "ansible" + name: "ansibleTestPackage1" + object_type: "pkg" + inspection_mode: "flow" + ngfw_mode: "profile-based" + scope_members: "seattle-fgt02, seattle-fgt03" + +- name: ADD FOLDER + fmgr_fwpol_package: + mode: "add" + adom: "ansible" + name: "ansibleTestFolder1" + object_type: "folder" + +- name: ADD PACKAGE INTO PARENT FOLDER + fmgr_fwpol_package: + mode: "set" + adom: "ansible" + name: "ansibleTestPackage2" + object_type: "pkg" + parent_folder: "ansibleTestFolder1" + +- name: ADD FOLDER INTO PARENT FOLDER + fmgr_fwpol_package: + mode: "set" + adom: "ansible" + name: "ansibleTestFolder2" + object_type: "folder" + parent_folder: "ansibleTestFolder1" + +- name: INSTALL PACKAGE + fmgr_fwpol_package: + mode: "set" + adom: "ansible" + name: "ansibleTestPackage1" + object_type: "install" + scope_members: "seattle-fgt03, seattle-fgt02" + +- name: REMOVE PACKAGE + fmgr_fwpol_package: + mode: "delete" + adom: "ansible" + name: "ansibleTestPackage1" + object_type: "pkg" + +- name: REMOVE NESTED PACKAGE + fmgr_fwpol_package: + mode: "delete" + adom: "ansible" + name: "ansibleTestPackage2" + object_type: "pkg" + parent_folder: "ansibleTestFolder1" + +- name: REMOVE NESTED FOLDER + fmgr_fwpol_package: + mode: "delete" + adom: "ansible" + name: "ansibleTestFolder2" + object_type: "folder" + parent_folder: "ansibleTestFolder1" + +- name: REMOVE FOLDER + fmgr_fwpol_package: + mode: "delete" + adom: "ansible" + name: "ansibleTestFolder1" + object_type: "folder" +''' +RETURN = """ +api_result: + description: full API response, includes status code and message + returned: always + type: str +""" + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.connection import Connection +from ansible_collections.fortinet.fortios.plugins.module_utils.network.fortimanager.fortimanager import FortiManagerHandler +from ansible_collections.fortinet.fortios.plugins.module_utils.network.fortimanager.common import FMGBaseException +from ansible_collections.fortinet.fortios.plugins.module_utils.network.fortimanager.common import FMGRCommon +from ansible_collections.fortinet.fortios.plugins.module_utils.network.fortimanager.common import DEFAULT_RESULT_OBJ +from ansible_collections.fortinet.fortios.plugins.module_utils.network.fortimanager.common import FAIL_SOCKET_MSG +from ansible_collections.fortinet.fortios.plugins.module_utils.network.fortimanager.common import FMGRMethods + + +def fmgr_fwpol_package(fmgr, paramgram): + """ + This function will create FMGR Firewall Policy Packages, or delete them. It is also capable of assigning packages. + This function DOES NOT install the package. 
See the function fmgr_fwpol_package_install()
+
+    :param fmgr: The fmgr object instance from fmgr_utils.py
+    :type fmgr: class object
+    :param paramgram: The formatted dictionary of options to process
+    :type paramgram: dict
+
+    :return: The response from the FortiManager
+    :rtype: dict
+    """
+    if paramgram["mode"] in ['set', 'add']:
+        url = '/pm/pkg/adom/{adom}'.format(adom=paramgram["adom"])
+        members_list = []
+
+        # CHECK FOR SCOPE MEMBERS AND CREATE THAT DICT
+        if paramgram["scope_members"] is not None:
+            members = FMGRCommon.split_comma_strings_into_lists(paramgram["scope_members"])
+            for member in members:
+                scope_dict = {
+                    "name": member,
+                    "vdom": paramgram["scope_members_vdom"],
+                }
+                members_list.append(scope_dict)
+
+        # IF PARENT FOLDER IS NOT DEFINED
+        if paramgram["parent_folder"] is None:
+            datagram = {
+                "type": paramgram["object_type"],
+                "name": paramgram["name"],
+                "scope member": members_list,
+                "package settings": {
+                    "central-nat": paramgram["central-nat"],
+                    "fwpolicy-implicit-log": paramgram["fwpolicy-implicit-log"],
+                    "fwpolicy6-implicit-log": paramgram["fwpolicy6-implicit-log"],
+                    "inspection-mode": paramgram["inspection-mode"],
+                    "ngfw-mode": paramgram["ngfw-mode"],
+                }
+            }
+
+            if paramgram["ngfw-mode"] == "policy-based" and paramgram["ssl-ssh-profile"] is not None:
+                datagram["package settings"]["ssl-ssh-profile"] = paramgram["ssl-ssh-profile"]
+
+        # IF PARENT FOLDER IS DEFINED
+        if paramgram["parent_folder"] is not None:
+            datagram = {
+                "type": "folder",
+                "name": paramgram["parent_folder"],
+                "subobj": [{
+                    "name": paramgram["name"],
+                    "scope member": members_list,
+                    "type": "pkg",
+                    "package settings": {
+                        "central-nat": paramgram["central-nat"],
+                        "fwpolicy-implicit-log": paramgram["fwpolicy-implicit-log"],
+                        "fwpolicy6-implicit-log": paramgram["fwpolicy6-implicit-log"],
+                        "inspection-mode": paramgram["inspection-mode"],
+                        "ngfw-mode": paramgram["ngfw-mode"],
+                    }
+                }]
+            }
+
+    # NORMAL DELETE NO PARENT
+    if paramgram["mode"] == "delete" and paramgram["parent_folder"] is None:
+        datagram = {
+            "name": paramgram["name"]
+        }
+        # SET DELETE URL
+        url = '/pm/pkg/adom/{adom}/{name}'.format(adom=paramgram["adom"], name=paramgram["name"])
+
+    # DELETE WITH PARENT
+    if paramgram["mode"] == "delete" and paramgram["parent_folder"] is not None:
+        datagram = {
+            "name": paramgram["name"]
+        }
+        # SET DELETE URL
+        url = '/pm/pkg/adom/{adom}/{parent_folder}/{name}'.format(adom=paramgram["adom"],
+                                                                  name=paramgram["name"],
+                                                                  parent_folder=paramgram["parent_folder"])
+
+    response = fmgr.process_request(url, datagram, paramgram["mode"])
+    return response
+
+
+def fmgr_fwpol_package_folder(fmgr, paramgram):
+    """
+    This function will create folders for firewall packages. It can create down to two levels deep.
+    We haven't yet tested for any more layers below two levels.
+    parent_folders for multiple levels may need to be defined as "level1/level2/level3" for the URL parameters and such.
+ + :param fmgr: The fmgr object instance from fmgr_utils.py + :type fmgr: class object + :param paramgram: The formatted dictionary of options to process + :type paramgram: dict + + :return: The response from the FortiManager + :rtype: dict + """ + if paramgram["mode"] in ['set', 'add']: + url = '/pm/pkg/adom/{adom}'.format(adom=paramgram["adom"]) + # IF PARENT FOLDER IS NOT DEFINED + if paramgram["parent_folder"] is None: + datagram = { + "type": paramgram["object_type"], + "name": paramgram["name"], + } + + # IF PARENT FOLDER IS DEFINED + if paramgram["parent_folder"] is not None: + datagram = { + "type": paramgram["object_type"], + "name": paramgram["parent_folder"], + "subobj": [{ + "name": paramgram["name"], + "type": paramgram["object_type"], + + }] + } + # NORMAL DELETE NO PARENT + if paramgram["mode"] == "delete" and paramgram["parent_folder"] is None: + datagram = { + "name": paramgram["name"] + } + # SET DELETE URL + url = '/pm/pkg/adom/{adom}/{name}'.format(adom=paramgram["adom"], name=paramgram["name"]) + + # DELETE WITH PARENT + if paramgram["mode"] == "delete" and paramgram["parent_folder"] is not None: + datagram = { + "name": paramgram["name"] + } + # SET DELETE URL + url = '/pm/pkg/adom/{adom}/{parent_folder}/{name}'.format(adom=paramgram["adom"], + name=paramgram["name"], + parent_folder=paramgram["parent_folder"]) + + response = fmgr.process_request(url, datagram, paramgram["mode"]) + return response + + +def fmgr_fwpol_package_install(fmgr, paramgram): + """ + This method/function installs FMGR FW Policy Packages to the scope members defined in the playbook. + + :param fmgr: The fmgr object instance from fmgr_utils.py + :type fmgr: class object + :param paramgram: The formatted dictionary of options to process + :type paramgram: dict + + :return: The response from the FortiManager + :rtype: dict + """ + # INIT BLANK MEMBERS LIST + members_list = [] + # USE THE PARSE CSV FUNCTION TO GET A LIST FORMAT OF THE MEMBERS + members = FMGRCommon.split_comma_strings_into_lists(paramgram["scope_members"]) + # USE THAT LIST TO BUILD THE DICTIONARIES NEEDED, AND ADD TO THE BLANK MEMBERS LIST + for member in members: + scope_dict = { + "name": member, + "vdom": paramgram["scope_members_vdom"], + } + members_list.append(scope_dict) + # THEN FOR THE DATAGRAM, USING THE MEMBERS LIST CREATED ABOVE + datagram = { + "adom": paramgram["adom"], + "pkg": paramgram["name"], + "scope": members_list + } + # EXECUTE THE INSTALL REQUEST + url = '/securityconsole/install/package' + response = fmgr.process_request(url, datagram, FMGRMethods.EXEC) + return response + + +def main(): + argument_spec = dict( + adom=dict(required=False, type="str", default="root"), + mode=dict(choices=["add", "set", "delete"], type="str", default="add"), + + name=dict(required=False, type="str"), + object_type=dict(required=True, type="str", choices=['pkg', 'folder', 'install']), + package_folder=dict(required=False, type="str"), + central_nat=dict(required=False, type="str", default="disable", choices=['enable', 'disable']), + fwpolicy_implicit_log=dict(required=False, type="str", default="disable", choices=['enable', 'disable']), + fwpolicy6_implicit_log=dict(required=False, type="str", default="disable", choices=['enable', 'disable']), + inspection_mode=dict(required=False, type="str", default="flow", choices=['flow', 'proxy']), + ngfw_mode=dict(required=False, type="str", default="profile-based", choices=['profile-based', 'policy-based']), + ssl_ssh_profile=dict(required=False, type="str"), + 
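# NOTE: scope_members is passed as a comma-separated string (e.g. the
+        # "seattle-fgt02, seattle-fgt03" value shown in the EXAMPLES above);
+        # fmgr_fwpol_package() splits it with FMGRCommon.split_comma_strings_into_lists()
+        # and pairs each name with scope_members_vdom to build the "scope member"
+        # list sent to FortiManager.
+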
scope_members=dict(required=False, type="str"), + scope_members_vdom=dict(required=False, type="str", default="root"), + parent_folder=dict(required=False, type="str"), + + ) + + module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False, ) + # MODULE DATAGRAM + paramgram = { + "adom": module.params["adom"], + "name": module.params["name"], + "mode": module.params["mode"], + "object_type": module.params["object_type"], + "package-folder": module.params["package_folder"], + "central-nat": module.params["central_nat"], + "fwpolicy-implicit-log": module.params["fwpolicy_implicit_log"], + "fwpolicy6-implicit-log": module.params["fwpolicy6_implicit_log"], + "inspection-mode": module.params["inspection_mode"], + "ngfw-mode": module.params["ngfw_mode"], + "ssl-ssh-profile": module.params["ssl_ssh_profile"], + "scope_members": module.params["scope_members"], + "scope_members_vdom": module.params["scope_members_vdom"], + "parent_folder": module.params["parent_folder"], + } + module.paramgram = paramgram + fmgr = None + if module._socket_path: + connection = Connection(module._socket_path) + fmgr = FortiManagerHandler(connection, module) + fmgr.tools = FMGRCommon() + else: + module.fail_json(**FAIL_SOCKET_MSG) + + # BEGIN MODULE-SPECIFIC LOGIC -- THINGS NEED TO HAPPEN DEPENDING ON THE ENDPOINT AND OPERATION + results = DEFAULT_RESULT_OBJ + + try: + if paramgram["object_type"] == "pkg": + results = fmgr_fwpol_package(fmgr, paramgram) + fmgr.govern_response(module=module, results=results, + ansible_facts=fmgr.construct_ansible_facts(results, module.params, paramgram)) + except Exception as err: + raise FMGBaseException(err) + + try: + # IF THE object_type IS FOLDER LETS RUN THAT METHOD + if paramgram["object_type"] == "folder": + results = fmgr_fwpol_package_folder(fmgr, paramgram) + fmgr.govern_response(module=module, results=results, + ansible_facts=fmgr.construct_ansible_facts(results, module.params, paramgram)) + except Exception as err: + raise FMGBaseException(err) + + try: + # IF THE object_type IS INSTALL AND NEEDED PARAMETERS ARE DEFINED INSTALL THE PACKAGE + if paramgram["scope_members"] is not None and paramgram["name"] is not None and\ + paramgram["object_type"] == "install": + results = fmgr_fwpol_package_install(fmgr, paramgram) + fmgr.govern_response(module=module, results=results, + ansible_facts=fmgr.construct_ansible_facts(results, module.params, paramgram)) + except Exception as err: + raise FMGBaseException(err) + + return module.exit_json(**results[1]) + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/network/fortimanager/fmgr_ha.py b/plugins/modules/network/fortimanager/fmgr_ha.py new file mode 100644 index 0000000000..81c7f75af3 --- /dev/null +++ b/plugins/modules/network/fortimanager/fmgr_ha.py @@ -0,0 +1,355 @@ +#!/usr/bin/python +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+# + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = { + "metadata_version": "1.1", + "status": ["preview"], + "supported_by": "community" +} + +DOCUMENTATION = ''' +--- +module: fmgr_ha +notes: + - Full Documentation at U(https://ftnt-ansible-docs.readthedocs.io/en/latest/). +author: + - Luke Weighall (@lweighall) + - Andrew Welsh (@Ghilli3) + - Jim Huber (@p4r4n0y1ng) +short_description: Manages the High-Availability State of FortiManager Clusters and Nodes. +description: Change HA state or settings of FortiManager nodes (Standalone/Master/Slave). + +options: + fmgr_ha_mode: + description: + - Sets the role of the FortiManager host for HA. + required: false + choices: ["standalone", "master", "slave"] + + fmgr_ha_peer_ipv4: + description: + - Sets the IPv4 address of a HA peer. + required: false + + fmgr_ha_peer_ipv6: + description: + - Sets the IPv6 address of a HA peer. + required: false + + fmgr_ha_peer_sn: + description: + - Sets the HA Peer Serial Number. + required: false + + fmgr_ha_peer_status: + description: + - Sets the peer status to enable or disable. + required: false + choices: ["enable", "disable"] + + fmgr_ha_cluster_pw: + description: + - Sets the password for the HA cluster. Only required once. System remembers between HA mode switches. + required: false + + fmgr_ha_cluster_id: + description: + - Sets the ID number of the HA cluster. Defaults to 1. + required: false + default: 1 + + fmgr_ha_hb_threshold: + description: + - Sets heartbeat lost threshold (1-255). + required: false + default: 3 + + fmgr_ha_hb_interval: + description: + - Sets the heartbeat interval (1-255). + required: false + default: 5 + + fmgr_ha_file_quota: + description: + - Sets the File quota in MB (2048-20480). 
+ required: false + default: 4096 +''' + + +EXAMPLES = ''' +- name: SET FORTIMANAGER HA NODE TO MASTER + fmgr_ha: + fmgr_ha_mode: "master" + fmgr_ha_cluster_pw: "fortinet" + fmgr_ha_cluster_id: "1" + +- name: SET FORTIMANAGER HA NODE TO SLAVE + fmgr_ha: + fmgr_ha_mode: "slave" + fmgr_ha_cluster_pw: "fortinet" + fmgr_ha_cluster_id: "1" + +- name: SET FORTIMANAGER HA NODE TO STANDALONE + fmgr_ha: + fmgr_ha_mode: "standalone" + +- name: ADD FORTIMANAGER HA PEER + fmgr_ha: + fmgr_ha_peer_ipv4: "192.168.1.254" + fmgr_ha_peer_sn: "FMG-VM1234567890" + fmgr_ha_peer_status: "enable" + +- name: CREATE CLUSTER ON MASTER + fmgr_ha: + fmgr_ha_mode: "master" + fmgr_ha_cluster_pw: "fortinet" + fmgr_ha_cluster_id: "1" + fmgr_ha_hb_threshold: "10" + fmgr_ha_hb_interval: "15" + fmgr_ha_file_quota: "2048" +''' +RETURN = """ +api_result: + description: full API response, includes status code and message + returned: always + type: str +""" + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.connection import Connection +from ansible_collections.fortinet.fortios.plugins.module_utils.network.fortimanager.fortimanager import FortiManagerHandler +from ansible_collections.fortinet.fortios.plugins.module_utils.network.fortimanager.common import FMGBaseException +from ansible_collections.fortinet.fortios.plugins.module_utils.network.fortimanager.common import FMGRCommon +from ansible_collections.fortinet.fortios.plugins.module_utils.network.fortimanager.common import FMGRMethods +from ansible_collections.fortinet.fortios.plugins.module_utils.network.fortimanager.common import DEFAULT_RESULT_OBJ +from ansible_collections.fortinet.fortios.plugins.module_utils.network.fortimanager.common import FAIL_SOCKET_MSG + + +def fmgr_set_ha_mode(fmgr, paramgram): + """ + :param fmgr: The fmgr object instance from fortimanager.py + :type fmgr: class object + :param paramgram: The formatted dictionary of options to process + :type paramgram: dict + :return: The response from the FortiManager + :rtype: dict + """ + # INIT A BASIC OBJECTS + response = DEFAULT_RESULT_OBJ + url = "" + datagram = {} + + if paramgram["fmgr_ha_cluster_pw"] is not None and str(paramgram["fmgr_ha_mode"].lower()) != "standalone": + datagram = { + "mode": paramgram["fmgr_ha_mode"], + "file-quota": paramgram["fmgr_ha_file_quota"], + "hb-interval": paramgram["fmgr_ha_hb_interval"], + "hb-lost-threshold": paramgram["fmgr_ha_hb_threshold"], + "password": paramgram["fmgr_ha_cluster_pw"], + "clusterid": paramgram["fmgr_ha_cluster_id"] + } + elif str(paramgram["fmgr_ha_mode"].lower()) == "standalone": + datagram = { + "mode": paramgram["fmgr_ha_mode"], + "file-quota": paramgram["fmgr_ha_file_quota"], + "hb-interval": paramgram["fmgr_ha_hb_interval"], + "hb-lost-threshold": paramgram["fmgr_ha_hb_threshold"], + "clusterid": paramgram["fmgr_ha_cluster_id"] + } + + url = '/cli/global/system/ha' + response = fmgr.process_request(url, datagram, FMGRMethods.SET) + return response + + +def fmgr_get_ha_peer_list(fmgr): + """ + :param fmgr: The fmgr object instance from fortimanager.py + :type fmgr: class object + :param paramgram: The formatted dictionary of options to process + :type paramgram: dict + :return: The response from the FortiManager + :rtype: dict + """ + # INIT A BASIC OBJECTS + response = DEFAULT_RESULT_OBJ + + datagram = {} + paramgram = {} + + url = '/cli/global/system/ha/peer/' + response = fmgr.process_request(url, datagram, FMGRMethods.GET) + return response + + +def fmgr_set_ha_peer(fmgr, paramgram): + """ + :param fmgr: 
The fmgr object instance from fortimanager.py + :type fmgr: class object + :param paramgram: The formatted dictionary of options to process + :type paramgram: dict + :return: The response from the FortiManager + :rtype: dict + """ + + datagram = { + "ip": paramgram["fmgr_ha_peer_ipv4"], + "ip6": paramgram["fmgr_ha_peer_ipv6"], + "serial-number": paramgram["fmgr_ha_peer_sn"], + "status": paramgram["fmgr_ha_peer_status"], + "id": paramgram["peer_id"] + } + + url = '/cli/global/system/ha/peer/' + response = fmgr.process_request(url, datagram, FMGRMethods.SET) + return response + + +def main(): + argument_spec = dict( + fmgr_ha_mode=dict(required=False, type="str", choices=["standalone", "master", "slave"]), + fmgr_ha_cluster_pw=dict(required=False, type="str", no_log=True), + fmgr_ha_peer_status=dict(required=False, type="str", choices=["enable", "disable"]), + fmgr_ha_peer_sn=dict(required=False, type="str"), + fmgr_ha_peer_ipv4=dict(required=False, type="str"), + fmgr_ha_peer_ipv6=dict(required=False, type="str"), + fmgr_ha_hb_threshold=dict(required=False, type="int", default=3), + fmgr_ha_hb_interval=dict(required=False, type="int", default=5), + fmgr_ha_file_quota=dict(required=False, type="int", default=4096), + fmgr_ha_cluster_id=dict(required=False, type="int", default=1) + ) + + required_if = [ + ['fmgr_ha_peer_ipv4', 'present', ['fmgr_ha_peer_sn', 'fmgr_ha_peer_status']], + ['fmgr_ha_peer_ipv6', 'present', ['fmgr_ha_peer_sn', 'fmgr_ha_peer_status']], + ['fmgr_ha_mode', 'master', ['fmgr_ha_cluster_pw', 'fmgr_ha_cluster_id']], + ['fmgr_ha_mode', 'slave', ['fmgr_ha_cluster_pw', 'fmgr_ha_cluster_id']], + ] + + module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False, required_if=required_if) + paramgram = { + "fmgr_ha_mode": module.params["fmgr_ha_mode"], + "fmgr_ha_cluster_pw": module.params["fmgr_ha_cluster_pw"], + "fmgr_ha_peer_status": module.params["fmgr_ha_peer_status"], + "fmgr_ha_peer_sn": module.params["fmgr_ha_peer_sn"], + "fmgr_ha_peer_ipv4": module.params["fmgr_ha_peer_ipv4"], + "fmgr_ha_peer_ipv6": module.params["fmgr_ha_peer_ipv6"], + "fmgr_ha_hb_threshold": module.params["fmgr_ha_hb_threshold"], + "fmgr_ha_hb_interval": module.params["fmgr_ha_hb_interval"], + "fmgr_ha_file_quota": module.params["fmgr_ha_file_quota"], + "fmgr_ha_cluster_id": module.params["fmgr_ha_cluster_id"], + } + module.paramgram = paramgram + fmgr = None + if module._socket_path: + connection = Connection(module._socket_path) + fmgr = FortiManagerHandler(connection, module) + fmgr.tools = FMGRCommon() + else: + module.fail_json(**FAIL_SOCKET_MSG) + + # INIT FLAGS AND COUNTERS + get_ha_peers = 0 + results = DEFAULT_RESULT_OBJ + try: + if any(v is not None for v in (paramgram["fmgr_ha_peer_sn"], paramgram["fmgr_ha_peer_ipv4"], + paramgram["fmgr_ha_peer_ipv6"], paramgram["fmgr_ha_peer_status"])): + get_ha_peers = 1 + except Exception as err: + raise FMGBaseException(err) + try: + # IF HA MODE IS NOT NULL, SWITCH THAT + if paramgram["fmgr_ha_mode"] is not None: + if (str.lower(paramgram["fmgr_ha_mode"]) != "standalone" and paramgram["fmgr_ha_cluster_pw"] is not None)\ + or str.lower(paramgram["fmgr_ha_mode"]) == "standalone": + results = fmgr_set_ha_mode(fmgr, paramgram) + fmgr.govern_response(module=module, results=results, stop_on_success=False, + ansible_facts=fmgr.construct_ansible_facts(results, module.params, paramgram)) + + elif str.lower(paramgram["fmgr_ha_mode"]) != "standalone" and\ + paramgram["fmgr_ha_mode"] is not None and\ + paramgram["fmgr_ha_cluster_pw"] is None: + 
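# A cluster password is required to form or join a cluster, so rather than send
+                # an incomplete datagram to /cli/global/system/ha we stop here with a message.
+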
module.exit_json(msg="If setting HA Mode of MASTER or SLAVE, you must specify a cluster password") + + except Exception as err: + raise FMGBaseException(err) + # IF GET_HA_PEERS IS ENABLED, LETS PROCESS THE PEERS + try: + if get_ha_peers == 1: + # GET THE CURRENT LIST OF PEERS FROM THE NODE + peers = fmgr_get_ha_peer_list(fmgr) + # GET LENGTH OF RETURNED PEERS LIST AND ADD ONE FOR THE NEXT ID + paramgram["next_peer_id"] = len(peers[1]) + 1 + # SET THE ACTUAL NUMBER OF PEERS + num_of_peers = len(peers[1]) + # SET THE PEER ID FOR DISABLE METHOD + paramgram["peer_id"] = len(peers) - 1 + # SET THE PEER LOOPCOUNT TO 1 TO START THE LOOP + peer_loopcount = 1 + + # LOOP THROUGH PEERS TO FIND THE SERIAL NUMBER MATCH TO GET THE RIGHT PEER ID + # IDEA BEING WE DON'T WANT TO SUBMIT A BAD peer_id THAT DOESN'T JIVE WITH CURRENT DB ON FMG + # SO LETS SEARCH FOR IT, AND IF WE FIND IT, WE WILL CHANGE THE PEER ID VARIABLES TO MATCH + # IF NOT FOUND, LIFE GOES ON AND WE ASSUME THAT WE'RE ADDING A PEER + # AT WHICH POINT THE next_peer_id VARIABLE WILL HAVE THE RIGHT PRIMARY KEY + + if paramgram["fmgr_ha_peer_sn"] is not None: + while peer_loopcount <= num_of_peers: + # GET THE SERIAL NUMBER FOR CURRENT PEER IN LOOP TO COMPARE TO SN IN PLAYBOOK + try: + sn_compare = peers[1][peer_loopcount - 1]["serial-number"] + # IF THE SN IN THE PEERS MATCHES THE PLAYBOOK SN, SET THE IDS + if sn_compare == paramgram["fmgr_ha_peer_sn"]: + paramgram["peer_id"] = peer_loopcount + paramgram["next_peer_id"] = paramgram["peer_id"] + except Exception as err: + raise FMGBaseException(err) + # ADVANCE THE LOOP AND REPEAT UNTIL DONE + peer_loopcount += 1 + + # IF THE PEER STATUS ISN'T IN THE PLAYBOOK, ASSUME ITS ENABLE + if paramgram["fmgr_ha_peer_status"] is None: + paramgram["fmgr_ha_peer_status"] = "enable" + + # IF THE PEER STATUS IS ENABLE, USE THE next_peer_id IN THE API CALL FOR THE ID + if paramgram["fmgr_ha_peer_status"] == "enable": + results = fmgr_set_ha_peer(fmgr, paramgram) + fmgr.govern_response(module=module, results=results, stop_on_success=True, + ansible_facts=fmgr.construct_ansible_facts(results, + module.params, paramgram)) + + # IF THE PEER STATUS IS DISABLE, WE HAVE TO HANDLE THAT A BIT DIFFERENTLY + # JUST USING TWO DIFFERENT peer_id 's HERE + if paramgram["fmgr_ha_peer_status"] == "disable": + results = fmgr_set_ha_peer(fmgr, paramgram) + fmgr.govern_response(module=module, results=results, stop_on_success=True, + ansible_facts=fmgr.construct_ansible_facts(results, module.params, paramgram)) + + except Exception as err: + raise FMGBaseException(err) + + return module.exit_json(**results[1]) + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/network/fortimanager/fmgr_provisioning.py b/plugins/modules/network/fortimanager/fmgr_provisioning.py new file mode 100644 index 0000000000..14fe81837e --- /dev/null +++ b/plugins/modules/network/fortimanager/fmgr_provisioning.py @@ -0,0 +1,364 @@ +#!/usr/bin/python +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. 
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see .
+#
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'status': ['preview'],
+                    'supported_by': 'community',
+                    'metadata_version': '1.1'}
+
+DOCUMENTATION = '''
+---
+module: fmgr_provisioning
+author: Andrew Welsh (@Ghilli3)
+short_description: Provision devices via FortiManager
+description:
+  - Add model devices on the FortiManager using the JSON-RPC API and have them pre-configured,
+    so when central management is configured, the configuration is pushed down to the
+    registering devices
+
+options:
+  adom:
+    description:
+      - The administrative domain (adom) the configuration belongs to
+    required: true
+  vdom:
+    description:
+      - The virtual domain (vdom) the configuration belongs to
+  host:
+    description:
+      - The FortiManager's Address.
+    required: true
+  username:
+    description:
+      - The username to log into the FortiManager
+    required: true
+  password:
+    description:
+      - The password associated with the username account.
+    required: false
+
+  policy_package:
+    description:
+      - The name of the policy package to be assigned to the device.
+    required: True
+  name:
+    description:
+      - The name of the device to be provisioned.
+    required: True
+  group:
+    description:
+      - The name of the device group the provisioned device can belong to.
+    required: False
+  serial:
+    description:
+      - The serial number of the device that will be provisioned.
+    required: True
+  platform:
+    description:
+      - The platform of the device, such as model number or VM.
+    required: True
+  description:
+    description:
+      - Description of the device to be provisioned.
+    required: False
+  os_version:
+    description:
+      - The Fortinet OS version to be used for the device, such as 5.0 or 6.0.
+    required: True
+  minor_release:
+    description:
+      - The minor release number such as 6.X.1, with X being the minor release.
+    required: False
+  patch_release:
+    description:
+      - The patch release number such as 6.0.X, with X being the patch release.
+    required: False
+  os_type:
+    description:
+      - The Fortinet OS type to be pushed to the device, such as 'FOS' for FortiOS.
+    required: True
+'''
+
+EXAMPLES = '''
+- name: Create FGT1 Model Device
+  fmgr_provisioning:
+    host: "{{ inventory_hostname }}"
+    username: "{{ username }}"
+    password: "{{ password }}"
+    adom: "root"
+    vdom: "root"
+    policy_package: "default"
+    name: "FGT1"
+    group: "Ansible"
+    serial: "FGVM000000117994"
+    platform: "FortiGate-VM64"
+    description: "Provisioned by Ansible"
+    os_version: '6.0'
+    minor_release: 0
+    patch_release: 0
+    os_type: 'fos'
+
+
+- name: Create FGT2 Model Device
+  fmgr_provisioning:
+    host: "{{ inventory_hostname }}"
+    username: "{{ username }}"
+    password: "{{ password }}"
+    adom: "root"
+    vdom: "root"
+    policy_package: "test_pp"
+    name: "FGT2"
+    group: "Ansible"
+    serial: "FGVM000000117992"
+    platform: "FortiGate-VM64"
+    description: "Provisioned by Ansible"
+    os_version: '5.0'
+    minor_release: 6
+    patch_release: 0
+    os_type: 'fos'
+
+'''
+
+RETURN = """
+api_result:
+  description: full API response, includes status code and message
+  returned: always
+  type: str
+"""
+
+from ansible.module_utils.basic import AnsibleModule, env_fallback
+from ansible_collections.fortinet.fortios.plugins.module_utils.network.fortimanager.fortimanager import AnsibleFortiManager
+
+# check for pyFMG lib
+try:
+    from pyFMG.fortimgr import FortiManager
+    HAS_PYFMGR = True
+except ImportError:
+    HAS_PYFMGR = False
+
+
+def dev_group_exists(fmg, dev_grp_name, adom):
+    datagram = {
+        'adom': adom,
+        'name': dev_grp_name,
+    }
+
+    url = '/dvmdb/adom/{adom}/group/{dev_grp_name}'.format(adom=adom, dev_grp_name=dev_grp_name)
+    response = fmg.get(url, datagram)
+    return response
+
+
+def prov_template_exists(fmg, prov_template, adom, vdom):
+    datagram = {
+        'name': prov_template,
+        'adom': adom,
+    }
+
+    url = '/pm/devprof/adom/{adom}/devprof/{name}'.format(adom=adom, name=prov_template)
+    response = fmg.get(url, datagram)
+    return response
+
+
+def create_model_device(fmg, name, serial, group, platform, os_version,
+                        os_type, minor_release, patch_release=0, adom='root'):
+    datagram = {
+        'adom': adom,
+        'flags': ['create_task', 'nonblocking'],
+        'groups': [{'name': group, 'vdom': 'root'}],
+        'device': {
+            'mr': minor_release,
+            'name': name,
+            'sn': serial,
+            'mgmt_mode': 'fmg',
+            'device action': 'add_model',
+            'platform_str': platform,
+            'os_ver': os_version,
+            'os_type': os_type,
+            'patch': patch_release,
+            'desc': 'Provisioned by Ansible',
+        }
+    }
+
+    url = '/dvm/cmd/add/device'
+    response = fmg.execute(url, datagram)
+    return response
+
+
+def update_flags(fmg, name):
+    datagram = {
+        'flags': ['is_model', 'linked_to_model']
+    }
+    url = 'dvmdb/device/{name}'.format(name=name)
+    response = fmg.update(url, datagram)
+    return response
+
+
+def assign_provision_template(fmg, template, adom, target):
+    datagram = {
+        'name': template,
+        'type': 'devprof',
+        'description': 'Provisioned by Ansible',
+        'scope member': [{'name': target}]
+    }
+    url = "/pm/devprof/adom/{adom}".format(adom=adom)
+    response = fmg.update(url, datagram)
+    return response
+
+
+def set_devprof_scope(self, provisioning_template, adom, provision_targets):
+    """
+    Set the scope members on an existing DevProf (device provisioning template).
+    """
+    fields = dict()
+    targets = []
+    fields["name"] = provisioning_template
+    fields["type"] = "devprof"
+    fields["description"] = "CreatedByAnsible"
+
+    for target in provision_targets.strip().split(","):
+        # build a scope-member entry for each comma-separated target
+        new_target = {"name": target}
+        targets.append(new_target)
+
+    fields["scope member"] = targets
+
+    body = {"method": "set", "params": [{"url":
"/pm/devprof/adom/{adom}".format(adom=adom), + "data": fields, "session": self.session}]} + response = self.make_request(body).json() + return response + + +def assign_dev_grp(fmg, grp_name, device_name, vdom, adom): + datagram = { + 'name': device_name, + 'vdom': vdom, + } + url = "/dvmdb/adom/{adom}/group/{grp_name}/object member".format(adom=adom, grp_name=grp_name) + response = fmg.set(url, datagram) + return response + + +def update_install_target(fmg, device, pp='default', vdom='root', adom='root'): + datagram = { + 'scope member': [{'name': device, 'vdom': vdom}], + 'type': 'pkg' + } + url = '/pm/pkg/adom/{adom}/{pkg_name}'.format(adom=adom, pkg_name=pp) + response = fmg.update(url, datagram) + return response + + +def install_pp(fmg, device, pp='default', vdom='root', adom='root'): + datagram = { + 'adom': adom, + 'flags': 'nonblocking', + 'pkg': pp, + 'scope': [{'name': device, 'vdom': vdom}], + } + url = 'securityconsole/install/package' + response = fmg.execute(url, datagram) + return response + + +def main(): + + argument_spec = dict( + adom=dict(required=False, type="str"), + vdom=dict(required=False, type="str"), + host=dict(required=True, type="str"), + password=dict(fallback=(env_fallback, ["ANSIBLE_NET_PASSWORD"]), no_log=True), + username=dict(fallback=(env_fallback, ["ANSIBLE_NET_USERNAME"]), no_log=True), + + policy_package=dict(required=False, type="str"), + name=dict(required=False, type="str"), + group=dict(required=False, type="str"), + serial=dict(required=True, type="str"), + platform=dict(required=True, type="str"), + description=dict(required=False, type="str"), + os_version=dict(required=True, type="str"), + minor_release=dict(required=False, type="str"), + patch_release=dict(required=False, type="str"), + os_type=dict(required=False, type="str"), + + ) + + module = AnsibleModule(argument_spec, supports_check_mode=True, ) + + # check if params are set + if module.params["host"] is None or module.params["username"] is None: + module.fail_json(msg="Host and username are required for connection") + + # check if login failed + fmg = AnsibleFortiManager(module, module.params["host"], module.params["username"], module.params["password"]) + response = fmg.login() + + if "FortiManager instance connnected" not in str(response): + module.fail_json(msg="Connection to FortiManager Failed") + else: + + if module.params["policy_package"] is None: + module.params["policy_package"] = 'default' + if module.params["adom"] is None: + module.params["adom"] = 'root' + if module.params["vdom"] is None: + module.params["vdom"] = 'root' + if module.params["platform"] is None: + module.params["platform"] = 'FortiGate-VM64' + if module.params["os_type"] is None: + module.params["os_type"] = 'fos' + + results = create_model_device(fmg, + module.params["name"], + module.params["serial"], + module.params["group"], + module.params["platform"], + module.params["os_ver"], + module.params["os_type"], + module.params["minor_release"], + module.params["patch_release"], + module.params["adom"]) + if results[0] != 0: + module.fail_json(msg="Create model failed", **results) + + results = update_flags(fmg, module.params["name"]) + if results[0] != 0: + module.fail_json(msg="Update device flags failed", **results) + + # results = assign_dev_grp(fmg, 'Ansible', 'FGVM000000117992', 'root', 'root') + # if not results[0] == 0: + # module.fail_json(msg="Setting device group failed", **results) + + results = update_install_target(fmg, module.params["name"], module.params["policy_package"]) + if results[0] 
!= 0: + module.fail_json(msg="Adding device target to package failed", **results) + + results = install_pp(fmg, module.params["name"], module.params["policy_package"]) + if results[0] != 0: + module.fail_json(msg="Installing policy package failed", **results) + + fmg.logout() + + # results is returned as a tuple + return module.exit_json(**results[1]) + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/network/fortimanager/fmgr_query.py b/plugins/modules/network/fortimanager/fmgr_query.py new file mode 100644 index 0000000000..f123b8436e --- /dev/null +++ b/plugins/modules/network/fortimanager/fmgr_query.py @@ -0,0 +1,430 @@ +#!/usr/bin/python +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = { + "metadata_version": "1.1", + "status": ["preview"], + "supported_by": "community" +} + +DOCUMENTATION = ''' +--- +module: fmgr_query +notes: + - Full Documentation at U(https://ftnt-ansible-docs.readthedocs.io/en/latest/). +author: Luke Weighall (@lweighall) +short_description: Query FortiManager data objects for use in Ansible workflows. +description: + - Provides information on data objects within FortiManager so that playbooks can perform conditionals. + +options: + adom: + description: + - The ADOM the configuration should belong to. + required: false + default: root + + object: + description: + - The data object we wish to query (device, package, rule, etc). Will expand choices as improves. + required: true + choices: + - device + - cluster_nodes + - task + - custom + + custom_endpoint: + description: + - ADVANCED USERS ONLY! REQUIRES KNOWLEDGE OF FMGR JSON API! + - The HTTP Endpoint on FortiManager you wish to GET from. + required: false + + custom_dict: + description: + - ADVANCED USERS ONLY! REQUIRES KNOWLEDGE OF FMGR JSON API! + - DICTIONARY JSON FORMAT ONLY -- Custom dictionary/datagram to send to the endpoint. + required: false + + device_ip: + description: + - The IP of the device you want to query. + required: false + + device_unique_name: + description: + - The desired "friendly" name of the device you want to query. + required: false + + device_serial: + description: + - The serial number of the device you want to query. + required: false + + task_id: + description: + - The ID of the task you wish to query status on. If left blank and object = 'task' a list of tasks are returned. + required: false + + nodes: + description: + - A LIST of firewalls in the cluster you want to verify i.e. ["firewall_A","firewall_B"]. 
+    required: false
+'''
+
+
+EXAMPLES = '''
+- name: QUERY FORTIGATE DEVICE BY IP
+  fmgr_query:
+    object: "device"
+    adom: "ansible"
+    device_ip: "10.7.220.41"
+
+- name: QUERY FORTIGATE DEVICE BY SERIAL
+  fmgr_query:
+    adom: "ansible"
+    object: "device"
+    device_serial: "FGVM000000117992"
+
+- name: QUERY FORTIGATE DEVICE BY FRIENDLY NAME
+  fmgr_query:
+    adom: "ansible"
+    object: "device"
+    device_unique_name: "ansible-fgt01"
+
+- name: VERIFY CLUSTER MEMBERS AND STATUS
+  fmgr_query:
+    adom: "ansible"
+    object: "cluster_nodes"
+    device_unique_name: "fgt-cluster01"
+    nodes: ["ansible-fgt01", "ansible-fgt02", "ansible-fgt03"]
+
+- name: GET STATUS OF TASK ID
+  fmgr_query:
+    adom: "ansible"
+    object: "task"
+    task_id: "3"
+
+- name: USE CUSTOM TYPE TO QUERY AVAILABLE SCRIPTS
+  fmgr_query:
+    adom: "ansible"
+    object: "custom"
+    custom_endpoint: "/dvmdb/adom/ansible/script"
+    custom_dict: { "type": "cli" }
+'''
+
+RETURN = """
+api_result:
+  description: full API response, includes status code and message
+  returned: always
+  type: str
+"""
+
+from ansible.module_utils.basic import AnsibleModule, env_fallback
+from ansible.module_utils.connection import Connection
+from ansible_collections.fortinet.fortios.plugins.module_utils.network.fortimanager.fortimanager import FortiManagerHandler
+from ansible_collections.fortinet.fortios.plugins.module_utils.network.fortimanager.common import FMGBaseException
+from ansible_collections.fortinet.fortios.plugins.module_utils.network.fortimanager.common import FMGRCommon
+from ansible_collections.fortinet.fortios.plugins.module_utils.network.fortimanager.common import FMGRMethods
+from ansible_collections.fortinet.fortios.plugins.module_utils.network.fortimanager.common import DEFAULT_RESULT_OBJ
+from ansible_collections.fortinet.fortios.plugins.module_utils.network.fortimanager.common import FAIL_SOCKET_MSG
+
+
+def fmgr_get_custom(fmgr, paramgram):
+    """
+    :param fmgr: The fmgr object instance from fortimanager.py
+    :type fmgr: class object
+    :param paramgram: The formatted dictionary of options to process
+    :type paramgram: dict
+    :return: The response from the FortiManager
+    :rtype: dict
+    """
+    # IF THE CUSTOM DICTIONARY (OFTEN CONTAINING FILTERS) IS DEFINED, USE IT; OTHERWISE SEND AN EMPTY DICT
+    if paramgram["custom_dict"] is not None:
+        datagram = paramgram["custom_dict"]
+    else:
+        datagram = dict()
+
+    # SET THE CUSTOM ENDPOINT PROVIDED
+    url = paramgram["custom_endpoint"]
+    # MAKE THE CALL AND RETURN RESULTS
+    response = fmgr.process_request(url, datagram, FMGRMethods.GET)
+    return response
+
+
+def fmgr_get_task_status(fmgr, paramgram):
+    """
+    :param fmgr: The fmgr object instance from fortimanager.py
+    :type fmgr: class object
+    :param paramgram: The formatted dictionary of options to process
+    :type paramgram: dict
+    :return: The response from the FortiManager
+    :rtype: dict
+    """
+    # IF THE TASK_ID IS DEFINED, THEN GET THAT SPECIFIC TASK
+    # OTHERWISE, GET ALL RECENT TASKS IN A LIST
+    if paramgram["task_id"] is not None:
+
+        datagram = {
+            "adom": paramgram["adom"]
+        }
+        url = '/task/task/{task_id}'.format(task_id=paramgram["task_id"])
+        response = fmgr.process_request(url, datagram, FMGRMethods.GET)
+    else:
+        datagram = {
+            "adom": paramgram["adom"]
+        }
+        url = '/task/task'
+        response = fmgr.process_request(url, datagram, FMGRMethods.GET)
+    return response
+
+
+def fmgr_get_device(fmgr, paramgram):
+    """
+    This method is used to get information on devices. This will not work on HA_SLAVE nodes, only on top-level
+    devices such as cluster objects and standalone devices.
+def fmgr_get_device(fmgr, paramgram):
+    """
+    This method is used to get information on devices. It will NOT work on HA_SLAVE nodes, only on top-level devices
+    such as cluster objects and standalone devices.
+
+    :param fmgr: The fmgr object instance from fortimanager.py
+    :type fmgr: class object
+    :param paramgram: The formatted dictionary of options to process
+    :type paramgram: dict
+    :return: The response from the FortiManager
+    :rtype: dict
+    """
+    # FIRST, RUN A QUICK CLUSTER REFRESH/UPDATE ATTEMPT TO ENSURE WE'RE GETTING THE LATEST INFORMATION
+    response = DEFAULT_RESULT_OBJ
+    url = ""
+    datagram = {}
+
+    update_url = '/dvm/cmd/update/device'
+    update_dict = {
+        "adom": paramgram['adom'],
+        "device": paramgram['device_unique_name'],
+        "flags": "create_task"
+    }
+    # DO THE UPDATE CALL
+    fmgr.process_request(update_url, update_dict, FMGRMethods.EXEC)
+
+    # SET THE URL
+    url = '/dvmdb/adom/{adom}/device'.format(adom=paramgram["adom"])
+    device_found = 0
+    response = []
+
+    # TRY TO FIND IT FIRST BY SERIAL NUMBER
+    if paramgram["device_serial"] is not None:
+        datagram = {
+            "filter": ["sn", "==", paramgram["device_serial"]]
+        }
+        response = fmgr.process_request(url, datagram, FMGRMethods.GET)
+        if len(response[1]) > 0:
+            device_found = 1
+
+    # CHECK IF ANYTHING WAS RETURNED, IF NOT TRY THE DEVICE NAME PARAMETER
+    if device_found == 0 and paramgram["device_unique_name"] is not None:
+        datagram = {
+            "filter": ["name", "==", paramgram["device_unique_name"]]
+        }
+        response = fmgr.process_request(url, datagram, FMGRMethods.GET)
+        if len(response[1]) > 0:
+            device_found = 1
+
+    # CHECK IF ANYTHING WAS RETURNED, IF NOT TRY THE DEVICE IP ADDRESS
+    if device_found == 0 and paramgram["device_ip"] is not None:
+        datagram = {
+            "filter": ["ip", "==", paramgram["device_ip"]]
+        }
+        response = fmgr.process_request(url, datagram, FMGRMethods.GET)
+        if len(response[1]) > 0:
+            device_found = 1
+
+    return response
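+
+
+# NOTE (editorial sketch, not part of the original module): the three lookups
+# in fmgr_get_device() all use the same FortiManager filter triplet shape,
+# [attribute, operator, value]. An equivalent, self-contained sketch of the
+# selection order (the helper name is illustrative):
+def _sketch_device_filter(paramgram):
+    """Illustrative only: return the first applicable filter triplet."""
+    for attribute, key in (("sn", "device_serial"),
+                           ("name", "device_unique_name"),
+                           ("ip", "device_ip")):
+        if paramgram.get(key) is not None:
+            return [attribute, "==", paramgram[key]]
+    return None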
+def fmgr_get_cluster_nodes(fmgr, paramgram):
+    """
+    This method is used to get information on cluster members. It WILL work on HA_SLAVE nodes, but NOT on top-level
+    standalone devices.
+
+    :param fmgr: The fmgr object instance from fortimanager.py
+    :type fmgr: class object
+    :param paramgram: The formatted dictionary of options to process
+    :type paramgram: dict
+    :return: The response from the FortiManager
+    :rtype: dict
+    """
+    response = DEFAULT_RESULT_OBJ
+    url = ""
+    datagram = {}
+    # USE THE DEVICE METHOD TO GET THE CLUSTER INFORMATION SO WE CAN SEE THE HA_SLAVE NODES
+    response = fmgr_get_device(fmgr, paramgram)
+    # CHECK FOR HA_SLAVE NODES; IF THE CLUSTER IS MISSING COMPLETELY THEN QUIT
+    try:
+        returned_nodes = response[1][0]["ha_slave"]
+        num_of_nodes = len(returned_nodes)
+    except Exception:
+        error_msg = {"cluster_status": "MISSING"}
+        return error_msg
+
+    # INIT LOOP RESOURCES
+    loop_count = 0
+    good_nodes = []
+    expected_nodes = list(paramgram["nodes"])
+    missing_nodes = list(paramgram["nodes"])
+    bad_status_nodes = []
+
+    # LOOP THROUGH THE NODES AND GET THEIR STATUS TO BUILD THE RETURN JSON OBJECT
+    # WE'RE ALSO CHECKING WHETHER THE NODES HAVE A BAD STATUS, OR ARE MISSING ENTIRELY
+    while loop_count < num_of_nodes:
+        node_append = {
+            "node_name": returned_nodes[loop_count]["name"],
+            "node_serial": returned_nodes[loop_count]["sn"],
+            "node_parent": returned_nodes[loop_count]["did"],
+            "node_status": returned_nodes[loop_count]["status"],
+        }
+        # IF THE NODE IS IN THE EXPECTED NODES LIST AND WORKING, ADD IT TO THE GOOD NODES LIST
+        if node_append["node_name"] in expected_nodes and node_append["node_status"] == 1:
+            good_nodes.append(node_append["node_name"])
+        # IF THE NODE IS IN THE EXPECTED NODES LIST BUT NOT WORKING (STATUS != 1), ADD IT TO BAD_STATUS_NODES
+        if node_append["node_name"] in expected_nodes and node_append["node_status"] != 1:
+            bad_status_nodes.append(node_append["node_name"])
+        # REMOVE EVERY NODE THE CLUSTER REPORTED FROM THE MISSING NODES LIST --
+        # WHATEVER REMAINS AFTERWARDS WAS NEVER SEEN BY THE CLUSTER
+        if node_append["node_name"] in missing_nodes:
+            missing_nodes.remove(node_append["node_name"])
+        loop_count += 1
+
+    # BUILD THE RETURN OBJECT FROM THE NODE LISTS
+    nodes = {
+        "good_nodes": good_nodes,
+        "expected_nodes": expected_nodes,
+        "missing_nodes": missing_nodes,
+        "bad_nodes": bad_status_nodes,
+        "query_status": "good",
+    }
+    if len(nodes["good_nodes"]) == len(nodes["expected_nodes"]):
+        nodes["cluster_status"] = "OK"
+    else:
+        nodes["cluster_status"] = "NOT-COMPLIANT"
+    return nodes
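+
+
+# NOTE (editorial sketch, not part of the original module): the node
+# bookkeeping in fmgr_get_cluster_nodes() reduces to comparing the expected
+# names against the names and statuses returned under "ha_slave". An
+# equivalent pure-function sketch (names illustrative, status 1 == healthy):
+def _sketch_classify_nodes(expected, returned):
+    """Illustrative only: returned is a list of dicts with 'name'/'status'."""
+    seen = {node["name"]: node["status"] for node in returned}
+    good = [name for name in expected if seen.get(name) == 1]
+    bad = [name for name in expected if name in seen and seen[name] != 1]
+    missing = [name for name in expected if name not in seen]
+    return good, bad, missing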
+def main():
+    argument_spec = dict(
+        adom=dict(required=False, type="str", default="root"),
+        object=dict(required=True, type="str", choices=["device", "cluster_nodes", "task", "custom"]),
+        custom_endpoint=dict(required=False, type="str"),
+        custom_dict=dict(required=False, type="dict"),
+        device_ip=dict(required=False, type="str"),
+        device_unique_name=dict(required=False, type="str"),
+        device_serial=dict(required=False, type="str"),
+        nodes=dict(required=False, type="list"),
+        task_id=dict(required=False, type="str")
+    )
+
+    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False, )
+    paramgram = {
+        "adom": module.params["adom"],
+        "object": module.params["object"],
+        "device_ip": module.params["device_ip"],
+        "device_unique_name": module.params["device_unique_name"],
+        "device_serial": module.params["device_serial"],
+        "nodes": module.params["nodes"],
+        "task_id": module.params["task_id"],
+        "custom_endpoint": module.params["custom_endpoint"],
+        "custom_dict": module.params["custom_dict"]
+    }
+    module.paramgram = paramgram
+    fmgr = None
+    if module._socket_path:
+        connection = Connection(module._socket_path)
+        fmgr = FortiManagerHandler(connection, module)
+        fmgr.tools = FMGRCommon()
+    else:
+        module.fail_json(**FAIL_SOCKET_MSG)
+
+    results = DEFAULT_RESULT_OBJ
+
+    try:
+        # IF OBJECT IS DEVICE
+        if paramgram["object"] == "device" and any(v is not None for v in [paramgram["device_unique_name"],
+                                                                           paramgram["device_serial"],
+                                                                           paramgram["device_ip"]]):
+            results = fmgr_get_device(fmgr, paramgram)
+            if results[0] != 0:
+                module.fail_json(msg="Device query failed!")
+            elif len(results[1]) == 0:
+                module.exit_json(msg="Device NOT FOUND!")
+            else:
+                module.exit_json(msg="Device Found", **results[1][0])
+    except Exception as err:
+        raise FMGBaseException(err)
+
+    try:
+        # IF OBJECT IS CLUSTER_NODES
+        if paramgram["object"] == "cluster_nodes" and paramgram["nodes"] is not None:
+            results = fmgr_get_cluster_nodes(fmgr, paramgram)
+            if results is None:
+                module.fail_json(msg="Query FAILED -- Check module or playbook syntax")
+            elif results["cluster_status"] == "MISSING":
+                module.exit_json(msg="No cluster device found!", **results)
+            elif results["query_status"] == "good":
+                module.exit_json(msg="Cluster Found - Showing Nodes", **results)
+    except Exception as err:
+        raise FMGBaseException(err)
+
+    try:
+        # IF OBJECT IS TASK
+        if paramgram["object"] == "task":
+            results = fmgr_get_task_status(fmgr, paramgram)
+            if results[0] != 0:
+                module.fail_json(**results[1])
+            if results[0] == 0:
+                module.exit_json(**results[1])
+    except Exception as err:
+        raise FMGBaseException(err)
+
+    try:
+        # IF OBJECT IS CUSTOM
+        if paramgram["object"] == "custom":
+            results = fmgr_get_custom(fmgr, paramgram)
+            if results[0] != 0:
+                module.fail_json(msg="QUERY FAILED -- Please check the syntax and the JSON API guide if needed.")
+            if results[0] == 0:
+                results_len = len(results[1])
+                if results_len > 0:
+                    results_combine = dict()
+                    if isinstance(results[1], dict):
+                        results_combine["results"] = results[1]
+                    if isinstance(results[1], list):
+                        results_combine["results"] = results[1][0:results_len]
+                    module.exit_json(msg="Custom Query Success", **results_combine)
+                else:
+                    module.exit_json(msg="NO RESULTS")
+    except Exception as err:
+        raise FMGBaseException(err)
+
+    return module.exit_json(**results[1])
+
+
+if __name__ == "__main__":
+    main()
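+
+
+# NOTE (editorial sketch, not part of the original module): throughout main(),
+# `results` follows the (status_code, response_data) tuple convention returned
+# by process_request(), where a status code of 0 means success. A minimal
+# sketch of that unpacking (function name illustrative):
+def _sketch_unpack_results(results):
+    """Illustrative only: split a FortiManager response tuple."""
+    status_code, data = results[0], results[1]
+    return status_code == 0, data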
diff --git a/plugins/modules/network/fortimanager/fmgr_script.py b/plugins/modules/network/fortimanager/fmgr_script.py
new file mode 100644
index 0000000000..6beef12a4d
--- /dev/null
+++ b/plugins/modules/network/fortimanager/fmgr_script.py
@@ -0,0 +1,266 @@
+#!/usr/bin/python
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible.  If not, see <https://www.gnu.org/licenses/>.
+#
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'status': ['preview'],
+                    'supported_by': 'community',
+                    'metadata_version': '1.1'}
+
+DOCUMENTATION = '''
+---
+module: fmgr_script
+notes:
+    - Full Documentation at U(https://ftnt-ansible-docs.readthedocs.io/en/latest/).
+author: Andrew Welsh (@Ghilli3)
+short_description: Add/Edit/Delete and execute scripts
+description: Create/edit/delete scripts and execute them on the FortiManager using the JSON-RPC API.
+
+options:
+  adom:
+    description:
+      - The administrative domain (adom) the configuration belongs to.
+    required: true
+
+  vdom:
+    description:
+      - The virtual domain (vdom) the configuration belongs to.
+
+  mode:
+    description:
+      - The desired mode of the specified object. Execute will run the script.
+    required: false
+    default: "add"
+    choices: ["add", "delete", "execute", "set"]
+
+  script_name:
+    description:
+      - The name of the script.
+    required: True
+
+  script_type:
+    description:
+      - The type of script (CLI or TCL).
+    required: false
+
+  script_target:
+    description:
+      - The target of the script to be run.
+    required: false
+
+  script_description:
+    description:
+      - The description of the script.
+    required: false
+
+  script_content:
+    description:
+      - The script content that will be executed.
+    required: false
+
+  script_scope:
+    description:
+      - (datasource) The devices that the script will run on. Can include both device members and device group members.
+    required: false
+
+  script_package:
+    description:
+      - (datasource) Policy package object to run the script against.
+    required: false
+'''
+
+EXAMPLES = '''
+- name: CREATE SCRIPT
+  fmgr_script:
+    adom: "root"
+    script_name: "TestScript"
+    script_type: "cli"
+    script_target: "remote_device"
+    script_description: "Created by Ansible"
+    script_content: "get system status"
+
+- name: EXECUTE SCRIPT
+  fmgr_script:
+    adom: "root"
+    script_name: "TestScript"
+    mode: "execute"
+    script_scope: "FGT1,FGT2"
+
+- name: DELETE SCRIPT
+  fmgr_script:
+    adom: "root"
+    script_name: "TestScript"
+    mode: "delete"
+'''
+
+RETURN = """
+api_result:
+  description: full API response, includes status code and message
+  returned: always
+  type: str
+"""
+
+from ansible.module_utils.basic import AnsibleModule, env_fallback
+from ansible.module_utils.connection import Connection
+from ansible_collections.fortinet.fortios.plugins.module_utils.network.fortimanager.fortimanager import FortiManagerHandler
+from ansible_collections.fortinet.fortios.plugins.module_utils.network.fortimanager.common import FMGBaseException
+from ansible_collections.fortinet.fortios.plugins.module_utils.network.fortimanager.common import FMGRCommon
+from ansible_collections.fortinet.fortios.plugins.module_utils.network.fortimanager.common import FMGRMethods
+from ansible_collections.fortinet.fortios.plugins.module_utils.network.fortimanager.common import DEFAULT_RESULT_OBJ
+from ansible_collections.fortinet.fortios.plugins.module_utils.network.fortimanager.common import FAIL_SOCKET_MSG
+
+
+def set_script(fmgr, paramgram):
+    """
+    :param fmgr: The fmgr object instance from fortimanager.py
+    :type fmgr: class object
+    :param paramgram: The formatted dictionary of options to process
+    :type paramgram: dict
+    :return: The response from the FortiManager
+    :rtype: dict
+    """
+
+    datagram = {
+        'content': paramgram["script_content"],
+        'desc': paramgram["script_description"],
+        'name': paramgram["script_name"],
+        'target': paramgram["script_target"],
+        'type': paramgram["script_type"],
+    }
+
+    url = '/dvmdb/adom/{adom}/script/'.format(adom=paramgram["adom"])
+    response = fmgr.process_request(url, datagram, FMGRMethods.SET)
+    return response
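+
+
+# NOTE (editorial sketch, not part of the original module): "add" and "set"
+# both route through set_script(), which performs an upsert against
+# /dvmdb/adom/<adom>/script/. A sketch of the datagram it builds, using the
+# illustrative values from the EXAMPLES block above:
+_SKETCH_SCRIPT_DATAGRAM = {
+    'name': 'TestScript',
+    'type': 'cli',
+    'target': 'remote_device',
+    'desc': 'Created by Ansible',
+    'content': 'get system status',
+}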
+def delete_script(fmgr, paramgram):
+    """
+    :param fmgr: The fmgr object instance from fortimanager.py
+    :type fmgr: class object
+    :param paramgram: The formatted dictionary of options to process
+    :type paramgram: dict
+    :return: The response from the FortiManager
+    :rtype: dict
+    """
+
+    datagram = {
+        'name': paramgram["script_name"],
+    }
+
+    url = '/dvmdb/adom/{adom}/script/{script_name}'.format(adom=paramgram["adom"], script_name=paramgram["script_name"])
+    response = fmgr.process_request(url, datagram, FMGRMethods.DELETE)
+    return response
+
+
+def execute_script(fmgr, paramgram):
+    """
+    :param fmgr: The fmgr object instance from fortimanager.py
+    :type fmgr: class object
+    :param paramgram: The formatted dictionary of options to process
+    :type paramgram: dict
+    :return: The response from the FortiManager
+    :rtype: dict
+    """
+
+    scope_list = list()
+    scope = paramgram["script_scope"].replace(' ', '')
+    scope = scope.split(',')
+    for dev_name in scope:
+        scope_list.append({'name': dev_name, 'vdom': paramgram["vdom"]})
+
+    datagram = {
+        'adom': paramgram["adom"],
+        'script': paramgram["script_name"],
+        'package': paramgram["script_package"],
+        'scope': scope_list,
+    }
+
+    url = '/dvmdb/adom/{adom}/script/execute'.format(adom=paramgram["adom"])
+    response = fmgr.process_request(url, datagram, FMGRMethods.EXEC)
+    return response
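+
+
+# NOTE (editorial sketch, not part of the original module): script_scope is a
+# comma-separated device list, and execute_script() pairs each device with the
+# single vdom parameter. An equivalent sketch of that parsing (function name
+# illustrative):
+def _sketch_scope_list(script_scope, vdom):
+    """Illustrative only: 'FGT1, FGT2' -> [{'name': 'FGT1', 'vdom': vdom}, ...]."""
+    return [{'name': name, 'vdom': vdom}
+            for name in script_scope.replace(' ', '').split(',') if name]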
+def main():
+    argument_spec = dict(
+        adom=dict(required=False, type="str", default="root"),
+        vdom=dict(required=False, type="str", default="root"),
+        mode=dict(choices=["add", "execute", "set", "delete"], type="str", default="add"),
+        script_name=dict(required=True, type="str"),
+        script_type=dict(required=False, type="str"),
+        script_target=dict(required=False, type="str"),
+        script_description=dict(required=False, type="str"),
+        script_content=dict(required=False, type="str"),
+        script_scope=dict(required=False, type="str"),
+        script_package=dict(required=False, type="str"),
+    )
+
+    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False, )
+    paramgram = {
+        "script_name": module.params["script_name"],
+        "script_type": module.params["script_type"],
+        "script_target": module.params["script_target"],
+        "script_description": module.params["script_description"],
+        "script_content": module.params["script_content"],
+        "script_scope": module.params["script_scope"],
+        "script_package": module.params["script_package"],
+        "adom": module.params["adom"],
+        "vdom": module.params["vdom"],
+        "mode": module.params["mode"],
+    }
+    module.paramgram = paramgram
+    fmgr = None
+    if module._socket_path:
+        connection = Connection(module._socket_path)
+        fmgr = FortiManagerHandler(connection, module)
+        fmgr.tools = FMGRCommon()
+    else:
+        module.fail_json(**FAIL_SOCKET_MSG)
+
+    results = DEFAULT_RESULT_OBJ
+
+    try:
+        if paramgram["mode"] in ['add', 'set']:
+            results = set_script(fmgr, paramgram)
+            fmgr.govern_response(module=module, results=results, msg="Operation Finished",
+                                 ansible_facts=fmgr.construct_ansible_facts(results, module.params, module.params))
+    except Exception as err:
+        raise FMGBaseException(err)
+
+    try:
+        if paramgram["mode"] == "execute":
+            results = execute_script(fmgr, paramgram)
+            fmgr.govern_response(module=module, results=results, msg="Operation Finished",
+                                 ansible_facts=fmgr.construct_ansible_facts(results, module.params, module.params))
+    except Exception as err:
+        raise FMGBaseException(err)
+
+    try:
+        if paramgram["mode"] == "delete":
+            results = delete_script(fmgr, paramgram)
+            fmgr.govern_response(module=module, results=results, msg="Operation Finished",
+                                 ansible_facts=fmgr.construct_ansible_facts(results, module.params, module.params))
+    except Exception as err:
+        raise FMGBaseException(err)
+
+    return module.exit_json(**results[1])
+
+
+if __name__ == "__main__":
+    main()
diff --git a/plugins/modules/network/fortimanager/fmgr_secprof_appctrl.py b/plugins/modules/network/fortimanager/fmgr_secprof_appctrl.py
new file mode 100644
index 0000000000..918bff2490
--- /dev/null
+++ b/plugins/modules/network/fortimanager/fmgr_secprof_appctrl.py
@@ -0,0 +1,520 @@
+#!/usr/bin/python
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible.  If not, see <https://www.gnu.org/licenses/>.
+#
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'status': ['preview'],
+                    'supported_by': 'community',
+                    'metadata_version': '1.1'}
+
+DOCUMENTATION = '''
+---
+module: fmgr_secprof_appctrl
+notes:
+    - Full Documentation at U(https://ftnt-ansible-docs.readthedocs.io/en/latest/).
+author:
+    - Luke Weighall (@lweighall)
+    - Andrew Welsh (@Ghilli3)
+    - Jim Huber (@p4r4n0y1ng)
+short_description: Manage application control security profiles
+description:
+  - Manage application control security profiles within FortiManager
+
+options:
+  adom:
+    description:
+      - The ADOM the configuration should belong to.
+    required: false
+    default: root
+
+  mode:
+    description:
+      - Sets one of the available modes for managing the object.
+      - Allows use of soft-adds instead of overwriting existing values.
+    choices: ['add', 'set', 'delete', 'update']
+    required: false
+    default: add
+
+  unknown_application_log:
+    description:
+      - Enable/disable logging for unknown applications.
+      - choice | disable | Disable logging for unknown applications.
+      - choice | enable | Enable logging for unknown applications.
+    required: false
+    choices: ["disable", "enable"]
+
+  unknown_application_action:
+    description:
+      - Pass or block traffic from unknown applications.
+      - choice | pass | Pass or allow unknown applications.
+      - choice | block | Drop or block unknown applications.
+    required: false
+    choices: ["pass", "block"]
+
+  replacemsg_group:
+    description:
+      - Replacement message group.
+    required: false
+
+  p2p_black_list:
+    description:
+      - P2P applications to be black listed (see the flags below).
+      - FLAG Based Options. Specify multiple in list form.
+      - flag | skype | Skype.
+      - flag | edonkey | Edonkey.
+      - flag | bittorrent | Bit torrent.
+    required: false
+    choices: ["skype", "edonkey", "bittorrent"]
+
+  other_application_log:
+    description:
+      - Enable/disable logging for other applications.
+      - choice | disable | Disable logging for other applications.
+      - choice | enable | Enable logging for other applications.
+    required: false
+    choices: ["disable", "enable"]
+
+  other_application_action:
+    description:
+      - Action for other applications.
+      - choice | pass | Allow sessions matching an application in this application list.
+      - choice | block | Block sessions matching an application in this application list.
+    required: false
+    choices: ["pass", "block"]
+
+  options:
+    description:
+      - Basic application protocol signatures to allow (see the flags below).
+      - FLAG Based Options. 
Specify multiple in list form. + - flag | allow-dns | Allow DNS. + - flag | allow-icmp | Allow ICMP. + - flag | allow-http | Allow generic HTTP web browsing. + - flag | allow-ssl | Allow generic SSL communication. + - flag | allow-quic | Allow QUIC. + required: false + choices: ["allow-dns", "allow-icmp", "allow-http", "allow-ssl", "allow-quic"] + + name: + description: + - List name. + required: false + + extended_log: + description: + - Enable/disable extended logging. + - choice | disable | Disable setting. + - choice | enable | Enable setting. + required: false + choices: ["disable", "enable"] + + deep_app_inspection: + description: + - Enable/disable deep application inspection. + - choice | disable | Disable deep application inspection. + - choice | enable | Enable deep application inspection. + required: false + choices: ["disable", "enable"] + + comment: + description: + - comments + required: false + + app_replacemsg: + description: + - Enable/disable replacement messages for blocked applications. + - choice | disable | Disable replacement messages for blocked applications. + - choice | enable | Enable replacement messages for blocked applications. + required: false + choices: ["disable", "enable"] + + entries: + description: + - EXPERTS ONLY! KNOWLEDGE OF FMGR JSON API IS REQUIRED! + - List of multiple child objects to be added. Expects a list of dictionaries. + - Dictionaries must use FortiManager API parameters, not the ansible ones listed below. + - If submitted, all other prefixed sub-parameters ARE IGNORED. This object is MUTUALLY EXCLUSIVE with its options. + - We expect that you know what you are doing with these list parameters, and are leveraging the JSON API Guide. + - WHEN IN DOUBT, OMIT THE USE OF THIS PARAMETER + - AND USE THE SUB OPTIONS BELOW INSTEAD TO CREATE OBJECTS WITH MULTIPLE TASKS + required: false + + entries_action: + description: + - Pass or block traffic, or reset connection for traffic from this application. + - choice | pass | Pass or allow matching traffic. + - choice | block | Block or drop matching traffic. + - choice | reset | Reset sessions for matching traffic. + required: false + choices: ["pass", "block", "reset"] + + entries_application: + description: + - ID of allowed applications. + required: false + + entries_behavior: + description: + - Application behavior filter. + required: false + + entries_category: + description: + - Category ID list. + required: false + + entries_log: + description: + - Enable/disable logging for this application list. + - choice | disable | Disable logging. + - choice | enable | Enable logging. + required: false + choices: ["disable", "enable"] + + entries_log_packet: + description: + - Enable/disable packet logging. + - choice | disable | Disable packet logging. + - choice | enable | Enable packet logging. + required: false + choices: ["disable", "enable"] + + entries_per_ip_shaper: + description: + - Per-IP traffic shaper. + required: false + + entries_popularity: + description: + - Application popularity filter (1 - 5, from least to most popular). + - FLAG Based Options. Specify multiple in list form. + - flag | 1 | Popularity level 1. + - flag | 2 | Popularity level 2. + - flag | 3 | Popularity level 3. + - flag | 4 | Popularity level 4. + - flag | 5 | Popularity level 5. + required: false + choices: ["1", "2", "3", "4", "5"] + + entries_protocols: + description: + - Application protocol filter. + required: false + + entries_quarantine: + description: + - Quarantine method. 
+ - choice | none | Quarantine is disabled. + - choice | attacker | Block all traffic sent from attacker's IP address. + - The attacker's IP address is also added to the banned user list. The target's address is not affected. + required: false + choices: ["none", "attacker"] + + entries_quarantine_expiry: + description: + - Duration of quarantine. (Format ###d##h##m, minimum 1m, maximum 364d23h59m, default = 5m). + - Requires quarantine set to attacker. + required: false + + entries_quarantine_log: + description: + - Enable/disable quarantine logging. + - choice | disable | Disable quarantine logging. + - choice | enable | Enable quarantine logging. + required: false + choices: ["disable", "enable"] + + entries_rate_count: + description: + - Count of the rate. + required: false + + entries_rate_duration: + description: + - Duration (sec) of the rate. + required: false + + entries_rate_mode: + description: + - Rate limit mode. + - choice | periodical | Allow configured number of packets every rate-duration. + - choice | continuous | Block packets once the rate is reached. + required: false + choices: ["periodical", "continuous"] + + entries_rate_track: + description: + - Track the packet protocol field. + - choice | none | + - choice | src-ip | Source IP. + - choice | dest-ip | Destination IP. + - choice | dhcp-client-mac | DHCP client. + - choice | dns-domain | DNS domain. + required: false + choices: ["none", "src-ip", "dest-ip", "dhcp-client-mac", "dns-domain"] + + entries_risk: + description: + - Risk, or impact, of allowing traffic from this application to occur 1 - 5; + - (Low, Elevated, Medium, High, and Critical). + required: false + + entries_session_ttl: + description: + - Session TTL (0 = default). + required: false + + entries_shaper: + description: + - Traffic shaper. + required: false + + entries_shaper_reverse: + description: + - Reverse traffic shaper. + required: false + + entries_sub_category: + description: + - Application Sub-category ID list. + required: false + + entries_technology: + description: + - Application technology filter. + required: false + + entries_vendor: + description: + - Application vendor filter. + required: false + + entries_parameters_value: + description: + - Parameter value. 
+ required: false + + +''' + +EXAMPLES = ''' + - name: DELETE Profile + fmgr_secprof_appctrl: + name: "Ansible_Application_Control_Profile" + comment: "Created by Ansible Module TEST" + mode: "delete" + + - name: CREATE Profile + fmgr_secprof_appctrl: + name: "Ansible_Application_Control_Profile" + comment: "Created by Ansible Module TEST" + mode: "set" + entries: [{ + action: "block", + log: "enable", + log-packet: "enable", + protocols: ["1"], + quarantine: "attacker", + quarantine-log: "enable", + }, + {action: "pass", + category: ["2","3","4"]}, + ] +''' + +RETURN = """ +api_result: + description: full API response, includes status code and message + returned: always + type: str +""" + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.connection import Connection +from ansible_collections.fortinet.fortios.plugins.module_utils.network.fortimanager.fortimanager import FortiManagerHandler +from ansible_collections.fortinet.fortios.plugins.module_utils.network.fortimanager.common import FMGBaseException +from ansible_collections.fortinet.fortios.plugins.module_utils.network.fortimanager.common import FMGRCommon +from ansible_collections.fortinet.fortios.plugins.module_utils.network.fortimanager.common import DEFAULT_RESULT_OBJ +from ansible_collections.fortinet.fortios.plugins.module_utils.network.fortimanager.common import FAIL_SOCKET_MSG +from ansible_collections.fortinet.fortios.plugins.module_utils.network.fortimanager.common import prepare_dict +from ansible_collections.fortinet.fortios.plugins.module_utils.network.fortimanager.common import scrub_dict + +############### +# START METHODS +############### + + +def fmgr_application_list_modify(fmgr, paramgram): + """ + fmgr_application_list -- Modifies Application Control Profiles on FortiManager + + :param fmgr: The fmgr object instance from fmgr_utils.py + :type fmgr: class object + :param paramgram: The formatted dictionary of options to process + :type paramgram: dict + + :return: The response from the FortiManager + :rtype: dict + """ + # INIT A BASIC OBJECTS + response = DEFAULT_RESULT_OBJ + url = "" + datagram = {} + + # EVAL THE MODE PARAMETER FOR SET OR ADD + if paramgram["mode"] in ['set', 'add', 'update']: + url = '/pm/config/adom/{adom}/obj/application/list'.format(adom=paramgram["adom"]) + datagram = scrub_dict(prepare_dict(paramgram)) + + # EVAL THE MODE PARAMETER FOR DELETE + elif paramgram["mode"] == "delete": + # SET THE CORRECT URL FOR DELETE + url = '/pm/config/adom/{adom}/obj/application/list/{name}'.format(adom=paramgram["adom"], + name=paramgram["name"]) + datagram = {} + + response = fmgr.process_request(url, datagram, paramgram["mode"]) + return response + + +############# +# END METHODS +############# + + +def main(): + argument_spec = dict( + adom=dict(type="str", default="root"), + mode=dict(choices=["add", "set", "delete", "update"], type="str", default="add"), + + unknown_application_log=dict(required=False, type="str", choices=["disable", "enable"]), + unknown_application_action=dict(required=False, type="str", choices=["pass", "block"]), + replacemsg_group=dict(required=False, type="str"), + p2p_black_list=dict(required=False, type="str", choices=["skype", "edonkey", "bittorrent"]), + other_application_log=dict(required=False, type="str", choices=["disable", "enable"]), + other_application_action=dict(required=False, type="str", choices=["pass", "block"]), + options=dict(required=False, type="str", + choices=["allow-dns", "allow-icmp", "allow-http", "allow-ssl", 
"allow-quic"]), + name=dict(required=False, type="str"), + extended_log=dict(required=False, type="str", choices=["disable", "enable"]), + deep_app_inspection=dict(required=False, type="str", choices=["disable", "enable"]), + comment=dict(required=False, type="str"), + app_replacemsg=dict(required=False, type="str", choices=["disable", "enable"]), + entries=dict(required=False, type="list"), + entries_action=dict(required=False, type="str", choices=["pass", "block", "reset"]), + entries_application=dict(required=False, type="str"), + entries_behavior=dict(required=False, type="str"), + entries_category=dict(required=False, type="str"), + entries_log=dict(required=False, type="str", choices=["disable", "enable"]), + entries_log_packet=dict(required=False, type="str", choices=["disable", "enable"]), + entries_per_ip_shaper=dict(required=False, type="str"), + entries_popularity=dict(required=False, type="str", choices=["1", "2", "3", "4", "5"]), + entries_protocols=dict(required=False, type="str"), + entries_quarantine=dict(required=False, type="str", choices=["none", "attacker"]), + entries_quarantine_expiry=dict(required=False, type="str"), + entries_quarantine_log=dict(required=False, type="str", choices=["disable", "enable"]), + entries_rate_count=dict(required=False, type="int"), + entries_rate_duration=dict(required=False, type="int"), + entries_rate_mode=dict(required=False, type="str", choices=["periodical", "continuous"]), + entries_rate_track=dict(required=False, type="str", + choices=["none", "src-ip", "dest-ip", "dhcp-client-mac", "dns-domain"]), + entries_risk=dict(required=False, type="str"), + entries_session_ttl=dict(required=False, type="int"), + entries_shaper=dict(required=False, type="str"), + entries_shaper_reverse=dict(required=False, type="str"), + entries_sub_category=dict(required=False, type="str"), + entries_technology=dict(required=False, type="str"), + entries_vendor=dict(required=False, type="str"), + + entries_parameters_value=dict(required=False, type="str"), + + ) + module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False, ) + # MODULE PARAMGRAM + paramgram = { + "mode": module.params["mode"], + "adom": module.params["adom"], + "unknown-application-log": module.params["unknown_application_log"], + "unknown-application-action": module.params["unknown_application_action"], + "replacemsg-group": module.params["replacemsg_group"], + "p2p-black-list": module.params["p2p_black_list"], + "other-application-log": module.params["other_application_log"], + "other-application-action": module.params["other_application_action"], + "options": module.params["options"], + "name": module.params["name"], + "extended-log": module.params["extended_log"], + "deep-app-inspection": module.params["deep_app_inspection"], + "comment": module.params["comment"], + "app-replacemsg": module.params["app_replacemsg"], + "entries": { + "action": module.params["entries_action"], + "application": module.params["entries_application"], + "behavior": module.params["entries_behavior"], + "category": module.params["entries_category"], + "log": module.params["entries_log"], + "log-packet": module.params["entries_log_packet"], + "per-ip-shaper": module.params["entries_per_ip_shaper"], + "popularity": module.params["entries_popularity"], + "protocols": module.params["entries_protocols"], + "quarantine": module.params["entries_quarantine"], + "quarantine-expiry": module.params["entries_quarantine_expiry"], + "quarantine-log": module.params["entries_quarantine_log"], + "rate-count": 
module.params["entries_rate_count"], + "rate-duration": module.params["entries_rate_duration"], + "rate-mode": module.params["entries_rate_mode"], + "rate-track": module.params["entries_rate_track"], + "risk": module.params["entries_risk"], + "session-ttl": module.params["entries_session_ttl"], + "shaper": module.params["entries_shaper"], + "shaper-reverse": module.params["entries_shaper_reverse"], + "sub-category": module.params["entries_sub_category"], + "technology": module.params["entries_technology"], + "vendor": module.params["entries_vendor"], + "parameters": { + "value": module.params["entries_parameters_value"], + } + } + } + module.paramgram = paramgram + fmgr = None + if module._socket_path: + connection = Connection(module._socket_path) + fmgr = FortiManagerHandler(connection, module) + fmgr.tools = FMGRCommon() + else: + module.fail_json(**FAIL_SOCKET_MSG) + + list_overrides = ['entries'] + paramgram = fmgr.tools.paramgram_child_list_override(list_overrides=list_overrides, + paramgram=paramgram, module=module) + + results = DEFAULT_RESULT_OBJ + try: + results = fmgr_application_list_modify(fmgr, paramgram) + fmgr.govern_response(module=module, results=results, + ansible_facts=fmgr.construct_ansible_facts(results, module.params, paramgram)) + except Exception as err: + raise FMGBaseException(err) + + return module.exit_json(**results[1]) + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/network/fortimanager/fmgr_secprof_av.py b/plugins/modules/network/fortimanager/fmgr_secprof_av.py new file mode 100644 index 0000000000..017429b947 --- /dev/null +++ b/plugins/modules/network/fortimanager/fmgr_secprof_av.py @@ -0,0 +1,1390 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: fmgr_secprof_av +notes: + - Full Documentation at U(https://ftnt-ansible-docs.readthedocs.io/en/latest/). +author: + - Luke Weighall (@lweighall) + - Andrew Welsh (@Ghilli3) + - Jim Huber (@p4r4n0y1ng) +short_description: Manage security profile +description: + - Manage security profile groups for FortiManager objects + +options: + adom: + description: + - The ADOM the configuration should belong to. + required: false + default: root + + mode: + description: + - Sets one of three modes for managing the object. + - Allows use of soft-adds instead of overwriting existing values + choices: ['add', 'set', 'delete', 'update'] + required: false + default: add + + scan_mode: + description: + - Choose between full scan mode and quick scan mode. + required: false + choices: + - quick + - full + + replacemsg_group: + description: + - Replacement message group customized for this profile. + required: false + + name: + description: + - Profile name. 
+ required: false + + mobile_malware_db: + description: + - Enable/disable using the mobile malware signature database. + required: false + choices: + - disable + - enable + + inspection_mode: + description: + - Inspection mode. + required: false + choices: + - proxy + - flow-based + + ftgd_analytics: + description: + - Settings to control which files are uploaded to FortiSandbox. + required: false + choices: + - disable + - suspicious + - everything + + extended_log: + description: + - Enable/disable extended logging for antivirus. + required: false + choices: + - disable + - enable + + comment: + description: + - Comment. + required: false + + av_virus_log: + description: + - Enable/disable AntiVirus logging. + required: false + choices: + - disable + - enable + + av_block_log: + description: + - Enable/disable logging for AntiVirus file blocking. + required: false + choices: + - disable + - enable + + analytics_wl_filetype: + description: + - Do not submit files matching this DLP file-pattern to FortiSandbox. + required: false + + analytics_max_upload: + description: + - Maximum size of files that can be uploaded to FortiSandbox (1 - 395 MBytes, default = 10). + required: false + + analytics_db: + description: + - Enable/disable using the FortiSandbox signature database to supplement the AV signature databases. + required: false + choices: + - disable + - enable + + analytics_bl_filetype: + description: + - Only submit files matching this DLP file-pattern to FortiSandbox. + required: false + + content_disarm: + description: + - EXPERTS ONLY! KNOWLEDGE OF FMGR JSON API IS REQUIRED! + - List of multiple child objects to be added. Expects a list of dictionaries. + - Dictionaries must use FortiManager API parameters, not the ansible ones listed below. + - If submitted, all other prefixed sub-parameters ARE IGNORED. + - This object is MUTUALLY EXCLUSIVE with its options. + - We expect that you know what you are doing with these list parameters, and are leveraging the JSON API Guide. + - WHEN IN DOUBT, USE THE SUB OPTIONS BELOW INSTEAD TO CREATE OBJECTS WITH MULTIPLE TASKS + required: false + + content_disarm_cover_page: + description: + - Enable/disable inserting a cover page into the disarmed document. + required: false + choices: + - disable + - enable + + content_disarm_detect_only: + description: + - Enable/disable only detect disarmable files, do not alter content. + required: false + choices: + - disable + - enable + + content_disarm_office_embed: + description: + - Enable/disable stripping of embedded objects in Microsoft Office documents. + required: false + choices: + - disable + - enable + + content_disarm_office_hylink: + description: + - Enable/disable stripping of hyperlinks in Microsoft Office documents. + required: false + choices: + - disable + - enable + + content_disarm_office_linked: + description: + - Enable/disable stripping of linked objects in Microsoft Office documents. + required: false + choices: + - disable + - enable + + content_disarm_office_macro: + description: + - Enable/disable stripping of macros in Microsoft Office documents. + required: false + choices: + - disable + - enable + + content_disarm_original_file_destination: + description: + - Destination to send original file if active content is removed. + required: false + choices: + - fortisandbox + - quarantine + - discard + + content_disarm_pdf_act_form: + description: + - Enable/disable stripping of actions that submit data to other targets in PDF documents. 
+ required: false + choices: + - disable + - enable + + content_disarm_pdf_act_gotor: + description: + - Enable/disable stripping of links to other PDFs in PDF documents. + required: false + choices: + - disable + - enable + + content_disarm_pdf_act_java: + description: + - Enable/disable stripping of actions that execute JavaScript code in PDF documents. + required: false + choices: + - disable + - enable + + content_disarm_pdf_act_launch: + description: + - Enable/disable stripping of links to external applications in PDF documents. + required: false + choices: + - disable + - enable + + content_disarm_pdf_act_movie: + description: + - Enable/disable stripping of embedded movies in PDF documents. + required: false + choices: + - disable + - enable + + content_disarm_pdf_act_sound: + description: + - Enable/disable stripping of embedded sound files in PDF documents. + required: false + choices: + - disable + - enable + + content_disarm_pdf_embedfile: + description: + - Enable/disable stripping of embedded files in PDF documents. + required: false + choices: + - disable + - enable + + content_disarm_pdf_hyperlink: + description: + - Enable/disable stripping of hyperlinks from PDF documents. + required: false + choices: + - disable + - enable + + content_disarm_pdf_javacode: + description: + - Enable/disable stripping of JavaScript code in PDF documents. + required: false + choices: + - disable + - enable + + ftp: + description: + - EXPERTS ONLY! KNOWLEDGE OF FMGR JSON API IS REQUIRED! + - List of multiple child objects to be added. Expects a list of dictionaries. + - Dictionaries must use FortiManager API parameters, not the ansible ones listed below. + - If submitted, all other prefixed sub-parameters ARE IGNORED. + - This object is MUTUALLY EXCLUSIVE with its options. + - We expect that you know what you are doing with these list parameters, and are leveraging the JSON API Guide. + - WHEN IN DOUBT, USE THE SUB OPTIONS BELOW INSTEAD TO CREATE OBJECTS WITH MULTIPLE TASKS + required: false + + ftp_archive_block: + description: + - Select the archive types to block. + - FLAG Based Options. Specify multiple in list form. + required: false + choices: + - encrypted + - corrupted + - multipart + - nested + - mailbomb + - unhandled + - partiallycorrupted + - fileslimit + - timeout + + ftp_archive_log: + description: + - Select the archive types to log. + - FLAG Based Options. Specify multiple in list form. + required: false + choices: + - encrypted + - corrupted + - multipart + - nested + - mailbomb + - unhandled + - partiallycorrupted + - fileslimit + - timeout + + ftp_emulator: + description: + - Enable/disable the virus emulator. + required: false + choices: + - disable + - enable + + ftp_options: + description: + - Enable/disable FTP AntiVirus scanning, monitoring, and quarantine. + - FLAG Based Options. Specify multiple in list form. + required: false + choices: + - scan + - quarantine + - avmonitor + + ftp_outbreak_prevention: + description: + - Enable FortiGuard Virus Outbreak Prevention service. + required: false + choices: + - disabled + - files + - full-archive + + http: + description: + - EXPERTS ONLY! KNOWLEDGE OF FMGR JSON API IS REQUIRED! + - List of multiple child objects to be added. Expects a list of dictionaries. + - Dictionaries must use FortiManager API parameters, not the ansible ones listed below. + - If submitted, all other prefixed sub-parameters ARE IGNORED. + - This object is MUTUALLY EXCLUSIVE with its options. 
+ - We expect that you know what you are doing with these list parameters, and are leveraging the JSON API Guide. + - WHEN IN DOUBT, USE THE SUB OPTIONS BELOW INSTEAD TO CREATE OBJECTS WITH MULTIPLE TASKS + required: false + + http_archive_block: + description: + - Select the archive types to block. + - FLAG Based Options. Specify multiple in list form. + required: false + choices: + - encrypted + - corrupted + - multipart + - nested + - mailbomb + - unhandled + - partiallycorrupted + - fileslimit + - timeout + + http_archive_log: + description: + - Select the archive types to log. + - FLAG Based Options. Specify multiple in list form. + required: false + choices: + - encrypted + - corrupted + - multipart + - nested + - mailbomb + - unhandled + - partiallycorrupted + - fileslimit + - timeout + + http_content_disarm: + description: + - Enable Content Disarm and Reconstruction for this protocol. + required: false + choices: + - disable + - enable + + http_emulator: + description: + - Enable/disable the virus emulator. + required: false + choices: + - disable + - enable + + http_options: + description: + - Enable/disable HTTP AntiVirus scanning, monitoring, and quarantine. + - FLAG Based Options. Specify multiple in list form. + required: false + choices: + - scan + - quarantine + - avmonitor + + http_outbreak_prevention: + description: + - Enable FortiGuard Virus Outbreak Prevention service. + required: false + choices: + - disabled + - files + - full-archive + + imap: + description: + - EXPERTS ONLY! KNOWLEDGE OF FMGR JSON API IS REQUIRED! + - List of multiple child objects to be added. Expects a list of dictionaries. + - Dictionaries must use FortiManager API parameters, not the ansible ones listed below. + - If submitted, all other prefixed sub-parameters ARE IGNORED. + - This object is MUTUALLY EXCLUSIVE with its options. + - We expect that you know what you are doing with these list parameters, and are leveraging the JSON API Guide. + - WHEN IN DOUBT, USE THE SUB OPTIONS BELOW INSTEAD TO CREATE OBJECTS WITH MULTIPLE TASKS + required: false + + imap_archive_block: + description: + - Select the archive types to block. + - FLAG Based Options. Specify multiple in list form. + required: false + choices: + - encrypted + - corrupted + - multipart + - nested + - mailbomb + - unhandled + - partiallycorrupted + - fileslimit + - timeout + + imap_archive_log: + description: + - Select the archive types to log. + - FLAG Based Options. Specify multiple in list form. + required: false + choices: + - encrypted + - corrupted + - multipart + - nested + - mailbomb + - unhandled + - partiallycorrupted + - fileslimit + - timeout + + imap_content_disarm: + description: + - Enable Content Disarm and Reconstruction for this protocol. + required: false + choices: + - disable + - enable + + imap_emulator: + description: + - Enable/disable the virus emulator. + required: false + choices: + - disable + - enable + + imap_executables: + description: + - Treat Windows executable files as viruses for the purpose of blocking or monitoring. + required: false + choices: + - default + - virus + + imap_options: + description: + - Enable/disable IMAP AntiVirus scanning, monitoring, and quarantine. + - FLAG Based Options. Specify multiple in list form. + required: false + choices: + - scan + - quarantine + - avmonitor + + imap_outbreak_prevention: + description: + - Enable FortiGuard Virus Outbreak Prevention service. 
+ required: false + choices: + - disabled + - files + - full-archive + + mapi: + description: + - EXPERTS ONLY! KNOWLEDGE OF FMGR JSON API IS REQUIRED! + - List of multiple child objects to be added. Expects a list of dictionaries. + - Dictionaries must use FortiManager API parameters, not the ansible ones listed below. + - If submitted, all other prefixed sub-parameters ARE IGNORED. + - This object is MUTUALLY EXCLUSIVE with its options. + - We expect that you know what you are doing with these list parameters, and are leveraging the JSON API Guide. + - WHEN IN DOUBT, USE THE SUB OPTIONS BELOW INSTEAD TO CREATE OBJECTS WITH MULTIPLE TASKS + required: false + + mapi_archive_block: + description: + - Select the archive types to block. + - FLAG Based Options. Specify multiple in list form. + required: false + choices: + - encrypted + - corrupted + - multipart + - nested + - mailbomb + - unhandled + - partiallycorrupted + - fileslimit + - timeout + + mapi_archive_log: + description: + - Select the archive types to log. + - FLAG Based Options. Specify multiple in list form. + required: false + choices: + - encrypted + - corrupted + - multipart + - nested + - mailbomb + - unhandled + - partiallycorrupted + - fileslimit + - timeout + + mapi_emulator: + description: + - Enable/disable the virus emulator. + required: false + choices: + - disable + - enable + + mapi_executables: + description: + - Treat Windows executable files as viruses for the purpose of blocking or monitoring. + required: false + choices: + - default + - virus + + mapi_options: + description: + - Enable/disable MAPI AntiVirus scanning, monitoring, and quarantine. + - FLAG Based Options. Specify multiple in list form. + required: false + choices: + - scan + - quarantine + - avmonitor + + mapi_outbreak_prevention: + description: + - Enable FortiGuard Virus Outbreak Prevention service. + required: false + choices: + - disabled + - files + - full-archive + + nac_quar: + description: + - EXPERTS ONLY! KNOWLEDGE OF FMGR JSON API IS REQUIRED! + - List of multiple child objects to be added. Expects a list of dictionaries. + - Dictionaries must use FortiManager API parameters, not the ansible ones listed below. + - If submitted, all other prefixed sub-parameters ARE IGNORED. + - This object is MUTUALLY EXCLUSIVE with its options. + - We expect that you know what you are doing with these list parameters, and are leveraging the JSON API Guide. + - WHEN IN DOUBT, USE THE SUB OPTIONS BELOW INSTEAD TO CREATE OBJECTS WITH MULTIPLE TASKS + required: false + + nac_quar_expiry: + description: + - Duration of quarantine. + required: false + + nac_quar_infected: + description: + - Enable/Disable quarantining infected hosts to the banned user list. + required: false + choices: + - none + - quar-src-ip + + nac_quar_log: + description: + - Enable/disable AntiVirus quarantine logging. + required: false + choices: + - disable + - enable + + nntp: + description: + - EXPERTS ONLY! KNOWLEDGE OF FMGR JSON API IS REQUIRED! + - List of multiple child objects to be added. Expects a list of dictionaries. + - Dictionaries must use FortiManager API parameters, not the ansible ones listed below. + - If submitted, all other prefixed sub-parameters ARE IGNORED. + - This object is MUTUALLY EXCLUSIVE with its options. + - We expect that you know what you are doing with these list parameters, and are leveraging the JSON API Guide. 
+ - WHEN IN DOUBT, USE THE SUB OPTIONS BELOW INSTEAD TO CREATE OBJECTS WITH MULTIPLE TASKS + required: false + + nntp_archive_block: + description: + - Select the archive types to block. + - FLAG Based Options. Specify multiple in list form. + required: false + choices: + - encrypted + - corrupted + - multipart + - nested + - mailbomb + - unhandled + - partiallycorrupted + - fileslimit + - timeout + + nntp_archive_log: + description: + - Select the archive types to log. + - FLAG Based Options. Specify multiple in list form. + required: false + choices: + - encrypted + - corrupted + - multipart + - nested + - mailbomb + - unhandled + - partiallycorrupted + - fileslimit + - timeout + + nntp_emulator: + description: + - Enable/disable the virus emulator. + required: false + choices: + - disable + - enable + + nntp_options: + description: + - Enable/disable NNTP AntiVirus scanning, monitoring, and quarantine. + - FLAG Based Options. Specify multiple in list form. + required: false + choices: + - scan + - quarantine + - avmonitor + + nntp_outbreak_prevention: + description: + - Enable FortiGuard Virus Outbreak Prevention service. + required: false + choices: + - disabled + - files + - full-archive + + pop3: + description: + - EXPERTS ONLY! KNOWLEDGE OF FMGR JSON API IS REQUIRED! + - List of multiple child objects to be added. Expects a list of dictionaries. + - Dictionaries must use FortiManager API parameters, not the ansible ones listed below. + - If submitted, all other prefixed sub-parameters ARE IGNORED. + - This object is MUTUALLY EXCLUSIVE with its options. + - We expect that you know what you are doing with these list parameters, and are leveraging the JSON API Guide. + - WHEN IN DOUBT, USE THE SUB OPTIONS BELOW INSTEAD TO CREATE OBJECTS WITH MULTIPLE TASKS + required: false + + pop3_archive_block: + description: + - Select the archive types to block. + - FLAG Based Options. Specify multiple in list form. + required: false + choices: + - encrypted + - corrupted + - multipart + - nested + - mailbomb + - unhandled + - partiallycorrupted + - fileslimit + - timeout + + pop3_archive_log: + description: + - Select the archive types to log. + - FLAG Based Options. Specify multiple in list form. + required: false + choices: + - encrypted + - corrupted + - multipart + - nested + - mailbomb + - unhandled + - partiallycorrupted + - fileslimit + - timeout + + pop3_content_disarm: + description: + - Enable Content Disarm and Reconstruction for this protocol. + required: false + choices: + - disable + - enable + + pop3_emulator: + description: + - Enable/disable the virus emulator. + required: false + choices: + - disable + - enable + + pop3_executables: + description: + - Treat Windows executable files as viruses for the purpose of blocking or monitoring. + required: false + choices: + - default + - virus + + pop3_options: + description: + - Enable/disable POP3 AntiVirus scanning, monitoring, and quarantine. + - FLAG Based Options. Specify multiple in list form. + required: false + choices: + - scan + - quarantine + - avmonitor + + pop3_outbreak_prevention: + description: + - Enable FortiGuard Virus Outbreak Prevention service. + required: false + choices: + - disabled + - files + - full-archive + + smb: + description: + - EXPERTS ONLY! KNOWLEDGE OF FMGR JSON API IS REQUIRED! + - List of multiple child objects to be added. Expects a list of dictionaries. + - Dictionaries must use FortiManager API parameters, not the ansible ones listed below. 
+ - If submitted, all other prefixed sub-parameters ARE IGNORED. + - This object is MUTUALLY EXCLUSIVE with its options. + - We expect that you know what you are doing with these list parameters, and are leveraging the JSON API Guide. + - WHEN IN DOUBT, USE THE SUB OPTIONS BELOW INSTEAD TO CREATE OBJECTS WITH MULTIPLE TASKS + required: false + + smb_archive_block: + description: + - Select the archive types to block. + - FLAG Based Options. Specify multiple in list form. + required: false + choices: + - encrypted + - corrupted + - multipart + - nested + - mailbomb + - unhandled + - partiallycorrupted + - fileslimit + - timeout + + smb_archive_log: + description: + - Select the archive types to log. + - FLAG Based Options. Specify multiple in list form. + required: false + choices: + - encrypted + - corrupted + - multipart + - nested + - mailbomb + - unhandled + - partiallycorrupted + - fileslimit + - timeout + + smb_emulator: + description: + - Enable/disable the virus emulator. + required: false + choices: + - disable + - enable + + smb_options: + description: + - Enable/disable SMB AntiVirus scanning, monitoring, and quarantine. + - FLAG Based Options. Specify multiple in list form. + required: false + choices: + - scan + - quarantine + - avmonitor + + smb_outbreak_prevention: + description: + - Enable FortiGuard Virus Outbreak Prevention service. + required: false + choices: + - disabled + - files + - full-archive + + smtp: + description: + - EXPERTS ONLY! KNOWLEDGE OF FMGR JSON API IS REQUIRED! + - List of multiple child objects to be added. Expects a list of dictionaries. + - Dictionaries must use FortiManager API parameters, not the ansible ones listed below. + - If submitted, all other prefixed sub-parameters ARE IGNORED. + - This object is MUTUALLY EXCLUSIVE with its options. + - We expect that you know what you are doing with these list parameters, and are leveraging the JSON API Guide. + - WHEN IN DOUBT, USE THE SUB OPTIONS BELOW INSTEAD TO CREATE OBJECTS WITH MULTIPLE TASKS + required: false + + smtp_archive_block: + description: + - Select the archive types to block. + - FLAG Based Options. Specify multiple in list form. + required: false + choices: + - encrypted + - corrupted + - multipart + - nested + - mailbomb + - unhandled + - partiallycorrupted + - fileslimit + - timeout + + smtp_archive_log: + description: + - Select the archive types to log. + - FLAG Based Options. Specify multiple in list form. + required: false + choices: + - encrypted + - corrupted + - multipart + - nested + - mailbomb + - unhandled + - partiallycorrupted + - fileslimit + - timeout + + smtp_content_disarm: + description: + - Enable Content Disarm and Reconstruction for this protocol. + required: false + choices: + - disable + - enable + + smtp_emulator: + description: + - Enable/disable the virus emulator. + required: false + choices: + - disable + - enable + + smtp_executables: + description: + - Treat Windows executable files as viruses for the purpose of blocking or monitoring. + required: false + choices: + - default + - virus + + smtp_options: + description: + - Enable/disable SMTP AntiVirus scanning, monitoring, and quarantine. + - FLAG Based Options. Specify multiple in list form. + required: false + choices: + - scan + - quarantine + - avmonitor + + smtp_outbreak_prevention: + description: + - Enable FortiGuard Virus Outbreak Prevention service. 
+ required: false + choices: + - disabled + - files + - full-archive +''' + +EXAMPLES = ''' + - name: DELETE Profile + fmgr_secprof_av: + name: "Ansible_AV_Profile" + mode: "delete" + + - name: CREATE Profile + fmgr_secprof_av: + name: "Ansible_AV_Profile" + comment: "Created by Ansible Module TEST" + mode: "set" + inspection_mode: "proxy" + ftgd_analytics: "everything" + av_block_log: "enable" + av_virus_log: "enable" + scan_mode: "full" + mobile_malware_db: "enable" + ftp_archive_block: "encrypted" + ftp_outbreak_prevention: "files" + ftp_archive_log: "timeout" + ftp_emulator: "disable" + ftp_options: "scan" +''' + +RETURN = """ +api_result: + description: full API response, includes status code and message + returned: always + type: str +""" + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.connection import Connection +from ansible_collections.fortinet.fortios.plugins.module_utils.network.fortimanager.fortimanager import FortiManagerHandler +from ansible_collections.fortinet.fortios.plugins.module_utils.network.fortimanager.common import FMGBaseException +from ansible_collections.fortinet.fortios.plugins.module_utils.network.fortimanager.common import FMGRCommon +from ansible_collections.fortinet.fortios.plugins.module_utils.network.fortimanager.common import DEFAULT_RESULT_OBJ +from ansible_collections.fortinet.fortios.plugins.module_utils.network.fortimanager.common import FAIL_SOCKET_MSG +from ansible_collections.fortinet.fortios.plugins.module_utils.network.fortimanager.common import prepare_dict +from ansible_collections.fortinet.fortios.plugins.module_utils.network.fortimanager.common import scrub_dict + +############### +# START METHODS +############### + + +def fmgr_antivirus_profile_modify(fmgr, paramgram): + """ + :param fmgr: The fmgr object instance from fortimanager.py + :type fmgr: class object + :param paramgram: The formatted dictionary of options to process + :type paramgram: dict + :return: The response from the FortiManager + :rtype: dict + """ + + mode = paramgram["mode"] + adom = paramgram["adom"] + + response = DEFAULT_RESULT_OBJ + # EVAL THE MODE PARAMETER FOR SET OR ADD + if mode in ['set', 'add', 'update']: + url = '/pm/config/adom/{adom}/obj/antivirus/profile'.format(adom=adom) + datagram = scrub_dict(prepare_dict(paramgram)) + + # EVAL THE MODE PARAMETER FOR DELETE + else: + # SET THE CORRECT URL FOR DELETE + url = '/pm/config/adom/{adom}/obj/antivirus/profile/{name}'.format(adom=adom, name=paramgram["name"]) + datagram = {} + + response = fmgr.process_request(url, datagram, paramgram["mode"]) + return response + +############# +# END METHODS +############# + + +def main(): + argument_spec = dict( + adom=dict(required=False, type="str", default="root"), + mode=dict(choices=["add", "set", "delete", "update"], type="str", default="add"), + + scan_mode=dict(required=False, type="str", choices=["quick", "full"]), + replacemsg_group=dict(required=False, type="dict"), + name=dict(required=False, type="str"), + mobile_malware_db=dict(required=False, type="str", choices=["disable", "enable"]), + inspection_mode=dict(required=False, type="str", choices=["proxy", "flow-based"]), + ftgd_analytics=dict(required=False, type="str", choices=["disable", "suspicious", "everything"]), + extended_log=dict(required=False, type="str", choices=["disable", "enable"]), + comment=dict(required=False, type="str"), + av_virus_log=dict(required=False, type="str", choices=["disable", "enable"]), + av_block_log=dict(required=False, type="str", 
choices=["disable", "enable"]), + analytics_wl_filetype=dict(required=False, type="dict"), + analytics_max_upload=dict(required=False, type="int"), + analytics_db=dict(required=False, type="str", choices=["disable", "enable"]), + analytics_bl_filetype=dict(required=False, type="dict"), + content_disarm=dict(required=False, type="list"), + content_disarm_cover_page=dict(required=False, type="str", choices=["disable", "enable"]), + content_disarm_detect_only=dict(required=False, type="str", choices=["disable", "enable"]), + content_disarm_office_embed=dict(required=False, type="str", choices=["disable", "enable"]), + content_disarm_office_hylink=dict(required=False, type="str", choices=["disable", "enable"]), + content_disarm_office_linked=dict(required=False, type="str", choices=["disable", "enable"]), + content_disarm_office_macro=dict(required=False, type="str", choices=["disable", "enable"]), + content_disarm_original_file_destination=dict(required=False, type="str", choices=["fortisandbox", + "quarantine", + "discard"]), + content_disarm_pdf_act_form=dict(required=False, type="str", choices=["disable", "enable"]), + content_disarm_pdf_act_gotor=dict(required=False, type="str", choices=["disable", "enable"]), + content_disarm_pdf_act_java=dict(required=False, type="str", choices=["disable", "enable"]), + content_disarm_pdf_act_launch=dict(required=False, type="str", choices=["disable", "enable"]), + content_disarm_pdf_act_movie=dict(required=False, type="str", choices=["disable", "enable"]), + content_disarm_pdf_act_sound=dict(required=False, type="str", choices=["disable", "enable"]), + content_disarm_pdf_embedfile=dict(required=False, type="str", choices=["disable", "enable"]), + content_disarm_pdf_hyperlink=dict(required=False, type="str", choices=["disable", "enable"]), + content_disarm_pdf_javacode=dict(required=False, type="str", choices=["disable", "enable"]), + ftp=dict(required=False, type="list"), + ftp_archive_block=dict(required=False, type="str", choices=["encrypted", + "corrupted", + "multipart", + "nested", + "mailbomb", + "unhandled", + "partiallycorrupted", + "fileslimit", + "timeout"]), + ftp_archive_log=dict(required=False, type="str", choices=["encrypted", + "corrupted", + "multipart", + "nested", + "mailbomb", + "unhandled", + "partiallycorrupted", + "fileslimit", + "timeout"]), + ftp_emulator=dict(required=False, type="str", choices=["disable", "enable"]), + ftp_options=dict(required=False, type="str", choices=["scan", "quarantine", "avmonitor"]), + ftp_outbreak_prevention=dict(required=False, type="str", choices=["disabled", "files", "full-archive"]), + http=dict(required=False, type="list"), + http_archive_block=dict(required=False, type="str", choices=["encrypted", + "corrupted", + "multipart", + "nested", + "mailbomb", + "unhandled", + "partiallycorrupted", + "fileslimit", + "timeout"]), + http_archive_log=dict(required=False, type="str", choices=["encrypted", + "corrupted", + "multipart", + "nested", + "mailbomb", + "unhandled", + "partiallycorrupted", + "fileslimit", + "timeout"]), + http_content_disarm=dict(required=False, type="str", choices=["disable", "enable"]), + http_emulator=dict(required=False, type="str", choices=["disable", "enable"]), + http_options=dict(required=False, type="str", choices=["scan", "quarantine", "avmonitor"]), + http_outbreak_prevention=dict(required=False, type="str", choices=["disabled", "files", "full-archive"]), + imap=dict(required=False, type="list"), + imap_archive_block=dict(required=False, type="str", choices=["encrypted", 
+ "corrupted", + "multipart", + "nested", + "mailbomb", + "unhandled", + "partiallycorrupted", + "fileslimit", + "timeout"]), + imap_archive_log=dict(required=False, type="str", choices=["encrypted", + "corrupted", + "multipart", + "nested", + "mailbomb", + "unhandled", + "partiallycorrupted", + "fileslimit", + "timeout"]), + imap_content_disarm=dict(required=False, type="str", choices=["disable", "enable"]), + imap_emulator=dict(required=False, type="str", choices=["disable", "enable"]), + imap_executables=dict(required=False, type="str", choices=["default", "virus"]), + imap_options=dict(required=False, type="str", choices=["scan", "quarantine", "avmonitor"]), + imap_outbreak_prevention=dict(required=False, type="str", choices=["disabled", "files", "full-archive"]), + mapi=dict(required=False, type="list"), + mapi_archive_block=dict(required=False, type="str", choices=["encrypted", + "corrupted", + "multipart", + "nested", + "mailbomb", + "unhandled", + "partiallycorrupted", + "fileslimit", + "timeout"]), + mapi_archive_log=dict(required=False, type="str", choices=["encrypted", + "corrupted", + "multipart", + "nested", + "mailbomb", + "unhandled", + "partiallycorrupted", + "fileslimit", + "timeout"]), + mapi_emulator=dict(required=False, type="str", choices=["disable", "enable"]), + mapi_executables=dict(required=False, type="str", choices=["default", "virus"]), + mapi_options=dict(required=False, type="str", choices=["scan", "quarantine", "avmonitor"]), + mapi_outbreak_prevention=dict(required=False, type="str", choices=["disabled", "files", "full-archive"]), + nac_quar=dict(required=False, type="list"), + nac_quar_expiry=dict(required=False, type="str"), + nac_quar_infected=dict(required=False, type="str", choices=["none", "quar-src-ip"]), + nac_quar_log=dict(required=False, type="str", choices=["disable", "enable"]), + nntp=dict(required=False, type="list"), + nntp_archive_block=dict(required=False, type="str", choices=["encrypted", + "corrupted", + "multipart", + "nested", + "mailbomb", + "unhandled", + "partiallycorrupted", + "fileslimit", + "timeout"]), + nntp_archive_log=dict(required=False, type="str", choices=["encrypted", + "corrupted", + "multipart", + "nested", + "mailbomb", + "unhandled", + "partiallycorrupted", + "fileslimit", + "timeout"]), + nntp_emulator=dict(required=False, type="str", choices=["disable", "enable"]), + nntp_options=dict(required=False, type="str", choices=["scan", "quarantine", "avmonitor"]), + nntp_outbreak_prevention=dict(required=False, type="str", choices=["disabled", "files", "full-archive"]), + pop3=dict(required=False, type="list"), + pop3_archive_block=dict(required=False, type="str", choices=["encrypted", + "corrupted", + "multipart", + "nested", + "mailbomb", + "unhandled", + "partiallycorrupted", + "fileslimit", + "timeout"]), + pop3_archive_log=dict(required=False, type="str", choices=["encrypted", + "corrupted", + "multipart", + "nested", + "mailbomb", + "unhandled", + "partiallycorrupted", + "fileslimit", + "timeout"]), + pop3_content_disarm=dict(required=False, type="str", choices=["disable", "enable"]), + pop3_emulator=dict(required=False, type="str", choices=["disable", "enable"]), + pop3_executables=dict(required=False, type="str", choices=["default", "virus"]), + pop3_options=dict(required=False, type="str", choices=["scan", "quarantine", "avmonitor"]), + pop3_outbreak_prevention=dict(required=False, type="str", choices=["disabled", "files", "full-archive"]), + smb=dict(required=False, type="list"), + 
smb_archive_block=dict(required=False, type="str", choices=["encrypted", + "corrupted", + "multipart", + "nested", + "mailbomb", + "unhandled", + "partiallycorrupted", + "fileslimit", + "timeout"]), + smb_archive_log=dict(required=False, type="str", choices=["encrypted", + "corrupted", + "multipart", + "nested", + "mailbomb", + "unhandled", + "partiallycorrupted", + "fileslimit", + "timeout"]), + smb_emulator=dict(required=False, type="str", choices=["disable", "enable"]), + smb_options=dict(required=False, type="str", choices=["scan", "quarantine", "avmonitor"]), + smb_outbreak_prevention=dict(required=False, type="str", choices=["disabled", "files", "full-archive"]), + smtp=dict(required=False, type="list"), + smtp_archive_block=dict(required=False, type="str", choices=["encrypted", + "corrupted", + "multipart", + "nested", + "mailbomb", + "unhandled", + "partiallycorrupted", + "fileslimit", + "timeout"]), + smtp_archive_log=dict(required=False, type="str", choices=["encrypted", + "corrupted", + "multipart", + "nested", + "mailbomb", + "unhandled", + "partiallycorrupted", + "fileslimit", + "timeout"]), + smtp_content_disarm=dict(required=False, type="str", choices=["disable", "enable"]), + smtp_emulator=dict(required=False, type="str", choices=["disable", "enable"]), + smtp_executables=dict(required=False, type="str", choices=["default", "virus"]), + smtp_options=dict(required=False, type="str", choices=["scan", "quarantine", "avmonitor"]), + smtp_outbreak_prevention=dict(required=False, type="str", choices=["disabled", "files", "full-archive"]), + + ) + + module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False, ) + # MODULE PARAMGRAM + paramgram = { + "mode": module.params["mode"], + "adom": module.params["adom"], + "scan-mode": module.params["scan_mode"], + "replacemsg-group": module.params["replacemsg_group"], + "name": module.params["name"], + "mobile-malware-db": module.params["mobile_malware_db"], + "inspection-mode": module.params["inspection_mode"], + "ftgd-analytics": module.params["ftgd_analytics"], + "extended-log": module.params["extended_log"], + "comment": module.params["comment"], + "av-virus-log": module.params["av_virus_log"], + "av-block-log": module.params["av_block_log"], + "analytics-wl-filetype": module.params["analytics_wl_filetype"], + "analytics-max-upload": module.params["analytics_max_upload"], + "analytics-db": module.params["analytics_db"], + "analytics-bl-filetype": module.params["analytics_bl_filetype"], + "content-disarm": { + "cover-page": module.params["content_disarm_cover_page"], + "detect-only": module.params["content_disarm_detect_only"], + "office-embed": module.params["content_disarm_office_embed"], + "office-hylink": module.params["content_disarm_office_hylink"], + "office-linked": module.params["content_disarm_office_linked"], + "office-macro": module.params["content_disarm_office_macro"], + "original-file-destination": module.params["content_disarm_original_file_destination"], + "pdf-act-form": module.params["content_disarm_pdf_act_form"], + "pdf-act-gotor": module.params["content_disarm_pdf_act_gotor"], + "pdf-act-java": module.params["content_disarm_pdf_act_java"], + "pdf-act-launch": module.params["content_disarm_pdf_act_launch"], + "pdf-act-movie": module.params["content_disarm_pdf_act_movie"], + "pdf-act-sound": module.params["content_disarm_pdf_act_sound"], + "pdf-embedfile": module.params["content_disarm_pdf_embedfile"], + "pdf-hyperlink": module.params["content_disarm_pdf_hyperlink"], + "pdf-javacode": 
module.params["content_disarm_pdf_javacode"], + }, + "ftp": { + "archive-block": module.params["ftp_archive_block"], + "archive-log": module.params["ftp_archive_log"], + "emulator": module.params["ftp_emulator"], + "options": module.params["ftp_options"], + "outbreak-prevention": module.params["ftp_outbreak_prevention"], + }, + "http": { + "archive-block": module.params["http_archive_block"], + "archive-log": module.params["http_archive_log"], + "content-disarm": module.params["http_content_disarm"], + "emulator": module.params["http_emulator"], + "options": module.params["http_options"], + "outbreak-prevention": module.params["http_outbreak_prevention"], + }, + "imap": { + "archive-block": module.params["imap_archive_block"], + "archive-log": module.params["imap_archive_log"], + "content-disarm": module.params["imap_content_disarm"], + "emulator": module.params["imap_emulator"], + "executables": module.params["imap_executables"], + "options": module.params["imap_options"], + "outbreak-prevention": module.params["imap_outbreak_prevention"], + }, + "mapi": { + "archive-block": module.params["mapi_archive_block"], + "archive-log": module.params["mapi_archive_log"], + "emulator": module.params["mapi_emulator"], + "executables": module.params["mapi_executables"], + "options": module.params["mapi_options"], + "outbreak-prevention": module.params["mapi_outbreak_prevention"], + }, + "nac-quar": { + "expiry": module.params["nac_quar_expiry"], + "infected": module.params["nac_quar_infected"], + "log": module.params["nac_quar_log"], + }, + "nntp": { + "archive-block": module.params["nntp_archive_block"], + "archive-log": module.params["nntp_archive_log"], + "emulator": module.params["nntp_emulator"], + "options": module.params["nntp_options"], + "outbreak-prevention": module.params["nntp_outbreak_prevention"], + }, + "pop3": { + "archive-block": module.params["pop3_archive_block"], + "archive-log": module.params["pop3_archive_log"], + "content-disarm": module.params["pop3_content_disarm"], + "emulator": module.params["pop3_emulator"], + "executables": module.params["pop3_executables"], + "options": module.params["pop3_options"], + "outbreak-prevention": module.params["pop3_outbreak_prevention"], + }, + "smb": { + "archive-block": module.params["smb_archive_block"], + "archive-log": module.params["smb_archive_log"], + "emulator": module.params["smb_emulator"], + "options": module.params["smb_options"], + "outbreak-prevention": module.params["smb_outbreak_prevention"], + }, + "smtp": { + "archive-block": module.params["smtp_archive_block"], + "archive-log": module.params["smtp_archive_log"], + "content-disarm": module.params["smtp_content_disarm"], + "emulator": module.params["smtp_emulator"], + "executables": module.params["smtp_executables"], + "options": module.params["smtp_options"], + "outbreak-prevention": module.params["smtp_outbreak_prevention"], + } + } + + module.paramgram = paramgram + fmgr = None + if module._socket_path: + connection = Connection(module._socket_path) + fmgr = FortiManagerHandler(connection, module) + fmgr.tools = FMGRCommon() + else: + module.fail_json(**FAIL_SOCKET_MSG) + + list_overrides = ["content-disarm", "ftp", "http", "imap", "mapi", "nac-quar", "nntp", "pop3", "smb", "smtp"] + paramgram = fmgr.tools.paramgram_child_list_override(list_overrides=list_overrides, + paramgram=paramgram, module=module) + module.paramgram = paramgram + + results = DEFAULT_RESULT_OBJ + + try: + results = fmgr_antivirus_profile_modify(fmgr, paramgram) + fmgr.govern_response(module=module, 
results=results, + ansible_facts=fmgr.construct_ansible_facts(results, module.params, paramgram)) + except Exception as err: + raise FMGBaseException(err) + + return module.exit_json(**results[1]) + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/network/fortimanager/fmgr_secprof_dns.py b/plugins/modules/network/fortimanager/fmgr_secprof_dns.py new file mode 100644 index 0000000000..356c08d712 --- /dev/null +++ b/plugins/modules/network/fortimanager/fmgr_secprof_dns.py @@ -0,0 +1,343 @@ +#!/usr/bin/python +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <https://www.gnu.org/licenses/>. +# + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'metadata_version': '1.1'} + +DOCUMENTATION = ''' +--- +module: fmgr_secprof_dns +notes: + - Full Documentation at U(https://ftnt-ansible-docs.readthedocs.io/en/latest/). +author: + - Luke Weighall (@lweighall) + - Andrew Welsh (@Ghilli3) + - Jim Huber (@p4r4n0y1ng) +short_description: Manage DNS security profiles in FortiManager +description: + - Manage DNS security profiles in FortiManager + +options: + adom: + description: + - The ADOM the configuration should belong to. + required: false + default: root + + mode: + description: + - Sets one of three modes for managing the object. + - Allows use of soft-adds instead of overwriting existing values. + choices: ['add', 'set', 'delete', 'update'] + required: false + default: add + + youtube_restrict: + type: str + description: + - Set safe search for YouTube restriction level. + - choice | strict | Enable strict safe search for YouTube. + - choice | moderate | Enable moderate safe search for YouTube. + required: false + choices: ["strict", "moderate"] + + sdns_ftgd_err_log: + type: str + description: + - Enable/disable FortiGuard SDNS rating error logging. + - choice | disable | Disable FortiGuard SDNS rating error logging. + - choice | enable | Enable FortiGuard SDNS rating error logging. + required: false + choices: ["disable", "enable"] + + sdns_domain_log: + type: str + description: + - Enable/disable domain filtering and botnet domain logging. + - choice | disable | Disable domain filtering and botnet domain logging. + - choice | enable | Enable domain filtering and botnet domain logging. + required: false + choices: ["disable", "enable"] + + safe_search: + type: str + description: + - Enable/disable Google, Bing, and YouTube safe search. + - choice | disable | Disable Google, Bing, and YouTube safe search. + - choice | enable | Enable Google, Bing, and YouTube safe search. + required: false + choices: ["disable", "enable"] + + redirect_portal: + type: str + description: + - IP address of the SDNS redirect portal. + required: false + + name: + type: str + description: + - Profile name.
+ required: false + + log_all_domain: + type: str + description: + - Enable/disable logging of all domains visited (detailed DNS logging). + - choice | disable | Disable logging of all domains visited. + - choice | enable | Enable logging of all domains visited. + required: false + choices: ["disable", "enable"] + + external_ip_blocklist: + type: str + description: + - One or more external IP block lists. + required: false + + comment: + type: str + description: + - Comment for the security profile to show in the FortiManager GUI. + required: false + + block_botnet: + type: str + description: + - Enable/disable blocking botnet C&C DNS lookups. + - choice | disable | Disable blocking botnet C&C DNS lookups. + - choice | enable | Enable blocking botnet C&C DNS lookups. + required: false + choices: ["disable", "enable"] + + block_action: + type: str + description: + - Action to take for blocked domains. + - choice | block | Return NXDOMAIN for blocked domains. + - choice | redirect | Redirect blocked domains to SDNS portal. + required: false + choices: ["block", "redirect"] + + domain_filter_domain_filter_table: + type: str + description: + - DNS domain filter table ID. + required: false + + ftgd_dns_options: + type: str + description: + - FortiGuard DNS filter options. + - FLAG Based Options. Specify multiple in list form. + - flag | error-allow | Allow all domains when FortiGuard DNS servers fail. + - flag | ftgd-disable | Disable FortiGuard DNS domain rating. + required: false + choices: ["error-allow", "ftgd-disable"] + + ftgd_dns_filters_action: + type: str + description: + - Action to take for DNS requests matching the category. + - choice | monitor | Allow DNS requests matching the category and log the result. + - choice | block | Block DNS requests matching the category. + required: false + choices: ["monitor", "block"] + + ftgd_dns_filters_category: + type: str + description: + - Category number. + required: false + + ftgd_dns_filters_log: + type: str + description: + - Enable/disable DNS filter logging for this DNS profile. + - choice | disable | Disable DNS filter logging. + - choice | enable | Enable DNS filter logging.
+ required: false + choices: ["disable", "enable"] + + +''' + +EXAMPLES = ''' + - name: DELETE Profile + fmgr_secprof_dns: + name: "Ansible_DNS_Profile" + comment: "Created by Ansible Module TEST" + mode: "delete" + + - name: CREATE Profile + fmgr_secprof_dns: + name: "Ansible_DNS_Profile" + comment: "Created by Ansible Module TEST" + mode: "set" + block_action: "block" + + +''' + +RETURN = """ +api_result: + description: full API response, includes status code and message + returned: always + type: str +""" + +from ansible.module_utils.basic import AnsibleModule, env_fallback +from ansible.module_utils.connection import Connection +from ansible_collections.fortinet.fortios.plugins.module_utils.network.fortimanager.fortimanager import FortiManagerHandler +from ansible_collections.fortinet.fortios.plugins.module_utils.network.fortimanager.common import FMGBaseException +from ansible_collections.fortinet.fortios.plugins.module_utils.network.fortimanager.common import FMGRCommon +from ansible_collections.fortinet.fortios.plugins.module_utils.network.fortimanager.common import FMGRMethods +from ansible_collections.fortinet.fortios.plugins.module_utils.network.fortimanager.common import DEFAULT_RESULT_OBJ +from ansible_collections.fortinet.fortios.plugins.module_utils.network.fortimanager.common import FAIL_SOCKET_MSG +from ansible_collections.fortinet.fortios.plugins.module_utils.network.fortimanager.common import prepare_dict +from ansible_collections.fortinet.fortios.plugins.module_utils.network.fortimanager.common import scrub_dict + + +############### +# START METHODS +############### + + +def fmgr_dnsfilter_profile_modify(fmgr, paramgram): + """ + :param fmgr: The fmgr object instance from fortimanager.py + :type fmgr: class object + :param paramgram: The formatted dictionary of options to process + :type paramgram: dict + :return: The response from the FortiManager + :rtype: dict + """ + + mode = paramgram["mode"] + adom = paramgram["adom"] + url = "" + datagram = {} + + response = DEFAULT_RESULT_OBJ + + # EVAL THE MODE PARAMETER FOR SET OR ADD + if mode in ['set', 'add', 'update']: + url = '/pm/config/adom/{adom}/obj/dnsfilter/profile'.format(adom=adom) + datagram = scrub_dict(prepare_dict(paramgram)) + + # EVAL THE MODE PARAMETER FOR DELETE + elif mode == "delete": + # SET THE CORRECT URL FOR DELETE + url = '/pm/config/adom/{adom}/obj/dnsfilter/profile/{name}'.format(adom=adom, name=paramgram["name"]) + datagram = {} + + response = fmgr.process_request(url, datagram, paramgram["mode"]) + + return response + + +############# +# END METHODS +############# + + +def main(): + argument_spec = dict( + adom=dict(type="str", default="root"), + mode=dict(choices=["add", "set", "delete", "update"], type="str", default="add"), + + youtube_restrict=dict(required=False, type="str", choices=["strict", "moderate"]), + sdns_ftgd_err_log=dict(required=False, type="str", choices=["disable", "enable"]), + sdns_domain_log=dict(required=False, type="str", choices=["disable", "enable"]), + safe_search=dict(required=False, type="str", choices=["disable", "enable"]), + redirect_portal=dict(required=False, type="str"), + name=dict(required=False, type="str"), + log_all_domain=dict(required=False, type="str", choices=["disable", "enable"]), + external_ip_blocklist=dict(required=False, type="str"), + comment=dict(required=False, type="str"), + block_botnet=dict(required=False, type="str", choices=["disable", "enable"]), + block_action=dict(required=False, type="str", choices=["block", "redirect"]), + + 
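+ # The flattened domain_filter_* and ftgd_dns_* parameters below are re-nested into the 'domain-filter' and 'ftgd-dns' child dictionaries of the paramgram, matching the nested structure the FortiManager dnsfilter API expects.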
domain_filter_domain_filter_table=dict(required=False, type="str"), + + ftgd_dns_options=dict(required=False, type="str", choices=["error-allow", "ftgd-disable"]), + + ftgd_dns_filters_action=dict(required=False, type="str", choices=["monitor", "block"]), + ftgd_dns_filters_category=dict(required=False, type="str"), + ftgd_dns_filters_log=dict(required=False, type="str", choices=["disable", "enable"]), + + ) + + module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False, ) + # MODULE PARAMGRAM + paramgram = { + "mode": module.params["mode"], + "adom": module.params["adom"], + "youtube-restrict": module.params["youtube_restrict"], + "sdns-ftgd-err-log": module.params["sdns_ftgd_err_log"], + "sdns-domain-log": module.params["sdns_domain_log"], + "safe-search": module.params["safe_search"], + "redirect-portal": module.params["redirect_portal"], + "name": module.params["name"], + "log-all-domain": module.params["log_all_domain"], + "external-ip-blocklist": module.params["external_ip_blocklist"], + "comment": module.params["comment"], + "block-botnet": module.params["block_botnet"], + "block-action": module.params["block_action"], + "domain-filter": { + "domain-filter-table": module.params["domain_filter_domain_filter_table"], + }, + "ftgd-dns": { + "options": module.params["ftgd_dns_options"], + "filters": { + "action": module.params["ftgd_dns_filters_action"], + "category": module.params["ftgd_dns_filters_category"], + "log": module.params["ftgd_dns_filters_log"], + } + } + } + + module.paramgram = paramgram + fmgr = None + if module._socket_path: + connection = Connection(module._socket_path) + fmgr = FortiManagerHandler(connection, module) + fmgr.tools = FMGRCommon() + else: + module.fail_json(**FAIL_SOCKET_MSG) + + results = DEFAULT_RESULT_OBJ + + try: + results = fmgr_dnsfilter_profile_modify(fmgr, paramgram) + fmgr.govern_response(module=module, results=results, + ansible_facts=fmgr.construct_ansible_facts(results, module.params, paramgram)) + + except Exception as err: + raise FMGBaseException(err) + + return module.exit_json(**results[1]) + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/network/fortimanager/fmgr_secprof_ips.py b/plugins/modules/network/fortimanager/fmgr_secprof_ips.py new file mode 100644 index 0000000000..e735c7bcb3 --- /dev/null +++ b/plugins/modules/network/fortimanager/fmgr_secprof_ips.py @@ -0,0 +1,668 @@ +#!/usr/bin/python +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <https://www.gnu.org/licenses/>. +# + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'metadata_version': '1.1'} + +DOCUMENTATION = ''' +--- +module: fmgr_secprof_ips +notes: + - Full Documentation at U(https://ftnt-ansible-docs.readthedocs.io/en/latest/).
+author: + - Luke Weighall (@lweighall) + - Andrew Welsh (@Ghilli3) + - Jim Huber (@p4r4n0y1ng) +short_description: Manage IPS security profiles in FortiManager +description: + - Manage IPS security profiles in FortiManager + +options: + adom: + description: + - The ADOM the configuration should belong to. + required: false + default: root + + mode: + description: + - Sets one of three modes for managing the object. + - Allows use of soft-adds instead of overwriting existing values. + choices: ['add', 'set', 'delete', 'update'] + required: false + default: add + + replacemsg_group: + description: + - Replacement message group. + required: false + + name: + description: + - Sensor name. + required: false + + extended_log: + description: + - Enable/disable extended logging. + required: false + choices: + - disable + - enable + + comment: + description: + - Comment. + required: false + + block_malicious_url: + description: + - Enable/disable malicious URL blocking. + required: false + choices: + - disable + - enable + + entries: + description: + - EXPERTS ONLY! KNOWLEDGE OF FMGR JSON API IS REQUIRED! + - List of multiple child objects to be added. Expects a list of dictionaries. + - Dictionaries must use FortiManager API parameters, not the ansible ones listed below. + - If submitted, all other prefixed sub-parameters ARE IGNORED. + - This object is MUTUALLY EXCLUSIVE with its options. + - We expect that you know what you are doing with these list parameters, and are leveraging the JSON API Guide. + - WHEN IN DOUBT, USE THE SUB OPTIONS BELOW INSTEAD TO CREATE OBJECTS WITH MULTIPLE TASKS + required: false + + entries_action: + description: + - Action taken with traffic in which signatures are detected. + required: false + choices: + - pass + - block + - reset + - default + + entries_application: + description: + - Applications to be protected. set application ? lists available applications. all includes + all applications. other includes all unlisted applications. + required: false + + entries_location: + description: + - Protect client or server traffic. + required: false + + entries_log: + description: + - Enable/disable logging of signatures included in filter. + required: false + choices: + - disable + - enable + + entries_log_attack_context: + description: + - Enable/disable logging of attack context: URL buffer, header buffer, body buffer, packet buffer. + required: false + choices: + - disable + - enable + + entries_log_packet: + description: + - Enable/disable packet logging. Enable to save the packet that triggers the filter. You can + download the packets in pcap format for diagnostic use. + required: false + choices: + - disable + - enable + + entries_os: + description: + - Operating systems to be protected. all includes all operating systems. other includes all + unlisted operating systems. + required: false + + entries_protocol: + description: + - Protocols to be examined. set protocol ? lists available protocols. all includes all protocols. + other includes all unlisted protocols. + required: false + + entries_quarantine: + description: + - Quarantine method. + required: false + choices: + - none + - attacker + + entries_quarantine_expiry: + description: + - Duration of quarantine. + required: false + + entries_quarantine_log: + description: + - Enable/disable quarantine logging. + required: false + choices: + - disable + - enable + + entries_rate_count: + description: + - Count of the rate.
+ required: false + + entries_rate_duration: + description: + - Duration (sec) of the rate. + required: false + + entries_rate_mode: + description: + - Rate limit mode. + required: false + choices: + - periodical + - continuous + + entries_rate_track: + description: + - Track the packet protocol field. + required: false + choices: + - none + - src-ip + - dest-ip + - dhcp-client-mac + - dns-domain + + entries_rule: + description: + - Identifies the predefined or custom IPS signatures to add to the sensor. + required: false + + entries_severity: + description: + - Relative severity of the signature, from info to critical. Log messages generated by the signature + include the severity. + required: false + + entries_status: + description: + - Status of the signatures included in filter. default enables the filter and only uses filters + with a default status of enable. Filters with default status of disable will not be used. + required: false + choices: + - disable + - enable + - default + + entries_exempt_ip_dst_ip: + description: + - Destination IP address and netmask. + required: false + + entries_exempt_ip_src_ip: + description: + - Source IP address and netmask. + required: false + + filter: + description: + - EXPERTS ONLY! KNOWLEDGE OF FMGR JSON API IS REQUIRED! + - List of multiple child objects to be added. Expects a list of dictionaries. + - Dictionaries must use FortiManager API parameters, not the ansible ones listed below. + - If submitted, all other prefixed sub-parameters ARE IGNORED. + - This object is MUTUALLY EXCLUSIVE with its options. + - We expect that you know what you are doing with these list parameters, and are leveraging the JSON API Guide. + - WHEN IN DOUBT, USE THE SUB OPTIONS BELOW INSTEAD TO CREATE OBJECTS WITH MULTIPLE TASKS + required: false + + filter_action: + description: + - Action of selected rules. + required: false + choices: + - pass + - block + - default + - reset + + filter_application: + description: + - Vulnerable application filter. + required: false + + filter_location: + description: + - Vulnerability location filter. + required: false + + filter_log: + description: + - Enable/disable logging of selected rules. + required: false + choices: + - disable + - enable + + filter_log_packet: + description: + - Enable/disable packet logging of selected rules. + required: false + choices: + - disable + - enable + + filter_name: + description: + - Filter name. + required: false + + filter_os: + description: + - Vulnerable OS filter. + required: false + + filter_protocol: + description: + - Vulnerable protocol filter. + required: false + + filter_quarantine: + description: + - Quarantine IP or interface. + required: false + choices: + - none + - attacker + + filter_quarantine_expiry: + description: + - Duration of quarantine in minutes. + required: false + + filter_quarantine_log: + description: + - Enable/disable logging of selected quarantine. + required: false + choices: + - disable + - enable + + filter_severity: + description: + - Vulnerability severity filter. + required: false + + filter_status: + description: + - Selected rules status. + required: false + choices: + - disable + - enable + - default + + override: + description: + - EXPERTS ONLY! KNOWLEDGE OF FMGR JSON API IS REQUIRED! + - List of multiple child objects to be added. Expects a list of dictionaries. + - Dictionaries must use FortiManager API parameters, not the ansible ones listed below. + - If submitted, all other prefixed sub-parameters ARE IGNORED.
+ - This object is MUTUALLY EXCLUSIVE with its options. + - We expect that you know what you are doing with these list parameters, and are leveraging the JSON API Guide. + - WHEN IN DOUBT, USE THE SUB OPTIONS BELOW INSTEAD TO CREATE OBJECTS WITH MULTIPLE TASKS + required: false + + override_action: + description: + - Action of override rule. + required: false + choices: + - pass + - block + - reset + + override_log: + description: + - Enable/disable logging. + required: false + choices: + - disable + - enable + + override_log_packet: + description: + - Enable/disable packet logging. + required: false + choices: + - disable + - enable + + override_quarantine: + description: + - Quarantine IP or interface. + required: false + choices: + - none + - attacker + + override_quarantine_expiry: + description: + - Duration of quarantine in minutes. + required: false + + override_quarantine_log: + description: + - Enable/disable logging of selected quarantine. + required: false + choices: + - disable + - enable + + override_rule_id: + description: + - Override rule ID. + required: false + + override_status: + description: + - Enable/disable status of override rule. + required: false + choices: + - disable + - enable + + override_exempt_ip_dst_ip: + description: + - Destination IP address and netmask. + required: false + + override_exempt_ip_src_ip: + description: + - Source IP address and netmask. + required: false +''' + +EXAMPLES = ''' + - name: DELETE Profile + fmgr_secprof_ips: + name: "Ansible_IPS_Profile" + comment: "Created by Ansible Module TEST" + mode: "delete" + + - name: CREATE Profile + fmgr_secprof_ips: + name: "Ansible_IPS_Profile" + comment: "Created by Ansible Module TEST" + mode: "set" + block_malicious_url: "enable" + entries: [{severity: "high", action: "block", log-packet: "enable"}, {severity: "medium", action: "pass"}] +''' + +RETURN = """ +api_result: + description: full API response, includes status code and message + returned: always + type: str +""" + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.connection import Connection +from ansible_collections.fortinet.fortios.plugins.module_utils.network.fortimanager.fortimanager import FortiManagerHandler +from ansible_collections.fortinet.fortios.plugins.module_utils.network.fortimanager.common import FMGBaseException +from ansible_collections.fortinet.fortios.plugins.module_utils.network.fortimanager.common import FMGRCommon +from ansible_collections.fortinet.fortios.plugins.module_utils.network.fortimanager.common import DEFAULT_RESULT_OBJ +from ansible_collections.fortinet.fortios.plugins.module_utils.network.fortimanager.common import FAIL_SOCKET_MSG +from ansible_collections.fortinet.fortios.plugins.module_utils.network.fortimanager.common import prepare_dict +from ansible_collections.fortinet.fortios.plugins.module_utils.network.fortimanager.common import scrub_dict + + +############### +# START METHODS +############### + + +def fmgr_ips_sensor_modify(fmgr, paramgram): + """ + :param fmgr: The fmgr object instance from fortimanager.py + :type fmgr: class object + :param paramgram: The formatted dictionary of options to process + :type paramgram: dict + :return: The response from the FortiManager + :rtype: dict + """ + + mode = paramgram["mode"] + adom = paramgram["adom"] + # INIT BASIC OBJECTS + response = DEFAULT_RESULT_OBJ + url = "" + datagram = {} + + # EVAL THE MODE PARAMETER FOR SET OR ADD + if mode in ['set', 'add', 'update']: + url =
'/pm/config/adom/{adom}/obj/ips/sensor'.format(adom=adom) + datagram = scrub_dict(prepare_dict(paramgram)) + + # EVAL THE MODE PARAMETER FOR DELETE + elif mode == "delete": + # SET THE CORRECT URL FOR DELETE + url = '/pm/config/adom/{adom}/obj/ips/sensor/{name}'.format( + adom=adom, name=paramgram["name"]) + datagram = {} + + response = fmgr.process_request(url, datagram, paramgram["mode"]) + + return response + + +############# +# END METHODS +############# + + +def main(): + argument_spec = dict( + adom=dict(type="str", default="root"), + mode=dict(choices=["add", "set", "delete", "update"], + type="str", default="add"), + + replacemsg_group=dict(required=False, type="str"), + name=dict(required=False, type="str"), + extended_log=dict(required=False, type="str", + choices=["disable", "enable"]), + comment=dict(required=False, type="str"), + block_malicious_url=dict(required=False, type="str", choices=[ + "disable", "enable"]), + entries=dict(required=False, type="list"), + entries_action=dict(required=False, type="str", choices=[ + "pass", "block", "reset", "default"]), + entries_application=dict(required=False, type="str"), + entries_location=dict(required=False, type="str"), + entries_log=dict(required=False, type="str", + choices=["disable", "enable"]), + entries_log_attack_context=dict( + required=False, type="str", choices=["disable", "enable"]), + entries_log_packet=dict(required=False, type="str", choices=[ + "disable", "enable"]), + entries_os=dict(required=False, type="str"), + entries_protocol=dict(required=False, type="str"), + entries_quarantine=dict(required=False, type="str", choices=[ + "none", "attacker"]), + entries_quarantine_expiry=dict(required=False, type="str"), + entries_quarantine_log=dict( + required=False, type="str", choices=["disable", "enable"]), + entries_rate_count=dict(required=False, type="int"), + entries_rate_duration=dict(required=False, type="int"), + entries_rate_mode=dict(required=False, type="str", choices=[ + "periodical", "continuous"]), + entries_rate_track=dict(required=False, type="str", + choices=["none", "src-ip", "dest-ip", "dhcp-client-mac", "dns-domain"]), + entries_rule=dict(required=False, type="str"), + entries_severity=dict(required=False, type="str"), + entries_status=dict(required=False, type="str", choices=[ + "disable", "enable", "default"]), + + entries_exempt_ip_dst_ip=dict(required=False, type="str"), + entries_exempt_ip_src_ip=dict(required=False, type="str"), + filter=dict(required=False, type="list"), + filter_action=dict(required=False, type="str", choices=[ + "pass", "block", "default", "reset"]), + filter_application=dict(required=False, type="str"), + filter_location=dict(required=False, type="str"), + filter_log=dict(required=False, type="str", + choices=["disable", "enable"]), + filter_log_packet=dict(required=False, type="str", + choices=["disable", "enable"]), + filter_name=dict(required=False, type="str"), + filter_os=dict(required=False, type="str"), + filter_protocol=dict(required=False, type="str"), + filter_quarantine=dict(required=False, type="str", + choices=["none", "attacker"]), + filter_quarantine_expiry=dict(required=False, type="int"), + filter_quarantine_log=dict(required=False, type="str", choices=[ + "disable", "enable"]), + filter_severity=dict(required=False, type="str"), + filter_status=dict(required=False, type="str", choices=[ + "disable", "enable", "default"]), + override=dict(required=False, type="list"), + override_action=dict(required=False, type="str", + choices=["pass", "block", "reset"]), + 
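+ # The override_* options below parallel the entries_* and filter_* groups above; they are collected into the 'override' child object (including its nested 'exempt-ip' dictionary) when the paramgram is built.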
override_log=dict(required=False, type="str", + choices=["disable", "enable"]), + override_log_packet=dict(required=False, type="str", choices=[ + "disable", "enable"]), + override_quarantine=dict(required=False, type="str", choices=[ + "none", "attacker"]), + override_quarantine_expiry=dict(required=False, type="int"), + override_quarantine_log=dict( + required=False, type="str", choices=["disable", "enable"]), + override_rule_id=dict(required=False, type="str"), + override_status=dict(required=False, type="str", + choices=["disable", "enable"]), + + override_exempt_ip_dst_ip=dict(required=False, type="str"), + override_exempt_ip_src_ip=dict(required=False, type="str"), + + ) + + module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False, ) + # MODULE PARAMGRAM + paramgram = { + "mode": module.params["mode"], + "adom": module.params["adom"], + "replacemsg-group": module.params["replacemsg_group"], + "name": module.params["name"], + "extended-log": module.params["extended_log"], + "comment": module.params["comment"], + "block-malicious-url": module.params["block_malicious_url"], + "entries": { + "action": module.params["entries_action"], + "application": module.params["entries_application"], + "location": module.params["entries_location"], + "log": module.params["entries_log"], + "log-attack-context": module.params["entries_log_attack_context"], + "log-packet": module.params["entries_log_packet"], + "os": module.params["entries_os"], + "protocol": module.params["entries_protocol"], + "quarantine": module.params["entries_quarantine"], + "quarantine-expiry": module.params["entries_quarantine_expiry"], + "quarantine-log": module.params["entries_quarantine_log"], + "rate-count": module.params["entries_rate_count"], + "rate-duration": module.params["entries_rate_duration"], + "rate-mode": module.params["entries_rate_mode"], + "rate-track": module.params["entries_rate_track"], + "rule": module.params["entries_rule"], + "severity": module.params["entries_severity"], + "status": module.params["entries_status"], + "exempt-ip": { + "dst-ip": module.params["entries_exempt_ip_dst_ip"], + "src-ip": module.params["entries_exempt_ip_src_ip"], + }, + }, + "filter": { + "action": module.params["filter_action"], + "application": module.params["filter_application"], + "location": module.params["filter_location"], + "log": module.params["filter_log"], + "log-packet": module.params["filter_log_packet"], + "name": module.params["filter_name"], + "os": module.params["filter_os"], + "protocol": module.params["filter_protocol"], + "quarantine": module.params["filter_quarantine"], + "quarantine-expiry": module.params["filter_quarantine_expiry"], + "quarantine-log": module.params["filter_quarantine_log"], + "severity": module.params["filter_severity"], + "status": module.params["filter_status"], + }, + "override": { + "action": module.params["override_action"], + "log": module.params["override_log"], + "log-packet": module.params["override_log_packet"], + "quarantine": module.params["override_quarantine"], + "quarantine-expiry": module.params["override_quarantine_expiry"], + "quarantine-log": module.params["override_quarantine_log"], + "rule-id": module.params["override_rule_id"], + "status": module.params["override_status"], + "exempt-ip": { + "dst-ip": module.params["override_exempt_ip_dst_ip"], + "src-ip": module.params["override_exempt_ip_src_ip"], + } + } + } + module.paramgram = paramgram + fmgr = None + if module._socket_path: + connection = Connection(module._socket_path) + fmgr = 
FortiManagerHandler(connection, module) + fmgr.tools = FMGRCommon() + else: + module.fail_json(**FAIL_SOCKET_MSG) + + list_overrides = ['entries', 'filter', 'override'] + + paramgram = fmgr.tools.paramgram_child_list_override(list_overrides=list_overrides, + paramgram=paramgram, module=module) + + results = DEFAULT_RESULT_OBJ + try: + results = fmgr_ips_sensor_modify(fmgr, paramgram) + fmgr.govern_response(module=module, results=results, + ansible_facts=fmgr.construct_ansible_facts(results, module.params, paramgram)) + + except Exception as err: + raise FMGBaseException(err) + + return module.exit_json(**results[1]) + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/network/fortimanager/fmgr_secprof_profile_group.py b/plugins/modules/network/fortimanager/fmgr_secprof_profile_group.py new file mode 100644 index 0000000000..3fdbd69238 --- /dev/null +++ b/plugins/modules/network/fortimanager/fmgr_secprof_profile_group.py @@ -0,0 +1,291 @@ +#!/usr/bin/python +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <https://www.gnu.org/licenses/>. +# + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'metadata_version': '1.1'} + +DOCUMENTATION = ''' +--- +module: fmgr_secprof_profile_group +notes: + - Full Documentation at U(https://ftnt-ansible-docs.readthedocs.io/en/latest/). +author: + - Luke Weighall (@lweighall) + - Andrew Welsh (@Ghilli3) + - Jim Huber (@p4r4n0y1ng) +short_description: Manage security profile groups within FortiManager +description: + - Manage security profile groups, which allow you to create a group of security profiles and apply it to a policy. + +options: + adom: + description: + - The ADOM the configuration should belong to. + required: false + default: root + + mode: + description: + - Sets one of three modes for managing the object. + - Allows use of soft-adds instead of overwriting existing values. + choices: ['add', 'set', 'delete', 'update'] + required: false + default: add + + webfilter_profile: + type: str + description: + - Name of an existing Web filter profile. + required: false + + waf_profile: + type: str + description: + - Name of an existing Web application firewall profile. + required: false + + voip_profile: + type: str + description: + - Name of an existing VoIP profile. + required: false + + ssl_ssh_profile: + type: str + description: + - Name of an existing SSL SSH profile. + required: false + + ssh_filter_profile: + type: str + description: + - Name of an existing SSH filter profile. + required: false + + spamfilter_profile: + type: str + description: + - Name of an existing Spam filter profile. + required: false + + profile_protocol_options: + type: str + description: + - Name of an existing Protocol options profile. + required: false + + name: + type: str + description: + - Profile group name.
+ required: false + + mms_profile: + type: str + description: + - Name of an existing MMS profile. + required: false + + ips_sensor: + type: str + description: + - Name of an existing IPS sensor. + required: false + + icap_profile: + type: str + description: + - Name of an existing ICAP profile. + required: false + + dnsfilter_profile: + type: str + description: + - Name of an existing DNS filter profile. + required: false + + dlp_sensor: + type: str + description: + - Name of an existing DLP sensor. + required: false + + av_profile: + type: str + description: + - Name of an existing Antivirus profile. + required: false + + application_list: + type: str + description: + - Name of an existing Application list. + required: false + + +''' + +EXAMPLES = ''' + - name: DELETE Profile + fmgr_secprof_profile_group: + name: "Ansible_TEST_Profile_Group" + mode: "delete" + + - name: CREATE Profile + fmgr_secprof_profile_group: + name: "Ansible_TEST_Profile_Group" + mode: "set" + av_profile: "Ansible_AV_Profile" + profile_protocol_options: "default" +''' + +RETURN = """ +api_result: + description: full API response, includes status code and message + returned: always + type: str +""" + +from ansible.module_utils.basic import AnsibleModule, env_fallback +from ansible.module_utils.connection import Connection +from ansible_collections.fortinet.fortios.plugins.module_utils.network.fortimanager.fortimanager import FortiManagerHandler +from ansible_collections.fortinet.fortios.plugins.module_utils.network.fortimanager.common import FMGBaseException +from ansible_collections.fortinet.fortios.plugins.module_utils.network.fortimanager.common import FMGRCommon +from ansible_collections.fortinet.fortios.plugins.module_utils.network.fortimanager.common import FMGRMethods +from ansible_collections.fortinet.fortios.plugins.module_utils.network.fortimanager.common import DEFAULT_RESULT_OBJ +from ansible_collections.fortinet.fortios.plugins.module_utils.network.fortimanager.common import FAIL_SOCKET_MSG +from ansible_collections.fortinet.fortios.plugins.module_utils.network.fortimanager.common import prepare_dict +from ansible_collections.fortinet.fortios.plugins.module_utils.network.fortimanager.common import scrub_dict + + +############### +# START METHODS +############### + + +def fmgr_firewall_profile_group_modify(fmgr, paramgram): + """ + :param fmgr: The fmgr object instance from fortimanager.py + :type fmgr: class object + :param paramgram: The formatted dictionary of options to process + :type paramgram: dict + :return: The response from the FortiManager + :rtype: dict + """ + + mode = paramgram["mode"] + adom = paramgram["adom"] + url = "" + datagram = {} + + response = DEFAULT_RESULT_OBJ + + # EVAL THE MODE PARAMETER FOR SET OR ADD + if mode in ['set', 'add', 'update']: + url = '/pm/config/adom/{adom}/obj/firewall/profile-group'.format(adom=adom) + datagram = scrub_dict(prepare_dict(paramgram)) + + # EVAL THE MODE PARAMETER FOR DELETE + elif mode == "delete": + # SET THE CORRECT URL FOR DELETE + url = '/pm/config/adom/{adom}/obj/firewall/profile-group/{name}'.format(adom=adom, name=paramgram["name"]) + datagram = {} + + response = fmgr.process_request(url, datagram, paramgram["mode"]) + + return response + + +############# +# END METHODS +############# + + +def main(): + argument_spec = dict( + adom=dict(type="str", default="root"), + mode=dict(choices=["add", "set", "delete", "update"], type="str", default="add"), + + webfilter_profile=dict(required=False, type="str"), + waf_profile=dict(required=False, 
type="str"), + voip_profile=dict(required=False, type="str"), + ssl_ssh_profile=dict(required=False, type="str"), + ssh_filter_profile=dict(required=False, type="str"), + spamfilter_profile=dict(required=False, type="str"), + profile_protocol_options=dict(required=False, type="str"), + name=dict(required=False, type="str"), + mms_profile=dict(required=False, type="str"), + ips_sensor=dict(required=False, type="str"), + icap_profile=dict(required=False, type="str"), + dnsfilter_profile=dict(required=False, type="str"), + dlp_sensor=dict(required=False, type="str"), + av_profile=dict(required=False, type="str"), + application_list=dict(required=False, type="str"), + + ) + + module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False, ) + # MODULE PARAMGRAM + paramgram = { + "mode": module.params["mode"], + "adom": module.params["adom"], + "webfilter-profile": module.params["webfilter_profile"], + "waf-profile": module.params["waf_profile"], + "voip-profile": module.params["voip_profile"], + "ssl-ssh-profile": module.params["ssl_ssh_profile"], + "ssh-filter-profile": module.params["ssh_filter_profile"], + "spamfilter-profile": module.params["spamfilter_profile"], + "profile-protocol-options": module.params["profile_protocol_options"], + "name": module.params["name"], + "mms-profile": module.params["mms_profile"], + "ips-sensor": module.params["ips_sensor"], + "icap-profile": module.params["icap_profile"], + "dnsfilter-profile": module.params["dnsfilter_profile"], + "dlp-sensor": module.params["dlp_sensor"], + "av-profile": module.params["av_profile"], + "application-list": module.params["application_list"], + + } + module.paramgram = paramgram + fmgr = None + if module._socket_path: + connection = Connection(module._socket_path) + fmgr = FortiManagerHandler(connection, module) + fmgr.tools = FMGRCommon() + else: + module.fail_json(**FAIL_SOCKET_MSG) + + results = DEFAULT_RESULT_OBJ + + try: + results = fmgr_firewall_profile_group_modify(fmgr, paramgram) + fmgr.govern_response(module=module, results=results, + ansible_facts=fmgr.construct_ansible_facts(results, module.params, paramgram)) + + except Exception as err: + raise FMGBaseException(err) + + return module.exit_json(**results[1]) + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/network/fortimanager/fmgr_secprof_proxy.py b/plugins/modules/network/fortimanager/fmgr_secprof_proxy.py new file mode 100644 index 0000000000..8e92d7092b --- /dev/null +++ b/plugins/modules/network/fortimanager/fmgr_secprof_proxy.py @@ -0,0 +1,336 @@ +#!/usr/bin/python +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+# + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'metadata_version': '1.1'} + +DOCUMENTATION = ''' +--- +module: fmgr_secprof_proxy +notes: + - Full Documentation at U(https://ftnt-ansible-docs.readthedocs.io/en/latest/). +author: + - Luke Weighall (@lweighall) + - Andrew Welsh (@Ghilli3) + - Jim Huber (@p4r4n0y1ng) +short_description: Manage proxy security profiles in FortiManager +description: + - Manage proxy security profiles for FortiGates via FortiManager using the FMG API with playbooks. + +options: + adom: + description: + - The ADOM the configuration should belong to. + required: false + default: root + + mode: + description: + - Sets one of three modes for managing the object. + - Allows use of soft-adds instead of overwriting existing values. + choices: ['add', 'set', 'delete', 'update'] + required: false + default: add + + strip_encoding: + description: + - Enable/disable stripping unsupported encoding from the request header. + - choice | disable | Disable stripping of unsupported encoding from the request header. + - choice | enable | Enable stripping of unsupported encoding from the request header. + required: false + choices: ["disable", "enable"] + + name: + description: + - Profile name. + required: false + + log_header_change: + description: + - Enable/disable logging HTTP header changes. + - choice | disable | Disable logging of HTTP header changes. + - choice | enable | Enable logging of HTTP header changes. + required: false + choices: ["disable", "enable"] + + header_x_forwarded_for: + description: + - Action to take on the HTTP x-forwarded-for header in forwarded requests: forwards (pass), adds, or removes the HTTP header. + - choice | pass | Forward the same HTTP header. + - choice | add | Add the HTTP header. + - choice | remove | Remove the HTTP header. + required: false + choices: ["pass", "add", "remove"] + + header_x_authenticated_user: + description: + - Action to take on the HTTP x-authenticated-user header in forwarded requests: forwards (pass), adds, or removes the HTTP header. + - choice | pass | Forward the same HTTP header. + - choice | add | Add the HTTP header. + - choice | remove | Remove the HTTP header. + required: false + choices: ["pass", "add", "remove"] + + header_x_authenticated_groups: + description: + - Action to take on the HTTP x-authenticated-groups header in forwarded requests: forwards (pass), adds, or removes the HTTP header. + - choice | pass | Forward the same HTTP header. + - choice | add | Add the HTTP header. + - choice | remove | Remove the HTTP header. + required: false + choices: ["pass", "add", "remove"] + + header_via_response: + description: + - Action to take on the HTTP via header in forwarded responses: forwards (pass), adds, or removes the HTTP header. + - choice | pass | Forward the same HTTP header. + - choice | add | Add the HTTP header. + - choice | remove | Remove the HTTP header. + required: false + choices: ["pass", "add", "remove"] + + header_via_request: + description: + - Action to take on the HTTP via header in forwarded requests: forwards (pass), adds, or removes the HTTP header. + - choice | pass | Forward the same HTTP header. + - choice | add | Add the HTTP header. + - choice | remove | Remove the HTTP header.
+    required: false
+    choices: ["pass", "add", "remove"]
+
+  header_front_end_https:
+    description:
+      - Action to take on the HTTP front-end-HTTPS header in forwarded requests. Forwards (pass), adds, or removes the HTTP header.
+      - choice | pass | Forward the same HTTP header.
+      - choice | add | Add the HTTP header.
+      - choice | remove | Remove the HTTP header.
+    required: false
+    choices: ["pass", "add", "remove"]
+
+  header_client_ip:
+    description:
+      - Action to take on the HTTP client-IP header in forwarded requests. Forwards (pass), adds, or removes the HTTP header.
+      - choice | pass | Forward the same HTTP header.
+      - choice | add | Add the HTTP header.
+      - choice | remove | Remove the HTTP header.
+    required: false
+    choices: ["pass", "add", "remove"]
+
+  headers:
+    description:
+      - EXPERTS ONLY! KNOWLEDGE OF FMGR JSON API IS REQUIRED!
+      - List of multiple child objects to be added. Expects a list of dictionaries.
+      - Dictionaries must use FortiManager API parameters, not the ansible ones listed below.
+      - If submitted, all other prefixed sub-parameters ARE IGNORED.
+      - This object is MUTUALLY EXCLUSIVE with its options.
+      - We expect that you know what you are doing with these list parameters, and are leveraging the JSON API Guide.
+      - WHEN IN DOUBT, USE THE SUB OPTIONS BELOW INSTEAD TO CREATE OBJECTS WITH MULTIPLE TASKS
+    required: false
+
+  headers_action:
+    description:
+      - Action to take when the HTTP header is forwarded.
+      - choice | add-to-request | Add the HTTP header to request.
+      - choice | add-to-response | Add the HTTP header to response.
+      - choice | remove-from-request | Remove the HTTP header from request.
+      - choice | remove-from-response | Remove the HTTP header from response.
+    required: false
+    choices: ["add-to-request", "add-to-response", "remove-from-request", "remove-from-response"]
+
+  headers_content:
+    description:
+      - HTTP header's content.
+    required: false
+
+  headers_name:
+    description:
+      - HTTP forwarded header name.
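+    # The expert-only C(headers) parameter above bypasses the flattened
+    # headers_action/headers_content/headers_name options. Judging from the
+    # paramgram mapping in main(), a hypothetical raw entry might look like:
+    #   headers:
+    #     - action: add-to-request
+    #       content: "test"
+    #       name: test_header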
+ required: false + + +''' + +EXAMPLES = ''' + - name: DELETE Profile + fmgr_secprof_proxy: + name: "Ansible_Web_Proxy_Profile" + mode: "delete" + + - name: CREATE Profile + fmgr_secprof_proxy: + name: "Ansible_Web_Proxy_Profile" + mode: "set" + header_client_ip: "pass" + header_front_end_https: "add" + header_via_request: "remove" + header_via_response: "pass" + header_x_authenticated_groups: "add" + header_x_authenticated_user: "remove" + strip_encoding: "enable" + log_header_change: "enable" + header_x_forwarded_for: "pass" + headers_action: "add-to-request" + headers_content: "test" + headers_name: "test_header" +''' + +RETURN = """ +api_result: + description: full API response, includes status code and message + returned: always + type: str +""" + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.connection import Connection +from ansible_collections.fortinet.fortios.plugins.module_utils.network.fortimanager.fortimanager import FortiManagerHandler +from ansible_collections.fortinet.fortios.plugins.module_utils.network.fortimanager.common import FMGBaseException +from ansible_collections.fortinet.fortios.plugins.module_utils.network.fortimanager.common import FMGRCommon +from ansible_collections.fortinet.fortios.plugins.module_utils.network.fortimanager.common import DEFAULT_RESULT_OBJ +from ansible_collections.fortinet.fortios.plugins.module_utils.network.fortimanager.common import FAIL_SOCKET_MSG +from ansible_collections.fortinet.fortios.plugins.module_utils.network.fortimanager.common import prepare_dict +from ansible_collections.fortinet.fortios.plugins.module_utils.network.fortimanager.common import scrub_dict + + +############### +# START METHODS +############### + + +def fmgr_web_proxy_profile_modify(fmgr, paramgram): + """ + :param fmgr: The fmgr object instance from fortimanager.py + :type fmgr: class object + :param paramgram: The formatted dictionary of options to process + :type paramgram: dict + :return: The response from the FortiManager + :rtype: dict + """ + + mode = paramgram["mode"] + adom = paramgram["adom"] + + response = DEFAULT_RESULT_OBJ + url = "" + datagram = {} + + # EVAL THE MODE PARAMETER FOR SET OR ADD + if mode in ['set', 'add', 'update']: + url = '/pm/config/adom/{adom}/obj/web-proxy/profile'.format(adom=adom) + datagram = scrub_dict(prepare_dict(paramgram)) + + # EVAL THE MODE PARAMETER FOR DELETE + elif mode == "delete": + # SET THE CORRECT URL FOR DELETE + url = '/pm/config/adom/{adom}/obj/web-proxy/profile/{name}'.format(adom=adom, name=paramgram["name"]) + datagram = {} + + response = fmgr.process_request(url, datagram, paramgram["mode"]) + + return response + + +############# +# END METHODS +############# + + +def main(): + argument_spec = dict( + adom=dict(type="str", default="root"), + mode=dict(choices=["add", "set", "delete", "update"], type="str", default="add"), + + strip_encoding=dict(required=False, type="str", choices=["disable", "enable"]), + name=dict(required=False, type="str"), + log_header_change=dict(required=False, type="str", choices=["disable", "enable"]), + header_x_forwarded_for=dict(required=False, type="str", choices=["pass", "add", "remove"]), + header_x_authenticated_user=dict(required=False, type="str", choices=["pass", "add", "remove"]), + header_x_authenticated_groups=dict(required=False, type="str", choices=["pass", "add", "remove"]), + header_via_response=dict(required=False, type="str", choices=["pass", "add", "remove"]), + header_via_request=dict(required=False, type="str", 
choices=["pass", "add", "remove"]),
+        header_front_end_https=dict(required=False, type="str", choices=["pass", "add", "remove"]),
+        header_client_ip=dict(required=False, type="str", choices=["pass", "add", "remove"]),
+        headers=dict(required=False, type="list"),
+        headers_action=dict(required=False, type="str", choices=["add-to-request", "add-to-response",
+                                                                 "remove-from-request", "remove-from-response"]),
+        headers_content=dict(required=False, type="str"),
+        headers_name=dict(required=False, type="str"),
+
+    )
+
+    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False, )
+    # MODULE PARAMGRAM
+    paramgram = {
+        "mode": module.params["mode"],
+        "adom": module.params["adom"],
+        "strip-encoding": module.params["strip_encoding"],
+        "name": module.params["name"],
+        "log-header-change": module.params["log_header_change"],
+        "header-x-forwarded-for": module.params["header_x_forwarded_for"],
+        "header-x-authenticated-user": module.params["header_x_authenticated_user"],
+        "header-x-authenticated-groups": module.params["header_x_authenticated_groups"],
+        "header-via-response": module.params["header_via_response"],
+        "header-via-request": module.params["header_via_request"],
+        "header-front-end-https": module.params["header_front_end_https"],
+        "header-client-ip": module.params["header_client_ip"],
+        "headers": {
+            "action": module.params["headers_action"],
+            "content": module.params["headers_content"],
+            "name": module.params["headers_name"],
+        }
+    }
+
+    module.paramgram = paramgram
+    fmgr = None
+    if module._socket_path:
+        connection = Connection(module._socket_path)
+        fmgr = FortiManagerHandler(connection, module)
+        fmgr.tools = FMGRCommon()
+    else:
+        module.fail_json(**FAIL_SOCKET_MSG)
+
+    list_overrides = ['headers']
+    paramgram = fmgr.tools.paramgram_child_list_override(list_overrides=list_overrides,
+                                                         paramgram=paramgram, module=module)
+    module.paramgram = paramgram
+
+    results = DEFAULT_RESULT_OBJ
+    try:
+        results = fmgr_web_proxy_profile_modify(fmgr, paramgram)
+        fmgr.govern_response(module=module, results=results,
+                             ansible_facts=fmgr.construct_ansible_facts(results, module.params, paramgram))
+
+    except Exception as err:
+        raise FMGBaseException(err)
+
+    return module.exit_json(**results[1])
+
+
+if __name__ == "__main__":
+    main()
diff --git a/plugins/modules/network/fortimanager/fmgr_secprof_spam.py b/plugins/modules/network/fortimanager/fmgr_secprof_spam.py
new file mode 100644
index 0000000000..2e15abf203
--- /dev/null
+++ b/plugins/modules/network/fortimanager/fmgr_secprof_spam.py
@@ -0,0 +1,611 @@
+#!/usr/bin/python
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <https://www.gnu.org/licenses/>.
+#
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'status': ['preview'],
+                    'supported_by': 'community',
+                    'metadata_version': '1.1'}
+
+DOCUMENTATION = '''
+---
+module: fmgr_secprof_spam
+notes:
+    - Full Documentation at U(https://ftnt-ansible-docs.readthedocs.io/en/latest/).
+author:
+    - Luke Weighall (@lweighall)
+    - Andrew Welsh (@Ghilli3)
+    - Jim Huber (@p4r4n0y1ng)
+short_description: Spam filter profile for FMG
+description:
+  - Manage spam filter security profiles within FortiManager via API
+
+options:
+  adom:
+    description:
+      - The ADOM the configuration should belong to.
+    required: false
+    default: root
+
+  mode:
+    description:
+      - Sets one of four modes for managing the object.
+      - Allows use of soft-adds instead of overwriting existing values
+    choices: ['add', 'set', 'delete', 'update']
+    required: false
+    default: add
+
+  spam_rbl_table:
+    description:
+      - Anti-spam DNSBL table ID.
+    required: false
+
+  spam_mheader_table:
+    description:
+      - Anti-spam MIME header table ID.
+    required: false
+
+  spam_log_fortiguard_response:
+    description:
+      - Enable/disable logging FortiGuard spam response.
+    required: false
+    choices:
+      - disable
+      - enable
+
+  spam_log:
+    description:
+      - Enable/disable spam logging for email filtering.
+    required: false
+    choices:
+      - disable
+      - enable
+
+  spam_iptrust_table:
+    description:
+      - Anti-spam IP trust table ID.
+    required: false
+
+  spam_filtering:
+    description:
+      - Enable/disable spam filtering.
+    required: false
+    choices:
+      - disable
+      - enable
+
+  spam_bword_threshold:
+    description:
+      - Spam banned word threshold.
+    required: false
+
+  spam_bword_table:
+    description:
+      - Anti-spam banned word table ID.
+    required: false
+
+  spam_bwl_table:
+    description:
+      - Anti-spam black/white list table ID.
+    required: false
+
+  replacemsg_group:
+    description:
+      - Replacement message group.
+    required: false
+
+  options:
+    description:
+      - FLAG Based Options. Specify multiple in list form.
+    required: false
+    choices:
+      - bannedword
+      - spamfsip
+      - spamfssubmit
+      - spamfschksum
+      - spamfsurl
+      - spamhelodns
+      - spamraddrdns
+      - spamrbl
+      - spamhdrcheck
+      - spamfsphish
+      - spambwl
+
+  name:
+    description:
+      - Profile name.
+    required: false
+
+  flow_based:
+    description:
+      - Enable/disable flow-based spam filtering.
+    required: false
+    choices:
+      - disable
+      - enable
+
+  external:
+    description:
+      - Enable/disable external email inspection.
+    required: false
+    choices:
+      - disable
+      - enable
+
+  comment:
+    description:
+      - Comment.
+    required: false
+
+  gmail:
+    description:
+      - EXPERTS ONLY! KNOWLEDGE OF FMGR JSON API IS REQUIRED!
+      - List of multiple child objects to be added. Expects a list of dictionaries.
+      - Dictionaries must use FortiManager API parameters, not the ansible ones listed below.
+      - If submitted, all other prefixed sub-parameters ARE IGNORED.
+      - This object is MUTUALLY EXCLUSIVE with its options.
+      - We expect that you know what you are doing with these list parameters, and are leveraging the JSON API Guide.
+      - WHEN IN DOUBT, USE THE SUB OPTIONS BELOW INSTEAD TO CREATE OBJECTS WITH MULTIPLE TASKS
+    required: false
+
+  gmail_log:
+    description:
+      - Enable/disable logging.
+    required: false
+    choices:
+      - disable
+      - enable
+
+  imap:
+    description:
+      - EXPERTS ONLY! KNOWLEDGE OF FMGR JSON API IS REQUIRED!
+      - List of multiple child objects to be added. Expects a list of dictionaries.
+ - Dictionaries must use FortiManager API parameters, not the ansible ones listed below. + - If submitted, all other prefixed sub-parameters ARE IGNORED. + - This object is MUTUALLY EXCLUSIVE with its options. + - We expect that you know what you are doing with these list parameters, and are leveraging the JSON API Guide. + - WHEN IN DOUBT, USE THE SUB OPTIONS BELOW INSTEAD TO CREATE OBJECTS WITH MULTIPLE TASKS + required: false + + imap_action: + description: + - Action for spam email. + required: false + choices: + - pass + - tag + + imap_log: + description: + - Enable/disable logging. + required: false + choices: + - disable + - enable + + imap_tag_msg: + description: + - Subject text or header added to spam email. + required: false + + imap_tag_type: + description: + - Tag subject or header for spam email. + - FLAG Based Options. Specify multiple in list form. + required: false + choices: + - subject + - header + - spaminfo + + mapi: + description: + - EXPERTS ONLY! KNOWLEDGE OF FMGR JSON API IS REQUIRED! + - List of multiple child objects to be added. Expects a list of dictionaries. + - Dictionaries must use FortiManager API parameters, not the ansible ones listed below. + - If submitted, all other prefixed sub-parameters ARE IGNORED. + - This object is MUTUALLY EXCLUSIVE with its options. + - We expect that you know what you are doing with these list parameters, and are leveraging the JSON API Guide. + - WHEN IN DOUBT, USE THE SUB OPTIONS BELOW INSTEAD TO CREATE OBJECTS WITH MULTIPLE TASKS + required: false + + mapi_action: + description: + - Action for spam email. + required: false + choices: + - pass + - discard + + mapi_log: + description: + - Enable/disable logging. + required: false + choices: + - disable + - enable + + msn_hotmail: + description: + - EXPERTS ONLY! KNOWLEDGE OF FMGR JSON API IS REQUIRED! + - List of multiple child objects to be added. Expects a list of dictionaries. + - Dictionaries must use FortiManager API parameters, not the ansible ones listed below. + - If submitted, all other prefixed sub-parameters ARE IGNORED. + - This object is MUTUALLY EXCLUSIVE with its options. + - We expect that you know what you are doing with these list parameters, and are leveraging the JSON API Guide. + - WHEN IN DOUBT, USE THE SUB OPTIONS BELOW INSTEAD TO CREATE OBJECTS WITH MULTIPLE TASKS + required: false + + msn_hotmail_log: + description: + - Enable/disable logging. + required: false + choices: + - disable + - enable + + pop3: + description: + - EXPERTS ONLY! KNOWLEDGE OF FMGR JSON API IS REQUIRED! + - List of multiple child objects to be added. Expects a list of dictionaries. + - Dictionaries must use FortiManager API parameters, not the ansible ones listed below. + - If submitted, all other prefixed sub-parameters ARE IGNORED. + - This object is MUTUALLY EXCLUSIVE with its options. + - We expect that you know what you are doing with these list parameters, and are leveraging the JSON API Guide. + - WHEN IN DOUBT, USE THE SUB OPTIONS BELOW INSTEAD TO CREATE OBJECTS WITH MULTIPLE TASKS + required: false + + pop3_action: + description: + - Action for spam email. + required: false + choices: + - pass + - tag + + pop3_log: + description: + - Enable/disable logging. + required: false + choices: + - disable + - enable + + pop3_tag_msg: + description: + - Subject text or header added to spam email. + required: false + + pop3_tag_type: + description: + - Tag subject or header for spam email. + - FLAG Based Options. Specify multiple in list form. 
+ required: false + choices: + - subject + - header + - spaminfo + + smtp: + description: + - EXPERTS ONLY! KNOWLEDGE OF FMGR JSON API IS REQUIRED! + - List of multiple child objects to be added. Expects a list of dictionaries. + - Dictionaries must use FortiManager API parameters, not the ansible ones listed below. + - If submitted, all other prefixed sub-parameters ARE IGNORED. + - This object is MUTUALLY EXCLUSIVE with its options. + - We expect that you know what you are doing with these list parameters, and are leveraging the JSON API Guide. + - WHEN IN DOUBT, USE THE SUB OPTIONS BELOW INSTEAD TO CREATE OBJECTS WITH MULTIPLE TASKS + required: false + + smtp_action: + description: + - Action for spam email. + required: false + choices: + - pass + - tag + - discard + + smtp_hdrip: + description: + - Enable/disable SMTP email header IP checks for spamfsip, spamrbl and spambwl filters. + required: false + choices: + - disable + - enable + + smtp_local_override: + description: + - Enable/disable local filter to override SMTP remote check result. + required: false + choices: + - disable + - enable + + smtp_log: + description: + - Enable/disable logging. + required: false + choices: + - disable + - enable + + smtp_tag_msg: + description: + - Subject text or header added to spam email. + required: false + + smtp_tag_type: + description: + - Tag subject or header for spam email. + - FLAG Based Options. Specify multiple in list form. + required: false + choices: + - subject + - header + - spaminfo + + yahoo_mail: + description: + - EXPERTS ONLY! KNOWLEDGE OF FMGR JSON API IS REQUIRED! + - List of multiple child objects to be added. Expects a list of dictionaries. + - Dictionaries must use FortiManager API parameters, not the ansible ones listed below. + - If submitted, all other prefixed sub-parameters ARE IGNORED. + - This object is MUTUALLY EXCLUSIVE with its options. + - We expect that you know what you are doing with these list parameters, and are leveraging the JSON API Guide. + - WHEN IN DOUBT, USE THE SUB OPTIONS BELOW INSTEAD TO CREATE OBJECTS WITH MULTIPLE TASKS + required: false + + yahoo_mail_log: + description: + - Enable/disable logging. 
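+    # The expert-only child-object parameters in this module (gmail, imap, mapi,
+    # msn_hotmail, pop3, smtp, yahoo_mail) take raw FortiManager API keys rather
+    # than the flattened options. Based on the paramgram mapping in main(), a
+    # hypothetical raw C(smtp) entry might look like:
+    #   smtp:
+    #     action: tag
+    #     log: enable
+    #     tag-msg: "SPAM:"
+    #     tag-type: subject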
+ required: false + choices: + - disable + - enable +''' + +EXAMPLES = ''' + - name: DELETE Profile + fmgr_secprof_spam: + name: "Ansible_Spam_Filter_Profile" + mode: "delete" + + - name: Create FMGR_SPAMFILTER_PROFILE + fmgr_secprof_spam: + host: "{{ inventory_hostname }}" + username: "{{ username }}" + password: "{{ password }}" + mode: "set" + adom: "root" + spam_log_fortiguard_response: "enable" + spam_iptrust_table: + spam_filtering: "enable" + spam_bword_threshold: 10 + options: ["bannedword", "spamfsip", "spamfsurl", "spamrbl", "spamfsphish", "spambwl"] + name: "Ansible_Spam_Filter_Profile" + flow_based: "enable" + external: "enable" + comment: "Created by Ansible" + gmail_log: "enable" + spam_log: "enable" +''' + +RETURN = """ +api_result: + description: full API response, includes status code and message + returned: always + type: str +""" + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.connection import Connection +from ansible_collections.fortinet.fortios.plugins.module_utils.network.fortimanager.fortimanager import FortiManagerHandler +from ansible_collections.fortinet.fortios.plugins.module_utils.network.fortimanager.common import FMGBaseException +from ansible_collections.fortinet.fortios.plugins.module_utils.network.fortimanager.common import FMGRCommon +from ansible_collections.fortinet.fortios.plugins.module_utils.network.fortimanager.common import DEFAULT_RESULT_OBJ +from ansible_collections.fortinet.fortios.plugins.module_utils.network.fortimanager.common import FAIL_SOCKET_MSG +from ansible_collections.fortinet.fortios.plugins.module_utils.network.fortimanager.common import prepare_dict +from ansible_collections.fortinet.fortios.plugins.module_utils.network.fortimanager.common import scrub_dict + +############### +# START METHODS +############### + + +def fmgr_spamfilter_profile_modify(fmgr, paramgram): + """ + :param fmgr: The fmgr object instance from fortimanager.py + :type fmgr: class object + :param paramgram: The formatted dictionary of options to process + :type paramgram: dict + :return: The response from the FortiManager + :rtype: dict + """ + + mode = paramgram["mode"] + adom = paramgram["adom"] + + response = DEFAULT_RESULT_OBJ + url = "" + datagram = {} + + # EVAL THE MODE PARAMETER FOR SET OR ADD + if mode in ['set', 'add', 'update']: + url = '/pm/config/adom/{adom}/obj/spamfilter/profile'.format(adom=adom) + datagram = scrub_dict(prepare_dict(paramgram)) + + # EVAL THE MODE PARAMETER FOR DELETE + elif mode == "delete": + # SET THE CORRECT URL FOR DELETE + url = '/pm/config/adom/{adom}/obj/spamfilter/profile/{name}'.format(adom=adom, name=paramgram["name"]) + datagram = {} + + response = fmgr.process_request(url, datagram, paramgram["mode"]) + + return response + + +############# +# END METHODS +############# + + +def main(): + argument_spec = dict( + adom=dict(type="str", default="root"), + mode=dict(choices=["add", "set", "delete", "update"], type="str", default="add"), + + spam_rbl_table=dict(required=False, type="str"), + spam_mheader_table=dict(required=False, type="str"), + spam_log_fortiguard_response=dict(required=False, type="str", choices=["disable", "enable"]), + spam_log=dict(required=False, type="str", choices=["disable", "enable"]), + spam_iptrust_table=dict(required=False, type="str"), + spam_filtering=dict(required=False, type="str", choices=["disable", "enable"]), + spam_bword_threshold=dict(required=False, type="int"), + spam_bword_table=dict(required=False, type="str"), + 
spam_bwl_table=dict(required=False, type="str"), + replacemsg_group=dict(required=False, type="str"), + options=dict(required=False, type="list", choices=["bannedword", + "spamfsip", + "spamfssubmit", + "spamfschksum", + "spamfsurl", + "spamhelodns", + "spamraddrdns", + "spamrbl", + "spamhdrcheck", + "spamfsphish", + "spambwl"]), + name=dict(required=False, type="str"), + flow_based=dict(required=False, type="str", choices=["disable", "enable"]), + external=dict(required=False, type="str", choices=["disable", "enable"]), + comment=dict(required=False, type="str"), + gmail=dict(required=False, type="dict"), + gmail_log=dict(required=False, type="str", choices=["disable", "enable"]), + imap=dict(required=False, type="dict"), + imap_action=dict(required=False, type="str", choices=["pass", "tag"]), + imap_log=dict(required=False, type="str", choices=["disable", "enable"]), + imap_tag_msg=dict(required=False, type="str"), + imap_tag_type=dict(required=False, type="str", choices=["subject", "header", "spaminfo"]), + mapi=dict(required=False, type="dict"), + mapi_action=dict(required=False, type="str", choices=["pass", "discard"]), + mapi_log=dict(required=False, type="str", choices=["disable", "enable"]), + msn_hotmail=dict(required=False, type="dict"), + msn_hotmail_log=dict(required=False, type="str", choices=["disable", "enable"]), + pop3=dict(required=False, type="dict"), + pop3_action=dict(required=False, type="str", choices=["pass", "tag"]), + pop3_log=dict(required=False, type="str", choices=["disable", "enable"]), + pop3_tag_msg=dict(required=False, type="str"), + pop3_tag_type=dict(required=False, type="str", choices=["subject", "header", "spaminfo"]), + smtp=dict(required=False, type="dict"), + smtp_action=dict(required=False, type="str", choices=["pass", "tag", "discard"]), + smtp_hdrip=dict(required=False, type="str", choices=["disable", "enable"]), + smtp_local_override=dict(required=False, type="str", choices=["disable", "enable"]), + smtp_log=dict(required=False, type="str", choices=["disable", "enable"]), + smtp_tag_msg=dict(required=False, type="str"), + smtp_tag_type=dict(required=False, type="str", choices=["subject", "header", "spaminfo"]), + yahoo_mail=dict(required=False, type="dict"), + yahoo_mail_log=dict(required=False, type="str", choices=["disable", "enable"]), + + ) + + module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False, ) + # MODULE PARAMGRAM + paramgram = { + "mode": module.params["mode"], + "adom": module.params["adom"], + "spam-rbl-table": module.params["spam_rbl_table"], + "spam-mheader-table": module.params["spam_mheader_table"], + "spam-log-fortiguard-response": module.params["spam_log_fortiguard_response"], + "spam-log": module.params["spam_log"], + "spam-iptrust-table": module.params["spam_iptrust_table"], + "spam-filtering": module.params["spam_filtering"], + "spam-bword-threshold": module.params["spam_bword_threshold"], + "spam-bword-table": module.params["spam_bword_table"], + "spam-bwl-table": module.params["spam_bwl_table"], + "replacemsg-group": module.params["replacemsg_group"], + "options": module.params["options"], + "name": module.params["name"], + "flow-based": module.params["flow_based"], + "external": module.params["external"], + "comment": module.params["comment"], + "gmail": { + "log": module.params["gmail_log"], + }, + "imap": { + "action": module.params["imap_action"], + "log": module.params["imap_log"], + "tag-msg": module.params["imap_tag_msg"], + "tag-type": module.params["imap_tag_type"], + }, + "mapi": { + 
"action": module.params["mapi_action"], + "log": module.params["mapi_log"], + }, + "msn-hotmail": { + "log": module.params["msn_hotmail_log"], + }, + "pop3": { + "action": module.params["pop3_action"], + "log": module.params["pop3_log"], + "tag-msg": module.params["pop3_tag_msg"], + "tag-type": module.params["pop3_tag_type"], + }, + "smtp": { + "action": module.params["smtp_action"], + "hdrip": module.params["smtp_hdrip"], + "local-override": module.params["smtp_local_override"], + "log": module.params["smtp_log"], + "tag-msg": module.params["smtp_tag_msg"], + "tag-type": module.params["smtp_tag_type"], + }, + "yahoo-mail": { + "log": module.params["yahoo_mail_log"], + } + } + module.paramgram = paramgram + fmgr = None + if module._socket_path: + connection = Connection(module._socket_path) + fmgr = FortiManagerHandler(connection, module) + fmgr.tools = FMGRCommon() + else: + module.fail_json(**FAIL_SOCKET_MSG) + + list_overrides = ['gmail', 'imap', 'mapi', 'msn-hotmail', 'pop3', 'smtp', 'yahoo-mail'] + paramgram = fmgr.tools.paramgram_child_list_override(list_overrides=list_overrides, + paramgram=paramgram, module=module) + + results = DEFAULT_RESULT_OBJ + try: + + results = fmgr_spamfilter_profile_modify(fmgr, paramgram) + fmgr.govern_response(module=module, results=results, + ansible_facts=fmgr.construct_ansible_facts(results, module.params, paramgram)) + + except Exception as err: + raise FMGBaseException(err) + + return module.exit_json(**results[1]) + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/network/fortimanager/fmgr_secprof_ssl_ssh.py b/plugins/modules/network/fortimanager/fmgr_secprof_ssl_ssh.py new file mode 100644 index 0000000000..67d8aee12c --- /dev/null +++ b/plugins/modules/network/fortimanager/fmgr_secprof_ssl_ssh.py @@ -0,0 +1,958 @@ +#!/usr/bin/python +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'metadata_version': '1.1'} + +DOCUMENTATION = ''' +--- +module: fmgr_secprof_ssl_ssh +notes: + - Full Documentation at U(https://ftnt-ansible-docs.readthedocs.io/en/latest/). +author: + - Luke Weighall (@lweighall) + - Andrew Welsh (@Ghilli3) + - Jim Huber (@p4r4n0y1ng) +short_description: Manage SSL and SSH security profiles in FortiManager +description: + - Manage SSL and SSH security profiles in FortiManager via the FMG API + +options: + adom: + description: + - The ADOM the configuration should belong to. + required: false + default: root + + mode: + description: + - Sets one of three modes for managing the object. + - Allows use of soft-adds instead of overwriting existing values + choices: ['add', 'set', 'delete', 'update'] + required: false + default: add + + whitelist: + description: + - Enable/disable exempting servers by FortiGuard whitelist. + - choice | disable | Disable setting. 
+ - choice | enable | Enable setting. + required: false + choices: ["disable", "enable"] + + use_ssl_server: + description: + - Enable/disable the use of SSL server table for SSL offloading. + - choice | disable | Don't use SSL server configuration. + - choice | enable | Use SSL server configuration. + required: false + choices: ["disable", "enable"] + + untrusted_caname: + description: + - Untrusted CA certificate used by SSL Inspection. + required: false + + ssl_exemptions_log: + description: + - Enable/disable logging SSL exemptions. + - choice | disable | Disable logging SSL exemptions. + - choice | enable | Enable logging SSL exemptions. + required: false + choices: ["disable", "enable"] + + ssl_anomalies_log: + description: + - Enable/disable logging SSL anomalies. + - choice | disable | Disable logging SSL anomalies. + - choice | enable | Enable logging SSL anomalies. + required: false + choices: ["disable", "enable"] + + server_cert_mode: + description: + - Re-sign or replace the server's certificate. + - choice | re-sign | Multiple clients connecting to multiple servers. + - choice | replace | Protect an SSL server. + required: false + choices: ["re-sign", "replace"] + + server_cert: + description: + - Certificate used by SSL Inspection to replace server certificate. + required: false + + rpc_over_https: + description: + - Enable/disable inspection of RPC over HTTPS. + - choice | disable | Disable inspection of RPC over HTTPS. + - choice | enable | Enable inspection of RPC over HTTPS. + required: false + choices: ["disable", "enable"] + + name: + description: + - Name. + required: false + + mapi_over_https: + description: + - Enable/disable inspection of MAPI over HTTPS. + - choice | disable | Disable inspection of MAPI over HTTPS. + - choice | enable | Enable inspection of MAPI over HTTPS. + required: false + choices: ["disable", "enable"] + + comment: + description: + - Optional comments. + required: false + + caname: + description: + - CA certificate used by SSL Inspection. + required: false + + ftps: + description: + - EXPERTS ONLY! KNOWLEDGE OF FMGR JSON API IS REQUIRED! + - List of multiple child objects to be added. Expects a list of dictionaries. + - Dictionaries must use FortiManager API parameters, not the ansible ones listed below. + - If submitted, all other prefixed sub-parameters ARE IGNORED. + - This object is MUTUALLY EXCLUSIVE with its options. + - We expect that you know what you are doing with these list parameters, and are leveraging the JSON API Guide. + - WHEN IN DOUBT, USE THE SUB OPTIONS BELOW INSTEAD TO CREATE OBJECTS WITH MULTIPLE TASKS + required: false + + ftps_allow_invalid_server_cert: + description: + - When enabled, allows SSL sessions whose server certificate validation failed. + - choice | disable | Disable setting. + - choice | enable | Enable setting. + required: false + choices: ["disable", "enable"] + + ftps_client_cert_request: + description: + - Action based on client certificate request failure. + - choice | bypass | Bypass. + - choice | inspect | Inspect. + - choice | block | Block. + required: false + choices: ["bypass", "inspect", "block"] + + ftps_ports: + description: + - Ports to use for scanning (1 - 65535, default = 443). + required: false + + ftps_status: + description: + - Configure protocol inspection status. + - choice | disable | Disable. + - choice | deep-inspection | Full SSL inspection. 
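+    # (The https, imaps, pop3s and smtps blocks that follow repeat this same
+    # sub-option pattern: allow_invalid_server_cert, client_cert_request, ports,
+    # status, unsupported_ssl and untrusted_cert.)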
+ required: false + choices: ["disable", "deep-inspection"] + + ftps_unsupported_ssl: + description: + - Action based on the SSL encryption used being unsupported. + - choice | bypass | Bypass. + - choice | inspect | Inspect. + - choice | block | Block. + required: false + choices: ["bypass", "inspect", "block"] + + ftps_untrusted_cert: + description: + - Allow, ignore, or block the untrusted SSL session server certificate. + - choice | allow | Allow the untrusted server certificate. + - choice | block | Block the connection when an untrusted server certificate is detected. + - choice | ignore | Always take the server certificate as trusted. + required: false + choices: ["allow", "block", "ignore"] + + https: + description: + - EXPERTS ONLY! KNOWLEDGE OF FMGR JSON API IS REQUIRED! + - List of multiple child objects to be added. Expects a list of dictionaries. + - Dictionaries must use FortiManager API parameters, not the ansible ones listed below. + - If submitted, all other prefixed sub-parameters ARE IGNORED. + - This object is MUTUALLY EXCLUSIVE with its options. + - We expect that you know what you are doing with these list parameters, and are leveraging the JSON API Guide. + - WHEN IN DOUBT, USE THE SUB OPTIONS BELOW INSTEAD TO CREATE OBJECTS WITH MULTIPLE TASKS + required: false + + https_allow_invalid_server_cert: + description: + - When enabled, allows SSL sessions whose server certificate validation failed. + - choice | disable | Disable setting. + - choice | enable | Enable setting. + required: false + choices: ["disable", "enable"] + + https_client_cert_request: + description: + - Action based on client certificate request failure. + - choice | bypass | Bypass. + - choice | inspect | Inspect. + - choice | block | Block. + required: false + choices: ["bypass", "inspect", "block"] + + https_ports: + description: + - Ports to use for scanning (1 - 65535, default = 443). + required: false + + https_status: + description: + - Configure protocol inspection status. + - choice | disable | Disable. + - choice | certificate-inspection | Inspect SSL handshake only. + - choice | deep-inspection | Full SSL inspection. + required: false + choices: ["disable", "certificate-inspection", "deep-inspection"] + + https_unsupported_ssl: + description: + - Action based on the SSL encryption used being unsupported. + - choice | bypass | Bypass. + - choice | inspect | Inspect. + - choice | block | Block. + required: false + choices: ["bypass", "inspect", "block"] + + https_untrusted_cert: + description: + - Allow, ignore, or block the untrusted SSL session server certificate. + - choice | allow | Allow the untrusted server certificate. + - choice | block | Block the connection when an untrusted server certificate is detected. + - choice | ignore | Always take the server certificate as trusted. + required: false + choices: ["allow", "block", "ignore"] + + imaps: + description: + - EXPERTS ONLY! KNOWLEDGE OF FMGR JSON API IS REQUIRED! + - List of multiple child objects to be added. Expects a list of dictionaries. + - Dictionaries must use FortiManager API parameters, not the ansible ones listed below. + - If submitted, all other prefixed sub-parameters ARE IGNORED. + - This object is MUTUALLY EXCLUSIVE with its options. + - We expect that you know what you are doing with these list parameters, and are leveraging the JSON API Guide. 
+ - WHEN IN DOUBT, USE THE SUB OPTIONS BELOW INSTEAD TO CREATE OBJECTS WITH MULTIPLE TASKS + required: false + + imaps_allow_invalid_server_cert: + description: + - When enabled, allows SSL sessions whose server certificate validation failed. + - choice | disable | Disable setting. + - choice | enable | Enable setting. + required: false + choices: ["disable", "enable"] + + imaps_client_cert_request: + description: + - Action based on client certificate request failure. + - choice | bypass | Bypass. + - choice | inspect | Inspect. + - choice | block | Block. + required: false + choices: ["bypass", "inspect", "block"] + + imaps_ports: + description: + - Ports to use for scanning (1 - 65535, default = 443). + required: false + + imaps_status: + description: + - Configure protocol inspection status. + - choice | disable | Disable. + - choice | deep-inspection | Full SSL inspection. + required: false + choices: ["disable", "deep-inspection"] + + imaps_unsupported_ssl: + description: + - Action based on the SSL encryption used being unsupported. + - choice | bypass | Bypass. + - choice | inspect | Inspect. + - choice | block | Block. + required: false + choices: ["bypass", "inspect", "block"] + + imaps_untrusted_cert: + description: + - Allow, ignore, or block the untrusted SSL session server certificate. + - choice | allow | Allow the untrusted server certificate. + - choice | block | Block the connection when an untrusted server certificate is detected. + - choice | ignore | Always take the server certificate as trusted. + required: false + choices: ["allow", "block", "ignore"] + + pop3s: + description: + - EXPERTS ONLY! KNOWLEDGE OF FMGR JSON API IS REQUIRED! + - List of multiple child objects to be added. Expects a list of dictionaries. + - Dictionaries must use FortiManager API parameters, not the ansible ones listed below. + - If submitted, all other prefixed sub-parameters ARE IGNORED. + - This object is MUTUALLY EXCLUSIVE with its options. + - We expect that you know what you are doing with these list parameters, and are leveraging the JSON API Guide. + - WHEN IN DOUBT, USE THE SUB OPTIONS BELOW INSTEAD TO CREATE OBJECTS WITH MULTIPLE TASKS + required: false + + pop3s_allow_invalid_server_cert: + description: + - When enabled, allows SSL sessions whose server certificate validation failed. + - choice | disable | Disable setting. + - choice | enable | Enable setting. + required: false + choices: ["disable", "enable"] + + pop3s_client_cert_request: + description: + - Action based on client certificate request failure. + - choice | bypass | Bypass. + - choice | inspect | Inspect. + - choice | block | Block. + required: false + choices: ["bypass", "inspect", "block"] + + pop3s_ports: + description: + - Ports to use for scanning (1 - 65535, default = 443). + required: false + + pop3s_status: + description: + - Configure protocol inspection status. + - choice | disable | Disable. + - choice | deep-inspection | Full SSL inspection. + required: false + choices: ["disable", "deep-inspection"] + + pop3s_unsupported_ssl: + description: + - Action based on the SSL encryption used being unsupported. + - choice | bypass | Bypass. + - choice | inspect | Inspect. + - choice | block | Block. + required: false + choices: ["bypass", "inspect", "block"] + + pop3s_untrusted_cert: + description: + - Allow, ignore, or block the untrusted SSL session server certificate. + - choice | allow | Allow the untrusted server certificate. 
+ - choice | block | Block the connection when an untrusted server certificate is detected. + - choice | ignore | Always take the server certificate as trusted. + required: false + choices: ["allow", "block", "ignore"] + + smtps: + description: + - EXPERTS ONLY! KNOWLEDGE OF FMGR JSON API IS REQUIRED! + - List of multiple child objects to be added. Expects a list of dictionaries. + - Dictionaries must use FortiManager API parameters, not the ansible ones listed below. + - If submitted, all other prefixed sub-parameters ARE IGNORED. + - This object is MUTUALLY EXCLUSIVE with its options. + - We expect that you know what you are doing with these list parameters, and are leveraging the JSON API Guide. + - WHEN IN DOUBT, USE THE SUB OPTIONS BELOW INSTEAD TO CREATE OBJECTS WITH MULTIPLE TASKS + required: false + + smtps_allow_invalid_server_cert: + description: + - When enabled, allows SSL sessions whose server certificate validation failed. + - choice | disable | Disable setting. + - choice | enable | Enable setting. + required: false + choices: ["disable", "enable"] + + smtps_client_cert_request: + description: + - Action based on client certificate request failure. + - choice | bypass | Bypass. + - choice | inspect | Inspect. + - choice | block | Block. + required: false + choices: ["bypass", "inspect", "block"] + + smtps_ports: + description: + - Ports to use for scanning (1 - 65535, default = 443). + required: false + + smtps_status: + description: + - Configure protocol inspection status. + - choice | disable | Disable. + - choice | deep-inspection | Full SSL inspection. + required: false + choices: ["disable", "deep-inspection"] + + smtps_unsupported_ssl: + description: + - Action based on the SSL encryption used being unsupported. + - choice | bypass | Bypass. + - choice | inspect | Inspect. + - choice | block | Block. + required: false + choices: ["bypass", "inspect", "block"] + + smtps_untrusted_cert: + description: + - Allow, ignore, or block the untrusted SSL session server certificate. + - choice | allow | Allow the untrusted server certificate. + - choice | block | Block the connection when an untrusted server certificate is detected. + - choice | ignore | Always take the server certificate as trusted. + required: false + choices: ["allow", "block", "ignore"] + + ssh: + description: + - EXPERTS ONLY! KNOWLEDGE OF FMGR JSON API IS REQUIRED! + - List of multiple child objects to be added. Expects a list of dictionaries. + - Dictionaries must use FortiManager API parameters, not the ansible ones listed below. + - If submitted, all other prefixed sub-parameters ARE IGNORED. + - This object is MUTUALLY EXCLUSIVE with its options. + - We expect that you know what you are doing with these list parameters, and are leveraging the JSON API Guide. + - WHEN IN DOUBT, USE THE SUB OPTIONS BELOW INSTEAD TO CREATE OBJECTS WITH MULTIPLE TASKS + required: false + + ssh_inspect_all: + description: + - Level of SSL inspection. + - choice | disable | Disable. + - choice | deep-inspection | Full SSL inspection. + required: false + choices: ["disable", "deep-inspection"] + + ssh_ports: + description: + - Ports to use for scanning (1 - 65535, default = 443). + required: false + + ssh_ssh_algorithm: + description: + - Relative strength of encryption algorithms accepted during negotiation. + - choice | compatible | Allow a broader set of encryption algorithms for best compatibility. + - choice | high-encryption | Allow only AES-CTR, AES-GCM ciphers and high encryption algorithms. 
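+    # As elsewhere in this module, the expert-only C(ssh) parameter takes the raw
+    # API key names from the paramgram in main(); a hypothetical raw value might
+    # look like:
+    #   ssh:
+    #     status: deep-inspection
+    #     ssh-algorithm: compatible
+    #     ports: 22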
+ required: false + choices: ["compatible", "high-encryption"] + + ssh_ssh_policy_check: + description: + - Enable/disable SSH policy check. + - choice | disable | Disable SSH policy check. + - choice | enable | Enable SSH policy check. + required: false + choices: ["disable", "enable"] + + ssh_ssh_tun_policy_check: + description: + - Enable/disable SSH tunnel policy check. + - choice | disable | Disable SSH tunnel policy check. + - choice | enable | Enable SSH tunnel policy check. + required: false + choices: ["disable", "enable"] + + ssh_status: + description: + - Configure protocol inspection status. + - choice | disable | Disable. + - choice | deep-inspection | Full SSL inspection. + required: false + choices: ["disable", "deep-inspection"] + + ssh_unsupported_version: + description: + - Action based on SSH version being unsupported. + - choice | block | Block. + - choice | bypass | Bypass. + required: false + choices: ["block", "bypass"] + + ssl: + description: + - EXPERTS ONLY! KNOWLEDGE OF FMGR JSON API IS REQUIRED! + - List of multiple child objects to be added. Expects a list of dictionaries. + - Dictionaries must use FortiManager API parameters, not the ansible ones listed below. + - If submitted, all other prefixed sub-parameters ARE IGNORED. + - This object is MUTUALLY EXCLUSIVE with its options. + - We expect that you know what you are doing with these list parameters, and are leveraging the JSON API Guide. + - WHEN IN DOUBT, USE THE SUB OPTIONS BELOW INSTEAD TO CREATE OBJECTS WITH MULTIPLE TASKS + required: false + + ssl_allow_invalid_server_cert: + description: + - When enabled, allows SSL sessions whose server certificate validation failed. + - choice | disable | Disable setting. + - choice | enable | Enable setting. + required: false + choices: ["disable", "enable"] + + ssl_client_cert_request: + description: + - Action based on client certificate request failure. + - choice | bypass | Bypass. + - choice | inspect | Inspect. + - choice | block | Block. + required: false + choices: ["bypass", "inspect", "block"] + + ssl_inspect_all: + description: + - Level of SSL inspection. + - choice | disable | Disable. + - choice | certificate-inspection | Inspect SSL handshake only. + - choice | deep-inspection | Full SSL inspection. + required: false + choices: ["disable", "certificate-inspection", "deep-inspection"] + + ssl_unsupported_ssl: + description: + - Action based on the SSL encryption used being unsupported. + - choice | bypass | Bypass. + - choice | inspect | Inspect. + - choice | block | Block. + required: false + choices: ["bypass", "inspect", "block"] + + ssl_untrusted_cert: + description: + - Allow, ignore, or block the untrusted SSL session server certificate. + - choice | allow | Allow the untrusted server certificate. + - choice | block | Block the connection when an untrusted server certificate is detected. + - choice | ignore | Always take the server certificate as trusted. + required: false + choices: ["allow", "block", "ignore"] + + ssl_exempt: + description: + - EXPERTS ONLY! KNOWLEDGE OF FMGR JSON API IS REQUIRED! + - List of multiple child objects to be added. Expects a list of dictionaries. + - Dictionaries must use FortiManager API parameters, not the ansible ones listed below. + - If submitted, all other prefixed sub-parameters ARE IGNORED. + - This object is MUTUALLY EXCLUSIVE with its options. + - We expect that you know what you are doing with these list parameters, and are leveraging the JSON API Guide. 
+ - WHEN IN DOUBT, USE THE SUB OPTIONS BELOW INSTEAD TO CREATE OBJECTS WITH MULTIPLE TASKS + required: false + + ssl_exempt_address: + description: + - IPv4 address object. + required: false + + ssl_exempt_address6: + description: + - IPv6 address object. + required: false + + ssl_exempt_fortiguard_category: + description: + - FortiGuard category ID. + required: false + + ssl_exempt_regex: + description: + - Exempt servers by regular expression. + required: false + + ssl_exempt_type: + description: + - Type of address object (IPv4 or IPv6) or FortiGuard category. + - choice | fortiguard-category | FortiGuard category. + - choice | address | Firewall IPv4 address. + - choice | address6 | Firewall IPv6 address. + - choice | wildcard-fqdn | Fully Qualified Domain Name with wildcard characters. + - choice | regex | Regular expression FQDN. + required: false + choices: ["fortiguard-category", "address", "address6", "wildcard-fqdn", "regex"] + + ssl_exempt_wildcard_fqdn: + description: + - Exempt servers by wildcard FQDN. + required: false + + ssl_server: + description: + - EXPERTS ONLY! KNOWLEDGE OF FMGR JSON API IS REQUIRED! + - List of multiple child objects to be added. Expects a list of dictionaries. + - Dictionaries must use FortiManager API parameters, not the ansible ones listed below. + - If submitted, all other prefixed sub-parameters ARE IGNORED. + - This object is MUTUALLY EXCLUSIVE with its options. + - We expect that you know what you are doing with these list parameters, and are leveraging the JSON API Guide. + - WHEN IN DOUBT, USE THE SUB OPTIONS BELOW INSTEAD TO CREATE OBJECTS WITH MULTIPLE TASKS + required: false + + ssl_server_ftps_client_cert_request: + description: + - Action based on client certificate request failure during the FTPS handshake. + - choice | bypass | Bypass. + - choice | inspect | Inspect. + - choice | block | Block. + required: false + choices: ["bypass", "inspect", "block"] + + ssl_server_https_client_cert_request: + description: + - Action based on client certificate request failure during the HTTPS handshake. + - choice | bypass | Bypass. + - choice | inspect | Inspect. + - choice | block | Block. + required: false + choices: ["bypass", "inspect", "block"] + + ssl_server_imaps_client_cert_request: + description: + - Action based on client certificate request failure during the IMAPS handshake. + - choice | bypass | Bypass. + - choice | inspect | Inspect. + - choice | block | Block. + required: false + choices: ["bypass", "inspect", "block"] + + ssl_server_ip: + description: + - IPv4 address of the SSL server. + required: false + + ssl_server_pop3s_client_cert_request: + description: + - Action based on client certificate request failure during the POP3S handshake. + - choice | bypass | Bypass. + - choice | inspect | Inspect. + - choice | block | Block. + required: false + choices: ["bypass", "inspect", "block"] + + ssl_server_smtps_client_cert_request: + description: + - Action based on client certificate request failure during the SMTPS handshake. + - choice | bypass | Bypass. + - choice | inspect | Inspect. + - choice | block | Block. + required: false + choices: ["bypass", "inspect", "block"] + + ssl_server_ssl_other_client_cert_request: + description: + - Action based on client certificate request failure during an SSL protocol handshake. + - choice | bypass | Bypass. + - choice | inspect | Inspect. + - choice | block | Block. 
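+    # A hypothetical raw entry for the expert-only C(ssl_server) list, using the
+    # API key names from the paramgram in main(), might look like:
+    #   ssl_server:
+    #     - ip: 10.0.0.1
+    #       https-client-cert-request: bypass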
+ required: false + choices: ["bypass", "inspect", "block"] + + +''' + +EXAMPLES = ''' + - name: DELETE Profile + fmgr_secprof_ssl_ssh: + name: Ansible_SSL_SSH_Profile + mode: delete + + - name: CREATE Profile + fmgr_secprof_ssl_ssh: + name: Ansible_SSL_SSH_Profile + comment: "Created by Ansible Module TEST" + mode: set + mapi_over_https: enable + rpc_over_https: enable + server_cert_mode: replace + ssl_anomalies_log: enable + ssl_exemptions_log: enable + use_ssl_server: enable + whitelist: enable +''' + +RETURN = """ +api_result: + description: full API response, includes status code and message + returned: always + type: str +""" + +from ansible.module_utils.basic import AnsibleModule, env_fallback +from ansible.module_utils.connection import Connection +from ansible_collections.fortinet.fortios.plugins.module_utils.network.fortimanager.fortimanager import FortiManagerHandler +from ansible_collections.fortinet.fortios.plugins.module_utils.network.fortimanager.common import FMGBaseException +from ansible_collections.fortinet.fortios.plugins.module_utils.network.fortimanager.common import FMGRCommon +from ansible_collections.fortinet.fortios.plugins.module_utils.network.fortimanager.common import FMGRMethods +from ansible_collections.fortinet.fortios.plugins.module_utils.network.fortimanager.common import DEFAULT_RESULT_OBJ +from ansible_collections.fortinet.fortios.plugins.module_utils.network.fortimanager.common import FAIL_SOCKET_MSG +from ansible_collections.fortinet.fortios.plugins.module_utils.network.fortimanager.common import prepare_dict +from ansible_collections.fortinet.fortios.plugins.module_utils.network.fortimanager.common import scrub_dict + +############### +# START METHODS +############### + + +def fmgr_firewall_ssl_ssh_profile_modify(fmgr, paramgram): + """ + :param fmgr: The fmgr object instance from fortimanager.py + :type fmgr: class object + :param paramgram: The formatted dictionary of options to process + :type paramgram: dict + :return: The response from the FortiManager + :rtype: dict + """ + + mode = paramgram["mode"] + adom = paramgram["adom"] + + response = DEFAULT_RESULT_OBJ + url = "" + datagram = {} + + # EVAL THE MODE PARAMETER FOR SET OR ADD + if mode in ['set', 'add', 'update']: + url = '/pm/config/adom/{adom}/obj/firewall/ssl-ssh-profile'.format(adom=adom) + datagram = scrub_dict(prepare_dict(paramgram)) + + # EVAL THE MODE PARAMETER FOR DELETE + elif mode == "delete": + # SET THE CORRECT URL FOR DELETE + url = '/pm/config/adom/{adom}/obj/firewall/ssl-ssh-profile/{name}'.format(adom=adom, name=paramgram["name"]) + datagram = {} + + response = fmgr.process_request(url, datagram, paramgram["mode"]) + + return response + + +############# +# END METHODS +############# + + +def main(): + argument_spec = dict( + adom=dict(type="str", default="root"), + mode=dict(choices=["add", "set", "delete", "update"], type="str", default="add"), + + whitelist=dict(required=False, type="str", choices=["disable", "enable"]), + use_ssl_server=dict(required=False, type="str", choices=["disable", "enable"]), + untrusted_caname=dict(required=False, type="str"), + ssl_exemptions_log=dict(required=False, type="str", choices=["disable", "enable"]), + ssl_anomalies_log=dict(required=False, type="str", choices=["disable", "enable"]), + server_cert_mode=dict(required=False, type="str", choices=["re-sign", "replace"]), + server_cert=dict(required=False, type="str"), + rpc_over_https=dict(required=False, type="str", choices=["disable", "enable"]), + name=dict(required=False, 
type="str"), + mapi_over_https=dict(required=False, type="str", choices=["disable", "enable"]), + comment=dict(required=False, type="str"), + caname=dict(required=False, type="str"), + ftps=dict(required=False, type="list"), + ftps_allow_invalid_server_cert=dict(required=False, type="str", choices=["disable", "enable"]), + ftps_client_cert_request=dict(required=False, type="str", choices=["bypass", "inspect", "block"]), + ftps_ports=dict(required=False, type="str"), + ftps_status=dict(required=False, type="str", choices=["disable", "deep-inspection"]), + ftps_unsupported_ssl=dict(required=False, type="str", choices=["bypass", "inspect", "block"]), + ftps_untrusted_cert=dict(required=False, type="str", choices=["allow", "block", "ignore"]), + https=dict(required=False, type="list"), + https_allow_invalid_server_cert=dict(required=False, type="str", choices=["disable", "enable"]), + https_client_cert_request=dict(required=False, type="str", choices=["bypass", "inspect", "block"]), + https_ports=dict(required=False, type="str"), + https_status=dict(required=False, type="str", choices=["disable", "certificate-inspection", "deep-inspection"]), + https_unsupported_ssl=dict(required=False, type="str", choices=["bypass", "inspect", "block"]), + https_untrusted_cert=dict(required=False, type="str", choices=["allow", "block", "ignore"]), + imaps=dict(required=False, type="list"), + imaps_allow_invalid_server_cert=dict(required=False, type="str", choices=["disable", "enable"]), + imaps_client_cert_request=dict(required=False, type="str", choices=["bypass", "inspect", "block"]), + imaps_ports=dict(required=False, type="str"), + imaps_status=dict(required=False, type="str", choices=["disable", "deep-inspection"]), + imaps_unsupported_ssl=dict(required=False, type="str", choices=["bypass", "inspect", "block"]), + imaps_untrusted_cert=dict(required=False, type="str", choices=["allow", "block", "ignore"]), + pop3s=dict(required=False, type="list"), + pop3s_allow_invalid_server_cert=dict(required=False, type="str", choices=["disable", "enable"]), + pop3s_client_cert_request=dict(required=False, type="str", choices=["bypass", "inspect", "block"]), + pop3s_ports=dict(required=False, type="str"), + pop3s_status=dict(required=False, type="str", choices=["disable", "deep-inspection"]), + pop3s_unsupported_ssl=dict(required=False, type="str", choices=["bypass", "inspect", "block"]), + pop3s_untrusted_cert=dict(required=False, type="str", choices=["allow", "block", "ignore"]), + smtps=dict(required=False, type="list"), + smtps_allow_invalid_server_cert=dict(required=False, type="str", choices=["disable", "enable"]), + smtps_client_cert_request=dict(required=False, type="str", choices=["bypass", "inspect", "block"]), + smtps_ports=dict(required=False, type="str"), + smtps_status=dict(required=False, type="str", choices=["disable", "deep-inspection"]), + smtps_unsupported_ssl=dict(required=False, type="str", choices=["bypass", "inspect", "block"]), + smtps_untrusted_cert=dict(required=False, type="str", choices=["allow", "block", "ignore"]), + ssh=dict(required=False, type="list"), + ssh_inspect_all=dict(required=False, type="str", choices=["disable", "deep-inspection"]), + ssh_ports=dict(required=False, type="str"), + ssh_ssh_algorithm=dict(required=False, type="str", choices=["compatible", "high-encryption"]), + ssh_ssh_policy_check=dict(required=False, type="str", choices=["disable", "enable"]), + ssh_ssh_tun_policy_check=dict(required=False, type="str", choices=["disable", "enable"]), + 
ssh_status=dict(required=False, type="str", choices=["disable", "deep-inspection"]), + ssh_unsupported_version=dict(required=False, type="str", choices=["block", "bypass"]), + ssl=dict(required=False, type="list"), + ssl_allow_invalid_server_cert=dict(required=False, type="str", choices=["disable", "enable"]), + ssl_client_cert_request=dict(required=False, type="str", choices=["bypass", "inspect", "block"]), + ssl_inspect_all=dict(required=False, type="str", choices=["disable", "certificate-inspection", + "deep-inspection"]), + ssl_unsupported_ssl=dict(required=False, type="str", choices=["bypass", "inspect", "block"]), + ssl_untrusted_cert=dict(required=False, type="str", choices=["allow", "block", "ignore"]), + ssl_exempt=dict(required=False, type="list"), + ssl_exempt_address=dict(required=False, type="str"), + ssl_exempt_address6=dict(required=False, type="str"), + ssl_exempt_fortiguard_category=dict(required=False, type="str"), + ssl_exempt_regex=dict(required=False, type="str"), + ssl_exempt_type=dict(required=False, type="str", choices=["fortiguard-category", "address", "address6", + "wildcard-fqdn", "regex"]), + ssl_exempt_wildcard_fqdn=dict(required=False, type="str"), + ssl_server=dict(required=False, type="list"), + ssl_server_ftps_client_cert_request=dict(required=False, type="str", choices=["bypass", "inspect", "block"]), + ssl_server_https_client_cert_request=dict(required=False, type="str", choices=["bypass", "inspect", "block"]), + ssl_server_imaps_client_cert_request=dict(required=False, type="str", choices=["bypass", "inspect", "block"]), + ssl_server_ip=dict(required=False, type="str"), + ssl_server_pop3s_client_cert_request=dict(required=False, type="str", choices=["bypass", "inspect", "block"]), + ssl_server_smtps_client_cert_request=dict(required=False, type="str", choices=["bypass", "inspect", "block"]), + ssl_server_ssl_other_client_cert_request=dict(required=False, type="str", choices=["bypass", "inspect", + "block"]), + + ) + + module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False, ) + # MODULE PARAMGRAM + paramgram = { + "mode": module.params["mode"], + "adom": module.params["adom"], + "whitelist": module.params["whitelist"], + "use-ssl-server": module.params["use_ssl_server"], + "untrusted-caname": module.params["untrusted_caname"], + "ssl-exemptions-log": module.params["ssl_exemptions_log"], + "ssl-anomalies-log": module.params["ssl_anomalies_log"], + "server-cert-mode": module.params["server_cert_mode"], + "server-cert": module.params["server_cert"], + "rpc-over-https": module.params["rpc_over_https"], + "name": module.params["name"], + "mapi-over-https": module.params["mapi_over_https"], + "comment": module.params["comment"], + "caname": module.params["caname"], + "ftps": { + "allow-invalid-server-cert": module.params["ftps_allow_invalid_server_cert"], + "client-cert-request": module.params["ftps_client_cert_request"], + "ports": module.params["ftps_ports"], + "status": module.params["ftps_status"], + "unsupported-ssl": module.params["ftps_unsupported_ssl"], + "untrusted-cert": module.params["ftps_untrusted_cert"], + }, + "https": { + "allow-invalid-server-cert": module.params["https_allow_invalid_server_cert"], + "client-cert-request": module.params["https_client_cert_request"], + "ports": module.params["https_ports"], + "status": module.params["https_status"], + "unsupported-ssl": module.params["https_unsupported_ssl"], + "untrusted-cert": module.params["https_untrusted_cert"], + }, + "imaps": { + "allow-invalid-server-cert": 
module.params["imaps_allow_invalid_server_cert"], + "client-cert-request": module.params["imaps_client_cert_request"], + "ports": module.params["imaps_ports"], + "status": module.params["imaps_status"], + "unsupported-ssl": module.params["imaps_unsupported_ssl"], + "untrusted-cert": module.params["imaps_untrusted_cert"], + }, + "pop3s": { + "allow-invalid-server-cert": module.params["pop3s_allow_invalid_server_cert"], + "client-cert-request": module.params["pop3s_client_cert_request"], + "ports": module.params["pop3s_ports"], + "status": module.params["pop3s_status"], + "unsupported-ssl": module.params["pop3s_unsupported_ssl"], + "untrusted-cert": module.params["pop3s_untrusted_cert"], + }, + "smtps": { + "allow-invalid-server-cert": module.params["smtps_allow_invalid_server_cert"], + "client-cert-request": module.params["smtps_client_cert_request"], + "ports": module.params["smtps_ports"], + "status": module.params["smtps_status"], + "unsupported-ssl": module.params["smtps_unsupported_ssl"], + "untrusted-cert": module.params["smtps_untrusted_cert"], + }, + "ssh": { + "inspect-all": module.params["ssh_inspect_all"], + "ports": module.params["ssh_ports"], + "ssh-algorithm": module.params["ssh_ssh_algorithm"], + "ssh-policy-check": module.params["ssh_ssh_policy_check"], + "ssh-tun-policy-check": module.params["ssh_ssh_tun_policy_check"], + "status": module.params["ssh_status"], + "unsupported-version": module.params["ssh_unsupported_version"], + }, + "ssl": { + "allow-invalid-server-cert": module.params["ssl_allow_invalid_server_cert"], + "client-cert-request": module.params["ssl_client_cert_request"], + "inspect-all": module.params["ssl_inspect_all"], + "unsupported-ssl": module.params["ssl_unsupported_ssl"], + "untrusted-cert": module.params["ssl_untrusted_cert"], + }, + "ssl-exempt": { + "address": module.params["ssl_exempt_address"], + "address6": module.params["ssl_exempt_address6"], + "fortiguard-category": module.params["ssl_exempt_fortiguard_category"], + "regex": module.params["ssl_exempt_regex"], + "type": module.params["ssl_exempt_type"], + "wildcard-fqdn": module.params["ssl_exempt_wildcard_fqdn"], + }, + "ssl-server": { + "ftps-client-cert-request": module.params["ssl_server_ftps_client_cert_request"], + "https-client-cert-request": module.params["ssl_server_https_client_cert_request"], + "imaps-client-cert-request": module.params["ssl_server_imaps_client_cert_request"], + "ip": module.params["ssl_server_ip"], + "pop3s-client-cert-request": module.params["ssl_server_pop3s_client_cert_request"], + "smtps-client-cert-request": module.params["ssl_server_smtps_client_cert_request"], + "ssl-other-client-cert-request": module.params["ssl_server_ssl_other_client_cert_request"], + } + } + + module.paramgram = paramgram + fmgr = None + if module._socket_path: + connection = Connection(module._socket_path) + fmgr = FortiManagerHandler(connection, module) + fmgr.tools = FMGRCommon() + else: + module.fail_json(**FAIL_SOCKET_MSG) + + list_overrides = ['ftps', 'https', 'imaps', 'pop3s', 'smtps', 'ssh', 'ssl', 'ssl-exempt', 'ssl-server'] + paramgram = fmgr.tools.paramgram_child_list_override(list_overrides=list_overrides, + paramgram=paramgram, module=module) + + results = DEFAULT_RESULT_OBJ + + try: + + results = fmgr_firewall_ssl_ssh_profile_modify(fmgr, paramgram) + fmgr.govern_response(module=module, results=results, + ansible_facts=fmgr.construct_ansible_facts(results, module.params, paramgram)) + + except Exception as err: + raise FMGBaseException(err) + + return 
module.exit_json(**results[1]) + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/network/fortimanager/fmgr_secprof_voip.py b/plugins/modules/network/fortimanager/fmgr_secprof_voip.py new file mode 100644 index 0000000000..8959684e1c --- /dev/null +++ b/plugins/modules/network/fortimanager/fmgr_secprof_voip.py @@ -0,0 +1,1202 @@ +#!/usr/bin/python +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <http://www.gnu.org/licenses/>. +# + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'metadata_version': '1.1'} + +DOCUMENTATION = ''' +--- +module: fmgr_secprof_voip +notes: + - Full Documentation at U(https://ftnt-ansible-docs.readthedocs.io/en/latest/). +author: + - Luke Weighall (@lweighall) + - Andrew Welsh (@Ghilli3) + - Jim Huber (@p4r4n0y1ng) +short_description: VOIP security profiles in FMG +description: + - Manage VOIP security profiles in FortiManager via API + +options: + adom: + description: + - The ADOM the configuration should belong to. + required: false + default: root + + mode: + description: + - Sets one of four modes for managing the object. + - Allows use of soft-adds instead of overwriting existing values + choices: ['add', 'set', 'delete', 'update'] + required: false + default: add + + name: + description: + - Profile name. + required: false + + comment: + description: + - Comment. + required: false + + sccp: + description: + - EXPERTS ONLY! KNOWLEDGE OF FMGR JSON API IS REQUIRED! + - List of multiple child objects to be added. Expects a list of dictionaries. + - Dictionaries must use FortiManager API parameters, not the ansible ones listed below. + - If submitted, all other prefixed sub-parameters ARE IGNORED. + - This object is MUTUALLY EXCLUSIVE with its options. + - We expect that you know what you are doing with these list parameters, and are leveraging the JSON API Guide. + - WHEN IN DOUBT, USE THE SUB OPTIONS BELOW INSTEAD TO CREATE OBJECTS WITH MULTIPLE TASKS + required: false + + sccp_block_mcast: + description: + - Enable/disable block multicast RTP connections. + - choice | disable | Disable status. + - choice | enable | Enable status. + required: false + choices: ["disable", "enable"] + + sccp_log_call_summary: + description: + - Enable/disable log summary of SCCP calls. + - choice | disable | Disable status. + - choice | enable | Enable status. + required: false + choices: ["disable", "enable"] + + sccp_log_violations: + description: + - Enable/disable logging of SCCP violations. + - choice | disable | Disable status. + - choice | enable | Enable status. + required: false + choices: ["disable", "enable"] + + sccp_max_calls: + description: + - Maximum calls per minute per SCCP client (max 65535). + required: false + + sccp_status: + description: + - Enable/disable SCCP. + - choice | disable | Disable status. + - choice | enable | Enable status.
+ required: false + choices: ["disable", "enable"] + + sccp_verify_header: + description: + - Enable/disable verify SCCP header content. + - choice | disable | Disable status. + - choice | enable | Enable status. + required: false + choices: ["disable", "enable"] + + sip: + description: + - EXPERTS ONLY! KNOWLEDGE OF FMGR JSON API IS REQUIRED! + - List of multiple child objects to be added. Expects a list of dictionaries. + - Dictionaries must use FortiManager API parameters, not the ansible ones listed below. + - If submitted, all other prefixed sub-parameters ARE IGNORED. + - This object is MUTUALLY EXCLUSIVE with its options. + - We expect that you know what you are doing with these list parameters, and are leveraging the JSON API Guide. + - WHEN IN DOUBT, USE THE SUB OPTIONS BELOW INSTEAD TO CREATE OBJECTS WITH MULTIPLE TASKS + required: false + + sip_ack_rate: + description: + - ACK request rate limit (per second, per policy). + required: false + + sip_block_ack: + description: + - Enable/disable block ACK requests. + - choice | disable | Disable status. + - choice | enable | Enable status. + required: false + choices: ["disable", "enable"] + + sip_block_bye: + description: + - Enable/disable block BYE requests. + - choice | disable | Disable status. + - choice | enable | Enable status. + required: false + choices: ["disable", "enable"] + + sip_block_cancel: + description: + - Enable/disable block CANCEL requests. + - choice | disable | Disable status. + - choice | enable | Enable status. + required: false + choices: ["disable", "enable"] + + sip_block_geo_red_options: + description: + - Enable/disable block OPTIONS requests, but OPTIONS requests still notify for redundancy. + - choice | disable | Disable status. + - choice | enable | Enable status. + required: false + choices: ["disable", "enable"] + + sip_block_info: + description: + - Enable/disable block INFO requests. + - choice | disable | Disable status. + - choice | enable | Enable status. + required: false + choices: ["disable", "enable"] + + sip_block_invite: + description: + - Enable/disable block INVITE requests. + - choice | disable | Disable status. + - choice | enable | Enable status. + required: false + choices: ["disable", "enable"] + + sip_block_long_lines: + description: + - Enable/disable block requests with headers exceeding max-line-length. + - choice | disable | Disable status. + - choice | enable | Enable status. + required: false + choices: ["disable", "enable"] + + sip_block_message: + description: + - Enable/disable block MESSAGE requests. + - choice | disable | Disable status. + - choice | enable | Enable status. + required: false + choices: ["disable", "enable"] + + sip_block_notify: + description: + - Enable/disable block NOTIFY requests. + - choice | disable | Disable status. + - choice | enable | Enable status. + required: false + choices: ["disable", "enable"] + + sip_block_options: + description: + - Enable/disable block OPTIONS requests and no OPTIONS as notifying message for redundancy either. + - choice | disable | Disable status. + - choice | enable | Enable status. + required: false + choices: ["disable", "enable"] + + sip_block_prack: + description: + - Enable/disable block prack requests. + - choice | disable | Disable status. + - choice | enable | Enable status. + required: false + choices: ["disable", "enable"] + + sip_block_publish: + description: + - Enable/disable block PUBLISH requests. + - choice | disable | Disable status. + - choice | enable | Enable status. 
+ required: false + choices: ["disable", "enable"] + + sip_block_refer: + description: + - Enable/disable block REFER requests. + - choice | disable | Disable status. + - choice | enable | Enable status. + required: false + choices: ["disable", "enable"] + + sip_block_register: + description: + - Enable/disable block REGISTER requests. + - choice | disable | Disable status. + - choice | enable | Enable status. + required: false + choices: ["disable", "enable"] + + sip_block_subscribe: + description: + - Enable/disable block SUBSCRIBE requests. + - choice | disable | Disable status. + - choice | enable | Enable status. + required: false + choices: ["disable", "enable"] + + sip_block_unknown: + description: + - Block unrecognized SIP requests (enabled by default). + - choice | disable | Disable status. + - choice | enable | Enable status. + required: false + choices: ["disable", "enable"] + + sip_block_update: + description: + - Enable/disable block UPDATE requests. + - choice | disable | Disable status. + - choice | enable | Enable status. + required: false + choices: ["disable", "enable"] + + sip_bye_rate: + description: + - BYE request rate limit (per second, per policy). + required: false + + sip_call_keepalive: + description: + - Continue tracking calls with no RTP for this many minutes. + required: false + + sip_cancel_rate: + description: + - CANCEL request rate limit (per second, per policy). + required: false + + sip_contact_fixup: + description: + - Fixup contact anyway even if contact's IP|port doesn't match session's IP|port. + - choice | disable | Disable status. + - choice | enable | Enable status. + required: false + choices: ["disable", "enable"] + + sip_hnt_restrict_source_ip: + description: + - Enable/disable restrict RTP source IP to be the same as SIP source IP when HNT is enabled. + - choice | disable | Disable status. + - choice | enable | Enable status. + required: false + choices: ["disable", "enable"] + + sip_hosted_nat_traversal: + description: + - Hosted NAT Traversal (HNT). + - choice | disable | Disable status. + - choice | enable | Enable status. + required: false + choices: ["disable", "enable"] + + sip_info_rate: + description: + - INFO request rate limit (per second, per policy). + required: false + + sip_invite_rate: + description: + - INVITE request rate limit (per second, per policy). + required: false + + sip_ips_rtp: + description: + - Enable/disable allow IPS on RTP. + - choice | disable | Disable status. + - choice | enable | Enable status. + required: false + choices: ["disable", "enable"] + + sip_log_call_summary: + description: + - Enable/disable logging of SIP call summary. + - choice | disable | Disable status. + - choice | enable | Enable status. + required: false + choices: ["disable", "enable"] + + sip_log_violations: + description: + - Enable/disable logging of SIP violations. + - choice | disable | Disable status. + - choice | enable | Enable status. + required: false + choices: ["disable", "enable"] + + sip_malformed_header_allow: + description: + - Action for malformed Allow header. + - choice | pass | Bypass malformed messages. + - choice | discard | Discard malformed messages. + - choice | respond | Respond with error code. + required: false + choices: ["pass", "discard", "respond"] + + sip_malformed_header_call_id: + description: + - Action for malformed Call-ID header. + - choice | pass | Bypass malformed messages. + - choice | discard | Discard malformed messages. + - choice | respond | Respond with error code. 
+ required: false + choices: ["pass", "discard", "respond"] + + sip_malformed_header_contact: + description: + - Action for malformed Contact header. + - choice | pass | Bypass malformed messages. + - choice | discard | Discard malformed messages. + - choice | respond | Respond with error code. + required: false + choices: ["pass", "discard", "respond"] + + sip_malformed_header_content_length: + description: + - Action for malformed Content-Length header. + - choice | pass | Bypass malformed messages. + - choice | discard | Discard malformed messages. + - choice | respond | Respond with error code. + required: false + choices: ["pass", "discard", "respond"] + + sip_malformed_header_content_type: + description: + - Action for malformed Content-Type header. + - choice | pass | Bypass malformed messages. + - choice | discard | Discard malformed messages. + - choice | respond | Respond with error code. + required: false + choices: ["pass", "discard", "respond"] + + sip_malformed_header_cseq: + description: + - Action for malformed CSeq header. + - choice | pass | Bypass malformed messages. + - choice | discard | Discard malformed messages. + - choice | respond | Respond with error code. + required: false + choices: ["pass", "discard", "respond"] + + sip_malformed_header_expires: + description: + - Action for malformed Expires header. + - choice | pass | Bypass malformed messages. + - choice | discard | Discard malformed messages. + - choice | respond | Respond with error code. + required: false + choices: ["pass", "discard", "respond"] + + sip_malformed_header_from: + description: + - Action for malformed From header. + - choice | pass | Bypass malformed messages. + - choice | discard | Discard malformed messages. + - choice | respond | Respond with error code. + required: false + choices: ["pass", "discard", "respond"] + + sip_malformed_header_max_forwards: + description: + - Action for malformed Max-Forwards header. + - choice | pass | Bypass malformed messages. + - choice | discard | Discard malformed messages. + - choice | respond | Respond with error code. + required: false + choices: ["pass", "discard", "respond"] + + sip_malformed_header_p_asserted_identity: + description: + - Action for malformed P-Asserted-Identity header. + - choice | pass | Bypass malformed messages. + - choice | discard | Discard malformed messages. + - choice | respond | Respond with error code. + required: false + choices: ["pass", "discard", "respond"] + + sip_malformed_header_rack: + description: + - Action for malformed RAck header. + - choice | pass | Bypass malformed messages. + - choice | discard | Discard malformed messages. + - choice | respond | Respond with error code. + required: false + choices: ["pass", "discard", "respond"] + + sip_malformed_header_record_route: + description: + - Action for malformed Record-Route header. + - choice | pass | Bypass malformed messages. + - choice | discard | Discard malformed messages. + - choice | respond | Respond with error code. + required: false + choices: ["pass", "discard", "respond"] + + sip_malformed_header_route: + description: + - Action for malformed Route header. + - choice | pass | Bypass malformed messages. + - choice | discard | Discard malformed messages. + - choice | respond | Respond with error code. + required: false + choices: ["pass", "discard", "respond"] + + sip_malformed_header_rseq: + description: + - Action for malformed RSeq header. + - choice | pass | Bypass malformed messages. + - choice | discard | Discard malformed messages. 
+ - choice | respond | Respond with error code. + required: false + choices: ["pass", "discard", "respond"] + + sip_malformed_header_sdp_a: + description: + - Action for malformed SDP a line. + - choice | pass | Bypass malformed messages. + - choice | discard | Discard malformed messages. + - choice | respond | Respond with error code. + required: false + choices: ["pass", "discard", "respond"] + + sip_malformed_header_sdp_b: + description: + - Action for malformed SDP b line. + - choice | pass | Bypass malformed messages. + - choice | discard | Discard malformed messages. + - choice | respond | Respond with error code. + required: false + choices: ["pass", "discard", "respond"] + + sip_malformed_header_sdp_c: + description: + - Action for malformed SDP c line. + - choice | pass | Bypass malformed messages. + - choice | discard | Discard malformed messages. + - choice | respond | Respond with error code. + required: false + choices: ["pass", "discard", "respond"] + + sip_malformed_header_sdp_i: + description: + - Action for malformed SDP i line. + - choice | pass | Bypass malformed messages. + - choice | discard | Discard malformed messages. + - choice | respond | Respond with error code. + required: false + choices: ["pass", "discard", "respond"] + + sip_malformed_header_sdp_k: + description: + - Action for malformed SDP k line. + - choice | pass | Bypass malformed messages. + - choice | discard | Discard malformed messages. + - choice | respond | Respond with error code. + required: false + choices: ["pass", "discard", "respond"] + + sip_malformed_header_sdp_m: + description: + - Action for malformed SDP m line. + - choice | pass | Bypass malformed messages. + - choice | discard | Discard malformed messages. + - choice | respond | Respond with error code. + required: false + choices: ["pass", "discard", "respond"] + + sip_malformed_header_sdp_o: + description: + - Action for malformed SDP o line. + - choice | pass | Bypass malformed messages. + - choice | discard | Discard malformed messages. + - choice | respond | Respond with error code. + required: false + choices: ["pass", "discard", "respond"] + + sip_malformed_header_sdp_r: + description: + - Action for malformed SDP r line. + - choice | pass | Bypass malformed messages. + - choice | discard | Discard malformed messages. + - choice | respond | Respond with error code. + required: false + choices: ["pass", "discard", "respond"] + + sip_malformed_header_sdp_s: + description: + - Action for malformed SDP s line. + - choice | pass | Bypass malformed messages. + - choice | discard | Discard malformed messages. + - choice | respond | Respond with error code. + required: false + choices: ["pass", "discard", "respond"] + + sip_malformed_header_sdp_t: + description: + - Action for malformed SDP t line. + - choice | pass | Bypass malformed messages. + - choice | discard | Discard malformed messages. + - choice | respond | Respond with error code. + required: false + choices: ["pass", "discard", "respond"] + + sip_malformed_header_sdp_v: + description: + - Action for malformed SDP v line. + - choice | pass | Bypass malformed messages. + - choice | discard | Discard malformed messages. + - choice | respond | Respond with error code. + required: false + choices: ["pass", "discard", "respond"] + + sip_malformed_header_sdp_z: + description: + - Action for malformed SDP z line. + - choice | pass | Bypass malformed messages. + - choice | discard | Discard malformed messages. + - choice | respond | Respond with error code. 
+ required: false + choices: ["pass", "discard", "respond"] + + sip_malformed_header_to: + description: + - Action for malformed To header. + - choice | pass | Bypass malformed messages. + - choice | discard | Discard malformed messages. + - choice | respond | Respond with error code. + required: false + choices: ["pass", "discard", "respond"] + + sip_malformed_header_via: + description: + - Action for malformed VIA header. + - choice | pass | Bypass malformed messages. + - choice | discard | Discard malformed messages. + - choice | respond | Respond with error code. + required: false + choices: ["pass", "discard", "respond"] + + sip_malformed_request_line: + description: + - Action for malformed request line. + - choice | pass | Bypass malformed messages. + - choice | discard | Discard malformed messages. + - choice | respond | Respond with error code. + required: false + choices: ["pass", "discard", "respond"] + + sip_max_body_length: + description: + - Maximum SIP message body length (0 meaning no limit). + required: false + + sip_max_dialogs: + description: + - Maximum number of concurrent calls/dialogs (per policy). + required: false + + sip_max_idle_dialogs: + description: + - Maximum number established but idle dialogs to retain (per policy). + required: false + + sip_max_line_length: + description: + - Maximum SIP header line length (78-4096). + required: false + + sip_message_rate: + description: + - MESSAGE request rate limit (per second, per policy). + required: false + + sip_nat_trace: + description: + - Enable/disable preservation of original IP in SDP i line. + - choice | disable | Disable status. + - choice | enable | Enable status. + required: false + choices: ["disable", "enable"] + + sip_no_sdp_fixup: + description: + - Enable/disable no SDP fix-up. + - choice | disable | Disable status. + - choice | enable | Enable status. + required: false + choices: ["disable", "enable"] + + sip_notify_rate: + description: + - NOTIFY request rate limit (per second, per policy). + required: false + + sip_open_contact_pinhole: + description: + - Enable/disable open pinhole for non-REGISTER Contact port. + - choice | disable | Disable status. + - choice | enable | Enable status. + required: false + choices: ["disable", "enable"] + + sip_open_record_route_pinhole: + description: + - Enable/disable open pinhole for Record-Route port. + - choice | disable | Disable status. + - choice | enable | Enable status. + required: false + choices: ["disable", "enable"] + + sip_open_register_pinhole: + description: + - Enable/disable open pinhole for REGISTER Contact port. + - choice | disable | Disable status. + - choice | enable | Enable status. + required: false + choices: ["disable", "enable"] + + sip_open_via_pinhole: + description: + - Enable/disable open pinhole for Via port. + - choice | disable | Disable status. + - choice | enable | Enable status. + required: false + choices: ["disable", "enable"] + + sip_options_rate: + description: + - OPTIONS request rate limit (per second, per policy). + required: false + + sip_prack_rate: + description: + - PRACK request rate limit (per second, per policy). + required: false + + sip_preserve_override: + description: + - Override i line to preserve original IPS (default| append). + - choice | disable | Disable status. + - choice | enable | Enable status. + required: false + choices: ["disable", "enable"] + + sip_provisional_invite_expiry_time: + description: + - Expiry time for provisional INVITE (10 - 3600 sec). 
+ required: false + + sip_publish_rate: + description: + - PUBLISH request rate limit (per second, per policy). + required: false + + sip_refer_rate: + description: + - REFER request rate limit (per second, per policy). + required: false + + sip_register_contact_trace: + description: + - Enable/disable trace original IP/port within the contact header of REGISTER requests. + - choice | disable | Disable status. + - choice | enable | Enable status. + required: false + choices: ["disable", "enable"] + + sip_register_rate: + description: + - REGISTER request rate limit (per second, per policy). + required: false + + sip_rfc2543_branch: + description: + - Enable/disable support via branch compliant with RFC 2543. + - choice | disable | Disable status. + - choice | enable | Enable status. + required: false + choices: ["disable", "enable"] + + sip_rtp: + description: + - Enable/disable create pinholes for RTP traffic to traverse firewall. + - choice | disable | Disable status. + - choice | enable | Enable status. + required: false + choices: ["disable", "enable"] + + sip_ssl_algorithm: + description: + - Relative strength of encryption algorithms accepted in negotiation. + - choice | high | High encryption. Allow only AES and ChaCha. + - choice | medium | Medium encryption. Allow AES, ChaCha, 3DES, and RC4. + - choice | low | Low encryption. Allow AES, ChaCha, 3DES, RC4, and DES. + required: false + choices: ["high", "medium", "low"] + + sip_ssl_auth_client: + description: + - Require a client certificate and authenticate it with the peer/peergrp. + required: false + + sip_ssl_auth_server: + description: + - Authenticate the server's certificate with the peer/peergrp. + required: false + + sip_ssl_client_certificate: + description: + - Name of Certificate to offer to server if requested. + required: false + + sip_ssl_client_renegotiation: + description: + - Allow/block client renegotiation by server. + - choice | allow | Allow a SSL client to renegotiate. + - choice | deny | Abort any SSL connection that attempts to renegotiate. + - choice | secure | Reject any SSL connection that does not offer a RFC 5746 Secure Renegotiation Indication. + required: false + choices: ["allow", "deny", "secure"] + + sip_ssl_max_version: + description: + - Highest SSL/TLS version to negotiate. + - choice | ssl-3.0 | SSL 3.0. + - choice | tls-1.0 | TLS 1.0. + - choice | tls-1.1 | TLS 1.1. + - choice | tls-1.2 | TLS 1.2. + required: false + choices: ["ssl-3.0", "tls-1.0", "tls-1.1", "tls-1.2"] + + sip_ssl_min_version: + description: + - Lowest SSL/TLS version to negotiate. + - choice | ssl-3.0 | SSL 3.0. + - choice | tls-1.0 | TLS 1.0. + - choice | tls-1.1 | TLS 1.1. + - choice | tls-1.2 | TLS 1.2. + required: false + choices: ["ssl-3.0", "tls-1.0", "tls-1.1", "tls-1.2"] + + sip_ssl_mode: + description: + - SSL/TLS mode for encryption & decryption of traffic. + - choice | off | No SSL. + - choice | full | Client to FortiGate and FortiGate to Server SSL. + required: false + choices: ["off", "full"] + + sip_ssl_pfs: + description: + - SSL Perfect Forward Secrecy. + - choice | require | PFS mandatory. + - choice | deny | PFS rejected. + - choice | allow | PFS allowed. + required: false + choices: ["require", "deny", "allow"] + + sip_ssl_send_empty_frags: + description: + - Send empty fragments to avoid attack on CBC IV (SSL 3.0 & TLS 1.0 only). + - choice | disable | Do not send empty fragments. + - choice | enable | Send empty fragments. 
+ required: false + choices: ["disable", "enable"] + + sip_ssl_server_certificate: + description: + - Name of Certificate return to the client in every SSL connection. + required: false + + sip_status: + description: + - Enable/disable SIP. + - choice | disable | Disable status. + - choice | enable | Enable status. + required: false + choices: ["disable", "enable"] + + sip_strict_register: + description: + - Enable/disable only allow the registrar to connect. + - choice | disable | Disable status. + - choice | enable | Enable status. + required: false + choices: ["disable", "enable"] + + sip_subscribe_rate: + description: + - SUBSCRIBE request rate limit (per second, per policy). + required: false + + sip_unknown_header: + description: + - Action for unknown SIP header. + - choice | pass | Bypass malformed messages. + - choice | discard | Discard malformed messages. + - choice | respond | Respond with error code. + required: false + choices: ["pass", "discard", "respond"] + + sip_update_rate: + description: + - UPDATE request rate limit (per second, per policy). + required: false + + +''' + +EXAMPLES = ''' + - name: DELETE Profile + fmgr_secprof_voip: + name: "Ansible_VOIP_Profile" + mode: "delete" + + - name: Create FMGR_VOIP_PROFILE + fmgr_secprof_voip: + mode: "set" + adom: "root" + name: "Ansible_VOIP_Profile" + comment: "Created by Ansible" + sccp: {block-mcast: "enable", log-call-summary: "enable", log-violations: "enable", status: "enable"} +''' + +RETURN = """ +api_result: + description: full API response, includes status code and message + returned: always + type: str +""" + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.connection import Connection +from ansible_collections.fortinet.fortios.plugins.module_utils.network.fortimanager.fortimanager import FortiManagerHandler +from ansible_collections.fortinet.fortios.plugins.module_utils.network.fortimanager.common import FMGBaseException +from ansible_collections.fortinet.fortios.plugins.module_utils.network.fortimanager.common import FMGRCommon +from ansible_collections.fortinet.fortios.plugins.module_utils.network.fortimanager.common import DEFAULT_RESULT_OBJ +from ansible_collections.fortinet.fortios.plugins.module_utils.network.fortimanager.common import FAIL_SOCKET_MSG +from ansible_collections.fortinet.fortios.plugins.module_utils.network.fortimanager.common import prepare_dict +from ansible_collections.fortinet.fortios.plugins.module_utils.network.fortimanager.common import scrub_dict + + +############### +# START METHODS +############### + + +def fmgr_voip_profile_modify(fmgr, paramgram): + """ + :param fmgr: The fmgr object instance from fortimanager.py + :type fmgr: class object + :param paramgram: The formatted dictionary of options to process + :type paramgram: dict + :return: The response from the FortiManager + :rtype: dict + """ + + mode = paramgram["mode"] + adom = paramgram["adom"] + + response = DEFAULT_RESULT_OBJ + url = "" + datagram = {} + + # EVAL THE MODE PARAMETER FOR SET OR ADD + if mode in ['set', 'add', 'update']: + url = '/pm/config/adom/{adom}/obj/voip/profile'.format(adom=adom) + datagram = scrub_dict(prepare_dict(paramgram)) + + # EVAL THE MODE PARAMETER FOR DELETE + elif mode == "delete": + # SET THE CORRECT URL FOR DELETE + url = '/pm/config/adom/{adom}/obj/voip/profile/{name}'.format(adom=adom, name=paramgram["name"]) + datagram = {} + + response = fmgr.process_request(url, datagram, paramgram["mode"]) + + return response + + +############# +# END METHODS 
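The modify helper above follows the same dispatch pattern as the other fmgr_secprof modules in this commit: set/add/update post the scrubbed paramgram to the profile collection URL, while delete addresses the named profile directly with an empty body. The following stand-alone sketch only illustrates that dispatch; build_voip_request and the None-filtering comprehension are illustrative stand-ins (the module itself uses scrub_dict(prepare_dict(...)) and fmgr.process_request, whose implementations are not part of this diff).

```python
# Hypothetical sketch of the mode -> URL dispatch used by
# fmgr_voip_profile_modify above. Not the library code.

def build_voip_request(paramgram):
    """Return (url, datagram) for the FortiManager VOIP profile call."""
    mode = paramgram["mode"]
    adom = paramgram["adom"]

    if mode in ("set", "add", "update"):
        # Create/update: send the paramgram to the collection URL,
        # dropping unset options (approximating scrub_dict/prepare_dict).
        url = "/pm/config/adom/{adom}/obj/voip/profile".format(adom=adom)
        datagram = {k: v for k, v in paramgram.items() if v is not None}
    elif mode == "delete":
        # Delete: address the named object directly; no body is needed.
        url = "/pm/config/adom/{adom}/obj/voip/profile/{name}".format(
            adom=adom, name=paramgram["name"])
        datagram = {}
    else:
        raise ValueError("unsupported mode: %s" % mode)
    return url, datagram


if __name__ == "__main__":
    url, body = build_voip_request({"mode": "delete", "adom": "root",
                                    "name": "Ansible_VOIP_Profile"})
    print(url)   # /pm/config/adom/root/obj/voip/profile/Ansible_VOIP_Profile
    print(body)  # {}
```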
+############# + + +def main(): + argument_spec = dict( + adom=dict(type="str", default="root"), + mode=dict(choices=["add", "set", "delete", "update"], type="str", default="add"), + + name=dict(required=False, type="str"), + comment=dict(required=False, type="str"), + sccp=dict(required=False, type="dict"), + sccp_block_mcast=dict(required=False, type="str", choices=["disable", "enable"]), + sccp_log_call_summary=dict(required=False, type="str", choices=["disable", "enable"]), + sccp_log_violations=dict(required=False, type="str", choices=["disable", "enable"]), + sccp_max_calls=dict(required=False, type="int"), + sccp_status=dict(required=False, type="str", choices=["disable", "enable"]), + sccp_verify_header=dict(required=False, type="str", choices=["disable", "enable"]), + sip=dict(required=False, type="dict"), + sip_ack_rate=dict(required=False, type="int"), + sip_block_ack=dict(required=False, type="str", choices=["disable", "enable"]), + sip_block_bye=dict(required=False, type="str", choices=["disable", "enable"]), + sip_block_cancel=dict(required=False, type="str", choices=["disable", "enable"]), + sip_block_geo_red_options=dict(required=False, type="str", choices=["disable", "enable"]), + sip_block_info=dict(required=False, type="str", choices=["disable", "enable"]), + sip_block_invite=dict(required=False, type="str", choices=["disable", "enable"]), + sip_block_long_lines=dict(required=False, type="str", choices=["disable", "enable"]), + sip_block_message=dict(required=False, type="str", choices=["disable", "enable"]), + sip_block_notify=dict(required=False, type="str", choices=["disable", "enable"]), + sip_block_options=dict(required=False, type="str", choices=["disable", "enable"]), + sip_block_prack=dict(required=False, type="str", choices=["disable", "enable"]), + sip_block_publish=dict(required=False, type="str", choices=["disable", "enable"]), + sip_block_refer=dict(required=False, type="str", choices=["disable", "enable"]), + sip_block_register=dict(required=False, type="str", choices=["disable", "enable"]), + sip_block_subscribe=dict(required=False, type="str", choices=["disable", "enable"]), + sip_block_unknown=dict(required=False, type="str", choices=["disable", "enable"]), + sip_block_update=dict(required=False, type="str", choices=["disable", "enable"]), + sip_bye_rate=dict(required=False, type="int"), + sip_call_keepalive=dict(required=False, type="int"), + sip_cancel_rate=dict(required=False, type="int"), + sip_contact_fixup=dict(required=False, type="str", choices=["disable", "enable"]), + sip_hnt_restrict_source_ip=dict(required=False, type="str", choices=["disable", "enable"]), + sip_hosted_nat_traversal=dict(required=False, type="str", choices=["disable", "enable"]), + sip_info_rate=dict(required=False, type="int"), + sip_invite_rate=dict(required=False, type="int"), + sip_ips_rtp=dict(required=False, type="str", choices=["disable", "enable"]), + sip_log_call_summary=dict(required=False, type="str", choices=["disable", "enable"]), + sip_log_violations=dict(required=False, type="str", choices=["disable", "enable"]), + sip_malformed_header_allow=dict(required=False, type="str", choices=["pass", "discard", "respond"]), + sip_malformed_header_call_id=dict(required=False, type="str", choices=["pass", "discard", "respond"]), + sip_malformed_header_contact=dict(required=False, type="str", choices=["pass", "discard", "respond"]), + sip_malformed_header_content_length=dict(required=False, type="str", choices=["pass", "discard", "respond"]), + 
sip_malformed_header_content_type=dict(required=False, type="str", choices=["pass", "discard", "respond"]), + sip_malformed_header_cseq=dict(required=False, type="str", choices=["pass", "discard", "respond"]), + sip_malformed_header_expires=dict(required=False, type="str", choices=["pass", "discard", "respond"]), + sip_malformed_header_from=dict(required=False, type="str", choices=["pass", "discard", "respond"]), + sip_malformed_header_max_forwards=dict(required=False, type="str", choices=["pass", "discard", "respond"]), + sip_malformed_header_p_asserted_identity=dict(required=False, type="str", choices=["pass", + "discard", + "respond"]), + sip_malformed_header_rack=dict(required=False, type="str", choices=["pass", "discard", "respond"]), + sip_malformed_header_record_route=dict(required=False, type="str", choices=["pass", "discard", "respond"]), + sip_malformed_header_route=dict(required=False, type="str", choices=["pass", "discard", "respond"]), + sip_malformed_header_rseq=dict(required=False, type="str", choices=["pass", "discard", "respond"]), + sip_malformed_header_sdp_a=dict(required=False, type="str", choices=["pass", "discard", "respond"]), + sip_malformed_header_sdp_b=dict(required=False, type="str", choices=["pass", "discard", "respond"]), + sip_malformed_header_sdp_c=dict(required=False, type="str", choices=["pass", "discard", "respond"]), + sip_malformed_header_sdp_i=dict(required=False, type="str", choices=["pass", "discard", "respond"]), + sip_malformed_header_sdp_k=dict(required=False, type="str", choices=["pass", "discard", "respond"]), + sip_malformed_header_sdp_m=dict(required=False, type="str", choices=["pass", "discard", "respond"]), + sip_malformed_header_sdp_o=dict(required=False, type="str", choices=["pass", "discard", "respond"]), + sip_malformed_header_sdp_r=dict(required=False, type="str", choices=["pass", "discard", "respond"]), + sip_malformed_header_sdp_s=dict(required=False, type="str", choices=["pass", "discard", "respond"]), + sip_malformed_header_sdp_t=dict(required=False, type="str", choices=["pass", "discard", "respond"]), + sip_malformed_header_sdp_v=dict(required=False, type="str", choices=["pass", "discard", "respond"]), + sip_malformed_header_sdp_z=dict(required=False, type="str", choices=["pass", "discard", "respond"]), + sip_malformed_header_to=dict(required=False, type="str", choices=["pass", "discard", "respond"]), + sip_malformed_header_via=dict(required=False, type="str", choices=["pass", "discard", "respond"]), + sip_malformed_request_line=dict(required=False, type="str", choices=["pass", "discard", "respond"]), + sip_max_body_length=dict(required=False, type="int"), + sip_max_dialogs=dict(required=False, type="int"), + sip_max_idle_dialogs=dict(required=False, type="int"), + sip_max_line_length=dict(required=False, type="int"), + sip_message_rate=dict(required=False, type="int"), + sip_nat_trace=dict(required=False, type="str", choices=["disable", "enable"]), + sip_no_sdp_fixup=dict(required=False, type="str", choices=["disable", "enable"]), + sip_notify_rate=dict(required=False, type="int"), + sip_open_contact_pinhole=dict(required=False, type="str", choices=["disable", "enable"]), + sip_open_record_route_pinhole=dict(required=False, type="str", choices=["disable", "enable"]), + sip_open_register_pinhole=dict(required=False, type="str", choices=["disable", "enable"]), + sip_open_via_pinhole=dict(required=False, type="str", choices=["disable", "enable"]), + sip_options_rate=dict(required=False, type="int"), + 
sip_prack_rate=dict(required=False, type="int"), + sip_preserve_override=dict(required=False, type="str", choices=["disable", "enable"]), + sip_provisional_invite_expiry_time=dict(required=False, type="int"), + sip_publish_rate=dict(required=False, type="int"), + sip_refer_rate=dict(required=False, type="int"), + sip_register_contact_trace=dict(required=False, type="str", choices=["disable", "enable"]), + sip_register_rate=dict(required=False, type="int"), + sip_rfc2543_branch=dict(required=False, type="str", choices=["disable", "enable"]), + sip_rtp=dict(required=False, type="str", choices=["disable", "enable"]), + sip_ssl_algorithm=dict(required=False, type="str", choices=["high", "medium", "low"]), + sip_ssl_auth_client=dict(required=False, type="str"), + sip_ssl_auth_server=dict(required=False, type="str"), + sip_ssl_client_certificate=dict(required=False, type="str"), + sip_ssl_client_renegotiation=dict(required=False, type="str", choices=["allow", "deny", "secure"]), + sip_ssl_max_version=dict(required=False, type="str", choices=["ssl-3.0", "tls-1.0", "tls-1.1", "tls-1.2"]), + sip_ssl_min_version=dict(required=False, type="str", choices=["ssl-3.0", "tls-1.0", "tls-1.1", "tls-1.2"]), + sip_ssl_mode=dict(required=False, type="str", choices=["off", "full"]), + sip_ssl_pfs=dict(required=False, type="str", choices=["require", "deny", "allow"]), + sip_ssl_send_empty_frags=dict(required=False, type="str", choices=["disable", "enable"]), + sip_ssl_server_certificate=dict(required=False, type="str"), + sip_status=dict(required=False, type="str", choices=["disable", "enable"]), + sip_strict_register=dict(required=False, type="str", choices=["disable", "enable"]), + sip_subscribe_rate=dict(required=False, type="int"), + sip_unknown_header=dict(required=False, type="str", choices=["pass", "discard", "respond"]), + sip_update_rate=dict(required=False, type="int"), + + ) + + module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False, ) + # MODULE PARAMGRAM + paramgram = { + "mode": module.params["mode"], + "adom": module.params["adom"], + "name": module.params["name"], + "comment": module.params["comment"], + "sccp": { + "block-mcast": module.params["sccp_block_mcast"], + "log-call-summary": module.params["sccp_log_call_summary"], + "log-violations": module.params["sccp_log_violations"], + "max-calls": module.params["sccp_max_calls"], + "status": module.params["sccp_status"], + "verify-header": module.params["sccp_verify_header"], + }, + "sip": { + "ack-rate": module.params["sip_ack_rate"], + "block-ack": module.params["sip_block_ack"], + "block-bye": module.params["sip_block_bye"], + "block-cancel": module.params["sip_block_cancel"], + "block-geo-red-options": module.params["sip_block_geo_red_options"], + "block-info": module.params["sip_block_info"], + "block-invite": module.params["sip_block_invite"], + "block-long-lines": module.params["sip_block_long_lines"], + "block-message": module.params["sip_block_message"], + "block-notify": module.params["sip_block_notify"], + "block-options": module.params["sip_block_options"], + "block-prack": module.params["sip_block_prack"], + "block-publish": module.params["sip_block_publish"], + "block-refer": module.params["sip_block_refer"], + "block-register": module.params["sip_block_register"], + "block-subscribe": module.params["sip_block_subscribe"], + "block-unknown": module.params["sip_block_unknown"], + "block-update": module.params["sip_block_update"], + "bye-rate": module.params["sip_bye_rate"], + "call-keepalive": 
module.params["sip_call_keepalive"], + "cancel-rate": module.params["sip_cancel_rate"], + "contact-fixup": module.params["sip_contact_fixup"], + "hnt-restrict-source-ip": module.params["sip_hnt_restrict_source_ip"], + "hosted-nat-traversal": module.params["sip_hosted_nat_traversal"], + "info-rate": module.params["sip_info_rate"], + "invite-rate": module.params["sip_invite_rate"], + "ips-rtp": module.params["sip_ips_rtp"], + "log-call-summary": module.params["sip_log_call_summary"], + "log-violations": module.params["sip_log_violations"], + "malformed-header-allow": module.params["sip_malformed_header_allow"], + "malformed-header-call-id": module.params["sip_malformed_header_call_id"], + "malformed-header-contact": module.params["sip_malformed_header_contact"], + "malformed-header-content-length": module.params["sip_malformed_header_content_length"], + "malformed-header-content-type": module.params["sip_malformed_header_content_type"], + "malformed-header-cseq": module.params["sip_malformed_header_cseq"], + "malformed-header-expires": module.params["sip_malformed_header_expires"], + "malformed-header-from": module.params["sip_malformed_header_from"], + "malformed-header-max-forwards": module.params["sip_malformed_header_max_forwards"], + "malformed-header-p-asserted-identity": module.params["sip_malformed_header_p_asserted_identity"], + "malformed-header-rack": module.params["sip_malformed_header_rack"], + "malformed-header-record-route": module.params["sip_malformed_header_record_route"], + "malformed-header-route": module.params["sip_malformed_header_route"], + "malformed-header-rseq": module.params["sip_malformed_header_rseq"], + "malformed-header-sdp-a": module.params["sip_malformed_header_sdp_a"], + "malformed-header-sdp-b": module.params["sip_malformed_header_sdp_b"], + "malformed-header-sdp-c": module.params["sip_malformed_header_sdp_c"], + "malformed-header-sdp-i": module.params["sip_malformed_header_sdp_i"], + "malformed-header-sdp-k": module.params["sip_malformed_header_sdp_k"], + "malformed-header-sdp-m": module.params["sip_malformed_header_sdp_m"], + "malformed-header-sdp-o": module.params["sip_malformed_header_sdp_o"], + "malformed-header-sdp-r": module.params["sip_malformed_header_sdp_r"], + "malformed-header-sdp-s": module.params["sip_malformed_header_sdp_s"], + "malformed-header-sdp-t": module.params["sip_malformed_header_sdp_t"], + "malformed-header-sdp-v": module.params["sip_malformed_header_sdp_v"], + "malformed-header-sdp-z": module.params["sip_malformed_header_sdp_z"], + "malformed-header-to": module.params["sip_malformed_header_to"], + "malformed-header-via": module.params["sip_malformed_header_via"], + "malformed-request-line": module.params["sip_malformed_request_line"], + "max-body-length": module.params["sip_max_body_length"], + "max-dialogs": module.params["sip_max_dialogs"], + "max-idle-dialogs": module.params["sip_max_idle_dialogs"], + "max-line-length": module.params["sip_max_line_length"], + "message-rate": module.params["sip_message_rate"], + "nat-trace": module.params["sip_nat_trace"], + "no-sdp-fixup": module.params["sip_no_sdp_fixup"], + "notify-rate": module.params["sip_notify_rate"], + "open-contact-pinhole": module.params["sip_open_contact_pinhole"], + "open-record-route-pinhole": module.params["sip_open_record_route_pinhole"], + "open-register-pinhole": module.params["sip_open_register_pinhole"], + "open-via-pinhole": module.params["sip_open_via_pinhole"], + "options-rate": module.params["sip_options_rate"], + "prack-rate": 
module.params["sip_prack_rate"], + "preserve-override": module.params["sip_preserve_override"], + "provisional-invite-expiry-time": module.params["sip_provisional_invite_expiry_time"], + "publish-rate": module.params["sip_publish_rate"], + "refer-rate": module.params["sip_refer_rate"], + "register-contact-trace": module.params["sip_register_contact_trace"], + "register-rate": module.params["sip_register_rate"], + "rfc2543-branch": module.params["sip_rfc2543_branch"], + "rtp": module.params["sip_rtp"], + "ssl-algorithm": module.params["sip_ssl_algorithm"], + "ssl-auth-client": module.params["sip_ssl_auth_client"], + "ssl-auth-server": module.params["sip_ssl_auth_server"], + "ssl-client-certificate": module.params["sip_ssl_client_certificate"], + "ssl-client-renegotiation": module.params["sip_ssl_client_renegotiation"], + "ssl-max-version": module.params["sip_ssl_max_version"], + "ssl-min-version": module.params["sip_ssl_min_version"], + "ssl-mode": module.params["sip_ssl_mode"], + "ssl-pfs": module.params["sip_ssl_pfs"], + "ssl-send-empty-frags": module.params["sip_ssl_send_empty_frags"], + "ssl-server-certificate": module.params["sip_ssl_server_certificate"], + "status": module.params["sip_status"], + "strict-register": module.params["sip_strict_register"], + "subscribe-rate": module.params["sip_subscribe_rate"], + "unknown-header": module.params["sip_unknown_header"], + "update-rate": module.params["sip_update_rate"], + } + } + + module.paramgram = paramgram + fmgr = None + if module._socket_path: + connection = Connection(module._socket_path) + fmgr = FortiManagerHandler(connection, module) + fmgr.tools = FMGRCommon() + else: + module.fail_json(**FAIL_SOCKET_MSG) + + list_overrides = ['sccp', 'sip'] + paramgram = fmgr.tools.paramgram_child_list_override(list_overrides=list_overrides, + paramgram=paramgram, module=module) + module.paramgram = paramgram + + results = DEFAULT_RESULT_OBJ + try: + + results = fmgr_voip_profile_modify(fmgr, paramgram) + fmgr.govern_response(module=module, results=results, + ansible_facts=fmgr.construct_ansible_facts(results, module.params, paramgram)) + + except Exception as err: + raise FMGBaseException(err) + + return module.exit_json(**results[1]) + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/network/fortimanager/fmgr_secprof_waf.py b/plugins/modules/network/fortimanager/fmgr_secprof_waf.py new file mode 100644 index 0000000000..7ec95bade9 --- /dev/null +++ b/plugins/modules/network/fortimanager/fmgr_secprof_waf.py @@ -0,0 +1,1481 @@ +#!/usr/bin/python +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <http://www.gnu.org/licenses/>. +# + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'metadata_version': '1.1'} + +DOCUMENTATION = ''' +--- +module: fmgr_secprof_waf +notes: + - Full Documentation at U(https://ftnt-ansible-docs.readthedocs.io/en/latest/).
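Like the fmgr_secprof_ssl_ssh_profile module earlier in the diff, the VOIP module's main() just above routes its assembled paramgram through FMGRCommon().paramgram_child_list_override before submitting. That helper's implementation is not part of this commit; the sketch below only models the contract the module docs describe (a raw sccp/sip child object supplied by the user is mutually exclusive with the prefixed sub-options and replaces the assembled sub-dict wholesale). All names here are hypothetical.

```python
# Hedged sketch of the child-list override contract documented above:
# when the expert user passes the raw API object (e.g. sip=[{...}]),
# the sub-dict assembled from the prefixed ansible options is discarded
# in its favor. The real helper lives in FMGRCommon and may differ.

def child_list_override(list_overrides, paramgram, raw_params):
    for key in list_overrides:
        raw = raw_params.get(key)
        if raw:  # raw API dict/list supplied: it wins outright
            paramgram[key] = raw
    return paramgram


if __name__ == "__main__":
    assembled = {"sip": {"status": "enable", "block-ack": None},
                 "sccp": {"status": None}}
    user = {"sip": [{"status": "enable", "rtp": "enable"}], "sccp": None}
    print(child_list_override(["sccp", "sip"], assembled, user))
    # "sip" is replaced by the raw list; "sccp" sub-dict is left untouched
```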
+author: + - Luke Weighall (@lweighall) + - Andrew Welsh (@Ghilli3) + - Jim Huber (@p4r4n0y1ng) +short_description: FortiManager web application firewall security profile +description: + - Manage web application firewall security profiles for FGTs via FMG + +options: + adom: + description: + - The ADOM the configuration should belong to. + required: false + default: root + + mode: + description: + - Sets one of four modes for managing the object. + - Allows use of soft-adds instead of overwriting existing values + choices: ['add', 'set', 'delete', 'update'] + required: false + default: add + + name: + description: + - WAF Profile name. + required: false + + external: + description: + - Disable/Enable external HTTP Inspection. + - choice | disable | Disable external inspection. + - choice | enable | Enable external inspection. + required: false + choices: ["disable", "enable"] + + extended_log: + description: + - Enable/disable extended logging. + - choice | disable | Disable setting. + - choice | enable | Enable setting. + required: false + choices: ["disable", "enable"] + + comment: + description: + - Comment. + required: false + + address_list: + description: + - EXPERTS ONLY! KNOWLEDGE OF FMGR JSON API IS REQUIRED! + - List of multiple child objects to be added. Expects a list of dictionaries. + - Dictionaries must use FortiManager API parameters, not the ansible ones listed below. + - If submitted, all other prefixed sub-parameters ARE IGNORED. + - This object is MUTUALLY EXCLUSIVE with its options. + - We expect that you know what you are doing with these list parameters, and are leveraging the JSON API Guide. + - WHEN IN DOUBT, USE THE SUB OPTIONS BELOW INSTEAD TO CREATE OBJECTS WITH MULTIPLE TASKS + required: false + + address_list_blocked_address: + description: + - Blocked address. + required: false + + address_list_blocked_log: + description: + - Enable/disable logging on blocked addresses. + - choice | disable | Disable setting. + - choice | enable | Enable setting. + required: false + choices: ["disable", "enable"] + + address_list_severity: + description: + - Severity. + - choice | low | Low severity. + - choice | medium | Medium severity. + - choice | high | High severity. + required: false + choices: ["low", "medium", "high"] + + address_list_status: + description: + - Status. + - choice | disable | Disable setting. + - choice | enable | Enable setting. + required: false + choices: ["disable", "enable"] + + address_list_trusted_address: + description: + - Trusted address. + required: false + + constraint: + description: + - EXPERTS ONLY! KNOWLEDGE OF FMGR JSON API IS REQUIRED! + - List of multiple child objects to be added. Expects a list of dictionaries. + - Dictionaries must use FortiManager API parameters, not the ansible ones listed below. + - If submitted, all other prefixed sub-parameters ARE IGNORED. + - This object is MUTUALLY EXCLUSIVE with its options. + - We expect that you know what you are doing with these list parameters, and are leveraging the JSON API Guide. + - WHEN IN DOUBT, USE THE SUB OPTIONS BELOW INSTEAD TO CREATE OBJECTS WITH MULTIPLE TASKS + required: false + + constraint_content_length_action: + description: + - Action. + - choice | allow | Allow. + - choice | block | Block. + required: false + choices: ["allow", "block"] + + constraint_content_length_length: + description: + - Length of HTTP content in bytes (0 to 2147483647). + required: false + + constraint_content_length_log: + description: + - Enable/disable logging.
+ - choice | disable | Disable setting. + - choice | enable | Enable setting. + required: false + choices: ["disable", "enable"] + + constraint_content_length_severity: + description: + - Severity. + - choice | low | Low severity. + - choice | medium | Medium severity. + - choice | high | High severity. + required: false + choices: ["low", "medium", "high"] + + constraint_content_length_status: + description: + - Enable/disable the constraint. + - choice | disable | Disable setting. + - choice | enable | Enable setting. + required: false + choices: ["disable", "enable"] + + constraint_exception_address: + description: + - Host address. + required: false + + constraint_exception_content_length: + description: + - HTTP content length in request. + - choice | disable | Disable setting. + - choice | enable | Enable setting. + required: false + choices: ["disable", "enable"] + + constraint_exception_header_length: + description: + - HTTP header length in request. + - choice | disable | Disable setting. + - choice | enable | Enable setting. + required: false + choices: ["disable", "enable"] + + constraint_exception_hostname: + description: + - Enable/disable hostname check. + - choice | disable | Disable setting. + - choice | enable | Enable setting. + required: false + choices: ["disable", "enable"] + + constraint_exception_line_length: + description: + - HTTP line length in request. + - choice | disable | Disable setting. + - choice | enable | Enable setting. + required: false + choices: ["disable", "enable"] + + constraint_exception_malformed: + description: + - Enable/disable malformed HTTP request check. + - choice | disable | Disable setting. + - choice | enable | Enable setting. + required: false + choices: ["disable", "enable"] + + constraint_exception_max_cookie: + description: + - Maximum number of cookies in HTTP request. + - choice | disable | Disable setting. + - choice | enable | Enable setting. + required: false + choices: ["disable", "enable"] + + constraint_exception_max_header_line: + description: + - Maximum number of HTTP header line. + - choice | disable | Disable setting. + - choice | enable | Enable setting. + required: false + choices: ["disable", "enable"] + + constraint_exception_max_range_segment: + description: + - Maximum number of range segments in HTTP range line. + - choice | disable | Disable setting. + - choice | enable | Enable setting. + required: false + choices: ["disable", "enable"] + + constraint_exception_max_url_param: + description: + - Maximum number of parameters in URL. + - choice | disable | Disable setting. + - choice | enable | Enable setting. + required: false + choices: ["disable", "enable"] + + constraint_exception_method: + description: + - Enable/disable HTTP method check. + - choice | disable | Disable setting. + - choice | enable | Enable setting. + required: false + choices: ["disable", "enable"] + + constraint_exception_param_length: + description: + - Maximum length of parameter in URL, HTTP POST request or HTTP body. + - choice | disable | Disable setting. + - choice | enable | Enable setting. + required: false + choices: ["disable", "enable"] + + constraint_exception_pattern: + description: + - URL pattern. + required: false + + constraint_exception_regex: + description: + - Enable/disable regular expression based pattern match. + - choice | disable | Disable setting. + - choice | enable | Enable setting. 
+ required: false + choices: ["disable", "enable"] + + constraint_exception_url_param_length: + description: + - Maximum length of parameter in URL. + - choice | disable | Disable setting. + - choice | enable | Enable setting. + required: false + choices: ["disable", "enable"] + + constraint_exception_version: + description: + - Enable/disable HTTP version check. + - choice | disable | Disable setting. + - choice | enable | Enable setting. + required: false + choices: ["disable", "enable"] + + constraint_header_length_action: + description: + - Action. + - choice | allow | Allow. + - choice | block | Block. + required: false + choices: ["allow", "block"] + + constraint_header_length_length: + description: + - Length of HTTP header in bytes (0 to 2147483647). + required: false + + constraint_header_length_log: + description: + - Enable/disable logging. + - choice | disable | Disable setting. + - choice | enable | Enable setting. + required: false + choices: ["disable", "enable"] + + constraint_header_length_severity: + description: + - Severity. + - choice | low | Low severity. + - choice | medium | Medium severity. + - choice | high | High severity. + required: false + choices: ["low", "medium", "high"] + + constraint_header_length_status: + description: + - Enable/disable the constraint. + - choice | disable | Disable setting. + - choice | enable | Enable setting. + required: false + choices: ["disable", "enable"] + + constraint_hostname_action: + description: + - Action for a hostname constraint. + - choice | allow | Allow. + - choice | block | Block. + required: false + choices: ["allow", "block"] + + constraint_hostname_log: + description: + - Enable/disable logging. + - choice | disable | Disable setting. + - choice | enable | Enable setting. + required: false + choices: ["disable", "enable"] + + constraint_hostname_severity: + description: + - Severity. + - choice | low | Low severity. + - choice | medium | Medium severity. + - choice | high | High severity. + required: false + choices: ["low", "medium", "high"] + + constraint_hostname_status: + description: + - Enable/disable the constraint. + - choice | disable | Disable setting. + - choice | enable | Enable setting. + required: false + choices: ["disable", "enable"] + + constraint_line_length_action: + description: + - Action. + - choice | allow | Allow. + - choice | block | Block. + required: false + choices: ["allow", "block"] + + constraint_line_length_length: + description: + - Length of HTTP line in bytes (0 to 2147483647). + required: false + + constraint_line_length_log: + description: + - Enable/disable logging. + - choice | disable | Disable setting. + - choice | enable | Enable setting. + required: false + choices: ["disable", "enable"] + + constraint_line_length_severity: + description: + - Severity. + - choice | low | Low severity. + - choice | medium | Medium severity. + - choice | high | High severity. + required: false + choices: ["low", "medium", "high"] + + constraint_line_length_status: + description: + - Enable/disable the constraint. + - choice | disable | Disable setting. + - choice | enable | Enable setting. + required: false + choices: ["disable", "enable"] + + constraint_malformed_action: + description: + - Action. + - choice | allow | Allow. + - choice | block | Block. + required: false + choices: ["allow", "block"] + + constraint_malformed_log: + description: + - Enable/disable logging. + - choice | disable | Disable setting. + - choice | enable | Enable setting. 
+ required: false
+ choices: ["disable", "enable"]
+
+ constraint_malformed_severity:
+ description:
+ - Severity.
+ - choice | low | Low severity.
+ - choice | medium | Medium severity.
+ - choice | high | High severity.
+ required: false
+ choices: ["low", "medium", "high"]
+
+ constraint_malformed_status:
+ description:
+ - Enable/disable the constraint.
+ - choice | disable | Disable setting.
+ - choice | enable | Enable setting.
+ required: false
+ choices: ["disable", "enable"]
+
+ constraint_max_cookie_action:
+ description:
+ - Action.
+ - choice | allow | Allow.
+ - choice | block | Block.
+ required: false
+ choices: ["allow", "block"]
+
+ constraint_max_cookie_log:
+ description:
+ - Enable/disable logging.
+ - choice | disable | Disable setting.
+ - choice | enable | Enable setting.
+ required: false
+ choices: ["disable", "enable"]
+
+ constraint_max_cookie_max_cookie:
+ description:
+ - Maximum number of cookies in HTTP request (0 to 2147483647).
+ required: false
+
+ constraint_max_cookie_severity:
+ description:
+ - Severity.
+ - choice | low | Low severity.
+ - choice | medium | Medium severity.
+ - choice | high | High severity.
+ required: false
+ choices: ["low", "medium", "high"]
+
+ constraint_max_cookie_status:
+ description:
+ - Enable/disable the constraint.
+ - choice | disable | Disable setting.
+ - choice | enable | Enable setting.
+ required: false
+ choices: ["disable", "enable"]
+
+ constraint_max_header_line_action:
+ description:
+ - Action.
+ - choice | allow | Allow.
+ - choice | block | Block.
+ required: false
+ choices: ["allow", "block"]
+
+ constraint_max_header_line_log:
+ description:
+ - Enable/disable logging.
+ - choice | disable | Disable setting.
+ - choice | enable | Enable setting.
+ required: false
+ choices: ["disable", "enable"]
+
+ constraint_max_header_line_max_header_line:
+ description:
+ - Maximum number of HTTP header lines (0 to 2147483647).
+ required: false
+
+ constraint_max_header_line_severity:
+ description:
+ - Severity.
+ - choice | low | Low severity.
+ - choice | medium | Medium severity.
+ - choice | high | High severity.
+ required: false
+ choices: ["low", "medium", "high"]
+
+ constraint_max_header_line_status:
+ description:
+ - Enable/disable the constraint.
+ - choice | disable | Disable setting.
+ - choice | enable | Enable setting.
+ required: false
+ choices: ["disable", "enable"]
+
+ constraint_max_range_segment_action:
+ description:
+ - Action.
+ - choice | allow | Allow.
+ - choice | block | Block.
+ required: false
+ choices: ["allow", "block"]
+
+ constraint_max_range_segment_log:
+ description:
+ - Enable/disable logging.
+ - choice | disable | Disable setting.
+ - choice | enable | Enable setting.
+ required: false
+ choices: ["disable", "enable"]
+
+ constraint_max_range_segment_max_range_segment:
+ description:
+ - Maximum number of range segments in HTTP range line (0 to 2147483647).
+ required: false
+
+ constraint_max_range_segment_severity:
+ description:
+ - Severity.
+ - choice | low | Low severity.
+ - choice | medium | Medium severity.
+ - choice | high | High severity.
+ required: false
+ choices: ["low", "medium", "high"]
+
+ constraint_max_range_segment_status:
+ description:
+ - Enable/disable the constraint.
+ - choice | disable | Disable setting.
+ - choice | enable | Enable setting.
+ required: false
+ choices: ["disable", "enable"]
+
+ constraint_max_url_param_action:
+ description:
+ - Action.
+ - choice | allow | Allow.
+ - choice | block | Block.
+ required: false + choices: ["allow", "block"] + + constraint_max_url_param_log: + description: + - Enable/disable logging. + - choice | disable | Disable setting. + - choice | enable | Enable setting. + required: false + choices: ["disable", "enable"] + + constraint_max_url_param_max_url_param: + description: + - Maximum number of parameters in URL (0 to 2147483647). + required: false + + constraint_max_url_param_severity: + description: + - Severity. + - choice | low | Low severity. + - choice | medium | Medium severity. + - choice | high | High severity. + required: false + choices: ["low", "medium", "high"] + + constraint_max_url_param_status: + description: + - Enable/disable the constraint. + - choice | disable | Disable setting. + - choice | enable | Enable setting. + required: false + choices: ["disable", "enable"] + + constraint_method_action: + description: + - Action. + - choice | allow | Allow. + - choice | block | Block. + required: false + choices: ["allow", "block"] + + constraint_method_log: + description: + - Enable/disable logging. + - choice | disable | Disable setting. + - choice | enable | Enable setting. + required: false + choices: ["disable", "enable"] + + constraint_method_severity: + description: + - Severity. + - choice | low | Low severity. + - choice | medium | Medium severity. + - choice | high | High severity. + required: false + choices: ["low", "medium", "high"] + + constraint_method_status: + description: + - Enable/disable the constraint. + - choice | disable | Disable setting. + - choice | enable | Enable setting. + required: false + choices: ["disable", "enable"] + + constraint_param_length_action: + description: + - Action. + - choice | allow | Allow. + - choice | block | Block. + required: false + choices: ["allow", "block"] + + constraint_param_length_length: + description: + - Maximum length of parameter in URL, HTTP POST request or HTTP body in bytes (0 to 2147483647). + required: false + + constraint_param_length_log: + description: + - Enable/disable logging. + - choice | disable | Disable setting. + - choice | enable | Enable setting. + required: false + choices: ["disable", "enable"] + + constraint_param_length_severity: + description: + - Severity. + - choice | low | Low severity. + - choice | medium | Medium severity. + - choice | high | High severity. + required: false + choices: ["low", "medium", "high"] + + constraint_param_length_status: + description: + - Enable/disable the constraint. + - choice | disable | Disable setting. + - choice | enable | Enable setting. + required: false + choices: ["disable", "enable"] + + constraint_url_param_length_action: + description: + - Action. + - choice | allow | Allow. + - choice | block | Block. + required: false + choices: ["allow", "block"] + + constraint_url_param_length_length: + description: + - Maximum length of URL parameter in bytes (0 to 2147483647). + required: false + + constraint_url_param_length_log: + description: + - Enable/disable logging. + - choice | disable | Disable setting. + - choice | enable | Enable setting. + required: false + choices: ["disable", "enable"] + + constraint_url_param_length_severity: + description: + - Severity. + - choice | low | Low severity. + - choice | medium | Medium severity. + - choice | high | High severity. + required: false + choices: ["low", "medium", "high"] + + constraint_url_param_length_status: + description: + - Enable/disable the constraint. + - choice | disable | Disable setting. + - choice | enable | Enable setting. 
+ required: false
+ choices: ["disable", "enable"]
+
+ constraint_version_action:
+ description:
+ - Action.
+ - choice | allow | Allow.
+ - choice | block | Block.
+ required: false
+ choices: ["allow", "block"]
+
+ constraint_version_log:
+ description:
+ - Enable/disable logging.
+ - choice | disable | Disable setting.
+ - choice | enable | Enable setting.
+ required: false
+ choices: ["disable", "enable"]
+
+ constraint_version_severity:
+ description:
+ - Severity.
+ - choice | low | Low severity.
+ - choice | medium | Medium severity.
+ - choice | high | High severity.
+ required: false
+ choices: ["low", "medium", "high"]
+
+ constraint_version_status:
+ description:
+ - Enable/disable the constraint.
+ - choice | disable | Disable setting.
+ - choice | enable | Enable setting.
+ required: false
+ choices: ["disable", "enable"]
+
+ method:
+ description:
+ - EXPERTS ONLY! KNOWLEDGE OF FMGR JSON API IS REQUIRED!
+ - List of multiple child objects to be added. Expects a list of dictionaries.
+ - Dictionaries must use FortiManager API parameters, not the ansible ones listed below.
+ - If submitted, all other prefixed sub-parameters ARE IGNORED.
+ - This object is MUTUALLY EXCLUSIVE with its options.
+ - We expect that you know what you are doing with these list parameters, and are leveraging the JSON API Guide.
+ - WHEN IN DOUBT, USE THE SUB OPTIONS BELOW INSTEAD TO CREATE OBJECTS WITH MULTIPLE TASKS
+ required: false
+
+ method_default_allowed_methods:
+ description:
+ - Methods.
+ - FLAG Based Options. Specify multiple in list form.
+ - flag | delete | HTTP DELETE method.
+ - flag | get | HTTP GET method.
+ - flag | head | HTTP HEAD method.
+ - flag | options | HTTP OPTIONS method.
+ - flag | post | HTTP POST method.
+ - flag | put | HTTP PUT method.
+ - flag | trace | HTTP TRACE method.
+ - flag | others | Other HTTP methods.
+ - flag | connect | HTTP CONNECT method.
+ required: false
+ choices: ["delete", "get", "head", "options", "post", "put", "trace", "others", "connect"]
+
+ method_log:
+ description:
+ - Enable/disable logging.
+ - choice | disable | Disable setting.
+ - choice | enable | Enable setting.
+ required: false
+ choices: ["disable", "enable"]
+
+ method_severity:
+ description:
+ - Severity.
+ - choice | low | Low severity.
+ - choice | medium | Medium severity.
+ - choice | high | High severity.
+ required: false
+ choices: ["low", "medium", "high"]
+
+ method_status:
+ description:
+ - Status.
+ - choice | disable | Disable setting.
+ - choice | enable | Enable setting.
+ required: false
+ choices: ["disable", "enable"]
+
+ method_method_policy_address:
+ description:
+ - Host address.
+ required: false
+
+ method_method_policy_allowed_methods:
+ description:
+ - Allowed Methods.
+ - FLAG Based Options. Specify multiple in list form.
+ - flag | delete | HTTP DELETE method.
+ - flag | get | HTTP GET method.
+ - flag | head | HTTP HEAD method.
+ - flag | options | HTTP OPTIONS method.
+ - flag | post | HTTP POST method.
+ - flag | put | HTTP PUT method.
+ - flag | trace | HTTP TRACE method.
+ - flag | others | Other HTTP methods.
+ - flag | connect | HTTP CONNECT method.
+ required: false
+ choices: ["delete", "get", "head", "options", "post", "put", "trace", "others", "connect"]
+
+ method_method_policy_pattern:
+ description:
+ - URL pattern.
+ required: false
+
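+ # Illustrative note, not part of the module schema: the FLAG-based options above
+ # accept several values at once in list form, e.g. (hypothetical values)
+ # method_method_policy_allowed_methods: ["get", "head", "post"]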
+ method_method_policy_regex:
+ description:
+ - Enable/disable regular expression based pattern match.
+ - choice | disable | Disable setting.
+ - choice | enable | Enable setting.
+ required: false
+ choices: ["disable", "enable"]
+
+ signature:
+ description:
+ - EXPERTS ONLY! KNOWLEDGE OF FMGR JSON API IS REQUIRED!
+ - List of multiple child objects to be added. Expects a list of dictionaries.
+ - Dictionaries must use FortiManager API parameters, not the ansible ones listed below.
+ - If submitted, all other prefixed sub-parameters ARE IGNORED.
+ - This object is MUTUALLY EXCLUSIVE with its options.
+ - We expect that you know what you are doing with these list parameters, and are leveraging the JSON API Guide.
+ - WHEN IN DOUBT, USE THE SUB OPTIONS BELOW INSTEAD TO CREATE OBJECTS WITH MULTIPLE TASKS
+ required: false
+
+ signature_credit_card_detection_threshold:
+ description:
+ - The minimum number of credit card numbers required to detect a violation.
+ required: false
+
+ signature_disabled_signature:
+ description:
+ - Disabled signatures.
+ required: false
+
+ signature_disabled_sub_class:
+ description:
+ - Disabled signature subclasses.
+ required: false
+
+ signature_custom_signature_action:
+ description:
+ - Action.
+ - choice | allow | Allow.
+ - choice | block | Block.
+ - choice | erase | Erase credit card numbers.
+ required: false
+ choices: ["allow", "block", "erase"]
+
+ signature_custom_signature_case_sensitivity:
+ description:
+ - Case sensitivity in pattern.
+ - choice | disable | Case insensitive in pattern.
+ - choice | enable | Case sensitive in pattern.
+ required: false
+ choices: ["disable", "enable"]
+
+ signature_custom_signature_direction:
+ description:
+ - Traffic direction.
+ - choice | request | Match HTTP request.
+ - choice | response | Match HTTP response.
+ required: false
+ choices: ["request", "response"]
+
+ signature_custom_signature_log:
+ description:
+ - Enable/disable logging.
+ - choice | disable | Disable setting.
+ - choice | enable | Enable setting.
+ required: false
+ choices: ["disable", "enable"]
+
+ signature_custom_signature_name:
+ description:
+ - Signature name.
+ required: false
+
+ signature_custom_signature_pattern:
+ description:
+ - Match pattern.
+ required: false
+
+ signature_custom_signature_severity:
+ description:
+ - Severity.
+ - choice | low | Low severity.
+ - choice | medium | Medium severity.
+ - choice | high | High severity.
+ required: false
+ choices: ["low", "medium", "high"]
+
+ signature_custom_signature_status:
+ description:
+ - Status.
+ - choice | disable | Disable setting.
+ - choice | enable | Enable setting.
+ required: false
+ choices: ["disable", "enable"]
+
+ signature_custom_signature_target:
+ description:
+ - Match HTTP target.
+ - FLAG Based Options. Specify multiple in list form.
+ - flag | arg | HTTP arguments.
+ - flag | arg-name | Names of HTTP arguments.
+ - flag | req-body | HTTP request body.
+ - flag | req-cookie | HTTP request cookies.
+ - flag | req-cookie-name | HTTP request cookie names.
+ - flag | req-filename | HTTP request file name.
+ - flag | req-header | HTTP request headers.
+ - flag | req-header-name | HTTP request header names.
+ - flag | req-raw-uri | Raw URI of HTTP request.
+ - flag | req-uri | URI of HTTP request.
+ - flag | resp-body | HTTP response body.
+ - flag | resp-hdr | HTTP response headers.
+ - flag | resp-status | HTTP response status.
+ required: false
+ choices: ["arg","arg-name","req-body","req-cookie","req-cookie-name","req-filename","req-header","req-header-name",
+ "req-raw-uri","req-uri","resp-body","resp-hdr","resp-status"]
+
+ signature_main_class_action:
+ description:
+ - Action.
+ - choice | allow | Allow.
+ - choice | block | Block.
+ - choice | erase | Erase credit card numbers. + required: false + choices: ["allow", "block", "erase"] + + signature_main_class_log: + description: + - Enable/disable logging. + - choice | disable | Disable setting. + - choice | enable | Enable setting. + required: false + choices: ["disable", "enable"] + + signature_main_class_severity: + description: + - Severity. + - choice | low | Low severity. + - choice | medium | Medium severity. + - choice | high | High severity. + required: false + choices: ["low", "medium", "high"] + + signature_main_class_status: + description: + - Status. + - choice | disable | Disable setting. + - choice | enable | Enable setting. + required: false + choices: ["disable", "enable"] + + url_access: + description: + - EXPERTS ONLY! KNOWLEDGE OF FMGR JSON API IS REQUIRED! + - List of multiple child objects to be added. Expects a list of dictionaries. + - Dictionaries must use FortiManager API parameters, not the ansible ones listed below. + - If submitted, all other prefixed sub-parameters ARE IGNORED. + - This object is MUTUALLY EXCLUSIVE with its options. + - We expect that you know what you are doing with these list parameters, and are leveraging the JSON API Guide. + - WHEN IN DOUBT, USE THE SUB OPTIONS BELOW INSTEAD TO CREATE OBJECTS WITH MULTIPLE TASKS + required: false + + url_access_action: + description: + - Action. + - choice | bypass | Allow the HTTP request, also bypass further WAF scanning. + - choice | permit | Allow the HTTP request, and continue further WAF scanning. + - choice | block | Block HTTP request. + required: false + choices: ["bypass", "permit", "block"] + + url_access_address: + description: + - Host address. + required: false + + url_access_log: + description: + - Enable/disable logging. + - choice | disable | Disable setting. + - choice | enable | Enable setting. + required: false + choices: ["disable", "enable"] + + url_access_severity: + description: + - Severity. + - choice | low | Low severity. + - choice | medium | Medium severity. + - choice | high | High severity. + required: false + choices: ["low", "medium", "high"] + + url_access_access_pattern_negate: + description: + - Enable/disable match negation. + - choice | disable | Disable setting. + - choice | enable | Enable setting. + required: false + choices: ["disable", "enable"] + + url_access_access_pattern_pattern: + description: + - URL pattern. + required: false + + url_access_access_pattern_regex: + description: + - Enable/disable regular expression based pattern match. + - choice | disable | Disable setting. + - choice | enable | Enable setting. + required: false + choices: ["disable", "enable"] + + url_access_access_pattern_srcaddr: + description: + - Source address. 
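+ # Illustrative note, not part of the module schema: instead of the url_access_*
+ # options, the url_access parameter above accepts raw FortiManager API
+ # dictionaries, e.g. (hypothetical values)
+ # url_access: [{"action": "block", "address": "all", "severity": "high"}]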
+ required: false
+
+'''
+
+EXAMPLES = '''
+ - name: DELETE Profile
+ fmgr_secprof_waf:
+ name: "Ansible_WAF_Profile"
+ comment: "Created by Ansible Module TEST"
+ mode: "delete"
+
+ - name: CREATE Profile
+ fmgr_secprof_waf:
+ name: "Ansible_WAF_Profile"
+ comment: "Created by Ansible Module TEST"
+ mode: "set"
+'''
+
+RETURN = """
+api_result:
+ description: full API response, includes status code and message
+ returned: always
+ type: str
+"""
+
+from ansible.module_utils.basic import AnsibleModule, env_fallback
+from ansible.module_utils.connection import Connection
+from ansible_collections.fortinet.fortios.plugins.module_utils.network.fortimanager.fortimanager import FortiManagerHandler
+from ansible_collections.fortinet.fortios.plugins.module_utils.network.fortimanager.common import FMGBaseException
+from ansible_collections.fortinet.fortios.plugins.module_utils.network.fortimanager.common import FMGRCommon
+from ansible_collections.fortinet.fortios.plugins.module_utils.network.fortimanager.common import FMGRMethods
+from ansible_collections.fortinet.fortios.plugins.module_utils.network.fortimanager.common import DEFAULT_RESULT_OBJ
+from ansible_collections.fortinet.fortios.plugins.module_utils.network.fortimanager.common import FAIL_SOCKET_MSG
+from ansible_collections.fortinet.fortios.plugins.module_utils.network.fortimanager.common import prepare_dict
+from ansible_collections.fortinet.fortios.plugins.module_utils.network.fortimanager.common import scrub_dict
+
+
+###############
+# START METHODS
+###############
+
+
+def fmgr_waf_profile_modify(fmgr, paramgram):
+ """
+ :param fmgr: The fmgr object instance from fortimanager.py
+ :type fmgr: class object
+ :param paramgram: The formatted dictionary of options to process
+ :type paramgram: dict
+ :return: The response from the FortiManager
+ :rtype: dict
+ """
+ mode = paramgram["mode"]
+ adom = paramgram["adom"]
+ # INIT BASIC OBJECTS
+ response = DEFAULT_RESULT_OBJ
+ url = ""
+ datagram = {}
+
+ # EVAL THE MODE PARAMETER FOR SET OR ADD
+ if mode in ['set', 'add', 'update']:
+ url = '/pm/config/adom/{adom}/obj/waf/profile'.format(adom=adom)
+ datagram = scrub_dict(prepare_dict(paramgram))
+
+ # EVAL THE MODE PARAMETER FOR DELETE
+ elif mode == "delete":
+ # SET THE CORRECT URL FOR DELETE
+ url = '/pm/config/adom/{adom}/obj/waf/profile/{name}'.format(adom=adom, name=paramgram["name"])
+ datagram = {}
+
+ response = fmgr.process_request(url, datagram, paramgram["mode"])
+
+ return response
+
+
+#############
+# END METHODS
+#############
+
+
+def main():
+ argument_spec = dict(
+ adom=dict(type="str", default="root"),
+ mode=dict(choices=["add", "set", "delete", "update"], type="str", default="add"),
+
+ name=dict(required=False, type="str"),
+ external=dict(required=False, type="str", choices=["disable", "enable"]),
+ extended_log=dict(required=False, type="str", choices=["disable", "enable"]),
+ comment=dict(required=False, type="str"),
+ address_list=dict(required=False, type="list"),
+ address_list_blocked_address=dict(required=False, type="str"),
+ address_list_blocked_log=dict(required=False, type="str", choices=["disable", "enable"]),
+ address_list_severity=dict(required=False, type="str", choices=["low", "medium", "high"]),
+ address_list_status=dict(required=False, type="str", choices=["disable", "enable"]),
+ address_list_trusted_address=dict(required=False, type="str"),
+ constraint=dict(required=False, type="list"),
+
+ constraint_content_length_action=dict(required=False, type="str", choices=["allow", "block"]),
+
constraint_content_length_length=dict(required=False, type="int"), + constraint_content_length_log=dict(required=False, type="str", choices=["disable", "enable"]), + constraint_content_length_severity=dict(required=False, type="str", choices=["low", "medium", "high"]), + constraint_content_length_status=dict(required=False, type="str", choices=["disable", "enable"]), + + constraint_exception_address=dict(required=False, type="str"), + constraint_exception_content_length=dict(required=False, type="str", choices=["disable", "enable"]), + constraint_exception_header_length=dict(required=False, type="str", choices=["disable", "enable"]), + constraint_exception_hostname=dict(required=False, type="str", choices=["disable", "enable"]), + constraint_exception_line_length=dict(required=False, type="str", choices=["disable", "enable"]), + constraint_exception_malformed=dict(required=False, type="str", choices=["disable", "enable"]), + constraint_exception_max_cookie=dict(required=False, type="str", choices=["disable", "enable"]), + constraint_exception_max_header_line=dict(required=False, type="str", choices=["disable", "enable"]), + constraint_exception_max_range_segment=dict(required=False, type="str", choices=["disable", "enable"]), + constraint_exception_max_url_param=dict(required=False, type="str", choices=["disable", "enable"]), + constraint_exception_method=dict(required=False, type="str", choices=["disable", "enable"]), + constraint_exception_param_length=dict(required=False, type="str", choices=["disable", "enable"]), + constraint_exception_pattern=dict(required=False, type="str"), + constraint_exception_regex=dict(required=False, type="str", choices=["disable", "enable"]), + constraint_exception_url_param_length=dict(required=False, type="str", choices=["disable", "enable"]), + constraint_exception_version=dict(required=False, type="str", choices=["disable", "enable"]), + + constraint_header_length_action=dict(required=False, type="str", choices=["allow", "block"]), + constraint_header_length_length=dict(required=False, type="int"), + constraint_header_length_log=dict(required=False, type="str", choices=["disable", "enable"]), + constraint_header_length_severity=dict(required=False, type="str", choices=["low", "medium", "high"]), + constraint_header_length_status=dict(required=False, type="str", choices=["disable", "enable"]), + + constraint_hostname_action=dict(required=False, type="str", choices=["allow", "block"]), + constraint_hostname_log=dict(required=False, type="str", choices=["disable", "enable"]), + constraint_hostname_severity=dict(required=False, type="str", choices=["low", "medium", "high"]), + constraint_hostname_status=dict(required=False, type="str", choices=["disable", "enable"]), + + constraint_line_length_action=dict(required=False, type="str", choices=["allow", "block"]), + constraint_line_length_length=dict(required=False, type="int"), + constraint_line_length_log=dict(required=False, type="str", choices=["disable", "enable"]), + constraint_line_length_severity=dict(required=False, type="str", choices=["low", "medium", "high"]), + constraint_line_length_status=dict(required=False, type="str", choices=["disable", "enable"]), + + constraint_malformed_action=dict(required=False, type="str", choices=["allow", "block"]), + constraint_malformed_log=dict(required=False, type="str", choices=["disable", "enable"]), + constraint_malformed_severity=dict(required=False, type="str", choices=["low", "medium", "high"]), + constraint_malformed_status=dict(required=False, type="str", 
choices=["disable", "enable"]), + + constraint_max_cookie_action=dict(required=False, type="str", choices=["allow", "block"]), + constraint_max_cookie_log=dict(required=False, type="str", choices=["disable", "enable"]), + constraint_max_cookie_max_cookie=dict(required=False, type="int"), + constraint_max_cookie_severity=dict(required=False, type="str", choices=["low", "medium", "high"]), + constraint_max_cookie_status=dict(required=False, type="str", choices=["disable", "enable"]), + + constraint_max_header_line_action=dict(required=False, type="str", choices=["allow", "block"]), + constraint_max_header_line_log=dict(required=False, type="str", choices=["disable", "enable"]), + constraint_max_header_line_max_header_line=dict(required=False, type="int"), + constraint_max_header_line_severity=dict(required=False, type="str", choices=["low", "medium", "high"]), + constraint_max_header_line_status=dict(required=False, type="str", choices=["disable", "enable"]), + + constraint_max_range_segment_action=dict(required=False, type="str", choices=["allow", "block"]), + constraint_max_range_segment_log=dict(required=False, type="str", choices=["disable", "enable"]), + constraint_max_range_segment_max_range_segment=dict(required=False, type="int"), + constraint_max_range_segment_severity=dict(required=False, type="str", choices=["low", "medium", "high"]), + constraint_max_range_segment_status=dict(required=False, type="str", choices=["disable", "enable"]), + + constraint_max_url_param_action=dict(required=False, type="str", choices=["allow", "block"]), + constraint_max_url_param_log=dict(required=False, type="str", choices=["disable", "enable"]), + constraint_max_url_param_max_url_param=dict(required=False, type="int"), + constraint_max_url_param_severity=dict(required=False, type="str", choices=["low", "medium", "high"]), + constraint_max_url_param_status=dict(required=False, type="str", choices=["disable", "enable"]), + + constraint_method_action=dict(required=False, type="str", choices=["allow", "block"]), + constraint_method_log=dict(required=False, type="str", choices=["disable", "enable"]), + constraint_method_severity=dict(required=False, type="str", choices=["low", "medium", "high"]), + constraint_method_status=dict(required=False, type="str", choices=["disable", "enable"]), + + constraint_param_length_action=dict(required=False, type="str", choices=["allow", "block"]), + constraint_param_length_length=dict(required=False, type="int"), + constraint_param_length_log=dict(required=False, type="str", choices=["disable", "enable"]), + constraint_param_length_severity=dict(required=False, type="str", choices=["low", "medium", "high"]), + constraint_param_length_status=dict(required=False, type="str", choices=["disable", "enable"]), + + constraint_url_param_length_action=dict(required=False, type="str", choices=["allow", "block"]), + constraint_url_param_length_length=dict(required=False, type="int"), + constraint_url_param_length_log=dict(required=False, type="str", choices=["disable", "enable"]), + constraint_url_param_length_severity=dict(required=False, type="str", choices=["low", "medium", "high"]), + constraint_url_param_length_status=dict(required=False, type="str", choices=["disable", "enable"]), + + constraint_version_action=dict(required=False, type="str", choices=["allow", "block"]), + constraint_version_log=dict(required=False, type="str", choices=["disable", "enable"]), + constraint_version_severity=dict(required=False, type="str", choices=["low", "medium", "high"]), + 
constraint_version_status=dict(required=False, type="str", choices=["disable", "enable"]), + method=dict(required=False, type="list"), + method_default_allowed_methods=dict(required=False, type="str", choices=["delete", + "get", + "head", + "options", + "post", + "put", + "trace", + "others", + "connect"]), + method_log=dict(required=False, type="str", choices=["disable", "enable"]), + method_severity=dict(required=False, type="str", choices=["low", "medium", "high"]), + method_status=dict(required=False, type="str", choices=["disable", "enable"]), + + method_method_policy_address=dict(required=False, type="str"), + method_method_policy_allowed_methods=dict(required=False, type="str", choices=["delete", + "get", + "head", + "options", + "post", + "put", + "trace", + "others", + "connect"]), + method_method_policy_pattern=dict(required=False, type="str"), + method_method_policy_regex=dict(required=False, type="str", choices=["disable", "enable"]), + signature=dict(required=False, type="list"), + signature_credit_card_detection_threshold=dict(required=False, type="int"), + signature_disabled_signature=dict(required=False, type="str"), + signature_disabled_sub_class=dict(required=False, type="str"), + + signature_custom_signature_action=dict(required=False, type="str", choices=["allow", "block", "erase"]), + signature_custom_signature_case_sensitivity=dict(required=False, type="str", choices=["disable", "enable"]), + signature_custom_signature_direction=dict(required=False, type="str", choices=["request", "response"]), + signature_custom_signature_log=dict(required=False, type="str", choices=["disable", "enable"]), + signature_custom_signature_name=dict(required=False, type="str"), + signature_custom_signature_pattern=dict(required=False, type="str"), + signature_custom_signature_severity=dict(required=False, type="str", choices=["low", "medium", "high"]), + signature_custom_signature_status=dict(required=False, type="str", choices=["disable", "enable"]), + signature_custom_signature_target=dict(required=False, type="str", choices=["arg", + "arg-name", + "req-body", + "req-cookie", + "req-cookie-name", + "req-filename", + "req-header", + "req-header-name", + "req-raw-uri", + "req-uri", + "resp-body", + "resp-hdr", + "resp-status"]), + + signature_main_class_action=dict(required=False, type="str", choices=["allow", "block", "erase"]), + signature_main_class_log=dict(required=False, type="str", choices=["disable", "enable"]), + signature_main_class_severity=dict(required=False, type="str", choices=["low", "medium", "high"]), + signature_main_class_status=dict(required=False, type="str", choices=["disable", "enable"]), + url_access=dict(required=False, type="list"), + url_access_action=dict(required=False, type="str", choices=["bypass", "permit", "block"]), + url_access_address=dict(required=False, type="str"), + url_access_log=dict(required=False, type="str", choices=["disable", "enable"]), + url_access_severity=dict(required=False, type="str", choices=["low", "medium", "high"]), + + url_access_access_pattern_negate=dict(required=False, type="str", choices=["disable", "enable"]), + url_access_access_pattern_pattern=dict(required=False, type="str"), + url_access_access_pattern_regex=dict(required=False, type="str", choices=["disable", "enable"]), + url_access_access_pattern_srcaddr=dict(required=False, type="str"), + + ) + + module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False, ) + # MODULE PARAMGRAM + paramgram = { + "mode": module.params["mode"], + "adom": 
module.params["adom"], + "name": module.params["name"], + "external": module.params["external"], + "extended-log": module.params["extended_log"], + "comment": module.params["comment"], + "address-list": { + "blocked-address": module.params["address_list_blocked_address"], + "blocked-log": module.params["address_list_blocked_log"], + "severity": module.params["address_list_severity"], + "status": module.params["address_list_status"], + "trusted-address": module.params["address_list_trusted_address"], + }, + "constraint": { + "content-length": { + "action": module.params["constraint_content_length_action"], + "length": module.params["constraint_content_length_length"], + "log": module.params["constraint_content_length_log"], + "severity": module.params["constraint_content_length_severity"], + "status": module.params["constraint_content_length_status"], + }, + "exception": { + "address": module.params["constraint_exception_address"], + "content-length": module.params["constraint_exception_content_length"], + "header-length": module.params["constraint_exception_header_length"], + "hostname": module.params["constraint_exception_hostname"], + "line-length": module.params["constraint_exception_line_length"], + "malformed": module.params["constraint_exception_malformed"], + "max-cookie": module.params["constraint_exception_max_cookie"], + "max-header-line": module.params["constraint_exception_max_header_line"], + "max-range-segment": module.params["constraint_exception_max_range_segment"], + "max-url-param": module.params["constraint_exception_max_url_param"], + "method": module.params["constraint_exception_method"], + "param-length": module.params["constraint_exception_param_length"], + "pattern": module.params["constraint_exception_pattern"], + "regex": module.params["constraint_exception_regex"], + "url-param-length": module.params["constraint_exception_url_param_length"], + "version": module.params["constraint_exception_version"], + }, + "header-length": { + "action": module.params["constraint_header_length_action"], + "length": module.params["constraint_header_length_length"], + "log": module.params["constraint_header_length_log"], + "severity": module.params["constraint_header_length_severity"], + "status": module.params["constraint_header_length_status"], + }, + "hostname": { + "action": module.params["constraint_hostname_action"], + "log": module.params["constraint_hostname_log"], + "severity": module.params["constraint_hostname_severity"], + "status": module.params["constraint_hostname_status"], + }, + "line-length": { + "action": module.params["constraint_line_length_action"], + "length": module.params["constraint_line_length_length"], + "log": module.params["constraint_line_length_log"], + "severity": module.params["constraint_line_length_severity"], + "status": module.params["constraint_line_length_status"], + }, + "malformed": { + "action": module.params["constraint_malformed_action"], + "log": module.params["constraint_malformed_log"], + "severity": module.params["constraint_malformed_severity"], + "status": module.params["constraint_malformed_status"], + }, + "max-cookie": { + "action": module.params["constraint_max_cookie_action"], + "log": module.params["constraint_max_cookie_log"], + "max-cookie": module.params["constraint_max_cookie_max_cookie"], + "severity": module.params["constraint_max_cookie_severity"], + "status": module.params["constraint_max_cookie_status"], + }, + "max-header-line": { + "action": module.params["constraint_max_header_line_action"], + "log": 
module.params["constraint_max_header_line_log"], + "max-header-line": module.params["constraint_max_header_line_max_header_line"], + "severity": module.params["constraint_max_header_line_severity"], + "status": module.params["constraint_max_header_line_status"], + }, + "max-range-segment": { + "action": module.params["constraint_max_range_segment_action"], + "log": module.params["constraint_max_range_segment_log"], + "max-range-segment": module.params["constraint_max_range_segment_max_range_segment"], + "severity": module.params["constraint_max_range_segment_severity"], + "status": module.params["constraint_max_range_segment_status"], + }, + "max-url-param": { + "action": module.params["constraint_max_url_param_action"], + "log": module.params["constraint_max_url_param_log"], + "max-url-param": module.params["constraint_max_url_param_max_url_param"], + "severity": module.params["constraint_max_url_param_severity"], + "status": module.params["constraint_max_url_param_status"], + }, + "method": { + "action": module.params["constraint_method_action"], + "log": module.params["constraint_method_log"], + "severity": module.params["constraint_method_severity"], + "status": module.params["constraint_method_status"], + }, + "param-length": { + "action": module.params["constraint_param_length_action"], + "length": module.params["constraint_param_length_length"], + "log": module.params["constraint_param_length_log"], + "severity": module.params["constraint_param_length_severity"], + "status": module.params["constraint_param_length_status"], + }, + "url-param-length": { + "action": module.params["constraint_url_param_length_action"], + "length": module.params["constraint_url_param_length_length"], + "log": module.params["constraint_url_param_length_log"], + "severity": module.params["constraint_url_param_length_severity"], + "status": module.params["constraint_url_param_length_status"], + }, + "version": { + "action": module.params["constraint_version_action"], + "log": module.params["constraint_version_log"], + "severity": module.params["constraint_version_severity"], + "status": module.params["constraint_version_status"], + }, + }, + "method": { + "default-allowed-methods": module.params["method_default_allowed_methods"], + "log": module.params["method_log"], + "severity": module.params["method_severity"], + "status": module.params["method_status"], + "method-policy": { + "address": module.params["method_method_policy_address"], + "allowed-methods": module.params["method_method_policy_allowed_methods"], + "pattern": module.params["method_method_policy_pattern"], + "regex": module.params["method_method_policy_regex"], + }, + }, + "signature": { + "credit-card-detection-threshold": module.params["signature_credit_card_detection_threshold"], + "disabled-signature": module.params["signature_disabled_signature"], + "disabled-sub-class": module.params["signature_disabled_sub_class"], + "custom-signature": { + "action": module.params["signature_custom_signature_action"], + "case-sensitivity": module.params["signature_custom_signature_case_sensitivity"], + "direction": module.params["signature_custom_signature_direction"], + "log": module.params["signature_custom_signature_log"], + "name": module.params["signature_custom_signature_name"], + "pattern": module.params["signature_custom_signature_pattern"], + "severity": module.params["signature_custom_signature_severity"], + "status": module.params["signature_custom_signature_status"], + "target": module.params["signature_custom_signature_target"], + }, + 
"main-class": { + "action": module.params["signature_main_class_action"], + "log": module.params["signature_main_class_log"], + "severity": module.params["signature_main_class_severity"], + "status": module.params["signature_main_class_status"], + }, + }, + "url-access": { + "action": module.params["url_access_action"], + "address": module.params["url_access_address"], + "log": module.params["url_access_log"], + "severity": module.params["url_access_severity"], + "access-pattern": { + "negate": module.params["url_access_access_pattern_negate"], + "pattern": module.params["url_access_access_pattern_pattern"], + "regex": module.params["url_access_access_pattern_regex"], + "srcaddr": module.params["url_access_access_pattern_srcaddr"], + } + } + } + + module.paramgram = paramgram + fmgr = None + if module._socket_path: + connection = Connection(module._socket_path) + fmgr = FortiManagerHandler(connection, module) + fmgr.tools = FMGRCommon() + else: + module.fail_json(**FAIL_SOCKET_MSG) + + list_overrides = ['address-list', 'constraint', 'method', 'signature', 'url-access'] + paramgram = fmgr.tools.paramgram_child_list_override(list_overrides=list_overrides, + paramgram=paramgram, module=module) + + results = DEFAULT_RESULT_OBJ + + try: + results = fmgr_waf_profile_modify(fmgr, paramgram) + fmgr.govern_response(module=module, results=results, + ansible_facts=fmgr.construct_ansible_facts(results, module.params, paramgram)) + + except Exception as err: + raise FMGBaseException(err) + + return module.exit_json(**results[1]) + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/network/fortimanager/fmgr_secprof_wanopt.py b/plugins/modules/network/fortimanager/fmgr_secprof_wanopt.py new file mode 100644 index 0000000000..03ee0580ba --- /dev/null +++ b/plugins/modules/network/fortimanager/fmgr_secprof_wanopt.py @@ -0,0 +1,689 @@ +#!/usr/bin/python +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'metadata_version': '1.1'} + +DOCUMENTATION = ''' +--- +module: fmgr_secprof_wanopt +notes: + - Full Documentation at U(https://ftnt-ansible-docs.readthedocs.io/en/latest/). +author: + - Luke Weighall (@lweighall) + - Andrew Welsh (@Ghilli3) + - Jim Huber (@p4r4n0y1ng) +short_description: WAN optimization +description: + - Manage WanOpt security profiles in FortiManager via API + +options: + adom: + description: + - The ADOM the configuration should belong to. + required: false + default: root + + mode: + description: + - Sets one of three modes for managing the object. + - Allows use of soft-adds instead of overwriting existing values + choices: ['add', 'set', 'delete', 'update'] + required: false + default: add + + transparent: + description: + - Enable/disable transparent mode. 
+ required: false
+ choices:
+ - disable
+ - enable
+
+ name:
+ description:
+ - Profile name.
+ required: false
+
+ comments:
+ description:
+ - Comment.
+ required: false
+
+ auth_group:
+ description:
+ - Optionally add an authentication group to restrict access to the WAN Optimization tunnel to
+ peers in the authentication group.
+ required: false
+
+ cifs:
+ description:
+ - EXPERTS ONLY! KNOWLEDGE OF FMGR JSON API IS REQUIRED!
+ - List of multiple child objects to be added. Expects a list of dictionaries.
+ - Dictionaries must use FortiManager API parameters, not the ansible ones listed below.
+ - If submitted, all other prefixed sub-parameters ARE IGNORED.
+ - This object is MUTUALLY EXCLUSIVE with its options.
+ - We expect that you know what you are doing with these list parameters, and are leveraging the JSON API Guide.
+ - WHEN IN DOUBT, USE THE SUB OPTIONS BELOW INSTEAD TO CREATE OBJECTS WITH MULTIPLE TASKS
+ required: false
+
+ cifs_byte_caching:
+ description:
+ - Enable/disable byte-caching for CIFS. Byte caching reduces the amount of traffic by caching
+ file data sent across the WAN and in the future serving it from the cache.
+ required: false
+ choices:
+ - disable
+ - enable
+
+ cifs_log_traffic:
+ description:
+ - Enable/disable logging.
+ required: false
+ choices:
+ - disable
+ - enable
+
+ cifs_port:
+ description:
+ - Single port number or port number range for CIFS. Only packets with a destination port number
+ that matches this port number or range are accepted by this profile.
+ required: false
+
+ cifs_prefer_chunking:
+ description:
+ - Select dynamic or fixed-size data chunking for CIFS WAN Optimization.
+ required: false
+ choices:
+ - dynamic
+ - fix
+
+ cifs_secure_tunnel:
+ description:
+ - Enable/disable securing the WAN Opt tunnel using SSL. Secure and non-secure tunnels use the
+ same TCP port (7810).
+ required: false
+ choices:
+ - disable
+ - enable
+
+ cifs_status:
+ description:
+ - Enable/disable CIFS WAN Optimization.
+ required: false
+ choices:
+ - disable
+ - enable
+
+ cifs_tunnel_sharing:
+ description:
+ - Tunnel sharing mode for aggressive/non-aggressive and/or interactive/non-interactive protocols.
+ required: false
+ choices:
+ - private
+ - shared
+ - express-shared
+
+ ftp:
+ description:
+ - EXPERTS ONLY! KNOWLEDGE OF FMGR JSON API IS REQUIRED!
+ - List of multiple child objects to be added. Expects a list of dictionaries.
+ - Dictionaries must use FortiManager API parameters, not the ansible ones listed below.
+ - If submitted, all other prefixed sub-parameters ARE IGNORED.
+ - This object is MUTUALLY EXCLUSIVE with its options.
+ - We expect that you know what you are doing with these list parameters, and are leveraging the JSON API Guide.
+ - WHEN IN DOUBT, USE THE SUB OPTIONS BELOW INSTEAD TO CREATE OBJECTS WITH MULTIPLE TASKS
+ required: false
+
+ ftp_byte_caching:
+ description:
+ - Enable/disable byte-caching for FTP. Byte caching reduces the amount of traffic by caching
+ file data sent across the WAN and in the future serving it from the cache.
+ required: false
+ choices:
+ - disable
+ - enable
+
+ ftp_log_traffic:
+ description:
+ - Enable/disable logging.
+ required: false
+ choices:
+ - disable
+ - enable
+
+ ftp_port:
+ description:
+ - Single port number or port number range for FTP. Only packets with a destination port number
+ that matches this port number or range are accepted by this profile.
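+ # Illustrative note, not part of the module schema: port options take a single
+ # port or a range, e.g. ftp_port: 21 or a hypothetical range like "2100-2110".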
+ required: false
+
+ ftp_prefer_chunking:
+ description:
+ - Select dynamic or fixed-size data chunking for FTP WAN Optimization.
+ required: false
+ choices:
+ - dynamic
+ - fix
+
+ ftp_secure_tunnel:
+ description:
+ - Enable/disable securing the WAN Opt tunnel using SSL. Secure and non-secure tunnels use the
+ same TCP port (7810).
+ required: false
+ choices:
+ - disable
+ - enable
+
+ ftp_status:
+ description:
+ - Enable/disable FTP WAN Optimization.
+ required: false
+ choices:
+ - disable
+ - enable
+
+ ftp_tunnel_sharing:
+ description:
+ - Tunnel sharing mode for aggressive/non-aggressive and/or interactive/non-interactive protocols.
+ required: false
+ choices:
+ - private
+ - shared
+ - express-shared
+
+ http:
+ description:
+ - EXPERTS ONLY! KNOWLEDGE OF FMGR JSON API IS REQUIRED!
+ - List of multiple child objects to be added. Expects a list of dictionaries.
+ - Dictionaries must use FortiManager API parameters, not the ansible ones listed below.
+ - If submitted, all other prefixed sub-parameters ARE IGNORED.
+ - This object is MUTUALLY EXCLUSIVE with its options.
+ - We expect that you know what you are doing with these list parameters, and are leveraging the JSON API Guide.
+ - WHEN IN DOUBT, USE THE SUB OPTIONS BELOW INSTEAD TO CREATE OBJECTS WITH MULTIPLE TASKS
+ required: false
+
+ http_byte_caching:
+ description:
+ - Enable/disable byte-caching for HTTP. Byte caching reduces the amount of traffic by caching
+ file data sent across the WAN and in the future serving it from the cache.
+ required: false
+ choices:
+ - disable
+ - enable
+
+ http_log_traffic:
+ description:
+ - Enable/disable logging.
+ required: false
+ choices:
+ - disable
+ - enable
+
+ http_port:
+ description:
+ - Single port number or port number range for HTTP. Only packets with a destination port number
+ that matches this port number or range are accepted by this profile.
+ required: false
+
+ http_prefer_chunking:
+ description:
+ - Select dynamic or fixed-size data chunking for HTTP WAN Optimization.
+ required: false
+ choices:
+ - dynamic
+ - fix
+
+ http_secure_tunnel:
+ description:
+ - Enable/disable securing the WAN Opt tunnel using SSL. Secure and non-secure tunnels use the
+ same TCP port (7810).
+ required: false
+ choices:
+ - disable
+ - enable
+
+ http_ssl:
+ description:
+ - Enable/disable SSL/TLS offloading (hardware acceleration) for HTTPS traffic in this tunnel.
+ required: false
+ choices:
+ - disable
+ - enable
+
+ http_ssl_port:
+ description:
+ - Port on which to expect HTTPS traffic for SSL/TLS offloading.
+ required: false
+
+ http_status:
+ description:
+ - Enable/disable HTTP WAN Optimization.
+ required: false
+ choices:
+ - disable
+ - enable
+
+ http_tunnel_non_http:
+ description:
+ - Configure how to process non-HTTP traffic when a profile configured for HTTP traffic accepts
+ a non-HTTP session. Can occur if an application sends non-HTTP traffic using an HTTP destination port.
+ required: false
+ choices:
+ - disable
+ - enable
+
+ http_tunnel_sharing:
+ description:
+ - Tunnel sharing mode for aggressive/non-aggressive and/or interactive/non-interactive protocols.
+ required: false
+ choices:
+ - private
+ - shared
+ - express-shared
+
+ http_unknown_http_version:
+ description:
+ - How to handle HTTP sessions that do not comply with HTTP 0.9, 1.0, or 1.1.
+ required: false
+ choices:
+ - best-effort
+ - reject
+ - tunnel
+
+ mapi:
+ description:
+ - EXPERTS ONLY! KNOWLEDGE OF FMGR JSON API IS REQUIRED!
+ - List of multiple child objects to be added. Expects a list of dictionaries.
+ - Dictionaries must use FortiManager API parameters, not the ansible ones listed below.
+ - If submitted, all other prefixed sub-parameters ARE IGNORED.
+ - This object is MUTUALLY EXCLUSIVE with its options.
+ - We expect that you know what you are doing with these list parameters, and are leveraging the JSON API Guide.
+ - WHEN IN DOUBT, USE THE SUB OPTIONS BELOW INSTEAD TO CREATE OBJECTS WITH MULTIPLE TASKS
+ required: false
+
+ mapi_byte_caching:
+ description:
+ - Enable/disable byte-caching for MAPI. Byte caching reduces the amount of traffic by caching
+ file data sent across the WAN and in the future serving it from the cache.
+ required: false
+ choices:
+ - disable
+ - enable
+
+ mapi_log_traffic:
+ description:
+ - Enable/disable logging.
+ required: false
+ choices:
+ - disable
+ - enable
+
+ mapi_port:
+ description:
+ - Single port number or port number range for MAPI. Only packets with a destination port number
+ that matches this port number or range are accepted by this profile.
+ required: false
+
+ mapi_secure_tunnel:
+ description:
+ - Enable/disable securing the WAN Opt tunnel using SSL. Secure and non-secure tunnels use the
+ same TCP port (7810).
+ required: false
+ choices:
+ - disable
+ - enable
+
+ mapi_status:
+ description:
+ - Enable/disable MAPI WAN Optimization.
+ required: false
+ choices:
+ - disable
+ - enable
+
+ mapi_tunnel_sharing:
+ description:
+ - Tunnel sharing mode for aggressive/non-aggressive and/or interactive/non-interactive protocols.
+ required: false
+ choices:
+ - private
+ - shared
+ - express-shared
+
+ tcp:
+ description:
+ - EXPERTS ONLY! KNOWLEDGE OF FMGR JSON API IS REQUIRED!
+ - List of multiple child objects to be added. Expects a list of dictionaries.
+ - Dictionaries must use FortiManager API parameters, not the ansible ones listed below.
+ - If submitted, all other prefixed sub-parameters ARE IGNORED.
+ - This object is MUTUALLY EXCLUSIVE with its options.
+ - We expect that you know what you are doing with these list parameters, and are leveraging the JSON API Guide.
+ - WHEN IN DOUBT, USE THE SUB OPTIONS BELOW INSTEAD TO CREATE OBJECTS WITH MULTIPLE TASKS
+ required: false
+
+ tcp_byte_caching:
+ description:
+ - Enable/disable byte-caching for TCP. Byte caching reduces the amount of traffic by caching
+ file data sent across the WAN and in the future serving it from the cache.
+ required: false
+ choices:
+ - disable
+ - enable
+
+ tcp_byte_caching_opt:
+ description:
+ - Select whether TCP byte-caching uses system memory only or both memory and disk space.
+ required: false
+ choices:
+ - mem-only
+ - mem-disk
+
+ tcp_log_traffic:
+ description:
+ - Enable/disable logging.
+ required: false
+ choices:
+ - disable
+ - enable
+
+ tcp_port:
+ description:
+ - Single port number or port number range for TCP. Only packets with a destination port number
+ that matches this port number or range are accepted by this profile.
+ required: false
+
+ tcp_secure_tunnel:
+ description:
+ - Enable/disable securing the WAN Opt tunnel using SSL. Secure and non-secure tunnels use the
+ same TCP port (7810).
+ required: false
+ choices:
+ - disable
+ - enable
+
+ tcp_ssl:
+ description:
+ - Enable/disable SSL/TLS offloading.
+ required: false
+ choices:
+ - disable
+ - enable
+
+ tcp_ssl_port:
+ description:
+ - Port on which to expect HTTPS traffic for SSL/TLS offloading.
+ required: false
+
+ tcp_status:
+ description:
+ - Enable/disable TCP WAN Optimization.
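+ # Illustrative note, not part of the module schema: like cifs/ftp/http/mapi above,
+ # the tcp group can instead be passed as one raw API dictionary (see the EXAMPLES
+ # section below), in which case the individual tcp_* options are ignored.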
+ required: false + choices: + - disable + - enable + + tcp_tunnel_sharing: + description: + - Tunnel sharing mode for aggressive/non-aggressive and/or interactive/non-interactive protocols. + required: false + choices: + - private + - shared + - express-shared + +''' + +EXAMPLES = ''' + - name: DELETE Profile + fmgr_secprof_wanopt: + name: "Ansible_WanOpt_Profile" + mode: "delete" + + - name: Create FMGR_WANOPT_PROFILE + fmgr_secprof_wanopt: + mode: "set" + adom: "root" + transparent: "enable" + name: "Ansible_WanOpt_Profile" + comments: "Created by Ansible" + cifs: {byte-caching: "enable", + log-traffic: "enable", + port: 80, + prefer-chunking: "dynamic", + status: "enable", + tunnel-sharing: "private"} + ftp: {byte-caching: "enable", + log-traffic: "enable", + port: 80, + prefer-chunking: "dynamic", + secure-tunnel: "disable", + status: "enable", + tunnel-sharing: "private"} +''' + +RETURN = """ +api_result: + description: full API response, includes status code and message + returned: always + type: str +""" + +from ansible.module_utils.basic import AnsibleModule, env_fallback +from ansible.module_utils.connection import Connection +from ansible_collections.fortinet.fortios.plugins.module_utils.network.fortimanager.fortimanager import FortiManagerHandler +from ansible_collections.fortinet.fortios.plugins.module_utils.network.fortimanager.common import FMGBaseException +from ansible_collections.fortinet.fortios.plugins.module_utils.network.fortimanager.common import FMGRCommon +from ansible_collections.fortinet.fortios.plugins.module_utils.network.fortimanager.common import DEFAULT_RESULT_OBJ +from ansible_collections.fortinet.fortios.plugins.module_utils.network.fortimanager.common import FAIL_SOCKET_MSG +from ansible_collections.fortinet.fortios.plugins.module_utils.network.fortimanager.common import prepare_dict +from ansible_collections.fortinet.fortios.plugins.module_utils.network.fortimanager.common import scrub_dict + + +############### +# START METHODS +############### + + +def fmgr_wanopt_profile_modify(fmgr, paramgram): + """ + :param fmgr: The fmgr object instance from fortimanager.py + :type fmgr: class object + :param paramgram: The formatted dictionary of options to process + :type paramgram: dict + :return: The response from the FortiManager + :rtype: dict + """ + + mode = paramgram["mode"] + adom = paramgram["adom"] + + response = DEFAULT_RESULT_OBJ + url = "" + datagram = {} + + # EVAL THE MODE PARAMETER FOR SET OR ADD + if mode in ['set', 'add', 'update']: + url = '/pm/config/adom/{adom}/obj/wanopt/profile'.format(adom=adom) + datagram = scrub_dict(prepare_dict(paramgram)) + + # EVAL THE MODE PARAMETER FOR DELETE + elif mode == "delete": + # SET THE CORRECT URL FOR DELETE + url = '/pm/config/adom/{adom}/obj/wanopt/profile/{name}'.format(adom=adom, name=paramgram["name"]) + datagram = {} + + response = fmgr.process_request(url, datagram, paramgram["mode"]) + + return response + + +############# +# END METHODS +############# + + +def main(): + argument_spec = dict( + adom=dict(type="str", default="root"), + mode=dict(choices=["add", "set", "delete", "update"], type="str", default="add"), + + transparent=dict(required=False, type="str", choices=["disable", "enable"]), + name=dict(required=False, type="str"), + comments=dict(required=False, type="str"), + auth_group=dict(required=False, type="str"), + cifs=dict(required=False, type="dict"), + cifs_byte_caching=dict(required=False, type="str", choices=["disable", "enable"]), + cifs_log_traffic=dict(required=False, 
type="str", choices=["disable", "enable"]), + cifs_port=dict(required=False, type="str"), + cifs_prefer_chunking=dict(required=False, type="str", choices=["dynamic", "fix"]), + cifs_secure_tunnel=dict(required=False, type="str", choices=["disable", "enable"]), + cifs_status=dict(required=False, type="str", choices=["disable", "enable"]), + cifs_tunnel_sharing=dict(required=False, type="str", choices=["private", "shared", "express-shared"]), + ftp=dict(required=False, type="dict"), + ftp_byte_caching=dict(required=False, type="str", choices=["disable", "enable"]), + ftp_log_traffic=dict(required=False, type="str", choices=["disable", "enable"]), + ftp_port=dict(required=False, type="str"), + ftp_prefer_chunking=dict(required=False, type="str", choices=["dynamic", "fix"]), + ftp_secure_tunnel=dict(required=False, type="str", choices=["disable", "enable"]), + ftp_status=dict(required=False, type="str", choices=["disable", "enable"]), + ftp_tunnel_sharing=dict(required=False, type="str", choices=["private", "shared", "express-shared"]), + http=dict(required=False, type="dict"), + http_byte_caching=dict(required=False, type="str", choices=["disable", "enable"]), + http_log_traffic=dict(required=False, type="str", choices=["disable", "enable"]), + http_port=dict(required=False, type="str"), + http_prefer_chunking=dict(required=False, type="str", choices=["dynamic", "fix"]), + http_secure_tunnel=dict(required=False, type="str", choices=["disable", "enable"]), + http_ssl=dict(required=False, type="str", choices=["disable", "enable"]), + http_ssl_port=dict(required=False, type="str"), + http_status=dict(required=False, type="str", choices=["disable", "enable"]), + http_tunnel_non_http=dict(required=False, type="str", choices=["disable", "enable"]), + http_tunnel_sharing=dict(required=False, type="str", choices=["private", "shared", "express-shared"]), + http_unknown_http_version=dict(required=False, type="str", choices=["best-effort", "reject", "tunnel"]), + mapi=dict(required=False, type="dict"), + mapi_byte_caching=dict(required=False, type="str", choices=["disable", "enable"]), + mapi_log_traffic=dict(required=False, type="str", choices=["disable", "enable"]), + mapi_port=dict(required=False, type="str"), + mapi_secure_tunnel=dict(required=False, type="str", choices=["disable", "enable"]), + mapi_status=dict(required=False, type="str", choices=["disable", "enable"]), + mapi_tunnel_sharing=dict(required=False, type="str", choices=["private", "shared", "express-shared"]), + tcp=dict(required=False, type="dict"), + tcp_byte_caching=dict(required=False, type="str", choices=["disable", "enable"]), + tcp_byte_caching_opt=dict(required=False, type="str", choices=["mem-only", "mem-disk"]), + tcp_log_traffic=dict(required=False, type="str", choices=["disable", "enable"]), + tcp_port=dict(required=False, type="str"), + tcp_secure_tunnel=dict(required=False, type="str", choices=["disable", "enable"]), + tcp_ssl=dict(required=False, type="str", choices=["disable", "enable"]), + tcp_ssl_port=dict(required=False, type="str"), + tcp_status=dict(required=False, type="str", choices=["disable", "enable"]), + tcp_tunnel_sharing=dict(required=False, type="str", choices=["private", "shared", "express-shared"]), + + ) + + module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False, ) + # MODULE PARAMGRAM + paramgram = { + "mode": module.params["mode"], + "adom": module.params["adom"], + "transparent": module.params["transparent"], + "name": module.params["name"], + "comments": 
module.params["comments"], + "auth-group": module.params["auth_group"], + "cifs": { + "byte-caching": module.params["cifs_byte_caching"], + "log-traffic": module.params["cifs_log_traffic"], + "port": module.params["cifs_port"], + "prefer-chunking": module.params["cifs_prefer_chunking"], + "secure-tunnel": module.params["cifs_secure_tunnel"], + "status": module.params["cifs_status"], + "tunnel-sharing": module.params["cifs_tunnel_sharing"], + }, + "ftp": { + "byte-caching": module.params["ftp_byte_caching"], + "log-traffic": module.params["ftp_log_traffic"], + "port": module.params["ftp_port"], + "prefer-chunking": module.params["ftp_prefer_chunking"], + "secure-tunnel": module.params["ftp_secure_tunnel"], + "status": module.params["ftp_status"], + "tunnel-sharing": module.params["ftp_tunnel_sharing"], + }, + "http": { + "byte-caching": module.params["http_byte_caching"], + "log-traffic": module.params["http_log_traffic"], + "port": module.params["http_port"], + "prefer-chunking": module.params["http_prefer_chunking"], + "secure-tunnel": module.params["http_secure_tunnel"], + "ssl": module.params["http_ssl"], + "ssl-port": module.params["http_ssl_port"], + "status": module.params["http_status"], + "tunnel-non-http": module.params["http_tunnel_non_http"], + "tunnel-sharing": module.params["http_tunnel_sharing"], + "unknown-http-version": module.params["http_unknown_http_version"], + }, + "mapi": { + "byte-caching": module.params["mapi_byte_caching"], + "log-traffic": module.params["mapi_log_traffic"], + "port": module.params["mapi_port"], + "secure-tunnel": module.params["mapi_secure_tunnel"], + "status": module.params["mapi_status"], + "tunnel-sharing": module.params["mapi_tunnel_sharing"], + }, + "tcp": { + "byte-caching": module.params["tcp_byte_caching"], + "byte-caching-opt": module.params["tcp_byte_caching_opt"], + "log-traffic": module.params["tcp_log_traffic"], + "port": module.params["tcp_port"], + "secure-tunnel": module.params["tcp_secure_tunnel"], + "ssl": module.params["tcp_ssl"], + "ssl-port": module.params["tcp_ssl_port"], + "status": module.params["tcp_status"], + "tunnel-sharing": module.params["tcp_tunnel_sharing"], + } + } + module.paramgram = paramgram + fmgr = None + if module._socket_path: + connection = Connection(module._socket_path) + fmgr = FortiManagerHandler(connection, module) + fmgr.tools = FMGRCommon() + else: + module.fail_json(**FAIL_SOCKET_MSG) + + list_overrides = ['cifs', 'ftp', 'http', 'mapi', 'tcp'] + paramgram = fmgr.tools.paramgram_child_list_override(list_overrides=list_overrides, + paramgram=paramgram, module=module) + + results = DEFAULT_RESULT_OBJ + + try: + results = fmgr_wanopt_profile_modify(fmgr, paramgram) + fmgr.govern_response(module=module, results=results, + ansible_facts=fmgr.construct_ansible_facts(results, module.params, paramgram)) + + except Exception as err: + raise FMGBaseException(err) + + return module.exit_json(**results[1]) + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/network/fortimanager/fmgr_secprof_web.py b/plugins/modules/network/fortimanager/fmgr_secprof_web.py new file mode 100644 index 0000000000..f06f1266cb --- /dev/null +++ b/plugins/modules/network/fortimanager/fmgr_secprof_web.py @@ -0,0 +1,1085 @@ +#!/usr/bin/python +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later 
version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'metadata_version': '1.1'} + +DOCUMENTATION = ''' +--- +module: fmgr_secprof_web +notes: + - Full Documentation at U(https://ftnt-ansible-docs.readthedocs.io/en/latest/). +author: + - Luke Weighall (@lweighall) + - Andrew Welsh (@Ghilli3) + - Jim Huber (@p4r4n0y1ng) +short_description: Manage web filter security profiles in FortiManager +description: + - Manage web filter security profiles in FortiManager through playbooks using the FMG API + +options: + adom: + description: + - The ADOM the configuration should belong to. + required: false + default: root + + mode: + description: + - Sets one of three modes for managing the object. + - Allows use of soft-adds instead of overwriting existing values + choices: ['add', 'set', 'delete', 'update'] + required: false + default: add + + youtube_channel_status: + description: + - YouTube channel filter status. + - choice | disable | Disable YouTube channel filter. + - choice | blacklist | Block matches. + - choice | whitelist | Allow matches. + required: false + choices: ["disable", "blacklist", "whitelist"] + + wisp_servers: + description: + - WISP servers. + required: false + + wisp_algorithm: + description: + - WISP server selection algorithm. + - choice | auto-learning | Select the lightest loading healthy server. + - choice | primary-secondary | Select the first healthy server in order. + - choice | round-robin | Select the next healthy server. + required: false + choices: ["auto-learning", "primary-secondary", "round-robin"] + + wisp: + description: + - Enable/disable web proxy WISP. + - choice | disable | Disable web proxy WISP. + - choice | enable | Enable web proxy WISP. + required: false + choices: ["disable", "enable"] + + web_url_log: + description: + - Enable/disable logging URL filtering. + - choice | disable | Disable setting. + - choice | enable | Enable setting. + required: false + choices: ["disable", "enable"] + + web_invalid_domain_log: + description: + - Enable/disable logging invalid domain names. + - choice | disable | Disable setting. + - choice | enable | Enable setting. + required: false + choices: ["disable", "enable"] + + web_ftgd_quota_usage: + description: + - Enable/disable logging daily quota usage. + - choice | disable | Disable setting. + - choice | enable | Enable setting. + required: false + choices: ["disable", "enable"] + + web_ftgd_err_log: + description: + - Enable/disable logging rating errors. + - choice | disable | Disable setting. + - choice | enable | Enable setting. + required: false + choices: ["disable", "enable"] + + web_filter_vbs_log: + description: + - Enable/disable logging VBS scripts. + - choice | disable | Disable setting. + - choice | enable | Enable setting. + required: false + choices: ["disable", "enable"] + + web_filter_unknown_log: + description: + - Enable/disable logging unknown scripts. + - choice | disable | Disable setting. + - choice | enable | Enable setting. 
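+  # Each "choice | value | meaning" line in this DOCUMENTATION block mirrors
+  # one entry of the option's choices list; the module accepts only the
+  # listed literals. For example (illustrative value only):
+  #
+  #   web_filter_unknown_log: "enable"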
+    required: false
+    choices: ["disable", "enable"]
+
+  web_filter_referer_log:
+    description:
+      - Enable/disable logging referrers.
+      - choice | disable | Disable setting.
+      - choice | enable | Enable setting.
+    required: false
+    choices: ["disable", "enable"]
+
+  web_filter_jscript_log:
+    description:
+      - Enable/disable logging JScripts.
+      - choice | disable | Disable setting.
+      - choice | enable | Enable setting.
+    required: false
+    choices: ["disable", "enable"]
+
+  web_filter_js_log:
+    description:
+      - Enable/disable logging Java scripts.
+      - choice | disable | Disable setting.
+      - choice | enable | Enable setting.
+    required: false
+    choices: ["disable", "enable"]
+
+  web_filter_cookie_removal_log:
+    description:
+      - Enable/disable logging blocked cookies.
+      - choice | disable | Disable setting.
+      - choice | enable | Enable setting.
+    required: false
+    choices: ["disable", "enable"]
+
+  web_filter_cookie_log:
+    description:
+      - Enable/disable logging cookie filtering.
+      - choice | disable | Disable setting.
+      - choice | enable | Enable setting.
+    required: false
+    choices: ["disable", "enable"]
+
+  web_filter_command_block_log:
+    description:
+      - Enable/disable logging blocked commands.
+      - choice | disable | Disable setting.
+      - choice | enable | Enable setting.
+    required: false
+    choices: ["disable", "enable"]
+
+  web_filter_applet_log:
+    description:
+      - Enable/disable logging Java applets.
+      - choice | disable | Disable setting.
+      - choice | enable | Enable setting.
+    required: false
+    choices: ["disable", "enable"]
+
+  web_filter_activex_log:
+    description:
+      - Enable/disable logging ActiveX.
+      - choice | disable | Disable setting.
+      - choice | enable | Enable setting.
+    required: false
+    choices: ["disable", "enable"]
+
+  web_extended_all_action_log:
+    description:
+      - Enable/disable extended any filter action logging for web filtering.
+      - choice | disable | Disable setting.
+      - choice | enable | Enable setting.
+    required: false
+    choices: ["disable", "enable"]
+
+  web_content_log:
+    description:
+      - Enable/disable logging blocked web content.
+      - choice | disable | Disable setting.
+      - choice | enable | Enable setting.
+    required: false
+    choices: ["disable", "enable"]
+
+  replacemsg_group:
+    description:
+      - Replacement message group.
+    required: false
+
+  post_action:
+    description:
+      - Action taken for HTTP POST traffic.
+      - choice | normal | Normal, POST requests are allowed.
+      - choice | block | POST requests are blocked.
+    required: false
+    choices: ["normal", "block"]
+
+  ovrd_perm:
+    description:
+      - FLAG Based Options. Specify multiple in list form.
+      - flag | bannedword-override | Banned word override.
+      - flag | urlfilter-override | URL filter override.
+      - flag | fortiguard-wf-override | FortiGuard Web Filter override.
+      - flag | contenttype-check-override | Content-type header override.
+    required: false
+    choices:
+      - bannedword-override
+      - urlfilter-override
+      - fortiguard-wf-override
+      - contenttype-check-override
+
+  options:
+    description:
+      - FLAG Based Options. Specify multiple in list form.
+      - flag | block-invalid-url | Block sessions that contain an invalid domain name.
+      - flag | jscript | JScript block.
+      - flag | js | JS block.
+      - flag | vbs | VB script block.
+      - flag | unknown | Unknown script block.
+      - flag | wf-referer | Referrer block.
+      - flag | intrinsic | Intrinsic script block.
+      - flag | wf-cookie | Cookie block.
+      - flag | per-user-bwl | Per-user black/white list filter.
+      - flag | activexfilter | ActiveX filter.
+ - flag | cookiefilter | Cookie filter. + - flag | javafilter | Java applet filter. + required: false + choices: + - block-invalid-url + - jscript + - js + - vbs + - unknown + - wf-referer + - intrinsic + - wf-cookie + - per-user-bwl + - activexfilter + - cookiefilter + - javafilter + + name: + description: + - Profile name. + required: false + + log_all_url: + description: + - Enable/disable logging all URLs visited. + - choice | disable | Disable setting. + - choice | enable | Enable setting. + required: false + choices: ["disable", "enable"] + + inspection_mode: + description: + - Web filtering inspection mode. + - choice | proxy | Proxy. + - choice | flow-based | Flow based. + required: false + choices: ["proxy", "flow-based"] + + https_replacemsg: + description: + - Enable replacement messages for HTTPS. + - choice | disable | Disable setting. + - choice | enable | Enable setting. + required: false + choices: ["disable", "enable"] + + extended_log: + description: + - Enable/disable extended logging for web filtering. + - choice | disable | Disable setting. + - choice | enable | Enable setting. + required: false + choices: ["disable", "enable"] + + comment: + description: + - Optional comments. + required: false + + ftgd_wf: + description: + - EXPERTS ONLY! KNOWLEDGE OF FMGR JSON API IS REQUIRED! + - List of multiple child objects to be added. Expects a list of dictionaries. + - Dictionaries must use FortiManager API parameters, not the ansible ones listed below. + - If submitted, all other prefixed sub-parameters ARE IGNORED. + - This object is MUTUALLY EXCLUSIVE with its options. + - We expect that you know what you are doing with these list parameters, and are leveraging the JSON API Guide. + - WHEN IN DOUBT, USE THE SUB OPTIONS BELOW INSTEAD TO CREATE OBJECTS WITH MULTIPLE TASKS + required: false + + ftgd_wf_exempt_quota: + description: + - Do not stop quota for these categories. + required: false + + ftgd_wf_max_quota_timeout: + description: + - Maximum FortiGuard quota used by single page view in seconds (excludes streams). + required: false + + ftgd_wf_options: + description: + - Options for FortiGuard Web Filter. + - FLAG Based Options. Specify multiple in list form. + - flag | error-allow | Allow web pages with a rating error to pass through. + - flag | rate-server-ip | Rate the server IP in addition to the domain name. + - flag | connect-request-bypass | Bypass connection which has CONNECT request. + - flag | ftgd-disable | Disable FortiGuard scanning. + required: false + choices: ["error-allow", "rate-server-ip", "connect-request-bypass", "ftgd-disable"] + + ftgd_wf_ovrd: + description: + - Allow web filter profile overrides. + required: false + + ftgd_wf_rate_crl_urls: + description: + - Enable/disable rating CRL by URL. + - choice | disable | Disable rating CRL by URL. + - choice | enable | Enable rating CRL by URL. + required: false + choices: ["disable", "enable"] + + ftgd_wf_rate_css_urls: + description: + - Enable/disable rating CSS by URL. + - choice | disable | Disable rating CSS by URL. + - choice | enable | Enable rating CSS by URL. + required: false + choices: ["disable", "enable"] + + ftgd_wf_rate_image_urls: + description: + - Enable/disable rating images by URL. + - choice | disable | Disable rating images by URL (blocked images are replaced with blanks). + - choice | enable | Enable rating images by URL (blocked images are replaced with blanks). 
+ required: false + choices: ["disable", "enable"] + + ftgd_wf_rate_javascript_urls: + description: + - Enable/disable rating JavaScript by URL. + - choice | disable | Disable rating JavaScript by URL. + - choice | enable | Enable rating JavaScript by URL. + required: false + choices: ["disable", "enable"] + + ftgd_wf_filters_action: + description: + - Action to take for matches. + - choice | block | Block access. + - choice | monitor | Allow access while logging the action. + - choice | warning | Allow access after warning the user. + - choice | authenticate | Authenticate user before allowing access. + required: false + choices: ["block", "monitor", "warning", "authenticate"] + + ftgd_wf_filters_auth_usr_grp: + description: + - Groups with permission to authenticate. + required: false + + ftgd_wf_filters_category: + description: + - Categories and groups the filter examines. + required: false + + ftgd_wf_filters_log: + description: + - Enable/disable logging. + - choice | disable | Disable setting. + - choice | enable | Enable setting. + required: false + choices: ["disable", "enable"] + + ftgd_wf_filters_override_replacemsg: + description: + - Override replacement message. + required: false + + ftgd_wf_filters_warn_duration: + description: + - Duration of warnings. + required: false + + ftgd_wf_filters_warning_duration_type: + description: + - Re-display warning after closing browser or after a timeout. + - choice | session | After session ends. + - choice | timeout | After timeout occurs. + required: false + choices: ["session", "timeout"] + + ftgd_wf_filters_warning_prompt: + description: + - Warning prompts in each category or each domain. + - choice | per-domain | Per-domain warnings. + - choice | per-category | Per-category warnings. + required: false + choices: ["per-domain", "per-category"] + + ftgd_wf_quota_category: + description: + - FortiGuard categories to apply quota to (category action must be set to monitor). + required: false + + ftgd_wf_quota_duration: + description: + - Duration of quota. + required: false + + ftgd_wf_quota_override_replacemsg: + description: + - Override replacement message. + required: false + + ftgd_wf_quota_type: + description: + - Quota type. + - choice | time | Use a time-based quota. + - choice | traffic | Use a traffic-based quota. + required: false + choices: ["time", "traffic"] + + ftgd_wf_quota_unit: + description: + - Traffic quota unit of measurement. + - choice | B | Quota in bytes. + - choice | KB | Quota in kilobytes. + - choice | MB | Quota in megabytes. + - choice | GB | Quota in gigabytes. + required: false + choices: ["B", "KB", "MB", "GB"] + + ftgd_wf_quota_value: + description: + - Traffic quota value. + required: false + + override: + description: + - EXPERTS ONLY! KNOWLEDGE OF FMGR JSON API IS REQUIRED! + - List of multiple child objects to be added. Expects a list of dictionaries. + - Dictionaries must use FortiManager API parameters, not the ansible ones listed below. + - If submitted, all other prefixed sub-parameters ARE IGNORED. + - This object is MUTUALLY EXCLUSIVE with its options. + - We expect that you know what you are doing with these list parameters, and are leveraging the JSON API Guide. + - WHEN IN DOUBT, USE THE SUB OPTIONS BELOW INSTEAD TO CREATE OBJECTS WITH MULTIPLE TASKS + required: false + + override_ovrd_cookie: + description: + - Allow/deny browser-based (cookie) overrides. + - choice | deny | Deny browser-based (cookie) override. + - choice | allow | Allow browser-based (cookie) override. 
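+  # The override_* sub-options are assembled into the profile's "override"
+  # child object by main(). A hedged fragment letting authenticated users
+  # create time-boxed overrides (illustrative values only):
+  #
+  #   - name: Allow per-user web filter overrides
+  #     fmgr_secprof_web:
+  #       mode: "set"
+  #       name: "Ansible_Web_Filter_Profile"
+  #       override_ovrd_cookie: "allow"
+  #       override_ovrd_dur_mode: "constant"
+  #       override_ovrd_scope: "user"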
+ required: false + choices: ["deny", "allow"] + + override_ovrd_dur: + description: + - Override duration. + required: false + + override_ovrd_dur_mode: + description: + - Override duration mode. + - choice | constant | Constant mode. + - choice | ask | Prompt for duration when initiating an override. + required: false + choices: ["constant", "ask"] + + override_ovrd_scope: + description: + - Override scope. + - choice | user | Override for the user. + - choice | user-group | Override for the user's group. + - choice | ip | Override for the initiating IP. + - choice | ask | Prompt for scope when initiating an override. + - choice | browser | Create browser-based (cookie) override. + required: false + choices: ["user", "user-group", "ip", "ask", "browser"] + + override_ovrd_user_group: + description: + - User groups with permission to use the override. + required: false + + override_profile: + description: + - Web filter profile with permission to create overrides. + required: false + + override_profile_attribute: + description: + - Profile attribute to retrieve from the RADIUS server. + - choice | User-Name | Use this attribute. + - choice | NAS-IP-Address | Use this attribute. + - choice | Framed-IP-Address | Use this attribute. + - choice | Framed-IP-Netmask | Use this attribute. + - choice | Filter-Id | Use this attribute. + - choice | Login-IP-Host | Use this attribute. + - choice | Reply-Message | Use this attribute. + - choice | Callback-Number | Use this attribute. + - choice | Callback-Id | Use this attribute. + - choice | Framed-Route | Use this attribute. + - choice | Framed-IPX-Network | Use this attribute. + - choice | Class | Use this attribute. + - choice | Called-Station-Id | Use this attribute. + - choice | Calling-Station-Id | Use this attribute. + - choice | NAS-Identifier | Use this attribute. + - choice | Proxy-State | Use this attribute. + - choice | Login-LAT-Service | Use this attribute. + - choice | Login-LAT-Node | Use this attribute. + - choice | Login-LAT-Group | Use this attribute. + - choice | Framed-AppleTalk-Zone | Use this attribute. + - choice | Acct-Session-Id | Use this attribute. + - choice | Acct-Multi-Session-Id | Use this attribute. + required: false + choices: + - User-Name + - NAS-IP-Address + - Framed-IP-Address + - Framed-IP-Netmask + - Filter-Id + - Login-IP-Host + - Reply-Message + - Callback-Number + - Callback-Id + - Framed-Route + - Framed-IPX-Network + - Class + - Called-Station-Id + - Calling-Station-Id + - NAS-Identifier + - Proxy-State + - Login-LAT-Service + - Login-LAT-Node + - Login-LAT-Group + - Framed-AppleTalk-Zone + - Acct-Session-Id + - Acct-Multi-Session-Id + + override_profile_type: + description: + - Override profile type. + - choice | list | Profile chosen from list. + - choice | radius | Profile determined by RADIUS server. + required: false + choices: ["list", "radius"] + + url_extraction: + description: + - EXPERTS ONLY! KNOWLEDGE OF FMGR JSON API IS REQUIRED! + - List of multiple child objects to be added. Expects a list of dictionaries. + - Dictionaries must use FortiManager API parameters, not the ansible ones listed below. + - If submitted, all other prefixed sub-parameters ARE IGNORED. + - This object is MUTUALLY EXCLUSIVE with its options. + - We expect that you know what you are doing with these list parameters, and are leveraging the JSON API Guide. 
+ - WHEN IN DOUBT, USE THE SUB OPTIONS BELOW INSTEAD TO CREATE OBJECTS WITH MULTIPLE TASKS + required: false + + url_extraction_redirect_header: + description: + - HTTP header name to use for client redirect on blocked requests + required: false + + url_extraction_redirect_no_content: + description: + - Enable / Disable empty message-body entity in HTTP response + - choice | disable | Disable setting. + - choice | enable | Enable setting. + required: false + choices: ["disable", "enable"] + + url_extraction_redirect_url: + description: + - HTTP header value to use for client redirect on blocked requests + required: false + + url_extraction_server_fqdn: + description: + - URL extraction server FQDN (fully qualified domain name) + required: false + + url_extraction_status: + description: + - Enable URL Extraction + - choice | disable | Disable setting. + - choice | enable | Enable setting. + required: false + choices: ["disable", "enable"] + + web: + description: + - EXPERTS ONLY! KNOWLEDGE OF FMGR JSON API IS REQUIRED! + - List of multiple child objects to be added. Expects a list of dictionaries. + - Dictionaries must use FortiManager API parameters, not the ansible ones listed below. + - If submitted, all other prefixed sub-parameters ARE IGNORED. + - This object is MUTUALLY EXCLUSIVE with its options. + - We expect that you know what you are doing with these list parameters, and are leveraging the JSON API Guide. + - WHEN IN DOUBT, USE THE SUB OPTIONS BELOW INSTEAD TO CREATE OBJECTS WITH MULTIPLE TASKS + required: false + + web_blacklist: + description: + - Enable/disable automatic addition of URLs detected by FortiSandbox to blacklist. + - choice | disable | Disable setting. + - choice | enable | Enable setting. + required: false + choices: ["disable", "enable"] + + web_bword_table: + description: + - Banned word table ID. + required: false + + web_bword_threshold: + description: + - Banned word score threshold. + required: false + + web_content_header_list: + description: + - Content header list. + required: false + + web_keyword_match: + description: + - Search keywords to log when match is found. + required: false + + web_log_search: + description: + - Enable/disable logging all search phrases. + - choice | disable | Disable setting. + - choice | enable | Enable setting. + required: false + choices: ["disable", "enable"] + + web_safe_search: + description: + - Safe search type. + - FLAG Based Options. Specify multiple in list form. + - flag | url | Insert safe search string into URL. + - flag | header | Insert safe search header. + required: false + choices: ["url", "header"] + + web_urlfilter_table: + description: + - URL filter table ID. + required: false + + web_whitelist: + description: + - FortiGuard whitelist settings. + - FLAG Based Options. Specify multiple in list form. + - flag | exempt-av | Exempt antivirus. + - flag | exempt-webcontent | Exempt web content. + - flag | exempt-activex-java-cookie | Exempt ActiveX-JAVA-Cookie. + - flag | exempt-dlp | Exempt DLP. + - flag | exempt-rangeblock | Exempt RangeBlock. + - flag | extended-log-others | Support extended log. + required: false + choices: + - exempt-av + - exempt-webcontent + - exempt-activex-java-cookie + - exempt-dlp + - exempt-rangeblock + - extended-log-others + + web_youtube_restrict: + description: + - YouTube EDU filter level. + - choice | strict | Strict access for YouTube. + - choice | none | Full access for YouTube. + - choice | moderate | Moderate access for YouTube. 
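+  # Options documented as "FLAG Based" (e.g. ovrd_perm, options,
+  # web_whitelist) are declared type="list" in the argument spec, so several
+  # flags can be passed at once, while plain choice options take a single
+  # string. A hedged fragment (illustrative values only):
+  #
+  #   options: ["js", "vbs", "unknown"]
+  #   web_youtube_restrict: "strict"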
+ required: false + choices: ["strict", "none", "moderate"] + + youtube_channel_filter: + description: + - EXPERTS ONLY! KNOWLEDGE OF FMGR JSON API IS REQUIRED! + - List of multiple child objects to be added. Expects a list of dictionaries. + - Dictionaries must use FortiManager API parameters, not the ansible ones listed below. + - If submitted, all other prefixed sub-parameters ARE IGNORED. + - This object is MUTUALLY EXCLUSIVE with its options. + - We expect that you know what you are doing with these list parameters, and are leveraging the JSON API Guide. + - WHEN IN DOUBT, USE THE SUB OPTIONS BELOW INSTEAD TO CREATE OBJECTS WITH MULTIPLE TASKS + required: false + + youtube_channel_filter_channel_id: + description: + - YouTube channel ID to be filtered. + required: false + + youtube_channel_filter_comment: + description: + - Comment. + required: false + + +''' + +EXAMPLES = ''' + - name: DELETE Profile + fmgr_secprof_web: + name: "Ansible_Web_Filter_Profile" + mode: "delete" + + - name: CREATE Profile + fmgr_secprof_web: + name: "Ansible_Web_Filter_Profile" + comment: "Created by Ansible Module TEST" + mode: "set" + extended_log: "enable" + inspection_mode: "proxy" + log_all_url: "enable" + options: "js" + ovrd_perm: "bannedword-override" + post_action: "block" + web_content_log: "enable" + web_extended_all_action_log: "enable" + web_filter_activex_log: "enable" + web_filter_applet_log: "enable" + web_filter_command_block_log: "enable" + web_filter_cookie_log: "enable" + web_filter_cookie_removal_log: "enable" + web_filter_js_log: "enable" + web_filter_jscript_log: "enable" + web_filter_referer_log: "enable" + web_filter_unknown_log: "enable" + web_filter_vbs_log: "enable" + web_ftgd_err_log: "enable" + web_ftgd_quota_usage: "enable" + web_invalid_domain_log: "enable" + web_url_log: "enable" + wisp: "enable" + wisp_algorithm: "auto-learning" + youtube_channel_status: "blacklist" +''' + +RETURN = """ +api_result: + description: full API response, includes status code and message + returned: always + type: str +""" + +from ansible.module_utils.basic import AnsibleModule, env_fallback +from ansible.module_utils.connection import Connection +from ansible_collections.fortinet.fortios.plugins.module_utils.network.fortimanager.fortimanager import FortiManagerHandler +from ansible_collections.fortinet.fortios.plugins.module_utils.network.fortimanager.common import FMGBaseException +from ansible_collections.fortinet.fortios.plugins.module_utils.network.fortimanager.common import FMGRCommon +from ansible_collections.fortinet.fortios.plugins.module_utils.network.fortimanager.common import FMGRMethods +from ansible_collections.fortinet.fortios.plugins.module_utils.network.fortimanager.common import DEFAULT_RESULT_OBJ +from ansible_collections.fortinet.fortios.plugins.module_utils.network.fortimanager.common import FAIL_SOCKET_MSG +from ansible_collections.fortinet.fortios.plugins.module_utils.network.fortimanager.common import prepare_dict +from ansible_collections.fortinet.fortios.plugins.module_utils.network.fortimanager.common import scrub_dict + + +def fmgr_webfilter_profile_modify(fmgr, paramgram): + + mode = paramgram["mode"] + adom = paramgram["adom"] + + response = DEFAULT_RESULT_OBJ + url = "" + datagram = {} + + # EVAL THE MODE PARAMETER FOR SET OR ADD + if mode in ['set', 'add', 'update']: + url = '/pm/config/adom/{adom}/obj/webfilter/profile'.format(adom=adom) + datagram = scrub_dict(prepare_dict(paramgram)) + + # EVAL THE MODE PARAMETER FOR DELETE + elif mode == "delete": + # SET THE 
CORRECT URL FOR DELETE + url = '/pm/config/adom/{adom}/obj/webfilter/profile/{name}'.format(adom=adom, name=paramgram["name"]) + datagram = {} + + response = fmgr.process_request(url, datagram, paramgram["mode"]) + + return response + + +############# +# END METHODS +############# + + +def main(): + argument_spec = dict( + adom=dict(type="str", default="root"), + mode=dict(choices=["add", "set", "delete", "update"], type="str", default="add"), + + youtube_channel_status=dict(required=False, type="str", choices=["disable", "blacklist", "whitelist"]), + wisp_servers=dict(required=False, type="str"), + wisp_algorithm=dict(required=False, type="str", choices=["auto-learning", "primary-secondary", "round-robin"]), + wisp=dict(required=False, type="str", choices=["disable", "enable"]), + web_url_log=dict(required=False, type="str", choices=["disable", "enable"]), + web_invalid_domain_log=dict(required=False, type="str", choices=["disable", "enable"]), + web_ftgd_quota_usage=dict(required=False, type="str", choices=["disable", "enable"]), + web_ftgd_err_log=dict(required=False, type="str", choices=["disable", "enable"]), + web_filter_vbs_log=dict(required=False, type="str", choices=["disable", "enable"]), + web_filter_unknown_log=dict(required=False, type="str", choices=["disable", "enable"]), + web_filter_referer_log=dict(required=False, type="str", choices=["disable", "enable"]), + web_filter_jscript_log=dict(required=False, type="str", choices=["disable", "enable"]), + web_filter_js_log=dict(required=False, type="str", choices=["disable", "enable"]), + web_filter_cookie_removal_log=dict(required=False, type="str", choices=["disable", "enable"]), + web_filter_cookie_log=dict(required=False, type="str", choices=["disable", "enable"]), + web_filter_command_block_log=dict(required=False, type="str", choices=["disable", "enable"]), + web_filter_applet_log=dict(required=False, type="str", choices=["disable", "enable"]), + web_filter_activex_log=dict(required=False, type="str", choices=["disable", "enable"]), + web_extended_all_action_log=dict(required=False, type="str", choices=["disable", "enable"]), + web_content_log=dict(required=False, type="str", choices=["disable", "enable"]), + replacemsg_group=dict(required=False, type="str"), + post_action=dict(required=False, type="str", choices=["normal", "block"]), + ovrd_perm=dict(required=False, type="list", choices=["bannedword-override", + "urlfilter-override", + "fortiguard-wf-override", + "contenttype-check-override"]), + options=dict(required=False, type="list", choices=["block-invalid-url", + "jscript", + "js", + "vbs", + "unknown", + "wf-referer", + "intrinsic", + "wf-cookie", + "per-user-bwl", + "activexfilter", + "cookiefilter", + "javafilter"]), + name=dict(required=False, type="str"), + log_all_url=dict(required=False, type="str", choices=["disable", "enable"]), + inspection_mode=dict(required=False, type="str", choices=["proxy", "flow-based"]), + https_replacemsg=dict(required=False, type="str", choices=["disable", "enable"]), + extended_log=dict(required=False, type="str", choices=["disable", "enable"]), + comment=dict(required=False, type="str"), + ftgd_wf=dict(required=False, type="list"), + ftgd_wf_exempt_quota=dict(required=False, type="str"), + ftgd_wf_max_quota_timeout=dict(required=False, type="int"), + ftgd_wf_options=dict(required=False, type="str", choices=["error-allow", "rate-server-ip", + "connect-request-bypass", "ftgd-disable"]), + ftgd_wf_ovrd=dict(required=False, type="str"), + ftgd_wf_rate_crl_urls=dict(required=False, 
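+        # AnsibleModule enforces these choices lists at parse time, and for
+        # type="list" options it validates every element, so a bad value fails
+        # the task before any API call is made. A minimal sketch of the
+        # behaviour (hypothetical spec, error wording approximate):
+        #
+        #   module = AnsibleModule(argument_spec=dict(
+        #       unit=dict(type="str", choices=["B", "KB", "MB", "GB"])))
+        #   # unit: "TB" -> fails with "value of unit must be one of: B, KB, MB, GB"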
type="str", choices=["disable", "enable"]), + ftgd_wf_rate_css_urls=dict(required=False, type="str", choices=["disable", "enable"]), + ftgd_wf_rate_image_urls=dict(required=False, type="str", choices=["disable", "enable"]), + ftgd_wf_rate_javascript_urls=dict(required=False, type="str", choices=["disable", "enable"]), + + ftgd_wf_filters_action=dict(required=False, type="str", choices=["block", "monitor", + "warning", "authenticate"]), + ftgd_wf_filters_auth_usr_grp=dict(required=False, type="str"), + ftgd_wf_filters_category=dict(required=False, type="str"), + ftgd_wf_filters_log=dict(required=False, type="str", choices=["disable", "enable"]), + ftgd_wf_filters_override_replacemsg=dict(required=False, type="str"), + ftgd_wf_filters_warn_duration=dict(required=False, type="str"), + ftgd_wf_filters_warning_duration_type=dict(required=False, type="str", choices=["session", "timeout"]), + ftgd_wf_filters_warning_prompt=dict(required=False, type="str", choices=["per-domain", "per-category"]), + + ftgd_wf_quota_category=dict(required=False, type="str"), + ftgd_wf_quota_duration=dict(required=False, type="str"), + ftgd_wf_quota_override_replacemsg=dict(required=False, type="str"), + ftgd_wf_quota_type=dict(required=False, type="str", choices=["time", "traffic"]), + ftgd_wf_quota_unit=dict(required=False, type="str", choices=["B", "KB", "MB", "GB"]), + ftgd_wf_quota_value=dict(required=False, type="int"), + override=dict(required=False, type="list"), + override_ovrd_cookie=dict(required=False, type="str", choices=["deny", "allow"]), + override_ovrd_dur=dict(required=False, type="str"), + override_ovrd_dur_mode=dict(required=False, type="str", choices=["constant", "ask"]), + override_ovrd_scope=dict(required=False, type="str", choices=["user", "user-group", "ip", "ask", "browser"]), + override_ovrd_user_group=dict(required=False, type="str"), + override_profile=dict(required=False, type="str"), + override_profile_attribute=dict(required=False, type="list", choices=["User-Name", + "NAS-IP-Address", + "Framed-IP-Address", + "Framed-IP-Netmask", + "Filter-Id", + "Login-IP-Host", + "Reply-Message", + "Callback-Number", + "Callback-Id", + "Framed-Route", + "Framed-IPX-Network", + "Class", + "Called-Station-Id", + "Calling-Station-Id", + "NAS-Identifier", + "Proxy-State", + "Login-LAT-Service", + "Login-LAT-Node", + "Login-LAT-Group", + "Framed-AppleTalk-Zone", + "Acct-Session-Id", + "Acct-Multi-Session-Id"]), + override_profile_type=dict(required=False, type="str", choices=["list", "radius"]), + url_extraction=dict(required=False, type="list"), + url_extraction_redirect_header=dict(required=False, type="str"), + url_extraction_redirect_no_content=dict(required=False, type="str", choices=["disable", "enable"]), + url_extraction_redirect_url=dict(required=False, type="str"), + url_extraction_server_fqdn=dict(required=False, type="str"), + url_extraction_status=dict(required=False, type="str", choices=["disable", "enable"]), + web=dict(required=False, type="list"), + web_blacklist=dict(required=False, type="str", choices=["disable", "enable"]), + web_bword_table=dict(required=False, type="str"), + web_bword_threshold=dict(required=False, type="int"), + web_content_header_list=dict(required=False, type="str"), + web_keyword_match=dict(required=False, type="str"), + web_log_search=dict(required=False, type="str", choices=["disable", "enable"]), + web_safe_search=dict(required=False, type="str", choices=["url", "header"]), + web_urlfilter_table=dict(required=False, type="str"), + 
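+        # The list-valued expert parameters (ftgd_wf, override, url_extraction,
+        # web, youtube_channel_filter) are mutually exclusive with their
+        # prefixed sub-options; paramgram_child_list_override() later swaps the
+        # raw user-supplied object in whenever it is present. A minimal sketch
+        # of that behaviour, assuming it simply prefers the raw value
+        # (hypothetical re-implementation, not the shipped helper):
+        #
+        #   def child_list_override(paramgram, params, children):
+        #       for child in children:
+        #           raw = params.get(child.replace('-', '_'))
+        #           if raw:
+        #               paramgram[child] = raw
+        #       return paramgram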
web_whitelist=dict(required=False, type="list", choices=["exempt-av", + "exempt-webcontent", + "exempt-activex-java-cookie", + "exempt-dlp", + "exempt-rangeblock", + "extended-log-others"]), + web_youtube_restrict=dict(required=False, type="str", choices=["strict", "none", "moderate"]), + youtube_channel_filter=dict(required=False, type="list"), + youtube_channel_filter_channel_id=dict(required=False, type="str"), + youtube_channel_filter_comment=dict(required=False, type="str"), + + ) + + module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False, ) + # MODULE PARAMGRAM + paramgram = { + "mode": module.params["mode"], + "adom": module.params["adom"], + "youtube-channel-status": module.params["youtube_channel_status"], + "wisp-servers": module.params["wisp_servers"], + "wisp-algorithm": module.params["wisp_algorithm"], + "wisp": module.params["wisp"], + "web-url-log": module.params["web_url_log"], + "web-invalid-domain-log": module.params["web_invalid_domain_log"], + "web-ftgd-quota-usage": module.params["web_ftgd_quota_usage"], + "web-ftgd-err-log": module.params["web_ftgd_err_log"], + "web-filter-vbs-log": module.params["web_filter_vbs_log"], + "web-filter-unknown-log": module.params["web_filter_unknown_log"], + "web-filter-referer-log": module.params["web_filter_referer_log"], + "web-filter-jscript-log": module.params["web_filter_jscript_log"], + "web-filter-js-log": module.params["web_filter_js_log"], + "web-filter-cookie-removal-log": module.params["web_filter_cookie_removal_log"], + "web-filter-cookie-log": module.params["web_filter_cookie_log"], + "web-filter-command-block-log": module.params["web_filter_command_block_log"], + "web-filter-applet-log": module.params["web_filter_applet_log"], + "web-filter-activex-log": module.params["web_filter_activex_log"], + "web-extended-all-action-log": module.params["web_extended_all_action_log"], + "web-content-log": module.params["web_content_log"], + "replacemsg-group": module.params["replacemsg_group"], + "post-action": module.params["post_action"], + "ovrd-perm": module.params["ovrd_perm"], + "options": module.params["options"], + "name": module.params["name"], + "log-all-url": module.params["log_all_url"], + "inspection-mode": module.params["inspection_mode"], + "https-replacemsg": module.params["https_replacemsg"], + "extended-log": module.params["extended_log"], + "comment": module.params["comment"], + "ftgd-wf": { + "exempt-quota": module.params["ftgd_wf_exempt_quota"], + "max-quota-timeout": module.params["ftgd_wf_max_quota_timeout"], + "options": module.params["ftgd_wf_options"], + "ovrd": module.params["ftgd_wf_ovrd"], + "rate-crl-urls": module.params["ftgd_wf_rate_crl_urls"], + "rate-css-urls": module.params["ftgd_wf_rate_css_urls"], + "rate-image-urls": module.params["ftgd_wf_rate_image_urls"], + "rate-javascript-urls": module.params["ftgd_wf_rate_javascript_urls"], + "filters": { + "action": module.params["ftgd_wf_filters_action"], + "auth-usr-grp": module.params["ftgd_wf_filters_auth_usr_grp"], + "category": module.params["ftgd_wf_filters_category"], + "log": module.params["ftgd_wf_filters_log"], + "override-replacemsg": module.params["ftgd_wf_filters_override_replacemsg"], + "warn-duration": module.params["ftgd_wf_filters_warn_duration"], + "warning-duration-type": module.params["ftgd_wf_filters_warning_duration_type"], + "warning-prompt": module.params["ftgd_wf_filters_warning_prompt"], + }, + "quota": { + "category": module.params["ftgd_wf_quota_category"], + "duration": 
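+            # Unlike the flat cifs/ftp/http children in the wanopt module,
+            # ftgd-wf nests two levels deep (filters and quota), so the payload
+            # mirrors FortiManager's object tree. Hedged shape of the scrubbed
+            # request body (illustrative values only):
+            #
+            #   {"ftgd-wf": {"quota": {"type": "time", "value": 300}}}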
module.params["ftgd_wf_quota_duration"], + "override-replacemsg": module.params["ftgd_wf_quota_override_replacemsg"], + "type": module.params["ftgd_wf_quota_type"], + "unit": module.params["ftgd_wf_quota_unit"], + "value": module.params["ftgd_wf_quota_value"], + }, + }, + "override": { + "ovrd-cookie": module.params["override_ovrd_cookie"], + "ovrd-dur": module.params["override_ovrd_dur"], + "ovrd-dur-mode": module.params["override_ovrd_dur_mode"], + "ovrd-scope": module.params["override_ovrd_scope"], + "ovrd-user-group": module.params["override_ovrd_user_group"], + "profile": module.params["override_profile"], + "profile-attribute": module.params["override_profile_attribute"], + "profile-type": module.params["override_profile_type"], + }, + "url-extraction": { + "redirect-header": module.params["url_extraction_redirect_header"], + "redirect-no-content": module.params["url_extraction_redirect_no_content"], + "redirect-url": module.params["url_extraction_redirect_url"], + "server-fqdn": module.params["url_extraction_server_fqdn"], + "status": module.params["url_extraction_status"], + }, + "web": { + "blacklist": module.params["web_blacklist"], + "bword-table": module.params["web_bword_table"], + "bword-threshold": module.params["web_bword_threshold"], + "content-header-list": module.params["web_content_header_list"], + "keyword-match": module.params["web_keyword_match"], + "log-search": module.params["web_log_search"], + "safe-search": module.params["web_safe_search"], + "urlfilter-table": module.params["web_urlfilter_table"], + "whitelist": module.params["web_whitelist"], + "youtube-restrict": module.params["web_youtube_restrict"], + }, + "youtube-channel-filter": { + "channel-id": module.params["youtube_channel_filter_channel_id"], + "comment": module.params["youtube_channel_filter_comment"], + } + } + module.paramgram = paramgram + fmgr = None + if module._socket_path: + connection = Connection(module._socket_path) + fmgr = FortiManagerHandler(connection, module) + fmgr.tools = FMGRCommon() + else: + module.fail_json(**FAIL_SOCKET_MSG) + + list_overrides = ['ftgd-wf', 'override', 'url-extraction', 'web', 'youtube-channel-filter'] + paramgram = fmgr.tools.paramgram_child_list_override(list_overrides=list_overrides, + paramgram=paramgram, module=module) + + results = DEFAULT_RESULT_OBJ + + try: + + results = fmgr_webfilter_profile_modify(fmgr, paramgram) + fmgr.govern_response(module=module, results=results, + ansible_facts=fmgr.construct_ansible_facts(results, module.params, paramgram)) + + except Exception as err: + raise FMGBaseException(err) + + return module.exit_json(**results[1]) + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/network/ftd/ftd_configuration.py b/plugins/modules/network/ftd/ftd_configuration.py new file mode 100644 index 0000000000..41cded30d8 --- /dev/null +++ b/plugins/modules/network/ftd/ftd_configuration.py @@ -0,0 +1,139 @@ +#!/usr/bin/python + +# Copyright (c) 2018 Cisco and/or its affiliates. +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. 
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.
+#
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+DOCUMENTATION = '''
+---
+module: ftd_configuration
+short_description: Manages configuration on Cisco FTD devices over REST API
+description:
+  - Manages configuration on Cisco FTD devices including creating, updating, removing configuration objects,
+    scheduling and starting jobs, deploying pending changes, etc. All operations are performed over REST API.
+author: "Cisco Systems, Inc. (@annikulin)"
+options:
+  operation:
+    description:
+      - The name of the operation to execute. Commonly, the operation starts with 'add', 'edit', 'get', 'upsert'
+        or 'delete' verbs, but can have an arbitrary name too.
+    required: true
+    type: str
+  data:
+    description:
+      - Key-value pairs that should be sent as body parameters in a REST API call.
+    type: dict
+  query_params:
+    description:
+      - Key-value pairs that should be sent as query parameters in a REST API call.
+    type: dict
+  path_params:
+    description:
+      - Key-value pairs that should be sent as path parameters in a REST API call.
+    type: dict
+  register_as:
+    description:
+      - Specifies the Ansible fact name that is used to register the received response from the FTD device.
+    type: str
+  filters:
+    description:
+      - Key-value dict that represents equality filters. Every key is a property name and the value is its
+        desired value. If multiple filters are present, they are combined with logical operator AND.
+    type: dict
+'''
+
+EXAMPLES = """
+- name: Create a network object
+  ftd_configuration:
+    operation: "addNetworkObject"
+    data:
+      name: "Ansible-network-host"
+      description: "From Ansible with love"
+      subType: "HOST"
+      value: "192.168.2.0"
+      dnsResolution: "IPV4_AND_IPV6"
+      type: "networkobject"
+      isSystemDefined: false
+    register_as: "hostNetwork"
+
+- name: Delete the network object
+  ftd_configuration:
+    operation: "deleteNetworkObject"
+    path_params:
+      objId: "{{ hostNetwork['id'] }}"
+"""
+
+RETURN = """
+response:
+  description: HTTP response returned from the API call.
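+# The filters parameter pairs naturally with the get/list operations
+# documented above: every key/value pair must match for an object to be
+# returned, combined with logical AND. A hedged task fragment (the
+# getNetworkObjectList operation name follows the usual FTD list
+# convention; illustrative only):
+#
+#   - name: Find the host object by name
+#     ftd_configuration:
+#       operation: "getNetworkObjectList"
+#       filters:
+#         name: "Ansible-network-host"
+#       register_as: "foundHosts"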
+ returned: success + type: dict +""" +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.connection import Connection +from ansible_collections.community.general.plugins.module_utils.network.ftd.configuration import BaseConfigurationResource, CheckModeException, \ + FtdInvalidOperationNameError +from ansible_collections.community.general.plugins.module_utils.network.ftd.fdm_swagger_client import ValidationError +from ansible_collections.community.general.plugins.module_utils.network.ftd.common import construct_ansible_facts, FtdConfigurationError, \ + FtdServerError, FtdUnexpectedResponse + + +def main(): + fields = dict( + operation=dict(type='str', required=True), + data=dict(type='dict'), + query_params=dict(type='dict'), + path_params=dict(type='dict'), + register_as=dict(type='str'), + filters=dict(type='dict') + ) + module = AnsibleModule(argument_spec=fields, + supports_check_mode=True) + params = module.params + + connection = Connection(module._socket_path) + resource = BaseConfigurationResource(connection, module.check_mode) + op_name = params['operation'] + try: + resp = resource.execute_operation(op_name, params) + module.exit_json(changed=resource.config_changed, response=resp, + ansible_facts=construct_ansible_facts(resp, module.params)) + except FtdInvalidOperationNameError as e: + module.fail_json(msg='Invalid operation name provided: %s' % e.operation_name) + except FtdConfigurationError as e: + module.fail_json(msg='Failed to execute %s operation because of the configuration error: %s' % (op_name, e.msg)) + except FtdServerError as e: + module.fail_json(msg='Server returned an error trying to execute %s operation. Status code: %s. ' + 'Server response: %s' % (op_name, e.code, e.response)) + except FtdUnexpectedResponse as e: + module.fail_json(msg=e.args[0]) + except ValidationError as e: + module.fail_json(msg=e.args[0]) + except CheckModeException: + module.exit_json(changed=False) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/ftd/ftd_file_download.py b/plugins/modules/network/ftd/ftd_file_download.py new file mode 100644 index 0000000000..18c8b15633 --- /dev/null +++ b/plugins/modules/network/ftd/ftd_file_download.py @@ -0,0 +1,131 @@ +#!/usr/bin/python + +# Copyright (c) 2018 Cisco and/or its affiliates. +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: ftd_file_download +short_description: Downloads files from Cisco FTD devices over HTTP(S) +description: + - Downloads files from Cisco FTD devices including pending changes, disk files, certificates, + troubleshoot reports, and backups. +author: "Cisco Systems, Inc. 
(@annikulin)" +options: + operation: + description: + - The name of the operation to execute. + - Only operations that return a file can be used in this module. + required: true + type: str + path_params: + description: + - Key-value pairs that should be sent as path parameters in a REST API call. + type: dict + destination: + description: + - Absolute path of where to download the file to. + - If destination is a directory, the module uses a filename from 'Content-Disposition' header specified by + the server. + required: true + type: path +''' + +EXAMPLES = """ +- name: Download pending changes + ftd_file_download: + operation: 'getdownload' + path_params: + objId: 'default' + destination: /tmp/ +""" + +RETURN = """ +msg: + description: The error message describing why the module failed. + returned: error + type: str +""" +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.connection import Connection +from ansible_collections.community.general.plugins.module_utils.network.ftd.common import FtdServerError, HTTPMethod +from ansible_collections.community.general.plugins.module_utils.network.ftd.fdm_swagger_client import OperationField, ValidationError, FILE_MODEL_NAME + + +def is_download_operation(op_spec): + return op_spec[OperationField.METHOD] == HTTPMethod.GET and op_spec[OperationField.MODEL_NAME] == FILE_MODEL_NAME + + +def validate_params(connection, op_name, path_params): + field_name = 'Invalid path_params provided' + try: + is_valid, validation_report = connection.validate_path_params(op_name, path_params) + if not is_valid: + raise ValidationError({ + field_name: validation_report + }) + except Exception as e: + raise ValidationError({ + field_name: str(e) + }) + + +def main(): + fields = dict( + operation=dict(type='str', required=True), + path_params=dict(type='dict'), + destination=dict(type='path', required=True) + ) + module = AnsibleModule(argument_spec=fields, + supports_check_mode=True) + params = module.params + connection = Connection(module._socket_path) + + op_name = params['operation'] + op_spec = connection.get_operation_spec(op_name) + if op_spec is None: + module.fail_json(msg='Operation with specified name is not found: %s' % op_name) + if not is_download_operation(op_spec): + module.fail_json( + msg='Invalid download operation: %s. The operation must make GET request and return a file.' % + op_name) + + try: + path_params = params['path_params'] + validate_params(connection, op_name, path_params) + if module.check_mode: + module.exit_json(changed=False) + connection.download_file(op_spec[OperationField.URL], params['destination'], path_params) + module.exit_json(changed=False) + except FtdServerError as e: + module.fail_json(msg='Download request for %s operation failed. Status code: %s. ' + 'Server response: %s' % (op_name, e.code, e.response)) + except ValidationError as e: + module.fail_json(msg=e.args[0]) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/ftd/ftd_file_upload.py b/plugins/modules/network/ftd/ftd_file_upload.py new file mode 100644 index 0000000000..50062d8c1f --- /dev/null +++ b/plugins/modules/network/ftd/ftd_file_upload.py @@ -0,0 +1,107 @@ +#!/usr/bin/python + +# Copyright (c) 2018 Cisco and/or its affiliates. 
+# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: ftd_file_upload +short_description: Uploads files to Cisco FTD devices over HTTP(S) +description: + - Uploads files to Cisco FTD devices including disk files, backups, and upgrades. +author: "Cisco Systems, Inc. (@annikulin)" +options: + operation: + description: + - The name of the operation to execute. + - Only operations that upload file can be used in this module. + required: true + type: str + file_to_upload: + description: + - Absolute path to the file that should be uploaded. + required: true + type: path + register_as: + description: + - Specifies Ansible fact name that is used to register received response from the FTD device. + type: str +''' + +EXAMPLES = """ +- name: Upload disk file + ftd_file_upload: + operation: 'postuploaddiskfile' + file_to_upload: /tmp/test1.txt +""" + +RETURN = """ +msg: + description: The error message describing why the module failed. + returned: error + type: str +""" +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.connection import Connection +from ansible_collections.community.general.plugins.module_utils.network.ftd.common import construct_ansible_facts, FtdServerError, HTTPMethod +from ansible_collections.community.general.plugins.module_utils.network.ftd.fdm_swagger_client import OperationField + + +def is_upload_operation(op_spec): + return op_spec[OperationField.METHOD] == HTTPMethod.POST or 'UploadStatus' in op_spec[OperationField.MODEL_NAME] + + +def main(): + fields = dict( + operation=dict(type='str', required=True), + file_to_upload=dict(type='path', required=True), + register_as=dict(type='str'), + ) + module = AnsibleModule(argument_spec=fields, + supports_check_mode=True) + params = module.params + connection = Connection(module._socket_path) + + op_spec = connection.get_operation_spec(params['operation']) + if op_spec is None: + module.fail_json(msg='Operation with specified name is not found: %s' % params['operation']) + if not is_upload_operation(op_spec): + module.fail_json( + msg='Invalid upload operation: %s. The operation must make POST request and return UploadStatus model.' % + params['operation']) + + try: + if module.check_mode: + module.exit_json() + resp = connection.upload_file(params['file_to_upload'], op_spec[OperationField.URL]) + module.exit_json(changed=True, response=resp, ansible_facts=construct_ansible_facts(resp, module.params)) + except FtdServerError as e: + module.fail_json(msg='Upload request for %s operation failed. Status code: %s. 
' + 'Server response: %s' % (params['operation'], e.code, e.response)) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/ftd/ftd_install.py b/plugins/modules/network/ftd/ftd_install.py new file mode 100644 index 0000000000..025bac5e3b --- /dev/null +++ b/plugins/modules/network/ftd/ftd_install.py @@ -0,0 +1,294 @@ +#!/usr/bin/python + +# Copyright (c) 2019 Cisco and/or its affiliates. +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: ftd_install +short_description: Installs FTD pkg image on the firewall +description: + - Provisioning module for FTD devices that installs ROMMON image (if needed) and + FTD pkg image on the firewall. + - Can be used with `httpapi` and `local` connection types. The `httpapi` is preferred, + the `local` connection should be used only when the device cannot be accessed via + REST API. +requirements: [ "python >= 3.5", "firepower-kickstart" ] +notes: + - Requires `firepower-kickstart` library that should be installed separately and requires Python >= 3.5. + - On localhost, Ansible can be still run with Python >= 2.7, but the interpreter for this particular module must be + Python >= 3.5. + - Python interpreter for the module can overwritten in `ansible_python_interpreter` variable. +author: "Cisco Systems, Inc. (@annikulin)" +options: + device_hostname: + description: + - Hostname of the device as appears in the prompt (e.g., 'firepower-5516'). + required: true + type: str + device_username: + description: + - Username to login on the device. + - Defaulted to 'admin' if not specified. + required: false + type: str + default: admin + device_password: + description: + - Password to login on the device. + required: true + type: str + device_sudo_password: + description: + - Root password for the device. If not specified, `device_password` is used. + required: false + type: str + device_new_password: + description: + - New device password to set after image installation. + - If not specified, current password from `device_password` property is reused. + - Not applicable for ASA5500-X series devices. + required: false + type: str + device_ip: + description: + - Device IP address of management interface. + - If not specified and connection is 'httpapi`, the module tries to fetch the existing value via REST API. + - For 'local' connection type, this parameter is mandatory. + required: false + type: str + device_gateway: + description: + - Device gateway of management interface. + - If not specified and connection is 'httpapi`, the module tries to fetch the existing value via REST API. + - For 'local' connection type, this parameter is mandatory. 
+ required: false + type: str + device_netmask: + description: + - Device netmask of management interface. + - If not specified and connection is 'httpapi`, the module tries to fetch the existing value via REST API. + - For 'local' connection type, this parameter is mandatory. + required: false + type: str + device_model: + description: + - Platform model of the device (e.g., 'Cisco ASA5506-X Threat Defense'). + - If not specified and connection is 'httpapi`, the module tries to fetch the device model via REST API. + - For 'local' connection type, this parameter is mandatory. + required: false + type: str + choices: + - Cisco ASA5506-X Threat Defense + - Cisco ASA5508-X Threat Defense + - Cisco ASA5516-X Threat Defense + - Cisco Firepower 2110 Threat Defense + - Cisco Firepower 2120 Threat Defense + - Cisco Firepower 2130 Threat Defense + - Cisco Firepower 2140 Threat Defense + dns_server: + description: + - DNS IP address of management interface. + - If not specified and connection is 'httpapi`, the module tries to fetch the existing value via REST API. + - For 'local' connection type, this parameter is mandatory. + required: false + type: str + console_ip: + description: + - IP address of a terminal server. + - Used to set up an SSH connection with device's console port through the terminal server. + required: true + type: str + console_port: + description: + - Device's port on a terminal server. + required: true + type: str + console_username: + description: + - Username to login on a terminal server. + required: true + type: str + console_password: + description: + - Password to login on a terminal server. + required: true + type: str + rommon_file_location: + description: + - Path to the boot (ROMMON) image on TFTP server. + - Only TFTP is supported. + required: true + type: str + image_file_location: + description: + - Path to the FTD pkg image on the server to be downloaded. + - FTP, SCP, SFTP, TFTP, or HTTP protocols are usually supported, but may depend on the device model. + required: true + type: str + image_version: + description: + - Version of FTD image to be installed. + - Helps to compare target and current FTD versions to prevent unnecessary reinstalls. + required: true + type: str + force_install: + description: + - Forces the FTD image to be installed even when the same version is already installed on the firewall. + - By default, the module stops execution when the target version is installed in the device. + required: false + type: bool + default: false + search_domains: + description: + - Search domains delimited by comma. + - Defaulted to 'cisco.com' if not specified. + required: false + type: str + default: cisco.com +''' + +EXAMPLES = """ + - name: Install image v6.3.0 on FTD 5516 + ftd_install: + device_hostname: firepower + device_password: pass + device_ip: 192.168.0.1 + device_netmask: 255.255.255.0 + device_gateway: 192.168.0.254 + dns_server: 8.8.8.8 + + console_ip: 10.89.0.0 + console_port: 2004 + console_username: console_user + console_password: console_pass + + rommon_file_location: 'tftp://10.89.0.11/installers/ftd-boot-9.10.1.3.lfbff' + image_file_location: 'https://10.89.0.11/installers/ftd-6.3.0-83.pkg' + image_version: 6.3.0-83 +""" + +RETURN = """ +msg: + description: The message saying whether the image was installed or explaining why the installation failed. 
+ returned: always + type: str +""" +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.connection import Connection +from ansible.module_utils.six import iteritems + +from ansible_collections.community.general.plugins.module_utils.network.ftd.configuration import BaseConfigurationResource, ParamName +from ansible_collections.community.general.plugins.module_utils.network.ftd.device import assert_kick_is_installed, FtdPlatformFactory, FtdModel +from ansible_collections.community.general.plugins.module_utils.network.ftd.operation import FtdOperations, get_system_info + +REQUIRED_PARAMS_FOR_LOCAL_CONNECTION = ['device_ip', 'device_netmask', 'device_gateway', 'device_model', 'dns_server'] + + +def main(): + fields = dict( + device_hostname=dict(type='str', required=True), + device_username=dict(type='str', required=False, default='admin'), + device_password=dict(type='str', required=True, no_log=True), + device_sudo_password=dict(type='str', required=False, no_log=True), + device_new_password=dict(type='str', required=False, no_log=True), + device_ip=dict(type='str', required=False), + device_netmask=dict(type='str', required=False), + device_gateway=dict(type='str', required=False), + device_model=dict(type='str', required=False, choices=FtdModel.supported_models()), + dns_server=dict(type='str', required=False), + search_domains=dict(type='str', required=False, default='cisco.com'), + + console_ip=dict(type='str', required=True), + console_port=dict(type='str', required=True), + console_username=dict(type='str', required=True), + console_password=dict(type='str', required=True, no_log=True), + + rommon_file_location=dict(type='str', required=True), + image_file_location=dict(type='str', required=True), + image_version=dict(type='str', required=True), + force_install=dict(type='bool', required=False, default=False) + ) + module = AnsibleModule(argument_spec=fields) + assert_kick_is_installed(module) + + use_local_connection = module._socket_path is None + if use_local_connection: + check_required_params_for_local_connection(module, module.params) + platform_model = module.params['device_model'] + check_that_model_is_supported(module, platform_model) + else: + connection = Connection(module._socket_path) + resource = BaseConfigurationResource(connection, module.check_mode) + system_info = get_system_info(resource) + + platform_model = module.params['device_model'] or system_info['platformModel'] + check_that_model_is_supported(module, platform_model) + check_that_update_is_needed(module, system_info) + check_management_and_dns_params(resource, module.params) + + ftd_platform = FtdPlatformFactory.create(platform_model, module.params) + ftd_platform.install_ftd_image(module.params) + + module.exit_json(changed=True, + msg='Successfully installed FTD image %s on the firewall device.' % module.params["image_version"]) + + +def check_required_params_for_local_connection(module, params): + missing_params = [k for k, v in iteritems(params) if k in REQUIRED_PARAMS_FOR_LOCAL_CONNECTION and v is None] + if missing_params: + message = "The following parameters are mandatory when the module is used with 'local' connection: %s." % \ + ', '.join(sorted(missing_params)) + module.fail_json(msg=message) + + +def check_that_model_is_supported(module, platform_model): + if platform_model not in FtdModel.supported_models(): + module.fail_json(msg="Platform model '%s' is not supported by this module." 
% platform_model) + + +def check_that_update_is_needed(module, system_info): + target_ftd_version = module.params["image_version"] + if not module.params["force_install"] and target_ftd_version == system_info['softwareVersion']: + module.exit_json(changed=False, msg="FTD already has %s version of software installed." % target_ftd_version) + + +def check_management_and_dns_params(resource, params): + if not all([params['device_ip'], params['device_netmask'], params['device_gateway']]): + management_ip = resource.execute_operation(FtdOperations.GET_MANAGEMENT_IP_LIST, {})['items'][0] + params['device_ip'] = params['device_ip'] or management_ip['ipv4Address'] + params['device_netmask'] = params['device_netmask'] or management_ip['ipv4NetMask'] + params['device_gateway'] = params['device_gateway'] or management_ip['ipv4Gateway'] + if not params['dns_server']: + dns_setting = resource.execute_operation(FtdOperations.GET_DNS_SETTING_LIST, {})['items'][0] + dns_server_group_id = dns_setting['dnsServerGroup']['id'] + dns_server_group = resource.execute_operation(FtdOperations.GET_DNS_SERVER_GROUP, + {ParamName.PATH_PARAMS: {'objId': dns_server_group_id}}) + params['dns_server'] = dns_server_group['dnsServers'][0]['ipAddress'] + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/icx/icx_banner.py b/plugins/modules/network/icx/icx_banner.py new file mode 100644 index 0000000000..7faadbe997 --- /dev/null +++ b/plugins/modules/network/icx/icx_banner.py @@ -0,0 +1,215 @@ +#!/usr/bin/python +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: icx_banner +author: "Ruckus Wireless (@Commscope)" +short_description: Manage multiline banners on Ruckus ICX 7000 series switches +description: + - This will configure both login and motd banners on remote + ruckus ICX 7000 series switches. It allows playbooks to add or remove + banner text from the active running configuration. +notes: + - Tested against ICX 10.1 +options: + banner: + description: + - Specifies which banner should be configured on the remote device. + type: str + required: true + choices: ['motd', 'exec', 'incoming'] + text: + description: + - The banner text that should be + present in the remote device running configuration. + This argument accepts a multiline string, with no empty lines. + type: str + state: + description: + - Specifies whether or not the configuration is + present in the current devices active running configuration. + type: str + default: present + choices: ['present', 'absent'] + enterkey: + description: + - Specifies whether or not the motd configuration should accept + the require-enter-key + type: bool + default: no + check_running_config: + description: + - Check running configuration. This can be set as environment variable. + Module will use environment variable value(default:True), unless it is overridden, + by specifying it as module parameter. 
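+        The environment variable is C(ANSIBLE_CHECK_ICX_RUNNING_CONFIG).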
+ type: bool + default: yes + +''' + +EXAMPLES = """ +- name: configure the motd banner + icx_banner: + banner: motd + text: | + this is my motd banner + that contains a multiline + string + state: present + +- name: remove the motd banner + icx_banner: + banner: motd + state: absent + +- name: configure require-enter-key for motd + icx_banner: + banner: motd + enterkey: True + +- name: remove require-enter-key for motd + icx_banner: + banner: motd + enterkey: False +""" + +RETURN = """ +commands: + description: The list of configuration mode commands to send to the device + returned: always + type: list + sample: + - banner motd + - this is my motd banner + - that contains a multiline + - string +""" + +import re +from ansible.module_utils._text import to_text +from ansible.module_utils.connection import exec_command +from ansible.module_utils.basic import AnsibleModule, env_fallback +from ansible_collections.community.general.plugins.module_utils.network.icx.icx import load_config, get_config +from ansible.module_utils.connection import Connection, ConnectionError + + +def map_obj_to_commands(updates, module): + commands = list() + state = module.params['state'] + want, have = updates + + if module.params['banner'] != 'motd' and module.params['enterkey']: + module.fail_json(msg=module.params['banner'] + " banner can have text only, got enterkey") + + if state == 'absent': + if 'text' in have.keys() and have['text']: + commands.append('no banner %s' % module.params['banner']) + if(module.params['enterkey'] is False): + commands.append('no banner %s require-enter-key' % module.params['banner']) + + elif state == 'present': + if module.params['text'] is None and module.params['enterkey'] is None: + module.fail_json(msg=module.params['banner'] + " one of the following is required: text, enterkey:only if motd") + + if module.params["banner"] == "motd" and want['enterkey'] != have['enterkey']: + if(module.params['enterkey']): + commands.append('banner %s require-enter-key' % module.params['banner']) + + if want['text'] and (want['text'] != have.get('text')): + module.params["enterkey"] = None + banner_cmd = 'banner %s' % module.params['banner'] + banner_cmd += ' $\n' + banner_cmd += module.params['text'].strip() + banner_cmd += '\n$' + commands.append(banner_cmd) + return commands + + +def map_config_to_obj(module): + compare = module.params.get('check_running_config') + obj = {'banner': module.params['banner'], 'state': 'absent', 'enterkey': False} + exec_command(module, 'skip') + output_text = '' + output_re = '' + out = get_config(module, flags=['| begin banner %s' + % module.params['banner']], compare=module.params['check_running_config']) + if out: + try: + output_re = re.search(r'banner %s( require-enter-key)' % module.params['banner'], out, re.S).group(0) + obj['enterkey'] = True + except BaseException: + pass + try: + output_text = re.search(r'banner %s (\$([^\$])+\$){1}' % module.params['banner'], out, re.S).group(1).strip('$\n') + except BaseException: + pass + + else: + output_text = None + if output_text: + obj['text'] = output_text + obj['state'] = 'present' + if module.params['check_running_config'] is False: + obj = {'banner': module.params['banner'], 'state': 'absent', 'enterkey': False, 'text': 'JUNK'} + return obj + + +def map_params_to_obj(module): + text = module.params['text'] + if text: + text = str(text).strip() + + return { + 'banner': module.params['banner'], + 'text': text, + 'state': module.params['state'], + 'enterkey': module.params['enterkey'] + } + + +def main(): 
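+    # Convergence flow (see the functions above): map_params_to_obj builds the
+    # desired banner state from the task arguments, map_config_to_obj reads the
+    # current state from the device, and map_obj_to_commands emits only the
+    # commands needed to converge the two.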
+ """entry point for module execution + """ + argument_spec = dict( + banner=dict(required=True, choices=['motd', 'exec', 'incoming']), + text=dict(), + enterkey=dict(type='bool'), + state=dict(default='present', choices=['present', 'absent']), + check_running_config=dict(default=True, type='bool', fallback=(env_fallback, ['ANSIBLE_CHECK_ICX_RUNNING_CONFIG'])) + ) + + required_one_of = [['text', 'enterkey', 'state']] + module = AnsibleModule(argument_spec=argument_spec, + required_one_of=required_one_of, + supports_check_mode=True) + + warnings = list() + results = {'changed': False} + + want = map_params_to_obj(module) + have = map_config_to_obj(module) + commands = map_obj_to_commands((want, have), module) + results['commands'] = commands + + if commands: + if not module.check_mode: + response = load_config(module, commands) + + results['changed'] = True + + module.exit_json(**results) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/icx/icx_command.py b/plugins/modules/network/icx/icx_command.py new file mode 100644 index 0000000000..b63f8842d6 --- /dev/null +++ b/plugins/modules/network/icx/icx_command.py @@ -0,0 +1,232 @@ +#!/usr/bin/python +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: icx_command +author: "Ruckus Wireless (@Commscope)" +short_description: Run arbitrary commands on remote Ruckus ICX 7000 series switches +description: + - Sends arbitrary commands to an ICX node and returns the results + read from the device. This module includes an + argument that will cause the module to wait for a specific condition + before returning or timing out if the condition is not met. +notes: + - Tested against ICX 10.1 +options: + commands: + description: + - List of commands to send to the remote ICX device over the + configured provider. The resulting output from the command + is returned. If the I(wait_for) argument is provided, the + module is not returned until the condition is satisfied or + the number of retries has expired. If a command sent to the + device requires answering a prompt, checkall and newline if + multiple prompts, it is possible to pass + a dict containing I(command), I(answer), I(prompt), I(check_all) + and I(newline).Common answers are 'y' or "\r" (carriage return, + must be double quotes). See examples. + type: list + required: true + wait_for: + description: + - List of conditions to evaluate against the output of the + command. The task will wait for each condition to be true + before moving forward. If the conditional is not true + within the configured number of retries, the task fails. + See examples. + type: list + aliases: ['waitfor'] + match: + description: + - The I(match) argument is used in conjunction with the + I(wait_for) argument to specify the match policy. Valid + values are C(all) or C(any). If the value is set to C(all) + then all conditionals in the wait_for must be satisfied. If + the value is set to C(any) then only one of the values must be + satisfied. + type: str + default: all + choices: ['any', 'all'] + retries: + description: + - Specifies the number of times a command should by tried + before it is considered failed. 
The command is run on the + target device every retry and evaluated against the + I(wait_for) conditions. + type: int + default: 10 + interval: + description: + - Configures the interval in seconds to wait between retries + of the command. If the command does not pass the specified + conditions, the interval indicates how long to wait before + trying the command again. + type: int + default: 1 +''' + +EXAMPLES = """ +tasks: + - name: run show version on remote devices + icx_command: + commands: show version + + - name: run show version and check to see if output contains ICX + icx_command: + commands: show version + wait_for: result[0] contains ICX + + - name: run multiple commands on remote nodes + icx_command: + commands: + - show version + - show interfaces + + - name: run multiple commands and evaluate the output + icx_command: + commands: + - show version + - show interfaces + wait_for: + - result[0] contains ICX + - result[1] contains GigabitEthernet1/1/1 + - name: run commands that require answering a prompt + icx_command: + commands: + - command: 'service password-encryption sha1' + prompt: 'Warning: Moving to higher password-encryption type,.*' + answer: 'y' + - name: run commands that require answering multiple prompt + icx_command: + commands: + - command: 'username qqq password qqq' + prompt: + - 'User already exists. Do you want to modify:.*' + - 'To modify or remove user, enter current password:' + answer: + - 'y' + - 'qqq\\\r' + check_all: True + newline: False +""" + +RETURN = """ +stdout: + description: The set of responses from the commands + returned: always apart from low level errors + type: list + sample: ['...', '...'] +stdout_lines: + description: The value of stdout split into a list + returned: always apart from low level errors + type: list + sample: [['...', '...'], ['...'], ['...']] +failed_conditions: + description: The list of conditionals that have failed + returned: failed + type: list + sample: ['...', '...'] +""" + + +import re +import time +from ansible_collections.community.general.plugins.module_utils.network.icx.icx import run_commands +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import ComplexList, to_lines +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.parsing import Conditional +from ansible.module_utils.six import string_types + + +def parse_commands(module, warnings): + command = ComplexList(dict( + command=dict(key=True), + prompt=dict(), + answer=dict(), + check_all=dict(type='bool', default='False'), + newline=dict(type='bool', default='True') + ), module) + commands = command(module.params['commands']) + for item in list(commands): + if module.check_mode: + if not item['command'].startswith('show'): + warnings.append( + 'Only show commands are supported when using check mode, not executing configure terminal') + commands.remove(item) + return commands + + +def main(): + """main entry point for module execution + """ + argument_spec = dict( + commands=dict(type='list', required=True), + + wait_for=dict(type='list', aliases=['waitfor']), + match=dict(default='all', choices=['all', 'any']), + + retries=dict(default=10, type='int'), + interval=dict(default=1, type='int') + ) + + module = AnsibleModule(argument_spec=argument_spec, + supports_check_mode=True) + + result = {'changed': False} + + warnings = list() + run_commands(module, ['skip']) + commands = parse_commands(module, warnings) + result['warnings'] = warnings + 
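+    # Retry loop: run the commands, evaluate every wait_for conditional against
+    # the responses, and stop once all of them (match=all) or any one of them
+    # (match=any) passes; otherwise sleep `interval` seconds and try again, up
+    # to `retries` attempts, before failing with the unmet conditionals.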
+ wait_for = module.params['wait_for'] or list() + conditionals = [Conditional(c) for c in wait_for] + retries = module.params['retries'] + interval = module.params['interval'] + match = module.params['match'] + + while retries > 0: + + responses = run_commands(module, commands) + + for item in list(conditionals): + + if item(responses): + if match == 'any': + conditionals = list() + break + conditionals.remove(item) + + if not conditionals: + break + + time.sleep(interval) + retries -= 1 + + if conditionals: + failed_conditions = [item.raw for item in conditionals] + msg = 'One or more conditional statements have not been satisfied' + module.fail_json(msg=msg, failed_conditions=failed_conditions) + + result.update({ + 'changed': False, + 'stdout': responses, + 'stdout_lines': list(to_lines(responses)) + }) + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/icx/icx_config.py b/plugins/modules/network/icx/icx_config.py new file mode 100644 index 0000000000..920a56d38a --- /dev/null +++ b/plugins/modules/network/icx/icx_config.py @@ -0,0 +1,483 @@ +#!/usr/bin/python +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: icx_config +author: "Ruckus Wireless (@Commscope)" +short_description: Manage configuration sections of Ruckus ICX 7000 series switches +description: + - Ruckus ICX configurations use a simple block indent file syntax + for segmenting configuration into sections. This module provides + an implementation for working with ICX configuration sections in + a deterministic way. +notes: + - Tested against ICX 10.1. + - For information on using ICX platform, see L(the ICX OS Platform Options guide,../network/user_guide/platform_icx.html). +options: + lines: + description: + - The ordered set of commands that should be configured in the + section. The commands must be the exact same commands as found + in the device running-config. Be sure to note the configuration + command syntax as some commands are automatically modified by the + device config parser. + type: list + aliases: ['commands'] + parents: + description: + - The ordered set of parents that uniquely identify the section or hierarchy + the commands should be checked against. If the parents argument + is omitted, the commands are checked against the set of top + level or global commands. + type: list + src: + description: + - Specifies the source path to the file that contains the configuration + or configuration template to load. The path to the source file can + either be the full path on the Ansible control host or a relative + path from the playbook or role root directory. This argument is mutually + exclusive with I(lines), I(parents). + type: str + before: + description: + - The ordered set of commands to push on to the command stack if + a change needs to be made. This allows the playbook designer + the opportunity to perform configuration commands prior to pushing + any changes without affecting how the set of commands are matched + against the system. + type: list + after: + description: + - The ordered set of commands to append to the end of the command + stack if a change needs to be made. 
+        Just like with I(before) this
+        allows the playbook designer to append a set of commands to be
+        executed after the command set.
+    type: list
+  match:
+    description:
+      - Instructs the module on the way to perform the matching of
+        the set of commands against the current device config. If
+        match is set to I(line), commands are matched line by line. If
+        match is set to I(strict), command lines are matched with respect
+        to position. If match is set to I(exact), command lines
+        must be an equal match. Finally, if match is set to I(none), the
+        module will not attempt to compare the source configuration with
+        the running configuration on the remote device.
+    type: str
+    choices: ['line', 'strict', 'exact', 'none']
+    default: line
+  replace:
+    description:
+      - Instructs the module on the way to perform the configuration
+        on the device. If the replace argument is set to I(line) then
+        the modified lines are pushed to the device in configuration
+        mode. If the replace argument is set to I(block) then the entire
+        command block is pushed to the device in configuration mode if any
+        line is not correct.
+    type: str
+    default: line
+    choices: ['line', 'block']
+  multiline_delimiter:
+    description:
+      - This argument is used when pushing a multiline configuration
+        element to the ICX device. It specifies the character to use
+        as the delimiting character. This only applies to the
+        configuration action.
+    type: str
+    default: "@"
+  backup:
+    description:
+      - This argument will cause the module to create a full backup of
+        the current C(running-config) from the remote device before any
+        changes are made. The backup file is written to the C(backup)
+        folder in the playbook root directory or role root directory, if
+        the playbook is part of an Ansible role. If the directory does not exist,
+        it is created.
+    type: bool
+    default: 'no'
+  defaults:
+    description:
+      - This argument specifies whether or not to collect all defaults
+        when getting the remote device running config. When enabled,
+        the module will get the current config by issuing the command
+        C(show running-config all).
+    type: bool
+    default: 'no'
+  running_config:
+    description:
+      - The module, by default, will connect to the remote device and
+        retrieve the current running-config to use as a base for comparing
+        against the contents of source. There are times when it is not
+        desirable to have the task get the current running-config for
+        every task in a playbook. The I(running_config) argument allows the
+        implementer to pass in the configuration to use as the base
+        config for comparison.
+    type: str
+    aliases: ['config']
+  save_when:
+    description:
+      - When changes are made to the device running-configuration, the
+        changes are not copied to non-volatile storage by default. Using
+        this argument will change that behavior. If the argument is set to
+        I(always), then the running-config will always be copied to the
+        start-up configuration and the I(modified) flag will always be set to
+        True. If the argument is set to I(modified), then the running-config
+        will only be copied to the start-up configuration if it has changed since
+        the last save to start-up configuration. If the argument is set to
+        I(never), the running-config will never be copied to the
+        start-up configuration. If the argument is set to I(changed), then the running-config
+        will only be copied to the start-up configuration if the task has made a change.
+    type: str
+    default: never
+    choices: ['always', 'never', 'modified', 'changed']
+  diff_against:
+    description:
+      - When using the C(ansible-playbook --diff) command line argument
+        the module can generate diffs against different sources.
+      - When this option is configured as I(startup), the module will return
+        the diff of the running-config against the start-up configuration.
+      - When this option is configured as I(intended), the module will
+        return the diff of the running-config against the configuration
+        provided in the C(intended_config) argument.
+      - When this option is configured as I(running), the module will
+        return the before and after diff of the running-config with respect
+        to any changes made to the device configuration.
+    type: str
+    choices: ['running', 'startup', 'intended']
+  diff_ignore_lines:
+    description:
+      - Use this argument to specify one or more lines that should be
+        ignored during the diff. This is used for lines in the configuration
+        that are automatically updated by the system. This argument takes
+        a list of regular expressions or exact line matches.
+    type: list
+  intended_config:
+    description:
+      - The C(intended_config) provides the master configuration that
+        the node should conform to and is used to check the final
+        running-config against. This argument will not modify any settings
+        on the remote device and is used strictly to check the compliance
+        of the current device's configuration. When specifying this
+        argument, the task should also modify the C(diff_against) value and
+        set it to I(intended).
+    type: str
+'''
+
+EXAMPLES = """
+- name: configure top level configuration
+  icx_config:
+    lines: hostname {{ inventory_hostname }}
+
+- name: configure interface settings
+  icx_config:
+    lines:
+      - port-name test string
+      - ip address 172.31.1.1 255.255.255.0
+    parents: interface ethernet 1/1/2
+
+- name: configure ip helpers on multiple interfaces
+  icx_config:
+    lines:
+      - ip helper-address 172.26.1.10
+      - ip helper-address 172.26.3.8
+    parents: "{{ item }}"
+  with_items:
+    - interface ethernet 1/1/2
+    - interface ethernet 1/1/3
+
+- name: load new acl into device
+  icx_config:
+    lines:
+      - permit ip host 192.0.2.1 any log
+      - permit ip host 192.0.2.2 any log
+      - permit ip host 192.0.2.3 any log
+      - permit ip host 192.0.2.4 any log
+    parents: ip access-list extended test
+    before: no ip access-list extended test
+    match: exact
+
+- name: check the running-config against master config
+  icx_config:
+    diff_against: intended
+    intended_config: "{{ lookup('file', 'master.cfg') }}"
+
+- name: check the startup-config against the running-config
+  icx_config:
+    diff_against: startup
+    diff_ignore_lines:
+      - ntp clock .*
+
+- name: for idempotency, use full-form commands
+  icx_config:
+    lines:
+      # - en
+      - enable
+    # parents: int eth1/0/11
+    parents: interface ethernet 1/1/2
+
+# Set boot image based on comparison to a group_var (version) and the version
+# that is returned from the `icx_facts` module
+- name: SETTING BOOT IMAGE
+  icx_config:
+    lines:
+      - no boot system
+      - boot system flash bootflash:{{new_image}}
+    host: "{{ inventory_hostname }}"
+  when: ansible_net_version != version
+
+- name: render template onto an ICX device
+  icx_config:
+    backup: yes
+    src: "{{ lookup('file', 'config.j2') }}"
+"""
+
+RETURN = """
+updates:
+  description: The set of commands that will be pushed to the remote device
+  returned: always
+  type: list
+  sample: ['hostname foo', 'router ospf 1', 'router-id 192.0.2.1']
+commands:
+  description: The set of commands that
will be pushed to the remote device + returned: always + type: list + sample: ['hostname foo', 'router ospf 1', 'router-id 192.0.2.1'] +backup_path: + description: The full path to the backup file + returned: when backup is yes + type: str + sample: /playbooks/ansible/backup/icx_config.2016-07-16@22:28:34 +""" + +import json +from ansible.module_utils._text import to_text +from ansible.module_utils.connection import ConnectionError +from ansible_collections.community.general.plugins.module_utils.network.icx.icx import run_commands, get_config +from ansible_collections.community.general.plugins.module_utils.network.icx.icx import get_defaults_flag, get_connection +from ansible_collections.community.general.plugins.module_utils.network.icx.icx import check_args as icx_check_args +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.config import NetworkConfig, dumps + + +def check_args(module, warnings): + icx_check_args(module, warnings) + if module.params['multiline_delimiter']: + if len(module.params['multiline_delimiter']) != 1: + module.fail_json(msg='multiline_delimiter value can only be a ' + 'single character') + + +def edit_config_or_macro(connection, commands): + if "macro name" in commands[0]: + connection.edit_macro(candidate=commands) + else: + if commands[0] != '': + connection.edit_config(candidate=commands) + + +def get_candidate_config(module): + candidate = '' + if module.params['src']: + candidate = module.params['src'] + + elif module.params['lines']: + candidate_obj = NetworkConfig(indent=1) + parents = module.params['parents'] or list() + candidate_obj.add(module.params['lines'], parents=parents) + candidate = dumps(candidate_obj, 'raw') + + return candidate + + +def get_running_config(module, current_config=None, flags=None): + running = module.params['running_config'] + if not running: + if not module.params['defaults'] and current_config: + running = current_config + else: + running = get_config(module, flags=flags) + + return running + + +def save_config(module, result): + result['changed'] = True + if not module.check_mode: + run_commands(module, 'write memory') + else: + module.warn('Skipping command `copy running-config start-up configuration` ' + 'due to check_mode. 
Configuration not copied to ' + 'non-volatile storage') + + +def main(): + """ main entry point for module execution + """ + argument_spec = dict( + src=dict(), + lines=dict(aliases=['commands'], type='list'), + parents=dict(type='list'), + + before=dict(type='list'), + after=dict(type='list'), + + match=dict(default='line', choices=['line', 'strict', 'exact', 'none']), + replace=dict(default='line', choices=['line', 'block']), + multiline_delimiter=dict(default='@'), + + running_config=dict(aliases=['config']), + intended_config=dict(), + + defaults=dict(type='bool', default=False), + backup=dict(type='bool', default=False), + + save_when=dict(choices=['always', 'never', 'modified', 'changed'], default='never'), + + diff_against=dict(choices=['startup', 'intended', 'running']), + diff_ignore_lines=dict(type='list'), + + ) + + mutually_exclusive = [('lines', 'src'), + ('parents', 'src')] + + required_if = [('match', 'strict', ['lines']), + ('match', 'exact', ['lines']), + ('replace', 'block', ['lines']), + ('diff_against', 'intended', ['intended_config'])] + + module = AnsibleModule(argument_spec=argument_spec, + mutually_exclusive=mutually_exclusive, + required_if=required_if, + supports_check_mode=True) + + result = {'changed': False} + + warnings = list() + check_args(module, warnings) + result['warnings'] = warnings + run_commands(module, 'skip') + diff_ignore_lines = module.params['diff_ignore_lines'] + config = None + contents = None + flags = None if module.params['defaults'] else [] + connection = get_connection(module) + + if module.params['backup'] or (module._diff and module.params['diff_against'] == 'running'): + contents = get_config(module, flags=flags) + config = NetworkConfig(indent=1, contents=contents) + if module.params['backup']: + result['__backup__'] = contents + + if any((module.params['lines'], module.params['src'])): + match = module.params['match'] + replace = module.params['replace'] + path = module.params['parents'] + + candidate = get_candidate_config(module) + running = get_running_config(module, contents, flags=flags) + try: + response = connection.get_diff(candidate=candidate, running=running, diff_match=match, diff_ignore_lines=diff_ignore_lines, path=path, + diff_replace=replace) + except ConnectionError as exc: + module.fail_json(msg=to_text(exc, errors='surrogate_then_replace')) + + config_diff = response['config_diff'] + banner_diff = response['banner_diff'] + + if config_diff or banner_diff: + commands = config_diff.split('\n') + + if module.params['before']: + commands[:0] = module.params['before'] + + if module.params['after']: + commands.extend(module.params['after']) + + result['commands'] = commands + result['updates'] = commands + result['banners'] = banner_diff + + # send the configuration commands to the device and merge + # them with the current running config + if not module.check_mode: + if commands: + edit_config_or_macro(connection, commands) + if banner_diff: + connection.edit_banner(candidate=json.dumps(banner_diff), multiline_delimiter=module.params['multiline_delimiter']) + + result['changed'] = True + + running_config = module.params['running_config'] + startup_config = None + + if module.params['save_when'] == 'always': + save_config(module, result) + elif module.params['save_when'] == 'modified': + output = run_commands(module, ['show running-config', 'show configuration']) + + running_config = NetworkConfig(indent=1, contents=output[0], ignore_lines=diff_ignore_lines) + startup_config = NetworkConfig(indent=1, contents=output[1], 
ignore_lines=diff_ignore_lines) + + if running_config.sha1 != startup_config.sha1: + save_config(module, result) + elif module.params['save_when'] == 'changed' and result['changed']: + save_config(module, result) + + if module._diff: + if not running_config: + output = run_commands(module, 'show running-config') + contents = output[0] + else: + contents = running_config + + # recreate the object in order to process diff_ignore_lines + running_config = NetworkConfig(indent=1, contents=contents, ignore_lines=diff_ignore_lines) + + if module.params['diff_against'] == 'running': + if module.check_mode: + module.warn("unable to perform diff against running-config due to check mode") + contents = None + else: + contents = config.config_text + + elif module.params['diff_against'] == 'startup': + if not startup_config: + output = run_commands(module, 'show configuration') + contents = output[0] + else: + contents = startup_config.config_text + + elif module.params['diff_against'] == 'intended': + contents = module.params['intended_config'] + + if contents is not None: + base_config = NetworkConfig(indent=1, contents=contents, ignore_lines=diff_ignore_lines) + + if running_config.sha1 != base_config.sha1: + if module.params['diff_against'] == 'intended': + before = running_config + after = base_config + elif module.params['diff_against'] in ('startup', 'running'): + before = base_config + after = running_config + + result.update({ + 'changed': True, + 'diff': {'before': str(before), 'after': str(after)} + }) + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/icx/icx_copy.py b/plugins/modules/network/icx/icx_copy.py new file mode 100644 index 0000000000..2512870b04 --- /dev/null +++ b/plugins/modules/network/icx/icx_copy.py @@ -0,0 +1,372 @@ +#!/usr/bin/python +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: icx_copy +author: "Ruckus Wireless (@Commscope)" +short_description: Transfer files from or to remote Ruckus ICX 7000 series switches +description: + - This module transfers files from or to remote devices running ICX. +notes: + - Tested against ICX 10.1. + - For information on using ICX platform, see L(the ICX OS Platform Options guide,../network/user_guide/platform_icx.html). +options: + upload: + description: + - Name of the resource to be uploaded. Mutually exclusive with download. + type: str + choices: ['running-config', 'startup-config', 'flash_primary', 'flash_secondary'] + download: + description: + - Name of the resource to be downloaded. Mutually exclusive with upload. + type: str + choices: ['running-config', 'startup-config', 'flash_primary', 'flash_secondary', 'bootrom', 'fips-primary-sig', 'fips-secondary-sig', 'fips-bootrom-sig'] + protocol: + description: + - Data transfer protocol to be used + type: str + choices: ['scp', 'https'] + required: true + remote_server: + description: + - IP address of the remote server + type: str + required: true + remote_port: + description: + - The port number of the remote host. Default values will be selected based on protocol type. + Default scp:22, http:443 + type: str + remote_filename: + description: + - The name or path of the remote file/resource to be uploaded or downloaded. 
+    type: str
+    required: true
+  remote_user:
+    description:
+      - remote username to be used for scp login.
+    type: str
+  remote_pass:
+    description:
+      - remote password to be used for scp login.
+    type: str
+  public_key:
+    description:
+      - public key type to be used to login to the scp server.
+    type: str
+    choices: ['rsa', 'dsa']
+
+'''
+
+EXAMPLES = """
+- name: upload running-config to the remote scp server
+  icx_copy:
+    upload: running-config
+    protocol: scp
+    remote_server: 172.16.10.49
+    remote_filename: running.conf
+    remote_user: user1
+    remote_pass: pass123
+
+- name: download running-config from the remote scp server
+  icx_copy:
+    download: running-config
+    protocol: scp
+    remote_server: 172.16.10.49
+    remote_filename: running.conf
+    remote_user: user1
+    remote_pass: pass123
+
+- name: download running-config from the remote scp server using rsa public key
+  icx_copy:
+    download: running-config
+    protocol: scp
+    remote_server: 172.16.10.49
+    remote_filename: running.conf
+    remote_user: user1
+    remote_pass: pass123
+    public_key: rsa
+
+- name: upload startup-config to the remote https server
+  icx_copy:
+    upload: startup-config
+    protocol: https
+    remote_server: 172.16.10.49
+    remote_filename: config/running.conf
+    remote_user: user1
+    remote_pass: pass123
+
+- name: Download OS image into the flash from remote scp ipv6 server
+  icx_copy:
+    download: flash_primary
+    protocol: scp
+    remote_server: ipv6 FE80:CD00:0000:0CDE:1257:0000:211E:729C
+    remote_filename: img.bin
+    remote_user: user1
+    remote_pass: pass123
+
+- name: Download OS image into the secondary flash from remote scp ipv6 server
+  icx_copy:
+    download: flash_secondary
+    protocol: scp
+    remote_server: ipv6 FE80:CD00:0000:0CDE:1257:0000:211E:729C
+    remote_filename: img.bin
+    remote_user: user1
+    remote_pass: pass123
+
+- name: Download OS image into the secondary flash from remote scp ipv6 server on port 5000
+  icx_copy:
+    download: flash_secondary
+    protocol: scp
+    remote_server: ipv6 FE80:CD00:0000:0CDE:1257:0000:211E:729C
+    remote_port: 5000
+    remote_filename: img.bin
+    remote_user: user1
+    remote_pass: pass123
+
+- name: Download OS image into the primary flash from remote https ipv6 server
+  icx_copy:
+    download: flash_primary
+    protocol: https
+    remote_server: ipv6 FE80:CD00:0000:0CDE:1257:0000:211E:729C
+    remote_filename: images/img.bin
+    remote_user: user1
+    remote_pass: pass123
+
+- name: Download OS image into the primary flash from remote https ipv6 server on port 8080
+  icx_copy:
+    download: flash_primary
+    protocol: https
+    remote_server: ipv6 FE80:CD00:0000:0CDE:1257:0000:211E:729C
+    remote_port: 8080
+    remote_filename: images/img.bin
+    remote_user: user1
+    remote_pass: pass123
+"""
+
+RETURN = """
+changed:
+  description: True when any configuration or flash image was downloaded, false otherwise.
+ returned: always + type: bool +""" + + +from ansible.module_utils._text import to_text +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.connection import ConnectionError, exec_command +from ansible_collections.community.general.plugins.module_utils.network.icx.icx import exec_scp, run_commands + + +def map_params_to_obj(module): + command = dict() + + if(module.params['protocol'] == 'scp'): + if(module.params['upload'] is not None): + module.params["upload"] = module.params["upload"].replace("flash_primary", "primary") + module.params["upload"] = module.params["upload"].replace("flash_secondary", "secondary") + if(module.params["upload"] == 'running-config' or module.params["upload"] == 'startup-config'): + command["command"] = "copy %s scp %s%s %s%s" % (module.params['upload'], + module.params["remote_server"], + " " + module.params["remote_port"] if module.params["remote_port"] else "", + module.params["remote_filename"], + "public-key " + module.params["public_key"] if module.params["public_key"] else "") + else: + command["command"] = "copy flash scp %s%s %s%s %s" % (module.params["remote_server"], + " " + module.params["remote_port"] if module.params["remote_port"] else "", + module.params["remote_filename"], + "public-key " + module.params["public_key"] if module.params["public_key"] else "", + module.params["upload"]) + command["scp_user"] = module.params["remote_user"] + command["scp_pass"] = module.params["remote_pass"] + if(module.params['download'] is not None): + module.params["download"] = module.params["download"].replace("flash_primary", "primary") + module.params["download"] = module.params["download"].replace("flash_secondary", "secondary") + if(module.params["download"] == 'running-config' or module.params["download"] == 'startup-config'): + command["command"] = "copy scp %s %s%s %s%s" % (module.params['download'], + module.params["remote_server"], + " " + module.params["remote_port"] if module.params["remote_port"] else "", + module.params["remote_filename"], + "public-key " + module.params["public_key"] if module.params["public_key"] else "") + else: + command["command"] = "copy scp flash %s%s %s%s %s" % (module.params["remote_server"], + " " + module.params["remote_port"] if module.params["remote_port"] else "", + module.params["remote_filename"], + "public-key " + module.params["public_key"] if module.params["public_key"] else "", + module.params["download"]) + command["scp_user"] = module.params["remote_user"] + command["scp_pass"] = module.params["remote_pass"] + if(module.params['protocol'] == 'https'): + if(module.params['upload'] is not None): + module.params["upload"] = module.params["upload"].replace("flash_primary", "primary") + module.params["upload"] = module.params["upload"].replace("flash_secondary", "secondary") + if(module.params["upload"] == 'running-config' or module.params["upload"] == 'startup-config'): + command["command"] = "copy %s https %s %s%s" % (module.params['upload'], + module.params["remote_server"], + module.params["remote_filename"], + " port " + module.params["remote_port"] if module.params["remote_port"] else "") + else: + command["command"] = "copy https flash %s %s %s%s" % (module.params["remote_server"], + module.params["remote_filename"], + module.params['upload'], + " port " + module.params["remote_port"] if module.params["remote_port"] else "") + if(module.params['download'] is not None): + module.params["download"] = module.params["download"].replace("flash_primary", "primary") + 
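+            # As with uploads above, translate the module-level resource names
+            # into the 'primary'/'secondary' keywords the ICX copy command expects.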
module.params["download"] = module.params["download"].replace("flash_secondary", "secondary") + if(module.params["download"] == 'running-config' or module.params["download"] == 'startup-config'): + command["command"] = "copy https %s %s %s%s" % (module.params['download'], + module.params["remote_server"], + module.params["remote_filename"], + " port " + module.params["remote_port"] if module.params["remote_port"] else "") + else: + command["command"] = "copy https flash %s %s %s%s" % (module.params["remote_server"], + module.params["remote_filename"], + module.params['download'], + " port " + module.params["remote_port"] if module.params["remote_port"] else "") + return command + + +def checkValidations(module): + validation = dict( + scp=dict( + upload=[ + 'running-config', + 'startup-config', + 'flash_primary', + 'flash_secondary'], + download=[ + 'running-config', + 'startup-config', + 'flash_primary', + 'flash_secondary', + 'bootrom', + 'fips-primary-sig', + 'fips-secondary-sig', + 'fips-bootrom-sig']), + https=dict( + upload=[ + 'running-config', + 'startup-config'], + download=[ + 'flash_primary', + 'flash_secondary', + 'startup-config'])) + protocol = module.params['protocol'] + upload = module.params['upload'] + download = module.params['download'] + + if(protocol == 'scp' and module.params['remote_user'] is None): + module.fail_json(msg="While using scp remote_user argument is required") + if(upload is None and download is None): + module.fail_json(msg="Upload or download params are required.") + if(upload is not None and download is not None): + module.fail_json(msg="Only upload or download can be used at a time.") + if(upload): + if(not (upload in validation.get(protocol).get("upload"))): + module.fail_json(msg="Specified resource '" + upload + "' can't be uploaded to '" + protocol + "'") + if(download): + if(not (download in validation.get(protocol).get("download"))): + module.fail_json(msg="Specified resource '" + download + "' can't be downloaded from '" + protocol + "'") + + +def main(): + """entry point for module execution + """ + argument_spec = dict( + upload=dict( + type='str', + required=False, + choices=[ + 'running-config', + 'flash_primary', + 'flash_secondary', + 'startup-config']), + download=dict( + type='str', + required=False, + choices=[ + 'running-config', + 'startup-config', + 'flash_primary', + 'flash_secondary', + 'bootrom', + 'fips-primary-sig', + 'fips-secondary-sig', + 'fips-bootrom-sig']), + protocol=dict( + type='str', + required=True, + choices=[ + 'https', + 'scp']), + remote_server=dict( + type='str', + required=True), + remote_port=dict( + type='str', + required=False), + remote_filename=dict( + type='str', + required=True), + remote_user=dict( + type='str', + required=False), + remote_pass=dict( + type='str', + required=False, + no_log=True), + public_key=dict( + type='str', + required=False, + choices=[ + 'rsa', + 'dsa'])) + mutually_exclusive = [['upload', 'download']] + module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False, mutually_exclusive=mutually_exclusive) + + checkValidations(module) + warnings = list() + result = {'changed': False, 'warnings': warnings} + exec_command(module, 'skip') + + response = '' + try: + command = map_params_to_obj(module) + result['commands'] = [command["command"]] + + if(module.params['protocol'] == 'scp'): + response = exec_scp(module, command) + else: + response = run_commands(module, command) + if('Response Code: 404' in response): + module.fail_json(msg=response) + else: + 
result['response'] = "in progress..." + if(module.params["download"] is not None): + result['changed'] = True + except ConnectionError as exc: + module.fail_json(msg=to_text(exc, errors='surrogate_then_replace')) + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/icx/icx_facts.py b/plugins/modules/network/icx/icx_facts.py new file mode 100644 index 0000000000..af45d0d14c --- /dev/null +++ b/plugins/modules/network/icx/icx_facts.py @@ -0,0 +1,548 @@ +#!/usr/bin/python +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: icx_facts +author: "Ruckus Wireless (@Commscope)" +short_description: Collect facts from remote Ruckus ICX 7000 series switches +description: + - Collects a base set of device facts from a remote device that + is running ICX. This module prepends all of the + base network fact keys with C(ansible_net_). The facts + module will always collect a base set of facts from the device + and can enable or disable collection of additional facts. +notes: + - Tested against ICX 10.1. + - For information on using ICX platform, see L(the ICX OS Platform Options guide,../network/user_guide/platform_icx.html). +options: + gather_subset: + description: + - When supplied, this argument will restrict the facts collected + to a given subset. Possible values for this argument include + all, hardware, config, and interfaces. Can specify a list of + values to include a larger subset. Values can also be used + with an initial C(M(!)) to specify that a specific subset should + not be collected. + required: false + type: list + default: '!config' +''' + +EXAMPLES = """ +# Collect all facts from the device +- icx_facts: + gather_subset: all + +# Collect only the config and default facts +- icx_facts: + gather_subset: + - config + +# Do not collect hardware facts +- icx_facts: + gather_subset: + - "!hardware" +""" + +RETURN = """ +ansible_net_gather_subset: + description: The list of fact subsets collected from the device + returned: always + type: list + +# default +ansible_net_model: + description: The model name returned from the device + returned: always + type: str +ansible_net_serialnum: + description: The serial number of the remote device + returned: always + type: str +ansible_net_version: + description: The operating system version running on the remote device + returned: always + type: str +ansible_net_hostname: + description: The configured hostname of the device + returned: always + type: str +ansible_net_image: + description: The image file the device is running + returned: always + type: str +ansible_net_stacked_models: + description: The model names of each device in the stack + returned: when multiple devices are configured in a stack + type: list +ansible_net_stacked_serialnums: + description: The serial numbers of each device in the stack + returned: when multiple devices are configured in a stack + type: list + +# hardware +ansible_net_filesystems: + description: All file system names available on the device + returned: when hardware is configured + type: list +ansible_net_filesystems_info: + description: A hash of all file systems containing info about each file system (e.g. 
free and total space) + returned: when hardware is configured + type: dict +ansible_net_memfree_mb: + description: The available free memory on the remote device in Mb + returned: when hardware is configured + type: int +ansible_net_memtotal_mb: + description: The total memory on the remote device in Mb + returned: when hardware is configured + type: int + +# config +ansible_net_config: + description: The current active config from the device + returned: when config is configured + type: str + +# interfaces +ansible_net_all_ipv4_addresses: + description: All IPv4 addresses configured on the device + returned: when interfaces is configured + type: list +ansible_net_all_ipv6_addresses: + description: All IPv6 addresses configured on the device + returned: when interfaces is configured + type: list +ansible_net_interfaces: + description: A hash of all interfaces running on the system + returned: when interfaces is configured + type: dict +ansible_net_neighbors: + description: The list of LLDP neighbors from the remote device + returned: when interfaces is configured + type: dict +""" + + +import re +from ansible_collections.community.general.plugins.module_utils.network.icx.icx import run_commands +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.six import iteritems +from ansible.module_utils.six.moves import zip + + +class FactsBase(object): + + COMMANDS = list() + + def __init__(self, module): + self.module = module + self.facts = dict() + self.responses = None + + def populate(self): + self.responses = run_commands(self.module, commands=self.COMMANDS, check_rc=False) + + def run(self, cmd): + return run_commands(self.module, commands=cmd, check_rc=False) + + +class Default(FactsBase): + + COMMANDS = ['show version', 'show running-config | include hostname'] + + def populate(self): + super(Default, self).run(['skip']) + super(Default, self).populate() + data = self.responses[0] + if data: + self.facts['version'] = self.parse_version(data) + self.facts['serialnum'] = self.parse_serialnum(data) + self.facts['model'] = self.parse_model(data) + self.facts['image'] = self.parse_image(data) + self.facts['hostname'] = self.parse_hostname(self.responses[1]) + self.parse_stacks(data) + + def parse_version(self, data): + match = re.search(r'SW: Version ([0-9]+.[0-9]+.[0-9a-zA-Z]+)', data) + if match: + return match.group(1) + + def parse_hostname(self, data): + match = re.search(r'^hostname (\S+)', data, re.M) + if match: + return match.group(1) + + def parse_model(self, data): + match = re.search(r'HW: (\S+ \S+)', data, re.M) + if match: + return match.group(1) + + def parse_image(self, data): + match = re.search(r'\([0-9]+ bytes\) from \S+ (\S+)', data) + if match: + return match.group(1) + + def parse_serialnum(self, data): + match = re.search(r'Serial #:(\S+)', data) + if match: + return match.group(1) + + def parse_stacks(self, data): + match = re.findall(r'UNIT [1-9]+: SL [1-9]+: (\S+)', data, re.M) + if match: + self.facts['stacked_models'] = match + + match = re.findall(r'^System [Ss]erial [Nn]umber\s+: (\S+)', data, re.M) + if match: + self.facts['stacked_serialnums'] = match + + +class Hardware(FactsBase): + + COMMANDS = [ + 'show memory', + 'show flash' + ] + + def populate(self): + super(Hardware, self).populate() + data = self.responses[0] + if data: + self.facts['filesystems'] = self.parse_filesystems(data) + self.facts['filesystems_info'] = self.parse_filesystems_info(self.responses[1]) + + if data: + if 'Invalid input detected' in data: + 
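+                # 'Invalid input detected' means the platform rejected `show memory`,
+                # so surface a warning instead of parsing memory statistics.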
warnings.append('Unable to gather memory statistics') + else: + match = re.search(r'Dynamic memory: ([0-9]+) bytes total, ([0-9]+) bytes free, ([0-9]+%) used', data) + if match: + self.facts['memtotal_mb'] = int(match.group(1)) / 1024 + self.facts['memfree_mb'] = int(match.group(2)) / 1024 + + def parse_filesystems(self, data): + return "flash" + + def parse_filesystems_info(self, data): + facts = dict() + fs = '' + for line in data.split('\n'): + match = re.match(r'^(Stack unit \S+):', line) + if match: + fs = match.group(1) + facts[fs] = dict() + continue + match = re.match(r'\W+NAND Type: Micron NAND (\S+)', line) + if match: + facts[fs]['spacetotal'] = match.group(1) + match = re.match(r'\W+Code Flash Free Space = (\S+)', line) + if match: + facts[fs]['spacefree'] = int(int(match.group(1)) / 1024) + facts[fs]['spacefree'] = str(facts[fs]['spacefree']) + "Kb" + return {"flash": facts} + + +class Config(FactsBase): + + COMMANDS = ['skip', 'show running-config'] + + def populate(self): + super(Config, self).populate() + data = self.responses[1] + if data: + self.facts['config'] = data + + +class Interfaces(FactsBase): + + COMMANDS = [ + 'skip', + 'show interfaces', + 'show running-config', + 'show lldp', + 'show media' + ] + + def populate(self): + super(Interfaces, self).populate() + + self.facts['all_ipv4_addresses'] = list() + self.facts['all_ipv6_addresses'] = list() + data = self.responses[1] + if data: + interfaces = self.parse_interfaces(data) + self.facts['interfaces'] = self.populate_interfaces(interfaces) + + data = self.responses[1] + if data: + data = self.parse_interfaces(data) + self.populate_ipv4_interfaces(data) + + data = self.responses[2] + if data: + self.populate_ipv6_interfaces(data) + + data = self.responses[3] + lldp_errs = ['Invalid input', 'LLDP is not enabled'] + + if data and not any(err in data for err in lldp_errs): + neighbors = self.run(['show lldp neighbors detail']) + if neighbors: + self.facts['neighbors'] = self.parse_neighbors(neighbors[0]) + + data = self.responses[4] + self.populate_mediatype(data) + + interfaceList = {} + for iface in self.facts['interfaces']: + if 'type' in self.facts['interfaces'][iface]: + newName = self.facts['interfaces'][iface]['type'] + iface + else: + newName = iface + interfaceList[newName] = self.facts['interfaces'][iface] + self.facts['interfaces'] = interfaceList + + def populate_mediatype(self, data): + lines = data.split("\n") + for line in lines: + match = re.match(r'Port (\S+):\W+Type\W+:\W+(.*)', line) + if match: + self.facts['interfaces'][match.group(1)]["mediatype"] = match.group(2) + + def populate_interfaces(self, interfaces): + facts = dict() + for key, value in iteritems(interfaces): + intf = dict() + intf['description'] = self.parse_description(value) + intf['macaddress'] = self.parse_macaddress(value) + intf['mtu'] = self.parse_mtu(value) + intf['bandwidth'] = self.parse_bandwidth(value) + intf['duplex'] = self.parse_duplex(value) + intf['lineprotocol'] = self.parse_lineprotocol(value) + intf['operstatus'] = self.parse_operstatus(value) + intf['type'] = self.parse_type(value) + facts[key] = intf + return facts + + def populate_ipv4_interfaces(self, data): + for key, value in data.items(): + self.facts['interfaces'][key]['ipv4'] = dict() + primary_address = addresses = [] + primary_address = re.findall(r'Internet address is (\S+/\S+), .*$', value, re.M) + addresses = re.findall(r'Secondary address (.+)$', value, re.M) + if len(primary_address) == 0: + continue + addresses.append(primary_address[0]) + for 
address in addresses: + addr, subnet = address.split("/") + ipv4 = dict(address=addr.strip(), subnet=subnet.strip()) + self.add_ip_address(addr.strip(), 'ipv4') + self.facts['interfaces'][key]['ipv4'] = ipv4 + + def populate_ipv6_interfaces(self, data): + parts = data.split("\n") + for line in parts: + match = re.match(r'\W*interface \S+ (\S+)', line) + if match: + key = match.group(1) + try: + self.facts['interfaces'][key]['ipv6'] = list() + except KeyError: + self.facts['interfaces'][key] = dict() + self.facts['interfaces'][key]['ipv6'] = list() + self.facts['interfaces'][key]['ipv6'] = {} + continue + match = re.match(r'\W+ipv6 address (\S+)/(\S+)', line) + if match: + self.add_ip_address(match.group(1), "ipv6") + self.facts['interfaces'][key]['ipv6']["address"] = match.group(1) + self.facts['interfaces'][key]['ipv6']["subnet"] = match.group(2) + + def add_ip_address(self, address, family): + if family == 'ipv4': + self.facts['all_ipv4_addresses'].append(address) + else: + self.facts['all_ipv6_addresses'].append(address) + + def parse_neighbors(self, neighbors): + facts = dict() + for entry in neighbors.split('------------------------------------------------'): + if entry == '': + continue + intf = self.parse_lldp_intf(entry) + if intf not in facts: + facts[intf] = list() + fact = dict() + fact['host'] = self.parse_lldp_host(entry) + fact['port'] = self.parse_lldp_port(entry) + facts[intf].append(fact) + return facts + + def parse_interfaces(self, data): + parsed = dict() + key = '' + for line in data.split('\n'): + if len(line) == 0: + continue + elif line[0] == ' ': + parsed[key] += '\n%s' % line + else: + match = re.match(r'\S+Ethernet(\S+)', line) + if match: + key = match.group(1) + parsed[key] = line + return parsed + + def parse_description(self, data): + match = re.search(r'Port name is ([ \S]+)', data, re.M) + if match: + return match.group(1) + + def parse_macaddress(self, data): + match = re.search(r'Hardware is \S+, address is (\S+)', data) + if match: + return match.group(1) + + def parse_ipv4(self, data): + match = re.search(r'Internet address is (\S+)', data) + if match: + addr, masklen = match.group(1).split('/') + return dict(address=addr, masklen=int(masklen)) + + def parse_mtu(self, data): + match = re.search(r'MTU (\d+)', data) + if match: + return int(match.group(1)) + + def parse_bandwidth(self, data): + match = re.search(r'Configured speed (\S+), actual (\S+)', data) + if match: + return match.group(1) + + def parse_duplex(self, data): + match = re.search(r'configured duplex (\S+), actual (\S+)', data, re.M) + if match: + return match.group(2) + + def parse_mediatype(self, data): + match = re.search(r'media type is (.+)$', data, re.M) + if match: + return match.group(1) + + def parse_type(self, data): + match = re.search(r'Hardware is (.+),', data, re.M) + if match: + return match.group(1) + + def parse_lineprotocol(self, data): + match = re.search(r'line protocol is (.+)$', data, re.M) + if match: + return match.group(1) + + def parse_operstatus(self, data): + match = re.search(r'^(?:.+) is (.+),', data, re.M) + if match: + return match.group(1) + + def parse_lldp_intf(self, data): + match = re.search(r'^Local Intf: (.+)$', data, re.M) + if match: + return match.group(1) + + def parse_lldp_host(self, data): + match = re.search(r'System Name: (.+)$', data, re.M) + if match: + return match.group(1) + + def parse_lldp_port(self, data): + match = re.search(r'Port id: (.+)$', data, re.M) + if match: + return match.group(1) + + +FACT_SUBSETS = dict( + default=Default, 
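# ---------------------------------------------------------------------------
# A minimal standalone sketch of the gather_subset selection that main()
# below implements: 'all' expands to every subset, a leading '!' excludes
# one, 'default' is always collected, and unknown names are rejected. The
# function name select_subsets() and its default set are illustrative only,
# not part of this module.

def select_subsets(requested, valid=frozenset(('default', 'hardware', 'interfaces', 'config'))):
    runnable, excluded = set(), set()
    for subset in requested:
        if subset == 'all':
            runnable.update(valid)
            continue
        if subset.startswith('!'):
            name = subset[1:]
            if name == 'all':
                excluded.update(valid)
                continue
            if name not in valid:
                raise ValueError('Bad subset: %s' % subset)
            excluded.add(name)
        elif subset not in valid:
            raise ValueError('Bad subset: %s' % subset)
        else:
            runnable.add(subset)
    if not runnable:
        runnable.update(valid)
    runnable.difference_update(excluded)
    runnable.add('default')  # the default facts are always gathered
    return runnable

# Example: select_subsets(['!config']) -> {'default', 'hardware', 'interfaces'}
# ---------------------------------------------------------------------------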
+ hardware=Hardware, + interfaces=Interfaces, + config=Config, +) + +VALID_SUBSETS = frozenset(FACT_SUBSETS.keys()) + +warnings = list() + + +def main(): + """main entry point for module execution + """ + argument_spec = dict( + gather_subset=dict(default=['!config'], type='list') + ) + + module = AnsibleModule(argument_spec=argument_spec, + supports_check_mode=True) + + gather_subset = module.params['gather_subset'] + + runable_subsets = set() + exclude_subsets = set() + + for subset in gather_subset: + if subset == 'all': + runable_subsets.update(VALID_SUBSETS) + continue + + if subset.startswith('!'): + subset = subset[1:] + if subset == 'all': + exclude_subsets.update(VALID_SUBSETS) + continue + exclude = True + else: + exclude = False + + if subset not in VALID_SUBSETS: + module.fail_json(msg='Bad subset') + + if exclude: + exclude_subsets.add(subset) + else: + runable_subsets.add(subset) + + if not runable_subsets: + runable_subsets.update(VALID_SUBSETS) + + runable_subsets.difference_update(exclude_subsets) + runable_subsets.add('default') + + facts = dict() + facts['gather_subset'] = list(runable_subsets) + + instances = list() + for key in runable_subsets: + instances.append(FACT_SUBSETS[key](module)) + + for inst in instances: + inst.populate() + facts.update(inst.facts) + + ansible_facts = dict() + for key, value in iteritems(facts): + key = 'ansible_net_%s' % key + ansible_facts[key] = value + + module.exit_json(ansible_facts=ansible_facts, warnings=warnings) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/icx/icx_interface.py b/plugins/modules/network/icx/icx_interface.py new file mode 100644 index 0000000000..1dbce9dde3 --- /dev/null +++ b/plugins/modules/network/icx/icx_interface.py @@ -0,0 +1,693 @@ +#!/usr/bin/python +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: icx_interface +author: "Ruckus Wireless (@Commscope)" +short_description: Manage Interface on Ruckus ICX 7000 series switches +description: + - This module provides declarative management of Interfaces + on ruckus icx devices. +notes: + - Tested against ICX 10.1. + - For information on using ICX platform, see L(the ICX OS Platform Options guide,../network/user_guide/platform_icx.html). +options: + name: + description: + - Name of the Interface. + type: str + description: + description: + - Name of the description. + type: str + enabled: + description: + - Interface link status + default: yes + type: bool + speed: + description: + - Interface link speed/duplex + choices: ['10-full', '10-half', '100-full', '100-half', '1000-full', '1000-full-master', + '1000-full-slave', '10g-full', '10g-full-master', '10g-full-slave', '2500-full', '2500-full-master', + '2500-full-slave', '5g-full', '5g-full-master', '5g-full-slave', 'auto'] + type: str + stp: + description: + - enable/disable stp for the interface + type: bool + tx_rate: + description: + - Transmit rate in bits per second (bps). + - This is state check parameter only. + - Supports conditionals, see L(Conditionals in Networking Modules,../network/user_guide/network_working_with_command_output.html) + type: str + rx_rate: + description: + - Receiver rate in bits per second (bps). + - This is state check parameter only. 
+ - Supports conditionals, see L(Conditionals in Networking Modules,../network/user_guide/network_working_with_command_output.html) + type: str + neighbors: + description: + - Check the operational state of given interface C(name) for CDP/LLDP neighbor. + - The following suboptions are available. + type: list + suboptions: + host: + description: + - "CDP/LLDP neighbor host for given interface C(name)." + type: str + port: + description: + - "CDP/LLDP neighbor port to which given interface C(name) is connected." + type: str + delay: + description: + - Time in seconds to wait before checking for the operational state on remote + device. This wait is applicable for operational state argument which are + I(state) with values C(up)/C(down), I(tx_rate) and I(rx_rate). + type: int + default: 10 + state: + description: + - State of the Interface configuration, C(up) means present and + operationally up and C(down) means present and operationally C(down) + default: present + type: str + choices: ['present', 'absent', 'up', 'down'] + power: + description: + - Inline power on Power over Ethernet (PoE) ports. + type: dict + suboptions: + by_class: + description: + - "The range is 0-4" + - "The power limit based on class value for given interface C(name)" + choices: ['0', '1', '2', '3', '4'] + type: str + limit: + description: + - "The range is 1000-15400|30000mW. For PoH ports the range is 1000-95000mW" + - "The power limit based on actual power value for given interface C(name)" + type: str + priority: + description: + - "The range is 1 (highest) to 3 (lowest)" + - "The priority for power management or given interface C(name)" + choices: ['1', '2', '3'] + type: str + enabled: + description: + - "enable/disable the poe of the given interface C(name)" + default: no + type: bool + aggregate: + description: + - List of Interfaces definitions. + type: list + suboptions: + name: + description: + - Name of the Interface. + type: str + description: + description: + - Name of the description. + type: str + enabled: + description: + - Interface link status + type: bool + speed: + description: + - Interface link speed/duplex + choices: ['10-full', '10-half', '100-full', '100-half', '1000-full', '1000-full-master', + '1000-full-slave', '10g-full', '10g-full-master', '10g-full-slave', '2500-full', '2500-full-master', + '2500-full-slave', '5g-full', '5g-full-master', '5g-full-slave', 'auto'] + type: str + stp: + description: + - enable/disable stp for the interface + type: bool + tx_rate: + description: + - Transmit rate in bits per second (bps). + - This is state check parameter only. + - Supports conditionals, see L(Conditionals in Networking Modules,../network/user_guide/network_working_with_command_output.html) + type: str + rx_rate: + description: + - Receiver rate in bits per second (bps). + - This is state check parameter only. + - Supports conditionals, see L(Conditionals in Networking Modules,../network/user_guide/network_working_with_command_output.html) + type: str + neighbors: + description: + - Check the operational state of given interface C(name) for CDP/LLDP neighbor. + - The following suboptions are available. + type: list + suboptions: + host: + description: + - "CDP/LLDP neighbor host for given interface C(name)." + type: str + port: + description: + - "CDP/LLDP neighbor port to which given interface C(name) is connected." + type: str + delay: + description: + - Time in seconds to wait before checking for the operational state on remote + device. 
This wait is applicable for operational state argument which are + I(state) with values C(up)/C(down), I(tx_rate) and I(rx_rate). + type: int + state: + description: + - State of the Interface configuration, C(up) means present and + operationally up and C(down) means present and operationally C(down) + type: str + choices: ['present', 'absent', 'up', 'down'] + check_running_config: + description: + - Check running configuration. This can be set as environment variable. + - Module will use environment variable value(default:True), unless it is overridden, by specifying it as module parameter. + type: bool + power: + description: + - Inline power on Power over Ethernet (PoE) ports. + type: dict + suboptions: + by_class: + description: + - "The range is 0-4" + - "The power limit based on class value for given interface C(name)" + choices: ['0', '1', '2', '3', '4'] + type: str + limit: + description: + - "The range is 1000-15400|30000mW. For PoH ports the range is 1000-95000mW" + - "The power limit based on actual power value for given interface C(name)" + type: str + priority: + description: + - "The range is 1 (highest) to 3 (lowest)" + - "The priority for power management or given interface C(name)" + choices: ['1', '2', '3'] + type: str + enabled: + description: + - "enable/disable the poe of the given interface C(name)" + type: bool + check_running_config: + description: + - Check running configuration. This can be set as environment variable. + - Module will use environment variable value(default:True), unless it is overridden, + by specifying it as module parameter. + default: yes + type: bool +''' + +EXAMPLES = """ +- name: enable ethernet port and set name + icx_interface: + name: ethernet 1/1/1 + description: interface-1 + stp: true + enabled: true + +- name: disable ethernet port 1/1/1 + icx_interface: + name: ethernet 1/1/1 + enabled: false + +- name: enable ethernet port range, set name and speed. + icx_interface: + name: ethernet 1/1/1 to 1/1/10 + description: interface-1 + speed: 100-full + enabled: true + +- name: enable poe. Set class. + icx_interface: + name: ethernet 1/1/1 + power: + by_class: 2 + +- name: configure poe limit of interface + icx_interface: + name: ethernet 1/1/1 + power: + limit: 10000 + +- name: disable poe of interface + icx_interface: + name: ethernet 1/1/1 + power: + enabled: false + +- name: set lag name for a range of lags + icx_interface: + name: lag 1 to 10 + description: test lags + +- name: Disable lag + icx_interface: + name: lag 1 + enabled: false + +- name: enable management interface + icx_interface: + name: management 1 + enabled: true + +- name: enable loopback interface + icx_interface: + name: loopback 10 + enabled: true + +- name: Add interface using aggregate + icx_interface: + aggregate: + - { name: ethernet 1/1/1, description: test-interface-1, power: { by_class: 2 } } + - { name: ethernet 1/1/3, description: test-interface-3} + speed: 10-full + enabled: true + +- name: Check tx_rate, rx_rate intent arguments + icx_interface: + name: ethernet 1/1/10 + state: up + tx_rate: ge(0) + rx_rate: le(0) + +- name: Check neighbors intent arguments + icx_interface: + name: ethernet 1/1/10 + neighbors: + - port: 1/1/5 + host: netdev +""" + +RETURN = """ +commands: + description: The list of configuration mode commands to send to the device. 
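# ---------------------------------------------------------------------------
# The tx_rate/rx_rate intent arguments shown in the examples above accept
# expressions such as ge(0) or le(0). The module delegates their evaluation
# to the netcommon conditional() helper; the stand-in below only
# approximates that helper (check_condition() is a hypothetical name, and
# the real implementation supports a richer syntax).

import operator
import re

def check_condition(expr, value, cast=int):
    match = re.match(r'^(eq|ne|gt|ge|lt|le)\((.+)\)$', expr.strip())
    if not match:
        return cast(value) == cast(expr)  # bare values compare for equality
    op, operand = match.groups()
    return getattr(operator, op)(cast(value), cast(operand))

# Example: check_condition('ge(0)', '150') -> True
# ---------------------------------------------------------------------------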
+ returned: always + type: list + sample: + - interface ethernet 1/1/1 + - port-name interface-1 + - state present + - speed-duplex 100-full + - inline power priority 1 +""" + +import re +from copy import deepcopy +from time import sleep +from ansible.module_utils._text import to_text +from ansible.module_utils.basic import AnsibleModule, env_fallback +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.config import NetworkConfig +from ansible_collections.community.general.plugins.module_utils.network.icx.icx import load_config, get_config +from ansible.module_utils.connection import Connection, ConnectionError, exec_command +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import conditional, remove_default_spec + + +def parse_enable(configobj, name): + cfg = configobj['interface %s' % name] + cfg = '\n'.join(cfg.children) + match = re.search(r'^disable', cfg, re.M) + if match: + return True + else: + return False + + +def parse_power_argument(configobj, name): + cfg = configobj['interface %s' % name] + cfg = '\n'.join(cfg.children) + match = re.search(r'(^inline power|^inline power(.*))+$', cfg, re.M) + if match: + return match.group(1) + + +def parse_config_argument(configobj, name, arg=None): + cfg = configobj['interface %s' % name] + cfg = '\n'.join(cfg.children) + match = re.search(r'%s (.+)$' % arg, cfg, re.M) + if match: + return match.group(1) + + +def parse_stp_arguments(module, item): + rc, out, err = exec_command(module, 'show interfaces ' + item) + match = re.search(r'STP configured to (\S+),', out, re.S) + if match: + return True if match.group(1) == "ON" else False + + +def search_obj_in_list(name, lst): + for o in lst: + if o['name'] == name: + return o + + return None + + +def validate_power(module, power): + count = 0 + for item in power: + if power.get(item) is not None: + count += 1 + if count > 1: + module.fail_json(msg='power parameters are mutually exclusive: class,limit,priority,enabled') + + +def add_command_to_interface(interface, cmd, commands): + if interface not in commands: + commands.append(interface) + commands.append(cmd) + + +def map_config_to_obj(module): + compare = module.params['check_running_config'] + config = get_config(module, None, compare) + configobj = NetworkConfig(indent=1, contents=config) + match = re.findall(r'^interface (.+)$', config, re.M) + + if not match: + return list() + + instances = list() + + for item in set(match): + obj = { + 'name': item, + 'port-name': parse_config_argument(configobj, item, 'port-name'), + 'speed-duplex': parse_config_argument(configobj, item, 'speed-duplex'), + 'stp': parse_stp_arguments(module, item), + 'disable': True if parse_enable(configobj, item) else False, + 'power': parse_power_argument(configobj, item), + 'state': 'present' + } + instances.append(obj) + return instances + + +def parse_poe_config(poe, power): + if poe.get('by_class') is not None: + power += 'power-by-class %s' % poe.get('by_class') + elif poe.get('limit') is not None: + power += 'power-limit %s' % poe.get('limit') + elif poe.get('priority') is not None: + power += 'priority %s' % poe.get('priority') + elif poe.get('enabled'): + power = 'inline power' + elif poe.get('enabled') is False: + power = 'no inline power' + return power + + +def map_params_to_obj(module): + obj = [] + aggregate = module.params.get('aggregate') + if aggregate: + for item in aggregate: + for key in item: + if item.get(key) is None: + item[key] = module.params[key] + + item['port-name'] = 
item.pop('description') + item['speed-duplex'] = item.pop('speed') + poe = item.get('power') + if poe: + + validate_power(module, poe) + power = 'inline power' + ' ' + power_arg = parse_poe_config(poe, power) + item.update({'power': power_arg}) + + d = item.copy() + + if d['enabled']: + d['disable'] = False + else: + d['disable'] = True + + obj.append(d) + + else: + params = { + 'name': module.params['name'], + 'port-name': module.params['description'], + 'speed-duplex': module.params['speed'], + 'stp': module.params['stp'], + 'delay': module.params['delay'], + 'state': module.params['state'], + 'tx_rate': module.params['tx_rate'], + 'rx_rate': module.params['rx_rate'], + 'neighbors': module.params['neighbors'] + } + poe = module.params.get('power') + if poe: + validate_power(module, poe) + power = 'inline power' + ' ' + power_arg = parse_poe_config(poe, power) + params.update({'power': power_arg}) + + if module.params['enabled']: + params.update({'disable': False}) + else: + params.update({'disable': True}) + + obj.append(params) + return obj + + +def map_obj_to_commands(updates): + commands = list() + want, have = updates + + args = ('speed-duplex', 'port-name', 'power', 'stp') + for w in want: + name = w['name'] + disable = w['disable'] + state = w['state'] + + obj_in_have = search_obj_in_list(name, have) + interface = 'interface ' + name + + if state == 'absent' and have == []: + commands.append('no ' + interface) + + elif state == 'absent' and obj_in_have: + commands.append('no ' + interface) + + elif state in ('present', 'up', 'down'): + if obj_in_have: + for item in args: + candidate = w.get(item) + running = obj_in_have.get(item) + if candidate == 'no inline power' and running is None: + candidate = None + if candidate != running: + if candidate: + if item == 'power': + cmd = str(candidate) + elif item == 'stp': + cmd = 'spanning-tree' if candidate else 'no spanning-tree' + else: + cmd = item + ' ' + str(candidate) + add_command_to_interface(interface, cmd, commands) + + if disable and not obj_in_have.get('disable', False): + add_command_to_interface(interface, 'disable', commands) + elif not disable and obj_in_have.get('disable', False): + add_command_to_interface(interface, 'enable', commands) + else: + commands.append(interface) + for item in args: + value = w.get(item) + if value: + if item == 'power': + commands.append(str(value)) + elif item == 'stp': + cmd = 'spanning-tree' if item else 'no spanning-tree' + else: + commands.append(item + ' ' + str(value)) + + if disable: + commands.append('disable') + if disable is False: + commands.append('enable') + + return commands + + +def check_declarative_intent_params(module, want, result): + failed_conditions = [] + have_neighbors_lldp = None + have_neighbors_cdp = None + for w in want: + want_state = w.get('state') + want_tx_rate = w.get('tx_rate') + want_rx_rate = w.get('rx_rate') + want_neighbors = w.get('neighbors') + + if want_state not in ('up', 'down') and not want_tx_rate and not want_rx_rate and not want_neighbors: + continue + + if result['changed']: + sleep(w['delay']) + + command = 'show interfaces %s' % w['name'] + rc, out, err = exec_command(module, command) + + if rc != 0: + module.fail_json(msg=to_text(err, errors='surrogate_then_replace'), command=command, rc=rc) + + if want_state in ('up', 'down'): + match = re.search(r'%s (\w+)' % 'line protocol is', out, re.M) + have_state = None + if match: + have_state = match.group(1) + if have_state is None or not conditional(want_state, have_state.strip()): + 
failed_conditions.append('state ' + 'eq(%s)' % want_state) + + if want_tx_rate: + match = re.search(r'%s (\d+)' % 'output rate:', out, re.M) + have_tx_rate = None + if match: + have_tx_rate = match.group(1) + + if have_tx_rate is None or not conditional(want_tx_rate, have_tx_rate.strip(), cast=int): + failed_conditions.append('tx_rate ' + want_tx_rate) + + if want_rx_rate: + match = re.search(r'%s (\d+)' % 'input rate:', out, re.M) + have_rx_rate = None + if match: + have_rx_rate = match.group(1) + + if have_rx_rate is None or not conditional(want_rx_rate, have_rx_rate.strip(), cast=int): + failed_conditions.append('rx_rate ' + want_rx_rate) + + if want_neighbors: + have_host = [] + have_port = [] + + if have_neighbors_lldp is None: + rc, have_neighbors_lldp, err = exec_command(module, 'show lldp neighbors detail') + if rc != 0: + module.fail_json(msg=to_text(err, errors='surrogate_then_replace'), command=command, rc=rc) + if have_neighbors_lldp: + lines = have_neighbors_lldp.strip().split('Local port: ') + + for line in lines: + field = line.split('\n') + if field[0].strip() == w['name'].split(' ')[1]: + for item in field: + match = re.search(r'\s*\+\s+System name\s+:\s+"(.*)"', item, re.M) + if match: + have_host.append(match.group(1)) + + match = re.search(r'\s*\+\s+Port description\s+:\s+"(.*)"', item, re.M) + if match: + have_port.append(match.group(1)) + + for item in want_neighbors: + host = item.get('host') + port = item.get('port') + if host and host not in have_host: + failed_conditions.append('host ' + host) + if port and port not in have_port: + failed_conditions.append('port ' + port) + return failed_conditions + + +def main(): + """ main entry point for module execution + """ + power_spec = dict( + by_class=dict(choices=['0', '1', '2', '3', '4']), + limit=dict(type='str'), + priority=dict(choices=['1', '2', '3']), + enabled=dict(type='bool') + ) + neighbors_spec = dict( + host=dict(), + port=dict() + ) + element_spec = dict( + name=dict(), + description=dict(), + enabled=dict(default=True, type='bool'), + speed=dict(type='str', choices=['10-full', '10-half', '100-full', '100-half', '1000-full', '1000-full-master', + '1000-full-slave', '10g-full', '10g-full-master', '10g-full-slave', '2500-full', '2500-full-master', + '2500-full-slave', '5g-full', '5g-full-master', '5g-full-slave', 'auto']), + stp=dict(type='bool'), + tx_rate=dict(), + rx_rate=dict(), + neighbors=dict(type='list', elements='dict', options=neighbors_spec), + delay=dict(default=10, type='int'), + state=dict(default='present', + choices=['present', 'absent', 'up', 'down']), + power=dict(type='dict', options=power_spec), + check_running_config=dict(default=True, type='bool', fallback=(env_fallback, ['ANSIBLE_CHECK_ICX_RUNNING_CONFIG'])) + ) + aggregate_spec = deepcopy(element_spec) + aggregate_spec['name'] = dict(required=True) + + remove_default_spec(aggregate_spec) + + argument_spec = dict( + aggregate=dict(type='list', elements='dict', options=aggregate_spec), + ) + argument_spec.update(element_spec) + + required_one_of = [['name', 'aggregate']] + mutually_exclusive = [['name', 'aggregate']] + + module = AnsibleModule(argument_spec=argument_spec, + required_one_of=required_one_of, + mutually_exclusive=mutually_exclusive, + supports_check_mode=True) + warnings = list() + result = {} + result['changed'] = False + if warnings: + result['warnings'] = warnings + exec_command(module, 'skip') + want = map_params_to_obj(module) + have = map_config_to_obj(module) + commands = map_obj_to_commands((want, have)) + 
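# ---------------------------------------------------------------------------
# The declarative flow used throughout these modules: parameters are
# normalised into a "want" list, the running config into a "have" list, and
# map_obj_to_commands() emits CLI only for attributes that differ. A
# stripped-down illustration (diff_to_commands() and its attribute names are
# invented for the example):

def diff_to_commands(want, have, attrs=('port-name', 'speed-duplex')):
    commands = []
    for attr in attrs:
        candidate, running = want.get(attr), have.get(attr)
        if candidate is not None and candidate != running:
            commands.append('%s %s' % (attr, candidate))
    return commands

# Example: diff_to_commands({'port-name': 'uplink'}, {'port-name': None})
# returns ['port-name uplink'], and nothing when the values already match.
# ---------------------------------------------------------------------------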
result['commands'] = commands + + if commands: + if not module.check_mode: + load_config(module, commands) + result['changed'] = True + + failed_conditions = check_declarative_intent_params(module, want, result) + + if failed_conditions: + msg = 'One or more conditional statements have not been satisfied' + module.fail_json(msg=msg, failed_conditions=failed_conditions) + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/icx/icx_l3_interface.py b/plugins/modules/network/icx/icx_l3_interface.py new file mode 100644 index 0000000000..cd382c3610 --- /dev/null +++ b/plugins/modules/network/icx/icx_l3_interface.py @@ -0,0 +1,438 @@ +#!/usr/bin/python +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: icx_l3_interface +author: "Ruckus Wireless (@Commscope)" +short_description: Manage Layer-3 interfaces on Ruckus ICX 7000 series switches +description: + - This module provides declarative management of Layer-3 interfaces + on ICX network devices. +notes: + - Tested against ICX 10.1. + - For information on using ICX platform, see L(the ICX OS Platform Options guide,../network/user_guide/platform_icx.html). +options: + name: + description: + - Name of the Layer-3 interface to be configured, e.g. GigabitEthernet0/2, ve 10, ethernet 1/1/1 + type: str + ipv4: + description: + - IPv4 address to be set for the Layer-3 interface mentioned in I(name) option. + The address format is <ipv4 address>/<mask>, the mask is a number + in range 0-32, e.g. 192.168.0.1/24 + type: str + ipv6: + description: + - IPv6 address to be set for the Layer-3 interface mentioned in I(name) option. + The address format is <ipv6 address>/<mask>, the mask is a number + in range 0-128, e.g. fd5d:12c9:2201:1::1/64. + type: str + mode: + description: + - Specifies if ipv4 address should be dynamic/advertise to ospf/not advertise to ospf. + This should be specified only if ipv4 address is configured and if it is not secondary IP address. + choices: ['dynamic', 'ospf-ignore', 'ospf-passive'] + type: str + replace: + description: + - Replaces the configured primary IP address on the interface. + choices: ['yes', 'no'] + type: str + secondary: + description: + - Specifies that the configured address is a secondary IP address. + If this keyword is omitted, the configured address is the primary IP address. + choices: ['yes', 'no'] + type: str + aggregate: + description: + - List of Layer-3 interface definitions. Each entry in the aggregate list should + define the interface C(name) and an optional C(ipv4) or C(ipv6) address. + type: list + suboptions: + name: + description: + - Name of the Layer-3 interface to be configured, e.g. GigabitEthernet0/2, ve 10, ethernet 1/1/1 + type: str + ipv4: + description: + - IPv4 address to be set for the Layer-3 interface mentioned in I(name) option. + The address format is <ipv4 address>/<mask>, the mask is a number + in range 0-32, e.g. 192.168.0.1/24 + type: str + ipv6: + description: + - IPv6 address to be set for the Layer-3 interface mentioned in I(name) option. + The address format is <ipv6 address>/<mask>, the mask is a number + in range 0-128, e.g. fd5d:12c9:2201:1::1/64. + type: str + mode: + description: + - Specifies if ipv4 address should be dynamic/advertise to ospf/not advertise to ospf.
+ This should be specified only if ipv4 address is configured and if it is not secondary IP address. + choices: ['dynamic', 'ospf-ignore', 'ospf-passive'] + type: str + replace: + description: + - Replaces the configured primary IP address on the interface. + choices: ['yes', 'no'] + type: str + secondary: + description: + - Specifies that the configured address is a secondary IP address. + If this keyword is omitted, the configured address is the primary IP address. + choices: ['yes', 'no'] + type: str + state: + description: + - State of the Layer-3 interface configuration. It indicates if the configuration should + be present or absent on remote device. + choices: ['present', 'absent'] + type: str + check_running_config: + description: + - Check running configuration. This can be set as environment variable. + Module will use environment variable value(default:True), unless it is overridden, by specifying it as module parameter. + type: bool + state: + description: + - State of the Layer-3 interface configuration. It indicates if the configuration should + be present or absent on remote device. + default: present + choices: ['present', 'absent'] + type: str + check_running_config: + description: + - Check running configuration. This can be set as environment variable. + Module will use environment variable value(default:True), unless it is overridden, by specifying it as module parameter. + type: bool + default: yes +''' + +EXAMPLES = """ +- name: Remove ethernet 1/1/1 IPv4 and IPv6 address + icx_l3_interface: + name: ethernet 1/1/1 + ipv4: 192.168.0.1/24 + ipv6: "fd5d:12c9:2201:1::1/64" + state: absent + +- name: Replace ethernet 1/1/1 primary IPv4 address + icx_l3_interface: + name: ethernet 1/1/1 + ipv4: 192.168.0.1/24 + replace: yes + state: absent + +- name: Replace ethernet 1/1/1 dynamic IPv4 address + icx_l3_interface: + name: ethernet 1/1/1 + ipv4: 192.168.0.1/24 + mode: dynamic + state: absent + +- name: Set ethernet 1/1/1 secondary IPv4 address + icx_l3_interface: + name: ethernet 1/1/1 + ipv4: 192.168.0.1/24 + secondary: yes + state: absent + +- name: Set ethernet 1/1/1 IPv4 address + icx_l3_interface: + name: ethernet 1/1/1 + ipv4: 192.168.0.1/24 + +- name: Set ethernet 1/1/1 IPv6 address + icx_l3_interface: + name: ethernet 1/1/1 + ipv6: "fd5d:12c9:2201:1::1/64" + +- name: Set IP addresses on aggregate + icx_l3_interface: + aggregate: + - { name: GigabitEthernet0/3, ipv4: 192.168.2.10/24 } + - { name: GigabitEthernet0/3, ipv4: 192.168.3.10/24, ipv6: "fd5d:12c9:2201:1::1/64" } + +- name: Remove IP addresses on aggregate + icx_l3_interface: + aggregate: + - { name: GigabitEthernet0/3, ipv4: 192.168.2.10/24 } + - { name: GigabitEthernet0/3, ipv4: 192.168.3.10/24, ipv6: "fd5d:12c9:2201:1::1/64" } + state: absent + + +- name: Set the ipv4 and ipv6 of a virtual ethernet(ve) + icx_l3_interface: + name: ve 100 + ipv4: 192.168.0.1 + ipv6: "2001:0db8:85a3:0000:0000:8a2e:0370:7334" +""" + +RETURN = """ +commands: + description: The list of configuration mode commands to send to the device + returned: always, except for the platforms that use Netconf transport to manage the device. 
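# ---------------------------------------------------------------------------
# The module accepts CIDR input (192.168.0.1/24) but generates the dotted
# netmask form shown in the RETURN sample ("ip address 192.168.0.1
# 255.255.255.0") via the netcommon to_netmask()/to_masklen() helpers. The
# standard-library equivalent, for reference (cidr_to_addr_mask() is a
# hypothetical helper, not part of this module):

import ipaddress

def cidr_to_addr_mask(cidr):
    # ip_interface() accepts a host address with a prefix length; the u''
    # prefix in the example keeps this working on Python 2 as well.
    iface = ipaddress.ip_interface(cidr)
    return str(iface.ip), str(iface.network.netmask)

# Example: cidr_to_addr_mask(u'192.168.0.1/24') -> ('192.168.0.1', '255.255.255.0')
# ---------------------------------------------------------------------------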
+ type: list + sample: + - interface ethernet 1/1/1 + - ip address 192.168.0.1 255.255.255.0 + - ipv6 address fd5d:12c9:2201:1::1/64 +""" + + +import re +from copy import deepcopy +from ansible.module_utils._text import to_text +from ansible.module_utils.basic import AnsibleModule, env_fallback +from ansible.module_utils.connection import exec_command +from ansible_collections.community.general.plugins.module_utils.network.icx.icx import get_config, load_config +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.config import NetworkConfig +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import remove_default_spec +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import is_netmask, is_masklen, to_netmask, to_masklen + + +def validate_ipv4(value, module): + if value: + address = value.split('/') + if len(address) != 2: + module.fail_json(msg='address format is <ipv4 address>/<mask>, got invalid format %s' % value) + else: + if not is_masklen(address[1]): + module.fail_json(msg='invalid value for mask: %s, mask should be in range 0-32' % address[1]) + + +def validate_ipv6(value, module): + if value: + address = value.split('/') + if len(address) != 2: + module.fail_json(msg='address format is <ipv6 address>/<mask>, got invalid format %s' % value) + else: + if not 0 <= int(address[1]) <= 128: + module.fail_json(msg='invalid value for mask: %s, mask should be in range 0-128' % address[1]) + + +def validate_param_values(module, obj, param=None): + if param is None: + param = module.params + for key in obj: + validator = globals().get('validate_%s' % key) + if callable(validator): + validator(param.get(key), module) + + +def map_params_to_obj(module): + obj = [] + + aggregate = module.params.get('aggregate') + if aggregate: + for item in aggregate: + for key in item: + if item.get(key) is None: + item[key] = module.params[key] + + validate_param_values(module, item, item) + obj.append(item.copy()) + else: + obj.append({ + 'name': module.params['name'], + 'ipv4': module.params['ipv4'], + 'ipv6': module.params['ipv6'], + 'state': module.params['state'], + 'replace': module.params['replace'], + 'mode': module.params['mode'], + 'secondary': module.params['secondary'], + }) + + # validate the single entry itself, not the enclosing list + validate_param_values(module, obj[0]) + + return obj + + +def parse_config_argument(configobj, name, arg=None): + cfg = configobj['interface %s' % name] + cfg = '\n'.join(cfg.children) + + values = [] + matches = re.finditer(r'%s (.+)$' % arg, cfg, re.M) + for match in matches: + match_str = match.group(1).strip() + if arg == 'ipv6 address': + values.append(match_str) + else: + values = match_str + break + + return values or None + + +def search_obj_in_list(name, lst): + for o in lst: + if o['name'] == name: + return o + + return None + + +def map_config_to_obj(module): + compare = module.params['check_running_config'] + config = get_config(module, flags=['| begin interface'], compare=compare) + configobj = NetworkConfig(indent=1, contents=config) + + match = re.findall(r'^interface (\S+ \S+)', config, re.M) + if not match: + return list() + + instances = list() + + for item in set(match): + ipv4 = parse_config_argument(configobj, item, 'ip address') + if ipv4: + address = ipv4.strip().split(' ') + if len(address) == 2 and is_netmask(address[1]): + ipv4 = '{0}/{1}'.format(address[0], to_text(to_masklen(address[1]))) + obj = { + 'name': item, + 'ipv4': ipv4, + 'ipv6': parse_config_argument(configobj, item, 'ipv6 address'), + 'state': 'present' + } + instances.append(obj)
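# ---------------------------------------------------------------------------
# map_config_to_obj() above relies on NetworkConfig to group indented config
# lines under their "interface ..." parent before parse_config_argument()
# extracts values. That grouping can be approximated with plain state
# tracking (group_interface_blocks() is illustrative; the real NetworkConfig
# also handles deeper nesting and banners):

import re

def group_interface_blocks(config_text):
    blocks, current = {}, None
    for line in config_text.splitlines():
        match = re.match(r'^interface (\S+ \S+)', line)
        if match:
            current = match.group(1)
            blocks[current] = []
        elif line.startswith(' ') and current:
            blocks[current].append(line.strip())
        else:
            current = None
    return blocks

# Example: group_interface_blocks('interface ethernet 1/1/1\n ip address 10.0.0.1 255.0.0.0')
# returns {'ethernet 1/1/1': ['ip address 10.0.0.1 255.0.0.0']}
# ---------------------------------------------------------------------------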
+ + return instances + + +def map_obj_to_commands(updates, module): + commands = list() + want, have = updates + for w in want: + name = w['name'] + ipv4 = w['ipv4'] + ipv6 = w['ipv6'] + state = w['state'] + if 'replace' in w: + replace = w['replace'] == 'yes' + else: + replace = False + if w['mode'] is not None: + mode = ' ' + w['mode'] + else: + mode = '' + if w['secondary'] is not None: + secondary = w['secondary'] == 'yes' + else: + secondary = False + + interface = 'interface ' + name + commands.append(interface) + + obj_in_have = search_obj_in_list(name, have) + if state == 'absent' and have == []: + if ipv4: + address = ipv4.split('/') + if len(address) == 2: + ipv4 = '{addr} {mask}'.format(addr=address[0], mask=to_netmask(address[1])) + commands.append('no ip address {ip}'.format(ip=ipv4)) + if ipv6: + commands.append('no ipv6 address {ip}'.format(ip=ipv6)) + + elif state == 'absent' and obj_in_have: + if obj_in_have['ipv4']: + if ipv4: + address = ipv4.split('/') + if len(address) == 2: + ipv4 = '{addr} {mask}'.format(addr=address[0], mask=to_netmask(address[1])) + commands.append('no ip address {ip}'.format(ip=ipv4)) + if obj_in_have['ipv6']: + if ipv6: + commands.append('no ipv6 address {ip}'.format(ip=ipv6)) + + elif state == 'present': + if ipv4: + if obj_in_have is None or obj_in_have.get('ipv4') is None or ipv4 != obj_in_have['ipv4']: + address = ipv4.split('/') + if len(address) == 2: + ipv4 = '{0} {1}'.format(address[0], to_netmask(address[1])) + commands.append('ip address %s%s%s%s' % (format(ipv4), mode, ' replace' if (replace) else '', ' secondary' if (secondary) else '')) + + if ipv6: + if obj_in_have is None or obj_in_have.get('ipv6') is None or ipv6.lower() not in [addr.lower() for addr in obj_in_have['ipv6']]: + commands.append('ipv6 address {ip}'.format(ip=ipv6)) + + if commands[-1] == interface: + commands.pop(-1) + else: + commands.append("exit") + + return commands + + +def main(): + """ main entry point for module execution + """ + element_spec = dict( + name=dict(), + ipv4=dict(), + ipv6=dict(), + replace=dict(choices=['yes', 'no']), + mode=dict(choices=['dynamic', 'ospf-ignore', 'ospf-passive']), + secondary=dict(choices=['yes', 'no']), + check_running_config=dict(default=True, type='bool', fallback=(env_fallback, ['ANSIBLE_CHECK_ICX_RUNNING_CONFIG'])), + state=dict(default='present', + choices=['present', 'absent']), + ) + + aggregate_spec = deepcopy(element_spec) + aggregate_spec['name'] = dict(required=True) + + remove_default_spec(aggregate_spec) + + argument_spec = dict( + aggregate=dict(type='list', elements='dict', options=aggregate_spec) + ) + + argument_spec.update(element_spec) + + required_one_of = [['name', 'aggregate']] + mutually_exclusive = [['name', 'aggregate'], ['secondary', 'replace'], ['secondary', 'mode']] + module = AnsibleModule(argument_spec=argument_spec, + required_one_of=required_one_of, + mutually_exclusive=mutually_exclusive, + supports_check_mode=True) + + warnings = list() + + result = {'changed': False} + exec_command(module, 'skip') + want = map_params_to_obj(module) + have = map_config_to_obj(module) + commands = map_obj_to_commands((want, have), module) + + if commands: + if not module.check_mode: + resp = load_config(module, commands) + warnings.extend((out for out in resp if out)) + + result['changed'] = True + + if warnings: + result['warnings'] = warnings + + result['commands'] = commands + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/icx/icx_linkagg.py 
b/plugins/modules/network/icx/icx_linkagg.py new file mode 100644 index 0000000000..82d2b952be --- /dev/null +++ b/plugins/modules/network/icx/icx_linkagg.py @@ -0,0 +1,327 @@ +#!/usr/bin/python +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: icx_linkagg +author: "Ruckus Wireless (@Commscope)" +short_description: Manage link aggregation groups on Ruckus ICX 7000 series switches +description: + - This module provides declarative management of link aggregation groups + on Ruckus ICX network devices. +notes: + - Tested against ICX 10.1. + - For information on using ICX platform, see L(the ICX OS Platform Options guide,../network/user_guide/platform_icx.html). +options: + group: + description: + - Channel-group number for the port-channel + Link aggregation group. Range 1-255 or set to 'auto' to auto-generate a LAG ID + type: int + name: + description: + - Name of the LAG + type: str + mode: + description: + - Mode of the link aggregation group. + type: str + choices: ['dynamic', 'static'] + members: + description: + - List of port members or ranges of the link aggregation group. + type: list + state: + description: + - State of the link aggregation group. + type: str + default: present + choices: ['present', 'absent'] + check_running_config: + description: + - Check running configuration. This can be set as environment variable. + Module will use the environment variable value (default: True), unless it is overridden by specifying it as a module parameter. + type: bool + default: yes + aggregate: + description: + - List of link aggregation definitions. + type: list + suboptions: + group: + description: + - Channel-group number for the port-channel + Link aggregation group. Range 1-255 or set to 'auto' to auto-generate a LAG ID + type: int + name: + description: + - Name of the LAG + type: str + mode: + description: + - Mode of the link aggregation group. + type: str + choices: ['dynamic', 'static'] + members: + description: + - List of port members or ranges of the link aggregation group. + type: list + state: + description: + - State of the link aggregation group. + type: str + choices: ['present', 'absent'] + check_running_config: + description: + - Check running configuration. This can be set as environment variable. + Module will use the environment variable value (default: True), unless it is overridden by specifying it as a module parameter. + type: bool + purge: + description: + - Purge links not defined in the I(aggregate) parameter.
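# ---------------------------------------------------------------------------
# Member lists accept ranges such as "ethernet 1/1/1 to 1/1/6". A simplified
# re-implementation of the expansion that range_to_members() performs below,
# hard-coding the 1/1/x unit/slot exactly as the original does
# (expand_port_range() is an illustrative name):

import re

def expand_port_range(spec):
    match = re.match(r'ethe\S* 1/1/(\d+) to 1/1/(\d+)$', spec.strip())
    if not match:
        return [spec.strip()]  # single port, no range to expand
    start, end = int(match.group(1)), int(match.group(2))
    return ['ethernet 1/1/%d' % n for n in range(start, end + 1)]

# Example: expand_port_range('ethernet 1/1/1 to 1/1/3')
# returns ['ethernet 1/1/1', 'ethernet 1/1/2', 'ethernet 1/1/3']
# ---------------------------------------------------------------------------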
+ type: bool + default: no + +''' + +EXAMPLES = """ +- name: create static link aggregation group + icx_linkagg: + group: 10 + mode: static + name: LAG1 + +- name: create link aggregation group with auto id + icx_linkagg: + group: auto + mode: dynamic + name: LAG2 + +- name: delete link aggregation group + icx_linkagg: + group: 10 + state: absent + +- name: Set members to LAG + icx_linkagg: + group: 200 + mode: static + members: + - ethernet 1/1/1 to 1/1/6 + - ethernet 1/1/10 + +- name: Remove links other than LAG id 100 and 3 using purge + icx_linkagg: + aggregate: + - { group: 3} + - { group: 100} + purge: true +""" + +RETURN = """ +commands: + description: The list of configuration mode commands to send to the device + returned: always, except for the platforms that use Netconf transport to manage the device. + type: list + sample: + - lag LAG1 dynamic id 11 + - ports ethernet 1/1/1 to 1/1/6 + - no ports ethernet 1/1/10 + - no lag LAG1 dynamic id 12 +""" + + +import re +from copy import deepcopy + +from ansible.module_utils._text import to_text +from ansible.module_utils.basic import AnsibleModule, env_fallback +from ansible.module_utils.connection import ConnectionError, exec_command +from ansible_collections.community.general.plugins.module_utils.network.icx.icx import run_commands, get_config, load_config +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.config import CustomNetworkConfig +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import remove_default_spec + + +def range_to_members(ranges, prefix=""): + match = re.findall(r'(ethe[a-z]* [0-9]/[0-9]/[0-9]+)( to [0-9]/[0-9]/[0-9]+)?', ranges) + members = list() + for m in match: + start, end = m + if end == '': + start = start.replace("ethe ", "ethernet ") + members.append("%s%s" % (prefix, start)) + else: + start_tmp = re.search(r'[0-9]/[0-9]/([0-9]+)', start) + end_tmp = re.search(r'[0-9]/[0-9]/([0-9]+)', end) + start = int(start_tmp.group(1)) + end = int(end_tmp.group(1)) + 1 + for num in range(start, end): + members.append("%sethernet 1/1/%s" % (prefix, num)) + return members + + +def map_config_to_obj(module): + objs = dict() + compare = module.params['check_running_config'] + config = get_config(module, None, compare=compare) + obj = None + for line in config.split('\n'): + cfg_line = line.strip() + match1 = re.search(r'lag (\S+) (\S+) id (\S+)', cfg_line, re.M) + if match1: + obj = dict() + obj['name'] = match1.group(1) + obj['mode'] = match1.group(2) + obj['group'] = match1.group(3) + obj['state'] = 'present' + obj['members'] = list() + else: + match2 = re.search(r'ports .*', cfg_line, re.M) + if match2 and obj is not None: + obj['members'].extend(range_to_members(match2.group(0))) + elif obj is not None: + objs[obj['group']] = obj + obj = None + return objs + + +def map_params_to_obj(module): + obj = [] + + aggregate = module.params.get('aggregate') + if aggregate: + for item in aggregate: + for key in item: + if item.get(key) is None: + item[key] = module.params[key] + d = item.copy() + d['group'] = str(d['group']) + obj.append(d) + else: + obj.append({ + 'group': str(module.params['group']), + 'mode': module.params['mode'], + 'members': module.params['members'], + 'state': module.params['state'], + 'name': module.params['name'] + }) + + return obj + + +def search_obj_in_list(group, lst): + for o in lst: + if o['group'] == group: + return o + return None + + +def is_member(member, lst): + for li in lst: + ml = range_to_members(li) + if member in ml: + return True + return
False + + +def map_obj_to_commands(updates, module): + commands = list() + want, have = updates + purge = module.params['purge'] + + for w in want: + if have == {} and w['state'] == 'absent': + commands.append("%slag %s %s id %s" % ('no ' if w['state'] == 'absent' else '', w['name'], w['mode'], w['group'])) + elif have.get(w['group']) is None: + commands.append("%slag %s %s id %s" % ('no ' if w['state'] == 'absent' else '', w['name'], w['mode'], w['group'])) + if(w.get('members') is not None and w['state'] == 'present'): + for m in w['members']: + commands.append("ports %s" % (m)) + if w['state'] == 'present': + commands.append("exit") + else: + commands.append("%slag %s %s id %s" % ('no ' if w['state'] == 'absent' else '', w['name'], w['mode'], w['group'])) + if(w.get('members') is not None and w['state'] == 'present'): + for m in have[w['group']]['members']: + if not is_member(m, w['members']): + commands.append("no ports %s" % (m)) + for m in w['members']: + sm = range_to_members(ranges=m) + for smm in sm: + if smm not in have[w['group']]['members']: + commands.append("ports %s" % (smm)) + + if w['state'] == 'present': + commands.append("exit") + if purge: + for h in have: + if search_obj_in_list(have[h]['group'], want) is None: + commands.append("no lag %s %s id %s" % (have[h]['name'], have[h]['mode'], have[h]['group'])) + return commands + + +def main(): + element_spec = dict( + group=dict(type='int'), + name=dict(type='str'), + mode=dict(choices=['dynamic', 'static']), + members=dict(type='list'), + state=dict(default='present', + choices=['present', 'absent']), + check_running_config=dict(default=True, type='bool', fallback=(env_fallback, ['ANSIBLE_CHECK_ICX_RUNNING_CONFIG'])) + ) + + aggregate_spec = deepcopy(element_spec) + aggregate_spec['group'] = dict(required=True, type='int') + + required_one_of = [['group', 'aggregate']] + required_together = [['name', 'group']] + mutually_exclusive = [['group', 'aggregate']] + + remove_default_spec(aggregate_spec) + + argument_spec = dict( + aggregate=dict(type='list', elements='dict', options=aggregate_spec, required_together=required_together), + purge=dict(default=False, type='bool') + ) + + argument_spec.update(element_spec) + + module = AnsibleModule(argument_spec=argument_spec, + required_one_of=required_one_of, + required_together=required_together, + mutually_exclusive=mutually_exclusive, + supports_check_mode=True) + + warnings = list() + result = {'changed': False} + exec_command(module, 'skip') + if warnings: + result['warnings'] = warnings + + want = map_params_to_obj(module) + have = map_config_to_obj(module) + commands = map_obj_to_commands((want, have), module) + + result["commands"] = commands + + if commands: + if not module.check_mode: + load_config(module, commands) + result['changed'] = True + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/icx/icx_lldp.py b/plugins/modules/network/icx/icx_lldp.py new file mode 100644 index 0000000000..955ebc4553 --- /dev/null +++ b/plugins/modules/network/icx/icx_lldp.py @@ -0,0 +1,183 @@ +#!/usr/bin/python +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: icx_lldp +author: "Ruckus Wireless (@Commscope)" +short_description: 
Manage LLDP configuration on Ruckus ICX 7000 series switches +description: + - This module provides declarative management of LLDP service on ICX network devices. +notes: + - Tested against ICX 10.1. + - For information on using ICX platform, see L(the ICX OS Platform Options guide,../network/user_guide/platform_icx.html). +options: + interfaces: + description: + - specify interfaces + suboptions: + name: + description: + - List of ethernet ports to enable lldp. To add a range of ports use 'to' keyword. See the example. + type: list + state: + description: + - State of lldp configuration for interfaces + type: str + choices: ['present', 'absent', 'enabled', 'disabled'] + type: list + check_running_config: + description: + - Check running configuration. This can be set as environment variable. + Module will use environment variable value(default:True), unless it is overridden, by specifying it as module parameter. + type: bool + default: yes + state: + description: + - Enables the receipt and transmission of Link Layer Discovery Protocol (LLDP) globally. + type: str + choices: ['present', 'absent', 'enabled', 'disabled'] +''' + +EXAMPLES = """ +- name: Disable LLDP + icx_lldp: + state: absent + +- name: Enable LLDP + icx_lldp: + state: present + +- name: Disable LLDP on ports 1/1/1 - 1/1/10, 1/1/20 + icx_lldp: + interfaces: + - name: + - ethernet 1/1/1 to 1/1/10 + - ethernet 1/1/20 + state: absent + state: present + +- name: Enable LLDP on ports 1/1/5 - 1/1/10 + icx_lldp: + interfaces: + - name: + - ethernet 1/1/1 to 1/1/10 +""" + +RETURN = """ +commands: + description: The list of configuration mode commands to send to the device + returned: always, except for the platforms that use Netconf transport to manage the device. + type: list + sample: + - lldp run + - no lldp run +""" + +from ansible.module_utils.basic import AnsibleModule, env_fallback +from ansible_collections.community.general.plugins.module_utils.network.icx.icx import load_config, run_commands + + +def has_lldp(module): + run_commands(module, ['skip']) + output = run_commands(module, ['show lldp']) + is_lldp_enable = False + if len(output) > 0 and "LLDP is not running" not in output[0]: + is_lldp_enable = True + + return is_lldp_enable + + +def map_obj_to_commands(module, commands): + interfaces = module.params.get('interfaces') + for item in interfaces: + state = item.get('state') + if state == 'present': + for port in item.get('name'): + if 'all' in port: + module.fail_json(msg='cannot enable on all the ports') + else: + commands.append('lldp enable ports {0}'.format(str(port))) + elif state == 'absent': + for port in item.get('name'): + if 'all' in port: + module.fail_json(msg='cannot enable on all the ports') + else: + commands.append('no lldp enable ports {0}'.format(str(port))) + + +def main(): + """ main entry point for module execution + """ + interfaces_spec = dict( + name=dict(type='list'), + state=dict(choices=['present', 'absent', + 'enabled', 'disabled']) + ) + + argument_spec = dict( + interfaces=dict(type='list', elements='dict', options=interfaces_spec), + state=dict(choices=['present', 'absent', + 'enabled', 'disabled']), + check_running_config=dict(default=True, type='bool', fallback=(env_fallback, ['ANSIBLE_CHECK_ICX_RUNNING_CONFIG'])) + ) + + module = AnsibleModule(argument_spec=argument_spec, + supports_check_mode=True) + + warnings = list() + + result = {'changed': False} + + if warnings: + result['warnings'] = warnings + + if module.params['check_running_config'] is False: + HAS_LLDP = None + else: + 
HAS_LLDP = has_lldp(module) + + commands = [] + state = module.params['state'] + + if state is None: + if HAS_LLDP: + map_obj_to_commands(module, commands) + else: + module.fail_json(msg='LLDP is not running') + else: + if state == 'absent' and HAS_LLDP is None: + commands.append('no lldp run') + + if state == 'absent' and HAS_LLDP: + commands.append('no lldp run') + + elif state == 'present': + if not HAS_LLDP: + commands.append('lldp run') + if module.params.get('interfaces'): + map_obj_to_commands(module, commands) + + result['commands'] = commands + + if commands: + if not module.check_mode: + load_config(module, commands) + + result['changed'] = True + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/icx/icx_logging.py b/plugins/modules/network/icx/icx_logging.py new file mode 100644 index 0000000000..847aef6711 --- /dev/null +++ b/plugins/modules/network/icx/icx_logging.py @@ -0,0 +1,581 @@ +#!/usr/bin/python +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: icx_logging +author: "Ruckus Wireless (@Commscope)" +short_description: Manage logging on Ruckus ICX 7000 series switches +description: + - This module provides declarative management of logging + on Ruckus ICX 7000 series switches. +notes: + - Tested against ICX 10.1. + - For information on using ICX platform, see L(the ICX OS Platform Options guide,../network/user_guide/platform_icx.html). +options: + dest: + description: + - Destination of the logs. + choices: ['on', 'host', 'console', 'buffered', 'persistence', 'rfc5424'] + type: str + name: + description: + - ipv4 address/ipv6 address/name of syslog server. + type: str + udp_port: + description: + - UDP port of destination host(syslog server). + type: str + facility: + description: + - Specifies log facility to log messages from the device. + choices: ['auth','cron','daemon','kern','local0', 'local1', 'local2', 'local3', 'local4', 'local5', 'local6', 'local7', 'user', + 'lpr','mail','news','syslog','sys9','sys10','sys11','sys12','sys13','sys14','user','uucp'] + type: str + level: + description: + - Specifies the message level. + type: list + choices: ['alerts', 'critical', 'debugging', 'emergencies', 'errors', 'informational', + 'notifications', 'warnings'] + aggregate: + description: + - List of logging definitions. + type: list + suboptions: + dest: + description: + - Destination of the logs. + choices: ['on', 'host', 'console', 'buffered', 'persistence', 'rfc5424'] + type: str + name: + description: + - ipv4 address/ipv6 address/name of syslog server. + type: str + udp_port: + description: + - UDP port of destination host(syslog server). + type: str + facility: + description: + - Specifies log facility to log messages from the device. + choices: ['auth','cron','daemon','kern','local0', 'local1', 'local2', 'local3', 'local4', 'local5', 'local6', 'local7', 'user', + 'lpr','mail','news','syslog','sys9','sys10','sys11','sys12','sys13','sys14','user','uucp'] + type: str + level: + description: + - Specifies the message level. 
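# ---------------------------------------------------------------------------
# For dest=buffered the module reconciles the configured message levels with
# set arithmetic (see diff_in_list() further below): levels wanted but not
# present are added, levels present but not wanted are removed. Illustration
# with plain sets (level_changes() is an illustrative name):

def level_changes(want_levels, have_levels):
    want, have = set(want_levels), set(have_levels)
    return want - have, have - want  # (levels to add, levels to remove)

# Example: level_changes(['errors', 'warnings'], ['warnings', 'debugging'])
# returns ({'errors'}, {'debugging'})
# ---------------------------------------------------------------------------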
+ type: list + choices: ['alerts', 'critical', 'debugging', 'emergencies', 'errors', 'informational', + 'notifications', 'warnings'] + state: + description: + - State of the logging configuration. + choices: ['present', 'absent'] + type: str + check_running_config: + description: + - Check running configuration. This can be set as environment variable. + Module will use environment variable value(default:True), unless it is overridden, by specifying it as module parameter. + type: bool + state: + description: + - State of the logging configuration. + default: present + choices: ['present', 'absent'] + type: str + check_running_config: + description: + - Check running configuration. This can be set as environment variable. + Module will use environment variable value(default:True), unless it is overridden, by specifying it as module parameter. + type: bool + default: yes +''' + +EXAMPLES = """ +- name: Configure host logging. + icx_logging: + dest: host + name: 172.16.0.1 + udp_port: 5555 +- name: Remove host logging configuration. + icx_logging: + dest: host + name: 172.16.0.1 + udp_port: 5555 + state: absent +- name: Disables the real-time display of syslog messages. + icx_logging: + dest: console + state: absent +- name: Enables local syslog logging. + icx_logging: + dest : on + state: present +- name: configure buffer level. + icx_logging: + dest: buffered + level: critical +- name: Configure logging using aggregate + icx_logging: + aggregate: + - { dest: buffered, level: ['notifications','errors'] } +- name: remove logging using aggregate + icx_logging: + aggregate: + - { dest: console } + - { dest: host, name: 172.16.0.1, udp_port: 5555 } + state: absent +""" + +RETURN = """ +commands: + description: The list of configuration mode commands to send to the device + returned: always + type: list + sample: + - logging host 172.16.0.1 + - logging console +""" + +import re +from copy import deepcopy +from ansible.module_utils.basic import AnsibleModule, env_fallback +from ansible.module_utils.connection import Connection, ConnectionError, exec_command +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import remove_default_spec, validate_ip_v6_address +from ansible_collections.community.general.plugins.module_utils.network.icx.icx import get_config, load_config + + +def search_obj_in_list(name, lst): + for o in lst: + if o['name'] == name: + return o + + +def diff_in_list(want, have): + adds = set() + removes = set() + for w in want: + if w['dest'] == 'buffered': + for h in have: + if h['dest'] == 'buffered': + adds = w['level'] - h['level'] + removes = h['level'] - w['level'] + return adds, removes + return adds, removes + + +def map_obj_to_commands(updates): + dest_group = ('host', 'console', 'persistence', 'enable') + commands = list() + want, have = updates + + for w in want: + dest = w['dest'] + name = w['name'] + level = w['level'] + state = w['state'] + udp_port = w['udp_port'] + facility = w['facility'] + del w['state'] + del w['facility'] + + facility_name = '' + facility_level = '' + if name is not None and validate_ip_v6_address(name): + name = 'ipv6 ' + name + + if facility: + for item in have: + if item['dest'] == 'facility': + facility_name = item['dest'] + facility_level = item['facility'] + + if state == 'absent': + if have == []: + if facility: + commands.append('no logging facility') + + if dest == 'buffered': + for item in have: + if item['dest'] == 'buffered': + want_level = level + have_level = item['level'] + for item in want_level: + 
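+                        # Each requested severity is removed with its own
+                        # 'no logging buffered <level>' command, since every
+                        # buffered level is a separate configuration line.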
commands.append('no logging buffered {0}'.format(item)) + + if dest == 'host': + if name and udp_port: + commands.append('no logging host {0} udp-port {1}'.format(name, udp_port)) + elif name: + commands.append('no logging host {0}'.format(name)) + else: + if dest == 'rfc5424': + commands.append('no logging enable {0}'.format(dest)) + else: + if dest != 'buffered': + commands.append('no logging {0}'.format(dest)) + + if facility: + if facility_name == 'facility' and facility_level != 'user': + commands.append('no logging facility') + + if dest == 'buffered': + for item in have: + if item['dest'] == 'buffered': + want_level = level + have_level = item['level'] + for item in want_level: + if item in have_level: + commands.append('no logging buffered {0}'.format(item)) + + if w in have: + if dest == 'host': + if name and udp_port: + commands.append('no logging host {0} udp-port {1}'.format(name, udp_port)) + elif name: + commands.append('no logging host {0}'.format(name)) + else: + if dest == 'rfc5424': + commands.append('no logging enable {0}'.format(dest)) + else: + if dest != 'buffered': + commands.append('no logging {0}'.format(dest)) + + if state == 'present': + if facility: + if facility != facility_level: + commands.append('logging facility {0}'.format(facility)) + if w not in have: + if dest == 'host': + if name and udp_port: + commands.append('logging host {0} udp-port {1}'.format(name, udp_port)) + elif name: + commands.append('logging host {0}'.format(name)) + elif dest == 'buffered': + adds, removes = diff_in_list(want, have) + for item in adds: + commands.append('logging buffered {0}'.format(item)) + for item in removes: + commands.append('no logging buffered {0}'.format(item)) + elif dest == 'rfc5424': + commands.append('logging enable {0}'.format(dest)) + else: + commands.append('logging {0}'.format(dest)) + + return commands + + +def parse_port(line, dest): + port = None + if dest == 'host': + match = re.search(r'logging host \S+\s+udp-port\s+(\d+)', line, re.M) + if match: + port = match.group(1) + else: + match_port = re.search(r'logging host ipv6 \S+\s+udp-port\s+(\d+)', line, re.M) + if match_port: + port = match_port.group(1) + return port + + +def parse_name(line, dest): + name = None + if dest == 'host': + match = re.search(r'logging host (\S+)', line, re.M) + if match: + if match.group(1) == 'ipv6': + ipv6_add = re.search(r'logging host ipv6 (\S+)', line, re.M) + name = ipv6_add.group(1) + else: + name = match.group(1) + + return name + + +def parse_address(line, dest): + if dest == 'host': + match = re.search(r'^logging host ipv6 (\S+)', line.strip(), re.M) + if match: + return True + return False + + +def map_config_to_obj(module): + obj = [] + facility = '' + addr6 = False + dest_group = ('host', 'console', 'buffered', 'persistence', 'enable') + dest_level = ('alerts', 'critical', 'debugging', 'emergencies', 'errors', 'informational', 'notifications', 'warnings') + buff_level = list() + if module.params['check_running_config'] is False: + return [] + data = get_config(module, flags=['| include logging']) + facility_match = re.search(r'^logging facility (\S+)', data, re.M) + if facility_match: + facility = facility_match.group(1) + obj.append({ + 'dest': 'facility', + 'facility': facility + }) + else: + obj.append({ + 'dest': 'facility', + 'facility': 'user' + }) + for line in data.split('\n'): + match = re.search(r'^logging (\S+)', line.strip(), re.M) + if match: + + if match.group(1) in dest_group: + dest = match.group(1) + if dest == 'host': + obj.append({ + 
'dest': dest, + 'name': parse_name(line.strip(), dest), + 'udp_port': parse_port(line, dest), + 'level': None, + 'addr6': parse_address(line, dest) + + }) + elif dest == 'buffered': + obj.append({ + 'dest': dest, + 'level': None, + 'name': None, + 'udp_port': None, + 'addr6': False + }) + else: + if dest == 'enable': + dest = 'rfc5424' + obj.append({ + 'dest': dest, + 'level': None, + 'name': None, + 'udp_port': None, + 'addr6': False + }) + else: + + ip_match = re.search(r'^no logging buffered (\S+)', line, re.M) + if ip_match: + dest = 'buffered' + buff_level.append(ip_match.group(1)) + if 'no logging on' not in data: + obj.append({ + 'dest': 'on', + 'level': None, + 'name': None, + 'udp_port': None, + 'addr6': False + + }) + levels = set() + for items in dest_level: + if items not in buff_level: + levels.add(items) + obj.append({ + 'dest': 'buffered', + 'level': levels, + 'name': None, + 'udp_port': None, + 'addr6': False + + }) + return obj + + +def count_terms(check, param=None): + count = 0 + for term in check: + if param[term] is not None: + count += 1 + return count + + +def check_required_if(module, spec, param): + for sp in spec: + missing = [] + max_missing_count = 0 + is_one_of = False + if len(sp) == 4: + key, val, requirements, is_one_of = sp + else: + key, val, requirements = sp + + if is_one_of: + max_missing_count = len(requirements) + term = 'any' + else: + term = 'all' + + if key in param and param[key] == val: + for check in requirements: + count = count_terms((check,), param) + if count == 0: + missing.append(check) + if len(missing) and len(missing) >= max_missing_count: + msg = "%s is %s but %s of the following are missing: %s" % (key, val, term, ', '.join(missing)) + module.fail_json(msg=msg) + + +def map_params_to_obj(module, required_if=None): + obj = [] + addr6 = False + aggregate = module.params.get('aggregate') + + if aggregate: + for item in aggregate: + if item['name'] is not None and validate_ip_v6_address(item['name']): + addr6 = True + for key in item: + if item.get(key) is None: + item[key] = module.params[key] + + check_required_if(module, required_if, item) + item.update({'addr6': addr6}) + + d = item.copy() + d['level'] = set(d['level']) if d['level'] is not None else None + if d['dest'] != 'host': + d['name'] = None + d['udp_port'] = None + + if d['dest'] != 'buffered': + d['level'] = None + del d['check_running_config'] + obj.append(d) + + else: + if module.params['name'] is not None and validate_ip_v6_address(module.params['name']): + addr6 = True + if module.params['dest'] != 'host': + module.params['name'] = None + module.params['udp_port'] = None + + if module.params['dest'] != 'buffered': + module.params['level'] = None + + obj.append({ + 'dest': module.params['dest'], + 'name': module.params['name'], + 'udp_port': module.params['udp_port'], + 'level': set(module.params['level']) if module.params['level'] else None, + 'facility': module.params['facility'], + 'state': module.params['state'], + 'addr6': addr6 + }) + return obj + + +def main(): + """ main entry point for module execution + """ + element_spec = dict( + dest=dict( + type='str', + choices=[ + 'on', + 'host', + 'console', + 'buffered', + 'persistence', + 'rfc5424']), + name=dict( + type='str'), + udp_port=dict(), + level=dict( + type='list', + choices=[ + 'alerts', + 'critical', + 'debugging', + 'emergencies', + 'errors', + 'informational', + 'notifications', + 'warnings']), + facility=dict( + type='str', + choices=[ + 'auth', + 'cron', + 'daemon', + 'kern', + 'local0', + 'local1', + 
'local2', + 'local3', + 'local4', + 'local5', + 'local6', + 'local7', + 'user', + 'lpr', + 'mail', + 'news', + 'syslog', + 'sys9', + 'sys10', + 'sys11', + 'sys12', + 'sys13', + 'sys14', + 'user', + 'uucp']), + state=dict( + default='present', + choices=[ + 'present', + 'absent']), + check_running_config=dict(default=True, type='bool', fallback=(env_fallback, ['ANSIBLE_CHECK_ICX_RUNNING_CONFIG']))) + + aggregate_spec = deepcopy(element_spec) + + remove_default_spec(aggregate_spec) + + argument_spec = dict( + aggregate=dict(type='list', elements='dict', options=aggregate_spec), + ) + + argument_spec.update(element_spec) + required_if = [('dest', 'host', ['name']), + ('dest', 'buffered', ['level'])] + module = AnsibleModule(argument_spec=argument_spec, + required_if=required_if, + supports_check_mode=True) + + result = {'changed': False} + warnings = list() + + exec_command(module, 'skip') + if warnings: + result['warnings'] = warnings + + want = map_params_to_obj(module, required_if=required_if) + have = map_config_to_obj(module) + result['want'] = want + result['have'] = have + + commands = map_obj_to_commands((want, have)) + result['commands'] = commands + if commands: + if not module.check_mode: + load_config(module, commands) + result['changed'] = True + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/icx/icx_ping.py b/plugins/modules/network/icx/icx_ping.py new file mode 100644 index 0000000000..828570e6f6 --- /dev/null +++ b/plugins/modules/network/icx/icx_ping.py @@ -0,0 +1,269 @@ +#!/usr/bin/python +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: icx_ping +author: "Ruckus Wireless (@Commscope)" +short_description: Tests reachability using ping from Ruckus ICX 7000 series switches +description: + - Tests reachability using ping from switch to a remote destination. +notes: + - Tested against ICX 10.1 +options: + count: + description: + - Number of packets to send. Default is 1. + type: int + dest: + description: + - ip-addr | host-name | vrf vrf-name | ipv6 [ ipv6-addr | host-name | vrf vrf-name] (resolvable by switch) of the remote node. + required: true + type: str + timeout: + description: + - Specifies the time, in milliseconds for which the device waits for a reply from the pinged device. + The value can range from 1 to 4294967296. The default is 5000 (5 seconds). + type: int + ttl: + description: + - Specifies the time to live as a maximum number of hops. The value can range from 1 to 255. The default is 64. + type: int + size: + description: + - Specifies the size of the ICMP data portion of the packet, in bytes. This is the payload and does not include the header. + The value can range from 0 to 10000. The default is 16.. + type: int + source: + description: + - IP address to be used as the origin of the ping packets. + type: str + vrf: + description: + - Specifies the Virtual Routing and Forwarding (VRF) instance of the device to be pinged. + type: str + state: + description: + - Determines if the expected result is success or fail. 
+    type: str
+    choices: [ absent, present ]
+    default: present
+'''
+
+EXAMPLES = r'''
+- name: Test reachability to 10.10.10.10
+  icx_ping:
+    dest: 10.10.10.10
+
+- name: Test reachability to ipv6 address from source with timeout
+  icx_ping:
+    dest: ipv6 2001:cdba:0000:0000:0000:0000:3257:9652
+    source: 10.1.1.1
+    timeout: 100000
+
+- name: Test reachability to 10.1.1.1 through vrf using 5 packets
+  icx_ping:
+    dest: 10.1.1.1
+    vrf: x.x.x.x
+    count: 5
+
+- name: Test unreachability to 10.40.40.40
+  icx_ping:
+    dest: 10.40.40.40
+    state: absent
+
+- name: Test reachability to ipv4 with ttl and packet size
+  icx_ping:
+    dest: 10.10.10.10
+    ttl: 20
+    size: 500
+'''
+
+RETURN = '''
+commands:
+  description: Show the command sent.
+  returned: always
+  type: list
+  sample: ["ping 10.40.40.40 count 20 source loopback0", "ping 10.40.40.40"]
+packet_loss:
+  description: Percentage of packets lost.
+  returned: always
+  type: str
+  sample: "0%"
+packets_rx:
+  description: Packets successfully received.
+  returned: always
+  type: int
+  sample: 20
+packets_tx:
+  description: Packets successfully transmitted.
+  returned: always
+  type: int
+  sample: 20
+rtt:
+  description: Show RTT stats.
+  returned: always
+  type: dict
+  sample: {"avg": 2, "max": 8, "min": 1}
+'''
+
+from ansible.module_utils._text import to_text
+from ansible_collections.community.general.plugins.module_utils.network.icx.icx import run_commands
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.connection import Connection, ConnectionError
+import re
+
+
+def build_ping(dest, count=None, source=None, timeout=None, ttl=None, size=None, vrf=None):
+    """
+    Function to build the command to send to the terminal for the switch
+    to execute. All args come from the module's unique params.
+    """
+
+    if vrf is not None:
+        cmd = "ping vrf {0} {1}".format(vrf, dest)
+    else:
+        cmd = "ping {0}".format(dest)
+
+    if count is not None:
+        cmd += " count {0}".format(str(count))
+
+    if timeout is not None:
+        cmd += " timeout {0}".format(str(timeout))
+
+    if ttl is not None:
+        cmd += " ttl {0}".format(str(ttl))
+
+    if size is not None:
+        cmd += " size {0}".format(str(size))
+
+    if source is not None:
+        cmd += " source {0}".format(source)
+
+    return cmd
+
+
+def parse_ping(ping_stats):
+    """
+    Function used to parse the statistical information from the ping response.
+    Example: "Success rate is 100 percent (5/5), round-trip min/avg/max=40/51/55 ms."
+    Returns the percent of packet loss, received packets, transmitted packets, and RTT dict.
+    """
+    if ping_stats.startswith('Success'):
+        rate_re = re.compile(r"^\w+\s+\w+\s+\w+\s+(?P<pct>\d+)\s+\w+\s+\((?P<rx>\d+)/(?P<tx>\d+)\)")
+        rtt_re = re.compile(r".*,\s+\S+\s+\S+=(?P<min>\d+)/(?P<avg>\d+)/(?P<max>\d+)\s+\w+\.+\s*$|.*\s*$")
+
+        rate = rate_re.match(ping_stats)
+        rtt = rtt_re.match(ping_stats)
+        return rate.group("pct"), rate.group("rx"), rate.group("tx"), rtt.groupdict()
+    else:
+        rate_re = re.compile(r"^Sending+\s+(?P<tx>\d+),")
+        rate = rate_re.match(ping_stats)
+        rtt = {'avg': 0, 'max': 0, 'min': 0}
+        return 0, 0, rate.group('tx'), rtt
+
+
+def validate_results(module, loss, results):
+    """
+    This function is used to validate whether the ping results were unexpected per "state" param.
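+    For example (illustrative values): state=present with 100% loss fails the
+    task with "Ping failed unexpectedly", while state=absent with any replies
+    (loss below 100) fails with "Ping succeeded unexpectedly"; every other
+    combination passes.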
+ """ + state = module.params["state"] + if state == "present" and loss == 100: + module.fail_json(msg="Ping failed unexpectedly", **results) + elif state == "absent" and loss < 100: + module.fail_json(msg="Ping succeeded unexpectedly", **results) + + +def validate_fail(module, responses): + if ("Success" in responses or "No reply" in responses) is False: + module.fail_json(msg=responses) + + +def validate_parameters(module, timeout, count): + if timeout and not 1 <= int(timeout) <= 4294967294: + module.fail_json(msg="bad value for timeout - valid range (1-4294967294)") + if count and not 1 <= int(count) <= 4294967294: + module.fail_json(msg="bad value for count - valid range (1-4294967294)") + + +def main(): + """ main entry point for module execution + """ + argument_spec = dict( + count=dict(type="int"), + dest=dict(type="str", required=True), + timeout=dict(type="int"), + ttl=dict(type="int"), + size=dict(type="int"), + source=dict(type="str"), + state=dict(type="str", choices=["absent", "present"], default="present"), + vrf=dict(type="str") + ) + + module = AnsibleModule(argument_spec=argument_spec) + + count = module.params["count"] + dest = module.params["dest"] + source = module.params["source"] + timeout = module.params["timeout"] + ttl = module.params["ttl"] + size = module.params["size"] + vrf = module.params["vrf"] + results = {} + warnings = list() + + if warnings: + results["warnings"] = warnings + + response = '' + try: + validate_parameters(module, timeout, count) + results["commands"] = [build_ping(dest, count, source, timeout, ttl, size, vrf)] + ping_results = run_commands(module, commands=results["commands"]) + ping_results_list = ping_results[0].split("\n") + + except ConnectionError as exc: + module.fail_json(msg=to_text(exc, errors='surrogate_then_replace')) + + validate_fail(module, ping_results[0]) + + stats = "" + statserror = '' + for line in ping_results_list: + if line.startswith('Sending'): + statserror = line + if line.startswith('Success'): + stats = line + elif line.startswith('No reply'): + stats = statserror + + success, rx, tx, rtt = parse_ping(stats) + loss = abs(100 - int(success)) + results["packet_loss"] = str(loss) + "%" + results["packets_rx"] = int(rx) + results["packets_tx"] = int(tx) + + # Convert rtt values to int + for k, v in rtt.items(): + if rtt[k] is not None: + rtt[k] = int(v) + + results["rtt"] = rtt + + validate_results(module, loss, results) + + module.exit_json(**results) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/icx/icx_static_route.py b/plugins/modules/network/icx/icx_static_route.py new file mode 100644 index 0000000000..51b6855bfa --- /dev/null +++ b/plugins/modules/network/icx/icx_static_route.py @@ -0,0 +1,314 @@ +#!/usr/bin/python +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: icx_static_route +author: "Ruckus Wireless (@Commscope)" +short_description: Manage static IP routes on Ruckus ICX 7000 series switches +description: + - This module provides declarative management of static + IP routes on Ruckus ICX network devices. +notes: + - Tested against ICX 10.1. + - For information on using ICX platform, see L(the ICX OS Platform Options guide,../network/user_guide/platform_icx.html). 
+options: + prefix: + description: + - Network prefix of the static route. + type: str + mask: + description: + - Network prefix mask of the static route. + type: str + next_hop: + description: + - Next hop IP of the static route. + type: str + admin_distance: + description: + - Admin distance of the static route. Range is 1 to 255. + type: int + aggregate: + description: List of static route definitions. + type: list + suboptions: + prefix: + description: + - Network prefix of the static route. + type: str + mask: + description: + - Network prefix mask of the static route. + type: str + next_hop: + description: + - Next hop IP of the static route. + type: str + admin_distance: + description: + - Admin distance of the static route. Range is 1 to 255. + type: int + state: + description: + - State of the static route configuration. + type: str + choices: ['present', 'absent'] + check_running_config: + description: + - Check running configuration. This can be set as environment variable. + Module will use environment variable value(default:True), unless it is overridden, by specifying it as module parameter. + type: bool + purge: + description: + - Purge routes not defined in the I(aggregate) parameter. + default: no + type: bool + state: + description: + - State of the static route configuration. + type: str + default: present + choices: ['present', 'absent'] + check_running_config: + description: + - Check running configuration. This can be set as environment variable. + Module will use environment variable value(default:True), unless it is overridden, by specifying it as module parameter. + type: bool + default: yes +''' + +EXAMPLES = """ +- name: configure static route + icx_static_route: + prefix: 192.168.2.0/24 + next_hop: 10.0.0.1 + +- name: remove configuration + icx_static_route: + prefix: 192.168.2.0 + mask: 255.255.255.0 + next_hop: 10.0.0.1 + state: absent + +- name: Add static route aggregates + icx_static_route: + aggregate: + - { prefix: 172.16.32.0, mask: 255.255.255.0, next_hop: 10.0.0.8 } + - { prefix: 172.16.33.0, mask: 255.255.255.0, next_hop: 10.0.0.8 } + +- name: remove static route aggregates + icx_static_route: + aggregate: + - { prefix: 172.16.32.0, mask: 255.255.255.0, next_hop: 10.0.0.8 } + - { prefix: 172.16.33.0, mask: 255.255.255.0, next_hop: 10.0.0.8 } + state: absent +""" + +RETURN = """ +commands: + description: The list of configuration mode commands to send to the device + returned: always + type: list + sample: + - ip route 192.168.2.0 255.255.255.0 10.0.0.1 +""" + + +from copy import deepcopy +import re + +from ansible.module_utils._text import to_text +from ansible.module_utils.basic import AnsibleModule, env_fallback +from ansible.module_utils.connection import ConnectionError +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import remove_default_spec +from ansible_collections.community.general.plugins.module_utils.network.icx.icx import get_config, load_config + +try: + from ipaddress import ip_network + HAS_IPADDRESS = True +except ImportError: + HAS_IPADDRESS = False + + +def map_obj_to_commands(want, have, module): + commands = list() + purge = module.params['purge'] + for w in want: + for h in have: + for key in ['prefix', 'mask', 'next_hop']: + if w[key] != h[key]: + break + else: + break + else: + h = None + + prefix = w['prefix'] + mask = w['mask'] + next_hop = w['next_hop'] + admin_distance = w.get('admin_distance') + if not admin_distance and h: + w['admin_distance'] = admin_distance = h['admin_distance'] + 
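+        # When the play omits admin_distance, the value already configured on the
+        # device is inherited so that an otherwise-unchanged route does not get
+        # rewritten with a different distance.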
state = w['state']
+        del w['state']
+
+        if state == 'absent' and have == []:
+            commands.append('no ip route %s %s %s' % (prefix, mask, next_hop))
+
+        if state == 'absent' and w in have:
+            commands.append('no ip route %s %s %s' % (prefix, mask, next_hop))
+        elif state == 'present' and w not in have:
+            if admin_distance:
+                commands.append('ip route %s %s %s distance %s' % (prefix, mask, next_hop, admin_distance))
+            else:
+                commands.append('ip route %s %s %s' % (prefix, mask, next_hop))
+    if purge:
+        commands = []
+        for h in have:
+            if h not in want:
+                commands.append('no ip route %s %s %s' % (h['prefix'], h['mask'], h['next_hop']))
+    return commands
+
+
+def map_config_to_obj(module):
+    obj = []
+    compare = module.params['check_running_config']
+    out = get_config(module, flags='| include ip route', compare=compare)
+
+    for line in out.splitlines():
+        splitted_line = line.split()
+        if len(splitted_line) not in (4, 5, 6):
+            continue
+        cidr = ip_network(to_text(splitted_line[2]))
+        prefix = str(cidr.network_address)
+        mask = str(cidr.netmask)
+        next_hop = splitted_line[3]
+        if len(splitted_line) == 6:
+            admin_distance = splitted_line[5]
+        else:
+            admin_distance = '1'
+
+        obj.append({
+            'prefix': prefix, 'mask': mask, 'next_hop': next_hop,
+            'admin_distance': admin_distance
+        })
+
+    return obj
+
+
+def prefix_length_parser(prefix, mask, module):
+    if '/' in prefix and mask is not None:
+        module.fail_json(msg='Ambiguous, specified both length and mask')
+    if '/' in prefix:
+        cidr = ip_network(to_text(prefix))
+        prefix = str(cidr.network_address)
+        mask = str(cidr.netmask)
+    return prefix, mask
+
+
+def map_params_to_obj(module, required_together=None):
+    keys = ['prefix', 'mask', 'next_hop', 'admin_distance', 'state']
+    obj = []
+
+    aggregate = module.params.get('aggregate')
+    if aggregate:
+        for item in aggregate:
+            route = item.copy()
+            for key in keys:
+                if route.get(key) is None:
+                    route[key] = module.params.get(key)
+
+            module._check_required_together(required_together, route)
+
+            prefix, mask = prefix_length_parser(route['prefix'], route['mask'], module)
+            route.update({'prefix': prefix, 'mask': mask})
+
+            obj.append(route)
+    else:
+        module._check_required_together(required_together, module.params)
+        prefix, mask = prefix_length_parser(module.params['prefix'], module.params['mask'], module)
+
+        obj.append({
+            'prefix': prefix,
+            'mask': mask,
+            'next_hop': module.params['next_hop'].strip(),
+            'admin_distance': module.params.get('admin_distance'),
+            'state': module.params['state'],
+        })
+
+    for route in obj:
+        if route['admin_distance']:
+            route['admin_distance'] = str(route['admin_distance'])
+
+    return obj
+
+
+def main():
+    """ main entry point for module execution
+    """
+    element_spec = dict(
+        prefix=dict(type='str'),
+        mask=dict(type='str'),
+        next_hop=dict(type='str'),
+        admin_distance=dict(type='int'),
+        state=dict(default='present', choices=['present', 'absent']),
+        check_running_config=dict(default=True, type='bool', fallback=(env_fallback, ['ANSIBLE_CHECK_ICX_RUNNING_CONFIG']))
+    )
+
+    aggregate_spec = deepcopy(element_spec)
+    aggregate_spec['prefix'] = dict(required=True)
+
+    remove_default_spec(aggregate_spec)
+
+    argument_spec = dict(
+        aggregate=dict(type='list', elements='dict', options=aggregate_spec),
+        purge=dict(default=False, type='bool')
+    )
+
+    argument_spec.update(element_spec)
+
+    required_one_of = [['aggregate', 'prefix']]
+    required_together = [['prefix', 'next_hop']]
+    mutually_exclusive = [['aggregate', 'prefix']]
+
+    module = AnsibleModule(argument_spec=argument_spec,
required_one_of=required_one_of, + mutually_exclusive=mutually_exclusive, + supports_check_mode=True) + + if not HAS_IPADDRESS: + module.fail_json(msg="ipaddress python package is required") + + warnings = list() + + result = {'changed': False} + if warnings: + result['warnings'] = warnings + + want = map_params_to_obj(module, required_together=required_together) + have = map_config_to_obj(module) + + commands = map_obj_to_commands(want, have, module) + result['commands'] = commands + + if commands: + if not module.check_mode: + load_config(module, commands) + + result['changed'] = True + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/icx/icx_system.py b/plugins/modules/network/icx/icx_system.py new file mode 100644 index 0000000000..ffda03fece --- /dev/null +++ b/plugins/modules/network/icx/icx_system.py @@ -0,0 +1,470 @@ +#!/usr/bin/python +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: icx_system +author: "Ruckus Wireless (@Commscope)" +short_description: Manage the system attributes on Ruckus ICX 7000 series switches +description: + - This module provides declarative management of node system attributes + on Ruckus ICX 7000 series switches. It provides an option to configure host system + parameters or remove those parameters from the device active + configuration. +notes: + - Tested against ICX 10.1. + - For information on using ICX platform, see L(the ICX OS Platform Options guide,../network/user_guide/platform_icx.html). +options: + hostname: + description: + - Configure the device hostname parameter. This option takes an ASCII string value. + type: str + domain_name: + description: + - Configure the IP domain name on the remote device to the provided value. + Value should be in the dotted name form and + will be appended to the hostname to create a fully-qualified domain name. + type: list + domain_search: + description: + - Provides the list of domain names to + append to the hostname for the purpose of doing name resolution. + This argument accepts a list of names and will be reconciled + with the current active configuration on the running node. + type: list + name_servers: + description: + - List of DNS name servers by IP address to use to perform name resolution + lookups. + type: list + aaa_servers: + description: + - Configures radius/tacacs server + type: list + suboptions: + type: + description: + - specify the type of the server + type: str + choices: ['radius','tacacs'] + hostname: + description: + - Configures the host name of the RADIUS server + type: str + auth_port_type: + description: + - specifies the type of the authentication port + type: str + choices: ['auth-port'] + auth_port_num: + description: + - Configures the authentication UDP port. The default value is 1812. + type: str + acct_port_num: + description: + - Configures the accounting UDP port. The default value is 1813. + type: str + acct_type: + description: + - Usage of the accounting port. 
+ type: str + choices: ['accounting-only', 'authentication-only','authorization-only', default] + auth_key: + description: + - Configure the key for the server + type: str + auth_key_type: + description: + - List of authentication level specified in the choices + type: list + choices: ['dot1x','mac-auth','web-auth'] + state: + description: + - State of the configuration + values in the device's current active configuration. When set + to I(present), the values should be configured in the device active + configuration and when set to I(absent) the values should not be + in the device active configuration + type: str + default: present + choices: ['present', 'absent'] + check_running_config: + description: + - Check running configuration. This can be set as environment variable. + Module will use environment variable value(default:True), unless it is overridden, by specifying it as module parameter. + type: bool + default: yes +''' + +EXAMPLES = """ +- name: configure hostname and domain name + icx_system: + hostname: icx + domain_search: + - ansible.com + - redhat.com + - ruckus.com + +- name: configure radius server of type auth-port + icx_system: + aaa_servers: + - type: radius + hostname: radius-server + auth_port_type: auth-port + auth_port_num: 1821 + acct_port_num: 1321 + acct_type: accounting-only + auth_key: abc + auth_key_type: + - dot1x + - mac-auth + +- name: configure tacacs server + icx_system: + aaa_servers: + - type: tacacs + hostname: tacacs-server + auth_port_type: auth-port + auth_port_num: 1821 + acct_port_num: 1321 + acct_type: accounting-only + auth_key: xyz + +- name: configure name servers + icx_system: + name_servers: + - 8.8.8.8 + - 8.8.4.4 +""" + +RETURN = """ +commands: + description: The list of configuration mode commands to send to the device + returned: always + type: list + sample: + - hostname icx + - ip domain name test.example.com + - radius-server host 172.16.10.12 auth-port 2083 acct-port 1850 default key abc dot1x mac-auth + - tacacs-server host 10.2.3.4 auth-port 4058 authorization-only key xyz + +""" + + +import re +from copy import deepcopy +from ansible.module_utils.basic import AnsibleModule, env_fallback +from ansible_collections.community.general.plugins.module_utils.network.icx.icx import get_config, load_config +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import ComplexList, validate_ip_v6_address +from ansible.module_utils.connection import Connection, ConnectionError, exec_command + + +def diff_list(want, have): + adds = [w for w in want if w not in have] + removes = [h for h in have if h not in want] + return (adds, removes) + + +def map_obj_to_commands(want, have, module): + commands = list() + state = module.params['state'] + + def needs_update(x): + return want.get(x) is not None and (want.get(x) != have.get(x)) + + if state == 'absent': + if have['name_servers'] == [] and have['aaa_servers'] == [] and have['domain_search'] == [] and have['hostname'] is None: + if want['hostname']: + commands.append('no hostname') + + if want['domain_search']: + for item in want['domain_search']: + commands.append('no ip dns domain-list %s' % item) + + if want['name_servers']: + for item in want['name_servers']: + commands.append('no ip dns server-address %s' % item) + + if want['aaa_servers']: + want_servers = [] + want_server = want['aaa_servers'] + if want_server: + want_list = deepcopy(want_server) + for items in want_list: + items['auth_key'] = None + want_servers.append(items) + for item in want_servers: + 
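+                    # auth_key was blanked in the copies above so the entries are
+                    # comparable with what the device exposes (a running config
+                    # never echoes server keys back); removal needs only the host,
+                    # plus the ipv6 keyword for IPv6 addresses.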
ipv6addr = validate_ip_v6_address(item['hostname']) + if ipv6addr: + commands.append('no ' + item['type'] + '-server host ipv6 ' + item['hostname']) + else: + commands.append('no ' + item['type'] + '-server host ' + item['hostname']) + + if want['hostname']: + if have['hostname'] == want['hostname']: + commands.append('no hostname') + + if want['domain_search']: + for item in want['domain_search']: + if item in have['domain_search']: + commands.append('no ip dns domain-list %s' % item) + + if want['name_servers']: + for item in want['name_servers']: + if item in have['name_servers']: + commands.append('no ip dns server-address %s' % item) + + if want['aaa_servers']: + want_servers = [] + want_server = want['aaa_servers'] + have_server = have['aaa_servers'] + if want_server: + want_list = deepcopy(want_server) + for items in want_list: + items['auth_key'] = None + want_servers.append(items) + for item in want_servers: + if item in have_server: + ipv6addr = validate_ip_v6_address(item['hostname']) + if ipv6addr: + commands.append('no ' + item['type'] + '-server host ipv6 ' + item['hostname']) + else: + commands.append('no ' + item['type'] + '-server host ' + item['hostname']) + + elif state == 'present': + if needs_update('hostname'): + commands.append('hostname %s' % want['hostname']) + + if want['domain_search']: + adds, removes = diff_list(want['domain_search'], have['domain_search']) + for item in removes: + commands.append('no ip dns domain-list %s' % item) + for item in adds: + commands.append('ip dns domain-list %s' % item) + + if want['name_servers']: + adds, removes = diff_list(want['name_servers'], have['name_servers']) + for item in removes: + commands.append('no ip dns server-address %s' % item) + for item in adds: + commands.append('ip dns server-address %s' % item) + + if want['aaa_servers']: + want_servers = [] + want_server = want['aaa_servers'] + have_server = have['aaa_servers'] + want_list = deepcopy(want_server) + for items in want_list: + items['auth_key'] = None + want_servers.append(items) + + adds, removes = diff_list(want_servers, have_server) + + for item in removes: + ip6addr = validate_ip_v6_address(item['hostname']) + if ip6addr: + cmd = 'no ' + item['type'] + '-server host ipv6 ' + item['hostname'] + else: + cmd = 'no ' + item['type'] + '-server host ' + item['hostname'] + commands.append(cmd) + + for w_item in adds: + for item in want_server: + if item['hostname'] == w_item['hostname'] and item['type'] == w_item['type']: + auth_key = item['auth_key'] + + ip6addr = validate_ip_v6_address(w_item['hostname']) + if ip6addr: + cmd = w_item['type'] + '-server host ipv6 ' + w_item['hostname'] + else: + cmd = w_item['type'] + '-server host ' + w_item['hostname'] + if w_item['auth_port_type']: + cmd += ' ' + w_item['auth_port_type'] + ' ' + w_item['auth_port_num'] + if w_item['acct_port_num'] and w_item['type'] == 'radius': + cmd += ' acct-port ' + w_item['acct_port_num'] + if w_item['type'] == 'tacacs': + if any((w_item['acct_port_num'], w_item['auth_key_type'])): + module.fail_json(msg='acct_port and auth_key_type is not applicable for tacacs server') + if w_item['acct_type']: + cmd += ' ' + w_item['acct_type'] + if auth_key is not None: + cmd += ' key ' + auth_key + if w_item['auth_key_type'] and w_item['type'] == 'radius': + val = '' + for y in w_item['auth_key_type']: + val = val + ' ' + y + cmd += val + commands.append(cmd) + + return commands + + +def parse_hostname(config): + match = re.search(r'^hostname (\S+)', config, re.M) + if match: + return match.group(1) 
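+# A minimal sketch of what parse_hostname() extracts, assuming a typical ICX
+# running config ('icx-sw1' is an invented hostname, for illustration only):
+#
+#   >>> parse_hostname('hostname icx-sw1\nip dns domain-list example.com')
+#   'icx-sw1'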
+ + +def parse_domain_search(config): + match = re.findall(r'^ip dns domain[- ]list (\S+)', config, re.M) + matches = list() + for name in match: + matches.append(name) + return matches + + +def parse_name_servers(config): + matches = list() + values = list() + lines = config.split('\n') + for line in lines: + if 'ip dns server-address' in line: + values = line.split(' ') + for val in values: + match = re.search(r'([0-9.]+)', val) + if match: + matches.append(match.group()) + + return matches + + +def parse_aaa_servers(config): + configlines = config.split('\n') + obj = [] + for line in configlines: + auth_key_type = [] + if 'radius-server' in line or 'tacacs-server' in line: + aaa_type = 'radius' if 'radius-server' in line else 'tacacs' + match = re.search(r'(host ipv6 (\S+))|(host (\S+))', line) + if match: + hostname = match.group(2) if match.group(2) is not None else match.group(4) + match = re.search(r'auth-port ([0-9]+)', line) + if match: + auth_port_num = match.group(1) + else: + auth_port_num = None + match = re.search(r'acct-port ([0-9]+)', line) + if match: + acct_port_num = match.group(1) + else: + acct_port_num = None + match = re.search(r'acct-port [0-9]+ (\S+)', line) + if match: + acct_type = match.group(1) + else: + acct_type = None + if aaa_type == 'tacacs': + match = re.search(r'auth-port [0-9]+ (\S+)', line) + if match: + acct_type = match.group(1) + else: + acct_type = None + match = re.search(r'(dot1x)', line) + if match: + auth_key_type.append('dot1x') + match = re.search(r'(mac-auth)', line) + if match: + auth_key_type.append('mac-auth') + match = re.search(r'(web-auth)', line) + if match: + auth_key_type.append('web-auth') + + obj.append({ + 'type': aaa_type, + 'hostname': hostname, + 'auth_port_type': 'auth-port', + 'auth_port_num': auth_port_num, + 'acct_port_num': acct_port_num, + 'acct_type': acct_type, + 'auth_key': None, + 'auth_key_type': set(auth_key_type) if len(auth_key_type) > 0 else None + }) + + return obj + + +def map_config_to_obj(module): + compare = module.params['check_running_config'] + config = get_config(module, None, compare=compare) + return { + 'hostname': parse_hostname(config), + 'domain_search': parse_domain_search(config), + 'name_servers': parse_name_servers(config), + 'aaa_servers': parse_aaa_servers(config) + } + + +def map_params_to_obj(module): + if module.params['aaa_servers']: + for item in module.params['aaa_servers']: + if item['auth_key_type']: + item['auth_key_type'] = set(item['auth_key_type']) + obj = { + 'hostname': module.params['hostname'], + 'domain_name': module.params['domain_name'], + 'domain_search': module.params['domain_search'], + 'name_servers': module.params['name_servers'], + 'state': module.params['state'], + 'aaa_servers': module.params['aaa_servers'] + } + return obj + + +def main(): + """ Main entry point for Ansible module execution + """ + server_spec = dict( + type=dict(choices=['radius', 'tacacs']), + hostname=dict(), + auth_port_type=dict(choices=['auth-port']), + auth_port_num=dict(), + acct_port_num=dict(), + acct_type=dict(choices=['accounting-only', 'authentication-only', 'authorization-only', 'default']), + auth_key=dict(), + auth_key_type=dict(type='list', choices=['dot1x', 'mac-auth', 'web-auth']) + ) + argument_spec = dict( + hostname=dict(), + + domain_name=dict(type='list'), + domain_search=dict(type='list'), + name_servers=dict(type='list'), + + aaa_servers=dict(type='list', elements='dict', options=server_spec), + state=dict(choices=['present', 'absent'], default='present'), + 
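+        # As in the other icx_* modules, check_running_config below can also be
+        # driven by the ANSIBLE_CHECK_ICX_RUNNING_CONFIG environment variable: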
check_running_config=dict(default=True, type='bool', fallback=(env_fallback, ['ANSIBLE_CHECK_ICX_RUNNING_CONFIG']))
+    )
+
+    module = AnsibleModule(argument_spec=argument_spec,
+                           supports_check_mode=True)
+
+    result = {'changed': False}
+
+    warnings = list()
+
+    result['warnings'] = warnings
+    exec_command(module, 'skip')
+    want = map_params_to_obj(module)
+    have = map_config_to_obj(module)
+    commands = map_obj_to_commands(want, have, module)
+    result['commands'] = commands
+
+    if commands:
+        if not module.check_mode:
+            load_config(module, commands)
+        result['changed'] = True
+
+    module.exit_json(**result)
+
+
+if __name__ == "__main__":
+    main()
diff --git a/plugins/modules/network/icx/icx_user.py b/plugins/modules/network/icx/icx_user.py
new file mode 100644
index 0000000000..e26f9dc849
--- /dev/null
+++ b/plugins/modules/network/icx/icx_user.py
@@ -0,0 +1,390 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+DOCUMENTATION = '''
+---
+module: icx_user
+author: "Ruckus Wireless (@Commscope)"
+short_description: Manage the user accounts on Ruckus ICX 7000 series switches
+description:
+  - This module creates or updates user accounts on network devices. It allows playbooks to manage
+    either individual usernames or the aggregate of usernames in the
+    current running config. It also supports purging usernames from the
+    configuration that are not explicitly defined.
+notes:
+  - Tested against ICX 10.1.
+  - For information on using ICX platform, see L(the ICX OS Platform Options guide,../network/user_guide/platform_icx.html).
+options:
+  aggregate:
+    description:
+      - The set of username objects to be configured on the remote
+        ICX device. The list entries can either be the username
+        or a hash of username and properties. This argument is mutually
+        exclusive with the C(name) argument.
+    aliases: ['users', 'collection']
+    type: list
+    suboptions:
+      name:
+        description:
+          - The username to be configured on the ICX device.
+        required: true
+        type: str
+      configured_password:
+        description: The password to be configured on the ICX device.
+        type: str
+      update_password:
+        description:
+          - This argument will instruct the module when to change the password. When
+            set to C(always), the password will always be updated in the device
+            and when set to C(on_create) the password will be updated only if
+            the username is created.
+        choices: ['on_create', 'always']
+        type: str
+      privilege:
+        description:
+          - The privilege level to be granted to the user.
+        choices: ['0', '4', '5']
+        type: str
+      nopassword:
+        description:
+          - Defines the username without assigning
+            a password. This will allow the user to login to the system
+            without being authenticated by a password.
+        type: bool
+      state:
+        description:
+          - Configures the state of the username definition
+            as it relates to the device operational configuration. When set
+            to I(present), the username(s) should be configured in the device active
+            configuration and when set to I(absent) the username(s) should not be
+            in the device active configuration.
+        choices: ['present', 'absent']
+        type: str
+      access_time:
+        description:
+          - The time value passed to the ICX C(access-time) option for this user,
+            restricting when the account is allowed to access the device.
+        type: str
+      check_running_config:
+        description:
+          - Check the running configuration. This can also be set via the C(ANSIBLE_CHECK_ICX_RUNNING_CONFIG)
+            environment variable; the module uses the environment variable value (default True) unless it is
+            overridden by this module parameter.
+        type: bool
+  name:
+    description:
+      - The username to be configured on the ICX device.
+    type: str
+  configured_password:
+    description: The password to be configured on the ICX device.
+    type: str
+  update_password:
+    description:
+      - This argument will instruct the module when to change the password. When
+        set to C(always), the password will always be updated in the device
+        and when set to C(on_create) the password will be updated only if
+        the username is created.
+    default: always
+    choices: ['on_create', 'always']
+    type: str
+  privilege:
+    description:
+      - The privilege level to be granted to the user.
+    default: 0
+    choices: ['0', '4', '5']
+    type: str
+  nopassword:
+    description:
+      - Defines the username without assigning
+        a password. This will allow the user to login to the system
+        without being authenticated by a password.
+    type: bool
+  purge:
+    description:
+      - If set to true module will remove any previously
+        configured usernames on the device except the current defined set of users.
+    type: bool
+    default: false
+  state:
+    description:
+      - Configures the state of the username definition
+        as it relates to the device operational configuration. When set
+        to I(present), the username(s) should be configured in the device active
+        configuration and when set to I(absent) the username(s) should not be
+        in the device active configuration.
+    default: present
+    choices: ['present', 'absent']
+    type: str
+  access_time:
+    description:
+      - The time value passed to the ICX C(access-time) option for this user,
+        restricting when the account is allowed to access the device.
+    type: str
+  check_running_config:
+    description:
+      - Check the running configuration. This can also be set via the C(ANSIBLE_CHECK_ICX_RUNNING_CONFIG)
+        environment variable; the module uses the environment variable value (default True) unless it is
+        overridden by this module parameter.
+ type: bool + default: yes +''' + +EXAMPLES = """ +- name: create a new user without password + icx_user: + name: user1 + nopassword: true + +- name: create a new user with password + icx_user: + name: user1 + configured_password: 'newpassword' + +- name: remove users + icx_user: + name: user1 + state: absent + +- name: set user privilege level to 5 + icx_user: + name: user1 + privilege: 5 +""" + +RETURN = """ +commands: + description: The list of configuration mode commands to send to the device + returned: always + type: list + sample: + - username ansible nopassword + - username ansible password-string alethea123 + - no username ansible + - username ansible privilege 5 + - username ansible enable +""" + +from copy import deepcopy + +import re +import base64 +import hashlib + +from functools import partial + +from ansible.module_utils.basic import AnsibleModule, env_fallback +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import remove_default_spec +from ansible.module_utils.connection import exec_command +from ansible.module_utils.six import iteritems +from ansible_collections.community.general.plugins.module_utils.network.icx.icx import get_config, load_config + + +def get_param_value(key, item, module): + if not item.get(key): + value = module.params[key] + + else: + value_type = module.argument_spec[key].get('type', 'str') + type_checker = module._CHECK_ARGUMENT_TYPES_DISPATCHER[value_type] + type_checker(item[key]) + value = item[key] + + validator = globals().get('validate_%s' % key) + if all((value, validator)): + validator(value, module) + + return value + + +def map_params_to_obj(module): + users = module.params['aggregate'] + if not users: + if not module.params['name'] and module.params['purge']: + return list() + elif not module.params['name']: + module.fail_json(msg='username is required') + else: + aggregate = [{'name': module.params['name']}] + else: + aggregate = list() + for item in users: + if not isinstance(item, dict): + aggregate.append({'name': item}) + elif 'name' not in item: + module.fail_json(msg='name is required') + else: + aggregate.append(item) + + objects = list() + + for item in aggregate: + get_value = partial(get_param_value, item=item, module=module) + item['configured_password'] = get_value('configured_password') + item['nopassword'] = get_value('nopassword') + item['privilege'] = get_value('privilege') + item['state'] = get_value('state') + objects.append(item) + + return objects + + +def parse_privilege(data): + match = re.search(r'privilege (\S)', data, re.M) + if match: + return match.group(1) + + +def map_config_to_obj(module): + compare = module.params['check_running_config'] + data = get_config(module, flags=['| include username'], compare=compare) + + match = re.findall(r'(?:^(?:u|\s{2}u))sername (\S+)', data, re.M) + if not match: + return list() + + instances = list() + + for user in set(match): + regex = r'username %s .+$' % user + cfg = re.findall(regex, data, re.M) + cfg = '\n'.join(cfg) + obj = { + 'name': user, + 'state': 'present', + 'nopassword': 'nopassword' in cfg, + 'configured_password': None, + 'privilege': parse_privilege(cfg) + } + instances.append(obj) + + return instances + + +def map_obj_to_commands(updates, module): + commands = list() + state = module.params['state'] + update_password = module.params['update_password'] + + def needs_update(want, have, x): + return want.get(x) and (want.get(x) != have.get(x)) + + def add(command, want, x): + command.append('username %s %s' % 
(want['name'], x)) + for update in updates: + want, have = update + if want['state'] == 'absent': + commands.append(user_del_cmd(want['name'])) + + if needs_update(want, have, 'privilege'): + add(commands, want, 'privilege %s password %s' % (want['privilege'], want['configured_password'])) + else: + if needs_update(want, have, 'configured_password'): + if update_password == 'always' or not have: + add(commands, want, '%spassword %s' % ('privilege ' + str(have.get('privilege')) + + " " if have.get('privilege') is not None else '', want['configured_password'])) + + if needs_update(want, have, 'nopassword'): + if want['nopassword']: + add(commands, want, 'nopassword') + + if needs_update(want, have, 'access_time'): + add(commands, want, 'access-time %s' % want['access_time']) + + if needs_update(want, have, 'expiry_days'): + add(commands, want, 'expires %s' % want['expiry_days']) + + return commands + + +def update_objects(want, have): + updates = list() + for entry in want: + item = next((i for i in have if i['name'] == entry['name']), None) + + if all((item is None, entry['state'] == 'present')): + updates.append((entry, {})) + + elif all((have == [], entry['state'] == 'absent')): + for key, value in iteritems(entry): + if key not in ['update_password']: + updates.append((entry, item)) + break + elif item: + for key, value in iteritems(entry): + if key not in ['update_password']: + if value is not None and value != item.get(key): + updates.append((entry, item)) + break + return updates + + +def user_del_cmd(username): + return 'no username %s' % username + + +def main(): + """entry point for module execution + """ + element_spec = dict( + name=dict(), + + configured_password=dict(no_log=True), + nopassword=dict(type='bool', default=False), + update_password=dict(default='always', choices=['on_create', 'always']), + privilege=dict(type='str', choices=['0', '4', '5']), + access_time=dict(type='str'), + state=dict(default='present', choices=['present', 'absent']), + check_running_config=dict(default=True, type='bool', fallback=(env_fallback, ['ANSIBLE_CHECK_ICX_RUNNING_CONFIG'])) + ) + aggregate_spec = deepcopy(element_spec) + aggregate_spec['name'] = dict(required=True) + + remove_default_spec(aggregate_spec) + + argument_spec = dict( + aggregate=dict(type='list', elements='dict', options=aggregate_spec, aliases=['users', 'collection']), + purge=dict(type='bool', default=False) + ) + + argument_spec.update(element_spec) + + mutually_exclusive = [('name', 'aggregate')] + + module = AnsibleModule(argument_spec=argument_spec, + mutually_exclusive=mutually_exclusive, + supports_check_mode=True) + + result = {'changed': False} + exec_command(module, 'skip') + want = map_params_to_obj(module) + have = map_config_to_obj(module) + commands = map_obj_to_commands(update_objects(want, have), module) + + if module.params['purge']: + want_users = [x['name'] for x in want] + have_users = [x['name'] for x in have] + for item in set(have_users).difference(want_users): + if item != 'admin': + commands.append(user_del_cmd(item)) + + result["commands"] = commands + + if commands: + if not module.check_mode: + load_config(module, commands) + result['changed'] = True + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/icx/icx_vlan.py b/plugins/modules/network/icx/icx_vlan.py new file mode 100644 index 0000000000..468a7d0988 --- /dev/null +++ b/plugins/modules/network/icx/icx_vlan.py @@ -0,0 +1,783 @@ +#!/usr/bin/python +# Copyright: Ansible Project +# GNU 
General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: icx_vlan +author: "Ruckus Wireless (@Commscope)" +short_description: Manage VLANs on Ruckus ICX 7000 series switches +description: + - This module provides declarative management of VLANs + on ICX network devices. +notes: + - Tested against ICX 10.1. + - For information on using ICX platform, see L(the ICX OS Platform Options guide,../network/user_guide/platform_icx.html). +options: + name: + description: + - Name of the VLAN. + type: str + vlan_id: + description: + - ID of the VLAN. Range 1-4094. + required: true + type: int + interfaces: + description: + - List of ethernet ports or LAGS to be added as access(untagged) ports to the vlan. + To add a range of ports use 'to' keyword. See the example. + suboptions: + name: + description: + - Name of the interface or lag + type: list + purge: + description: + - Purge interfaces not defined in the I(name) + type: bool + type: dict + tagged: + description: + - List of ethernet ports or LAGS to be added as trunk(tagged) ports to the vlan. + To add a range of ports use 'to' keyword. See the example. + suboptions: + name: + description: + - Name of the interface or lag + type: list + purge: + description: + - Purge interfaces not defined in the I(name) + type: bool + type: dict + ip_dhcp_snooping: + description: + - Enables DHCP snooping on a VLAN. + type: bool + ip_arp_inspection: + description: + - Enables dynamic ARP inspection on a VLAN. + type: bool + associated_interfaces: + description: + - This is a intent option and checks the operational state of the for given vlan C(name) + for associated interfaces. If the value in the C(associated_interfaces) does not match with + the operational state of vlan interfaces on device it will result in failure. + type: list + associated_tagged: + description: + - This is a intent option and checks the operational state of given vlan C(name) + for associated tagged ports and lags. If the value in the C(associated_tagged) does not match with + the operational state of vlan interfaces on device it will result in failure. + type: list + delay: + description: + - Delay the play should wait to check for declarative intent params values. + default: 10 + type: int + stp: + description: + - Enable spanning-tree 802-1w/rstp for this vlan. + suboptions: + type: + description: + - Specify the type of spanning-tree + type: str + default: 802-1w + choices: ['802-1w','rstp'] + priority: + description: + - Configures the priority of the bridge. The value ranges from + 0 through 65535. A lower numerical value means the bridge has + a higher priority. Thus, the highest priority is 0. The default is 32768. + type: str + enabled: + description: + - Manage the state(Enable/Disable) of the spanning_tree_802_1w in the current vlan + type: bool + type: dict + aggregate: + description: + - List of VLANs definitions. + type: list + suboptions: + name: + description: + - Name of the VLAN. + type: str + vlan_id: + description: + - ID of the VLAN. Range 1-4094. + required: true + type: str + ip_dhcp_snooping: + description: + - Enables DHCP snooping on a VLAN. + type: bool + ip_arp_inspection: + description: + - Enables dynamic ARP inspection on a VLAN. 
+ type: bool + tagged: + description: + - List of ethernet ports or LAGS to be added as trunk(tagged) ports to the vlan. + To add a range of ports use 'to' keyword. See the example. + suboptions: + name: + description: + - Name of the interface or lag + type: list + purge: + description: + - Purge interfaces not defined in the I(name) + type: bool + type: dict + interfaces: + description: + - List of ethernet ports or LAGS to be added as access(untagged) ports to the vlan. + To add a range of ports use 'to' keyword. See the example. + suboptions: + name: + description: + - Name of the interface or lag + type: list + purge: + description: + - Purge interfaces not defined in the I(name) + type: bool + type: dict + delay: + description: + - Delay the play should wait to check for declarative intent params values. + type: int + stp: + description: + - Enable spanning-tree 802-1w/rstp for this vlan. + suboptions: + type: + description: + - Specify the type of spanning-tree + type: str + default: 802-1w + choices: ['802-1w','rstp'] + priority: + description: + - Configures the priority of the bridge. The value ranges from + 0 through 65535. A lower numerical value means the bridge has + a higher priority. Thus, the highest priority is 0. The default is 32768. + type: str + enabled: + description: + - Manage the state(Enable/Disable) of the spanning_tree_802_1w in the current vlan + type: bool + type: dict + state: + description: + - State of the VLAN configuration. + type: str + choices: ['present', 'absent'] + check_running_config: + description: + - Check running configuration. This can be set as environment variable. + Module will use environment variable value(default:True), unless it is overridden, by specifying it as module parameter. + type: bool + associated_interfaces: + description: + - This is a intent option and checks the operational state of the for given vlan C(name) + for associated interfaces. If the value in the C(associated_interfaces) does not match with + the operational state of vlan interfaces on device it will result in failure. + type: list + associated_tagged: + description: + - This is a intent option and checks the operational state of given vlan C(name) + for associated tagged ports and lags. If the value in the C(associated_tagged) does not match with + the operational state of vlan interfaces on device it will result in failure. + type: list + purge: + description: + - Purge VLANs not defined in the I(aggregate) parameter. + default: no + type: bool + state: + description: + - State of the VLAN configuration. + type: str + default: present + choices: ['present', 'absent'] + check_running_config: + description: + - Check running configuration. This can be set as environment variable. + Module will use environment variable value(default:True), unless it is overridden, by specifying it as module parameter. + type: bool + default: yes +''' + +EXAMPLES = """ +- name: Add a single ethernet 1/1/48 as access(untagged) port to vlan 20 + icx_vlan: + name: test-vlan + vlan_id: 20 + interfaces: + name: + - ethernet 1/1/48 + +- name: Add a single LAG 10 as access(untagged) port to vlan 20 + icx_vlan: + vlan_id: 20 + interfaces: + name: + - lag 10 + +- name: Add a range of ethernet ports as trunk(tagged) ports to vlan 20 by port + icx_vlan: + vlan_id: 20 + tagged: + name: + - ethernet 1/1/40 to 1/1/48 + +- name: Add discontinuous lags, ethernet ports as access(untagged) and trunk(tagged) port to vlan 20. 
+ icx_vlan: + vlan_id: 20 + interfaces: + name: + - ethernet 1/1/40 to 1/1/48 + - ethernet 2/1/1 + - lag 1 + - lag 3 to 5 + tagged: + name: + - ethernet 1/1/20 to 1/1/25 + - lag 1 to 3 + +- name: Remove an access and range of trunk ports from vlan + icx_vlan: + vlan_id: 20 + interfaces: + name: + - ethernet 1/1/40 + tagged: + name: + - ethernet 1/1/39 to 1/1/70 + +- name: Enable dhcp snooping, disable arp inspection in vlan + icx_vlan: + vlan_id: 20 + ip_dhcp_snooping: present + ip_arp_inspection: absent + +- name: Create vlan 20. Enable arp inspection in vlan. Purge all other vlans. + icx_vlan: + vlan_id: 20 + ip_arp_inspection: present + purge: present + +- name: Remove vlan 20. + icx_vlan: + vlan_id: 20 + state: absent +""" + +RETURN = """ +commands: + description: The list of configuration mode commands to send to the device + returned: always + type: list + sample: + - vlan 100 + - name test-vlan +""" + +import re +from time import sleep +import itertools +from copy import deepcopy +from time import sleep +from ansible.module_utils._text import to_text +from ansible.module_utils.basic import AnsibleModule, env_fallback +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.config import NetworkConfig +from ansible_collections.community.general.plugins.module_utils.network.icx.icx import load_config, get_config +from ansible.module_utils.connection import Connection, ConnectionError, exec_command +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import conditional, remove_default_spec + + +def search_obj_in_list(vlan_id, lst): + obj = list() + for o in lst: + if str(o['vlan_id']) == vlan_id: + return o + + +def parse_vlan_brief(module, vlan_id): + command = 'show run vlan %s' % vlan_id + rc, out, err = exec_command(module, command) + lines = out.split('\n') + untagged_ports = list() + untagged_lags = list() + tagged_ports = list() + tagged_lags = list() + + for line in lines: + if 'tagged' in line.split(): + lags = line.split(" lag ") + ports = lags[0].split(" ethe ") + del ports[0] + del lags[0] + for port in ports: + if "to" in port: + p = port.split(" to ") + pr = int(p[1].split('/')[2]) - int(p[0].split('/')[2]) + for i in range(0, pr + 1): + tagged_ports.append((int(p[0].split('/')[2]) + i)) + else: + tagged_ports.append(int(port.split('/')[2])) + for lag in lags: + if "to" in lag: + l = lag.split(" to ") + lr = int(l[1]) - int(l[0]) + for i in range(0, lr + 1): + tagged_lags.append((int(l[0]) + i)) + else: + tagged_lags.append(int(lag)) + if 'untagged' in line.split(): + lags = line.split(" lag ") + ports = lags[0].split(" ethe ") + del ports[0] + del lags[0] + for port in ports: + if "to" in port: + p = port.split(" to ") + pr = int(p[1].split('/')[2]) - int(p[0].split('/')[2]) + for i in range(0, pr + 1): + untagged_ports.append((int(p[0].split('/')[2]) + i)) + else: + untagged_ports.append(int(port.split('/')[2])) + for lag in lags: + if "to" in lag: + l = lag.split(" to ") + lr = int(l[1]) - int(l[0]) + for i in range(0, lr + 1): + untagged_lags.append((int(l[0]) + i)) + else: + untagged_lags.append(int(lag)) + + return untagged_ports, untagged_lags, tagged_ports, tagged_lags + + +def extract_list_from_interface(interface): + if 'ethernet' in interface: + if 'to' in interface: + s = re.search(r"\d+\/\d+/(?P\d+)\sto\s+\d+\/\d+/(?P\d+)", interface) + low = int(s.group('low')) + high = int(s.group('high')) + else: + s = re.search(r"\d+\/\d+/(?P\d+)", interface) + low = int(s.group('low')) + high = int(s.group('low')) + 
elif 'lag' in interface: + if 'to' in interface: + s = re.search(r"(?P\d+)\sto\s(?P\d+)", interface) + low = int(s.group('low')) + high = int(s.group('high')) + else: + s = re.search(r"(?P\d+)", interface) + low = int(s.group('low')) + high = int(s.group('low')) + + return low, high + + +def parse_vlan_id(module): + vlans = [] + command = 'show vlan brief' + rc, out, err = exec_command(module, command) + lines = out.split('\n') + for line in lines: + if 'VLANs Configured :' in line: + values = line.split(':')[1] + vlans = [s for s in values.split() if s.isdigit()] + s = re.findall(r"(?P\d+)\sto\s(?P\d+)", values) + for ranges in s: + low = int(ranges[0]) + 1 + high = int(ranges[1]) + while(high > low): + vlans.append(str(low)) + low = low + 1 + return vlans + + +def spanning_tree(module, stp): + stp_cmd = list() + if stp.get('enabled') is False: + if stp.get('type') == '802-1w': + stp_cmd.append('no spanning-tree' + ' ' + stp.get('type')) + stp_cmd.append('no spanning-tree') + + elif stp.get('type'): + stp_cmd.append('spanning-tree' + ' ' + stp.get('type')) + if stp.get('priority') and stp.get('type') == 'rstp': + module.fail_json(msg='spanning-tree 802-1w only can have priority') + elif stp.get('priority'): + stp_cmd.append('spanning-tree' + ' ' + stp.get('type') + ' ' + 'priority' + ' ' + stp.get('priority')) + + return stp_cmd + + +def map_params_to_obj(module): + obj = [] + aggregate = module.params.get('aggregate') + if aggregate: + for item in aggregate: + for key in item: + if item.get(key) is None: + item[key] = module.params[key] + stp = item.get('stp') + if stp: + stp_cmd = spanning_tree(module, stp) + item.update({'stp': stp_cmd}) + + d = item.copy() + + obj.append(d) + + else: + params = { + 'name': module.params['name'], + 'vlan_id': module.params['vlan_id'], + 'interfaces': module.params['interfaces'], + 'tagged': module.params['tagged'], + 'associated_interfaces': module.params['associated_interfaces'], + 'associated_tagged': module.params['associated_tagged'], + 'delay': module.params['delay'], + 'ip_dhcp_snooping': module.params['ip_dhcp_snooping'], + 'ip_arp_inspection': module.params['ip_arp_inspection'], + 'state': module.params['state'], + } + + stp = module.params.get('stp') + if stp: + stp_cmd = spanning_tree(module, stp) + params.update({'stp': stp_cmd}) + + obj.append(params) + + return obj + + +def map_obj_to_commands(updates, module): + commands = list() + want, have = updates + purge = module.params['purge'] + + for w in want: + vlan_id = w['vlan_id'] + state = w['state'] + name = w['name'] + interfaces = w.get('interfaces') + tagged = w.get('tagged') + dhcp = w.get('ip_dhcp_snooping') + arp = w.get('ip_arp_inspection') + stp = w.get('stp') + obj_in_have = search_obj_in_list(str(vlan_id), have) + + if state == 'absent': + if have == []: + commands.append('no vlan {0}'.format(vlan_id)) + if obj_in_have: + commands.append('no vlan {0}'.format(vlan_id)) + + elif state == 'present': + if not obj_in_have: + commands.append('vlan {0}'.format(vlan_id)) + if name: + commands.append('vlan {0} name {1}'.format(vlan_id, name)) + + if interfaces: + if interfaces['name']: + for item in interfaces['name']: + commands.append('untagged {0}'.format(item)) + + if tagged: + if tagged['name']: + for item in tagged['name']: + commands.append('tagged {0}'.format(item)) + + if dhcp is True: + commands.append('ip dhcp snooping vlan {0}'.format(vlan_id)) + elif dhcp is False: + commands.append('no ip dhcp snooping vlan {0}'.format(vlan_id)) + + if arp is True: + commands.append('ip arp 
inspection vlan {0}'.format(vlan_id))
+                elif arp is False:
+                    commands.append('no ip arp inspection vlan {0}'.format(vlan_id))
+
+                if stp:
+                    commands.extend(w['stp'])
+
+            else:
+                commands.append('vlan {0}'.format(vlan_id))
+                if name:
+                    if name != obj_in_have['name']:
+                        commands.append('vlan {0} name {1}'.format(vlan_id, name))
+
+                if interfaces:
+                    if interfaces['name']:
+                        have_interfaces = list()
+                        for interface in interfaces['name']:
+                            low, high = extract_list_from_interface(interface)
+
+                            while high >= low:
+                                if 'ethernet' in interface:
+                                    have_interfaces.append('ethernet 1/1/{0}'.format(low))
+                                if 'lag' in interface:
+                                    have_interfaces.append('lag {0}'.format(low))
+                                low = low + 1
+
+                    if interfaces['purge'] is True:
+                        remove_interfaces = list(set(obj_in_have['interfaces']) - set(have_interfaces))
+                        for item in remove_interfaces:
+                            commands.append('no untagged {0}'.format(item))
+
+                    if interfaces['name']:
+                        add_interfaces = list(set(have_interfaces) - set(obj_in_have['interfaces']))
+                        for item in add_interfaces:
+                            commands.append('untagged {0}'.format(item))
+
+                if tagged:
+                    if tagged['name']:
+                        have_tagged = list()
+                        for tag in tagged['name']:
+                            low, high = extract_list_from_interface(tag)
+
+                            while high >= low:
+                                if 'ethernet' in tag:
+                                    have_tagged.append('ethernet 1/1/{0}'.format(low))
+                                if 'lag' in tag:
+                                    have_tagged.append('lag {0}'.format(low))
+                                low = low + 1
+                    if tagged['purge'] is True:
+                        remove_tagged = list(set(obj_in_have['tagged']) - set(have_tagged))
+                        for item in remove_tagged:
+                            commands.append('no tagged {0}'.format(item))
+
+                    if tagged['name']:
+                        add_tagged = list(set(have_tagged) - set(obj_in_have['tagged']))
+                        for item in add_tagged:
+                            commands.append('tagged {0}'.format(item))
+
+                if dhcp != obj_in_have['ip_dhcp_snooping']:
+                    if dhcp is True:
+                        commands.append('ip dhcp snooping vlan {0}'.format(vlan_id))
+                    elif dhcp is False:
+                        commands.append('no ip dhcp snooping vlan {0}'.format(vlan_id))
+
+                if arp != obj_in_have['ip_arp_inspection']:
+                    if arp is True:
+                        commands.append('ip arp inspection vlan {0}'.format(vlan_id))
+                    elif arp is False:
+                        commands.append('no ip arp inspection vlan {0}'.format(vlan_id))
+
+                if stp:
+                    commands.extend(w['stp'])
+
+                if len(commands) == 1 and 'vlan ' + str(vlan_id) in commands:
+                    commands = []
+
+    if purge:
+        commands = []
+        vlans = parse_vlan_id(module)
+        for h in vlans:
+            obj_in_want = search_obj_in_list(h, want)
+            if not obj_in_want and h != '1':
+                commands.append('no vlan {0}'.format(h))
+
+    return commands
+
+
+def parse_name_argument(module, item):
+    command = 'show vlan {0}'.format(item)
+    rc, out, err = exec_command(module, command)
+    match = re.search(r"Name (\S+),", out)
+    if match:
+        return match.group(1)
+
+
+def parse_interfaces_argument(module, item, port_type):
+    untagged_ports, untagged_lags, tagged_ports, tagged_lags = parse_vlan_brief(module, item)
+    ports = list()
+    if port_type == "interfaces":
+        if untagged_ports:
+            for port in untagged_ports:
+                ports.append('ethernet 1/1/' + str(port))
+        if untagged_lags:
+            for port in untagged_lags:
+                ports.append('lag ' + str(port))
+
+    elif port_type == "tagged":
+        if tagged_ports:
+            for port in tagged_ports:
+                ports.append('ethernet 1/1/' + str(port))
+        if tagged_lags:
+            for port in tagged_lags:
+                ports.append('lag ' + str(port))
+
+    return ports
+
+
+def parse_config_argument(config, arg):
+    # True if 'arg' occurs anywhere in the running configuration.
+    return re.search(arg, config, re.M) is not None
+
+
+def
map_config_to_obj(module): + config = get_config(module) + vlans = parse_vlan_id(module) + instance = list() + + for item in set(vlans): + obj = { + 'vlan_id': item, + 'name': parse_name_argument(module, item), + 'interfaces': parse_interfaces_argument(module, item, 'interfaces'), + 'tagged': parse_interfaces_argument(module, item, 'tagged'), + 'ip_dhcp_snooping': parse_config_argument(config, 'ip dhcp snooping vlan {0}'.format(item)), + 'ip_arp_inspection': parse_config_argument(config, 'ip arp inspection vlan {0}'.format(item)), + } + instance.append(obj) + return instance + + +def check_fail(module, output): + error = [ + re.compile(r"^error", re.I) + ] + for x in output: + for regex in error: + if regex.search(x): + module.fail_json(msg=x) + + +def check_declarative_intent_params(want, module, result): + def parse_ports(interfaces, ports, lags): + for interface in interfaces: + low, high = extract_list_from_interface(interface) + + while(high >= low): + if 'ethernet' in interface: + if not (low in ports): + module.fail_json(msg='One or more conditional statements have not been satisfied ' + interface) + if 'lag' in interface: + if not (low in lags): + module.fail_json(msg='One or more conditional statements have not been satisfied ' + interface) + low = low + 1 + + is_delay = False + low = 0 + high = 0 + for w in want: + if w.get('associated_interfaces') is None and w.get('associated_tagged') is None: + continue + + if result['changed'] and not is_delay: + sleep(module.params['delay']) + is_delay = True + + untagged_ports, untagged_lags, tagged_ports, tagged_lags = parse_vlan_brief(module, w['vlan_id']) + + if w['associated_interfaces']: + parse_ports(w.get('associated_interfaces'), untagged_ports, untagged_lags) + + if w['associated_tagged']: + parse_ports(w.get('associated_tagged'), tagged_ports, tagged_lags) + + +def main(): + """ main entry point for module execution + """ + stp_spec = dict( + type=dict(default='802-1w', choices=['802-1w', 'rstp']), + priority=dict(), + enabled=dict(type='bool'), + ) + inter_spec = dict( + name=dict(type='list'), + purge=dict(type='bool') + ) + tagged_spec = dict( + name=dict(type='list'), + purge=dict(type='bool') + ) + element_spec = dict( + vlan_id=dict(type='int'), + name=dict(), + interfaces=dict(type='dict', options=inter_spec), + tagged=dict(type='dict', options=tagged_spec), + ip_dhcp_snooping=dict(type='bool'), + ip_arp_inspection=dict(type='bool'), + associated_interfaces=dict(type='list'), + associated_tagged=dict(type='list'), + delay=dict(default=10, type='int'), + stp=dict(type='dict', options=stp_spec), + state=dict(default='present', choices=['present', 'absent']), + check_running_config=dict(default=True, type='bool', fallback=(env_fallback, ['ANSIBLE_CHECK_ICX_RUNNING_CONFIG'])) + ) + aggregate_spec = deepcopy(element_spec) + aggregate_spec['vlan_id'] = dict(required=True) + + remove_default_spec(aggregate_spec) + + argument_spec = dict( + aggregate=dict(type='list', elements='dict', options=aggregate_spec), + purge=dict(default=False, type='bool') + ) + argument_spec.update(element_spec) + required_one_of = [['vlan_id', 'aggregate']] + mutually_exclusive = [['vlan_id', 'aggregate']] + + module = AnsibleModule(argument_spec=argument_spec, + required_one_of=required_one_of, + mutually_exclusive=mutually_exclusive, + supports_check_mode=True) + warnings = list() + result = {} + result['changed'] = False + if warnings: + result['warnings'] = warnings + exec_command(module, 'skip') + want = map_params_to_obj(module) + if 
module.params['check_running_config'] is False: + have = [] + else: + have = map_config_to_obj(module) + commands = map_obj_to_commands((want, have), module) + result['commands'] = commands + + if commands: + if not module.check_mode: + output = load_config(module, commands) + if output: + check_fail(module, output) + result['output'] = output + result['changed'] = True + + check_declarative_intent_params(want, module, result) + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/illumos/dladm_etherstub.py b/plugins/modules/network/illumos/dladm_etherstub.py new file mode 100644 index 0000000000..d9410c2336 --- /dev/null +++ b/plugins/modules/network/illumos/dladm_etherstub.py @@ -0,0 +1,170 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2015, Adam Števko +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: dladm_etherstub +short_description: Manage etherstubs on Solaris/illumos systems. +description: + - Create or delete etherstubs on Solaris/illumos systems. +author: Adam Števko (@xen0l) +options: + name: + description: + - Etherstub name. + required: true + temporary: + description: + - Specifies that the etherstub is temporary. Temporary etherstubs + do not persist across reboots. + required: false + default: false + type: bool + state: + description: + - Create or delete Solaris/illumos etherstub. + required: false + default: "present" + choices: [ "present", "absent" ] +''' + +EXAMPLES = ''' +# Create 'stub0' etherstub +- dladm_etherstub: + name: stub0 + state: present + +# Remove 'stub0 etherstub +- dladm_etherstub: + name: stub0 + state: absent +''' + +RETURN = ''' +name: + description: etherstub name + returned: always + type: str + sample: "switch0" +state: + description: state of the target + returned: always + type: str + sample: "present" +temporary: + description: etherstub's persistence + returned: always + type: bool + sample: "True" +''' +from ansible.module_utils.basic import AnsibleModule + + +class Etherstub(object): + + def __init__(self, module): + self.module = module + + self.name = module.params['name'] + self.temporary = module.params['temporary'] + self.state = module.params['state'] + + def etherstub_exists(self): + cmd = [self.module.get_bin_path('dladm', True)] + + cmd.append('show-etherstub') + cmd.append(self.name) + + (rc, _, _) = self.module.run_command(cmd) + + if rc == 0: + return True + else: + return False + + def create_etherstub(self): + cmd = [self.module.get_bin_path('dladm', True)] + + cmd.append('create-etherstub') + + if self.temporary: + cmd.append('-t') + cmd.append(self.name) + + return self.module.run_command(cmd) + + def delete_etherstub(self): + cmd = [self.module.get_bin_path('dladm', True)] + + cmd.append('delete-etherstub') + + if self.temporary: + cmd.append('-t') + cmd.append(self.name) + + return self.module.run_command(cmd) + + +def main(): + module = AnsibleModule( + argument_spec=dict( + name=dict(required=True), + temporary=dict(default=False, type='bool'), + state=dict(default='present', choices=['absent', 'present']), + ), + supports_check_mode=True + ) + + etherstub = Etherstub(module) + + rc = None + out = '' + err = '' + result = {} + result['name'] = etherstub.name + result['state'] = 
etherstub.state + result['temporary'] = etherstub.temporary + + if etherstub.state == 'absent': + if etherstub.etherstub_exists(): + if module.check_mode: + module.exit_json(changed=True) + (rc, out, err) = etherstub.delete_etherstub() + if rc != 0: + module.fail_json(name=etherstub.name, msg=err, rc=rc) + elif etherstub.state == 'present': + if not etherstub.etherstub_exists(): + if module.check_mode: + module.exit_json(changed=True) + (rc, out, err) = etherstub.create_etherstub() + + if rc is not None and rc != 0: + module.fail_json(name=etherstub.name, msg=err, rc=rc) + + if rc is None: + result['changed'] = False + else: + result['changed'] = True + + if out: + result['stdout'] = out + if err: + result['stderr'] = err + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/illumos/dladm_iptun.py b/plugins/modules/network/illumos/dladm_iptun.py new file mode 100644 index 0000000000..9e48be0d49 --- /dev/null +++ b/plugins/modules/network/illumos/dladm_iptun.py @@ -0,0 +1,277 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2016, Adam Števko +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: dladm_iptun +short_description: Manage IP tunnel interfaces on Solaris/illumos systems. +description: + - Manage IP tunnel interfaces on Solaris/illumos systems. +author: Adam Števko (@xen0l) +options: + name: + description: + - IP tunnel interface name. + required: true + temporary: + description: + - Specifies that the IP tunnel interface is temporary. Temporary IP tunnel + interfaces do not persist across reboots. + required: false + default: false + type: bool + type: + description: + - Specifies the type of tunnel to be created. + required: false + default: "ipv4" + choices: [ "ipv4", "ipv6", "6to4" ] + aliases: ['tunnel_type'] + local_address: + description: + - Literal IP address or hostname corresponding to the tunnel source. + required: false + aliases: [ "local" ] + remote_address: + description: + - Literal IP address or hostname corresponding to the tunnel destination. + required: false + aliases: [ "remote" ] + state: + description: + - Create or delete Solaris/illumos VNIC. 
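# --- Editor's sketch (not part of the patch): this module drives the
# dladm(1M) CLI directly, so creating a tunnel boils down to assembling an
# argv list. For state=present the command this module runs looks like
#
#   dladm create-iptun -t -T ipv4 -a local=192.0.2.23,remote=203.0.113.10 iptun0
#
# (values are examples only), assembled in Python roughly as the
# create_iptun() method below does:
cmd = ['dladm', 'create-iptun']
temporary, tunnel_type = True, 'ipv4'                      # example values
local_address, remote_address = '192.0.2.23', '203.0.113.10'
if temporary:                                              # -t: do not persist across reboots
    cmd.append('-t')
cmd += ['-T', tunnel_type,
        '-a', 'local=%s,remote=%s' % (local_address, remote_address),
        'iptun0']
# --- end sketch ---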
+ required: false + default: "present" + choices: [ "present", "absent" ] +''' + +EXAMPLES = ''' +- name: Create IPv4 tunnel interface 'iptun0' + dladm_iptun: name=iptun0 local_address=192.0.2.23 remote_address=203.0.113.10 state=present + +- name: Change IPv4 tunnel remote address + dladm_iptun: name=iptun0 type=ipv4 local_address=192.0.2.23 remote_address=203.0.113.11 + +- name: Create IPv6 tunnel interface 'tun0' + dladm_iptun: name=tun0 type=ipv6 local_address=192.0.2.23 remote_address=203.0.113.42 + +- name: Remove 'iptun0' tunnel interface + dladm_iptun: name=iptun0 state=absent +''' + +RETURN = ''' +name: + description: tunnel interface name + returned: always + type: str + sample: iptun0 +state: + description: state of the target + returned: always + type: str + sample: present +temporary: + description: specifies if operation will persist across reboots + returned: always + type: bool + sample: True +local_address: + description: local IP address + returned: always + type: str + sample: 1.1.1.1/32 +remote_address: + description: remote IP address + returned: always + type: str + sample: 2.2.2.2/32 +type: + description: tunnel type + returned: always + type: str + sample: ipv4 +''' + +from ansible.module_utils.basic import AnsibleModule + + +SUPPORTED_TYPES = ['ipv4', 'ipv6', '6to4'] + + +class IPTun(object): + + def __init__(self, module): + self.module = module + + self.name = module.params['name'] + self.type = module.params['type'] + self.local_address = module.params['local_address'] + self.remote_address = module.params['remote_address'] + self.temporary = module.params['temporary'] + self.state = module.params['state'] + + self.dladm_bin = self.module.get_bin_path('dladm', True) + + def iptun_exists(self): + cmd = [self.dladm_bin] + + cmd.append('show-iptun') + cmd.append(self.name) + + (rc, _, _) = self.module.run_command(cmd) + + if rc == 0: + return True + else: + return False + + def create_iptun(self): + cmd = [self.dladm_bin] + + cmd.append('create-iptun') + + if self.temporary: + cmd.append('-t') + + cmd.append('-T') + cmd.append(self.type) + cmd.append('-a') + cmd.append('local=' + self.local_address + ',remote=' + self.remote_address) + cmd.append(self.name) + + return self.module.run_command(cmd) + + def delete_iptun(self): + cmd = [self.dladm_bin] + + cmd.append('delete-iptun') + + if self.temporary: + cmd.append('-t') + cmd.append(self.name) + + return self.module.run_command(cmd) + + def update_iptun(self): + cmd = [self.dladm_bin] + + cmd.append('modify-iptun') + + if self.temporary: + cmd.append('-t') + cmd.append('-a') + cmd.append('local=' + self.local_address + ',remote=' + self.remote_address) + cmd.append(self.name) + + return self.module.run_command(cmd) + + def _query_iptun_props(self): + cmd = [self.dladm_bin] + + cmd.append('show-iptun') + cmd.append('-p') + cmd.append('-c') + cmd.append('link,type,flags,local,remote') + cmd.append(self.name) + + return self.module.run_command(cmd) + + def iptun_needs_updating(self): + (rc, out, err) = self._query_iptun_props() + + NEEDS_UPDATING = False + + if rc == 0: + configured_local, configured_remote = out.split(':')[3:] + + if self.local_address != configured_local or self.remote_address != configured_remote: + NEEDS_UPDATING = True + + return NEEDS_UPDATING + else: + self.module.fail_json(msg='Failed to query tunnel interface %s properties' % self.name, + err=err, + rc=rc) + + +def main(): + module = AnsibleModule( + argument_spec=dict( + name=dict(required=True, type='str'), + type=dict(default='ipv4', 
type='str', aliases=['tunnel_type'], + choices=SUPPORTED_TYPES), + local_address=dict(type='str', aliases=['local']), + remote_address=dict(type='str', aliases=['remote']), + temporary=dict(default=False, type='bool'), + state=dict(default='present', choices=['absent', 'present']), + ), + required_if=[ + ['state', 'present', ['local_address', 'remote_address']], + ], + supports_check_mode=True + ) + + iptun = IPTun(module) + + rc = None + out = '' + err = '' + result = {} + result['name'] = iptun.name + result['type'] = iptun.type + result['local_address'] = iptun.local_address + result['remote_address'] = iptun.remote_address + result['state'] = iptun.state + result['temporary'] = iptun.temporary + + if iptun.state == 'absent': + if iptun.iptun_exists(): + if module.check_mode: + module.exit_json(changed=True) + (rc, out, err) = iptun.delete_iptun() + if rc != 0: + module.fail_json(name=iptun.name, msg=err, rc=rc) + elif iptun.state == 'present': + if not iptun.iptun_exists(): + if module.check_mode: + module.exit_json(changed=True) + (rc, out, err) = iptun.create_iptun() + + if rc is not None and rc != 0: + module.fail_json(name=iptun.name, msg=err, rc=rc) + else: + if iptun.iptun_needs_updating(): + (rc, out, err) = iptun.update_iptun() + if rc != 0: + module.fail_json(msg='Error while updating tunnel interface: "%s"' % err, + name=iptun.name, + stderr=err, + rc=rc) + + if rc is None: + result['changed'] = False + else: + result['changed'] = True + + if out: + result['stdout'] = out + if err: + result['stderr'] = err + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/illumos/dladm_linkprop.py b/plugins/modules/network/illumos/dladm_linkprop.py new file mode 100644 index 0000000000..de8d699e4a --- /dev/null +++ b/plugins/modules/network/illumos/dladm_linkprop.py @@ -0,0 +1,289 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2016, Adam Števko +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: dladm_linkprop +short_description: Manage link properties on Solaris/illumos systems. +description: + - Set / reset link properties on Solaris/illumos systems. +author: Adam Števko (@xen0l) +options: + link: + description: + - Link interface name. + required: true + aliases: [ "nic", "interface" ] + property: + description: + - Specifies the name of the property we want to manage. + required: true + aliases: [ "name" ] + value: + description: + - Specifies the value we want to set for the link property. + required: false + temporary: + description: + - Specifies that lin property configuration is temporary. Temporary + link property configuration does not persist across reboots. + required: false + type: bool + default: false + state: + description: + - Set or reset the property value. 
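# --- Editor's sketch (not part of the patch): dladm's parseable output mode
# (-c with -o value,default) prints colon-separated fields, e.g. "9000:1500"
# for an mtu raised from its default. The property_is_modified() helper below
# splits that pair and, despite its name, returns True when the current value
# still EQUALS the default (i.e. the property is unmodified); main() therefore
# resets a property only when it returns False. A minimal illustration of the
# parse (the helper name here is illustrative only):
def is_at_default(parseable_line):
    """'9000:1500' -> False (modified); '1500:1500' -> True (at default)."""
    value, default = parseable_line.rstrip().split(':')
    return value == default
# --- end sketch ---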
+ required: false + default: "present" + choices: [ "present", "absent", "reset" ] +''' + +EXAMPLES = ''' +- name: Set 'maxbw' to 100M on e1000g1 + dladm_linkprop: name=e1000g1 property=maxbw value=100M state=present + +- name: Set 'mtu' to 9000 on e1000g1 + dladm_linkprop: name=e1000g1 property=mtu value=9000 + +- name: Reset 'mtu' property on e1000g1 + dladm_linkprop: name=e1000g1 property=mtu state=reset +''' + +RETURN = ''' +property: + description: property name + returned: always + type: str + sample: mtu +state: + description: state of the target + returned: always + type: str + sample: present +temporary: + description: specifies if operation will persist across reboots + returned: always + type: bool + sample: True +link: + description: link name + returned: always + type: str + sample: e100g0 +value: + description: property value + returned: always + type: str + sample: 9000 +''' + +from ansible.module_utils.basic import AnsibleModule + + +class LinkProp(object): + + def __init__(self, module): + self.module = module + + self.link = module.params['link'] + self.property = module.params['property'] + self.value = module.params['value'] + self.temporary = module.params['temporary'] + self.state = module.params['state'] + + self.dladm_bin = self.module.get_bin_path('dladm', True) + + def property_exists(self): + cmd = [self.dladm_bin] + + cmd.append('show-linkprop') + cmd.append('-p') + cmd.append(self.property) + cmd.append(self.link) + + (rc, _, _) = self.module.run_command(cmd) + + if rc == 0: + return True + else: + self.module.fail_json(msg='Unknown property "%s" on link %s' % + (self.property, self.link), + property=self.property, + link=self.link) + + def property_is_modified(self): + cmd = [self.dladm_bin] + + cmd.append('show-linkprop') + cmd.append('-c') + cmd.append('-o') + cmd.append('value,default') + cmd.append('-p') + cmd.append(self.property) + cmd.append(self.link) + + (rc, out, _) = self.module.run_command(cmd) + + out = out.rstrip() + (value, default) = out.split(':') + + if rc == 0 and value == default: + return True + else: + return False + + def property_is_readonly(self): + cmd = [self.dladm_bin] + + cmd.append('show-linkprop') + cmd.append('-c') + cmd.append('-o') + cmd.append('perm') + cmd.append('-p') + cmd.append(self.property) + cmd.append(self.link) + + (rc, out, _) = self.module.run_command(cmd) + + out = out.rstrip() + + if rc == 0 and out == 'r-': + return True + else: + return False + + def property_is_set(self): + cmd = [self.dladm_bin] + + cmd.append('show-linkprop') + cmd.append('-c') + cmd.append('-o') + cmd.append('value') + cmd.append('-p') + cmd.append(self.property) + cmd.append(self.link) + + (rc, out, _) = self.module.run_command(cmd) + + out = out.rstrip() + + if rc == 0 and self.value == out: + return True + else: + return False + + def set_property(self): + cmd = [self.dladm_bin] + + cmd.append('set-linkprop') + + if self.temporary: + cmd.append('-t') + + cmd.append('-p') + cmd.append(self.property + '=' + self.value) + cmd.append(self.link) + + return self.module.run_command(cmd) + + def reset_property(self): + cmd = [self.dladm_bin] + + cmd.append('reset-linkprop') + + if self.temporary: + cmd.append('-t') + + cmd.append('-p') + cmd.append(self.property) + cmd.append(self.link) + + return self.module.run_command(cmd) + + +def main(): + module = AnsibleModule( + argument_spec=dict( + link=dict(required=True, default=None, type='str', aliases=['nic', 'interface']), + property=dict(required=True, type='str', aliases=['name']), + 
value=dict(required=False, type='str'), + temporary=dict(default=False, type='bool'), + state=dict( + default='present', choices=['absent', 'present', 'reset']), + ), + required_if=[ + ['state', 'present', ['value']], + ], + + supports_check_mode=True + ) + + linkprop = LinkProp(module) + + rc = None + out = '' + err = '' + result = {} + result['property'] = linkprop.property + result['link'] = linkprop.link + result['state'] = linkprop.state + if linkprop.value: + result['value'] = linkprop.value + + if linkprop.state == 'absent' or linkprop.state == 'reset': + if linkprop.property_exists(): + if not linkprop.property_is_modified(): + if module.check_mode: + module.exit_json(changed=True) + (rc, out, err) = linkprop.reset_property() + if rc != 0: + module.fail_json(property=linkprop.property, + link=linkprop.link, + msg=err, + rc=rc) + + elif linkprop.state == 'present': + if linkprop.property_exists(): + if not linkprop.property_is_readonly(): + if not linkprop.property_is_set(): + if module.check_mode: + module.exit_json(changed=True) + + (rc, out, err) = linkprop.set_property() + if rc != 0: + module.fail_json(property=linkprop.property, + link=linkprop.link, + msg=err, + rc=rc) + else: + module.fail_json(msg='Property "%s" is read-only!' % (linkprop.property), + property=linkprop.property, + link=linkprop.link) + + if rc is None: + result['changed'] = False + else: + result['changed'] = True + + if out: + result['stdout'] = out + if err: + result['stderr'] = err + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/illumos/dladm_vlan.py b/plugins/modules/network/illumos/dladm_vlan.py new file mode 100644 index 0000000000..a651abd1fd --- /dev/null +++ b/plugins/modules/network/illumos/dladm_vlan.py @@ -0,0 +1,213 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2016, Adam Števko +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: dladm_vlan +short_description: Manage VLAN interfaces on Solaris/illumos systems. +description: + - Create or delete VLAN interfaces on Solaris/illumos systems. +author: Adam Števko (@xen0l) +options: + name: + description: + - VLAN interface name. + required: true + link: + description: + - VLAN underlying link name. + required: true + temporary: + description: + - Specifies that the VLAN interface is temporary. Temporary VLANs + do not persist across reboots. + required: false + default: false + type: bool + vlan_id: + description: + - VLAN ID value for VLAN interface. + required: false + default: false + aliases: [ "vid" ] + state: + description: + - Create or delete Solaris/illumos VNIC. 
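# --- Editor's sketch (not part of the patch): every illumos module in this
# patch reports idempotency the same way: 'rc' starts as None, is assigned
# only when an external dladm/ipadm/flowadm command actually runs, and
# 'changed' is derived from that sentinel afterwards. A skeleton of the
# pattern (stand-in values replace the real run_command calls):
rc, out, err = None, '', ''
exists = True                     # stand-in for e.g. vlan_exists()
state = 'absent'                  # example value

if state == 'absent' and exists:
    rc, out, err = 0, '', ''      # stand-in for run_command(['dladm', 'delete-vlan', ...])
elif state == 'present' and not exists:
    rc, out, err = 0, '', ''      # stand-in for run_command(['dladm', 'create-vlan', ...])

changed = rc is not None          # no command ran -> nothing changed
# --- end sketch ---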
+ required: false + default: "present" + choices: [ "present", "absent" ] +''' + +EXAMPLES = ''' +- name: Create 'vlan42' VLAN over 'bnx0' link + dladm_vlan: name=vlan42 link=bnx0 vlan_id=42 state=present + +- name: Remove 'vlan1337' VLAN interface + dladm_vlan: name=vlan1337 state=absent +''' + +RETURN = ''' +name: + description: VLAN name + returned: always + type: str + sample: vlan42 +state: + description: state of the target + returned: always + type: str + sample: present +temporary: + description: specifies if operation will persist across reboots + returned: always + type: bool + sample: True +link: + description: VLAN's underlying link name + returned: always + type: str + sample: e100g0 +vlan_id: + description: VLAN ID + returned: always + type: str + sample: 42 +''' + +from ansible.module_utils.basic import AnsibleModule + + +class VLAN(object): + + def __init__(self, module): + self.module = module + + self.name = module.params['name'] + self.link = module.params['link'] + self.vlan_id = module.params['vlan_id'] + self.temporary = module.params['temporary'] + self.state = module.params['state'] + + def vlan_exists(self): + cmd = [self.module.get_bin_path('dladm', True)] + + cmd.append('show-vlan') + cmd.append(self.name) + + (rc, _, _) = self.module.run_command(cmd) + + if rc == 0: + return True + else: + return False + + def create_vlan(self): + cmd = [self.module.get_bin_path('dladm', True)] + + cmd.append('create-vlan') + + if self.temporary: + cmd.append('-t') + + cmd.append('-l') + cmd.append(self.link) + cmd.append('-v') + cmd.append(self.vlan_id) + cmd.append(self.name) + + return self.module.run_command(cmd) + + def delete_vlan(self): + cmd = [self.module.get_bin_path('dladm', True)] + + cmd.append('delete-vlan') + + if self.temporary: + cmd.append('-t') + cmd.append(self.name) + + return self.module.run_command(cmd) + + def is_valid_vlan_id(self): + + return 0 <= int(self.vlan_id) <= 4095 + + +def main(): + module = AnsibleModule( + argument_spec=dict( + name=dict(required=True, type='str'), + link=dict(default=None, type='str'), + vlan_id=dict(default=0, aliases=['vid']), + temporary=dict(default=False, type='bool'), + state=dict(default='present', choices=['absent', 'present']), + ), + required_if=[ + ['state', 'present', ['vlan_id', 'link', 'name']], + ], + supports_check_mode=True + ) + + vlan = VLAN(module) + + rc = None + out = '' + err = '' + result = {} + result['name'] = vlan.name + result['link'] = vlan.link + result['state'] = vlan.state + result['temporary'] = vlan.temporary + + if int(vlan.vlan_id) != 0: + if not vlan.is_valid_vlan_id(): + module.fail_json(msg='Invalid VLAN id value', + name=vlan.name, + state=vlan.state, + link=vlan.link, + vlan_id=vlan.vlan_id) + result['vlan_id'] = vlan.vlan_id + + if vlan.state == 'absent': + if vlan.vlan_exists(): + if module.check_mode: + module.exit_json(changed=True) + (rc, out, err) = vlan.delete_vlan() + if rc != 0: + module.fail_json(name=vlan.name, msg=err, rc=rc) + elif vlan.state == 'present': + if not vlan.vlan_exists(): + if module.check_mode: + module.exit_json(changed=True) + (rc, out, err) = vlan.create_vlan() + + if rc is not None and rc != 0: + module.fail_json(name=vlan.name, msg=err, rc=rc) + + if rc is None: + result['changed'] = False + else: + result['changed'] = True + + if out: + result['stdout'] = out + if err: + result['stderr'] = err + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/illumos/dladm_vnic.py 
b/plugins/modules/network/illumos/dladm_vnic.py new file mode 100644 index 0000000000..cd7b86a6aa --- /dev/null +++ b/plugins/modules/network/illumos/dladm_vnic.py @@ -0,0 +1,265 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2015, Adam Števko +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: dladm_vnic +short_description: Manage VNICs on Solaris/illumos systems. +description: + - Create or delete VNICs on Solaris/illumos systems. +author: Adam Števko (@xen0l) +options: + name: + description: + - VNIC name. + required: true + link: + description: + - VNIC underlying link name. + required: true + temporary: + description: + - Specifies that the VNIC is temporary. Temporary VNICs + do not persist across reboots. + required: false + default: false + type: bool + mac: + description: + - Sets the VNIC's MAC address. Must be valid unicast MAC address. + required: false + default: false + aliases: [ "macaddr" ] + vlan: + description: + - Enable VLAN tagging for this VNIC. The VLAN tag will have id + I(vlan). + required: false + default: false + aliases: [ "vlan_id" ] + state: + description: + - Create or delete Solaris/illumos VNIC. + required: false + default: "present" + choices: [ "present", "absent" ] +''' + +EXAMPLES = ''' +# Create 'vnic0' VNIC over 'bnx0' link +- dladm_vnic: + name: vnic0 + link: bnx0 + state: present + +# Create VNIC with specified MAC and VLAN tag over 'aggr0' +- dladm_vnic: + name: vnic1 + link: aggr0 + mac: '00:00:5E:00:53:23' + vlan: 4 + +# Remove 'vnic0' VNIC +- dladm_vnic: + name: vnic0 + link: bnx0 + state: absent +''' + +RETURN = ''' +name: + description: VNIC name + returned: always + type: str + sample: "vnic0" +link: + description: VNIC underlying link name + returned: always + type: str + sample: "igb0" +state: + description: state of the target + returned: always + type: str + sample: "present" +temporary: + description: VNIC's persistence + returned: always + type: bool + sample: "True" +mac: + description: MAC address to use for VNIC + returned: if mac is specified + type: str + sample: "00:00:5E:00:53:42" +vlan: + description: VLAN to use for VNIC + returned: success + type: int + sample: 42 +''' + +import re + +from ansible.module_utils.basic import AnsibleModule + + +class VNIC(object): + + UNICAST_MAC_REGEX = r'^[a-f0-9][2-9a-f0]:([a-f0-9]{2}:){4}[a-f0-9]{2}$' + + def __init__(self, module): + self.module = module + + self.name = module.params['name'] + self.link = module.params['link'] + self.mac = module.params['mac'] + self.vlan = module.params['vlan'] + self.temporary = module.params['temporary'] + self.state = module.params['state'] + + def vnic_exists(self): + cmd = [self.module.get_bin_path('dladm', True)] + + cmd.append('show-vnic') + cmd.append(self.name) + + (rc, _, _) = self.module.run_command(cmd) + + if rc == 0: + return True + else: + return False + + def create_vnic(self): + cmd = [self.module.get_bin_path('dladm', True)] + + cmd.append('create-vnic') + + if self.temporary: + cmd.append('-t') + + if self.mac: + cmd.append('-m') + cmd.append(self.mac) + + if self.vlan: + cmd.append('-v') + cmd.append(self.vlan) + + cmd.append('-l') + cmd.append(self.link) + cmd.append(self.name) + + return self.module.run_command(cmd) + + def delete_vnic(self): + cmd = 
[self.module.get_bin_path('dladm', True)]
+
+        cmd.append('delete-vnic')
+
+        if self.temporary:
+            cmd.append('-t')
+        cmd.append(self.name)
+
+        return self.module.run_command(cmd)
+
+    def is_valid_unicast_mac(self):
+
+        mac_re = re.match(self.UNICAST_MAC_REGEX, self.mac)
+
+        return mac_re is not None
+
+    def is_valid_vlan_id(self):
+
+        return 0 <= int(self.vlan) <= 4095
+
+
+def main():
+    module = AnsibleModule(
+        argument_spec=dict(
+            name=dict(required=True),
+            link=dict(required=True),
+            mac=dict(default=None, aliases=['macaddr']),
+            vlan=dict(default=None, aliases=['vlan_id']),
+            temporary=dict(default=False, type='bool'),
+            state=dict(default='present', choices=['absent', 'present']),
+        ),
+        supports_check_mode=True
+    )
+
+    vnic = VNIC(module)
+
+    rc = None
+    out = ''
+    err = ''
+    result = {}
+    result['name'] = vnic.name
+    result['link'] = vnic.link
+    result['state'] = vnic.state
+    result['temporary'] = vnic.temporary
+
+    if vnic.mac is not None:
+        # Fail only when the MAC does not match the unicast pattern.
+        if not vnic.is_valid_unicast_mac():
+            module.fail_json(msg='Invalid unicast MAC address',
+                             mac=vnic.mac,
+                             name=vnic.name,
+                             state=vnic.state,
+                             link=vnic.link,
+                             vlan=vnic.vlan)
+        result['mac'] = vnic.mac
+
+    if vnic.vlan is not None:
+        # Fail only when the VLAN tag is out of range.
+        if not vnic.is_valid_vlan_id():
+            module.fail_json(msg='Invalid VLAN tag',
+                             mac=vnic.mac,
+                             name=vnic.name,
+                             state=vnic.state,
+                             link=vnic.link,
+                             vlan=vnic.vlan)
+        result['vlan'] = vnic.vlan
+
+    if vnic.state == 'absent':
+        if vnic.vnic_exists():
+            if module.check_mode:
+                module.exit_json(changed=True)
+            (rc, out, err) = vnic.delete_vnic()
+            if rc != 0:
+                module.fail_json(name=vnic.name, msg=err, rc=rc)
+    elif vnic.state == 'present':
+        if not vnic.vnic_exists():
+            if module.check_mode:
+                module.exit_json(changed=True)
+            (rc, out, err) = vnic.create_vnic()
+
+        if rc is not None and rc != 0:
+            module.fail_json(name=vnic.name, msg=err, rc=rc)
+
+    if rc is None:
+        result['changed'] = False
+    else:
+        result['changed'] = True
+
+    if out:
+        result['stdout'] = out
+    if err:
+        result['stderr'] = err
+
+    module.exit_json(**result)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/network/illumos/flowadm.py b/plugins/modules/network/illumos/flowadm.py
new file mode 100644
index 0000000000..b35a65fd45
--- /dev/null
+++ b/plugins/modules/network/illumos/flowadm.py
@@ -0,0 +1,513 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2016, Adam Števko
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: flowadm
+short_description: Manage bandwidth resource control and priority for protocols, services and zones on Solaris/illumos systems
+description:
+  - Create/modify/remove networking bandwidth and associated resources for a type of traffic on a particular link.
+author: Adam Števko (@xen0l)
+options:
+  name:
+    description: >
+      - A flow is defined as a set of attributes based on Layer 3 and Layer 4
+        headers, which can be used to identify a protocol, service, or a zone.
+    required: true
+    aliases: [ 'flow' ]
+  link:
+    description:
+      - Specifies a link to configure the flow on.
+    required: false
+  local_ip:
+    description:
+      - Identifies a network flow by the local IP address.
+    required: false
+  remote_ip:
+    description:
+      - Identifies a network flow by the remote IP address.
+    required: false
+  transport:
+    description: >
+      - Specifies a Layer 4 protocol to be used. It is typically used in combination with I(local_port) to
+        identify the service that needs special attention.
+    required: false
+  local_port:
+    description:
+      - Identifies a service specified by the local port.
+    required: false
+  dsfield:
+    description: >
+      - Identifies the 8-bit differentiated services field (as defined in
+        RFC 2474). The optional dsfield_mask is used to state the bits of interest in
+        the differentiated services field when comparing with the dsfield
+        value. Both values must be in hexadecimal.
+    required: false
+  maxbw:
+    description: >
+      - Sets the full duplex bandwidth for the flow. The bandwidth is
+        specified as an integer with one of the scale suffixes (K, M, or G
+        for Kbps, Mbps, and Gbps). If no units are specified, the input
+        value will be read as Mbps.
+    required: false
+  priority:
+    description:
+      - Sets the relative priority for the flow.
+    required: false
+    default: 'medium'
+    choices: [ 'low', 'medium', 'high' ]
+  temporary:
+    description:
+      - Specifies that the configured flow is temporary. Temporary
+        flows do not persist across reboots.
+    required: false
+    default: false
+    type: bool
+  state:
+    description:
+      - Create, delete, or reset the flow.
+    required: false
+    default: present
+    choices: [ 'absent', 'present', 'resetted' ]
+'''
+
+EXAMPLES = '''
+# Limit SSH traffic to 100M via vnic0 interface
+- flowadm:
+    link: vnic0
+    flow: ssh_out
+    transport: tcp
+    local_port: 22
+    maxbw: 100M
+    state: present
+
+# Reset flow properties
+- flowadm:
+    name: dns
+    state: resetted
+
+# Configure policy for EF PHB (DSCP value of 101110 from RFC 2598) with a bandwidth of 500 Mbps and a high priority.
+- flowadm: + link: bge0 + dsfield: '0x2e:0xfc' + maxbw: 500M + priority: high + flow: efphb-flow + state: present +''' + +RETURN = ''' +name: + description: flow name + returned: always + type: str + sample: "http_drop" +link: + description: flow's link + returned: if link is defined + type: str + sample: "vnic0" +state: + description: state of the target + returned: always + type: str + sample: "present" +temporary: + description: flow's persistence + returned: always + type: bool + sample: "True" +priority: + description: flow's priority + returned: if priority is defined + type: str + sample: "low" +transport: + description: flow's transport + returned: if transport is defined + type: str + sample: "tcp" +maxbw: + description: flow's maximum bandwidth + returned: if maxbw is defined + type: str + sample: "100M" +local_Ip: + description: flow's local IP address + returned: if local_ip is defined + type: str + sample: "10.0.0.42" +local_port: + description: flow's local port + returned: if local_port is defined + type: int + sample: 1337 +remote_Ip: + description: flow's remote IP address + returned: if remote_ip is defined + type: str + sample: "10.0.0.42" +dsfield: + description: flow's differentiated services value + returned: if dsfield is defined + type: str + sample: "0x2e:0xfc" +''' + + +import socket + +from ansible.module_utils.basic import AnsibleModule + + +SUPPORTED_TRANSPORTS = ['tcp', 'udp', 'sctp', 'icmp', 'icmpv6'] +SUPPORTED_PRIORITIES = ['low', 'medium', 'high'] + +SUPPORTED_ATTRIBUTES = ['local_ip', 'remote_ip', 'transport', 'local_port', 'dsfield'] +SUPPORTPED_PROPERTIES = ['maxbw', 'priority'] + + +class Flow(object): + + def __init__(self, module): + self.module = module + + self.name = module.params['name'] + self.link = module.params['link'] + self.local_ip = module.params['local_ip'] + self.remote_ip = module.params['remote_ip'] + self.transport = module.params['transport'] + self.local_port = module.params['local_port'] + self.dsfield = module.params['dsfield'] + self.maxbw = module.params['maxbw'] + self.priority = module.params['priority'] + self.temporary = module.params['temporary'] + self.state = module.params['state'] + + self._needs_updating = { + 'maxbw': False, + 'priority': False, + } + + @classmethod + def is_valid_port(cls, port): + return 1 <= int(port) <= 65535 + + @classmethod + def is_valid_address(cls, ip): + + if ip.count('/') == 1: + ip_address, netmask = ip.split('/') + else: + ip_address = ip + + if len(ip_address.split('.')) == 4: + try: + socket.inet_pton(socket.AF_INET, ip_address) + except socket.error: + return False + + if not 0 <= netmask <= 32: + return False + else: + try: + socket.inet_pton(socket.AF_INET6, ip_address) + except socket.error: + return False + + if not 0 <= netmask <= 128: + return False + + return True + + @classmethod + def is_hex(cls, number): + try: + int(number, 16) + except ValueError: + return False + + return True + + @classmethod + def is_valid_dsfield(cls, dsfield): + + dsmask = None + + if dsfield.count(':') == 1: + dsval = dsfield.split(':')[0] + else: + dsval, dsmask = dsfield.split(':') + + if dsmask and not 0x01 <= int(dsmask, 16) <= 0xff and not 0x01 <= int(dsval, 16) <= 0xff: + return False + elif not 0x01 <= int(dsval, 16) <= 0xff: + return False + + return True + + def flow_exists(self): + cmd = [self.module.get_bin_path('flowadm')] + + cmd.append('show-flow') + cmd.append(self.name) + + (rc, _, _) = self.module.run_command(cmd) + + if rc == 0: + return True + else: + return False + + def 
delete_flow(self): + cmd = [self.module.get_bin_path('flowadm')] + + cmd.append('remove-flow') + if self.temporary: + cmd.append('-t') + cmd.append(self.name) + + return self.module.run_command(cmd) + + def create_flow(self): + cmd = [self.module.get_bin_path('flowadm')] + + cmd.append('add-flow') + cmd.append('-l') + cmd.append(self.link) + + if self.local_ip: + cmd.append('-a') + cmd.append('local_ip=' + self.local_ip) + + if self.remote_ip: + cmd.append('-a') + cmd.append('remote_ip=' + self.remote_ip) + + if self.transport: + cmd.append('-a') + cmd.append('transport=' + self.transport) + + if self.local_port: + cmd.append('-a') + cmd.append('local_port=' + self.local_port) + + if self.dsfield: + cmd.append('-a') + cmd.append('dsfield=' + self.dsfield) + + if self.maxbw: + cmd.append('-p') + cmd.append('maxbw=' + self.maxbw) + + if self.priority: + cmd.append('-p') + cmd.append('priority=' + self.priority) + + if self.temporary: + cmd.append('-t') + cmd.append(self.name) + + return self.module.run_command(cmd) + + def _query_flow_props(self): + cmd = [self.module.get_bin_path('flowadm')] + + cmd.append('show-flowprop') + cmd.append('-c') + cmd.append('-o') + cmd.append('property,possible') + cmd.append(self.name) + + return self.module.run_command(cmd) + + def flow_needs_udpating(self): + (rc, out, err) = self._query_flow_props() + + NEEDS_UPDATING = False + + if rc == 0: + properties = (line.split(':') for line in out.rstrip().split('\n')) + for prop, value in properties: + if prop == 'maxbw' and self.maxbw != value: + self._needs_updating.update({prop: True}) + NEEDS_UPDATING = True + + elif prop == 'priority' and self.priority != value: + self._needs_updating.update({prop: True}) + NEEDS_UPDATING = True + + return NEEDS_UPDATING + else: + self.module.fail_json(msg='Error while checking flow properties: %s' % err, + stderr=err, + rc=rc) + + def update_flow(self): + cmd = [self.module.get_bin_path('flowadm')] + + cmd.append('set-flowprop') + + if self.maxbw and self._needs_updating['maxbw']: + cmd.append('-p') + cmd.append('maxbw=' + self.maxbw) + + if self.priority and self._needs_updating['priority']: + cmd.append('-p') + cmd.append('priority=' + self.priority) + + if self.temporary: + cmd.append('-t') + cmd.append(self.name) + + return self.module.run_command(cmd) + + +def main(): + module = AnsibleModule( + argument_spec=dict( + name=dict(required=True, aliases=['flow']), + link=dict(required=False), + local_ip=dict(required=False), + remote_ip=dict(required=False), + transport=dict(required=False, choices=SUPPORTED_TRANSPORTS), + local_port=dict(required=False), + dsfield=dict(required=False), + maxbw=dict(required=False), + priority=dict(required=False, + default='medium', + choices=SUPPORTED_PRIORITIES), + temporary=dict(default=False, type='bool'), + state=dict(required=False, + default='present', + choices=['absent', 'present', 'resetted']), + ), + mutually_exclusive=[ + ('local_ip', 'remote_ip'), + ('local_ip', 'transport'), + ('local_ip', 'local_port'), + ('local_ip', 'dsfield'), + ('remote_ip', 'transport'), + ('remote_ip', 'local_port'), + ('remote_ip', 'dsfield'), + ('transport', 'dsfield'), + ('local_port', 'dsfield'), + ], + supports_check_mode=True + ) + + flow = Flow(module) + + rc = None + out = '' + err = '' + result = {} + result['name'] = flow.name + result['state'] = flow.state + result['temporary'] = flow.temporary + + if flow.link: + result['link'] = flow.link + + if flow.maxbw: + result['maxbw'] = flow.maxbw + + if flow.priority: + result['priority'] = 
flow.priority + + if flow.local_ip: + if flow.is_valid_address(flow.local_ip): + result['local_ip'] = flow.local_ip + + if flow.remote_ip: + if flow.is_valid_address(flow.remote_ip): + result['remote_ip'] = flow.remote_ip + + if flow.transport: + result['transport'] = flow.transport + + if flow.local_port: + if flow.is_valid_port(flow.local_port): + result['local_port'] = flow.local_port + else: + module.fail_json(msg='Invalid port: %s' % flow.local_port, + rc=1) + + if flow.dsfield: + if flow.is_valid_dsfield(flow.dsfield): + result['dsfield'] = flow.dsfield + else: + module.fail_json(msg='Invalid dsfield: %s' % flow.dsfield, + rc=1) + + if flow.state == 'absent': + if flow.flow_exists(): + if module.check_mode: + module.exit_json(changed=True) + + (rc, out, err) = flow.delete_flow() + if rc != 0: + module.fail_json(msg='Error while deleting flow: "%s"' % err, + name=flow.name, + stderr=err, + rc=rc) + + elif flow.state == 'present': + if not flow.flow_exists(): + if module.check_mode: + module.exit_json(changed=True) + + (rc, out, err) = flow.create_flow() + if rc != 0: + module.fail_json(msg='Error while creating flow: "%s"' % err, + name=flow.name, + stderr=err, + rc=rc) + else: + if flow.flow_needs_udpating(): + (rc, out, err) = flow.update_flow() + if rc != 0: + module.fail_json(msg='Error while updating flow: "%s"' % err, + name=flow.name, + stderr=err, + rc=rc) + + elif flow.state == 'resetted': + if flow.flow_exists(): + if module.check_mode: + module.exit_json(changed=True) + + (rc, out, err) = flow.reset_flow() + if rc != 0: + module.fail_json(msg='Error while resetting flow: "%s"' % err, + name=flow.name, + stderr=err, + rc=rc) + + if rc is None: + result['changed'] = False + else: + result['changed'] = True + + if out: + result['stdout'] = out + if err: + result['stderr'] = err + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/illumos/ipadm_addr.py b/plugins/modules/network/illumos/ipadm_addr.py new file mode 100644 index 0000000000..d898d6c4bd --- /dev/null +++ b/plugins/modules/network/illumos/ipadm_addr.py @@ -0,0 +1,403 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2016, Adam Števko +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: ipadm_addr +short_description: Manage IP addresses on an interface on Solaris/illumos systems +description: + - Create/delete static/dynamic IP addresses on network interfaces on Solaris/illumos systems. + - Up/down static/dynamic IP addresses on network interfaces on Solaris/illumos systems. + - Manage IPv6 link-local addresses on network interfaces on Solaris/illumos systems. +author: Adam Števko (@xen0l) +options: + address: + description: + - Specifiies an IP address to configure in CIDR notation. + required: false + aliases: [ "addr" ] + addrtype: + description: + - Specifiies a type of IP address to configure. + required: false + default: static + choices: [ 'static', 'dhcp', 'addrconf' ] + addrobj: + description: + - Specifies an unique IP address on the system. + required: true + temporary: + description: + - Specifies that the configured IP address is temporary. Temporary + IP addresses do not persist across reboots. 
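# --- Editor's sketch (not part of the patch): two latent issues in the flowadm
# module above. First, the state=resetted branch calls flow.reset_flow(), but no
# such method is defined anywhere in this diff; a minimal implementation,
# ASSUMING flowadm(1M)'s 'reset-flowprop' subcommand is the intended backend,
# could look like:
#
#     def reset_flow(self):
#         cmd = [self.module.get_bin_path('flowadm')]
#         cmd.append('reset-flowprop')
#         if self.temporary:
#             cmd.append('-t')
#         cmd.append(self.name)
#         return self.module.run_command(cmd)
#
# Second, Flow.is_valid_address() references 'netmask' even when no '/' is
# present (UnboundLocalError) and compares it as a string, and
# is_valid_dsfield() has its two split branches swapped. A corrected,
# self-contained sketch of the address check:
import socket

def is_valid_address(ip):
    """Validate 'addr' or 'addr/prefixlen' for IPv4 and IPv6."""
    ip_address, _, netmask = ip.partition('/')
    family = socket.AF_INET if len(ip_address.split('.')) == 4 else socket.AF_INET6
    try:
        socket.inet_pton(family, ip_address)
    except socket.error:
        return False
    if netmask:  # only check the prefix length when one was given
        if not netmask.isdigit():
            return False
        limit = 32 if family == socket.AF_INET else 128
        if not 0 <= int(netmask) <= limit:
            return False
    return True
# --- end sketch ---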
+        required: false
+        default: false
+        type: bool
+    wait:
+        description:
+            - Specifies the time in seconds to wait for DHCP to obtain an
+              address.
+        required: false
+        default: 60
+    state:
+        description:
+            - Create/delete/enable/disable an IP address on the network interface.
+        required: false
+        default: present
+        choices: [ 'absent', 'present', 'up', 'down', 'enabled', 'disabled', 'refreshed' ]
+'''
+
+EXAMPLES = '''
+- name: Configure IP address 10.0.0.1 on e1000g0
+  ipadm_addr: addr=10.0.0.1/32 addrobj=e1000g0/v4 state=present
+
+- name: Delete addrobj
+  ipadm_addr: addrobj=e1000g0/v4 state=absent
+
+- name: Configure link-local IPv6 address
+  ipadm_addr: addrtype=addrconf addrobj=vnic0/v6
+
+- name: Configure address via DHCP and wait 180 seconds to obtain an address
+  ipadm_addr: addrobj=vnic0/dhcp addrtype=dhcp wait=180
+'''
+
+RETURN = '''
+addrobj:
+    description: address object name
+    returned: always
+    type: str
+    sample: bge0/v4
+state:
+    description: state of the target
+    returned: always
+    type: str
+    sample: present
+temporary:
+    description: specifies if operation will persist across reboots
+    returned: always
+    type: bool
+    sample: True
+addrtype:
+    description: address type
+    returned: always
+    type: str
+    sample: static
+address:
+    description: IP address
+    returned: only if addrtype is 'static'
+    type: str
+    sample: 1.3.3.7/32
+wait:
+    description: time to wait for DHCP
+    returned: only if addrtype is 'dhcp'
+    type: int
+    sample: 10
+'''
+
+import socket
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+SUPPORTED_TYPES = ['static', 'addrconf', 'dhcp']
+
+
+class Addr(object):
+
+    def __init__(self, module):
+        self.module = module
+
+        self.address = module.params['address']
+        self.addrtype = module.params['addrtype']
+        self.addrobj = module.params['addrobj']
+        self.temporary = module.params['temporary']
+        self.state = module.params['state']
+        self.wait = module.params['wait']
+
+    def is_cidr_notation(self):
+
+        return self.address.count('/') == 1
+
+    def is_valid_address(self):
+
+        ip_address = self.address.split('/')[0]
+
+        try:
+            if len(ip_address.split('.')) == 4:
+                socket.inet_pton(socket.AF_INET, ip_address)
+            else:
+                socket.inet_pton(socket.AF_INET6, ip_address)
+        except socket.error:
+            return False
+
+        return True
+
+    def is_dhcp(self):
+        cmd = [self.module.get_bin_path('ipadm')]
+
+        cmd.append('show-addr')
+        cmd.append('-p')
+        cmd.append('-o')
+        cmd.append('type')
+        cmd.append(self.addrobj)
+
+        (rc, out, err) = self.module.run_command(cmd)
+
+        if rc == 0:
+            if out.rstrip() != 'dhcp':
+                return False
+
+            return True
+        else:
+            self.module.fail_json(msg='Wrong addrtype %s for addrobj "%s": %s' % (out, self.addrobj, err),
+                                  rc=rc,
+                                  stderr=err)
+
+    def addrobj_exists(self):
+        cmd = [self.module.get_bin_path('ipadm')]
+
+        cmd.append('show-addr')
+        cmd.append(self.addrobj)
+
+        (rc, _, _) = self.module.run_command(cmd)
+
+        if rc == 0:
+            return True
+        else:
+            return False
+
+    def delete_addr(self):
+        cmd = [self.module.get_bin_path('ipadm')]
+
+        cmd.append('delete-addr')
+        cmd.append(self.addrobj)
+
+        return self.module.run_command(cmd)
+
+    def create_addr(self):
+        cmd = [self.module.get_bin_path('ipadm')]
+
+        cmd.append('create-addr')
+        cmd.append('-T')
+        cmd.append(self.addrtype)
+
+        if self.temporary:
+            cmd.append('-t')
+
+        if self.addrtype == 'static':
+            cmd.append('-a')
+            cmd.append(self.address)
+
+        if self.addrtype == 'dhcp' and self.wait:
+            cmd.append('-w')
+            # wait is an int; run_command expects string arguments
+            cmd.append(str(self.wait))
+
+        cmd.append(self.addrobj)
+
+        return self.module.run_command(cmd)
+
+    def 
up_addr(self): + cmd = [self.module.get_bin_path('ipadm')] + + cmd.append('up-addr') + + if self.temporary: + cmd.append('-t') + + cmd.append(self.addrobj) + + return self.module.run_command(cmd) + + def down_addr(self): + cmd = [self.module.get_bin_path('ipadm')] + + cmd.append('down-addr') + + if self.temporary: + cmd.append('-t') + + cmd.append(self.addrobj) + + return self.module.run_command(cmd) + + def enable_addr(self): + cmd = [self.module.get_bin_path('ipadm')] + + cmd.append('enable-addr') + cmd.append('-t') + cmd.append(self.addrobj) + + return self.module.run_command(cmd) + + def disable_addr(self): + cmd = [self.module.get_bin_path('ipadm')] + + cmd.append('disable-addr') + cmd.append('-t') + cmd.append(self.addrobj) + + return self.module.run_command(cmd) + + def refresh_addr(self): + cmd = [self.module.get_bin_path('ipadm')] + + cmd.append('refresh-addr') + cmd.append(self.addrobj) + + return self.module.run_command(cmd) + + +def main(): + module = AnsibleModule( + argument_spec=dict( + address=dict(aliases=['addr']), + addrtype=dict(default='static', choices=SUPPORTED_TYPES), + addrobj=dict(required=True), + temporary=dict(default=False, type='bool'), + state=dict( + default='present', choices=['absent', 'present', 'up', 'down', 'enabled', 'disabled', 'refreshed']), + wait=dict(default=60, type='int'), + ), + mutually_exclusive=[ + ('address', 'wait'), + ], + supports_check_mode=True + ) + + addr = Addr(module) + + rc = None + out = '' + err = '' + result = {} + result['addrobj'] = addr.addrobj + result['state'] = addr.state + result['temporary'] = addr.temporary + result['addrtype'] = addr.addrtype + + if addr.addrtype == 'static' and addr.address: + if addr.is_cidr_notation() and addr.is_valid_address(): + result['address'] = addr.address + else: + module.fail_json(msg='Invalid IP address: %s' % addr.address) + + if addr.addrtype == 'dhcp' and addr.wait: + result['wait'] = addr.wait + + if addr.state == 'absent': + if addr.addrobj_exists(): + if module.check_mode: + module.exit_json(changed=True) + + (rc, out, err) = addr.delete_addr() + if rc != 0: + module.fail_json(msg='Error while deleting addrobj: "%s"' % err, + addrobj=addr.addrobj, + stderr=err, + rc=rc) + + elif addr.state == 'present': + if not addr.addrobj_exists(): + if module.check_mode: + module.exit_json(changed=True) + + (rc, out, err) = addr.create_addr() + if rc != 0: + module.fail_json(msg='Error while configuring IP address: "%s"' % err, + addrobj=addr.addrobj, + addr=addr.address, + stderr=err, + rc=rc) + + elif addr.state == 'up': + if addr.addrobj_exists(): + if module.check_mode: + module.exit_json(changed=True) + + (rc, out, err) = addr.up_addr() + if rc != 0: + module.fail_json(msg='Error while bringing IP address up: "%s"' % err, + addrobj=addr.addrobj, + stderr=err, + rc=rc) + + elif addr.state == 'down': + if addr.addrobj_exists(): + if module.check_mode: + module.exit_json(changed=True) + + (rc, out, err) = addr.down_addr() + if rc != 0: + module.fail_json(msg='Error while bringing IP address down: "%s"' % err, + addrobj=addr.addrobj, + stderr=err, + rc=rc) + + elif addr.state == 'refreshed': + if addr.addrobj_exists(): + if addr.is_dhcp(): + if module.check_mode: + module.exit_json(changed=True) + + (rc, out, err) = addr.refresh_addr() + if rc != 0: + module.fail_json(msg='Error while refreshing IP address: "%s"' % err, + addrobj=addr.addrobj, + stderr=err, + rc=rc) + else: + module.fail_json(msg='state "refreshed" cannot be used with "%s" addrtype' % addr.addrtype, + addrobj=addr.addrobj, + 
stderr=err, + rc=1) + + elif addr.state == 'enabled': + if addr.addrobj_exists(): + if module.check_mode: + module.exit_json(changed=True) + + (rc, out, err) = addr.enable_addr() + if rc != 0: + module.fail_json(msg='Error while enabling IP address: "%s"' % err, + addrobj=addr.addrobj, + stderr=err, + rc=rc) + + elif addr.state == 'disabled': + if addr.addrobj_exists(): + if module.check_mode: + module.exit_json(changed=True) + + (rc, out, err) = addr.disable_addr() + if rc != 0: + module.fail_json(msg='Error while disabling IP address: "%s"' % err, + addrobj=addr.addrobj, + stderr=err, + rc=rc) + + if rc is None: + result['changed'] = False + else: + result['changed'] = True + + if out: + result['stdout'] = out + if err: + result['stderr'] = err + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/illumos/ipadm_addrprop.py b/plugins/modules/network/illumos/ipadm_addrprop.py new file mode 100644 index 0000000000..574c6b3ee4 --- /dev/null +++ b/plugins/modules/network/illumos/ipadm_addrprop.py @@ -0,0 +1,259 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2016, Adam Števko +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: ipadm_addrprop +short_description: Manage IP address properties on Solaris/illumos systems. +description: + - Modify IP address properties on Solaris/illumos systems. +author: Adam Števko (@xen0l) +options: + addrobj: + description: + - Specifies the address object we want to manage. + required: true + aliases: [nic, interface] + property: + description: + - Specifies the name of the address property we want to manage. + required: true + aliases: [name] + value: + description: + - Specifies the value we want to set for the address property. + required: false + temporary: + description: + - Specifies that the address property value is temporary. + Temporary values do not persist across reboots. + required: false + default: false + type: bool + state: + description: + - Set or reset the property value. 
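+            - State C(absent) is handled in the same way as C(reset) and
+              restores the address property to its default value.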
+ required: false + default: present + choices: [ "present", "absent", "reset" ] +''' + +EXAMPLES = ''' +- name: Mark address on addrobj as deprecated + ipadm_addrprop: property=deprecated value=on addrobj=e1000g0/v6 + +- name: Set network prefix length for addrobj + ipadm_addrprop: addrobj=bge0/v4 name=prefixlen value=26 +''' + +RETURN = ''' +property: + description: property name + returned: always + type: str + sample: deprecated +addrobj: + description: address object name + returned: always + type: str + sample: bge0/v4 +state: + description: state of the target + returned: always + type: str + sample: present +temporary: + description: specifies if operation will persist across reboots + returned: always + type: bool + sample: True +value: + description: property value + returned: when value is provided + type: str + sample: 26 +''' + +from ansible.module_utils.basic import AnsibleModule + + +class AddrProp(object): + + def __init__(self, module): + self.module = module + + self.addrobj = module.params['addrobj'] + self.property = module.params['property'] + self.value = module.params['value'] + self.temporary = module.params['temporary'] + self.state = module.params['state'] + + def property_exists(self): + cmd = [self.module.get_bin_path('ipadm')] + + cmd.append('show-addrprop') + cmd.append('-p') + cmd.append(self.property) + cmd.append(self.addrobj) + + (rc, _, _) = self.module.run_command(cmd) + + if rc == 0: + return True + else: + self.module.fail_json(msg='Unknown property "%s" on addrobj %s' % + (self.property, self.addrobj), + property=self.property, + addrobj=self.addrobj) + + def property_is_modified(self): + cmd = [self.module.get_bin_path('ipadm')] + + cmd.append('show-addrprop') + cmd.append('-c') + cmd.append('-o') + cmd.append('current,default') + cmd.append('-p') + cmd.append(self.property) + cmd.append(self.addrobj) + + (rc, out, _) = self.module.run_command(cmd) + + out = out.rstrip() + (value, default) = out.split(':') + + if rc == 0 and value == default: + return True + else: + return False + + def property_is_set(self): + cmd = [self.module.get_bin_path('ipadm')] + + cmd.append('show-addrprop') + cmd.append('-c') + cmd.append('-o') + cmd.append('current') + cmd.append('-p') + cmd.append(self.property) + cmd.append(self.addrobj) + + (rc, out, _) = self.module.run_command(cmd) + + out = out.rstrip() + + if rc == 0 and self.value == out: + return True + else: + return False + + def set_property(self): + cmd = [self.module.get_bin_path('ipadm')] + + cmd.append('set-addrprop') + + if self.temporary: + cmd.append('-t') + + cmd.append('-p') + cmd.append(self.property + '=' + self.value) + cmd.append(self.addrobj) + + return self.module.run_command(cmd) + + def reset_property(self): + cmd = [self.module.get_bin_path('ipadm')] + + cmd.append('reset-addrprop') + + if self.temporary: + cmd.append('-t') + + cmd.append('-p') + cmd.append(self.property) + cmd.append(self.addrobj) + + return self.module.run_command(cmd) + + +def main(): + module = AnsibleModule( + argument_spec=dict( + addrobj=dict(required=True, default=None, aliases=['nic', 'interface']), + property=dict(required=True, aliases=['name']), + value=dict(required=False), + temporary=dict(default=False, type='bool'), + state=dict( + default='present', choices=['absent', 'present', 'reset']), + ), + supports_check_mode=True + ) + + addrprop = AddrProp(module) + + rc = None + out = '' + err = '' + result = {} + result['property'] = addrprop.property + result['addrobj'] = addrprop.addrobj + result['state'] = 
addrprop.state + result['temporary'] = addrprop.temporary + if addrprop.value: + result['value'] = addrprop.value + + if addrprop.state == 'absent' or addrprop.state == 'reset': + if addrprop.property_exists(): + if not addrprop.property_is_modified(): + if module.check_mode: + module.exit_json(changed=True) + (rc, out, err) = addrprop.reset_property() + if rc != 0: + module.fail_json(property=addrprop.property, + addrobj=addrprop.addrobj, + msg=err, + rc=rc) + + elif addrprop.state == 'present': + if addrprop.value is None: + module.fail_json(msg='Value is mandatory with state "present"') + + if addrprop.property_exists(): + if not addrprop.property_is_set(): + if module.check_mode: + module.exit_json(changed=True) + + (rc, out, err) = addrprop.set_property() + if rc != 0: + module.fail_json(property=addrprop.property, + addrobj=addrprop.addrobj, + msg=err, + rc=rc) + + if rc is None: + result['changed'] = False + else: + result['changed'] = True + + if out: + result['stdout'] = out + if err: + result['stderr'] = err + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/illumos/ipadm_if.py b/plugins/modules/network/illumos/ipadm_if.py new file mode 100644 index 0000000000..89da9a8553 --- /dev/null +++ b/plugins/modules/network/illumos/ipadm_if.py @@ -0,0 +1,221 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2015, Adam Števko +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: ipadm_if +short_description: Manage IP interfaces on Solaris/illumos systems. +description: + - Create, delete, enable or disable IP interfaces on Solaris/illumos + systems. +author: Adam Števko (@xen0l) +options: + name: + description: + - IP interface name. + required: true + temporary: + description: + - Specifies that the IP interface is temporary. Temporary IP + interfaces do not persist across reboots. + required: false + default: false + type: bool + state: + description: + - Create or delete Solaris/illumos IP interfaces. 
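+            - C(enabled) and C(disabled) are applied as temporary changes,
+              because the underlying C(ipadm) commands are run with the
+              C(-t) flag.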
+ required: false + default: "present" + choices: [ "present", "absent", "enabled", "disabled" ] +''' + +EXAMPLES = ''' +# Create vnic0 interface +- ipadm_if: + name: vnic0 + state: enabled + +# Disable vnic0 interface +- ipadm_if: + name: vnic0 + state: disabled +''' + +RETURN = ''' +name: + description: IP interface name + returned: always + type: str + sample: "vnic0" +state: + description: state of the target + returned: always + type: str + sample: "present" +temporary: + description: persistence of a IP interface + returned: always + type: bool + sample: "True" +''' +from ansible.module_utils.basic import AnsibleModule + + +class IPInterface(object): + + def __init__(self, module): + self.module = module + + self.name = module.params['name'] + self.temporary = module.params['temporary'] + self.state = module.params['state'] + + def interface_exists(self): + cmd = [self.module.get_bin_path('ipadm', True)] + + cmd.append('show-if') + cmd.append(self.name) + + (rc, _, _) = self.module.run_command(cmd) + if rc == 0: + return True + else: + return False + + def interface_is_disabled(self): + cmd = [self.module.get_bin_path('ipadm', True)] + + cmd.append('show-if') + cmd.append('-o') + cmd.append('state') + cmd.append(self.name) + + (rc, out, err) = self.module.run_command(cmd) + if rc != 0: + self.module.fail_json(name=self.name, rc=rc, msg=err) + + return 'disabled' in out + + def create_interface(self): + cmd = [self.module.get_bin_path('ipadm', True)] + + cmd.append('create-if') + + if self.temporary: + cmd.append('-t') + + cmd.append(self.name) + + return self.module.run_command(cmd) + + def delete_interface(self): + cmd = [self.module.get_bin_path('ipadm', True)] + + cmd.append('delete-if') + + if self.temporary: + cmd.append('-t') + + cmd.append(self.name) + + return self.module.run_command(cmd) + + def enable_interface(self): + cmd = [self.module.get_bin_path('ipadm', True)] + + cmd.append('enable-if') + cmd.append('-t') + cmd.append(self.name) + + return self.module.run_command(cmd) + + def disable_interface(self): + cmd = [self.module.get_bin_path('ipadm', True)] + + cmd.append('disable-if') + cmd.append('-t') + cmd.append(self.name) + + return self.module.run_command(cmd) + + +def main(): + module = AnsibleModule( + argument_spec=dict( + name=dict(required=True), + temporary=dict(default=False, type='bool'), + state=dict(default='present', choices=['absent', + 'present', + 'enabled', + 'disabled']), + ), + supports_check_mode=True + ) + + interface = IPInterface(module) + + rc = None + out = '' + err = '' + result = {} + result['name'] = interface.name + result['state'] = interface.state + result['temporary'] = interface.temporary + + if interface.state == 'absent': + if interface.interface_exists(): + if module.check_mode: + module.exit_json(changed=True) + (rc, out, err) = interface.delete_interface() + if rc != 0: + module.fail_json(name=interface.name, msg=err, rc=rc) + elif interface.state == 'present': + if not interface.interface_exists(): + if module.check_mode: + module.exit_json(changed=True) + (rc, out, err) = interface.create_interface() + + if rc is not None and rc != 0: + module.fail_json(name=interface.name, msg=err, rc=rc) + + elif interface.state == 'enabled': + if interface.interface_is_disabled(): + (rc, out, err) = interface.enable_interface() + + if rc is not None and rc != 0: + module.fail_json(name=interface.name, msg=err, rc=rc) + + elif interface.state == 'disabled': + if not interface.interface_is_disabled(): + (rc, out, err) = 
interface.disable_interface() + + if rc is not None and rc != 0: + module.fail_json(name=interface.name, msg=err, rc=rc) + + if rc is None: + result['changed'] = False + else: + result['changed'] = True + + if out: + result['stdout'] = out + if err: + result['stderr'] = err + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/illumos/ipadm_ifprop.py b/plugins/modules/network/illumos/ipadm_ifprop.py new file mode 100644 index 0000000000..3ba86abe81 --- /dev/null +++ b/plugins/modules/network/illumos/ipadm_ifprop.py @@ -0,0 +1,287 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2016, Adam Števko +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: ipadm_ifprop +short_description: Manage IP interface properties on Solaris/illumos systems. +description: + - Modify IP interface properties on Solaris/illumos systems. +author: Adam Števko (@xen0l) +options: + interface: + description: + - Specifies the IP interface we want to manage. + required: true + aliases: [nic] + protocol: + description: + - Specifies the protocol for which we want to manage properties. + required: true + property: + description: + - Specifies the name of the property we want to manage. + required: true + aliases: [name] + value: + description: + - Specifies the value we want to set for the property. + required: false + temporary: + description: + - Specifies that the property value is temporary. Temporary + property values do not persist across reboots. + required: false + default: false + type: bool + state: + description: + - Set or reset the property value. 
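+            - C(absent) is an alias for C(reset); both restore the property
+              to its default value.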
+ required: false + default: present + choices: [ "present", "absent", "reset" ] +''' + +EXAMPLES = ''' +- name: Allow forwarding of IPv4 packets on network interface e1000g0 + ipadm_ifprop: protocol=ipv4 property=forwarding value=on interface=e1000g0 + +- name: Temporarily reset IPv4 forwarding property on network interface e1000g0 + ipadm_ifprop: protocol=ipv4 interface=e1000g0 temporary=true property=forwarding state=reset + +- name: Configure IPv6 metric on network interface e1000g0 + ipadm_ifprop: protocol=ipv6 nic=e1000g0 name=metric value=100 + +- name: Set IPv6 MTU on network interface bge0 + ipadm_ifprop: interface=bge0 name=mtu value=1280 protocol=ipv6 +''' + +RETURN = ''' +protocol: + description: property's protocol + returned: always + type: str + sample: ipv4 +property: + description: property's name + returned: always + type: str + sample: mtu +interface: + description: interface name we want to set property on + returned: always + type: str + sample: e1000g0 +state: + description: state of the target + returned: always + type: str + sample: present +value: + description: property's value + returned: when value is provided + type: str + sample: 1280 +''' + +from ansible.module_utils.basic import AnsibleModule + + +SUPPORTED_PROTOCOLS = ['ipv4', 'ipv6'] + + +class IfProp(object): + + def __init__(self, module): + self.module = module + + self.interface = module.params['interface'] + self.protocol = module.params['protocol'] + self.property = module.params['property'] + self.value = module.params['value'] + self.temporary = module.params['temporary'] + self.state = module.params['state'] + + def property_exists(self): + cmd = [self.module.get_bin_path('ipadm')] + + cmd.append('show-ifprop') + cmd.append('-p') + cmd.append(self.property) + cmd.append('-m') + cmd.append(self.protocol) + cmd.append(self.interface) + + (rc, _, _) = self.module.run_command(cmd) + + if rc == 0: + return True + else: + self.module.fail_json(msg='Unknown %s property "%s" on IP interface %s' % + (self.protocol, self.property, self.interface), + protocol=self.protocol, + property=self.property, + interface=self.interface) + + def property_is_modified(self): + cmd = [self.module.get_bin_path('ipadm')] + + cmd.append('show-ifprop') + cmd.append('-c') + cmd.append('-o') + cmd.append('current,default') + cmd.append('-p') + cmd.append(self.property) + cmd.append('-m') + cmd.append(self.protocol) + cmd.append(self.interface) + + (rc, out, _) = self.module.run_command(cmd) + + out = out.rstrip() + (value, default) = out.split(':') + + if rc == 0 and value == default: + return True + else: + return False + + def property_is_set(self): + cmd = [self.module.get_bin_path('ipadm')] + + cmd.append('show-ifprop') + cmd.append('-c') + cmd.append('-o') + cmd.append('current') + cmd.append('-p') + cmd.append(self.property) + cmd.append('-m') + cmd.append(self.protocol) + cmd.append(self.interface) + + (rc, out, _) = self.module.run_command(cmd) + + out = out.rstrip() + + if rc == 0 and self.value == out: + return True + else: + return False + + def set_property(self): + cmd = [self.module.get_bin_path('ipadm')] + + cmd.append('set-ifprop') + + if self.temporary: + cmd.append('-t') + + cmd.append('-p') + cmd.append(self.property + "=" + self.value) + cmd.append('-m') + cmd.append(self.protocol) + cmd.append(self.interface) + + return self.module.run_command(cmd) + + def reset_property(self): + cmd = [self.module.get_bin_path('ipadm')] + + cmd.append('reset-ifprop') + + if self.temporary: + cmd.append('-t') + + 
cmd.append('-p') + cmd.append(self.property) + cmd.append('-m') + cmd.append(self.protocol) + cmd.append(self.interface) + + return self.module.run_command(cmd) + + +def main(): + module = AnsibleModule( + argument_spec=dict( + protocol=dict(required=True, choices=SUPPORTED_PROTOCOLS), + property=dict(required=True, aliases=['name']), + value=dict(required=False), + temporary=dict(default=False, type='bool'), + interface=dict(required=True, default=None, aliases=['nic']), + state=dict( + default='present', choices=['absent', 'present', 'reset']), + ), + supports_check_mode=True + ) + + ifprop = IfProp(module) + + rc = None + out = '' + err = '' + result = {} + result['protocol'] = ifprop.protocol + result['property'] = ifprop.property + result['interface'] = ifprop.interface + result['state'] = ifprop.state + if ifprop.value: + result['value'] = ifprop.value + + if ifprop.state == 'absent' or ifprop.state == 'reset': + if ifprop.property_exists(): + if not ifprop.property_is_modified(): + if module.check_mode: + module.exit_json(changed=True) + (rc, out, err) = ifprop.reset_property() + if rc != 0: + module.fail_json(protocol=ifprop.protocol, + property=ifprop.property, + interface=ifprop.interface, + msg=err, + rc=rc) + + elif ifprop.state == 'present': + if ifprop.value is None: + module.fail_json(msg='Value is mandatory with state "present"') + + if ifprop.property_exists(): + if not ifprop.property_is_set(): + if module.check_mode: + module.exit_json(changed=True) + + (rc, out, err) = ifprop.set_property() + if rc != 0: + module.fail_json(protocol=ifprop.protocol, + property=ifprop.property, + interface=ifprop.interface, + msg=err, + rc=rc) + + if rc is None: + result['changed'] = False + else: + result['changed'] = True + + if out: + result['stdout'] = out + if err: + result['stderr'] = err + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/illumos/ipadm_prop.py b/plugins/modules/network/illumos/ipadm_prop.py new file mode 100644 index 0000000000..83512c4c80 --- /dev/null +++ b/plugins/modules/network/illumos/ipadm_prop.py @@ -0,0 +1,260 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2015, Adam Števko +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: ipadm_prop +short_description: Manage protocol properties on Solaris/illumos systems. +description: + - Modify protocol properties on Solaris/illumos systems. +author: Adam Števko (@xen0l) +options: + protocol: + description: + - Specifies the protocol for which we want to manage properties. + required: true + property: + description: + - Specifies the name of property we want to manage. + required: true + value: + description: + - Specifies the value we want to set for the property. + required: false + temporary: + description: + - Specifies that the property value is temporary. Temporary + property values do not persist across reboots. + required: false + default: false + type: bool + state: + description: + - Set or reset the property value. 
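+            - C(absent) behaves like C(reset) and restores the property to
+              its default value.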
+ required: false + default: present + choices: [ "present", "absent", "reset" ] +''' + +EXAMPLES = ''' +# Set TCP receive buffer size +- ipadm_prop: protocol=tcp property=recv_buf value=65536 + +# Reset UDP send buffer size to the default value +- ipadm_prop: protocol=udp property=send_buf state=reset +''' + +RETURN = ''' +protocol: + description: property's protocol + returned: always + type: str + sample: "TCP" +property: + description: name of the property + returned: always + type: str + sample: "recv_maxbuf" +state: + description: state of the target + returned: always + type: str + sample: "present" +temporary: + description: property's persistence + returned: always + type: bool + sample: "True" +value: + description: value of the property. May be int or string depending on property. + returned: always + type: int + sample: "'1024' or 'never'" +''' + +from ansible.module_utils.basic import AnsibleModule + + +SUPPORTED_PROTOCOLS = ['ipv4', 'ipv6', 'icmp', 'tcp', 'udp', 'sctp'] + + +class Prop(object): + + def __init__(self, module): + self.module = module + + self.protocol = module.params['protocol'] + self.property = module.params['property'] + self.value = module.params['value'] + self.temporary = module.params['temporary'] + self.state = module.params['state'] + + def property_exists(self): + cmd = [self.module.get_bin_path('ipadm')] + + cmd.append('show-prop') + cmd.append('-p') + cmd.append(self.property) + cmd.append(self.protocol) + + (rc, _, _) = self.module.run_command(cmd) + + if rc == 0: + return True + else: + self.module.fail_json(msg='Unknown property "%s" for protocol %s' % + (self.property, self.protocol), + protocol=self.protocol, + property=self.property) + + def property_is_modified(self): + cmd = [self.module.get_bin_path('ipadm')] + + cmd.append('show-prop') + cmd.append('-c') + cmd.append('-o') + cmd.append('current,default') + cmd.append('-p') + cmd.append(self.property) + cmd.append(self.protocol) + + (rc, out, _) = self.module.run_command(cmd) + + out = out.rstrip() + (value, default) = out.split(':') + + if rc == 0 and value == default: + return True + else: + return False + + def property_is_set(self): + cmd = [self.module.get_bin_path('ipadm')] + + cmd.append('show-prop') + cmd.append('-c') + cmd.append('-o') + cmd.append('current') + cmd.append('-p') + cmd.append(self.property) + cmd.append(self.protocol) + + (rc, out, _) = self.module.run_command(cmd) + + out = out.rstrip() + + if rc == 0 and self.value == out: + return True + else: + return False + + def set_property(self): + cmd = [self.module.get_bin_path('ipadm')] + + cmd.append('set-prop') + + if self.temporary: + cmd.append('-t') + + cmd.append('-p') + cmd.append(self.property + "=" + self.value) + cmd.append(self.protocol) + + return self.module.run_command(cmd) + + def reset_property(self): + cmd = [self.module.get_bin_path('ipadm')] + + cmd.append('reset-prop') + + if self.temporary: + cmd.append('-t') + + cmd.append('-p') + cmd.append(self.property) + cmd.append(self.protocol) + + return self.module.run_command(cmd) + + +def main(): + module = AnsibleModule( + argument_spec=dict( + protocol=dict(required=True, choices=SUPPORTED_PROTOCOLS), + property=dict(required=True), + value=dict(required=False), + temporary=dict(default=False, type='bool'), + state=dict( + default='present', choices=['absent', 'present', 'reset']), + ), + supports_check_mode=True + ) + + prop = Prop(module) + + rc = None + out = '' + err = '' + result = {} + result['protocol'] = prop.protocol + result['property'] = 
prop.property + result['state'] = prop.state + result['temporary'] = prop.temporary + if prop.value: + result['value'] = prop.value + + if prop.state == 'absent' or prop.state == 'reset': + if prop.property_exists(): + if not prop.property_is_modified(): + if module.check_mode: + module.exit_json(changed=True) + (rc, out, err) = prop.reset_property() + if rc != 0: + module.fail_json(protocol=prop.protocol, + property=prop.property, + msg=err, + rc=rc) + + elif prop.state == 'present': + if prop.value is None: + module.fail_json(msg='Value is mandatory with state "present"') + + if prop.property_exists(): + if not prop.property_is_set(): + if module.check_mode: + module.exit_json(changed=True) + + (rc, out, err) = prop.set_property() + if rc != 0: + module.fail_json(protocol=prop.protocol, + property=prop.property, + msg=err, + rc=rc) + + if rc is None: + result['changed'] = False + else: + result['changed'] = True + + if out: + result['stdout'] = out + if err: + result['stderr'] = err + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/ingate/ig_config.py b/plugins/modules/network/ingate/ig_config.py new file mode 100644 index 0000000000..16936f7db3 --- /dev/null +++ b/plugins/modules/network/ingate/ig_config.py @@ -0,0 +1,567 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2018, Ingate Systems AB +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'metadata_version': '1.1'} + +DOCUMENTATION = ''' +--- +module: ig_config +short_description: Manage the configuration database on an Ingate SBC. +description: + - Manage the configuration database on an Ingate SBC. +extends_documentation_fragment: +- community.general.ingate + +options: + add: + description: + - Add a row to a table. + type: bool + delete: + description: + - Delete all rows in a table or a specific row. + type: bool + get: + description: + - Return all rows in a table or a specific row. + type: bool + modify: + description: + - Modify a row in a table. + type: bool + revert: + description: + - Reset the preliminary configuration. + type: bool + factory: + description: + - Reset the preliminary configuration to its factory defaults. + type: bool + store: + description: + - Store the preliminary configuration. + type: bool + no_response: + description: + - Expect no response when storing the preliminary configuration. + Refer to the C(store) option. + type: bool + return_rowid: + description: + - Get rowid(s) from a table where the columns match. + type: bool + download: + description: + - Download the configuration database from the unit. + type: bool + store_download: + description: + - If the downloaded configuration should be stored on disk. + Refer to the C(download) option. 
+ type: bool + default: false + path: + description: + - Where in the filesystem to store the downloaded configuration. + Refer to the C(download) option. + filename: + description: + - The name of the file to store the downloaded configuration in. + Refer to the C(download) option. + table: + description: + - The name of the table. + rowid: + description: + - A row id. + type: int + columns: + description: + - A dict containing column names/values. +notes: + - If C(store_download) is set to True, and C(path) and C(filename) is omitted, + the file will be stored in the current directory with an automatic filename. +author: + - Ingate Systems AB (@ingatesystems) +''' + +EXAMPLES = ''' +- name: Add/remove DNS servers + hosts: 192.168.1.1 + connection: local + vars: + client_rw: + version: v1 + address: "{{ inventory_hostname }}" + scheme: http + username: alice + password: foobar + tasks: + + - name: Load factory defaults + ig_config: + client: "{{ client_rw }}" + factory: true + register: result + - debug: + var: result + + - name: Revert to last known applied configuration + ig_config: + client: "{{ client_rw }}" + revert: true + register: result + - debug: + var: result + + - name: Change the unit name + ig_config: + client: "{{ client_rw }}" + modify: true + table: misc.unitname + columns: + unitname: "Test Ansible" + register: result + - debug: + var: result + + - name: Add a DNS server + ig_config: + client: "{{ client_rw }}" + add: true + table: misc.dns_servers + columns: + server: 192.168.1.21 + register: result + - debug: + var: result + + - name: Add a DNS server + ig_config: + client: "{{ client_rw }}" + add: true + table: misc.dns_servers + columns: + server: 192.168.1.22 + register: result + - debug: + var: result + + - name: Add a DNS server + ig_config: + client: "{{ client_rw }}" + add: true + table: misc.dns_servers + columns: + server: 192.168.1.23 + register: last_dns + - debug: + var: last_dns + + - name: Modify the last added DNS server + ig_config: + client: "{{ client_rw }}" + modify: true + table: misc.dns_servers + rowid: "{{ last_dns['add'][0]['id'] }}" + columns: + server: 192.168.1.24 + register: result + - debug: + var: result + + - name: Return the last added DNS server + ig_config: + client: "{{ client_rw }}" + get: true + table: misc.dns_servers + rowid: "{{ last_dns['add'][0]['id'] }}" + register: result + - debug: + var: result + + - name: Remove last added DNS server + ig_config: + client: "{{ client_rw }}" + delete: true + table: misc.dns_servers + rowid: "{{ last_dns['add'][0]['id'] }}" + register: result + - debug: + var: result + + - name: Return the all rows from table misc.dns_servers + ig_config: + client: "{{ client_rw }}" + get: true + table: misc.dns_servers + register: result + - debug: + var: result + + - name: Remove remaining DNS servers + ig_config: + client: "{{ client_rw }}" + delete: true + table: misc.dns_servers + register: result + - debug: + var: result + + - name: Get rowid for interface eth0 + ig_config: + client: "{{ client_rw }}" + return_rowid: true + table: network.local_nets + columns: + interface: eth0 + register: result + - debug: + var: result + + - name: Store the preliminary configuration + ig_config: + client: "{{ client_rw }}" + store: true + register: result + - debug: + var: result + + - name: Do backup of the configuration database + ig_config: + client: "{{ client_rw }}" + download: true + store_download: true + register: result + - debug: + var: result +''' + +RETURN = ''' +add: + description: A list containing 
information about the added row + returned: when C(add) is yes and success + type: complex + contains: + href: + description: The REST API URL to the added row + returned: success + type: str + sample: http://192.168.1.1/api/v1/misc/dns_servers/2 + data: + description: Column names/values + returned: success + type: complex + sample: {'number': '2', 'server': '10.48.254.33'} + id: + description: The row id + returned: success + type: int + sample: 22 +delete: + description: A list containing information about the deleted row(s) + returned: when C(delete) is yes and success + type: complex + contains: + table: + description: The name of the table + returned: success + type: str + sample: misc.dns_servers + data: + description: Column names/values + returned: success + type: complex + sample: {'number': '2', 'server': '10.48.254.33'} + id: + description: The row id + returned: success + type: int + sample: 22 +get: + description: A list containing information about the row(s) + returned: when C(get) is yes and success + type: complex + contains: + table: + description: The name of the table + returned: success + type: str + sample: Testname + href: + description: The REST API URL to the row + returned: success + type: str + sample: http://192.168.1.1/api/v1/misc/dns_servers/1 + data: + description: Column names/values + returned: success + type: complex + sample: {'number': '2', 'server': '10.48.254.33'} + id: + description: The row id + returned: success + type: int + sample: 1 +modify: + description: A list containing information about the modified row + returned: when C(modify) is yes and success + type: complex + contains: + table: + description: The name of the table + returned: success + type: str + sample: Testname + href: + description: The REST API URL to the modified row + returned: success + type: str + sample: http://192.168.1.1/api/v1/misc/dns_servers/1 + data: + description: Column names/values + returned: success + type: complex + sample: {'number': '2', 'server': '10.48.254.33'} + id: + description: The row id + returned: success + type: int + sample: 10 +revert: + description: A command status message + returned: when C(revert) is yes and success + type: complex + contains: + msg: + description: The command status message + returned: success + type: str + sample: reverted the configuration to the last applied configuration. +factory: + description: A command status message + returned: when C(factory) is yes and success + type: complex + contains: + msg: + description: The command status message + returned: success + type: str + sample: reverted the configuration to the factory configuration. +store: + description: A command status message + returned: when C(store) is yes and success + type: complex + contains: + msg: + description: The command status message + returned: success + type: str + sample: Successfully applied and saved the configuration. +return_rowid: + description: The matched row id(s). 
+  returned: when C(return_rowid) is yes and success
+  type: list
+  sample: [1, 3]
+download:
+  description: Configuration database and metadata
+  returned: when C(download) is yes and success
+  type: complex
+  contains:
+    config:
+      description: The configuration database
+      returned: success
+      type: str
+    filename:
+      description: A suggested name for the configuration
+      returned: success
+      type: str
+      sample: testname_2018-10-01T214040.cfg
+    mimetype:
+      description: The mimetype
+      returned: success
+      type: str
+      sample: application/x-config-database
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.network.ingate.common import (ingate_argument_spec,
+                                                                                              ingate_create_client)
+
+try:
+    from ingate import ingatesdk
+    HAS_INGATESDK = True
+except ImportError:
+    HAS_INGATESDK = False
+
+
+def make_request(module):
+    # Create client and authenticate.
+    api_client = ingate_create_client(**module.params)
+
+    if module.params.get('add'):
+        # Add a row to a table.
+        table = module.params['table']
+        columns = module.params['columns']
+        response = api_client.add_row(table, **columns)
+        return True, 'add', response
+    elif module.params.get('delete'):
+        # Delete a row/table.
+        changed = False
+        table = module.params['table']
+        rowid = module.params.get('rowid')
+        if rowid:
+            response = api_client.delete_row(table, rowid=rowid)
+        else:
+            response = api_client.delete_table(table)
+        if response:
+            changed = True
+        return changed, 'delete', response
+    elif module.params.get('get'):
+        # Get the contents of a table/row.
+        changed = False
+        table = module.params['table']
+        rowid = module.params.get('rowid')
+        if rowid:
+            response = api_client.dump_row(table, rowid=rowid)
+        else:
+            response = api_client.dump_table(table)
+        if response:
+            changed = True
+        return changed, 'get', response
+    elif module.params.get('modify'):
+        # Modify a table row.
+        changed = False
+        table = module.params['table']
+        columns = module.params['columns']
+        rowid = module.params.get('rowid')
+        if rowid:
+            response = api_client.modify_row(table, rowid=rowid, **columns)
+        else:
+            response = api_client.modify_single_row(table, **columns)
+        if response:
+            changed = True
+        return changed, 'modify', response
+    elif module.params.get('revert'):
+        # Revert edits.
+        response = api_client.revert_edits()
+        if response:
+            response = response[0]['revert-edits']
+        return True, 'revert', response
+    elif module.params.get('factory'):
+        # Load factory defaults.
+        response = api_client.load_factory()
+        if response:
+            response = response[0]['load-factory']
+        return True, 'factory', response
+    elif module.params.get('store'):
+        # Store edit.
+        no_response = module.params.get('no_response')
+        response = api_client.store_edit(no_response=no_response)
+        if response:
+            response = response[0]['store-edit']
+        return True, 'store', response
+    elif module.params.get('return_rowid'):
+        # Find matching rowid(s) in a table.
+        table = module.params['table']
+        columns = module.params['columns']
+        response = api_client.dump_table(table)
+        rowids = []
+        for row in response:
+            match = False
+            for (name, value) in columns.items():
+                if name not in row['data']:
+                    continue
+                if not row['data'][name] == value:
+                    match = False
+                    break
+                else:
+                    match = True
+            if match:
+                rowids.append(row['id'])
+        return False, 'return_rowid', rowids
+    elif module.params.get('download'):
+        # Download the configuration database.
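+        # When store_download is true and path/filename are omitted,
+        # the file is written to the current working directory under an
+        # automatically generated name (see the notes section above).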
+ store = module.params.get('store_download') + path = module.params.get('path') + filename = module.params.get('filename') + response = api_client.download_config(store=store, path=path, + filename=filename) + if response: + response = response[0]['download-config'] + return False, 'download', response + return False, '', {} + + +def main(): + argument_spec = ingate_argument_spec( + add=dict(type='bool'), + delete=dict(type='bool'), + get=dict(type='bool'), + modify=dict(type='bool'), + revert=dict(type='bool'), + factory=dict(type='bool'), + store=dict(type='bool'), + no_response=dict(type='bool', default=False), + return_rowid=dict(type='bool'), + download=dict(type='bool'), + store_download=dict(type='bool', default=False), + path=dict(), + filename=dict(), + table=dict(), + rowid=dict(type='int'), + columns=dict(type='dict'), + ) + + mutually_exclusive = [('add', 'delete', 'get', 'modify', 'revert', + 'factory', 'store', 'return_rowid', 'download')] + required_one_of = [['add', 'delete', 'get', 'modify', 'revert', 'factory', + 'store', 'return_rowid', 'download']] + required_if = [('add', True, ['table', 'columns']), + ('delete', True, ['table']), + ('get', True, ['table']), + ('modify', True, ['table', 'columns']), + ('return_rowid', True, ['table', 'columns'])] + + module = AnsibleModule(argument_spec=argument_spec, + mutually_exclusive=mutually_exclusive, + required_if=required_if, + required_one_of=required_one_of, + supports_check_mode=False) + if not HAS_INGATESDK: + module.fail_json(msg='The Ingate Python SDK module is required') + + result = dict(changed=False) + try: + changed, command, response = make_request(module) + if response and command: + result[command] = response + result['changed'] = changed + except ingatesdk.SdkError as e: + module.fail_json(msg=str(e)) + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/ingate/ig_unit_information.py b/plugins/modules/network/ingate/ig_unit_information.py new file mode 100644 index 0000000000..b59ab5e199 --- /dev/null +++ b/plugins/modules/network/ingate/ig_unit_information.py @@ -0,0 +1,162 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2018, Ingate Systems AB +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + + +ANSIBLE_METADATA = { + 'status': ['preview'], + 'supported_by': 'community', + 'metadata_version': '1.1' +} + +DOCUMENTATION = ''' +--- +module: ig_unit_information +short_description: Get unit information from an Ingate SBC. +description: + - Get unit information from an Ingate SBC. 
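+  - Returns details such as the firmware version, installed license
+    modules, interface names and the unit serial number.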
+extends_documentation_fragment: +- community.general.ingate + +author: + - Ingate Systems AB (@ingatesystems) +''' + +EXAMPLES = ''' +- name: Get unit information + ig_unit_information: + client: + version: v1 + scheme: http + address: 192.168.1.1 + username: alice + password: foobar +''' + +RETURN = ''' +unit-information: + description: Information about the unit + returned: success + type: complex + contains: + installid: + description: The installation identifier + returned: success + type: str + sample: any + interfaces: + description: List of interface names + returned: success + type: str + sample: eth0 eth1 eth2 eth3 eth4 eth5 + lang: + description: The unit's language + returned: success + type: str + sample: en + lic_email: + description: License email information + returned: success + type: str + sample: example@example.com + lic_mac: + description: License MAC information + returned: success + type: str + sample: any + lic_name: + description: License name information + returned: success + type: str + sample: Example Inc + macaddr: + description: The MAC address of the first interface + returned: success + type: str + sample: 52:54:00:4c:e2:07 + mode: + description: Operational mode of the unit + returned: success + type: str + sample: Siparator + modules: + description: Installed module licenses + returned: success + type: str + sample: failover vpn sip qturn ems qos rsc voipsm + patches: + description: Installed patches on the unit + returned: success + type: list + sample: [] + product: + description: The product name + returned: success + type: str + sample: Software SIParator/Firewall + serial: + description: The serial number of the unit + returned: success + type: str + sample: IG-200-839-2008-0 + systemid: + description: The system identifier of the unit + returned: success + type: str + sample: IG-200-839-2008-0 + unitname: + description: The name of the unit + returned: success + type: str + sample: Testname + version: + description: Firmware version + returned: success + type: str + sample: 6.2.0-beta2 +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_native +from ansible_collections.community.general.plugins.module_utils.network.ingate.common import (ingate_argument_spec, + ingate_create_client, + is_ingatesdk_installed) + +try: + from ingate import ingatesdk +except ImportError: + pass + + +def make_request(module): + # Create client and authenticate. + api_client = ingate_create_client(**module.params) + + # Get unit information. 
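+    # The SDK returns a list holding a single result dict; main() merges
+    # its first element into the module result.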
+    response = api_client.unit_information()
+    return response
+
+
+def main():
+    argument_spec = ingate_argument_spec()
+    module = AnsibleModule(argument_spec=argument_spec,
+                           supports_check_mode=False)
+
+    is_ingatesdk_installed(module)
+
+    result = dict(changed=False)
+    try:
+        response = make_request(module)
+        result.update(response[0])
+    except ingatesdk.SdkError as e:
+        module.fail_json(msg=to_native(e))
+    module.exit_json(**result)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/network/ironware/ironware_command.py b/plugins/modules/network/ironware/ironware_command.py
new file mode 100644
index 0000000000..9584fd17bc
--- /dev/null
+++ b/plugins/modules/network/ironware/ironware_command.py
@@ -0,0 +1,173 @@
+#!/usr/bin/python
+#
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: ironware_command
+author: "Paul Baker (@paulquack)"
+short_description: Run arbitrary commands on Extreme IronWare devices
+description:
+  - Sends arbitrary commands to an Extreme IronWare node and returns the
+    results read from the device. This module includes a I(wait_for)
+    argument that will cause the module to wait for a specific condition
+    before returning or timing out if the condition is not met.
+extends_documentation_fragment:
+- community.general.ironware
+
+options:
+  commands:
+    description:
+      - List of commands to send to the remote device over the
+        configured provider. The resulting output from the command
+        is returned. If the I(wait_for) argument is provided, the
+        module does not return until the condition is satisfied or
+        the number of retries has expired.
+    required: true
+  wait_for:
+    description:
+      - List of conditions to evaluate against the output of the
+        command. The task will wait for each condition to be true
+        before moving forward. If the conditional is not true
+        within the configured number of retries, the task fails.
+        See examples.
+  match:
+    description:
+      - The I(match) argument is used in conjunction with the
+        I(wait_for) argument to specify the match policy. If the value
+        is set to C(all) then all conditionals in the I(wait_for) must be
+        satisfied. If the value is set to C(any) then only one of the
+        values must be satisfied.
+    default: all
+    choices: ['any', 'all']
+  retries:
+    description:
+      - Specifies the number of times a command should be tried
+        before it is considered failed. The command is run on the
+        target device every retry and evaluated against the
+        I(wait_for) conditions.
+    default: 10
+  interval:
+    description:
+      - Configures the interval in seconds to wait between retries
+        of the command. If the command does not pass the specified
+        conditions, the interval indicates how long to wait before
+        trying the command again.
+ default: 1 +''' + +EXAMPLES = """ +- ironware_command: + commands: + - show version + +- ironware_command: + commands: + - show interfaces brief wide + - show mpls vll +""" + +RETURN = """ +stdout: + description: the set of responses from the commands + returned: always + type: list + sample: ['...', '...'] + +stdout_lines: + description: The value of stdout split into a list + returned: always + type: list + sample: [['...', '...'], ['...'], ['...']] + +failed_conditions: + description: the conditionals that failed + returned: failed + type: list + sample: ['...', '...'] +""" +import time + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.network.ironware.ironware import ironware_argument_spec, check_args +from ansible_collections.community.general.plugins.module_utils.network.ironware.ironware import run_commands +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.parsing import Conditional +from ansible.module_utils.six import string_types + + +def to_lines(stdout): + for item in stdout: + if isinstance(item, string_types): + item = str(item).split('\n') + yield item + + +def main(): + spec = dict( + # { command: , prompt: , response: } + commands=dict(type='list', required=True), + + wait_for=dict(type='list'), + match=dict(default='all', choices=['all', 'any']), + + retries=dict(default=10, type='int'), + interval=dict(default=1, type='int') + ) + + spec.update(ironware_argument_spec) + + module = AnsibleModule(argument_spec=spec, supports_check_mode=True) + check_args(module) + + result = {'changed': False} + + wait_for = module.params['wait_for'] or list() + conditionals = [Conditional(c) for c in wait_for] + + commands = module.params['commands'] + retries = module.params['retries'] + interval = module.params['interval'] + match = module.params['match'] + + while retries > 0: + responses = run_commands(module, commands) + + for item in list(conditionals): + if item(responses): + if match == 'any': + conditionals = list() + break + conditionals.remove(item) + + if not conditionals: + break + + time.sleep(interval) + retries -= 1 + + if conditionals: + failed_conditions = [item.raw for item in conditionals] + msg = 'One or more conditional statements have not been satisfied' + module.fail_json(msg=msg, failed_conditions=failed_conditions) + + result.update({ + 'changed': False, + 'stdout': responses, + 'stdout_lines': list(to_lines(responses)) + }) + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/ironware/ironware_config.py b/plugins/modules/network/ironware/ironware_config.py new file mode 100644 index 0000000000..02e59fec00 --- /dev/null +++ b/plugins/modules/network/ironware/ironware_config.py @@ -0,0 +1,291 @@ +#!/usr/bin/python +# +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: ironware_config +author: "Paul Baker (@paulquack)" +short_description: Manage configuration sections on Extreme Ironware devices +description: + - Extreme Ironware configurations use a simple block indent file syntax + for segmenting configuration into sections. 
This module provides + an implementation for working with Ironware configuration sections in + a deterministic way. +extends_documentation_fragment: +- community.general.ironware + +options: + lines: + description: + - The ordered set of commands that should be configured in the + section. The commands must be the exact same commands as found + in the device running-config. Be sure to note the configuration + command syntax as some commands are automatically modified by the + device config parser. + aliases: ['commands'] + parents: + description: + - The ordered set of parents that uniquely identify the section + the commands should be checked against. If the parents argument + is omitted, the commands are checked against the set of top + level or global commands. + src: + description: + - Specifies the source path to the file that contains the configuration + or configuration template to load. The path to the source file can + either be the full path on the Ansible control host or a relative + path from the playbook or role root directory. This argument is mutually + exclusive with I(lines), I(parents). + before: + description: + - The ordered set of commands to push on to the command stack if + a change needs to be made. This allows the playbook designer + the opportunity to perform configuration commands prior to pushing + any changes without affecting how the set of commands are matched + against the system + after: + description: + - The ordered set of commands to append to the end of the command + stack if a change needs to be made. Just like with I(before) this + allows the playbook designer to append a set of commands to be + executed after the command set. + match: + description: + - Instructs the module on the way to perform the matching of + the set of commands against the current device config. If + match is set to I(line), commands are matched line by line. If + match is set to I(strict), command lines are matched with respect + to position. If match is set to I(exact), command lines + must be an equal match. Finally, if match is set to I(none), the + module will not attempt to compare the source configuration with + the running configuration on the remote device. + default: line + choices: ['line', 'strict', 'exact', 'none'] + replace: + description: + - Instructs the module on the way to perform the configuration + on the device. If the replace argument is set to I(line) then + the modified lines are pushed to the device in configuration + mode. If the replace argument is set to I(block) then the entire + command block is pushed to the device in configuration mode if any + line is not correct + default: line + choices: ['line', 'block'] + update: + description: + - The I(update) argument controls how the configuration statements + are processed on the remote device. Valid choices for the I(update) + argument are I(merge) and I(check). When the argument is set to + I(merge), the configuration changes are merged with the current + device running configuration. When the argument is set to I(check) + the configuration updates are determined but not actually configured + on the remote device. + default: merge + choices: ['merge', 'check'] + commit: + description: + - This argument specifies the update method to use when applying the + configuration changes to the remote node. If the value is set to + I(merge) the configuration updates are merged with the running- + config. If the value is set to I(check), no changes are made to + the remote host. 
+    default: merge
+    choices: ['merge', 'check']
+  backup:
+    description:
+      - This argument will cause the module to create a full backup of
+        the current C(running-config) from the remote device before any
+        changes are made. If the C(backup_options) value is not given,
+        the backup file is written to the C(backup) folder in the playbook
+        root directory. If the directory does not exist, it is created.
+    type: bool
+    default: 'no'
+  config:
+    description:
+      - The C(config) argument allows the playbook designer to supply
+        the base configuration to be used to validate the necessary
+        configuration changes. If this argument is provided, the module
+        will not download the running-config from the remote node.
+  save_when:
+    description:
+      - When changes are made to the device running-configuration, the
+        changes are not copied to non-volatile storage by default. Using
+        this argument will change that behavior. If the argument is set to
+        I(always), then the running-config will always be copied to the
+        startup-config and the I(modified) flag will always be set to
+        True. If the argument is set to I(modified), then the running-config
+        will only be copied to the startup-config if it has changed since
+        the last save to startup-config. If the argument is set to
+        I(never), the running-config will never be copied to the
+        startup-config.
+    default: never
+    choices: ['always', 'never', 'modified']
+  backup_options:
+    description:
+      - This is a dict object containing configurable options related to the backup file path.
+        The value of this option is read only when C(backup) is set to I(yes); if C(backup) is set
+        to I(no), this option will be silently ignored.
+    suboptions:
+      filename:
+        description:
+          - The filename to be used to store the backup configuration. If the filename
+            is not given, it will be generated based on the hostname, current time and date
+            in the format defined by <hostname>_config.<current-date>@<current-time>.
+      dir_path:
+        description:
+          - This option provides the path ending with the directory name in which the backup
+            configuration file will be stored. If the directory does not exist, it will be first
+            created, and the filename is either the value of C(filename) or the default filename
+            as described in the C(filename) option's description. If the path value is not given,
+            a I(backup) directory will be created in the current working directory
+            and the backup configuration will be copied in C(filename) within the I(backup) directory.
+ type: path + type: dict +''' + +EXAMPLES = """ +- ironware_config: + lines: + - port-name test + - enable + - load-interval 30 + - rate-limit input broadcast unknown-unicast multicast 521216 64000 + parents: ['interface ethernet 1/2'] +""" + +RETURN = """ +updates: + description: The set of commands that will be pushed to the remote device + returned: always + type: list + sample: ['...', '...'] +backup_path: + description: The full path to the backup file + returned: when backup is yes + type: str + sample: /playbooks/ansible/backup/ironware_config.2016-07-16@22:28:34 +""" +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.network.ironware.ironware import ironware_argument_spec, check_args +from ansible_collections.community.general.plugins.module_utils.network.ironware.ironware import get_config, load_config, run_commands +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.config import NetworkConfig, dumps + + +def get_candidate(module): + candidate = NetworkConfig(indent=1) + if module.params['src']: + candidate.load(module.params['src']) + elif module.params['lines']: + parents = module.params['parents'] or list() + candidate.add(module.params['lines'], parents=parents) + return candidate + + +def run(module, result): + match = module.params['match'] + replace = module.params['replace'] + path = module.params['parents'] + configobjs = None + + candidate = get_candidate(module) + if match != 'none': + contents = module.params['config'] + if not contents: + contents = get_config(module) + config = NetworkConfig(indent=1, contents=contents) + configobjs = candidate.difference(config, path=path, match=match, + replace=replace) + + else: + configobjs = candidate.items + if configobjs: + commands = dumps(configobjs, 'commands').split('\n') + + if module.params['lines']: + if module.params['before']: + commands[:0] = module.params['before'] + + if module.params['after']: + commands.extend(module.params['after']) + + result['updates'] = commands + + # send the configuration commands to the device and merge + # them with the current running config + if not module.check_mode: + load_config(module, commands) + result['changed'] = True + + if result['changed'] or module.params['save_when'] == 'always': + result['changed'] = True + if not module.check_mode: + cmd = {'command': 'write memory'} + run_commands(module, [cmd]) + + +def main(): + """ main entry point for module execution + """ + backup_spec = dict( + filename=dict(), + dir_path=dict(type='path') + ) + argument_spec = dict( + src=dict(type='path'), + + lines=dict(aliases=['commands'], type='list'), + parents=dict(type='list'), + + before=dict(type='list'), + after=dict(type='list'), + + match=dict(default='line', choices=['line', 'strict', 'exact', 'none']), + replace=dict(default='line', choices=['line', 'block']), + + config=dict(), + + backup=dict(type='bool', default=False), + backup_options=dict(type='dict', options=backup_spec), + save_when=dict(choices=['always', 'never', 'modified'], default='never') + + ) + + argument_spec.update(ironware_argument_spec) + + mutually_exclusive = [('lines', 'src'), + ('parents', 'src')] + + required_if = [('match', 'strict', ['lines']), + ('match', 'exact', ['lines']), + ('replace', 'block', ['lines'])] + + module = AnsibleModule(argument_spec=argument_spec, + mutually_exclusive=mutually_exclusive, + required_if=required_if, + supports_check_mode=True) + + result = {'changed': False} + + 
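+    # Orientation note (a sketch of the control flow, inferred from the code
+    # below): check_args() validates the provider arguments; the optional
+    # backup stashes the current running-config under the '__backup__' result
+    # key (which the network action plugin on the controller writes out under
+    # the playbook's backup/ directory); run() computes and applies the
+    # configuration diff; and exit_json() reports the accumulated result.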
check_args(module) + + if module.params['backup']: + result['__backup__'] = get_config(module) + + run(module, result) + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/ironware/ironware_facts.py b/plugins/modules/network/ironware/ironware_facts.py new file mode 100644 index 0000000000..1aa738dbeb --- /dev/null +++ b/plugins/modules/network/ironware/ironware_facts.py @@ -0,0 +1,652 @@ +#!/usr/bin/python +# +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: ironware_facts +author: "Paul Baker (@paulquack)" +short_description: Collect facts from devices running Extreme Ironware +description: + - Collects a base set of device facts from a remote device that + is running Ironware. This module prepends all of the + base network fact keys with C(ansible_net_). The facts + module will always collect a base set of facts from the device + and can enable or disable collection of additional facts. +extends_documentation_fragment: +- community.general.ironware + +notes: + - Tested against Ironware 5.8e +options: + gather_subset: + description: + - When supplied, this argument will restrict the facts collected + to a given subset. Possible values for this argument include + all, hardware, config, mpls and interfaces. Can specify a list of + values to include a larger subset. Values can also be used + with an initial C(M(!)) to specify that a specific subset should + not be collected. + required: false + default: ['!config','!mpls'] +''' + +EXAMPLES = """ +# Collect all facts from the device +- ironware_facts: + gather_subset: all + +# Collect only the config and default facts +- ironware_facts: + gather_subset: + - config + +# Do not collect hardware facts +- ironware_facts: + gather_subset: + - "!hardware" +""" + +RETURN = """ +ansible_net_gather_subset: + description: The list of fact subsets collected from the device + returned: always + type: list + +# default +ansible_net_model: + description: The model name returned from the device + returned: always + type: str +ansible_net_serialnum: + description: The serial number of the remote device + returned: always + type: str +ansible_net_version: + description: The operating system version running on the remote device + returned: always + type: str + +# hardware +ansible_net_filesystems: + description: All file system names available on the device + returned: when hardware is configured + type: list +ansible_net_memfree_mb: + description: The available free memory on the remote device in Mb + returned: when hardware is configured + type: int +ansible_net_memtotal_mb: + description: The total memory on the remote device in Mb + returned: when hardware is configured + type: int + +# config +ansible_net_config: + description: The current active config from the device + returned: when config is configured + type: str + +# mpls +ansible_net_mpls_lsps: + description: All MPLS LSPs configured on the device + returned: When LSP is configured + type: dict +ansible_net_mpls_vll: + description: All VLL instances configured on the device + returned: When MPLS VLL is configured + type: dict +ansible_net_mpls_vll_local: + description: All VLL-LOCAL instances configured on the device + returned: When MPLS 
VLL-LOCAL is configured + type: dict +ansible_net_mpls_vpls: + description: All VPLS instances configured on the device + returned: When MPLS VPLS is configured + type: dict + +# interfaces +ansible_net_all_ipv4_addresses: + description: All IPv4 addresses configured on the device + returned: when interfaces is configured + type: list +ansible_net_all_ipv6_addresses: + description: All IPv6 addresses configured on the device + returned: when interfaces is configured + type: list +ansible_net_interfaces: + description: A hash of all interfaces running on the system + returned: when interfaces is configured + type: dict +ansible_net_neighbors: + description: The list of LLDP neighbors from the remote device + returned: when interfaces is configured + type: dict +""" +import re + +from ansible_collections.community.general.plugins.module_utils.network.ironware.ironware import run_commands +from ansible_collections.community.general.plugins.module_utils.network.ironware.ironware import ironware_argument_spec, check_args +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.six import iteritems + + +class FactsBase(object): + + COMMANDS = list() + + def __init__(self, module): + self.module = module + self.facts = dict() + self.responses = None + + def populate(self): + self.responses = run_commands(self.module, self.COMMANDS, check_rc=False) + + def run(self, cmd): + return run_commands(self.module, cmd, check_rc=False) + + +class Default(FactsBase): + + COMMANDS = [ + 'show version', + 'show chassis' + ] + + def populate(self): + super(Default, self).populate() + data = self.responses[0] + if data: + self.facts['version'] = self.parse_version(data) + self.facts['serialnum'] = self.parse_serialnum(data) + + data = self.responses[1] + if data: + self.facts['model'] = self.parse_model(data) + + def parse_version(self, data): + match = re.search(r'IronWare : Version (\S+)', data) + if match: + return match.group(1) + + def parse_model(self, data): + match = re.search(r'^\*\*\* (.+) \*\*\*$', data, re.M) + if match: + return match.group(1) + + def parse_serialnum(self, data): + match = re.search(r'Serial #: (\S+),', data) + if match: + return match.group(1) + + +class Hardware(FactsBase): + + COMMANDS = [ + 'dir | include Directory', + 'show memory' + ] + + def populate(self): + super(Hardware, self).populate() + data = self.responses[0] + if data: + self.facts['filesystems'] = self.parse_filesystems(data) + + data = self.responses[1] + if data: + self.facts['memtotal_mb'] = int(round(int(self.parse_memtotal(data)) / 1024 / 1024, 0)) + self.facts['memfree_mb'] = int(round(int(self.parse_memfree(data)) / 1024 / 1024, 0)) + + def parse_filesystems(self, data): + return re.findall(r'^Directory of (\S+)', data, re.M) + + def parse_memtotal(self, data): + match = re.search(r'Total SDRAM\D*(\d+)\s', data, re.M) + if match: + return match.group(1) + + def parse_memfree(self, data): + match = re.search(r'(Total Free Memory|Available Memory)\D*(\d+)\s', data, re.M) + if match: + return match.group(2) + + +class Config(FactsBase): + + COMMANDS = ['show running-config'] + + def populate(self): + super(Config, self).populate() + data = self.responses[0] + if data: + self.facts['config'] = data + + +class MPLS(FactsBase): + + COMMANDS = [ + 'show mpls lsp detail', + 'show mpls vll-local detail', + 'show mpls vll detail', + 'show mpls vpls detail' + ] + + def populate(self): + super(MPLS, self).populate() + data = self.responses[0] + if data: + data = self.parse_mpls(data) + 
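+            # parse_mpls() (defined below) splits the raw CLI output into a
+            # dict keyed by the LSP/VLL/VPLS name, each value holding that
+            # entry's complete multi-line detail block; the populate_* helpers
+            # then extract structured facts from those blocks.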
self.facts['mpls_lsps'] = self.populate_lsps(data) + + data = self.responses[1] + if data: + data = self.parse_mpls(data) + self.facts['mpls_vll_local'] = self.populate_vll_local(data) + + data = self.responses[2] + if data: + data = self.parse_mpls(data) + self.facts['mpls_vll'] = self.populate_vll(data) + + data = self.responses[3] + if data: + data = self.parse_mpls(data) + self.facts['mpls_vpls'] = self.populate_vpls(data) + + def parse_mpls(self, data): + parsed = dict() + for line in data.split('\n'): + if not line: + continue + elif line[0] == ' ': + parsed[key] += '\n%s' % line + else: + match = re.match(r'^(LSP|VLL|VPLS) ([^\s,]+)', line) + if match: + key = match.group(2) + parsed[key] = line + return parsed + + def populate_vpls(self, vpls): + facts = dict() + for key, value in iteritems(vpls): + vpls = dict() + vpls['endpoints'] = self.parse_vpls_endpoints(value) + vpls['vc-id'] = self.parse_vpls_vcid(value) + facts[key] = vpls + return facts + + def populate_vll_local(self, vll_locals): + facts = dict() + for key, value in iteritems(vll_locals): + vll = dict() + vll['endpoints'] = self.parse_vll_endpoints(value) + facts[key] = vll + return facts + + def populate_vll(self, vlls): + facts = dict() + for key, value in iteritems(vlls): + vll = dict() + vll['endpoints'] = self.parse_vll_endpoints(value) + vll['vc-id'] = self.parse_vll_vcid(value) + vll['cos'] = self.parse_vll_cos(value) + facts[key] = vll + return facts + + def parse_vll_vcid(self, data): + match = re.search(r'VC-ID (\d+),', data, re.M) + if match: + return match.group(1) + + def parse_vll_cos(self, data): + match = re.search(r'COS +: +(\d+)', data, re.M) + if match: + return match.group(1) + + def parse_vll_endpoints(self, data): + facts = list() + regex = r'End-point[0-9 ]*: +(?Ptagged|untagged) +(vlan +(?P[0-9]+) +)?(inner- vlan +(?P[0-9]+) +)?(?Pe [0-9/]+|--)' + matches = re.finditer(regex, data, re.IGNORECASE | re.DOTALL) + for match in matches: + f = match.groupdict() + f['type'] = 'local' + facts.append(f) + + regex = r'Vll-Peer +: +(?P[0-9\.]+).*Tunnel LSP +: +(?P\S+)' + matches = re.finditer(regex, data, re.IGNORECASE | re.DOTALL) + for match in matches: + f = match.groupdict() + f['type'] = 'remote' + facts.append(f) + + return facts + + def parse_vpls_vcid(self, data): + match = re.search(r'Id (\d+),', data, re.M) + if match: + return match.group(1) + + def parse_vpls_endpoints(self, data): + facts = list() + regex = r'Vlan (?P[0-9]+)\s(?: +(?:L2.*)\s| +Tagged: (?P.+)+\s| +Untagged: (?P.+)\s)*' + matches = re.finditer(regex, data, re.IGNORECASE) + for match in matches: + f = match.groupdict() + f['type'] = 'local' + facts.append(f) + + regex = r'Peer address: (?P[0-9\.]+)' + matches = re.finditer(regex, data, re.IGNORECASE) + for match in matches: + f = match.groupdict() + f['type'] = 'remote' + facts.append(f) + + return facts + + def populate_lsps(self, lsps): + facts = dict() + for key, value in iteritems(lsps): + lsp = dict() + lsp['to'] = self.parse_lsp_to(value) + lsp['from'] = self.parse_lsp_from(value) + lsp['adminstatus'] = self.parse_lsp_adminstatus(value) + lsp['operstatus'] = self.parse_lsp_operstatus(value) + lsp['pri_path'] = self.parse_lsp_pripath(value) + lsp['sec_path'] = self.parse_lsp_secpath(value) + lsp['frr'] = self.parse_lsp_frr(value) + + facts[key] = lsp + + return facts + + def parse_lsp_to(self, data): + match = re.search(r'^LSP .* to (\S+)', data, re.M) + if match: + return match.group(1) + + def parse_lsp_from(self, data): + match = re.search(r'From: ([^\s,]+),', data, re.M) 
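+        # group(1) captures the LSP source address from the 'From: <addr>,' line.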
+ if match: + return match.group(1) + + def parse_lsp_adminstatus(self, data): + match = re.search(r'admin: (\w+),', data, re.M) + if match: + return match.group(1) + + def parse_lsp_operstatus(self, data): + match = re.search(r'From: .* status: (\w+)', data, re.M) + if match: + return match.group(1) + + def parse_lsp_pripath(self, data): + match = re.search(r'Pri\. path: ([^\s,]+), up: (\w+), active: (\w+)', data, re.M) + if match: + path = dict() + path['name'] = match.group(1) if match.group(1) != 'NONE' else None + path['up'] = True if match.group(2) == 'yes' else False + path['active'] = True if match.group(3) == 'yes' else False + return path + + def parse_lsp_secpath(self, data): + match = re.search(r'Sec\. path: ([^\s,]+), active: (\w+).*\n.* status: (\w+)', data, re.M) + if match: + path = dict() + path['name'] = match.group(1) if match.group(1) != 'NONE' else None + path['up'] = True if match.group(3) == 'up' else False + path['active'] = True if match.group(2) == 'yes' else False + return path + + def parse_lsp_frr(self, data): + match = re.search(r'Backup LSP: (\w+)', data, re.M) + if match: + path = dict() + path['up'] = True if match.group(1) == 'UP' else False + path['name'] = None + if path['up']: + match = re.search(r'bypass_lsp: (\S)', data, re.M) + path['name'] = match.group(1) if match else None + return path + + +class Interfaces(FactsBase): + + COMMANDS = [ + 'show interfaces', + 'show ipv6 interface', + 'show lldp neighbors' + ] + + def populate(self): + super(Interfaces, self).populate() + + self.facts['all_ipv4_addresses'] = list() + self.facts['all_ipv6_addresses'] = list() + + data = self.responses[0] + if data: + interfaces = self.parse_interfaces(data) + self.facts['interfaces'] = self.populate_interfaces(interfaces) + + data = self.responses[1] + if data: + data = self.parse_interfaces(data) + self.populate_ipv6_interfaces(data) + + data = self.responses[2] + if data and 'LLDP is not running' not in data: + self.facts['neighbors'] = self.parse_neighbors(data) + + def populate_interfaces(self, interfaces): + facts = dict() + for key, value in iteritems(interfaces): + intf = dict() + intf['description'] = self.parse_description(value) + intf['macaddress'] = self.parse_macaddress(value) + + ipv4 = self.parse_ipv4(value) + intf['ipv4'] = self.parse_ipv4(value) + if ipv4: + self.add_ip_address(ipv4['address'], 'ipv4') + + intf['mtu'] = self.parse_mtu(value) + intf['bandwidth'] = self.parse_bandwidth(value) + intf['duplex'] = self.parse_duplex(value) + intf['lineprotocol'] = self.parse_lineprotocol(value) + intf['operstatus'] = self.parse_operstatus(value) + intf['type'] = self.parse_type(value) + + facts[key] = intf + return facts + + def populate_ipv6_interfaces(self, data): + for key, value in iteritems(data): + self.facts['interfaces'][key]['ipv6'] = list() + addresses = re.findall(r'\s([0-9a-f]+:+[0-9a-f:]+\/\d+)\s', value, re.M) + for addr in addresses: + address, masklen = addr.split('/') + ipv6 = dict(address=address, masklen=int(masklen)) + self.add_ip_address(ipv6['address'], 'ipv6') + self.facts['interfaces'][key]['ipv6'].append(ipv6) + + def add_ip_address(self, address, family): + if family == 'ipv4': + self.facts['all_ipv4_addresses'].append(address) + else: + self.facts['all_ipv6_addresses'].append(address) + + def parse_neighbors(self, neighbors): + facts = dict() + for line in neighbors.split('\n'): + if line == '': + continue + match = re.search(r'([\d\/]+)\s+(\S+)\s+(\S+)\s+(\S+)\s+(\S+)', line, re.M) + if match: + intf = match.group(1) + if intf 
not in facts: + facts[intf] = list() + fact = dict() + fact['host'] = match.group(5) + fact['port'] = match.group(3) + facts[intf].append(fact) + return facts + + def parse_interfaces(self, data): + parsed = dict() + for line in data.split('\n'): + if not line: + continue + elif line[0] == ' ': + parsed[key] += '\n%s' % line + else: + match = re.match(r'^(\S+Ethernet|eth )(\S+)', line) + if match: + key = match.group(2) + parsed[key] = line + return parsed + + def parse_description(self, data): + match = re.search(r'Port name is (.+)$', data, re.M) + if match: + return match.group(1) + + def parse_macaddress(self, data): + match = re.search(r'address is (\S+)', data) + if match: + return match.group(1) + + def parse_ipv4(self, data): + match = re.search(r'Internet address is ([^\s,]+)', data) + if match: + addr, masklen = match.group(1).split('/') + return dict(address=addr, masklen=int(masklen)) + + def parse_mtu(self, data): + match = re.search(r'MTU (\d+)', data) + if match: + return int(match.group(1)) + + def parse_bandwidth(self, data): + match = re.search(r'BW is (\d+)', data) + if match: + return int(match.group(1)) + + def parse_duplex(self, data): + match = re.search(r'configured duplex \S+ actual (\S+)', data, re.M) + if match: + return match.group(1) + + def parse_mediatype(self, data): + match = re.search(r'Type\s*:\s*(.+)$', data, re.M) + if match: + return match.group(1) + + def parse_type(self, data): + match = re.search(r'Hardware is (.+),', data, re.M) + if match: + return match.group(1) + + def parse_lineprotocol(self, data): + match = re.search(r'line protocol is (\S+)', data, re.M) + if match: + return match.group(1) + + def parse_operstatus(self, data): + match = re.search(r'^(?:.+) is (.+),', data, re.M) + if match: + return match.group(1) + + +FACT_SUBSETS = dict( + default=Default, + hardware=Hardware, + interfaces=Interfaces, + config=Config, + mpls=MPLS, +) + +VALID_SUBSETS = frozenset(FACT_SUBSETS.keys()) + + +def main(): + """main entry point for module execution + """ + argument_spec = dict( + gather_subset=dict(default=["!config", "!mpls"], type='list') + ) + + argument_spec.update(ironware_argument_spec) + + module = AnsibleModule(argument_spec=argument_spec, + supports_check_mode=True) + + gather_subset = module.params['gather_subset'] + + runable_subsets = set() + exclude_subsets = set() + + for subset in gather_subset: + if subset == 'all': + runable_subsets.update(VALID_SUBSETS) + continue + + if subset.startswith('!'): + subset = subset[1:] + if subset == 'all': + exclude_subsets.update(VALID_SUBSETS) + continue + exclude = True + else: + exclude = False + + if subset not in VALID_SUBSETS: + module.fail_json(msg='Bad subset') + + if exclude: + exclude_subsets.add(subset) + else: + runable_subsets.add(subset) + + if not runable_subsets: + runable_subsets.update(VALID_SUBSETS) + + runable_subsets.difference_update(exclude_subsets) + runable_subsets.add('default') + + facts = dict() + facts['gather_subset'] = list(runable_subsets) + + instances = list() + for key in runable_subsets: + instances.append(FACT_SUBSETS[key](module)) + + for inst in instances: + inst.populate() + facts.update(inst.facts) + + ansible_facts = dict() + for key, value in iteritems(facts): + key = 'ansible_net_%s' % key + ansible_facts[key] = value + + check_args(module) + + module.exit_json(ansible_facts=ansible_facts) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/itential/iap_start_workflow.py b/plugins/modules/network/itential/iap_start_workflow.py 
new file mode 100644 index 0000000000..9717cc3977 --- /dev/null +++ b/plugins/modules/network/itential/iap_start_workflow.py @@ -0,0 +1,183 @@ +#!/usr/bin/python + +# Copyright: (c) 2018, Itential +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +""" +This module provides the ability to start a workflow from Itential Automation Platform +""" +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: iap_start_workflow +author: "Itential (@cma0) " +short_description: Start a workflow in the Itential Automation Platform +description: + - This will start a specified workflow in the Itential Automation Platform with given arguments. +options: + iap_port: + description: + - Provide the port number for the Itential Automation Platform + required: true + type: str + default: null + + iap_fqdn: + description: + - Provide the fqdn for the Itential Automation Platform + required: true + type: str + default: null + + token_key: + description: + - Token key generated by iap_token module for the Itential Automation Platform + required: true + type: str + default: null + + workflow_name: + description: + - Provide the workflow name + required: true + type: str + default: null + + description: + description: + - Provide the description for the workflow + required: true + type: str + default: null + + variables: + description: + - Provide the values to the job variables + required: true + type: dict + default: null + + https: + description: + - Use HTTPS to connect + - By default using http + type: bool + default: False + + validate_certs: + description: + - If C(no), SSL certificates for the target url will not be validated. This should only be used + on personally controlled sites using self-signed certificates. + type: bool + default: False +''' + +EXAMPLES = ''' +- name: Start a workflow in the Itential Automation Platform + iap_start_workflow: + iap_port: 3000 + iap_fqdn: localhost + token_key: "DFSFSFHFGFGF[DSFSFAADAFASD%3D" + workflow_name: "RouterUpgradeWorkflow" + description: "OS-Router-Upgrade" + variables: {"deviceName":"ASR9K"} + register: result + +- debug: var=result +''' + +RETURN = ''' +response: + description: The result contains the response from the call + type: dict + returned: always +msg: + description: The msg will contain the error code or status of the workflow + type: str + returned: always +''' + +# Ansible imports +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.urls import fetch_url + +# Standard library imports +import json + + +def start_workflow(module): + """ + :param module: + :return: response and msg + """ + # By default this will be http. 
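+    # (switched by the boolean 'https' option handled just below). For
+    # reference, the URL assembled further down has the shape:
+    #   <protocol>://<iap_fqdn>:<iap_port>/workflow_engine/startJobWithOptions/<workflow_name>?token=<token_key>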
+ # By default when using https, self signed certificate is used + # If https needs to pass certificate then use validate_certs as true + if module.params['https']: + transport_protocol = 'https' + else: + transport_protocol = 'http' + + application_token = str(module.params['token_key']) + url = str(transport_protocol) + "://" + str(module.params['iap_fqdn']) + ":" + str(module.params[ + 'iap_port']) + "/workflow_engine/startJobWithOptions/" \ + + str(module.params['workflow_name']) + "?token=" + str(application_token) + options = { + "variables": module.params['variables'], + "description": str(module.params['description']) + } + + payload = { + "workflow": module.params['workflow_name'], + "options": options + } + + json_body = module.jsonify(payload) + headers = dict() + headers['Content-Type'] = 'application/json' + + # Using fetch url instead of requests + response, info = fetch_url(module, url, data=json_body, headers=headers) + response_code = str(info['status']) + if info['status'] not in [200, 201]: + module.fail_json(msg="Failed to connect to Itential Automation Platform. Response code is " + response_code) + + # in the event of a successful module execution, you will want to + # simple AnsibleModule.exit_json(), passing the key/value results + jsonResponse = json.loads(response.read().decode('utf-8')) + module.exit_json(changed=True, msg={"workflow_name": module.params['workflow_name'], "status": "started"}, + response=jsonResponse) + + +def main(): + """ + :return: response and msg + """ + # define the available arguments/parameters that a user can pass to + # the module + # the AnsibleModule object will be our abstraction working with Ansible + # this includes instantiation, a couple of common attr would be the + # args/params passed to the execution, as well as if the module + # supports check mode + module = AnsibleModule( + argument_spec=dict( + iap_port=dict(type='str', required=True), + iap_fqdn=dict(type='str', required=True), + token_key=dict(type='str', required=True), + workflow_name=dict(type='str', required=True), + description=dict(type='str', required=True), + variables=dict(type='dict', required=False), + https=(dict(type='bool', default=False)), + validate_certs=dict(type='bool', default=False) + ) + ) + start_workflow(module) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/itential/iap_token.py b/plugins/modules/network/itential/iap_token.py new file mode 100644 index 0000000000..96b5607ca4 --- /dev/null +++ b/plugins/modules/network/itential/iap_token.py @@ -0,0 +1,142 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright: (c) 2018, Itential +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +""" +This module provides the token for Itential Automation Platform +""" +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: iap_token +author: "Itential (@cma0) " +short_description: Get token for the Itential Automation Platform +description: + - Checks the connection to IAP and retrieves a login token. 
+options: + iap_port: + description: + - Provide the port number for the Itential Automation Platform + required: true + default: null + + iap_fqdn: + description: + - Provide the fqdn or ip-address for the Itential Automation Platform + required: true + default: null + + username: + description: + - Provide the username for the Itential Automation Platform + required: true + default: null + + password: + description: + - Provide the password for the Itential Automation Platform + required: true + default: null + + https: + description: + - Use HTTPS to connect + - By default using http + type: bool + default: False + + validate_certs: + description: + - If C(no), SSL certificates for the target url will not be validated. This should only be used + on personally controlled sites using self-signed certificates. + type: bool + default: False +''' + +EXAMPLES = ''' +- name: Get token for the Itential Automation Platform + iap_token: + iap_port: 3000 + iap_fqdn: localhost + username: myusername + password: mypass + register: result + +- debug: var=result.token +''' + +RETURN = ''' +token: + description: The token acquired from the Itential Automation Platform + type: str + returned: always +''' +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.urls import fetch_url + + +def get_token(module): + """ + :param module: + :return: token + """ + # defaulting the value for transport_protocol to be : http + transport_protocol = 'http' + if module.params['https'] or module.params['validate_certs'] is True: + transport_protocol = 'https' + + url = transport_protocol + "://" + module.params['iap_fqdn'] + ":" + module.params['iap_port'] + "/login" + username = module.params['username'] + password = module.params['password'] + + login = { + "user": { + "username": username, + "password": password + } + } + json_body = module.jsonify(login) + headers = {} + headers['Content-Type'] = 'application/json' + + # Using fetch url instead of requests + response, info = fetch_url(module, url, data=json_body, headers=headers) + response_code = str(info['status']) + if info['status'] not in [200, 201]: + module.fail_json(msg="Failed to connect to Itential Automation Platform" + response_code) + response = response.read() + module.exit_json(changed=True, token=response) + + +def main(): + """ + :return: token + """ + # define the available arguments/parameters that a user can pass to + # the module + # the AnsibleModule object will be our abstraction working with Ansible + # this includes instantiation, a couple of common attr would be the + # args/params passed to the execution, as well as if the module + # supports check mode + module = AnsibleModule( + argument_spec=dict( + iap_port=dict(type='int', required=True), + iap_fqdn=dict(type='str', required=True), + username=dict(type='str', required=True), + password=dict(type='str', required=True, no_log=True), + https=(dict(type='bool', default=False)), + validate_certs=dict(type='bool', default=False) + ) + ) + get_token(module) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/netact/netact_cm_command.py b/plugins/modules/network/netact/netact_cm_command.py new file mode 100644 index 0000000000..74b077667f --- /dev/null +++ b/plugins/modules/network/netact/netact_cm_command.py @@ -0,0 +1,366 @@ +#!/usr/bin/python +# Copyright: Nokia +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# pylint: disable=invalid-name +# pylint: disable=wrong-import-position +# pylint: 
disable=too-many-locals +# pylint: disable=too-many-branches +# pylint: disable=too-many-statements + +""" +NetAct CM ansible command module +""" +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +ANSIBLE_METADATA = { + 'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community' +} + +DOCUMENTATION = ''' +--- +module: netact_cm_command + +short_description: Manage network configuration data in Nokia Core and Radio networks + + +description: + netact_cm_command can be used to run various configuration management operations. + This module requires that the target hosts have Nokia NetAct network management system installed. + Module will access the Configurator command line interface in NetAct to upload network configuration to NetAct, + run configuration export, plan import and configuration provision operations + To set the scope of the operation, define Distinguished Name (DN) or Working Set (WS) or + Maintenance Region (MR) as input +options: + operation: + description: + Supported operations allow user to upload actual configuration from the network, to import and + provision prepared plans, or export reference or actual configuration for planning purposes. + Provision_Mass_Modification enables provisioning the same parameters to multiple network elements. + This operation supports modifications only to one object class at a time. With this option + NetAct Configurator creates and provisions a plan to the network with the given scope and options. + required: true + choices: + - upload + - provision + - import + - export + - Provision_Mass_Modification + aliases: + - op + opsName: + description: + - user specified operation name + required: false + DN: + description: + Sets the exact scope of the operation in form of a list of managed object + Distinguished Names (DN) in the network. + A single DN or a list of DNs can be given (comma separated list without spaces). + Alternatively, if DN or a list of DNs is not given, working set (WS) or Maintenance Region (MR) + must be provided as parameter to set the scope of operation. + required: false + + WS: + description: + Sets the scope of the operation to use one or more pre-defined working sets (WS) in NetAct. + A working set contains network elements selected by user according to defined criteria. + A single WS name, or multiple WSs can be provided (comma-separated list without spaces). + Alternatively, if a WS name or a list of WSs is not given, Distinguished Name (DN) or + Maintenance Region(MR) must be provided as parameter to set the scope of operation. + required: false + MR: + description: + Sets the scope of the operation to network elements assigned to a Maintenance Region (MR) + Value can be set as MR IDs including the Maintenance Region Collection (MRC) + information (for example MRC-FIN1/MR-Hel). + Multiple MRs can be given (comma-separated list without spaces) + The value of this parameter is searched through MR IDs under given MRC. If there is no match, + then it is searched from all MR names. + Alternatively, if MR ID or a list or MR IDs is not given, Distinguished Name (DN) or Working Set (WS) + must be provided as parameter to set the scope of operation. + required: false + planName: + description: + - Specifies a plan name. + required: false + typeOption: + description: + Specifies the type of the export operation. 
+ required: false + choices: + - plan + - actual + - reference + - template + - siteTemplate + aliases: + - type + fileFormat: + description: + Indicates file format. + required: false + choices: + - RAML2 + - CSV + - XLSX + fileName: + description: + - Specifies a file name. Valid for Import and Export operations. + required: false + inputFile: + description: + Specifies full path to plan file location for the import operation. + This parameter (inputFile) or the fileName parameter must be filled. If both are present then + the inputFile is used. + required: false + createBackupPlan: + description: + - Specifies if backup plan generation is enabled. + required: false + type: bool + backupPlanName: + description: + - Specifies a backup plan name + required: false + verbose: + description: + NetAct Configurator will print more info + required: false + extra_opts: + description: + Extra options to be set for operations. Check Configuration Management > Configuration Management + Operating Procedures > Command Line Operations in Nokia NetAct user documentation for further + information for extra options. + required: false +notes: + - Check mode is not currently supported +author: + - Harri Tuominen (@hatuomin) +''' + +EXAMPLES = ''' +# Pass in a message +- name: Upload + netact_cm_command: + operation: "Upload" + opsname: 'Uploading_test' + dn: "PLMN-PLMN/MRBTS-746" + extra_opts: '-btsContentInUse true' + +- name: Provision + netact_cm_command: + operation: "Provision" + opsname: 'Provision_test' + dn: "PLMN-PLMN/MRBTS-746" + planName: 'mySiteTemplate' + type: 'actual' + createBackupPlan: true + backupPlanName: 'myBackupPlanName' + +- name: Export and fetching data from target + netact_cm_command: + operation: "Export" + opsname: 'Export_test' + planName: 'mySiteTemplate' + type: 'actual' + fileName: 'exportTest.xml' +- fetch: + src: /var/opt/nokia/oss/global/racops/export/exportTest.xml + dest: fetched + +- name: Import + netact_cm_command: + operation: "Import" + opsname: 'Import_test' + fileFormat: 'CSV' + type: 'plan' + fileName: 'myCSVFile' + planName: 'myPlanName' + extra_ops: 'enablePolicyPlans true' + +# fail the module +- name: Test failure of the module + netact_cm_command: + name: fail me +''' + +RETURN = ''' +original_message: + description: The original name param that was passed in + returned: Command line + type: str + sample: '/opt/oss/bin/racclimx.sh -op Upload -opsName Uploading_testi -DN PLMN-PLMN/MRBTS-746' +message: + description: The output message that the netact_cm_command module generates + returned: Command output message + type: str +changed: + description: data changed + returned: true if data is changed + type: bool +''' + +from ansible.module_utils.basic import AnsibleModule + +racclimx = '/opt/oss/bin/racclimx.sh' + + +def main(): + """ + Main module where option are handled and command is executed + :return: + """ + # define the available arguments/parameters that a user can pass to + # the module + module_args = dict( + operation=dict(type='str', required=True, + aliases=['op'], + choices=['Upload', 'Provision', 'Import', + 'Export', 'Provision_Mass_Modification']), + opsName=dict(type='str', required=False), + DN=dict(type='str', required=False), + WS=dict(type='str', required=False), + MR=dict(type='str', required=False), + + planName=dict(type='str', required=False), + typeOption=dict(type='str', required=False, aliases=['type'], + choices=['plan', 'actual', 'reference', 'template', 'siteTemplate']), + fileFormat=dict(type='str', required=False, 
choices=['CSV', 'RAML2', 'XLSX']), + fileName=dict(type='str', required=False), + createBackupPlan=dict(type='bool', required=False), + backupPlanName=dict(type='str', required=False), + inputFile=dict(type='str', required=False), + + verbose=dict(type='str', required=False), + extra_opts=dict(type='str', required=False) + ) + + # seed the result dict in the object + # we primarily care about changed and state + # change is if this module effectively modified the target + # state will include any data that you want your module to pass back + # for consumption, for example, in a subsequent task + result = dict( + changed=False, + original_message='', + cmd='', + message='' + ) + + # the AnsibleModule object will be our abstraction working with Ansible + # this includes instantiation, a couple of common attr would be the + # args/params passed to the execution, as well as if the module + # supports check mode + module = AnsibleModule( + argument_spec=module_args, + supports_check_mode=True + ) + + # if the user is working with this module in only check mode we do not + # want to make any changes to the environment, just return the current + # state with no modifications + if module.check_mode: + result['skipped'] = True + result['msg'] = 'check mode not (yet) supported for this module' + module.exit_json(**result) + + # manipulate or modify the state as needed (this is going to be the + # part where your module will do what it needs to do) + + operation = module.params.get('operation') + if not operation: + module.fail_json(msg='Operation not defined', **result) + + opsname = module.params.get('opsName') + dn = module.params.get('DN') + ws = module.params.get('WS') + mr = module.params.get('MR') + + planname = module.params.get('planName') + typeoption = module.params.get('typeOption') + fileformat = module.params.get('fileFormat') + filename = module.params.get('fileName') + + createBackupPlan = module.params.get('createBackupPlan') + backupPlanName = module.params.get('backupPlanName') + inputfile = module.params.get('inputFile') + + extra_opts = module.params.get('extra_opts') + verbose = module.params.get('verbose') + + # parameter checks + + command = [racclimx, '-op', operation] + + if opsname: + command.append('-opsName') + command.append(opsname) + + if dn: + command.append('-DN') + command.append(dn) + + if ws: + command.append('-WS') + command.append(ws) + + if mr: + command.append('-MR') + command.append(mr) + + if planname: + command.append('-planName') + command.append(planname) + + if typeoption: + command.append('-type') + command.append(typeoption) + + if fileformat: + command.append('-fileFormat') + command.append(fileformat) + + if filename: + command.append('-fileName') + command.append(filename) + + if createBackupPlan: + command.append('-createBackupPlan') + command.append('true') + + if backupPlanName: + command.append('-backupPlanName') + command.append(backupPlanName) + + if inputfile: + command.append('-inputFile') + command.append(inputfile) + + if extra_opts: + command = command + extra_opts.split(" ") + + if verbose: + if verbose == 'True': + command.append("-v") + + rc, out, err = module.run_command(command, check_rc=True) + if rc != 0: + result['changed'] = False + module.fail_json(msg=err) + else: + result['changed'] = True + result['original_message'] = out + result['cmd'] = command + result['message'] = out + + # in the event of a successful module execution, you will want to + # simple AnsibleModule.exit_json(), passing the key/value results + 
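+    # At this point 'result' carries changed, cmd, original_message and
+    # message, matching the RETURN documentation above.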
module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/netscaler/netscaler_cs_action.py b/plugins/modules/network/netscaler/netscaler_cs_action.py new file mode 100644 index 0000000000..7f2e16a273 --- /dev/null +++ b/plugins/modules/network/netscaler/netscaler_cs_action.py @@ -0,0 +1,288 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2017 Citrix Systems +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: netscaler_cs_action +short_description: Manage content switching actions +description: + - Manage content switching actions + - This module is intended to run either on the ansible control node or a bastion (jumpserver) with access to the actual netscaler instance + + +author: George Nikolopoulos (@giorgos-nikolopoulos) + +options: + + name: + description: + - >- + Name for the content switching action. Must begin with an ASCII alphanumeric or underscore C(_) + character, and must contain only ASCII alphanumeric, underscore C(_), hash C(#), period C(.), space C( ), colon + C(:), at sign C(@), equal sign C(=), and hyphen C(-) characters. Can be changed after the content + switching action is created. + + targetlbvserver: + description: + - "Name of the load balancing virtual server to which the content is switched." + + targetvserver: + description: + - "Name of the VPN virtual server to which the content is switched." + + targetvserverexpr: + description: + - "Information about this content switching action." + + comment: + description: + - "Comments associated with this cs action." + +extends_documentation_fragment: +- community.general.netscaler + +requirements: + - nitro python sdk +''' + +EXAMPLES = ''' +# lb_vserver_1 must have been already created with the netscaler_lb_vserver module + +- name: Configure netscaler content switching action + delegate_to: localhost + netscaler_cs_action: + nsip: 172.18.0.2 + nitro_user: nsroot + nitro_pass: nsroot + validate_certs: no + + state: present + + name: action-1 + targetlbvserver: lb_vserver_1 +''' + +RETURN = ''' +loglines: + description: list of logged messages by the module + returned: always + type: list + sample: "['message 1', 'message 2']" + +msg: + description: Message detailing the failure reason + returned: failure + type: str + sample: "Action does not exist" + +diff: + description: List of differences between the actual configured object and the configuration specified in the module + returned: failure + type: dict + sample: "{ 'targetlbvserver': 'difference. 
ours: (str) server1 other: (str) server2' }" +''' + +import json + +try: + from nssrc.com.citrix.netscaler.nitro.resource.config.cs.csaction import csaction + from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception + PYTHON_SDK_IMPORTED = True +except ImportError as e: + PYTHON_SDK_IMPORTED = False + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.network.netscaler.netscaler import ( + ConfigProxy, + get_nitro_client, + netscaler_common_arguments, + log, loglines, + ensure_feature_is_enabled, + get_immutables_intersection +) + + +def action_exists(client, module): + if csaction.count_filtered(client, 'name:%s' % module.params['name']) > 0: + return True + else: + return False + + +def action_identical(client, module, csaction_proxy): + if len(diff_list(client, module, csaction_proxy)) == 0: + return True + else: + return False + + +def diff_list(client, module, csaction_proxy): + action_list = csaction.get_filtered(client, 'name:%s' % module.params['name']) + diff_list = csaction_proxy.diff_object(action_list[0]) + if False and 'targetvserverexpr' in diff_list: + json_value = json.loads(action_list[0].targetvserverexpr) + if json_value == module.params['targetvserverexpr']: + del diff_list['targetvserverexpr'] + return diff_list + + +def main(): + + module_specific_arguments = dict( + + name=dict(type='str'), + targetlbvserver=dict(type='str'), + targetvserverexpr=dict(type='str'), + comment=dict(type='str'), + ) + + argument_spec = dict() + + argument_spec.update(netscaler_common_arguments) + + argument_spec.update(module_specific_arguments) + + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + module_result = dict( + changed=False, + failed=False, + loglines=loglines + ) + + # Fail the module if imports failed + if not PYTHON_SDK_IMPORTED: + module.fail_json(msg='Could not load nitro python sdk') + + # Fallthrough to rest of execution + client = get_nitro_client(module) + + try: + client.login() + except nitro_exception as e: + msg = "nitro exception during login. 
errorcode=%s, message=%s" % (str(e.errorcode), e.message) + module.fail_json(msg=msg) + except Exception as e: + if str(type(e)) == "": + module.fail_json(msg='Connection error %s' % str(e)) + elif str(type(e)) == "": + module.fail_json(msg='SSL Error %s' % str(e)) + else: + module.fail_json(msg='Unexpected error during login %s' % str(e)) + + readwrite_attrs = [ + 'name', + 'targetlbvserver', + 'targetvserverexpr', + 'comment', + ] + readonly_attrs = [ + 'hits', + 'referencecount', + 'undefhits', + 'builtin', + ] + + immutable_attrs = [ + 'name', + 'targetvserverexpr', + ] + + transforms = { + } + + # Instantiate config proxy + csaction_proxy = ConfigProxy( + actual=csaction(), + client=client, + attribute_values_dict=module.params, + readwrite_attrs=readwrite_attrs, + readonly_attrs=readonly_attrs, + immutable_attrs=immutable_attrs, + transforms=transforms, + ) + + try: + + ensure_feature_is_enabled(client, 'CS') + # Apply appropriate state + if module.params['state'] == 'present': + log('Applying actions for state present') + if not action_exists(client, module): + if not module.check_mode: + csaction_proxy.add() + if module.params['save_config']: + client.save_config() + module_result['changed'] = True + elif not action_identical(client, module, csaction_proxy): + + # Check if we try to change value of immutable attributes + immutables_changed = get_immutables_intersection(csaction_proxy, diff_list(client, module, csaction_proxy).keys()) + if immutables_changed != []: + module.fail_json( + msg='Cannot update immutable attributes %s' % (immutables_changed,), + diff=diff_list(client, module, csaction_proxy), + **module_result + ) + + if not module.check_mode: + csaction_proxy.update() + if module.params['save_config']: + client.save_config() + module_result['changed'] = True + else: + module_result['changed'] = False + + # Sanity check for state + log('Sanity checks for state present') + if not module.check_mode: + if not action_exists(client, module): + module.fail_json(msg='Content switching action does not exist', **module_result) + if not action_identical(client, module, csaction_proxy): + module.fail_json( + msg='Content switching action differs from configured', + diff=diff_list(client, module, csaction_proxy), + **module_result + ) + + elif module.params['state'] == 'absent': + log('Applying actions for state absent') + if action_exists(client, module): + if not module.check_mode: + csaction_proxy.delete() + if module.params['save_config']: + client.save_config() + module_result['changed'] = True + else: + module_result['changed'] = False + + # Sanity check for state + if not module.check_mode: + log('Sanity checks for state absent') + if action_exists(client, module): + module.fail_json(msg='Content switching action still exists', **module_result) + + except nitro_exception as e: + msg = "nitro exception errorcode=%s, message=%s" % (str(e.errorcode), e.message) + module.fail_json(msg=msg, **module_result) + + client.logout() + module.exit_json(**module_result) + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/network/netscaler/netscaler_cs_policy.py b/plugins/modules/network/netscaler/netscaler_cs_policy.py new file mode 100644 index 0000000000..d2b1c1423c --- /dev/null +++ b/plugins/modules/network/netscaler/netscaler_cs_policy.py @@ -0,0 +1,287 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2017 Citrix Systems +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import 
absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: netscaler_cs_policy +short_description: Manage content switching policy +description: + - Manage content switching policy. + - "This module is intended to run either on the ansible control node or a bastion (jumpserver) with access to the actual netscaler instance." + + +author: George Nikolopoulos (@giorgos-nikolopoulos) + +options: + + policyname: + description: + - >- + Name for the content switching policy. Must begin with an ASCII alphanumeric or underscore C(_) + character, and must contain only ASCII alphanumeric, underscore, hash C(#), period C(.), space C( ), colon + C(:), at sign C(@), equal sign C(=), and hyphen C(-) characters. Cannot be changed after a policy is + created. + - "The following requirement applies only to the NetScaler CLI:" + - >- + If the name includes one or more spaces, enclose the name in double or single quotation marks (for + example, my policy or my policy). + - "Minimum length = 1" + + url: + description: + - >- + URL string that is matched with the URL of a request. Can contain a wildcard character. Specify the + string value in the following format: C([[prefix] [*]] [.suffix]). + - "Minimum length = 1" + - "Maximum length = 208" + + rule: + description: + - >- + Expression, or name of a named expression, against which traffic is evaluated. Written in the classic + or default syntax. + - "Note:" + - >- + Maximum length of a string literal in the expression is 255 characters. A longer string can be split + into smaller strings of up to 255 characters each, and the smaller strings concatenated with the + + operator. For example, you can create a 500-character string as follows: '"" + ""' + + domain: + description: + - "The domain name. The string value can range to 63 characters." + - "Minimum length = 1" + + action: + description: + - >- + Content switching action that names the target load balancing virtual server to which the traffic is + switched. + +extends_documentation_fragment: +- community.general.netscaler + +requirements: + - nitro python sdk +''' + +EXAMPLES = ''' +- name: Create url cs policy + delegate_to: localhost + netscaler_cs_policy: + nsip: 172.18.0.2 + nitro_user: nsroot + nitro_pass: nsroot + validate_certs: no + + state: present + + policyname: policy_1 + url: /example/ +''' + +RETURN = ''' +loglines: + description: list of logged messages by the module + returned: always + type: list + sample: ['message 1', 'message 2'] + +msg: + description: Message detailing the failure reason + returned: failure + type: str + sample: "Could not load nitro python sdk" + +diff: + description: List of differences between the actual configured object and the configuration specified in the module + returned: failure + type: dict + sample: { 'url': 'difference. 
ours: (str) example1 other: (str) /example1' } +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.network.netscaler.netscaler import ConfigProxy, get_nitro_client, netscaler_common_arguments, log, loglines, ensure_feature_is_enabled +try: + from nssrc.com.citrix.netscaler.nitro.resource.config.cs.cspolicy import cspolicy + from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception + PYTHON_SDK_IMPORTED = True +except ImportError as e: + PYTHON_SDK_IMPORTED = False + + +def policy_exists(client, module): + log('Checking if policy exists') + if cspolicy.count_filtered(client, 'policyname:%s' % module.params['policyname']) > 0: + return True + else: + return False + + +def policy_identical(client, module, cspolicy_proxy): + log('Checking if defined policy is identical to configured') + if cspolicy.count_filtered(client, 'policyname:%s' % module.params['policyname']) == 0: + return False + policy_list = cspolicy.get_filtered(client, 'policyname:%s' % module.params['policyname']) + diff_dict = cspolicy_proxy.diff_object(policy_list[0]) + if 'ip' in diff_dict: + del diff_dict['ip'] + if len(diff_dict) == 0: + return True + else: + return False + + +def diff_list(client, module, cspolicy_proxy): + policy_list = cspolicy.get_filtered(client, 'policyname:%s' % module.params['policyname']) + return cspolicy_proxy.diff_object(policy_list[0]) + + +def main(): + + module_specific_arguments = dict( + policyname=dict(type='str'), + url=dict(type='str'), + rule=dict(type='str'), + domain=dict(type='str'), + action=dict(type='str'), + ) + + hand_inserted_arguments = dict( + ) + + argument_spec = dict() + + argument_spec.update(netscaler_common_arguments) + argument_spec.update(module_specific_arguments) + argument_spec.update(hand_inserted_arguments) + + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + module_result = dict( + changed=False, + failed=False, + loglines=loglines, + ) + + # Fail the module if imports failed + if not PYTHON_SDK_IMPORTED: + module.fail_json(msg='Could not load nitro python sdk') + + # Fallthrough to rest of execution + client = get_nitro_client(module) + + try: + client.login() + except nitro_exception as e: + msg = "nitro exception during login. 
errorcode=%s, message=%s" % (str(e.errorcode), e.message)
+        module.fail_json(msg=msg)
+    except Exception as e:
+        if str(type(e)) == "<class 'requests.exceptions.ConnectionError'>":
+            module.fail_json(msg='Connection error %s' % str(e))
+        elif str(type(e)) == "<class 'requests.exceptions.SSLError'>":
+            module.fail_json(msg='SSL Error %s' % str(e))
+        else:
+            module.fail_json(msg='Unexpected error during login %s' % str(e))
+
+    readwrite_attrs = [
+        'policyname',
+        'url',
+        'rule',
+        'domain',
+        'action',
+    ]
+    readonly_attrs = [
+        'vstype',
+        'hits',
+        'bindhits',
+        'labelname',
+        'labeltype',
+        'priority',
+        'activepolicy',
+        'cspolicytype',
+    ]
+
+    transforms = {
+    }
+
+    # Instantiate config proxy
+    cspolicy_proxy = ConfigProxy(
+        actual=cspolicy(),
+        client=client,
+        attribute_values_dict=module.params,
+        readwrite_attrs=readwrite_attrs,
+        readonly_attrs=readonly_attrs,
+        transforms=transforms,
+    )
+
+    try:
+        ensure_feature_is_enabled(client, 'CS')
+
+        # Apply appropriate state
+        if module.params['state'] == 'present':
+            log('Applying actions for state present')
+            if not policy_exists(client, module):
+                if not module.check_mode:
+                    cspolicy_proxy.add()
+                    if module.params['save_config']:
+                        client.save_config()
+                module_result['changed'] = True
+            elif not policy_identical(client, module, cspolicy_proxy):
+                if not module.check_mode:
+                    cspolicy_proxy.update()
+                    if module.params['save_config']:
+                        client.save_config()
+                module_result['changed'] = True
+            else:
+                module_result['changed'] = False
+
+            # Sanity check for state
+            if not module.check_mode:
+                log('Sanity checks for state present')
+                if not policy_exists(client, module):
+                    module.fail_json(msg='Policy does not exist', **module_result)
+                if not policy_identical(client, module, cspolicy_proxy):
+                    module.fail_json(msg='Policy differs from configured', diff=diff_list(client, module, cspolicy_proxy), **module_result)
+
+        elif module.params['state'] == 'absent':
+            log('Applying actions for state absent')
+            if policy_exists(client, module):
+                if not module.check_mode:
+                    cspolicy_proxy.delete()
+                    if module.params['save_config']:
+                        client.save_config()
+                module_result['changed'] = True
+            else:
+                module_result['changed'] = False
+
+            # Sanity check for state
+            if not module.check_mode:
+                log('Sanity checks for state absent')
+                if policy_exists(client, module):
+                    module.fail_json(msg='Policy still exists', **module_result)
+
+    except nitro_exception as e:
+        msg = "nitro exception errorcode=%s, message=%s" % (str(e.errorcode), e.message)
+        module.fail_json(msg=msg, **module_result)
+
+    client.logout()
+    module.exit_json(**module_result)
+
+
+if __name__ == "__main__":
+    main()
diff --git a/plugins/modules/network/netscaler/netscaler_cs_vserver.py b/plugins/modules/network/netscaler/netscaler_cs_vserver.py
new file mode 100644
index 0000000000..7fbc495cc0
--- /dev/null
+++ b/plugins/modules/network/netscaler/netscaler_cs_vserver.py
@@ -0,0 +1,1307 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2017 Citrix Systems
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: netscaler_cs_vserver
+short_description: Manage content switching vserver
+description:
+    - Manage content switching vserver
+    - This module is intended to run either on the ansible control node or a bastion (jumpserver) with access to the actual netscaler instance
+
+
+author:
George Nikolopoulos (@giorgos-nikolopoulos) + +options: + + name: + description: + - >- + Name for the content switching virtual server. Must begin with an ASCII alphanumeric or underscore + C(_) character, and must contain only ASCII alphanumeric, underscore C(_), hash C(#), period C(.), space, + colon C(:), at sign C(@), equal sign C(=), and hyphen C(-) characters. + - "Cannot be changed after the CS virtual server is created." + - "Minimum length = 1" + + td: + description: + - >- + Integer value that uniquely identifies the traffic domain in which you want to configure the entity. + If you do not specify an ID, the entity becomes part of the default traffic domain, which has an ID + of 0. + - "Minimum value = 0" + - "Maximum value = 4094" + + servicetype: + choices: + - 'HTTP' + - 'SSL' + - 'TCP' + - 'FTP' + - 'RTSP' + - 'SSL_TCP' + - 'UDP' + - 'DNS' + - 'SIP_UDP' + - 'SIP_TCP' + - 'SIP_SSL' + - 'ANY' + - 'RADIUS' + - 'RDP' + - 'MYSQL' + - 'MSSQL' + - 'DIAMETER' + - 'SSL_DIAMETER' + - 'DNS_TCP' + - 'ORACLE' + - 'SMPP' + description: + - "Protocol used by the virtual server." + + ipv46: + description: + - "IP address of the content switching virtual server." + - "Minimum length = 1" + + targettype: + choices: + - 'GSLB' + description: + - "Virtual server target type." + + ippattern: + description: + - >- + IP address pattern, in dotted decimal notation, for identifying packets to be accepted by the virtual + server. The IP Mask parameter specifies which part of the destination IP address is matched against + the pattern. Mutually exclusive with the IP Address parameter. + - >- + For example, if the IP pattern assigned to the virtual server is C(198.51.100.0) and the IP mask is + C(255.255.240.0) (a forward mask), the first 20 bits in the destination IP addresses are matched with + the first 20 bits in the pattern. The virtual server accepts requests with IP addresses that range + from 198.51.96.1 to 198.51.111.254. You can also use a pattern such as C(0.0.2.2) and a mask such as + C(0.0.255.255) (a reverse mask). + - >- + If a destination IP address matches more than one IP pattern, the pattern with the longest match is + selected, and the associated virtual server processes the request. For example, if the virtual + servers, C(vs1) and C(vs2), have the same IP pattern, C(0.0.100.128), but different IP masks of C(0.0.255.255) + and C(0.0.224.255), a destination IP address of 198.51.100.128 has the longest match with the IP pattern + of C(vs1). If a destination IP address matches two or more virtual servers to the same extent, the + request is processed by the virtual server whose port number matches the port number in the request. + + ipmask: + description: + - >- + IP mask, in dotted decimal notation, for the IP Pattern parameter. Can have leading or trailing + non-zero octets (for example, C(255.255.240.0) or C(0.0.255.255)). Accordingly, the mask specifies whether + the first n bits or the last n bits of the destination IP address in a client request are to be + matched with the corresponding bits in the IP pattern. The former is called a forward mask. The + latter is called a reverse mask. + + range: + description: + - >- + Number of consecutive IP addresses, starting with the address specified by the IP Address parameter, + to include in a range of addresses assigned to this virtual server. + - "Minimum value = C(1)" + - "Maximum value = C(254)" + + port: + description: + - "Port number for content switching virtual server." 
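
The forward and reverse mask mechanics described for ippattern and ipmask above are easy to verify numerically. The sketch below is an editorial illustration, not part of the module; the helper name matches_ip_pattern is invented, and it simply applies the bitwise comparison the documentation describes.

    import ipaddress

    def matches_ip_pattern(address, pattern, mask):
        # A destination address matches when (address & mask) == (pattern & mask).
        # A forward mask such as 255.255.240.0 compares the leading bits; a
        # reverse mask such as 0.0.255.255 compares trailing bits with the
        # same formula.
        a = int(ipaddress.IPv4Address(address))
        p = int(ipaddress.IPv4Address(pattern))
        m = int(ipaddress.IPv4Address(mask))
        return (a & m) == (p & m)

    # The documentation's example: pattern 198.51.100.0 with forward mask
    # 255.255.240.0 accepts addresses from 198.51.96.1 to 198.51.111.254.
    assert matches_ip_pattern('198.51.96.1', '198.51.100.0', '255.255.240.0')
    assert matches_ip_pattern('198.51.111.254', '198.51.100.0', '255.255.240.0')
    assert not matches_ip_pattern('198.51.112.1', '198.51.100.0', '255.255.240.0')
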
+ - "Minimum value = 1" + - "Range C(1) - C(65535)" + - "* in CLI is represented as 65535 in NITRO API" + + stateupdate: + choices: + - 'enabled' + - 'disabled' + description: + - >- + Enable state updates for a specific content switching virtual server. By default, the Content + Switching virtual server is always UP, regardless of the state of the Load Balancing virtual servers + bound to it. This parameter interacts with the global setting as follows: + - "Global Level | Vserver Level | Result" + - "enabled enabled enabled" + - "enabled disabled enabled" + - "disabled enabled enabled" + - "disabled disabled disabled" + - >- + If you want to enable state updates for only some content switching virtual servers, be sure to + disable the state update parameter. + + cacheable: + description: + - >- + Use this option to specify whether a virtual server, used for load balancing or content switching, + routes requests to the cache redirection virtual server before sending it to the configured servers. + type: bool + + redirecturl: + description: + - >- + URL to which traffic is redirected if the virtual server becomes unavailable. The service type of the + virtual server should be either C(HTTP) or C(SSL). + - >- + Caution: Make sure that the domain in the URL does not match the domain specified for a content + switching policy. If it does, requests are continuously redirected to the unavailable virtual server. + - "Minimum length = 1" + + clttimeout: + description: + - "Idle time, in seconds, after which the client connection is terminated. The default values are:" + - "Minimum value = C(0)" + - "Maximum value = C(31536000)" + + precedence: + choices: + - 'RULE' + - 'URL' + description: + - >- + Type of precedence to use for both RULE-based and URL-based policies on the content switching virtual + server. With the default C(RULE) setting, incoming requests are evaluated against the rule-based + content switching policies. If none of the rules match, the URL in the request is evaluated against + the URL-based content switching policies. + + casesensitive: + description: + - >- + Consider case in URLs (for policies that use URLs instead of RULES). For example, with the C(on) + setting, the URLs /a/1.html and /A/1.HTML are treated differently and can have different targets (set + by content switching policies). With the C(off) setting, /a/1.html and /A/1.HTML are switched to the + same target. + type: bool + + somethod: + choices: + - 'CONNECTION' + - 'DYNAMICCONNECTION' + - 'BANDWIDTH' + - 'HEALTH' + - 'NONE' + description: + - >- + Type of spillover used to divert traffic to the backup virtual server when the primary virtual server + reaches the spillover threshold. Connection spillover is based on the number of connections. + Bandwidth spillover is based on the total Kbps of incoming and outgoing traffic. + + sopersistence: + choices: + - 'enabled' + - 'disabled' + description: + - "Maintain source-IP based persistence on primary and backup virtual servers." + + sopersistencetimeout: + description: + - "Time-out value, in minutes, for spillover persistence." + - "Minimum value = C(2)" + - "Maximum value = C(1440)" + + sothreshold: + description: + - >- + Depending on the spillover method, the maximum number of connections or the maximum total bandwidth + (Kbps) that a virtual server can handle before spillover occurs. 
+ - "Minimum value = C(1)" + - "Maximum value = C(4294967287)" + + sobackupaction: + choices: + - 'DROP' + - 'ACCEPT' + - 'REDIRECT' + description: + - >- + Action to be performed if spillover is to take effect, but no backup chain to spillover is usable or + exists. + + redirectportrewrite: + choices: + - 'enabled' + - 'disabled' + description: + - "State of port rewrite while performing HTTP redirect." + + downstateflush: + choices: + - 'enabled' + - 'disabled' + description: + - >- + Flush all active transactions associated with a virtual server whose state transitions from UP to + DOWN. Do not enable this option for applications that must complete their transactions. + + backupvserver: + description: + - >- + Name of the backup virtual server that you are configuring. Must begin with an ASCII alphanumeric or + underscore C(_) character, and must contain only ASCII alphanumeric, underscore C(_), hash C(#), period C(.), + space C( ), colon C(:), at sign C(@), equal sign C(=), and hyphen C(-) characters. Can be changed after the + backup virtual server is created. You can assign a different backup virtual server or rename the + existing virtual server. + - "Minimum length = 1" + + disableprimaryondown: + choices: + - 'enabled' + - 'disabled' + description: + - >- + Continue forwarding the traffic to backup virtual server even after the primary server comes UP from + the DOWN state. + + insertvserveripport: + choices: + - 'OFF' + - 'VIPADDR' + - 'V6TOV4MAPPING' + description: + - >- + Insert the virtual server's VIP address and port number in the request header. Available values + function as follows: + - "C(VIPADDR) - Header contains the vserver's IP address and port number without any translation." + - "C(OFF) - The virtual IP and port header insertion option is disabled." + - >- + C(V6TOV4MAPPING) - Header contains the mapped IPv4 address corresponding to the IPv6 address of the + vserver and the port number. An IPv6 address can be mapped to a user-specified IPv4 address using the + set ns ip6 command. + + vipheader: + description: + - "Name of virtual server IP and port header, for use with the VServer IP Port Insertion parameter." + - "Minimum length = 1" + + rtspnat: + description: + - "Enable network address translation (NAT) for real-time streaming protocol (RTSP) connections." + type: bool + + authenticationhost: + description: + - >- + FQDN of the authentication virtual server. The service type of the virtual server should be either + C(HTTP) or C(SSL). + - "Minimum length = 3" + - "Maximum length = 252" + + authentication: + description: + - "Authenticate users who request a connection to the content switching virtual server." + type: bool + + listenpolicy: + description: + - >- + String specifying the listen policy for the content switching virtual server. Can be either the name + of an existing expression or an in-line expression. + + authn401: + description: + - "Enable HTTP 401-response based authentication." + type: bool + + authnvsname: + description: + - >- + Name of authentication virtual server that authenticates the incoming user requests to this content + switching virtual server. . + - "Minimum length = 1" + - "Maximum length = 252" + + push: + choices: + - 'enabled' + - 'disabled' + description: + - >- + Process traffic with the push virtual server that is bound to this content switching virtual server + (specified by the Push VServer parameter). The service type of the push virtual server should be + either C(HTTP) or C(SSL). 
+ + pushvserver: + description: + - >- + Name of the load balancing virtual server, of type C(PUSH) or C(SSL_PUSH), to which the server pushes + updates received on the client-facing load balancing virtual server. + - "Minimum length = 1" + + pushlabel: + description: + - >- + Expression for extracting the label from the response received from server. This string can be either + an existing rule name or an inline expression. The service type of the virtual server should be + either C(HTTP) or C(SSL). + + pushmulticlients: + description: + - >- + Allow multiple Web 2.0 connections from the same client to connect to the virtual server and expect + updates. + type: bool + + tcpprofilename: + description: + - "Name of the TCP profile containing TCP configuration settings for the virtual server." + - "Minimum length = 1" + - "Maximum length = 127" + + httpprofilename: + description: + - >- + Name of the HTTP profile containing HTTP configuration settings for the virtual server. The service + type of the virtual server should be either C(HTTP) or C(SSL). + - "Minimum length = 1" + - "Maximum length = 127" + + dbprofilename: + description: + - "Name of the DB profile." + - "Minimum length = 1" + - "Maximum length = 127" + + oracleserverversion: + choices: + - '10G' + - '11G' + description: + - "Oracle server version." + + comment: + description: + - "Information about this virtual server." + + mssqlserverversion: + choices: + - '70' + - '2000' + - '2000SP1' + - '2005' + - '2008' + - '2008R2' + - '2012' + - '2014' + description: + - "The version of the MSSQL server." + + l2conn: + description: + - "Use L2 Parameters to identify a connection." + type: bool + + mysqlprotocolversion: + description: + - "The protocol version returned by the mysql vserver." + + mysqlserverversion: + description: + - "The server version string returned by the mysql vserver." + - "Minimum length = 1" + - "Maximum length = 31" + + mysqlcharacterset: + description: + - "The character set returned by the mysql vserver." + + mysqlservercapabilities: + description: + - "The server capabilities returned by the mysql vserver." + + appflowlog: + choices: + - 'enabled' + - 'disabled' + description: + - "Enable logging appflow flow information." + + netprofile: + description: + - "The name of the network profile." + - "Minimum length = 1" + - "Maximum length = 127" + + icmpvsrresponse: + choices: + - 'PASSIVE' + - 'ACTIVE' + description: + - "Can be active or passive." + + rhistate: + choices: + - 'PASSIVE' + - 'ACTIVE' + description: + - "A host route is injected according to the setting on the virtual servers" + - >- + * If set to C(PASSIVE) on all the virtual servers that share the IP address, the appliance always + injects the hostroute. + - >- + * If set to C(ACTIVE) on all the virtual servers that share the IP address, the appliance injects even + if one virtual server is UP. + - >- + * If set to C(ACTIVE) on some virtual servers and C(PASSIVE) on the others, the appliance, injects even if + one virtual server set to C(ACTIVE) is UP. + + authnprofile: + description: + - "Name of the authentication profile to be used when authentication is turned on." + + dnsprofilename: + description: + - >- + Name of the DNS profile to be associated with the VServer. DNS profile properties will applied to the + transactions processed by a VServer. This parameter is valid only for DNS and DNS-TCP VServers. 
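
The three rhistate bullet points above amount to one predicate over all virtual servers sharing a VIP. An editorial sketch, where the function name and the tuple encoding are invented for illustration:

    def injects_host_route(vservers):
        # vservers: iterable of (rhistate, is_up) pairs for every virtual
        # server that shares the IP address.
        states = list(vservers)
        if all(state == 'PASSIVE' for state, _ in states):
            return True  # all PASSIVE: the appliance always injects
        # Any ACTIVE present: inject when at least one ACTIVE vserver is UP.
        return any(state == 'ACTIVE' and up for state, up in states)

    assert injects_host_route([('PASSIVE', False), ('PASSIVE', False)])
    assert injects_host_route([('ACTIVE', True), ('PASSIVE', False)])
    assert not injects_host_route([('ACTIVE', False), ('PASSIVE', True)])
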
+ - "Minimum length = 1" + - "Maximum length = 127" + + domainname: + description: + - "Domain name for which to change the time to live (TTL) and/or backup service IP address." + - "Minimum length = 1" + + ttl: + description: + - "." + - "Minimum value = C(1)" + + backupip: + description: + - "." + - "Minimum length = 1" + + cookiedomain: + description: + - "." + - "Minimum length = 1" + + cookietimeout: + description: + - "." + - "Minimum value = C(0)" + - "Maximum value = C(1440)" + + sitedomainttl: + description: + - "." + - "Minimum value = C(1)" + + lbvserver: + description: + - The default Load Balancing virtual server. + + ssl_certkey: + description: + - The name of the ssl certificate that is bound to this service. + - The ssl certificate must already exist. + - Creating the certificate can be done with the M(netscaler_ssl_certkey) module. + - This option is only applicable only when C(servicetype) is C(SSL). + + disabled: + description: + - When set to C(yes) the cs vserver will be disabled. + - When set to C(no) the cs vserver will be enabled. + - >- + Note that due to limitations of the underlying NITRO API a C(disabled) state change alone + does not cause the module result to report a changed status. + type: bool + default: 'no' + +extends_documentation_fragment: +- community.general.netscaler + +requirements: + - nitro python sdk +''' + +EXAMPLES = ''' +# policy_1 must have been already created with the netscaler_cs_policy module +# lbvserver_1 must have been already created with the netscaler_lb_vserver module + +- name: Setup content switching vserver + delegate_to: localhost + netscaler_cs_vserver: + nsip: 172.18.0.2 + nitro_user: nsroot + nitro_pass: nsroot + + state: present + + name: cs_vserver_1 + ipv46: 192.168.1.1 + port: 80 + servicetype: HTTP + + policybindings: + - policyname: policy_1 + targetlbvserver: lbvserver_1 +''' + +RETURN = ''' +loglines: + description: list of logged messages by the module + returned: always + type: list + sample: ['message 1', 'message 2'] + +msg: + description: Message detailing the failure reason + returned: failure + type: str + sample: "Action does not exist" + +diff: + description: List of differences between the actual configured object and the configuration specified in the module + returned: failure + type: dict + sample: { 'clttimeout': 'difference. 
ours: (float) 100.0 other: (float) 60.0' } +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.network.netscaler.netscaler import ( + ConfigProxy, + get_nitro_client, + netscaler_common_arguments, + log, + loglines, + ensure_feature_is_enabled, + get_immutables_intersection +) +try: + from nssrc.com.citrix.netscaler.nitro.resource.config.cs.csvserver import csvserver + from nssrc.com.citrix.netscaler.nitro.resource.config.cs.csvserver_lbvserver_binding import csvserver_lbvserver_binding + from nssrc.com.citrix.netscaler.nitro.resource.config.cs.csvserver_cspolicy_binding import csvserver_cspolicy_binding + from nssrc.com.citrix.netscaler.nitro.resource.config.ssl.sslvserver_sslcertkey_binding import sslvserver_sslcertkey_binding + from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception + PYTHON_SDK_IMPORTED = True +except ImportError as e: + PYTHON_SDK_IMPORTED = False + + +def cs_vserver_exists(client, module): + if csvserver.count_filtered(client, 'name:%s' % module.params['name']) > 0: + return True + else: + return False + + +def cs_vserver_identical(client, module, csvserver_proxy): + csvserver_list = csvserver.get_filtered(client, 'name:%s' % module.params['name']) + diff_dict = csvserver_proxy.diff_object(csvserver_list[0]) + if len(diff_dict) == 0: + return True + else: + return False + + +def get_configured_policybindings(client, module): + log('Getting configured policy bindigs') + bindings = {} + if module.params['policybindings'] is None: + return bindings + + for binding in module.params['policybindings']: + binding['name'] = module.params['name'] + key = binding['policyname'] + binding_proxy = ConfigProxy( + actual=csvserver_cspolicy_binding(), + client=client, + readwrite_attrs=[ + 'priority', + 'bindpoint', + 'policyname', + 'labelname', + 'gotopriorityexpression', + 'targetlbvserver', + 'name', + 'invoke', + 'labeltype', + ], + readonly_attrs=[], + attribute_values_dict=binding + ) + bindings[key] = binding_proxy + return bindings + + +def get_default_lb_vserver(client, module): + try: + default_lb_vserver = csvserver_lbvserver_binding.get(client, module.params['name']) + return default_lb_vserver[0] + except nitro_exception as e: + if e.errorcode == 258: + return csvserver_lbvserver_binding() + else: + raise + + +def default_lb_vserver_identical(client, module): + d = get_default_lb_vserver(client, module) + configured = ConfigProxy( + actual=csvserver_lbvserver_binding(), + client=client, + readwrite_attrs=[ + 'name', + 'lbvserver', + ], + attribute_values_dict={ + 'name': module.params['name'], + 'lbvserver': module.params['lbvserver'], + } + ) + log('default lb vserver %s' % ((d.name, d.lbvserver),)) + if d.name is None and module.params['lbvserver'] is None: + log('Default lb vserver identical missing') + return True + elif d.name is not None and module.params['lbvserver'] is None: + log('Default lb vserver needs removing') + return False + elif configured.has_equal_attributes(d): + log('Default lb vserver identical') + return True + else: + log('Default lb vserver not identical') + return False + + +def sync_default_lb_vserver(client, module): + d = get_default_lb_vserver(client, module) + + if module.params['lbvserver'] is not None: + configured = ConfigProxy( + actual=csvserver_lbvserver_binding(), + client=client, + readwrite_attrs=[ + 'name', + 'lbvserver', + ], + attribute_values_dict={ + 'name': module.params['name'], + 'lbvserver': 
module.params['lbvserver'], + } + ) + + if not configured.has_equal_attributes(d): + if d.name is not None: + log('Deleting default lb vserver %s' % d.lbvserver) + csvserver_lbvserver_binding.delete(client, d) + log('Adding default lb vserver %s' % configured.lbvserver) + configured.add() + else: + if d.name is not None: + log('Deleting default lb vserver %s' % d.lbvserver) + csvserver_lbvserver_binding.delete(client, d) + + +def get_actual_policybindings(client, module): + log('Getting actual policy bindigs') + bindings = {} + try: + count = csvserver_cspolicy_binding.count(client, name=module.params['name']) + if count == 0: + return bindings + except nitro_exception as e: + if e.errorcode == 258: + return bindings + else: + raise + + for binding in csvserver_cspolicy_binding.get(client, name=module.params['name']): + key = binding.policyname + bindings[key] = binding + + return bindings + + +def cs_policybindings_identical(client, module): + log('Checking policy bindings identical') + actual_bindings = get_actual_policybindings(client, module) + configured_bindings = get_configured_policybindings(client, module) + + actual_keyset = set(actual_bindings.keys()) + configured_keyset = set(configured_bindings.keys()) + if len(actual_keyset ^ configured_keyset) > 0: + return False + + # Compare item to item + for key in actual_bindings.keys(): + configured_binding_proxy = configured_bindings[key] + actual_binding_object = actual_bindings[key] + if not configured_binding_proxy.has_equal_attributes(actual_binding_object): + return False + + # Fallthrough to success + return True + + +def sync_cs_policybindings(client, module): + log('Syncing cs policybindings') + actual_bindings = get_actual_policybindings(client, module) + configured_bindings = get_configured_policybindings(client, module) + + # Delete actual bindings not in configured + delete_keys = list(set(actual_bindings.keys()) - set(configured_bindings.keys())) + for key in delete_keys: + log('Deleting binding for policy %s' % key) + csvserver_cspolicy_binding.delete(client, actual_bindings[key]) + + # Add configured bindings not in actual + add_keys = list(set(configured_bindings.keys()) - set(actual_bindings.keys())) + for key in add_keys: + log('Adding binding for policy %s' % key) + configured_bindings[key].add() + + # Update existing if changed + modify_keys = list(set(configured_bindings.keys()) & set(actual_bindings.keys())) + for key in modify_keys: + if not configured_bindings[key].has_equal_attributes(actual_bindings[key]): + log('Updating binding for policy %s' % key) + csvserver_cspolicy_binding.delete(client, actual_bindings[key]) + configured_bindings[key].add() + + +def ssl_certkey_bindings_identical(client, module): + log('Checking if ssl cert key bindings are identical') + vservername = module.params['name'] + if sslvserver_sslcertkey_binding.count(client, vservername) == 0: + bindings = [] + else: + bindings = sslvserver_sslcertkey_binding.get(client, vservername) + + if module.params['ssl_certkey'] is None: + if len(bindings) == 0: + return True + else: + return False + else: + certificate_list = [item.certkeyname for item in bindings] + if certificate_list == [module.params['ssl_certkey']]: + return True + else: + return False + + +def ssl_certkey_bindings_sync(client, module): + log('Syncing certkey bindings') + vservername = module.params['name'] + if sslvserver_sslcertkey_binding.count(client, vservername) == 0: + bindings = [] + else: + bindings = sslvserver_sslcertkey_binding.get(client, vservername) + + # 
Delete existing bindings + for binding in bindings: + log('Deleting existing binding for certkey %s' % binding.certkeyname) + sslvserver_sslcertkey_binding.delete(client, binding) + + # Add binding if appropriate + if module.params['ssl_certkey'] is not None: + log('Adding binding for certkey %s' % module.params['ssl_certkey']) + binding = sslvserver_sslcertkey_binding() + binding.vservername = module.params['name'] + binding.certkeyname = module.params['ssl_certkey'] + sslvserver_sslcertkey_binding.add(client, binding) + + +def diff_list(client, module, csvserver_proxy): + csvserver_list = csvserver.get_filtered(client, 'name:%s' % module.params['name']) + return csvserver_proxy.diff_object(csvserver_list[0]) + + +def do_state_change(client, module, csvserver_proxy): + if module.params['disabled']: + log('Disabling cs vserver') + result = csvserver.disable(client, csvserver_proxy.actual) + else: + log('Enabling cs vserver') + result = csvserver.enable(client, csvserver_proxy.actual) + return result + + +def main(): + + module_specific_arguments = dict( + + name=dict(type='str'), + td=dict(type='float'), + servicetype=dict( + type='str', + choices=[ + 'HTTP', + 'SSL', + 'TCP', + 'FTP', + 'RTSP', + 'SSL_TCP', + 'UDP', + 'DNS', + 'SIP_UDP', + 'SIP_TCP', + 'SIP_SSL', + 'ANY', + 'RADIUS', + 'RDP', + 'MYSQL', + 'MSSQL', + 'DIAMETER', + 'SSL_DIAMETER', + 'DNS_TCP', + 'ORACLE', + 'SMPP' + ] + ), + + ipv46=dict(type='str'), + dnsrecordtype=dict( + type='str', + choices=[ + 'A', + 'AAAA', + 'CNAME', + 'NAPTR', + ] + ), + ippattern=dict(type='str'), + ipmask=dict(type='str'), + range=dict(type='float'), + port=dict(type='int'), + stateupdate=dict( + type='str', + choices=[ + 'enabled', + 'disabled', + ] + ), + cacheable=dict(type='bool'), + redirecturl=dict(type='str'), + clttimeout=dict(type='float'), + precedence=dict( + type='str', + choices=[ + 'RULE', + 'URL', + ] + ), + casesensitive=dict(type='bool'), + somethod=dict( + type='str', + choices=[ + 'CONNECTION', + 'DYNAMICCONNECTION', + 'BANDWIDTH', + 'HEALTH', + 'NONE', + ] + ), + sopersistence=dict( + type='str', + choices=[ + 'enabled', + 'disabled', + ] + ), + sopersistencetimeout=dict(type='float'), + sothreshold=dict(type='float'), + sobackupaction=dict( + type='str', + choices=[ + 'DROP', + 'ACCEPT', + 'REDIRECT', + ] + ), + redirectportrewrite=dict( + type='str', + choices=[ + 'enabled', + 'disabled', + ] + ), + downstateflush=dict( + type='str', + choices=[ + 'enabled', + 'disabled', + ] + ), + disableprimaryondown=dict( + type='str', + choices=[ + 'enabled', + 'disabled', + ] + ), + insertvserveripport=dict( + type='str', + choices=[ + 'OFF', + 'VIPADDR', + 'V6TOV4MAPPING', + ] + ), + vipheader=dict(type='str'), + rtspnat=dict(type='bool'), + authenticationhost=dict(type='str'), + authentication=dict(type='bool'), + listenpolicy=dict(type='str'), + authn401=dict(type='bool'), + authnvsname=dict(type='str'), + push=dict( + type='str', + choices=[ + 'enabled', + 'disabled', + ] + ), + pushvserver=dict(type='str'), + pushlabel=dict(type='str'), + pushmulticlients=dict(type='bool'), + tcpprofilename=dict(type='str'), + httpprofilename=dict(type='str'), + dbprofilename=dict(type='str'), + oracleserverversion=dict( + type='str', + choices=[ + '10G', + '11G', + ] + ), + comment=dict(type='str'), + mssqlserverversion=dict( + type='str', + choices=[ + '70', + '2000', + '2000SP1', + '2005', + '2008', + '2008R2', + '2012', + '2014', + ] + ), + l2conn=dict(type='bool'), + mysqlprotocolversion=dict(type='float'), + 
mysqlserverversion=dict(type='str'),
+        mysqlcharacterset=dict(type='float'),
+        mysqlservercapabilities=dict(type='float'),
+        appflowlog=dict(
+            type='str',
+            choices=[
+                'enabled',
+                'disabled',
+            ]
+        ),
+        netprofile=dict(type='str'),
+        icmpvsrresponse=dict(
+            type='str',
+            choices=[
+                'PASSIVE',
+                'ACTIVE',
+            ]
+        ),
+        rhistate=dict(
+            type='str',
+            choices=[
+                'PASSIVE',
+                'ACTIVE',
+            ]
+        ),
+        authnprofile=dict(type='str'),
+        dnsprofilename=dict(type='str'),
+    )
+
+    hand_inserted_arguments = dict(
+        policybindings=dict(type='list'),
+        ssl_certkey=dict(type='str'),
+        disabled=dict(
+            type='bool',
+            default=False
+        ),
+        lbvserver=dict(type='str'),
+    )
+
+    argument_spec = dict()
+
+    argument_spec.update(netscaler_common_arguments)
+
+    argument_spec.update(module_specific_arguments)
+    argument_spec.update(hand_inserted_arguments)
+
+    module = AnsibleModule(
+        argument_spec=argument_spec,
+        supports_check_mode=True,
+    )
+    module_result = dict(
+        changed=False,
+        failed=False,
+        loglines=loglines,
+    )
+
+    # Fail the module if imports failed
+    if not PYTHON_SDK_IMPORTED:
+        module.fail_json(msg='Could not load nitro python sdk')
+
+    # Fallthrough to rest of execution
+    client = get_nitro_client(module)
+
+    try:
+        client.login()
+    except nitro_exception as e:
+        msg = "nitro exception during login. errorcode=%s, message=%s" % (str(e.errorcode), e.message)
+        module.fail_json(msg=msg)
+    except Exception as e:
+        if str(type(e)) == "<class 'requests.exceptions.ConnectionError'>":
+            module.fail_json(msg='Connection error %s' % str(e))
+        elif str(type(e)) == "<class 'requests.exceptions.SSLError'>":
+            module.fail_json(msg='SSL Error %s' % str(e))
+        else:
+            module.fail_json(msg='Unexpected error during login %s' % str(e))
+
+    readwrite_attrs = [
+        'name',
+        'td',
+        'servicetype',
+        'ipv46',
+        'dnsrecordtype',
+        'ippattern',
+        'ipmask',
+        'range',
+        'port',
+        'stateupdate',
+        'cacheable',
+        'redirecturl',
+        'clttimeout',
+        'precedence',
+        'casesensitive',
+        'somethod',
+        'sopersistence',
+        'sopersistencetimeout',
+        'sothreshold',
+        'sobackupaction',
+        'redirectportrewrite',
+        'downstateflush',
+        'disableprimaryondown',
+        'insertvserveripport',
+        'vipheader',
+        'rtspnat',
+        'authenticationhost',
+        'authentication',
+        'listenpolicy',
+        'authn401',
+        'authnvsname',
+        'push',
+        'pushvserver',
+        'pushlabel',
+        'pushmulticlients',
+        'tcpprofilename',
+        'httpprofilename',
+        'dbprofilename',
+        'oracleserverversion',
+        'comment',
+        'mssqlserverversion',
+        'l2conn',
+        'mysqlprotocolversion',
+        'mysqlserverversion',
+        'mysqlcharacterset',
+        'mysqlservercapabilities',
+        'appflowlog',
+        'netprofile',
+        'icmpvsrresponse',
+        'rhistate',
+        'authnprofile',
+        'dnsprofilename',
+    ]
+
+    readonly_attrs = [
+        'ip',
+        'value',
+        'ngname',
+        'type',
+        'curstate',
+        'sc',
+        'status',
+        'cachetype',
+        'redirect',
+        'homepage',
+        'dnsvservername',
+        'domain',
+        'policyname',
+        'servicename',
+        'weight',
+        'cachevserver',
+        'targetvserver',
+        'priority',
+        'url',
+        'gotopriorityexpression',
+        'bindpoint',
+        'invoke',
+        'labeltype',
+        'labelname',
+        'gt2gb',
+        'statechangetimesec',
+        'statechangetimemsec',
+        'tickssincelaststatechange',
+        'ruletype',
+        'lbvserver',
+        'targetlbvserver',
+    ]
+
+    immutable_attrs = [
+        'name',
+        'td',
+        'servicetype',
+        'ipv46',
+        'targettype',
+        'range',
+        'port',
+        'state',
+        'vipheader',
+        'newname',
+    ]
+
+    transforms = {
+        'cacheable': ['bool_yes_no'],
+        'rtspnat': ['bool_on_off'],
+        'authn401': ['bool_on_off'],
+        'casesensitive': ['bool_on_off'],
+        'authentication': ['bool_on_off'],
+        'l2conn': ['bool_on_off'],
+        'pushmulticlients': ['bool_yes_no'],
+        'stateupdate': [lambda v:
v.upper()], + 'sopersistence': [lambda v: v.upper()], + 'redirectportrewrite': [lambda v: v.upper()], + 'downstateflush': [lambda v: v.upper()], + 'disableprimaryondown': [lambda v: v.upper()], + 'push': [lambda v: v.upper()], + 'appflowlog': [lambda v: v.upper()], + } + + # Instantiate config proxy + csvserver_proxy = ConfigProxy( + actual=csvserver(), + client=client, + attribute_values_dict=module.params, + readwrite_attrs=readwrite_attrs, + readonly_attrs=readonly_attrs, + immutable_attrs=immutable_attrs, + transforms=transforms, + ) + + try: + ensure_feature_is_enabled(client, 'CS') + + # Apply appropriate state + if module.params['state'] == 'present': + log('Applying actions for state present') + if not cs_vserver_exists(client, module): + if not module.check_mode: + csvserver_proxy.add() + if module.params['save_config']: + client.save_config() + module_result['changed'] = True + elif not cs_vserver_identical(client, module, csvserver_proxy): + + # Check if we try to change value of immutable attributes + immutables_changed = get_immutables_intersection(csvserver_proxy, diff_list(client, module, csvserver_proxy).keys()) + if immutables_changed != []: + module.fail_json( + msg='Cannot update immutable attributes %s' % (immutables_changed,), + diff=diff_list(client, module, csvserver_proxy), + **module_result + ) + + if not module.check_mode: + csvserver_proxy.update() + if module.params['save_config']: + client.save_config() + module_result['changed'] = True + else: + module_result['changed'] = False + + # Check policybindings + if not cs_policybindings_identical(client, module): + if not module.check_mode: + sync_cs_policybindings(client, module) + if module.params['save_config']: + client.save_config() + module_result['changed'] = True + + if module.params['servicetype'] != 'SSL' and module.params['ssl_certkey'] is not None: + module.fail_json(msg='ssl_certkey is applicable only to SSL vservers', **module_result) + + # Check ssl certkey bindings + if module.params['servicetype'] == 'SSL': + if not ssl_certkey_bindings_identical(client, module): + if not module.check_mode: + ssl_certkey_bindings_sync(client, module) + + module_result['changed'] = True + + # Check default lb vserver + if not default_lb_vserver_identical(client, module): + if not module.check_mode: + sync_default_lb_vserver(client, module) + module_result['changed'] = True + + if not module.check_mode: + res = do_state_change(client, module, csvserver_proxy) + if res.errorcode != 0: + msg = 'Error when setting disabled state. 
errorcode: %s message: %s' % (res.errorcode, res.message) + module.fail_json(msg=msg, **module_result) + + # Sanity check for state + if not module.check_mode: + log('Sanity checks for state present') + if not cs_vserver_exists(client, module): + module.fail_json(msg='CS vserver does not exist', **module_result) + if not cs_vserver_identical(client, module, csvserver_proxy): + module.fail_json(msg='CS vserver differs from configured', diff=diff_list(client, module, csvserver_proxy), **module_result) + if not cs_policybindings_identical(client, module): + module.fail_json(msg='Policy bindings differ') + + if module.params['servicetype'] == 'SSL': + if not ssl_certkey_bindings_identical(client, module): + module.fail_json(msg='sll certkey bindings not identical', **module_result) + + elif module.params['state'] == 'absent': + log('Applying actions for state absent') + if cs_vserver_exists(client, module): + if not module.check_mode: + csvserver_proxy.delete() + if module.params['save_config']: + client.save_config() + module_result['changed'] = True + else: + module_result['changed'] = False + + # Sanity check for state + if not module.check_mode: + log('Sanity checks for state absent') + if cs_vserver_exists(client, module): + module.fail_json(msg='CS vserver still exists', **module_result) + + except nitro_exception as e: + msg = "nitro exception errorcode=%s, message=%s" % (str(e.errorcode), e.message) + module.fail_json(msg=msg, **module_result) + + client.logout() + module.exit_json(**module_result) + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/network/netscaler/netscaler_gslb_service.py b/plugins/modules/network/netscaler/netscaler_gslb_service.py new file mode 100644 index 0000000000..2d2e0f8296 --- /dev/null +++ b/plugins/modules/network/netscaler/netscaler_gslb_service.py @@ -0,0 +1,696 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2017 Citrix Systems +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: netscaler_gslb_service +short_description: Manage gslb service entities in Netscaler. +description: + - Manage gslb service entities in Netscaler. + + +author: George Nikolopoulos (@giorgos-nikolopoulos) + +options: + + servicename: + description: + - >- + Name for the GSLB service. Must begin with an ASCII alphanumeric or underscore C(_) character, and + must contain only ASCII alphanumeric, underscore C(_), hash C(#), period C(.), space, colon C(:), at C(@), + equals C(=), and hyphen C(-) characters. Can be changed after the GSLB service is created. + - >- + - "Minimum length = 1" + + cnameentry: + description: + - "Canonical name of the GSLB service. Used in CNAME-based GSLB." + - "Minimum length = 1" + + + servername: + description: + - "Name of the server hosting the GSLB service." + - "Minimum length = 1" + + servicetype: + choices: + - 'HTTP' + - 'FTP' + - 'TCP' + - 'UDP' + - 'SSL' + - 'SSL_BRIDGE' + - 'SSL_TCP' + - 'NNTP' + - 'ANY' + - 'SIP_UDP' + - 'SIP_TCP' + - 'SIP_SSL' + - 'RADIUS' + - 'RDP' + - 'RTSP' + - 'MYSQL' + - 'MSSQL' + - 'ORACLE' + description: + - "Type of service to create." + + port: + description: + - "Port on which the load balancing entity represented by this GSLB service listens." 
+ - "Minimum value = 1" + - "Range 1 - 65535" + - "* in CLI is represented as 65535 in NITRO API" + + publicip: + description: + - >- + The public IP address that a NAT device translates to the GSLB service's private IP address. + Optional. + + publicport: + description: + - >- + The public port associated with the GSLB service's public IP address. The port is mapped to the + service's private port number. Applicable to the local GSLB service. Optional. + + maxclient: + description: + - >- + The maximum number of open connections that the service can support at any given time. A GSLB service + whose connection count reaches the maximum is not considered when a GSLB decision is made, until the + connection count drops below the maximum. + - "Minimum value = C(0)" + - "Maximum value = C(4294967294)" + + healthmonitor: + description: + - "Monitor the health of the GSLB service." + type: bool + + sitename: + description: + - "Name of the GSLB site to which the service belongs." + - "Minimum length = 1" + + cip: + choices: + - 'enabled' + - 'disabled' + description: + - >- + In the request that is forwarded to the GSLB service, insert a header that stores the client's IP + address. Client IP header insertion is used in connection-proxy based site persistence. + + cipheader: + description: + - >- + Name for the HTTP header that stores the client's IP address. Used with the Client IP option. If + client IP header insertion is enabled on the service and a name is not specified for the header, the + NetScaler appliance uses the name specified by the cipHeader parameter in the set ns param command + or, in the GUI, the Client IP Header parameter in the Configure HTTP Parameters dialog box. + - "Minimum length = 1" + + sitepersistence: + choices: + - 'ConnectionProxy' + - 'HTTPRedirect' + - 'NONE' + description: + - "Use cookie-based site persistence. Applicable only to C(HTTP) and C(SSL) GSLB services." + + siteprefix: + description: + - >- + The site's prefix string. When the service is bound to a GSLB virtual server, a GSLB site domain is + generated internally for each bound service-domain pair by concatenating the site prefix of the + service and the name of the domain. If the special string NONE is specified, the site-prefix string + is unset. When implementing HTTP redirect site persistence, the NetScaler appliance redirects GSLB + requests to GSLB services by using their site domains. + + clttimeout: + description: + - >- + Idle time, in seconds, after which a client connection is terminated. Applicable if connection proxy + based site persistence is used. + - "Minimum value = 0" + - "Maximum value = 31536000" + + maxbandwidth: + description: + - >- + Integer specifying the maximum bandwidth allowed for the service. A GSLB service whose bandwidth + reaches the maximum is not considered when a GSLB decision is made, until its bandwidth consumption + drops below the maximum. + + downstateflush: + choices: + - 'enabled' + - 'disabled' + description: + - >- + Flush all active transactions associated with the GSLB service when its state transitions from UP to + DOWN. Do not enable this option for services that must complete their transactions. Applicable if + connection proxy based site persistence is used. + + maxaaausers: + description: + - >- + Maximum number of SSL VPN users that can be logged on concurrently to the VPN virtual server that is + represented by this GSLB service. 
A GSLB service whose user count reaches the maximum is not + considered when a GSLB decision is made, until the count drops below the maximum. + - "Minimum value = C(0)" + - "Maximum value = C(65535)" + + monthreshold: + description: + - >- + Monitoring threshold value for the GSLB service. If the sum of the weights of the monitors that are + bound to this GSLB service and are in the UP state is not equal to or greater than this threshold + value, the service is marked as DOWN. + - "Minimum value = C(0)" + - "Maximum value = C(65535)" + + hashid: + description: + - "Unique hash identifier for the GSLB service, used by hash based load balancing methods." + - "Minimum value = C(1)" + + comment: + description: + - "Any comments that you might want to associate with the GSLB service." + + appflowlog: + choices: + - 'enabled' + - 'disabled' + description: + - "Enable logging appflow flow information." + + ipaddress: + description: + - >- + IP address for the GSLB service. Should represent a load balancing, content switching, or VPN virtual + server on the NetScaler appliance, or the IP address of another load balancing device. + + monitor_bindings: + description: + - Bind monitors to this gslb service + suboptions: + + weight: + description: + - Weight to assign to the monitor-service binding. + - A larger number specifies a greater weight. + - Contributes to the monitoring threshold, which determines the state of the service. + - Minimum value = C(1) + - Maximum value = C(100) + + monitor_name: + description: + - Monitor name. + +extends_documentation_fragment: +- community.general.netscaler + +requirements: + - nitro python sdk +''' + +EXAMPLES = ''' +- name: Setup gslb service 2 + + delegate_to: localhost + register: result + check_mode: "{{ check_mode }}" + + netscaler_gslb_service: + operation: present + + servicename: gslb-service-2 + cnameentry: example.com + sitename: gslb-site-1 +''' + +RETURN = ''' +loglines: + description: list of logged messages by the module + returned: always + type: list + sample: "['message 1', 'message 2']" + +msg: + description: Message detailing the failure reason + returned: failure + type: str + sample: "Action does not exist" + +diff: + description: List of differences between the actual configured object and the configuration specified in the module + returned: failure + type: dict + sample: "{ 'targetlbvserver': 'difference. 
ours: (str) server1 other: (str) server2' }" +''' + +import copy + + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.network.netscaler.netscaler import ( + ConfigProxy, + get_nitro_client, + netscaler_common_arguments, + log, + loglines, + ensure_feature_is_enabled, + monkey_patch_nitro_api, + get_immutables_intersection, +) + +try: + monkey_patch_nitro_api() + from nssrc.com.citrix.netscaler.nitro.resource.config.gslb.gslbservice import gslbservice + from nssrc.com.citrix.netscaler.nitro.resource.config.gslb.gslbservice_lbmonitor_binding import gslbservice_lbmonitor_binding + from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception + PYTHON_SDK_IMPORTED = True +except ImportError as e: + PYTHON_SDK_IMPORTED = False + + +def gslb_service_exists(client, module): + if gslbservice.count_filtered(client, 'servicename:%s' % module.params['servicename']) > 0: + return True + else: + return False + + +def gslb_service_identical(client, module, gslb_service_proxy): + gslb_service_list = gslbservice.get_filtered(client, 'servicename:%s' % module.params['servicename']) + diff_dict = gslb_service_proxy.diff_object(gslb_service_list[0]) + # Ignore ip attribute missing + if 'ip' in diff_dict: + del diff_dict['ip'] + if len(diff_dict) == 0: + return True + else: + return False + + +def get_actual_monitor_bindings(client, module): + log('get_actual_monitor_bindings') + # Get actual monitor bindings and index them by monitor_name + actual_monitor_bindings = {} + if gslbservice_lbmonitor_binding.count(client, servicename=module.params['servicename']) != 0: + # Get all monitor bindings associated with the named gslb vserver + fetched_bindings = gslbservice_lbmonitor_binding.get(client, servicename=module.params['servicename']) + # index by monitor name + for binding in fetched_bindings: + # complete_missing_attributes(binding, gslbservice_lbmonitor_binding_rw_attrs, fill_value=None) + actual_monitor_bindings[binding.monitor_name] = binding + return actual_monitor_bindings + + +def get_configured_monitor_bindings(client, module): + log('get_configured_monitor_bindings') + configured_monitor_proxys = {} + gslbservice_lbmonitor_binding_rw_attrs = [ + 'weight', + 'servicename', + 'monitor_name', + ] + # Get configured monitor bindings and index them by monitor_name + if module.params['monitor_bindings'] is not None: + for configured_monitor_bindings in module.params['monitor_bindings']: + binding_values = copy.deepcopy(configured_monitor_bindings) + binding_values['servicename'] = module.params['servicename'] + proxy = ConfigProxy( + actual=gslbservice_lbmonitor_binding(), + client=client, + attribute_values_dict=binding_values, + readwrite_attrs=gslbservice_lbmonitor_binding_rw_attrs, + readonly_attrs=[], + ) + configured_monitor_proxys[configured_monitor_bindings['monitor_name']] = proxy + return configured_monitor_proxys + + +def monitor_bindings_identical(client, module): + log('monitor_bindings_identical') + actual_bindings = get_actual_monitor_bindings(client, module) + configured_proxys = get_configured_monitor_bindings(client, module) + + actual_keyset = set(actual_bindings.keys()) + configured_keyset = set(configured_proxys.keys()) + + symmetric_difference = actual_keyset ^ configured_keyset + if len(symmetric_difference) != 0: + log('Symmetric difference %s' % symmetric_difference) + return False + + # Item for item equality test + for key, proxy in configured_proxys.items(): + if not 
proxy.has_equal_attributes(actual_bindings[key]): + log('monitor binding difference %s' % proxy.diff_object(actual_bindings[key])) + return False + + # Fallthrough to True result + return True + + +def sync_monitor_bindings(client, module): + log('sync_monitor_bindings') + + actual_monitor_bindings = get_actual_monitor_bindings(client, module) + configured_monitor_proxys = get_configured_monitor_bindings(client, module) + + # Delete actual bindings not in configured bindings + for monitor_name, actual_binding in actual_monitor_bindings.items(): + if monitor_name not in configured_monitor_proxys.keys(): + log('Deleting absent binding for monitor %s' % monitor_name) + log('dir is %s' % dir(actual_binding)) + gslbservice_lbmonitor_binding.delete(client, actual_binding) + + # Delete and re-add actual bindings that differ from configured + for proxy_key, binding_proxy in configured_monitor_proxys.items(): + if proxy_key in actual_monitor_bindings: + actual_binding = actual_monitor_bindings[proxy_key] + if not binding_proxy.has_equal_attributes(actual_binding): + log('Deleting differing binding for monitor %s' % actual_binding.monitor_name) + log('dir %s' % dir(actual_binding)) + log('attribute monitor_name %s' % getattr(actual_binding, 'monitor_name')) + log('attribute monitorname %s' % getattr(actual_binding, 'monitorname', None)) + gslbservice_lbmonitor_binding.delete(client, actual_binding) + log('Adding anew binding for monitor %s' % binding_proxy.monitor_name) + binding_proxy.add() + + # Add configured monitors that are missing from actual + for proxy_key, binding_proxy in configured_monitor_proxys.items(): + if proxy_key not in actual_monitor_bindings.keys(): + log('Adding monitor binding for monitor %s' % binding_proxy.monitor_name) + binding_proxy.add() + + +def diff_list(client, module, gslb_service_proxy): + gslb_service_list = gslbservice.get_filtered(client, 'servicename:%s' % module.params['servicename']) + diff_list = gslb_service_proxy.diff_object(gslb_service_list[0]) + if 'ip' in diff_list: + del diff_list['ip'] + return diff_list + + +def all_identical(client, module, gslb_service_proxy): + return gslb_service_identical(client, module, gslb_service_proxy) and monitor_bindings_identical(client, module) + + +def main(): + + module_specific_arguments = dict( + servicename=dict(type='str'), + cnameentry=dict(type='str'), + servername=dict(type='str'), + servicetype=dict( + type='str', + choices=[ + 'HTTP', + 'FTP', + 'TCP', + 'UDP', + 'SSL', + 'SSL_BRIDGE', + 'SSL_TCP', + 'NNTP', + 'ANY', + 'SIP_UDP', + 'SIP_TCP', + 'SIP_SSL', + 'RADIUS', + 'RDP', + 'RTSP', + 'MYSQL', + 'MSSQL', + 'ORACLE', + ] + ), + port=dict(type='int'), + publicip=dict(type='str'), + publicport=dict(type='int'), + maxclient=dict(type='float'), + healthmonitor=dict(type='bool'), + sitename=dict(type='str'), + cip=dict( + type='str', + choices=[ + 'enabled', + 'disabled', + ] + ), + cipheader=dict(type='str'), + sitepersistence=dict( + type='str', + choices=[ + 'ConnectionProxy', + 'HTTPRedirect', + 'NONE', + ] + ), + siteprefix=dict(type='str'), + clttimeout=dict(type='float'), + maxbandwidth=dict(type='float'), + downstateflush=dict( + type='str', + choices=[ + 'enabled', + 'disabled', + ] + ), + maxaaausers=dict(type='float'), + monthreshold=dict(type='float'), + hashid=dict(type='float'), + comment=dict(type='str'), + appflowlog=dict( + type='str', + choices=[ + 'enabled', + 'disabled', + ] + ), + ipaddress=dict(type='str'), + ) + + hand_inserted_arguments = dict( + monitor_bindings=dict(type='list'), + ) + 
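
sync_monitor_bindings above uses the same reconcile shape as the policy binding sync in netscaler_cs_vserver: index the desired and actual bindings by key, then derive the delete, add and update sets with set arithmetic. A reduced, self-contained sketch of that pattern over plain dicts (all names invented):

    def reconcile(actual, configured):
        # Both arguments map a binding key to its attribute dict.
        actual_keys = set(actual)
        configured_keys = set(configured)
        to_delete = actual_keys - configured_keys
        to_add = configured_keys - actual_keys
        # Shared bindings are deleted and re-added when they differ,
        # mirroring the module's delete-then-add update path.
        to_update = {k for k in actual_keys & configured_keys
                     if actual[k] != configured[k]}
        return to_delete, to_add, to_update

    delete, add, update = reconcile(
        {'mon-1': {'weight': 1}, 'mon-2': {'weight': 5}},
        {'mon-2': {'weight': 10}, 'mon-3': {'weight': 1}},
    )
    assert (delete, add, update) == ({'mon-1'}, {'mon-3'}, {'mon-2'})
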
+    argument_spec = dict()
+
+    argument_spec.update(netscaler_common_arguments)
+    argument_spec.update(module_specific_arguments)
+    argument_spec.update(hand_inserted_arguments)
+
+    module = AnsibleModule(
+        argument_spec=argument_spec,
+        supports_check_mode=True,
+    )
+    module_result = dict(
+        changed=False,
+        failed=False,
+        loglines=loglines,
+    )
+
+    # Fail the module if imports failed
+    if not PYTHON_SDK_IMPORTED:
+        module.fail_json(msg='Could not load nitro python sdk')
+
+    # Fallthrough to rest of execution
+    client = get_nitro_client(module)
+
+    try:
+        client.login()
+    except nitro_exception as e:
+        msg = "nitro exception during login. errorcode=%s, message=%s" % (str(e.errorcode), e.message)
+        module.fail_json(msg=msg)
+    except Exception as e:
+        if str(type(e)) == "<class 'requests.exceptions.ConnectionError'>":
+            module.fail_json(msg='Connection error %s' % str(e))
+        elif str(type(e)) == "<class 'requests.exceptions.SSLError'>":
+            module.fail_json(msg='SSL Error %s' % str(e))
+        else:
+            module.fail_json(msg='Unexpected error during login %s' % str(e))
+
+    readwrite_attrs = [
+        'servicename',
+        'cnameentry',
+        'ip',
+        'servername',
+        'servicetype',
+        'port',
+        'publicip',
+        'publicport',
+        'maxclient',
+        'healthmonitor',
+        'sitename',
+        'cip',
+        'cipheader',
+        'sitepersistence',
+        'siteprefix',
+        'clttimeout',
+        'maxbandwidth',
+        'downstateflush',
+        'maxaaausers',
+        'monthreshold',
+        'hashid',
+        'comment',
+        'appflowlog',
+        'ipaddress',
+    ]
+
+    readonly_attrs = [
+        'gslb',
+        'svrstate',
+        'svreffgslbstate',
+        'gslbthreshold',
+        'gslbsvcstats',
+        'monstate',
+        'preferredlocation',
+        'monitor_state',
+        'statechangetimesec',
+        'tickssincelaststatechange',
+        'threshold',
+        'clmonowner',
+        'clmonview',
+        '__count',
+    ]
+
+    immutable_attrs = [
+        'servicename',
+        'cnameentry',
+        'ip',
+        'servername',
+        'servicetype',
+        'port',
+        'sitename',
+        'state',
+        'cipheader',
+        'cookietimeout',
+        'clttimeout',
+        'svrtimeout',
+        'viewip',
+        'monitor_name_svc',
+        'newname',
+    ]
+
+    transforms = {
+        'healthmonitor': ['bool_yes_no'],
+        'cip': [lambda v: v.upper()],
+        'downstateflush': [lambda v: v.upper()],
+        'appflowlog': [lambda v: v.upper()],
+    }
+
+    # params = copy.deepcopy(module.params)
+    module.params['ip'] = module.params['ipaddress']
+
+    # Instantiate config proxy
+    gslb_service_proxy = ConfigProxy(
+        actual=gslbservice(),
+        client=client,
+        attribute_values_dict=module.params,
+        transforms=transforms,
+        readwrite_attrs=readwrite_attrs,
+        readonly_attrs=readonly_attrs,
+        immutable_attrs=immutable_attrs,
+    )
+
+    try:
+        ensure_feature_is_enabled(client, 'GSLB')
+        # Apply appropriate state
+        if module.params['state'] == 'present':
+            if not gslb_service_exists(client, module):
+                if not module.check_mode:
+                    gslb_service_proxy.add()
+                    sync_monitor_bindings(client, module)
+                    if module.params['save_config']:
+                        client.save_config()
+                module_result['changed'] = True
+            elif not all_identical(client, module, gslb_service_proxy):
+
+                # Check if we try to change value of immutable attributes
+                immutables_changed = get_immutables_intersection(gslb_service_proxy, diff_list(client, module, gslb_service_proxy).keys())
+                if immutables_changed != []:
+                    module.fail_json(
+                        msg='Cannot update immutable attributes %s' % (immutables_changed,),
+                        diff=diff_list(client, module, gslb_service_proxy),
+                        **module_result
+                    )
+
+                # Update main configuration object
+                if not gslb_service_identical(client, module, gslb_service_proxy):
+                    if not module.check_mode:
+                        gslb_service_proxy.update()
+
+                # Update monitor bindings
+                if not monitor_bindings_identical(client, module):
+                    if not module.check_mode:
sync_monitor_bindings(client, module) + + # Fallthrough to save and change status update + module_result['changed'] = True + if module.params['save_config']: + client.save_config() + else: + module_result['changed'] = False + + # Sanity check for state + if not module.check_mode: + if not gslb_service_exists(client, module): + module.fail_json(msg='GSLB service does not exist', **module_result) + if not gslb_service_identical(client, module, gslb_service_proxy): + module.fail_json( + msg='GSLB service differs from configured', + diff=diff_list(client, module, gslb_service_proxy), + **module_result + ) + if not monitor_bindings_identical(client, module): + module.fail_json( + msg='Monitor bindings differ from configured', + diff=diff_list(client, module, gslb_service_proxy), + **module_result + ) + + elif module.params['state'] == 'absent': + if gslb_service_exists(client, module): + if not module.check_mode: + gslb_service_proxy.delete() + if module.params['save_config']: + client.save_config() + module_result['changed'] = True + else: + module_result['changed'] = False + + # Sanity check for state + if not module.check_mode: + if gslb_service_exists(client, module): + module.fail_json(msg='GSLB service still exists', **module_result) + + except nitro_exception as e: + msg = "nitro exception errorcode=%s, message=%s" % (str(e.errorcode), e.message) + module.fail_json(msg=msg, **module_result) + + client.logout() + module.exit_json(**module_result) + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/network/netscaler/netscaler_gslb_site.py b/plugins/modules/network/netscaler/netscaler_gslb_site.py new file mode 100644 index 0000000000..a84ac1c781 --- /dev/null +++ b/plugins/modules/network/netscaler/netscaler_gslb_site.py @@ -0,0 +1,424 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2017 Citrix Systems +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: netscaler_gslb_site +short_description: Manage gslb site entities in Netscaler. +description: + - Manage gslb site entities in Netscaler. + + +author: George Nikolopoulos (@giorgos-nikolopoulos) + +options: + + sitename: + description: + - >- + Name for the GSLB site. Must begin with an ASCII alphanumeric or underscore C(_) character, and must + contain only ASCII alphanumeric, underscore C(_), hash C(#), period C(.), space C( ), colon C(:), at C(@), equals + C(=), and hyphen C(-) characters. Cannot be changed after the virtual server is created. + - "Minimum length = 1" + + sitetype: + choices: + - 'REMOTE' + - 'LOCAL' + description: + - >- + Type of site to create. If the type is not specified, the appliance automatically detects and sets + the type on the basis of the IP address being assigned to the site. If the specified site IP address + is owned by the appliance (for example, a MIP address or SNIP address), the site is a local site. + Otherwise, it is a remote site. + + siteipaddress: + description: + - >- + IP address for the GSLB site. The GSLB site uses this IP address to communicate with other GSLB + sites. For a local site, use any IP address that is owned by the appliance (for example, a SNIP or + MIP address, or the IP address of the ADNS service). 
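
The sitetype auto-detection rule described above (LOCAL when the appliance owns the site IP address, REMOTE otherwise) can be stated in two lines. A hypothetical sketch, not module code:

    def infer_sitetype(siteipaddress, owned_addresses):
        # LOCAL when the appliance owns the IP (e.g. a SNIP or MIP address).
        return 'LOCAL' if siteipaddress in owned_addresses else 'REMOTE'

    assert infer_sitetype('192.168.1.1', {'192.168.1.1', '10.0.0.5'}) == 'LOCAL'
    assert infer_sitetype('203.0.113.7', {'192.168.1.1'}) == 'REMOTE'
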
+ - "Minimum length = 1" + + publicip: + description: + - >- + Public IP address for the local site. Required only if the appliance is deployed in a private address + space and the site has a public IP address hosted on an external firewall or a NAT device. + - "Minimum length = 1" + + metricexchange: + choices: + - 'enabled' + - 'disabled' + description: + - >- + Exchange metrics with other sites. Metrics are exchanged by using Metric Exchange Protocol (MEP). The + appliances in the GSLB setup exchange health information once every second. + - >- + If you disable metrics exchange, you can use only static load balancing methods (such as round robin, + static proximity, or the hash-based methods), and if you disable metrics exchange when a dynamic load + balancing method (such as least connection) is in operation, the appliance falls back to round robin. + Also, if you disable metrics exchange, you must use a monitor to determine the state of GSLB + services. Otherwise, the service is marked as DOWN. + + nwmetricexchange: + choices: + - 'enabled' + - 'disabled' + description: + - >- + Exchange, with other GSLB sites, network metrics such as round-trip time (RTT), learned from + communications with various local DNS (LDNS) servers used by clients. RTT information is used in the + dynamic RTT load balancing method, and is exchanged every 5 seconds. + + sessionexchange: + choices: + - 'enabled' + - 'disabled' + description: + - "Exchange persistent session entries with other GSLB sites every five seconds." + + triggermonitor: + choices: + - 'ALWAYS' + - 'MEPDOWN' + - 'MEPDOWN_SVCDOWN' + description: + - >- + Specify the conditions under which the GSLB service must be monitored by a monitor, if one is bound. + Available settings function as follows: + - "* C(ALWAYS) - Monitor the GSLB service at all times." + - >- + * C(MEPDOWN) - Monitor the GSLB service only when the exchange of metrics through the Metrics Exchange + Protocol (MEP) is disabled. + - "C(MEPDOWN_SVCDOWN) - Monitor the service in either of the following situations:" + - "* The exchange of metrics through MEP is disabled." + - >- + * The exchange of metrics through MEP is enabled but the status of the service, learned through + metrics exchange, is DOWN. + + parentsite: + description: + - "Parent site of the GSLB site, in a parent-child topology." + + clip: + description: + - >- + Cluster IP address. Specify this parameter to connect to the remote cluster site for GSLB auto-sync. + Note: The cluster IP address is defined when creating the cluster. + + publicclip: + description: + - >- + IP address to be used to globally access the remote cluster when it is deployed behind a NAT. It can + be same as the normal cluster IP address. + + naptrreplacementsuffix: + description: + - >- + The naptr replacement suffix configured here will be used to construct the naptr replacement field in + NAPTR record. 
+ - "Minimum length = 1" + + +extends_documentation_fragment: +- community.general.netscaler + +requirements: + - nitro python sdk +''' + +EXAMPLES = ''' +- name: Setup gslb site + delegate_to: localhost + netscaler_gslb_site: + nsip: 172.18.0.2 + nitro_user: nsroot + nitro_pass: nsroot + + sitename: gslb-site-1 + siteipaddress: 192.168.1.1 + sitetype: LOCAL + publicip: 192.168.1.1 + metricexchange: enabled + nwmetricexchange: enabled + sessionexchange: enabled + triggermonitor: ALWAYS + +''' + +RETURN = ''' +loglines: + description: list of logged messages by the module + returned: always + type: list + sample: "['message 1', 'message 2']" + +msg: + description: Message detailing the failure reason + returned: failure + type: str + sample: "Action does not exist" + +diff: + description: List of differences between the actual configured object and the configuration specified in the module + returned: failure + type: dict + sample: "{ 'targetlbvserver': 'difference. ours: (str) server1 other: (str) server2' }" +''' + +try: + from nssrc.com.citrix.netscaler.nitro.resource.config.gslb.gslbsite import gslbsite + from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception + PYTHON_SDK_IMPORTED = True +except ImportError as e: + PYTHON_SDK_IMPORTED = False + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.network.netscaler.netscaler import ( + ConfigProxy, + get_nitro_client, + netscaler_common_arguments, + log, + loglines, + ensure_feature_is_enabled, + get_immutables_intersection, +) + + +def gslb_site_exists(client, module): + if gslbsite.count_filtered(client, 'sitename:%s' % module.params['sitename']) > 0: + return True + else: + return False + + +def gslb_site_identical(client, module, gslb_site_proxy): + gslb_site_list = gslbsite.get_filtered(client, 'sitename:%s' % module.params['sitename']) + diff_dict = gslb_site_proxy.diff_object(gslb_site_list[0]) + if len(diff_dict) == 0: + return True + else: + return False + + +def diff_list(client, module, gslb_site_proxy): + gslb_site_list = gslbsite.get_filtered(client, 'sitename:%s' % module.params['sitename']) + return gslb_site_proxy.diff_object(gslb_site_list[0]) + + +def main(): + + module_specific_arguments = dict( + sitename=dict(type='str'), + sitetype=dict( + type='str', + choices=[ + 'REMOTE', + 'LOCAL', + ] + ), + siteipaddress=dict(type='str'), + publicip=dict(type='str'), + metricexchange=dict( + type='str', + choices=[ + 'enabled', + 'disabled', + ] + ), + nwmetricexchange=dict( + type='str', + choices=[ + 'enabled', + 'disabled', + ] + ), + sessionexchange=dict( + type='str', + choices=[ + 'enabled', + 'disabled', + ] + ), + triggermonitor=dict( + type='str', + choices=[ + 'ALWAYS', + 'MEPDOWN', + 'MEPDOWN_SVCDOWN', + ] + ), + parentsite=dict(type='str'), + clip=dict(type='str'), + publicclip=dict(type='str'), + naptrreplacementsuffix=dict(type='str'), + ) + + hand_inserted_arguments = dict( + ) + + argument_spec = dict() + + argument_spec.update(netscaler_common_arguments) + argument_spec.update(module_specific_arguments) + argument_spec.update(hand_inserted_arguments) + + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + module_result = dict( + changed=False, + failed=False, + loglines=loglines, + ) + + # Fail the module if imports failed + if not PYTHON_SDK_IMPORTED: + module.fail_json(msg='Could not load nitro python sdk') + + # Fallthrough to rest of execution + client = 
get_nitro_client(module)
+
+    try:
+        client.login()
+    except nitro_exception as e:
+        msg = "nitro exception during login. errorcode=%s, message=%s" % (str(e.errorcode), e.message)
+        module.fail_json(msg=msg)
+    except Exception as e:
+        if str(type(e)) == "<class 'requests.exceptions.ConnectionError'>":
+            module.fail_json(msg='Connection error %s' % str(e))
+        elif str(type(e)) == "<class 'requests.exceptions.SSLError'>":
+            module.fail_json(msg='SSL Error %s' % str(e))
+        else:
+            module.fail_json(msg='Unexpected error during login %s' % str(e))
+
+    readwrite_attrs = [
+        'sitename',
+        'sitetype',
+        'siteipaddress',
+        'publicip',
+        'metricexchange',
+        'nwmetricexchange',
+        'sessionexchange',
+        'triggermonitor',
+        'parentsite',
+        'clip',
+        'publicclip',
+        'naptrreplacementsuffix',
+    ]
+
+    readonly_attrs = [
+        'status',
+        'persistencemepstatus',
+        'version',
+        '__count',
+    ]
+
+    immutable_attrs = [
+        'sitename',
+        'sitetype',
+        'siteipaddress',
+        'publicip',
+        'parentsite',
+        'clip',
+        'publicclip',
+    ]
+
+    transforms = {
+        'metricexchange': [lambda v: v.upper()],
+        'nwmetricexchange': [lambda v: v.upper()],
+        'sessionexchange': [lambda v: v.upper()],
+    }
+
+    # Instantiate config proxy
+    gslb_site_proxy = ConfigProxy(
+        actual=gslbsite(),
+        client=client,
+        attribute_values_dict=module.params,
+        readwrite_attrs=readwrite_attrs,
+        readonly_attrs=readonly_attrs,
+        immutable_attrs=immutable_attrs,
+        transforms=transforms,
+    )
+
+    try:
+        ensure_feature_is_enabled(client, 'GSLB')
+
+        # Apply appropriate state
+        if module.params['state'] == 'present':
+            log('Applying actions for state present')
+            if not gslb_site_exists(client, module):
+                if not module.check_mode:
+                    gslb_site_proxy.add()
+                    if module.params['save_config']:
+                        client.save_config()
+                module_result['changed'] = True
+            elif not gslb_site_identical(client, module, gslb_site_proxy):
+
+                # Check if we try to change value of immutable attributes
+                immutables_changed = get_immutables_intersection(gslb_site_proxy, diff_list(client, module, gslb_site_proxy).keys())
+                if immutables_changed != []:
+                    module.fail_json(
+                        msg='Cannot update immutable attributes %s' % (immutables_changed,),
+                        diff=diff_list(client, module, gslb_site_proxy),
+                        **module_result
+                    )
+
+                if not module.check_mode:
+                    gslb_site_proxy.update()
+                    if module.params['save_config']:
+                        client.save_config()
+                module_result['changed'] = True
+            else:
+                module_result['changed'] = False
+
+            # Sanity check for state
+            if not module.check_mode:
+                log('Sanity checks for state present')
+                if not gslb_site_exists(client, module):
+                    module.fail_json(msg='GSLB site does not exist', **module_result)
+                if not gslb_site_identical(client, module, gslb_site_proxy):
+                    module.fail_json(msg='GSLB site differs from configured', diff=diff_list(client, module, gslb_site_proxy), **module_result)
+
+        elif module.params['state'] == 'absent':
+            log('Applying actions for state absent')
+            if gslb_site_exists(client, module):
+                if not module.check_mode:
+                    gslb_site_proxy.delete()
+                    if module.params['save_config']:
+                        client.save_config()
+                module_result['changed'] = True
+            else:
+                module_result['changed'] = False
+
+            # Sanity check for state
+            if not module.check_mode:
+                log('Sanity checks for state absent')
+                if gslb_site_exists(client, module):
+                    module.fail_json(msg='GSLB site still exists', **module_result)
+
+    except nitro_exception as e:
+        msg = "nitro exception errorcode=%s, message=%s" % (str(e.errorcode), e.message)
+        module.fail_json(msg=msg, **module_result)
+
+    client.logout()
+    module.exit_json(**module_result)
+
+
+if __name__ == "__main__":
+    main()
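Both modules above implement idempotency the same way: fetch the remote object, diff it against the desired attribute values, refuse changes to immutable attributes, and call update() only while a difference remains. The standalone sketch below illustrates that exists/identical/diff flow with hypothetical attribute data; DummyProxy.diff_object is a simplified stand-in for the collection's ConfigProxy helper and does not require the NITRO SDK.

# A minimal sketch of the exists/identical/diff pattern; all names and data
# here are hypothetical stand-ins, not part of the collection.
class DummyProxy(object):
    def __init__(self, configured):
        # dict of attribute name -> desired value
        self.configured = configured

    def diff_object(self, actual):
        # Report every attribute whose desired value differs from the actual one
        return dict(
            (key, 'ours: (str) %s other: (str) %s' % (value, actual.get(key)))
            for key, value in self.configured.items()
            if actual.get(key) != value
        )


actual_site = {'sitename': 'gslb-site-1', 'metricexchange': 'ENABLED'}
desired = DummyProxy({'sitename': 'gslb-site-1', 'metricexchange': 'DISABLED'})

diff = desired.diff_object(actual_site)
if diff:
    # A real module run would now check immutables and then call update()
    print('update needed: %s' % diff)
else:
    print('already identical, changed=False')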
diff --git a/plugins/modules/network/netscaler/netscaler_gslb_vserver.py b/plugins/modules/network/netscaler/netscaler_gslb_vserver.py
new file mode 100644
index 0000000000..ee0c8390a7
--- /dev/null
+++ b/plugins/modules/network/netscaler/netscaler_gslb_vserver.py
@@ -0,0 +1,955 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2017 Citrix Systems
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: netscaler_gslb_vserver
+short_description: Configure gslb vserver entities in Netscaler.
+description:
+    - Configure gslb vserver entities in Netscaler.
+
+
+author: George Nikolopoulos (@giorgos-nikolopoulos)
+
+options:
+
+    name:
+        description:
+            - >-
+                Name for the GSLB virtual server. Must begin with an ASCII alphanumeric or underscore C(_) character,
+                and must contain only ASCII alphanumeric, underscore C(_), hash C(#), period C(.), space, colon C(:), at C(@),
+                equals C(=), and hyphen C(-) characters. Can be changed after the virtual server is created.
+            - "Minimum length = 1"
+
+    servicetype:
+        choices:
+            - 'HTTP'
+            - 'FTP'
+            - 'TCP'
+            - 'UDP'
+            - 'SSL'
+            - 'SSL_BRIDGE'
+            - 'SSL_TCP'
+            - 'NNTP'
+            - 'ANY'
+            - 'SIP_UDP'
+            - 'SIP_TCP'
+            - 'SIP_SSL'
+            - 'RADIUS'
+            - 'RDP'
+            - 'RTSP'
+            - 'MYSQL'
+            - 'MSSQL'
+            - 'ORACLE'
+        description:
+            - "Protocol used by services bound to the virtual server."
+
+    dnsrecordtype:
+        choices:
+            - 'A'
+            - 'AAAA'
+            - 'CNAME'
+            - 'NAPTR'
+        description:
+            - "DNS record type to associate with the GSLB virtual server's domain name."
+            - "Default value: A"
+            - "Possible values = A, AAAA, CNAME, NAPTR"
+
+    lbmethod:
+        choices:
+            - 'ROUNDROBIN'
+            - 'LEASTCONNECTION'
+            - 'LEASTRESPONSETIME'
+            - 'SOURCEIPHASH'
+            - 'LEASTBANDWIDTH'
+            - 'LEASTPACKETS'
+            - 'STATICPROXIMITY'
+            - 'RTT'
+            - 'CUSTOMLOAD'
+        description:
+            - "Load balancing method for the GSLB virtual server."
+            - "Default value: LEASTCONNECTION"
+            - >-
+                Possible values = ROUNDROBIN, LEASTCONNECTION, LEASTRESPONSETIME, SOURCEIPHASH, LEASTBANDWIDTH,
+                LEASTPACKETS, STATICPROXIMITY, RTT, CUSTOMLOAD
+
+    backuplbmethod:
+        choices:
+            - 'ROUNDROBIN'
+            - 'LEASTCONNECTION'
+            - 'LEASTRESPONSETIME'
+            - 'SOURCEIPHASH'
+            - 'LEASTBANDWIDTH'
+            - 'LEASTPACKETS'
+            - 'STATICPROXIMITY'
+            - 'RTT'
+            - 'CUSTOMLOAD'
+        description:
+            - >-
+                Backup load balancing method. Becomes operational if the primary load balancing method fails or
+                cannot be used. Valid only if the primary method is based on either round-trip time (RTT) or static
+                proximity.
+
+    netmask:
+        description:
+            - "IPv4 network mask for use in the SOURCEIPHASH load balancing method."
+            - "Minimum length = 1"
+
+    v6netmasklen:
+        description:
+            - >-
+                Number of bits to consider, in an IPv6 source IP address, for creating the hash that is required by
+                the C(SOURCEIPHASH) load balancing method.
+            - "Default value: C(128)"
+            - "Minimum value = C(1)"
+            - "Maximum value = C(128)"
+
+    tolerance:
+        description:
+            - >-
+                Site selection tolerance, in milliseconds, for implementing the RTT load balancing method. If a
+                site's RTT deviates from the lowest RTT by more than the specified tolerance, the site is not
+                considered when the NetScaler appliance makes a GSLB decision.
The appliance implements the round + robin method of global server load balancing between sites whose RTT values are within the specified + tolerance. If the tolerance is 0 (zero), the appliance always sends clients the IP address of the + site with the lowest RTT. + - "Minimum value = C(0)" + - "Maximum value = C(100)" + + persistencetype: + choices: + - 'SOURCEIP' + - 'NONE' + description: + - "Use source IP address based persistence for the virtual server." + - >- + After the load balancing method selects a service for the first packet, the IP address received in + response to the DNS query is used for subsequent requests from the same client. + + persistenceid: + description: + - >- + The persistence ID for the GSLB virtual server. The ID is a positive integer that enables GSLB sites + to identify the GSLB virtual server, and is required if source IP address based or spill over based + persistence is enabled on the virtual server. + - "Minimum value = C(0)" + - "Maximum value = C(65535)" + + persistmask: + description: + - >- + The optional IPv4 network mask applied to IPv4 addresses to establish source IP address based + persistence. + - "Minimum length = 1" + + v6persistmasklen: + description: + - >- + Number of bits to consider in an IPv6 source IP address when creating source IP address based + persistence sessions. + - "Default value: C(128)" + - "Minimum value = C(1)" + - "Maximum value = C(128)" + + timeout: + description: + - "Idle time, in minutes, after which a persistence entry is cleared." + - "Default value: C(2)" + - "Minimum value = C(2)" + - "Maximum value = C(1440)" + + mir: + choices: + - 'enabled' + - 'disabled' + description: + - "Include multiple IP addresses in the DNS responses sent to clients." + + disableprimaryondown: + choices: + - 'enabled' + - 'disabled' + description: + - >- + Continue to direct traffic to the backup chain even after the primary GSLB virtual server returns to + the UP state. Used when spillover is configured for the virtual server. + + dynamicweight: + choices: + - 'SERVICECOUNT' + - 'SERVICEWEIGHT' + - 'DISABLED' + description: + - >- + Specify if the appliance should consider the service count, service weights, or ignore both when + using weight-based load balancing methods. The state of the number of services bound to the virtual + server help the appliance to select the service. + + considereffectivestate: + choices: + - 'NONE' + - 'STATE_ONLY' + description: + - >- + If the primary state of all bound GSLB services is DOWN, consider the effective states of all the + GSLB services, obtained through the Metrics Exchange Protocol (MEP), when determining the state of + the GSLB virtual server. To consider the effective state, set the parameter to STATE_ONLY. To + disregard the effective state, set the parameter to NONE. + - >- + The effective state of a GSLB service is the ability of the corresponding virtual server to serve + traffic. The effective state of the load balancing virtual server, which is transferred to the GSLB + service, is UP even if only one virtual server in the backup chain of virtual servers is in the UP + state. + + comment: + description: + - "Any comments that you might want to associate with the GSLB virtual server." + + somethod: + choices: + - 'CONNECTION' + - 'DYNAMICCONNECTION' + - 'BANDWIDTH' + - 'HEALTH' + - 'NONE' + description: + - "Type of threshold that, when exceeded, triggers spillover. 
Available settings function as follows:"
+            - "* C(CONNECTION) - Spillover occurs when the number of client connections exceeds the threshold."
+            - >-
+                * C(DYNAMICCONNECTION) - Spillover occurs when the number of client connections at the GSLB virtual
+                server exceeds the sum of the maximum client (Max Clients) settings for bound GSLB services. Do not
+                specify a spillover threshold for this setting, because the threshold is implied by the Max Clients
+                settings of the bound GSLB services.
+            - >-
+                * C(BANDWIDTH) - Spillover occurs when the bandwidth consumed by the GSLB virtual server's incoming and
+                outgoing traffic exceeds the threshold.
+            - >-
+                * C(HEALTH) - Spillover occurs when the percentage of weights of the GSLB services that are UP drops
+                below the threshold. For example, if services gslbSvc1, gslbSvc2, and gslbSvc3 are bound to a virtual
+                server, with weights 1, 2, and 3, and the spillover threshold is 50%, spillover occurs if gslbSvc1
+                and gslbSvc3 or gslbSvc2 and gslbSvc3 transition to DOWN.
+            - "* C(NONE) - Spillover does not occur."
+
+    sopersistence:
+        choices:
+            - 'enabled'
+            - 'disabled'
+        description:
+            - >-
+                If spillover occurs, maintain source IP address based persistence for both primary and backup GSLB
+                virtual servers.
+
+    sopersistencetimeout:
+        description:
+            - "Timeout for spillover persistence, in minutes."
+            - "Default value: C(2)"
+            - "Minimum value = C(2)"
+            - "Maximum value = C(1440)"
+
+    sothreshold:
+        description:
+            - >-
+                Threshold at which spillover occurs. Specify an integer for the CONNECTION spillover method, a
+                bandwidth value in kilobits per second for the BANDWIDTH method (do not enter the units), or a
+                percentage for the HEALTH method (do not enter the percentage symbol).
+            - "Minimum value = C(1)"
+            - "Maximum value = C(4294967287)"
+
+    sobackupaction:
+        choices:
+            - 'DROP'
+            - 'ACCEPT'
+            - 'REDIRECT'
+        description:
+            - >-
+                Action to be performed if spillover is to take effect, but no backup chain to spillover is usable or
+                exists.
+
+    appflowlog:
+        choices:
+            - 'enabled'
+            - 'disabled'
+        description:
+            - "Enable logging appflow flow information."
+
+    domain_bindings:
+        description:
+            - >-
+                List of bindings for domains for this gslb vserver.
+        suboptions:
+            cookietimeout:
+                description:
+                    - Timeout, in minutes, for the GSLB site cookie.
+
+            domainname:
+                description:
+                    - Domain name for which to change the time to live (TTL) and/or backup service IP address.
+
+            ttl:
+                description:
+                    - Time to live (TTL) for the domain.
+
+            sitedomainttl:
+                description:
+                    - >-
+                        TTL, in seconds, for all internally created site domains (created when a site prefix is
+                        configured on a GSLB service) that are associated with this virtual server.
+                    - Minimum value = C(1)
+
+    service_bindings:
+        description:
+            - List of bindings for gslb services bound to this gslb virtual server.
+        suboptions:
+            servicename:
+                description:
+                    - Name of the GSLB service for which to change the weight.
+            weight:
+                description:
+                    - Weight to assign to the GSLB service.
+
+    disabled:
+        description:
+            - When set to C(yes) the GSLB Vserver state will be set to C(disabled).
+            - When set to C(no) the GSLB Vserver state will be set to C(enabled).
+            - >-
+                Note that due to limitations of the underlying NITRO API a C(disabled) state change alone
+                does not cause the module result to report a changed status.
+ type: bool + default: false + + + + +extends_documentation_fragment: +- community.general.netscaler + +requirements: + - nitro python sdk +''' + +EXAMPLES = ''' +''' + +RETURN = ''' +''' + + +import copy + +try: + from nssrc.com.citrix.netscaler.nitro.resource.config.gslb.gslbvserver import gslbvserver + from nssrc.com.citrix.netscaler.nitro.resource.config.gslb.gslbvserver_gslbservice_binding import gslbvserver_gslbservice_binding + from nssrc.com.citrix.netscaler.nitro.resource.config.gslb.gslbvserver_domain_binding import gslbvserver_domain_binding + from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception + PYTHON_SDK_IMPORTED = True +except ImportError as e: + PYTHON_SDK_IMPORTED = False + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.network.netscaler.netscaler import ( + ConfigProxy, + get_nitro_client, + netscaler_common_arguments, + log, + loglines, + ensure_feature_is_enabled, + get_immutables_intersection, + complete_missing_attributes +) + + +gslbvserver_domain_binding_rw_attrs = [ + 'name', + 'domainname', + 'backupipflag', + 'cookietimeout', + 'backupip', + 'ttl', + 'sitedomainttl', + 'cookie_domainflag', +] + +gslbvserver_gslbservice_binding_rw_attrs = [ + 'name', + 'servicename', + 'weight', +] + + +def get_actual_domain_bindings(client, module): + log('get_actual_domain_bindings') + # Get actual domain bindings and index them by domainname + actual_domain_bindings = {} + if gslbvserver_domain_binding.count(client, name=module.params['name']) != 0: + # Get all domain bindings associated with the named gslb vserver + fetched_domain_bindings = gslbvserver_domain_binding.get(client, name=module.params['name']) + # index by domainname + for binding in fetched_domain_bindings: + complete_missing_attributes(binding, gslbvserver_domain_binding_rw_attrs, fill_value=None) + actual_domain_bindings[binding.domainname] = binding + return actual_domain_bindings + + +def get_configured_domain_bindings_proxys(client, module): + log('get_configured_domain_bindings_proxys') + configured_domain_proxys = {} + # Get configured domain bindings and index them by domainname + if module.params['domain_bindings'] is not None: + for configured_domain_binding in module.params['domain_bindings']: + binding_values = copy.deepcopy(configured_domain_binding) + binding_values['name'] = module.params['name'] + gslbvserver_domain_binding_proxy = ConfigProxy( + actual=gslbvserver_domain_binding(), + client=client, + attribute_values_dict=binding_values, + readwrite_attrs=gslbvserver_domain_binding_rw_attrs, + readonly_attrs=[], + ) + configured_domain_proxys[configured_domain_binding['domainname']] = gslbvserver_domain_binding_proxy + return configured_domain_proxys + + +def sync_domain_bindings(client, module): + log('sync_domain_bindings') + + actual_domain_bindings = get_actual_domain_bindings(client, module) + configured_domain_proxys = get_configured_domain_bindings_proxys(client, module) + + # Delete actual bindings not in configured bindings + for domainname, actual_domain_binding in actual_domain_bindings.items(): + if domainname not in configured_domain_proxys.keys(): + log('Deleting absent binding for domain %s' % domainname) + gslbvserver_domain_binding.delete(client, actual_domain_binding) + + # Delete actual bindings that differ from configured + for proxy_key, binding_proxy in configured_domain_proxys.items(): + if proxy_key in actual_domain_bindings: + actual_binding = 
actual_domain_bindings[proxy_key]
+            if not binding_proxy.has_equal_attributes(actual_binding):
+                log('Deleting differing binding for domain %s' % binding_proxy.domainname)
+                gslbvserver_domain_binding.delete(client, actual_binding)
+                log('Adding a new binding for domain %s' % binding_proxy.domainname)
+                binding_proxy.add()
+
+    # Add configured domains that are missing from actual
+    for proxy_key, binding_proxy in configured_domain_proxys.items():
+        if proxy_key not in actual_domain_bindings.keys():
+            log('Adding domain binding for domain %s' % binding_proxy.domainname)
+            binding_proxy.add()
+
+
+def domain_bindings_identical(client, module):
+    log('domain_bindings_identical')
+    actual_domain_bindings = get_actual_domain_bindings(client, module)
+    configured_domain_proxys = get_configured_domain_bindings_proxys(client, module)
+
+    actual_keyset = set(actual_domain_bindings.keys())
+    configured_keyset = set(configured_domain_proxys.keys())
+
+    symmetric_difference = actual_keyset ^ configured_keyset
+
+    log('symmetric difference %s' % symmetric_difference)
+    if len(symmetric_difference) != 0:
+        return False
+
+    # Item for item equality test
+    for key, proxy in configured_domain_proxys.items():
+        diff = proxy.diff_object(actual_domain_bindings[key])
+        if 'backupipflag' in diff:
+            del diff['backupipflag']
+        if not len(diff) == 0:
+            return False
+    # Fallthrough to True result
+    return True
+
+
+def get_actual_service_bindings(client, module):
+    log('get_actual_service_bindings')
+    # Get actual service bindings and index them by servicename
+    actual_bindings = {}
+    if gslbvserver_gslbservice_binding.count(client, name=module.params['name']) != 0:
+        # Get all service bindings associated with the named gslb vserver
+        fetched_bindings = gslbvserver_gslbservice_binding.get(client, name=module.params['name'])
+        # index by servicename
+        for binding in fetched_bindings:
+            complete_missing_attributes(binding, gslbvserver_gslbservice_binding_rw_attrs, fill_value=None)
+            actual_bindings[binding.servicename] = binding
+
+    return actual_bindings
+
+
+def get_configured_service_bindings(client, module):
+    log('get_configured_service_bindings')
+    configured_proxys = {}
+    # Get configured service bindings and index them by servicename
+    if module.params['service_bindings'] is not None:
+        for configured_binding in module.params['service_bindings']:
+            binding_values = copy.deepcopy(configured_binding)
+            binding_values['name'] = module.params['name']
+            gslbvserver_service_binding_proxy = ConfigProxy(
+                actual=gslbvserver_gslbservice_binding(),
+                client=client,
+                attribute_values_dict=binding_values,
+                readwrite_attrs=gslbvserver_gslbservice_binding_rw_attrs,
+                readonly_attrs=[],
+            )
+            configured_proxys[configured_binding['servicename']] = gslbvserver_service_binding_proxy
+    return configured_proxys
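+
+
+# A minimal sketch of the reconciliation strategy shared by
+# sync_domain_bindings() above and sync_service_bindings() below: bindings
+# are indexed by key, extraneous keys are deleted, keys whose attributes
+# differ are recreated, and missing keys are added. The data shown is
+# hypothetical and this helper is illustrative only; the module never calls it.
+def _reconcile_bindings_sketch(actual, configured):
+    to_delete = sorted(set(actual) - set(configured))
+    to_recreate = sorted(
+        key for key in set(actual) & set(configured)
+        if actual[key] != configured[key]
+    )
+    to_add = sorted(set(configured) - set(actual))
+    return to_delete, to_recreate, to_add
+
+
+# Example: _reconcile_bindings_sketch({'a': 1, 'b': 2}, {'b': 3, 'c': 4})
+# returns (['a'], ['b'], ['c']).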
+def sync_service_bindings(client, module):
+    actual = get_actual_service_bindings(client, module)
+    configured = get_configured_service_bindings(client, module)
+
+    # Delete extraneous
+    extraneous_service_bindings = list(set(actual.keys()) - set(configured.keys()))
+    for servicename in extraneous_service_bindings:
+        log('Deleting extraneous binding for service %s' % servicename)
+        binding = actual[servicename]
+        binding.name = module.params['name']
+        gslbvserver_gslbservice_binding.delete(client, binding)
+
+    # Recreate different
+    common_service_bindings = list(set(actual.keys()) & set(configured.keys()))
+    for servicename in common_service_bindings:
+        proxy = configured[servicename]
+        binding = actual[servicename]
+        if not proxy.has_equal_attributes(binding):
+            log('Recreating differing service binding %s' % servicename)
+            gslbvserver_gslbservice_binding.delete(client, binding)
+            proxy.add()
+
+    # Add missing
+    missing_service_bindings = list(set(configured.keys()) - set(actual.keys()))
+    for servicename in missing_service_bindings:
+        proxy = configured[servicename]
+        log('Adding missing service binding %s' % servicename)
+        proxy.add()
+
+
+def service_bindings_identical(client, module):
+    actual_bindings = get_actual_service_bindings(client, module)
+    configured_proxys = get_configured_service_bindings(client, module)
+
+    actual_keyset = set(actual_bindings.keys())
+    configured_keyset = set(configured_proxys.keys())
+
+    symmetric_difference = actual_keyset ^ configured_keyset
+    if len(symmetric_difference) != 0:
+        return False
+
+    # Item for item equality test
+    for key, proxy in configured_proxys.items():
+        if key in actual_bindings.keys():
+            if not proxy.has_equal_attributes(actual_bindings[key]):
+                return False
+
+    # Fallthrough to True result
+    return True
+
+
+def gslb_vserver_exists(client, module):
+    if gslbvserver.count_filtered(client, 'name:%s' % module.params['name']) > 0:
+        return True
+    else:
+        return False
+
+
+def gslb_vserver_identical(client, module, gslb_vserver_proxy):
+    gslb_vserver_list = gslbvserver.get_filtered(client, 'name:%s' % module.params['name'])
+    diff_dict = gslb_vserver_proxy.diff_object(gslb_vserver_list[0])
+    if len(diff_dict) != 0:
+        return False
+    else:
+        return True
+
+
+def all_identical(client, module, gslb_vserver_proxy):
+    return (
+        gslb_vserver_identical(client, module, gslb_vserver_proxy) and
+        domain_bindings_identical(client, module) and
+        service_bindings_identical(client, module)
+    )
+
+
+def diff_list(client, module, gslb_vserver_proxy):
+    gslb_vserver_list = gslbvserver.get_filtered(client, 'name:%s' % module.params['name'])
+    return gslb_vserver_proxy.diff_object(gslb_vserver_list[0])
+
+
+def do_state_change(client, module, gslb_vserver_proxy):
+    if module.params['disabled']:
+        log('Disabling gslb vserver')
+        result = gslbvserver.disable(client, gslb_vserver_proxy.actual)
+    else:
+        log('Enabling gslb vserver')
+        result = gslbvserver.enable(client, gslb_vserver_proxy.actual)
+    return result
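+
+
+# A minimal sketch of how the transforms table passed to ConfigProxy in
+# main() below is assumed to behave: each parameter value is run through its
+# chain of callables before being compared with (and sent to) the NITRO side.
+# The values are hypothetical and this helper is illustrative only.
+def _apply_transforms_sketch(transforms, params):
+    result = dict(params)
+    for attribute, chain in transforms.items():
+        if result.get(attribute) is not None:
+            for transform in chain:
+                result[attribute] = transform(result[attribute])
+    return result
+
+
+# Example: _apply_transforms_sketch({'mir': [lambda v: v.upper()]}, {'mir': 'enabled'})
+# returns {'mir': 'ENABLED'}.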
+def main():
+
+    module_specific_arguments = dict(
+        name=dict(type='str'),
+        servicetype=dict(
+            type='str',
+            choices=[
+                'HTTP',
+                'FTP',
+                'TCP',
+                'UDP',
+                'SSL',
+                'SSL_BRIDGE',
+                'SSL_TCP',
+                'NNTP',
+                'ANY',
+                'SIP_UDP',
+                'SIP_TCP',
+                'SIP_SSL',
+                'RADIUS',
+                'RDP',
+                'RTSP',
+                'MYSQL',
+                'MSSQL',
+                'ORACLE',
+            ]
+        ),
+        dnsrecordtype=dict(
+            type='str',
+            choices=[
+                'A',
+                'AAAA',
+                'CNAME',
+                'NAPTR',
+            ]
+        ),
+        lbmethod=dict(
+            type='str',
+            choices=[
+                'ROUNDROBIN',
+                'LEASTCONNECTION',
+                'LEASTRESPONSETIME',
+                'SOURCEIPHASH',
+                'LEASTBANDWIDTH',
+                'LEASTPACKETS',
+                'STATICPROXIMITY',
+                'RTT',
+                'CUSTOMLOAD',
+            ]
+        ),
+        backuplbmethod=dict(
+            type='str',
+            choices=[
+                'ROUNDROBIN',
+                'LEASTCONNECTION',
+                'LEASTRESPONSETIME',
+                'SOURCEIPHASH',
+                'LEASTBANDWIDTH',
+                'LEASTPACKETS',
+                'STATICPROXIMITY',
+                'RTT',
+                'CUSTOMLOAD',
+            ]
+        ),
+        netmask=dict(type='str'),
+        v6netmasklen=dict(type='float'),
+        tolerance=dict(type='float'),
+        persistencetype=dict(
+            type='str',
+            choices=[
+                'SOURCEIP',
+                'NONE',
+            ]
+        ),
+        persistenceid=dict(type='float'),
+        persistmask=dict(type='str'),
+        v6persistmasklen=dict(type='float'),
+        timeout=dict(type='float'),
+        mir=dict(
+            type='str',
+            choices=[
+                'enabled',
+                'disabled',
+            ]
+        ),
+        disableprimaryondown=dict(
+            type='str',
+            choices=[
+                'enabled',
+                'disabled',
+            ]
+        ),
+        dynamicweight=dict(
+            type='str',
+            choices=[
+                'SERVICECOUNT',
+                'SERVICEWEIGHT',
+                'DISABLED',
+            ]
+        ),
+        considereffectivestate=dict(
+            type='str',
+            choices=[
+                'NONE',
+                'STATE_ONLY',
+            ]
+        ),
+        comment=dict(type='str'),
+        somethod=dict(
+            type='str',
+            choices=[
+                'CONNECTION',
+                'DYNAMICCONNECTION',
+                'BANDWIDTH',
+                'HEALTH',
+                'NONE',
+            ]
+        ),
+        sopersistence=dict(
+            type='str',
+            choices=[
+                'enabled',
+                'disabled',
+            ]
+        ),
+        sopersistencetimeout=dict(type='float'),
+        sothreshold=dict(type='float'),
+        sobackupaction=dict(
+            type='str',
+            choices=[
+                'DROP',
+                'ACCEPT',
+                'REDIRECT',
+            ]
+        ),
+        appflowlog=dict(
+            type='str',
+            choices=[
+                'enabled',
+                'disabled',
+            ]
+        ),
+        domainname=dict(type='str'),
+        cookie_domain=dict(type='str'),
+    )
+
+    hand_inserted_arguments = dict(
+        domain_bindings=dict(type='list'),
+        service_bindings=dict(type='list'),
+        disabled=dict(
+            type='bool',
+            default=False,
+        ),
+    )
+
+    argument_spec = dict()
+
+    argument_spec.update(netscaler_common_arguments)
+    argument_spec.update(module_specific_arguments)
+    argument_spec.update(hand_inserted_arguments)
+
+    module = AnsibleModule(
+        argument_spec=argument_spec,
+        supports_check_mode=True,
+    )
+    module_result = dict(
+        changed=False,
+        failed=False,
+        loglines=loglines,
+    )
+
+    # Fail the module if imports failed
+    if not PYTHON_SDK_IMPORTED:
+        module.fail_json(msg='Could not load nitro python sdk')
+
+    # Fallthrough to rest of execution
+    client = get_nitro_client(module)
+
+    try:
+        client.login()
+    except nitro_exception as e:
+        msg = "nitro exception during login. errorcode=%s, message=%s" % (str(e.errorcode), e.message)
+        module.fail_json(msg=msg)
+    except Exception as e:
+        if str(type(e)) == "<class 'requests.exceptions.ConnectionError'>":
+            module.fail_json(msg='Connection error %s' % str(e))
+        elif str(type(e)) == "<class 'requests.exceptions.SSLError'>":
+            module.fail_json(msg='SSL Error %s' % str(e))
+        else:
+            module.fail_json(msg='Unexpected error during login %s' % str(e))
+
+    readwrite_attrs = [
+        'name',
+        'servicetype',
+        'dnsrecordtype',
+        'lbmethod',
+        'backuplbmethod',
+        'netmask',
+        'v6netmasklen',
+        'tolerance',
+        'persistencetype',
+        'persistenceid',
+        'persistmask',
+        'v6persistmasklen',
+        'timeout',
+        'mir',
+        'disableprimaryondown',
+        'dynamicweight',
+        'considereffectivestate',
+        'comment',
+        'somethod',
+        'sopersistence',
+        'sopersistencetimeout',
+        'sothreshold',
+        'sobackupaction',
+        'appflowlog',
+        'cookie_domain',
+    ]
+
+    readonly_attrs = [
+        'curstate',
+        'status',
+        'lbrrreason',
+        'iscname',
+        'sitepersistence',
+        'totalservices',
+        'activeservices',
+        'statechangetimesec',
+        'statechangetimemsec',
+        'tickssincelaststatechange',
+        'health',
+        'policyname',
+        'priority',
+        'gotopriorityexpression',
+        'type',
+        'vsvrbindsvcip',
+        'vsvrbindsvcport',
+        '__count',
+    ]
+
+    immutable_attrs = [
+        'name',
+        'servicetype',
+    ]
+
+    transforms = {
+        'mir': [lambda v: v.upper()],
+        'disableprimaryondown': [lambda v: v.upper()],
+        'sopersistence': [lambda v: v.upper()],
+        'appflowlog': [lambda v: v.upper()],
+    }
+
+    # Instantiate config proxy
+    gslb_vserver_proxy = ConfigProxy(
+        actual=gslbvserver(),
+        client=client,
+        attribute_values_dict=module.params,
+        readwrite_attrs=readwrite_attrs,
+        readonly_attrs=readonly_attrs,
+        immutable_attrs=immutable_attrs,
+        transforms=transforms,
+    )
+
+    try:
+        ensure_feature_is_enabled(client, 'GSLB')
+        # Apply appropriate state
+        if module.params['state'] == 'present':
+            log('Applying state present')
+            if not gslb_vserver_exists(client,
module): + log('Creating object') + if not module.check_mode: + gslb_vserver_proxy.add() + sync_domain_bindings(client, module) + sync_service_bindings(client, module) + if module.params['save_config']: + client.save_config() + module_result['changed'] = True + elif not all_identical(client, module, gslb_vserver_proxy): + log('Entering update actions') + + # Check if we try to change value of immutable attributes + if not gslb_vserver_identical(client, module, gslb_vserver_proxy): + log('Updating gslb vserver') + immutables_changed = get_immutables_intersection(gslb_vserver_proxy, diff_list(client, module, gslb_vserver_proxy).keys()) + if immutables_changed != []: + module.fail_json( + msg='Cannot update immutable attributes %s' % (immutables_changed,), + diff=diff_list(client, module, gslb_vserver_proxy), + **module_result + ) + if not module.check_mode: + gslb_vserver_proxy.update() + + # Update domain bindings + if not domain_bindings_identical(client, module): + if not module.check_mode: + sync_domain_bindings(client, module) + + # Update service bindings + if not service_bindings_identical(client, module): + if not module.check_mode: + sync_service_bindings(client, module) + + module_result['changed'] = True + if not module.check_mode: + if module.params['save_config']: + client.save_config() + else: + module_result['changed'] = False + + if not module.check_mode: + res = do_state_change(client, module, gslb_vserver_proxy) + if res.errorcode != 0: + msg = 'Error when setting disabled state. errorcode: %s message: %s' % (res.errorcode, res.message) + module.fail_json(msg=msg, **module_result) + + # Sanity check for state + if not module.check_mode: + if not gslb_vserver_exists(client, module): + module.fail_json(msg='GSLB Vserver does not exist', **module_result) + if not gslb_vserver_identical(client, module, gslb_vserver_proxy): + module.fail_json(msg='GSLB Vserver differs from configured', diff=diff_list(client, module, gslb_vserver_proxy), **module_result) + if not domain_bindings_identical(client, module): + module.fail_json(msg='Domain bindings differ from configured', diff=diff_list(client, module, gslb_vserver_proxy), **module_result) + if not service_bindings_identical(client, module): + module.fail_json(msg='Service bindings differ from configured', diff=diff_list(client, module, gslb_vserver_proxy), **module_result) + + elif module.params['state'] == 'absent': + + if gslb_vserver_exists(client, module): + if not module.check_mode: + gslb_vserver_proxy.delete() + if module.params['save_config']: + client.save_config() + module_result['changed'] = True + else: + module_result['changed'] = False + + # Sanity check for state + if not module.check_mode: + if gslb_vserver_exists(client, module): + module.fail_json(msg='GSLB Vserver still exists', **module_result) + + except nitro_exception as e: + msg = "nitro exception errorcode=%s, message=%s" % (str(e.errorcode), e.message) + module.fail_json(msg=msg, **module_result) + + client.logout() + module.exit_json(**module_result) + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/network/netscaler/netscaler_lb_monitor.py b/plugins/modules/network/netscaler/netscaler_lb_monitor.py new file mode 100644 index 0000000000..f2f5ad126a --- /dev/null +++ b/plugins/modules/network/netscaler/netscaler_lb_monitor.py @@ -0,0 +1,1381 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2017 Citrix Systems +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ 
import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +module: netscaler_lb_monitor +short_description: Manage load balancing monitors +description: + - Manage load balancing monitors. + - This module is intended to run either on the ansible control node or a bastion (jumpserver) with access to the actual netscaler instance. + + +author: George Nikolopoulos (@giorgos-nikolopoulos) + +options: + + monitorname: + description: + - >- + Name for the monitor. Must begin with an ASCII alphanumeric or underscore C(_) character, and must + contain only ASCII alphanumeric, underscore, hash C(#), period C(.), space C( ), colon C(:), at C(@), equals + C(=), and hyphen C(-) characters. + - "Minimum length = 1" + + type: + choices: + - 'PING' + - 'TCP' + - 'HTTP' + - 'TCP-ECV' + - 'HTTP-ECV' + - 'UDP-ECV' + - 'DNS' + - 'FTP' + - 'LDNS-PING' + - 'LDNS-TCP' + - 'LDNS-DNS' + - 'RADIUS' + - 'USER' + - 'HTTP-INLINE' + - 'SIP-UDP' + - 'SIP-TCP' + - 'LOAD' + - 'FTP-EXTENDED' + - 'SMTP' + - 'SNMP' + - 'NNTP' + - 'MYSQL' + - 'MYSQL-ECV' + - 'MSSQL-ECV' + - 'ORACLE-ECV' + - 'LDAP' + - 'POP3' + - 'CITRIX-XML-SERVICE' + - 'CITRIX-WEB-INTERFACE' + - 'DNS-TCP' + - 'RTSP' + - 'ARP' + - 'CITRIX-AG' + - 'CITRIX-AAC-LOGINPAGE' + - 'CITRIX-AAC-LAS' + - 'CITRIX-XD-DDC' + - 'ND6' + - 'CITRIX-WI-EXTENDED' + - 'DIAMETER' + - 'RADIUS_ACCOUNTING' + - 'STOREFRONT' + - 'APPC' + - 'SMPP' + - 'CITRIX-XNC-ECV' + - 'CITRIX-XDM' + - 'CITRIX-STA-SERVICE' + - 'CITRIX-STA-SERVICE-NHOP' + description: + - "Type of monitor that you want to create." + + action: + choices: + - 'NONE' + - 'LOG' + - 'DOWN' + description: + - >- + Action to perform when the response to an inline monitor (a monitor of type C(HTTP-INLINE)) indicates + that the service is down. A service monitored by an inline monitor is considered C(DOWN) if the response + code is not one of the codes that have been specified for the Response Code parameter. + - "Available settings function as follows:" + - >- + * C(NONE) - Do not take any action. However, the show service command and the show lb monitor command + indicate the total number of responses that were checked and the number of consecutive error + responses received after the last successful probe. + - "* C(LOG) - Log the event in NSLOG or SYSLOG." + - >- + * C(DOWN) - Mark the service as being down, and then do not direct any traffic to the service until the + configured down time has expired. Persistent connections to the service are terminated as soon as the + service is marked as C(DOWN). Also, log the event in NSLOG or SYSLOG. + + respcode: + description: + - >- + Response codes for which to mark the service as UP. For any other response code, the action performed + depends on the monitor type. C(HTTP) monitors and C(RADIUS) monitors mark the service as C(DOWN), while + C(HTTP-INLINE) monitors perform the action indicated by the Action parameter. + + httprequest: + description: + - 'HTTP request to send to the server (for example, C("HEAD /file.html")).' + + rtsprequest: + description: + - 'RTSP request to send to the server (for example, C("OPTIONS *")).' + + customheaders: + description: + - "Custom header string to include in the monitoring probes." + + maxforwards: + description: + - >- + Maximum number of hops that the SIP request used for monitoring can traverse to reach the server. + Applicable only to monitors of type C(SIP-UDP). 
+ - "Minimum value = C(0)" + - "Maximum value = C(255)" + + sipmethod: + choices: + - 'OPTIONS' + - 'INVITE' + - 'REGISTER' + description: + - "SIP method to use for the query. Applicable only to monitors of type C(SIP-UDP)." + + sipuri: + description: + - >- + SIP URI string to send to the service (for example, C(sip:sip.test)). Applicable only to monitors of + type C(SIP-UDP). + - "Minimum length = 1" + + sipreguri: + description: + - >- + SIP user to be registered. Applicable only if the monitor is of type C(SIP-UDP) and the SIP Method + parameter is set to C(REGISTER). + - "Minimum length = 1" + + send: + description: + - "String to send to the service. Applicable to C(TCP-ECV), C(HTTP-ECV), and C(UDP-ECV) monitors." + + recv: + description: + - >- + String expected from the server for the service to be marked as UP. Applicable to C(TCP-ECV), C(HTTP-ECV), + and C(UDP-ECV) monitors. + + query: + description: + - "Domain name to resolve as part of monitoring the DNS service (for example, C(example.com))." + + querytype: + choices: + - 'Address' + - 'Zone' + - 'AAAA' + description: + - >- + Type of DNS record for which to send monitoring queries. Set to C(Address) for querying A records, C(AAAA) + for querying AAAA records, and C(Zone) for querying the SOA record. + + scriptname: + description: + - >- + Path and name of the script to execute. The script must be available on the NetScaler appliance, in + the /nsconfig/monitors/ directory. + - "Minimum length = 1" + + scriptargs: + description: + - "String of arguments for the script. The string is copied verbatim into the request." + + dispatcherip: + description: + - "IP address of the dispatcher to which to send the probe." + + dispatcherport: + description: + - "Port number on which the dispatcher listens for the monitoring probe." + + username: + description: + - >- + User name with which to probe the C(RADIUS), C(NNTP), C(FTP), C(FTP-EXTENDED), C(MYSQL), C(MSSQL), C(POP3), C(CITRIX-AG), + C(CITRIX-XD-DDC), C(CITRIX-WI-EXTENDED), C(CITRIX-XNC) or C(CITRIX-XDM) server. + - "Minimum length = 1" + + password: + description: + - >- + Password that is required for logging on to the C(RADIUS), C(NNTP), C(FTP), C(FTP-EXTENDED), C(MYSQL), C(MSSQL), C(POP3), + C(CITRIX-AG), C(CITRIX-XD-DDC), C(CITRIX-WI-EXTENDED), C(CITRIX-XNC-ECV) or C(CITRIX-XDM) server. Used in + conjunction with the user name specified for the C(username) parameter. + - "Minimum length = 1" + + secondarypassword: + description: + - >- + Secondary password that users might have to provide to log on to the Access Gateway server. + Applicable to C(CITRIX-AG) monitors. + + logonpointname: + description: + - >- + Name of the logon point that is configured for the Citrix Access Gateway Advanced Access Control + software. Required if you want to monitor the associated login page or Logon Agent. Applicable to + C(CITRIX-AAC-LAS) and C(CITRIX-AAC-LOGINPAGE) monitors. + + lasversion: + description: + - >- + Version number of the Citrix Advanced Access Control Logon Agent. Required by the C(CITRIX-AAC-LAS) + monitor. + + radkey: + description: + - >- + Authentication key (shared secret text string) for RADIUS clients and servers to exchange. Applicable + to monitors of type C(RADIUS) and C(RADIUS_ACCOUNTING). + - "Minimum length = 1" + + radnasid: + description: + - "NAS-Identifier to send in the Access-Request packet. Applicable to monitors of type C(RADIUS)." 
+ - "Minimum length = 1" + + radnasip: + description: + - >- + Network Access Server (NAS) IP address to use as the source IP address when monitoring a RADIUS + server. Applicable to monitors of type C(RADIUS) and C(RADIUS_ACCOUNTING). + + radaccounttype: + description: + - "Account Type to be used in Account Request Packet. Applicable to monitors of type C(RADIUS_ACCOUNTING)." + - "Minimum value = 0" + - "Maximum value = 15" + + radframedip: + description: + - "Source ip with which the packet will go out . Applicable to monitors of type C(RADIUS_ACCOUNTING)." + + radapn: + description: + - >- + Called Station Id to be used in Account Request Packet. Applicable to monitors of type + C(RADIUS_ACCOUNTING). + - "Minimum length = 1" + + radmsisdn: + description: + - >- + Calling Stations Id to be used in Account Request Packet. Applicable to monitors of type + C(RADIUS_ACCOUNTING). + - "Minimum length = 1" + + radaccountsession: + description: + - >- + Account Session ID to be used in Account Request Packet. Applicable to monitors of type + C(RADIUS_ACCOUNTING). + - "Minimum length = 1" + + lrtm: + choices: + - 'enabled' + - 'disabled' + description: + - >- + Calculate the least response times for bound services. If this parameter is not enabled, the + appliance does not learn the response times of the bound services. Also used for LRTM load balancing. + + deviation: + description: + - >- + Time value added to the learned average response time in dynamic response time monitoring (DRTM). + When a deviation is specified, the appliance learns the average response time of bound services and + adds the deviation to the average. The final value is then continually adjusted to accommodate + response time variations over time. Specified in milliseconds, seconds, or minutes. + - "Minimum value = C(0)" + - "Maximum value = C(20939)" + + units1: + choices: + - 'SEC' + - 'MSEC' + - 'MIN' + description: + - "Unit of measurement for the Deviation parameter. Cannot be changed after the monitor is created." + + interval: + description: + - "Time interval between two successive probes. Must be greater than the value of Response Time-out." + - "Minimum value = C(1)" + - "Maximum value = C(20940)" + + units3: + choices: + - 'SEC' + - 'MSEC' + - 'MIN' + description: + - "monitor interval units." + + resptimeout: + description: + - >- + Amount of time for which the appliance must wait before it marks a probe as FAILED. Must be less than + the value specified for the Interval parameter. + - >- + Note: For C(UDP-ECV) monitors for which a receive string is not configured, response timeout does not + apply. For C(UDP-ECV) monitors with no receive string, probe failure is indicated by an ICMP port + unreachable error received from the service. + - "Minimum value = C(1)" + - "Maximum value = C(20939)" + + units4: + choices: + - 'SEC' + - 'MSEC' + - 'MIN' + description: + - "monitor response timeout units." + + resptimeoutthresh: + description: + - >- + Response time threshold, specified as a percentage of the Response Time-out parameter. If the + response to a monitor probe has not arrived when the threshold is reached, the appliance generates an + SNMP trap called monRespTimeoutAboveThresh. After the response time returns to a value below the + threshold, the appliance generates a monRespTimeoutBelowThresh SNMP trap. For the traps to be + generated, the "MONITOR-RTO-THRESHOLD" alarm must also be enabled. 
+ - "Minimum value = C(0)" + - "Maximum value = C(100)" + + retries: + description: + - >- + Maximum number of probes to send to establish the state of a service for which a monitoring probe + failed. + - "Minimum value = C(1)" + - "Maximum value = C(127)" + + failureretries: + description: + - >- + Number of retries that must fail, out of the number specified for the Retries parameter, for a + service to be marked as DOWN. For example, if the Retries parameter is set to 10 and the Failure + Retries parameter is set to 6, out of the ten probes sent, at least six probes must fail if the + service is to be marked as DOWN. The default value of 0 means that all the retries must fail if the + service is to be marked as DOWN. + - "Minimum value = C(0)" + - "Maximum value = C(32)" + + alertretries: + description: + - >- + Number of consecutive probe failures after which the appliance generates an SNMP trap called + monProbeFailed. + - "Minimum value = C(0)" + - "Maximum value = C(32)" + + successretries: + description: + - "Number of consecutive successful probes required to transition a service's state from DOWN to UP." + - "Minimum value = C(1)" + - "Maximum value = C(32)" + + downtime: + description: + - >- + Time duration for which to wait before probing a service that has been marked as DOWN. Expressed in + milliseconds, seconds, or minutes. + - "Minimum value = C(1)" + - "Maximum value = C(20939)" + + units2: + choices: + - 'SEC' + - 'MSEC' + - 'MIN' + description: + - "Unit of measurement for the Down Time parameter. Cannot be changed after the monitor is created." + + destip: + description: + - >- + IP address of the service to which to send probes. If the parameter is set to 0, the IP address of + the server to which the monitor is bound is considered the destination IP address. + + destport: + description: + - >- + TCP or UDP port to which to send the probe. If the parameter is set to 0, the port number of the + service to which the monitor is bound is considered the destination port. For a monitor of type C(USER), + however, the destination port is the port number that is included in the HTTP request sent to the + dispatcher. Does not apply to monitors of type C(PING). + + state: + choices: + - 'enabled' + - 'disabled' + description: + - >- + State of the monitor. The C(disabled) setting disables not only the monitor being configured, but all + monitors of the same type, until the parameter is set to C(enabled). If the monitor is bound to a + service, the state of the monitor is not taken into account when the state of the service is + determined. + + reverse: + description: + - >- + Mark a service as DOWN, instead of UP, when probe criteria are satisfied, and as UP instead of DOWN + when probe criteria are not satisfied. + type: bool + + transparent: + description: + - >- + The monitor is bound to a transparent device such as a firewall or router. The state of a transparent + device depends on the responsiveness of the services behind it. If a transparent device is being + monitored, a destination IP address must be specified. The probe is sent to the specified IP address + by using the MAC address of the transparent device. + type: bool + + iptunnel: + description: + - >- + Send the monitoring probe to the service through an IP tunnel. A destination IP address must be + specified. + type: bool + + tos: + description: + - "Probe the service by encoding the destination IP address in the IP TOS (6) bits." 
+ type: bool + + tosid: + description: + - "The TOS ID of the specified destination IP. Applicable only when the TOS parameter is set." + - "Minimum value = C(1)" + - "Maximum value = C(63)" + + secure: + description: + - >- + Use a secure SSL connection when monitoring a service. Applicable only to TCP based monitors. The + secure option cannot be used with a C(CITRIX-AG) monitor, because a CITRIX-AG monitor uses a secure + connection by default. + type: bool + + validatecred: + description: + - >- + Validate the credentials of the Xen Desktop DDC server user. Applicable to monitors of type + C(CITRIX-XD-DDC). + type: bool + + domain: + description: + - >- + Domain in which the XenDesktop Desktop Delivery Controller (DDC) servers or Web Interface servers are + present. Required by C(CITRIX-XD-DDC) and C(CITRIX-WI-EXTENDED) monitors for logging on to the DDC servers + and Web Interface servers, respectively. + + ipaddress: + description: + - >- + Set of IP addresses expected in the monitoring response from the DNS server, if the record type is A + or AAAA. Applicable to C(DNS) monitors. + - "Minimum length = 1" + + group: + description: + - >- + Name of a newsgroup available on the NNTP service that is to be monitored. The appliance periodically + generates an NNTP query for the name of the newsgroup and evaluates the response. If the newsgroup is + found on the server, the service is marked as UP. If the newsgroup does not exist or if the search + fails, the service is marked as DOWN. Applicable to NNTP monitors. + - "Minimum length = 1" + + filename: + description: + - >- + Name of a file on the FTP server. The appliance monitors the FTP service by periodically checking the + existence of the file on the server. Applicable to C(FTP-EXTENDED) monitors. + - "Minimum length = 1" + + basedn: + description: + - >- + The base distinguished name of the LDAP service, from where the LDAP server can begin the search for + the attributes in the monitoring query. Required for C(LDAP) service monitoring. + - "Minimum length = 1" + + binddn: + description: + - >- + The distinguished name with which an LDAP monitor can perform the Bind operation on the LDAP server. + Optional. Applicable to C(LDAP) monitors. + - "Minimum length = 1" + + filter: + description: + - "Filter criteria for the LDAP query. Optional." + - "Minimum length = 1" + + attribute: + description: + - >- + Attribute to evaluate when the LDAP server responds to the query. Success or failure of the + monitoring probe depends on whether the attribute exists in the response. Optional. + - "Minimum length = 1" + + database: + description: + - "Name of the database to connect to during authentication." + - "Minimum length = 1" + + oraclesid: + description: + - "Name of the service identifier that is used to connect to the Oracle database during authentication." + - "Minimum length = 1" + + sqlquery: + description: + - >- + SQL query for a C(MYSQL-ECV) or C(MSSQL-ECV) monitor. Sent to the database server after the server + authenticates the connection. + - "Minimum length = 1" + + evalrule: + description: + - >- + Default syntax expression that evaluates the database server's response to a MYSQL-ECV or MSSQL-ECV + monitoring query. Must produce a Boolean result. The result determines the state of the server. If + the expression returns TRUE, the probe succeeds. + - >- + For example, if you want the appliance to evaluate the error message to determine the state of the + server, use the rule C(MYSQL.RES.ROW(10) .TEXT_ELEM(2).EQ("MySQL")). 
+
+    mssqlprotocolversion:
+        choices:
+            - '70'
+            - '2000'
+            - '2000SP1'
+            - '2005'
+            - '2008'
+            - '2008R2'
+            - '2012'
+            - '2014'
+        description:
+            - "Version of MSSQL server that is to be monitored."
+
+    Snmpoid:
+        description:
+            - "SNMP OID for C(SNMP) monitors."
+            - "Minimum length = 1"
+
+    snmpcommunity:
+        description:
+            - "Community name for C(SNMP) monitors."
+            - "Minimum length = 1"
+
+    snmpthreshold:
+        description:
+            - "Threshold for C(SNMP) monitors."
+            - "Minimum length = 1"
+
+    snmpversion:
+        choices:
+            - 'V1'
+            - 'V2'
+        description:
+            - "SNMP version to be used for C(SNMP) monitors."
+
+    metrictable:
+        description:
+            - "Metric table to which to bind metrics."
+            - "Minimum length = 1"
+            - "Maximum length = 99"
+
+    application:
+        description:
+            - >-
+                Name of the application used to determine the state of the service. Applicable to monitors of type
+                C(CITRIX-XML-SERVICE).
+            - "Minimum length = 1"
+
+    sitepath:
+        description:
+            - >-
+                URL of the logon page. For monitors of type C(CITRIX-WEB-INTERFACE), to monitor a dynamic page under the
+                site path, terminate the site path with a slash C(/). Applicable to C(CITRIX-WEB-INTERFACE),
+                C(CITRIX-WI-EXTENDED) and C(CITRIX-XDM) monitors.
+            - "Minimum length = 1"
+
+    storename:
+        description:
+            - >-
+                Store Name. For monitors of type C(STOREFRONT), C(storename) is an optional argument defining storefront
+                service store name. Applicable to C(STOREFRONT) monitors.
+            - "Minimum length = 1"
+
+    storefrontacctservice:
+        description:
+            - >-
+                Enable/Disable probing for Account Service. Applicable only to Store Front monitors. For
+                multi-tenancy configurations, users may skip the account service.
+        type: bool
+
+    hostname:
+        description:
+            - "Hostname in the FQDN format (Example: C(porche.cars.org)). Applicable to C(STOREFRONT) monitors."
+            - "Minimum length = 1"
+
+    netprofile:
+        description:
+            - "Name of the network profile."
+            - "Minimum length = 1"
+            - "Maximum length = 127"
+
+    originhost:
+        description:
+            - >-
+                Origin-Host value for the Capabilities-Exchange-Request (CER) message to use for monitoring Diameter
+                servers.
+            - "Minimum length = 1"
+
+    originrealm:
+        description:
+            - >-
+                Origin-Realm value for the Capabilities-Exchange-Request (CER) message to use for monitoring Diameter
+                servers.
+            - "Minimum length = 1"
+
+    hostipaddress:
+        description:
+            - >-
+                Host-IP-Address value for the Capabilities-Exchange-Request (CER) message to use for monitoring
+                Diameter servers. If Host-IP-Address is not specified, the appliance inserts the mapped IP (MIP)
+                address or subnet IP (SNIP) address from which the CER request (the monitoring probe) is sent.
+            - "Minimum length = 1"
+
+    vendorid:
+        description:
+            - >-
+                Vendor-Id value for the Capabilities-Exchange-Request (CER) message to use for monitoring Diameter
+                servers.
+
+    productname:
+        description:
+            - >-
+                Product-Name value for the Capabilities-Exchange-Request (CER) message to use for monitoring Diameter
+                servers.
+            - "Minimum length = 1"
+
+    firmwarerevision:
+        description:
+            - >-
+                Firmware-Revision value for the Capabilities-Exchange-Request (CER) message to use for monitoring
+                Diameter servers.
+
+    authapplicationid:
+        description:
+            - >-
+                List of Auth-Application-Id attribute value pairs (AVPs) for the Capabilities-Exchange-Request (CER)
+                message to use for monitoring Diameter servers. A maximum of eight of these AVPs are supported in a
+                monitoring CER message.
+ - "Minimum value = C(0)" + - "Maximum value = C(4294967295)" + + acctapplicationid: + description: + - >- + List of Acct-Application-Id attribute value pairs (AVPs) for the Capabilities-Exchange-Request (CER) + message to use for monitoring Diameter servers. A maximum of eight of these AVPs are supported in a + monitoring message. + - "Minimum value = C(0)" + - "Maximum value = C(4294967295)" + + inbandsecurityid: + choices: + - 'NO_INBAND_SECURITY' + - 'TLS' + description: + - >- + Inband-Security-Id for the Capabilities-Exchange-Request (CER) message to use for monitoring Diameter + servers. + + supportedvendorids: + description: + - >- + List of Supported-Vendor-Id attribute value pairs (AVPs) for the Capabilities-Exchange-Request (CER) + message to use for monitoring Diameter servers. A maximum eight of these AVPs are supported in a + monitoring message. + - "Minimum value = C(1)" + - "Maximum value = C(4294967295)" + + vendorspecificvendorid: + description: + - >- + Vendor-Id to use in the Vendor-Specific-Application-Id grouped attribute-value pair (AVP) in the + monitoring CER message. To specify Auth-Application-Id or Acct-Application-Id in + Vendor-Specific-Application-Id, use vendorSpecificAuthApplicationIds or + vendorSpecificAcctApplicationIds, respectively. Only one Vendor-Id is supported for all the + Vendor-Specific-Application-Id AVPs in a CER monitoring message. + - "Minimum value = 1" + + vendorspecificauthapplicationids: + description: + - >- + List of Vendor-Specific-Auth-Application-Id attribute value pairs (AVPs) for the + Capabilities-Exchange-Request (CER) message to use for monitoring Diameter servers. A maximum of + eight of these AVPs are supported in a monitoring message. The specified value is combined with the + value of vendorSpecificVendorId to obtain the Vendor-Specific-Application-Id AVP in the CER + monitoring message. + - "Minimum value = C(0)" + - "Maximum value = C(4294967295)" + + vendorspecificacctapplicationids: + description: + - >- + List of Vendor-Specific-Acct-Application-Id attribute value pairs (AVPs) to use for monitoring + Diameter servers. A maximum of eight of these AVPs are supported in a monitoring message. The + specified value is combined with the value of vendorSpecificVendorId to obtain the + Vendor-Specific-Application-Id AVP in the CER monitoring message. + - "Minimum value = C(0)" + - "Maximum value = C(4294967295)" + + kcdaccount: + description: + - "KCD Account used by C(MSSQL) monitor." + - "Minimum length = 1" + - "Maximum length = 32" + + storedb: + choices: + - 'enabled' + - 'disabled' + description: + - >- + Store the database list populated with the responses to monitor probes. Used in database specific + load balancing if C(MSSQL-ECV)/C(MYSQL-ECV) monitor is configured. + + storefrontcheckbackendservices: + description: + - >- + This option will enable monitoring of services running on storefront server. Storefront services are + monitored by probing to a Windows service that runs on the Storefront server and exposes details of + which storefront services are running. + type: bool + + trofscode: + description: + - "Code expected when the server is under maintenance." + + trofsstring: + description: + - >- + String expected from the server for the service to be marked as trofs. Applicable to HTTP-ECV/TCP-ECV + monitors. 
+
+extends_documentation_fragment:
+- community.general.netscaler
+
+requirements:
+    - nitro python sdk
+'''
+
+EXAMPLES = '''
+- name: Set lb monitor
+  local_action:
+    nsip: 172.18.0.2
+    nitro_user: nsroot
+    nitro_pass: nsroot
+    validate_certs: no
+
+    module: netscaler_lb_monitor
+    state: present
+
+    monitorname: monitor_1
+    type: HTTP-INLINE
+    action: DOWN
+    respcode: ['400']
+'''
+
+RETURN = '''
+loglines:
+    description: list of logged messages by the module
+    returned: always
+    type: list
+    sample: ['message 1', 'message 2']
+
+msg:
+    description: Message detailing the failure reason
+    returned: failure
+    type: str
+    sample: "Action does not exist"
+
+diff:
+    description: List of differences between the actual configured object and the configuration specified in the module
+    returned: failure
+    type: dict
+    sample: { 'targetlbvserver': 'difference. ours: (str) server1 other: (str) server2' }
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+
+from ansible_collections.community.general.plugins.module_utils.network.netscaler.netscaler import (
+    ConfigProxy,
+    get_nitro_client,
+    netscaler_common_arguments,
+    log,
+    loglines,
+    ensure_feature_is_enabled,
+    get_immutables_intersection
+)
+
+try:
+    from nssrc.com.citrix.netscaler.nitro.resource.config.lb.lbmonitor import lbmonitor
+    from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
+    PYTHON_SDK_IMPORTED = True
+except ImportError as e:
+    PYTHON_SDK_IMPORTED = False
+
+
+def lbmonitor_exists(client, module):
+    log('Checking if monitor exists')
+    if lbmonitor.count_filtered(client, 'monitorname:%s' % module.params['monitorname']) > 0:
+        return True
+    else:
+        return False
+
+
+def lbmonitor_identical(client, module, lbmonitor_proxy):
+    log('Checking if monitor is identical')
+
+    count = lbmonitor.count_filtered(client, 'monitorname:%s' % module.params['monitorname'])
+    if count == 0:
+        return False
+
+    lbmonitor_list = lbmonitor.get_filtered(client, 'monitorname:%s' % module.params['monitorname'])
+    diff_dict = lbmonitor_proxy.diff_object(lbmonitor_list[0])
+
+    # Skipping hashed fields since they cannot be compared directly
+    hashed_fields = [
+        'password',
+        'secondarypassword',
+        'radkey',
+    ]
+    for key in hashed_fields:
+        if key in diff_dict:
+            del diff_dict[key]
+
+    if diff_dict == {}:
+        return True
+    else:
+        return False
+
+
+def diff_list(client, module, lbmonitor_proxy):
+    monitor_list = lbmonitor.get_filtered(client, 'monitorname:%s' % module.params['monitorname'])
+    return lbmonitor_proxy.diff_object(monitor_list[0])
+
+
+def main():
+
+    module_specific_arguments = dict(
+
+        monitorname=dict(type='str'),
+
+        type=dict(
+            type='str',
+            choices=[
+                'PING',
+                'TCP',
+                'HTTP',
+                'TCP-ECV',
+                'HTTP-ECV',
+                'UDP-ECV',
+                'DNS',
+                'FTP',
+                'LDNS-PING',
+                'LDNS-TCP',
+                'LDNS-DNS',
+                'RADIUS',
+                'USER',
+                'HTTP-INLINE',
+                'SIP-UDP',
+                'SIP-TCP',
+                'LOAD',
+                'FTP-EXTENDED',
+                'SMTP',
+                'SNMP',
+                'NNTP',
+                'MYSQL',
+                'MYSQL-ECV',
+                'MSSQL-ECV',
+                'ORACLE-ECV',
+                'LDAP',
+                'POP3',
+                'CITRIX-XML-SERVICE',
+                'CITRIX-WEB-INTERFACE',
+                'DNS-TCP',
+                'RTSP',
+                'ARP',
+                'CITRIX-AG',
+                'CITRIX-AAC-LOGINPAGE',
+                'CITRIX-AAC-LAS',
+                'CITRIX-XD-DDC',
+                'ND6',
+                'CITRIX-WI-EXTENDED',
+                'DIAMETER',
+                'RADIUS_ACCOUNTING',
+                'STOREFRONT',
+                'APPC',
+                'SMPP',
+                'CITRIX-XNC-ECV',
+                'CITRIX-XDM',
+                'CITRIX-STA-SERVICE',
+                'CITRIX-STA-SERVICE-NHOP',
+            ]
+        ),
+
+        action=dict(
+            type='str',
+            choices=[
+                'NONE',
+                'LOG',
+                'DOWN',
+            ]
+        ),
+        respcode=dict(type='list'),
+        httprequest=dict(type='str'),
+
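+        # Each option in this spec mirrors a NITRO lbmonitor attribute of the same
+        # name; ConfigProxy (instantiated further down) copies the non-None
+        # module.params values onto a bare lbmonitor() object. Roughly, and only as
+        # a sketch of the idea (apply_params is hypothetical, not part of this module):
+        #
+        #   def apply_params(actual, params, readwrite_attrs):
+        #       for attr in readwrite_attrs:
+        #           if params.get(attr) is not None:
+        #               setattr(actual, attr, params[attr])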
rtsprequest=dict(type='str'), + customheaders=dict(type='str'), + maxforwards=dict(type='float'), + sipmethod=dict( + type='str', + choices=[ + 'OPTIONS', + 'INVITE', + 'REGISTER', + ] + ), + sipuri=dict(type='str'), + sipreguri=dict(type='str'), + send=dict(type='str'), + recv=dict(type='str'), + query=dict(type='str'), + querytype=dict( + type='str', + choices=[ + 'Address', + 'Zone', + 'AAAA', + ] + ), + scriptname=dict(type='str'), + scriptargs=dict(type='str'), + dispatcherip=dict(type='str'), + dispatcherport=dict(type='int'), + username=dict(type='str'), + password=dict(type='str'), + secondarypassword=dict(type='str'), + logonpointname=dict(type='str'), + lasversion=dict(type='str'), + radkey=dict(type='str'), + radnasid=dict(type='str'), + radnasip=dict(type='str'), + radaccounttype=dict(type='float'), + radframedip=dict(type='str'), + radapn=dict(type='str'), + radmsisdn=dict(type='str'), + radaccountsession=dict(type='str'), + lrtm=dict( + type='str', + choices=[ + 'enabled', + 'disabled', + ] + ), + deviation=dict(type='float'), + units1=dict( + type='str', + choices=[ + 'SEC', + 'MSEC', + 'MIN', + ] + ), + interval=dict(type='int'), + units3=dict( + type='str', + choices=[ + 'SEC', + 'MSEC', + 'MIN', + ] + ), + resptimeout=dict(type='int'), + units4=dict( + type='str', + choices=[ + 'SEC', + 'MSEC', + 'MIN', + ] + ), + resptimeoutthresh=dict(type='float'), + retries=dict(type='int'), + failureretries=dict(type='int'), + alertretries=dict(type='int'), + successretries=dict(type='int'), + downtime=dict(type='int'), + units2=dict( + type='str', + choices=[ + 'SEC', + 'MSEC', + 'MIN', + ] + ), + destip=dict(type='str'), + destport=dict(type='int'), + reverse=dict(type='bool'), + transparent=dict(type='bool'), + iptunnel=dict(type='bool'), + tos=dict(type='bool'), + tosid=dict(type='float'), + secure=dict(type='bool'), + validatecred=dict(type='bool'), + domain=dict(type='str'), + ipaddress=dict(type='list'), + group=dict(type='str'), + filename=dict(type='str'), + basedn=dict(type='str'), + binddn=dict(type='str'), + filter=dict(type='str'), + attribute=dict(type='str'), + database=dict(type='str'), + oraclesid=dict(type='str'), + sqlquery=dict(type='str'), + evalrule=dict(type='str'), + mssqlprotocolversion=dict( + type='str', + choices=[ + '70', + '2000', + '2000SP1', + '2005', + '2008', + '2008R2', + '2012', + '2014', + ] + ), + Snmpoid=dict(type='str'), + snmpcommunity=dict(type='str'), + snmpthreshold=dict(type='str'), + snmpversion=dict( + type='str', + choices=[ + 'V1', + 'V2', + ] + ), + application=dict(type='str'), + sitepath=dict(type='str'), + storename=dict(type='str'), + storefrontacctservice=dict(type='bool'), + hostname=dict(type='str'), + netprofile=dict(type='str'), + originhost=dict(type='str'), + originrealm=dict(type='str'), + hostipaddress=dict(type='str'), + vendorid=dict(type='float'), + productname=dict(type='str'), + firmwarerevision=dict(type='float'), + authapplicationid=dict(type='list'), + acctapplicationid=dict(type='list'), + inbandsecurityid=dict( + type='str', + choices=[ + 'NO_INBAND_SECURITY', + 'TLS', + ] + ), + supportedvendorids=dict(type='list'), + vendorspecificvendorid=dict(type='float'), + vendorspecificauthapplicationids=dict(type='list'), + vendorspecificacctapplicationids=dict(type='list'), + storedb=dict( + type='str', + choices=[ + 'enabled', + 'disabled', + ] + ), + storefrontcheckbackendservices=dict(type='bool'), + trofscode=dict(type='float'), + trofsstring=dict(type='str'), + ) + + hand_inserted_arguments = dict() + + 
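+    # The spec is assembled below with dict.update(), so on a key collision the
+    # later layer wins: module-specific options first, then the shared NetScaler
+    # connection options, then hand-inserted overrides. An equivalent sketch:
+    #
+    #   merged = {}
+    #   for layer in (module_specific_arguments, netscaler_common_arguments,
+    #                 hand_inserted_arguments):
+    #       merged.update(layer)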
+    argument_spec = dict()
+    argument_spec.update(module_specific_arguments)
+    argument_spec.update(netscaler_common_arguments)
+    argument_spec.update(hand_inserted_arguments)
+
+    module = AnsibleModule(
+        argument_spec=argument_spec,
+        supports_check_mode=True,
+    )
+
+    module_result = dict(
+        changed=False,
+        failed=False,
+        loglines=loglines,
+    )
+
+    # Fail the module if imports failed
+    if not PYTHON_SDK_IMPORTED:
+        module.fail_json(msg='Could not load nitro python sdk', **module_result)
+
+    # Fallthrough to rest of execution
+    client = get_nitro_client(module)
+
+    try:
+        client.login()
+    except nitro_exception as e:
+        msg = "nitro exception during login. errorcode=%s, message=%s" % (str(e.errorcode), e.message)
+        module.fail_json(msg=msg)
+    except Exception as e:
+        if str(type(e)) == "<class 'requests.exceptions.ConnectionError'>":
+            module.fail_json(msg='Connection error %s' % str(e))
+        elif str(type(e)) == "<class 'requests.exceptions.SSLError'>":
+            module.fail_json(msg='SSL Error %s' % str(e))
+        else:
+            module.fail_json(msg='Unexpected error during login %s' % str(e))
+
+    # Instantiate lb monitor object
+    readwrite_attrs = [
+        'monitorname',
+        'type',
+        'action',
+        'respcode',
+        'httprequest',
+        'rtsprequest',
+        'customheaders',
+        'maxforwards',
+        'sipmethod',
+        'sipuri',
+        'sipreguri',
+        'send',
+        'recv',
+        'query',
+        'querytype',
+        'scriptname',
+        'scriptargs',
+        'dispatcherip',
+        'dispatcherport',
+        'username',
+        'password',
+        'secondarypassword',
+        'logonpointname',
+        'lasversion',
+        'radkey',
+        'radnasid',
+        'radnasip',
+        'radaccounttype',
+        'radframedip',
+        'radapn',
+        'radmsisdn',
+        'radaccountsession',
+        'lrtm',
+        'deviation',
+        'units1',
+        'interval',
+        'units3',
+        'resptimeout',
+        'units4',
+        'resptimeoutthresh',
+        'retries',
+        'failureretries',
+        'alertretries',
+        'successretries',
+        'downtime',
+        'units2',
+        'destip',
+        'destport',
+        'reverse',
+        'transparent',
+        'iptunnel',
+        'tos',
+        'tosid',
+        'secure',
+        'validatecred',
+        'domain',
+        'ipaddress',
+        'group',
+        'filename',
+        'basedn',
+        'binddn',
+        'filter',
+        'attribute',
+        'database',
+        'oraclesid',
+        'sqlquery',
+        'evalrule',
+        'mssqlprotocolversion',
+        'Snmpoid',
+        'snmpcommunity',
+        'snmpthreshold',
+        'snmpversion',
+        'application',
+        'sitepath',
+        'storename',
+        'storefrontacctservice',
+        'netprofile',
+        'originhost',
+        'originrealm',
+        'hostipaddress',
+        'vendorid',
+        'productname',
+        'firmwarerevision',
+        'authapplicationid',
+        'acctapplicationid',
+        'inbandsecurityid',
+        'supportedvendorids',
+        'vendorspecificvendorid',
+        'vendorspecificauthapplicationids',
+        'vendorspecificacctapplicationids',
+        'storedb',
+        'storefrontcheckbackendservices',
+        'trofscode',
+        'trofsstring',
+    ]
+
+    readonly_attrs = [
+        'lrtmconf',
+        'lrtmconfstr',
+        'dynamicresponsetimeout',
+        'dynamicinterval',
+        'multimetrictable',
+        'dup_state',
+        'dup_weight',
+        'weight',
+    ]
+
+    immutable_attrs = [
+        'monitorname',
+        'type',
+        'units1',
+        'units3',
+        'units4',
+        'units2',
+        'Snmpoid',
+        'hostname',
+        'servicename',
+        'servicegroupname',
+    ]
+
+    transforms = {
+        'storefrontcheckbackendservices': ['bool_yes_no'],
+        'secure': ['bool_yes_no'],
+        'tos': ['bool_yes_no'],
+        'validatecred': ['bool_yes_no'],
+        'storefrontacctservice': ['bool_yes_no'],
+        'iptunnel': ['bool_yes_no'],
+        'transparent': ['bool_yes_no'],
+        'reverse': ['bool_yes_no'],
+        'lrtm': [lambda v: v.upper()],
+        'storedb': [lambda v: v.upper()],
+    }
+
+    lbmonitor_proxy = ConfigProxy(
+        actual=lbmonitor(),
+        client=client,
+        attribute_values_dict=module.params,
+        readwrite_attrs=readwrite_attrs,
+        readonly_attrs=readonly_attrs,
+        immutable_attrs=immutable_attrs,
+
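+        # The transforms mapping defined above converts Ansible-facing values into
+        # what NITRO expects before comparison/upload: the lrtm/storedb lambdas
+        # upper-case 'enabled'/'disabled', and 'bool_yes_no' presumably renders
+        # True/False as 'YES'/'NO' (an assumption based on the transform's name).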
transforms=transforms, + ) + + try: + ensure_feature_is_enabled(client, 'LB') + + if module.params['state'] == 'present': + log('Applying actions for state present') + if not lbmonitor_exists(client, module): + if not module.check_mode: + log('Adding monitor') + lbmonitor_proxy.add() + if module.params['save_config']: + client.save_config() + module_result['changed'] = True + elif not lbmonitor_identical(client, module, lbmonitor_proxy): + + # Check if we try to change value of immutable attributes + immutables_changed = get_immutables_intersection(lbmonitor_proxy, diff_list(client, module, lbmonitor_proxy).keys()) + if immutables_changed != []: + diff = diff_list(client, module, lbmonitor_proxy) + msg = 'Cannot update immutable attributes %s' % (immutables_changed,) + module.fail_json(msg=msg, diff=diff, **module_result) + + if not module.check_mode: + log('Updating monitor') + lbmonitor_proxy.update() + if module.params['save_config']: + client.save_config() + module_result['changed'] = True + else: + log('Doing nothing for monitor') + module_result['changed'] = False + + # Sanity check for result + log('Sanity checks for state present') + if not module.check_mode: + if not lbmonitor_exists(client, module): + module.fail_json(msg='lb monitor does not exist', **module_result) + if not lbmonitor_identical(client, module, lbmonitor_proxy): + module.fail_json( + msg='lb monitor is not configured correctly', + diff=diff_list(client, module, lbmonitor_proxy), + **module_result + ) + + elif module.params['state'] == 'absent': + log('Applying actions for state absent') + if lbmonitor_exists(client, module): + if not module.check_mode: + lbmonitor_proxy.delete() + if module.params['save_config']: + client.save_config() + module_result['changed'] = True + else: + module_result['changed'] = False + + # Sanity check for result + log('Sanity checks for state absent') + if not module.check_mode: + if lbmonitor_exists(client, module): + module.fail_json(msg='lb monitor still exists', **module_result) + + module_result['actual_attributes'] = lbmonitor_proxy.get_actual_rw_attributes(filter='monitorname') + except nitro_exception as e: + msg = "nitro exception errorcode=%s, message=%s" % (str(e.errorcode), e.message) + module.fail_json(msg=msg, **module_result) + + client.logout() + + module.exit_json(**module_result) + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/network/netscaler/netscaler_lb_vserver.py b/plugins/modules/network/netscaler/netscaler_lb_vserver.py new file mode 100644 index 0000000000..087edd42c5 --- /dev/null +++ b/plugins/modules/network/netscaler/netscaler_lb_vserver.py @@ -0,0 +1,1941 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2017 Citrix Systems +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: netscaler_lb_vserver +short_description: Manage load balancing vserver configuration +description: + - Manage load balancing vserver configuration + - This module is intended to run either on the ansible control node or a bastion (jumpserver) with access to the actual netscaler instance + + +author: George Nikolopoulos (@giorgos-nikolopoulos) + +options: + + name: + description: + - >- + Name for the virtual server. 
Must begin with an ASCII alphanumeric or underscore C(_) character, and + must contain only ASCII alphanumeric, underscore, hash C(#), period C(.), space C( ), colon C(:), at sign + C(@), equal sign C(=), and hyphen C(-) characters. Can be changed after the virtual server is created. + - "Minimum length = 1" + + servicetype: + choices: + - 'HTTP' + - 'FTP' + - 'TCP' + - 'UDP' + - 'SSL' + - 'SSL_BRIDGE' + - 'SSL_TCP' + - 'DTLS' + - 'NNTP' + - 'DNS' + - 'DHCPRA' + - 'ANY' + - 'SIP_UDP' + - 'SIP_TCP' + - 'SIP_SSL' + - 'DNS_TCP' + - 'RTSP' + - 'PUSH' + - 'SSL_PUSH' + - 'RADIUS' + - 'RDP' + - 'MYSQL' + - 'MSSQL' + - 'DIAMETER' + - 'SSL_DIAMETER' + - 'TFTP' + - 'ORACLE' + - 'SMPP' + - 'SYSLOGTCP' + - 'SYSLOGUDP' + - 'FIX' + - 'SSL_FIX' + description: + - "Protocol used by the service (also called the service type)." + + ipv46: + description: + - "IPv4 or IPv6 address to assign to the virtual server." + + ippattern: + description: + - >- + IP address pattern, in dotted decimal notation, for identifying packets to be accepted by the virtual + server. The IP Mask parameter specifies which part of the destination IP address is matched against + the pattern. Mutually exclusive with the IP Address parameter. + - >- + For example, if the IP pattern assigned to the virtual server is C(198.51.100.0) and the IP mask is + C(255.255.240.0) (a forward mask), the first 20 bits in the destination IP addresses are matched with + the first 20 bits in the pattern. The virtual server accepts requests with IP addresses that range + from C(198.51.96.1) to C(198.51.111.254). You can also use a pattern such as C(0.0.2.2) and a mask such as + C(0.0.255.255) (a reverse mask). + - >- + If a destination IP address matches more than one IP pattern, the pattern with the longest match is + selected, and the associated virtual server processes the request. For example, if virtual servers + C(vs1) and C(vs2) have the same IP pattern, C(0.0.100.128), but different IP masks of C(0.0.255.255) and + C(0.0.224.255), a destination IP address of C(198.51.100.128) has the longest match with the IP pattern of + vs1. If a destination IP address matches two or more virtual servers to the same extent, the request + is processed by the virtual server whose port number matches the port number in the request. + + ipmask: + description: + - >- + IP mask, in dotted decimal notation, for the IP Pattern parameter. Can have leading or trailing + non-zero octets (for example, C(255.255.240.0) or C(0.0.255.255)). Accordingly, the mask specifies whether + the first n bits or the last n bits of the destination IP address in a client request are to be + matched with the corresponding bits in the IP pattern. The former is called a forward mask. The + latter is called a reverse mask. + + port: + description: + - "Port number for the virtual server." + - "Range C(1) - C(65535)" + - "* in CLI is represented as C(65535) in NITRO API" + + range: + description: + - >- + Number of IP addresses that the appliance must generate and assign to the virtual server. The virtual + server then functions as a network virtual server, accepting traffic on any of the generated IP + addresses. The IP addresses are generated automatically, as follows: + - >- + * For a range of n, the last octet of the address specified by the IP Address parameter increments + n-1 times. + - "* If the last octet exceeds 255, it rolls over to 0 and the third octet increments by 1." + - >- + Note: The Range parameter assigns multiple IP addresses to one virtual server. 
To generate an array + of virtual servers, each of which owns only one IP address, use brackets in the IP Address and Name + parameters to specify the range. For example: + - "add lb vserver my_vserver[1-3] HTTP 192.0.2.[1-3] 80." + - "Minimum value = C(1)" + - "Maximum value = C(254)" + + persistencetype: + choices: + - 'SOURCEIP' + - 'COOKIEINSERT' + - 'SSLSESSION' + - 'RULE' + - 'URLPASSIVE' + - 'CUSTOMSERVERID' + - 'DESTIP' + - 'SRCIPDESTIP' + - 'CALLID' + - 'RTSPSID' + - 'DIAMETER' + - 'FIXSESSION' + - 'NONE' + description: + - "Type of persistence for the virtual server. Available settings function as follows:" + - "* C(SOURCEIP) - Connections from the same client IP address belong to the same persistence session." + - >- + * C(COOKIEINSERT) - Connections that have the same HTTP Cookie, inserted by a Set-Cookie directive from + a server, belong to the same persistence session. + - "* C(SSLSESSION) - Connections that have the same SSL Session ID belong to the same persistence session." + - >- + * C(CUSTOMSERVERID) - Connections with the same server ID form part of the same session. For this + persistence type, set the Server ID (CustomServerID) parameter for each service and configure the + Rule parameter to identify the server ID in a request. + - "* C(RULE) - All connections that match a user defined rule belong to the same persistence session." + - >- + * C(URLPASSIVE) - Requests that have the same server ID in the URL query belong to the same persistence + session. The server ID is the hexadecimal representation of the IP address and port of the service to + which the request must be forwarded. This persistence type requires a rule to identify the server ID + in the request. + - "* C(DESTIP) - Connections to the same destination IP address belong to the same persistence session." + - >- + * C(SRCIPDESTIP) - Connections that have the same source IP address and destination IP address belong to + the same persistence session. + - "* C(CALLID) - Connections that have the same CALL-ID SIP header belong to the same persistence session." + - "* C(RTSPSID) - Connections that have the same RTSP Session ID belong to the same persistence session." + - >- + * FIXSESSION - Connections that have the same SenderCompID and TargetCompID values belong to the same + persistence session. + + timeout: + description: + - "Time period for which a persistence session is in effect." + - "Minimum value = C(0)" + - "Maximum value = C(1440)" + + persistencebackup: + choices: + - 'SOURCEIP' + - 'NONE' + description: + - >- + Backup persistence type for the virtual server. Becomes operational if the primary persistence + mechanism fails. + + backuppersistencetimeout: + description: + - "Time period for which backup persistence is in effect." + - "Minimum value = C(2)" + - "Maximum value = C(1440)" + + lbmethod: + choices: + - 'ROUNDROBIN' + - 'LEASTCONNECTION' + - 'LEASTRESPONSETIME' + - 'URLHASH' + - 'DOMAINHASH' + - 'DESTINATIONIPHASH' + - 'SOURCEIPHASH' + - 'SRCIPDESTIPHASH' + - 'LEASTBANDWIDTH' + - 'LEASTPACKETS' + - 'TOKEN' + - 'SRCIPSRCPORTHASH' + - 'LRTM' + - 'CALLIDHASH' + - 'CUSTOMLOAD' + - 'LEASTREQUEST' + - 'AUDITLOGHASH' + - 'STATICPROXIMITY' + description: + - "Load balancing method. The available settings function as follows:" + - >- + * C(ROUNDROBIN) - Distribute requests in rotation, regardless of the load. Weights can be assigned to + services to enforce weighted round robin distribution. + - "* C(LEASTCONNECTION) (default) - Select the service with the fewest connections." 
+ - "* C(LEASTRESPONSETIME) - Select the service with the lowest average response time." + - "* C(LEASTBANDWIDTH) - Select the service currently handling the least traffic." + - "* C(LEASTPACKETS) - Select the service currently serving the lowest number of packets per second." + - "* C(CUSTOMLOAD) - Base service selection on the SNMP metrics obtained by custom load monitors." + - >- + * C(LRTM) - Select the service with the lowest response time. Response times are learned through + monitoring probes. This method also takes the number of active connections into account. + - >- + Also available are a number of hashing methods, in which the appliance extracts a predetermined + portion of the request, creates a hash of the portion, and then checks whether any previous requests + had the same hash value. If it finds a match, it forwards the request to the service that served + those previous requests. Following are the hashing methods: + - "* C(URLHASH) - Create a hash of the request URL (or part of the URL)." + - >- + * C(DOMAINHASH) - Create a hash of the domain name in the request (or part of the domain name). The + domain name is taken from either the URL or the Host header. If the domain name appears in both + locations, the URL is preferred. If the request does not contain a domain name, the load balancing + method defaults to C(LEASTCONNECTION). + - "* C(DESTINATIONIPHASH) - Create a hash of the destination IP address in the IP header." + - "* C(SOURCEIPHASH) - Create a hash of the source IP address in the IP header." + - >- + * C(TOKEN) - Extract a token from the request, create a hash of the token, and then select the service + to which any previous requests with the same token hash value were sent. + - >- + * C(SRCIPDESTIPHASH) - Create a hash of the string obtained by concatenating the source IP address and + destination IP address in the IP header. + - "* C(SRCIPSRCPORTHASH) - Create a hash of the source IP address and source port in the IP header." + - "* C(CALLIDHASH) - Create a hash of the SIP Call-ID header." + + hashlength: + description: + - >- + Number of bytes to consider for the hash value used in the URLHASH and DOMAINHASH load balancing + methods. + - "Minimum value = C(1)" + - "Maximum value = C(4096)" + + netmask: + description: + - >- + IPv4 subnet mask to apply to the destination IP address or source IP address when the load balancing + method is C(DESTINATIONIPHASH) or C(SOURCEIPHASH). + - "Minimum length = 1" + + v6netmasklen: + description: + - >- + Number of bits to consider in an IPv6 destination or source IP address, for creating the hash that is + required by the C(DESTINATIONIPHASH) and C(SOURCEIPHASH) load balancing methods. + - "Minimum value = C(1)" + - "Maximum value = C(128)" + + backuplbmethod: + choices: + - 'ROUNDROBIN' + - 'LEASTCONNECTION' + - 'LEASTRESPONSETIME' + - 'SOURCEIPHASH' + - 'LEASTBANDWIDTH' + - 'LEASTPACKETS' + - 'CUSTOMLOAD' + description: + - "Backup load balancing method. Becomes operational if the primary load balancing me" + - "thod fails or cannot be used." + - "Valid only if the primary method is based on static proximity." + + cookiename: + description: + - >- + Use this parameter to specify the cookie name for C(COOKIE) persistence type. It specifies the name of + cookie with a maximum of 32 characters. If not specified, cookie name is internally generated. + + + listenpolicy: + description: + - >- + Default syntax expression identifying traffic accepted by the virtual server. 
Can be either an + expression (for example, C(CLIENT.IP.DST.IN_SUBNET(192.0.2.0/24)) or the name of a named expression. In + the above example, the virtual server accepts all requests whose destination IP address is in the + 192.0.2.0/24 subnet. + + listenpriority: + description: + - >- + Integer specifying the priority of the listen policy. A higher number specifies a lower priority. If + a request matches the listen policies of more than one virtual server the virtual server whose listen + policy has the highest priority (the lowest priority number) accepts the request. + - "Minimum value = C(0)" + - "Maximum value = C(101)" + + resrule: + description: + - >- + Default syntax expression specifying which part of a server's response to use for creating rule based + persistence sessions (persistence type RULE). Can be either an expression or the name of a named + expression. + - "Example:" + - 'C(HTTP.RES.HEADER("setcookie").VALUE(0).TYPECAST_NVLIST_T("=",";").VALUE("server1")).' + + persistmask: + description: + - "Persistence mask for IP based persistence types, for IPv4 virtual servers." + - "Minimum length = 1" + + v6persistmasklen: + description: + - "Persistence mask for IP based persistence types, for IPv6 virtual servers." + - "Minimum value = C(1)" + - "Maximum value = C(128)" + + rtspnat: + description: + - "Use network address translation (NAT) for RTSP data connections." + type: bool + + m: + choices: + - 'IP' + - 'MAC' + - 'IPTUNNEL' + - 'TOS' + description: + - "Redirection mode for load balancing. Available settings function as follows:" + - >- + * C(IP) - Before forwarding a request to a server, change the destination IP address to the server's IP + address. + - >- + * C(MAC) - Before forwarding a request to a server, change the destination MAC address to the server's + MAC address. The destination IP address is not changed. MAC-based redirection mode is used mostly in + firewall load balancing deployments. + - >- + * C(IPTUNNEL) - Perform IP-in-IP encapsulation for client IP packets. In the outer IP headers, set the + destination IP address to the IP address of the server and the source IP address to the subnet IP + (SNIP). The client IP packets are not modified. Applicable to both IPv4 and IPv6 packets. + - "* C(TOS) - Encode the virtual server's TOS ID in the TOS field of the IP header." + - "You can use either the C(IPTUNNEL) or the C(TOS) option to implement Direct Server Return (DSR)." + + tosid: + description: + - >- + TOS ID of the virtual server. Applicable only when the load balancing redirection mode is set to TOS. + - "Minimum value = C(1)" + - "Maximum value = C(63)" + + datalength: + description: + - >- + Length of the token to be extracted from the data segment of an incoming packet, for use in the token + method of load balancing. The length of the token, specified in bytes, must not be greater than 24 + KB. Applicable to virtual servers of type TCP. + - "Minimum value = C(1)" + - "Maximum value = C(100)" + + dataoffset: + description: + - >- + Offset to be considered when extracting a token from the TCP payload. Applicable to virtual servers, + of type TCP, using the token method of load balancing. Must be within the first 24 KB of the TCP + payload. + - "Minimum value = C(0)" + - "Maximum value = C(25400)" + + sessionless: + choices: + - 'enabled' + - 'disabled' + description: + - >- + Perform load balancing on a per-packet basis, without establishing sessions. 
Recommended for load + balancing of intrusion detection system (IDS) servers and scenarios involving direct server return + (DSR), where session information is unnecessary. + + connfailover: + choices: + - 'DISABLED' + - 'STATEFUL' + - 'STATELESS' + description: + - >- + Mode in which the connection failover feature must operate for the virtual server. After a failover, + established TCP connections and UDP packet flows are kept active and resumed on the secondary + appliance. Clients remain connected to the same servers. Available settings function as follows: + - >- + * C(STATEFUL) - The primary appliance shares state information with the secondary appliance, in real + time, resulting in some runtime processing overhead. + - >- + * C(STATELESS) - State information is not shared, and the new primary appliance tries to re-create the + packet flow on the basis of the information contained in the packets it receives. + - "* C(DISABLED) - Connection failover does not occur." + + redirurl: + description: + - "URL to which to redirect traffic if the virtual server becomes unavailable." + - >- + WARNING! Make sure that the domain in the URL does not match the domain specified for a content + switching policy. If it does, requests are continuously redirected to the unavailable virtual server. + - "Minimum length = 1" + + cacheable: + description: + - >- + Route cacheable requests to a cache redirection virtual server. The load balancing virtual server can + forward requests only to a transparent cache redirection virtual server that has an IP address and + port combination of *:80, so such a cache redirection virtual server must be configured on the + appliance. + type: bool + + clttimeout: + description: + - "Idle time, in seconds, after which a client connection is terminated." + - "Minimum value = C(0)" + - "Maximum value = C(31536000)" + + somethod: + choices: + - 'CONNECTION' + - 'DYNAMICCONNECTION' + - 'BANDWIDTH' + - 'HEALTH' + - 'NONE' + description: + - "Type of threshold that, when exceeded, triggers spillover. Available settings function as follows:" + - "* C(CONNECTION) - Spillover occurs when the number of client connections exceeds the threshold." + - >- + * DYNAMICCONNECTION - Spillover occurs when the number of client connections at the virtual server + exceeds the sum of the maximum client (Max Clients) settings for bound services. Do not specify a + spillover threshold for this setting, because the threshold is implied by the Max Clients settings of + bound services. + - >- + * C(BANDWIDTH) - Spillover occurs when the bandwidth consumed by the virtual server's incoming and + outgoing traffic exceeds the threshold. + - >- + * C(HEALTH) - Spillover occurs when the percentage of weights of the services that are UP drops below + the threshold. For example, if services svc1, svc2, and svc3 are bound to a virtual server, with + weights 1, 2, and 3, and the spillover threshold is 50%, spillover occurs if svc1 and svc3 or svc2 + and svc3 transition to DOWN. + - "* C(NONE) - Spillover does not occur." + + sopersistence: + choices: + - 'enabled' + - 'disabled' + description: + - >- + If spillover occurs, maintain source IP address based persistence for both primary and backup virtual + servers. + + sopersistencetimeout: + description: + - "Timeout for spillover persistence, in minutes." + - "Minimum value = C(2)" + - "Maximum value = C(1440)" + + healththreshold: + description: + - >- + Threshold in percent of active services below which vserver state is made down. 
If this threshold is + 0, vserver state will be up even if one bound service is up. + - "Minimum value = C(0)" + - "Maximum value = C(100)" + + sothreshold: + description: + - >- + Threshold at which spillover occurs. Specify an integer for the C(CONNECTION) spillover method, a + bandwidth value in kilobits per second for the C(BANDWIDTH) method (do not enter the units), or a + percentage for the C(HEALTH) method (do not enter the percentage symbol). + - "Minimum value = C(1)" + - "Maximum value = C(4294967287)" + + sobackupaction: + choices: + - 'DROP' + - 'ACCEPT' + - 'REDIRECT' + description: + - >- + Action to be performed if spillover is to take effect, but no backup chain to spillover is usable or + exists. + + redirectportrewrite: + choices: + - 'enabled' + - 'disabled' + description: + - "Rewrite the port and change the protocol to ensure successful HTTP redirects from services." + + downstateflush: + choices: + - 'enabled' + - 'disabled' + description: + - >- + Flush all active transactions associated with a virtual server whose state transitions from UP to + DOWN. Do not enable this option for applications that must complete their transactions. + + disableprimaryondown: + choices: + - 'enabled' + - 'disabled' + description: + - >- + If the primary virtual server goes down, do not allow it to return to primary status until manually + enabled. + + insertvserveripport: + choices: + - 'OFF' + - 'VIPADDR' + - 'V6TOV4MAPPING' + description: + - >- + Insert an HTTP header, whose value is the IP address and port number of the virtual server, before + forwarding a request to the server. The format of the header is : _, where vipHeader is the name that you specify for the header. If the virtual + server has an IPv6 address, the address in the header is enclosed in brackets ([ and ]) to separate + it from the port number. If you have mapped an IPv4 address to a virtual server's IPv6 address, the + value of this parameter determines which IP address is inserted in the header, as follows: + - >- + * C(VIPADDR) - Insert the IP address of the virtual server in the HTTP header regardless of whether the + virtual server has an IPv4 address or an IPv6 address. A mapped IPv4 address, if configured, is + ignored. + - >- + * C(V6TOV4MAPPING) - Insert the IPv4 address that is mapped to the virtual server's IPv6 address. If a + mapped IPv4 address is not configured, insert the IPv6 address. + - "* C(OFF) - Disable header insertion." + + vipheader: + description: + - "Name for the inserted header. The default name is vip-header." + - "Minimum length = 1" + + authenticationhost: + description: + - >- + Fully qualified domain name (FQDN) of the authentication virtual server to which the user must be + redirected for authentication. Make sure that the Authentication parameter is set to C(yes). + - "Minimum length = 3" + - "Maximum length = 252" + + authentication: + description: + - "Enable or disable user authentication." + type: bool + + authn401: + description: + - "Enable or disable user authentication with HTTP 401 responses." + type: bool + + authnvsname: + description: + - "Name of an authentication virtual server with which to authenticate users." + - "Minimum length = 1" + - "Maximum length = 252" + + push: + choices: + - 'enabled' + - 'disabled' + description: + - "Process traffic with the push virtual server that is bound to this load balancing virtual server." 
+
+    pushvserver:
+        description:
+            - >-
+                Name of the load balancing virtual server, of type PUSH or SSL_PUSH, to which the server pushes
+                updates received on the load balancing virtual server that you are configuring.
+            - "Minimum length = 1"
+
+    pushlabel:
+        description:
+            - >-
+                Expression for extracting a label from the server's response. Can be either an expression or the name
+                of a named expression.
+
+    pushmulticlients:
+        description:
+            - >-
+                Allow multiple Web 2.0 connections from the same client to connect to the virtual server and expect
+                updates.
+        type: bool
+
+    tcpprofilename:
+        description:
+            - "Name of the TCP profile whose settings are to be applied to the virtual server."
+            - "Minimum length = 1"
+            - "Maximum length = 127"
+
+    httpprofilename:
+        description:
+            - "Name of the HTTP profile whose settings are to be applied to the virtual server."
+            - "Minimum length = 1"
+            - "Maximum length = 127"
+
+    dbprofilename:
+        description:
+            - "Name of the DB profile whose settings are to be applied to the virtual server."
+            - "Minimum length = 1"
+            - "Maximum length = 127"
+
+    comment:
+        description:
+            - "Any comments that you might want to associate with the virtual server."
+
+    l2conn:
+        description:
+            - >-
+                Use Layer 2 parameters (channel number, MAC address, and VLAN ID) in addition to the 4-tuple
+                (<source IP>:<source port>::<destination IP>:<destination port>) that is used to identify a
+                connection. Allows multiple TCP and non-TCP connections with the same 4-tuple to co-exist on the
+                NetScaler appliance.
+        type: bool
+
+    oracleserverversion:
+        choices:
+            - '10G'
+            - '11G'
+        description:
+            - "Oracle server version."
+
+    mssqlserverversion:
+        choices:
+            - '70'
+            - '2000'
+            - '2000SP1'
+            - '2005'
+            - '2008'
+            - '2008R2'
+            - '2012'
+            - '2014'
+        description:
+            - >-
+                For a load balancing virtual server of type C(MSSQL), the Microsoft SQL Server version. Set this
+                parameter if you expect some clients to run a version different from the version of the database.
+                This setting provides compatibility between the client-side and server-side connections by ensuring
+                that all communication conforms to the server's version.
+
+    mysqlprotocolversion:
+        description:
+            - "MySQL protocol version that the virtual server advertises to clients."
+
+    mysqlserverversion:
+        description:
+            - "MySQL server version string that the virtual server advertises to clients."
+            - "Minimum length = 1"
+            - "Maximum length = 31"
+
+    mysqlcharacterset:
+        description:
+            - "Character set that the virtual server advertises to clients."
+
+    mysqlservercapabilities:
+        description:
+            - "Server capabilities that the virtual server advertises to clients."
+
+    appflowlog:
+        choices:
+            - 'enabled'
+            - 'disabled'
+        description:
+            - "Apply AppFlow logging to the virtual server."
+
+    netprofile:
+        description:
+            - >-
+                Name of the network profile to associate with the virtual server. If you set this parameter, the
+                virtual server uses only the IP addresses in the network profile as source IP addresses when
+                initiating connections with servers.
+            - "Minimum length = 1"
+            - "Maximum length = 127"
+
+    icmpvsrresponse:
+        choices:
+            - 'PASSIVE'
+            - 'ACTIVE'
+        description:
+            - >-
+                How the NetScaler appliance responds to ping requests received for an IP address that is common to
+                one or more virtual servers. Available settings function as follows:
+            - >-
+                * If set to C(PASSIVE) on all the virtual servers that share the IP address, the appliance always
+                responds to the ping requests.
+            - >-
+                * If set to C(ACTIVE) on all the virtual servers that share the IP address, the appliance responds to
+                the ping requests if at least one of the virtual servers is UP. Otherwise, the appliance does not
+                respond.
+            - >-
+                * If set to C(ACTIVE) on some virtual servers and C(PASSIVE) on the others, the appliance responds if at
+                least one virtual server with the C(ACTIVE) setting is UP. Otherwise, the appliance does not respond.
+            - >-
+                Note: This parameter is available at the virtual server level. A similar parameter, ICMP Response, is
+                available at the IP address level, for IPv4 addresses of type VIP. To set that parameter, use the add
+                ip command in the CLI or the Create IP dialog box in the GUI.
+
+    rhistate:
+        choices:
+            - 'PASSIVE'
+            - 'ACTIVE'
+        description:
+            - >-
+                Route Health Injection (RHI) functionality of the NetScaler appliance for advertising the route of the
+                VIP address associated with the virtual server. When Vserver RHI Level (RHI) parameter is set to
+                VSVR_CNTRLD, the following are different RHI behaviors for the VIP address on the basis of RHIstate
+                (RHI STATE) settings on the virtual servers associated with the VIP address:
+            - >-
+                * If you set C(rhistate) to C(PASSIVE) on all virtual servers, the NetScaler ADC always advertises the
+                route for the VIP address.
+            - >-
+                * If you set C(rhistate) to C(ACTIVE) on all virtual servers, the NetScaler ADC advertises the route for
+                the VIP address if at least one of the associated virtual servers is in UP state.
+            - >-
+                * If you set C(rhistate) to C(ACTIVE) on some and C(PASSIVE) on others, the NetScaler ADC advertises the
+                route for the VIP address if at least one of the associated virtual servers, whose C(rhistate) is set
+                to C(ACTIVE), is in UP state.
+
+    newservicerequest:
+        description:
+            - >-
+                Number of requests, or percentage of the load on existing services, by which to increase the load on
+                a new service at each interval in slow-start mode. A non-zero value indicates that slow-start is
+                applicable. A zero value indicates that the global RR startup parameter is applied. Changing the
+                value to zero will cause services currently in slow start to take the full traffic as determined by
+                the LB method. Subsequently, any new services added will use the global RR factor.
+
+    newservicerequestunit:
+        choices:
+            - 'PER_SECOND'
+            - 'PERCENT'
+        description:
+            - "Units in which to increment load at each interval in slow-start mode."
+
+    newservicerequestincrementinterval:
+        description:
+            - >-
+                Interval, in seconds, between successive increments in the load on a new service or a service whose
+                state has just changed from DOWN to UP. A value of 0 (zero) specifies manual slow start.
+            - "Minimum value = C(0)"
+            - "Maximum value = C(3600)"
+
+    minautoscalemembers:
+        description:
+            - "Minimum number of members expected to be present when vserver is used in Autoscale."
+            - "Minimum value = C(0)"
+            - "Maximum value = C(5000)"
+
+    maxautoscalemembers:
+        description:
+            - "Maximum number of members expected to be present when vserver is used in Autoscale."
+            - "Minimum value = C(0)"
+            - "Maximum value = C(5000)"
+
+    persistavpno:
+        description:
+            - "Persist AVP number for Diameter Persistency."
+            - "In case this AVP is not defined in Base RFC 3588 and it is nested inside a Grouped AVP,"
+            - "define a sequence of AVP numbers (max 3) in order of parent to child. So say persist AVP number X"
+            - "is nested inside AVP Y which is nested in Z, then define the list as Z Y X."
+ - "Minimum value = C(1)" + + skippersistency: + choices: + - 'Bypass' + - 'ReLb' + - 'None' + description: + - >- + This argument decides the behavior incase the service which is selected from an existing persistence + session has reached threshold. + + td: + description: + - >- + Integer value that uniquely identifies the traffic domain in which you want to configure the entity. + If you do not specify an ID, the entity becomes part of the default traffic domain, which has an ID + of 0. + - "Minimum value = C(0)" + - "Maximum value = C(4094)" + + authnprofile: + description: + - "Name of the authentication profile to be used when authentication is turned on." + + macmoderetainvlan: + choices: + - 'enabled' + - 'disabled' + description: + - "This option is used to retain vlan information of incoming packet when macmode is enabled." + + dbslb: + choices: + - 'enabled' + - 'disabled' + description: + - "Enable database specific load balancing for MySQL and MSSQL service types." + + dns64: + choices: + - 'enabled' + - 'disabled' + description: + - "This argument is for enabling/disabling the C(dns64) on lbvserver." + + bypassaaaa: + description: + - >- + If this option is enabled while resolving DNS64 query AAAA queries are not sent to back end dns + server. + type: bool + + recursionavailable: + description: + - >- + When set to YES, this option causes the DNS replies from this vserver to have the RA bit turned on. + Typically one would set this option to YES, when the vserver is load balancing a set of DNS servers + thatsupport recursive queries. + type: bool + + processlocal: + choices: + - 'enabled' + - 'disabled' + description: + - >- + By turning on this option packets destined to a vserver in a cluster will not under go any steering. + Turn this option for single packet request response mode or when the upstream device is performing a + proper RSS for connection based distribution. + + dnsprofilename: + description: + - >- + Name of the DNS profile to be associated with the VServer. DNS profile properties will be applied to + the transactions processed by a VServer. This parameter is valid only for DNS and DNS-TCP VServers. + - "Minimum length = 1" + - "Maximum length = 127" + + servicebindings: + description: + - List of services along with the weights that are load balanced. + - The following suboptions are available. + suboptions: + servicename: + description: + - "Service to bind to the virtual server." + - "Minimum length = 1" + weight: + description: + - "Weight to assign to the specified service." + - "Minimum value = C(1)" + - "Maximum value = C(100)" + + servicegroupbindings: + description: + - List of service groups along with the weights that are load balanced. + - The following suboptions are available. + suboptions: + servicegroupname: + description: + - "The service group name bound to the selected load balancing virtual server." + weight: + description: + - >- + Integer specifying the weight of the service. A larger number specifies a greater weight. Defines the + capacity of the service relative to the other services in the load balancing configuration. + Determines the priority given to the service in load balancing decisions. + - "Minimum value = C(1)" + - "Maximum value = C(100)" + + ssl_certkey: + description: + - The name of the ssl certificate that is bound to this service. + - The ssl certificate must already exist. + - Creating the certificate can be done with the M(netscaler_ssl_certkey) module. 
+            - This option is applicable only when C(servicetype) is C(SSL).
+
+    disabled:
+        description:
+            - When set to C(yes) the lb vserver will be disabled.
+            - When set to C(no) the lb vserver will be enabled.
+            - >-
+                Note that due to limitations of the underlying NITRO API a C(disabled) state change alone
+                does not cause the module result to report a changed status.
+        type: bool
+        default: 'no'
+
+extends_documentation_fragment:
+- community.general.netscaler
+
+requirements:
+    - nitro python sdk
+'''
+
+EXAMPLES = '''
+# Netscaler services service-http-1, service-http-2 must have been already created with the netscaler_service module
+
+- name: Create a load balancing vserver bound to services
+  delegate_to: localhost
+  netscaler_lb_vserver:
+    nsip: 172.18.0.2
+    nitro_user: nsroot
+    nitro_pass: nsroot
+    validate_certs: no
+
+    state: present
+
+    name: lb_vserver_1
+    servicetype: HTTP
+    timeout: 12
+    ipv46: 6.93.3.3
+    port: 80
+    servicebindings:
+      - servicename: service-http-1
+        weight: 80
+      - servicename: service-http-2
+        weight: 20
+
+# Service group service-group-1 must have been already created with the netscaler_servicegroup module
+
+- name: Create load balancing vserver bound to servicegroup
+  delegate_to: localhost
+  netscaler_lb_vserver:
+    nsip: 172.18.0.2
+    nitro_user: nsroot
+    nitro_pass: nsroot
+    validate_certs: no
+    state: present
+
+    name: lb_vserver_2
+    servicetype: HTTP
+    ipv46: 6.92.2.2
+    port: 80
+    timeout: 10
+    servicegroupbindings:
+      - servicegroupname: service-group-1
+'''
+
+RETURN = '''
+loglines:
+    description: list of logged messages by the module
+    returned: always
+    type: list
+    sample: ['message 1', 'message 2']
+
+msg:
+    description: Message detailing the failure reason
+    returned: failure
+    type: str
+    sample: "Action does not exist"
+
+diff:
+    description: List of differences between the actual configured object and the configuration specified in the module
+    returned: failure
+    type: dict
+    sample: { 'clttimeout': 'difference.
ours: (float) 10.0 other: (float) 20.0' } +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.network.netscaler.netscaler import ( + ConfigProxy, + get_nitro_client, + netscaler_common_arguments, + log, + loglines, + get_immutables_intersection, + ensure_feature_is_enabled +) +import copy + +try: + from nssrc.com.citrix.netscaler.nitro.resource.config.lb.lbvserver import lbvserver + from nssrc.com.citrix.netscaler.nitro.resource.config.lb.lbvserver_servicegroup_binding import lbvserver_servicegroup_binding + from nssrc.com.citrix.netscaler.nitro.resource.config.lb.lbvserver_service_binding import lbvserver_service_binding + from nssrc.com.citrix.netscaler.nitro.resource.config.ssl.sslvserver_sslcertkey_binding import sslvserver_sslcertkey_binding + from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception + + PYTHON_SDK_IMPORTED = True +except ImportError as e: + IMPORT_ERROR = str(e) + PYTHON_SDK_IMPORTED = False + + +def lb_vserver_exists(client, module): + log('Checking if lb vserver exists') + if lbvserver.count_filtered(client, 'name:%s' % module.params['name']) > 0: + return True + else: + return False + + +def lb_vserver_identical(client, module, lbvserver_proxy): + log('Checking if configured lb vserver is identical') + lbvserver_list = lbvserver.get_filtered(client, 'name:%s' % module.params['name']) + if lbvserver_proxy.has_equal_attributes(lbvserver_list[0]): + return True + else: + return False + + +def lb_vserver_diff(client, module, lbvserver_proxy): + lbvserver_list = lbvserver.get_filtered(client, 'name:%s' % module.params['name']) + return lbvserver_proxy.diff_object(lbvserver_list[0]) + + +def get_configured_service_bindings(client, module): + log('Getting configured service bindings') + + readwrite_attrs = [ + 'weight', + 'name', + 'servicename', + 'servicegroupname' + ] + readonly_attrs = [ + 'preferredlocation', + 'vserverid', + 'vsvrbindsvcip', + 'servicetype', + 'cookieipport', + 'port', + 'vsvrbindsvcport', + 'curstate', + 'ipv46', + 'dynamicweight', + ] + + configured_bindings = {} + if 'servicebindings' in module.params and module.params['servicebindings'] is not None: + for binding in module.params['servicebindings']: + attribute_values_dict = copy.deepcopy(binding) + attribute_values_dict['name'] = module.params['name'] + key = binding['servicename'].strip() + configured_bindings[key] = ConfigProxy( + actual=lbvserver_service_binding(), + client=client, + attribute_values_dict=attribute_values_dict, + readwrite_attrs=readwrite_attrs, + readonly_attrs=readonly_attrs, + ) + return configured_bindings + + +def get_configured_servicegroup_bindings(client, module): + log('Getting configured service group bindings') + readwrite_attrs = [ + 'weight', + 'name', + 'servicename', + 'servicegroupname', + ] + readonly_attrs = [] + + configured_bindings = {} + + if 'servicegroupbindings' in module.params and module.params['servicegroupbindings'] is not None: + for binding in module.params['servicegroupbindings']: + attribute_values_dict = copy.deepcopy(binding) + attribute_values_dict['name'] = module.params['name'] + key = binding['servicegroupname'].strip() + configured_bindings[key] = ConfigProxy( + actual=lbvserver_servicegroup_binding(), + client=client, + attribute_values_dict=attribute_values_dict, + readwrite_attrs=readwrite_attrs, + readonly_attrs=readonly_attrs, + ) + + return configured_bindings + + +def get_actual_service_bindings(client, module): + log('Getting 
actual service bindings') + bindings = {} + try: + if lbvserver_service_binding.count(client, module.params['name']) == 0: + return bindings + except nitro_exception as e: + if e.errorcode == 258: + return bindings + else: + raise + + bindigs_list = lbvserver_service_binding.get(client, module.params['name']) + + for item in bindigs_list: + key = item.servicename + bindings[key] = item + + return bindings + + +def get_actual_servicegroup_bindings(client, module): + log('Getting actual service group bindings') + bindings = {} + + try: + if lbvserver_servicegroup_binding.count(client, module.params['name']) == 0: + return bindings + except nitro_exception as e: + if e.errorcode == 258: + return bindings + else: + raise + + bindigs_list = lbvserver_servicegroup_binding.get(client, module.params['name']) + + for item in bindigs_list: + key = item.servicegroupname + bindings[key] = item + + return bindings + + +def service_bindings_identical(client, module): + log('service_bindings_identical') + + # Compare service keysets + configured_service_bindings = get_configured_service_bindings(client, module) + service_bindings = get_actual_service_bindings(client, module) + configured_keyset = set(configured_service_bindings.keys()) + service_keyset = set(service_bindings.keys()) + if len(configured_keyset ^ service_keyset) > 0: + return False + + # Compare service item to item + for key in configured_service_bindings.keys(): + conf = configured_service_bindings[key] + serv = service_bindings[key] + log('s diff %s' % conf.diff_object(serv)) + if not conf.has_equal_attributes(serv): + return False + + # Fallthrough to success + return True + + +def servicegroup_bindings_identical(client, module): + log('servicegroup_bindings_identical') + + # Compare servicegroup keysets + configured_servicegroup_bindings = get_configured_servicegroup_bindings(client, module) + servicegroup_bindings = get_actual_servicegroup_bindings(client, module) + configured_keyset = set(configured_servicegroup_bindings.keys()) + service_keyset = set(servicegroup_bindings.keys()) + log('len %s' % len(configured_keyset ^ service_keyset)) + if len(configured_keyset ^ service_keyset) > 0: + return False + + # Compare servicegroup item to item + for key in configured_servicegroup_bindings.keys(): + conf = configured_servicegroup_bindings[key] + serv = servicegroup_bindings[key] + log('sg diff %s' % conf.diff_object(serv)) + if not conf.has_equal_attributes(serv): + return False + + # Fallthrough to success + return True + + +def sync_service_bindings(client, module): + log('sync_service_bindings') + + actual_bindings = get_actual_service_bindings(client, module) + configured_bindigns = get_configured_service_bindings(client, module) + + # Delete actual but not configured + delete_keys = list(set(actual_bindings.keys()) - set(configured_bindigns.keys())) + for key in delete_keys: + log('Deleting service binding %s' % key) + actual_bindings[key].servicegroupname = '' + actual_bindings[key].delete(client, actual_bindings[key]) + + # Add configured but not in actual + add_keys = list(set(configured_bindigns.keys()) - set(actual_bindings.keys())) + for key in add_keys: + log('Adding service binding %s' % key) + configured_bindigns[key].add() + + # Update existing if changed + modify_keys = list(set(configured_bindigns.keys()) & set(actual_bindings.keys())) + for key in modify_keys: + if not configured_bindigns[key].has_equal_attributes(actual_bindings[key]): + log('Updating service binding %s' % key) + actual_bindings[key].servicegroupname 
+
+
+def sync_servicegroup_bindings(client, module):
+    log('sync_servicegroup_bindings')
+
+    actual_bindings = get_actual_servicegroup_bindings(client, module)
+    configured_bindings = get_configured_servicegroup_bindings(client, module)
+
+    # Delete actual but not configured
+    delete_keys = list(set(actual_bindings.keys()) - set(configured_bindings.keys()))
+    for key in delete_keys:
+        log('Deleting servicegroup binding %s' % key)
+        actual_bindings[key].servicename = None
+        actual_bindings[key].delete(client, actual_bindings[key])
+
+    # Add configured but not in actual
+    add_keys = list(set(configured_bindings.keys()) - set(actual_bindings.keys()))
+    for key in add_keys:
+        log('Adding servicegroup binding %s' % key)
+        configured_bindings[key].add()
+
+    # Update existing if changed
+    modify_keys = list(set(configured_bindings.keys()) & set(actual_bindings.keys()))
+    for key in modify_keys:
+        if not configured_bindings[key].has_equal_attributes(actual_bindings[key]):
+            log('Updating servicegroup binding %s' % key)
+            actual_bindings[key].servicename = None
+            actual_bindings[key].delete(client, actual_bindings[key])
+            configured_bindings[key].add()
+
+
+def ssl_certkey_bindings_identical(client, module):
+    log('Entering ssl_certkey_bindings_identical')
+    vservername = module.params['name']
+
+    if sslvserver_sslcertkey_binding.count(client, vservername) == 0:
+        bindings = []
+    else:
+        bindings = sslvserver_sslcertkey_binding.get(client, vservername)
+
+    log('Existing certs %s' % bindings)
+
+    if module.params['ssl_certkey'] is None:
+        if len(bindings) == 0:
+            return True
+        else:
+            return False
+    else:
+        certificate_list = [item.certkeyname for item in bindings]
+        log('certificate_list %s' % certificate_list)
+        if certificate_list == [module.params['ssl_certkey']]:
+            return True
+        else:
+            return False
+
+
+def ssl_certkey_bindings_sync(client, module):
+    log('Syncing ssl certificates')
+    vservername = module.params['name']
+    if sslvserver_sslcertkey_binding.count(client, vservername) == 0:
+        bindings = []
+    else:
+        bindings = sslvserver_sslcertkey_binding.get(client, vservername)
+    log('bindings len is %s' % len(bindings))
+
+    # Delete existing bindings
+    for binding in bindings:
+        sslvserver_sslcertkey_binding.delete(client, binding)
+
+    # Add binding if appropriate
+    if module.params['ssl_certkey'] is not None:
+        binding = sslvserver_sslcertkey_binding()
+        binding.vservername = module.params['name']
+        binding.certkeyname = module.params['ssl_certkey']
+        sslvserver_sslcertkey_binding.add(client, binding)
+
+
+def do_state_change(client, module, lbvserver_proxy):
+    if module.params['disabled']:
+        log('Disabling lb server')
+        result = lbvserver.disable(client, lbvserver_proxy.actual)
+    else:
+        log('Enabling lb server')
+        result = lbvserver.enable(client, lbvserver_proxy.actual)
+    return result
+
+
+def main():
+
+    module_specific_arguments = dict(
+        name=dict(type='str'),
+        servicetype=dict(
+            type='str',
+            choices=[
+                'HTTP',
+                'FTP',
+                'TCP',
+                'UDP',
+                'SSL',
+                'SSL_BRIDGE',
+                'SSL_TCP',
+                'DTLS',
+                'NNTP',
+                'DNS',
+                'DHCPRA',
+                'ANY',
+                'SIP_UDP',
+                'SIP_TCP',
+                'SIP_SSL',
+                'DNS_TCP',
+                'RTSP',
+                'PUSH',
+                'SSL_PUSH',
+                'RADIUS',
+                'RDP',
+                'MYSQL',
+                'MSSQL',
+                'DIAMETER',
+                'SSL_DIAMETER',
+                'TFTP',
+                'ORACLE',
+                'SMPP',
+                'SYSLOGTCP',
+                'SYSLOGUDP',
+                'FIX',
+                'SSL_FIX',
+            ]
+        ),
+        ipv46=dict(type='str'),
+        ippattern=dict(type='str'),
+        ipmask=dict(type='str'),
+        port=dict(type='int'),
range=dict(type='float'), + persistencetype=dict( + type='str', + choices=[ + 'SOURCEIP', + 'COOKIEINSERT', + 'SSLSESSION', + 'RULE', + 'URLPASSIVE', + 'CUSTOMSERVERID', + 'DESTIP', + 'SRCIPDESTIP', + 'CALLID', + 'RTSPSID', + 'DIAMETER', + 'FIXSESSION', + 'NONE', + ] + ), + timeout=dict(type='float'), + persistencebackup=dict( + type='str', + choices=[ + 'SOURCEIP', + 'NONE', + ] + ), + backuppersistencetimeout=dict(type='float'), + lbmethod=dict( + type='str', + choices=[ + 'ROUNDROBIN', + 'LEASTCONNECTION', + 'LEASTRESPONSETIME', + 'URLHASH', + 'DOMAINHASH', + 'DESTINATIONIPHASH', + 'SOURCEIPHASH', + 'SRCIPDESTIPHASH', + 'LEASTBANDWIDTH', + 'LEASTPACKETS', + 'TOKEN', + 'SRCIPSRCPORTHASH', + 'LRTM', + 'CALLIDHASH', + 'CUSTOMLOAD', + 'LEASTREQUEST', + 'AUDITLOGHASH', + 'STATICPROXIMITY', + ] + ), + hashlength=dict(type='float'), + netmask=dict(type='str'), + v6netmasklen=dict(type='float'), + backuplbmethod=dict( + type='str', + choices=[ + 'ROUNDROBIN', + 'LEASTCONNECTION', + 'LEASTRESPONSETIME', + 'SOURCEIPHASH', + 'LEASTBANDWIDTH', + 'LEASTPACKETS', + 'CUSTOMLOAD', + ] + ), + cookiename=dict(type='str'), + listenpolicy=dict(type='str'), + listenpriority=dict(type='float'), + persistmask=dict(type='str'), + v6persistmasklen=dict(type='float'), + rtspnat=dict(type='bool'), + m=dict( + type='str', + choices=[ + 'IP', + 'MAC', + 'IPTUNNEL', + 'TOS', + ] + ), + tosid=dict(type='float'), + datalength=dict(type='float'), + dataoffset=dict(type='float'), + sessionless=dict( + type='str', + choices=[ + 'enabled', + 'disabled', + ] + ), + connfailover=dict( + type='str', + choices=[ + 'DISABLED', + 'STATEFUL', + 'STATELESS', + ] + ), + redirurl=dict(type='str'), + cacheable=dict(type='bool'), + clttimeout=dict(type='float'), + somethod=dict( + type='str', + choices=[ + 'CONNECTION', + 'DYNAMICCONNECTION', + 'BANDWIDTH', + 'HEALTH', + 'NONE', + ] + ), + sopersistence=dict( + type='str', + choices=[ + 'enabled', + 'disabled', + ] + ), + sopersistencetimeout=dict(type='float'), + healththreshold=dict(type='float'), + sothreshold=dict(type='float'), + sobackupaction=dict( + type='str', + choices=[ + 'DROP', + 'ACCEPT', + 'REDIRECT', + ] + ), + redirectportrewrite=dict( + type='str', + choices=[ + 'enabled', + 'disabled', + ] + ), + downstateflush=dict( + type='str', + choices=[ + 'enabled', + 'disabled', + ] + ), + disableprimaryondown=dict( + type='str', + choices=[ + 'enabled', + 'disabled', + ] + ), + insertvserveripport=dict( + type='str', + choices=[ + 'OFF', + 'VIPADDR', + 'V6TOV4MAPPING', + ] + ), + vipheader=dict(type='str'), + authenticationhost=dict(type='str'), + authentication=dict(type='bool'), + authn401=dict(type='bool'), + authnvsname=dict(type='str'), + push=dict( + type='str', + choices=[ + 'enabled', + 'disabled', + ] + ), + pushvserver=dict(type='str'), + pushlabel=dict(type='str'), + pushmulticlients=dict(type='bool'), + tcpprofilename=dict(type='str'), + httpprofilename=dict(type='str'), + dbprofilename=dict(type='str'), + comment=dict(type='str'), + l2conn=dict(type='bool'), + oracleserverversion=dict( + type='str', + choices=[ + '10G', + '11G', + ] + ), + mssqlserverversion=dict( + type='str', + choices=[ + '70', + '2000', + '2000SP1', + '2005', + '2008', + '2008R2', + '2012', + '2014', + ] + ), + mysqlprotocolversion=dict(type='float'), + mysqlserverversion=dict(type='str'), + mysqlcharacterset=dict(type='float'), + mysqlservercapabilities=dict(type='float'), + appflowlog=dict( + type='str', + choices=[ + 'enabled', + 'disabled', + ] + ), + netprofile=dict(type='str'), + 
icmpvsrresponse=dict( + type='str', + choices=[ + 'PASSIVE', + 'ACTIVE', + ] + ), + rhistate=dict( + type='str', + choices=[ + 'PASSIVE', + 'ACTIVE', + ] + ), + newservicerequest=dict(type='float'), + newservicerequestunit=dict( + type='str', + choices=[ + 'PER_SECOND', + 'PERCENT', + ] + ), + newservicerequestincrementinterval=dict(type='float'), + minautoscalemembers=dict(type='float'), + maxautoscalemembers=dict(type='float'), + skippersistency=dict( + type='str', + choices=[ + 'Bypass', + 'ReLb', + 'None', + ] + ), + authnprofile=dict(type='str'), + macmoderetainvlan=dict( + type='str', + choices=[ + 'enabled', + 'disabled', + ] + ), + dbslb=dict( + type='str', + choices=[ + 'enabled', + 'disabled', + ] + ), + dns64=dict( + type='str', + choices=[ + 'enabled', + 'disabled', + ] + ), + bypassaaaa=dict(type='bool'), + recursionavailable=dict(type='bool'), + processlocal=dict( + type='str', + choices=[ + 'enabled', + 'disabled', + ] + ), + dnsprofilename=dict(type='str'), + ) + + hand_inserted_arguments = dict( + servicebindings=dict(type='list'), + servicegroupbindings=dict(type='list'), + ssl_certkey=dict(type='str'), + disabled=dict( + type='bool', + default=False + ), + ) + + argument_spec = dict() + + argument_spec.update(netscaler_common_arguments) + argument_spec.update(module_specific_arguments) + argument_spec.update(hand_inserted_arguments) + + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + module_result = dict( + changed=False, + failed=False, + loglines=loglines, + ) + + # Fail the module if imports failed + if not PYTHON_SDK_IMPORTED: + module.fail_json(msg='Could not load nitro python sdk') + + # Fallthrough to rest of execution + client = get_nitro_client(module) + + try: + client.login() + except nitro_exception as e: + msg = "nitro exception during login. 
errorcode=%s, message=%s" % (str(e.errorcode), e.message)
+        module.fail_json(msg=msg)
+    except Exception as e:
+        if str(type(e)) == "<class 'requests.exceptions.ConnectionError'>":
+            module.fail_json(msg='Connection error %s' % str(e))
+        elif str(type(e)) == "<class 'requests.exceptions.SSLError'>":
+            module.fail_json(msg='SSL Error %s' % str(e))
+        else:
+            module.fail_json(msg='Unexpected error during login %s' % str(e))
+
+    readwrite_attrs = [
+        'name',
+        'servicetype',
+        'ipv46',
+        'ippattern',
+        'ipmask',
+        'port',
+        'range',
+        'persistencetype',
+        'timeout',
+        'persistencebackup',
+        'backuppersistencetimeout',
+        'lbmethod',
+        'hashlength',
+        'netmask',
+        'v6netmasklen',
+        'backuplbmethod',
+        'cookiename',
+        'listenpolicy',
+        'listenpriority',
+        'persistmask',
+        'v6persistmasklen',
+        'rtspnat',
+        'm',
+        'tosid',
+        'datalength',
+        'dataoffset',
+        'sessionless',
+        'connfailover',
+        'redirurl',
+        'cacheable',
+        'clttimeout',
+        'somethod',
+        'sopersistence',
+        'sopersistencetimeout',
+        'healththreshold',
+        'sothreshold',
+        'sobackupaction',
+        'redirectportrewrite',
+        'downstateflush',
+        'disableprimaryondown',
+        'insertvserveripport',
+        'vipheader',
+        'authenticationhost',
+        'authentication',
+        'authn401',
+        'authnvsname',
+        'push',
+        'pushvserver',
+        'pushlabel',
+        'pushmulticlients',
+        'tcpprofilename',
+        'httpprofilename',
+        'dbprofilename',
+        'comment',
+        'l2conn',
+        'oracleserverversion',
+        'mssqlserverversion',
+        'mysqlprotocolversion',
+        'mysqlserverversion',
+        'mysqlcharacterset',
+        'mysqlservercapabilities',
+        'appflowlog',
+        'netprofile',
+        'icmpvsrresponse',
+        'rhistate',
+        'newservicerequest',
+        'newservicerequestunit',
+        'newservicerequestincrementinterval',
+        'minautoscalemembers',
+        'maxautoscalemembers',
+        'skippersistency',
+        'authnprofile',
+        'macmoderetainvlan',
+        'dbslb',
+        'dns64',
+        'bypassaaaa',
+        'recursionavailable',
+        'processlocal',
+        'dnsprofilename',
+    ]
+
+    readonly_attrs = [
+        'value',
+        'ipmapping',
+        'ngname',
+        'type',
+        'curstate',
+        'effectivestate',
+        'status',
+        'lbrrreason',
+        'redirect',
+        'precedence',
+        'homepage',
+        'dnsvservername',
+        'domain',
+        'policyname',
+        'cachevserver',
+        'health',
+        'gotopriorityexpression',
+        'ruletype',
+        'groupname',
+        'cookiedomain',
+        'map',
+        'gt2gb',
+        'consolidatedlconn',
+        'consolidatedlconngbl',
+        'thresholdvalue',
+        'bindpoint',
+        'invoke',
+        'labeltype',
+        'labelname',
+        'version',
+        'totalservices',
+        'activeservices',
+        'statechangetimesec',
+        'statechangetimeseconds',
+        'statechangetimemsec',
+        'tickssincelaststatechange',
+        'isgslb',
+        'vsvrdynconnsothreshold',
+        'backupvserverstatus',
+        '__count',
+    ]
+
+    immutable_attrs = [
+        'name',
+        'servicetype',
+        'ipv46',
+        'port',
+        'range',
+        'state',
+        'redirurl',
+        'vipheader',
+        'newservicerequestunit',
+        'td',
+    ]
+
+    transforms = {
+        'rtspnat': ['bool_on_off'],
+        'authn401': ['bool_on_off'],
+        'bypassaaaa': ['bool_yes_no'],
+        'authentication': ['bool_on_off'],
+        'cacheable': ['bool_yes_no'],
+        'l2conn': ['bool_on_off'],
+        'pushmulticlients': ['bool_yes_no'],
+        'recursionavailable': ['bool_yes_no'],
+        'sessionless': [lambda v: v.upper()],
+        'sopersistence': [lambda v: v.upper()],
+        'redirectportrewrite': [lambda v: v.upper()],
+        'downstateflush': [lambda v: v.upper()],
+        'disableprimaryondown': [lambda v: v.upper()],
+        'push': [lambda v: v.upper()],
+        'appflowlog': [lambda v: v.upper()],
+        'macmoderetainvlan': [lambda v: v.upper()],
+        'dbslb': [lambda v: v.upper()],
+        'dns64': [lambda v: v.upper()],
+        'processlocal': [lambda v: v.upper()],
+    }
+
+    lbvserver_proxy = ConfigProxy(
+        actual=lbvserver(),
+        client=client,
attribute_values_dict=module.params, + readwrite_attrs=readwrite_attrs, + readonly_attrs=readonly_attrs, + immutable_attrs=immutable_attrs, + transforms=transforms, + ) + + try: + ensure_feature_is_enabled(client, 'LB') + if module.params['state'] == 'present': + log('Applying actions for state present') + + if not lb_vserver_exists(client, module): + log('Add lb vserver') + if not module.check_mode: + lbvserver_proxy.add() + if module.params['save_config']: + client.save_config() + module_result['changed'] = True + elif not lb_vserver_identical(client, module, lbvserver_proxy): + + # Check if we try to change value of immutable attributes + diff_dict = lb_vserver_diff(client, module, lbvserver_proxy) + immutables_changed = get_immutables_intersection(lbvserver_proxy, diff_dict.keys()) + if immutables_changed != []: + msg = 'Cannot update immutable attributes %s. Must delete and recreate entity.' % (immutables_changed,) + module.fail_json(msg=msg, diff=diff_dict, **module_result) + + log('Update lb vserver') + if not module.check_mode: + lbvserver_proxy.update() + if module.params['save_config']: + client.save_config() + module_result['changed'] = True + else: + log('Present noop') + + if not service_bindings_identical(client, module): + if not module.check_mode: + sync_service_bindings(client, module) + if module.params['save_config']: + client.save_config() + module_result['changed'] = True + + if not servicegroup_bindings_identical(client, module): + if not module.check_mode: + sync_servicegroup_bindings(client, module) + if module.params['save_config']: + client.save_config() + module_result['changed'] = True + + if module.params['servicetype'] != 'SSL' and module.params['ssl_certkey'] is not None: + module.fail_json(msg='ssl_certkey is applicable only to SSL vservers', **module_result) + + # Check if SSL certkey is sane + if module.params['servicetype'] == 'SSL': + if not ssl_certkey_bindings_identical(client, module): + if not module.check_mode: + ssl_certkey_bindings_sync(client, module) + + module_result['changed'] = True + + if not module.check_mode: + res = do_state_change(client, module, lbvserver_proxy) + if res.errorcode != 0: + msg = 'Error when setting disabled state. 
errorcode: %s message: %s' % (res.errorcode, res.message)
+                    module.fail_json(msg=msg, **module_result)
+
+            # Sanity check
+            log('Sanity checks for state present')
+            if not module.check_mode:
+                if not lb_vserver_exists(client, module):
+                    module.fail_json(msg='Did not create lb vserver', **module_result)
+
+                if not lb_vserver_identical(client, module, lbvserver_proxy):
+                    msg = 'lb vserver is not configured correctly'
+                    module.fail_json(msg=msg, diff=lb_vserver_diff(client, module, lbvserver_proxy), **module_result)
+
+                if not service_bindings_identical(client, module):
+                    module.fail_json(msg='service bindings are not identical', **module_result)
+
+                if not servicegroup_bindings_identical(client, module):
+                    module.fail_json(msg='servicegroup bindings are not identical', **module_result)
+
+                if module.params['servicetype'] == 'SSL':
+                    if not ssl_certkey_bindings_identical(client, module):
+                        module.fail_json(msg='ssl certkey bindings not identical', **module_result)
+
+        elif module.params['state'] == 'absent':
+            log('Applying actions for state absent')
+            if lb_vserver_exists(client, module):
+                if not module.check_mode:
+                    log('Delete lb vserver')
+                    lbvserver_proxy.delete()
+                    if module.params['save_config']:
+                        client.save_config()
+                module_result['changed'] = True
+            else:
+                log('Absent noop')
+                module_result['changed'] = False
+
+            # Sanity check
+            log('Sanity checks for state absent')
+            if not module.check_mode:
+                if lb_vserver_exists(client, module):
+                    module.fail_json(msg='lb vserver still exists', **module_result)
+
+    except nitro_exception as e:
+        msg = "nitro exception errorcode=%s, message=%s" % (str(e.errorcode), e.message)
+        module.fail_json(msg=msg, **module_result)
+
+    client.logout()
+    module.exit_json(**module_result)
+
+
+if __name__ == "__main__":
+    main()
diff --git a/plugins/modules/network/netscaler/netscaler_nitro_request.py b/plugins/modules/network/netscaler/netscaler_nitro_request.py
new file mode 100644
index 0000000000..73e608bfec
--- /dev/null
+++ b/plugins/modules/network/netscaler/netscaler_nitro_request.py
@@ -0,0 +1,909 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2017 Citrix Systems
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+DOCUMENTATION = '''
+---
+module: netscaler_nitro_request
+short_description: Issue Nitro API requests to a Netscaler instance.
+description:
+    - Issue Nitro API requests to a Netscaler instance.
+    - This is intended to be a shorthand for using the uri Ansible module to issue the raw HTTP requests directly.
+    - It provides consistent return values and has no other dependencies apart from the base Ansible runtime environment.
+    - This module is intended to run either on the Ansible control node or a bastion (jumpserver) with access to the actual Netscaler instance.
+
+
+author: George Nikolopoulos (@giorgos-nikolopoulos)
+
+options:
+
+    nsip:
+        description:
+            - The IP address of the Netscaler or MAS instance where the Nitro API calls will be made.
+            - "The port can be specified with the colon C(:). E.g. C(192.168.1.1:555)."
+
+    nitro_user:
+        description:
+            - The username with which to authenticate to the Netscaler node.
+        required: true
+
+    nitro_pass:
+        description:
+            - The password with which to authenticate to the Netscaler node.
+        required: true
+
+    nitro_protocol:
+        choices: [ 'http', 'https' ]
+        default: http
+        description:
+            - Which protocol to use when accessing the Nitro API objects.
+
+    validate_certs:
+        description:
+            - If C(no), SSL certificates will not be validated. This should only be used on personally controlled sites using self-signed certificates.
+        default: 'yes'
+        type: bool
+
+    nitro_auth_token:
+        description:
+            - The authentication token provided by the C(mas_login) operation. It is required when issuing Nitro API calls through a MAS proxy.
+
+    resource:
+        description:
+            - The type of resource we are operating on.
+            - It is required for all I(operation) values except C(mas_login) and C(save_config).
+
+    name:
+        description:
+            - The name of the resource we are operating on.
+            - "It is required for the following I(operation) values: C(update), C(get), C(delete)."
+
+    attributes:
+        description:
+            - The attributes of the Nitro object we are operating on.
+            - "It is required for the following I(operation) values: C(add), C(update), C(action)."
+
+    args:
+        description:
+            - A dictionary which defines the key arguments by which we will select the Nitro object to operate on.
+            - "It is required for the following I(operation) values: C(get_by_args), C(delete_by_args)."
+
+    filter:
+        description:
+            - A dictionary which defines the filter with which to refine the Nitro objects returned by the C(get_filtered) I(operation).
+
+    operation:
+        description:
+            - Define the Nitro operation that we want to perform.
+        choices:
+            - add
+            - update
+            - get
+            - get_by_args
+            - get_filtered
+            - get_all
+            - delete
+            - delete_by_args
+            - count
+            - mas_login
+            - save_config
+            - action
+
+    expected_nitro_errorcode:
+        description:
+            - A list of numeric values that signify that the operation was successful.
+        default: [0]
+        required: false
+
+    action:
+        description:
+            - The action to perform when the I(operation) value is set to C(action).
+            - Some common values for this parameter are C(enable), C(disable), C(rename).
+
+    instance_ip:
+        description:
+            - The IP address of the target Netscaler instance when issuing a Nitro request through a MAS proxy.
+
+    instance_name:
+        description:
+            - The name of the target Netscaler instance when issuing a Nitro request through a MAS proxy.
+
+    instance_id:
+        description:
+            - The id of the target Netscaler instance when issuing a Nitro request through a MAS proxy.
+'''
+
+EXAMPLES = '''
+- name: Add a server
+  delegate_to: localhost
+  netscaler_nitro_request:
+    nsip: "{{ nsip }}"
+    nitro_user: "{{ nitro_user }}"
+    nitro_pass: "{{ nitro_pass }}"
+    operation: add
+    resource: server
+    name: test-server-1
+    attributes:
+      name: test-server-1
+      ipaddress: 192.168.1.1
+
+- name: Update server
+  delegate_to: localhost
+  netscaler_nitro_request:
+    nsip: "{{ nsip }}"
+    nitro_user: "{{ nitro_user }}"
+    nitro_pass: "{{ nitro_pass }}"
+    operation: update
+    resource: server
+    name: test-server-1
+    attributes:
+      name: test-server-1
+      ipaddress: 192.168.1.2
+
+- name: Get server
+  delegate_to: localhost
+  register: result
+  netscaler_nitro_request:
+    nsip: "{{ nsip }}"
+    nitro_user: "{{ nitro_user }}"
+    nitro_pass: "{{ nitro_pass }}"
+    operation: get
+    resource: server
+    name: test-server-1
+
+- name: Delete server
+  delegate_to: localhost
+  register: result
+  netscaler_nitro_request:
+    nsip: "{{ nsip }}"
+    nitro_user: "{{ nitro_user }}"
+    nitro_pass: "{{ nitro_pass }}"
+    operation: delete
+    resource: server
+    name: test-server-1
+
+- name: Rename server
+  delegate_to: localhost
+  netscaler_nitro_request:
+    nsip: "{{ nsip }}"
+    nitro_user: "{{ nitro_user }}"
+    nitro_pass: "{{ nitro_pass }}"
+    operation: action
+    action: rename
+    resource: server
+    attributes:
+      name: test-server-1
+      newname: test-server-2
+
+- name: Get server by args
+  delegate_to: localhost
+  register: result
+  netscaler_nitro_request:
+    nsip: "{{ nsip }}"
+    nitro_user: "{{ nitro_user }}"
+    nitro_pass: "{{ nitro_pass }}"
+    operation: get_by_args
+    resource: server
+    args:
+      name: test-server-1
+
+- name: Get server by filter
+  delegate_to: localhost
+  register: result
+  netscaler_nitro_request:
+    nsip: "{{ nsip }}"
+    nitro_user: "{{ nitro_user }}"
+    nitro_pass: "{{ nitro_pass }}"
+    operation: get_filtered
+    resource: server
+    filter:
+      ipaddress: 192.168.1.2
+
+# Doing a NITRO request through MAS.
+# This requires an authentication token obtained from a previous mas_login operation and passed as the nitro_auth_token parameter.
+# Also, nsip is the MAS address, and the target Netscaler IP must be defined with instance_ip.
+# The rest of the task arguments remain the same as when issuing the NITRO request directly to a Netscaler instance.
+
+- name: Do mas login
+  delegate_to: localhost
+  register: login_result
+  netscaler_nitro_request:
+    nsip: "{{ mas_ip }}"
+    nitro_user: "{{ nitro_user }}"
+    nitro_pass: "{{ nitro_pass }}"
+    operation: mas_login
+
+- name: Add resource through MAS proxy
+  delegate_to: localhost
+  netscaler_nitro_request:
+    nsip: "{{ mas_ip }}"
+    nitro_auth_token: "{{ login_result.nitro_auth_token }}"
+    instance_ip: "{{ nsip }}"
+    operation: add
+    resource: server
+    name: test-server-1
+    attributes:
+      name: test-server-1
+      ipaddress: 192.168.1.7
+'''
+
+RETURN = '''
+nitro_errorcode:
+    description: A numeric value containing the return code of the NITRO operation. When 0 the operation is successful. Any non-zero value indicates an error.
+    returned: always
+    type: int
+    sample: 0
+
+nitro_message:
+    description: A string containing a human readable explanation for the NITRO operation result.
+    returned: always
+    type: str
+    sample: Success
+
+nitro_severity:
+    description: A string describing the severity of the NITRO operation error or NONE.
+    returned: always
+    type: str
+    sample: NONE
+
+http_response_data:
+    description: A dictionary that contains all the HTTP response's data.
+ returned: always + type: dict + sample: "status: 200" + +http_response_body: + description: A string with the actual HTTP response body content if existent. If there is no HTTP response body it is an empty string. + returned: always + type: str + sample: "{ errorcode: 0, message: Done, severity: NONE }" + +nitro_object: + description: The object returned from the NITRO operation. This is applicable to the various get operations which return an object. + returned: when applicable + type: list + sample: + - + ipaddress: "192.168.1.8" + ipv6address: "NO" + maxbandwidth: "0" + name: "test-server-1" + port: 0 + sp: "OFF" + state: "ENABLED" + +nitro_auth_token: + description: The token returned by the C(mas_login) operation when successful. + returned: when applicable + type: str + sample: "##E8D7D74DDBD907EE579E8BB8FF4529655F22227C1C82A34BFC93C9539D66" +''' + + +from ansible.module_utils.urls import fetch_url +from ansible.module_utils.basic import env_fallback +from ansible.module_utils.basic import AnsibleModule +import codecs + + +class NitroAPICaller(object): + + _argument_spec = dict( + nsip=dict( + fallback=(env_fallback, ['NETSCALER_NSIP']), + ), + nitro_user=dict( + fallback=(env_fallback, ['NETSCALER_NITRO_USER']), + ), + nitro_pass=dict( + fallback=(env_fallback, ['NETSCALER_NITRO_PASS']), + no_log=True + ), + nitro_protocol=dict( + choices=['http', 'https'], + fallback=(env_fallback, ['NETSCALER_NITRO_PROTOCOL']), + default='http' + ), + validate_certs=dict( + default=True, + type='bool' + ), + nitro_auth_token=dict( + type='str', + no_log=True + ), + resource=dict(type='str'), + name=dict(type='str'), + attributes=dict(type='dict'), + + args=dict(type='dict'), + filter=dict(type='dict'), + + operation=dict( + type='str', + required=True, + choices=[ + 'add', + 'update', + 'get', + 'get_by_args', + 'get_filtered', + 'get_all', + 'delete', + 'delete_by_args', + 'count', + + 'mas_login', + + # Actions + 'save_config', + + # Generic action handler + 'action', + ] + ), + expected_nitro_errorcode=dict( + type='list', + default=[0], + ), + action=dict(type='str'), + instance_ip=dict(type='str'), + instance_name=dict(type='str'), + instance_id=dict(type='str'), + ) + + def __init__(self): + + self._module = AnsibleModule( + argument_spec=self._argument_spec, + supports_check_mode=False, + ) + + self._module_result = dict( + failed=False, + ) + + # Prepare the http headers according to module arguments + self._headers = {} + self._headers['Content-Type'] = 'application/json' + + # Check for conflicting authentication methods + have_token = self._module.params['nitro_auth_token'] is not None + have_userpass = None not in (self._module.params['nitro_user'], self._module.params['nitro_pass']) + login_operation = self._module.params['operation'] == 'mas_login' + + if have_token and have_userpass: + self.fail_module(msg='Cannot define both authentication token and username/password') + + if have_token: + self._headers['Cookie'] = "NITRO_AUTH_TOKEN=%s" % self._module.params['nitro_auth_token'] + + if have_userpass and not login_operation: + self._headers['X-NITRO-USER'] = self._module.params['nitro_user'] + self._headers['X-NITRO-PASS'] = self._module.params['nitro_pass'] + + # Do header manipulation when doing a MAS proxy call + if self._module.params['instance_ip'] is not None: + self._headers['_MPS_API_PROXY_MANAGED_INSTANCE_IP'] = self._module.params['instance_ip'] + elif self._module.params['instance_name'] is not None: + self._headers['_MPS_API_PROXY_MANAGED_INSTANCE_NAME'] = 
self._module.params['instance_name'] + elif self._module.params['instance_id'] is not None: + self._headers['_MPS_API_PROXY_MANAGED_INSTANCE_ID'] = self._module.params['instance_id'] + + def edit_response_data(self, r, info, result, success_status): + # Search for body in both http body and http data + if r is not None: + result['http_response_body'] = codecs.decode(r.read(), 'utf-8') + elif 'body' in info: + result['http_response_body'] = codecs.decode(info['body'], 'utf-8') + del info['body'] + else: + result['http_response_body'] = '' + + result['http_response_data'] = info + + # Update the nitro_* parameters according to expected success_status + # Use explicit return values from http response or deduce from http status code + + # Nitro return code in http data + result['nitro_errorcode'] = None + result['nitro_message'] = None + result['nitro_severity'] = None + + if result['http_response_body'] != '': + try: + data = self._module.from_json(result['http_response_body']) + except ValueError: + data = {} + result['nitro_errorcode'] = data.get('errorcode') + result['nitro_message'] = data.get('message') + result['nitro_severity'] = data.get('severity') + + # If we do not have the nitro errorcode from body deduce it from the http status + if result['nitro_errorcode'] is None: + # HTTP status failed + if result['http_response_data'].get('status') != success_status: + result['nitro_errorcode'] = -1 + result['nitro_message'] = result['http_response_data'].get('msg', 'HTTP status %s' % result['http_response_data']['status']) + result['nitro_severity'] = 'ERROR' + # HTTP status succeeded + else: + result['nitro_errorcode'] = 0 + result['nitro_message'] = 'Success' + result['nitro_severity'] = 'NONE' + + def handle_get_return_object(self, result): + result['nitro_object'] = [] + if result['nitro_errorcode'] == 0: + if result['http_response_body'] != '': + data = self._module.from_json(result['http_response_body']) + if self._module.params['resource'] in data: + result['nitro_object'] = data[self._module.params['resource']] + else: + del result['nitro_object'] + + def fail_module(self, msg, **kwargs): + self._module_result['failed'] = True + self._module_result['changed'] = False + self._module_result.update(kwargs) + self._module_result['msg'] = msg + self._module.fail_json(**self._module_result) + + def main(self): + if self._module.params['operation'] == 'add': + result = self.add() + + if self._module.params['operation'] == 'update': + result = self.update() + + if self._module.params['operation'] == 'delete': + result = self.delete() + + if self._module.params['operation'] == 'delete_by_args': + result = self.delete_by_args() + + if self._module.params['operation'] == 'get': + result = self.get() + + if self._module.params['operation'] == 'get_by_args': + result = self.get_by_args() + + if self._module.params['operation'] == 'get_filtered': + result = self.get_filtered() + + if self._module.params['operation'] == 'get_all': + result = self.get_all() + + if self._module.params['operation'] == 'count': + result = self.count() + + if self._module.params['operation'] == 'mas_login': + result = self.mas_login() + + if self._module.params['operation'] == 'action': + result = self.action() + + if self._module.params['operation'] == 'save_config': + result = self.save_config() + + if result['nitro_errorcode'] not in self._module.params['expected_nitro_errorcode']: + self.fail_module(msg='NITRO Failure', **result) + + self._module_result.update(result) + self._module.exit_json(**self._module_result) 
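The trickiest part of edit_response_data() above is the fallback when the response body carries no explicit NITRO errorcode: the HTTP status is compared against the operation's expected success status. A self-contained sketch of just that decision, with hypothetical inputs (the helper name is illustrative, not part of the module):

def deduce_nitro_result(body_errorcode, http_status, success_status):
    """Mirror the fallback: an explicit body errorcode wins, else HTTP status decides."""
    if body_errorcode is not None:
        return body_errorcode, 'from body'          # NITRO spoke for itself
    if http_status != success_status:
        return -1, 'HTTP status %s' % http_status   # HTTP-level failure
    return 0, 'Success'                             # HTTP-level success

print(deduce_nitro_result(None, 201, 201))   # (0, 'Success')
print(deduce_nitro_result(None, 409, 200))   # (-1, 'HTTP status 409')
print(deduce_nitro_result(258, 200, 200))    # (258, 'from body')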
+
+    def exit_module(self):
+        self._module.exit_json()
+
+    def add(self):
+        # Check if required attributes are present
+        if self._module.params['resource'] is None:
+            self.fail_module(msg='NITRO resource is undefined.')
+        if self._module.params['attributes'] is None:
+            self.fail_module(msg='NITRO resource attributes are undefined.')
+
+        url = '%s://%s/nitro/v1/config/%s' % (
+            self._module.params['nitro_protocol'],
+            self._module.params['nsip'],
+            self._module.params['resource'],
+        )
+
+        data = self._module.jsonify({self._module.params['resource']: self._module.params['attributes']})
+
+        r, info = fetch_url(
+            self._module,
+            url=url,
+            headers=self._headers,
+            data=data,
+            method='POST',
+        )
+
+        result = {}
+
+        self.edit_response_data(r, info, result, success_status=201)
+
+        if result['nitro_errorcode'] == 0:
+            self._module_result['changed'] = True
+        else:
+            self._module_result['changed'] = False
+
+        return result
+
+    def update(self):
+        # Check if required arguments are present
+        if self._module.params['resource'] is None:
+            self.fail_module(msg='NITRO resource is undefined.')
+        if self._module.params['name'] is None:
+            self.fail_module(msg='NITRO resource name is undefined.')
+
+        if self._module.params['attributes'] is None:
+            self.fail_module(msg='NITRO resource attributes are undefined.')
+
+        url = '%s://%s/nitro/v1/config/%s/%s' % (
+            self._module.params['nitro_protocol'],
+            self._module.params['nsip'],
+            self._module.params['resource'],
+            self._module.params['name'],
+        )
+
+        data = self._module.jsonify({self._module.params['resource']: self._module.params['attributes']})
+
+        r, info = fetch_url(
+            self._module,
+            url=url,
+            headers=self._headers,
+            data=data,
+            method='PUT',
+        )
+
+        result = {}
+        self.edit_response_data(r, info, result, success_status=200)
+
+        if result['nitro_errorcode'] == 0:
+            self._module_result['changed'] = True
+        else:
+            self._module_result['changed'] = False
+
+        return result
+
+    def get(self):
+        if self._module.params['resource'] is None:
+            self.fail_module(msg='NITRO resource is undefined.')
+        if self._module.params['name'] is None:
+            self.fail_module(msg='NITRO resource name is undefined.')
+
+        url = '%s://%s/nitro/v1/config/%s/%s' % (
+            self._module.params['nitro_protocol'],
+            self._module.params['nsip'],
+            self._module.params['resource'],
+            self._module.params['name'],
+        )
+
+        r, info = fetch_url(
+            self._module,
+            url=url,
+            headers=self._headers,
+            method='GET',
+        )
+
+        result = {}
+        self.edit_response_data(r, info, result, success_status=200)
+
+        self.handle_get_return_object(result)
+        self._module_result['changed'] = False
+
+        return result
+
+    def get_by_args(self):
+        if self._module.params['resource'] is None:
+            self.fail_module(msg='NITRO resource is undefined.')
+
+        if self._module.params['args'] is None:
+            self.fail_module(msg='NITRO args is undefined.')
+
+        url = '%s://%s/nitro/v1/config/%s' % (
+            self._module.params['nitro_protocol'],
+            self._module.params['nsip'],
+            self._module.params['resource'],
+        )
+
+        args_dict = self._module.params['args']
+        args = ','.join(['%s:%s' % (k, args_dict[k]) for k in args_dict])
+
+        args = 'args=' + args
+
+        url = '?'.join([url, args])
+
+        r, info = fetch_url(
+            self._module,
+            url=url,
+            headers=self._headers,
+            method='GET',
+        )
+        result = {}
+        self.edit_response_data(r, info, result, success_status=200)
+
+        self.handle_get_return_object(result)
+        self._module_result['changed'] = False
+
+        return result
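get_by_args() above serializes the args dict into a NITRO args query string of the form args=key1:value1,key2:value2. A standalone sketch of that URL construction (values are passed unencoded, mirroring the code above; host and resource values are placeholders):

def build_get_by_args_url(protocol, nsip, resource, args_dict):
    # Render each key/value pair as key:value and join with commas,
    # exactly as the method above does before appending to the URL.
    args = ','.join('%s:%s' % (k, v) for k, v in args_dict.items())
    return '%s://%s/nitro/v1/config/%s?args=%s' % (protocol, nsip, resource, args)

print(build_get_by_args_url('http', '192.168.1.1', 'server', {'name': 'test-server-1'}))
# http://192.168.1.1/nitro/v1/config/server?args=name:test-server-1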
+    def get_filtered(self):
+        if self._module.params['resource'] is None:
+            self.fail_module(msg='NITRO resource is undefined.')
+
+        if self._module.params['filter'] is None:
+            self.fail_module(msg='NITRO filter is undefined.')
+
+        keys = list(self._module.params['filter'].keys())
+        filter_key = keys[0]
+        filter_value = self._module.params['filter'][filter_key]
+        filter_str = '%s:%s' % (filter_key, filter_value)
+
+        url = '%s://%s/nitro/v1/config/%s?filter=%s' % (
+            self._module.params['nitro_protocol'],
+            self._module.params['nsip'],
+            self._module.params['resource'],
+            filter_str,
+        )
+
+        r, info = fetch_url(
+            self._module,
+            url=url,
+            headers=self._headers,
+            method='GET',
+        )
+
+        result = {}
+        self.edit_response_data(r, info, result, success_status=200)
+        self.handle_get_return_object(result)
+        self._module_result['changed'] = False
+
+        return result
+
+    def get_all(self):
+        if self._module.params['resource'] is None:
+            self.fail_module(msg='NITRO resource is undefined.')
+
+        url = '%s://%s/nitro/v1/config/%s' % (
+            self._module.params['nitro_protocol'],
+            self._module.params['nsip'],
+            self._module.params['resource'],
+        )
+
+        r, info = fetch_url(
+            self._module,
+            url=url,
+            headers=self._headers,
+            method='GET',
+        )
+
+        result = {}
+        self.edit_response_data(r, info, result, success_status=200)
+        self.handle_get_return_object(result)
+        self._module_result['changed'] = False
+
+        return result
+
+    def delete(self):
+        if self._module.params['resource'] is None:
+            self.fail_module(msg='NITRO resource is undefined.')
+
+        if self._module.params['name'] is None:
+            self.fail_module(msg='NITRO resource name is undefined.')
+
+        # Deletion by name takes precedence over deletion by attributes
+
+        url = '%s://%s/nitro/v1/config/%s/%s' % (
+            self._module.params['nitro_protocol'],
+            self._module.params['nsip'],
+            self._module.params['resource'],
+            self._module.params['name'],
+        )
+
+        r, info = fetch_url(
+            self._module,
+            url=url,
+            headers=self._headers,
+            method='DELETE',
+        )
+
+        result = {}
+        self.edit_response_data(r, info, result, success_status=200)
+
+        if result['nitro_errorcode'] == 0:
+            self._module_result['changed'] = True
+        else:
+            self._module_result['changed'] = False
+
+        return result
+
+    def delete_by_args(self):
+        if self._module.params['resource'] is None:
+            self.fail_module(msg='NITRO resource is undefined.')
+
+        if self._module.params['args'] is None:
+            self.fail_module(msg='NITRO args is undefined.')
+
+        url = '%s://%s/nitro/v1/config/%s' % (
+            self._module.params['nitro_protocol'],
+            self._module.params['nsip'],
+            self._module.params['resource'],
+        )
+
+        args_dict = self._module.params['args']
+        args = ','.join(['%s:%s' % (k, args_dict[k]) for k in args_dict])
+
+        args = 'args=' + args
+
+        url = '?'.join([url, args])
+        r, info = fetch_url(
+            self._module,
+            url=url,
+            headers=self._headers,
+            method='DELETE',
+        )
+        result = {}
+        self.edit_response_data(r, info, result, success_status=200)
+
+        if result['nitro_errorcode'] == 0:
+            self._module_result['changed'] = True
+        else:
+            self._module_result['changed'] = False
+
+        return result
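The count() method that follows expects GET /nitro/v1/config/<resource>?count=yes to return a body whose resource key holds a single-element list carrying __count. A sketch of that parsing against a hypothetical response body:

import json

# Hypothetical NITRO response to a ?count=yes query for the `server` resource.
response_body = '{"errorcode": 0, "message": "Done", "severity": "NONE", "server": [{"__count": "3"}]}'

data = json.loads(response_body)
resource = 'server'
if resource in data:
    nitro_count = data[resource][0]['__count']  # the count rides inside the first element
    print(nitro_count)  # 3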
+    def count(self):
+        if self._module.params['resource'] is None:
+            self.fail_module(msg='NITRO resource is undefined.')
+
+        url = '%s://%s/nitro/v1/config/%s?count=yes' % (
+            self._module.params['nitro_protocol'],
+            self._module.params['nsip'],
+            self._module.params['resource'],
+        )
+
+        r, info = fetch_url(
+            self._module,
+            url=url,
+            headers=self._headers,
+            method='GET',
+        )
+
+        result = {}
+        self.edit_response_data(r, info, result, success_status=200)
+
+        if result['http_response_body'] != '':
+            data = self._module.from_json(result['http_response_body'])
+
+            result['nitro_errorcode'] = data['errorcode']
+            result['nitro_message'] = data['message']
+            result['nitro_severity'] = data['severity']
+            if self._module.params['resource'] in data:
+                result['nitro_count'] = data[self._module.params['resource']][0]['__count']
+
+        self._module_result['changed'] = False
+
+        return result
+
+    def action(self):
+        # Check if required attributes are present
+        if self._module.params['resource'] is None:
+            self.fail_module(msg='NITRO resource is undefined.')
+        if self._module.params['attributes'] is None:
+            self.fail_module(msg='NITRO resource attributes are undefined.')
+        if self._module.params['action'] is None:
+            self.fail_module(msg='NITRO action is undefined.')
+
+        url = '%s://%s/nitro/v1/config/%s?action=%s' % (
+            self._module.params['nitro_protocol'],
+            self._module.params['nsip'],
+            self._module.params['resource'],
+            self._module.params['action'],
+        )
+
+        data = self._module.jsonify({self._module.params['resource']: self._module.params['attributes']})
+
+        r, info = fetch_url(
+            self._module,
+            url=url,
+            headers=self._headers,
+            data=data,
+            method='POST',
+        )
+
+        result = {}
+
+        self.edit_response_data(r, info, result, success_status=200)
+
+        if result['nitro_errorcode'] == 0:
+            self._module_result['changed'] = True
+        else:
+            self._module_result['changed'] = False
+
+        return result
+
+    def mas_login(self):
+        url = '%s://%s/nitro/v1/config/login' % (
+            self._module.params['nitro_protocol'],
+            self._module.params['nsip'],
+        )
+
+        login_credentials = {
+            'login': {
+                'username': self._module.params['nitro_user'],
+                'password': self._module.params['nitro_pass'],
+            }
+        }
+
+        data = 'object=\n%s' % self._module.jsonify(login_credentials)
+
+        r, info = fetch_url(
+            self._module,
+            url=url,
+            headers=self._headers,
+            data=data,
+            method='POST',
+        )
+
+        result = {}
+        self.edit_response_data(r, info, result, success_status=200)
+
+        if result['nitro_errorcode'] == 0:
+            body_data = self._module.from_json(result['http_response_body'])
+            result['nitro_auth_token'] = body_data['login'][0]['sessionid']
+
+        self._module_result['changed'] = False
+
+        return result
+
+    def save_config(self):
+
+        url = '%s://%s/nitro/v1/config/nsconfig?action=save' % (
+            self._module.params['nitro_protocol'],
+            self._module.params['nsip'],
+        )
+
+        data = self._module.jsonify(
+            {
+                'nsconfig': {},
+            }
+        )
+        r, info = fetch_url(
+            self._module,
+            url=url,
+            headers=self._headers,
+            data=data,
+            method='POST',
+        )
+
+        result = {}
+
+        self.edit_response_data(r, info, result, success_status=200)
+        self._module_result['changed'] = False
+
+        return result
+
+
+def main():
+
+    nitro_api_caller = NitroAPICaller()
+    nitro_api_caller.main()
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/network/netscaler/netscaler_save_config.py b/plugins/modules/network/netscaler/netscaler_save_config.py
new file mode 100644
index 0000000000..6b4ec1ff5a
--- /dev/null
+++ b/plugins/modules/network/netscaler/netscaler_save_config.py
@@ -0,0 +1,177 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2017 Citrix Systems
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: netscaler_save_config
+short_description: Save Netscaler
configuration. +description: + - This module unconditionally saves the configuration on the target netscaler node. + - This module does not support check mode. + - This module is intended to run either on the ansible control node or a bastion (jumpserver) with access to the actual netscaler instance. + + +author: George Nikolopoulos (@giorgos-nikolopoulos) + +options: + nsip: + description: + - The ip address of the netscaler appliance where the nitro API calls will be made. + - "The port can be specified with the colon (:). E.g. C(192.168.1.1:555)." + required: True + + nitro_user: + description: + - The username with which to authenticate to the netscaler node. + required: True + + nitro_pass: + description: + - The password with which to authenticate to the netscaler node. + required: True + + nitro_protocol: + choices: [ 'http', 'https' ] + default: http + description: + - Which protocol to use when accessing the nitro API objects. + + validate_certs: + description: + - If C(no), SSL certificates will not be validated. This should only be used on personally controlled sites using self-signed certificates. + required: false + default: 'yes' + type: bool + + nitro_timeout: + description: + - Time in seconds until a timeout error is thrown when establishing a new session with Netscaler. + default: 310 + +requirements: + - nitro python sdk +''' + +EXAMPLES = ''' +--- +- name: Save netscaler configuration + delegate_to: localhost + netscaler_save_config: + nsip: 172.18.0.2 + nitro_user: nsroot + nitro_pass: nsroot + +- name: Setup server without saving configuration + delegate_to: localhost + notify: Save configuration + netscaler_server: + nsip: 172.18.0.2 + nitro_user: nsroot + nitro_pass: nsroot + + save_config: no + + name: server-1 + ipaddress: 192.168.1.1 + +# Under playbook's handlers + +- name: Save configuration + delegate_to: localhost + netscaler_save_config: + nsip: 172.18.0.2 + nitro_user: nsroot + nitro_pass: nsroot +''' + +RETURN = ''' +loglines: + description: list of logged messages by the module + returned: always + type: list + sample: ['message 1', 'message 2'] + +msg: + description: Message detailing the failure reason + returned: failure + type: str + sample: "Action does not exist" + +''' + +import copy + +try: + from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception + PYTHON_SDK_IMPORTED = True +except ImportError as e: + PYTHON_SDK_IMPORTED = False + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.network.netscaler.netscaler import get_nitro_client, log, loglines, netscaler_common_arguments + + +def main(): + + argument_spec = copy.deepcopy(netscaler_common_arguments) + + # Delete common arguments irrelevant to this module + del argument_spec['state'] + del argument_spec['save_config'] + + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=False, + ) + + module_result = dict( + changed=False, + failed=False, + loglines=loglines, + ) + + # Fail the module if imports failed + if not PYTHON_SDK_IMPORTED: + module.fail_json(msg='Could not load nitro python sdk') + + # Fallthrough to rest of execution + client = get_nitro_client(module) + + try: + client.login() + except nitro_exception as e: + msg = "nitro exception during login. 
errorcode=%s, message=%s" % (str(e.errorcode), e.message)
+        module.fail_json(msg=msg)
+    except Exception as e:
+        if str(type(e)) == "<class 'requests.exceptions.ConnectionError'>":
+            module.fail_json(msg='Connection error %s' % str(e))
+        elif str(type(e)) == "<class 'requests.exceptions.SSLError'>":
+            module.fail_json(msg='SSL Error %s' % str(e))
+        else:
+            module.fail_json(msg='Unexpected error during login %s' % str(e))
+
+    try:
+        log('Saving configuration')
+        client.save_config()
+    except nitro_exception as e:
+        msg = "nitro exception errorcode=%s, message=%s" % (str(e.errorcode), e.message)
+        module.fail_json(msg=msg, **module_result)
+
+    client.logout()
+    module.exit_json(**module_result)
+
+
+if __name__ == "__main__":
+    main()
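For reference, the save operation this module performs corresponds to a single NITRO call, a POST of an empty nsconfig action, as in the save_config() method of netscaler_nitro_request above. A minimal sketch without the SDK; the host and credentials below are placeholders, not values from this repository:

import json
try:
    from urllib import request  # Python 3
except ImportError:
    import urllib2 as request   # Python 2

# Build (but do not send) the same request the module issues via the SDK.
req = request.Request(
    'http://192.168.1.1/nitro/v1/config/nsconfig?action=save',
    data=json.dumps({'nsconfig': {}}).encode('utf-8'),
    headers={
        'Content-Type': 'application/json',
        'X-NITRO-USER': 'nsroot',   # placeholder credentials
        'X-NITRO-PASS': 'nsroot',
    },
)
# request.urlopen(req)  # uncomment to actually issue the call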
diff --git a/plugins/modules/network/netscaler/netscaler_server.py b/plugins/modules/network/netscaler/netscaler_server.py
new file mode 100644
index 0000000000..f6ccb07c98
--- /dev/null
+++ b/plugins/modules/network/netscaler/netscaler_server.py
@@ -0,0 +1,401 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2017 Citrix Systems
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: netscaler_server
+short_description: Manage server configuration
+description:
+    - Manage server entities configuration.
+    - This module is intended to run either on the ansible control node or a bastion (jumpserver) with access to the actual netscaler instance.
+
+
+author: George Nikolopoulos (@giorgos-nikolopoulos)
+
+options:
+
+    name:
+        description:
+            - "Name for the server."
+            - >-
+                Must begin with an ASCII alphabetic or underscore C(_) character, and must contain only ASCII
+                alphanumeric, underscore C(_), hash C(#), period C(.), space C( ), colon C(:), at C(@), equals C(=), and hyphen C(-)
+                characters.
+            - "Can be changed after the name is created."
+            - "Minimum length = 1"
+
+    ipaddress:
+        description:
+            - >-
+                IPv4 or IPv6 address of the server. If you create an IP address based server, you can specify the
+                name of the server, instead of its IP address, when creating a service. Note: If you do not create a
+                server entry, the server IP address that you enter when you create a service becomes the name of the
+                server.
+
+    domain:
+        description:
+            - "Domain name of the server. For a domain based configuration, you must create the server first."
+            - "Minimum length = 1"
+
+    translationip:
+        description:
+            - "IP address used to transform the server's DNS-resolved IP address."
+
+    translationmask:
+        description:
+            - "The netmask of the translation ip."
+
+    domainresolveretry:
+        description:
+            - >-
+                Time, in seconds, for which the NetScaler appliance must wait, after DNS resolution fails, before
+                sending the next DNS query to resolve the domain name.
+            - "Minimum value = C(5)"
+            - "Maximum value = C(20939)"
+        default: 5
+
+    ipv6address:
+        description:
+            - >-
+                Support IPv6 addressing mode. If you configure a server with the IPv6 addressing mode, you cannot use
+                the server in the IPv4 addressing mode.
+        default: false
+        type: bool
+
+    comment:
+        description:
+            - "Any information about the server."
+
+    td:
+        description:
+            - >-
+                Integer value that uniquely identifies the traffic domain in which you want to configure the entity.
+                If you do not specify an ID, the entity becomes part of the default traffic domain, which has an ID
+                of 0.
+            - "Minimum value = C(0)"
+            - "Maximum value = C(4094)"
+
+    graceful:
+        description:
+            - >-
+                Shut down gracefully, without accepting any new connections, and disabling each service when all of
+                its connections are closed.
+            - This option is meaningful only when setting the I(disabled) option to C(true)
+        type: bool
+
+    delay:
+        description:
+            - Time, in seconds, after which all the services configured on the server are disabled.
+            - This option is meaningful only when setting the I(disabled) option to C(true)
+
+    disabled:
+        description:
+            - When set to C(true) the server state will be set to C(disabled).
+            - When set to C(false) the server state will be set to C(enabled).
+            - >-
+                Note that due to limitations of the underlying NITRO API a C(disabled) state change alone
+                does not cause the module result to report a changed status.
+        type: bool
+        default: false
+
+extends_documentation_fragment:
+- community.general.netscaler
+
+requirements:
+    - nitro python sdk
+'''
+
+EXAMPLES = '''
+- name: Setup server
+  delegate_to: localhost
+  netscaler_server:
+    nsip: 172.18.0.2
+    nitro_user: nsroot
+    nitro_pass: nsroot
+
+    state: present
+
+    name: server-1
+    ipaddress: 192.168.1.1
+'''
+
+RETURN = '''
+loglines:
+    description: list of logged messages by the module
+    returned: always
+    type: list
+    sample: ['message 1', 'message 2']
+
+msg:
+    description: Message detailing the failure reason
+    returned: failure
+    type: str
+    sample: "Action does not exist"
+
+diff:
+    description: List of differences between the actual configured object and the configuration specified in the module
+    returned: failure
+    type: dict
+    sample: { 'targetlbvserver': 'difference. ours: (str) server1 other: (str) server2' }
+'''
+
+try:
+    from nssrc.com.citrix.netscaler.nitro.resource.config.basic.server import server
+    from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
+    PYTHON_SDK_IMPORTED = True
+except ImportError as e:
+    PYTHON_SDK_IMPORTED = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.network.netscaler.netscaler import ConfigProxy, get_nitro_client, netscaler_common_arguments, log, loglines, \
+    get_immutables_intersection
+
+
+def server_exists(client, module):
+    log('Checking if server exists')
+    if server.count_filtered(client, 'name:%s' % module.params['name']) > 0:
+        return True
+    else:
+        return False
+
+
+def server_identical(client, module, server_proxy):
+    log('Checking if configured server is identical')
+    if server.count_filtered(client, 'name:%s' % module.params['name']) == 0:
+        return False
+    diff = diff_list(client, module, server_proxy)
+
+    # Remove options that are not present in nitro server object
+    # These are special options relevant to the disabled action
+    for option in ['graceful', 'delay']:
+        if option in diff:
+            del diff[option]
+
+    if diff == {}:
+        return True
+    else:
+        return False
+
+
+def diff_list(client, module, server_proxy):
+    return server_proxy.diff_object(server.get_filtered(client, 'name:%s' % module.params['name'])[0])
+
+
+def do_state_change(client, module, server_proxy):
+    if module.params['disabled']:
+        log('Disabling server')
+        result = server.disable(client, server_proxy.actual)
+    else:
+        log('Enabling server')
+        result = server.enable(client, server_proxy.actual)
+    return result
+
+
+def main():
+
+    module_specific_arguments = dict(
+        name=dict(type='str'),
+        ipaddress=dict(type='str'),
+        domain=dict(type='str'),
+        translationip=dict(type='str'),
+        translationmask=dict(type='str'),
+        domainresolveretry=dict(type='int'),
+        ipv6address=dict(
+            type='bool',
+            default=False
+        ),
+        comment=dict(type='str'),
+        td=dict(type='float'),
+        graceful=dict(type='bool'),
+        delay=dict(type='float')
+    )
+
+    hand_inserted_arguments = dict(
+        disabled=dict(
+            type='bool',
+            default=False,
+        ),
+    )
+
+    argument_spec = dict()
+
+    argument_spec.update(netscaler_common_arguments)
+    argument_spec.update(module_specific_arguments)
+    argument_spec.update(hand_inserted_arguments)
+
+    module = AnsibleModule(
+        argument_spec=argument_spec,
+        supports_check_mode=True,
+    )
+    module_result = dict(
+        changed=False,
+        failed=False,
+        loglines=loglines,
+    )
+
+    # Fail the module if imports failed
+    if not PYTHON_SDK_IMPORTED:
+        module.fail_json(msg='Could not load nitro python sdk')
+
+    # Fallthrough to rest of execution
+
+    client = get_nitro_client(module)
+    try:
+        client.login()
+    except nitro_exception as e:
+        msg = "nitro exception during login. errorcode=%s, message=%s" % (str(e.errorcode), e.message)
+        module.fail_json(msg=msg)
+    except Exception as e:
+        if str(type(e)) == "<class 'requests.exceptions.ConnectionError'>":
+            module.fail_json(msg='Connection error %s' % str(e))
+        elif str(type(e)) == "<class 'requests.exceptions.SSLError'>":
+            module.fail_json(msg='SSL Error %s' % str(e))
+        else:
+            module.fail_json(msg='Unexpected error during login %s' % str(e))
+
+    # Instantiate Server Config object
+    readwrite_attrs = [
+        'name',
+        'ipaddress',
+        'domain',
+        'translationip',
+        'translationmask',
+        'domainresolveretry',
+        'ipv6address',
+        'graceful',
+        'delay',
+        'comment',
+        'td',
+    ]
+
+    readonly_attrs = [
+        'statechangetimesec',
+        'tickssincelaststatechange',
+        'autoscale',
+        'customserverid',
+        'monthreshold',
+        'maxclient',
+        'maxreq',
+        'maxbandwidth',
+        'usip',
+        'cka',
+        'tcpb',
+        'cmp',
+        'clttimeout',
+        'svrtimeout',
+        'cipheader',
+        'cip',
+        'cacheable',
+        'sc',
+        'sp',
+        'downstateflush',
+        'appflowlog',
+        'boundtd',
+        '__count',
+    ]
+
+    immutable_attrs = [
+        'name',
+        'domain',
+        'ipv6address',
+        'td',
+    ]
+
+    transforms = {
+        'graceful': ['bool_yes_no'],
+        'ipv6address': ['bool_yes_no'],
+    }
+
+    server_proxy = ConfigProxy(
+        actual=server(),
+        client=client,
+        attribute_values_dict=module.params,
+        readwrite_attrs=readwrite_attrs,
+        readonly_attrs=readonly_attrs,
+        immutable_attrs=immutable_attrs,
+        transforms=transforms,
+    )
+
+    try:
+
+        # Apply appropriate state
+        if module.params['state'] == 'present':
+            log('Applying actions for state present')
+            if not server_exists(client, module):
+                if not module.check_mode:
+                    server_proxy.add()
+                    if module.params['save_config']:
+                        client.save_config()
+                module_result['changed'] = True
+            elif not server_identical(client, module, server_proxy):
+
+                # Check if we try to change value of immutable attributes
+                immutables_changed = get_immutables_intersection(server_proxy, diff_list(client, module, server_proxy).keys())
+                if immutables_changed != []:
+                    msg = 'Cannot update immutable attributes %s' % (immutables_changed,)
+                    module.fail_json(msg=msg, diff=diff_list(client, module, server_proxy), **module_result)
+                if not module.check_mode:
+                    server_proxy.update()
+                    if module.params['save_config']:
+                        client.save_config()
+                module_result['changed'] = True
+            else:
+                module_result['changed'] = False
+
+            if not module.check_mode:
+                res = do_state_change(client, module, server_proxy)
+                if res.errorcode != 0:
+                    msg = 'Error when setting disabled state. 
errorcode: %s message: %s' % (res.errorcode, res.message) + module.fail_json(msg=msg, **module_result) + + # Sanity check for result + log('Sanity checks for state present') + if not module.check_mode: + if not server_exists(client, module): + module.fail_json(msg='Server does not seem to exist', **module_result) + if not server_identical(client, module, server_proxy): + module.fail_json( + msg='Server is not configured according to parameters given', + diff=diff_list(client, module, server_proxy), + **module_result + ) + + elif module.params['state'] == 'absent': + log('Applying actions for state absent') + if server_exists(client, module): + if not module.check_mode: + server_proxy.delete() + if module.params['save_config']: + client.save_config() + module_result['changed'] = True + else: + module_result['changed'] = False + + # Sanity check for result + log('Sanity checks for state absent') + if not module.check_mode: + if server_exists(client, module): + module.fail_json(msg='Server seems to be present', **module_result) + + except nitro_exception as e: + msg = "nitro exception errorcode=%s, message=%s" % (str(e.errorcode), e.message) + module.fail_json(msg=msg, **module_result) + + client.logout() + module.exit_json(**module_result) + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/network/netscaler/netscaler_service.py b/plugins/modules/network/netscaler/netscaler_service.py new file mode 100644 index 0000000000..418e4bbfcb --- /dev/null +++ b/plugins/modules/network/netscaler/netscaler_service.py @@ -0,0 +1,964 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2017 Citrix Systems +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: netscaler_service +short_description: Manage service configuration in Netscaler +description: + - Manage service configuration in Netscaler. + - This module allows the creation, deletion and modification of Netscaler services. + - This module is intended to run either on the ansible control node or a bastion (jumpserver) with access to the actual netscaler instance. + - This module supports check mode. + + +author: George Nikolopoulos (@giorgos-nikolopoulos) + +options: + + name: + description: + - >- + Name for the service. Must begin with an ASCII alphabetic or underscore C(_) character, and must + contain only ASCII alphanumeric, underscore C(_), hash C(#), period C(.), space C( ), colon C(:), at C(@), equals + C(=), and hyphen C(-) characters. Cannot be changed after the service has been created. + - "Minimum length = 1" + + ip: + description: + - "IP to assign to the service." + - "Minimum length = 1" + + servername: + description: + - "Name of the server that hosts the service." 
+ - "Minimum length = 1" + + servicetype: + choices: + - 'HTTP' + - 'FTP' + - 'TCP' + - 'UDP' + - 'SSL' + - 'SSL_BRIDGE' + - 'SSL_TCP' + - 'DTLS' + - 'NNTP' + - 'RPCSVR' + - 'DNS' + - 'ADNS' + - 'SNMP' + - 'RTSP' + - 'DHCPRA' + - 'ANY' + - 'SIP_UDP' + - 'SIP_TCP' + - 'SIP_SSL' + - 'DNS_TCP' + - 'ADNS_TCP' + - 'MYSQL' + - 'MSSQL' + - 'ORACLE' + - 'RADIUS' + - 'RADIUSListener' + - 'RDP' + - 'DIAMETER' + - 'SSL_DIAMETER' + - 'TFTP' + - 'SMPP' + - 'PPTP' + - 'GRE' + - 'SYSLOGTCP' + - 'SYSLOGUDP' + - 'FIX' + - 'SSL_FIX' + description: + - "Protocol in which data is exchanged with the service." + + port: + description: + - "Port number of the service." + - "Range 1 - 65535" + - "* in CLI is represented as 65535 in NITRO API" + + cleartextport: + description: + - >- + Port to which clear text data must be sent after the appliance decrypts incoming SSL traffic. + Applicable to transparent SSL services. + - "Minimum value = 1" + + cachetype: + choices: + - 'TRANSPARENT' + - 'REVERSE' + - 'FORWARD' + description: + - "Cache type supported by the cache server." + + maxclient: + description: + - "Maximum number of simultaneous open connections to the service." + - "Minimum value = 0" + - "Maximum value = 4294967294" + + healthmonitor: + description: + - "Monitor the health of this service" + default: yes + type: bool + + maxreq: + description: + - "Maximum number of requests that can be sent on a persistent connection to the service." + - "Note: Connection requests beyond this value are rejected." + - "Minimum value = 0" + - "Maximum value = 65535" + + cacheable: + description: + - "Use the transparent cache redirection virtual server to forward requests to the cache server." + - "Note: Do not specify this parameter if you set the Cache Type parameter." + default: no + type: bool + + cip: + choices: + - 'enabled' + - 'disabled' + description: + - >- + Before forwarding a request to the service, insert an HTTP header with the client's IPv4 or IPv6 + address as its value. Used if the server needs the client's IP address for security, accounting, or + other purposes, and setting the Use Source IP parameter is not a viable option. + + cipheader: + description: + - >- + Name for the HTTP header whose value must be set to the IP address of the client. Used with the + Client IP parameter. If you set the Client IP parameter, and you do not specify a name for the + header, the appliance uses the header name specified for the global Client IP Header parameter (the + cipHeader parameter in the set ns param CLI command or the Client IP Header parameter in the + Configure HTTP Parameters dialog box at System > Settings > Change HTTP parameters). If the global + Client IP Header parameter is not specified, the appliance inserts a header with the name + "client-ip.". + - "Minimum length = 1" + + usip: + description: + - >- + Use the client's IP address as the source IP address when initiating a connection to the server. When + creating a service, if you do not set this parameter, the service inherits the global Use Source IP + setting (available in the enable ns mode and disable ns mode CLI commands, or in the System > + Settings > Configure modes > Configure Modes dialog box). However, you can override this setting + after you create the service. + type: bool + + pathmonitor: + description: + - "Path monitoring for clustering." + + pathmonitorindv: + description: + - "Individual Path monitoring decisions." 
+ + useproxyport: + description: + - >- + Use the proxy port as the source port when initiating connections with the server. With the NO + setting, the client-side connection port is used as the source port for the server-side connection. + - "Note: This parameter is available only when the Use Source IP (USIP) parameter is set to YES." + type: bool + + sp: + description: + - "Enable surge protection for the service." + type: bool + + rtspsessionidremap: + description: + - "Enable RTSP session ID mapping for the service." + default: off + type: bool + + clttimeout: + description: + - "Time, in seconds, after which to terminate an idle client connection." + - "Minimum value = 0" + - "Maximum value = 31536000" + + svrtimeout: + description: + - "Time, in seconds, after which to terminate an idle server connection." + - "Minimum value = 0" + - "Maximum value = 31536000" + + customserverid: + description: + - >- + Unique identifier for the service. Used when the persistency type for the virtual server is set to + Custom Server ID. + default: 'None' + + serverid: + description: + - "The identifier for the service. This is used when the persistency type is set to Custom Server ID." + + cka: + description: + - "Enable client keep-alive for the service." + type: bool + + tcpb: + description: + - "Enable TCP buffering for the service." + type: bool + + cmp: + description: + - "Enable compression for the service." + type: bool + + maxbandwidth: + description: + - "Maximum bandwidth, in Kbps, allocated to the service." + - "Minimum value = 0" + - "Maximum value = 4294967287" + + accessdown: + description: + - >- + Use Layer 2 mode to bridge the packets sent to this service if it is marked as DOWN. If the service + is DOWN, and this parameter is disabled, the packets are dropped. + default: no + type: bool + monthreshold: + description: + - >- + Minimum sum of weights of the monitors that are bound to this service. Used to determine whether to + mark a service as UP or DOWN. + - "Minimum value = 0" + - "Maximum value = 65535" + + downstateflush: + choices: + - 'enabled' + - 'disabled' + description: + - >- + Flush all active transactions associated with a service whose state transitions from UP to DOWN. Do + not enable this option for applications that must complete their transactions. + + tcpprofilename: + description: + - "Name of the TCP profile that contains TCP configuration settings for the service." + - "Minimum length = 1" + - "Maximum length = 127" + + httpprofilename: + description: + - "Name of the HTTP profile that contains HTTP configuration settings for the service." + - "Minimum length = 1" + - "Maximum length = 127" + + hashid: + description: + - >- + A numerical identifier that can be used by hash based load balancing methods. Must be unique for each + service. + - "Minimum value = 1" + + comment: + description: + - "Any information about the service." + + appflowlog: + choices: + - 'enabled' + - 'disabled' + description: + - "Enable logging of AppFlow information." + + netprofile: + description: + - "Network profile to use for the service." + - "Minimum length = 1" + - "Maximum length = 127" + + td: + description: + - >- + Integer value that uniquely identifies the traffic domain in which you want to configure the entity. + If you do not specify an ID, the entity becomes part of the default traffic domain, which has an ID + of 0. 
+ - "Minimum value = 0" + - "Maximum value = 4094" + + processlocal: + choices: + - 'enabled' + - 'disabled' + description: + - >- + By turning on this option packets destined to a service in a cluster will not under go any steering. + Turn this option for single packet request response mode or when the upstream device is performing a + proper RSS for connection based distribution. + + dnsprofilename: + description: + - >- + Name of the DNS profile to be associated with the service. DNS profile properties will applied to the + transactions processed by a service. This parameter is valid only for ADNS and ADNS-TCP services. + - "Minimum length = 1" + - "Maximum length = 127" + + ipaddress: + description: + - "The new IP address of the service." + + graceful: + description: + - >- + Shut down gracefully, not accepting any new connections, and disabling the service when all of its + connections are closed. + default: no + type: bool + + monitor_bindings: + description: + - A list of load balancing monitors to bind to this service. + - Each monitor entry is a dictionary which may contain the following options. + - Note that if not using the built in monitors they must first be setup. + suboptions: + monitorname: + description: + - Name of the monitor. + weight: + description: + - Weight to assign to the binding between the monitor and service. + dup_state: + choices: + - 'enabled' + - 'disabled' + description: + - State of the monitor. + - The state setting for a monitor of a given type affects all monitors of that type. + - For example, if an HTTP monitor is enabled, all HTTP monitors on the appliance are (or remain) enabled. + - If an HTTP monitor is disabled, all HTTP monitors on the appliance are disabled. + dup_weight: + description: + - Weight to assign to the binding between the monitor and service. + + disabled: + description: + - When set to C(yes) the service state will be set to DISABLED. + - When set to C(no) the service state will be set to ENABLED. + - >- + Note that due to limitations of the underlying NITRO API a C(disabled) state change alone + does not cause the module result to report a changed status. + type: bool + default: false + +extends_documentation_fragment: +- community.general.netscaler + +requirements: + - nitro python sdk +''' + +EXAMPLES = ''' +# Monitor monitor-1 must have been already setup + +- name: Setup http service + gather_facts: False + delegate_to: localhost + netscaler_service: + nsip: 172.18.0.2 + nitro_user: nsroot + nitro_pass: nsroot + + state: present + + name: service-http-1 + servicetype: HTTP + ipaddress: 10.78.0.1 + port: 80 + + monitor_bindings: + - monitor-1 +''' + +RETURN = ''' +loglines: + description: list of logged messages by the module + returned: always + type: list + sample: "['message 1', 'message 2']" + +diff: + description: A dictionary with a list of differences between the actual configured object and the configuration specified in the module + returned: failure + type: dict + sample: "{ 'clttimeout': 'difference. 
ours: (float) 10.0 other: (float) 20.0' }" +''' + +import copy + +try: + from nssrc.com.citrix.netscaler.nitro.resource.config.basic.service import service + from nssrc.com.citrix.netscaler.nitro.resource.config.basic.service_lbmonitor_binding import service_lbmonitor_binding + from nssrc.com.citrix.netscaler.nitro.resource.config.lb.lbmonitor_service_binding import lbmonitor_service_binding + from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception + PYTHON_SDK_IMPORTED = True +except ImportError as e: + PYTHON_SDK_IMPORTED = False + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.network.netscaler.netscaler import (ConfigProxy, get_nitro_client, netscaler_common_arguments, log, loglines, + get_immutables_intersection) + + +def service_exists(client, module): + if service.count_filtered(client, 'name:%s' % module.params['name']) > 0: + return True + else: + return False + + +def service_identical(client, module, service_proxy): + service_list = service.get_filtered(client, 'name:%s' % module.params['name']) + diff_dict = service_proxy.diff_object(service_list[0]) + # the actual ip address is stored in the ipaddress attribute + # of the retrieved object + if 'ip' in diff_dict: + del diff_dict['ip'] + if 'graceful' in diff_dict: + del diff_dict['graceful'] + if len(diff_dict) == 0: + return True + else: + return False + + +def diff(client, module, service_proxy): + service_list = service.get_filtered(client, 'name:%s' % module.params['name']) + diff_object = service_proxy.diff_object(service_list[0]) + if 'ip' in diff_object: + del diff_object['ip'] + return diff_object + + +def get_configured_monitor_bindings(client, module, monitor_bindings_rw_attrs): + bindings = {} + if module.params['monitor_bindings'] is not None: + for binding in module.params['monitor_bindings']: + attribute_values_dict = copy.deepcopy(binding) + # attribute_values_dict['servicename'] = module.params['name'] + attribute_values_dict['servicegroupname'] = module.params['name'] + binding_proxy = ConfigProxy( + actual=lbmonitor_service_binding(), + client=client, + attribute_values_dict=attribute_values_dict, + readwrite_attrs=monitor_bindings_rw_attrs, + ) + key = binding_proxy.monitorname + bindings[key] = binding_proxy + return bindings + + +def get_actual_monitor_bindings(client, module): + bindings = {} + if service_lbmonitor_binding.count(client, module.params['name']) == 0: + return bindings + + # Fallthrough to rest of execution + for binding in service_lbmonitor_binding.get(client, module.params['name']): + # Excluding default monitors since we cannot operate on them + if binding.monitor_name in ('tcp-default', 'ping-default'): + continue + key = binding.monitor_name + actual = lbmonitor_service_binding() + actual.weight = binding.weight + actual.monitorname = binding.monitor_name + actual.dup_weight = binding.dup_weight + actual.servicename = module.params['name'] + bindings[key] = actual + + return bindings + + +def monitor_bindings_identical(client, module, monitor_bindings_rw_attrs): + configured_proxys = get_configured_monitor_bindings(client, module, monitor_bindings_rw_attrs) + actual_bindings = get_actual_monitor_bindings(client, module) + + configured_key_set = set(configured_proxys.keys()) + actual_key_set = set(actual_bindings.keys()) + symmetrical_diff = configured_key_set ^ actual_key_set + if len(symmetrical_diff) > 0: + return False + + # Compare key to key + for monitor_name in 
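The binding comparison above hinges on the symmetric difference of the configured and actual key sets: if any monitor name appears on only one side, the bindings cannot be identical and a sync is needed. A minimal illustration with made-up monitor names:

    configured_keys = {'monitor-1', 'monitor-2'}
    actual_keys = {'monitor-2', 'monitor-3'}

    symmetrical_diff = configured_keys ^ actual_keys
    print(symmetrical_diff)            # {'monitor-1', 'monitor-3'}
    print(len(symmetrical_diff) > 0)   # True -> not identical, sync required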
configured_key_set: + proxy = configured_proxys[monitor_name] + actual = actual_bindings[monitor_name] + diff_dict = proxy.diff_object(actual) + if 'servicegroupname' in diff_dict: + if proxy.servicegroupname == actual.servicename: + del diff_dict['servicegroupname'] + if len(diff_dict) > 0: + return False + + # Fallthrought to success + return True + + +def sync_monitor_bindings(client, module, monitor_bindings_rw_attrs): + configured_proxys = get_configured_monitor_bindings(client, module, monitor_bindings_rw_attrs) + actual_bindings = get_actual_monitor_bindings(client, module) + configured_keyset = set(configured_proxys.keys()) + actual_keyset = set(actual_bindings.keys()) + + # Delete extra + delete_keys = list(actual_keyset - configured_keyset) + for monitor_name in delete_keys: + log('Deleting binding for monitor %s' % monitor_name) + lbmonitor_service_binding.delete(client, actual_bindings[monitor_name]) + + # Delete and re-add modified + common_keyset = list(configured_keyset & actual_keyset) + for monitor_name in common_keyset: + proxy = configured_proxys[monitor_name] + actual = actual_bindings[monitor_name] + if not proxy.has_equal_attributes(actual): + log('Deleting and re adding binding for monitor %s' % monitor_name) + lbmonitor_service_binding.delete(client, actual) + proxy.add() + + # Add new + new_keys = list(configured_keyset - actual_keyset) + for monitor_name in new_keys: + log('Adding binding for monitor %s' % monitor_name) + configured_proxys[monitor_name].add() + + +def all_identical(client, module, service_proxy, monitor_bindings_rw_attrs): + return service_identical(client, module, service_proxy) and monitor_bindings_identical(client, module, monitor_bindings_rw_attrs) + + +def do_state_change(client, module, service_proxy): + if module.params['disabled']: + log('Disabling service') + result = service.disable(client, service_proxy.actual) + else: + log('Enabling service') + result = service.enable(client, service_proxy.actual) + return result + + +def main(): + + module_specific_arguments = dict( + name=dict(type='str'), + ip=dict(type='str'), + servername=dict(type='str'), + servicetype=dict( + type='str', + choices=[ + 'HTTP', + 'FTP', + 'TCP', + 'UDP', + 'SSL', + 'SSL_BRIDGE', + 'SSL_TCP', + 'DTLS', + 'NNTP', + 'RPCSVR', + 'DNS', + 'ADNS', + 'SNMP', + 'RTSP', + 'DHCPRA', + 'ANY', + 'SIP_UDP', + 'SIP_TCP', + 'SIP_SSL', + 'DNS_TCP', + 'ADNS_TCP', + 'MYSQL', + 'MSSQL', + 'ORACLE', + 'RADIUS', + 'RADIUSListener', + 'RDP', + 'DIAMETER', + 'SSL_DIAMETER', + 'TFTP', + 'SMPP', + 'PPTP', + 'GRE', + 'SYSLOGTCP', + 'SYSLOGUDP', + 'FIX', + 'SSL_FIX' + ] + ), + port=dict(type='int'), + cleartextport=dict(type='int'), + cachetype=dict( + type='str', + choices=[ + 'TRANSPARENT', + 'REVERSE', + 'FORWARD', + ] + ), + maxclient=dict(type='float'), + healthmonitor=dict( + type='bool', + default=True, + ), + maxreq=dict(type='float'), + cacheable=dict( + type='bool', + default=False, + ), + cip=dict( + type='str', + choices=[ + 'enabled', + 'disabled', + ] + ), + cipheader=dict(type='str'), + usip=dict(type='bool'), + useproxyport=dict(type='bool'), + sp=dict(type='bool'), + rtspsessionidremap=dict( + type='bool', + default=False, + ), + clttimeout=dict(type='float'), + svrtimeout=dict(type='float'), + customserverid=dict( + type='str', + default='None', + ), + cka=dict(type='bool'), + tcpb=dict(type='bool'), + cmp=dict(type='bool'), + maxbandwidth=dict(type='float'), + accessdown=dict( + type='bool', + default=False + ), + monthreshold=dict(type='float'), + downstateflush=dict( + 
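The delete-extra / re-add-modified / add-new strategy of sync_monitor_bindings earlier in this module reduces to plain set arithmetic over the binding keys. A framework-free sketch (the function name is a placeholder):

    def partition_bindings(configured, actual):
        # Split binding keys into (to_delete, to_check, to_add) buckets.
        configured_keys, actual_keys = set(configured), set(actual)
        return (
            actual_keys - configured_keys,   # bound on the appliance only: delete
            configured_keys & actual_keys,   # bound on both: compare, re-add if changed
            configured_keys - actual_keys,   # requested but missing: add
        )

    print(partition_bindings(['m1', 'm2'], ['m2', 'm3']))
    # ({'m3'}, {'m2'}, {'m1'})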
type='str', + choices=[ + 'enabled', + 'disabled', + ], + ), + tcpprofilename=dict(type='str'), + httpprofilename=dict(type='str'), + hashid=dict(type='float'), + comment=dict(type='str'), + appflowlog=dict( + type='str', + choices=[ + 'enabled', + 'disabled', + ], + ), + netprofile=dict(type='str'), + processlocal=dict( + type='str', + choices=[ + 'enabled', + 'disabled', + ], + ), + dnsprofilename=dict(type='str'), + ipaddress=dict(type='str'), + graceful=dict( + type='bool', + default=False, + ), + ) + + hand_inserted_arguments = dict( + monitor_bindings=dict(type='list'), + disabled=dict( + type='bool', + default=False, + ), + ) + + argument_spec = dict() + + argument_spec.update(netscaler_common_arguments) + + argument_spec.update(module_specific_arguments) + + argument_spec.update(hand_inserted_arguments) + + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + module_result = dict( + changed=False, + failed=False, + loglines=loglines, + ) + + # Fail the module if imports failed + if not PYTHON_SDK_IMPORTED: + module.fail_json(msg='Could not load nitro python sdk') + + client = get_nitro_client(module) + + try: + client.login() + except nitro_exception as e: + msg = "nitro exception during login. errorcode=%s, message=%s" % (str(e.errorcode), e.message) + module.fail_json(msg=msg) + except Exception as e: + if str(type(e)) == "": + module.fail_json(msg='Connection error %s' % str(e)) + elif str(type(e)) == "": + module.fail_json(msg='SSL Error %s' % str(e)) + else: + module.fail_json(msg='Unexpected error during login %s' % str(e)) + + # Fallthrough to rest of execution + + # Instantiate Service Config object + readwrite_attrs = [ + 'name', + 'ip', + 'servername', + 'servicetype', + 'port', + 'cleartextport', + 'cachetype', + 'maxclient', + 'healthmonitor', + 'maxreq', + 'cacheable', + 'cip', + 'cipheader', + 'usip', + 'useproxyport', + 'sp', + 'rtspsessionidremap', + 'clttimeout', + 'svrtimeout', + 'customserverid', + 'cka', + 'tcpb', + 'cmp', + 'maxbandwidth', + 'accessdown', + 'monthreshold', + 'downstateflush', + 'tcpprofilename', + 'httpprofilename', + 'hashid', + 'comment', + 'appflowlog', + 'netprofile', + 'processlocal', + 'dnsprofilename', + 'ipaddress', + 'graceful', + ] + + readonly_attrs = [ + 'numofconnections', + 'policyname', + 'serviceconftype', + 'serviceconftype2', + 'value', + 'gslb', + 'dup_state', + 'publicip', + 'publicport', + 'svrstate', + 'monitor_state', + 'monstatcode', + 'lastresponse', + 'responsetime', + 'riseapbrstatsmsgcode2', + 'monstatparam1', + 'monstatparam2', + 'monstatparam3', + 'statechangetimesec', + 'statechangetimemsec', + 'tickssincelaststatechange', + 'stateupdatereason', + 'clmonowner', + 'clmonview', + 'serviceipstr', + 'oracleserverversion', + ] + + immutable_attrs = [ + 'name', + 'ip', + 'servername', + 'servicetype', + 'port', + 'cleartextport', + 'cachetype', + 'cipheader', + 'serverid', + 'state', + 'td', + 'monitor_name_svc', + 'riseapbrstatsmsgcode', + 'all', + 'Internal', + 'newname', + ] + + transforms = { + 'pathmonitorindv': ['bool_yes_no'], + 'cacheable': ['bool_yes_no'], + 'cka': ['bool_yes_no'], + 'pathmonitor': ['bool_yes_no'], + 'tcpb': ['bool_yes_no'], + 'sp': ['bool_on_off'], + 'graceful': ['bool_yes_no'], + 'usip': ['bool_yes_no'], + 'healthmonitor': ['bool_yes_no'], + 'useproxyport': ['bool_yes_no'], + 'rtspsessionidremap': ['bool_on_off'], + 'accessdown': ['bool_yes_no'], + 'cmp': ['bool_yes_no'], + 'cip': [lambda v: v.upper()], + 'downstateflush': [lambda v: v.upper()], + 
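The transforms table being assembled here maps Ansible-side values onto the literal strings NITRO expects; the named transforms such as bool_yes_no are resolved by the collection's ConfigProxy. A standalone sketch of the same idea using plain callables; the apply_transforms helper below is hypothetical, not the collection's API:

    TRANSFORMS = {
        'graceful': lambda v: 'YES' if v else 'NO',   # bool_yes_no
        'sp': lambda v: 'ON' if v else 'OFF',         # bool_on_off
        'cip': lambda v: v.upper(),                   # 'enabled' -> 'ENABLED'
    }

    def apply_transforms(params, transforms=TRANSFORMS):
        # Hypothetical helper: rewrite each parameter with a registered transform.
        return {key: transforms[key](value) if key in transforms else value
                for key, value in params.items()}

    print(apply_transforms({'graceful': True, 'sp': False, 'cip': 'enabled', 'port': 80}))
    # {'graceful': 'YES', 'sp': 'OFF', 'cip': 'ENABLED', 'port': 80}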
'appflowlog': [lambda v: v.upper()], + 'processlocal': [lambda v: v.upper()], + } + + monitor_bindings_rw_attrs = [ + 'servicename', + 'servicegroupname', + 'dup_state', + 'dup_weight', + 'monitorname', + 'weight', + ] + + # Translate module arguments to correspondign config object attributes + if module.params['ip'] is None: + module.params['ip'] = module.params['ipaddress'] + + service_proxy = ConfigProxy( + actual=service(), + client=client, + attribute_values_dict=module.params, + readwrite_attrs=readwrite_attrs, + readonly_attrs=readonly_attrs, + immutable_attrs=immutable_attrs, + transforms=transforms, + ) + + try: + + # Apply appropriate state + if module.params['state'] == 'present': + log('Applying actions for state present') + if not service_exists(client, module): + if not module.check_mode: + service_proxy.add() + sync_monitor_bindings(client, module, monitor_bindings_rw_attrs) + if module.params['save_config']: + client.save_config() + module_result['changed'] = True + elif not all_identical(client, module, service_proxy, monitor_bindings_rw_attrs): + + # Check if we try to change value of immutable attributes + diff_dict = diff(client, module, service_proxy) + immutables_changed = get_immutables_intersection(service_proxy, diff_dict.keys()) + if immutables_changed != []: + msg = 'Cannot update immutable attributes %s. Must delete and recreate entity.' % (immutables_changed,) + module.fail_json(msg=msg, diff=diff_dict, **module_result) + + # Service sync + if not service_identical(client, module, service_proxy): + if not module.check_mode: + service_proxy.update() + + # Monitor bindings sync + if not monitor_bindings_identical(client, module, monitor_bindings_rw_attrs): + if not module.check_mode: + sync_monitor_bindings(client, module, monitor_bindings_rw_attrs) + + module_result['changed'] = True + if not module.check_mode: + if module.params['save_config']: + client.save_config() + else: + module_result['changed'] = False + + if not module.check_mode: + res = do_state_change(client, module, service_proxy) + if res.errorcode != 0: + msg = 'Error when setting disabled state. 
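Note the shape of the update branch above: the service's own attributes and its monitor bindings are checked and synchronized independently, and changed is reported when either needed work. A condensed sketch of that pattern; the callables are placeholders:

    def sync_if_needed(check_mode, checks_and_syncs):
        # Run each (is_identical, sync) pair; report True if anything changed.
        changed = False
        for is_identical, sync in checks_and_syncs:
            if not is_identical():
                if not check_mode:
                    sync()
                changed = True
        return changed

    # e.g. sync_if_needed(False, [(service_identical_fn, service_update_fn),
    #                             (bindings_identical_fn, bindings_sync_fn)])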
errorcode: %s message: %s' % (res.errorcode, res.message) + module.fail_json(msg=msg, **module_result) + + # Sanity check for state + if not module.check_mode: + log('Sanity checks for state present') + if not service_exists(client, module): + module.fail_json(msg='Service does not exist', **module_result) + + if not service_identical(client, module, service_proxy): + module.fail_json(msg='Service differs from configured', diff=diff(client, module, service_proxy), **module_result) + + if not monitor_bindings_identical(client, module, monitor_bindings_rw_attrs): + module.fail_json(msg='Monitor bindings are not identical', **module_result) + + elif module.params['state'] == 'absent': + log('Applying actions for state absent') + if service_exists(client, module): + if not module.check_mode: + service_proxy.delete() + if module.params['save_config']: + client.save_config() + module_result['changed'] = True + else: + module_result['changed'] = False + + # Sanity check for state + if not module.check_mode: + log('Sanity checks for state absent') + if service_exists(client, module): + module.fail_json(msg='Service still exists', **module_result) + + except nitro_exception as e: + msg = "nitro exception errorcode=%s, message=%s" % (str(e.errorcode), e.message) + module.fail_json(msg=msg, **module_result) + + client.logout() + module.exit_json(**module_result) + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/network/netscaler/netscaler_servicegroup.py b/plugins/modules/network/netscaler/netscaler_servicegroup.py new file mode 100644 index 0000000000..e86e629bd4 --- /dev/null +++ b/plugins/modules/network/netscaler/netscaler_servicegroup.py @@ -0,0 +1,1046 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2017 Citrix Systems +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: netscaler_servicegroup +short_description: Manage service group configuration in Netscaler +description: + - Manage service group configuration in Netscaler. + - This module is intended to run either on the ansible control node or a bastion (jumpserver) with access to the actual netscaler instance. + + +author: George Nikolopoulos (@giorgos-nikolopoulos) + +options: + servicegroupname: + description: + - >- + Name of the service group. Must begin with an ASCII alphabetic or underscore C(_) character, and must + contain only ASCII alphanumeric, underscore C(_), hash C(#), period C(.), space C( ), colon C(:), at C(@), equals + C(=), and hyphen C(-) characters. Can be changed after the name is created. + - "Minimum length = 1" + + servicetype: + choices: + - 'HTTP' + - 'FTP' + - 'TCP' + - 'UDP' + - 'SSL' + - 'SSL_BRIDGE' + - 'SSL_TCP' + - 'DTLS' + - 'NNTP' + - 'RPCSVR' + - 'DNS' + - 'ADNS' + - 'SNMP' + - 'RTSP' + - 'DHCPRA' + - 'ANY' + - 'SIP_UDP' + - 'SIP_TCP' + - 'SIP_SSL' + - 'DNS_TCP' + - 'ADNS_TCP' + - 'MYSQL' + - 'MSSQL' + - 'ORACLE' + - 'RADIUS' + - 'RADIUSListener' + - 'RDP' + - 'DIAMETER' + - 'SSL_DIAMETER' + - 'TFTP' + - 'SMPP' + - 'PPTP' + - 'GRE' + - 'SYSLOGTCP' + - 'SYSLOGUDP' + - 'FIX' + - 'SSL_FIX' + description: + - "Protocol used to exchange data with the service." + + cachetype: + choices: + - 'TRANSPARENT' + - 'REVERSE' + - 'FORWARD' + description: + - "Cache type supported by the cache server." 
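The present/absent flow that the service module above just completed is the idempotency skeleton shared by every module in this family, including the service group module documented below. A condensed, framework-free sketch of that control flow (all callables are placeholders):

    def ensure_state(state, exists, identical, add, update, delete, check_mode=False):
        # Drive the resource toward the desired state; report whether work was needed.
        changed = False
        if state == 'present':
            if not exists():
                if not check_mode:
                    add()
                changed = True
            elif not identical():
                if not check_mode:
                    update()
                changed = True
        elif state == 'absent' and exists():
            if not check_mode:
                delete()
            changed = True
        return changed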
+ + maxclient: + description: + - "Maximum number of simultaneous open connections for the service group." + - "Minimum value = C(0)" + - "Maximum value = C(4294967294)" + + maxreq: + description: + - "Maximum number of requests that can be sent on a persistent connection to the service group." + - "Note: Connection requests beyond this value are rejected." + - "Minimum value = C(0)" + - "Maximum value = C(65535)" + + cacheable: + description: + - "Use the transparent cache redirection virtual server to forward the request to the cache server." + - "Note: Do not set this parameter if you set the Cache Type." + type: bool + + cip: + choices: + - 'enabled' + - 'disabled' + description: + - "Insert the Client IP header in requests forwarded to the service." + + cipheader: + description: + - >- + Name of the HTTP header whose value must be set to the IP address of the client. Used with the Client + IP parameter. If client IP insertion is enabled, and the client IP header is not specified, the value + of Client IP Header parameter or the value set by the set ns config command is used as client's IP + header name. + - "Minimum length = 1" + + usip: + description: + - >- + Use client's IP address as the source IP address when initiating connection to the server. With the + NO setting, which is the default, a mapped IP (MIP) address or subnet IP (SNIP) address is used as + the source IP address to initiate server side connections. + type: bool + + pathmonitor: + description: + - "Path monitoring for clustering." + type: bool + + pathmonitorindv: + description: + - "Individual Path monitoring decisions." + type: bool + + useproxyport: + description: + - >- + Use the proxy port as the source port when initiating connections with the server. With the NO + setting, the client-side connection port is used as the source port for the server-side connection. + - "Note: This parameter is available only when the Use Source IP C(usip) parameter is set to C(yes)." + type: bool + + healthmonitor: + description: + - "Monitor the health of this service. Available settings function as follows:" + - "C(yes) - Send probes to check the health of the service." + - >- + C(no) - Do not send probes to check the health of the service. With the NO option, the appliance shows + the service as UP at all times. + type: bool + + sp: + description: + - "Enable surge protection for the service group." + type: bool + + rtspsessionidremap: + description: + - "Enable RTSP session ID mapping for the service group." + type: bool + + clttimeout: + description: + - "Time, in seconds, after which to terminate an idle client connection." + - "Minimum value = C(0)" + - "Maximum value = C(31536000)" + + svrtimeout: + description: + - "Time, in seconds, after which to terminate an idle server connection." + - "Minimum value = C(0)" + - "Maximum value = C(31536000)" + + cka: + description: + - "Enable client keep-alive for the service group." + type: bool + + tcpb: + description: + - "Enable TCP buffering for the service group." + type: bool + + cmp: + description: + - "Enable compression for the specified service." + type: bool + + maxbandwidth: + description: + - "Maximum bandwidth, in Kbps, allocated for all the services in the service group." + - "Minimum value = C(0)" + - "Maximum value = C(4294967287)" + + monthreshold: + description: + - >- + Minimum sum of weights of the monitors that are bound to this service. Used to determine whether to + mark a service as UP or DOWN. 
+ - "Minimum value = C(0)" + - "Maximum value = C(65535)" + + downstateflush: + choices: + - 'enabled' + - 'disabled' + description: + - >- + Flush all active transactions associated with all the services in the service group whose state + transitions from UP to DOWN. Do not enable this option for applications that must complete their + transactions. + + tcpprofilename: + description: + - "Name of the TCP profile that contains TCP configuration settings for the service group." + - "Minimum length = 1" + - "Maximum length = 127" + + httpprofilename: + description: + - "Name of the HTTP profile that contains HTTP configuration settings for the service group." + - "Minimum length = 1" + - "Maximum length = 127" + + comment: + description: + - "Any information about the service group." + + appflowlog: + choices: + - 'enabled' + - 'disabled' + description: + - "Enable logging of AppFlow information for the specified service group." + + netprofile: + description: + - "Network profile for the service group." + - "Minimum length = 1" + - "Maximum length = 127" + + autoscale: + choices: + - 'DISABLED' + - 'DNS' + - 'POLICY' + description: + - "Auto scale option for a servicegroup." + + memberport: + description: + - "member port." + + graceful: + description: + - "Wait for all existing connections to the service to terminate before shutting down the service." + type: bool + + servicemembers: + description: + - A list of dictionaries describing each service member of the service group. + suboptions: + ip: + description: + - IP address of the service. Must not overlap with an existing server entity defined by name. + + port: + description: + - Server port number. + - Range C(1) - C(65535) + - "* in CLI is represented as 65535 in NITRO API" + state: + choices: + - 'enabled' + - 'disabled' + description: + - Initial state of the service after binding. + hashid: + description: + - The hash identifier for the service. + - This must be unique for each service. + - This parameter is used by hash based load balancing methods. + - Minimum value = C(1) + + serverid: + description: + - The identifier for the service. + - This is used when the persistency type is set to Custom Server ID. + + servername: + description: + - Name of the server to which to bind the service group. + - The server must already be configured as a named server. + - Minimum length = 1 + + customserverid: + description: + - The identifier for this IP:Port pair. + - Used when the persistency type is set to Custom Server ID. + + weight: + description: + - Weight to assign to the servers in the service group. + - Specifies the capacity of the servers relative to the other servers in the load balancing configuration. + - The higher the weight, the higher the percentage of requests sent to the service. + - Minimum value = C(1) + - Maximum value = C(100) + + monitorbindings: + description: + - A list of monitornames to bind to this service + - Note that the monitors must have already been setup possibly using the M(netscaler_lb_monitor) module or some other method + suboptions: + monitorname: + description: + - The monitor name to bind to this servicegroup. + weight: + description: + - Weight to assign to the binding between the monitor and servicegroup. + + disabled: + description: + - When set to C(yes) the service group state will be set to DISABLED. + - When set to C(no) the service group state will be set to ENABLED. 
+ - >- + Note that due to limitations of the underlying NITRO API a C(disabled) state change alone + does not cause the module result to report a changed status. + type: bool + default: false + + +extends_documentation_fragment: +- community.general.netscaler + +requirements: + - nitro python sdk +''' + +EXAMPLES = ''' +# The LB Monitors monitor-1 and monitor-2 must already exist +# Service members defined by C(ip) must not redefine an existing server's ip address. +# Service members defined by C(servername) must already exist. + +- name: Setup http service with ip members + delegate_to: localhost + netscaler_servicegroup: + nsip: 172.18.0.2 + nitro_user: nsroot + nitro_pass: nsroot + + state: present + + servicegroupname: service-group-1 + servicetype: HTTP + servicemembers: + - ip: 10.78.78.78 + port: 80 + weight: 50 + - ip: 10.79.79.79 + port: 80 + weight: 40 + - servername: server-1 + port: 80 + weight: 10 + + monitorbindings: + - monitorname: monitor-1 + weight: 50 + - monitorname: monitor-2 + weight: 50 + +''' + +RETURN = ''' +loglines: + description: list of logged messages by the module + returned: always + type: list + sample: ['message 1', 'message 2'] + +msg: + description: Message detailing the failure reason + returned: failure + type: str + sample: "Action does not exist" + +diff: + description: List of differences between the actual configured object and the configuration specified in the module + returned: failure + type: dict + sample: { 'clttimeout': 'difference. ours: (float) 10.0 other: (float) 20.0' } +''' + +from ansible.module_utils.basic import AnsibleModule +import copy + +from ansible_collections.community.general.plugins.module_utils.network.netscaler.netscaler import ConfigProxy, get_nitro_client, netscaler_common_arguments, log, \ + loglines, get_immutables_intersection +try: + from nssrc.com.citrix.netscaler.nitro.resource.config.basic.servicegroup import servicegroup + from nssrc.com.citrix.netscaler.nitro.resource.config.basic.servicegroup_servicegroupmember_binding import servicegroup_servicegroupmember_binding + from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception + + from nssrc.com.citrix.netscaler.nitro.resource.config.basic.servicegroup_lbmonitor_binding import servicegroup_lbmonitor_binding + from nssrc.com.citrix.netscaler.nitro.resource.config.lb.lbmonitor_servicegroup_binding import lbmonitor_servicegroup_binding + PYTHON_SDK_IMPORTED = True +except ImportError as e: + PYTHON_SDK_IMPORTED = False + + +def servicegroup_exists(client, module): + log('Checking if service group exists') + count = servicegroup.count_filtered(client, 'servicegroupname:%s' % module.params['servicegroupname']) + log('count is %s' % count) + if count > 0: + return True + else: + return False + + +def servicegroup_identical(client, module, servicegroup_proxy): + log('Checking if service group is identical') + servicegroups = servicegroup.get_filtered(client, 'servicegroupname:%s' % module.params['servicegroupname']) + if servicegroup_proxy.has_equal_attributes(servicegroups[0]): + return True + else: + return False + + +def get_configured_service_members(client, module): + log('get_configured_service_members') + readwrite_attrs = [ + 'servicegroupname', + 'ip', + 'port', + 'state', + 'hashid', + 'serverid', + 'servername', + 'customserverid', + 'weight' + ] + readonly_attrs = [ + 'delay', + 'statechangetimesec', + 'svrstate', + 'tickssincelaststatechange', + 'graceful', + ] + + members = [] + if module.params['servicemembers'] is None: + 
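get_configured_service_members, continuing just below, copies each user-supplied member dictionary and injects the owning service group's name before wrapping it in a ConfigProxy. The normalization step on its own is simple; a standalone sketch:

    import copy

    def normalize_members(servicemembers, servicegroupname):
        # Copy each member dict (never mutate caller data) and tag it with its group.
        normalized = []
        for member in servicemembers or []:
            member = copy.deepcopy(member)
            member['servicegroupname'] = servicegroupname
            normalized.append(member)
        return normalized

    print(normalize_members([{'ip': '10.78.78.78', 'port': 80}], 'service-group-1'))
    # [{'ip': '10.78.78.78', 'port': 80, 'servicegroupname': 'service-group-1'}]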
return members + + for config in module.params['servicemembers']: + # Make a copy to update + config = copy.deepcopy(config) + config['servicegroupname'] = module.params['servicegroupname'] + member_proxy = ConfigProxy( + actual=servicegroup_servicegroupmember_binding(), + client=client, + attribute_values_dict=config, + readwrite_attrs=readwrite_attrs, + readonly_attrs=readonly_attrs + ) + members.append(member_proxy) + return members + + +def get_actual_service_members(client, module): + try: + # count() raises nitro exception instead of returning 0 + count = servicegroup_servicegroupmember_binding.count(client, module.params['servicegroupname']) + if count > 0: + servicegroup_members = servicegroup_servicegroupmember_binding.get(client, module.params['servicegroupname']) + else: + servicegroup_members = [] + except nitro_exception as e: + if e.errorcode == 258: + servicegroup_members = [] + else: + raise + return servicegroup_members + + +def servicemembers_identical(client, module): + log('servicemembers_identical') + + servicegroup_members = get_actual_service_members(client, module) + log('servicemembers %s' % servicegroup_members) + module_servicegroups = get_configured_service_members(client, module) + log('Number of service group members %s' % len(servicegroup_members)) + if len(servicegroup_members) != len(module_servicegroups): + return False + + # Fallthrough to member evaluation + identical_count = 0 + for actual_member in servicegroup_members: + for member in module_servicegroups: + if member.has_equal_attributes(actual_member): + identical_count += 1 + break + if identical_count != len(servicegroup_members): + return False + + # Fallthrough to success + return True + + +def sync_service_members(client, module): + log('sync_service_members') + configured_service_members = get_configured_service_members(client, module) + actual_service_members = get_actual_service_members(client, module) + skip_add = [] + skip_delete = [] + + # Find positions of identical service members + for (configured_index, configured_service) in enumerate(configured_service_members): + for (actual_index, actual_service) in enumerate(actual_service_members): + if configured_service.has_equal_attributes(actual_service): + skip_add.append(configured_index) + skip_delete.append(actual_index) + + # Delete actual that are not identical to any configured + for (actual_index, actual_service) in enumerate(actual_service_members): + # Skip identical + if actual_index in skip_delete: + log('Skipping actual delete at index %s' % actual_index) + continue + + # Fallthrouth to deletion + if all([ + hasattr(actual_service, 'ip'), + actual_service.ip is not None, + hasattr(actual_service, 'servername'), + actual_service.servername is not None, + ]): + actual_service.ip = None + + actual_service.servicegroupname = module.params['servicegroupname'] + servicegroup_servicegroupmember_binding.delete(client, actual_service) + + # Add configured that are not already present in actual + for (configured_index, configured_service) in enumerate(configured_service_members): + + # Skip identical + if configured_index in skip_add: + log('Skipping configured add at index %s' % configured_index) + continue + + # Fallthrough to addition + configured_service.add() + + +def monitor_binding_equal(configured, actual): + if any([configured.monitorname != actual.monitor_name, + configured.servicegroupname != actual.servicegroupname, + configured.weight != float(actual.weight)]): + return False + return True + + +def 
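sync_service_members above pairs configured and actual members by attribute equality and records the matched indices so they are neither deleted nor re-added; whatever remains unmatched on the actual side is deleted, and whatever remains unmatched on the configured side is added. A minimal sketch of the pairing over plain values, with a small guard against matching the same actual member twice:

    def plan_member_sync(configured, actual):
        # Return (to_add, to_delete) after pairing up identical members.
        skip_add, skip_delete = set(), set()
        for ci, conf in enumerate(configured):
            for ai, act in enumerate(actual):
                if ai not in skip_delete and conf == act:
                    skip_add.add(ci)
                    skip_delete.add(ai)
                    break
        to_add = [c for i, c in enumerate(configured) if i not in skip_add]
        to_delete = [a for i, a in enumerate(actual) if i not in skip_delete]
        return to_add, to_delete

    print(plan_member_sync(['10.78.78.78:80', '10.79.79.79:80'],
                           ['10.79.79.79:80', '10.80.80.80:80']))
    # (['10.78.78.78:80'], ['10.80.80.80:80'])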
get_configured_monitor_bindings(client, module): + log('Entering get_configured_monitor_bindings') + bindings = {} + if 'monitorbindings' in module.params and module.params['monitorbindings'] is not None: + for binding in module.params['monitorbindings']: + readwrite_attrs = [ + 'monitorname', + 'servicegroupname', + 'weight', + ] + readonly_attrs = [] + attribute_values_dict = copy.deepcopy(binding) + attribute_values_dict['servicegroupname'] = module.params['servicegroupname'] + binding_proxy = ConfigProxy( + actual=lbmonitor_servicegroup_binding(), + client=client, + attribute_values_dict=attribute_values_dict, + readwrite_attrs=readwrite_attrs, + readonly_attrs=readonly_attrs, + ) + key = attribute_values_dict['monitorname'] + bindings[key] = binding_proxy + return bindings + + +def get_actual_monitor_bindings(client, module): + log('Entering get_actual_monitor_bindings') + bindings = {} + try: + # count() raises nitro exception instead of returning 0 + count = servicegroup_lbmonitor_binding.count(client, module.params['servicegroupname']) + except nitro_exception as e: + if e.errorcode == 258: + return bindings + else: + raise + + if count == 0: + return bindings + + # Fallthrough to rest of execution + for binding in servicegroup_lbmonitor_binding.get(client, module.params['servicegroupname']): + log('Gettign actual monitor with name %s' % binding.monitor_name) + key = binding.monitor_name + bindings[key] = binding + + return bindings + + +def monitor_bindings_identical(client, module): + log('Entering monitor_bindings_identical') + configured_bindings = get_configured_monitor_bindings(client, module) + actual_bindings = get_actual_monitor_bindings(client, module) + + configured_key_set = set(configured_bindings.keys()) + actual_key_set = set(actual_bindings.keys()) + symmetrical_diff = configured_key_set ^ actual_key_set + for default_monitor in ('tcp-default', 'ping-default'): + if default_monitor in symmetrical_diff: + log('Excluding %s monitor from key comparison' % default_monitor) + symmetrical_diff.remove(default_monitor) + if len(symmetrical_diff) > 0: + return False + + # Compare key to key + for key in configured_key_set: + configured_proxy = configured_bindings[key] + + # Follow nscli convention for missing weight value + if not hasattr(configured_proxy, 'weight'): + configured_proxy.weight = 1 + log('configured_proxy %s' % [configured_proxy.monitorname, configured_proxy.servicegroupname, configured_proxy.weight]) + log('actual_bindings %s' % [actual_bindings[key].monitor_name, actual_bindings[key].servicegroupname, actual_bindings[key].weight]) + if not monitor_binding_equal(configured_proxy, actual_bindings[key]): + return False + + # Fallthrought to success + return True + + +def sync_monitor_bindings(client, module): + log('Entering sync_monitor_bindings') + + actual_bindings = get_actual_monitor_bindings(client, module) + + # Exclude default monitors from deletion + for monitorname in ('tcp-default', 'ping-default'): + if monitorname in actual_bindings: + del actual_bindings[monitorname] + + configured_bindings = get_configured_monitor_bindings(client, module) + + to_remove = list(set(actual_bindings.keys()) - set(configured_bindings.keys())) + to_add = list(set(configured_bindings.keys()) - set(actual_bindings.keys())) + to_modify = list(set(configured_bindings.keys()) & set(actual_bindings.keys())) + + # Delete existing and modifiable bindings + for key in to_remove + to_modify: + binding = actual_bindings[key] + b = lbmonitor_servicegroup_binding() + b.monitorname 
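Two details in monitor_bindings_identical above deserve emphasis: the built-in tcp-default and ping-default monitors are excluded from the comparison because they always exist and cannot be managed, and a configured binding without an explicit weight is compared as weight 1, following nscli convention. A minimal sketch of the exclusion:

    DEFAULT_MONITORS = ('tcp-default', 'ping-default')

    def comparable_diff(configured_keys, actual_keys):
        # Symmetric difference of binding names, ignoring the unmanageable defaults.
        return (set(configured_keys) ^ set(actual_keys)) - set(DEFAULT_MONITORS)

    print(comparable_diff({'monitor-1'}, {'monitor-1', 'tcp-default'}))
    # set() -> identical as far as manageable bindings are concerned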
= binding.monitor_name + b.servicegroupname = module.params['servicegroupname'] + # Cannot remove default monitor bindings + if b.monitorname in ('tcp-default', 'ping-default'): + continue + lbmonitor_servicegroup_binding.delete(client, b) + + # Add new and modified bindings + for key in to_add + to_modify: + binding = configured_bindings[key] + log('Adding %s' % binding.monitorname) + binding.add() + + +def diff(client, module, servicegroup_proxy): + servicegroup_list = servicegroup.get_filtered(client, 'servicegroupname:%s' % module.params['servicegroupname']) + diff_object = servicegroup_proxy.diff_object(servicegroup_list[0]) + return diff_object + + +def do_state_change(client, module, servicegroup_proxy): + if module.params['disabled']: + log('Disabling service') + result = servicegroup.disable(client, servicegroup_proxy.actual) + else: + log('Enabling service') + result = servicegroup.enable(client, servicegroup_proxy.actual) + return result + + +def main(): + + module_specific_arguments = dict( + servicegroupname=dict(type='str'), + servicetype=dict( + type='str', + choices=[ + 'HTTP', + 'FTP', + 'TCP', + 'UDP', + 'SSL', + 'SSL_BRIDGE', + 'SSL_TCP', + 'DTLS', + 'NNTP', + 'RPCSVR', + 'DNS', + 'ADNS', + 'SNMP', + 'RTSP', + 'DHCPRA', + 'ANY', + 'SIP_UDP', + 'SIP_TCP', + 'SIP_SSL', + 'DNS_TCP', + 'ADNS_TCP', + 'MYSQL', + 'MSSQL', + 'ORACLE', + 'RADIUS', + 'RADIUSListener', + 'RDP', + 'DIAMETER', + 'SSL_DIAMETER', + 'TFTP', + 'SMPP', + 'PPTP', + 'GRE', + 'SYSLOGTCP', + 'SYSLOGUDP', + 'FIX', + 'SSL_FIX', + ] + ), + cachetype=dict( + type='str', + choices=[ + 'TRANSPARENT', + 'REVERSE', + 'FORWARD', + ] + ), + maxclient=dict(type='float'), + maxreq=dict(type='float'), + cacheable=dict(type='bool'), + cip=dict( + type='str', + choices=[ + 'enabled', + 'disabled', + ] + ), + cipheader=dict(type='str'), + usip=dict(type='bool'), + pathmonitor=dict(type='bool'), + pathmonitorindv=dict(type='bool'), + useproxyport=dict(type='bool'), + healthmonitor=dict(type='bool'), + sp=dict(type='bool'), + rtspsessionidremap=dict(type='bool'), + clttimeout=dict(type='float'), + svrtimeout=dict(type='float'), + cka=dict(type='bool'), + tcpb=dict(type='bool'), + cmp=dict(type='bool'), + maxbandwidth=dict(type='float'), + monthreshold=dict(type='float'), + downstateflush=dict( + type='str', + choices=[ + 'enabled', + 'disabled', + ] + ), + tcpprofilename=dict(type='str'), + httpprofilename=dict(type='str'), + comment=dict(type='str'), + appflowlog=dict( + type='str', + choices=[ + 'enabled', + 'disabled', + ] + ), + netprofile=dict(type='str'), + autoscale=dict( + type='str', + choices=[ + 'DISABLED', + 'DNS', + 'POLICY', + ] + ), + memberport=dict(type='int'), + graceful=dict(type='bool'), + ) + + hand_inserted_arguments = dict( + servicemembers=dict(type='list'), + monitorbindings=dict(type='list'), + disabled=dict( + type='bool', + default=False, + ), + ) + + argument_spec = dict() + + argument_spec.update(netscaler_common_arguments) + argument_spec.update(module_specific_arguments) + argument_spec.update(hand_inserted_arguments) + + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + module_result = dict( + changed=False, + failed=False, + loglines=loglines, + ) + + # Fail the module if imports failed + if not PYTHON_SDK_IMPORTED: + module.fail_json(msg='Could not load nitro python sdk') + + # Fallthrough to rest of execution + client = get_nitro_client(module) + + try: + client.login() + except nitro_exception as e: + msg = "nitro exception during login. 
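do_state_change above runs unconditionally when state is present: the module always re-asserts ENABLED or DISABLED and only inspects the returned error code, which is why (as the documentation notes) a disabled-only change never flips the changed flag. A condensed sketch of that contract; the Result class stands in for the NITRO response object:

    class Result(object):
        def __init__(self, errorcode=0, message='Done'):
            self.errorcode = errorcode
            self.message = message

    def assert_enabled_state(disabled, enable, disable):
        # Always (re)apply the desired state; the caller only checks errorcode.
        return disable() if disabled else enable()

    res = assert_enabled_state(False, lambda: Result(), lambda: Result())
    if res.errorcode != 0:
        raise RuntimeError('Error when setting disabled state: %s' % res.message)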
errorcode=%s, message=%s" % (str(e.errorcode), e.message) + module.fail_json(msg=msg) + except Exception as e: + if str(type(e)) == "": + module.fail_json(msg='Connection error %s' % str(e)) + elif str(type(e)) == "": + module.fail_json(msg='SSL Error %s' % str(e)) + else: + module.fail_json(msg='Unexpected error during login %s' % str(e)) + + # Instantiate service group configuration object + readwrite_attrs = [ + 'servicegroupname', + 'servicetype', + 'cachetype', + 'maxclient', + 'maxreq', + 'cacheable', + 'cip', + 'cipheader', + 'usip', + 'pathmonitor', + 'pathmonitorindv', + 'useproxyport', + 'healthmonitor', + 'sp', + 'rtspsessionidremap', + 'clttimeout', + 'svrtimeout', + 'cka', + 'tcpb', + 'cmp', + 'maxbandwidth', + 'monthreshold', + 'downstateflush', + 'tcpprofilename', + 'httpprofilename', + 'comment', + 'appflowlog', + 'netprofile', + 'autoscale', + 'memberport', + 'graceful', + ] + + readonly_attrs = [ + 'numofconnections', + 'serviceconftype', + 'value', + 'svrstate', + 'ip', + 'monstatcode', + 'monstatparam1', + 'monstatparam2', + 'monstatparam3', + 'statechangetimemsec', + 'stateupdatereason', + 'clmonowner', + 'clmonview', + 'groupcount', + 'riseapbrstatsmsgcode2', + 'serviceipstr', + 'servicegroupeffectivestate' + ] + + immutable_attrs = [ + 'servicegroupname', + 'servicetype', + 'cachetype', + 'td', + 'cipheader', + 'state', + 'autoscale', + 'memberport', + 'servername', + 'port', + 'serverid', + 'monitor_name_svc', + 'dup_weight', + 'riseapbrstatsmsgcode', + 'delay', + 'graceful', + 'includemembers', + 'newname', + ] + + transforms = { + 'pathmonitorindv': ['bool_yes_no'], + 'cacheable': ['bool_yes_no'], + 'cka': ['bool_yes_no'], + 'pathmonitor': ['bool_yes_no'], + 'tcpb': ['bool_yes_no'], + 'sp': ['bool_on_off'], + 'usip': ['bool_yes_no'], + 'healthmonitor': ['bool_yes_no'], + 'useproxyport': ['bool_yes_no'], + 'rtspsessionidremap': ['bool_on_off'], + 'graceful': ['bool_yes_no'], + 'cmp': ['bool_yes_no'], + 'cip': [lambda v: v.upper()], + 'downstateflush': [lambda v: v.upper()], + 'appflowlog': [lambda v: v.upper()], + } + + # Instantiate config proxy + servicegroup_proxy = ConfigProxy( + actual=servicegroup(), + client=client, + attribute_values_dict=module.params, + readwrite_attrs=readwrite_attrs, + readonly_attrs=readonly_attrs, + immutable_attrs=immutable_attrs, + transforms=transforms, + ) + + try: + if module.params['state'] == 'present': + log('Applying actions for state present') + if not servicegroup_exists(client, module): + if not module.check_mode: + log('Adding service group') + servicegroup_proxy.add() + if module.params['save_config']: + client.save_config() + module_result['changed'] = True + elif not servicegroup_identical(client, module, servicegroup_proxy): + + # Check if we try to change value of immutable attributes + diff_dict = diff(client, module, servicegroup_proxy) + immutables_changed = get_immutables_intersection(servicegroup_proxy, diff_dict.keys()) + if immutables_changed != []: + msg = 'Cannot update immutable attributes %s. Must delete and recreate entity.' 
% (immutables_changed,) + module.fail_json(msg=msg, diff=diff_dict, **module_result) + + if not module.check_mode: + servicegroup_proxy.update() + if module.params['save_config']: + client.save_config() + module_result['changed'] = True + else: + module_result['changed'] = False + + # Check bindings + if not monitor_bindings_identical(client, module): + if not module.check_mode: + sync_monitor_bindings(client, module) + if module.params['save_config']: + client.save_config() + module_result['changed'] = True + + if not servicemembers_identical(client, module): + if not module.check_mode: + sync_service_members(client, module) + if module.params['save_config']: + client.save_config() + module_result['changed'] = True + + if not module.check_mode: + res = do_state_change(client, module, servicegroup_proxy) + if res.errorcode != 0: + msg = 'Error when setting disabled state. errorcode: %s message: %s' % (res.errorcode, res.message) + module.fail_json(msg=msg, **module_result) + + # Sanity check for state + if not module.check_mode: + log('Sanity checks for state present') + if not servicegroup_exists(client, module): + module.fail_json(msg='Service group is not present', **module_result) + if not servicegroup_identical(client, module, servicegroup_proxy): + module.fail_json( + msg='Service group is not identical to configuration', + diff=diff(client, module, servicegroup_proxy), + **module_result + ) + if not servicemembers_identical(client, module): + module.fail_json(msg='Service group members differ from configuration', **module_result) + if not monitor_bindings_identical(client, module): + module.fail_json(msg='Monitor bindings are not identical', **module_result) + + elif module.params['state'] == 'absent': + log('Applying actions for state absent') + if servicegroup_exists(client, module): + if not module.check_mode: + servicegroup_proxy.delete() + if module.params['save_config']: + client.save_config() + module_result['changed'] = True + else: + module_result['changed'] = False + + # Sanity check for state + if not module.check_mode: + log('Sanity checks for state absent') + if servicegroup_exists(client, module): + module.fail_json(msg='Service group is present', **module_result) + + except nitro_exception as e: + msg = "nitro exception errorcode=" + str(e.errorcode) + ",message=" + e.message + module.fail_json(msg=msg, **module_result) + + client.logout() + module.exit_json(**module_result) + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/network/netscaler/netscaler_ssl_certkey.py b/plugins/modules/network/netscaler/netscaler_ssl_certkey.py new file mode 100644 index 0000000000..5a14596a18 --- /dev/null +++ b/plugins/modules/network/netscaler/netscaler_ssl_certkey.py @@ -0,0 +1,372 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2017 Citrix Systems +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: netscaler_ssl_certkey +short_description: Manage ssl certificate keys. +description: + - Manage ssl certificate keys. + + +author: George Nikolopoulos (@giorgos-nikolopoulos) + +options: + + certkey: + description: + - >- + Name for the certificate and private-key pair. 
Must begin with an ASCII alphanumeric or underscore + C(_) character, and must contain only ASCII alphanumeric, underscore C(_), hash C(#), period C(.), space C( ), + colon C(:), at C(@), equals C(=), and hyphen C(-) characters. Cannot be changed after the certificate-key + pair is created. + - "The following requirement applies only to the NetScaler CLI:" + - >- + If the name includes one or more spaces, enclose the name in double or single quotation marks (for + example, "my cert" or 'my cert'). + - "Minimum length = 1" + + cert: + description: + - >- + Name of and, optionally, path to the X509 certificate file that is used to form the certificate-key + pair. The certificate file should be present on the appliance's hard-disk drive or solid-state drive. + Storing a certificate in any location other than the default might cause inconsistency in a high + availability setup. /nsconfig/ssl/ is the default path. + - "Minimum length = 1" + + key: + description: + - >- + Name of and, optionally, path to the private-key file that is used to form the certificate-key pair. + The certificate file should be present on the appliance's hard-disk drive or solid-state drive. + Storing a certificate in any location other than the default might cause inconsistency in a high + availability setup. /nsconfig/ssl/ is the default path. + - "Minimum length = 1" + + password: + description: + - >- + Passphrase that was used to encrypt the private-key. Use this option to load encrypted private-keys + in PEM format. + + inform: + choices: + - 'DER' + - 'PEM' + - 'PFX' + description: + - >- + Input format of the certificate and the private-key files. The three formats supported by the + appliance are: + - "PEM - Privacy Enhanced Mail" + - "DER - Distinguished Encoding Rule" + - "PFX - Personal Information Exchange." + + passplain: + description: + - >- + Pass phrase used to encrypt the private-key. Required when adding an encrypted private-key in PEM + format. + - "Minimum length = 1" + + expirymonitor: + choices: + - 'enabled' + - 'disabled' + description: + - "Issue an alert when the certificate is about to expire." + + notificationperiod: + description: + - >- + Time, in number of days, before certificate expiration, at which to generate an alert that the + certificate is about to expire. + - "Minimum value = C(10)" + - "Maximum value = C(100)" + + +extends_documentation_fragment: +- community.general.netscaler + +requirements: + - nitro python sdk +''' + +EXAMPLES = ''' + +- name: Setup ssl certkey + delegate_to: localhost + netscaler_ssl_certkey: + nitro_user: nsroot + nitro_pass: nsroot + nsip: 172.18.0.2 + + certkey: certirificate_1 + cert: server.crt + key: server.key + expirymonitor: enabled + notificationperiod: 30 + inform: PEM + password: False + passplain: somesecret +''' + +RETURN = ''' +loglines: + description: list of logged messages by the module + returned: always + type: list + sample: "['message 1', 'message 2']" + +msg: + description: Message detailing the failure reason + returned: failure + type: str + sample: "Action does not exist" + +diff: + description: List of differences between the actual configured object and the configuration specified in the module + returned: failure + type: dict + sample: "{ 'targetlbvserver': 'difference. 
ours: (str) server1 other: (str) server2' }" +''' + +try: + from nssrc.com.citrix.netscaler.nitro.resource.config.ssl.sslcertkey import sslcertkey + from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception + PYTHON_SDK_IMPORTED = True +except ImportError as e: + PYTHON_SDK_IMPORTED = False + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.network.netscaler.netscaler import ConfigProxy, get_nitro_client, netscaler_common_arguments, log, loglines, \ + get_immutables_intersection + + +def key_exists(client, module): + log('Checking if key exists') + log('certkey is %s' % module.params['certkey']) + all_certificates = sslcertkey.get(client) + certkeys = [item.certkey for item in all_certificates] + if module.params['certkey'] in certkeys: + return True + else: + return False + + +def key_identical(client, module, sslcertkey_proxy): + log('Checking if configured key is identical') + sslcertkey_list = sslcertkey.get_filtered(client, 'certkey:%s' % module.params['certkey']) + diff_dict = sslcertkey_proxy.diff_object(sslcertkey_list[0]) + if 'password' in diff_dict: + del diff_dict['password'] + if 'passplain' in diff_dict: + del diff_dict['passplain'] + if len(diff_dict) == 0: + return True + else: + return False + + +def diff_list(client, module, sslcertkey_proxy): + sslcertkey_list = sslcertkey.get_filtered(client, 'certkey:%s' % module.params['certkey']) + return sslcertkey_proxy.diff_object(sslcertkey_list[0]) + + +def main(): + + module_specific_arguments = dict( + certkey=dict(type='str'), + cert=dict(type='str'), + key=dict(type='str'), + password=dict(type='bool'), + inform=dict( + type='str', + choices=[ + 'DER', + 'PEM', + 'PFX', + ] + ), + passplain=dict( + type='str', + no_log=True, + ), + expirymonitor=dict( + type='str', + choices=[ + 'enabled', + 'disabled', + ] + ), + notificationperiod=dict(type='float'), + ) + + argument_spec = dict() + + argument_spec.update(netscaler_common_arguments) + + argument_spec.update(module_specific_arguments) + + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + module_result = dict( + changed=False, + failed=False, + loglines=loglines, + ) + + # Fail the module if imports failed + if not PYTHON_SDK_IMPORTED: + module.fail_json(msg='Could not load nitro python sdk') + + # Fallthrough to rest of execution + client = get_nitro_client(module) + + try: + client.login() + except nitro_exception as e: + msg = "nitro exception during login. 
errorcode=%s, message=%s" % (str(e.errorcode), e.message)
+ module.fail_json(msg=msg)
+ except Exception as e:
+ if str(type(e)) == "<class 'requests.exceptions.ConnectionError'>":
+ module.fail_json(msg='Connection error %s' % str(e))
+ elif str(type(e)) == "<class 'requests.exceptions.SSLError'>":
+ module.fail_json(msg='SSL Error %s' % str(e))
+ else:
+ module.fail_json(msg='Unexpected error during login %s' % str(e))
+
+ readwrite_attrs = [
+ 'certkey',
+ 'cert',
+ 'key',
+ 'password',
+ 'inform',
+ 'passplain',
+ 'expirymonitor',
+ 'notificationperiod',
+ ]
+
+ readonly_attrs = [
+ 'signaturealg',
+ 'certificatetype',
+ 'serial',
+ 'issuer',
+ 'clientcertnotbefore',
+ 'clientcertnotafter',
+ 'daystoexpiration',
+ 'subject',
+ 'publickey',
+ 'publickeysize',
+ 'version',
+ 'priority',
+ 'status',
+ 'passcrypt',
+ 'data',
+ 'servicename',
+ ]
+
+ immutable_attrs = [
+ 'certkey',
+ 'cert',
+ 'key',
+ 'password',
+ 'inform',
+ 'passplain',
+ ]
+
+ transforms = {
+ 'expirymonitor': [lambda v: v.upper()],
+ }
+
+ # Instantiate config proxy
+ sslcertkey_proxy = ConfigProxy(
+ actual=sslcertkey(),
+ client=client,
+ attribute_values_dict=module.params,
+ readwrite_attrs=readwrite_attrs,
+ readonly_attrs=readonly_attrs,
+ immutable_attrs=immutable_attrs,
+ transforms=transforms,
+ )
+
+ try:
+
+ if module.params['state'] == 'present':
+ log('Applying actions for state present')
+ if not key_exists(client, module):
+ if not module.check_mode:
+ log('Adding certificate key')
+ sslcertkey_proxy.add()
+ if module.params['save_config']:
+ client.save_config()
+ module_result['changed'] = True
+ elif not key_identical(client, module, sslcertkey_proxy):
+
+ # Check if we try to change value of immutable attributes
+ immutables_changed = get_immutables_intersection(sslcertkey_proxy, diff_list(client, module, sslcertkey_proxy).keys())
+ if immutables_changed != []:
+ module.fail_json(
+ msg='Cannot update immutable attributes %s' % (immutables_changed,),
+ diff=diff_list(client, module, sslcertkey_proxy),
+ **module_result
+ )
+
+ if not module.check_mode:
+ sslcertkey_proxy.update()
+ if module.params['save_config']:
+ client.save_config()
+ module_result['changed'] = True
+ else:
+ module_result['changed'] = False
+
+ # Sanity check for state
+ if not module.check_mode:
+ log('Sanity checks for state present')
+ if not key_exists(client, module):
+ module.fail_json(msg='SSL certkey does not exist')
+ if not key_identical(client, module, sslcertkey_proxy):
+ module.fail_json(msg='SSL certkey differs from configured', diff=diff_list(client, module, sslcertkey_proxy))
+
+ elif module.params['state'] == 'absent':
+ log('Applying actions for state absent')
+ if key_exists(client, module):
+ if not module.check_mode:
+ sslcertkey_proxy.delete()
+ if module.params['save_config']:
+ client.save_config()
+ module_result['changed'] = True
+ else:
+ module_result['changed'] = False
+
+ # Sanity check for state
+ if not module.check_mode:
+ log('Sanity checks for state absent')
+ if key_exists(client, module):
+ module.fail_json(msg='SSL certkey still exists')
+
+ except nitro_exception as e:
+ msg = "nitro exception errorcode=%s, message=%s" % (str(e.errorcode), e.message)
+ module.fail_json(msg=msg, **module_result)
+
+ client.logout()
+ module.exit_json(**module_result)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/plugins/modules/network/netvisor/pn_access_list.py b/plugins/modules/network/netvisor/pn_access_list.py
new file mode 100644
index 0000000000..c6fa6dc5d3
--- /dev/null
+++ b/plugins/modules/network/netvisor/pn_access_list.py
@@ -0,0 +1,166 @@
+#!/usr/bin/python
+#
Copyright: (c) 2018, Pluribus Networks +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: pn_access_list +author: "Pluribus Networks (@amitsi)" +short_description: CLI command to create/delete access-list +description: + - This module can be used to create and delete an access list. +options: + pn_cliswitch: + description: + - Target switch to run the CLI on. + required: False + type: str + state: + description: + - State the action to perform. Use 'present' to create access-list and + 'absent' to delete access-list. + required: True + choices: [ "present", "absent"] + pn_name: + description: + - Access List Name. + required: false + type: str + pn_scope: + description: + - 'scope. Available valid values - local or fabric.' + required: false + choices: ['local', 'fabric'] +''' + +EXAMPLES = """ +- name: access list functionality + pn_access_list: + pn_cliswitch: "sw01" + pn_name: "foo" + pn_scope: "local" + state: "present" + +- name: access list functionality + pn_access_list: + pn_cliswitch: "sw01" + pn_name: "foo" + pn_scope: "local" + state: "absent" + +- name: access list functionality + pn_access_list: + pn_cliswitch: "sw01" + pn_name: "foo" + pn_scope: "fabric" + state: "present" +""" + +RETURN = """ +command: + description: the CLI command run on the target node. + returned: always + type: str +stdout: + description: set of responses from the access-list command. + returned: always + type: list +stderr: + description: set of error responses from the access-list command. + returned: on error + type: list +changed: + description: indicates whether the CLI caused changes on the target. + returned: always + type: bool +""" + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.network.netvisor.pn_nvos import pn_cli, run_cli +from ansible_collections.community.general.plugins.module_utils.network.netvisor.netvisor import run_commands + + +def check_cli(module, cli): + """ + This method checks for idempotency using the access-list-show command. + If a list with given name exists, return True else False. 
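A minimal, self-contained restatement of the membership test that check_cli() performs may help here; it assumes `out` holds the stdout of the 'access-list-show format name no-show-headers' call, and the helper name is illustrative, not part of the module:

    # Illustrative restatement of check_cli()'s membership test: tokenize
    # the show-command output and look for the requested list name.
    def list_name_in_output(out, list_name):
        tokens = out.split() if out else []
        return list_name in tokens

    assert list_name_in_output('foo bar', 'foo') is True
    assert list_name_in_output('', 'foo') is False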
+ :param module: The Ansible module to fetch input parameters
+ :param cli: The CLI string
+ """
+ list_name = module.params['pn_name']
+
+ cli += ' access-list-show format name no-show-headers'
+ out = run_commands(module, cli)[1]
+
+ if out:
+ out = out.split()
+
+ return True if list_name in out else False
+
+
+def main():
+ """ This section is for arguments parsing """
+
+ state_map = dict(
+ present='access-list-create',
+ absent='access-list-delete',
+ )
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ pn_cliswitch=dict(required=False, type='str'),
+ state=dict(required=True, type='str',
+ choices=state_map.keys()),
+ pn_name=dict(required=False, type='str'),
+ pn_scope=dict(required=False, type='str',
+ choices=['local', 'fabric']),
+ ),
+ required_if=(
+ ["state", "present", ["pn_name", "pn_scope"]],
+ ["state", "absent", ["pn_name"]],
+ ),
+ )
+
+ # Accessing the arguments
+ cliswitch = module.params['pn_cliswitch']
+ state = module.params['state']
+ list_name = module.params['pn_name']
+ scope = module.params['pn_scope']
+
+ command = state_map[state]
+
+ # Building the CLI command string
+ cli = pn_cli(module, cliswitch)
+
+ ACC_LIST_EXISTS = check_cli(module, cli)
+ cli += ' %s name %s ' % (command, list_name)
+
+ if command == 'access-list-delete':
+ if ACC_LIST_EXISTS is False:
+ module.exit_json(
+ skipped=True,
+ msg='access-list with name %s does not exist' % list_name
+ )
+ else:
+ if command == 'access-list-create':
+ if ACC_LIST_EXISTS is True:
+ module.exit_json(
+ skipped=True,
+ msg='access list with name %s already exists' % list_name
+ )
+ cli += ' scope %s ' % scope
+
+ run_cli(module, cli, state_map)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/plugins/modules/network/netvisor/pn_access_list_ip.py b/plugins/modules/network/netvisor/pn_access_list_ip.py
new file mode 100644
index 0000000000..bab79d106c
--- /dev/null
+++ b/plugins/modules/network/netvisor/pn_access_list_ip.py
@@ -0,0 +1,172 @@
+#!/usr/bin/python
+# Copyright: (c) 2018, Pluribus Networks
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: pn_access_list_ip
+author: "Pluribus Networks (@rajaspachipulusu17)"
+short_description: CLI command to add/remove access-list-ip
+description:
+ - This module can be used to add and remove IPs associated with an access list.
+options:
+ pn_cliswitch:
+ description:
+ - Target switch to run the CLI on.
+ required: False
+ type: str
+ state:
+ description:
+ - State the action to perform. Use 'present' to add access-list-ip and
+ 'absent' to remove access-list-ip.
+ required: True
+ choices: ["present", "absent"]
+ pn_ip:
+ description:
+ - IP associated with the access list.
+ required: False
+ default: '::'
+ type: str
+ pn_name:
+ description:
+ - Access List Name.
+ required: False
+ type: str
+'''
+
+EXAMPLES = """
+- name: access list ip functionality
+ pn_access_list_ip:
+ pn_cliswitch: "sw01"
+ pn_name: "foo"
+ pn_ip: "172.16.3.1"
+ state: "present"
+
+- name: access list ip functionality
+ pn_access_list_ip:
+ pn_cliswitch: "sw01"
+ pn_name: "foo"
+ pn_ip: "172.16.3.1"
+ state: "absent"
+"""
+
+RETURN = """
+command:
+ description: the CLI command run on the target node.
+ returned: always
+ type: str
+stdout:
+ description: set of responses from the access-list-ip command.
+ returned: always + type: list +stderr: + description: set of error responses from the access-list-ip command. + returned: on error + type: list +changed: + description: indicates whether the CLI caused changes on the target. + returned: always + type: bool +""" + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.network.netvisor.pn_nvos import pn_cli, run_cli +from ansible_collections.community.general.plugins.module_utils.network.netvisor.netvisor import run_commands + + +def check_cli(module, cli): + """ + This method checks for idempotency using the access-list-ip-show command. + If ip exists, return True else False. + :param module: The Ansible module to fetch input parameters + :param cli: The CLI string + """ + name = module.params['pn_name'] + ip = module.params['pn_ip'] + clicopy = cli + + cli += ' access-list-show name %s no-show-headers ' % name + out = run_commands(module, cli)[1] + + if name not in out: + module.fail_json( + failed=True, + msg='access-list with name %s does not exist' % name + ) + + cli = clicopy + cli += ' access-list-ip-show name %s format ip no-show-headers' % name + + out = run_commands(module, cli)[1] + out = out.split() + return True if ip in out else False + + +def main(): + """ This section is for arguments parsing """ + + state_map = dict( + present='access-list-ip-add', + absent='access-list-ip-remove', + ) + + module = AnsibleModule( + argument_spec=dict( + pn_cliswitch=dict(required=False, type='str'), + state=dict(required=True, type='str', + choices=state_map.keys()), + pn_ip=dict(required=False, type='str', default='::'), + pn_name=dict(required=False, type='str'), + ), + required_if=( + ["state", "present", ["pn_name"]], + ["state", "absent", ["pn_name", "pn_ip"]], + ), + ) + + # Accessing the arguments + cliswitch = module.params['pn_cliswitch'] + state = module.params['state'] + ip = module.params['pn_ip'] + name = module.params['pn_name'] + + command = state_map[state] + + # Building the CLI command string + cli = pn_cli(module, cliswitch) + + IP_EXISTS = check_cli(module, cli) + cli += ' %s name %s ' % (command, name) + + if command == 'access-list-ip-remove': + if IP_EXISTS is False: + module.exit_json( + skipped=True, + msg='access-list with ip %s does not exist' % ip + ) + if ip: + cli += ' ip ' + ip + else: + if command == 'access-list-ip-add': + if IP_EXISTS is True: + module.exit_json( + skipped=True, + msg='access list with ip %s already exists' % ip + ) + if ip: + cli += ' ip ' + ip + + run_cli(module, cli, state_map) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/netvisor/pn_admin_service.py b/plugins/modules/network/netvisor/pn_admin_service.py new file mode 100644 index 0000000000..578e89f8d8 --- /dev/null +++ b/plugins/modules/network/netvisor/pn_admin_service.py @@ -0,0 +1,207 @@ +#!/usr/bin/python +# Copyright: (c) 2018, Pluribus Networks +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: pn_admin_service +author: "Pluribus Networks (@rajaspachipulusu17)" +short_description: CLI command to modify admin-service +description: + - This module is used to modify services on the server-switch. 
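The pn_admin_service module below turns each tri-state boolean parameter into a paired CLI keyword via the booleanArgs() helper imported from pn_nvos. The helper's body is not part of this diff; the following sketch captures the contract implied by call sites such as booleanArgs(web, 'web', 'no-web'), where True emits the positive keyword, False the 'no-' keyword, and None nothing:

    # Hedged sketch of the booleanArgs() contract, inferred from call sites.
    def boolean_args(value, if_true, if_false):
        if value is True:
            return ' %s ' % if_true
        if value is False:
            return ' %s ' % if_false
        return ''  # unset parameters contribute nothing to the CLI string

    cli = 'admin-service-modify if mgmt'
    cli += boolean_args(False, 'web', 'no-web')   # appends ' no-web '
    cli += boolean_args(None, 'snmp', 'no-snmp')  # appends nothing
    cli += boolean_args(True, 'icmp', 'no-icmp')  # appends ' icmp '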
+options: + pn_cliswitch: + description: + - Target switch to run the CLI on. + required: False + type: str + state: + description: + - State the action to perform. Use C(update) to modify the admin-service. + required: True + type: str + choices: ['update'] + pn_web: + description: + - Web (HTTP) to enable or disable. + required: False + type: bool + pn_web_ssl: + description: + - Web SSL (HTTPS) to enable or disable. + required: False + type: bool + pn_snmp: + description: + - Simple Network Monitoring Protocol (SNMP) to enable or disable. + required: False + type: bool + pn_web_port: + description: + - Web (HTTP) port to enable or disable. + required: False + type: str + pn_web_ssl_port: + description: + - Web SSL (HTTPS) port to enable or disable. + required: False + type: str + pn_nfs: + description: + - Network File System (NFS) to enable or disable. + required: False + type: bool + pn_ssh: + description: + - Secure Shell to enable or disable. + required: False + type: bool + pn_web_log: + description: + - Web logging to enable or disable. + required: False + type: bool + pn__if: + description: + - administrative service interface. + required: False + type: str + choices: ['mgmt', 'data'] + pn_icmp: + description: + - Internet Message Control Protocol (ICMP) to enable or disable. + required: False + type: bool + pn_net_api: + description: + - Netvisor API to enable or disable APIs. + required: False + type: bool +''' + +EXAMPLES = """ +- name: admin service functionality + pn_admin_service: + pn_cliswitch: "sw01" + state: "update" + pn__if: "mgmt" + pn_web: False + pn_icmp: True + +- name: admin service functionality + pn_admin_service: + pn_cliswitch: "sw01" + state: "update" + pn_web: False + pn__if: "mgmt" + pn_snmp: True + pn_net_api: True + pn_ssh: True +""" + +RETURN = """ +command: + description: the CLI command run on the target node. + returned: always + type: str +stdout: + description: set of responses from the admin-service command. + returned: always + type: list +stderr: + description: set of error responses from the admin-service command. + returned: on error + type: list +changed: + description: indicates whether the CLI caused changes on the target. 
+ returned: always + type: bool +""" + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.network.netvisor.pn_nvos import pn_cli, booleanArgs, run_cli + + +def main(): + """ This section is for arguments parsing """ + + state_map = dict( + update='admin-service-modify' + ) + + module = AnsibleModule( + argument_spec=dict( + pn_cliswitch=dict(required=False, type='str'), + state=dict(required=True, type='str', + choices=state_map.keys()), + pn_web=dict(required=False, type='bool'), + pn_web_ssl=dict(required=False, type='bool'), + pn_snmp=dict(required=False, type='bool'), + pn_web_port=dict(required=False, type='str'), + pn_web_ssl_port=dict(required=False, type='str'), + pn_nfs=dict(required=False, type='bool'), + pn_ssh=dict(required=False, type='bool'), + pn_web_log=dict(required=False, type='bool'), + pn__if=dict(required=False, type='str', choices=['mgmt', 'data']), + pn_icmp=dict(required=False, type='bool'), + pn_net_api=dict(required=False, type='bool'), + ), + required_if=([['state', 'update', ['pn__if']]]), + required_one_of=[['pn_web', 'pn_web_ssl', 'pn_snmp', + 'pn_web_port', 'pn_web_ssl_port', 'pn_nfs', + 'pn_ssh', 'pn_web_log', 'pn_icmp', 'pn_net_api']] + ) + + # Accessing the arguments + cliswitch = module.params['pn_cliswitch'] + state = module.params['state'] + web = module.params['pn_web'] + web_ssl = module.params['pn_web_ssl'] + snmp = module.params['pn_snmp'] + web_port = module.params['pn_web_port'] + web_ssl_port = module.params['pn_web_ssl_port'] + nfs = module.params['pn_nfs'] + ssh = module.params['pn_ssh'] + web_log = module.params['pn_web_log'] + _if = module.params['pn__if'] + icmp = module.params['pn_icmp'] + net_api = module.params['pn_net_api'] + + command = state_map[state] + + # Building the CLI command string + cli = pn_cli(module, cliswitch) + + if command == 'admin-service-modify': + cli += ' %s ' % command + + if _if: + cli += ' if ' + _if + if web_port: + cli += ' web-port ' + web_port + if web_ssl_port: + cli += ' web-ssl-port ' + web_ssl_port + + cli += booleanArgs(web, 'web', 'no-web') + cli += booleanArgs(web_ssl, 'web-ssl', 'no-web-ssl') + cli += booleanArgs(snmp, 'snmp', 'no-snmp') + cli += booleanArgs(nfs, 'nfs', 'no-nfs') + cli += booleanArgs(ssh, 'ssh', 'no-ssh') + cli += booleanArgs(icmp, 'icmp', 'no-icmp') + cli += booleanArgs(net_api, 'net-api', 'no-net-api') + cli += booleanArgs(web_log, 'web-log', 'no-web-log') + + run_cli(module, cli, state_map) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/netvisor/pn_admin_session_timeout.py b/plugins/modules/network/netvisor/pn_admin_session_timeout.py new file mode 100644 index 0000000000..59e4cc1db8 --- /dev/null +++ b/plugins/modules/network/netvisor/pn_admin_session_timeout.py @@ -0,0 +1,120 @@ +#!/usr/bin/python +# Copyright: (c) 2018, Pluribus Networks +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: pn_admin_session_timeout +author: "Pluribus Networks (@rajaspachipulusu17)" +short_description: CLI command to modify admin-session-timeout +description: + - This module can be used to modify admin session timeout. +options: + pn_cliswitch: + description: + - Target switch to run the CLI on. 
+ required: False + type: str + state: + description: + - State the action to perform. + C(update) to modify the admin-session-timeout. + required: True + type: str + choices: ['update'] + pn_timeout: + description: + - Maximum time to wait for user activity before + terminating login session. Minimum should be 60s. + required: False + type: str +''' + +EXAMPLES = """ +- name: admin session timeout functionality + pn_admin_session_timeout: + pn_cliswitch: "sw01" + state: "update" + pn_timeout: "61s" + +- name: admin session timeout functionality + pn_admin_session_timeout: + pn_cliswitch: "sw01" + state: "update" + pn_timeout: "1d" + +- name: admin session timeout functionality + pn_admin_session_timeout: + pn_cliswitch: "sw01" + state: "update" + pn_timeout: "10d20m3h15s" +""" + +RETURN = """ +command: + description: the CLI command run on the target node. + returned: always + type: str +stdout: + description: set of responses from the admin-session-timeout command. + returned: always + type: list +stderr: + description: set of error responses from the admin-session-timeout command. + returned: on error + type: list +changed: + description: indicates whether the CLI caused changes on the target. + returned: always + type: bool +""" + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.network.netvisor.pn_nvos import pn_cli, run_cli + + +def main(): + """ This section is for arguments parsing """ + + state_map = dict( + update='admin-session-timeout-modify' + ) + + module = AnsibleModule( + argument_spec=dict( + pn_cliswitch=dict(required=False, type='str'), + state=dict(required=True, type='str', + choices=state_map.keys()), + pn_timeout=dict(required=False, type='str'), + ), + required_together=[['state', 'pn_timeout']], + ) + + # Accessing the arguments + cliswitch = module.params['pn_cliswitch'] + state = module.params['state'] + timeout = module.params['pn_timeout'] + + command = state_map[state] + + # Building the CLI command string + cli = pn_cli(module, cliswitch) + if command == 'admin-session-timeout-modify': + cli += ' %s ' % command + if timeout: + cli += ' timeout ' + timeout + + run_cli(module, cli, state_map) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/netvisor/pn_admin_syslog.py b/plugins/modules/network/netvisor/pn_admin_syslog.py new file mode 100644 index 0000000000..bc971c0355 --- /dev/null +++ b/plugins/modules/network/netvisor/pn_admin_syslog.py @@ -0,0 +1,229 @@ +#!/usr/bin/python +# Copyright: (c) 2018, Pluribus Networks +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: pn_admin_syslog +author: "Pluribus Networks (@rajaspachipulusu17)" +short_description: CLI command to create/modify/delete admin-syslog +description: + - This module can be used to create the scope and other parameters of syslog event collection. + - This module can be used to modify parameters of syslog event collection. + - This module can be used to delete the scope and other parameters of syslog event collection. +options: + pn_cliswitch: + description: + - Target switch to run the CLI on. + required: False + type: str + state: + description: + - State the action to perform. 
Use C(present) to create admin-syslog and + C(absent) to delete admin-syslog C(update) to modify the admin-syslog. + required: True + type: str + choices: ['present', 'absent', 'update'] + pn_scope: + description: + - Scope of the system log. + required: False + type: str + choices: ['local', 'fabric'] + pn_host: + description: + - Hostname to log system events. + required: False + type: str + pn_port: + description: + - Host port. + required: False + type: str + pn_transport: + description: + - Transport for log events - tcp/tls or udp. + required: False + type: str + choices: ['tcp-tls', 'udp'] + default: 'udp' + pn_message_format: + description: + - message-format for log events - structured or legacy. + required: False + choices: ['structured', 'legacy'] + type: str + pn_name: + description: + - name of the system log. + required: False + type: str +''' + +EXAMPLES = """ +- name: admin-syslog functionality + pn_admin_syslog: + pn_cliswitch: "sw01" + state: "absent" + pn_name: "foo" + pn_scope: "local" + +- name: admin-syslog functionality + pn_admin_syslog: + pn_cliswitch: "sw01" + state: "present" + pn_name: "foo" + pn_scope: "local" + pn_host: "166.68.224.46" + pn_message_format: "structured" + +- name: admin-syslog functionality + pn_admin_syslog: + pn_cliswitch: "sw01" + state: "update" + pn_name: "foo" + pn_host: "166.68.224.10" +""" + +RETURN = """ +command: + description: the CLI command run on the target node. + returned: always + type: str +stdout: + description: set of responses from the admin-syslog command. + returned: always + type: list +stderr: + description: set of error responses from the admin-syslog command. + returned: on error + type: list +changed: + description: indicates whether the CLI caused changes on the target. + returned: always + type: bool +""" + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.network.netvisor.pn_nvos import pn_cli, run_cli +from ansible_collections.community.general.plugins.module_utils.network.netvisor.netvisor import run_commands + + +def check_cli(module, cli): + """ + This method checks for idempotency using the admin-syslog-show command. + If a user with given name exists, return as True else False. 
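These netvisor modules all dispatch from the requested state to a concrete CLI subcommand through a small state_map dictionary; pn_admin_syslog's three-way mapping is typical. A condensed restatement (the function name is illustrative):

    # The state -> subcommand dispatch used throughout these modules.
    state_map = {
        'present': 'admin-syslog-create',
        'absent': 'admin-syslog-delete',
        'update': 'admin-syslog-modify',
    }

    def subcommand_for(state):
        # choices=state_map.keys() in the argument spec guarantees the key exists.
        return state_map[state]

    assert subcommand_for('absent') == 'admin-syslog-delete'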
+ :param module: The Ansible module to fetch input parameters + :param cli: The CLI string + """ + + name = module.params['pn_name'] + + cli += ' admin-syslog-show format name no-show-headers' + out = run_commands(module, cli)[1] + + out = out.split() + + return True if name in out else False + + +def main(): + """ This section is for arguments parsing """ + + state_map = dict( + present='admin-syslog-create', + absent='admin-syslog-delete', + update='admin-syslog-modify' + ) + + module = AnsibleModule( + argument_spec=dict( + pn_cliswitch=dict(required=False, type='str'), + state=dict(required=True, type='str', + choices=state_map.keys()), + pn_scope=dict(required=False, type='str', + choices=['local', 'fabric']), + pn_host=dict(required=False, type='str'), + pn_port=dict(required=False, type='str'), + pn_transport=dict(required=False, type='str', + choices=['tcp-tls', 'udp'], default='udp'), + pn_message_format=dict(required=False, type='str', + choices=['structured', 'legacy']), + pn_name=dict(required=False, type='str'), + ), + required_if=( + ['state', 'present', ['pn_name', 'pn_host', 'pn_scope']], + ['state', 'absent', ['pn_name']], + ['state', 'update', ['pn_name']] + ), + required_one_of=[['pn_port', 'pn_message_format', + 'pn_host', 'pn_transport', 'pn_scope']] + ) + + # Accessing the arguments + cliswitch = module.params['pn_cliswitch'] + state = module.params['state'] + scope = module.params['pn_scope'] + host = module.params['pn_host'] + port = module.params['pn_port'] + transport = module.params['pn_transport'] + message_format = module.params['pn_message_format'] + name = module.params['pn_name'] + + command = state_map[state] + + # Building the CLI command string + cli = pn_cli(module, cliswitch) + + SYSLOG_EXISTS = check_cli(module, cli) + cli += ' %s name %s ' % (command, name) + + if command == 'admin-syslog-modify': + if SYSLOG_EXISTS is False: + module.fail_json( + failed=True, + msg='admin syslog with name %s does not exist' % name + ) + + if command == 'admin-syslog-delete': + if SYSLOG_EXISTS is False: + module.exit_json( + skipped=True, + msg='admin syslog with name %s does not exist' % name + ) + + if command == 'admin-syslog-create': + if SYSLOG_EXISTS is True: + module.exit_json( + skipped=True, + msg='admin syslog user with name %s already exists' % name + ) + + if command == 'admin-syslog-create': + if scope: + cli += ' scope ' + scope + + if command != 'admin-syslog-delete': + if host: + cli += ' host ' + host + if port: + cli += ' port ' + port + if transport: + cli += ' transport ' + transport + if message_format: + cli += ' message-format ' + message_format + + run_cli(module, cli, state_map) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/netvisor/pn_cluster.py b/plugins/modules/network/netvisor/pn_cluster.py new file mode 100644 index 0000000000..06d09639cf --- /dev/null +++ b/plugins/modules/network/netvisor/pn_cluster.py @@ -0,0 +1,322 @@ +#!/usr/bin/python +""" PN CLI cluster-create/cluster-delete """ + +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. 
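After the existence check, pn_admin_syslog's main() applies a consistent outcome matrix: modifying a missing object is a failure, while deleting a missing object or creating an existing one is an idempotent skip. A compact restatement under those assumptions (decide() is illustrative, not part of the module):

    # Outcome matrix applied once SYSLOG_EXISTS has been computed.
    def decide(command, exists):
        if command.endswith('-modify') and not exists:
            return 'fail'   # fail_json: cannot modify what is not there
        if command.endswith('-delete') and not exists:
            return 'skip'   # exit_json(skipped=True): already absent
        if command.endswith('-create') and exists:
            return 'skip'   # exit_json(skipped=True): already present
        return 'run'        # proceed to run_cli

    assert decide('admin-syslog-modify', exists=False) == 'fail'
    assert decide('admin-syslog-delete', exists=False) == 'skip'
    assert decide('admin-syslog-create', exists=False) == 'run'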
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['deprecated'],
+ 'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: pn_cluster
+author: "Pluribus Networks (@amitsi)"
+short_description: CLI command to create/delete a cluster.
+deprecated:
+ removed_in: '2.12'
+ why: Doesn't support latest Pluribus Networks netvisor
+ alternative: Latest modules will be pushed in Ansible future versions.
+description:
+ - Execute cluster-create or cluster-delete command.
+ - A cluster allows two switches to cooperate in high-availability (HA)
+ deployments. The nodes that form the cluster must be members of the same
+ fabric. Clusters are typically used in conjunction with a virtual link
+ aggregation group (VLAG) that allows links physically connected to two
+ separate switches to appear as a single trunk to a third device. The third
+ device can be a switch, server, or any Ethernet device.
+options:
+ pn_cliusername:
+ description:
+ - Provide login username if user is not root.
+ required: False
+ pn_clipassword:
+ description:
+ - Provide login password if user is not root.
+ required: False
+ pn_cliswitch:
+ description:
+ - Target switch to run the cli on.
+ required: False
+ default: 'local'
+ state:
+ description:
+ - Specify action to perform. Use 'present' to create cluster and 'absent'
+ to delete cluster.
+ required: true
+ choices: ['present', 'absent']
+ pn_name:
+ description:
+ - Specify the name of the cluster.
+ required: true
+ pn_cluster_node1:
+ description:
+ - Specify the name of the first switch in the cluster.
+ - Required for 'cluster-create'.
+ pn_cluster_node2:
+ description:
+ - Specify the name of the second switch in the cluster.
+ - Required for 'cluster-create'.
+ pn_validate:
+ description:
+ - Validate the inter-switch links and state of switches in the cluster.
+ type: bool
+'''
+
+EXAMPLES = """
+- name: create spine cluster
+ pn_cluster:
+ state: 'present'
+ pn_name: 'spine-cluster'
+ pn_cluster_node1: 'spine01'
+ pn_cluster_node2: 'spine02'
+ pn_validate: True
+ pn_quiet: True
+
+- name: delete spine cluster
+ pn_cluster:
+ state: 'absent'
+ pn_name: 'spine-cluster'
+ pn_quiet: True
+"""
+
+RETURN = """
+command:
+ description: The CLI command run on the target node(s).
+ returned: always
+ type: str
+stdout:
+ description: The set of responses from the cluster command.
+ returned: always
+ type: list
+stderr:
+ description: The set of error responses from the cluster command.
+ returned: on error
+ type: list
+changed:
+ description: Indicates whether the CLI caused changes on the target.
+ returned: always
+ type: bool
+"""
+
+import shlex
+
+# AnsibleModule boilerplate
+from ansible.module_utils.basic import AnsibleModule
+
+NAME_EXISTS = None
+NODE1_EXISTS = None
+NODE2_EXISTS = None
+
+
+def pn_cli(module):
+ """
+ This method is to generate the cli portion to launch the Netvisor cli.
+ It parses the username, password, switch parameters from module.
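pn_cluster predates the shared pn_nvos helpers and assembles its own /usr/bin/cli prefix. Condensed from the pn_cli() body that follows, with the parameters passed explicitly for clarity (the real helper reads them from module.params):

    # Standalone restatement of pn_cluster's pn_cli() prefix assembly.
    def cli_prefix(username, password, cliswitch):
        if username and password:
            cli = '/usr/bin/cli --quiet --user %s:%s ' % (username, password)
        else:
            cli = '/usr/bin/cli --quiet '
        if cliswitch == 'local':
            cli += ' switch-local '
        else:
            cli += ' switch ' + cliswitch
        return cli

    assert cli_prefix(None, None, 'local').endswith(' switch-local ')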
:param module: The Ansible module to fetch username, password and switch
+ :return: returns the cli string for further processing
+ """
+ username = module.params['pn_cliusername']
+ password = module.params['pn_clipassword']
+ cliswitch = module.params['pn_cliswitch']
+
+ if username and password:
+ cli = '/usr/bin/cli --quiet --user %s:%s ' % (username, password)
+ else:
+ cli = '/usr/bin/cli --quiet '
+
+ if cliswitch == 'local':
+ cli += ' switch-local '
+ else:
+ cli += ' switch ' + cliswitch
+ return cli
+
+
+def check_cli(module, cli):
+ """
+ This method checks for idempotency using the cluster-show command.
+ If a cluster with given name exists, return NAME_EXISTS as True else False.
+ If the given cluster-node-1 is already a part of another cluster, return
+ NODE1_EXISTS as True else False.
+ If the given cluster-node-2 is already a part of another cluster, return
+ NODE2_EXISTS as True else False.
+ :param module: The Ansible module to fetch input parameters
+ :param cli: The CLI string
+ :return Global Booleans: NAME_EXISTS, NODE1_EXISTS, NODE2_EXISTS
+ """
+ name = module.params['pn_name']
+ node1 = module.params['pn_cluster_node1']
+ node2 = module.params['pn_cluster_node2']
+
+ show = cli + ' cluster-show format name,cluster-node-1,cluster-node-2 '
+ show = shlex.split(show)
+ out = module.run_command(show)[1]
+
+ out = out.split()
+ # Global flags
+ global NAME_EXISTS, NODE1_EXISTS, NODE2_EXISTS
+
+ if name in out:
+ NAME_EXISTS = True
+ else:
+ NAME_EXISTS = False
+ if node1 in out:
+ NODE1_EXISTS = True
+ else:
+ NODE1_EXISTS = False
+ if node2 in out:
+ NODE2_EXISTS = True
+ else:
+ NODE2_EXISTS = False
+
+
+def run_cli(module, cli):
+ """
+ This method executes the cli command on the target node(s) and returns the
+ output. The module then exits based on the output.
+ :param cli: the complete cli string to be executed on the target node(s).
+ :param module: The Ansible module to fetch command
+ """
+ cliswitch = module.params['pn_cliswitch']
+ state = module.params['state']
+ command = get_command_from_state(state)
+
+ cmd = shlex.split(cli)
+
+ # 'out' contains the output
+ # 'err' contains the error messages
+ result, out, err = module.run_command(cmd)
+
+ print_cli = cli.split(cliswitch)[1]
+
+ # Response in JSON format
+ if result != 0:
+ module.exit_json(
+ command=print_cli,
+ stderr=err.strip(),
+ msg="%s operation failed" % command,
+ changed=False
+ )
+
+ if out:
+ module.exit_json(
+ command=print_cli,
+ stdout=out.strip(),
+ msg="%s operation completed" % command,
+ changed=True
+ )
+
+ else:
+ module.exit_json(
+ command=print_cli,
+ msg="%s operation completed" % command,
+ changed=True
+ )
+
+
+def get_command_from_state(state):
+ """
+ This method gets appropriate command name for the state specified. It
+ returns the command name for the specified state.
+ :param state: The state for which the respective command name is required.
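check_cli() above reduces the cluster-show output to three membership flags. The same computation, restated without module-level globals (the function name is illustrative):

    # Pure-function restatement of check_cli()'s flag computation.
    def cluster_flags(out, name, node1, node2):
        tokens = out.split()
        return (name in tokens, node1 in tokens, node2 in tokens)

    out = 'spine-cluster spine01 spine02'
    assert cluster_flags(out, 'spine-cluster', 'spine01', 'spine03') == (True, True, False)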
+ """ + command = None + if state == 'present': + command = 'cluster-create' + if state == 'absent': + command = 'cluster-delete' + return command + + +def main(): + """ This section is for arguments parsing """ + module = AnsibleModule( + argument_spec=dict( + pn_cliusername=dict(required=False, type='str'), + pn_clipassword=dict(required=False, type='str', no_log=True), + pn_cliswitch=dict(required=False, type='str', default='local'), + state=dict(required=True, type='str', + choices=['present', 'absent']), + pn_name=dict(required=True, type='str'), + pn_cluster_node1=dict(type='str'), + pn_cluster_node2=dict(type='str'), + pn_validate=dict(type='bool') + ), + required_if=( + ["state", "present", + ["pn_name", "pn_cluster_node1", "pn_cluster_node2"]], + ["state", "absent", ["pn_name"]] + ) + ) + + # Accessing the parameters + state = module.params['state'] + name = module.params['pn_name'] + cluster_node1 = module.params['pn_cluster_node1'] + cluster_node2 = module.params['pn_cluster_node2'] + validate = module.params['pn_validate'] + + command = get_command_from_state(state) + + # Building the CLI command string + cli = pn_cli(module) + + if command == 'cluster-create': + + check_cli(module, cli) + + if NAME_EXISTS is True: + module.exit_json( + skipped=True, + msg='Cluster with name %s already exists' % name + ) + if NODE1_EXISTS is True: + module.exit_json( + skipped=True, + msg='Node %s already part of a cluster' % cluster_node1 + ) + if NODE2_EXISTS is True: + module.exit_json( + skipped=True, + msg='Node %s already part of a cluster' % cluster_node2 + ) + + cli += ' %s name %s ' % (command, name) + cli += 'cluster-node-1 %s cluster-node-2 %s ' % (cluster_node1, + cluster_node2) + if validate is True: + cli += ' validate ' + if validate is False: + cli += ' no-validate ' + + if command == 'cluster-delete': + + check_cli(module, cli) + + if NAME_EXISTS is False: + module.exit_json( + skipped=True, + msg='Cluster with name %s does not exist' % name + ) + cli += ' %s name %s ' % (command, name) + + run_cli(module, cli) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/netvisor/pn_connection_stats_settings.py b/plugins/modules/network/netvisor/pn_connection_stats_settings.py new file mode 100644 index 0000000000..c6cc882ffe --- /dev/null +++ b/plugins/modules/network/netvisor/pn_connection_stats_settings.py @@ -0,0 +1,267 @@ +#!/usr/bin/python +# Copyright: (c) 2018, Pluribus Networks +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: pn_connection_stats_settings +author: "Pluribus Networks (@rajaspachipulusu17)" +short_description: CLI command to modify connection-stats-settings +description: + - This module can be used to modify the settings for collecting statistical + data about connections. +options: + pn_cliswitch: + description: + - Target switch to run the CLI on. + required: False + type: str + state: + description: + - State the action to perform. Use C(update) to modify the + connection-stats-settings. + required: True + type: str + choices: ['update'] + pn_enable: + description: + - Enable or disable collecting connections statistics. + required: False + type: bool + pn_connection_backup_enable: + description: + - Enable backup for connection statistics collection. 
+ required: False + type: bool + pn_client_server_stats_max_memory: + description: + - maximum memory for client server statistics. + required: False + type: str + pn_connection_stats_log_disk_space: + description: + - disk-space allocated for statistics (including rotated log files). + required: False + type: str + pn_client_server_stats_log_enable: + description: + - Enable or disable statistics. + required: False + type: bool + pn_service_stat_max_memory: + description: + - maximum memory allowed for service statistics. + required: False + type: str + pn_connection_stats_log_interval: + description: + - interval to collect statistics. + required: False + type: str + pn_fabric_connection_backup_interval: + description: + - backup interval for fabric connection statistics collection. + required: False + type: str + pn_connection_backup_interval: + description: + - backup interval for connection statistics collection. + required: False + type: str + pn_connection_stats_log_enable: + description: + - enable or disable statistics. + required: False + type: bool + pn_fabric_connection_max_memory: + description: + - maximum memory allowed for fabric connection statistics. + required: False + type: str + pn_fabric_connection_backup_enable: + description: + - enable backup for fabric connection statistics collection. + required: False + type: bool + pn_client_server_stats_log_disk_space: + description: + - disk-space allocated for statistics (including rotated log files). + required: False + type: str + pn_connection_max_memory: + description: + - maximum memory allowed for connection statistics. + required: False + type: str + pn_connection_stats_max_memory: + description: + - maximum memory allowed for connection statistics. + required: False + type: str + pn_client_server_stats_log_interval: + description: + - interval to collect statistics. + required: False + type: str +''' + +EXAMPLES = """ +- name: "Modify connection stats settings" + pn_connection_stats_settings: + pn_cliswitch: "sw01" + state: "update" + pn_enable: False + pn_fabric_connection_max_memory: "1000" + +- name: "Modify connection stats settings" + pn_connection_stats_settings: + pn_cliswitch: "sw01" + state: "update" + pn_enable: True +""" + +RETURN = """ +command: + description: the CLI command run on the target node. + returned: always + type: str +stdout: + description: set of responses from the connection-stats-settings command. + returned: always + type: list +stderr: + description: set of error responses from the connection-stats-settings command. + returned: on error + type: list +changed: + description: indicates whether the CLI caused changes on the target. 
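pn_connection_stats_settings below combines booleanArgs() pairs for its tri-state toggles with plain ' key value' appends for every string parameter the caller supplied. A sketch of the string-parameter half, with illustrative parameter names (the module itself uses a chain of explicit 'if value:' checks):

    # Append ' key value' for each supplied string option.
    def append_value_args(cli, **params):
        for key, value in params.items():
            if value:
                cli += ' %s %s' % (key.replace('_', '-'), value)
        return cli

    cli = append_value_args('connection-stats-settings-modify',
                            connection_max_memory='1000',
                            connection_backup_interval=None)
    assert cli == 'connection-stats-settings-modify connection-max-memory 1000'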
+ returned: always + type: bool +""" + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.network.netvisor.pn_nvos import pn_cli, run_cli, booleanArgs + + +def main(): + """ This section is for arguments parsing """ + + state_map = dict( + update='connection-stats-settings-modify' + ) + + module = AnsibleModule( + argument_spec=dict( + pn_cliswitch=dict(required=False, type='str'), + state=dict(required=True, type='str', + choices=state_map.keys()), + pn_enable=dict(required=False, type='bool'), + pn_connection_backup_enable=dict(required=False, type='bool'), + pn_client_server_stats_max_memory=dict(required=False, type='str'), + pn_connection_stats_log_disk_space=dict(required=False, + type='str'), + pn_client_server_stats_log_enable=dict(required=False, + type='bool'), + pn_service_stat_max_memory=dict(required=False, type='str'), + pn_connection_stats_log_interval=dict(required=False, type='str'), + pn_fabric_connection_backup_interval=dict(required=False, + type='str'), + pn_connection_backup_interval=dict(required=False, type='str'), + pn_connection_stats_log_enable=dict(required=False, type='bool'), + pn_fabric_connection_max_memory=dict(required=False, type='str'), + pn_fabric_connection_backup_enable=dict(required=False, + type='bool'), + pn_client_server_stats_log_disk_space=dict(required=False, + type='str'), + pn_connection_max_memory=dict(required=False, type='str'), + pn_connection_stats_max_memory=dict(required=False, type='str'), + pn_client_server_stats_log_interval=dict(required=False, + type='str'), + ), + required_one_of=[['pn_enable', 'pn_connection_backup_enable', + 'pn_client_server_stats_max_memory', + 'pn_connection_stats_log_disk_space', + 'pn_client_server_stats_log_enable', + 'pn_service_stat_max_memory', + 'pn_connection_stats_log_interval', + 'pn_connection_backup_interval', + 'pn_connection_stats_log_enable', + 'pn_fabric_connection_max_memory', + 'pn_fabric_connection_backup_enable', + 'pn_client_server_stats_log_disk_space', + 'pn_connection_max_memory', + 'pn_connection_stats_max_memory', + 'pn_client_server_stats_log_interval']] + ) + + # Accessing the arguments + cliswitch = module.params['pn_cliswitch'] + state = module.params['state'] + enable = module.params['pn_enable'] + connection_backup_enable = module.params['pn_connection_backup_enable'] + client_server_stats_max_memory = module.params['pn_client_server_stats_max_memory'] + connection_stats_log_disk_space = module.params['pn_connection_stats_log_disk_space'] + client_server_stats_log_enable = module.params['pn_client_server_stats_log_enable'] + service_stat_max_memory = module.params['pn_service_stat_max_memory'] + connection_stats_log_interval = module.params['pn_connection_stats_log_interval'] + fabric_connection_backup_interval = module.params['pn_fabric_connection_backup_interval'] + connection_backup_interval = module.params['pn_connection_backup_interval'] + connection_stats_log_enable = module.params['pn_connection_stats_log_enable'] + fabric_connection_max_memory = module.params['pn_fabric_connection_max_memory'] + fabric_connection_backup_enable = module.params['pn_fabric_connection_backup_enable'] + client_server_stats_log_disk_space = module.params['pn_client_server_stats_log_disk_space'] + connection_max_memory = module.params['pn_connection_max_memory'] + connection_stats_max_memory = module.params['pn_connection_stats_max_memory'] + client_server_stats_log_interval = 
module.params['pn_client_server_stats_log_interval'] + + command = state_map[state] + + # Building the CLI command string + cli = pn_cli(module, cliswitch) + + if command == 'connection-stats-settings-modify': + cli += ' %s ' % command + + cli += booleanArgs(enable, 'enable', 'disable') + cli += booleanArgs(connection_backup_enable, 'connection-backup-enable', 'connection-backup-disable') + cli += booleanArgs(client_server_stats_log_enable, 'client-server-stats-log-enable', 'client-server-stats-log-disable') + cli += booleanArgs(connection_stats_log_enable, 'connection-stats-log-enable', 'connection-stats-log-disable') + cli += booleanArgs(fabric_connection_backup_enable, 'fabric-connection-backup-enable', 'fabric-connection-backup-disable') + + if client_server_stats_max_memory: + cli += ' client-server-stats-max-memory ' + client_server_stats_max_memory + if connection_stats_log_disk_space: + cli += ' connection-stats-log-disk-space ' + connection_stats_log_disk_space + if service_stat_max_memory: + cli += ' service-stat-max-memory ' + service_stat_max_memory + if connection_stats_log_interval: + cli += ' connection-stats-log-interval ' + connection_stats_log_interval + if fabric_connection_backup_interval: + cli += ' fabric-connection-backup-interval ' + fabric_connection_backup_interval + if connection_backup_interval: + cli += ' connection-backup-interval ' + connection_backup_interval + if fabric_connection_max_memory: + cli += ' fabric-connection-max-memory ' + fabric_connection_max_memory + if client_server_stats_log_disk_space: + cli += ' client-server-stats-log-disk-space ' + client_server_stats_log_disk_space + if connection_max_memory: + cli += ' connection-max-memory ' + connection_max_memory + if connection_stats_max_memory: + cli += ' connection-stats-max-memory ' + connection_stats_max_memory + if client_server_stats_log_interval: + cli += ' client-server-stats-log-interval ' + client_server_stats_log_interval + + run_cli(module, cli, state_map) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/netvisor/pn_cpu_class.py b/plugins/modules/network/netvisor/pn_cpu_class.py new file mode 100644 index 0000000000..fadbed03e4 --- /dev/null +++ b/plugins/modules/network/netvisor/pn_cpu_class.py @@ -0,0 +1,212 @@ +#!/usr/bin/python +# Copyright: (c) 2018, Pluribus Networks +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: pn_cpu_class +author: "Pluribus Networks (@rajaspachipulusu17)" +short_description: CLI command to create/modify/delete cpu-class +description: + - This module can be used to create, modify and delete CPU class information. +options: + pn_cliswitch: + description: + - Target switch to run the CLI on. + required: False + type: str + state: + description: + - State the action to perform. Use C(present) to create cpu-class and + C(absent) to delete cpu-class C(update) to modify the cpu-class. + required: True + type: str + choices: ['present', 'absent', 'update'] + pn_scope: + description: + - scope for CPU class. + required: false + choices: ['local', 'fabric'] + pn_hog_protect: + description: + - enable host-based hog protection. 
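pn_cpu_class guards every create/delete with a precheck: it reads the cpu-class-enable field from system-settings-show and fails outright when the feature is off. The guard, restated as a predicate (the name is illustrative):

    # Predicate form of the cpu-class-enable guard in check_cli() below.
    def cpu_class_feature_enabled(system_settings_out):
        return 'on' in system_settings_out.split()

    assert cpu_class_feature_enabled('on') is True
    assert cpu_class_feature_enabled('off') is False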
+ required: False + type: str + choices: ['disable', 'enable', 'enable-and-drop'] + pn_rate_limit: + description: + - rate-limit for CPU class. + required: False + type: str + pn_name: + description: + - name for the CPU class. + required: False + type: str +''' + +EXAMPLES = """ +- name: create cpu class + pn_cpu_class: + pn_cliswitch: 'sw01' + state: 'present' + pn_name: 'icmp' + pn_rate_limit: '1000' + pn_scope: 'local' + +- name: delete cpu class + pn_cpu_class: + pn_cliswitch: 'sw01' + state: 'absent' + pn_name: 'icmp' + + +- name: modify cpu class + pn_cpu_class: + pn_cliswitch: 'sw01' + state: 'update' + pn_name: 'icmp' + pn_rate_limit: '2000' +""" + +RETURN = """ +command: + description: the CLI command run on the target node. + returned: always + type: str +stdout: + description: set of responses from the cpu-class command. + returned: always + type: list +stderr: + description: set of error responses from the cpu-class command. + returned: on error + type: list +changed: + description: indicates whether the CLI caused changes on the target. + returned: always + type: bool +""" + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.network.netvisor.pn_nvos import pn_cli, run_cli +from ansible_collections.community.general.plugins.module_utils.network.netvisor.netvisor import run_commands + + +def check_cli(module, cli): + """ + This method checks for idempotency using the cpu-class-show command. + If a user with given name exists, return True else False. + :param module: The Ansible module to fetch input parameters + :param cli: The CLI string + """ + name = module.params['pn_name'] + clicopy = cli + + cli += ' system-settings-show format cpu-class-enable no-show-headers' + out = run_commands(module, cli)[1] + out = out.split() + + if 'on' not in out: + module.fail_json( + failed=True, + msg='Enable CPU class before creating or deleting' + ) + + cli = clicopy + cli += ' cpu-class-show format name no-show-headers' + out = run_commands(module, cli)[1] + if out: + out = out.split() + + return True if name in out else False + + +def main(): + """ This section is for arguments parsing """ + + state_map = dict( + present='cpu-class-create', + absent='cpu-class-delete', + update='cpu-class-modify' + ) + + module = AnsibleModule( + argument_spec=dict( + pn_cliswitch=dict(required=False, type='str'), + state=dict(required=True, type='str', + choices=state_map.keys()), + pn_scope=dict(required=False, type='str', + choices=['local', 'fabric']), + pn_hog_protect=dict(required=False, type='str', + choices=['disable', 'enable', + 'enable-and-drop']), + pn_rate_limit=dict(required=False, type='str'), + pn_name=dict(required=False, type='str'), + ), + required_if=( + ['state', 'present', ['pn_name', 'pn_scope', 'pn_rate_limit']], + ['state', 'absent', ['pn_name']], + ['state', 'update', ['pn_name']], + ) + ) + + # Accessing the arguments + cliswitch = module.params['pn_cliswitch'] + state = module.params['state'] + scope = module.params['pn_scope'] + hog_protect = module.params['pn_hog_protect'] + rate_limit = module.params['pn_rate_limit'] + name = module.params['pn_name'] + + command = state_map[state] + + # Building the CLI command string + cli = pn_cli(module, cliswitch) + + NAME_EXISTS = check_cli(module, cli) + cli += ' %s name %s ' % (command, name) + + if command == 'cpu-class-modify': + if NAME_EXISTS is False: + module.fail_json( + failed=True, + msg='cpu class with name %s does not exist' % name + ) + + if command == 
'cpu-class-delete':
+ if NAME_EXISTS is False:
+ module.exit_json(
+ skipped=True,
+ msg='cpu class with name %s does not exist' % name
+ )
+
+ if command == 'cpu-class-create':
+ if NAME_EXISTS is True:
+ module.exit_json(
+ skipped=True,
+ msg='cpu class with name %s already exists' % name
+ )
+ if scope:
+ cli += ' scope %s ' % scope
+
+ if command != 'cpu-class-delete':
+ if hog_protect:
+ cli += ' hog-protect %s ' % hog_protect
+ if rate_limit:
+ cli += ' rate-limit %s ' % rate_limit
+
+ run_cli(module, cli, state_map)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/plugins/modules/network/netvisor/pn_cpu_mgmt_class.py b/plugins/modules/network/netvisor/pn_cpu_mgmt_class.py
new file mode 100644
index 0000000000..891c0a6cf0
--- /dev/null
+++ b/plugins/modules/network/netvisor/pn_cpu_mgmt_class.py
@@ -0,0 +1,141 @@
+#!/usr/bin/python
+# Copyright: (c) 2018, Pluribus Networks
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: pn_cpu_mgmt_class
+author: "Pluribus Networks (@rajaspachipulusu17)"
+short_description: CLI command to modify cpu-mgmt-class
+description:
+ - This module can be used to update mgmt port ingress policers.
+options:
+ pn_cliswitch:
+ description:
+ - Target switch to run the CLI on.
+ type: str
+ required: false
+ state:
+ description:
+ - State the action to perform. Use C(update) to modify cpu-mgmt-class.
+ type: str
+ required: true
+ choices: ['update']
+ pn_burst_size:
+ description:
+ - ingress traffic burst size (bytes) or default.
+ required: false
+ type: str
+ pn_name:
+ description:
+ - mgmt port ingress traffic class.
+ type: str
+ required: false
+ choices: ['arp', 'icmp', 'ssh', 'snmp', 'fabric', 'bcast', 'nfs',
+ 'web', 'web-ssl', 'net-api']
+ pn_rate_limit:
+ description:
+ - ingress rate limit on mgmt port (bps) or unlimited.
+ type: str
+ required: false
+'''
+
+EXAMPLES = """
+- name: cpu mgmt class modify ingress policers
+ pn_cpu_mgmt_class:
+ pn_cliswitch: "sw01"
+ state: "update"
+ pn_name: "icmp"
+ pn_rate_limit: "10000"
+ pn_burst_size: "14000"
+
+- name: cpu mgmt class modify ingress policers
+ pn_cpu_mgmt_class:
+ pn_cliswitch: "sw01"
+ state: "update"
+ pn_name: "snmp"
+ pn_burst_size: "8000"
+ pn_rate_limit: "100000"
+
+- name: cpu mgmt class modify ingress policers
+ pn_cpu_mgmt_class:
+ pn_cliswitch: "sw01"
+ state: "update"
+ pn_name: "web"
+ pn_rate_limit: "10000"
+ pn_burst_size: "1000"
+"""

+RETURN = """
+command:
+ description: the CLI command run on the target node.
+ returned: always
+ type: str
+stdout:
+ description: set of responses from the cpu-mgmt-class command.
+ returned: always
+ type: list
+stderr:
+ description: set of error responses from the cpu-mgmt-class command.
+ returned: on error
+ type: list
+changed:
+ description: indicates whether the CLI caused changes on the target.
+ returned: always + type: bool +""" + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.network.netvisor.pn_nvos import pn_cli, run_cli + + +def main(): + """ This section is for arguments parsing """ + + state_map = dict( + update='cpu-mgmt-class-modify' + ) + + module = AnsibleModule( + argument_spec=dict( + pn_cliswitch=dict(required=False, type='str'), + state=dict(required=True, type='str', choices=state_map.keys()), + pn_burst_size=dict(required=False, type='str'), + pn_name=dict(required=False, type='str', + choices=['arp', 'icmp', 'ssh', 'snmp', + 'fabric', 'bcast', 'nfs', 'web', + 'web-ssl', 'net-api']), + pn_rate_limit=dict(required=False, type='str'), + ), + required_if=([['state', 'update', ['pn_name', 'pn_burst_size', 'pn_rate_limit']]]), + ) + + # Accessing the arguments + cliswitch = module.params['pn_cliswitch'] + state = module.params['state'] + burst_size = module.params['pn_burst_size'] + name = module.params['pn_name'] + rate_limit = module.params['pn_rate_limit'] + + command = state_map[state] + + # Building the CLI command string + cli = pn_cli(module, cliswitch) + + if command == 'cpu-mgmt-class-modify': + cli += ' %s name %s ' % (command, name) + cli += ' burst-size %s rate-limit %s' % (burst_size, rate_limit) + + run_cli(module, cli, state_map) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/netvisor/pn_dhcp_filter.py b/plugins/modules/network/netvisor/pn_dhcp_filter.py new file mode 100644 index 0000000000..e331720401 --- /dev/null +++ b/plugins/modules/network/netvisor/pn_dhcp_filter.py @@ -0,0 +1,175 @@ +#!/usr/bin/python +# Copyright: (c) 2018, Pluribus Networks +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: pn_dhcp_filter +author: "Pluribus Networks (@rajaspachipulusu17)" +short_description: CLI command to create/modify/delete dhcp-filter +description: + - This module can be used to create, delete and modify a DHCP filter config. +options: + pn_cliswitch: + description: + - Target switch to run the CLI on. + required: False + type: str + state: + description: + - State the action to perform. Use C(present) to create dhcp-filter and + C(absent) to delete dhcp-filter C(update) to modify the dhcp-filter. + required: True + type: str + choices: ['present', 'absent', 'update'] + pn_trusted_ports: + description: + - trusted ports of dhcp config. + required: False + type: str + pn_name: + description: + - name of the DHCP filter. + required: false + type: str +''' + +EXAMPLES = """ +- name: dhcp filter create + pn_dhcp_filter: + pn_cliswitch: "sw01" + pn_name: "foo" + state: "present" + pn_trusted_ports: "1" + +- name: dhcp filter delete + pn_dhcp_filter: + pn_cliswitch: "sw01" + pn_name: "foo" + state: "absent" + pn_trusted_ports: "1" + +- name: dhcp filter modify + pn_dhcp_filter: + pn_cliswitch: "sw01" + pn_name: "foo" + state: "update" + pn_trusted_ports: "1,2" +""" + +RETURN = """ +command: + description: the CLI command run on the target node. + returned: always + type: str +stdout: + description: set of responses from the dhcp-filter command. + returned: always + type: list +stderr: + description: set of error responses from the dhcp-filter command. 
+ returned: on error + type: list +changed: + description: indicates whether the CLI caused changes on the target. + returned: always + type: bool +""" + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.network.netvisor.pn_nvos import pn_cli, run_cli +from ansible_collections.community.general.plugins.module_utils.network.netvisor.netvisor import run_commands + + +def check_cli(module, cli): + """ + This method checks for idempotency using the dhcp-filter-show command. + If a user with given name exists, return True else False. + :param module: The Ansible module to fetch input parameters + :param cli: The CLI string + """ + user_name = module.params['pn_name'] + + cli += ' dhcp-filter-show format name no-show-headers' + out = run_commands(module, cli)[1] + + if out: + out = out.split() + + return True if user_name in out else False + + +def main(): + """ This section is for arguments parsing """ + + state_map = dict( + present='dhcp-filter-create', + absent='dhcp-filter-delete', + update='dhcp-filter-modify' + ) + + module = AnsibleModule( + argument_spec=dict( + pn_cliswitch=dict(required=False, type='str'), + state=dict(required=True, type='str', + choices=state_map.keys()), + pn_trusted_ports=dict(required=False, type='str'), + pn_name=dict(required=False, type='str'), + ), + required_if=[ + ["state", "present", ["pn_name", "pn_trusted_ports"]], + ["state", "absent", ["pn_name"]], + ["state", "update", ["pn_name", "pn_trusted_ports"]] + ] + ) + + # Accessing the arguments + cliswitch = module.params['pn_cliswitch'] + state = module.params['state'] + trusted_ports = module.params['pn_trusted_ports'] + name = module.params['pn_name'] + + command = state_map[state] + + # Building the CLI command string + cli = pn_cli(module, cliswitch) + + USER_EXISTS = check_cli(module, cli) + cli += ' %s name %s ' % (command, name) + + if command == 'dhcp-filter-modify': + if USER_EXISTS is False: + module.fail_json( + failed=True, + msg='dhcp-filter with name %s does not exist' % name + ) + if command == 'dhcp-filter-delete': + if USER_EXISTS is False: + module.exit_json( + skipped=True, + msg='dhcp-filter with name %s does not exist' % name + ) + if command == 'dhcp-filter-create': + if USER_EXISTS is True: + module.exit_json( + skipped=True, + msg='dhcp-filter with name %s already exists' % name + ) + if command != 'dhcp-filter-delete': + if trusted_ports: + cli += ' trusted-ports ' + trusted_ports + + run_cli(module, cli, state_map) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/netvisor/pn_dscp_map.py b/plugins/modules/network/netvisor/pn_dscp_map.py new file mode 100644 index 0000000000..dc86fdee21 --- /dev/null +++ b/plugins/modules/network/netvisor/pn_dscp_map.py @@ -0,0 +1,161 @@ +#!/usr/bin/python +# Copyright: (c) 2018, Pluribus Networks +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: pn_dscp_map +author: "Pluribus Networks (@rajaspachipulusu17)" +short_description: CLI command to create/delete dscp-map +description: + - This module can be used to create a DSCP priority mapping table. +options: + pn_cliswitch: + description: + - Target switch to run the CLI on. 
+ required: False + type: str + state: + description: + - State the action to perform. Use C(present) to create dscp-map and + C(absent) to delete. + required: True + type: str + choices: ["present", "absent"] + pn_name: + description: + - Name for the DSCP map. + required: False + type: str + pn_scope: + description: + - Scope for dscp map. + required: False + choices: ["local", "fabric"] +''' + +EXAMPLES = """ +- name: dscp map create + pn_dscp_map: + pn_cliswitch: "sw01" + state: "present" + pn_name: "foo" + pn_scope: "local" + +- name: dscp map delete + pn_dscp_map: + pn_cliswitch: "sw01" + state: "absent" + pn_name: "foo" +""" + +RETURN = """ +command: + description: the CLI command run on the target node. + returned: always + type: str +stdout: + description: set of responses from the dscp-map command. + returned: always + type: list +stderr: + description: set of error responses from the dscp-map command. + returned: on error + type: list +changed: + description: indicates whether the CLI caused changes on the target. + returned: always + type: bool +""" + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.network.netvisor.pn_nvos import pn_cli, run_cli +from ansible_collections.community.general.plugins.module_utils.network.netvisor.netvisor import run_commands + + +def check_cli(module, cli): + """ + This method checks for idempotency using the dscp-map-show name command. + If a user with given name exists, return True else False. + :param module: The Ansible module to fetch input parameters + :param cli: The CLI string + """ + name = module.params['pn_name'] + + cli += ' dscp-map-show format name no-show-headers' + out = run_commands(module, cli)[1] + + if out: + out = out.split() + + return True if name in out else False + + +def main(): + """ This section is for arguments parsing """ + + state_map = dict( + present='dscp-map-create', + absent='dscp-map-delete' + ) + + module = AnsibleModule( + argument_spec=dict( + pn_cliswitch=dict(required=False, type='str'), + state=dict(required=True, type='str', + choices=state_map.keys()), + pn_name=dict(required=False, type='str'), + pn_scope=dict(required=False, type='str', + choices=['local', 'fabric']), + ), + required_if=( + ["state", "present", ["pn_name", "pn_scope"]], + ["state", "absent", ["pn_name"]], + ) + ) + + # Accessing the arguments + cliswitch = module.params['pn_cliswitch'] + state = module.params['state'] + name = module.params['pn_name'] + scope = module.params['pn_scope'] + + command = state_map[state] + + # Building the CLI command string + cli = pn_cli(module, cliswitch) + + NAME_EXISTS = check_cli(module, cli) + cli += ' %s name %s ' % (command, name) + + if command == 'dscp-map-delete': + if NAME_EXISTS is False: + module.exit_json( + skipped=True, + msg='dscp map with name %s does not exist' % name + ) + else: + if command == 'dscp-map-create': + if NAME_EXISTS is True: + module.exit_json( + skipped=True, + msg='dscp map with name %s already exists' % name + ) + + if scope: + cli += ' scope ' + scope + + run_cli(module, cli, state_map) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/netvisor/pn_dscp_map_pri_map.py b/plugins/modules/network/netvisor/pn_dscp_map_pri_map.py new file mode 100644 index 0000000000..30fd6f1060 --- /dev/null +++ b/plugins/modules/network/netvisor/pn_dscp_map_pri_map.py @@ -0,0 +1,163 @@ +#!/usr/bin/python +# Copyright: (c) 2018, Pluribus Networks +# GNU General Public License v3.0+ (see 
COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: pn_dscp_map_pri_map +author: "Pluribus Networks (@rajaspachipulusu17)" +short_description: CLI command to modify dscp-map-pri-map +description: + - This module can be used to update priority mappings in tables. +options: + pn_cliswitch: + description: + - Target switch to run the CLI on. + required: False + type: str + state: + description: + - State the action to perform. Use C(update) to modify + the dscp-map-pri-map. + required: True + type: str + choices: ['update'] + pn_pri: + description: + - CoS priority. + required: False + type: str + pn_name: + description: + - Name for the DSCP map. + required: False + type: str + pn_dsmap: + description: + - DSCP value(s). + required: False + type: str +''' + +EXAMPLES = """ +- name: dscp map pri map modify + pn_dscp_map_pri_map: + pn_cliswitch: 'sw01' + state: 'update' + pn_name: 'foo' + pn_pri: '0' + pn_dsmap: '40' + +- name: dscp map pri map modify + pn_dscp_map_pri_map: + pn_cliswitch: 'sw01' + state: 'update' + pn_name: 'foo' + pn_pri: '1' + pn_dsmap: '8,10,12,14' +""" + +RETURN = """ +command: + description: the CLI command run on the target node. + returned: always + type: str +stdout: + description: set of responses from the dscp-map-pri-map command. + returned: always + type: list +stderr: + description: set of error responses from the dscp-map-pri-map command. + returned: on error + type: list +changed: + description: indicates whether the CLI caused changes on the target. + returned: always + type: bool +""" + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.network.netvisor.pn_nvos import pn_cli, run_cli +from ansible_collections.community.general.plugins.module_utils.network.netvisor.netvisor import run_commands + + +def check_cli(module, cli): + """ + This method checks for idempotency using the dscp-map-show name command. + If a user with given name exists, return True else False. 
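    A hedged illustration of this check (the sample CLI output is assumed;
    the parsing mirrors the code in this function): 'dscp-map-show format
    name no-show-headers' prints one map name per line, so existence is a
    simple split-and-membership test:

        out = 'foo\nbar\n'        # assumed raw stdout from run_commands()
        names = out.split()       # -> ['foo', 'bar']
        exists = 'foo' in names   # -> True, so the modify branch may proceed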
+ :param module: The Ansible module to fetch input parameters + :param cli: The CLI string + """ + name = module.params['pn_name'] + + cli += ' dscp-map-show format name no-show-headers' + out = run_commands(module, cli)[1] + + if out: + out = out.split() + + return True if name in out else False + + +def main(): + """ This section is for arguments parsing """ + + state_map = dict( + update='dscp-map-pri-map-modify' + ) + module = AnsibleModule( + argument_spec=dict( + pn_cliswitch=dict(required=False, type='str'), + state=dict(required=True, type='str', + choices=state_map.keys()), + pn_pri=dict(required=False, type='str'), + pn_name=dict(required=False, type='str'), + pn_dsmap=dict(required=False, type='str'), + ), + required_if=( + ['state', 'update', ['pn_name', 'pn_pri']], + ) + ) + + # Accessing the arguments + cliswitch = module.params['pn_cliswitch'] + state = module.params['state'] + pri = module.params['pn_pri'] + name = module.params['pn_name'] + dsmap = module.params['pn_dsmap'] + + command = state_map[state] + + # Building the CLI command string + cli = pn_cli(module, cliswitch) + + NAME_EXISTS = check_cli(module, cli) + + if command == 'dscp-map-pri-map-modify': + if NAME_EXISTS is False: + module.fail_json( + failed=True, + msg='Create dscp map with name %s before updating' % name + ) + cli += ' %s ' % command + if pri: + cli += ' pri ' + pri + if name: + cli += ' name ' + name + if dsmap: + cli += ' dsmap ' + dsmap + + run_cli(module, cli, state_map) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/netvisor/pn_fabric_local.py b/plugins/modules/network/netvisor/pn_fabric_local.py new file mode 100644 index 0000000000..d8a6451232 --- /dev/null +++ b/plugins/modules/network/netvisor/pn_fabric_local.py @@ -0,0 +1,167 @@ +#!/usr/bin/python +# Copyright: (c) 2018, Pluribus Networks +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: pn_fabric_local +author: "Pluribus Networks (@rajaspachipulusu17)" +short_description: CLI command to modify fabric-local +description: + - This module can be used to modify fabric local information. +options: + pn_cliswitch: + description: + - Target switch to run the CLI on. + required: true + type: str + state: + description: + - State the action to perform. Use C(update) to modify the fabric-local. + required: false + type: str + choices: ['update'] + default: 'update' + pn_fabric_network: + description: + - fabric administration network. + required: false + choices: ['in-band', 'mgmt', 'vmgmt'] + default: 'mgmt' + pn_vlan: + description: + - VLAN assigned to fabric. + required: false + type: str + pn_control_network: + description: + - control plane network. + required: false + choices: ['in-band', 'mgmt', 'vmgmt'] + pn_fabric_advertisement_network: + description: + - network to send fabric advertisements on. + required: false + choices: ['inband-mgmt', 'inband-only', 'inband-vmgmt', 'mgmt-only'] +''' + +EXAMPLES = """ +- name: Fabric local module + pn_fabric_local: + pn_cliswitch: "sw01" + pn_vlan: "500" + +- name: Fabric local module + pn_fabric_local: + pn_cliswitch: "sw01" + pn_fabric_advertisement_network: "mgmt-only" +""" + +RETURN = """ +command: + description: the CLI command run on the target node. 
+  returned: always
+  type: str
+stdout:
+  description: set of responses from the fabric-local command.
+  returned: always
+  type: list
+stderr:
+  description: set of error responses from the fabric-local command.
+  returned: on error
+  type: list
+changed:
+  description: indicates whether the CLI caused changes on the target.
+  returned: always
+  type: bool
+"""
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.network.netvisor.pn_nvos import pn_cli, run_cli
+from ansible_collections.community.general.plugins.module_utils.network.netvisor.netvisor import run_commands
+
+
+def main():
+    """ This section is for arguments parsing """
+
+    state_map = dict(
+        update='fabric-local-modify'
+    )
+
+    argument_spec = dict(
+        pn_cliswitch=dict(required=True, type='str'),
+        state=dict(required=False, type='str', choices=state_map.keys(), default='update'),
+        pn_fabric_network=dict(required=False, type='str',
+                               choices=['mgmt', 'in-band', 'vmgmt'], default='mgmt'),
+        pn_vlan=dict(required=False, type='str'),
+        pn_control_network=dict(required=False, type='str',
+                                choices=['in-band', 'mgmt', 'vmgmt']),
+        pn_fabric_advertisement_network=dict(required=False, type='str',
+                                             choices=['inband-mgmt', 'inband-only', 'inband-vmgmt', 'mgmt-only']),
+    )
+
+    module = AnsibleModule(
+        argument_spec=argument_spec,
+        required_one_of=[['pn_fabric_network', 'pn_vlan',
+                          'pn_control_network',
+                          'pn_fabric_advertisement_network']],
+    )
+
+    # Accessing the arguments
+    cliswitch = module.params['pn_cliswitch']
+    state = module.params['state']
+    fabric_network = module.params['pn_fabric_network']
+    vlan = module.params['pn_vlan']
+    control_network = module.params['pn_control_network']
+    fabric_adv_network = module.params['pn_fabric_advertisement_network']
+
+    command = state_map[state]
+
+    if vlan:
+        if int(vlan) < 1 or int(vlan) > 4092:
+            module.fail_json(
+                failed=True,
+                msg='Valid vlan range is 1 to 4092'
+            )
+        cli = pn_cli(module, cliswitch)
+        cli += ' vlan-show format id no-show-headers'
+        out = run_commands(module, cli)[1].split()
+
+        if vlan in out and vlan != '1':
+            module.fail_json(
+                failed=True,
+                msg='vlan %s is already in use. 
Specify unused vlan' % vlan + ) + + # Building the CLI command string + cli = pn_cli(module, cliswitch) + + if command == 'fabric-local-modify': + cli += ' %s ' % command + + if fabric_network: + cli += ' fabric-network ' + fabric_network + + if vlan: + cli += ' vlan ' + vlan + + if control_network: + cli += ' control-network ' + control_network + + if fabric_adv_network: + cli += ' fabric-advertisement-network ' + fabric_adv_network + + run_cli(module, cli, state_map) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/netvisor/pn_igmp_snooping.py b/plugins/modules/network/netvisor/pn_igmp_snooping.py new file mode 100644 index 0000000000..0f93ed08a3 --- /dev/null +++ b/plugins/modules/network/netvisor/pn_igmp_snooping.py @@ -0,0 +1,209 @@ +#!/usr/bin/python +# Copyright: (c) 2018, Pluribus Networks +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: pn_igmp_snooping +author: "Pluribus Networks (@rajaspachipulusu17)" +short_description: CLI command to modify igmp-snooping +description: + - This module can be used to modify Internet Group Management Protocol (IGMP) snooping. +options: + pn_cliswitch: + description: + - Target switch to run the CLI on. + required: False + type: str + state: + description: + - State the action to perform. Use C(update) to modify the igmp-snooping. + required: True + type: str + choices: ['update'] + pn_enable: + description: + - enable or disable IGMP snooping. + required: False + type: bool + pn_query_interval: + description: + - IGMP query interval in seconds. + required: False + type: str + pn_igmpv2_vlans: + description: + - VLANs on which to use IGMPv2 protocol. + required: False + type: str + pn_igmpv3_vlans: + description: + - VLANs on which to use IGMPv3 protocol. + required: False + type: str + pn_enable_vlans: + description: + - enable per VLAN IGMP snooping. + required: False + type: str + pn_vxlan: + description: + - enable or disable IGMP snooping on vxlans. + required: False + type: bool + pn_query_max_response_time: + description: + - maximum response time, in seconds, advertised in IGMP queries. + required: False + type: str + pn_scope: + description: + - IGMP snooping scope - fabric or local. + required: False + choices: ['local', 'fabric'] + pn_no_snoop_linklocal_vlans: + description: + - Remove snooping of link-local groups(224.0.0.0/24) on these vlans. + required: False + type: str + pn_snoop_linklocal_vlans: + description: + - Allow snooping of link-local groups(224.0.0.0/24) on these vlans. + required: False + type: str +''' + +EXAMPLES = """ +- name: 'Modify IGMP Snooping' + pn_igmp_snooping: + pn_cliswitch: 'sw01' + state: 'update' + pn_vxlan: True + pn_enable_vlans: '1-399,401-4092' + pn_no_snoop_linklocal_vlans: 'none' + pn_igmpv3_vlans: '1-399,401-4092' + +- name: 'Modify IGMP Snooping' + pn_igmp_snooping: + pn_cliswitch: 'sw01' + state: 'update' + pn_vxlan: False + pn_enable_vlans: '1-399' + pn_no_snoop_linklocal_vlans: 'none' + pn_igmpv3_vlans: '1-399' +""" + +RETURN = """ +command: + description: the CLI command run on the target node. + returned: always + type: str +stdout: + description: set of responses from the igmp-snooping command. 
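The module code that follows maps tri-state boolean options onto CLI keywords with booleanArgs() from pn_nvos. A minimal sketch of the helper's presumed behavior, offered only as a reading aid (the real implementation lives in module_utils and may differ):

    def booleanArgs(arg, true_string, false_string):
        # True -> ' <true_string> ', False -> ' <false_string> ', None (option omitted) -> ''
        if arg is True:
            return ' %s ' % true_string
        elif arg is False:
            return ' %s ' % false_string
        else:
            return ''

So pn_vxlan: True from the first example task would contribute ' vxlan ' to the command, False would contribute ' no-vxlan ', and leaving the option unset adds nothing.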
+ returned: always + type: list +stderr: + description: set of error responses from the igmp-snooping command. + returned: on error + type: list +changed: + description: indicates whether the CLI caused changes on the target. + returned: always + type: bool +""" + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.network.netvisor.pn_nvos import pn_cli, run_cli, booleanArgs + + +def main(): + """ This section is for arguments parsing """ + + state_map = dict( + update='igmp-snooping-modify' + ) + + module = AnsibleModule( + argument_spec=dict( + pn_cliswitch=dict(required=False, type='str'), + state=dict(required=True, type='str', + choices=state_map.keys()), + pn_enable=dict(required=False, type='bool'), + pn_query_interval=dict(required=False, type='str'), + pn_igmpv2_vlans=dict(required=False, type='str'), + pn_igmpv3_vlans=dict(required=False, type='str'), + pn_enable_vlans=dict(required=False, type='str'), + pn_vxlan=dict(required=False, type='bool'), + pn_query_max_response_time=dict(required=False, type='str'), + pn_scope=dict(required=False, type='str', + choices=['local', 'fabric']), + pn_no_snoop_linklocal_vlans=dict(required=False, type='str'), + pn_snoop_linklocal_vlans=dict(required=False, type='str'), + ), + required_one_of=[['pn_enable', 'pn_query_interval', + 'pn_igmpv2_vlans', + 'pn_igmpv3_vlans', + 'pn_enable_vlans', + 'pn_vxlan', + 'pn_query_max_response_time', + 'pn_scope', + 'pn_no_snoop_linklocal_vlans', + 'pn_snoop_linklocal_vlans']] + ) + + # Accessing the arguments + cliswitch = module.params['pn_cliswitch'] + state = module.params['state'] + enable = module.params['pn_enable'] + query_interval = module.params['pn_query_interval'] + igmpv2_vlans = module.params['pn_igmpv2_vlans'] + igmpv3_vlans = module.params['pn_igmpv3_vlans'] + enable_vlans = module.params['pn_enable_vlans'] + vxlan = module.params['pn_vxlan'] + query_max_response_time = module.params['pn_query_max_response_time'] + scope = module.params['pn_scope'] + no_snoop_linklocal_vlans = module.params['pn_no_snoop_linklocal_vlans'] + snoop_linklocal_vlans = module.params['pn_snoop_linklocal_vlans'] + + command = state_map[state] + + # Building the CLI command string + cli = pn_cli(module, cliswitch) + + if command == 'igmp-snooping-modify': + cli += ' %s ' % command + + cli += booleanArgs(enable, 'enable', 'disable') + cli += booleanArgs(vxlan, 'vxlan', 'no-vxlan') + + if query_interval: + cli += ' query-interval ' + query_interval + if igmpv2_vlans: + cli += ' igmpv2-vlans ' + igmpv2_vlans + if igmpv3_vlans: + cli += ' igmpv3-vlans ' + igmpv3_vlans + if enable_vlans: + cli += ' enable-vlans ' + enable_vlans + if query_max_response_time: + cli += ' query-max-response-time ' + query_max_response_time + if scope: + cli += ' scope ' + scope + if no_snoop_linklocal_vlans: + cli += ' no-snoop-linklocal-vlans ' + no_snoop_linklocal_vlans + if snoop_linklocal_vlans: + cli += ' snoop-linklocal-vlans ' + snoop_linklocal_vlans + + run_cli(module, cli, state_map) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/netvisor/pn_ipv6security_raguard.py b/plugins/modules/network/netvisor/pn_ipv6security_raguard.py new file mode 100644 index 0000000000..32b68113fe --- /dev/null +++ b/plugins/modules/network/netvisor/pn_ipv6security_raguard.py @@ -0,0 +1,238 @@ +#!/usr/bin/python +# Copyright: (c) 2018, Pluribus Networks +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from 
__future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: pn_ipv6security_raguard
+author: "Pluribus Networks (@rajaspachipulusu17)"
+short_description: CLI command to create/modify/delete ipv6security-raguard
+description:
+  - This module can be used to add, update and remove an ipv6 RA Guard policy.
+options:
+  pn_cliswitch:
+    description:
+      - Target switch to run the CLI on.
+    required: false
+    type: str
+  state:
+    description:
+      - ipv6security-raguard configuration command.
+    required: false
+    choices: ['present', 'update', 'absent']
+    type: str
+    default: 'present'
+  pn_device:
+    description:
+      - RA Guard device, either host or router.
+    required: false
+    choices: ['host', 'router']
+    type: str
+  pn_access_list:
+    description:
+      - RA Guard Access List of Source IPs.
+    required: false
+    type: str
+  pn_prefix_list:
+    description:
+      - RA Guard Prefix List.
+    required: false
+    type: str
+  pn_router_priority:
+    description:
+      - RA Guard Router Priority.
+    required: false
+    type: str
+    choices: ['low', 'medium', 'high']
+  pn_name:
+    description:
+      - RA Guard Policy Name.
+    required: true
+    type: str
+'''
+
+EXAMPLES = """
+- name: ipv6 security raguard create
+  pn_ipv6security_raguard:
+    pn_cliswitch: "sw01"
+    pn_name: "foo"
+    pn_device: "host"
+
+- name: ipv6 security raguard create
+  pn_ipv6security_raguard:
+    pn_cliswitch: "sw01"
+    pn_name: "foo1"
+    pn_device: "host"
+    pn_access_list: "sample"
+    pn_prefix_list: "sample"
+    pn_router_priority: "low"
+
+- name: ipv6 security raguard modify
+  pn_ipv6security_raguard:
+    pn_cliswitch: "sw01"
+    pn_name: "foo1"
+    pn_device: "router"
+    pn_router_priority: "medium"
+    state: "update"
+
+- name: ipv6 security raguard delete
+  pn_ipv6security_raguard:
+    pn_cliswitch: "sw01"
+    pn_name: "foo"
+    state: "absent"
+"""
+
+RETURN = """
+command:
+  description: the CLI command run on the target node.
+  returned: always
+  type: str
+stdout:
+  description: set of responses from the ipv6security-raguard command.
+  returned: always
+  type: list
+stderr:
+  description: set of error responses from the ipv6security-raguard command.
+  returned: on error
+  type: list
+changed:
+  description: indicates whether the CLI caused changes on the target.
+  returned: always
+  type: bool
+"""
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.network.netvisor.pn_nvos import pn_cli, run_cli
+from ansible_collections.community.general.plugins.module_utils.network.netvisor.netvisor import run_commands
+
+
+def check_cli(module):
+    """
+    This method checks for idempotency using the ipv6security-raguard-show command.
+    Return True if a policy with the given name exists, else False.
+    :param module: The Ansible module to fetch input parameters
+    """
+    name = module.params['pn_name']
+
+    cli = 'ipv6security-raguard-show format name parsable-delim ,'
+    out = run_commands(module, cli)[1]
+
+    if out:
+        out = out.split()
+
+    return True if name in out else False
+
+
+def check_list(module, list_name, command):
+    """
+    This method checks that the named object exists, using the provided show command.
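    For illustration (hedged; the values are the ones used in the EXAMPLES
    above), main() calls this helper before referencing a list on the CLI,
    and fail_json() aborts the task if the list is missing:

        check_list(module, 'sample', 'access-list-show')
        check_list(module, 'sample', 'prefix-list-show')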
+    :param module: The Ansible module to fetch input parameters
+    :param list_name: The name that must be present in the show output
+    :param command: The show command to run, e.g. access-list-show
+    """
+
+    cli = '%s format name no-show-headers' % command
+    out = run_commands(module, cli)[1]
+
+    if out:
+        out = out.split()
+
+    if list_name not in out:
+        module.fail_json(
+            failed=True,
+            msg='%s name %s does not exist' % (command, list_name)
+        )
+
+
+def main():
+    """ This section is for arguments parsing """
+
+    state_map = dict(
+        present='ipv6security-raguard-create',
+        absent='ipv6security-raguard-delete',
+        update='ipv6security-raguard-modify'
+    )
+
+    argument_spec = dict(
+        pn_cliswitch=dict(required=False, type='str'),
+        state=dict(required=False, type='str', choices=state_map.keys(), default='present'),
+        pn_device=dict(required=False, type='str', choices=['host', 'router']),
+        pn_access_list=dict(required=False, type='str'),
+        pn_prefix_list=dict(required=False, type='str'),
+        pn_router_priority=dict(required=False, type='str', choices=['low', 'medium', 'high']),
+        pn_name=dict(required=True, type='str'),
+    )
+
+    module = AnsibleModule(
+        argument_spec=argument_spec,
+        required_if=(
+            ["state", "present", ['pn_device']],
+        ),
+    )
+
+    # Accessing the arguments
+    cliswitch = module.params['pn_cliswitch']
+    state = module.params['state']
+    device = module.params['pn_device']
+    access_list = module.params['pn_access_list']
+    prefix_list = module.params['pn_prefix_list']
+    router_priority = module.params['pn_router_priority']
+    name = module.params['pn_name']
+
+    command = state_map[state]
+
+    # Building the CLI command string
+    cli = pn_cli(module, cliswitch)
+
+    NAME_EXISTS = check_cli(module)
+
+    if command == 'ipv6security-raguard-modify':
+        if not device and not access_list and not prefix_list and not router_priority:
+            module.fail_json(
+                failed=True,
+                msg='at least one of device, access_list, prefix_list or router_priority is required'
+            )
+
+    if command == 'ipv6security-raguard-create':
+        if NAME_EXISTS is True:
+            module.exit_json(
+                skipped=True,
+                msg='ipv6 security raguard with name %s already exists' % name
+            )
+
+    if command != 'ipv6security-raguard-create':
+        if NAME_EXISTS is False:
+            module.exit_json(
+                skipped=True,
+                msg='ipv6 security raguard with name %s does not exist' % name
+            )
+
+    cli += ' %s name %s ' % (command, name)
+
+    if command != 'ipv6security-raguard-delete':
+        if device == 'router':
+            cli += ' device ' + device
+        if access_list:
+            check_list(module, access_list, 'access-list-show')
+            cli += ' access-list ' + access_list
+        if prefix_list:
+            check_list(module, prefix_list, 'prefix-list-show')
+            cli += ' prefix-list ' + prefix_list
+        if router_priority:
+            cli += ' router-priority ' + router_priority
+        if device == 'host':
+            cli += ' device ' + device
+
+    run_cli(module, cli, state_map)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/network/netvisor/pn_ipv6security_raguard_port.py b/plugins/modules/network/netvisor/pn_ipv6security_raguard_port.py
new file mode 100644
index 0000000000..15f82306ef
--- /dev/null
+++ b/plugins/modules/network/netvisor/pn_ipv6security_raguard_port.py
@@ -0,0 +1,148 @@
+#!/usr/bin/python
+# Copyright: (c) 2018, Pluribus Networks
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: pn_ipv6security_raguard_port
+author: "Pluribus Networks
(@rajaspachipulusu17)" +short_description: CLI command to add/remove ipv6security-raguard-port +description: + - This module can be used to add ports to RA Guard Policy and remove ports to RA Guard Policy. +options: + pn_cliswitch: + description: + - Target switch to run the CLI on. + required: false + type: str + state: + description: + - ipv6security-raguard-port configuration command. + required: false + type: str + choices: ['present', 'absent'] + default: 'present' + pn_name: + description: + - RA Guard Policy Name. + required: true + type: str + pn_ports: + description: + - Ports attached to RA Guard Policy. + required: true + type: str +''' + +EXAMPLES = """ +- name: ipv6 security raguard port add + pn_ipv6security_raguard_port: + pn_cliswitch: "sw01" + pn_name: "foo" + pn_ports: "1" + +- name: ipv6 security raguard port remove + pn_ipv6security_raguard_port: + pn_cliswitch: "sw01" + pn_name: "foo" + state: "absent" + pn_ports: "1" +""" + +RETURN = """ +command: + description: the CLI command run on the target node. + returned: always + type: str +stdout: + description: set of responses from the ipv6security-raguard-port command. + returned: always + type: list +stderr: + description: set of error responses from the ipv6security-raguard-port command. + returned: on error + type: list +changed: + description: indicates whether the CLI caused changes on the target. + returned: always + type: bool +""" + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.network.netvisor.pn_nvos import pn_cli, run_cli +from ansible_collections.community.general.plugins.module_utils.network.netvisor.netvisor import run_commands + + +def check_cli(module): + """ + This method checks for idempotency using the ipv6security-raguard-show command. + If a name exists, return True if name exists else False. 
+ :param module: The Ansible module to fetch input parameters + :param cli: The CLI string + """ + name = module.params['pn_name'] + + cli = 'ipv6security-raguard-show format name parsable-delim ,' + out = run_commands(module, cli)[1] + + if out: + out = out.split() + + return True if name in out else False + + +def main(): + """ This section is for arguments parsing """ + + state_map = dict( + present='ipv6security-raguard-port-add', + absent='ipv6security-raguard-port-remove' + ) + + argument_spec = dict( + pn_cliswitch=dict(required=False, type='str'), + state=dict(required=False, type='str', choices=state_map.keys(), default='present'), + pn_name=dict(required=True, type='str'), + pn_ports=dict(required=True, type='str') + ) + + module = AnsibleModule( + argument_spec=argument_spec + ) + + # Accessing the arguments + cliswitch = module.params['pn_cliswitch'] + state = module.params['state'] + name = module.params['pn_name'] + ports = module.params['pn_ports'] + + command = state_map[state] + + # Building the CLI command string + cli = pn_cli(module, cliswitch) + + NAME_EXISTS = check_cli(module) + + if command: + if NAME_EXISTS is False: + module.fail_json( + failed=True, + msg='ipv6 security raguard with name %s does not exist to add ports' % name + ) + + cli += ' %s name %s ports %s' % (command, name, ports) + + run_cli(module, cli, state_map) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/netvisor/pn_ipv6security_raguard_vlan.py b/plugins/modules/network/netvisor/pn_ipv6security_raguard_vlan.py new file mode 100644 index 0000000000..0f310d0c43 --- /dev/null +++ b/plugins/modules/network/netvisor/pn_ipv6security_raguard_vlan.py @@ -0,0 +1,182 @@ +#!/usr/bin/python +# Copyright: (c) 2018, Pluribus Networks +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: pn_ipv6security_raguard_vlan +author: "Pluribus Networks (@rajaspachipulusu17)" +short_description: CLI command to add/remove ipv6security-raguard-vlan +description: + - This module can be used to Add vlans to RA Guard Policy and Remove vlans to RA Guard Policy. +options: + pn_cliswitch: + description: + - Target switch to run the CLI on. + required: false + type: str + state: + description: + - ipv6security-raguard-vlan configuration command. + required: false + type: str + choices: ['present', 'absent'] + default: 'present' + pn_vlans: + description: + - Vlans attached to RA Guard Policy. + required: true + type: str + pn_name: + description: + - RA Guard Policy Name. + required: true + type: str +''' + +EXAMPLES = """ +- name: ipv6 security raguard vlan add + pn_ipv6security_raguard_vlan: + pn_cliswitch: "sw01" + pn_name: "foo" + pn_vlans: "100-105" + +- name: ipv6 security raguard vlan add + pn_ipv6security_raguard_vlan: + pn_cliswitch: "sw01" + pn_name: "foo" + pn_vlans: "100" + +- name: ipv6 security raguard vlan remove + pn_ipv6security_raguard_vlan: + pn_cliswitch: "sw01" + pn_name: "foo" + pn_vlans: "100-105" + state: 'absent' +""" + +RETURN = """ +command: + description: the CLI command run on the target node. + returned: always + type: str +stdout: + description: set of responses from the ipv6security-raguard-vlan command. 
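Before the add or remove command is built, the check_cli() that follows expands a range such as '100-105' and verifies each id against vlan-show. A hedged worked example of that expansion, mirroring the loop in the function below:

    vlans = '100-105'
    bounds = vlans.strip().split('-')            # -> ['100', '105']
    vlan_list = [str(v) for v in range(int(bounds[0]), int(bounds[1]) + 1)]
    # -> ['100', '101', '102', '103', '104', '105']; each id must appear in
    # the output of 'vlan-show format id no-show-headers' or the task fails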
+  returned: always
+  type: list
+stderr:
+  description: set of error responses from the ipv6security-raguard-vlan command.
+  returned: on error
+  type: list
+changed:
+  description: indicates whether the CLI caused changes on the target.
+  returned: always
+  type: bool
+"""
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.network.netvisor.pn_nvos import pn_cli, run_cli
+from ansible_collections.community.general.plugins.module_utils.network.netvisor.netvisor import run_commands
+
+
+def check_cli(module, cli):
+    """
+    This method checks for idempotency using the ipv6security-raguard-show command.
+    Return True if a policy with the given name exists, else False; it also
+    verifies that every vlan id passed in pn_vlans is already configured.
+    :param module: The Ansible module to fetch input parameters
+    :param cli: The CLI string
+    """
+    name = module.params['pn_name']
+    vlans = module.params['pn_vlans']
+    show = cli
+
+    cli += ' ipv6security-raguard-show format name no-show-headers'
+    out = run_commands(module, cli)[1]
+
+    if out:
+        out = out.split()
+
+    NAME_EXISTS = True if name in out else False
+
+    show += ' vlan-show format id no-show-headers'
+    out = run_commands(module, show)[1]
+    if out:
+        out = out.split()
+
+    if vlans and '-' in vlans:
+        vlan_list = list()
+        vlans = vlans.strip().split('-')
+        for vlan in range(int(vlans[0]), int(vlans[1]) + 1):
+            vlan_list.append(str(vlan))
+
+        for vlan in vlan_list:
+            if vlan not in out:
+                module.fail_json(
+                    failed=True,
+                    msg='vlan id %s does not exist. Make sure you create the vlan before adding it' % vlan
+                )
+    else:
+        if vlans not in out:
+            module.fail_json(
+                failed=True,
+                msg='vlan id %s does not exist. Make sure you create the vlan before adding it' % vlans
+            )
+
+    return NAME_EXISTS
+
+
+def main():
+    """ This section is for arguments parsing """
+
+    state_map = dict(
+        present='ipv6security-raguard-vlan-add',
+        absent='ipv6security-raguard-vlan-remove'
+    )
+
+    module = AnsibleModule(
+        argument_spec=dict(
+            pn_cliswitch=dict(required=False, type='str'),
+            state=dict(required=False, type='str', choices=state_map.keys(), default='present'),
+            pn_vlans=dict(required=True, type='str'),
+            pn_name=dict(required=True, type='str'),
+        )
+    )
+
+    # Accessing the arguments
+    cliswitch = module.params['pn_cliswitch']
+    state = module.params['state']
+    vlans = module.params['pn_vlans']
+    name = module.params['pn_name']
+
+    command = state_map[state]
+
+    # Building the CLI command string
+    cli = pn_cli(module, cliswitch)
+
+    NAME_EXISTS = check_cli(module, cli)
+
+    cli += ' %s name %s ' % (command, name)
+
+    if command:
+        if NAME_EXISTS is False:
+            module.exit_json(
+                skipped=True,
+                msg='ipv6security raguard with name %s does not exist' % name
+            )
+        if vlans:
+            cli += ' vlans ' + vlans
+
+    run_cli(module, cli, state_map)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/network/netvisor/pn_log_audit_exception.py b/plugins/modules/network/netvisor/pn_log_audit_exception.py
new file mode 100644
index 0000000000..e4cd2ff666
--- /dev/null
+++ b/plugins/modules/network/netvisor/pn_log_audit_exception.py
@@ -0,0 +1,203 @@
+#!/usr/bin/python
+# Copyright: (c) 2018, Pluribus Networks
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: pn_log_audit_exception
+author: "Pluribus Networks
(@rajaspachipulusu17)" +short_description: CLI command to create/delete an audit exception +description: + - This module can be used to create an audit exception and delete an audit exception. +options: + pn_cliswitch: + description: + - Target switch to run the CLI on. + required: false + type: str + pn_audit_type: + description: + - Specify the type of audit exception. + required: false + type: str + choices: ['cli', 'shell', 'vtysh'] + state: + description: + - State the action to perform. Use 'present' to create audit-exception and + 'absent' to delete audit-exception. + required: false + type: str + choices: ['present', 'absent'] + default: 'present' + pn_pattern: + description: + - Specify a regular expression to match exceptions. + required: false + type: str + pn_scope: + description: + - scope - local or fabric. + required: false + type: str + choices: ['local', 'fabric'] + pn_access: + description: + - Specify the access type to match exceptions. + required: true + type: str + choices: ['any', 'read-only', 'read-write'] +''' + +EXAMPLES = """ +- name: create a log-audit-exception + pn_log_audit_exception: + pn_audit_type: "cli" + pn_pattern: "test" + state: "present" + pn_access: "any" + pn_scope: "local" + +- name: delete a log-audit-exception + pn_log_audit_exception: + pn_audit_type: "shell" + pn_pattern: "test" + state: "absent" + pn_access: "any" +""" + +RETURN = """ +command: + description: the CLI command run on the target node. + returned: always + type: str +stdout: + description: set of responses from the pn_log_audit_exceptions command. + returned: always + type: list +stderr: + description: set of error responses from the log_audit_exceptions command. + returned: on error + type: list +changed: + description: indicates whether the CLI caused changes on the target. + returned: always + type: bool +""" + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.network.netvisor.pn_nvos import pn_cli, run_cli +from ansible_collections.community.general.plugins.module_utils.network.netvisor.netvisor import run_commands + + +def check_cli(module, cli): + """ + This method checks for idempotency using the log-audit-exception command. + If a list with given name exists, return exists as True else False. + :param module: The Ansible module to fetch input parameters. + :return Booleans: True or False. 
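    A worked example of the matching below, with an assumed show output line
    (fields come back joined by the literal delimiter DELIM):

        line = 'cliDELIMtestDELIManyDELIMlocal'
        wordlist = line.split('DELIM')   # -> ['cli', 'test', 'any', 'local']
        # For state=present, type, pattern, access and scope must all match
        # (count reaches 4); for state=absent, scope is ignored and three
        # matches suffice (count == 3).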
+ """ + state = module.params['state'] + audit_type = module.params['pn_audit_type'] + pattern = module.params['pn_pattern'] + access = module.params['pn_access'] + scope = module.params['pn_scope'] + cli += ' log-audit-exception-show' + cli += ' no-show-headers format ' + cli += ' type,pattern,access,scope parsable-delim DELIM' + + stdout = run_commands(module, cli)[1] + + if stdout: + linelist = stdout.strip().split('\n') + for line in linelist: + wordlist = line.split('DELIM') + count = 0 + + if wordlist[0] == audit_type: + count += 1 + if wordlist[1] == pattern: + count += 1 + if wordlist[2] == access: + count += 1 + if state == 'present' and wordlist[3] == scope: + count += 1 + elif state == 'absent' and count == 3: + return True + if state == 'present' and count == 4: + return True + + return False + + +def main(): + """ This section is for arguments parsing """ + + state_map = dict( + present='log-audit-exception-create', + absent='log-audit-exception-delete', + ) + + argument_spec = dict( + pn_cliswitch=dict(required=False, type='str'), + pn_pattern=dict(required=True, type='str'), + state=dict(required=False, type='str', + choices=state_map.keys(), default='present'), + pn_access=dict(required=True, type='str', choices=['any', 'read-only', 'read-write']), + pn_audit_type=dict(required=True, type='str', choices=['cli', 'shell', 'vtysh']), + pn_scope=dict(required=False, type='str', choices=['local', 'fabric']), + ) + + module = AnsibleModule( + argument_spec=argument_spec, + required_if=( + ["state", "present", ["pn_scope"]], + ), + ) + + # Accessing the arguments + + cliswitch = module.params['pn_cliswitch'] + state = module.params['state'] + access = module.params['pn_access'] + audit_type = module.params['pn_audit_type'] + pattern = module.params['pn_pattern'] + scope = module.params['pn_scope'] + + command = state_map[state] + + # Building the CLI command string + cli = pn_cli(module, cliswitch) + + audit_log_exists = check_cli(module, cli) + + cli += ' %s %s pattern %s %s' % (command, audit_type, pattern, access) + + if state == 'absent': + if audit_log_exists is False: + module.exit_json( + skipped=True, + msg='This audit log exception entry does not exist' + ) + run_cli(module, cli, state_map) + + elif state == 'present': + if audit_log_exists is True: + module.exit_json( + skipped=True, + msg='This audit log exception entry already exists' + ) + cli += ' scope %s ' % scope + run_cli(module, cli, state_map) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/netvisor/pn_ospf.py b/plugins/modules/network/netvisor/pn_ospf.py new file mode 100644 index 0000000000..b388198174 --- /dev/null +++ b/plugins/modules/network/netvisor/pn_ospf.py @@ -0,0 +1,300 @@ +#!/usr/bin/python +""" PN-CLI vrouter-ospf-add/remove """ + +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+# + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['deprecated'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: pn_ospf +author: "Pluribus Networks (@amitsi)" +short_description: CLI command to add/remove ospf protocol to a vRouter. +deprecated: + removed_in: '2.12' + why: Doesn't support latest Pluribus Networks netvisor + alternative: Latest modules will be pushed in Ansible future versions. +description: + - Execute vrouter-ospf-add, vrouter-ospf-remove command. + - This command adds/removes Open Shortest Path First(OSPF) routing + protocol to a virtual router(vRouter) service. +options: + pn_cliusername: + description: + - Provide login username if user is not root. + required: False + pn_clipassword: + description: + - Provide login password if user is not root. + required: False + pn_cliswitch: + description: + - Target switch to run the CLI on. + required: False + default: 'local' + state: + description: + - Assert the state of the ospf. Use 'present' to add ospf + and 'absent' to remove ospf. + required: True + default: present + choices: ['present', 'absent'] + pn_vrouter_name: + description: + - Specify the name of the vRouter. + required: True + pn_network_ip: + description: + - Specify the network IP (IPv4 or IPv6) address. + required: True + pn_ospf_area: + description: + - Stub area number for the configuration. Required for vrouter-ospf-add. +''' + +EXAMPLES = """ +- name: "Add OSPF to vrouter" + pn_ospf: + state: present + pn_vrouter_name: name-string + pn_network_ip: 192.168.11.2/24 + pn_ospf_area: 1.0.0.0 + +- name: "Remove OSPF from vrouter" + pn_ospf: + state: absent + pn_vrouter_name: name-string +""" + +RETURN = """ +command: + description: The CLI command run on the target node(s). + returned: always + type: str +stdout: + description: The set of responses from the ospf command. + returned: always + type: list +stderr: + description: The set of error responses from the ospf command. + returned: on error + type: list +changed: + description: Indicates whether the CLI caused changes on the target. + returned: always + type: bool +""" + +import shlex + +# AnsibleModule boilerplate +from ansible.module_utils.basic import AnsibleModule + +VROUTER_EXISTS = None +NETWORK_EXISTS = None + + +def pn_cli(module): + """ + This method is to generate the cli portion to launch the Netvisor cli. + It parses the username, password, switch parameters from module. + :param module: The Ansible module to fetch username, password and switch + :return: returns the cli string for further processing + """ + username = module.params['pn_cliusername'] + password = module.params['pn_clipassword'] + cliswitch = module.params['pn_cliswitch'] + + if username and password: + cli = '/usr/bin/cli --quiet --user %s:%s ' % (username, password) + else: + cli = '/usr/bin/cli --quiet ' + + if cliswitch == 'local': + cli += ' switch-local ' + else: + cli += ' switch ' + cliswitch + return cli + + +def check_cli(module, cli): + """ + This method checks if vRouter exists on the target node. + This method also checks for idempotency using the vrouter-ospf-show command. + If the given vRouter exists, return VROUTER_EXISTS as True else False. + If an OSPF network with the given ip exists on the given vRouter, + return NETWORK_EXISTS as True else False. 
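    A hedged illustration of the two lookups (the sample CLI output here is
    assumed, the membership tests mirror the code below):

        out = 'vrouter-1 name-string'.split()      # vrouter-show format name no-show-headers
        VROUTER_EXISTS = 'name-string' in out      # True -> proceed

        out = '192.168.11.2/24'.split()            # vrouter-ospf-show ... format network no-show-headers
        NETWORK_EXISTS = '192.168.11.2/24' in out  # True -> the add is skipped as already present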
+ + :param module: The Ansible module to fetch input parameters + :param cli: The CLI string + :return Global Booleans: VROUTER_EXISTS, NETWORK_EXISTS + """ + vrouter_name = module.params['pn_vrouter_name'] + network_ip = module.params['pn_network_ip'] + # Global flags + global VROUTER_EXISTS, NETWORK_EXISTS + + # Check for vRouter + check_vrouter = cli + ' vrouter-show format name no-show-headers ' + check_vrouter = shlex.split(check_vrouter) + out = module.run_command(check_vrouter)[1] + out = out.split() + + if vrouter_name in out: + VROUTER_EXISTS = True + else: + VROUTER_EXISTS = False + + # Check for OSPF networks + show = cli + ' vrouter-ospf-show vrouter-name %s ' % vrouter_name + show += 'format network no-show-headers' + show = shlex.split(show) + out = module.run_command(show)[1] + out = out.split() + + if network_ip in out: + NETWORK_EXISTS = True + else: + NETWORK_EXISTS = False + + +def run_cli(module, cli): + """ + This method executes the cli command on the target node(s) and returns the + output. The module then exits based on the output. + :param cli: the complete cli string to be executed on the target node(s). + :param module: The Ansible module to fetch command + """ + cliswitch = module.params['pn_cliswitch'] + state = module.params['state'] + command = get_command_from_state(state) + cmd = shlex.split(cli) + + result, out, err = module.run_command(cmd) + + print_cli = cli.split(cliswitch)[1] + + # Response in JSON format + if result != 0: + module.exit_json( + command=print_cli, + stderr=err.strip(), + msg="%s operation failed" % command, + changed=False + ) + + if out: + module.exit_json( + command=print_cli, + stdout=out.strip(), + msg="%s operation completed" % command, + changed=True + ) + + else: + module.exit_json( + command=print_cli, + msg="%s operation completed" % command, + changed=True + ) + + +def get_command_from_state(state): + """ + This method gets appropriate command name for the state specified. It + returns the command name for the specified state. + :param state: The state for which the respective command name is required. 
+    """
+    command = None
+    if state == 'present':
+        command = 'vrouter-ospf-add'
+    if state == 'absent':
+        command = 'vrouter-ospf-remove'
+    return command
+
+
+def main():
+    """ This section is for arguments parsing """
+    module = AnsibleModule(
+        argument_spec=dict(
+            pn_cliusername=dict(required=False, type='str'),
+            pn_clipassword=dict(required=False, type='str', no_log=True),
+            pn_cliswitch=dict(required=False, type='str', default='local'),
+            state=dict(type='str', default='present', choices=['present',
+                                                               'absent']),
+            pn_vrouter_name=dict(required=True, type='str'),
+            pn_network_ip=dict(required=True, type='str'),
+            pn_ospf_area=dict(type='str')
+        ),
+        required_if=(
+            ['state', 'present',
+             ['pn_network_ip', 'pn_ospf_area']],
+            ['state', 'absent', ['pn_network_ip']]
+        )
+    )
+
+    # Accessing the arguments
+    state = module.params['state']
+    vrouter_name = module.params['pn_vrouter_name']
+    network_ip = module.params['pn_network_ip']
+    ospf_area = module.params['pn_ospf_area']
+
+    command = get_command_from_state(state)
+
+    # Building the CLI command string
+    cli = pn_cli(module)
+    check_cli(module, cli)
+
+    if state == 'present':
+        if VROUTER_EXISTS is False:
+            module.exit_json(
+                skipped=True,
+                msg='vRouter %s does not exist' % vrouter_name
+            )
+        if NETWORK_EXISTS is True:
+            module.exit_json(
+                skipped=True,
+                msg=('OSPF with network ip %s already exists on %s'
+                     % (network_ip, vrouter_name))
+            )
+        cli += (' %s vrouter-name %s network %s ospf-area %s'
+                % (command, vrouter_name, network_ip, ospf_area))
+
+    if state == 'absent':
+        if VROUTER_EXISTS is False:
+            module.exit_json(
+                skipped=True,
+                msg='vRouter %s does not exist' % vrouter_name
+            )
+        if NETWORK_EXISTS is False:
+            module.exit_json(
+                skipped=True,
+                msg=('OSPF with network ip %s does not exist on %s'
+                     % (network_ip, vrouter_name))
+            )
+        cli += (' %s vrouter-name %s network %s'
+                % (command, vrouter_name, network_ip))
+
+    run_cli(module, cli)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/network/netvisor/pn_ospfarea.py b/plugins/modules/network/netvisor/pn_ospfarea.py
new file mode 100644
index 0000000000..4a1d850a07
--- /dev/null
+++ b/plugins/modules/network/netvisor/pn_ospfarea.py
@@ -0,0 +1,226 @@
+#!/usr/bin/python
+""" PN-CLI vrouter-ospf-area-add/remove """
+
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['deprecated'],
+                    'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: pn_ospfarea
+author: "Pluribus Networks (@amitsi)"
+short_description: CLI command to add/remove ospf area to/from a vrouter.
+deprecated:
+  removed_in: '2.12'
+  why: Doesn't support latest Pluribus Networks netvisor
+  alternative: Latest modules will be pushed in Ansible future versions.
+description:
+  - Execute vrouter-ospf-area-add, vrouter-ospf-area-remove command.
+  - This command adds/removes Open Shortest Path First (OSPF) area to/from
+    a virtual router (vRouter) service.
+options:
+  pn_cliusername:
+    description:
+      - Login username.
+    required: true
+  pn_clipassword:
+    description:
+      - Login password.
+    required: true
+  pn_cliswitch:
+    description:
+      - Target switch(es) to run the CLI on.
+    required: False
+  state:
+    description:
+      - State the action to perform. Use 'present' to add ospf-area, 'absent'
+        to remove ospf-area and 'update' to modify ospf-area.
+    required: true
+    choices: ['present', 'absent', 'update']
+  pn_vrouter_name:
+    description:
+      - Specify the name of the vRouter.
+    required: true
+  pn_ospf_area:
+    description:
+      - Specify the OSPF area number.
+    required: true
+  pn_stub_type:
+    description:
+      - Specify the OSPF stub type.
+    choices: ['none', 'stub', 'stub-no-summary', 'nssa', 'nssa-no-summary']
+  pn_prefix_listin:
+    description:
+      - OSPF prefix list for filtering incoming packets.
+  pn_prefix_listout:
+    description:
+      - OSPF prefix list for filtering outgoing packets.
+  pn_quiet:
+    description:
+      - Enable/disable system information.
+    required: false
+    type: bool
+    default: true
+'''
+
+EXAMPLES = """
+- name: "Add OSPF area to vrouter"
+  pn_ospfarea:
+    state: present
+    pn_cliusername: admin
+    pn_clipassword: admin
+    pn_vrouter_name: name-string
+    pn_ospf_area: 1.0.0.0
+    pn_stub_type: stub
+
+- name: "Remove OSPF area from vrouter"
+  pn_ospfarea:
+    state: absent
+    pn_cliusername: admin
+    pn_clipassword: admin
+    pn_vrouter_name: name-string
+    pn_ospf_area: 1.0.0.0
+"""
+
+RETURN = """
+command:
+  description: The CLI command run on the target node(s).
+  returned: always
+  type: str
+stdout:
+  description: The set of responses from the ospf command.
+  returned: always
+  type: list
+stderr:
+  description: The set of error responses from the ospf command.
+  returned: on error
+  type: list
+changed:
+  description: Indicates whether the CLI caused changes on the target.
+  returned: always
+  type: bool
+"""
+
+import shlex
+
+# AnsibleModule boilerplate
+from ansible.module_utils.basic import AnsibleModule
+
+
+def get_command_from_state(state):
+    """
+    This method gets appropriate command name for the state specified. It
+    returns the command name for the specified state.
+    :param state: The state for which the respective command name is required.
+ """ + command = None + if state == 'present': + command = 'vrouter-ospf-area-add' + if state == 'absent': + command = 'vrouter-ospf-area-remove' + if state == 'update': + command = 'vrouter-ospf-area-modify' + return command + + +def main(): + """ This section is for arguments parsing """ + module = AnsibleModule( + argument_spec=dict( + pn_cliusername=dict(required=True, type='str'), + pn_clipassword=dict(required=True, type='str', no_log=True), + pn_cliswitch=dict(required=False, type='str'), + state=dict(required=True, type='str', + choices=['present', 'absent', 'update']), + pn_vrouter_name=dict(required=True, type='str'), + pn_ospf_area=dict(required=True, type='str'), + pn_stub_type=dict(type='str', choices=['none', 'stub', 'nssa', + 'stub-no-summary', + 'nssa-no-summary']), + pn_prefix_listin=dict(type='str'), + pn_prefix_listout=dict(type='str'), + pn_quiet=dict(type='bool', default='True') + ) + ) + + # Accessing the arguments + cliusername = module.params['pn_cliusername'] + clipassword = module.params['pn_clipassword'] + cliswitch = module.params['pn_cliswitch'] + state = module.params['state'] + vrouter_name = module.params['pn_vrouter_name'] + ospf_area = module.params['pn_ospf_area'] + stub_type = module.params['pn_stub_type'] + prefix_listin = module.params['pn_prefix_listin'] + prefix_listout = module.params['pn_prefix_listout'] + quiet = module.params['pn_quiet'] + + command = get_command_from_state(state) + + # Building the CLI command string + cli = '/usr/bin/cli' + + if quiet is True: + cli += ' --quiet ' + + cli += ' --user %s:%s ' % (cliusername, clipassword) + + if cliswitch: + if cliswitch == 'local': + cli += ' switch-local ' + else: + cli += ' switch ' + cliswitch + + cli += ' %s vrouter-name %s area %s ' % (command, vrouter_name, ospf_area) + + if stub_type: + cli += ' stub-type ' + stub_type + + if prefix_listin: + cli += ' prefix-list-in ' + prefix_listin + + if prefix_listout: + cli += ' prefix-list-out ' + prefix_listout + + # Run the CLI command + ospfcommand = shlex.split(cli) + + # 'out' contains the output + # 'err' contains the error messages + result, out, err = module.run_command(ospfcommand) + + # Response in JSON format + if result != 0: + module.exit_json( + command=cli, + stderr=err.rstrip("\r\n"), + changed=False + ) + + else: + module.exit_json( + command=cli, + stdout=out.rstrip("\r\n"), + changed=True + ) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/netvisor/pn_port_config.py b/plugins/modules/network/netvisor/pn_port_config.py new file mode 100644 index 0000000000..ecab7cbcc6 --- /dev/null +++ b/plugins/modules/network/netvisor/pn_port_config.py @@ -0,0 +1,382 @@ +#!/usr/bin/python +# Copyright: (c) 2018, Pluribus Networks +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: pn_port_config +author: "Pluribus Networks (@rajaspachipulusu17)" +short_description: CLI command to modify port-config +description: + - This module can be used to modify a port configuration. +options: + pn_cliswitch: + description: + - Target switch to run the CLI on. + required: False + type: str + state: + description: + - State the action to perform. Use C(update) to modify the port-config. 
+ required: True + type: str + choices: ['update'] + pn_intf: + description: + - physical interface. + required: False + type: str + pn_crc_check_enable: + description: + - CRC check on ingress and rewrite on egress. + required: False + type: bool + pn_dscp_map: + description: + - DSCP map name to enable on port. + required: False + type: str + pn_autoneg: + description: + - physical port autonegotiation. + required: False + type: bool + pn_speed: + description: + - physical port speed. + required: False + choices: ['disable', '10m', '100m', '1g', + '2.5g', '10g', '25g', '40g', '50g', '100g'] + pn_port: + description: + - physical port. + required: False + type: str + pn_vxlan_termination: + description: + - physical port vxlan termination setting. + required: False + type: bool + pn_pause: + description: + - physical port pause. + required: False + type: bool + pn_loopback: + description: + - physical port loopback. + required: False + type: bool + pn_loop_vlans: + description: + - looping vlans. + required: False + type: str + pn_routing: + description: + - routing. + required: False + type: bool + pn_edge_switch: + description: + - physical port edge switch. + required: False + type: bool + pn_enable: + description: + - physical port enable. + required: False + type: bool + pn_description: + description: + - physical port description. + required: False + type: str + pn_host_enable: + description: + - Host facing port control setting. + required: False + type: bool + pn_allowed_tpid: + description: + - Allowed TPID in addition to 0x8100 on Vlan header. + required: False + type: str + choices: ['vlan', 'q-in-q', 'q-in-q-old'] + pn_mirror_only: + description: + - physical port mirror only. + required: False + type: bool + pn_reflect: + description: + - physical port reflection. + required: False + type: bool + pn_jumbo: + description: + - jumbo frames on physical port. + required: False + type: bool + pn_egress_rate_limit: + description: + - max egress port data rate limit. + required: False + type: str + pn_eth_mode: + description: + - physical Ethernet mode. + required: False + choices: ['1000base-x', 'sgmii', 'disabled', 'GMII'] + pn_fabric_guard: + description: + - Fabric guard configuration. + required: False + type: bool + pn_local_switching: + description: + - no-local-switching port cannot bridge traffic to + another no-local-switching port. + required: False + type: bool + pn_lacp_priority: + description: + - LACP priority from 1 to 65535. + required: False + type: str + pn_send_port: + description: + - send port. + required: False + type: str + pn_port_mac_address: + description: + - physical port MAC Address. + required: False + type: str + pn_defer_bringup: + description: + - defer port bringup. + required: False + type: bool +''' + +EXAMPLES = """ +- name: port config modify + pn_port_config: + pn_cliswitch: "sw01" + state: "update" + pn_port: "all" + pn_dscp_map: "foo" + +- name: port config modify + pn_port_config: + pn_cliswitch: "sw01" + state: "update" + pn_port: "all" + pn_host_enable: true +""" + +RETURN = """ +command: + description: the CLI command run on the target node. + returned: always + type: str +stdout: + description: set of responses from the port-config command. + returned: always + type: list +stderr: + description: set of error responses from the port-config command. + returned: on error + type: list +changed: + description: indicates whether the CLI caused changes on the target. 
+  returned: always
+  type: bool
+"""
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.network.netvisor.pn_nvos import pn_cli, run_cli, booleanArgs
+from ansible_collections.community.general.plugins.module_utils.network.netvisor.netvisor import run_commands
+
+
+def check_cli(module, cli):
+    """
+    This method checks for idempotency using the dscp-map-show name command.
+    If a dscp-map with the given name exists, return True else False.
+    :param module: The Ansible module to fetch input parameters
+    :param cli: The CLI string
+    """
+    name = module.params['pn_dscp_map']
+
+    cli += ' dscp-map-show name %s format name no-show-headers' % name
+    out = run_commands(module, cli)[1]
+
+    out = out.split()
+
+    return True if name in out[-1] else False
+
+
+def main():
+    """ This section is for arguments parsing """
+
+    state_map = dict(
+        update='port-config-modify'
+    )
+
+    module = AnsibleModule(
+        argument_spec=dict(
+            pn_cliswitch=dict(required=False, type='str'),
+            state=dict(required=True, type='str',
+                       choices=['update']),
+            pn_intf=dict(required=False, type='str'),
+            pn_crc_check_enable=dict(required=False, type='bool'),
+            pn_dscp_map=dict(required=False, type='str'),
+            pn_autoneg=dict(required=False, type='bool'),
+            pn_speed=dict(required=False, type='str',
+                          choices=['disable', '10m', '100m',
+                                   '1g', '2.5g', '10g', '25g',
+                                   '40g', '50g', '100g']),
+            pn_port=dict(required=False, type='str'),
+            pn_vxlan_termination=dict(required=False, type='bool'),
+            pn_pause=dict(required=False, type='bool'),
+            pn_loopback=dict(required=False, type='bool'),
+            pn_loop_vlans=dict(required=False, type='str'),
+            pn_routing=dict(required=False, type='bool'),
+            pn_edge_switch=dict(required=False, type='bool'),
+            pn_enable=dict(required=False, type='bool'),
+            pn_description=dict(required=False, type='str'),
+            pn_host_enable=dict(required=False, type='bool'),
+            pn_allowed_tpid=dict(required=False, type='str',
+                                 choices=['vlan', 'q-in-q', 'q-in-q-old']),
+            pn_mirror_only=dict(required=False, type='bool'),
+            pn_reflect=dict(required=False, type='bool'),
+            pn_jumbo=dict(required=False, type='bool'),
+            pn_egress_rate_limit=dict(required=False, type='str'),
+            pn_eth_mode=dict(required=False, type='str',
+                             choices=['1000base-x', 'sgmii',
+                                      'disabled', 'GMII']),
+            pn_fabric_guard=dict(required=False, type='bool'),
+            pn_local_switching=dict(required=False, type='bool'),
+            pn_lacp_priority=dict(required=False, type='str'),
+            pn_send_port=dict(required=False, type='str'),
+            pn_port_mac_address=dict(required=False, type='str'),
+            pn_defer_bringup=dict(required=False, type='bool'),
+        ),
+        required_if=(
+            ['state', 'update', ['pn_port']],
+        ),
+        required_one_of=[['pn_intf', 'pn_crc_check_enable', 'pn_dscp_map',
+                          'pn_speed', 'pn_autoneg',
+                          'pn_vxlan_termination', 'pn_pause',
+                          'pn_loopback', 'pn_loop_vlans',
+                          'pn_routing', 'pn_edge_switch',
+                          'pn_enable', 'pn_description',
+                          'pn_host_enable', 'pn_allowed_tpid',
+                          'pn_mirror_only', 'pn_reflect',
+                          'pn_jumbo', 'pn_egress_rate_limit',
+                          'pn_eth_mode', 'pn_fabric_guard',
+                          'pn_local_switching', 'pn_lacp_priority',
+                          'pn_send_port', 'pn_port_mac_address',
+                          'pn_defer_bringup']],
+    )
+
+    # Accessing the arguments
+    cliswitch = module.params['pn_cliswitch']
+    state = module.params['state']
+    intf = module.params['pn_intf']
+    crc_check_enable = module.params['pn_crc_check_enable']
+    dscp_map = module.params['pn_dscp_map']
+    autoneg = module.params['pn_autoneg']
+    speed = module.params['pn_speed']
+    port =
module.params['pn_port'] + vxlan_termination = module.params['pn_vxlan_termination'] + pause = module.params['pn_pause'] + loopback = module.params['pn_loopback'] + loop_vlans = module.params['pn_loop_vlans'] + routing = module.params['pn_routing'] + edge_switch = module.params['pn_edge_switch'] + enable = module.params['pn_enable'] + description = module.params['pn_description'] + host_enable = module.params['pn_host_enable'] + allowed_tpid = module.params['pn_allowed_tpid'] + mirror_only = module.params['pn_mirror_only'] + reflect = module.params['pn_reflect'] + jumbo = module.params['pn_jumbo'] + egress_rate_limit = module.params['pn_egress_rate_limit'] + eth_mode = module.params['pn_eth_mode'] + fabric_guard = module.params['pn_fabric_guard'] + local_switching = module.params['pn_local_switching'] + lacp_priority = module.params['pn_lacp_priority'] + send_port = module.params['pn_send_port'] + port_mac_address = module.params['pn_port_mac_address'] + defer_bringup = module.params['pn_defer_bringup'] + + command = state_map[state] + + # Building the CLI command string + cli = pn_cli(module, cliswitch) + + if dscp_map: + NAME_EXISTS = check_cli(module, cli) + + if command == 'port-config-modify': + cli += ' %s ' % command + if dscp_map: + if NAME_EXISTS is False: + module.fail_json( + failed=True, + msg='Create dscp map with name %s before updating' % dscp_map + ) + + cli += ' dscp-map ' + dscp_map + if intf: + cli += ' intf ' + intf + if speed: + cli += ' speed ' + speed + if port: + cli += ' port ' + port + if allowed_tpid: + cli += ' allowed-tpid ' + allowed_tpid + if egress_rate_limit: + cli += ' egress-rate-limit ' + egress_rate_limit + if eth_mode: + cli += ' eth-mode ' + eth_mode + if lacp_priority: + cli += ' lacp-priority ' + lacp_priority + if send_port: + cli += ' send-port ' + send_port + if port_mac_address: + cli += ' port-mac-address ' + port_mac_address + + cli += booleanArgs(crc_check_enable, 'crc-check-enable', 'crc-check-disable') + cli += booleanArgs(autoneg, 'autoneg', 'no-autoneg') + cli += booleanArgs(vxlan_termination, 'vxlan-termination', 'no-vxlan-termination') + cli += booleanArgs(pause, 'pause', 'no-pause') + cli += booleanArgs(loopback, 'loopback', 'no-loopback') + cli += booleanArgs(routing, 'routing', 'no-routing') + cli += booleanArgs(edge_switch, 'edge-switch', 'no-edge-switch') + cli += booleanArgs(enable, 'enable', 'disable') + cli += booleanArgs(host_enable, 'host-enable', 'host-disable') + cli += booleanArgs(mirror_only, 'mirror-only', 'no-mirror-receive-only') + cli += booleanArgs(reflect, 'reflect', 'no-reflect') + cli += booleanArgs(jumbo, 'jumbo', 'no-jumbo') + cli += booleanArgs(fabric_guard, 'fabric-guard', 'no-fabric-guard') + cli += booleanArgs(local_switching, 'local-switching', 'no-local-switching') + cli += booleanArgs(defer_bringup, 'defer-bringup', 'no-defer-bringup') + + run_cli(module, cli, state_map) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/netvisor/pn_port_cos_bw.py b/plugins/modules/network/netvisor/pn_port_cos_bw.py new file mode 100644 index 0000000000..f24d223fac --- /dev/null +++ b/plugins/modules/network/netvisor/pn_port_cos_bw.py @@ -0,0 +1,158 @@ +#!/usr/bin/python +# Copyright: (c) 2018, Pluribus Networks +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 
'community'} + + +DOCUMENTATION = ''' +--- +module: pn_port_cos_bw +author: "Pluribus Networks (@rajaspachipulusu17)" +short_description: CLI command to modify port-cos-bw +description: + - This module can be used to update bw settings for CoS queues. +options: + pn_cliswitch: + description: + - Target switch to run the CLI on. + required: False + type: str + state: + description: + - State the action to perform. Use C(update) to modify the port-cos-bw. + required: True + type: str + choices: ['update'] + pn_max_bw_limit: + description: + - Maximum b/w in percentage. + required: False + type: str + pn_cos: + description: + - CoS priority. + required: False + type: str + pn_port: + description: + - physical port number. + required: False + type: str + pn_weight: + description: + - Scheduling weight (1 to 127) after b/w guarantee met. + required: False + type: str + choices: ['priority', 'no-priority'] + pn_min_bw_guarantee: + description: + - Minimum b/w in percentage. + required: False + type: str +''' + +EXAMPLES = """ +- name: port cos bw modify + pn_port_cos_bw: + pn_cliswitch: "sw01" + state: "update" + pn_port: "1" + pn_cos: "0" + pn_min_bw_guarantee: "60" + +- name: port cos bw modify + pn_port_cos_bw: + pn_cliswitch: "sw01" + state: "update" + pn_port: "all" + pn_cos: "0" + pn_weight: "priority" +""" + +RETURN = """ +command: + description: the CLI command run on the target node. + returned: always + type: str +stdout: + description: set of responses from the port-cos-bw command. + returned: always + type: list +stderr: + description: set of error responses from the port-cos-bw command. + returned: on error + type: list +changed: + description: indicates whether the CLI caused changes on the target. + returned: always + type: bool +""" + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.network.netvisor.pn_nvos import pn_cli, run_cli + + +def main(): + """ This section is for arguments parsing """ + + state_map = dict( + update='port-cos-bw-modify' + ) + + module = AnsibleModule( + argument_spec=dict( + pn_cliswitch=dict(required=False, type='str'), + state=dict(required=True, type='str', + choices=state_map.keys()), + pn_max_bw_limit=dict(required=False, type='str'), + pn_cos=dict(required=False, type='str'), + pn_port=dict(required=False, type='str'), + pn_weight=dict(required=False, type='str', + choices=['priority', 'no-priority']), + pn_min_bw_guarantee=dict(required=False, type='str'), + ), + required_if=( + ['state', 'update', ['pn_cos', 'pn_port']], + ), + required_one_of=[['pn_max_bw_limit', 'pn_min_bw_guarantee', 'pn_weight']], + ) + + # Accessing the arguments + cliswitch = module.params['pn_cliswitch'] + state = module.params['state'] + max_bw_limit = module.params['pn_max_bw_limit'] + cos = module.params['pn_cos'] + port = module.params['pn_port'] + weight = module.params['pn_weight'] + min_bw_guarantee = module.params['pn_min_bw_guarantee'] + + command = state_map[state] + + # Building the CLI command string + cli = pn_cli(module, cliswitch) + + if command == 'port-cos-bw-modify': + cli += ' %s ' % command + if max_bw_limit: + cli += ' max-bw-limit ' + max_bw_limit + if cos: + cli += ' cos ' + cos + if port: + cli += ' port ' + port + if weight: + cli += ' weight ' + weight + if min_bw_guarantee: + cli += ' min-bw-guarantee ' + min_bw_guarantee + + run_cli(module, cli, state_map) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/netvisor/pn_port_cos_rate_setting.py 
b/plugins/modules/network/netvisor/pn_port_cos_rate_setting.py
new file mode 100644
index 0000000000..0a111dfe51
--- /dev/null
+++ b/plugins/modules/network/netvisor/pn_port_cos_rate_setting.py
@@ -0,0 +1,206 @@
+#!/usr/bin/python
+# Copyright: (c) 2018, Pluribus Networks
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: pn_port_cos_rate_setting
+author: "Pluribus Networks (@rajaspachipulusu17)"
+short_description: CLI command to modify port-cos-rate-setting
+description:
+  - This module can be used to update the port cos rate limit.
+options:
+  pn_cliswitch:
+    description:
+      - Target switch to run the CLI on.
+    required: false
+    type: str
+  state:
+    description:
+      - State the action to perform. Use C(update) to modify
+        the port-cos-rate-setting.
+    required: true
+    type: str
+    choices: ['update']
+  pn_cos0_rate:
+    description:
+      - cos0 rate limit (pps) unlimited or 0 to 10000000.
+    required: false
+    type: str
+  pn_cos1_rate:
+    description:
+      - cos1 rate limit (pps) unlimited or 0 to 10000000.
+    required: false
+    type: str
+  pn_cos2_rate:
+    description:
+      - cos2 rate limit (pps) unlimited or 0 to 10000000.
+    required: false
+    type: str
+  pn_cos3_rate:
+    description:
+      - cos3 rate limit (pps) unlimited or 0 to 10000000.
+    required: false
+    type: str
+  pn_cos4_rate:
+    description:
+      - cos4 rate limit (pps) unlimited or 0 to 10000000.
+    required: false
+    type: str
+  pn_cos5_rate:
+    description:
+      - cos5 rate limit (pps) unlimited or 0 to 10000000.
+    required: false
+    type: str
+  pn_cos6_rate:
+    description:
+      - cos6 rate limit (pps) unlimited or 0 to 10000000.
+    required: false
+    type: str
+  pn_cos7_rate:
+    description:
+      - cos7 rate limit (pps) unlimited or 0 to 10000000.
+    required: false
+    type: str
+  pn_port:
+    description:
+      - port.
+    required: false
+    type: str
+    choices: ['control-port', 'data-port', 'span-ports']
+'''
+
+EXAMPLES = """
+- name: port cos rate modify
+  pn_port_cos_rate_setting:
+    pn_cliswitch: "sw01"
+    state: "update"
+    pn_port: "control-port"
+    pn_cos1_rate: "1000"
+    pn_cos5_rate: "1000"
+    pn_cos2_rate: "1000"
+    pn_cos0_rate: "1000"
+
+- name: port cos rate modify
+  pn_port_cos_rate_setting:
+    pn_cliswitch: "sw01"
+    state: "update"
+    pn_port: "data-port"
+    pn_cos1_rate: "2000"
+    pn_cos5_rate: "2000"
+    pn_cos2_rate: "2000"
+    pn_cos0_rate: "2000"
+"""
+
+RETURN = """
+command:
+  description: the CLI command run on the target node.
+  returned: always
+  type: str
+stdout:
+  description: set of responses from the port-cos-rate-setting command.
+  returned: always
+  type: list
+stderr:
+  description: set of error responses from the port-cos-rate-setting command.
+  returned: on error
+  type: list
+changed:
+  description: indicates whether the CLI caused changes on the target.
+ returned: always + type: bool +""" + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.network.netvisor.pn_nvos import pn_cli, run_cli + + +def main(): + """ This section is for arguments parsing """ + + state_map = dict( + update='port-cos-rate-setting-modify' + ) + + module = AnsibleModule( + argument_spec=dict( + pn_cliswitch=dict(required=False, type='str'), + state=dict(required=True, type='str', + choices=state_map.keys()), + pn_cos1_rate=dict(required=False, type='str'), + pn_cos5_rate=dict(required=False, type='str'), + pn_cos2_rate=dict(required=False, type='str'), + pn_cos0_rate=dict(required=False, type='str'), + pn_cos6_rate=dict(required=False, type='str'), + pn_cos3_rate=dict(required=False, type='str'), + pn_cos4_rate=dict(required=False, type='str'), + pn_cos7_rate=dict(required=False, type='str'), + pn_port=dict(required=False, type='str', + choices=['control-port', 'data-port', 'span-ports']), + ), + required_if=( + ['state', 'update', ['pn_port']], + ), + required_one_of=[['pn_cos0_rate', + 'pn_cos1_rate', + 'pn_cos2_rate', + 'pn_cos3_rate', + 'pn_cos4_rate', + 'pn_cos5_rate', + 'pn_cos6_rate', + 'pn_cos7_rate']], + ) + + # Accessing the arguments + cliswitch = module.params['pn_cliswitch'] + state = module.params['state'] + cos1_rate = module.params['pn_cos1_rate'] + cos5_rate = module.params['pn_cos5_rate'] + cos2_rate = module.params['pn_cos2_rate'] + cos0_rate = module.params['pn_cos0_rate'] + cos6_rate = module.params['pn_cos6_rate'] + cos3_rate = module.params['pn_cos3_rate'] + cos4_rate = module.params['pn_cos4_rate'] + cos7_rate = module.params['pn_cos7_rate'] + port = module.params['pn_port'] + + command = state_map[state] + + # Building the CLI command string + cli = pn_cli(module, cliswitch) + + if command == 'port-cos-rate-setting-modify': + cli += ' %s ' % command + if cos1_rate: + cli += ' cos1-rate ' + cos1_rate + if cos5_rate: + cli += ' cos5-rate ' + cos5_rate + if cos2_rate: + cli += ' cos2-rate ' + cos2_rate + if cos0_rate: + cli += ' cos0-rate ' + cos0_rate + if cos6_rate: + cli += ' cos6-rate ' + cos6_rate + if cos3_rate: + cli += ' cos3-rate ' + cos3_rate + if cos4_rate: + cli += ' cos4-rate ' + cos4_rate + if cos7_rate: + cli += ' cos7-rate ' + cos7_rate + if port: + cli += ' port ' + port + + run_cli(module, cli, state_map) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/netvisor/pn_prefix_list.py b/plugins/modules/network/netvisor/pn_prefix_list.py new file mode 100644 index 0000000000..9623a43cfe --- /dev/null +++ b/plugins/modules/network/netvisor/pn_prefix_list.py @@ -0,0 +1,164 @@ +#!/usr/bin/python +# Copyright: (c) 2018, Pluribus Networks +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: pn_prefix_list +author: "Pluribus Networks (@rajaspachipulusu17)" +short_description: CLI command to create/delete prefix-list +description: + - This module can be used to create or delete prefix list. +options: + pn_cliswitch: + description: + - Target switch to run the CLI on. + required: false + type: str + state: + description: + - State the action to perform. Use C(present) to create prefix-list and + C(absent) to delete prefix-list. 
+ required: false + type: str + choices: ['present', 'absent'] + default: 'present' + pn_name: + description: + - Prefix List Name. + required: true + type: str + pn_scope: + description: + - scope of prefix-list. + required: false + type: str + choices: ['local', 'fabric'] +''' + +EXAMPLES = """ +- name: Create prefix list + pn_prefix_list: + pn_cliswitch: "sw01" + pn_name: "foo" + pn_scope: "local" + state: "present" + +- name: Delete prefix list + pn_prefix_list: + pn_cliswitch: "sw01" + pn_name: "foo" + state: "absent" +""" + +RETURN = """ +command: + description: the CLI command run on the target node. + returned: always + type: str +stdout: + description: set of responses from the prefix-list command. + returned: always + type: list +stderr: + description: set of error responses from the prefix-list command. + returned: on error + type: list +changed: + description: indicates whether the CLI caused changes on the target. + returned: always + type: bool +""" + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.network.netvisor.pn_nvos import pn_cli, run_cli +from ansible_collections.community.general.plugins.module_utils.network.netvisor.netvisor import run_commands + + +def check_cli(module, cli): + """ + This method checks for idempotency using the prefix-list-show command. + If a name exists, return True if name exists else False. + :param module: The Ansible module to fetch input parameters + :param cli: The CLI string + """ + name = module.params['pn_name'] + + cli += ' prefix-list-show format name no-show-headers' + out = run_commands(module, cli)[1] + + if out: + out = out.split() + + return True if name in out else False + + +def main(): + """ This section is for arguments parsing """ + + state_map = dict( + present='prefix-list-create', + absent='prefix-list-delete' + ) + + argument_spec = dict( + pn_cliswitch=dict(required=False, type='str'), + state=dict(required=False, type='str', + choices=state_map.keys(), default='present'), + pn_name=dict(required=True, type='str'), + pn_scope=dict(required=False, type='str', + choices=['local', 'fabric']), + ) + + module = AnsibleModule( + argument_spec=argument_spec, + required_if=( + ["state", "present", ["pn_name", "pn_scope"]], + ["state", "absent", ["pn_name"]], + ), + ) + + # Accessing the arguments + cliswitch = module.params['pn_cliswitch'] + state = module.params['state'] + name = module.params['pn_name'] + scope = module.params['pn_scope'] + + command = state_map[state] + + # Building the CLI command string + cli = pn_cli(module, cliswitch) + + NAME_EXISTS = check_cli(module, cli) + + cli += ' %s name %s ' % (command, name) + + if command == 'prefix-list-delete': + if NAME_EXISTS is False: + module.exit_json( + skipped=True, + msg='prefix-list with name %s does not exist' % name + ) + else: + if command == 'prefix-list-create': + if NAME_EXISTS is True: + module.exit_json( + skipped=True, + msg='prefix list with name %s already exists' % name + ) + cli += ' scope %s ' % scope + + run_cli(module, cli, state_map) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/netvisor/pn_prefix_list_network.py b/plugins/modules/network/netvisor/pn_prefix_list_network.py new file mode 100644 index 0000000000..b61815b21e --- /dev/null +++ b/plugins/modules/network/netvisor/pn_prefix_list_network.py @@ -0,0 +1,190 @@ +#!/usr/bin/python +# Copyright: (c) 2018, Pluribus Networks +# GNU General Public License v3.0+ (see COPYING or 
https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: pn_prefix_list_network
+author: "Pluribus Networks (@rajaspachipulusu17)"
+short_description: CLI command to add/remove prefix-list-network
+description:
+  - This module can be used to add networks to a prefix list and to remove
+    networks from a prefix list.
+options:
+  pn_cliswitch:
+    description:
+      - Target switch to run the CLI on.
+    required: false
+    type: str
+  state:
+    description:
+      - State the action to perform. Use C(present) to create
+        prefix-list-network and C(absent) to delete prefix-list-network.
+    required: true
+    type: str
+    choices: ['present', 'absent']
+  pn_netmask:
+    description:
+      - netmask of the network associated with the prefix list.
+    required: false
+    type: str
+  pn_name:
+    description:
+      - Prefix List Name.
+    required: false
+    type: str
+  pn_network:
+    description:
+      - network associated with the prefix list.
+    required: false
+    type: str
+'''
+
+EXAMPLES = """
+- name: prefix list network add
+  pn_prefix_list_network:
+    pn_cliswitch: "sw01"
+    pn_name: "foo"
+    pn_network: "172.16.3.1"
+    pn_netmask: "24"
+    state: "present"
+
+- name: prefix list network remove
+  pn_prefix_list_network:
+    pn_cliswitch: "sw01"
+    state: "absent"
+    pn_name: "foo"
+    pn_network: "172.16.3.1"
+    pn_netmask: "24"
+"""
+
+RETURN = """
+command:
+  description: the CLI command run on the target node.
+  returned: always
+  type: str
+stdout:
+  description: set of responses from the prefix-list-network command.
+  returned: always
+  type: list
+stderr:
+  description: set of error responses from the prefix-list-network command.
+  returned: on error
+  type: list
+changed:
+  description: indicates whether the CLI caused changes on the target.
+  returned: always
+  type: bool
+"""
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.network.netvisor.pn_nvos import pn_cli, run_cli
+from ansible_collections.community.general.plugins.module_utils.network.netvisor.netvisor import run_commands
+
+
+def check_cli(module, cli):
+    """
+    This method checks for idempotency using the prefix-list-network-show
+    command. If the network exists, return True else False.
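+    The check is two-step: it first verifies that the prefix list itself
+    exists (failing the module if it does not), then looks for the network
+    in the prefix-list-network-show output.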
+    :param module: The Ansible module to fetch input parameters
+    :param cli: The CLI string
+    """
+    name = module.params['pn_name']
+    network = module.params['pn_network']
+    show = cli
+
+    cli += ' prefix-list-show format name no-show-headers'
+    out = run_commands(module, cli)[1]
+
+    if name not in out.split()[-1]:
+        module.fail_json(
+            failed=True,
+            msg='Prefix list with name %s does not exist' % name
+        )
+
+    cli = show
+    cli += ' prefix-list-network-show name %s format network no-show-headers' % name
+    rc, out, err = run_commands(module, cli)
+
+    if out:
+        out = out.split()[-1]
+        return True if network in out.split('/')[0] else False
+
+    return False
+
+
+def main():
+    """ This section is for arguments parsing """
+
+    state_map = dict(
+        present='prefix-list-network-add',
+        absent='prefix-list-network-remove'
+    )
+
+    module = AnsibleModule(
+        argument_spec=dict(
+            pn_cliswitch=dict(required=False, type='str'),
+            state=dict(required=True, type='str',
+                       choices=state_map.keys()),
+            pn_netmask=dict(required=False, type='str'),
+            pn_name=dict(required=False, type='str'),
+            pn_network=dict(required=False, type='str'),
+        ),
+        required_if=(
+            ["state", "present", ["pn_name", "pn_network", "pn_netmask"]],
+            ["state", "absent", ["pn_name", "pn_network", "pn_netmask"]],
+        ),
+        required_together=(
+            ["pn_network", "pn_netmask"],
+        ),
+    )
+
+    # Accessing the arguments
+    cliswitch = module.params['pn_cliswitch']
+    state = module.params['state']
+    netmask = module.params['pn_netmask']
+    name = module.params['pn_name']
+    network = module.params['pn_network']
+
+    command = state_map[state]
+
+    # Building the CLI command string
+    cli = pn_cli(module, cliswitch)
+
+    NETWORK_EXISTS = check_cli(module, cli)
+    cli += ' %s ' % command
+
+    if command == 'prefix-list-network-remove':
+        if NETWORK_EXISTS is False:
+            module.exit_json(
+                skipped=True,
+                msg='Prefix list with network %s does not exist' % network
+            )
+
+    if command == 'prefix-list-network-add':
+        if NETWORK_EXISTS is True:
+            module.exit_json(
+                skipped=True,
+                msg='Prefix list with network %s already exists' % network
+            )
+
+    if name:
+        cli += ' name ' + name
+    if network:
+        cli += ' network ' + network
+    if netmask:
+        cli += ' netmask ' + netmask
+
+    run_cli(module, cli, state_map)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/network/netvisor/pn_role.py b/plugins/modules/network/netvisor/pn_role.py
new file mode 100644
index 0000000000..e4687ee42d
--- /dev/null
+++ b/plugins/modules/network/netvisor/pn_role.py
@@ -0,0 +1,237 @@
+#!/usr/bin/python
+# Copyright: (c) 2018, Pluribus Networks
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: pn_role
+author: "Pluribus Networks (@rajaspachipulusu17)"
+short_description: CLI command to create/delete/modify role
+description:
+  - This module can be used to create, delete and modify user roles.
+options:
+  pn_cliswitch:
+    description:
+      - Target switch to run the CLI on.
+    required: false
+    type: str
+  state:
+    description:
+      - State the action to perform. Use C(present) to create role and
+        C(absent) to delete role and C(update) to modify role.
+    required: true
+    type: str
+    choices: ['present', 'absent', 'update']
+  pn_scope:
+    description:
+      - local or fabric.
+ required: false + type: str + choices: ['local', 'fabric'] + pn_access: + description: + - type of access. + required: false + type: str + choices: ['read-only', 'read-write'] + pn_shell: + description: + - allow shell command. + required: false + type: bool + pn_sudo: + description: + - allow sudo from shell. + required: false + type: bool + pn_running_config: + description: + - display running configuration of switch. + required: false + type: bool + pn_name: + description: + - role name. + required: true + type: str + pn_delete_from_users: + description: + - delete from users. + required: false + type: bool +''' + +EXAMPLES = """ +- name: Role create + pn_role: + pn_cliswitch: 'sw01' + state: 'present' + pn_name: 'foo' + pn_scope: 'local' + pn_access: 'read-only' + +- name: Role delete + pn_role: + pn_cliswitch: 'sw01' + state: 'absent' + pn_name: 'foo' + +- name: Role modify + pn_role: + pn_cliswitch: 'sw01' + state: 'update' + pn_name: 'foo' + pn_access: 'read-write' + pn_sudo: true + pn_shell: true +""" + +RETURN = """ +command: + description: the CLI command run on the target node. + returned: always + type: str +stdout: + description: set of responses from the role command. + returned: always + type: list +stderr: + description: set of error responses from the role command. + returned: on error + type: list +changed: + description: indicates whether the CLI caused changes on the target. + returned: always + type: bool +""" + + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.network.netvisor.pn_nvos import pn_cli, run_cli, booleanArgs +from ansible_collections.community.general.plugins.module_utils.network.netvisor.netvisor import run_commands + + +def check_cli(module, cli): + """ + This method checks for idempotency using the role-show command. + If a role with given name exists, return True else False. 
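+    The result drives the branches in main(): create is skipped when the
+    role already exists, modify fails when it does not, and delete is
+    skipped when it does not.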
+    :param module: The Ansible module to fetch input parameters
+    :param cli: The CLI string
+    """
+    role_name = module.params['pn_name']
+
+    cli += ' role-show format name no-show-headers'
+    out = run_commands(module, cli)[1]
+
+    if out:
+        out = out.split()
+
+    return True if role_name in out else False
+
+
+def main():
+    """ This section is for arguments parsing """
+
+    state_map = dict(
+        present='role-create',
+        absent='role-delete',
+        update='role-modify'
+    )
+
+    module = AnsibleModule(
+        argument_spec=dict(
+            pn_cliswitch=dict(required=False, type='str'),
+            state=dict(required=True, type='str',
+                       choices=state_map.keys()),
+            pn_scope=dict(required=False, type='str',
+                          choices=['local', 'fabric']),
+            pn_access=dict(required=False, type='str',
+                           choices=['read-only', 'read-write']),
+            pn_shell=dict(required=False, type='bool'),
+            pn_sudo=dict(required=False, type='bool'),
+            pn_running_config=dict(required=False, type='bool'),
+            pn_name=dict(required=False, type='str'),
+            pn_delete_from_users=dict(required=False, type='bool'),
+        ),
+        required_if=(
+            ["state", "present", ["pn_name", "pn_scope"]],
+            ["state", "absent", ["pn_name"]],
+            ["state", "update", ["pn_name"]],
+        ),
+    )
+
+    # Accessing the arguments
+    cliswitch = module.params['pn_cliswitch']
+    state = module.params['state']
+    scope = module.params['pn_scope']
+    access = module.params['pn_access']
+    shell = module.params['pn_shell']
+    sudo = module.params['pn_sudo']
+    running_config = module.params['pn_running_config']
+    name = module.params['pn_name']
+    delete_from_users = module.params['pn_delete_from_users']
+
+    command = state_map[state]
+
+    # Building the CLI command string
+    cli = pn_cli(module, cliswitch)
+
+    ROLE_EXISTS = check_cli(module, cli)
+    cli += ' %s name %s ' % (command, name)
+
+    # sudo access is only valid when shell access is granted as well
+    if not shell and sudo is True:
+        module.fail_json(
+            failed=True,
+            msg='sudo access requires shell access'
+        )
+
+    if command == 'role-modify':
+        if ROLE_EXISTS is False:
+            module.fail_json(
+                failed=True,
+                msg='Role with name %s does not exist' % name
+            )
+
+    if command == 'role-delete':
+        if ROLE_EXISTS is False:
+            module.exit_json(
+                skipped=True,
+                msg='Role with name %s does not exist' % name
+            )
+
+    if command == 'role-create':
+        if ROLE_EXISTS is True:
+            module.exit_json(
+                skipped=True,
+                msg='Role with name %s already exists' % name
+            )
+
+    if scope:
+        cli += ' scope ' + scope
+
+    if command != 'role-delete':
+        if access:
+            cli += ' access ' + access
+
+        cli += booleanArgs(shell, 'shell', 'no-shell')
+        cli += booleanArgs(sudo, 'sudo', 'no-sudo')
+        cli += booleanArgs(running_config, 'running-config', 'no-running-config')
+
+    if command == 'role-modify':
+        if delete_from_users:
+            # delete_from_users is a boolean toggle, so append only the keyword
+            cli += ' delete-from-users '
+
+    run_cli(module, cli, state_map)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/network/netvisor/pn_show.py b/plugins/modules/network/netvisor/pn_show.py
new file mode 100644
index 0000000000..bcc3deae5a
--- /dev/null
+++ b/plugins/modules/network/netvisor/pn_show.py
@@ -0,0 +1,204 @@
+#!/usr/bin/python
+""" PN CLI show commands """
+
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['deprecated'],
+                    'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: pn_show
+author: "Pluribus Networks (@amitsi)"
+short_description: Run show commands on nvOS device.
+deprecated:
+  removed_in: '2.12'
+  why: Doesn't support latest Pluribus Networks netvisor
+  alternative: Latest modules will be pushed in future Ansible versions.
+description:
+  - Execute show commands on the nodes and return the results
+    read from the device.
+options:
+  pn_cliusername:
+    description:
+      - Provide login username if user is not root.
+    required: False
+  pn_clipassword:
+    description:
+      - Provide login password if user is not root.
+    required: False
+  pn_cliswitch:
+    description:
+      - Target switch(es) to run the cli on.
+    required: False
+  pn_command:
+    description:
+      - The C(pn_command) takes a CLI show command as value.
+    required: true
+  pn_parameters:
+    description:
+      - Display output using a specific parameter. Use 'all' to display
+        possible output. List of comma-separated parameters.
+    default: 'all'
+  pn_options:
+    description:
+      - Specify formatting options.
+'''
+
+EXAMPLES = """
+- name: run the vlan-show command
+  pn_show:
+    pn_command: 'vlan-show'
+    pn_parameters: id,scope,ports
+    pn_options: 'layout vertical'
+
+- name: run the vlag-show command
+  pn_show:
+    pn_command: 'vlag-show'
+    pn_parameters: 'id,name,cluster,mode'
+    pn_options: 'no-show-headers'
+
+- name: run the cluster-show command
+  pn_show:
+    pn_command: 'cluster-show'
+"""
+
+RETURN = """
+command:
+  description: The CLI command run on the target node(s).
+  returned: always
+  type: str
+stdout:
+  description: The set of responses from the show command.
+  returned: always
+  type: list
+stderr:
+  description: The set of error responses from the show command.
+  returned: on error
+  type: list
+changed:
+  description: Indicates whether the CLI caused any change on the target.
+  returned: always(False)
+  type: bool
+"""
+
+import shlex
+
+# AnsibleModule boilerplate
+from ansible.module_utils.basic import AnsibleModule
+
+
+def pn_cli(module):
+    """
+    This method is to generate the cli portion to launch the Netvisor cli.
+    It parses the username, password, switch parameters from module.
+    :param module: The Ansible module to fetch username, password and switch
+    :return: returns the cli string for further processing
+    """
+    username = module.params['pn_cliusername']
+    password = module.params['pn_clipassword']
+    cliswitch = module.params['pn_cliswitch']
+
+    if username and password:
+        cli = '/usr/bin/cli --quiet --user %s:%s ' % (username, password)
+    else:
+        cli = '/usr/bin/cli --quiet '
+
+    if cliswitch:
+        if cliswitch == 'local':
+            cli += ' switch-local '
+        else:
+            cli += ' switch ' + cliswitch
+    return cli
+
+
+def run_cli(module, cli):
+    """
+    This method executes the cli command on the target node(s) and returns the
+    output. The module then exits based on the output.
+    :param cli: the complete cli string to be executed on the target node(s).
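+    The module always exits inside this function: stdout, stderr, or a
+    'Nothing to display' message is returned through exit_json.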
+ :param module: The Ansible module to fetch command + """ + cliswitch = module.params['pn_cliswitch'] + command = module.params['pn_command'] + cmd = shlex.split(cli) + + # 'out' contains the output + # 'err' contains the error messages + result, out, err = module.run_command(cmd) + + print_cli = cli.split(cliswitch)[1] + + # Response in JSON format + if result != 0: + module.exit_json( + command=print_cli, + msg='%s: ' % command, + stderr=err.strip(), + changed=False + ) + + if out: + module.exit_json( + command=print_cli, + msg='%s: ' % command, + stdout=out.strip(), + changed=False + ) + + else: + module.exit_json( + command=cli, + msg='%s: Nothing to display!!!' % command, + changed=False + ) + + +def main(): + """ This section is for arguments parsing """ + module = AnsibleModule( + argument_spec=dict( + pn_cliusername=dict(required=True, type='str'), + pn_clipassword=dict(required=True, type='str', no_log=True), + pn_cliswitch=dict(required=False, type='str'), + pn_command=dict(required=True, type='str'), + pn_parameters=dict(default='all', type='str'), + pn_options=dict(type='str') + ) + ) + + # Accessing the arguments + command = module.params['pn_command'] + parameters = module.params['pn_parameters'] + options = module.params['pn_options'] + + # Building the CLI command string + cli = pn_cli(module) + + cli += ' %s format %s ' % (command, parameters) + + if options: + cli += options + + run_cli(module, cli) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/netvisor/pn_snmp_community.py b/plugins/modules/network/netvisor/pn_snmp_community.py new file mode 100644 index 0000000000..b6719be569 --- /dev/null +++ b/plugins/modules/network/netvisor/pn_snmp_community.py @@ -0,0 +1,178 @@ +#!/usr/bin/python +# Copyright: (c) 2018, Pluribus Networks +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: pn_snmp_community +author: "Pluribus Networks (@rajaspachipulusu17)" +short_description: CLI command to create/modify/delete snmp-community +description: + - This module can be used to create SNMP communities for SNMPv1 or + delete SNMP communities for SNMPv1 or modify SNMP communities for SNMPv1. +options: + pn_cliswitch: + description: + - Target switch to run the CLI on. + required: false + state: + description: + - State the action to perform. Use C(present) to create snmp-community and + C(absent) to delete snmp-community C(update) to update snmp-community. + required: true + type: str + choices: ['present', 'absent', 'update'] + pn_community_type: + description: + - community type. + type: str + choices: ['read-only', 'read-write'] + pn_community_string: + description: + - community name. + type: str +''' + +EXAMPLES = """ +- name: Create snmp community + pn_snmp_community: + pn_cliswitch: "sw01" + state: "present" + pn_community_string: "foo" + pn_community_type: "read-write" + +- name: Delete snmp community + pn_snmp_community: + pn_cliswitch: "sw01" + state: "absent" + pn_community_string: "foo" + +- name: Modify snmp community + pn_snmp_community: + pn_cliswitch: "sw01" + state: "update" + pn_community_string: "foo" + pn_community_type: "read-only" +""" + +RETURN = """ +command: + description: the CLI command run on the target node. 
+ returned: always + type: str +stdout: + description: set of responses from the snmp-community command. + returned: always + type: list +stderr: + description: set of error responses from the snmp-community command. + returned: on error + type: list +changed: + description: indicates whether the CLI caused changes on the target. + returned: always + type: bool +""" + + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.network.netvisor.pn_nvos import pn_cli, run_cli +from ansible_collections.community.general.plugins.module_utils.network.netvisor.netvisor import run_commands + + +def check_cli(module, cli): + """ + This method checks for idempotency using the snmp-community-show command. + If a user with given name exists, return as True else False. + :param module: The Ansible module to fetch input parameters + :param cli: The CLI string + """ + comm_str = module.params['pn_community_string'] + + cli += ' snmp-community-show format community-string no-show-headers' + out = run_commands(module, cli)[1] + + if out: + out = out.split() + + return True if comm_str in out else False + + +def main(): + """ This section is for arguments parsing """ + + state_map = dict( + present='snmp-community-create', + absent='snmp-community-delete', + update='snmp-community-modify' + ) + + module = AnsibleModule( + argument_spec=dict( + pn_cliswitch=dict(required=False, type='str'), + state=dict(required=True, type='str', + choices=state_map.keys()), + pn_community_type=dict(required=False, type='str', + choices=['read-only', 'read-write']), + pn_community_string=dict(required=False, type='str'), + ), + required_if=( + ["state", "present", ["pn_community_type", "pn_community_string"]], + ["state", "absent", ["pn_community_string"]], + ["state", "update", ["pn_community_type", "pn_community_string"]], + ) + ) + + # Accessing the arguments + cliswitch = module.params['pn_cliswitch'] + state = module.params['state'] + community_type = module.params['pn_community_type'] + comm_str = module.params['pn_community_string'] + + command = state_map[state] + + # Building the CLI command string + cli = pn_cli(module, cliswitch) + + COMMUNITY_EXISTS = check_cli(module, cli) + + if command == 'snmp-community-modify': + if COMMUNITY_EXISTS is False: + module.fail_json( + failed=True, + msg='snmp community name %s does not exist' % comm_str + ) + + if command == 'snmp-community-delete': + if COMMUNITY_EXISTS is False: + module.exit_json( + skipped=True, + msg='snmp community name %s does not exist' % comm_str + ) + + if command == 'snmp-community-create': + if COMMUNITY_EXISTS is True: + module.exit_json( + skipped=True, + msg='snmp community with name %s already exists' % comm_str + ) + + cli += ' %s community-string %s ' % (command, comm_str) + + if command != 'snmp-community-delete' and community_type: + cli += ' community-type ' + community_type + + run_cli(module, cli, state_map) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/netvisor/pn_snmp_trap_sink.py b/plugins/modules/network/netvisor/pn_snmp_trap_sink.py new file mode 100644 index 0000000000..35d8ac0144 --- /dev/null +++ b/plugins/modules/network/netvisor/pn_snmp_trap_sink.py @@ -0,0 +1,214 @@ +#!/usr/bin/python +# Copyright: (c) 2018, Pluribus Networks +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = 
{'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: pn_snmp_trap_sink +author: "Pluribus Networks (@rajaspachipulusu17)" +short_description: CLI command to create/delete snmp-trap-sink +description: + - This module can be used to create a SNMP trap sink and delete a SNMP trap sink. +options: + pn_cliswitch: + description: + - Target switch to run the CLI on. + required: false + type: str + state: + description: + - State the action to perform. Use C(present) to create snmp-trap-sink and + C(absent) to delete snmp-trap-sink. + required: true + type: str + choices: ['present', 'absent'] + pn_dest_host: + description: + - destination host. + type: str + pn_community: + description: + - community type. + type: str + pn_dest_port: + description: + - destination port. + type: str + default: '162' + pn_type: + description: + - trap type. + type: str + choices: ['TRAP_TYPE_V1_TRAP', 'TRAP_TYPE_V2C_TRAP', 'TRAP_TYPE_V2_INFORM'] + default: 'TRAP_TYPE_V2C_TRAP' +''' + +EXAMPLES = """ +- name: snmp trap sink functionality + pn_snmp_trap_sink: + pn_cliswitch: "sw01" + state: "present" + pn_community: "foo" + pn_type: "TRAP_TYPE_V2_INFORM" + pn_dest_host: "192.168.67.8" + +- name: snmp trap sink functionality + pn_snmp_trap_sink: + pn_cliswitch: "sw01" + state: "absent" + pn_community: "foo" + pn_type: "TRAP_TYPE_V2_INFORM" + pn_dest_host: "192.168.67.8" +""" + +RETURN = """ +command: + description: the CLI command run on the target node. + returned: always + type: str +stdout: + description: set of responses from the snmp-trap-sink command. + returned: always + type: list +stderr: + description: set of error responses from the snmp-trap-sink command. + returned: on error + type: list +changed: + description: indicates whether the CLI caused changes on the target. + returned: always + type: bool +""" + + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.network.netvisor.pn_nvos import pn_cli, run_cli +from ansible_collections.community.general.plugins.module_utils.network.netvisor.netvisor import run_commands + + +def check_cli(module, cli): + """ + This method checks for idempotency using the snmp-trap-sink-show command. + If a trap with given name exists, return True else False. 
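+    Returns None when the SNMP community itself does not exist, so the
+    caller can distinguish a missing community from a missing trap sink.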
+    :param module: The Ansible module to fetch input parameters
+    :param cli: The CLI string
+    """
+    community = module.params['pn_community']
+    dest_host = module.params['pn_dest_host']
+
+    show = cli
+    cli += ' snmp-community-show format community-string no-show-headers'
+    rc, out, err = run_commands(module, cli)
+
+    if out:
+        out = out.split()
+
+    if community in out:
+        cli = show
+        cli += ' snmp-trap-sink-show community %s format type,dest-host no-show-headers' % community
+        rc, out, err = run_commands(module, cli)
+
+        if out:
+            out = out.split()
+
+        return True if dest_host in out else False
+    else:
+        return None
+
+
+def main():
+    """ This section is for arguments parsing """
+
+    state_map = dict(
+        present='snmp-trap-sink-create',
+        absent='snmp-trap-sink-delete'
+    )
+
+    module = AnsibleModule(
+        argument_spec=dict(
+            pn_cliswitch=dict(required=False, type='str'),
+            state=dict(required=True, type='str',
+                       choices=state_map.keys()),
+            pn_dest_host=dict(required=False, type='str'),
+            pn_community=dict(required=False, type='str'),
+            pn_dest_port=dict(required=False, type='str', default='162'),
+            pn_type=dict(required=False, type='str',
+                         choices=['TRAP_TYPE_V1_TRAP',
+                                  'TRAP_TYPE_V2C_TRAP',
+                                  'TRAP_TYPE_V2_INFORM'],
+                         default='TRAP_TYPE_V2C_TRAP'),
+        ),
+        required_if=(
+            ["state", "present", ["pn_community", "pn_dest_host"]],
+            ["state", "absent", ["pn_community", "pn_dest_host"]],
+        )
+    )
+
+    # Accessing the arguments
+    cliswitch = module.params['pn_cliswitch']
+    state = module.params['state']
+    dest_host = module.params['pn_dest_host']
+    community = module.params['pn_community']
+    dest_port = module.params['pn_dest_port']
+    pn_type = module.params['pn_type']
+
+    command = state_map[state]
+
+    # Building the CLI command string
+    cli = pn_cli(module, cliswitch)
+
+    VALUE_EXISTS = check_cli(module, cli)
+    cli += ' %s ' % command
+
+    if command == 'snmp-trap-sink-create':
+        if VALUE_EXISTS is True:
+            module.exit_json(
+                skipped=True,
+                msg='snmp trap sink already exists'
+            )
+        if VALUE_EXISTS is None:
+            module.fail_json(
+                failed=True,
+                msg='snmp community does not exist to create trap sink'
+            )
+        if pn_type:
+            cli += ' type ' + pn_type
+        if dest_host:
+            cli += ' dest-host ' + dest_host
+        if community:
+            cli += ' community ' + community
+        if dest_port:
+            cli += ' dest-port ' + dest_port
+
+    if command == 'snmp-trap-sink-delete':
+        if VALUE_EXISTS is None:
+            module.fail_json(
+                failed=True,
+                msg='snmp community does not exist to delete trap sink'
+            )
+        if VALUE_EXISTS is False:
+            module.exit_json(
+                skipped=True,
+                msg='snmp-trap-sink with community %s does not exist with dest-host %s' % (community, dest_host)
+            )
+        if community:
+            cli += ' community ' + community
+        if dest_host:
+            cli += ' dest-host ' + dest_host
+        if dest_port:
+            cli += ' dest-port ' + dest_port
+
+    run_cli(module, cli, state_map)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/network/netvisor/pn_snmp_vacm.py b/plugins/modules/network/netvisor/pn_snmp_vacm.py
new file mode 100644
index 0000000000..b3047bc77f
--- /dev/null
+++ b/plugins/modules/network/netvisor/pn_snmp_vacm.py
@@ -0,0 +1,229 @@
+#!/usr/bin/python
+# Copyright: (c) 2018, Pluribus Networks
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: pn_snmp_vacm
+author: "Pluribus Networks (@rajaspachipulusu17)" +short_description: CLI command to create/modify/delete snmp-vacm +description: + - This module can be used to create View Access Control Models (VACM), + modify VACM and delete VACM. +options: + pn_cliswitch: + description: + - Target switch to run the CLI on. + type: str + required: false + state: + description: + - State the action to perform. Use C(present) to create snmp-vacm and + C(absent) to delete snmp-vacm and C(update) to modify snmp-vacm. + type: str + required: true + choices: ['present', 'absent', 'update'] + pn_oid_restrict: + description: + - restrict OID. + type: str + pn_priv: + description: + - privileges. + type: bool + pn_auth: + description: + - authentication required. + type: bool + pn_user_type: + description: + - SNMP user type. + type: str + choices: ['rouser', 'rwuser'] + pn_user_name: + description: + - SNMP administrator name. + type: str +''' + +EXAMPLES = """ +- name: create snmp vacm + pn_snmp_vacm: + pn_cliswitch: "sw01" + state: "present" + pn_user_name: "foo" + pn_user_type: "rouser" + +- name: update snmp vacm + pn_snmp_vacm: + pn_cliswitch: "sw01" + state: "update" + pn_user_name: "foo" + pn_user_type: "rwuser" + +- name: delete snmp vacm + pn_snmp_vacm: + pn_cliswitch: "sw01" + state: "absent" + pn_user_name: "foo" +""" + +RETURN = """ +command: + description: the CLI command run on the target node. + returned: always + type: str +stdout: + description: set of responses from the snmp-vacm command. + returned: always + type: list +stderr: + description: set of error responses from the snmp-vacm command. + returned: on error + type: list +changed: + description: indicates whether the CLI caused changes on the target. + returned: always + type: bool +""" + + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.network.netvisor.pn_nvos import pn_cli, run_cli, booleanArgs +from ansible_collections.community.general.plugins.module_utils.network.netvisor.netvisor import run_commands + + +def check_cli(module, cli): + """ + This method checks for idempotency using the snmp-vacm-show command. + If a user with given name exists, return True else False. 
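+    Returns None when the SNMP user does not exist at all, True when a
+    matching VACM entry exists, and False otherwise.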
+    :param module: The Ansible module to fetch input parameters
+    :param cli: The CLI string
+    """
+    user_name = module.params['pn_user_name']
+    show = cli
+
+    cli += ' snmp-user-show format user-name no-show-headers'
+    rc, out, err = run_commands(module, cli)
+
+    if not out or user_name not in out.split():
+        return None
+
+    cli = show
+    cli += ' snmp-vacm-show format user-name no-show-headers'
+    out = run_commands(module, cli)[1]
+
+    if out:
+        out = out.split()
+
+    return True if user_name in out else False
+
+
+def main():
+    """ This section is for arguments parsing """
+
+    state_map = dict(
+        present='snmp-vacm-create',
+        absent='snmp-vacm-delete',
+        update='snmp-vacm-modify'
+    )
+
+    module = AnsibleModule(
+        argument_spec=dict(
+            pn_cliswitch=dict(required=False, type='str'),
+            state=dict(required=True, type='str',
+                       choices=state_map.keys()),
+            pn_oid_restrict=dict(required=False, type='str'),
+            pn_priv=dict(required=False, type='bool'),
+            pn_auth=dict(required=False, type='bool'),
+            pn_user_type=dict(required=False, type='str',
+                              choices=['rouser', 'rwuser']),
+            pn_user_name=dict(required=False, type='str'),
+        ),
+        required_if=(
+            ["state", "present", ["pn_user_name"]],
+            ["state", "absent", ["pn_user_name"]],
+            ["state", "update", ["pn_user_name"]]
+        )
+
+    )
+
+    # Accessing the arguments
+    cliswitch = module.params['pn_cliswitch']
+    state = module.params['state']
+    oid_restrict = module.params['pn_oid_restrict']
+    priv = module.params['pn_priv']
+    auth = module.params['pn_auth']
+    user_type = module.params['pn_user_type']
+    user_name = module.params['pn_user_name']
+
+    command = state_map[state]
+
+    # Building the CLI command string
+    cli = pn_cli(module, cliswitch)
+
+    USER_EXISTS = check_cli(module, cli)
+    cli += ' %s user-name %s ' % (command, user_name)
+
+    if command == 'snmp-vacm-modify':
+        if USER_EXISTS is None:
+            module.fail_json(
+                failed=True,
+                msg='snmp user with name %s does not exist' % user_name
+            )
+        if USER_EXISTS is False:
+            module.fail_json(
+                failed=True,
+                msg='snmp vacm with name %s does not exist' % user_name
+            )
+
+    if command == 'snmp-vacm-delete':
+        if USER_EXISTS is None:
+            module.fail_json(
+                failed=True,
+                msg='snmp user with name %s does not exist' % user_name
+            )
+
+        if USER_EXISTS is False:
+            module.exit_json(
+                skipped=True,
+                msg='snmp vacm with name %s does not exist' % user_name
+            )
+
+    if command == 'snmp-vacm-create':
+        if USER_EXISTS is None:
+            module.fail_json(
+                failed=True,
+                msg='snmp user with name %s does not exist' % user_name
+            )
+        if USER_EXISTS is True:
+            module.exit_json(
+                skipped=True,
+                msg='snmp vacm with name %s already exists' % user_name
+            )
+
+    if command != 'snmp-vacm-delete':
+        if oid_restrict:
+            cli += ' oid-restrict ' + oid_restrict
+        if user_type:
+            cli += ' user-type ' + user_type
+
+        cli += booleanArgs(auth, 'auth', 'no-auth')
+        cli += booleanArgs(priv, 'priv', 'no-priv')
+
+    run_cli(module, cli, state_map)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/network/netvisor/pn_stp.py b/plugins/modules/network/netvisor/pn_stp.py
new file mode 100644
index 0000000000..29421e62c5
--- /dev/null
+++ b/plugins/modules/network/netvisor/pn_stp.py
@@ -0,0 +1,204 @@
+#!/usr/bin/python
+# Copyright: (c) 2018, Pluribus Networks
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: pn_stp
+author: "Pluribus Networks (@rajaspachipulusu17)"
+short_description: CLI command to modify stp
+description:
+  - This module can be used to modify Spanning Tree Protocol parameters.
+options:
+  pn_cliswitch:
+    description:
+      - Target switch to run the CLI on.
+    type: str
+    required: false
+  state:
+    description:
+      - State the action to perform. Use C(update) to modify stp.
+    type: str
+    required: true
+    choices: ['update']
+  pn_hello_time:
+    description:
+      - STP hello time between 1 and 10 secs.
+    type: str
+    default: '2'
+  pn_enable:
+    description:
+      - enable or disable STP.
+    type: bool
+  pn_root_guard_wait_time:
+    description:
+      - root guard wait time between 0 and 300 secs. 0 to disable wait.
+    type: str
+    default: '20'
+  pn_bpdus_bridge_ports:
+    description:
+      - BPDU packets to bridge specific port.
+    type: bool
+  pn_mst_max_hops:
+    description:
+      - maximum hop count for mstp bpdu.
+    type: str
+    default: '20'
+  pn_bridge_id:
+    description:
+      - STP bridge id.
+    type: str
+  pn_max_age:
+    description:
+      - maximum age time between 6 and 40 secs.
+    type: str
+    default: '20'
+  pn_stp_mode:
+    description:
+      - STP mode.
+    type: str
+    choices: ['rstp', 'mstp']
+  pn_mst_config_name:
+    description:
+      - Name for MST Configuration Instance.
+    type: str
+  pn_forwarding_delay:
+    description:
+      - STP forwarding delay between 4 and 30 secs.
+    type: str
+    default: '15'
+  pn_bridge_priority:
+    description:
+      - STP bridge priority.
+    type: str
+    default: '32768'
+'''
+
+EXAMPLES = """
+- name: Modify stp
+  pn_stp:
+    pn_cliswitch: "sw01"
+    state: "update"
+    pn_hello_time: "3"
+    pn_stp_mode: "rstp"
+"""
+
+RETURN = """
+command:
+  description: the CLI command run on the target node.
+  returned: always
+  type: str
+stdout:
+  description: set of responses from the stp command.
+  returned: always
+  type: list
+stderr:
+  description: set of error responses from the stp command.
+  returned: on error
+  type: list
+changed:
+  description: indicates whether the CLI caused changes on the target.
+ returned: always + type: bool +""" + + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.network.netvisor.pn_nvos import pn_cli, run_cli, booleanArgs + + +def main(): + """ This section is for arguments parsing """ + + state_map = dict( + update='stp-modify' + ) + + module = AnsibleModule( + argument_spec=dict( + pn_cliswitch=dict(required=False, type='str'), + state=dict(required=True, type='str', + choices=state_map.keys()), + pn_hello_time=dict(required=False, type='str', default='2'), + pn_enable=dict(required=False, type='bool'), + pn_root_guard_wait_time=dict(required=False, type='str', default='20'), + pn_bpdus_bridge_ports=dict(required=False, type='bool'), + pn_mst_max_hops=dict(required=False, type='str', default='20'), + pn_bridge_id=dict(required=False, type='str'), + pn_max_age=dict(required=False, type='str', default='20'), + pn_stp_mode=dict(required=False, type='str', + choices=['rstp', 'mstp']), + pn_mst_config_name=dict(required=False, type='str'), + pn_forwarding_delay=dict(required=False, type='str', default='15'), + pn_bridge_priority=dict(required=False, type='str', default='32768'), + ), + required_one_of=[['pn_enable', 'pn_hello_time', + 'pn_root_guard_wait_time', + 'pn_bpdus_bridge_ports', + 'pn_mst_max_hops', + 'pn_bridge_id', + 'pn_max_age', + 'pn_stp_mode', + 'pn_mst_config_name', + 'pn_forwarding_delay', + 'pn_bridge_priority']] + ) + + # Accessing the arguments + cliswitch = module.params['pn_cliswitch'] + state = module.params['state'] + hello_time = module.params['pn_hello_time'] + enable = module.params['pn_enable'] + root_guard_wait_time = module.params['pn_root_guard_wait_time'] + bpdus_bridge_ports = module.params['pn_bpdus_bridge_ports'] + mst_max_hops = module.params['pn_mst_max_hops'] + bridge_id = module.params['pn_bridge_id'] + max_age = module.params['pn_max_age'] + stp_mode = module.params['pn_stp_mode'] + mst_config_name = module.params['pn_mst_config_name'] + forwarding_delay = module.params['pn_forwarding_delay'] + bridge_priority = module.params['pn_bridge_priority'] + + command = state_map[state] + + # Building the CLI command string + cli = pn_cli(module, cliswitch) + + if command == 'stp-modify': + cli += ' %s ' % command + if hello_time: + cli += ' hello-time ' + hello_time + if root_guard_wait_time: + cli += ' root-guard-wait-time ' + root_guard_wait_time + if mst_max_hops: + cli += ' mst-max-hops ' + mst_max_hops + if bridge_id: + cli += ' bridge-id ' + bridge_id + if max_age: + cli += ' max-age ' + max_age + if stp_mode: + cli += ' stp-mode ' + stp_mode + if mst_config_name: + cli += ' mst-config-name ' + mst_config_name + if forwarding_delay: + cli += ' forwarding-delay ' + forwarding_delay + if bridge_priority: + cli += ' bridge-priority ' + bridge_priority + + cli += booleanArgs(enable, 'enable', 'disable') + cli += booleanArgs(bpdus_bridge_ports, 'bpdus-bridge-ports', 'bpdus-all-ports') + + run_cli(module, cli, state_map) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/netvisor/pn_stp_port.py b/plugins/modules/network/netvisor/pn_stp_port.py new file mode 100644 index 0000000000..013ba07ec3 --- /dev/null +++ b/plugins/modules/network/netvisor/pn_stp_port.py @@ -0,0 +1,195 @@ +#!/usr/bin/python +# Copyright: (c) 2018, Pluribus Networks +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + 
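Editor's note: pn_stp above (and most of the netvisor modules in this commit) builds its argument string with the shared booleanArgs helper imported from plugins/module_utils/network/netvisor/pn_nvos.py. The module_utils file is outside this hunk, so the following is only a minimal sketch of the helper's presumed behavior, using the same name and call shape as the imports above:

def booleanArgs(arg, true_string, false_string):
    # Map a tri-state module parameter onto Netvisor CLI keywords:
    # True  -> the positive keyword (e.g. 'enable'),
    # False -> the negative keyword (e.g. 'disable'),
    # None  -> parameter was omitted, emit nothing.
    if arg is True:
        return ' %s ' % true_string
    elif arg is False:
        return ' %s ' % false_string
    else:
        return ''

For example, booleanArgs(True, 'enable', 'disable') yields ' enable ', while booleanArgs(None, 'enable', 'disable') returns an empty string, which is what keeps omitted parameters out of the command sent to the switch.
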
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: pn_stp_port
+author: "Pluribus Networks (@rajaspachipulusu17)"
+short_description: CLI command to modify stp-port.
+description:
+  - This module can be used to modify Spanning Tree Protocol (STP) parameters on ports.
+options:
+  pn_cliswitch:
+    description:
+      - Target switch to run the CLI on.
+    type: str
+    required: false
+  state:
+    description:
+      - State the action to perform. Use C(update) to update stp-port.
+    type: str
+    required: true
+    choices: ['update']
+  pn_priority:
+    description:
+      - STP port priority from 0 to 240.
+    type: str
+    default: '128'
+  pn_cost:
+    description:
+      - STP port cost from 1 to 200000000.
+    type: str
+    default: '2000'
+  pn_root_guard:
+    description:
+      - STP port root guard.
+    type: bool
+  pn_filter:
+    description:
+      - STP port filters BPDUs.
+    type: bool
+  pn_edge:
+    description:
+      - STP port is an edge port.
+    type: bool
+  pn_bpdu_guard:
+    description:
+      - STP port BPDU guard.
+    type: bool
+  pn_port:
+    description:
+      - STP port.
+    type: str
+  pn_block:
+    description:
+      - Specify if an STP port blocks BPDUs.
+    type: bool
+'''
+
+EXAMPLES = """
+- name: Modify stp port
+  pn_stp_port:
+    pn_cliswitch: "sw01"
+    state: "update"
+    pn_port: "1"
+    pn_filter: True
+    pn_priority: '144'
+
+- name: Modify stp port
+  pn_stp_port:
+    pn_cliswitch: "sw01"
+    state: "update"
+    pn_port: "1"
+    pn_cost: "200"
+
+- name: Modify stp port
+  pn_stp_port:
+    pn_cliswitch: "sw01"
+    state: "update"
+    pn_port: "1"
+    pn_edge: True
+    pn_cost: "200"
+
+"""
+
+RETURN = """
+command:
+  description: the CLI command run on the target node.
+  returned: always
+  type: str
+stdout:
+  description: set of responses from the stp-port command.
+  returned: always
+  type: list
+stderr:
+  description: set of error responses from the stp-port command.
+  returned: on error
+  type: list
+changed:
+  description: indicates whether the CLI caused changes on the target.
+  returned: always
+  type: bool
+"""
+
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.network.netvisor.pn_nvos import pn_cli, run_cli, booleanArgs
+
+
+def main():
+    """ This section is for arguments parsing """
+
+    state_map = dict(
+        update='stp-port-modify'
+    )
+
+    module = AnsibleModule(
+        argument_spec=dict(
+            pn_cliswitch=dict(required=False, type='str'),
+            state=dict(required=True, type='str',
+                       choices=state_map.keys()),
+            pn_priority=dict(required=False, type='str', default='128'),
+            pn_cost=dict(required=False, type='str', default='2000'),
+            pn_root_guard=dict(required=False, type='bool'),
+            pn_filter=dict(required=False, type='bool'),
+            pn_edge=dict(required=False, type='bool'),
+            pn_bpdu_guard=dict(required=False, type='bool'),
+            pn_port=dict(required=False, type='str'),
+            pn_block=dict(required=False, type='bool'),
+        ),
+        required_if=(
+            ["state", "update", ["pn_port"]],
+        ),
+        required_one_of=(
+            ['pn_cost', 'pn_root_guard', 'pn_filter',
+             'pn_edge', 'pn_bpdu_guard', 'pn_block'],
+        ),
+    )
+
+    # Accessing the arguments
+    cliswitch = module.params['pn_cliswitch']
+    state = module.params['state']
+    priority = module.params['pn_priority']
+    cost = module.params['pn_cost']
+    root_guard = module.params['pn_root_guard']
+    pn_filter = module.params['pn_filter']
+    edge = module.params['pn_edge']
+    bpdu_guard = module.params['pn_bpdu_guard']
+    port = module.params['pn_port']
+    block = module.params['pn_block']
+
+    command = state_map[state]
+
+    # Building the CLI command string
+    cli = pn_cli(module, cliswitch)
+
+    if command == 'stp-port-modify':
+        cli += ' %s ' % command
+        if priority and (int(priority) % 16 == 0 and int(priority) < 240):
+            cli += ' priority ' + priority
+        else:
+            module.fail_json(
+                failed=True,
+                msg='Priority must be a multiple of 16 and less than 240'
+            )
+        # Validate the lower bound as well, so the check matches the
+        # documented range of 1 to 200000000.
+        if cost and (1 <= int(cost) <= 200000000):
+            cli += ' cost ' + cost
+        else:
+            module.fail_json(
+                failed=True,
+                msg='cost must be between 1 and 200000000'
+            )
+        if port:
+            cli += ' port ' + port
+
+        cli += booleanArgs(root_guard, 'root-guard', 'no-root-guard')
+        cli += booleanArgs(pn_filter, 'filter', 'no-filter')
+        cli += booleanArgs(edge, 'edge', 'no-edge')
+        cli += booleanArgs(bpdu_guard, 'bpdu-guard', 'no-bpdu-guard')
+        cli += booleanArgs(block, 'block', 'no-block')
+
+    run_cli(module, cli, state_map)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/network/netvisor/pn_switch_setup.py b/plugins/modules/network/netvisor/pn_switch_setup.py
new file mode 100644
index 0000000000..b79df53971
--- /dev/null
+++ b/plugins/modules/network/netvisor/pn_switch_setup.py
@@ -0,0 +1,412 @@
+#!/usr/bin/python
+# Copyright: (c) 2018, Pluribus Networks
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: pn_switch_setup
+author: "Pluribus Networks (@rajaspachipulusu17)"
+short_description: CLI command to modify switch-setup
+description:
+  - This module can be used to modify switch setup.
+options:
+  pn_cliswitch:
+    description:
+      - Target switch to run the CLI on.
+    required: false
+    type: str
+  state:
+    description:
+      - State the action to perform. Use C(update) to modify the switch-setup.
+ required: true + type: str + choices: ['update'] + pn_force: + description: + - Force analytics-store change even if it involves removing data. + required: false + type: bool + pn_dns_ip: + description: + - DNS IP address. + required: false + type: str + pn_mgmt_netmask: + description: + - Netmask. + required: false + type: str + pn_gateway_ip6: + description: + - Gateway IPv6 address. + required: false + type: str + pn_in_band_ip6_assign: + description: + - Data IPv6 address assignment. + required: false + type: str + choices: ['none', 'autoconf'] + pn_domain_name: + description: + - Domain name. + required: false + type: str + pn_timezone: + description: + - Timezone to be configured. + required: false + type: str + pn_in_band_netmask: + description: + - Data in-band netmask. + required: false + type: str + pn_in_band_ip6: + description: + - Data in-band IPv6 address. + required: false + type: str + pn_in_band_netmask_ip6: + description: + - Data in-band IPv6 netmask. + required: false + type: str + pn_motd: + description: + - Message of the Day. + required: false + type: str + pn_loopback_ip6: + description: + - loopback IPv6 address. + required: false + type: str + pn_mgmt_ip6_assignment: + description: + - IPv6 address assignment. + required: false + choices: ['none', 'autoconf'] + pn_ntp_secondary_server: + description: + - Secondary NTP server. + required: false + type: str + pn_in_band_ip: + description: + - data in-band IP address. + required: false + type: str + pn_eula_accepted: + description: + - Accept EULA. + required: false + type: str + choices: ['true', 'false'] + pn_mgmt_ip: + description: + - Management IP address. + required: false + type: str + pn_ntp_server: + description: + - NTP server. + required: false + type: str + pn_mgmt_ip_assignment: + description: + - IP address assignment. + required: false + type: str + choices: ['none', 'dhcp'] + pn_date: + description: + - Date. + required: false + type: str + pn_password: + description: + - plain text password. + required: false + type: str + pn_banner: + description: + - Banner to display on server-switch. + required: false + type: str + pn_loopback_ip: + description: + - loopback IPv4 address. + required: false + type: str + pn_dns_secondary_ip: + description: + - secondary DNS IP address. + required: false + type: str + pn_switch_name: + description: + - switch name. + required: false + type: str + pn_eula_timestamp: + description: + - EULA timestamp. + required: false + type: str + pn_mgmt_netmask_ip6: + description: + - IPv6 netmask. + required: false + type: str + pn_enable_host_ports: + description: + - Enable host ports by default. + required: false + type: bool + pn_mgmt_ip6: + description: + - IPv6 address. + required: false + type: str + pn_analytics_store: + description: + - type of disk storage for analytics. + required: false + type: str + choices: ['default', 'optimized'] + pn_gateway_ip: + description: + - gateway IPv4 address. + required: false + type: str +''' + +EXAMPLES = """ +- name: Modify switch + pn_switch_setup: + pn_cliswitch: "sw01" + state: "update" + pn_timezone: "America/New_York" + pn_in_band_ip: "20.20.1.1" + pn_in_band_netmask: "24" + +- name: Modify switch + pn_switch_setup: + pn_cliswitch: "sw01" + state: "update" + pn_in_band_ip6: "2001:0db8:85a3::8a2e:0370:7334" + pn_in_band_netmask_ip6: "127" +""" + +RETURN = """ +command: + description: the CLI command run on the target node. + returned: always + type: str +stdout: + description: set of responses from the switch-setup command. 
+ returned: always + type: list +stderr: + description: set of error responses from the switch-setup command. + returned: on error + type: list +changed: + description: indicates whether the CLI caused changes on the target. + returned: always + type: bool +""" + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.network.netvisor.pn_nvos import pn_cli, booleanArgs, run_cli + + +def main(): + """ This section is for arguments parsing """ + + state_map = dict( + update='switch-setup-modify' + ) + + module = AnsibleModule( + argument_spec=dict( + pn_cliswitch=dict(required=False, type='str'), + state=dict(required=True, type='str', + choices=['update']), + pn_force=dict(required=False, type='bool'), + pn_dns_ip=dict(required=False, type='str'), + pn_mgmt_netmask=dict(required=False, type='str'), + pn_gateway_ip6=dict(required=False, type='str'), + pn_in_band_ip6_assign=dict(required=False, type='str', + choices=['none', 'autoconf']), + pn_domain_name=dict(required=False, type='str'), + pn_timezone=dict(required=False, type='str'), + pn_in_band_netmask=dict(required=False, type='str'), + pn_in_band_ip6=dict(required=False, type='str'), + pn_in_band_netmask_ip6=dict(required=False, type='str'), + pn_motd=dict(required=False, type='str'), + pn_loopback_ip6=dict(required=False, type='str'), + pn_mgmt_ip6_assignment=dict(required=False, type='str', + choices=['none', 'autoconf']), + pn_ntp_secondary_server=dict(required=False, type='str'), + pn_in_band_ip=dict(required=False, type='str'), + pn_eula_accepted=dict(required=False, type='str', + choices=['true', 'false']), + pn_mgmt_ip=dict(required=False, type='str'), + pn_ntp_server=dict(required=False, type='str'), + pn_mgmt_ip_assignment=dict(required=False, type='str', + choices=['none', 'dhcp']), + pn_date=dict(required=False, type='str'), + pn_password=dict(required=False, type='str', no_log=True), + pn_banner=dict(required=False, type='str'), + pn_loopback_ip=dict(required=False, type='str'), + pn_dns_secondary_ip=dict(required=False, type='str'), + pn_switch_name=dict(required=False, type='str'), + pn_eula_timestamp=dict(required=False, type='str'), + pn_mgmt_netmask_ip6=dict(required=False, type='str'), + pn_enable_host_ports=dict(required=False, type='bool'), + pn_mgmt_ip6=dict(required=False, type='str'), + pn_analytics_store=dict(required=False, type='str', + choices=['default', 'optimized']), + pn_gateway_ip=dict(required=False, type='str'), + ), + required_one_of=[['pn_force', 'pn_dns_ip', 'pn_mgmt_netmask', + 'pn_gateway_ip6', 'pn_in_band_ip6_assign', + 'pn_domain_name', 'pn_timezone', + 'pn_in_band_netmask', 'pn_in_band_ip6', + 'pn_in_band_netmask_ip6', 'pn_motd', + 'pn_loopback_ip6', 'pn_mgmt_ip6_assignment', + 'pn_ntp_secondary_server', 'pn_in_band_ip', + 'pn_eula_accepted', 'pn_mgmt_ip', + 'pn_ntp_server', 'pn_mgmt_ip_assignment', + 'pn_date', 'pn_password', + 'pn_banner', 'pn_loopback_ip', + 'pn_dns_secondary_ip', 'pn_switch_name', + 'pn_eula_timestamp', 'pn_mgmt_netmask_ip6', + 'pn_enable_host_ports', 'pn_mgmt_ip6', + 'pn_analytics_store', 'pn_gateway_ip']], + required_together=[['pn_in_band_ip6', 'pn_in_band_netmask_ip6'], + ['pn_in_band_ip', 'pn_in_band_netmask'], + ['pn_mgmt_ip', 'pn_mgmt_netmask'], + ['pn_mgmt_ip6', 'pn_mgmt_netmask_ip6']], + ) + + # Accessing the arguments + cliswitch = module.params['pn_cliswitch'] + state = module.params['state'] + force = module.params['pn_force'] + dns_ip = module.params['pn_dns_ip'] + mgmt_netmask = 
module.params['pn_mgmt_netmask'] + gateway_ip6 = module.params['pn_gateway_ip6'] + in_band_ip6_assign = module.params['pn_in_band_ip6_assign'] + domain_name = module.params['pn_domain_name'] + timezone = module.params['pn_timezone'] + in_band_netmask = module.params['pn_in_band_netmask'] + in_band_ip6 = module.params['pn_in_band_ip6'] + in_band_netmask_ip6 = module.params['pn_in_band_netmask_ip6'] + motd = module.params['pn_motd'] + loopback_ip6 = module.params['pn_loopback_ip6'] + mgmt_ip6_assignment = module.params['pn_mgmt_ip6_assignment'] + ntp_secondary_server = module.params['pn_ntp_secondary_server'] + in_band_ip = module.params['pn_in_band_ip'] + eula_accepted = module.params['pn_eula_accepted'] + mgmt_ip = module.params['pn_mgmt_ip'] + ntp_server = module.params['pn_ntp_server'] + mgmt_ip_assignment = module.params['pn_mgmt_ip_assignment'] + date = module.params['pn_date'] + password = module.params['pn_password'] + banner = module.params['pn_banner'] + loopback_ip = module.params['pn_loopback_ip'] + dns_secondary_ip = module.params['pn_dns_secondary_ip'] + switch_name = module.params['pn_switch_name'] + eula_timestamp = module.params['pn_eula_timestamp'] + mgmt_netmask_ip6 = module.params['pn_mgmt_netmask_ip6'] + enable_host_ports = module.params['pn_enable_host_ports'] + mgmt_ip6 = module.params['pn_mgmt_ip6'] + analytics_store = module.params['pn_analytics_store'] + gateway_ip = module.params['pn_gateway_ip'] + + command = state_map[state] + + # Building the CLI command string + cli = pn_cli(module, cliswitch) + + if command == 'switch-setup-modify': + cli += ' %s ' % command + if dns_ip: + cli += ' dns-ip ' + dns_ip + if mgmt_netmask: + cli += ' mgmt-netmask ' + mgmt_netmask + if gateway_ip6: + cli += ' gateway-ip6 ' + gateway_ip6 + if in_band_ip6_assign: + cli += ' in-band-ip6-assign ' + in_band_ip6_assign + if domain_name: + cli += ' domain-name ' + domain_name + if timezone: + cli += ' timezone ' + timezone + if in_band_netmask: + cli += ' in-band-netmask ' + in_band_netmask + if in_band_ip6: + cli += ' in-band-ip6 ' + in_band_ip6 + if in_band_netmask_ip6: + cli += ' in-band-netmask-ip6 ' + in_band_netmask_ip6 + if motd: + cli += ' motd ' + motd + if loopback_ip6: + cli += ' loopback-ip6 ' + loopback_ip6 + if mgmt_ip6_assignment: + cli += ' mgmt-ip6-assignment ' + mgmt_ip6_assignment + if ntp_secondary_server: + cli += ' ntp-secondary-server ' + ntp_secondary_server + if in_band_ip: + cli += ' in-band-ip ' + in_band_ip + if eula_accepted: + cli += ' eula-accepted ' + eula_accepted + if mgmt_ip: + cli += ' mgmt-ip ' + mgmt_ip + if ntp_server: + cli += ' ntp-server ' + ntp_server + if mgmt_ip_assignment: + cli += ' mgmt-ip-assignment ' + mgmt_ip_assignment + if date: + cli += ' date ' + date + if password: + cli += ' password ' + password + if banner: + cli += ' banner ' + banner + if loopback_ip: + cli += ' loopback-ip ' + loopback_ip + if dns_secondary_ip: + cli += ' dns-secondary-ip ' + dns_secondary_ip + if switch_name: + cli += ' switch-name ' + switch_name + if eula_timestamp: + cli += ' eula_timestamp ' + eula_timestamp + if mgmt_netmask_ip6: + cli += ' mgmt-netmask-ip6 ' + mgmt_netmask_ip6 + if mgmt_ip6: + cli += ' mgmt-ip6 ' + mgmt_ip6 + if analytics_store: + cli += ' analytics-store ' + analytics_store + if gateway_ip: + cli += ' gateway-ip ' + gateway_ip + + cli += booleanArgs(force, 'force', 'no-force') + cli += booleanArgs(enable_host_ports, 'enable-host-ports', 'disable-host-ports') + + run_cli(module, cli, state_map) + + +if __name__ == '__main__': + main() 
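Editor's note: the switch-setup-modify handler above appends one ' token value' pair per supplied parameter through a long run of if blocks. The same mapping can be expressed as a data table. The sketch below is a hypothetical refactor, not part of the module: PARAM_TO_TOKEN and build_cli_args are names invented here, and only a few representative entries are listed.

# Hypothetical, table-driven equivalent of the if-chain in pn_switch_setup.
PARAM_TO_TOKEN = {
    'pn_dns_ip': 'dns-ip',
    'pn_mgmt_netmask': 'mgmt-netmask',
    'pn_gateway_ip6': 'gateway-ip6',
    'pn_timezone': 'timezone',
    'pn_ntp_server': 'ntp-server',
    # ...one entry per string-valued module parameter
}

def build_cli_args(params):
    """Return ' token value' for every string parameter that was supplied."""
    parts = []
    for param, token in sorted(PARAM_TO_TOKEN.items()):
        value = params.get(param)
        if value:
            parts.append(' %s %s' % (token, value))
    return ''.join(parts)

With such a table, 'cli += build_cli_args(module.params)' would replace the string-valued if blocks, leaving booleanArgs to handle pn_force and pn_enable_host_ports as before.
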
diff --git a/plugins/modules/network/netvisor/pn_trunk.py b/plugins/modules/network/netvisor/pn_trunk.py new file mode 100644 index 0000000000..69236c6fbc --- /dev/null +++ b/plugins/modules/network/netvisor/pn_trunk.py @@ -0,0 +1,464 @@ +#!/usr/bin/python +""" PN CLI trunk-create/trunk-delete/trunk-modify """ + +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['deprecated'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: pn_trunk +author: "Pluribus Networks (@amitsi)" +short_description: CLI command to create/delete/modify a trunk. +deprecated: + removed_in: '2.12' + why: Doesn't support latest Pluribus Networks netvisor + alternative: Latest modules will be pushed in Ansible future versions. +description: + - Execute trunk-create or trunk-delete command. + - Trunks can be used to aggregate network links at Layer 2 on the local + switch. Use this command to create a new trunk. +options: + pn_cliusername: + description: + - Provide login username if user is not root. + required: False + pn_clipassword: + description: + - Provide login password if user is not root. + required: False + pn_cliswitch: + description: + - Target switch(es) to run the cli on. + required: False + default: 'local' + state: + description: + - State the action to perform. Use 'present' to create trunk, + 'absent' to delete trunk and 'update' to modify trunk. + required: True + choices: ['present', 'absent', 'update'] + pn_name: + description: + - Specify the name for the trunk configuration. + required: true + pn_ports: + description: + - Specify the port number(s) for the link(s) to aggregate into the trunk. + - Required for trunk-create. + pn_speed: + description: + - Specify the port speed or disable the port. + choices: ['disable', '10m', '100m', '1g', '2.5g', '10g', '40g'] + pn_egress_rate_limit: + description: + - Specify an egress port data rate limit for the configuration. + pn_jumbo: + description: + - Specify if the port can receive jumbo frames. + type: bool + pn_lacp_mode: + description: + - Specify the LACP mode for the configuration. + choices: ['off', 'passive', 'active'] + pn_lacp_priority: + description: + - Specify the LACP priority. This is a number between 1 and 65535 with a + default value of 32768. + pn_lacp_timeout: + description: + - Specify the LACP time out as slow (30 seconds) or fast (4seconds). + The default value is slow. + choices: ['slow', 'fast'] + pn_lacp_fallback: + description: + - Specify the LACP fallback mode as bundles or individual. + choices: ['bundle', 'individual'] + pn_lacp_fallback_timeout: + description: + - Specify the LACP fallback timeout in seconds. The range is between 30 + and 60 seconds with a default value of 50 seconds. + pn_edge_switch: + description: + - Specify if the switch is an edge switch. + type: bool + pn_pause: + description: + - Specify if pause frames are sent. 
+ type: bool + pn_description: + description: + - Specify a description for the trunk configuration. + pn_loopback: + description: + - Specify loopback if you want to use loopback. + type: bool + pn_mirror_receive: + description: + - Specify if the configuration receives mirrored traffic. + type: bool + pn_unknown_ucast_level: + description: + - Specify an unknown unicast level in percent. The default value is 100%. + pn_unknown_mcast_level: + description: + - Specify an unknown multicast level in percent. The default value is 100%. + pn_broadcast_level: + description: + - Specify a broadcast level in percent. The default value is 100%. + pn_port_macaddr: + description: + - Specify the MAC address of the port. + pn_loopvlans: + description: + - Specify a list of looping vlans. + pn_routing: + description: + - Specify if the port participates in routing on the network. + type: bool + pn_host: + description: + - Host facing port control setting. + type: bool +''' + +EXAMPLES = """ +- name: create trunk + pn_trunk: + state: 'present' + pn_name: 'spine-to-leaf' + pn_ports: '11,12,13,14' + +- name: delete trunk + pn_trunk: + state: 'absent' + pn_name: 'spine-to-leaf' +""" + +RETURN = """ +command: + description: The CLI command run on the target node(s). + returned: always + type: str +stdout: + description: The set of responses from the trunk command. + returned: always + type: list +stderr: + description: The set of error responses from the trunk command. + returned: on error + type: list +changed: + description: Indicates whether the CLI caused changes on the target. + returned: always + type: bool +""" + +import shlex + +# Ansible boiler-plate +from ansible.module_utils.basic import AnsibleModule + +TRUNK_EXISTS = None + + +def pn_cli(module): + """ + This method is to generate the cli portion to launch the Netvisor cli. + It parses the username, password, switch parameters from module. + :param module: The Ansible module to fetch username, password and switch + :return: returns the cli string for further processing + """ + username = module.params['pn_cliusername'] + password = module.params['pn_clipassword'] + cliswitch = module.params['pn_cliswitch'] + + if username and password: + cli = '/usr/bin/cli --quiet --user %s:%s ' % (username, password) + else: + cli = '/usr/bin/cli --quiet ' + + if cliswitch == 'local': + cli += ' switch-local ' + else: + cli += ' switch ' + cliswitch + return cli + + +def check_cli(module, cli): + """ + This method checks for idempotency using the trunk-show command. + If a trunk with given name exists, return TRUNK_EXISTS as True else False. + :param module: The Ansible module to fetch input parameters + :param cli: The CLI string + :return Global Booleans: TRUNK_EXISTS + """ + name = module.params['pn_name'] + + show = cli + ' trunk-show format switch,name no-show-headers' + show = shlex.split(show) + out = module.run_command(show)[1] + + out = out.split() + # Global flags + global TRUNK_EXISTS + if name in out: + TRUNK_EXISTS = True + else: + TRUNK_EXISTS = False + + +def run_cli(module, cli): + """ + This method executes the cli command on the target node(s) and returns the + output. The module then exits based on the output. + :param cli: the complete cli string to be executed on the target node(s). 
+ :param module: The Ansible module to fetch command + """ + cliswitch = module.params['pn_cliswitch'] + state = module.params['state'] + command = get_command_from_state(state) + + cmd = shlex.split(cli) + + # 'out' contains the output + # 'err' contains the error messages + result, out, err = module.run_command(cmd) + + print_cli = cli.split(cliswitch)[1] + + # Response in JSON format + if result != 0: + module.exit_json( + command=print_cli, + stderr=err.strip(), + msg="%s operation failed" % command, + changed=False + ) + + if out: + module.exit_json( + command=print_cli, + stdout=out.strip(), + msg="%s operation completed" % command, + changed=True + ) + + else: + module.exit_json( + command=print_cli, + msg="%s operation completed" % command, + changed=True + ) + + +def get_command_from_state(state): + """ + This method gets appropriate command name for the state specified. It + returns the command name for the specified state. + :param state: The state for which the respective command name is required. + """ + command = None + if state == 'present': + command = 'trunk-create' + if state == 'absent': + command = 'trunk-delete' + if state == 'update': + command = 'trunk-modify' + return command + + +def main(): + """ This portion is for arguments parsing """ + module = AnsibleModule( + argument_spec=dict( + pn_cliusername=dict(required=False, type='str'), + pn_clipassword=dict(required=False, type='str', no_log=True), + pn_cliswitch=dict(required=False, type='str', default='local'), + state=dict(required=True, type='str', + choices=['present', 'absent', 'update']), + pn_name=dict(required=True, type='str'), + pn_ports=dict(type='str'), + pn_speed=dict(type='str', + choices=['disable', '10m', '100m', '1g', '2.5g', + '10g', '40g']), + pn_egress_rate_limit=dict(type='str'), + pn_jumbo=dict(type='bool'), + pn_lacp_mode=dict(type='str', choices=[ + 'off', 'passive', 'active']), + pn_lacp_priority=dict(type='int'), + pn_lacp_timeout=dict(type='str', choices=['slow', 'fast']), + pn_lacp_fallback=dict(type='str', choices=[ + 'bundle', 'individual']), + pn_lacp_fallback_timeout=dict(type='str'), + pn_edge_switch=dict(type='bool'), + pn_pause=dict(type='bool'), + pn_description=dict(type='str'), + pn_loopback=dict(type='bool'), + pn_mirror_receive=dict(type='bool'), + pn_unknown_ucast_level=dict(type='str'), + pn_unknown_mcast_level=dict(type='str'), + pn_broadcast_level=dict(type='str'), + pn_port_macaddr=dict(type='str'), + pn_loopvlans=dict(type='str'), + pn_routing=dict(type='bool'), + pn_host=dict(type='bool') + ), + required_if=( + ["state", "present", ["pn_name", "pn_ports"]], + ["state", "absent", ["pn_name"]], + ["state", "update", ["pn_name"]] + ) + ) + + # Accessing the arguments + state = module.params['state'] + name = module.params['pn_name'] + ports = module.params['pn_ports'] + speed = module.params['pn_speed'] + egress_rate_limit = module.params['pn_egress_rate_limit'] + jumbo = module.params['pn_jumbo'] + lacp_mode = module.params['pn_lacp_mode'] + lacp_priority = module.params['pn_lacp_priority'] + lacp_timeout = module.params['pn_lacp_timeout'] + lacp_fallback = module.params['pn_lacp_fallback'] + lacp_fallback_timeout = module.params['pn_lacp_fallback_timeout'] + edge_switch = module.params['pn_edge_switch'] + pause = module.params['pn_pause'] + description = module.params['pn_description'] + loopback = module.params['pn_loopback'] + mirror_receive = module.params['pn_mirror_receive'] + unknown_ucast_level = module.params['pn_unknown_ucast_level'] + unknown_mcast_level = 
module.params['pn_unknown_mcast_level']
+    broadcast_level = module.params['pn_broadcast_level']
+    port_macaddr = module.params['pn_port_macaddr']
+    loopvlans = module.params['pn_loopvlans']
+    routing = module.params['pn_routing']
+    host = module.params['pn_host']
+
+    command = get_command_from_state(state)
+
+    # Building the CLI command string
+    cli = pn_cli(module)
+
+    if command == 'trunk-delete':
+
+        check_cli(module, cli)
+        if TRUNK_EXISTS is False:
+            module.exit_json(
+                skipped=True,
+                msg='Trunk with name %s does not exist' % name
+            )
+        cli += ' %s name %s ' % (command, name)
+
+    else:
+        if command == 'trunk-create':
+            check_cli(module, cli)
+            if TRUNK_EXISTS is True:
+                module.exit_json(
+                    skipped=True,
+                    msg='Trunk with name %s already exists' % name
+                )
+        cli += ' %s name %s ' % (command, name)
+
+        # Appending options
+        if ports:
+            cli += ' ports ' + ports
+
+        if speed:
+            cli += ' speed ' + speed
+
+        if egress_rate_limit:
+            cli += ' egress-rate-limit ' + egress_rate_limit
+
+        if jumbo is True:
+            cli += ' jumbo '
+        if jumbo is False:
+            cli += ' no-jumbo '
+
+        if lacp_mode:
+            cli += ' lacp-mode ' + lacp_mode
+
+        if lacp_priority:
+            # pn_lacp_priority is declared type='int'; convert it before
+            # concatenating to avoid a TypeError.
+            cli += ' lacp-priority ' + str(lacp_priority)
+
+        if lacp_timeout:
+            cli += ' lacp-timeout ' + lacp_timeout
+
+        if lacp_fallback:
+            cli += ' lacp-fallback ' + lacp_fallback
+
+        if lacp_fallback_timeout:
+            cli += ' lacp-fallback-timeout ' + lacp_fallback_timeout
+
+        if edge_switch is True:
+            cli += ' edge-switch '
+        if edge_switch is False:
+            cli += ' no-edge-switch '
+
+        if pause is True:
+            cli += ' pause '
+        if pause is False:
+            cli += ' no-pause '
+
+        if description:
+            cli += ' description ' + description
+
+        if loopback is True:
+            cli += ' loopback '
+        if loopback is False:
+            cli += ' no-loopback '
+
+        if mirror_receive is True:
+            cli += ' mirror-receive-only '
+        if mirror_receive is False:
+            cli += ' no-mirror-receive-only '
+
+        if unknown_ucast_level:
+            cli += ' unknown-ucast-level ' + unknown_ucast_level
+
+        if unknown_mcast_level:
+            cli += ' unknown-mcast-level ' + unknown_mcast_level
+
+        if broadcast_level:
+            cli += ' broadcast-level ' + broadcast_level
+
+        if port_macaddr:
+            cli += ' port-mac-address ' + port_macaddr
+
+        if loopvlans:
+            cli += ' loopvlans ' + loopvlans
+
+        if routing is True:
+            cli += ' routing '
+        if routing is False:
+            cli += ' no-routing '
+
+        if host is True:
+            cli += ' host-enable '
+        if host is False:
+            cli += ' host-disable '
+
+    run_cli(module, cli)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/network/netvisor/pn_user.py b/plugins/modules/network/netvisor/pn_user.py
new file mode 100644
index 0000000000..e3ac9db3e2
--- /dev/null
+++ b/plugins/modules/network/netvisor/pn_user.py
@@ -0,0 +1,200 @@
+#!/usr/bin/python
+# Copyright: (c) 2018, Pluribus Networks
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: pn_user
+author: "Pluribus Networks (@rajaspachipulusu17)"
+short_description: CLI command to create/modify/delete user
+description:
+  - This module can be used to create a user and apply a role,
+    update a user and delete a user.
+options:
+  pn_cliswitch:
+    description:
+      - Target switch to run the CLI on.
+    type: str
+    required: false
+  state:
+    description:
+      - State the action to perform. 
Use C(present) to create user and + C(absent) to delete user C(update) to update user. + type: str + required: true + choices: ['present', 'absent', 'update'] + pn_scope: + description: + - local or fabric. + type: str + choices: ['local', 'fabric'] + pn_initial_role: + description: + - initial role for user. + type: str + pn_password: + description: + - plain text password. + type: str + pn_name: + description: + - username. + type: str +''' + +EXAMPLES = """ +- name: Create user + pn_user: + pn_cliswitch: "sw01" + state: "present" + pn_scope: "fabric" + pn_password: "foo123" + pn_name: "foo" + +- name: Delete user + pn_user: + pn_cliswitch: "sw01" + state: "absent" + pn_name: "foo" + +- name: Modify user + pn_user: + pn_cliswitch: "sw01" + state: "update" + pn_password: "test1234" + pn_name: "foo" +""" + +RETURN = """ +command: + description: the CLI command run on the target node. + returned: always + type: str +stdout: + description: set of responses from the user command. + returned: always + type: list +stderr: + description: set of error responses from the user command. + returned: on error + type: list +changed: + description: indicates whether the CLI caused changes on the target. + returned: always + type: bool +""" + + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.network.netvisor.pn_nvos import pn_cli, run_cli +from ansible_collections.community.general.plugins.module_utils.network.netvisor.netvisor import run_commands + + +def check_cli(module, cli): + """ + This method checks for idempotency using the user-show command. + If a user already exists on the given switch, return True else False. + :param module: The Ansible module to fetch input parameters + :param cli: The CLI string + """ + name = module.params['pn_name'] + + cli += ' user-show format name no-show-headers' + out = run_commands(module, cli)[1] + + out = out.split() + + return True if name in out else False + + +def main(): + """ This section is for arguments parsing """ + + state_map = dict( + present='user-create', + absent='user-delete', + update='user-modify' + ) + + module = AnsibleModule( + argument_spec=dict( + pn_cliswitch=dict(required=False, type='str'), + state=dict(required=True, type='str', + choices=state_map.keys()), + pn_scope=dict(required=False, type='str', + choices=['local', 'fabric']), + pn_initial_role=dict(required=False, type='str'), + pn_password=dict(required=False, type='str', no_log=True), + pn_name=dict(required=False, type='str'), + ), + required_if=( + ["state", "present", ["pn_name", "pn_scope"]], + ["state", "absent", ["pn_name"]], + ["state", "update", ["pn_name", "pn_password"]] + ), + ) + + # Accessing the arguments + cliswitch = module.params['pn_cliswitch'] + state = module.params['state'] + scope = module.params['pn_scope'] + initial_role = module.params['pn_initial_role'] + password = module.params['pn_password'] + name = module.params['pn_name'] + + command = state_map[state] + + # Building the CLI command string + cli = pn_cli(module, cliswitch) + + USER_EXISTS = check_cli(module, cli) + cli += ' %s name %s ' % (command, name) + + if command == 'user-modify': + if USER_EXISTS is False: + module.fail_json( + failed=True, + msg='User with name %s does not exist' % name + ) + if initial_role or scope: + module.fail_json( + failed=True, + msg='Only password can be modified' + ) + + if command == 'user-delete': + if USER_EXISTS is False: + module.exit_json( + skipped=True, + msg='user with name %s does not 
exist' % name + ) + + if command == 'user-create': + if USER_EXISTS is True: + module.exit_json( + skipped=True, + msg='User with name %s already exists' % name + ) + if scope: + cli += ' scope ' + scope + + if initial_role: + cli += ' initial-role ' + initial_role + + if command != 'user-delete': + if password: + cli += ' password ' + password + + run_cli(module, cli, state_map) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/netvisor/pn_vflow_table_profile.py b/plugins/modules/network/netvisor/pn_vflow_table_profile.py new file mode 100644 index 0000000000..e2c941e250 --- /dev/null +++ b/plugins/modules/network/netvisor/pn_vflow_table_profile.py @@ -0,0 +1,142 @@ +#!/usr/bin/python +# Copyright: (c) 2018, Pluribus Networks +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: pn_vflow_table_profile +author: "Pluribus Networks (@rajaspachipulusu17)" +short_description: CLI command to modify vflow-table-profile +description: + - This module can be used to modify a vFlow table profile. +options: + pn_cliswitch: + description: + - Target switch to run the CLI on. + required: false + type: str + state: + description: + - State the action to perform. Use C(update) to modify + the vflow-table-profile. + required: true + type: str + choices: ['update'] + pn_profile: + description: + - type of vFlow profile. + required: false + type: str + choices: ['application', 'ipv6', 'qos'] + pn_hw_tbl: + description: + - hardware table used by vFlow. + required: false + type: str + choices: ['switch-main', 'switch-hash', 'npu-main', 'npu-hash'] + pn_enable: + description: + - enable or disable vflow profile table. + required: false + type: bool +''' + +EXAMPLES = """ +- name: Modify vflow table profile + pn_vflow_table_profile: + pn_cliswitch: 'sw01' + state: 'update' + pn_profile: 'ipv6' + pn_hw_tbl: 'switch-main' + pn_enable: true + +- name: Modify vflow table profile + pn_vflow_table_profile: + state: 'update' + pn_profile: 'qos' + pn_hw_tbl: 'switch-main' + pn_enable: false +""" + +RETURN = """ +command: + description: the CLI command run on the target node. + returned: always + type: str +stdout: + description: set of responses from the vflow-table-profile command. + returned: always + type: list +stderr: + description: set of error responses from the vflow-table-profile command. + returned: on error + type: list +changed: + description: indicates whether the CLI caused changes on the target. 
+ returned: always + type: bool +""" + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.network.netvisor.pn_nvos import pn_cli, run_cli, booleanArgs + + +def main(): + """ This section is for arguments parsing """ + + state_map = dict( + update='vflow-table-profile-modify' + ) + + module = AnsibleModule( + argument_spec=dict( + pn_cliswitch=dict(required=False, type='str'), + state=dict(required=True, type='str', + choices=state_map.keys()), + pn_profile=dict(required=False, type='str', + choices=['application', 'ipv6', 'qos']), + pn_hw_tbl=dict(required=False, type='str', + choices=['switch-main', 'switch-hash', + 'npu-main', 'npu-hash']), + pn_enable=dict(required=False, type='bool'), + ), + required_if=( + ['state', 'update', ['pn_profile', 'pn_hw_tbl']], + ), + ) + + # Accessing the arguments + cliswitch = module.params['pn_cliswitch'] + state = module.params['state'] + profile = module.params['pn_profile'] + hw_tbl = module.params['pn_hw_tbl'] + enable = module.params['pn_enable'] + + command = state_map[state] + + # Building the CLI command string + cli = pn_cli(module, cliswitch) + + if command == 'vflow-table-profile-modify': + cli += ' %s ' % command + if profile: + cli += ' profile ' + profile + if hw_tbl: + cli += ' hw-tbl ' + hw_tbl + + cli += booleanArgs(enable, 'enable', 'disable') + + run_cli(module, cli, state_map) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/netvisor/pn_vlag.py b/plugins/modules/network/netvisor/pn_vlag.py new file mode 100644 index 0000000000..996d298354 --- /dev/null +++ b/plugins/modules/network/netvisor/pn_vlag.py @@ -0,0 +1,352 @@ +#!/usr/bin/python +""" PN CLI vlag-create/vlag-delete/vlag-modify """ + +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['deprecated'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: pn_vlag +author: "Pluribus Networks (@amitsi)" +short_description: CLI command to create/delete/modify vlag. +deprecated: + removed_in: '2.12' + why: Doesn't support latest Pluribus Networks netvisor + alternative: Latest modules will be pushed in Ansible future versions. +description: + - Execute vlag-create/vlag-delete/vlag-modify command. + - A virtual link aggregation group (VLAG) allows links that are physically + connected to two different Pluribus Networks devices to appear as a single + trunk to a third device. The third device can be a switch, server, or any + Ethernet device. A VLAG can provide Layer 2 multipathing, which allows you + to create redundancy by increasing bandwidth, enabling multiple parallel + paths between nodes and loadbalancing traffic where alternative paths exist. +options: + pn_cliusername: + description: + - Provide login username if user is not root. + required: False + pn_clipassword: + description: + - Provide login password if user is not root. 
+ required: False + pn_cliswitch: + description: + - Target switch(es) to run this command on. + default: 'local' + state: + description: + - State the action to perform. Use 'present' to create vlag, + 'absent' to delete vlag and 'update' to modify vlag. + required: True + choices: ['present', 'absent', 'update'] + pn_name: + description: + - The C(pn_name) takes a valid name for vlag configuration. + required: true + pn_port: + description: + - Specify the local VLAG port. + - Required for vlag-create. + pn_peer_port: + description: + - Specify the peer VLAG port. + - Required for vlag-create. + pn_mode: + description: + - Specify the mode for the VLAG. Active-standby indicates one side is + active and the other side is in standby mode. Active-active indicates + that both sides of the vlag are up by default. + choices: ['active-active', 'active-standby'] + pn_peer_switch: + description: + - Specify the fabric-name of the peer switch. + pn_failover_action: + description: + - Specify the failover action as move or ignore. + choices: ['move', 'ignore'] + pn_lacp_mode: + description: + - Specify the LACP mode. + choices: ['off', 'passive', 'active'] + pn_lacp_timeout: + description: + - Specify the LACP timeout as slow(30 seconds) or fast(4 seconds). + choices: ['slow', 'fast'] + pn_lacp_fallback: + description: + - Specify the LACP fallback mode as bundles or individual. + choices: ['bundle', 'individual'] + pn_lacp_fallback_timeout: + description: + - Specify the LACP fallback timeout in seconds. The range is between 30 + and 60 seconds with a default value of 50 seconds. +''' + +EXAMPLES = """ +- name: create a VLAG + pn_vlag: + state: 'present' + pn_name: spine-to-leaf + pn_port: 'spine01-to-leaf' + pn_peer_port: 'spine02-to-leaf' + pn_peer_switch: spine02 + pn_mode: 'active-active' + +- name: delete VLAGs + pn_vlag: + state: 'absent' + pn_name: spine-to-leaf +""" + +RETURN = """ +command: + description: The CLI command run on the target node(s). + returned: always + type: str +stdout: + description: The set of responses from the vlag command. + returned: always + type: list +stderr: + description: The set of error responses from the vlag command. + returned: on error + type: list +changed: + description: Indicates whether the CLI caused changes on the target. + returned: always + type: bool +""" + +import shlex + +# AnsibleModule boilerplate +from ansible.module_utils.basic import AnsibleModule + +VLAG_EXISTS = None + + +def pn_cli(module): + """ + This method is to generate the cli portion to launch the Netvisor cli. + It parses the username, password, switch parameters from module. + :param module: The Ansible module to fetch username, password and switch + :return: returns the cli string for further processing + """ + username = module.params['pn_cliusername'] + password = module.params['pn_clipassword'] + cliswitch = module.params['pn_cliswitch'] + + if username and password: + cli = '/usr/bin/cli --quiet --user %s:%s ' % (username, password) + else: + cli = '/usr/bin/cli --quiet ' + + if cliswitch == 'local': + cli += ' switch-local ' + else: + cli += ' switch ' + cliswitch + return cli + + +def check_cli(module, cli): + """ + This method checks for idempotency using the vlag-show command. + If a vlag with given vlag exists, return VLAG_EXISTS as True else False. 
+ :param module: The Ansible module to fetch input parameters + :param cli: The CLI string + :return Global Booleans: VLAG_EXISTS + """ + name = module.params['pn_name'] + + show = cli + ' vlag-show format name no-show-headers' + show = shlex.split(show) + out = module.run_command(show)[1] + + out = out.split() + # Global flags + global VLAG_EXISTS + if name in out: + VLAG_EXISTS = True + else: + VLAG_EXISTS = False + + +def run_cli(module, cli): + """ + This method executes the cli command on the target node(s) and returns the + output. The module then exits based on the output. + :param cli: the complete cli string to be executed on the target node(s). + :param module: The Ansible module to fetch command + """ + cliswitch = module.params['pn_cliswitch'] + state = module.params['state'] + command = get_command_from_state(state) + + cmd = shlex.split(cli) + + # 'out' contains the output + # 'err' contains the error messages + result, out, err = module.run_command(cmd) + + print_cli = cli.split(cliswitch)[1] + + # Response in JSON format + if result != 0: + module.exit_json( + command=print_cli, + stderr=err.strip(), + msg="%s operation failed" % command, + changed=False + ) + + if out: + module.exit_json( + command=print_cli, + stdout=out.strip(), + msg="%s operation completed" % command, + changed=True + ) + + else: + module.exit_json( + command=print_cli, + msg="%s operation completed" % command, + changed=True + ) + + +def get_command_from_state(state): + """ + This method gets appropriate command name for the state specified. It + returns the command name for the specified state. + :param state: The state for which the respective command name is required. + """ + command = None + if state == 'present': + command = 'vlag-create' + if state == 'absent': + command = 'vlag-delete' + if state == 'update': + command = 'vlag-modify' + return command + + +def main(): + """ This section is for argument parsing """ + module = AnsibleModule( + argument_spec=dict( + pn_cliusername=dict(required=False, type='str'), + pn_clipassword=dict(required=False, type='str', no_log=True), + pn_cliswitch=dict(required=False, type='str', default='local'), + state=dict(required=True, type='str', + choices=['present', 'absent', 'update']), + pn_name=dict(required=True, type='str'), + pn_port=dict(type='str'), + pn_peer_port=dict(type='str'), + pn_mode=dict(type='str', choices=[ + 'active-standby', 'active-active']), + pn_peer_switch=dict(type='str'), + pn_failover_action=dict(type='str', choices=['move', 'ignore']), + pn_lacp_mode=dict(type='str', choices=[ + 'off', 'passive', 'active']), + pn_lacp_timeout=dict(type='str', choices=['slow', 'fast']), + pn_lacp_fallback=dict(type='str', choices=[ + 'bundle', 'individual']), + pn_lacp_fallback_timeout=dict(type='str') + ), + required_if=( + ["state", "present", ["pn_name", "pn_port", "pn_peer_port", + "pn_peer_switch"]], + ["state", "absent", ["pn_name"]], + ["state", "update", ["pn_name"]] + ) + ) + + # Argument accessing + state = module.params['state'] + name = module.params['pn_name'] + port = module.params['pn_port'] + peer_port = module.params['pn_peer_port'] + mode = module.params['pn_mode'] + peer_switch = module.params['pn_peer_switch'] + failover_action = module.params['pn_failover_action'] + lacp_mode = module.params['pn_lacp_mode'] + lacp_timeout = module.params['pn_lacp_timeout'] + lacp_fallback = module.params['pn_lacp_fallback'] + lacp_fallback_timeout = module.params['pn_lacp_fallback_timeout'] + + command = get_command_from_state(state) + + # 
Building the CLI command string + cli = pn_cli(module) + + if command == 'vlag-delete': + + check_cli(module, cli) + if VLAG_EXISTS is False: + module.exit_json( + skipped=True, + msg='VLAG with name %s does not exist' % name + ) + cli += ' %s name %s ' % (command, name) + + else: + + if command == 'vlag-create': + check_cli(module, cli) + if VLAG_EXISTS is True: + module.exit_json( + skipped=True, + msg='VLAG with name %s already exists' % name + ) + cli += ' %s name %s ' % (command, name) + + if port: + cli += ' port %s peer-port %s ' % (port, peer_port) + + if mode: + cli += ' mode ' + mode + + if peer_switch: + cli += ' peer-switch ' + peer_switch + + if failover_action: + cli += ' failover-' + failover_action + '-L2 ' + + if lacp_mode: + cli += ' lacp-mode ' + lacp_mode + + if lacp_timeout: + cli += ' lacp-timeout ' + lacp_timeout + + if lacp_fallback: + cli += ' lacp-fallback ' + lacp_fallback + + if lacp_fallback_timeout: + cli += ' lacp-fallback-timeout ' + lacp_fallback_timeout + + run_cli(module, cli) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/netvisor/pn_vlan.py b/plugins/modules/network/netvisor/pn_vlan.py new file mode 100644 index 0000000000..2c6b4bad29 --- /dev/null +++ b/plugins/modules/network/netvisor/pn_vlan.py @@ -0,0 +1,318 @@ +#!/usr/bin/python +""" PN CLI vlan-create/vlan-delete """ + +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['deprecated'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: pn_vlan +author: "Pluribus Networks (@amitsi)" +short_description: CLI command to create/delete a VLAN. +deprecated: + removed_in: '2.12' + why: Doesn't support latest Pluribus Networks netvisor + alternative: Latest modules will be pushed in Ansible future versions. +description: + - Execute vlan-create or vlan-delete command. + - VLANs are used to isolate network traffic at Layer 2.The VLAN identifiers + 0 and 4095 are reserved and cannot be used per the IEEE 802.1Q standard. + The range of configurable VLAN identifiers is 2 through 4092. +options: + pn_cliusername: + description: + - Provide login username if user is not root. + required: False + pn_clipassword: + description: + - Provide login password if user is not root. + required: False + pn_cliswitch: + description: + - Target switch(es) to run the cli on. + required: False + default: 'local' + state: + description: + - State the action to perform. Use 'present' to create vlan and + 'absent' to delete vlan. + required: True + choices: ['present', 'absent'] + pn_vlanid: + description: + - Specify a VLAN identifier for the VLAN. This is a value between + 2 and 4092. + required: True + pn_scope: + description: + - Specify a scope for the VLAN. + - Required for vlan-create. + choices: ['fabric', 'local'] + pn_description: + description: + - Specify a description for the VLAN. 
+ pn_stats: + description: + - Specify if you want to collect statistics for a VLAN. Statistic + collection is enabled by default. + type: bool + pn_ports: + description: + - Specifies the switch network data port number, list of ports, or range + of ports. Port numbers must ne in the range of 1 to 64. + pn_untagged_ports: + description: + - Specifies the ports that should have untagged packets mapped to the + VLAN. Untagged packets are packets that do not contain IEEE 802.1Q VLAN + tags. +''' + +EXAMPLES = """ +- name: create a VLAN + pn_vlan: + state: 'present' + pn_vlanid: 1854 + pn_scope: fabric + +- name: delete VLANs + pn_vlan: + state: 'absent' + pn_vlanid: 1854 +""" + +RETURN = """ +command: + description: The CLI command run on the target node(s). + returned: always + type: str +stdout: + description: The set of responses from the vlan command. + returned: always + type: list +stderr: + description: The set of error responses from the vlan command. + returned: on error + type: list +changed: + description: Indicates whether the CLI caused changes on the target. + returned: always + type: bool +""" + +import shlex + +# AnsibleModule boilerplate +from ansible.module_utils.basic import AnsibleModule + +VLAN_EXISTS = None +MAX_VLAN_ID = 4092 +MIN_VLAN_ID = 2 + + +def pn_cli(module): + """ + This method is to generate the cli portion to launch the Netvisor cli. + It parses the username, password, switch parameters from module. + :param module: The Ansible module to fetch username, password and switch + :return: returns the cli string for further processing + """ + username = module.params['pn_cliusername'] + password = module.params['pn_clipassword'] + cliswitch = module.params['pn_cliswitch'] + + if username and password: + cli = '/usr/bin/cli --quiet --user %s:%s ' % (username, password) + else: + cli = '/usr/bin/cli --quiet ' + + if cliswitch == 'local': + cli += ' switch-local ' + else: + cli += ' switch ' + cliswitch + return cli + + +def check_cli(module, cli): + """ + This method checks for idempotency using the vlan-show command. + If a vlan with given vlan id exists, return VLAN_EXISTS as True else False. + :param module: The Ansible module to fetch input parameters + :param cli: The CLI string + :return Global Booleans: VLAN_EXISTS + """ + vlanid = module.params['pn_vlanid'] + + show = cli + \ + ' vlan-show id %s format id,scope no-show-headers' % str(vlanid) + show = shlex.split(show) + out = module.run_command(show)[1] + + out = out.split() + # Global flags + global VLAN_EXISTS + if str(vlanid) in out: + VLAN_EXISTS = True + else: + VLAN_EXISTS = False + + +def run_cli(module, cli): + """ + This method executes the cli command on the target node(s) and returns the + output. The module then exits based on the output. + :param cli: the complete cli string to be executed on the target node(s). 
+ :param module: The Ansible module to fetch command + """ + cliswitch = module.params['pn_cliswitch'] + state = module.params['state'] + command = get_command_from_state(state) + + cmd = shlex.split(cli) + # 'out' contains the output + # 'err' contains the error messages + result, out, err = module.run_command(cmd) + + print_cli = cli.split(cliswitch)[1] + + # Response in JSON format + + if result != 0: + module.exit_json( + command=print_cli, + stderr=err.strip(), + msg="%s operation failed" % command, + changed=False + ) + + if out: + module.exit_json( + command=print_cli, + stdout=out.strip(), + msg="%s operation completed" % command, + changed=True + ) + + else: + module.exit_json( + command=print_cli, + msg="%s operation completed" % command, + changed=True + ) + + +def get_command_from_state(state): + """ + This method gets appropriate command name for the state specified. It + returns the command name for the specified state. + :param state: The state for which the respective command name is required. + """ + command = None + if state == 'present': + command = 'vlan-create' + if state == 'absent': + command = 'vlan-delete' + return command + + +def main(): + """ This section is for arguments parsing """ + module = AnsibleModule( + argument_spec=dict( + pn_cliusername=dict(required=False, type='str'), + pn_clipassword=dict(required=False, type='str', no_log=True), + pn_cliswitch=dict(required=False, type='str', default='local'), + state=dict(required=True, type='str', + choices=['present', 'absent']), + pn_vlanid=dict(required=True, type='int'), + pn_scope=dict(type='str', choices=['fabric', 'local']), + pn_description=dict(type='str'), + pn_stats=dict(type='bool'), + pn_ports=dict(type='str'), + pn_untagged_ports=dict(type='str') + ), + required_if=( + ["state", "present", ["pn_vlanid", "pn_scope"]], + ["state", "absent", ["pn_vlanid"]] + ) + ) + + # Accessing the arguments + state = module.params['state'] + vlanid = module.params['pn_vlanid'] + scope = module.params['pn_scope'] + description = module.params['pn_description'] + stats = module.params['pn_stats'] + ports = module.params['pn_ports'] + untagged_ports = module.params['pn_untagged_ports'] + + command = get_command_from_state(state) + + # Building the CLI command string + cli = pn_cli(module) + + if not MIN_VLAN_ID <= vlanid <= MAX_VLAN_ID: + module.exit_json( + msg="VLAN id must be between 2 and 4092", + changed=False + ) + + if command == 'vlan-create': + + check_cli(module, cli) + if VLAN_EXISTS is True: + module.exit_json( + skipped=True, + msg='VLAN with id %s already exists' % str(vlanid) + ) + + cli += ' %s id %s scope %s ' % (command, str(vlanid), scope) + + if description: + cli += ' description ' + description + + if stats is True: + cli += ' stats ' + if stats is False: + cli += ' no-stats ' + + if ports: + cli += ' ports ' + ports + + if untagged_ports: + cli += ' untagged-ports ' + untagged_ports + + if command == 'vlan-delete': + + check_cli(module, cli) + if VLAN_EXISTS is False: + module.exit_json( + skipped=True, + msg='VLAN with id %s does not exist' % str(vlanid) + ) + + cli += ' %s id %s ' % (command, str(vlanid)) + + run_cli(module, cli) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/netvisor/pn_vrouter.py b/plugins/modules/network/netvisor/pn_vrouter.py new file mode 100644 index 0000000000..10e41fd5ad --- /dev/null +++ b/plugins/modules/network/netvisor/pn_vrouter.py @@ -0,0 +1,425 @@ +#!/usr/bin/python +""" PN CLI vrouter-create/vrouter-delete/vrouter-modify """ + +# 
+# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <http://www.gnu.org/licenses/>. +# + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['deprecated'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: pn_vrouter +author: "Pluribus Networks (@amitsi)" +short_description: CLI command to create/delete/modify a vrouter. +deprecated: + removed_in: '2.12' + why: Doesn't support latest Pluribus Networks netvisor + alternative: Latest modules will be pushed in future Ansible versions. +description: + - Execute vrouter-create, vrouter-delete, vrouter-modify command. + - Each fabric, cluster, standalone switch, or virtual network (VNET) can + provide its tenants with a virtual router (vRouter) service that forwards + traffic between networks and implements Layer 3 protocols. + - C(vrouter-create) creates a new vRouter service. + - C(vrouter-delete) deletes a vRouter service. + - C(vrouter-modify) modifies a vRouter service. +options: + pn_cliusername: + description: + - Provide login username if user is not root. + required: False + pn_clipassword: + description: + - Provide login password if user is not root. + required: False + pn_cliswitch: + description: + - Target switch(es) to run the CLI on. + required: False + default: 'local' + state: + description: + - State the action to perform. Use 'present' to create vrouter, + 'absent' to delete vrouter and 'update' to modify vrouter. + required: True + choices: ['present', 'absent', 'update'] + pn_name: + description: + - Specify the name of the vRouter. + required: true + pn_vnet: + description: + - Specify the name of the VNET. + - Required for vrouter-create. + pn_service_type: + description: + - Specify if the vRouter is a dedicated or shared VNET service. + choices: ['dedicated', 'shared'] + pn_service_state: + description: + - Specify to enable or disable vRouter service. + choices: ['enable', 'disable'] + pn_router_type: + description: + - Specify if the vRouter uses software or hardware. + - Note that if you specify hardware as router type, you cannot assign IP + addresses using DHCP. You must specify a static IP address. + choices: ['hardware', 'software'] + pn_hw_vrrp_id: + description: + - Specifies the VRRP ID for a hardware vrouter. + pn_router_id: + description: + - Specify the vRouter IP address. + pn_bgp_as: + description: + - Specify the Autonomous System Number (ASN) if the vRouter runs Border + Gateway Protocol (BGP). + pn_bgp_redistribute: + description: + - Specify how BGP routes are redistributed. + choices: ['static', 'connected', 'rip', 'ospf'] + pn_bgp_max_paths: + description: + - Specify the maximum number of paths for BGP. This is a number between + 1 and 255 or 0 to unset. + pn_bgp_options: + description: + - Specify other BGP options as a whitespace-separated string within + single quotes ''. + pn_rip_redistribute: + description: + - Specify how RIP routes are redistributed.
+ choices: ['static', 'connected', 'ospf', 'bgp'] + pn_ospf_redistribute: + description: + - Specify how OSPF routes are redistributed. + choices: ['static', 'connected', 'bgp', 'rip'] + pn_ospf_options: + description: + - Specify other OSPF options as a whitespace-separated string within + single quotes ''. + pn_vrrp_track_port: + description: + - Specify list of ports and port ranges. +''' + +EXAMPLES = """ +- name: create vrouter + pn_vrouter: + state: 'present' + pn_name: 'ansible-vrouter' + pn_vnet: 'ansible-fab-global' + pn_router_id: 208.74.182.1 + +- name: delete vrouter + pn_vrouter: + state: 'absent' + pn_name: 'ansible-vrouter' +""" + +RETURN = """ +command: + description: The CLI command run on the target node(s). + returned: always + type: str +stdout: + description: The set of responses from the vrouter command. + returned: always + type: list +stderr: + description: The set of error responses from the vrouter command. + returned: on error + type: list +changed: + description: Indicates whether the CLI caused changes on the target. + returned: always + type: bool +""" + +import shlex + +# AnsibleModule boilerplate +from ansible.module_utils.basic import AnsibleModule + +VROUTER_EXISTS = None +VROUTER_NAME_EXISTS = None + + +def pn_cli(module): + """ + This method is to generate the cli portion to launch the Netvisor cli. + It parses the username, password, switch parameters from module. + :param module: The Ansible module to fetch username, password and switch + :return: returns the cli string for further processing + """ + username = module.params['pn_cliusername'] + password = module.params['pn_clipassword'] + cliswitch = module.params['pn_cliswitch'] + + if username and password: + cli = '/usr/bin/cli --quiet --user %s:%s ' % (username, password) + else: + cli = '/usr/bin/cli --quiet ' + + if cliswitch == 'local': + cli += ' switch-local ' + else: + cli += ' switch ' + cliswitch + return cli + + +def check_cli(module, cli): + """ + This method checks for idempotency using the vrouter-show command. + A switch can have only one vRouter configuration. + If a vRouter already exists on the given switch, return VROUTER_EXISTS as + True else False. + If a vRouter with the given name exists (on a different switch), return + VROUTER_NAME_EXISTS as True else False. + + :param module: The Ansible module to fetch input parameters + :param cli: The CLI string + :return Global Booleans: VROUTER_EXISTS, VROUTER_NAME_EXISTS + """ + name = module.params['pn_name'] + # Global flags + global VROUTER_EXISTS, VROUTER_NAME_EXISTS + + # Get the name of the local switch + location = cli + ' switch-setup-show format switch-name' + location = shlex.split(location) + out = module.run_command(location)[1] + location = out.split()[1] + + # Check for any vRouters on the switch + check_vrouter = cli + ' vrouter-show location %s ' % location + check_vrouter += 'format name no-show-headers' + check_vrouter = shlex.split(check_vrouter) + out = module.run_command(check_vrouter)[1] + + if out: + VROUTER_EXISTS = True + else: + VROUTER_EXISTS = False + + # Check for any vRouters with the given name + show = cli + ' vrouter-show format name no-show-headers ' + show = shlex.split(show) + out = module.run_command(show)[1] + out = out.split() + + if name in out: + VROUTER_NAME_EXISTS = True + else: + VROUTER_NAME_EXISTS = False + + +def run_cli(module, cli): + """ + This method executes the cli command on the target node(s) and returns the + output. The module then exits based on the output.
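As an aside, the exit logic run_cli applies to the (rc, stdout, stderr) triple returned by AnsibleModule.run_command can be sketched standalone; the rc/out/err values below are hypothetical stand-ins, not part of the diff:

    # Sketch: how run_cli maps run_command results to the module exit state.
    rc, out, err = 0, 'vrouter-create ok', ''   # hypothetical values
    if rc != 0:
        result = dict(stderr=err.strip(), msg='operation failed', changed=False)
    elif out:
        result = dict(stdout=out.strip(), msg='operation completed', changed=True)
    else:
        result = dict(msg='operation completed', changed=True)
    print(result)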
+ :param cli: the complete cli string to be executed on the target node(s). + :param module: The Ansible module to fetch command + """ + cliswitch = module.params['pn_cliswitch'] + state = module.params['state'] + command = get_command_from_state(state) + + cmd = shlex.split(cli) + + # 'out' contains the output + # 'err' contains the error messages + result, out, err = module.run_command(cmd) + + print_cli = cli.split(cliswitch)[1] + + # Response in JSON format + if result != 0: + module.exit_json( + command=print_cli, + stderr=err.strip(), + msg="%s operation failed" % command, + changed=False + ) + + if out: + module.exit_json( + command=print_cli, + stdout=out.strip(), + msg="%s operation completed" % command, + changed=True + ) + + else: + module.exit_json( + command=print_cli, + msg="%s operation completed" % command, + changed=True + ) + + +def get_command_from_state(state): + """ + This method gets appropriate command name for the state specified. It + returns the command name for the specified state. + :param state: The state for which the respective command name is required. + """ + command = None + if state == 'present': + command = 'vrouter-create' + if state == 'absent': + command = 'vrouter-delete' + if state == 'update': + command = 'vrouter-modify' + return command + + +def main(): + """ This section is for arguments parsing """ + module = AnsibleModule( + argument_spec=dict( + pn_cliusername=dict(required=False, type='str'), + pn_clipassword=dict(required=False, type='str', no_log=True), + pn_cliswitch=dict(required=False, type='str', default='local'), + state=dict(required=True, type='str', + choices=['present', 'absent', 'update']), + pn_name=dict(required=True, type='str'), + pn_vnet=dict(type='str'), + pn_service_type=dict(type='str', choices=['dedicated', 'shared']), + pn_service_state=dict(type='str', choices=['enable', 'disable']), + pn_router_type=dict(type='str', choices=['hardware', 'software']), + pn_hw_vrrp_id=dict(type='int'), + pn_router_id=dict(type='str'), + pn_bgp_as=dict(type='int'), + pn_bgp_redistribute=dict(type='str', choices=['static', 'connected', + 'rip', 'ospf']), + pn_bgp_max_paths=dict(type='int'), + pn_bgp_options=dict(type='str'), + pn_rip_redistribute=dict(type='str', choices=['static', 'connected', + 'bgp', 'ospf']), + pn_ospf_redistribute=dict(type='str', choices=['static', 'connected', + 'bgp', 'rip']), + pn_ospf_options=dict(type='str'), + pn_vrrp_track_port=dict(type='str') + ), + required_if=( + ["state", "present", ["pn_name", "pn_vnet"]], + ["state", "absent", ["pn_name"]], + ["state", "update", ["pn_name"]] + ) + ) + + # Accessing the arguments + state = module.params['state'] + name = module.params['pn_name'] + vnet = module.params['pn_vnet'] + service_type = module.params['pn_service_type'] + service_state = module.params['pn_service_state'] + router_type = module.params['pn_router_type'] + hw_vrrp_id = module.params['pn_hw_vrrp_id'] + router_id = module.params['pn_router_id'] + bgp_as = module.params['pn_bgp_as'] + bgp_redistribute = module.params['pn_bgp_redistribute'] + bgp_max_paths = module.params['pn_bgp_max_paths'] + bgp_options = module.params['pn_bgp_options'] + rip_redistribute = module.params['pn_rip_redistribute'] + ospf_redistribute = module.params['pn_ospf_redistribute'] + ospf_options = module.params['pn_ospf_options'] + vrrp_track_port = module.params['pn_vrrp_track_port'] + + command = get_command_from_state(state) + + # Building the CLI command string + cli = pn_cli(module) + + if command == 'vrouter-delete': + 
check_cli(module, cli) + if VROUTER_NAME_EXISTS is False: + module.exit_json( + skipped=True, + msg='vRouter with name %s does not exist' % name + ) + cli += ' %s name %s ' % (command, name) + + else: + + if command == 'vrouter-create': + check_cli(module, cli) + if VROUTER_EXISTS is True: + module.exit_json( + skipped=True, + msg='Maximum number of vRouters has been reached on this ' + 'switch' + ) + if VROUTER_NAME_EXISTS is True: + module.exit_json( + skipped=True, + msg='vRouter with name %s already exists' % name + ) + cli += ' %s name %s ' % (command, name) + + if vnet: + cli += ' vnet ' + vnet + + if service_type: + cli += ' %s-vnet-service ' % service_type + + if service_state: + cli += ' ' + service_state + + if router_type: + cli += ' router-type ' + router_type + + if hw_vrrp_id: + cli += ' hw-vrrp-id ' + str(hw_vrrp_id) + + if router_id: + cli += ' router-id ' + router_id + + if bgp_as: + cli += ' bgp-as ' + str(bgp_as) + + if bgp_redistribute: + cli += ' bgp-redistribute ' + bgp_redistribute + + if bgp_max_paths: + cli += ' bgp-max-paths ' + str(bgp_max_paths) + + if bgp_options: + cli += ' %s ' % bgp_options + + if rip_redistribute: + cli += ' rip-redistribute ' + rip_redistribute + + if ospf_redistribute: + cli += ' ospf-redistribute ' + ospf_redistribute + + if ospf_options: + cli += ' %s ' % ospf_options + + if vrrp_track_port: + cli += ' vrrp-track-port ' + vrrp_track_port + + run_cli(module, cli) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/netvisor/pn_vrouter_bgp.py b/plugins/modules/network/netvisor/pn_vrouter_bgp.py new file mode 100644 index 0000000000..40928ad43e --- /dev/null +++ b/plugins/modules/network/netvisor/pn_vrouter_bgp.py @@ -0,0 +1,472 @@ +#!/usr/bin/python +# Copyright: (c) 2018, Pluribus Networks +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: pn_vrouter_bgp +author: "Pluribus Networks (@rajaspachipulusu17)" +short_description: CLI command to add/modify/remove vrouter-bgp +description: + - This module can be used to add a Border Gateway Protocol neighbor to a vRouter, + modify a Border Gateway Protocol neighbor of a vRouter, and remove a Border Gateway Protocol + neighbor from a vRouter. +options: + pn_cliswitch: + description: + - Target switch to run the CLI on. + required: false + type: str + state: + description: + - vrouter-bgp configuration command. + required: false + type: str + choices: ['present', 'absent', 'update'] + default: 'present' + pn_neighbor: + description: + - IP address for BGP neighbor. + required: true + type: str + pn_vrouter_name: + description: + - name of service config. + required: true + type: str + pn_send_community: + description: + - send any community attribute to neighbor. + required: false + type: bool + pn_weight: + description: + - default weight value between 0 and 65535 for the neighbor's routes. + required: false + pn_multi_protocol: + description: + - Multi-protocol features. + required: false + choices: ['ipv4-unicast', 'ipv6-unicast'] + pn_prefix_list_in: + description: + - prefixes used for filtering. + required: false + type: str + pn_route_reflector_client: + description: + - set as route reflector client.
+ required: false + type: bool + pn_default_originate: + description: + - announce default routes to the neighbor or not. + required: false + type: bool + pn_neighbor_holdtime: + description: + - BGP Holdtime (seconds). + required: false + type: str + pn_connect_retry_interval: + description: + - BGP Connect retry interval (seconds). + required: false + type: str + pn_advertisement_interval: + description: + - Minimum interval between sending BGP routing updates. + required: false + type: str + pn_route_map_out: + description: + - route map out for nbr. + required: false + type: str + pn_update_source: + description: + - IP address of BGP packets required for peering over loopback interface. + required: false + type: str + pn_bfd: + description: + - BFD protocol support for fault detection. + required: false + type: bool + default: False + pn_next_hop_self: + description: + - BGP next hop is self or not. + required: false + type: bool + pn_allowas_in: + description: + - Allow/reject routes with local AS in AS_PATH. + required: false + type: bool + pn_neighbor_keepalive_interval: + description: + - BGP Keepalive interval (seconds). + required: false + type: str + pn_max_prefix: + description: + - maximum number of prefixes. + required: false + type: str + pn_bfd_multihop: + description: + - always use BFD multi-hop port for fault detection. + required: false + type: bool + pn_interface: + description: + - Interface to reach the neighbor. + required: false + type: str + pn_password: + description: + - password for MD5 BGP. + required: false + type: str + pn_route_map_in: + description: + - route map in for nbr. + required: false + type: str + pn_soft_reconfig_inbound: + description: + - soft reset to reconfigure inbound traffic. + required: false + type: bool + pn_override_capability: + description: + - override capability. + required: false + type: bool + pn_max_prefix_warn_only: + description: + - warn if the maximum number of prefixes is exceeded. + required: false + type: bool + pn_ebgp_multihop: + description: + - value for external BGP from 1 to 255. + required: false + type: str + pn_remote_as: + description: + - BGP remote AS from 1 to 4294967295. + required: false + type: str + pn_prefix_list_out: + description: + - prefixes used for filtering outgoing packets. + required: false + type: str + pn_no_route_map_out: + description: + - Remove egress route-map from BGP neighbor. + required: false + type: str + pn_no_route_map_in: + description: + - Remove ingress route-map from BGP neighbor. + required: false + type: str +''' + +EXAMPLES = """ +- name: "Add BGP to vRouter" + pn_vrouter_bgp: + state: 'present' + pn_vrouter_name: 'sw01-vrouter' + pn_neighbor: '105.104.104.1' + pn_remote_as: 65000 + pn_bfd: true + +- name: "Remove BGP to vRouter" + pn_vrouter_bgp: + state: 'absent' + pn_vrouter_name: 'sw01-vrouter' + pn_neighbor: '105.104.104.1' + +- name: "Modify BGP to vRouter" + pn_vrouter_bgp: + state: 'update' + pn_vrouter_name: 'sw01-vrouter' + pn_neighbor: '105.104.104.1' + pn_remote_as: 65000 + pn_bfd: false + pn_allowas_in: true +""" + +RETURN = """ +command: + description: the CLI command run on the target node. + returned: always + type: str +stdout: + description: set of responses from the vrouter-bgp command. + returned: always + type: list +stderr: + description: set of error responses from the vrouter-bgp command. + returned: on error + type: list +changed: + description: indicates whether the CLI caused changes on the target. 
+ returned: always + type: bool +""" + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.network.netvisor.pn_nvos import pn_cli, run_cli, booleanArgs +from ansible_collections.community.general.plugins.module_utils.network.netvisor.netvisor import run_commands + + +def is_valid(module, param_name, param_val, min_val, max_val): + # The callers pass the bounds as strings; cast both sides so the + # comparison works on Python 3 instead of raising TypeError. + if int(param_val) < int(min_val) or int(param_val) > int(max_val): + module.fail_json( + failed=True, + msg='Valid %s range is %s to %s' % (param_name, min_val, max_val) + ) + + +def check_cli(module, cli): + """ + This method checks if vRouter exists on the target node. + This method also checks for idempotency using the vrouter-bgp-show command. + If the given vRouter exists, return VROUTER_EXISTS as True else False. + If the given neighbor exists on the given vRouter, return NEIGHBOR_EXISTS as True else False. + :param module: The Ansible module to fetch input parameters + :param cli: The CLI string + :return Booleans: VROUTER_EXISTS, NEIGHBOR_EXISTS + """ + vrouter_name = module.params['pn_vrouter_name'] + neighbor = module.params['pn_neighbor'] + + # Check for vRouter + check_vrouter = cli + ' vrouter-show format name no-show-headers' + out = run_commands(module, check_vrouter)[1] + if out: + out = out.split() + + VROUTER_EXISTS = True if vrouter_name in out else False + + if neighbor: + # Check for BGP neighbor + show = cli + ' vrouter-bgp-show vrouter-name %s ' % vrouter_name + show += 'format neighbor no-show-headers' + out = run_commands(module, show)[1] + + if out and neighbor in out.split(): + NEIGHBOR_EXISTS = True + else: + NEIGHBOR_EXISTS = False + + return VROUTER_EXISTS, NEIGHBOR_EXISTS + + +def main(): + """ This section is for arguments parsing """ + + state_map = dict( + present='vrouter-bgp-add', + absent='vrouter-bgp-remove', + update='vrouter-bgp-modify' + ) + + argument_spec = dict( + pn_cliswitch=dict(required=False, type='str'), + state=dict(required=False, type='str', choices=state_map.keys(), default='present'), + pn_neighbor=dict(required=True, type='str'), + pn_vrouter_name=dict(required=True, type='str'), + pn_send_community=dict(required=False, type='bool'), + pn_weight=dict(required=False, type='str'), + pn_multi_protocol=dict(required=False, type='str', choices=['ipv4-unicast', 'ipv6-unicast']), + pn_prefix_list_in=dict(required=False, type='str'), + pn_route_reflector_client=dict(required=False, type='bool'), + pn_default_originate=dict(required=False, type='bool'), + pn_neighbor_holdtime=dict(required=False, type='str'), + pn_connect_retry_interval=dict(required=False, type='str'), + pn_advertisement_interval=dict(required=False, type='str'), + pn_route_map_out=dict(required=False, type='str'), + pn_update_source=dict(required=False, type='str'), + pn_bfd=dict(required=False, type='bool', default=False), + pn_next_hop_self=dict(required=False, type='bool'), + pn_allowas_in=dict(required=False, type='bool'), + pn_neighbor_keepalive_interval=dict(required=False, type='str'), + pn_max_prefix=dict(required=False, type='str'), + pn_bfd_multihop=dict(required=False, type='bool'), + pn_interface=dict(required=False, type='str'), + pn_password=dict(required=False, type='str', no_log=True), + pn_route_map_in=dict(required=False, type='str'), + pn_soft_reconfig_inbound=dict(required=False, type='bool'), + pn_override_capability=dict(required=False, type='bool'), + pn_max_prefix_warn_only=dict(required=False, type='bool'), + pn_ebgp_multihop=dict(required=False, type='str'), +
pn_remote_as=dict(required=False, type='str'), + pn_prefix_list_out=dict(required=False, type='str'), + pn_no_route_map_out=dict(required=False, type='str'), + pn_no_route_map_in=dict(required=False, type='str'), + ) + + module = AnsibleModule( + argument_spec=argument_spec, + required_if=( + ["state", "present", ["pn_vrouter_name", "pn_neighbor", "pn_remote_as"]], + ["state", "absent", ["pn_vrouter_name", "pn_neighbor"]], + ["state", "update", ["pn_vrouter_name", "pn_neighbor"]] + ), + required_one_of=[['pn_send_community', 'pn_weight', 'pn_multi_protocol', + 'pn_prefix_list_in', 'pn_route_reflector_client', 'pn_default_originate', + 'pn_neighbor_holdtime', 'pn_connect_retry_interval', 'pn_advertisement_interval', + 'pn_route_map_out', 'pn_update_source', 'pn_bfd', + 'pn_next_hop_self', 'pn_allowas_in', 'pn_neighbor_keepalive_interval', + 'pn_max_prefix', 'pn_bfd_multihop', 'pn_interface', + 'pn_password', 'pn_route_map_in', 'pn_soft_reconfig_inbound', + 'pn_override_capability', 'pn_max_prefix_warn_only', 'pn_ebgp_multihop', + 'pn_remote_as', 'pn_prefix_list_out', 'pn_no_route_map_out', + 'pn_no_route_map_in']], + ) + + # Accessing the arguments + cliswitch = module.params['pn_cliswitch'] + state = module.params['state'] + neighbor = module.params['pn_neighbor'] + vrouter_name = module.params['pn_vrouter_name'] + send_community = module.params['pn_send_community'] + weight = module.params['pn_weight'] + multi_protocol = module.params['pn_multi_protocol'] + prefix_list_in = module.params['pn_prefix_list_in'] + route_reflector_client = module.params['pn_route_reflector_client'] + default_originate = module.params['pn_default_originate'] + neighbor_holdtime = module.params['pn_neighbor_holdtime'] + connect_retry_interval = module.params['pn_connect_retry_interval'] + advertisement_interval = module.params['pn_advertisement_interval'] + route_map_out = module.params['pn_route_map_out'] + update_source = module.params['pn_update_source'] + bfd = module.params['pn_bfd'] + next_hop_self = module.params['pn_next_hop_self'] + allowas_in = module.params['pn_allowas_in'] + neighbor_keepalive_interval = module.params['pn_neighbor_keepalive_interval'] + max_prefix = module.params['pn_max_prefix'] + bfd_multihop = module.params['pn_bfd_multihop'] + interface = module.params['pn_interface'] + password = module.params['pn_password'] + route_map_in = module.params['pn_route_map_in'] + soft_reconfig_inbound = module.params['pn_soft_reconfig_inbound'] + override_capability = module.params['pn_override_capability'] + max_prefix_warn_only = module.params['pn_max_prefix_warn_only'] + ebgp_multihop = module.params['pn_ebgp_multihop'] + remote_as = module.params['pn_remote_as'] + prefix_list_out = module.params['pn_prefix_list_out'] + no_route_map_out = module.params['pn_no_route_map_out'] + no_route_map_in = module.params['pn_no_route_map_in'] + + command = state_map[state] + + if weight and weight != 'none': + if int(weight) < 1 or int(weight) > 65535: + module.fail_json( + failed=True, + msg='Valid weight range is 1 to 65535' + ) + + # Building the CLI command string + cli = pn_cli(module, cliswitch) + VROUTER_EXISTS, NEIGHBOR_EXISTS = check_cli(module, cli) + + if state: + if VROUTER_EXISTS is False: + module.exit_json( + skipped=True, + msg='vRouter %s does not exist' % vrouter_name + ) + + if command == 'vrouter-bgp-remove' or command == 'vrouter-bgp-modify': + if NEIGHBOR_EXISTS is False: + module.exit_json( + skipped=True, + msg='BGP neighbor with IP %s does not exist on %s' % (neighbor, vrouter_name) + 
) + + if command == 'vrouter-bgp-add': + if NEIGHBOR_EXISTS is True: + module.exit_json( + skipped=True, + msg='BGP neighbor with IP %s already exists on %s' % (neighbor, vrouter_name) + ) + + cli += ' %s vrouter-name %s neighbor %s ' % (command, vrouter_name, neighbor) + + if command == 'vrouter-bgp-add' or command == 'vrouter-bgp-modify': + if weight: + cli += ' weight ' + weight + if multi_protocol: + cli += ' multi-protocol ' + multi_protocol + if prefix_list_in: + cli += ' prefix-list-in ' + prefix_list_in + if neighbor_holdtime: + is_valid(module, 'neighbor holdtime', neighbor_holdtime, '0', '65535') + cli += ' neighbor-holdtime ' + neighbor_holdtime + if connect_retry_interval: + is_valid(module, 'connect retry interval', connect_retry_interval, '0', '65535') + cli += ' connect-retry-interval ' + connect_retry_interval + if advertisement_interval: + is_valid(module, 'advertisement interval', advertisement_interval, '0', '65535') + cli += ' advertisement-interval ' + advertisement_interval + if route_map_out: + cli += ' route-map-out ' + route_map_out + if update_source: + cli += ' update-source ' + update_source + if neighbor_keepalive_interval: + is_valid(module, 'neighbor keepalive interval', neighbor_keepalive_interval, '0', '65535') + cli += ' neighbor-keepalive-interval ' + neighbor_keepalive_interval + if max_prefix: + cli += ' max-prefix ' + max_prefix + if interface: + cli += ' interface ' + interface + if password: + cli += ' password ' + password + if route_map_in: + cli += ' route-map-in ' + route_map_in + if ebgp_multihop: + is_valid(module, 'ebgp_multihop', ebgp_multihop, '1', '255') + cli += ' ebgp-multihop ' + ebgp_multihop + if remote_as: + cli += ' remote-as ' + remote_as + if prefix_list_out: + cli += ' prefix-list-out ' + prefix_list_out + cli += booleanArgs(send_community, 'send-community', 'no-send-community') + cli += booleanArgs(route_reflector_client, 'route-reflector-client', 'no-route-reflector-client') + cli += booleanArgs(default_originate, 'default-originate', 'no-default-originate') + cli += booleanArgs(bfd, 'bfd', 'no-bfd') + cli += booleanArgs(next_hop_self, 'next-hop-self', 'no-next-hop-self') + cli += booleanArgs(allowas_in, 'allowas-in', 'no-allowas-in') + cli += booleanArgs(bfd_multihop, 'bfd-multihop', 'no-bfd-multihop') + cli += booleanArgs(soft_reconfig_inbound, 'soft-reconfig-inbound', 'no-soft-reconfig-inbound') + cli += booleanArgs(override_capability, 'override-capability', 'no-override-capability') + cli += booleanArgs(max_prefix_warn_only, 'max-prefix-warn-only', 'no-max-prefix-warn-only') + + if command == 'vrouter-bgp-modify': + if no_route_map_out: + cli += ' no-route-map-out ' + no_route_map_out + if no_route_map_in: + cli += ' no-route-map-in ' + no_route_map_in + + run_cli(module, cli, state_map) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/netvisor/pn_vrouter_bgp_network.py b/plugins/modules/network/netvisor/pn_vrouter_bgp_network.py new file mode 100644 index 0000000000..035c8ff271 --- /dev/null +++ b/plugins/modules/network/netvisor/pn_vrouter_bgp_network.py @@ -0,0 +1,186 @@ +#!/usr/bin/python +# Copyright: (c) 2018, Pluribus Networks +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: pn_vrouter_bgp_network +author: 
"Pluribus Networks (@rajaspachipulusu17)" +short_description: CLI command to add/remove vrouter-bgp-network +description: + - This module can be used to add Border Gateway Protocol network to a vRouter + and remove Border Gateway Protocol network from a vRouter. +options: + pn_cliswitch: + description: + - Target switch to run the CLI on. + required: false + type: str + state: + description: + - State the action to perform. Use C(present) to add bgp network and + C(absent) to remove bgp network. + required: true + type: str + choices: ['present', 'absent'] + pn_netmask: + description: + - BGP network mask. + required: false + type: str + pn_network: + description: + - IP address for BGP network. + required: false + type: str + pn_vrouter_name: + description: + - name of service config. + required: false + type: str +''' + +EXAMPLES = """ +- name: Add network to bgp + pn_vrouter_bgp_network: + pn_cliswitch: "sw01" + state: "present" + pn_vrouter_name: "foo-vrouter" + pn_network: '10.10.10.10' + pn_netmask: '31' + +- name: Remove network from bgp + pn_vrouter_bgp_network: + pn_cliswitch: "sw01" + state: "absent" + pn_vrouter_name: "foo-vrouter" + pn_network: '10.10.10.10' +""" + +RETURN = """ +command: + description: the CLI command run on the target node. + returned: always + type: str +stdout: + description: set of responses from the vrouter-bgp-network command. + returned: always + type: list +stderr: + description: set of error responses from the vrouter-bgp-network command. + returned: on error + type: list +changed: + description: indicates whether the CLI caused changes on the target. + returned: always + type: bool +""" + + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.network.netvisor.pn_nvos import pn_cli, run_cli +from ansible_collections.community.general.plugins.module_utils.network.netvisor.netvisor import run_commands + + +def check_cli(module, cli): + """ + This method checks for pim ssm config using the vrouter-show command. + If a user already exists on the given switch, return True else False. 
+ :param module: The Ansible module to fetch input parameters + :param cli: The CLI string + """ + name = module.params['pn_vrouter_name'] + network = module.params['pn_network'] + + show = cli + cli += ' vrouter-show name %s format name no-show-headers' % name + rc, out, err = run_commands(module, cli) + VROUTER_EXISTS = '' if out else None + + cli = show + cli += ' vrouter-bgp-network-show vrouter-name %s network %s format network no-show-headers' % (name, network) + out = run_commands(module, cli)[1] + out = out.split() + # Guard against empty output: no output means the network is absent. + NETWORK_EXISTS = True if out and network in out[-1] else False + + return NETWORK_EXISTS, VROUTER_EXISTS + + +def main(): + """ This section is for arguments parsing """ + + state_map = dict( + present='vrouter-bgp-network-add', + absent='vrouter-bgp-network-remove' + ) + + module = AnsibleModule( + argument_spec=dict( + pn_cliswitch=dict(required=False, type='str'), + state=dict(required=True, type='str', + choices=state_map.keys()), + pn_netmask=dict(required=False, type='str'), + pn_network=dict(required=False, type='str'), + pn_vrouter_name=dict(required=False, type='str'), + ), + required_if=( + ['state', 'present', ['pn_vrouter_name', 'pn_netmask', 'pn_network']], + ['state', 'absent', ['pn_vrouter_name', 'pn_network']], + ), + ) + + # Accessing the arguments + state = module.params['state'] + cliswitch = module.params['pn_cliswitch'] + netmask = module.params['pn_netmask'] + network = module.params['pn_network'] + vrouter_name = module.params['pn_vrouter_name'] + + command = state_map[state] + + # Building the CLI command string + cli = pn_cli(module, cliswitch) + + NETWORK_EXISTS, VROUTER_EXISTS = check_cli(module, cli) + + if VROUTER_EXISTS is None: + module.fail_json( + failed=True, + msg='vRouter %s does not exist' % vrouter_name + ) + + if command == 'vrouter-bgp-network-add': + if NETWORK_EXISTS is True: + module.exit_json( + skipped=True, + msg='Network %s already added to bgp' % network + ) + + if command == 'vrouter-bgp-network-remove': + if NETWORK_EXISTS is False: + module.exit_json( + skipped=True, + msg='Network %s does not exist' % network + ) + + cli += ' %s vrouter-name %s ' % (command, vrouter_name) + + if netmask: + cli += ' netmask ' + netmask + if network: + cli += ' network ' + network + + run_cli(module, cli, state_map) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/netvisor/pn_vrouter_interface_ip.py b/plugins/modules/network/netvisor/pn_vrouter_interface_ip.py new file mode 100644 index 0000000000..2fe4929549 --- /dev/null +++ b/plugins/modules/network/netvisor/pn_vrouter_interface_ip.py @@ -0,0 +1,252 @@ +#!/usr/bin/python +# Copyright: (c) 2018, Pluribus Networks +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: pn_vrouter_interface_ip +author: "Pluribus Networks (@rajaspachipulusu17)" +short_description: CLI command to add/remove vrouter-interface-ip +description: + - This module can be used to add an IP address to an interface of a vRouter + or remove an IP address from an interface of a vRouter. +options: + pn_cliswitch: + description: + - Target switch to run the CLI on. + required: false + type: str + state: + description: + - State the action to perform.
Use C(present) to add vrouter-interface-ip + and C(absent) to remove vrouter-interface-ip. + required: true + type: str + choices: ['present', 'absent'] + pn_bd: + description: + - interface Bridge Domain. + required: false + type: str + pn_netmask: + description: + - netmask. + required: false + type: str + pn_vnet: + description: + - interface VLAN VNET. + required: false + type: str + pn_ip: + description: + - IP address. + required: false + type: str + pn_nic: + description: + - virtual NIC assigned to interface. + required: false + type: str + pn_vrouter_name: + description: + - name of service config. + required: false + type: str +''' + +EXAMPLES = """ +- name: Add vrouter interface to nic + pn_vrouter_interface_ip: + state: "present" + pn_cliswitch: "sw01" + pn_vrouter_name: "foo-vrouter" + pn_ip: "2620:0:1651:1::30" + pn_netmask: "127" + pn_nic: "eth0.4092" + +- name: Remove vrouter interface from nic + pn_vrouter_interface_ip: + state: "absent" + pn_cliswitch: "sw01" + pn_vrouter_name: "foo-vrouter" + pn_ip: "2620:0:1651:1::30" + pn_nic: "eth0.4092" +""" + +RETURN = """ +command: + description: the CLI command run on the target node. + returned: always + type: str +stdout: + description: set of responses from the vrouter-interface-ip command. + returned: always + type: list +stderr: + description: set of error responses from the vrouter-interface-ip command. + returned: on error + type: list +changed: + description: indicates whether the CLI caused changes on the target. + returned: always + type: bool +""" + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.network.netvisor.pn_nvos import pn_cli, run_cli +from ansible_collections.community.general.plugins.module_utils.network.netvisor.netvisor import run_commands + + +def check_cli(module, cli): + """ + This method checks if vRouter exists on the target node. + This method also checks for idempotency using the vrouter-interface-show + command. + If the given vRouter exists, return VROUTER_EXISTS as True else False. + + If an interface with the given ip exists on the given vRouter, + return INTERFACE_EXISTS as True else False. This is required for + vrouter-interface-add. + + If nic_str exists on the given vRouter, return NIC_EXISTS as True else + False. This is required for vrouter-interface-remove.
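A compact sketch of how the three booleans just described fall out of the show output; the strings are hypothetical (the module obtains them via vrouter-show and vrouter-interface-show):

    # Sketch: derive the three existence flags from hypothetical output.
    vrouter_out = 'foo-vrouter bar-vrouter'
    VROUTER_EXISTS = 'foo-vrouter' in vrouter_out.split()

    ip_out = '2620:0:1651:1::30/127 eth0.4092'   # ip2,nic columns, one row
    INTERFACE_EXISTS = bool(ip_out) and '2620:0:1651:1::30' in ip_out.split(' ')[-2]

    nic_out = 'eth0.4092 eth1.4092'
    NIC_EXISTS = 'eth0.4092' in nic_out.split()

    print(VROUTER_EXISTS, INTERFACE_EXISTS, NIC_EXISTS)  # True True True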
+ + :param module: The Ansible module to fetch input parameters + :param cli: The CLI string + :return Booleans: VROUTER_EXISTS, INTERFACE_EXISTS, NIC_EXISTS + """ + vrouter_name = module.params['pn_vrouter_name'] + interface_ip = module.params['pn_ip'] + nic_str = module.params['pn_nic'] + + # Check for vRouter + check_vrouter = cli + ' vrouter-show format name no-show-headers' + out = run_commands(module, check_vrouter)[1] + if out: + out = out.split() + + VROUTER_EXISTS = True if vrouter_name in out else False + + if interface_ip: + # Check for interface and VRRP and fetch nic for VRRP + show = cli + ' vrouter-interface-show vrouter-name %s ' % vrouter_name + show += 'ip2 %s format ip2,nic no-show-headers' % interface_ip + out = run_commands(module, show)[1] + + if out and interface_ip in out.split(' ')[-2]: + INTERFACE_EXISTS = True + else: + INTERFACE_EXISTS = False + + if nic_str: + # Check for nic + show = cli + ' vrouter-interface-show vrouter-name %s ' % vrouter_name + show += 'format nic no-show-headers' + out = run_commands(module, show)[1] + + if out: + out = out.split() + + NIC_EXISTS = True if nic_str in out else False + + return VROUTER_EXISTS, INTERFACE_EXISTS, NIC_EXISTS + + +def main(): + """ This section is for arguments parsing """ + + state_map = dict( + present='vrouter-interface-ip-add', + absent='vrouter-interface-ip-remove' + ) + + module = AnsibleModule( + argument_spec=dict( + pn_cliswitch=dict(required=False, type='str'), + state=dict(required=True, type='str', + choices=state_map.keys()), + pn_bd=dict(required=False, type='str'), + pn_netmask=dict(required=False, type='str'), + pn_vnet=dict(required=False, type='str'), + pn_ip=dict(required=False, type='str'), + pn_nic=dict(required=False, type='str'), + pn_vrouter_name=dict(required=False, type='str'), + ), + required_if=( + ["state", "present", ["pn_vrouter_name", "pn_nic", "pn_ip", "pn_netmask"]], + ["state", "absent", ["pn_vrouter_name", "pn_nic", "pn_ip"]] + ), + ) + + # Accessing the arguments + cliswitch = module.params['pn_cliswitch'] + state = module.params['state'] + bd = module.params['pn_bd'] + netmask = module.params['pn_netmask'] + vnet = module.params['pn_vnet'] + ip = module.params['pn_ip'] + nic = module.params['pn_nic'] + vrouter_name = module.params['pn_vrouter_name'] + + command = state_map[state] + + # Building the CLI command string + cli = pn_cli(module, cliswitch) + + VROUTER_EXISTS, INTERFACE_EXISTS, NIC_EXISTS = check_cli(module, cli) + + if VROUTER_EXISTS is False: + module.fail_json( + failed=True, + msg='vRouter %s does not exist' % vrouter_name + ) + + if NIC_EXISTS is False: + module.fail_json( + failed=True, + msg='vRouter with nic %s does not exist' % nic + ) + + cli += ' %s vrouter-name %s ' % (command, vrouter_name) + + if command == 'vrouter-interface-ip-add': + if INTERFACE_EXISTS is True: + module.exit_json( + skipped=True, + msg='vRouter with interface ip %s already exists' % ip + ) + cli += ' nic %s ip %s ' % (nic, ip) + + if bd: + cli += ' bd ' + bd + if netmask: + cli += ' netmask ' + netmask + if vnet: + cli += ' vnet ' + vnet + + if command == 'vrouter-interface-ip-remove': + if INTERFACE_EXISTS is False: + module.exit_json( + skipped=True, + msg='vRouter with interface ip %s does not exist' % ip + ) + if nic: + cli += ' nic %s ' % nic + if ip: + cli += ' ip %s ' % ip.split('/')[0] + + run_cli(module, cli, state_map) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/netvisor/pn_vrouter_loopback_interface.py
b/plugins/modules/network/netvisor/pn_vrouter_loopback_interface.py new file mode 100644 index 0000000000..f9c2e02448 --- /dev/null +++ b/plugins/modules/network/netvisor/pn_vrouter_loopback_interface.py @@ -0,0 +1,226 @@ +#!/usr/bin/python +# Copyright: (c) 2018, Pluribus Networks +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: pn_vrouter_loopback_interface +author: "Pluribus Networks (@rajaspachipulusu17)" +short_description: CLI command to add/remove vrouter-loopback-interface +description: + - This module can be used to add loopback interface to a vRouter or + remove loopback interface from a vRouter. +options: + pn_cliswitch: + description: + - Target switch to run the CLI on. + required: false + type: str + state: + description: + - State the action to perform. Use C(present) to add vrouter-loopback-interface + and C(absent) to remove vrouter-loopback-interface. + required: false + type: str + choices: ['present', 'absent'] + default: 'present' + pn_ip: + description: + - loopback IP address. + required: true + type: str + pn_index: + description: + - loopback index from 1 to 255. + required: false + type: str + pn_vrouter_name: + description: + - name of service config. + required: true + type: str +''' + +EXAMPLES = """ +- name: Add vrouter loopback interface + pn_vrouter_loopback_interface: + state: "present" + pn_cliswitch: "sw01" + pn_vrouter_name: "sw01-vrouter" + pn_ip: "192.168.10.1" + +- name: Remove vrouter loopback interface + pn_vrouter_loopback_interface: + state: "absent" + pn_cliswitch: "sw01" + pn_vrouter_name: "sw01-vrouter" + pn_ip: "192.168.10.1" + pn_index: "2" +""" + +RETURN = """ +command: + description: the CLI command run on the target node. + returned: always + type: str +stdout: + description: set of responses from the vrouter-loopback-interface command. + returned: always + type: list +stderr: + description: set of error response from the vrouter-loopback-interface + command. + returned: on error + type: list +changed: + description: indicates whether the CLI caused changes on the target. + returned: always + type: bool +""" + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.network.netvisor.pn_nvos import pn_cli, run_cli +from ansible_collections.community.general.plugins.module_utils.network.netvisor.netvisor import run_commands + + +def check_cli(module, cli): + """ + This method checks if vRouter exists on the target node. + This method also checks for idempotency using the vrouter-loopback-interface-show + command. + If the given vRouter exists, return VROUTER_EXISTS as True else False.
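The index bounds documented above (1 to 255) are enforced in main() further down; a one-function sketch of that check, with hypothetical inputs:

    # Sketch of the loopback index range check (1..255); index arrives as a str.
    def index_in_range(index):
        return 1 <= int(index) <= 255

    print(index_in_range('2'))    # True
    print(index_in_range('300'))  # False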
+ + :param module: The Ansible module to fetch input parameters + :param cli: The CLI string + :return Booleans: VROUTER_EXISTS, INTERFACE_EXISTS + """ + vrouter_name = module.params['pn_vrouter_name'] + interface_ip = module.params['pn_ip'] + + # Check for vRouter + check_vrouter = 'vrouter-show format name no-show-headers' + out = run_commands(module, check_vrouter)[1] + if out: + out = out.split() + + VROUTER_EXISTS = True if vrouter_name in out else False + + if interface_ip: + # Check for loopback interface + show = cli + ' vrouter-loopback-interface-show ' + show += 'vrouter-name %s ' % vrouter_name + show += 'format ip no-show-headers' + out = run_commands(module, show)[1] + + if out and interface_ip in out.split(): + INTERFACE_EXISTS = True + else: + INTERFACE_EXISTS = False + + return VROUTER_EXISTS, INTERFACE_EXISTS + + +def main(): + """ This section is for arguments parsing """ + + state_map = dict( + present='vrouter-loopback-interface-add', + absent='vrouter-loopback-interface-remove' + ) + + argument_spec = dict( + pn_cliswitch=dict(required=False, type='str'), + state=dict(required=False, type='str', + choices=state_map.keys(), default='present'), + pn_ip=dict(required=True, type='str'), + pn_index=dict(required=False, type='str'), + pn_vrouter_name=dict(required=True, type='str'), + ) + + module = AnsibleModule( + argument_spec=argument_spec, + required_if=( + ["state", "present", ["pn_vrouter_name", "pn_ip"]], + ["state", "absent", ["pn_vrouter_name", "pn_ip", "pn_index"]] + ), + ) + + # Accessing the arguments + cliswitch = module.params['pn_cliswitch'] + state = module.params['state'] + ip = module.params['pn_ip'] + index = module.params['pn_index'] + vrouter_name = module.params['pn_vrouter_name'] + + command = state_map[state] + + # Building the CLI command string + cli = pn_cli(module, cliswitch) + + VROUTER_EXISTS, INTERFACE_EXISTS = check_cli(module, cli) + cli += ' %s vrouter-name %s ' % (command, vrouter_name) + + if index and (int(index) < 1 or int(index) > 255): + module.fail_json( + failed=True, + msg='index should be in range 1 to 255' + ) + + if index and state == 'present': + show = 'vrouter-loopback-interface-show format index parsable-delim ,' + out = run_commands(module, show)[1] + if out: + out = out.split() + for res in out: + res = res.strip().split(',') + if index in res: + module.fail_json( + failed=True, + msg='index with value %s already exists' % index + ) + + if command == 'vrouter-loopback-interface-add': + if VROUTER_EXISTS is False: + module.fail_json( + failed=True, + msg='vRouter %s does not exist' % vrouter_name + ) + if INTERFACE_EXISTS is True: + module.exit_json( + skipped=True, + msg='vRouter with loopback ip %s already exists' % ip + ) + if ip: + cli += ' ip ' + ip + if index: + cli += ' index ' + index + + if command == 'vrouter-loopback-interface-remove': + if VROUTER_EXISTS is False: + module.fail_json( + failed=True, + msg='vRouter %s does not exist' % vrouter_name + ) + if INTERFACE_EXISTS is False: + module.exit_json( + skipped=True, + msg='vRouter with loopback ip %s does not exist' % ip + ) + + if index: + cli += ' index ' + index + + run_cli(module, cli, state_map) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/netvisor/pn_vrouter_ospf.py b/plugins/modules/network/netvisor/pn_vrouter_ospf.py new file mode 100644 index 0000000000..5da19f97f4 --- /dev/null +++ b/plugins/modules/network/netvisor/pn_vrouter_ospf.py @@ -0,0 +1,201 @@ +#!/usr/bin/python +# Copyright: (c) 2018, Pluribus Networks +#
GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: pn_vrouter_ospf +author: "Pluribus Networks (@rajaspachipulusu17)" +short_description: CLI command to add/remove vrouter-ospf +description: + - This module can be used to add OSPF protocol to vRouter + and remove OSPF protocol from a vRouter +options: + pn_cliswitch: + description: + - Target switch to run the CLI on. + required: false + type: str + state: + description: + - vrouter-ospf configuration command. + required: false + type: str + choices: ['present', 'absent'] + default: 'present' + pn_netmask: + description: + - OSPF network IP address netmask. + required: false + type: str + pn_ospf_area: + description: + - stub area number for the configuration. + required: false + type: str + pn_network: + description: + - OSPF network IP address. + required: true + type: str + pn_vrouter_name: + description: + - name of service config. + required: true + type: str +''' + +EXAMPLES = """ +- name: Add OSPF to vRouter + pn_vrouter_ospf: + state: 'present' + pn_vrouter_name: 'sw01-vrouter' + pn_network: '105.104.104.1' + pn_netmask: '24' + pn_ospf_area: '0' +- name: "Remove OSPF to vRouter" + pn_vrouter_ospf: + state: 'absent' + pn_vrouter_name: 'sw01-vrouter' + pn_network: '105.104.104.1' +""" + +RETURN = """ +command: + description: the CLI command run on the target node. + returned: always + type: str +stdout: + description: set of responses from the vrouter-ospf command. + returned: always + type: list +stderr: + description: set of error responses from the vrouter-ospf command. + returned: on error + type: list +changed: + description: indicates whether the CLI caused changes on the target. + returned: always + type: bool +""" + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.network.netvisor.pn_nvos import pn_cli, run_cli +from ansible_collections.community.general.plugins.module_utils.network.netvisor.netvisor import run_commands + + +def check_cli(module, cli): + """ + This method checks if vRouter exists on the target node. + This method also checks for idempotency using the show command. + If the given vRouter exists, return VROUTER_EXISTS as True else False. + If an OSPF network with the given ip exists on the given vRouter, + return NETWORK_EXISTS as True else False. 
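The NETWORK_EXISTS check just described is implemented below as a plain substring test on the show output; a sketch with hypothetical output, alongside a stricter word-level variant for comparison:

    # Sketch: substring vs. word-level matching of an OSPF network.
    out = '105.104.104.0/24 105.104.105.0/24'   # hypothetical show output
    network = '105.104.104.0'
    print(network in out)           # True  - substring test, as the module does
    print(network in out.split())   # False - exact word match only

The substring form can also match a longer entry that merely contains the target string, which is worth keeping in mind when reading the module's skip messages.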
+ + :param module: The Ansible module to fetch input parameters + :return Booleans: VROUTER_EXISTS, NETWORK_EXISTS + """ + vrouter_name = module.params['pn_vrouter_name'] + network = module.params['pn_network'] + show_cli = pn_cli(module) + + # Check for vRouter + check_vrouter = cli + ' vrouter-show format name no-show-headers ' + out = run_commands(module, check_vrouter)[1] + if out: + out = out.split() + + VROUTER_EXISTS = True if vrouter_name in out else False + + # Check for OSPF networks + check_network = cli + ' vrouter-ospf-show vrouter-name %s ' % vrouter_name + check_network += 'format network no-show-headers' + out = run_commands(module, check_network)[1] + + if out and network in out: + NETWORK_EXISTS = True + else: + NETWORK_EXISTS = False + + return VROUTER_EXISTS, NETWORK_EXISTS + + +def main(): + """ This section is for arguments parsing """ + + state_map = dict( + present='vrouter-ospf-add', + absent='vrouter-ospf-remove' + ) + + argument_spec = dict( + pn_cliswitch=dict(required=False, type='str'), + state=dict(required=False, type='str', choices=state_map.keys(), default='present'), + pn_netmask=dict(required=False, type='str'), + pn_ospf_area=dict(required=False, type='str'), + pn_network=dict(required=True, type='str'), + pn_vrouter_name=dict(required=True, type='str'), + ) + + module = AnsibleModule( + argument_spec=argument_spec, + required_if=( + ["state", "present", ['pn_vrouter_name', 'pn_network', 'pn_netmask', 'pn_ospf_area']], + ["state", "absent", ['pn_vrouter_name', 'pn_network']], + ), + ) + + # Accessing the arguments + cliswitch = module.params['pn_cliswitch'] + state = module.params['state'] + netmask = module.params['pn_netmask'] + ospf_area = module.params['pn_ospf_area'] + network = module.params['pn_network'] + vrouter_name = module.params['pn_vrouter_name'] + + command = state_map[state] + + # Building the CLI command string + cli = pn_cli(module, cliswitch) + VROUTER_EXISTS, NETWORK_EXISTS = check_cli(module, cli) + + if state: + if VROUTER_EXISTS is False: + module.exit_json( + skipped=True, + msg='vRouter %s does not exist' % vrouter_name + ) + + if command == 'vrouter-ospf-remove': + if NETWORK_EXISTS is False: + module.exit_json( + skipped=True, + msg='OSPF with network %s does not exist' % network + ) + cli += ' %s vrouter-name %s network %s' % (command, vrouter_name, network) + + if command == 'vrouter-ospf-add': + if NETWORK_EXISTS is True: + module.exit_json( + skipped=True, + msg='OSPF with network %s already exists' % network + ) + if netmask: + cli += ' netmask ' + netmask + if ospf_area: + cli += ' ospf-area ' + ospf_area + + run_cli(module, cli, state_map) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/netvisor/pn_vrouter_ospf6.py b/plugins/modules/network/netvisor/pn_vrouter_ospf6.py new file mode 100644 index 0000000000..3819ed8059 --- /dev/null +++ b/plugins/modules/network/netvisor/pn_vrouter_ospf6.py @@ -0,0 +1,201 @@ +#!/usr/bin/python +# Copyright: (c) 2018, Pluribus Networks +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: pn_vrouter_ospf6 +author: "Pluribus Networks (@rajaspachipulusu17)" +short_description: CLI command to add/remove vrouter-ospf6 +description: + - This module can be used to add interface ip to OSPF6
protocol + or remove interface ip from OSPF6 protocol on vRouter. +options: + pn_cliswitch: + description: + - Target switch to run the CLI on. + required: false + type: str + state: + description: + - State the action to perform. Use C(present) to add vrouter-ospf6 and + C(absent) to remove interface from vrouter-ospf6. + required: true + type: str + choices: ['present', 'absent'] + pn_ospf6_area: + description: + - area id for this interface in IPv4 address format. + required: false + type: str + pn_nic: + description: + - OSPF6 control for this interface. + required: false + type: str + pn_vrouter_name: + description: + - name of service config. + required: false + type: str +''' + +EXAMPLES = """ +- name: Add vrouter interface nic to ospf6 + pn_vrouter_ospf6: + pn_cliswitch: "sw01" + state: "present" + pn_vrouter_name: "foo-vrouter" + pn_nic: "eth0.4092" + pn_ospf6_area: "0.0.0.0" + +- name: Remove vrouter interface nic to ospf6 + pn_vrouter_ospf6: + pn_cliswitch: "sw01" + state: "absent" + pn_vrouter_name: "foo-vrouter" + pn_nic: "eth0.4092" +""" + +RETURN = """ +command: + description: the CLI command run on the target node. + returned: always + type: str +stdout: + description: set of responses from the vrouter-ospf6 command. + returned: always + type: list +stderr: + description: set of error responses from the vrouter-ospf6 command. + returned: on error + type: list +changed: + description: indicates whether the CLI caused changes on the target. + returned: always + type: bool +""" + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.network.netvisor.pn_nvos import pn_cli, run_cli +from ansible_collections.community.general.plugins.module_utils.network.netvisor.netvisor import run_commands + + +def check_cli(module, cli): + """ + This method checks if vRouter exists on the target node. + This method also checks for idempotency using the vrouter-interface-show + command. + If the given vRouter exists, return VROUTER_EXISTS as True else False. + + If nic_str exists on the given vRouter, return NIC_EXISTS as True else + False. This is required for vrouter-ospf6-remove. 
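The NIC_EXISTS check just described can be sketched standalone; the output string is hypothetical, and note that the show output has to be split into tokens before the membership test, which is the small fix applied in the code below:

    # Sketch: NIC existence test against tokenized show output.
    out = 'eth0.4092 eth1.4092'      # hypothetical vrouter-ospf6-show output
    nic_str = 'eth0.4092'
    out = out.split()
    NIC_EXISTS = True if nic_str in out else False
    print(NIC_EXISTS)  # True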
+
+    :param module: The Ansible module to fetch input parameters
+    :param cli: The CLI string
+    :return Booleans: VROUTER_EXISTS, NIC_EXISTS
+    """
+    vrouter_name = module.params['pn_vrouter_name']
+    nic_str = module.params['pn_nic']
+
+    # Check for vRouter
+    check_vrouter = cli + ' vrouter-show format name no-show-headers '
+    out = run_commands(module, check_vrouter)[1]
+    if out:
+        out = out.split()
+
+    VROUTER_EXISTS = True if vrouter_name in out else False
+
+    # Default when no nic is given, so the return below is always bound
+    NIC_EXISTS = False
+
+    if nic_str:
+        # Check for nic
+        show = cli + ' vrouter-ospf6-show vrouter-name %s format nic no-show-headers' % vrouter_name
+        out = run_commands(module, show)[1]
+
+        if out:
+            out = out.split()
+
+        NIC_EXISTS = True if nic_str in out else False
+
+    return VROUTER_EXISTS, NIC_EXISTS
+
+
+def main():
+    """ This section is for arguments parsing """
+
+    state_map = dict(
+        present='vrouter-ospf6-add',
+        absent='vrouter-ospf6-remove'
+    )
+
+    module = AnsibleModule(
+        argument_spec=dict(
+            pn_cliswitch=dict(required=False, type='str'),
+            state=dict(required=True, type='str',
+                       choices=state_map.keys()),
+            pn_ospf6_area=dict(required=False, type='str'),
+            pn_nic=dict(required=False, type='str'),
+            pn_vrouter_name=dict(required=False, type='str'),
+        ),
+        required_if=(
+            ["state", "present", ["pn_vrouter_name", "pn_nic",
+                                  "pn_ospf6_area"]],
+            ["state", "absent", ["pn_vrouter_name", "pn_nic"]]
+        ),
+    )
+
+    # Accessing the arguments
+    cliswitch = module.params['pn_cliswitch']
+    state = module.params['state']
+    ospf6_area = module.params['pn_ospf6_area']
+    nic = module.params['pn_nic']
+    vrouter_name = module.params['pn_vrouter_name']
+
+    command = state_map[state]
+
+    # Building the CLI command string
+    cli = pn_cli(module, cliswitch)
+
+    VROUTER_EXISTS, NIC_EXISTS = check_cli(module, cli)
+
+    if VROUTER_EXISTS is False:
+        module.fail_json(
+            msg='vRouter %s does not exist' % vrouter_name
+        )
+
+    cli += ' %s vrouter-name %s ' % (command, vrouter_name)
+
+    if command == 'vrouter-ospf6-add':
+        if NIC_EXISTS is True:
+            module.exit_json(
+                skipped=True,
+                msg='OSPF6 with nic %s already exists' % nic
+            )
+        if nic:
+            cli += ' nic %s' % nic
+        if ospf6_area:
+            cli += ' ospf6-area %s ' % ospf6_area
+
+    if command == 'vrouter-ospf6-remove':
+        if NIC_EXISTS is False:
+            module.exit_json(
+                skipped=True,
+                msg='OSPF6 with nic %s does not exist' % nic
+            )
+        if nic:
+            cli += ' nic %s' % nic
+
+    run_cli(module, cli, state_map)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/network/netvisor/pn_vrouter_packet_relay.py b/plugins/modules/network/netvisor/pn_vrouter_packet_relay.py
new file mode 100644
index 0000000000..e59ca8bb9d
--- /dev/null
+++ b/plugins/modules/network/netvisor/pn_vrouter_packet_relay.py
@@ -0,0 +1,199 @@
+#!/usr/bin/python
+# Copyright: (c) 2018, Pluribus Networks
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: pn_vrouter_packet_relay
+author: "Pluribus Networks (@rajaspachipulusu17)"
+short_description: CLI command to add/remove vrouter-packet-relay
+description:
+  - This module can be used to add packet relay configuration for DHCP on vrouter
+    and remove packet relay configuration for DHCP on vrouter.
+options:
+  pn_cliswitch:
+    description:
+      - Target switch to run the CLI on.
+ required: false + type: str + state: + description: + - vrouter-packet-relay configuration command. + required: false + choices: ['present', 'absent'] + type: str + default: 'present' + pn_forward_ip: + description: + - forwarding IP address. + required: true + type: str + pn_nic: + description: + - NIC. + required: true + type: str + pn_forward_proto: + description: + - protocol type to forward packets. + required: false + type: str + choices: ['dhcp'] + default: 'dhcp' + pn_vrouter_name: + description: + - name of service config. + required: true + type: str +''' + +EXAMPLES = """ +- name: vRouter packet relay add + pn_vrouter_packet_relay: + pn_cliswitch: "sw01" + pn_forward_ip: "192.168.10.1" + pn_nic: "eth0.4092" + pn_vrouter_name: "sw01-vrouter" + +- name: vRouter packet relay remove + pn_vrouter_packet_relay: + pn_cliswitch: "sw01" + state: "absent" + pn_forward_ip: "192.168.10.1" + pn_nic: "eth0.4092" + pn_vrouter_name: "sw01-vrouter" +""" + +RETURN = """ +command: + description: the CLI command run on the target node. + returned: always + type: str +stdout: + description: set of responses from the vrouter-packet-relay command. + returned: always + type: list +stderr: + description: set of error responses from the vrouter-packet-relay command. + returned: on error + type: list +changed: + description: indicates whether the CLI caused changes on the target. + returned: always + type: bool +""" + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.network.netvisor.pn_nvos import pn_cli, run_cli +from ansible_collections.community.general.plugins.module_utils.network.netvisor.netvisor import run_commands + + +def check_cli(module, cli): + """ + This method checks if vRouter exists on the target node. + This method also checks for idempotency using the vrouter-interface-show + command. + If the given vRouter exists, return VROUTER_EXISTS as True else False. + + If nic_str exists on the given vRouter, return NIC_EXISTS as True else + False. 
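+
+    For illustration, the nic check below splits the show output on
+    whitespace and does a simple membership test, so a hypothetical
+    listing such as
+
+        eth0.4092 eth1.4093
+
+    yields NIC_EXISTS=True for pn_nic=eth0.4092.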
+ + :param module: The Ansible module to fetch input parameters + :param cli: The CLI string + :return Booleans: VROUTER_EXISTS, NIC_EXISTS + """ + vrouter_name = module.params['pn_vrouter_name'] + nic_str = module.params['pn_nic'] + + # Check for vRouter + check_vrouter = 'vrouter-show format name no-show-headers' + out = run_commands(module, check_vrouter)[1] + + if out: + out = out.split() + + VROUTER_EXISTS = True if vrouter_name in out else False + + if nic_str: + # Check for nic + show = 'vrouter-interface-show vrouter-name %s format nic no-show-headers' % vrouter_name + out = run_commands(module, show)[1] + if out: + out = out.split() + + NIC_EXISTS = True if nic_str in out else False + + return VROUTER_EXISTS, NIC_EXISTS + + +def main(): + """ This section is for arguments parsing """ + + state_map = dict( + present='vrouter-packet-relay-add', + absent='vrouter-packet-relay-remove' + ) + + argument_spec = dict( + pn_cliswitch=dict(required=False, type='str'), + state=dict(required=False, type='str', choices=state_map.keys(), default='present'), + pn_forward_ip=dict(required=True, type='str'), + pn_nic=dict(required=True, type='str'), + pn_forward_proto=dict(required=False, type='str', choices=['dhcp'], default='dhcp'), + pn_vrouter_name=dict(required=True, type='str'), + ) + + module = AnsibleModule( + argument_spec=argument_spec, + required_if=( + ["state", "present", ["pn_vrouter_name", "pn_forward_ip", "pn_nic", "pn_forward_proto"]], + ["state", "absent", ["pn_vrouter_name", "pn_forward_ip", "pn_nic", "pn_forward_proto"]], + ), + ) + + # Accessing the arguments + cliswitch = module.params['pn_cliswitch'] + state = module.params['state'] + forward_ip = module.params['pn_forward_ip'] + nic = module.params['pn_nic'] + forward_proto = module.params['pn_forward_proto'] + vrouter_name = module.params['pn_vrouter_name'] + + command = state_map[state] + + # Building the CLI command string + cli = pn_cli(module, cliswitch) + + VROUTER_EXISTS, NIC_EXISTS = check_cli(module, cli) + + if VROUTER_EXISTS is False: + module.fail_json( + failed=True, + msg='vRouter %s does not exist' % vrouter_name + ) + + if NIC_EXISTS is False: + module.fail_json( + failed=True, + msg='vRouter with nic %s does not exist' % nic + ) + + if command == 'vrouter-packet-relay-add' or command == 'vrouter-packet-relay-remove': + cli += ' %s' % command + cli += ' vrouter-name %s nic %s' % (vrouter_name, nic) + cli += ' forward-proto %s forward-ip %s' % (forward_proto, forward_ip) + + run_cli(module, cli, state_map) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/netvisor/pn_vrouter_pim_config.py b/plugins/modules/network/netvisor/pn_vrouter_pim_config.py new file mode 100644 index 0000000000..3078fb5e5d --- /dev/null +++ b/plugins/modules/network/netvisor/pn_vrouter_pim_config.py @@ -0,0 +1,174 @@ +#!/usr/bin/python +# Copyright: (c) 2018, Pluribus Networks +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: pn_vrouter_pim_config +author: "Pluribus Networks (@rajaspachipulusu17)" +short_description: CLI command to modify vrouter-pim-config +description: + - This module can be used to modify pim parameters. +options: + pn_cliswitch: + description: + - Target switch to run the CLI on. 
+    required: false
+    type: str
+  state:
+    description:
+      - State the action to perform. Use C(update) to modify the vrouter-pim-config.
+    required: true
+    type: str
+    choices: ['update']
+  pn_query_interval:
+    description:
+      - IGMP query interval in seconds.
+    required: false
+    type: str
+  pn_querier_timeout:
+    description:
+      - IGMP querier timeout in seconds.
+    required: false
+    type: str
+  pn_hello_interval:
+    description:
+      - hello interval in seconds.
+    required: false
+    type: str
+  pn_vrouter_name:
+    description:
+      - name of service config.
+    required: false
+    type: str
+'''
+
+EXAMPLES = """
+- name: pim config modify
+  pn_vrouter_pim_config:
+    pn_cliswitch: '192.168.1.1'
+    pn_query_interval: '10'
+    pn_querier_timeout: '30'
+    state: 'update'
+    pn_vrouter_name: 'ansible-spine1-vrouter'
+"""
+
+RETURN = """
+command:
+  description: the CLI command run on the target node.
+  returned: always
+  type: str
+stdout:
+  description: set of responses from the vrouter-pim-config command.
+  returned: always
+  type: list
+stderr:
+  description: set of error responses from the vrouter-pim-config command.
+  returned: on error
+  type: list
+changed:
+  description: indicates whether the CLI caused changes on the target.
+  returned: always
+  type: bool
+"""
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.network.netvisor.pn_nvos import pn_cli, run_cli
+from ansible_collections.community.general.plugins.module_utils.network.netvisor.netvisor import run_commands
+
+
+def check_cli(module, cli):
+    """
+    This method checks for the pim ssm config using the vrouter-show command.
+    Return True if proto-multi is configured on the given vRouter, else False.
+    :param module: The Ansible module to fetch input parameters
+    :param cli: The CLI string
+    """
+    name = module.params['pn_vrouter_name']
+
+    show = cli
+    cli += ' vrouter-show format name no-show-headers '
+    out = run_commands(module, cli)[1]
+    if out:
+        out = out.split()
+    if not out or name not in out:
+        return False
+
+    cli = show
+    cli += ' vrouter-show name %s format proto-multi no-show-headers' % name
+    out = run_commands(module, cli)[1]
+    if out:
+        out = out.split()
+
+    return True if 'none' not in out else False
+
+
+def main():
+    """ This section is for arguments parsing """
+
+    state_map = dict(
+        update='vrouter-pim-config-modify'
+    )
+
+    module = AnsibleModule(
+        argument_spec=dict(
+            pn_cliswitch=dict(required=False, type='str'),
+            state=dict(required=True, type='str',
+                       choices=state_map.keys()),
+            pn_query_interval=dict(required=False, type='str'),
+            pn_querier_timeout=dict(required=False, type='str'),
+            pn_hello_interval=dict(required=False, type='str'),
+            pn_vrouter_name=dict(required=True, type='str'),
+        ),
+        required_if=(
+            ['state', 'update', ['pn_vrouter_name']],
+        ),
+        required_one_of=[['pn_query_interval',
+                          'pn_querier_timeout',
+                          'pn_hello_interval']]
+    )
+
+    # Accessing the arguments
+    cliswitch = module.params['pn_cliswitch']
+    state = module.params['state']
+    query_interval = module.params['pn_query_interval']
+    querier_timeout = module.params['pn_querier_timeout']
+    hello_interval = module.params['pn_hello_interval']
+    vrouter_name = module.params['pn_vrouter_name']
+
+    command = state_map[state]
+
+    # Building the CLI command string
+    cli = pn_cli(module, cliswitch)
+
+    if command == 'vrouter-pim-config-modify':
+        PIM_SSM_CONFIG = check_cli(module, cli)
+        if PIM_SSM_CONFIG is False:
+            module.exit_json(
+                skipped=True,
+                msg='vrouter proto-multi is not
configured/vrouter is not created' + ) + cli += ' %s vrouter-name %s ' % (command, vrouter_name) + if querier_timeout: + cli += ' querier-timeout ' + querier_timeout + if hello_interval: + cli += ' hello-interval ' + hello_interval + if query_interval: + cli += ' query-interval ' + query_interval + + run_cli(module, cli, state_map) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/netvisor/pn_vrouterbgp.py b/plugins/modules/network/netvisor/pn_vrouterbgp.py new file mode 100644 index 0000000000..447b85e7ac --- /dev/null +++ b/plugins/modules/network/netvisor/pn_vrouterbgp.py @@ -0,0 +1,487 @@ +#!/usr/bin/python +""" PN-CLI vrouter-bgp-add/vrouter-bgp-remove/vrouter-bgp-modify """ + +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['deprecated'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: pn_vrouterbgp +author: "Pluribus Networks (@amitsi)" +short_description: CLI command to add/remove/modify vrouter-bgp. +deprecated: + removed_in: '2.12' + why: Doesn't support latest Pluribus Networks netvisor + alternative: Latest modules will be pushed in Ansible future versions. +description: + - Execute vrouter-bgp-add, vrouter-bgp-remove, vrouter-bgp-modify command. + - Each fabric, cluster, standalone switch, or virtual network (VNET) can + provide its tenants with a vRouter service that forwards traffic between + networks and implements Layer 4 protocols. +options: + pn_cliusername: + description: + - Provide login username if user is not root. + required: False + pn_clipassword: + description: + - Provide login password if user is not root. + required: False + pn_cliswitch: + description: + - Target switch(es) to run the cli on. + required: False + default: 'local' + state: + description: + - State the action to perform. Use 'present' to add bgp, + 'absent' to remove bgp and 'update' to modify bgp. + required: True + choices: ['present', 'absent', 'update'] + pn_vrouter_name: + description: + - Specify a name for the vRouter service. + required: True + pn_neighbor: + description: + - Specify a neighbor IP address to use for BGP. + - Required for vrouter-bgp-add. + pn_remote_as: + description: + - Specify the remote Autonomous System(AS) number. This value is between + 1 and 4294967295. + - Required for vrouter-bgp-add. + pn_next_hop_self: + description: + - Specify if the next-hop is the same router or not. + type: bool + pn_password: + description: + - Specify a password, if desired. + pn_ebgp: + description: + - Specify a value for external BGP to accept or attempt BGP connections + to external peers, not directly connected, on the network. This is a + value between 1 and 255. + pn_prefix_listin: + description: + - Specify the prefix list to filter traffic inbound. + pn_prefix_listout: + description: + - Specify the prefix list to filter traffic outbound. 
+  pn_route_reflector:
+    description:
+      - Specify if a route reflector client is used.
+    type: bool
+  pn_override_capability:
+    description:
+      - Specify if you want to override capability.
+    type: bool
+  pn_soft_reconfig:
+    description:
+      - Specify if you want a soft reconfiguration of inbound traffic.
+    type: bool
+  pn_max_prefix:
+    description:
+      - Specify the maximum number of prefixes.
+  pn_max_prefix_warn:
+    description:
+      - Specify if you want a warning message when the maximum number of
+        prefixes is exceeded.
+    type: bool
+  pn_bfd:
+    description:
+      - Specify if you want BFD protocol support for fault detection.
+    type: bool
+  pn_multiprotocol:
+    description:
+      - Specify a multi-protocol for BGP.
+    choices: ['ipv4-unicast', 'ipv6-unicast']
+  pn_weight:
+    description:
+      - Specify a default weight value between 0 and 65535 for the neighbor
+        routes.
+  pn_default_originate:
+    description:
+      - Specify if you want to announce default routes to the neighbor or not.
+    type: bool
+  pn_keepalive:
+    description:
+      - Specify BGP neighbor keepalive interval in seconds.
+  pn_holdtime:
+    description:
+      - Specify BGP neighbor holdtime in seconds.
+  pn_route_mapin:
+    description:
+      - Specify inbound route map for neighbor.
+  pn_route_mapout:
+    description:
+      - Specify outbound route map for neighbor.
+'''
+
+EXAMPLES = """
+- name: add vrouter-bgp
+  pn_vrouterbgp:
+    state: 'present'
+    pn_vrouter_name: 'ansible-vrouter'
+    pn_neighbor: 104.104.104.1
+    pn_remote_as: 1800
+
+- name: remove vrouter-bgp
+  pn_vrouterbgp:
+    state: 'absent'
+    pn_vrouter_name: 'ansible-vrouter'
+"""
+
+RETURN = """
+command:
+  description: The CLI command run on the target node(s).
+  returned: always
+  type: str
+stdout:
+  description: The set of responses from the vrouterbgp command.
+  returned: always
+  type: list
+stderr:
+  description: The set of error responses from the vrouterbgp command.
+  returned: on error
+  type: list
+changed:
+  description: Indicates whether the CLI caused changes on the target.
+  returned: always
+  type: bool
+"""
+
+import shlex
+
+# Ansible boiler-plate
+from ansible.module_utils.basic import AnsibleModule
+
+VROUTER_EXISTS = None
+NEIGHBOR_EXISTS = None
+
+
+def pn_cli(module):
+    """
+    This method is to generate the cli portion to launch the Netvisor cli.
+    It parses the username, password, switch parameters from module.
+    :param module: The Ansible module to fetch username, password and switch
+    :return: returns the cli string for further processing
+    """
+    username = module.params['pn_cliusername']
+    password = module.params['pn_clipassword']
+    cliswitch = module.params['pn_cliswitch']
+
+    if username and password:
+        cli = '/usr/bin/cli --quiet --user %s:%s ' % (username, password)
+    else:
+        cli = '/usr/bin/cli --quiet '
+
+    if cliswitch == 'local':
+        cli += ' switch-local '
+    else:
+        cli += ' switch ' + cliswitch
+    return cli
+
+
+def check_cli(module, cli):
+    """
+    This method checks if vRouter exists on the target node.
+    This method also checks for idempotency using the vrouter-bgp-show command.
+    If the given vRouter exists, return VROUTER_EXISTS as True else False.
+    If a BGP neighbor with the given ip exists on the given vRouter,
+    return NEIGHBOR_EXISTS as True else False.
+ + :param module: The Ansible module to fetch input parameters + :param cli: The CLI string + :return Global Booleans: VROUTER_EXISTS, NEIGHBOR_EXISTS + """ + vrouter_name = module.params['pn_vrouter_name'] + neighbor = module.params['pn_neighbor'] + # Global flags + global VROUTER_EXISTS, NEIGHBOR_EXISTS + + # Check for vRouter + check_vrouter = cli + ' vrouter-show format name no-show-headers ' + check_vrouter = shlex.split(check_vrouter) + out = module.run_command(check_vrouter)[1] + out = out.split() + + if vrouter_name in out: + VROUTER_EXISTS = True + else: + VROUTER_EXISTS = False + + # Check for BGP neighbors + show = cli + ' vrouter-bgp-show vrouter-name %s ' % vrouter_name + show += 'format neighbor no-show-headers' + show = shlex.split(show) + out = module.run_command(show)[1] + out = out.split() + + if neighbor in out: + NEIGHBOR_EXISTS = True + else: + NEIGHBOR_EXISTS = False + + +def run_cli(module, cli): + """ + This method executes the cli command on the target node(s) and returns the + output. The module then exits based on the output. + :param cli: the complete cli string to be executed on the target node(s). + :param module: The Ansible module to fetch command + """ + cliswitch = module.params['pn_cliswitch'] + state = module.params['state'] + command = get_command_from_state(state) + + cmd = shlex.split(cli) + + # 'out' contains the output + # 'err' contains the error messages + result, out, err = module.run_command(cmd) + + print_cli = cli.split(cliswitch)[1] + + # Response in JSON format + if result != 0: + module.exit_json( + command=print_cli, + stderr=err.strip(), + msg="%s operation failed" % command, + changed=False + ) + + if out: + module.exit_json( + command=print_cli, + stdout=out.strip(), + msg="%s operation completed" % command, + changed=True + ) + + else: + module.exit_json( + command=print_cli, + msg="%s operation completed" % command, + changed=True + ) + + +def get_command_from_state(state): + """ + This method gets appropriate command name for the state specified. It + returns the command name for the specified state. + :param state: The state for which the respective command name is required. 
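+
+    The mapping is fixed (mirroring the function body below):
+        present -> vrouter-bgp-add
+        absent  -> vrouter-bgp-remove
+        update  -> vrouter-bgp-modify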
+ """ + command = None + if state == 'present': + command = 'vrouter-bgp-add' + if state == 'absent': + command = 'vrouter-bgp-remove' + if state == 'update': + command = 'vrouter-bgp-modify' + return command + + +def main(): + """ This portion is for arguments parsing """ + module = AnsibleModule( + argument_spec=dict( + pn_cliusername=dict(required=False, type='str'), + pn_clipassword=dict(required=False, type='str', no_log=True), + pn_cliswitch=dict(required=False, type='str', default='local'), + state=dict(required=True, type='str', + choices=['present', 'absent', 'update']), + pn_vrouter_name=dict(required=True, type='str'), + pn_neighbor=dict(type='str'), + pn_remote_as=dict(type='str'), + pn_next_hop_self=dict(type='bool'), + pn_password=dict(type='str', no_log=True), + pn_ebgp=dict(type='int'), + pn_prefix_listin=dict(type='str'), + pn_prefix_listout=dict(type='str'), + pn_route_reflector=dict(type='bool'), + pn_override_capability=dict(type='bool'), + pn_soft_reconfig=dict(type='bool'), + pn_max_prefix=dict(type='int'), + pn_max_prefix_warn=dict(type='bool'), + pn_bfd=dict(type='bool'), + pn_multiprotocol=dict(type='str', + choices=['ipv4-unicast', 'ipv6-unicast']), + pn_weight=dict(type='int'), + pn_default_originate=dict(type='bool'), + pn_keepalive=dict(type='str'), + pn_holdtime=dict(type='str'), + pn_route_mapin=dict(type='str'), + pn_route_mapout=dict(type='str') + ), + required_if=( + ["state", "present", + ["pn_vrouter_name", "pn_neighbor", "pn_remote_as"]], + ["state", "absent", + ["pn_vrouter_name", "pn_neighbor"]], + ["state", "update", + ["pn_vrouter_name", "pn_neighbor"]] + ) + ) + + # Accessing the arguments + state = module.params['state'] + vrouter_name = module.params['pn_vrouter_name'] + neighbor = module.params['pn_neighbor'] + remote_as = module.params['pn_remote_as'] + next_hop_self = module.params['pn_next_hop_self'] + password = module.params['pn_password'] + ebgp = module.params['pn_ebgp'] + prefix_listin = module.params['pn_prefix_listin'] + prefix_listout = module.params['pn_prefix_listout'] + route_reflector = module.params['pn_route_reflector'] + override_capability = module.params['pn_override_capability'] + soft_reconfig = module.params['pn_soft_reconfig'] + max_prefix = module.params['pn_max_prefix'] + max_prefix_warn = module.params['pn_max_prefix_warn'] + bfd = module.params['pn_bfd'] + multiprotocol = module.params['pn_multiprotocol'] + weight = module.params['pn_weight'] + default_originate = module.params['pn_default_originate'] + keepalive = module.params['pn_keepalive'] + holdtime = module.params['pn_holdtime'] + route_mapin = module.params['pn_route_mapin'] + route_mapout = module.params['pn_route_mapout'] + + # Building the CLI command string + cli = pn_cli(module) + + command = get_command_from_state(state) + if command == 'vrouter-bgp-remove': + check_cli(module, cli) + if VROUTER_EXISTS is False: + module.exit_json( + skipped=True, + msg='vRouter %s does not exist' % vrouter_name + ) + if NEIGHBOR_EXISTS is False: + module.exit_json( + skipped=True, + msg=('BGP neighbor with IP %s does not exist on %s' + % (neighbor, vrouter_name)) + ) + cli += (' %s vrouter-name %s neighbor %s ' + % (command, vrouter_name, neighbor)) + + else: + + if command == 'vrouter-bgp-add': + check_cli(module, cli) + if VROUTER_EXISTS is False: + module.exit_json( + skipped=True, + msg='vRouter %s does not exist' % vrouter_name + ) + if NEIGHBOR_EXISTS is True: + module.exit_json( + skipped=True, + msg=('BGP neighbor with IP %s already exists on %s' + % (neighbor, 
vrouter_name)) + ) + + cli += (' %s vrouter-name %s neighbor %s ' + % (command, vrouter_name, neighbor)) + + if remote_as: + cli += ' remote-as ' + str(remote_as) + + if next_hop_self is True: + cli += ' next-hop-self ' + if next_hop_self is False: + cli += ' no-next-hop-self ' + + if password: + cli += ' password ' + password + + if ebgp: + cli += ' ebgp-multihop ' + str(ebgp) + + if prefix_listin: + cli += ' prefix-list-in ' + prefix_listin + + if prefix_listout: + cli += ' prefix-list-out ' + prefix_listout + + if route_reflector is True: + cli += ' route-reflector-client ' + if route_reflector is False: + cli += ' no-route-reflector-client ' + + if override_capability is True: + cli += ' override-capability ' + if override_capability is False: + cli += ' no-override-capability ' + + if soft_reconfig is True: + cli += ' soft-reconfig-inbound ' + if soft_reconfig is False: + cli += ' no-soft-reconfig-inbound ' + + if max_prefix: + cli += ' max-prefix ' + str(max_prefix) + + if max_prefix_warn is True: + cli += ' max-prefix-warn-only ' + if max_prefix_warn is False: + cli += ' no-max-prefix-warn-only ' + + if bfd is True: + cli += ' bfd ' + if bfd is False: + cli += ' no-bfd ' + + if multiprotocol: + cli += ' multi-protocol ' + multiprotocol + + if weight: + cli += ' weight ' + str(weight) + + if default_originate is True: + cli += ' default-originate ' + if default_originate is False: + cli += ' no-default-originate ' + + if keepalive: + cli += ' neighbor-keepalive-interval ' + keepalive + + if holdtime: + cli += ' neighbor-holdtime ' + holdtime + + if route_mapin: + cli += ' route-map-in ' + route_mapin + + if route_mapout: + cli += ' route-map-out ' + route_mapout + + run_cli(module, cli) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/netvisor/pn_vrouterif.py b/plugins/modules/network/netvisor/pn_vrouterif.py new file mode 100644 index 0000000000..78cb7b274c --- /dev/null +++ b/plugins/modules/network/netvisor/pn_vrouterif.py @@ -0,0 +1,492 @@ +#!/usr/bin/python +""" PN-CLI vrouter-interface-add/remove/modify """ + +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['deprecated'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: pn_vrouterif +author: "Pluribus Networks (@amitsi)" +short_description: CLI command to add/remove/modify vrouter-interface. +deprecated: + removed_in: '2.12' + why: Doesn't support latest Pluribus Networks netvisor + alternative: Latest modules will be pushed in Ansible future versions. +description: + - Execute vrouter-interface-add, vrouter-interface-remove, + vrouter-interface-modify command. + - You configure interfaces to vRouter services on a fabric, cluster, + standalone switch or virtual network(VNET). +options: + pn_cliusername: + description: + - Provide login username if user is not root. 
+ required: False + pn_clipassword: + description: + - Provide login password if user is not root. + required: False + pn_cliswitch: + description: + - Target switch to run the cli on. + required: False + default: 'local' + state: + description: + - State the action to perform. Use 'present' to add vrouter interface, + 'absent' to remove vrouter interface and 'update' to modify vrouter + interface. + required: True + choices: ['present', 'absent', 'update'] + pn_vrouter_name: + description: + - Specify the name of the vRouter interface. + required: True + pn_vlan: + description: + - Specify the VLAN identifier. This is a value between 1 and 4092. + pn_interface_ip: + description: + - Specify the IP address of the interface in x.x.x.x/n format. + pn_assignment: + description: + - Specify the DHCP method for IP address assignment. + choices: ['none', 'dhcp', 'dhcpv6', 'autov6'] + pn_vxlan: + description: + - Specify the VXLAN identifier. This is a value between 1 and 16777215. + pn_interface: + description: + - Specify if the interface is management, data or span interface. + choices: ['mgmt', 'data', 'span'] + pn_alias: + description: + - Specify an alias for the interface. + pn_exclusive: + description: + - Specify if the interface is exclusive to the configuration. Exclusive + means that other configurations cannot use the interface. Exclusive is + specified when you configure the interface as span interface and allows + higher throughput through the interface. + type: bool + required: False + pn_nic_enable: + description: + - Specify if the NIC is enabled or not + type: bool + pn_vrrp_id: + description: + - Specify the ID for the VRRP interface. The IDs on both vRouters must be + the same IS number. + pn_vrrp_priority: + description: + - Specify the priority for the VRRP interface. This is a value between + 1 (lowest) and 255 (highest). + pn_vrrp_adv_int: + description: + - Specify a VRRP advertisement interval in milliseconds. The range is + from 30 to 40950 with a default value of 1000. + pn_l3port: + description: + - Specify a Layer 3 port for the interface. + pn_secondary_macs: + description: + - Specify a secondary MAC address for the interface. + pn_nic_str: + description: + - Specify the type of NIC. Used for vrouter-interface remove/modify. +''' + +EXAMPLES = """ +- name: Add vrouter-interface + pn_vrouterif: + pn_cliusername: admin + pn_clipassword: admin + state: 'present' + pn_vrouter_name: 'ansible-vrouter' + pn_interface_ip: 101.101.101.2/24 + pn_vlan: 101 + +- name: Add VRRP.. + pn_vrouterif: + pn_cliusername: admin + pn_clipassword: admin + state: 'present' + pn_vrouter_name: 'ansible-vrouter' + pn_interface_ip: 101.101.101.2/24 + pn_vrrp_ip: 101.101.101.1/24 + pn_vrrp_priority: 100 + pn_vlan: 101 + +- name: Remove vrouter-interface + pn_vrouterif: + pn_cliusername: admin + pn_clipassword: admin + state: 'absent' + pn_vrouter_name: 'ansible-vrouter' + pn_interface_ip: 101.101.101.2/24 +""" + +RETURN = """ +command: + description: The CLI command run on the target node(s). + returned: always + type: str +stdout: + description: The set of responses from the vrouterif command. + returned: on success + type: list +stderr: + description: The set of error responses from the vrouterif command. + returned: on error + type: str +changed: + description: Indicates whether the CLI caused changes on the target. 
+ returned: always + type: bool +""" + +import shlex + +# Ansible boiler-plate +from ansible.module_utils.basic import AnsibleModule + +VROUTER_EXISTS = None +INTERFACE_EXISTS = None +NIC_EXISTS = None +VRRP_EXISTS = None + + +def pn_cli(module): + """ + This method is to generate the cli portion to launch the Netvisor cli. + It parses the username, password, switch parameters from module. + :param module: The Ansible module to fetch username, password and switch + :return: returns the cli string for further processing + """ + username = module.params['pn_cliusername'] + password = module.params['pn_clipassword'] + cliswitch = module.params['pn_cliswitch'] + + if username and password: + cli = '/usr/bin/cli --quiet --user %s:%s ' % (username, password) + else: + cli = '/usr/bin/cli --quiet ' + + if cliswitch == 'local': + cli += ' switch-local ' + else: + cli += ' switch ' + cliswitch + return cli + + +def check_cli(module, cli): + """ + This method checks if vRouter exists on the target node. + This method also checks for idempotency using the vrouter-interface-show + command. + If the given vRouter exists, return VROUTER_EXISTS as True else False. + + If an interface with the given ip exists on the given vRouter, + return INTERFACE_EXISTS as True else False. This is required for + vrouter-interface-add. + + If nic_str exists on the given vRouter, return NIC_EXISTS as True else + False. This is required for vrouter-interface-remove. + + :param module: The Ansible module to fetch input parameters + :param cli: The CLI string + :return Global Booleans: VROUTER_EXISTS, INTERFACE_EXISTS, NIC_EXISTS + """ + vrouter_name = module.params['pn_vrouter_name'] + interface_ip = module.params['pn_interface_ip'] + nic_str = module.params['pn_nic_str'] + + # Global flags + global VROUTER_EXISTS, INTERFACE_EXISTS, NIC_EXISTS + + # Check for vRouter + check_vrouter = cli + ' vrouter-show format name no-show-headers ' + check_vrouter = shlex.split(check_vrouter) + out = module.run_command(check_vrouter)[1] + out = out.split() + + if vrouter_name in out: + VROUTER_EXISTS = True + else: + VROUTER_EXISTS = False + + if interface_ip: + # Check for interface and VRRP and fetch nic for VRRP + show = cli + ' vrouter-interface-show vrouter-name %s ' % vrouter_name + show += 'ip %s format ip,nic no-show-headers' % interface_ip + show = shlex.split(show) + out = module.run_command(show)[1] + if out: + INTERFACE_EXISTS = True + else: + INTERFACE_EXISTS = False + + if nic_str: + # Check for nic + show = cli + ' vrouter-interface-show vrouter-name %s ' % vrouter_name + show += ' format nic no-show-headers' + show = shlex.split(show) + out = module.run_command(show)[1] + if nic_str in out: + NIC_EXISTS = True + else: + NIC_EXISTS = False + + +def get_nic(module, cli): + """ + This module checks if VRRP interface can be added. If No, return VRRP_EXISTS + as True. + If Yes, fetch the nic string from the primary interface and return nic and + VRRP_EXISTS as False. 
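+
+    Parsing assumption, for illustration only: the ip,nic listing for a
+    single primary interface splits into three whitespace-separated
+    tokens, so more than three tokens means a VRRP interface was already
+    added, and out[2] picks the nic column of the primary row.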
+ :param module: + :param cli: + :return: nic, Global Boolean: VRRP_EXISTS + """ + vrouter_name = module.params['pn_vrouter_name'] + interface_ip = module.params['pn_interface_ip'] + + global VRRP_EXISTS + + # Check for interface and VRRP and fetch nic for VRRP + show = cli + ' vrouter-interface-show vrouter-name %s ' % vrouter_name + show += 'ip %s format ip,nic no-show-headers' % interface_ip + show = shlex.split(show) + out = module.run_command(show)[1] + out = out.split() + + if len(out) > 3: + VRRP_EXISTS = True + return None + else: + nic = out[2] + VRRP_EXISTS = False + return nic + + +def run_cli(module, cli): + """ + This method executes the cli command on the target node(s) and returns the + output. The module then exits based on the output. + :param cli: the complete cli string to be executed on the target node(s). + :param module: The Ansible module to fetch command + """ + cliswitch = module.params['pn_cliswitch'] + state = module.params['state'] + command = get_command_from_state(state) + + cmd = shlex.split(cli) + + # 'out' contains the output + # 'err' contains the error messages + result, out, err = module.run_command(cmd) + + print_cli = cli.split(cliswitch)[1] + + # Response in JSON format + if result != 0: + module.exit_json( + command=print_cli, + stderr=err.strip(), + msg="%s operation failed" % command, + changed=False + ) + + if out: + module.exit_json( + command=print_cli, + stdout=out.strip(), + msg="%s operation completed" % command, + changed=True + ) + + else: + module.exit_json( + command=print_cli, + msg="%s operation completed" % command, + changed=True + ) + + +def get_command_from_state(state): + """ + This method gets appropriate command name for the state specified. It + returns the command name for the specified state. + :param state: The state for which the respective command name is required. 
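+
+    The mapping is fixed (mirroring the function body below):
+        present -> vrouter-interface-add
+        absent  -> vrouter-interface-remove
+        update  -> vrouter-interface-modify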
+ """ + command = None + if state == 'present': + command = 'vrouter-interface-add' + if state == 'absent': + command = 'vrouter-interface-remove' + if state == 'update': + command = 'vrouter-interface-modify' + return command + + +def main(): + """ This portion is for arguments parsing """ + module = AnsibleModule( + argument_spec=dict( + pn_cliusername=dict(required=False, type='str'), + pn_clipassword=dict(required=False, type='str', no_log=True), + pn_cliswitch=dict(required=False, type='str', default='local'), + state=dict(required=True, type='str', + choices=['present', 'absent', 'update']), + pn_vrouter_name=dict(required=True, type='str'), + pn_vlan=dict(type='int'), + pn_interface_ip=dict(required=True, type='str'), + pn_assignment=dict(type='str', + choices=['none', 'dhcp', 'dhcpv6', 'autov6']), + pn_vxlan=dict(type='int'), + pn_interface=dict(type='str', choices=['mgmt', 'data', 'span']), + pn_alias=dict(type='str'), + pn_exclusive=dict(type='bool'), + pn_nic_enable=dict(type='bool'), + pn_vrrp_id=dict(type='int'), + pn_vrrp_priority=dict(type='int'), + pn_vrrp_adv_int=dict(type='str'), + pn_l3port=dict(type='str'), + pn_secondary_macs=dict(type='str'), + pn_nic_str=dict(type='str') + ), + required_if=( + ["state", "present", + ["pn_vrouter_name", "pn_interface_ip"]], + ["state", "absent", + ["pn_vrouter_name", "pn_nic_str"]] + ), + ) + + # Accessing the arguments + state = module.params['state'] + vrouter_name = module.params['pn_vrouter_name'] + vlan = module.params['pn_vlan'] + interface_ip = module.params['pn_interface_ip'] + assignment = module.params['pn_assignment'] + vxlan = module.params['pn_vxlan'] + interface = module.params['pn_interface'] + alias = module.params['pn_alias'] + exclusive = module.params['pn_exclusive'] + nic_enable = module.params['pn_nic_enable'] + vrrp_id = module.params['pn_vrrp_id'] + vrrp_priority = module.params['pn_vrrp_priority'] + vrrp_adv_int = module.params['pn_vrrp_adv_int'] + l3port = module.params['pn_l3port'] + secondary_macs = module.params['pn_secondary_macs'] + nic_str = module.params['pn_nic_str'] + + command = get_command_from_state(state) + + # Building the CLI command string + cli = pn_cli(module) + + check_cli(module, cli) + if command == 'vrouter-interface-add': + if VROUTER_EXISTS is False: + module.exit_json( + skipped=True, + msg='vRouter %s does not exist' % vrouter_name + ) + + if vrrp_id: + vrrp_primary = get_nic(module, cli) + if VRRP_EXISTS is True: + module.exit_json( + skipped=True, + msg=('VRRP interface on %s already exists. Check ' + 'the IP addresses' % vrouter_name) + ) + cli += ' %s vrouter-name %s ' % (command, vrouter_name) + cli += (' ip %s vrrp-primary %s vrrp-id %s ' + % (interface_ip, vrrp_primary, str(vrrp_id))) + if vrrp_priority: + cli += ' vrrp-priority %s ' % str(vrrp_priority) + if vrrp_adv_int: + cli += ' vrrp-adv-int %s ' % vrrp_adv_int + + else: + if INTERFACE_EXISTS is True: + module.exit_json( + skipped=True, + msg=('vRouter interface on %s already exists. 
Check the ' + 'IP addresses' % vrouter_name) + ) + cli += ' %s vrouter-name %s ' % (command, vrouter_name) + cli += ' ip %s ' % interface_ip + + if vlan: + cli += ' vlan ' + str(vlan) + + if l3port: + cli += ' l3-port ' + l3port + + if assignment: + cli += ' assignment ' + assignment + + if vxlan: + cli += ' vxlan ' + str(vxlan) + + if interface: + cli += ' if ' + interface + + if alias: + cli += ' alias-on ' + alias + + if exclusive is True: + cli += ' exclusive ' + if exclusive is False: + cli += ' no-exclusive ' + + if nic_enable is True: + cli += ' nic-enable ' + if nic_enable is False: + cli += ' nic-disable ' + + if secondary_macs: + cli += ' secondary-macs ' + secondary_macs + + if command == 'vrouter-interface-remove': + if VROUTER_EXISTS is False: + module.exit_json( + skipped=True, + msg='vRouter %s does not exist' % vrouter_name + ) + if NIC_EXISTS is False: + module.exit_json( + skipped=True, + msg='vRouter interface with nic %s does not exist' % nic_str + ) + cli += ' %s vrouter-name %s nic %s ' % (command, vrouter_name, nic_str) + + run_cli(module, cli) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/netvisor/pn_vrouterlbif.py b/plugins/modules/network/netvisor/pn_vrouterlbif.py new file mode 100644 index 0000000000..cf3e64cdc9 --- /dev/null +++ b/plugins/modules/network/netvisor/pn_vrouterlbif.py @@ -0,0 +1,333 @@ +#!/usr/bin/python +""" PN CLI vrouter-loopback-interface-add/remove """ + +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['deprecated'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: pn_vrouterlbif +author: "Pluribus Networks (@amitsi)" +short_description: CLI command to add/remove vrouter-loopback-interface. +deprecated: + removed_in: '2.12' + why: Doesn't support latest Pluribus Networks netvisor + alternative: Latest modules will be pushed in Ansible future versions. +description: + - Execute vrouter-loopback-interface-add, vrouter-loopback-interface-remove + commands. + - Each fabric, cluster, standalone switch, or virtual network (VNET) can + provide its tenants with a virtual router (vRouter) service that forwards + traffic between networks and implements Layer 3 protocols. +options: + pn_cliusername: + description: + - Provide login username if user is not root. + required: False + pn_clipassword: + description: + - Provide login password if user is not root. + required: False + pn_cliswitch: + description: + - Target switch(es) to run the cli on. + required: False + default: 'local' + state: + description: + - State the action to perform. Use 'present' to add vrouter loopback + interface and 'absent' to remove vrouter loopback interface. + required: True + choices: ['present', 'absent'] + pn_vrouter_name: + description: + - Specify the name of the vRouter. + required: True + pn_index: + description: + - Specify the interface index from 1 to 255. 
+ pn_interface_ip: + description: + - Specify the IP address. + required: True +''' + +EXAMPLES = """ +- name: add vrouter-loopback-interface + pn_vrouterlbif: + state: 'present' + pn_vrouter_name: 'ansible-vrouter' + pn_interface_ip: '104.104.104.1' + +- name: remove vrouter-loopback-interface + pn_vrouterlbif: + state: 'absent' + pn_vrouter_name: 'ansible-vrouter' + pn_interface_ip: '104.104.104.1' +""" + +RETURN = """ +command: + description: The CLI command run on the target node(s). + returned: always + type: str +stdout: + description: The set of responses from the vrouterlb command. + returned: always + type: list +stderr: + description: The set of error responses from the vrouterlb command. + returned: on error + type: list +changed: + description: Indicates whether the CLI caused changes on the target. + returned: always + type: bool +""" + +import shlex + +# Ansible boiler-plate +from ansible.module_utils.basic import AnsibleModule + +VROUTER_EXISTS = None +LB_INTERFACE_EXISTS = None +# Index range +MIN_INDEX = 1 +MAX_INDEX = 255 + + +def pn_cli(module): + """ + This method is to generate the cli portion to launch the Netvisor cli. + It parses the username, password, switch parameters from module. + :param module: The Ansible module to fetch username, password and switch + :return: returns the cli string for further processing + """ + username = module.params['pn_cliusername'] + password = module.params['pn_clipassword'] + cliswitch = module.params['pn_cliswitch'] + + if username and password: + cli = '/usr/bin/cli --quiet --user %s:%s ' % (username, password) + else: + cli = '/usr/bin/cli --quiet ' + + if cliswitch == 'local': + cli += ' switch-local ' + else: + cli += ' switch ' + cliswitch + return cli + + +def check_cli(module, cli): + """ + This method checks if vRouter exists on the target node. + This method also checks for idempotency using the + vrouter-loopback-interface-show command. + If the given vRouter exists, return VROUTER_EXISTS as True else False. + If a loopback interface with the given ip exists on the given vRouter, + return LB_INTERFACE_EXISTS as True else False. + + :param module: The Ansible module to fetch input parameters + :param cli: The CLI string + :return Global Booleans: VROUTER_EXISTS, LB_INTERFACE_EXISTS + """ + vrouter_name = module.params['pn_vrouter_name'] + interface_ip = module.params['pn_interface_ip'] + + # Global flags + global VROUTER_EXISTS, LB_INTERFACE_EXISTS + + # Check for vRouter + check_vrouter = cli + ' vrouter-show format name no-show-headers ' + check_vrouter = shlex.split(check_vrouter) + out = module.run_command(check_vrouter)[1] + out = out.split() + + if vrouter_name in out: + VROUTER_EXISTS = True + else: + VROUTER_EXISTS = False + + # Check for loopback interface + show = (cli + ' vrouter-loopback-interface-show vrouter-name %s format ip ' + 'no-show-headers' % vrouter_name) + show = shlex.split(show) + out = module.run_command(show)[1] + out = out.split() + + if interface_ip in out: + LB_INTERFACE_EXISTS = True + else: + LB_INTERFACE_EXISTS = False + + +def run_cli(module, cli): + """ + This method executes the cli command on the target node(s) and returns the + output. The module then exits based on the output. + :param cli: the complete cli string to be executed on the target node(s). 
+ :param module: The Ansible module to fetch command + """ + cliswitch = module.params['pn_cliswitch'] + state = module.params['state'] + command = get_command_from_state(state) + + cmd = shlex.split(cli) + + # 'out' contains the output + # 'err' contains the error messages + result, out, err = module.run_command(cmd) + + print_cli = cli.split(cliswitch)[1] + + # Response in JSON format + if result != 0: + module.exit_json( + command=print_cli, + stderr=err.strip(), + msg="%s operation failed" % command, + changed=False + ) + + if out: + module.exit_json( + command=print_cli, + stdout=out.strip(), + msg="%s operation completed" % command, + changed=True + ) + + else: + module.exit_json( + command=print_cli, + msg="%s operation completed" % command, + changed=True + ) + + +def get_command_from_state(state): + """ + This method gets appropriate command name for the state specified. It + returns the command name for the specified state. + :param state: The state for which the respective command name is required. + """ + command = None + if state == 'present': + command = 'vrouter-loopback-interface-add' + if state == 'absent': + command = 'vrouter-loopback-interface-remove' + return command + + +def main(): + """ This portion is for arguments parsing """ + module = AnsibleModule( + argument_spec=dict( + pn_cliusername=dict(required=False, type='str'), + pn_clipassword=dict(required=False, type='str', no_log=True), + pn_cliswitch=dict(required=False, type='str', default='local'), + state=dict(required=True, type='str', + choices=['present', 'absent']), + pn_vrouter_name=dict(required=True, type='str'), + pn_interface_ip=dict(type='str'), + pn_index=dict(type='int') + ), + required_if=( + ["state", "present", + ["pn_vrouter_name", "pn_interface_ip"]], + ["state", "absent", + ["pn_vrouter_name", "pn_interface_ip"]] + ) + ) + + # Accessing the arguments + state = module.params['state'] + vrouter_name = module.params['pn_vrouter_name'] + interface_ip = module.params['pn_interface_ip'] + index = module.params['pn_index'] + + command = get_command_from_state(state) + + # Building the CLI command string + cli = pn_cli(module) + + if index: + if not MIN_INDEX <= index <= MAX_INDEX: + module.exit_json( + msg="Index must be between 1 and 255", + changed=False + ) + index = str(index) + + if command == 'vrouter-loopback-interface-remove': + check_cli(module, cli) + if VROUTER_EXISTS is False: + module.exit_json( + skipped=True, + msg='vRouter %s does not exist' % vrouter_name + ) + if LB_INTERFACE_EXISTS is False: + module.exit_json( + skipped=True, + msg=('Loopback interface with IP %s does not exist on %s' + % (interface_ip, vrouter_name)) + ) + if not index: + # To remove loopback interface, we need the index. + # If index is not specified, get the Loopback interface index + # using the given interface ip. 
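+            # For example, with the values from the EXAMPLES block above,
+            # this lookup runs: vrouter-loopback-interface-show vrouter-name
+            # ansible-vrouter ip 104.104.104.1 format index no-show-headers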
+ get_index = cli + get_index += (' vrouter-loopback-interface-show vrouter-name %s ip ' + '%s ' % (vrouter_name, interface_ip)) + get_index += 'format index no-show-headers' + + get_index = shlex.split(get_index) + out = module.run_command(get_index)[1] + index = out.split()[1] + + cli += ' %s vrouter-name %s index %s' % (command, vrouter_name, index) + + if command == 'vrouter-loopback-interface-add': + check_cli(module, cli) + if VROUTER_EXISTS is False: + module.exit_json( + skipped=True, + msg=('vRouter %s does not exist' % vrouter_name) + ) + if LB_INTERFACE_EXISTS is True: + module.exit_json( + skipped=True, + msg=('Loopback interface with IP %s already exists on %s' + % (interface_ip, vrouter_name)) + ) + cli += (' %s vrouter-name %s ip %s' + % (command, vrouter_name, interface_ip)) + if index: + cli += ' index %s ' % index + + run_cli(module, cli) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/netvisor/pn_vtep.py b/plugins/modules/network/netvisor/pn_vtep.py new file mode 100644 index 0000000000..919f35df24 --- /dev/null +++ b/plugins/modules/network/netvisor/pn_vtep.py @@ -0,0 +1,203 @@ +#!/usr/bin/python +# Copyright: (c) 2018, Pluribus Networks +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: pn_vtep +author: "Pluribus Networks (@rajaspachipulusu17)" +short_description: CLI command to create/delete vtep +description: + - This module can be used to create a vtep and delete a vtep. +options: + pn_cliswitch: + description: + - Target switch to run the CLI on. + required: false + type: str + state: + description: + - vtep configuration command. + required: false + choices: ['present', 'absent'] + type: str + default: 'present' + pn_name: + description: + - vtep name. + required: false + type: str + pn_ip: + description: + - Primary IP address. + required: false + type: str + pn_vrouter_name: + description: + - name of the vrouter service. + required: false + type: str + pn_virtual_ip: + description: + - Virtual/Secondary IP address. + required: false + type: str + pn_location: + description: + - switch name. + required: false + type: str + pn_switch_in_cluster: + description: + - Tells whether switch in cluster or not. + required: false + type: bool + default: True +''' + +EXAMPLES = """ +- name: create vtep + pn_vtep: + pn_cliswitch: 'sw01' + pn_name: 'foo' + pn_vrouter_name: 'foo-vrouter' + pn_ip: '22.22.22.2' + pn_location: 'sw01' + pn_virtual_ip: "22.22.22.1" + +- name: delete vtep + pn_vtep: + pn_cliswitch: 'sw01' + state: 'absent' + pn_name: 'foo' +""" + +RETURN = """ +command: + description: the CLI command run on the target node. + returned: always + type: str +stdout: + description: set of responses from the vtep command. + returned: always + type: list +stderr: + description: set of error responses from the vtep command. + returned: on error + type: list +changed: + description: indicates whether the CLI caused changes on the target. 
+  returned: always
+  type: bool
+"""
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.network.netvisor.pn_nvos import pn_cli, run_cli
+from ansible_collections.community.general.plugins.module_utils.network.netvisor.netvisor import run_commands
+
+
+def check_cli(module, cli):
+    """
+    This method checks for idempotency using the vtep-show command.
+    Return True if the given vtep name already exists, else False.
+    :param module: The Ansible module to fetch input parameters
+    :param cli: The CLI string
+    """
+    name = module.params['pn_name']
+
+    cli += ' vtep-show format name no-show-headers'
+    out = run_commands(module, cli)[1]
+
+    if out:
+        out = out.split()
+
+    return True if name in out else False
+
+
+def main():
+    """ This section is for arguments parsing """
+
+    state_map = dict(
+        present='vtep-create',
+        absent='vtep-delete'
+    )
+
+    argument_spec = dict(
+        pn_cliswitch=dict(required=False, type='str'),
+        state=dict(required=False, type='str', choices=state_map.keys(), default='present'),
+        pn_name=dict(required=False, type='str'),
+        pn_ip=dict(required=False, type='str'),
+        pn_vrouter_name=dict(required=False, type='str'),
+        pn_virtual_ip=dict(required=False, type='str'),
+        pn_location=dict(required=False, type='str'),
+        pn_switch_in_cluster=dict(required=False, type='bool', default=True)
+    )
+
+    module = AnsibleModule(
+        argument_spec=argument_spec,
+        required_if=(
+            ["state", "present", ["pn_name", "pn_ip", "pn_vrouter_name", "pn_location"]],
+            ["state", "absent", ["pn_name"]],
+        ),
+    )
+
+    # Accessing the arguments
+    cliswitch = module.params['pn_cliswitch']
+    state = module.params['state']
+    name = module.params['pn_name']
+    ip = module.params['pn_ip']
+    vrouter_name = module.params['pn_vrouter_name']
+    virtual_ip = module.params['pn_virtual_ip']
+    location = module.params['pn_location']
+    switch_in_cluster = module.params['pn_switch_in_cluster']
+
+    if switch_in_cluster and not virtual_ip and state == 'present':
+        module.fail_json(
+            msg='virtual ip is required when switch is in cluster'
+        )
+
+    command = state_map[state]
+
+    # Building the CLI command string
+    cli = pn_cli(module, cliswitch)
+
+    NAME_EXISTS = check_cli(module, cli)
+
+    cli += ' %s name %s ' % (command, name)
+
+    if command == 'vtep-delete':
+        if NAME_EXISTS is False:
+            module.exit_json(
+                skipped=True,
+                msg='vtep with name %s does not exist' % name
+            )
+
+    if command == 'vtep-create':
+        if NAME_EXISTS is True:
+            module.exit_json(
+                skipped=True,
+                msg='vtep with name %s already exists' % name
+            )
+
+        cli += 'vrouter-name %s ' % vrouter_name
+        cli += 'ip %s ' % ip
+        cli += 'location %s ' % location
+
+        if virtual_ip:
+            cli += 'virtual-ip %s ' % virtual_ip
+
+    run_cli(module, cli, state_map)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/network/nos/nos_command.py b/plugins/modules/network/nos/nos_command.py
new file mode 100644
index 0000000000..7eb9be77a1
--- /dev/null
+++ b/plugins/modules/network/nos/nos_command.py
@@ -0,0 +1,224 @@
+#!/usr/bin/python
+
+# Copyright: (c) 2018, Extreme Networks Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: nos_command
+author: "Lindsay Hill (@LindsayHill)"
+short_description: Run commands on remote devices running Extreme Networks NOS
+description:
+  - Sends arbitrary commands to a NOS device and returns the results
+    read from the device. This module includes an
+    argument that will cause the module to wait for a specific condition
+    before returning or timing out if the condition is not met.
+  - This module does not support running commands in configuration mode.
+    Please use M(nos_config) to configure NOS devices.
+notes:
+  - Tested against NOS 7.2.0
+  - If a command sent to the device requires answering a prompt, it is possible
+    to pass a dict containing I(command), I(answer) and I(prompt). See examples.
+options:
+  commands:
+    description:
+      - List of commands to send to the remote NOS device over the
+        configured provider. The resulting output from the command
+        is returned. If the I(wait_for) argument is provided, the
+        module does not return until the condition is satisfied or
+        the number of retries has expired.
+    required: true
+  wait_for:
+    description:
+      - List of conditions to evaluate against the output of the
+        command. The task will wait for each condition to be true
+        before moving forward. If the conditional is not true
+        within the configured number of retries, the task fails.
+        See examples.
+  match:
+    description:
+      - The I(match) argument is used in conjunction with the
+        I(wait_for) argument to specify the match policy. Valid
+        values are C(all) or C(any). If the value is set to C(all)
+        then all conditionals in the wait_for must be satisfied. If
+        the value is set to C(any) then only one of the values must be
+        satisfied.
+    default: all
+    choices: ['any', 'all']
+  retries:
+    description:
+      - Specifies the number of times a command should be tried
+        before it is considered failed. The command is run on the
+        target device every retry and evaluated against the
+        I(wait_for) conditions.
+    default: 10
+  interval:
+    description:
+      - Configures the interval in seconds to wait between retries
+        of the command. If the command does not pass the specified
+        conditions, the interval indicates how long to wait before
+        trying the command again.
+    default: 1
+'''
+
+EXAMPLES = """
+tasks:
+  - name: run show version on remote devices
+    nos_command:
+      commands: show version
+
+  - name: run show version and check to see if output contains NOS
+    nos_command:
+      commands: show version
+      wait_for: result[0] contains NOS
+
+  - name: run multiple commands on remote nodes
+    nos_command:
+      commands:
+        - show version
+        - show interfaces
+
+  - name: run multiple commands and evaluate the output
+    nos_command:
+      commands:
+        - show version
+        - show interface status
+      wait_for:
+        - result[0] contains NOS
+        - result[1] contains Te
+
+  - name: run command that requires answering a prompt
+    nos_command:
+      commands:
+        - command: 'clear sessions'
+          prompt: 'This operation will logout all the user sessions.
Do you want to continue (yes/no)?:' + answer: y +""" + +RETURN = """ +stdout: + description: The set of responses from the commands + returned: always apart from low level errors (such as action plugin) + type: list + sample: ['...', '...'] +stdout_lines: + description: The value of stdout split into a list + returned: always apart from low level errors (such as action plugin) + type: list + sample: [['...', '...'], ['...'], ['...']] +failed_conditions: + description: The list of conditionals that have failed + returned: failed + type: list + sample: ['...', '...'] +""" +import re +import time + +from ansible_collections.community.general.plugins.module_utils.network.nos.nos import run_commands +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import ComplexList +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.parsing import Conditional +from ansible.module_utils.six import string_types + + +__metaclass__ = type + + +def to_lines(stdout): + for item in stdout: + if isinstance(item, string_types): + item = str(item).split('\n') + yield item + + +def parse_commands(module, warnings): + command = ComplexList(dict( + command=dict(key=True), + prompt=dict(), + answer=dict() + ), module) + commands = command(module.params['commands']) + for item in list(commands): + configure_type = re.match(r'conf(?:\w*)(?:\s+(\w+))?', item['command']) + if module.check_mode: + if configure_type and configure_type.group(1) not in ('confirm', 'replace', 'revert', 'network'): + module.fail_json( + msg='nos_command does not support running config mode ' + 'commands. Please use nos_config instead' + ) + if not item['command'].startswith('show'): + warnings.append( + 'only show commands are supported when using check mode, not ' + 'executing `%s`' % item['command'] + ) + commands.remove(item) + return commands + + +def main(): + """main entry point for module execution + """ + argument_spec = dict( + commands=dict(type='list', required=True), + + wait_for=dict(type='list'), + match=dict(default='all', choices=['all', 'any']), + + retries=dict(default=10, type='int'), + interval=dict(default=1, type='int') + ) + + module = AnsibleModule(argument_spec=argument_spec, + supports_check_mode=True) + + result = {'changed': False} + + warnings = list() + commands = parse_commands(module, warnings) + result['warnings'] = warnings + + wait_for = module.params['wait_for'] or list() + conditionals = [Conditional(c) for c in wait_for] + + retries = module.params['retries'] + interval = module.params['interval'] + match = module.params['match'] + + while retries > 0: + responses = run_commands(module, commands) + + for item in list(conditionals): + if item(responses): + if match == 'any': + conditionals = list() + break + conditionals.remove(item) + + if not conditionals: + break + + time.sleep(interval) + retries -= 1 + + if conditionals: + failed_conditions = [item.raw for item in conditionals] + msg = 'One or more conditional statements have not been satisfied' + module.fail_json(msg=msg, failed_conditions=failed_conditions) + + result.update({ + 'changed': False, + 'stdout': responses, + 'stdout_lines': list(to_lines(responses)) + }) + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/nos/nos_config.py b/plugins/modules/network/nos/nos_config.py new file mode 100644 index 0000000000..c73fdfd1b3 --- /dev/null +++ 
b/plugins/modules/network/nos/nos_config.py @@ -0,0 +1,394 @@ +#!/usr/bin/python + +# Copyright: (c) 2018, Extreme Networks Inc. +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: nos_config +author: "Lindsay Hill (@LindsayHill)" +short_description: Manage Extreme Networks NOS configuration sections +description: + - Extreme NOS configurations use a simple block indent file syntax + for segmenting configuration into sections. This module provides + an implementation for working with NOS configuration sections in + a deterministic way. +notes: + - Tested against NOS 7.2.0 +options: + lines: + description: + - The ordered set of commands that should be configured in the + section. The commands must be the exact same commands as found + in the device running-config. Be sure to note the configuration + command syntax as some commands are automatically modified by the + device config parser. + aliases: ['commands'] + parents: + description: + - The ordered set of parents that uniquely identify the section or hierarchy + the commands should be checked against. If the parents argument + is omitted, the commands are checked against the set of top + level or global commands. + src: + description: + - Specifies the source path to the file that contains the configuration + or configuration template to load. The path to the source file can + either be the full path on the Ansible control host or a relative + path from the playbook or role root directory. This argument is mutually + exclusive with I(lines), I(parents). + before: + description: + - The ordered set of commands to push on to the command stack if + a change needs to be made. This allows the playbook designer + the opportunity to perform configuration commands prior to pushing + any changes without affecting how the set of commands are matched + against the system. + after: + description: + - The ordered set of commands to append to the end of the command + stack if a change needs to be made. Just like with I(before) this + allows the playbook designer to append a set of commands to be + executed after the command set. + match: + description: + - Instructs the module on the way to perform the matching of + the set of commands against the current device config. If + match is set to I(line), commands are matched line by line. If + match is set to I(strict), command lines are matched with respect + to position. If match is set to I(exact), command lines + must be an equal match. Finally, if match is set to I(none), the + module will not attempt to compare the source configuration with + the running configuration on the remote device. + default: line + choices: ['line', 'strict', 'exact', 'none'] + replace: + description: + - Instructs the module on the way to perform the configuration + on the device. If the replace argument is set to I(line) then + the modified lines are pushed to the device in configuration + mode. If the replace argument is set to I(block) then the entire + command block is pushed to the device in configuration mode if any + line is not correct. + default: line + choices: ['line', 'block'] + multiline_delimiter: + description: + - This argument is used when pushing a multiline configuration + element to the NOS device. 
It specifies the character to use
+        as the delimiting character. This only applies to the
+        configuration action.
+    default: "@"
+  backup:
+    description:
+      - This argument will cause the module to create a full backup of
+        the current C(running-config) from the remote device before any
+        changes are made. If the C(backup_options) value is not given,
+        the backup file is written to the C(backup) folder in the playbook
+        root directory. If the directory does not exist, it is created.
+    type: bool
+    default: 'no'
+  running_config:
+    description:
+      - The module, by default, will connect to the remote device and
+        retrieve the current running-config to use as a base for comparing
+        against the contents of source. There are times when it is not
+        desirable to have the task get the current running-config for
+        every task in a playbook. The I(running_config) argument allows the
+        implementer to pass in the configuration to use as the base
+        config for comparison.
+    aliases: ['config']
+  diff_against:
+    description:
+      - When using the C(ansible-playbook --diff) command line argument
+        the module can generate diffs against different sources.
+      - When this option is configured as I(intended), the module will
+        return the diff of the running-config against the configuration
+        provided in the C(intended_config) argument.
+      - When this option is configured as I(running), the module will
+        return the before and after diff of the running-config with respect
+        to any changes made to the device configuration.
+    choices: ['running', 'intended']
+  diff_ignore_lines:
+    description:
+      - Use this argument to specify one or more lines that should be
+        ignored during the diff. This is used for lines in the configuration
+        that are automatically updated by the system. This argument takes
+        a list of regular expressions or exact line matches.
+  intended_config:
+    description:
+      - The C(intended_config) provides the master configuration that
+        the node should conform to and is used to check the final
+        running-config against. This argument will not modify any settings
+        on the remote device and is strictly used to check the compliance
+        of the current device's configuration against. When specifying this
+        argument, the task should also modify the C(diff_against) value and
+        set it to I(intended).
+  backup_options:
+    description:
+      - This is a dict object containing configurable options related to backup file path.
+        The value of this option is read only when C(backup) is set to I(yes); if C(backup) is set
+        to I(no) this option will be silently ignored.
+    suboptions:
+      filename:
+        description:
+          - The filename to be used to store the backup configuration. If the filename
+            is not given it will be generated based on the hostname, current time and date
+            in format defined by <hostname>_config.<current-date>@<current-time>
+      dir_path:
+        description:
+          - This option provides the path ending with directory name in which the backup
+            configuration file will be stored. If the directory does not exist it will be first
+            created and the filename is either the value of C(filename) or default filename
+            as described in C(filename) options description. If the path value is not given
+            in that case a I(backup) directory will be created in the current working directory
+            and backup configuration will be copied in C(filename) within I(backup) directory.
+ type: path + type: dict +''' + +EXAMPLES = """ +- name: configure top level configuration + nos_config: + lines: logging raslog console INFO + +- name: configure interface settings + nos_config: + lines: + - description test interface + - ip address 172.31.1.1/24 + parents: + - interface TenGigabitEthernet 104/0/1 + +- name: configure multiple interfaces + nos_config: + lines: + - lacp timeout long + parents: "{{ item }}" + with_items: + - interface TenGigabitEthernet 104/0/1 + - interface TenGigabitEthernet 104/0/2 + +- name: load new acl into device + nos_config: + lines: + - seq 10 permit ip host 1.1.1.1 any log + - seq 20 permit ip host 2.2.2.2 any log + - seq 30 permit ip host 3.3.3.3 any log + - seq 40 permit ip host 4.4.4.4 any log + - seq 50 permit ip host 5.5.5.5 any log + parents: ip access-list extended test + before: no ip access-list extended test + match: exact + +- name: check the running-config against master config + nos_config: + diff_against: intended + intended_config: "{{ lookup('file', 'master.cfg') }}" + +- name: configurable backup path + nos_config: + lines: logging raslog console INFO + backup: yes + backup_options: + filename: backup.cfg + dir_path: /home/user +""" + +RETURN = """ +updates: + description: The set of commands that will be pushed to the remote device + returned: always + type: list + sample: ['switch-attributes hostname foo', 'router ospf', 'area 0'] +commands: + description: The set of commands that will be pushed to the remote device + returned: always + type: list + sample: ['switch-attributes hostname foo', 'router ospf', 'area 0'] +backup_path: + description: The full path to the backup file + returned: when backup is yes + type: str + sample: /playbooks/ansible/backup/nos_config.2018-02-12@18:26:34 +""" + +from ansible_collections.community.general.plugins.module_utils.network.nos.nos import run_commands, get_config, load_config +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.config import NetworkConfig, dumps + +__metaclass__ = type + + +def check_args(module, warnings): + if module.params['multiline_delimiter']: + if len(module.params['multiline_delimiter']) != 1: + module.fail_json(msg='multiline_delimiter value can only be a ' + 'single character') + + +def get_running_config(module, current_config=None): + contents = module.params['running_config'] + if not contents: + if current_config: + contents = current_config.config_text + else: + contents = get_config(module) + return NetworkConfig(indent=1, contents=contents) + + +def get_candidate(module): + candidate = NetworkConfig(indent=1) + + if module.params['src']: + src = module.params['src'] + candidate.load(src) + + elif module.params['lines']: + parents = module.params['parents'] or list() + candidate.add(module.params['lines'], parents=parents) + + return candidate + + +def main(): + """ main entry point for module execution + """ + backup_spec = dict( + filename=dict(), + dir_path=dict(type='path') + ) + argument_spec = dict( + src=dict(type='path'), + + lines=dict(aliases=['commands'], type='list'), + parents=dict(type='list'), + + before=dict(type='list'), + after=dict(type='list'), + + match=dict(default='line', choices=['line', 'strict', 'exact', 'none']), + replace=dict(default='line', choices=['line', 'block']), + multiline_delimiter=dict(default='@'), + + running_config=dict(aliases=['config']), + intended_config=dict(), + + backup=dict(type='bool', default=False), + 
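+        # An illustrative task-level value for backup_options (paths are
+        # hypothetical), validated against backup_spec above:
+        #
+        #   backup_options:
+        #     filename: backup.cfg
+        #     dir_path: /home/user
+        #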
backup_options=dict(type='dict', options=backup_spec),
+
+        diff_against=dict(choices=['intended', 'running']),
+        diff_ignore_lines=dict(type='list'),
+    )
+
+    mutually_exclusive = [('lines', 'src'),
+                          ('parents', 'src')]
+
+    required_if = [('match', 'strict', ['lines']),
+                   ('match', 'exact', ['lines']),
+                   ('replace', 'block', ['lines']),
+                   ('diff_against', 'intended', ['intended_config'])]
+
+    module = AnsibleModule(argument_spec=argument_spec,
+                           mutually_exclusive=mutually_exclusive,
+                           required_if=required_if,
+                           supports_check_mode=True)
+
+    result = {'changed': False}
+
+    warnings = list()
+    check_args(module, warnings)
+    result['warnings'] = warnings
+
+    config = None
+
+    if module.params['backup'] or (module._diff and module.params['diff_against'] == 'running'):
+        contents = get_config(module)
+        config = NetworkConfig(indent=1, contents=contents)
+        if module.params['backup']:
+            result['__backup__'] = contents
+
+    if any((module.params['lines'], module.params['src'])):
+        match = module.params['match']
+        replace = module.params['replace']
+        path = module.params['parents']
+
+        candidate = get_candidate(module)
+
+        if match != 'none':
+            config = get_running_config(module, config)
+            configobjs = candidate.difference(config, path=path, match=match, replace=replace)
+        else:
+            configobjs = candidate.items
+
+        if configobjs:
+            commands = dumps(configobjs, 'commands').split('\n')
+
+            if module.params['before']:
+                commands[:0] = module.params['before']
+
+            if module.params['after']:
+                commands.extend(module.params['after'])
+
+            result['commands'] = commands
+            result['updates'] = commands
+
+            # send the configuration commands to the device and merge
+            # them with the current running config
+            if not module.check_mode:
+                if commands:
+                    load_config(module, commands)
+
+            result['changed'] = True
+
+    running_config = None
+
+    diff_ignore_lines = module.params['diff_ignore_lines']
+
+    if module._diff:
+        if not running_config:
+            output = run_commands(module, 'show running-config')
+            contents = output[0]
+        else:
+            contents = running_config.config_text
+
+        # recreate the object in order to process diff_ignore_lines
+        running_config = NetworkConfig(indent=1, contents=contents, ignore_lines=diff_ignore_lines)
+
+        if module.params['diff_against'] == 'running':
+            if module.check_mode:
+                module.warn("unable to perform diff against running-config due to check mode")
+                contents = None
+            else:
+                contents = config.config_text
+
+        elif module.params['diff_against'] == 'intended':
+            contents = module.params['intended_config']
+
+        if contents is not None:
+            base_config = NetworkConfig(indent=1, contents=contents, ignore_lines=diff_ignore_lines)
+
+            if running_config.sha1 != base_config.sha1:
+                if module.params['diff_against'] == 'intended':
+                    before = running_config
+                    after = base_config
+                elif module.params['diff_against'] == 'running':
+                    before = base_config
+                    after = running_config
+
+                result.update({
+                    'changed': True,
+                    'diff': {'before': str(before), 'after': str(after)}
+                })
+
+    module.exit_json(**result)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/network/nos/nos_facts.py b/plugins/modules/network/nos/nos_facts.py
new file mode 100644
index 0000000000..4c597a8706
--- /dev/null
+++ b/plugins/modules/network/nos/nos_facts.py
@@ -0,0 +1,458 @@
+#!/usr/bin/python
+#
+# (c) 2018 Extreme Networks Inc.
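+#
+# The fact classes in this module follow a simple dispatch pattern: each
+# FactsBase subclass declares the CLI commands it needs and parses the
+# responses into a facts dict. A minimal standalone sketch of the same idea
+# (stubbed command runner, illustrative only):
+#
+#   class FactsBase(object):
+#       COMMANDS = []
+#
+#       def __init__(self, run):
+#           self.facts, self._run = {}, run
+#
+#       def populate(self):
+#           self.responses = [self._run(cmd) for cmd in self.COMMANDS]
+#
+#   class Default(FactsBase):
+#       COMMANDS = ['show version']
+#
+#       def populate(self):
+#           super(Default, self).populate()
+#           self.facts['version'] = self.responses[0].strip()
+#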
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <https://www.gnu.org/licenses/>.
+#
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: nos_facts
+author: "Lindsay Hill (@LindsayHill)"
+short_description: Collect facts from devices running Extreme NOS
+description:
+  - Collects a base set of device facts from a remote device that
+    is running NOS. This module prepends all of the
+    base network fact keys with C(ansible_net_). The facts
+    module will always collect a base set of facts from the device
+    and can enable or disable collection of additional facts.
+notes:
+  - Tested against NOS 7.2.0
+options:
+  gather_subset:
+    description:
+      - When supplied, this argument will restrict the facts collected
+        to a given subset. Possible values for this argument include
+        all, hardware, config, and interfaces. Can specify a list of
+        values to include a larger subset. Values can also be used
+        with an initial C(!) to specify that a specific subset should
+        not be collected.
+    required: false
+    default: '!config'
+'''
+
+EXAMPLES = """
+# Collect all facts from the device
+- nos_facts:
+    gather_subset: all
+
+# Collect only the config and default facts
+- nos_facts:
+    gather_subset:
+      - config
+
+# Do not collect hardware facts
+- nos_facts:
+    gather_subset:
+      - "!hardware"
+"""
+
+RETURN = """
+ansible_net_gather_subset:
+  description: The list of fact subsets collected from the device
+  returned: always
+  type: list
+
+# default
+ansible_net_model:
+  description: The model name returned from the device
+  returned: always
+  type: str
+ansible_net_serialnum:
+  description: The serial number of the remote device
+  returned: always
+  type: str
+ansible_net_version:
+  description: The operating system version running on the remote device
+  returned: always
+  type: str
+ansible_net_hostname:
+  description: The configured hostname of the device
+  returned: always
+  type: str
+
+# hardware
+ansible_net_memfree_mb:
+  description: The available free memory on the remote device in MB
+  returned: when hardware is configured
+  type: int
+ansible_net_memtotal_mb:
+  description: The total memory on the remote device in MB
+  returned: when hardware is configured
+  type: int
+
+# config
+ansible_net_config:
+  description: The current active config from the device
+  returned: when config is configured
+  type: str
+
+# interfaces
+ansible_net_all_ipv4_addresses:
+  description: All IPv4 addresses configured on the device
+  returned: when interfaces is configured
+  type: list
+ansible_net_all_ipv6_addresses:
+  description: All primary IPv6 addresses configured on the device
+  returned: when interfaces is configured
+  type: list
+ansible_net_interfaces:
+  description: A hash of all interfaces running on the system
+  returned: when interfaces is configured
+  type: dict
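+# An illustrative (not device-captured) entry shape for ansible_net_interfaces,
+# based on the keys populated by the Interfaces class below; all values are
+# hypothetical:
+#   "TenGigabitEthernet 104/0/1":
+#     description: uplink
+#     macaddress: 00:27:f8:xx:xx:xx
+#     mtu: 1548
+#     bandwidth: 10000 Mbit
+#     duplex: Full
+#     lineprotocol: up
+#     operstatus: up
+#     type: Ethernet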
+ansible_net_neighbors: + description: The list of LLDP neighbors from the remote device + returned: when interfaces is configured + type: dict +""" +import re + +from ansible_collections.community.general.plugins.module_utils.network.nos.nos import run_commands +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.six import iteritems + + +class FactsBase(object): + + COMMANDS = list() + + def __init__(self, module): + self.module = module + self.facts = dict() + self.responses = None + + def populate(self): + self.responses = run_commands(self.module, self.COMMANDS) + + def run(self, cmd): + return run_commands(self.module, cmd) + + +class Default(FactsBase): + + COMMANDS = [ + 'show version', + 'show inventory chassis', + r'show running-config | include host\-name' + ] + + def populate(self): + super(Default, self).populate() + data = self.responses[0] + if data: + self.facts['version'] = self.parse_version(data) + + data = self.responses[1] + if data: + self.facts['model'] = self.parse_model(data) + self.facts['serialnum'] = self.parse_serialnum(data) + + data = self.responses[2] + if data: + self.facts['hostname'] = self.parse_hostname(data) + + def parse_version(self, data): + match = re.search(r'Network Operating System Version: (\S+)', data) + if match: + return match.group(1) + + def parse_model(self, data): + match = re.search(r'SID:(\S+)', data, re.M) + if match: + return match.group(1) + + def parse_hostname(self, data): + match = re.search(r'switch-attributes host-name (\S+)', data, re.M) + if match: + return match.group(1) + + def parse_serialnum(self, data): + match = re.search(r'SN:(\S+)', data, re.M) + if match: + return match.group(1) + + +class Hardware(FactsBase): + + COMMANDS = [ + 'show process memory summary' + ] + + def populate(self): + super(Hardware, self).populate() + data = self.responses[0] + if data: + self.facts['memtotal_mb'] = int(round(int(self.parse_memtotal(data)) / 1024, 0)) + self.facts['memfree_mb'] = int(round(int(self.parse_memfree(data)) / 1024, 0)) + + def parse_memtotal(self, data): + match = re.search(r'TotalMemory: (\d+)\s', data, re.M) + if match: + return match.group(1) + + def parse_memfree(self, data): + match = re.search(r'Total Free: (\d+)\s', data, re.M) + if match: + return match.group(1) + + +class Config(FactsBase): + + COMMANDS = ['show running-config'] + + def populate(self): + super(Config, self).populate() + data = self.responses[0] + if data: + self.facts['config'] = data + + +class Interfaces(FactsBase): + + COMMANDS = [ + 'show interface', + 'show ipv6 interface brief', + r'show lldp nei detail | inc ^Local\ Interface|^Remote\ Interface|^System\ Name' + ] + + def populate(self): + super(Interfaces, self).populate() + + self.facts['all_ipv4_addresses'] = list() + self.facts['all_ipv6_addresses'] = list() + + data = self.responses[0] + if data: + interfaces = self.parse_interfaces(data) + self.facts['interfaces'] = self.populate_interfaces(interfaces) + self.populate_ipv4_interfaces(interfaces) + + data = self.responses[1] + if data: + self.populate_ipv6_interfaces(data) + + data = self.responses[2] + if data: + self.facts['neighbors'] = self.parse_neighbors(data) + else: + self.facts['neighbors'] = dict() + + def populate_interfaces(self, interfaces): + facts = dict() + for key, value in iteritems(interfaces): + intf = dict() + intf['description'] = self.parse_description(value) + intf['macaddress'] = self.parse_macaddress(value) + intf['mtu'] = self.parse_mtu(value) + intf['bandwidth'] = 
self.parse_bandwidth(value) + intf['duplex'] = self.parse_duplex(value) + intf['lineprotocol'] = self.parse_lineprotocol(value) + intf['operstatus'] = self.parse_operstatus(value) + intf['type'] = self.parse_type(value) + + facts[key] = intf + return facts + + def populate_ipv4_interfaces(self, data): + for key, value in data.items(): + self.facts['interfaces'][key]['ipv4'] = list() + primary_address = addresses = [] + primary_address = re.findall(r'Primary Internet Address is (\S+)', value, re.M) + addresses = re.findall(r'Secondary Internet Address is (\S+)', value, re.M) + if not primary_address: + continue + addresses.append(primary_address[0]) + for address in addresses: + addr, subnet = address.split("/") + ipv4 = dict(address=addr.strip(), subnet=subnet.strip()) + self.add_ip_address(addr.strip(), 'ipv4') + self.facts['interfaces'][key]['ipv4'].append(ipv4) + + # Only gets primary IPv6 addresses + def populate_ipv6_interfaces(self, data): + interfaces = re.split('=+', data)[1].strip() + matches = re.findall(r'(\S+ \S+) +[\w-]+.+\s+([\w:/]+/\d+)', interfaces, re.M) + for match in matches: + interface = match[0] + self.facts['interfaces'][interface]['ipv6'] = list() + address, masklen = match[1].split('/') + ipv6 = dict(address=address, masklen=int(masklen)) + self.add_ip_address(ipv6['address'], 'ipv6') + self.facts['interfaces'][interface]['ipv6'].append(ipv6) + + def add_ip_address(self, address, family): + if family == 'ipv4': + self.facts['all_ipv4_addresses'].append(address) + else: + self.facts['all_ipv6_addresses'].append(address) + + def parse_neighbors(self, neighbors): + facts = dict() + lines = neighbors.split('Local Interface: ') + if not lines: + return facts + for line in lines: + match = re.search(r'(\w+ \S+)\s+\(Local Int.+?\)[\s\S]+Remote Interface: (\S+.+?) 
\(Remote Int.+?\)[\s\S]+System Name: (\S+)', line, re.M) + if match: + intf = match.group(1) + if intf not in facts: + facts[intf] = list() + fact = dict() + fact['host'] = match.group(3) + fact['port'] = match.group(2) + facts[intf].append(fact) + return facts + + def parse_interfaces(self, data): + parsed = dict() + for interface in data.split('\n\n'): + match = re.match(r'^(\S+ \S+)', interface, re.M) + if not match: + continue + else: + parsed[match.group(1)] = interface + return parsed + + def parse_description(self, data): + match = re.search(r'Description: (.+)$', data, re.M) + if match: + return match.group(1) + + def parse_macaddress(self, data): + match = re.search(r'Hardware is Ethernet, address is (\S+)', data) + if match: + return match.group(1) + + def parse_ipv4(self, data): + match = re.search(r'Primary Internet Address is ([^\s,]+)', data) + if match: + addr, masklen = match.group(1).split('/') + return dict(address=addr, masklen=int(masklen)) + + def parse_mtu(self, data): + match = re.search(r'MTU (\d+) bytes', data) + if match: + return int(match.group(1)) + + def parse_bandwidth(self, data): + match = re.search(r'LineSpeed Actual\s+:\s(.+)', data) + if match: + return match.group(1) + + def parse_duplex(self, data): + match = re.search(r'Duplex: (\S+)', data, re.M) + if match: + return match.group(1) + + def parse_type(self, data): + match = re.search(r'Hardware is (.+),', data, re.M) + if match: + return match.group(1) + + def parse_lineprotocol(self, data): + match = re.search(r'line protocol is (\S+)', data, re.M) + if match: + return match.group(1) + + def parse_operstatus(self, data): + match = re.match(r'^(?:.+) is (.+),', data, re.M) + if match: + return match.group(1) + + +FACT_SUBSETS = dict( + default=Default, + hardware=Hardware, + interfaces=Interfaces, + config=Config) + +VALID_SUBSETS = frozenset(FACT_SUBSETS.keys()) + + +def main(): + """main entry point for module execution + """ + argument_spec = dict( + gather_subset=dict(default=["!config"], type='list') + ) + + module = AnsibleModule(argument_spec=argument_spec, + supports_check_mode=True) + + gather_subset = module.params['gather_subset'] + + runable_subsets = set() + exclude_subsets = set() + + for subset in gather_subset: + if subset == 'all': + runable_subsets.update(VALID_SUBSETS) + continue + + if subset.startswith('!'): + subset = subset[1:] + if subset == 'all': + exclude_subsets.update(VALID_SUBSETS) + continue + exclude = True + else: + exclude = False + + if subset not in VALID_SUBSETS: + module.fail_json(msg='Bad subset') + + if exclude: + exclude_subsets.add(subset) + else: + runable_subsets.add(subset) + + if not runable_subsets: + runable_subsets.update(VALID_SUBSETS) + + runable_subsets.difference_update(exclude_subsets) + runable_subsets.add('default') + + facts = dict() + facts['gather_subset'] = list(runable_subsets) + + instances = list() + for key in runable_subsets: + instances.append(FACT_SUBSETS[key](module)) + + for inst in instances: + inst.populate() + facts.update(inst.facts) + + ansible_facts = dict() + for key, value in iteritems(facts): + key = 'ansible_net_%s' % key + ansible_facts[key] = value + + warnings = list() + + module.exit_json(ansible_facts=ansible_facts, warnings=warnings) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/nso/nso_action.py b/plugins/modules/network/nso/nso_action.py new file mode 100644 index 0000000000..30378ecbbf --- /dev/null +++ b/plugins/modules/network/nso/nso_action.py @@ -0,0 +1,189 @@ +#!/usr/bin/python 
+# -*- coding: utf-8 -*- + +# Copyright (c) 2017 Cisco and/or its affiliates. +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +ANSIBLE_METADATA = { + 'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'certified' +} + +DOCUMENTATION = ''' +--- +module: nso_action +extends_documentation_fragment: +- community.general.nso + +short_description: Executes Cisco NSO actions and verifies output. +description: + - This module provides support for executing Cisco NSO actions and then + verifying that the output is as expected. +requirements: + - Cisco NSO version 3.4 or higher. +author: "Claes Nästén (@cnasten)" +options: + path: + description: Path to NSO action. + required: true + input: + description: > + NSO action parameters. + output_required: + description: > + Required output parameters. + output_invalid: + description: > + List of result parameter names that will cause the task to fail if they + are present. + validate_strict: + description: > + If set to true, the task will fail if any output parameters not in + output_required is present in the output. + type: bool +''' + +EXAMPLES = ''' +- name: Sync NSO device + nso_action: + url: http://localhost:8080/jsonrpc + username: username + password: password + path: /ncs:devices/device{ce0}/sync-from + input: {} +''' + +RETURN = ''' +output: + description: Action output + returned: success + type: dict + sample: + result: true +''' + +from ansible_collections.community.general.plugins.module_utils.network.nso.nso import connect, verify_version, nso_argument_spec +from ansible_collections.community.general.plugins.module_utils.network.nso.nso import normalize_value +from ansible_collections.community.general.plugins.module_utils.network.nso.nso import ModuleFailException, NsoException +from ansible.module_utils.basic import AnsibleModule + + +class NsoAction(object): + REQUIRED_VERSIONS = [ + (3, 4) + ] + + def __init__(self, check_mode, client, + path, input, + output_required, output_invalid, validate_strict): + self._check_mode = check_mode + self._client = client + self._path = path + self._input = input + self._output_required = output_required + self._output_invalid = output_invalid + self._validate_strict = validate_strict + + def main(self): + schema = self._client.get_schema(path=self._path) + if schema['data']['kind'] != 'action': + raise ModuleFailException('{0} is not an action'.format(self._path)) + + input_schema = [c for c in schema['data']['children'] + if c.get('is_action_input', False)] + + for key, value in self._input.items(): + child = next((c for c in input_schema if c['name'] == key), None) + if child is None: + raise ModuleFailException('no parameter {0}'.format(key)) + + # implement type validation in the future + + if self._check_mode: + return {} + else: + return self._run_and_verify() + + def _run_and_verify(self): + output 
= self._client.run_action(None, self._path, self._input) + for key, value in self._output_required.items(): + if key not in output: + raise ModuleFailException('{0} not in result'.format(key)) + + n_value = normalize_value(value, output[key], key) + if value != n_value: + msg = '{0} value mismatch. expected {1} got {2}'.format( + key, value, n_value) + raise ModuleFailException(msg) + + for key in self._output_invalid.keys(): + if key in output: + raise ModuleFailException('{0} not allowed in result'.format(key)) + + if self._validate_strict: + for name in output.keys(): + if name not in self._output_required: + raise ModuleFailException('{0} not allowed in result'.format(name)) + + return output + + +def main(): + argument_spec = dict( + path=dict(required=True), + input=dict(required=False, type='dict', default={}), + output_required=dict(required=False, type='dict', default={}), + output_invalid=dict(required=False, type='dict', default={}), + validate_strict=dict(required=False, type='bool', default=False) + ) + argument_spec.update(nso_argument_spec) + + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True + ) + p = module.params + + client = connect(p) + nso_action = NsoAction( + module.check_mode, client, + p['path'], + p['input'], + p['output_required'], + p['output_invalid'], + p['validate_strict']) + try: + verify_version(client, NsoAction.REQUIRED_VERSIONS) + + output = nso_action.main() + client.logout() + module.exit_json(changed=True, output=output) + except NsoException as ex: + client.logout() + module.fail_json(msg=ex.message) + except ModuleFailException as ex: + client.logout() + module.fail_json(msg=ex.message) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/nso/nso_config.py b/plugins/modules/network/nso/nso_config.py new file mode 100644 index 0000000000..1ed99784c1 --- /dev/null +++ b/plugins/modules/network/nso/nso_config.py @@ -0,0 +1,288 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2017 Cisco and/or its affiliates. +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +ANSIBLE_METADATA = { + 'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'certified' +} + +DOCUMENTATION = ''' +--- +module: nso_config +extends_documentation_fragment: +- community.general.nso + +short_description: Manage Cisco NSO configuration and service synchronization. +description: + - This module provides support for managing configuration in Cisco NSO and + can also ensure services are in sync. +requirements: + - Cisco NSO version 3.4.12 or higher, 4.2.7 or higher, + 4.3.8 or higher, 4.4.3 or higher, 4.5 or higher. +author: "Claes Nästén (@cnasten)" +options: + data: + description: > + NSO data in format as | display json converted to YAML. List entries can + be annotated with a __state entry. 
Set to in-sync/deep-in-sync for + services to verify service is in sync with the network. Set to absent in + list entries to ensure they are deleted if they exist in NSO. + required: true +''' + +EXAMPLES = ''' +- name: Create L3VPN + nso_config: + url: http://localhost:8080/jsonrpc + username: username + password: password + data: + l3vpn:vpn: + l3vpn: + - name: company + route-distinguisher: 999 + endpoint: + - id: branch-office1 + ce-device: ce6 + ce-interface: GigabitEthernet0/12 + ip-network: 10.10.1.0/24 + bandwidth: 12000000 + as-number: 65101 + - id: branch-office2 + ce-device: ce1 + ce-interface: GigabitEthernet0/11 + ip-network: 10.7.7.0/24 + bandwidth: 6000000 + as-number: 65102 + - id: branch-office3 + __state: absent + __state: in-sync +''' + +RETURN = ''' +changes: + description: List of changes + returned: always + type: complex + sample: + - path: "/l3vpn:vpn/l3vpn{example}/endpoint{office}/bandwidth" + from: '6000000' + to: '12000000' + type: set + contains: + path: + description: Path to value changed + returned: always + type: str + from: + description: Previous value if any, else null + returned: When previous value is present on value change + type: str + to: + description: Current value if any, else null. + returned: When new value is present on value change + type: + description: Type of change. create|delete|set|re-deploy +diffs: + description: List of sync changes + returned: always + type: complex + sample: + - path: "/l3vpn:vpn/l3vpn{example}" + diff: |2 + devices { + device pe3 { + config { + alu:service { + vprn 65101 { + bgp { + group example-ce6 { + - peer-as 65102; + + peer-as 65101; + } + } + } + } + } + } + } + contains: + path: + description: keypath to service changed + returned: always + type: str + diff: + description: configuration difference triggered the re-deploy + returned: always + type: str +''' + +from ansible_collections.community.general.plugins.module_utils.network.nso.nso import connect, verify_version, nso_argument_spec +from ansible_collections.community.general.plugins.module_utils.network.nso.nso import State, ValueBuilder +from ansible_collections.community.general.plugins.module_utils.network.nso.nso import ModuleFailException, NsoException +from ansible.module_utils.basic import AnsibleModule + + +class NsoConfig(object): + REQUIRED_VERSIONS = [ + (4, 5), + (4, 4, 3), + (4, 3, 8), + (4, 2, 7), + (3, 4, 12) + ] + + def __init__(self, check_mode, client, data): + self._check_mode = check_mode + self._client = client + self._data = data + + self._changes = [] + self._diffs = [] + + def main(self): + # build list of values from configured data + value_builder = ValueBuilder(self._client) + for key, value in self._data.items(): + value_builder.build('', key, value) + + self._data_write(value_builder.values) + + # check sync AFTER configuration is written + sync_values = self._sync_check(value_builder.values) + self._sync_ensure(sync_values) + + return self._changes, self._diffs + + def _data_write(self, values): + th = self._client.get_trans(mode='read_write') + + for value in values: + if value.state == State.SET: + self._client.set_value(th, value.path, value.value) + elif value.state == State.PRESENT: + self._client.create(th, value.path) + elif value.state == State.ABSENT: + self._client.delete(th, value.path) + + changes = self._client.get_trans_changes(th) + for change in changes: + if change['op'] == 'value_set': + self._changes.append({ + 'path': change['path'], + 'from': change['old'] or None, + 'to': change['value'], + 'type': 
'set' + }) + elif change['op'] in ('created', 'deleted'): + self._changes.append({ + 'path': change['path'], + 'type': change['op'][:-1] + }) + + if len(changes) > 0: + warnings = self._client.validate_commit(th) + if len(warnings) > 0: + raise NsoException( + 'failed to validate transaction with warnings: {0}'.format( + ', '.join((str(warning) for warning in warnings))), {}) + + if self._check_mode or len(changes) == 0: + self._client.delete_trans(th) + else: + self._client.commit(th) + + def _sync_check(self, values): + sync_values = [] + + for value in values: + if value.state in (State.CHECK_SYNC, State.IN_SYNC): + action = 'check-sync' + elif value.state in (State.DEEP_CHECK_SYNC, State.DEEP_IN_SYNC): + action = 'deep-check-sync' + else: + action = None + + if action is not None: + action_path = '{0}/{1}'.format(value.path, action) + action_params = {'outformat': 'cli'} + resp = self._client.run_action(None, action_path, action_params) + if len(resp) > 0: + sync_values.append( + ValueBuilder.Value(value.path, value.state, resp[0]['value'])) + + return sync_values + + def _sync_ensure(self, sync_values): + for value in sync_values: + if value.state in (State.CHECK_SYNC, State.DEEP_CHECK_SYNC): + raise NsoException( + '{0} out of sync, diff {1}'.format(value.path, value.value), {}) + + action_path = '{0}/{1}'.format(value.path, 're-deploy') + if not self._check_mode: + result = self._client.run_action(None, action_path) + if not result: + raise NsoException( + 'failed to re-deploy {0}'.format(value.path), {}) + + self._changes.append({'path': value.path, 'type': 're-deploy'}) + self._diffs.append({'path': value.path, 'diff': value.value}) + + +def main(): + argument_spec = dict( + data=dict(required=True, type='dict') + ) + argument_spec.update(nso_argument_spec) + + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True + ) + p = module.params + + client = connect(p) + nso_config = NsoConfig(module.check_mode, client, p['data']) + try: + verify_version(client, NsoConfig.REQUIRED_VERSIONS) + + changes, diffs = nso_config.main() + client.logout() + + changed = len(changes) > 0 + module.exit_json( + changed=changed, changes=changes, diffs=diffs) + + except NsoException as ex: + client.logout() + module.fail_json(msg=ex.message) + except ModuleFailException as ex: + client.logout() + module.fail_json(msg=ex.message) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/nso/nso_query.py b/plugins/modules/network/nso/nso_query.py new file mode 100644 index 0000000000..9f03551dee --- /dev/null +++ b/plugins/modules/network/nso/nso_query.py @@ -0,0 +1,127 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2017 Cisco and/or its affiliates. +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
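+#
+# Illustrative use of the JSON-RPC helper this module wraps (the same calls
+# made in main() below; the selected fields are examples):
+#
+#   client = connect(module.params)
+#   output = client.query('/ncs:devices/device', ['name', 'description'])
+#   client.logout()
+#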
+# + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +ANSIBLE_METADATA = { + 'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'certified' +} + +DOCUMENTATION = ''' +--- +module: nso_query +extends_documentation_fragment: +- community.general.nso + +short_description: Query data from Cisco NSO. +description: + - This module provides support for querying data from Cisco NSO using XPath. +requirements: + - Cisco NSO version 3.4 or higher. +author: "Claes Nästén (@cnasten)" +options: + xpath: + description: XPath selection relative to the root. + required: true + fields: + description: > + List of fields to select from matching nodes. + required: true +''' + +EXAMPLES = ''' +- name: Select device name and description + nso_query: + url: http://localhost:8080/jsonrpc + username: username + password: password + xpath: /ncs:devices/device + fields: + - name + - description +''' + +RETURN = ''' +output: + description: Value of matching nodes + returned: success + type: list +''' + +from ansible_collections.community.general.plugins.module_utils.network.nso.nso import connect, verify_version, nso_argument_spec +from ansible_collections.community.general.plugins.module_utils.network.nso.nso import ModuleFailException, NsoException +from ansible.module_utils.basic import AnsibleModule + + +class NsoQuery(object): + REQUIRED_VERSIONS = [ + (3, 4) + ] + + def __init__(self, check_mode, client, xpath, fields): + self._check_mode = check_mode + self._client = client + self._xpath = xpath + self._fields = fields + + def main(self): + if self._check_mode: + return [] + else: + return self._client.query(self._xpath, self._fields) + + +def main(): + argument_spec = dict( + xpath=dict(required=True, type='str'), + fields=dict(required=True, type='list') + ) + argument_spec.update(nso_argument_spec) + + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True + ) + p = module.params + + client = connect(p) + nso_query = NsoQuery( + module.check_mode, client, + p['xpath'], p['fields']) + try: + verify_version(client, NsoQuery.REQUIRED_VERSIONS) + + output = nso_query.main() + client.logout() + module.exit_json(changed=False, output=output) + except NsoException as ex: + client.logout() + module.fail_json(msg=ex.message) + except ModuleFailException as ex: + client.logout() + module.fail_json(msg=ex.message) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/nso/nso_show.py b/plugins/modules/network/nso/nso_show.py new file mode 100644 index 0000000000..4ad0c8e0f1 --- /dev/null +++ b/plugins/modules/network/nso/nso_show.py @@ -0,0 +1,132 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2017 Cisco and/or its affiliates. +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
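+#
+# Illustrative use of the helper called in main() below:
+#
+#   client = connect(module.params)
+#   output = client.show_config('/ncs:devices/device', True)  # include operational data
+#   client.logout()
+#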
+# + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +ANSIBLE_METADATA = { + 'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'certified' +} + +DOCUMENTATION = ''' +--- +module: nso_show +extends_documentation_fragment: +- community.general.nso + +short_description: Displays data from Cisco NSO. +description: + - This module provides support for displaying data from Cisco NSO. +requirements: + - Cisco NSO version 3.4.12 or higher, 4.1.9 or higher, 4.2.6 or higher, + 4.3.7 or higher, 4.4.5 or higher, 4.5 or higher. +author: "Claes Nästén (@cnasten)" +options: + path: + description: Path to NSO data. + required: true + operational: + description: > + Controls whether or not operational data is included in the result. + type: bool + default: false +''' + +EXAMPLES = ''' +- name: Show devices including operational data + nso_show: + url: http://localhost:8080/jsonrpc + username: username + password: password + path: /ncs:devices/device + operational: true +''' + +RETURN = ''' +output: + description: Configuration + returned: success + type: dict +''' + +from ansible_collections.community.general.plugins.module_utils.network.nso.nso import connect, verify_version, nso_argument_spec +from ansible_collections.community.general.plugins.module_utils.network.nso.nso import ModuleFailException, NsoException +from ansible.module_utils.basic import AnsibleModule + + +class NsoShow(object): + REQUIRED_VERSIONS = [ + (4, 5), + (4, 4, 5), + (4, 3, 7), + (4, 2, 6), + (4, 1, 9), + (3, 4, 12) + ] + + def __init__(self, check_mode, client, path, operational): + self._check_mode = check_mode + self._client = client + self._path = path + self._operational = operational + + def main(self): + if self._check_mode: + return {} + else: + return self._client.show_config(self._path, self._operational) + + +def main(): + argument_spec = dict( + path=dict(required=True, type='str'), + operational=dict(required=False, type='bool', default=False) + ) + argument_spec.update(nso_argument_spec) + + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True + ) + p = module.params + + client = connect(p) + nso_show = NsoShow( + module.check_mode, client, + p['path'], p['operational']) + try: + verify_version(client, NsoShow.REQUIRED_VERSIONS) + + output = nso_show.main() + client.logout() + module.exit_json(changed=False, output=output) + except NsoException as ex: + client.logout() + module.fail_json(msg=ex.message) + except ModuleFailException as ex: + client.logout() + module.fail_json(msg=ex.message) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/nso/nso_verify.py b/plugins/modules/network/nso/nso_verify.py new file mode 100644 index 0000000000..a32479dfa1 --- /dev/null +++ b/plugins/modules/network/nso/nso_verify.py @@ -0,0 +1,206 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2017 Cisco and/or its affiliates. +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. 
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <https://www.gnu.org/licenses/>.
+#
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+    'metadata_version': '1.1',
+    'status': ['preview'],
+    'supported_by': 'certified'
+}
+
+DOCUMENTATION = '''
+---
+module: nso_verify
+extends_documentation_fragment:
+- community.general.nso
+
+short_description: Verifies Cisco NSO configuration.
+description:
+  - This module provides support for verifying Cisco NSO configuration is in
+    compliance with specified values.
+requirements:
+  - Cisco NSO version 3.4.12 or higher, 4.2.7 or higher,
+    4.3.8 or higher, 4.4.3 or higher, 4.5 or higher.
+author: "Claes Nästén (@cnasten)"
+options:
+  data:
+    description: >
+      NSO data in format as C(| display json) converted to YAML. List entries can
+      be annotated with a C(__state) entry. Set to in-sync/deep-in-sync for
+      services to verify service is in sync with the network. Set to absent in
+      list entries to ensure they are deleted if they exist in NSO.
+    required: true
+'''
+
+EXAMPLES = '''
+- name: Verify interface is up
+  nso_verify:
+    url: http://localhost:8080/jsonrpc
+    username: username
+    password: password
+    data:
+      ncs:devices:
+        device:
+        - name: ce0
+          live-status:
+            interfaces:
+              interface:
+                - name: GigabitEthernet0/12
+                  state: Up
+'''
+
+RETURN = '''
+violations:
+    description: List of value violations
+    returned: failed
+    type: complex
+    sample:
+        - path: /ncs:devices/device{ce0}/description
+          expected-value: CE0 example
+          value: null
+    contains:
+        path:
+            description: Path to the value in violation
+            returned: always
+            type: str
+        expected-value:
+            description: Expected value of path
+            returned: always
+            type: str
+        value:
+            description: Current value of path
+            returned: always
+            type: str
+'''
+
+from ansible_collections.community.general.plugins.module_utils.network.nso.nso import connect, verify_version, nso_argument_spec
+from ansible_collections.community.general.plugins.module_utils.network.nso.nso import normalize_value
+from ansible_collections.community.general.plugins.module_utils.network.nso.nso import State, ValueBuilder
+from ansible_collections.community.general.plugins.module_utils.network.nso.nso import ModuleFailException, NsoException
+from ansible.module_utils.basic import AnsibleModule
+
+
+class NsoVerify(object):
+    REQUIRED_VERSIONS = [
+        (4, 5),
+        (4, 4, 3),
+        (4, 3, 8),
+        (4, 2, 7),
+        (3, 4, 12)
+    ]
+
+    def __init__(self, client, data):
+        self._client = client
+        self._data = data
+
+    def main(self):
+        violations = []
+
+        # build list of values from configured data
+        value_builder = ValueBuilder(self._client, 'verify')
+        for key, value in self._data.items():
+            value_builder.build('', key, value)
+
+        for expected_value in value_builder.values:
+            if expected_value.state == State.PRESENT:
+                violations.append({
+                    'path': expected_value.path,
+                    'expected-value': 'present',
+                    'value': 'absent'
+                })
+            elif expected_value.state == State.ABSENT:
+                violations.append({
+                    'path': expected_value.path,
+                    'expected-value': 'absent',
+                    'value': 'present'
+                })
+            elif expected_value.state == State.SET:
+                try:
+                    value = self._client.get_value(expected_value.path)['value']
+                except NsoException as ex:
+                    if ex.error.get('type', '') == 'data.not_found':
+                        value = None
+                    else:
+                        raise
+
+                # handle different types properly
+                n_value = normalize_value(
+                    expected_value.value, value, expected_value.path)
+                if n_value != expected_value.value:
+                    # if the value
comparison fails, try mapping identityref + value_type = value_builder.get_type(expected_value.path) + if value_type is not None and 'identityref' in value_type: + n_value, t_value = self.get_prefix_name(value) + + if expected_value.value != n_value: + violations.append({ + 'path': expected_value.path, + 'expected-value': expected_value.value, + 'value': n_value + }) + else: + raise ModuleFailException( + 'value state {0} not supported at {1}'.format( + expected_value.state, expected_value.path)) + + return violations + + +def main(): + argument_spec = dict( + data=dict(required=True, type='dict') + ) + argument_spec.update(nso_argument_spec) + + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True + ) + p = module.params + + client = connect(p) + nso_verify = NsoVerify(client, p['data']) + try: + verify_version(client, NsoVerify.REQUIRED_VERSIONS) + + violations = nso_verify.main() + client.logout() + + num_violations = len(violations) + if num_violations > 0: + msg = '{0} value{1} differ'.format( + num_violations, num_violations > 1 and 's' or '') + module.fail_json(msg=msg, violations=violations) + else: + module.exit_json(changed=False) + + except NsoException as ex: + client.logout() + module.fail_json(msg=ex.message) + except ModuleFailException as ex: + client.logout() + module.fail_json(msg=ex.message) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/nuage/nuage_vspk.py b/plugins/modules/network/nuage/nuage_vspk.py new file mode 100644 index 0000000000..a472a6ae39 --- /dev/null +++ b/plugins/modules/network/nuage/nuage_vspk.py @@ -0,0 +1,1020 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2017, Nokia +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: nuage_vspk +short_description: Manage Nuage VSP environments +description: + - Manage or find Nuage VSP entities, this includes create, update, delete, assign, unassign and find, with all supported properties. +author: Philippe Dellaert (@pdellaert) +options: + auth: + description: + - Dict with the authentication information required to connect to a Nuage VSP environment. + - Requires a I(api_username) parameter (example csproot). + - Requires either a I(api_password) parameter (example csproot) or a I(api_certificate) and I(api_key) parameters, + which point to the certificate and key files for certificate based authentication. + - Requires a I(api_enterprise) parameter (example csp). + - Requires a I(api_url) parameter (example https://10.0.0.10:8443). + - Requires a I(api_version) parameter (example v4_0). + required: true + type: + description: + - The type of entity you want to work on (example Enterprise). + - This should match the objects CamelCase class name in VSPK-Python. + - This Class name can be found on U(https://nuagenetworks.github.io/vspkdoc/index.html). + required: true + id: + description: + - The ID of the entity you want to work on. + - In combination with I(command=find), it will only return the single entity. + - In combination with I(state), it will either update or delete this entity. + - Will take precedence over I(match_filter) and I(properties) whenever an entity needs to be found. + parent_id: + description: + - The ID of the parent of the entity you want to work on. 
+ - When I(state) is specified, the entity will be gathered from this parent, if it exists, unless an I(id) is specified. + - When I(command=find) is specified, the entity will be searched for in this parent, unless an I(id) is specified. + - If specified, I(parent_type) also needs to be specified. + parent_type: + description: + - The type of parent the ID is specified for (example Enterprise). + - This should match the object's CamelCase class name in VSPK-Python. + - This class name can be found on U(https://nuagenetworks.github.io/vspkdoc/index.html). + - If specified, I(parent_id) also needs to be specified. + state: + description: + - Specifies the desired state of the entity. + - If I(state=present), in case the entity already exists, will update the entity if needed. + - If I(state=present), in case the relationship with the parent is a member relationship, will assign the entity as a member of the parent. + - If I(state=absent), in case the relationship with the parent is a member relationship, will unassign the entity as a member of the parent. + - Either I(state) or I(command) needs to be defined; both can not be defined at the same time. + choices: + - present + - absent + command: + description: + - Specifies a command to be executed. + - With I(command=find), if I(parent_id) and I(parent_type) are defined, it will only search within the parent. Otherwise, if allowed, + will search in the root object. + - With I(command=find), if I(id) is specified, it will only return the single entity matching the id. + - With I(command=find), otherwise, if I(match_filter) is defined, it will use that filter to search. + - With I(command=find), otherwise, if I(properties) are defined, it will do an AND search using all properties. + - With I(command=change_password), a password of a user can be changed. Warning - In case the password is the same as the existing one, + it will throw an error. + - With I(command=wait_for_job), the module will wait for a job to either have a status of SUCCESS or ERROR. In case an ERROR status is found, + the module will exit with an error. + - With I(command=wait_for_job), the job will always be returned, even in an ERROR situation. + - Either I(state) or I(command) needs to be defined; both can not be defined at the same time. + choices: + - find + - change_password + - wait_for_job + - get_csp_enterprise + match_filter: + description: + - A filter used when looking (both in I(command) and I(state)) for entities, in the format the Nuage VSP API expects. + - If I(match_filter) is defined, it will take precedence over the I(properties), but not over the I(id). + properties: + description: + - Properties are the key, value pairs of the different properties an entity has. + - If no I(id) and no I(match_filter) is specified, these are used to find or determine if the entity exists. + children: + description: + - Can be used to specify a set of child entities. + - A mandatory property of each child is the I(type). + - Supported optional properties of each child are I(id), I(properties) and I(match_filter). + - The function of each of these properties is the same as in the general task definition. + - This can be used recursively. + - Only usable in case I(state=present). +notes: + - Check mode is supported, but with some caveats. It will not make any changes, and where possible it will try to determine whether it is able to do what is requested.
+ - In case a parent ID is provided from a previous task, it might be empty; if a search is possible on the root object, the module will do so, which can impact performance. +requirements: + - Python 2.7 + - Supports Nuage VSP 4.0Rx & 5.x.y + - Proper VSPK-Python installed for your Nuage version + - Tested with NuageX U(https://nuagex.io) +''' + +EXAMPLES = ''' +# This can be executed as a single role, with the following vars +# vars: +# auth: +# api_username: csproot +# api_password: csproot +# api_enterprise: csp +# api_url: https://10.0.0.10:8443 +# api_version: v5_0 +# enterprise_name: Ansible-Enterprise +# enterprise_new_name: Ansible-Updated-Enterprise +# +# or, for certificate based authentication +# vars: +# auth: +# api_username: csproot +# api_certificate: /path/to/user-certificate.pem +# api_key: /path/to/user-Key.pem +# api_enterprise: csp +# api_url: https://10.0.0.10:8443 +# api_version: v5_0 +# enterprise_name: Ansible-Enterprise +# enterprise_new_name: Ansible-Updated-Enterprise + +# Creating a new enterprise +- name: Create Enterprise + connection: local + nuage_vspk: + auth: "{{ nuage_auth }}" + type: Enterprise + state: present + properties: + name: "{{ enterprise_name }}-basic" + register: nuage_enterprise + +# Checking if an Enterprise with the new name already exists +- name: Check if an Enterprise exists with the new name + connection: local + nuage_vspk: + auth: "{{ nuage_auth }}" + type: Enterprise + command: find + properties: + name: "{{ enterprise_new_name }}-basic" + ignore_errors: yes + register: nuage_check_enterprise + +# Updating an enterprise's name +- name: Update Enterprise name + connection: local + nuage_vspk: + auth: "{{ nuage_auth }}" + type: Enterprise + id: "{{ nuage_enterprise.id }}" + state: present + properties: + name: "{{ enterprise_new_name }}-basic" + when: nuage_check_enterprise is failed + +# Creating a User in an Enterprise +- name: Create admin user + connection: local + nuage_vspk: + auth: "{{ nuage_auth }}" + type: User + parent_id: "{{ nuage_enterprise.id }}" + parent_type: Enterprise + state: present + match_filter: "userName == 'ansible-admin'" + properties: + email: "ansible@localhost.local" + first_name: "Ansible" + last_name: "Admin" + password: "ansible-password" + user_name: "ansible-admin" + register: nuage_user + +# Updating password for User +- name: Update admin password + connection: local + nuage_vspk: + auth: "{{ nuage_auth }}" + type: User + id: "{{ nuage_user.id }}" + command: change_password + properties: + password: "ansible-new-password" + ignore_errors: yes + +# Finding a group in an enterprise +- name: Find Administrators group in Enterprise + connection: local + nuage_vspk: + auth: "{{ nuage_auth }}" + type: Group + parent_id: "{{ nuage_enterprise.id }}" + parent_type: Enterprise + command: find + properties: + name: "Administrators" + register: nuage_group + +# Assign the user to the group +- name: Assign admin user to administrators + connection: local + nuage_vspk: + auth: "{{ nuage_auth }}" + type: User + id: "{{ nuage_user.id }}" + parent_id: "{{ nuage_group.id }}" + parent_type: Group + state: present + +# Creating multiple DomainTemplates +- name: Create multiple DomainTemplates + connection: local + nuage_vspk: + auth: "{{ nuage_auth }}" + type: DomainTemplate + parent_id: "{{ nuage_enterprise.id }}" + parent_type: Enterprise + state: present + properties: + name: "{{ item }}" + description: "Created by Ansible" + with_items: + - "Template-1" + - "Template-2"
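+ +# Waiting for a job to finish (an illustrative extra example; the nuage_job variable and its ID are assumed to be registered by an earlier task and are not part of the original examples) +- name: Wait for a job to finish + connection: local + nuage_vspk: + auth: "{{ nuage_auth }}" + type: Job + id: "{{ nuage_job.id }}" + command: wait_for_job + ignore_errors: yes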
+ +# Finding all DomainTemplates +- name: Fetching all DomainTemplates + connection: local + nuage_vspk: + auth: "{{ nuage_auth }}" + type: DomainTemplate + parent_id: "{{ nuage_enterprise.id }}" + parent_type: Enterprise + command: find + register: nuage_domain_templates + +# Deleting all DomainTemplates +- name: Deleting all found DomainTemplates + connection: local + nuage_vspk: + auth: "{{ nuage_auth }}" + type: DomainTemplate + state: absent + id: "{{ item.ID }}" + with_items: "{{ nuage_domain_templates.entities }}" + when: nuage_domain_templates.entities is defined + +# Unassign user from group +- name: Unassign admin user from administrators + connection: local + nuage_vspk: + auth: "{{ nuage_auth }}" + type: User + id: "{{ nuage_user.id }}" + parent_id: "{{ nuage_group.id }}" + parent_type: Group + state: absent + +# Deleting an enterprise +- name: Delete Enterprise + connection: local + nuage_vspk: + auth: "{{ nuage_auth }}" + type: Enterprise + id: "{{ nuage_enterprise.id }}" + state: absent + +# Setting up an Enterprise with children +- name: Setup Enterprise and domain structure + connection: local + nuage_vspk: + auth: "{{ nuage_auth }}" + type: Enterprise + state: present + properties: + name: "Child-based-Enterprise" + children: + - type: L2DomainTemplate + properties: + name: "Unmanaged-Template" + children: + - type: EgressACLTemplate + match_filter: "name == 'Allow All'" + properties: + name: "Allow All" + active: true + default_allow_ip: true + default_allow_non_ip: true + default_install_acl_implicit_rules: true + description: "Created by Ansible" + priority_type: "TOP" + - type: IngressACLTemplate + match_filter: "name == 'Allow All'" + properties: + name: "Allow All" + active: true + default_allow_ip: true + default_allow_non_ip: true + description: "Created by Ansible" + priority_type: "TOP" +''' + +RETURN = ''' +id: + description: The id of the entity that was found, created, updated or assigned. + returned: On state=present and command=find in case one entity was found. + type: str + sample: bae07d8d-d29c-4e2b-b6ba-621b4807a333 +entities: + description: A list of entities handled. Each element is the to_dict() of the entity. + returned: On state=present and command=find, with only one element in case state=present or command=find matched a single entity.
+ type: list + sample: [{ + "ID": "acabc435-3946-4117-a719-b8895a335830", + "assocEntityType": "DOMAIN", + "command": "BEGIN_POLICY_CHANGES", + "creationDate": 1487515656000, + "entityScope": "ENTERPRISE", + "externalID": null, + "lastUpdatedBy": "8a6f0e20-a4db-4878-ad84-9cc61756cd5e", + "lastUpdatedDate": 1487515656000, + "owner": "8a6f0e20-a4db-4878-ad84-9cc61756cd5e", + "parameters": null, + "parentID": "a22fddb9-3da4-4945-bd2e-9d27fe3d62e0", + "parentType": "domain", + "progress": 0.0, + "result": null, + "status": "RUNNING" + }] +''' + +import time + +try: + import importlib + HAS_IMPORTLIB = True +except ImportError: + HAS_IMPORTLIB = False + +try: + from bambou.exceptions import BambouHTTPError + HAS_BAMBOU = True +except ImportError: + HAS_BAMBOU = False + +from ansible.module_utils.basic import AnsibleModule + + +SUPPORTED_COMMANDS = ['find', 'change_password', 'wait_for_job', 'get_csp_enterprise'] +VSPK = None + + +class NuageEntityManager(object): + """ + This module is meant to manage an entity in a Nuage VSP Platform + """ + + def __init__(self, module): + self.module = module + self.auth = module.params['auth'] + self.api_username = None + self.api_password = None + self.api_enterprise = None + self.api_url = None + self.api_version = None + self.api_certificate = None + self.api_key = None + self.type = module.params['type'] + + self.state = module.params['state'] + self.command = module.params['command'] + self.match_filter = module.params['match_filter'] + self.entity_id = module.params['id'] + self.parent_id = module.params['parent_id'] + self.parent_type = module.params['parent_type'] + self.properties = module.params['properties'] + self.children = module.params['children'] + + self.entity = None + self.entity_class = None + self.parent = None + self.parent_class = None + self.entity_fetcher = None + + self.result = { + 'state': self.state, + 'id': self.entity_id, + 'entities': [] + } + self.nuage_connection = None + + self._verify_api() + self._verify_input() + self._connect_vspk() + self._find_parent() + + def _connect_vspk(self): + """ + Connects to a Nuage API endpoint + """ + try: + # Connecting to Nuage + if self.api_certificate and self.api_key: + self.nuage_connection = VSPK.NUVSDSession(username=self.api_username, enterprise=self.api_enterprise, api_url=self.api_url, + certificate=(self.api_certificate, self.api_key)) + else: + self.nuage_connection = VSPK.NUVSDSession(username=self.api_username, password=self.api_password, enterprise=self.api_enterprise, + api_url=self.api_url) + self.nuage_connection.start() + except BambouHTTPError as error: + self.module.fail_json(msg='Unable to connect to the API URL with given username, password and enterprise: {0}'.format(error)) + + def _verify_api(self): + """ + Verifies the API and loads the proper VSPK version + """ + # Checking auth parameters + if ('api_password' not in list(self.auth.keys()) or not self.auth['api_password']) and ('api_certificate' not in list(self.auth.keys()) or + 'api_key' not in list(self.auth.keys()) or + not self.auth['api_certificate'] or not self.auth['api_key']): + self.module.fail_json(msg='Missing api_password or api_certificate and api_key parameter in auth') + + self.api_username = self.auth['api_username'] + if 'api_password' in list(self.auth.keys()) and self.auth['api_password']: + self.api_password = self.auth['api_password'] + if 'api_certificate' in list(self.auth.keys()) and 'api_key' in list(self.auth.keys()) and self.auth['api_certificate'] and self.auth['api_key']: + self.api_certificate = self.auth['api_certificate'] + self.api_key = self.auth['api_key'] + self.api_enterprise = self.auth['api_enterprise'] + self.api_url = self.auth['api_url'] + self.api_version = self.auth['api_version'] + + try: + global VSPK + VSPK = importlib.import_module('vspk.{0:s}'.format(self.api_version))
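+ # For example, with api_version set to 'v5_0' this imports the 'vspk.v5_0' module (an illustrative version string; it must match an installed VSPK release).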
+ except ImportError: + self.module.fail_json(msg='vspk is required for this module, or the API version specified does not exist.') + + def _verify_input(self): + """ + Verifies the parameter input for types and parent correctness and necessary parameters + """ + + # Checking if type exists + try: + self.entity_class = getattr(VSPK, 'NU{0:s}'.format(self.type)) + except AttributeError: + self.module.fail_json(msg='Unrecognised type specified') + + if self.module.check_mode: + return + + if self.parent_type: + # Checking if parent type exists + try: + self.parent_class = getattr(VSPK, 'NU{0:s}'.format(self.parent_type)) + except AttributeError: + # The parent type does not exist, fail + self.module.fail_json(msg='Unrecognised parent type specified') + + fetcher = self.parent_class().fetcher_for_rest_name(self.entity_class.rest_name) + if fetcher is None: + # The parent has no fetcher, fail + self.module.fail_json(msg='Specified parent is not a valid parent for the specified type') + elif not self.entity_id: + # If there is an id, we do not need a parent because we'll interact directly with the entity + # If an assign needs to happen, a parent will have to be provided + # Root object is the parent + self.parent_class = VSPK.NUMe + fetcher = self.parent_class().fetcher_for_rest_name(self.entity_class.rest_name) + if fetcher is None: + self.module.fail_json(msg='No parent specified and root object is not a parent for the type') + + # Verifying if a password is provided in case of the change_password command: + if self.command and self.command == 'change_password' and 'password' not in self.properties.keys(): + self.module.fail_json(msg='command is change_password but the following are missing: password property') + + def _find_parent(self): + """ + Fetches the parent if needed, otherwise configures the root object as parent. Also configures the entity fetcher + Important notes: + - If the parent is not set, the parent is automatically set to the root object + - If the root object does not hold a fetcher for the entity, you have to provide an ID + - If you want to assign/unassign, you have to provide a valid parent + """ + self.parent = self.nuage_connection.user + + if self.parent_id: + self.parent = self.parent_class(id=self.parent_id) + try: + self.parent.fetch() + except BambouHTTPError as error: + self.module.fail_json(msg='Failed to fetch the specified parent: {0}'.format(error)) + + self.entity_fetcher = self.parent.fetcher_for_rest_name(self.entity_class.rest_name) + + def _find_entities(self, entity_id=None, entity_class=None, match_filter=None, properties=None, entity_fetcher=None): + """ + Will return a set of entities matching a filter or set of properties if the match_filter is unset. If the + entity_id is set, it will return only the entity matching that ID as the single element of the list.
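+ Example (illustrative, not part of the original docstring): with no match_filter and + properties={'name': 'Ansible'}, the filter built below becomes: name == "Ansible" + (multiple properties are joined with ' and ').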
+ :param entity_id: Optional ID of the entity which should be returned + :param entity_class: Optional class of the entity which needs to be found + :param match_filter: Optional search filter + :param properties: Optional set of properties the entities should contain + :param entity_fetcher: The fetcher for the entity type + :return: List of matching entities + """ + search_filter = '' + + if entity_id: + found_entity = entity_class(id=entity_id) + try: + found_entity.fetch() + except BambouHTTPError as error: + self.module.fail_json(msg='Failed to fetch the specified entity by ID: {0}'.format(error)) + + return [found_entity] + + elif match_filter: + search_filter = match_filter + elif properties: + # Building filter + for num, property_name in enumerate(properties): + if num > 0: + search_filter += ' and ' + search_filter += '{0:s} == "{1}"'.format(property_name, properties[property_name]) + + if entity_fetcher is not None: + try: + return entity_fetcher.get(filter=search_filter) + except BambouHTTPError: + pass + return [] + + def _find_entity(self, entity_id=None, entity_class=None, match_filter=None, properties=None, entity_fetcher=None): + """ + Finds a single matching entity that matches all the provided properties, unless an ID is specified, in which + case it just fetches the one item + :param entity_id: Optional ID of the entity which should be returned + :param entity_class: Optional class of the entity which needs to be found + :param match_filter: Optional search filter + :param properties: Optional set of properties the entities should contain + :param entity_fetcher: The fetcher for the entity type + :return: The first entity matching the criteria, or None if none was found + """ + search_filter = '' + if entity_id: + found_entity = entity_class(id=entity_id) + try: + found_entity.fetch() + except BambouHTTPError as error: + self.module.fail_json(msg='Failed to fetch the specified entity by ID: {0}'.format(error)) + + return found_entity + + elif match_filter: + search_filter = match_filter + elif properties: + # Building filter + for num, property_name in enumerate(properties): + if num > 0: + search_filter += ' and ' + search_filter += '{0:s} == "{1}"'.format(property_name, properties[property_name]) + + if entity_fetcher is not None: + try: + return entity_fetcher.get_first(filter=search_filter) + except BambouHTTPError: + pass + return None + + def handle_main_entity(self): + """ + Handles the Ansible task + """ + if self.command and self.command == 'find': + self._handle_find() + elif self.command and self.command == 'change_password': + self._handle_change_password() + elif self.command and self.command == 'wait_for_job': + self._handle_wait_for_job() + elif self.command and self.command == 'get_csp_enterprise': + self._handle_get_csp_enterprise() + elif self.state == 'present': + self._handle_present() + elif self.state == 'absent': + self._handle_absent() + self.module.exit_json(**self.result) + + def _handle_absent(self): + """ + Handles the Ansible task when the state is set to absent + """ + # Absent state + self.entity = self._find_entity(entity_id=self.entity_id, entity_class=self.entity_class, match_filter=self.match_filter, properties=self.properties, + entity_fetcher=self.entity_fetcher) + if self.entity and (self.entity_fetcher is None or self.entity_fetcher.relationship in ['child', 'root']): + # Entity is present, deleting + if self.module.check_mode: + self.result['changed'] = True + else: + self._delete_entity(self.entity) + self.result['id'] = None + 
elif self.entity and self.entity_fetcher.relationship == 'member': + # Entity is a member, need to check if it is still assigned + if self._is_member(entity_fetcher=self.entity_fetcher, entity=self.entity): + # Entity is still a member, unassigning it + if self.module.check_mode: + self.result['changed'] = True + else: + self._unassign_member(entity_fetcher=self.entity_fetcher, entity=self.entity, entity_class=self.entity_class, parent=self.parent, + set_output=True) + + def _handle_present(self): + """ + Handles the Ansible task when the state is set to present + """ + # Present state + self.entity = self._find_entity(entity_id=self.entity_id, entity_class=self.entity_class, match_filter=self.match_filter, properties=self.properties, + entity_fetcher=self.entity_fetcher) + # Determining action to take + if self.entity_fetcher is not None and self.entity_fetcher.relationship == 'member' and not self.entity: + self.module.fail_json(msg='Trying to assign an entity that does not exist') + elif self.entity_fetcher is not None and self.entity_fetcher.relationship == 'member' and self.entity: + # Entity is a member, need to check if already present + if not self._is_member(entity_fetcher=self.entity_fetcher, entity=self.entity): + # Entity is not a member yet + if self.module.check_mode: + self.result['changed'] = True + else: + self._assign_member(entity_fetcher=self.entity_fetcher, entity=self.entity, entity_class=self.entity_class, parent=self.parent, + set_output=True) + elif self.entity_fetcher is not None and self.entity_fetcher.relationship in ['child', 'root'] and not self.entity: + # Entity is not present as a child, creating + if self.module.check_mode: + self.result['changed'] = True + else: + self.entity = self._create_entity(entity_class=self.entity_class, parent=self.parent, properties=self.properties) + self.result['id'] = self.entity.id + self.result['entities'].append(self.entity.to_dict()) + + # Checking children + if self.children: + for child in self.children: + self._handle_child(child=child, parent=self.entity) + elif self.entity: + # Need to compare properties in entity and found entity + changed = self._has_changed(entity=self.entity, properties=self.properties) + + if self.module.check_mode: + self.result['changed'] = changed + elif changed: + self.entity = self._save_entity(entity=self.entity) + self.result['id'] = self.entity.id + self.result['entities'].append(self.entity.to_dict()) + else: + self.result['id'] = self.entity.id + self.result['entities'].append(self.entity.to_dict()) + + # Checking children + if self.children: + for child in self.children: + self._handle_child(child=child, parent=self.entity) + elif not self.module.check_mode: + self.module.fail_json(msg='Invalid situation, verify parameters')
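+ + # Note (summary comment added for clarity; derived from the handlers above): a member + # relationship can only assign or unassign an entity that already exists, while a + # child/root relationship creates the entity when it is missing and updates it only + # when the requested properties differ.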
+ + def _handle_get_csp_enterprise(self): + """ + Handles the Ansible task when the command is to get the csp enterprise + """ + self.entity_id = self.parent.enterprise_id + self.entity = VSPK.NUEnterprise(id=self.entity_id) + try: + self.entity.fetch() + except BambouHTTPError as error: + self.module.fail_json(msg='Unable to fetch CSP enterprise: {0}'.format(error)) + self.result['id'] = self.entity_id + self.result['entities'].append(self.entity.to_dict()) + + def _handle_wait_for_job(self): + """ + Handles the Ansible task when the command is to wait for a job + """ + # Command wait_for_job + self.entity = self._find_entity(entity_id=self.entity_id, entity_class=self.entity_class, match_filter=self.match_filter, properties=self.properties, + entity_fetcher=self.entity_fetcher) + if self.module.check_mode: + self.result['changed'] = True + else: + self._wait_for_job(self.entity) + + def _handle_change_password(self): + """ + Handles the Ansible task when the command is to change a password + """ + # Command change_password + self.entity = self._find_entity(entity_id=self.entity_id, entity_class=self.entity_class, match_filter=self.match_filter, properties=self.properties, + entity_fetcher=self.entity_fetcher) + if self.module.check_mode: + self.result['changed'] = True + else: + try: + getattr(self.entity, 'password') + except AttributeError: + self.module.fail_json(msg='Entity does not have a password property') + + try: + setattr(self.entity, 'password', self.properties['password']) + except AttributeError: + self.module.fail_json(msg='Password can not be changed for entity') + + self.entity = self._save_entity(entity=self.entity) + self.result['id'] = self.entity.id + self.result['entities'].append(self.entity.to_dict()) + + def _handle_find(self): + """ + Handles the Ansible task when the command is to find an entity + """ + # Command find + entities = self._find_entities(entity_id=self.entity_id, entity_class=self.entity_class, match_filter=self.match_filter, properties=self.properties, + entity_fetcher=self.entity_fetcher) + self.result['changed'] = False + if entities: + if len(entities) == 1: + self.result['id'] = entities[0].id + for entity in entities: + self.result['entities'].append(entity.to_dict()) + elif not self.module.check_mode: + self.module.fail_json(msg='Unable to find matching entries') + + def _handle_child(self, child, parent): + """ + Handles children of a main entity. Fields are similar to the normal fields. + Currently the only supported state is present. + """ + if 'type' not in list(child.keys()): + self.module.fail_json(msg='Child type unspecified') + elif 'id' not in list(child.keys()) and 'properties' not in list(child.keys()): + self.module.fail_json(msg='Child ID or properties unspecified') + + # Setting internal variables + child_id = None + if 'id' in list(child.keys()): + child_id = child['id'] + child_properties = None + if 'properties' in list(child.keys()): + child_properties = child['properties'] + child_filter = None + if 'match_filter' in list(child.keys()): + child_filter = child['match_filter'] + + # Checking if type exists + entity_class = None + try: + entity_class = getattr(VSPK, 'NU{0:s}'.format(child['type'])) + except AttributeError: + self.module.fail_json(msg='Unrecognised child type specified') + + entity_fetcher = parent.fetcher_for_rest_name(entity_class.rest_name) + if entity_fetcher is None and not child_id and not self.module.check_mode: + self.module.fail_json(msg='Unable to find a fetcher for child, and no ID specified.') + + # Try and find the child + entity = self._find_entity(entity_id=child_id, entity_class=entity_class, match_filter=child_filter, properties=child_properties, + entity_fetcher=entity_fetcher) + + # Determining action to take + if entity_fetcher.relationship == 'member' and not entity: + self.module.fail_json(msg='Trying to assign a child that does not exist') + elif entity_fetcher.relationship == 'member' and entity: + # Entity is a member, need to check if already present + if not self._is_member(entity_fetcher=entity_fetcher, entity=entity): + # Entity is not a member yet + if self.module.check_mode: + self.result['changed'] = True + else: + self._assign_member(entity_fetcher=entity_fetcher, entity=entity, entity_class=entity_class, parent=parent, set_output=False) + elif entity_fetcher.relationship in ['child', 'root'] and not entity:
+ # Entity is not present as a child, creating + if self.module.check_mode: + self.result['changed'] = True + else: + entity = self._create_entity(entity_class=entity_class, parent=parent, properties=child_properties) + elif entity_fetcher.relationship in ['child', 'root'] and entity: + changed = self._has_changed(entity=entity, properties=child_properties) + + if self.module.check_mode: + self.result['changed'] = changed + elif changed: + entity = self._save_entity(entity=entity) + + if entity: + self.result['entities'].append(entity.to_dict()) + + # Checking children + if 'children' in list(child.keys()) and not self.module.check_mode: + for subchild in child['children']: + self._handle_child(child=subchild, parent=entity) + + def _has_changed(self, entity, properties): + """ + Compares a set of properties with a given entity, returns True in case the properties are different from the + values in the entity + :param entity: The entity to check + :param properties: The properties to check + :return: boolean + """ + # Need to compare properties in entity and found entity + changed = False + if properties: + for property_name in list(properties.keys()): + if property_name == 'password': + continue + entity_value = '' + try: + entity_value = getattr(entity, property_name) + except AttributeError: + self.module.fail_json(msg='Property {0:s} is not valid for this type of entity'.format(property_name)) + + if entity_value != properties[property_name]: + # Difference in values, changing the property + changed = True + try: + setattr(entity, property_name, properties[property_name]) + except AttributeError: + self.module.fail_json(msg='Property {0:s} can not be changed for this type of entity'.format(property_name)) + return changed + + def _is_member(self, entity_fetcher, entity): + """ + Verifies if the entity is a member of the parent in the fetcher + :param entity_fetcher: The fetcher for the entity type + :param entity: The entity to look for as a member in the entity fetcher + :return: boolean + """ + members = entity_fetcher.get() + for member in members: + if member.id == entity.id: + return True + return False + + def _assign_member(self, entity_fetcher, entity, entity_class, parent, set_output): + """ + Adds the entity as a member to a parent + :param entity_fetcher: The fetcher of the entity type + :param entity: The entity to add as a member + :param entity_class: The class of the entity + :param parent: The parent on which to add the entity as a member + :param set_output: If set to True, sets the Ansible result variables + """ + members = entity_fetcher.get() + members.append(entity) + try: + parent.assign(members, entity_class) + except BambouHTTPError as error: + self.module.fail_json(msg='Unable to assign entity as a member: {0}'.format(error)) + self.result['changed'] = True + if set_output: + self.result['id'] = entity.id + self.result['entities'].append(entity.to_dict()) + + def _unassign_member(self, entity_fetcher, entity, entity_class, parent, set_output): + """ + Removes the entity as a member of a parent + :param entity_fetcher: The fetcher of the entity type + :param entity: The entity to remove as a member + :param entity_class: The class of the entity + :param parent: The parent from which to remove the entity as a member + :param set_output: If set to True, sets the Ansible result variables + """ + members = [] + for member in entity_fetcher.get(): + if member.id != entity.id: + members.append(member) + try:
+ parent.assign(members, entity_class) + except BambouHTTPError as error: + self.module.fail_json(msg='Unable to remove entity as a member: {0}'.format(error)) + self.result['changed'] = True + if set_output: + self.result['id'] = entity.id + self.result['entities'].append(entity.to_dict()) + + def _create_entity(self, entity_class, parent, properties): + """ + Creates a new entity in the parent, with all properties configured as provided + :param entity_class: The class of the entity + :param parent: The parent of the entity + :param properties: The set of properties of the entity + :return: The entity + """ + entity = entity_class(**properties) + try: + parent.create_child(entity) + except BambouHTTPError as error: + self.module.fail_json(msg='Unable to create entity: {0}'.format(error)) + self.result['changed'] = True + return entity + + def _save_entity(self, entity): + """ + Updates an existing entity + :param entity: The entity to save + :return: The updated entity + """ + try: + entity.save() + except BambouHTTPError as error: + self.module.fail_json(msg='Unable to update entity: {0}'.format(error)) + self.result['changed'] = True + return entity + + def _delete_entity(self, entity): + """ + Deletes an entity + :param entity: The entity to delete + """ + try: + entity.delete() + except BambouHTTPError as error: + self.module.fail_json(msg='Unable to delete entity: {0}'.format(error)) + self.result['changed'] = True + + def _wait_for_job(self, entity): + """ + Waits for a job to finish + :param entity: The job to wait for + """ + running = False + if entity.status == 'RUNNING': + self.result['changed'] = True + running = True + + while running: + time.sleep(1) + entity.fetch() + + if entity.status != 'RUNNING': + running = False + + self.result['entities'].append(entity.to_dict()) + if entity.status == 'ERROR': + self.module.fail_json(msg='Job ended in an error') + + +def main(): + """ + Main method + """ + module = AnsibleModule( + argument_spec=dict( + auth=dict( + required=True, + type='dict', + options=dict( + api_username=dict(required=True, type='str'), + api_enterprise=dict(required=True, type='str'), + api_url=dict(required=True, type='str'), + api_version=dict(required=True, type='str'), + api_password=dict(default=None, required=False, type='str', no_log=True), + api_certificate=dict(default=None, required=False, type='str', no_log=True), + api_key=dict(default=None, required=False, type='str', no_log=True) + ) + ), + type=dict(required=True, type='str'), + id=dict(default=None, required=False, type='str'), + parent_id=dict(default=None, required=False, type='str'), + parent_type=dict(default=None, required=False, type='str'), + state=dict(default=None, choices=['present', 'absent'], type='str'), + command=dict(default=None, choices=SUPPORTED_COMMANDS, type='str'), + match_filter=dict(default=None, required=False, type='str'), + properties=dict(default=None, required=False, type='dict'), + children=dict(default=None, required=False, type='list') + ), + mutually_exclusive=[ + ['command', 'state'] + ], + required_together=[ + ['parent_id', 'parent_type'] + ], + required_one_of=[ + ['command', 'state'] + ], + required_if=[ + ['state', 'present', ['id', 'properties', 'match_filter'], True], + ['state', 'absent', ['id', 'properties', 'match_filter'], True], + ['command', 'change_password', ['id', 'properties']], + ['command', 'wait_for_job', ['id']] + ], + supports_check_mode=True + ) + + if not HAS_BAMBOU: + module.fail_json(msg='bambou is required for this module') + + if not HAS_IMPORTLIB:
+ module.fail_json(msg='importlib (python 2.7) is required for this module') + + entity_manager = NuageEntityManager(module) + entity_manager.handle_main_entity() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/onyx/onyx_aaa.py b/plugins/modules/network/onyx/onyx_aaa.py new file mode 100644 index 0000000000..46080f0351 --- /dev/null +++ b/plugins/modules/network/onyx/onyx_aaa.py @@ -0,0 +1,160 @@ +#!/usr/bin/python +# +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: onyx_aaa +author: "Sara Touqan (@sarato)" +short_description: Configures AAA parameters +description: + - This module provides declarative management of AAA protocol parameters + on Mellanox ONYX network devices. +options: + tacacs_accounting_enabled: + description: + - Enables/Disables TACACS+ accounting. + type: bool + auth_default_user: + description: + - Sets local user default mapping. + type: str + choices: ['admin', 'monitor'] + auth_order: + description: + - Sets the order of how remote-to-local user mappings are handled. + type: str + choices: ['local-only', 'remote-first', 'remote-only'] + auth_fallback_enabled: + description: + - Enables/Disables the fallback server-err option. + type: bool +''' + +EXAMPLES = """ +- name: configures aaa + onyx_aaa: + tacacs_accounting_enabled: yes + auth_default_user: monitor + auth_order: local-only + auth_fallback_enabled: false +""" + +RETURN = """ +commands: + description: The list of configuration mode commands to send to the device.
+ returned: always + type: list + sample: + - aaa accounting changes default stop-only tacacs+ + - no aaa accounting changes default stop-only tacacs+ + - aaa authorization map default-user + - aaa authorization map order + - aaa authorization map fallback server-err + - no aaa authorization map fallback server-err +""" + +import re + +from ansible.module_utils.basic import AnsibleModule + +from ansible_collections.community.general.plugins.module_utils.network.onyx.onyx import show_cmd +from ansible_collections.community.general.plugins.module_utils.network.onyx.onyx import BaseOnyxModule + + +class OnyxAAAModule(BaseOnyxModule): + + def init_module(self): + """ initialize module + """ + element_spec = dict( + tacacs_accounting_enabled=dict(type='bool'), + auth_default_user=dict(type='str', choices=['admin', 'monitor']), + auth_order=dict(type='str', choices=['local-only', 'remote-first', 'remote-only']), + auth_fallback_enabled=dict(type='bool') + ) + argument_spec = dict() + argument_spec.update(element_spec) + self._module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True) + + def get_required_config(self): + module_params = self._module.params + self._required_config = dict(module_params) + self.validate_param_values(self._required_config) + + def _set_aaa_config(self, all_aaa_config): + aaa_config = all_aaa_config[0] + self._current_config['auth_default_user'] = aaa_config.get("Default User") + self._current_config['auth_order'] = aaa_config.get("Map Order") + auth_fallback_enabled = aaa_config.get("Fallback on server-err") + if auth_fallback_enabled == "yes": + self._current_config['auth_fallback_enabled'] = True + else: + self._current_config['auth_fallback_enabled'] = False + aaa_config_2 = all_aaa_config[2] + accounting_message = aaa_config_2.get("message") + if accounting_message == "No accounting methods configured.": + self._current_config['tacacs_accounting_enabled'] = False + else: + self._current_config['tacacs_accounting_enabled'] = True + + def _show_aaa_config(self): + cmd = "show aaa" + return show_cmd(self._module, cmd, json_fmt=True, fail_on_error=False) + + def load_current_config(self): + self._current_config = dict() + aaa_config = self._show_aaa_config() + if aaa_config: + self._set_aaa_config(aaa_config) + + def generate_commands(self): + tacacs_accounting_enabled = self._required_config.get("tacacs_accounting_enabled") + if tacacs_accounting_enabled is not None: + current_accounting_enabled = self._current_config.get("tacacs_accounting_enabled") + if current_accounting_enabled != tacacs_accounting_enabled: + if tacacs_accounting_enabled is True: + self._commands.append('aaa accounting changes default stop-only tacacs+') + else: + self._commands.append('no aaa accounting changes default stop-only tacacs+') + + auth_default_user = self._required_config.get("auth_default_user") + if auth_default_user is not None: + current_user = self._current_config.get("auth_default_user") + if current_user != auth_default_user: + self._commands.append('aaa authorization map default-user {0}' .format(auth_default_user)) + + auth_order = self._required_config.get("auth_order") + if auth_order is not None: + current_order = self._current_config.get("auth_order") + if current_order != auth_order: + self._commands.append('aaa authorization map order {0}' .format(auth_order)) + + auth_fallback_enabled = self._required_config.get("auth_fallback_enabled") + if auth_fallback_enabled is not None: + current_fallback = 
self._current_config.get("auth_fallback_enabled") + if current_fallback != auth_fallback_enabled: + if auth_fallback_enabled is True: + self._commands.append('aaa authorization map fallback server-err') + else: + self._commands.append('no aaa authorization map fallback server-err') + + +def main(): + """ main entry point for module execution + """ + OnyxAAAModule.main() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/onyx/onyx_bfd.py b/plugins/modules/network/onyx/onyx_bfd.py new file mode 100644 index 0000000000..d82ce254b0 --- /dev/null +++ b/plugins/modules/network/onyx/onyx_bfd.py @@ -0,0 +1,244 @@ +#!/usr/bin/python +# +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: onyx_bfd +author: "Sara Touqan (@sarato)" +short_description: Configures BFD parameters +description: + - This module provides declarative management of BFD protocol parameters + on Mellanox ONYX network devices. +options: + shutdown: + description: + - Administratively shut down BFD protection. + type: bool + vrf: + description: + - Specifies the VRF name. + type: str + interval_min_rx: + description: + - Minimum desired receive rate, should be between 50 and 6000. + type: int + interval_multiplier: + description: + - Desired detection multiplier, should be between 3 and 50. + type: int + interval_transmit_rate: + description: + - Minimum desired transmit rate, should be between 50 and 60000. + type: int + iproute_network_prefix: + description: + - Configures the ip route network prefix, e.g. 1.1.1.1. + type: str + iproute_mask_length: + description: + - Configures the mask length of the ip route network prefix, e.g. 24. + type: int + iproute_next_hop: + description: + - Configures the ip route next hop, e.g. 2.2.2.2. + type: str +''' + +EXAMPLES = """ +- name: configures bfd + onyx_bfd: + shutdown: yes + vrf: 5 + interval_min_rx: 55 + interval_multiplier: 8 + interval_transmit_rate: 88 + iproute_network_prefix: 1.1.1.0 + iproute_mask_length: 24 + iproute_next_hop: 3.2.2.2 +""" + +RETURN = """ +commands: + description: The list of configuration mode commands to send to the device.
+ returned: always + type: list + sample: + - ip bfd shutdown + - no ip bfd shutdown + - ip bfd shutdown vrf + - no ip bfd shutdown vrf + - ip bfd vrf interval min-rx multiplier transmit-rate force + - ip bfd interval min-rx multiplier transmit-rate force + - ip route vrf / bfd + - ip route / bfd +""" + +import re + +from ansible.module_utils.basic import AnsibleModule + +from ansible_collections.community.general.plugins.module_utils.network.onyx.onyx import show_cmd +from ansible_collections.community.general.plugins.module_utils.network.onyx.onyx import BaseOnyxModule + + +class OnyxBFDModule(BaseOnyxModule): + + def init_module(self): + """ initialize module + """ + element_spec = dict( + shutdown=dict(type='bool'), + vrf=dict(type='str'), + interval_min_rx=dict(type='int'), + interval_multiplier=dict(type='int'), + interval_transmit_rate=dict(type='int'), + iproute_network_prefix=dict(type='str'), + iproute_mask_length=dict(type='int'), + iproute_next_hop=dict(type='str'), + ) + argument_spec = dict() + argument_spec.update(element_spec) + self._module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + required_together=[ + ['interval_min_rx', 'interval_multiplier', 'interval_transmit_rate'], + ['iproute_network_prefix', 'iproute_mask_length', 'iproute_next_hop']]) + + def validate_bfd_interval_values(self): + interval_min_rx = self._required_config.get('interval_min_rx') + if interval_min_rx: + if ((interval_min_rx < 50) or (interval_min_rx > 6000)): + self._module.fail_json(msg='Receive interval should be between 50 and 6000.') + interval_multiplier = self._required_config.get('interval_multiplier') + if interval_multiplier: + if ((interval_multiplier < 3) or (interval_multiplier > 50)): + self._module.fail_json(msg='Multiplier should be between 3 and 50.') + interval_transmit_rate = self._required_config.get('interval_transmit_rate') + if interval_transmit_rate: + if ((interval_transmit_rate < 50) or (interval_transmit_rate > 60000)): + self._module.fail_json(msg='Transmit interval should be between 50 and 60000.') + + def get_required_config(self): + module_params = self._module.params + self._required_config = dict(module_params) + self.validate_param_values(self._required_config) + self.validate_bfd_interval_values() + + def _set_bfd_config(self, bfd_config): + curr_config_arr = [] + bfd_config = bfd_config.get('Lines') + if bfd_config is None: + return + for runn_config in bfd_config: + curr_config_arr.append(runn_config.strip()) + if 'ip bfd shutdown vrf default' in curr_config_arr: + self._current_config['bfd_shutdown'] = True + else: + self._current_config['bfd_shutdown'] = False + self._current_config['curr_config_arr'] = curr_config_arr + + def _show_bfd_config(self): + cmd = "show running-config | include bfd" + return show_cmd(self._module, cmd, json_fmt=True, fail_on_error=False) + + def load_current_config(self): + self._current_config = dict() + bfd_config = self._show_bfd_config() + if bfd_config: + self._set_bfd_config(bfd_config) + + def generate_shutdown_commands(self, curr_config_arr): + shutdown_enabled = self._required_config.get('shutdown') + vrf_name = self._required_config.get('vrf') + current_shutdown = self._current_config.get("bfd_shutdown") + if shutdown_enabled is not None: + if vrf_name is not None: + if curr_config_arr is not None: + if ('ip bfd shutdown vrf {0}' .format(vrf_name)) not in curr_config_arr: + if shutdown_enabled is True: + self._commands.append('ip bfd shutdown vrf {0}' .format(vrf_name)) + else: + if 
shutdown_enabled is False: + self._commands.append('no ip bfd shutdown vrf {0}' .format(vrf_name)) + else: + if ((current_shutdown is not None and (current_shutdown != shutdown_enabled)) or (current_shutdown is None)): + if shutdown_enabled is True: + self._commands.append('ip bfd shutdown') + else: + self._commands.append('no ip bfd shutdown') + + def generate_interval_commands(self, curr_config_arr): + interval_min_rx = self._required_config.get('interval_min_rx') + interval_multiplier = self._required_config.get('interval_multiplier') + interval_transmit_rate = self._required_config.get('interval_transmit_rate') + vrf_name = self._required_config.get('vrf') + if ((interval_min_rx is not None) and (interval_multiplier is not None) and (interval_transmit_rate is not None)): + if vrf_name is not None: + if curr_config_arr is not None: + if ((('ip bfd vrf {0} interval transmit-rate {1} force' .format(vrf_name, interval_transmit_rate)) not in curr_config_arr) or + (('ip bfd vrf {0} interval min-rx {1} force' .format(vrf_name, interval_min_rx)) not in curr_config_arr) or + (('ip bfd vrf {0} interval multiplier {1} force' .format(vrf_name, interval_multiplier)) not in curr_config_arr)): + self._commands.append('ip bfd vrf {0} interval min-rx {1} multiplier {2} transmit-rate {3} force' + .format(vrf_name, interval_min_rx, interval_multiplier, interval_transmit_rate)) + else: + self._commands.append('ip bfd vrf {0} interval min-rx {1} multiplier {2} transmit-rate {3} force' + .format(vrf_name, interval_min_rx, interval_multiplier, interval_transmit_rate)) + else: + if curr_config_arr is not None: + if ((('ip bfd vrf default interval transmit-rate {0} force' .format(interval_transmit_rate)) not in curr_config_arr) or + (('ip bfd vrf default interval min-rx {0} force' .format(interval_min_rx)) not in curr_config_arr) or + (('ip bfd vrf default interval multiplier {0} force' .format(interval_multiplier)) not in curr_config_arr)): + self._commands.append('ip bfd interval min-rx {0} multiplier {1} transmit-rate {2} force' + .format(interval_min_rx, interval_multiplier, interval_transmit_rate)) + else: + self._commands.append('ip bfd interval min-rx {0} multiplier {1} transmit-rate {2} force' + .format(interval_min_rx, interval_multiplier, interval_transmit_rate)) + + def generate_iproute_commands(self, curr_config_arr): + iproute_network_prefix = self._required_config.get('iproute_network_prefix') + iproute_mask_length = self._required_config.get('iproute_mask_length') + iproute_next_hop = self._required_config.get('iproute_next_hop') + vrf_name = self._required_config.get('vrf') + if ((iproute_network_prefix is not None) and (iproute_mask_length is not None) and + (iproute_next_hop is not None)): + if vrf_name is not None: + if curr_config_arr is not None: + if ('ip route vrf {0} {1}/{2} {3} bfd' .format(vrf_name, iproute_network_prefix, + iproute_mask_length, iproute_next_hop)) not in curr_config_arr: + self._commands.append('ip route vrf {0} {1} /{2} {3} bfd' + .format(vrf_name, iproute_network_prefix, iproute_mask_length, iproute_next_hop)) + else: + self._commands.append('ip route vrf {0} {1} /{2} {3} bfd' .format(vrf_name, iproute_network_prefix, iproute_mask_length, iproute_next_hop)) + else: + if curr_config_arr is not None: + if ('ip route vrf default {0}/{1} {2} bfd' .format(iproute_network_prefix, + iproute_mask_length, iproute_next_hop)) not in curr_config_arr: + self._commands.append('ip route {0} /{1} {2} bfd' .format(iproute_network_prefix, iproute_mask_length, iproute_next_hop)) + 
else: + self._commands.append('ip route {0} /{1} {2} bfd' .format(iproute_network_prefix, iproute_mask_length, iproute_next_hop)) + + def generate_commands(self): + curr_config_arr = self._current_config.get("curr_config_arr") + self.generate_shutdown_commands(curr_config_arr) + self.generate_interval_commands(curr_config_arr) + self.generate_iproute_commands(curr_config_arr) + + +def main(): + """ main entry point for module execution + """ + OnyxBFDModule.main() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/onyx/onyx_bgp.py b/plugins/modules/network/onyx/onyx_bgp.py new file mode 100644 index 0000000000..99ee0299fa --- /dev/null +++ b/plugins/modules/network/onyx/onyx_bgp.py @@ -0,0 +1,450 @@ +#!/usr/bin/python +# +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: onyx_bgp +author: "Samer Deeb (@samerd), Anas Badaha (@anasb)" +short_description: Configures BGP on Mellanox ONYX network devices +description: + - This module provides declarative management of BGP router and neighbors + on Mellanox ONYX network devices. +notes: + - Tested on ONYX 3.6.4000 +options: + as_number: + description: + - Local AS number. + required: true + router_id: + description: + - Router IP address. + neighbors: + description: + - List of neighbors. Required if I(state=present). + suboptions: + remote_as: + description: + - Remote AS number. + required: true + neighbor: + description: + - Neighbor IP address. + required: true + multihop: + description: + - Multihop number. + networks: + description: + - List of advertised networks. + fast_external_fallover: + description: + - Will configure fast_external_fallover when it is True. + type: bool + max_paths: + description: + - Maximum BGP paths. + ecmp_bestpath: + description: + - Enables ECMP across AS paths. + type: bool + evpn: + description: + - Configures the EVPN peer-group. + type: bool + vrf: + description: + - VRF name. + state: + description: + - BGP state. + default: present + choices: ['present', 'absent'] + purge: + description: + - Will remove all neighbors when it is True. + type: bool + default: false +''' + +EXAMPLES = """ +- name: configure bgp + onyx_bgp: + as_number: 320 + router_id: 10.3.3.3 + neighbors: + - remote_as: 321 + neighbor: 10.3.3.4 + - remote_as: 322 + neighbor: 10.3.3.5 + multihop: 250 + purge: True + state: present + networks: + - 172.16.1.0/24 + vrf: default + evpn: yes + fast_external_fallover: yes + max_paths: 32 + ecmp_bestpath: yes + +""" + +RETURN = """ +commands: + description: The list of configuration mode commands to send to the device.
+ returned: always + type: list + sample: + - router bgp 320 vrf default + - exit + - router bgp 320 router-id 10.3.3.3 force + - router bgp 320 vrf default bgp fast-external-fallover + - router bgp 320 vrf default maximum-paths 32 + - router bgp 320 vrf default bestpath as-path multipath-relax force + - router bgp 320 vrf default neighbor evpn peer-group + - router bgp 320 vrf default neighbor evpn send-community extended + - router bgp 320 vrf default address-family l2vpn-evpn neighbor evpn next-hop-unchanged + - router bgp 320 vrf default address-family l2vpn-evpn neighbor evpn activate + - router bgp 320 vrf default address-family l2vpn-evpn auto-create + - router bgp 320 vrf default neighbor 10.3.3.4 remote-as 321 + - router bgp 320 vrf default neighbor 10.3.3.4 ebgp-multihop 250 + - router bgp 320 vrf default neighbor 10.3.3.5 remote-as 322 + - router bgp 320 vrf default network 172.16.1.0 /24 +""" +import re +from ansible.module_utils.six import iteritems + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.network.onyx.onyx import get_bgp_summary +from ansible_collections.community.general.plugins.module_utils.network.onyx.onyx import BaseOnyxModule + + +class OnyxBgpModule(BaseOnyxModule): + LOCAL_AS_REGEX = re.compile(r'^\s.*router bgp\s+(\d+)\s+vrf\s+(\S+).*') + ROUTER_ID_REGEX = re.compile( + r'^\s.*router bgp\s+(\d+).*router-id\s+(\S+)\s+.*') + NEIGHBOR_REGEX = re.compile( + r'^\s.*router bgp\s+(\d+).*neighbor\s+(\S+)\s+remote\-as\s+(\d+).*') + NEIGHBOR_MULTIHOP_REGEX = re.compile( + r'^\s.*router bgp\s+(\d+).*neighbor\s+(\S+)\s+ebgp\-multihop\s+(\d+).*') + NETWORK_REGEX = re.compile( + r'^\s.*router bgp\s+(\d+).*network\s+(\S+)\s+(\S+).*') + FAST_EXTERNAL_FALLOVER_REGEX = re.compile( + r'^\s.*router bgp\s+(\d+)\s+vrf\s+(\S+)\s+bgp fast\-external\-fallover.*') + MAX_PATHS_REGEX = re.compile( + r'^\s.*router bgp\s+(\d+)\s+vrf\s+(\S+)\s+maximum\-paths\s+(\d+).*') + ECMP_BESTPATH_REGEX = re.compile( + r'^\s.*router bgp\s+(\d+)\s+vrf\s+(\S+)\s+bestpath as\-path multipath\-relax.*') + NEIGHBOR_EVPN_REGEX = re.compile( + r'^\s.*router bgp\s+(\d+)\s+vrf\s+(\S+)\s+neighbor\s+(\S+)\s+peer\-group evpn.*') + EVPN_PEER_GROUP_REGEX = re.compile( + r'^\s.*router bgp\s+(\d+)\s+vrf\s+(\S+)\s+neighbor evpn peer\-group.*') + EVPN_SEND_COMMUNITY_EXTENDED_REGEX = re.compile( + r'^\s.*router bgp\s+(\d+)\s+vrf\s+(\S+)\s+neighbor evpn send-community extended.*') + EVPN_NEXT_HOP_UNCHANGED_REGEX = re.compile( + r'^\s.*router bgp\s+(\d+)\s+vrf\s+(\S+)\s+address\-family l2vpn\-evpn neighbor evpn next\-hop-unchanged.*') + EVPN_ACTIVATE_REGEX = re.compile( + r'^\s.*router bgp\s+(\d+)\s+vrf\s+(\S+)\s+address-family l2vpn\-evpn neighbor evpn activate.*') + EVPN_AUTO_CREATE_REGEX = re.compile( + r'^\s.*router bgp\s+(\d+)\s+vrf\s+(\S+)\s+address-family l2vpn\-evpn auto-create.*') + + _purge = False + + EVPN_PEER_GROUP_ATTR = "evpn_peer_group" + EVPN_SEND_COMMUNITY_EXTENDED_ATTR = "evpn_send_community_extended" + EVPN_NEXT_HOP_UNCHANGED_ATTR = "evpn_next_hop_unchanged" + EVPN_ACTIVATE_ATTR = "evpn_activate" + EVPN_AUTO_CREATE_ATTR = "evpn_auto_create" + + EVPN_PEER_GROUP_CMD = "router bgp %s vrf %s neighbor evpn peer-group" + EVPN_SEND_COMMUNITY_EXTENDED_CMD = "router bgp %s vrf %s neighbor evpn send-community extended" + EVPN_NEXT_HOP_UNCHANGED_CMD = "router bgp %s vrf %s address-family l2vpn-evpn neighbor evpn next-hop-unchanged" + EVPN_ACTIVATE_CMD = "router bgp %s vrf %s address-family l2vpn-evpn neighbor evpn activate" + 
EVPN_AUTO_CREATE_CMD = "router bgp %s vrf %s address-family l2vpn-evpn auto-create" + + EVPN_ENABLE_ATTRS = [EVPN_PEER_GROUP_ATTR, EVPN_SEND_COMMUNITY_EXTENDED_ATTR, + EVPN_NEXT_HOP_UNCHANGED_ATTR, EVPN_ACTIVATE_ATTR, EVPN_AUTO_CREATE_ATTR] + + EVPN_DISABLE_ATTRS = [EVPN_PEER_GROUP_ATTR, EVPN_AUTO_CREATE_ATTR] + + EVPN_COMMANDS_REGEX_MAPPER = { + EVPN_PEER_GROUP_ATTR: (EVPN_PEER_GROUP_REGEX, EVPN_PEER_GROUP_CMD), + EVPN_SEND_COMMUNITY_EXTENDED_ATTR: (EVPN_SEND_COMMUNITY_EXTENDED_REGEX, + EVPN_SEND_COMMUNITY_EXTENDED_CMD), + EVPN_NEXT_HOP_UNCHANGED_ATTR: (EVPN_NEXT_HOP_UNCHANGED_REGEX, + EVPN_NEXT_HOP_UNCHANGED_CMD), + EVPN_ACTIVATE_ATTR: (EVPN_ACTIVATE_REGEX, EVPN_ACTIVATE_CMD), + EVPN_AUTO_CREATE_ATTR: (EVPN_AUTO_CREATE_REGEX, EVPN_AUTO_CREATE_CMD) + } + + def init_module(self): + """ initialize module + """ + neighbor_spec = dict( + remote_as=dict(type='int', required=True), + neighbor=dict(required=True), + multihop=dict(type='int') + ) + element_spec = dict( + as_number=dict(type='int', required=True), + router_id=dict(), + neighbors=dict(type='list', elements='dict', + options=neighbor_spec), + networks=dict(type='list', elements='str'), + state=dict(choices=['present', 'absent'], default='present'), + purge=dict(default=False, type='bool'), + vrf=dict(), + fast_external_fallover=dict(type='bool'), + max_paths=dict(type='int'), + ecmp_bestpath=dict(type='bool'), + evpn=dict(type='bool') + ) + argument_spec = dict() + + argument_spec.update(element_spec) + self._module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True) + + def get_required_config(self): + module_params = self._module.params + self._required_config = dict(module_params) + self._purge = self._required_config.get('purge', False) + self.validate_param_values(self._required_config) + + def _set_bgp_config(self, bgp_config): + lines = bgp_config.split('\n') + self._current_config['router_id'] = None + self._current_config['as_number'] = None + self._current_config['fast_external_fallover'] = False + self._current_config['ecmp_bestpath'] = False + self._current_config[self.EVPN_PEER_GROUP_ATTR] = False + self._current_config[self.EVPN_SEND_COMMUNITY_EXTENDED_ATTR] = False + self._current_config[self.EVPN_NEXT_HOP_UNCHANGED_ATTR] = False + self._current_config[self.EVPN_AUTO_CREATE_ATTR] = False + self._current_config[self.EVPN_ACTIVATE_ATTR] = False + neighbors = self._current_config['neighbors'] = dict() + networks = self._current_config['networks'] = list() + for line in lines: + if line.startswith('#'): + continue + if not self._current_config['as_number']: + match = self.LOCAL_AS_REGEX.match(line) + if match: + self._current_config['as_number'] = int(match.group(1)) + self._current_config['vrf'] = match.group(2) + continue + if not self._current_config['router_id']: + match = self.ROUTER_ID_REGEX.match(line) + if match: + self._current_config['router_id'] = match.group(2) + continue + match = self.NEIGHBOR_REGEX.match(line) + if match: + neighbor = neighbors.setdefault(match.group(2), dict()) + neighbor['remote_as'] = int(match.group(3)) + continue + match = self.NEIGHBOR_MULTIHOP_REGEX.match(line) + if match: + neighbor = neighbors.setdefault(match.group(2), dict()) + neighbor["multihop"] = int(match.group(3)) + continue + match = self.NEIGHBOR_EVPN_REGEX.match(line) + if match: + neighbor = neighbors.setdefault(match.group(3), dict()) + neighbor["evpn"] = True + continue + match = self.NETWORK_REGEX.match(line) + if match: + network = match.group(2) + match.group(3) + networks.append(network) + 
continue + match = self.FAST_EXTERNAL_FALLOVER_REGEX.match(line) + if match: + self._current_config['fast_external_fallover'] = True + continue + match = self.ECMP_BESTPATH_REGEX.match(line) + if match: + self._current_config['ecmp_bestpath'] = True + continue + match = self.MAX_PATHS_REGEX.match(line) + if match: + self._current_config['max_paths'] = int(match.group(3)) + continue + for key, value in iteritems(self.EVPN_COMMANDS_REGEX_MAPPER): + match = value[0].match(line) + if match: + self._current_config[key] = True + break + + def _get_bgp_summary(self): + return get_bgp_summary(self._module) + + def load_current_config(self): + self._current_config = dict() + bgp_config = self._get_bgp_summary() + if bgp_config: + self._set_bgp_config(bgp_config) + + def generate_commands(self): + state = self._required_config['state'] + if state == 'present': + self._generate_bgp_cmds() + else: + self._generate_no_bgp_cmds() + + def _generate_bgp_cmds(self): + vrf = self._required_config.get('vrf') + if vrf is None: + vrf = "default" + + as_number = self._required_config['as_number'] + curr_as_num = self._current_config.get('as_number') + curr_vrf = self._current_config.get("vrf") + bgp_removed = False + if curr_as_num != as_number or vrf != curr_vrf: + if curr_as_num: + self._commands.append('no router bgp %d vrf %s' % (curr_as_num, curr_vrf)) + bgp_removed = True + self._commands.append('router bgp %d vrf %s' % (as_number, vrf)) + self._commands.append('exit') + + req_router_id = self._required_config.get('router_id') + if req_router_id is not None: + curr_route_id = self._current_config.get('router_id') + if bgp_removed or req_router_id != curr_route_id: + self._commands.append('router bgp %d vrf %s router-id %s force' % (as_number, vrf, req_router_id)) + + fast_external_fallover = self._required_config.get('fast_external_fallover') + if fast_external_fallover is not None: + current_fast_external_fallover = self._current_config.get('fast_external_fallover') + if fast_external_fallover and (bgp_removed or fast_external_fallover != current_fast_external_fallover): + self._commands.append('router bgp %d vrf %s bgp fast-external-fallover' % (as_number, vrf)) + elif not fast_external_fallover and (bgp_removed or fast_external_fallover != current_fast_external_fallover): + self._commands.append('router bgp %d vrf %s no bgp fast-external-fallover' % (as_number, vrf)) + + max_paths = self._required_config.get('max_paths') + if max_paths is not None: + current_max_paths = self._current_config.get('max_paths') + if bgp_removed or max_paths != current_max_paths: + self._commands.append('router bgp %d vrf %s maximum-paths %s' % (as_number, vrf, max_paths)) + + ecmp_bestpath = self._required_config.get('ecmp_bestpath') + if ecmp_bestpath is not None: + current_ecmp_bestpath = self._current_config.get('ecmp_bestpath') + if ecmp_bestpath and (bgp_removed or ecmp_bestpath != current_ecmp_bestpath): + self._commands.append('router bgp %d vrf %s bestpath as-path multipath-relax force' % (as_number, vrf)) + elif not ecmp_bestpath and (bgp_removed or ecmp_bestpath != current_ecmp_bestpath): + self._commands.append('router bgp %d vrf %s no bestpath as-path multipath-relax force' % (as_number, vrf)) + + evpn = self._required_config.get('evpn') + if evpn is not None: + self._generate_evpn_cmds(evpn, as_number, vrf) + + self._generate_neighbors_cmds(as_number, vrf, bgp_removed) + self._generate_networks_cmds(as_number, vrf, bgp_removed) + + def _generate_neighbors_cmds(self, as_number, vrf, bgp_removed): + 
req_neighbors = self._required_config['neighbors'] + curr_neighbors = self._current_config.get('neighbors', {}) + evpn = self._required_config.get('evpn') + if self._purge: + for neighbor in curr_neighbors: + remote_as = curr_neighbors[neighbor].get("remote_as") + self._commands.append('router bgp %s vrf %s no neighbor %s remote-as %s' % ( + as_number, vrf, neighbor, remote_as)) + + if req_neighbors is not None: + for neighbor_data in req_neighbors: + neighbor = neighbor_data.get("neighbor") + curr_neighbor = curr_neighbors.get(neighbor) + remote_as = neighbor_data.get("remote_as") + multihop = neighbor_data.get("multihop") + if bgp_removed or curr_neighbor is None: + if remote_as is not None: + self._commands.append( + 'router bgp %s vrf %s neighbor %s remote-as %s' % (as_number, vrf, neighbor, remote_as)) + if multihop is not None: + self._commands.append( + 'router bgp %s vrf %s neighbor %s ebgp-multihop %s' % (as_number, vrf, neighbor, multihop)) + if evpn: + self._commands.append( + 'router bgp %s vrf %s neighbor %s peer-group evpn' % (as_number, vrf, neighbor)) + elif curr_neighbor is not None: + curr_remote_as = curr_neighbor.get("remote_as") + curr_multihop = curr_neighbor.get("multihop") + curr_neighbor_evpn = curr_neighbor.get("evpn") + if remote_as != curr_remote_as: + self._commands.append( + 'router bgp %s vrf %s neighbor %s remote-as %s' % (as_number, vrf, neighbor, remote_as)) + if multihop is not None and multihop != curr_multihop: + self._commands.append( + 'router bgp %s vrf %s neighbor %s ebgp-multihop %s' % (as_number, vrf, neighbor, multihop)) + if evpn and curr_neighbor_evpn is not True: + self._commands.append( + 'router bgp %s vrf %s neighbor %s peer-group evpn' % (as_number, vrf, neighbor)) + + def _generate_networks_cmds(self, as_number, vrf, bgp_removed): + req_networks = self._required_config['networks'] or [] + curr_networks = self._current_config.get('networks', []) + if not bgp_removed: + for network in curr_networks: + if network not in req_networks: + net_attrs = network.split('/') + if len(net_attrs) != 2: + self._module.fail_json( + msg='Invalid network %s' % network) + + net_address, netmask = net_attrs + cmd = 'router bgp %s no network %s /%s' % ( + as_number, net_address, netmask) + self._commands.append(cmd) + + for network in req_networks: + if bgp_removed or network not in curr_networks: + net_attrs = network.split('/') + if len(net_attrs) != 2: + self._module.fail_json( + msg='Invalid network %s' % network) + net_address, netmask = net_attrs + cmd = 'router bgp %s vrf %s network %s /%s' % ( + as_number, vrf, net_address, netmask) + self._commands.append(cmd) + + def _generate_no_bgp_cmds(self): + as_number = self._required_config['as_number'] + curr_as_num = self._current_config.get('as_number') + if curr_as_num and curr_as_num == as_number: + self._commands.append('no router bgp %d' % as_number) + + def _generate_evpn_cmds(self, evpn, as_number, vrf): + if evpn: + for attr in self.EVPN_ENABLE_ATTRS: + curr_attr = self._current_config.get(attr) + if curr_attr is not True: + self._commands.append(self.EVPN_COMMANDS_REGEX_MAPPER.get(attr)[1] % (as_number, vrf)) + elif not evpn: + for attr in self.EVPN_DISABLE_ATTRS: + curr_attr = self._current_config.get(attr) + if curr_attr is not False: + self._commands.append("no " + self.EVPN_COMMANDS_REGEX_MAPPER.get(attr)[1] % (as_number, vrf)) + + +def main(): + """ main entry point for module execution + """ + OnyxBgpModule.main() + + +if __name__ == '__main__': + main() diff --git 
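
The pattern above repeats across these ONYX modules: parse the device state into `_current_config`, take the requested state from `_required_config`, and emit only the CLI commands needed to converge. Below is a minimal standalone sketch of the network-list reconciliation performed by `_generate_networks_cmds`; the function name `diff_network_commands` and the sample data are illustrative assumptions, not part of the module.

def diff_network_commands(as_number, vrf, required, current):
    """Return only the commands needed to move `current` networks to `required`."""
    commands = []
    # networks on the device but not requested are removed
    for network in current:
        if network not in required:
            address, mask = network.split('/')
            commands.append('router bgp %s no network %s /%s' % (as_number, address, mask))
    # requested networks missing from the device are added
    for network in required:
        if network not in current:
            address, mask = network.split('/')
            commands.append('router bgp %s vrf %s network %s /%s' % (as_number, vrf, address, mask))
    return commands

# Only 10.0.2.0/24 needs a command; 10.0.1.0/24 is already configured.
print(diff_network_commands(65000, 'default',
                            ['10.0.1.0/24', '10.0.2.0/24'],
                            ['10.0.1.0/24']))
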
a/plugins/modules/network/onyx/onyx_buffer_pool.py b/plugins/modules/network/onyx/onyx_buffer_pool.py
new file mode 100644
index 0000000000..52f4d3f788
--- /dev/null
+++ b/plugins/modules/network/onyx/onyx_buffer_pool.py
@@ -0,0 +1,144 @@
+#!/usr/bin/python
+#
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+DOCUMENTATION = '''
+---
+module: onyx_buffer_pool
+author: "Anas Badaha (@anasb)"
+short_description: Configures Buffer Pool
+description:
+  - This module provides declarative management of Onyx Buffer Pool configuration
+    on Mellanox ONYX network devices.
+notes:
+  - Tested on ONYX 3.6.8130
+options:
+  name:
+    description:
+      - Pool name.
+    required: true
+  pool_type:
+    description:
+      - Pool type.
+    choices: ['lossless', 'lossy']
+    default: lossy
+  memory_percent:
+    description:
+      - Memory percent.
+  switch_priority:
+    description:
+      - Switch priority, range 0-7.
+'''
+
+EXAMPLES = """
+- name: configure buffer pool
+  onyx_buffer_pool:
+    name: roce
+    pool_type: lossless
+    memory_percent: 50.00
+    switch_priority: 3
+
+"""
+
+RETURN = """
+commands:
+  description: The list of configuration mode commands to send to the device.
+  returned: always
+  type: list
+  sample:
+    - traffic pool roce type lossless
+    - traffic pool roce memory percent 50.00
+    - traffic pool roce map switch-priority 3
+"""
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.network.onyx.onyx import show_cmd
+from ansible_collections.community.general.plugins.module_utils.network.onyx.onyx import BaseOnyxModule
+
+
+class OnyxBufferPoolModule(BaseOnyxModule):
+
+    def init_module(self):
+        """ initialize module
+        """
+        element_spec = dict(
+            name=dict(type='str', required=True),
+            pool_type=dict(choices=['lossless', 'lossy'], default='lossy'),
+            memory_percent=dict(type='float'),
+            switch_priority=dict(type='int')
+        )
+        argument_spec = dict()
+        argument_spec.update(element_spec)
+        self._module = AnsibleModule(
+            argument_spec=argument_spec,
+            supports_check_mode=True)
+
+    def get_required_config(self):
+        module_params = self._module.params
+        self._required_config = dict(module_params)
+        self.validate_param_values(self._required_config)
+
+    def validate_switch_priority(self, value):
+        if value and not 0 <= int(value) <= 7:
+            self._module.fail_json(msg='switch_priority value must be between 0 and 7')
+
+    def _set_traffic_pool_config(self, traffic_pool_config):
+        if traffic_pool_config is None:
+            return
+        traffic_pool_config = traffic_pool_config.get(self._required_config.get('name'))
+        self._current_config['pool_type'] = traffic_pool_config[0].get("Type")
+        self._current_config['switch_priority'] = int(traffic_pool_config[0].get("Switch Priorities"))
+        self._current_config['memory_percent'] = float(traffic_pool_config[0].get("Memory [%]"))
+
+    def _show_traffic_pool(self):
+        cmd = "show traffic pool {0}".format(self._required_config.get("name"))
+        return show_cmd(self._module, cmd, json_fmt=True, fail_on_error=False)
+
+    def load_current_config(self):
+        self._current_config = dict()
+        traffic_pool_config = self._show_traffic_pool()
+        self._set_traffic_pool_config(traffic_pool_config)
+
+    def generate_commands(self):
+        name = self._required_config.get("name")
+        pool_type = self._required_config.get("pool_type")
+
+        if self._current_config is None:
+            self._add_add_traffic_pool_cmds(name, pool_type)
+        else:
+            current_pool_type = self._current_config.get("pool_type")
+            if pool_type != current_pool_type:
+                self._add_add_traffic_pool_cmds(name, pool_type)
+
+        memory_percent = self._required_config.get("memory_percent")
+        if memory_percent is not None:
+            curr_memory_percent = self._current_config.get("memory_percent")
+            if curr_memory_percent is None or memory_percent != curr_memory_percent:
+                self._commands.append('traffic pool {0} memory percent {1}'.format(name, memory_percent))
+
+        switch_priority = self._required_config.get("switch_priority")
+        if switch_priority is not None:
+            curr_switch_priority = self._current_config.get("switch_priority")
+            if curr_switch_priority is None or switch_priority != curr_switch_priority:
+                self._commands.append('traffic pool {0} map switch-priority {1}'.format(name, switch_priority))
+
+    def _add_add_traffic_pool_cmds(self, name, pool_type):
+        self._commands.append('traffic pool {0} type {1}'.format(name, pool_type))
+
+
+def main():
+    """ main entry point for module execution
+    """
+    OnyxBufferPoolModule.main()
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/network/onyx/onyx_command.py b/plugins/modules/network/onyx/onyx_command.py
new file mode 100644
index 0000000000..11aa50c21c
--- /dev/null
+++ b/plugins/modules/network/onyx/onyx_command.py
@@ -0,0 +1,216 @@
+#!/usr/bin/python
+#
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+DOCUMENTATION = '''
+---
+module: onyx_command
+extends_documentation_fragment:
+- community.general.onyx
+
+author: "Samer Deeb (@samerd)"
+short_description: Run commands on remote devices running Mellanox ONYX
+description:
+  - Sends arbitrary commands to a Mellanox ONYX network device and returns
+    the results read from the device. This module includes an
+    argument that will cause the module to wait for a specific condition
+    before returning or timing out if the condition is not met.
+  - This module does not support running commands in configuration mode.
+    Please use M(onyx_config) to configure Mellanox ONYX devices.
+notes:
+  - Tested on ONYX 3.6.4000
+options:
+  commands:
+    description:
+      - List of commands to send to the remote Mellanox ONYX network device.
+        The resulting output from the command is returned. If the
+        I(wait_for) argument is provided, the module is not returned until
+        the condition is satisfied or the number of retries has expired.
+    required: true
+  wait_for:
+    description:
+      - List of conditions to evaluate against the output of the
+        command. The task will wait for each condition to be true
+        before moving forward. If the conditional is not true
+        within the configured number of retries, the task fails.
+        See examples.
+  match:
+    description:
+      - The I(match) argument is used in conjunction with the
+        I(wait_for) argument to specify the match policy. Valid
+        values are C(all) or C(any). If the value is set to C(all)
+        then all conditionals in the wait_for must be satisfied. If
+        the value is set to C(any) then only one of the values must be
+        satisfied.
+    default: all
+    choices: ['any', 'all']
+  retries:
+    description:
+      - Specifies the number of retries a command should be tried
+        before it is considered failed. The command is run on the
+        target device every retry and evaluated against the
+        I(wait_for) conditions.
+    default: 10
+  interval:
+    description:
+      - Configures the interval in seconds to wait between retries
+        of the command. If the command does not pass the specified
+        conditions, the interval indicates how long to wait before
+        trying the command again.
+    default: 1
+'''
+
+EXAMPLES = """
+tasks:
+  - name: run show version on remote devices
+    onyx_command:
+      commands: show version
+
+  - name: run show version and check to see if output contains MLNXOS
+    onyx_command:
+      commands: show version
+      wait_for: result[0] contains MLNXOS
+
+  - name: run multiple commands on remote nodes
+    onyx_command:
+      commands:
+        - show version
+        - show interfaces
+
+  - name: run multiple commands and evaluate the output
+    onyx_command:
+      commands:
+        - show version
+        - show interfaces
+      wait_for:
+        - result[0] contains MLNXOS
+        - result[1] contains mgmt1
+"""
+
+RETURN = """
+stdout:
+  description: The set of responses from the commands
+  returned: always apart from low level errors (such as action plugin)
+  type: list
+  sample: ['...', '...']
+stdout_lines:
+  description: The value of stdout split into a list
+  returned: always apart from low level errors (such as action plugin)
+  type: list
+  sample: [['...', '...'], ['...'], ['...']]
+failed_conditions:
+  description: The list of conditionals that have failed
+  returned: failed
+  type: list
+  sample: ['...', '...']
+"""
+
+import time
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.parsing import Conditional
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import ComplexList
+from ansible.module_utils.six import string_types
+
+from ansible_collections.community.general.plugins.module_utils.network.onyx.onyx import run_commands
+
+
+def to_lines(stdout):
+    for item in stdout:
+        if isinstance(item, string_types):
+            item = str(item).split('\n')
+        yield item
+
+
+def parse_commands(module, warnings):
+    command = ComplexList(dict(
+        command=dict(key=True),
+        prompt=dict(),
+        answer=dict()
+    ), module)
+    commands = command(module.params['commands'])
+    for item in list(commands):
+        if module.check_mode and not item['command'].startswith('show'):
+            warnings.append(
+                'only show commands are supported when using check mode, not '
+                'executing `%s`' % item['command']
+            )
+            commands.remove(item)
+        elif item['command'].startswith('conf'):
+            module.fail_json(
+                msg='onyx_command does not support running config mode '
+                'commands. 
Please use onyx_config instead' + ) + return commands + + +def main(): + """main entry point for module execution + """ + argument_spec = dict( + commands=dict(type='list', required=True), + + wait_for=dict(type='list'), + match=dict(default='all', choices=['all', 'any']), + + retries=dict(default=10, type='int'), + interval=dict(default=1, type='int') + ) + + module = AnsibleModule(argument_spec=argument_spec, + supports_check_mode=True) + + result = {'changed': False} + + warnings = list() + commands = parse_commands(module, warnings) + result['warnings'] = warnings + + wait_for = module.params['wait_for'] or list() + conditionals = [Conditional(c) for c in wait_for] + + retries = module.params['retries'] + interval = module.params['interval'] + match = module.params['match'] + + while retries > 0: + responses = run_commands(module, commands) + + for item in list(conditionals): + if item(responses): + if match == 'any': + conditionals = list() + break + conditionals.remove(item) + + if not conditionals: + break + + time.sleep(interval) + retries -= 1 + + if conditionals: + failed_conditions = [item.raw for item in conditionals] + msg = 'One or more conditional statements have not been satisfied' + module.fail_json(msg=msg, failed_conditions=failed_conditions) + + result.update({ + 'changed': False, + 'stdout': responses, + 'stdout_lines': list(to_lines(responses)) + }) + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/onyx/onyx_config.py b/plugins/modules/network/onyx/onyx_config.py new file mode 100644 index 0000000000..1ccb6d5beb --- /dev/null +++ b/plugins/modules/network/onyx/onyx_config.py @@ -0,0 +1,254 @@ +#!/usr/bin/python +# +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: onyx_config +extends_documentation_fragment: +- community.general.onyx + +author: "Alex Tabachnik (@atabachnik), Samer Deeb (@samerd)" +short_description: Manage Mellanox ONYX configuration sections +description: + - Mellanox ONYX configurations uses a simple block indent file syntax + for segmenting configuration into sections. This module provides + an implementation for working with ONYX configuration sections in + a deterministic way. +options: + lines: + description: + - The ordered set of commands that should be configured in the + section. The commands must be the exact same commands as found + in the device running-config. Be sure to note the configuration + command syntax as some commands are automatically modified by the + device config parser. + aliases: ['commands'] + parents: + description: + - The ordered set of parents that uniquely identify the section + the commands should be checked against. If the parents argument + is omitted, the commands are checked against the set of top + level or global commands. + src: + description: + - Specifies the source path to the file that contains the configuration + or configuration template to load. The path to the source file can + either be the full path on the Ansible control host or a relative + path from the playbook or role root directory. This argument is mutually + exclusive with I(lines), I(parents). 
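
The wait_for conditionals accepted by onyx_command above are compiled into netcommon Conditional objects and re-evaluated on every retry; satisfied conditionals are dropped from the list, and whatever remains after the loop is reported as failed_conditions. A minimal sketch of that evaluation, using the same Conditional import as the module; the canned response text is an assumption for illustration.

from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.parsing import Conditional

# One response per command, exactly as run_commands would return them.
responses = ['Product name: MLNXOS\nProduct release: 3.6.8130']
conditionals = [Conditional('result[0] contains MLNXOS')]

# Keep only the conditionals that are not yet satisfied; an empty list
# means the task can proceed, a non-empty one would trigger another retry.
remaining = [c for c in conditionals if not c(responses)]
print(remaining)  # [] -> all conditions met
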
+  before:
+    description:
+      - The ordered set of commands to push on to the command stack if
+        a change needs to be made. This allows the playbook designer
+        the opportunity to perform configuration commands prior to pushing
+        any changes without affecting how the set of commands are matched
+        against the system.
+  after:
+    description:
+      - The ordered set of commands to append to the end of the command
+        stack if a change needs to be made. Just like with I(before) this
+        allows the playbook designer to append a set of commands to be
+        executed after the command set.
+  match:
+    description:
+      - Instructs the module on the way to perform the matching of
+        the set of commands against the current device config. If
+        match is set to I(line), commands are matched line by line. If
+        match is set to I(strict), command lines are matched with respect
+        to position. If match is set to I(exact), command lines
+        must be an equal match. Finally, if match is set to I(none), the
+        module will not attempt to compare the source configuration with
+        the running configuration on the remote device.
+    default: line
+    choices: ['line', 'strict', 'exact', 'none']
+  replace:
+    description:
+      - Instructs the module on the way to perform the configuration
+        on the device. If the replace argument is set to I(line) then
+        the modified lines are pushed to the device in configuration
+        mode. If the replace argument is set to I(block) then the entire
+        command block is pushed to the device in configuration mode if any
+        line is not correct.
+    default: line
+    choices: ['line', 'block']
+  backup:
+    description:
+      - This argument will cause the module to create a full backup of
+        the current C(running-config) from the remote device before any
+        changes are made. If the C(backup_options) value is not given,
+        the backup file is written to the C(backup) folder in the playbook
+        root directory. If the directory does not exist, it is created.
+    default: no
+    type: bool
+  config:
+    description:
+      - The C(config) argument allows the playbook designer to supply
+        the base configuration to be used to validate configuration
+        changes necessary. If this argument is provided, the module
+        will not download the running-config from the remote node.
+  save:
+    description:
+      - The C(save) argument instructs the module to save the
+        running-config to the startup-config at the conclusion of the module
+        running. If check mode is specified, this argument is ignored.
+    default: no
+    type: bool
+  backup_options:
+    description:
+      - This is a dict object containing configurable options related to backup file path.
+        The value of this option is read only when C(backup) is set to I(yes), if C(backup) is set
+        to I(no) this option will be silently ignored.
+    suboptions:
+      filename:
+        description:
+          - The filename to be used to store the backup configuration. If the filename
+            is not given it will be generated based on the hostname, current time and date
+            in the format defined by <hostname>_config.<current-date>@<current-time>
+      dir_path:
+        description:
+          - This option provides the path ending with directory name in which the backup
+            configuration file will be stored. If the directory does not exist it will be first
+            created and the filename is either the value of C(filename) or default filename
+            as described in C(filename) options description. If the path value is not given
+            in that case a I(backup) directory will be created in the current working directory
+            and backup configuration will be copied in C(filename) within I(backup) directory.
+ type: path + type: dict +''' + +EXAMPLES = """ +--- +- onyx_config: + lines: + - snmp-server community + - snmp-server host 10.2.2.2 traps version 2c +""" + +RETURN = """ +updates: + description: The set of commands that will be pushed to the remote device + returned: always + type: list + sample: ['...', '...'] +backup_path: + description: The full path to the backup file + returned: when backup is yes + type: str + sample: /playbooks/ansible/backup/onyx_config.2016-07-16@22:28:34 +""" + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.config import NetworkConfig, dumps + +from ansible_collections.community.general.plugins.module_utils.network.onyx.onyx import get_config +from ansible_collections.community.general.plugins.module_utils.network.onyx.onyx import load_config +from ansible_collections.community.general.plugins.module_utils.network.onyx.onyx import run_commands + + +def get_candidate(module): + candidate = NetworkConfig(indent=1) + if module.params['src']: + candidate.load(module.params['src']) + elif module.params['lines']: + parents = module.params['parents'] or list() + candidate.add(module.params['lines'], parents=parents) + return candidate + + +def run(module, result): + match = module.params['match'] + replace = module.params['replace'] + path = module.params['parents'] + + candidate = get_candidate(module) + if match != 'none': + contents = module.params['config'] + if not contents: + contents = get_config(module) + config = NetworkConfig(indent=1, contents=contents) + configobjs = candidate.difference(config, path=path, match=match, + replace=replace) + + else: + configobjs = candidate.items + + total_commands = [] + if configobjs: + commands = dumps(configobjs, 'commands').split('\n') + + if module.params['lines']: + if module.params['before']: + commands[:0] = module.params['before'] + + if module.params['after']: + commands.extend(module.params['after']) + + total_commands.extend(commands) + result['updates'] = total_commands + + if module.params['save']: + total_commands.append('configuration write') + if total_commands: + result['changed'] = True + if not module.check_mode: + load_config(module, total_commands) + + +def main(): + """ main entry point for module execution + """ + backup_spec = dict( + filename=dict(), + dir_path=dict(type='path') + ) + argument_spec = dict( + src=dict(type='path'), + + lines=dict(aliases=['commands'], type='list'), + parents=dict(type='list'), + + before=dict(type='list'), + after=dict(type='list'), + + match=dict(default='line', choices=['line', 'strict', 'exact', 'none']), + replace=dict(default='line', choices=['line', 'block']), + + config=dict(), + + backup=dict(type='bool', default=False), + backup_options=dict(type='dict', options=backup_spec), + save=dict(type='bool', default=False), + ) + + mutually_exclusive = [('lines', 'src'), + ('parents', 'src')] + + required_if = [('match', 'strict', ['lines']), + ('match', 'exact', ['lines']), + ('replace', 'block', ['lines'])] + + module = AnsibleModule(argument_spec=argument_spec, + mutually_exclusive=mutually_exclusive, + required_if=required_if, + supports_check_mode=True) + + result = {'changed': False} + if module.params['backup']: + result['__backup__'] = get_config(module) + + run(module, result) + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/onyx/onyx_facts.py b/plugins/modules/network/onyx/onyx_facts.py new file mode 100644 
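
onyx_config's match and replace options map directly onto netcommon's NetworkConfig diffing, which run() above drives. A small sketch of that comparison, using the same NetworkConfig and dumps helpers the module imports; the sample running-config contents are an assumption.

from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.config import NetworkConfig, dumps

# The device's current configuration, as get_config() would return it.
running = NetworkConfig(indent=1, contents='snmp-server community public\n')

# The candidate built from the task's `lines` argument.
candidate = NetworkConfig(indent=1)
candidate.add(['snmp-server community public',
               'snmp-server host 10.2.2.2 traps version 2c'])

# With match=line, only lines absent from the running config survive the diff.
diff = candidate.difference(running, match='line', replace='line')
print(dumps(diff, 'commands'))  # -> snmp-server host 10.2.2.2 traps version 2c
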
index 0000000000..e06b4fc4cd
--- /dev/null
+++ b/plugins/modules/network/onyx/onyx_facts.py
@@ -0,0 +1,245 @@
+#!/usr/bin/python
+#
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+DOCUMENTATION = '''
+---
+module: onyx_facts
+author: "Waleed Mousa (@waleedym), Samer Deeb (@samerd)"
+short_description: Collect facts from Mellanox ONYX network devices
+description:
+  - Collects a base set of device facts from Mellanox ONYX network devices.
+    This module prepends all of the base network fact keys with
+    C(ansible_net_). The facts module will always collect a base set of
+    facts from the device and can enable or disable collection of additional
+    facts.
+notes:
+  - Tested against ONYX 3.6
+options:
+  gather_subset:
+    description:
+      - When supplied, this argument will restrict the facts collected
+        to a given subset. Possible values for this argument include
+        all, version, modules, and interfaces. Can specify a list of
+        values to include a larger subset. Values can also be used
+        with an initial C(!) to specify that a specific subset should
+        not be collected.
+    required: false
+    default: version
+'''
+
+EXAMPLES = """
+---
+- name: Collect all facts from the device
+  onyx_facts:
+    gather_subset: all
+- name: Collect only the interfaces facts
+  onyx_facts:
+    gather_subset:
+      - interfaces
+- name: Do not collect version facts
+  onyx_facts:
+    gather_subset:
+      - "!version"
+"""
+
+RETURN = """
+ansible_net_gather_subset:
+  description: The list of fact subsets collected from the device
+  returned: always
+  type: list
+# version
+ansible_net_version:
+  description: A hash of all currently running system image information
+  returned: when version is configured or when no gather_subset is provided
+  type: dict
+# modules
+ansible_net_modules:
+  description: A hash of all modules on the system with status
+  returned: when modules is configured
+  type: dict
+# interfaces
+ansible_net_interfaces:
+  description: A hash of all interfaces running on the system
+  returned: when interfaces is configured
+  type: dict
+"""
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six import iteritems
+
+from ansible_collections.community.general.plugins.module_utils.network.onyx.onyx import BaseOnyxModule
+from ansible_collections.community.general.plugins.module_utils.network.onyx.onyx import show_cmd
+
+
+class OnyxFactsModule(BaseOnyxModule):
+
+    def get_runable_subset(self, gather_subset):
+        runable_subsets = set()
+        exclude_subsets = set()
+        for subset in gather_subset:
+            if subset == 'all':
+                runable_subsets.update(VALID_SUBSETS)
+                continue
+
+            if subset.startswith('!'):
+                subset = subset[1:]
+                if subset == 'all':
+                    exclude_subsets.update(VALID_SUBSETS)
+                    continue
+                exclude = True
+            else:
+                exclude = False
+
+            if subset not in VALID_SUBSETS:
+                self._module.fail_json(msg='Bad subset')
+
+            if exclude:
+                exclude_subsets.add(subset)
+            else:
+                runable_subsets.add(subset)
+
+        if not runable_subsets:
+            runable_subsets.update(VALID_SUBSETS)
+
+        runable_subsets.difference_update(exclude_subsets)
+        if not runable_subsets:
+            runable_subsets.add('version')
+        return runable_subsets
+
+    def init_module(self):
+        """ module initialization
+        """
+        argument_spec = dict(
+            gather_subset=dict(default=['version'],
type='list') + ) + self._module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True) + + def run(self): + self.init_module() + gather_subset = self._module.params['gather_subset'] + runable_subsets = self.get_runable_subset(gather_subset) + facts = dict() + facts['gather_subset'] = list(runable_subsets) + + instances = list() + for key in runable_subsets: + facter_cls = FACT_SUBSETS[key] + instances.append(facter_cls(self._module)) + + for inst in instances: + inst.populate() + facts.update(inst.facts) + + ansible_facts = dict() + for key, value in iteritems(facts): + key = 'ansible_net_%s' % key + ansible_facts[key] = value + self._module.exit_json(ansible_facts=ansible_facts) + + +class FactsBase(object): + + COMMANDS = [''] + + def __init__(self, module): + self.module = module + self.facts = dict() + self.responses = None + + def _show_cmd(self, cmd): + return show_cmd(self.module, cmd, json_fmt=True) + + def populate(self): + self.responses = [] + for cmd in self.COMMANDS: + self.responses.append(self._show_cmd(cmd)) + + +class Version(FactsBase): + + COMMANDS = ['show version'] + + def populate(self): + super(Version, self).populate() + data = self.responses[0] + if data: + self.facts['version'] = data + + +class Module(FactsBase): + + COMMANDS = ['show module'] + + def populate(self): + super(Module, self).populate() + data = self.responses[0] + if data: + self.facts['modules'] = data + + +class Interfaces(FactsBase): + + COMMANDS = ['show version', 'show interfaces ethernet'] + + def populate(self): + super(Interfaces, self).populate() + + version_data = self.responses[0] + os_version = version_data['Product release'] + data = self.responses[1] + + if data: + self.facts['interfaces'] = self.populate_interfaces(data, os_version) + + def extractIfData(self, interface_data): + return {"MAC Address": interface_data["Mac address"], + "Actual Speed": interface_data["Actual speed"], + "MTU": interface_data["MTU"], + "Admin State": interface_data["Admin state"], + "Operational State": interface_data["Operational state"]} + + def populate_interfaces(self, interfaces, os_version): + interfaces_dict = dict() + for if_data in interfaces: + if_dict = dict() + if os_version >= BaseOnyxModule.ONYX_API_VERSION: + for if_name, interface_data in iteritems(if_data): + interface_data = interface_data[0] + if_dict = self.extractIfData(interface_data) + if_name = if_dict["Interface Name"] = if_name + + else: + if_dict = self.extractIfData(if_data) + if_name = if_dict["Interface Name"] = if_data["header"] + interfaces_dict[if_name] = if_dict + return interfaces_dict + + +FACT_SUBSETS = dict( + version=Version, + modules=Module, + interfaces=Interfaces +) + +VALID_SUBSETS = frozenset(FACT_SUBSETS.keys()) + + +def main(): + """ main entry point for module execution + """ + OnyxFactsModule.main() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/onyx/onyx_igmp.py b/plugins/modules/network/onyx/onyx_igmp.py new file mode 100644 index 0000000000..180dd92f67 --- /dev/null +++ b/plugins/modules/network/onyx/onyx_igmp.py @@ -0,0 +1,224 @@ +#!/usr/bin/python +# +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: onyx_igmp +author: "Samer Deeb (@samerd)" 
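
The gather_subset handling in onyx_facts above reduces to plain set arithmetic. A standalone sketch of the same resolution, where the function name resolve_subsets is an editorial choice and VALID_SUBSETS mirrors the module's frozenset:

VALID_SUBSETS = frozenset(['version', 'modules', 'interfaces'])

def resolve_subsets(gather_subset):
    run, exclude = set(), set()
    for subset in gather_subset:
        if subset == 'all':
            run.update(VALID_SUBSETS)
        elif subset.startswith('!'):
            # '!all' excludes everything; '!name' excludes one subset
            exclude.update(VALID_SUBSETS if subset[1:] == 'all' else [subset[1:]])
        else:
            run.add(subset)
    if not run:                      # nothing selected -> start from everything
        run.update(VALID_SUBSETS)
    run.difference_update(exclude)
    return run or {'version'}        # fall back to the default subset

print(sorted(resolve_subsets(['!version'])))  # ['interfaces', 'modules']
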
+short_description: Configures IGMP global parameters
+description:
+  - This module provides declarative management of IGMP protocol params
+    on Mellanox ONYX network devices.
+notes:
+  - Tested on ONYX 3.6.6107
+options:
+  state:
+    description:
+      - IGMP state.
+    required: true
+    choices: ['enabled', 'disabled']
+  last_member_query_interval:
+    description:
+      - Configure the last member query interval, range 1-25.
+  mrouter_timeout:
+    description:
+      - Configure the mrouter timeout, range 60-600.
+  port_purge_timeout:
+    description:
+      - Configure the host port purge timeout, range 130-1225.
+  proxy_reporting:
+    description:
+      - Configure ip igmp snooping proxy and enable reporting mode.
+    choices: ['enabled', 'disabled']
+  report_suppression_interval:
+    description:
+      - Configure the report suppression interval, range 1-25.
+  unregistered_multicast:
+    description:
+      - Configure the unregistered multicast mode.
+        Flood unregistered multicast or
+        forward unregistered multicast to mrouter ports.
+    choices: ['flood', 'forward-to-mrouter-ports']
+  default_version:
+    description:
+      - Configure the default operating version of the IGMP snooping.
+    choices: ['V2','V3']
+'''
+
+EXAMPLES = """
+- name: configure igmp
+  onyx_igmp:
+    state: enabled
+    unregistered_multicast: flood
+"""
+
+RETURN = """
+commands:
+  description: The list of configuration mode commands to send to the device.
+  returned: always
+  type: list
+  sample:
+    - ip igmp snooping
+    - ip igmp snooping last-member-query-interval 10
+    - ip igmp snooping mrouter-timeout 150
+    - ip igmp snooping port-purge-timeout 150
+"""
+
+import re
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six import iteritems
+
+from ansible_collections.community.general.plugins.module_utils.network.onyx.onyx import show_cmd
+from ansible_collections.community.general.plugins.module_utils.network.onyx.onyx import BaseOnyxModule
+
+
+class OnyxIgmpModule(BaseOnyxModule):
+    TIME_INTERVAL_REGEX = re.compile(r'^(\d+)\s+seconds')
+
+    _RANGE_INTERVALS = dict(
+        last_member_query_interval=(1, 25, 'Last member query interval'),
+        mrouter_timeout=(60, 600, 'Mrouter timeout'),
+        port_purge_timeout=(130, 1225, 'Port purge timeout'),
+        report_suppression_interval=(1, 25, 'Report suppression interval'),
+    )
+
+    def init_module(self):
+        """ initialize module
+        """
+        element_spec = dict(
+            state=dict(choices=['enabled', 'disabled'], required=True),
+            last_member_query_interval=dict(type='int'),
+            mrouter_timeout=dict(type='int'),
+            port_purge_timeout=dict(type='int'),
+            proxy_reporting=dict(choices=['enabled', 'disabled']),
+            report_suppression_interval=dict(type='int'),
+            unregistered_multicast=dict(
+                choices=['flood', 'forward-to-mrouter-ports']),
+            default_version=dict(choices=['V2', 'V3']),
+        )
+        argument_spec = dict()
+        argument_spec.update(element_spec)
+        self._module = AnsibleModule(
+            argument_spec=argument_spec,
+            supports_check_mode=True)
+
+    def _validate_key(self, param, key):
+        interval_params = self._RANGE_INTERVALS.get(key)
+        if interval_params:
+            min_val, max_val = interval_params[0], interval_params[1]
+            value = param.get(key)
+            self._validate_range(key, min_val, max_val, value)
+        else:
+            super(OnyxIgmpModule, self)._validate_key(param, key)
+
+    def get_required_config(self):
+        module_params = self._module.params
+        self._required_config = dict(module_params)
+        self.validate_param_values(self._required_config)
+
+    def _set_igmp_config(self, igmp_config):
+        igmp_config = igmp_config[0]
+        if not igmp_config:
+            return
+
self._current_config['state'] = igmp_config.get( + 'IGMP snooping globally', 'disabled') + self._current_config['proxy_reporting'] = igmp_config.get( + 'Proxy-reporting globally', 'disabled') + self._current_config['default_version'] = igmp_config.get( + 'IGMP default version for new VLAN', 'V3') + self._current_config['unregistered_multicast'] = igmp_config.get( + 'IGMP snooping unregistered multicast', 'flood') + + for interval_name, interval_params in iteritems(self._RANGE_INTERVALS): + display_str = interval_params[2] + value = igmp_config.get(display_str, '') + match = self.TIME_INTERVAL_REGEX.match(value) + if match: + interval_value = int(match.group(1)) + else: + interval_value = None + self._current_config[interval_name] = interval_value + + def _show_igmp(self): + cmd = "show ip igmp snooping" + return show_cmd(self._module, cmd, json_fmt=True, fail_on_error=False) + + def load_current_config(self): + self._current_config = dict() + igmp_config = self._show_igmp() + if igmp_config: + self._set_igmp_config(igmp_config) + + def generate_commands(self): + state = self._required_config['state'] + if state == 'enabled': + self._generate_igmp_cmds() + else: + self._generate_no_igmp_cmds() + + def _generate_igmp_cmds(self): + curr_state = self._current_config.get('state', 'disabled') + if curr_state == 'disabled': + self._commands.append('ip igmp snooping') + for interval_name in self._RANGE_INTERVALS: + req_val = self._required_config.get(interval_name) + if not req_val: + continue + curr_value = self._current_config.get(interval_name) + if curr_value == req_val: + continue + interval_cmd = interval_name.replace('_', '-') + self._commands.append( + 'ip igmp snooping %s %s' % (interval_cmd, req_val)) + + req_val = self._required_config.get('unregistered_multicast') + if req_val: + curr_value = self._current_config.get( + 'unregistered_multicast', 'flood') + if req_val != curr_value: + self._commands.append( + 'ip igmp snooping unregistered multicast %s' % req_val) + + req_val = self._required_config.get('proxy_reporting') + if req_val: + curr_value = self._current_config.get( + 'proxy_reporting', 'disabled') + if req_val != curr_value: + cmd = 'ip igmp snooping proxy reporting' + if req_val == 'disabled': + cmd = 'no %s' % cmd + self._commands.append(cmd) + + req_val = self._required_config.get('default_version') + if req_val: + curr_value = self._current_config.get( + 'default_version', 'V3') + if req_val != curr_value: + version = req_val[1] # remove the 'V' and take the number only + self._commands.append( + 'ip igmp snooping version %s' % version) + + def _generate_no_igmp_cmds(self): + curr_state = self._current_config.get('state', 'disabled') + if curr_state != 'disabled': + self._commands.append('no ip igmp snooping') + + +def main(): + """ main entry point for module execution + """ + OnyxIgmpModule.main() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/onyx/onyx_igmp_interface.py b/plugins/modules/network/onyx/onyx_igmp_interface.py new file mode 100644 index 0000000000..beafd702ea --- /dev/null +++ b/plugins/modules/network/onyx/onyx_igmp_interface.py @@ -0,0 +1,135 @@ +#!/usr/bin/python +# +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: 
onyx_igmp_interface
+author: "Anas Badaha (@anasb)"
+short_description: Configures IGMP interface parameters
+description:
+  - This module provides declarative management of IGMP interface configuration
+    on Mellanox ONYX network devices.
+notes:
+  - Tested on ONYX 3.6.8130
+options:
+  name:
+    description:
+      - The interface name on which to configure IGMP.
+    required: true
+  state:
+    description:
+      - IGMP Interface state.
+    choices: ['enabled', 'disabled']
+    default: enabled
+'''
+
+EXAMPLES = """
+- name: configure igmp interface
+  onyx_igmp_interface:
+    state: enabled
+    name: Eth1/1
+"""
+
+RETURN = """
+commands:
+  description: The list of configuration mode commands to send to the device.
+  returned: always
+  type: list
+  sample:
+    - interface ethernet 1/1 ip igmp snooping fast-leave
+"""
+
+import re
+from ansible.module_utils.basic import AnsibleModule
+
+from ansible_collections.community.general.plugins.module_utils.network.onyx.onyx import show_cmd
+from ansible_collections.community.general.plugins.module_utils.network.onyx.onyx import BaseOnyxModule
+
+
+class OnyxIgmpInterfaceModule(BaseOnyxModule):
+    IF_NAME_REGEX = re.compile(r"^(Eth\d+\/\d+|Eth\d+\/\d+\/\d+)$")
+
+    def init_module(self):
+        """ initialize module
+        """
+        element_spec = dict(
+            state=dict(choices=['enabled', 'disabled'], default='enabled'),
+            name=dict(required=True)
+        )
+        argument_spec = dict()
+        argument_spec.update(element_spec)
+        self._module = AnsibleModule(
+            argument_spec=argument_spec,
+            supports_check_mode=True)
+
+    def get_required_config(self):
+        module_params = self._module.params
+        self._required_config = dict(module_params)
+        match = self.IF_NAME_REGEX.match(self._required_config["name"])
+        if not match:
+            raise AttributeError("Please insert a valid interface name")
+
+        self.validate_param_values(self._required_config)
+
+    def _set_igmp_config(self, igmp_interfaces_config):
+        if not igmp_interfaces_config:
+            return
+        name = self._required_config.get('name')
+        interface_state = igmp_interfaces_config[name][0].get('leave-mode')
+        if interface_state == "Fast":
+            self._current_config['state'] = "enabled"
+        else:
+            self._current_config['state'] = "disabled"
+
+    def _show_igmp_interfaces(self):
+        cmd = "show ip igmp snooping interfaces"
+        return show_cmd(self._module, cmd, json_fmt=True, fail_on_error=False)
+
+    def load_current_config(self):
+        self._current_config = dict()
+        igmp_interfaces_config = self._show_igmp_interfaces()
+        if igmp_interfaces_config:
+            self._set_igmp_config(igmp_interfaces_config)
+
+    def generate_commands(self):
+        req_state = self._required_config['state']
+        self._req_val = self._required_config.get('name').replace("Eth", "ethernet ")
+
+        if req_state == 'enabled':
+            self._generate_igmp_interface_cmds()
+        else:
+            self._generate_no_igmp_cmds()
+
+    def _generate_igmp_interface_cmds(self):
+        curr_state = self._current_config.get('state', 'enabled')
+        if curr_state == 'enabled':
+            pass
+
+        elif curr_state == 'disabled':
+            self._commands.append('interface %s ip igmp snooping fast-leave' % self._req_val)
+
+    def _generate_no_igmp_cmds(self):
+        curr_state = self._current_config.get('state', 'enabled')
+        if curr_state == 'enabled':
+            self._commands.append('interface %s no ip igmp snooping fast-leave' % self._req_val)
+        else:
+            pass
+
+
+def main():
+    """ main entry point for module execution
+    """
+    OnyxIgmpInterfaceModule.main()
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/network/onyx/onyx_igmp_vlan.py b/plugins/modules/network/onyx/onyx_igmp_vlan.py
new file
mode 100644
index 0000000000..55b9cc4b89
--- /dev/null
+++ b/plugins/modules/network/onyx/onyx_igmp_vlan.py
@@ -0,0 +1,435 @@
+#!/usr/bin/python
+#
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+DOCUMENTATION = '''
+---
+module: onyx_igmp_vlan
+author: Anas Badaha (@anasbadaha)
+short_description: Configures IGMP VLAN parameters
+description:
+  - This module provides declarative management of IGMP VLAN configuration on Mellanox ONYX network devices.
+notes:
+  - Tested on ONYX 3.7.0932-01
+options:
+  vlan_id:
+    description:
+      - VLAN ID; the VLAN should already exist.
+    required: true
+  state:
+    description:
+      - IGMP state.
+    choices: ['enabled', 'disabled']
+    default: enabled
+  mrouter:
+    description:
+      - Configure ip igmp snooping mrouter port on vlan.
+    suboptions:
+      state:
+        description:
+          - Enable IGMP snooping mrouter on vlan interface.
+        choices: ['enabled', 'disabled']
+        default: enabled
+      name:
+        description:
+          - Configure mrouter interface.
+        required: true
+  querier:
+    description:
+      - Configure the IGMP querier parameters.
+    suboptions:
+      state:
+        description:
+          - Enable IGMP snooping querier on vlan in the switch.
+        choices: ['enabled', 'disabled']
+        default: enabled
+      interval:
+        description:
+          - Update time interval between querier queries, range 60-600.
+      address:
+        description:
+          - Update IP address for the querier.
+  static_groups:
+    description:
+      - List of IGMP static groups.
+    suboptions:
+      multicast_ip_address:
+        description:
+          - Configure static IP multicast group, range 224.0.1.0-239.255.255.255.
+        required: true
+      name:
+        description:
+          - Interface name on which to configure the static groups.
+      sources:
+        description:
+          - List of IP sources to be configured.
+  version:
+    description:
+      - IGMP snooping operation version on this vlan.
+    choices: ['V2','V3']
+'''
+
+EXAMPLES = """
+- name: configure igmp vlan
+  onyx_igmp_vlan:
+    state: enabled
+    vlan_id: 10
+    version: V2
+    querier:
+      state: enabled
+      interval: 70
+      address: 10.11.121.13
+    mrouter:
+      state: disabled
+      name: Eth1/2
+    static_groups:
+      - multicast_ip_address: 224.5.5.8
+        name: Eth1/1
+        sources:
+          - 1.1.1.1
+          - 1.1.1.2
+"""
+
+RETURN = """
+commands:
+  description: The list of configuration mode commands to send to the device.
+ returned: always + type: list + sample: + - vlan 10 ip igmp snooping + - vlan 10 ip igmp snooping static-group 224.5.5.5 interface ethernet 1/1 +""" +import socket +import struct + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.network.onyx.onyx import show_cmd +from ansible_collections.community.general.plugins.module_utils.network.onyx.onyx import BaseOnyxModule + + +def _ip_to_int(addr): + return struct.unpack("!I", socket.inet_aton(addr))[0] + + +class OnyxIgmpVlanModule(BaseOnyxModule): + MIN_MULTICAST_IP = _ip_to_int("224.0.1.0") + MAX_MULTICAST_IP = _ip_to_int("239.255.255.255") + + def init_module(self): + """ initialize module + """ + mrouter_spec = dict(name=dict(required=True), + state=dict(choices=['enabled', 'disabled'], default='enabled')) + querier_spec = dict(state=dict(choices=['enabled', 'disabled'], default='enabled'), + interval=dict(type='int'), address=dict()) + static_groups_spec = dict(multicast_ip_address=dict(required=True), + name=dict(required=True), sources=dict(type='list')) + element_spec = dict(vlan_id=dict(type='int', required=True), + state=dict(choices=['enabled', 'disabled'], default='enabled'), + querier=dict(type='dict', options=querier_spec), + static_groups=dict(type='list', elements='dict', options=static_groups_spec), + mrouter=dict(type='dict', options=mrouter_spec), + version=dict(choices=['V2', 'V3'])) + argument_spec = dict() + argument_spec.update(element_spec) + self._module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True) + + def get_required_config(self): + module_params = self._module.params + self._required_config = dict(module_params) + self.validate_param_values(self._required_config) + + def _validate_attr_is_not_none(self, attr_name, attr_value): + if attr_name == 'vlan_id' or attr_name == 'state': + pass + elif attr_value is not None: + self._module.fail_json(msg='Can not set %s value on switch while state is disabled' % attr_name) + + def validate_param_values(self, obj, param=None): + if obj['state'] == 'disabled': + for attr_name in obj: + self._validate_attr_is_not_none(attr_name, obj[attr_name]) + super(OnyxIgmpVlanModule, self).validate_param_values(obj, param) + + def validate_querier(self, value): + interval = value.get('interval') + if interval and not 60 <= int(interval) <= 600: + self._module.fail_json(msg='query-interval value must be between 60 and 600') + + def validate_static_groups(self, value): + multicast_ip = value.get('multicast_ip_address') + multicast_ip = _ip_to_int(multicast_ip) + + if multicast_ip < self.MIN_MULTICAST_IP or multicast_ip > self.MAX_MULTICAST_IP: + self._module.fail_json(msg='multicast IP address must be in range 224.0.1.0 - 239.255.255.255') + + @staticmethod + def _get_curr_mrouter_config(mrouter_port): + if mrouter_port == "none": + return {'state': 'disabled'} + else: + return {'state': 'enabled', + 'name': mrouter_port} + + def _get_curr_querier_config(self, querier_config): + if "Non-Querier" in querier_config: + return {'state': 'disabled'} + elif "Querier" in querier_config: + igmp_querier_config = self._show_igmp_querier_config()[0] + snooping_querier_info = igmp_querier_config["Snooping querier information for VLAN %d" % ( + self._required_config['vlan_id'])] + snooping_querier_info = snooping_querier_info[1] + interval = int(snooping_querier_info["Query interval"]) + address = snooping_querier_info["Configured querier IP address"] + return {'state': 'enabled', + 'interval': interval, 
+ 'address': address} + + @staticmethod + def _get_curr_version(version): + if "V3" in version: + return "V3" + elif "V2" in version: + return "V2" + + def _get_curr_static_group_config(self, multicast_ip_address): + sources = None + names = None + igmp_snooping_groups_config = self._show_igmp_snooping_groups_config(multicast_ip_address) + if igmp_snooping_groups_config is not None: + igmp_snooping_groups_config = igmp_snooping_groups_config[0] + snooping_group_information = igmp_snooping_groups_config.get('Snooping group ' + 'information for VLAN %d and group ' + '%s' % (self._required_config['vlan_id'], + multicast_ip_address)) + if snooping_group_information is not None: + if len(snooping_group_information) == 1: + names = snooping_group_information[0].get('V1/V2 Receiver Ports') + elif len(snooping_group_information) == 2: + sources_dict = dict() + v3_receiver_ports = snooping_group_information[1].get('V3 Receiver Ports') + ports_number = v3_receiver_ports[0].get('Port Number') + sources = v3_receiver_ports[0].get('Include sources') + if isinstance(ports_number, list): + i = 0 + for port_number in ports_number: + sources_dict[port_number] = sources[i] + i += 1 + else: + sources_dict[ports_number] = sources + names = snooping_group_information[0].get('V1/V2 Receiver Ports') + sources = sources_dict + + return {'sources': sources, + 'names': names} + else: + return None + else: + return None + + def _set_igmp_config(self, igmp_vlan_config): + igmp_vlan_config = igmp_vlan_config[0] + if not igmp_vlan_config: + return + + self._current_config['state'] = igmp_vlan_config.get('message 1') + if "enabled" in self._current_config['state']: + self._current_config['state'] = "enabled" + elif "disabled" in self._current_config['state']: + self._current_config['state'] = "disabled" + + mrouter_port = igmp_vlan_config.get('mrouter static port list') + self._current_config['mrouter'] = dict(self._get_curr_mrouter_config(mrouter_port)) + + querier_config = igmp_vlan_config.get('message 3') + self._current_config['querier'] = dict(self._get_curr_querier_config(querier_config)) + + version = igmp_vlan_config.get('message 2') + self._current_config['version'] = self._get_curr_version(version) + + req_static_groups = self._required_config.get('static_groups') + if req_static_groups is not None: + static_groups = self._current_config['static_groups'] = dict() + for static_group in req_static_groups: + static_group_config = self._get_curr_static_group_config(static_group['multicast_ip_address']) + static_groups[static_group['multicast_ip_address']] = static_group_config + + def _show_igmp_vlan(self): + cmd = ("show ip igmp snooping vlan %d" % self._required_config['vlan_id']) + return show_cmd(self._module, cmd, json_fmt=True, fail_on_error=False) + + def _show_igmp_querier_config(self): + cmd = ("show ip igmp snooping querier vlan %d " % self._required_config['vlan_id']) + return show_cmd(self._module, cmd, json_fmt=True, fail_on_error=False) + + def _show_igmp_snooping_groups_config(self, multicast_ip_address): + cmd = ("show ip igmp snooping groups vlan %d group %s" % (self._required_config['vlan_id'], + multicast_ip_address)) + return show_cmd(self._module, cmd, json_fmt=True, fail_on_error=False) + + def load_current_config(self): + self._current_config = dict() + igmp_vlan_config = self._show_igmp_vlan() + if igmp_vlan_config: + self._set_igmp_config(igmp_vlan_config) + + def generate_commands(self): + req_state = self._required_config.get('state', 'enabled') + self._generate_igmp_vlan_cmds(req_state) 
+ + _mrouter = self._required_config.get('mrouter') + if _mrouter is not None: + self._generate_igmp_mrouter_cmds(_mrouter) + + _querier = self._required_config.get('querier') + if _querier is not None: + req_querier_state = _querier.get('state', 'enabled') + self._generate_igmp_querier_cmds(req_querier_state) + + req_querier_interval = _querier.get('interval') + if req_querier_interval is not None: + self._gen_querier_attr_commands("interval", req_querier_interval, "query-interval") + + req_querier_address = _querier.get('address') + if req_querier_address is not None: + self._gen_querier_attr_commands("address", req_querier_address, "address") + + _version = self._required_config.get('version') + if _version is not None: + self._generate_igmp_version_cmds(_version) + + _static_groups = self._required_config.get('static_groups') + if _static_groups is not None: + for static_group in _static_groups: + self._generate_igmp_static_groups_cmd(static_group) + + def _add_igmp_vlan_commands(self, req_state): + if req_state == 'enabled': + igmp_vlan_cmd = 'vlan %d ip igmp snooping' % self._required_config['vlan_id'] + else: + igmp_vlan_cmd = 'vlan %d no ip igmp snooping' % self._required_config['vlan_id'] + + self._commands.append(igmp_vlan_cmd) + + def _generate_igmp_vlan_cmds(self, req_state): + curr_state = self._current_config.get('state') + if curr_state != req_state: + self._add_igmp_vlan_commands(req_state) + + def _gen_querier_attr_commands(self, attr_name, req_attr_value, attr_cmd_name): + _curr_querier = self._current_config.get('querier') + curr_querier_val = _curr_querier.get(attr_name) + if req_attr_value != curr_querier_val: + self._commands.append('vlan %d ip igmp snooping querier %s %s' % (self._required_config['vlan_id'], + attr_cmd_name, req_attr_value)) + + def _add_querier_commands(self, req_querier_state): + if req_querier_state == 'enabled': + self._commands.append('vlan %d ip igmp snooping querier' % self._required_config['vlan_id']) + elif req_querier_state == 'disabled': + self._commands.append('vlan %d no ip igmp snooping querier' % ( + self._required_config['vlan_id'])) + + def _generate_igmp_querier_cmds(self, req_querier_state): + _curr_querier = self._current_config.get('querier') + curr_querier_state = _curr_querier.get('state') + if req_querier_state != curr_querier_state: + self._add_querier_commands(req_querier_state) + + def _generate_igmp_version_cmds(self, version): + _curr_version = self._current_config.get('version') + if version != _curr_version: + self._commands.append('vlan %d ip igmp snooping version %s' % ( + self._required_config['vlan_id'], version[1])) + + def _add_mrouter_commands(self, req_mrouter, curr_mrouter): + curr_state = curr_mrouter.get('state') + curr_interface = curr_mrouter.get('name') + req_state = req_mrouter.get('state') + req_interface = req_mrouter.get('name') + mrouter_interface = req_interface.replace("Eth", "ethernet ") + if curr_state == 'enabled' and req_state == 'disabled': + self._commands.append('vlan %d no ip igmp snooping mrouter interface ' + '%s' % (self._required_config['vlan_id'], mrouter_interface)) + elif curr_state == 'disabled' and req_state == 'enabled': + self._commands.append('vlan %d ip igmp snooping mrouter interface ' + '%s' % (self._required_config['vlan_id'], mrouter_interface)) + elif req_state == 'enabled' and curr_state == 'enabled' and req_interface != curr_interface: + self._commands.append('vlan %d ip igmp snooping mrouter interface ' + '%s' % (self._required_config['vlan_id'], mrouter_interface)) + + 
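
The static-group source handling in _add_igmp_static_groups_sources_cmd below boils down to two set differences: sources requested but absent from the device are added, and sources present on the device but no longer requested are removed. A tiny standalone illustration with assumed sample data:

req_sources = {'1.1.1.1', '1.1.1.2'}   # what the task asks for
curr_sources = {'1.1.1.2', '1.1.1.3'}  # what the device reports

to_add = req_sources - curr_sources       # {'1.1.1.1'} -> "... static-group ... source 1.1.1.1"
to_remove = curr_sources - req_sources    # {'1.1.1.3'} -> "... no ... source 1.1.1.3"
print(sorted(to_add), sorted(to_remove))
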
def _generate_igmp_mrouter_cmds(self, req_mrouter): + curr_mrouter = self._current_config.get('mrouter') + if curr_mrouter != req_mrouter: + self._add_mrouter_commands(req_mrouter, curr_mrouter) + + def _add_igmp_static_groups_cmd(self, req_name, req_multicast_ip_address, curr_names): + if curr_names is None: + self._commands.append('vlan %d ip igmp snooping static-group %s interface %s' % ( + self._required_config['vlan_id'], req_multicast_ip_address, req_name.replace('Eth', 'ethernet '))) + elif req_name.replace('E', 'e') not in curr_names: + self._commands.append('vlan %d ip igmp snooping static-group %s interface %s' % ( + self._required_config['vlan_id'], req_multicast_ip_address, req_name.replace('Eth', 'ethernet '))) + + def _add_igmp_static_groups_sources_cmd(self, req_sources, req_name, req_multicast_ip_address, curr_sources): + if curr_sources is None: + for source in req_sources: + self._commands.append('vlan %d ip igmp snooping static-group %s interface %s source %s' % ( + self._required_config['vlan_id'], req_multicast_ip_address, req_name.replace('Eth', 'ethernet '), + source)) + else: + curr_sources = curr_sources.get(req_name.replace('E', 'e')) + if curr_sources is None: + curr_sources = set([]) + else: + curr_sources = set(x.strip() for x in curr_sources.split(',')) + sources_to_add = set(req_sources) - set(curr_sources) + sources_to_remove = set(curr_sources) - set(req_sources) + if len(sources_to_add) != 0: + for source in sources_to_add: + self._commands.append('vlan %d ip igmp snooping static-group %s interface %s source %s' % ( + self._required_config['vlan_id'], req_multicast_ip_address, + req_name.replace('Eth', 'ethernet '), source)) + if len(sources_to_remove) != 0: + for source in sources_to_remove: + self._commands.append('vlan %d no ip igmp snooping static-group %s interface %s source %s' % ( + self._required_config['vlan_id'], req_multicast_ip_address, + req_name.replace('Eth', 'ethernet '), + source)) + + def _generate_igmp_static_groups_cmd(self, static_group): + req_multicast_ip_address = static_group.get('multicast_ip_address') + req_name = static_group.get('name') + req_sources = static_group.get('sources') + curr_static_groups = self._current_config.get('static_groups') + curr_static_group = curr_static_groups.get(req_multicast_ip_address) + curr_names = None + curr_sources = None + if curr_static_group is not None: + curr_names = curr_static_group.get('names') + curr_sources = curr_static_group.get('sources') + + self._add_igmp_static_groups_cmd(req_name, req_multicast_ip_address, curr_names) + if req_sources is not None: + self._add_igmp_static_groups_sources_cmd(req_sources, req_name, req_multicast_ip_address, curr_sources) + + +def main(): + """ main entry point for module execution + """ + OnyxIgmpVlanModule.main() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/onyx/onyx_interface.py b/plugins/modules/network/onyx/onyx_interface.py new file mode 100644 index 0000000000..e0ef8f3d9a --- /dev/null +++ b/plugins/modules/network/onyx/onyx_interface.py @@ -0,0 +1,501 @@ +#!/usr/bin/python +# +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: onyx_interface +author: "Samer Deeb (@samerd)" +short_description: Manage Interfaces 
on Mellanox ONYX network devices
+description:
+  - This module provides declarative management of Interfaces
+    on Mellanox ONYX network devices.
+options:
+  name:
+    description:
+      - Name of the Interface.
+    required: true
+  description:
+    description:
+      - Description of the Interface.
+  enabled:
+    description:
+      - Interface link status.
+    type: bool
+  speed:
+    description:
+      - Interface link speed.
+    choices: ['1G', '10G', '25G', '40G', '50G', '56G', '100G']
+  mtu:
+    description:
+      - Maximum size of transmit packet.
+  aggregate:
+    description: List of Interfaces definitions.
+  duplex:
+    description:
+      - Interface link duplex.
+    default: auto
+    choices: ['full', 'half', 'auto']
+  tx_rate:
+    description:
+      - Transmit rate in bits per second (bps).
+      - This is a state check parameter only.
+      - Supports conditionals, see L(Conditionals in Networking Modules,../network/user_guide/network_working_with_command_output.html)
+  rx_rate:
+    description:
+      - Receive rate in bits per second (bps).
+      - This is a state check parameter only.
+      - Supports conditionals, see L(Conditionals in Networking Modules,../network/user_guide/network_working_with_command_output.html)
+  delay:
+    description:
+      - Time in seconds to wait before checking for the operational state on
+        the remote device. This wait is applicable for the operational state
+        argument I(state) with values C(up)/C(down).
+    default: 10
+  purge:
+    description:
+      - Purge Interfaces not defined in the aggregate parameter.
+        This applies only to logical interfaces.
+    default: false
+    type: bool
+  state:
+    description:
+      - State of the Interface configuration, C(up) means present and
+        operationally up, and C(down) means present and operationally down.
+    default: present
+    choices: ['present', 'absent', 'up', 'down']
+'''
+
+EXAMPLES = """
+- name: configure interface
+  onyx_interface:
+    name: Eth1/2
+    description: test-interface
+    speed: 100G
+    mtu: 512
+
+- name: make interface up
+  onyx_interface:
+    name: Eth1/2
+    enabled: True
+
+- name: make interface down
+  onyx_interface:
+    name: Eth1/2
+    enabled: False
+
+- name: Check intent arguments
+  onyx_interface:
+    name: Eth1/2
+    state: up
+
+- name: Config + intent
+  onyx_interface:
+    name: Eth1/2
+    enabled: False
+    state: down
+"""
+
+RETURN = """
+commands:
+  description: The list of configuration mode commands to send to the device.
+  returned: always
+  type: list
+  sample:
+    - interface ethernet 1/2
+    - description test-interface
+    - mtu 512
+    - exit
+"""
+
+from copy import deepcopy
+import re
+from time import sleep
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six import iteritems
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import conditional
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import remove_default_spec
+
+from ansible_collections.community.general.plugins.module_utils.network.onyx.onyx import BaseOnyxModule
+from ansible_collections.community.general.plugins.module_utils.network.onyx.onyx import get_interfaces_config
+
+
+class OnyxInterfaceModule(BaseOnyxModule):
+    IF_ETH_REGEX = re.compile(r"^Eth(\d+\/\d+|\d+\/\d+\/\d+)$")
+    IF_VLAN_REGEX = re.compile(r"^Vlan (\d+)$")
+    IF_LOOPBACK_REGEX = re.compile(r"^Loopback (\d+)$")
+    IF_PO_REGEX = re.compile(r"^Po(\d+)$")
+
+    IF_TYPE_ETH = "ethernet"
+    IF_TYPE_LOOPBACK = "loopback"
+    IF_TYPE_VLAN = "vlan"
+    IF_TYPE_PO = "port-channel"
+
+    IF_TYPE_MAP = {
+        IF_TYPE_ETH: IF_ETH_REGEX,
+        IF_TYPE_VLAN: IF_VLAN_REGEX,
+        IF_TYPE_LOOPBACK: IF_LOOPBACK_REGEX,
+        IF_TYPE_PO: IF_PO_REGEX
+    }
+    UNSUPPORTED_ATTRS = {
+        IF_TYPE_ETH: (),
+        IF_TYPE_VLAN: ('speed', 'rx_rate', 'tx_rate'),
+        IF_TYPE_LOOPBACK: ('speed', 'mtu', 'rx_rate', 'tx_rate'),
+        IF_TYPE_PO: ('speed', 'rx_rate', 'tx_rate'),
+    }
+    UNSUPPORTED_STATES = {
+        IF_TYPE_ETH: ('absent',),
+        IF_TYPE_VLAN: (),
+        IF_TYPE_LOOPBACK: ('up', 'down'),
+        # The trailing comma matters here: ('absent') is just a string, which
+        # would turn the membership check below into a substring test.
+        IF_TYPE_PO: ('absent',),
+    }
+
+    IF_MODIFIABLE_ATTRS = ('speed', 'description', 'mtu')
+    _interface_type = None
+
+    @classmethod
+    def _get_element_spec(cls):
+        return dict(
+            name=dict(type='str'),
+            description=dict(),
+            speed=dict(choices=['1G', '10G', '25G', '40G', '50G', '56G', '100G']),
+            mtu=dict(type='int'),
+            enabled=dict(type='bool'),
+            delay=dict(default=10, type='int'),
+            state=dict(default='present',
+                       choices=['present', 'absent', 'up', 'down']),
+            tx_rate=dict(),
+            rx_rate=dict(),
+        )
+
+    @classmethod
+    def _get_aggregate_spec(cls, element_spec):
+        aggregate_spec = deepcopy(element_spec)
+        aggregate_spec['name'] = dict(required=True)
+
+        # remove default in aggregate spec, to handle common arguments
+        remove_default_spec(aggregate_spec)
+        return aggregate_spec
+
+    def init_module(self):
+        """ module initialization
+        """
+        element_spec = self._get_element_spec()
+        aggregate_spec = self._get_aggregate_spec(element_spec)
+        argument_spec = dict(
+            aggregate=dict(type='list', elements='dict',
+                           options=aggregate_spec),
+            purge=dict(default=False, type='bool'),
+        )
+        argument_spec.update(element_spec)
+        required_one_of = [['name', 'aggregate']]
+        mutually_exclusive = [['name', 'aggregate']]
+        self._module = AnsibleModule(
+            argument_spec=argument_spec,
+            required_one_of=required_one_of,
+            mutually_exclusive=mutually_exclusive,
+            supports_check_mode=True)
+
+    def validate_purge(self, value):
+        if value:
+            self._module.fail_json(
+                msg='Purge is not supported!')
+
+    def validate_duplex(self, value):
+        if value != 'auto':
+            self._module.fail_json(
+                msg='Duplex is not supported!')
+
+    def _get_interface_type(self, if_name):
+        if_type = None
+        if_id = None
+        for interface_type, interface_regex in iteritems(self.IF_TYPE_MAP):
+            match = interface_regex.match(if_name)
+            if match:
+                if_type = interface_type
+                if_id = match.group(1)
+                break
+        return if_type, if_id
+
+    def _set_if_type(self, params):
+        if_name = params['name']
+        if_type, if_id = 
self._get_interface_type(if_name) + if not if_id: + self._module.fail_json( + msg='unsupported interface: %s' % if_name) + params['if_type'] = if_type + params['if_id'] = if_id + + def _check_supported_attrs(self, if_obj): + unsupported_attrs = self.UNSUPPORTED_ATTRS[self._interface_type] + for attr in unsupported_attrs: + val = if_obj[attr] + if val is not None: + self._module.fail_json( + msg='attribute %s is not supported for %s interface' % ( + attr, self._interface_type)) + req_state = if_obj['state'] + unsupported_states = self.UNSUPPORTED_STATES[self._interface_type] + if req_state in unsupported_states: + self._module.fail_json( + msg='%s state is not supported for %s interface' % ( + req_state, self._interface_type)) + + def _validate_interface_type(self): + for if_obj in self._required_config: + if_type = if_obj['if_type'] + if not self._interface_type: + self._interface_type = if_type + elif self._interface_type != if_type: + self._module.fail_json( + msg='Cannot aggregate interfaces from different types') + self._check_supported_attrs(if_obj) + + def get_required_config(self): + self._required_config = list() + module_params = self._module.params + aggregate = module_params.get('aggregate') + if aggregate: + for item in aggregate: + for key in item: + if item.get(key) is None: + item[key] = module_params[key] + + self.validate_param_values(item, item) + req_item = item.copy() + self._set_if_type(req_item) + self._required_config.append(req_item) + else: + params = { + 'name': module_params['name'], + 'description': module_params['description'], + 'speed': module_params['speed'], + 'mtu': module_params['mtu'], + 'state': module_params['state'], + 'delay': module_params['delay'], + 'enabled': module_params['enabled'], + 'tx_rate': module_params['tx_rate'], + 'rx_rate': module_params['rx_rate'], + } + + self.validate_param_values(params) + self._set_if_type(params) + self._required_config.append(params) + self._validate_interface_type() + + @classmethod + def get_if_name(cls, item): + return cls.get_config_attr(item, "header") + + @classmethod + def get_admin_state(cls, item): + admin_state = cls.get_config_attr(item, "Admin state") + return str(admin_state).lower() == "enabled" + + @classmethod + def get_oper_state(cls, item): + oper_state = cls.get_config_attr(item, "Operational state") + if not oper_state: + oper_state = cls.get_config_attr(item, "State") + return str(oper_state).lower() + + @classmethod + def get_speed(cls, item): + speed = cls.get_config_attr(item, 'Actual speed') + if not speed: + return + try: + speed = int(speed.split()[0]) + return "%dG" % speed + except ValueError: + return None + + def _create_if_data(self, name, item): + regex = self.IF_TYPE_MAP[self._interface_type] + if_id = '' + match = regex.match(name) + if match: + if_id = match.group(1) + return dict( + name=name, + description=self.get_config_attr(item, 'Description'), + speed=self.get_speed(item), + mtu=self.get_mtu(item), + enabled=self.get_admin_state(item), + state=self.get_oper_state(item), + if_id=if_id) + + def _get_interfaces_config(self): + return get_interfaces_config(self._module, self._interface_type) + + def load_current_config(self): + self._os_version = self._get_os_version() + self._current_config = dict() + config = self._get_interfaces_config() + if not config: + return + if self._os_version < self.ONYX_API_VERSION: + for if_data in config: + if_name = self.get_if_name(if_data) + self._current_config[if_name] = self._create_if_data( + if_name, if_data) + else: + if_data = 
dict() + for if_config in config: + for if_name, if_attr in iteritems(if_config): + for config in if_attr: + for key, value in iteritems(config): + if_data[key] = value + self._current_config[if_name] = self._create_if_data( + if_name, if_data) + + def _generate_no_if_commands(self, req_if, curr_if): + if self._interface_type == self.IF_TYPE_ETH: + name = req_if['name'] + self._module.fail_json( + msg='cannot remove ethernet interface %s' % name) + if not curr_if: + return + if_id = req_if['if_id'] + if not if_id: + return + self._commands.append( + 'no interface %s %s' % (self._interface_type, if_id)) + + def _add_commands_to_interface(self, req_if, cmd_list): + if not cmd_list: + return + if_id = req_if['if_id'] + if not if_id: + return + self._commands.append( + 'interface %s %s' % (self._interface_type, if_id)) + self._commands.extend(cmd_list) + self._commands.append('exit') + + def _generate_if_commands(self, req_if, curr_if): + enabled = req_if['enabled'] + cmd_list = [] + for attr_name in self.IF_MODIFIABLE_ATTRS: + candidate = req_if.get(attr_name) + running = curr_if.get(attr_name) + if candidate != running: + if candidate: + cmd = attr_name + ' ' + str(candidate) + if self._interface_type == self.IF_TYPE_ETH and \ + attr_name in ('mtu', 'speed'): + cmd = cmd + ' ' + 'force' + cmd_list.append(cmd) + curr_enabled = curr_if.get('enabled', False) + if enabled is not None and enabled != curr_enabled: + cmd = 'shutdown' + if enabled: + cmd = "no %s" % cmd + cmd_list.append(cmd) + if cmd_list: + self._add_commands_to_interface(req_if, cmd_list) + + def generate_commands(self): + for req_if in self._required_config: + name = req_if['name'] + curr_if = self._current_config.get(name, {}) + if not curr_if and self._interface_type == self.IF_TYPE_ETH: + self._module.fail_json( + msg='could not find ethernet interface %s' % name) + continue + req_state = req_if['state'] + if req_state == 'absent': + self._generate_no_if_commands(req_if, curr_if) + else: + self._generate_if_commands(req_if, curr_if) + + def _get_interfaces_rates(self): + return get_interfaces_config(self._module, self._interface_type, + "rates") + + def _get_interfaces_status(self): + return get_interfaces_config(self._module, self._interface_type, + "status") + + def _check_state(self, name, want_state, statuses): + curr_if = statuses.get(name, {}) + if curr_if: + curr_if = curr_if[0] + curr_state = self.get_oper_state(curr_if).strip() + if curr_state is None or not conditional(want_state, curr_state): + return 'state eq(%s)' % want_state + + def check_declarative_intent_params(self, result): + failed_conditions = [] + delay_called = False + rates = None + statuses = None + for req_if in self._required_config: + want_state = req_if.get('state') + want_tx_rate = req_if.get('tx_rate') + want_rx_rate = req_if.get('rx_rate') + name = req_if['name'] + if want_state not in ('up', 'down') and not want_tx_rate and not \ + want_rx_rate: + continue + if not delay_called and result['changed']: + delay_called = True + delay = req_if['delay'] + if delay > 0: + sleep(delay) + if want_state in ('up', 'down'): + if statuses is None: + statuses = self._get_interfaces_status() or {} + cond = self._check_state(name, want_state, statuses) + if cond: + failed_conditions.append(cond) + if_rates = None + if want_tx_rate or want_rx_rate: + if not rates: + rates = self._get_interfaces_rates() + if_rates = rates.get(name) + if if_rates: + if_rates = if_rates[0] + if want_tx_rate: + have_tx_rate = None + if if_rates: + have_tx_rate = 
if_rates.get('egress rate') + if have_tx_rate: + have_tx_rate = have_tx_rate.split()[0] + if have_tx_rate is None or not \ + conditional(want_tx_rate, have_tx_rate.strip(), + cast=int): + failed_conditions.append('tx_rate ' + want_tx_rate) + + if want_rx_rate: + have_rx_rate = None + if if_rates: + have_rx_rate = if_rates.get('ingress rate') + if have_rx_rate: + have_rx_rate = have_rx_rate.split()[0] + if have_rx_rate is None or not \ + conditional(want_rx_rate, have_rx_rate.strip(), + cast=int): + failed_conditions.append('rx_rate ' + want_rx_rate) + + return failed_conditions + + +def main(): + """ main entry point for module execution + """ + OnyxInterfaceModule.main() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/onyx/onyx_l2_interface.py b/plugins/modules/network/onyx/onyx_l2_interface.py new file mode 100644 index 0000000000..b2102de220 --- /dev/null +++ b/plugins/modules/network/onyx/onyx_l2_interface.py @@ -0,0 +1,298 @@ +#!/usr/bin/python +# +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: onyx_l2_interface +author: "Samer Deeb (@samerd)" +short_description: Manage Layer-2 interface on Mellanox ONYX network devices +description: + - This module provides declarative management of Layer-2 interface + on Mellanox ONYX network devices. +options: + name: + description: + - Name of the interface. + aggregate: + description: + - List of Layer-2 interface definitions. + mode: + description: + - Mode in which interface needs to be configured. + default: access + choices: ['access', 'trunk', 'hybrid'] + access_vlan: + description: + - Configure given VLAN in access port. + trunk_allowed_vlans: + description: + - List of allowed VLANs in a given trunk port. + state: + description: + - State of the Layer-2 Interface configuration. + default: present + choices: ['present', 'absent'] +''' + +EXAMPLES = """ +- name: configure Layer-2 interface + onyx_l2_interface: + name: Eth1/1 + mode: access + access_vlan: 30 +- name: remove Layer-2 interface configuration + onyx_l2_interface: + name: Eth1/1 + state: absent +""" + +RETURN = """ +commands: + description: The list of configuration mode commands to send to the device + returned: always. 
+  type: list
+  sample:
+    - interface ethernet 1/1
+    - switchport mode access
+    - switchport access vlan 30
+"""
+from copy import deepcopy
+import re
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six import iteritems
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import remove_default_spec
+
+from ansible_collections.community.general.plugins.module_utils.network.onyx.onyx import BaseOnyxModule
+from ansible_collections.community.general.plugins.module_utils.network.onyx.onyx import get_interfaces_config
+
+
+class OnyxL2InterfaceModule(BaseOnyxModule):
+    IFNAME_REGEX = re.compile(r"^.*(Eth\d+\/\d+|Mpo\d+|Po\d+)")
+
+    @classmethod
+    def _get_element_spec(cls):
+        return dict(
+            name=dict(),
+            access_vlan=dict(type='int'),
+            trunk_allowed_vlans=dict(type='list', elements='int'),
+            state=dict(default='present',
+                       choices=['present', 'absent']),
+            mode=dict(default='access',
+                      choices=['access', 'hybrid', 'trunk']),
+        )
+
+    @classmethod
+    def _get_aggregate_spec(cls, element_spec):
+        aggregate_spec = deepcopy(element_spec)
+        aggregate_spec['name'] = dict(required=True)
+
+        # remove default in aggregate spec, to handle common arguments
+        remove_default_spec(aggregate_spec)
+        return aggregate_spec
+
+    def init_module(self):
+        """ module initialization
+        """
+        element_spec = self._get_element_spec()
+        aggregate_spec = self._get_aggregate_spec(element_spec)
+        argument_spec = dict(
+            aggregate=dict(type='list', elements='dict',
+                           options=aggregate_spec),
+        )
+        argument_spec.update(element_spec)
+        required_one_of = [['name', 'aggregate']]
+        mutually_exclusive = [['name', 'aggregate']]
+        self._module = AnsibleModule(
+            argument_spec=argument_spec,
+            required_one_of=required_one_of,
+            mutually_exclusive=mutually_exclusive,
+            supports_check_mode=True)
+
+    def get_required_config(self):
+        self._required_config = list()
+        module_params = self._module.params
+        aggregate = module_params.get('aggregate')
+        if aggregate:
+            for item in aggregate:
+                for key in item:
+                    if item.get(key) is None:
+                        item[key] = module_params[key]
+                self.validate_param_values(item, item)
+                req_item = item.copy()
+                self._required_config.append(req_item)
+        else:
+            params = {
+                'name': module_params['name'],
+                'access_vlan': module_params['access_vlan'],
+                'trunk_allowed_vlans': module_params['trunk_allowed_vlans'],
+                'mode': module_params['mode'],
+                'state': module_params['state'],
+            }
+            self.validate_param_values(params)
+            self._required_config.append(params)
+
+    def validate_access_vlan(self, value):
+        if value and not 1 <= int(value) <= 4094:
+            self._module.fail_json(msg='vlan id must be between 1 and 4094')
+
+    @classmethod
+    def get_allowed_vlans(cls, if_data):
+        allowed_vlans = cls.get_config_attr(if_data, 'Allowed vlans')
+        interface_allowed_vlans = []
+        if allowed_vlans:
+            vlans = [x.strip() for x in allowed_vlans.split(',')]
+            for vlan in vlans:
+                if '-' not in vlan:
+                    interface_allowed_vlans.append(int(vlan))
+                else:
+                    vlan_range = vlan.split("-")
+                    min_number = int(vlan_range[0].strip())
+                    max_number = int(vlan_range[1].strip())
+                    vlan_list = range(min_number, max_number + 1)
+                    interface_allowed_vlans.extend(vlan_list)
+        return interface_allowed_vlans
+
+    @classmethod
+    def get_access_vlan(cls, if_data):
+        access_vlan = cls.get_config_attr(if_data, 'Access vlan')
+        if access_vlan:
+            try:
+                return int(access_vlan)
+            except ValueError:
+                return None
+
+    def _create_switchport_data(self, if_name, if_data):
+        if self._os_version >= self.ONYX_API_VERSION:
+            if_data = if_data[0]
+
+        return {
+            'name': if_name,
+            'mode': self.get_config_attr(if_data, 'Mode'),
+            'access_vlan': self.get_access_vlan(if_data),
+            'trunk_allowed_vlans': self.get_allowed_vlans(if_data)
+        }
+
+    def _get_switchport_config(self):
+        return get_interfaces_config(self._module, 'switchport')
+
+    def load_current_config(self):
+        # called in base class in run function
+        self._os_version = self._get_os_version()
+        self._current_config = dict()
+        switchports_config = self._get_switchport_config()
+        if not switchports_config:
+            return
+        for if_name, if_data in iteritems(switchports_config):
+            self._current_config[if_name] = \
+                self._create_switchport_data(if_name, if_data)
+
+    def _get_switchport_command_name(self, if_name):
+        if if_name.startswith('Eth'):
+            return if_name.replace("Eth", "ethernet ")
+        if if_name.startswith('Po'):
+            return if_name.replace("Po", "port-channel ")
+        if if_name.startswith('Mpo'):
+            return if_name.replace("Mpo", "mlag-port-channel ")
+        self._module.fail_json(
+            msg='invalid interface name: %s' % if_name)
+
+    def _add_interface_commands(self, if_name, commands):
+        if_cmd_name = self._get_switchport_command_name(if_name)
+        self._commands.append("interface %s" % if_cmd_name)
+        self._commands.extend(commands)
+        self._commands.append('exit')
+
+    def _generate_no_switchport_commands(self, if_name):
+        commands = ['no switchport force']
+        self._add_interface_commands(if_name, commands)
+
+    def _generate_switchport_commands(self, if_name, req_conf):
+        commands = []
+        curr_conf = self._current_config.get(if_name, {})
+        curr_mode = curr_conf.get('mode')
+        req_mode = req_conf.get('mode')
+        if req_mode != curr_mode:
+            commands.append('switchport mode %s' % req_mode)
+        curr_access_vlan = curr_conf.get('access_vlan')
+        req_access_vlan = req_conf.get('access_vlan')
+        if curr_access_vlan != req_access_vlan and req_access_vlan:
+            commands.append('switchport access vlan %s' % req_access_vlan)
+        curr_trunk_vlans = curr_conf.get('trunk_allowed_vlans') or set()
+        if curr_trunk_vlans:
+            curr_trunk_vlans = set(curr_trunk_vlans)
+        req_trunk_vlans = req_conf.get('trunk_allowed_vlans') or set()
+        if req_trunk_vlans:
+            req_trunk_vlans = set(req_trunk_vlans)
+        if req_mode != 'access' and curr_trunk_vlans != req_trunk_vlans:
+            added_vlans = req_trunk_vlans - curr_trunk_vlans
+            for vlan_id in added_vlans:
+                commands.append('switchport %s allowed-vlan add %s' %
+                                (req_mode, vlan_id))
+            removed_vlans = curr_trunk_vlans - req_trunk_vlans
+            for vlan_id in removed_vlans:
+                commands.append('switchport %s allowed-vlan remove %s' %
+                                (req_mode, vlan_id))
+
+        if commands:
+            self._add_interface_commands(if_name, commands)
+
+    def generate_commands(self):
+        for req_conf in self._required_config:
+            state = req_conf['state']
+            if_name = req_conf['name']
+            if state == 'absent':
+                if if_name in self._current_config:
+                    self._generate_no_switchport_commands(if_name)
+            else:
+                self._generate_switchport_commands(if_name, req_conf)
+
+    def _generate_vlan_commands(self, vlan_id, req_conf):
+        # Ensure the VLAN exists, then reconcile its name and membership.
+        curr_vlan = self._current_config.get(vlan_id, {})
+        if not curr_vlan:
+            self._commands.append("vlan %s" % vlan_id)
+            self._commands.append("exit")
+        vlan_name = req_conf['vlan_name']
+        if vlan_name:
+            if vlan_name != curr_vlan.get('vlan_name'):
+                self._commands.append("vlan %s name %s" % (vlan_id, vlan_name))
+        curr_members = set(curr_vlan.get('interfaces', []))
+        req_members = req_conf['interfaces']
+        mode = req_conf['mode']
+        for member in req_members:
+            if member in curr_members:
+                continue
+            if_name = self._get_switchport_command_name(member)
+            cmd = "interface %s switchport mode %s" % (if_name, mode)
+            self._commands.append(cmd)
+            cmd = "interface %s switchport %s allowed-vlan add %s" % (
+                if_name, mode, vlan_id)
+            self._commands.append(cmd)
+        req_members = set(req_members)
+        for member in curr_members:
+            if member in req_members:
+                continue
+            if_name = self._get_switchport_command_name(member)
+            cmd = "interface %s switchport %s allowed-vlan remove %s" % (
+                if_name, mode, vlan_id)
+            self._commands.append(cmd)
+
+
+def main():
+    """ main entry point for module execution
+    """
+    OnyxL2InterfaceModule.main()
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/network/onyx/onyx_l3_interface.py b/plugins/modules/network/onyx/onyx_l3_interface.py
new file mode 100644
index 0000000000..437ef236eb
--- /dev/null
+++ b/plugins/modules/network/onyx/onyx_l3_interface.py
@@ -0,0 +1,301 @@
+#!/usr/bin/python
+#
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+DOCUMENTATION = '''
+---
+module: onyx_l3_interface
+author: "Samer Deeb (@samerd)"
+short_description: Manage L3 interfaces on Mellanox ONYX network devices
+description:
+  - This module provides declarative management of L3 interfaces
+    on Mellanox ONYX network devices.
+options:
+  name:
+    description:
+      - Name of the L3 interface.
+  ipv4:
+    description:
+      - IPv4 address of the L3 interface.
+  ipv6:
+    description:
+      - IPv6 address of the L3 interface (not supported for now).
+  aggregate:
+    description: List of L3 interfaces definitions.
+  purge:
+    description:
+      - Purge L3 interfaces not defined in the I(aggregate) parameter.
+    default: false
+    type: bool
+  state:
+    description:
+      - State of the L3 interface configuration.
+    default: present
+    choices: ['present', 'absent']
+'''
+
+EXAMPLES = """
+- name: Set Eth1/1 IPv4 address
+  onyx_l3_interface:
+    name: Eth1/1
+    ipv4: 192.168.0.1/24
+
+- name: Remove Eth1/1 IPv4 address
+  onyx_l3_interface:
+    name: Eth1/1
+    state: absent
+
+- name: Set IP addresses on aggregate
+  onyx_l3_interface:
+    aggregate:
+      - { name: Eth1/1, ipv4: 192.168.2.10/24 }
+      - { name: Eth1/2, ipv4: 192.168.3.10/24 }
+
+- name: Remove IP addresses on aggregate
+  onyx_l3_interface:
+    aggregate:
+      - { name: Eth1/1, ipv4: 192.168.2.10/24 }
+      - { name: Eth1/2, ipv4: 192.168.3.10/24 }
+    state: absent
+"""
+
+RETURN = """
+commands:
+  description: The list of configuration mode commands to send to the device
+  returned: always
+  type: list
+  sample:
+    - interface ethernet 1/1 ip address 192.168.0.1/24
+"""
+import re
+from copy import deepcopy
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six import iteritems
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import remove_default_spec
+
+from ansible_collections.community.general.plugins.module_utils.network.onyx.onyx import BaseOnyxModule
+from ansible_collections.community.general.plugins.module_utils.network.onyx.onyx import get_interfaces_config
+
+
+class OnyxL3InterfaceModule(BaseOnyxModule):
+    IF_ETH_REGEX = re.compile(r"^Eth(\d+\/\d+|\d+\/\d+\/\d+)$")
+    IF_VLAN_REGEX = re.compile(r"^Vlan (\d+)$")
+    IF_LOOPBACK_REGEX = re.compile(r"^Loopback (\d+)$")
+
+    IF_TYPE_ETH = "ethernet"
+    IF_TYPE_LOOPBACK = "loopback"
+    IF_TYPE_VLAN = "vlan"
+
+    IF_TYPE_MAP = {
+        IF_TYPE_ETH: IF_ETH_REGEX,
+        IF_TYPE_VLAN: IF_VLAN_REGEX,
+        IF_TYPE_LOOPBACK: IF_LOOPBACK_REGEX,
+    }
+
+    IP_ADDR_ATTR_MAP = {
+        IF_TYPE_ETH: 'IP Address',
+        IF_TYPE_VLAN: 'Internet Address',
+        IF_TYPE_LOOPBACK: 'Internet Address',
+    }
+
+    _purge = False
+
+    @classmethod
+    def _get_element_spec(cls):
+        return dict(
+            name=dict(type='str'),
+            ipv4=dict(type='str'),
+            ipv6=dict(type='str'),
+            state=dict(default='present',
+                       choices=['present', 'absent', 'enabled', 'disabled']),
+        )
+
+    @classmethod
+    def _get_aggregate_spec(cls, element_spec):
+        aggregate_spec = deepcopy(element_spec)
+        aggregate_spec['name'] = dict(required=True)
+
+        # remove default in aggregate spec, to handle common arguments
+        remove_default_spec(aggregate_spec)
+        return aggregate_spec
+
+    def init_module(self):
+        """ module initialization
+        """
+        element_spec = self._get_element_spec()
+        aggregate_spec = self._get_aggregate_spec(element_spec)
+        argument_spec = dict(
+            aggregate=dict(type='list', elements='dict',
+                           options=aggregate_spec),
+            purge=dict(default=False, type='bool'),
+        )
+        argument_spec.update(element_spec)
+        required_one_of = [['name', 'aggregate']]
+        mutually_exclusive = [['name', 'aggregate']]
+        self._module = AnsibleModule(
+            argument_spec=argument_spec,
+            required_one_of=required_one_of,
+            mutually_exclusive=mutually_exclusive,
+            supports_check_mode=True)
+
+    def _get_interface_type(self, if_name):
+        if_type = None
+        if_id = None
+        for interface_type, interface_regex in iteritems(self.IF_TYPE_MAP):
+            match = interface_regex.match(if_name)
+            if match:
+                if_type = interface_type
+                if_id = match.group(1)
+                break
+        return if_type, if_id
+
+    def _set_if_type(self, params):
+        if_name = params['name']
+        if_type, if_id = self._get_interface_type(if_name)
+        if not if_id:
+            self._module.fail_json(
+                msg='unsupported interface: %s' % if_name)
+        params['if_type'] = if_type
+        params['if_id'] = if_id
+
+    def get_required_config(self):
+        self._required_config = list()
+        module_params = self._module.params
+        aggregate = module_params.get('aggregate')
+        self._purge = module_params.get('purge', False)
+        if aggregate:
+            for item in aggregate:
+                for key in item:
+                    if item.get(key) is None:
+                        item[key] = module_params[key]
+                self.validate_param_values(item, item)
+                req_item = item.copy()
+                self._set_if_type(req_item)
+                self._required_config.append(req_item)
+        else:
+            params = {
+                'name': module_params['name'],
+                'ipv4': module_params['ipv4'],
+                'ipv6': module_params['ipv6'],
+                'state': module_params['state'],
+            }
+            self.validate_param_values(params)
+            self._set_if_type(params)
+            self._required_config.append(params)
+
+    def _get_interfaces_config(self, interface_type):
+        return get_interfaces_config(self._module, interface_type)
+
+    def _parse_interfaces_config(self, if_type, if_config):
+        if self._os_version < self.ONYX_API_VERSION:
+            for if_data in if_config:
+                if_name = self.get_config_attr(if_data, 'header')
+                self._get_if_attributes(if_type, if_name, if_data)
+        else:
+            for if_config_item in if_config:
+                for if_name, if_data in iteritems(if_config_item):
+                    if_data = if_data[0]
+                    self._get_if_attributes(if_type, if_name, if_data)
+
+    def _get_if_attributes(self, if_type, if_name, if_data):
+        ipaddr_attr = self.IP_ADDR_ATTR_MAP[if_type]
+        regex = self.IF_TYPE_MAP[if_type]
+        match = regex.match(if_name)
+        if not match:
+            return
+        ipv4 = self.get_config_attr(if_data, ipaddr_attr)
+        if ipv4:
+            ipv4 = ipv4.replace(' ', '')
+        ipv6 = self.get_config_attr(if_data, 'IPv6 address(es)')
+        if ipv6:
+            ipv6 = ipv6.replace('[primary]', '')
+            ipv6 = ipv6.strip()
+        if_id = match.group(1)
+        switchport = self.get_config_attr(if_data, 'Switchport mode')
+        if_obj = {
+            'name': if_name,
+            'if_id': if_id,
+            'if_type': if_type,
+            'ipv4': ipv4,
+            'ipv6': ipv6,
+            'switchport': switchport,
+        }
+        self._current_config[if_name] = if_obj
+
+    def load_current_config(self):
+        # called in base class in run function
+        self._os_version = self._get_os_version()
+        self._current_config = dict()
+        if_types = set([if_obj['if_type'] for if_obj in self._required_config])
+        for if_type in if_types:
+            if_config = self._get_interfaces_config(if_type)
+            if not if_config:
+                continue
+            self._parse_interfaces_config(if_type, if_config)
+
+    def _generate_no_ip_commands(self, req_conf, curr_conf):
+        curr_ip = curr_conf.get('ipv4')
+        if_type = req_conf['if_type']
+        if_id = req_conf['if_id']
+        if curr_ip:
+            cmd = "interface %s %s no ip address" % (if_type, if_id)
+            self._commands.append(cmd)
+        curr_ipv6 = curr_conf.get('ipv6')
+        if curr_ipv6:
+            cmd = "interface %s %s no ipv6 address %s" % (
+                if_type, if_id, curr_ipv6)
+            self._commands.append(cmd)
+
+    def _generate_ip_commands(self, req_conf, curr_conf):
+        curr_ipv4 = curr_conf.get('ipv4')
+        req_ipv4 = req_conf.get('ipv4')
+        curr_ipv6 = curr_conf.get('ipv6')
+        req_ipv6 = req_conf.get('ipv6')
+        if_type = req_conf['if_type']
+        if_id = req_conf['if_id']
+        switchport = curr_conf.get('switchport')
+        if switchport:
+            cmd = "interface %s %s no switchport force" % (if_type, if_id)
+            self._commands.append(cmd)
+        # Guard against emitting "ip address None" when no address was requested.
+        if req_ipv4 and curr_ipv4 != req_ipv4:
+            cmd = "interface %s %s ip address %s" % (if_type, if_id, req_ipv4)
+            self._commands.append(cmd)
+        if req_ipv6 and curr_ipv6 != req_ipv6:
+            cmd = "interface %s %s ipv6 address %s" % (
+                if_type, if_id, req_ipv6)
+            self._commands.append(cmd)
+
+    def generate_commands(self):
+        req_interfaces = set()
+        for req_conf in self._required_config:
+            state = req_conf['state']
+            if_name = req_conf['name']
+            curr_conf = self._current_config.get(if_name, {})
+            if state == 'absent':
+                self._generate_no_ip_commands(req_conf, curr_conf)
+            else:
+                req_interfaces.add(if_name)
+                self._generate_ip_commands(req_conf, curr_conf)
+        if self._purge:
+            for if_name, curr_conf in iteritems(self._current_config):
+                if if_name not in req_interfaces:
+                    # curr_conf carries its own if_type/if_id, so it stands in
+                    # for the (unrelated, leftover) req_conf of the main loop.
+                    self._generate_no_ip_commands(curr_conf, curr_conf)
+
+
+def main():
+    """ main entry point for module execution
+    """
+    OnyxL3InterfaceModule.main()
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/network/onyx/onyx_linkagg.py b/plugins/modules/network/onyx/onyx_linkagg.py
new file mode 100644
index 0000000000..35da5de939
--- /dev/null
+++ b/plugins/modules/network/onyx/onyx_linkagg.py
@@ -0,0 +1,353 @@
+#!/usr/bin/python
+#
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+DOCUMENTATION = '''
+---
+module: onyx_linkagg
+author: "Samer Deeb (@samerd)"
+short_description: Manage link aggregation groups on Mellanox ONYX network devices
+description:
+  - This module provides declarative management of link aggregation groups
+    on Mellanox ONYX network devices.
+options:
+  name:
+    description:
+      - Name of the link aggregation group.
+    required: true
+  mode:
+    description:
+      - Mode of the link aggregation group. A value of C(on) configures a
+        static link aggregation group without LACP. C(active) configures the
+        link to actively send LACP information about the state of the link,
+        while C(passive) sends link state information only in reply to LACP
+        messages received from the other end.
+    default: on
+    choices: ['on', 'active', 'passive']
+  members:
+    description:
+      - List of member interfaces of the link aggregation group. The value can
+        be a single interface or a list of interfaces.
+    required: true
+  aggregate:
+    description: List of link aggregation definitions.
+  purge:
+    description:
+      - Purge link aggregation groups not defined in the I(aggregate) parameter.
+    default: false
+    type: bool
+  state:
+    description:
+      - State of the link aggregation group.
+    default: present
+    choices: ['present', 'absent', 'up', 'down']
+'''
+
+EXAMPLES = """
+- name: configure link aggregation group
+  onyx_linkagg:
+    name: Po1
+    members:
+      - Eth1/1
+      - Eth1/2
+
+- name: remove configuration
+  onyx_linkagg:
+    name: Po1
+    state: absent
+
+- name: Create aggregate of linkagg definitions
+  onyx_linkagg:
+    aggregate:
+      - { name: Po1, members: [Eth1/1] }
+      - { name: Po2, members: [Eth1/2] }
+
+- name: Remove aggregate of linkagg definitions
+  onyx_linkagg:
+    aggregate:
+      - name: Po1
+      - name: Po2
+    state: absent
+"""
+
+RETURN = """
+commands:
+  description: The list of configuration mode commands to send to the device
+  returned: always
+  type: list
+  sample:
+    - interface port-channel 1
+    - exit
+    - interface ethernet 1/1 channel-group 1 mode on
+    - interface ethernet 1/2 channel-group 1 mode on
+"""
+
+import re
+from copy import deepcopy
+
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import remove_default_spec
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six import iteritems
+
+from ansible_collections.community.general.plugins.module_utils.network.onyx.onyx import BaseOnyxModule
+from ansible_collections.community.general.plugins.module_utils.network.onyx.onyx import get_interfaces_config
+
+
+class OnyxLinkAggModule(BaseOnyxModule):
+    LAG_ID_REGEX = re.compile(r"^\d+ (Po\d+|Mpo\d+)\(([A-Z])\)$")
+    LAG_NAME_REGEX = re.compile(r"^(Po|Mpo)(\d+)$")
+    IF_NAME_REGEX = re.compile(r"^(Eth\d+\/\d+|Eth\d+\/\d+\/\d+)(.*)$")
+    PORT_CHANNEL = 'port-channel'
+    CHANNEL_GROUP = 'channel-group'
+    MLAG_PORT_CHANNEL = 'mlag-port-channel'
+    MLAG_CHANNEL_GROUP = 'mlag-channel-group'
+    MLAG_SUMMARY = 'MLAG Port-Channel Summary'
+
+    LAG_TYPE = 'lag'
+    MLAG_TYPE = 'mlag'
+
+    IF_TYPE_MAP = dict(
+        lag=PORT_CHANNEL,
+        mlag=MLAG_PORT_CHANNEL
+    )
+
+    _purge = False
+
+    @classmethod
+    def _get_element_spec(cls):
+        return dict(
+            name=dict(type='str'),
+            members=dict(type='list'),
+            mode=dict(default='on', choices=['active', 'on', 'passive']),
+            # 'up' and 'down' are accepted so the declarative state handling
+            # in _generate_linkagg_commands is reachable, matching the
+            # documented choices above.
+            state=dict(default='present',
+                       choices=['present', 'absent', 'up', 'down']),
+        )
+
+    @classmethod
+    def _get_aggregate_spec(cls, element_spec):
+        aggregate_spec = deepcopy(element_spec)
+        aggregate_spec['name'] = dict(required=True)
+
+        # remove default in aggregate spec, to handle common arguments
+        remove_default_spec(aggregate_spec)
+        return aggregate_spec
+
+    def init_module(self):
+        """ module initialization
+        """
+        element_spec = self._get_element_spec()
+        aggregate_spec = self._get_aggregate_spec(element_spec)
+        argument_spec = dict(
+            aggregate=dict(type='list', elements='dict',
+                           options=aggregate_spec),
+            purge=dict(default=False, type='bool'),
+        )
+        argument_spec.update(element_spec)
+        required_one_of = [['name', 'aggregate']]
+        mutually_exclusive = [['name', 'aggregate']]
+        self._module = AnsibleModule(
+            argument_spec=argument_spec,
+            required_one_of=required_one_of,
+            mutually_exclusive=mutually_exclusive,
+            supports_check_mode=True)
+
+    def _get_lag_type(self, lag_name):
+        match = self.LAG_NAME_REGEX.match(lag_name)
+        if match:
+            prefix = match.group(1)
+            if prefix == "Po":
+                return self.LAG_TYPE
+            return self.MLAG_TYPE
+        self._module.fail_json(
+            msg='invalid lag name: %s, lag name should start with Po or '
+                'Mpo' % lag_name)
+
+    def get_required_config(self):
+        self._required_config = list()
+        module_params = self._module.params
+        aggregate = module_params.get('aggregate')
+        self._purge = module_params.get('purge', False)
+        if aggregate:
+            for item in aggregate:
+                for key in item:
+                    if item.get(key) is None:
+                        item[key] = module_params[key]
+                self.validate_param_values(item, item)
+                req_item = item.copy()
+                req_item['type'] = self._get_lag_type(req_item['name'])
+                self._required_config.append(req_item)
+        else:
+            params = {
+                'name': module_params['name'],
+                'state': module_params['state'],
+                'members': module_params['members'],
+                'mode': module_params['mode'],
+                'type': self._get_lag_type(module_params['name']),
+            }
+            self.validate_param_values(params)
+            self._required_config.append(params)
+
+    @classmethod
+    def _extract_lag_name(cls, header):
+        match = cls.LAG_ID_REGEX.match(header)
+        state = None
+        lag_name = None
+        if 
match: + state = 'up' if match.group(2) == 'U' else 'down' + lag_name = match.group(1) + return lag_name, state + + @classmethod + def _extract_if_name(cls, member): + match = cls.IF_NAME_REGEX.match(member) + if match: + return match.group(1) + + @classmethod + def _extract_lag_members(cls, lag_type, lag_item): + members = "" + if lag_type == cls.LAG_TYPE: + members = cls.get_config_attr(lag_item, "Member Ports") + else: + for attr_name, attr_val in iteritems(lag_item): + if attr_name.startswith('Local Ports'): + members = attr_val + return [cls._extract_if_name(member) for member in members.split()] + + def _get_port_channels(self, if_type): + return get_interfaces_config(self._module, if_type, flags="summary") + + def _parse_port_channels_summary(self, lag_type, lag_summary): + if lag_type == self.MLAG_TYPE: + if self._os_version >= self.ONYX_API_VERSION: + found_summary = False + for summary_item in lag_summary: + if self.MLAG_SUMMARY in summary_item: + lag_summary = summary_item[self.MLAG_SUMMARY] + if lag_summary: + lag_summary = lag_summary[0] + else: + lag_summary = dict() + found_summary = True + break + if not found_summary: + lag_summary = dict() + else: + lag_summary = lag_summary.get(self.MLAG_SUMMARY, dict()) + for lag_key, lag_data in iteritems(lag_summary): + lag_name, state = self._extract_lag_name(lag_key) + if not lag_name: + continue + lag_members = self._extract_lag_members(lag_type, lag_data[0]) + lag_obj = dict( + name=lag_name, + state=state, + members=lag_members + ) + self._current_config[lag_name] = lag_obj + + def load_current_config(self): + self._current_config = dict() + self._os_version = self._get_os_version() + lag_types = set([lag_obj['type'] for lag_obj in self._required_config]) + for lag_type in lag_types: + if_type = self.IF_TYPE_MAP[lag_type] + lag_summary = self._get_port_channels(if_type) + if lag_summary: + self._parse_port_channels_summary(lag_type, lag_summary) + + def _get_interface_command_suffix(self, if_name): + if if_name.startswith('Eth'): + return if_name.replace("Eth", "ethernet ") + if if_name.startswith('Po'): + return if_name.replace("Po", "port-channel ") + if if_name.startswith('Mpo'): + return if_name.replace("Mpo", "mlag-port-channel ") + self._module.fail_json( + msg='invalid interface name: %s' % if_name) + + def _get_channel_group(self, if_name): + if if_name.startswith('Po'): + return if_name.replace("Po", "channel-group ") + if if_name.startswith('Mpo'): + return if_name.replace("Mpo", "mlag-channel-group ") + self._module.fail_json( + msg='invalid interface name: %s' % if_name) + + def _generate_no_linkagg_commands(self, lag_name): + suffix = self._get_interface_command_suffix(lag_name) + command = 'no interface %s' % suffix + self._commands.append(command) + + def _generate_linkagg_commands(self, lag_name, req_lag): + curr_lag = self._current_config.get(lag_name, {}) + if not curr_lag: + suffix = self._get_interface_command_suffix(lag_name) + self._commands.append("interface %s" % suffix) + self._commands.append("exit") + curr_members = set(curr_lag.get('members', [])) + req_members = set(req_lag.get('members') or []) + + lag_mode = req_lag['mode'] + if req_members != curr_members: + channel_group = self._get_channel_group(lag_name) + channel_group_type = channel_group.split()[0] + for member in req_members: + if member in curr_members: + continue + suffix = self._get_interface_command_suffix(member) + self._commands.append( + "interface %s %s mode %s" % + (suffix, channel_group, lag_mode)) + for member in curr_members: + 
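+                # Second pass of the membership diff: members that are
+                # currently enslaved but no longer requested are released
+                # below, so the channel group converges to exactly
+                # req_members.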
+                if member in req_members:
+                    continue
+                suffix = self._get_interface_command_suffix(member)
+                self._commands.append(
+                    "interface %s no %s" % (suffix, channel_group_type))
+        req_state = req_lag.get('state')
+        if req_state in ('up', 'down'):
+            curr_state = curr_lag.get('state')
+            if curr_state != req_state:
+                suffix = self._get_interface_command_suffix(lag_name)
+                cmd = "interface %s " % suffix
+                if req_state == 'up':
+                    cmd += 'no shutdown'
+                else:
+                    cmd += 'shutdown'
+                self._commands.append(cmd)
+
+    def generate_commands(self):
+        req_lags = set()
+        for req_conf in self._required_config:
+            state = req_conf['state']
+            lag_name = req_conf['name']
+            if state == 'absent':
+                if lag_name in self._current_config:
+                    self._generate_no_linkagg_commands(lag_name)
+            else:
+                req_lags.add(lag_name)
+                self._generate_linkagg_commands(lag_name, req_conf)
+        if self._purge:
+            for lag_name in self._current_config:
+                if lag_name not in req_lags:
+                    self._generate_no_linkagg_commands(lag_name)
+
+    def check_declarative_intent_params(self, result):
+        pass
+
+
+def main():
+    """ main entry point for module execution
+    """
+    OnyxLinkAggModule.main()
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/network/onyx/onyx_lldp.py b/plugins/modules/network/onyx/onyx_lldp.py
new file mode 100644
index 0000000000..fc3ebe0d57
--- /dev/null
+++ b/plugins/modules/network/onyx/onyx_lldp.py
@@ -0,0 +1,116 @@
+#!/usr/bin/python
+
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+DOCUMENTATION = '''
+---
+module: onyx_lldp
+author: "Samer Deeb (@samerd)"
+short_description: Manage LLDP configuration on Mellanox ONYX network devices
+description:
+  - This module provides declarative management of LLDP service configuration
+    on Mellanox ONYX network devices.
+options:
+  state:
+    description:
+      - State of the LLDP protocol configuration.
+    default: present
+    choices: ['present', 'absent']
+'''
+
+EXAMPLES = """
+- name: Enable LLDP protocol
+  onyx_lldp:
+    state: present
+
+- name: Disable LLDP protocol
+  onyx_lldp:
+    state: absent
+"""
+
+RETURN = """
+commands:
+  description: The list of configuration mode commands to send to the device
+  returned: always
+ type: list + sample: + - lldp +""" + +from ansible.module_utils.basic import AnsibleModule + +from ansible_collections.community.general.plugins.module_utils.network.onyx.onyx import BaseOnyxModule +from ansible_collections.community.general.plugins.module_utils.network.onyx.onyx import show_cmd + + +class OnyxLldpModule(BaseOnyxModule): + LLDP_ENTRY = 'LLDP' + SHOW_LLDP_CMD = 'show lldp local' + + @classmethod + def _get_element_spec(cls): + return dict( + state=dict(default='present', choices=['present', 'absent']), + ) + + def init_module(self): + """ module initialization + """ + element_spec = self._get_element_spec() + argument_spec = dict() + argument_spec.update(element_spec) + self._module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True) + + def get_required_config(self): + self._required_config = dict() + module_params = self._module.params + params = { + 'state': module_params['state'], + } + + self.validate_param_values(params) + self._required_config.update(params) + + def _get_lldp_config(self): + return show_cmd(self._module, self.SHOW_LLDP_CMD) + + def load_current_config(self): + self._current_config = dict() + state = 'absent' + config = self._get_lldp_config() or dict() + for item in config: + lldp_state = item.get(self.LLDP_ENTRY) + if lldp_state is not None: + if lldp_state == 'enabled': + state = 'present' + break + self._current_config['state'] = state + + def generate_commands(self): + req_state = self._required_config['state'] + curr_state = self._current_config['state'] + if curr_state != req_state: + cmd = 'lldp' + if req_state == 'absent': + cmd = 'no %s' % cmd + self._commands.append(cmd) + + +def main(): + """ main entry point for module execution + """ + OnyxLldpModule.main() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/onyx/onyx_lldp_interface.py b/plugins/modules/network/onyx/onyx_lldp_interface.py new file mode 100644 index 0000000000..b6d2cdb34b --- /dev/null +++ b/plugins/modules/network/onyx/onyx_lldp_interface.py @@ -0,0 +1,228 @@ +#!/usr/bin/python +# +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: onyx_lldp_interface +author: "Samer Deeb (@samerd)" +short_description: Manage LLDP interfaces configuration on Mellanox ONYX network devices +description: + - This module provides declarative management of LLDP interfaces + configuration on Mellanox ONYX network devices. +options: + name: + description: + - Name of the interface LLDP should be configured on. + aggregate: + description: List of interfaces LLDP should be configured on. + purge: + description: + - Purge interfaces not defined in the aggregate parameter. + type: bool + default: false + state: + description: + - State of the LLDP configuration. 
+    default: present
+    choices: ['present', 'absent', 'enabled', 'disabled']
+'''
+
+EXAMPLES = """
+- name: Configure LLDP on specific interfaces
+  onyx_lldp_interface:
+    name: Eth1/1
+    state: present
+
+- name: Disable LLDP on specific interfaces
+  onyx_lldp_interface:
+    name: Eth1/1
+    state: disabled
+
+- name: Enable LLDP on specific interfaces
+  onyx_lldp_interface:
+    name: Eth1/1
+    state: enabled
+
+- name: Delete LLDP on specific interfaces
+  onyx_lldp_interface:
+    name: Eth1/1
+    state: absent
+
+- name: Create aggregate of LLDP interface configurations
+  onyx_lldp_interface:
+    aggregate:
+      - { name: Eth1/1 }
+      - { name: Eth1/2 }
+    state: present
+
+- name: Delete aggregate of LLDP interface configurations
+  onyx_lldp_interface:
+    aggregate:
+      - { name: Eth1/1 }
+      - { name: Eth1/2 }
+    state: absent
+"""
+
+RETURN = """
+commands:
+  description: The list of configuration mode commands to send to the device
+  returned: always
+  type: list
+  sample:
+    - interface ethernet 1/1 lldp transmit
+    - interface ethernet 1/1 lldp receive
+"""
+import re
+from copy import deepcopy
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six import iteritems
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import remove_default_spec
+
+from ansible_collections.community.general.plugins.module_utils.network.onyx.onyx import BaseOnyxModule
+from ansible_collections.community.general.plugins.module_utils.network.onyx.onyx import show_cmd
+
+
+class OnyxLldpInterfaceModule(BaseOnyxModule):
+    IF_NAME_REGEX = re.compile(r"^(Eth\d+\/\d+|Eth\d+\/\d+\/\d+)$")
+    _purge = False
+
+    @classmethod
+    def _get_element_spec(cls):
+        return dict(
+            name=dict(type='str'),
+            state=dict(default='present',
+                       choices=['present', 'absent', 'enabled', 'disabled']),
+        )
+
+    @classmethod
+    def _get_aggregate_spec(cls, element_spec):
+        aggregate_spec = deepcopy(element_spec)
+        aggregate_spec['name'] = dict(required=True)
+
+        # remove default in aggregate spec, to handle common arguments
+        remove_default_spec(aggregate_spec)
+        return aggregate_spec
+
+    def init_module(self):
+        """ module initialization
+        """
+        element_spec = self._get_element_spec()
+        aggregate_spec = self._get_aggregate_spec(element_spec)
+        argument_spec = dict(
+            aggregate=dict(type='list', elements='dict',
+                           options=aggregate_spec),
+            purge=dict(default=False, type='bool'),
+        )
+        argument_spec.update(element_spec)
+        required_one_of = [['name', 'aggregate']]
+        mutually_exclusive = [['name', 'aggregate']]
+        self._module = AnsibleModule(
+            argument_spec=argument_spec,
+            required_one_of=required_one_of,
+            mutually_exclusive=mutually_exclusive,
+            supports_check_mode=True)
+
+    def get_required_config(self):
+        self._required_config = list()
+        module_params = self._module.params
+        aggregate = module_params.get('aggregate')
+        self._purge = module_params.get('purge', False)
+        if aggregate:
+            for item in aggregate:
+                for key in item:
+                    if item.get(key) is None:
+                        item[key] = module_params[key]
+                self.validate_param_values(item, item)
+                req_item = item.copy()
+                self._required_config.append(req_item)
+        else:
+            params = {
+                'name': module_params['name'],
+                'state': module_params['state'],
+            }
+            self.validate_param_values(params)
+            self._required_config.append(params)
+
+    def _create_if_lldp_data(self, if_name, if_lldp_data):
+        return {
+            'name': if_name,
+            'receive': self.get_config_attr(if_lldp_data, 'Receive'),
+            'transmit': self.get_config_attr(if_lldp_data, 'Transmit'),
+        }
+
+    def _get_lldp_config(self):
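+        # "show lldp interfaces" is expected to return parsed JSON: a mapping
+        # of interface name to a list of attribute dicts (see
+        # load_current_config below); this shape is assumed from usage here,
+        # not documented by the CLI.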
return show_cmd(self._module, "show lldp interfaces") + + def load_current_config(self): + # called in base class in run function + self._current_config = dict() + lldp_config = self._get_lldp_config() + if not lldp_config: + return + for if_name, if_lldp_data in iteritems(lldp_config): + match = self.IF_NAME_REGEX.match(if_name) + if not match: + continue + if if_lldp_data: + if_lldp_data = if_lldp_data[0] + self._current_config[if_name] = \ + self._create_if_lldp_data(if_name, if_lldp_data) + + def _get_interface_cmd_name(self, if_name): + return if_name.replace("Eth", "ethernet ") + + def _add_if_lldp_commands(self, if_name, flag, enable): + cmd_prefix = "interface %s " % self._get_interface_cmd_name(if_name) + lldp_cmd = "lldp %s" % flag + if not enable: + lldp_cmd = 'no %s' % lldp_cmd + self._commands.append(cmd_prefix + lldp_cmd) + + def _gen_lldp_commands(self, if_name, req_state, curr_conf): + curr_receive = curr_conf.get('receive') + curr_transmit = curr_conf.get('transmit') + enable = (req_state == 'Enabled') + if curr_receive != req_state: + flag = 'receive' + self._add_if_lldp_commands(if_name, flag, enable) + if curr_transmit != req_state: + flag = 'transmit' + self._add_if_lldp_commands(if_name, flag, enable) + + def generate_commands(self): + req_interfaces = set() + for req_conf in self._required_config: + state = req_conf['state'] + if_name = req_conf['name'] + if state in ('absent', 'disabled'): + req_state = 'Disabled' + else: + req_interfaces.add(if_name) + req_state = 'Enabled' + curr_conf = self._current_config.get(if_name, {}) + self._gen_lldp_commands(if_name, req_state, curr_conf) + if self._purge: + for if_name, curr_conf in iteritems(self._current_config): + if if_name not in req_interfaces: + req_state = 'Disabled' + self._gen_lldp_commands(if_name, req_state, curr_conf) + + +def main(): + """ main entry point for module execution + """ + OnyxLldpInterfaceModule.main() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/onyx/onyx_magp.py b/plugins/modules/network/onyx/onyx_magp.py new file mode 100644 index 0000000000..528eb82e5a --- /dev/null +++ b/plugins/modules/network/onyx/onyx_magp.py @@ -0,0 +1,235 @@ +#!/usr/bin/python +# +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: onyx_magp +author: "Samer Deeb (@samerd)" +short_description: Manage MAGP protocol on Mellanox ONYX network devices +description: + - This module provides declarative management of MAGP protocol on vlan + interface of Mellanox ONYX network devices. +notes: + - Tested on ONYX 3.6.4000 +options: + magp_id: + description: + - "MAGP instance number 1-255" + required: true + interface: + description: + - VLAN Interface name. + required: true + state: + description: + - MAGP state. + default: present + choices: ['present', 'absent', 'enabled', 'disabled'] + router_ip: + description: + - MAGP router IP address. + router_mac: + description: + - MAGP router MAC address. +''' + +EXAMPLES = """ +- name: run add vlan interface with magp + onyx_magp: + magp_id: 103 + router_ip: 192.168.8.2 + router_mac: AA:1B:2C:3D:4E:5F + interface: Vlan 1002 +""" + +RETURN = """ +commands: + description: The list of configuration mode commands to send to the device. 
+ returned: always + type: list + sample: + - interface vlan 234 magp 103 + - exit + - interface vlan 234 magp 103 ip virtual-router address 1.2.3.4 +""" +import re + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.six import iteritems + +from ansible_collections.community.general.plugins.module_utils.network.onyx.onyx import BaseOnyxModule +from ansible_collections.community.general.plugins.module_utils.network.onyx.onyx import show_cmd + + +class OnyxMagpModule(BaseOnyxModule): + IF_VLAN_REGEX = re.compile(r"^Vlan (\d+)$") + + @classmethod + def _get_element_spec(cls): + return dict( + magp_id=dict(type='int', required=True), + state=dict(default='present', + choices=['present', 'absent', 'enabled', 'disabled']), + interface=dict(required=True), + router_ip=dict(), + router_mac=dict(), + ) + + def init_module(self): + """ Ansible module initialization + """ + element_spec = self._get_element_spec() + argument_spec = dict() + argument_spec.update(element_spec) + self._module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True) + + def validate_magp_id(self, value): + if value and not 1 <= int(value) <= 255: + self._module.fail_json(msg='magp id must be between 1 and 255') + + def get_required_config(self): + module_params = self._module.params + interface = module_params['interface'] + match = self.IF_VLAN_REGEX.match(interface) + vlan_id = 0 + if match: + vlan_id = int(match.group(1)) + else: + self._module.fail_json( + msg='Invalid interface name: should be "Vlan <vlan_id>"') + + self._required_config = dict( + magp_id=module_params['magp_id'], + state=module_params['state'], + vlan_id=vlan_id, + router_ip=module_params['router_ip'], + router_mac=module_params['router_mac']) + self.validate_param_values(self._required_config) + + @classmethod + def get_magp_id(cls, item): + header = cls.get_config_attr(item, "header") + return int(header.split()[1]) + + def _create_magp_instance_data(self, magp_id, item): + vlan_id = int(self.get_config_attr(item, "Interface vlan")) + state = self.get_config_attr(item, "Admin state").lower() + return dict( + magp_id=magp_id, + state=state, + vlan_id=vlan_id, + router_ip=self.get_config_attr(item, "Virtual IP"), + router_mac=self.get_config_attr(item, "Virtual MAC")) + + def _update_magp_data(self, magp_data): + if self._os_version >= self.ONYX_API_VERSION: + for magp_config in magp_data: + for magp_name, data in iteritems(magp_config): + magp_id = int(magp_name.replace('MAGP ', '')) + self._current_config[magp_id] = \ + self._create_magp_instance_data(magp_id, data[0]) + else: + for magp_item in magp_data: + magp_id = self.get_magp_id(magp_item) + inst_data = self._create_magp_instance_data(magp_id, magp_item) + self._current_config[magp_id] = inst_data + + def _get_magp_config(self): + cmd = "show magp" + return show_cmd(self._module, cmd, json_fmt=True, fail_on_error=False) + + def load_current_config(self): + # called in base class in run function + self._os_version = self._get_os_version() + self._current_config = dict() + magp_data = self._get_magp_config() + if magp_data: + self._update_magp_data(magp_data) + + def _generate_no_magp_commands(self): + req_vlan_id = self._required_config['vlan_id'] + req_magp_id = self._required_config['magp_id'] + curr_magp_data = self._current_config.get(req_magp_id) + if not curr_magp_data: + return + curr_vlan_id = curr_magp_data.get('vlan_id') + if curr_vlan_id == req_vlan_id: + cmd = 'interface vlan %s no magp %s' % (req_vlan_id, req_magp_id) +
self._commands.append(cmd) + + def _generate_magp_commands(self, req_state): + req_vlan_id = self._required_config['vlan_id'] + req_magp_id = self._required_config['magp_id'] + curr_magp_data = self._current_config.get(req_magp_id, dict()) + curr_vlan_id = curr_magp_data.get('vlan_id') + magp_prefix = 'interface vlan %s magp %s' % (req_vlan_id, req_magp_id) + create_new_magp = False + if curr_vlan_id != req_vlan_id: + if curr_vlan_id: + cmd = 'interface vlan %s no magp %s' % ( + curr_vlan_id, req_magp_id) + self._commands.append(cmd) + create_new_magp = True + self._commands.append(magp_prefix) + self._commands.append('exit') + req_router_ip = self._required_config['router_ip'] + curr_router_ip = curr_magp_data.get('router_ip') + if req_router_ip: + if curr_router_ip != req_router_ip or create_new_magp: + cmd = '%s ip virtual-router address %s' % ( + magp_prefix, req_router_ip) + self._commands.append(cmd) + else: + if curr_router_ip and curr_router_ip != '0.0.0.0': + cmd = '%s no ip virtual-router address' % magp_prefix + self._commands.append(cmd) + req_router_mac = self._required_config['router_mac'] + curr_router_mac = curr_magp_data.get('router_mac') + if curr_router_mac: + curr_router_mac = curr_router_mac.lower() + if req_router_mac: + req_router_mac = req_router_mac.lower() + if curr_router_mac != req_router_mac or create_new_magp: + cmd = '%s ip virtual-router mac-address %s' % ( + magp_prefix, req_router_mac) + self._commands.append(cmd) + else: + if curr_router_mac and curr_router_mac != '00:00:00:00:00:00': + cmd = '%s no ip virtual-router mac-address' % magp_prefix + self._commands.append(cmd) + if req_state in ('enabled', 'disabled'): + curr_state = curr_magp_data.get('state', 'enabled') + if curr_state != req_state: + if req_state == 'enabled': + suffix = 'no shutdown' + else: + suffix = 'shutdown' + cmd = '%s %s' % (magp_prefix, suffix) + self._commands.append(cmd) + + def generate_commands(self): + req_state = self._required_config['state'] + if req_state == 'absent': + return self._generate_no_magp_commands() + return self._generate_magp_commands(req_state) + + +def main(): + """ main entry point for module execution + """ + OnyxMagpModule.main() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/onyx/onyx_mlag_ipl.py b/plugins/modules/network/onyx/onyx_mlag_ipl.py new file mode 100644 index 0000000000..6a18943015 --- /dev/null +++ b/plugins/modules/network/onyx/onyx_mlag_ipl.py @@ -0,0 +1,209 @@ +#!/usr/bin/python +# +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: onyx_mlag_ipl +author: "Samer Deeb (@samerd)" +short_description: Manage IPL (inter-peer link) on Mellanox ONYX network devices +description: + - This module provides declarative management of IPL (inter-peer link) + management on Mellanox ONYX network devices. +notes: + - Tested on ONYX 3.6.4000 +options: + name: + description: + - Name of the interface (port-channel) IPL should be configured on. + required: true + vlan_interface: + description: + - Name of the IPL vlan interface. + state: + description: + - IPL state. + default: present + choices: ['present', 'absent'] + peer_address: + description: + - IPL peer IP address. 
+''' + +EXAMPLES = """ +- name: run configure ipl + onyx_mlag_ipl: + name: Po1 + vlan_interface: Vlan 322 + state: present + peer_address: 192.168.7.1 + +- name: run remove ipl + onyx_mlag_ipl: + name: Po1 + state: absent +""" + +RETURN = """ +commands: + description: The list of configuration mode commands to send to the device. + returned: always + type: list + sample: + - interface port-channel 1 ipl 1 + - interface vlan 1024 ipl 1 peer-address 10.10.10.10 +""" +import re + +from ansible.module_utils.basic import AnsibleModule + +from ansible_collections.community.general.plugins.module_utils.network.onyx.onyx import BaseOnyxModule +from ansible_collections.community.general.plugins.module_utils.network.onyx.onyx import show_cmd + + +class OnyxMlagIplModule(BaseOnyxModule): + VLAN_IF_REGEX = re.compile(r'^Vlan \d+') + + @classmethod + def _get_element_spec(cls): + return dict( + name=dict(required=True), + state=dict(default='present', + choices=['present', 'absent']), + peer_address=dict(), + vlan_interface=dict(), + ) + + def init_module(self): + """ module initialization + """ + element_spec = self._get_element_spec() + argument_spec = dict() + argument_spec.update(element_spec) + self._module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True) + + def get_required_config(self): + module_params = self._module.params + self._required_config = dict( + name=module_params['name'], + state=module_params['state'], + peer_address=module_params['peer_address'], + vlan_interface=module_params['vlan_interface']) + self.validate_param_values(self._required_config) + + def _update_mlag_data(self, mlag_data): + if not mlag_data: + return + mlag_summary = mlag_data.get("MLAG IPLs Summary", {}) + ipl_id = "1" + ipl_list = mlag_summary.get(ipl_id) + if ipl_list: + ipl_data = ipl_list[0] + vlan_id = ipl_data.get("Vlan Interface") + vlan_interface = "" + if vlan_id != "N/A": + vlan_interface = "Vlan %s" % vlan_id + peer_address = ipl_data.get("Peer IP address") + name = ipl_data.get("Group Port-Channel") + self._current_config = dict( + name=name, + peer_address=peer_address, + vlan_interface=vlan_interface) + + def _show_mlag_data(self): + cmd = "show mlag" + return show_cmd(self._module, cmd, json_fmt=True, fail_on_error=False) + + def load_current_config(self): + # called in base class in run function + self._current_config = dict() + mlag_data = self._show_mlag_data() + self._update_mlag_data(mlag_data) + + def _get_interface_cmd_name(self, if_name): + if if_name.startswith('Po'): + return if_name.replace("Po", "port-channel ") + self._module.fail_json( + msg='invalid interface name: %s' % if_name) + + def _generate_port_channel_command(self, if_name, enable): + if_cmd_name = self._get_interface_cmd_name(if_name) + if enable: + ipl_cmd = 'ipl 1' + else: + ipl_cmd = "no ipl 1" + cmd = "interface %s %s" % (if_cmd_name, ipl_cmd) + return cmd + + def _generate_vlan_if_command(self, if_name, enable, peer_address): + if_cmd_name = if_name.lower() + if enable: + ipl_cmd = 'ipl 1 peer-address %s' % peer_address + else: + ipl_cmd = "no ipl 1" + cmd = "interface %s %s" % (if_cmd_name, ipl_cmd) + return cmd + + def _generate_no_ipl_commands(self): + curr_interface = self._current_config.get('name') + req_interface = self._required_config.get('name') + if curr_interface == req_interface: + cmd = self._generate_port_channel_command( + req_interface, enable=False) + self._commands.append(cmd) + + def _generate_ipl_commands(self): + curr_interface = self._current_config.get('name') + 
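# if the IPL is currently bound to a different port-channel, unbind it there before binding the requested one +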
req_interface = self._required_config.get('name') + if curr_interface != req_interface: + if curr_interface and curr_interface != 'N/A': + cmd = self._generate_port_channel_command( + curr_interface, enable=False) + self._commands.append(cmd) + cmd = self._generate_port_channel_command( + req_interface, enable=True) + self._commands.append(cmd) + curr_vlan = self._current_config.get('vlan_interface') + req_vlan = self._required_config.get('vlan_interface') + add_peer = False + if curr_vlan != req_vlan: + add_peer = True + if curr_vlan: + cmd = self._generate_vlan_if_command(curr_vlan, enable=False, + peer_address=None) + self._commands.append(cmd) + curr_peer = self._current_config.get('peer_address') + req_peer = self._required_config.get('peer_address') + if req_peer != curr_peer: + add_peer = True + if add_peer and req_peer: + cmd = self._generate_vlan_if_command(req_vlan, enable=True, + peer_address=req_peer) + self._commands.append(cmd) + + def generate_commands(self): + state = self._required_config['state'] + if state == 'absent': + self._generate_no_ipl_commands() + else: + self._generate_ipl_commands() + + +def main(): + """ main entry point for module execution + """ + OnyxMlagIplModule.main() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/onyx/onyx_mlag_vip.py b/plugins/modules/network/onyx/onyx_mlag_vip.py new file mode 100644 index 0000000000..5abd6b89d5 --- /dev/null +++ b/plugins/modules/network/onyx/onyx_mlag_vip.py @@ -0,0 +1,184 @@ +#!/usr/bin/python +# +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: onyx_mlag_vip +author: "Samer Deeb (@samerd)" +short_description: Configures MLAG VIP on Mellanox ONYX network devices +description: + - This module provides declarative management of MLAG virtual IPs + on Mellanox ONYX network devices. +notes: + - Tested on ONYX 3.6.4000 +options: + ipaddress: + description: + - Virtual IP address of the MLAG. Required if I(state=present). + group_name: + description: + - MLAG group name. Required if I(state=present). + mac_address: + description: + - MLAG system MAC address. Required if I(state=present). + state: + description: + - MLAG VIP state. + choices: ['present', 'absent'] + delay: + description: + - Delay interval, in seconds, waiting for the changes on mlag VIP to take + effect. + default: 12 +''' + +EXAMPLES = """ +- name: configure mlag-vip + onyx_mlag_vip: + ipaddress: 50.3.3.1/24 + group_name: ansible-test-group + mac_address: 00:11:12:23:34:45 +""" + +RETURN = """ +commands: + description: The list of configuration mode commands to send to the device. 
+ returned: always + type: list + sample: + - mlag-vip ansible_test_group ip 50.3.3.1 /24 force + - no mlag shutdown +""" + +import time + +from ansible.module_utils.basic import AnsibleModule + +from ansible_collections.community.general.plugins.module_utils.network.onyx.onyx import BaseOnyxModule +from ansible_collections.community.general.plugins.module_utils.network.onyx.onyx import show_cmd + + +class OnyxMLagVipModule(BaseOnyxModule): + + def init_module(self): + """ initialize module + """ + element_spec = dict( + ipaddress=dict(), + group_name=dict(), + mac_address=dict(), + delay=dict(type='int', default=12), + state=dict(choices=['present', 'absent'], default='present'), + ) + argument_spec = dict() + + argument_spec.update(element_spec) + self._module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True) + + def get_required_config(self): + module_params = self._module.params + lag_params = { + 'ipaddress': module_params['ipaddress'], + 'group_name': module_params['group_name'], + 'mac_address': module_params['mac_address'], + 'delay': module_params['delay'], + 'state': module_params['state'], + } + + self.validate_param_values(lag_params) + self._required_config = lag_params + + def _show_mlag_cmd(self, cmd): + return show_cmd(self._module, cmd, json_fmt=True, fail_on_error=False) + + def _show_mlag(self): + cmd = "show mlag" + return self._show_mlag_cmd(cmd) + + def _show_mlag_vip(self): + cmd = "show mlag-vip" + return self._show_mlag_cmd(cmd) + + def load_current_config(self): + self._current_config = dict() + mlag_config = self._show_mlag() + mlag_vip_config = self._show_mlag_vip() + if mlag_vip_config: + mlag_vip = mlag_vip_config.get("MLAG-VIP", {}) + self._current_config['group_name'] = \ + mlag_vip.get("MLAG group name") + self._current_config['ipaddress'] = \ + mlag_vip.get("MLAG VIP address") + if mlag_config: + self._current_config['mac_address'] = \ + mlag_config.get("System-mac") + + def generate_commands(self): + state = self._required_config['state'] + if state == 'present': + self._generate_mlag_vip_cmds() + else: + self._generate_no_mlag_vip_cmds() + + def _generate_mlag_vip_cmds(self): + current_group = self._current_config.get('group_name') + current_ip = self._current_config.get('ipaddress') + current_mac = self._current_config.get('mac_address') + if current_mac: + current_mac = current_mac.lower() + + req_group = self._required_config.get('group_name') + req_ip = self._required_config.get('ipaddress') + req_mac = self._required_config.get('mac_address') + if req_mac: + req_mac = req_mac.lower() + + if req_ip is not None: + if req_group is None: + self._module.fail_json(msg='In order to configure Mlag-Vip you must send ' + 'group name param beside IPaddress') + ipaddr, mask = req_ip.split('/') + if req_group != current_group or req_ip != current_ip: + self._commands.append('mlag-vip %s ip %s /%s force' % (req_group, ipaddr, mask)) + elif req_group and req_group != current_group: + self._commands.append('mlag-vip %s' % req_group) + + if req_mac and req_mac != current_mac: + self._commands.append( + 'mlag system-mac %s' % (req_mac)) + if self._commands: + self._commands.append('no mlag shutdown') + + def _generate_no_mlag_vip_cmds(self): + if self._current_config.get('group_name'): + self._commands.append('no mlag-vip') + + def check_declarative_intent_params(self, result): + if not result['changed']: + return + delay_interval = self._required_config.get('delay') + if delay_interval > 0: + time.sleep(delay_interval) + for cmd in ("show 
mlag-vip", ""): + show_cmd(self._module, cmd, json_fmt=False, fail_on_error=False) + + +def main(): + """ main entry point for module execution + """ + OnyxMLagVipModule.main() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/onyx/onyx_ntp.py b/plugins/modules/network/onyx/onyx_ntp.py new file mode 100644 index 0000000000..1ae9d7bdd8 --- /dev/null +++ b/plugins/modules/network/onyx/onyx_ntp.py @@ -0,0 +1,242 @@ +#!/usr/bin/python +# +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: onyx_ntp +author: "Sara-Touqan (@sarato)" +short_description: Manage NTP general configurations and ntp keys configurations on Mellanox ONYX network devices +description: + - This module provides declarative management of NTP & NTP Keys + on Mellanox ONYX network devices. +options: + state: + description: + - State of the NTP configuration. + choices: ['enabled', 'disabled'] + type: str + authenticate_state: + description: + - State of the NTP authentication configuration. + choices: ['enabled', 'disabled'] + type: str + ntp_authentication_keys: + type: list + description: + - List of ntp authentication keys + suboptions: + auth_key_id: + description: + - Configures ntp key-id, range 1-65534 + required: true + type: int + auth_key_encrypt_type: + description: + - encryption type used to configure ntp authentication key. + required: true + choices: ['md5', 'sha1'] + type: str + auth_key_password: + description: + - password used for ntp authentication key. + required: true + type: str + auth_key_state: + description: + - Used to decide if you want to delete given ntp key or not + choices: ['present', 'absent'] + type: str + trusted_keys: + type: list + description: + - List of ntp trusted keys +''' + +EXAMPLES = """ +- name: configure NTP + onyx_ntp: + state: enabled + authenticate_state: enabled + ntp_authentication_keys: + - auth_key_id: 1 + auth_key_encrypt_type: md5 + auth_key_password: 12345 + auth_key_state: absent + trusted_keys: 1,2,3 +""" + +RETURN = """ +commands: + description: The list of configuration mode commands to send to the device + returned: always. 
+ type: list + sample: + - ntp enable + - ntp disable + - ntp authenticate + - no ntp authenticate + - ntp authentication-key 1 md5 12345 + - no ntp authentication-key 1 + - ntp trusted-key 1,2,3 +""" + + +from ansible.module_utils.basic import AnsibleModule + +from ansible_collections.community.general.plugins.module_utils.network.onyx.onyx import BaseOnyxModule +from ansible_collections.community.general.plugins.module_utils.network.onyx.onyx import show_cmd + + +class OnyxNTPModule(BaseOnyxModule): + + def init_module(self): + """ module initialization + """ + ntp_authentication_key_spec = dict(auth_key_id=dict(type='int', required=True), + auth_key_encrypt_type=dict(required=True, choices=['md5', 'sha1']), + auth_key_password=dict(required=True), + auth_key_state=dict(choices=['present', 'absent'])) + element_spec = dict( + state=dict(choices=['enabled', 'disabled']), + authenticate_state=dict(choices=['enabled', 'disabled']), + ntp_authentication_keys=dict(type='list', elements='dict', options=ntp_authentication_key_spec), + trusted_keys=dict(type='list', elements='int') + ) + argument_spec = dict() + argument_spec.update(element_spec) + self._module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True) + + def _validate_key_id(self): + keys_id_list = self._required_config.get("ntp_authentication_keys") + if keys_id_list: + for key_item in keys_id_list: + key_id = key_item.get("auth_key_id") + if (key_id < 1) or (key_id > 65534): + self._module.fail_json( + msg='Invalid Key value, value should be in the range 1-65534') + + def get_required_config(self): + module_params = self._module.params + self._required_config = dict(module_params) + self.validate_param_values(self._required_config) + self._validate_key_id() + + def _show_ntp_config(self): + show_cmds = [] + cmd = "show ntp" + show_cmds.append(show_cmd(self._module, cmd, json_fmt=True, fail_on_error=False)) + cmd = "show ntp keys" + show_cmds.append(show_cmd(self._module, cmd, json_fmt=True, fail_on_error=False)) + return show_cmds + + def _set_ntp_keys_config(self, ntp_config): + if not ntp_config: + return + for req_ntp_auth_key in ntp_config: + encryption_type = req_ntp_auth_key.get("Encryption Type") + self._current_config[req_ntp_auth_key.get("header")] = encryption_type + + def _set_ntp_config(self, ntp_config): + ntp_config = ntp_config[0] + if not ntp_config: + return + self._current_config['state'] = ntp_config.get("NTP is administratively") + self._current_config['authenticate_state'] = ntp_config.get("NTP Authentication administratively") + + def load_current_config(self): + self._current_config = dict() + ntp_config = self._show_ntp_config() + if ntp_config: + if ntp_config[0]: + self._set_ntp_config(ntp_config[0]) + if ntp_config[1]: + self._set_ntp_keys_config(ntp_config[1]) + + def generate_commands(self): + current_state = self._current_config.get("state") + state = self._required_config.get("state") + if state is None: + state = current_state + if state is not None: + if current_state != state: + if state == 'enabled': + self._commands.append('ntp enable') + else: + self._commands.append('no ntp enable') + authenticate_state = self._required_config.get("authenticate_state") + if authenticate_state: + current_authenticate_state = self._current_config.get("authenticate_state") + if authenticate_state is not None: + if current_authenticate_state != authenticate_state: + if authenticate_state == 'enabled': + self._commands.append('ntp authenticate') + else: + self._commands.append('no ntp 
authenticate') + req_ntp_auth_keys = self._required_config.get('ntp_authentication_keys') + if req_ntp_auth_keys: + if req_ntp_auth_keys is not None: + for req_ntp_auth_key in req_ntp_auth_keys: + req_key_id = req_ntp_auth_key.get('auth_key_id') + req_key = 'NTP Key ' + str(req_key_id) + current_req_key = self._current_config.get(req_key) + auth_key_state = req_ntp_auth_key.get('auth_key_state') + req_encrypt_type = req_ntp_auth_key.get('auth_key_encrypt_type') + req_password = req_ntp_auth_key.get('auth_key_password') + if current_req_key: + if req_encrypt_type == current_req_key: + if auth_key_state: + if auth_key_state == 'absent': + self._commands.append('no ntp authentication-key {0}' .format(req_key_id)) + else: + continue + else: + if auth_key_state: + if auth_key_state == 'present': + self._commands.append('ntp authentication-key {0} {1} {2}' + .format(req_key_id, + req_encrypt_type, + req_password)) + else: + self._commands.append('ntp authentication-key {0} {1} {2}' + .format(req_key_id, + req_encrypt_type, + req_password)) + + else: + if auth_key_state: + if auth_key_state == 'present': + self._commands.append('ntp authentication-key {0} {1} {2}' + .format(req_key_id, + req_encrypt_type, + req_password)) + else: + self._commands.append('ntp authentication-key {0} {1} {2}' + .format(req_key_id, + req_encrypt_type, + req_password)) + + req_trusted_keys = self._required_config.get('trusted_keys') + if req_trusted_keys: + for key in req_trusted_keys: + self._commands.append('ntp trusted-key {0}' .format(key)) + + +def main(): + """ main entry point for module execution + """ + OnyxNTPModule.main() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/onyx/onyx_ntp_servers_peers.py b/plugins/modules/network/onyx/onyx_ntp_servers_peers.py new file mode 100644 index 0000000000..ddebb63825 --- /dev/null +++ b/plugins/modules/network/onyx/onyx_ntp_servers_peers.py @@ -0,0 +1,285 @@ +#!/usr/bin/python +# +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: onyx_ntp_servers_peers +author: "Sara-Touqan (@sarato)" +short_description: Configures NTP peers and servers parameters +description: + - This module provides declarative management of NTP peers and servers configuration on Mellanox ONYX network devices. +options: + peer: + type: list + description: + - List of ntp peers. + suboptions: + ip_or_name: + description: + - Configures ntp peer name or ip. + required: true + type: str + enabled: + description: + - Disables/Enables ntp peer state + type: bool + version: + description: + - version number for the ntp peer + choices: [3, 4] + type: int + key_id: + description: + - Used to configure the key-id for the ntp peer + type: int + state: + description: + - Indicates if the ntp peer exists or should be deleted + choices: ['present', 'absent'] + type: str + server: + type: list + description: + - List of ntp servers. + suboptions: + ip_or_name: + description: + - Configures ntp server name or ip. + required: true + type: str + enabled: + description: + - Disables/Enables ntp server + type: bool + trusted_enable: + description: + - Disables/Enables the trusted state for the ntp server. 
+ type: bool + version: + description: + - version number for the ntp server + choices: [3, 4] + type: int + key_id: + description: + - Used to configure the key-id for the ntp server + type: int + state: + description: + - Indicates if the ntp peer exists or should be deleted. + choices: ['present', 'absent'] + type: str + ntpdate: + description: + - Sets system clock once from a remote server using NTP. + type: str +''' + +EXAMPLES = """ +- name: configure NTP peers and servers + onyx_ntp_peers_servers: + peer: + - ip_or_name: 1.1.1.1 + enabled: yes + version: 4 + key_id: 6 + state: present + server: + - ip_or_name: 2.2.2.2 + enabled: true + version: 3 + key_id: 8 + trusted_enable: no + state: present + ntpdate: 192.168.10.10 +""" + +RETURN = """ +commands: + description: The list of configuration mode commands to send to the device + returned: always. + type: list + sample: + - ntp peer 1.1.1.1 disable + no ntp peer 1.1.1.1 disable + ntp peer 1.1.1.1 keyId 6 + ntp peer 1.1.1.1 version 4 + no ntp peer 1.1.1.1 + ntp server 2.2.2.2 disable + no ntp server 2.2.2.2 disable + ntp server 2.2.2.2 keyID 8 + ntp server 2.2.2.2 version 3 + ntp server 2.2.2.2 trusted-enable + no ntp server 2.2.2.2 + ntp server 192.168.10.10 + ntpdate 192.168.10.10 +""" + +from copy import deepcopy +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.six import iteritems +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import remove_default_spec + +from ansible_collections.community.general.plugins.module_utils.network.onyx.onyx import BaseOnyxModule +from ansible_collections.community.general.plugins.module_utils.network.onyx.onyx import show_cmd + + +class OnyxNTPServersPeersModule(BaseOnyxModule): + + def init_module(self): + """ module initialization + """ + peer_spec = dict(ip_or_name=dict(required=True), + enabled=dict(type='bool'), + version=dict(type='int', choices=[3, 4]), + key_id=dict(type='int'), + state=dict(choices=['present', 'absent'])) + server_spec = dict(ip_or_name=dict(required=True), + enabled=dict(type='bool'), + version=dict(type='int', choices=[3, 4]), + trusted_enable=dict(type='bool'), + key_id=dict(type='int'), + state=dict(choices=['present', 'absent'])) + element_spec = dict(peer=dict(type='list', elements='dict', options=peer_spec), + server=dict(type='list', elements='dict', options=server_spec), + ntpdate=dict()) + argument_spec = dict() + argument_spec.update(element_spec) + self._module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True) + + def get_required_config(self): + module_params = self._module.params + self._required_config = dict(module_params) + self.validate_param_values(self._required_config) + + def _show_peers_servers_config(self): + cmd = "show ntp configured" + return show_cmd(self._module, cmd, json_fmt=True, fail_on_error=False) + + def _set_servers_config(self, peers_servers_config): + servers = dict() + peers = dict() + if not peers_servers_config: + return + index = 0 + for peer_server in peers_servers_config: + if (index == 0): + index += 1 + continue + else: + header_list = peer_server.get("header").split(" ") + header_type = header_list[1] + if peer_server.get("Enabled") == "yes": + enabled_state = True + else: + enabled_state = False + if (header_type == 'server'): + trusted_state = peer_server.get("Trusted") + if trusted_state == 'yes': + trusted_state = True + else: + trusted_state = False + server_entry = {"version": peer_server.get("NTP version"), + "enabled": 
enabled_state, + "trusted_enable": trusted_state, + "key_id": peer_server.get("Key ID")} + servers[header_list[2]] = server_entry + else: + peer_entry = {"version": peer_server.get("NTP version"), + "enabled": enabled_state, + "key_id": peer_server.get("Key ID")} + peers[header_list[2]] = peer_entry + index += 1 + self._current_config = dict(server=servers, + peer=peers) + + def load_current_config(self): + servers = dict() + peers = dict() + self._current_config = dict(server=servers, + peer=peers) + peers_servers_config = self._show_peers_servers_config() + if peers_servers_config: + self._set_servers_config(peers_servers_config) + + def generate_commands(self): + for option in self._current_config: + req_ntp = self._required_config.get(option) + if req_ntp is not None: + for ntp_peer in req_ntp: + peer_name = ntp_peer.get('ip_or_name') + peer_key = ntp_peer.get('key_id') + peer_state = ntp_peer.get("state") + peer_enabled = ntp_peer.get("enabled") + peer_version = ntp_peer.get("version") + peer_key = ntp_peer.get("key_id") + curr_name = self._current_config.get(option).get(peer_name) + peer_version = ntp_peer.get('version') + if self._current_config.get(option) and curr_name: + if peer_state: + if(peer_state == "absent"): + self._commands.append('no ntp {0} {1}' .format(option, peer_name)) + continue + if peer_enabled is not None: + if curr_name.get("enabled") != peer_enabled: + if(peer_enabled is True): + self._commands.append('no ntp {0} {1} disable' .format(option, peer_name)) + else: + self._commands.append('ntp {0} {1} disable' .format(option, peer_name)) + if peer_version: + if (int(curr_name.get("version")) != peer_version): + self._commands.append('ntp {0} {1} version {2}' .format(option, peer_name, peer_version)) + if peer_key: + if curr_name.get("key_id") != "none": + if (int(curr_name.get("key_id")) != peer_key): + self._commands.append('ntp {0} {1} keyID {2}' .format(option, peer_name, peer_key)) + else: + self._commands.append('ntp {0} {1} keyID {2}' .format(option, peer_name, peer_key)) + if option == "server": + server_trusted = ntp_peer.get("trusted_enable") + if server_trusted is not None: + if (curr_name.get("trusted_enable") != server_trusted): + if server_trusted is True: + self._commands.append('ntp {0} {1} trusted-enable' .format(option, peer_name)) + else: + self._commands.append('no ntp {0} {1} trusted-enable' .format(option, peer_name)) + else: + if peer_state: + if(peer_state == "absent"): + continue + if peer_enabled is not None: + if(peer_enabled is True): + self._commands.append('no ntp {0} {1} disable' .format(option, peer_name)) + else: + self._commands.append('ntp {0} {1} disable' .format(option, peer_name)) + else: + self._commands.append('ntp {0} {1} disable' .format(option, peer_name)) + if peer_version: + self._commands.append('ntp {0} {1} version {2}' .format(option, peer_name, peer_version)) + if peer_key: + self._commands.append('ntp {0} {1} keyID {2}' .format(option, peer_name, peer_key)) + + ntpdate = self._required_config.get("ntpdate") + if ntpdate is not None: + self._commands.append('ntpdate {0}' .format(ntpdate)) + + +def main(): + """ main entry point for module execution + """ + OnyxNTPServersPeersModule.main() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/onyx/onyx_ospf.py b/plugins/modules/network/onyx/onyx_ospf.py new file mode 100644 index 0000000000..e7cc2e1e5c --- /dev/null +++ b/plugins/modules/network/onyx/onyx_ospf.py @@ -0,0 +1,237 @@ +#!/usr/bin/python +# +# Copyright: Ansible Project +# GNU 
General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: onyx_ospf +author: "Samer Deeb (@samerd)" +short_description: Manage OSPF protocol on Mellanox ONYX network devices +description: + - This module provides declarative management and configuration of OSPF + protocol on Mellanox ONYX network devices. +notes: + - Tested on ONYX 3.6.4000 +options: + ospf: + description: + - "OSPF instance number 1-65535" + required: true + router_id: + description: + - OSPF router ID. Required if I(state=present). + interfaces: + description: + - List of interfaces and areas. Required if I(state=present). + suboptions: + name: + description: + - Interface name. + required: true + area: + description: + - OSPF area. + required: true + state: + description: + - OSPF state. + default: present + choices: ['present', 'absent'] +''' + +EXAMPLES = """ +- name: add ospf router to interface + onyx_ospf: + ospf: 2 + router_id: 192.168.8.2 + interfaces: + - name: Eth1/1 + area: 0.0.0.0 +""" + +RETURN = """ +commands: + description: The list of configuration mode commands to send to the device. + returned: always + type: list + sample: + - router ospf 2 + - router-id 192.168.8.2 + - exit + - interface ethernet 1/1 ip ospf area 0.0.0.0 +""" +import re + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.six import iteritems + +from ansible_collections.community.general.plugins.module_utils.network.onyx.onyx import BaseOnyxModule +from ansible_collections.community.general.plugins.module_utils.network.onyx.onyx import show_cmd + + +class OnyxOspfModule(BaseOnyxModule): + OSPF_IF_REGEX = re.compile( + r'^(Loopback\d+|Eth\d+\/\d+|Vlan\d+|Po\d+)\s+(\S+).*') + OSPF_ROUTER_REGEX = re.compile(r'^Routing Process (\d+).*ID\s+(\S+).*') + + @classmethod + def _get_element_spec(cls): + interface_spec = dict( + name=dict(required=True), + area=dict(required=True), + ) + element_spec = dict( + ospf=dict(type='int', required=True), + router_id=dict(), + interfaces=dict(type='list', elements='dict', + options=interface_spec), + state=dict(choices=['present', 'absent'], default='present'), + ) + return element_spec + + def init_module(self): + """ Ansible module initialization + """ + element_spec = self._get_element_spec() + argument_spec = dict() + argument_spec.update(element_spec) + self._module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True) + + def validate_ospf(self, value): + if value and not 1 <= int(value) <= 65535: + self._module.fail_json(msg='ospf id must be between 1 and 65535') + + def get_required_config(self): + module_params = self._module.params + self._required_config = dict( + ospf=module_params['ospf'], + router_id=module_params['router_id'], + state=module_params['state'], + ) + interfaces = module_params['interfaces'] or list() + req_interfaces = self._required_config['interfaces'] = dict() + for interface_data in interfaces: + req_interfaces[interface_data['name']] = interface_data['area'] + self.validate_param_values(self._required_config) + + def _update_ospf_data(self, ospf_data): + match = self.OSPF_ROUTER_REGEX.match(ospf_data) + if match: + ospf_id = int(match.group(1)) + router_id = match.group(2) + self._current_config['ospf'] = ospf_id + self._current_config['router_id'] = router_id
+ + def _update_ospf_interfaces(self, ospf_interfaces): + interfaces = self._current_config['interfaces'] = dict() + lines = ospf_interfaces.split('\n') + for line in lines: + line = line.strip() + match = self.OSPF_IF_REGEX.match(line) + if match: + name = match.group(1) + area = match.group(2) + for prefix in ("Vlan", "Loopback"): + if name.startswith(prefix): + name = name.replace(prefix, prefix + ' ') + interfaces[name] = area + + def _get_ospf_config(self, ospf_id): + cmd = 'show ip ospf %s | include Process' % ospf_id + return show_cmd(self._module, cmd, json_fmt=False, fail_on_error=False) + + def _get_ospf_interfaces_config(self, ospf_id): + cmd = 'show ip ospf interface %s brief' % ospf_id + return show_cmd(self._module, cmd, json_fmt=False, fail_on_error=False) + + def load_current_config(self): + # called in base class in run function + ospf_id = self._required_config['ospf'] + self._current_config = dict() + ospf_data = self._get_ospf_config(ospf_id) + if ospf_data: + self._update_ospf_data(ospf_data) + ospf_interfaces = self._get_ospf_interfaces_config(ospf_id) + if ospf_interfaces: + self._update_ospf_interfaces(ospf_interfaces) + + def _generate_no_ospf_commands(self): + req_ospf_id = self._required_config['ospf'] + curr_ospf_id = self._current_config.get('ospf') + if curr_ospf_id == req_ospf_id: + cmd = 'no router ospf %s' % req_ospf_id + self._commands.append(cmd) + + def _get_interface_command_name(self, if_name): + if if_name.startswith('Eth'): + return if_name.replace("Eth", "ethernet ") + if if_name.startswith('Po'): + return if_name.replace("Po", "port-channel ") + if if_name.startswith('Vlan'): + return if_name.replace("Vlan", "vlan") + if if_name.startswith('Loopback'): + return if_name.replace("Loopback", "loopback") + self._module.fail_json( + msg='invalid interface name: %s' % if_name) + + def _get_interface_area_cmd(self, if_name, area): + interface_prefix = self._get_interface_command_name(if_name) + if area: + area_cmd = 'ip ospf area %s' % area + else: + area_cmd = 'no ip ospf area' + cmd = 'interface %s %s' % (interface_prefix, area_cmd) + return cmd + + def _generate_ospf_commands(self): + req_router_id = self._required_config['router_id'] + req_ospf_id = self._required_config['ospf'] + curr_router_id = self._current_config.get('router_id') + curr_ospf_id = self._current_config.get('ospf') + if curr_ospf_id != req_ospf_id or req_router_id != curr_router_id: + cmd = 'router ospf %s' % req_ospf_id + self._commands.append(cmd) + if req_router_id != curr_router_id: + if req_router_id: + cmd = 'router-id %s' % req_router_id + else: + cmd = 'no router-id' + self._commands.append(cmd) + self._commands.append('exit') + req_interfaces = self._required_config['interfaces'] + curr_interfaces = self._current_config.get('interfaces', dict()) + for if_name, area in iteritems(req_interfaces): + curr_area = curr_interfaces.get(if_name) + if curr_area != area: + cmd = self._get_interface_area_cmd(if_name, area) + self._commands.append(cmd) + for if_name in curr_interfaces: + if if_name not in req_interfaces: + cmd = self._get_interface_area_cmd(if_name, None) + self._commands.append(cmd) + + def generate_commands(self): + req_state = self._required_config['state'] + if req_state == 'absent': + return self._generate_no_ospf_commands() + return self._generate_ospf_commands() + + +def main(): + """ main entry point for module execution + """ + OnyxOspfModule.main() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/onyx/onyx_pfc_interface.py 
b/plugins/modules/network/onyx/onyx_pfc_interface.py new file mode 100644 index 0000000000..5b7adb62c3 --- /dev/null +++ b/plugins/modules/network/onyx/onyx_pfc_interface.py @@ -0,0 +1,212 @@ +#!/usr/bin/python +# +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: onyx_pfc_interface +author: "Samer Deeb (@samerd)" +short_description: Manage priority flow control on ONYX network devices +description: + - This module provides declarative management of priority flow control (PFC) + on interfaces of Mellanox ONYX network devices. +notes: + - Tested on ONYX 3.6.4000 +options: + name: + description: + - Name of the interface PFC should be configured on. + aggregate: + description: List of interfaces PFC should be configured on. + purge: + description: + - Purge interfaces not defined in the aggregate parameter. + type: bool + default: false + state: + description: + - State of the PFC configuration. + default: enabled + choices: ['enabled', 'disabled'] +''' + +EXAMPLES = """ +- name: configure PFC + onyx_pfc_interface: + name: Eth1/1 + state: enabled +""" + +RETURN = """ +commands: + description: The list of configuration mode commands to send to the device. + returned: always + type: list + sample: + - interface ethernet 1/17 dcb priority-flow-control mode on +""" +from copy import deepcopy +import re + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import remove_default_spec +from ansible.module_utils.six import iteritems + +from ansible_collections.community.general.plugins.module_utils.network.onyx.onyx import BaseOnyxModule +from ansible_collections.community.general.plugins.module_utils.network.onyx.onyx import show_cmd + + +class OnyxPfcInterfaceModule(BaseOnyxModule): + PFC_IF_REGEX = re.compile( + r"^(Eth\d+\/\d+)|(Eth\d+\/\d+\/\d+)|(Po\d+)|(Mpo\d+)$") + + _purge = False + + @classmethod + def _get_element_spec(cls): + return dict( + name=dict(type='str'), + state=dict(default='enabled', + choices=['enabled', 'disabled']), + ) + + @classmethod + def _get_aggregate_spec(cls, element_spec): + aggregate_spec = deepcopy(element_spec) + aggregate_spec['name'] = dict(required=True) + + # remove default in aggregate spec, to handle common arguments + remove_default_spec(aggregate_spec) + return aggregate_spec + + def init_module(self): + """ module initialization + """ + element_spec = self._get_element_spec() + aggregate_spec = self._get_aggregate_spec(element_spec) + argument_spec = dict( + aggregate=dict(type='list', elements='dict', + options=aggregate_spec), + purge=dict(default=False, type='bool'), + ) + argument_spec.update(element_spec) + required_one_of = [['name', 'aggregate']] + mutually_exclusive = [['name', 'aggregate']] + self._module = AnsibleModule( + argument_spec=argument_spec, + required_one_of=required_one_of, + mutually_exclusive=mutually_exclusive, + supports_check_mode=True) + + def get_required_config(self): + self._required_config = list() + module_params = self._module.params + aggregate = module_params.get('aggregate') + self._purge = module_params.get('purge', False) + if aggregate: + for item in aggregate: + for key in item: + if item.get(key) is None: + item[key] = 
module_params[key] + self.validate_param_values(item, item) + req_item = item.copy() + self._required_config.append(req_item) + else: + params = { + 'name': module_params['name'], + 'state': module_params['state'], + } + self.validate_param_values(params) + self._required_config.append(params) + + def _create_if_pfc_data(self, if_name, if_pfc_data): + state = self.get_config_attr(if_pfc_data, "PFC oper") + state = state.lower() + return dict( + name=if_name, + state=state) + + def _get_pfc_config(self): + return show_cmd(self._module, "show dcb priority-flow-control") + + def load_current_config(self): + # called in base class in run function + self._os_version = self._get_os_version() + self._current_config = dict() + pfc_config = self._get_pfc_config() + if not pfc_config: + return + if self._os_version >= self.ONYX_API_VERSION: + if len(pfc_config) >= 3: + pfc_config = pfc_config[2] + else: + pfc_config = dict() + else: + if 'Table 2' in pfc_config: + pfc_config = pfc_config['Table 2'] + + for if_name, if_pfc_data in iteritems(pfc_config): + match = self.PFC_IF_REGEX.match(if_name) + if not match: + continue + if if_pfc_data: + if_pfc_data = if_pfc_data[0] + self._current_config[if_name] = \ + self._create_if_pfc_data(if_name, if_pfc_data) + + def _get_interface_cmd_name(self, if_name): + if if_name.startswith('Eth'): + return if_name.replace("Eth", "ethernet ") + if if_name.startswith('Po'): + return if_name.replace("Po", "port-channel ") + if if_name.startswith('Mpo'): + return if_name.replace("Mpo", "mlag-port-channel ") + self._module.fail_json( + msg='invalid interface name: %s' % if_name) + + def _add_if_pfc_commands(self, if_name, req_state): + cmd_prefix = "interface %s " % self._get_interface_cmd_name(if_name) + + if req_state == 'disabled': + pfc_cmd = 'no dcb priority-flow-control mode force' + else: + pfc_cmd = 'dcb priority-flow-control mode on force' + self._commands.append(cmd_prefix + pfc_cmd) + + def _gen_pfc_commands(self, if_name, curr_conf, req_state): + curr_state = curr_conf.get('state', 'disabled') + if curr_state != req_state: + self._add_if_pfc_commands(if_name, req_state) + + def generate_commands(self): + req_interfaces = set() + for req_conf in self._required_config: + req_state = req_conf['state'] + if_name = req_conf['name'] + if req_state == 'enabled': + req_interfaces.add(if_name) + curr_conf = self._current_config.get(if_name, {}) + self._gen_pfc_commands(if_name, curr_conf, req_state) + if self._purge: + for if_name, curr_conf in iteritems(self._current_config): + if if_name not in req_interfaces: + req_state = 'disabled' + self._gen_pfc_commands(if_name, curr_conf, req_state) + + +def main(): + """ main entry point for module execution + """ + OnyxPfcInterfaceModule.main() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/onyx/onyx_protocol.py b/plugins/modules/network/onyx/onyx_protocol.py new file mode 100644 index 0000000000..da1a18cb91 --- /dev/null +++ b/plugins/modules/network/onyx/onyx_protocol.py @@ -0,0 +1,194 @@ +#!/usr/bin/python +# +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: onyx_protocol +author: "Samer Deeb (@samerd)" +short_description: Enables/Disables protocols on Mellanox ONYX network devices 
+description: + - This module provides a mechanism for enabling and disabling protocols + Mellanox on ONYX network devices. +notes: + - Tested on ONYX 3.6.4000 +options: + mlag: + description: MLAG protocol + choices: ['enabled', 'disabled'] + magp: + description: MAGP protocol + choices: ['enabled', 'disabled'] + spanning_tree: + description: Spanning Tree support + choices: ['enabled', 'disabled'] + dcb_pfc: + description: DCB priority flow control + choices: ['enabled', 'disabled'] + igmp_snooping: + description: IP IGMP snooping + choices: ['enabled', 'disabled'] + lacp: + description: LACP protocol + choices: ['enabled', 'disabled'] + ip_l3: + description: IP L3 support + choices: ['enabled', 'disabled'] + ip_routing: + description: IP routing support + choices: ['enabled', 'disabled'] + lldp: + description: LLDP protocol + choices: ['enabled', 'disabled'] + bgp: + description: BGP protocol + choices: ['enabled', 'disabled'] + ospf: + description: OSPF protocol + choices: ['enabled', 'disabled'] + nve: + description: nve protocol + choices: ['enabled', 'disabled'] + bfd: + description: bfd protocol + choices: ['enabled', 'disabled'] +''' + +EXAMPLES = """ +- name: enable protocols for MLAG + onyx_protocol: + lacp: enabled + spanning_tree: disabled + ip_routing: enabled + mlag: enabled + dcb_pfc: enabled +""" + +RETURN = """ +commands: + description: The list of configuration mode commands to send to the device. + returned: always + type: list + sample: + - no spanning-tree + - protocol mlag +""" + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.six import iteritems + +from ansible_collections.community.general.plugins.module_utils.network.onyx.onyx import BaseOnyxModule +from ansible_collections.community.general.plugins.module_utils.network.onyx.onyx import show_cmd + + +class OnyxProtocolModule(BaseOnyxModule): + + PROTOCOL_MAPPING = dict( + mlag=dict(name="mlag", enable="protocol mlag", + disable="no protocol mlag"), + magp=dict(name="magp", enable="protocol magp", + disable="no protocol magp"), + spanning_tree=dict(name="spanning-tree", enable="spanning-tree", + disable="no spanning-tree"), + dcb_pfc=dict(name="priority-flow-control", + enable="dcb priority-flow-control enable force", + disable="no dcb priority-flow-control enable force"), + igmp_snooping=dict(name="igmp-snooping", enable="ip igmp snooping", + disable="no ip igmp snooping"), + lacp=dict(name="lacp", enable="lacp", disable="no lacp"), + ip_l3=dict(name="IP L3", enable="ip l3", + disable="no ip l3"), + ip_routing=dict(name="IP routing", enable="ip routing", + disable="no ip routing"), + lldp=dict(name="lldp", enable="lldp", disable="no lldp"), + bgp=dict(name="bgp", enable="protocol bgp", disable="no protocol bgp"), + ospf=dict(name="ospf", enable="protocol ospf", + disable="no protocol ospf"), + nve=dict(name="nve", enable="protocol nve", + disable="no protocol nve"), + bfd=dict(name="bfd", enable="protocol bfd", + disable="no protocol bfd"), + ) + + @classmethod + def _get_element_spec(cls): + element_spec = dict() + for protocol in cls.PROTOCOL_MAPPING: + element_spec[protocol] = dict(choices=['enabled', 'disabled']) + return element_spec + + def init_module(self): + """ Ansible module initialization + """ + element_spec = self._get_element_spec() + argument_spec = dict() + argument_spec.update(element_spec) + self._module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True + ) + + def get_required_config(self): + self._required_config = dict() + 
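# keep only the protocol options that were explicitly set in the task +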
module_params = self._module.params + for key, val in iteritems(module_params): + if key in self.PROTOCOL_MAPPING and val is not None: + self._required_config[key] = val + + def _get_protocols(self): + return show_cmd(self._module, "show protocols") + + def _get_ip_routing(self): + return show_cmd(self._module, 'show ip routing | include "IP routing"', + json_fmt=False) + + def load_current_config(self): + self._current_config = dict() + protocols_config = self._get_protocols() + if not protocols_config: + protocols_config = dict() + ip_config = self._get_ip_routing() + if ip_config: + lines = ip_config.split('\n') + for line in lines: + line = line.strip() + line_attr = line.split(':') + if len(line_attr) == 2: + attr = line_attr[0].strip() + val = line_attr[1].strip() + protocols_config[attr] = val + for protocol, protocol_metadata in iteritems(self.PROTOCOL_MAPPING): + protocol_json_attr = protocol_metadata['name'] + val = protocols_config.get(protocol_json_attr, 'disabled') + if val not in ('enabled', 'disabled'): + val = 'enabled' + self._current_config[protocol] = val + + def generate_commands(self): + for protocol, req_val in iteritems(self._required_config): + protocol_metadata = self.PROTOCOL_MAPPING[protocol] + curr_val = self._current_config.get(protocol, 'disabled') + if curr_val != req_val: + if req_val == 'disabled': + command = protocol_metadata['disable'] + else: + command = protocol_metadata['enable'] + self._commands.append(command) + + +def main(): + """ main entry point for module execution + """ + OnyxProtocolModule.main() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/onyx/onyx_ptp_global.py b/plugins/modules/network/onyx/onyx_ptp_global.py new file mode 100644 index 0000000000..3b41ab10e5 --- /dev/null +++ b/plugins/modules/network/onyx/onyx_ptp_global.py @@ -0,0 +1,206 @@ +#!/usr/bin/python +# +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: onyx_ptp_global +author: "Anas Badaha (@anasb)" +short_description: Configures PTP Global parameters +description: + - This module provides declarative management of PTP Global configuration + on Mellanox ONYX network devices. +notes: + - Tested on ONYX 3.6.8130 + - ptp and ntp protocols cannot be enabled at the same time +options: + ptp_state: + description: + - PTP state. + choices: ['enabled', 'disabled'] + default: enabled + ntp_state: + description: + - NTP state. + choices: ['enabled', 'disabled'] + domain: + description: + - "set PTP domain number Range 0-127" + primary_priority: + description: + - "set PTP primary priority Range 0-255" + secondary_priority: + description: + - "set PTP secondary priority Range 0-255" +''' + +EXAMPLES = """ +- name: configure PTP + onyx_ptp_global: + ntp_state: enabled + ptp_state: disabled + domain: 127 + primary_priority: 128 + secondary_priority: 128 +""" + +RETURN = """ +commands: + description: The list of configuration mode commands to send to the device.
+ returned: always + type: list + sample: + - no ntp enable + - protocol ptp + - ptp domain 127 +""" + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.network.onyx.onyx import show_cmd +from ansible_collections.community.general.plugins.module_utils.network.onyx.onyx import BaseOnyxModule + + +class OnyxPtpGlobalModule(BaseOnyxModule): + + def init_module(self): + """ initialize module + """ + element_spec = dict( + ntp_state=dict(choices=['enabled', 'disabled']), + ptp_state=dict(choices=['enabled', 'disabled'], default='enabled'), + domain=dict(type=int), + primary_priority=dict(type=int), + secondary_priority=dict(type=int) + ) + argument_spec = dict() + argument_spec.update(element_spec) + self._module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True) + + def get_required_config(self): + module_params = self._module.params + self._required_config = dict(module_params) + self._validate_param_values(self._required_config) + + def _validate_param_values(self, obj, param=None): + super(OnyxPtpGlobalModule, self).validate_param_values(obj, param) + if obj['ntp_state'] == 'enabled' and obj['ptp_state'] == 'enabled': + self._module.fail_json(msg='PTP State and NTP State Can not be enabled at the same time') + + def validate_domain(self, value): + if value and not 0 <= int(value) <= 127: + self._module.fail_json(msg='domain must be between 0 and 127') + + def validate_primary_priority(self, value): + if value and not 0 <= int(value) <= 255: + self._module.fail_json(msg='Primary Priority must be between 0 and 255') + + def validate_secondary_priority(self, value): + if value and not 0 <= int(value) <= 255: + self._module.fail_json(msg='Secondary Priority must be between 0 and 255') + + def _set_ntp_config(self, ntp_config): + ntp_config = ntp_config[0] + if not ntp_config: + return + ntp_state = ntp_config.get('NTP enabled') + if ntp_state == "yes": + self._current_config['ntp_state'] = "enabled" + else: + self._current_config['ntp_state'] = "disabled" + + def _set_ptp_config(self, ptp_config): + if ptp_config is None: + self._current_config['ptp_state'] = 'disabled' + else: + self._current_config['ptp_state'] = 'enabled' + self._current_config['domain'] = int(ptp_config['Domain']) + self._current_config['primary_priority'] = int(ptp_config['Priority1']) + self._current_config['secondary_priority'] = int(ptp_config['Priority2']) + + def _show_ntp_config(self): + cmd = "show ntp configured" + return show_cmd(self._module, cmd, json_fmt=True, fail_on_error=False) + + def _show_ptp_config(self): + cmd = "show ptp clock" + return show_cmd(self._module, cmd, json_fmt=True, fail_on_error=False) + + def load_current_config(self): + self._current_config = dict() + + ntp_config = self._show_ntp_config() + self._set_ntp_config(ntp_config) + + ptp_config = self._show_ptp_config() + self._set_ptp_config(ptp_config) + + def generate_commands(self): + ntp_state = self._required_config.get("ntp_state") + if ntp_state == "enabled": + self._enable_ntp() + elif ntp_state == "disabled": + self._disable_ntp() + + ptp_state = self._required_config.get("ptp_state", "enabled") + if ptp_state == "enabled": + self._enable_ptp() + else: + self._disable_ptp() + + domain = self._required_config.get("domain") + if domain is not None: + curr_domain = self._current_config.get("domain") + if domain != curr_domain: + self._commands.append('ptp domain %d' % domain) + + primary_priority = self._required_config.get("primary_priority") 
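+ # priority1/priority2 are only pushed when they differ from the device's current values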
+        if primary_priority is not None:
+            curr_primary_priority = self._current_config.get("primary_priority")
+            if primary_priority != curr_primary_priority:
+                self._commands.append('ptp priority1 %d' % primary_priority)
+
+        secondary_priority = self._required_config.get("secondary_priority")
+        if secondary_priority is not None:
+            curr_secondary_priority = self._current_config.get("secondary_priority")
+            if secondary_priority != curr_secondary_priority:
+                self._commands.append('ptp priority2 %d' % secondary_priority)
+
+    def _enable_ptp(self):
+        curr_ptp_state = self._current_config['ptp_state']
+        if curr_ptp_state == 'disabled':
+            self._commands.append('protocol ptp')
+
+    def _disable_ptp(self):
+        curr_ptp_state = self._current_config['ptp_state']
+        if curr_ptp_state == 'enabled':
+            self._commands.append('no protocol ptp')
+
+    def _enable_ntp(self):
+        curr_ntp_state = self._current_config.get('ntp_state')
+        if curr_ntp_state == 'disabled':
+            self._commands.append('ntp enable')
+
+    def _disable_ntp(self):
+        curr_ntp_state = self._current_config['ntp_state']
+        if curr_ntp_state == 'enabled':
+            self._commands.append('no ntp enable')
+
+
+def main():
+    """ main entry point for module execution
+    """
+    OnyxPtpGlobalModule.main()
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/network/onyx/onyx_ptp_interface.py b/plugins/modules/network/onyx/onyx_ptp_interface.py
new file mode 100644
index 0000000000..6d207ae28a
--- /dev/null
+++ b/plugins/modules/network/onyx/onyx_ptp_interface.py
@@ -0,0 +1,228 @@
+#!/usr/bin/python
+#
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+DOCUMENTATION = '''
+---
+module: onyx_ptp_interface
+author: 'Anas Badaha (@anasb)'
+short_description: 'Configures PTP on interface'
+description:
+  - "This module provides declarative management of PTP interfaces configuration
+on Mellanox ONYX network devices."
+notes:
+  - 'Tested on ONYX 3.6.8130'
+  - 'The PTP protocol must be enabled on the switch.'
+  - 'The interface must not be a switch port interface.'
+options:
+  name:
+    description:
+      - 'Name of the ethernet or vlan interface on which to configure PTP.'
+    required: true
+  state:
+    description:
+      - 'Enables/Disables PTP on the interface.'
+    default: enabled
+    choices:
+      - enabled
+      - disabled
+  delay_request:
+    description:
+      - 'Configures the PTP delay request interval, range 0-5.'
+  announce_interval:
+    description:
+      - 'Configures the PTP announce interval, range -3 to -1.'
+  announce_timeout:
+    description:
+      - 'Configures the PTP announce timeout, range 2-10.'
+  sync_interval:
+    description:
+      - 'Configures the PTP sync interval, range -7 to -1.'
+'''
+
+EXAMPLES = """
+- name: configure PTP interface
+  onyx_ptp_interface:
+    state: enabled
+    name: Eth1/1
+    delay_request: 0
+    announce_interval: -2
+    announce_timeout: 3
+"""
+
+RETURN = """
+commands:
+  description: The list of configuration mode commands to send to the device.
+ returned: always + type: list + sample: + - interface ethernet 1/16 ptp enable + - interface ethernet 1/16 ptp delay-req interval 0 + - interface ethernet 1/16 ptp announce interval -1 +""" + +import re + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.six import iteritems +from ansible_collections.community.general.plugins.module_utils.network.onyx.onyx import show_cmd +from ansible_collections.community.general.plugins.module_utils.network.onyx.onyx import BaseOnyxModule + + +class OnyxPtpInterfaceModule(BaseOnyxModule): + IF_ETH_REGEX = re.compile(r"^Eth(\d+\/\d+|Eth\d+\/\d+\d+)$") + IF_VLAN_REGEX = re.compile(r"^Vlan (\d+)$") + + IF_TYPE_ETH = "ethernet" + IF_TYPE_VLAN = "vlan" + + IF_TYPE_MAP = { + IF_TYPE_ETH: IF_ETH_REGEX, + IF_TYPE_VLAN: IF_VLAN_REGEX + } + + RANGE_ATTR = { + "delay_request": (0, 5), + "announce_interval": (-3, -1), + "announce_timeout": (2, 10), + "sync_interval": (-7, -1) + } + + _interface_type = None + _interface_id = None + + def init_module(self): + """ initialize module + """ + element_spec = dict( + name=dict(required=True), + state=dict(choices=['enabled', 'disabled'], default='enabled'), + delay_request=dict(type=int), + announce_interval=dict(type=int), + announce_timeout=dict(type=int), + sync_interval=dict(type=int) + ) + argument_spec = dict() + argument_spec.update(element_spec) + self._module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True) + + @classmethod + def _get_interface_type(cls, if_name): + if_type = None + if_id = None + for interface_type, interface_regex in iteritems(cls.IF_TYPE_MAP): + match = interface_regex.match(if_name) + if match: + if_type = interface_type + if_id = match.group(1) + break + return if_type, if_id + + def _set_if_type(self, module_params): + if_name = module_params['name'] + self._interface_type, self._interface_id = self._get_interface_type(if_name) + if not self._interface_id: + self._module.fail_json( + msg='unsupported interface name/type: %s' % if_name) + + def get_required_config(self): + module_params = self._module.params + self._required_config = dict(module_params) + self._set_if_type(self._required_config) + self.validate_param_values(self._required_config) + + def _validate_attr_is_not_none(self, attr_name, attr_value): + if attr_value is not None: + self._module.fail_json(msg='Can not set %s value on switch while state is disabled' % attr_name) + + def validate_param_values(self, obj, param=None): + if obj['state'] == 'disabled': + for attr_name in self.RANGE_ATTR: + self._validate_attr_is_not_none(attr_name, obj[attr_name]) + super(OnyxPtpInterfaceModule, self).validate_param_values(obj, param) + + def _validate_range(self, value, attr_name): + min_value, max_value = self.RANGE_ATTR[attr_name] + if value and not min_value <= int(value) <= max_value: + self._module.fail_json(msg='%s value must be between %d and %d' % (attr_name, min_value, max_value)) + + def validate_delay_request(self, value): + self._validate_range(value, "delay_request") + + def validate_announce_interval(self, value): + self._validate_range(value, "announce_interval") + + def validate_announce_timeout(self, value): + self._validate_range(value, "announce_timeout") + + def validate_sync_interval(self, value): + self._validate_range(value, "sync_interval") + + def _set_ptp_interface_config(self, ptp_interface_config): + if ptp_interface_config is None: + self._current_config['state'] = 'disabled' + return + ptp_interface_config = ptp_interface_config[0] + 
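+        # The "show ptp interface ..." JSON reply is a list; its first element
+        # carries the interval/timeout fields copied into _current_config below.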
self._current_config['state'] = 'enabled' + self._current_config['delay_request'] = int(ptp_interface_config['Delay request interval(log mean)']) + self._current_config['announce_interval'] = int(ptp_interface_config['Announce interval(log mean)']) + self._current_config['announce_timeout'] = int(ptp_interface_config['Announce receipt time out']) + self._current_config['sync_interval'] = int(ptp_interface_config['Sync interval(log mean)']) + + def _show_ptp_interface_config(self): + cmd = "show ptp interface %s %s" % (self._interface_type, self._interface_id) + return show_cmd(self._module, cmd, json_fmt=True, fail_on_error=False) + + def load_current_config(self): + self._current_config = dict() + ptp_interface_config = self._show_ptp_interface_config() + self._set_ptp_interface_config(ptp_interface_config) + + def _generate_attr_command(self, attr_name, attr_cmd_name): + attr_val = self._required_config.get(attr_name) + if attr_val is not None: + curr_val = self._current_config.get(attr_name) + if attr_val != curr_val: + self._commands.append( + 'interface %s %s ptp %s %d' % (self._interface_type, self._interface_id, attr_cmd_name, attr_val)) + + def generate_commands(self): + state = self._required_config.get("state", "enabled") + self._gen_ptp_commands(state) + + self._generate_attr_command("delay_request", "delay-req interval") + self._generate_attr_command("announce_interval", "announce interval") + self._generate_attr_command("announce_timeout", "announce timeout") + self._generate_attr_command("sync_interval", "sync interval") + + def _add_if_ptp_cmd(self, req_state): + if req_state == 'enabled': + if_ptp_cmd = 'interface %s %s ptp enable' % (self._interface_type, self._interface_id) + else: + if_ptp_cmd = 'no interface %s %s ptp enable' % (self._interface_type, self._interface_id) + self._commands.append(if_ptp_cmd) + + def _gen_ptp_commands(self, req_state): + curr_state = self._current_config.get('state') + if curr_state != req_state: + self._add_if_ptp_cmd(req_state) + + +def main(): + """ main entry point for module execution + """ + OnyxPtpInterfaceModule.main() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/onyx/onyx_qos.py b/plugins/modules/network/onyx/onyx_qos.py new file mode 100644 index 0000000000..ee4efd04ac --- /dev/null +++ b/plugins/modules/network/onyx/onyx_qos.py @@ -0,0 +1,235 @@ +#!/usr/bin/python +# +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: onyx_qos +author: "Anas Badaha (@anasb)" +short_description: Configures QoS +description: + - This module provides declarative management of Onyx QoS configuration + on Mellanox ONYX network devices. +notes: + - Tested on ONYX 3.6.8130 +options: + interfaces: + description: + - list of interfaces name. + required: true + trust: + description: + - trust type. + choices: ['L2', 'L3', 'both'] + default: L2 + rewrite_pcp: + description: + - rewrite with type pcp. + choices: ['enabled', 'disabled'] + default: disabled + rewrite_dscp: + description: + - rewrite with type dscp. 
+    choices: ['enabled', 'disabled']
+    default: disabled
+'''
+
+EXAMPLES = """
+- name: configure QoS
+  onyx_qos:
+    interfaces:
+      - Mpo7
+      - Mpo7
+    trust: L3
+    rewrite_pcp: disabled
+    rewrite_dscp: enabled
+
+- name: configure QoS
+  onyx_qos:
+    interfaces:
+      - Eth1/1
+      - Eth1/2
+    trust: both
+    rewrite_pcp: disabled
+    rewrite_dscp: enabled
+"""
+
+RETURN = """
+commands:
+  description: The list of configuration mode commands to send to the device.
+  returned: always
+  type: list
+  sample:
+    - interface ethernet 1/16 qos trust L3
+    - interface mlag-port-channel 7 qos trust L3
+    - interface port-channel 1 qos trust L3
+    - interface mlag-port-channel 7 qos trust L2
+    - interface mlag-port-channel 7 qos rewrite dscp
+    - interface ethernet 1/16 qos rewrite pcp
+    - interface ethernet 1/1 no qos rewrite pcp
+"""
+
+import re
+from ansible.module_utils.six import iteritems
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.network.onyx.onyx import show_cmd
+from ansible_collections.community.general.plugins.module_utils.network.onyx.onyx import BaseOnyxModule
+
+
+class OnyxQosModule(BaseOnyxModule):
+    TRUST_CMD = "interface {0} {1} qos trust {2}"
+    NO_REWRITE_PCP_CMD = "interface {0} {1} no qos rewrite pcp"
+    NO_REWRITE_DSCP_CMD = "interface {0} {1} no qos rewrite dscp"
+    REWRITE_PCP_CMD = "interface {0} {1} qos rewrite pcp"
+    REWRITE_DSCP_CMD = "interface {0} {1} qos rewrite dscp"
+
+    REWRITE_PCP = "pcp"
+    REWRITE_DSCP = "dscp"
+
+    IF_ETH_REGEX = re.compile(r"^Eth(\d+\/\d+|Eth\d+\/\d+\d+)$")
+    IF_PO_REGEX = re.compile(r"^Po(\d+)$")
+    MLAG_NAME_REGEX = re.compile(r"^Mpo(\d+)$")
+
+    IF_TYPE_ETH = "ethernet"
+    PORT_CHANNEL = "port-channel"
+    MLAG_PORT_CHANNEL = "mlag-port-channel"
+
+    IF_TYPE_MAP = {
+        IF_TYPE_ETH: IF_ETH_REGEX,
+        PORT_CHANNEL: IF_PO_REGEX,
+        MLAG_PORT_CHANNEL: MLAG_NAME_REGEX
+    }
+
+    def init_module(self):
+        """ initialize module
+        """
+        element_spec = dict(
+            interfaces=dict(type='list', required=True),
+            trust=dict(choices=['L2', 'L3', 'both'], default='L2'),
+            rewrite_pcp=dict(choices=['enabled', 'disabled'], default='disabled'),
+            rewrite_dscp=dict(choices=['enabled', 'disabled'], default='disabled')
+        )
+        argument_spec = dict()
+        argument_spec.update(element_spec)
+        self._module = AnsibleModule(
+            argument_spec=argument_spec,
+            supports_check_mode=True)
+
+    def get_required_config(self):
+        module_params = self._module.params
+        self._required_config = dict(module_params)
+        self.validate_param_values(self._required_config)
+
+    def _get_interface_type(self, if_name):
+        if_type = None
+        if_id = None
+        for interface_type, interface_regex in iteritems(self.IF_TYPE_MAP):
+            match = interface_regex.match(if_name)
+            if match:
+                if_type = interface_type
+                if_id = match.group(1)
+                break
+        return if_type, if_id
+
+    def _set_interface_qos_config(self, interface_qos_config, interface, if_type, if_id):
+        interface_qos_config = interface_qos_config[0].get(interface)
+        trust = interface_qos_config[0].get("Trust mode")
+        rewrite_dscp = interface_qos_config[0].get("DSCP rewrite")
+        rewrite_pcp = interface_qos_config[0].get("PCP,DEI rewrite")
+
+        self._current_config[interface] = dict(trust=trust, rewrite_dscp=rewrite_dscp,
+                                               rewrite_pcp=rewrite_pcp, if_type=if_type, if_id=if_id)
+
+    def _show_interface_qos(self, if_type, interface):
+        cmd = "show qos interface {0} {1}".format(if_type, interface)
+        return show_cmd(self._module, cmd, json_fmt=True, fail_on_error=False)
+
+    def load_current_config(self):
+        self._current_config =
dict() + for interface in self._required_config.get("interfaces"): + if_type, if_id = self._get_interface_type(interface) + if not if_id: + self._module.fail_json( + msg='unsupported interface: {0}'.format(interface)) + interface_qos_config = self._show_interface_qos(if_type, if_id) + if interface_qos_config is not None: + self._set_interface_qos_config(interface_qos_config, interface, if_type, if_id) + else: + self._module.fail_json( + msg='Interface {0} does not exist on switch'.format(interface)) + + def generate_commands(self): + trust = self._required_config.get("trust") + rewrite_pcp = self._required_config.get("rewrite_pcp") + rewrite_dscp = self._required_config.get("rewrite_dscp") + for interface in self._required_config.get("interfaces"): + ignored1, ignored2, current_trust, if_type, if_id = self._get_current_rewrite_config(interface) + self._add_interface_trust_cmds(if_type, if_id, interface, trust, current_trust) + self._add_interface_rewrite_cmds(if_type, if_id, interface, + rewrite_pcp, rewrite_dscp) + + def _get_current_rewrite_config(self, interface): + current_interface_qos_config = self._current_config.get(interface) + current_rewrite_pcp = current_interface_qos_config.get('rewrite_pcp') + current_rewrite_dscp = current_interface_qos_config.get('rewrite_dscp') + if_type = current_interface_qos_config.get("if_type") + if_id = current_interface_qos_config.get("if_id") + current_trust = current_interface_qos_config.get('trust') + + return current_rewrite_pcp, current_rewrite_dscp, current_trust, if_type, if_id + + def _add_interface_trust_cmds(self, if_type, if_id, interface, trust, current_trust): + + current_rewrite_pcp, current_rewrite_dscp, ignored1, ignored2, ignored3 = self._get_current_rewrite_config( + interface) + + if trust == "L3" and trust != current_trust: + self._add_no_rewrite_cmd(if_type, if_id, interface, self.REWRITE_DSCP, current_rewrite_dscp) + self._commands.append(self.TRUST_CMD.format(if_type, if_id, trust)) + elif trust == "L2" and trust != current_trust: + self._add_no_rewrite_cmd(if_type, if_id, interface, self.REWRITE_PCP, current_rewrite_pcp) + self._commands.append(self.TRUST_CMD.format(if_type, if_id, trust)) + elif trust == "both" and trust != current_trust: + self._add_no_rewrite_cmd(if_type, if_id, interface, self.REWRITE_DSCP, current_rewrite_dscp) + self._add_no_rewrite_cmd(if_type, if_id, interface, self.REWRITE_PCP, current_rewrite_pcp) + self._commands.append(self.TRUST_CMD.format(if_type, if_id, trust)) + + def _add_interface_rewrite_cmds(self, if_type, if_id, interface, rewrite_pcp, rewrite_dscp): + current_rewrite_pcp, current_rewrite_dscp, ignored1, ignored2, ignored3 = self._get_current_rewrite_config( + interface) + + if rewrite_pcp == "enabled" and rewrite_pcp != current_rewrite_pcp: + self._commands.append(self.REWRITE_PCP_CMD.format(if_type, if_id)) + elif rewrite_pcp == "disabled" and rewrite_pcp != current_rewrite_pcp: + self._commands.append(self.NO_REWRITE_PCP_CMD.format(if_type, if_id)) + + if rewrite_dscp == "enabled" and rewrite_dscp != current_rewrite_dscp: + self._commands.append(self.REWRITE_DSCP_CMD.format(if_type, if_id)) + elif rewrite_dscp == "disabled" and rewrite_dscp != current_rewrite_dscp: + self._commands.append(self.NO_REWRITE_DSCP_CMD.format(if_type, if_id)) + + def _add_no_rewrite_cmd(self, if_type, if_id, interface, rewrite_type, current_rewrite): + if rewrite_type == self.REWRITE_PCP and current_rewrite == "enabled": + self._commands.append(self.NO_REWRITE_PCP_CMD.format(if_type, if_id)) + 
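+            # Mirror the change in the cached config so the rewrite handling
+            # that runs afterwards does not emit a duplicate "no ... rewrite" command.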
self._current_config[interface]["rewrite_pcp"] = "disabled" + elif rewrite_type == self.REWRITE_DSCP and current_rewrite == "enabled": + self._commands.append(self.NO_REWRITE_DSCP_CMD.format(if_type, if_id)) + self._current_config[interface]["rewrite_dscp"] = "disabled" + + +def main(): + """ main entry point for module execution + """ + OnyxQosModule.main() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/onyx/onyx_snmp.py b/plugins/modules/network/onyx/onyx_snmp.py new file mode 100644 index 0000000000..92019272cf --- /dev/null +++ b/plugins/modules/network/onyx/onyx_snmp.py @@ -0,0 +1,426 @@ +#!/usr/bin/python +# +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: onyx_snmp +author: "Sara-Touqan (@sarato)" +short_description: Manages SNMP general configurations on Mellanox ONYX network devices +description: + - This module provides declarative management of SNMP + on Mellanox ONYX network devices. +options: + state_enabled: + description: + - Enables/Disables the state of the SNMP configuration. + type: bool + contact_name: + description: + - Sets the SNMP contact name. + type: str + location: + description: + - Sets the SNMP location. + type: str + communities_enabled: + description: + - Enables/Disables community-based authentication on the system. + type: bool + multi_communities_enabled: + description: + - Enables/Disables multiple communities to be configured. + type: bool + snmp_communities: + type: list + description: + - List of snmp communities + suboptions: + community_name: + description: + - Configures snmp community name. + required: true + type: str + community_type: + description: + - Add this community as either a read-only or read-write community. + choices: ['read-only', 'read-write'] + type: str + state: + description: + - Used to decide if you want to delete the given snmp community or not + choices: ['present', 'absent'] + type: str + notify_enabled: + description: + - Enables/Disables sending of SNMP notifications (traps and informs) from thee system. + type: bool + notify_port: + description: + - Sets the default port to which notifications are sent. + type: str + notify_community: + description: + - Sets the default community for SNMP v1 and v2c notifications sent to hosts which do not have a community override set. + type: str + notify_send_test: + description: + - Sends a test notification. + type: str + choices: ['yes','no'] + notify_event: + description: + - Specifys which events will be sent as SNMP notifications. 
+ type: str + choices: ['asic-chip-down', 'dcbx-pfc-port-oper-state-trap', 'insufficient-power', 'mstp-new-bridge-root', + 'ospf-lsdb-approaching-overflow', 'sm-stop', 'user-logout', 'cli-line-executed', 'dcbx-pfc-port-peer-state-trap', + 'interface-down', 'mstp-new-root-port', 'ospf-lsdb-overflow', 'snmp-authtrap', 'xstp-new-root-bridge', + 'cpu-util-high', 'disk-io-high', 'interface-up', 'mstp-topology-change', 'ospf-nbr-state-change', + 'temperature-too-high', 'xstp-root-port-change', 'dcbx-ets-module-state-change', 'disk-space-low', + 'internal-bus-error', 'netusage-high', 'paging-high', 'topology_change', 'xstp-topology-change', + 'dcbx-ets-port-admin-state-trap', 'entity-state-change', 'internal-link-speed-mismatch', 'new_root', + 'power-redundancy-mismatch', 'unexpected-cluster-join', 'dcbx-ets-port-oper-state-trap', 'expected-shutdown', + 'liveness-failure', 'ospf-auth-fail', 'process-crash', 'unexpected-cluster-leave', 'dcbx-ets-port-peer-state-trap', + 'health-module-status', 'low-power', 'ospf-config-error', 'process-exit', 'unexpected-cluster-size', + 'dcbx-pfc-module-state-change', 'insufficient-fans', 'low-power-recover', 'ospf-if-rx-bad-packet', + 'sm-restart', 'unexpected-shutdown', 'dcbx-pfc-port-admin-state-trap', 'insufficient-fans-recover', 'memusage-high', + 'ospf-if-state-change', 'sm-start', 'user-login'] + engine_id_reset: + description: + - Sets SNMPv3 engineID to node unique value. + type: bool + snmp_permissions: + type: list + description: + - Allow SNMPSET requests for items in a MIB. + suboptions: + state_enabled: + description: + - Enables/Disables the request. + required: true + type: bool + permission_type: + description: + - Configures the request type. + choices: ['MELLANOX-CONFIG-DB-MIB', 'MELLANOX-EFM-MIB','MELLANOX-POWER-CYCLE','MELLANOX-SW-UPDATE','RFC1213-MIB'] + type: str +''' + +EXAMPLES = """ +- name: configure SNMP + onyx_snmp: + state_enabled: yes + contact_name: sara + location: Nablus + communities_enabled: no + multi_communities_enabled: no + notify_enabled: yes + notify_port: 1 + notify_community: community_1 + notify_send_test: yes + notify_event: temperature-too-high + snmp_communities: + - community_name: public + community_type: read-only + state: absent + snmp_permissions: + - state_enabled: yes + permission_type: MELLANOX-CONFIG-DB-MIB +""" + +RETURN = """ +commands: + description: The list of configuration mode commands to send to the device + returned: always. + type: list + sample: + - snmp-server enable + - no snmp-server enable + - snmp-server location + - snmp-server contact + - snmp-server enable communities + - no snmp-server enable communities + - snmp-server enable mult-communities + - no snmp-server enable mult-communities + - snmp-server enable notify + - snmp-server notify port + - snmp-server notify community + - snmp-server notify send-test + - snmp-server notify event + - snmp-server enable set-permission + - no snmp-server enable set-permission + - snmp-server community + - no snmp-server community . + - snmp-server engineID reset. 
+""" + + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.network.onyx.onyx import BaseOnyxModule +from ansible_collections.community.general.plugins.module_utils.network.onyx.onyx import show_cmd + + +class OnyxSNMPModule(BaseOnyxModule): + + def init_module(self): + """ module initialization + """ + + community_spec = dict(community_name=dict(required=True), + community_type=dict(choices=['read-only', 'read-write']), + state=dict(choices=['present', 'absent'])) + + snmp_permission_spec = dict(state_enabled=dict(type='bool', required=True), + permission_type=dict(choices=['MELLANOX-CONFIG-DB-MIB', 'MELLANOX-EFM-MIB', 'MELLANOX-POWER-CYCLE', + 'MELLANOX-SW-UPDATE', 'RFC1213-MIB'])) + + event_choices = ['asic-chip-down', 'dcbx-pfc-port-oper-state-trap', 'insufficient-power', 'mstp-new-bridge-root', + 'ospf-lsdb-approaching-overflow', 'sm-stop', 'user-logout', 'cli-line-executed', 'dcbx-pfc-port-peer-state-trap', + 'interface-down', 'mstp-new-root-port', 'ospf-lsdb-overflow', 'snmp-authtrap', 'xstp-new-root-bridge', + 'cpu-util-high', 'disk-io-high', 'interface-up', 'mstp-topology-change', 'ospf-nbr-state-change', + 'temperature-too-high', 'xstp-root-port-change', 'dcbx-ets-module-state-change', 'disk-space-low', + 'internal-bus-error', 'netusage-high', 'paging-high', 'topology_change', 'xstp-topology-change', + 'dcbx-ets-port-admin-state-trap', 'entity-state-change', 'internal-link-speed-mismatch', 'new_root', + 'power-redundancy-mismatch', 'unexpected-cluster-join', 'dcbx-ets-port-oper-state-trap', 'expected-shutdown', + 'liveness-failure', 'ospf-auth-fail', 'process-crash', 'unexpected-cluster-leave', 'dcbx-ets-port-peer-state-trap', + 'health-module-status', 'low-power', 'ospf-config-error', 'process-exit', 'unexpected-cluster-size', + 'dcbx-pfc-module-state-change', 'insufficient-fans', 'low-power-recover', 'ospf-if-rx-bad-packet', + 'sm-restart', 'unexpected-shutdown', 'dcbx-pfc-port-admin-state-trap', 'insufficient-fans-recover', 'memusage-high', + 'ospf-if-state-change', 'sm-start', 'user-login'] + element_spec = dict( + state_enabled=dict(type='bool'), + contact_name=dict(type='str'), + location=dict(type='str'), + communities_enabled=dict(type='bool'), + multi_communities_enabled=dict(type='bool'), + snmp_communities=dict(type='list', elements='dict', options=community_spec), + notify_enabled=dict(type='bool'), + notify_port=dict(type='str'), + notify_community=dict(type='str'), + notify_send_test=dict(type='str', choices=['yes', 'no']), + notify_event=dict(type='str', choices=event_choices), + engine_id_reset=dict(type='bool'), + snmp_permissions=dict(type='list', elements='dict', options=snmp_permission_spec) + ) + argument_spec = dict() + argument_spec.update(element_spec) + self._module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True) + + def get_required_config(self): + module_params = self._module.params + self._required_config = dict(module_params) + self.validate_param_values(self._required_config) + + def _show_snmp_config(self): + show_cmds = [] + cmd = "show snmp" + show_cmds.append(show_cmd(self._module, cmd, json_fmt=True, fail_on_error=False)) + cmd = "show running-config | include snmp" + show_cmds.append(show_cmd(self._module, cmd, json_fmt=True, fail_on_error=False)) + return show_cmds + + def _set_snmp_config(self, all_snmp_config): + ro_communities_list = [] + rw_communities_list = [] + snmp_config = all_snmp_config[0] + if not snmp_config: + return + if 
snmp_config.get("SNMP enabled") == 'yes': + self._current_config['state_enabled'] = True + else: + self._current_config['state_enabled'] = False + self._current_config['contact_name'] = snmp_config.get("System contact") + self._current_config['location'] = snmp_config.get("System location") + curr_ro_comm = snmp_config.get("Read-only community") + if curr_ro_comm: + ro_arr = curr_ro_comm.split(' ') + rw_arr = snmp_config.get("Read-write community").split(' ') + ro_communities_list = ro_arr[0] + rw_communities_list = rw_arr[0] + if (len(ro_arr) == 2): + self._current_config['communities_enabled'] = False + else: + self._current_config['communities_enabled'] = True + else: + read_only_communities = all_snmp_config[1] + read_write_communities = all_snmp_config[2] + if not read_only_communities: + return + read_only_comm = read_only_communities.get("Read-only communities") + if read_only_comm: + self._current_config['communities_enabled'] = True + ro_communities_list = read_only_comm[0].get("Lines") + else: + self._current_config['communities_enabled'] = False + ro_comm_disabled = read_only_communities.get("Read-only communities (DISABLED)") + if ro_comm_disabled: + ro_communities_list = ro_comm_disabled[0].get("Lines") + if not read_write_communities: + return + read_write_comm = read_write_communities.get("Read-write communities") + if read_write_comm: + self._current_config['communities_enabled'] = True + rw_communities_list = read_write_comm[0].get("Lines") + else: + self._current_config['communities_enabled'] = False + rw_comm_disabled = read_write_communities.get("Read-write communities (DISABLED)") + if rw_comm_disabled: + rw_communities_list = rw_comm_disabled[0].get("Lines") + self._current_config['ro_communities_list'] = ro_communities_list + self._current_config['rw_communities_list'] = rw_communities_list + + def _set_snmp_running_config(self, snmp_running_config): + self._current_config['multi_comm_enabled'] = True + self._current_config['notify_enabled'] = True + curr_config_arr = [] + snmp_lines = snmp_running_config.get('Lines') + for runn_config in snmp_lines: + curr_config_arr.append(runn_config.strip()) + if 'no snmp-server enable mult-communities' in snmp_lines: + self._current_config['multi_comm_enabled'] = False + if 'no snmp-server enable notify' in snmp_lines: + self._current_config['notify_enabled'] = False + self._current_config['snmp_running_config'] = curr_config_arr + + def load_current_config(self): + self._current_config = dict() + snmp_config = self._show_snmp_config() + if snmp_config[0]: + self._set_snmp_config(snmp_config[0]) + if snmp_config[1]: + self._set_snmp_running_config(snmp_config[1]) + + def generate_commands(self): + current_state = self._current_config.get("state_enabled") + state = current_state + req_state = self._required_config.get("state_enabled") + if req_state is not None: + state = req_state + if state is not None: + if current_state != state: + if state is True: + self._commands.append('snmp-server enable') + else: + self._commands.append('no snmp-server enable') + + contact_name = self._required_config.get("contact_name") + if contact_name: + current_contact_name = self._current_config.get("contact_name") + if contact_name is not None: + if current_contact_name != contact_name: + self._commands.append('snmp-server contact {0}' .format(contact_name)) + + location = self._required_config.get("location") + if location: + current_location = self._current_config.get("location") + if location is not None: + if current_location != location: + 
self._commands.append('snmp-server location {0}' .format(location)) + + communities_enabled = self._required_config.get("communities_enabled") + if communities_enabled is not None: + current_communities_enabled = self._current_config.get("communities_enabled") + if communities_enabled is not None: + if current_communities_enabled != communities_enabled: + if communities_enabled is True: + self._commands.append('snmp-server enable communities') + else: + self._commands.append('no snmp-server enable communities') + + ro_communities = self._current_config.get("ro_communities_list") + rw_communities = self._current_config.get("rw_communities_list") + snmp_communities = self._required_config.get("snmp_communities") + if snmp_communities: + if snmp_communities is not None: + for community in snmp_communities: + community_name = community.get("community_name") + state = community.get("state") + if state: + if state == 'absent': + self._commands.append('no snmp-server community {0}' .format(community_name)) + continue + community_type = community.get("community_type") + if community_type: + if community_type == 'read-only': + if community_name not in ro_communities: + self._commands.append('snmp-server community {0} ro' .format(community_name)) + else: + if community_name not in rw_communities: + self._commands.append('snmp-server community {0} rw' .format(community_name)) + else: + if community_name not in ro_communities: + self._commands.append('snmp-server community {0}' .format(community_name)) + + engine_id_reset = self._required_config.get("engine_id_reset") + if engine_id_reset is not None: + if engine_id_reset: + self._commands.append('snmp-server engineID reset') + + current_multi_comm_state = self._current_config.get("multi_comm_enabled") + multi_communities_enabled = self._required_config.get("multi_communities_enabled") + if multi_communities_enabled is not None: + if current_multi_comm_state != multi_communities_enabled: + if multi_communities_enabled is True: + self._commands.append('snmp-server enable mult-communities') + else: + self._commands.append('no snmp-server enable mult-communities') + + notify_enabled = self._required_config.get("notify_enabled") + if notify_enabled is not None: + current_notify_state = self._current_config.get("notify_enabled") + if current_notify_state != notify_enabled: + if notify_enabled is True: + self._commands.append('snmp-server enable notify') + else: + self._commands.append('no snmp-server enable notify') + + snmp_permissions = self._required_config.get("snmp_permissions") + if snmp_permissions is not None: + for permission in snmp_permissions: + permission_type = permission.get('permission_type') + if permission.get('state_enabled') is True: + self._commands.append('snmp-server enable set-permission {0}' .format(permission_type)) + else: + self._commands.append('no snmp-server enable set-permission {0}' .format(permission_type)) + + snmp_running_config = self._current_config.get("snmp_running_config") + notify_port = self._required_config.get("notify_port") + if notify_port is not None: + notified_cmd = 'snmp-server notify port {0}' .format(notify_port) + if notified_cmd not in snmp_running_config: + self._commands.append('snmp-server notify port {0}' .format(notify_port)) + + notify_community = self._required_config.get("notify_community") + if notify_community is not None: + notified_cmd = 'snmp-server notify community {0}' .format(notify_community) + if notified_cmd not in snmp_running_config: + self._commands.append('snmp-server notify 
community {0}' .format(notify_community))
+
+        notify_send_test = self._required_config.get("notify_send_test")
+        if notify_send_test is not None:
+            if notify_send_test == 'yes':
+                self._commands.append('snmp-server notify send-test')
+
+        notify_event = self._required_config.get("notify_event")
+        if notify_event is not None:
+            self._commands.append('snmp-server notify event {0}' .format(notify_event))
+
+
+def main():
+    """ main entry point for module execution
+    """
+    OnyxSNMPModule.main()
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/network/onyx/onyx_snmp_hosts.py b/plugins/modules/network/onyx/onyx_snmp_hosts.py
new file mode 100644
index 0000000000..81e49b2525
--- /dev/null
+++ b/plugins/modules/network/onyx/onyx_snmp_hosts.py
@@ -0,0 +1,424 @@
+#!/usr/bin/python
+#
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+DOCUMENTATION = '''
+---
+module: onyx_snmp_hosts
+author: "Sara Touqan (@sarato)"
+short_description: Configures SNMP host parameters
+description:
+  - This module provides declarative management of SNMP hosts protocol params
+    on Mellanox ONYX network devices.
+options:
+  hosts:
+    type: list
+    description:
+      - List of snmp hosts
+    suboptions:
+      name:
+        description:
+          - Specifies the name of the host.
+        required: true
+        type: str
+      enabled:
+        description:
+          - Temporarily enables/disables sending of all notifications to this host.
+        type: bool
+      notification_type:
+        description:
+          - Configures the type of sending notification to the specified host.
+        choices: ['trap', 'inform']
+        type: str
+      port:
+        description:
+          - Overrides the default target port for this host.
+        type: str
+      version:
+        description:
+          - Specifies the SNMP version of informs to send.
+        choices: ['1', '2c', '3']
+        type: str
+      user_name:
+        description:
+          - Specifies the username for this inform sink.
+        type: str
+      auth_type:
+        description:
+          - Configures SNMP v3 security parameters, specifying passwords in another parameter (auth_password) (passwords are always stored encrypted).
+        choices: ['md5', 'sha', 'sha224', 'sha256', 'sha384', 'sha512']
+        type: str
+      auth_password:
+        description:
+          - The password needed to configure the auth type.
+        type: str
+      privacy_type:
+        description:
+          - Specifies the SNMP v3 privacy settings for this user.
+        choices: ['3des', 'aes-128', 'aes-192', 'aes-192-cfb', 'aes-256', 'aes-256-cfb', 'des']
+        type: str
+      privacy_password:
+        description:
+          - The password needed to configure the privacy type.
+        type: str
+      state:
+        description:
+          - Used to decide if you want to delete the specified host or not.
+ choices: ['present' , 'absent'] + type: str +''' + +EXAMPLES = """ +- name: enables snmp host + onyx_snmp_hosts: + hosts: + - name: 1.1.1.1 + enabled: true + +- name: configures snmp host with version 2c + onyx_snmp_hosts: + hosts: + - name: 2.3.2.4 + enabled: true + notification_type: trap + port: 66 + version: 2c + +- name: configures snmp host with version 3 and configures it with user as sara + onyx_snmp_hosts: + hosts: + - name: 2.3.2.4 + enabled: true + notification_type: trap + port: 66 + version: 3 + user_name: sara + auth_type: sha + auth_password: jnbdfijbdsf + privacy_type: 3des + privacy_password: nojfd8uherwiugfh + +- name: deletes the snmp host + onyx_snmp_hosts: + hosts: + - name: 2.3.2.4 + state: absent +""" + +RETURN = """ +commands: + description: The list of configuration mode commands to send to the device. + returned: always + type: list + sample: + - snmp-server host disable + - no snmp-server host disable + - snmp-server host informs port version + - snmp-server host traps port version + - snmp-server host informs port version user auth + priv + - snmp-server host traps port version user auth + priv + - no snmp-server host . +""" + +import re + +from ansible.module_utils.basic import AnsibleModule + +from ansible_collections.community.general.plugins.module_utils.network.onyx.onyx import show_cmd +from ansible_collections.community.general.plugins.module_utils.network.onyx.onyx import BaseOnyxModule + + +class OnyxSNMPHostsModule(BaseOnyxModule): + + def init_module(self): + """ initialize module + """ + host_spec = dict(name=dict(required=True), + enabled=dict(type='bool'), + notification_type=dict(type='str', choices=['trap', 'inform']), + port=dict(type='str'), + version=dict(type='str', choices=['1', '2c', '3']), + user_name=dict(type='str'), + auth_type=dict(type='str', choices=['md5', 'sha', 'sha224', 'sha256', 'sha384', 'sha512']), + privacy_type=dict(type='str', choices=['3des', 'aes-128', 'aes-192', 'aes-192-cfb', 'aes-256', 'aes-256-cfb', 'des']), + privacy_password=dict(type='str', no_log=True), + auth_password=dict(type='str', no_log=True), + state=dict(type='str', choices=['present', 'absent']) + ) + element_spec = dict( + hosts=dict(type='list', elements='dict', options=host_spec), + ) + argument_spec = dict() + argument_spec.update(element_spec) + self._module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True) + + def validate_snmp_required_params(self): + req_hosts = self._required_config.get("hosts") + if req_hosts: + for host in req_hosts: + version = host.get('version') + if version: + if version == '3': + if host.get('user_name') is None or host.get('auth_type') is None or host.get('auth_password') is None: + self._module.fail_json(msg='user_name, auth_type and auth_password are required when version number is 3.') + + if host.get('notification_type') is not None: + if host.get('version') is None or host.get('port') is None: + self._module.fail_json(msg='port and version are required when notification_type is provided.') + + if host.get('auth_type') is not None: + if host.get('auth_password') is None: + self._module.fail_json(msg='auth_password is required when auth_type is provided.') + + if host.get('privacy_type') is not None: + if host.get('privacy_password') is None: + self._module.fail_json(msg='privacy_password is required when privacy_type is provided.') + + def get_required_config(self): + module_params = self._module.params + self._required_config = dict(module_params) + 
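+        # Generic per-option validation runs first; validate_snmp_required_params
+        # then enforces the cross-field rules (e.g. version 3 needs user_name,
+        # auth_type and auth_password).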
self.validate_param_values(self._required_config) + self.validate_snmp_required_params() + + def _set_host_config(self, hosts_config): + hosts = hosts_config.get('Notification sinks') + if hosts[0].get('Lines'): + self._current_config['current_hosts'] = dict() + self._current_config['host_names'] = [] + return + + current_hosts = dict() + host_names = [] + for host in hosts: + host_info = dict() + for host_name in host: + host_names.append(host_name) + enabled = True + first_entry = host.get(host_name)[0] + if first_entry: + if first_entry.get('Enabled') == 'no': + enabled = False + notification_type = first_entry.get('Notification type') + notification_type = notification_type.split() + host_info['notification_type'] = notification_type[2] + version = notification_type[1][1:] + host_info['port'] = first_entry.get('Port') + host_info['name'] = host_name + host_info['enabled'] = enabled + host_info['version'] = version + if first_entry.get('Community') is None: + if len(first_entry) == 8: + host_info['user_name'] = first_entry.get('Username') + host_info['auth_type'] = first_entry.get('Authentication type') + host_info['privacy_type'] = first_entry.get('Privacy type') + elif len(host.get(host_name)) == 2: + second_entry = host.get(host_name)[1] + host_info['user_name'] = second_entry.get('Username') + host_info['auth_type'] = second_entry.get('Authentication type') + host_info['privacy_type'] = second_entry.get('Privacy type') + else: + host_info['user_name'] = '' + host_info['auth_type'] = '' + host_info['privacy_type'] = '' + else: + host_info['user_name'] = '' + host_info['auth_type'] = '' + host_info['privacy_type'] = '' + current_hosts[host_name] = host_info + self._current_config['current_hosts'] = current_hosts + self._current_config['host_names'] = host_names + + def _show_hosts_config(self): + cmd = "show snmp host" + return show_cmd(self._module, cmd, json_fmt=True, fail_on_error=False) + + def load_current_config(self): + self._current_config = dict() + hosts_config = self._show_hosts_config() + if hosts_config[1]: + self._set_host_config(hosts_config[1]) + + def generate_snmp_commands_with_current_config(self, host): + host_id = host.get('name') + host_notification_type = host.get('notification_type') + host_enabled = host.get("enabled") + host_port = host.get('port') + host_version = host.get('version') + host_username = host.get('user_name') + host_auth_type = host.get('auth_type') + host_auth_pass = host.get('auth_password') + host_priv_type = host.get('privacy_type') + host_priv_pass = host.get('privacy_password') + present_state = host.get('state') + current_hosts = self._current_config.get("current_hosts") + current_entry = current_hosts.get(host_id) + if present_state is not None: + if present_state == 'absent': + self._commands.append('no snmp-server host {0}' .format(host_id)) + return + if host_enabled is not None: + if current_entry.get('enabled') != host_enabled: + if host_enabled is True: + self._commands.append('no snmp-server host {0} disable' .format(host_id)) + else: + self._commands.append('snmp-server host {0} disable' .format(host_id)) + if host_notification_type is not None: + current_port = current_entry.get('port') + current_version = current_entry.get('version') + current_priv_type = current_entry.get('privacy_type') + current_username = current_entry.get('user_name') + current_auth_type = current_entry.get('auth_type') + current_noti_type = current_entry.get('notification_type') + if host_port is not None: + if host_version is not None: + if host_version 
== '3': + if (host_priv_type is not None and host_priv_pass is not None): + if((current_noti_type != host_notification_type) or + ((current_port != host_port)) or + (current_version != host_version) or + (current_priv_type != host_priv_type) or + (current_username != host_username) or + (current_auth_type != host_auth_type)): + self._commands.append('snmp-server host {0} {1}s port {2} version {3} user {4} auth {5} {6} priv {7} {8}' + .format(host_id, host_notification_type, host_port, + host_version, host_username, host_auth_type, host_auth_pass, + host_priv_type, host_priv_pass)) + else: + if((current_noti_type != host_notification_type) or + ((current_port != host_port)) or + (current_version != host_version) or + (current_username != host_username) or + (current_auth_type != host_auth_type)): + self._commands.append('snmp-server host {0} {1}s port {2} version {3} user {4} auth {5} {6}' + .format(host_id, host_notification_type, + host_port, host_version, host_username, + host_auth_type, host_auth_pass)) + else: + if((current_noti_type != host_notification_type) or + ((current_port != host_port)) or + (current_version != host_version)): + self._commands.append('snmp-server host {0} {1}s port {2} version {3}' + .format(host_id, host_notification_type, + host_port, host_version)) + else: + if ((current_noti_type != host_notification_type) or + ((current_port != host_port))): + self._commands.append('snmp-server host {0} {1}s port {2}' + .format(host_id, host_notification_type, host_port)) + else: + if host_version is not None: + if host_version == '3': + if (host_priv_type is not None and host_priv_pass is not None): + if ((current_noti_type != host_notification_type) or + ((current_version != host_version)) or + (current_username != host_username) or + ((current_auth_type != host_auth_type)) or + (current_priv_type != host_priv_type)): + self._commands.append('snmp-server host {0} {1}s version {2} user {3} auth {4} {5} priv {6} {7}' + .format(host_id, host_notification_type, host_version, host_username, + host_auth_type, host_auth_pass, host_priv_type, host_priv_pass)) + + else: + if ((current_noti_type != host_notification_type) or + ((current_version != host_version)) or + (current_username != host_username) or + ((current_auth_type != host_auth_type))): + self._commands.append('snmp-server host {0} {1}s version {2} user {3} auth {4} {5}' + .format(host_id, host_notification_type, + host_version, host_username, host_auth_type, host_auth_pass)) + + else: + if ((current_noti_type != host_notification_type) or + ((current_version != host_version))): + self._commands.append('snmp-server host {0} {1}s version {2}' .format(host_id, + host_notification_type, host_version)) + + def generate_snmp_commands_without_current_config(self, host): + host_id = host.get('name') + host_notification_type = host.get('notification_type') + host_enabled = host.get("enabled") + host_port = host.get('port') + host_version = host.get('version') + host_username = host.get('user_name') + host_auth_type = host.get('auth_type') + host_auth_pass = host.get('auth_password') + host_priv_type = host.get('privacy_type') + host_priv_pass = host.get('privacy_password') + present_state = host.get('state') + present_state = host.get('state') + if present_state is not None: + if present_state == 'absent': + return + if host_enabled is not None: + if host_enabled is True: + self._commands.append('no snmp-server host {0} disable' .format(host_id)) + else: + self._commands.append('snmp-server host {0} disable' .format(host_id)) 
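+        # The host does not exist on the switch yet, so every requested attribute
+        # is rendered into a command directly, without comparing current state.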
+ + if host_notification_type is not None: + if host_port is not None: + if host_version is not None: + if host_version == '3': + if (host_priv_type is not None and host_priv_pass is not None): + self._commands.append('snmp-server host {0} {1}s port {2} version {3} user {4} auth {5} {6} priv {7} {8}' + .format(host_id, host_notification_type, host_port, host_version, host_username, + host_auth_type, host_auth_pass, host_priv_type, host_priv_pass)) + else: + self._commands.append('snmp-server host {0} {1}s port {2} version {3} user {4} auth {5} {6}' + .format(host_id, host_notification_type, host_port, host_version, host_username, + host_auth_type, host_auth_pass)) + else: + self._commands.append('snmp-server host {0} {1}s port {2} version {3}' .format(host_id, + host_notification_type, host_port, host_version)) + else: + self._commands.append('snmp-server host {0} {1}s port {2}' .format(host_id, + host_notification_type, host_port)) + else: + if host_version is not None: + if host_version == '3': + if (host_priv_type is not None and host_priv_pass is not None): + self._commands.append('snmp-server host {0} {1}s version {2} user {3} auth {4} {5} priv {6} {7}' + .format(host_id, host_notification_type, host_version, host_username, + host_auth_type, host_auth_pass, host_priv_type, host_priv_pass)) + else: + self._commands.append('snmp-server host {0} {1}s version {2} user {3} auth {4} {5}' .format(host_id, + host_notification_type, host_version, host_username, + host_auth_type, host_auth_pass)) + else: + self._commands.append('snmp-server host {0} {1}s version {2}' .format(host_id, + host_notification_type, host_version)) + + def generate_commands(self): + req_hosts = self._required_config.get("hosts") + host_names = self._current_config.get("host_names") + + if req_hosts: + for host in req_hosts: + host_id = host.get('name') + if host_id: + if host_names and (host_id in host_names): + self.generate_snmp_commands_with_current_config(host) + else: + self.generate_snmp_commands_without_current_config(host) + + +def main(): + """ main entry point for module execution + """ + OnyxSNMPHostsModule.main() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/onyx/onyx_snmp_users.py b/plugins/modules/network/onyx/onyx_snmp_users.py new file mode 100644 index 0000000000..c4c0c19b6f --- /dev/null +++ b/plugins/modules/network/onyx/onyx_snmp_users.py @@ -0,0 +1,277 @@ +#!/usr/bin/python +# +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: onyx_snmp_users +author: "Sara Touqan (@sarato)" +short_description: Configures SNMP User parameters +description: + - This module provides declarative management of SNMP Users protocol params + on Mellanox ONYX network devices. +options: + users: + type: list + description: + - List of snmp users + suboptions: + name: + description: + - Specifies the name of the user. + required: true + type: str + enabled: + description: + - Enables/Disables SNMP v3 access for the user. + type: bool + set_access_enabled: + description: + - Enables/Disables SNMP SET requests for the user. 
+ type: bool + require_privacy: + description: + - Enables/Disables the Require privacy (encryption) for requests from this user + type: bool + auth_type: + description: + - Configures the hash type used to configure SNMP v3 security parameters. + choices: ['md5', 'sha', 'sha224', 'sha256', 'sha384', 'sha512'] + type: str + auth_password: + description: + - The password needed to configure the hash type. + type: str + capability_level: + description: + - Sets capability level for SET requests. + choices: ['admin','monitor','unpriv','v_admin'] + type: str +''' + +EXAMPLES = """ +- name: enables snmp user + onyx_snmp_users: + users: + - name: sara + enabled: true + +- name: enables snmp set requests + onyx_snmp_users: + users: + - name: sara + set_access_enabled: yes + +- name: enables user require privacy + onyx_snmp_users: + users: + - name: sara + require_privacy: true + +- name: configures user hash type + onyx_snmp_users: + users: + - auth_type: md5 + auth_password: 1297sara1234sara + +- name: configures user capability_level + onyx_snmp_users: + users: + - name: sara + capability_level: admin +""" + +RETURN = """ +commands: + description: The list of configuration mode commands to send to the device. + returned: always + type: list + sample: + - snmp-server user v3 enable + - no snmp-server user v3 enable + - snmp-server user v3 enable sets + - no snmp-server user v3 enable sets + - snmp-server user v3 require-privacy + - no snmp-server user v3 require-privacy + - snmp-server user v3 capability + - snmp-server user v3 auth +""" + +import re + +from ansible.module_utils.basic import AnsibleModule + +from ansible_collections.community.general.plugins.module_utils.network.onyx.onyx import show_cmd +from ansible_collections.community.general.plugins.module_utils.network.onyx.onyx import BaseOnyxModule + + +class OnyxSNMPUsersModule(BaseOnyxModule): + + def init_module(self): + """ initialize module + """ + user_spec = dict(name=dict(required=True), + enabled=dict(type='bool'), + set_access_enabled=dict(type='bool'), + require_privacy=dict(type='bool'), + auth_type=dict(type='str', choices=['md5', 'sha', 'sha224', 'sha256', 'sha384', 'sha512']), + auth_password=dict(type='str'), + capability_level=dict(type='str', choices=['admin', 'monitor', 'unpriv', 'v_admin']), + ) + element_spec = dict( + users=dict(type='list', elements='dict', options=user_spec), + ) + argument_spec = dict() + argument_spec.update(element_spec) + self._module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True) + + def get_required_config(self): + module_params = self._module.params + self._required_config = dict(module_params) + self.validate_param_values(self._required_config) + + def _set_snmp_config(self, users_config): + if users_config[0]: + if users_config[0].get('Lines'): + return + current_users = [] + count = 0 + enabled = True + set_access_enabled = True + require_privacy = True + auth_type = '' + capability_level = '' + name = '' + all_users_names = [] + for user in users_config: + user_dict = {} + entry_dict = {} + for entry in user: + name = entry.split()[2] + if user.get(entry): + if user.get(entry)[0]: + enabled = user.get(entry)[0].get('Enabled overall') + if enabled == 'no': + enabled = False + else: + enabled = True + set_access_enabled = user.get(entry)[1].get('SET access')[0].get('Enabled') + if set_access_enabled == 'no': + set_access_enabled = False + else: + set_access_enabled = True + require_privacy = user.get(entry)[0].get('Require privacy') + if require_privacy == 
'yes': + require_privacy = True + else: + require_privacy = False + capability_level = user.get(entry)[1].get('SET access')[0].get('Capability level') + auth_type = user.get(entry)[0].get('Authentication type') + user_dict['enabled'] = enabled + user_dict['set_access_enabled'] = set_access_enabled + user_dict['auth_type'] = auth_type + user_dict['capability_level'] = capability_level + user_dict['require_privacy'] = require_privacy + entry_dict[name] = user_dict + all_users_names.append(name) + current_users.append(entry_dict) + self._current_config['users'] = current_users + self._current_config['current_names'] = all_users_names + + def _show_users(self): + cmd = "show snmp user" + return show_cmd(self._module, cmd, json_fmt=True, fail_on_error=False) + + def load_current_config(self): + self._current_config = dict() + users_config = self._show_users() + if users_config: + self._set_snmp_config(users_config) + + def generate_commands(self): + req_uers = self._required_config.get("users") + current_users = self._current_config.get("users") + current_names = self._current_config.get("current_names") + if req_uers: + for user in req_uers: + user_id = user.get('name') + if user_id: + if current_names and (user_id in current_names): + for user_entry in current_users: + for user_name in user_entry: + if user_name == user_id: + user_state = user.get("enabled") + user_entry_name = user_entry.get(user_name) + if user_state is not None: + if user_state != user_entry_name.get("enabled"): + if user_state is True: + self._commands.append('snmp-server user {0} v3 enable' .format(user_id)) + else: + self._commands.append('no snmp-server user {0} v3 enable' .format(user_id)) + set_state = user.get("set_access_enabled") + if set_state is not None: + if set_state != user_entry_name.get("set_access_enabled"): + if set_state is True: + self._commands.append('snmp-server user {0} v3 enable sets' .format(user_id)) + else: + self._commands.append('no snmp-server user {0} v3 enable sets' .format(user_id)) + auth_type = user.get("auth_type") + if auth_type is not None: + if user.get("auth_password") is not None: + if auth_type != user_entry_name.get("auth_type"): + self._commands.append('snmp-server user {0} v3 auth {1} {2}' + .format(user_id, user.get('auth_type'), user.get('auth_password'))) + cap_level = user.get("capability_level") + if cap_level is not None: + if cap_level != user_entry_name.get("capability_level"): + self._commands.append('snmp-server user {0} v3 capability {1}' + .format(user_id, user.get('capability_level'))) + req_priv = user.get("require_privacy") + if req_priv is not None: + if req_priv != user_entry_name.get("require_privacy"): + if req_priv is True: + self._commands.append('snmp-server user {0} v3 require-privacy' .format(user_id)) + else: + self._commands.append('no snmp-server user {0} v3 require-privacy' .format(user_id)) + + else: + user_state = user.get("enabled") + if user_state is not None: + if user_state is True: + self._commands.append('snmp-server user {0} v3 enable' .format(user_id)) + else: + self._commands.append('no snmp-server user {0} v3 enable' .format(user_id)) + set_state = user.get("set_access_enabled") + if set_state is not None: + if set_state is True: + self._commands.append('snmp-server user {0} v3 enable sets' .format(user_id)) + else: + self._commands.append('no snmp-server user {0} v3 enable sets' .format(user_id)) + if user.get("capability_level") is not None: + self._commands.append('snmp-server user {0} v3 capability {1}' .format(user_id, 
user.get('capability_level')))
+                        req_priv = user.get("require_privacy")
+                        if req_priv is not None:
+                            if req_priv is True:
+                                self._commands.append('snmp-server user {0} v3 require-privacy'.format(user_id))
+                            else:
+                                self._commands.append('no snmp-server user {0} v3 require-privacy'.format(user_id))
+
+
+def main():
+    """ main entry point for module execution
+    """
+    OnyxSNMPUsersModule.main()
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/network/onyx/onyx_syslog_files.py b/plugins/modules/network/onyx/onyx_syslog_files.py
new file mode 100644
index 0000000000..2cc974a2c2
--- /dev/null
+++ b/plugins/modules/network/onyx/onyx_syslog_files.py
@@ -0,0 +1,251 @@
+#!/usr/bin/python
+#
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+DOCUMENTATION = '''
+module: onyx_syslog_files
+author: "Anas Shami (@anass)"
+short_description: Configure syslog file management
+description:
+  - This module provides declarative management of syslog
+    on Mellanox ONYX network devices.
+notes:
+options:
+  debug:
+    description:
+      - Configure settings for debug log files.
+    type: bool
+    default: False
+  delete_group:
+    description:
+      - Delete the current or the oldest log files.
+    choices: ['current', 'oldest']
+    type: str
+  rotation:
+    description:
+      - Log rotation related attributes.
+    type: dict
+    suboptions:
+      frequency:
+        description:
+          - Rotate log files on a fixed time-based schedule.
+        choices: ['daily', 'weekly', 'monthly']
+        type: str
+      force:
+        description:
+          - Force an immediate rotation of log files.
+        type: bool
+      max_num:
+        description:
+          - Specify the maximum number of old log files to keep.
+        type: int
+      size:
+        description:
+          - Rotate log files when they exceed this size (in MB).
+        type: float
+      size_pct:
+        description:
+          - Rotate log files when they exceed this percentage of disk capacity.
+        type: float
+  upload_url:
+    description:
+      - Upload local log files to a remote host (ftp, scp, sftp, tftp) using the
+        format protocol://username[:password]@server/path.
+    type: str
+  upload_file:
+    description:
+      - Upload a compressed log file (current or a given file name).
+    type: str
+'''
+
+EXAMPLES = """
+- name: syslog delete old files
+  onyx_syslog_files:
+    delete_group: oldest
+
+- name: syslog upload file
+  onyx_syslog_files:
+    upload_url: scp://username:password@hostname/path/filename
+    upload_file: current
+
+- name: syslog rotation force, frequency and max number
+  onyx_syslog_files:
+    rotation:
+      force: true
+      max_num: 30
+      frequency: daily
+      size: 128
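+
+# The task below is an illustrative sketch (hypothetical values), combining the
+# debug and size_pct options to rotate debug log files at 10% of disk capacity:
+- name: syslog debug files rotation by disk percentage
+  onyx_syslog_files:
+    debug: true
+    rotation:
+      size_pct: 10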
+"""
+
+RETURN = """
+commands:
+  description: The list of configuration mode commands to send to the device.
+  returned: always
+  type: list
+  sample:
+    - logging files delete current
+    - logging files rotate criteria
+    - logging files upload current url
+"""
+import re
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.network.onyx.onyx import show_cmd
+from ansible_collections.community.general.plugins.module_utils.network.onyx.onyx import BaseOnyxModule
+
+
+class OnyxSyslogFilesModule(BaseOnyxModule):
+    MAX_FILES = 999999
+    # Protocols aligned with the documented upload_url formats (ftp, scp, sftp, tftp).
+    URL_REGEX = re.compile(
+        r'^(ftp|tftp|scp|sftp):\/\/[a-z0-9\.]*:(.*)@(.*):([a-zA-Z\/])*$')
+    FREQUENCIES = ['daily', 'weekly', 'monthly']
+    ROTATION_KEYS = ['frequency', 'max_num', 'size', 'size_pct', 'force']
+    ROTATION_CMDS = {'size': 'logging {0} rotation criteria size {1}',
+                     'frequency': 'logging {0} rotation criteria frequency {1}',
+                     'max_num': 'logging {0} rotation max-num {1}',
+                     'size_pct': 'logging {0} rotation criteria size-pct {1}',
+                     'force': 'logging {0} rotation force'}
+
+    def init_module(self):
+        """ Ansible module initialization
+        """
+        rotation_spec = dict(frequency=dict(choices=self.FREQUENCIES),
+                             max_num=dict(type="int"),
+                             force=dict(type="bool"),
+                             size=dict(type="float"),
+                             size_pct=dict(type="float"))
+
+        element_spec = dict(delete_group=dict(choices=['oldest', 'current']),
+                            rotation=dict(type="dict", options=rotation_spec),
+                            upload_file=dict(type="str"),
+                            upload_url=dict(type="str"),
+                            debug=dict(type="bool", default=False))
+
+        argument_spec = dict()
+        argument_spec.update(element_spec)
+        self._module = AnsibleModule(
+            argument_spec=argument_spec,
+            supports_check_mode=True,
+            required_together=[['upload_file', 'upload_url']])
+
+    def validate_rotation(self, rotation):
+        size_pct = rotation.get('size_pct', None)
+        max_num = rotation.get('max_num', None)
+        if size_pct is not None and (float(size_pct) < 0 or float(size_pct) > 100):
+            self._module.fail_json(
+                msg='logging size_pct must be in range 0-100')
+        elif max_num is not None and (int(max_num) < 0 or int(max_num) > self.MAX_FILES):
+            self._module.fail_json(
+                msg='logging max_num must be a positive number less than {0}'.format(self.MAX_FILES))
+
+    def validate_upload_url(self, upload_url):
+        check = self.URL_REGEX.match(upload_url)
+        if upload_url and not check:
+            self._module.fail_json(
+                msg='Invalid url, make sure that you use "[ftp, tftp, scp, sftp]://username:password@hostname:/location" format')
+
+    def show_logging(self):
+        show_logging = show_cmd(self._module, "show logging", json_fmt=True, fail_on_error=False)
+        running_config = show_cmd(self._module, "show running-config | include .*logging.*debug-files.*", json_fmt=True, fail_on_error=False)
+
+        if len(show_logging) > 0:
+            show_logging[0]['debug'] = running_config['Lines'] if 'Lines' in running_config else []
+        else:
+            show_logging = [{
+                'debug': running_config['Lines'] if 'Lines' in running_config else []
+            }]
+        return show_logging
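+
+    # Sketch of the (assumed) "show logging" output consumed by load_current_config
+    # below; the keys match those read by the code, the values are illustrative:
+    #   {
+    #       "Log rotation frequency": "daily (Once per day at midnight)",
+    #       "Log rotation size threshold": "10.000% of partition (987.84 megabytes)",
+    #       "Number of archived log files to keep": "10",
+    #       "debug": ["logging debug-files rotation criteria size-pct 10"]
+    #   }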
+    def load_current_config(self):
+        self._current_config = dict()
+        current_config = self.show_logging()[0]
+        freq = current_config.get('Log rotation frequency')  # daily (Once per day at midnight)
+        size = current_config.get('Log rotation size threshold')  # 19.07 megabytes or 10.000% of partition (987.84 megabytes)
+        max_num = current_config.get('Number of archived log files to keep')
+        if freq is not None:
+            freq_str = freq.split()[0]
+            self._current_config['frequency'] = freq_str
+
+        if size is not None:
+            size_arr = size.split(' ')
+            if '%' in size:
+                size_pct_value = size_arr[0].replace('%', '')
+                self._current_config['size_pct'] = float(size_pct_value)
+                size_value = re.sub(r'(\(|\)|megabytes)', '', size_arr[-2]).strip()
+                self._current_config['size'] = float(size_value)
+            else:
+                size_value = size_arr[0]
+                self._current_config['size'] = float(size_value)
+
+        if max_num is not None:
+            self._current_config['max_num'] = int(max_num)
+
+        # debug params; check 'size-pct' before 'size', since the 'size'
+        # substring would otherwise also match 'size-pct' lines
+        for line in current_config['debug']:
+            if 'size-pct' in line:
+                self._current_config['debug_size_pct'] = float(line.split(' ')[-1])
+            elif 'size' in line:
+                self._current_config['debug_size'] = float(line.split(' ')[-1])
+            elif 'frequency' in line:
+                self._current_config['debug_frequency'] = line.split(' ')[-1]
+            elif 'max-num' in line:
+                self._current_config['debug_max_num'] = int(line.split(' ')[-1])
+
+    def get_required_config(self):
+        self._required_config = dict()
+        required_config = dict()
+        module_params = self._module.params
+
+        delete_group = module_params.get('delete_group')
+        upload_file = module_params.get('upload_file')
+        rotation = module_params.get('rotation')
+        if delete_group:
+            required_config['delete_group'] = delete_group
+        if upload_file:
+            required_config.update({'upload_file': upload_file,
+                                    'upload_url': module_params.get('upload_url')})
+        if rotation:
+            required_config['rotation'] = rotation
+        required_config['debug'] = module_params['debug']
+
+        self.validate_param_values(required_config)
+        self._required_config = required_config
+
+    def generate_commands(self):
+        required_config = self._required_config
+        current_config = self._current_config
+
+        logging_files_type = 'debug-files' if required_config['debug'] else 'files'
+        debug_prefix = 'debug_' if required_config['debug'] else ''
+
+        rotation = required_config.get('rotation')
+        if rotation:
+            for key in rotation:
+                if rotation.get(key) and current_config.get(debug_prefix + key) != rotation.get(key):
+                    cmd = self.ROTATION_CMDS[key].format(logging_files_type, rotation[key]) if key != 'force' else\
+                        self.ROTATION_CMDS[key].format(logging_files_type)
+                    self._commands.append(cmd)
+
+        delete_group = required_config.get('delete_group')
+        if delete_group:
+            self._commands.append('logging {0} delete {1}'.format(logging_files_type,
+                                                                  delete_group))
+
+        upload_file = required_config.get('upload_file')
+        if upload_file:
+            self._commands.append('logging {0} upload {1} {2}'.format(logging_files_type,
+                                                                      upload_file, required_config.get('upload_url')))
+
+
+def main():
+    """ main entry point for module execution
+    """
+    OnyxSyslogFilesModule.main()
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/network/onyx/onyx_syslog_remote.py b/plugins/modules/network/onyx/onyx_syslog_remote.py
new file mode 100644
index 0000000000..507d684252
--- /dev/null
+++ b/plugins/modules/network/onyx/onyx_syslog_remote.py
@@ -0,0 +1,349 @@
+#!/usr/bin/python
+#
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+DOCUMENTATION = '''
+module: onyx_syslog_remote
+author: "Anas Shami (@anass)"
+short_description: Configure remote syslog
+description:
+  - This module provides declarative management of syslog
+    on Mellanox ONYX network devices.
+notes:
+options:
+  enabled:
+    description:
+      - Disable/Enable logging to the given remote host.
+    default: true
+    type: bool
+  host:
+    description:
+      - Send event logs to this server using the syslog protocol.
+    required: true
+    type: str
+  port:
+    description:
+      - Set the remote server destination port for log messages.
+    type: int
+  trap:
+    description:
+      - Minimum severity level for messages to this syslog server.
+    choices: ['none', 'debug', 'info', 'notice', 'alert', 'warning', 'err', 'emerg', 'crit']
+    type: str
+  trap_override:
+    description:
+      - Override log levels for this sink on a per-class basis.
+    type: list
+    suboptions:
+      override_class:
+        description:
+          - Specify a class whose log level to override.
+        choices: ['mgmt-front', 'mgmt-back', 'mgmt-core', 'events', 'debug-module', 'sx-sdk', 'mlx-daemons', 'protocol-stack']
+        required: True
+        type: str
+      override_priority:
+        description:
+          - Specify a priority whose log level to override.
+        choices: ['none', 'debug', 'info', 'notice', 'alert', 'warning', 'err', 'emerg', 'crit']
+        type: str
+      override_enabled:
+        description:
+          - Enable/Disable the priority override for the specified class.
+        default: True
+        type: bool
+
+  filter:
+    description:
+      - Specify a filter type.
+    choices: ['include', 'exclude']
+    type: str
+  filter_str:
+    description:
+      - Specify a regex filter string.
+    type: str
+'''
+
+EXAMPLES = """
+- name: remote logging port 8080
+  onyx_syslog_remote:
+    host: 10.10.10.10
+    port: 8080
+
+- name: remote logging trap override
+  onyx_syslog_remote:
+    host: 10.10.10.10
+    trap_override:
+      - override_class: events
+        override_priority: emerg
+
+- name: remote logging trap emerg
+  onyx_syslog_remote:
+    host: 10.10.10.10
+    trap: emerg
+
+- name: remote logging filter include 'ERR'
+  onyx_syslog_remote:
+    host: 10.10.10.10
+    filter: include
+    filter_str: /ERR/
+
+- name: disable remote logging trap override for class events
+  onyx_syslog_remote:
+    host: 10.10.10.10
+    trap_override:
+      - override_class: events
+        override_enabled: False
+
+- name: disable remote logging
+  onyx_syslog_remote:
+    enabled: False
+    host: 10.10.10.10
+
+- name: enable/disable override class
+  onyx_syslog_remote:
+    host: 10.7.144.71
+    trap_override:
+      - override_class: events
+        override_priority: emerg
+        override_enabled: False
+      - override_class: mgmt-front
+        override_priority: alert
+"""
+
+RETURN = """
+commands:
+  description: The list of configuration mode commands to send to the device.
+ returned: always + type: list + sample: + - logging x port 8080 + - logging 10.10.10.10 trap override class events priority emerg + - no logging 10.10.10.10 trap override class events + - logging 10.10.10.10 trap emerg + - logging 10.10.10.10 filter [include | exclude] ERR +""" + +import re +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.network.onyx.onyx import show_cmd +from ansible_collections.community.general.plugins.module_utils.network.onyx.onyx import BaseOnyxModule + + +class OnyxSyslogRemoteModule(BaseOnyxModule): + MAX_PORT = 65535 + LEVELS = ['none', 'debug', 'info', 'notice', 'alert', 'warning', 'err', 'emerg', 'crit'] + CLASSES = ['mgmt-front', 'mgmt-back', 'mgmt-core', 'events', 'debug-module', 'sx-sdk', 'mlx-daemons', 'protocol-stack'] + FILTER = ['include', 'exclude'] + + LOGGING_HOST = re.compile(r'^logging ([a-z0-9\.]+)$') + LOGGING_PORT = re.compile(r'^logging ([a-z0-9\.]+) port ([0-9]+)$') + LOGGING_TRAP = re.compile(r'^logging ([a-z0-9\.]+) trap ([a-z]+)$') + LOGGING_TRAP_OVERRIDE = re.compile(r'^logging ([a-z0-9\.]+) trap override class ([a-z\-]+) priority ([a-z]+)$') + LOGGING_FILTER = re.compile(r'^logging ([a-z0-9\.]+) filter (include|exclude) "([\D\d]+)"$') + + def init_module(self): + """" Ansible module initialization + """ + override_spec = dict(override_priority=dict(choices=self.LEVELS), + override_class=dict(choices=self.CLASSES, required=True), + override_enabled=dict(default=True, type="bool")) + + element_spec = dict(enabled=dict(type="bool", default=True), + host=dict(type="str", required=True), + port=dict(type="int"), + trap=dict(choices=self.LEVELS), + trap_override=dict(type="list", elements='dict', options=override_spec), + filter=dict(choices=self.FILTER), + filter_str=dict(type="str")) + + argument_spec = dict() + argument_spec.update(element_spec) + self._module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + required_together=[ + ['filter', 'filter_str'] + ]) + + def validate_port(self, port): + if port and (port < 1 or port > self.MAX_PORT): + self._module.fail_json(msg='logging port must be between 1 and {0}'.format(self.MAX_PORT)) + + def show_logging(self): + # we can't use show logging it has lack of information + return show_cmd(self._module, "show running-config | include .*logging.*", json_fmt=False, fail_on_error=False) + + def load_current_config(self): + self._current_config = dict() + current_config = self.show_logging().split('\n') + for line in current_config: + line = line.strip() + match = self.LOGGING_HOST.match(line) + if match: + host = match.group(1) + self._current_config[host] = dict() + continue + + match = self.LOGGING_PORT.match(line) + if match: + host = match.group(1) + port = int(match.group(2)) + if host in self._current_config: + self._current_config[host]['port'] = port + else: + self._current_config[host] = dict(port=port) + continue + + match = self.LOGGING_TRAP.match(line) + if match: + host = match.group(1) + trap = match.group(2) + host_config = self._current_config.get(host) + if host_config: + self._current_config[host]['trap'] = trap + else: + self._current_config[host] = dict(trap=trap) + continue + + match = self.LOGGING_TRAP_OVERRIDE.match(line) + if match: + host = match.group(1) + override_class = match.group(2) + override_priority = match.group(3) + host_config = self._current_config.get(host) + + if host_config: + if 'trap_override' in host_config: + 
self._current_config[host]['trap_override'].append(dict(override_class=override_class, override_priority=override_priority)) + else: + self._current_config[host]['trap_override'] = [dict(override_class=override_class, override_priority=override_priority)] + else: + self._current_config[host] = {'trap_override': [dict(override_class=override_class, override_priority=override_priority)]} + continue + + match = self.LOGGING_FILTER.match(line) + if match: + host = match.group(1) + filter_type = match.group(2) + filter_str = match.group(3) + if host in self._current_config: + self._current_config[host].update({'filter': filter_type, 'filter_str': filter_str}) + else: + self._current_config[host] = dict(filter=filter_type, filter_str=filter_str) + + def get_required_config(self): + self._required_config = dict() + required_config = dict() + module_params = self._module.params + port = module_params.get('port') + trap = module_params.get('trap') + trap_override = module_params.get('trap_override') + filtered = module_params.get('filter') + + required_config['host'] = module_params.get('host') + required_config['enabled'] = module_params.get('enabled') + + if port: + required_config['port'] = port + if trap: + required_config['trap'] = trap + if trap_override: + required_config['trap_override'] = trap_override + if filtered: + required_config['filter'] = filtered + required_config['filter_str'] = module_params.get('filter_str', '') + + self.validate_param_values(required_config) + self._required_config = required_config + + def generate_commands(self): + required_config = self._required_config + current_config = self._current_config + host = required_config.get('host') + enabled = required_config['enabled'] + ''' + cases: + if host in current config and current config != required config and its enable + if host in current config and its disable + if host in current and it has override_class with disable flag + ''' + host_config = current_config.get(host, dict()) + + if host in current_config and not enabled: + self._commands.append('no logging {0}'.format(host)) + else: + if host not in current_config: + self._commands.append('logging {0}'.format(host)) + if 'port' in required_config: + if required_config['port'] != host_config.get('port', None) or not host_config: + '''Edit/Create new one''' + self._commands.append('logging {0} port {1}'.format(host, required_config['port'])) + + if 'trap' in required_config or 'trap_override' in required_config: + trap_commands = self._get_trap_commands(host) + self._commands += trap_commands + + if 'filter' in required_config: + is_change = host_config.get('filter', None) != required_config['filter'] or \ + host_config.get('filter_str', None) != required_config['filter_str'] + if is_change or not host_config: + self._commands.append('logging {0} filter {1} {2}'.format(host, required_config['filter'], required_config['filter_str'])) + + ''' ********** private methods ********** ''' + def _get_trap_commands(self, host): + current_config = self._current_config + required_config = self._required_config + trap_commands = [] + host_config = current_config.get(host, dict()) + + override_list = required_config.get('trap_override') + if override_list: + current_override_list = host_config.get('trap_override', []) + + for override_trap in override_list: + override_class = override_trap.get('override_class') + override_priority = override_trap.get('override_priority') + override_enabled = override_trap.get('override_enabled') + found, found_class = False, False + for 
current_override in current_override_list: + if current_override.get('override_class') == override_class: + found_class = True + if not override_enabled: + break + if override_priority and current_override.get('override_priority') == override_priority: + found = True + break + + if override_enabled: + if not found and override_priority: + trap_commands.append('logging {0} trap override class {1} priority {2}'.format( + host, override_class, override_priority)) + elif found_class: # disabled option will use only class + trap_commands.append('no logging {0} trap override class {1}'.format( + host, override_class)) + + else: + if required_config['enabled']: # no disabled option for this, just override trap level can be disabled + trap = required_config.get('trap') + if trap and (trap != host_config.get('trap', None) or not host_config): + trap_commands.append('logging {0} trap {1}'.format( + host, trap)) + '''no disable for trap''' + + return trap_commands + + +def main(): + """ main entry point for module execution + """ + OnyxSyslogRemoteModule.main() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/onyx/onyx_traffic_class.py b/plugins/modules/network/onyx/onyx_traffic_class.py new file mode 100644 index 0000000000..cb2b7f4322 --- /dev/null +++ b/plugins/modules/network/onyx/onyx_traffic_class.py @@ -0,0 +1,325 @@ +#!/usr/bin/python +# +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: onyx_traffic_class +author: "Anas Badaha (@anasb)" +short_description: Configures Traffic Class +description: + - This module provides declarative management of Traffic Class configuration + on Mellanox ONYX network devices. +options: + state: + description: + - enable congestion control on interface. + choices: ['enabled', 'disabled'] + default: enabled + interfaces: + description: + - list of interfaces name. + required: true + tc: + description: + - traffic class, range 0-7. + required: true + congestion_control: + description: + - configure congestion control on interface. + suboptions: + control: + description: + - congestion control type. + choices: ['red', 'ecn', 'both'] + required: true + threshold_mode: + description: + - congestion control threshold mode. + choices: ['absolute', 'relative'] + required: true + min_threshold: + description: + - Set minimum-threshold value (in KBs) for marking traffic-class queue. + required: true + max_threshold: + description: + - Set maximum-threshold value (in KBs) for marking traffic-class queue. + required: true + dcb: + description: + - configure dcb control on interface. + suboptions: + mode: + description: + - dcb control mode. + choices: ['strict', 'wrr'] + required: true + weight: + description: + - Relevant only for wrr mode. +''' + +EXAMPLES = """ +- name: configure traffic class + onyx_traffic_class: + interfaces: + - Eth1/1 + - Eth1/2 + tc: 3 + congestion_control: + control: ecn + threshold_mode: absolute + min_threshold: 500 + max_threshold: 1500 + dcb: + mode: strict +""" + +RETURN = """ +commands: + description: The list of configuration mode commands to send to the device. 
+ returned: always + type: list + sample: + - interface ethernet 1/15 traffic-class 3 congestion-control ecn minimum-absolute 150 maximum-absolute 1500 + - interface ethernet 1/16 traffic-class 3 congestion-control ecn minimum-absolute 150 maximum-absolute 1500 + - interface mlag-port-channel 7 traffic-class 3 congestion-control ecn minimum-absolute 150 maximum-absolute 1500 + - interface port-channel 1 traffic-class 3 congestion-control ecn minimum-absolute 150 maximum-absolute 1500 + - interface ethernet 1/15 traffic-class 3 dcb ets strict + - interface ethernet 1/16 traffic-class 3 dcb ets strict + - interface mlag-port-channel 7 traffic-class 3 dcb ets strict + - interface port-channel 1 traffic-class 3 dcb ets strict +""" + +import re +from ansible.module_utils.six import iteritems +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.network.onyx.onyx import show_cmd +from ansible_collections.community.general.plugins.module_utils.network.onyx.onyx import BaseOnyxModule + + +class OnyxTrafficClassModule(BaseOnyxModule): + + IF_ETH_REGEX = re.compile(r"^Eth(\d+\/\d+|Eth\d+\/\d+\d+)$") + IF_PO_REGEX = re.compile(r"^Po(\d+)$") + MLAG_NAME_REGEX = re.compile(r"^Mpo(\d+)$") + + IF_TYPE_ETH = "ethernet" + PORT_CHANNEL = "port-channel" + MLAG_PORT_CHANNEL = "mlag-port-channel" + + IF_TYPE_MAP = { + IF_TYPE_ETH: IF_ETH_REGEX, + PORT_CHANNEL: IF_PO_REGEX, + MLAG_PORT_CHANNEL: MLAG_NAME_REGEX + } + + def init_module(self): + """ initialize module + """ + congestion_control_spec = dict(control=dict(choices=['red', 'ecn', 'both'], required=True), + threshold_mode=dict(choices=['absolute', 'relative'], required=True), + min_threshold=dict(type=int, required=True), + max_threshold=dict(type=int, required=True)) + + dcb_spec = dict(mode=dict(choices=['strict', 'wrr'], required=True), + weight=dict(type=int)) + + element_spec = dict( + interfaces=dict(type='list', required=True), + tc=dict(type=int, required=True), + congestion_control=dict(type='dict', options=congestion_control_spec), + dcb=dict(type='dict', options=dcb_spec), + state=dict(choices=['enabled', 'disabled'], default='enabled')) + + argument_spec = dict() + argument_spec.update(element_spec) + self._module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True) + + def get_required_config(self): + module_params = self._module.params + self._required_config = dict(module_params) + self.validate_param_values(self._required_config) + + def validate_tc(self, value): + if value and not 0 <= int(value) <= 7: + self._module.fail_json(msg='tc value must be between 0 and 7') + + def validate_param_values(self, obj, param=None): + dcb = obj.get("dcb") + if dcb is not None: + dcb_mode = dcb.get("mode") + weight = dcb.get("weight") + if dcb_mode == "wrr" and weight is None: + self._module.fail_json(msg='User should send weight attribute when dcb mode is wrr') + super(OnyxTrafficClassModule, self).validate_param_values(obj, param) + + def _get_interface_type(self, if_name): + if_type = None + if_id = None + for interface_type, interface_regex in iteritems(self.IF_TYPE_MAP): + match = interface_regex.match(if_name) + if match: + if_type = interface_type + if_id = match.group(1) + break + return if_type, if_id + + def _set_interface_congestion_control_config(self, interface_congestion_control_config, + interface, if_type, if_id): + tc = self._required_config.get("tc") + interface_dcb_ets = self._show_interface_dcb_ets(if_type, if_id)[0].get(interface) + if 
interface_dcb_ets is None:
+            dcb = dict()
+        else:
+            ets_per_tc = interface_dcb_ets[2].get("ETS per TC")
+            tc_config = ets_per_tc[0].get(str(tc))
+            dcb_mode = tc_config[0].get("S.Mode")
+            dcb_weight = int(tc_config[0].get("W"))
+            dcb = dict(mode=dcb_mode.lower(), weight=dcb_weight)
+
+        interface_congestion_control_config = interface_congestion_control_config[tc + 1]
+        mode = interface_congestion_control_config.get("Mode")
+        if mode == "none":
+            self._current_config[interface] = dict(state="disabled", dcb=dcb, if_type=if_type, if_id=if_id)
+            return
+
+        threshold_mode = interface_congestion_control_config.get("Threshold mode")
+        max_threshold = interface_congestion_control_config.get("Maximum threshold")
+        min_threshold = interface_congestion_control_config.get("Minimum threshold")
+
+        if threshold_mode == "absolute":
+            delimiter = ' '
+        else:
+            delimiter = '%'
+        min_value = int(min_threshold.split(delimiter)[0])
+        max_value = int(max_threshold.split(delimiter)[0])
+        congestion_control = dict(control=mode.lower(), threshold_mode=threshold_mode,
+                                  min_threshold=min_value, max_threshold=max_value)
+
+        self._current_config[interface] = dict(state="enabled", congestion_control=congestion_control,
+                                               dcb=dcb, if_type=if_type, if_id=if_id)
+
+    def _show_interface_congestion_control(self, if_type, interface):
+        cmd = "show interfaces {0} {1} congestion-control".format(if_type, interface)
+        return show_cmd(self._module, cmd, json_fmt=True, fail_on_error=False)
+
+    def _show_interface_dcb_ets(self, if_type, interface):
+        cmd = "show dcb ets interface {0} {1}".format(if_type, interface)
+        return show_cmd(self._module, cmd, json_fmt=True, fail_on_error=False)
+
+    def load_current_config(self):
+        self._current_config = dict()
+        for interface in self._required_config.get("interfaces"):
+            if_type, if_id = self._get_interface_type(interface)
+            if not if_id:
+                self._module.fail_json(
+                    msg='unsupported interface: {0}'.format(interface))
+            interface_congestion_control_config = self._show_interface_congestion_control(if_type, if_id)
+            if interface_congestion_control_config is not None:
+                self._set_interface_congestion_control_config(interface_congestion_control_config,
+                                                              interface, if_type, if_id)
+            else:
+                self._module.fail_json(
+                    msg='Interface {0} does not exist on switch'.format(interface))
+
+    def generate_commands(self):
+        state = self._required_config.get("state")
+        tc = self._required_config.get("tc")
+        interfaces = self._required_config.get("interfaces")
+        for interface in interfaces:
+            current_interface = self._current_config.get(interface)
+            current_state = current_interface.get("state")
+            if_type = current_interface.get("if_type")
+            if_id = current_interface.get("if_id")
+            if state == "disabled":
+                if current_state == "enabled":
+                    self._commands.append('interface {0} {1} no traffic-class {2} congestion-control'.format(if_type, if_id, tc))
+                continue
+
+            congestion_control = self._required_config.get("congestion_control")
+
+            if congestion_control is not None:
+                control = congestion_control.get("control")
+                current_congestion_control = current_interface.get("congestion_control")
+                threshold_mode = congestion_control.get("threshold_mode")
+                min_threshold = congestion_control.get("min_threshold")
+                max_threshold = congestion_control.get("max_threshold")
+                if current_congestion_control is None:
+                    self._threshold_mode_generate_cmds_mappers(threshold_mode, if_type, if_id, tc,
+                                                               control, min_threshold, max_threshold)
+                else:
+                    current_control = current_congestion_control.get("control")
+                    
curr_threshold_mode = current_congestion_control.get("threshold_mode") + curr_min_threshold = current_congestion_control.get("min_threshold") + curr_max_threshold = current_congestion_control.get("max_threshold") + + if control != current_control: + self._threshold_mode_generate_cmds_mappers(threshold_mode, if_type, if_id, tc, + control, min_threshold, max_threshold) + else: + if threshold_mode != curr_threshold_mode: + self._threshold_mode_generate_cmds_mappers(threshold_mode, if_type, if_id, tc, + control, min_threshold, max_threshold) + elif min_threshold != curr_min_threshold or max_threshold != curr_max_threshold: + self._threshold_mode_generate_cmds_mappers(threshold_mode, if_type, if_id, tc, + control, min_threshold, max_threshold) + + dcb = self._required_config.get("dcb") + if dcb is not None: + dcb_mode = dcb.get("mode") + current_dcb = current_interface.get("dcb") + current_dcb_mode = current_dcb.get("mode") + if dcb_mode == "strict" and dcb_mode != current_dcb_mode: + self._commands.append('interface {0} {1} traffic-class {2} ' + 'dcb ets {3}'.format(if_type, if_id, tc, dcb_mode)) + elif dcb_mode == "wrr": + weight = dcb.get("weight") + current_weight = current_dcb.get("weight") + if dcb_mode != current_dcb_mode or weight != current_weight: + self._commands.append('interface {0} {1} traffic-class {2} ' + 'dcb ets {3} {4}'.format(if_type, if_id, tc, dcb_mode, weight)) + + def _threshold_mode_generate_cmds_mappers(self, threshold_mode, if_type, if_id, tc, + control, min_threshold, max_threshold): + if threshold_mode == 'absolute': + self._generate_congestion_control_absolute_cmds(if_type, if_id, tc, control, + min_threshold, max_threshold) + else: + self._generate_congestion_control_relative_cmds(if_type, if_id, tc, control, + min_threshold, max_threshold) + + def _generate_congestion_control_absolute_cmds(self, if_type, if_id, tc, control, + min_absolute, max_absolute): + self._commands.append('interface {0} {1} traffic-class {2} ' + 'congestion-control {3} minimum-absolute {4} ' + 'maximum-absolute {5}'.format(if_type, if_id, tc, control, + min_absolute, max_absolute)) + + def _generate_congestion_control_relative_cmds(self, if_type, if_id, tc, control, + min_relative, max_relative): + self._commands.append('interface {0} {1} traffic-class {2} ' + 'congestion-control {3} minimum-relative {4} ' + 'maximum-relative {5}'.format(if_type, if_id, tc, control, + min_relative, max_relative)) + + +def main(): + """ main entry point for module execution + """ + OnyxTrafficClassModule.main() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/onyx/onyx_username.py b/plugins/modules/network/onyx/onyx_username.py new file mode 100644 index 0000000000..5395ebb61f --- /dev/null +++ b/plugins/modules/network/onyx/onyx_username.py @@ -0,0 +1,289 @@ +#!/usr/bin/python +# +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: onyx_username +author: "Anas Shami (@anass)" +short_description: Configure username module +description: + - This module provides declarative management of users/roles + on Mellanox ONYX network devices. 
+notes:
+options:
+  username:
+    description:
+      - Create/Edit a user with the given username.
+    type: str
+    required: True
+  full_name:
+    description:
+      - Set the full name of this user.
+    type: str
+  nopassword:
+    description:
+      - Clear the password for this user (allow login without a password).
+    type: bool
+    default: False
+  password:
+    description:
+      - Set the password for this user.
+    type: str
+  encrypted_password:
+    description:
+      - Whether the given password is already encrypted (as opposed to plain text).
+    type: bool
+    default: False
+  capability:
+    description:
+      - Grant capability to this user account.
+    type: str
+    choices: ['monitor', 'unpriv', 'v_admin', 'admin']
+  reset_capability:
+    description:
+      - Reset the capability of this user account.
+    type: bool
+    default: False
+  disconnected:
+    description:
+      - Disconnect all sessions of this user.
+    type: bool
+    default: False
+  disabled:
+    description:
+      - Disable means of logging into this account.
+    type: str
+    choices: ['none', 'login', 'password', 'all']
+  state:
+    description:
+      - Set state of the given account.
+    default: present
+    type: str
+    choices: ['present', 'absent']
+'''
+
+EXAMPLES = """
+- name: create new user
+  onyx_username:
+    username: anass
+
+- name: set the user full-name
+  onyx_username:
+    username: anass
+    full_name: anasshami
+
+- name: set the user encrypted password
+  onyx_username:
+    username: anass
+    password: 12345
+    encrypted_password: True
+
+- name: set the user capability
+  onyx_username:
+    username: anass
+    capability: monitor
+
+- name: reset the user capability
+  onyx_username:
+    username: anass
+    reset_capability: True
+
+- name: remove the user configuration
+  onyx_username:
+    username: anass
+    state: absent
+"""
+
+RETURN = """
+commands:
+  description: The list of configuration mode commands to send to the device.
+  returned: always
+  type: list
+  sample:
+    - username *
+    - username * password *
+    - username * nopassword
+    - username * disable login
+    - username * capability admin
+    - no username *
+    - no username * disable
+"""
+import re
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.network.onyx.onyx import BaseOnyxModule, show_cmd
+
+
+class OnyxUsernameModule(BaseOnyxModule):
+    ACCOUNT_STATE = {
+        'Account locked out': dict(disabled='all'),
+        'No password required for login': dict(nopassword=True),
+        'Local password login disabled': dict(disabled='password'),
+        'Account disabled': dict(disabled='all')
+    }
+    # Password-type marker for an already-encrypted (hashed) password string
+    # (assumed CLI convention for "username <name> password 7 <hash>").
+    ENCRYPTED_ID = 7
+
+    def init_module(self):
+        """
+        module initialization
+        """
+        element_spec = dict()
+
+        argument_spec = dict(state=dict(choices=['absent', 'present'], default='present'),
+                             username=dict(type='str', required=True),
+                             disabled=dict(choices=['none', 'login', 'password', 'all']),
+                             capability=dict(choices=['monitor', 'unpriv', 'v_admin', 'admin']),
+                             nopassword=dict(type='bool', default=False),
+                             password=dict(type='str', no_log=True),
+                             encrypted_password=dict(type='bool', default=False),
+                             reset_capability=dict(type="bool", default=False),
+                             disconnected=dict(type='bool', default=False),
+                             full_name=dict(type='str'))
+        argument_spec.update(element_spec)
+        self._module = AnsibleModule(
+            argument_spec=argument_spec,
+            supports_check_mode=True,
+            mutually_exclusive=[['password', 'nopassword']])
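+
+    # Assumed execution flow (BaseOnyxModule.run, as hinted by comments in the
+    # sibling onyx_vlan module): get_required_config() -> load_current_config()
+    # -> generate_commands(); commands are only emitted for attributes that
+    # differ from the current device state.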
+    def get_required_config(self):
+        self._required_config = dict()
+        module_params = self._module.params
+        params = {}
+        # Required/Default fields
+        params['username'] = module_params.get('username')
+        params['state'] = module_params.get('state')
+        params['encrypted_password'] = module_params.get('encrypted_password')
+        params['reset_capability'] = module_params.get('reset_capability')
+        # Other fields
+        for key, value in module_params.items():
+            if value is not None:
+                params[key] = value
+        self.validate_param_values(params)
+        self._required_config = params
+
+    def _get_username_config(self):
+        return show_cmd(self._module, "show usernames", json_fmt=True, fail_on_error=False)
+
+    def _set_current_config(self, users_config):
+        '''
+        users_config example:
+        {
+            "admin": [
+                {
+                    "CAPABILITY": "admin",
+                    "ACCOUNT STATUS": "No password required for login",
+                    "FULL NAME": "System Administrator"
+                }
+            ]
+        }
+        '''
+        if not users_config:
+            return
+        current_config = self._current_config
+        for username, config in users_config.items():
+            config_json = config[0]
+            current_config[username] = current_config.get(username, {})
+            account_status = config_json.get('ACCOUNT STATUS')
+            status_value = self.ACCOUNT_STATE.get(account_status)
+
+            if status_value is not None:
+                # A status such as "Password set (SHA512 | MD5)" is not in ACCOUNT_STATE and maps
+                # to None: an enabled account with a password, so no attribute is recorded here.
+                current_config[username].update(status_value)
+            current_config[username].update({
+                'capability': config_json.get('CAPABILITY'),
+                'full_name': config_json.get('FULL NAME')
+            })
+
+    def load_current_config(self):
+        self._current_config = dict()
+        users_config = self._get_username_config()
+        self._set_current_config(users_config)
+
+    def generate_commands(self):
+        current_config, required_config = self._current_config, self._required_config
+        username = required_config.get('username')
+        current_user = current_config.get(username)
+        if current_user is not None:
+            # existing account: just edit its attributes
+            full_name = required_config.get('full_name')
+            if full_name is not None and current_user.get('full_name') != full_name:
+                self._commands.append("username {0} full-name {1}".format(username, full_name))
+
+            disabled = required_config.get('disabled')
+            if disabled is not None and current_user.get('disabled') != disabled:
+                if disabled == 'none':
+                    self._commands.append("no username {0} disable".format(username))
+                elif disabled == 'all':
+                    self._commands.append("username {0} disable".format(username))
+                else:
+                    self._commands.append("username {0} disable {1}".format(username, disabled))
+
+            state = required_config.get('state')
+            if state == 'absent':  # this will remove the user
+                self._commands.append("no username {0}".format(username))
+
+            capability = required_config.get('capability')
+            if capability is not None and current_user.get('capability') != capability:
+                self._commands.append("username {0} capability {1}".format(username, capability))
+
+            reset_capability = required_config.get('reset_capability')
+            if reset_capability:
+                self._commands.append("no username {0} capability".format(username))
+
+            password = required_config.get('password')
+            if password is not None:
+                encrypted = required_config.get('encrypted_password')
+                if encrypted:
+                    self._commands.append("username {0} password {1} {2}".format(username, self.ENCRYPTED_ID, password))
+                else:
+                    self._commands.append("username {0} password {1}".format(username, password))
+
+            nopassword = required_config.get('nopassword')
+            if nopassword and nopassword != current_user.get('nopassword', False):
+                self._commands.append("username {0} nopassword".format(username))
+
+            disconnected = required_config.get('disconnected')
+            if disconnected:
+                self._commands.append("username {0} disconnect".format(username))
+        else:
+            # create a new account if we have valid information; just check for
+            # username, capability, full_name and password
+            capability = required_config.get('capability')
+            password = required_config.get('password')
+            full_name = required_config.get('full_name')
+            if capability is not None or password is not None or full_name is not None:
+                if capability is not None:
+                    self._commands.append("username {0} capability {1}".format(username, capability))
+
+                if password is not None:
+                    encrypted = required_config.get('encrypted_password')
+                    if encrypted:
+                        self._commands.append("username {0} password {1} {2}".format(username, self.ENCRYPTED_ID, password))
+                    else:
+                        self._commands.append("username {0} password {1}".format(username, password))
+
+                if full_name is not None:
+                    self._commands.append("username {0} full-name {1}".format(username, full_name))
+
+            else:
+                self._commands.append("username {0}".format(username))
+
+
+def main():
+    """ main entry point for module execution
+    """
+    OnyxUsernameModule.main()
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/network/onyx/onyx_vlan.py b/plugins/modules/network/onyx/onyx_vlan.py
new file mode 100644
index 0000000000..d0016fb4bb
--- /dev/null
+++ b/plugins/modules/network/onyx/onyx_vlan.py
@@ -0,0 +1,204 @@
+#!/usr/bin/python
+#
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+DOCUMENTATION = '''
+---
+module: onyx_vlan
+author:
+  - Samer Deeb (@samerd)
+  - Alex Tabachnik (@atabachnik)
+short_description: Manage VLANs on Mellanox ONYX network devices
+description:
+  - This module provides declarative management of VLANs
+    on Mellanox ONYX network devices.
+options:
+  name:
+    description:
+      - Name of the VLAN.
+  vlan_id:
+    description:
+      - ID of the VLAN.
+  aggregate:
+    description: List of VLANs definitions.
+  purge:
+    description:
+      - Purge VLANs not defined in the I(aggregate) parameter.
+    default: no
+    type: bool
+  state:
+    description:
+      - State of the VLAN configuration.
+    default: present
+    choices: ['present', 'absent']
+'''
+
+EXAMPLES = """
+- name: configure VLAN ID and name
+  onyx_vlan:
+    vlan_id: 20
+    name: test-vlan
+
+- name: remove configuration
+  onyx_vlan:
+    vlan_id: 20
+    state: absent
+"""
+
+RETURN = """
+commands:
+  description: The list of configuration mode commands to send to the device.
+  returned: always
+ type: list + sample: + - vlan 20 + - name test-vlan + - exit +""" + +from copy import deepcopy + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.six import iteritems +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import remove_default_spec + +from ansible_collections.community.general.plugins.module_utils.network.onyx.onyx import BaseOnyxModule +from ansible_collections.community.general.plugins.module_utils.network.onyx.onyx import show_cmd + + +class OnyxVlanModule(BaseOnyxModule): + _purge = False + + @classmethod + def _get_element_spec(cls): + return dict( + vlan_id=dict(type='int'), + name=dict(type='str'), + state=dict(default='present', choices=['present', 'absent']), + ) + + @classmethod + def _get_aggregate_spec(cls, element_spec): + aggregate_spec = deepcopy(element_spec) + aggregate_spec['vlan_id'] = dict(required=True) + + # remove default in aggregate spec, to handle common arguments + remove_default_spec(aggregate_spec) + return aggregate_spec + + def init_module(self): + """ module initialization + """ + element_spec = self._get_element_spec() + aggregate_spec = self._get_aggregate_spec(element_spec) + argument_spec = dict( + aggregate=dict(type='list', elements='dict', + options=aggregate_spec), + purge=dict(default=False, type='bool'), + ) + argument_spec.update(element_spec) + required_one_of = [['vlan_id', 'aggregate']] + mutually_exclusive = [['vlan_id', 'aggregate']] + self._module = AnsibleModule( + argument_spec=argument_spec, + required_one_of=required_one_of, + mutually_exclusive=mutually_exclusive, + supports_check_mode=True) + + def validate_vlan_id(self, value): + if value and not 1 <= int(value) <= 4094: + self._module.fail_json(msg='vlan id must be between 1 and 4094') + + def get_required_config(self): + self._required_config = list() + module_params = self._module.params + aggregate = module_params.get('aggregate') + self._purge = module_params.get('purge', False) + if aggregate: + for item in aggregate: + for key in item: + if item.get(key) is None: + item[key] = module_params[key] + self.validate_param_values(item, item) + req_item = item.copy() + req_item['vlan_id'] = int(req_item['vlan_id']) + self._required_config.append(req_item) + else: + params = { + 'vlan_id': module_params['vlan_id'], + 'name': module_params['name'], + 'state': module_params['state'], + } + self.validate_param_values(params) + self._required_config.append(params) + + def _create_vlan_data(self, vlan_id, vlan_data): + if self._os_version >= self.ONYX_API_VERSION: + vlan_data = vlan_data[0] + return { + 'vlan_id': vlan_id, + 'name': self.get_config_attr(vlan_data, 'Name') + } + + def _get_vlan_config(self): + return show_cmd(self._module, "show vlan") + + def load_current_config(self): + # called in base class in run function + self._os_version = self._get_os_version() + self._current_config = dict() + vlan_config = self._get_vlan_config() + if not vlan_config: + return + for vlan_id, vlan_data in iteritems(vlan_config): + try: + vlan_id = int(vlan_id) + except ValueError: + continue + self._current_config[vlan_id] = \ + self._create_vlan_data(vlan_id, vlan_data) + + def generate_commands(self): + req_vlans = set() + for req_conf in self._required_config: + state = req_conf['state'] + vlan_id = req_conf['vlan_id'] + if state == 'absent': + if vlan_id in self._current_config: + self._commands.append('no vlan %s' % vlan_id) + else: + req_vlans.add(vlan_id) + self._generate_vlan_commands(vlan_id, req_conf) + if 
self._purge: + for vlan_id in self._current_config: + if vlan_id not in req_vlans: + self._commands.append('no vlan %s' % vlan_id) + + def _generate_vlan_commands(self, vlan_id, req_conf): + curr_vlan = self._current_config.get(vlan_id, {}) + if not curr_vlan: + self._commands.append("vlan %s" % vlan_id) + self._commands.append("exit") + req_name = req_conf['name'] + curr_name = curr_vlan.get('name') + if req_name: + if req_name != curr_name: + self._commands.append("vlan %s name %s" % (vlan_id, req_name)) + elif req_name is not None: + if curr_name: + self._commands.append("vlan %s no name" % vlan_id) + + +def main(): + """ main entry point for module execution + """ + OnyxVlanModule.main() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/onyx/onyx_vxlan.py b/plugins/modules/network/onyx/onyx_vxlan.py new file mode 100644 index 0000000000..0fc406aabf --- /dev/null +++ b/plugins/modules/network/onyx/onyx_vxlan.py @@ -0,0 +1,264 @@ +#!/usr/bin/python +# +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: onyx_vxlan +author: "Anas Badaha (@anasb)" +short_description: Configures Vxlan +description: + - This module provides declarative management of Vxlan configuration + on Mellanox ONYX network devices. +notes: + - Tested on ONYX evpn_dev.031. + - nve protocol must be enabled. +options: + nve_id: + description: + - nve interface ID. + required: true + loopback_id: + description: + - loopback interface ID. + bgp: + description: + - configure bgp on nve interface. + type: bool + default: true + mlag_tunnel_ip: + description: + - vxlan Mlag tunnel IP + vni_vlan_list: + description: + - Each item in the list has two attributes vlan_id, vni_id. + arp_suppression: + description: + - A flag telling if to configure arp suppression. + type: bool + default: false +''' + +EXAMPLES = """ +- name: configure Vxlan + onyx_vxlan: + nve_id: 1 + loopback_id: 1 + bgp: yes + mlag-tunnel-ip: 100.0.0.1 + vni_vlan_list: + - vlan_id: 10 + vni_id: 10010 + - vlan_id: 6 + vni_id: 10060 + arp_suppression: yes +""" + +RETURN = """ +commands: + description: The list of configuration mode commands to send to the device. 
+ returned: always + type: list + sample: + - interface nve 1 + - interface nve 1 vxlan source interface loopback 1 + - interface nve 1 nve controller bgp + - interface nve 1 vxlan mlag-tunnel-ip 100.0.0.1 + - interface nve 1 nve vni 10010 vlan 10 + - interface nve 1 nve vni 10060 vlan 6 + - interface nve 1 nve neigh-suppression + - interface vlan 6 + - interface vlan 10 +""" + +import re +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.network.onyx.onyx import show_cmd +from ansible_collections.community.general.plugins.module_utils.network.onyx.onyx import BaseOnyxModule + + +class OnyxVxlanModule(BaseOnyxModule): + + LOOPBACK_REGEX = re.compile(r'^loopback (\d+).*') + NVE_ID_REGEX = re.compile(r'^Interface NVE (\d+).*') + + def init_module(self): + """ initialize module + """ + vni_vlan_spec = dict(vlan_id=dict(type=int), + vni_id=dict(type=int)) + element_spec = dict( + nve_id=dict(type=int), + loopback_id=dict(type=int), + bgp=dict(default=True, type='bool'), + mlag_tunnel_ip=dict(type='str'), + vni_vlan_list=dict(type='list', + elements='dict', + options=vni_vlan_spec), + arp_suppression=dict(default=False, type='bool') + ) + argument_spec = dict() + argument_spec.update(element_spec) + self._module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True) + + def get_required_config(self): + module_params = self._module.params + self._required_config = dict(module_params) + self.validate_param_values(self._required_config) + + def _set_vxlan_config(self, vxlan_config): + vxlan_config = vxlan_config[0] + if not vxlan_config: + return + nve_header = vxlan_config.get("header") + match = self.NVE_ID_REGEX.match(nve_header) + if match: + current_nve_id = int(match.group(1)) + self._current_config['nve_id'] = current_nve_id + if int(current_nve_id) != self._required_config.get("nve_id"): + return + + self._current_config['mlag_tunnel_ip'] = vxlan_config.get("Mlag tunnel IP") + controller_mode = vxlan_config.get("Controller mode") + if controller_mode == "BGP": + self._current_config['bgp'] = True + else: + self._current_config['bgp'] = False + + loopback_str = vxlan_config.get("Source interface") + match = self.LOOPBACK_REGEX.match(loopback_str) + if match: + loopback_id = match.group(1) + self._current_config['loopback_id'] = int(loopback_id) + + self._current_config['global_neigh_suppression'] = vxlan_config.get("Global Neigh-Suppression") + + vni_vlan_mapping = self._current_config['vni_vlan_mapping'] = dict() + nve_detail = self._show_nve_detail() + + if nve_detail is not None: + nve_detail = nve_detail[0] + + if nve_detail: + for vlan_id in nve_detail: + vni_vlan_mapping[int(vlan_id)] = dict( + vni_id=int(nve_detail[vlan_id][0].get("VNI")), + arp_suppression=nve_detail[vlan_id][0].get("Neigh Suppression")) + + def _show_vxlan_config(self): + cmd = "show interfaces nve" + return show_cmd(self._module, cmd, json_fmt=True, fail_on_error=False) + + def _show_nve_detail(self): + cmd = "show interface nve {0} detail".format(self._required_config.get("nve_id")) + return show_cmd(self._module, cmd, json_fmt=True, fail_on_error=False) + + def load_current_config(self): + self._current_config = dict() + vxlan_config = self._show_vxlan_config() + if vxlan_config: + self._set_vxlan_config(vxlan_config) + + def generate_commands(self): + nve_id = self._required_config.get("nve_id") + current_nve_id = self._current_config.get("nve_id") + + if current_nve_id is None: + self._add_nve_commands(nve_id) + elif 
current_nve_id != nve_id: + self._add_no_nve_commands(current_nve_id) + self._add_nve_commands(nve_id) + + bgp = self._required_config.get("bgp") + if bgp is not None: + curr_bgp = self._current_config.get("bgp") + if bgp and bgp != curr_bgp: + self._commands.append('interface nve {0} nve controller bgp'.format(nve_id)) + + loopback_id = self._required_config.get("loopback_id") + if loopback_id is not None: + curr_loopback_id = self._current_config.get("loopback_id") + if loopback_id != curr_loopback_id: + self._commands.append('interface nve {0} vxlan source interface ' + 'loopback {1} '.format(nve_id, loopback_id)) + + mlag_tunnel_ip = self._required_config.get("mlag_tunnel_ip") + if mlag_tunnel_ip is not None: + curr_mlag_tunnel_ip = self._current_config.get("mlag_tunnel_ip") + if mlag_tunnel_ip != curr_mlag_tunnel_ip: + self._commands.append('interface nve {0} vxlan ' + 'mlag-tunnel-ip {1}'.format(nve_id, mlag_tunnel_ip)) + + vni_vlan_list = self._required_config.get("vni_vlan_list") + arp_suppression = self._required_config.get("arp_suppression") + if vni_vlan_list is not None: + self._generate_vni_vlan_cmds(vni_vlan_list, nve_id, arp_suppression) + + def _generate_vni_vlan_cmds(self, vni_vlan_list, nve_id, arp_suppression): + + current_global_arp_suppression = self._current_config.get('global_neigh_suppression') + if arp_suppression is True and current_global_arp_suppression != "Enable": + self._commands.append('interface nve {0} nve neigh-suppression'.format(nve_id)) + + current_vni_vlan_mapping = self._current_config.get('vni_vlan_mapping') + if current_vni_vlan_mapping is None: + for vni_vlan in vni_vlan_list: + vlan_id = vni_vlan.get("vlan_id") + vni_id = vni_vlan.get("vni_id") + self._add_vni_vlan_cmds(nve_id, vni_id, vlan_id) + self._add_arp_suppression_cmds(arp_suppression, vlan_id) + else: + for vni_vlan in vni_vlan_list: + vlan_id = vni_vlan.get("vlan_id") + vni_id = vni_vlan.get("vni_id") + + currt_vlan_id = current_vni_vlan_mapping.get(vlan_id) + + if currt_vlan_id is None: + self._add_vni_vlan_cmds(nve_id, vni_id, vlan_id) + self._add_arp_suppression_cmds(arp_suppression, vlan_id) + else: + current_vni_id = currt_vlan_id.get("vni_id") + current_arp_suppression = currt_vlan_id.get("arp_suppression") + + if int(current_vni_id) != vni_id: + self._add_vni_vlan_cmds(nve_id, vni_id, vlan_id) + + if current_arp_suppression == "Disable": + self._add_arp_suppression_cmds(arp_suppression, vlan_id) + + def _add_no_nve_commands(self, current_nve_id): + self._commands.append('no interface nve {0}'.format(current_nve_id)) + + def _add_nve_commands(self, nve_id): + self._commands.append('interface nve {0}'.format(nve_id)) + self._commands.append('exit') + + def _add_vni_vlan_cmds(self, nve_id, vni_id, vlan_id): + self._commands.append('interface nve {0} nve vni {1} ' + 'vlan {2}'.format(nve_id, vni_id, vlan_id)) + + def _add_arp_suppression_cmds(self, arp_suppression, vlan_id): + if arp_suppression is True: + self._commands.append('interface vlan {0}'.format(vlan_id)) + self._commands.append('exit') + + +def main(): + """ main entry point for module execution + """ + OnyxVxlanModule.main() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/onyx/onyx_wjh.py b/plugins/modules/network/onyx/onyx_wjh.py new file mode 100644 index 0000000000..32f7125584 --- /dev/null +++ b/plugins/modules/network/onyx/onyx_wjh.py @@ -0,0 +1,223 @@ +#!/usr/bin/python +# +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or 
https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+DOCUMENTATION = '''
+---
+module: onyx_wjh
+author: "Anas Shami (@anass)"
+short_description: Configure what-just-happened module
+description:
+  - This module provides declarative management of what-just-happened (WJH)
+    on Mellanox ONYX network devices.
+notes:
+options:
+  group:
+    description:
+      - Name of the WJH group.
+    choices: ['all', 'forwarding', 'acl']
+    type: str
+  enabled:
+    description:
+      - WJH group status.
+    type: bool
+  auto_export:
+    description:
+      - WJH group auto-export pcap file status.
+    type: bool
+  export_group:
+    description:
+      - WJH auto-export group.
+    choices: ['all', 'forwarding', 'acl']
+    type: str
+  clear_group:
+    description:
+      - Clear pcap files by group.
+    choices: ['all', 'user', 'auto-export']
+    type: str
+'''
+
+EXAMPLES = """
+- name: enable wjh
+  onyx_wjh:
+    group: forwarding
+    enabled: True
+
+- name: disable wjh
+  onyx_wjh:
+    group: forwarding
+    enabled: False
+
+- name: enable auto-export
+  onyx_wjh:
+    auto_export: True
+    export_group: forwarding
+
+- name: disable auto-export
+  onyx_wjh:
+    auto_export: False
+    export_group: forwarding
+
+- name: clear pcap file
+  onyx_wjh:
+    clear_group: auto-export
+"""
+
+RETURN = """
+commands:
+  description: The list of configuration mode commands to send to the device.
+  returned: always
+  type: list
+  sample:
+    - what-just-happened forwarding enable
+    - what-just-happened auto-export forwarding enable
+    - clear what-just-happened pcap-files user
+"""
+import re
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.network.onyx.onyx import BaseOnyxModule, show_cmd
+
+
+class OnyxWJHModule(BaseOnyxModule):
+    WJH_DISABLED_REGX = re.compile(r'^no what-just-happened ([a-z]+) enable.*')
+    WJH_DISABLED_AUTO_EXPORT_REGX = re.compile(r'^no what-just-happened auto-export ([a-z]+) enable.*')
+
+    WJH_CMD_FMT = '{0}what-just-happened {1} enable'
+    WJH_EXPORT_CMD_FMT = '{0}what-just-happened auto-export {1} enable'
+    WJH_CLEAR_CMD_FMT = 'clear what-just-happened pcap-files {0}'
+
+    WJH_GROUPS = ['all', 'forwarding', 'acl']
+    CLEAR_GROUPS = ['all', 'user', 'auto-export']
+
+    def init_module(self):
+        """
+        module initialization
+        """
+        element_spec = dict(group=dict(choices=self.WJH_GROUPS),
+                            enabled=dict(type='bool'),
+                            auto_export=dict(type='bool'),
+                            export_group=dict(choices=self.WJH_GROUPS),
+                            clear_group=dict(choices=self.CLEAR_GROUPS))
+
+        argument_spec = dict()
+        argument_spec.update(element_spec)
+        self._module = AnsibleModule(
+            argument_spec=argument_spec,
+            supports_check_mode=True,
+            required_together=[
+                ['group', 'enabled'],
+                ['auto_export', 'export_group']
+            ])
+
+    def get_required_config(self):
+        self._required_config = dict()
+        module_params = self._module.params
+        group = module_params.get('group')
+        export_group = module_params.get('export_group')
+        clear_group = module_params.get('clear_group')
+
+        params = dict()
+        if group:
+            enabled = module_params.get('enabled')
+            params.update({
+                'group': group,
+                'enabled': enabled
+            })
+
+        if export_group:
+            auto_export = module_params.get('auto_export')
+            params.update({
+                'export_group': export_group,
+                'auto_export': auto_export
+            })
+
+        if clear_group:
+            params.update({
+                'clear_group': clear_group
+            })
+
+        self.validate_param_values(params)
+        self._required_config = params
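+
+    # Sketch of the (assumed) running-config lines matched by the WJH_DISABLED
+    # regexes above; since WJH is enabled by default, only disabled groups appear:
+    #   no what-just-happened forwarding enable
+    #   no what-just-happened auto-export forwarding enable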
_get_wjh_config(self): + return show_cmd(self._module, "show running-config | include .*what-just-happened.*", json_fmt=False, fail_on_error=False) + + def _set_current_config(self, config): + if not config: + return + current_config = self._current_config + lines = config.split('\n') + for line in lines: + if line.startswith('#'): + continue + match = self.WJH_DISABLED_REGX.match(line) + if match: + # wjh is disabled + group = match.group(1) + current_config[group] = False + + match = self.WJH_DISABLED_AUTO_EXPORT_REGX.match(line) + if match: + # wjh auto export is disabled + export_group = match.group(1) + '_export' + current_config[export_group] = False + + # The running config contains a [no what-just-happened <group> enable] line + # only when a group is disabled - the default config is enabled. + def load_current_config(self): + self._current_config = dict() + config_lines = self._get_wjh_config() + if config_lines: + self._set_current_config(config_lines) + + def wjh_group_status(self, current_config, group_value, suffix=''): + current_enabled = False + if group_value == 'all': + # 'all' counts as enabled unless every group has a disable entry + current_enabled = not all([ + (group + suffix) in current_config for group in self.WJH_GROUPS]) + else: + # if there is no current value, the group is enabled + current_enabled = current_config[group_value + suffix] if (group_value + suffix) in current_config else True + return current_enabled + + # WJH is enabled by default; when WJH is disabled we will find + # 'no what-just-happened' commands in the running config. + def generate_commands(self): + current_config, required_config = self._current_config, self._required_config + group = required_config.get('group') + export_group = required_config.get('export_group') + clear_group = required_config.get('clear_group') + if group: + current_enabled = self.wjh_group_status(current_config, group) + if required_config['enabled'] != current_enabled: + self._commands.append(self.WJH_CMD_FMT + .format(('' if required_config['enabled'] else 'no '), group)) + if export_group: + current_enabled = self.wjh_group_status(current_config, required_config['export_group'], '_export') + if required_config['auto_export'] != current_enabled: + self._commands.append(self.WJH_EXPORT_CMD_FMT + .format(('' if required_config['auto_export'] else 'no '), export_group)) + if clear_group: + # clear pcap files + self._commands.append(self.WJH_CLEAR_CMD_FMT.format(clear_group)) + + +def main(): + """ main entry point for module execution + """ + OnyxWJHModule.main() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/opx/opx_cps.py b/plugins/modules/network/opx/opx_cps.py new file mode 100644 index 0000000000..7b80a75ffb --- /dev/null +++ b/plugins/modules/network/opx/opx_cps.py @@ -0,0 +1,393 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2018 Dell Inc. or its subsidiaries. All Rights Reserved. +# +# This file is part of Ansible by Red Hat +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+# + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: opx_cps +author: "Senthil Kumar Ganesan (@skg-net)" +short_description: CPS operations on networking device running Openswitch (OPX) +description: + - Executes the given operation on the YANG object, using CPS API in the + networking device running OpenSwitch (OPX). It uses the YANG models + provided in https://github.com/open-switch/opx-base-model. +options: + module_name: + description: + - Yang path to be configured. + attr_type: + description: + - Attribute Yang type. + attr_data: + description: + - Attribute Yang path and their corresponding data. + operation: + description: + - Operation to be performed on the object. + default: create + choices: ['delete', 'create', 'set', 'action', 'get'] + db: + description: + - Queries/Writes the specified yang path from/to the db. + type: bool + default: 'no' + qualifier: + description: + - A qualifier provides the type of object data to retrieve or act on. + default: target + choices: ['target', 'observed', 'proposed', 'realtime', 'registration', 'running', 'startup'] + commit_event: + description: + - Attempts to force the auto-commit event to the specified yang object. + type: bool + default: 'no' +requirements: + - "cps" + - "cps_object" + - "cps_utils" +''' + +EXAMPLES = """ +- name: Create VLAN + opx_cps: + module_name: "dell-base-if-cmn/if/interfaces/interface" + attr_data: { + "base-if-vlan/if/interfaces/interface/id": 230, + "if/interfaces/interface/name": "br230", + "if/interfaces/interface/type": "ianaift:l2vlan" + } + operation: "create" +- name: Get VLAN + opx_cps: + module_name: "dell-base-if-cmn/if/interfaces/interface" + attr_data: { + "if/interfaces/interface/name": "br230", + } + operation: "get" +- name: Modify some attributes in VLAN + opx_cps: + module_name: "dell-base-if-cmn/if/interfaces/interface" + attr_data: { + "cps/key_data": + { "if/interfaces/interface/name": "br230" }, + "dell-if/if/interfaces/interface/untagged-ports": ["e101-008-0"], + } + operation: "set" +- name: Delete VLAN + opx_cps: + module_name: "dell-base-if-cmn/if/interfaces/interface" + attr_data: { + "if/interfaces/interface/name": "br230", + } + operation: "delete" +""" + +RETURN = """ +response: + description: Output from the CPS transaction. + Output of CPS Get operation if CPS set/create/delete not done. + returned: when a CPS transaction is successfully performed. + type: list + sample: + [{ + "data": { + "base-if-vlan/if/interfaces/interface/id": 230, + "cps/object-group/return-code": 0, + "dell-base-if-cmn/if/interfaces/interface/if-index": 46, + "if/interfaces/interface/name": "br230", + "if/interfaces/interface/type": "ianaift:l2vlan" + }, + "key": "target/dell-base-if-cmn/if/interfaces/interface" + }] +cps_curr_config: + description: Returns the CPS Get output i.e. 
the running configuration + before the set/delete CPS operation is performed + returned: when CPS operation is set or delete + type: dict + sample: + [{ + "data": { + "base-if-vlan/if/interfaces/interface/id": 230, + "cps/key_data": { + "if/interfaces/interface/name": "br230" + }, + "dell-base-if-cmn/if/interfaces/interface/if-index": 44, + "dell-if/if/interfaces/interface/learning-mode": 1, + "dell-if/if/interfaces/interface/mtu": 1532, + "dell-if/if/interfaces/interface/phys-address": "", + "dell-if/if/interfaces/interface/vlan-type": 1, + "if/interfaces/interface/enabled": 0, + "if/interfaces/interface/type": "ianaift:l2vlan" + }, + "key": "target/dell-base-if-cmn/if/interfaces/interface" + }] +diff: + description: The actual configuration that will be pushed, computed by + comparing the running configuration with the input attributes + returned: when CPS operation is set or delete + type: dict + sample: + { + "cps/key_data": { + "if/interfaces/interface/name": "br230" + }, + "dell-if/if/interfaces/interface/untagged-ports": [ + "e101-007-0" + ] + } +db: + description: Denotes if the CPS DB transaction was performed + returned: when db is set to True in module options + type: bool + sample: True +commit_event: + description: Denotes if the auto-commit event is set + returned: when commit_event is set to True in module options + type: bool + sample: True +""" + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.six import iteritems +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import dict_diff + +try: + import cps + import cps_object + import cps_utils + HAS_CPS = True +except ImportError: + HAS_CPS = False + + +def convert_cps_raw_list(raw_list): + resp_list = [] + if raw_list: + for raw_elem in raw_list: + processed_element = convert_cps_raw_data(raw_elem) + if processed_element: + raw_key = raw_elem['key'] + individual_element = {} + individual_element['data'] = processed_element + individual_element['key'] = (cps.qual_from_key(raw_key) + "/" + + cps.name_from_key(raw_key, 1)) + resp_list.append(individual_element) + return resp_list + + +def convert_cps_raw_data(raw_elem): + d = {} + obj = cps_object.CPSObject(obj=raw_elem) + for attr in raw_elem['data']: + d[attr] = obj.get_attr_data(attr) + return d + + +def parse_cps_parameters(module_name, qualifier, attr_type, + attr_data, operation=None, db=None, + commit_event=None): + + obj = cps_object.CPSObject(module=module_name, qual=qualifier) + + if operation: + obj.set_property('oper', operation) + + if attr_type: + for key, val in iteritems(attr_type): + cps_utils.cps_attr_types_map.add_type(key, val) + + for key, val in iteritems(attr_data): + + embed_attrs = key.split(',') + embed_attrs_len = len(embed_attrs) + if embed_attrs_len >= 3: + obj.add_embed_attr(embed_attrs, val, embed_attrs_len - 2) + else: + if isinstance(val, str): + val_list = val.split(',') + # Treat as list if value contains ',' but is not + # enclosed within {} + if len(val_list) == 1 or val.startswith('{'): + obj.add_attr(key, val) + else: + obj.add_attr(key, val_list) + else: + obj.add_attr(key, val) + + if db: + cps.set_ownership_type(obj.get_key(), 'db') + obj.set_property('db', True) + else: + obj.set_property('db', False) + + if commit_event: + cps.set_auto_commit_event(obj.get_key(), True) + obj.set_property('commit-event', True) + return obj + + +def cps_get(obj): + + RESULT = dict() + key = obj.get() + raw_response = [] + cps.get([key], raw_response) + + resp_list = convert_cps_raw_list(raw_response) + + RESULT["response"] = resp_list + return RESULT + + 
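A quick sketch of the diff step that opx_cps performs before a set/delete (see main() further below): the desired attributes are compared against the running configuration, only new or changed keys are kept, and 'cps/key_data' is re-attached so the transaction still carries the object's key. The helper below is a flat-dict stand-in for ansible.netcommon's dict_diff, and the sample values are illustrative only:

def _illustrate_partial_push_diff():
    def _dict_diff(base, comparable):
        # stand-in for dict_diff: keep keys that are new or changed in `comparable`
        return {k: v for k, v in comparable.items() if base.get(k) != v}

    running = {"if/interfaces/interface/name": "br230",
               "dell-if/if/interfaces/interface/untagged-ports": ["e101-007-0"]}
    desired = {"if/interfaces/interface/name": "br230",
               "dell-if/if/interfaces/interface/untagged-ports": ["e101-008-0"]}

    diff = _dict_diff(running, desired)
    # diff == {'dell-if/if/interfaces/interface/untagged-ports': ['e101-008-0']}
    diff["cps/key_data"] = {"if/interfaces/interface/name": "br230"}
    return diff
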
+def cps_transaction(obj): + + RESULT = dict() + ch = {'operation': obj.get_property('oper'), 'change': obj.get()} + if cps.transaction([ch]): + RESULT["response"] = convert_cps_raw_list([ch['change']]) + RESULT["changed"] = True + else: + error_msg = "Transaction error while " + obj.get_property('oper') + raise RuntimeError(error_msg) + return RESULT + + +def parse_key_data(attrs): + + res = dict() + for key, val in iteritems(attrs): + if key == 'cps/key_data': + res.update(val) + else: + res[key] = val + return res + + +def main(): + """ + main entry point for module execution + """ + argument_spec = dict( + qualifier=dict(required=False, + default="target", + type='str', + choices=['target', 'observed', 'proposed', 'realtime', + 'registration', 'running', 'startup']), + module_name=dict(required=True, type='str'), + attr_type=dict(required=False, type='dict'), + attr_data=dict(required=True, type='dict'), + operation=dict(required=False, + default="create", + type='str', + choices=['delete', 'create', 'set', 'action', 'get']), + db=dict(required=False, default=False, type='bool'), + commit_event=dict(required=False, default=False, type='bool') + ) + + module = AnsibleModule(argument_spec=argument_spec, + supports_check_mode=False) + + if not HAS_CPS: + module.fail_json(msg='CPS library required for this module') + + qualifier = module.params['qualifier'] + module_name = module.params['module_name'] + attr_type = module.params["attr_type"] + attr_data = module.params["attr_data"] + operation = module.params['operation'] + db = module.params["db"] + commit_event = module.params["commit_event"] + RESULT = dict(changed=False, db=False, commit_event=False) + + if db: + RESULT['db'] = True + if commit_event: + RESULT['commit_event'] = True + + try: + # First do a CPS get operation + get_obj = parse_cps_parameters(module_name, qualifier, attr_type, + attr_data, 'get', db, commit_event) + curr_config = cps_get(get_obj) + + if operation == 'get': + RESULT.update(curr_config) + else: + diff = attr_data + + # Evaluate the changes in the attributes + cfg = dict() + if curr_config and curr_config['response']: + cfg = curr_config['response'][0]['data'] + key_d = 'cps/key_data' + + # diff computation is not needed for delete + if operation != 'delete': + configs = parse_key_data(cfg) + attributes = parse_key_data(attr_data) + diff = dict_diff(configs, attributes) + # Append diff with any 'cps/key_data' from attr_data + if diff and key_d in attr_data: + diff[key_d] = attr_data[key_d] + + # Append diff with any 'cps/key_data' from curr_config + # Needed for all operations including delete + if diff and key_d in cfg: + if key_d in diff: + diff[key_d].update(cfg[key_d]) + else: + diff[key_d] = cfg[key_d] + + RESULT.update({"diff": diff}) + + # Create object for cps operation + obj = parse_cps_parameters(module_name, qualifier, attr_type, + diff, operation, db, commit_event) + + res = dict() + if operation == "delete": + if cfg: + res = cps_transaction(obj) + else: + if diff: + res = cps_transaction(obj) + + if not res and cfg: + res.update({"response": curr_config['response']}) + else: + res.update({"cps_curr_config": curr_config['response']}) + RESULT.update(res) + + except Exception as e: + module.fail_json(msg=str(type(e).__name__) + ": " + str(e)) + + module.exit_json(**RESULT) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/ordnance/ordnance_config.py b/plugins/modules/network/ordnance/ordnance_config.py new file mode 100644 index 0000000000..62817ac905 --- /dev/null +++ 
b/plugins/modules/network/ordnance/ordnance_config.py @@ -0,0 +1,361 @@ +#!/usr/bin/python +# +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: ordnance_config +author: "Alexander Turner (@alexanderturner) " +short_description: Manage Ordnance configuration sections +description: + - Ordnance router configurations use a simple block indent file syntax + for segmenting configuration into sections. This module provides + an implementation for working with these configuration sections in + a deterministic way. +options: + lines: + description: + - The ordered set of commands that should be configured in the + section. The commands must be the exact same commands as found + in the device running-config. Be sure to note the configuration + command syntax as some commands are automatically modified by the + device config parser. + aliases: ['commands'] + parents: + description: + - The ordered set of parents that uniquely identify the section or hierarchy + the commands should be checked against. If the parents argument + is omitted, the commands are checked against the set of top + level or global commands. + src: + description: + - Specifies the source path to the file that contains the configuration + or configuration template to load. The path to the source file can + either be the full path on the Ansible control host or a relative + path from the playbook or role root directory. This argument is mutually + exclusive with I(lines), I(parents). + before: + description: + - The ordered set of commands to push on to the command stack if + a change needs to be made. This allows the playbook designer + the opportunity to perform configuration commands prior to pushing + any changes without affecting how the set of commands are matched + against the system. + after: + description: + - The ordered set of commands to append to the end of the command + stack if a change needs to be made. Just like with I(before) this + allows the playbook designer to append a set of commands to be + executed after the command set. + match: + description: + - Instructs the module on the way to perform the matching of + the set of commands against the current device config. If + match is set to I(line), commands are matched line by line. If + match is set to I(strict), command lines are matched with respect + to position. If match is set to I(exact), command lines + must be an equal match. Finally, if match is set to I(none), the + module will not attempt to compare the source configuration with + the running configuration on the remote device. + default: line + choices: ['line', 'strict', 'exact', 'none'] + replace: + description: + - Instructs the module on the way to perform the configuration + on the device. If the replace argument is set to I(line) then + the modified lines are pushed to the device in configuration + mode. If the replace argument is set to I(block) then the entire + command block is pushed to the device in configuration mode if any + line is not correct. + default: line + choices: ['line', 'block'] + multiline_delimiter: + description: + - This argument is used when pushing a multiline configuration + element to the Ordnance router. 
It specifies the character to use + as the delimiting character. This only applies to the + configuration action + default: "@" + backup: + description: + - This argument will cause the module to create a full backup of + the current C(running-config) from the remote device before any + changes are made. The backup file is written to the C(backup) + folder in the playbook root directory. If the directory does not + exist, it is created. + type: bool + default: 'no' + config: + description: + - The C(config) argument allows the playbook designer to supply + the base configuration to be used to validate configuration + changes necessary. If this argument is provided, the module + will not download the running-config from the remote node. + defaults: + description: + - This argument specifies whether or not to collect all defaults + when getting the remote device running config. When enabled, + the module will get the current config by issuing the command + C(show running-config all). + type: bool + default: 'no' + save: + description: + - The C(save) argument instructs the module to save the running- + config to the startup-config at the conclusion of the module + running. If check mode is specified, this argument is ignored. + type: bool + default: 'no' +''' + +EXAMPLES = """ +--- +# Note: examples below use the following provider dict to handle +# transport and authentication to the node. +vars: + cli: + host: "{{ inventory_hostname }}" + username: RouterName + password: password + transport: cli + +--- +- name: configure top level configuration + ordnance_config: + lines: hostname {{ inventory_hostname }} + provider: "{{ cli }}" + +- name: configure interface settings + ordnance_config: + lines: + - description test interface + - ip address 172.31.1.1 255.255.255.0 + parents: interface Ethernet1 + provider: "{{ cli }}" + +- name: configure bgp router + ordnance_config: + lines: + - neighbor 1.1.1.1 remote-as 1234 + - network 10.0.0.0/24 + parents: router bgp 65001 + provider: "{{ cli }}" + +""" + +RETURN = """ +updates: + description: The set of commands that will be pushed to the remote device + returned: Only when commands is specified. + type: list + sample: ['...', '...'] +backup_path: + description: The full path to the backup file + returned: when backup is yes + type: str + sample: /playbooks/ansible/backup/ordnance_config.2016-07-16@22:28:34 +""" +import re +import time +import traceback + +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.network import NetworkModule, NetworkError +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.config import NetworkConfig, dumps +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.parsing import Command +from ansible_collections.community.general.plugins.module_utils.network.ordnance.ordnance import get_config +from ansible.module_utils.six import iteritems +from ansible.module_utils._text import to_native + + +def check_args(module, warnings): + if module.params['multiline_delimiter']: + if len(module.params['multiline_delimiter']) != 1: + module.fail_json(msg='multiline_delimiter value can only be a ' + 'single character') + if module.params['force']: + warnings.append('The force argument is deprecated, please use ' + 'match=none instead. 
This argument will be ' + 'removed in the future') + + +def extract_banners(config): + banners = {} + banner_cmds = re.findall(r'^banner (\w+)', config, re.M) + for cmd in banner_cmds: + regex = r'banner %s \^C(.+?)(?=\^C)' % cmd + match = re.search(regex, config, re.S) + if match: + key = 'banner %s' % cmd + banners[key] = match.group(1).strip() + + for cmd in banner_cmds: + regex = r'banner %s \^C(.+?)(?=\^C)' % cmd + match = re.search(regex, config, re.S) + if match: + config = config.replace(str(match.group(1)), '') + + config = re.sub(r'banner \w+ \^C\^C', '!! banner removed', config) + return (config, banners) + + +def diff_banners(want, have): + candidate = {} + for key, value in iteritems(want): + if value != have.get(key): + candidate[key] = value + return candidate + + +def load_banners(module, banners): + delimiter = module.params['multiline_delimiter'] + for key, value in iteritems(banners): + key += ' %s' % delimiter + for cmd in ['config terminal', key, value, delimiter, 'end']: + cmd += '\r' + module.connection.shell.shell.sendall(cmd) + time.sleep(1) + module.connection.shell.receive() + + +def get_config(module, result): + contents = module.params['config'] + if not contents: + defaults = module.params['defaults'] + contents = module.config.get_config(include_defaults=defaults) + + contents, banners = extract_banners(contents) + return NetworkConfig(indent=1, contents=contents), banners + + +def get_candidate(module): + candidate = NetworkConfig(indent=1) + banners = {} + + if module.params['src']: + src, banners = extract_banners(module.params['src']) + candidate.load(src) + + elif module.params['lines']: + parents = module.params['parents'] or list() + candidate.add(module.params['lines'], parents=parents) + + return candidate, banners + + +def run(module, result): + match = module.params['match'] + replace = module.params['replace'] + path = module.params['parents'] + + candidate, want_banners = get_candidate(module) + + if match != 'none': + config, have_banners = get_config(module, result) + path = module.params['parents'] + configobjs = candidate.difference(config, path=path, match=match, + replace=replace) + else: + configobjs = candidate.items + have_banners = {} + + banners = diff_banners(want_banners, have_banners) + + if configobjs or banners: + commands = dumps(configobjs, 'commands').split('\n') + + if module.params['lines']: + if module.params['before']: + commands[:0] = module.params['before'] + + if module.params['after']: + commands.extend(module.params['after']) + + result['updates'] = commands + result['banners'] = banners + + # send the configuration commands to the device and merge + # them with the current running config + if not module.check_mode: + if commands: + module.config(commands) + if banners: + load_banners(module, banners) + + result['changed'] = True + + if module.params['save']: + if not module.check_mode: + module.config.save_config() + result['changed'] = True + + +def main(): + """ main entry point for module execution + """ + + argument_spec = dict( + src=dict(type='path'), + + lines=dict(aliases=['commands'], type='list'), + parents=dict(type='list'), + + before=dict(type='list'), + after=dict(type='list'), + + match=dict(default='line', choices=['line', 'strict', 'exact', 'none']), + replace=dict(default='line', choices=['line', 'block']), + multiline_delimiter=dict(default='@'), + + config=dict(), + defaults=dict(type='bool', default=False), + + backup=dict(type='bool', default=False), + save=dict(default=False, type='bool'), + ) + 
+ mutually_exclusive = [('lines', 'src'), + ('parents', 'src')] + + required_if = [('match', 'strict', ['lines']), + ('match', 'exact', ['lines']), + ('replace', 'block', ['lines'])] + + module = NetworkModule(argument_spec=argument_spec, + connect_on_load=False, + mutually_exclusive=mutually_exclusive, + required_if=required_if, + supports_check_mode=True) + + if module.params['force'] is True: + module.params['match'] = 'none' + + warnings = list() + check_args(module, warnings) + + result = dict(changed=False, warnings=warnings) + + if module.params['backup']: + result['__backup__'] = module.config.get_config() + + try: + run(module, result) + except NetworkError as e: + module.disconnect() + module.fail_json(msg=to_native(e), exception=traceback.format_exc()) + + module.disconnect() + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/ordnance/ordnance_facts.py b/plugins/modules/network/ordnance/ordnance_facts.py new file mode 100644 index 0000000000..d3463b9e48 --- /dev/null +++ b/plugins/modules/network/ordnance/ordnance_facts.py @@ -0,0 +1,293 @@ +#!/usr/bin/python +# +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: ordnance_facts +author: "Alexander Turner (@alexanderturner) " +short_description: Collect facts from Ordnance Virtual Routers over SSH +description: + - Collects a base set of device facts from an Ordnance Virtual + router over SSH. This module prepends all of the + base network fact keys with C(ansible_net_). The facts + module will always collect a base set of facts from the device + and can enable or disable collection of additional facts. +options: + gather_subset: + description: + - When supplied, this argument will restrict the facts collected + to a given subset. Possible values for this argument include + all, hardware, config, and interfaces. Can specify a list of + values to include a larger subset. Values can also be used + with an initial C(M(!)) to specify that a specific subset should + not be collected. + required: false + default: '!config' +''' + +EXAMPLES = """ +--- +# Note: examples below use the following provider dict to handle +# transport and authentication to the node. 
+vars: + cli: + host: "{{ inventory_hostname }}" + username: RouterName + password: ordnance + transport: cli + +--- +# Collect all facts from the device +- ordnance_facts: + gather_subset: all + provider: "{{ cli }}" + +# Collect only the config and default facts +- ordnance_facts: + gather_subset: + - config + provider: "{{ cli }}" + +# Do not collect hardware facts +- ordnance_facts: + gather_subset: + - "!hardware" + provider: "{{ cli }}" +""" + +RETURN = """ +ansible_net_gather_subset: + description: The list of fact subsets collected from the virtual router + returned: always + type: list + +# config +ansible_net_config: + description: The current active config from the virtual router + returned: when config is configured + type: str + +# interfaces +ansible_net_all_ipv4_addresses: + description: All IPv4 addresses configured on the virtual router + returned: when interfaces is configured + type: list +ansible_net_all_ipv6_addresses: + description: All IPv6 addresses configured on the virtual router + returned: when interfaces is configured + type: list +ansible_net_interfaces: + description: A hash of all interfaces running on the virtual router + returned: when interfaces is configured + type: dict +""" +import re +import traceback + +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.network import NetworkModule +from ansible.module_utils.six import iteritems +from ansible.module_utils.six.moves import zip +from ansible.module_utils._text import to_native + + +class FactsBase(object): + + def __init__(self, module): + self.module = module + self.facts = dict() + self.failed_commands = list() + + def run(self, cmd): + try: + return self.module.cli(cmd)[0] + except Exception: + self.failed_commands.append(cmd) + + +class Config(FactsBase): + + def populate(self): + data = self.run('show running-config') + if data: + self.facts['config'] = data + + +class Interfaces(FactsBase): + + def populate(self): + self.facts['all_ipv4_addresses'] = list() + self.facts['all_ipv6_addresses'] = list() + + data = self.run('show interfaces') + if data: + interfaces = self.parse_interfaces(data) + self.facts['interfaces'] = self.populate_interfaces(interfaces) + + data = self.run('show ipv6 interface') + if data: + data = self.parse_interfaces(data) + self.populate_ipv6_interfaces(data) + + def populate_interfaces(self, interfaces): + facts = dict() + for key, value in iteritems(interfaces): + intf = dict() + intf['description'] = self.parse_description(value) + intf['macaddress'] = self.parse_macaddress(value) + + ipv4 = self.parse_ipv4(value) + intf['ipv4'] = self.parse_ipv4(value) + if ipv4: + self.add_ip_address(ipv4['address'], 'ipv4') + + intf['duplex'] = self.parse_duplex(value) + intf['operstatus'] = self.parse_operstatus(value) + intf['type'] = self.parse_type(value) + + facts[key] = intf + return facts + + def populate_ipv6_interfaces(self, data): + for key, value in iteritems(data): + self.facts['interfaces'][key]['ipv6'] = list() + addresses = re.findall(r'\s+(.+), subnet', value, re.M) + subnets = re.findall(r', subnet is (.+)$', value, re.M) + for addr, subnet in zip(addresses, subnets): + ipv6 = dict(address=addr.strip(), subnet=subnet.strip()) + self.add_ip_address(addr.strip(), 'ipv6') + self.facts['interfaces'][key]['ipv6'].append(ipv6) + + def add_ip_address(self, address, family): + if family == 'ipv4': + self.facts['all_ipv4_addresses'].append(address) + else: + self.facts['all_ipv6_addresses'].append(address) + + def parse_interfaces(self, data): + 
parsed = dict() + key = '' + for line in data.split('\n'): + if len(line) == 0: + continue + elif line[0] == ' ': + parsed[key] += '\n%s' % line + else: + match = re.match(r'^(\S+)', line) + if match: + key = match.group(1) + parsed[key] = line + return parsed + + def parse_description(self, data): + match = re.search(r'Description: (.+)$', data, re.M) + if match: + return match.group(1) + + def parse_macaddress(self, data): + match = re.search(r'address is (\S+)', data) + if match: + return match.group(1) + + def parse_ipv4(self, data): + match = re.search(r'Internet address is (\S+)', data) + if match: + addr, masklen = match.group(1).split('/') + return dict(address=addr, masklen=int(masklen)) + + def parse_duplex(self, data): + match = re.search(r'(\w+) Duplex', data, re.M) + if match: + return match.group(1) + + def parse_operstatus(self, data): + match = re.search(r'^(?:.+) is (.+),', data, re.M) + if match: + return match.group(1) + + +FACT_SUBSETS = dict( + interfaces=Interfaces, + config=Config, +) + +VALID_SUBSETS = frozenset(FACT_SUBSETS.keys()) + + +def main(): + spec = dict( + gather_subset=dict(default=['!config'], type='list') + ) + + module = NetworkModule(argument_spec=spec, supports_check_mode=True) + + gather_subset = module.params['gather_subset'] + + runable_subsets = set() + exclude_subsets = set() + + for subset in gather_subset: + if subset == 'all': + runable_subsets.update(VALID_SUBSETS) + continue + + if subset.startswith('!'): + subset = subset[1:] + if subset == 'all': + exclude_subsets.update(VALID_SUBSETS) + continue + exclude = True + else: + exclude = False + + if subset not in VALID_SUBSETS: + module.fail_json(msg='Bad subset') + + if exclude: + exclude_subsets.add(subset) + else: + runable_subsets.add(subset) + + if not runable_subsets: + runable_subsets.update(VALID_SUBSETS) + + runable_subsets.difference_update(exclude_subsets) + runable_subsets.add('default') + + facts = dict() + facts['gather_subset'] = list(runable_subsets) + + instances = list() + for key in runable_subsets: + instances.append(FACT_SUBSETS[key](module)) + + failed_commands = list() + + try: + for inst in instances: + inst.populate() + failed_commands.extend(inst.failed_commands) + facts.update(inst.facts) + except Exception as exc: + module.fail_json(msg=to_native(exc), exception=traceback.format_exc()) + + ansible_facts = dict() + for key, value in iteritems(facts): + key = 'ansible_net_%s' % key + ansible_facts[key] = value + + module.exit_json(ansible_facts=ansible_facts, failed_commands=failed_commands) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/panos/panos_admin.py b/plugins/modules/network/panos/panos_admin.py new file mode 100644 index 0000000000..8198dc3bb8 --- /dev/null +++ b/plugins/modules/network/panos/panos_admin.py @@ -0,0 +1,196 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Ansible module to manage PaloAltoNetworks Firewall +# (c) 2016, techbizdev +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. 
+# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <http://www.gnu.org/licenses/>. + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['deprecated'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: panos_admin +short_description: Add or modify PAN-OS user account passwords. +description: + - PAN-OS module that allows changes to the user account passwords by doing + API calls to the Firewall using pan-api as the protocol. +author: "Luigi Mori (@jtschichold), Ivan Bojer (@ivanbojer)" +requirements: + - pan-python +deprecated: + alternative: Use U(https://galaxy.ansible.com/PaloAltoNetworks/paloaltonetworks) instead. + removed_in: "2.12" + why: Consolidating code base. +options: + admin_username: + description: + - username for admin user + default: "admin" + admin_password: + description: + - password for admin user + required: true + role: + description: + - role for admin user + commit: + description: + - commit if changed + type: bool + default: 'yes' +extends_documentation_fragment: +- community.general.panos + +''' + +EXAMPLES = ''' +# Set the password of user admin to "badpassword" +# Doesn't commit the candidate config + - name: set admin password + panos_admin: + ip_address: "192.168.1.1" + password: "admin" + admin_username: admin + admin_password: "badpassword" + commit: False +''' + +RETURN = ''' +status: + description: success status + returned: success + type: str + sample: "okey dokey" +''' +from ansible.module_utils.basic import AnsibleModule + +try: + import pan.xapi + HAS_LIB = True +except ImportError: + HAS_LIB = False + +_ADMIN_XPATH = "/config/mgt-config/users/entry[@name='%s']" + + +def admin_exists(xapi, admin_username): + xapi.get(_ADMIN_XPATH % admin_username) + e = xapi.element_root.find('.//entry') + return e + + +def admin_set(xapi, module, admin_username, admin_password, role): + if admin_password is not None: + xapi.op(cmd='request password-hash password "%s"' % admin_password, + cmd_xml=True) + r = xapi.element_root + phash = r.find('.//phash').text + if role is not None: + rbval = "yes" + if role != "superuser" and role != 'superreader': + rbval = "" + + ea = admin_exists(xapi, admin_username) + if ea is not None: + # user exists + changed = False + + if role is not None: + rb = ea.find('.//role-based') + if rb is not None: + if rb[0].tag != role: + changed = True + xpath = _ADMIN_XPATH % admin_username + xpath += '/permissions/role-based/%s' % rb[0].tag + xapi.delete(xpath=xpath) + + xpath = _ADMIN_XPATH % admin_username + xpath += '/permissions/role-based' + xapi.set(xpath=xpath, + element='<%s>%s</%s>' % (role, rbval, role)) + + if admin_password is not None: + xapi.edit(xpath=_ADMIN_XPATH % admin_username + '/phash', + element='<phash>%s</phash>' % phash) + changed = True + + return changed + + # build the XML element for the new admin entry + exml = [] + + exml.append('<phash>%s</phash>' % phash) + exml.append('<permissions><role-based><%s>%s</%s>' + '</role-based></permissions>' % (role, rbval, role)) + + exml = ''.join(exml) + + xapi.set(xpath=_ADMIN_XPATH % admin_username, element=exml) + + return True + + +def main(): + argument_spec = dict( + ip_address=dict(), + password=dict(no_log=True), + username=dict(default='admin'), + admin_username=dict(default='admin'), + admin_password=dict(no_log=True), + role=dict(), + commit=dict(type='bool', default=True) + ) + module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False) + + if not HAS_LIB: + module.fail_json(msg='pan-python required for this module') + + ip_address = 
module.params["ip_address"] + if not ip_address: + module.fail_json(msg="ip_address should be specified") + password = module.params["password"] + if not password: + module.fail_json(msg="password is required") + username = module.params['username'] + + xapi = pan.xapi.PanXapi( + hostname=ip_address, + api_username=username, + api_password=password + ) + + admin_username = module.params['admin_username'] + if admin_username is None: + module.fail_json(msg="admin_username is required") + admin_password = module.params['admin_password'] + role = module.params['role'] + commit = module.params['commit'] + + changed = admin_set(xapi, module, admin_username, admin_password, role) + + if changed and commit: + xapi.commit(cmd="", sync=True, interval=1) + + module.exit_json(changed=changed, msg="okey dokey") + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/panos/panos_admpwd.py b/plugins/modules/network/panos/panos_admpwd.py new file mode 100644 index 0000000000..3671bba3bd --- /dev/null +++ b/plugins/modules/network/panos/panos_admpwd.py @@ -0,0 +1,205 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Ansible module to manage PaloAltoNetworks Firewall +# (c) 2016, techbizdev +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +DOCUMENTATION = ''' +--- +module: panos_admpwd +short_description: change admin password of PAN-OS device using SSH with SSH key +description: + - Change the admin password of PAN-OS via SSH using a SSH key for authentication. + - Useful for AWS instances where the first login should be done via SSH. +author: "Luigi Mori (@jtschichold), Ivan Bojer (@ivanbojer)" +requirements: + - paramiko +deprecated: + alternative: Use U(https://galaxy.ansible.com/PaloAltoNetworks/paloaltonetworks) instead. + removed_in: "2.12" + why: Consolidating code base. 
+options: + ip_address: + description: + - IP address (or hostname) of PAN-OS device + required: true + username: + description: + - username for initial authentication + required: false + default: "admin" + key_filename: + description: + - filename of the SSH Key to use for authentication + required: true + newpassword: + description: + - password to configure for admin on the PAN-OS device + required: true +''' + +EXAMPLES = ''' +# Tries for 10 times to set the admin password of 192.168.1.1 to "badpassword" +# via SSH, authenticating using key /tmp/ssh.key +- name: set admin password + panos_admpwd: + ip_address: "192.168.1.1" + username: "admin" + key_filename: "/tmp/ssh.key" + newpassword: "badpassword" + register: result + until: result is not failed + retries: 10 + delay: 30 +''' + +RETURN = ''' +status: + description: success status + returned: success + type: str + sample: "Last login: Fri Sep 16 11:09:20 2016 from 10.35.34.56.....Configuration committed successfully" +''' + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['deprecated'], + 'supported_by': 'community'} + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.compat.paramiko import paramiko +import time +import sys + +_PROMPTBUFF = 4096 + + +def wait_with_timeout(module, shell, prompt, timeout=60): + now = time.time() + result = "" + while True: + if shell.recv_ready(): + result += shell.recv(_PROMPTBUFF) + endresult = result.strip() + if len(endresult) != 0 and endresult[-1] == prompt: + break + + if time.time() - now > timeout: + module.fail_json(msg="Timeout waiting for prompt") + + return result + + +def set_panwfw_password(module, ip_address, key_filename, newpassword, username): + stdout = "" + + ssh = paramiko.SSHClient() + + # add policy to accept all host keys, I haven't found + # a way to retrieve the instance SSH key fingerprint from AWS + ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy()) + + ssh.connect(ip_address, username=username, key_filename=key_filename) + shell = ssh.invoke_shell() + + # wait for the shell to start + buff = wait_with_timeout(module, shell, ">") + stdout += buff + + # step into config mode + shell.send('configure\n') + # wait for the config prompt + buff = wait_with_timeout(module, shell, "#") + stdout += buff + + if module.check_mode: + # exit and close connection + shell.send('exit\n') + ssh.close() + return False, 'Connection test successful. Password left intact.' + + # set admin password + shell.send('set mgt-config users ' + username + ' password\n') + + # wait for the password prompt + buff = wait_with_timeout(module, shell, ":") + stdout += buff + + # enter password for the first time + shell.send(newpassword + '\n') + + # wait for the password prompt + buff = wait_with_timeout(module, shell, ":") + stdout += buff + + # enter password for the second time + shell.send(newpassword + '\n') + + # wait for the config mode prompt + buff = wait_with_timeout(module, shell, "#") + stdout += buff + + # commit ! 
+ shell.send('commit\n') + + # wait for the prompt + buff = wait_with_timeout(module, shell, "#", 120) + stdout += buff + + if 'success' not in buff: + module.fail_json(msg="Error setting " + username + " password: " + stdout) + + # exit + shell.send('exit\n') + + ssh.close() + + return True, stdout + + +def main(): + argument_spec = dict( + ip_address=dict(required=True), + username=dict(default='admin'), + key_filename=dict(required=True), + newpassword=dict(no_log=True, required=True) + ) + module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True) + if paramiko is None: + module.fail_json(msg='paramiko is required for this module') + + ip_address = module.params["ip_address"] + if not ip_address: + module.fail_json(msg="ip_address should be specified") + key_filename = module.params["key_filename"] + if not key_filename: + module.fail_json(msg="key_filename should be specified") + newpassword = module.params["newpassword"] + if not newpassword: + module.fail_json(msg="newpassword is required") + username = module.params['username'] + + try: + changed, stdout = set_panwfw_password(module, ip_address, key_filename, newpassword, username) + module.exit_json(changed=changed, stdout=stdout) + except Exception: + x = sys.exc_info()[1] + module.fail_json(msg=x) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/panos/panos_cert_gen_ssh.py b/plugins/modules/network/panos/panos_cert_gen_ssh.py new file mode 100644 index 0000000000..9c204d1a7b --- /dev/null +++ b/plugins/modules/network/panos/panos_cert_gen_ssh.py @@ -0,0 +1,194 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Ansible module to manage PaloAltoNetworks Firewall +# (c) 2016, techbizdev +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +DOCUMENTATION = ''' +--- +module: panos_cert_gen_ssh +short_description: generates a self-signed certificate using SSH protocol with SSH key +description: + - This module generates a self-signed certificate that can be used by GlobalProtect client, SSL connector, or + - otherwise. Root certificate must be preset on the system first. This module depends on paramiko for ssh. +author: "Luigi Mori (@jtschichold), Ivan Bojer (@ivanbojer)" +requirements: + - paramiko +deprecated: + alternative: Use U(https://galaxy.ansible.com/PaloAltoNetworks/paloaltonetworks) instead. + removed_in: "2.12" + why: Consolidating code base. +notes: + - Checkmode is not supported. +options: + ip_address: + description: + - IP address (or hostname) of PAN-OS device being configured. + required: true + key_filename: + description: + - Location of the filename that is used for the auth. Either I(key_filename) or I(password) is required. + required: true + password: + description: + - Password credentials to use for auth. Either I(key_filename) or I(password) is required. + required: true + cert_friendly_name: + description: + - Human friendly certificate name (not CN but just a friendly name). 
+ required: true + cert_cn: + description: + - Certificate CN (common name) embedded in the certificate signature. + required: true + signed_by: + description: + - Undersigning authority (CA) that MUST already be presents on the device. + required: true + rsa_nbits: + description: + - Number of bits used by the RSA algorithm for the certificate generation. + default: "2048" +''' + +EXAMPLES = ''' +# Generates a new self-signed certificate using ssh +- name: generate self signed certificate + panos_cert_gen_ssh: + ip_address: "192.168.1.1" + password: "paloalto" + cert_cn: "1.1.1.1" + cert_friendly_name: "test123" + signed_by: "root-ca" +''' + +RETURN = ''' +# Default return values +''' + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['deprecated'], + 'supported_by': 'community'} + + +from ansible.module_utils._text import to_native +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.compat.paramiko import paramiko +import time + +_PROMPTBUFF = 4096 + + +def wait_with_timeout(module, shell, prompt, timeout=60): + now = time.time() + result = "" + while True: + if shell.recv_ready(): + result += shell.recv(_PROMPTBUFF) + endresult = result.strip() + if len(endresult) != 0 and endresult[-1] == prompt: + break + + if time.time() - now > timeout: + module.fail_json(msg="Timeout waiting for prompt") + + return result + + +def generate_cert(module, ip_address, key_filename, password, + cert_cn, cert_friendly_name, signed_by, rsa_nbits): + stdout = "" + + client = paramiko.SSHClient() + + # add policy to accept all host keys, I haven't found + # a way to retrieve the instance SSH key fingerprint from AWS + client.set_missing_host_key_policy(paramiko.AutoAddPolicy()) + + if not key_filename: + client.connect(ip_address, username="admin", password=password) + else: + client.connect(ip_address, username="admin", key_filename=key_filename) + + shell = client.invoke_shell() + # wait for the shell to start + buff = wait_with_timeout(module, shell, ">") + stdout += buff + + # generate self-signed certificate + if isinstance(cert_cn, list): + cert_cn = cert_cn[0] + cmd = 'request certificate generate signed-by {0} certificate-name {1} name {2} algorithm RSA rsa-nbits {3}\n'.format( + signed_by, cert_friendly_name, cert_cn, rsa_nbits) + shell.send(cmd) + + # wait for the shell to complete + buff = wait_with_timeout(module, shell, ">") + stdout += buff + + # exit + shell.send('exit\n') + + if 'Success' not in buff: + module.fail_json(msg="Error generating self signed certificate: " + stdout) + + client.close() + return stdout + + +def main(): + argument_spec = dict( + ip_address=dict(required=True), + key_filename=dict(), + password=dict(no_log=True), + cert_cn=dict(required=True), + cert_friendly_name=dict(required=True), + rsa_nbits=dict(default='2048'), + signed_by=dict(required=True) + + ) + module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False, + required_one_of=[['key_filename', 'password']]) + if paramiko is None: + module.fail_json(msg='paramiko is required for this module') + + ip_address = module.params["ip_address"] + key_filename = module.params["key_filename"] + password = module.params["password"] + cert_cn = module.params["cert_cn"] + cert_friendly_name = module.params["cert_friendly_name"] + signed_by = module.params["signed_by"] + rsa_nbits = module.params["rsa_nbits"] + + try: + stdout = generate_cert(module, + ip_address, + key_filename, + password, + cert_cn, + cert_friendly_name, + signed_by, + rsa_nbits) + except 
Exception as exc: + module.fail_json(msg=to_native(exc)) + + module.exit_json(changed=True, msg="okey dokey") + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/panos/panos_check.py b/plugins/modules/network/panos/panos_check.py new file mode 100644 index 0000000000..c7a8fa6e79 --- /dev/null +++ b/plugins/modules/network/panos/panos_check.py @@ -0,0 +1,147 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Ansible module to manage PaloAltoNetworks Firewall +# (c) 2016, techbizdev +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +DOCUMENTATION = ''' +--- +module: panos_check +short_description: check if PAN-OS device is ready for configuration +description: + - Check if PAN-OS device is ready for being configured (no pending jobs). + - The check could be done once or multiple times until the device is ready. +author: "Luigi Mori (@jtschichold), Ivan Bojer (@ivanbojer)" +requirements: + - pan-python +deprecated: + alternative: Use U(https://galaxy.ansible.com/PaloAltoNetworks/paloaltonetworks) instead. + removed_in: "2.12" + why: Consolidating code base. +options: + timeout: + description: + - timeout of API calls + required: false + default: 0 + interval: + description: + - time waited between checks + required: false + default: 0 +extends_documentation_fragment: +- community.general.panos + +''' + +EXAMPLES = ''' +# single check on 192.168.1.1 with credentials admin/admin +- name: check if ready + panos_check: + ip_address: "192.168.1.1" + password: "admin" + +# check for 10 times, every 30 seconds, if device 192.168.1.1 +# is ready, using credentials admin/admin +- name: wait for reboot + panos_check: + ip_address: "192.168.1.1" + password: "admin" + register: result + until: result is not failed + retries: 10 + delay: 30 +''' + +RETURN = ''' +# Default return values +''' + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['deprecated'], + 'supported_by': 'community'} + + +from ansible.module_utils.basic import AnsibleModule +import time + +try: + import pan.xapi + HAS_LIB = True +except ImportError: + HAS_LIB = False + + +def check_jobs(jobs, module): + job_check = False + for j in jobs: + status = j.find('.//status') + if status is None: + return False + if status.text != 'FIN': + return False + job_check = True + return job_check + + +def main(): + argument_spec = dict( + ip_address=dict(required=True), + password=dict(required=True, no_log=True), + username=dict(default='admin'), + timeout=dict(default=0, type='int'), + interval=dict(default=0, type='int') + ) + module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False) + if not HAS_LIB: + module.fail_json(msg='pan-python is required for this module') + + ip_address = module.params["ip_address"] + password = module.params["password"] + username = module.params['username'] + timeout = module.params['timeout'] + interval = module.params['interval'] + + xapi = pan.xapi.PanXapi( + hostname=ip_address, 
+ api_username=username, + api_password=password, + timeout=60 + ) + + checkpnt = time.time() + timeout + while True: + try: + xapi.op(cmd="show jobs all", cmd_xml=True) + except Exception: + pass + else: + jobs = xapi.element_root.findall('.//job') + if check_jobs(jobs, module): + module.exit_json(changed=True, msg="okey dokey") + + if time.time() > checkpnt: + break + + time.sleep(interval) + + module.fail_json(msg="Timeout") + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/panos/panos_commit.py b/plugins/modules/network/panos/panos_commit.py new file mode 100644 index 0000000000..3d94c045a8 --- /dev/null +++ b/plugins/modules/network/panos/panos_commit.py @@ -0,0 +1,235 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Ansible module to manage PaloAltoNetworks Firewall +# (c) 2019, Tomi Raittinen +# (c) 2016, techbizdev +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +DOCUMENTATION = ''' +--- +module: panos_commit +short_description: commit firewall's candidate configuration +description: + - PanOS module that will commit firewall's candidate configuration on + - the device. The new configuration will become active immediately. +author: + - Luigi Mori (@jtschichold) + - Ivan Bojer (@ivanbojer) + - Tomi Raittinen (@traittinen) +requirements: + - pan-python +deprecated: + alternative: Use U(https://galaxy.ansible.com/PaloAltoNetworks/paloaltonetworks) instead. + removed_in: "2.12" + why: Consolidating code base. +options: + ip_address: + description: + - IP address (or hostname) of PAN-OS device. + required: true + password: + description: + - Password for authentication. If the value is not specified in the + task, the value of environment variable C(ANSIBLE_NET_PASSWORD) + will be used instead. + required: true + username: + description: + - Username for authentication. If the value is not specified in the + task, the value of environment variable C(ANSIBLE_NET_USERNAME) + will be used instead if defined. C(admin) will be used if nothing + above is defined. + default: admin + interval: + description: + - interval for checking commit job + default: 0.5 + timeout: + description: + - timeout for commit job + sync: + description: + - if commit should be synchronous + type: bool + default: 'yes' + description: + description: + - Commit description/comment + type: str + commit_changes_by: + description: + - Commit changes made by specified admin + type: list + commit_vsys: + description: + - Commit changes for specified VSYS + type: list +''' + +EXAMPLES = ''' +# Commit candidate config on 192.168.1.1 in sync mode +- panos_commit: + ip_address: "192.168.1.1" + username: "admin" + password: "admin" +''' + +RETURN = ''' +panos_commit: + description: Information about commit job. + returned: always + type: complex + version_added: 2.8 + contains: + job_id: + description: Palo Alto job ID for the commit operation. Only returned if commit job is launched on device. 
+ returned: always + type: str + sample: "139" + status_code: + description: Palo Alto API status code. Null if commit is successful. + returned: always + type: str + sample: 19 + status_detail: + description: Palo Alto API detailed status message. + returned: always + type: str + sample: Configuration committed successfully + status_text: + description: Palo Alto API status text. + returned: always + type: str + sample: success +''' + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['deprecated'], + 'supported_by': 'community'} + + +from ansible.module_utils.basic import AnsibleModule, env_fallback +import xml.etree.ElementTree as etree + +try: + import pan.xapi + HAS_LIB = True +except ImportError: + HAS_LIB = False + + +def main(): + argument_spec = dict( + ip_address=dict(required=True, type='str'), + password=dict(fallback=(env_fallback, ['ANSIBLE_NET_PASSWORD']), no_log=True), + username=dict(fallback=(env_fallback, ['ANSIBLE_NET_USERNAME']), default="admin"), + interval=dict(default=0.5), + timeout=dict(), + sync=dict(type='bool', default=True), + description=dict(type='str'), + commit_changes_by=dict(type='list'), + commit_vsys=dict(type='list') + ) + module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False) + + if not HAS_LIB: + module.fail_json(msg='pan-python is required for this module') + + ip_address = module.params["ip_address"] + if not ip_address: + module.fail_json(msg="ip_address should be specified") + + password = module.params["password"] + if not password: + module.fail_json(msg="password is required") + + username = module.params['username'] + if not username: + module.fail_json(msg="username is required") + + interval = module.params['interval'] + timeout = module.params['timeout'] + sync = module.params['sync'] + + xapi = pan.xapi.PanXapi( + hostname=ip_address, + api_username=username, + api_password=password + ) + + cmd = "<commit>" + + description = module.params["description"] + if description: + cmd += "<description>" + description + "</description>" + + commit_changes_by = module.params["commit_changes_by"] + commit_vsys = module.params["commit_vsys"] + + if commit_changes_by or commit_vsys: + + cmd += "<partial>" + + if commit_changes_by: + cmd += "<admin>" + for admin in commit_changes_by: + cmd += "<member>" + admin + "</member>" + cmd += "</admin>" + + if commit_vsys: + cmd += "<vsys>" + for vsys in commit_vsys: + cmd += "<member>" + vsys + "</member>" + cmd += "</vsys>" + + cmd += "</partial>" + + cmd += "</commit>" + + xapi.commit( + cmd=cmd, + sync=sync, + interval=interval, + timeout=timeout + ) + + try: + result = xapi.xml_root().encode('utf-8') + root = etree.fromstring(result) + job_id = root.find('./result/job/id').text + except AttributeError: + job_id = None + + panos_commit_details = dict( + status_text=xapi.status, + status_code=xapi.status_code, + status_detail=xapi.status_detail, + job_id=job_id + ) + + if "Commit failed" in xapi.status_detail: + module.fail_json(msg=xapi.status_detail, panos_commit=panos_commit_details) + + if job_id: + module.exit_json(changed=True, msg="Commit successful.", panos_commit=panos_commit_details) + else: + module.exit_json(changed=False, msg="No changes to commit.", panos_commit=panos_commit_details) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/panos/panos_dag.py b/plugins/modules/network/panos/panos_dag.py new file mode 100644 index 0000000000..6e9e57d53b --- /dev/null +++ b/plugins/modules/network/panos/panos_dag.py @@ -0,0 +1,145 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Ansible module to manage PaloAltoNetworks Firewall +# (c) 2016, techbizdev +# +#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: panos_dag
+short_description: create a dynamic address group
+description:
+    - Create a dynamic address group object in the firewall used for policy rules
+author: "Luigi Mori (@jtschichold), Ivan Bojer (@ivanbojer)"
+requirements:
+    - pan-python
+deprecated:
+    alternative: Use U(https://galaxy.ansible.com/PaloAltoNetworks/paloaltonetworks) instead.
+    removed_in: "2.12"
+    why: Consolidating code base.
+options:
+    dag_name:
+        description:
+            - name of the dynamic address group
+        required: true
+    dag_filter:
+        description:
+            - dynamic filter used by the dynamic address group
+        required: true
+    commit:
+        description:
+            - commit if changed
+        type: bool
+        default: 'yes'
+extends_documentation_fragment:
+- community.general.panos
+
+'''
+
+EXAMPLES = '''
+- name: dag
+  panos_dag:
+    ip_address: "192.168.1.1"
+    password: "admin"
+    dag_name: "dag-1"
+    dag_filter: "'aws-tag.aws:cloudformation:logical-id.ServerInstance' and 'instanceState.running'"
+'''
+
+RETURN = '''
+# Default return values
+'''
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['deprecated'],
+                    'supported_by': 'community'}
+
+
+from ansible.module_utils.basic import AnsibleModule
+
+try:
+    import pan.xapi
+    HAS_LIB = True
+except ImportError:
+    HAS_LIB = False
+
+_ADDRGROUP_XPATH = "/config/devices/entry[@name='localhost.localdomain']" +\
+                   "/vsys/entry[@name='vsys1']/address-group/entry[@name='%s']"
+
+
+def addressgroup_exists(xapi, group_name):
+    xapi.get(_ADDRGROUP_XPATH % group_name)
+    e = xapi.element_root.find('.//entry')
+    if e is None:
+        return False
+    return True
+
+
+def add_dag(xapi, dag_name, dag_filter):
+    if addressgroup_exists(xapi, dag_name):
+        return False
+
+    # build the dynamic filter element for the new address group
+    exml = []
+
+    exml.append('<dynamic>')
+    exml.append('<filter>%s</filter>' % dag_filter)
+    exml.append('</dynamic>')
+
+    exml = ''.join(exml)
+    xapi.set(xpath=_ADDRGROUP_XPATH % dag_name, element=exml)
+
+    return True
+
+
+def main():
+    argument_spec = dict(
+        ip_address=dict(required=True),
+        password=dict(required=True, no_log=True),
+        username=dict(default='admin'),
+        dag_name=dict(required=True),
+        dag_filter=dict(required=True),
+        commit=dict(type='bool', default=True)
+    )
+    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False)
+    if not HAS_LIB:
+        module.fail_json(msg='pan-python is required for this module')
+
+    ip_address = module.params["ip_address"]
+    password = module.params["password"]
+    username = module.params['username']
+
+    xapi = pan.xapi.PanXapi(
+        hostname=ip_address,
+        api_username=username,
+        api_password=password
+    )
+
+    dag_name = module.params['dag_name']
+    dag_filter = module.params['dag_filter']
+    commit = module.params['commit']
+
+    changed = add_dag(xapi, dag_name, dag_filter)
+
+    if changed and commit:
+        xapi.commit(cmd="<commit></commit>", sync=True, interval=1)
+
+    module.exit_json(changed=changed, msg="okey dokey")
+
+
+if __name__ ==
'__main__': + main() diff --git a/plugins/modules/network/panos/panos_dag_tags.py b/plugins/modules/network/panos/panos_dag_tags.py new file mode 100644 index 0000000000..e706def9f4 --- /dev/null +++ b/plugins/modules/network/panos/panos_dag_tags.py @@ -0,0 +1,232 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Ansible module to manage PaloAltoNetworks Firewall +# (c) 2016, techbizdev +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# limitations under the License. + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['deprecated'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: panos_dag_tags +short_description: Create tags for DAG's on PAN-OS devices. +description: + - Create the ip address to tag associations. Tags will in turn be used to create DAG's +author: "Vinay Venkataraghavan (@vinayvenkat)" +requirements: + - pan-python can be obtained from PyPI U(https://pypi.org/project/pan-python/) + - pandevice can be obtained from PyPI U(https://pypi.org/project/pandevice/) +deprecated: + alternative: Use U(https://galaxy.ansible.com/PaloAltoNetworks/paloaltonetworks) instead. + removed_in: "2.12" + why: Consolidating code base. +notes: + - Checkmode is not supported. + - Panorama is not supported. +options: + api_key: + description: + - API key that can be used instead of I(username)/I(password) credentials. + description: + description: + - The purpose / objective of the static Address Group + commit: + description: + - commit if changed + default: true + type: bool + devicegroup: + description: > + - Device groups are used for the Panorama interaction with Firewall(s). The group must exists on Panorama. + If device group is not define we assume that we are contacting Firewall. + operation: + description: + - The action to be taken. Supported values are I(add)/I(update)/I(find)/I(delete). + tag_names: + description: + - The list of the tags that will be added or removed from the IP address. + ip_to_register: + description: + - IP that will be registered with the given tag names. 
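+# Under the hood this module drives pandevice's User-ID API. A minimal sketch
+# of the equivalent standalone calls (hostname, credentials, IP, and tag are
+# illustrative assumptions, not defaults):
+#
+#   from pandevice import firewall
+#   fw = firewall.Firewall('192.168.1.1', 'admin', 'admin')
+#   fw.userid.register('10.0.0.10', 'prod-web')    # operation: add
+#   fw.userid.get_registered_ip()                  # operation: list
+#   fw.userid.unregister('10.0.0.10', 'prod-web')  # operation: delete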
+extends_documentation_fragment: +- community.general.panos + +''' + +EXAMPLES = ''' +- name: Create the tags to map IP addresses + panos_dag_tags: + ip_address: "{{ ip_address }}" + password: "{{ password }}" + ip_to_register: "{{ ip_to_register }}" + tag_names: "{{ tag_names }}" + description: "Tags to allow certain IP's to access various SaaS Applications" + operation: 'add' + tags: "adddagip" + +- name: List the IP address to tag mapping + panos_dag_tags: + ip_address: "{{ ip_address }}" + password: "{{ password }}" + tag_names: "{{ tag_names }}" + description: "List the IP address to tag mapping" + operation: 'list' + tags: "listdagip" + +- name: Unregister an IP address from a tag mapping + panos_dag_tags: + ip_address: "{{ ip_address }}" + password: "{{ password }}" + ip_to_register: "{{ ip_to_register }}" + tag_names: "{{ tag_names }}" + description: "Unregister IP address from tag mappings" + operation: 'delete' + tags: "deletedagip" +''' + +RETURN = ''' +# Default return values +''' + +try: + from pandevice import base + from pandevice import firewall + from pandevice import panorama + from pandevice import objects + + from pan.xapi import PanXapiError + + HAS_LIB = True +except ImportError: + HAS_LIB = False + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_native + + +def get_devicegroup(device, devicegroup): + dg_list = device.refresh_devices() + for group in dg_list: + if isinstance(group, panorama.DeviceGroup): + if group.name == devicegroup: + return group + return False + + +def register_ip_to_tag_map(device, ip_addresses, tag): + exc = None + try: + device.userid.register(ip_addresses, tag) + except PanXapiError as exc: + return False, exc + + return True, exc + + +def get_all_address_group_mapping(device): + exc = None + ret = None + try: + ret = device.userid.get_registered_ip() + except PanXapiError as exc: + return False, exc + + return ret, exc + + +def delete_address_from_mapping(device, ip_address, tags): + exc = None + try: + ret = device.userid.unregister(ip_address, tags) + except PanXapiError as exc: + return False, exc + + return True, exc + + +def main(): + argument_spec = dict( + ip_address=dict(required=True), + password=dict(required=True, no_log=True), + username=dict(default='admin'), + api_key=dict(no_log=True), + devicegroup=dict(default=None), + description=dict(default=None), + ip_to_register=dict(type='str', required=False), + tag_names=dict(type='list', required=True), + commit=dict(type='bool', default=True), + operation=dict(type='str', required=True) + ) + + module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False) + if not HAS_LIB: + module.fail_json(msg='pan-python is required for this module') + + ip_address = module.params["ip_address"] + password = module.params["password"] + username = module.params['username'] + api_key = module.params['api_key'] + commit = module.params['commit'] + devicegroup = module.params['devicegroup'] + operation = module.params['operation'] + + # Create the device with the appropriate pandevice type + device = base.PanDevice.create_from_device(ip_address, username, password, api_key=api_key) + + # If Panorama, validate the devicegroup + dev_group = None + if devicegroup and isinstance(device, panorama.Panorama): + dev_group = get_devicegroup(device, devicegroup) + if dev_group: + device.add(dev_group) + else: + module.fail_json(msg='\'%s\' device group not found in Panorama. Is the name correct?' 
% devicegroup) + + result = None + if operation == 'add': + result, exc = register_ip_to_tag_map(device, + ip_addresses=module.params.get('ip_to_register', None), + tag=module.params.get('tag_names', None) + ) + elif operation == 'list': + result, exc = get_all_address_group_mapping(device) + elif operation == 'delete': + result, exc = delete_address_from_mapping(device, + ip_address=module.params.get('ip_to_register', None), + tags=module.params.get('tag_names', []) + ) + else: + module.fail_json(msg="Unsupported option") + + if not result: + module.fail_json(msg=exc.message) + + if commit: + try: + device.commit(sync=True) + except PanXapiError as exc: + module.fail_json(msg=to_native(exc)) + + module.exit_json(changed=True, msg=result) + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/network/panos/panos_import.py b/plugins/modules/network/panos/panos_import.py new file mode 100644 index 0000000000..19d3ee9b20 --- /dev/null +++ b/plugins/modules/network/panos/panos_import.py @@ -0,0 +1,195 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Ansible module to manage PaloAltoNetworks Firewall +# (c) 2016, techbizdev +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +DOCUMENTATION = ''' +--- +module: panos_import +short_description: import file on PAN-OS devices +description: + - Import file on PAN-OS device +notes: + - API reference documentation can be read from the C(/api/) directory of your appliance + - Certificate validation is enabled by default as of Ansible 2.6. This may break existing playbooks but should be disabled with caution. +author: "Luigi Mori (@jtschichold), Ivan Bojer (@ivanbojer)" +requirements: + - pan-python + - requests + - requests_toolbelt +deprecated: + alternative: Use U(https://galaxy.ansible.com/PaloAltoNetworks/paloaltonetworks) instead. + removed_in: "2.12" + why: Consolidating code base. +options: + category: + description: + - Category of file uploaded. The default is software. + - See API > Import section of the API reference for category options. + default: software + file: + description: + - Location of the file to import into device. + url: + description: + - URL of the file that will be imported to device. + validate_certs: + description: + - If C(no), SSL certificates will not be validated. Disabling certificate validation is not recommended. 
+ default: yes + type: bool +extends_documentation_fragment: +- community.general.panos + +''' + +EXAMPLES = ''' +# import software image PanOS_vm-6.1.1 on 192.168.1.1 +- name: import software image into PAN-OS + panos_import: + ip_address: 192.168.1.1 + username: admin + password: admin + file: /tmp/PanOS_vm-6.1.1 + category: software +''' + +RETURN = ''' +# Default return values +''' + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['deprecated'], + 'supported_by': 'community'} + + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_native + +import os.path +import xml.etree +import tempfile +import shutil +import os + +try: + import pan.xapi + import requests + import requests_toolbelt + HAS_LIB = True +except ImportError: + HAS_LIB = False + + +def import_file(xapi, module, ip_address, file_, category): + xapi.keygen() + + params = { + 'type': 'import', + 'category': category, + 'key': xapi.api_key + } + + filename = os.path.basename(file_) + + mef = requests_toolbelt.MultipartEncoder( + fields={ + 'file': (filename, open(file_, 'rb'), 'application/octet-stream') + } + ) + + r = requests.post( + 'https://' + ip_address + '/api/', + verify=module.params['validate_certs'], + params=params, + headers={'Content-Type': mef.content_type}, + data=mef + ) + + # if something goes wrong just raise an exception + r.raise_for_status() + + resp = xml.etree.ElementTree.fromstring(r.content) + + if resp.attrib['status'] == 'error': + module.fail_json(msg=r.content) + + return True, filename + + +def download_file(url): + r = requests.get(url, stream=True) + fo = tempfile.NamedTemporaryFile(prefix='ai', delete=False) + shutil.copyfileobj(r.raw, fo) + fo.close() + + return fo.name + + +def delete_file(path): + os.remove(path) + + +def main(): + argument_spec = dict( + ip_address=dict(required=True), + password=dict(required=True, no_log=True), + username=dict(default='admin'), + category=dict(default='software'), + file=dict(), + url=dict(), + validate_certs=dict(type='bool', default=True), + ) + module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False, required_one_of=[['file', 'url']]) + if not HAS_LIB: + module.fail_json(msg='pan-python, requests, and requests_toolbelt are required for this module') + + ip_address = module.params["ip_address"] + password = module.params["password"] + username = module.params['username'] + + xapi = pan.xapi.PanXapi( + hostname=ip_address, + api_username=username, + api_password=password + ) + + file_ = module.params['file'] + url = module.params['url'] + + category = module.params['category'] + + # we can get file from URL or local storage + if url is not None: + file_ = download_file(url) + + try: + changed, filename = import_file(xapi, module, ip_address, file_, category) + except Exception as exc: + module.fail_json(msg=to_native(exc)) + + # cleanup and delete file if local + if url is not None: + delete_file(file_) + + module.exit_json(changed=changed, filename=filename, msg="okey dokey") + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/panos/panos_interface.py b/plugins/modules/network/panos/panos_interface.py new file mode 100644 index 0000000000..3460bcb8f4 --- /dev/null +++ b/plugins/modules/network/panos/panos_interface.py @@ -0,0 +1,179 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Ansible module to manage PaloAltoNetworks Firewall +# (c) 2016, techbizdev +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it 
and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['deprecated'],
+                    'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: panos_interface
+short_description: configure data-port network interface for DHCP
+description:
+    - Configure data-port (DP) network interface for DHCP. By default DP interfaces are static.
+author: "Luigi Mori (@jtschichold), Ivan Bojer (@ivanbojer)"
+requirements:
+    - pan-python can be obtained from PyPI U(https://pypi.org/project/pan-python/)
+deprecated:
+    alternative: Use U(https://galaxy.ansible.com/PaloAltoNetworks/paloaltonetworks) instead.
+    removed_in: "2.12"
+    why: Consolidating code base.
+notes:
+    - Checkmode is not supported.
+options:
+    if_name:
+        description:
+            - Name of the interface to configure.
+        required: true
+    zone_name:
+        description: >
+            Name of the zone for the interface. If the zone does not exist it is created but if the zone exists and
+            it is not of the layer3 type the operation will fail.
+        required: true
+    create_default_route:
+        description:
+            - Whether or not to add a default route pointing to the router learned via DHCP.
+        default: "false"
+        type: bool
+    commit:
+        description:
+            - Commit if changed
+        default: true
+        type: bool
+extends_documentation_fragment:
+- community.general.panos
+
+'''
+
+EXAMPLES = '''
+- name: enable DHCP client on ethernet1/1 in zone public
+  panos_interface:
+    password: "admin"
+    ip_address: "192.168.1.1"
+    if_name: "ethernet1/1"
+    zone_name: "public"
+    create_default_route: "yes"
+'''
+
+RETURN = '''
+# Default return values
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+
+try:
+    import pan.xapi
+    from pan.xapi import PanXapiError
+    HAS_LIB = True
+except ImportError:
+    HAS_LIB = False
+
+_IF_XPATH = "/config/devices/entry[@name='localhost.localdomain']" +\
+            "/network/interface/ethernet/entry[@name='%s']"
+
+_ZONE_XPATH = "/config/devices/entry[@name='localhost.localdomain']" +\
+              "/vsys/entry/zone/entry"
+_ZONE_XPATH_QUERY = _ZONE_XPATH + "[network/layer3/member/text()='%s']"
+_ZONE_XPATH_IF = _ZONE_XPATH + "[@name='%s']/network/layer3/member[text()='%s']"
+_VR_XPATH = "/config/devices/entry[@name='localhost.localdomain']" +\
+            "/network/virtual-router/entry"
+
+
+def add_dhcp_if(xapi, if_name, zone_name, create_default_route):
+    if_xml = [
+        '<entry name="%s">',
+        '<layer3>',
+        '<dhcp-client>',
+        '<create-default-route>%s</create-default-route>',
+        '</dhcp-client>'
+        '</layer3>'
+        '</entry>'
+    ]
+    cdr = 'yes'
+    if not create_default_route:
+        cdr = 'no'
+    if_xml = (''.join(if_xml)) % (if_name, cdr)
+    xapi.edit(xpath=_IF_XPATH % if_name, element=if_xml)
+
+    xapi.set(xpath=_ZONE_XPATH + "[@name='%s']/network/layer3" % zone_name,
+             element='<member>%s</member>' % if_name)
+    xapi.set(xpath=_VR_XPATH + "[@name='default']/interface",
+             element='<member>%s</member>' % if_name)
+
+    return True
+
+
+def if_exists(xapi, if_name):
+    xpath = _IF_XPATH % if_name
+    xapi.get(xpath=xpath)
+    network = xapi.element_root.find('.//layer3')
+    return (network is not None)
+
+
+def main():
+    argument_spec = dict(
+        ip_address=dict(required=True),
+        password=dict(required=True, no_log=True),
+        username=dict(default='admin'),
+        if_name=dict(required=True),
+        zone_name=dict(required=True),
+        create_default_route=dict(type='bool', default=False),
+        commit=dict(type='bool', default=True)
+    )
+    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False)
+    if not HAS_LIB:
+        module.fail_json(msg='pan-python is required for this module')
+
+    ip_address = module.params["ip_address"]
+    password = module.params["password"]
+    username = module.params['username']
+
+    xapi = pan.xapi.PanXapi(
+        hostname=ip_address,
+        api_username=username,
+        api_password=password
+    )
+
+    if_name = module.params['if_name']
+    zone_name = module.params['zone_name']
+    create_default_route = module.params['create_default_route']
+    commit = module.params['commit']
+
+    ifexists = if_exists(xapi, if_name)
+
+    if ifexists:
+        module.exit_json(changed=False, msg="interface exists, not changed")
+
+    try:
+        changed = add_dhcp_if(xapi, if_name, zone_name, create_default_route)
+    except PanXapiError as exc:
+        module.fail_json(msg=to_native(exc))
+
+    if changed and commit:
+        xapi.commit(cmd="<commit></commit>", sync=True, interval=1)
+
+    module.exit_json(changed=changed, msg="okey dokey")
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/network/panos/panos_lic.py b/plugins/modules/network/panos/panos_lic.py
new file mode 100644
index 0000000000..9e37fd8cd3
--- /dev/null
+++ b/plugins/modules/network/panos/panos_lic.py
@@ -0,0 +1,173 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Ansible module to manage PaloAltoNetworks Firewall
+# (c) 2016, techbizdev <techbizdev@paloaltonetworks.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: panos_lic
+short_description: apply authcode to a device/instance
+description:
+    - Apply an authcode to a device.
+    - The authcode should have been previously registered on the Palo Alto Networks support portal.
+    - The device should have Internet access.
+author: "Luigi Mori (@jtschichold), Ivan Bojer (@ivanbojer)"
+requirements:
+    - pan-python
+deprecated:
+    alternative: Use U(https://galaxy.ansible.com/PaloAltoNetworks/paloaltonetworks) instead.
+    removed_in: "2.12"
+    why: Consolidating code base.
+options: + auth_code: + description: + - authcode to be applied + required: true + force: + description: + - whether to apply authcode even if device is already licensed + required: false + default: "false" + type: bool +extends_documentation_fragment: +- community.general.panos + +''' + +EXAMPLES = ''' + - hosts: localhost + connection: local + tasks: + - name: fetch license + panos_lic: + ip_address: "192.168.1.1" + password: "paloalto" + auth_code: "IBADCODE" + register: result + - name: Display serialnumber (if already registered) + debug: + var: "{{result.serialnumber}}" +''' + +RETURN = ''' +serialnumber: + description: serialnumber of the device in case that it has been already registered + returned: success + type: str + sample: 007200004214 +''' + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['deprecated'], + 'supported_by': 'community'} + + +from ansible.module_utils.basic import AnsibleModule + +try: + import pan.xapi + HAS_LIB = True +except ImportError: + HAS_LIB = False + + +def get_serial(xapi, module): + xapi.op(cmd="show system info", cmd_xml=True) + r = xapi.element_root + serial = r.find('.//serial') + if serial is None: + module.fail_json(msg="No tag in show system info") + + serial = serial.text + + return serial + + +def apply_authcode(xapi, module, auth_code): + try: + xapi.op(cmd='request license fetch auth-code "%s"' % auth_code, + cmd_xml=True) + except pan.xapi.PanXapiError: + if hasattr(xapi, 'xml_document'): + if 'Successfully' in xapi.xml_document: + return + + if 'Invalid Auth Code' in xapi.xml_document: + module.fail_json(msg="Invalid Auth Code") + + raise + + return + + +def fetch_authcode(xapi, module): + try: + xapi.op(cmd='request license fetch', cmd_xml=True) + except pan.xapi.PanXapiError: + if hasattr(xapi, 'xml_document'): + if 'Successfully' in xapi.xml_document: + return + + if 'Invalid Auth Code' in xapi.xml_document: + module.fail_json(msg="Invalid Auth Code") + + raise + + return + + +def main(): + argument_spec = dict( + ip_address=dict(required=True), + password=dict(required=True, no_log=True), + auth_code=dict(), + username=dict(default='admin'), + force=dict(type='bool', default=False) + ) + module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False) + if not HAS_LIB: + module.fail_json(msg='pan-python is required for this module') + + ip_address = module.params["ip_address"] + password = module.params["password"] + auth_code = module.params["auth_code"] + force = module.params['force'] + username = module.params['username'] + + xapi = pan.xapi.PanXapi( + hostname=ip_address, + api_username=username, + api_password=password + ) + + if not force: + serialnumber = get_serial(xapi, module) + if serialnumber != 'unknown': + return module.exit_json(changed=False, serialnumber=serialnumber) + if auth_code: + apply_authcode(xapi, module, auth_code) + else: + fetch_authcode(xapi, module) + + module.exit_json(changed=True, msg="okey dokey") + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/panos/panos_loadcfg.py b/plugins/modules/network/panos/panos_loadcfg.py new file mode 100644 index 0000000000..b9ab48e680 --- /dev/null +++ b/plugins/modules/network/panos/panos_loadcfg.py @@ -0,0 +1,125 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Ansible module to manage PaloAltoNetworks Firewall +# (c) 2016, techbizdev +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published 
by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: panos_loadcfg
+short_description: load configuration on PAN-OS device
+description:
+    - Load configuration on PAN-OS device
+author: "Luigi Mori (@jtschichold), Ivan Bojer (@ivanbojer)"
+requirements:
+    - pan-python
+deprecated:
+    alternative: Use U(https://galaxy.ansible.com/PaloAltoNetworks/paloaltonetworks) instead.
+    removed_in: "2.12"
+    why: Consolidating code base.
+options:
+    file:
+        description:
+            - configuration file to load
+    commit:
+        description:
+            - commit if changed
+        type: bool
+        default: 'yes'
+extends_documentation_fragment:
+- community.general.panos
+
+'''
+
+EXAMPLES = '''
+# Import and load config file from URL
+  - name: import configuration
+    panos_import:
+      ip_address: "192.168.1.1"
+      password: "admin"
+      url: "{{ConfigURL}}"
+      category: "configuration"
+    register: result
+  - name: load configuration
+    panos_loadcfg:
+      ip_address: "192.168.1.1"
+      password: "admin"
+      file: "{{result.filename}}"
+'''
+
+RETURN = '''
+# Default return values
+'''
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['deprecated'],
+                    'supported_by': 'community'}
+
+
+from ansible.module_utils.basic import AnsibleModule
+
+try:
+    import pan.xapi
+    HAS_LIB = True
+except ImportError:
+    HAS_LIB = False
+
+
+def load_cfgfile(xapi, module, ip_address, file_):
+    # load configuration file into the candidate config
+    cmd = '<load><config><from>%s</from></config></load>' %\
+          file_
+
+    xapi.op(cmd=cmd)
+
+    return True
+
+
+def main():
+    argument_spec = dict(
+        ip_address=dict(required=True),
+        password=dict(required=True, no_log=True),
+        username=dict(default='admin'),
+        file=dict(),
+        commit=dict(type='bool', default=True)
+    )
+    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False)
+    if not HAS_LIB:
+        module.fail_json(msg='pan-python is required for this module')
+
+    ip_address = module.params["ip_address"]
+    password = module.params["password"]
+    username = module.params['username']
+    file_ = module.params['file']
+    commit = module.params['commit']
+
+    xapi = pan.xapi.PanXapi(
+        hostname=ip_address,
+        api_username=username,
+        api_password=password
+    )
+
+    changed = load_cfgfile(xapi, module, ip_address, file_)
+    if changed and commit:
+        xapi.commit(cmd="<commit></commit>", sync=True, interval=1)
+
+    module.exit_json(changed=changed, msg="okey dokey")
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/network/panos/panos_match_rule.py b/plugins/modules/network/panos/panos_match_rule.py
new file mode 100644
index 0000000000..782794259e
--- /dev/null
+++ b/plugins/modules/network/panos/panos_match_rule.py
@@ -0,0 +1,389 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Ansible module to manage PaloAltoNetworks Firewall
+# (c) 2016, techbizdev <techbizdev@paloaltonetworks.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
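+# This module works by assembling a PAN-OS operational "test ...-policy-match"
+# command from the task arguments and asking the device which rule matches.
+# A minimal sketch of the same round trip with pan-python alone (hostname,
+# credentials, and match values are illustrative assumptions):
+#
+#   import pan.xapi
+#   xapi = pan.xapi.PanXapi(hostname='192.168.1.1',
+#                           api_username='admin', api_password='admin')
+#   xapi.op(cmd='test security-policy-match source "10.0.0.1" '
+#               'destination "8.8.8.8" protocol "17"', cmd_xml=True)
+#   print(xapi.xml_result())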
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.
+# limitations under the License.
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['deprecated'],
+                    'supported_by': 'community'}
+
+DOCUMENTATION = '''
+---
+module: panos_match_rule
+short_description: Test for match against a security rule on PAN-OS devices or Panorama management console.
+description:
+    - Security policies allow you to enforce rules and take action, and can be as general or specific as needed.
+author: "Robert Hagen (@rnh556)"
+requirements:
+    - pan-python can be obtained from PyPI U(https://pypi.org/project/pan-python/)
+    - pandevice can be obtained from PyPI U(https://pypi.org/project/pandevice/)
+deprecated:
+    alternative: Use U(https://galaxy.ansible.com/PaloAltoNetworks/paloaltonetworks) instead.
+    removed_in: "2.12"
+    why: Consolidating code base.
+notes:
+    - Checkmode is not supported.
+    - Panorama is NOT supported.
+options:
+    ip_address:
+        description:
+            - IP address (or hostname) of PAN-OS device being configured.
+        required: true
+    username:
+        description:
+            - Username credentials to use for auth unless I(api_key) is set.
+        default: "admin"
+    password:
+        description:
+            - Password credentials to use for auth unless I(api_key) is set.
+        required: true
+    api_key:
+        description:
+            - API key that can be used instead of I(username)/I(password) credentials.
+    rule_type:
+        description:
+            - Type of rule. Valid types are I(security) or I(nat).
+        required: true
+        choices:
+            - security
+            - nat
+    source_zone:
+        description:
+            - The source zone.
+    source_ip:
+        description:
+            - The source IP address.
+        required: true
+    source_port:
+        description:
+            - The source port.
+    source_user:
+        description:
+            - The source user or group.
+    to_interface:
+        description:
+            - The inbound interface in a NAT rule.
+    destination_zone:
+        description:
+            - The destination zone.
+    destination_ip:
+        description:
+            - The destination IP address.
+    destination_port:
+        description:
+            - The destination port.
+    application:
+        description:
+            - The application.
+    protocol:
+        description:
+            - The IP protocol number from 1 to 255.
+    category:
+        description:
+            - URL category
+    vsys_id:
+        description:
+            - ID of the VSYS object.
+ default: "vsys1" + required: true +''' + +EXAMPLES = ''' +- name: check security rules for Google DNS + panos_match_rule: + ip_address: '{{ ip_address }}' + username: '{{ username }}' + password: '{{ password }}' + rule_type: 'security' + source_ip: '10.0.0.0' + destination_ip: '8.8.8.8' + application: 'dns' + destination_port: '53' + protocol: '17' + register: result +- debug: msg='{{result.stdout_lines}}' + +- name: check security rules inbound SSH with user match + panos_match_rule: + ip_address: '{{ ip_address }}' + username: '{{ username }}' + password: '{{ password }}' + rule_type: 'security' + source_ip: '0.0.0.0' + source_user: 'mydomain\\jsmith' + destination_ip: '192.168.100.115' + destination_port: '22' + protocol: '6' + register: result +- debug: msg='{{result.stdout_lines}}' + +- name: check NAT rules for source NAT + panos_match_rule: + ip_address: '{{ ip_address }}' + username: '{{ username }}' + password: '{{ password }}' + rule_type: 'nat' + source_zone: 'Prod-DMZ' + source_ip: '10.10.118.50' + to_interface: 'ethernet1/2' + destination_zone: 'Internet' + destination_ip: '0.0.0.0' + protocol: '6' + register: result +- debug: msg='{{result.stdout_lines}}' + +- name: check NAT rules for inbound web + panos_match_rule: + ip_address: '{{ ip_address }}' + username: '{{ username }}' + password: '{{ password }}' + rule_type: 'nat' + source_zone: 'Internet' + source_ip: '0.0.0.0' + to_interface: 'ethernet1/1' + destination_zone: 'Prod DMZ' + destination_ip: '192.168.118.50' + destination_port: '80' + protocol: '6' + register: result +- debug: msg='{{result.stdout_lines}}' + +- name: check security rules for outbound POP3 in vsys4 + panos_match_rule: + ip_address: '{{ ip_address }}' + username: '{{ username }}' + password: '{{ password }}' + vsys_id: 'vsys4' + rule_type: 'security' + source_ip: '10.0.0.0' + destination_ip: '4.3.2.1' + application: 'pop3' + destination_port: '110' + protocol: '6' + register: result +- debug: msg='{{result.stdout_lines}}' + +''' + +RETURN = ''' +# Default return values +''' + +from ansible.module_utils.basic import AnsibleModule + +try: + from pan.xapi import PanXapiError + from pan.xapi import PanXapiError + from pandevice import base + from pandevice import policies + from pandevice import panorama + import xmltodict + import json + + HAS_LIB = True +except ImportError: + HAS_LIB = False + + +def create_security_test(**kwargs): + security_test = 'test security-policy-match' + + # Add the source IP (required) + if kwargs['source_ip']: + security_test += ' source \"%s\"' % kwargs['source_ip'] + + # Add the source user (optional) + if kwargs['source_user']: + security_test += ' source-user \"%s\"' % kwargs['source_user'] + + # Add the destination IP (required) + if kwargs['destination_ip']: + security_test += ' destination \"%s\"' % kwargs['destination_ip'] + + # Add the application (optional) + if kwargs['application']: + security_test += ' application \"%s\"' % kwargs['application'] + + # Add the destination port (required) + if kwargs['destination_port']: + security_test += ' destination-port \"%s\"' % kwargs['destination_port'] + + # Add the IP protocol number (required) + if kwargs['protocol']: + security_test += ' protocol \"%s\"' % kwargs['protocol'] + + # Add the URL category (optional) + if kwargs['category']: + security_test += ' category \"%s\"' % kwargs['category'] + + # Return the resulting string + return security_test + + +def create_nat_test(**kwargs): + nat_test = 'test nat-policy-match' + + # Add the source zone (optional) + if 
kwargs['source_zone']: + nat_test += ' from \"%s\"' % kwargs['source_zone'] + + # Add the source IP (required) + if kwargs['source_ip']: + nat_test += ' source \"%s\"' % kwargs['source_ip'] + + # Add the source user (optional) + if kwargs['source_port']: + nat_test += ' source-port \"%s\"' % kwargs['source_port'] + + # Add inbound interface (optional) + if kwargs['to_interface']: + nat_test += ' to-interface \"%s\"' % kwargs['to_interface'] + + # Add the destination zone (optional) + if kwargs['destination_zone']: + nat_test += ' to \"%s\"' % kwargs['destination_zone'] + + # Add the destination IP (required) + if kwargs['destination_ip']: + nat_test += ' destination \"%s\"' % kwargs['destination_ip'] + + # Add the destination port (optional) + if kwargs['destination_port']: + nat_test += ' destination-port \"%s\"' % kwargs['destination_port'] + + # Add the IP protocol number (required) + if kwargs['protocol']: + nat_test += ' protocol \"%s\"' % kwargs['protocol'] + + # Return the resulting string + return nat_test + + +def main(): + argument_spec = dict( + ip_address=dict(required=True), + password=dict(no_log=True), + username=dict(default='admin'), + api_key=dict(no_log=True), + vsys_id=dict(default='vsys1'), + rule_type=dict(required=True, choices=['security', 'nat']), + source_zone=dict(default=None), + source_ip=dict(default=None), + source_user=dict(default=None), + source_port=dict(default=None, type=int), + to_interface=dict(default=None), + destination_zone=dict(default=None), + category=dict(default=None), + application=dict(default=None), + protocol=dict(default=None, type=int), + destination_ip=dict(default=None), + destination_port=dict(default=None, type=int) + ) + module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False, + required_one_of=[['api_key', 'password']]) + if not HAS_LIB: + module.fail_json(msg='Missing required libraries.') + + ip_address = module.params["ip_address"] + password = module.params["password"] + username = module.params['username'] + api_key = module.params['api_key'] + vsys_id = module.params['vsys_id'] + rule_type = module.params['rule_type'] + source_zone = module.params['source_zone'] + source_ip = module.params['source_ip'] + source_user = module.params['source_user'] + source_port = module.params['source_port'] + to_interface = module.params['to_interface'] + destination_zone = module.params['destination_zone'] + destination_ip = module.params['destination_ip'] + destination_port = module.params['destination_port'] + category = module.params['category'] + application = module.params['application'] + protocol = module.params['protocol'] + + # Create the device with the appropriate pandevice type + device = base.PanDevice.create_from_device(ip_address, username, password, api_key=api_key) + + # Fail the module if this is a Panorama instance + if isinstance(device, panorama.Panorama): + module.fail_json( + failed=1, + msg='Panorama is not supported.' + ) + + # Create and attach security and NAT rulebases. Then populate them. + sec_rule_base = nat_rule_base = policies.Rulebase() + device.add(sec_rule_base) + device.add(nat_rule_base) + policies.SecurityRule.refreshall(sec_rule_base) + policies.NatRule.refreshall(nat_rule_base) + + # Which action shall we take on the object? 
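+    # For example, the first task in EXAMPLES above yields:
+    #   test security-policy-match source "10.0.0.0" destination "8.8.8.8"
+    #   application "dns" destination-port "53" protocol "17"
+    # and the nat branch builds the analogous "test nat-policy-match ..." string.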
+ if rule_type == 'security': + # Search for the object + test_string = create_security_test( + source_ip=source_ip, + source_user=source_user, + destination_ip=destination_ip, + destination_port=destination_port, + application=application, + protocol=protocol, + category=category + ) + elif rule_type == 'nat': + test_string = create_nat_test( + source_zone=source_zone, + source_ip=source_ip, + source_port=source_port, + to_interface=to_interface, + destination_zone=destination_zone, + destination_ip=destination_ip, + destination_port=destination_port, + protocol=protocol + ) + + # Submit the op command with the appropriate test string + try: + response = device.op(cmd=test_string, vsys=vsys_id) + except PanXapiError as exc: + module.fail_json(msg=exc.message) + + if response.find('result/rules').__len__() == 1: + rule_name = response.find('result/rules/entry').text.split(';')[0] + elif rule_type == 'nat': + module.exit_json(msg='No matching NAT rule.') + else: + module.fail_json(msg='Rule match failed. Please check playbook syntax.') + + if rule_type == 'security': + rule_match = sec_rule_base.find(rule_name, policies.SecurityRule) + elif rule_type == 'nat': + rule_match = nat_rule_base.find(rule_name, policies.NatRule) + + # Print out the rule + module.exit_json( + stdout_lines=json.dumps(xmltodict.parse(rule_match.element_str()), indent=2), + msg='Rule matched' + ) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/panos/panos_mgtconfig.py b/plugins/modules/network/panos/panos_mgtconfig.py new file mode 100644 index 0000000000..ac3f300765 --- /dev/null +++ b/plugins/modules/network/panos/panos_mgtconfig.py @@ -0,0 +1,191 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Ansible module to manage PaloAltoNetworks Firewall +# (c) 2016, techbizdev +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +DOCUMENTATION = ''' +--- +module: panos_mgtconfig +short_description: configure management settings of device +description: + - Configure management settings of device +author: "Luigi Mori (@jtschichold), Ivan Bojer (@ivanbojer)" +requirements: + - pan-python +deprecated: + alternative: Use U(https://galaxy.ansible.com/PaloAltoNetworks/paloaltonetworks) instead. + removed_in: "2.12" + why: Consolidating code base. 
+options:
+    dns_server_primary:
+        description:
+            - address of primary DNS server
+    dns_server_secondary:
+        description:
+            - address of secondary DNS server
+    panorama_primary:
+        description:
+            - address of primary Panorama server
+    panorama_secondary:
+        description:
+            - address of secondary Panorama server
+    commit:
+        description:
+            - commit if changed
+        type: bool
+        default: 'yes'
+extends_documentation_fragment:
+- community.general.panos
+
+'''
+
+EXAMPLES = '''
+- name: set dns and panorama
+  panos_mgtconfig:
+    ip_address: "192.168.1.1"
+    password: "admin"
+    dns_server_primary: "1.1.1.1"
+    dns_server_secondary: "1.1.1.2"
+    panorama_primary: "1.1.1.3"
+    panorama_secondary: "1.1.1.4"
+'''
+
+RETURN = '''
+# Default return values
+'''
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['deprecated'],
+                    'supported_by': 'community'}
+
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+
+try:
+    import pan.xapi
+    from pan.xapi import PanXapiError
+    HAS_LIB = True
+except ImportError:
+    HAS_LIB = False
+
+_XPATH_DNS_SERVERS = "/config/devices/entry[@name='localhost.localdomain']" +\
+                     "/deviceconfig/system/dns-setting/servers"
+_XPATH_PANORAMA_SERVERS = "/config" +\
+                          "/devices/entry[@name='localhost.localdomain']" +\
+                          "/deviceconfig/system"
+
+
+def set_dns_server(xapi, new_dns_server, primary=True):
+    if primary:
+        tag = "primary"
+    else:
+        tag = "secondary"
+    xpath = _XPATH_DNS_SERVERS + "/" + tag
+
+    # check the current element value
+    xapi.get(xpath)
+    val = xapi.element_root.find(".//" + tag)
+    if val is not None:
+        # element exists
+        val = val.text
+        if val == new_dns_server:
+            return False
+
+    element = "<%(tag)s>%(value)s</%(tag)s>" %\
+              dict(tag=tag, value=new_dns_server)
+    xapi.edit(xpath, element)
+
+    return True
+
+
+def set_panorama_server(xapi, new_panorama_server, primary=True):
+    if primary:
+        tag = "panorama-server"
+    else:
+        tag = "panorama-server-2"
+    xpath = _XPATH_PANORAMA_SERVERS + "/" + tag
+
+    # check the current element value
+    xapi.get(xpath)
+    val = xapi.element_root.find(".//" + tag)
+    if val is not None:
+        # element exists
+        val = val.text
+        if val == new_panorama_server:
+            return False
+
+    element = "<%(tag)s>%(value)s</%(tag)s>" %\
+              dict(tag=tag, value=new_panorama_server)
+    xapi.edit(xpath, element)
+
+    return True
+
+
+def main():
+    argument_spec = dict(
+        ip_address=dict(required=True),
+        password=dict(required=True, no_log=True),
+        username=dict(default='admin'),
+        dns_server_primary=dict(),
+        dns_server_secondary=dict(),
+        panorama_primary=dict(),
+        panorama_secondary=dict(),
+        commit=dict(type='bool', default=True)
+    )
+    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False)
+    if not HAS_LIB:
+        module.fail_json(msg='pan-python is required for this module')
+
+    ip_address = module.params["ip_address"]
+    password = module.params["password"]
+    username = module.params['username']
+    dns_server_primary = module.params['dns_server_primary']
+    dns_server_secondary = module.params['dns_server_secondary']
+    panorama_primary = module.params['panorama_primary']
+    panorama_secondary = module.params['panorama_secondary']
+    commit = module.params['commit']
+
+    xapi = pan.xapi.PanXapi(
+        hostname=ip_address,
+        api_username=username,
+        api_password=password
+    )
+
+    changed = False
+    try:
+        if dns_server_primary is not None:
+            changed |= set_dns_server(xapi, dns_server_primary, primary=True)
+        if dns_server_secondary is not None:
+            changed |= set_dns_server(xapi, dns_server_secondary, primary=False)
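+        # Each setter is idempotent: it reads the current element first and
+        # returns False when the configured value already matches, so `changed`
+        # only flips to True when an edit was actually pushed to the device.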
+        if panorama_primary is not None:
+            changed |= set_panorama_server(xapi, panorama_primary, primary=True)
+        if panorama_secondary is not None:
+            changed |= set_panorama_server(xapi, panorama_secondary, primary=False)
+
+        if changed and commit:
+            xapi.commit(cmd="<commit></commit>", sync=True, interval=1)
+    except PanXapiError as exc:
+        module.fail_json(msg=to_native(exc))
+
+    module.exit_json(changed=changed, msg="okey dokey")
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/network/panos/panos_nat_rule.py b/plugins/modules/network/panos/panos_nat_rule.py
new file mode 100644
index 0000000000..d67b1fccff
--- /dev/null
+++ b/plugins/modules/network/panos/panos_nat_rule.py
@@ -0,0 +1,471 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# (c) 2016, techbizdev <techbizdev@paloaltonetworks.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: panos_nat_rule
+short_description: create a policy NAT rule
+description: >
+    - Create a policy nat rule. Keep in mind that we can either end up configuring source NAT, destination NAT, or
+      both. Instead of splitting it into two we will make a fair attempt to determine which one the user wants.
+author: "Luigi Mori (@jtschichold), Ivan Bojer (@ivanbojer), Robert Hagen (@rnh556)"
+requirements:
+    - pan-python can be obtained from PyPI U(https://pypi.org/project/pan-python/)
+    - pandevice can be obtained from PyPI U(https://pypi.org/project/pandevice/)
+deprecated:
+    alternative: Use U(https://galaxy.ansible.com/PaloAltoNetworks/paloaltonetworks) instead.
+    removed_in: "2.12"
+    why: Consolidating code base.
+notes:
+    - Checkmode is not supported.
+    - Panorama is supported.
+options:
+    ip_address:
+        description:
+            - IP address (or hostname) of PAN-OS device being configured.
+        required: true
+    username:
+        description:
+            - Username credentials to use for auth unless I(api_key) is set.
+        default: "admin"
+    password:
+        description:
+            - Password credentials to use for auth unless I(api_key) is set.
+        required: true
+    api_key:
+        description:
+            - API key that can be used instead of I(username)/I(password) credentials.
+    operation:
+        description:
+            - The action to be taken. Supported values are I(add)/I(update)/I(find)/I(delete).
+        required: true
+        choices:
+            - add
+            - update
+            - delete
+            - find
+    devicegroup:
+        description:
+            - If Panorama, the device group to put this rule in.
+    rule_name:
+        description:
+            - name of the SNAT rule
+        required: true
+    description:
+        description:
+            - The description
+    source_zone:
+        description:
+            - list of source zones
+        required: true
+    destination_zone:
+        description:
+            - destination zone
+        required: true
+    source_ip:
+        description:
+            - list of source addresses
+        default: ["any"]
+    destination_ip:
+        description:
+            - list of destination addresses
+        default: ["any"]
+    service:
+        description:
+            - service
+        default: "any"
+    snat_type:
+        description:
+            - type of source translation
+        choices:
+            - static-ip
+            - dynamic-ip-and-port
+            - dynamic-ip
+    snat_address_type:
+        description:
+            - type of source translation address. Supported values are I(interface-address)/I(translated-address).
+        default: 'interface-address'
+        choices:
+            - interface-address
+            - translated-address
+    snat_static_address:
+        description:
+            - Source NAT translated address. Used with Static-IP translation.
+    snat_dynamic_address:
+        description:
+            - Source NAT translated address. Used with Dynamic-IP and Dynamic-IP-and-Port.
+ snat_interface: + description: + - snat interface + snat_interface_address: + description: + - snat interface address + snat_bidirectional: + description: + - bidirectional flag + type: bool + default: 'no' + dnat_address: + description: + - dnat translated address + dnat_port: + description: + - dnat translated port + tag_name: + description: + - Tag for the NAT rule. + to_interface: + description: + - Destination interface. + default: 'any' + commit: + description: + - Commit configuration if changed. + type: bool + default: 'yes' +''' + +EXAMPLES = ''' +# Create a source and destination nat rule + - name: Create NAT SSH rule for 10.0.1.101 + panos_nat_rule: + ip_address: '{{ ip_address }}' + username: '{{ username }}' + password: '{{ password }}' + rule_name: "Web SSH" + source_zone: ["external"] + destination_zone: "external" + source: ["any"] + destination: ["10.0.0.100"] + service: "service-tcp-221" + snat_type: "dynamic-ip-and-port" + snat_interface: "ethernet1/2" + dnat_address: "10.0.1.101" + dnat_port: "22" +''' + +RETURN = ''' +# Default return values +''' + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['deprecated'], + 'supported_by': 'community'} + +# import pydevd +# pydevd.settrace('localhost', port=60374, stdoutToServer=True, stderrToServer=True) +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_native + +try: + import pan.xapi + from pan.xapi import PanXapiError + import pandevice + from pandevice import base + from pandevice import firewall + from pandevice import panorama + from pandevice import objects + from pandevice import policies + import xmltodict + import json + + HAS_LIB = True +except ImportError: + HAS_LIB = False + + +def get_devicegroup(device, devicegroup): + dg_list = device.refresh_devices() + for group in dg_list: + if isinstance(group, pandevice.panorama.DeviceGroup): + if group.name == devicegroup: + return group + return False + + +def get_rulebase(device, devicegroup): + # Build the rulebase + if isinstance(device, pandevice.firewall.Firewall): + rulebase = pandevice.policies.Rulebase() + device.add(rulebase) + elif isinstance(device, pandevice.panorama.Panorama): + dg = panorama.DeviceGroup(devicegroup) + device.add(dg) + rulebase = policies.PreRulebase() + dg.add(rulebase) + else: + return False + policies.NatRule.refreshall(rulebase) + return rulebase + + +def find_rule(rulebase, rule_name): + # Search for the rule name + rule = rulebase.find(rule_name) + if rule: + return rule + else: + return False + + +def create_nat_rule(**kwargs): + nat_rule = policies.NatRule( + name=kwargs['rule_name'], + description=kwargs['description'], + fromzone=kwargs['source_zone'], + source=kwargs['source_ip'], + tozone=kwargs['destination_zone'], + destination=kwargs['destination_ip'], + service=kwargs['service'], + to_interface=kwargs['to_interface'], + nat_type=kwargs['nat_type'] + ) + + # Source translation: Static IP + if kwargs['snat_type'] in ['static-ip'] and kwargs['snat_static_address']: + nat_rule.source_translation_type = kwargs['snat_type'] + nat_rule.source_translation_static_translated_address = kwargs['snat_static_address'] + # Bi-directional flag set? 
+ if kwargs['snat_bidirectional']: + nat_rule.source_translation_static_bi_directional = kwargs['snat_bidirectional'] + + # Source translation: Dynamic IP and port + elif kwargs['snat_type'] in ['dynamic-ip-and-port']: + nat_rule.source_translation_type = kwargs['snat_type'] + nat_rule.source_translation_address_type = kwargs['snat_address_type'] + # Interface address? + if kwargs['snat_interface']: + nat_rule.source_translation_interface = kwargs['snat_interface'] + # Interface IP? + if kwargs['snat_interface_address']: + nat_rule.source_translation_ip_address = kwargs['snat_interface_address'] + else: + nat_rule.source_translation_translated_addresses = kwargs['snat_dynamic_address'] + + # Source translation: Dynamic IP + elif kwargs['snat_type'] in ['dynamic-ip']: + if kwargs['snat_dynamic_address']: + nat_rule.source_translation_type = kwargs['snat_type'] + nat_rule.source_translation_translated_addresses = kwargs['snat_dynamic_address'] + else: + return False + + # Destination translation + if kwargs['dnat_address']: + nat_rule.destination_translated_address = kwargs['dnat_address'] + if kwargs['dnat_port']: + nat_rule.destination_translated_port = kwargs['dnat_port'] + + # Any tags? + if 'tag_name' in kwargs: + nat_rule.tag = kwargs['tag_name'] + + return nat_rule + + +def add_rule(rulebase, nat_rule): + if rulebase: + rulebase.add(nat_rule) + nat_rule.create() + return True + else: + return False + + +def update_rule(rulebase, nat_rule): + if rulebase: + rulebase.add(nat_rule) + nat_rule.apply() + return True + else: + return False + + +def main(): + argument_spec = dict( + ip_address=dict(required=True), + username=dict(default='admin'), + password=dict(required=True, no_log=True), + api_key=dict(no_log=True), + operation=dict(required=True, choices=['add', 'update', 'delete', 'find']), + rule_name=dict(required=True), + description=dict(), + tag_name=dict(), + source_zone=dict(type='list'), + source_ip=dict(type='list', default=['any']), + destination_zone=dict(), + destination_ip=dict(type='list', default=['any']), + service=dict(default='any'), + to_interface=dict(default='any'), + snat_type=dict(choices=['static-ip', 'dynamic-ip-and-port', 'dynamic-ip']), + snat_address_type=dict(choices=['interface-address', 'translated-address'], default='interface-address'), + snat_static_address=dict(), + snat_dynamic_address=dict(type='list'), + snat_interface=dict(), + snat_interface_address=dict(), + snat_bidirectional=dict(type='bool', default=False), + dnat_address=dict(), + dnat_port=dict(), + devicegroup=dict(), + commit=dict(type='bool', default=True) + ) + + module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False, + required_one_of=[['api_key', 'password']]) + if not HAS_LIB: + module.fail_json(msg='Missing required libraries.') + + ip_address = module.params["ip_address"] + password = module.params["password"] + username = module.params['username'] + api_key = module.params['api_key'] + operation = module.params['operation'] + rule_name = module.params['rule_name'] + description = module.params['description'] + tag_name = module.params['tag_name'] + source_zone = module.params['source_zone'] + source_ip = module.params['source_ip'] + destination_zone = module.params['destination_zone'] + destination_ip = module.params['destination_ip'] + service = module.params['service'] + to_interface = module.params['to_interface'] + nat_type = 'ipv4' + snat_type = module.params['snat_type'] + snat_address_type = module.params['snat_address_type'] + snat_static_address = 
module.params['snat_static_address'] + snat_dynamic_address = module.params['snat_dynamic_address'] + snat_interface = module.params['snat_interface'] + snat_interface_address = module.params['snat_interface_address'] + snat_bidirectional = module.params['snat_bidirectional'] + dnat_address = module.params['dnat_address'] + dnat_port = module.params['dnat_port'] + devicegroup = module.params['devicegroup'] + + commit = module.params['commit'] + + # Create the device with the appropriate pandevice type + device = base.PanDevice.create_from_device(ip_address, username, password, api_key=api_key) + + # If Panorama, validate the devicegroup + dev_group = None + if devicegroup and isinstance(device, panorama.Panorama): + dev_group = get_devicegroup(device, devicegroup) + if dev_group: + device.add(dev_group) + else: + module.fail_json(msg='\'%s\' device group not found in Panorama. Is the name correct?' % devicegroup) + + # Get the rulebase + rulebase = get_rulebase(device, dev_group) + + # Which action shall we take on the object? + if operation == "find": + # Search for the rule + match = find_rule(rulebase, rule_name) + # If found, format and return the result + if match: + match_dict = xmltodict.parse(match.element_str()) + module.exit_json( + stdout_lines=json.dumps(match_dict, indent=2), + msg='Rule matched' + ) + else: + module.fail_json(msg='Rule \'%s\' not found. Is the name correct?' % rule_name) + elif operation == "delete": + # Search for the object + match = find_rule(rulebase, rule_name) + # If found, delete it + if match: + try: + match.delete() + if commit: + device.commit(sync=True) + except PanXapiError as exc: + module.fail_json(msg=to_native(exc)) + + module.exit_json(changed=True, msg='Rule \'%s\' successfully deleted.' % rule_name) + else: + module.fail_json(msg='Rule \'%s\' not found. Is the name correct?' % rule_name) + elif operation == "add": + # Look for required parameters + if source_zone and destination_zone and nat_type: + pass + else: + module.fail_json(msg='Missing parameter. Required: source_zone, destination_zone, nat_type') + # Search for the rule. Fail if found. + match = find_rule(rulebase, rule_name) + if match: + module.fail_json(msg='Rule \'%s\' already exists. Use operation: \'update\' to change it.' % rule_name) + else: + try: + new_rule = create_nat_rule( + rule_name=rule_name, + description=description, + tag_name=tag_name, + source_zone=source_zone, + destination_zone=destination_zone, + source_ip=source_ip, + destination_ip=destination_ip, + service=service, + to_interface=to_interface, + nat_type=nat_type, + snat_type=snat_type, + snat_address_type=snat_address_type, + snat_static_address=snat_static_address, + snat_dynamic_address=snat_dynamic_address, + snat_interface=snat_interface, + snat_interface_address=snat_interface_address, + snat_bidirectional=snat_bidirectional, + dnat_address=dnat_address, + dnat_port=dnat_port + ) + changed = add_rule(rulebase, new_rule) + if changed and commit: + device.commit(sync=True) + except PanXapiError as exc: + module.fail_json(msg=to_native(exc)) + module.exit_json(changed=changed, msg='Rule \'%s\' successfully added.' % rule_name) + elif operation == 'update': + # Search for the rule. Update if found. 
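+        # Note: the update path rebuilds the entire NatRule object from the task
+        # arguments and calls apply(), which replaces the rule on the device
+        # rather than merging with it, so supply every field that should be kept.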
+        match = find_rule(rulebase, rule_name)
+        if match:
+            try:
+                new_rule = create_nat_rule(
+                    rule_name=rule_name,
+                    description=description,
+                    tag_name=tag_name,
+                    source_zone=source_zone,
+                    destination_zone=destination_zone,
+                    source_ip=source_ip,
+                    destination_ip=destination_ip,
+                    service=service,
+                    to_interface=to_interface,
+                    nat_type=nat_type,
+                    snat_type=snat_type,
+                    snat_address_type=snat_address_type,
+                    snat_static_address=snat_static_address,
+                    snat_dynamic_address=snat_dynamic_address,
+                    snat_interface=snat_interface,
+                    snat_interface_address=snat_interface_address,
+                    snat_bidirectional=snat_bidirectional,
+                    dnat_address=dnat_address,
+                    dnat_port=dnat_port
+                )
+                changed = update_rule(rulebase, new_rule)
+                if changed and commit:
+                    device.commit(sync=True)
+            except PanXapiError as exc:
+                module.fail_json(msg=to_native(exc))
+            module.exit_json(changed=changed, msg='Rule \'%s\' successfully updated.' % rule_name)
+        else:
+            module.fail_json(msg='Rule \'%s\' does not exist. Use operation: \'add\' to add it.' % rule_name)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/network/panos/panos_object.py b/plugins/modules/network/panos/panos_object.py
new file mode 100644
index 0000000000..f654641dfa
--- /dev/null
+++ b/plugins/modules/network/panos/panos_object.py
@@ -0,0 +1,490 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Ansible module to manage PaloAltoNetworks Firewall
+# (c) 2016, techbizdev <techbizdev@paloaltonetworks.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+# limitations under the License.
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['deprecated'],
+                    'supported_by': 'community'}
+
+DOCUMENTATION = '''
+---
+module: panos_object
+short_description: create/read/update/delete object in PAN-OS or Panorama
+description:
+    - Policy objects form the match criteria for policy rules and many other functions in PAN-OS. These may include
+      address objects, address groups, service objects, service groups, and tags.
+author: "Bob Hagen (@rnh556)"
+requirements:
+    - pan-python can be obtained from PyPI U(https://pypi.org/project/pan-python/)
+    - pandevice can be obtained from PyPI U(https://pypi.org/project/pandevice/)
+deprecated:
+    alternative: Use U(https://galaxy.ansible.com/PaloAltoNetworks/paloaltonetworks) instead.
+    removed_in: "2.12"
+    why: Consolidating code base.
+notes:
+    - Checkmode is not supported.
+    - Panorama is supported.
+options:
+    ip_address:
+        description:
+            - IP address (or hostname) of PAN-OS device or Panorama management console being configured.
+        required: true
+    username:
+        description:
+            - Username credentials to use for authentication.
+        default: "admin"
+    password:
+        description:
+            - Password credentials to use for authentication.
+        required: true
+    api_key:
+        description:
+            - API key that can be used instead of I(username)/I(password) credentials.
+    operation:
+        description:
+            - The operation to be performed. Supported values are I(add)/I(update)/I(delete)/I(find).
+        required: true
+        choices:
+            - add
+            - update
+            - delete
+            - find
+    addressobject:
+        description:
+            - The name of the address object.
+    address:
+        description:
+            - The IP address of the host or network in CIDR notation.
+    address_type:
+        description:
+            - The type of address object definition. Valid types are I(ip-netmask), I(ip-range), and I(fqdn).
+        default: 'ip-netmask'
+        choices:
+            - ip-netmask
+            - ip-range
+            - fqdn
+    addressgroup:
+        description:
+            - A static group of address objects or dynamic address group.
+    static_value:
+        description:
+            - A group of address objects to be used in an addressgroup definition.
+    dynamic_value:
+        description:
+            - The filter match criteria to be used in a dynamic addressgroup definition.
+    serviceobject:
+        description:
+            - The name of the service object.
+    source_port:
+        description:
+            - The source port to be used in a service object definition.
+    destination_port:
+        description:
+            - The destination port to be used in a service object definition.
+    protocol:
+        description:
+            - The IP protocol to be used in a service object definition. Valid values are I(tcp) or I(udp).
+        choices:
+            - tcp
+            - udp
+    servicegroup:
+        description:
+            - A group of service objects.
+    services:
+        description:
+            - The group of service objects used in a servicegroup definition.
+    description:
+        description:
+            - The description of the object.
+    tag_name:
+        description:
+            - The name of an object or rule tag.
+    color:
+        description: >
+            - The color of the tag object. Valid values are I(red, green, blue, yellow, copper, orange, purple, gray,
+              light green, cyan, light gray, blue gray, lime, black, gold, and brown).
+        choices:
+            - red
+            - green
+            - blue
+            - yellow
+            - copper
+            - orange
+            - purple
+            - gray
+            - light green
+            - cyan
+            - light gray
+            - blue gray
+            - lime
+            - black
+            - gold
+            - brown
+    devicegroup:
+        description: >
+            - The name of the Panorama device group. The group must exist on Panorama. If the device group is not
+              defined it is assumed that we are contacting a firewall.
+'''
+
+EXAMPLES = '''
+- name: search for shared address object
+  panos_object:
+    ip_address: '{{ ip_address }}'
+    username: '{{ username }}'
+    password: '{{ password }}'
+    operation: 'find'
+    addressobject: 'DevNet'
+
+- name: create an address group in devicegroup using API key
+  panos_object:
+    ip_address: '{{ ip_address }}'
+    api_key: '{{ api_key }}'
+    operation: 'add'
+    addressgroup: 'Prod_DB_Svrs'
+    static_value: ['prod-db1', 'prod-db2', 'prod-db3']
+    description: 'Production DMZ database servers'
+    tag_name: 'DMZ'
+    devicegroup: 'DMZ Firewalls'
+
+- name: create a global service for TCP 3306
+  panos_object:
+    ip_address: '{{ ip_address }}'
+    api_key: '{{ api_key }}'
+    operation: 'add'
+    serviceobject: 'mysql-3306'
+    destination_port: '3306'
+    protocol: 'tcp'
+    description: 'MySQL on tcp/3306'
+
+- name: create a global tag
+  panos_object:
+    ip_address: '{{ ip_address }}'
+    username: '{{ username }}'
+    password: '{{ password }}'
+    operation: 'add'
+    tag_name: 'ProjectX'
+    color: 'yellow'
+    description: 'Associated with Project X'
+
+- name: delete an address object from a devicegroup using API key
+  panos_object:
+    ip_address: '{{ ip_address }}'
+    api_key: '{{ api_key }}'
+    operation: 'delete'
+    addressobject: 'Win2K test'
+'''
+
+RETURN = '''
+# Default return values
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+
+try:
+    import pan.xapi
+    from pan.xapi import PanXapiError
+    import pandevice
+    from pandevice import base
+    from pandevice import firewall
+    from pandevice import panorama
+    from pandevice import objects
+    import xmltodict
+    import json
+
+    HAS_LIB = True
+except ImportError:
+    HAS_LIB = False
+
+
+def get_devicegroup(device, devicegroup):
+    dg_list = device.refresh_devices()
+    for group in dg_list:
+        if isinstance(group, pandevice.panorama.DeviceGroup):
+            if group.name == devicegroup:
+                return group
+    return False
+
+
+def find_object(device, dev_group, obj_name, obj_type):
+    # Get the firewall objects
+    obj_type.refreshall(device)
+    if isinstance(device, pandevice.firewall.Firewall):
+        addr = device.find(obj_name, obj_type)
+        return addr
+    elif isinstance(device, pandevice.panorama.Panorama):
+        addr = device.find(obj_name, obj_type)
+        if addr is None:
+            if dev_group:
+                device.add(dev_group)
+                obj_type.refreshall(dev_group)
+                addr = dev_group.find(obj_name, obj_type)
+        return addr
+    else:
+        return False
+
+
+def create_object(**kwargs):
+    if kwargs['addressobject']:
+        newobject = objects.AddressObject(
+            name=kwargs['addressobject'],
+            value=kwargs['address'],
+            type=kwargs['address_type'],
+            description=kwargs['description'],
+            tag=kwargs['tag_name']
+        )
+        if newobject.type and newobject.value:
+            return newobject
+        else:
+            return False
+    elif kwargs['addressgroup']:
+        newobject = objects.AddressGroup(
+            name=kwargs['addressgroup'],
+            static_value=kwargs['static_value'],
+            dynamic_value=kwargs['dynamic_value'],
+            description=kwargs['description'],
+            tag=kwargs['tag_name']
+        )
+        if newobject.static_value or newobject.dynamic_value:
+            return newobject
+        else:
+            return False
+    elif kwargs['serviceobject']:
+        newobject = objects.ServiceObject(
+            name=kwargs['serviceobject'],
+            protocol=kwargs['protocol'],
+            source_port=kwargs['source_port'],
+            destination_port=kwargs['destination_port'],
+            tag=kwargs['tag_name']
+        )
+        if newobject.protocol and newobject.destination_port:
+            return newobject
+        else:
+            return False
+    elif kwargs['servicegroup']:
+        newobject = objects.ServiceGroup(
+            name=kwargs['servicegroup'],
value=kwargs['services'], + tag=kwargs['tag_name'] + ) + if newobject.value: + return newobject + else: + return False + elif kwargs['tag_name']: + newobject = objects.Tag( + name=kwargs['tag_name'], + color=kwargs['color'], + comments=kwargs['description'] + ) + if newobject.name: + return newobject + else: + return False + else: + return False + + +def add_object(device, dev_group, new_object): + if dev_group: + dev_group.add(new_object) + else: + device.add(new_object) + new_object.create() + return True + + +def main(): + argument_spec = dict( + ip_address=dict(required=True), + password=dict(no_log=True), + username=dict(default='admin'), + api_key=dict(no_log=True), + operation=dict(required=True, choices=['add', 'update', 'delete', 'find']), + addressobject=dict(default=None), + addressgroup=dict(default=None), + serviceobject=dict(default=None), + servicegroup=dict(default=None), + address=dict(default=None), + address_type=dict(default='ip-netmask', choices=['ip-netmask', 'ip-range', 'fqdn']), + static_value=dict(type='list', default=None), + dynamic_value=dict(default=None), + protocol=dict(default=None, choices=['tcp', 'udp']), + source_port=dict(default=None), + destination_port=dict(default=None), + services=dict(type='list', default=None), + description=dict(default=None), + tag_name=dict(default=None), + color=dict(default=None, choices=['red', 'green', 'blue', 'yellow', 'copper', 'orange', 'purple', + 'gray', 'light green', 'cyan', 'light gray', 'blue gray', + 'lime', 'black', 'gold', 'brown']), + devicegroup=dict(default=None) + ) + module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False, + required_one_of=[['api_key', 'password']], + mutually_exclusive=[['addressobject', 'addressgroup', + 'serviceobject', 'servicegroup', + 'tag_name']] + ) + if not HAS_LIB: + module.fail_json(msg='Missing required libraries.') + + ip_address = module.params["ip_address"] + password = module.params["password"] + username = module.params['username'] + api_key = module.params['api_key'] + operation = module.params['operation'] + addressobject = module.params['addressobject'] + addressgroup = module.params['addressgroup'] + serviceobject = module.params['serviceobject'] + servicegroup = module.params['servicegroup'] + address = module.params['address'] + address_type = module.params['address_type'] + static_value = module.params['static_value'] + dynamic_value = module.params['dynamic_value'] + protocol = module.params['protocol'] + source_port = module.params['source_port'] + destination_port = module.params['destination_port'] + services = module.params['services'] + description = module.params['description'] + tag_name = module.params['tag_name'] + color = module.params['color'] + devicegroup = module.params['devicegroup'] + + # Create the device with the appropriate pandevice type + device = base.PanDevice.create_from_device(ip_address, username, password, api_key=api_key) + + # If Panorama, validate the devicegroup + dev_group = None + if devicegroup and isinstance(device, panorama.Panorama): + dev_group = get_devicegroup(device, devicegroup) + if dev_group: + device.add(dev_group) + else: + module.fail_json(msg='\'%s\' device group not found in Panorama. Is the name correct?' % devicegroup) + + # What type of object are we talking about? 
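+    # Only one of the object-type parameters can be supplied (they are
+    # declared mutually exclusive above); the one that is present selects
+    # both the object name and the pandevice class used for the lookups below.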
+ if addressobject: + obj_name = addressobject + obj_type = objects.AddressObject + elif addressgroup: + obj_name = addressgroup + obj_type = objects.AddressGroup + elif serviceobject: + obj_name = serviceobject + obj_type = objects.ServiceObject + elif servicegroup: + obj_name = servicegroup + obj_type = objects.ServiceGroup + elif tag_name: + obj_name = tag_name + obj_type = objects.Tag + else: + module.fail_json(msg='No object type defined!') + + # Which operation shall we perform on the object? + if operation == "find": + # Search for the object + match = find_object(device, dev_group, obj_name, obj_type) + + # If found, format and return the result + if match: + match_dict = xmltodict.parse(match.element_str()) + module.exit_json( + stdout_lines=json.dumps(match_dict, indent=2), + msg='Object matched' + ) + else: + module.fail_json(msg='Object \'%s\' not found. Is the name correct?' % obj_name) + elif operation == "delete": + # Search for the object + match = find_object(device, dev_group, obj_name, obj_type) + + # If found, delete it + if match: + try: + match.delete() + except PanXapiError as exc: + module.fail_json(msg=to_native(exc)) + + module.exit_json(changed=True, msg='Object \'%s\' successfully deleted' % obj_name) + else: + module.fail_json(msg='Object \'%s\' not found. Is the name correct?' % obj_name) + elif operation == "add": + # Search for the object. Fail if found. + match = find_object(device, dev_group, obj_name, obj_type) + if match: + module.fail_json(msg='Object \'%s\' already exists. Use operation: \'update\' to change it.' % obj_name) + else: + try: + new_object = create_object( + addressobject=addressobject, + addressgroup=addressgroup, + serviceobject=serviceobject, + servicegroup=servicegroup, + address=address, + address_type=address_type, + static_value=static_value, + dynamic_value=dynamic_value, + protocol=protocol, + source_port=source_port, + destination_port=destination_port, + services=services, + description=description, + tag_name=tag_name, + color=color + ) + changed = add_object(device, dev_group, new_object) + except PanXapiError as exc: + module.fail_json(msg=to_native(exc)) + module.exit_json(changed=changed, msg='Object \'%s\' successfully added' % obj_name) + elif operation == "update": + # Search for the object. Update if found. + match = find_object(device, dev_group, obj_name, obj_type) + if match: + try: + new_object = create_object( + addressobject=addressobject, + addressgroup=addressgroup, + serviceobject=serviceobject, + servicegroup=servicegroup, + address=address, + address_type=address_type, + static_value=static_value, + dynamic_value=dynamic_value, + protocol=protocol, + source_port=source_port, + destination_port=destination_port, + services=services, + description=description, + tag_name=tag_name, + color=color + ) + changed = add_object(device, dev_group, new_object) + except PanXapiError as exc: + module.fail_json(msg=to_native(exc)) + module.exit_json(changed=changed, msg='Object \'%s\' successfully updated.' % obj_name) + else: + module.fail_json(msg='Object \'%s\' does not exist. Use operation: \'add\' to add it.' 
% obj_name)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/network/panos/panos_op.py b/plugins/modules/network/panos/panos_op.py
new file mode 100644
index 0000000000..50fc1b4de8
--- /dev/null
+++ b/plugins/modules/network/panos/panos_op.py
@@ -0,0 +1,158 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Ansible module to manage PaloAltoNetworks Firewall
+# (c) 2016, techbizdev <techbizdev@paloaltonetworks.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['deprecated'],
+                    'supported_by': 'community'}
+
+DOCUMENTATION = '''
+---
+module: panos_op
+short_description: execute arbitrary OP commands on PANW devices (e.g. show interface all)
+description: This module will allow the user to pass and execute any supported OP command on the PANW device.
+author: "Ivan Bojer (@ivanbojer)"
+requirements:
+    - pan-python can be obtained from PyPI U(https://pypi.org/project/pan-python/)
+    - pandevice can be obtained from PyPI U(https://pypi.org/project/pandevice/)
+deprecated:
+    alternative: Use U(https://galaxy.ansible.com/PaloAltoNetworks/paloaltonetworks) instead.
+    removed_in: "2.12"
+    why: Consolidating code base.
+notes:
+    - Checkmode is NOT supported.
+    - Panorama is NOT supported.
+options:
+    ip_address:
+        description:
+            - IP address (or hostname) of PAN-OS device or Panorama management console being configured.
+        required: true
+    username:
+        description:
+            - Username credentials to use for authentication.
+        required: false
+        default: "admin"
+    password:
+        description:
+            - Password credentials to use for authentication.
+        required: true
+    api_key:
+        description:
+            - API key that can be used instead of I(username)/I(password) credentials.
+    cmd:
+        description:
+            - The OP command to be performed.
+        required: true
+'''
+
+EXAMPLES = '''
+- name: show list of all interfaces
+  panos_op:
+    ip_address: '{{ ip_address }}'
+    username: '{{ username }}'
+    password: '{{ password }}'
+    cmd: 'show interfaces all'
+
+- name: show system info
+  panos_op:
+    ip_address: '{{ ip_address }}'
+    username: '{{ username }}'
+    password: '{{ password }}'
+    cmd: 'show system info'
+'''
+
+RETURN = '''
+stdout:
+    description: output of the given OP command as JSON formatted string
+    returned: success
+    type: str
+    sample: "{system: {app-release-date: 2017/05/01 15:09:12}}"
+
+stdout_xml:
+    description: output of the given OP command as XML formatted string
+    returned: success
+    type: str
+    sample: "<system><hostname>fw2</hostname></system>"
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+
+try:
+    import pan.xapi
+    from pan.xapi import PanXapiError
+    import pandevice
+    from pandevice import base
+    from pandevice import firewall
+    from pandevice import panorama
+    import xmltodict
+    import json
+
+    HAS_LIB = True
+except ImportError:
+    HAS_LIB = False
+
+
+def main():
+    argument_spec = dict(
+        ip_address=dict(required=True),
+        password=dict(no_log=True),
+        username=dict(default='admin'),
+        api_key=dict(no_log=True),
+        cmd=dict(required=True)
+    )
+    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False,
+                           required_one_of=[['api_key', 'password']])
+    if not HAS_LIB:
+        module.fail_json(msg='Missing required libraries.')
+
+    ip_address = module.params["ip_address"]
+    password = module.params["password"]
+    username = module.params['username']
+    api_key = module.params['api_key']
+    cmd = module.params['cmd']
+
+    # Create the device with the appropriate pandevice type
+    device = base.PanDevice.create_from_device(ip_address, username, password, api_key=api_key)
+
+    changed = False
+    try:
+        xml_output = device.op(cmd, xml=True)
+        changed = True
+    except PanXapiError as exc:
+        if 'non NULL value' in to_native(exc):
+            # PAN-OS wants the last token quoted; rewrap it and call again
+            cmd_array = cmd.split()
+            cmd_array[-1] = '"%s"' % cmd_array[-1]
+            cmd2 = ' '.join(cmd_array)
+            try:
+                xml_output = device.op(cmd2, xml=True)
+                changed = True
+            except PanXapiError as exc:
+                module.fail_json(msg=to_native(exc))
+        else:
+            # Any other API failure: report it instead of falling through
+            # with xml_output undefined.
+            module.fail_json(msg=to_native(exc))
+
+    obj_dict = xmltodict.parse(xml_output)
+    json_output = json.dumps(obj_dict)
+
+    module.exit_json(changed=changed, msg="Done", stdout=json_output, stdout_xml=xml_output)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/network/panos/panos_pg.py b/plugins/modules/network/panos/panos_pg.py
new file mode 100644
index 0000000000..ddd155fb11
--- /dev/null
+++ b/plugins/modules/network/panos/panos_pg.py
@@ -0,0 +1,203 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Ansible module to manage PaloAltoNetworks Firewall
+# (c) 2016, techbizdev <techbizdev@paloaltonetworks.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
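+# Note: unlike the pandevice-based modules above, this module drives the
+# XML API directly through pan-python, building the profile-group element
+# by hand.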
+
+DOCUMENTATION = '''
+---
+module: panos_pg
+short_description: create a security profile group
+description:
+    - Create a security profile group
+author: "Luigi Mori (@jtschichold), Ivan Bojer (@ivanbojer)"
+requirements:
+    - pan-python
+deprecated:
+    alternative: Use U(https://galaxy.ansible.com/PaloAltoNetworks/paloaltonetworks) instead.
+    removed_in: "2.12"
+    why: Consolidating code base.
+options:
+    pg_name:
+        description:
+            - name of the security profile group
+        required: true
+    data_filtering:
+        description:
+            - name of the data filtering profile
+    file_blocking:
+        description:
+            - name of the file blocking profile
+    spyware:
+        description:
+            - name of the spyware profile
+    url_filtering:
+        description:
+            - name of the url filtering profile
+    virus:
+        description:
+            - name of the anti-virus profile
+    vulnerability:
+        description:
+            - name of the vulnerability profile
+    wildfire:
+        description:
+            - name of the wildfire analysis profile
+    commit:
+        description:
+            - commit if changed
+        type: bool
+        default: 'yes'
+extends_documentation_fragment:
+- community.general.panos
+
+'''
+
+EXAMPLES = '''
+- name: setup security profile group
+  panos_pg:
+    ip_address: "192.168.1.1"
+    password: "admin"
+    username: "admin"
+    pg_name: "pg-default"
+    virus: "default"
+    spyware: "default"
+    vulnerability: "default"
+'''
+
+RETURN = '''
+# Default return values
+'''
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['deprecated'],
+                    'supported_by': 'community'}
+
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+
+
+try:
+    import pan.xapi
+    from pan.xapi import PanXapiError
+    HAS_LIB = True
+except ImportError:
+    HAS_LIB = False
+
+_PG_XPATH = "/config/devices/entry[@name='localhost.localdomain']" +\
+            "/vsys/entry[@name='vsys1']" +\
+            "/profile-group/entry[@name='%s']"
+
+
+def pg_exists(xapi, pg_name):
+    xapi.get(_PG_XPATH % pg_name)
+    e = xapi.element_root.find('.//entry')
+    if e is None:
+        return False
+    return True
+
+
+def add_pg(xapi, pg_name, data_filtering, file_blocking, spyware,
+           url_filtering, virus, vulnerability, wildfire):
+    if pg_exists(xapi, pg_name):
+        return False
+
+    exml = []
+
+    if data_filtering is not None:
+        exml.append('<data-filtering><member>%s</member></data-filtering>' %
+                    data_filtering)
+    if file_blocking is not None:
+        exml.append('<file-blocking><member>%s</member></file-blocking>' %
+                    file_blocking)
+    if spyware is not None:
+        exml.append('<spyware><member>%s</member></spyware>' %
+                    spyware)
+    if url_filtering is not None:
+        exml.append('<url-filtering><member>%s</member></url-filtering>' %
+                    url_filtering)
+    if virus is not None:
+        exml.append('<virus><member>%s</member></virus>' %
+                    virus)
+    if vulnerability is not None:
+        exml.append('<vulnerability><member>%s</member></vulnerability>' %
+                    vulnerability)
+    if wildfire is not None:
+        exml.append('<wildfire-analysis><member>%s</member></wildfire-analysis>' %
+                    wildfire)
+
+    exml = ''.join(exml)
+    xapi.set(xpath=_PG_XPATH % pg_name, element=exml)
+
+    return True
+
+
+def main():
+    argument_spec = dict(
+        ip_address=dict(required=True),
+        password=dict(required=True, no_log=True),
+        username=dict(default='admin'),
+        pg_name=dict(required=True),
+        data_filtering=dict(),
+        file_blocking=dict(),
+        spyware=dict(),
+        url_filtering=dict(),
+        virus=dict(),
+        vulnerability=dict(),
+        wildfire=dict(),
+        commit=dict(type='bool', default=True)
+    )
+    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False)
+    if not HAS_LIB:
+        module.fail_json(msg='pan-python is required for this module')
+
+    ip_address = module.params["ip_address"]
+    password = module.params["password"]
+    username = module.params['username']
+
+    xapi = pan.xapi.PanXapi(
+        hostname=ip_address,
+        api_username=username,
+        api_password=password
+    )
+
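+    # Only the profile names actually supplied end up in the generated
+    # <entry> element; add_pg() skips any that are None.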
+    pg_name = module.params['pg_name']
+    data_filtering = module.params['data_filtering']
+    file_blocking = module.params['file_blocking']
+    spyware = module.params['spyware']
+    url_filtering = module.params['url_filtering']
+    virus = module.params['virus']
+    vulnerability = module.params['vulnerability']
+    wildfire = module.params['wildfire']
+    commit = module.params['commit']
+
+    try:
+        changed = add_pg(xapi, pg_name, data_filtering, file_blocking,
+                         spyware, url_filtering, virus, vulnerability, wildfire)
+
+        if changed and commit:
+            xapi.commit(cmd="<commit></commit>", sync=True, interval=1)
+    except PanXapiError as exc:
+        module.fail_json(msg=to_native(exc))
+
+    module.exit_json(changed=changed, msg="okey dokey")
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/network/panos/panos_query_rules.py b/plugins/modules/network/panos/panos_query_rules.py
new file mode 100644
index 0000000000..6fe27767d2
--- /dev/null
+++ b/plugins/modules/network/panos/panos_query_rules.py
@@ -0,0 +1,495 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Ansible module to manage PaloAltoNetworks Firewall
+# (c) 2016, techbizdev <techbizdev@paloaltonetworks.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+# limitations under the License.
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['deprecated'],
+                    'supported_by': 'community'}
+
+DOCUMENTATION = '''
+---
+module: panos_query_rules
+short_description: PANOS module that allows searching for security rules in PANW NGFW devices.
+description: >
+    - Security policies allow you to enforce rules and take action, and can be as general or specific as needed. The
+      policy rules are compared against the incoming traffic in sequence, and because the first rule that matches the
+      traffic is applied, the more specific rules must precede the more general ones.
+author: "Bob Hagen (@rnh556)"
+requirements:
+    - pan-python can be obtained from PyPI U(https://pypi.org/project/pan-python/)
+    - pandevice can be obtained from PyPI U(https://pypi.org/project/pandevice/)
+    - xmltodict can be obtained from PyPI U(https://pypi.org/project/xmltodict/)
+deprecated:
+    alternative: Use U(https://galaxy.ansible.com/PaloAltoNetworks/paloaltonetworks) instead.
+    removed_in: "2.12"
+    why: Consolidating code base.
+notes:
+    - Checkmode is not supported.
+    - Panorama is supported.
+options:
+    ip_address:
+        description:
+            - IP address (or hostname) of PAN-OS firewall or Panorama management console being queried.
+        required: true
+    username:
+        description:
+            - Username credentials to use for authentication.
+        default: "admin"
+    password:
+        description:
+            - Password credentials to use for authentication.
+        required: true
+    api_key:
+        description:
+            - API key that can be used instead of I(username)/I(password) credentials.
+    application:
+        description:
+            - Name of the application or application group to be queried.
+    source_zone:
+        description:
+            - Name of the source security zone to be queried.
+    source_ip:
+        description:
+            - The source IP address to be queried.
+    source_port:
+        description:
+            - The source port to be queried.
+    destination_zone:
+        description:
+            - Name of the destination security zone to be queried.
+    destination_ip:
+        description:
+            - The destination IP address to be queried.
+    destination_port:
+        description:
+            - The destination port to be queried.
+    protocol:
+        description:
+            - The protocol to be queried. Must be either I(tcp) or I(udp).
+        choices:
+            - tcp
+            - udp
+    tag_name:
+        description:
+            - Name of the rule tag to be queried.
+    devicegroup:
+        description:
+            - The Panorama device group in which to conduct the query.
+'''
+
+EXAMPLES = '''
+- name: search for rules with tcp/3306
+  panos_query_rules:
+    ip_address: '{{ ip_address }}'
+    username: '{{ username }}'
+    password: '{{ password }}'
+    source_zone: 'DevNet'
+    destination_zone: 'DevVPC'
+    destination_port: '3306'
+    protocol: 'tcp'
+
+- name: search devicegroup for inbound rules to dmz host
+  panos_query_rules:
+    ip_address: '{{ ip_address }}'
+    api_key: '{{ api_key }}'
+    destination_zone: 'DMZ'
+    destination_ip: '10.100.42.18'
+    devicegroup: 'DeviceGroupA'
+
+- name: search for rules containing a specified rule tag
+  panos_query_rules:
+    ip_address: '{{ ip_address }}'
+    username: '{{ username }}'
+    password: '{{ password }}'
+    tag_name: 'ProjectX'
+'''
+
+RETURN = '''
+# Default return values
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+
+try:
+    import pan.xapi
+    from pan.xapi import PanXapiError
+    import pandevice
+    from pandevice import base
+    from pandevice import firewall
+    from pandevice import panorama
+    from pandevice import objects
+    from pandevice import policies
+    import ipaddress
+    import xmltodict
+    import json
+    HAS_LIB = True
+except ImportError:
+    HAS_LIB = False
+
+
+def get_devicegroup(device, devicegroup):
+    dg_list = device.refresh_devices()
+    for group in dg_list:
+        if isinstance(group, pandevice.panorama.DeviceGroup):
+            if group.name == devicegroup:
+                return group
+    return False
+
+
+def get_rulebase(device, devicegroup):
+    # Build the rulebase
+    if isinstance(device, firewall.Firewall):
+        rulebase = policies.Rulebase()
+        device.add(rulebase)
+    elif isinstance(device, panorama.Panorama):
+        dg = panorama.DeviceGroup(devicegroup)
+        device.add(dg)
+        rulebase = policies.PreRulebase()
+        dg.add(rulebase)
+    else:
+        return False
+    policies.SecurityRule.refreshall(rulebase)
+    return rulebase
+
+
+def get_object(device, dev_group, obj_name):
+    # Search global address objects
+    match = device.find(obj_name, objects.AddressObject)
+    if match:
+        return match
+
+    # Search global address groups
+    match = device.find(obj_name, objects.AddressGroup)
+    if match:
+        return match
+
+    # Search Panorama device group
+    if isinstance(device, pandevice.panorama.Panorama):
+        # Search device group address objects
+        match = dev_group.find(obj_name, objects.AddressObject)
+        if match:
+            return match
+
+        # Search device group address groups
+        match = dev_group.find(obj_name, objects.AddressGroup)
+        if match:
+            return match
+    return False
+
+
+def addr_in_obj(addr, obj):
+    ip = ipaddress.ip_address(addr)
+    # Process address objects
+    if isinstance(obj, objects.AddressObject):
+        if obj.type == 'ip-netmask':
+            net = ipaddress.ip_network(obj.value)
+            if ip in net:
+                return True
+        if obj.type == 'ip-range':
+            ip_range = obj.value.split('-')
+            lower = ipaddress.ip_address(ip_range[0])
+            upper = ipaddress.ip_address(ip_range[1])
+            # Range endpoints belong to the range, so compare inclusively
+            if lower <= ip <= upper:
+                return True
+    return False
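+# get_services() flattens a rule's service list: each name is resolved
+# against global and device-group objects, and nested service groups are
+# expanded recursively until only ServiceObject instances remain.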
+def get_services(device, dev_group, svc_list, obj_list):
+    for svc in svc_list:
+
+        # Search global service objects
+        global_obj_match = device.find(svc, objects.ServiceObject)
+        if global_obj_match:
+            obj_list.append(global_obj_match)
+
+        # Search global service groups
+        global_grp_match = device.find(svc, objects.ServiceGroup)
+        if global_grp_match:
+            get_services(device, dev_group, global_grp_match.value, obj_list)
+
+        # Search Panorama device group
+        if isinstance(device, pandevice.panorama.Panorama):
+
+            # Search device group service objects
+            dg_obj_match = dev_group.find(svc, objects.ServiceObject)
+            if dg_obj_match:
+                obj_list.append(dg_obj_match)
+
+            # Search device group service groups
+            dg_grp_match = dev_group.find(svc, objects.ServiceGroup)
+            if dg_grp_match:
+                get_services(device, dev_group, dg_grp_match.value, obj_list)
+
+    return obj_list
+
+
+def port_in_svc(orientation, port, protocol, obj):
+    # Check the port against the service object's source or destination ports
+    if orientation == 'source':
+        for x in obj.source_port.split(','):
+            if '-' in x:
+                port_range = x.split('-')
+                lower = int(port_range[0])
+                upper = int(port_range[1])
+                if (lower <= int(port) <= upper) and (obj.protocol == protocol):
+                    return True
+            else:
+                if port == x and obj.protocol == protocol:
+                    return True
+    elif orientation == 'destination':
+        for x in obj.destination_port.split(','):
+            if '-' in x:
+                port_range = x.split('-')
+                lower = int(port_range[0])
+                upper = int(port_range[1])
+                if (lower <= int(port) <= upper) and (obj.protocol == protocol):
+                    return True
+            else:
+                if port == x and obj.protocol == protocol:
+                    return True
+    return False
+
+
+def get_tag(device, dev_group, tag_name):
+    # Search global tags
+    match = device.find(tag_name, objects.Tag)
+    if match:
+        return match
+    # Search Panorama device group tags
+    if isinstance(device, panorama.Panorama):
+        match = dev_group.find(tag_name, objects.Tag)
+        if match:
+            return match
+    return False
+
+
+def main():
+    argument_spec = dict(
+        ip_address=dict(required=True),
+        password=dict(no_log=True),
+        username=dict(default='admin'),
+        api_key=dict(no_log=True),
+        application=dict(default=None),
+        source_zone=dict(default=None),
+        destination_zone=dict(default=None),
+        source_ip=dict(default=None),
+        destination_ip=dict(default=None),
+        source_port=dict(default=None),
+        destination_port=dict(default=None),
+        protocol=dict(default=None, choices=['tcp', 'udp']),
+        tag_name=dict(default=None),
+        devicegroup=dict(default=None)
+    )
+    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False,
+                           required_one_of=[['api_key', 'password']]
+                           )
+    if not HAS_LIB:
+        module.fail_json(msg='Missing required libraries.')
+
+    ip_address = module.params["ip_address"]
+    password = module.params["password"]
+    username = module.params['username']
+    api_key = module.params['api_key']
+    application = module.params['application']
+    source_zone = module.params['source_zone']
+    source_ip = module.params['source_ip']
+    source_port = module.params['source_port']
+    destination_zone = module.params['destination_zone']
+    destination_ip = module.params['destination_ip']
+    destination_port = module.params['destination_port']
+    protocol = module.params['protocol']
+    tag_name = module.params['tag_name']
+    devicegroup = module.params['devicegroup']
+
+    # Create the device with the appropriate pandevice type
+    device = base.PanDevice.create_from_device(ip_address, username, password, api_key=api_key)
+
+    # Grab the global objects
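+    # refreshall() pulls each object class into the local configuration
+    # tree so that the find() calls made while looping over rules resolve
+    # names locally instead of issuing one API request per lookup.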
objects.AddressObject.refreshall(device) + objects.AddressGroup.refreshall(device) + objects.ServiceObject.refreshall(device) + objects.ServiceGroup.refreshall(device) + objects.Tag.refreshall(device) + + # If Panorama, validate the devicegroup and grab the devicegroup objects + dev_group = None + if devicegroup and isinstance(device, panorama.Panorama): + dev_group = get_devicegroup(device, devicegroup) + if dev_group: + device.add(dev_group) + objects.AddressObject.refreshall(dev_group) + objects.AddressGroup.refreshall(dev_group) + objects.ServiceObject.refreshall(dev_group) + objects.ServiceGroup.refreshall(dev_group) + objects.Tag.refreshall(dev_group) + else: + module.fail_json( + failed=1, + msg='\'%s\' device group not found in Panorama. Is the name correct?' % devicegroup + ) + + # Build the rulebase and produce list + rulebase = get_rulebase(device, dev_group) + rulelist = rulebase.children + hitbase = policies.Rulebase() + loose_match = True + + # Process each rule + for rule in rulelist: + hitlist = [] + + if source_zone: + source_zone_match = False + if loose_match and 'any' in rule.fromzone: + source_zone_match = True + else: + for object_string in rule.fromzone: + if object_string == source_zone: + source_zone_match = True + hitlist.append(source_zone_match) + + if destination_zone: + destination_zone_match = False + if loose_match and 'any' in rule.tozone: + destination_zone_match = True + else: + for object_string in rule.tozone: + if object_string == destination_zone: + destination_zone_match = True + hitlist.append(destination_zone_match) + + if source_ip: + source_ip_match = False + if loose_match and 'any' in rule.source: + source_ip_match = True + else: + for object_string in rule.source: + # Get a valid AddressObject or AddressGroup + obj = get_object(device, dev_group, object_string) + # Otherwise the object_string is not an object and should be handled differently + if obj is False: + if '-' in object_string: + obj = ipaddress.ip_address(source_ip) + source_range = object_string.split('-') + source_lower = ipaddress.ip_address(source_range[0]) + source_upper = ipaddress.ip_address(source_range[1]) + if source_lower <= obj <= source_upper: + source_ip_match = True + else: + if source_ip == object_string: + source_ip_match = True + if isinstance(obj, objects.AddressObject) and addr_in_obj(source_ip, obj): + source_ip_match = True + elif isinstance(obj, objects.AddressGroup) and obj.static_value: + for member_string in obj.static_value: + member = get_object(device, dev_group, member_string) + if addr_in_obj(source_ip, member): + source_ip_match = True + hitlist.append(source_ip_match) + + if destination_ip: + destination_ip_match = False + if loose_match and 'any' in rule.destination: + destination_ip_match = True + else: + for object_string in rule.destination: + # Get a valid AddressObject or AddressGroup + obj = get_object(device, dev_group, object_string) + # Otherwise the object_string is not an object and should be handled differently + if obj is False: + if '-' in object_string: + obj = ipaddress.ip_address(destination_ip) + destination_range = object_string.split('-') + destination_lower = ipaddress.ip_address(destination_range[0]) + destination_upper = ipaddress.ip_address(destination_range[1]) + if destination_lower <= obj <= destination_upper: + destination_ip_match = True + else: + if destination_ip == object_string: + destination_ip_match = True + if isinstance(obj, objects.AddressObject) and addr_in_obj(destination_ip, obj): + destination_ip_match = 
True
+                elif isinstance(obj, objects.AddressGroup) and obj.static_value:
+                    for member_string in obj.static_value:
+                        member = get_object(device, dev_group, member_string)
+                        if addr_in_obj(destination_ip, member):
+                            destination_ip_match = True
+            hitlist.append(destination_ip_match)
+
+        if source_port:
+            source_port_match = False
+            orientation = 'source'
+            if loose_match and (rule.service[0] == 'any'):
+                source_port_match = True
+            elif rule.service[0] == 'application-default':
+                source_port_match = False  # Fix this once apps are supported
+            else:
+                service_list = []
+                service_list = get_services(device, dev_group, rule.service, service_list)
+                for obj in service_list:
+                    if port_in_svc(orientation, source_port, protocol, obj):
+                        source_port_match = True
+                        break
+            hitlist.append(source_port_match)
+
+        if destination_port:
+            destination_port_match = False
+            orientation = 'destination'
+            if loose_match and (rule.service[0] == 'any'):
+                destination_port_match = True
+            elif rule.service[0] == 'application-default':
+                destination_port_match = False  # Fix this once apps are supported
+            else:
+                service_list = []
+                service_list = get_services(device, dev_group, rule.service, service_list)
+                for obj in service_list:
+                    if port_in_svc(orientation, destination_port, protocol, obj):
+                        destination_port_match = True
+                        break
+            hitlist.append(destination_port_match)
+
+        if tag_name:
+            tag_match = False
+            if rule.tag:
+                for object_string in rule.tag:
+                    obj = get_tag(device, dev_group, object_string)
+                    if obj and (obj.name == tag_name):
+                        tag_match = True
+            hitlist.append(tag_match)
+
+        # Add to hit rulebase
+        if False not in hitlist:
+            hitbase.add(rule)
+
+    # Dump the hit rulebase
+    if hitbase.children:
+        output_string = xmltodict.parse(hitbase.element_str())
+        module.exit_json(
+            stdout_lines=json.dumps(output_string, indent=2),
+            msg='%s of %s rules matched' % (len(hitbase.children), len(rulebase.children))
+        )
+    else:
+        module.fail_json(msg='No matching rules found.')
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/network/panos/panos_restart.py b/plugins/modules/network/panos/panos_restart.py
new file mode 100644
index 0000000000..1991cfac05
--- /dev/null
+++ b/plugins/modules/network/panos/panos_restart.py
@@ -0,0 +1,110 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Ansible module to manage PaloAltoNetworks Firewall
+# (c) 2016, techbizdev <techbizdev@paloaltonetworks.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: panos_restart
+short_description: restart a device
+description:
+    - Restart a device
+author: "Luigi Mori (@jtschichold), Ivan Bojer (@ivanbojer)"
+requirements:
+    - pan-python
+deprecated:
+    alternative: Use U(https://galaxy.ansible.com/PaloAltoNetworks/paloaltonetworks) instead.
+    removed_in: "2.12"
+    why: Consolidating code base.
+extends_documentation_fragment:
+- community.general.panos
+
+'''
+
+EXAMPLES = '''
+- panos_restart:
+    ip_address: "192.168.1.1"
+    username: "admin"
+    password: "admin"
+'''
+
+RETURN = '''
+status:
+    description: success status
+    returned: success
+    type: str
+    sample: "okey dokey"
+'''
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['deprecated'],
+                    'supported_by': 'community'}
+
+
+import sys
+import traceback
+
+try:
+    import pan.xapi
+    HAS_LIB = True
+except ImportError:
+    HAS_LIB = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+
+
+def main():
+    argument_spec = dict(
+        ip_address=dict(),
+        password=dict(no_log=True),
+        username=dict(default='admin')
+    )
+    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False)
+
+    if not HAS_LIB:
+        module.fail_json(msg='pan-python required for this module')
+
+    ip_address = module.params["ip_address"]
+    if not ip_address:
+        module.fail_json(msg="ip_address should be specified")
+    password = module.params["password"]
+    if not password:
+        module.fail_json(msg="password is required")
+    username = module.params['username']
+
+    xapi = pan.xapi.PanXapi(
+        hostname=ip_address,
+        api_username=username,
+        api_password=password
+    )
+
+    try:
+        xapi.op(cmd="<request><restart><system></system></restart></request>")
+    except Exception as e:
+        if 'succeeded' in to_native(e):
+            module.exit_json(changed=True, msg=to_native(e))
+        else:
+            module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+    module.exit_json(changed=True, msg="okey dokey")
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/network/panos/panos_sag.py b/plugins/modules/network/panos/panos_sag.py
new file mode 100644
index 0000000000..2e171db258
--- /dev/null
+++ b/plugins/modules/network/panos/panos_sag.py
@@ -0,0 +1,268 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Ansible module to manage PaloAltoNetworks Firewall
+# (c) 2016, techbizdev <techbizdev@paloaltonetworks.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: panos_sag
+short_description: Create a static address group.
+description:
+    - Create a static address group object in the firewall used for policy rules.
+author: "Vinay Venkataraghavan (@vinayvenkat)"
+requirements:
+    - pan-python can be obtained from PyPI U(https://pypi.org/project/pan-python/)
+    - pandevice can be obtained from PyPI U(https://pypi.org/project/pandevice/)
+    - xmltodict can be obtained from PyPI U(https://pypi.org/project/xmltodict/)
+deprecated:
+    alternative: Use U(https://galaxy.ansible.com/PaloAltoNetworks/paloaltonetworks) instead.
+    removed_in: "2.12"
+    why: Consolidating code base.
+options:
+    api_key:
+        description:
+            - API key that can be used instead of I(username)/I(password) credentials.
+    sag_name:
+        description:
+            - name of the static address group
+        required: true
+    sag_match_filter:
+        description:
+            - Static filter used by the address group
+        type: list
+    devicegroup:
+        description: >
+            - The name of the Panorama device group. The group must exist on Panorama. If the device group is not
+              defined it is assumed that we are contacting a firewall.
+    description:
+        description:
+            - The purpose / objective of the static Address Group
+    tags:
+        description:
+            - Tags to be associated with the address group
+    commit:
+        description:
+            - commit if changed
+        type: bool
+        default: 'yes'
+    operation:
+        description:
+            - The operation to perform. Supported values are I(add)/I(list)/I(delete).
+        required: true
+        choices:
+            - add
+            - list
+            - delete
+extends_documentation_fragment:
+- community.general.panos
+
+'''
+
+EXAMPLES = '''
+- name: sag
+  panos_sag:
+    ip_address: "192.168.1.1"
+    password: "admin"
+    sag_name: "sag-1"
+    sag_match_filter: ['test-addresses', ]
+    description: "A description for the static address group"
+    tags: ["tags to be associated with the group", ]
+'''
+
+RETURN = '''
+# Default return values
+'''
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['deprecated'],
+                    'supported_by': 'community'}
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+
+try:
+    from pandevice import base
+    from pandevice import firewall
+    from pandevice import panorama
+    from pandevice import objects
+    HAS_LIB = True
+except ImportError:
+    HAS_LIB = False
+
+
+def get_devicegroup(device, devicegroup):
+    dg_list = device.refresh_devices()
+    for group in dg_list:
+        if isinstance(group, panorama.DeviceGroup):
+            if group.name == devicegroup:
+                return group
+    return False
+
+
+def find_object(device, dev_group, obj_name, obj_type):
+    # Get the firewall objects
+    obj_type.refreshall(device)
+    if isinstance(device, firewall.Firewall):
+        addr = device.find(obj_name, obj_type)
+        return addr
+    elif isinstance(device, panorama.Panorama):
+        addr = device.find(obj_name, obj_type)
+        if addr is None:
+            if dev_group:
+                device.add(dev_group)
+                obj_type.refreshall(dev_group)
+                addr = dev_group.find(obj_name, obj_type)
+        return addr
+    else:
+        return False
+
+
+def create_address_group_object(**kwargs):
+    """
+    Create an AddressGroup object
+
+    :param kwargs: key word arguments to instantiate AddressGroup object
+    :return: the ``objects.AddressGroup`` on success, or None if no static
+        or dynamic value was supplied
+    """
+    ad_object = objects.AddressGroup(
+        name=kwargs['address_gp_name'],
+        static_value=kwargs['sag_match_filter'],
+        description=kwargs['description'],
+        tag=kwargs['tag_name']
+    )
+    if ad_object.static_value or ad_object.dynamic_value:
+        return ad_object
+    else:
+        return None
+
+
+def add_address_group(device, dev_group, ag_object):
+    """
+    Create a new static address group object on the
+    PAN FW.
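+
+    Returns a (success, exception) tuple so the caller can report failures.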
+
+    :param device: Firewall Handle
+    :param dev_group: Panorama device group
+    :param ag_object: Address group object
+    """
+
+    if dev_group:
+        dev_group.add(ag_object)
+    else:
+        device.add(ag_object)
+
+    exc = None
+    try:
+        ag_object.create()
+    except Exception as exc:
+        return False, exc
+
+    return True, exc
+
+
+def delete_address_group(device, dev_group, obj_name):
+    """
+    Delete an existing address group by name.
+
+    :param device: Firewall or Panorama handle
+    :param dev_group: Panorama device group, if any
+    :param obj_name: name of the address group to delete
+    :return: (success, exception) tuple
+    """
+    static_obj = find_object(device, dev_group, obj_name, objects.AddressGroup)
+    # If found, delete it
+
+    if static_obj:
+        try:
+            static_obj.delete()
+        except Exception as exc:
+            return False, exc
+        return True, None
+    else:
+        return False, None
+
+
+def main():
+    argument_spec = dict(
+        ip_address=dict(required=True),
+        password=dict(required=True, no_log=True),
+        username=dict(default='admin'),
+        api_key=dict(no_log=True),
+        sag_match_filter=dict(type='list', required=False),
+        sag_name=dict(required=True),
+        commit=dict(type='bool', default=True),
+        devicegroup=dict(default=None),
+        description=dict(default=None),
+        tags=dict(type='list', default=[]),
+        operation=dict(type='str', required=True, choices=['add', 'list', 'delete'])
+    )
+    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False,
+                           required_one_of=[['api_key', 'password']])
+    if not HAS_LIB:
+        module.fail_json(msg='pandevice is required for this module')
+
+    ip_address = module.params["ip_address"]
+    password = module.params["password"]
+    username = module.params['username']
+    api_key = module.params['api_key']
+    operation = module.params['operation']
+
+    ag_object = create_address_group_object(address_gp_name=module.params.get('sag_name', None),
+                                            sag_match_filter=module.params.get('sag_match_filter', None),
+                                            description=module.params.get('description', None),
+                                            tag_name=module.params.get('tags', None)
+                                            )
+    commit = module.params['commit']
+
+    devicegroup = module.params['devicegroup']
+    # Create the device with the appropriate pandevice type
+    device = base.PanDevice.create_from_device(ip_address, username, password, api_key=api_key)
+
+    # If Panorama, validate the devicegroup
+    dev_group = None
+    if devicegroup and isinstance(device, panorama.Panorama):
+        dev_group = get_devicegroup(device, devicegroup)
+        if dev_group:
+            device.add(dev_group)
+        else:
+            module.fail_json(msg='\'%s\' device group not found in Panorama. Is the name correct?' % devicegroup)
+
+    if operation == 'add':
+        result, exc = add_address_group(device, dev_group, ag_object)
+        if not result:
+            # Surface the creation error instead of silently reporting success
+            module.fail_json(msg=to_native(exc))
+
+        if commit:
+            try:
+                device.commit(sync=True)
+            except Exception as exc:
+                module.fail_json(msg=to_native(exc))
+
+    elif operation == 'delete':
+        obj_name = module.params.get('sag_name', None)
+        result, exc = delete_address_group(device, dev_group, obj_name)
+        if not result and exc:
+            module.fail_json(msg=to_native(exc))
+        elif not result:
+            module.fail_json(msg="Specified object not found.")
+    else:
+        module.fail_json(changed=False, msg="Unsupported option.")
+
+    module.exit_json(changed=True, msg="Address Group Operation Completed.")
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/network/panos/panos_security_rule.py b/plugins/modules/network/panos/panos_security_rule.py
new file mode 100644
index 0000000000..8cbacb1a61
--- /dev/null
+++ b/plugins/modules/network/panos/panos_security_rule.py
@@ -0,0 +1,576 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# (c) 2016, techbizdev <techbizdev@paloaltonetworks.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['deprecated'],
+                    'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: panos_security_rule
+short_description: Create security rule policy on PAN-OS devices or Panorama management console.
+description:
+    - Security policies allow you to enforce rules and take action, and can be as general or specific as needed.
+      The policy rules are compared against the incoming traffic in sequence, and because the first rule that matches the traffic is applied,
+      the more specific rules must precede the more general ones.
+author: "Ivan Bojer (@ivanbojer), Robert Hagen (@rnh556)"
+requirements:
+    - pan-python can be obtained from PyPI U(https://pypi.org/project/pan-python/)
+    - pandevice can be obtained from PyPI U(https://pypi.org/project/pandevice/)
+    - xmltodict can be obtained from PyPI U(https://pypi.org/project/xmltodict/)
+deprecated:
+    alternative: Use U(https://galaxy.ansible.com/PaloAltoNetworks/paloaltonetworks) instead.
+    removed_in: "2.12"
+    why: Consolidating code base.
+notes:
+    - Checkmode is not supported.
+    - Panorama is supported.
+options:
+    ip_address:
+        description:
+            - IP address (or hostname) of PAN-OS device being configured.
+        required: true
+    username:
+        description:
+            - Username credentials to use for auth unless I(api_key) is set.
+        default: "admin"
+    password:
+        description:
+            - Password credentials to use for auth unless I(api_key) is set.
+        required: true
+    api_key:
+        description:
+            - API key that can be used instead of I(username)/I(password) credentials.
+    operation:
+        description:
+            - The action to be taken. Supported values are I(add)/I(update)/I(find)/I(delete).
+        default: 'add'
+        choices:
+            - add
+            - update
+            - delete
+            - find
+    category:
+        description:
+            - The category.
+        type: list
+        default: ['any']
+    rule_name:
+        description:
+            - Name of the security rule.
+        required: true
+    rule_type:
+        description:
+            - Type of security rule (version 6.1 of PanOS and above).
+        default: "universal"
+    description:
+        description:
+            - Description for the security rule.
+    tag_name:
+        description:
+            - Administrative tags that can be added to the rule. Note, tags must be already defined.
+    source_zone:
+        description:
+            - List of source zones.
+ default: "any" + destination_zone: + description: + - List of destination zones. + default: "any" + source_ip: + description: + - List of source addresses. + default: "any" + source_user: + description: + - Use users to enforce policy for individual users or a group of users. + default: "any" + hip_profiles: + description: > + - If you are using GlobalProtect with host information profile (HIP) enabled, you can also base the policy + on information collected by GlobalProtect. For example, the user access level can be determined HIP that + notifies the firewall about the user's local configuration. + default: "any" + destination_ip: + description: + - List of destination addresses. + default: "any" + application: + description: + - List of applications. + default: "any" + service: + description: + - List of services. + default: "application-default" + log_start: + description: + - Whether to log at session start. + type: bool + log_end: + description: + - Whether to log at session end. + default: true + type: bool + action: + description: + - Action to apply once rules maches. + default: "allow" + group_profile: + description: > + - Security profile group that is already defined in the system. This property supersedes antivirus, + vulnerability, spyware, url_filtering, file_blocking, data_filtering, and wildfire_analysis properties. + antivirus: + description: + - Name of the already defined antivirus profile. + vulnerability: + description: + - Name of the already defined vulnerability profile. + spyware: + description: + - Name of the already defined spyware profile. + url_filtering: + description: + - Name of the already defined url_filtering profile. + file_blocking: + description: + - Name of the already defined file_blocking profile. + data_filtering: + description: + - Name of the already defined data_filtering profile. + wildfire_analysis: + description: + - Name of the already defined wildfire_analysis profile. + devicegroup: + description: > + - Device groups are used for the Panorama interaction with Firewall(s). The group must exists on Panorama. + If device group is not define we assume that we are contacting Firewall. + commit: + description: + - Commit configuration if changed. 
+ type: bool + default: 'yes' +''' + +EXAMPLES = ''' +- name: add an SSH inbound rule to devicegroup + panos_security_rule: + ip_address: '{{ ip_address }}' + username: '{{ username }}' + password: '{{ password }}' + operation: 'add' + rule_name: 'SSH permit' + description: 'SSH rule test' + tag_name: ['ProjectX'] + source_zone: ['public'] + destination_zone: ['private'] + source_ip: ['any'] + source_user: ['any'] + destination_ip: ['1.1.1.1'] + category: ['any'] + application: ['ssh'] + service: ['application-default'] + hip_profiles: ['any'] + action: 'allow' + devicegroup: 'Cloud Edge' + +- name: add a rule to allow HTTP multimedia only from CDNs + panos_security_rule: + ip_address: '10.5.172.91' + username: 'admin' + password: 'paloalto' + operation: 'add' + rule_name: 'HTTP Multimedia' + description: 'Allow HTTP multimedia only to host at 1.1.1.1' + source_zone: ['public'] + destination_zone: ['private'] + source_ip: ['any'] + source_user: ['any'] + destination_ip: ['1.1.1.1'] + category: ['content-delivery-networks'] + application: ['http-video', 'http-audio'] + service: ['service-http', 'service-https'] + hip_profiles: ['any'] + action: 'allow' + +- name: add a more complex rule that uses security profiles + panos_security_rule: + ip_address: '{{ ip_address }}' + username: '{{ username }}' + password: '{{ password }}' + operation: 'add' + rule_name: 'Allow HTTP w profile' + log_start: false + log_end: true + action: 'allow' + antivirus: 'default' + vulnerability: 'default' + spyware: 'default' + url_filtering: 'default' + wildfire_analysis: 'default' + +- name: delete a devicegroup security rule + panos_security_rule: + ip_address: '{{ ip_address }}' + api_key: '{{ api_key }}' + operation: 'delete' + rule_name: 'Allow telnet' + devicegroup: 'DC Firewalls' + +- name: find a specific security rule + panos_security_rule: + ip_address: '{{ ip_address }}' + password: '{{ password }}' + operation: 'find' + rule_name: 'Allow RDP to DCs' + register: result +- debug: msg='{{result.stdout_lines}}' + +''' + +RETURN = ''' +# Default return values +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_native + +try: + import pan.xapi + from pan.xapi import PanXapiError + import pandevice + from pandevice import base + from pandevice import firewall + from pandevice import panorama + from pandevice import objects + from pandevice import policies + import xmltodict + import json + + HAS_LIB = True +except ImportError: + HAS_LIB = False + + +def get_devicegroup(device, devicegroup): + dg_list = device.refresh_devices() + for group in dg_list: + if isinstance(group, pandevice.panorama.DeviceGroup): + if group.name == devicegroup: + return group + return False + + +def get_rulebase(device, devicegroup): + # Build the rulebase + if isinstance(device, pandevice.firewall.Firewall): + rulebase = pandevice.policies.Rulebase() + device.add(rulebase) + elif isinstance(device, pandevice.panorama.Panorama): + dg = panorama.DeviceGroup(devicegroup) + device.add(dg) + rulebase = policies.PreRulebase() + dg.add(rulebase) + else: + return False + policies.SecurityRule.refreshall(rulebase) + return rulebase + + +def find_rule(rulebase, rule_name): + # Search for the rule name + rule = rulebase.find(rule_name) + if rule: + return rule + else: + return False + + +def rule_is_match(propose_rule, current_rule): + + match_check = ['name', 'description', 'group_profile', 'antivirus', 'vulnerability', + 'spyware', 'url_filtering', 'file_blocking', 'data_filtering', + 
'wildfire_analysis', 'type', 'action', 'tag', 'log_start', 'log_end'] + list_check = ['tozone', 'fromzone', 'source', 'source_user', 'destination', 'category', + 'application', 'service', 'hip_profiles'] + + for check in match_check: + propose_check = getattr(propose_rule, check, None) + current_check = getattr(current_rule, check, None) + if propose_check != current_check: + return False + for check in list_check: + propose_check = getattr(propose_rule, check, []) + current_check = getattr(current_rule, check, []) + if set(propose_check) != set(current_check): + return False + return True + + +def create_security_rule(**kwargs): + security_rule = policies.SecurityRule( + name=kwargs['rule_name'], + description=kwargs['description'], + fromzone=kwargs['source_zone'], + source=kwargs['source_ip'], + source_user=kwargs['source_user'], + hip_profiles=kwargs['hip_profiles'], + tozone=kwargs['destination_zone'], + destination=kwargs['destination_ip'], + application=kwargs['application'], + service=kwargs['service'], + category=kwargs['category'], + log_start=kwargs['log_start'], + log_end=kwargs['log_end'], + action=kwargs['action'], + type=kwargs['rule_type'] + ) + + if 'tag_name' in kwargs: + security_rule.tag = kwargs['tag_name'] + + # profile settings + if 'group_profile' in kwargs: + security_rule.group = kwargs['group_profile'] + else: + if 'antivirus' in kwargs: + security_rule.virus = kwargs['antivirus'] + if 'vulnerability' in kwargs: + security_rule.vulnerability = kwargs['vulnerability'] + if 'spyware' in kwargs: + security_rule.spyware = kwargs['spyware'] + if 'url_filtering' in kwargs: + security_rule.url_filtering = kwargs['url_filtering'] + if 'file_blocking' in kwargs: + security_rule.file_blocking = kwargs['file_blocking'] + if 'data_filtering' in kwargs: + security_rule.data_filtering = kwargs['data_filtering'] + if 'wildfire_analysis' in kwargs: + security_rule.wildfire_analysis = kwargs['wildfire_analysis'] + return security_rule + + +def add_rule(rulebase, sec_rule): + if rulebase: + rulebase.add(sec_rule) + sec_rule.create() + return True + else: + return False + + +def update_rule(rulebase, nat_rule): + if rulebase: + rulebase.add(nat_rule) + nat_rule.apply() + return True + else: + return False + + +def main(): + argument_spec = dict( + ip_address=dict(required=True), + password=dict(no_log=True), + username=dict(default='admin'), + api_key=dict(no_log=True), + operation=dict(default='add', choices=['add', 'update', 'delete', 'find']), + rule_name=dict(required=True), + description=dict(default=''), + tag_name=dict(type='list'), + destination_zone=dict(type='list', default=['any']), + source_zone=dict(type='list', default=['any']), + source_ip=dict(type='list', default=["any"]), + source_user=dict(type='list', default=['any']), + destination_ip=dict(type='list', default=["any"]), + category=dict(type='list', default=['any']), + application=dict(type='list', default=['any']), + service=dict(type='list', default=['application-default']), + hip_profiles=dict(type='list', default=['any']), + group_profile=dict(), + antivirus=dict(), + vulnerability=dict(), + spyware=dict(), + url_filtering=dict(), + file_blocking=dict(), + data_filtering=dict(), + wildfire_analysis=dict(), + log_start=dict(type='bool', default=False), + log_end=dict(type='bool', default=True), + rule_type=dict(default='universal'), + action=dict(default='allow'), + devicegroup=dict(), + commit=dict(type='bool', default=True) + ) + module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False, 
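+                           # Note (added comment): api_key and password are alternative
+                           # credentials; required_one_of below enforces that at least
+                           # one of them is supplied.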
+ required_one_of=[['api_key', 'password']]) + if not HAS_LIB: + module.fail_json(msg='Missing required libraries.') + + ip_address = module.params["ip_address"] + password = module.params["password"] + username = module.params['username'] + api_key = module.params['api_key'] + operation = module.params['operation'] + rule_name = module.params['rule_name'] + description = module.params['description'] + tag_name = module.params['tag_name'] + source_zone = module.params['source_zone'] + source_ip = module.params['source_ip'] + source_user = module.params['source_user'] + hip_profiles = module.params['hip_profiles'] + destination_zone = module.params['destination_zone'] + destination_ip = module.params['destination_ip'] + application = module.params['application'] + service = module.params['service'] + category = module.params['category'] + log_start = module.params['log_start'] + log_end = module.params['log_end'] + action = module.params['action'] + group_profile = module.params['group_profile'] + antivirus = module.params['antivirus'] + vulnerability = module.params['vulnerability'] + spyware = module.params['spyware'] + url_filtering = module.params['url_filtering'] + file_blocking = module.params['file_blocking'] + data_filtering = module.params['data_filtering'] + wildfire_analysis = module.params['wildfire_analysis'] + rule_type = module.params['rule_type'] + devicegroup = module.params['devicegroup'] + + commit = module.params['commit'] + + # Create the device with the appropriate pandevice type + device = base.PanDevice.create_from_device(ip_address, username, password, api_key=api_key) + + # If Panorama, validate the devicegroup + dev_group = None + if devicegroup and isinstance(device, panorama.Panorama): + dev_group = get_devicegroup(device, devicegroup) + if dev_group: + device.add(dev_group) + else: + module.fail_json(msg='\'%s\' device group not found in Panorama. Is the name correct?' % devicegroup) + + # Get the rulebase + rulebase = get_rulebase(device, dev_group) + + # Which action shall we take on the object? + if operation == "find": + # Search for the object + match = find_rule(rulebase, rule_name) + # If found, format and return the result + if match: + match_dict = xmltodict.parse(match.element_str()) + module.exit_json( + stdout_lines=json.dumps(match_dict, indent=2), + msg='Rule matched' + ) + else: + module.fail_json(msg='Rule \'%s\' not found. Is the name correct?' % rule_name) + elif operation == "delete": + # Search for the object + match = find_rule(rulebase, rule_name) + # If found, delete it + if match: + try: + if commit: + match.delete() + except PanXapiError as exc: + module.fail_json(msg=to_native(exc)) + + module.exit_json(changed=True, msg='Rule \'%s\' successfully deleted' % rule_name) + else: + module.fail_json(msg='Rule \'%s\' not found. Is the name correct?' 
% rule_name) + elif operation == "add": + new_rule = create_security_rule( + rule_name=rule_name, + description=description, + tag_name=tag_name, + source_zone=source_zone, + destination_zone=destination_zone, + source_ip=source_ip, + source_user=source_user, + destination_ip=destination_ip, + category=category, + application=application, + service=service, + hip_profiles=hip_profiles, + group_profile=group_profile, + antivirus=antivirus, + vulnerability=vulnerability, + spyware=spyware, + url_filtering=url_filtering, + file_blocking=file_blocking, + data_filtering=data_filtering, + wildfire_analysis=wildfire_analysis, + log_start=log_start, + log_end=log_end, + rule_type=rule_type, + action=action + ) + # Search for the rule. Fail if found. + match = find_rule(rulebase, rule_name) + if match: + if rule_is_match(match, new_rule): + module.exit_json(changed=False, msg='Rule \'%s\' is already in place' % rule_name) + else: + module.fail_json(msg='Rule \'%s\' already exists. Use operation: \'update\' to change it.' % rule_name) + else: + try: + changed = add_rule(rulebase, new_rule) + if changed and commit: + device.commit(sync=True) + except PanXapiError as exc: + module.fail_json(msg=to_native(exc)) + module.exit_json(changed=changed, msg='Rule \'%s\' successfully added' % rule_name) + elif operation == 'update': + # Search for the rule. Update if found. + match = find_rule(rulebase, rule_name) + if match: + try: + new_rule = create_security_rule( + rule_name=rule_name, + description=description, + tag_name=tag_name, + source_zone=source_zone, + destination_zone=destination_zone, + source_ip=source_ip, + source_user=source_user, + destination_ip=destination_ip, + category=category, + application=application, + service=service, + hip_profiles=hip_profiles, + group_profile=group_profile, + antivirus=antivirus, + vulnerability=vulnerability, + spyware=spyware, + url_filtering=url_filtering, + file_blocking=file_blocking, + data_filtering=data_filtering, + wildfire_analysis=wildfire_analysis, + log_start=log_start, + log_end=log_end, + rule_type=rule_type, + action=action + ) + changed = update_rule(rulebase, new_rule) + if changed and commit: + device.commit(sync=True) + except PanXapiError as exc: + module.fail_json(msg=to_native(exc)) + module.exit_json(changed=changed, msg='Rule \'%s\' successfully updated' % rule_name) + else: + module.fail_json(msg='Rule \'%s\' does not exist. Use operation: \'add\' to add it.' % rule_name) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/panos/panos_set.py b/plugins/modules/network/panos/panos_set.py new file mode 100644 index 0000000000..4f83f91561 --- /dev/null +++ b/plugins/modules/network/panos/panos_set.py @@ -0,0 +1,167 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Ansible module to manage PaloAltoNetworks Firewall +# (c) 2018, Jasper Mackenzie +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
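+
+# Illustrative sketch (not part of the original module): the pan-python call
+# pattern this module wraps. Hostname, credentials, and values below are
+# placeholder assumptions.
+#
+#   import pan.xapi
+#
+#   xapi = pan.xapi.PanXapi(hostname='192.0.2.1', api_username='admin',
+#                           api_password='secret', timeout=60)
+#   # The module dispatches via getattr(xapi, command); 'set' is the default.
+#   xapi.set(xpath="/config/devices/entry[@name='localhost.localdomain']"
+#                  "/deviceconfig/system",
+#            element='<timezone>Australia/Melbourne</timezone>')
+#   print(xapi.status)  # pan.xapi reports 'success' on completion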
+
+DOCUMENTATION = '''
+---
+module: panos_set
+short_description: Execute arbitrary commands on a PAN-OS device using XPath and element
+description:
+  - Run an arbitrary 'xapi' command taking an XPath (i.e. get) or XPath and element (i.e. set).
+  - See https://github.com/kevinsteves/pan-python/blob/master/doc/pan.xapi.rst for details.
+  - Runs a 'set' command by default.
+  - This should support all commands that your PAN-OS device accepts via its CLI.
+  - The CLI equivalents are found as follows.
+  - Once logged in, issue 'debug cli on'.
+  - Enter configuration mode by issuing 'configure'.
+  - Enter your set (or other) command, for example 'set deviceconfig system timezone Australia/Melbourne'.
+  - The debug output then shows the command in its XPath and element form.
+  - The 'xpath' is "/config/devices/entry[@name='localhost.localdomain']/deviceconfig/system"
+  - The 'element' is "<timezone>Australia/Melbourne</timezone>"
+author: "Jasper Mackenzie (@spmp)"
+deprecated:
+  alternative: Use U(https://galaxy.ansible.com/PaloAltoNetworks/paloaltonetworks) instead.
+  removed_in: "2.12"
+  why: Consolidating code base.
+requirements:
+  - pan-python
+options:
+  ip_address:
+    description:
+      - IP address or host FQDN of the target PAN-OS NVA.
+    required: true
+  username:
+    description:
+      - User name for a user with admin rights on the PAN-OS NVA.
+    default: admin
+  password:
+    description:
+      - Password for the given 'username'.
+    required: true
+  command:
+    description:
+      - Xapi method name which supports 'xpath' or 'xpath' and 'element'.
+    choices:
+      - set
+      - edit
+      - delete
+      - get
+      - show
+      - override
+    default: set
+  xpath:
+    description:
+      - The 'xpath' the command operates on.
+    required: true
+  element:
+    description:
+      - The 'element' for the 'xpath', if required.
+extends_documentation_fragment:
+- community.general.panos
+
+'''
+
+EXAMPLES = '''
+
+- name: Set timezone on PA NVA
+  panos_set:
+    ip_address: "192.168.1.1"
+    username: "my-random-admin"
+    password: "admin1234"
+    xpath: "/config/devices/entry/deviceconfig/system"
+    element: "<timezone>Australia/Melbourne</timezone>"
+
+- name: Commit configuration
+  panos_commit:
+    ip_address: "192.168.1.1"
+    username: "my-random-admin"
+    password: "admin1234"
+'''
+
+RETURN = '''
+# Default return values
+'''
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['deprecated'],
+                    'supported_by': 'community'}
+
+from ansible.module_utils.basic import AnsibleModule
+
+try:
+    import pan.xapi
+    HAS_LIB = True
+except ImportError:
+    HAS_LIB = False
+
+
+def main():
+    argument_spec = dict(
+        ip_address=dict(required=True),
+        password=dict(required=True, no_log=True),
+        username=dict(default='admin'),
+        command=dict(default='set', choices=['set', 'edit', 'delete', 'get', 'show', 'override']),
+        xpath=dict(required=True),
+        element=dict(default=None)
+    )
+    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False)
+    if not HAS_LIB:
+        module.fail_json(msg='pan-python is required for this module')
+
+    ip_address = module.params["ip_address"]
+    password = module.params["password"]
+    username = module.params['username']
+    xpath = module.params['xpath']
+    element = module.params['element']
+    xcommand = module.params['command']
+
+    xapi = pan.xapi.PanXapi(
+        hostname=ip_address,
+        api_username=username,
+        api_password=password,
+        timeout=60
+    )
+
+    if element is None:
+        # Issue command with no `element`
+        try:
+            getattr(xapi, xcommand)(xpath=xpath)
+        except Exception as e:
+            raise Exception("Failed to run '%s' with xpath: '%s' with the following error: %s" %
+                            (xcommand, xpath, e))
+    else:
+        # Issue command with `element`
+
try: + getattr(xapi, xcommand)(xpath=xpath, element=element) + except Exception as e: + raise Exception("Failed to run '%s' with xpath: '%s' and element '%s' with the following error: %s" % + (xcommand, xpath, element, e)) + + module.exit_json( + status="success" + ) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/radware/vdirect_commit.py b/plugins/modules/network/radware/vdirect_commit.py new file mode 100644 index 0000000000..900dab0a0a --- /dev/null +++ b/plugins/modules/network/radware/vdirect_commit.py @@ -0,0 +1,342 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright 2017 Radware LTD. +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'metadata_version': '1.1'} + +DOCUMENTATION = ''' +module: vdirect_commit +author: Evgeny Fedoruk @ Radware LTD (@evgenyfedoruk) +short_description: Commits pending configuration changes on Radware devices +description: + - Commits pending configuration changes on one or more Radware devices via vDirect server. + - For Alteon ADC device, apply, sync and save actions will be performed by default. + Skipping of an action is possible by explicit parameter specifying. + - For Alteon VX Container device, no sync operation will be performed + since sync action is only relevant for Alteon ADC devices. + - For DefensePro and AppWall devices, a bulk commit action will be performed. + Explicit apply, sync and save actions specifying is not relevant. +notes: + - Requires the Radware vdirect-client Python package on the host. This is as easy as + C(pip install vdirect-client) +options: + vdirect_ip: + description: + - Primary vDirect server IP address, may be set as C(VDIRECT_IP) environment variable. + required: true + vdirect_user: + description: + - vDirect server username, may be set as C(VDIRECT_USER) environment variable. + required: true + vdirect_password: + description: + - vDirect server password, may be set as C(VDIRECT_PASSWORD) environment variable. + required: true + vdirect_secondary_ip: + description: + - Secondary vDirect server IP address, may be set as C(VDIRECT_SECONDARY_IP) environment variable. + vdirect_wait: + description: + - Wait for async operation to complete, may be set as C(VDIRECT_WAIT) environment variable. + type: bool + default: 'yes' + vdirect_https_port: + description: + - vDirect server HTTPS port number, may be set as C(VDIRECT_HTTPS_PORT) environment variable. + default: 2189 + vdirect_http_port: + description: + - vDirect server HTTP port number, may be set as C(VDIRECT_HTTP_PORT) environment variable. + default: 2188 + vdirect_timeout: + description: + - Amount of time to wait for async operation completion [seconds], + - may be set as C(VDIRECT_TIMEOUT) environment variable. 
+        default: 60
+    vdirect_use_ssl:
+        description:
+            - If C(no), an HTTP connection will be used instead of the default HTTPS connection,
+            - may be set as C(VDIRECT_HTTPS) or C(VDIRECT_USE_SSL) environment variable.
+        type: bool
+        default: 'yes'
+    validate_certs:
+        description:
+            - If C(no), SSL certificates will not be validated,
+            - may be set as C(VDIRECT_VALIDATE_CERTS) or C(VDIRECT_VERIFY) environment variable.
+            - This should be set to C(no) only on personally controlled sites using self-signed certificates.
+        type: bool
+        default: 'yes'
+        aliases: [ vdirect_validate_certs ]
+    devices:
+        description:
+            - List of Radware Alteon device names for commit operations.
+        required: true
+    apply:
+        description:
+            - If C(no), apply action will not be performed. Relevant for ADC devices only.
+        type: bool
+        default: 'yes'
+    save:
+        description:
+            - If C(no), save action will not be performed. Relevant for ADC devices only.
+        type: bool
+        default: 'yes'
+    sync:
+        description:
+            - If C(no), sync action will not be performed. Relevant for ADC devices only.
+        type: bool
+        default: 'yes'
+
+requirements:
+  - "vdirect-client >= 4.9.0-post4"
+'''
+
+EXAMPLES = '''
+- name: vdirect_commit
+  vdirect_commit:
+    vdirect_ip: 10.10.10.10
+    vdirect_user: vDirect
+    vdirect_password: radware
+    devices: ['dev1', 'dev2']
+    sync: no
+'''
+
+RETURN = '''
+result:
+    description: Message detailing actions result
+    returned: success
+    type: str
+    sample: "Requested actions were successfully performed on all devices."
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.basic import env_fallback
+
+try:
+    from vdirect_client import rest_client
+    HAS_REST_CLIENT = True
+except ImportError:
+    HAS_REST_CLIENT = False
+
+
+SUCCESS = 'Requested actions were successfully performed on all devices.'
+FAILURE = 'Failure occurred while performing requested actions on devices.
See details' + +ADC_DEVICE_TYPE = 'Adc' +CONTAINER_DEVICE_TYPE = 'Container' +PARTITIONED_CONTAINER_DEVICE_TYPE = 'AlteonPartitioned' +APPWALL_DEVICE_TYPE = 'AppWall' +DP_DEVICE_TYPE = 'DefensePro' + +SUCCEEDED = 'succeeded' +FAILED = 'failed' +NOT_PERFORMED = 'not performed' + +meta_args = dict( + vdirect_ip=dict(required=True, fallback=(env_fallback, ['VDIRECT_IP'])), + vdirect_user=dict(required=True, fallback=(env_fallback, ['VDIRECT_USER'])), + vdirect_password=dict( + required=True, fallback=(env_fallback, ['VDIRECT_PASSWORD']), + no_log=True, type='str'), + vdirect_secondary_ip=dict( + required=False, fallback=(env_fallback, ['VDIRECT_SECONDARY_IP']), + default=None), + vdirect_use_ssl=dict( + required=False, fallback=(env_fallback, ['VDIRECT_HTTPS', 'VDIRECT_USE_SSL']), + default=True, type='bool'), + vdirect_wait=dict( + required=False, fallback=(env_fallback, ['VDIRECT_WAIT']), + default=True, type='bool'), + vdirect_timeout=dict( + required=False, fallback=(env_fallback, ['VDIRECT_TIMEOUT']), + default=60, type='int'), + validate_certs=dict( + required=False, fallback=(env_fallback, ['VDIRECT_VERIFY', 'VDIRECT_VALIDATE_CERTS']), + default=True, type='bool', aliases=['vdirect_validate_certs']), + vdirect_https_port=dict( + required=False, fallback=(env_fallback, ['VDIRECT_HTTPS_PORT']), + default=2189, type='int'), + vdirect_http_port=dict( + required=False, fallback=(env_fallback, ['VDIRECT_HTTP_PORT']), + default=2188, type='int'), + devices=dict( + required=True, type='list'), + apply=dict( + required=False, default=True, type='bool'), + save=dict( + required=False, default=True, type='bool'), + sync=dict( + required=False, default=True, type='bool'), +) + + +class CommitException(Exception): + def __init__(self, reason, details): + self.reason = reason + self.details = details + + def __str__(self): + return 'Reason: {0}. 
Details:{1}.'.format(self.reason, self.details) + + +class MissingDeviceException(CommitException): + def __init__(self, device_name): + super(MissingDeviceException, self).__init__( + 'Device missing', + 'Device ' + repr(device_name) + ' does not exist') + + +class VdirectCommit(object): + def __init__(self, params): + self.client = rest_client.RestClient(params['vdirect_ip'], + params['vdirect_user'], + params['vdirect_password'], + wait=params['vdirect_wait'], + secondary_vdirect_ip=params['vdirect_secondary_ip'], + https_port=params['vdirect_https_port'], + http_port=params['vdirect_http_port'], + timeout=params['vdirect_timeout'], + https=params['vdirect_use_ssl'], + verify=params['validate_certs']) + self.devices = params['devices'] + self.apply = params['apply'] + self.save = params['save'] + self.sync = params['sync'] + self.devicesMap = {} + + def _validate_devices(self): + for device in self.devices: + try: + res = self.client.adc.get(device) + if res[rest_client.RESP_STATUS] == 200: + self.devicesMap.update({device: ADC_DEVICE_TYPE}) + continue + res = self.client.container.get(device) + if res[rest_client.RESP_STATUS] == 200: + if res[rest_client.RESP_DATA]['type'] == PARTITIONED_CONTAINER_DEVICE_TYPE: + self.devicesMap.update({device: CONTAINER_DEVICE_TYPE}) + continue + res = self.client.appWall.get(device) + if res[rest_client.RESP_STATUS] == 200: + self.devicesMap.update({device: APPWALL_DEVICE_TYPE}) + continue + res = self.client.defensePro.get(device) + if res[rest_client.RESP_STATUS] == 200: + self.devicesMap.update({device: DP_DEVICE_TYPE}) + continue + + except Exception as e: + raise CommitException('Failed to communicate with device ' + device, str(e)) + + raise MissingDeviceException(device) + + def _perform_action_and_update_result(self, device, action, perform, failure_occurred, actions_result): + + if not perform or failure_occurred: + actions_result[action] = NOT_PERFORMED + return True + + try: + if self.devicesMap[device] == ADC_DEVICE_TYPE: + res = self.client.adc.control_device(device, action) + elif self.devicesMap[device] == CONTAINER_DEVICE_TYPE: + res = self.client.container.control(device, action) + elif self.devicesMap[device] == APPWALL_DEVICE_TYPE: + res = self.client.appWall.control_device(device, action) + elif self.devicesMap[device] == DP_DEVICE_TYPE: + res = self.client.defensePro.control_device(device, action) + + if res[rest_client.RESP_STATUS] in [200, 204]: + actions_result[action] = SUCCEEDED + else: + actions_result[action] = FAILED + actions_result['failure_description'] = res[rest_client.RESP_STR] + return False + except Exception as e: + actions_result[action] = FAILED + actions_result['failure_description'] = 'Exception occurred while performing '\ + + action + ' action. 
Exception: ' + str(e) + return False + + return True + + def commit(self): + self._validate_devices() + + result_to_return = dict() + result_to_return['details'] = list() + + for device in self.devices: + failure_occurred = False + device_type = self.devicesMap[device] + actions_result = dict() + actions_result['device_name'] = device + actions_result['device_type'] = device_type + + if device_type in [DP_DEVICE_TYPE, APPWALL_DEVICE_TYPE]: + failure_occurred = not self._perform_action_and_update_result( + device, 'commit', True, failure_occurred, actions_result)\ + or failure_occurred + else: + failure_occurred = not self._perform_action_and_update_result( + device, 'apply', self.apply, failure_occurred, actions_result)\ + or failure_occurred + if device_type != CONTAINER_DEVICE_TYPE: + failure_occurred = not self._perform_action_and_update_result( + device, 'sync', self.sync, failure_occurred, actions_result)\ + or failure_occurred + failure_occurred = not self._perform_action_and_update_result( + device, 'save', self.save, failure_occurred, actions_result)\ + or failure_occurred + + result_to_return['details'].extend([actions_result]) + + if failure_occurred: + result_to_return['msg'] = FAILURE + + if 'msg' not in result_to_return: + result_to_return['msg'] = SUCCESS + + return result_to_return + + +def main(): + + module = AnsibleModule(argument_spec=meta_args) + + if not HAS_REST_CLIENT: + module.fail_json(msg="The python vdirect-client module is required") + + try: + vdirect_commit = VdirectCommit(module.params) + result = vdirect_commit.commit() + result = dict(result=result) + module.exit_json(**result) + except Exception as e: + module.fail_json(msg=str(e)) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/radware/vdirect_file.py b/plugins/modules/network/radware/vdirect_file.py new file mode 100644 index 0000000000..889a96e71f --- /dev/null +++ b/plugins/modules/network/radware/vdirect_file.py @@ -0,0 +1,243 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright 2017 Radware LTD. +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'metadata_version': '1.1'} + +DOCUMENTATION = ''' +module: vdirect_file +author: Evgeny Fedoruk @ Radware LTD (@evgenyfedoruk) +short_description: Uploads a new or updates an existing runnable file into Radware vDirect server +description: + - Uploads a new or updates an existing configuration template or workflow template into the Radware vDirect server. + All parameters may be set as environment variables. +notes: + - Requires the Radware vdirect-client Python package on the host. This is as easy as + C(pip install vdirect-client) +options: + vdirect_ip: + description: + - Primary vDirect server IP address, may be set as VDIRECT_IP environment variable. 
+ required: true + vdirect_user: + description: + - vDirect server username, may be set as VDIRECT_USER environment variable. + required: true + vdirect_password: + description: + - vDirect server password, may be set as VDIRECT_PASSWORD environment variable. + required: true + vdirect_secondary_ip: + description: + - Secondary vDirect server IP address, may be set as VDIRECT_SECONDARY_IP environment variable. + vdirect_wait: + description: + - Wait for async operation to complete, may be set as VDIRECT_WAIT environment variable. + type: bool + default: 'yes' + vdirect_https_port: + description: + - vDirect server HTTPS port number, may be set as VDIRECT_HTTPS_PORT environment variable. + default: 2189 + vdirect_http_port: + description: + - vDirect server HTTP port number, may be set as VDIRECT_HTTP_PORT environment variable. + default: 2188 + vdirect_timeout: + description: + - Amount of time to wait for async operation completion [seconds], + - may be set as VDIRECT_TIMEOUT environment variable. + default: 60 + vdirect_use_ssl: + description: + - If C(no), an HTTP connection will be used instead of the default HTTPS connection, + - may be set as VDIRECT_HTTPS or VDIRECT_USE_SSL environment variable. + type: bool + default: 'yes' + validate_certs: + description: + - If C(no), SSL certificates will not be validated, + - may be set as VDIRECT_VALIDATE_CERTS or VDIRECT_VERIFY environment variable. + - This should only set to C(no) used on personally controlled sites using self-signed certificates. + type: bool + default: 'yes' + aliases: [ vdirect_validate_certs ] + file_name: + description: + - vDirect runnable file name to be uploaded. + - May be velocity configuration template (.vm) or workflow template zip file (.zip). + required: true + +requirements: + - "vdirect-client >= 4.9.0-post4" +''' + +EXAMPLES = ''' +- name: vdirect_file + vdirect_file: + vdirect_ip: 10.10.10.10 + vdirect_user: vDirect + vdirect_password: radware + file_name: /tmp/get_vlans.vm +''' + +RETURN = ''' +result: + description: Message detailing upload result + returned: success + type: str + sample: "Workflow template created" +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.basic import env_fallback +import os +import os.path + +try: + from vdirect_client import rest_client + HAS_REST_CLIENT = True +except ImportError: + HAS_REST_CLIENT = False + +TEMPLATE_EXTENSION = '.vm' +WORKFLOW_EXTENSION = '.zip' +WRONG_EXTENSION_ERROR = 'The file_name parameter must have ' \ + 'velocity script (.vm) extension or ZIP archive (.zip) extension' +CONFIGURATION_TEMPLATE_CREATED_SUCCESS = 'Configuration template created' +CONFIGURATION_TEMPLATE_UPDATED_SUCCESS = 'Configuration template updated' +WORKFLOW_TEMPLATE_CREATED_SUCCESS = 'Workflow template created' +WORKFLOW_TEMPLATE_UPDATED_SUCCESS = 'Workflow template updated' + +meta_args = dict( + vdirect_ip=dict(required=True, fallback=(env_fallback, ['VDIRECT_IP'])), + vdirect_user=dict(required=True, fallback=(env_fallback, ['VDIRECT_USER'])), + vdirect_password=dict( + required=True, fallback=(env_fallback, ['VDIRECT_PASSWORD']), + no_log=True, type='str'), + vdirect_secondary_ip=dict( + required=False, fallback=(env_fallback, ['VDIRECT_SECONDARY_IP']), + default=None), + vdirect_use_ssl=dict( + required=False, fallback=(env_fallback, ['VDIRECT_HTTPS', 'VDIRECT_USE_SSL']), + default=True, type='bool'), + vdirect_wait=dict( + required=False, fallback=(env_fallback, ['VDIRECT_WAIT']), + default=True, type='bool'), + vdirect_timeout=dict( + 
required=False, fallback=(env_fallback, ['VDIRECT_TIMEOUT']), + default=60, type='int'), + validate_certs=dict( + required=False, fallback=(env_fallback, ['VDIRECT_VERIFY', 'VDIRECT_VALIDATE_CERTS']), + default=True, type='bool', aliases=['vdirect_validate_certs']), + vdirect_https_port=dict( + required=False, fallback=(env_fallback, ['VDIRECT_HTTPS_PORT']), + default=2189, type='int'), + vdirect_http_port=dict( + required=False, fallback=(env_fallback, ['VDIRECT_HTTP_PORT']), + default=2188, type='int'), + file_name=dict(required=True) +) + + +class FileException(Exception): + def __init__(self, reason, details): + self.reason = reason + self.details = details + + def __str__(self): + return 'Reason: {0}. Details:{1}.'.format(self.reason, self.details) + + +class InvalidSourceException(FileException): + def __init__(self, message): + super(InvalidSourceException, self).__init__( + 'Error parsing file', repr(message)) + + +class VdirectFile(object): + def __init__(self, params): + self.client = rest_client.RestClient(params['vdirect_ip'], + params['vdirect_user'], + params['vdirect_password'], + wait=params['vdirect_wait'], + secondary_vdirect_ip=params['vdirect_secondary_ip'], + https_port=params['vdirect_https_port'], + http_port=params['vdirect_http_port'], + timeout=params['vdirect_timeout'], + https=params['vdirect_use_ssl'], + verify=params['validate_certs']) + + def upload(self, fqn): + if fqn.endswith(TEMPLATE_EXTENSION): + template_name = os.path.basename(fqn) + template = rest_client.Template(self.client) + runnable_file = open(fqn, 'r') + file_content = runnable_file.read() + + result_to_return = CONFIGURATION_TEMPLATE_CREATED_SUCCESS + result = template.create_from_source(file_content, template_name, fail_if_invalid=True) + if result[rest_client.RESP_STATUS] == 409: + result_to_return = CONFIGURATION_TEMPLATE_UPDATED_SUCCESS + result = template.upload_source(file_content, template_name, fail_if_invalid=True) + + if result[rest_client.RESP_STATUS] == 400: + raise InvalidSourceException(str(result[rest_client.RESP_STR])) + elif fqn.endswith(WORKFLOW_EXTENSION): + workflow = rest_client.WorkflowTemplate(self.client) + + runnable_file = open(fqn, 'rb') + file_content = runnable_file.read() + + result_to_return = WORKFLOW_TEMPLATE_CREATED_SUCCESS + result = workflow.create_template_from_archive(file_content, fail_if_invalid=True) + if result[rest_client.RESP_STATUS] == 409: + result_to_return = WORKFLOW_TEMPLATE_UPDATED_SUCCESS + result = workflow.update_archive(file_content, os.path.splitext(os.path.basename(fqn))[0]) + + if result[rest_client.RESP_STATUS] == 400: + raise InvalidSourceException(str(result[rest_client.RESP_STR])) + else: + result_to_return = WRONG_EXTENSION_ERROR + return result_to_return + + +def main(): + + module = AnsibleModule(argument_spec=meta_args) + + if not HAS_REST_CLIENT: + module.fail_json(msg="The python vdirect-client module is required") + + try: + vdirect_file = VdirectFile(module.params) + result = vdirect_file.upload(module.params['file_name']) + result = dict(result=result) + module.exit_json(**result) + except Exception as e: + module.fail_json(msg=str(e)) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/radware/vdirect_runnable.py b/plugins/modules/network/radware/vdirect_runnable.py new file mode 100644 index 0000000000..df2fce506c --- /dev/null +++ b/plugins/modules/network/radware/vdirect_runnable.py @@ -0,0 +1,340 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright 2017 Radware LTD. 
+# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'metadata_version': '1.1'} + +DOCUMENTATION = ''' +module: vdirect_runnable +author: Evgeny Fedoruk @ Radware LTD (@evgenyfedoruk) +short_description: Runs templates and workflow actions in Radware vDirect server +description: + - Runs configuration templates, creates workflows and runs workflow actions in Radware vDirect server. +notes: + - Requires the Radware vdirect-client Python package on the host. This is as easy as + C(pip install vdirect-client) +options: + vdirect_ip: + description: + - Primary vDirect server IP address, may be set as C(VDIRECT_IP) environment variable. + required: true + vdirect_user: + description: + - vDirect server username, may be set as C(VDIRECT_USER) environment variable. + required: true + vdirect_password: + description: + - vDirect server password, may be set as C(VDIRECT_PASSWORD) environment variable. + required: true + vdirect_secondary_ip: + description: + - Secondary vDirect server IP address, may be set as C(VDIRECT_SECONDARY_IP) environment variable. + vdirect_wait: + description: + - Wait for async operation to complete, may be set as C(VDIRECT_WAIT) environment variable. + type: bool + default: 'yes' + vdirect_https_port: + description: + - vDirect server HTTPS port number, may be set as C(VDIRECT_HTTPS_PORT) environment variable. + default: 2189 + vdirect_http_port: + description: + - vDirect server HTTP port number, may be set as C(VDIRECT_HTTP_PORT) environment variable. + default: 2188 + vdirect_timeout: + description: + - Amount of time to wait for async operation completion [seconds], + - may be set as C(VDIRECT_TIMEOUT) environment variable. + default: 60 + vdirect_use_ssl: + description: + - If C(no), an HTTP connection will be used instead of the default HTTPS connection, + - may be set as C(VDIRECT_HTTPS) or C(VDIRECT_USE_SSL) environment variable. + type: bool + default: 'yes' + validate_certs: + description: + - If C(no), SSL certificates will not be validated, + - may be set as C(VDIRECT_VALIDATE_CERTS) or C(VDIRECT_VERIFY) environment variable. + - This should only set to C(no) used on personally controlled sites using self-signed certificates. + type: bool + default: 'yes' + aliases: [ vdirect_validate_certs ] + runnable_type: + description: + - vDirect runnable type. + required: true + choices: ['ConfigurationTemplate', 'Workflow', 'WorkflowTemplate', 'Plugin'] + runnable_name: + description: + - vDirect runnable name to run. + - May be configuration template name, workflow template name or workflow instance name. + required: true + action_name: + description: + - Workflow action name to run. + - Required if I(runnable_type=Workflow). + parameters: + description: + - Action parameters dictionary. 
In case of C(ConfigurationTemplate) runnable type, + - the device connection details should always be passed as a parameter. + +requirements: + - "vdirect-client >= 4.9.0-post4" +''' + +EXAMPLES = ''' +- name: vdirect_runnable + vdirect_runnable: + vdirect_ip: 10.10.10.10 + vdirect_user: vDirect + vdirect_password: radware + runnable_type: ConfigurationTemplate + runnable_name: get_vlans + parameters: {'vlans_needed':1,'adc':[{'type':'Adc','name':'adc-1'}]} +''' + +RETURN = ''' +result: + description: Message detailing run result + returned: success + type: str + sample: "Workflow action run completed." +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.basic import env_fallback + +try: + from vdirect_client import rest_client + HAS_REST_CLIENT = True +except ImportError: + HAS_REST_CLIENT = False + +CONFIGURATION_TEMPLATE_RUNNABLE_TYPE = 'ConfigurationTemplate' +WORKFLOW_TEMPLATE_RUNNABLE_TYPE = 'WorkflowTemplate' +WORKFLOW_RUNNABLE_TYPE = 'Workflow' +PLUGIN_RUNNABLE_TYPE = 'Plugin' + +TEMPLATE_SUCCESS = 'Configuration template run completed.' +WORKFLOW_CREATION_SUCCESS = 'Workflow created.' +WORKFLOW_ACTION_SUCCESS = 'Workflow action run completed.' +PLUGIN_ACTION_SUCCESS = 'Plugin action run completed.' + +meta_args = dict( + vdirect_ip=dict(required=True, fallback=(env_fallback, ['VDIRECT_IP'])), + vdirect_user=dict(required=True, fallback=(env_fallback, ['VDIRECT_USER'])), + vdirect_password=dict( + required=True, fallback=(env_fallback, ['VDIRECT_PASSWORD']), + no_log=True, type='str'), + vdirect_secondary_ip=dict( + required=False, fallback=(env_fallback, ['VDIRECT_SECONDARY_IP']), + default=None), + vdirect_use_ssl=dict( + required=False, fallback=(env_fallback, ['VDIRECT_HTTPS', 'VDIRECT_USE_SSL']), + default=True, type='bool'), + vdirect_wait=dict( + required=False, fallback=(env_fallback, ['VDIRECT_WAIT']), + default=True, type='bool'), + vdirect_timeout=dict( + required=False, fallback=(env_fallback, ['VDIRECT_TIMEOUT']), + default=60, type='int'), + validate_certs=dict( + required=False, fallback=(env_fallback, ['VDIRECT_VERIFY', 'VDIRECT_VALIDATE_CERTS']), + default=True, type='bool', aliases=['vdirect_validate_certs']), + vdirect_https_port=dict( + required=False, fallback=(env_fallback, ['VDIRECT_HTTPS_PORT']), + default=2189, type='int'), + vdirect_http_port=dict( + required=False, fallback=(env_fallback, ['VDIRECT_HTTP_PORT']), + default=2188, type='int'), + runnable_type=dict( + required=True, + choices=[CONFIGURATION_TEMPLATE_RUNNABLE_TYPE, WORKFLOW_TEMPLATE_RUNNABLE_TYPE, WORKFLOW_RUNNABLE_TYPE, PLUGIN_RUNNABLE_TYPE]), + runnable_name=dict(required=True), + action_name=dict(required=False, default=None), + parameters=dict(required=False, type='dict', default={}) +) + + +class RunnableException(Exception): + def __init__(self, reason, details): + self.reason = reason + self.details = details + + def __str__(self): + return 'Reason: {0}. 
Details:{1}.'.format(self.reason, self.details) + + +class WrongActionNameException(RunnableException): + def __init__(self, action, available_actions): + super(WrongActionNameException, self).__init__('Wrong action name ' + repr(action), + 'Available actions are: ' + repr(available_actions)) + + +class MissingActionParametersException(RunnableException): + def __init__(self, required_parameters): + super(MissingActionParametersException, self).__init__( + 'Action parameters missing', + 'Required parameters are: ' + repr(required_parameters)) + + +class MissingRunnableException(RunnableException): + def __init__(self, name): + super(MissingRunnableException, self).__init__( + 'Runnable missing', + 'Runnable ' + name + ' is missing') + + +class VdirectRunnable(object): + + CREATE_WORKFLOW_ACTION = 'createWorkflow' + RUN_ACTION = 'run' + + def __init__(self, params): + self.client = rest_client.RestClient(params['vdirect_ip'], + params['vdirect_user'], + params['vdirect_password'], + wait=params['vdirect_wait'], + secondary_vdirect_ip=params['vdirect_secondary_ip'], + https_port=params['vdirect_https_port'], + http_port=params['vdirect_http_port'], + timeout=params['vdirect_timeout'], + strict_http_results=True, + https=params['vdirect_use_ssl'], + verify=params['validate_certs']) + self.params = params + self.type = self.params['runnable_type'] + self.name = self.params['runnable_name'] + + if self.type == WORKFLOW_TEMPLATE_RUNNABLE_TYPE: + self.action_name = VdirectRunnable.CREATE_WORKFLOW_ACTION + elif self.type == CONFIGURATION_TEMPLATE_RUNNABLE_TYPE: + self.action_name = VdirectRunnable.RUN_ACTION + else: + self.action_name = self.params['action_name'] + + if 'parameters' in self.params and self.params['parameters']: + self.action_params = self.params['parameters'] + else: + self.action_params = {} + + def _validate_runnable_exists(self): + if self.type == WORKFLOW_RUNNABLE_TYPE: + res = self.client.runnable.get_runnable_objects(self.type) + runnable_names = res[rest_client.RESP_DATA]['names'] + if self.name not in runnable_names: + raise MissingRunnableException(self.name) + else: + try: + self.client.catalog.get_catalog_item(self.type, self.name) + except rest_client.RestClientException: + raise MissingRunnableException(self.name) + + def _validate_action_name(self): + if self.type in [WORKFLOW_RUNNABLE_TYPE, PLUGIN_RUNNABLE_TYPE]: + res = self.client.runnable.get_available_actions(self.type, self.name) + available_actions = res[rest_client.RESP_DATA]['names'] + if self.action_name not in available_actions: + raise WrongActionNameException(self.action_name, available_actions) + + def _validate_required_action_params(self): + action_params_names = [n for n in self.action_params] + + res = self.client.runnable.get_action_info(self.type, self.name, self.action_name) + if 'parameters' in res[rest_client.RESP_DATA]: + action_params_spec = res[rest_client.RESP_DATA]['parameters'] + else: + action_params_spec = [] + + required_action_params_dict = [{'name': p['name'], 'type': p['type']} for p in action_params_spec + if p['type'] == 'alteon' or + p['type'] == 'defensePro' or + p['type'] == 'appWall' or + p['type'] == 'alteon[]' or + p['type'] == 'defensePro[]' or + p['type'] == 'appWall[]' or + p['direction'] != 'out'] + required_action_params_names = [n['name'] for n in required_action_params_dict] + + if set(required_action_params_names) & set(action_params_names) != set(required_action_params_names): + raise MissingActionParametersException(required_action_params_dict) + + def run(self): + 
self._validate_runnable_exists() + self._validate_action_name() + self._validate_required_action_params() + + data = self.action_params + + result = self.client.runnable.run(data, self.type, self.name, self.action_name) + result_to_return = {'msg': ''} + if result[rest_client.RESP_STATUS] == 200: + if result[rest_client.RESP_DATA]['success']: + if self.type == WORKFLOW_TEMPLATE_RUNNABLE_TYPE: + result_to_return['msg'] = WORKFLOW_CREATION_SUCCESS + elif self.type == CONFIGURATION_TEMPLATE_RUNNABLE_TYPE: + result_to_return['msg'] = TEMPLATE_SUCCESS + elif self.type == PLUGIN_RUNNABLE_TYPE: + result_to_return['msg'] = PLUGIN_ACTION_SUCCESS + else: + result_to_return['msg'] = WORKFLOW_ACTION_SUCCESS + result_to_return['output'] = result[rest_client.RESP_DATA] + + else: + if 'exception' in result[rest_client.RESP_DATA]: + raise RunnableException(result[rest_client.RESP_DATA]['exception']['message'], + result[rest_client.RESP_STR]) + else: + raise RunnableException('The status returned ' + str(result[rest_client.RESP_DATA]['status']), + result[rest_client.RESP_STR]) + else: + raise RunnableException(result[rest_client.RESP_REASON], + result[rest_client.RESP_STR]) + + return result_to_return + + +def main(): + + module = AnsibleModule(argument_spec=meta_args, + required_if=[['runnable_type', WORKFLOW_RUNNABLE_TYPE, ['action_name']], + ['runnable_type', PLUGIN_RUNNABLE_TYPE, ['action_name']]]) + + if not HAS_REST_CLIENT: + module.fail_json(msg="The python vdirect-client module is required") + + try: + vdirect_runnable = VdirectRunnable(module.params) + result = vdirect_runnable.run() + result = dict(result=result) + module.exit_json(**result) + except Exception as e: + module.fail_json(msg=str(e)) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/routeros/routeros_command.py b/plugins/modules/network/routeros/routeros_command.py new file mode 100644 index 0000000000..0ac0edeeb0 --- /dev/null +++ b/plugins/modules/network/routeros/routeros_command.py @@ -0,0 +1,186 @@ +#!/usr/bin/python + +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: routeros_command +author: "Egor Zaitsev (@heuels)" +short_description: Run commands on remote devices running MikroTik RouterOS +description: + - Sends arbitrary commands to an RouterOS node and returns the results + read from the device. This module includes an + argument that will cause the module to wait for a specific condition + before returning or timing out if the condition is not met. +options: + commands: + description: + - List of commands to send to the remote RouterOS device over the + configured provider. The resulting output from the command + is returned. If the I(wait_for) argument is provided, the + module is not returned until the condition is satisfied or + the number of retries has expired. + required: true + wait_for: + description: + - List of conditions to evaluate against the output of the + command. The task will wait for each condition to be true + before moving forward. If the conditional is not true + within the configured number of retries, the task fails. + See examples. + match: + description: + - The I(match) argument is used in conjunction with the + I(wait_for) argument to specify the match policy. 
Valid
+        values are C(all) or C(any). If the value is set to C(all)
+        then all conditionals in the wait_for must be satisfied. If
+        the value is set to C(any) then only one of the values must be
+        satisfied.
+    default: all
+    choices: ['any', 'all']
+  retries:
+    description:
+      - Specifies the number of retries a command should be tried
+        before it is considered failed. The command is run on the
+        target device every retry and evaluated against the
+        I(wait_for) conditions.
+    default: 10
+  interval:
+    description:
+      - Configures the interval in seconds to wait between retries
+        of the command. If the command does not pass the specified
+        conditions, the interval indicates how long to wait before
+        trying the command again.
+    default: 1
+'''
+
+EXAMPLES = """
+tasks:
+  - name: run command on remote devices
+    routeros_command:
+      commands: /system routerboard print
+
+  - name: run command and check to see if output contains routeros
+    routeros_command:
+      commands: /system resource print
+      wait_for: result[0] contains MikroTik
+
+  - name: run multiple commands on remote nodes
+    routeros_command:
+      commands:
+        - /system routerboard print
+        - /system identity print
+
+  - name: run multiple commands and evaluate the output
+    routeros_command:
+      commands:
+        - /system routerboard print
+        - /interface ethernet print
+      wait_for:
+        - result[0] contains x86
+        - result[1] contains ether1
+"""
+
+RETURN = """
+stdout:
+  description: The set of responses from the commands
+  returned: always apart from low level errors (such as action plugin)
+  type: list
+  sample: ['...', '...']
+stdout_lines:
+  description: The value of stdout split into a list
+  returned: always apart from low level errors (such as action plugin)
+  type: list
+  sample: [['...', '...'], ['...'], ['...']]
+failed_conditions:
+  description: The list of conditionals that have failed
+  returned: failed
+  type: list
+  sample: ['...', '...']
+"""
+
+import re
+import time
+
+from ansible_collections.community.general.plugins.module_utils.network.routeros.routeros import run_commands
+from ansible_collections.community.general.plugins.module_utils.network.routeros.routeros import routeros_argument_spec
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import ComplexList
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.parsing import Conditional
+from ansible.module_utils.six import string_types
+
+
+def to_lines(stdout):
+    for item in stdout:
+        if isinstance(item, string_types):
+            item = str(item).split('\n')
+        yield item
+
+
+def main():
+    """main entry point for module execution
+    """
+    argument_spec = dict(
+        commands=dict(type='list', required=True),
+
+        wait_for=dict(type='list'),
+        match=dict(default='all', choices=['all', 'any']),
+
+        retries=dict(default=10, type='int'),
+        interval=dict(default=1, type='int')
+    )
+
+    argument_spec.update(routeros_argument_spec)
+
+    module = AnsibleModule(argument_spec=argument_spec,
+                           supports_check_mode=True)
+
+    result = {'changed': False}
+
+    wait_for = module.params['wait_for'] or list()
+    conditionals = [Conditional(c) for c in wait_for]
+
+    retries = module.params['retries']
+    interval = module.params['interval']
+    match = module.params['match']
+
+    while retries > 0:
+        responses = run_commands(module, module.params['commands'])
+
+        for item in list(conditionals):
+            if item(responses):
+                if match == 'any':
+                    conditionals = list()
+                    break
+                conditionals.remove(item)
+
+        if not conditionals:
+            break
+
+        time.sleep(interval)
+        retries -= 1
+
+    if conditionals:
+        failed_conditions = [item.raw for item in conditionals]
+        msg = 'One or more conditional statements have not been satisfied'
+        module.fail_json(msg=msg, failed_conditions=failed_conditions)
+
+    result.update({
+        'changed': False,
+        'stdout': responses,
+        'stdout_lines': list(to_lines(responses))
+    })
+
+    module.exit_json(**result)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/network/routeros/routeros_facts.py b/plugins/modules/network/routeros/routeros_facts.py
new file mode 100644
index 0000000000..643cda2dd3
--- /dev/null
+++ b/plugins/modules/network/routeros/routeros_facts.py
@@ -0,0 +1,434 @@
+#!/usr/bin/python
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: routeros_facts
+author: "Egor Zaitsev (@heuels)"
+short_description: Collect facts from remote devices running MikroTik RouterOS
+description:
+  - Collects a base set of device facts from a remote device that
+    is running RouterOS. This module prepends all of the
+    base network fact keys with C(ansible_net_). The facts
+    module will always collect a base set of facts from the device
+    and can enable or disable collection of additional facts.
+options:
+  gather_subset:
+    description:
+      - When supplied, this argument will restrict the facts collected
+        to a given subset. Possible values for this argument include
+        C(all), C(hardware), C(config), and C(interfaces). Can specify a list of
+        values to include a larger subset. Values can also be used
+        with an initial C(!) to specify that a specific subset should
+        not be collected.
+ required: false + default: '!config' +''' + +EXAMPLES = """ +# Collect all facts from the device +- routeros_facts: + gather_subset: all + +# Collect only the config and default facts +- routeros_facts: + gather_subset: + - config + +# Do not collect hardware facts +- routeros_facts: + gather_subset: + - "!hardware" +""" + +RETURN = """ +ansible_net_gather_subset: + description: The list of fact subsets collected from the device + returned: always + type: list + +# default +ansible_net_model: + description: The model name returned from the device + returned: always + type: str +ansible_net_serialnum: + description: The serial number of the remote device + returned: always + type: str +ansible_net_version: + description: The operating system version running on the remote device + returned: always + type: str +ansible_net_hostname: + description: The configured hostname of the device + returned: always + type: str + +# hardware +ansible_net_spacefree_mb: + description: The available disk space on the remote device in MiB + returned: when hardware is configured + type: dict +ansible_net_spacetotal_mb: + description: The total disk space on the remote device in MiB + returned: when hardware is configured + type: dict +ansible_net_memfree_mb: + description: The available free memory on the remote device in MiB + returned: when hardware is configured + type: int +ansible_net_memtotal_mb: + description: The total memory on the remote device in MiB + returned: when hardware is configured + type: int + +# config +ansible_net_config: + description: The current active config from the device + returned: when config is configured + type: str + +# interfaces +ansible_net_all_ipv4_addresses: + description: All IPv4 addresses configured on the device + returned: when interfaces is configured + type: list +ansible_net_all_ipv6_addresses: + description: All IPv6 addresses configured on the device + returned: when interfaces is configured + type: list +ansible_net_interfaces: + description: A hash of all interfaces running on the system + returned: when interfaces is configured + type: dict +ansible_net_neighbors: + description: The list of neighbors from the remote device + returned: when interfaces is configured + type: dict +""" +import re + +from ansible_collections.community.general.plugins.module_utils.network.routeros.routeros import run_commands +from ansible_collections.community.general.plugins.module_utils.network.routeros.routeros import routeros_argument_spec +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.six import iteritems + + +class FactsBase(object): + + COMMANDS = list() + + def __init__(self, module): + self.module = module + self.facts = dict() + self.responses = None + + def populate(self): + self.responses = run_commands(self.module, commands=self.COMMANDS, check_rc=False) + + def run(self, cmd): + return run_commands(self.module, commands=cmd, check_rc=False) + + +class Default(FactsBase): + + COMMANDS = [ + '/system identity print without-paging', + '/system resource print without-paging', + '/system routerboard print without-paging' + ] + + def populate(self): + super(Default, self).populate() + data = self.responses[0] + if data: + self.facts['hostname'] = self.parse_hostname(data) + + data = self.responses[1] + if data: + self.facts['version'] = self.parse_version(data) + + data = self.responses[2] + if data: + self.facts['model'] = self.parse_model(data) + self.facts['serialnum'] = self.parse_serialnum(data) + + def parse_hostname(self, data): 
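+        # Descriptive note (added comment): each parse_* helper below extracts
+        # one field from the "key: value" text emitted by the RouterOS "print"
+        # commands listed in COMMANDS; re.search returns None when the field
+        # is absent, so these helpers implicitly return None in that case.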
+ match = re.search(r'name:\s(.*)\s*$', data, re.M) + if match: + return match.group(1) + + def parse_version(self, data): + match = re.search(r'version:\s(.*)\s*$', data, re.M) + if match: + return match.group(1) + + def parse_model(self, data): + match = re.search(r'model:\s(.*)\s*$', data, re.M) + if match: + return match.group(1) + + def parse_serialnum(self, data): + match = re.search(r'serial-number:\s(.*)\s*$', data, re.M) + if match: + return match.group(1) + + +class Hardware(FactsBase): + + COMMANDS = [ + '/system resource print without-paging' + ] + + def populate(self): + super(Hardware, self).populate() + data = self.responses[0] + if data: + self.parse_filesystem_info(data) + self.parse_memory_info(data) + + def parse_filesystem_info(self, data): + match = re.search(r'free-hdd-space:\s(.*)([KMG]iB)', data, re.M) + if match: + self.facts['spacefree_mb'] = self.to_megabytes(match) + match = re.search(r'total-hdd-space:\s(.*)([KMG]iB)', data, re.M) + if match: + self.facts['spacetotal_mb'] = self.to_megabytes(match) + + def parse_memory_info(self, data): + match = re.search(r'free-memory:\s(\d+\.?\d*)([KMG]iB)', data, re.M) + if match: + self.facts['memfree_mb'] = self.to_megabytes(match) + match = re.search(r'total-memory:\s(\d+\.?\d*)([KMG]iB)', data, re.M) + if match: + self.facts['memtotal_mb'] = self.to_megabytes(match) + + def to_megabytes(self, data): + if data.group(2) == 'KiB': + return float(data.group(1)) / 1024 + elif data.group(2) == 'MiB': + return float(data.group(1)) + elif data.group(2) == 'GiB': + return float(data.group(1)) * 1024 + else: + return None + + +class Config(FactsBase): + + COMMANDS = ['/export'] + + def populate(self): + super(Config, self).populate() + data = self.responses[0] + if data: + self.facts['config'] = data + + +class Interfaces(FactsBase): + + COMMANDS = [ + '/interface print detail without-paging', + '/ip address print detail without-paging', + '/ipv6 address print detail without-paging', + '/ip neighbor print detail without-paging' + ] + + DETAIL_RE = re.compile(r'([\w\d\-]+)=\"?(\w{3}/\d{2}/\d{4}\s\d{2}:\d{2}:\d{2}|[\w\d\-\.:/]+)') + WRAPPED_LINE_RE = re.compile(r'^\s+(?!\d)') + + def populate(self): + super(Interfaces, self).populate() + + self.facts['interfaces'] = dict() + self.facts['all_ipv4_addresses'] = list() + self.facts['all_ipv6_addresses'] = list() + self.facts['neighbors'] = dict() + + data = self.responses[0] + if data: + interfaces = self.parse_interfaces(data) + self.populate_interfaces(interfaces) + + data = self.responses[1] + if data: + data = self.parse_addresses(data) + self.populate_ipv4_interfaces(data) + + data = self.responses[2] + if data: + data = self.parse_addresses(data) + self.populate_ipv6_interfaces(data) + + data = self.responses[3] + if data: + self.facts['neighbors'] = self.parse_neighbors(data) + + def populate_interfaces(self, data): + for key, value in iteritems(data): + self.facts['interfaces'][key] = value + + def populate_ipv4_interfaces(self, data): + for key, value in iteritems(data): + if 'ipv4' not in self.facts['interfaces'][key]: + self.facts['interfaces'][key]['ipv4'] = list() + addr, subnet = value['address'].split("/") + ipv4 = dict(address=addr.strip(), subnet=subnet.strip()) + self.add_ip_address(addr.strip(), 'ipv4') + self.facts['interfaces'][key]['ipv4'].append(ipv4) + + def populate_ipv6_interfaces(self, data): + for key, value in iteritems(data): + if 'ipv6' not in self.facts['interfaces'][key]: + self.facts['interfaces'][key]['ipv6'] = list() + addr, subnet = 
value['address'].split("/") + ipv6 = dict(address=addr.strip(), subnet=subnet.strip()) + self.add_ip_address(addr.strip(), 'ipv6') + self.facts['interfaces'][key]['ipv6'].append(ipv6) + + def add_ip_address(self, address, family): + if family == 'ipv4': + self.facts['all_ipv4_addresses'].append(address) + else: + self.facts['all_ipv6_addresses'].append(address) + + def preprocess(self, data): + preprocessed = list() + for line in data.split('\n'): + if len(line) == 0 or line[:5] == 'Flags': + continue + elif not re.match(self.WRAPPED_LINE_RE, line): + preprocessed.append(line) + else: + preprocessed[-1] += line + return preprocessed + + def parse_interfaces(self, data): + facts = dict() + data = self.preprocess(data) + for line in data: + name = self.parse_name(line) + facts[name] = dict() + for (key, value) in re.findall(self.DETAIL_RE, line): + facts[name][key] = value + return facts + + def parse_addresses(self, data): + facts = dict() + data = self.preprocess(data) + for line in data: + name = self.parse_interface(line) + facts[name] = dict() + for (key, value) in re.findall(self.DETAIL_RE, line): + facts[name][key] = value + return facts + + def parse_neighbors(self, data): + facts = dict() + data = self.preprocess(data) + for line in data: + name = self.parse_interface(line) + facts[name] = dict() + for (key, value) in re.findall(self.DETAIL_RE, line): + facts[name][key] = value + return facts + + def parse_name(self, data): + match = re.search(r'name=\"([\w\d\-]+)\"', data, re.M) + if match: + return match.group(1) + + def parse_interface(self, data): + match = re.search(r'interface=([\w\d\-]+)', data, re.M) + if match: + return match.group(1) + + +FACT_SUBSETS = dict( + default=Default, + hardware=Hardware, + interfaces=Interfaces, + config=Config, +) + +VALID_SUBSETS = frozenset(FACT_SUBSETS.keys()) + +warnings = list() + + +def main(): + """main entry point for module execution + """ + argument_spec = dict( + gather_subset=dict(default=['!config'], type='list') + ) + + argument_spec.update(routeros_argument_spec) + + module = AnsibleModule(argument_spec=argument_spec, + supports_check_mode=True) + + gather_subset = module.params['gather_subset'] + + runable_subsets = set() + exclude_subsets = set() + + for subset in gather_subset: + if subset == 'all': + runable_subsets.update(VALID_SUBSETS) + continue + + if subset.startswith('!'): + subset = subset[1:] + if subset == 'all': + exclude_subsets.update(VALID_SUBSETS) + continue + exclude = True + else: + exclude = False + + if subset not in VALID_SUBSETS: + module.fail_json(msg='Bad subset: %s' % subset) + + if exclude: + exclude_subsets.add(subset) + else: + runable_subsets.add(subset) + + if not runable_subsets: + runable_subsets.update(VALID_SUBSETS) + + runable_subsets.difference_update(exclude_subsets) + runable_subsets.add('default') + + facts = dict() + facts['gather_subset'] = list(runable_subsets) + + instances = list() + for key in runable_subsets: + instances.append(FACT_SUBSETS[key](module)) + + for inst in instances: + inst.populate() + facts.update(inst.facts) + + ansible_facts = dict() + for key, value in iteritems(facts): + key = 'ansible_net_%s' % key + ansible_facts[key] = value + + module.exit_json(ansible_facts=ansible_facts, warnings=warnings) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/slxos/slxos_command.py b/plugins/modules/network/slxos/slxos_command.py new file mode 100644 index 0000000000..e8a88ef7e3 --- /dev/null +++ b/plugins/modules/network/slxos/slxos_command.py @@ 
-0,0 +1,224 @@
+#!/usr/bin/python
+
+# Copyright: (c) 2018, Extreme Networks Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: slxos_command
+author: "Lindsay Hill (@LindsayHill)"
+short_description: Run commands on remote devices running Extreme Networks SLX-OS
+description:
+  - Sends arbitrary commands to an SLX node and returns the results
+    read from the device. This module includes an
+    argument that will cause the module to wait for a specific condition
+    before returning or timing out if the condition is not met.
+  - This module does not support running commands in configuration mode.
+    Please use M(slxos_config) to configure SLX-OS devices.
+notes:
+  - Tested against SLX-OS 17s.1.02
+  - If a command sent to the device requires answering a prompt, it is possible
+    to pass a dict containing I(command), I(answer) and I(prompt). See examples.
+options:
+  commands:
+    description:
+      - List of commands to send to the remote SLX-OS device over the
+        configured provider. The resulting output from the command
+        is returned. If the I(wait_for) argument is provided, the
+        module is not returned until the condition is satisfied or
+        the number of retries has expired.
+    required: true
+  wait_for:
+    description:
+      - List of conditions to evaluate against the output of the
+        command. The task will wait for each condition to be true
+        before moving forward. If the conditional is not true
+        within the configured number of retries, the task fails.
+        See examples.
+  match:
+    description:
+      - The I(match) argument is used in conjunction with the
+        I(wait_for) argument to specify the match policy. Valid
+        values are C(all) or C(any). If the value is set to C(all)
+        then all conditionals in the wait_for must be satisfied. If
+        the value is set to C(any) then only one of the values must be
+        satisfied.
+    default: all
+    choices: ['any', 'all']
+  retries:
+    description:
+      - Specifies the number of retries a command should be tried
+        before it is considered failed. The command is run on the
+        target device every retry and evaluated against the
+        I(wait_for) conditions.
+    default: 10
+  interval:
+    description:
+      - Configures the interval in seconds to wait between retries
+        of the command. If the command does not pass the specified
+        conditions, the interval indicates how long to wait before
+        trying the command again.
+    default: 1
+'''
+
+EXAMPLES = """
+tasks:
+  - name: run show version on remote devices
+    slxos_command:
+      commands: show version
+
+  - name: run show version and check to see if output contains SLX
+    slxos_command:
+      commands: show version
+      wait_for: result[0] contains SLX
+
+  - name: run multiple commands on remote nodes
+    slxos_command:
+      commands:
+        - show version
+        - show interfaces
+
+  - name: run multiple commands and evaluate the output
+    slxos_command:
+      commands:
+        - show version
+        - show interface status
+      wait_for:
+        - result[0] contains SLX
+        - result[1] contains Eth
+  - name: run command that requires answering a prompt
+    slxos_command:
+      commands:
+        - command: 'clear sessions'
+          prompt: 'This operation will logout all the user sessions.
Do you want to continue (yes/no)?:' + answer: y +""" + +RETURN = """ +stdout: + description: The set of responses from the commands + returned: always apart from low level errors (such as action plugin) + type: list + sample: ['...', '...'] +stdout_lines: + description: The value of stdout split into a list + returned: always apart from low level errors (such as action plugin) + type: list + sample: [['...', '...'], ['...'], ['...']] +failed_conditions: + description: The list of conditionals that have failed + returned: failed + type: list + sample: ['...', '...'] +""" +import re +import time + +from ansible_collections.community.general.plugins.module_utils.network.slxos.slxos import run_commands +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import ComplexList +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.parsing import Conditional +from ansible.module_utils.six import string_types + + +__metaclass__ = type + + +def to_lines(stdout): + for item in stdout: + if isinstance(item, string_types): + item = str(item).split('\n') + yield item + + +def parse_commands(module, warnings): + command = ComplexList(dict( + command=dict(key=True), + prompt=dict(), + answer=dict() + ), module) + commands = command(module.params['commands']) + for item in list(commands): + configure_type = re.match(r'conf(?:\w*)(?:\s+(\w+))?', item['command']) + if module.check_mode: + if configure_type and configure_type.group(1) not in ('confirm', 'replace', 'revert', 'network'): + module.fail_json( + msg='slxos_command does not support running config mode ' + 'commands. Please use slxos_config instead' + ) + if not item['command'].startswith('show'): + warnings.append( + 'only show commands are supported when using check mode, not ' + 'executing `%s`' % item['command'] + ) + commands.remove(item) + return commands + + +def main(): + """main entry point for module execution + """ + argument_spec = dict( + commands=dict(type='list', required=True), + + wait_for=dict(type='list'), + match=dict(default='all', choices=['all', 'any']), + + retries=dict(default=10, type='int'), + interval=dict(default=1, type='int') + ) + + module = AnsibleModule(argument_spec=argument_spec, + supports_check_mode=True) + + result = {'changed': False} + + warnings = list() + commands = parse_commands(module, warnings) + result['warnings'] = warnings + + wait_for = module.params['wait_for'] or list() + conditionals = [Conditional(c) for c in wait_for] + + retries = module.params['retries'] + interval = module.params['interval'] + match = module.params['match'] + + while retries > 0: + responses = run_commands(module, commands) + + for item in list(conditionals): + if item(responses): + if match == 'any': + conditionals = list() + break + conditionals.remove(item) + + if not conditionals: + break + + time.sleep(interval) + retries -= 1 + + if conditionals: + failed_conditions = [item.raw for item in conditionals] + msg = 'One or more conditional statements have not been satisfied' + module.fail_json(msg=msg, failed_conditions=failed_conditions) + + result.update({ + 'changed': False, + 'stdout': responses, + 'stdout_lines': list(to_lines(responses)) + }) + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/slxos/slxos_config.py b/plugins/modules/network/slxos/slxos_config.py new file mode 100644 index 0000000000..acd887e3de --- /dev/null +++ 
b/plugins/modules/network/slxos/slxos_config.py @@ -0,0 +1,465 @@ +#!/usr/bin/python + +# Copyright: (c) 2018, Extreme Networks Inc. +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: slxos_config +author: "Lindsay Hill (@LindsayHill)" +short_description: Manage Extreme Networks SLX-OS configuration sections +description: + - Extreme SLX-OS configurations use a simple block indent file syntax + for segmenting configuration into sections. This module provides + an implementation for working with SLX-OS configuration sections in + a deterministic way. +notes: + - Tested against SLX-OS 17s.1.02 +options: + lines: + description: + - The ordered set of commands that should be configured in the + section. The commands must be the exact same commands as found + in the device running-config. Be sure to note the configuration + command syntax as some commands are automatically modified by the + device config parser. + aliases: ['commands'] + parents: + description: + - The ordered set of parents that uniquely identify the section or hierarchy + the commands should be checked against. If the parents argument + is omitted, the commands are checked against the set of top + level or global commands. + src: + description: + - Specifies the source path to the file that contains the configuration + or configuration template to load. The path to the source file can + either be the full path on the Ansible control host or a relative + path from the playbook or role root directory. This argument is mutually + exclusive with I(lines), I(parents). + before: + description: + - The ordered set of commands to push on to the command stack if + a change needs to be made. This allows the playbook designer + the opportunity to perform configuration commands prior to pushing + any changes without affecting how the set of commands are matched + against the system. + after: + description: + - The ordered set of commands to append to the end of the command + stack if a change needs to be made. Just like with I(before) this + allows the playbook designer to append a set of commands to be + executed after the command set. + match: + description: + - Instructs the module on the way to perform the matching of + the set of commands against the current device config. If + match is set to I(line), commands are matched line by line. If + match is set to I(strict), command lines are matched with respect + to position. If match is set to I(exact), command lines + must be an equal match. Finally, if match is set to I(none), the + module will not attempt to compare the source configuration with + the running configuration on the remote device. + default: line + choices: ['line', 'strict', 'exact', 'none'] + replace: + description: + - Instructs the module on the way to perform the configuration + on the device. If the replace argument is set to I(line) then + the modified lines are pushed to the device in configuration + mode. If the replace argument is set to I(block) then the entire + command block is pushed to the device in configuration mode if any + line is not correct. + default: line + choices: ['line', 'block'] + multiline_delimiter: + description: + - This argument is used when pushing a multiline configuration + element to the SLX-OS device. 
It specifies the character to use
+        as the delimiting character. This only applies to the
+        configuration action.
+    default: "@"
+  backup:
+    description:
+      - This argument will cause the module to create a full backup of
+        the current C(running-config) from the remote device before any
+        changes are made. If the C(backup_options) value is not given,
+        the backup file is written to the C(backup) folder in the playbook
+        root directory. If the directory does not exist, it is created.
+    type: bool
+    default: 'no'
+  running_config:
+    description:
+      - The module, by default, will connect to the remote device and
+        retrieve the current running-config to use as a base for comparing
+        against the contents of source. There are times when it is not
+        desirable to have the task get the current running-config for
+        every task in a playbook. The I(running_config) argument allows the
+        implementer to pass in the configuration to use as the base
+        config for comparison.
+    aliases: ['config']
+  defaults:
+    description:
+      - This argument specifies whether or not to collect all defaults
+        when getting the remote device running config. When enabled,
+        the module will get the current config by issuing the command
+        C(show running-config all).
+    type: bool
+    default: 'no'
+  save_when:
+    description:
+      - When changes are made to the device running-configuration, the
+        changes are not copied to non-volatile storage by default. Using
+        this argument will change that behavior. If the argument is set to
+        I(always), then the running-config will always be copied to the
+        startup-config and the I(modified) flag will always be set to
+        True. If the argument is set to I(modified), then the running-config
+        will only be copied to the startup-config if it has changed since
+        the last save to startup-config. If the argument is set to
+        I(never), the running-config will never be copied to the
+        startup-config. If the argument is set to I(changed), then the running-config
+        will only be copied to the startup-config if the task has made a change.
+    default: never
+    choices: ['always', 'never', 'modified', 'changed']
+  diff_against:
+    description:
+      - When using the C(ansible-playbook --diff) command line argument
+        the module can generate diffs against different sources.
+      - When this option is configured as I(startup), the module will return
+        the diff of the running-config against the startup-config.
+      - When this option is configured as I(intended), the module will
+        return the diff of the running-config against the configuration
+        provided in the C(intended_config) argument.
+      - When this option is configured as I(running), the module will
+        return the before and after diff of the running-config with respect
+        to any changes made to the device configuration.
+    choices: ['running', 'startup', 'intended']
+  diff_ignore_lines:
+    description:
+      - Use this argument to specify one or more lines that should be
+        ignored during the diff. This is used for lines in the configuration
+        that are automatically updated by the system. This argument takes
+        a list of regular expressions or exact line matches.
+  intended_config:
+    description:
+      - The C(intended_config) provides the master configuration that
+        the node should conform to and is used to check the final
+        running-config against. This argument will not modify any settings
+        on the remote device and is strictly used to check the compliance
+        of the current device's configuration against.
When specifying this
+        argument, the task should also modify the C(diff_against) value and
+        set it to I(intended).
+  backup_options:
+    description:
+      - This is a dict object containing configurable options related to backup file path.
+        The value of this option is read only when C(backup) is set to I(yes), if C(backup) is set
+        to I(no) this option will be silently ignored.
+    suboptions:
+      filename:
+        description:
+          - The filename to be used to store the backup configuration. If the filename
+            is not given it will be generated based on the hostname, current time and date
+            in format defined by <hostname>_config.<current-date>@<time>
+      dir_path:
+        description:
+          - This option provides the path ending with directory name in which the backup
+            configuration file will be stored. If the directory does not exist it will be first
+            created and the filename is either the value of C(filename) or default filename
+            as described in C(filename) options description. If the path value is not given
+            in that case a I(backup) directory will be created in the current working directory
+            and backup configuration will be copied in C(filename) within I(backup) directory.
+        type: path
+    type: dict
+'''
+
+EXAMPLES = """
+- name: configure top level configuration
+  slxos_config:
+    lines: hostname {{ inventory_hostname }}
+
+- name: configure interface settings
+  slxos_config:
+    lines:
+      - description test interface
+      - ip address 172.31.1.1/24
+    parents: interface Ethernet 0/1
+
+- name: configure multiple interfaces
+  slxos_config:
+    lines:
+      - lacp timeout long
+    parents: "{{ item }}"
+  with_items:
+    - interface Ethernet 0/1
+    - interface Ethernet 0/2
+
+- name: load new acl into device
+  slxos_config:
+    lines:
+      - seq 10 permit ip host 1.1.1.1 any log
+      - seq 20 permit ip host 2.2.2.2 any log
+      - seq 30 permit ip host 3.3.3.3 any log
+      - seq 40 permit ip host 4.4.4.4 any log
+      - seq 50 permit ip host 5.5.5.5 any log
+    parents: ip access-list extended test
+    before: no ip access-list extended test
+    match: exact
+
+- name: check the running-config against master config
+  slxos_config:
+    diff_against: intended
+    intended_config: "{{ lookup('file', 'master.cfg') }}"
+
+- name: check the startup-config against the running-config
+  slxos_config:
+    diff_against: startup
+    diff_ignore_lines:
+      - ntp clock .*
+
+- name: save running to startup when modified
+  slxos_config:
+    save_when: modified
+
+- name: configurable backup path
+  slxos_config:
+    lines: hostname {{ inventory_hostname }}
+    backup: yes
+    backup_options:
+      filename: backup.cfg
+      dir_path: /home/user
+"""
+
+RETURN = """
+updates:
+  description: The set of commands that will be pushed to the remote device
+  returned: always
+  type: list
+  sample: ['switch-attributes hostname foo', 'router ospf', 'area 0']
+commands:
+  description: The set of commands that will be pushed to the remote device
+  returned: always
+  type: list
+  sample: ['switch-attributes hostname foo', 'router ospf', 'area 0']
+backup_path:
+  description: The full path to the backup file
+  returned: when backup is yes
+  type: str
+  sample: /playbooks/ansible/backup/slxos_config.2018-02-12@18:26:34
+"""
+
+from ansible_collections.community.general.plugins.module_utils.network.slxos.slxos import run_commands, get_config, load_config
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.config import NetworkConfig, dumps
+
+__metaclass__ = type
+
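The configuration path in main() below builds a candidate NetworkConfig and
diffs it against the running config; only the missing lines are pushed. A
standalone sketch of that flow (it assumes the ansible.netcommon collection
is installed; both config strings are illustrative):

    from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.config import NetworkConfig, dumps

    running = NetworkConfig(indent=1, contents='switch-attributes host-name old-name\n')

    candidate = NetworkConfig(indent=1)
    candidate.add(['switch-attributes host-name new-name'])

    # Only lines absent from the running config survive the diff.
    configobjs = candidate.difference(running, match='line')
    print(dumps(configobjs, 'commands').split('\n'))
    # -> ['switch-attributes host-name new-name']

+
+def check_args(module, warnings):
+    if module.params['multiline_delimiter']:
+        if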
len(module.params['multiline_delimiter']) != 1: + module.fail_json(msg='multiline_delimiter value can only be a ' + 'single character') + + +def get_running_config(module, current_config=None): + contents = module.params['running_config'] + if not contents: + if current_config: + contents = current_config.config_text + else: + contents = get_config(module) + return NetworkConfig(indent=1, contents=contents) + + +def get_candidate(module): + candidate = NetworkConfig(indent=1) + + if module.params['src']: + src = module.params['src'] + candidate.load(src) + + elif module.params['lines']: + parents = module.params['parents'] or list() + candidate.add(module.params['lines'], parents=parents) + + return candidate + + +def save_config(module, result): + result['changed'] = True + if not module.check_mode: + command = {"command": "copy running-config startup-config", + "prompt": "This operation will modify your startup configuration. Do you want to continue", "answer": "y"} + run_commands(module, command) + else: + module.warn('Skipping command `copy running-config startup-config` ' + 'due to check_mode. Configuration not copied to ' + 'non-volatile storage') + + +def main(): + """ main entry point for module execution + """ + backup_spec = dict( + filename=dict(), + dir_path=dict(type='path') + ) + argument_spec = dict( + src=dict(type='path'), + + lines=dict(aliases=['commands'], type='list'), + parents=dict(type='list'), + + before=dict(type='list'), + after=dict(type='list'), + + match=dict(default='line', choices=['line', 'strict', 'exact', 'none']), + replace=dict(default='line', choices=['line', 'block']), + multiline_delimiter=dict(default='@'), + + running_config=dict(aliases=['config']), + intended_config=dict(), + + defaults=dict(type='bool', default=False), + backup=dict(type='bool', default=False), + backup_options=dict(type='dict', options=backup_spec), + + save_when=dict(choices=['always', 'never', 'modified', 'changed'], default='never'), + + diff_against=dict(choices=['startup', 'intended', 'running']), + diff_ignore_lines=dict(type='list'), + ) + + mutually_exclusive = [('lines', 'src'), + ('parents', 'src')] + + required_if = [('match', 'strict', ['lines']), + ('match', 'exact', ['lines']), + ('replace', 'block', ['lines']), + ('diff_against', 'intended', ['intended_config'])] + + module = AnsibleModule(argument_spec=argument_spec, + mutually_exclusive=mutually_exclusive, + required_if=required_if, + supports_check_mode=True) + + result = {'changed': False} + + warnings = list() + check_args(module, warnings) + result['warnings'] = warnings + + config = None + + if module.params['backup'] or (module._diff and module.params['diff_against'] == 'running'): + contents = get_config(module) + config = NetworkConfig(indent=1, contents=contents) + if module.params['backup']: + result['__backup__'] = contents + + if any((module.params['lines'], module.params['src'])): + match = module.params['match'] + replace = module.params['replace'] + path = module.params['parents'] + + candidate = get_candidate(module) + + if match != 'none': + config = get_running_config(module, config) + path = module.params['parents'] + configobjs = candidate.difference(config, path=path, match=match, replace=replace) + else: + configobjs = candidate.items + + if configobjs: + commands = dumps(configobjs, 'commands').split('\n') + + if module.params['before']: + commands[:0] = module.params['before'] + + if module.params['after']: + commands.extend(module.params['after']) + + result['commands'] = commands + 
result['updates'] = commands
+
+            # send the configuration commands to the device and merge
+            # them with the current running config
+            if not module.check_mode:
+                if commands:
+                    load_config(module, commands)
+
+            result['changed'] = True
+
+    running_config = None
+    startup_config = None
+
+    diff_ignore_lines = module.params['diff_ignore_lines']
+
+    # persist the running config to the startup config according to save_when
+    if module.params['save_when'] == 'always':
+        save_config(module, result)
+    elif module.params['save_when'] == 'modified':
+        output = run_commands(module, ['show running-config', 'show startup-config'])
+
+        running_config = NetworkConfig(indent=1, contents=output[0], ignore_lines=diff_ignore_lines)
+        startup_config = NetworkConfig(indent=1, contents=output[1], ignore_lines=diff_ignore_lines)
+
+        if running_config.sha1 != startup_config.sha1:
+            save_config(module, result)
+    elif module.params['save_when'] == 'changed' and result['changed']:
+        save_config(module, result)
+
+    if module._diff:
+        if not running_config:
+            output = run_commands(module, 'show running-config')
+            contents = output[0]
+        else:
+            contents = running_config.config_text
+
+        # recreate the object in order to process diff_ignore_lines
+        running_config = NetworkConfig(indent=1, contents=contents, ignore_lines=diff_ignore_lines)
+
+        if module.params['diff_against'] == 'running':
+            if module.check_mode:
+                module.warn("unable to perform diff against running-config due to check mode")
+                contents = None
+            else:
+                contents = config.config_text
+
+        elif module.params['diff_against'] == 'startup':
+            if not startup_config:
+                output = run_commands(module, 'show startup-config')
+                contents = output[0]
+            else:
+                contents = startup_config.config_text
+
+        elif module.params['diff_against'] == 'intended':
+            contents = module.params['intended_config']
+
+        if contents is not None:
+            base_config = NetworkConfig(indent=1, contents=contents, ignore_lines=diff_ignore_lines)
+
+            if running_config.sha1 != base_config.sha1:
+                if module.params['diff_against'] == 'intended':
+                    before = running_config
+                    after = base_config
+                elif module.params['diff_against'] in ('startup', 'running'):
+                    before = base_config
+                    after = running_config
+
+                result.update({
+                    'changed': True,
+                    'diff': {'before': str(before), 'after': str(after)}
+                })
+
+    module.exit_json(**result)
+
+
+if __name__ == '__main__':
+    main()
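The save_when=modified branch above decides whether to save by comparing the
sha1 digests of two NetworkConfig objects. A minimal sketch of that check
(assumes ansible.netcommon is installed; the config text is made up):

    from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.config import NetworkConfig

    running = NetworkConfig(indent=1, contents='ntp server 10.0.0.1\nbanner motd hello\n')
    startup = NetworkConfig(indent=1, contents='banner motd hello\n')

    # Differing digests mean the running config changed since the last save.
    print(running.sha1 != startup.sha1)  # -> True

diff --git a/plugins/modules/network/slxos/slxos_facts.py b/plugins/modules/network/slxos/slxos_facts.py
new file mode 100644
index 0000000000..7408965ada
--- /dev/null
+++ b/plugins/modules/network/slxos/slxos_facts.py
@@ -0,0 +1,456 @@
+#!/usr/bin/python
+#
+# (c) 2018 Extreme Networks Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.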
+#
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: slxos_facts
+author: "Lindsay Hill (@LindsayHill)"
+short_description: Collect facts from devices running Extreme SLX-OS
+description:
+  - Collects a base set of device facts from a remote device that
+    is running SLX-OS. This module prepends all of the
+    base network fact keys with C(ansible_net_). The facts
+    module will always collect a base set of facts from the device
+    and can enable or disable collection of additional facts.
+notes:
+  - Tested against SLX-OS 17s.1.02
+options:
+  gather_subset:
+    description:
+      - When supplied, this argument will restrict the facts collected
+        to a given subset. Possible values for this argument include
+        all, hardware, config, and interfaces. Can specify a list of
+        values to include a larger subset. Values can also be used
+        with an initial C(!) to specify that a specific subset should
+        not be collected.
+    required: false
+    default: ['!config']
+'''
+
+EXAMPLES = """
+# Collect all facts from the device
+- slxos_facts:
+    gather_subset: all
+
+# Collect only the config and default facts
+- slxos_facts:
+    gather_subset:
+      - config
+
+# Do not collect hardware facts
+- slxos_facts:
+    gather_subset:
+      - "!hardware"
+"""
+
+RETURN = """
+ansible_net_gather_subset:
+  description: The list of fact subsets collected from the device
+  returned: always
+  type: list
+
+# default
+ansible_net_model:
+  description: The model name returned from the device
+  returned: always
+  type: str
+ansible_net_serialnum:
+  description: The serial number of the remote device
+  returned: always
+  type: str
+ansible_net_version:
+  description: The operating system version running on the remote device
+  returned: always
+  type: str
+ansible_net_hostname:
+  description: The configured hostname of the device
+  returned: always
+  type: str
+
+# hardware
+ansible_net_memfree_mb:
+  description: The available free memory on the remote device in MB
+  returned: when hardware is configured
+  type: int
+ansible_net_memtotal_mb:
+  description: The total memory on the remote device in MB
+  returned: when hardware is configured
+  type: int
+
+# config
+ansible_net_config:
+  description: The current active config from the device
+  returned: when config is configured
+  type: str
+
+# interfaces
+ansible_net_all_ipv4_addresses:
+  description: All IPv4 addresses configured on the device
+  returned: when interfaces is configured
+  type: list
+ansible_net_all_ipv6_addresses:
+  description: All Primary IPv6 addresses configured on the device
+  returned: when interfaces is configured
+  type: list
+ansible_net_interfaces:
+  description: A hash of all interfaces running on the system
+  returned: when interfaces is configured
+  type: dict
+ansible_net_neighbors:
+  description: The list of LLDP neighbors from the remote device
+  returned: when interfaces is configured
+  type: dict
+"""
+import re
+
+from ansible_collections.community.general.plugins.module_utils.network.slxos.slxos import run_commands
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six import iteritems
+
+
+class FactsBase(object):
+
+    COMMANDS = list()
+
+    def __init__(self, module):
+        self.module = module
+        self.facts = dict()
+        self.responses = None
+
+    def populate(self):
+        self.responses = run_commands(self.module, self.COMMANDS)
+
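main() below funnels every requested subset through the same dispatch table:
FACT_SUBSETS maps subset names to FactsBase subclasses, and each populate()
call merges its keys into one shared facts dict. A condensed, runnable sketch
of that pattern (the Mini* classes and values are illustrative stand-ins):

    class MiniDefault(object):
        def populate(self):
            self.facts = {'hostname': 'slx-1'}

    class MiniHardware(object):
        def populate(self):
            self.facts = {'memtotal_mb': 4096}

    FACT_SUBSETS = {'default': MiniDefault, 'hardware': MiniHardware}

    facts = {}
    for key in ('default', 'hardware'):
        inst = FACT_SUBSETS[key]()
        inst.populate()
        facts.update(inst.facts)

    print(dict(('ansible_net_%s' % k, v) for k, v in facts.items()))
    # -> {'ansible_net_hostname': 'slx-1', 'ansible_net_memtotal_mb': 4096}

+    def run(self, cmd):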
return run_commands(self.module, cmd) + + +class Default(FactsBase): + + COMMANDS = [ + 'show version', + 'show inventory chassis', + r'show running-config | include host\-name' + ] + + def populate(self): + super(Default, self).populate() + data = self.responses[0] + if data: + self.facts['version'] = self.parse_version(data) + + data = self.responses[1] + if data: + self.facts['model'] = self.parse_model(data) + self.facts['serialnum'] = self.parse_serialnum(data) + + data = self.responses[2] + if data: + self.facts['hostname'] = self.parse_hostname(data) + + def parse_version(self, data): + match = re.search(r'SLX-OS Operating System Version: (\S+)', data) + if match: + return match.group(1) + + def parse_model(self, data): + match = re.search(r'SID:(\S+)', data, re.M) + if match: + return match.group(1) + + def parse_hostname(self, data): + match = re.search(r'switch-attributes host-name (\S+)', data, re.M) + if match: + return match.group(1) + + def parse_serialnum(self, data): + match = re.search(r'SN:(\S+)', data, re.M) + if match: + return match.group(1) + + +class Hardware(FactsBase): + + COMMANDS = [ + 'show process memory summary' + ] + + def populate(self): + super(Hardware, self).populate() + data = self.responses[0] + if data: + self.facts['memtotal_mb'] = int(round(int(self.parse_memtotal(data)) / 1024, 0)) + self.facts['memfree_mb'] = int(round(int(self.parse_memfree(data)) / 1024, 0)) + + def parse_memtotal(self, data): + match = re.search(r'Total\s*Memory: (\d+)\s', data, re.M) + if match: + return match.group(1) + + def parse_memfree(self, data): + match = re.search(r'Total Free: (\d+)\s', data, re.M) + if match: + return match.group(1) + + +class Config(FactsBase): + + COMMANDS = ['show running-config'] + + def populate(self): + super(Config, self).populate() + data = self.responses[0] + if data: + self.facts['config'] = data + + +class Interfaces(FactsBase): + + COMMANDS = [ + 'show interface', + 'show ipv6 interface brief', + r'show lldp nei detail | inc ^Local\ Interface|^Remote\ Interface|^System\ Name' + ] + + def populate(self): + super(Interfaces, self).populate() + + self.facts['all_ipv4_addresses'] = list() + self.facts['all_ipv6_addresses'] = list() + + data = self.responses[0] + if data: + interfaces = self.parse_interfaces(data) + self.facts['interfaces'] = self.populate_interfaces(interfaces) + self.populate_ipv4_interfaces(interfaces) + + data = self.responses[1] + if data: + self.populate_ipv6_interfaces(data) + + data = self.responses[2] + if data: + self.facts['neighbors'] = self.parse_neighbors(data) + + def populate_interfaces(self, interfaces): + facts = dict() + for key, value in iteritems(interfaces): + intf = dict() + intf['description'] = self.parse_description(value) + intf['macaddress'] = self.parse_macaddress(value) + intf['mtu'] = self.parse_mtu(value) + intf['bandwidth'] = self.parse_bandwidth(value) + intf['duplex'] = self.parse_duplex(value) + intf['lineprotocol'] = self.parse_lineprotocol(value) + intf['operstatus'] = self.parse_operstatus(value) + intf['type'] = self.parse_type(value) + + facts[key] = intf + return facts + + def populate_ipv4_interfaces(self, data): + for key, value in data.items(): + self.facts['interfaces'][key]['ipv4'] = list() + primary_address = addresses = [] + primary_address = re.findall(r'Primary Internet Address is (\S+)', value, re.M) + addresses = re.findall(r'Secondary Internet Address is (\S+)', value, re.M) + if not primary_address: + continue + addresses.append(primary_address[0]) + for address in 
addresses: + addr, subnet = address.split("/") + ipv4 = dict(address=addr.strip(), subnet=subnet.strip()) + self.add_ip_address(addr.strip(), 'ipv4') + self.facts['interfaces'][key]['ipv4'].append(ipv4) + + # Only gets primary IPv6 addresses + def populate_ipv6_interfaces(self, data): + interfaces = re.split('=+', data)[1].strip() + matches = re.findall(r'(\S+ \S+) +[\w-]+.+\s+([\w:/]+/\d+)', interfaces, re.M) + for match in matches: + interface = match[0] + self.facts['interfaces'][interface]['ipv6'] = list() + address, masklen = match[1].split('/') + ipv6 = dict(address=address, masklen=int(masklen)) + self.add_ip_address(ipv6['address'], 'ipv6') + self.facts['interfaces'][interface]['ipv6'].append(ipv6) + + def add_ip_address(self, address, family): + if family == 'ipv4': + self.facts['all_ipv4_addresses'].append(address) + else: + self.facts['all_ipv6_addresses'].append(address) + + def parse_neighbors(self, neighbors): + facts = dict() + lines = neighbors.split('Local Interface: ') + if not lines: + return facts + for line in lines: + match = re.search(r'(\w+ \S+)\s+\(Local Int.+?\)[\s\S]+Remote Interface: (\S+.+?) \(Remote Int.+?\)[\s\S]+System Name: (\S+)', line, re.M) + if match: + intf = match.group(1) + if intf not in facts: + facts[intf] = list() + fact = dict() + fact['host'] = match.group(3) + fact['port'] = match.group(2) + facts[intf].append(fact) + return facts + + def parse_interfaces(self, data): + parsed = dict() + for interface in data.split('\n\n'): + match = re.match(r'^(\S+ \S+)', interface, re.M) + if not match: + continue + else: + parsed[match.group(1)] = interface + return parsed + + def parse_description(self, data): + match = re.search(r'Description: (.+)$', data, re.M) + if match: + return match.group(1) + + def parse_macaddress(self, data): + match = re.search(r'Hardware is Ethernet, address is (\S+)', data) + if match: + return match.group(1) + + def parse_ipv4(self, data): + match = re.search(r'Primary Internet Address is ([^\s,]+)', data) + if match: + addr, masklen = match.group(1).split('/') + return dict(address=addr, masklen=int(masklen)) + + def parse_mtu(self, data): + match = re.search(r'MTU (\d+) bytes', data) + if match: + return int(match.group(1)) + + def parse_bandwidth(self, data): + match = re.search(r'LineSpeed Actual\s+:\s(.+)', data) + if match: + return match.group(1) + + def parse_duplex(self, data): + match = re.search(r'Duplex: (\S+)', data, re.M) + if match: + return match.group(1) + + def parse_type(self, data): + match = re.search(r'Hardware is (.+),', data, re.M) + if match: + return match.group(1) + + def parse_lineprotocol(self, data): + match = re.search(r'line protocol is (\S+)', data, re.M) + if match: + return match.group(1) + + def parse_operstatus(self, data): + match = re.match(r'^(?:.+) is (.+),', data, re.M) + if match: + return match.group(1) + + +FACT_SUBSETS = dict( + default=Default, + hardware=Hardware, + interfaces=Interfaces, + config=Config) + +VALID_SUBSETS = frozenset(FACT_SUBSETS.keys()) + + +def main(): + """main entry point for module execution + """ + argument_spec = dict( + gather_subset=dict(default=["!config"], type='list') + ) + + module = AnsibleModule(argument_spec=argument_spec, + supports_check_mode=True) + + gather_subset = module.params['gather_subset'] + + runable_subsets = set() + exclude_subsets = set() + + for subset in gather_subset: + if subset == 'all': + runable_subsets.update(VALID_SUBSETS) + continue + + if subset.startswith('!'): + subset = subset[1:] + if subset == 'all': + 
exclude_subsets.update(VALID_SUBSETS)
+                continue
+            exclude = True
+        else:
+            exclude = False
+
+        if subset not in VALID_SUBSETS:
+            module.fail_json(msg='Bad subset')
+
+        if exclude:
+            exclude_subsets.add(subset)
+        else:
+            runable_subsets.add(subset)
+
+    if not runable_subsets:
+        runable_subsets.update(VALID_SUBSETS)
+
+    runable_subsets.difference_update(exclude_subsets)
+    runable_subsets.add('default')
+
+    facts = dict()
+    facts['gather_subset'] = list(runable_subsets)
+
+    instances = list()
+    for key in runable_subsets:
+        instances.append(FACT_SUBSETS[key](module))
+
+    for inst in instances:
+        inst.populate()
+        facts.update(inst.facts)
+
+    ansible_facts = dict()
+    for key, value in iteritems(facts):
+        key = 'ansible_net_%s' % key
+        ansible_facts[key] = value
+
+    warnings = list()
+
+    module.exit_json(ansible_facts=ansible_facts, warnings=warnings)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/network/slxos/slxos_interface.py b/plugins/modules/network/slxos/slxos_interface.py
new file mode 100644
index 0000000000..59cb2b4fb8
--- /dev/null
+++ b/plugins/modules/network/slxos/slxos_interface.py
@@ -0,0 +1,469 @@
+#!/usr/bin/python
+#
+# (c) 2018 Extreme Networks Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: slxos_interface
+author: "Lindsay Hill (@LindsayHill)"
+short_description: Manage Interfaces on Extreme SLX-OS network devices
+description:
+  - This module provides declarative management of Interfaces
+    on Extreme SLX-OS network devices.
+notes:
+  - Tested against SLX-OS 17s.1.02
+options:
+  name:
+    description:
+      - Name of the Interface.
+    required: true
+  description:
+    description:
+      - Description of Interface.
+  enabled:
+    description:
+      - Interface link status.
+    default: True
+    type: bool
+  speed:
+    description:
+      - Interface link speed.
+  mtu:
+    description:
+      - Maximum size of transmit packet.
+  tx_rate:
+    description:
+      - Transmit rate in bits per second (bps).
+  rx_rate:
+    description:
+      - Receiver rate in bits per second (bps).
+  neighbors:
+    description:
+      - Check the operational state of given interface C(name) for LLDP neighbor.
+      - The following suboptions are available.
+    suboptions:
+      host:
+        description:
+          - "LLDP neighbor host for given interface C(name)."
+      port:
+        description:
+          - "LLDP neighbor port to which given interface C(name) is connected."
+  aggregate:
+    description: List of Interfaces definitions.
+  delay:
+    description:
+      - Time in seconds to wait before checking for the operational state on remote
+        device. This wait is applicable for operational state arguments, which are
+        I(state) with values C(up)/C(down), I(tx_rate) and I(rx_rate).
+ default: 10 + state: + description: + - State of the Interface configuration, C(up) means present and + operationally up and C(down) means present and operationally C(down) + default: present + choices: ['present', 'absent', 'up', 'down'] +''' + +EXAMPLES = """ +- name: configure interface + slxos_interface: + name: Ethernet 0/2 + description: test-interface + speed: 1000 + mtu: 9216 + +- name: remove interface + slxos_interface: + name: Loopback 9 + state: absent + +- name: make interface up + slxos_interface: + name: Ethernet 0/2 + enabled: True + +- name: make interface down + slxos_interface: + name: Ethernet 0/2 + enabled: False + +- name: Check intent arguments + slxos_interface: + name: Ethernet 0/2 + state: up + tx_rate: ge(0) + rx_rate: le(0) + +- name: Check neighbors intent arguments + slxos_interface: + name: Ethernet 0/41 + neighbors: + - port: Ethernet 0/41 + host: SLX + +- name: Config + intent + slxos_interface: + name: Ethernet 0/2 + enabled: False + state: down + +- name: Add interface using aggregate + slxos_interface: + aggregate: + - { name: Ethernet 0/1, mtu: 1548, description: test-interface-1 } + - { name: Ethernet 0/2, mtu: 1548, description: test-interface-2 } + speed: 10000 + state: present + +- name: Delete interface using aggregate + slxos_interface: + aggregate: + - name: Loopback 9 + - name: Loopback 10 + state: absent +""" + +RETURN = """ +commands: + description: The list of configuration mode commands to send to the device. + returned: always, except for the platforms that use Netconf transport to manage the device. + type: list + sample: + - interface Ethernet 0/2 + - description test-interface + - mtu 1548 +""" +import re + +from copy import deepcopy +from time import sleep + +from ansible.module_utils._text import to_text +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.connection import exec_command +from ansible_collections.community.general.plugins.module_utils.network.slxos.slxos import get_config, load_config +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.config import NetworkConfig +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import conditional, remove_default_spec + + +def validate_mtu(value, module): + if value and not 1548 <= int(value) <= 9216: + module.fail_json(msg='mtu must be between 1548 and 9216') + + +def validate_param_values(module, obj, param=None): + if param is None: + param = module.params + for key in obj: + # validate the param value (if validator func exists) + validator = globals().get('validate_%s' % key) + if callable(validator): + validator(param.get(key), module) + + +def parse_shutdown(configobj, name): + cfg = configobj['interface %s' % name] + cfg = '\n'.join(cfg.children) + match = re.search(r'^shutdown', cfg, re.M) + if match: + return True + else: + return False + + +def parse_config_argument(configobj, name, arg=None): + cfg = configobj['interface %s' % name] + cfg = '\n'.join(cfg.children) + match = re.search(r'%s (.+)$' % arg, cfg, re.M) + if match: + return match.group(1) + + +def search_obj_in_list(name, lst): + for o in lst: + if o['name'] == name: + return o + + return None + + +def add_command_to_interface(interface, cmd, commands): + if interface not in commands: + commands.append(interface) + commands.append(cmd) + + +def map_config_to_obj(module): + config = get_config(module) + configobj = NetworkConfig(indent=1, contents=config) + + match = re.findall(r'^interface (\S+ \S+)', config, re.M) + if not 
match: + return list() + + instances = list() + + for item in set(match): + obj = { + 'name': item, + 'description': parse_config_argument(configobj, item, 'description'), + 'speed': parse_config_argument(configobj, item, 'speed'), + 'mtu': parse_config_argument(configobj, item, 'mtu'), + 'disable': True if parse_shutdown(configobj, item) else False, + 'state': 'present' + } + instances.append(obj) + return instances + + +def map_params_to_obj(module): + obj = [] + aggregate = module.params.get('aggregate') + if aggregate: + for item in aggregate: + for key in item: + if item.get(key) is None: + item[key] = module.params[key] + + validate_param_values(module, item, item) + d = item.copy() + + if d['enabled']: + d['disable'] = False + else: + d['disable'] = True + + obj.append(d) + + else: + params = { + 'name': module.params['name'], + 'description': module.params['description'], + 'speed': module.params['speed'], + 'mtu': module.params['mtu'], + 'state': module.params['state'], + 'delay': module.params['delay'], + 'tx_rate': module.params['tx_rate'], + 'rx_rate': module.params['rx_rate'], + 'neighbors': module.params['neighbors'] + } + + validate_param_values(module, params) + if module.params['enabled']: + params.update({'disable': False}) + else: + params.update({'disable': True}) + + obj.append(params) + return obj + + +def map_obj_to_commands(updates): + commands = list() + want, have = updates + args = ('speed', 'description', 'mtu') + for w in want: + name = w['name'] + disable = w['disable'] + state = w['state'] + + obj_in_have = search_obj_in_list(name, have) + interface = 'interface ' + name + + if state == 'absent' and obj_in_have: + commands.append('no ' + interface) + + elif state in ('present', 'up', 'down'): + if obj_in_have: + for item in args: + candidate = w.get(item) + running = obj_in_have.get(item) + if candidate != running: + if candidate: + cmd = item + ' ' + str(candidate) + add_command_to_interface(interface, cmd, commands) + + if disable and not obj_in_have.get('disable', False): + add_command_to_interface(interface, 'shutdown', commands) + elif not disable and obj_in_have.get('disable', False): + add_command_to_interface(interface, 'no shutdown', commands) + else: + commands.append(interface) + for item in args: + value = w.get(item) + if value: + commands.append(item + ' ' + str(value)) + + if disable: + commands.append('no shutdown') + return commands + + +def check_declarative_intent_params(module, want, result): + failed_conditions = [] + have_neighbors = None + for w in want: + want_state = w.get('state') + want_tx_rate = w.get('tx_rate') + want_rx_rate = w.get('rx_rate') + want_neighbors = w.get('neighbors') + + if want_state not in ('up', 'down') and not want_tx_rate and not want_rx_rate and not want_neighbors: + continue + + if result['changed']: + sleep(w['delay']) + + command = 'show interface %s' % w['name'] + rc, out, err = exec_command(module, command) + if rc != 0: + module.fail_json(msg=to_text(err, errors='surrogate_then_replace'), command=command, rc=rc) + + if want_state in ('up', 'down'): + match = re.search(r'%s (\w+)' % 'line protocol is', out, re.M) + have_state = None + if match: + have_state = match.group(1) + if have_state is None or not conditional(want_state, have_state.strip()): + failed_conditions.append('state ' + 'eq(%s)' % want_state) + + if want_tx_rate: + match = re.search(r'%s (\d+)' % 'Output', out, re.M) + have_tx_rate = None + if match: + have_tx_rate = match.group(1) + + if have_tx_rate is None or not 
conditional(want_tx_rate, have_tx_rate.strip(), cast=int): + failed_conditions.append('tx_rate ' + want_tx_rate) + + if want_rx_rate: + match = re.search(r'%s (\d+)' % 'Input', out, re.M) + have_rx_rate = None + if match: + have_rx_rate = match.group(1) + + if have_rx_rate is None or not conditional(want_rx_rate, have_rx_rate.strip(), cast=int): + failed_conditions.append('rx_rate ' + want_rx_rate) + + if want_neighbors: + have_host = [] + have_port = [] + if have_neighbors is None: + rc, have_neighbors, err = exec_command(module, 'show lldp neighbors detail') + if rc != 0: + module.fail_json(msg=to_text(err, errors='surrogate_then_replace'), command=command, rc=rc) + + if have_neighbors: + lines = have_neighbors.strip().split('Local Interface: ') + short_name = w['name'].replace('Ethernet', 'Eth') + for line in lines: + field = line.split('\n') + if field[0].split('(')[0].strip() == short_name: + for item in field: + if item.startswith('System Name:'): + have_host.append(item.split(':')[1].strip()) + if item.startswith('Remote Interface:'): + have_port.append(item.split(':')[1].split('(')[0].strip()) + for item in want_neighbors: + host = item.get('host') + port = item.get('port') + if host and host not in have_host: + failed_conditions.append('host ' + host) + if port and port not in have_port: + failed_conditions.append('port ' + port) + return failed_conditions + + +def main(): + """ main entry point for module execution + """ + neighbors_spec = dict( + host=dict(), + port=dict() + ) + + element_spec = dict( + name=dict(), + description=dict(), + speed=dict(), + mtu=dict(), + enabled=dict(default=True, type='bool'), + tx_rate=dict(), + rx_rate=dict(), + neighbors=dict(type='list', elements='dict', options=neighbors_spec), + delay=dict(default=10, type='int'), + state=dict(default='present', + choices=['present', 'absent', 'up', 'down']) + ) + + aggregate_spec = deepcopy(element_spec) + aggregate_spec['name'] = dict(required=True) + + # remove default in aggregate spec, to handle common arguments + remove_default_spec(aggregate_spec) + + argument_spec = dict( + aggregate=dict(type='list', elements='dict', options=aggregate_spec), + ) + + argument_spec.update(element_spec) + + required_one_of = [['name', 'aggregate']] + mutually_exclusive = [['name', 'aggregate']] + + module = AnsibleModule(argument_spec=argument_spec, + required_one_of=required_one_of, + mutually_exclusive=mutually_exclusive, + supports_check_mode=True) + warnings = list() + + result = {'changed': False} + if warnings: + result['warnings'] = warnings + + want = map_params_to_obj(module) + have = map_config_to_obj(module) + + commands = map_obj_to_commands((want, have)) + result['commands'] = commands + + if commands: + if not module.check_mode: + load_config(module, commands) + result['changed'] = True + + failed_conditions = check_declarative_intent_params(module, want, result) + + if failed_conditions: + msg = 'One or more conditional statements have not been satisfied' + module.fail_json(msg=msg, failed_conditions=failed_conditions, changed=result['changed']) + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/slxos/slxos_l2_interface.py b/plugins/modules/network/slxos/slxos_l2_interface.py new file mode 100644 index 0000000000..633d5da412 --- /dev/null +++ b/plugins/modules/network/slxos/slxos_l2_interface.py @@ -0,0 +1,505 @@ +#!/usr/bin/python +# +# (c) 2018 Extreme Networks Inc. 
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+DOCUMENTATION = '''
+---
+module: slxos_l2_interface
+short_description: Manage Layer-2 interface on Extreme Networks SLX-OS devices
+description:
+  - This module provides declarative management of Layer-2 interface on
+    Extreme SLX-OS devices.
+author:
+  - Matthew Stone (@bigmstone)
+options:
+  name:
+    description:
+      - Full name of the interface excluding any logical
+        unit number, e.g. Ethernet 0/1.
+    required: true
+    aliases: ['interface']
+  mode:
+    description:
+      - Mode in which interface needs to be configured.
+    default: access
+    choices: ['access', 'trunk']
+  access_vlan:
+    description:
+      - Configure given VLAN in access port.
+        If C(mode=access), used as the access VLAN ID.
+  trunk_vlans:
+    description:
+      - List of VLANs to be configured in trunk port.
+        If C(mode=trunk), used as the VLAN range to ADD or REMOVE
+        from the trunk.
+  native_vlan:
+    description:
+      - Native VLAN to be configured in trunk port.
+        If C(mode=trunk), used as the trunk native VLAN ID.
+  trunk_allowed_vlans:
+    description:
+      - List of allowed VLANs in a given trunk port.
+        If C(mode=trunk), these are the only VLANs that will be
+        configured on the trunk, e.g. "2-10,15".
+  aggregate:
+    description:
+      - List of Layer-2 interface definitions.
+  state:
+    description:
+      - Manage the state of the Layer-2 Interface configuration.
+    default: present
+    choices: ['present','absent', 'unconfigured']
+'''
+
+EXAMPLES = """
+- name: Ensure Ethernet 0/5 is in its default l2 interface state
+  slxos_l2_interface:
+    name: Ethernet 0/5
+    state: unconfigured
+
+- name: Ensure Ethernet 0/5 is configured for access vlan 20
+  slxos_l2_interface:
+    name: Ethernet 0/5
+    mode: access
+    access_vlan: 20
+
+- name: Ensure Ethernet 0/5 only has vlans 5-10 as trunk vlans
+  slxos_l2_interface:
+    name: Ethernet 0/5
+    mode: trunk
+    native_vlan: 10
+    trunk_vlans: 5-10
+
+- name: Ensure Ethernet 0/5 is a trunk port and ensure 2-50 are being tagged (doesn't mean others aren't also being tagged)
+  slxos_l2_interface:
+    name: Ethernet 0/5
+    mode: trunk
+    native_vlan: 10
+    trunk_vlans: 2-50
+
+- name: Ensure these VLANs are not being tagged on the trunk
+  slxos_l2_interface:
+    name: Ethernet 0/5
+    mode: trunk
+    trunk_vlans: 51-4094
+    state: absent
+"""
+
+RETURN = """
+commands:
+  description: The list of configuration mode commands to send to the device
+  returned: always, except for the platforms that use Netconf transport to manage the device.
+ type: list + sample: + - interface Ethernet 0/5 + - switchport access vlan 20 +""" + +import re +from copy import deepcopy + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import remove_default_spec +from ansible_collections.community.general.plugins.module_utils.network.slxos.slxos import get_config, load_config, run_commands + + +def get_interface_type(interface): + intf_type = 'unknown' + if interface.upper()[:2] in ('ET', 'GI'): + intf_type = 'ethernet' + elif interface.upper().startswith('VL'): + intf_type = 'svi' + elif interface.upper().startswith('LO'): + intf_type = 'loopback' + elif interface.upper()[:2] in ('MG', 'MA'): + intf_type = 'management' + elif interface.upper().startswith('PO'): + intf_type = 'portchannel' + elif interface.upper().startswith('NV'): + intf_type = 'nve' + + return intf_type + + +def is_switchport(name, module): + intf_type = get_interface_type(name) + + if intf_type in ('ethernet', 'portchannel'): + config = run_commands(module, ['show interface {0} switchport'.format(name)])[0] + match = re.search(r'Interface name\s+:\s', config) + return bool(match) + return False + + +def interface_is_portchannel(name, module): + if get_interface_type(name) == 'ethernet': + config = get_config(module) + if 'channel group' in config: + return True + + return False + + +def get_switchport(name, module): + config = run_commands(module, ['show interface {0} switchport'.format(name)])[0] + mode = re.search(r'Switchport mode\s+:\s(?:.* )?(\w+)$', config, re.M) + if mode: + mode = mode.group(1) + access = re.search(r'Default Vlan\s+:\s(\d+)', config) + if access: + access = access.group(1) + native = re.search(r'Native Vlan\s+:\s(\d+)', config) + if native: + native = native.group(1) + trunk = re.search(r'Active Vlans\s+:\s(.+)$', config, re.M) + if trunk: + trunk = trunk.group(1) + if trunk == 'ALL': + trunk = '1-4094' + + switchport_config = { + "interface": name, + "mode": mode, + "access_vlan": access, + "native_vlan": native, + "trunk_vlans": trunk, + } + + return switchport_config + + +def remove_switchport_config_commands(name, existing, proposed, module): + mode = proposed.get('mode') + commands = [] + command = None + + if mode == 'access': + av_check = existing.get('access_vlan') == proposed.get('access_vlan') + if av_check: + command = 'no switchport access vlan {0}'.format(existing.get('access_vlan')) + commands.append(command) + + elif mode == 'trunk': + tv_check = existing.get('trunk_vlans_list') == proposed.get('trunk_vlans_list') + + if not tv_check: + existing_vlans = existing.get('trunk_vlans_list') + proposed_vlans = proposed.get('trunk_vlans_list') + vlans_to_remove = set(proposed_vlans).intersection(existing_vlans) + + if vlans_to_remove: + proposed_allowed_vlans = proposed.get('trunk_allowed_vlans') + remove_trunk_allowed_vlans = proposed.get('trunk_vlans', proposed_allowed_vlans) + command = 'switchport trunk allowed vlan remove {0}'.format(remove_trunk_allowed_vlans) + commands.append(command) + + native_check = existing.get('native_vlan') == proposed.get('native_vlan') + if native_check and proposed.get('native_vlan'): + command = 'no switchport trunk native vlan {0}'.format(existing.get('native_vlan')) + commands.append(command) + + if commands: + commands.insert(0, 'interface ' + name) + return commands + + +def get_switchport_config_commands(name, existing, proposed, module): + """Gets commands required to config a given switchport interface + """ + + 
proposed_mode = proposed.get('mode')
+    existing_mode = existing.get('mode')
+    commands = []
+    command = None
+
+    if proposed_mode != existing_mode:
+        if proposed_mode == 'trunk':
+            command = 'switchport mode trunk'
+        elif proposed_mode == 'access':
+            command = 'switchport mode access'
+
+    if command:
+        commands.append(command)
+
+    if proposed_mode == 'access':
+        av_check = str(existing.get('access_vlan')) == str(proposed.get('access_vlan'))
+        if not av_check:
+            command = 'switchport access vlan {0}'.format(proposed.get('access_vlan'))
+            commands.append(command)
+
+    elif proposed_mode == 'trunk':
+        tv_check = existing.get('trunk_vlans_list') == proposed.get('trunk_vlans_list')
+
+        if not tv_check:
+            if proposed.get('allowed'):
+                command = 'switchport trunk allowed vlan add {0}'.format(proposed.get('trunk_allowed_vlans'))
+                commands.append(command)
+
+            else:
+                existing_vlans = existing.get('trunk_vlans_list')
+                proposed_vlans = proposed.get('trunk_vlans_list')
+                vlans_to_add = set(proposed_vlans).difference(existing_vlans)
+                if vlans_to_add:
+                    command = 'switchport trunk allowed vlan add {0}'.format(proposed.get('trunk_vlans'))
+                    commands.append(command)
+
+        native_check = str(existing.get('native_vlan')) == str(proposed.get('native_vlan'))
+        if not native_check and proposed.get('native_vlan'):
+            command = 'switchport trunk native vlan {0}'.format(proposed.get('native_vlan'))
+            commands.append(command)
+
+    if commands:
+        commands.insert(0, 'interface ' + name)
+    return commands
+
+
+def is_switchport_default(existing):
+    """Determines if switchport has a default config based on mode
+    Args:
+        existing (dict): existing switchport configuration from Ansible module
+    Returns:
+        boolean: True if switchport has OOB Layer 2 config, i.e.
+        vlan 1 and trunk all and mode is access
+    """
+
+    c1 = str(existing['access_vlan']) == '1'
+    c2 = str(existing['native_vlan']) == '1'
+    c3 = existing['trunk_vlans'] == '1-4094'
+    c4 = existing['mode'] == 'access'
+
+    default = c1 and c2 and c3 and c4
+
+    return default
+
+
+def default_switchport_config(name):
+    commands = []
+    commands.append('interface ' + name)
+    commands.append('switchport mode access')
+    commands.append('switchport access vlan 1')
+    commands.append('switchport trunk native vlan 1')
+    commands.append('switchport trunk allowed vlan all')
+    return commands
+
+
+def vlan_range_to_list(vlans):
+    result = []
+    if vlans:
+        for part in vlans.split(','):
+            if part == 'none':
+                break
+            if '-' in part:
+                start, stop = (int(i) for i in part.split('-'))
+                result.extend(range(start, stop + 1))
+            else:
+                result.append(int(part))
+    return sorted(result)
+
+
+def get_list_of_vlans(module):
+    config = run_commands(module, ['show vlan brief'])[0]
+    vlans = set()
+
+    lines = config.strip().splitlines()
+    for line in lines:
+        line_parts = line.split()
+        if line_parts:
+            try:
+                int(line_parts[0])
+            except ValueError:
+                continue
+            vlans.add(line_parts[0])
+
+    return list(vlans)
+
+
+def flatten_list(commands):
+    flat_list = []
+    for command in commands:
+        if isinstance(command, list):
+            flat_list.extend(command)
+        else:
+            flat_list.append(command)
+    return flat_list
+
+
+def map_params_to_obj(module):
+    obj = []
+
+    aggregate = module.params.get('aggregate')
+    if aggregate:
+        for item in aggregate:
+            for key in item:
+                if item.get(key) is None:
+                    item[key] = module.params[key]
+
+            obj.append(item.copy())
+    else:
+        obj.append({
+            'name': module.params['name'],
+            'mode': module.params['mode'],
+            'access_vlan': module.params['access_vlan'],
+            'native_vlan':
module.params['native_vlan'], + 'trunk_vlans': module.params['trunk_vlans'], + 'trunk_allowed_vlans': module.params['trunk_allowed_vlans'], + 'state': module.params['state'] + }) + + return obj + + +def main(): + """ main entry point for module execution + """ + element_spec = dict( + name=dict(type='str', aliases=['interface']), + mode=dict(choices=['access', 'trunk'], default='access'), + access_vlan=dict(type='str'), + native_vlan=dict(type='str'), + trunk_vlans=dict(type='str'), + trunk_allowed_vlans=dict(type='str'), + state=dict(choices=['absent', 'present', 'unconfigured'], default='present') + ) + + aggregate_spec = deepcopy(element_spec) + + # remove default in aggregate spec, to handle common arguments + remove_default_spec(aggregate_spec) + + argument_spec = dict( + aggregate=dict(type='list', elements='dict', options=aggregate_spec), + ) + + argument_spec.update(element_spec) + + module = AnsibleModule(argument_spec=argument_spec, + mutually_exclusive=[['access_vlan', 'trunk_vlans'], + ['access_vlan', 'native_vlan'], + ['access_vlan', 'trunk_allowed_vlans']], + supports_check_mode=True) + + warnings = list() + commands = [] + result = {'changed': False, 'warnings': warnings} + + want = map_params_to_obj(module) + for w in want: + name = w['name'] + mode = w['mode'] + access_vlan = w['access_vlan'] + state = w['state'] + trunk_vlans = w['trunk_vlans'] + native_vlan = w['native_vlan'] + trunk_allowed_vlans = w['trunk_allowed_vlans'] + + args = dict(name=name, mode=mode, access_vlan=access_vlan, + native_vlan=native_vlan, trunk_vlans=trunk_vlans, + trunk_allowed_vlans=trunk_allowed_vlans) + + proposed = dict((k, v) for k, v in args.items() if v is not None) + + name = name.lower() + + if mode == 'access' and state == 'present' and not access_vlan: + module.fail_json(msg='access_vlan param is required when mode=access && state=present') + + if mode == 'trunk' and access_vlan: + module.fail_json(msg='access_vlan param not supported when using mode=trunk') + + if not is_switchport(name, module): + module.fail_json(msg='Ensure interface is configured to be a L2' + '\nport first before using this module. You can use' + '\nthe slxos_interface module for this.') + + if interface_is_portchannel(name, module): + module.fail_json(msg='Cannot change L2 config on physical ' + '\nport because it is in a portchannel. 
' + '\nYou should update the portchannel config.') + + # existing will never be null for Eth intfs as there is always a default + existing = get_switchport(name, module) + + # Safeguard check + # If there isn't an existing, something is wrong per previous comment + if not existing: + module.fail_json(msg='Make sure you are using the FULL interface name') + + if trunk_vlans or trunk_allowed_vlans: + if trunk_vlans: + trunk_vlans_list = vlan_range_to_list(trunk_vlans) + elif trunk_allowed_vlans: + trunk_vlans_list = vlan_range_to_list(trunk_allowed_vlans) + proposed['allowed'] = True + + existing_trunks_list = vlan_range_to_list((existing['trunk_vlans'])) + + existing['trunk_vlans_list'] = existing_trunks_list + proposed['trunk_vlans_list'] = trunk_vlans_list + + current_vlans = get_list_of_vlans(module) + + if state == 'present': + if access_vlan and access_vlan not in current_vlans: + module.fail_json(msg='You are trying to configure a VLAN' + ' on an interface that\ndoes not exist on the ' + ' switch yet!', vlan=access_vlan) + elif native_vlan and native_vlan not in current_vlans: + module.fail_json(msg='You are trying to configure a VLAN' + ' on an interface that\ndoes not exist on the ' + ' switch yet!', vlan=native_vlan) + else: + command = get_switchport_config_commands(name, existing, proposed, module) + commands.append(command) + elif state == 'unconfigured': + is_default = is_switchport_default(existing) + if not is_default: + command = default_switchport_config(name) + commands.append(command) + elif state == 'absent': + command = remove_switchport_config_commands(name, existing, proposed, module) + commands.append(command) + + if trunk_vlans or trunk_allowed_vlans: + existing.pop('trunk_vlans_list') + proposed.pop('trunk_vlans_list') + + cmds = flatten_list(commands) + if cmds: + if module.check_mode: + module.exit_json(changed=True, commands=cmds) + else: + result['changed'] = True + load_config(module, cmds) + if 'configure' in cmds: + cmds.pop(0) + + result['commands'] = cmds + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/slxos/slxos_l3_interface.py b/plugins/modules/network/slxos/slxos_l3_interface.py new file mode 100644 index 0000000000..2b901ad7af --- /dev/null +++ b/plugins/modules/network/slxos/slxos_l3_interface.py @@ -0,0 +1,312 @@ +#!/usr/bin/python +# +# (c) 2018 Extreme Networks Inc. +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: slxos_l3_interface +author: "Matthew Stone (@bigmstone)" +short_description: Manage L3 interfaces on Extreme Networks SLX-OS network devices. +description: + - This module provides declarative management of L3 interfaces + on slxos network devices. 
+notes:
+  - Tested against slxos 15.2
+options:
+  name:
+    description:
+      - Name of the L3 interface to be configured, e.g. Ethernet 0/2
+  ipv4:
+    description:
+      - IPv4 address to be set for the L3 interface mentioned in I(name) option.
+        The address format is <ipv4 address>/<mask>, where the mask is a number
+        in range 0-32, e.g. 192.168.0.1/24
+  ipv6:
+    description:
+      - IPv6 address to be set for the L3 interface mentioned in I(name) option.
+        The address format is <ipv6 address>/<mask>, where the mask is a number
+        in range 0-128, e.g. fd5d:12c9:2201:1::1/64
+  aggregate:
+    description:
+      - List of L3 interface definitions. Each entry in the aggregate list should
+        define the interface C(name) and an optional C(ipv4) or C(ipv6) address.
+  state:
+    description:
+      - State of the L3 interface configuration. It indicates if the configuration should
+        be present or absent on the remote device.
+    default: present
+    choices: ['present', 'absent']
+'''
+
+EXAMPLES = """
+- name: Remove Ethernet 0/3 IPv4 and IPv6 address
+  slxos_l3_interface:
+    name: Ethernet 0/3
+    state: absent
+
+- name: Set Ethernet 0/3 IPv4 address
+  slxos_l3_interface:
+    name: Ethernet 0/3
+    ipv4: 192.168.0.1/24
+
+- name: Set Ethernet 0/3 IPv6 address
+  slxos_l3_interface:
+    name: Ethernet 0/3
+    ipv6: "fd5d:12c9:2201:1::1/64"
+
+- name: Set IP addresses on aggregate
+  slxos_l3_interface:
+    aggregate:
+      - { name: Ethernet 0/3, ipv4: 192.168.2.10/24 }
+      - { name: Ethernet 0/3, ipv4: 192.168.3.10/24, ipv6: "fd5d:12c9:2201:1::1/64" }
+
+- name: Remove IP addresses on aggregate
+  slxos_l3_interface:
+    aggregate:
+      - { name: Ethernet 0/3, ipv4: 192.168.2.10/24 }
+      - { name: Ethernet 0/3, ipv4: 192.168.3.10/24, ipv6: "fd5d:12c9:2201:1::1/64" }
+    state: absent
+"""
+
+RETURN = """
+commands:
+  description: The list of configuration mode commands to send to the device
+  returned: always, except for the platforms that use Netconf transport to manage the device.
+  type: list
+  sample:
+    - interface Ethernet 0/2
+    - ip address 192.168.0.1/24
+    - ipv6 address fd5d:12c9:2201:1::1/64
+"""
+import re
+
+from copy import deepcopy
+
+from ansible.module_utils._text import to_text
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six import iteritems
+from ansible_collections.community.general.plugins.module_utils.network.slxos.slxos import get_config, load_config
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.config import NetworkConfig
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import conditional, remove_default_spec
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import is_netmask, is_masklen, to_netmask, to_masklen
+
+
+def validate_ipv4(value, module):
+    if value:
+        address = value.split('/')
+        if len(address) != 2:
+            module.fail_json(msg='address format is <ipv4 address>/<mask>, got invalid format %s' % value)
+
+        if not is_masklen(address[1]):
+            module.fail_json(msg='invalid value for mask: %s, mask should be in range 0-32' % address[1])
+
+
+def validate_ipv6(value, module):
+    if value:
+        address = value.split('/')
+        if len(address) != 2:
+            module.fail_json(msg='address format is <ipv6 address>/<mask>, got invalid format %s' % value)
+        else:
+            if not 0 <= int(address[1]) <= 128:
+                module.fail_json(msg='invalid value for mask: %s, mask should be in range 0-128' % address[1])
+
+
+def validate_param_values(module, obj, param=None):
+    if param is None:
+        param = module.params
+    for key in obj:
+        # validate the param value (if validator func exists)
+        validator = globals().get('validate_%s' % key)
+        if callable(validator):
+            validator(param.get(key), module)
+
+
+def parse_config_argument(configobj, name, arg=None):
+    cfg = configobj['interface %s' % name]
+    cfg = '\n'.join(cfg.children)
+    match = re.search(r'%s (.+)$' % arg, cfg, re.M)
+    if match:
+        return match.group(1).strip()
+
+    return None
+
+
+def search_obj_in_list(name, lst):
+    for o in lst:
+        if o['name'] == name:
+            return o
+
+    return None
+
+
+def map_obj_to_commands(updates, module):
+    commands = list()
+    want, have = updates
+    for w in want:
+        name = w['name']
+        ipv4 = w['ipv4']
+        ipv6 = w['ipv6']
+        state = w['state']
+
+        interface = 'interface ' + name
+        commands.append(interface)
+
+        obj_in_have = search_obj_in_list(name, have)
+        if state == 'absent' and obj_in_have:
+            if obj_in_have['ipv4']:
+                if ipv4:
+                    commands.append('no ip address %s' % ipv4)
+                else:
+                    commands.append('no ip address')
+            if obj_in_have['ipv6']:
+                if ipv6:
+                    commands.append('no ipv6 address %s' % ipv6)
+                else:
+                    commands.append('no ipv6 address')
+
+        elif state == 'present':
+            if ipv4:
+                if obj_in_have is None or obj_in_have.get('ipv4') is None or ipv4 != obj_in_have['ipv4']:
+                    commands.append('ip address %s' % ipv4)
+
+            if ipv6:
+                if obj_in_have is None or obj_in_have.get('ipv6') is None or ipv6.lower() != obj_in_have['ipv6'].lower():
+                    commands.append('ipv6 address %s' % ipv6)
+
+        if commands[-1] == interface:
+            commands.pop(-1)
+
+    return commands
+
+
+def map_config_to_obj(module):
+    config = get_config(module)
+    configobj = NetworkConfig(indent=1, contents=config)
+
+    match = re.findall(r'^interface (\S+\s[0-9]+/[0-9]+)', config, re.M)
+    if not match:
+        return list()
+
+    instances = list()
+
+    for item in set(match):
+        ipv4 = parse_config_argument(configobj, item, 'ip address')
+        if ipv4:
+            # eg.
192.168.2.10 255.255.255.0 -> 192.168.2.10/24 + address = ipv4.strip().split(' ') + if len(address) == 2 and is_netmask(address[1]): + ipv4 = '%s/%s' % (address[0], to_text(to_masklen(address[1]))) + + obj = { + 'name': item, + 'ipv4': ipv4, + 'ipv6': parse_config_argument(configobj, item, 'ipv6 address'), + 'state': 'present' + } + instances.append(obj) + + return instances + + +def map_params_to_obj(module): + obj = [] + + aggregate = module.params.get('aggregate') + if aggregate: + for item in aggregate: + for key in item: + if item.get(key) is None: + item[key] = module.params[key] + + validate_param_values(module, item, item) + obj.append(item.copy()) + else: + obj.append({ + 'name': module.params['name'], + 'ipv4': module.params['ipv4'], + 'ipv6': module.params['ipv6'], + 'state': module.params['state'] + }) + + validate_param_values(module, obj) + + return obj + + +def main(): + """ main entry point for module execution + """ + element_spec = dict( + name=dict(), + ipv4=dict(), + ipv6=dict(), + state=dict(default='present', + choices=['present', 'absent']) + ) + + aggregate_spec = deepcopy(element_spec) + aggregate_spec['name'] = dict(required=True) + + # remove default in aggregate spec, to handle common arguments + remove_default_spec(aggregate_spec) + + argument_spec = dict( + aggregate=dict(type='list', elements='dict', options=aggregate_spec), + ) + + argument_spec.update(element_spec) + + required_one_of = [['name', 'aggregate']] + mutually_exclusive = [['name', 'aggregate']] + module = AnsibleModule(argument_spec=argument_spec, + required_one_of=required_one_of, + mutually_exclusive=mutually_exclusive, + supports_check_mode=True) + + warnings = list() + + result = {'changed': False} + + if warnings: + result['warnings'] = warnings + + want = map_params_to_obj(module) + have = map_config_to_obj(module) + + commands = map_obj_to_commands((want, have), module) + result['commands'] = commands + + if commands: + if not module.check_mode: + load_config(module, commands) + + result['changed'] = True + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/slxos/slxos_linkagg.py b/plugins/modules/network/slxos/slxos_linkagg.py new file mode 100644 index 0000000000..fc0323df9c --- /dev/null +++ b/plugins/modules/network/slxos/slxos_linkagg.py @@ -0,0 +1,325 @@ +#!/usr/bin/python +# +# (c) 2018 Extreme Networks Inc. +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+# +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: slxos_linkagg +author: "Matthew Stone (@bigmstone)" +short_description: Manage link aggregation groups on Extreme Networks SLX-OS network devices +description: + - This module provides declarative management of link aggregation groups + on Extreme Networks SLX-OS network devices. +notes: + - Tested against SLX-OS 17s.1.02 +options: + group: + description: + - Channel-group number for the port-channel + Link aggregation group. Range 1-1024. + mode: + description: + - Mode of the link aggregation group. + choices: ['active', 'on', 'passive'] + members: + description: + - List of members of the link aggregation group. + aggregate: + description: List of link aggregation definitions. + state: + description: + - State of the link aggregation group. + default: present + choices: ['present', 'absent'] + purge: + description: + - Purge links not defined in the I(aggregate) parameter. + type: bool +''' + +EXAMPLES = """ +- name: create link aggregation group + slxos_linkagg: + group: 10 + state: present + +- name: delete link aggregation group + slxos_linkagg: + group: 10 + state: absent + +- name: set link aggregation group to members + slxos_linkagg: + group: 200 + mode: active + members: + - Ethernet 0/1 + - Ethernet 0/2 + +- name: remove link aggregation group from Ethernet 0/1 + slxos_linkagg: + group: 200 + mode: active + members: + - Ethernet 0/1 + +- name: Create aggregate of linkagg definitions + slxos_linkagg: + aggregate: + - { group: 3, mode: on, members: [Ethernet 0/1] } + - { group: 100, mode: passive, members: [Ethernet 0/2] } +""" + +RETURN = """ +commands: + description: The list of configuration mode commands to send to the device + returned: always, except for the platforms that use Netconf transport to manage the device. 
+ type: list + sample: + - interface port-channel 30 + - interface Ethernet 0/3 + - channel-group 30 mode on + - no interface port-channel 30 +""" + +import re +from copy import deepcopy + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.config import CustomNetworkConfig +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import remove_default_spec +from ansible_collections.community.general.plugins.module_utils.network.slxos.slxos import get_config, load_config + + +def search_obj_in_list(group, lst): + for o in lst: + if o['group'] == group: + return o + + +def map_obj_to_commands(updates, module): + commands = list() + want, have = updates + purge = module.params['purge'] + + for w in want: + group = w['group'] + mode = w['mode'] + members = w.get('members') or [] + state = w['state'] + del w['state'] + + obj_in_have = search_obj_in_list(group, have) + + if state == 'absent': + if obj_in_have: + commands.append('no interface port-channel {0}'.format(group)) + + elif state == 'present': + cmd = ['interface port-channel {0}'.format(group), + 'exit'] + if not obj_in_have: + if not group: + module.fail_json(msg='group is a required option') + commands.extend(cmd) + + if members: + for m in members: + commands.append('interface {0}'.format(m)) + commands.append('channel-group {0} mode {1}'.format(group, mode)) + + else: + if members: + if 'members' not in obj_in_have.keys(): + for m in members: + commands.extend(cmd) + commands.append('interface {0}'.format(m)) + commands.append('channel-group {0} mode {1}'.format(group, mode)) + + elif set(members) != set(obj_in_have['members']): + missing_members = list(set(members) - set(obj_in_have['members'])) + for m in missing_members: + commands.extend(cmd) + commands.append('interface {0}'.format(m)) + commands.append('channel-group {0} mode {1}'.format(group, mode)) + + superfluous_members = list(set(obj_in_have['members']) - set(members)) + for m in superfluous_members: + commands.extend(cmd) + commands.append('interface {0}'.format(m)) + commands.append('no channel-group') + + if purge: + for h in have: + obj_in_want = search_obj_in_list(h['group'], want) + if not obj_in_want: + commands.append('no interface port-channel {0}'.format(h['group'])) + + return commands + + +def map_params_to_obj(module): + obj = [] + + aggregate = module.params.get('aggregate') + if aggregate: + for item in aggregate: + for key in item: + if item.get(key) is None: + item[key] = module.params[key] + + d = item.copy() + d['group'] = str(d['group']) + + obj.append(d) + else: + obj.append({ + 'group': str(module.params['group']), + 'mode': module.params['mode'], + 'members': module.params['members'], + 'state': module.params['state'] + }) + + return obj + + +def parse_mode(module, config, group, member): + mode = None + netcfg = CustomNetworkConfig(indent=1, contents=config) + parents = ['interface {0}'.format(member)] + body = netcfg.get_section(parents) + + match_int = re.findall(r'interface {0}\n'.format(member), body, re.M) + if match_int: + match = re.search(r'channel-group {0} mode (\S+)'.format(group), body, re.M) + if match: + mode = match.group(1) + + return mode + + +def parse_members(module, config, group): + members = [] + + for line in config.strip().split('!'): + l = line.strip() + if l.startswith('interface'): + match_group = re.findall(r'channel-group {0} mode'.format(group), l, re.M) + if match_group: + match = re.search(r'^interface 
(\S+\s\S+)$', l, re.M) + if match: + members.append(match.group(1)) + + return members + + +def get_channel(module, config, group): + match = re.findall(r'^interface (\S+\s\S+)$', config, re.M) + + if not match: + return {} + + channel = {} + for item in set(match): + member = item + channel['mode'] = parse_mode(module, config, group, member) + channel['members'] = parse_members(module, config, group) + + return channel + + +def map_config_to_obj(module): + objs = list() + config = get_config(module) + + for line in config.split('\n'): + l = line.strip() + match = re.search(r'interface Port-channel (\S+)', l, re.M) + if match: + obj = {} + group = match.group(1) + obj['group'] = group + obj.update(get_channel(module, config, group)) + objs.append(obj) + + return objs + + +def main(): + """ main entry point for module execution + """ + element_spec = dict( + group=dict(type='int'), + mode=dict(choices=['active', 'on', 'passive']), + members=dict(type='list'), + state=dict(default='present', + choices=['present', 'absent']) + ) + + aggregate_spec = deepcopy(element_spec) + aggregate_spec['group'] = dict(required=True) + + required_one_of = [['group', 'aggregate']] + required_together = [['members', 'mode']] + mutually_exclusive = [['group', 'aggregate']] + + # remove default in aggregate spec, to handle common arguments + remove_default_spec(aggregate_spec) + + argument_spec = dict( + aggregate=dict(type='list', elements='dict', options=aggregate_spec, + required_together=required_together), + purge=dict(default=False, type='bool') + ) + + argument_spec.update(element_spec) + + module = AnsibleModule(argument_spec=argument_spec, + required_one_of=required_one_of, + required_together=required_together, + mutually_exclusive=mutually_exclusive, + supports_check_mode=True) + + warnings = list() + result = {'changed': False} + if warnings: + result['warnings'] = warnings + + want = map_params_to_obj(module) + have = map_config_to_obj(module) + + commands = map_obj_to_commands((want, have), module) + result['commands'] = commands + + if commands: + if not module.check_mode: + load_config(module, commands) + result['changed'] = True + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/slxos/slxos_lldp.py b/plugins/modules/network/slxos/slxos_lldp.py new file mode 100644 index 0000000000..efb8ab3560 --- /dev/null +++ b/plugins/modules/network/slxos/slxos_lldp.py @@ -0,0 +1,133 @@ +#!/usr/bin/python +# +# (c) 2018 Extreme Networks Inc. +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: slxos_lldp +author: "Matthew Stone (@bigmstone)" +short_description: Manage LLDP configuration on Extreme Networks SLX-OS network devices. 
+description: + - This module provides declarative management of LLDP service + on Extreme SLX-OS network devices. +notes: + - Tested against SLX-OS 17s.1.02 +options: + state: + description: + - State of the LLDP configuration. If value is I(present) lldp will be enabled + else if it is I(absent) it will be disabled. + default: present + choices: ['present', 'absent'] +''' + +EXAMPLES = """ +- name: Enable LLDP service + slxos_lldp: + state: present + +- name: Disable LLDP service + slxos_lldp: + state: absent +""" + +RETURN = """ +commands: + description: The list of configuration mode commands to send to the device + returned: always, except for the platforms that use Netconf transport to manage the device. + type: list + sample: + - lldp run +""" +import re +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.config import CustomNetworkConfig +from ansible_collections.community.general.plugins.module_utils.network.slxos.slxos import ( + load_config, + get_config +) + +PROTOCOL = "protocol lldp" + + +def has_lldp(module): + config = get_config(module) + netcfg = CustomNetworkConfig(indent=1, contents=config) + parents = [PROTOCOL] + body = netcfg.get_section(parents) + + for line in body.split('\n'): + l = line.strip() + match = re.search(r'disable', l, re.M) + if match: + return False + + return True + + +def main(): + """ main entry point for module execution + """ + argument_spec = dict( + state=dict(default='present', + choices=['present', 'absent']) + ) + + module = AnsibleModule(argument_spec=argument_spec, + supports_check_mode=True) + + HAS_LLDP = has_lldp(module) + + warnings = list() + + result = {'changed': False} + + if warnings: + result['warnings'] = warnings + + commands = [] + + if module.params['state'] == 'absent' and HAS_LLDP: + commands.append('protocol lldp') + commands.append('disable') + elif module.params['state'] == 'present' and not HAS_LLDP: + commands.append('protocol lldp') + commands.append('no disable') + + result['commands'] = commands + + if commands: + if not module.check_mode: + load_config(module, commands) + + result['changed'] = True + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/slxos/slxos_vlan.py b/plugins/modules/network/slxos/slxos_vlan.py new file mode 100644 index 0000000000..0adf86ca0d --- /dev/null +++ b/plugins/modules/network/slxos/slxos_vlan.py @@ -0,0 +1,309 @@ +#!/usr/bin/python +# +# (c) 2018 Extreme Networks Inc. +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+# +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: slxos_vlan +author: "Lindsay Hill (@lindsayhill)" +short_description: Manage VLANs on Extreme Networks SLX-OS network devices +description: + - This module provides declarative management of VLANs + on Extreme SLX-OS network devices. +notes: + - Tested against SLX-OS 18r.1.00 +options: + name: + description: + - Name of the VLAN. + vlan_id: + description: + - ID of the VLAN. Range 1-4094. + required: true + interfaces: + description: + - List of interfaces that should be associated to the VLAN. + required: true + delay: + description: + - Delay the play should wait to check for declarative intent params values. + default: 10 + aggregate: + description: List of VLANs definitions. + purge: + description: + - Purge VLANs not defined in the I(aggregate) parameter. + type: bool + default: no + state: + description: + - State of the VLAN configuration. + default: present + choices: ['present', 'absent'] +''' + +EXAMPLES = """ +- name: Create vlan + slxos_vlan: + vlan_id: 100 + name: test-vlan + state: present +- name: Add interfaces to VLAN + slxos_vlan: + vlan_id: 100 + interfaces: + - Ethernet 0/1 + - Ethernet 0/2 +- name: Delete vlan + slxos_vlan: + vlan_id: 100 + state: absent +""" + +RETURN = """ +commands: + description: The list of configuration mode commands to send to the device + returned: always + type: list + sample: + - vlan 100 + - name test-vlan +""" + +import re +import time + +from copy import deepcopy + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import remove_default_spec +from ansible_collections.community.general.plugins.module_utils.network.slxos.slxos import load_config, run_commands + + +def search_obj_in_list(vlan_id, lst): + for o in lst: + if o['vlan_id'] == vlan_id: + return o + return None + + +def map_obj_to_commands(updates, module): + commands = list() + want, have = updates + purge = module.params['purge'] + + for w in want: + vlan_id = w['vlan_id'] + name = w['name'] + interfaces = w['interfaces'] + state = w['state'] + + obj_in_have = search_obj_in_list(vlan_id, have) + + if state == 'absent': + if obj_in_have: + commands.append('no vlan %s' % vlan_id) + + elif state == 'present': + if not obj_in_have: + commands.append('vlan %s' % vlan_id) + if name: + commands.append('name %s' % name) + + if interfaces: + for i in interfaces: + commands.append('interface %s' % i) + commands.append('switchport') + commands.append('switchport mode access') + commands.append('switchport access vlan %s' % vlan_id) + + else: + if name: + if name != obj_in_have['name']: + commands.append('vlan %s' % vlan_id) + commands.append('name %s' % name) + + if interfaces: + if not obj_in_have['interfaces']: + for i in interfaces: + commands.append('vlan %s ' % vlan_id) + commands.append('interface %s' % i) + commands.append('switchport') + commands.append('switchport mode access') + commands.append('switchport access vlan %s' % vlan_id) + + elif set(interfaces) != set(obj_in_have['interfaces']): + missing_interfaces = list(set(interfaces) - set(obj_in_have['interfaces'])) + for i in missing_interfaces: + commands.append('vlan %s' % vlan_id) + commands.append('interface %s' % i) + commands.append('switchport') + commands.append('switchport mode access') + 
commands.append('switchport access vlan %s' % vlan_id) + + superfluous_interfaces = list(set(obj_in_have['interfaces']) - set(interfaces)) + for i in superfluous_interfaces: + commands.append('vlan %s' % vlan_id) + commands.append('interface %s' % i) + commands.append('switchport mode access') + commands.append('no switchport access vlan %s' % vlan_id) + + if purge: + for h in have: + obj_in_want = search_obj_in_list(h['vlan_id'], want) + if not obj_in_want and h['vlan_id'] != '1': + commands.append('no vlan %s' % h['vlan_id']) + + return commands + + +def map_params_to_obj(module): + obj = [] + aggregate = module.params.get('aggregate') + if aggregate: + for item in aggregate: + for key in item: + if item.get(key) is None: + item[key] = module.params[key] + + d = item.copy() + d['vlan_id'] = str(d['vlan_id']) + + obj.append(d) + else: + obj.append({ + 'vlan_id': str(module.params['vlan_id']), + 'name': module.params['name'], + 'interfaces': module.params['interfaces'], + 'state': module.params['state'] + }) + + return obj + + +def map_config_to_obj(module): + output = run_commands(module, ['show vlan brief']) + lines = output[0].strip().splitlines()[5:] + + if not lines: + return list() + + objs = list() + obj = {} + + for l in lines: + splitted_line = re.split(r'([0-9]+)? +(\S.{14})? +(ACTIVE|INACTIVE\(.+?\))?.*(Eth .+?|Po .+?|Tu .+?)\([ut]\).*$', l.rstrip()) + if len(splitted_line) == 1: + # Handle situation where VLAN is configured, but has no associated ports + inactive = re.match(r'([0-9]+)? +(\S.{14}) +INACTIVE\(no member port\).*$', l.rstrip()) + if inactive: + splitted_line = ['', inactive.groups()[0], inactive.groups()[1], '', ''] + else: + continue + + splitted_line[4] = splitted_line[4].replace('Eth', 'Ethernet').replace('Po', 'Port-channel').replace('Tu', 'Tunnel') + + if splitted_line[1] is None: + obj['interfaces'].append(splitted_line[4]) + continue + + obj = {} + obj['vlan_id'] = splitted_line[1] + obj['name'] = splitted_line[2].strip() + obj['interfaces'] = [splitted_line[4]] + + objs.append(obj) + + return objs + + +def check_declarative_intent_params(want, module): + if module.params['interfaces']: + time.sleep(module.params['delay']) + have = map_config_to_obj(module) + + for w in want: + for i in w['interfaces']: + obj_in_have = search_obj_in_list(w['vlan_id'], have) + if obj_in_have and 'interfaces' in obj_in_have and i not in obj_in_have['interfaces']: + module.fail_json(msg="Interface %s not configured on vlan %s" % (i, w['vlan_id'])) + + +def main(): + """ main entry point for module execution + """ + element_spec = dict( + vlan_id=dict(type='int'), + name=dict(), + interfaces=dict(type='list'), + delay=dict(default=10, type='int'), + state=dict(default='present', + choices=['present', 'absent']) + ) + + aggregate_spec = deepcopy(element_spec) + aggregate_spec['vlan_id'] = dict(required=True) + + # remove default in aggregate spec, to handle common arguments + remove_default_spec(aggregate_spec) + + argument_spec = dict( + aggregate=dict(type='list', elements='dict', options=aggregate_spec), + purge=dict(default=False, type='bool') + ) + + argument_spec.update(element_spec) + + required_one_of = [['vlan_id', 'aggregate']] + mutually_exclusive = [['vlan_id', 'aggregate']] + + module = AnsibleModule(argument_spec=argument_spec, + required_one_of=required_one_of, + mutually_exclusive=mutually_exclusive, + supports_check_mode=True) + warnings = list() + result = {'changed': False} + if warnings: + result['warnings'] = warnings + + want = map_params_to_obj(module) + 
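+    # The module follows the usual declarative want/have pattern: `want` is
+    # built from the task parameters, `have` from the parsed `show vlan brief`
+    # output, and map_obj_to_commands() emits only the CLI needed to converge
+    # the two. For example (illustrative values):
+    #
+    #   want = [{'vlan_id': '100', 'name': 'test-vlan',
+    #            'interfaces': None, 'state': 'present'}]
+    #   have = []            # VLAN 100 not yet present on the device
+    #
+    # would produce the commands ['vlan 100', 'name test-vlan'].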
have = map_config_to_obj(module) + commands = map_obj_to_commands((want, have), module) + result['commands'] = commands + + if commands: + if not module.check_mode: + load_config(module, commands) + result['changed'] = True + + if result['changed']: + check_declarative_intent_params(want, module) + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/sros/sros_command.py b/plugins/modules/network/sros/sros_command.py new file mode 100644 index 0000000000..51d525ebb6 --- /dev/null +++ b/plugins/modules/network/sros/sros_command.py @@ -0,0 +1,232 @@ +#!/usr/bin/python +# +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'certified'} + +DOCUMENTATION = ''' +--- +module: sros_command +author: "Peter Sprygada (@privateip)" +short_description: Run commands on remote devices running Nokia SR OS +description: + - Sends arbitrary commands to an SR OS node and returns the results + read from the device. This module includes an argument that will + cause the module to wait for a specific condition before returning + or timing out if the condition is not met. + - This module does not support running commands in configuration mode. + Please use M(sros_config) to configure SR OS devices. +extends_documentation_fragment: +- community.general.sros + +options: + commands: + description: + - List of commands to send to the remote SR OS device over the + configured provider. The resulting output from the command + is returned. If the I(wait_for) argument is provided, the + module is not returned until the condition is satisfied or + the number of retries has expired. + required: true + wait_for: + description: + - List of conditions to evaluate against the output of the + command. The task will wait for each condition to be true + before moving forward. If the conditional is not true + within the configured number of retries, the task fails. + See examples. + aliases: ['waitfor'] + match: + description: + - The I(match) argument is used in conjunction with the + I(wait_for) argument to specify the match policy. Valid + values are C(all) or C(any). If the value is set to C(all) + then all conditionals in the wait_for must be satisfied. If + the value is set to C(any) then only one of the values must be + satisfied. + default: all + choices: ['any', 'all'] + retries: + description: + - Specifies the number of retries a command should by tried + before it is considered failed. The command is run on the + target device every retry and evaluated against the + I(wait_for) conditions. + default: 10 + interval: + description: + - Configures the interval in seconds to wait between retries + of the command. If the command does not pass the specified + conditions, the interval indicates how long to wait before + trying the command again. + default: 1 +''' + +EXAMPLES = """ +# Note: examples below use the following provider dict to handle +# transport and authentication to the node. 
+--- +vars: + cli: + host: "{{ inventory_hostname }}" + username: admin + password: admin + transport: cli + +--- +tasks: + - name: run show version on remote devices + sros_command: + commands: show version + provider: "{{ cli }}" + + - name: run show version and check to see if output contains sros + sros_command: + commands: show version + wait_for: result[0] contains sros + provider: "{{ cli }}" + + - name: run multiple commands on remote nodes + sros_command: + commands: + - show version + - show port detail + provider: "{{ cli }}" + + - name: run multiple commands and evaluate the output + sros_command: + commands: + - show version + - show port detail + wait_for: + - result[0] contains TiMOS-B-14.0.R4 + provider: "{{ cli }}" +""" + +RETURN = """ +stdout: + description: The set of responses from the commands + returned: always apart from low level errors (such as action plugin) + type: list + sample: ['...', '...'] + +stdout_lines: + description: The value of stdout split into a list + returned: always apart from low level errors (such as action plugin) + type: list + sample: [['...', '...'], ['...'], ['...']] + +failed_conditions: + description: The list of conditionals that have failed + returned: failed + type: list + sample: ['...', '...'] +""" +import time + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.parsing import Conditional +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import ComplexList +from ansible.module_utils.six import string_types +from ansible_collections.community.general.plugins.module_utils.network.sros.sros import run_commands, sros_argument_spec, check_args + + +def to_lines(stdout): + for item in stdout: + if isinstance(item, string_types): + item = str(item).split('\n') + yield item + + +def parse_commands(module, warnings): + command = ComplexList(dict( + command=dict(key=True), + prompt=dict(), + answer=dict() + ), module) + commands = command(module.params['commands']) + for index, item in enumerate(commands): + if module.check_mode and not item['command'].startswith('show'): + warnings.append( + 'only show commands are supported when using check mode, not ' + 'executing `%s`' % item['command'] + ) + elif item['command'].startswith('conf'): + module.fail_json( + msg='sros_command does not support running config mode ' + 'commands. 
Please use sros_config instead' + ) + return commands + + +def main(): + """main entry point for module execution + """ + argument_spec = dict( + commands=dict(type='list', required=True), + + wait_for=dict(type='list', aliases=['waitfor']), + match=dict(default='all', choices=['all', 'any']), + + retries=dict(default=10, type='int'), + interval=dict(default=1, type='int') + ) + + argument_spec.update(sros_argument_spec) + + module = AnsibleModule(argument_spec=argument_spec, + supports_check_mode=True) + + result = {'changed': False} + + warnings = list() + check_args(module, warnings) + commands = parse_commands(module, warnings) + result['warnings'] = warnings + + wait_for = module.params['wait_for'] or list() + conditionals = [Conditional(c) for c in wait_for] + + retries = module.params['retries'] + interval = module.params['interval'] + match = module.params['match'] + + while retries > 0: + responses = run_commands(module, commands) + + for item in list(conditionals): + if item(responses): + if match == 'any': + conditionals = list() + break + conditionals.remove(item) + + if not conditionals: + break + + time.sleep(interval) + retries -= 1 + + if conditionals: + failed_conditions = [item.raw for item in conditionals] + msg = 'One or more conditional statements have not been satisfied' + module.fail_json(msg=msg, failed_conditions=failed_conditions) + + result = { + 'changed': False, + 'stdout': responses, + 'stdout_lines': list(to_lines(responses)) + } + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/sros/sros_config.py b/plugins/modules/network/sros/sros_config.py new file mode 100644 index 0000000000..a3450d5c18 --- /dev/null +++ b/plugins/modules/network/sros/sros_config.py @@ -0,0 +1,334 @@ +#!/usr/bin/python +# +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'certified'} + + +DOCUMENTATION = ''' +--- +module: sros_config +author: "Peter Sprygada (@privateip)" +short_description: Manage Nokia SR OS device configuration +description: + - Nokia SR OS configurations use a simple block indent file syntax + for segmenting configuration into sections. This module provides + an implementation for working with SR OS configuration sections in + a deterministic way. +extends_documentation_fragment: +- community.general.sros + +options: + lines: + description: + - The ordered set of commands that should be configured in the + section. The commands must be the exact same commands as found + in the device running-config. Be sure to note the configuration + command syntax as some commands are automatically modified by the + device config parser. The I(lines) argument only supports current + context lines. See EXAMPLES + aliases: ['commands'] + parents: + description: + - The ordered set of parents that uniquely identify the section or hierarchy + the commands should be checked against. If the parents argument + is omitted, the commands are checked against the set of top + level or global commands. + src: + description: + - Specifies the source path to the file that contains the configuration + or configuration template to load. The path to the source file can + either be the full path on the Ansible control host or a relative + path from the playbook or role root directory. 
This argument is mutually + exclusive with I(lines), I(parents). + before: + description: + - The ordered set of commands to push on to the command stack if + a change needs to be made. This allows the playbook designer + the opportunity to perform configuration commands prior to pushing + any changes without affecting how the set of commands are matched + against the system. + after: + description: + - The ordered set of commands to append to the end of the command + stack if a change needs to be made. Just like with I(before) this + allows the playbook designer to append a set of commands to be + executed after the command set. + match: + description: + - Instructs the module on the way to perform the matching of + the set of commands against the current device config. If + match is set to I(line), commands are matched line by line. + If match is set to I(none), the + module will not attempt to compare the source configuration with + the running configuration on the remote device. + default: line + choices: ['line', 'none'] + replace: + description: + - Instructs the module on the way to perform the configuration + on the device. If the replace argument is set to I(line) then + the modified lines are pushed to the device in configuration + mode. If the replace argument is set to I(block) then the entire + command block is pushed to the device in configuration mode if any + line is not correct. + default: line + choices: ['line', 'block'] + force: + description: + - The force argument instructs the module to not consider the + current devices running-config. When set to true, this will + cause the module to push the contents of I(src) into the device + without first checking if already configured. + - Note this argument should be considered deprecated. To achieve + the equivalent, set the C(match=none) which is idempotent. This argument + will be removed in a future release. + type: bool + backup: + description: + - This argument will cause the module to create a full backup of + the current C(running-config) from the remote device before any + changes are made. If the C(backup_options) value is not given, + the backup file is written to the C(backup) folder in the playbook + root directory. If the directory does not exist, it is created. + type: bool + default: 'no' + config: + description: + - The C(config) argument allows the playbook designer to supply + the base configuration to be used to validate configuration + changes necessary. If this argument is provided, the module + will not download the running-config from the remote node. + defaults: + description: + - This argument specifies whether or not to collect all defaults + when getting the remote device running config. When enabled, + the module will get the current config by issuing the command + C(admin display-config detail). + type: bool + default: 'no' + aliases: ['detail'] + save: + description: + - The C(save) argument instructs the module to save the running- + config to the startup-config at the conclusion of the module + running. If check mode is specified, this argument is ignored. + type: bool + default: 'no' + backup_options: + description: + - This is a dict object containing configurable options related to backup file path. + The value of this option is read only when C(backup) is set to I(yes), if C(backup) is set + to I(no) this option will be silently ignored. + suboptions: + filename: + description: + - The filename to be used to store the backup configuration. 
If the filename + is not given it will be generated based on the hostname, current time and date + in format defined by _config.@ + dir_path: + description: + - This option provides the path ending with directory name in which the backup + configuration file will be stored. If the directory does not exist it will be first + created and the filename is either the value of C(filename) or default filename + as described in C(filename) options description. If the path value is not given + in that case a I(backup) directory will be created in the current working directory + and backup configuration will be copied in C(filename) within I(backup) directory. + type: path + type: dict +''' + +EXAMPLES = """ +# Note: examples below use the following provider dict to handle +# transport and authentication to the node. +--- +vars: + cli: + host: "{{ inventory_hostname }}" + username: admin + password: admin + transport: cli + +--- +- name: enable rollback location + sros_config: + lines: configure system rollback rollback-location "cf3:/ansible" + provider: "{{ cli }}" + +- name: set system name to {{ inventory_hostname }} using one line + sros_config: + lines: + - configure system name "{{ inventory_hostname }}" + provider: "{{ cli }}" + +- name: set system name to {{ inventory_hostname }} using parents + sros_config: + lines: + - 'name "{{ inventory_hostname }}"' + parents: + - configure + - system + provider: "{{ cli }}" + backup: yes + +- name: load config from file + sros_config: + src: "{{ inventory_hostname }}.cfg" + provider: "{{ cli }}" + save: yes + +- name: invalid use of lines + sros_config: + lines: + - service + - vpls 1000 customer foo 1 create + - description "invalid lines example" + provider: "{{ cli }}" + +- name: valid use of lines + sros_config: + lines: + - description "invalid lines example" + parents: + - service + - vpls 1000 customer foo 1 create + provider: "{{ cli }}" + +- name: configurable backup path + sros_config: + backup: yes + backup_options: + filename: backup.cfg + dir_path: /home/user +""" + +RETURN = """ +updates: + description: The set of commands that will be pushed to the remote device + returned: always + type: list + sample: ['config system name "sros01"'] +commands: + description: The set of commands that will be pushed to the remote device + returned: always + type: list + sample: ['config system name "sros01"'] +backup_path: + description: The full path to the backup file + returned: when backup is yes + type: str + sample: /playbooks/ansible/backup/sros_config.2016-07-16@22:28:34 +""" +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.config import NetworkConfig, dumps +from ansible_collections.community.general.plugins.module_utils.network.sros.sros import sros_argument_spec, check_args +from ansible_collections.community.general.plugins.module_utils.network.sros.sros import load_config, run_commands, get_config + + +def get_active_config(module): + contents = module.params['config'] + if not contents: + flags = [] + if module.params['defaults']: + flags = ['detail'] + return get_config(module, flags) + return contents + + +def get_candidate(module): + candidate = NetworkConfig(indent=4) + if module.params['src']: + candidate.load(module.params['src']) + elif module.params['lines']: + parents = module.params['parents'] or list() + candidate.add(module.params['lines'], parents=parents) + return candidate + + +def run(module, result): + match = module.params['match'] + + 
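+    # run() implements diff-based configuration management: the candidate
+    # config (from `src` or `lines`) is compared section-by-section against
+    # the device's running config, and only the missing lines are pushed.
+    # A rough sketch of the comparison step (illustrative values, using the
+    # NetworkConfig API already imported above):
+    #
+    #   candidate = NetworkConfig(indent=4)
+    #   candidate.add(['name "sros01"'], parents=['configure', 'system'])
+    #   running = NetworkConfig(indent=4, contents=get_active_config(module))
+    #   updates = candidate.difference(running)  # objects absent from running
+    #
+    # With match=none the diff is skipped and the candidate is pushed as-is.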
candidate = get_candidate(module) + + if match != 'none': + config_text = get_active_config(module) + config = NetworkConfig(indent=4, contents=config_text) + configobjs = candidate.difference(config) + else: + configobjs = candidate.items + + if configobjs: + commands = dumps(configobjs, 'commands') + commands = commands.split('\n') + + result['commands'] = commands + result['updates'] = commands + + # send the configuration commands to the device and merge + # them with the current running config + if not module.check_mode: + load_config(module, commands) + result['changed'] = True + + +def main(): + """ main entry point for module execution + """ + backup_spec = dict( + filename=dict(), + dir_path=dict(type='path') + ) + argument_spec = dict( + src=dict(type='path'), + + lines=dict(aliases=['commands'], type='list'), + parents=dict(type='list'), + + match=dict(default='line', choices=['line', 'none']), + + config=dict(), + defaults=dict(type='bool', default=False, aliases=['detail']), + + backup=dict(type='bool', default=False), + backup_options=dict(type='dict', options=backup_spec), + save=dict(type='bool', default=False), + ) + + argument_spec.update(sros_argument_spec) + + mutually_exclusive = [('lines', 'src'), + ('parents', 'src')] + + module = AnsibleModule(argument_spec=argument_spec, + mutually_exclusive=mutually_exclusive, + supports_check_mode=True) + + result = dict(changed=False, warnings=list()) + + warnings = list() + check_args(module, warnings) + if warnings: + result['warnings'] = warnings + + if module.params['backup']: + result['__backup__'] = get_config(module) + + run(module, result) + + if module.params['save']: + if not module.check_mode: + run_commands(module, ['admin save']) + result['changed'] = True + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/sros/sros_rollback.py b/plugins/modules/network/sros/sros_rollback.py new file mode 100644 index 0000000000..6168c61bf1 --- /dev/null +++ b/plugins/modules/network/sros/sros_rollback.py @@ -0,0 +1,215 @@ +#!/usr/bin/python +# +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'certified'} + + +DOCUMENTATION = ''' +--- +module: sros_rollback +author: "Peter Sprygada (@privateip)" +short_description: Configure Nokia SR OS rollback +description: + - Configure the rollback feature on remote Nokia devices running + the SR OS operating system. this module provides a stateful + implementation for managing the configuration of the rollback + feature +extends_documentation_fragment: +- community.general.sros + +options: + rollback_location: + description: + - The I(rollback_location) specifies the location and filename + of the rollback checkpoint files. This argument supports any + valid local or remote URL as specified in SR OS + remote_max_checkpoints: + description: + - The I(remote_max_checkpoints) argument configures the maximum + number of rollback files that can be transferred and saved to + a remote location. Valid values for this argument are in the + range of 1 to 50 + local_max_checkpoints: + description: + - The I(local_max_checkpoints) argument configures the maximum + number of rollback files that can be saved on the devices local + compact flash. 
+        Valid values for this argument are in the range
+        of 1 to 50.
+  rescue_location:
+    description:
+      - The I(rescue_location) specifies the location of the
+        rescue file. This argument supports any valid local
+        or remote URL as specified in SR OS.
+  state:
+    description:
+      - The I(state) argument specifies the state of the configuration
+        entries in the devices active configuration. When the state
+        value is set to C(present) the configuration is present in the
+        devices active configuration. When the state value is set to
+        C(absent) the configuration values are removed from the devices
+        active configuration.
+    default: present
+    choices: ['present', 'absent']
+'''
+
+EXAMPLES = """
+# Note: examples below use the following provider dict to handle
+# transport and authentication to the node.
+---
+vars:
+  cli:
+    host: "{{ inventory_hostname }}"
+    username: admin
+    password: admin
+    transport: cli
+
+---
+- name: configure rollback location
+  sros_rollback:
+    rollback_location: "cf3:/ansible"
+    provider: "{{ cli }}"
+
+- name: remove all rollback configuration
+  sros_rollback:
+    state: absent
+    provider: "{{ cli }}"
+"""
+
+RETURN = """
+updates:
+  description: The set of commands that will be pushed to the remote device
+  returned: always
+  type: list
+  sample: ['...', '...']
+"""
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.config import NetworkConfig, dumps
+from ansible_collections.community.general.plugins.module_utils.network.sros.sros import load_config, get_config, sros_argument_spec, check_args
+
+
+def invoke(name, *args, **kwargs):
+    func = globals().get(name)
+    if func:
+        return func(*args, **kwargs)
+
+
+def sanitize_config(lines):
+    commands = list()
+    for line in lines:
+        for index, entry in enumerate(commands):
+            if line.startswith(entry):
+                del commands[index]
+                break
+        commands.append(line)
+    return commands
+
+
+def present(module, commands):
+    setters = set()
+    for key, value in module.argument_spec.items():
+        if module.params[key] is not None:
+            setter = value.get('setter') or 'set_%s' % key
+            if setter not in setters:
+                setters.add(setter)
+                invoke(setter, module, commands)
+
+
+def absent(module, commands):
+    config = get_config(module)
+    if 'rollback-location' in config:
+        commands.append('configure system rollback no rollback-location')
+    if 'rescue-location' in config:
+        commands.append('configure system rollback no rescue-location')
+    if 'remote-max-checkpoints' in config:
+        commands.append('configure system rollback no remote-max-checkpoints')
+    if 'local-max-checkpoints' in config:
+        commands.append('configure system rollback no local-max-checkpoints')
+
+
+def set_rollback_location(module, commands):
+    value = module.params['rollback_location']
+    commands.append('configure system rollback rollback-location "%s"' % value)
+
+
+def set_local_max_checkpoints(module, commands):
+    value = module.params['local_max_checkpoints']
+    if not 1 <= value <= 50:
+        module.fail_json(msg='local_max_checkpoints must be between 1 and 50')
+    commands.append('configure system rollback local-max-checkpoints %s' % value)
+
+
+def set_remote_max_checkpoints(module, commands):
+    value = module.params['remote_max_checkpoints']
+    if not 1 <= value <= 50:
+        module.fail_json(msg='remote_max_checkpoints must be between 1 and 50')
+    commands.append('configure system rollback remote-max-checkpoints %s' % value)
+
+
+def set_rescue_location(module, commands):
+    value = module.params['rescue_location']
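+    # Quote the supplied location (for example "cf3:/rescue"), mirroring the
+    # rollback-location command format used above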
+    commands.append('configure system rollback rescue-location "%s"' % value)
+
+
+def get_device_config(module):
+    contents = get_config(module)
+    return NetworkConfig(indent=4, contents=contents)
+
+
+def main():
+    """ main entry point for module execution
+    """
+    argument_spec = dict(
+        rollback_location=dict(),
+
+        local_max_checkpoints=dict(type='int'),
+        remote_max_checkpoints=dict(type='int'),
+
+        rescue_location=dict(),
+
+        state=dict(default='present', choices=['present', 'absent'])
+    )
+
+    argument_spec.update(sros_argument_spec)
+
+    module = AnsibleModule(argument_spec=argument_spec,
+                           supports_check_mode=True)
+
+    state = module.params['state']
+
+    result = dict(changed=False)
+
+    commands = list()
+    invoke(state, module, commands)
+
+    candidate = NetworkConfig(indent=4, contents='\n'.join(commands))
+    config = get_device_config(module)
+    configobjs = candidate.difference(config)
+
+    if configobjs:
+        commands = dumps(configobjs, 'commands')
+        commands = sanitize_config(commands.split('\n'))
+
+        result['updates'] = commands
+        result['commands'] = commands
+
+        # send the configuration commands to the device and merge
+        # them with the current running config
+        if not module.check_mode:
+            load_config(module, commands)
+
+        result['changed'] = True
+
+    module.exit_json(**result)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/network/voss/voss_command.py b/plugins/modules/network/voss/voss_command.py
new file mode 100644
index 0000000000..9c27a5c98e
--- /dev/null
+++ b/plugins/modules/network/voss/voss_command.py
@@ -0,0 +1,239 @@
+#!/usr/bin/python
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.
+#
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: voss_command
+author: "Lindsay Hill (@LindsayHill)"
+short_description: Run commands on remote devices running Extreme VOSS
+description:
+  - Sends arbitrary commands to an Extreme VSP device running VOSS, and
+    returns the results read from the device. This module includes an
+    argument that will cause the module to wait for a specific condition
+    before returning or timing out if the condition is not met.
+  - This module does not support running commands in configuration mode.
+    Please use M(voss_config) to configure VOSS devices.
+notes:
+  - Tested against VOSS 7.0.0
+options:
+  commands:
+    description:
+      - List of commands to send to the remote VOSS device. The
+        resulting output from the command is returned. If the
+        I(wait_for) argument is provided, the module does not return
+        until the condition is satisfied or the number of retries has
+        expired. If a command sent to the device requires answering a
+        prompt, it is possible to pass a dict containing I(command),
+        I(answer) and I(prompt).
+        Common answers are 'y' or "\r" (carriage return, must be
+        double quotes). See examples.
+    required: true
+  wait_for:
+    description:
+      - List of conditions to evaluate against the output of the
+        command. The task will wait for each condition to be true
+        before moving forward. If the conditional is not true
+        within the configured number of retries, the task fails.
+        See examples.
+  match:
+    description:
+      - The I(match) argument is used in conjunction with the
+        I(wait_for) argument to specify the match policy. Valid
+        values are C(all) or C(any). If the value is set to C(all)
+        then all conditionals in the wait_for must be satisfied. If
+        the value is set to C(any) then only one of the values must be
+        satisfied.
+    default: all
+    choices: ['any', 'all']
+  retries:
+    description:
+      - Specifies the number of times a command should be tried
+        before it is considered failed. The command is run on the
+        target device every retry and evaluated against the
+        I(wait_for) conditions.
+    default: 10
+  interval:
+    description:
+      - Configures the interval in seconds to wait between retries
+        of the command. If the command does not pass the specified
+        conditions, the interval indicates how long to wait before
+        trying the command again.
+    default: 1
+'''
+
+EXAMPLES = r"""
+tasks:
+  - name: run show sys software on remote devices
+    voss_command:
+      commands: show sys software
+
+  - name: run show sys software and check to see if output contains VOSS
+    voss_command:
+      commands: show sys software
+      wait_for: result[0] contains VOSS
+
+  - name: run multiple commands on remote nodes
+    voss_command:
+      commands:
+        - show sys software
+        - show interfaces vlan
+
+  - name: run multiple commands and evaluate the output
+    voss_command:
+      commands:
+        - show sys software
+        - show interfaces vlan
+      wait_for:
+        - result[0] contains Version
+        - result[1] contains Basic
+
+  - name: run command that requires answering a prompt
+    voss_command:
+      commands:
+        - command: 'reset'
+          prompt: 'Are you sure you want to reset the switch? 
(y/n)' + answer: 'y' +""" + +RETURN = """ +stdout: + description: The set of responses from the commands + returned: always apart from low level errors (such as action plugin) + type: list + sample: ['...', '...'] +stdout_lines: + description: The value of stdout split into a list + returned: always apart from low level errors (such as action plugin) + type: list + sample: [['...', '...'], ['...'], ['...']] +failed_conditions: + description: The list of conditionals that have failed + returned: failed + type: list + sample: ['...', '...'] +""" +import re +import time + +from ansible_collections.community.general.plugins.module_utils.network.voss.voss import run_commands +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import ComplexList +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.parsing import Conditional +from ansible.module_utils.six import string_types + + +def to_lines(stdout): + for item in stdout: + if isinstance(item, string_types): + item = str(item).split('\n') + yield item + + +def parse_commands(module, warnings): + command = ComplexList(dict( + command=dict(key=True), + prompt=dict(), + answer=dict() + ), module) + commands = command(module.params['commands']) + for item in list(commands): + configure_type = re.match(r'conf(?:\w*)(?:\s+(\w+))?', item['command']) + if module.check_mode: + if configure_type and configure_type.group(1) not in ('confirm', 'replace', 'revert', 'network'): + module.fail_json( + msg='voss_command does not support running config mode ' + 'commands. Please use voss_config instead' + ) + if not item['command'].startswith('show'): + warnings.append( + 'only show commands are supported when using check mode, not ' + 'executing `%s`' % item['command'] + ) + commands.remove(item) + return commands + + +def main(): + """main entry point for module execution + """ + argument_spec = dict( + commands=dict(type='list', required=True), + + wait_for=dict(type='list'), + match=dict(default='all', choices=['all', 'any']), + + retries=dict(default=10, type='int'), + interval=dict(default=1, type='int') + ) + + module = AnsibleModule(argument_spec=argument_spec, + supports_check_mode=True) + + result = {'changed': False} + + warnings = list() + commands = parse_commands(module, warnings) + result['warnings'] = warnings + + wait_for = module.params['wait_for'] or list() + conditionals = [Conditional(c) for c in wait_for] + + retries = module.params['retries'] + interval = module.params['interval'] + match = module.params['match'] + + while retries > 0: + responses = run_commands(module, commands) + + for item in list(conditionals): + if item(responses): + if match == 'any': + conditionals = list() + break + conditionals.remove(item) + + if not conditionals: + break + + time.sleep(interval) + retries -= 1 + + if conditionals: + failed_conditions = [item.raw for item in conditionals] + msg = 'One or more conditional statements have not been satisfied' + module.fail_json(msg=msg, failed_conditions=failed_conditions) + + result.update({ + 'changed': False, + 'stdout': responses, + 'stdout_lines': list(to_lines(responses)) + }) + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network/voss/voss_config.py b/plugins/modules/network/voss/voss_config.py new file mode 100644 index 0000000000..cbdf2b32b9 --- /dev/null +++ b/plugins/modules/network/voss/voss_config.py @@ -0,0 +1,456 @@ +#!/usr/bin/python + 
+# Copyright: (c) 2018, Extreme Networks Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: voss_config
+author: "Lindsay Hill (@LindsayHill)"
+short_description: Manage Extreme VOSS configuration sections
+description:
+  - Extreme VOSS configurations use a simple flat text file syntax.
+    This module provides an implementation for working with VOSS
+    configuration lines in a deterministic way.
+notes:
+  - Tested against VOSS 7.0.0
+  - Abbreviated commands are NOT idempotent, see
+    L(Network FAQ,../network/user_guide/faq.html#why-do-the-config-modules-always-return-changed-true-with-abbreviated-commands).
+options:
+  lines:
+    description:
+      - The ordered set of commands that should be configured in the
+        section. The commands must be the exact same commands as found
+        in the device running-config. Be sure to note the configuration
+        command syntax as some commands are automatically modified by the
+        device config parser.
+    aliases: ['commands']
+  parents:
+    description:
+      - The parent line that uniquely identifies the section the commands
+        should be checked against. If this argument is omitted, the commands
+        are checked against the set of top level or global commands. Note
+        that VOSS configurations only support one level of nested commands.
+  src:
+    description:
+      - Specifies the source path to the file that contains the configuration
+        or configuration template to load. The path to the source file can
+        either be the full path on the Ansible control host or a relative
+        path from the playbook or role root directory. This argument is mutually
+        exclusive with I(lines), I(parents).
+  before:
+    description:
+      - The ordered set of commands to push on to the command stack if
+        a change needs to be made. This allows the playbook designer
+        the opportunity to perform configuration commands prior to pushing
+        any changes without affecting how the set of commands are matched
+        against the system.
+  after:
+    description:
+      - The ordered set of commands to append to the end of the command
+        stack if a change needs to be made. Just like with I(before) this
+        allows the playbook designer to append a set of commands to be
+        executed after the command set.
+  match:
+    description:
+      - Instructs the module on the way to perform the matching of
+        the set of commands against the current device config. If
+        match is set to I(line), commands are matched line by line. If
+        match is set to I(strict), command lines are matched with respect
+        to position. If match is set to I(exact), command lines
+        must be an equal match. Finally, if match is set to I(none), the
+        module will not attempt to compare the source configuration with
+        the running configuration on the remote device.
+    choices: ['line', 'strict', 'exact', 'none']
+    default: line
+  replace:
+    description:
+      - Instructs the module on the way to perform the configuration
+        on the device. If the replace argument is set to I(line) then
+        the modified lines are pushed to the device in configuration
+        mode. If the replace argument is set to I(block) then the entire
+        command block is pushed to the device in configuration mode if any
+        line is not correct.
+    default: line
+    choices: ['line', 'block']
+  backup:
+    description:
+      - This argument will cause the module to create a full backup of
+        the current C(running-config) from the remote device before any
+        changes are made. If the C(backup_options) value is not given,
+        the backup file is written to the C(backup) folder in the playbook
+        root directory or role root directory, if the playbook is part of an
+        Ansible role. If the directory does not exist, it is created.
+    type: bool
+    default: 'no'
+  running_config:
+    description:
+      - The module, by default, will connect to the remote device and
+        retrieve the current running-config to use as a base for comparing
+        against the contents of source. There are times when it is not
+        desirable to have the task get the current running-config for
+        every task in a playbook. The I(running_config) argument allows the
+        implementer to pass in the configuration to use as the base
+        config for comparison.
+    aliases: ['config']
+  defaults:
+    description:
+      - This argument specifies whether or not to collect all defaults
+        when getting the remote device running config. When enabled,
+        the module will get the current config by issuing the command
+        C(show running-config verbose).
+    type: bool
+    default: 'no'
+  save_when:
+    description:
+      - When changes are made to the device running-configuration, the
+        changes are not copied to non-volatile storage by default. Using
+        this argument will change that behavior. If the argument is set to
+        I(always), then the running-config will always be saved and the
+        I(modified) flag will always be set to True. If the argument is set
+        to I(modified), then the running-config will only be saved if it
+        has changed since the last save to startup-config. If the argument
+        is set to I(never), the running-config will never be saved.
+        If the argument is set to I(changed), then the running-config
+        will only be saved if the task has made a change.
+    default: never
+    choices: ['always', 'never', 'modified', 'changed']
+  diff_against:
+    description:
+      - When using the C(ansible-playbook --diff) command line argument
+        the module can generate diffs against different sources.
+      - When this option is configured as I(startup), the module will return
+        the diff of the running-config against the startup-config.
+      - When this option is configured as I(intended), the module will
+        return the diff of the running-config against the configuration
+        provided in the C(intended_config) argument.
+      - When this option is configured as I(running), the module will
+        return the before and after diff of the running-config with respect
+        to any changes made to the device configuration.
+    choices: ['running', 'startup', 'intended']
+  diff_ignore_lines:
+    description:
+      - Use this argument to specify one or more lines that should be
+        ignored during the diff. This is used for lines in the configuration
+        that are automatically updated by the system. This argument takes
+        a list of regular expressions or exact line matches.
+  intended_config:
+    description:
+      - The C(intended_config) provides the master configuration that
+        the node should conform to and is used to check the final
+        running-config against. This argument will not modify any settings
+        on the remote device and is strictly used to check the compliance
+        of the current device's configuration. When specifying this
+        argument, the task should also modify the C(diff_against) value and
+        set it to I(intended).
+  backup_options:
+    description:
+      - This is a dict object containing configurable options related to backup file path.
+        The value of this option is read only when C(backup) is set to I(yes); if C(backup) is set
+        to I(no) this option will be silently ignored.
+    suboptions:
+      filename:
+        description:
+          - The filename to be used to store the backup configuration. If the filename
+            is not given it will be generated based on the hostname, current time and
+            date, in the format <hostname>_config.<current-date>@<current-time>
+      dir_path:
+        description:
+          - This option provides the path ending with directory name in which the backup
+            configuration file will be stored. If the directory does not exist it will be first
+            created, and the filename is either the value of C(filename) or the default filename
+            as described in the C(filename) option's description. If the path value is not given,
+            a I(backup) directory will be created in the current working directory and the
+            backup configuration will be copied in C(filename) within the I(backup) directory.
+        type: path
+    type: dict
+'''
+
+EXAMPLES = """
+- name: configure system name
+  voss_config:
+    lines: prompt "{{ inventory_hostname }}"
+
+- name: configure interface settings
+  voss_config:
+    lines:
+      - name "ServerA"
+    backup: yes
+    parents: interface GigabitEthernet 1/1
+
+- name: check the running-config against master config
+  voss_config:
+    diff_against: intended
+    intended_config: "{{ lookup('file', 'master.cfg') }}"
+
+- name: check the startup-config against the running-config
+  voss_config:
+    diff_against: startup
+    diff_ignore_lines:
+      - qos queue-profile .*
+
+- name: save running to startup when modified
+  voss_config:
+    save_when: modified
+
+- name: configurable backup path
+  voss_config:
+    backup: yes
+    backup_options:
+      filename: backup.cfg
+      dir_path: /home/user
+"""
+
+RETURN = """
+updates:
+  description: The set of commands that will be pushed to the remote device
+  returned: always
+  type: list
+  sample: ['prompt "VSP200"']
+commands:
+  description: The set of commands that will be pushed to the remote device
+  returned: always
+  type: list
+  sample: ['interface GigabitEthernet 1/1', 'name "ServerA"', 'exit']
+backup_path:
+  description: The full path to the backup file
+  returned: when backup is yes
+  type: str
+  sample: /playbooks/ansible/backup/vsp200_config.2018-08-21@15:00:21
+"""
+from ansible.module_utils._text import to_text
+from ansible.module_utils.connection import ConnectionError
+from ansible_collections.community.general.plugins.module_utils.network.voss.voss import run_commands, get_config
+from ansible_collections.community.general.plugins.module_utils.network.voss.voss import get_defaults_flag, get_connection
+from ansible_collections.community.general.plugins.module_utils.network.voss.voss import get_sublevel_config, VossNetworkConfig
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.config import dumps
+
+
+def get_candidate_config(module):
+    candidate = VossNetworkConfig(indent=0)
+    if module.params['src']:
+        candidate.load(module.params['src'])
+    elif module.params['lines']:
+        parents = module.params['parents'] or list()
+        commands = module.params['lines'][0]
+        if (isinstance(commands, dict)) and (isinstance(commands['command'], list)):
+            candidate.add(commands['command'], parents=parents)
+        elif (isinstance(commands, dict)) and (isinstance(commands['command'], str)):
+            candidate.add([commands['command']], parents=parents)
+        else:
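+            # 'lines' is a plain list of strings, so add it as-is beneath
+            # any supplied parents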
+            candidate.add(module.params['lines'], parents=parents)
+    return candidate
+
+
+def get_running_config(module, current_config=None, flags=None):
+    running = module.params['running_config']
+    if not running:
+        if not module.params['defaults'] and current_config:
+            running = current_config
+        else:
+            running = get_config(module, flags=flags)
+
+    return running
+
+
+def save_config(module, result):
+    result['changed'] = True
+    if not module.check_mode:
+        run_commands(module, 'save config\r')
+    else:
+        module.warn('Skipping command `save config` '
+                    'due to check_mode. Configuration not copied to '
+                    'non-volatile storage')
+
+
+def main():
+    """ main entry point for module execution
+    """
+    backup_spec = dict(
+        filename=dict(),
+        dir_path=dict(type='path')
+    )
+    argument_spec = dict(
+        src=dict(type='path'),
+
+        lines=dict(aliases=['commands'], type='list'),
+        parents=dict(type='list'),
+
+        before=dict(type='list'),
+        after=dict(type='list'),
+
+        match=dict(default='line', choices=['line', 'strict', 'exact', 'none']),
+        replace=dict(default='line', choices=['line', 'block']),
+
+        running_config=dict(aliases=['config']),
+        intended_config=dict(),
+
+        defaults=dict(type='bool', default=False),
+        backup=dict(type='bool', default=False),
+        backup_options=dict(type='dict', options=backup_spec),
+
+        save_when=dict(choices=['always', 'never', 'modified', 'changed'], default='never'),
+
+        diff_against=dict(choices=['startup', 'intended', 'running']),
+        diff_ignore_lines=dict(type='list'),
+    )
+
+    mutually_exclusive = [('lines', 'src'),
+                          ('parents', 'src')]
+
+    required_if = [('match', 'strict', ['lines']),
+                   ('match', 'exact', ['lines']),
+                   ('replace', 'block', ['lines']),
+                   ('diff_against', 'intended', ['intended_config'])]
+
+    module = AnsibleModule(argument_spec=argument_spec,
+                           mutually_exclusive=mutually_exclusive,
+                           required_if=required_if,
+                           supports_check_mode=True)
+
+    result = {'changed': False}
+
+    parents = module.params['parents'] or list()
+
+    match = module.params['match']
+    replace = module.params['replace']
+
+    warnings = list()
+    result['warnings'] = warnings
+
+    diff_ignore_lines = module.params['diff_ignore_lines']
+
+    config = None
+    contents = None
+    flags = get_defaults_flag(module) if module.params['defaults'] else []
+    connection = get_connection(module)
+
+    if module.params['backup'] or (module._diff and module.params['diff_against'] == 'running'):
+        contents = get_config(module, flags=flags)
+        config = VossNetworkConfig(indent=0, contents=contents)
+        if module.params['backup']:
+            result['__backup__'] = contents
+
+    if any((module.params['lines'], module.params['src'])):
+        candidate = get_candidate_config(module)
+        if match != 'none':
+            config = get_running_config(module)
+            config = VossNetworkConfig(contents=config, indent=0)
+
+            if parents:
+                config = get_sublevel_config(config, module)
+            configobjs = candidate.difference(config, match=match, replace=replace)
+        else:
+            configobjs = candidate.items
+
+        if configobjs:
+            commands = dumps(configobjs, 'commands')
+            commands = commands.split('\n')
+
+            if module.params['before']:
+                commands[:0] = module.params['before']
+
+            if module.params['after']:
+                commands.extend(module.params['after'])
+
+            result['commands'] = commands
+            result['updates'] = commands
+
+            # send the configuration commands to the device and merge
+            # them with the current running config
+            if not module.check_mode:
+                if commands:
+                    try:
+                        connection.edit_config(candidate=commands)
+                    except ConnectionError as exc:
+                        module.fail_json(msg=to_text(exc, errors='surrogate_then_replace'))
+
+            result['changed'] = True
+
+    running_config = module.params['running_config']
+    startup = None
+
+    if module.params['save_when'] == 'always':
+        save_config(module, result)
+    elif module.params['save_when'] == 'modified':
+        match = module.params['match']
+        replace = module.params['replace']
+        try:
+            # Note we need to re-retrieve running config, not use cached version
+            running = connection.get_config(source='running')
+            startup = connection.get_config(source='startup')
+            response = connection.get_diff(candidate=startup, running=running, diff_match=match,
+                                           diff_ignore_lines=diff_ignore_lines, path=None,
+                                           diff_replace=replace)
+        except ConnectionError as exc:
+            module.fail_json(msg=to_text(exc, errors='surrogate_then_replace'))
+
+        config_diff = response['config_diff']
+        if config_diff:
+            save_config(module, result)
+    elif module.params['save_when'] == 'changed' and result['changed']:
+        save_config(module, result)
+
+    if module._diff:
+        if not running_config:
+            try:
+                # Note we need to re-retrieve running config, not use cached version
+                contents = connection.get_config(source='running')
+            except ConnectionError as exc:
+                module.fail_json(msg=to_text(exc, errors='surrogate_then_replace'))
+        else:
+            contents = running_config
+
+        # recreate the object in order to process diff_ignore_lines
+        running_config = VossNetworkConfig(indent=0, contents=contents,
+                                           ignore_lines=diff_ignore_lines)
+
+        if module.params['diff_against'] == 'running':
+            if module.check_mode:
+                module.warn("unable to perform diff against running-config due to check mode")
+                contents = None
+            else:
+                contents = config.config_text
+
+        elif module.params['diff_against'] == 'startup':
+            if not startup:
+                try:
+                    contents = connection.get_config(source='startup')
+                except ConnectionError as exc:
+                    module.fail_json(msg=to_text(exc, errors='surrogate_then_replace'))
+            else:
+                contents = startup
+
+        elif module.params['diff_against'] == 'intended':
+            contents = module.params['intended_config']
+
+        if contents is not None:
+            base_config = VossNetworkConfig(indent=0, contents=contents,
+                                            ignore_lines=diff_ignore_lines)
+
+            if running_config.sha1 != base_config.sha1:
+                if module.params['diff_against'] == 'intended':
+                    before = running_config
+                    after = base_config
+                elif module.params['diff_against'] in ('startup', 'running'):
+                    before = base_config
+                    after = running_config
+
+                result.update({
+                    'changed': True,
+                    'diff': {'before': str(before), 'after': str(after)}
+                })
+
+    module.exit_json(**result)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/network/voss/voss_facts.py b/plugins/modules/network/voss/voss_facts.py
new file mode 100644
index 0000000000..9b796648cd
--- /dev/null
+++ b/plugins/modules/network/voss/voss_facts.py
@@ -0,0 +1,508 @@
+#!/usr/bin/python
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.
+# + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: voss_facts +author: "Lindsay Hill (@LindsayHill)" +short_description: Collect facts from remote devices running Extreme VOSS +description: + - Collects a base set of device facts from a remote device that + is running VOSS. This module prepends all of the base network fact + keys with C(ansible_net_). The facts module will always collect + a base set of facts from the device and can enable or disable + collection of additional facts. +notes: + - Tested against VOSS 7.0.0 +options: + gather_subset: + description: + - When supplied, this argument will restrict the facts collected + to a given subset. Possible values for this argument include + all, hardware, config, and interfaces. Can specify a list of + values to include a larger subset. Values can also be used + with an initial C(M(!)) to specify that a specific subset should + not be collected. + required: false + default: '!config' +''' + +EXAMPLES = """ +# Collect all facts from the device +- voss_facts: + gather_subset: all + +# Collect only the config and default facts +- voss_facts: + gather_subset: + - config + +# Do not collect hardware facts +- voss_facts: + gather_subset: + - "!hardware" +""" + +RETURN = """ +ansible_net_gather_subset: + description: The list of fact subsets collected from the device + returned: always + type: list + +# default +ansible_net_model: + description: The model name returned from the device + returned: always + type: str +ansible_net_serialnum: + description: The serial number of the remote device + returned: always + type: str +ansible_net_version: + description: The operating system version running on the remote device + returned: always + type: str +ansible_net_hostname: + description: The configured hostname of the device + returned: always + type: str + +# hardware +ansible_net_memfree_mb: + description: The available free memory on the remote device in Mb + returned: when hardware is configured + type: int +ansible_net_memtotal_mb: + description: The total memory on the remote device in Mb + returned: when hardware is configured + type: int + +# config +ansible_net_config: + description: The current active config from the device + returned: when config is configured + type: str + +# interfaces +ansible_net_all_ipv4_addresses: + description: All IPv4 addresses configured on the device + returned: when interfaces is configured + type: list +ansible_net_all_ipv6_addresses: + description: All IPv6 addresses configured on the device + returned: when interfaces is configured + type: list +ansible_net_interfaces: + description: A hash of all interfaces running on the system + returned: when interfaces is configured + type: dict +ansible_net_neighbors: + description: The list of LLDP neighbors from the remote device + returned: when interfaces is configured + type: dict +""" +import re + +from ansible_collections.community.general.plugins.module_utils.network.voss.voss import run_commands +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.six import iteritems + + +class FactsBase(object): + + COMMANDS = list() + + def __init__(self, module): + self.module = module + self.facts = dict() + self.responses = None + + def populate(self): + self.responses = run_commands(self.module, commands=self.COMMANDS, check_rc=False) + + def run(self, 
cmd): + return run_commands(self.module, commands=cmd, check_rc=False) + + +class Default(FactsBase): + + COMMANDS = ['show sys-info'] + + def populate(self): + super(Default, self).populate() + data = self.responses[0] + if data: + self.facts['version'] = self.parse_version(data) + self.facts['serialnum'] = self.parse_serialnum(data) + self.facts['model'] = self.parse_model(data) + self.facts['hostname'] = self.parse_hostname(data) + + def parse_version(self, data): + match = re.search(r'SysDescr\s+: \S+ \((\S+)\)', data) + if match: + return match.group(1) + return '' + + def parse_hostname(self, data): + match = re.search(r'SysName\s+: (\S+)', data, re.M) + if match: + return match.group(1) + return '' + + def parse_model(self, data): + match = re.search(r'Chassis\s+: (\S+)', data, re.M) + if match: + return match.group(1) + return '' + + def parse_serialnum(self, data): + match = re.search(r'Serial#\s+: (\S+)', data) + if match: + return match.group(1) + return '' + + +class Hardware(FactsBase): + + COMMANDS = [ + 'show khi performance memory' + ] + + def populate(self): + super(Hardware, self).populate() + data = self.responses[0] + + if data: + match = re.search(r'Free:\s+(\d+)\s+\(KB\)', data, re.M) + if match: + self.facts['memfree_mb'] = int(round(int(match.group(1)) / 1024, 0)) + match = re.search(r'Used:\s+(\d+)\s+\(KB\)', data, re.M) + if match: + memused_mb = int(round(int(match.group(1)) / 1024, 0)) + self.facts['memtotal_mb'] = self.facts.get('memfree_mb', 0) + memused_mb + + +class Config(FactsBase): + + COMMANDS = ['show running-config'] + + def populate(self): + super(Config, self).populate() + data = self.responses[0] + if data: + self.facts['config'] = data + + +class Interfaces(FactsBase): + + COMMANDS = [ + 'show interfaces gigabitEthernet interface', + 'show interfaces gigabitEthernet name', + 'show ip interface', + 'show ipv6 address interface', + 'show lldp neighbor | include Port|SysName' + ] + + def populate(self): + super(Interfaces, self).populate() + + self.facts['all_ipv4_addresses'] = list() + self.facts['all_ipv6_addresses'] = list() + + data = self.responses[0] + if data: + interfaces = self.parse_interfaces(data) + self.facts['interfaces'] = self.populate_interfaces_eth(interfaces) + + data = self.responses[1] + if data: + data = self.parse_interfaces(data) + self.populate_interfaces_eth_additional(data) + + data = self.responses[2] + if data: + data = self.parse_interfaces(data) + self.populate_ipv4_interfaces(data) + + data = self.responses[3] + if data: + self.populate_ipv6_interfaces(data) + + data = self.responses[4] + if data: + self.facts['neighbors'] = self.parse_neighbors(data) + + def populate_interfaces_eth(self, interfaces): + facts = dict() + for key, value in iteritems(interfaces): + intf = dict() + match = re.match(r'^\d+\s+(\S+)\s+\w+\s+\w+\s+(\d+)\s+([a-f\d:]+)\s+(\w+)\s+(\w+)$', value) + if match: + intf['mediatype'] = match.group(1) + intf['mtu'] = match.group(2) + intf['macaddress'] = match.group(3) + intf['adminstatus'] = match.group(4) + intf['operstatus'] = match.group(5) + intf['type'] = 'Ethernet' + facts[key] = intf + return facts + + def populate_interfaces_eth_additional(self, interfaces): + for key, value in iteritems(interfaces): + # This matches when no description is set + match = re.match(r'^\w+\s+\w+\s+(\w+)\s+(\d+)\s+\w+$', value) + if match: + self.facts['interfaces'][key]['description'] = '' + self.facts['interfaces'][key]['duplex'] = match.group(1) + self.facts['interfaces'][key]['bandwidth'] = match.group(2) + else: 
+ # This matches when a description is set + match = re.match(r'^(.+)\s+\w+\s+\w+\s+(\w+)\s+(\d+)\s+\w+$', value) + if match: + self.facts['interfaces'][key]['description'] = match.group(1).strip() + self.facts['interfaces'][key]['duplex'] = match.group(2) + self.facts['interfaces'][key]['bandwidth'] = match.group(3) + + def populate_ipv4_interfaces(self, data): + for key, value in data.items(): + if key not in self.facts['interfaces']: + if re.match(r'Vlan\d+', key): + self.facts['interfaces'][key] = dict() + self.facts['interfaces'][key]['type'] = 'VLAN' + elif re.match(r'Clip\d+', key): + self.facts['interfaces'][key] = dict() + self.facts['interfaces'][key]['type'] = 'Loopback' + if re.match(r'Port(\d+/\d+)', key): + key = re.split('Port', key)[1] + self.facts['interfaces'][key]['ipv4'] = list() + match = re.match(r'(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})\s+(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})', value, re.M) + if match: + addr = match.group(1) + subnet = match.group(2) + ipv4 = dict(address=addr, subnet=subnet) + self.add_ip_address(addr, 'ipv4') + self.facts['interfaces'][key]['ipv4'].append(ipv4) + + def populate_ipv6_interfaces(self, data): + addresses = re.split(r'-{3,}', data)[1].lstrip() + for line in addresses.split('\n'): + if not line: + break + + match = re.match(r'^([\da-f:]+)/(\d+)\s+([CV])-(\d+)\s+.+$', line) + if match: + address = match.group(1) + subnet = match.group(2) + interface_short_name = match.group(3) + interface_id = match.group(4) + if interface_short_name == 'C': + intf_type = 'Loopback' + interface_name = 'Clip' + interface_id + elif interface_short_name == 'V': + intf_type = 'VLAN' + interface_name = 'Vlan' + interface_id + else: + # Unknown interface type, better to gracefully ignore it for now + break + ipv6 = dict(address=address, subnet=subnet) + self.add_ip_address(address, 'ipv6') + try: + self.facts['interfaces'][interface_name].setdefault('ipv6', []).append(ipv6) + self.facts['interfaces'][interface_name]['type'] = intf_type + except KeyError: + self.facts['interfaces'][interface_name] = dict() + self.facts['interfaces'][interface_name]['type'] = intf_type + self.facts['interfaces'][interface_name].setdefault('ipv6', []).append(ipv6) + else: + break + + def add_ip_address(self, address, family): + if family == 'ipv4': + self.facts['all_ipv4_addresses'].append(address) + else: + self.facts['all_ipv6_addresses'].append(address) + + def parse_neighbors(self, neighbors): + facts = dict() + lines = neighbors.split('Port: ') + if not lines: + return facts + for line in lines: + match = re.search(r'^(\w.*?)\s+Index.*IfName\s+(\w.*)$\s+SysName\s+:\s(\S+)', line, (re.M | re.S)) + if match: + intf = match.group(1) + if intf not in facts: + facts[intf] = list() + fact = dict() + fact['host'] = match.group(3) + fact['port'] = match.group(2) + facts[intf].append(fact) + return facts + + def parse_interfaces(self, data): + parsed = dict() + interfaces = re.split(r'-{3,}', data)[1].lstrip() + for line in interfaces.split('\n'): + if not line or re.match('^All', line): + break + else: + match = re.split(r'^(\S+)\s+', line) + key = match[1] + parsed[key] = match[2].strip() + return parsed + + def parse_description(self, data): + match = re.search(r'Description: (.+)$', data, re.M) + if match: + return match.group(1) + return '' + + def parse_macaddress(self, data): + match = re.search(r'Hardware is (?:.*), address is (\S+)', data) + if match: + return match.group(1) + return '' + + def parse_mtu(self, data): + match = re.search(r'MTU (\d+)', data) + if match: + return 
int(match.group(1))
+        return ''
+
+    def parse_bandwidth(self, data):
+        match = re.search(r'BW (\d+)', data)
+        if match:
+            return int(match.group(1))
+        return ''
+
+    def parse_duplex(self, data):
+        match = re.search(r'(\w+) Duplex', data, re.M)
+        if match:
+            return match.group(1)
+        return ''
+
+    def parse_mediatype(self, data):
+        match = re.search(r'media type is (.+)$', data, re.M)
+        if match:
+            return match.group(1)
+        return ''
+
+    def parse_type(self, data):
+        match = re.search(r'Hardware is (.+),', data, re.M)
+        if match:
+            return match.group(1)
+        return ''
+
+    def parse_lineprotocol(self, data):
+        match = re.search(r'line protocol is (.+)$', data, re.M)
+        if match:
+            return match.group(1)
+        return ''
+
+    def parse_operstatus(self, data):
+        match = re.search(r'^(?:.+) is (.+),', data, re.M)
+        if match:
+            return match.group(1)
+        return ''
+
+
+FACT_SUBSETS = dict(
+    default=Default,
+    hardware=Hardware,
+    interfaces=Interfaces,
+    config=Config,
+)
+
+VALID_SUBSETS = frozenset(FACT_SUBSETS.keys())
+
+
+def main():
+    """main entry point for module execution
+    """
+    argument_spec = dict(
+        gather_subset=dict(default=['!config'], type='list')
+    )
+
+    module = AnsibleModule(argument_spec=argument_spec,
+                           supports_check_mode=True)
+
+    gather_subset = module.params['gather_subset']
+
+    runable_subsets = set()
+    exclude_subsets = set()
+
+    for subset in gather_subset:
+        if subset == 'all':
+            runable_subsets.update(VALID_SUBSETS)
+            continue
+
+        if subset.startswith('!'):
+            subset = subset[1:]
+            if subset == 'all':
+                exclude_subsets.update(VALID_SUBSETS)
+                continue
+            exclude = True
+        else:
+            exclude = False
+
+        if subset not in VALID_SUBSETS:
+            module.fail_json(msg='Bad subset')
+
+        if exclude:
+            exclude_subsets.add(subset)
+        else:
+            runable_subsets.add(subset)
+
+    if not runable_subsets:
+        runable_subsets.update(VALID_SUBSETS)
+
+    runable_subsets.difference_update(exclude_subsets)
+    runable_subsets.add('default')
+
+    facts = dict()
+    facts['gather_subset'] = list(runable_subsets)
+
+    instances = list()
+    for key in runable_subsets:
+        instances.append(FACT_SUBSETS[key](module))
+
+    for inst in instances:
+        inst.populate()
+        facts.update(inst.facts)
+
+    ansible_facts = dict()
+    for key, value in iteritems(facts):
+        key = 'ansible_net_%s' % key
+        ansible_facts[key] = value
+
+    warnings = list()
+
+    module.exit_json(ansible_facts=ansible_facts, warnings=warnings)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/notification/bearychat.py b/plugins/modules/notification/bearychat.py
new file mode 100644
index 0000000000..54cf8f5e0d
--- /dev/null
+++ b/plugins/modules/notification/bearychat.py
@@ -0,0 +1,181 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2016, Jiangge Zhang
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.
+ +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['stableinterface'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +module: bearychat +short_description: Send BearyChat notifications +description: + - The M(bearychat) module sends notifications to U(https://bearychat.com) + via the Incoming Robot integration. +author: "Jiangge Zhang (@tonyseek)" +options: + url: + description: + - BearyChat WebHook URL. This authenticates you to the bearychat + service. It looks like + C(https://hook.bearychat.com/=ae2CF/incoming/e61bd5c57b164e04b11ac02e66f47f60). + required: true + text: + description: + - Message to send. + markdown: + description: + - If C(yes), text will be parsed as markdown. + default: 'yes' + type: bool + channel: + description: + - Channel to send the message to. If absent, the message goes to the + default channel selected by the I(url). + attachments: + description: + - Define a list of attachments. For more information, see + https://github.com/bearyinnovative/bearychat-tutorial/blob/master/robots/incoming.md#attachments +''' + +EXAMPLES = """ +- name: Send notification message via BearyChat + local_action: + module: bearychat + url: | + https://hook.bearychat.com/=ae2CF/incoming/e61bd5c57b164e04b11ac02e66f47f60 + text: "{{ inventory_hostname }} completed" + +- name: Send notification message via BearyChat all options + local_action: + module: bearychat + url: | + https://hook.bearychat.com/=ae2CF/incoming/e61bd5c57b164e04b11ac02e66f47f60 + text: "{{ inventory_hostname }} completed" + markdown: no + channel: "#ansible" + attachments: + - title: "Ansible on {{ inventory_hostname }}" + text: "May the Force be with you." + color: "#ffffff" + images: + - http://example.com/index.png +""" + +RETURN = """ +msg: + description: execution result + returned: success + type: str + sample: "OK" +""" + +try: + from ansible.module_utils.six.moves.urllib.parse import urlparse, urlunparse + HAS_URLPARSE = True +except Exception: + HAS_URLPARSE = False +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.urls import fetch_url + + +def build_payload_for_bearychat(module, text, markdown, channel, attachments): + payload = {} + if text is not None: + payload['text'] = text + if markdown is not None: + payload['markdown'] = markdown + if channel is not None: + payload['channel'] = channel + if attachments is not None: + payload.setdefault('attachments', []).extend( + build_payload_for_bearychat_attachment( + module, item.get('title'), item.get('text'), item.get('color'), + item.get('images')) + for item in attachments) + payload = 'payload=%s' % module.jsonify(payload) + return payload + + +def build_payload_for_bearychat_attachment(module, title, text, color, images): + attachment = {} + if title is not None: + attachment['title'] = title + if text is not None: + attachment['text'] = text + if color is not None: + attachment['color'] = color + if images is not None: + target_images = attachment.setdefault('images', []) + if not isinstance(images, (list, tuple)): + images = [images] + for image in images: + if isinstance(image, dict) and 'url' in image: + image = {'url': image['url']} + elif hasattr(image, 'startswith') and image.startswith('http'): + image = {'url': image} + else: + module.fail_json( + msg="BearyChat doesn't have support for this kind of " + "attachment image") + target_images.append(image) + return attachment + + +def 
do_notify_bearychat(module, url, payload):
+    response, info = fetch_url(module, url, data=payload)
+    if info['status'] != 200:
+        url_info = urlparse(url)
+        obscured_incoming_webhook = urlunparse(
+            (url_info.scheme, url_info.netloc, '[obscured]', '', '', ''))
+        module.fail_json(
+            msg="failed to send %s to %s: %s" % (
+                payload, obscured_incoming_webhook, info['msg']))
+
+
+def main():
+    module = AnsibleModule(argument_spec={
+        'url': dict(type='str', required=True, no_log=True),
+        'text': dict(type='str'),
+        'markdown': dict(default='yes', type='bool'),
+        'channel': dict(type='str'),
+        'attachments': dict(type='list'),
+    })
+
+    if not HAS_URLPARSE:
+        module.fail_json(msg='urlparse is not installed')
+
+    url = module.params['url']
+    text = module.params['text']
+    markdown = module.params['markdown']
+    channel = module.params['channel']
+    attachments = module.params['attachments']
+
+    payload = build_payload_for_bearychat(
+        module, text, markdown, channel, attachments)
+    do_notify_bearychat(module, url, payload)
+
+    module.exit_json(msg="OK")
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/notification/campfire.py b/plugins/modules/notification/campfire.py
new file mode 100644
index 0000000000..343ce747d6
--- /dev/null
+++ b/plugins/modules/notification/campfire.py
@@ -0,0 +1,152 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: campfire
+short_description: Send a message to Campfire
+description:
+  - Send a message to Campfire.
+  - Messages with newlines will result in a "Paste" message being sent.
+options:
+  subscription:
+    description:
+      - The subscription name to use.
+    required: true
+  token:
+    description:
+      - API token.
+    required: true
+  room:
+    description:
+      - Room number to which the message should be sent.
+    required: true
+  msg:
+    description:
+      - The message body.
+    required: true
+  notify:
+    description:
+      - Send a notification sound before the message.
+    required: false
+    choices: ["56k", "bell", "bezos", "bueller", "clowntown",
+              "cottoneyejoe", "crickets", "dadgummit", "dangerzone",
+              "danielsan", "deeper", "drama", "greatjob", "greyjoy",
+              "guarantee", "heygirl", "horn", "horror",
+              "inconceivable", "live", "loggins", "makeitso", "noooo",
+              "nyan", "ohmy", "ohyeah", "pushit", "rimshot",
+              "rollout", "rumble", "sax", "secret", "sexyback",
+              "story", "tada", "tmyk", "trololo", "trombone", "unix",
+              "vuvuzela", "what", "whoomp", "yeah", "yodel"]
+
+# informational: requirements for nodes
+requirements: [ ]
+author: "Adam Garside (@fabulops)"
+'''
+
+EXAMPLES = '''
+- campfire:
+    subscription: foo
+    token: 12345
+    room: 123
+    msg: Task completed.
+
+- campfire:
+    subscription: foo
+    token: 12345
+    room: 123
+    notify: loggins
+    msg: Task completed ... with feeling.
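+
+# A minimal sketch (subscription/token/room values are placeholders): a
+# multi-line msg is delivered by Campfire as a "Paste" rather than a plain
+# message, as noted in the description above.
+- campfire:
+    subscription: foo
+    token: 12345
+    room: 123
+    msg: |
+      Deploy summary:
+      all hosts completed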
+'''
+
+try:
+    from html import escape as html_escape
+except ImportError:
+    # html.escape() is only available on Python 3.2+; fall back to
+    # cgi.escape() on older interpreters
+    import cgi
+
+    def html_escape(text, quote=True):
+        return cgi.escape(text, quote)
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import fetch_url
+
+
+def main():
+
+    module = AnsibleModule(
+        argument_spec=dict(
+            subscription=dict(required=True),
+            token=dict(required=True, no_log=True),
+            room=dict(required=True),
+            msg=dict(required=True),
+            notify=dict(required=False,
+                        choices=["56k", "bell", "bezos", "bueller",
+                                 "clowntown", "cottoneyejoe",
+                                 "crickets", "dadgummit", "dangerzone",
+                                 "danielsan", "deeper", "drama",
+                                 "greatjob", "greyjoy", "guarantee",
+                                 "heygirl", "horn", "horror",
+                                 "inconceivable", "live", "loggins",
+                                 "makeitso", "noooo", "nyan", "ohmy",
+                                 "ohyeah", "pushit", "rimshot",
+                                 "rollout", "rumble", "sax", "secret",
+                                 "sexyback", "story", "tada", "tmyk",
+                                 "trololo", "trombone", "unix",
+                                 "vuvuzela", "what", "whoomp", "yeah",
+                                 "yodel"]),
+        ),
+        supports_check_mode=False
+    )
+
+    subscription = module.params["subscription"]
+    token = module.params["token"]
+    room = module.params["room"]
+    msg = module.params["msg"]
+    notify = module.params["notify"]
+
+    URI = "https://%s.campfirenow.com" % subscription
+    NSTR = "<message><type>SoundMessage</type><body>%s</body></message>"
+    MSTR = "<message><body>%s</body></message>"
+    AGENT = "Ansible/1.2"
+
+    # Hack to add basic auth username and password the way fetch_url expects
+    module.params['url_username'] = token
+    module.params['url_password'] = 'X'
+
+    target_url = '%s/room/%s/speak.xml' % (URI, room)
+    headers = {'Content-Type': 'application/xml',
+               'User-agent': AGENT}
+
+    # Send some audible notification if requested
+    if notify:
+        response, info = fetch_url(module, target_url, data=NSTR % html_escape(notify), headers=headers)
+        if info['status'] not in [200, 201]:
+            module.fail_json(msg="unable to send msg: '%s', campfire api"
+                                 " returned error code: '%s'" %
+                                 (notify, info['status']))
+
+    # Send the message
+    response, info = fetch_url(module, target_url, data=MSTR % html_escape(msg), headers=headers)
+    if info['status'] not in [200, 201]:
+        module.fail_json(msg="unable to send msg: '%s', campfire api"
+                             " returned error code: '%s'" %
+                             (msg, info['status']))
+
+    module.exit_json(changed=True, room=room, msg=msg, notify=notify)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/notification/catapult.py b/plugins/modules/notification/catapult.py
new file mode 100644
index 0000000000..a0e1f000a2
--- /dev/null
+++ b/plugins/modules/notification/catapult.py
@@ -0,0 +1,151 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2016, Jonathan Mainguy
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+# basis of code taken from the ansible twilio and nexmo modules
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: catapult
+short_description: Send a sms / mms using the catapult bandwidth api
+description:
+  - Allows notifications to be sent using sms / mms via the catapult bandwidth api.
+options:
+  src:
+    description:
+      - One of your catapult telephone numbers the message should come from (must be in E.164 format, like C(+19195551212)).
+    required: true
+  dest:
+    description:
+      - The phone number or numbers the message should be sent to (must be in E.164 format, like C(+19195551212)).
+ required: true + msg: + description: + - The contents of the text message (must be 2048 characters or less). + required: true + media: + description: + - For MMS messages, a media url to the location of the media to be sent with the message. + user_id: + description: + - User Id from Api account page. + required: true + api_token: + description: + - Api Token from Api account page. + required: true + api_secret: + description: + - Api Secret from Api account page. + required: true + +author: "Jonathan Mainguy (@Jmainguy)" +notes: + - Will return changed even if the media url is wrong. + - Will return changed if the destination number is invalid. + +''' + +EXAMPLES = ''' +- name: Send a mms to multiple users + catapult: + src: "+15035555555" + dest: + - "+12525089000" + - "+12018994225" + media: "http://example.com/foobar.jpg" + msg: "Task is complete" + user_id: "{{ user_id }}" + api_token: "{{ api_token }}" + api_secret: "{{ api_secret }}" + +- name: Send a sms to a single user + catapult: + src: "+15035555555" + dest: "+12018994225" + msg: "Consider yourself notified" + user_id: "{{ user_id }}" + api_token: "{{ api_token }}" + api_secret: "{{ api_secret }}" + +''' + +RETURN = ''' +changed: + description: Whether the api accepted the message. + returned: always + type: bool + sample: True +''' + + +import json + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.urls import fetch_url + + +def send(module, src, dest, msg, media, user_id, api_token, api_secret): + """ + Send the message + """ + AGENT = "Ansible" + URI = "https://api.catapult.inetwork.com/v1/users/%s/messages" % user_id + data = {'from': src, 'to': dest, 'text': msg} + if media: + data['media'] = media + + headers = {'User-Agent': AGENT, 'Content-type': 'application/json'} + + # Hack module params to have the Basic auth params that fetch_url expects + module.params['url_username'] = api_token.replace('\n', '') + module.params['url_password'] = api_secret.replace('\n', '') + + return fetch_url(module, URI, data=json.dumps(data), headers=headers, method="post") + + +def main(): + module = AnsibleModule( + argument_spec=dict( + src=dict(required=True), + dest=dict(required=True, type='list'), + msg=dict(required=True), + user_id=dict(required=True), + api_token=dict(required=True, no_log=True), + api_secret=dict(required=True, no_log=True), + media=dict(default=None, required=False), + ), + ) + + src = module.params['src'] + dest = module.params['dest'] + msg = module.params['msg'] + media = module.params['media'] + user_id = module.params['user_id'] + api_token = module.params['api_token'] + api_secret = module.params['api_secret'] + + for number in dest: + rc, info = send(module, src, number, msg, media, user_id, api_token, api_secret) + if info["status"] != 201: + body = json.loads(info["body"]) + fail_msg = body["message"] + module.fail_json(msg=fail_msg) + + changed = True + module.exit_json(changed=changed) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/notification/cisco_spark.py b/plugins/modules/notification/cisco_spark.py new file mode 100644 index 0000000000..d22a260174 --- /dev/null +++ b/plugins/modules/notification/cisco_spark.py @@ -0,0 +1,191 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 
+                    'status': ['stableinterface'],
+                    'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: cisco_spark
+short_description: Send a message to a Cisco Spark Room or Individual.
+description:
+    - Send a message to a Cisco Spark Room or Individual with options to control the formatting.
+author: Drew Rusell (@drew-russell)
+notes:
+  - The C(recipient_type) must be valid for the supplied C(recipient_id).
+  - Full API documentation can be found at U(https://developer.ciscospark.com/endpoint-messages-post.html).
+
+options:
+
+  recipient_type:
+    description:
+      - The request parameter you would like to send the message to.
+      - Messages can be sent to either a room or individual (by ID or E-Mail).
+    required: True
+    choices: ['roomId', 'toPersonEmail', 'toPersonId']
+
+  recipient_id:
+    description:
+      - The unique identifier associated with the supplied C(recipient_type).
+    required: true
+
+  message_type:
+    description:
+      - Specifies how you would like the message formatted.
+    required: False
+    default: text
+    choices: ['text', 'markdown']
+
+  personal_token:
+    description:
+      - Your personal access token required to validate the Spark API.
+    required: true
+    aliases: ['token']
+
+  message:
+    description:
+      - The message you would like to send.
+    required: True
+'''
+
+EXAMPLES = """
+# Note: The following examples assume a variable file has been imported
+# that contains the appropriate information.
+
+- name: Cisco Spark - Markdown Message to a Room
+  cisco_spark:
+    recipient_type: roomId
+    recipient_id: "{{ room_id }}"
+    message_type: markdown
+    personal_token: "{{ token }}"
+    message: "**Cisco Spark Ansible Module - Room Message in Markdown**"
+
+- name: Cisco Spark - Text Message to a Room
+  cisco_spark:
+    recipient_type: roomId
+    recipient_id: "{{ room_id }}"
+    message_type: text
+    personal_token: "{{ token }}"
+    message: "Cisco Spark Ansible Module - Room Message in Text"
+
+- name: Cisco Spark - Text Message by an Individual's ID
+  cisco_spark:
+    recipient_type: toPersonId
+    recipient_id: "{{ person_id }}"
+    message_type: text
+    personal_token: "{{ token }}"
+    message: "Cisco Spark Ansible Module - Text Message to Individual by ID"
+
+- name: Cisco Spark - Text Message by an Individual's E-Mail Address
+  cisco_spark:
+    recipient_type: toPersonEmail
+    recipient_id: "{{ person_email }}"
+    message_type: text
+    personal_token: "{{ token }}"
+    message: "Cisco Spark Ansible Module - Text Message to Individual by E-Mail"
+
+"""
+
+RETURN = """
+status_code:
+  description:
+    - The Response Code returned by the Spark API.
+    - Full Response Code explanations can be found at U(https://developer.ciscospark.com/endpoint-messages-post.html).
+  returned: always
+  type: int
+  sample: 200
+
+message:
+  description:
+    - The Response Message returned by the Spark API.
+    - Full Response Code explanations can be found at U(https://developer.ciscospark.com/endpoint-messages-post.html).
+  returned: always
+  type: str
+  sample: OK (585 bytes)
+"""
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import fetch_url
+
+
+def spark_message(module):
+    """ When check mode is specified, establish a read-only connection that
+        does not return any user-specific data, to validate connectivity.
+        In regular mode, send a message to a Cisco Spark Room or Individual."""
+
+    # Ansible Specific Variables
+    results = {}
+    ansible = module.params
+
+    headers = {
+        'Authorization': 'Bearer {0}'.format(ansible['personal_token']),
+        'content-type': 'application/json'
+    }
+
+    if module.check_mode:
+        url = "https://api.ciscospark.com/v1/people/me"
+        payload = None
+
+    else:
+        url = "https://api.ciscospark.com/v1/messages"
+
+        payload = {
+            ansible['recipient_type']: ansible['recipient_id'],
+            ansible['message_type']: ansible['message']
+        }
+
+        payload = module.jsonify(payload)
+
+    response, info = fetch_url(module, url, data=payload, headers=headers)
+
+    status_code = info['status']
+    message = info['msg']
+
+    # Module will fail if the response is not 200
+    if status_code != 200:
+        results['failed'] = True
+        results['status_code'] = status_code
+        results['message'] = message
+    else:
+        results['failed'] = False
+        results['status_code'] = status_code
+
+        if module.check_mode:
+            results['message'] = 'Authentication Successful.'
+        else:
+            results['message'] = message
+
+    return results
+
+
+def main():
+    '''Ansible main. '''
+    module = AnsibleModule(
+        argument_spec=dict(
+            recipient_type=dict(required=True, choices=['roomId', 'toPersonEmail', 'toPersonId']),
+            recipient_id=dict(required=True, no_log=True),
+            message_type=dict(required=False, default='text', aliases=['type'], choices=['text', 'markdown']),
+            personal_token=dict(required=True, no_log=True, aliases=['token']),
+            message=dict(required=True)
+
+        ),
+
+        supports_check_mode=True
+    )
+
+    results = spark_message(module)
+
+    module.exit_json(**results)
+
+
+if __name__ == "__main__":
+    main()
diff --git a/plugins/modules/notification/flowdock.py b/plugins/modules/notification/flowdock.py
new file mode 100644
index 0000000000..e5dfe3c428
--- /dev/null
+++ b/plugins/modules/notification/flowdock.py
@@ -0,0 +1,188 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright 2013 Matt Coddington
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: flowdock
+author: "Matt Coddington (@mcodd)"
+short_description: Send a message to a flowdock
+description:
+   - Send a message to a flowdock team inbox or chat using the push API (see https://www.flowdock.com/api/team-inbox and https://www.flowdock.com/api/chat)
+options:
+  token:
+    description:
+      - API token.
+ required: true + type: + description: + - Whether to post to 'inbox' or 'chat' + required: true + choices: [ "inbox", "chat" ] + msg: + description: + - Content of the message + required: true + tags: + description: + - tags of the message, separated by commas + required: false + external_user_name: + description: + - (chat only - required) Name of the "user" sending the message + required: false + from_address: + description: + - (inbox only - required) Email address of the message sender + required: false + source: + description: + - (inbox only - required) Human readable identifier of the application that uses the Flowdock API + required: false + subject: + description: + - (inbox only - required) Subject line of the message + required: false + from_name: + description: + - (inbox only) Name of the message sender + required: false + reply_to: + description: + - (inbox only) Email address for replies + required: false + project: + description: + - (inbox only) Human readable identifier for more detailed message categorization + required: false + link: + description: + - (inbox only) Link associated with the message. This will be used to link the message subject in Team Inbox. + required: false + validate_certs: + description: + - If C(no), SSL certificates will not be validated. This should only be used + on personally controlled sites using self-signed certificates. + required: false + default: 'yes' + type: bool + +requirements: [ ] +''' + +EXAMPLES = ''' +- flowdock: + type: inbox + token: AAAAAA + from_address: user@example.com + source: my cool app + msg: test from ansible + subject: test subject + +- flowdock: + type: chat + token: AAAAAA + external_user_name: testuser + msg: test from ansible + tags: tag1,tag2,tag3 +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.six.moves.urllib.parse import urlencode +from ansible.module_utils.urls import fetch_url + + +# =========================================== +# Module execution. 
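+#
+# For orientation, the POST that main() below ends up making for a chat-type
+# message looks roughly like this (illustrative values only, mirroring the
+# code that follows; not executed):
+#
+#     params = {'content': 'test from ansible',
+#               'external_user_name': 'testuser',
+#               'tags': 'tag1,tag2,tag3'}
+#     data = urlencode(params)
+#     response, info = fetch_url(module, url, data=data)
+#
+# where url is https://api.flowdock.com/v1/messages/chat/<token>.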
+# + +def main(): + + module = AnsibleModule( + argument_spec=dict( + token=dict(required=True, no_log=True), + msg=dict(required=True), + type=dict(required=True, choices=["inbox", "chat"]), + external_user_name=dict(required=False), + from_address=dict(required=False), + source=dict(required=False), + subject=dict(required=False), + from_name=dict(required=False), + reply_to=dict(required=False), + project=dict(required=False), + tags=dict(required=False), + link=dict(required=False), + validate_certs=dict(default='yes', type='bool'), + ), + supports_check_mode=True + ) + + type = module.params["type"] + token = module.params["token"] + if type == 'inbox': + url = "https://api.flowdock.com/v1/messages/team_inbox/%s" % (token) + else: + url = "https://api.flowdock.com/v1/messages/chat/%s" % (token) + + params = {} + + # required params + params['content'] = module.params["msg"] + + # required params for the 'chat' type + if module.params['external_user_name']: + if type == 'inbox': + module.fail_json(msg="external_user_name is not valid for the 'inbox' type") + else: + params['external_user_name'] = module.params["external_user_name"] + elif type == 'chat': + module.fail_json(msg="external_user_name is required for the 'chat' type") + + # required params for the 'inbox' type + for item in ['from_address', 'source', 'subject']: + if module.params[item]: + if type == 'chat': + module.fail_json(msg="%s is not valid for the 'chat' type" % item) + else: + params[item] = module.params[item] + elif type == 'inbox': + module.fail_json(msg="%s is required for the 'inbox' type" % item) + + # optional params + if module.params["tags"]: + params['tags'] = module.params["tags"] + + # optional params for the 'inbox' type + for item in ['from_name', 'reply_to', 'project', 'link']: + if module.params[item]: + if type == 'chat': + module.fail_json(msg="%s is not valid for the 'chat' type" % item) + else: + params[item] = module.params[item] + + # If we're in check mode, just exit pretending like we succeeded + if module.check_mode: + module.exit_json(changed=False) + + # Send the data to Flowdock + data = urlencode(params) + response, info = fetch_url(module, url, data=data) + if info['status'] != 200: + module.fail_json(msg="unable to send msg: %s" % info['msg']) + + module.exit_json(changed=True, msg=module.params["msg"]) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/notification/grove.py b/plugins/modules/notification/grove.py new file mode 100644 index 0000000000..8922af15a4 --- /dev/null +++ b/plugins/modules/notification/grove.py @@ -0,0 +1,115 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: grove +short_description: Sends a notification to a grove.io channel +description: + - The C(grove) module sends a message for a service to a Grove.io + channel. +options: + channel_token: + description: + - Token of the channel to post to. 
+ required: true + service: + description: + - Name of the service (displayed as the "user" in the message) + required: false + default: ansible + message: + description: + - Message content + required: true + url: + description: + - Service URL for the web client + required: false + icon_url: + description: + - Icon for the service + required: false + validate_certs: + description: + - If C(no), SSL certificates will not be validated. This should only be used + on personally controlled sites using self-signed certificates. + default: 'yes' + type: bool +author: "Jonas Pfenniger (@zimbatm)" +''' + +EXAMPLES = ''' +- grove: > + channel_token=6Ph62VBBJOccmtTPZbubiPzdrhipZXtg + service=my-app + message=deployed {{ target }} +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.six.moves.urllib.parse import urlencode +from ansible.module_utils.urls import fetch_url + + +BASE_URL = 'https://grove.io/api/notice/%s/' + +# ============================================================== +# do_notify_grove + + +def do_notify_grove(module, channel_token, service, message, url=None, icon_url=None): + my_url = BASE_URL % (channel_token,) + + my_data = dict(service=service, message=message) + if url is not None: + my_data['url'] = url + if icon_url is not None: + my_data['icon_url'] = icon_url + + data = urlencode(my_data) + response, info = fetch_url(module, my_url, data=data) + if info['status'] != 200: + module.fail_json(msg="failed to send notification: %s" % info['msg']) + +# ============================================================== +# main + + +def main(): + module = AnsibleModule( + argument_spec=dict( + channel_token=dict(type='str', required=True, no_log=True), + message=dict(type='str', required=True), + service=dict(type='str', default='ansible'), + url=dict(type='str', default=None), + icon_url=dict(type='str', default=None), + validate_certs=dict(default='yes', type='bool'), + ) + ) + + channel_token = module.params['channel_token'] + service = module.params['service'] + message = module.params['message'] + url = module.params['url'] + icon_url = module.params['icon_url'] + + do_notify_grove(module, channel_token, service, message, url, icon_url) + + # Mission complete + module.exit_json(msg="OK") + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/notification/hipchat.py b/plugins/modules/notification/hipchat.py new file mode 100644 index 0000000000..f3f29e44f1 --- /dev/null +++ b/plugins/modules/notification/hipchat.py @@ -0,0 +1,208 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['stableinterface'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: hipchat +short_description: Send a message to Hipchat. +description: + - Send a message to a Hipchat room, with options to control the formatting. +options: + token: + description: + - API token. + required: true + room: + description: + - ID or name of the room. + required: true + from: + description: + - Name the message will appear to be sent from. Max length is 15 + characters - above this it will be truncated. + default: Ansible + msg: + description: + - The message body. + required: true + color: + description: + - Background color for the message. 
+ default: yellow + choices: [ "yellow", "red", "green", "purple", "gray", "random" ] + msg_format: + description: + - Message format. + default: text + choices: [ "text", "html" ] + notify: + description: + - If true, a notification will be triggered for users in the room. + type: bool + default: 'yes' + validate_certs: + description: + - If C(no), SSL certificates will not be validated. This should only be used + on personally controlled sites using self-signed certificates. + type: bool + default: 'yes' + api: + description: + - API url if using a self-hosted hipchat server. For Hipchat API version + 2 use the default URI with C(/v2) instead of C(/v1). + default: 'https://api.hipchat.com/v1' + +author: +- Shirou Wakayama (@shirou) +- Paul Bourdel (@pb8226) +''' + +EXAMPLES = ''' +- hipchat: + room: notif + msg: Ansible task finished + +# Use Hipchat API version 2 +- hipchat: + api: https://api.hipchat.com/v2/ + token: OAUTH2_TOKEN + room: notify + msg: Ansible task finished +''' + +# =========================================== +# HipChat module specific support methods. +# + +import json +import traceback + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.six.moves.urllib.parse import urlencode +from ansible.module_utils.six.moves.urllib.request import pathname2url +from ansible.module_utils._text import to_native +from ansible.module_utils.urls import fetch_url + + +DEFAULT_URI = "https://api.hipchat.com/v1" + +MSG_URI_V1 = "/rooms/message" + +NOTIFY_URI_V2 = "/room/{id_or_name}/notification" + + +def send_msg_v1(module, token, room, msg_from, msg, msg_format='text', + color='yellow', notify=False, api=MSG_URI_V1): + '''sending message to hipchat v1 server''' + + params = {} + params['room_id'] = room + params['from'] = msg_from[:15] # max length is 15 + params['message'] = msg + params['message_format'] = msg_format + params['color'] = color + params['api'] = api + params['notify'] = int(notify) + + url = api + MSG_URI_V1 + "?auth_token=%s" % (token) + data = urlencode(params) + + if module.check_mode: + # In check mode, exit before actually sending the message + module.exit_json(changed=False) + + response, info = fetch_url(module, url, data=data) + if info['status'] == 200: + return response.read() + else: + module.fail_json(msg="failed to send message, return status=%s" % str(info['status'])) + + +def send_msg_v2(module, token, room, msg_from, msg, msg_format='text', + color='yellow', notify=False, api=NOTIFY_URI_V2): + '''sending message to hipchat v2 server''' + + headers = {'Authorization': 'Bearer %s' % token, 'Content-Type': 'application/json'} + + body = dict() + body['message'] = msg + body['color'] = color + body['message_format'] = msg_format + body['notify'] = notify + + POST_URL = api + NOTIFY_URI_V2 + + url = POST_URL.replace('{id_or_name}', pathname2url(room)) + data = json.dumps(body) + + if module.check_mode: + # In check mode, exit before actually sending the message + module.exit_json(changed=False) + + response, info = fetch_url(module, url, data=data, headers=headers, method='POST') + + # https://www.hipchat.com/docs/apiv2/method/send_room_notification shows + # 204 to be the expected result code. + if info['status'] in [200, 204]: + return response.read() + else: + module.fail_json(msg="failed to send message, return status=%s" % str(info['status'])) + + +# =========================================== +# Module execution. 
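+#
+# For orientation, main() below selects the API version from the configured
+# URL; schematically (illustrative, mirroring the code that follows):
+#
+#     if api.find('/v2') != -1:
+#         send_msg_v2(...)   # JSON body, Authorization: Bearer header
+#     else:
+#         send_msg_v1(...)   # urlencoded form, auth_token query parameter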
+#
+
+def main():
+
+    module = AnsibleModule(
+        argument_spec=dict(
+            token=dict(required=True, no_log=True),
+            room=dict(required=True),
+            msg=dict(required=True),
+            msg_from=dict(default="Ansible", aliases=['from']),
+            color=dict(default="yellow", choices=["yellow", "red", "green",
+                                                  "purple", "gray", "random"]),
+            msg_format=dict(default="text", choices=["text", "html"]),
+            notify=dict(default=True, type='bool'),
+            validate_certs=dict(default='yes', type='bool'),
+            api=dict(default=DEFAULT_URI),
+        ),
+        supports_check_mode=True
+    )
+
+    token = module.params["token"]
+    room = str(module.params["room"])
+    msg = module.params["msg"]
+    msg_from = module.params["msg_from"]
+    color = module.params["color"]
+    msg_format = module.params["msg_format"]
+    notify = module.params["notify"]
+    api = module.params["api"]
+
+    try:
+        if api.find('/v2') != -1:
+            send_msg_v2(module, token, room, msg_from, msg, msg_format, color, notify, api)
+        else:
+            send_msg_v1(module, token, room, msg_from, msg, msg_format, color, notify, api)
+    except Exception as e:
+        module.fail_json(msg="unable to send msg: %s" % to_native(e), exception=traceback.format_exc())
+
+    changed = True
+    module.exit_json(changed=changed, room=room, msg_from=msg_from, msg=msg)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/notification/irc.py b/plugins/modules/notification/irc.py
new file mode 100644
index 0000000000..368ec97c1e
--- /dev/null
+++ b/plugins/modules/notification/irc.py
@@ -0,0 +1,291 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2013, Jan-Piet Mens
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['stableinterface'],
+                    'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: irc
+short_description: Send a message to an IRC channel or a nick
+description:
+   - Send a message to an IRC channel or a nick. This is a very simplistic implementation.
+options:
+  server:
+    description:
+      - IRC server name/address
+    default: localhost
+  port:
+    description:
+      - IRC server port number
+    default: 6667
+  nick:
+    description:
+      - Nickname to send the message from. May be shortened, depending on server's NICKLEN setting.
+    default: ansible
+  msg:
+    description:
+      - The message body.
+    required: true
+  topic:
+    description:
+      - Set the channel topic
+  color:
+    description:
+      - Text color for the message. C(none) is a valid option in 1.6 and later; in versions before 1.6 the default color was black, not C(none).
+        Eleven more colors were added in version 2.0.
+    default: "none"
+    choices: [ "none", "white", "black", "blue", "green", "red", "brown", "purple", "orange", "yellow", "light_green", "teal", "light_cyan",
+               "light_blue", "pink", "gray", "light_gray"]
+  channel:
+    description:
+      - Channel name. One of nick_to or channel needs to be set. When both are set, the message will be sent to both of them.
+    required: false
+  nick_to:
+    description:
+      - A list of nicknames to send the message to. One of nick_to or channel needs to be set. When both are defined, the message will be sent to both of them.
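+# Note that an unquoted '#' begins a comment in YAML, so channel names such as
+# '#t1' must be quoted in playbooks (channel: '#t1'), as in the examples below.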
+  key:
+    description:
+      - Channel key
+  passwd:
+    description:
+      - Server password
+  timeout:
+    description:
+      - Timeout to use while waiting for successful registration and join
+        messages, this is to prevent an endless loop
+    default: 30
+  use_ssl:
+    description:
+      - Designates whether TLS/SSL should be used when connecting to the IRC server
+    type: bool
+    default: 'no'
+  part:
+    description:
+      - Designates whether user should part from channel after sending message or not.
+        Useful for when using a faux bot and not wanting join/parts between messages.
+    type: bool
+    default: 'yes'
+  style:
+    description:
+      - Text style for the message. Note that italic does not work on some clients
+    choices: [ "bold", "underline", "reverse", "italic" ]
+
+# informational: requirements for nodes
+requirements: [ socket ]
+author:
+    - "Jan-Piet Mens (@jpmens)"
+    - "Matt Martz (@sivel)"
+'''

+EXAMPLES = '''
+- irc:
+    server: irc.example.net
+    channel: '#t1'
+    msg: Hello world
+
+- local_action:
+    module: irc
+    port: 6669
+    server: irc.example.net
+    channel: '#t1'
+    msg: 'All finished at {{ ansible_date_time.iso8601 }}'
+    color: red
+    nick: ansibleIRC
+
+- local_action:
+    module: irc
+    port: 6669
+    server: irc.example.net
+    channel: '#t1'
+    nick_to:
+      - nick1
+      - nick2
+    msg: 'All finished at {{ ansible_date_time.iso8601 }}'
+    color: red
+    nick: ansibleIRC
+'''
+
+# ===========================================
+# IRC module support methods.
+#
+
+import re
+import socket
+import ssl
+import time
+import traceback
+
+from ansible.module_utils._text import to_native, to_bytes
+from ansible.module_utils.basic import AnsibleModule
+
+
+def send_msg(msg, server='localhost', port='6667', channel=None, nick_to=None, key=None, topic=None,
+             nick="ansible", color='none', passwd=False, timeout=30, use_ssl=False, part=True, style=None):
+    '''send message to IRC'''
+    nick_to = [] if nick_to is None else nick_to
+
+    colornumbers = {
+        'white': "00",
+        'black': "01",
+        'blue': "02",
+        'green': "03",
+        'red': "04",
+        'brown': "05",
+        'purple': "06",
+        'orange': "07",
+        'yellow': "08",
+        'light_green': "09",
+        'teal': "10",
+        'light_cyan': "11",
+        'light_blue': "12",
+        'pink': "13",
+        'gray': "14",
+        'light_gray': "15",
+    }
+
+    stylechoices = {
+        'bold': "\x02",
+        'underline': "\x1F",
+        'reverse': "\x16",
+        'italic': "\x1D",
+    }
+
+    try:
+        styletext = stylechoices[style]
+    except Exception:
+        styletext = ""
+
+    try:
+        colornumber = colornumbers[color]
+        colortext = "\x03" + colornumber
+    except Exception:
+        colortext = ""
+
+    message = styletext + colortext + msg
+
+    irc = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+    if use_ssl:
+        irc = ssl.wrap_socket(irc)
+    irc.connect((server, int(port)))
+
+    if passwd:
+        irc.send(to_bytes('PASS %s\r\n' % passwd))
+    irc.send(to_bytes('NICK %s\r\n' % nick))
+    irc.send(to_bytes('USER %s %s %s :ansible IRC\r\n' % (nick, nick, nick)))
+    motd = ''
+    start = time.time()
+    while 1:
+        motd += to_native(irc.recv(1024))
+        # The server might send back a shorter nick than we specified (due to NICKLEN),
+        # so grab that and use it from now on (assuming we find the 00[1-4] response).
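+        # Numerics 001-004 (RPL_WELCOME through RPL_MYINFO) carry the nick the
+        # server actually assigned as their first parameter; the named group in
+        # the pattern below captures it so the later JOIN check uses that nick.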
+        match = re.search(r'^:\S+ 00[1-4] (?P<nick>\S+) :', motd, flags=re.M)
+        if match:
+            nick = match.group('nick')
+            break
+        elif time.time() - start > timeout:
+            raise Exception('Timeout waiting for IRC server welcome response')
+        time.sleep(0.5)
+
+    if channel:
+        if key:
+            irc.send(to_bytes('JOIN %s %s\r\n' % (channel, key)))
+        else:
+            irc.send(to_bytes('JOIN %s\r\n' % channel))
+
+        join = ''
+        start = time.time()
+        while 1:
+            join += to_native(irc.recv(1024))
+            if re.search(r'^:\S+ 366 %s %s :' % (nick, channel), join, flags=re.M | re.I):
+                break
+            elif time.time() - start > timeout:
+                raise Exception('Timeout waiting for IRC JOIN response')
+            time.sleep(0.5)
+
+        if topic is not None:
+            irc.send(to_bytes('TOPIC %s :%s\r\n' % (channel, topic)))
+            time.sleep(1)
+
+    if nick_to:
+        for nick in nick_to:
+            irc.send(to_bytes('PRIVMSG %s :%s\r\n' % (nick, message)))
+    if channel:
+        irc.send(to_bytes('PRIVMSG %s :%s\r\n' % (channel, message)))
+    time.sleep(1)
+    if part:
+        if channel:
+            irc.send(to_bytes('PART %s\r\n' % channel))
+        irc.send(to_bytes('QUIT\r\n'))
+        time.sleep(1)
+    irc.close()
+
+# ===========================================
+# Main
+#
+
+
+def main():
+    module = AnsibleModule(
+        argument_spec=dict(
+            server=dict(default='localhost'),
+            port=dict(type='int', default=6667),
+            nick=dict(default='ansible'),
+            nick_to=dict(required=False, type='list'),
+            msg=dict(required=True),
+            color=dict(default="none", aliases=['colour'], choices=["white", "black", "blue",
+                                                                    "green", "red", "brown",
+                                                                    "purple", "orange", "yellow",
+                                                                    "light_green", "teal", "light_cyan",
+                                                                    "light_blue", "pink", "gray",
+                                                                    "light_gray", "none"]),
+            style=dict(default="none", choices=["underline", "reverse", "bold", "italic", "none"]),
+            channel=dict(required=False),
+            key=dict(no_log=True),
+            topic=dict(),
+            passwd=dict(no_log=True),
+            timeout=dict(type='int', default=30),
+            part=dict(type='bool', default=True),
+            use_ssl=dict(type='bool', default=False)
+        ),
+        supports_check_mode=True,
+        required_one_of=[['channel', 'nick_to']]
+    )
+
+    server = module.params["server"]
+    port = module.params["port"]
+    nick = module.params["nick"]
+    nick_to = module.params["nick_to"]
+    msg = module.params["msg"]
+    color = module.params["color"]
+    channel = module.params["channel"]
+    topic = module.params["topic"]
+    if topic and not channel:
+        module.fail_json(msg="When topic is specified, a channel is required.")
+    key = module.params["key"]
+    passwd = module.params["passwd"]
+    timeout = module.params["timeout"]
+    use_ssl = module.params["use_ssl"]
+    part = module.params["part"]
+    style = module.params["style"]
+
+    try:
+        send_msg(msg, server, port, channel, nick_to, key, topic, nick, color, passwd, timeout, use_ssl, part, style)
+    except Exception as e:
+        module.fail_json(msg="unable to send to IRC: %s" % to_native(e), exception=traceback.format_exc())
+
+    module.exit_json(changed=False, channel=channel, nick=nick,
+                     msg=msg)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/notification/jabber.py b/plugins/modules/notification/jabber.py
new file mode 100644
index 0000000000..201607116c
--- /dev/null
+++ b/plugins/modules/notification/jabber.py
@@ -0,0 +1,164 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (c) 2015, Brian Coca
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['stableinterface'],
+                    'supported_by':
'community'} + + +DOCUMENTATION = ''' +--- +module: jabber +short_description: Send a message to jabber user or chat room +description: + - Send a message to jabber +options: + user: + description: + - User as which to connect + required: true + password: + description: + - password for user to connect + required: true + to: + description: + - user ID or name of the room, when using room use a slash to indicate your nick. + required: true + msg: + description: + - The message body. + required: true + host: + description: + - host to connect, overrides user info + port: + description: + - port to connect to, overrides default + default: 5222 + encoding: + description: + - message encoding + +# informational: requirements for nodes +requirements: + - python xmpp (xmpppy) +author: "Brian Coca (@bcoca)" +''' + +EXAMPLES = ''' +# send a message to a user +- jabber: + user: mybot@example.net + password: secret + to: friend@example.net + msg: Ansible task finished + +# send a message to a room +- jabber: + user: mybot@example.net + password: secret + to: mychaps@conference.example.net/ansiblebot + msg: Ansible task finished + +# send a message, specifying the host and port +- jabber: + user: mybot@example.net + host: talk.example.net + port: 5223 + password: secret + to: mychaps@example.net + msg: Ansible task finished +''' + +import time +import traceback + +HAS_XMPP = True +XMPP_IMP_ERR = None +try: + import xmpp +except ImportError: + XMPP_IMP_ERR = traceback.format_exc() + HAS_XMPP = False + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib +from ansible.module_utils._text import to_native + + +def main(): + + module = AnsibleModule( + argument_spec=dict( + user=dict(required=True), + password=dict(required=True, no_log=True), + to=dict(required=True), + msg=dict(required=True), + host=dict(required=False), + port=dict(required=False, default=5222, type='int'), + encoding=dict(required=False), + ), + supports_check_mode=True + ) + + if not HAS_XMPP: + module.fail_json(msg=missing_required_lib('xmpppy'), exception=XMPP_IMP_ERR) + + jid = xmpp.JID(module.params['user']) + user = jid.getNode() + server = jid.getDomain() + port = module.params['port'] + password = module.params['password'] + try: + to, nick = module.params['to'].split('/', 1) + except ValueError: + to, nick = module.params['to'], None + + if module.params['host']: + host = module.params['host'] + else: + host = server + if module.params['encoding']: + xmpp.simplexml.ENCODING = module.params['encoding'] + + msg = xmpp.protocol.Message(body=module.params['msg']) + + try: + conn = xmpp.Client(server, debug=[]) + if not conn.connect(server=(host, port)): + module.fail_json(rc=1, msg='Failed to connect to server: %s' % (server)) + if not conn.auth(user, password, 'Ansible'): + module.fail_json(rc=1, msg='Failed to authorize %s on: %s' % (user, server)) + # some old servers require this, also the sleep following send + conn.sendInitPresence(requestRoster=0) + + if nick: # sending to room instead of user, need to join + msg.setType('groupchat') + msg.setTag('x', namespace='http://jabber.org/protocol/muc#user') + join = xmpp.Presence(to=module.params['to']) + join.setTag('x', namespace='http://jabber.org/protocol/muc') + conn.send(join) + time.sleep(1) + else: + msg.setType('chat') + + msg.setTo(to) + if not module.check_mode: + conn.send(msg) + time.sleep(1) + conn.disconnect() + except Exception as e: + module.fail_json(msg="unable to send msg: %s" % to_native(e), exception=traceback.format_exc()) + + 
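    # NOTE: changed=False is reported even after a successful send; the
    # notification is not treated as a change to managed state.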
    module.exit_json(changed=False, to=to, user=user, msg=msg.getBody())
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/notification/logentries_msg.py b/plugins/modules/notification/logentries_msg.py
new file mode 100644
index 0000000000..4a37f8cf77
--- /dev/null
+++ b/plugins/modules/notification/logentries_msg.py
@@ -0,0 +1,98 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'status': ['preview'],
+                    'supported_by': 'community',
+                    'metadata_version': '1.1'}
+
+DOCUMENTATION = '''
+---
+module: logentries_msg
+short_description: Send a message to logentries.
+description:
+    - Send a message to logentries
+requirements:
+  - "python >= 2.6"
+options:
+  token:
+    description:
+      - Log token.
+    required: true
+  msg:
+    description:
+      - The message body.
+    required: true
+  api:
+    description:
+      - API endpoint
+    default: data.logentries.com
+  port:
+    description:
+      - API endpoint port
+    default: 80
+author: "Jimmy Tang (@jcftang)"
+'''
+
+RETURN = '''# '''
+
+EXAMPLES = '''
+- logentries_msg:
+    token=00000000-0000-0000-0000-000000000000
+    msg="{{ ansible_hostname }}"
+'''
+
+import socket
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_bytes
+
+
+def send_msg(module, token, msg, api, port):
+
+    message = "{0} {1}\n".format(token, msg)
+
+    api_ip = socket.gethostbyname(api)
+
+    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+    s.connect((api_ip, port))
+    try:
+        if not module.check_mode:
+            # the payload must be bytes on Python 3, hence to_bytes()
+            s.send(to_bytes(message))
+    except Exception as e:
+        module.fail_json(msg="failed to send message, msg=%s" % e)
+    s.close()
+
+
+def main():
+    module = AnsibleModule(
+        argument_spec=dict(
+            token=dict(type='str', required=True),
+            msg=dict(type='str', required=True),
+            api=dict(type='str', default="data.logentries.com"),
+            port=dict(type='int', default=80)),
+        supports_check_mode=True
+    )
+
+    token = module.params["token"]
+    msg = module.params["msg"]
+    api = module.params["api"]
+    port = module.params["port"]
+
+    changed = False
+    try:
+        send_msg(module, token, msg, api, port)
+        changed = True
+    except Exception as e:
+        module.fail_json(msg="unable to send msg: %s" % e)
+
+    module.exit_json(changed=changed, msg=msg)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/notification/mail.py b/plugins/modules/notification/mail.py
new file mode 100644
index 0000000000..63eb3d6c05
--- /dev/null
+++ b/plugins/modules/notification/mail.py
@@ -0,0 +1,389 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2012, Dag Wieers (@dagwieers)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['stableinterface'],
+                    'supported_by': 'community'}
+
+DOCUMENTATION = r'''
+---
+author:
+- Dag Wieers (@dagwieers)
+module: mail
+short_description: Send an email
+description:
+- This module is useful for sending emails from playbooks.
+- One may wonder why automate sending emails? In complex environments
+  there are from time to time processes that cannot be automated, either
+  because you lack the authority to make it so, or because not everyone
+  agrees to a common approach.
+- If you cannot automate a specific step, but the step is non-blocking, + sending out an email to the responsible party to make them perform their + part of the bargain is an elegant way to put the responsibility in + someone else's lap. +- Of course sending out a mail can be equally useful as a way to notify + one or more people in a team that a specific action has been + (successfully) taken. +options: + from: + description: + - The email-address the mail is sent from. May contain address and phrase. + type: str + default: root + to: + description: + - The email-address(es) the mail is being sent to. + - This is a list, which may contain address and phrase portions. + type: list + default: root + aliases: [ recipients ] + cc: + description: + - The email-address(es) the mail is being copied to. + - This is a list, which may contain address and phrase portions. + type: list + bcc: + description: + - The email-address(es) the mail is being 'blind' copied to. + - This is a list, which may contain address and phrase portions. + type: list + subject: + description: + - The subject of the email being sent. + required: yes + type: str + body: + description: + - The body of the email being sent. + type: str + default: $subject + username: + description: + - If SMTP requires username. + type: str + password: + description: + - If SMTP requires password. + type: str + host: + description: + - The mail server. + type: str + default: localhost + port: + description: + - The mail server port. + - This must be a valid integer between 1 and 65534 + type: int + default: 25 + attach: + description: + - A list of pathnames of files to attach to the message. + - Attached files will have their content-type set to C(application/octet-stream). + type: list + default: [] + headers: + description: + - A list of headers which should be added to the message. + - Each individual header is specified as C(header=value) (see example below). + type: list + default: [] + charset: + description: + - The character set of email being sent. + type: str + default: utf-8 + subtype: + description: + - The minor mime type, can be either C(plain) or C(html). + - The major type is always C(text). + type: str + choices: [ html, plain ] + default: plain + secure: + description: + - If C(always), the connection will only send email if the connection is Encrypted. + If the server doesn't accept the encrypted connection it will fail. + - If C(try), the connection will attempt to setup a secure SSL/TLS session, before trying to send. + - If C(never), the connection will not attempt to setup a secure SSL/TLS session, before sending + - If C(starttls), the connection will try to upgrade to a secure SSL/TLS connection, before sending. + If it is unable to do so it will fail. + type: str + choices: [ always, never, starttls, try ] + default: try + timeout: + description: + - Sets the timeout in seconds for connection attempts. + type: int + default: 20 +''' + +EXAMPLES = r''' +- name: Example playbook sending mail to root + mail: + subject: System {{ ansible_hostname }} has been successfully provisioned. + delegate_to: localhost + +- name: Sending an e-mail using Gmail SMTP servers + mail: + host: smtp.gmail.com + port: 587 + username: username@gmail.com + password: mysecret + to: John Smith + subject: Ansible-report + body: System {{ ansible_hostname }} has been successfully provisioned. 
+ delegate_to: localhost + +- name: Send e-mail to a bunch of users, attaching files + mail: + host: 127.0.0.1 + port: 2025 + subject: Ansible-report + body: Hello, this is an e-mail. I hope you like it ;-) + from: jane@example.net (Jane Jolie) + to: + - John Doe + - Suzie Something + cc: Charlie Root + attach: + - /etc/group + - /tmp/avatar2.png + headers: + - Reply-To=john@example.com + - X-Special="Something or other" + charset: us-ascii + delegate_to: localhost + +- name: Sending an e-mail using the remote machine, not the Ansible controller node + mail: + host: localhost + port: 25 + to: John Smith + subject: Ansible-report + body: System {{ ansible_hostname }} has been successfully provisioned. + +- name: Sending an e-mail using Legacy SSL to the remote machine + mail: + host: localhost + port: 25 + to: John Smith + subject: Ansible-report + body: System {{ ansible_hostname }} has been successfully provisioned. + secure: always + +- name: Sending an e-mail using StartTLS to the remote machine + mail: + host: localhost + port: 25 + to: John Smith + subject: Ansible-report + body: System {{ ansible_hostname }} has been successfully provisioned. + secure: starttls +''' + +import os +import smtplib +import ssl +import traceback +from email import encoders +from email.utils import parseaddr, formataddr, formatdate +from email.mime.base import MIMEBase +from email.mime.multipart import MIMEMultipart +from email.mime.text import MIMEText +from email.header import Header + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.six import PY3 +from ansible.module_utils._text import to_native + + +def main(): + + module = AnsibleModule( + argument_spec=dict( + username=dict(type='str'), + password=dict(type='str', no_log=True), + host=dict(type='str', default='localhost'), + port=dict(type='int', default=25), + sender=dict(type='str', default='root', aliases=['from']), + to=dict(type='list', default=['root'], aliases=['recipients']), + cc=dict(type='list', default=[]), + bcc=dict(type='list', default=[]), + subject=dict(type='str', required=True, aliases=['msg']), + body=dict(type='str'), + attach=dict(type='list', default=[]), + headers=dict(type='list', default=[]), + charset=dict(type='str', default='utf-8'), + subtype=dict(type='str', default='plain', choices=['html', 'plain']), + secure=dict(type='str', default='try', choices=['always', 'never', 'starttls', 'try']), + timeout=dict(type='int', default=20), + ), + required_together=[['password', 'username']], + ) + + username = module.params.get('username') + password = module.params.get('password') + host = module.params.get('host') + port = module.params.get('port') + sender = module.params.get('sender') + recipients = module.params.get('to') + copies = module.params.get('cc') + blindcopies = module.params.get('bcc') + subject = module.params.get('subject') + body = module.params.get('body') + attach_files = module.params.get('attach') + headers = module.params.get('headers') + charset = module.params.get('charset') + subtype = module.params.get('subtype') + secure = module.params.get('secure') + timeout = module.params.get('timeout') + + code = 0 + secure_state = False + sender_phrase, sender_addr = parseaddr(sender) + + if not body: + body = subject + + try: + if secure != 'never': + try: + if PY3: + smtp = smtplib.SMTP_SSL(host=host, port=port, timeout=timeout) + else: + smtp = smtplib.SMTP_SSL(timeout=timeout) + code, smtpmessage = smtp.connect(host, port) + secure_state = True + except ssl.SSLError as e: + 
                if secure == 'always':
+                    module.fail_json(rc=1, msg='Unable to start an encrypted session to %s:%s: %s' %
+                                     (host, port, to_native(e)), exception=traceback.format_exc())
+            except Exception:
+                pass
+
+        if not secure_state:
+            if PY3:
+                smtp = smtplib.SMTP(host=host, port=port, timeout=timeout)
+            else:
+                smtp = smtplib.SMTP(timeout=timeout)
+            code, smtpmessage = smtp.connect(host, port)
+
+    except smtplib.SMTPException as e:
+        module.fail_json(rc=1, msg='Unable to connect to %s:%s: %s' % (host, port, to_native(e)), exception=traceback.format_exc())
+
+    try:
+        smtp.ehlo()
+    except smtplib.SMTPException as e:
+        module.fail_json(rc=1, msg='EHLO failed for host %s:%s: %s' % (host, port, to_native(e)), exception=traceback.format_exc())
+
+    if int(code) > 0:
+        if not secure_state and secure in ('starttls', 'try'):
+            if smtp.has_extn('STARTTLS'):
+                try:
+                    smtp.starttls()
+                    secure_state = True
+                except smtplib.SMTPException as e:
+                    module.fail_json(rc=1, msg='Unable to start an encrypted session to %s:%s: %s' %
+                                     (host, port, to_native(e)), exception=traceback.format_exc())
+                try:
+                    smtp.ehlo()
+                except smtplib.SMTPException as e:
+                    module.fail_json(rc=1, msg='EHLO failed for host %s:%s: %s' % (host, port, to_native(e)), exception=traceback.format_exc())
+            else:
+                if secure == 'starttls':
+                    module.fail_json(rc=1, msg='StartTLS is not offered on server %s:%s' % (host, port))
+
+    if username and password:
+        if smtp.has_extn('AUTH'):
+            try:
+                smtp.login(username, password)
+            except smtplib.SMTPAuthenticationError:
+                module.fail_json(rc=1, msg='Authentication to %s:%s failed, please check your username and/or password' % (host, port))
+            except smtplib.SMTPException:
+                module.fail_json(rc=1, msg='No suitable authentication method was found on %s:%s' % (host, port))
+        else:
+            module.fail_json(rc=1, msg="No authentication offered by the server at %s:%s" % (host, port))
+
+    if not secure_state and (username and password):
+        module.warn('Username and password were sent without encryption')
+
+    msg = MIMEMultipart(_charset=charset)
+    msg['From'] = formataddr((sender_phrase, sender_addr))
+    msg['Date'] = formatdate(localtime=True)
+    msg['Subject'] = Header(subject, charset)
+    msg.preamble = "Multipart message"
+
+    for header in headers:
+        # NOTE: Backward compatible with old syntax using '|' as delimiter
+        for hdr in [x.strip() for x in header.split('|')]:
+            try:
+                h_key, h_val = hdr.split('=')
+                h_val = to_native(Header(h_val, charset))
+                msg.add_header(h_key, h_val)
+            except Exception:
+                module.warn("Skipping header '%s', unable to parse" % hdr)
+
+    if 'X-Mailer' not in msg:
+        msg.add_header('X-Mailer', 'Ansible mail module')
+
+    addr_list = []
+    for addr in [x.strip() for x in blindcopies]:
+        addr_list.append(parseaddr(addr)[1])    # address only, w/o phrase
+
+    to_list = []
+    for addr in [x.strip() for x in recipients]:
+        to_list.append(formataddr(parseaddr(addr)))
+        addr_list.append(parseaddr(addr)[1])    # address only, w/o phrase
+    msg['To'] = ", ".join(to_list)
+
+    cc_list = []
+    for addr in [x.strip() for x in copies]:
+        cc_list.append(formataddr(parseaddr(addr)))
+        addr_list.append(parseaddr(addr)[1])    # address only, w/o phrase
+    msg['Cc'] = ", ".join(cc_list)
+
+    part = MIMEText(body + "\n\n", _subtype=subtype, _charset=charset)
+    msg.attach(part)
+
+    # NOTE: Backward compatibility with old syntax using space as delimiter is not retained
+    #       This breaks files with spaces in it :-(
+    for filename in attach_files:
+        try:
+            part = MIMEBase('application', 'octet-stream')
+            with open(filename, 'rb') as fp:
part.set_payload(fp.read()) + encoders.encode_base64(part) + part.add_header('Content-disposition', 'attachment', filename=os.path.basename(filename)) + msg.attach(part) + except Exception as e: + module.fail_json(rc=1, msg="Failed to send mail: can't attach file %s: %s" % + (filename, to_native(e)), exception=traceback.format_exc()) + + composed = msg.as_string() + + try: + result = smtp.sendmail(sender_addr, set(addr_list), composed) + except Exception as e: + module.fail_json(rc=1, msg="Failed to send mail to '%s': %s" % + (", ".join(set(addr_list)), to_native(e)), exception=traceback.format_exc()) + + smtp.quit() + + if result: + for key in result: + module.warn("Failed to send mail to '%s': %s %s" % (key, result[key][0], result[key][1])) + module.exit_json(msg='Failed to send mail to at least one recipient', result=result) + + module.exit_json(msg='Mail sent successfully', result=result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/notification/matrix.py b/plugins/modules/notification/matrix.py new file mode 100644 index 0000000000..3921d3497f --- /dev/null +++ b/plugins/modules/notification/matrix.py @@ -0,0 +1,138 @@ +#!/usr/bin/python +# coding: utf-8 + +# (c) 2018, Jan Christian Grünhage +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +ANSIBLE_METADATA = { + 'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community' +} + +DOCUMENTATION = ''' +--- +author: "Jan Christian Grünhage (@jcgruenhage)" +module: matrix +short_description: Send notifications to matrix +description: + - This module sends html formatted notifications to matrix rooms. +options: + msg_plain: + description: + - Plain text form of the message to send to matrix, usually markdown + required: true + msg_html: + description: + - HTML form of the message to send to matrix + required: true + room_id: + description: + - ID of the room to send the notification to + required: true + hs_url: + description: + - URL of the homeserver, where the CS-API is reachable + required: true + token: + description: + - Authentication token for the API call. 
        If provided, user_id and password are not required.
+  user_id:
+    description:
+      - The user ID of the user
+  password:
+    description:
+      - The password to log in with
+requirements:
+  - matrix-client (Python library)
+'''
+
+EXAMPLES = '''
+- name: Send matrix notification with token
+  matrix:
+    msg_plain: "**hello world**"
+    msg_html: "<b>hello world</b>"
+    room_id: "!12345678:server.tld"
+    hs_url: "https://matrix.org"
+    token: "{{ matrix_auth_token }}"
+
+- name: Send matrix notification with user_id and password
+  matrix:
+    msg_plain: "**hello world**"
+    msg_html: "<b>hello world</b>"
+    room_id: "!12345678:server.tld"
+    hs_url: "https://matrix.org"
+    user_id: "ansible_notification_bot"
+    password: "{{ matrix_auth_password }}"
+'''
+
+RETURN = '''
+'''
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+
+MATRIX_IMP_ERR = None
+try:
+    from matrix_client.client import MatrixClient
+except ImportError:
+    MATRIX_IMP_ERR = traceback.format_exc()
+    matrix_found = False
+else:
+    matrix_found = True
+
+
+def run_module():
+    module_args = dict(
+        msg_plain=dict(type='str', required=True),
+        msg_html=dict(type='str', required=True),
+        room_id=dict(type='str', required=True),
+        hs_url=dict(type='str', required=True),
+        token=dict(type='str', required=False, no_log=True),
+        user_id=dict(type='str', required=False),
+        password=dict(type='str', required=False, no_log=True),
+    )
+
+    result = dict(
+        changed=False,
+        message=''
+    )
+
+    module = AnsibleModule(
+        argument_spec=module_args,
+        mutually_exclusive=[['password', 'token']],
+        required_one_of=[['password', 'token']],
+        required_together=[['user_id', 'password']],
+        supports_check_mode=True
+    )
+
+    if not matrix_found:
+        module.fail_json(msg=missing_required_lib('matrix-client'), exception=MATRIX_IMP_ERR)
+
+    if module.check_mode:
+        # exit (rather than plain return) so Ansible still receives valid JSON output
+        module.exit_json(**result)
+
+    # create a client object
+    client = MatrixClient(module.params['hs_url'])
+    if module.params['token'] is not None:
+        client.api.token = module.params['token']
+    else:
+        client.login(module.params['user_id'], module.params['password'], sync=False)
+
+    # make sure we are in a given room and return a room object for it
+    room = client.join_room(module.params['room_id'])
+    # send an html formatted messages
+    room.send_html(module.params['msg_html'], module.params['msg_plain'])
+
+    module.exit_json(**result)
+
+
+def main():
+    run_module()
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/notification/mattermost.py b/plugins/modules/notification/mattermost.py
new file mode 100644
index 0000000000..59c2934ce7
--- /dev/null
+++ b/plugins/modules/notification/mattermost.py
@@ -0,0 +1,150 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Benjamin Jolivot
+# Inspired by slack module :
+#   # (c) 2017, Steve Pletcher
+#   # (c) 2016, René Moser
+#   # (c) 2015, Stefan Berggren
+#   # (c) 2014, Ramon de la Fuente
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+module: mattermost
+short_description: Send Mattermost notifications
+description:
+    - Sends notifications to U(http://your.mattermost.url) via the Incoming WebHook integration.
+author: "Benjamin Jolivot (@bjolivot)"
+options:
+  url:
+    description:
+      - Mattermost URL (i.e. http://mattermost.yourcompany.com).
+    required: true
+  api_key:
+    description:
+      - Mattermost webhook API key. Log into your Mattermost site, go to
+        Menu -> Integration -> Incoming Webhook -> Add Incoming Webhook.
+        This will give you the full URL. I(api_key) is the last part.
+        http://mattermost.example.com/hooks/C(API_KEY)
+    required: true
+  text:
+    description:
+      - Text to send. Note that the module does not handle escaping characters.
+    required: true
+  channel:
+    description:
+      - Channel to send the message to. If absent, the message goes to the channel selected for the I(api_key).
+  username:
+    description:
+      - This is the sender of the message (Username Override needs to be enabled by the Mattermost admin; see the Mattermost documentation).
+    default: Ansible
+  icon_url:
+    description:
+      - URL for the message sender's icon.
+    default: https://www.ansible.com/favicon.ico
+  validate_certs:
+    description:
+      - If C(no), SSL certificates will not be validated. This should only be used
+        on personally controlled sites using self-signed certificates.
+    default: yes
+    type: bool
+'''
+
+EXAMPLES = """
+- name: Send notification message via Mattermost
+  mattermost:
+    url: http://mattermost.example.com
+    api_key: my_api_key
+    text: '{{ inventory_hostname }} completed'
+
+- name: Send notification message via Mattermost all options
+  mattermost:
+    url: http://mattermost.example.com
+    api_key: my_api_key
+    text: '{{ inventory_hostname }} completed'
+    channel: notifications
+    username: 'Ansible on {{ inventory_hostname }}'
+    icon_url: http://www.example.com/some-image-file.png
+"""
+
+RETURN = '''
+payload:
+    description: Mattermost payload
+    returned: success
+    type: str
+webhook_url:
+    description: URL the webhook is sent to
+    returned: success
+    type: str
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import fetch_url
+
+
+def main():
+    module = AnsibleModule(
+        supports_check_mode=True,
+        argument_spec=dict(
+            url=dict(type='str', required=True),
+            api_key=dict(type='str', required=True, no_log=True),
+            text=dict(type='str', required=True),
+            channel=dict(type='str', default=None),
+            username=dict(type='str', default='Ansible'),
+            icon_url=dict(type='str', default='https://www.ansible.com/favicon.ico'),
+            validate_certs=dict(default='yes', type='bool'),
+        )
+    )
+    # init return dict
+    result = dict(changed=False, msg="OK")
+
+    # define webhook
+    webhook_url = "{0}/hooks/{1}".format(module.params['url'], module.params['api_key'])
+    result['webhook_url'] = webhook_url
+
+    # define payload
+    payload = {}
+    for param in ['text', 'channel', 'username', 'icon_url']:
+        if module.params[param] is not None:
+            payload[param] = module.params[param]
+
+    payload = module.jsonify(payload)
+    result['payload'] = payload
+
+    # http headers
+    headers = {
+        'Content-Type': 'application/json',
+        'Accept': 'application/json',
+    }
+
+    # notes:
+    # Nothing is verified in check mode; the task will pass even if your
+    # server is down and/or your token is invalid.
+    # If someone finds a good way to check...
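+    # For reference, with the first example above the resulting payload is
+    # roughly: {"text": "host01 completed", "username": "Ansible",
+    #           "icon_url": "https://www.ansible.com/favicon.ico"}
+    # (channel is omitted when not set, so the webhook's default channel is used)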
+
+    # send request if not in test mode
+    if module.check_mode is False:
+        response, info = fetch_url(module=module, url=webhook_url, headers=headers, method='POST', data=payload)
+
+        # something's wrong
+        if info['status'] != 200:
+            # some problem
+            result['msg'] = "Failed to send mattermost message, the error was: {0}".format(info['msg'])
+            module.fail_json(**result)
+
+    # Looks good
+    module.exit_json(**result)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/notification/mqtt.py b/plugins/modules/notification/mqtt.py
new file mode 100644
index 0000000000..5044e187cd
--- /dev/null
+++ b/plugins/modules/notification/mqtt.py
@@ -0,0 +1,241 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2013, 2014, Jan-Piet Mens
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: mqtt
+short_description: Publish a message on an MQTT topic for the IoT
+description:
+   - Publish a message on an MQTT topic.
+options:
+  server:
+    description:
+      - MQTT broker address/name
+    default: localhost
+  port:
+    description:
+      - MQTT broker port number
+    default: 1883
+  username:
+    description:
+      - Username to authenticate against the broker.
+  password:
+    description:
+      - Password for C(username) to authenticate against the broker.
+  client_id:
+    description:
+      - MQTT client identifier
+    default: hostname + pid
+  topic:
+    description:
+      - MQTT topic name
+    required: true
+  payload:
+    description:
+      - Payload. The special string C("None") may be used to send a NULL
+        (i.e. empty) payload which is useful to simply notify with the I(topic)
+        or to clear previously retained messages.
+    required: true
+  qos:
+    description:
+      - QoS (Quality of Service)
+    default: 0
+    choices: [ "0", "1", "2" ]
+  retain:
+    description:
+      - Setting this flag causes the broker to retain (i.e. keep) the message so that
+        applications that subsequently subscribe to the topic can receive the last
+        retained message immediately.
+    type: bool
+    default: 'no'
+  ca_cert:
+    description:
+      - The path to the Certificate Authority certificate files that are to be
+        treated as trusted by this client. If this is the only option given
+        then the client will operate in a similar manner to a web browser. That
+        is to say it will require the broker to have a certificate signed by the
+        Certificate Authorities in ca_certs and will communicate using TLS v1,
+        but will not attempt any form of authentication. This provides basic
+        network encryption but may not be sufficient depending on how the broker
+        is configured.
+    aliases: [ ca_certs ]
+  client_cert:
+    description:
+      - The path pointing to the PEM encoded client certificate. If this is not
+        None it will be used as client information for TLS based
+        authentication. Support for this feature is broker dependent.
+    aliases: [ certfile ]
+  client_key:
+    description:
+      - The path pointing to the PEM encoded client private key. If this is not
+        None it will be used as client information for TLS based
+        authentication. Support for this feature is broker dependent.
+    aliases: [ keyfile ]
+  tls_version:
+    description:
+      - Specifies the version of the SSL/TLS protocol to be used.
+      - By default (if the python version supports it) the highest TLS version is
+        detected. If unavailable, TLS v1 is used.
+ type: str + choices: + - tlsv1.1 + - tlsv1.2 +requirements: [ mosquitto ] +notes: + - This module requires a connection to an MQTT broker such as Mosquitto + U(http://mosquitto.org) and the I(Paho) C(mqtt) Python client (U(https://pypi.org/project/paho-mqtt/)). +author: "Jan-Piet Mens (@jpmens)" +''' + +EXAMPLES = ''' +- mqtt: + topic: 'service/ansible/{{ ansible_hostname }}' + payload: 'Hello at {{ ansible_date_time.iso8601 }}' + qos: 0 + retain: False + client_id: ans001 + delegate_to: localhost +''' + +# =========================================== +# MQTT module support methods. +# + +import os +import ssl +import traceback +import platform +from distutils.version import LooseVersion + +HAS_PAHOMQTT = True +PAHOMQTT_IMP_ERR = None +try: + import socket + import paho.mqtt.publish as mqtt +except ImportError: + PAHOMQTT_IMP_ERR = traceback.format_exc() + HAS_PAHOMQTT = False + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib +from ansible.module_utils._text import to_native + + +# =========================================== +# Main +# + +def main(): + tls_map = {} + + try: + tls_map['tlsv1.2'] = ssl.PROTOCOL_TLSv1_2 + except AttributeError: + pass + + try: + tls_map['tlsv1.1'] = ssl.PROTOCOL_TLSv1_1 + except AttributeError: + pass + + module = AnsibleModule( + argument_spec=dict( + server=dict(default='localhost'), + port=dict(default=1883, type='int'), + topic=dict(required=True), + payload=dict(required=True), + client_id=dict(default=None), + qos=dict(default="0", choices=["0", "1", "2"]), + retain=dict(default=False, type='bool'), + username=dict(default=None), + password=dict(default=None, no_log=True), + ca_cert=dict(default=None, type='path', aliases=['ca_certs']), + client_cert=dict(default=None, type='path', aliases=['certfile']), + client_key=dict(default=None, type='path', aliases=['keyfile']), + tls_version=dict(default=None, choices=['tlsv1.1', 'tlsv1.2']) + ), + supports_check_mode=True + ) + + if not HAS_PAHOMQTT: + module.fail_json(msg=missing_required_lib('paho-mqtt'), exception=PAHOMQTT_IMP_ERR) + + server = module.params.get("server", 'localhost') + port = module.params.get("port", 1883) + topic = module.params.get("topic") + payload = module.params.get("payload") + client_id = module.params.get("client_id", '') + qos = int(module.params.get("qos", 0)) + retain = module.params.get("retain") + username = module.params.get("username", None) + password = module.params.get("password", None) + ca_certs = module.params.get("ca_cert", None) + certfile = module.params.get("client_cert", None) + keyfile = module.params.get("client_key", None) + tls_version = module.params.get("tls_version", None) + + if client_id is None: + client_id = "%s_%s" % (socket.getfqdn(), os.getpid()) + + if payload and payload == 'None': + payload = None + + auth = None + if username is not None: + auth = {'username': username, 'password': password} + + tls = None + if ca_certs is not None: + if tls_version: + tls_version = tls_map.get(tls_version, ssl.PROTOCOL_SSLv23) + else: + if LooseVersion(platform.python_version()) <= "3.5.2": + # Specifying `None` on later versions of python seems sufficient to + # instruct python to autonegotiate the SSL/TLS connection. On versions + # 3.5.2 and lower though we need to specify the version. + # + # Note that this is an alias for PROTOCOL_TLS, but PROTOCOL_TLS was + # not available until 3.5.3. 
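+                # Illustrative note (not part of the original code): for
+                # example, LooseVersion("3.5.2") <= "3.5.2" is true, so the
+                # fallback assignment below applies up to and including
+                # Python 3.5.2; on newer interpreters tls_version stays None
+                # and the ssl library negotiates the highest protocol version
+                # both sides support.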
+ tls_version = ssl.PROTOCOL_SSLv23 + + tls = { + 'ca_certs': ca_certs, + 'certfile': certfile, + 'keyfile': keyfile, + 'tls_version': tls_version, + } + + try: + mqtt.single( + topic, + payload, + qos=qos, + retain=retain, + client_id=client_id, + hostname=server, + port=port, + auth=auth, + tls=tls + ) + except Exception as e: + module.fail_json( + msg="unable to publish to MQTT broker %s" % to_native(e), + exception=traceback.format_exc() + ) + + module.exit_json(changed=False, topic=topic) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/notification/nexmo.py b/plugins/modules/notification/nexmo.py new file mode 100644 index 0000000000..b8316b57ce --- /dev/null +++ b/plugins/modules/notification/nexmo.py @@ -0,0 +1,135 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2014, Matt Martz +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +module: nexmo +short_description: Send a SMS via nexmo +description: + - Send a SMS message via nexmo +author: "Matt Martz (@sivel)" +options: + api_key: + description: + - Nexmo API Key + required: true + api_secret: + description: + - Nexmo API Secret + required: true + src: + description: + - Nexmo Number to send from + required: true + dest: + description: + - Phone number(s) to send SMS message to + required: true + msg: + description: + - Message to text to send. Messages longer than 160 characters will be + split into multiple messages + required: true + validate_certs: + description: + - If C(no), SSL certificates will not be validated. This should only be used + on personally controlled sites using self-signed certificates. 
+    type: bool
+    default: 'yes'
+extends_documentation_fragment:
+- url
+'''
+
+EXAMPLES = """
+- name: Send notification message via Nexmo
+  nexmo:
+    api_key: 640c8a53
+    api_secret: 0ce239a6
+    src: 12345678901
+    dest:
+      - 10987654321
+      - 16789012345
+    msg: '{{ inventory_hostname }} completed'
+  delegate_to: localhost
+"""
+import json
+
+from ansible.module_utils.six.moves.urllib.parse import urlencode
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import fetch_url, url_argument_spec
+
+
+NEXMO_API = 'https://rest.nexmo.com/sms/json'
+
+
+def send_msg(module):
+    failed = list()
+    responses = dict()
+    msg = {
+        'api_key': module.params.get('api_key'),
+        'api_secret': module.params.get('api_secret'),
+        'from': module.params.get('src'),
+        'text': module.params.get('msg')
+    }
+    for number in module.params.get('dest'):
+        msg['to'] = number
+        url = "%s?%s" % (NEXMO_API, urlencode(msg))
+
+        headers = dict(Accept='application/json')
+        response, info = fetch_url(module, url, headers=headers)
+        if info['status'] != 200:
+            failed.append(number)
+            responses[number] = dict(failed=True)
+
+        try:
+            responses[number] = json.load(response)
+        except Exception:
+            failed.append(number)
+            responses[number] = dict(failed=True)
+        else:
+            for message in responses[number]['messages']:
+                if int(message['status']) != 0:
+                    failed.append(number)
+                    responses[number] = dict(failed=True, **responses[number])
+
+    if failed:
+        msg = 'One or more messages failed to send'
+    else:
+        msg = ''
+
+    module.exit_json(failed=bool(failed), msg=msg, changed=False,
+                     responses=responses)
+
+
+def main():
+    argument_spec = url_argument_spec()
+    argument_spec.update(
+        dict(
+            api_key=dict(required=True, no_log=True),
+            api_secret=dict(required=True, no_log=True),
+            src=dict(required=True, type='int'),
+            dest=dict(required=True, type='list'),
+            msg=dict(required=True),
+        ),
+    )
+
+    module = AnsibleModule(
+        argument_spec=argument_spec
+    )
+
+    send_msg(module)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/notification/office_365_connector_card.py b/plugins/modules/notification/office_365_connector_card.py
new file mode 100644
index 0000000000..eec47fdac2
--- /dev/null
+++ b/plugins/modules/notification/office_365_connector_card.py
@@ -0,0 +1,296 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright (c) 2017 Marc Sensenich
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+DOCUMENTATION = '''
+module: office_365_connector_card
+short_description: Use webhooks to create Connector Card messages within an Office 365 group
+description:
+  - Creates Connector Card messages through
+  - Office 365 Connectors U(https://dev.outlook.com/Connectors)
+author: "Marc Sensenich (@marc-sensenich)"
+notes:
+  - This module is not idempotent; if the same task is run twice,
+    two Connector Cards will be created.
+options:
+  webhook:
+    description:
+      - The webhook URL is given to you when you create a new Connector.
+    required: true
+  summary:
+    description:
+      - A string used for summarizing card content.
+      - This will be shown as the message subject.
+      - This is required if the text parameter isn't populated.
+  color:
+    description:
+      - Accent color used for branding or indicating status in the card.
+ title: + description: + - A title for the Connector message. Shown at the top of the message. + text: + description: + - The main text of the card. + - This will be rendered below the sender information and optional title, + - and above any sections or actions present. + actions: + description: + - This array of objects will power the action links + - found at the bottom of the card. + sections: + description: + - Contains a list of sections to display in the card. + - For more information see https://dev.outlook.com/Connectors/reference. +''' + +EXAMPLES = """ +- name: Create a simple Connector Card + office_365_connector_card: + webhook: https://outlook.office.com/webhook/GUID/IncomingWebhook/GUID/GUID + text: 'Hello, World!' + +- name: Create a Connector Card with the full format + office_365_connector_card: + webhook: https://outlook.office.com/webhook/GUID/IncomingWebhook/GUID/GUID + summary: This is the summary property + title: This is the **card's title** property + text: This is the **card's text** property. Lorem ipsum dolor sit amet, consectetur + adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. + color: E81123 + sections: + - title: This is the **section's title** property + activity_image: http://connectorsdemo.azurewebsites.net/images/MSC12_Oscar_002.jpg + activity_title: This is the section's **activityTitle** property + activity_subtitle: This is the section's **activitySubtitle** property + activity_text: This is the section's **activityText** property. + hero_image: + image: http://connectorsdemo.azurewebsites.net/images/WIN12_Scene_01.jpg + title: This is the image's alternate text + text: This is the section's text property. Lorem ipsum dolor sit amet, consectetur + adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. + facts: + - name: This is a fact name + value: This is a fact value + - name: This is a fact name + value: This is a fact value + - name: This is a fact name + value: This is a fact value + images: + - image: http://connectorsdemo.azurewebsites.net/images/MicrosoftSurface_024_Cafe_OH-06315_VS_R1c.jpg + title: This is the image's alternate text + - image: http://connectorsdemo.azurewebsites.net/images/WIN12_Scene_01.jpg + title: This is the image's alternate text + - image: http://connectorsdemo.azurewebsites.net/images/WIN12_Anthony_02.jpg + title: This is the image's alternate text + actions: + - "@type": ActionCard + name: Comment + inputs: + - "@type": TextInput + id: comment + is_multiline: true + title: Input's title property + actions: + - "@type": HttpPOST + name: Save + target: http://... + - "@type": ActionCard + name: Due Date + inputs: + - "@type": DateInput + id: dueDate + title: Input's title property + actions: + - "@type": HttpPOST + name: Save + target: http://... + - "@type": HttpPOST + name: Action's name prop. + target: http://... + - "@type": OpenUri + name: Action's name prop + targets: + - os: default + uri: http://... + - start_group: true + title: This is the title of a **second section** + text: This second section is visually separated from the first one by setting its + **startGroup** property to true. 
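+
+# Illustrative extra example (not part of the original module docs); it uses
+# only the documented webhook, summary and color parameters.
+- name: Create a Connector Card with only a summary and an accent color
+  office_365_connector_card:
+    webhook: https://outlook.office.com/webhook/GUID/IncomingWebhook/GUID/GUID
+    summary: Ansible run finished
+    color: E81123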
+""" + +RETURN = """ +""" + +# import module snippets +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.urls import fetch_url +from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict + +OFFICE_365_CARD_CONTEXT = "http://schema.org/extensions" +OFFICE_365_CARD_TYPE = "MessageCard" +OFFICE_365_CARD_EMPTY_PAYLOAD_MSG = "Summary or Text is required." +OFFICE_365_INVALID_WEBHOOK_MSG = "The Incoming Webhook was not reachable." + + +def build_actions(actions): + action_items = [] + + for action in actions: + action_item = snake_dict_to_camel_dict(action) + action_items.append(action_item) + + return action_items + + +def build_sections(sections): + sections_created = [] + + for section in sections: + sections_created.append(build_section(section)) + + return sections_created + + +def build_section(section): + section_payload = dict() + + if 'title' in section: + section_payload['title'] = section['title'] + + if 'start_group' in section: + section_payload['startGroup'] = section['start_group'] + + if 'activity_image' in section: + section_payload['activityImage'] = section['activity_image'] + + if 'activity_title' in section: + section_payload['activityTitle'] = section['activity_title'] + + if 'activity_subtitle' in section: + section_payload['activitySubtitle'] = section['activity_subtitle'] + + if 'activity_text' in section: + section_payload['activityText'] = section['activity_text'] + + if 'hero_image' in section: + section_payload['heroImage'] = section['hero_image'] + + if 'text' in section: + section_payload['text'] = section['text'] + + if 'facts' in section: + section_payload['facts'] = section['facts'] + + if 'images' in section: + section_payload['images'] = section['images'] + + if 'actions' in section: + section_payload['potentialAction'] = build_actions(section['actions']) + + return section_payload + + +def build_payload_for_connector_card(module, summary=None, color=None, title=None, text=None, actions=None, sections=None): + payload = dict() + payload['@context'] = OFFICE_365_CARD_CONTEXT + payload['@type'] = OFFICE_365_CARD_TYPE + + if summary is not None: + payload['summary'] = summary + + if color is not None: + payload['themeColor'] = color + + if title is not None: + payload['title'] = title + + if text is not None: + payload['text'] = text + + if actions: + payload['potentialAction'] = build_actions(actions) + + if sections: + payload['sections'] = build_sections(sections) + + payload = module.jsonify(payload) + return payload + + +def do_notify_connector_card_webhook(module, webhook, payload): + headers = { + 'Content-Type': 'application/json' + } + + response, info = fetch_url( + module=module, + url=webhook, + headers=headers, + method='POST', + data=payload + ) + + if info['status'] == 200: + module.exit_json(changed=True) + elif info['status'] == 400 and module.check_mode: + if info['body'] == OFFICE_365_CARD_EMPTY_PAYLOAD_MSG: + module.exit_json(changed=True) + else: + module.fail_json(msg=OFFICE_365_INVALID_WEBHOOK_MSG) + else: + module.fail_json( + msg="failed to send %s as a connector card to Incoming Webhook: %s" + % (payload, info['msg']) + ) + + +def main(): + module = AnsibleModule( + argument_spec=dict( + webhook=dict(required=True, no_log=True), + summary=dict(type='str'), + color=dict(type='str'), + title=dict(type='str'), + text=dict(type='str'), + actions=dict(type='list'), + sections=dict(type='list') + ), + supports_check_mode=True + ) + + webhook = module.params['webhook'] + summary = 
module.params['summary'] + color = module.params['color'] + title = module.params['title'] + text = module.params['text'] + actions = module.params['actions'] + sections = module.params['sections'] + + payload = build_payload_for_connector_card( + module, + summary, + color, + title, + text, + actions, + sections) + + if module.check_mode: + # In check mode, send an empty payload to validate connection + check_mode_payload = build_payload_for_connector_card(module) + do_notify_connector_card_webhook(module, webhook, check_mode_payload) + + do_notify_connector_card_webhook(module, webhook, payload) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/notification/osx_say.py b/plugins/modules/notification/osx_say.py new file mode 120000 index 0000000000..f080521d9d --- /dev/null +++ b/plugins/modules/notification/osx_say.py @@ -0,0 +1 @@ +say.py \ No newline at end of file diff --git a/plugins/modules/notification/pushbullet.py b/plugins/modules/notification/pushbullet.py new file mode 100644 index 0000000000..2031aa7d63 --- /dev/null +++ b/plugins/modules/notification/pushbullet.py @@ -0,0 +1,184 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +author: "Willy Barro (@willybarro)" +requirements: [ pushbullet.py ] +module: pushbullet +short_description: Sends notifications to Pushbullet +description: + - This module sends push notifications via Pushbullet to channels or devices. +options: + api_key: + description: + - Push bullet API token + required: true + channel: + description: + - The channel TAG you wish to broadcast a push notification, + as seen on the "My Channels" > "Edit your channel" at + Pushbullet page. + device: + description: + - The device NAME you wish to send a push notification, + as seen on the Pushbullet main page. + push_type: + description: + - Thing you wish to push. + default: note + choices: [ "note", "link" ] + title: + description: + - Title of the notification. + required: true + body: + description: + - Body of the notification, e.g. Details of the fault you're alerting. + +notes: + - Requires pushbullet.py Python package on the remote host. + You can install it via pip with ($ pip install pushbullet.py). + See U(https://github.com/randomchars/pushbullet.py) +''' + +EXAMPLES = ''' +# Sends a push notification to a device +- pushbullet: + api_key: "ABC123abc123ABC123abc123ABC123ab" + device: "Chrome" + title: "You may see this on Google Chrome" + +# Sends a link to a device +- pushbullet: + api_key: ABC123abc123ABC123abc123ABC123ab + device: Chrome + push_type: link + title: Ansible Documentation + body: https://docs.ansible.com/ + +# Sends a push notification to a channel +- pushbullet: + api_key: ABC123abc123ABC123abc123ABC123ab + channel: my-awesome-channel + title: Broadcasting a message to the #my-awesome-channel folks + +# Sends a push notification with title and body to a channel +- pushbullet: + api_key: ABC123abc123ABC123abc123ABC123ab + channel: my-awesome-channel + title: ALERT! 
Signup service is down + body: Error rate on signup service is over 90% for more than 2 minutes +''' + +import traceback + +PUSHBULLET_IMP_ERR = None +try: + from pushbullet import PushBullet + from pushbullet.errors import InvalidKeyError, PushError +except ImportError: + PUSHBULLET_IMP_ERR = traceback.format_exc() + pushbullet_found = False +else: + pushbullet_found = True + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib + + +# =========================================== +# Main +# + +def main(): + module = AnsibleModule( + argument_spec=dict( + api_key=dict(type='str', required=True, no_log=True), + channel=dict(type='str', default=None), + device=dict(type='str', default=None), + push_type=dict(type='str', default="note", choices=['note', 'link']), + title=dict(type='str', required=True), + body=dict(type='str', default=None), + url=dict(type='str', default=None), + ), + mutually_exclusive=( + ['channel', 'device'], + ), + supports_check_mode=True + ) + + api_key = module.params['api_key'] + channel = module.params['channel'] + device = module.params['device'] + push_type = module.params['push_type'] + title = module.params['title'] + body = module.params['body'] + url = module.params['url'] + + if not pushbullet_found: + module.fail_json(msg=missing_required_lib('pushbullet.py'), exception=PUSHBULLET_IMP_ERR) + + # Init pushbullet + try: + pb = PushBullet(api_key) + target = None + except InvalidKeyError: + module.fail_json(msg="Invalid api_key") + + # Checks for channel/device + if device is None and channel is None: + module.fail_json(msg="You need to provide a channel or a device.") + + # Search for given device + if device is not None: + devices_by_nickname = {} + for d in pb.devices: + devices_by_nickname[d.nickname] = d + + if device in devices_by_nickname: + target = devices_by_nickname[device] + else: + module.fail_json(msg="Device '%s' not found. Available devices: '%s'" % (device, "', '".join(devices_by_nickname.keys()))) + + # Search for given channel + if channel is not None: + channels_by_tag = {} + for c in pb.channels: + channels_by_tag[c.channel_tag] = c + + if channel in channels_by_tag: + target = channels_by_tag[channel] + else: + module.fail_json(msg="Channel '%s' not found. 
Available channels: '%s'" % (channel, "', '".join(channels_by_tag.keys()))) + + # If in check mode, exit saying that we succeeded + if module.check_mode: + module.exit_json(changed=False, msg="OK") + + # Send push notification + try: + if push_type == "link": + target.push_link(title, url, body) + else: + target.push_note(title, body) + module.exit_json(changed=False, msg="OK") + except PushError as e: + module.fail_json(msg="An error occurred, Pushbullet's response: %s" % str(e)) + + module.fail_json(msg="An unknown error has occurred") + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/notification/pushover.py b/plugins/modules/notification/pushover.py new file mode 100644 index 0000000000..f7527430b9 --- /dev/null +++ b/plugins/modules/notification/pushover.py @@ -0,0 +1,127 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright (c) 2012, Jim Richardson +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: pushover +short_description: Send notifications via U(https://pushover.net) +description: + - Send notifications via pushover, to subscriber list of devices, and email + addresses. Requires pushover app on devices. +notes: + - You will require a pushover.net account to use this module. But no account + is required to receive messages. +options: + msg: + description: + - What message you wish to send. + required: true + app_token: + description: + - Pushover issued token identifying your pushover app. + required: true + user_key: + description: + - Pushover issued authentication key for your user. + required: true + title: + description: + - Message title. + required: false + pri: + description: + - Message priority (see U(https://pushover.net) for details). + required: false + +author: "Jim Richardson (@weaselkeeper)" +''' + +EXAMPLES = ''' +- pushover: + msg: '{{ inventory_hostname }} is acting strange ...' + app_token: wxfdksl + user_key: baa5fe97f2c5ab3ca8f0bb59 + delegate_to: localhost + +- pushover: + title: 'Alert!' + msg: '{{ inventory_hostname }} has exploded in flames, It is now time to panic' + pri: 1 + app_token: wxfdksl + user_key: baa5fe97f2c5ab3ca8f0bb59 + delegate_to: localhost +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.six.moves.urllib.parse import urlencode +from ansible.module_utils.urls import fetch_url + + +class Pushover(object): + ''' Instantiates a pushover object, use it to send notifications ''' + base_uri = 'https://api.pushover.net' + + def __init__(self, module, user, token): + self.module = module + self.user = user + self.token = token + + def run(self, priority, msg, title): + ''' Do, whatever it is, we do. 
''' + + url = '%s/1/messages.json' % (self.base_uri) + + # parse config + options = dict(user=self.user, + token=self.token, + priority=priority, + message=msg) + + if title is not None: + options = dict(options, + title=title) + + data = urlencode(options) + + headers = {"Content-type": "application/x-www-form-urlencoded"} + r, info = fetch_url(self.module, url, method='POST', data=data, headers=headers) + if info['status'] != 200: + raise Exception(info) + + return r.read() + + +def main(): + + module = AnsibleModule( + argument_spec=dict( + title=dict(type='str'), + msg=dict(required=True), + app_token=dict(required=True, no_log=True), + user_key=dict(required=True, no_log=True), + pri=dict(required=False, default='0', choices=['-2', '-1', '0', '1', '2']), + ), + ) + + msg_object = Pushover(module, module.params['user_key'], module.params['app_token']) + try: + response = msg_object.run(module.params['pri'], module.params['msg'], module.params['title']) + except Exception: + module.fail_json(msg='Unable to send msg via pushover') + + module.exit_json(msg='message sent successfully: %s' % response, changed=False) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/notification/rabbitmq_publish.py b/plugins/modules/notification/rabbitmq_publish.py new file mode 100644 index 0000000000..992de09fe4 --- /dev/null +++ b/plugins/modules/notification/rabbitmq_publish.py @@ -0,0 +1,209 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2018, John Imison +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: rabbitmq_publish +short_description: Publish a message to a RabbitMQ queue. +description: + - Publish a message on a RabbitMQ queue using a blocking connection. +options: + url: + description: + - An URL connection string to connect to the RabbitMQ server. + - I(url) and I(host)/I(port)/I(user)/I(pass)/I(vhost) are mutually exclusive, use either or but not both. + proto: + description: + - The protocol to use. + choices: [amqps, amqp] + host: + description: + - The RabbitMQ server hostname or IP. + port: + description: + - The RabbitMQ server port. + username: + description: + - The RabbitMQ username. + password: + description: + - The RabbitMQ password. + vhost: + description: + - The virtual host to target. + - If default vhost is required, use C('%2F'). + queue: + description: + - The queue to publish a message to. If no queue is specified, RabbitMQ will return a random queue name. + exchange: + description: + - The exchange to publish a message to. + routing_key: + description: + - The routing key. + body: + description: + - The body of the message. + - A C(body) cannot be provided if a C(src) is specified. + src: + description: + - A file to upload to the queue. Automatic mime type detection is attempted if content_type is not defined (left as default). + - A C(src) cannot be provided if a C(body) is specified. + - The filename is added to the headers of the posted message to RabbitMQ. Key being the C(filename), value is the filename. + aliases: ['file'] + content_type: + description: + - The content type of the body. + default: text/plain + durable: + description: + - Set the queue to be durable. + default: False + type: bool + exclusive: + description: + - Set the queue to be exclusive. 
+ default: False + type: bool + auto_delete: + description: + - Set the queue to auto delete. + default: False + type: bool + headers: + description: + - A dictionary of headers to post with the message. + default: {} + type: dict + cafile: + description: + - CA file used during connection to the RabbitMQ server over SSL. + - If this option is specified, also I(certfile) and I(keyfile) must be specified. + certfile: + description: + - Client certificate to establish SSL connection. + - If this option is specified, also I(cafile) and I(keyfile) must be specified. + keyfile: + description: + - Client key to establish SSL connection. + - If this option is specified, also I(cafile) and I(certfile) must be specified. + + + +requirements: [ pika ] +notes: + - This module requires the pika python library U(https://pika.readthedocs.io/). + - Pika is a pure-Python implementation of the AMQP 0-9-1 protocol that tries to stay fairly independent of the underlying network support library. + - This module is tested against RabbitMQ. Other AMQP 0.9.1 protocol based servers may work but not tested/guaranteed. + - The certificate authentication was tested with certificates created + via U(https://www.rabbitmq.com/ssl.html#automated-certificate-generation) and RabbitMQ + configuration variables C(ssl_options.verify = verify_peer) & C(ssl_options.fail_if_no_peer_cert = true). +author: "John Imison (@Im0)" +''' + +EXAMPLES = ''' +- name: Publish a message to a queue with headers + rabbitmq_publish: + url: "amqp://guest:guest@192.168.0.32:5672/%2F" + queue: 'test' + body: "Hello world from ansible module rabbitmq_publish" + content_type: "text/plain" + headers: + myHeader: myHeaderValue + + +- name: Publish a file to a queue + rabbitmq_publish: + url: "amqp://guest:guest@192.168.0.32:5672/%2F" + queue: 'images' + file: 'path/to/logo.gif' + +- name: RabbitMQ auto generated queue + rabbitmq_publish: + url: "amqp://guest:guest@192.168.0.32:5672/%2F" + body: "Hello world random queue from ansible module rabbitmq_publish" + content_type: "text/plain" + +- name: Publish with certs + rabbitmq_publish: + url: "amqps://guest:guest@192.168.0.32:5671/%2F" + body: "Hello test queue from ansible module rabbitmq_publish via SSL certs" + queue: 'test' + content_type: "text/plain" + cafile: 'ca_certificate.pem' + certfile: 'client_certificate.pem' + keyfile: 'client_key.pem' + +''' + +RETURN = ''' +result: + description: + - Contains the status I(msg), content type I(content_type) and the queue name I(queue). 
+ returned: success + type: dict + sample: | + 'result': { 'content_type': 'text/plain', 'msg': 'Successfully published to queue test', 'queue': 'test' } +''' + +try: + import pika + HAS_PIKA = True +except ImportError: + HAS_PIKA = False + + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_native, to_text +from ansible_collections.community.general.plugins.module_utils.rabbitmq import RabbitClient + + +def main(): + argument_spec = RabbitClient.rabbitmq_argument_spec() + argument_spec.update( + exchange=dict(type='str', default=''), + routing_key=dict(type='str', required=False), + body=dict(type='str', required=False), + src=dict(aliases=['file'], type='path', required=False), + content_type=dict(default="text/plain", type='str'), + durable=dict(default=False, type='bool'), + exclusive=dict(default=False, type='bool'), + auto_delete=dict(default=False, type='bool'), + headers=dict(default={}, type='dict'), + cafile=dict(type='str', required=False), + certfile=dict(type='str', required=False), + keyfile=dict(type='str', required=False), + ) + module = AnsibleModule( + argument_spec=argument_spec, + mutually_exclusive=[['body', 'src']], + required_together=[['cafile', 'certfile', 'keyfile']], + supports_check_mode=False + ) + + rabbitmq = RabbitClient(module) + + if rabbitmq.basic_publish(): + rabbitmq.close_connection() + module.exit_json(changed=True, result={"msg": "Successfully published to queue %s" % rabbitmq.queue, + "queue": rabbitmq.queue, + "content_type": rabbitmq.content_type}) + else: + rabbitmq.close_connection() + module.fail_json(changed=False, msg="Unsuccessful publishing to queue %s" % rabbitmq.queue) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/notification/rocketchat.py b/plugins/modules/notification/rocketchat.py new file mode 100644 index 0000000000..375fd1e1b0 --- /dev/null +++ b/plugins/modules/notification/rocketchat.py @@ -0,0 +1,235 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2016, Deepak Kothandan +# (c) 2015, Stefan Berggren +# (c) 2014, Ramon de la Fuente +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +module: rocketchat +short_description: Send notifications to Rocket Chat +description: + - The C(rocketchat) module sends notifications to Rocket Chat via the Incoming WebHook integration +author: "Ramon de la Fuente (@ramondelafuente)" +options: + domain: + description: + - The domain for your environment without protocol. (i.e. + C(example.com) or C(chat.example.com)) + required: true + token: + description: + - Rocket Chat Incoming Webhook integration token. This provides + authentication to Rocket Chat's Incoming webhook for posting + messages. + required: true + protocol: + description: + - Specify the protocol used to send notification messages before the webhook url. (i.e. http or https) + default: https + choices: + - 'http' + - 'https' + msg: + description: + - Message to be sent. + channel: + description: + - Channel to send the message to. If absent, the message goes to the channel selected for the I(token) + specified during the creation of webhook. + username: + description: + - This is the sender of the message. + default: "Ansible" + icon_url: + description: + - URL for the message sender's icon. 
+ default: "https://www.ansible.com/favicon.ico" + icon_emoji: + description: + - Emoji for the message sender. The representation for the available emojis can be + got from Rocket Chat. (for example :thumbsup:) (if I(icon_emoji) is set, I(icon_url) will not be used) + link_names: + description: + - Automatically create links for channels and usernames in I(msg). + default: 1 + choices: + - 1 + - 0 + validate_certs: + description: + - If C(no), SSL certificates will not be validated. This should only be used + on personally controlled sites using self-signed certificates. + type: bool + default: 'yes' + color: + description: + - Allow text to use default colors - use the default of 'normal' to not send a custom color bar at the start of the message + default: 'normal' + choices: + - 'normal' + - 'good' + - 'warning' + - 'danger' + attachments: + description: + - Define a list of attachments. +''' + +EXAMPLES = """ +- name: Send notification message via Rocket Chat + rocketchat: + token: thetoken/generatedby/rocketchat + domain: chat.example.com + msg: '{{ inventory_hostname }} completed' + delegate_to: localhost + +- name: Send notification message via Rocket Chat all options + rocketchat: + domain: chat.example.com + token: thetoken/generatedby/rocketchat + msg: '{{ inventory_hostname }} completed' + channel: #ansible + username: 'Ansible on {{ inventory_hostname }}' + icon_url: http://www.example.com/some-image-file.png + link_names: 0 + delegate_to: localhost + +- name: insert a color bar in front of the message for visibility purposes and use the default webhook icon and name configured in rocketchat + rocketchat: + token: thetoken/generatedby/rocketchat + domain: chat.example.com + msg: '{{ inventory_hostname }} is alive!' + color: good + username: '' + icon_url: '' + delegate_to: localhost + +- name: Use the attachments API + rocketchat: + token: thetoken/generatedby/rocketchat + domain: chat.example.com + attachments: + - text: Display my system load on host A and B + color: #ff00dd + title: System load + fields: + - title: System A + value: 'load average: 0,74, 0,66, 0,63' + short: True + - title: System B + value: 'load average: 5,16, 4,64, 2,43' + short: True + delegate_to: localhost +""" + +RETURN = """ +changed: + description: A flag indicating if any change was made or not. 
+ returned: success + type: bool + sample: false +""" + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.urls import fetch_url + + +ROCKETCHAT_INCOMING_WEBHOOK = '%s://%s/hooks/%s' + + +def build_payload_for_rocketchat(module, text, channel, username, icon_url, icon_emoji, link_names, color, attachments): + payload = {} + if color == "normal" and text is not None: + payload = dict(text=text) + elif text is not None: + payload = dict(attachments=[dict(text=text, color=color)]) + if channel is not None: + if (channel[0] == '#') or (channel[0] == '@'): + payload['channel'] = channel + else: + payload['channel'] = '#' + channel + if username is not None: + payload['username'] = username + if icon_emoji is not None: + payload['icon_emoji'] = icon_emoji + else: + payload['icon_url'] = icon_url + if link_names is not None: + payload['link_names'] = link_names + + if attachments is not None: + if 'attachments' not in payload: + payload['attachments'] = [] + + if attachments is not None: + for attachment in attachments: + if 'fallback' not in attachment: + attachment['fallback'] = attachment['text'] + payload['attachments'].append(attachment) + + payload = "payload=" + module.jsonify(payload) + return payload + + +def do_notify_rocketchat(module, domain, token, protocol, payload): + + if token.count('/') < 1: + module.fail_json(msg="Invalid Token specified, provide a valid token") + + rocketchat_incoming_webhook = ROCKETCHAT_INCOMING_WEBHOOK % (protocol, domain, token) + + response, info = fetch_url(module, rocketchat_incoming_webhook, data=payload) + if info['status'] != 200: + module.fail_json(msg="failed to send message, return status=%s" % str(info['status'])) + + +def main(): + module = AnsibleModule( + argument_spec=dict( + domain=dict(type='str', required=True, default=None), + token=dict(type='str', required=True, no_log=True), + protocol=dict(type='str', default='https', choices=['http', 'https']), + msg=dict(type='str', required=False, default=None), + channel=dict(type='str', default=None), + username=dict(type='str', default='Ansible'), + icon_url=dict(type='str', default='https://www.ansible.com/favicon.ico'), + icon_emoji=dict(type='str', default=None), + link_names=dict(type='int', default=1, choices=[0, 1]), + validate_certs=dict(default='yes', type='bool'), + color=dict(type='str', default='normal', choices=['normal', 'good', 'warning', 'danger']), + attachments=dict(type='list', required=False, default=None) + ) + ) + + domain = module.params['domain'] + token = module.params['token'] + protocol = module.params['protocol'] + text = module.params['msg'] + channel = module.params['channel'] + username = module.params['username'] + icon_url = module.params['icon_url'] + icon_emoji = module.params['icon_emoji'] + link_names = module.params['link_names'] + color = module.params['color'] + attachments = module.params['attachments'] + + payload = build_payload_for_rocketchat(module, text, channel, username, icon_url, icon_emoji, link_names, color, attachments) + do_notify_rocketchat(module, domain, token, protocol, payload) + + module.exit_json(msg="OK") + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/notification/say.py b/plugins/modules/notification/say.py new file mode 100644 index 0000000000..2feb5ecf86 --- /dev/null +++ b/plugins/modules/notification/say.py @@ -0,0 +1,93 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2013, Michael DeHaan +# GNU General Public License v3.0+ (see COPYING or 
https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['stableinterface'],
+                    'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: say
+short_description: Makes a computer speak.
+description:
+   - Makes a computer speak! Amuse your friends, annoy your coworkers!
+notes:
+   - In 2.5, this module has been renamed from C(osx_say) to M(say).
+   - If you like this module, you may also be interested in the osx_say callback plugin.
+   - A list of available voices, with language, can be found by running C(say -v ?) on an OSX host and C(espeak --voices) on a Linux host.
+options:
+  msg:
+    description:
+      - What to say.
+    required: true
+  voice:
+    description:
+      - What voice to use.
+    required: false
+requirements: [ say or espeak or espeak-ng ]
+author:
+    - "Ansible Core Team"
+    - "Michael DeHaan (@mpdehaan)"
+'''
+
+EXAMPLES = '''
+- say:
+    msg: '{{ inventory_hostname }} is all done'
+    voice: Zarvox
+  delegate_to: localhost
+'''
+import platform
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def say(module, executable, msg, voice):
+    cmd = [executable, msg]
+    if voice:
+        cmd.extend(('-v', voice))
+    module.run_command(cmd, check_rc=True)
+
+
+def main():
+
+    module = AnsibleModule(
+        argument_spec=dict(
+            msg=dict(required=True),
+            voice=dict(required=False),
+        ),
+        supports_check_mode=True
+    )
+
+    msg = module.params['msg']
+    voice = module.params['voice']
+    possibles = ('say', 'espeak', 'espeak-ng')
+
+    if platform.system() != 'Darwin':
+        # If a 'say' binary is found off macOS, it might be the GNUstep tool,
+        # which does not support the 'voice' parameter.
+        voice = None
+
+    for possible in possibles:
+        executable = module.get_bin_path(possible)
+        if executable:
+            break
+    else:
+        module.fail_json(msg='Unable to find either %s' % ', '.join(possibles))
+
+    if module.check_mode:
+        module.exit_json(msg=msg, changed=False)
+
+    say(module, executable, msg, voice)
+
+    module.exit_json(msg=msg, changed=True)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/notification/sendgrid.py b/plugins/modules/notification/sendgrid.py
new file mode 100644
index 0000000000..b029b0c6aa
--- /dev/null
+++ b/plugins/modules/notification/sendgrid.py
@@ -0,0 +1,252 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2015, Matt Makai
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: sendgrid
+short_description: Sends an email with the SendGrid API
+description:
+   - "Sends an email with a SendGrid account through their API, not through
+     the SMTP service."
+notes:
+   - "This module is non-idempotent because it sends an email through the
+     external API. It is idempotent only in the case that the module fails."
+   - "Like the other notification modules, this one requires an external
+     dependency to work. In this case, you'll need an active SendGrid
+     account."
+   - "In order to use api_key, cc, bcc, attachments, from_name, html_body or headers,
+     you must C(pip install sendgrid)."
+   - "Since 2.2, username and password are not required if you supply an api_key."
+requirements:
+    - sendgrid python library
+options:
+  username:
+    description:
+      - Username for logging into the SendGrid account.
+ - Since 2.2 it is only required if api_key is not supplied. + password: + description: + - password that corresponds to the username + - Since 2.2 it is only required if api_key is not supplied. + from_address: + description: + - the address in the "from" field for the email + required: true + to_addresses: + description: + - a list with one or more recipient email addresses + required: true + subject: + description: + - the desired subject for the email + required: true + api_key: + description: + - sendgrid API key to use instead of username/password + cc: + description: + - a list of email addresses to cc + bcc: + description: + - a list of email addresses to bcc + attachments: + description: + - a list of relative or explicit paths of files you want to attach (7MB limit as per SendGrid docs) + from_name: + description: + - the name you want to appear in the from field, i.e 'John Doe' + html_body: + description: + - whether the body is html content that should be rendered + type: bool + default: 'no' + headers: + description: + - a dict to pass on as headers +author: "Matt Makai (@makaimc)" +''' + +EXAMPLES = ''' +# send an email to a single recipient that the deployment was successful +- sendgrid: + username: "{{ sendgrid_username }}" + password: "{{ sendgrid_password }}" + from_address: "ansible@mycompany.com" + to_addresses: + - "ops@mycompany.com" + subject: "Deployment success." + body: "The most recent Ansible deployment was successful." + delegate_to: localhost + +# send an email to more than one recipient that the build failed +- sendgrid: + username: "{{ sendgrid_username }}" + password: "{{ sendgrid_password }}" + from_address: "build@mycompany.com" + to_addresses: + - "ops@mycompany.com" + - "devteam@mycompany.com" + subject: "Build failure!." + body: "Unable to pull source repository from Git server." 
+  delegate_to: localhost
+'''
+
+# =======================================
+# sendgrid module support methods
+#
+import os
+import traceback
+
+SENDGRID_IMP_ERR = None
+try:
+    import sendgrid
+    HAS_SENDGRID = True
+except ImportError:
+    SENDGRID_IMP_ERR = traceback.format_exc()
+    HAS_SENDGRID = False
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils.six.moves.urllib.parse import urlencode
+from ansible.module_utils._text import to_bytes
+from ansible.module_utils.urls import fetch_url
+
+
+def post_sendgrid_api(module, username, password, from_address, to_addresses,
+                      subject, body, api_key=None, cc=None, bcc=None, attachments=None,
+                      html_body=False, from_name=None, headers=None):
+
+    if not HAS_SENDGRID:
+        SENDGRID_URI = "https://api.sendgrid.com/api/mail.send.json"
+        AGENT = "Ansible"
+        data = {'api_user': username, 'api_key': password,
+                'from': from_address, 'subject': subject, 'text': body}
+        encoded_data = urlencode(data)
+        to_addresses_api = ''
+        for recipient in to_addresses:
+            recipient = to_bytes(recipient, errors='surrogate_or_strict')
+            to_addresses_api += '&to[]=%s' % recipient
+        encoded_data += to_addresses_api
+
+        headers = {'User-Agent': AGENT,
+                   'Content-type': 'application/x-www-form-urlencoded',
+                   'Accept': 'application/json'}
+        return fetch_url(module, SENDGRID_URI, data=encoded_data, headers=headers, method='POST')
+    else:
+
+        if api_key:
+            sg = sendgrid.SendGridClient(api_key)
+        else:
+            sg = sendgrid.SendGridClient(username, password)
+
+        message = sendgrid.Mail()
+        message.set_subject(subject)
+
+        for recip in to_addresses:
+            message.add_to(recip)
+
+        if cc:
+            for recip in cc:
+                message.add_cc(recip)
+        if bcc:
+            for recip in bcc:
+                message.add_bcc(recip)
+
+        if headers:
+            message.set_headers(headers)
+
+        if attachments:
+            for f in attachments:
+                name = os.path.basename(f)
+                message.add_attachment(name, f)
+
+        if from_name:
+            message.set_from('%s <%s>'
% (from_name, from_address)) + else: + message.set_from(from_address) + + if html_body: + message.set_html(body) + else: + message.set_text(body) + + return sg.send(message) +# ======================================= +# Main +# + + +def main(): + module = AnsibleModule( + argument_spec=dict( + username=dict(required=False), + password=dict(required=False, no_log=True), + api_key=dict(required=False, no_log=True), + bcc=dict(required=False, type='list'), + cc=dict(required=False, type='list'), + headers=dict(required=False, type='dict'), + from_address=dict(required=True), + from_name=dict(required=False), + to_addresses=dict(required=True, type='list'), + subject=dict(required=True), + body=dict(required=True), + html_body=dict(required=False, default=False, type='bool'), + attachments=dict(required=False, type='list') + ), + supports_check_mode=True, + mutually_exclusive=[ + ['api_key', 'password'], + ['api_key', 'username'] + ], + required_together=[['username', 'password']], + ) + + username = module.params['username'] + password = module.params['password'] + api_key = module.params['api_key'] + bcc = module.params['bcc'] + cc = module.params['cc'] + headers = module.params['headers'] + from_name = module.params['from_name'] + from_address = module.params['from_address'] + to_addresses = module.params['to_addresses'] + subject = module.params['subject'] + body = module.params['body'] + html_body = module.params['html_body'] + attachments = module.params['attachments'] + + sendgrid_lib_args = [api_key, bcc, cc, headers, from_name, html_body, attachments] + + if any(lib_arg is not None for lib_arg in sendgrid_lib_args) and not HAS_SENDGRID: + reason = 'when using any of the following arguments: ' \ + 'api_key, bcc, cc, headers, from_name, html_body, attachments' + module.fail_json(msg=missing_required_lib('sendgrid', reason=reason), + exception=SENDGRID_IMP_ERR) + + response, info = post_sendgrid_api(module, username, password, + from_address, to_addresses, subject, body, attachments=attachments, + bcc=bcc, cc=cc, headers=headers, html_body=html_body, api_key=api_key) + + if not HAS_SENDGRID: + if info['status'] != 200: + module.fail_json(msg="unable to send email through SendGrid API: %s" % info['msg']) + else: + if response != 200: + module.fail_json(msg="unable to send email through SendGrid API: %s" % info['message']) + + module.exit_json(msg=subject, changed=False) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/notification/slack.py b/plugins/modules/notification/slack.py new file mode 100644 index 0000000000..46b94557c7 --- /dev/null +++ b/plugins/modules/notification/slack.py @@ -0,0 +1,308 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2017, Steve Pletcher +# (c) 2016, René Moser +# (c) 2015, Stefan Berggren +# (c) 2014, Ramon de la Fuente +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['stableinterface'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +module: slack +short_description: Send Slack notifications +description: + - The C(slack) module sends notifications to U(http://slack.com) via the Incoming WebHook integration +author: "Ramon de la Fuente (@ramondelafuente)" +options: + domain: + description: + - Slack (sub)domain for your environment without protocol. (i.e. 
+ C(example.slack.com)) In 1.8 and beyond, this is deprecated and may + be ignored. See token documentation for information. + token: + description: + - Slack integration token. This authenticates you to the slack service. + Prior to 1.8, a token looked like C(3Ffe373sfhRE6y42Fg3rvf4GlK). In + 1.8 and above, ansible adapts to the new slack API where tokens look + like C(G922VJP24/D921DW937/3Ffe373sfhRE6y42Fg3rvf4GlK). If tokens + are in the new format then slack will ignore any value of domain. If + the token is in the old format the domain is required. Ansible has no + control of when slack will get rid of the old API. When slack does + that the old format will stop working. ** Please keep in mind the tokens + are not the API tokens but are the webhook tokens. In slack these are + found in the webhook URL which are obtained under the apps and integrations. + The incoming webhooks can be added in that area. In some cases this may + be locked by your Slack admin and you must request access. It is there + that the incoming webhooks can be added. The key is on the end of the + URL given to you in that section. + required: true + msg: + description: + - Message to send. Note that the module does not handle escaping characters. + Plain-text angle brackets and ampersands should be converted to HTML entities (e.g. & to &) before sending. + See Slack's documentation (U(https://api.slack.com/docs/message-formatting)) for more. + channel: + description: + - Channel to send the message to. If absent, the message goes to the channel selected for the I(token). + thread_id: + description: + - Optional. Timestamp of message to thread this message to as a float. https://api.slack.com/docs/message-threading + username: + description: + - This is the sender of the message. + default: "Ansible" + icon_url: + description: + - Url for the message sender's icon (default C(https://www.ansible.com/favicon.ico)) + icon_emoji: + description: + - Emoji for the message sender. See Slack documentation for options. + (if I(icon_emoji) is set, I(icon_url) will not be used) + link_names: + description: + - Automatically create links for channels and usernames in I(msg). + default: 1 + choices: + - 1 + - 0 + parse: + description: + - Setting for the message parser at Slack + choices: + - 'full' + - 'none' + validate_certs: + description: + - If C(no), SSL certificates will not be validated. This should only be used + on personally controlled sites using self-signed certificates. + type: bool + default: 'yes' + color: + description: + - Allow text to use default colors - use the default of 'normal' to not send a custom color bar at the start of the message. + - Allowed values for color can be one of 'normal', 'good', 'warning', 'danger', any valid 3 digit or 6 digit hex color value. + - Specifying value in hex is supported from version 2.8. + default: 'normal' + attachments: + description: + - Define a list of attachments. This list mirrors the Slack JSON API. + - For more information, see also in the (U(https://api.slack.com/docs/attachments)). 
+''' + +EXAMPLES = """ +- name: Send notification message via Slack + slack: + token: thetoken/generatedby/slack + msg: '{{ inventory_hostname }} completed' + delegate_to: localhost + +- name: Send notification message via Slack all options + slack: + token: thetoken/generatedby/slack + msg: '{{ inventory_hostname }} completed' + channel: '#ansible' + thread_id: 1539917263.000100 + username: 'Ansible on {{ inventory_hostname }}' + icon_url: http://www.example.com/some-image-file.png + link_names: 0 + parse: 'none' + delegate_to: localhost + +- name: insert a color bar in front of the message for visibility purposes and use the default webhook icon and name configured in Slack + slack: + token: thetoken/generatedby/slack + msg: '{{ inventory_hostname }} is alive!' + color: good + username: '' + icon_url: '' + +- name: insert a color bar in front of the message with valid hex color value + slack: + token: thetoken/generatedby/slack + msg: 'This message uses color in hex value' + color: '#00aacc' + username: '' + icon_url: '' + +- name: Use the attachments API + slack: + token: thetoken/generatedby/slack + attachments: + - text: Display my system load on host A and B + color: '#ff00dd' + title: System load + fields: + - title: System A + value: "load average: 0,74, 0,66, 0,63" + short: True + - title: System B + value: 'load average: 5,16, 4,64, 2,43' + short: True + +- name: Send a message with a link using Slack markup + slack: + token: thetoken/generatedby/slack + msg: We sent this message using ! + +- name: Send a message with angle brackets and ampersands + slack: + token: thetoken/generatedby/slack + msg: This message has <brackets> & ampersands in plain text. +""" + +import re +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.urls import fetch_url + + +OLD_SLACK_INCOMING_WEBHOOK = 'https://%s/services/hooks/incoming-webhook?token=%s' +SLACK_INCOMING_WEBHOOK = 'https://hooks.slack.com/services/%s' + +# Escaping quotes and apostrophes to avoid ending string prematurely in ansible call. +# We do not escape other characters used as Slack metacharacters (e.g. &, <, >). +escape_table = { + '"': "\"", + "'": "\'", +} + + +def is_valid_hex_color(color_choice): + if re.match(r'^#([A-Fa-f0-9]{6}|[A-Fa-f0-9]{3})$', color_choice): + return True + return False + + +def escape_quotes(text): + '''Backslash any quotes within text.''' + return "".join(escape_table.get(c, c) for c in text) + + +def build_payload_for_slack(module, text, channel, thread_id, username, icon_url, icon_emoji, link_names, + parse, color, attachments): + payload = {} + if color == "normal" and text is not None: + payload = dict(text=escape_quotes(text)) + elif text is not None: + # With a custom color we have to set the message as attachment, and explicitly turn markdown parsing on for it. 
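+        # Illustrative note (not part of the original code): with
+        # color='#00aacc' and text='deploy done', the line below yields
+        #   {'attachments': [{'text': 'deploy done', 'color': '#00aacc',
+        #                     'mrkdwn_in': ['text']}]}
+        # before channel, username and the other fields are merged in further down.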
+ payload = dict(attachments=[dict(text=escape_quotes(text), color=color, mrkdwn_in=["text"])]) + if channel is not None: + if (channel[0] == '#') or (channel[0] == '@'): + payload['channel'] = channel + else: + payload['channel'] = '#' + channel + if thread_id is not None: + payload['thread_ts'] = thread_id + if username is not None: + payload['username'] = username + if icon_emoji is not None: + payload['icon_emoji'] = icon_emoji + else: + payload['icon_url'] = icon_url + if link_names is not None: + payload['link_names'] = link_names + if parse is not None: + payload['parse'] = parse + + if attachments is not None: + if 'attachments' not in payload: + payload['attachments'] = [] + + if attachments is not None: + keys_to_escape = [ + 'title', + 'text', + 'author_name', + 'pretext', + 'fallback', + ] + for attachment in attachments: + for key in keys_to_escape: + if key in attachment: + attachment[key] = escape_quotes(attachment[key]) + + if 'fallback' not in attachment: + attachment['fallback'] = attachment['text'] + + payload['attachments'].append(attachment) + + payload = module.jsonify(payload) + return payload + + +def do_notify_slack(module, domain, token, payload): + if token.count('/') >= 2: + # New style token + slack_incoming_webhook = SLACK_INCOMING_WEBHOOK % (token) + else: + if not domain: + module.fail_json(msg="Slack has updated its webhook API. You need to specify a token of the form " + "XXXX/YYYY/ZZZZ in your playbook") + slack_incoming_webhook = OLD_SLACK_INCOMING_WEBHOOK % (domain, token) + + headers = { + 'Content-Type': 'application/json', + 'Accept': 'application/json', + } + response, info = fetch_url(module=module, url=slack_incoming_webhook, headers=headers, method='POST', data=payload) + + if info['status'] != 200: + obscured_incoming_webhook = SLACK_INCOMING_WEBHOOK % ('[obscured]') + module.fail_json(msg=" failed to send %s to %s: %s" % (payload, obscured_incoming_webhook, info['msg'])) + + +def main(): + module = AnsibleModule( + argument_spec=dict( + domain=dict(type='str', required=False, default=None), + token=dict(type='str', required=True, no_log=True), + msg=dict(type='str', required=False, default=None), + channel=dict(type='str', default=None), + thread_id=dict(type='float', default=None), + username=dict(type='str', default='Ansible'), + icon_url=dict(type='str', default='https://www.ansible.com/favicon.ico'), + icon_emoji=dict(type='str', default=None), + link_names=dict(type='int', default=1, choices=[0, 1]), + parse=dict(type='str', default=None, choices=['none', 'full']), + validate_certs=dict(default='yes', type='bool'), + color=dict(type='str', default='normal'), + attachments=dict(type='list', required=False, default=None) + ) + ) + + domain = module.params['domain'] + token = module.params['token'] + text = module.params['msg'] + channel = module.params['channel'] + thread_id = module.params['thread_id'] + username = module.params['username'] + icon_url = module.params['icon_url'] + icon_emoji = module.params['icon_emoji'] + link_names = module.params['link_names'] + parse = module.params['parse'] + color = module.params['color'] + attachments = module.params['attachments'] + + color_choices = ['normal', 'good', 'warning', 'danger'] + if color not in color_choices and not is_valid_hex_color(color): + module.fail_json(msg="Color value specified should be either one of %r " + "or any valid hex value with length 3 or 6." 
% color_choices) + + payload = build_payload_for_slack(module, text, channel, thread_id, username, icon_url, icon_emoji, link_names, + parse, color, attachments) + do_notify_slack(module, domain, token, payload) + + module.exit_json(msg="OK") + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/notification/syslogger.py b/plugins/modules/notification/syslogger.py new file mode 100644 index 0000000000..9903de6a49 --- /dev/null +++ b/plugins/modules/notification/syslogger.py @@ -0,0 +1,167 @@ +#!/usr/bin/python +# Copyright (c) 2017 Tim Rightnour +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: syslogger + +short_description: Log messages in the syslog + + +description: + - "Uses syslog to add log entries to the host." + - "Can specify facility and priority." + +options: + msg: + description: + - This is the message to place in syslog + required: true + priority: + description: + - Set the log priority + choices: [ "emerg", "alert", "crit", "err", "warning", "notice", "info", "debug" ] + required: false + default: "info" + facility: + description: + - Set the log facility + choices: [ "kern", "user", "mail", "daemon", "auth", "lpr", "news", + "uucp", "cron", "syslog", "local0", "local1", "local2", + "local3", "local4", "local5", "local6", "local7" ] + required: false + default: "daemon" + log_pid: + description: + - Log the pid in brackets + type: bool + required: false + default: "no" + +author: + - Tim Rightnour (@garbled1) +''' + +EXAMPLES = ''' +# Full example +- name: Test syslog + syslogger: + msg: "Hello from ansible" + priority: "err" + facility: "daemon" + log_pid: true + +# Basic usage +- name: Simple Usage + syslogger: + msg: "I will end up as daemon.info" + +''' + +RETURN = ''' +''' + +from ansible.module_utils.basic import AnsibleModule +import syslog + + +def get_facility(x): + return { + 'kern': syslog.LOG_KERN, + 'user': syslog.LOG_USER, + 'mail': syslog.LOG_MAIL, + 'daemon': syslog.LOG_DAEMON, + 'auth': syslog.LOG_AUTH, + 'lpr': syslog.LOG_LPR, + 'news': syslog.LOG_NEWS, + 'uucp': syslog.LOG_UUCP, + 'cron': syslog.LOG_CRON, + 'syslog': syslog.LOG_SYSLOG, + 'local0': syslog.LOG_LOCAL0, + 'local1': syslog.LOG_LOCAL1, + 'local2': syslog.LOG_LOCAL2, + 'local3': syslog.LOG_LOCAL3, + 'local4': syslog.LOG_LOCAL4, + 'local5': syslog.LOG_LOCAL5, + 'local6': syslog.LOG_LOCAL6, + 'local7': syslog.LOG_LOCAL7 + }.get(x, syslog.LOG_DAEMON) + + +def get_priority(x): + return { + 'emerg': syslog.LOG_EMERG, + 'alert': syslog.LOG_ALERT, + 'crit': syslog.LOG_CRIT, + 'err': syslog.LOG_ERR, + 'warning': syslog.LOG_WARNING, + 'notice': syslog.LOG_NOTICE, + 'info': syslog.LOG_INFO, + 'debug': syslog.LOG_DEBUG + }.get(x, syslog.LOG_INFO) + + +def run_module(): + # define the available arguments/parameters that a user can pass to + # the module + module_args = dict( + msg=dict(type='str', required=True), + priority=dict(type='str', required=False, + choices=["emerg", "alert", "crit", "err", "warning", + "notice", "info", "debug"], + default='info'), + facility=dict(type='str', required=False, + choices=["kern", "user", "mail", "daemon", "auth", + "lpr", "news", "uucp", "cron", "syslog", + "local0", "local1", "local2", "local3", + "local4", "local5", "local6", "local7"], + default='daemon'), + 
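+        # When enabled, log_pid is mapped to syslog.LOG_PID in openlog() below,
+        # so entries are tagged like 'ansible_syslogger[1234]: ...' (PID illustrative).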
        log_pid=dict(type='bool', required=False, default=False)
+    )
+
+    module = AnsibleModule(
+        argument_spec=module_args,
+    )
+
+    result = dict(
+        changed=False,
+        priority=module.params['priority'],
+        facility=module.params['facility'],
+        log_pid=module.params['log_pid'],
+        msg=module.params['msg']
+    )
+
+    # do the logging
+    try:
+        if module.params['log_pid']:
+            syslog.openlog('ansible_syslogger',
+                           logoption=syslog.LOG_PID,
+                           facility=get_facility(module.params['facility']))
+        else:
+            syslog.openlog('ansible_syslogger',
+                           facility=get_facility(module.params['facility']))
+        syslog.syslog(get_priority(module.params['priority']),
+                      module.params['msg'])
+        syslog.closelog()
+        result['changed'] = True
+
+    except Exception:
+        module.fail_json(error='Failed to write to syslog', **result)
+
+    module.exit_json(**result)
+
+
+def main():
+    run_module()
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/notification/telegram.py b/plugins/modules/notification/telegram.py
new file mode 100644
index 0000000000..9d9572735c
--- /dev/null
+++ b/plugins/modules/notification/telegram.py
@@ -0,0 +1,115 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2016, Artem Feofanov
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+
+module: telegram
+author: "Artem Feofanov (@tyouxa)"
+
+short_description: Send notifications via Telegram
+
+description:
+    - Send notifications via a Telegram bot, to a verified group or user.
+notes:
+    - You will need a Telegram account and a Telegram bot to use this module.
+options:
+  msg:
+    description:
+      - What message you wish to send.
+    required: true
+  msg_format:
+    description:
+      - Message format. The formatting options C(markdown) and C(html) are described in the
+        Telegram API docs (U(https://core.telegram.org/bots/api#formatting-options)).
+        If C(plain) is set, the message is not formatted.
+    default: plain
+    choices: [ "plain", "markdown", "html" ]
+  token:
+    description:
+      - Token identifying your Telegram bot.
+    required: true
+  chat_id:
+    description:
+      - Telegram group or user chat_id
+    required: true
+
+'''
+
+EXAMPLES = """
+
+- name: send a message to chat in playbook
+  telegram:
+    token: '9999999:XXXXXXXXXXXXXXXXXXXXXXX'
+    chat_id: 000000
+    msg: Ansible task finished
+"""
+
+RETURN = """
+
+msg:
+  description: The message you attempted to send
+  returned: success
+  type: str
+  sample: "Ansible task finished"
+telegram_error:
+  description: Error message gotten from Telegram API
+  returned: failure
+  type: str
+  sample: "Bad Request: message text is empty"
+"""
+
+import json
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves.urllib.parse import quote
+from ansible.module_utils.urls import fetch_url
+
+
+def main():
+
+    module = AnsibleModule(
+        argument_spec=dict(
+            token=dict(type='str', required=True, no_log=True),
+            chat_id=dict(type='str', required=True, no_log=True),
+            msg_format=dict(type='str', required=False, default='plain',
+                            choices=['plain', 'markdown', 'html']),
+            msg=dict(type='str', required=True)),
+        supports_check_mode=True
+    )
+
+    token = quote(module.params.get('token'))
+    chat_id = quote(module.params.get('chat_id'))
+    msg_format = quote(module.params.get('msg_format'))
+    msg = quote(module.params.get('msg'))
+
+    url = 'https://api.telegram.org/bot' + token + \
+        '/sendMessage?text=' + msg + '&chat_id=' + chat_id
+    if msg_format in ('markdown', 'html'):
+        url += '&parse_mode=' + msg_format
+
+    if module.check_mode:
+        module.exit_json(changed=False)
+
+    response, info = fetch_url(module, url)
+    if info['status'] == 200:
+        module.exit_json(changed=True)
+    else:
+        body = json.loads(info['body'])
+        module.fail_json(msg="failed to send message, return status=%s" % str(info['status']),
+                         telegram_error=body['description'])
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/notification/twilio.py b/plugins/modules/notification/twilio.py
new file mode 100644
index 0000000000..db2e02b63c
--- /dev/null
+++ b/plugins/modules/notification/twilio.py
@@ -0,0 +1,169 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2015, Matt Makai
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: twilio
+short_description: Sends a text message to a mobile phone through Twilio
+description:
+   - Sends a text message to a phone number through the Twilio messaging API.
+notes:
+   - This module is non-idempotent because it sends a text message through the
+     external API. It is idempotent only in the case that the module fails.
+   - Like the other notification modules, this one requires an external
+     dependency to work. In this case, you'll need a Twilio account with
+     a purchased or verified phone number to send the text message.
+options: + account_sid: + description: + user's Twilio account token found on the account page + required: true + auth_token: + description: user's Twilio authentication token + required: true + msg: + description: + the body of the text message + required: true + to_numbers: + description: + one or more phone numbers to send the text message to, + format +15551112222 + required: true + aliases: [ to_number ] + from_number: + description: + the Twilio number to send the text message from, format +15551112222 + required: true + media_url: + description: + a URL with a picture, video or sound clip to send with an MMS + (multimedia message) instead of a plain SMS + required: false + +author: "Matt Makai (@makaimc)" +''' + +EXAMPLES = ''' +# send an SMS about the build status to (555) 303 5681 +# note: replace account_sid and auth_token values with your credentials +# and you have to have the 'from_number' on your Twilio account +- twilio: + msg: All servers with webserver role are now configured. + account_sid: ACXXXXXXXXXXXXXXXXX + auth_token: ACXXXXXXXXXXXXXXXXX + from_number: +15552014545 + to_number: +15553035681 + delegate_to: localhost + +# send an SMS to multiple phone numbers about the deployment +# note: replace account_sid and auth_token values with your credentials +# and you have to have the 'from_number' on your Twilio account +- twilio: + msg: This server configuration is now complete. + account_sid: ACXXXXXXXXXXXXXXXXX + auth_token: ACXXXXXXXXXXXXXXXXX + from_number: +15553258899 + to_numbers: + - +15551113232 + - +12025551235 + - +19735559010 + delegate_to: localhost + +# send an MMS to a single recipient with an update on the deployment +# and an image of the results +# note: replace account_sid and auth_token values with your credentials +# and you have to have the 'from_number' on your Twilio account +- twilio: + msg: Deployment complete! 
+ account_sid: ACXXXXXXXXXXXXXXXXX + auth_token: ACXXXXXXXXXXXXXXXXX + from_number: +15552014545 + to_number: +15553035681 + media_url: https://demo.twilio.com/logo.png + delegate_to: localhost +''' + +# ======================================= +# twilio module support methods +# +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.six.moves.urllib.parse import urlencode +from ansible.module_utils.urls import fetch_url + + +def post_twilio_api(module, account_sid, auth_token, msg, from_number, + to_number, media_url=None): + URI = "https://api.twilio.com/2010-04-01/Accounts/%s/Messages.json" \ + % (account_sid,) + AGENT = "Ansible" + + data = {'From': from_number, 'To': to_number, 'Body': msg} + if media_url: + data['MediaUrl'] = media_url + encoded_data = urlencode(data) + + headers = {'User-Agent': AGENT, + 'Content-type': 'application/x-www-form-urlencoded', + 'Accept': 'application/json', + } + + # Hack module params to have the Basic auth params that fetch_url expects + module.params['url_username'] = account_sid.replace('\n', '') + module.params['url_password'] = auth_token.replace('\n', '') + + return fetch_url(module, URI, data=encoded_data, headers=headers) + + +# ======================================= +# Main +# + +def main(): + + module = AnsibleModule( + argument_spec=dict( + account_sid=dict(required=True), + auth_token=dict(required=True, no_log=True), + msg=dict(required=True), + from_number=dict(required=True), + to_numbers=dict(required=True, aliases=['to_number'], type='list'), + media_url=dict(default=None, required=False), + ), + supports_check_mode=True + ) + + account_sid = module.params['account_sid'] + auth_token = module.params['auth_token'] + msg = module.params['msg'] + from_number = module.params['from_number'] + to_numbers = module.params['to_numbers'] + media_url = module.params['media_url'] + + for number in to_numbers: + r, info = post_twilio_api(module, account_sid, auth_token, msg, + from_number, number, media_url) + if info['status'] not in [200, 201]: + body_message = "unknown error" + if 'body' in info: + body = module.from_json(info['body']) + body_message = body['message'] + module.fail_json(msg="unable to send message to %s: %s" % (number, body_message)) + + module.exit_json(msg=msg, changed=False) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/notification/typetalk.py b/plugins/modules/notification/typetalk.py new file mode 100644 index 0000000000..bafd580f0b --- /dev/null +++ b/plugins/modules/notification/typetalk.py @@ -0,0 +1,128 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: typetalk +short_description: Send a message to typetalk +description: + - Send a message to typetalk using typetalk API +options: + client_id: + description: + - OAuth2 client ID + required: true + client_secret: + description: + - OAuth2 client secret + required: true + topic: + description: + - topic id to post message + required: true + msg: + description: + - message body + required: true +requirements: [ json ] +author: "Takashi Someda (@tksmd)" +''' + +EXAMPLES = ''' +- typetalk: + client_id: 12345 + client_secret: 12345 + topic: 1 + msg: install completed +''' 
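+
+# Request flow (outline): get_access_token() POSTs client_id/client_secret with
+# grant_type=client_credentials and scope=topic.post to
+# https://typetalk.com/oauth2/access_token and reads 'access_token' from the
+# JSON response; send_message() then POSTs the message to /api/v1/topics/<topic>
+# with an 'Authorization: Bearer <token>' header. Both calls go through
+# do_request(), which raises ConnectionError on any non-200 response.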
+ +import json + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.six.moves.urllib.parse import urlencode +from ansible.module_utils.urls import fetch_url, ConnectionError + + +def do_request(module, url, params, headers=None): + data = urlencode(params) + if headers is None: + headers = dict() + headers = dict(headers, **{ + 'User-Agent': 'Ansible/typetalk module', + }) + r, info = fetch_url(module, url, data=data, headers=headers) + if info['status'] != 200: + exc = ConnectionError(info['msg']) + exc.code = info['status'] + raise exc + return r + + +def get_access_token(module, client_id, client_secret): + params = { + 'client_id': client_id, + 'client_secret': client_secret, + 'grant_type': 'client_credentials', + 'scope': 'topic.post' + } + res = do_request(module, 'https://typetalk.com/oauth2/access_token', params) + return json.load(res)['access_token'] + + +def send_message(module, client_id, client_secret, topic, msg): + """ + send message to typetalk + """ + try: + access_token = get_access_token(module, client_id, client_secret) + url = 'https://typetalk.com/api/v1/topics/%d' % topic + headers = { + 'Authorization': 'Bearer %s' % access_token, + } + do_request(module, url, {'message': msg}, headers) + return True, {'access_token': access_token} + except ConnectionError as e: + return False, e + + +def main(): + + module = AnsibleModule( + argument_spec=dict( + client_id=dict(required=True), + client_secret=dict(required=True, no_log=True), + topic=dict(required=True, type='int'), + msg=dict(required=True), + ), + supports_check_mode=False + ) + + if not json: + module.fail_json(msg="json module is required") + + client_id = module.params["client_id"] + client_secret = module.params["client_secret"] + topic = module.params["topic"] + msg = module.params["msg"] + + res, error = send_message(module, client_id, client_secret, topic, msg) + if not res: + module.fail_json(msg='fail to send message with response code %s' % error.code) + + module.exit_json(changed=True, topic=topic, msg=msg) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/packaging/language/bower.py b/plugins/modules/packaging/language/bower.py new file mode 100644 index 0000000000..703886dbd2 --- /dev/null +++ b/plugins/modules/packaging/language/bower.py @@ -0,0 +1,228 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2014, Michael Warkentin +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: bower +short_description: Manage bower packages with bower +description: + - Manage bower packages with bower +author: "Michael Warkentin (@mwarkentin)" +options: + name: + description: + - The name of a bower package to install + offline: + description: + - Install packages from local cache, if the packages were installed before + type: bool + default: 'no' + production: + description: + - Install with --production flag + type: bool + default: 'no' + path: + description: + - The base path where to install the bower packages + required: true + relative_execpath: + description: + - Relative path to bower executable from install path + state: + description: + - The state of the bower package + default: present + choices: [ "present", "absent", "latest" ] + version: + description: + - The version to 
be installed +''' + +EXAMPLES = ''' +- name: Install "bootstrap" bower package. + bower: + name: bootstrap + +- name: Install "bootstrap" bower package on version 3.1.1. + bower: + name: bootstrap + version: '3.1.1' + +- name: Remove the "bootstrap" bower package. + bower: + name: bootstrap + state: absent + +- name: Install packages based on bower.json. + bower: + path: /app/location + +- name: Update packages based on bower.json to their latest version. + bower: + path: /app/location + state: latest + +# install bower locally and run from there +- npm: + path: /app/location + name: bower + global: no +- bower: + path: /app/location + relative_execpath: node_modules/.bin +''' +import json +import os + +from ansible.module_utils.basic import AnsibleModule + + +class Bower(object): + def __init__(self, module, **kwargs): + self.module = module + self.name = kwargs['name'] + self.offline = kwargs['offline'] + self.production = kwargs['production'] + self.path = kwargs['path'] + self.relative_execpath = kwargs['relative_execpath'] + self.version = kwargs['version'] + + if kwargs['version']: + self.name_version = self.name + '#' + self.version + else: + self.name_version = self.name + + def _exec(self, args, run_in_check_mode=False, check_rc=True): + if not self.module.check_mode or (self.module.check_mode and run_in_check_mode): + cmd = [] + + if self.relative_execpath: + cmd.append(os.path.join(self.path, self.relative_execpath, "bower")) + if not os.path.isfile(cmd[-1]): + self.module.fail_json(msg="bower not found at relative path %s" % self.relative_execpath) + else: + cmd.append("bower") + + cmd.extend(args) + cmd.extend(['--config.interactive=false', '--allow-root']) + + if self.name: + cmd.append(self.name_version) + + if self.offline: + cmd.append('--offline') + + if self.production: + cmd.append('--production') + + # If path is specified, cd into that path and run the command. 
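+            # A fully assembled command line here looks like, with illustrative
+            # values:
+            #   /app/location/node_modules/.bin/bower install --config.interactive=false --allow-root bootstrap#3.1.1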
+ cwd = None + if self.path: + if not os.path.exists(self.path): + os.makedirs(self.path) + if not os.path.isdir(self.path): + self.module.fail_json(msg="path %s is not a directory" % self.path) + cwd = self.path + + rc, out, err = self.module.run_command(cmd, check_rc=check_rc, cwd=cwd) + return out + return '' + + def list(self): + cmd = ['list', '--json'] + + installed = list() + missing = list() + outdated = list() + data = json.loads(self._exec(cmd, True, False)) + if 'dependencies' in data: + for dep in data['dependencies']: + dep_data = data['dependencies'][dep] + if dep_data.get('missing', False): + missing.append(dep) + elif ('version' in dep_data['pkgMeta'] and + 'update' in dep_data and + dep_data['pkgMeta']['version'] != dep_data['update']['latest']): + outdated.append(dep) + elif dep_data.get('incompatible', False): + outdated.append(dep) + else: + installed.append(dep) + # Named dependency not installed + else: + missing.append(self.name) + + return installed, missing, outdated + + def install(self): + return self._exec(['install']) + + def update(self): + return self._exec(['update']) + + def uninstall(self): + return self._exec(['uninstall']) + + +def main(): + arg_spec = dict( + name=dict(default=None), + offline=dict(default='no', type='bool'), + production=dict(default='no', type='bool'), + path=dict(required=True, type='path'), + relative_execpath=dict(default=None, required=False, type='path'), + state=dict(default='present', choices=['present', 'absent', 'latest', ]), + version=dict(default=None), + ) + module = AnsibleModule( + argument_spec=arg_spec + ) + + name = module.params['name'] + offline = module.params['offline'] + production = module.params['production'] + path = module.params['path'] + relative_execpath = module.params['relative_execpath'] + state = module.params['state'] + version = module.params['version'] + + if state == 'absent' and not name: + module.fail_json(msg='uninstalling a package is only available for named packages') + + bower = Bower(module, name=name, offline=offline, production=production, path=path, relative_execpath=relative_execpath, version=version) + + changed = False + if state == 'present': + installed, missing, outdated = bower.list() + if missing: + changed = True + bower.install() + elif state == 'latest': + installed, missing, outdated = bower.list() + if missing or outdated: + changed = True + bower.update() + else: # Absent + installed, missing, outdated = bower.list() + if name in installed: + changed = True + bower.uninstall() + + module.exit_json(changed=changed) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/packaging/language/bundler.py b/plugins/modules/packaging/language/bundler.py new file mode 100644 index 0000000000..b9559a7f85 --- /dev/null +++ b/plugins/modules/packaging/language/bundler.py @@ -0,0 +1,199 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2015, Tim Hoiberg +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: bundler +short_description: Manage Ruby Gem dependencies with Bundler +description: + - Manage installation and Gem version dependencies for Ruby using the Bundler gem +options: + executable: + description: + - The path to the bundler executable + state: + description: + - The desired state of 
the Gem bundle. C(latest) updates gems to the most recent, acceptable version + choices: [present, latest] + default: present + chdir: + description: + - The directory to execute the bundler commands from. This directory + needs to contain a valid Gemfile or .bundle/ directory + default: temporary working directory + exclude_groups: + description: + - A list of Gemfile groups to exclude during operations. This only + applies when state is C(present). Bundler considers this + a 'remembered' property for the Gemfile and will automatically exclude + groups in future operations even if C(exclude_groups) is not set + clean: + description: + - Only applies if state is C(present). If set removes any gems on the + target host that are not in the gemfile + type: bool + default: 'no' + gemfile: + description: + - Only applies if state is C(present). The path to the gemfile to use to install gems. + default: Gemfile in current directory + local: + description: + - If set only installs gems from the cache on the target host + type: bool + default: 'no' + deployment_mode: + description: + - Only applies if state is C(present). If set it will install gems in + ./vendor/bundle instead of the default location. Requires a Gemfile.lock + file to have been created prior + type: bool + default: 'no' + user_install: + description: + - Only applies if state is C(present). Installs gems in the local user's cache or for all users + type: bool + default: 'yes' + gem_path: + description: + - Only applies if state is C(present). Specifies the directory to + install the gems into. If C(chdir) is set then this path is relative to + C(chdir) + default: RubyGems gem paths + binstub_directory: + description: + - Only applies if state is C(present). Specifies the directory to + install any gem bins files to. When executed the bin files will run + within the context of the Gemfile and fail if any required gem + dependencies are not installed. If C(chdir) is set then this path is + relative to C(chdir) + extra_args: + description: + - A space separated string of additional commands that can be applied to + the Bundler command. 
Refer to the Bundler documentation for more + information +author: "Tim Hoiberg (@thoiberg)" +''' + +EXAMPLES = ''' +# Installs gems from a Gemfile in the current directory +- bundler: + state: present + executable: ~/.rvm/gems/2.1.5/bin/bundle + +# Excludes the production group from installing +- bundler: + state: present + exclude_groups: production + +# Install gems into ./vendor/bundle +- bundler: + state: present + deployment_mode: yes + +# Installs gems using a Gemfile in another directory +- bundler: + state: present + gemfile: ../rails_project/Gemfile + +# Updates Gemfile in another directory +- bundler: + state: latest + chdir: ~/rails_project +''' + +from ansible.module_utils.basic import AnsibleModule + + +def get_bundler_executable(module): + if module.params.get('executable'): + result = module.params.get('executable').split(' ') + else: + result = [module.get_bin_path('bundle', True)] + return result + + +def main(): + module = AnsibleModule( + argument_spec=dict( + executable=dict(default=None, required=False), + state=dict(default='present', required=False, choices=['present', 'latest']), + chdir=dict(default=None, required=False, type='path'), + exclude_groups=dict(default=None, required=False, type='list'), + clean=dict(default=False, required=False, type='bool'), + gemfile=dict(default=None, required=False, type='path'), + local=dict(default=False, required=False, type='bool'), + deployment_mode=dict(default=False, required=False, type='bool'), + user_install=dict(default=True, required=False, type='bool'), + gem_path=dict(default=None, required=False, type='path'), + binstub_directory=dict(default=None, required=False, type='path'), + extra_args=dict(default=None, required=False), + ), + supports_check_mode=True + ) + + state = module.params.get('state') + chdir = module.params.get('chdir') + exclude_groups = module.params.get('exclude_groups') + clean = module.params.get('clean') + gemfile = module.params.get('gemfile') + local = module.params.get('local') + deployment_mode = module.params.get('deployment_mode') + user_install = module.params.get('user_install') + gem_path = module.params.get('gem_path') + binstub_directory = module.params.get('binstub_directory') + extra_args = module.params.get('extra_args') + + cmd = get_bundler_executable(module) + + if module.check_mode: + cmd.append('check') + rc, out, err = module.run_command(cmd, cwd=chdir, check_rc=False) + + module.exit_json(changed=rc != 0, state=state, stdout=out, stderr=err) + + if state == 'present': + cmd.append('install') + if exclude_groups: + cmd.extend(['--without', ':'.join(exclude_groups)]) + if clean: + cmd.append('--clean') + if gemfile: + cmd.extend(['--gemfile', gemfile]) + if local: + cmd.append('--local') + if deployment_mode: + cmd.append('--deployment') + if not user_install: + cmd.append('--system') + if gem_path: + cmd.extend(['--path', gem_path]) + if binstub_directory: + cmd.extend(['--binstubs', binstub_directory]) + else: + cmd.append('update') + if local: + cmd.append('--local') + + if extra_args: + cmd.extend(extra_args.split(' ')) + + rc, out, err = module.run_command(cmd, cwd=chdir, check_rc=True) + + module.exit_json(changed='Installing' in out, state=state, stdout=out, stderr=err) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/packaging/language/composer.py b/plugins/modules/packaging/language/composer.py new file mode 100644 index 0000000000..fdf96c8a6f --- /dev/null +++ b/plugins/modules/packaging/language/composer.py @@ -0,0 +1,263 @@ +#!/usr/bin/python 
+# -*- coding: utf-8 -*- + +# (c) 2014, Dimitrios Tydeas Mengidis +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: composer +author: + - "Dimitrios Tydeas Mengidis (@dmtrs)" + - "René Moser (@resmo)" +short_description: Dependency Manager for PHP +description: + - > + Composer is a tool for dependency management in PHP. It allows you to + declare the dependent libraries your project needs and it will install + them in your project for you. +options: + command: + description: + - Composer command like "install", "update" and so on. + default: install + arguments: + description: + - Composer arguments like required package, version and so on. + executable: + description: + - Path to PHP Executable on the remote host, if PHP is not in PATH. + aliases: [ php_path ] + working_dir: + description: + - Directory of your project (see --working-dir). This is required when + the command is not run globally. + - Will be ignored if C(global_command=true). + aliases: [ working-dir ] + global_command: + description: + - Runs the specified command globally. + type: bool + default: false + aliases: [ global-command ] + prefer_source: + description: + - Forces installation from package sources when possible (see --prefer-source). + default: false + type: bool + aliases: [ prefer-source ] + prefer_dist: + description: + - Forces installation from package dist even for dev versions (see --prefer-dist). + default: false + type: bool + aliases: [ prefer-dist ] + no_dev: + description: + - Disables installation of require-dev packages (see --no-dev). + default: true + type: bool + aliases: [ no-dev ] + no_scripts: + description: + - Skips the execution of all scripts defined in composer.json (see --no-scripts). + default: false + type: bool + aliases: [ no-scripts ] + no_plugins: + description: + - Disables all plugins ( see --no-plugins ). + default: false + type: bool + aliases: [ no-plugins ] + optimize_autoloader: + description: + - Optimize autoloader during autoloader dump (see --optimize-autoloader). + - Convert PSR-0/4 autoloading to classmap to get a faster autoloader. + - Recommended especially for production, but can take a bit of time to run. + default: true + type: bool + aliases: [ optimize-autoloader ] + classmap_authoritative: + description: + - Autoload classes from classmap only. + - Implicitely enable optimize_autoloader. + - Recommended especially for production, but can take a bit of time to run. + default: false + type: bool + aliases: [ classmap-authoritative ] + apcu_autoloader: + description: + - Uses APCu to cache found/not-found classes + default: false + type: bool + aliases: [ apcu-autoloader ] + ignore_platform_reqs: + description: + - Ignore php, hhvm, lib-* and ext-* requirements and force the installation even if the local machine does not fulfill these. + default: false + type: bool + aliases: [ ignore-platform-reqs ] +requirements: + - php + - composer installed in bin path (recommended /usr/local/bin) +notes: + - Default options that are always appended in each execution are --no-ansi, --no-interaction and --no-progress if available. + - We received reports about issues on macOS if composer was installed by Homebrew. Please use the official install method to avoid issues. 
+''' + +EXAMPLES = ''' +# Downloads and installs all the libs and dependencies outlined in the /path/to/project/composer.lock +- composer: + command: install + working_dir: /path/to/project + +- composer: + command: require + arguments: my/package + working_dir: /path/to/project + +# Clone project and install with all dependencies +- composer: + command: create-project + arguments: package/package /path/to/project ~1.0 + working_dir: /path/to/project + prefer_dist: yes + +# Installs package globally +- composer: + command: require + global_command: yes + arguments: my/package +''' + +import re +from ansible.module_utils.basic import AnsibleModule + + +def parse_out(string): + return re.sub(r"\s+", " ", string).strip() + + +def has_changed(string): + return "Nothing to install or update" not in string + + +def get_available_options(module, command='install'): + # get all available options from a composer command using composer help to json + rc, out, err = composer_command(module, "help %s --format=json" % command) + if rc != 0: + output = parse_out(err) + module.fail_json(msg=output) + + command_help_json = module.from_json(out) + return command_help_json['definition']['options'] + + +def composer_command(module, command, arguments="", options=None, global_command=False): + if options is None: + options = [] + + if module.params['executable'] is None: + php_path = module.get_bin_path("php", True, ["/usr/local/bin"]) + else: + php_path = module.params['executable'] + + composer_path = module.get_bin_path("composer", True, ["/usr/local/bin"]) + cmd = "%s %s %s %s %s %s" % (php_path, composer_path, "global" if global_command else "", command, " ".join(options), arguments) + return module.run_command(cmd) + + +def main(): + module = AnsibleModule( + argument_spec=dict( + command=dict(default="install", type="str"), + arguments=dict(default="", type="str"), + executable=dict(type="path", aliases=["php_path"]), + working_dir=dict(type="path", aliases=["working-dir"]), + global_command=dict(default=False, type="bool", aliases=["global-command"]), + prefer_source=dict(default=False, type="bool", aliases=["prefer-source"]), + prefer_dist=dict(default=False, type="bool", aliases=["prefer-dist"]), + no_dev=dict(default=True, type="bool", aliases=["no-dev"]), + no_scripts=dict(default=False, type="bool", aliases=["no-scripts"]), + no_plugins=dict(default=False, type="bool", aliases=["no-plugins"]), + apcu_autoloader=dict(default=False, type="bool", aliases=["apcu-autoloader"]), + optimize_autoloader=dict(default=True, type="bool", aliases=["optimize-autoloader"]), + classmap_authoritative=dict(default=False, type="bool", aliases=["classmap-authoritative"]), + ignore_platform_reqs=dict(default=False, type="bool", aliases=["ignore-platform-reqs"]), + ), + required_if=[('global_command', False, ['working_dir'])], + supports_check_mode=True + ) + + # Get composer command with fallback to default + command = module.params['command'] + if re.search(r"\s", command): + module.fail_json(msg="Use the 'arguments' param for passing arguments with the 'command'") + + arguments = module.params['arguments'] + global_command = module.params['global_command'] + available_options = get_available_options(module=module, command=command) + + options = [] + + # Default options + default_options = [ + 'no-ansi', + 'no-interaction', + 'no-progress', + ] + + for option in default_options: + if option in available_options: + option = "--%s" % option + options.append(option) + + if not global_command: + 
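+        # composer itself is invoked through the PHP binary (see composer_command()),
+        # so the project directory is passed with --working-dir instead of changing
+        # the process working directory.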
        options.extend(['--working-dir', "'%s'" % module.params['working_dir']])
+
+    option_params = {
+        'prefer_source': 'prefer-source',
+        'prefer_dist': 'prefer-dist',
+        'no_dev': 'no-dev',
+        'no_scripts': 'no-scripts',
+        'no_plugins': 'no-plugins',
+        'apcu_autoloader': 'apcu-autoloader',
+        'optimize_autoloader': 'optimize-autoloader',
+        'classmap_authoritative': 'classmap-authoritative',
+        'ignore_platform_reqs': 'ignore-platform-reqs',
+    }
+
+    for param, option in option_params.items():
+        if module.params.get(param) and option in available_options:
+            option = "--%s" % option
+            options.append(option)
+
+    if module.check_mode:
+        if 'dry-run' in available_options:
+            options.append('--dry-run')
+        else:
+            module.exit_json(skipped=True, msg="command '%s' does not support check mode, skipping" % command)
+
+    rc, out, err = composer_command(module, command, arguments, options, global_command)
+
+    if rc != 0:
+        output = parse_out(err)
+        module.fail_json(msg=output, stdout=err)
+    else:
+        # Composer versions > 1.0.0-alpha9 now use stderr for standard notification messages
+        output = parse_out(out + err)
+        module.exit_json(changed=has_changed(output), msg=output, stdout=out + err)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/packaging/language/cpanm.py b/plugins/modules/packaging/language/cpanm.py
new file mode 100644
index 0000000000..723e12aeb4
--- /dev/null
+++ b/plugins/modules/packaging/language/cpanm.py
@@ -0,0 +1,218 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2012, Franck Cuny
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: cpanm
+short_description: Manages Perl library dependencies
+description:
+  - Manage Perl library dependencies.
+options:
+  name:
+    description:
+      - The name of the Perl library to install. You may use the "full distribution path", e.g. MIYAGAWA/Plack-0.99_05.tar.gz
+    aliases: ["pkg"]
+  from_path:
+    description:
+      - The local directory from where to install
+  notest:
+    description:
+      - Do not run unit tests
+    type: bool
+    default: no
+  locallib:
+    description:
+      - Specify the install base to install modules
+    type: path
+  mirror:
+    description:
+      - Specifies the base URL for the CPAN mirror to use
+    type: str
+  mirror_only:
+    description:
+      - Use the mirror's index file instead of the CPAN Meta DB
+    type: bool
+    default: no
+  installdeps:
+    description:
+      - Only install dependencies
+    type: bool
+    default: no
+  version:
+    description:
+      - minimum version of perl module to consider acceptable
+    type: str
+  system_lib:
+    description:
+      - Use this if you want to install modules to the system perl include path. You must be root or have "passwordless" sudo for this to work.
+      - This uses the cpanm commandline option '--sudo', which has nothing to do with ansible privilege escalation.
+    type: bool
+    default: no
+    aliases: ['use_sudo']
+  executable:
+    description:
+      - Override the path to the cpanm executable
+    type: path
+notes:
+  - Please note that C(cpanm) (U(http://search.cpan.org/dist/App-cpanminus/bin/cpanm)) must be installed on the remote host.
+author: "Franck Cuny (@fcuny)" +''' + +EXAMPLES = ''' +# install Dancer perl package +- cpanm: + name: Dancer + +# install version 0.99_05 of the Plack perl package +- cpanm: + name: MIYAGAWA/Plack-0.99_05.tar.gz + +# install Dancer into the specified locallib +- cpanm: + name: Dancer + locallib: /srv/webapps/my_app/extlib + +# install perl dependencies from local directory +- cpanm: + from_path: /srv/webapps/my_app/src/ + +# install Dancer perl package without running the unit tests in indicated locallib +- cpanm: + name: Dancer + notest: True + locallib: /srv/webapps/my_app/extlib + +# install Dancer perl package from a specific mirror +- cpanm: + name: Dancer + mirror: 'http://cpan.cpantesters.org/' + +# install Dancer perl package into the system root path +- cpanm: + name: Dancer + system_lib: yes + +# install Dancer if it's not already installed +# OR the installed version is older than version 1.0 +- cpanm: + name: Dancer + version: '1.0' +''' + +import os + +from ansible.module_utils.basic import AnsibleModule + + +def _is_package_installed(module, name, locallib, cpanm, version): + cmd = "" + if locallib: + os.environ["PERL5LIB"] = "%s/lib/perl5" % locallib + cmd = "%s perl -e ' use %s" % (cmd, name) + if version: + cmd = "%s %s;'" % (cmd, version) + else: + cmd = "%s;'" % cmd + res, stdout, stderr = module.run_command(cmd, check_rc=False) + return res == 0 + + +def _build_cmd_line(name, from_path, notest, locallib, mirror, mirror_only, installdeps, cpanm, use_sudo): + # this code should use "%s" like everything else and just return early but not fixing all of it now. + # don't copy stuff like this + if from_path: + cmd = cpanm + " " + from_path + else: + cmd = cpanm + " " + name + + if notest is True: + cmd = cmd + " -n" + + if locallib is not None: + cmd = cmd + " -l " + locallib + + if mirror is not None: + cmd = cmd + " --mirror " + mirror + + if mirror_only is True: + cmd = cmd + " --mirror-only" + + if installdeps is True: + cmd = cmd + " --installdeps" + + if use_sudo is True: + cmd = cmd + " --sudo" + + return cmd + + +def _get_cpanm_path(module): + if module.params['executable']: + result = module.params['executable'] + else: + result = module.get_bin_path('cpanm', True) + return result + + +def main(): + arg_spec = dict( + name=dict(default=None, required=False, aliases=['pkg']), + from_path=dict(default=None, required=False, type='path'), + notest=dict(default=False, type='bool'), + locallib=dict(default=None, required=False, type='path'), + mirror=dict(default=None, required=False), + mirror_only=dict(default=False, type='bool'), + installdeps=dict(default=False, type='bool'), + system_lib=dict(default=False, type='bool', aliases=['use_sudo']), + version=dict(default=None, required=False), + executable=dict(required=False, type='path'), + ) + + module = AnsibleModule( + argument_spec=arg_spec, + required_one_of=[['name', 'from_path']], + ) + + cpanm = _get_cpanm_path(module) + name = module.params['name'] + from_path = module.params['from_path'] + notest = module.boolean(module.params.get('notest', False)) + locallib = module.params['locallib'] + mirror = module.params['mirror'] + mirror_only = module.params['mirror_only'] + installdeps = module.params['installdeps'] + use_sudo = module.params['system_lib'] + version = module.params['version'] + + changed = False + + installed = _is_package_installed(module, name, locallib, cpanm, version) + + if not installed: + cmd = _build_cmd_line(name, from_path, notest, locallib, mirror, mirror_only, installdeps, cpanm, 
use_sudo) + + rc_cpanm, out_cpanm, err_cpanm = module.run_command(cmd, check_rc=False) + + if rc_cpanm != 0: + module.fail_json(msg=err_cpanm, cmd=cmd) + + if (err_cpanm.find('is up to date') == -1 and out_cpanm.find('is up to date') == -1): + changed = True + + module.exit_json(changed=changed, binary=cpanm, name=name) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/packaging/language/easy_install.py b/plugins/modules/packaging/language/easy_install.py new file mode 100644 index 0000000000..dfe9883c24 --- /dev/null +++ b/plugins/modules/packaging/language/easy_install.py @@ -0,0 +1,197 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2012, Matt Wright +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: easy_install +short_description: Installs Python libraries +description: + - Installs Python libraries, optionally in a I(virtualenv) +options: + name: + description: + - A Python library name + required: true + virtualenv: + description: + - an optional I(virtualenv) directory path to install into. If the + I(virtualenv) does not exist, it is created automatically + virtualenv_site_packages: + description: + - Whether the virtual environment will inherit packages from the + global site-packages directory. Note that if this setting is + changed on an already existing virtual environment it will not + have any effect, the environment must be deleted and newly + created. + type: bool + default: 'no' + virtualenv_command: + description: + - The command to create the virtual environment with. For example + C(pyvenv), C(virtualenv), C(virtualenv2). + default: virtualenv + executable: + description: + - The explicit executable or a pathname to the executable to be used to + run easy_install for a specific version of Python installed in the + system. For example C(easy_install-3.3), if there are both Python 2.7 + and 3.3 installations in the system and you want to run easy_install + for the Python 3.3 installation. + state: + description: + - The desired state of the library. C(latest) ensures that the latest version is installed. + choices: [present, latest] + default: present +notes: + - Please note that the C(easy_install) module can only install Python + libraries. Thus this module is not able to remove libraries. It is + generally recommended to use the M(pip) module which you can first install + using M(easy_install). + - Also note that I(virtualenv) must be installed on the remote host if the + C(virtualenv) parameter is specified. +requirements: [ "virtualenv" ] +author: "Matt Wright (@mattupstate)" +''' + +EXAMPLES = ''' +# Examples from Ansible Playbooks +- easy_install: + name: pip + state: latest + +# Install Bottle into the specified virtualenv. 
+- easy_install: + name: bottle + virtualenv: /webapps/myapp/venv +''' + +import os +import os.path +import tempfile +from ansible.module_utils.basic import AnsibleModule + + +def install_package(module, name, easy_install, executable_arguments): + cmd = '%s %s %s' % (easy_install, ' '.join(executable_arguments), name) + rc, out, err = module.run_command(cmd) + return rc, out, err + + +def _is_package_installed(module, name, easy_install, executable_arguments): + # Copy and add to the arguments + executable_arguments = executable_arguments[:] + executable_arguments.append('--dry-run') + rc, out, err = install_package(module, name, easy_install, executable_arguments) + if rc: + module.fail_json(msg=err) + return 'Downloading' not in out + + +def _get_easy_install(module, env=None, executable=None): + candidate_easy_inst_basenames = ['easy_install'] + easy_install = None + if executable is not None: + if os.path.isabs(executable): + easy_install = executable + else: + candidate_easy_inst_basenames.insert(0, executable) + if easy_install is None: + if env is None: + opt_dirs = [] + else: + # Try easy_install with the virtualenv directory first. + opt_dirs = ['%s/bin' % env] + for basename in candidate_easy_inst_basenames: + easy_install = module.get_bin_path(basename, False, opt_dirs) + if easy_install is not None: + break + # easy_install should have been found by now. The final call to + # get_bin_path will trigger fail_json. + if easy_install is None: + basename = candidate_easy_inst_basenames[0] + easy_install = module.get_bin_path(basename, True, opt_dirs) + return easy_install + + +def main(): + arg_spec = dict( + name=dict(required=True), + state=dict(required=False, + default='present', + choices=['present', 'latest'], + type='str'), + virtualenv=dict(default=None, required=False), + virtualenv_site_packages=dict(default='no', type='bool'), + virtualenv_command=dict(default='virtualenv', required=False), + executable=dict(default='easy_install', required=False), + ) + + module = AnsibleModule(argument_spec=arg_spec, supports_check_mode=True) + + name = module.params['name'] + env = module.params['virtualenv'] + executable = module.params['executable'] + site_packages = module.params['virtualenv_site_packages'] + virtualenv_command = module.params['virtualenv_command'] + executable_arguments = [] + if module.params['state'] == 'latest': + executable_arguments.append('--upgrade') + + rc = 0 + err = '' + out = '' + + if env: + virtualenv = module.get_bin_path(virtualenv_command, True) + + if not os.path.exists(os.path.join(env, 'bin', 'activate')): + if module.check_mode: + module.exit_json(changed=True) + command = '%s %s' % (virtualenv, env) + if site_packages: + command += ' --system-site-packages' + cwd = tempfile.gettempdir() + rc_venv, out_venv, err_venv = module.run_command(command, cwd=cwd) + + rc += rc_venv + out += out_venv + err += err_venv + + easy_install = _get_easy_install(module, env, executable) + + cmd = None + changed = False + installed = _is_package_installed(module, name, easy_install, executable_arguments) + + if not installed: + if module.check_mode: + module.exit_json(changed=True) + rc_easy_inst, out_easy_inst, err_easy_inst = install_package(module, name, easy_install, executable_arguments) + + rc += rc_easy_inst + out += out_easy_inst + err += err_easy_inst + + changed = True + + if rc != 0: + module.fail_json(msg=err, cmd=cmd) + + module.exit_json(changed=changed, binary=easy_install, + name=name, virtualenv=env) + + +if __name__ == '__main__': + main() diff 
--git a/plugins/modules/packaging/language/gem.py b/plugins/modules/packaging/language/gem.py new file mode 100644 index 0000000000..1cd98c4e69 --- /dev/null +++ b/plugins/modules/packaging/language/gem.py @@ -0,0 +1,308 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2013, Johan Wiren +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: gem +short_description: Manage Ruby gems +description: + - Manage installation and uninstallation of Ruby gems. +options: + name: + description: + - The name of the gem to be managed. + required: true + state: + description: + - The desired state of the gem. C(latest) ensures that the latest version is installed. + required: false + choices: [present, absent, latest] + default: present + gem_source: + description: + - The path to a local gem used as installation source. + required: false + include_dependencies: + description: + - Whether to include dependencies or not. + required: false + type: bool + default: "yes" + repository: + description: + - The repository from which the gem will be installed + required: false + aliases: [source] + user_install: + description: + - Install gem in user's local gems cache or for all users + required: false + type: bool + default: "yes" + executable: + description: + - Override the path to the gem executable + required: false + install_dir: + description: + - Install the gems into a specific directory. + These gems will be independent from the global installed ones. + Specifying this requires user_install to be false. + required: false + env_shebang: + description: + - Rewrite the shebang line on installed scripts to use /usr/bin/env. + required: false + default: "no" + type: bool + version: + description: + - Version of the gem to be installed/removed. + required: false + pre_release: + description: + - Allow installation of pre-release versions of the gem. + required: false + default: "no" + type: bool + include_doc: + description: + - Install with or without docs. + required: false + default: "no" + type: bool + build_flags: + description: + - Allow adding build flags for gem compilation + required: false + force: + description: + - Force gem to install, bypassing dependency checks. + required: false + default: "no" + type: bool +author: + - "Ansible Core Team" + - "Johan Wiren (@johanwiren)" +''' + +EXAMPLES = ''' +# Installs version 1.0 of vagrant. +- gem: + name: vagrant + version: 1.0 + state: present + +# Installs latest available version of rake. +- gem: + name: rake + state: latest + +# Installs rake version 1.0 from a local gem on disk. 
+- gem: + name: rake + gem_source: /path/to/gems/rake-1.0.gem + state: present +''' + +import re + +from ansible.module_utils.basic import AnsibleModule + + +def get_rubygems_path(module): + if module.params['executable']: + result = module.params['executable'].split(' ') + else: + result = [module.get_bin_path('gem', True)] + return result + + +def get_rubygems_version(module): + cmd = get_rubygems_path(module) + ['--version'] + (rc, out, err) = module.run_command(cmd, check_rc=True) + + match = re.match(r'^(\d+)\.(\d+)\.(\d+)', out) + if not match: + return None + + return tuple(int(x) for x in match.groups()) + + +def get_rubygems_environ(module): + if module.params['install_dir']: + return {'GEM_HOME': module.params['install_dir']} + return None + + +def get_installed_versions(module, remote=False): + + cmd = get_rubygems_path(module) + cmd.append('query') + if remote: + cmd.append('--remote') + if module.params['repository']: + cmd.extend(['--source', module.params['repository']]) + cmd.append('-n') + cmd.append('^%s$' % module.params['name']) + + environ = get_rubygems_environ(module) + (rc, out, err) = module.run_command(cmd, environ_update=environ, check_rc=True) + installed_versions = [] + for line in out.splitlines(): + match = re.match(r"\S+\s+\((.+)\)", line) + if match: + versions = match.group(1) + for version in versions.split(', '): + installed_versions.append(version.split()[0]) + return installed_versions + + +def exists(module): + if module.params['state'] == 'latest': + remoteversions = get_installed_versions(module, remote=True) + if remoteversions: + module.params['version'] = remoteversions[0] + installed_versions = get_installed_versions(module) + if module.params['version']: + if module.params['version'] in installed_versions: + return True + else: + if installed_versions: + return True + return False + + +def uninstall(module): + + if module.check_mode: + return + cmd = get_rubygems_path(module) + environ = get_rubygems_environ(module) + cmd.append('uninstall') + if module.params['install_dir']: + cmd.extend(['--install-dir', module.params['install_dir']]) + + if module.params['version']: + cmd.extend(['--version', module.params['version']]) + else: + cmd.append('--all') + cmd.append('--executable') + cmd.append(module.params['name']) + module.run_command(cmd, environ_update=environ, check_rc=True) + + +def install(module): + + if module.check_mode: + return + + ver = get_rubygems_version(module) + if ver: + major = ver[0] + else: + major = None + + cmd = get_rubygems_path(module) + cmd.append('install') + if module.params['version']: + cmd.extend(['--version', module.params['version']]) + if module.params['repository']: + cmd.extend(['--source', module.params['repository']]) + if not module.params['include_dependencies']: + cmd.append('--ignore-dependencies') + else: + if major and major < 2: + cmd.append('--include-dependencies') + if module.params['user_install']: + cmd.append('--user-install') + else: + cmd.append('--no-user-install') + if module.params['install_dir']: + cmd.extend(['--install-dir', module.params['install_dir']]) + if module.params['pre_release']: + cmd.append('--pre') + if not module.params['include_doc']: + if major and major < 2: + cmd.append('--no-rdoc') + cmd.append('--no-ri') + else: + cmd.append('--no-document') + if module.params['env_shebang']: + cmd.append('--env-shebang') + cmd.append(module.params['gem_source']) + if module.params['build_flags']: + cmd.extend(['--', module.params['build_flags']]) + if module.params['force']: + 
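+        # Mirrors 'gem install --force': install even when dependency checks
+        # fail (see the 'force' option description in DOCUMENTATION above).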
        cmd.append('--force')
+    module.run_command(cmd, check_rc=True)
+
+
+def main():
+
+    module = AnsibleModule(
+        argument_spec=dict(
+            executable=dict(required=False, type='path'),
+            gem_source=dict(required=False, type='path'),
+            include_dependencies=dict(required=False, default=True, type='bool'),
+            name=dict(required=True, type='str'),
+            repository=dict(required=False, aliases=['source'], type='str'),
+            state=dict(required=False, default='present', choices=['present', 'absent', 'latest'], type='str'),
+            user_install=dict(required=False, default=True, type='bool'),
+            install_dir=dict(required=False, type='path'),
+            pre_release=dict(required=False, default=False, type='bool'),
+            include_doc=dict(required=False, default=False, type='bool'),
+            env_shebang=dict(required=False, default=False, type='bool'),
+            version=dict(required=False, type='str'),
+            build_flags=dict(required=False, type='str'),
+            force=dict(required=False, default=False, type='bool'),
+        ),
+        supports_check_mode=True,
+        mutually_exclusive=[['gem_source', 'repository'], ['gem_source', 'version']],
+    )
+
+    if module.params['version'] and module.params['state'] == 'latest':
+        module.fail_json(msg="Cannot specify version when state=latest")
+    if module.params['gem_source'] and module.params['state'] == 'latest':
+        module.fail_json(msg="Cannot maintain state=latest when installing from local source")
+    if module.params['user_install'] and module.params['install_dir']:
+        module.fail_json(msg="install_dir requires user_install=false")
+
+    if not module.params['gem_source']:
+        module.params['gem_source'] = module.params['name']
+
+    changed = False
+
+    if module.params['state'] in ['present', 'latest']:
+        if not exists(module):
+            install(module)
+            changed = True
+    elif module.params['state'] == 'absent':
+        if exists(module):
+            uninstall(module)
+            changed = True
+
+    result = {}
+    result['name'] = module.params['name']
+    result['state'] = module.params['state']
+    if module.params['version']:
+        result['version'] = module.params['version']
+    result['changed'] = changed
+
+    module.exit_json(**result)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/packaging/language/maven_artifact.py b/plugins/modules/packaging/language/maven_artifact.py
new file mode 100644
index 0000000000..f1b5e2f3d3
--- /dev/null
+++ b/plugins/modules/packaging/language/maven_artifact.py
@@ -0,0 +1,667 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2014, Chris Schmidt
+#
+# Built using https://github.com/hamnis/useful-scripts/blob/master/python/download-maven-artifact
+# as a reference and starting point.
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+DOCUMENTATION = '''
+---
+module: maven_artifact
+short_description: Downloads an Artifact from a Maven Repository
+description:
+    - Downloads an artifact from a maven repository given the maven coordinates provided to the module.
+    - Can retrieve snapshots or release versions of the artifact and will resolve the latest available
+      version if one is not specified.
+author: "Chris Schmidt (@chrisisbeef)"
+requirements:
+    - lxml
+    - boto3 if using an S3 repository (s3://...)
+options:
+    group_id:
+        description:
+            - The Maven groupId coordinate.
+        required: true
+    artifact_id:
+        description:
+            - The maven artifactId coordinate.
+        required: true
+    version:
+        description:
+            - The maven version coordinate.
+            - Mutually exclusive with I(version_by_spec).
+    version_by_spec:
+        description:
+            - The maven dependency version ranges.
+            - See supported version ranges on U(https://cwiki.apache.org/confluence/display/MAVENOLD/Dependency+Mediation+and+Conflict+Resolution)
+            - The range types "(,1.0],[1.2,)" and "(,1.1),(1.1,)" are not supported.
+            - Mutually exclusive with I(version).
+    classifier:
+        description:
+            - The maven classifier coordinate.
+    extension:
+        description:
+            - The maven type/extension coordinate.
+        default: jar
+    repository_url:
+        description:
+            - The URL of the Maven Repository to download from.
+            - Use s3://... if the repository is hosted on Amazon S3, added in version 2.2.
+            - Use file://... if the repository is local, added in version 2.6.
+        default: https://repo1.maven.org/maven2
+    username:
+        description:
+            - The username to authenticate as to the Maven Repository. Use the AWS secret key if the repository is hosted on S3.
+        aliases: [ "aws_secret_key" ]
+    password:
+        description:
+            - The password to authenticate with to the Maven Repository. Use the AWS secret access key if the repository is hosted on S3.
+        aliases: [ "aws_secret_access_key" ]
+    headers:
+        description:
+            - Add custom HTTP headers to a request in hash/dict format.
+        type: dict
+    force_basic_auth:
+        description:
+            - httplib2, the library used by the uri module, only sends authentication information when a webservice
+              responds to an initial request with a 401 status. Since some basic auth services do not properly
+              send a 401, logins will fail. This option forces the sending of the Basic authentication header
+              upon the initial request.
+        default: 'no'
+        type: bool
+    dest:
+        description:
+            - The path where the artifact should be written to.
+            - If file mode or ownership is specified and the destination path already exists, it is applied to the downloaded file.
+        required: true
+    state:
+        description:
+            - The desired state of the artifact.
+        default: present
+        choices: [present,absent]
+    timeout:
+        description:
+            - Specifies a timeout in seconds for the connection attempt.
+        default: 10
+    validate_certs:
+        description:
+            - If C(no), SSL certificates will not be validated. This should only be set to C(no) when no other option exists.
+        type: bool
+        default: 'yes'
+    keep_name:
+        description:
+            - If C(yes), the downloaded artifact's name is preserved, i.e. the version number remains part of it.
+            - This option only has effect when C(dest) is a directory and C(version) is set to C(latest) or C(version_by_spec)
+              is defined.
+        type: bool
+        default: 'no'
+    verify_checksum:
+        description:
+            - If C(never), the md5 checksum will never be downloaded and verified.
+            - If C(download), the md5 checksum will be downloaded and verified only after artifact download. This is the default.
+            - If C(change), the md5 checksum will be downloaded and verified if the destination already exists,
+              to verify that they are identical. This was the behaviour before 2.6. Since it downloads the md5 before (maybe)
+              downloading the artifact, and since some repository software, when acting as a proxy/cache, returns a 404 error
+              if the artifact has not been cached yet, it may fail unexpectedly.
+              If you still need it, you should consider using C(always) instead - if you deal with a checksum, it is better to
+              use it to verify integrity after download.
+ - C(always) combines C(download) and C(change). + required: false + default: 'download' + choices: ['never', 'download', 'change', 'always'] +extends_documentation_fragment: + - files +''' + +EXAMPLES = ''' +# Download the latest version of the JUnit framework artifact from Maven Central +- maven_artifact: + group_id: junit + artifact_id: junit + dest: /tmp/junit-latest.jar + +# Download JUnit 4.11 from Maven Central +- maven_artifact: + group_id: junit + artifact_id: junit + version: 4.11 + dest: /tmp/junit-4.11.jar + +# Download an artifact from a private repository requiring authentication +- maven_artifact: + group_id: com.company + artifact_id: library-name + repository_url: 'https://repo.company.com/maven' + username: user + password: pass + dest: /tmp/library-name-latest.jar + +# Download a WAR File to the Tomcat webapps directory to be deployed +- maven_artifact: + group_id: com.company + artifact_id: web-app + extension: war + repository_url: 'https://repo.company.com/maven' + dest: /var/lib/tomcat7/webapps/web-app.war + +# Keep a downloaded artifact's name, i.e. retain the version +- maven_artifact: + version: latest + artifact_id: spring-core + group_id: org.springframework + dest: /tmp/ + keep_name: yes + +# Download the latest version of the JUnit framework artifact from Maven local +- maven_artifact: + group_id: junit + artifact_id: junit + dest: /tmp/junit-latest.jar + repository_url: "file://{{ lookup('env','HOME') }}/.m2/repository" + +# Download the latest version between 3.8 and 4.0 (exclusive) of the JUnit framework artifact from Maven Central +- maven_artifact: + group_id: junit + artifact_id: junit + version_by_spec: "[3.8,4.0)" + dest: /tmp/ +''' + +import hashlib +import os +import posixpath +import shutil +import io +import tempfile +import traceback + +from ansible.module_utils.ansible_release import __version__ as ansible_version +from re import match + +LXML_ETREE_IMP_ERR = None +try: + from lxml import etree + HAS_LXML_ETREE = True +except ImportError: + LXML_ETREE_IMP_ERR = traceback.format_exc() + HAS_LXML_ETREE = False + +BOTO_IMP_ERR = None +try: + import boto3 + HAS_BOTO = True +except ImportError: + BOTO_IMP_ERR = traceback.format_exc() + HAS_BOTO = False + +SEMANTIC_VERSION_IMP_ERR = None +try: + from semantic_version import Version, Spec + HAS_SEMANTIC_VERSION = True +except ImportError: + SEMANTIC_VERSION_IMP_ERR = traceback.format_exc() + HAS_SEMANTIC_VERSION = False + + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib +from ansible.module_utils.six.moves.urllib.parse import urlparse +from ansible.module_utils.urls import fetch_url +from ansible.module_utils._text import to_bytes, to_native, to_text + + +def split_pre_existing_dir(dirname): + ''' + Return the first pre-existing directory and a list of the new directories that will be created. 
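+
+    For example, if only /opt exists on disk, split_pre_existing_dir('/opt/app/releases')
+    returns ('/opt', ['app', 'releases']).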
+    '''
+    head, tail = os.path.split(dirname)
+    b_head = to_bytes(head, errors='surrogate_or_strict')
+    if not os.path.exists(b_head):
+        if head == dirname:
+            return None, [head]
+        else:
+            (pre_existing_dir, new_directory_list) = split_pre_existing_dir(head)
+    else:
+        return head, [tail]
+    new_directory_list.append(tail)
+    return pre_existing_dir, new_directory_list
+
+
+def adjust_recursive_directory_permissions(pre_existing_dir, new_directory_list, module, directory_args, changed):
+    '''
+    Walk the new directories list and make sure that permissions are as we would expect
+    '''
+    if new_directory_list:
+        first_sub_dir = new_directory_list.pop(0)
+        if not pre_existing_dir:
+            working_dir = first_sub_dir
+        else:
+            working_dir = os.path.join(pre_existing_dir, first_sub_dir)
+        directory_args['path'] = working_dir
+        changed = module.set_fs_attributes_if_different(directory_args, changed)
+        changed = adjust_recursive_directory_permissions(working_dir, new_directory_list, module, directory_args, changed)
+    return changed
+
+
+class Artifact(object):
+    def __init__(self, group_id, artifact_id, version, version_by_spec, classifier='', extension='jar'):
+        if not group_id:
+            raise ValueError("group_id must be set")
+        if not artifact_id:
+            raise ValueError("artifact_id must be set")
+
+        self.group_id = group_id
+        self.artifact_id = artifact_id
+        self.version = version
+        self.version_by_spec = version_by_spec
+        self.classifier = classifier
+
+        if not extension:
+            self.extension = "jar"
+        else:
+            self.extension = extension
+
+    def is_snapshot(self):
+        return self.version and self.version.endswith("SNAPSHOT")
+
+    def path(self, with_version=True):
+        base = posixpath.join(self.group_id.replace(".", "/"), self.artifact_id)
+        if with_version and self.version:
+            base = posixpath.join(base, self.version)
+        return base
+
+    def _generate_filename(self):
+        filename = self.artifact_id + "-" + self.classifier + "." + self.extension
+        if not self.classifier:
+            filename = self.artifact_id + "." + self.extension
+        return filename
+
+    def get_filename(self, filename=None):
+        if not filename:
+            filename = self._generate_filename()
+        elif os.path.isdir(filename):
+            filename = os.path.join(filename, self._generate_filename())
+        return filename
+
+    def __str__(self):
+        result = "%s:%s:%s" % (self.group_id, self.artifact_id, self.version)
+        if self.classifier:
+            result = "%s:%s:%s:%s:%s" % (self.group_id, self.artifact_id, self.extension, self.classifier, self.version)
+        elif self.extension != "jar":
+            result = "%s:%s:%s:%s" % (self.group_id, self.artifact_id, self.extension, self.version)
+        return result
+
+    @staticmethod
+    def parse(input):
+        parts = input.split(":")
+        if len(parts) >= 3:
+            g = parts[0]
+            a = parts[1]
+            v = parts[len(parts) - 1]
+            t = None
+            c = None
+            if len(parts) == 4:
+                t = parts[2]
+            if len(parts) == 5:
+                t = parts[2]
+                c = parts[3]
+            return Artifact(g, a, v, c, t)
+        else:
+            return None
+
+
+class MavenDownloader:
+    def __init__(self, module, base, local=False, headers=None):
+        self.module = module
+        if base.endswith("/"):
+            base = base.rstrip("/")
+        self.base = base
+        self.local = local
+        self.headers = headers
+        self.user_agent = "Ansible {0} maven_artifact".format(ansible_version)
+        self.latest_version_found = None
+        self.metadata_file_name = "maven-metadata-local.xml" if local else "maven-metadata.xml"
+
+    def find_version_by_spec(self, artifact):
+        path = "/%s/%s" % (artifact.path(False), self.metadata_file_name)
+        content = self._getContent(self.base + path, "Failed to retrieve the maven metadata file: " + path)
+        xml = etree.fromstring(content)
+        original_versions = xml.xpath("/metadata/versioning/versions/version/text()")
+        versions = []
+        for version in original_versions:
+            try:
+                versions.append(Version.coerce(version))
+            except ValueError:
+                # This means the version string is not valid semantic versioning
+                pass
+
+        parse_versions_syntax = {
+            # example -> (,1.0]
+            r"^\(,(?P<upper_bound>[0-9.]*)]$": "<={upper_bound}",
+            # example -> 1.0
+            r"^(?P<version>[0-9.]*)$": "~={version}",
+            # example -> [1.0]
+            r"^\[(?P<version>[0-9.]*)\]$": "=={version}",
+            # example -> [1.2, 1.3]
+            r"^\[(?P<lower_bound>[0-9.]*),\s*(?P<upper_bound>[0-9.]*)\]$": ">={lower_bound},<={upper_bound}",
+            # example -> [1.2, 1.3)
+            r"^\[(?P<lower_bound>[0-9.]*),\s*(?P<upper_bound>[0-9.]+)\)$": ">={lower_bound},<{upper_bound}",
+            # example -> [1.5,)
+            r"^\[(?P<lower_bound>[0-9.]*),\)$": ">={lower_bound}",
+        }
+
+        for regex, spec_format in parse_versions_syntax.items():
+            regex_result = match(regex, artifact.version_by_spec)
+            if regex_result:
+                spec = Spec(spec_format.format(**regex_result.groupdict()))
+                selected_version = spec.select(versions)
+
+                if not selected_version:
+                    raise ValueError("No version found with this spec version: {0}".format(artifact.version_by_spec))
+
+                # To deal with Maven repos that omit the patch number on the first build (e.g. 3.8 instead of 3.8.0)
+                if str(selected_version) not in original_versions:
+                    selected_version.patch = None
+
+                return str(selected_version)
+
+        raise ValueError("The spec version {0} is not supported! ".format(artifact.version_by_spec))
+
+    def find_latest_version_available(self, artifact):
+        if self.latest_version_found:
+            return self.latest_version_found
+        path = "/%s/%s" % (artifact.path(False), self.metadata_file_name)
+        content = self._getContent(self.base + path, "Failed to retrieve the maven metadata file: " + path)
+        xml = etree.fromstring(content)
+        v = xml.xpath("/metadata/versioning/versions/version[last()]/text()")
+        if v:
+            self.latest_version_found = v[0]
+            return v[0]
+
+    def find_uri_for_artifact(self, artifact):
+        if artifact.version_by_spec:
+            artifact.version = self.find_version_by_spec(artifact)
+
+        if artifact.version == "latest":
+            artifact.version = self.find_latest_version_available(artifact)
+
+        if artifact.is_snapshot():
+            if self.local:
+                return self._uri_for_artifact(artifact, artifact.version)
+            path = "/%s/%s" % (artifact.path(), self.metadata_file_name)
+            content = self._getContent(self.base + path, "Failed to retrieve the maven metadata file: " + path)
+            xml = etree.fromstring(content)
+
+            for snapshotArtifact in xml.xpath("/metadata/versioning/snapshotVersions/snapshotVersion"):
+                classifier = snapshotArtifact.xpath("classifier/text()")
+                artifact_classifier = classifier[0] if classifier else ''
+                extension = snapshotArtifact.xpath("extension/text()")
+                artifact_extension = extension[0] if extension else ''
+                if artifact_classifier == artifact.classifier and artifact_extension == artifact.extension:
+                    return self._uri_for_artifact(artifact, snapshotArtifact.xpath("value/text()")[0])
+            timestamp_xmlpath = xml.xpath("/metadata/versioning/snapshot/timestamp/text()")
+            if timestamp_xmlpath:
+                timestamp = timestamp_xmlpath[0]
+                build_number = xml.xpath("/metadata/versioning/snapshot/buildNumber/text()")[0]
+                return self._uri_for_artifact(artifact, artifact.version.replace("SNAPSHOT", timestamp + "-" + build_number))
+
+        return self._uri_for_artifact(artifact, artifact.version)
+
+    def _uri_for_artifact(self, artifact, version=None):
+        if artifact.is_snapshot() and not version:
+            raise ValueError("Expected unique version for snapshot artifact " + str(artifact))
+        elif not artifact.is_snapshot():
+            version = artifact.version
+        if artifact.classifier:
+            return posixpath.join(self.base, artifact.path(), artifact.artifact_id + "-" + version + "-" + artifact.classifier + "." + artifact.extension)
+
+        return posixpath.join(self.base, artifact.path(), artifact.artifact_id + "-" + version + "." + artifact.extension)
+
+    # for small files, directly get the full content
+    def _getContent(self, url, failmsg, force=True):
+        if self.local:
+            parsed_url = urlparse(url)
+            if os.path.isfile(parsed_url.path):
+                with io.open(parsed_url.path, 'rb') as f:
+                    return f.read()
+            if force:
+                raise ValueError(failmsg + " because it cannot find the file: " + url)
+            return None
+        response = self._request(url, failmsg, force)
+        if response:
+            return response.read()
+        return None
+
+    # only for HTTP requests
+    def _request(self, url, failmsg, force=True):
+        url_to_use = url
+        parsed_url = urlparse(url)
+
+        if parsed_url.scheme == 's3':
+            parsed_url = urlparse(url)
+            bucket_name = parsed_url.netloc
+            key_name = parsed_url.path[1:]
+            client = boto3.client('s3', aws_access_key_id=self.module.params.get('username', ''), aws_secret_access_key=self.module.params.get('password', ''))
+            url_to_use = client.generate_presigned_url('get_object', Params={'Bucket': bucket_name, 'Key': key_name}, ExpiresIn=10)
+
+        req_timeout = self.module.params.get('timeout')
+
+        # Hack to add parameters in the way that fetch_url expects
+        self.module.params['url_username'] = self.module.params.get('username', '')
+        self.module.params['url_password'] = self.module.params.get('password', '')
+        self.module.params['http_agent'] = self.user_agent
+
+        response, info = fetch_url(self.module, url_to_use, timeout=req_timeout, headers=self.headers)
+        if info['status'] == 200:
+            return response
+        if force:
+            raise ValueError(failmsg + " because of " + info['msg'] + " for URL " + url_to_use)
+        return None
+
+    def download(self, tmpdir, artifact, verify_download, filename=None):
+        if (not artifact.version and not artifact.version_by_spec) or artifact.version == "latest":
+            artifact = Artifact(artifact.group_id, artifact.artifact_id, self.find_latest_version_available(artifact), None,
+                                artifact.classifier, artifact.extension)
+        url = self.find_uri_for_artifact(artifact)
+        tempfd, tempname = tempfile.mkstemp(dir=tmpdir)
+
+        try:
+            # copy to temp file
+            if self.local:
+                parsed_url = urlparse(url)
+                if os.path.isfile(parsed_url.path):
+                    shutil.copy2(parsed_url.path, tempname)
+                else:
+                    return "Can not find local file: " + parsed_url.path
+            else:
+                response = self._request(url, "Failed to download artifact " + str(artifact))
+                with os.fdopen(tempfd, 'wb') as f:
+                    shutil.copyfileobj(response, f)
+
+            if verify_download:
+                invalid_md5 = self.is_invalid_md5(tempname, url)
+                if invalid_md5:
+                    # if verify_change was set, the previous file would be deleted
+                    os.remove(tempname)
+                    return invalid_md5
+        except Exception as e:
+            os.remove(tempname)
+            raise e
+
+        # all good, now copy temp file to target
+        shutil.move(tempname, artifact.get_filename(filename))
+        return None
+
+    def is_invalid_md5(self, file, remote_url):
+        if os.path.exists(file):
+            local_md5 = self._local_md5(file)
+            if self.local:
+                parsed_url = urlparse(remote_url)
+                remote_md5 = self._local_md5(parsed_url.path)
+            else:
+                try:
+                    remote_md5 = to_text(self._getContent(remote_url + '.md5', "Failed to retrieve MD5", False), errors='strict')
+                except UnicodeError as e:
+                    return "Cannot retrieve a valid md5 from %s: %s" % (remote_url, to_native(e))
+                if not remote_md5:
+                    return "Cannot find md5 from " + remote_url
+            try:
+                # Check if remote md5 only contains md5 or md5 + filename
+                _remote_md5 = remote_md5.split(None)[0]
+                remote_md5 = _remote_md5
+                # remote_md5 is empty so we continue and keep original md5 string
+                # This should not happen since we check for remote_md5 before
+            except IndexError as
e: + pass + if local_md5 == remote_md5: + return None + else: + return "Checksum does not match: we computed " + local_md5 + "but the repository states " + remote_md5 + + return "Path does not exist: " + file + + def _local_md5(self, file): + md5 = hashlib.md5() + with io.open(file, 'rb') as f: + for chunk in iter(lambda: f.read(8192), b''): + md5.update(chunk) + return md5.hexdigest() + + +def main(): + module = AnsibleModule( + argument_spec=dict( + group_id=dict(required=True), + artifact_id=dict(required=True), + version=dict(default=None), + version_by_spec=dict(default=None), + classifier=dict(default=''), + extension=dict(default='jar'), + repository_url=dict(default='https://repo1.maven.org/maven2'), + username=dict(default=None, aliases=['aws_secret_key']), + password=dict(default=None, no_log=True, aliases=['aws_secret_access_key']), + headers=dict(type='dict'), + force_basic_auth=dict(default=False, type='bool'), + state=dict(default="present", choices=["present", "absent"]), # TODO - Implement a "latest" state + timeout=dict(default=10, type='int'), + dest=dict(type="path", required=True), + validate_certs=dict(required=False, default=True, type='bool'), + keep_name=dict(required=False, default=False, type='bool'), + verify_checksum=dict(required=False, default='download', choices=['never', 'download', 'change', 'always']), + directory_mode=dict(type='str'), # Used since https://github.com/ansible/ansible/pull/24965, not sure + # if this should really be here. + ), + add_file_common_args=True, + mutually_exclusive=([('version', 'version_by_spec')]) + ) + + if not HAS_LXML_ETREE: + module.fail_json(msg=missing_required_lib('lxml'), exception=LXML_ETREE_IMP_ERR) + + if module.params['version_by_spec'] and not HAS_SEMANTIC_VERSION: + module.fail_json(msg=missing_required_lib('semantic_version'), exception=SEMANTIC_VERSION_IMP_ERR) + + repository_url = module.params["repository_url"] + if not repository_url: + repository_url = "https://repo1.maven.org/maven2" + try: + parsed_url = urlparse(repository_url) + except AttributeError as e: + module.fail_json(msg='url parsing went wrong %s' % e) + + local = parsed_url.scheme == "file" + + if parsed_url.scheme == 's3' and not HAS_BOTO: + module.fail_json(msg=missing_required_lib('boto3', reason='when using s3:// repository URLs'), + exception=BOTO_IMP_ERR) + + group_id = module.params["group_id"] + artifact_id = module.params["artifact_id"] + version = module.params["version"] + version_by_spec = module.params["version_by_spec"] + classifier = module.params["classifier"] + extension = module.params["extension"] + headers = module.params['headers'] + state = module.params["state"] + dest = module.params["dest"] + b_dest = to_bytes(dest, errors='surrogate_or_strict') + keep_name = module.params["keep_name"] + verify_checksum = module.params["verify_checksum"] + verify_download = verify_checksum in ['download', 'always'] + verify_change = verify_checksum in ['change', 'always'] + + downloader = MavenDownloader(module, repository_url, local, headers) + + if not version_by_spec and not version: + version = "latest" + + try: + artifact = Artifact(group_id, artifact_id, version, version_by_spec, classifier, extension) + except ValueError as e: + module.fail_json(msg=e.args[0]) + + changed = False + prev_state = "absent" + + if dest.endswith(os.sep): + b_dest = to_bytes(dest, errors='surrogate_or_strict') + if not os.path.exists(b_dest): + (pre_existing_dir, new_directory_list) = split_pre_existing_dir(dest) + os.makedirs(b_dest) + directory_args 
= module.load_file_common_arguments(module.params) + directory_mode = module.params["directory_mode"] + if directory_mode is not None: + directory_args['mode'] = directory_mode + else: + directory_args['mode'] = None + changed = adjust_recursive_directory_permissions(pre_existing_dir, new_directory_list, module, directory_args, changed) + + if os.path.isdir(b_dest): + version_part = version + if version == 'latest': + version_part = downloader.find_latest_version_available(artifact) + elif version_by_spec: + version_part = downloader.find_version_by_spec(artifact) + + filename = "{artifact_id}{version_part}{classifier}.{extension}".format( + artifact_id=artifact_id, + version_part="-{0}".format(version_part) if keep_name else "", + classifier="-{0}".format(classifier) if classifier else "", + extension=extension + ) + dest = posixpath.join(dest, filename) + + b_dest = to_bytes(dest, errors='surrogate_or_strict') + + if os.path.lexists(b_dest) and ((not verify_change) or not downloader.is_invalid_md5(dest, downloader.find_uri_for_artifact(artifact))): + prev_state = "present" + + if prev_state == "absent": + try: + download_error = downloader.download(module.tmpdir, artifact, verify_download, b_dest) + if download_error is None: + changed = True + else: + module.fail_json(msg="Cannot retrieve the artifact to destination: " + download_error) + except ValueError as e: + module.fail_json(msg=e.args[0]) + + file_args = module.load_file_common_arguments(module.params, path=dest) + changed = module.set_fs_attributes_if_different(file_args, changed) + if changed: + module.exit_json(state=state, dest=dest, group_id=group_id, artifact_id=artifact_id, version=version, classifier=classifier, + extension=extension, repository_url=repository_url, changed=changed) + else: + module.exit_json(state=state, dest=dest, changed=changed) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/packaging/language/npm.py b/plugins/modules/packaging/language/npm.py new file mode 100644 index 0000000000..9401019435 --- /dev/null +++ b/plugins/modules/packaging/language/npm.py @@ -0,0 +1,311 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright (c) 2017 Chris Hoffman +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = { + 'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community' +} + + +DOCUMENTATION = ''' +--- +module: npm +short_description: Manage node.js packages with npm +description: + - Manage node.js packages with Node Package Manager (npm) +author: "Chris Hoffman (@chrishoffman)" +options: + name: + description: + - The name of a node.js library to install + type: str + required: false + path: + description: + - The base path where to install the node.js libraries + type: path + required: false + version: + description: + - The version to be installed + type: str + required: false + global: + description: + - Install the node.js library globally + required: false + default: no + type: bool + executable: + description: + - The executable location for npm. + - This is useful if you are using a version manager, such as nvm + type: path + required: false + ignore_scripts: + description: + - Use the C(--ignore-scripts) flag when installing. + required: false + type: bool + default: no + unsafe_perm: + description: + - Use the C(--unsafe-perm) flag when installing. 
+ type: bool + default: no + ci: + description: + - Install packages based on package-lock file, same as running npm ci + type: bool + default: no + production: + description: + - Install dependencies in production mode, excluding devDependencies + required: false + type: bool + default: no + registry: + description: + - The registry to install modules from. + required: false + type: str + state: + description: + - The state of the node.js library + required: false + type: str + default: present + choices: [ "present", "absent", "latest" ] +requirements: + - npm installed in bin path (recommended /usr/local/bin) +''' + +EXAMPLES = ''' +- name: Install "coffee-script" node.js package. + npm: + name: coffee-script + path: /app/location + +- name: Install "coffee-script" node.js package on version 1.6.1. + npm: + name: coffee-script + version: '1.6.1' + path: /app/location + +- name: Install "coffee-script" node.js package globally. + npm: + name: coffee-script + global: yes + +- name: Remove the globally package "coffee-script". + npm: + name: coffee-script + global: yes + state: absent + +- name: Install "coffee-script" node.js package from custom registry. + npm: + name: coffee-script + registry: 'http://registry.mysite.com' + +- name: Install packages based on package.json. + npm: + path: /app/location + +- name: Update packages based on package.json to their latest version. + npm: + path: /app/location + state: latest + +- name: Install packages based on package.json using the npm installed with nvm v0.10.1. + npm: + path: /app/location + executable: /opt/nvm/v0.10.1/bin/npm + state: present +''' + +import os +import re + +from ansible.module_utils.basic import AnsibleModule + +import json + + +class Npm(object): + def __init__(self, module, **kwargs): + self.module = module + self.glbl = kwargs['glbl'] + self.name = kwargs['name'] + self.version = kwargs['version'] + self.path = kwargs['path'] + self.registry = kwargs['registry'] + self.production = kwargs['production'] + self.ignore_scripts = kwargs['ignore_scripts'] + self.unsafe_perm = kwargs['unsafe_perm'] + self.state = kwargs['state'] + + if kwargs['executable']: + self.executable = kwargs['executable'].split(' ') + else: + self.executable = [module.get_bin_path('npm', True)] + + if kwargs['version'] and self.state != 'absent': + self.name_version = self.name + '@' + str(self.version) + else: + self.name_version = self.name + + def _exec(self, args, run_in_check_mode=False, check_rc=True): + if not self.module.check_mode or (self.module.check_mode and run_in_check_mode): + cmd = self.executable + args + + if self.glbl: + cmd.append('--global') + if self.production and ('install' in cmd or 'update' in cmd): + cmd.append('--production') + if self.ignore_scripts: + cmd.append('--ignore-scripts') + if self.unsafe_perm: + cmd.append('--unsafe-perm') + if self.name: + cmd.append(self.name_version) + if self.registry: + cmd.append('--registry') + cmd.append(self.registry) + + # If path is specified, cd into that path and run the command. 
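+            # The working directory is created on demand below, so a bare
+            # `npm install` can run in a fresh checkout; a path that exists
+            # but is not a directory is a hard failure.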
+ cwd = None + if self.path: + if not os.path.exists(self.path): + os.makedirs(self.path) + if not os.path.isdir(self.path): + self.module.fail_json(msg="path %s is not a directory" % self.path) + cwd = self.path + + rc, out, err = self.module.run_command(cmd, check_rc=check_rc, cwd=cwd) + return out + return '' + + def list(self): + cmd = ['list', '--json', '--long'] + + installed = list() + missing = list() + data = json.loads(self._exec(cmd, True, False)) + if 'dependencies' in data: + for dep in data['dependencies']: + if 'missing' in data['dependencies'][dep] and data['dependencies'][dep]['missing']: + missing.append(dep) + elif 'invalid' in data['dependencies'][dep] and data['dependencies'][dep]['invalid']: + missing.append(dep) + else: + installed.append(dep) + if self.name and self.name not in installed: + missing.append(self.name) + # Named dependency not installed + else: + missing.append(self.name) + + return installed, missing + + def install(self): + return self._exec(['install']) + + def ci_install(self): + return self._exec(['ci']) + + def update(self): + return self._exec(['update']) + + def uninstall(self): + return self._exec(['uninstall']) + + def list_outdated(self): + outdated = list() + data = self._exec(['outdated'], True, False) + for dep in data.splitlines(): + if dep: + # node.js v0.10.22 changed the `npm outdated` module separator + # from "@" to " ". Split on both for backwards compatibility. + pkg, other = re.split(r'\s|@', dep, 1) + outdated.append(pkg) + + return outdated + + +def main(): + arg_spec = dict( + name=dict(default=None, type='str'), + path=dict(default=None, type='path'), + version=dict(default=None, type='str'), + production=dict(default='no', type='bool'), + executable=dict(default=None, type='path'), + registry=dict(default=None, type='str'), + state=dict(default='present', choices=['present', 'absent', 'latest']), + ignore_scripts=dict(default=False, type='bool'), + unsafe_perm=dict(default=False, type='bool'), + ci=dict(default=False, type='bool'), + ) + arg_spec['global'] = dict(default='no', type='bool') + module = AnsibleModule( + argument_spec=arg_spec, + supports_check_mode=True + ) + + name = module.params['name'] + path = module.params['path'] + version = module.params['version'] + glbl = module.params['global'] + production = module.params['production'] + executable = module.params['executable'] + registry = module.params['registry'] + state = module.params['state'] + ignore_scripts = module.params['ignore_scripts'] + unsafe_perm = module.params['unsafe_perm'] + ci = module.params['ci'] + + if not path and not glbl: + module.fail_json(msg='path must be specified when not using global') + if state == 'absent' and not name: + module.fail_json(msg='uninstalling a package is only available for named packages') + + npm = Npm(module, name=name, path=path, version=version, glbl=glbl, production=production, + executable=executable, registry=registry, ignore_scripts=ignore_scripts, + unsafe_perm=unsafe_perm, state=state) + + changed = False + if ci: + npm.ci_install() + changed = True + elif state == 'present': + installed, missing = npm.list() + if missing: + changed = True + npm.install() + elif state == 'latest': + installed, missing = npm.list() + outdated = npm.list_outdated() + if missing: + changed = True + npm.install() + if outdated: + changed = True + npm.update() + else: # absent + installed, missing = npm.list() + if name in installed: + changed = True + npm.uninstall() + + module.exit_json(changed=changed) + + +if __name__ == 
'__main__': + main() diff --git a/plugins/modules/packaging/language/pear.py b/plugins/modules/packaging/language/pear.py new file mode 100644 index 0000000000..f4672e13fc --- /dev/null +++ b/plugins/modules/packaging/language/pear.py @@ -0,0 +1,233 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2012, Afterburn +# (c) 2013, Aaron Bull Schaefer +# (c) 2015, Jonathan Lestrelin +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: pear +short_description: Manage pear/pecl packages +description: + - Manage PHP packages with the pear package manager. +author: + - Jonathan Lestrelin (@jle64) +options: + name: + description: + - Name of the package to install, upgrade, or remove. + required: true + + state: + description: + - Desired state of the package. + default: "present" + choices: ["present", "absent", "latest"] + executable: + description: + - Path to the pear executable +''' + +EXAMPLES = ''' +# Install pear package +- pear: + name: Net_URL2 + state: present + +# Install pecl package +- pear: + name: pecl/json_post + state: present + +# Upgrade package +- pear: + name: Net_URL2 + state: latest + +# Remove packages +- pear: + name: Net_URL2,pecl/json_post + state: absent +''' + +import os + +from ansible.module_utils._text import to_text +from ansible.module_utils.basic import AnsibleModule + + +def get_local_version(pear_output): + """Take pear remoteinfo output and get the installed version""" + lines = pear_output.split('\n') + for line in lines: + if 'Installed ' in line: + installed = line.rsplit(None, 1)[-1].strip() + if installed == '-': + continue + return installed + return None + + +def _get_pear_path(module): + if module.params['executable'] and os.path.isfile(module.params['executable']): + result = module.params['executable'] + else: + result = module.get_bin_path('pear', True, [module.params['executable']]) + return result + + +def get_repository_version(pear_output): + """Take pear remote-info output and get the latest version""" + lines = pear_output.split('\n') + for line in lines: + if 'Latest ' in line: + return line.rsplit(None, 1)[-1].strip() + return None + + +def query_package(module, name, state="present"): + """Query the package status in both the local system and the repository. + Returns a boolean to indicate if the package is installed, + and a second boolean to indicate if the package is up-to-date.""" + if state == "present": + lcmd = "%s info %s" % (_get_pear_path(module), name) + lrc, lstdout, lstderr = module.run_command(lcmd, check_rc=False) + if lrc != 0: + # package is not installed locally + return False, False + + rcmd = "%s remote-info %s" % (_get_pear_path(module), name) + rrc, rstdout, rstderr = module.run_command(rcmd, check_rc=False) + + # get the version installed locally (if any) + lversion = get_local_version(rstdout) + + # get the version in the repository + rversion = get_repository_version(rstdout) + + if rrc == 0: + # Return True to indicate that the package is installed locally, + # and the result of the version number comparison + # to determine if the package is up-to-date. 
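+            # Roughly, the `pear remote-info` output consumed by the two
+            # parsers above looks like (assuming a typical pear setup):
+            #   Latest      1.0.3
+            #   Installed   1.0.1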
+            return True, (lversion == rversion)
+
+    return False, False
+
+
+def remove_packages(module, packages):
+    remove_c = 0
+    # Using a for loop in case of error, we can report the package that failed
+    for package in packages:
+        # Query the package first, to see if we even need to remove
+        installed, updated = query_package(module, package)
+        if not installed:
+            continue
+
+        cmd = "%s uninstall %s" % (_get_pear_path(module), package)
+        rc, stdout, stderr = module.run_command(cmd, check_rc=False)
+
+        if rc != 0:
+            module.fail_json(msg="failed to remove %s: %s" % (package, to_text(stdout + stderr)))
+
+        remove_c += 1
+
+    if remove_c > 0:
+        module.exit_json(changed=True, msg="removed %s package(s)" % remove_c)
+
+    module.exit_json(changed=False, msg="package(s) already absent")
+
+
+def install_packages(module, state, packages):
+    install_c = 0
+
+    for package in packages:
+        # if the package is installed and state == present
+        # or state == latest and is up-to-date then skip
+        installed, updated = query_package(module, package)
+        if installed and (state == 'present' or (state == 'latest' and updated)):
+            continue
+
+        if state == 'present':
+            command = 'install'
+
+        if state == 'latest':
+            command = 'upgrade'
+
+        cmd = "%s %s %s" % (_get_pear_path(module), command, package)
+        rc, stdout, stderr = module.run_command(cmd, check_rc=False)
+
+        if rc != 0:
+            module.fail_json(msg="failed to install %s: %s" % (package, to_text(stdout + stderr)))
+
+        install_c += 1
+
+    if install_c > 0:
+        module.exit_json(changed=True, msg="installed %s package(s)" % (install_c))
+
+    module.exit_json(changed=False, msg="package(s) already installed")
+
+
+def check_packages(module, packages, state):
+    would_be_changed = []
+    for package in packages:
+        installed, updated = query_package(module, package)
+        if ((state in ["present", "latest"] and not installed) or
+                (state == "absent" and installed) or
+                (state == "latest" and not updated)):
+            would_be_changed.append(package)
+    if would_be_changed:
+        if state == "absent":
+            state = "removed"
+        module.exit_json(changed=True, msg="%s package(s) would be %s" % (
+            len(would_be_changed), state))
+    else:
+        module.exit_json(changed=False, msg="package(s) already %s" % state)
+
+
+def main():
+    module = AnsibleModule(
+        argument_spec=dict(
+            name=dict(aliases=['pkg'], required=True),
+            state=dict(default='present', choices=['present', 'installed', "latest", 'absent', 'removed']),
+            executable=dict(default=None, required=False, type='path')),
+        supports_check_mode=True)
+
+    p = module.params
+
+    # normalize the state parameter
+    if p['state'] in ['present', 'installed']:
+        p['state'] = 'present'
+    elif p['state'] in ['absent', 'removed']:
+        p['state'] = 'absent'
+
+    if p['name']:
+        pkgs = p['name'].split(',')
+
+    if module.check_mode:
+        check_packages(module, pkgs, p['state'])
+
+    if p['state'] in ['present', 'latest']:
+        install_packages(module, p['state'], pkgs)
+    elif p['state'] == 'absent':
+        remove_packages(module, pkgs)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/packaging/language/pip_package_info.py b/plugins/modules/packaging/language/pip_package_info.py
new file mode 100644
index 0000000000..8396b5252b
--- /dev/null
+++ b/plugins/modules/packaging/language/pip_package_info.py
@@ -0,0 +1,151 @@
+#!/usr/bin/python
+# (c) 2018, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# started out with
AWX's scan_packages module + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +module: pip_package_info +short_description: pip package information +description: + - Return information about installed pip packages +options: + clients: + description: + - A list of the pip executables that will be used to get the packages. + They can be supplied with the full path or just the executable name, i.e `pip3.7`. + default: ['pip'] + required: False + type: list +requirements: + - The requested pip executables must be installed on the target. +author: + - Matthew Jones (@matburt) + - Brian Coca (@bcoca) + - Adam Miller (@maxamillion) +''' + +EXAMPLES = ''' +- name: Just get the list from default pip + pip_package_info: + +- name: get the facts for default pip, pip2 and pip3.6 + pip_package_info: + clients: ['pip', 'pip2', 'pip3.6'] + +- name: get from specific paths (virtualenvs?) + pip_package_info: + clients: '/home/me/projec42/python/pip3.5' +''' + +RETURN = ''' +packages: + description: a dictionary of installed package data + returned: always + type: dict + contains: + python: + description: A dictionary with each pip client which then contains a list of dicts with python package information + returned: always + type: dict + sample: + "packages": { + "pip": { + "Babel": [ + { + "name": "Babel", + "source": "pip", + "version": "2.6.0" + } + ], + "Flask": [ + { + "name": "Flask", + "source": "pip", + "version": "1.0.2" + } + ], + "Flask-SQLAlchemy": [ + { + "name": "Flask-SQLAlchemy", + "source": "pip", + "version": "2.3.2" + } + ], + "Jinja2": [ + { + "name": "Jinja2", + "source": "pip", + "version": "2.10" + } + ], + }, + } +''' +import json +import os + +from ansible.module_utils._text import to_text +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.facts.packages import CLIMgr + + +class PIP(CLIMgr): + + def __init__(self, pip): + + self.CLI = pip + + def list_installed(self): + global module + rc, out, err = module.run_command([self._cli, 'list', '-l', '--format=json']) + if rc != 0: + raise Exception("Unable to list packages rc=%s : %s" % (rc, err)) + return json.loads(out) + + def get_package_details(self, package): + package['source'] = self.CLI + return package + + +def main(): + + # start work + global module + module = AnsibleModule(argument_spec=dict(clients={'type': 'list', 'default': ['pip']},), supports_check_mode=True) + packages = {} + results = {'packages': {}} + clients = module.params['clients'] + + found = 0 + for pip in clients: + + if not os.path.basename(pip).startswith('pip'): + module.warn('Skipping invalid pip client: %s' % (pip)) + continue + try: + pip_mgr = PIP(pip) + if pip_mgr.is_available(): + found += 1 + packages[pip] = pip_mgr.get_packages() + except Exception as e: + module.warn('Failed to retrieve packages with %s: %s' % (pip, to_text(e))) + continue + + if found == 0: + module.fail_json(msg='Unable to use any of the supplied pip clients: %s' % clients) + + # return info + results['packages'] = packages + module.exit_json(**results) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/packaging/language/yarn.py b/plugins/modules/packaging/language/yarn.py new file mode 100644 index 0000000000..cf9ebdaa40 --- /dev/null +++ b/plugins/modules/packaging/language/yarn.py @@ -0,0 +1,389 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2017 David 
Gunter +# Copyright (c) 2017 Chris Hoffman +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: yarn +short_description: Manage node.js packages with Yarn +description: + - Manage node.js packages with the Yarn package manager (https://yarnpkg.com/) +author: + - "David Gunter (@verkaufer)" + - "Chris Hoffman (@chrishoffman), creator of NPM Ansible module)" +options: + name: + description: + - The name of a node.js library to install + - If omitted all packages in package.json are installed. + - To globally install from local node.js library. Prepend "file:" to the path of the node.js library. + required: false + path: + description: + - The base path where Node.js libraries will be installed. + - This is where the node_modules folder lives. + required: false + version: + description: + - The version of the library to be installed. + - Must be in semver format. If "latest" is desired, use "state" arg instead + required: false + global: + description: + - Install the node.js library globally + required: false + default: no + type: bool + executable: + description: + - The executable location for yarn. + required: false + ignore_scripts: + description: + - Use the --ignore-scripts flag when installing. + required: false + type: bool + default: no + production: + description: + - Install dependencies in production mode. + - Yarn will ignore any dependencies under devDependencies in package.json + required: false + type: bool + default: no + registry: + description: + - The registry to install modules from. + required: false + state: + description: + - Installation state of the named node.js library + - If absent is selected, a name option must be provided + required: false + default: present + choices: [ "present", "absent", "latest" ] +requirements: + - Yarn installed in bin path (typically /usr/local/bin) +''' + +EXAMPLES = ''' +- name: Install "imagemin" node.js package. + yarn: + name: imagemin + path: /app/location + +- name: Install "imagemin" node.js package on version 5.3.1 + yarn: + name: imagemin + version: '5.3.1' + path: /app/location + +- name: Install "imagemin" node.js package globally. + yarn: + name: imagemin + global: yes + +- name: Remove the globally-installed package "imagemin". + yarn: + name: imagemin + global: yes + state: absent + +- name: Install "imagemin" node.js package from custom registry. + yarn: + name: imagemin + registry: 'http://registry.mysite.com' + +- name: Install packages based on package.json. + yarn: + path: /app/location + +- name: Update all packages in package.json to their latest version. + yarn: + path: /app/location + state: latest +''' + +RETURN = ''' +changed: + description: Whether Yarn changed any package data + returned: always + type: bool + sample: true +msg: + description: Provides an error message if Yarn syntax was incorrect + returned: failure + type: str + sample: "Package must be explicitly named when uninstalling." 
+invocation: + description: Parameters and values used during execution + returned: success + type: dict + sample: { + "module_args": { + "executable": null, + "globally": false, + "ignore_scripts": false, + "name": null, + "path": "/some/path/folder", + "production": false, + "registry": null, + "state": "present", + "version": null + } + } +out: + description: Output generated from Yarn with emojis removed. + returned: always + type: str + sample: "yarn add v0.16.1[1/4] Resolving packages...[2/4] Fetching packages...[3/4] Linking dependencies...[4/4] + Building fresh packages...success Saved lockfile.success Saved 1 new dependency..left-pad@1.1.3 Done in 0.59s." +''' + +import os +import re +import json + +from ansible.module_utils.basic import AnsibleModule + + +class Yarn(object): + + DEFAULT_GLOBAL_INSTALLATION_PATH = '~/.config/yarn/global' + + def __init__(self, module, **kwargs): + self.module = module + self.globally = kwargs['globally'] + self.name = kwargs['name'] + self.version = kwargs['version'] + self.path = kwargs['path'] + self.registry = kwargs['registry'] + self.production = kwargs['production'] + self.ignore_scripts = kwargs['ignore_scripts'] + + # Specify a version of package if version arg passed in + self.name_version = None + + if kwargs['executable']: + self.executable = kwargs['executable'].split(' ') + else: + self.executable = [module.get_bin_path('yarn', True)] + + if kwargs['version'] and self.name is not None: + self.name_version = self.name + '@' + str(self.version) + elif self.name is not None: + self.name_version = self.name + + def _exec(self, args, run_in_check_mode=False, check_rc=True): + if not self.module.check_mode or (self.module.check_mode and run_in_check_mode): + + if self.globally: + # Yarn global arg is inserted before the command (e.g. `yarn global {some-command}`) + args.insert(0, 'global') + + cmd = self.executable + args + + if self.production: + cmd.append('--production') + if self.ignore_scripts: + cmd.append('--ignore-scripts') + if self.registry: + cmd.append('--registry') + cmd.append(self.registry) + + # always run Yarn without emojis when called via Ansible + cmd.append('--no-emoji') + + # If path is specified, cd into that path and run the command. + cwd = None + if self.path and not self.globally: + if not os.path.exists(self.path): + # Module will make directory if not exists. 
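+                    # (Same convention as the npm module: a missing path is
+                    # treated as a request to create it.)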
+ os.makedirs(self.path) + if not os.path.isdir(self.path): + self.module.fail_json(msg="Path provided %s is not a directory" % self.path) + cwd = self.path + + if not os.path.isfile(os.path.join(self.path, 'package.json')): + self.module.fail_json(msg="Package.json does not exist in provided path.") + + rc, out, err = self.module.run_command(cmd, check_rc=check_rc, cwd=cwd) + return out, err + + return '' + + def list(self): + cmd = ['list', '--depth=0', '--json'] + + installed = list() + missing = list() + + if not os.path.isfile(os.path.join(self.path, 'yarn.lock')): + missing.append(self.name) + return installed, missing + + result, error = self._exec(cmd, True, False) + + if error: + self.module.fail_json(msg=error) + + data = json.loads(result) + try: + dependencies = data['data']['trees'] + except KeyError: + missing.append(self.name) + return installed, missing + + for dep in dependencies: + name, version = dep['name'].split('@') + installed.append(name) + + if self.name not in installed: + missing.append(self.name) + + return installed, missing + + def install(self): + if self.name_version: + # Yarn has a separate command for installing packages by name... + return self._exec(['add', self.name_version]) + # And one for installing all packages in package.json + return self._exec(['install', '--non-interactive']) + + def update(self): + return self._exec(['upgrade', '--latest']) + + def uninstall(self): + return self._exec(['remove', self.name]) + + def list_outdated(self): + outdated = list() + + if not os.path.isfile(os.path.join(self.path, 'yarn.lock')): + return outdated + + cmd_result, err = self._exec(['outdated', '--json'], True, False) + if err: + self.module.fail_json(msg=err) + + outdated_packages_data = cmd_result.splitlines()[1] + + data = json.loads(outdated_packages_data) + + try: + outdated_dependencies = data['data']['body'] + except KeyError: + return outdated + + for dep in outdated_dependencies: + # Outdated dependencies returned as a list of lists, where + # item at index 0 is the name of the dependency + outdated.append(dep[0]) + return outdated + + +def main(): + arg_spec = dict( + name=dict(default=None), + path=dict(default=None, type='path'), + version=dict(default=None), + production=dict(default='no', type='bool'), + executable=dict(default=None, type='path'), + registry=dict(default=None), + state=dict(default='present', choices=['present', 'absent', 'latest']), + ignore_scripts=dict(default=False, type='bool'), + ) + arg_spec['global'] = dict(default='no', type='bool') + module = AnsibleModule( + argument_spec=arg_spec, + supports_check_mode=True + ) + + name = module.params['name'] + path = module.params['path'] + version = module.params['version'] + globally = module.params['global'] + production = module.params['production'] + executable = module.params['executable'] + registry = module.params['registry'] + state = module.params['state'] + ignore_scripts = module.params['ignore_scripts'] + + # When installing globally, users should not be able to define a path for installation. + # Require a path if global is False, though! 
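+    # `path` and `global: yes` are mutually exclusive; global installs always
+    # go to DEFAULT_GLOBAL_INSTALLATION_PATH instead (see below).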
+ if path is None and globally is False: + module.fail_json(msg='Path must be specified when not using global arg') + elif path and globally is True: + module.fail_json(msg='Cannot specify path if doing global installation') + + if state == 'absent' and not name: + module.fail_json(msg='Package must be explicitly named when uninstalling.') + if state == 'latest': + version = 'latest' + + # When installing globally, use the defined path for global node_modules + if globally: + path = Yarn.DEFAULT_GLOBAL_INSTALLATION_PATH + + yarn = Yarn(module, + name=name, + path=path, + version=version, + globally=globally, + production=production, + executable=executable, + registry=registry, + ignore_scripts=ignore_scripts) + + changed = False + out = '' + err = '' + if state == 'present': + + if not name: + changed = True + out, err = yarn.install() + else: + installed, missing = yarn.list() + if len(missing): + changed = True + out, err = yarn.install() + + elif state == 'latest': + + if not name: + changed = True + out, err = yarn.install() + else: + installed, missing = yarn.list() + outdated = yarn.list_outdated() + if len(missing): + changed = True + out, err = yarn.install() + if len(outdated): + changed = True + out, err = yarn.update() + else: + # state == absent + installed, missing = yarn.list() + if name in installed: + changed = True + out, err = yarn.uninstall() + + module.exit_json(changed=changed, out=out, err=err) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/packaging/os/apk.py b/plugins/modules/packaging/os/apk.py new file mode 100644 index 0000000000..859539ffb6 --- /dev/null +++ b/plugins/modules/packaging/os/apk.py @@ -0,0 +1,343 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2015, Kevin Brebanov +# Based on pacman (Afterburn , Aaron Bull Schaefer ) +# and apt (Matthew Williams ) modules. +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['stableinterface'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: apk +short_description: Manages apk packages +description: + - Manages I(apk) packages for Alpine Linux. +author: "Kevin Brebanov (@kbrebanov)" +options: + available: + description: + - During upgrade, reset versioned world dependencies and change logic to prefer replacing or downgrading packages (instead of holding them) + if the currently installed package is no longer available from any repository. + type: bool + default: 'no' + name: + description: + - A package name, like C(foo), or multiple packages, like C(foo, bar). + type: list + elements: str + repository: + description: + - A package repository or multiple repositories. + Unlike with the underlying apk command, this list will override the system repositories rather than supplement them. + state: + description: + - Indicates the desired package(s) state. + - C(present) ensures the package(s) is/are present. + - C(absent) ensures the package(s) is/are absent. + - C(latest) ensures the package(s) is/are present and the latest version(s). + default: present + choices: [ "present", "absent", "latest" ] + update_cache: + description: + - Update repository indexes. Can be run with other steps or on it's own. + type: bool + default: 'no' + upgrade: + description: + - Upgrade all installed packages to their latest version. 
+ type: bool + default: 'no' +notes: + - '"name" and "upgrade" are mutually exclusive.' + - When used with a `loop:` each package will be processed individually, it is much more efficient to pass the list directly to the `name` option. +''' + +EXAMPLES = ''' +# Update repositories and install "foo" package +- apk: + name: foo + update_cache: yes + +# Update repositories and install "foo" and "bar" packages +- apk: + name: foo,bar + update_cache: yes + +# Remove "foo" package +- apk: + name: foo + state: absent + +# Remove "foo" and "bar" packages +- apk: + name: foo,bar + state: absent + +# Install the package "foo" +- apk: + name: foo + state: present + +# Install the packages "foo" and "bar" +- apk: + name: foo,bar + state: present + +# Update repositories and update package "foo" to latest version +- apk: + name: foo + state: latest + update_cache: yes + +# Update repositories and update packages "foo" and "bar" to latest versions +- apk: + name: foo,bar + state: latest + update_cache: yes + +# Update all installed packages to the latest versions +- apk: + upgrade: yes + +# Upgrade / replace / downgrade / uninstall all installed packages to the latest versions available +- apk: + available: yes + upgrade: yes + +# Update repositories as a separate step +- apk: + update_cache: yes + +# Install package from a specific repository +- apk: + name: foo + state: latest + update_cache: yes + repository: http://dl-3.alpinelinux.org/alpine/edge/main +''' + +RETURN = ''' +packages: + description: a list of packages that have been changed + returned: when packages have changed + type: list + sample: ['package', 'other-package'] +''' + +import re +# Import module snippets. +from ansible.module_utils.basic import AnsibleModule + + +def parse_for_packages(stdout): + packages = [] + data = stdout.split('\n') + regex = re.compile(r'^\(\d+/\d+\)\s+\S+\s+(\S+)') + for l in data: + p = regex.search(l) + if p: + packages.append(p.group(1)) + return packages + + +def update_package_db(module, exit): + cmd = "%s update" % (APK_PATH) + rc, stdout, stderr = module.run_command(cmd, check_rc=False) + if rc != 0: + module.fail_json(msg="could not update package db", stdout=stdout, stderr=stderr) + elif exit: + module.exit_json(changed=True, msg='updated repository indexes', stdout=stdout, stderr=stderr) + else: + return True + + +def query_toplevel(module, name): + # /etc/apk/world contains a list of top-level packages separated by ' ' or \n + # packages may contain repository (@) or version (=<>~) separator characters or start with negation ! 
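+    # e.g. the world entries 'busybox', 'busybox@edge' and 'busybox=1.31.1-r9'
+    # all match a requested name of 'busybox'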
+ regex = re.compile(r'^' + re.escape(name) + r'([@=<>~].+)?$') + with open('/etc/apk/world') as f: + content = f.read().split() + for p in content: + if regex.search(p): + return True + return False + + +def query_package(module, name): + cmd = "%s -v info --installed %s" % (APK_PATH, name) + rc, stdout, stderr = module.run_command(cmd, check_rc=False) + if rc == 0: + return True + else: + return False + + +def query_latest(module, name): + cmd = "%s version %s" % (APK_PATH, name) + rc, stdout, stderr = module.run_command(cmd, check_rc=False) + search_pattern = r"(%s)-[\d\.\w]+-[\d\w]+\s+(.)\s+[\d\.\w]+-[\d\w]+\s+" % (re.escape(name)) + match = re.search(search_pattern, stdout) + if match and match.group(2) == "<": + return False + return True + + +def query_virtual(module, name): + cmd = "%s -v info --description %s" % (APK_PATH, name) + rc, stdout, stderr = module.run_command(cmd, check_rc=False) + search_pattern = r"^%s: virtual meta package" % (re.escape(name)) + if re.search(search_pattern, stdout): + return True + return False + + +def get_dependencies(module, name): + cmd = "%s -v info --depends %s" % (APK_PATH, name) + rc, stdout, stderr = module.run_command(cmd, check_rc=False) + dependencies = stdout.split() + if len(dependencies) > 1: + return dependencies[1:] + else: + return [] + + +def upgrade_packages(module, available): + if module.check_mode: + cmd = "%s upgrade --simulate" % (APK_PATH) + else: + cmd = "%s upgrade" % (APK_PATH) + if available: + cmd = "%s --available" % cmd + rc, stdout, stderr = module.run_command(cmd, check_rc=False) + packagelist = parse_for_packages(stdout) + if rc != 0: + module.fail_json(msg="failed to upgrade packages", stdout=stdout, stderr=stderr, packages=packagelist) + if re.search(r'^OK', stdout): + module.exit_json(changed=False, msg="packages already upgraded", stdout=stdout, stderr=stderr, packages=packagelist) + module.exit_json(changed=True, msg="upgraded packages", stdout=stdout, stderr=stderr, packages=packagelist) + + +def install_packages(module, names, state): + upgrade = False + to_install = [] + to_upgrade = [] + for name in names: + # Check if virtual package + if query_virtual(module, name): + # Get virtual package dependencies + dependencies = get_dependencies(module, name) + for dependency in dependencies: + if state == 'latest' and not query_latest(module, dependency): + to_upgrade.append(dependency) + else: + if not query_toplevel(module, name): + to_install.append(name) + elif state == 'latest' and not query_latest(module, name): + to_upgrade.append(name) + if to_upgrade: + upgrade = True + if not to_install and not upgrade: + module.exit_json(changed=False, msg="package(s) already installed") + packages = " ".join(to_install + to_upgrade) + if upgrade: + if module.check_mode: + cmd = "%s add --upgrade --simulate %s" % (APK_PATH, packages) + else: + cmd = "%s add --upgrade %s" % (APK_PATH, packages) + else: + if module.check_mode: + cmd = "%s add --simulate %s" % (APK_PATH, packages) + else: + cmd = "%s add %s" % (APK_PATH, packages) + rc, stdout, stderr = module.run_command(cmd, check_rc=False) + packagelist = parse_for_packages(stdout) + if rc != 0: + module.fail_json(msg="failed to install %s" % (packages), stdout=stdout, stderr=stderr, packages=packagelist) + module.exit_json(changed=True, msg="installed %s package(s)" % (packages), stdout=stdout, stderr=stderr, packages=packagelist) + + +def remove_packages(module, names): + installed = [] + for name in names: + if query_package(module, name): + installed.append(name) + 
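+    # Only packages that are actually installed are passed on to `apk del`.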
if not installed:
+        module.exit_json(changed=False, msg="package(s) already removed")
+    names = " ".join(installed)
+    if module.check_mode:
+        cmd = "%s del --purge --simulate %s" % (APK_PATH, names)
+    else:
+        cmd = "%s del --purge %s" % (APK_PATH, names)
+    rc, stdout, stderr = module.run_command(cmd, check_rc=False)
+    packagelist = parse_for_packages(stdout)
+    # Check to see if packages are still present because of dependencies
+    for name in installed:
+        if query_package(module, name):
+            rc = 1
+            break
+    if rc != 0:
+        module.fail_json(msg="failed to remove %s package(s)" % (names), stdout=stdout, stderr=stderr, packages=packagelist)
+    module.exit_json(changed=True, msg="removed %s package(s)" % (names), stdout=stdout, stderr=stderr, packages=packagelist)
+
+# ==========================================
+# Main control flow.
+
+
+def main():
+    module = AnsibleModule(
+        argument_spec=dict(
+            state=dict(default='present', choices=['present', 'installed', 'absent', 'removed', 'latest']),
+            name=dict(type='list', elements='str'),
+            repository=dict(type='list'),
+            update_cache=dict(default='no', type='bool'),
+            upgrade=dict(default='no', type='bool'),
+            available=dict(default='no', type='bool'),
+        ),
+        required_one_of=[['name', 'update_cache', 'upgrade']],
+        mutually_exclusive=[['name', 'upgrade']],
+        supports_check_mode=True
+    )
+
+    # Set LANG env since we parse stdout
+    module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C', LC_CTYPE='C')
+
+    global APK_PATH
+    APK_PATH = module.get_bin_path('apk', required=True)
+
+    p = module.params
+
+    # append the requested repositories to the apk command line
+    if p['repository']:
+        for r in p['repository']:
+            APK_PATH = "%s --repository %s --repositories-file /dev/null" % (APK_PATH, r)
+
+    # normalize the state parameter
+    if p['state'] in ['present', 'installed']:
+        p['state'] = 'present'
+    if p['state'] in ['absent', 'removed']:
+        p['state'] = 'absent'
+
+    if p['update_cache']:
+        update_package_db(module, not p['name'] and not p['upgrade'])
+
+    if p['upgrade']:
+        upgrade_packages(module, p['available'])
+
+    if p['state'] in ['present', 'latest']:
+        install_packages(module, p['name'], p['state'])
+    elif p['state'] == 'absent':
+        remove_packages(module, p['name'])
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/packaging/os/apt_rpm.py b/plugins/modules/packaging/os/apt_rpm.py
new file mode 100644
index 0000000000..d8181e2c4e
--- /dev/null
+++ b/plugins/modules/packaging/os/apt_rpm.py
@@ -0,0 +1,175 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2013, Evgenii Terechkov
+# Written by Evgenii Terechkov
+# Based on urpmi module written by Philippe Makowski
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+DOCUMENTATION = '''
+---
+module: apt_rpm
+short_description: apt_rpm package manager
+description:
+  - Manages packages with I(apt-rpm). Both the low-level (I(rpm)) and the high-level (I(apt-get)) package manager binaries are required.
+options:
+  pkg:
+    description:
+      - Name of the package to install, upgrade, or remove.
+    required: true
+  state:
+    description:
+      - Indicates the desired package state.
+    choices: [ absent, installed, present, removed ]
+    default: installed
+  update_cache:
+    description:
+      - Update the package database first (runs C(apt-get update)).
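+      - See the last entry in the EXAMPLES section for combining a cache update with a package installation.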
+    type: bool
+    default: 'no'
+author:
+- Evgenii Terechkov (@evgkrsk)
+'''
+
+EXAMPLES = '''
+- name: Install package foo
+  apt_rpm:
+    pkg: foo
+    state: present
+
+- name: Remove package foo
+  apt_rpm:
+    pkg: foo
+    state: absent
+
+- name: Remove packages foo and bar
+  apt_rpm:
+    pkg: foo,bar
+    state: absent
+
+# bar will be updated if a newer version exists
+- name: Update the package database and install bar
+  apt_rpm:
+    name: bar
+    state: present
+    update_cache: yes
+'''
+
+import json
+import os
+import shlex
+import sys
+
+from ansible.module_utils.basic import AnsibleModule
+
+APT_PATH = "/usr/bin/apt-get"
+RPM_PATH = "/usr/bin/rpm"
+
+
+def query_package(module, name):
+    # rpm -q returns 0 if the package is installed,
+    # 1 if it is not installed
+    rc, out, err = module.run_command("%s -q %s" % (RPM_PATH, name))
+    if rc == 0:
+        return True
+    else:
+        return False
+
+
+def query_package_provides(module, name):
+    # rpm -q returns 0 if the package is installed,
+    # 1 if it is not installed
+    rc, out, err = module.run_command("%s -q --provides %s" % (RPM_PATH, name))
+    return rc == 0
+
+
+def update_package_db(module):
+    rc, out, err = module.run_command("%s update" % APT_PATH)
+
+    if rc != 0:
+        module.fail_json(msg="could not update package db: %s" % err)
+
+
+def remove_packages(module, packages):
+
+    remove_c = 0
+    # Using a for loop in case of error, we can report the package that failed
+    for package in packages:
+        # Query the package first, to see if we even need to remove
+        if not query_package(module, package):
+            continue
+
+        rc, out, err = module.run_command("%s -y remove %s" % (APT_PATH, package))
+
+        if rc != 0:
+            module.fail_json(msg="failed to remove %s: %s" % (package, err))
+
+        remove_c += 1
+
+    if remove_c > 0:
+        module.exit_json(changed=True, msg="removed %s package(s)" % remove_c)
+
+    module.exit_json(changed=False, msg="package(s) already absent")
+
+
+def install_packages(module, pkgspec):
+
+    packages = ""
+    for package in pkgspec:
+        if not query_package_provides(module, package):
+            packages += "'%s' " % package
+
+    if len(packages) != 0:
+
+        rc, out, err = module.run_command("%s -y install %s" % (APT_PATH, packages))
+
+        # re-check every requested package, not just the last loop variable
+        installed = True
+        for package in pkgspec:
+            if not query_package_provides(module, package):
+                installed = False
+
+        # apt-rpm always has exit code 0 if --force is used
+        if rc or not installed:
+            module.fail_json(msg="'apt-get -y install %s' failed: %s" % (packages, err))
+        else:
+            module.exit_json(changed=True, msg="installed %s" % packages)
+    else:
+        module.exit_json(changed=False)
+
+
+def main():
+    module = AnsibleModule(
+        argument_spec=dict(
+            state=dict(type='str', default='installed', choices=['absent', 'installed', 'present', 'removed']),
+            update_cache=dict(type='bool', default=False, aliases=['update-cache']),
+            package=dict(type='str', required=True, aliases=['name', 'pkg']),
+        ),
+    )
+
+    if not os.path.exists(APT_PATH) or not os.path.exists(RPM_PATH):
+        module.fail_json(msg="cannot find /usr/bin/apt-get and/or /usr/bin/rpm")
+
+    p = module.params
+
+    if p['update_cache']:
+        update_package_db(module)
+
+    packages = p['package'].split(',')
+
+    if p['state'] in ['installed', 'present']:
+        install_packages(module, packages)
+
+    elif p['state'] in ['absent', 'removed']:
+        remove_packages(module, packages)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/packaging/os/flatpak.py b/plugins/modules/packaging/os/flatpak.py
new file mode 100644
index 0000000000..46c881d904
--- /dev/null
+++
b/plugins/modules/packaging/os/flatpak.py @@ -0,0 +1,299 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2017 John Kwiatkoski (@JayKayy) +# Copyright: (c) 2018 Alexander Bethke (@oolongbrothers) +# Copyright: (c) 2017 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + + +# ATTENTION CONTRIBUTORS! +# +# TL;DR: Run this module's integration tests manually before opening a pull request +# +# Long explanation: +# The integration tests for this module are currently NOT run on the Ansible project's continuous +# delivery pipeline. So please: When you make changes to this module, make sure that you run the +# included integration tests manually for both Python 2 and Python 3: +# +# Python 2: +# ansible-test integration -v --docker fedora28 --docker-privileged --allow-unsupported --python 2.7 flatpak +# Python 3: +# ansible-test integration -v --docker fedora28 --docker-privileged --allow-unsupported --python 3.6 flatpak +# +# Because of external dependencies, the current integration tests are somewhat too slow and brittle +# to be included right now. I have plans to rewrite the integration tests based on a local flatpak +# repository so that they can be included into the normal CI pipeline. +# //oolongbrothers + + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = r''' +--- +module: flatpak +short_description: Manage flatpaks +description: +- Allows users to add or remove flatpaks. +- See the M(flatpak_remote) module for managing flatpak remotes. +author: +- John Kwiatkoski (@JayKayy) +- Alexander Bethke (@oolongbrothers) +requirements: +- flatpak +options: + executable: + description: + - The path to the C(flatpak) executable to use. + - By default, this module looks for the C(flatpak) executable on the path. + default: flatpak + method: + description: + - The installation method to use. + - Defines if the I(flatpak) is supposed to be installed globally for the whole C(system) + or only for the current C(user). + choices: [ system, user ] + default: system + name: + description: + - The name of the flatpak to manage. + - When used with I(state=present), I(name) can be specified as an C(http(s)) URL to a + C(flatpakref) file or the unique reverse DNS name that identifies a flatpak. + - When supplying a reverse DNS name, you can use the I(remote) option to specify on what remote + to look for the flatpak. An example for a reverse DNS name is C(org.gnome.gedit). + - When used with I(state=absent), it is recommended to specify the name in the reverse DNS + format. + - When supplying an C(http(s)) URL with I(state=absent), the module will try to match the + installed flatpak based on the name of the flatpakref to remove it. However, there is no + guarantee that the names of the flatpakref file and the reverse DNS name of the installed + flatpak do match. + required: true + remote: + description: + - The flatpak remote (repository) to install the flatpak from. + - By default, C(flathub) is assumed, but you do need to add the flathub flatpak_remote before + you can use this. + - See the M(flatpak_remote) module for managing flatpak remotes. + default: flathub + state: + description: + - Indicates the desired package state. 
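+    - With I(state=present) the flatpak is installed if it is missing; with I(state=absent) an
+      installed flatpak is removed.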
+ choices: [ absent, present ] + default: present +''' + +EXAMPLES = r''' +- name: Install the spotify flatpak + flatpak: + name: https://s3.amazonaws.com/alexlarsson/spotify-repo/spotify.flatpakref + state: present + +- name: Install the gedit flatpak package + flatpak: + name: https://git.gnome.org/browse/gnome-apps-nightly/plain/gedit.flatpakref + state: present + +- name: Install the gedit package from flathub for current user + flatpak: + name: org.gnome.gedit + state: present + method: user + +- name: Install the Gnome Calendar flatpak from the gnome remote system-wide + flatpak: + name: org.gnome.Calendar + state: present + remote: gnome + +- name: Remove the gedit flatpak + flatpak: + name: org.gnome.gedit + state: absent +''' + +RETURN = r''' +command: + description: The exact flatpak command that was executed + returned: When a flatpak command has been executed + type: str + sample: "/usr/bin/flatpak install --user -y flathub org.gnome.Calculator" +msg: + description: Module error message + returned: failure + type: str + sample: "Executable '/usr/local/bin/flatpak' was not found on the system." +rc: + description: Return code from flatpak binary + returned: When a flatpak command has been executed + type: int + sample: 0 +stderr: + description: Error output from flatpak binary + returned: When a flatpak command has been executed + type: str + sample: "error: Error searching remote flathub: Can't find ref org.gnome.KDE" +stdout: + description: Output from flatpak binary + returned: When a flatpak command has been executed + type: str + sample: "org.gnome.Calendar/x86_64/stable\tcurrent\norg.gnome.gitg/x86_64/stable\tcurrent\n" +''' + +import subprocess +from ansible.module_utils.six.moves.urllib.parse import urlparse +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_native + +OUTDATED_FLATPAK_VERSION_ERROR_MESSAGE = "Unknown option --columns=application" + + +def install_flat(module, binary, remote, name, method): + """Add a new flatpak.""" + global result + if name.startswith('http://') or name.startswith('https://'): + command = "{0} install --{1} -y {2}".format(binary, method, name) + else: + command = "{0} install --{1} -y {2} {3}".format(binary, method, remote, name) + _flatpak_command(module, module.check_mode, command) + result['changed'] = True + + +def uninstall_flat(module, binary, name, method): + """Remove an existing flatpak.""" + global result + installed_flat_name = _match_installed_flat_name(module, binary, name, method) + command = "{0} uninstall -y --{1} {2}".format(binary, method, installed_flat_name) + _flatpak_command(module, module.check_mode, command) + result['changed'] = True + + +def flatpak_exists(module, binary, name, method): + """Check if the flatpak is installed.""" + command = "{0} list --{1} --app".format(binary, method) + output = _flatpak_command(module, False, command) + name = _parse_flatpak_name(name).lower() + if name in output.lower(): + return True + return False + + +def _match_installed_flat_name(module, binary, name, method): + # This is a difficult function, since if the user supplies a flatpakref url, + # we have to rely on a naming convention: + # The flatpakref file name needs to match the flatpak name + global result + parsed_name = _parse_flatpak_name(name) + # Try running flatpak list with columns feature + command = "{0} list --{1} --app --columns=application".format(binary, method) + _flatpak_command(module, False, command, ignore_failure=True) + if result['rc'] != 0 and 
OUTDATED_FLATPAK_VERSION_ERROR_MESSAGE in result['stderr']:
+        # Probably flatpak before 1.2
+        matched_flatpak_name = \
+            _match_flat_using_outdated_flatpak_format(module, binary, parsed_name, method)
+    else:
+        # Probably flatpak >= 1.2
+        matched_flatpak_name = \
+            _match_flat_using_flatpak_column_feature(module, binary, parsed_name, method)
+
+    if matched_flatpak_name:
+        return matched_flatpak_name
+    else:
+        result['msg'] = "Flatpak removal failed: Could not match any installed flatpaks to " +\
+            "the name `{0}`. ".format(_parse_flatpak_name(name)) +\
+            "If you used a URL, try using the reverse DNS name of the flatpak."
+        module.fail_json(**result)
+
+
+def _match_flat_using_outdated_flatpak_format(module, binary, parsed_name, method):
+    # Pre-1.2 flatpak has no --columns support, so match against the full row
+    global result
+    command = "{0} list --{1} --app".format(binary, method)
+    output = _flatpak_command(module, False, command)
+    for row in output.split('\n'):
+        if parsed_name.lower() in row.lower():
+            return row.split()[0]
+
+
+def _match_flat_using_flatpak_column_feature(module, binary, parsed_name, method):
+    global result
+    command = "{0} list --{1} --app --columns=application".format(binary, method)
+    output = _flatpak_command(module, False, command)
+    for row in output.split('\n'):
+        if parsed_name.lower() == row.lower():
+            return row
+
+
+def _parse_flatpak_name(name):
+    if name.startswith('http://') or name.startswith('https://'):
+        file_name = urlparse(name).path.split('/')[-1]
+        file_name_without_extension = file_name.split('.')[0:-1]
+        common_name = ".".join(file_name_without_extension)
+    else:
+        common_name = name
+    return common_name
+
+
+def _flatpak_command(module, noop, command, ignore_failure=False):
+    global result
+    if noop:
+        result['rc'] = 0
+        result['command'] = command
+        return ""
+
+    process = subprocess.Popen(
+        command.split(), stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+    stdout_data, stderr_data = process.communicate()
+    result['rc'] = process.returncode
+    result['command'] = command
+    result['stdout'] = to_native(stdout_data)
+    result['stderr'] = to_native(stderr_data)
+    if result['rc'] != 0 and not ignore_failure:
+        module.fail_json(msg="Failed to execute flatpak command", **result)
+    return to_native(stdout_data)
+
+
+def main():
+    # This module supports check mode
+    module = AnsibleModule(
+        argument_spec=dict(
+            name=dict(type='str', required=True),
+            remote=dict(type='str', default='flathub'),
+            method=dict(type='str', default='system',
+                        choices=['user', 'system']),
+            state=dict(type='str', default='present',
+                       choices=['absent', 'present']),
+            executable=dict(type='path', default='flatpak')
+        ),
+        supports_check_mode=True,
+    )
+
+    name = module.params['name']
+    state = module.params['state']
+    remote = module.params['remote']
+    method = module.params['method']
+    executable = module.params['executable']
+    binary = module.get_bin_path(executable, None)
+
+    global result
+    result = dict(
+        changed=False
+    )
+
+    # If the binary was not found, fail the operation
+    if not binary:
+        module.fail_json(msg="Executable '%s' was not found on the system."
% executable, **result) + + if state == 'present' and not flatpak_exists(module, binary, name, method): + install_flat(module, binary, remote, name, method) + elif state == 'absent' and flatpak_exists(module, binary, name, method): + uninstall_flat(module, binary, name, method) + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/packaging/os/flatpak_remote.py b/plugins/modules/packaging/os/flatpak_remote.py new file mode 100644 index 0000000000..a85286cc44 --- /dev/null +++ b/plugins/modules/packaging/os/flatpak_remote.py @@ -0,0 +1,242 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2017 John Kwiatkoski (@JayKayy) +# Copyright: (c) 2018 Alexander Bethke (@oolongbrothers) +# Copyright: (c) 2017 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + + +# ATTENTION CONTRIBUTORS! +# +# TL;DR: Run this module's integration tests manually before opening a pull request +# +# Long explanation: +# The integration tests for this module are currently NOT run on the Ansible project's continuous +# delivery pipeline. So please: When you make changes to this module, make sure that you run the +# included integration tests manually for both Python 2 and Python 3: +# +# Python 2: +# ansible-test integration -v --docker fedora28 --docker-privileged --allow-unsupported --python 2.7 flatpak_remote +# Python 3: +# ansible-test integration -v --docker fedora28 --docker-privileged --allow-unsupported --python 3.6 flatpak_remote +# +# Because of external dependencies, the current integration tests are somewhat too slow and brittle +# to be included right now. I have plans to rewrite the integration tests based on a local flatpak +# repository so that they can be included into the normal CI pipeline. +# //oolongbrothers + + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = r''' +--- +module: flatpak_remote +short_description: Manage flatpak repository remotes +description: +- Allows users to add or remove flatpak remotes. +- The flatpak remotes concept is comparable to what is called repositories in other packaging + formats. +- Currently, remote addition is only supported via I(flatpakrepo) file URLs. +- Existing remotes will not be updated. +- See the M(flatpak) module for managing flatpaks. +author: +- John Kwiatkoski (@JayKayy) +- Alexander Bethke (@oolongbrothers) +requirements: +- flatpak +options: + executable: + description: + - The path to the C(flatpak) executable to use. + - By default, this module looks for the C(flatpak) executable on the path. + default: flatpak + flatpakrepo_url: + description: + - The URL to the I(flatpakrepo) file representing the repository remote to add. + - When used with I(state=present), the flatpak remote specified under the I(flatpakrepo_url) + is added using the specified installation C(method). + - When used with I(state=absent), this is not required. + - Required when I(state=present). + method: + description: + - The installation method to use. + - Defines if the I(flatpak) is supposed to be installed globally for the whole C(system) + or only for the current C(user). + choices: [ system, user ] + default: system + name: + description: + - The desired name for the flatpak remote to be registered under on the managed host. 
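+    - For example, C(flathub) or C(gnome), as used in the EXAMPLES section below.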
+ - When used with I(state=present), the remote will be added to the managed host under + the specified I(name). + - When used with I(state=absent) the remote with that name will be removed. + required: true + state: + description: + - Indicates the desired package state. + choices: [ absent, present ] + default: present +''' + +EXAMPLES = r''' +- name: Add the Gnome flatpak remote to the system installation + flatpak_remote: + name: gnome + state: present + flatpakrepo_url: https://sdk.gnome.org/gnome-apps.flatpakrepo + +- name: Add the flathub flatpak repository remote to the user installation + flatpak_remote: + name: flathub + state: present + flatpakrepo_url: https://dl.flathub.org/repo/flathub.flatpakrepo + method: user + +- name: Remove the Gnome flatpak remote from the user installation + flatpak_remote: + name: gnome + state: absent + method: user + +- name: Remove the flathub remote from the system installation + flatpak_remote: + name: flathub + state: absent +''' + +RETURN = r''' +command: + description: The exact flatpak command that was executed + returned: When a flatpak command has been executed + type: str + sample: "/usr/bin/flatpak remote-add --system flatpak-test https://dl.flathub.org/repo/flathub.flatpakrepo" +msg: + description: Module error message + returned: failure + type: str + sample: "Executable '/usr/local/bin/flatpak' was not found on the system." +rc: + description: Return code from flatpak binary + returned: When a flatpak command has been executed + type: int + sample: 0 +stderr: + description: Error output from flatpak binary + returned: When a flatpak command has been executed + type: str + sample: "error: GPG verification enabled, but no summary found (check that the configured URL in remote config is correct)\n" +stdout: + description: Output from flatpak binary + returned: When a flatpak command has been executed + type: str + sample: "flathub\tFlathub\thttps://dl.flathub.org/repo/\t1\t\n" +''' + +import subprocess +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_bytes, to_native + + +def add_remote(module, binary, name, flatpakrepo_url, method): + """Add a new remote.""" + global result + command = "{0} remote-add --{1} {2} {3}".format( + binary, method, name, flatpakrepo_url) + _flatpak_command(module, module.check_mode, command) + result['changed'] = True + + +def remove_remote(module, binary, name, method): + """Remove an existing remote.""" + global result + command = "{0} remote-delete --{1} --force {2} ".format( + binary, method, name) + _flatpak_command(module, module.check_mode, command) + result['changed'] = True + + +def remote_exists(module, binary, name, method): + """Check if the remote exists.""" + command = "{0} remote-list -d --{1}".format(binary, method) + # The query operation for the remote needs to be run even in check mode + output = _flatpak_command(module, False, command) + for line in output.splitlines(): + listed_remote = line.split() + if len(listed_remote) == 0: + continue + if listed_remote[0] == to_native(name): + return True + return False + + +def _flatpak_command(module, noop, command): + global result + if noop: + result['rc'] = 0 + result['command'] = command + return "" + + process = subprocess.Popen( + command.split(), stdout=subprocess.PIPE, stderr=subprocess.PIPE) + stdout_data, stderr_data = process.communicate() + result['rc'] = process.returncode + result['command'] = command + result['stdout'] = stdout_data + result['stderr'] = stderr_data + if result['rc'] != 
0: + module.fail_json(msg="Failed to execute flatpak command", **result) + return to_native(stdout_data) + + +def main(): + module = AnsibleModule( + argument_spec=dict( + name=dict(type='str', required=True), + flatpakrepo_url=dict(type='str'), + method=dict(type='str', default='system', + choices=['user', 'system']), + state=dict(type='str', default="present", + choices=['absent', 'present']), + executable=dict(type='str', default="flatpak") + ), + # This module supports check mode + supports_check_mode=True, + ) + + name = module.params['name'] + flatpakrepo_url = module.params['flatpakrepo_url'] + method = module.params['method'] + state = module.params['state'] + executable = module.params['executable'] + binary = module.get_bin_path(executable, None) + + if flatpakrepo_url is None: + flatpakrepo_url = '' + + global result + result = dict( + changed=False + ) + + # If the binary was not found, fail the operation + if not binary: + module.fail_json(msg="Executable '%s' was not found on the system." % executable, **result) + + remote_already_exists = remote_exists(module, binary, to_bytes(name), method) + + if state == 'present' and not remote_already_exists: + add_remote(module, binary, name, flatpakrepo_url, method) + elif state == 'absent' and remote_already_exists: + remove_remote(module, binary, name, method) + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/packaging/os/homebrew.py b/plugins/modules/packaging/os/homebrew.py new file mode 100644 index 0000000000..bc09612e10 --- /dev/null +++ b/plugins/modules/packaging/os/homebrew.py @@ -0,0 +1,904 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2013, Andrew Dunham +# (c) 2013, Daniel Jaouen +# (c) 2015, Indrajit Raychaudhuri +# +# Based on macports (Jimmy Tang ) +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: homebrew +author: + - "Indrajit Raychaudhuri (@indrajitr)" + - "Daniel Jaouen (@danieljaouen)" + - "Andrew Dunham (@andrew-d)" +requirements: + - "python >= 2.6" + - homebrew must already be installed on the target system +short_description: Package manager for Homebrew +description: + - Manages Homebrew packages +options: + name: + description: + - list of names of packages to install/remove + aliases: ['pkg', 'package', 'formula'] + type: list + elements: str + path: + description: + - "A ':' separated list of paths to search for 'brew' executable. + Since a package (I(formula) in homebrew parlance) location is prefixed relative to the actual path of I(brew) command, + providing an alternative I(brew) path enables managing different set of packages in an alternative location in the system." 
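+      - "For example, C(/my/other/location/bin), as shown in the EXAMPLES section, makes the module use a C(brew) installed in that alternative location."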
+ default: '/usr/local/bin' + state: + description: + - state of the package + choices: [ 'head', 'latest', 'present', 'absent', 'linked', 'unlinked' ] + default: present + update_homebrew: + description: + - update homebrew itself first + type: bool + default: 'no' + aliases: ['update-brew'] + upgrade_all: + description: + - upgrade all homebrew packages + type: bool + default: 'no' + aliases: ['upgrade'] + install_options: + description: + - options flags to install a package + aliases: ['options'] +notes: + - When used with a `loop:` each package will be processed individually, + it is much more efficient to pass the list directly to the `name` option. +''' +EXAMPLES = ''' +# Install formula foo with 'brew' in default path (C(/usr/local/bin)) +- homebrew: + name: foo + state: present + +# Install formula foo with 'brew' in alternate path C(/my/other/location/bin) +- homebrew: + name: foo + path: /my/other/location/bin + state: present + +# Update homebrew first and install formula foo with 'brew' in default path +- homebrew: + name: foo + state: present + update_homebrew: yes + +# Update homebrew first and upgrade formula foo to latest available with 'brew' in default path +- homebrew: + name: foo + state: latest + update_homebrew: yes + +# Update homebrew and upgrade all packages +- homebrew: + update_homebrew: yes + upgrade_all: yes + +# Miscellaneous other examples +- homebrew: + name: foo + state: head + +- homebrew: + name: foo + state: linked + +- homebrew: + name: foo + state: absent + +- homebrew: + name: foo,bar + state: absent + +- homebrew: + name: foo + state: present + install_options: with-baz,enable-debug +''' + +import os.path +import re + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.six import iteritems, string_types + + +# exceptions -------------------------------------------------------------- {{{ +class HomebrewException(Exception): + pass +# /exceptions ------------------------------------------------------------- }}} + + +# utils ------------------------------------------------------------------- {{{ +def _create_regex_group(s): + lines = (line.strip() for line in s.split('\n') if line.strip()) + chars = filter(None, (line.split('#')[0].strip() for line in lines)) + group = r'[^' + r''.join(chars) + r']' + return re.compile(group) +# /utils ------------------------------------------------------------------ }}} + + +class Homebrew(object): + '''A class to manage Homebrew packages.''' + + # class regexes ------------------------------------------------ {{{ + VALID_PATH_CHARS = r''' + \w # alphanumeric characters (i.e., [a-zA-Z0-9_]) + \s # spaces + : # colons + {sep} # the OS-specific path separator + . # dots + - # dashes + '''.format(sep=os.path.sep) + + VALID_BREW_PATH_CHARS = r''' + \w # alphanumeric characters (i.e., [a-zA-Z0-9_]) + \s # spaces + {sep} # the OS-specific path separator + . # dots + - # dashes + '''.format(sep=os.path.sep) + + VALID_PACKAGE_CHARS = r''' + \w # alphanumeric characters (i.e., [a-zA-Z0-9_]) + . 
# dots + / # slash (for taps) + \+ # plusses + - # dashes + : # colons (for URLs) + @ # at-sign + ''' + + INVALID_PATH_REGEX = _create_regex_group(VALID_PATH_CHARS) + INVALID_BREW_PATH_REGEX = _create_regex_group(VALID_BREW_PATH_CHARS) + INVALID_PACKAGE_REGEX = _create_regex_group(VALID_PACKAGE_CHARS) + # /class regexes ----------------------------------------------- }}} + + # class validations -------------------------------------------- {{{ + @classmethod + def valid_path(cls, path): + ''' + `path` must be one of: + - list of paths + - a string containing only: + - alphanumeric characters + - dashes + - dots + - spaces + - colons + - os.path.sep + ''' + + if isinstance(path, string_types): + return not cls.INVALID_PATH_REGEX.search(path) + + try: + iter(path) + except TypeError: + return False + else: + paths = path + return all(cls.valid_brew_path(path_) for path_ in paths) + + @classmethod + def valid_brew_path(cls, brew_path): + ''' + `brew_path` must be one of: + - None + - a string containing only: + - alphanumeric characters + - dashes + - dots + - spaces + - os.path.sep + ''' + + if brew_path is None: + return True + + return ( + isinstance(brew_path, string_types) + and not cls.INVALID_BREW_PATH_REGEX.search(brew_path) + ) + + @classmethod + def valid_package(cls, package): + '''A valid package is either None or alphanumeric.''' + + if package is None: + return True + + return ( + isinstance(package, string_types) + and not cls.INVALID_PACKAGE_REGEX.search(package) + ) + + @classmethod + def valid_state(cls, state): + ''' + A valid state is one of: + - None + - installed + - upgraded + - head + - linked + - unlinked + - absent + ''' + + if state is None: + return True + else: + return ( + isinstance(state, string_types) + and state.lower() in ( + 'installed', + 'upgraded', + 'head', + 'linked', + 'unlinked', + 'absent', + ) + ) + + @classmethod + def valid_module(cls, module): + '''A valid module is an instance of AnsibleModule.''' + + return isinstance(module, AnsibleModule) + + # /class validations ------------------------------------------- }}} + + # class properties --------------------------------------------- {{{ + @property + def module(self): + return self._module + + @module.setter + def module(self, module): + if not self.valid_module(module): + self._module = None + self.failed = True + self.message = 'Invalid module: {0}.'.format(module) + raise HomebrewException(self.message) + + else: + self._module = module + return module + + @property + def path(self): + return self._path + + @path.setter + def path(self, path): + if not self.valid_path(path): + self._path = [] + self.failed = True + self.message = 'Invalid path: {0}.'.format(path) + raise HomebrewException(self.message) + + else: + if isinstance(path, string_types): + self._path = path.split(':') + else: + self._path = path + + return path + + @property + def brew_path(self): + return self._brew_path + + @brew_path.setter + def brew_path(self, brew_path): + if not self.valid_brew_path(brew_path): + self._brew_path = None + self.failed = True + self.message = 'Invalid brew_path: {0}.'.format(brew_path) + raise HomebrewException(self.message) + + else: + self._brew_path = brew_path + return brew_path + + @property + def params(self): + return self._params + + @params.setter + def params(self, params): + self._params = self.module.params + return self._params + + @property + def current_package(self): + return self._current_package + + @current_package.setter + def current_package(self, package): + if not 
self.valid_package(package): + self._current_package = None + self.failed = True + self.message = 'Invalid package: {0}.'.format(package) + raise HomebrewException(self.message) + + else: + self._current_package = package + return package + # /class properties -------------------------------------------- }}} + + def __init__(self, module, path, packages=None, state=None, + update_homebrew=False, upgrade_all=False, + install_options=None): + if not install_options: + install_options = list() + self._setup_status_vars() + self._setup_instance_vars(module=module, path=path, packages=packages, + state=state, update_homebrew=update_homebrew, + upgrade_all=upgrade_all, + install_options=install_options, ) + + self._prep() + + # prep --------------------------------------------------------- {{{ + def _setup_status_vars(self): + self.failed = False + self.changed = False + self.changed_count = 0 + self.unchanged_count = 0 + self.message = '' + + def _setup_instance_vars(self, **kwargs): + for key, val in iteritems(kwargs): + setattr(self, key, val) + + def _prep(self): + self._prep_brew_path() + + def _prep_brew_path(self): + if not self.module: + self.brew_path = None + self.failed = True + self.message = 'AnsibleModule not set.' + raise HomebrewException(self.message) + + self.brew_path = self.module.get_bin_path( + 'brew', + required=True, + opt_dirs=self.path, + ) + if not self.brew_path: + self.brew_path = None + self.failed = True + self.message = 'Unable to locate homebrew executable.' + raise HomebrewException('Unable to locate homebrew executable.') + + return self.brew_path + + def _status(self): + return (self.failed, self.changed, self.message) + # /prep -------------------------------------------------------- }}} + + def run(self): + try: + self._run() + except HomebrewException: + pass + + if not self.failed and (self.changed_count + self.unchanged_count > 1): + self.message = "Changed: %d, Unchanged: %d" % ( + self.changed_count, + self.unchanged_count, + ) + (failed, changed, message) = self._status() + + return (failed, changed, message) + + # checks ------------------------------------------------------- {{{ + def _current_package_is_installed(self): + if not self.valid_package(self.current_package): + self.failed = True + self.message = 'Invalid package: {0}.'.format(self.current_package) + raise HomebrewException(self.message) + + cmd = [ + "{brew_path}".format(brew_path=self.brew_path), + "info", + self.current_package, + ] + rc, out, err = self.module.run_command(cmd) + for line in out.split('\n'): + if ( + re.search(r'Built from source', line) + or re.search(r'Poured from bottle', line) + ): + return True + + return False + + def _current_package_is_outdated(self): + if not self.valid_package(self.current_package): + return False + + rc, out, err = self.module.run_command([ + self.brew_path, + 'outdated', + self.current_package, + ]) + + return rc != 0 + + def _current_package_is_installed_from_head(self): + if not Homebrew.valid_package(self.current_package): + return False + elif not self._current_package_is_installed(): + return False + + rc, out, err = self.module.run_command([ + self.brew_path, + 'info', + self.current_package, + ]) + + try: + version_info = [line for line in out.split('\n') if line][0] + except IndexError: + return False + + return version_info.split(' ')[-1] == 'HEAD' + # /checks ------------------------------------------------------ }}} + + # commands ----------------------------------------------------- {{{ + def _run(self): + if 
self.update_homebrew: + self._update_homebrew() + + if self.upgrade_all: + self._upgrade_all() + + if self.packages: + if self.state == 'installed': + return self._install_packages() + elif self.state == 'upgraded': + return self._upgrade_packages() + elif self.state == 'head': + return self._install_packages() + elif self.state == 'linked': + return self._link_packages() + elif self.state == 'unlinked': + return self._unlink_packages() + elif self.state == 'absent': + return self._uninstall_packages() + + # updated -------------------------------- {{{ + def _update_homebrew(self): + if self.module.check_mode: + self.changed = True + self.message = 'Homebrew would be updated.' + raise HomebrewException(self.message) + rc, out, err = self.module.run_command([ + self.brew_path, + 'update', + ]) + if rc == 0: + if out and isinstance(out, string_types): + already_updated = any( + re.search(r'Already up-to-date.', s.strip(), re.IGNORECASE) + for s in out.split('\n') + if s + ) + if not already_updated: + self.changed = True + self.message = 'Homebrew updated successfully.' + else: + self.message = 'Homebrew already up-to-date.' + + return True + else: + self.failed = True + self.message = err.strip() + raise HomebrewException(self.message) + # /updated ------------------------------- }}} + + # _upgrade_all --------------------------- {{{ + def _upgrade_all(self): + if self.module.check_mode: + self.changed = True + self.message = 'Homebrew packages would be upgraded.' + raise HomebrewException(self.message) + rc, out, err = self.module.run_command([ + self.brew_path, + 'upgrade', + ]) + if rc == 0: + if not out: + self.message = 'Homebrew packages already upgraded.' + + else: + self.changed = True + self.message = 'Homebrew upgraded.' + + return True + else: + self.failed = True + self.message = err.strip() + raise HomebrewException(self.message) + # /_upgrade_all -------------------------- }}} + + # installed ------------------------------ {{{ + def _install_current_package(self): + if not self.valid_package(self.current_package): + self.failed = True + self.message = 'Invalid package: {0}.'.format(self.current_package) + raise HomebrewException(self.message) + + if self._current_package_is_installed(): + self.unchanged_count += 1 + self.message = 'Package already installed: {0}'.format( + self.current_package, + ) + return True + + if self.module.check_mode: + self.changed = True + self.message = 'Package would be installed: {0}'.format( + self.current_package + ) + raise HomebrewException(self.message) + + if self.state == 'head': + head = '--HEAD' + else: + head = None + + opts = ( + [self.brew_path, 'install'] + + self.install_options + + [self.current_package, head] + ) + cmd = [opt for opt in opts if opt] + rc, out, err = self.module.run_command(cmd) + + if self._current_package_is_installed(): + self.changed_count += 1 + self.changed = True + self.message = 'Package installed: {0}'.format(self.current_package) + return True + else: + self.failed = True + self.message = err.strip() + raise HomebrewException(self.message) + + def _install_packages(self): + for package in self.packages: + self.current_package = package + self._install_current_package() + + return True + # /installed ----------------------------- }}} + + # upgraded ------------------------------- {{{ + def _upgrade_current_package(self): + command = 'upgrade' + + if not self.valid_package(self.current_package): + self.failed = True + self.message = 'Invalid package: {0}.'.format(self.current_package) + raise 
HomebrewException(self.message) + + if not self._current_package_is_installed(): + command = 'install' + + if self._current_package_is_installed() and not self._current_package_is_outdated(): + self.message = 'Package is already upgraded: {0}'.format( + self.current_package, + ) + self.unchanged_count += 1 + return True + + if self.module.check_mode: + self.changed = True + self.message = 'Package would be upgraded: {0}'.format( + self.current_package + ) + raise HomebrewException(self.message) + + opts = ( + [self.brew_path, command] + + self.install_options + + [self.current_package] + ) + cmd = [opt for opt in opts if opt] + rc, out, err = self.module.run_command(cmd) + + if self._current_package_is_installed() and not self._current_package_is_outdated(): + self.changed_count += 1 + self.changed = True + self.message = 'Package upgraded: {0}'.format(self.current_package) + return True + else: + self.failed = True + self.message = err.strip() + raise HomebrewException(self.message) + + def _upgrade_all_packages(self): + opts = ( + [self.brew_path, 'upgrade'] + + self.install_options + ) + cmd = [opt for opt in opts if opt] + rc, out, err = self.module.run_command(cmd) + + if rc == 0: + self.changed = True + self.message = 'All packages upgraded.' + return True + else: + self.failed = True + self.message = err.strip() + raise HomebrewException(self.message) + + def _upgrade_packages(self): + if not self.packages: + self._upgrade_all_packages() + else: + for package in self.packages: + self.current_package = package + self._upgrade_current_package() + return True + # /upgraded ------------------------------ }}} + + # uninstalled ---------------------------- {{{ + def _uninstall_current_package(self): + if not self.valid_package(self.current_package): + self.failed = True + self.message = 'Invalid package: {0}.'.format(self.current_package) + raise HomebrewException(self.message) + + if not self._current_package_is_installed(): + self.unchanged_count += 1 + self.message = 'Package already uninstalled: {0}'.format( + self.current_package, + ) + return True + + if self.module.check_mode: + self.changed = True + self.message = 'Package would be uninstalled: {0}'.format( + self.current_package + ) + raise HomebrewException(self.message) + + opts = ( + [self.brew_path, 'uninstall', '--force'] + + self.install_options + + [self.current_package] + ) + cmd = [opt for opt in opts if opt] + rc, out, err = self.module.run_command(cmd) + + if not self._current_package_is_installed(): + self.changed_count += 1 + self.changed = True + self.message = 'Package uninstalled: {0}'.format(self.current_package) + return True + else: + self.failed = True + self.message = err.strip() + raise HomebrewException(self.message) + + def _uninstall_packages(self): + for package in self.packages: + self.current_package = package + self._uninstall_current_package() + + return True + # /uninstalled ----------------------------- }}} + + # linked --------------------------------- {{{ + def _link_current_package(self): + if not self.valid_package(self.current_package): + self.failed = True + self.message = 'Invalid package: {0}.'.format(self.current_package) + raise HomebrewException(self.message) + + if not self._current_package_is_installed(): + self.failed = True + self.message = 'Package not installed: {0}.'.format(self.current_package) + raise HomebrewException(self.message) + + if self.module.check_mode: + self.changed = True + self.message = 'Package would be linked: {0}'.format( + self.current_package + ) + raise 
HomebrewException(self.message) + + opts = ( + [self.brew_path, 'link'] + + self.install_options + + [self.current_package] + ) + cmd = [opt for opt in opts if opt] + rc, out, err = self.module.run_command(cmd) + + if rc == 0: + self.changed_count += 1 + self.changed = True + self.message = 'Package linked: {0}'.format(self.current_package) + + return True + else: + self.failed = True + self.message = 'Package could not be linked: {0}.'.format(self.current_package) + raise HomebrewException(self.message) + + def _link_packages(self): + for package in self.packages: + self.current_package = package + self._link_current_package() + + return True + # /linked -------------------------------- }}} + + # unlinked ------------------------------- {{{ + def _unlink_current_package(self): + if not self.valid_package(self.current_package): + self.failed = True + self.message = 'Invalid package: {0}.'.format(self.current_package) + raise HomebrewException(self.message) + + if not self._current_package_is_installed(): + self.failed = True + self.message = 'Package not installed: {0}.'.format(self.current_package) + raise HomebrewException(self.message) + + if self.module.check_mode: + self.changed = True + self.message = 'Package would be unlinked: {0}'.format( + self.current_package + ) + raise HomebrewException(self.message) + + opts = ( + [self.brew_path, 'unlink'] + + self.install_options + + [self.current_package] + ) + cmd = [opt for opt in opts if opt] + rc, out, err = self.module.run_command(cmd) + + if rc == 0: + self.changed_count += 1 + self.changed = True + self.message = 'Package unlinked: {0}'.format(self.current_package) + + return True + else: + self.failed = True + self.message = 'Package could not be unlinked: {0}.'.format(self.current_package) + raise HomebrewException(self.message) + + def _unlink_packages(self): + for package in self.packages: + self.current_package = package + self._unlink_current_package() + + return True + # /unlinked ------------------------------ }}} + # /commands ---------------------------------------------------- }}} + + +def main(): + module = AnsibleModule( + argument_spec=dict( + name=dict( + aliases=["pkg", "package", "formula"], + required=False, + type='list', + elements='str', + ), + path=dict( + default="/usr/local/bin", + required=False, + type='path', + ), + state=dict( + default="present", + choices=[ + "present", "installed", + "latest", "upgraded", "head", + "linked", "unlinked", + "absent", "removed", "uninstalled", + ], + ), + update_homebrew=dict( + default=False, + aliases=["update-brew"], + type='bool', + ), + upgrade_all=dict( + default=False, + aliases=["upgrade"], + type='bool', + ), + install_options=dict( + default=None, + aliases=['options'], + type='list', + ) + ), + supports_check_mode=True, + ) + + module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C', LC_CTYPE='C') + + p = module.params + + if p['name']: + packages = p['name'] + else: + packages = None + + path = p['path'] + if path: + path = path.split(':') + + state = p['state'] + if state in ('present', 'installed'): + state = 'installed' + if state in ('head', ): + state = 'head' + if state in ('latest', 'upgraded'): + state = 'upgraded' + if state == 'linked': + state = 'linked' + if state == 'unlinked': + state = 'unlinked' + if state in ('absent', 'removed', 'uninstalled'): + state = 'absent' + + update_homebrew = p['update_homebrew'] + upgrade_all = p['upgrade_all'] + p['install_options'] = p['install_options'] or [] + install_options = 
['--{0}'.format(install_option) + for install_option in p['install_options']] + + brew = Homebrew(module=module, path=path, packages=packages, + state=state, update_homebrew=update_homebrew, + upgrade_all=upgrade_all, install_options=install_options) + (failed, changed, message) = brew.run() + if failed: + module.fail_json(msg=message) + else: + module.exit_json(changed=changed, msg=message) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/packaging/os/homebrew_cask.py b/plugins/modules/packaging/os/homebrew_cask.py new file mode 100644 index 0000000000..19687da12f --- /dev/null +++ b/plugins/modules/packaging/os/homebrew_cask.py @@ -0,0 +1,846 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2013, Daniel Jaouen +# Copyright: (c) 2016, Indrajit Raychaudhuri +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = { + 'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community' +} + + +DOCUMENTATION = ''' +--- +module: homebrew_cask +author: +- "Indrajit Raychaudhuri (@indrajitr)" +- "Daniel Jaouen (@danieljaouen)" +- "Enric Lluelles (@enriclluelles)" +requirements: +- "python >= 2.6" +short_description: Install and uninstall homebrew casks. +description: +- Manages Homebrew casks. +options: + name: + description: + - Name of cask to install or remove. + required: true + aliases: ['pkg', 'package', 'cask'] + type: list + path: + description: + - "':' separated list of paths to search for 'brew' executable." + default: '/usr/local/bin' + type: path + state: + description: + - State of the cask. + choices: [ 'present', 'absent', 'upgraded' ] + default: present + type: str + sudo_password: + description: + - The sudo password to be passed to SUDO_ASKPASS. + required: false + type: str + update_homebrew: + description: + - Update homebrew itself first. + - Note that C(brew cask update) is a synonym for C(brew update). + type: bool + default: 'no' + aliases: ['update-brew'] + install_options: + description: + - Options flags to install a package. + aliases: ['options'] + type: list + accept_external_apps: + description: + - Allow external apps. + type: bool + default: 'no' + upgrade_all: + description: + - Upgrade all casks. + - Mutually exclusive with C(upgraded) state. + type: bool + default: 'no' + aliases: ['upgrade'] + greedy: + description: + - Upgrade casks that auto update. + - Passes --greedy to brew cask outdated when checking + if an installed cask has a newer version available. 
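+      - Typically combined with I(state=upgraded); see the C(1password) entry in the EXAMPLES section.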
+ type: bool + default: 'no' +''' +EXAMPLES = ''' +- name: Install cask + homebrew_cask: + name: alfred + state: present + +- name: Remove cask + homebrew_cask: + name: alfred + state: absent + +- name: Install cask with install options + homebrew_cask: + name: alfred + state: present + install_options: 'appdir=/Applications' + +- name: Install cask with install options + homebrew_cask: + name: alfred + state: present + install_options: 'debug,appdir=/Applications' + +- name: Allow external app + homebrew_cask: + name: alfred + state: present + accept_external_apps: True + +- name: Remove cask with force option + homebrew_cask: + name: alfred + state: absent + install_options: force + +- name: Upgrade all casks + homebrew_cask: + upgrade_all: true + +- name: Upgrade given cask with force option + homebrew_cask: + name: alfred + state: upgraded + install_options: force + +- name: Upgrade cask with greedy option + homebrew_cask: + name: 1password + state: upgraded + greedy: True + +- name: Using sudo password for installing cask + homebrew_cask: + name: wireshark + state: present + sudo_password: "{{ ansible_become_pass }}" +''' + +import os +import re +import tempfile + +from ansible.module_utils._text import to_bytes +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.six import iteritems, string_types + + +# exceptions -------------------------------------------------------------- {{{ +class HomebrewCaskException(Exception): + pass +# /exceptions ------------------------------------------------------------- }}} + + +# utils ------------------------------------------------------------------- {{{ +def _create_regex_group(s): + lines = (line.strip() for line in s.split('\n') if line.strip()) + chars = filter(None, (line.split('#')[0].strip() for line in lines)) + group = r'[^' + r''.join(chars) + r']' + return re.compile(group) +# /utils ------------------------------------------------------------------ }}} + + +class HomebrewCask(object): + '''A class to manage Homebrew casks.''' + + # class regexes ------------------------------------------------ {{{ + VALID_PATH_CHARS = r''' + \w # alphanumeric characters (i.e., [a-zA-Z0-9_]) + \s # spaces + : # colons + {sep} # the OS-specific path separator + . # dots + - # dashes + '''.format(sep=os.path.sep) + + VALID_BREW_PATH_CHARS = r''' + \w # alphanumeric characters (i.e., [a-zA-Z0-9_]) + \s # spaces + {sep} # the OS-specific path separator + . # dots + - # dashes + '''.format(sep=os.path.sep) + + VALID_CASK_CHARS = r''' + \w # alphanumeric characters (i.e., [a-zA-Z0-9_]) + . 
# dots + / # slash (for taps) + - # dashes + ''' + + INVALID_PATH_REGEX = _create_regex_group(VALID_PATH_CHARS) + INVALID_BREW_PATH_REGEX = _create_regex_group(VALID_BREW_PATH_CHARS) + INVALID_CASK_REGEX = _create_regex_group(VALID_CASK_CHARS) + # /class regexes ----------------------------------------------- }}} + + # class validations -------------------------------------------- {{{ + @classmethod + def valid_path(cls, path): + ''' + `path` must be one of: + - list of paths + - a string containing only: + - alphanumeric characters + - dashes + - dots + - spaces + - colons + - os.path.sep + ''' + + if isinstance(path, (string_types)): + return not cls.INVALID_PATH_REGEX.search(path) + + try: + iter(path) + except TypeError: + return False + else: + paths = path + return all(cls.valid_brew_path(path_) for path_ in paths) + + @classmethod + def valid_brew_path(cls, brew_path): + ''' + `brew_path` must be one of: + - None + - a string containing only: + - alphanumeric characters + - dashes + - dots + - spaces + - os.path.sep + ''' + + if brew_path is None: + return True + + return ( + isinstance(brew_path, string_types) + and not cls.INVALID_BREW_PATH_REGEX.search(brew_path) + ) + + @classmethod + def valid_cask(cls, cask): + '''A valid cask is either None or alphanumeric + backslashes.''' + + if cask is None: + return True + + return ( + isinstance(cask, string_types) + and not cls.INVALID_CASK_REGEX.search(cask) + ) + + @classmethod + def valid_state(cls, state): + ''' + A valid state is one of: + - installed + - absent + ''' + + if state is None: + return True + else: + return ( + isinstance(state, string_types) + and state.lower() in ( + 'installed', + 'absent', + ) + ) + + @classmethod + def valid_module(cls, module): + '''A valid module is an instance of AnsibleModule.''' + + return isinstance(module, AnsibleModule) + # /class validations ------------------------------------------- }}} + + # class properties --------------------------------------------- {{{ + @property + def module(self): + return self._module + + @module.setter + def module(self, module): + if not self.valid_module(module): + self._module = None + self.failed = True + self.message = 'Invalid module: {0}.'.format(module) + raise HomebrewCaskException(self.message) + + else: + self._module = module + return module + + @property + def path(self): + return self._path + + @path.setter + def path(self, path): + if not self.valid_path(path): + self._path = [] + self.failed = True + self.message = 'Invalid path: {0}.'.format(path) + raise HomebrewCaskException(self.message) + + else: + if isinstance(path, string_types): + self._path = path.split(':') + else: + self._path = path + + return path + + @property + def brew_path(self): + return self._brew_path + + @brew_path.setter + def brew_path(self, brew_path): + if not self.valid_brew_path(brew_path): + self._brew_path = None + self.failed = True + self.message = 'Invalid brew_path: {0}.'.format(brew_path) + raise HomebrewCaskException(self.message) + + else: + self._brew_path = brew_path + return brew_path + + @property + def params(self): + return self._params + + @params.setter + def params(self, params): + self._params = self.module.params + return self._params + + @property + def current_cask(self): + return self._current_cask + + @current_cask.setter + def current_cask(self, cask): + if not self.valid_cask(cask): + self._current_cask = None + self.failed = True + self.message = 'Invalid cask: {0}.'.format(cask) + raise HomebrewCaskException(self.message) + + else: + 
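+            # The cask name passed validation; remember it as the cask the
+            # following brew commands will operate on.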
self._current_cask = cask + return cask + # /class properties -------------------------------------------- }}} + + def __init__(self, module, path=path, casks=None, state=None, + sudo_password=None, update_homebrew=False, + install_options=None, accept_external_apps=False, + upgrade_all=False, greedy=False): + if not install_options: + install_options = list() + self._setup_status_vars() + self._setup_instance_vars(module=module, path=path, casks=casks, + state=state, sudo_password=sudo_password, + update_homebrew=update_homebrew, + install_options=install_options, + accept_external_apps=accept_external_apps, + upgrade_all=upgrade_all, + greedy=greedy, ) + + self._prep() + + # prep --------------------------------------------------------- {{{ + def _setup_status_vars(self): + self.failed = False + self.changed = False + self.changed_count = 0 + self.unchanged_count = 0 + self.message = '' + + def _setup_instance_vars(self, **kwargs): + for key, val in iteritems(kwargs): + setattr(self, key, val) + + def _prep(self): + self._prep_brew_path() + + def _prep_brew_path(self): + if not self.module: + self.brew_path = None + self.failed = True + self.message = 'AnsibleModule not set.' + raise HomebrewCaskException(self.message) + + self.brew_path = self.module.get_bin_path( + 'brew', + required=True, + opt_dirs=self.path, + ) + if not self.brew_path: + self.brew_path = None + self.failed = True + self.message = 'Unable to locate homebrew executable.' + raise HomebrewCaskException('Unable to locate homebrew executable.') + + return self.brew_path + + def _status(self): + return (self.failed, self.changed, self.message) + # /prep -------------------------------------------------------- }}} + + def run(self): + try: + self._run() + except HomebrewCaskException: + pass + + if not self.failed and (self.changed_count + self.unchanged_count > 1): + self.message = "Changed: %d, Unchanged: %d" % ( + self.changed_count, + self.unchanged_count, + ) + (failed, changed, message) = self._status() + + return (failed, changed, message) + + # checks ------------------------------------------------------- {{{ + def _current_cask_is_outdated(self): + if not self.valid_cask(self.current_cask): + return False + + cask_is_outdated_command = ( + [ + self.brew_path, + 'cask', + 'outdated', + ] + + (['--greedy'] if self.greedy else []) + + [self.current_cask] + ) + + rc, out, err = self.module.run_command(cask_is_outdated_command) + + return out != "" + + def _current_cask_is_installed(self): + if not self.valid_cask(self.current_cask): + self.failed = True + self.message = 'Invalid cask: {0}.'.format(self.current_cask) + raise HomebrewCaskException(self.message) + + cmd = [ + "{brew_path}".format(brew_path=self.brew_path), + "cask", + "list", + self.current_cask + ] + rc, out, err = self.module.run_command(cmd) + + if rc == 0: + return True + else: + return False + # /checks ------------------------------------------------------ }}} + + # commands ----------------------------------------------------- {{{ + def _run(self): + if self.upgrade_all: + return self._upgrade_all() + + if self.casks: + if self.state == 'installed': + return self._install_casks() + elif self.state == 'upgraded': + return self._upgrade_casks() + elif self.state == 'absent': + return self._uninstall_casks() + + self.failed = True + self.message = "You must select a cask to install." 
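+        # Reached when neither upgrade_all nor any casks were given, or the state
+        # matched no supported operation; fail rather than silently doing nothing.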
+ raise HomebrewCaskException(self.message) + + # sudo_password fix ---------------------- {{{ + def _run_command_with_sudo_password(self, cmd): + rc, out, err = '', '', '' + + with tempfile.NamedTemporaryFile() as sudo_askpass_file: + sudo_askpass_file.write(b"#!/bin/sh\n\necho '%s'\n" % to_bytes(self.sudo_password)) + os.chmod(sudo_askpass_file.name, 0o700) + sudo_askpass_file.file.close() + + rc, out, err = self.module.run_command( + cmd, + environ_update={'SUDO_ASKPASS': sudo_askpass_file.name} + ) + + self.module.add_cleanup_file(sudo_askpass_file.name) + + return (rc, out, err) + # /sudo_password fix --------------------- }}} + + # updated -------------------------------- {{{ + def _update_homebrew(self): + rc, out, err = self.module.run_command([ + self.brew_path, + 'update', + ]) + if rc == 0: + if out and isinstance(out, string_types): + already_updated = any( + re.search(r'Already up-to-date.', s.strip(), re.IGNORECASE) + for s in out.split('\n') + if s + ) + if not already_updated: + self.changed = True + self.message = 'Homebrew updated successfully.' + else: + self.message = 'Homebrew already up-to-date.' + + return True + else: + self.failed = True + self.message = err.strip() + raise HomebrewCaskException(self.message) + # /updated ------------------------------- }}} + + # _upgrade_all --------------------------- {{{ + def _upgrade_all(self): + if self.module.check_mode: + self.changed = True + self.message = 'Casks would be upgraded.' + raise HomebrewCaskException(self.message) + + opts = ( + [self.brew_path, 'cask', 'upgrade'] + ) + + cmd = [opt for opt in opts if opt] + + rc, out, err = '', '', '' + + if self.sudo_password: + rc, out, err = self._run_command_with_sudo_password(cmd) + else: + rc, out, err = self.module.run_command(cmd) + + if rc == 0: + if re.search(r'==> No Casks to upgrade', out.strip(), re.IGNORECASE): + self.message = 'Homebrew casks already upgraded.' + + else: + self.changed = True + self.message = 'Homebrew casks upgraded.' 
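+                # brew exited 0 without printing the "No Casks to upgrade"
+                # marker, so at least one cask really was upgraded.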
+ + return True + else: + self.failed = True + self.message = err.strip() + raise HomebrewCaskException(self.message) + # /_upgrade_all -------------------------- }}} + + # installed ------------------------------ {{{ + def _install_current_cask(self): + if not self.valid_cask(self.current_cask): + self.failed = True + self.message = 'Invalid cask: {0}.'.format(self.current_cask) + raise HomebrewCaskException(self.message) + + if self._current_cask_is_installed(): + self.unchanged_count += 1 + self.message = 'Cask already installed: {0}'.format( + self.current_cask, + ) + return True + + if self.module.check_mode: + self.changed = True + self.message = 'Cask would be installed: {0}'.format( + self.current_cask + ) + raise HomebrewCaskException(self.message) + + opts = ( + [self.brew_path, 'cask', 'install', self.current_cask] + + self.install_options + ) + + cmd = [opt for opt in opts if opt] + + rc, out, err = '', '', '' + + if self.sudo_password: + rc, out, err = self._run_command_with_sudo_password(cmd) + else: + rc, out, err = self.module.run_command(cmd) + + if self._current_cask_is_installed(): + self.changed_count += 1 + self.changed = True + self.message = 'Cask installed: {0}'.format(self.current_cask) + return True + elif self.accept_external_apps and re.search(r"Error: It seems there is already an App at", err): + self.unchanged_count += 1 + self.message = 'Cask already installed: {0}'.format( + self.current_cask, + ) + return True + else: + self.failed = True + self.message = err.strip() + raise HomebrewCaskException(self.message) + + def _install_casks(self): + for cask in self.casks: + self.current_cask = cask + self._install_current_cask() + + return True + # /installed ----------------------------- }}} + + # upgraded ------------------------------- {{{ + def _upgrade_current_cask(self): + command = 'upgrade' + + if not self.valid_cask(self.current_cask): + self.failed = True + self.message = 'Invalid cask: {0}.'.format(self.current_cask) + raise HomebrewCaskException(self.message) + + if not self._current_cask_is_installed(): + command = 'install' + + if self._current_cask_is_installed() and not self._current_cask_is_outdated(): + self.message = 'Cask is already upgraded: {0}'.format( + self.current_cask, + ) + self.unchanged_count += 1 + return True + + if self.module.check_mode: + self.changed = True + self.message = 'Cask would be upgraded: {0}'.format( + self.current_cask + ) + raise HomebrewCaskException(self.message) + + opts = ( + [self.brew_path, 'cask', command] + + self.install_options + + [self.current_cask] + ) + cmd = [opt for opt in opts if opt] + + rc, out, err = '', '', '' + + if self.sudo_password: + rc, out, err = self._run_command_with_sudo_password(cmd) + else: + rc, out, err = self.module.run_command(cmd) + + if self._current_cask_is_installed() and not self._current_cask_is_outdated(): + self.changed_count += 1 + self.changed = True + self.message = 'Cask upgraded: {0}'.format(self.current_cask) + return True + else: + self.failed = True + self.message = err.strip() + raise HomebrewCaskException(self.message) + + def _upgrade_casks(self): + for cask in self.casks: + self.current_cask = cask + self._upgrade_current_cask() + + return True + # /upgraded ------------------------------ }}} + + # uninstalled ---------------------------- {{{ + def _uninstall_current_cask(self): + if not self.valid_cask(self.current_cask): + self.failed = True + self.message = 'Invalid cask: {0}.'.format(self.current_cask) + raise HomebrewCaskException(self.message) + + if not 
self._current_cask_is_installed(): + self.unchanged_count += 1 + self.message = 'Cask already uninstalled: {0}'.format( + self.current_cask, + ) + return True + + if self.module.check_mode: + self.changed = True + self.message = 'Cask would be uninstalled: {0}'.format( + self.current_cask + ) + raise HomebrewCaskException(self.message) + + opts = ( + [self.brew_path, 'cask', 'uninstall', self.current_cask] + + self.install_options + ) + + cmd = [opt for opt in opts if opt] + + rc, out, err = '', '', '' + + if self.sudo_password: + rc, out, err = self._run_command_with_sudo_password(cmd) + else: + rc, out, err = self.module.run_command(cmd) + + if not self._current_cask_is_installed(): + self.changed_count += 1 + self.changed = True + self.message = 'Cask uninstalled: {0}'.format(self.current_cask) + return True + else: + self.failed = True + self.message = err.strip() + raise HomebrewCaskException(self.message) + + def _uninstall_casks(self): + for cask in self.casks: + self.current_cask = cask + self._uninstall_current_cask() + + return True + # /uninstalled --------------------------- }}} + # /commands ---------------------------------------------------- }}} + + +def main(): + module = AnsibleModule( + argument_spec=dict( + name=dict( + aliases=["pkg", "package", "cask"], + required=False, + type='list', + ), + path=dict( + default="/usr/local/bin", + required=False, + type='path', + ), + state=dict( + default="present", + choices=[ + "present", "installed", + "latest", "upgraded", + "absent", "removed", "uninstalled", + ], + ), + sudo_password=dict( + type="str", + required=False, + no_log=True, + ), + update_homebrew=dict( + default=False, + aliases=["update-brew"], + type='bool', + ), + install_options=dict( + default=None, + aliases=['options'], + type='list', + ), + accept_external_apps=dict( + default=False, + type='bool', + ), + upgrade_all=dict( + default=False, + aliases=["upgrade"], + type='bool', + ), + greedy=dict( + default=False, + type='bool', + ), + ), + supports_check_mode=True, + ) + + module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C', LC_CTYPE='C') + + p = module.params + + if p['name']: + casks = p['name'] + else: + casks = None + + path = p['path'] + if path: + path = path.split(':') + + state = p['state'] + if state in ('present', 'installed'): + state = 'installed' + if state in ('latest', 'upgraded'): + state = 'upgraded' + if state in ('absent', 'removed', 'uninstalled'): + state = 'absent' + + sudo_password = p['sudo_password'] + + update_homebrew = p['update_homebrew'] + upgrade_all = p['upgrade_all'] + greedy = p['greedy'] + p['install_options'] = p['install_options'] or [] + install_options = ['--{0}'.format(install_option) + for install_option in p['install_options']] + + accept_external_apps = p['accept_external_apps'] + + brew_cask = HomebrewCask(module=module, path=path, casks=casks, + state=state, sudo_password=sudo_password, + update_homebrew=update_homebrew, + install_options=install_options, + accept_external_apps=accept_external_apps, + upgrade_all=upgrade_all, + greedy=greedy, + ) + (failed, changed, message) = brew_cask.run() + if failed: + module.fail_json(msg=message) + else: + module.exit_json(changed=changed, msg=message) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/packaging/os/homebrew_tap.py b/plugins/modules/packaging/os/homebrew_tap.py new file mode 100644 index 0000000000..415541c28f --- /dev/null +++ b/plugins/modules/packaging/os/homebrew_tap.py @@ -0,0 +1,253 @@ +#!/usr/bin/python 
+# -*- coding: utf-8 -*- + +# (c) 2013, Daniel Jaouen +# (c) 2016, Indrajit Raychaudhuri +# +# Based on homebrew (Andrew Dunham ) +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: homebrew_tap +author: + - "Indrajit Raychaudhuri (@indrajitr)" + - "Daniel Jaouen (@danieljaouen)" +short_description: Tap a Homebrew repository. +description: + - Tap external Homebrew repositories. +options: + name: + description: + - The GitHub user/organization repository to tap. + required: true + aliases: ['tap'] + url: + description: + - The optional git URL of the repository to tap. The URL is not + assumed to be on GitHub, and the protocol doesn't have to be HTTP. + Any location and protocol that git can handle is fine. + - I(name) option may not be a list of multiple taps (but a single + tap instead) when this option is provided. + required: false + state: + description: + - state of the repository. + choices: [ 'present', 'absent' ] + required: false + default: 'present' +requirements: [ homebrew ] +''' + +EXAMPLES = ''' +- homebrew_tap: + name: homebrew/dupes + +- homebrew_tap: + name: homebrew/dupes + state: absent + +- homebrew_tap: + name: homebrew/dupes,homebrew/science + state: present + +- homebrew_tap: + name: telemachus/brew + url: 'https://bitbucket.org/telemachus/brew' +''' + +import re + +from ansible.module_utils.basic import AnsibleModule + + +def a_valid_tap(tap): + '''Returns True if the tap is valid.''' + regex = re.compile(r'^([\w-]+)/(homebrew-)?([\w-]+)$') + return regex.match(tap) + + +def already_tapped(module, brew_path, tap): + '''Returns True if already tapped.''' + + rc, out, err = module.run_command([ + brew_path, + 'tap', + ]) + + taps = [tap_.strip().lower() for tap_ in out.split('\n') if tap_] + tap_name = re.sub('homebrew-', '', tap.lower()) + + return tap_name in taps + + +def add_tap(module, brew_path, tap, url=None): + '''Adds a single tap.''' + failed, changed, msg = False, False, '' + + if not a_valid_tap(tap): + failed = True + msg = 'not a valid tap: %s' % tap + + elif not already_tapped(module, brew_path, tap): + if module.check_mode: + module.exit_json(changed=True) + + rc, out, err = module.run_command([ + brew_path, + 'tap', + tap, + url, + ]) + if rc == 0: + changed = True + msg = 'successfully tapped: %s' % tap + else: + failed = True + msg = 'failed to tap: %s' % tap + + else: + msg = 'already tapped: %s' % tap + + return (failed, changed, msg) + + +def add_taps(module, brew_path, taps): + '''Adds one or more taps.''' + failed, unchanged, added, msg = False, 0, 0, '' + + for tap in taps: + (failed, changed, msg) = add_tap(module, brew_path, tap) + if failed: + break + if changed: + added += 1 + else: + unchanged += 1 + + if failed: + msg = 'added: %d, unchanged: %d, error: ' + msg + msg = msg % (added, unchanged) + elif added: + changed = True + msg = 'added: %d, unchanged: %d' % (added, unchanged) + else: + msg = 'added: %d, unchanged: %d' % (added, unchanged) + + return (failed, changed, msg) + + +def remove_tap(module, brew_path, tap): + '''Removes a single tap.''' + failed, changed, msg = False, False, '' + + if not a_valid_tap(tap): + failed = True + msg = 'not a valid tap: %s' % tap + + elif already_tapped(module, brew_path, tap): + if module.check_mode: + 
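+            # In check mode, report that the tap would be added without
+            # invoking brew.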
module.exit_json(changed=True) + + rc, out, err = module.run_command([ + brew_path, + 'untap', + tap, + ]) + if not already_tapped(module, brew_path, tap): + changed = True + msg = 'successfully untapped: %s' % tap + else: + failed = True + msg = 'failed to untap: %s' % tap + + else: + msg = 'already untapped: %s' % tap + + return (failed, changed, msg) + + +def remove_taps(module, brew_path, taps): + '''Removes one or more taps.''' + failed, unchanged, removed, msg = False, 0, 0, '' + + for tap in taps: + (failed, changed, msg) = remove_tap(module, brew_path, tap) + if failed: + break + if changed: + removed += 1 + else: + unchanged += 1 + + if failed: + msg = 'removed: %d, unchanged: %d, error: ' + msg + msg = msg % (removed, unchanged) + elif removed: + changed = True + msg = 'removed: %d, unchanged: %d' % (removed, unchanged) + else: + msg = 'removed: %d, unchanged: %d' % (removed, unchanged) + + return (failed, changed, msg) + + +def main(): + module = AnsibleModule( + argument_spec=dict( + name=dict(aliases=['tap'], type='list', required=True), + url=dict(default=None, required=False), + state=dict(default='present', choices=['present', 'absent']), + ), + supports_check_mode=True, + ) + + brew_path = module.get_bin_path( + 'brew', + required=True, + opt_dirs=['/usr/local/bin'] + ) + + taps = module.params['name'] + url = module.params['url'] + + if module.params['state'] == 'present': + if url is None: + # No tap URL provided explicitly, continue with bulk addition + # of all the taps. + failed, changed, msg = add_taps(module, brew_path, taps) + else: + # When an tap URL is provided explicitly, we allow adding + # *single* tap only. Validate and proceed to add single tap. + if len(taps) > 1: + msg = "List of multiple taps may not be provided with 'url' option." + module.fail_json(msg=msg) + else: + failed, changed, msg = add_tap(module, brew_path, taps[0], url) + + if failed: + module.fail_json(msg=msg) + else: + module.exit_json(changed=changed, msg=msg) + + elif module.params['state'] == 'absent': + failed, changed, msg = remove_taps(module, brew_path, taps) + + if failed: + module.fail_json(msg=msg) + else: + module.exit_json(changed=changed, msg=msg) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/packaging/os/installp.py b/plugins/modules/packaging/os/installp.py new file mode 100644 index 0000000000..af1d09ac57 --- /dev/null +++ b/plugins/modules/packaging/os/installp.py @@ -0,0 +1,295 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2017, Kairo Araujo +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = r''' +--- +module: installp +author: +- Kairo Araujo (@kairoaraujo) +short_description: Manage packages on AIX +description: + - Manage packages using 'installp' on AIX +options: + accept_license: + description: + - Whether to accept the license for the package(s). + type: bool + default: no + name: + description: + - One or more packages to install or remove. + - Use C(all) to install all packages available on informed C(repository_path). + type: list + required: true + aliases: [ pkg ] + repository_path: + description: + - Path with AIX packages (required to install). + type: path + state: + description: + - Whether the package needs to be present on or absent from the system. 
+ type: str + choices: [ absent, present ] + default: present +notes: +- If the package is already installed, even the package/fileset is new, the module will not install it. +''' + +EXAMPLES = r''' +- name: Install package foo + installp: + name: foo + repository_path: /repository/AIX71/installp/base + accept_license: yes + state: present + +- name: Install bos.sysmgt that includes bos.sysmgt.nim.master, bos.sysmgt.nim.spot + installp: + name: bos.sysmgt + repository_path: /repository/AIX71/installp/base + accept_license: yes + state: present + +- name: Install bos.sysmgt.nim.master only + installp: + name: bos.sysmgt.nim.master + repository_path: /repository/AIX71/installp/base + accept_license: yes + state: present + +- name: Install bos.sysmgt.nim.master and bos.sysmgt.nim.spot + installp: + name: bos.sysmgt.nim.master, bos.sysmgt.nim.spot + repository_path: /repository/AIX71/installp/base + accept_license: yes + state: present + +- name: Remove packages bos.sysmgt.nim.master + installp: + name: bos.sysmgt.nim.master + state: absent +''' + +RETURN = r''' # ''' + +import os +import re + +from ansible.module_utils.basic import AnsibleModule + + +def _check_new_pkg(module, package, repository_path): + """ + Check if the package of fileset is correct name and repository path. + + :param module: Ansible module arguments spec. + :param package: Package/fileset name. + :param repository_path: Repository package path. + :return: Bool, package information. + """ + + if os.path.isdir(repository_path): + installp_cmd = module.get_bin_path('installp', True) + rc, package_result, err = module.run_command("%s -l -MR -d %s" % (installp_cmd, repository_path)) + if rc != 0: + module.fail_json(msg="Failed to run installp.", rc=rc, err=err) + + if package == 'all': + pkg_info = "All packages on dir" + return True, pkg_info + + else: + pkg_info = {} + for line in package_result.splitlines(): + if re.findall(package, line): + pkg_name = line.split()[0].strip() + pkg_version = line.split()[1].strip() + pkg_info[pkg_name] = pkg_version + + return True, pkg_info + + return False, None + + else: + module.fail_json(msg="Repository path %s is not valid." % repository_path) + + +def _check_installed_pkg(module, package, repository_path): + """ + Check the package on AIX. + It verifies if the package is installed and informations + + :param module: Ansible module parameters spec. + :param package: Package/fileset name. + :param repository_path: Repository package path. + :return: Bool, package data. 
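+
+    Note: ``lslpp -lcq`` output is parsed as colon-separated records;
+    the first three fields are taken as package name, fileset and level.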
+ """ + + lslpp_cmd = module.get_bin_path('lslpp', True) + rc, lslpp_result, err = module.run_command("%s -lcq %s*" % (lslpp_cmd, package)) + + if rc == 1: + package_state = ' '.join(err.split()[-2:]) + if package_state == 'not installed.': + return False, None + else: + module.fail_json(msg="Failed to run lslpp.", rc=rc, err=err) + + if rc != 0: + module.fail_json(msg="Failed to run lslpp.", rc=rc, err=err) + + pkg_data = {} + full_pkg_data = lslpp_result.splitlines() + for line in full_pkg_data: + pkg_name, fileset, level = line.split(':')[0:3] + pkg_data[pkg_name] = fileset, level + + return True, pkg_data + + +def remove(module, installp_cmd, packages): + repository_path = None + remove_count = 0 + removed_pkgs = [] + not_found_pkg = [] + for package in packages: + pkg_check, dummy = _check_installed_pkg(module, package, repository_path) + + if pkg_check: + if not module.check_mode: + rc, remove_out, err = module.run_command("%s -u %s" % (installp_cmd, package)) + if rc != 0: + module.fail_json(msg="Failed to run installp.", rc=rc, err=err) + remove_count += 1 + removed_pkgs.append(package) + + else: + not_found_pkg.append(package) + + if remove_count > 0: + if len(not_found_pkg) > 1: + not_found_pkg.insert(0, "Package(s) not found: ") + + changed = True + msg = "Packages removed: %s. %s " % (' '.join(removed_pkgs), ' '.join(not_found_pkg)) + + else: + changed = False + msg = ("No packages removed, all packages not found: %s" % ' '.join(not_found_pkg)) + + return changed, msg + + +def install(module, installp_cmd, packages, repository_path, accept_license): + installed_pkgs = [] + not_found_pkgs = [] + already_installed_pkgs = {} + + accept_license_param = { + True: '-Y', + False: '', + } + + # Validate if package exists on repository path. + for package in packages: + pkg_check, pkg_data = _check_new_pkg(module, package, repository_path) + + # If package exists on repository path, check if package is installed. + if pkg_check: + pkg_check_current, pkg_info = _check_installed_pkg(module, package, repository_path) + + # If package is already installed. + if pkg_check_current: + # Check if package is a package and not a fileset, get version + # and add the package into already installed list + if package in pkg_info.keys(): + already_installed_pkgs[package] = pkg_info[package][1] + + else: + # If the package is not a package but a fileset, confirm + # and add the fileset/package into already installed list + for key in pkg_info.keys(): + if package in pkg_info[key]: + already_installed_pkgs[package] = pkg_info[key][1] + + else: + if not module.check_mode: + rc, out, err = module.run_command("%s -a %s -X -d %s %s" % (installp_cmd, accept_license_param[accept_license], repository_path, package)) + if rc != 0: + module.fail_json(msg="Failed to run installp", rc=rc, err=err) + installed_pkgs.append(package) + + else: + not_found_pkgs.append(package) + + if len(installed_pkgs) > 0: + installed_msg = (" Installed: %s." % ' '.join(installed_pkgs)) + else: + installed_msg = '' + + if len(not_found_pkgs) > 0: + not_found_msg = (" Not found: %s." % ' '.join(not_found_pkgs)) + else: + not_found_msg = '' + + if len(already_installed_pkgs) > 0: + already_installed_msg = (" Already installed: %s." 
% already_installed_pkgs) + else: + already_installed_msg = '' + + if len(installed_pkgs) > 0: + changed = True + msg = ("%s%s%s" % (installed_msg, not_found_msg, already_installed_msg)) + else: + changed = False + msg = ("No packages installed.%s%s%s" % (installed_msg, not_found_msg, already_installed_msg)) + + return changed, msg + + +def main(): + module = AnsibleModule( + argument_spec=dict( + name=dict(type='list', required=True, aliases=['pkg']), + repository_path=dict(type='path'), + accept_license=dict(type='bool', default=False), + state=dict(type='str', default='present', choices=['absent', 'present']), + ), + supports_check_mode=True, + ) + + name = module.params['name'] + repository_path = module.params['repository_path'] + accept_license = module.params['accept_license'] + state = module.params['state'] + + installp_cmd = module.get_bin_path('installp', True) + + if state == 'present': + if repository_path is None: + module.fail_json(msg="repository_path is required to install package") + + changed, msg = install(module, installp_cmd, name, repository_path, accept_license) + + elif state == 'absent': + changed, msg = remove(module, installp_cmd, name) + + else: + module.fail_json(changed=False, msg="Unexpected state.") + + module.exit_json(changed=changed, msg=msg) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/packaging/os/layman.py b/plugins/modules/packaging/os/layman.py new file mode 100644 index 0000000000..c6bc2bc967 --- /dev/null +++ b/plugins/modules/packaging/os/layman.py @@ -0,0 +1,269 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2014, Jakub Jirutka +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: layman +author: "Jakub Jirutka (@jirutka)" +short_description: Manage Gentoo overlays +description: + - Uses Layman to manage an additional repositories for the Portage package manager on Gentoo Linux. + Please note that Layman must be installed on a managed node prior using this module. +requirements: + - "python >= 2.6" + - layman python module +options: + name: + description: + - The overlay id to install, synchronize, or uninstall. + Use 'ALL' to sync all of the installed overlays (can be used only when C(state=updated)). + required: true + list_url: + description: + - An URL of the alternative overlays list that defines the overlay to install. + This list will be fetched and saved under C(${overlay_defs})/${name}.xml), where + C(overlay_defs) is readed from the Layman's configuration. + state: + description: + - Whether to install (C(present)), sync (C(updated)), or uninstall (C(absent)) the overlay. + default: present + choices: [present, absent, updated] + validate_certs: + description: + - If C(no), SSL certificates will not be validated. This should only be + set to C(no) when no other option exists. Prior to 1.9.3 the code + defaulted to C(no). + type: bool + default: 'yes' +''' + +EXAMPLES = ''' +# Install the overlay 'mozilla' which is on the central overlays list. +- layman: + name: mozilla + +# Install the overlay 'cvut' from the specified alternative list. +- layman: + name: cvut + list_url: 'http://raw.github.com/cvut/gentoo-overlay/master/overlay.xml' + +# Update (sync) the overlay 'cvut', or install if not installed yet. 
+- layman: + name: cvut + list_url: 'http://raw.github.com/cvut/gentoo-overlay/master/overlay.xml' + state: updated + +# Update (sync) all of the installed overlays. +- layman: + name: ALL + state: updated + +# Uninstall the overlay 'cvut'. +- layman: + name: cvut + state: absent +''' + +import shutil +import traceback + +from os import path + +LAYMAN_IMP_ERR = None +try: + from layman.api import LaymanAPI + from layman.config import BareConfig + HAS_LAYMAN_API = True +except ImportError: + LAYMAN_IMP_ERR = traceback.format_exc() + HAS_LAYMAN_API = False + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib +from ansible.module_utils.urls import fetch_url + + +USERAGENT = 'ansible-httpget' + + +class ModuleError(Exception): + pass + + +def init_layman(config=None): + '''Returns the initialized ``LaymanAPI``. + + :param config: the layman's configuration to use (optional) + ''' + if config is None: + config = BareConfig(read_configfile=True, quietness=1) + return LaymanAPI(config) + + +def download_url(module, url, dest): + ''' + :param url: the URL to download + :param dest: the absolute path of where to save the downloaded content to; + it must be writable and not a directory + + :raises ModuleError + ''' + + # Hack to add params in the form that fetch_url expects + module.params['http_agent'] = USERAGENT + response, info = fetch_url(module, url) + if info['status'] != 200: + raise ModuleError("Failed to get %s: %s" % (url, info['msg'])) + + try: + with open(dest, 'w') as f: + shutil.copyfileobj(response, f) + except IOError as e: + raise ModuleError("Failed to write: %s" % str(e)) + + +def install_overlay(module, name, list_url=None): + '''Installs the overlay repository. If not on the central overlays list, + then :list_url of an alternative list must be provided. The list will be + fetched and saved under ``%(overlay_defs)/%(name.xml)`` (location of the + ``overlay_defs`` is read from the Layman's configuration). + + :param name: the overlay id + :param list_url: the URL of the remote repositories list to look for the overlay + definition (optional, default: None) + + :returns: True if the overlay was installed, or False if already exists + (i.e. nothing has changed) + :raises ModuleError + ''' + # read Layman configuration + layman_conf = BareConfig(read_configfile=True) + layman = init_layman(layman_conf) + + if layman.is_installed(name): + return False + + if module.check_mode: + mymsg = 'Would add layman repo \'' + name + '\'' + module.exit_json(changed=True, msg=mymsg) + + if not layman.is_repo(name): + if not list_url: + raise ModuleError("Overlay '%s' is not on the list of known " + "overlays and URL of the remote list was not provided." % name) + + overlay_defs = layman_conf.get_option('overlay_defs') + dest = path.join(overlay_defs, name + '.xml') + + download_url(module, list_url, dest) + + # reload config + layman = init_layman() + + if not layman.add_repos(name): + raise ModuleError(layman.get_errors()) + + return True + + +def uninstall_overlay(module, name): + '''Uninstalls the given overlay repository from the system. + + :param name: the overlay id to uninstall + + :returns: True if the overlay was uninstalled, or False if doesn't exist + (i.e. 
nothing has changed) + :raises ModuleError + ''' + layman = init_layman() + + if not layman.is_installed(name): + return False + + if module.check_mode: + mymsg = 'Would remove layman repo \'' + name + '\'' + module.exit_json(changed=True, msg=mymsg) + + layman.delete_repos(name) + if layman.get_errors(): + raise ModuleError(layman.get_errors()) + + return True + + +def sync_overlay(name): + '''Synchronizes the specified overlay repository. + + :param name: the overlay repository id to sync + :raises ModuleError + ''' + layman = init_layman() + + if not layman.sync(name): + messages = [str(item[1]) for item in layman.sync_results[2]] + raise ModuleError(messages) + + +def sync_overlays(): + '''Synchronize all of the installed overlays. + + :raises ModuleError + ''' + layman = init_layman() + + for name in layman.get_installed(): + sync_overlay(name) + + +def main(): + # define module + module = AnsibleModule( + argument_spec=dict( + name=dict(required=True), + list_url=dict(aliases=['url']), + state=dict(default="present", choices=['present', 'absent', 'updated']), + validate_certs=dict(required=False, default=True, type='bool'), + ), + supports_check_mode=True + ) + + if not HAS_LAYMAN_API: + module.fail_json(msg=missing_required_lib('Layman'), exception=LAYMAN_IMP_ERR) + + state, name, url = (module.params[key] for key in ['state', 'name', 'list_url']) + + changed = False + try: + if state == 'present': + changed = install_overlay(module, name, url) + + elif state == 'updated': + if name == 'ALL': + sync_overlays() + elif install_overlay(module, name, url): + changed = True + else: + sync_overlay(name) + else: + changed = uninstall_overlay(module, name) + + except ModuleError as e: + module.fail_json(msg=e.message) + else: + module.exit_json(changed=changed, name=name) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/packaging/os/macports.py b/plugins/modules/packaging/os/macports.py new file mode 100644 index 0000000000..8ef7f32919 --- /dev/null +++ b/plugins/modules/packaging/os/macports.py @@ -0,0 +1,309 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2013, Jimmy Tang +# Based on okpg (Patrick Pelletier ), pacman +# (Afterburn) and pkgin (Shaun Zinck) modules +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: macports +author: "Jimmy Tang (@jcftang)" +short_description: Package manager for MacPorts +description: + - Manages MacPorts packages (ports) +options: + name: + description: + - A list of port names. + aliases: ['port'] + type: list + elements: str + selfupdate: + description: + - Update Macports and the ports tree, either prior to installing ports or as a separate step. + - Equivalent to running C(port selfupdate). + aliases: ['update_cache', 'update_ports'] + default: "no" + type: bool + state: + description: + - Indicates the desired state of the port. + choices: [ 'present', 'absent', 'active', 'inactive' ] + default: present + upgrade: + description: + - Upgrade all outdated ports, either prior to installing ports or as a separate step. + - Equivalent to running C(port upgrade outdated). + default: "no" + type: bool + variant: + description: + - A port variant specification. + - 'C(variant) is only supported with state: I(installed)/I(present).' 
+ aliases: ['variants'] +''' +EXAMPLES = ''' +- name: Install the foo port + macports: + name: foo + +- name: Install the universal, x11 variant of the foo port + macports: + name: foo + variant: +universal+x11 + +- name: Install a list of ports + macports: + name: "{{ ports }}" + vars: + ports: + - foo + - foo-tools + +- name: Update Macports and the ports tree, then upgrade all outdated ports + macports: + selfupdate: yes + upgrade: yes + +- name: Update Macports and the ports tree, then install the foo port + macports: + name: foo + selfupdate: yes + +- name: Remove the foo port + macports: + name: foo + state: absent + +- name: Activate the foo port + macports: + name: foo + state: active + +- name: Deactivate the foo port + macports: + name: foo + state: inactive +''' + +import re + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.six.moves import shlex_quote + + +def selfupdate(module, port_path): + """ Update Macports and the ports tree. """ + + rc, out, err = module.run_command("%s -v selfupdate" % port_path) + + if rc == 0: + updated = any( + re.search(r'Total number of ports parsed:\s+[^0]', s.strip()) or + re.search(r'Installing new Macports release', s.strip()) + for s in out.split('\n') + if s + ) + if updated: + changed = True + msg = "Macports updated successfully" + else: + changed = False + msg = "Macports already up-to-date" + + return (changed, msg) + else: + module.fail_json(msg="Failed to update Macports", stdout=out, stderr=err) + + +def upgrade(module, port_path): + """ Upgrade outdated ports. """ + + rc, out, err = module.run_command("%s upgrade outdated" % port_path) + + # rc is 1 when nothing to upgrade so check stdout first. + if out.strip() == "Nothing to upgrade.": + changed = False + msg = "Ports already upgraded" + return (changed, msg) + elif rc == 0: + changed = True + msg = "Outdated ports upgraded successfully" + return (changed, msg) + else: + module.fail_json(msg="Failed to upgrade outdated ports", stdout=out, stderr=err) + + +def query_port(module, port_path, name, state="present"): + """ Returns whether a port is installed or not. """ + + if state == "present": + + rc, out, err = module.run_command("%s installed | grep -q ^.*%s" % (shlex_quote(port_path), shlex_quote(name)), use_unsafe_shell=True) + if rc == 0: + return True + + return False + + elif state == "active": + + rc, out, err = module.run_command("%s installed %s | grep -q active" % (shlex_quote(port_path), shlex_quote(name)), use_unsafe_shell=True) + + if rc == 0: + return True + + return False + + +def remove_ports(module, port_path, ports): + """ Uninstalls one or more ports if installed. """ + + remove_c = 0 + # Using a for loop in case of error, we can report the port that failed + for port in ports: + # Query the port first, to see if we even need to remove + if not query_port(module, port_path, port): + continue + + rc, out, err = module.run_command("%s uninstall %s" % (port_path, port)) + + if query_port(module, port_path, port): + module.fail_json(msg="Failed to remove %s: %s" % (port, err)) + + remove_c += 1 + + if remove_c > 0: + + module.exit_json(changed=True, msg="Removed %s port(s)" % remove_c) + + module.exit_json(changed=False, msg="Port(s) already absent") + + +def install_ports(module, port_path, ports, variant): + """ Installs one or more ports if not already installed. 
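+
+    Ports already present (per query_port) are skipped; the rest are
+    installed with the given variant specification appended to the
+    ``port install`` command.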
""" + + install_c = 0 + + for port in ports: + if query_port(module, port_path, port): + continue + + rc, out, err = module.run_command("%s install %s %s" % (port_path, port, variant)) + + if not query_port(module, port_path, port): + module.fail_json(msg="Failed to install %s: %s" % (port, err)) + + install_c += 1 + + if install_c > 0: + module.exit_json(changed=True, msg="Installed %s port(s)" % (install_c)) + + module.exit_json(changed=False, msg="Port(s) already present") + + +def activate_ports(module, port_path, ports): + """ Activate a port if it's inactive. """ + + activate_c = 0 + + for port in ports: + if not query_port(module, port_path, port): + module.fail_json(msg="Failed to activate %s, port(s) not present" % (port)) + + if query_port(module, port_path, port, state="active"): + continue + + rc, out, err = module.run_command("%s activate %s" % (port_path, port)) + + if not query_port(module, port_path, port, state="active"): + module.fail_json(msg="Failed to activate %s: %s" % (port, err)) + + activate_c += 1 + + if activate_c > 0: + module.exit_json(changed=True, msg="Activated %s port(s)" % (activate_c)) + + module.exit_json(changed=False, msg="Port(s) already active") + + +def deactivate_ports(module, port_path, ports): + """ Deactivate a port if it's active. """ + + deactivated_c = 0 + + for port in ports: + if not query_port(module, port_path, port): + module.fail_json(msg="Failed to deactivate %s, port(s) not present" % (port)) + + if not query_port(module, port_path, port, state="active"): + continue + + rc, out, err = module.run_command("%s deactivate %s" % (port_path, port)) + + if query_port(module, port_path, port, state="active"): + module.fail_json(msg="Failed to deactivate %s: %s" % (port, err)) + + deactivated_c += 1 + + if deactivated_c > 0: + module.exit_json(changed=True, msg="Deactivated %s port(s)" % (deactivated_c)) + + module.exit_json(changed=False, msg="Port(s) already inactive") + + +def main(): + module = AnsibleModule( + argument_spec=dict( + name=dict(type='list', elements='str', aliases=["port"]), + selfupdate=dict(aliases=["update_cache", "update_ports"], default=False, type='bool'), + state=dict(default="present", choices=["present", "installed", "absent", "removed", "active", "inactive"]), + upgrade=dict(default=False, type='bool'), + variant=dict(aliases=["variants"], default=None, type='str') + ) + ) + + port_path = module.get_bin_path('port', True, ['/opt/local/bin']) + + p = module.params + + if p["selfupdate"]: + (changed, msg) = selfupdate(module, port_path) + if not (p["name"] or p["upgrade"]): + module.exit_json(changed=changed, msg=msg) + + if p["upgrade"]: + (changed, msg) = upgrade(module, port_path) + if not p["name"]: + module.exit_json(changed=changed, msg=msg) + + pkgs = p["name"] + + variant = p["variant"] + + if p["state"] in ["present", "installed"]: + install_ports(module, port_path, pkgs, variant) + + elif p["state"] in ["absent", "removed"]: + remove_ports(module, port_path, pkgs) + + elif p["state"] == "active": + activate_ports(module, port_path, pkgs) + + elif p["state"] == "inactive": + deactivate_ports(module, port_path, pkgs) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/packaging/os/mas.py b/plugins/modules/packaging/os/mas.py new file mode 100644 index 0000000000..e7ca5f8eaa --- /dev/null +++ b/plugins/modules/packaging/os/mas.py @@ -0,0 +1,289 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2020, Lukas Bestle +# Copyright: (c) 2017, Michael Heap +# GNU General Public 
License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +module: mas +short_description: Manage Mac App Store applications with mas-cli +description: + - Installs, uninstalls and updates macOS applications from the Mac App Store using the C(mas-cli). +author: + - Michael Heap (@mheap) + - Lukas Bestle (@lukasbestle) +options: + id: + description: + - The Mac App Store identifier of the app(s) you want to manage. + - This can be found by running C(mas search APP_NAME) on your machine. + type: list + elements: int + state: + description: + - Desired state of the app installation. + - The C(absent) value requires root permissions, also see the examples. + type: str + choices: + - absent + - latest + - present + default: present + upgrade_all: + description: + - Upgrade all installed Mac App Store apps. + type: bool + default: "no" + aliases: ["upgrade"] +requirements: + - macOS 10.11+ + - "mas-cli (U(https://github.com/mas-cli/mas)) 1.5.0+ available as C(mas) in the bin path" + - The Apple ID to use already needs to be signed in to the Mac App Store (check with C(mas account)). +notes: + - This module supports C(check_mode). +''' + +EXAMPLES = ''' +- name: Install Keynote + mas: + id: 409183694 + state: present + +- name: Install a list of apps + mas: + id: + - 409183694 # Keynote + - 413857545 # Divvy + state: present + +- name: Ensure the latest Keynote version is installed + mas: + id: 409183694 + state: latest + +- name: Upgrade all installed Mac App Store apps + mas: + upgrade_all: yes + +- name: Install specific apps and also upgrade all others + mas: + id: + - 409183694 # Keynote + - 413857545 # Divvy + state: present + upgrade_all: yes + +- name: Uninstall Divvy + mas: + id: 413857545 + state: absent + become: yes # Uninstallation requires root permissions +''' + +RETURN = r''' # ''' + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_native +from distutils.version import StrictVersion +import os + + +class Mas(object): + + def __init__(self, module): + self.module = module + + # Initialize data properties + self.mas_path = self.module.get_bin_path('mas') + self._checked_signin = False + self._installed = None # Populated only if needed + self._outdated = None # Populated only if needed + self.count_install = 0 + self.count_upgrade = 0 + self.count_uninstall = 0 + self.result = { + 'changed': False + } + + self.check_mas_tool() + + def app_command(self, command, id): + ''' Runs a `mas` command on a given app; command can be 'install', 'upgrade' or 'uninstall' ''' + + if not self.module.check_mode: + if command != 'uninstall': + self.check_signin() + + rc, out, err = self.run([command, str(id)]) + if rc != 0: + self.module.fail_json( + msg="Error running command '{0}' on app '{1}': {2}".format(command, str(id), out.rstrip()) + ) + + # No error or dry run + self.__dict__['count_' + command] += 1 + + def check_mas_tool(self): + ''' Verifies that the `mas` tool is available in a recent version ''' + + # Is the `mas` tool available at all? + if not self.mas_path: + self.module.fail_json(msg='Required `mas` tool is not installed') + + # Is the version recent enough? 
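+        # mas 1.5.0 or newer is required (see DOCUMENTATION); a failed
+        # call, empty output or an older version fails the module below.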
+ rc, out, err = self.run(['version']) + if rc != 0 or not out.strip() or StrictVersion(out.strip()) < StrictVersion('1.5.0'): + self.module.fail_json(msg='`mas` tool in version 1.5.0+ needed, got ' + out.strip()) + + def check_signin(self): + ''' Verifies that the user is signed in to the Mac App Store ''' + + # Only check this once per execution + if self._checked_signin: + return + + rc, out, err = self.run(['account']) + if out.split("\n", 1)[0].rstrip() == 'Not signed in': + self.module.fail_json(msg='You must be signed in to the Mac App Store') + + self._checked_signin = True + + def exit(self): + ''' Exit with the data we have collected over time ''' + + msgs = [] + if self.count_install > 0: + msgs.append('Installed {0} app(s)'.format(self.count_install)) + if self.count_upgrade > 0: + msgs.append('Upgraded {0} app(s)'.format(self.count_upgrade)) + if self.count_uninstall > 0: + msgs.append('Uninstalled {0} app(s)'.format(self.count_uninstall)) + + if msgs: + self.result['changed'] = True + self.result['msg'] = ', '.join(msgs) + + self.module.exit_json(**self.result) + + def get_current_state(self, command): + ''' Returns the list of all app IDs; command can either be 'list' or 'outdated' ''' + + rc, raw_apps, err = self.run([command]) + rows = raw_apps.split("\n") + apps = [] + for r in rows: + # Format: "123456789 App Name" + r = r.split(' ', 1) + if len(r) == 2: + apps.append(int(r[0])) + + return apps + + def installed(self): + ''' Returns the list of installed apps ''' + + # Populate cache if not already done + if self._installed is None: + self._installed = self.get_current_state('list') + + return self._installed + + def is_installed(self, id): + ''' Checks whether the given app is installed ''' + + return int(id) in self.installed() + + def is_outdated(self, id): + ''' Checks whether the given app is installed, but outdated ''' + + return int(id) in self.outdated() + + def outdated(self): + ''' Returns the list of installed, but outdated apps ''' + + # Populate cache if not already done + if self._outdated is None: + self._outdated = self.get_current_state('outdated') + + return self._outdated + + def run(self, cmd): + ''' Runs a command of the `mas` tool ''' + + cmd.insert(0, self.mas_path) + return self.module.run_command(cmd, False) + + def upgrade_all(self): + ''' Upgrades all installed apps and sets the correct result data ''' + + outdated = self.outdated() + + if not self.module.check_mode: + self.check_signin() + + rc, out, err = self.run(['upgrade']) + if rc != 0: + self.module.fail_json(msg='Could not upgrade all apps: ' + out.rstrip()) + + self.count_upgrade += len(outdated) + + +def main(): + module = AnsibleModule( + argument_spec=dict( + id=dict(type='list', elements='int'), + state=dict(type='str', default='present', choices=['absent', 'latest', 'present']), + upgrade_all=dict(type='bool', default=False, aliases=['upgrade']), + ), + supports_check_mode=True + ) + mas = Mas(module) + + if module.params['id']: + apps = module.params['id'] + else: + apps = [] + + state = module.params['state'] + upgrade = module.params['upgrade_all'] + + # Run operations on the given app IDs + for app in sorted(set(apps)): + if state == 'present': + if not mas.is_installed(app): + mas.app_command('install', app) + + elif state == 'absent': + if mas.is_installed(app): + # Ensure we are root + if os.getuid() != 0: + module.fail_json(msg="Uninstalling apps requires root permissions ('become: yes')") + + mas.app_command('uninstall', app) + + elif state == 'latest': + if not 
mas.is_installed(app): + mas.app_command('install', app) + elif mas.is_outdated(app): + mas.app_command('upgrade', app) + + # Upgrade all apps if requested + mas._outdated = None # Clear cache + if upgrade and mas.outdated(): + mas.upgrade_all() + + # Exit with the collected data + mas.exit() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/packaging/os/openbsd_pkg.py b/plugins/modules/packaging/os/openbsd_pkg.py new file mode 100644 index 0000000000..c87936907d --- /dev/null +++ b/plugins/modules/packaging/os/openbsd_pkg.py @@ -0,0 +1,634 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2013, Patrik Lundin +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: openbsd_pkg +author: +- Patrik Lundin (@eest) +short_description: Manage packages on OpenBSD +description: + - Manage packages on OpenBSD using the pkg tools. +requirements: +- python >= 2.5 +options: + name: + description: + - A name or a list of names of the packages. + required: yes + state: + description: + - C(present) will make sure the package is installed. + C(latest) will make sure the latest version of the package is installed. + C(absent) will make sure the specified package is not installed. + choices: [ absent, latest, present ] + default: present + build: + description: + - Build the package from source instead of downloading and installing + a binary. Requires that the port source tree is already installed. + Automatically builds and installs the 'sqlports' package, if it is + not already installed. + type: bool + default: 'no' + ports_dir: + description: + - When used in combination with the C(build) option, allows overriding + the default ports source directory. + default: /usr/ports + clean: + description: + - When updating or removing packages, delete the extra configuration + file(s) in the old packages which are annotated with @extra in + the packaging-list. + type: bool + default: 'no' + quick: + description: + - Replace or delete packages quickly; do not bother with checksums + before removing normal files. + type: bool + default: 'no' +notes: + - When used with a `loop:` each package will be processed individually, + it is much more efficient to pass the list directly to the `name` option. 
+''' + +EXAMPLES = ''' +- name: Make sure nmap is installed + openbsd_pkg: + name: nmap + state: present + +- name: Make sure nmap is the latest version + openbsd_pkg: + name: nmap + state: latest + +- name: Make sure nmap is not installed + openbsd_pkg: + name: nmap + state: absent + +- name: Make sure nmap is installed, build it from source if it is not + openbsd_pkg: + name: nmap + state: present + build: yes + +- name: Specify a pkg flavour with '--' + openbsd_pkg: + name: vim--no_x11 + state: present + +- name: Specify the default flavour to avoid ambiguity errors + openbsd_pkg: + name: vim-- + state: present + +- name: Specify a package branch (requires at least OpenBSD 6.0) + openbsd_pkg: + name: python%3.5 + state: present + +- name: Update all packages on the system + openbsd_pkg: + name: '*' + state: latest + +- name: Purge a package and it's configuration files + openbsd_pkg: + name: mpd + clean: yes + state: absent + +- name: Quickly remove a package without checking checksums + openbsd_pkg: + name: qt5 + quick: yes + state: absent +''' + +import os +import platform +import re +import shlex +import sqlite3 + +from distutils.version import StrictVersion + +from ansible.module_utils.basic import AnsibleModule + + +# Function used for executing commands. +def execute_command(cmd, module): + # Break command line into arguments. + # This makes run_command() use shell=False which we need to not cause shell + # expansion of special characters like '*'. + cmd_args = shlex.split(cmd) + return module.run_command(cmd_args) + + +# Function used to find out if a package is currently installed. +def get_package_state(names, pkg_spec, module): + info_cmd = 'pkg_info -Iq' + + for name in names: + command = "%s inst:%s" % (info_cmd, name) + + rc, stdout, stderr = execute_command(command, module) + + if stderr: + module.fail_json(msg="failed in get_package_state(): " + stderr) + + if stdout: + # If the requested package name is just a stem, like "python", we may + # find multiple packages with that name. + pkg_spec[name]['installed_names'] = [installed_name for installed_name in stdout.splitlines()] + module.debug("get_package_state(): installed_names = %s" % pkg_spec[name]['installed_names']) + pkg_spec[name]['installed_state'] = True + else: + pkg_spec[name]['installed_state'] = False + + +# Function used to make sure a package is present. +def package_present(names, pkg_spec, module): + build = module.params['build'] + + for name in names: + # It is possible package_present() has been called from package_latest(). + # In that case we do not want to operate on the whole list of names, + # only the leftovers. 
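+        # package_latest() collects names that were not yet installed in
+        # pkg_spec['package_latest_leftovers'] and calls back into this
+        # function to install just those names.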
+ if pkg_spec['package_latest_leftovers']: + if name not in pkg_spec['package_latest_leftovers']: + module.debug("package_present(): ignoring '%s' which is not a package_latest() leftover" % name) + continue + else: + module.debug("package_present(): handling package_latest() leftovers, installing '%s'" % name) + + if module.check_mode: + install_cmd = 'pkg_add -Imn' + else: + if build is True: + port_dir = "%s/%s" % (module.params['ports_dir'], get_package_source_path(name, pkg_spec, module)) + if os.path.isdir(port_dir): + if pkg_spec[name]['flavor']: + flavors = pkg_spec[name]['flavor'].replace('-', ' ') + install_cmd = "cd %s && make clean=depends && FLAVOR=\"%s\" make install && make clean=depends" % (port_dir, flavors) + elif pkg_spec[name]['subpackage']: + install_cmd = "cd %s && make clean=depends && SUBPACKAGE=\"%s\" make install && make clean=depends" % (port_dir, + pkg_spec[name]['subpackage']) + else: + install_cmd = "cd %s && make install && make clean=depends" % (port_dir) + else: + module.fail_json(msg="the port source directory %s does not exist" % (port_dir)) + else: + install_cmd = 'pkg_add -Im' + + if pkg_spec[name]['installed_state'] is False: + + # Attempt to install the package + if build is True and not module.check_mode: + (pkg_spec[name]['rc'], pkg_spec[name]['stdout'], pkg_spec[name]['stderr']) = module.run_command(install_cmd, module, use_unsafe_shell=True) + else: + (pkg_spec[name]['rc'], pkg_spec[name]['stdout'], pkg_spec[name]['stderr']) = execute_command("%s %s" % (install_cmd, name), module) + + # The behaviour of pkg_add is a bit different depending on if a + # specific version is supplied or not. + # + # When a specific version is supplied the return code will be 0 when + # a package is found and 1 when it is not. If a version is not + # supplied the tool will exit 0 in both cases. + # + # It is important to note that "version" relates to the + # packages-specs(7) notion of a version. If using the branch syntax + # (like "python%3.5") even though a branch name may look like a + # version string it is not used an one by pkg_add. + if pkg_spec[name]['version'] or build is True: + # Depend on the return code. + module.debug("package_present(): depending on return code for name '%s'" % name) + if pkg_spec[name]['rc']: + pkg_spec[name]['changed'] = False + else: + # Depend on stderr instead. + module.debug("package_present(): depending on stderr for name '%s'" % name) + if pkg_spec[name]['stderr']: + # There is a corner case where having an empty directory in + # installpath prior to the right location will result in a + # "file:/local/package/directory/ is empty" message on stderr + # while still installing the package, so we need to look for + # for a message like "packagename-1.0: ok" just in case. + match = re.search(r"\W%s-[^:]+: ok\W" % pkg_spec[name]['stem'], pkg_spec[name]['stdout']) + + if match: + # It turns out we were able to install the package. + module.debug("package_present(): we were able to install package for name '%s'" % name) + else: + # We really did fail, fake the return code. 
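+                        # Faking rc = 1 keeps the rc == 0 check below from
+                        # marking the task as changed.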
+ module.debug("package_present(): we really did fail for name '%s'" % name) + pkg_spec[name]['rc'] = 1 + pkg_spec[name]['changed'] = False + else: + module.debug("package_present(): stderr was not set for name '%s'" % name) + + if pkg_spec[name]['rc'] == 0: + pkg_spec[name]['changed'] = True + + else: + pkg_spec[name]['rc'] = 0 + pkg_spec[name]['stdout'] = '' + pkg_spec[name]['stderr'] = '' + pkg_spec[name]['changed'] = False + + +# Function used to make sure a package is the latest available version. +def package_latest(names, pkg_spec, module): + if module.params['build'] is True: + module.fail_json(msg="the combination of build=%s and state=latest is not supported" % module.params['build']) + + upgrade_cmd = 'pkg_add -um' + + if module.check_mode: + upgrade_cmd += 'n' + + if module.params['clean']: + upgrade_cmd += 'c' + + if module.params['quick']: + upgrade_cmd += 'q' + + for name in names: + if pkg_spec[name]['installed_state'] is True: + + # Attempt to upgrade the package. + (pkg_spec[name]['rc'], pkg_spec[name]['stdout'], pkg_spec[name]['stderr']) = execute_command("%s %s" % (upgrade_cmd, name), module) + + # Look for output looking something like "nmap-6.01->6.25: ok" to see if + # something changed (or would have changed). Use \W to delimit the match + # from progress meter output. + pkg_spec[name]['changed'] = False + for installed_name in pkg_spec[name]['installed_names']: + module.debug("package_latest(): checking for pre-upgrade package name: %s" % installed_name) + match = re.search(r"\W%s->.+: ok\W" % installed_name, pkg_spec[name]['stdout']) + if match: + module.debug("package_latest(): pre-upgrade package name match: %s" % installed_name) + + pkg_spec[name]['changed'] = True + break + + # FIXME: This part is problematic. Based on the issues mentioned (and + # handled) in package_present() it is not safe to blindly trust stderr + # as an indicator that the command failed, and in the case with + # empty installpath directories this will break. + # + # For now keep this safeguard here, but ignore it if we managed to + # parse out a successful update above. This way we will report a + # successful run when we actually modify something but fail + # otherwise. + if pkg_spec[name]['changed'] is not True: + if pkg_spec[name]['stderr']: + pkg_spec[name]['rc'] = 1 + + else: + # Note packages that need to be handled by package_present + module.debug("package_latest(): package '%s' is not installed, will be handled by package_present()" % name) + pkg_spec['package_latest_leftovers'].append(name) + + # If there were any packages that were not installed we call + # package_present() which will handle those. + if pkg_spec['package_latest_leftovers']: + module.debug("package_latest(): calling package_present() to handle leftovers") + package_present(names, pkg_spec, module) + + +# Function used to make sure a package is not installed. +def package_absent(names, pkg_spec, module): + remove_cmd = 'pkg_delete -I' + + if module.check_mode: + remove_cmd += 'n' + + if module.params['clean']: + remove_cmd += 'c' + + if module.params['quick']: + remove_cmd += 'q' + + for name in names: + if pkg_spec[name]['installed_state'] is True: + # Attempt to remove the package. 
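+            # remove_cmd is, e.g., 'pkg_delete -Inc' when check mode and
+            # clean are both in effect; the package name is appended here.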
+            (pkg_spec[name]['rc'], pkg_spec[name]['stdout'], pkg_spec[name]['stderr']) = execute_command("%s %s" % (remove_cmd, name), module)
+
+            if pkg_spec[name]['rc'] == 0:
+                pkg_spec[name]['changed'] = True
+            else:
+                pkg_spec[name]['changed'] = False
+
+        else:
+            pkg_spec[name]['rc'] = 0
+            pkg_spec[name]['stdout'] = ''
+            pkg_spec[name]['stderr'] = ''
+            pkg_spec[name]['changed'] = False
+
+
+# Function used to parse the package name based on packages-specs(7).
+# The general name structure is "stem-version[-flavors]".
+#
+# Names containing "%" are a special variation not part of the
+# packages-specs(7) syntax. See pkg_add(1) on OpenBSD 6.0 or later for a
+# description.
+def parse_package_name(names, pkg_spec, module):
+
+    # Initialize empty list of package_latest() leftovers.
+    pkg_spec['package_latest_leftovers'] = []
+
+    for name in names:
+        module.debug("parse_package_name(): parsing name: %s" % name)
+        # Do some initial matches so we can base the more advanced regex on that.
+        version_match = re.search("-[0-9]", name)
+        versionless_match = re.search("--", name)
+
+        # Stop if someone is giving us a name that both has a version and is
+        # version-less at the same time.
+        if version_match and versionless_match:
+            module.fail_json(msg="package name both has a version and is version-less: " + name)
+
+        # All information for a given name is kept in the pkg_spec keyed by that name.
+        pkg_spec[name] = {}
+
+        # If name includes a version.
+        if version_match:
+            match = re.search("^(?P<stem>[^%]+)-(?P<version>[0-9][^-]*)(?P<flavor_separator>-)?(?P<flavor>[a-z].*)?(%(?P<branch>.+))?$", name)
+            if match:
+                pkg_spec[name]['stem'] = match.group('stem')
+                pkg_spec[name]['version_separator'] = '-'
+                pkg_spec[name]['version'] = match.group('version')
+                pkg_spec[name]['flavor_separator'] = match.group('flavor_separator')
+                pkg_spec[name]['flavor'] = match.group('flavor')
+                pkg_spec[name]['branch'] = match.group('branch')
+                pkg_spec[name]['style'] = 'version'
+                module.debug("version_match: stem: %(stem)s, version: %(version)s, flavor_separator: %(flavor_separator)s, "
+                             "flavor: %(flavor)s, branch: %(branch)s, style: %(style)s" % pkg_spec[name])
+            else:
+                module.fail_json(msg="unable to parse package name at version_match: " + name)
+
+        # If name includes no version but is version-less ("--").
+        elif versionless_match:
+            match = re.search("^(?P<stem>[^%]+)--(?P<flavor>[a-z].*)?(%(?P<branch>.+))?$", name)
+            if match:
+                pkg_spec[name]['stem'] = match.group('stem')
+                pkg_spec[name]['version_separator'] = '-'
+                pkg_spec[name]['version'] = None
+                pkg_spec[name]['flavor_separator'] = '-'
+                pkg_spec[name]['flavor'] = match.group('flavor')
+                pkg_spec[name]['branch'] = match.group('branch')
+                pkg_spec[name]['style'] = 'versionless'
+                module.debug("versionless_match: stem: %(stem)s, flavor: %(flavor)s, branch: %(branch)s, style: %(style)s" % pkg_spec[name])
+            else:
+                module.fail_json(msg="unable to parse package name at versionless_match: " + name)
+
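The two regexes above do the heavy lifting, so a worked example may help. This standalone snippet reuses the same patterns to show how a versioned and a version-less name decompose (the sample names are illustrative):

```python
import re

VERSION_RE = r"^(?P<stem>[^%]+)-(?P<version>[0-9][^-]*)(?P<flavor_separator>-)?(?P<flavor>[a-z].*)?(%(?P<branch>.+))?$"
VERSIONLESS_RE = r"^(?P<stem>[^%]+)--(?P<flavor>[a-z].*)?(%(?P<branch>.+))?$"

print(re.search(VERSION_RE, "vim-8.2.0-no_x11").groupdict())
# -> {'stem': 'vim', 'version': '8.2.0', 'flavor_separator': '-',
#     'flavor': 'no_x11', 'branch': None}
print(re.search(VERSIONLESS_RE, "autoconf--%2.69").groupdict())
# -> {'stem': 'autoconf', 'flavor': None, 'branch': '2.69'}
```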
+        # If name includes no version, and is not version-less, it is all a
+        # stem, possibly with a branch (%branchname) tacked on at the
+        # end.
+        else:
+            match = re.search("^(?P<stem>[^%]+)(%(?P<branch>.+))?$", name)
+            if match:
+                pkg_spec[name]['stem'] = match.group('stem')
+                pkg_spec[name]['version_separator'] = None
+                pkg_spec[name]['version'] = None
+                pkg_spec[name]['flavor_separator'] = None
+                pkg_spec[name]['flavor'] = None
+                pkg_spec[name]['branch'] = match.group('branch')
+                pkg_spec[name]['style'] = 'stem'
+                module.debug("stem_match: stem: %(stem)s, branch: %(branch)s, style: %(style)s" % pkg_spec[name])
+            else:
+                module.fail_json(msg="unable to parse package name at else: " + name)
+
+        # Verify that the managed host is new enough to support branch syntax.
+        if pkg_spec[name]['branch']:
+            branch_release = "6.0"
+
+            if StrictVersion(platform.release()) < StrictVersion(branch_release):
+                module.fail_json(msg="package name using 'branch' syntax requires at least OpenBSD %s: %s" % (branch_release, name))
+
+        # Sanity check that there are no trailing dashes in flavor.
+        # Try to stop strange stuff early so we can be strict later.
+        if pkg_spec[name]['flavor']:
+            match = re.search("-$", pkg_spec[name]['flavor'])
+            if match:
+                module.fail_json(msg="trailing dash in flavor: " + pkg_spec[name]['flavor'])
+
+
+# Function used for figuring out the port path.
+def get_package_source_path(name, pkg_spec, module):
+    pkg_spec[name]['subpackage'] = None
+    if pkg_spec[name]['stem'] == 'sqlports':
+        return 'databases/sqlports'
+    else:
+        # try for an exact match first
+        sqlports_db_file = '/usr/local/share/sqlports'
+        if not os.path.isfile(sqlports_db_file):
+            module.fail_json(msg="sqlports file '%s' is missing" % sqlports_db_file)
+
+        conn = sqlite3.connect(sqlports_db_file)
+        first_part_of_query = 'SELECT fullpkgpath, fullpkgname FROM ports WHERE fullpkgname'
+        query = first_part_of_query + ' = ?'
+        module.debug("get_package_source_path(): exact query: %s" % query)
+        cursor = conn.execute(query, (name,))
+        results = cursor.fetchall()
+
+        # next, try for a fuzzier match
+        if len(results) < 1:
+            looking_for = pkg_spec[name]['stem'] + (pkg_spec[name]['version_separator'] or '-') + (pkg_spec[name]['version'] or '%')
+            query = first_part_of_query + ' LIKE ?'
+            if pkg_spec[name]['flavor']:
+                looking_for += pkg_spec[name]['flavor_separator'] + pkg_spec[name]['flavor']
+                module.debug("get_package_source_path(): fuzzy flavor query: %s" % query)
+                cursor = conn.execute(query, (looking_for,))
+            elif pkg_spec[name]['style'] == 'versionless':
+                query += ' AND fullpkgname NOT LIKE ?'
+                module.debug("get_package_source_path(): fuzzy versionless query: %s" % query)
+                cursor = conn.execute(query, (looking_for, "%s-%%" % looking_for,))
+            else:
+                module.debug("get_package_source_path(): fuzzy query: %s" % query)
+                cursor = conn.execute(query, (looking_for,))
+            results = cursor.fetchall()
+
+        # error if we don't find exactly 1 match
+        conn.close()
+        if len(results) < 1:
+            module.fail_json(msg="could not find a port by the name '%s'" % name)
+        if len(results) > 1:
+            matches = map(lambda x: x[1], results)
+            module.fail_json(msg="too many matches, unsure which to build: %s" % ' OR '.join(matches))
+
+        # there's exactly 1 match, so figure out the subpackage, if any, then return
+        fullpkgpath = results[0][0]
+        parts = fullpkgpath.split(',')
+        if len(parts) > 1 and parts[1][0] == '-':
+            pkg_spec[name]['subpackage'] = parts[1]
+        return parts[0]
+
+
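Outside the module, the same sqlports lookup can be reproduced in a few lines. A sketch assuming a local /usr/local/share/sqlports database exists; the fullpkgpath/fullpkgname columns are the ones the function above relies on:

```python
import sqlite3

def port_path(pkgname, db='/usr/local/share/sqlports'):
    """Resolve an exact package name to its port directory (sketch)."""
    conn = sqlite3.connect(db)
    try:
        cur = conn.execute(
            'SELECT fullpkgpath FROM ports WHERE fullpkgname = ?', (pkgname,))
        row = cur.fetchone()
    finally:
        conn.close()
    # e.g. port_path('nmap-7.80') might yield 'net/nmap': the first path
    # component, before any ",-subpackage" suffix.
    return row[0].split(',')[0] if row else None
```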
+# Function used for upgrading all installed packages.
+def upgrade_packages(pkg_spec, module):
+    if module.check_mode:
+        upgrade_cmd = 'pkg_add -Imnu'
+    else:
+        upgrade_cmd = 'pkg_add -Imu'
+
+    # Create a minimal pkg_spec entry for '*' to store return values.
+    pkg_spec['*'] = {}
+
+    # Attempt to upgrade all packages.
+    pkg_spec['*']['rc'], pkg_spec['*']['stdout'], pkg_spec['*']['stderr'] = execute_command("%s" % upgrade_cmd, module)
+
+    # Try to find any occurrence of a package changing version like:
+    # "bzip2-1.0.6->1.0.6p0: ok".
+    match = re.search(r"\W\w.+->.+: ok\W", pkg_spec['*']['stdout'])
+    if match:
+        pkg_spec['*']['changed'] = True
+
+    else:
+        pkg_spec['*']['changed'] = False
+
+    # It seems we cannot trust the return value, so depend on the presence of
+    # stderr to know if something failed.
+    if pkg_spec['*']['stderr']:
+        pkg_spec['*']['rc'] = 1
+    else:
+        pkg_spec['*']['rc'] = 0
+
+
+# ===========================================
+# Main control flow.
+def main():
+    module = AnsibleModule(
+        argument_spec=dict(
+            name=dict(type='list', required=True),
+            state=dict(type='str', default='present', choices=['absent', 'installed', 'latest', 'present', 'removed']),
+            build=dict(type='bool', default=False),
+            ports_dir=dict(type='path', default='/usr/ports'),
+            quick=dict(type='bool', default=False),
+            clean=dict(type='bool', default=False),
+        ),
+        supports_check_mode=True
+    )
+
+    name = module.params['name']
+    state = module.params['state']
+    build = module.params['build']
+    ports_dir = module.params['ports_dir']
+
+    rc = 0
+    stdout = ''
+    stderr = ''
+    result = {}
+    result['name'] = name
+    result['state'] = state
+    result['build'] = build
+
+    # The data structure used to keep track of package information.
+    pkg_spec = {}
+
+    if build is True:
+        if not os.path.isdir(ports_dir):
+            module.fail_json(msg="the ports source directory %s does not exist" % (ports_dir))
+
+        # build sqlports if it's not installed yet
+        parse_package_name(['sqlports'], pkg_spec, module)
+        get_package_state(['sqlports'], pkg_spec, module)
+        if not pkg_spec['sqlports']['installed_state']:
+            module.debug("main(): installing 'sqlports' because build=%s" % module.params['build'])
+            package_present(['sqlports'], pkg_spec, module)
+
+    asterisk_name = False
+    for n in name:
+        if n == '*':
+            if len(name) != 1:
+                module.fail_json(msg="the package name '*' cannot be mixed with other names")
+
+            asterisk_name = True
+
+    if asterisk_name:
+        if state != 'latest':
+            module.fail_json(msg="the package name '*' is only valid when using state=latest")
+        else:
+            # Perform an upgrade of all installed packages.
+            upgrade_packages(pkg_spec, module)
+    else:
+        # Parse package names and put results in the pkg_spec dictionary.
+        parse_package_name(name, pkg_spec, module)
+
+        # Not sure how the branch syntax is supposed to play together
+        # with build mode. Disable it for now.
+        for n in name:
+            if pkg_spec[n]['branch'] and module.params['build'] is True:
+                module.fail_json(msg="the combination of 'branch' syntax and build=%s is not supported: %s" % (module.params['build'], n))
+
+        # Get state for all package names.
+        get_package_state(name, pkg_spec, module)
+
+        # Perform requested action.
+        if state in ['installed', 'present']:
+            package_present(name, pkg_spec, module)
+        elif state in ['absent', 'removed']:
+            package_absent(name, pkg_spec, module)
+        elif state == 'latest':
+            package_latest(name, pkg_spec, module)
+
+    # The combined changed status for all requested packages. If anything
+    # is changed this is set to True.
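Before the combined_changed flag is initialized below, here is the same reduction as a standalone sketch; combine_results is a hypothetical helper that mirrors the loop that follows:

```python
def combine_results(names, pkg_spec):
    """Reduce per-package rc/stderr/stdout into (changed, failed, msg)."""
    changed = any(pkg_spec[n]['changed'] for n in names)
    failed = any(pkg_spec[n]['rc'] != 0 for n in names)
    msg = ', '.join((pkg_spec[n]['stderr'] or pkg_spec[n]['stdout'])
                    for n in names if pkg_spec[n]['rc'] != 0)
    return changed, failed, msg
```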
+ combined_changed = False + + # The combined failed status for all requested packages. If anything + # failed this is set to True. + combined_failed = False + + # We combine all error messages in this comma separated string, for example: + # "msg": "Can't find nmapp\n, Can't find nmappp\n" + combined_error_message = '' + + # Loop over all requested package names and check if anything failed or + # changed. + for n in name: + if pkg_spec[n]['rc'] != 0: + combined_failed = True + if pkg_spec[n]['stderr']: + if combined_error_message: + combined_error_message += ", %s" % pkg_spec[n]['stderr'] + else: + combined_error_message = pkg_spec[n]['stderr'] + else: + if combined_error_message: + combined_error_message += ", %s" % pkg_spec[n]['stdout'] + else: + combined_error_message = pkg_spec[n]['stdout'] + + if pkg_spec[n]['changed'] is True: + combined_changed = True + + # If combined_error_message contains anything at least some part of the + # list of requested package names failed. + if combined_failed: + module.fail_json(msg=combined_error_message, **result) + + result['changed'] = combined_changed + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/packaging/os/opkg.py b/plugins/modules/packaging/os/opkg.py new file mode 100644 index 0000000000..6603dcd0f8 --- /dev/null +++ b/plugins/modules/packaging/os/opkg.py @@ -0,0 +1,193 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2013, Patrick Pelletier +# Based on pacman (Afterburn) and pkgin (Shaun Zinck) modules +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: opkg +author: "Patrick Pelletier (@skinp)" +short_description: Package manager for OpenWrt +description: + - Manages OpenWrt packages +options: + name: + description: + - name of package to install/remove + required: true + state: + description: + - state of the package + choices: [ 'present', 'absent' ] + default: present + force: + description: + - opkg --force parameter used + choices: + - "" + - "depends" + - "maintainer" + - "reinstall" + - "overwrite" + - "downgrade" + - "space" + - "postinstall" + - "remove" + - "checksum" + - "removal-of-dependent-packages" + default: absent + update_cache: + description: + - update the package db first + default: "no" + type: bool +requirements: + - opkg + - python +''' +EXAMPLES = ''' +- opkg: + name: foo + state: present + +- opkg: + name: foo + state: present + update_cache: yes + +- opkg: + name: foo + state: absent + +- opkg: + name: foo,bar + state: absent + +- opkg: + name: foo + state: present + force: overwrite +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.six.moves import shlex_quote + + +def update_package_db(module, opkg_path): + """ Updates packages list. """ + + rc, out, err = module.run_command("%s update" % opkg_path) + + if rc != 0: + module.fail_json(msg="could not update package db") + + +def query_package(module, opkg_path, name, state="present"): + """ Returns whether a package is installed or not. 
""" + + if state == "present": + + rc, out, err = module.run_command("%s list-installed | grep -q \"^%s \"" % (shlex_quote(opkg_path), shlex_quote(name)), use_unsafe_shell=True) + if rc == 0: + return True + + return False + + +def remove_packages(module, opkg_path, packages): + """ Uninstalls one or more packages if installed. """ + + p = module.params + force = p["force"] + if force: + force = "--force-%s" % force + + remove_c = 0 + # Using a for loop in case of error, we can report the package that failed + for package in packages: + # Query the package first, to see if we even need to remove + if not query_package(module, opkg_path, package): + continue + + rc, out, err = module.run_command("%s remove %s %s" % (opkg_path, force, package)) + + if query_package(module, opkg_path, package): + module.fail_json(msg="failed to remove %s: %s" % (package, out)) + + remove_c += 1 + + if remove_c > 0: + + module.exit_json(changed=True, msg="removed %s package(s)" % remove_c) + + module.exit_json(changed=False, msg="package(s) already absent") + + +def install_packages(module, opkg_path, packages): + """ Installs one or more packages if not already installed. """ + + p = module.params + force = p["force"] + if force: + force = "--force-%s" % force + + install_c = 0 + + for package in packages: + if query_package(module, opkg_path, package): + continue + + rc, out, err = module.run_command("%s install %s %s" % (opkg_path, force, package)) + + if not query_package(module, opkg_path, package): + module.fail_json(msg="failed to install %s: %s" % (package, out)) + + install_c += 1 + + if install_c > 0: + module.exit_json(changed=True, msg="installed %s package(s)" % (install_c)) + + module.exit_json(changed=False, msg="package(s) already present") + + +def main(): + module = AnsibleModule( + argument_spec=dict( + name=dict(aliases=["pkg"], required=True), + state=dict(default="present", choices=["present", "installed", "absent", "removed"]), + force=dict(default="", choices=["", "depends", "maintainer", "reinstall", "overwrite", "downgrade", "space", "postinstall", "remove", + "checksum", "removal-of-dependent-packages"]), + update_cache=dict(default="no", aliases=["update-cache"], type='bool') + ) + ) + + opkg_path = module.get_bin_path('opkg', True, ['/bin']) + + p = module.params + + if p["update_cache"]: + update_package_db(module, opkg_path) + + pkgs = p["name"].split(",") + + if p["state"] in ["present", "installed"]: + install_packages(module, opkg_path, pkgs) + + elif p["state"] in ["absent", "removed"]: + remove_packages(module, opkg_path, pkgs) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/packaging/os/pacman.py b/plugins/modules/packaging/os/pacman.py new file mode 100644 index 0000000000..14995a2216 --- /dev/null +++ b/plugins/modules/packaging/os/pacman.py @@ -0,0 +1,484 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2012, Afterburn +# Copyright: (c) 2013, Aaron Bull Schaefer +# Copyright: (c) 2015, Indrajit Raychaudhuri +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: pacman +short_description: Manage packages with I(pacman) +description: + - Manage packages with the I(pacman) package manager, which is used by Arch Linux and its variants. 
+author: + - Indrajit Raychaudhuri (@indrajitr) + - Aaron Bull Schaefer (@elasticdog) + - Maxime de Roucy (@tchernomax) +options: + name: + description: + - Name or list of names of the package(s) or file(s) to install, upgrade, or remove. + Can't be used in combination with C(upgrade). + aliases: [ package, pkg ] + type: list + elements: str + + state: + description: + - Desired state of the package. + default: present + choices: [ absent, latest, present ] + + force: + description: + - When removing package, force remove package, without any checks. + Same as `extra_args="--nodeps --nodeps"`. + When update_cache, force redownload repo databases. + Same as `update_cache_extra_args="--refresh --refresh"`. + default: no + type: bool + + extra_args: + description: + - Additional option to pass to pacman when enforcing C(state). + default: + + update_cache: + description: + - Whether or not to refresh the master package lists. + - This can be run as part of a package installation or as a separate step. + default: no + type: bool + aliases: [ update-cache ] + + update_cache_extra_args: + description: + - Additional option to pass to pacman when enforcing C(update_cache). + default: + + upgrade: + description: + - Whether or not to upgrade the whole system. + Can't be used in combination with C(name). + default: no + type: bool + + upgrade_extra_args: + description: + - Additional option to pass to pacman when enforcing C(upgrade). + default: + +notes: + - When used with a `loop:` each package will be processed individually, + it is much more efficient to pass the list directly to the `name` option. +''' + +RETURN = ''' +packages: + description: a list of packages that have been changed + returned: when upgrade is set to yes + type: list + sample: [ package, other-package ] +''' + +EXAMPLES = ''' +- name: Install package foo from repo + pacman: + name: foo + state: present + +- name: Install package bar from file + pacman: + name: ~/bar-1.0-1-any.pkg.tar.xz + state: present + +- name: Install package foo from repo and bar from file + pacman: + name: + - foo + - ~/bar-1.0-1-any.pkg.tar.xz + state: present + +- name: Upgrade package foo + pacman: + name: foo + state: latest + update_cache: yes + +- name: Remove packages foo and bar + pacman: + name: + - foo + - bar + state: absent + +- name: Recursively remove package baz + pacman: + name: baz + state: absent + extra_args: --recursive + +- name: Run the equivalent of "pacman -Sy" as a separate step + pacman: + update_cache: yes + +- name: Run the equivalent of "pacman -Su" as a separate step + pacman: + upgrade: yes + +- name: Run the equivalent of "pacman -Syu" as a separate step + pacman: + update_cache: yes + upgrade: yes + +- name: Run the equivalent of "pacman -Rdd", force remove package baz + pacman: + name: baz + state: absent + force: yes +''' + +import re + +from ansible.module_utils.basic import AnsibleModule + + +def get_version(pacman_output): + """Take pacman -Qi or pacman -Si output and get the Version""" + lines = pacman_output.split('\n') + for line in lines: + if line.startswith('Version '): + return line.split(':')[1].strip() + return None + + +def get_name(module, pacman_output): + """Take pacman -Qi or pacman -Si output and get the package name""" + lines = pacman_output.split('\n') + for line in lines: + if line.startswith('Name '): + return line.split(':')[1].strip() + module.fail_json(msg="get_name: fail to retrieve package name from pacman output") + + +def query_package(module, pacman_path, name, state="present"): + 
"""Query the package status in both the local system and the repository. Returns a boolean to indicate if the package is installed, a second + boolean to indicate if the package is up-to-date and a third boolean to indicate whether online information were available + """ + if state == "present": + lcmd = "%s --query --info %s" % (pacman_path, name) + lrc, lstdout, lstderr = module.run_command(lcmd, check_rc=False) + if lrc != 0: + # package is not installed locally + return False, False, False + else: + # a non-zero exit code doesn't always mean the package is installed + # for example, if the package name queried is "provided" by another package + installed_name = get_name(module, lstdout) + if installed_name != name: + return False, False, False + + # get the version installed locally (if any) + lversion = get_version(lstdout) + + rcmd = "%s --sync --info %s" % (pacman_path, name) + rrc, rstdout, rstderr = module.run_command(rcmd, check_rc=False) + # get the version in the repository + rversion = get_version(rstdout) + + if rrc == 0: + # Return True to indicate that the package is installed locally, and the result of the version number comparison + # to determine if the package is up-to-date. + return True, (lversion == rversion), False + + # package is installed but cannot fetch remote Version. Last True stands for the error + return True, True, True + + +def update_package_db(module, pacman_path): + if module.params['force']: + module.params["update_cache_extra_args"] += " --refresh --refresh" + + cmd = "%s --sync --refresh %s" % (pacman_path, module.params["update_cache_extra_args"]) + rc, stdout, stderr = module.run_command(cmd, check_rc=False) + + if rc == 0: + return True + else: + module.fail_json(msg="could not update package db") + + +def upgrade(module, pacman_path): + cmdupgrade = "%s --sync --sysupgrade --quiet --noconfirm %s" % (pacman_path, module.params["upgrade_extra_args"]) + cmdneedrefresh = "%s --query --upgrades" % (pacman_path) + rc, stdout, stderr = module.run_command(cmdneedrefresh, check_rc=False) + data = stdout.split('\n') + data.remove('') + packages = [] + diff = { + 'before': '', + 'after': '', + } + + if rc == 0: + # Match lines of `pacman -Qu` output of the form: + # (package name) (before version-release) -> (after version-release) + # e.g., "ansible 2.7.1-1 -> 2.7.2-1" + regex = re.compile(r'([\w+\-.@]+) (\S+-\S+) -> (\S+-\S+)') + for p in data: + m = regex.search(p) + packages.append(m.group(1)) + if module._diff: + diff['before'] += "%s-%s\n" % (m.group(1), m.group(2)) + diff['after'] += "%s-%s\n" % (m.group(1), m.group(3)) + if module.check_mode: + module.exit_json(changed=True, msg="%s package(s) would be upgraded" % (len(data)), packages=packages, diff=diff) + rc, stdout, stderr = module.run_command(cmdupgrade, check_rc=False) + if rc == 0: + module.exit_json(changed=True, msg='System upgraded', packages=packages, diff=diff) + else: + module.fail_json(msg="Could not upgrade") + else: + module.exit_json(changed=False, msg='Nothing to upgrade', packages=packages) + + +def remove_packages(module, pacman_path, packages): + data = [] + diff = { + 'before': '', + 'after': '', + } + + if module.params["force"]: + module.params["extra_args"] += " --nodeps --nodeps" + + remove_c = 0 + # Using a for loop in case of error, we can report the package that failed + for package in packages: + # Query the package first, to see if we even need to remove + installed, updated, unknown = query_package(module, pacman_path, package) + if not installed: + continue + + cmd 
= "%s --remove --noconfirm --noprogressbar %s %s" % (pacman_path, module.params["extra_args"], package) + rc, stdout, stderr = module.run_command(cmd, check_rc=False) + + if rc != 0: + module.fail_json(msg="failed to remove %s" % (package)) + + if module._diff: + d = stdout.split('\n')[2].split(' ')[2:] + for i, pkg in enumerate(d): + d[i] = re.sub('-[0-9].*$', '', d[i].split('/')[-1]) + diff['before'] += "%s\n" % pkg + data.append('\n'.join(d)) + + remove_c += 1 + + if remove_c > 0: + module.exit_json(changed=True, msg="removed %s package(s)" % remove_c, diff=diff) + + module.exit_json(changed=False, msg="package(s) already absent") + + +def install_packages(module, pacman_path, state, packages, package_files): + install_c = 0 + package_err = [] + message = "" + data = [] + diff = { + 'before': '', + 'after': '', + } + + to_install_repos = [] + to_install_files = [] + for i, package in enumerate(packages): + # if the package is installed and state == present or state == latest and is up-to-date then skip + installed, updated, latestError = query_package(module, pacman_path, package) + if latestError and state == 'latest': + package_err.append(package) + + if installed and (state == 'present' or (state == 'latest' and updated)): + continue + + if package_files[i]: + to_install_files.append(package_files[i]) + else: + to_install_repos.append(package) + + if to_install_repos: + cmd = "%s --sync --noconfirm --noprogressbar --needed %s %s" % (pacman_path, module.params["extra_args"], " ".join(to_install_repos)) + rc, stdout, stderr = module.run_command(cmd, check_rc=False) + + if rc != 0: + module.fail_json(msg="failed to install %s: %s" % (" ".join(to_install_repos), stderr)) + + # As we pass `--needed` to pacman returns a single line of ` there is nothing to do` if no change is performed. + # The check for > 3 is here because we pick the 4th line in normal operation. + if len(stdout.split('\n')) > 3: + data = stdout.split('\n')[3].split(' ')[2:] + data = [i for i in data if i != ''] + for i, pkg in enumerate(data): + data[i] = re.sub('-[0-9].*$', '', data[i].split('/')[-1]) + if module._diff: + diff['after'] += "%s\n" % pkg + + install_c += len(to_install_repos) + + if to_install_files: + cmd = "%s --upgrade --noconfirm --noprogressbar --needed %s %s" % (pacman_path, module.params["extra_args"], " ".join(to_install_files)) + rc, stdout, stderr = module.run_command(cmd, check_rc=False) + + if rc != 0: + module.fail_json(msg="failed to install %s: %s" % (" ".join(to_install_files), stderr)) + + # As we pass `--needed` to pacman returns a single line of ` there is nothing to do` if no change is performed. + # The check for > 3 is here because we pick the 4th line in normal operation. + if len(stdout.split('\n')) > 3: + data = stdout.split('\n')[3].split(' ')[2:] + data = [i for i in data if i != ''] + for i, pkg in enumerate(data): + data[i] = re.sub('-[0-9].*$', '', data[i].split('/')[-1]) + if module._diff: + diff['after'] += "%s\n" % pkg + + install_c += len(to_install_files) + + if state == 'latest' and len(package_err) > 0: + message = "But could not ensure 'latest' state for %s package(s) as remote version could not be fetched." % (package_err) + + if install_c > 0: + module.exit_json(changed=True, msg="installed %s package(s). %s" % (install_c, message), diff=diff) + + module.exit_json(changed=False, msg="package(s) already installed. 
%s" % (message), diff=diff) + + +def check_packages(module, pacman_path, packages, state): + would_be_changed = [] + diff = { + 'before': '', + 'after': '', + 'before_header': '', + 'after_header': '' + } + + for package in packages: + installed, updated, unknown = query_package(module, pacman_path, package) + if ((state in ["present", "latest"] and not installed) or + (state == "absent" and installed) or + (state == "latest" and not updated)): + would_be_changed.append(package) + if would_be_changed: + if state == "absent": + state = "removed" + + if module._diff and (state == 'removed'): + diff['before_header'] = 'removed' + diff['before'] = '\n'.join(would_be_changed) + '\n' + elif module._diff and ((state == 'present') or (state == 'latest')): + diff['after_header'] = 'installed' + diff['after'] = '\n'.join(would_be_changed) + '\n' + + module.exit_json(changed=True, msg="%s package(s) would be %s" % ( + len(would_be_changed), state), diff=diff) + else: + module.exit_json(changed=False, msg="package(s) already %s" % state, diff=diff) + + +def expand_package_groups(module, pacman_path, pkgs): + expanded = [] + + for pkg in pkgs: + if pkg: # avoid empty strings + cmd = "%s --sync --groups --quiet %s" % (pacman_path, pkg) + rc, stdout, stderr = module.run_command(cmd, check_rc=False) + + if rc == 0: + # A group was found matching the name, so expand it + for name in stdout.split('\n'): + name = name.strip() + if name: + expanded.append(name) + else: + expanded.append(pkg) + + return expanded + + +def main(): + module = AnsibleModule( + argument_spec=dict( + name=dict(type='list', elements='str', aliases=['pkg', 'package']), + state=dict(type='str', default='present', choices=['present', 'installed', 'latest', 'absent', 'removed']), + force=dict(type='bool', default=False), + extra_args=dict(type='str', default=''), + upgrade=dict(type='bool', default=False), + upgrade_extra_args=dict(type='str', default=''), + update_cache=dict(type='bool', default=False, aliases=['update-cache']), + update_cache_extra_args=dict(type='str', default=''), + ), + required_one_of=[['name', 'update_cache', 'upgrade']], + mutually_exclusive=[['name', 'upgrade']], + supports_check_mode=True, + ) + + pacman_path = module.get_bin_path('pacman', True) + module.run_command_environ_update = dict(LC_ALL='C') + + p = module.params + + # normalize the state parameter + if p['state'] in ['present', 'installed']: + p['state'] = 'present' + elif p['state'] in ['absent', 'removed']: + p['state'] = 'absent' + + if p["update_cache"] and not module.check_mode: + update_package_db(module, pacman_path) + if not (p['name'] or p['upgrade']): + module.exit_json(changed=True, msg='Updated the package master lists') + + if p['update_cache'] and module.check_mode and not (p['name'] or p['upgrade']): + module.exit_json(changed=True, msg='Would have updated the package cache') + + if p['upgrade']: + upgrade(module, pacman_path) + + if p['name']: + pkgs = expand_package_groups(module, pacman_path, p['name']) + + pkg_files = [] + for i, pkg in enumerate(pkgs): + if not pkg: # avoid empty strings + continue + elif re.match(r".*\.pkg\.tar(\.(gz|bz2|xz|lrz|lzo|Z))?$", pkg): + # The package given is a filename, extract the raw pkg name from + # it and store the filename + pkg_files.append(pkg) + pkgs[i] = re.sub(r'-[0-9].*$', '', pkgs[i].split('/')[-1]) + else: + pkg_files.append(None) + + if module.check_mode: + check_packages(module, pacman_path, pkgs, p['state']) + + if p['state'] in ['present', 'latest']: + install_packages(module, 
pacman_path, p['state'], pkgs, pkg_files)
+        elif p['state'] == 'absent':
+            remove_packages(module, pacman_path, pkgs)
+    else:
+        module.exit_json(changed=False, msg="No package specified to work on.")
+
+
+if __name__ == "__main__":
+    main()
diff --git a/plugins/modules/packaging/os/pkg5.py b/plugins/modules/packaging/os/pkg5.py
new file mode 100644
index 0000000000..4d4c9ff2b2
--- /dev/null
+++ b/plugins/modules/packaging/os/pkg5.py
@@ -0,0 +1,179 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2014, Peter Oliver
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+DOCUMENTATION = '''
+---
+module: pkg5
+author:
+- Peter Oliver (@mavit)
+short_description: Manages packages with the Solaris 11 Image Packaging System
+description:
+  - IPS packages are the native packages in Solaris 11 and higher.
+notes:
+  - The naming of IPS packages is explained at U(http://www.oracle.com/technetwork/articles/servers-storage-admin/ips-package-versioning-2232906.html).
+options:
+  name:
+    description:
+    - An FMRI of the package(s) to be installed/removed/updated.
+    - Multiple packages may be specified, separated by C(,).
+    required: true
+  state:
+    description:
+    - Whether to install (I(present), I(latest)), or remove (I(absent)) a package.
+    choices: [ absent, latest, present ]
+    default: present
+  accept_licenses:
+    description:
+    - Accept any licences.
+    type: bool
+    default: no
+    aliases: [ accept, accept_licences ]
+  be_name:
+    description:
+    - Creates a new boot environment with the given name.
+    type: str
+  refresh:
+    description:
+    - Refresh publishers before execution.
+    type: bool
+    default: yes
+'''
+EXAMPLES = '''
+- name: Install Vim
+  pkg5:
+    name: editor/vim
+
+- name: Install Vim without refreshing publishers
+  pkg5:
+    name: editor/vim
+    refresh: no
+
+- name: Remove finger daemon
+  pkg5:
+    name: service/network/finger
+    state: absent
+
+- name: Install several packages at once
+  pkg5:
+    name:
+    - /file/gnu-findutils
+    - /text/gnu-grep
+'''
+
+import re
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def main():
+    module = AnsibleModule(
+        argument_spec=dict(
+            name=dict(type='list', required=True),
+            state=dict(type='str', default='present', choices=['absent', 'installed', 'latest', 'present', 'removed', 'uninstalled']),
+            accept_licenses=dict(type='bool', default=False, aliases=['accept', 'accept_licences']),
+            be_name=dict(type='str'),
+            refresh=dict(type='bool', default=True),
+        ),
+        supports_check_mode=True,
+    )
+
+    params = module.params
+    packages = []
+
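Because name is a list option, an FMRI such as editor/vim@8.2,5.11-0.175 arrives split at the comma. A worked illustration of the rejoining performed next (standalone sketch, hypothetical input values):

```python
import re

def rejoin_fmris(fragments):
    """Re-join FMRI fragments that were split on ',' (same test as the loop below)."""
    packages = []
    for fragment in fragments:
        if re.search(r'^\d+(?:\.\d+)*', fragment) and packages and re.search(r'@[^,]*$', packages[-1]):
            packages[-1] += ',' + fragment
        else:
            packages.append(fragment)
    return packages

print(rejoin_fmris(['editor/vim@8.2', '5.11-0.175', 'text/gnu-grep']))
# -> ['editor/vim@8.2,5.11-0.175', 'text/gnu-grep']
```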
+    # pkg(5) FMRIs include a comma before the release number, but
+    # AnsibleModule will have split this into multiple items for us.
+    # Try to spot where this has happened and fix it.
+    for fragment in params['name']:
+        if re.search(r'^\d+(?:\.\d+)*', fragment) and packages and re.search(r'@[^,]*$', packages[-1]):
+            packages[-1] += ',' + fragment
+        else:
+            packages.append(fragment)
+
+    if params['state'] in ['present', 'installed']:
+        ensure(module, 'present', packages, params)
+    elif params['state'] in ['latest']:
+        ensure(module, 'latest', packages, params)
+    elif params['state'] in ['absent', 'uninstalled', 'removed']:
+        ensure(module, 'absent', packages, params)
+
+
+def ensure(module, state, packages, params):
+    response = {
+        'results': [],
+        'msg': '',
+    }
+    behaviour = {
+        'present': {
+            'filter': lambda p: not is_installed(module, p),
+            'subcommand': 'install',
+        },
+        'latest': {
+            'filter': lambda p: (
+                not is_installed(module, p) or not is_latest(module, p)
+            ),
+            'subcommand': 'install',
+        },
+        'absent': {
+            'filter': lambda p: is_installed(module, p),
+            'subcommand': 'uninstall',
+        },
+    }
+
+    if module.check_mode:
+        dry_run = ['-n']
+    else:
+        dry_run = []
+
+    if params['accept_licenses']:
+        accept_licenses = ['--accept']
+    else:
+        accept_licenses = []
+
+    if params['be_name']:
+        beadm = ['--be-name=' + module.params['be_name']]
+    else:
+        beadm = []
+
+    if params['refresh']:
+        no_refresh = []
+    else:
+        no_refresh = ['--no-refresh']
+
+    to_modify = list(filter(behaviour[state]['filter'], packages))
+    if to_modify:
+        rc, out, err = module.run_command(['pkg', behaviour[state]['subcommand']] + dry_run + accept_licenses + beadm + no_refresh + ['-q', '--'] + to_modify)
+        response['rc'] = rc
+        response['results'].append(out)
+        response['msg'] += err
+        response['changed'] = True
+        if rc == 4:
+            response['changed'] = False
+            response['failed'] = False
+        elif rc != 0:
+            module.fail_json(**response)
+
+    module.exit_json(**response)
+
+
+def is_installed(module, package):
+    rc, out, err = module.run_command(['pkg', 'list', '--', package])
+    return not bool(int(rc))
+
+
+def is_latest(module, package):
+    rc, out, err = module.run_command(['pkg', 'list', '-u', '--', package])
+    return bool(int(rc))
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/packaging/os/pkg5_publisher.py b/plugins/modules/packaging/os/pkg5_publisher.py
new file mode 100644
index 0000000000..d30972c49a
--- /dev/null
+++ b/plugins/modules/packaging/os/pkg5_publisher.py
@@ -0,0 +1,201 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright 2014 Peter Oliver
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: pkg5_publisher
+author: "Peter Oliver (@mavit)"
+short_description: Manages Solaris 11 Image Packaging System publishers
+description:
+  - IPS packages are the native packages in Solaris 11 and higher.
+  - This module will configure which publishers a client will download IPS
+    packages from.
+options:
+  name:
+    description:
+      - The publisher's name.
+    required: true
+    aliases: [ publisher ]
+  state:
+    description:
+      - Whether to ensure that a publisher is present or absent.
+    default: present
+    choices: [ present, absent ]
+  sticky:
+    description:
+      - Packages installed from a sticky repository can only receive updates
+        from that repository.
+    type: bool
+  enabled:
+    description:
+      - Is the repository enabled or disabled?
+    type: bool
+  origin:
+    description:
+      - A path or URL to the repository.
+ - Multiple values may be provided. + mirror: + description: + - A path or URL to the repository mirror. + - Multiple values may be provided. +''' +EXAMPLES = ''' +# Fetch packages for the solaris publisher direct from Oracle: +- pkg5_publisher: + name: solaris + sticky: true + origin: https://pkg.oracle.com/solaris/support/ + +# Configure a publisher for locally-produced packages: +- pkg5_publisher: + name: site + origin: 'https://pkg.example.com/site/' +''' + +from ansible.module_utils.basic import AnsibleModule + + +def main(): + module = AnsibleModule( + argument_spec=dict( + name=dict(required=True, aliases=['publisher']), + state=dict(default='present', choices=['present', 'absent']), + sticky=dict(type='bool'), + enabled=dict(type='bool'), + # search_after=dict(), + # search_before=dict(), + origin=dict(type='list'), + mirror=dict(type='list'), + ) + ) + + for option in ['origin', 'mirror']: + if module.params[option] == ['']: + module.params[option] = [] + + if module.params['state'] == 'present': + modify_publisher(module, module.params) + else: + unset_publisher(module, module.params['name']) + + +def modify_publisher(module, params): + name = params['name'] + existing = get_publishers(module) + + if name in existing: + for option in ['origin', 'mirror', 'sticky', 'enabled']: + if params[option] is not None: + if params[option] != existing[name][option]: + return set_publisher(module, params) + else: + return set_publisher(module, params) + + module.exit_json() + + +def set_publisher(module, params): + name = params['name'] + args = [] + + if params['origin'] is not None: + args.append('--remove-origin=*') + args.extend(['--add-origin=' + u for u in params['origin']]) + if params['mirror'] is not None: + args.append('--remove-mirror=*') + args.extend(['--add-mirror=' + u for u in params['mirror']]) + + if params['sticky'] is not None and params['sticky']: + args.append('--sticky') + elif params['sticky'] is not None: + args.append('--non-sticky') + + if params['enabled'] is not None and params['enabled']: + args.append('--enable') + elif params['enabled'] is not None: + args.append('--disable') + + rc, out, err = module.run_command( + ["pkg", "set-publisher"] + args + [name], + check_rc=True + ) + response = { + 'rc': rc, + 'results': [out], + 'msg': err, + 'changed': True, + } + if rc != 0: + module.fail_json(**response) + module.exit_json(**response) + + +def unset_publisher(module, publisher): + if publisher not in get_publishers(module): + module.exit_json() + + rc, out, err = module.run_command( + ["pkg", "unset-publisher", publisher], + check_rc=True + ) + response = { + 'rc': rc, + 'results': [out], + 'msg': err, + 'changed': True, + } + if rc != 0: + module.fail_json(**response) + module.exit_json(**response) + + +def get_publishers(module): + rc, out, err = module.run_command(["pkg", "publisher", "-Ftsv"], True) + + lines = out.splitlines() + keys = lines.pop(0).lower().split("\t") + + publishers = {} + for line in lines: + values = dict(zip(keys, map(unstringify, line.split("\t")))) + name = values['publisher'] + + if name not in publishers: + publishers[name] = dict( + (k, values[k]) for k in ['sticky', 'enabled'] + ) + publishers[name]['origin'] = [] + publishers[name]['mirror'] = [] + + if values['type'] is not None: + publishers[name][values['type']].append(values['uri']) + + return publishers + + +def unstringify(val): + if val == "-" or val == '': + return None + elif val == "true": + return True + elif val == "false": + return False + else: + return val + + +if 
__name__ == '__main__': + main() diff --git a/plugins/modules/packaging/os/pkgin.py b/plugins/modules/packaging/os/pkgin.py new file mode 100644 index 0000000000..ed781cc278 --- /dev/null +++ b/plugins/modules/packaging/os/pkgin.py @@ -0,0 +1,375 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2013 Shaun Zinck +# Copyright (c) 2015 Lawrence Leonard Gilbert +# Copyright (c) 2016 Jasper Lievisse Adriaanse +# +# Written by Shaun Zinck +# Based on pacman module written by Afterburn +# that was based on apt module written by Matthew Williams +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: pkgin +short_description: Package manager for SmartOS, NetBSD, et al. +description: + - "The standard package manager for SmartOS, but also usable on NetBSD + or any OS that uses C(pkgsrc). (Home: U(http://pkgin.net/))" +author: + - "Larry Gilbert (@L2G)" + - "Shaun Zinck (@szinck)" + - "Jasper Lievisse Adriaanse (@jasperla)" +notes: + - "Known bug with pkgin < 0.8.0: if a package is removed and another + package depends on it, the other package will be silently removed as + well. New to Ansible 1.9: check-mode support." +options: + name: + description: + - Name of package to install/remove; + - multiple names may be given, separated by commas + state: + description: + - Intended state of the package + choices: [ 'present', 'absent' ] + default: present + update_cache: + description: + - Update repository database. Can be run with other steps or on it's own. + type: bool + default: 'no' + upgrade: + description: + - Upgrade main packages to their newer versions + type: bool + default: 'no' + full_upgrade: + description: + - Upgrade all packages to their newer versions + type: bool + default: 'no' + clean: + description: + - Clean packages cache + type: bool + default: 'no' + force: + description: + - Force package reinstall + type: bool + default: 'no' +''' + +EXAMPLES = ''' +# install package foo +- pkgin: + name: foo + state: present + +# Update database and install "foo" package +- pkgin: + name: foo + update_cache: yes + +# remove package foo +- pkgin: + name: foo + state: absent + +# remove packages foo and bar +- pkgin: + name: foo,bar + state: absent + +# Update repositories as a separate step +- pkgin: + update_cache: yes + +# Upgrade main packages (equivalent to C(pkgin upgrade)) +- pkgin: + upgrade: yes + +# Upgrade all packages (equivalent to C(pkgin full-upgrade)) +- pkgin: + full_upgrade: yes + +# Force-upgrade all packages (equivalent to C(pkgin -F full-upgrade)) +- pkgin: + full_upgrade: yes + force: yes + +# clean packages cache (equivalent to C(pkgin clean)) +- pkgin: + clean: yes +''' + + +import re + +from ansible.module_utils.basic import AnsibleModule + + +def query_package(module, name): + """Search for the package by name. + + Possible return values: + * "present" - installed, no upgrade needed + * "outdated" - installed, but can be upgraded + * False - not installed or not found + """ + + # test whether '-p' (parsable) flag is supported. + rc, out, err = module.run_command("%s -p -v" % PKGIN_PATH) + + if rc == 0: + pflag = '-p' + splitchar = ';' + else: + pflag = '' + splitchar = ' ' + + # Use "pkgin search" to find the package. 
The regular expression will + # only match on the complete name. + rc, out, err = module.run_command("%s %s search \"^%s$\"" % (PKGIN_PATH, pflag, name)) + + # rc will not be 0 unless the search was a success + if rc == 0: + + # Search results may contain more than one line (e.g., 'emacs'), so iterate + # through each line to see if we have a match. + packages = out.split('\n') + + for package in packages: + + # Break up line at spaces. The first part will be the package with its + # version (e.g. 'gcc47-libs-4.7.2nb4'), and the second will be the state + # of the package: + # '' - not installed + # '<' - installed but out of date + # '=' - installed and up to date + # '>' - installed but newer than the repository version + pkgname_with_version, raw_state = package.split(splitchar)[0:2] + + # Search for package, stripping version + # (results in sth like 'gcc47-libs' or 'emacs24-nox11') + pkg_search_obj = re.search(r'^(.*?)\-[0-9][0-9.]*(nb[0-9]+)*', pkgname_with_version, re.M) + + # Do not proceed unless we have a match + if not pkg_search_obj: + continue + + # Grab matched string + pkgname_without_version = pkg_search_obj.group(1) + + if name != pkgname_without_version: + continue + + # The package was found; now return its state + if raw_state == '<': + return 'outdated' + elif raw_state == '=' or raw_state == '>': + return 'present' + else: + return False + # no fall-through + + # No packages were matched, so return False + return False + + +def format_action_message(module, action, count): + vars = {"actioned": action, + "count": count} + + if module.check_mode: + message = "would have %(actioned)s %(count)d package" % vars + else: + message = "%(actioned)s %(count)d package" % vars + + if count == 1: + return message + else: + return message + "s" + + +def format_pkgin_command(module, command, package=None): + # Not all commands take a package argument, so cover this up by passing + # an empty string. Some commands (e.g. 'update') will ignore extra + # arguments, however this behaviour cannot be relied on for others. 
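The body of format_pkgin_command() follows; its net effect is easy to state in isolation. A minimal sketch (hypothetical helper name, same flag logic as the module: -n for check mode, otherwise -y plus an optional -F force flag):

```python
def pkgin_cmdline(pkgin, command, package="", force=False, check_mode=False):
    """Sketch of what format_pkgin_command builds."""
    if check_mode:
        return "%s -n %s %s" % (pkgin, command, package)
    return "%s -y %s %s %s" % (pkgin, "-F" if force else "", command, package)

# pkgin_cmdline("/opt/local/bin/pkgin", "install", "tmux")
# -> '/opt/local/bin/pkgin -y  install tmux'  (extra spaces are harmless to the shell)
```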
+ if package is None: + package = "" + + if module.params["force"]: + force = "-F" + else: + force = "" + + vars = {"pkgin": PKGIN_PATH, + "command": command, + "package": package, + "force": force} + + if module.check_mode: + return "%(pkgin)s -n %(command)s %(package)s" % vars + else: + return "%(pkgin)s -y %(force)s %(command)s %(package)s" % vars + + +def remove_packages(module, packages): + + remove_c = 0 + + # Using a for loop in case of error, we can report the package that failed + for package in packages: + # Query the package first, to see if we even need to remove + if not query_package(module, package): + continue + + rc, out, err = module.run_command( + format_pkgin_command(module, "remove", package)) + + if not module.check_mode and query_package(module, package): + module.fail_json(msg="failed to remove %s: %s" % (package, out)) + + remove_c += 1 + + if remove_c > 0: + module.exit_json(changed=True, msg=format_action_message(module, "removed", remove_c)) + + module.exit_json(changed=False, msg="package(s) already absent") + + +def install_packages(module, packages): + + install_c = 0 + + for package in packages: + if query_package(module, package): + continue + + rc, out, err = module.run_command( + format_pkgin_command(module, "install", package)) + + if not module.check_mode and not query_package(module, package): + module.fail_json(msg="failed to install %s: %s" % (package, out)) + + install_c += 1 + + if install_c > 0: + module.exit_json(changed=True, msg=format_action_message(module, "installed", install_c)) + + module.exit_json(changed=False, msg="package(s) already present") + + +def update_package_db(module): + rc, out, err = module.run_command( + format_pkgin_command(module, "update")) + + if rc == 0: + if re.search('database for.*is up-to-date\n$', out): + return False, "database is up-to-date" + else: + return True, "updated repository database" + else: + module.fail_json(msg="could not update package db") + + +def do_upgrade_packages(module, full=False): + if full: + cmd = "full-upgrade" + else: + cmd = "upgrade" + + rc, out, err = module.run_command( + format_pkgin_command(module, cmd)) + + if rc == 0: + if re.search('^nothing to do.\n$', out): + module.exit_json(changed=False, msg="nothing left to upgrade") + else: + module.fail_json(msg="could not %s packages" % cmd) + + +def upgrade_packages(module): + do_upgrade_packages(module) + + +def full_upgrade_packages(module): + do_upgrade_packages(module, True) + + +def clean_cache(module): + rc, out, err = module.run_command( + format_pkgin_command(module, "clean")) + + if rc == 0: + # There's no indication if 'clean' actually removed anything, + # so assume it did. 
+ module.exit_json(changed=True, msg="cleaned caches") + else: + module.fail_json(msg="could not clean package cache") + + +def main(): + module = AnsibleModule( + argument_spec=dict( + state=dict(default="present", choices=["present", "absent"]), + name=dict(aliases=["pkg"], type='list'), + update_cache=dict(default='no', type='bool'), + upgrade=dict(default='no', type='bool'), + full_upgrade=dict(default='no', type='bool'), + clean=dict(default='no', type='bool'), + force=dict(default='no', type='bool')), + required_one_of=[['name', 'update_cache', 'upgrade', 'full_upgrade', 'clean']], + supports_check_mode=True) + + global PKGIN_PATH + PKGIN_PATH = module.get_bin_path('pkgin', True, ['/opt/local/bin']) + + module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C', LC_CTYPE='C') + + p = module.params + + if p["update_cache"]: + c, msg = update_package_db(module) + if not (p['name'] or p["upgrade"] or p["full_upgrade"]): + module.exit_json(changed=c, msg=msg) + + if p["upgrade"]: + upgrade_packages(module) + if not p['name']: + module.exit_json(changed=True, msg='upgraded packages') + + if p["full_upgrade"]: + full_upgrade_packages(module) + if not p['name']: + module.exit_json(changed=True, msg='upgraded all packages') + + if p["clean"]: + clean_cache(module) + if not p['name']: + module.exit_json(changed=True, msg='cleaned caches') + + pkgs = p["name"] + + if p["state"] == "present": + install_packages(module, pkgs) + + elif p["state"] == "absent": + remove_packages(module, pkgs) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/packaging/os/pkgng.py b/plugins/modules/packaging/os/pkgng.py new file mode 100644 index 0000000000..2d53159393 --- /dev/null +++ b/plugins/modules/packaging/os/pkgng.py @@ -0,0 +1,401 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2013, bleader +# Written by bleader +# Based on pkgin module written by Shaun Zinck +# that was based on pacman module written by Afterburn +# that was based on apt module written by Matthew Williams +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: pkgng +short_description: Package manager for FreeBSD >= 9.0 +description: + - Manage binary packages for FreeBSD using 'pkgng' which is available in versions after 9.0. +options: + name: + description: + - Name or list of names of packages to install/remove. + required: true + state: + description: + - State of the package. + - 'Note: "latest" added in 2.7' + choices: [ 'present', 'latest', 'absent' ] + required: false + default: present + cached: + description: + - Use local package base instead of fetching an updated one. + type: bool + required: false + default: no + annotation: + description: + - A comma-separated list of keyvalue-pairs of the form + C(<+/-/:>[=]). A C(+) denotes adding an annotation, a + C(-) denotes removing an annotation, and C(:) denotes modifying an + annotation. + If setting or modifying annotations, a value must be provided. + required: false + pkgsite: + description: + - For pkgng versions before 1.1.4, specify packagesite to use + for downloading packages. If not specified, use settings from + C(/usr/local/etc/pkg.conf). + - For newer pkgng versions, specify a the name of a repository + configured in C(/usr/local/etc/pkg/repos). 
+ required: false + rootdir: + description: + - For pkgng versions 1.5 and later, pkg will install all packages + within the specified root directory. + - Can not be used together with I(chroot) or I(jail) options. + required: false + chroot: + description: + - Pkg will chroot in the specified environment. + - Can not be used together with I(rootdir) or I(jail) options. + required: false + jail: + description: + - Pkg will execute in the given jail name or id. + - Can not be used together with I(chroot) or I(rootdir) options. + autoremove: + description: + - Remove automatically installed packages which are no longer needed. + required: false + type: bool + default: no +author: "bleader (@bleader)" +notes: + - When using pkgsite, be careful that already in cache packages won't be downloaded again. + - When used with a `loop:` each package will be processed individually, + it is much more efficient to pass the list directly to the `name` option. +''' + +EXAMPLES = ''' +- name: Install package foo + pkgng: + name: foo + state: present + +- name: Annotate package foo and bar + pkgng: + name: foo,bar + annotation: '+test1=baz,-test2,:test3=foobar' + +- name: Remove packages foo and bar + pkgng: + name: foo,bar + state: absent + +# "latest" support added in 2.7 +- name: Upgrade package baz + pkgng: + name: baz + state: latest +''' + + +import re +from ansible.module_utils.basic import AnsibleModule + + +def query_package(module, pkgng_path, name, dir_arg): + + rc, out, err = module.run_command("%s %s info -g -e %s" % (pkgng_path, dir_arg, name)) + + if rc == 0: + return True + + return False + + +def query_update(module, pkgng_path, name, dir_arg, old_pkgng, pkgsite): + + # Check to see if a package upgrade is available. + # rc = 0, no updates available or package not installed + # rc = 1, updates available + if old_pkgng: + rc, out, err = module.run_command("%s %s upgrade -g -n %s" % (pkgsite, pkgng_path, name)) + else: + rc, out, err = module.run_command("%s %s upgrade %s -g -n %s" % (pkgng_path, dir_arg, pkgsite, name)) + + if rc == 1: + return True + + return False + + +def pkgng_older_than(module, pkgng_path, compare_version): + + rc, out, err = module.run_command("%s -v" % pkgng_path) + version = [int(x) for x in re.split(r'[\._]', out)] + + i = 0 + new_pkgng = True + while compare_version[i] == version[i]: + i += 1 + if i == min(len(compare_version), len(version)): + break + else: + if compare_version[i] > version[i]: + new_pkgng = False + return not new_pkgng + + +def remove_packages(module, pkgng_path, packages, dir_arg): + + remove_c = 0 + # Using a for loop in case of error, we can report the package that failed + for package in packages: + # Query the package first, to see if we even need to remove + if not query_package(module, pkgng_path, package, dir_arg): + continue + + if not module.check_mode: + rc, out, err = module.run_command("%s %s delete -y %s" % (pkgng_path, dir_arg, package)) + + if not module.check_mode and query_package(module, pkgng_path, package, dir_arg): + module.fail_json(msg="failed to remove %s: %s" % (package, out)) + + remove_c += 1 + + if remove_c > 0: + + return (True, "removed %s package(s)" % remove_c) + + return (False, "package(s) already absent") + + +def install_packages(module, pkgng_path, packages, cached, pkgsite, dir_arg, state): + + install_c = 0 + + # as of pkg-1.1.4, PACKAGESITE is deprecated in favor of repository definitions + # in /usr/local/etc/pkg/repos + old_pkgng = pkgng_older_than(module, pkgng_path, [1, 1, 4]) + if pkgsite != "": + if 
old_pkgng:
+            pkgsite = "PACKAGESITE=%s" % (pkgsite)
+        else:
+            pkgsite = "-r %s" % (pkgsite)
+
+    # This environment variable skips mid-install prompts,
+    # setting them to their default values.
+    batch_var = 'env BATCH=yes'
+
+    if not module.check_mode and not cached:
+        if old_pkgng:
+            rc, out, err = module.run_command("%s %s update" % (pkgsite, pkgng_path))
+        else:
+            rc, out, err = module.run_command("%s %s update" % (pkgng_path, dir_arg))
+        if rc != 0:
+            module.fail_json(msg="Could not update catalogue [%d]: %s %s" % (rc, out, err))
+
+    for package in packages:
+        already_installed = query_package(module, pkgng_path, package, dir_arg)
+        if already_installed and state == "present":
+            continue
+
+        update_available = query_update(module, pkgng_path, package, dir_arg, old_pkgng, pkgsite)
+        if not update_available and already_installed and state == "latest":
+            continue
+
+        if not module.check_mode:
+            if already_installed:
+                action = "upgrade"
+            else:
+                action = "install"
+            if old_pkgng:
+                rc, out, err = module.run_command("%s %s %s %s -g -U -y %s" % (batch_var, pkgsite, pkgng_path, action, package))
+            else:
+                rc, out, err = module.run_command("%s %s %s %s %s -g -U -y %s" % (batch_var, pkgng_path, dir_arg, action, pkgsite, package))
+
+        if not module.check_mode and not query_package(module, pkgng_path, package, dir_arg):
+            module.fail_json(msg="failed to %s %s: %s" % (action, package, out), stderr=err)
+
+        install_c += 1
+
+    if install_c > 0:
+        return (True, "added %s package(s)" % (install_c))
+
+    return (False, "package(s) already %s" % (state))
+
+
+def annotation_query(module, pkgng_path, package, tag, dir_arg):
+    rc, out, err = module.run_command("%s %s info -g -A %s" % (pkgng_path, dir_arg, package))
+    match = re.search(r'^\s*(?P<tag>%s)\s*:\s*(?P<value>\w+)' % tag, out, flags=re.MULTILINE)
+    if match:
+        return match.group('value')
+    return False
+
+
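The annotate helpers below are driven by operation specs like '+test1=baz,-test2,:test3=foobar'. Parsed with the same pattern annotate_packages uses further down, such a spec decomposes as follows (standalone sketch):

```python
import re

ANNOTATION_RE = re.compile(r'(?P<operation>[+\-:])(?P<tag>\w+)(=(?P<value>\w+))?')

def parse_annotation_spec(spec):
    """Split a comma-separated annotation spec into operation dicts."""
    return [ANNOTATION_RE.match(part).groupdict() for part in spec.split(',')]

print(parse_annotation_spec('+test1=baz,-test2'))
# -> [{'operation': '+', 'tag': 'test1', 'value': 'baz'},
#     {'operation': '-', 'tag': 'test2', 'value': None}]
```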
+ rc, out, err = module.run_command('%s %s annotate -y -A %s %s "%s"' + % (pkgng_path, dir_arg, package, tag, value)) + if rc != 0: + module.fail_json(msg="could not annotate %s: %s" + % (package, out), stderr=err) + return True + elif _value != value: + # Annotation exists, but value differs + module.fail_json( + msg="failed to annotate %s, because %s is already set to %s, but should be set to %s" + % (package, tag, _value, value)) + return False + else: + # Annotation exists, nothing to do + return False + + +def annotation_delete(module, pkgng_path, package, tag, value, dir_arg): + _value = annotation_query(module, pkgng_path, package, tag, dir_arg) + if _value: + rc, out, err = module.run_command('%s %s annotate -y -D %s %s' + % (pkgng_path, dir_arg, package, tag)) + if rc != 0: + module.fail_json(msg="could not delete annotation to %s: %s" + % (package, out), stderr=err) + return True + return False + + +def annotation_modify(module, pkgng_path, package, tag, value, dir_arg): + _value = annotation_query(module, pkgng_path, package, tag, dir_arg) + if not _value: + # No such tag + module.fail_json(msg="could not change annotation to %s: tag %s does not exist" + % (package, tag)) + elif _value == value: + # No change in value + return False + else: + rc, out, err = module.run_command('%s %s annotate -y -M %s %s "%s"' + % (pkgng_path, dir_arg, package, tag, value)) + if rc != 0: + module.fail_json(msg="could not change annotation to %s: %s" + % (package, out), stderr=err) + return True + + +def annotate_packages(module, pkgng_path, packages, annotation, dir_arg): + annotate_c = 0 + # list() so the parsed annotations can be iterated once per package + annotations = list(map(lambda _annotation: + re.match(r'(?P<operation>[\+-:])(?P<tag>\w+)(=(?P<value>\w+))?', + _annotation).groupdict(), + re.split(r',', annotation))) + + operation = { + '+': annotation_add, + '-': annotation_delete, + ':': annotation_modify + } + + for package in packages: + for _annotation in annotations: + if operation[_annotation['operation']](module, pkgng_path, package, _annotation['tag'], _annotation['value'], dir_arg): + annotate_c += 1 + + if annotate_c > 0: + return (True, "added %s annotations."
% annotate_c) + return (False, "changed no annotations") + + +def autoremove_packages(module, pkgng_path, dir_arg): + rc, out, err = module.run_command("%s %s autoremove -n" % (pkgng_path, dir_arg)) + + autoremove_c = 0 + + match = re.search('^Deinstallation has been requested for the following ([0-9]+) packages', out, re.MULTILINE) + if match: + autoremove_c = int(match.group(1)) + + if autoremove_c == 0: + return False, "no package(s) to autoremove" + + if not module.check_mode: + rc, out, err = module.run_command("%s %s autoremove -y" % (pkgng_path, dir_arg)) + + return True, "autoremoved %d package(s)" % (autoremove_c) + + +def main(): + module = AnsibleModule( + argument_spec=dict( + state=dict(default="present", choices=["present", "latest", "absent"], required=False), + name=dict(aliases=["pkg"], required=True, type='list'), + cached=dict(default=False, type='bool'), + annotation=dict(default="", required=False), + pkgsite=dict(default="", required=False), + rootdir=dict(default="", required=False, type='path'), + chroot=dict(default="", required=False, type='path'), + jail=dict(default="", required=False, type='str'), + autoremove=dict(default=False, type='bool')), + supports_check_mode=True, + mutually_exclusive=[["rootdir", "chroot", "jail"]]) + + pkgng_path = module.get_bin_path('pkg', True) + + p = module.params + + pkgs = p["name"] + + changed = False + msgs = [] + dir_arg = "" + + if p["rootdir"] != "": + old_pkgng = pkgng_older_than(module, pkgng_path, [1, 5, 0]) + if old_pkgng: + module.fail_json(msg="To use option 'rootdir' pkg version must be 1.5 or greater") + else: + dir_arg = "--rootdir %s" % (p["rootdir"]) + + if p["chroot"] != "": + dir_arg = '--chroot %s' % (p["chroot"]) + + if p["jail"] != "": + dir_arg = '--jail %s' % (p["jail"]) + + if p["state"] in ("present", "latest"): + _changed, _msg = install_packages(module, pkgng_path, pkgs, p["cached"], p["pkgsite"], dir_arg, p["state"]) + changed = changed or _changed + msgs.append(_msg) + + elif p["state"] == "absent": + _changed, _msg = remove_packages(module, pkgng_path, pkgs, dir_arg) + changed = changed or _changed + msgs.append(_msg) + + if p["autoremove"]: + _changed, _msg = autoremove_packages(module, pkgng_path, dir_arg) + changed = changed or _changed + msgs.append(_msg) + + if p["annotation"]: + _changed, _msg = annotate_packages(module, pkgng_path, pkgs, p["annotation"], dir_arg) + changed = changed or _changed + msgs.append(_msg) + + module.exit_json(changed=changed, msg=", ".join(msgs)) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/packaging/os/pkgutil.py b/plugins/modules/packaging/os/pkgutil.py new file mode 100644 index 0000000000..b2535c518b --- /dev/null +++ b/plugins/modules/packaging/os/pkgutil.py @@ -0,0 +1,228 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2013, Alexander Winkler +# based on svr4pkg by +# Boyd Adamson (2012) +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['stableinterface'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: pkgutil +short_description: Manage CSW-Packages on Solaris +description: + - Manages CSW packages (SVR4 format) on Solaris 10 and 11. + - These were the native packages on Solaris <= 10 and are available + as a legacy feature in Solaris 11. 
+ - Pkgutil is an advanced packaging system that resolves dependencies on installation. + It is designed for CSW packages. +author: "Alexander Winkler (@dermute)" +options: + name: + description: + - Package name, e.g. C(CSWnrpe) + required: true + site: + description: + - Specifies the repository path to install the package from. + - Its global definition is done in C(/etc/opt/csw/pkgutil.conf). + required: false + state: + description: + - Whether to install (C(present)), or remove (C(absent)) a package. + - The upgrade (C(latest)) operation will update/install the package to the latest version available. + - "Note: The module has a limitation that C(latest) only works for one package, not lists of them." + required: true + choices: ["present", "absent", "latest"] + update_catalog: + description: + - If you want to refresh your catalog from the mirror, set this to C(yes). + required: false + default: False + type: bool +''' + +EXAMPLES = ''' +# Install a package +- pkgutil: + name: CSWcommon + state: present + +# Install a package from a specific repository +- pkgutil: + name: CSWnrpe + site: 'ftp://myinternal.repo/opencsw/kiel' + state: latest +''' + +from ansible.module_utils.basic import AnsibleModule + + +def package_installed(module, name): + cmd = ['pkginfo'] + cmd.append('-q') + cmd.append(name) + rc, out, err = run_command(module, cmd) + return rc == 0 + + +def package_latest(module, name, site): + # Only supports one package + cmd = ['pkgutil', '-U', '--single', '-c'] + if site is not None: + cmd += ['-t', site] + cmd.append(name) + rc, out, err = run_command(module, cmd) + # replaces `| tail -1 | grep -v SAME` + # use index -2, because splitting on \n creates an empty line + # at the end of the list + return 'SAME' in out.split('\n')[-2] + + +def run_command(module, cmd, **kwargs): + progname = cmd[0] + cmd[0] = module.get_bin_path(progname, True, ['/opt/csw/bin']) + return module.run_command(cmd, **kwargs) + + +def package_install(module, state, name, site, update_catalog): + cmd = ['pkgutil', '-iy'] + if update_catalog: + cmd += ['-U'] + if site is not None: + cmd += ['-t', site] + if state == 'latest': + cmd += ['-f'] + cmd.append(name) + (rc, out, err) = run_command(module, cmd) + return (rc, out, err) + + +def package_upgrade(module, name, site, update_catalog): + cmd = ['pkgutil', '-ufy'] + if update_catalog: + cmd += ['-U'] + if site is not None: + cmd += ['-t', site] + cmd.append(name) + (rc, out, err) = run_command(module, cmd) + return (rc, out, err) + + +def package_uninstall(module, name): + cmd = ['pkgutil', '-ry', name] + (rc, out, err) = run_command(module, cmd) + return (rc, out, err) + + +def main(): + module = AnsibleModule( + argument_spec=dict( + name=dict(required=True), + state=dict(required=True, choices=['present', 'absent', 'latest']), + site=dict(default=None), + update_catalog=dict(required=False, default=False, type='bool'), + ), + supports_check_mode=True + ) + name = module.params['name'] + state = module.params['state'] + site = module.params['site'] + update_catalog = module.params['update_catalog'] + rc = None + out = '' + err = '' + result = {} + result['name'] = name + result['state'] = state + + if state == 'present': + if not package_installed(module, name): + if module.check_mode: + module.exit_json(changed=True) + (rc, out, err) = package_install(module, state, name, site, update_catalog) + # Stdout is normally empty, but for some packages it can be + # very long and is not often useful + if len(out) > 75: + out =
out[:75] + '...' + if rc != 0: + if err: + msg = err + else: + msg = out + module.fail_json(msg=msg) + + elif state == 'latest': + if not package_installed(module, name): + if module.check_mode: + module.exit_json(changed=True) + (rc, out, err) = package_install(module, state, name, site, update_catalog) + if len(out) > 75: + out = out[:75] + '...' + if rc != 0: + if err: + msg = err + else: + msg = out + module.fail_json(msg=msg) + + else: + if not package_latest(module, name, site): + if module.check_mode: + module.exit_json(changed=True) + (rc, out, err) = package_upgrade(module, name, site, update_catalog) + if len(out) > 75: + out = out[:75] + '...' + if rc != 0: + if err: + msg = err + else: + msg = out + module.fail_json(msg=msg) + + elif state == 'absent': + if package_installed(module, name): + if module.check_mode: + module.exit_json(changed=True) + (rc, out, err) = package_uninstall(module, name) + if len(out) > 75: + out = out[:75] + '...' + if rc != 0: + if err: + msg = err + else: + msg = out + module.fail_json(msg=msg) + + if rc is None: + # pkgutil was not executed because the package was already present/absent + result['changed'] = False + elif rc == 0: + result['changed'] = True + else: + result['changed'] = False + result['failed'] = True + + if out: + result['stdout'] = out + if err: + result['stderr'] = err + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/packaging/os/portage.py b/plugins/modules/packaging/os/portage.py new file mode 100644 index 0000000000..a23e44369e --- /dev/null +++ b/plugins/modules/packaging/os/portage.py @@ -0,0 +1,527 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2016, William L Thomson Jr +# (c) 2013, Yap Sok Ann +# Written by Yap Sok Ann +# Modified by William L. Thomson Jr. +# Based on apt module written by Matthew Williams +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: portage +short_description: Package manager for Gentoo +description: + - Manages Gentoo packages + +options: + package: + description: + - Package atom or set, e.g. 
C(sys-apps/foo) or C(>foo-2.13) or C(@world) + type: list + elements: str + + state: + description: + - State of the package atom + default: "present" + choices: [ "present", "installed", "emerged", "absent", "removed", "unmerged", "latest" ] + + update: + description: + - Update packages to the best version available (--update) + type: bool + default: 'no' + + deep: + description: + - Consider the entire dependency tree of packages (--deep) + type: bool + default: 'no' + + newuse: + description: + - Include installed packages where USE flags have changed (--newuse) + type: bool + default: 'no' + + changed_use: + description: + - Include installed packages where USE flags have changed, except when + - flags that the user has not enabled are added or removed + - (--changed-use) + type: bool + default: 'no' + + oneshot: + description: + - Do not add the packages to the world file (--oneshot) + type: bool + default: 'no' + + noreplace: + description: + - Do not re-emerge installed packages (--noreplace) + type: bool + default: 'yes' + + nodeps: + description: + - Only merge packages but not their dependencies (--nodeps) + type: bool + default: 'no' + + onlydeps: + description: + - Only merge packages' dependencies but not the packages (--onlydeps) + type: bool + default: 'no' + + depclean: + description: + - Remove packages not needed by explicitly merged packages (--depclean) + - If no package is specified, clean up the world's dependencies + - Otherwise, --depclean serves as a dependency aware version of --unmerge + type: bool + default: 'no' + + quiet: + description: + - Run emerge in quiet mode (--quiet) + type: bool + default: 'no' + + verbose: + description: + - Run emerge in verbose mode (--verbose) + type: bool + default: 'no' + + sync: + description: + - Sync package repositories first + - If yes, perform "emerge --sync" + - If web, perform "emerge-webrsync" + choices: [ "web", "yes", "no" ] + + getbinpkg: + description: + - Prefer packages specified at PORTAGE_BINHOST in make.conf + type: bool + default: 'no' + + usepkgonly: + description: + - Merge only binaries (no compiling). This sets getbinpkg=yes. + type: bool + default: 'no' + + keepgoing: + description: + - Continue as much as possible after an error. + type: bool + default: 'no' + + jobs: + description: + - Specifies the number of packages to build simultaneously. + - "Since version 2.6: Value of 0 or False resets any previously added" + - --jobs setting values + + loadavg: + description: + - Specifies that no new builds should be started if there are + - other builds running and the load average is at least LOAD + - "Since version 2.6: Value of 0 or False resets any previously added" + - --load-average setting values + + quietbuild: + description: + - Redirect all build output to logs alone, and do not display it + - on stdout (--quiet-build) + type: bool + default: 'no' + + quietfail: + description: + - Suppresses display of the build log on stdout (--quiet-fail) + - Only the die message and the path of the build log will be + - displayed on stdout. 
+ type: bool + default: 'no' + +requirements: [ gentoolkit ] +author: + - "William L Thomson Jr (@wltjr)" + - "Yap Sok Ann (@sayap)" + - "Andrew Udvare (@Tatsh)" +''' + +EXAMPLES = ''' +# Make sure package foo is installed +- portage: + package: foo + state: present + +# Make sure package foo is not installed +- portage: + package: foo + state: absent + +# Update package foo to the "latest" version (OS-specific alternative to state: latest) +- portage: + package: foo + update: yes + +# Install package foo using PORTAGE_BINHOST setup +- portage: + package: foo + getbinpkg: yes + +# Re-install world from binary packages only and do not allow any compiling +- portage: + package: '@world' + usepkgonly: yes + +# Sync repositories and update world +- portage: + package: '@world' + update: yes + deep: yes + sync: yes + +# Remove unneeded packages +- portage: + depclean: yes + +# Remove package foo if it is not explicitly needed +- portage: + package: foo + state: absent + depclean: yes +''' + +import os +import re + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_native + + +def query_package(module, package, action): + if package.startswith('@'): + return query_set(module, package, action) + return query_atom(module, package, action) + + +def query_atom(module, atom, action): + cmd = '%s list %s' % (module.equery_path, atom) + + rc, out, err = module.run_command(cmd) + return rc == 0 + + +def query_set(module, set, action): + system_sets = [ + '@live-rebuild', + '@module-rebuild', + '@preserved-rebuild', + '@security', + '@selected', + '@system', + '@world', + '@x11-module-rebuild', + ] + + if set in system_sets: + if action == 'unmerge': + module.fail_json(msg='set %s cannot be removed' % set) + return False + + world_sets_path = '/var/lib/portage/world_sets' + if not os.path.exists(world_sets_path): + return False + + cmd = 'grep %s %s' % (set, world_sets_path) + + rc, out, err = module.run_command(cmd) + return rc == 0 + + +def sync_repositories(module, webrsync=False): + if module.check_mode: + module.exit_json(msg='check mode not supported by sync') + + if webrsync: + webrsync_path = module.get_bin_path('emerge-webrsync', required=True) + cmd = '%s --quiet' % webrsync_path + else: + cmd = '%s --sync --quiet --ask=n' % module.emerge_path + + rc, out, err = module.run_command(cmd) + if rc != 0: + module.fail_json(msg='could not sync package repositories') + + +# Note: In the 3 functions below, equery is done one-by-one, but emerge is done +# in one go. If that is not desirable, split the packages into multiple tasks +# instead of joining them together with commas.
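+# For illustration only (the package atoms below are hypothetical), a single
+# task such as
+#
+#   - portage:
+#       package: [app-editors/vim, app-misc/tmux]
+#
+# results in one emerge invocation covering both atoms, while the equivalent
+# loop
+#
+#   - portage:
+#       package: "{{ item }}"
+#     loop: [app-editors/vim, app-misc/tmux]
+#
+# runs emerge once per atom: failures are isolated per package, but each run
+# repeats the dependency calculation.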
+ + +def emerge_packages(module, packages): + """Run emerge command against given list of atoms.""" + p = module.params + + if p['noreplace'] and not (p['update'] or p['state'] == 'latest'): + for package in packages: + if p['noreplace'] and not query_package(module, package, 'emerge'): + break + else: + module.exit_json(changed=False, msg='Packages already present.') + if module.check_mode: + module.exit_json(changed=True, msg='Packages would be installed.') + + args = [] + emerge_flags = { + 'update': '--update', + 'deep': '--deep', + 'newuse': '--newuse', + 'changed_use': '--changed-use', + 'oneshot': '--oneshot', + 'noreplace': '--noreplace', + 'nodeps': '--nodeps', + 'onlydeps': '--onlydeps', + 'quiet': '--quiet', + 'verbose': '--verbose', + 'getbinpkg': '--getbinpkg', + 'usepkgonly': '--usepkgonly', + 'usepkg': '--usepkg', + 'keepgoing': '--keep-going', + 'quietbuild': '--quiet-build', + 'quietfail': '--quiet-fail', + } + for flag, arg in emerge_flags.items(): + if p[flag]: + args.append(arg) + + if p['state'] and p['state'] == 'latest': + args.append("--update") + + if p['usepkg'] and p['usepkgonly']: + module.fail_json(msg='Use only one of usepkg, usepkgonly') + + emerge_flags = { + 'jobs': '--jobs', + 'loadavg': '--load-average', + } + + for flag, arg in emerge_flags.items(): + flag_val = p[flag] + + if flag_val is None: + """Fallback to default: don't use this argument at all.""" + continue + + if not flag_val: + """If the value is 0 or 0.0: add the flag, but not the value.""" + args.append(arg) + continue + + """Add the --flag=value pair.""" + args.extend((arg, to_native(flag_val))) + + cmd, (rc, out, err) = run_emerge(module, packages, *args) + if rc != 0: + module.fail_json( + cmd=cmd, rc=rc, stdout=out, stderr=err, + msg='Packages not installed.', + ) + + # Check for SSH error with PORTAGE_BINHOST, since rc is still 0 despite + # this error + if (p['usepkgonly'] or p['getbinpkg']) \ + and 'Permission denied (publickey).' in err: + module.fail_json( + cmd=cmd, rc=rc, stdout=out, stderr=err, + msg='Please check your PORTAGE_BINHOST configuration in make.conf ' + 'and your SSH authorized_keys file', + ) + + changed = True + for line in out.splitlines(): + if re.match(r'(?:>+) Emerging (?:binary )?\(1 of', line): + msg = 'Packages installed.' + break + elif module.check_mode and re.match(r'\[(binary|ebuild)', line): + msg = 'Packages would be installed.' + break + else: + changed = False + msg = 'No packages installed.' 
+ + module.exit_json( + changed=changed, cmd=cmd, rc=rc, stdout=out, stderr=err, + msg=msg, + ) + + +def unmerge_packages(module, packages): + p = module.params + + for package in packages: + if query_package(module, package, 'unmerge'): + break + else: + module.exit_json(changed=False, msg='Packages already absent.') + + args = ['--unmerge'] + + for flag in ['quiet', 'verbose']: + if p[flag]: + args.append('--%s' % flag) + + cmd, (rc, out, err) = run_emerge(module, packages, *args) + + if rc != 0: + module.fail_json( + cmd=cmd, rc=rc, stdout=out, stderr=err, + msg='Packages not removed.', + ) + + module.exit_json( + changed=True, cmd=cmd, rc=rc, stdout=out, stderr=err, + msg='Packages removed.', + ) + + +def cleanup_packages(module, packages): + p = module.params + + if packages: + for package in packages: + if query_package(module, package, 'unmerge'): + break + else: + module.exit_json(changed=False, msg='Packages already absent.') + + args = ['--depclean'] + + for flag in ['quiet', 'verbose']: + if p[flag]: + args.append('--%s' % flag) + + cmd, (rc, out, err) = run_emerge(module, packages, *args) + if rc != 0: + module.fail_json(cmd=cmd, rc=rc, stdout=out, stderr=err) + + removed = 0 + for line in out.splitlines(): + if not line.startswith('Number removed:'): + continue + parts = line.split(':') + removed = int(parts[1].strip()) + changed = removed > 0 + + module.exit_json( + changed=changed, cmd=cmd, rc=rc, stdout=out, stderr=err, + msg='Depclean completed.', + ) + + +def run_emerge(module, packages, *args): + args = list(args) + + args.append('--ask=n') + if module.check_mode: + args.append('--pretend') + + cmd = [module.emerge_path] + args + packages + return cmd, module.run_command(cmd) + + +portage_present_states = ['present', 'emerged', 'installed', 'latest'] +portage_absent_states = ['absent', 'unmerged', 'removed'] + + +def main(): + module = AnsibleModule( + argument_spec=dict( + package=dict(type='list', elements='str', default=None, aliases=['name']), + state=dict( + default=portage_present_states[0], + choices=portage_present_states + portage_absent_states, + ), + update=dict(default=False, type='bool'), + deep=dict(default=False, type='bool'), + newuse=dict(default=False, type='bool'), + changed_use=dict(default=False, type='bool'), + oneshot=dict(default=False, type='bool'), + noreplace=dict(default=True, type='bool'), + nodeps=dict(default=False, type='bool'), + onlydeps=dict(default=False, type='bool'), + depclean=dict(default=False, type='bool'), + quiet=dict(default=False, type='bool'), + verbose=dict(default=False, type='bool'), + sync=dict(default=None, choices=['yes', 'web', 'no']), + getbinpkg=dict(default=False, type='bool'), + usepkgonly=dict(default=False, type='bool'), + usepkg=dict(default=False, type='bool'), + keepgoing=dict(default=False, type='bool'), + jobs=dict(default=None, type='int'), + loadavg=dict(default=None, type='float'), + quietbuild=dict(default=False, type='bool'), + quietfail=dict(default=False, type='bool'), + ), + required_one_of=[['package', 'sync', 'depclean']], + mutually_exclusive=[ + ['nodeps', 'onlydeps'], + ['quiet', 'verbose'], + ['quietbuild', 'verbose'], + ['quietfail', 'verbose'], + ], + supports_check_mode=True, + ) + + module.emerge_path = module.get_bin_path('emerge', required=True) + module.equery_path = module.get_bin_path('equery', required=True) + + p = module.params + + if p['sync'] and p['sync'].strip() != 'no': + sync_repositories(module, webrsync=(p['sync'] == 'web')) + if not p['package']: + 
module.exit_json(msg='Sync successfully finished.') + + packages = [] + if p['package']: + packages.extend(p['package']) + + if p['depclean']: + if packages and p['state'] not in portage_absent_states: + module.fail_json( + msg='Depclean can only be used with package when the state is ' + 'one of: %s' % portage_absent_states, + ) + + cleanup_packages(module, packages) + + elif p['state'] in portage_present_states: + emerge_packages(module, packages) + + elif p['state'] in portage_absent_states: + unmerge_packages(module, packages) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/packaging/os/portinstall.py b/plugins/modules/packaging/os/portinstall.py new file mode 100644 index 0000000000..a4908fe0d2 --- /dev/null +++ b/plugins/modules/packaging/os/portinstall.py @@ -0,0 +1,212 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2013, berenddeboer +# Written by berenddeboer +# Based on pkgng module written by bleader +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: portinstall +short_description: Installing packages from FreeBSD's ports system +description: + - Manage packages for FreeBSD using 'portinstall'. +options: + name: + description: + - name of package to install/remove + required: true + state: + description: + - state of the package + choices: [ 'present', 'absent' ] + required: false + default: present + use_packages: + description: + - use packages instead of ports whenever available + type: bool + required: false + default: yes +author: "berenddeboer (@berenddeboer)" +''' + +EXAMPLES = ''' +# Install package foo +- portinstall: + name: foo + state: present + +# Install package security/cyrus-sasl2-saslauthd +- portinstall: + name: security/cyrus-sasl2-saslauthd + state: present + +# Remove packages foo and bar +- portinstall: + name: foo,bar + state: absent +''' + +import os +import re +import sys + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.six.moves import shlex_quote + + +def query_package(module, name): + + pkg_info_path = module.get_bin_path('pkg_info', False) + + # Assume that if we have pkg_info, we haven't upgraded to pkgng + if pkg_info_path: + pkgng = False + pkg_glob_path = module.get_bin_path('pkg_glob', True) + rc, out, err = module.run_command("%s -e `pkg_glob %s`" % (pkg_info_path, shlex_quote(name)), use_unsafe_shell=True) + else: + pkgng = True + pkg_info_path = module.get_bin_path('pkg', True) + pkg_info_path = pkg_info_path + " info" + rc, out, err = module.run_command("%s %s" % (pkg_info_path, name)) + + found = rc == 0 + + if not found: + # databases/mysql55-client installs as mysql-client, so try solving + # that the ugly way. 
Pity FreeBSD doesn't have a foolproof way of checking + # whether a package is installed + name_without_digits = re.sub('[0-9]', '', name) + if name != name_without_digits: + # The fallback query has the same shape for pkgng and legacy pkg_info + rc, out, err = module.run_command("%s %s" % (pkg_info_path, name_without_digits)) + + found = rc == 0 + + return found + + +def matching_packages(module, name): + + ports_glob_path = module.get_bin_path('ports_glob', True) + rc, out, err = module.run_command("%s %s" % (ports_glob_path, name)) + # counts the number of packages found + occurrences = out.count('\n') + if occurrences == 0: + name_without_digits = re.sub('[0-9]', '', name) + if name != name_without_digits: + rc, out, err = module.run_command("%s %s" % (ports_glob_path, name_without_digits)) + occurrences = out.count('\n') + return occurrences + + +def remove_packages(module, packages): + + remove_c = 0 + pkg_glob_path = module.get_bin_path('pkg_glob', True) + + # If pkg_delete is not found, we assume pkgng + pkg_delete_path = module.get_bin_path('pkg_delete', False) + if not pkg_delete_path: + pkg_delete_path = module.get_bin_path('pkg', True) + pkg_delete_path = pkg_delete_path + " delete -y" + + # Use a for loop so that, on error, we can report which package failed + for package in packages: + # Query the package first, to see if we even need to remove + if not query_package(module, package): + continue + + rc, out, err = module.run_command("%s `%s %s`" % (pkg_delete_path, pkg_glob_path, shlex_quote(package)), use_unsafe_shell=True) + + if query_package(module, package): + name_without_digits = re.sub('[0-9]', '', package) + rc, out, err = module.run_command("%s `%s %s`" % (pkg_delete_path, pkg_glob_path, + shlex_quote(name_without_digits)), + use_unsafe_shell=True) + if query_package(module, package): + module.fail_json(msg="failed to remove %s: %s" % (package, out)) + + remove_c += 1 + + if remove_c > 0: + + module.exit_json(changed=True, msg="removed %s package(s)" % remove_c) + + module.exit_json(changed=False, msg="package(s) already absent") + + +def install_packages(module, packages, use_packages): + + install_c = 0 + + # If portinstall is not found, automagically install it + portinstall_path = module.get_bin_path('portinstall', False) + if not portinstall_path: + pkg_path = module.get_bin_path('pkg', False) + if pkg_path: + module.run_command("pkg install -y portupgrade") + portinstall_path = module.get_bin_path('portinstall', True) + + if use_packages: + portinstall_params = "--use-packages" + else: + portinstall_params = "" + + for package in packages: + if query_package(module, package): + continue + + # TODO: check how many match + matches = matching_packages(module, package) + if matches == 1: + rc, out, err = module.run_command("%s --batch %s %s" % (portinstall_path, portinstall_params, package)) + if not query_package(module, package): + module.fail_json(msg="failed to install %s: %s" % (package, out)) + elif matches == 0: + module.fail_json(msg="no matches for package %s" % (package)) + else: + module.fail_json(msg="%s matches found for package name %s" % (matches, package)) + + install_c += 1 + + if install_c > 0: + module.exit_json(changed=True, msg="present %s package(s)" % (install_c)) + + module.exit_json(changed=False, msg="package(s) already present") + + +def main(): + module = AnsibleModule( + argument_spec=dict( + state=dict(default="present", choices=["present", "absent"]), + name=dict(aliases=["pkg"], required=True), +
use_packages=dict(type='bool', default='yes'))) + + p = module.params + + pkgs = p["name"].split(",") + + if p["state"] == "present": + install_packages(module, pkgs, p["use_packages"]) + + elif p["state"] == "absent": + remove_packages(module, pkgs) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/packaging/os/pulp_repo.py b/plugins/modules/packaging/os/pulp_repo.py new file mode 100644 index 0000000000..1f5421438b --- /dev/null +++ b/plugins/modules/packaging/os/pulp_repo.py @@ -0,0 +1,743 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2016, Joe Adams <@sysadmind> +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: pulp_repo +author: "Joe Adams (@sysadmind)" +short_description: Add or remove Pulp repos from a remote host. +description: + - Add or remove Pulp repos from a remote host. +options: + add_export_distributor: + description: + - Whether or not to add the export distributor to new C(rpm) repositories. + type: bool + default: 'no' + feed: + description: + - Upstream feed URL to receive updates from. + force_basic_auth: + description: + - httplib2, the library used by the M(uri) module, only sends + authentication information when a webservice responds to an initial + request with a 401 status. Since some basic auth services do not + properly send a 401, logins will fail. This option forces the sending of + the Basic authentication header upon initial request. + type: bool + default: 'no' + generate_sqlite: + description: + - Boolean flag to indicate whether sqlite files should be generated during + a repository publish. + required: false + type: bool + default: 'no' + feed_ca_cert: + description: + - CA certificate string used to validate the feed source SSL certificate. + This can be the file content or the path to the file. + The ca_cert alias will be removed in Ansible 2.14. + type: str + aliases: [ importer_ssl_ca_cert, ca_cert ] + feed_client_cert: + description: + - Certificate used as the client certificate when synchronizing the + repository. This is used to communicate authentication information to + the feed source. The value to this option must be the full path to the + certificate. The specified file may be the certificate itself or a + single file containing both the certificate and private key. This can be + the file content or the path to the file. + - If not specified, the default value will come from client_cert; this will + change in Ansible 2.14. + type: str + aliases: [ importer_ssl_client_cert ] + feed_client_key: + description: + - Private key to the certificate specified in I(importer_ssl_client_cert), + assuming it is not included in the certificate file itself. This can be + the file content or the path to the file. + - If not specified, the default value will come from client_key; this will + change in Ansible 2.14. + type: str + aliases: [ importer_ssl_client_key ] + name: + description: + - Name of the repo to add or remove. This correlates to repo-id in Pulp. + required: true + proxy_host: + description: + - Proxy URL setting for the pulp repository importer. This is in the + format scheme://host. + required: false + default: null + proxy_port: + description: + - Proxy port setting for the pulp repository importer.
+ required: false + default: null + proxy_username: + description: + - Proxy username for the pulp repository importer. + required: false + default: null + proxy_password: + description: + - Proxy password for the pulp repository importer. + required: false + default: null + publish_distributor: + description: + - Distributor to use when state is C(publish). The default is to + publish all distributors. + pulp_host: + description: + - URL of the pulp server to connect to. + default: https://127.0.0.1 + relative_url: + description: + - Relative URL for the local repository. + required: true + repo_type: + description: + - Repo plugin type to use (e.g. C(rpm), C(docker)). + default: rpm + repoview: + description: + - Whether to generate repoview files for a published repository. Setting + this to C(yes) automatically activates C(generate_sqlite). + required: false + type: bool + default: 'no' + serve_http: + description: + - Make the repo available over HTTP. + type: bool + default: 'no' + serve_https: + description: + - Make the repo available over HTTPS. + type: bool + default: 'yes' + state: + description: + - The repo state. A state of C(sync) will queue a sync of the repo. + This is asynchronous but not delayed like a scheduled sync. A state of + C(publish) will use the repository's distributor to publish the content. + default: present + choices: [ "present", "absent", "sync", "publish" ] + url_password: + description: + - The password for use in HTTP basic authentication to the pulp API. + If the I(url_username) parameter is not specified, the I(url_password) + parameter will not be used. + url_username: + description: + - The username for use in HTTP basic authentication to the pulp API. + validate_certs: + description: + - If C(no), SSL certificates will not be validated. This should only be + used on personally controlled sites using self-signed certificates. + type: bool + default: 'yes' + wait_for_completion: + description: + - Wait for asynchronous tasks to complete before returning. + type: bool + default: 'no' +notes: + - This module can currently only create distributors and importers on rpm + repositories. Contributions to support other repo types are welcome. +extends_documentation_fragment: + - url +''' + +EXAMPLES = ''' +- name: Create a new repo with name 'my_repo' + pulp_repo: + name: my_repo + relative_url: my/repo + state: present + +- name: Create a repo with a feed and a relative URL + pulp_repo: + name: my_centos_updates + repo_type: rpm + feed: http://mirror.centos.org/centos/6/updates/x86_64/ + relative_url: centos/6/updates + url_username: admin + url_password: admin + force_basic_auth: yes + state: present + +- name: Remove a repo from the pulp server + pulp_repo: + name: my_old_repo + repo_type: rpm + state: absent +''' + +RETURN = ''' +repo: + description: Name of the repo that the action was performed on.
+ returned: success + type: str + sample: my_repo +''' + +import json +import os +from time import sleep + +# import module snippets +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.urls import fetch_url +from ansible.module_utils.urls import url_argument_spec + + +class pulp_server(object): + """ + Class to interact with a Pulp server + """ + + def __init__(self, module, pulp_host, repo_type, wait_for_completion=False): + self.module = module + self.host = pulp_host + self.repo_type = repo_type + self.repo_cache = dict() + self.wait_for_completion = wait_for_completion + + def check_repo_exists(self, repo_id): + try: + self.get_repo_config_by_id(repo_id) + except IndexError: + return False + else: + return True + + def compare_repo_distributor_config(self, repo_id, **kwargs): + repo_config = self.get_repo_config_by_id(repo_id) + + for distributor in repo_config['distributors']: + for key, value in kwargs.items(): + if key not in distributor['config'].keys(): + return False + + if not distributor['config'][key] == value: + return False + + return True + + def compare_repo_importer_config(self, repo_id, **kwargs): + repo_config = self.get_repo_config_by_id(repo_id) + + for importer in repo_config['importers']: + for key, value in kwargs.items(): + if value is not None: + if key not in importer['config'].keys(): + return False + + if not importer['config'][key] == value: + return False + + return True + + def create_repo( + self, + repo_id, + relative_url, + feed=None, + generate_sqlite=False, + serve_http=False, + serve_https=True, + proxy_host=None, + proxy_port=None, + proxy_username=None, + proxy_password=None, + repoview=False, + ssl_ca_cert=None, + ssl_client_cert=None, + ssl_client_key=None, + add_export_distributor=False + ): + url = "%s/pulp/api/v2/repositories/" % self.host + data = dict() + data['id'] = repo_id + data['distributors'] = [] + + if self.repo_type == 'rpm': + yum_distributor = dict() + yum_distributor['distributor_id'] = "yum_distributor" + yum_distributor['distributor_type_id'] = "yum_distributor" + yum_distributor['auto_publish'] = True + yum_distributor['distributor_config'] = dict() + yum_distributor['distributor_config']['http'] = serve_http + yum_distributor['distributor_config']['https'] = serve_https + yum_distributor['distributor_config']['relative_url'] = relative_url + yum_distributor['distributor_config']['repoview'] = repoview + yum_distributor['distributor_config']['generate_sqlite'] = generate_sqlite or repoview + data['distributors'].append(yum_distributor) + + if add_export_distributor: + export_distributor = dict() + export_distributor['distributor_id'] = "export_distributor" + export_distributor['distributor_type_id'] = "export_distributor" + export_distributor['auto_publish'] = False + export_distributor['distributor_config'] = dict() + export_distributor['distributor_config']['http'] = serve_http + export_distributor['distributor_config']['https'] = serve_https + export_distributor['distributor_config']['relative_url'] = relative_url + export_distributor['distributor_config']['repoview'] = repoview + export_distributor['distributor_config']['generate_sqlite'] = generate_sqlite or repoview + data['distributors'].append(export_distributor) + + data['importer_type_id'] = "yum_importer" + data['importer_config'] = dict() + + if feed: + data['importer_config']['feed'] = feed + + if proxy_host: + data['importer_config']['proxy_host'] = proxy_host + + if proxy_port: + data['importer_config']['proxy_port'] = proxy_port + + 
if proxy_username: + data['importer_config']['proxy_username'] = proxy_username + + if proxy_password: + data['importer_config']['proxy_password'] = proxy_password + + if ssl_ca_cert: + data['importer_config']['ssl_ca_cert'] = ssl_ca_cert + + if ssl_client_cert: + data['importer_config']['ssl_client_cert'] = ssl_client_cert + + if ssl_client_key: + data['importer_config']['ssl_client_key'] = ssl_client_key + + data['notes'] = { + "_repo-type": "rpm-repo" + } + + response, info = fetch_url( + self.module, + url, + data=json.dumps(data), + method='POST') + + if info['status'] != 201: + self.module.fail_json( + msg="Failed to create repo.", + status_code=info['status'], + response=info['msg'], + url=url) + else: + return True + + def delete_repo(self, repo_id): + url = "%s/pulp/api/v2/repositories/%s/" % (self.host, repo_id) + response, info = fetch_url(self.module, url, data='', method='DELETE') + + if info['status'] != 202: + self.module.fail_json( + msg="Failed to delete repo.", + status_code=info['status'], + response=info['msg'], + url=url) + + if self.wait_for_completion: + self.verify_tasks_completed(json.load(response)) + + return True + + def get_repo_config_by_id(self, repo_id): + if repo_id not in self.repo_cache.keys(): + repo_array = [x for x in self.repo_list if x['id'] == repo_id] + self.repo_cache[repo_id] = repo_array[0] + + return self.repo_cache[repo_id] + + def publish_repo(self, repo_id, publish_distributor): + url = "%s/pulp/api/v2/repositories/%s/actions/publish/" % (self.host, repo_id) + + # If there's no distributor specified, we will publish them all + if publish_distributor is None: + repo_config = self.get_repo_config_by_id(repo_id) + + for distributor in repo_config['distributors']: + data = dict() + data['id'] = distributor['id'] + response, info = fetch_url( + self.module, + url, + data=json.dumps(data), + method='POST') + + if info['status'] != 202: + self.module.fail_json( + msg="Failed to publish the repo.", + status_code=info['status'], + response=info['msg'], + url=url, + distributor=distributor['id']) + else: + data = dict() + data['id'] = publish_distributor + response, info = fetch_url( + self.module, + url, + data=json.dumps(data), + method='POST') + + if info['status'] != 202: + self.module.fail_json( + msg="Failed to publish the repo", + status_code=info['status'], + response=info['msg'], + url=url, + distributor=publish_distributor) + + if self.wait_for_completion: + self.verify_tasks_completed(json.load(response)) + + return True + + def sync_repo(self, repo_id): + url = "%s/pulp/api/v2/repositories/%s/actions/sync/" % (self.host, repo_id) + response, info = fetch_url(self.module, url, data='', method='POST') + + if info['status'] != 202: + self.module.fail_json( + msg="Failed to schedule a sync of the repo.", + status_code=info['status'], + response=info['msg'], + url=url) + + if self.wait_for_completion: + self.verify_tasks_completed(json.load(response)) + + return True + + def update_repo_distributor_config(self, repo_id, **kwargs): + url = "%s/pulp/api/v2/repositories/%s/distributors/" % (self.host, repo_id) + repo_config = self.get_repo_config_by_id(repo_id) + + for distributor in repo_config['distributors']: + distributor_url = "%s%s/" % (url, distributor['id']) + data = dict() + data['distributor_config'] = dict() + + for key, value in kwargs.items(): + data['distributor_config'][key] = value + + response, info = fetch_url( + self.module, + distributor_url, + data=json.dumps(data), + method='PUT') + + if info['status'] != 202: + 
self.module.fail_json( + msg="Failed to set the relative url for the repository.", + status_code=info['status'], + response=info['msg'], + url=url) + + def update_repo_importer_config(self, repo_id, **kwargs): + url = "%s/pulp/api/v2/repositories/%s/importers/" % (self.host, repo_id) + data = dict() + importer_config = dict() + + for key, value in kwargs.items(): + if value is not None: + importer_config[key] = value + + data['importer_config'] = importer_config + + if self.repo_type == 'rpm': + data['importer_type_id'] = "yum_importer" + + response, info = fetch_url( + self.module, + url, + data=json.dumps(data), + method='POST') + + if info['status'] != 202: + self.module.fail_json( + msg="Failed to set the repo importer configuration", + status_code=info['status'], + response=info['msg'], + importer_config=importer_config, + url=url) + + def set_repo_list(self): + url = "%s/pulp/api/v2/repositories/?details=true" % self.host + response, info = fetch_url(self.module, url, method='GET') + + if info['status'] != 200: + self.module.fail_json( + msg="Request failed", + status_code=info['status'], + response=info['msg'], + url=url) + + self.repo_list = json.load(response) + + def verify_tasks_completed(self, response_dict): + for task in response_dict['spawned_tasks']: + task_url = "%s%s" % (self.host, task['_href']) + + while True: + response, info = fetch_url( + self.module, + task_url, + data='', + method='GET') + + if info['status'] != 200: + self.module.fail_json( + msg="Failed to check async task status.", + status_code=info['status'], + response=info['msg'], + url=task_url) + + task_dict = json.load(response) + + if task_dict['state'] == 'finished': + return True + + if task_dict['state'] == 'error': + self.module.fail_json(msg="Asynchronous task failed to complete.", error=task_dict['error']) + + sleep(2) + + +def main(): + argument_spec = url_argument_spec() + argument_spec.update( + add_export_distributor=dict(default=False, type='bool'), + feed=dict(), + generate_sqlite=dict(default=False, type='bool'), + feed_ca_cert=dict(aliases=['importer_ssl_ca_cert', 'ca_cert'], deprecated_aliases=[dict(name='ca_cert', version='2.14')]), + feed_client_cert=dict(aliases=['importer_ssl_client_cert']), + feed_client_key=dict(aliases=['importer_ssl_client_key']), + name=dict(required=True, aliases=['repo']), + proxy_host=dict(), + proxy_port=dict(), + proxy_username=dict(), + proxy_password=dict(no_log=True), + publish_distributor=dict(), + pulp_host=dict(default="https://127.0.0.1"), + relative_url=dict(), + repo_type=dict(default="rpm"), + repoview=dict(default=False, type='bool'), + serve_http=dict(default=False, type='bool'), + serve_https=dict(default=True, type='bool'), + state=dict( + default="present", + choices=['absent', 'present', 'sync', 'publish']), + wait_for_completion=dict(default=False, type="bool")) + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True) + + add_export_distributor = module.params['add_export_distributor'] + feed = module.params['feed'] + generate_sqlite = module.params['generate_sqlite'] + importer_ssl_ca_cert = module.params['feed_ca_cert'] + importer_ssl_client_cert = module.params['feed_client_cert'] + if importer_ssl_client_cert is None and module.params['client_cert'] is not None: + importer_ssl_client_cert = module.params['client_cert'] + module.deprecate("To specify client certificates to be used with the repo to sync, and not for communication with the " + "Pulp instance, use the new options `feed_client_cert` and 
`feed_client_key` (available since " + "Ansible 2.9.2). Until Ansible 2.14, the default value for `feed_client_cert` will be taken from " + "`client_cert` if only the latter is specified", version="2.14") + importer_ssl_client_key = module.params['feed_client_key'] + if importer_ssl_client_key is None and module.params['client_key'] is not None: + importer_ssl_client_key = module.params['client_key'] + module.deprecate("In Ansible 2.9.2 `feed_client_key` option was added. Until 2.14 the default value will come from client_key option", version="2.14") + proxy_host = module.params['proxy_host'] + proxy_port = module.params['proxy_port'] + proxy_username = module.params['proxy_username'] + proxy_password = module.params['proxy_password'] + publish_distributor = module.params['publish_distributor'] + pulp_host = module.params['pulp_host'] + relative_url = module.params['relative_url'] + repo = module.params['name'] + repo_type = module.params['repo_type'] + repoview = module.params['repoview'] + serve_http = module.params['serve_http'] + serve_https = module.params['serve_https'] + state = module.params['state'] + wait_for_completion = module.params['wait_for_completion'] + + if (state == 'present') and (not relative_url): + module.fail_json(msg="When state is present, relative_url is required.") + + # Ensure that the importer_ssl_* is the content and not a file path + if importer_ssl_ca_cert is not None: + importer_ssl_ca_cert_file_path = os.path.abspath(importer_ssl_ca_cert) + if os.path.isfile(importer_ssl_ca_cert_file_path): + importer_ssl_ca_cert_file_object = open(importer_ssl_ca_cert_file_path, 'r') + try: + importer_ssl_ca_cert = importer_ssl_ca_cert_file_object.read() + finally: + importer_ssl_ca_cert_file_object.close() + + if importer_ssl_client_cert is not None: + importer_ssl_client_cert_file_path = os.path.abspath(importer_ssl_client_cert) + if os.path.isfile(importer_ssl_client_cert_file_path): + importer_ssl_client_cert_file_object = open(importer_ssl_client_cert_file_path, 'r') + try: + importer_ssl_client_cert = importer_ssl_client_cert_file_object.read() + finally: + importer_ssl_client_cert_file_object.close() + + if importer_ssl_client_key is not None: + importer_ssl_client_key_file_path = os.path.abspath(importer_ssl_client_key) + if os.path.isfile(importer_ssl_client_key_file_path): + importer_ssl_client_key_file_object = open(importer_ssl_client_key_file_path, 'r') + try: + importer_ssl_client_key = importer_ssl_client_key_file_object.read() + finally: + importer_ssl_client_key_file_object.close() + + server = pulp_server(module, pulp_host, repo_type, wait_for_completion=wait_for_completion) + server.set_repo_list() + repo_exists = server.check_repo_exists(repo) + + changed = False + + if state == 'absent' and repo_exists: + if not module.check_mode: + server.delete_repo(repo) + + changed = True + + if state == 'sync': + if not repo_exists: + module.fail_json(msg="Repository was not found. The repository can not be synced.") + + if not module.check_mode: + server.sync_repo(repo) + + changed = True + + if state == 'publish': + if not repo_exists: + module.fail_json(msg="Repository was not found. 
The repository can not be published.") + + if not module.check_mode: + server.publish_repo(repo, publish_distributor) + + changed = True + + if state == 'present': + if not repo_exists: + if not module.check_mode: + server.create_repo( + repo_id=repo, + relative_url=relative_url, + feed=feed, + generate_sqlite=generate_sqlite, + serve_http=serve_http, + serve_https=serve_https, + proxy_host=proxy_host, + proxy_port=proxy_port, + proxy_username=proxy_username, + proxy_password=proxy_password, + repoview=repoview, + ssl_ca_cert=importer_ssl_ca_cert, + ssl_client_cert=importer_ssl_client_cert, + ssl_client_key=importer_ssl_client_key, + add_export_distributor=add_export_distributor) + + changed = True + + else: + # Check to make sure all the settings are correct + # The importer config gets overwritten on set and not updated, so + # we set the whole config at the same time. + if not server.compare_repo_importer_config( + repo, + feed=feed, + proxy_host=proxy_host, + proxy_port=proxy_port, + proxy_username=proxy_username, + proxy_password=proxy_password, + ssl_ca_cert=importer_ssl_ca_cert, + ssl_client_cert=importer_ssl_client_cert, + ssl_client_key=importer_ssl_client_key + ): + if not module.check_mode: + server.update_repo_importer_config( + repo, + feed=feed, + proxy_host=proxy_host, + proxy_port=proxy_port, + proxy_username=proxy_username, + proxy_password=proxy_password, + ssl_ca_cert=importer_ssl_ca_cert, + ssl_client_cert=importer_ssl_client_cert, + ssl_client_key=importer_ssl_client_key) + + changed = True + + if relative_url is not None: + if not server.compare_repo_distributor_config( + repo, + relative_url=relative_url + ): + if not module.check_mode: + server.update_repo_distributor_config( + repo, + relative_url=relative_url) + + changed = True + + if not server.compare_repo_distributor_config(repo, generate_sqlite=generate_sqlite): + if not module.check_mode: + server.update_repo_distributor_config(repo, generate_sqlite=generate_sqlite) + + changed = True + + if not server.compare_repo_distributor_config(repo, repoview=repoview): + if not module.check_mode: + server.update_repo_distributor_config(repo, repoview=repoview) + + changed = True + + if not server.compare_repo_distributor_config(repo, http=serve_http): + if not module.check_mode: + server.update_repo_distributor_config(repo, http=serve_http) + + changed = True + + if not server.compare_repo_distributor_config(repo, https=serve_https): + if not module.check_mode: + server.update_repo_distributor_config(repo, https=serve_https) + + changed = True + + module.exit_json(changed=changed, repo=repo) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/packaging/os/redhat_subscription.py b/plugins/modules/packaging/os/redhat_subscription.py new file mode 100644 index 0000000000..0f5aea185a --- /dev/null +++ b/plugins/modules/packaging/os/redhat_subscription.py @@ -0,0 +1,914 @@ +#!/usr/bin/python + +# James Laska (jlaska@redhat.com) +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: redhat_subscription +short_description: Manage registration and subscriptions to RHSM using the C(subscription-manager) command +description: + - Manage registration and subscription to the Red Hat Subscription Management entitlement platform using the 
C(subscription-manager) command +author: "Barnaby Court (@barnabycourt)" +notes: + - In order to register a system, subscription-manager requires either a username and password, or an activationkey and an Organization ID. + - Since 2.5, values for I(server_hostname), I(server_insecure), I(rhsm_baseurl), + I(server_proxy_hostname), I(server_proxy_port), I(server_proxy_user) and + I(server_proxy_password) are no longer taken from the C(/etc/rhsm/rhsm.conf) + config file and default to None. +requirements: + - subscription-manager +options: + state: + description: + - whether to register and subscribe (C(present)), or unregister (C(absent)) a system + choices: [ "present", "absent" ] + default: "present" + username: + description: + - access.redhat.com or Sat6 username + password: + description: + - access.redhat.com or Sat6 password + server_hostname: + description: + - Specify an alternative Red Hat Subscription Management or Sat6 server + server_insecure: + description: + - Enable or disable https server certificate verification when connecting to C(server_hostname) + rhsm_baseurl: + description: + - Specify CDN baseurl + rhsm_repo_ca_cert: + description: + - Specify an alternative location for a CA certificate for CDN + server_proxy_hostname: + description: + - Specify an HTTP proxy hostname + server_proxy_port: + description: + - Specify an HTTP proxy port + server_proxy_user: + description: + - Specify a user for HTTP proxy with basic authentication + server_proxy_password: + description: + - Specify a password for HTTP proxy with basic authentication + auto_attach: + description: + - Upon successful registration, auto-consume available subscriptions + - Added in favor of deprecated autosubscribe in 2.5. + type: bool + default: 'no' + aliases: [autosubscribe] + activationkey: + description: + - supply an activation key for use with registration + org_id: + description: + - Organization ID to use in conjunction with activationkey + environment: + description: + - Register with a specific environment in the destination org. Used with Red Hat Satellite 6.x or Katello + pool: + description: + - | + Specify a subscription pool name to consume. Regular expressions accepted. Use I(pool_ids) instead if + possible, as it is much faster. Mutually exclusive with I(pool_ids). + default: '^$' + pool_ids: + description: + - | + Specify subscription pool IDs to consume. Prefer over I(pool) when possible as it is much faster. + A pool ID may be specified as a C(string) - just the pool ID (ex. C(0123456789abcdef0123456789abcdef)), + or as a C(dict) with the pool ID as the key, and a quantity as the value (ex. + C(0123456789abcdef0123456789abcdef: 2)). If the quantity is provided, it is used to consume multiple + entitlements from a pool (the pool must support this). Mutually exclusive with I(pool). + default: [] + consumer_type: + description: + - The type of unit to register, defaults to system + consumer_name: + description: + - Name of the system to register, defaults to the hostname + consumer_id: + description: + - | + References an existing consumer ID to resume using a previous registration + for this system. If the system's identity certificate is lost or corrupted, + this option allows it to resume using its previous identity and subscriptions. + The default is to not specify a consumer ID so a new ID is created.
+ force_register: + description: + - Register the system even if it is already registered + type: bool + default: 'no' + release: + description: + - Set a release version + syspurpose: + description: + - Set syspurpose attributes in file C(/etc/rhsm/syspurpose/syspurpose.json) + and synchronize these attributes with RHSM server. Syspurpose attributes help attach + the most appropriate subscriptions to the system automatically. When C(syspurpose.json) file + already contains some attributes, then new attributes overwrite existing attributes. + When some attribute is not listed in the new list of attributes, the existing + attribute will be removed from C(syspurpose.json) file. Unknown attributes are ignored. + type: dict + default: {} + suboptions: + usage: + description: Syspurpose attribute usage + role: + description: Syspurpose attribute role + service_level_agreement: + description: Syspurpose attribute service_level_agreement + addons: + description: Syspurpose attribute addons + type: list + sync: + description: + - When this option is true, then syspurpose attributes are synchronized with + RHSM server immediately. When this option is false, then syspurpose attributes + will be synchronized with RHSM server by rhsmcertd daemon. + type: bool + default: False +''' + +EXAMPLES = ''' +- name: Register as user (joe_user) with password (somepass) and auto-subscribe to available content. + redhat_subscription: + state: present + username: joe_user + password: somepass + auto_attach: true + +- name: Same as above but subscribe to a specific pool by ID. + redhat_subscription: + state: present + username: joe_user + password: somepass + pool_ids: 0123456789abcdef0123456789abcdef + +- name: Register and subscribe to multiple pools. + redhat_subscription: + state: present + username: joe_user + password: somepass + pool_ids: + - 0123456789abcdef0123456789abcdef + - 1123456789abcdef0123456789abcdef + +- name: Same as above but consume multiple entitlements. + redhat_subscription: + state: present + username: joe_user + password: somepass + pool_ids: + - 0123456789abcdef0123456789abcdef: 2 + - 1123456789abcdef0123456789abcdef: 4 + +- name: Register and pull existing system data. + redhat_subscription: + state: present + username: joe_user + password: somepass + consumer_id: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + +- name: Register with activationkey and consume subscriptions matching Red Hat Enterprise Server or Red Hat Virtualization + redhat_subscription: + state: present + activationkey: 1-222333444 + org_id: 222333444 + pool: '^(Red Hat Enterprise Server|Red Hat Virtualization)$' + +- name: Update the consumed subscriptions from the previous example (remove Red Hat Virtualization subscription) + redhat_subscription: + state: present + activationkey: 1-222333444 + org_id: 222333444 + pool: '^Red Hat Enterprise Server$' + +- name: Register as user credentials into given environment (against Red Hat Satellite 6.x), and auto-subscribe. 
+ redhat_subscription: + state: present + username: joe_user + password: somepass + environment: Library + auto_attach: true + +- name: Register as user (joe_user) with password (somepass) and a specific release + redhat_subscription: + state: present + username: joe_user + password: somepass + release: 7.4 + +- name: Register as user (joe_user) with password (somepass), set syspurpose attributes and synchronize them with server + redhat_subscription: + state: present + username: joe_user + password: somepass + auto_attach: true + syspurpose: + usage: "Production" + role: "Red Hat Enterprise Server" + service_level_agreement: "Premium" + addons: + - addon1 + - addon2 + sync: true +''' + +RETURN = ''' +subscribed_pool_ids: + description: List of pool IDs to which system is now subscribed + returned: success + type: complex + sample: { + "8a85f9815ab905d3015ab928c7005de4": "1" + } +''' + +from os.path import isfile +from os import unlink +import re +import shutil +import tempfile +import json + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_native +from ansible.module_utils.six.moves import configparser + + +SUBMAN_CMD = None + + +class RegistrationBase(object): + + REDHAT_REPO = "/etc/yum.repos.d/redhat.repo" + + def __init__(self, module, username=None, password=None): + self.module = module + self.username = username + self.password = password + + def configure(self): + raise NotImplementedError("Must be implemented by a sub-class") + + def enable(self): + # Remove any existing redhat.repo + if isfile(self.REDHAT_REPO): + unlink(self.REDHAT_REPO) + + def register(self): + raise NotImplementedError("Must be implemented by a sub-class") + + def unregister(self): + raise NotImplementedError("Must be implemented by a sub-class") + + def unsubscribe(self): + raise NotImplementedError("Must be implemented by a sub-class") + + def update_plugin_conf(self, plugin, enabled=True): + plugin_conf = '/etc/yum/pluginconf.d/%s.conf' % plugin + + if isfile(plugin_conf): + tmpfd, tmpfile = tempfile.mkstemp() + shutil.copy2(plugin_conf, tmpfile) + cfg = configparser.ConfigParser() + cfg.read([tmpfile]) + + if enabled: + cfg.set('main', 'enabled', '1') + else: + cfg.set('main', 'enabled', '0') + + fd = open(tmpfile, 'w+') + cfg.write(fd) + fd.close() + self.module.atomic_move(tmpfile, plugin_conf) + + def subscribe(self, **kwargs): + raise NotImplementedError("Must be implemented by a sub-class") + + +class Rhsm(RegistrationBase): + def __init__(self, module, username=None, password=None): + RegistrationBase.__init__(self, module, username, password) + self.module = module + + def enable(self): + ''' + Enable the system to receive updates from subscription-manager. + This involves updating affected yum plugins and removing any + conflicting yum repositories. + ''' + RegistrationBase.enable(self) + self.update_plugin_conf('rhnplugin', False) + self.update_plugin_conf('subscription-manager', True) + + def configure(self, **kwargs): + ''' + Configure the system as directed for registration with RHSM + Raises: + * Exception - if error occurs while running command + ''' + + args = [SUBMAN_CMD, 'config'] + + # Pass supplied **kwargs as parameters to subscription-manager. Ignore + # non-configuration parameters and replace '_' with '.'. For example, + # 'server_hostname' becomes '--server.hostname'. 
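+        # Editor's note - an illustrative sketch, not part of the original
+        # commit: given hypothetical parameters such as
+        #     server_hostname='subscription.example.com'
+        #     rhsm_baseurl='https://cdn.example.com'
+        #     username='joe'
+        # the loop below would produce
+        #     --server.hostname=subscription.example.com
+        #     --rhsm.baseurl=https://cdn.example.com
+        # while 'username' is skipped because it lacks a server_/rhsm_ prefix.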
+        options = []
+        for k, v in sorted(kwargs.items()):
+            if re.search(r'^(server|rhsm)_', k) and v is not None:
+                options.append('--%s=%s' % (k.replace('_', '.', 1), v))
+
+        # When there is nothing to configure, skip running the config
+        # command; it would only print the current contents of the
+        # configuration file.
+        if len(options) == 0:
+            return
+
+        args.extend(options)
+
+        self.module.run_command(args, check_rc=True)
+
+    @property
+    def is_registered(self):
+        '''
+            Determine whether the current system is registered.
+            Returns:
+              * Boolean - whether the current system is currently registered to
+                          RHSM.
+        '''
+
+        args = [SUBMAN_CMD, 'identity']
+        rc, stdout, stderr = self.module.run_command(args, check_rc=False)
+        if rc == 0:
+            return True
+        else:
+            return False
+
+    def register(self, username, password, auto_attach, activationkey, org_id,
+                 consumer_type, consumer_name, consumer_id, force_register, environment,
+                 rhsm_baseurl, server_insecure, server_hostname, server_proxy_hostname,
+                 server_proxy_port, server_proxy_user, server_proxy_password, release):
+        '''
+            Register the current system to the provided RHSM or Sat6 server
+            Raises:
+              * Exception - if error occurs while running command
+        '''
+        args = [SUBMAN_CMD, 'register']
+
+        # Generate command arguments
+        if force_register:
+            args.extend(['--force'])
+
+        if rhsm_baseurl:
+            args.extend(['--baseurl', rhsm_baseurl])
+
+        if server_insecure:
+            args.extend(['--insecure'])
+
+        if server_hostname:
+            args.extend(['--serverurl', server_hostname])
+
+        if org_id:
+            args.extend(['--org', org_id])
+
+        if server_proxy_hostname and server_proxy_port:
+            args.extend(['--proxy', server_proxy_hostname + ':' + server_proxy_port])
+
+        if server_proxy_user:
+            args.extend(['--proxyuser', server_proxy_user])
+
+        if server_proxy_password:
+            args.extend(['--proxypassword', server_proxy_password])
+
+        if activationkey:
+            args.extend(['--activationkey', activationkey])
+        else:
+            if auto_attach:
+                args.append('--auto-attach')
+            if username:
+                args.extend(['--username', username])
+            if password:
+                args.extend(['--password', password])
+            if consumer_type:
+                args.extend(['--type', consumer_type])
+            if consumer_name:
+                args.extend(['--name', consumer_name])
+            if consumer_id:
+                args.extend(['--consumerid', consumer_id])
+            if environment:
+                args.extend(['--environment', environment])
+
+        if release:
+            args.extend(['--release', release])
+
+        rc, stdout, stderr = self.module.run_command(args, check_rc=True, expand_user_and_vars=False)
+
+    def unsubscribe(self, serials=None):
+        '''
+            Unsubscribe a system from subscribed channels
+            Args:
+              serials(list or None): list of serials to unsubscribe. If
+                                     serials is None, all subscribed channels
+                                     will be removed; an empty list removes
+                                     nothing.
+            Raises:
+              * Exception - if error occurs while running command
+        '''
+        items = []
+        if serials is not None and serials:
+            items = ["--serial=%s" % s for s in serials]
+        if serials is None:
+            items = ["--all"]
+
+        if items:
+            args = [SUBMAN_CMD, 'unsubscribe'] + items
+            rc, stdout, stderr = self.module.run_command(args, check_rc=True)
+        return serials
+
+    def unregister(self):
+        '''
+            Unregister a currently registered system
+            Raises:
+              * Exception - if error occurs while running command
+        '''
+        args = [SUBMAN_CMD, 'unregister']
+        rc, stdout, stderr = self.module.run_command(args, check_rc=True)
+        self.update_plugin_conf('rhnplugin', False)
+        self.update_plugin_conf('subscription-manager', False)
+
+    def subscribe(self, regexp):
+        '''
+            Subscribe current system to available pools matching the specified
+            regular expression. It matches regexp against available pool ids first.
+            If any pool ids match, subscribe to those pools and return.
+
+            If no pool ids match, then match regexp against available pool product
+            names. Note this can still easily match many pools. Then subscribe
+            to those pools.
+
+            Since a pool id is a more specific match, we only fall back to matching
+            against names if we didn't match any pool ids.
+
+            Raises:
+              * Exception - if error occurs while running command
+        '''
+        # See https://github.com/ansible/ansible/issues/19466
+
+        # subscribe to pools whose pool id matches regexp (and only the pool id)
+        subscribed_pool_ids = self.subscribe_pool(regexp)
+
+        # If we found any matches, we are done
+        # Don't attempt to match pools by product name
+        if subscribed_pool_ids:
+            return subscribed_pool_ids
+
+        # We didn't match any pool ids.
+        # Now try subscribing to pools based on product name match
+        # Note: This can match lots of product names.
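+        # Editor's note - illustrative example, not from the original commit:
+        # with pool='^Red Hat' the pool-id pass above normally matches nothing
+        # (pool IDs are hex strings), so the product-name fallback below may
+        # attach every pool whose product name starts with "Red Hat".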
+        subscribed_by_product_pool_ids = self.subscribe_product(regexp)
+        if subscribed_by_product_pool_ids:
+            return subscribed_by_product_pool_ids
+
+        # no matches
+        return []
+
+    def subscribe_by_pool_ids(self, pool_ids):
+        """
+        Try to subscribe to the list of pool IDs
+        """
+        available_pools = RhsmPools(self.module)
+
+        available_pool_ids = [p.get_pool_id() for p in available_pools]
+
+        for pool_id, quantity in sorted(pool_ids.items()):
+            if pool_id in available_pool_ids:
+                args = [SUBMAN_CMD, 'attach', '--pool', pool_id]
+                if quantity is not None:
+                    args.extend(['--quantity', to_native(quantity)])
+                rc, stdout, stderr = self.module.run_command(args, check_rc=True)
+            else:
+                self.module.fail_json(msg='Pool ID: %s not in list of available pools' % pool_id)
+        return pool_ids
+
+    def subscribe_pool(self, regexp):
+        '''
+            Subscribe current system to available pools matching the specified
+            regular expression
+            Raises:
+              * Exception - if error occurs while running command
+        '''
+
+        # Available pools ready for subscription
+        available_pools = RhsmPools(self.module)
+
+        subscribed_pool_ids = []
+        for pool in available_pools.filter_pools(regexp):
+            pool.subscribe()
+            subscribed_pool_ids.append(pool.get_pool_id())
+        return subscribed_pool_ids
+
+    def subscribe_product(self, regexp):
+        '''
+            Subscribe current system to available pools matching the specified
+            regular expression
+            Raises:
+              * Exception - if error occurs while running command
+        '''
+
+        # Available pools ready for subscription
+        available_pools = RhsmPools(self.module)
+
+        subscribed_pool_ids = []
+        for pool in available_pools.filter_products(regexp):
+            pool.subscribe()
+            subscribed_pool_ids.append(pool.get_pool_id())
+        return subscribed_pool_ids
+
+    def update_subscriptions(self, regexp):
+        changed = False
+        consumed_pools = RhsmPools(self.module, consumed=True)
+        pool_ids_to_keep = [p.get_pool_id() for p in consumed_pools.filter_pools(regexp)]
+        pool_ids_to_keep.extend([p.get_pool_id() for p in consumed_pools.filter_products(regexp)])
+
+        serials_to_remove = [p.Serial for p in consumed_pools if p.get_pool_id() not in pool_ids_to_keep]
+        serials = self.unsubscribe(serials=serials_to_remove)
+
+        subscribed_pool_ids = self.subscribe(regexp)
+
+        if subscribed_pool_ids or serials:
+            changed = True
+        return {'changed': changed, 'subscribed_pool_ids': subscribed_pool_ids,
+                'unsubscribed_serials': serials}
+
+    def update_subscriptions_by_pool_ids(self, pool_ids):
+        changed = False
+        consumed_pools = RhsmPools(self.module, consumed=True)
+
+        existing_pools = {}
+        for p in consumed_pools:
+            existing_pools[p.get_pool_id()] = p.QuantityUsed
+
+        serials_to_remove = [p.Serial for p in consumed_pools if pool_ids.get(p.get_pool_id(), 0) != p.QuantityUsed]
+        serials = self.unsubscribe(serials=serials_to_remove)
+
+        missing_pools = {}
+        for pool_id, quantity in sorted(pool_ids.items()):
+            if existing_pools.get(pool_id, 0) != quantity:
+                missing_pools[pool_id] = quantity
+
+        self.subscribe_by_pool_ids(missing_pools)
+
+        if missing_pools or serials:
+            changed = True
+        return {'changed': changed, 'subscribed_pool_ids': list(missing_pools.keys()),
+                'unsubscribed_serials': serials}
+
+    def sync_syspurpose(self):
+        """
+        Try to synchronize syspurpose attributes with server
+        """
+        args = [SUBMAN_CMD, 'status']
+        rc, stdout, stderr = self.module.run_command(args, check_rc=False)
+
+
+class RhsmPool(object):
+    '''
+        Convenience class for housing subscription information
+    '''
+
+    def __init__(self, module, **kwargs):
+        self.module = module
+        for k, v in kwargs.items():
+            setattr(self, k, v)
+
+    def __str__(self):
+        return str(self.__getattribute__('_name'))
+
+    def get_pool_id(self):
+        # Prefer 'PoolId' and fall back to 'PoolID'. A plain
+        # getattr(self, 'PoolId', getattr(self, 'PoolID')) would evaluate the
+        # fallback eagerly and raise AttributeError whenever 'PoolID' is
+        # absent, even if 'PoolId' exists.
+        try:
+            return getattr(self, 'PoolId')
+        except AttributeError:
+            return getattr(self, 'PoolID')
+
+    def subscribe(self):
+        args = "subscription-manager attach --pool %s" % self.get_pool_id()
+        rc, stdout, stderr = self.module.run_command(args, check_rc=True)
+        if rc == 0:
+            return True
+        else:
+            return False
+
+
+class RhsmPools(object):
+    """
+        This class is used for manipulating pool subscriptions with RHSM
+    """
+
+    def __init__(self, module, consumed=False):
+        self.module = module
+        self.products = self._load_product_list(consumed)
+
+    def __iter__(self):
+        return self.products.__iter__()
+
+    def _load_product_list(self, consumed=False):
+        """
+            Load the list of all available or consumed pools for the system
+            into a data structure
+
+            Args:
+                consumed(bool): if True list consumed pools, else list available pools (default False)
+        """
+        args = "subscription-manager list"
+        if consumed:
+            args += " --consumed"
+        else:
+            args += " --available"
+        lang_env = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C')
+        rc, stdout, stderr = self.module.run_command(args, check_rc=True, environ_update=lang_env)
+
+        products = []
+        for line in stdout.split('\n'):
+            # Remove leading+trailing whitespace
+            line = line.strip()
+            # An empty line implies the end of an output group
+            if len(line) == 0:
+                continue
+            # If a colon ':' is found, parse
+            elif ':' in line:
+                (key, value) = line.split(':', 1)
+                key = key.strip().replace(" ", "")  # To unify
+                value = value.strip()
+                if key in ['ProductName', 'SubscriptionName']:
+                    # Remember the name for later processing
+                    products.append(RhsmPool(self.module, _name=value, key=value))
+                elif products:
+                    # Associate value with most recently recorded product
+                    products[-1].__setattr__(key, value)
+                # FIXME - log some warning?
+                # else:
+                #     warnings.warn("Unhandled subscription key/value: %s/%s" % (key,value))
+        return products
+
+    def filter_pools(self, regexp='^$'):
+        '''
+            Yield the RhsmPools whose pool id matches the provided regular expression
+        '''
+        r = re.compile(regexp)
+        for product in self.products:
+            if r.search(product.get_pool_id()):
+                yield product
+
+    def filter_products(self, regexp='^$'):
+        '''
+            Yield the RhsmPools whose product name matches the provided regular expression
+        '''
+        r = re.compile(regexp)
+        for product in self.products:
+            if r.search(product._name):
+                yield product
+
+
+class SysPurpose(object):
+    """
+        This class is used for reading and writing to the syspurpose.json file
+    """
+
+    SYSPURPOSE_FILE_PATH = "/etc/rhsm/syspurpose/syspurpose.json"
+
+    ALLOWED_ATTRIBUTES = ['role', 'usage', 'service_level_agreement', 'addons']
+
+    def __init__(self, path=None):
+        """
+        Initialize class used for reading the syspurpose json file
+        """
+        self.path = path or self.SYSPURPOSE_FILE_PATH
+
+    def update_syspurpose(self, new_syspurpose):
+        """
+        Try to update the current syspurpose with new attributes from new_syspurpose
+        """
+        syspurpose = {}
+        syspurpose_changed = False
+        for key, value in new_syspurpose.items():
+            if key in self.ALLOWED_ATTRIBUTES:
+                if value is not None:
+                    syspurpose[key] = value
+            elif key == 'sync':
+                pass
+            else:
+                raise KeyError("Attribute: %s not in list of allowed attributes: %s" %
+                               (key, self.ALLOWED_ATTRIBUTES))
+        current_syspurpose = self._read_syspurpose()
+        if current_syspurpose != syspurpose:
+            syspurpose_changed = True
+            # Update current syspurpose with new values
+            current_syspurpose.update(syspurpose)
+            # When some key is not listed in the new syspurpose, delete it from
+            # the current syspurpose, but ignore custom attributes created by
+            # the user (e.g. "foo": "bar")
+            for key in list(current_syspurpose):
+                if key in self.ALLOWED_ATTRIBUTES and key not in syspurpose:
+                    del current_syspurpose[key]
+            self._write_syspurpose(current_syspurpose)
+        return syspurpose_changed
+
+    def _write_syspurpose(self, new_syspurpose):
+        """
+        Write the new syspurpose attributes to the json file.
+        """
+        with open(self.path, "w") as fp:
+            fp.write(json.dumps(new_syspurpose, indent=2, ensure_ascii=False, sort_keys=True))
+
+    def _read_syspurpose(self):
+        """
+        Read the current syspurpose from the json file.
+        """
+        current_syspurpose = {}
+        try:
+            with open(self.path, "r") as fp:
+                content = fp.read()
+        except IOError:
+            pass
+        else:
+            current_syspurpose = json.loads(content)
+        return current_syspurpose
+
+
+def main():
+
+    # Load RHSM configuration from file
+    rhsm = Rhsm(None)
+
+    # Note: the default values for parameters are:
+    # 'type': 'str', 'default': None, 'required': False
+    # So there is no need to repeat these values for each parameter.
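+    # Editor's sketch (assumption based on the comment above, not part of the
+    # original commit): the shorthand entries below, e.g.
+    #     'username': {},
+    # are equivalent to the long form
+    #     username=dict(type='str', default=None, required=False)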
+ module = AnsibleModule( + argument_spec={ + 'state': {'default': 'present', 'choices': ['present', 'absent']}, + 'username': {}, + 'password': {'no_log': True}, + 'server_hostname': {}, + 'server_insecure': {}, + 'rhsm_baseurl': {}, + 'rhsm_repo_ca_cert': {}, + 'auto_attach': {'aliases': ['autosubscribe'], 'type': 'bool'}, + 'activationkey': {'no_log': True}, + 'org_id': {}, + 'environment': {}, + 'pool': {'default': '^$'}, + 'pool_ids': {'default': [], 'type': 'list'}, + 'consumer_type': {}, + 'consumer_name': {}, + 'consumer_id': {}, + 'force_register': {'default': False, 'type': 'bool'}, + 'server_proxy_hostname': {}, + 'server_proxy_port': {}, + 'server_proxy_user': {}, + 'server_proxy_password': {'no_log': True}, + 'release': {}, + 'syspurpose': { + 'type': 'dict', + 'options': { + 'role': {}, + 'usage': {}, + 'service_level_agreement': {}, + 'addons': {'type': 'list'}, + 'sync': {'type': 'bool', 'default': False} + } + } + }, + required_together=[['username', 'password'], + ['server_proxy_hostname', 'server_proxy_port'], + ['server_proxy_user', 'server_proxy_password']], + mutually_exclusive=[['activationkey', 'username'], + ['activationkey', 'consumer_id'], + ['activationkey', 'environment'], + ['activationkey', 'autosubscribe'], + ['force', 'consumer_id'], + ['pool', 'pool_ids']], + required_if=[['state', 'present', ['username', 'activationkey'], True]], + ) + + rhsm.module = module + state = module.params['state'] + username = module.params['username'] + password = module.params['password'] + server_hostname = module.params['server_hostname'] + server_insecure = module.params['server_insecure'] + rhsm_baseurl = module.params['rhsm_baseurl'] + rhsm_repo_ca_cert = module.params['rhsm_repo_ca_cert'] + auto_attach = module.params['auto_attach'] + activationkey = module.params['activationkey'] + org_id = module.params['org_id'] + if activationkey and not org_id: + module.fail_json(msg='org_id is required when using activationkey') + environment = module.params['environment'] + pool = module.params['pool'] + pool_ids = {} + for value in module.params['pool_ids']: + if isinstance(value, dict): + if len(value) != 1: + module.fail_json(msg='Unable to parse pool_ids option.') + pool_id, quantity = list(value.items())[0] + else: + pool_id, quantity = value, None + pool_ids[pool_id] = quantity + consumer_type = module.params["consumer_type"] + consumer_name = module.params["consumer_name"] + consumer_id = module.params["consumer_id"] + force_register = module.params["force_register"] + server_proxy_hostname = module.params['server_proxy_hostname'] + server_proxy_port = module.params['server_proxy_port'] + server_proxy_user = module.params['server_proxy_user'] + server_proxy_password = module.params['server_proxy_password'] + release = module.params['release'] + syspurpose = module.params['syspurpose'] + + global SUBMAN_CMD + SUBMAN_CMD = module.get_bin_path('subscription-manager', True) + + syspurpose_changed = False + if syspurpose is not None: + try: + syspurpose_changed = SysPurpose().update_syspurpose(syspurpose) + except Exception as err: + module.fail_json(msg="Failed to update syspurpose attributes: %s" % to_native(err)) + + # Ensure system is registered + if state == 'present': + + # Register system + if rhsm.is_registered and not force_register: + if syspurpose and 'sync' in syspurpose and syspurpose['sync'] is True: + try: + rhsm.sync_syspurpose() + except Exception as e: + module.fail_json(msg="Failed to synchronize syspurpose attributes: %s" % to_native(e)) + if pool != '^$' or 
pool_ids: + try: + if pool_ids: + result = rhsm.update_subscriptions_by_pool_ids(pool_ids) + else: + result = rhsm.update_subscriptions(pool) + except Exception as e: + module.fail_json(msg="Failed to update subscriptions for '%s': %s" % (server_hostname, to_native(e))) + else: + module.exit_json(**result) + else: + if syspurpose_changed is True: + module.exit_json(changed=True, msg="Syspurpose attributes changed.") + else: + module.exit_json(changed=False, msg="System already registered.") + else: + try: + rhsm.enable() + rhsm.configure(**module.params) + rhsm.register(username, password, auto_attach, activationkey, org_id, + consumer_type, consumer_name, consumer_id, force_register, + environment, rhsm_baseurl, server_insecure, server_hostname, + server_proxy_hostname, server_proxy_port, server_proxy_user, server_proxy_password, release) + if syspurpose and 'sync' in syspurpose and syspurpose['sync'] is True: + rhsm.sync_syspurpose() + if pool_ids: + subscribed_pool_ids = rhsm.subscribe_by_pool_ids(pool_ids) + elif pool != '^$': + subscribed_pool_ids = rhsm.subscribe(pool) + else: + subscribed_pool_ids = [] + except Exception as e: + module.fail_json(msg="Failed to register with '%s': %s" % (server_hostname, to_native(e))) + else: + module.exit_json(changed=True, + msg="System successfully registered to '%s'." % server_hostname, + subscribed_pool_ids=subscribed_pool_ids) + + # Ensure system is *not* registered + if state == 'absent': + if not rhsm.is_registered: + module.exit_json(changed=False, msg="System already unregistered.") + else: + try: + rhsm.unsubscribe() + rhsm.unregister() + except Exception as e: + module.fail_json(msg="Failed to unregister: %s" % to_native(e)) + else: + module.exit_json(changed=True, msg="System successfully unregistered from %s." % server_hostname) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/packaging/os/rhn_channel.py b/plugins/modules/packaging/os/rhn_channel.py new file mode 100644 index 0000000000..de1cb9e30f --- /dev/null +++ b/plugins/modules/packaging/os/rhn_channel.py @@ -0,0 +1,162 @@ +#!/usr/bin/python + +# Copyright: (c) Vincent Van de Kussen +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: rhn_channel +short_description: Adds or removes Red Hat software channels +description: + - Adds or removes Red Hat software channels. +author: +- Vincent Van der Kussen (@vincentvdk) +notes: + - This module fetches the system id from RHN. + - This module doesn't support I(check_mode). +options: + name: + description: + - Name of the software channel. + required: true + sysname: + description: + - Name of the system as it is known in RHN/Satellite. + required: true + state: + description: + - Whether the channel should be present or not, taking action if the state is different from what is stated. + default: present + url: + description: + - The full URL to the RHN/Satellite API. + required: true + user: + description: + - RHN/Satellite login. + required: true + password: + description: + - RHN/Satellite password. 
+ required: true +''' + +EXAMPLES = ''' +- rhn_channel: + name: rhel-x86_64-server-v2vwin-6 + sysname: server01 + url: https://rhn.redhat.com/rpc/api + user: rhnuser + password: guessme + delegate_to: localhost +''' + +from ansible.module_utils._text import to_text +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.six.moves import xmlrpc_client + + +def get_systemid(client, session, sysname): + systems = client.system.listUserSystems(session) + for system in systems: + if system.get('name') == sysname: + idres = system.get('id') + idd = int(idres) + return idd + + +def subscribe_channels(channelname, client, session, sysname, sys_id): + channels = base_channels(client, session, sys_id) + channels.append(channelname) + return client.system.setChildChannels(session, sys_id, channels) + + +def unsubscribe_channels(channelname, client, session, sysname, sys_id): + channels = base_channels(client, session, sys_id) + channels.remove(channelname) + return client.system.setChildChannels(session, sys_id, channels) + + +def base_channels(client, session, sys_id): + basechan = client.channel.software.listSystemChannels(session, sys_id) + try: + chans = [item['label'] for item in basechan] + except KeyError: + chans = [item['channel_label'] for item in basechan] + return chans + + +def main(): + + module = AnsibleModule( + argument_spec=dict( + state=dict(type='str', default='present', choices=['present', 'absent']), + name=dict(type='str', required=True), + sysname=dict(type='str', required=True), + url=dict(type='str', required=True), + user=dict(type='str', required=True), + password=dict(type='str', required=True, aliases=['pwd'], no_log=True), + ) + ) + + state = module.params['state'] + channelname = module.params['name'] + systname = module.params['sysname'] + saturl = module.params['url'] + user = module.params['user'] + password = module.params['password'] + + # initialize connection + client = xmlrpc_client.ServerProxy(saturl) + try: + session = client.auth.login(user, password) + except Exception as e: + module.fail_json(msg="Unable to establish session with Satellite server: %s " % to_text(e)) + + if not session: + module.fail_json(msg="Failed to establish session with Satellite server.") + + # get systemid + try: + sys_id = get_systemid(client, session, systname) + except Exception as e: + module.fail_json(msg="Unable to get system id: %s " % to_text(e)) + + if not sys_id: + module.fail_json(msg="Failed to get system id.") + + # get channels for system + try: + chans = base_channels(client, session, sys_id) + except Exception as e: + module.fail_json(msg="Unable to get channel information: %s " % to_text(e)) + + try: + if state == 'present': + if channelname in chans: + module.exit_json(changed=False, msg="Channel %s already exists" % channelname) + else: + subscribe_channels(channelname, client, session, systname, sys_id) + module.exit_json(changed=True, msg="Channel %s added" % channelname) + + if state == 'absent': + if channelname not in chans: + module.exit_json(changed=False, msg="Not subscribed to channel %s." 
% channelname) + else: + unsubscribe_channels(channelname, client, session, systname, sys_id) + module.exit_json(changed=True, msg="Channel %s removed" % channelname) + except Exception as e: + module.fail_json(msg='Unable to %s channel (%s): %s' % ('add' if state == 'present' else 'remove', channelname, to_text(e))) + finally: + client.auth.logout(session) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/packaging/os/rhn_register.py b/plugins/modules/packaging/os/rhn_register.py new file mode 100644 index 0000000000..b4bf167d2f --- /dev/null +++ b/plugins/modules/packaging/os/rhn_register.py @@ -0,0 +1,434 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) James Laska +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = r''' +--- +module: rhn_register +short_description: Manage Red Hat Network registration using the C(rhnreg_ks) command +description: + - Manage registration to the Red Hat Network. +author: +- James Laska (@jlaska) +notes: + - This is for older Red Hat products. You probably want the M(redhat_subscription) module instead. + - In order to register a system, C(rhnreg_ks) requires either a username and password, or an activationkey. +requirements: + - rhnreg_ks + - either libxml2 or lxml +options: + state: + description: + - Whether to register (C(present)), or unregister (C(absent)) a system. + type: str + choices: [ absent, present ] + default: present + username: + description: + - Red Hat Network username. + type: str + password: + description: + - Red Hat Network password. + type: str + server_url: + description: + - Specify an alternative Red Hat Network server URL. + - The default is the current value of I(serverURL) from C(/etc/sysconfig/rhn/up2date). + type: str + activationkey: + description: + - Supply an activation key for use with registration. + type: str + profilename: + description: + - Supply an profilename for use with registration. + type: str + ca_cert: + description: + - Supply a custom ssl CA certificate file for use with registration. + type: path + aliases: [ sslcacert ] + systemorgid: + description: + - Supply an organizational id for use with registration. + type: str + channels: + description: + - Optionally specify a list of channels to subscribe to upon successful registration. + type: list + default: [] + enable_eus: + description: + - If C(no), extended update support will be requested. + type: bool + default: no + nopackages: + description: + - If C(yes), the registered node will not upload its installed packages information to Satellite server. 
+ type: bool + default: no +''' + +EXAMPLES = r''' +- name: Unregister system from RHN + rhn_register: + state: absent + username: joe_user + password: somepass + +- name: Register as user with password and auto-subscribe to available content + rhn_register: + state: present + username: joe_user + password: somepass + +- name: Register with activationkey and enable extended update support + rhn_register: + state: present + activationkey: 1-222333444 + enable_eus: yes + +- name: Register with activationkey and set a profilename which may differ from the hostname + rhn_register: + state: present + activationkey: 1-222333444 + profilename: host.example.com.custom + +- name: Register as user with password against a satellite server + rhn_register: + state: present + username: joe_user + password: somepass + server_url: https://xmlrpc.my.satellite/XMLRPC + +- name: Register as user with password and enable channels + rhn_register: + state: present + username: joe_user + password: somepass + channels: rhel-x86_64-server-6-foo-1,rhel-x86_64-server-6-bar-1 +''' + +RETURN = r''' +# Default return values +''' + +import os +import sys + +# Attempt to import rhn client tools +sys.path.insert(0, '/usr/share/rhn') +try: + import up2date_client + import up2date_client.config + HAS_UP2DATE_CLIENT = True +except ImportError: + HAS_UP2DATE_CLIENT = False + +# INSERT REDHAT SNIPPETS +from ansible_collections.community.general.plugins.module_utils import redhat +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.six.moves import urllib, xmlrpc_client + + +class Rhn(redhat.RegistrationBase): + + def __init__(self, module=None, username=None, password=None): + redhat.RegistrationBase.__init__(self, module, username, password) + self.config = self.load_config() + self.server = None + self.session = None + + def logout(self): + if self.session is not None: + self.server.auth.logout(self.session) + + def load_config(self): + ''' + Read configuration from /etc/sysconfig/rhn/up2date + ''' + if not HAS_UP2DATE_CLIENT: + return None + + config = up2date_client.config.initUp2dateConfig() + + return config + + @property + def server_url(self): + return self.config['serverURL'] + + @property + def hostname(self): + ''' + Return the non-xmlrpc RHN hostname. This is a convenience method + used for displaying a more readable RHN hostname. + + Returns: str + ''' + url = urllib.parse.urlparse(self.server_url) + return url[1].replace('xmlrpc.', '') + + @property + def systemid(self): + systemid = None + xpath_str = "//member[name='system_id']/value/string" + + if os.path.isfile(self.config['systemIdPath']): + fd = open(self.config['systemIdPath'], 'r') + xml_data = fd.read() + fd.close() + + # Ugh, xml parsing time ... + # First, try parsing with libxml2 ... + if systemid is None: + try: + import libxml2 + doc = libxml2.parseDoc(xml_data) + ctxt = doc.xpathNewContext() + systemid = ctxt.xpathEval(xpath_str)[0].content + doc.freeDoc() + ctxt.xpathFreeContext() + except ImportError: + pass + + # m-kay, let's try with lxml now ... + if systemid is None: + try: + from lxml import etree + root = etree.fromstring(xml_data) + systemid = root.xpath(xpath_str)[0].text + except ImportError: + raise Exception('"libxml2" or "lxml" is required for this module.') + + # Strip the 'ID-' prefix + if systemid is not None and systemid.startswith('ID-'): + systemid = systemid[3:] + + return int(systemid) + + @property + def is_registered(self): + ''' + Determine whether the current system is registered. 
+ + Returns: True|False + ''' + return os.path.isfile(self.config['systemIdPath']) + + def configure_server_url(self, server_url): + ''' + Configure server_url for registration + ''' + + self.config.set('serverURL', server_url) + self.config.save() + + def enable(self): + ''' + Prepare the system for RHN registration. This includes ... + * enabling the rhnplugin yum plugin + * disabling the subscription-manager yum plugin + ''' + redhat.RegistrationBase.enable(self) + self.update_plugin_conf('rhnplugin', True) + self.update_plugin_conf('subscription-manager', False) + + def register(self, enable_eus=False, activationkey=None, profilename=None, sslcacert=None, systemorgid=None, nopackages=False): + ''' + Register system to RHN. If enable_eus=True, extended update + support will be requested. + ''' + register_cmd = ['/usr/sbin/rhnreg_ks', '--force'] + if self.username: + register_cmd.extend(['--username', self.username, '--password', self.password]) + if self.server_url: + register_cmd.extend(['--serverUrl', self.server_url]) + if enable_eus: + register_cmd.append('--use-eus-channel') + if nopackages: + register_cmd.append('--nopackages') + if activationkey is not None: + register_cmd.extend(['--activationkey', activationkey]) + if profilename is not None: + register_cmd.extend(['--profilename', profilename]) + if sslcacert is not None: + register_cmd.extend(['--sslCACert', sslcacert]) + if systemorgid is not None: + register_cmd.extend(['--systemorgid', systemorgid]) + rc, stdout, stderr = self.module.run_command(register_cmd, check_rc=True) + + def api(self, method, *args): + ''' + Convenience RPC wrapper + ''' + if self.server is None: + if self.hostname != 'rhn.redhat.com': + url = "https://%s/rpc/api" % self.hostname + else: + url = "https://xmlrpc.%s/rpc/api" % self.hostname + self.server = xmlrpc_client.ServerProxy(url) + self.session = self.server.auth.login(self.username, self.password) + + func = getattr(self.server, method) + return func(self.session, *args) + + def unregister(self): + ''' + Unregister a previously registered system + ''' + + # Initiate RPC connection + self.api('system.deleteSystems', [self.systemid]) + + # Remove systemid file + os.unlink(self.config['systemIdPath']) + + def subscribe(self, channels): + if not channels: + return + + if self._is_hosted(): + current_channels = self.api('channel.software.listSystemChannels', self.systemid) + new_channels = [item['channel_label'] for item in current_channels] + new_channels.extend(channels) + return self.api('channel.software.setSystemChannels', self.systemid, list(new_channels)) + + else: + current_channels = self.api('channel.software.listSystemChannels', self.systemid) + current_channels = [item['label'] for item in current_channels] + new_base = None + new_childs = [] + for ch in channels: + if ch in current_channels: + continue + if self.api('channel.software.getDetails', ch)['parent_channel_label'] == '': + new_base = ch + else: + if ch not in new_childs: + new_childs.append(ch) + out_base = 0 + out_childs = 0 + + if new_base: + out_base = self.api('system.setBaseChannel', self.systemid, new_base) + + if new_childs: + out_childs = self.api('system.setChildChannels', self.systemid, new_childs) + + return out_base and out_childs + + def _is_hosted(self): + ''' + Return True if we are running against Hosted (rhn.redhat.com) or + False otherwise (when running against Satellite or Spacewalk) + ''' + return 'rhn.redhat.com' in self.hostname + + +def main(): + + module = AnsibleModule( + argument_spec=dict( + 
state=dict(type='str', default='present', choices=['absent', 'present']), + username=dict(type='str'), + password=dict(type='str', no_log=True), + server_url=dict(type='str'), + activationkey=dict(type='str', no_log=True), + profilename=dict(type='str'), + ca_cert=dict(type='path', aliases=['sslcacert']), + systemorgid=dict(type='str'), + enable_eus=dict(type='bool', default=False), + nopackages=dict(type='bool', default=False), + channels=dict(type='list', default=[]), + ), + # username/password is required for state=absent, or if channels is not empty + # (basically anything that uses self.api requires username/password) but it doesn't + # look like we can express that with required_if/required_together/mutually_exclusive + + # only username+password can be used for unregister + required_if=[['state', 'absent', ['username', 'password']]], + ) + + if not HAS_UP2DATE_CLIENT: + module.fail_json(msg="Unable to import up2date_client. Is 'rhn-client-tools' installed?") + + server_url = module.params['server_url'] + username = module.params['username'] + password = module.params['password'] + + state = module.params['state'] + activationkey = module.params['activationkey'] + profilename = module.params['profilename'] + sslcacert = module.params['ca_cert'] + systemorgid = module.params['systemorgid'] + channels = module.params['channels'] + enable_eus = module.params['enable_eus'] + nopackages = module.params['nopackages'] + + rhn = Rhn(module=module, username=username, password=password) + + # use the provided server url and persist it to the rhn config. + if server_url: + rhn.configure_server_url(server_url) + + if not rhn.server_url: + module.fail_json( + msg="No serverURL was found (from either the 'server_url' module arg or the config file option 'serverURL' in /etc/sysconfig/rhn/up2date)" + ) + + # Ensure system is registered + if state == 'present': + + # Check for missing parameters ... + if not (activationkey or rhn.username or rhn.password): + module.fail_json(msg="Missing arguments, must supply an activationkey (%s) or username (%s) and password (%s)" % (activationkey, rhn.username, + rhn.password)) + if not activationkey and not (rhn.username and rhn.password): + module.fail_json(msg="Missing arguments, If registering without an activationkey, must supply username or password") + + # Register system + if rhn.is_registered: + module.exit_json(changed=False, msg="System already registered.") + + try: + rhn.enable() + rhn.register(enable_eus, activationkey, profilename, sslcacert, systemorgid, nopackages) + rhn.subscribe(channels) + except Exception as exc: + module.fail_json(msg="Failed to register with '%s': %s" % (rhn.hostname, exc)) + finally: + rhn.logout() + + module.exit_json(changed=True, msg="System successfully registered to '%s'." % rhn.hostname) + + # Ensure system is *not* registered + if state == 'absent': + if not rhn.is_registered: + module.exit_json(changed=False, msg="System already unregistered.") + + if not (rhn.username and rhn.password): + module.fail_json(msg="Missing arguments, the system is currently registered and unregistration requires a username and password") + + try: + rhn.unregister() + except Exception as exc: + module.fail_json(msg="Failed to unregister: %s" % exc) + finally: + rhn.logout() + + module.exit_json(changed=True, msg="System successfully unregistered from %s." 
% rhn.hostname)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/packaging/os/rhsm_release.py b/plugins/modules/packaging/os/rhsm_release.py
new file mode 100644
index 0000000000..068a5ca704
--- /dev/null
+++ b/plugins/modules/packaging/os/rhsm_release.py
@@ -0,0 +1,129 @@
+#!/usr/bin/python
+
+# (c) 2018, Sean Myers
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+    'metadata_version': '1.1',
+    'status': ['preview'],
+    'supported_by': 'community'
+}
+
+DOCUMENTATION = '''
+---
+module: rhsm_release
+short_description: Set or unset RHSM release version
+description:
+  - Sets or unsets the release version used by RHSM repositories.
+notes:
+  - This module will fail on an unregistered system.
+    Use the C(redhat_subscription) module to register a system
+    prior to setting the RHSM release.
+requirements:
+  - Red Hat Enterprise Linux 6+ with subscription-manager installed
+options:
+  release:
+    description:
+      - RHSM release version to use (use null to unset)
+    required: true
+author:
+  - Sean Myers (@seandst)
+'''

+EXAMPLES = '''
+# Set release version to 7.1
+- name: Set RHSM release version
+  rhsm_release:
+    release: "7.1"
+
+# Set release version to 6Server
+- name: Set RHSM release version
+  rhsm_release:
+    release: "6Server"
+
+# Unset release version
+- name: Unset RHSM release version
+  rhsm_release:
+    release: null
+'''
+
+RETURN = '''
+current_release:
+  description: The current RHSM release version value
+  returned: success
+  type: str
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+
+import re
+
+# Matches release-like values such as 7.2, 6.10, 10Server,
+# but rejects unlikely values, like 100Server, 100.0, 1.100, etc.
+release_matcher = re.compile(r'\b\d{1,2}(?:\.\d{1,2}|Server)\b')
+
+
+def _sm_release(module, *args):
+    # pass args to s-m release, e.g.
_sm_release(module, '--set', '0.1') becomes
+    # "subscription-manager release --set 0.1"
+    sm_bin = module.get_bin_path('subscription-manager', required=True)
+    cmd = '{0} release {1}'.format(sm_bin, " ".join(args))
+    # delegate nonzero rc handling to run_command
+    return module.run_command(cmd, check_rc=True)
+
+
+def get_release(module):
+    # Get the current release version, or None if release unset
+    rc, out, err = _sm_release(module, '--show')
+    try:
+        match = release_matcher.findall(out)[0]
+    except IndexError:
+        # 0'th index did not exist; no matches
+        match = None
+
+    return match
+
+
+def set_release(module, release):
+    # Set current release version, or unset if release is None
+    if release is None:
+        args = ('--unset',)
+    else:
+        args = ('--set', release)
+
+    return _sm_release(module, *args)
+
+
+def main():
+    module = AnsibleModule(
+        argument_spec=dict(
+            release=dict(type='str', required=True),
+        ),
+        supports_check_mode=True
+    )
+
+    target_release = module.params['release']
+
+    # sanity check: the target release at least looks like a valid release
+    if target_release and not release_matcher.findall(target_release):
+        module.fail_json(msg='"{0}" does not appear to be a valid release.'.format(target_release))
+
+    # Will fail with useful error from s-m if system not subscribed
+    current_release = get_release(module)
+
+    changed = (target_release != current_release)
+    if not module.check_mode and changed:
+        set_release(module, target_release)
+        # If setting the release fails, then a fail_json would have exited with
+        # the s-m error, e.g. "No releases match '7.20'...". If not, then the
+        # current release is now set to the target release (job's done)
+        current_release = target_release
+
+    module.exit_json(current_release=current_release, changed=changed)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/packaging/os/rhsm_repository.py b/plugins/modules/packaging/os/rhsm_repository.py
new file mode 100644
index 0000000000..7aa4ce032b
--- /dev/null
+++ b/plugins/modules/packaging/os/rhsm_repository.py
@@ -0,0 +1,247 @@
+#!/usr/bin/python
+
+# Copyright: (c) 2017, Giovanni Sciortino (@giovannisciortino)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+DOCUMENTATION = '''
+---
+module: rhsm_repository
+short_description: Manage RHSM repositories using the subscription-manager command
+description:
+  - Manage (enable or disable) RHSM repositories on the Red Hat Subscription
+    Management entitlement platform using the C(subscription-manager) command.
+author: Giovanni Sciortino (@giovannisciortino)
+notes:
+  - In order to manage RHSM repositories the system must already be registered
+    to RHSM manually or using the Ansible C(redhat_subscription) module.
+
+requirements:
+  - subscription-manager
+options:
+  state:
+    description:
+      - The desired repository state. C(present) and C(enabled) enable the
+        repository, while C(absent) and C(disabled) disable it.
+    choices: [present, enabled, absent, disabled]
+    default: "enabled"
+  name:
+    description:
+      - The ID of repositories to enable or disable.
+      - To operate on several repositories this can accept a comma separated
+        list or a YAML list.
+    required: True
+  purge:
+    description:
+      - Disable all currently enabled repositories that are not specified in C(name).
+ Only set this to C(True) if passing in a list of repositories to the C(name) field. + Using this with C(loop) will most likely not have the desired result. + type: bool + default: False +''' + +EXAMPLES = ''' +- name: Enable a RHSM repository + rhsm_repository: + name: rhel-7-server-rpms + +- name: Disable all RHSM repositories + rhsm_repository: + name: '*' + state: disabled + +- name: Enable all repositories starting with rhel-6-server + rhsm_repository: + name: rhel-6-server* + state: enabled + +- name: Disable all repositories except rhel-7-server-rpms + rhsm_repository: + name: rhel-7-server-rpms + purge: True +''' + +RETURN = ''' +repositories: + description: + - The list of RHSM repositories with their states. + - When this module is used to change the repository states, this list contains the updated states after the changes. + returned: success + type: list +''' + +import re +import os +from fnmatch import fnmatch +from copy import deepcopy +from ansible.module_utils.basic import AnsibleModule + + +def run_subscription_manager(module, arguments): + # Execute subscription-manager with arguments and manage common errors + rhsm_bin = module.get_bin_path('subscription-manager') + if not rhsm_bin: + module.fail_json(msg='The executable file subscription-manager was not found in PATH') + + lang_env = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C') + rc, out, err = module.run_command("%s %s" % (rhsm_bin, " ".join(arguments)), environ_update=lang_env) + + if rc == 1 and (err == 'The password you typed is invalid.\nPlease try again.\n' or os.getuid() != 0): + module.fail_json(msg='The executable file subscription-manager must be run using root privileges') + elif rc == 0 and out == 'This system has no repositories available through subscriptions.\n': + module.fail_json(msg='This system has no repositories available through subscriptions') + elif rc == 1: + module.fail_json(msg='subscription-manager failed with the following error: %s' % err) + else: + return rc, out, err + + +def get_repository_list(module, list_parameter): + # Generate RHSM repository list and return a list of dict + if list_parameter == 'list_enabled': + rhsm_arguments = ['repos', '--list-enabled'] + elif list_parameter == 'list_disabled': + rhsm_arguments = ['repos', '--list-disabled'] + elif list_parameter == 'list': + rhsm_arguments = ['repos', '--list'] + rc, out, err = run_subscription_manager(module, rhsm_arguments) + + skip_lines = [ + '+----------------------------------------------------------+', + ' Available Repositories in /etc/yum.repos.d/redhat.repo' + ] + repo_id_re = re.compile(r'Repo ID:\s+(.*)') + repo_name_re = re.compile(r'Repo Name:\s+(.*)') + repo_url_re = re.compile(r'Repo URL:\s+(.*)') + repo_enabled_re = re.compile(r'Enabled:\s+(.*)') + + repo_id = '' + repo_name = '' + repo_url = '' + repo_enabled = '' + + repo_result = [] + for line in out.splitlines(): + if line == '' or line in skip_lines: + continue + + repo_id_match = repo_id_re.match(line) + if repo_id_match: + repo_id = repo_id_match.group(1) + continue + + repo_name_match = repo_name_re.match(line) + if repo_name_match: + repo_name = repo_name_match.group(1) + continue + + repo_url_match = repo_url_re.match(line) + if repo_url_match: + repo_url = repo_url_match.group(1) + continue + + repo_enabled_match = repo_enabled_re.match(line) + if repo_enabled_match: + repo_enabled = repo_enabled_match.group(1) + + repo = { + "id": repo_id, + "name": repo_name, + "url": repo_url, + "enabled": True if repo_enabled == '1' else False + } + + 
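+            # Editor's illustration with hypothetical values (not from the
+            # original commit) - a parsed record appended below looks like:
+            #     {'id': 'rhel-7-server-rpms',
+            #      'name': 'Red Hat Enterprise Linux 7 Server (RPMs)',
+            #      'url': 'https://cdn.redhat.com/content/...',
+            #      'enabled': True}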
            repo_result.append(repo)
+
+    return repo_result
+
+
+def repository_modify(module, state, name, purge=False):
+    name = set(name)
+    current_repo_list = get_repository_list(module, 'list')
+    updated_repo_list = deepcopy(current_repo_list)
+    matched_existing_repo = {}
+    for repoid in name:
+        matched_existing_repo[repoid] = []
+        for idx, repo in enumerate(current_repo_list):
+            if fnmatch(repo['id'], repoid):
+                matched_existing_repo[repoid].append(repo)
+                # Update updated_repo_list so it can be returned as the result
+                # variable; 'present' must count as enabled, just as it does in
+                # the state handling below
+                updated_repo_list[idx]['enabled'] = True if state in ['enabled', 'present'] else False
+
+    changed = False
+    results = []
+    diff_before = ""
+    diff_after = ""
+    rhsm_arguments = ['repos']
+
+    for repoid in matched_existing_repo:
+        if len(matched_existing_repo[repoid]) == 0:
+            results.append("%s is not a valid repository ID" % repoid)
+            module.fail_json(results=results, msg="%s is not a valid repository ID" % repoid)
+        for repo in matched_existing_repo[repoid]:
+            if state in ['disabled', 'absent']:
+                if repo['enabled']:
+                    changed = True
+                    diff_before += "Repository '%s' is enabled for this system\n" % repo['id']
+                    diff_after += "Repository '%s' is disabled for this system\n" % repo['id']
+                results.append("Repository '%s' is disabled for this system" % repo['id'])
+                rhsm_arguments += ['--disable', repo['id']]
+            elif state in ['enabled', 'present']:
+                if not repo['enabled']:
+                    changed = True
+                    diff_before += "Repository '%s' is disabled for this system\n" % repo['id']
+                    diff_after += "Repository '%s' is enabled for this system\n" % repo['id']
+                results.append("Repository '%s' is enabled for this system" % repo['id'])
+                rhsm_arguments += ['--enable', repo['id']]
+
+    # Disable all enabled repos on the system that are not in the task and not
+    # marked as disabled by the task
+    if purge:
+        enabled_repo_ids = set(repo['id'] for repo in updated_repo_list if repo['enabled'])
+        matched_repoids_set = set(matched_existing_repo.keys())
+        difference = enabled_repo_ids.difference(matched_repoids_set)
+        if len(difference) > 0:
+            for repoid in difference:
+                changed = True
+                # str.join() does not mutate the string, so the diff must be
+                # accumulated with '+=' as in the loops above
+                diff_before += "Repository '{repoid}' is enabled for this system\n".format(repoid=repoid)
+                diff_after += "Repository '{repoid}' is disabled for this system\n".format(repoid=repoid)
+                results.append("Repository '{repoid}' is disabled for this system".format(repoid=repoid))
+                rhsm_arguments.extend(['--disable', repoid])
+
+    diff = {'before': diff_before,
+            'after': diff_after,
+            'before_header': "RHSM repositories",
+            'after_header': "RHSM repositories"}
+
+    if not module.check_mode:
+        rc, out, err = run_subscription_manager(module, rhsm_arguments)
+        results = out.splitlines()
+    module.exit_json(results=results, changed=changed, repositories=updated_repo_list, diff=diff)
+
+
+def main():
+    module = AnsibleModule(
+        argument_spec=dict(
+            name=dict(type='list', required=True),
+            state=dict(choices=['enabled', 'disabled', 'present', 'absent'], default='enabled'),
+            purge=dict(type='bool', default=False),
+        ),
+        supports_check_mode=True,
+    )
+    name = module.params['name']
+    state = module.params['state']
+    purge = module.params['purge']
+
+    repository_modify(module, state, name, purge)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/packaging/os/slackpkg.py b/plugins/modules/packaging/os/slackpkg.py
new file mode 100644
index 0000000000..5726f5995e
--- /dev/null
+++ b/plugins/modules/packaging/os/slackpkg.py
@@ -0,0 +1,201 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2014, Kim Nørgaard
+# Written by Kim
Nørgaard +# Based on pkgng module written by bleader +# that was based on pkgin module written by Shaun Zinck +# that was based on pacman module written by Afterburn +# that was based on apt module written by Matthew Williams +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: slackpkg +short_description: Package manager for Slackware >= 12.2 +description: + - Manage binary packages for Slackware using 'slackpkg' which + is available in versions after 12.2. +options: + name: + description: + - name of package to install/remove + required: true + + state: + description: + - state of the package, you can use "installed" as an alias for C(present) and removed as one for C(absent). + choices: [ 'present', 'absent', 'latest' ] + required: false + default: present + + update_cache: + description: + - update the package database first + required: false + default: false + type: bool + +author: Kim Nørgaard (@KimNorgaard) +requirements: [ "Slackware >= 12.2" ] +''' + +EXAMPLES = ''' +# Install package foo +- slackpkg: + name: foo + state: present + +# Remove packages foo and bar +- slackpkg: + name: foo,bar + state: absent + +# Make sure that it is the most updated package +- slackpkg: + name: foo + state: latest +''' + +from ansible.module_utils.basic import AnsibleModule + + +def query_package(module, slackpkg_path, name): + + import glob + import platform + + machine = platform.machine() + packages = glob.glob("/var/log/packages/%s-*-[%s|noarch]*" % (name, + machine)) + + if len(packages) > 0: + return True + + return False + + +def remove_packages(module, slackpkg_path, packages): + + remove_c = 0 + # Using a for loop in case of error, we can report the package that failed + for package in packages: + # Query the package first, to see if we even need to remove + if not query_package(module, slackpkg_path, package): + continue + + if not module.check_mode: + rc, out, err = module.run_command("%s -default_answer=y -batch=on \ + remove %s" % (slackpkg_path, + package)) + + if not module.check_mode and query_package(module, slackpkg_path, + package): + module.fail_json(msg="failed to remove %s: %s" % (package, out)) + + remove_c += 1 + + if remove_c > 0: + + module.exit_json(changed=True, msg="removed %s package(s)" % remove_c) + + module.exit_json(changed=False, msg="package(s) already absent") + + +def install_packages(module, slackpkg_path, packages): + + install_c = 0 + + for package in packages: + if query_package(module, slackpkg_path, package): + continue + + if not module.check_mode: + rc, out, err = module.run_command("%s -default_answer=y -batch=on \ + install %s" % (slackpkg_path, + package)) + + if not module.check_mode and not query_package(module, slackpkg_path, + package): + module.fail_json(msg="failed to install %s: %s" % (package, out), + stderr=err) + + install_c += 1 + + if install_c > 0: + module.exit_json(changed=True, msg="present %s package(s)" + % (install_c)) + + module.exit_json(changed=False, msg="package(s) already present") + + +def upgrade_packages(module, slackpkg_path, packages): + install_c = 0 + + for package in packages: + if not module.check_mode: + rc, out, err = module.run_command("%s -default_answer=y -batch=on \ + upgrade %s" % (slackpkg_path, + package)) + + if not 
module.check_mode and not query_package(module, slackpkg_path, + package): + module.fail_json(msg="failed to install %s: %s" % (package, out), + stderr=err) + + install_c += 1 + + if install_c > 0: + module.exit_json(changed=True, msg="present %s package(s)" + % (install_c)) + + module.exit_json(changed=False, msg="package(s) already present") + + +def update_cache(module, slackpkg_path): + rc, out, err = module.run_command("%s -batch=on update" % (slackpkg_path)) + if rc != 0: + module.fail_json(msg="Could not update package cache") + + +def main(): + module = AnsibleModule( + argument_spec=dict( + state=dict(default="installed", choices=['installed', 'removed', 'absent', 'present', 'latest']), + name=dict(aliases=["pkg"], required=True, type='list'), + update_cache=dict(default=False, aliases=["update-cache"], + type='bool'), + ), + supports_check_mode=True) + + slackpkg_path = module.get_bin_path('slackpkg', True) + + p = module.params + + pkgs = p['name'] + + if p["update_cache"]: + update_cache(module, slackpkg_path) + + if p['state'] == 'latest': + upgrade_packages(module, slackpkg_path, pkgs) + + elif p['state'] in ['present', 'installed']: + install_packages(module, slackpkg_path, pkgs) + + elif p["state"] in ['removed', 'absent']: + remove_packages(module, slackpkg_path, pkgs) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/packaging/os/snap.py b/plugins/modules/packaging/os/snap.py new file mode 100644 index 0000000000..7a5d97d1c2 --- /dev/null +++ b/plugins/modules/packaging/os/snap.py @@ -0,0 +1,259 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2018, Stanislas Lange (angristan) +# Copyright: (c) 2018, Victor Carceler + +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = { + 'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community' +} + +DOCUMENTATION = ''' +--- +module: snap + +short_description: Manages snaps + + +description: + - "Manages snaps packages." + +options: + name: + description: + - Name of the snap to install or remove. Can be a list of snaps. + required: true + state: + description: + - Desired state of the package. + required: false + default: present + choices: [ absent, present ] + classic: + description: + - Confinement policy. The classic confinement allows a snap to have + the same level of access to the system as "classic" packages, + like those managed by APT. This option corresponds to the --classic argument. + This option can only be specified if there is a single snap in the task. + type: bool + required: false + default: False + channel: + description: + - Define which release of a snap is installed and tracked for updates. + This option can only be specified if there is a single snap in the task. 
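# Aside, not part of the module docs: a sketch of how these two options
# surface on the command line (hypothetical snap name 'foo'), per
# get_base_cmd_parts() further down:
#   classic: yes          ->  snap install --classic foo
#   channel: latest/edge  ->  snap install --channel latest/edge foo
# Neither flag is emitted for state=absent (plain 'snap remove foo').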
+ type: str + required: false + default: stable + +author: + - Victor Carceler (@vcarceler) + - Stanislas Lange (@angristan) +''' + +EXAMPLES = ''' +# Install "foo" and "bar" snap +- name: Install foo + snap: + name: + - foo + - bar + +# Remove "foo" snap +- name: Remove foo + snap: + name: foo + state: absent + +# Install a snap with classic confinement +- name: Install "foo" with option --classic + snap: + name: foo + classic: yes + +# Install a snap with from a specific channel +- name: Install "foo" with option --channel=latest/edge + snap: + name: foo + channel: latest/edge +''' + +RETURN = ''' +classic: + description: Whether or not the snaps were installed with the classic confinement + type: bool + returned: When snaps are installed +channel: + description: The channel the snaps were installed from + type: str + returned: When snaps are installed +cmd: + description: The command that was executed on the host + type: str + returned: When changed is true +snaps_installed: + description: The list of actually installed snaps + type: list + returned: When any snaps have been installed +snaps_removed: + description: The list of actually removed snaps + type: list + returned: When any snaps have been removed +''' + +import operator +import re + +from ansible.module_utils.basic import AnsibleModule + + +def validate_input_snaps(module): + """Ensure that all exist.""" + for snap_name in module.params['name']: + if not snap_exists(module, snap_name): + module.fail_json(msg="No snap matching '%s' available." % snap_name) + + +def snap_exists(module, snap_name): + snap_path = module.get_bin_path("snap", True) + cmd_parts = [snap_path, 'info', snap_name] + cmd = ' '.join(cmd_parts) + rc, out, err = module.run_command(cmd, check_rc=False) + + return rc == 0 + + +def is_snap_installed(module, snap_name): + snap_path = module.get_bin_path("snap", True) + cmd_parts = [snap_path, 'list', snap_name] + cmd = ' '.join(cmd_parts) + rc, out, err = module.run_command(cmd, check_rc=False) + + return rc == 0 + + +def get_snap_for_action(module): + """Construct a list of snaps to use for current action.""" + snaps = module.params['name'] + + is_present_state = module.params['state'] == 'present' + negation_predicate = operator.not_ if is_present_state else bool + + def predicate(s): + return negation_predicate(is_snap_installed(module, s)) + + return [s for s in snaps if predicate(s)] + + +def get_base_cmd_parts(module): + action_map = { + 'present': 'install', + 'absent': 'remove', + } + + state = module.params['state'] + + classic = ['--classic'] if module.params['classic'] else [] + channel = ['--channel', module.params['channel']] if module.params['channel'] and module.params['channel'] != 'stable' else [] + + snap_path = module.get_bin_path("snap", True) + snap_action = action_map[state] + + cmd_parts = [snap_path, snap_action] + if snap_action == 'install': + cmd_parts += classic + channel + + return cmd_parts + + +def get_cmd_parts(module, snap_names): + """Return list of cmds to run in exec format.""" + is_install_mode = module.params['state'] == 'present' + has_multiple_snaps = len(snap_names) > 1 + + cmd_parts = get_base_cmd_parts(module) + has_one_pkg_params = '--classic' in cmd_parts or '--channel' in cmd_parts + + if not (is_install_mode and has_one_pkg_params and has_multiple_snaps): + return [cmd_parts + snap_names] + + return [cmd_parts + [s] for s in snap_names] + + +def run_cmd_for(module, snap_names): + cmds_parts = get_cmd_parts(module, snap_names) + cmd = '; '.join(' '.join(c) for c in 
cmds_parts)
+    cmd = 'sh -c "{0}"'.format(cmd)
+
+    # Actually execute the snap command
+    return (cmd, ) + module.run_command(cmd, check_rc=False)
+
+
+def execute_action(module):
+    is_install_mode = module.params['state'] == 'present'
+    exit_kwargs = {
+        'classic': module.params['classic'],
+        'channel': module.params['channel'],
+    } if is_install_mode else {}
+
+    actionable_snaps = get_snap_for_action(module)
+    if not actionable_snaps:
+        module.exit_json(changed=False, **exit_kwargs)
+
+    changed_def_args = {
+        'changed': True,
+        'snaps_{result}'.format(result='installed' if is_install_mode else 'removed'): actionable_snaps,
+    }
+
+    if module.check_mode:
+        module.exit_json(**dict(changed_def_args, **exit_kwargs))
+
+    cmd, rc, out, err = run_cmd_for(module, actionable_snaps)
+    cmd_out_args = {
+        'cmd': cmd,
+        'rc': rc,
+        'stdout': out,
+        'stderr': err,
+    }
+
+    if rc == 0:
+        module.exit_json(**dict(changed_def_args, **dict(cmd_out_args, **exit_kwargs)))
+    else:
+        msg = "Ooops! Snap installation failed while executing '{cmd}', please examine logs and error output for more details.".format(cmd=cmd)
+        if is_install_mode:
+            m = re.match(r'^error: This revision of snap "(?P<package_name>\w+)" was published using classic confinement', err)
+            if m is not None:
+                err_pkg = m.group('package_name')
+                msg = "Couldn't install {name} because it requires classic confinement".format(name=err_pkg)
+        module.fail_json(msg=msg, **dict(cmd_out_args, **exit_kwargs))
+
+
+def main():
+    module_args = {
+        'name': dict(type='list', required=True),
+        'state': dict(type='str', required=False, default='present', choices=['absent', 'present']),
+        'classic': dict(type='bool', required=False, default=False),
+        'channel': dict(type='str', required=False, default='stable'),
+    }
+    module = AnsibleModule(
+        argument_spec=module_args,
+        supports_check_mode=True,
+    )
+
+    validate_input_snaps(module)
+
+    # Apply changes to the snaps
+    execute_action(module)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/packaging/os/sorcery.py b/plugins/modules/packaging/os/sorcery.py
new file mode 100644
index 0000000000..22bfee2a3b
--- /dev/null
+++ b/plugins/modules/packaging/os/sorcery.py
@@ -0,0 +1,644 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2015-2016, Vlad Glagolev
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: sorcery
+short_description: Package manager for Source Mage GNU/Linux
+description:
+    - Manages "spells" on Source Mage GNU/Linux using I(sorcery) toolchain
+author: "Vlad Glagolev (@vaygr)"
+notes:
+    - When all three components are selected, the update goes by the sequence --
+      Sorcery -> Grimoire(s) -> Spell(s); you cannot override it.
+    - Grimoire handling (i.e. add/remove, including SCM/rsync versions) is not
+      yet supported.
+requirements: + - bash +options: + name: + description: + - Name of the spell + - multiple names can be given, separated by commas + - special value '*' in conjunction with states C(latest) or + C(rebuild) will update or rebuild the whole system respectively + aliases: ["spell"] + + state: + description: + - Whether to cast, dispel or rebuild a package + - state C(cast) is an equivalent of C(present), not C(latest) + - state C(latest) always triggers C(update_cache=yes) + - state C(rebuild) implies cast of all specified spells, not only + those existed before + choices: ["present", "latest", "absent", "cast", "dispelled", "rebuild"] + default: "present" + + depends: + description: + - Comma-separated list of _optional_ dependencies to build a spell + (or make sure it is built) with; use +/- in front of dependency + to turn it on/off ('+' is optional though) + - this option is ignored if C(name) parameter is equal to '*' or + contains more than one spell + - providers must be supplied in the form recognized by Sorcery, e.g. + 'openssl(SSL)' + + update: + description: + - Whether or not to update sorcery scripts at the very first stage + type: bool + default: 'no' + + update_cache: + description: + - Whether or not to update grimoire collection before casting spells + type: bool + default: 'no' + aliases: ["update_codex"] + + cache_valid_time: + description: + - Time in seconds to invalidate grimoire collection on update + - especially useful for SCM and rsync grimoires + - makes sense only in pair with C(update_cache) +''' + + +EXAMPLES = ''' +# Make sure spell 'foo' is installed +- sorcery: + spell: foo + state: present + +# Make sure spells 'foo', 'bar' and 'baz' are removed +- sorcery: + spell: foo,bar,baz + state: absent + +# Make sure spell 'foo' with dependencies 'bar' and 'baz' is installed +- sorcery: + spell: foo + depends: bar,baz + state: present + +# Make sure spell 'foo' with 'bar' and without 'baz' dependencies is installed +- sorcery: + spell: foo + depends: +bar,-baz + state: present + +# Make sure spell 'foo' with libressl (providing SSL) dependency is installed +- sorcery: + spell: foo + depends: libressl(SSL) + state: present + +# Playbook: make sure spells with/without required dependencies (if any) are installed +- sorcery: + name: "{{ item.spell }}" + depends: "{{ item.depends | default(None) }}" + state: present + loop: + - { spell: 'vifm', depends: '+file,-gtk+2' } + - { spell: 'fwknop', depends: 'gpgme' } + - { spell: 'pv,tnftp,tor' } + +# Install the latest version of spell 'foo' using regular glossary +- sorcery: + name: foo + state: latest + +# Rebuild spell 'foo' +- sorcery: + spell: foo + state: rebuild + +# Rebuild the whole system, but update Sorcery and Codex first +- sorcery: + spell: '*' + state: rebuild + update: yes + update_cache: yes + +# Refresh the grimoire collection if it's 1 day old using native sorcerous alias +- sorcery: + update_codex: yes + cache_valid_time: 86400 + +# Update only Sorcery itself +- sorcery: + update: yes +''' + + +RETURN = ''' +''' + + +import datetime +import fileinput +import os +import re +import shutil +import sys + + +# auto-filled at module init +SORCERY = { + 'sorcery': None, + 'scribe': None, + 'cast': None, + 'dispel': None, + 'gaze': None +} + +SORCERY_LOG_DIR = "/var/log/sorcery" +SORCERY_STATE_DIR = "/var/state/sorcery" + + +def get_sorcery_ver(module): + """ Get Sorcery version. 
""" + + cmd_sorcery = "%s --version" % SORCERY['sorcery'] + + rc, stdout, stderr = module.run_command(cmd_sorcery) + + if rc != 0 or not stdout: + module.fail_json(msg="unable to get Sorcery version") + + return stdout.strip() + + +def codex_fresh(codex, module): + """ Check if grimoire collection is fresh enough. """ + + if not module.params['cache_valid_time']: + return False + + timedelta = datetime.timedelta(seconds=module.params['cache_valid_time']) + + for grimoire in codex: + lastupdate_path = os.path.join(SORCERY_STATE_DIR, + grimoire + ".lastupdate") + + try: + mtime = os.stat(lastupdate_path).st_mtime + except Exception: + return False + + lastupdate_ts = datetime.datetime.fromtimestamp(mtime) + + # if any grimoire is not fresh, we invalidate the Codex + if lastupdate_ts + timedelta < datetime.datetime.now(): + return False + + return True + + +def codex_list(module): + """ List valid grimoire collection. """ + + codex = {} + + cmd_scribe = "%s index" % SORCERY['scribe'] + + rc, stdout, stderr = module.run_command(cmd_scribe) + + if rc != 0: + module.fail_json(msg="unable to list grimoire collection, fix your Codex") + + rex = re.compile(r"^\s*\[\d+\] : (?P[\w\-+.]+) : [\w\-+./]+(?: : (?P[\w\-+.]+))?\s*$") + + # drop 4-line header and empty trailing line + for line in stdout.splitlines()[4:-1]: + match = rex.match(line) + + if match: + codex[match.group('grim')] = match.group('ver') + + if not codex: + module.fail_json(msg="no grimoires to operate on; add at least one") + + return codex + + +def update_sorcery(module): + """ Update sorcery scripts. + + This runs 'sorcery update' ('sorcery -u'). Check mode always returns a + positive change value. + + """ + + changed = False + + if module.check_mode: + if not module.params['name'] and not module.params['update_cache']: + module.exit_json(changed=True, msg="would have updated Sorcery") + else: + sorcery_ver = get_sorcery_ver(module) + + cmd_sorcery = "%s update" % SORCERY['sorcery'] + + rc, stdout, stderr = module.run_command(cmd_sorcery) + + if rc != 0: + module.fail_json(msg="unable to update Sorcery: " + stdout) + + if sorcery_ver != get_sorcery_ver(module): + changed = True + + if not module.params['name'] and not module.params['update_cache']: + module.exit_json(changed=changed, + msg="successfully updated Sorcery") + + +def update_codex(module): + """ Update grimoire collections. + + This runs 'scribe update'. Check mode always returns a positive change + value when 'cache_valid_time' is used. + + """ + + params = module.params + + changed = False + + codex = codex_list(module) + fresh = codex_fresh(codex, module) + + if module.check_mode: + if not params['name']: + if not fresh: + changed = True + + module.exit_json(changed=changed, msg="would have updated Codex") + elif not fresh or params['name'] and params['state'] == 'latest': + # SILENT is required as a workaround for query() in libgpg + module.run_command_environ_update.update(dict(SILENT='1')) + + cmd_scribe = "%s update" % SORCERY['scribe'] + + rc, stdout, stderr = module.run_command(cmd_scribe) + + if rc != 0: + module.fail_json(msg="unable to update Codex: " + stdout) + + if codex != codex_list(module): + changed = True + + if not params['name']: + module.exit_json(changed=changed, + msg="successfully updated Codex") + + +def match_depends(module): + """ Check for matching dependencies. + + This inspects spell's dependencies with the desired states and returns + 'False' if a recast is needed to match them. 
+
+
+def update_sorcery(module):
+    """ Update sorcery scripts.
+
+    This runs 'sorcery update' ('sorcery -u'). Check mode always returns a
+    positive change value.
+
+    """
+
+    changed = False
+
+    if module.check_mode:
+        if not module.params['name'] and not module.params['update_cache']:
+            module.exit_json(changed=True, msg="would have updated Sorcery")
+    else:
+        sorcery_ver = get_sorcery_ver(module)
+
+        cmd_sorcery = "%s update" % SORCERY['sorcery']
+
+        rc, stdout, stderr = module.run_command(cmd_sorcery)
+
+        if rc != 0:
+            module.fail_json(msg="unable to update Sorcery: " + stdout)
+
+        if sorcery_ver != get_sorcery_ver(module):
+            changed = True
+
+        if not module.params['name'] and not module.params['update_cache']:
+            module.exit_json(changed=changed,
+                             msg="successfully updated Sorcery")
+
+
+def update_codex(module):
+    """ Update grimoire collections.
+
+    This runs 'scribe update'. Check mode always returns a positive change
+    value when 'cache_valid_time' is used.
+
+    """
+
+    params = module.params
+
+    changed = False
+
+    codex = codex_list(module)
+    fresh = codex_fresh(codex, module)
+
+    if module.check_mode:
+        if not params['name']:
+            if not fresh:
+                changed = True
+
+            module.exit_json(changed=changed, msg="would have updated Codex")
+    elif not fresh or params['name'] and params['state'] == 'latest':
+        # SILENT is required as a workaround for query() in libgpg
+        module.run_command_environ_update.update(dict(SILENT='1'))
+
+        cmd_scribe = "%s update" % SORCERY['scribe']
+
+        rc, stdout, stderr = module.run_command(cmd_scribe)
+
+        if rc != 0:
+            module.fail_json(msg="unable to update Codex: " + stdout)
+
+        if codex != codex_list(module):
+            changed = True
+
+        if not params['name']:
+            module.exit_json(changed=changed,
+                             msg="successfully updated Codex")
+
+
+def match_depends(module):
+    """ Check for matching dependencies.
+
+    This inspects spell's dependencies with the desired states and returns
+    'False' if a recast is needed to match them. It also adds required lines
+    to the system-wide depends file for proper recast procedure.
+
+    """
+
+    params = module.params
+    spells = params['name']
+
+    depends = {}
+
+    depends_ok = True
+
+    if len(spells) > 1 or not params['depends']:
+        return depends_ok
+
+    spell = spells[0]
+
+    if module.check_mode:
+        sorcery_depends_orig = os.path.join(SORCERY_STATE_DIR, "depends")
+        sorcery_depends = os.path.join(SORCERY_STATE_DIR, "depends.check")
+
+        try:
+            shutil.copy2(sorcery_depends_orig, sorcery_depends)
+        except IOError:
+            module.fail_json(msg="failed to copy depends.check file")
+    else:
+        sorcery_depends = os.path.join(SORCERY_STATE_DIR, "depends")
+
+    rex = re.compile(r"^(?P<status>\+?|\-){1}(?P<depend>[a-z0-9]+[a-z0-9_\-\+\.]*(\([A-Z0-9_\-\+\.]+\))*)$")
+
+    for d in params['depends'].split(','):
+        match = rex.match(d)
+
+        if not match:
+            module.fail_json(msg="wrong depends line for spell '%s'" % spell)
+
+        # normalize status
+        if not match.group('status') or match.group('status') == '+':
+            status = 'on'
+        else:
+            status = 'off'
+
+        depends[match.group('depend')] = status
+
+    # drop providers spec
+    depends_list = [s.split('(')[0] for s in depends]
+
+    cmd_gaze = "%s -q version %s" % (SORCERY['gaze'], ' '.join(depends_list))
+
+    rc, stdout, stderr = module.run_command(cmd_gaze)
+
+    if rc != 0:
+        module.fail_json(msg="wrong dependencies for spell '%s'" % spell)
+
+    fi = fileinput.input(sorcery_depends, inplace=True)
+
+    try:
+        try:
+            for line in fi:
+                if line.startswith(spell + ':'):
+                    match = None
+
+                    for d in depends:
+                        # when local status is 'off' and dependency is provider,
+                        # use only provider value
+                        d_offset = d.find('(')
+
+                        if d_offset == -1:
+                            d_p = ''
+                        else:
+                            d_p = re.escape(d[d_offset:])
+
+                        # .escape() is needed mostly for the spells like 'libsigc++'
+                        rex = re.compile("%s:(?:%s|%s):(?P<lstatus>on|off):optional:" %
+                                         (re.escape(spell), re.escape(d), d_p))
+
+                        match = rex.match(line)
+
+                        # we matched the line "spell:dependency:on|off:optional:"
+                        if match:
+                            # if we also matched the local status, mark dependency
+                            # as empty and put it back into depends file
+                            if match.group('lstatus') == depends[d]:
+                                depends[d] = None
+
+                                sys.stdout.write(line)
+
+                            # status is not that we need, so keep this dependency
+                            # in the list for further reverse switching;
+                            # stop and process the next line in both cases
+                            break
+
+                    if not match:
+                        sys.stdout.write(line)
+                else:
+                    sys.stdout.write(line)
+        except IOError:
+            module.fail_json(msg="I/O error on the depends file")
+    finally:
+        fi.close()
+
+    depends_new = [v for v in depends if depends[v]]
+
+    if depends_new:
+        try:
+            try:
+                fl = open(sorcery_depends, 'a')
+
+                for k in depends_new:
+                    fl.write("%s:%s:%s:optional::\n" % (spell, k, depends[k]))
+            except IOError:
+                module.fail_json(msg="I/O error on the depends file")
+        finally:
+            fl.close()
+
+        depends_ok = False
+
+    if module.check_mode:
+        try:
+            os.remove(sorcery_depends)
+        except IOError:
+            module.fail_json(msg="failed to clean up depends.backup file")
+
+    return depends_ok
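
# Illustrative sketch, not part of the module: how a depends spec such as
# '+bar,-baz,openssl(SSL)' (hypothetical values) is normalized by the loop
# in match_depends() above -- '+' or no prefix means 'on', '-' means 'off',
# and a provider spec keeps its parenthesized part.
def _example_parse_depends(spec='+bar,-baz,openssl(SSL)'):
    rex = re.compile(r"^(?P<status>\+?|\-){1}(?P<depend>[a-z0-9]+[a-z0-9_\-\+\.]*(\([A-Z0-9_\-\+\.]+\))*)$")
    depends = {}
    for d in spec.split(','):
        match = rex.match(d)
        depends[match.group('depend')] = 'off' if match.group('status') == '-' else 'on'
    return depends  # -> {'bar': 'on', 'baz': 'off', 'openssl(SSL)': 'on'}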
+
+
+def manage_spells(module):
+    """ Cast or dispel spells.
+
+    This manages the whole system ('*'), list or a single spell. Command 'cast'
+    is used to install or rebuild spells, while 'dispel' takes care of their
+    removal from the system.
+
+    """
+
+    params = module.params
+    spells = params['name']
+
+    sorcery_queue = os.path.join(SORCERY_LOG_DIR, "queue/install")
+
+    if spells == '*':
+        if params['state'] == 'latest':
+            # back up original queue
+            try:
+                os.rename(sorcery_queue, sorcery_queue + ".backup")
+            except IOError:
+                module.fail_json(msg="failed to backup the update queue")
+
+            # see update_codex()
+            module.run_command_environ_update.update(dict(SILENT='1'))
+
+            cmd_sorcery = "%s queue" % SORCERY['sorcery']
+
+            rc, stdout, stderr = module.run_command(cmd_sorcery)
+
+            if rc != 0:
+                module.fail_json(msg="failed to generate the update queue")
+
+            try:
+                queue_size = os.stat(sorcery_queue).st_size
+            except Exception:
+                module.fail_json(msg="failed to read the update queue")
+
+            if queue_size != 0:
+                if module.check_mode:
+                    try:
+                        os.rename(sorcery_queue + ".backup", sorcery_queue)
+                    except IOError:
+                        module.fail_json(msg="failed to restore the update queue")
+
+                    module.exit_json(changed=True, msg="would have updated the system")
+
+                cmd_cast = "%s --queue" % SORCERY['cast']
+
+                rc, stdout, stderr = module.run_command(cmd_cast)
+
+                if rc != 0:
+                    module.fail_json(msg="failed to update the system")
+
+                module.exit_json(changed=True, msg="successfully updated the system")
+            else:
+                module.exit_json(changed=False, msg="the system is already up to date")
+        elif params['state'] == 'rebuild':
+            if module.check_mode:
+                module.exit_json(changed=True, msg="would have rebuilt the system")
+
+            cmd_sorcery = "%s rebuild" % SORCERY['sorcery']
+
+            rc, stdout, stderr = module.run_command(cmd_sorcery)
+
+            if rc != 0:
+                module.fail_json(msg="failed to rebuild the system: " + stdout)
+
+            module.exit_json(changed=True, msg="successfully rebuilt the system")
+        else:
+            module.fail_json(msg="unsupported operation on '*' name value")
+    else:
+        if params['state'] in ('present', 'latest', 'rebuild', 'absent'):
+            # extract versions from the 'gaze' command
+            cmd_gaze = "%s -q version %s" % (SORCERY['gaze'], ' '.join(spells))
+
+            rc, stdout, stderr = module.run_command(cmd_gaze)
+
+            # fail if any of spells cannot be found
+            if rc != 0:
+                module.fail_json(msg="failed to locate spell(s) in the list (%s)" %
+                                 ', '.join(spells))
+
+            cast_queue = []
+            dispel_queue = []
+
+            rex = re.compile(r"[^|]+\|[^|]+\|(?P<spell>[^|]+)\|(?P<grim_ver>[^|]+)\|(?P<inst_ver>[^$]+)")
+
+            # drop 2-line header and empty trailing line
+            for line in stdout.splitlines()[2:-1]:
+                match = rex.match(line)
+
+                cast = False
+
+                if params['state'] == 'present':
+                    # spell is not installed..
+                    if match.group('inst_ver') == '-':
+                        # ..so set up depends reqs for it
+                        match_depends(module)
+
+                        cast = True
+                    # spell is installed..
+                    else:
+                        # ..but does not conform depends reqs
+                        if not match_depends(module):
+                            cast = True
+                elif params['state'] == 'latest':
+                    # grimoire and installed versions do not match..
+                    if match.group('grim_ver') != match.group('inst_ver'):
+                        # ..so check for depends reqs first and set them up
+                        match_depends(module)
+
+                        cast = True
+                    # grimoire and installed versions match..
+ else: + # ..but the spell does not conform depends reqs + if not match_depends(module): + cast = True + elif params['state'] == 'rebuild': + cast = True + # 'absent' + else: + if match.group('inst_ver') != '-': + dispel_queue.append(match.group('spell')) + + if cast: + cast_queue.append(match.group('spell')) + + if cast_queue: + if module.check_mode: + module.exit_json(changed=True, msg="would have cast spell(s)") + + cmd_cast = "%s -c %s" % (SORCERY['cast'], ' '.join(cast_queue)) + + rc, stdout, stderr = module.run_command(cmd_cast) + + if rc != 0: + module.fail_json(msg="failed to cast spell(s): %s" + stdout) + + module.exit_json(changed=True, msg="successfully cast spell(s)") + elif params['state'] != 'absent': + module.exit_json(changed=False, msg="spell(s) are already cast") + + if dispel_queue: + if module.check_mode: + module.exit_json(changed=True, msg="would have dispelled spell(s)") + + cmd_dispel = "%s %s" % (SORCERY['dispel'], ' '.join(dispel_queue)) + + rc, stdout, stderr = module.run_command(cmd_dispel) + + if rc != 0: + module.fail_json(msg="failed to dispel spell(s): %s" + stdout) + + module.exit_json(changed=True, msg="successfully dispelled spell(s)") + else: + module.exit_json(changed=False, msg="spell(s) are already dispelled") + + +def main(): + module = AnsibleModule( + argument_spec=dict( + name=dict(default=None, aliases=['spell'], type='list'), + state=dict(default='present', choices=['present', 'latest', + 'absent', 'cast', 'dispelled', 'rebuild']), + depends=dict(default=None), + update=dict(default=False, type='bool'), + update_cache=dict(default=False, aliases=['update_codex'], type='bool'), + cache_valid_time=dict(default=0, type='int') + ), + required_one_of=[['name', 'update', 'update_cache']], + supports_check_mode=True + ) + + if os.geteuid() != 0: + module.fail_json(msg="root privileges are required for this operation") + + for c in SORCERY: + SORCERY[c] = module.get_bin_path(c, True) + + # prepare environment: run sorcery commands without asking questions + module.run_command_environ_update = dict(PROMPT_DELAY='0', VOYEUR='0') + + params = module.params + + # normalize 'state' parameter + if params['state'] in ('present', 'cast'): + params['state'] = 'present' + elif params['state'] in ('absent', 'dispelled'): + params['state'] = 'absent' + + if params['update']: + update_sorcery(module) + + if params['update_cache'] or params['state'] == 'latest': + update_codex(module) + + if params['name']: + manage_spells(module) + + +# import module snippets +from ansible.module_utils.basic import AnsibleModule + +if __name__ == '__main__': + main() diff --git a/plugins/modules/packaging/os/svr4pkg.py b/plugins/modules/packaging/os/svr4pkg.py new file mode 100644 index 0000000000..dedd55f09e --- /dev/null +++ b/plugins/modules/packaging/os/svr4pkg.py @@ -0,0 +1,261 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2012, Boyd Adamson +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: svr4pkg +short_description: Manage Solaris SVR4 packages +description: + - Manages SVR4 packages on Solaris 10 and 11. + - These were the native packages on Solaris <= 10 and are available + as a legacy feature in Solaris 11. + - Note that this is a very basic packaging system. 
It will not enforce + dependencies on install or remove. +author: "Boyd Adamson (@brontitall)" +options: + name: + description: + - Package name, e.g. C(SUNWcsr) + required: true + + state: + description: + - Whether to install (C(present)), or remove (C(absent)) a package. + - If the package is to be installed, then I(src) is required. + - The SVR4 package system doesn't provide an upgrade operation. You need to uninstall the old, then install the new package. + required: true + choices: ["present", "absent"] + + src: + description: + - Specifies the location to install the package from. Required when C(state=present). + - "Can be any path acceptable to the C(pkgadd) command's C(-d) option. e.g.: C(somefile.pkg), C(/dir/with/pkgs), C(http:/server/mypkgs.pkg)." + - If using a file or directory, they must already be accessible by the host. See the M(copy) module for a way to get them there. + proxy: + description: + - HTTP[s] proxy to be used if C(src) is a URL. + response_file: + description: + - Specifies the location of a response file to be used if package expects input on install. (added in Ansible 1.4) + required: false + zone: + description: + - Whether to install the package only in the current zone, or install it into all zones. + - The installation into all zones works only if you are working with the global zone. + required: false + default: "all" + choices: ["current", "all"] + category: + description: + - Install/Remove category instead of a single package. + required: false + type: bool +''' + +EXAMPLES = ''' +# Install a package from an already copied file +- svr4pkg: + name: CSWcommon + src: /tmp/cswpkgs.pkg + state: present + +# Install a package directly from an http site +- svr4pkg: + name: CSWpkgutil + src: 'http://get.opencsw.org/now' + state: present + zone: current + +# Install a package with a response file +- svr4pkg: + name: CSWggrep + src: /tmp/third-party.pkg + response_file: /tmp/ggrep.response + state: present + +# Ensure that a package is not installed. +- svr4pkg: + name: SUNWgnome-sound-recorder + state: absent + +# Ensure that a category is not installed. 
+- svr4pkg: + name: FIREFOX + state: absent + category: true +''' + + +import os +import tempfile + +from ansible.module_utils.basic import AnsibleModule + + +def package_installed(module, name, category): + cmd = [module.get_bin_path('pkginfo', True)] + cmd.append('-q') + if category: + cmd.append('-c') + cmd.append(name) + rc, out, err = module.run_command(' '.join(cmd)) + if rc == 0: + return True + else: + return False + + +def create_admin_file(): + (desc, filename) = tempfile.mkstemp(prefix='ansible_svr4pkg', text=True) + fullauto = ''' +mail= +instance=unique +partial=nocheck +runlevel=quit +idepend=nocheck +rdepend=nocheck +space=quit +setuid=nocheck +conflict=nocheck +action=nocheck +networktimeout=60 +networkretries=3 +authentication=quit +keystore=/var/sadm/security +proxy= +basedir=default +''' + os.write(desc, fullauto) + os.close(desc) + return filename + + +def run_command(module, cmd): + progname = cmd[0] + cmd[0] = module.get_bin_path(progname, True) + return module.run_command(cmd) + + +def package_install(module, name, src, proxy, response_file, zone, category): + adminfile = create_admin_file() + cmd = ['pkgadd', '-n'] + if zone == 'current': + cmd += ['-G'] + cmd += ['-a', adminfile, '-d', src] + if proxy is not None: + cmd += ['-x', proxy] + if response_file is not None: + cmd += ['-r', response_file] + if category: + cmd += ['-Y'] + cmd.append(name) + (rc, out, err) = run_command(module, cmd) + os.unlink(adminfile) + return (rc, out, err) + + +def package_uninstall(module, name, src, category): + adminfile = create_admin_file() + if category: + cmd = ['pkgrm', '-na', adminfile, '-Y', name] + else: + cmd = ['pkgrm', '-na', adminfile, name] + (rc, out, err) = run_command(module, cmd) + os.unlink(adminfile) + return (rc, out, err) + + +def main(): + module = AnsibleModule( + argument_spec=dict( + name=dict(required=True), + state=dict(required=True, choices=['present', 'absent']), + src=dict(default=None), + proxy=dict(default=None), + response_file=dict(default=None), + zone=dict(required=False, default='all', choices=['current', 'all']), + category=dict(default=False, type='bool') + ), + supports_check_mode=True + ) + state = module.params['state'] + name = module.params['name'] + src = module.params['src'] + proxy = module.params['proxy'] + response_file = module.params['response_file'] + zone = module.params['zone'] + category = module.params['category'] + rc = None + out = '' + err = '' + result = {} + result['name'] = name + result['state'] = state + + if state == 'present': + if src is None: + module.fail_json(name=name, + msg="src is required when state=present") + if not package_installed(module, name, category): + if module.check_mode: + module.exit_json(changed=True) + (rc, out, err) = package_install(module, name, src, proxy, response_file, zone, category) + # Stdout is normally empty but for some packages can be + # very long and is not often useful + if len(out) > 75: + out = out[:75] + '...' + + elif state == 'absent': + if package_installed(module, name, category): + if module.check_mode: + module.exit_json(changed=True) + (rc, out, err) = package_uninstall(module, name, src, category) + out = out[:75] + + # Returncodes as per pkgadd(1m) + # 0 Successful completion + # 1 Fatal error. + # 2 Warning. + # 3 Interruption. + # 4 Administration. + # 5 Administration. Interaction is required. Do not use pkgadd -n. + # 10 Reboot after installation of all packages. + # 20 Reboot after installation of this package. 
+ # 99 (observed) pkgadd: ERROR: could not process datastream from + if rc in (0, 2, 3, 10, 20): + result['changed'] = True + # no install nor uninstall, or failed + else: + result['changed'] = False + + # rc will be none when the package already was installed and no action took place + # Only return failed=False when the returncode is known to be good as there may be more + # undocumented failure return codes + if rc not in (None, 0, 2, 10, 20): + result['failed'] = True + else: + result['failed'] = False + + if out: + result['stdout'] = out + if err: + result['stderr'] = err + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/packaging/os/swdepot.py b/plugins/modules/packaging/os/swdepot.py new file mode 100644 index 0000000000..8494705511 --- /dev/null +++ b/plugins/modules/packaging/os/swdepot.py @@ -0,0 +1,204 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2013, Raul Melo +# Written by Raul Melo +# Based on yum module written by Seth Vidal +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: swdepot +short_description: Manage packages with swdepot package manager (HP-UX) +description: + - Will install, upgrade and remove packages with swdepot package manager (HP-UX) +notes: [] +author: "Raul Melo (@melodous)" +options: + name: + description: + - package name. + required: true + state: + description: + - whether to install (C(present), C(latest)), or remove (C(absent)) a package. + required: true + choices: [ 'present', 'latest', 'absent'] + depot: + description: + - The source repository from which install or upgrade a package. +''' + +EXAMPLES = ''' +- swdepot: + name: unzip-6.0 + state: present + depot: 'repository:/path' + +- swdepot: + name: unzip + state: latest + depot: 'repository:/path' + +- swdepot: + name: unzip + state: absent +''' + +import re + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.six.moves import shlex_quote + + +def compare_package(version1, version2): + """ Compare version packages. + Return values: + -1 first minor + 0 equal + 1 first greater """ + + def normalize(v): + return [int(x) for x in re.sub(r'(\.0+)*$', '', v).split(".")] + normalized_version1 = normalize(version1) + normalized_version2 = normalize(version2) + if normalized_version1 == normalized_version2: + rc = 0 + elif normalized_version1 < normalized_version2: + rc = -1 + else: + rc = 1 + return rc + + +def query_package(module, name, depot=None): + """ Returns whether a package is installed or not and version. """ + + cmd_list = '/usr/sbin/swlist -a revision -l product' + if depot: + rc, stdout, stderr = module.run_command("%s -s %s %s | grep %s" % (cmd_list, shlex_quote(depot), shlex_quote(name), shlex_quote(name)), + use_unsafe_shell=True) + else: + rc, stdout, stderr = module.run_command("%s %s | grep %s" % (cmd_list, shlex_quote(name), shlex_quote(name)), use_unsafe_shell=True) + if rc == 0: + version = re.sub(r"\s\s+|\t", " ", stdout).strip().split()[1] + else: + version = None + + return rc, version + + +def remove_package(module, name): + """ Uninstall package if installed. 
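    (Aside, not part of the module: compare_package() above normalizes both
    versions before comparing, so compare_package('10.20', '10.2') returns 1
    because [10, 20] > [10, 2], while compare_package('1.0', '1') returns 0
    since trailing '.0' components are stripped before the comparison.)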
""" + + cmd_remove = '/usr/sbin/swremove' + rc, stdout, stderr = module.run_command("%s %s" % (cmd_remove, name)) + + if rc == 0: + return rc, stdout + else: + return rc, stderr + + +def install_package(module, depot, name): + """ Install package if not already installed """ + + cmd_install = '/usr/sbin/swinstall -x mount_all_filesystems=false' + rc, stdout, stderr = module.run_command("%s -s %s %s" % (cmd_install, depot, name)) + if rc == 0: + return rc, stdout + else: + return rc, stderr + + +def main(): + module = AnsibleModule( + argument_spec=dict( + name=dict(aliases=['pkg'], required=True), + state=dict(choices=['present', 'absent', 'latest'], required=True), + depot=dict(default=None, required=False) + ), + supports_check_mode=True + ) + name = module.params['name'] + state = module.params['state'] + depot = module.params['depot'] + + changed = False + msg = "No changed" + rc = 0 + if (state == 'present' or state == 'latest') and depot is None: + output = "depot parameter is mandatory in present or latest task" + module.fail_json(name=name, msg=output, rc=rc) + + # Check local version + rc, version_installed = query_package(module, name) + if not rc: + installed = True + msg = "Already installed" + + else: + installed = False + + if (state == 'present' or state == 'latest') and installed is False: + if module.check_mode: + module.exit_json(changed=True) + rc, output = install_package(module, depot, name) + + if not rc: + changed = True + msg = "Package installed" + + else: + module.fail_json(name=name, msg=output, rc=rc) + + elif state == 'latest' and installed is True: + # Check depot version + rc, version_depot = query_package(module, name, depot) + + if not rc: + if compare_package(version_installed, version_depot) == -1: + if module.check_mode: + module.exit_json(changed=True) + # Install new version + rc, output = install_package(module, depot, name) + + if not rc: + msg = "Package upgraded, Before " + version_installed + " Now " + version_depot + changed = True + + else: + module.fail_json(name=name, msg=output, rc=rc) + + else: + output = "Software package not in repository " + depot + module.fail_json(name=name, msg=output, rc=rc) + + elif state == 'absent' and installed is True: + if module.check_mode: + module.exit_json(changed=True) + rc, output = remove_package(module, name) + if not rc: + changed = True + msg = "Package removed" + else: + module.fail_json(name=name, msg=output, rc=rc) + + if module.check_mode: + module.exit_json(changed=False) + + module.exit_json(changed=changed, name=name, state=state, msg=msg) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/packaging/os/swupd.py b/plugins/modules/packaging/os/swupd.py new file mode 100644 index 0000000000..1c3063ff28 --- /dev/null +++ b/plugins/modules/packaging/os/swupd.py @@ -0,0 +1,309 @@ +#!/usr/bin/python + +# (c) 2017, Alberto Murillo +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: swupd +short_description: Manages updates and bundles in ClearLinux systems. +description: + - Manages updates and bundles with the swupd bundle manager, which is used by the + Clear Linux Project for Intel Architecture. 
+author: Alberto Murillo (@albertomurillo) +options: + contenturl: + description: + - URL pointing to the contents of available bundles. + If not specified, the contents are retrieved from clearlinux.org. + format: + description: + - The format suffix for version file downloads. For example [1,2,3,staging,etc]. + If not specified, the default format is used. + manifest: + description: + - The manifest contains information about the bundles at certain version of the OS. + Specify a Manifest version to verify against that version or leave unspecified to + verify against the current version. + aliases: [release, version] + name: + description: + - Name of the (I)bundle to install or remove. + aliases: [bundle] + state: + description: + - Indicates the desired (I)bundle state. C(present) ensures the bundle + is installed while C(absent) ensures the (I)bundle is not installed. + default: present + choices: [present, absent] + update: + description: + - Updates the OS to the latest version. + type: bool + url: + description: + - Overrides both I(contenturl) and I(versionurl). + verify: + description: + - Verify content for OS version. + type: bool + versionurl: + description: + - URL for version string download. +''' + +EXAMPLES = ''' +- name: Update the OS to the latest version + swupd: + update: yes + +- name: Installs the "foo" bundle + swupd: + name: foo + state: present + +- name: Removes the "foo" bundle + swupd: + name: foo + state: absent + +- name: Check integrity of filesystem + swupd: + verify: yes + +- name: Downgrade OS to release 12920 + swupd: + verify: yes + manifest: 12920 +''' + +RETURN = ''' +stdout: + description: stdout of swupd + returned: always + type: str +stderr: + description: stderr of swupd + returned: always + type: str +''' + +import os +from ansible.module_utils.basic import AnsibleModule + + +class Swupd(object): + FILES_NOT_MATCH = "files did not match" + FILES_REPLACED = "missing files were replaced" + FILES_FIXED = "files were fixed" + FILES_DELETED = "files were deleted" + + def __init__(self, module): + # Fail if swupd is not found + self.module = module + self.swupd_cmd = module.get_bin_path("swupd", False) + if not self.swupd_cmd: + module.fail_json(msg="Could not find swupd.") + + # Initialize parameters + for key in module.params.keys(): + setattr(self, key, module.params[key]) + + # Initialize return values + self.changed = False + self.failed = False + self.msg = None + self.rc = None + self.stderr = "" + self.stdout = "" + + def _run_cmd(self, cmd): + self.rc, self.stdout, self.stderr = self.module.run_command(cmd, check_rc=False) + + def _get_cmd(self, command): + cmd = "%s %s" % (self.swupd_cmd, command) + + if self.format: + cmd += " --format=%s" % self.format + if self.manifest: + cmd += " --manifest=%s" % self.manifest + if self.url: + cmd += " --url=%s" % self.url + else: + if self.contenturl and command != "check-update": + cmd += " --contenturl=%s" % self.contenturl + if self.versionurl: + cmd += " --versionurl=%s" % self.versionurl + + return cmd + + def _is_bundle_installed(self, bundle): + try: + os.stat("/usr/share/clear/bundles/%s" % bundle) + except OSError: + return False + + return True + + def _needs_update(self): + cmd = self._get_cmd("check-update") + self._run_cmd(cmd) + + if self.rc == 0: + return True + + if self.rc == 1: + return False + + self.failed = True + self.msg = "Failed to check for updates" + + def _needs_verify(self): + cmd = self._get_cmd("verify") + self._run_cmd(cmd) + + if self.rc != 0: + self.failed = True + 
self.msg = "Failed to check for filesystem inconsistencies." + + if self.FILES_NOT_MATCH in self.stdout: + return True + + return False + + def install_bundle(self, bundle): + """Installs a bundle with `swupd bundle-add bundle`""" + if self.module.check_mode: + self.module.exit_json(changed=not self._is_bundle_installed(bundle)) + + if self._is_bundle_installed(bundle): + self.msg = "Bundle %s is already installed" % bundle + return + + cmd = self._get_cmd("bundle-add %s" % bundle) + self._run_cmd(cmd) + + if self.rc == 0: + self.changed = True + self.msg = "Bundle %s installed" % bundle + return + + self.failed = True + self.msg = "Failed to install bundle %s" % bundle + + def remove_bundle(self, bundle): + """Removes a bundle with `swupd bundle-remove bundle`""" + if self.module.check_mode: + self.module.exit_json(changed=self._is_bundle_installed(bundle)) + + if not self._is_bundle_installed(bundle): + self.msg = "Bundle %s not installed" + return + + cmd = self._get_cmd("bundle-remove %s" % bundle) + self._run_cmd(cmd) + + if self.rc == 0: + self.changed = True + self.msg = "Bundle %s removed" % bundle + return + + self.failed = True + self.msg = "Failed to remove bundle %s" % bundle + + def update_os(self): + """Updates the os with `swupd update`""" + if self.module.check_mode: + self.module.exit_json(changed=self._needs_update()) + + if not self._needs_update(): + self.msg = "There are no updates available" + return + + cmd = self._get_cmd("update") + self._run_cmd(cmd) + + if self.rc == 0: + self.changed = True + self.msg = "Update successful" + return + + self.failed = True + self.msg = "Failed to check for updates" + + def verify_os(self): + """Verifies filesystem against specified or current version""" + if self.module.check_mode: + self.module.exit_json(changed=self._needs_verify()) + + if not self._needs_verify(): + self.msg = "No files where changed" + return + + cmd = self._get_cmd("verify --fix") + self._run_cmd(cmd) + + if self.rc == 0 and (self.FILES_REPLACED in self.stdout or self.FILES_FIXED in self.stdout or self.FILES_DELETED in self.stdout): + self.changed = True + self.msg = "Fix successful" + return + + self.failed = True + self.msg = "Failed to verify the OS" + + +def main(): + """The main function.""" + module = AnsibleModule( + argument_spec=dict( + contenturl=dict(type="str"), + format=dict(type="str"), + manifest=dict(aliases=["release", "version"], type="int"), + name=dict(aliases=["bundle"], type="str"), + state=dict(default="present", choices=["present", "absent"], type="str"), + update=dict(default=False, type="bool"), + url=dict(type="str"), + verify=dict(default=False, type="bool"), + versionurl=dict(type="str"), + ), + required_one_of=[["name", "update", "verify"]], + mutually_exclusive=[["name", "update", "verify"]], + supports_check_mode=True + ) + + swupd = Swupd(module) + + name = module.params["name"] + state = module.params["state"] + update = module.params["update"] + verify = module.params["verify"] + + if update: + swupd.update_os() + elif verify: + swupd.verify_os() + elif state == "present": + swupd.install_bundle(name) + elif state == "absent": + swupd.remove_bundle(name) + else: + swupd.failed = True + + if swupd.failed: + module.fail_json(msg=swupd.msg, stdout=swupd.stdout, stderr=swupd.stderr) + else: + module.exit_json(changed=swupd.changed, msg=swupd.msg, stdout=swupd.stdout, stderr=swupd.stderr) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/packaging/os/urpmi.py b/plugins/modules/packaging/os/urpmi.py new file 
mode 100644 index 0000000000..2752cb0f36 --- /dev/null +++ b/plugins/modules/packaging/os/urpmi.py @@ -0,0 +1,219 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2013, Philippe Makowski +# Written by Philippe Makowski +# Based on apt module written by Matthew Williams + +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: urpmi +short_description: Urpmi manager +description: + - Manages packages with I(urpmi) (such as for Mageia or Mandriva) +options: + name: + description: + - A list of package names to install, upgrade or remove. + required: yes + aliases: [ package, pkg ] + state: + description: + - Indicates the desired package state. + choices: [ absent, present ] + default: present + update_cache: + description: + - Update the package database first C(urpmi.update -a). + type: bool + default: 'no' + no-recommends: + description: + - Corresponds to the C(--no-recommends) option for I(urpmi). + type: bool + default: 'yes' + aliases: ['no-recommends'] + force: + description: + - Assume "yes" is the answer to any question urpmi has to ask. + Corresponds to the C(--force) option for I(urpmi). + type: bool + default: 'yes' + root: + description: + - Specifies an alternative install root, relative to which all packages will be installed. + Corresponds to the C(--root) option for I(urpmi). + default: / + aliases: [ installroot ] +author: +- Philippe Makowski (@pmakowski) +''' + +EXAMPLES = ''' +- name: Install package foo + urpmi: + pkg: foo + state: present + +- name: Remove package foo + urpmi: + pkg: foo + state: absent + +- name: Remove packages foo and bar + urpmi: + pkg: foo,bar + state: absent + +- name: Update the package database (urpmi.update -a -q) and install bar (bar will be the updated if a newer version exists) +- urpmi: + name: bar + state: present + update_cache: yes +''' + + +import os +import shlex +import sys + +from ansible.module_utils.basic import AnsibleModule + + +def query_package(module, name, root): + # rpm -q returns 0 if the package is installed, + # 1 if it is not installed + rpm_path = module.get_bin_path("rpm", True) + cmd = "%s -q %s %s" % (rpm_path, name, root_option(root)) + rc, stdout, stderr = module.run_command(cmd, check_rc=False) + if rc == 0: + return True + else: + return False + + +def query_package_provides(module, name, root): + # rpm -q returns 0 if the package is installed, + # 1 if it is not installed + rpm_path = module.get_bin_path("rpm", True) + cmd = "%s -q --whatprovides %s %s" % (rpm_path, name, root_option(root)) + rc, stdout, stderr = module.run_command(cmd, check_rc=False) + return rc == 0 + + +def update_package_db(module): + + urpmiupdate_path = module.get_bin_path("urpmi.update", True) + cmd = "%s -a -q" % (urpmiupdate_path,) + rc, stdout, stderr = module.run_command(cmd, check_rc=False) + if rc != 0: + module.fail_json(msg="could not update package db") + + +def remove_packages(module, packages, root): + + remove_c = 0 + # Using a for loop in case of error, we can report the package that failed + for package in packages: + # Query the package first, to see if we even need to remove + if not query_package(module, package, root): + continue + + urpme_path = module.get_bin_path("urpme", True) + cmd = "%s --auto %s %s" % (urpme_path, 
root_option(root), package) + rc, stdout, stderr = module.run_command(cmd, check_rc=False) + + if rc != 0: + module.fail_json(msg="failed to remove %s" % (package)) + + remove_c += 1 + + if remove_c > 0: + + module.exit_json(changed=True, msg="removed %s package(s)" % remove_c) + + module.exit_json(changed=False, msg="package(s) already absent") + + +def install_packages(module, pkgspec, root, force=True, no_recommends=True): + + packages = "" + for package in pkgspec: + if not query_package_provides(module, package, root): + packages += "'%s' " % package + + if len(packages) != 0: + if no_recommends: + no_recommends_yes = '--no-recommends' + else: + no_recommends_yes = '' + + if force: + force_yes = '--force' + else: + force_yes = '' + + urpmi_path = module.get_bin_path("urpmi", True) + cmd = ("%s --auto %s --quiet %s %s %s" % (urpmi_path, force_yes, + no_recommends_yes, + root_option(root), + packages)) + + rc, out, err = module.run_command(cmd) + + for package in pkgspec: + if not query_package_provides(module, package, root): + module.fail_json(msg="'urpmi %s' failed: %s" % (package, err)) + + # urpmi always have 0 for exit code if --force is used + if rc: + module.fail_json(msg="'urpmi %s' failed: %s" % (packages, err)) + else: + module.exit_json(changed=True, msg="%s present(s)" % packages) + else: + module.exit_json(changed=False) + + +def root_option(root): + if (root): + return "--root=%s" % (root) + else: + return "" + + +def main(): + module = AnsibleModule( + argument_spec=dict( + state=dict(type='str', default='installed', + choices=['absent', 'installed', 'present', 'removed']), + update_cache=dict(type='bool', default=False, aliases=['update-cache']), + force=dict(type='bool', default=True), + no_recommends=dict(type='bool', default=True, aliases=['no-recommends']), + name=dict(type='list', required=True, aliases=['package', 'pkg']), + root=dict(type='str', aliases=['installroot']), + ), + ) + + p = module.params + + if p['update_cache']: + update_package_db(module) + + if p['state'] in ['installed', 'present']: + install_packages(module, p['name'], p['root'], p['force'], p['no_recommends']) + + elif p['state'] in ['removed', 'absent']: + remove_packages(module, p['name'], p['root']) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/packaging/os/xbps.py b/plugins/modules/packaging/os/xbps.py new file mode 100644 index 0000000000..dd390cb235 --- /dev/null +++ b/plugins/modules/packaging/os/xbps.py @@ -0,0 +1,288 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright 2016 Dino Occhialini +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: xbps +short_description: Manage packages with XBPS +description: + - Manage packages with the XBPS package manager. +author: + - "Dino Occhialini (@dinoocch)" + - "Michael Aldridge (@the-maldridge)" +options: + name: + description: + - Name of the package to install, upgrade, or remove. + state: + description: + - Desired state of the package. + default: "present" + choices: ["present", "absent", "latest"] + recurse: + description: + - When removing a package, also remove its dependencies, provided + that they are not required by other packages and were not + explicitly installed by a user. 
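# Aside, not part of the module docs, on the urpmi module above: since
# 'urpmi --force' exits 0 even when installation fails, install_packages()
# there confirms success by re-querying each package with
# 'rpm -q --whatprovides <package>' rather than trusting the exit code.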
+ type: bool + default: 'no' + update_cache: + description: + - Whether or not to refresh the master package lists. This can be + run as part of a package installation or as a separate step. + type: bool + default: 'yes' + upgrade: + description: + - Whether or not to upgrade whole system + type: bool + default: 'no' +''' + +EXAMPLES = ''' +# Install package foo +- xbps: name=foo state=present +# Upgrade package foo +- xbps: name=foo state=latest update_cache=yes +# Remove packages foo and bar +- xbps: name=foo,bar state=absent +# Recursively remove package foo +- xbps: name=foo state=absent recurse=yes +# Update package cache +- xbps: update_cache=yes +# Upgrade packages +- xbps: upgrade=yes +''' + +RETURN = ''' +msg: + description: Message about results + returned: success + type: str + sample: "System Upgraded" +packages: + description: Packages that are affected/would be affected + type: list + sample: ["ansible"] + returned: success +''' + + +import os + +from ansible.module_utils.basic import AnsibleModule + + +def is_installed(xbps_output): + """Returns package install state""" + return bool(len(xbps_output)) + + +def query_package(module, xbps_path, name, state="present"): + """Returns Package info""" + if state == "present": + lcmd = "%s %s" % (xbps_path['query'], name) + lrc, lstdout, lstderr = module.run_command(lcmd, check_rc=False) + if not is_installed(lstdout): + # package is not installed locally + return False, False + + rcmd = "%s -Sun" % (xbps_path['install']) + rrc, rstdout, rstderr = module.run_command(rcmd, check_rc=False) + if rrc == 0 or rrc == 17: + """Return True to indicate that the package is installed locally, + and the result of the version number comparison to determine if the + package is up-to-date""" + return True, name not in rstdout + + return False, False + + +def update_package_db(module, xbps_path): + """Returns True if update_package_db changed""" + cmd = "%s -S" % (xbps_path['install']) + rc, stdout, stderr = module.run_command(cmd, check_rc=False) + + if rc != 0: + module.fail_json(msg="Could not update package db") + if "avg rate" in stdout: + return True + else: + return False + + +def upgrade(module, xbps_path): + """Returns true is full upgrade succeeds""" + cmdupgrade = "%s -uy" % (xbps_path['install']) + cmdneedupgrade = "%s -un" % (xbps_path['install']) + + rc, stdout, stderr = module.run_command(cmdneedupgrade, check_rc=False) + if rc == 0: + if(len(stdout.splitlines()) == 0): + module.exit_json(changed=False, msg='Nothing to upgrade') + else: + rc, stdout, stderr = module.run_command(cmdupgrade, check_rc=False) + if rc == 0: + module.exit_json(changed=True, msg='System upgraded') + else: + module.fail_json(msg="Could not upgrade") + else: + module.fail_json(msg="Could not upgrade") + + +def remove_packages(module, xbps_path, packages): + """Returns true if package removal succeeds""" + changed_packages = [] + # Using a for loop in case of error, we can report the package that failed + for package in packages: + # Query the package first, to see if we even need to remove + installed, updated = query_package(module, xbps_path, package) + if not installed: + continue + + cmd = "%s -y %s" % (xbps_path['remove'], package) + rc, stdout, stderr = module.run_command(cmd, check_rc=False) + + if rc != 0: + module.fail_json(msg="failed to remove %s" % (package)) + + changed_packages.append(package) + + if len(changed_packages) > 0: + + module.exit_json(changed=True, msg="removed %s package(s)" % + len(changed_packages), packages=changed_packages) + + 
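    # exit_json() raises SystemExit and never returns, so this fallback only
    # runs when none of the requested packages needed removing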
module.exit_json(changed=False, msg="package(s) already absent") + + +def install_packages(module, xbps_path, state, packages): + """Returns true if package install succeeds.""" + toInstall = [] + for i, package in enumerate(packages): + """If the package is installed and state == present or state == latest + and is up-to-date then skip""" + installed, updated = query_package(module, xbps_path, package) + if installed and (state == 'present' or + (state == 'latest' and updated)): + continue + + toInstall.append(package) + + if len(toInstall) == 0: + module.exit_json(changed=False, msg="Nothing to Install") + + cmd = "%s -y %s" % (xbps_path['install'], " ".join(toInstall)) + rc, stdout, stderr = module.run_command(cmd, check_rc=False) + + if rc != 0 and not (state == 'latest' and rc == 17): + module.fail_json(msg="failed to install %s" % (package)) + + module.exit_json(changed=True, msg="installed %s package(s)" + % (len(toInstall)), + packages=toInstall) + + module.exit_json(changed=False, msg="package(s) already installed", + packages=[]) + + +def check_packages(module, xbps_path, packages, state): + """Returns change status of command""" + would_be_changed = [] + for package in packages: + installed, updated = query_package(module, xbps_path, package) + if ((state in ["present", "latest"] and not installed) or + (state == "absent" and installed) or + (state == "latest" and not updated)): + would_be_changed.append(package) + if would_be_changed: + if state == "absent": + state = "removed" + module.exit_json(changed=True, msg="%s package(s) would be %s" % ( + len(would_be_changed), state), + packages=would_be_changed) + else: + module.exit_json(changed=False, msg="package(s) already %s" % state, + packages=[]) + + +def main(): + """Returns, calling appropriate command""" + + module = AnsibleModule( + argument_spec=dict( + name=dict(default=None, aliases=['pkg', 'package'], type='list'), + state=dict(default='present', choices=['present', 'installed', + 'latest', 'absent', + 'removed']), + recurse=dict(default=False, type='bool'), + force=dict(default=False, type='bool'), + upgrade=dict(default=False, type='bool'), + update_cache=dict(default=True, aliases=['update-cache'], + type='bool') + ), + required_one_of=[['name', 'update_cache', 'upgrade']], + supports_check_mode=True) + + xbps_path = dict() + xbps_path['install'] = module.get_bin_path('xbps-install', True) + xbps_path['query'] = module.get_bin_path('xbps-query', True) + xbps_path['remove'] = module.get_bin_path('xbps-remove', True) + + if not os.path.exists(xbps_path['install']): + module.fail_json(msg="cannot find xbps, in path %s" + % (xbps_path['install'])) + + p = module.params + + # normalize the state parameter + if p['state'] in ['present', 'installed']: + p['state'] = 'present' + elif p['state'] in ['absent', 'removed']: + p['state'] = 'absent' + + if p["update_cache"] and not module.check_mode: + changed = update_package_db(module, xbps_path) + if p['name'] is None and not p['upgrade']: + if changed: + module.exit_json(changed=True, + msg='Updated the package master lists') + else: + module.exit_json(changed=False, + msg='Package list already up to date') + + if (p['update_cache'] and module.check_mode and not + (p['name'] or p['upgrade'])): + module.exit_json(changed=True, + msg='Would have updated the package cache') + + if p['upgrade']: + upgrade(module, xbps_path) + + if p['name']: + pkgs = p['name'] + + if module.check_mode: + check_packages(module, xbps_path, pkgs, p['state']) + + if p['state'] in ['present', 
'latest']: + install_packages(module, xbps_path, p['state'], pkgs) + elif p['state'] == 'absent': + remove_packages(module, xbps_path, pkgs) + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/packaging/os/zypper.py b/plugins/modules/packaging/os/zypper.py new file mode 100644 index 0000000000..74d3c6b6e0 --- /dev/null +++ b/plugins/modules/packaging/os/zypper.py @@ -0,0 +1,531 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2013, Patrick Callahan +# based on +# openbsd_pkg +# (c) 2013 +# Patrik Lundin +# +# yum +# (c) 2012, Red Hat, Inc +# Written by Seth Vidal +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: zypper +author: + - "Patrick Callahan (@dirtyharrycallahan)" + - "Alexander Gubin (@alxgu)" + - "Thomas O'Donnell (@andytom)" + - "Robin Roth (@robinro)" + - "Andrii Radyk (@AnderEnder)" +short_description: Manage packages on SUSE and openSUSE +description: + - Manage packages on SUSE and openSUSE using the zypper and rpm tools. +options: + name: + description: + - Package name C(name) or package specifier or a list of either. + - Can include a version like C(name=1.0), C(name>3.4) or C(name<=2.7). If a version is given, C(oldpackage) is implied and zypper is allowed to + update the package within the version range given. + - You can also pass a url or a local path to a rpm file. + - When using state=latest, this can be '*', which updates all installed packages. + required: true + aliases: [ 'pkg' ] + state: + description: + - C(present) will make sure the package is installed. + C(latest) will make sure the latest version of the package is installed. + C(absent) will make sure the specified package is not installed. + C(dist-upgrade) will make sure the latest version of all installed packages from all enabled repositories is installed. + - When using C(dist-upgrade), I(name) should be C('*'). + required: false + choices: [ present, latest, absent, dist-upgrade ] + default: "present" + type: + description: + - The type of package to be operated on. + required: false + choices: [ package, patch, pattern, product, srcpackage, application ] + default: "package" + extra_args_precommand: + required: false + description: + - Add additional global target options to C(zypper). + - Options should be supplied in a single line as if given in the command line. + disable_gpg_check: + description: + - Whether to disable to GPG signature checking of the package + signature being installed. Has an effect only if state is + I(present) or I(latest). + required: false + default: "no" + type: bool + disable_recommends: + description: + - Corresponds to the C(--no-recommends) option for I(zypper). Default behavior (C(yes)) modifies zypper's default behavior; C(no) does + install recommended packages. + required: false + default: "yes" + type: bool + force: + description: + - Adds C(--force) option to I(zypper). Allows to downgrade packages and change vendor or architecture. + required: false + default: "no" + type: bool + force_resolution: + description: + - Adds C(--force-resolution) option to I(zypper). Allows to (un)install packages with conflicting requirements (resolver will choose a solution). 
+ required: false + default: "no" + type: bool + update_cache: + description: + - Run the equivalent of C(zypper refresh) before the operation. Disabled in check mode. + required: false + default: "no" + type: bool + aliases: [ "refresh" ] + oldpackage: + description: + - Adds C(--oldpackage) option to I(zypper). Allows to downgrade packages with less side-effects than force. This is implied as soon as a + version is specified as part of the package name. + required: false + default: "no" + type: bool + extra_args: + required: false + description: + - Add additional options to C(zypper) command. + - Options should be supplied in a single line as if given in the command line. +notes: + - When used with a `loop:` each package will be processed individually, + it is much more efficient to pass the list directly to the `name` option. +# informational: requirements for nodes +requirements: + - "zypper >= 1.0 # included in openSUSE >= 11.1 or SUSE Linux Enterprise Server/Desktop >= 11.0" + - python-xml + - rpm +''' + +EXAMPLES = ''' +# Install "nmap" +- zypper: + name: nmap + state: present + +# Install apache2 with recommended packages +- zypper: + name: apache2 + state: present + disable_recommends: no + +# Apply a given patch +- zypper: + name: openSUSE-2016-128 + state: present + type: patch + +# Remove the "nmap" package +- zypper: + name: nmap + state: absent + +# Install the nginx rpm from a remote repo +- zypper: + name: 'http://nginx.org/packages/sles/12/x86_64/RPMS/nginx-1.8.0-1.sles12.ngx.x86_64.rpm' + state: present + +# Install local rpm file +- zypper: + name: /tmp/fancy-software.rpm + state: present + +# Update all packages +- zypper: + name: '*' + state: latest + +# Apply all available patches +- zypper: + name: '*' + state: latest + type: patch + +# Perform a dist-upgrade with additional arguments +- zypper: + name: '*' + state: dist-upgrade + extra_args: '--no-allow-vendor-change --allow-arch-change' + +# Refresh repositories and update package "openssl" +- zypper: + name: openssl + state: present + update_cache: yes + +# Install specific version (possible comparisons: <, >, <=, >=, =) +- zypper: + name: 'docker>=1.10' + state: present + +# Wait 20 seconds to acquire the lock before failing +- zypper: + name: mosh + state: present + environment: + ZYPP_LOCK_TIMEOUT: 20 +''' + +import xml +import re +from xml.dom.minidom import parseString as parseXML +from ansible.module_utils.six import iteritems +from ansible.module_utils._text import to_native + +# import module snippets +from ansible.module_utils.basic import AnsibleModule + + +class Package: + def __init__(self, name, prefix, version): + self.name = name + self.prefix = prefix + self.version = version + self.shouldinstall = (prefix == '+') + + def __str__(self): + return self.prefix + self.name + self.version + + +def split_name_version(name): + """splits of the package name and desired version + + example formats: + - docker>=1.10 + - apache=2.4 + + Allowed version specifiers: <, >, <=, >=, = + Allowed version format: [0-9.-]* + + Also allows a prefix indicating remove "-", "~" or install "+" + """ + + prefix = '' + if name[0] in ['-', '~', '+']: + prefix = name[0] + name = name[1:] + if prefix == '~': + prefix = '-' + + version_check = re.compile('^(.*?)((?:<|>|<=|>=|=)[0-9.-]*)?$') + try: + reres = version_check.match(name) + name, version = reres.groups() + if version is None: + version = '' + return prefix, name, version + except Exception: + return prefix, name, '' + + +def get_want_state(names, remove=False): + 
packages = [] + urls = [] + for name in names: + if '://' in name or name.endswith('.rpm'): + urls.append(name) + else: + prefix, pname, version = split_name_version(name) + if prefix not in ['-', '+']: + if remove: + prefix = '-' + else: + prefix = '+' + packages.append(Package(pname, prefix, version)) + return packages, urls + + +def get_installed_state(m, packages): + "get installed state of packages" + + cmd = get_cmd(m, 'search') + cmd.extend(['--match-exact', '--details', '--installed-only']) + cmd.extend([p.name for p in packages]) + return parse_zypper_xml(m, cmd, fail_not_found=False)[0] + + +def parse_zypper_xml(m, cmd, fail_not_found=True, packages=None): + rc, stdout, stderr = m.run_command(cmd, check_rc=False) + + try: + dom = parseXML(stdout) + except xml.parsers.expat.ExpatError as exc: + m.fail_json(msg="Failed to parse zypper xml output: %s" % to_native(exc), + rc=rc, stdout=stdout, stderr=stderr, cmd=cmd) + + if rc == 104: + # exit code 104 is ZYPPER_EXIT_INF_CAP_NOT_FOUND (no packages found) + if fail_not_found: + errmsg = dom.getElementsByTagName('message')[-1].childNodes[0].data + m.fail_json(msg=errmsg, rc=rc, stdout=stdout, stderr=stderr, cmd=cmd) + else: + return {}, rc, stdout, stderr + elif rc in [0, 106, 103]: + # zypper exit codes + # 0: success + # 106: signature verification failed + # 103: zypper was upgraded, run same command again + if packages is None: + firstrun = True + packages = {} + solvable_list = dom.getElementsByTagName('solvable') + for solvable in solvable_list: + name = solvable.getAttribute('name') + packages[name] = {} + packages[name]['version'] = solvable.getAttribute('edition') + packages[name]['oldversion'] = solvable.getAttribute('edition-old') + status = solvable.getAttribute('status') + packages[name]['installed'] = status == "installed" + packages[name]['group'] = solvable.parentNode.nodeName + if rc == 103 and firstrun: + # if this was the first run and it failed with 103 + # run zypper again with the same command to complete update + return parse_zypper_xml(m, cmd, fail_not_found=fail_not_found, packages=packages) + + return packages, rc, stdout, stderr + m.fail_json(msg='Zypper run command failed with return code %s.' 
% rc, rc=rc, stdout=stdout, stderr=stderr, cmd=cmd) + + +def get_cmd(m, subcommand): + "puts together the basic zypper command arguments with those passed to the module" + is_install = subcommand in ['install', 'update', 'patch', 'dist-upgrade'] + is_refresh = subcommand == 'refresh' + cmd = ['/usr/bin/zypper', '--quiet', '--non-interactive', '--xmlout'] + if m.params['extra_args_precommand']: + args_list = m.params['extra_args_precommand'].split() + cmd.extend(args_list) + # add global options before zypper command + if (is_install or is_refresh) and m.params['disable_gpg_check']: + cmd.append('--no-gpg-checks') + + if subcommand == 'search': + cmd.append('--disable-repositories') + + cmd.append(subcommand) + if subcommand not in ['patch', 'dist-upgrade'] and not is_refresh: + cmd.extend(['--type', m.params['type']]) + if m.check_mode and subcommand != 'search': + cmd.append('--dry-run') + if is_install: + cmd.append('--auto-agree-with-licenses') + if m.params['disable_recommends']: + cmd.append('--no-recommends') + if m.params['force']: + cmd.append('--force') + if m.params['force_resolution']: + cmd.append('--force-resolution') + if m.params['oldpackage']: + cmd.append('--oldpackage') + if m.params['extra_args']: + args_list = m.params['extra_args'].split(' ') + cmd.extend(args_list) + + return cmd + + +def set_diff(m, retvals, result): + # TODO: if there is only one package, set before/after to version numbers + packages = {'installed': [], 'removed': [], 'upgraded': []} + if result: + for p in result: + group = result[p]['group'] + if group == 'to-upgrade': + versions = ' (' + result[p]['oldversion'] + ' => ' + result[p]['version'] + ')' + packages['upgraded'].append(p + versions) + elif group == 'to-install': + packages['installed'].append(p) + elif group == 'to-remove': + packages['removed'].append(p) + + output = '' + for state in packages: + if packages[state]: + output += state + ': ' + ', '.join(packages[state]) + '\n' + if 'diff' not in retvals: + retvals['diff'] = {} + if 'prepared' not in retvals['diff']: + retvals['diff']['prepared'] = output + else: + retvals['diff']['prepared'] += '\n' + output + + +def package_present(m, name, want_latest): + "install and update (if want_latest) the packages in name_install, while removing the packages in name_remove" + retvals = {'rc': 0, 'stdout': '', 'stderr': ''} + packages, urls = get_want_state(name) + + # add oldpackage flag when a version is given to allow downgrades + if any(p.version for p in packages): + m.params['oldpackage'] = True + + if not want_latest: + # for state=present: filter out already installed packages + # if a version is given leave the package in to let zypper handle the version + # resolution + packageswithoutversion = [p for p in packages if not p.version] + prerun_state = get_installed_state(m, packageswithoutversion) + # generate lists of packages to install or remove + packages = [p for p in packages if p.shouldinstall != (p.name in prerun_state)] + + if not packages and not urls: + # nothing to install/remove and nothing to update + return None, retvals + + # zypper install also updates packages + cmd = get_cmd(m, 'install') + cmd.append('--') + cmd.extend(urls) + # pass packages to zypper + # allow for + or - prefixes in install/remove lists + # also add version specifier if given + # do this in one zypper run to allow for dependency-resolution + # for example "-exim postfix" runs without removing packages depending on mailserver + cmd.extend([str(p) for p in packages]) + + retvals['cmd'] = cmd + result, 
retvals['rc'], retvals['stdout'], retvals['stderr'] = parse_zypper_xml(m, cmd) + + return result, retvals + + +def package_update_all(m): + "run update or patch on all available packages" + + retvals = {'rc': 0, 'stdout': '', 'stderr': ''} + if m.params['type'] == 'patch': + cmdname = 'patch' + elif m.params['state'] == 'dist-upgrade': + cmdname = 'dist-upgrade' + else: + cmdname = 'update' + + cmd = get_cmd(m, cmdname) + retvals['cmd'] = cmd + result, retvals['rc'], retvals['stdout'], retvals['stderr'] = parse_zypper_xml(m, cmd) + return result, retvals + + +def package_absent(m, name): + "remove the packages in name" + retvals = {'rc': 0, 'stdout': '', 'stderr': ''} + # Get package state + packages, urls = get_want_state(name, remove=True) + if any(p.prefix == '+' for p in packages): + m.fail_json(msg="Can not combine '+' prefix with state=remove/absent.") + if urls: + m.fail_json(msg="Can not remove via URL.") + if m.params['type'] == 'patch': + m.fail_json(msg="Can not remove patches.") + prerun_state = get_installed_state(m, packages) + packages = [p for p in packages if p.name in prerun_state] + + if not packages: + return None, retvals + + cmd = get_cmd(m, 'remove') + cmd.extend([p.name + p.version for p in packages]) + + retvals['cmd'] = cmd + result, retvals['rc'], retvals['stdout'], retvals['stderr'] = parse_zypper_xml(m, cmd) + return result, retvals + + +def repo_refresh(m): + "update the repositories" + retvals = {'rc': 0, 'stdout': '', 'stderr': ''} + + cmd = get_cmd(m, 'refresh') + + retvals['cmd'] = cmd + result, retvals['rc'], retvals['stdout'], retvals['stderr'] = parse_zypper_xml(m, cmd) + + return retvals + +# =========================================== +# Main control flow + + +def main(): + module = AnsibleModule( + argument_spec=dict( + name=dict(required=True, aliases=['pkg'], type='list'), + state=dict(required=False, default='present', choices=['absent', 'installed', 'latest', 'present', 'removed', 'dist-upgrade']), + type=dict(required=False, default='package', choices=['package', 'patch', 'pattern', 'product', 'srcpackage', 'application']), + extra_args_precommand=dict(required=False, default=None), + disable_gpg_check=dict(required=False, default='no', type='bool'), + disable_recommends=dict(required=False, default='yes', type='bool'), + force=dict(required=False, default='no', type='bool'), + force_resolution=dict(required=False, default='no', type='bool'), + update_cache=dict(required=False, aliases=['refresh'], default='no', type='bool'), + oldpackage=dict(required=False, default='no', type='bool'), + extra_args=dict(required=False, default=None), + ), + supports_check_mode=True + ) + + name = module.params['name'] + state = module.params['state'] + update_cache = module.params['update_cache'] + + # remove empty strings from package list + name = list(filter(None, name)) + + # Refresh repositories + if update_cache and not module.check_mode: + retvals = repo_refresh(module) + + if retvals['rc'] != 0: + module.fail_json(msg="Zypper refresh run failed.", **retvals) + + # Perform requested action + if name == ['*'] and state in ['latest', 'dist-upgrade']: + packages_changed, retvals = package_update_all(module) + elif name != ['*'] and state == 'dist-upgrade': + module.fail_json(msg="Can not dist-upgrade specific packages.") + else: + if state in ['absent', 'removed']: + packages_changed, retvals = package_absent(module, name) + elif state in ['installed', 'present', 'latest']: + packages_changed, retvals = package_present(module, name, state == 'latest') + + 
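+    # Report a change only when zypper exited cleanly and at least one
+    # package was actually affected.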
retvals['changed'] = retvals['rc'] == 0 and bool(packages_changed) + + if module._diff: + set_diff(module, retvals, packages_changed) + + if retvals['rc'] != 0: + module.fail_json(msg="Zypper run failed.", **retvals) + + if not retvals['changed']: + del retvals['stdout'] + del retvals['stderr'] + + module.exit_json(name=name, state=state, update_cache=update_cache, **retvals) + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/packaging/os/zypper_repository.py b/plugins/modules/packaging/os/zypper_repository.py new file mode 100644 index 0000000000..0948ed8d1f --- /dev/null +++ b/plugins/modules/packaging/os/zypper_repository.py @@ -0,0 +1,391 @@ +#!/usr/bin/python +# encoding: utf-8 + +# (c) 2013, Matthias Vogelgesang +# (c) 2014, Justin Lecher +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: zypper_repository +author: "Matthias Vogelgesang (@matze)" +short_description: Add and remove Zypper repositories +description: + - Add or remove Zypper repositories on SUSE and openSUSE +options: + name: + description: + - A name for the repository. Not required when adding repofiles. + repo: + description: + - URI of the repository or .repo file. Required when state=present. + state: + description: + - A source string state. + choices: [ "absent", "present" ] + default: "present" + description: + description: + - A description of the repository + disable_gpg_check: + description: + - Whether to disable GPG signature checking of + all packages. Has an effect only if state is + I(present). + - Needs zypper version >= 1.6.2. + type: bool + default: 'no' + autorefresh: + description: + - Enable autorefresh of the repository. + type: bool + default: 'yes' + aliases: [ "refresh" ] + priority: + description: + - Set priority of repository. Packages will always be installed + from the repository with the smallest priority number. + - Needs zypper version >= 1.12.25. + overwrite_multiple: + description: + - Overwrite multiple repository entries, if repositories with both name and + URL already exist. + type: bool + default: 'no' + auto_import_keys: + description: + - Automatically import the gpg signing key of the new or changed repository. + - Has an effect only if state is I(present). Has no effect on existing (unchanged) repositories or in combination with I(absent). + - Implies runrefresh. + - Only works with C(.repo) files if `name` is given explicitly. + type: bool + default: 'no' + runrefresh: + description: + - Refresh the package list of the given repository. + - Can be used with repo=* to refresh all repositories. + type: bool + default: 'no' + enabled: + description: + - Set repository to enabled (or disabled). 
+    type: bool
+    default: 'yes'
+
+
+requirements:
+    - "zypper >= 1.0  # included in openSUSE >= 11.1 or SUSE Linux Enterprise Server/Desktop >= 11.0"
+    - python-xml
+'''
+
+EXAMPLES = '''
+# Add NVIDIA repository for graphics drivers
+- zypper_repository:
+    name: nvidia-repo
+    repo: 'ftp://download.nvidia.com/opensuse/12.2'
+    state: present
+
+# Remove NVIDIA repository
+- zypper_repository:
+    name: nvidia-repo
+    repo: 'ftp://download.nvidia.com/opensuse/12.2'
+    state: absent
+
+# Add python development repository
+- zypper_repository:
+    repo: 'http://download.opensuse.org/repositories/devel:/languages:/python/SLE_11_SP3/devel:languages:python.repo'
+
+# Refresh all repos
+- zypper_repository:
+    repo: '*'
+    runrefresh: yes
+
+# Add a repo and import its GPG key
+- zypper_repository:
+    repo: 'http://download.opensuse.org/repositories/systemsmanagement/openSUSE_Leap_42.1/'
+    auto_import_keys: yes
+
+# Force refresh of a repository
+- zypper_repository:
+    repo: 'http://my_internal_ci_repo/repo'
+    name: my_ci_repo
+    state: present
+    runrefresh: yes
+'''
+
+from distutils.version import LooseVersion
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+REPO_OPTS = ['alias', 'name', 'priority', 'enabled', 'autorefresh', 'gpgcheck']
+
+
+def _get_cmd(*args):
+    """Combines the non-interactive zypper command with arguments/subcommands"""
+    cmd = ['/usr/bin/zypper', '--quiet', '--non-interactive']
+    cmd.extend(args)
+
+    return cmd
+
+
+def _parse_repos(module):
+    """Parses the output of `zypper --xmlout repos` and returns a list of parsed repos"""
+    cmd = _get_cmd('--xmlout', 'repos')
+
+    from xml.dom.minidom import parseString as parseXML
+    rc, stdout, stderr = module.run_command(cmd, check_rc=False)
+    if rc == 0:
+        repos = []
+        dom = parseXML(stdout)
+        repo_list = dom.getElementsByTagName('repo')
+        for repo in repo_list:
+            opts = {}
+            for o in REPO_OPTS:
+                opts[o] = repo.getAttribute(o)
+            opts['url'] = repo.getElementsByTagName('url')[0].firstChild.data
+            # A repo can be uniquely identified by an alias + url
+            repos.append(opts)
+        return repos
+    # exit code 6 is ZYPPER_EXIT_NO_REPOS (no repositories defined)
+    elif rc == 6:
+        return []
+    else:
+        module.fail_json(msg='Failed to execute "%s"' % " ".join(cmd), rc=rc, stdout=stdout, stderr=stderr)
+
+
+def _repo_changes(realrepo, repocmp):
+    "Check whether the two given repos have different settings."
+    for k in repocmp:
+        if repocmp[k] and k not in realrepo:
+            return True
+
+    for k, v in realrepo.items():
+        if k in repocmp and repocmp[k]:
+            valold = str(repocmp[k] or "")
+            valnew = v or ""
+            if k == "url":
+                valold, valnew = valold.rstrip("/"), valnew.rstrip("/")
+            if valold != valnew:
+                return True
+    return False
+
+
+def repo_exists(module, repodata, overwrite_multiple):
+    """Check whether the repository already exists.
+ + returns (exists, mod, old_repos) + exists: whether a matching (name, URL) repo exists + mod: whether there are changes compared to the existing repo + old_repos: list of matching repos + """ + existing_repos = _parse_repos(module) + + # look for repos that have matching alias or url to the one searched + repos = [] + for kw in ['alias', 'url']: + name = repodata[kw] + for oldr in existing_repos: + if repodata[kw] == oldr[kw] and oldr not in repos: + repos.append(oldr) + + if len(repos) == 0: + # Repo does not exist yet + return (False, False, None) + elif len(repos) == 1: + # Found an existing repo, look for changes + has_changes = _repo_changes(repos[0], repodata) + return (True, has_changes, repos) + elif len(repos) >= 2: + if overwrite_multiple: + # Found two repos and want to overwrite_multiple + return (True, True, repos) + else: + errmsg = 'More than one repo matched "%s": "%s".' % (name, repos) + errmsg += ' Use overwrite_multiple to allow more than one repo to be overwritten' + module.fail_json(msg=errmsg) + + +def addmodify_repo(module, repodata, old_repos, zypper_version, warnings): + "Adds the repo, removes old repos before, that would conflict." + repo = repodata['url'] + cmd = _get_cmd('addrepo', '--check') + if repodata['name']: + cmd.extend(['--name', repodata['name']]) + + # priority on addrepo available since 1.12.25 + # https://github.com/openSUSE/zypper/blob/b9b3cb6db76c47dc4c47e26f6a4d2d4a0d12b06d/package/zypper.changes#L327-L336 + if repodata['priority']: + if zypper_version >= LooseVersion('1.12.25'): + cmd.extend(['--priority', str(repodata['priority'])]) + else: + warnings.append("Setting priority only available for zypper >= 1.12.25. Ignoring priority argument.") + + if repodata['enabled'] == '0': + cmd.append('--disable') + + # gpgcheck available since 1.6.2 + # https://github.com/openSUSE/zypper/blob/b9b3cb6db76c47dc4c47e26f6a4d2d4a0d12b06d/package/zypper.changes#L2446-L2449 + # the default changed in the past, so don't assume a default here and show warning for old zypper versions + if zypper_version >= LooseVersion('1.6.2'): + if repodata['gpgcheck'] == '1': + cmd.append('--gpgcheck') + else: + cmd.append('--no-gpgcheck') + else: + warnings.append("Enabling/disabling gpgcheck only available for zypper >= 1.6.2. Using zypper default value.") + + if repodata['autorefresh'] == '1': + cmd.append('--refresh') + + cmd.append(repo) + + if not repo.endswith('.repo'): + cmd.append(repodata['alias']) + + if old_repos is not None: + for oldrepo in old_repos: + remove_repo(module, oldrepo['url']) + + rc, stdout, stderr = module.run_command(cmd, check_rc=False) + return rc, stdout, stderr + + +def remove_repo(module, repo): + "Removes the repo." + cmd = _get_cmd('removerepo', repo) + + rc, stdout, stderr = module.run_command(cmd, check_rc=True) + return rc, stdout, stderr + + +def get_zypper_version(module): + rc, stdout, stderr = module.run_command(['/usr/bin/zypper', '--version']) + if rc != 0 or not stdout.startswith('zypper '): + return LooseVersion('1.0') + return LooseVersion(stdout.split()[1]) + + +def runrefreshrepo(module, auto_import_keys=False, shortname=None): + "Forces zypper to refresh repo metadata." 
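+    # --gpg-auto-import-keys is a global zypper option, so it has to precede
+    # the 'refresh' subcommand; '--force' re-downloads repo metadata even
+    # when it is already current.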
+ if auto_import_keys: + cmd = _get_cmd('--gpg-auto-import-keys', 'refresh', '--force') + else: + cmd = _get_cmd('refresh', '--force') + if shortname is not None: + cmd.extend(['-r', shortname]) + + rc, stdout, stderr = module.run_command(cmd, check_rc=True) + return rc, stdout, stderr + + +def main(): + module = AnsibleModule( + argument_spec=dict( + name=dict(required=False), + repo=dict(required=False), + state=dict(choices=['present', 'absent'], default='present'), + runrefresh=dict(required=False, default='no', type='bool'), + description=dict(required=False), + disable_gpg_check=dict(required=False, default=False, type='bool'), + autorefresh=dict(required=False, default=True, type='bool', aliases=['refresh']), + priority=dict(required=False, type='int'), + enabled=dict(required=False, default=True, type='bool'), + overwrite_multiple=dict(required=False, default=False, type='bool'), + auto_import_keys=dict(required=False, default=False, type='bool'), + ), + supports_check_mode=False, + required_one_of=[['state', 'runrefresh']], + ) + + repo = module.params['repo'] + alias = module.params['name'] + state = module.params['state'] + overwrite_multiple = module.params['overwrite_multiple'] + auto_import_keys = module.params['auto_import_keys'] + runrefresh = module.params['runrefresh'] + + zypper_version = get_zypper_version(module) + warnings = [] # collect warning messages for final output + + repodata = { + 'url': repo, + 'alias': alias, + 'name': module.params['description'], + 'priority': module.params['priority'], + } + # rewrite bools in the language that zypper lr -x provides for easier comparison + if module.params['enabled']: + repodata['enabled'] = '1' + else: + repodata['enabled'] = '0' + if module.params['disable_gpg_check']: + repodata['gpgcheck'] = '0' + else: + repodata['gpgcheck'] = '1' + if module.params['autorefresh']: + repodata['autorefresh'] = '1' + else: + repodata['autorefresh'] = '0' + + def exit_unchanged(): + module.exit_json(changed=False, repodata=repodata, state=state) + + # Check run-time module parameters + if repo == '*' or alias == '*': + if runrefresh: + runrefreshrepo(module, auto_import_keys) + module.exit_json(changed=False, runrefresh=True) + else: + module.fail_json(msg='repo=* can only be used with the runrefresh option.') + + if state == 'present' and not repo: + module.fail_json(msg='Module option state=present requires repo') + if state == 'absent' and not repo and not alias: + module.fail_json(msg='Alias or repo parameter required when state=absent') + + if repo and repo.endswith('.repo'): + if alias: + module.fail_json(msg='Incompatible option: \'name\'. 
Do not use name when adding .repo files') + else: + if not alias and state == "present": + module.fail_json(msg='Name required when adding non-repo files.') + + exists, mod, old_repos = repo_exists(module, repodata, overwrite_multiple) + + if repo: + shortname = repo + else: + shortname = alias + + if state == 'present': + if exists and not mod: + if runrefresh: + runrefreshrepo(module, auto_import_keys, shortname) + exit_unchanged() + rc, stdout, stderr = addmodify_repo(module, repodata, old_repos, zypper_version, warnings) + if rc == 0 and (runrefresh or auto_import_keys): + runrefreshrepo(module, auto_import_keys, shortname) + elif state == 'absent': + if not exists: + exit_unchanged() + rc, stdout, stderr = remove_repo(module, shortname) + + if rc == 0: + module.exit_json(changed=True, repodata=repodata, state=state, warnings=warnings) + else: + module.fail_json(msg="Zypper failed with rc %s" % rc, rc=rc, stdout=stdout, stderr=stderr, repodata=repodata, state=state, warnings=warnings) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/remote_management/cobbler/cobbler_sync.py b/plugins/modules/remote_management/cobbler/cobbler_sync.py new file mode 100644 index 0000000000..67e61e40c6 --- /dev/null +++ b/plugins/modules/remote_management/cobbler/cobbler_sync.py @@ -0,0 +1,141 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2018, Dag Wieers (dagwieers) +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = r''' +--- +module: cobbler_sync +short_description: Sync Cobbler +description: +- Sync Cobbler to commit changes. +options: + host: + description: + - The name or IP address of the Cobbler system. + default: 127.0.0.1 + port: + description: + - Port number to be used for REST connection. + - The default value depends on parameter C(use_ssl). + username: + description: + - The username to log in to Cobbler. + default: cobbler + password: + description: + - The password to log in to Cobbler. + required: yes + use_ssl: + description: + - If C(no), an HTTP connection will be used instead of the default HTTPS connection. + type: bool + default: 'yes' + validate_certs: + description: + - If C(no), SSL certificates will not be validated. + - This should only set to C(no) when used on personally controlled sites using self-signed certificates. + type: bool + default: 'yes' +author: +- Dag Wieers (@dagwieers) +todo: +notes: +- Concurrently syncing Cobbler is bound to fail with weird errors. +- On python 2.7.8 and older (i.e. on RHEL7) you may need to tweak the python behaviour to disable certificate validation. + More information at L(Certificate verification in Python standard library HTTP clients,https://access.redhat.com/articles/2039753). 
+'''
+
+EXAMPLES = r'''
+- name: Commit Cobbler changes
+  cobbler_sync:
+    host: cobbler01
+    username: cobbler
+    password: MySuperSecureP4sswOrd
+  run_once: yes
+  delegate_to: localhost
+'''
+
+RETURN = r'''
+# Default return values
+'''
+
+import datetime
+import ssl
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves import xmlrpc_client
+from ansible.module_utils._text import to_text
+
+
+def main():
+    module = AnsibleModule(
+        argument_spec=dict(
+            host=dict(type='str', default='127.0.0.1'),
+            port=dict(type='int'),
+            username=dict(type='str', default='cobbler'),
+            password=dict(type='str', no_log=True),
+            use_ssl=dict(type='bool', default=True),
+            validate_certs=dict(type='bool', default=True),
+        ),
+        supports_check_mode=True,
+    )
+
+    username = module.params['username']
+    password = module.params['password']
+    port = module.params['port']
+    use_ssl = module.params['use_ssl']
+    validate_certs = module.params['validate_certs']
+
+    module.params['proto'] = 'https' if use_ssl else 'http'
+    if not port:
+        module.params['port'] = '443' if use_ssl else '80'
+
+    result = dict(
+        changed=True,
+    )
+
+    start = datetime.datetime.utcnow()
+
+    ssl_context = None
+    if not validate_certs:
+        try:  # Python 2.7.9 and newer
+            ssl_context = ssl._create_unverified_context()
+        except AttributeError:
+            # Python 2.7.8 and older do not verify HTTPS certificates by
+            # default, so nothing needs to be disabled there.
+            pass
+
+    url = '{proto}://{host}:{port}/cobbler_api'.format(**module.params)
+    if ssl_context:
+        conn = xmlrpc_client.ServerProxy(url, context=ssl_context)
+    else:
+        conn = xmlrpc_client.Server(url)
+
+    try:
+        token = conn.login(username, password)
+    except xmlrpc_client.Fault as e:
+        module.fail_json(msg="Failed to log in to Cobbler '{url}' as '{username}'. {error}".format(url=url, error=to_text(e), **module.params))
+    except Exception as e:
+        module.fail_json(msg="Connection to '{url}' failed. {error}".format(url=url, error=to_text(e)))
+
+    if not module.check_mode:
+        try:
+            conn.sync(token)
+        except Exception as e:
+            module.fail_json(msg="Failed to sync Cobbler. {error}".format(error=to_text(e)))
+
+    elapsed = datetime.datetime.utcnow() - start
+    module.exit_json(elapsed=elapsed.seconds, **result)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/remote_management/cobbler/cobbler_system.py b/plugins/modules/remote_management/cobbler/cobbler_system.py
new file mode 100644
index 0000000000..441eb0f787
--- /dev/null
+++ b/plugins/modules/remote_management/cobbler/cobbler_system.py
@@ -0,0 +1,336 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2018, Dag Wieers (dagwieers)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+DOCUMENTATION = r'''
+---
+module: cobbler_system
+short_description: Manage system objects in Cobbler
+description:
+- Add, modify or remove systems in Cobbler
+options:
+  host:
+    description:
+    - The name or IP address of the Cobbler system.
+    default: 127.0.0.1
+  port:
+    description:
+    - Port number to be used for REST connection.
+    - The default value depends on parameter C(use_ssl).
+  username:
+    description:
+    - The username to log in to Cobbler.
+    default: cobbler
+  password:
+    description:
+    - The password to log in to Cobbler.
+    required: yes
+  use_ssl:
+    description:
+    - If C(no), an HTTP connection will be used instead of the default HTTPS connection.
+    type: bool
+    default: 'yes'
+  validate_certs:
+    description:
+    - If C(no), SSL certificates will not be validated.
+    - This should only be set to C(no) when used on personally controlled sites using self-signed certificates.
+    type: bool
+    default: 'yes'
+  name:
+    description:
+    - The system name to manage.
+  properties:
+    description:
+    - A dictionary with system properties.
+  interfaces:
+    description:
+    - A list of dictionaries containing interface options.
+  sync:
+    description:
+    - Sync on changes.
+    - Concurrently syncing Cobbler is bound to fail.
+    type: bool
+    default: no
+  state:
+    description:
+    - Whether the system should be present, absent or queried.
+    choices: [ absent, present, query ]
+    default: present
+author:
+- Dag Wieers (@dagwieers)
+notes:
+- Concurrently syncing Cobbler is bound to fail with weird errors.
+- On python 2.7.8 and older (i.e. on RHEL7) you may need to tweak the python behaviour to disable certificate validation.
+  More information at L(Certificate verification in Python standard library HTTP clients,https://access.redhat.com/articles/2039753).
+'''
+
+EXAMPLES = r'''
+- name: Ensure the system exists in Cobbler
+  cobbler_system:
+    host: cobbler01
+    username: cobbler
+    password: MySuperSecureP4sswOrd
+    name: myhost
+    properties:
+      profile: CentOS6-x86_64
+      name_servers: [ 2.3.4.5, 3.4.5.6 ]
+      name_servers_search: foo.com, bar.com
+    interfaces:
+      eth0:
+        macaddress: 00:01:02:03:04:05
+        ipaddress: 1.2.3.4
+  delegate_to: localhost
+
+- name: Enable network boot in Cobbler
+  cobbler_system:
+    host: bdsol-aci-cobbler-01
+    username: cobbler
+    password: ins3965!
+ name: bdsol-aci51-apic1.cisco.com + properties: + netboot_enabled: yes + state: present + delegate_to: localhost + +- name: Query all systems in Cobbler + cobbler_system: + host: cobbler01 + username: cobbler + password: MySuperSecureP4sswOrd + state: query + register: cobbler_systems + delegate_to: localhost + +- name: Query a specific system in Cobbler + cobbler_system: + host: cobbler01 + username: cobbler + password: MySuperSecureP4sswOrd + name: '{{ inventory_hostname }}' + state: query + register: cobbler_properties + delegate_to: localhost + +- name: Ensure the system does not exist in Cobbler + cobbler_system: + host: cobbler01 + username: cobbler + password: MySuperSecureP4sswOrd + name: myhost + state: absent + delegate_to: localhost +''' + +RETURN = r''' +systems: + description: List of systems + returned: C(state=query) and C(name) is not provided + type: list +system: + description: (Resulting) information about the system we are working with + returned: when C(name) is provided + type: dict +''' + +import copy +import datetime +import ssl + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.six import iteritems +from ansible.module_utils.six.moves import xmlrpc_client +from ansible.module_utils._text import to_text + +IFPROPS_MAPPING = dict( + bondingopts='bonding_opts', + bridgeopts='bridge_opts', + connected_mode='connected_mode', + cnames='cnames', + dhcptag='dhcp_tag', + dnsname='dns_name', + ifgateway='if_gateway', + interfacetype='interface_type', + interfacemaster='interface_master', + ipaddress='ip_address', + ipv6address='ipv6_address', + ipv6defaultgateway='ipv6_default_gateway', + ipv6mtu='ipv6_mtu', + ipv6prefix='ipv6_prefix', + ipv6secondaries='ipv6_secondariesu', + ipv6staticroutes='ipv6_static_routes', + macaddress='mac_address', + management='management', + mtu='mtu', + netmask='netmask', + static='static', + staticroutes='static_routes', + virtbridge='virt_bridge', +) + + +def getsystem(conn, name, token): + system = dict() + if name: + # system = conn.get_system(name, token) + systems = conn.find_system(dict(name=name), token) + if systems: + system = systems[0] + return system + + +def main(): + module = AnsibleModule( + argument_spec=dict( + host=dict(type='str', default='127.0.0.1'), + port=dict(type='int'), + username=dict(type='str', default='cobbler'), + password=dict(type='str', no_log=True), + use_ssl=dict(type='bool', default=True), + validate_certs=dict(type='bool', default=True), + name=dict(type='str'), + interfaces=dict(type='dict'), + properties=dict(type='dict'), + sync=dict(type='bool', default=False), + state=dict(type='str', default='present', choices=['absent', 'present', 'query']), + ), + supports_check_mode=True, + ) + + username = module.params['username'] + password = module.params['password'] + port = module.params['port'] + use_ssl = module.params['use_ssl'] + validate_certs = module.params['validate_certs'] + + name = module.params['name'] + state = module.params['state'] + + module.params['proto'] = 'https' if use_ssl else 'http' + if not port: + module.params['port'] = '443' if use_ssl else '80' + + result = dict( + changed=False, + ) + + start = datetime.datetime.utcnow() + + ssl_context = None + if not validate_certs: + try: # Python 2.7.9 and newer + ssl_context = ssl.create_unverified_context() + except AttributeError: # Legacy Python that doesn't verify HTTPS certificates by default + ssl._create_default_context = ssl._create_unverified_context + else: # Python 2.7.8 and older + 
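+            # This assignment makes urllib skip certificate verification for
+            # every HTTPS request in the process, not just this connection.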
ssl._create_default_https_context = ssl._create_unverified_https_context + + url = '{proto}://{host}:{port}/cobbler_api'.format(**module.params) + if ssl_context: + conn = xmlrpc_client.ServerProxy(url, context=ssl_context) + else: + conn = xmlrpc_client.Server(url) + + try: + token = conn.login(username, password) + except xmlrpc_client.Fault as e: + module.fail_json(msg="Failed to log in to Cobbler '{url}' as '{username}'. {error}".format(url=url, error=to_text(e), **module.params)) + except Exception as e: + module.fail_json(msg="Connection to '{url}' failed. {error}".format(url=url, error=to_text(e), **module.params)) + + system = getsystem(conn, name, token) + # result['system'] = system + + if state == 'query': + if name: + result['system'] = system + else: + # Turn it into a dictionary of dictionaries + # all_systems = conn.get_systems() + # result['systems'] = { system['name']: system for system in all_systems } + + # Return a list of dictionaries + result['systems'] = conn.get_systems() + + elif state == 'present': + + if system: + # Update existing entry + system_id = conn.get_system_handle(name, token) + + for key, value in iteritems(module.params['properties']): + if key not in system: + module.warn("Property '{0}' is not a valid system property.".format(key)) + if system[key] != value: + try: + conn.modify_system(system_id, key, value, token) + result['changed'] = True + except Exception as e: + module.fail_json(msg="Unable to change '{0}' to '{1}'. {2}".format(key, value, e)) + + else: + # Create a new entry + system_id = conn.new_system(token) + conn.modify_system(system_id, 'name', name, token) + result['changed'] = True + + if module.params['properties']: + for key, value in iteritems(module.params['properties']): + try: + conn.modify_system(system_id, key, value, token) + except Exception as e: + module.fail_json(msg="Unable to change '{0}' to '{1}'. {2}".format(key, value, e)) + + # Add interface properties + interface_properties = dict() + if module.params['interfaces']: + for device, values in iteritems(module.params['interfaces']): + for key, value in iteritems(values): + if key == 'name': + continue + if key not in IFPROPS_MAPPING: + module.warn("Property '{0}' is not a valid system property.".format(key)) + if not system or system['interfaces'][device][IFPROPS_MAPPING[key]] != value: + result['changed'] = True + interface_properties['{0}-{1}'.format(key, device)] = value + + if result['changed'] is True: + conn.modify_system(system_id, "modify_interface", interface_properties, token) + + # Only save when the entry was changed + if not module.check_mode and result['changed']: + conn.save_system(system_id, token) + + elif state == 'absent': + + if system: + if not module.check_mode: + conn.remove_system(name, token) + result['changed'] = True + + if not module.check_mode and module.params['sync'] and result['changed']: + try: + conn.sync(token) + except Exception as e: + module.fail_json(msg="Failed to sync Cobbler. 
{0}".format(to_text(e))) + + if state in ('absent', 'present'): + result['system'] = getsystem(conn, name, token) + + if module._diff: + result['diff'] = dict(before=system, after=result['system']) + + elapsed = datetime.datetime.utcnow() - start + module.exit_json(elapsed=elapsed.seconds, **result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/remote_management/dellemc/idrac_firmware.py b/plugins/modules/remote_management/dellemc/idrac_firmware.py new file mode 100644 index 0000000000..5ae7498317 --- /dev/null +++ b/plugins/modules/remote_management/dellemc/idrac_firmware.py @@ -0,0 +1,211 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# +# Dell EMC OpenManage Ansible Modules +# Version 2.0 +# Copyright (C) 2018-2019 Dell Inc. or its subsidiaries. All Rights Reserved. + +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# + + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = r''' +--- +module: idrac_firmware +short_description: Firmware update from a repository on a network share (CIFS, NFS). +description: + - Update the Firmware by connecting to a network share (either CIFS or NFS) that contains a catalog of + available updates. + - Network share should contain a valid repository of Update Packages (DUPs) and a catalog file describing the DUPs. + - All applicable updates contained in the repository are applied to the system. + - This feature is available only with iDRAC Enterprise License. +options: + idrac_ip: + description: iDRAC IP Address. + type: str + required: True + idrac_user: + description: iDRAC username. + type: str + required: True + idrac_password: + description: iDRAC user password. + type: str + required: True + aliases: ['idrac_pwd'] + idrac_port: + description: iDRAC port. + type: int + default: 443 + share_name: + description: CIFS or NFS Network share. + type: str + required: True + share_user: + description: Network share user in the format 'user@domain' or 'domain\\user' if user is + part of a domain else 'user'. This option is mandatory for CIFS Network Share. + type: str + share_password: + description: Network share user password. This option is mandatory for CIFS Network Share. + type: str + aliases: ['share_pwd'] + share_mnt: + description: Local mount path of the network share with read-write permission for ansible user. + This option is mandatory for Network Share. + type: str + required: True + reboot: + description: Whether to reboots after applying the updates or not. + type: bool + default: false + job_wait: + description: Whether to wait for job completion or not. + type: bool + default: true + catalog_file_name: + required: False + description: Catalog file name relative to the I(share_name). + type: str + default: 'Catalog.xml' + +requirements: + - "omsdk" + - "python >= 2.7.5" +author: "Rajeev Arakkal (@rajeevarakkal)" +''' + +EXAMPLES = """ +--- +- name: Update firmware from repository on a Network Share + idrac_firmware: + idrac_ip: "192.168.0.1" + idrac_user: "user_name" + idrac_password: "user_password" + share_name: "192.168.0.0:/share" + share_user: "share_user_name" + share_password: "share_user_pwd" + share_mnt: "/mnt/share" + reboot: True + job_wait: True + catalog_file_name: "Catalog.xml" +""" + +RETURN = """ +--- +msg: + type: str + description: Over all firmware update status. 
+ returned: always + sample: "Successfully updated the firmware." +update_status: + type: dict + description: Firmware Update job and progress details from the iDRAC. + returned: success + sample: { + 'InstanceID': 'JID_XXXXXXXXXXXX', + 'JobState': 'Completed', + 'Message': 'Job completed successfully.', + 'MessageId': 'REDXXX', + 'Name': 'Repository Update', + 'JobStartTime': 'NA', + 'Status': 'Success', + } +""" + + +from ansible_collections.community.general.plugins.module_utils.remote_management.dellemc.dellemc_idrac import iDRACConnection +from ansible.module_utils.basic import AnsibleModule +try: + from omsdk.sdkcreds import UserCredentials + from omsdk.sdkfile import FileOnShare + HAS_OMSDK = True +except ImportError: + HAS_OMSDK = False + + +def _validate_catalog_file(catalog_file_name): + normilized_file_name = catalog_file_name.lower() + if not normilized_file_name: + raise ValueError('catalog_file_name should be a non-empty string.') + elif not normilized_file_name.endswith("xml"): + raise ValueError('catalog_file_name should be an XML file.') + + +def update_firmware(idrac, module): + """Update firmware from a network share and return the job details.""" + msg = {} + msg['changed'] = False + msg['update_status'] = {} + + try: + upd_share = FileOnShare(remote=module.params['share_name'] + "/" + module.params['catalog_file_name'], + mount_point=module.params['share_mnt'], + isFolder=False, + creds=UserCredentials( + module.params['share_user'], + module.params['share_password']) + ) + + idrac.use_redfish = True + if '12' in idrac.ServerGeneration or '13' in idrac.ServerGeneration: + idrac.use_redfish = False + + apply_update = True + msg['update_status'] = idrac.update_mgr.update_from_repo(upd_share, + apply_update, + module.params['reboot'], + module.params['job_wait']) + except RuntimeError as e: + module.fail_json(msg=str(e)) + + if "Status" in msg['update_status']: + if msg['update_status']['Status'] == "Success": + if module.params['job_wait']: + msg['changed'] = True + else: + module.fail_json(msg='Failed to update firmware.', update_status=msg['update_status']) + return msg + + +def main(): + module = AnsibleModule( + argument_spec={ + "idrac_ip": {"required": True, "type": 'str'}, + "idrac_user": {"required": True, "type": 'str'}, + "idrac_password": {"required": True, "type": 'str', "aliases": ['idrac_pwd'], "no_log": True}, + "idrac_port": {"required": False, "default": 443, "type": 'int'}, + + "share_name": {"required": True, "type": 'str'}, + "share_user": {"required": False, "type": 'str'}, + "share_password": {"required": False, "type": 'str', "aliases": ['share_pwd'], "no_log": True}, + "share_mnt": {"required": True, "type": 'str'}, + + "catalog_file_name": {"required": False, "type": 'str', "default": "Catalog.xml"}, + "reboot": {"required": False, "type": 'bool', "default": False}, + "job_wait": {"required": False, "type": 'bool', "default": True}, + }, + + supports_check_mode=False) + + try: + # Validate the catalog file + _validate_catalog_file(module.params['catalog_file_name']) + # Connect to iDRAC and update firmware + with iDRACConnection(module.params) as idrac: + update_status = update_firmware(idrac, module) + except (ImportError, ValueError, RuntimeError) as e: + module.fail_json(msg=str(e)) + + module.exit_json(msg='Successfully updated the firmware.', update_status=update_status) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/remote_management/dellemc/idrac_server_config_profile.py 
b/plugins/modules/remote_management/dellemc/idrac_server_config_profile.py new file mode 100644 index 0000000000..9d285a83dd --- /dev/null +++ b/plugins/modules/remote_management/dellemc/idrac_server_config_profile.py @@ -0,0 +1,305 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# +# Dell EMC OpenManage Ansible Modules +# Version 2.0 +# Copyright (C) 2019 Dell Inc. or its subsidiaries. All Rights Reserved. + +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# + + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = r''' +--- +module: idrac_server_config_profile +short_description: Export or Import iDRAC Server Configuration Profile (SCP). +description: + - Export the Server Configuration Profile (SCP) from the iDRAC or Import from a network share or a local file. +options: + idrac_ip: + description: iDRAC IP Address. + type: str + required: True + idrac_user: + description: iDRAC username. + type: str + required: True + idrac_password: + description: iDRAC user password. + type: str + required: True + aliases: ['idrac_pwd'] + idrac_port: + description: iDRAC port. + type: int + default: 443 + command: + description: + - If C(import), will perform SCP import operations. + - If C(export), will perform SCP export operations. + choices: ['import', 'export'] + default: 'export' + job_wait: + description: Whether to wait for job completion or not. + type: bool + required: True + share_name: + description: CIFS or NFS Network Share or a local path. + type: str + required: True + share_user: + description: Network share user in the format 'user@domain' or 'domain\\user' if user is + part of a domain else 'user'. This option is mandatory for CIFS Network Share. + type: str + share_password: + description: Network share user password. This option is mandatory for CIFS Network Share. + type: str + aliases: ['share_pwd'] + scp_file: + description: Server Configuration Profile file name. This option is mandatory for C(import) command. + type: str + scp_components: + description: + - If C(ALL), this module will import all components configurations from SCP file. + - If C(IDRAC), this module will import iDRAC configuration from SCP file. + - If C(BIOS), this module will import BIOS configuration from SCP file. + - If C(NIC), this module will import NIC configuration from SCP file. + - If C(RAID), this module will import RAID configuration from SCP file. + choices: ['ALL', 'IDRAC', 'BIOS', 'NIC', 'RAID'] + default: 'ALL' + shutdown_type: + description: + - This option is applicable for C(import) command. + - If C(Graceful), it gracefully shuts down the server. + - If C(Forced), it forcefully shuts down the server. + - If C(NoReboot), it does not reboot the server. + choices: ['Graceful', 'Forced', 'NoReboot'] + default: 'Graceful' + end_host_power_state: + description: + - This option is applicable for C(import) command. + - If C(On), End host power state is on. + - If C(Off), End host power state is off. + choices: ['On' ,'Off'] + default: 'On' + export_format: + description: Specify the output file format. This option is applicable for C(export) command. + choices: ['JSON', 'XML'] + default: 'XML' + export_use: + description: Specify the type of server configuration profile (SCP) to be exported. + This option is applicable for C(export) command. 
+ choices: ['Default', 'Clone', 'Replace'] + default: 'Default' + +requirements: + - "omsdk" + - "python >= 2.7.5" +author: "Jagadeesh N V(@jagadeeshnv)" + +''' + +EXAMPLES = r''' +--- +- name: Import Server Configuration Profile from a network share + idrac_server_config_profile: + idrac_ip: "192.168.0.1" + idrac_user: "user_name" + idrac_password: "user_password" + command: "import" + share_name: "192.168.0.2:/share" + share_user: "share_user_name" + share_password: "share_user_password" + scp_file: "scp_filename.xml" + scp_components: "ALL" + job_wait: True + +- name: Import Server Configuration Profile from a local path + idrac_server_config_profile: + idrac_ip: "192.168.0.1" + idrac_user: "user_name" + idrac_password: "user_password" + command: "import" + share_name: "/scp_folder" + share_user: "share_user_name" + share_password: "share_user_password" + scp_file: "scp_filename.xml" + scp_components: "ALL" + job_wait: True + +- name: Export Server Configuration Profile to a network share + idrac_server_config_profile: + idrac_ip: "192.168.0.1" + idrac_user: "user_name" + idrac_password: "user_password" + share_name: "192.168.0.2:/share" + share_user: "share_user_name" + share_password: "share_user_password" + job_wait: False + +- name: Export Server Configuration Profile to a local path + idrac_server_config_profile: + idrac_ip: "192.168.0.1" + idrac_user: "user_name" + idrac_password: "user_password" + share_name: "/scp_folder" + share_user: "share_user_name" + share_password: "share_user_password" + job_wait: False +''' + +RETURN = r''' +--- +msg: + type: str + description: Status of the import or export SCP job. + returned: always + sample: "Successfully imported the Server Configuration Profile" +scp_status: + type: dict + description: SCP operation job and progress details from the iDRAC. 
+ returned: success + sample: + { + "Id": "JID_XXXXXXXXX", + "JobState": "Completed", + "JobType": "ImportConfiguration", + "Message": "Successfully imported and applied Server Configuration Profile.", + "MessageArgs": [], + "MessageId": "XXX123", + "Name": "Import Configuration", + "PercentComplete": 100, + "StartTime": "TIME_NOW", + "Status": "Success", + "TargetSettingsURI": null, + "retval": true + } +''' + +import os +from ansible_collections.community.general.plugins.module_utils.remote_management.dellemc.dellemc_idrac import iDRACConnection +from ansible.module_utils.basic import AnsibleModule +try: + from omsdk.sdkfile import file_share_manager + from omsdk.sdkcreds import UserCredentials + from omdrivers.enums.iDRAC.iDRACEnums import (SCPTargetEnum, EndHostPowerStateEnum, + ShutdownTypeEnum, ExportFormatEnum, ExportUseEnum) +except ImportError: + pass + + +def run_import_server_config_profile(idrac, module): + """Import Server Configuration Profile from a network share.""" + target = SCPTargetEnum[module.params['scp_components']] + job_wait = module.params['job_wait'] + end_host_power_state = EndHostPowerStateEnum[module.params['end_host_power_state']] + shutdown_type = ShutdownTypeEnum[module.params['shutdown_type']] + idrac.use_redfish = True + + try: + myshare = file_share_manager.create_share_obj( + share_path="{0}{1}{2}".format(module.params['share_name'], os.sep, module.params['scp_file']), + creds=UserCredentials(module.params['share_user'], + module.params['share_password']), isFolder=False) + import_status = idrac.config_mgr.scp_import(myshare, + target=target, shutdown_type=shutdown_type, + end_host_power_state=end_host_power_state, + job_wait=job_wait) + if not import_status or import_status.get('Status') != "Success": + module.fail_json(msg='Failed to import scp.', scp_status=import_status) + except RuntimeError as e: + module.fail_json(msg=str(e)) + return import_status + + +def run_export_server_config_profile(idrac, module): + """Export Server Configuration Profile to a network share.""" + export_format = ExportFormatEnum[module.params['export_format']] + scp_file_name_format = "%ip_%Y%m%d_%H%M%S_scp.{0}".format(module.params['export_format'].lower()) + target = SCPTargetEnum[module.params['scp_components']] + export_use = ExportUseEnum[module.params['export_use']] + idrac.use_redfish = True + + try: + myshare = file_share_manager.create_share_obj(share_path=module.params['share_name'], + creds=UserCredentials(module.params['share_user'], + module.params['share_password']), + isFolder=True) + scp_file_name = myshare.new_file(scp_file_name_format) + export_status = idrac.config_mgr.scp_export(scp_file_name, + target=target, + export_format=export_format, + export_use=export_use, + job_wait=module.params['job_wait']) + if not export_status or export_status.get('Status') != "Success": + module.fail_json(msg='Failed to export scp.', scp_status=export_status) + except RuntimeError as e: + module.fail_json(msg=str(e)) + return export_status + + +def main(): + module = AnsibleModule( + argument_spec={ + "idrac_ip": {"required": True, "type": 'str'}, + "idrac_user": {"required": True, "type": 'str'}, + "idrac_password": {"required": True, "type": 'str', + "aliases": ['idrac_pwd'], "no_log": True}, + "idrac_port": {"required": False, "default": 443, "type": 'int'}, + + "command": {"required": False, "type": 'str', + "choices": ['export', 'import'], "default": 'export'}, + "job_wait": {"required": True, "type": 'bool'}, + + "share_name": {"required": True, "type": 'str'}, + 
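+            # Per the option docs, share_user/share_password are only needed
+            # for CIFS shares; NFS shares can omit them.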
"share_user": {"required": False, "type": 'str'}, + "share_password": {"required": False, "type": 'str', + "aliases": ['share_pwd'], "no_log": True}, + "scp_components": {"required": False, + "choices": ['ALL', 'IDRAC', 'BIOS', 'NIC', 'RAID'], + "default": 'ALL'}, + + "scp_file": {"required": False, "type": 'str'}, + "shutdown_type": {"required": False, + "choices": ['Graceful', 'Forced', 'NoReboot'], + "default": 'Graceful'}, + "end_host_power_state": {"required": False, + "choices": ['On', 'Off'], + "default": 'On'}, + + "export_format": {"required": False, "type": 'str', + "choices": ['JSON', 'XML'], "default": 'XML'}, + "export_use": {"required": False, "type": 'str', + "choices": ['Default', 'Clone', 'Replace'], "default": 'Default'} + }, + required_if=[ + ["command", "import", ["scp_file"]] + ], + supports_check_mode=False) + + try: + changed = False + with iDRACConnection(module.params) as idrac: + command = module.params['command'] + if command == 'import': + scp_status = run_import_server_config_profile(idrac, module) + if "No changes were applied" not in scp_status.get('Message', ""): + changed = True + else: + scp_status = run_export_server_config_profile(idrac, module) + module.exit_json(changed=changed, msg="Successfully {0}ed the Server Configuration Profile.".format(command), + scp_status=scp_status) + except (ImportError, ValueError, RuntimeError) as e: + module.fail_json(msg=str(e)) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/remote_management/dellemc/ome_device_info.py b/plugins/modules/remote_management/dellemc/ome_device_info.py new file mode 100644 index 0000000000..57a51a1240 --- /dev/null +++ b/plugins/modules/remote_management/dellemc/ome_device_info.py @@ -0,0 +1,417 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# +# Dell EMC OpenManage Ansible Modules +# Version 1.2 +# Copyright (C) 2019 Dell Inc. + +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# All rights reserved. Dell, EMC, and other trademarks are trademarks of Dell Inc. or its subsidiaries. +# Other trademarks may be trademarks of their respective owners. +# + + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = r''' +--- +module: ome_device_info +short_description: Retrieves the information about Device. +description: + - This module retrieves the list of all devices information with the exhaustive inventory of each + device. +options: + hostname: + description: + - Target IP Address or hostname. + type: str + required: True + username: + description: + - Target username. + type: str + required: True + password: + description: + - Target user password. + type: str + required: True + port: + description: + - Target HTTPS port. + type: int + default: 443 + fact_subset: + description: + - C(basic_inventory) returns the list of the devices. + - C(detailed_inventory) returns the inventory details of specified devices. + - C(subsystem_health) returns the health status of specified devices. + type: str + choices: [basic_inventory, detailed_inventory, subsystem_health ] + default: basic_inventory + system_query_options: + description: + - I(system_query_options) applicable for the choices of the fact_subset. Either I(device_id) or I(device_service_tag) + is mandatory for C(detailed_inventory) and C(subsystem_health) or both can be applicable. 
+ type: dict
+ suboptions:
+ device_id:
+ description:
+ - A list of unique device identifiers, applicable
+ for C(detailed_inventory) and C(subsystem_health).
+ type: list
+ device_service_tag:
+ description:
+ - A list of device service tags, applicable for C(detailed_inventory)
+ and C(subsystem_health).
+ type: list
+ inventory_type:
+ description:
+ - For C(detailed_inventory), it returns details of the specified inventory type.
+ type: str
+ filter:
+ description:
+ - For C(basic_inventory), it filters the collection of devices.
+ The I(filter) query format should be aligned with OData standards.
+ type: str
+
+requirements:
+ - "python >= 2.7.5"
+author: "Sajna Shetty(@Sajna-Shetty)"
+'''
+
+EXAMPLES = """
+---
+- name: Retrieve basic inventory of all devices.
+ ome_device_info:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+
+- name: Retrieve basic inventory for devices identified by IDs 33333 or 11111 using filtering.
+ ome_device_info:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ fact_subset: "basic_inventory"
+ system_query_options:
+ filter: "Id eq 33333 or Id eq 11111"
+
+- name: Retrieve inventory details of specified devices identified by IDs 11111 and 22222.
+ ome_device_info:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ fact_subset: "detailed_inventory"
+ system_query_options:
+ device_id:
+ - 11111
+ - 22222
+
+- name: Retrieve inventory details of specified devices identified by service tags MXL1234 and MXL4567.
+ ome_device_info:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ fact_subset: "detailed_inventory"
+ system_query_options:
+ device_service_tag:
+ - MXL1234
+ - MXL4567
+
+- name: Retrieve details of specified inventory type of specified devices identified by ID and service tags.
+ ome_device_info:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ fact_subset: "detailed_inventory"
+ system_query_options:
+ device_id:
+ - 11111
+ device_service_tag:
+ - MXL1234
+ - MXL4567
+ inventory_type: "serverDeviceCards"
+
+- name: Retrieve subsystem health of specified devices identified by service tags.
+ ome_device_info:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ fact_subset: "subsystem_health"
+ system_query_options:
+ device_service_tag:
+ - MXL1234
+ - MXL4567
+
+"""
+
+RETURN = '''
+---
+msg:
+ type: str
+ description: Overall status of the device information request.
+ returned: on error
+ sample: "Failed to fetch the device information"
+device_info:
+ type: dict
+ description: The information collected from the devices.
+ returned: success + sample: { + "value": [ + { + "Actions": null, + "AssetTag": null, + "ChassisServiceTag": null, + "ConnectionState": true, + "DeviceManagement": [ + { + "DnsName": "dnsname.host.com", + "InstrumentationName": "MX-12345", + "MacAddress": "11:10:11:10:11:10", + "ManagementId": 12345, + "ManagementProfile": [ + { + "HasCreds": 0, + "ManagementId": 12345, + "ManagementProfileId": 12345, + "ManagementURL": "https://192.168.0.1:443", + "Status": 1000, + "StatusDateTime": "2019-01-21 06:30:08.501" + } + ], + "ManagementType": 2, + "NetworkAddress": "192.168.0.1" + } + ], + "DeviceName": "MX-0003I", + "DeviceServiceTag": "MXL1234", + "DeviceSubscription": null, + "LastInventoryTime": "2019-01-21 06:30:08.501", + "LastStatusTime": "2019-01-21 06:30:02.492", + "ManagedState": 3000, + "Model": "PowerEdge MX7000", + "PowerState": 17, + "SlotConfiguration": {}, + "Status": 4000, + "SystemId": 2031, + "Type": 2000 + } + ] + } +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.remote_management.dellemc.ome import RestOME +from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError +from ansible.module_utils.urls import ConnectionError, SSLValidationError + +DEVICES_INVENTORY_DETAILS = "detailed_inventory" +DEVICES_SUBSYSTEM_HEALTH = "subsystem_health" +DEVICES_INVENTORY_TYPE = "inventory_type" +DEVICE_LIST = "basic_inventory" +DESC_HTTP_ERROR = "HTTP Error 404: Not Found" +device_fact_error_report = {} + +DEVICE_RESOURCE_COLLECTION = { + DEVICE_LIST: {"resource": "DeviceService/Devices"}, + DEVICES_INVENTORY_DETAILS: {"resource": "DeviceService/Devices({Id})/InventoryDetails"}, + DEVICES_INVENTORY_TYPE: {"resource": "DeviceService/Devices({Id})/InventoryDetails('{InventoryType}')"}, + DEVICES_SUBSYSTEM_HEALTH: {"resource": "DeviceService/Devices({Id})/SubSystemHealth"}, +} + + +def _get_device_id_from_service_tags(service_tags, rest_obj): + """ + Get device ids from device service tag + Returns :dict : device_id to service_tag map + :arg service_tags: service tag + :arg rest_obj: RestOME class object in case of request with session. 
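+ Illustrative flow (values assumed for the example): calling this with
+ service_tags=["MXL1245", "ABSENT1"] when only "MXL1245" is known returns
+ {1345: "MXL1245"}, while "ABSENT1" is recorded in the module-level
+ device_fact_error_report dict with the DESC_HTTP_ERROR message.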
+ :returns: dict eg: {1345:"MXL1245"} + """ + try: + path = DEVICE_RESOURCE_COLLECTION[DEVICE_LIST]["resource"] + resp = rest_obj.invoke_request('GET', path) + if resp.success: + devices_list = resp.json_data["value"] + service_tag_dict = {} + for item in devices_list: + if item["DeviceServiceTag"] in service_tags: + service_tag_dict.update({item["Id"]: item["DeviceServiceTag"]}) + available_service_tags = service_tag_dict.values() + not_available_service_tag = list(set(service_tags) - set(available_service_tags)) + device_fact_error_report.update(dict((tag, DESC_HTTP_ERROR) for tag in not_available_service_tag)) + else: + raise ValueError(resp.json_data) + except (URLError, HTTPError, SSLValidationError, ConnectionError, TypeError, ValueError) as err: + raise err + return service_tag_dict + + +def is_int(val): + """check when device_id numeric represented value is int""" + try: + int(val) + return True + except ValueError: + return False + + +def _check_duplicate_device_id(device_id_list, service_tag_dict): + """If service_tag is duplicate of device_id, then updates the message as Duplicate report + :arg1: device_id_list : list of device_id + :arg2: service_tag_id_dict: dictionary of device_id to service tag map""" + if device_id_list: + device_id_represents_int = [int(device_id) for device_id in device_id_list if device_id and is_int(device_id)] + common_val = list(set(device_id_represents_int) & set(service_tag_dict.keys())) + for device_id in common_val: + device_fact_error_report.update( + {service_tag_dict[device_id]: "Duplicate report of device_id: {0}".format(device_id)}) + del service_tag_dict[device_id] + + +def _get_device_identifier_map(module_params, rest_obj): + """ + Builds the identifiers mapping + :returns: the dict of device_id to server_tag map + eg: {"device_id":{1234: None},"device_service_tag":{1345:"MXL1234"}}""" + system_query_options_param = module_params.get("system_query_options") + device_id_service_tag_dict = {} + if system_query_options_param is not None: + device_id_list = system_query_options_param.get("device_id") + device_service_tag_list = system_query_options_param.get("device_service_tag") + if device_id_list: + device_id_dict = dict((device_id, None) for device_id in list(set(device_id_list))) + device_id_service_tag_dict["device_id"] = device_id_dict + if device_service_tag_list: + service_tag_dict = _get_device_id_from_service_tags(device_service_tag_list, + rest_obj) + + _check_duplicate_device_id(device_id_list, service_tag_dict) + device_id_service_tag_dict["device_service_tag"] = service_tag_dict + return device_id_service_tag_dict + + +def _get_query_parameters(module_params): + """ + Builds query parameter + :returns: dictionary, which is applicable builds the query format + eg : {"$filter":"Type eq 2000"} + """ + system_query_options_param = module_params.get("system_query_options") + query_parameter = None + if system_query_options_param: + filter_by_val = system_query_options_param.get("filter") + if filter_by_val: + query_parameter = {"$filter": filter_by_val} + return query_parameter + + +def _get_resource_parameters(module_params, rest_obj): + """ + Identifies the resource path by different states + :returns: dictionary containing identifier with respective resource path + eg:{"device_id":{1234:""DeviceService/Devices(1234)/InventoryDetails"}, + "device_service_tag":{"MXL1234":"DeviceService/Devices(1345)/InventoryDetails"}} + """ + fact_subset = module_params["fact_subset"] + path_dict = {} + if fact_subset != DEVICE_LIST: + 
inventory_type = None + device_id_service_tag_dict = _get_device_identifier_map(module_params, rest_obj) + if fact_subset == DEVICES_INVENTORY_DETAILS: + system_query_options = module_params.get("system_query_options") + inventory_type = system_query_options.get(DEVICES_INVENTORY_TYPE) + path_identifier = DEVICES_INVENTORY_TYPE if inventory_type else fact_subset + for identifier_type, identifier_dict in device_id_service_tag_dict.items(): + path_dict[identifier_type] = {} + for device_id, service_tag in identifier_dict.items(): + key_identifier = service_tag if identifier_type == "device_service_tag" else device_id + path = DEVICE_RESOURCE_COLLECTION[path_identifier]["resource"].format(Id=device_id, + InventoryType=inventory_type) + path_dict[identifier_type].update({key_identifier: path}) + else: + path_dict.update({DEVICE_LIST: DEVICE_RESOURCE_COLLECTION[DEVICE_LIST]["resource"]}) + return path_dict + + +def _check_mutually_inclusive_arguments(val, module_params, required_args): + """" + Throws error if arguments detailed_inventory, subsystem_health + not exists with qualifier device_id or device_service_tag""" + system_query_options_param = module_params.get("system_query_options") + if system_query_options_param is None or (system_query_options_param is not None and not any( + system_query_options_param.get(qualifier) for qualifier in required_args)): + raise ValueError("One of the following {0} is required for {1}".format(required_args, val)) + + +def _validate_inputs(module_params): + """validates input parameters""" + fact_subset = module_params["fact_subset"] + if fact_subset != "basic_inventory": + _check_mutually_inclusive_arguments(fact_subset, module_params, ["device_id", "device_service_tag"]) + + +def main(): + system_query_options = {"type": 'dict', "required": False, "options": { + "device_id": {"type": 'list'}, + "device_service_tag": {"type": 'list'}, + "inventory_type": {"type": 'str'}, + "filter": {"type": 'str', "required": False}, + }} + + module = AnsibleModule( + argument_spec={ + "hostname": {"required": True, "type": 'str'}, + "username": {"required": True, "type": 'str'}, + "password": {"required": True, "type": 'str', "no_log": True}, + "port": {"required": False, "default": 443, "type": 'int'}, + "fact_subset": {"required": False, "default": "basic_inventory", + "choices": ['basic_inventory', 'detailed_inventory', 'subsystem_health']}, + "system_query_options": system_query_options, + }, + required_if=[['fact_subset', 'detailed_inventory', ['system_query_options']], + ['fact_subset', 'subsystem_health', ['system_query_options']], ], + supports_check_mode=False) + + try: + _validate_inputs(module.params) + with RestOME(module.params, req_session=True) as rest_obj: + device_facts = _get_resource_parameters(module.params, rest_obj) + resp_status = [] + if device_facts.get("basic_inventory"): + query_param = _get_query_parameters(module.params) + resp = rest_obj.invoke_request('GET', device_facts["basic_inventory"], query_param=query_param) + device_facts = resp.json_data + resp_status.append(resp.status_code) + else: + for identifier_type, path_dict_map in device_facts.items(): + for identifier, path in path_dict_map.items(): + try: + resp = rest_obj.invoke_request('GET', path) + data = resp.json_data + resp_status.append(resp.status_code) + except HTTPError as err: + data = str(err) + path_dict_map[identifier] = data + if any(device_fact_error_report): + if "device_service_tag" in device_facts: + device_facts["device_service_tag"].update(device_fact_error_report) 
+ else: + device_facts["device_service_tag"] = device_fact_error_report + if 200 in resp_status: + module.exit_json(device_info=device_facts) + else: + module.fail_json(msg="Failed to fetch the device information") + except (URLError, HTTPError, SSLValidationError, ConnectionError, TypeError, ValueError) as err: + module.fail_json(msg=str(err)) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/remote_management/foreman/foreman.py b/plugins/modules/remote_management/foreman/foreman.py new file mode 100644 index 0000000000..98f291a73a --- /dev/null +++ b/plugins/modules/remote_management/foreman/foreman.py @@ -0,0 +1,161 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2016, Eric D Helms +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['deprecated'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: foreman +short_description: Manage Foreman Resources +deprecated: + removed_in: "2.12" + why: "Replaced by re-designed individual modules living at https://github.com/theforeman/foreman-ansible-modules" + alternative: https://github.com/theforeman/foreman-ansible-modules +description: + - Allows the management of Foreman resources inside your Foreman server. +author: +- Eric D Helms (@ehelms) +requirements: + - nailgun >= 0.28.0 + - python >= 2.6 + - datetime +options: + server_url: + description: + - URL of Foreman server. + required: true + username: + description: + - Username on Foreman server. + required: true + verify_ssl: + description: + - Whether to verify an SSL connection to Foreman server. + type: bool + default: False + password: + description: + - Password for user accessing Foreman server. + required: true + entity: + description: + - The Foreman resource that the action will be performed on (e.g. organization, host). + required: true + params: + description: + - Parameters associated to the entity resource to set or edit in dictionary format (e.g. name, description). 
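+ - For example, C(params) of C({name: My Cool New Organization}) matches the organization
+ example below; the name is purely illustrative.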
+ required: true +''' + +EXAMPLES = ''' +- name: Create CI Organization + foreman: + username: admin + password: admin + server_url: https://fakeserver.com + entity: organization + params: + name: My Cool New Organization + delegate_to: localhost +''' + +RETURN = '''# ''' + +import traceback + +try: + from nailgun import entities + from nailgun.config import ServerConfig + HAS_NAILGUN_PACKAGE = True +except Exception: + HAS_NAILGUN_PACKAGE = False + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_native + + +class NailGun(object): + def __init__(self, server, entities, module): + self._server = server + self._entities = entities + self._module = module + + def find_organization(self, name, **params): + org = self._entities.Organization(self._server, name=name, **params) + response = org.search(set(), {'search': 'name={0}'.format(name)}) + + if len(response) == 1: + return response[0] + + return None + + def organization(self, params): + name = params['name'] + del params['name'] + org = self.find_organization(name, **params) + + if org: + org = self._entities.Organization(self._server, name=name, id=org.id, **params) + org.update() + else: + org = self._entities.Organization(self._server, name=name, **params) + org.create() + + return True + + +def main(): + module = AnsibleModule( + argument_spec=dict( + server_url=dict(type='str', required=True), + username=dict(type='str', required=True, no_log=True), + password=dict(type='str', required=True, no_log=True), + entity=dict(type='str', required=True), + verify_ssl=dict(type='bool', default=False), + params=dict(type='dict', required=True, no_log=True), + ), + supports_check_mode=True, + ) + + if not HAS_NAILGUN_PACKAGE: + module.fail_json(msg="Missing required nailgun module (check docs or install with: pip install nailgun") + + server_url = module.params['server_url'] + username = module.params['username'] + password = module.params['password'] + entity = module.params['entity'] + params = module.params['params'] + verify_ssl = module.params['verify_ssl'] + + server = ServerConfig( + url=server_url, + auth=(username, password), + verify=verify_ssl + ) + ng = NailGun(server, entities, module) + + # Lets make an connection to the server with username and password + try: + org = entities.Organization(server) + org.search() + except Exception as e: + module.fail_json(msg="Failed to connect to Foreman server: %s " % to_native(e), + exception=traceback.format_exc()) + + if entity == 'organization': + ng.organization(params) + module.exit_json(changed=True, result="%s updated" % entity) + else: + module.fail_json(changed=False, result="Unsupported entity supplied") + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/remote_management/foreman/katello.py b/plugins/modules/remote_management/foreman/katello.py new file mode 100644 index 0000000000..29f360803b --- /dev/null +++ b/plugins/modules/remote_management/foreman/katello.py @@ -0,0 +1,619 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2016, Eric D Helms +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['deprecated'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: katello +short_description: Manage Katello Resources +deprecated: + removed_in: "2.12" + why: "Replaced by re-designed individual 
modules living at https://github.com/theforeman/foreman-ansible-modules"
+ alternative: https://github.com/theforeman/foreman-ansible-modules
+description:
+ - Allows the management of Katello resources inside your Foreman server.
+author:
+- Eric D Helms (@ehelms)
+requirements:
+ - nailgun >= 0.28.0
+ - python >= 2.6
+ - datetime
+options:
+ server_url:
+ description:
+ - URL of Foreman server.
+ required: true
+ username:
+ description:
+ - Username on Foreman server.
+ required: true
+ password:
+ description:
+ - Password for user accessing Foreman server.
+ required: true
+ entity:
+ description:
+ - The Foreman resource that the action will be performed on (e.g. organization, host).
+ choices:
+ - repository
+ - manifest
+ - repository_set
+ - sync_plan
+ - content_view
+ - lifecycle_environment
+ - activation_key
+ - product
+ required: true
+ action:
+ description:
+ - The action associated to the entity resource, to set or edit in dictionary format.
+ - Possible actions in relation to entities.
+ - "sync (available when entity=product or entity=repository)"
+ - "publish (available when entity=content_view)"
+ - "promote (available when entity=content_view)"
+ choices:
+ - sync
+ - publish
+ - promote
+ required: false
+ params:
+ description:
+ - Parameters associated to the entity resource and action, to set or edit in dictionary format.
+ - Each choice may be only available with specific entities and actions.
+ - "Possible choices are in the format of param_name ([entity,action,action,...],[entity,..],...)."
+ - The action "None" means no action specified.
+ - Possible params in relation to entity and action.
+ - "name ([product,sync,None], [repository,sync], [repository_set,None], [sync_plan,None],"
+ - "[content_view,promote,publish,None], [lifecycle_environment,None], [activation_key,None])"
+ - "organization ([product,sync,None], [repository,sync,None], [repository_set,None], [sync_plan,None],"
+ - "[content_view,promote,publish,None], [lifecycle_environment,None], [activation_key,None])"
+ - "content ([manifest,None])"
+ - "product ([repository,sync,None], [repository_set,None], [sync_plan,None])"
+ - "basearch ([repository_set,None])"
+ - "releasever ([repository_set,None])"
+ - "sync_date ([sync_plan,None])"
+ - "interval ([sync_plan,None])"
+ - "repositories ([content_view,None])"
+ - "from_environment ([content_view,promote])"
+ - "to_environment ([content_view,promote])"
+ - "prior ([lifecycle_environment,None])"
+ - "content_view ([activation_key,None])"
+ - "lifecycle_environment ([activation_key,None])"
+ required: true
+ task_timeout:
+ description:
+ - The timeout in seconds to wait for the started Foreman action to finish.
+ - If the timeout is reached and the Foreman action did not complete, the Ansible task fails. However, the Foreman action does not get canceled.
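+ - Long-running actions such as repository syncs or content view promotions can exceed the
+ default; the promote example below uses C(10800) (three hours).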
+ default: 1000 + required: false + verify_ssl: + description: + - verify the ssl/https connection (e.g for a valid certificate) + default: false + type: bool + required: false +''' + +EXAMPLES = ''' +--- +# Simple Example: + +- name: Create Product + katello: + username: admin + password: admin + server_url: https://fakeserver.com + entity: product + params: + name: Centos 7 + delegate_to: localhost + +# Abstraction Example: +# katello.yml +--- +- name: "{{ name }}" + katello: + username: admin + password: admin + server_url: https://fakeserver.com + entity: "{{ entity }}" + params: "{{ params }}" + delegate_to: localhost + +# tasks.yml +--- +- include: katello.yml + vars: + name: Create Dev Environment + entity: lifecycle_environment + params: + name: Dev + prior: Library + organization: Default Organization + +- include: katello.yml + vars: + name: Create Centos Product + entity: product + params: + name: Centos 7 + organization: Default Organization + +- include: katello.yml + vars: + name: Create 7.2 Repository + entity: repository + params: + name: Centos 7.2 + product: Centos 7 + organization: Default Organization + content_type: yum + url: http://mirror.centos.org/centos/7/os/x86_64/ + +- include: katello.yml + vars: + name: Create Centos 7 View + entity: content_view + params: + name: Centos 7 View + organization: Default Organization + repositories: + - name: Centos 7.2 + product: Centos 7 + +- include: katello.yml + vars: + name: Enable RHEL Product + entity: repository_set + params: + name: Red Hat Enterprise Linux 7 Server (RPMs) + product: Red Hat Enterprise Linux Server + organization: Default Organization + basearch: x86_64 + releasever: 7 + +- include: katello.yml + vars: + name: Promote Contentview Environment with longer timeout + task_timeout: 10800 + entity: content_view + action: promote + params: + name: MyContentView + organization: MyOrganisation + from_environment: Testing + to_environment: Production + +# Best Practices + +# In Foreman, things can be done in parallel. +# When a conflicting action is already running, +# the task will fail instantly instead of waiting for the already running action to complete. +# So you should use a "until success" loop to catch this. 
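+# As a rough guide, the retry settings in the example below poll every 120 seconds up to
+# 9 times, so a conflicting action has at most 18 minutes to clear before the task fails.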
+ +- name: Promote Contentview Environment with increased Timeout + katello: + username: ansibleuser + password: supersecret + task_timeout: 10800 + entity: content_view + action: promote + params: + name: MyContentView + organization: MyOrganisation + from_environment: Testing + to_environment: Production + register: task_result + until: task_result is success + retries: 9 + delay: 120 + +''' + +RETURN = '''# ''' + +import datetime +import os +import traceback + +try: + from nailgun import entities, entity_fields, entity_mixins + from nailgun.config import ServerConfig + HAS_NAILGUN_PACKAGE = True +except Exception: + HAS_NAILGUN_PACKAGE = False + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_native + + +class NailGun(object): + def __init__(self, server, entities, module, task_timeout): + self._server = server + self._entities = entities + self._module = module + entity_mixins.TASK_TIMEOUT = task_timeout + + def find_organization(self, name, **params): + org = self._entities.Organization(self._server, name=name, **params) + response = org.search(set(), {'search': 'name={0}'.format(name)}) + + if len(response) == 1: + return response[0] + else: + self._module.fail_json(msg="No organization found for %s" % name) + + def find_lifecycle_environment(self, name, organization): + org = self.find_organization(organization) + + lifecycle_env = self._entities.LifecycleEnvironment(self._server, name=name, organization=org) + response = lifecycle_env.search() + + if len(response) == 1: + return response[0] + else: + self._module.fail_json(msg="No Lifecycle Found found for %s" % name) + + def find_product(self, name, organization): + org = self.find_organization(organization) + + product = self._entities.Product(self._server, name=name, organization=org) + response = product.search() + + if len(response) == 1: + return response[0] + else: + self._module.fail_json(msg="No Product found for %s" % name) + + def find_repository(self, name, product, organization): + product = self.find_product(product, organization) + + repository = self._entities.Repository(self._server, name=name, product=product) + repository._fields['organization'] = entity_fields.OneToOneField(entities.Organization) + repository.organization = product.organization + response = repository.search() + + if len(response) == 1: + return response[0] + else: + self._module.fail_json(msg="No Repository found for %s" % name) + + def find_content_view(self, name, organization): + org = self.find_organization(organization) + + content_view = self._entities.ContentView(self._server, name=name, organization=org) + response = content_view.search() + + if len(response) == 1: + return response[0] + else: + self._module.fail_json(msg="No Content View found for %s" % name) + + def organization(self, params): + name = params['name'] + del params['name'] + org = self.find_organization(name, **params) + + if org: + org = self._entities.Organization(self._server, name=name, id=org.id, **params) + org.update() + else: + org = self._entities.Organization(self._server, name=name, **params) + org.create() + + return True + + def manifest(self, params): + org = self.find_organization(params['organization']) + params['organization'] = org.id + + try: + file = open(os.getcwd() + params['content'], 'r') + content = file.read() + finally: + file.close() + + manifest = self._entities.Subscription(self._server) + + try: + manifest.upload( + data={'organization_id': org.id}, + files={'content': content} + ) + return 
True + except Exception as e: + + if "Import is the same as existing data" in e.message: + return False + else: + self._module.fail_json(msg="Manifest import failed with %s" % to_native(e), + exception=traceback.format_exc()) + + def product(self, params): + org = self.find_organization(params['organization']) + params['organization'] = org.id + + product = self._entities.Product(self._server, **params) + response = product.search() + + if len(response) == 1: + product.id = response[0].id + product.update() + else: + product.create() + + return True + + def sync_product(self, params): + org = self.find_organization(params['organization']) + product = self.find_product(params['name'], org.name) + + return product.sync() + + def repository(self, params): + product = self.find_product(params['product'], params['organization']) + params['product'] = product.id + del params['organization'] + + repository = self._entities.Repository(self._server, **params) + repository._fields['organization'] = entity_fields.OneToOneField(entities.Organization) + repository.organization = product.organization + response = repository.search() + + if len(response) == 1: + repository.id = response[0].id + repository.update() + else: + repository.create() + + return True + + def sync_repository(self, params): + org = self.find_organization(params['organization']) + repository = self.find_repository(params['name'], params['product'], org.name) + + return repository.sync() + + def repository_set(self, params): + product = self.find_product(params['product'], params['organization']) + del params['product'] + del params['organization'] + + if not product: + return False + else: + reposet = self._entities.RepositorySet(self._server, product=product, name=params['name']) + reposet = reposet.search()[0] + + formatted_name = [params['name'].replace('(', '').replace(')', '')] + formatted_name.append(params['basearch']) + + if 'releasever' in params: + formatted_name.append(params['releasever']) + + formatted_name = ' '.join(formatted_name) + + repository = self._entities.Repository(self._server, product=product, name=formatted_name) + repository._fields['organization'] = entity_fields.OneToOneField(entities.Organization) + repository.organization = product.organization + repository = repository.search() + + if len(repository) == 0: + if 'releasever' in params: + reposet.enable(data={'basearch': params['basearch'], 'releasever': params['releasever']}) + else: + reposet.enable(data={'basearch': params['basearch']}) + + return True + + def sync_plan(self, params): + org = self.find_organization(params['organization']) + params['organization'] = org.id + params['sync_date'] = datetime.datetime.strptime(params['sync_date'], "%H:%M") + + products = params['products'] + del params['products'] + + sync_plan = self._entities.SyncPlan( + self._server, + name=params['name'], + organization=org + ) + response = sync_plan.search() + + sync_plan.sync_date = params['sync_date'] + sync_plan.interval = params['interval'] + + if len(response) == 1: + sync_plan.id = response[0].id + sync_plan.update() + else: + response = sync_plan.create() + sync_plan.id = response[0].id + + if products: + ids = [] + + for name in products: + product = self.find_product(name, org.name) + ids.append(product.id) + + sync_plan.add_products(data={'product_ids': ids}) + + return True + + def content_view(self, params): + org = self.find_organization(params['organization']) + + content_view = self._entities.ContentView(self._server, name=params['name'], 
organization=org) + response = content_view.search() + + if len(response) == 1: + content_view.id = response[0].id + content_view.update() + else: + content_view = content_view.create() + + if params['repositories']: + repos = [] + + for repository in params['repositories']: + repository = self.find_repository(repository['name'], repository['product'], org.name) + repos.append(repository) + + content_view.repository = repos + content_view.update(['repository']) + + def find_content_view_version(self, name, organization, environment): + env = self.find_lifecycle_environment(environment, organization) + content_view = self.find_content_view(name, organization) + + content_view_version = self._entities.ContentViewVersion(self._server, content_view=content_view) + response = content_view_version.search(['content_view'], {'environment_id': env.id}) + + if len(response) == 1: + return response[0] + else: + self._module.fail_json(msg="No Content View version found for %s" % response) + + def publish(self, params): + content_view = self.find_content_view(params['name'], params['organization']) + + return content_view.publish() + + def promote(self, params): + to_environment = self.find_lifecycle_environment(params['to_environment'], params['organization']) + version = self.find_content_view_version(params['name'], params['organization'], params['from_environment']) + + data = {'environment_id': to_environment.id} + return version.promote(data=data) + + def lifecycle_environment(self, params): + org = self.find_organization(params['organization']) + prior_env = self.find_lifecycle_environment(params['prior'], params['organization']) + + lifecycle_env = self._entities.LifecycleEnvironment(self._server, name=params['name'], organization=org, prior=prior_env) + response = lifecycle_env.search() + + if len(response) == 1: + lifecycle_env.id = response[0].id + lifecycle_env.update() + else: + lifecycle_env.create() + + return True + + def activation_key(self, params): + org = self.find_organization(params['organization']) + + activation_key = self._entities.ActivationKey(self._server, name=params['name'], organization=org) + response = activation_key.search() + + if len(response) == 1: + activation_key.id = response[0].id + activation_key.update() + else: + activation_key.create() + + if params['content_view']: + content_view = self.find_content_view(params['content_view'], params['organization']) + lifecycle_environment = self.find_lifecycle_environment(params['lifecycle_environment'], params['organization']) + + activation_key.content_view = content_view + activation_key.environment = lifecycle_environment + activation_key.update() + + return True + + +def main(): + module = AnsibleModule( + argument_spec=dict( + server_url=dict(type='str', required=True), + username=dict(type='str', required=True, no_log=True), + password=dict(type='str', required=True, no_log=True), + entity=dict(type='str', required=True, + choices=['repository', 'manifest', 'repository_set', 'sync_plan', + 'content_view', 'lifecycle_environment', 'activation_key', 'product']), + action=dict(type='str', choices=['sync', 'publish', 'promote']), + verify_ssl=dict(type='bool', default=False), + task_timeout=dict(type='int', default=1000), + params=dict(type='dict', required=True, no_log=True), + ), + supports_check_mode=True, + ) + + if not HAS_NAILGUN_PACKAGE: + module.fail_json(msg="Missing required nailgun module (check docs or install with: pip install nailgun") + + server_url = module.params['server_url'] + username = 
module.params['username'] + password = module.params['password'] + entity = module.params['entity'] + action = module.params['action'] + params = module.params['params'] + verify_ssl = module.params['verify_ssl'] + task_timeout = module.params['task_timeout'] + + server = ServerConfig( + url=server_url, + auth=(username, password), + verify=verify_ssl + ) + ng = NailGun(server, entities, module, task_timeout) + + # Lets make an connection to the server with username and password + try: + org = entities.Organization(server) + org.search() + except Exception as e: + module.fail_json(msg="Failed to connect to Foreman server: %s " % e) + + result = False + + if entity == 'product': + if action == 'sync': + result = ng.sync_product(params) + else: + result = ng.product(params) + elif entity == 'repository': + if action == 'sync': + result = ng.sync_repository(params) + else: + result = ng.repository(params) + elif entity == 'manifest': + result = ng.manifest(params) + elif entity == 'repository_set': + result = ng.repository_set(params) + elif entity == 'sync_plan': + result = ng.sync_plan(params) + elif entity == 'content_view': + if action == 'publish': + result = ng.publish(params) + elif action == 'promote': + result = ng.promote(params) + else: + result = ng.content_view(params) + elif entity == 'lifecycle_environment': + result = ng.lifecycle_environment(params) + elif entity == 'activation_key': + result = ng.activation_key(params) + else: + module.fail_json(changed=False, result="Unsupported entity supplied") + + module.exit_json(changed=result, result="%s updated" % entity) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/remote_management/hpilo/hpilo_boot.py b/plugins/modules/remote_management/hpilo/hpilo_boot.py new file mode 100644 index 0000000000..493057db58 --- /dev/null +++ b/plugins/modules/remote_management/hpilo/hpilo_boot.py @@ -0,0 +1,208 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright 2012 Dag Wieers +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: hpilo_boot +author: Dag Wieers (@dagwieers) +short_description: Boot system using specific media through HP iLO interface +description: +- "This module boots a system through its HP iLO interface. The boot media + can be one of: cdrom, floppy, hdd, network or usb." +- This module requires the hpilo python module. +options: + host: + description: + - The HP iLO hostname/address that is linked to the physical system. + required: true + login: + description: + - The login name to authenticate to the HP iLO interface. + default: Administrator + password: + description: + - The password to authenticate to the HP iLO interface. + default: admin + media: + description: + - The boot media to boot the system from + choices: [ "cdrom", "floppy", "hdd", "network", "normal", "usb" ] + image: + description: + - The URL of a cdrom, floppy or usb boot media image. + protocol://username:password@hostname:port/filename + - protocol is either 'http' or 'https' + - username:password is optional + - port is optional + state: + description: + - The state of the boot media. 
+ - "no_boot: Do not boot from the device" + - "boot_once: Boot from the device once and then notthereafter" + - "boot_always: Boot from the device each time the server is rebooted" + - "connect: Connect the virtual media device and set to boot_always" + - "disconnect: Disconnects the virtual media device and set to no_boot" + - "poweroff: Power off the server" + default: boot_once + choices: [ "boot_always", "boot_once", "connect", "disconnect", "no_boot", "poweroff" ] + force: + description: + - Whether to force a reboot (even when the system is already booted). + - As a safeguard, without force, hpilo_boot will refuse to reboot a server that is already running. + default: no + type: bool + ssl_version: + description: + - Change the ssl_version used. + default: TLSv1 + choices: [ "SSLv3", "SSLv23", "TLSv1", "TLSv1_1", "TLSv1_2" ] +requirements: +- python-hpilo +notes: +- To use a USB key image you need to specify floppy as boot media. +- This module ought to be run from a system that can access the HP iLO + interface directly, either by using C(local_action) or using C(delegate_to). +''' + +EXAMPLES = r''' +- name: Task to boot a system using an ISO from an HP iLO interface only if the system is an HP server + hpilo_boot: + host: YOUR_ILO_ADDRESS + login: YOUR_ILO_LOGIN + password: YOUR_ILO_PASSWORD + media: cdrom + image: http://some-web-server/iso/boot.iso + when: cmdb_hwmodel.startswith('HP ') + delegate_to: localhost + +- name: Power off a server + hpilo_boot: + host: YOUR_ILO_HOST + login: YOUR_ILO_LOGIN + password: YOUR_ILO_PASSWORD + state: poweroff + delegate_to: localhost +''' + +RETURN = ''' +# Default return values +''' + +import time +import traceback +import warnings + +HPILO_IMP_ERR = None +try: + import hpilo + HAS_HPILO = True +except ImportError: + HPILO_IMP_ERR = traceback.format_exc() + HAS_HPILO = False + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib + + +# Suppress warnings from hpilo +warnings.simplefilter('ignore') + + +def main(): + + module = AnsibleModule( + argument_spec=dict( + host=dict(type='str', required=True), + login=dict(type='str', default='Administrator'), + password=dict(type='str', default='admin', no_log=True), + media=dict(type='str', choices=['cdrom', 'floppy', 'rbsu', 'hdd', 'network', 'normal', 'usb']), + image=dict(type='str'), + state=dict(type='str', default='boot_once', choices=['boot_always', 'boot_once', 'connect', 'disconnect', 'no_boot', 'poweroff']), + force=dict(type='bool', default=False), + ssl_version=dict(type='str', default='TLSv1', choices=['SSLv3', 'SSLv23', 'TLSv1', 'TLSv1_1', 'TLSv1_2']), + ) + ) + + if not HAS_HPILO: + module.fail_json(msg=missing_required_lib('python-hpilo'), exception=HPILO_IMP_ERR) + + host = module.params['host'] + login = module.params['login'] + password = module.params['password'] + media = module.params['media'] + image = module.params['image'] + state = module.params['state'] + force = module.params['force'] + ssl_version = getattr(hpilo.ssl, 'PROTOCOL_' + module.params.get('ssl_version').upper().replace('V', 'v')) + + ilo = hpilo.Ilo(host, login=login, password=password, ssl_version=ssl_version) + changed = False + status = {} + power_status = 'UNKNOWN' + + if media and state in ('boot_always', 'boot_once', 'connect', 'disconnect', 'no_boot'): + + # Workaround for: Error communicating with iLO: Problem manipulating EV + try: + ilo.set_one_time_boot(media) + except hpilo.IloError: + time.sleep(60) + ilo.set_one_time_boot(media) + + # TODO: Verify if image URL 
exists/works + if image: + ilo.insert_virtual_media(media, image) + changed = True + + if media == 'cdrom': + ilo.set_vm_status('cdrom', state, True) + status = ilo.get_vm_status() + changed = True + elif media in ('floppy', 'usb'): + ilo.set_vf_status(state, True) + status = ilo.get_vf_status() + changed = True + + # Only perform a boot when state is boot_once or boot_always, or in case we want to force a reboot + if state in ('boot_once', 'boot_always') or force: + + power_status = ilo.get_host_power_status() + + if not force and power_status == 'ON': + module.fail_json(msg='HP iLO (%s) reports that the server is already powered on !' % host) + + if power_status == 'ON': + ilo.warm_boot_server() +# ilo.cold_boot_server() + changed = True + else: + ilo.press_pwr_btn() +# ilo.reset_server() +# ilo.set_host_power(host_power=True) + changed = True + + elif state in ('poweroff'): + + power_status = ilo.get_host_power_status() + + if not power_status == 'OFF': + ilo.hold_pwr_btn() +# ilo.set_host_power(host_power=False) + changed = True + + module.exit_json(changed=changed, power=power_status, **status) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/remote_management/hpilo/hpilo_facts.py b/plugins/modules/remote_management/hpilo/hpilo_facts.py new file mode 120000 index 0000000000..792c1a7fbc --- /dev/null +++ b/plugins/modules/remote_management/hpilo/hpilo_facts.py @@ -0,0 +1 @@ +hpilo_info.py \ No newline at end of file diff --git a/plugins/modules/remote_management/hpilo/hpilo_info.py b/plugins/modules/remote_management/hpilo/hpilo_info.py new file mode 100644 index 0000000000..4fc475c3f9 --- /dev/null +++ b/plugins/modules/remote_management/hpilo/hpilo_info.py @@ -0,0 +1,262 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright 2012 Dag Wieers +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = r''' +--- +module: hpilo_info +author: Dag Wieers (@dagwieers) +short_description: Gather information through an HP iLO interface +description: +- This module gathers information on a specific system using its HP iLO interface. + These information includes hardware and network related information useful + for provisioning (e.g. macaddress, uuid). +- This module requires the C(hpilo) python module. +- This module was called C(hpilo_facts) before Ansible 2.9, returning C(ansible_facts). + Note that the M(hpilo_info) module no longer returns C(ansible_facts)! +options: + host: + description: + - The HP iLO hostname/address that is linked to the physical system. + required: true + login: + description: + - The login name to authenticate to the HP iLO interface. + default: Administrator + password: + description: + - The password to authenticate to the HP iLO interface. + default: admin + ssl_version: + description: + - Change the ssl_version used. + default: TLSv1 + choices: [ "SSLv3", "SSLv23", "TLSv1", "TLSv1_1", "TLSv1_2" ] +requirements: +- hpilo +notes: +- This module ought to be run from a system that can access the HP iLO + interface directly, either by using C(local_action) or using C(delegate_to). 
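+- The returned C(hw_memory_total) value is a human-readable string such as C(4 GB),
+ summed from the per-processor totals reported in the embedded health data.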
+''' + +EXAMPLES = r''' +# Task to gather facts from a HP iLO interface only if the system is an HP server +- hpilo_info: + host: YOUR_ILO_ADDRESS + login: YOUR_ILO_LOGIN + password: YOUR_ILO_PASSWORD + when: cmdb_hwmodel.startswith('HP ') + delegate_to: localhost + register: results + +- fail: + msg: 'CMDB serial ({{ cmdb_serialno }}) does not match hardware serial ({{ results.hw_system_serial }}) !' + when: cmdb_serialno != results.hw_system_serial +''' + +RETURN = r''' +# Typical output of HP iLO_info for a physical system +hw_bios_date: + description: BIOS date + returned: always + type: str + sample: 05/05/2011 + +hw_bios_version: + description: BIOS version + returned: always + type: str + sample: P68 + +hw_ethX: + description: Interface information (for each interface) + returned: always + type: dict + sample: + - macaddress: 00:11:22:33:44:55 + macaddress_dash: 00-11-22-33-44-55 + +hw_eth_ilo: + description: Interface information (for the iLO network interface) + returned: always + type: dict + sample: + - macaddress: 00:11:22:33:44:BA + - macaddress_dash: 00-11-22-33-44-BA + +hw_product_name: + description: Product name + returned: always + type: str + sample: ProLiant DL360 G7 + +hw_product_uuid: + description: Product UUID + returned: always + type: str + sample: ef50bac8-2845-40ff-81d9-675315501dac + +hw_system_serial: + description: System serial number + returned: always + type: str + sample: ABC12345D6 + +hw_uuid: + description: Hardware UUID + returned: always + type: str + sample: 123456ABC78901D2 +''' + +import re +import traceback +import warnings + +HPILO_IMP_ERR = None +try: + import hpilo + HAS_HPILO = True +except ImportError: + HPILO_IMP_ERR = traceback.format_exc() + HAS_HPILO = False + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib +from ansible.module_utils._text import to_native + + +# Suppress warnings from hpilo +warnings.simplefilter('ignore') + + +def parse_flat_interface(entry, non_numeric='hw_eth_ilo'): + try: + infoname = 'hw_eth' + str(int(entry['Port']) - 1) + except Exception: + infoname = non_numeric + + info = { + 'macaddress': entry['MAC'].replace('-', ':'), + 'macaddress_dash': entry['MAC'] + } + return (infoname, info) + + +def main(): + + module = AnsibleModule( + argument_spec=dict( + host=dict(type='str', required=True), + login=dict(type='str', default='Administrator'), + password=dict(type='str', default='admin', no_log=True), + ssl_version=dict(type='str', default='TLSv1', choices=['SSLv3', 'SSLv23', 'TLSv1', 'TLSv1_1', 'TLSv1_2']), + ), + supports_check_mode=True, + ) + is_old_facts = module._name == 'hpilo_facts' + if is_old_facts: + module.deprecate("The 'hpilo_facts' module has been renamed to 'hpilo_info', " + "and the renamed one no longer returns ansible_facts", version='2.13') + + if not HAS_HPILO: + module.fail_json(msg=missing_required_lib('python-hpilo'), exception=HPILO_IMP_ERR) + + host = module.params['host'] + login = module.params['login'] + password = module.params['password'] + ssl_version = getattr(hpilo.ssl, 'PROTOCOL_' + module.params.get('ssl_version').upper().replace('V', 'v')) + + ilo = hpilo.Ilo(host, login=login, password=password, ssl_version=ssl_version) + + info = { + 'module_hw': True, + } + + # TODO: Count number of CPUs, DIMMs and total memory + try: + data = ilo.get_host_data() + except hpilo.IloCommunicationError as e: + module.fail_json(msg=to_native(e)) + + for entry in data: + if 'type' not in entry: + continue + elif entry['type'] == 0: # BIOS Information + 
info['hw_bios_version'] = entry['Family'] + info['hw_bios_date'] = entry['Date'] + elif entry['type'] == 1: # System Information + info['hw_uuid'] = entry['UUID'] + info['hw_system_serial'] = entry['Serial Number'].rstrip() + info['hw_product_name'] = entry['Product Name'] + info['hw_product_uuid'] = entry['cUUID'] + elif entry['type'] == 209: # Embedded NIC MAC Assignment + if 'fields' in entry: + for (name, value) in [(e['name'], e['value']) for e in entry['fields']]: + if name.startswith('Port'): + try: + infoname = 'hw_eth' + str(int(value) - 1) + except Exception: + infoname = 'hw_eth_ilo' + elif name.startswith('MAC'): + info[infoname] = { + 'macaddress': value.replace('-', ':'), + 'macaddress_dash': value + } + else: + (infoname, entry_info) = parse_flat_interface(entry, 'hw_eth_ilo') + info[infoname] = entry_info + elif entry['type'] == 209: # HPQ NIC iSCSI MAC Info + for (name, value) in [(e['name'], e['value']) for e in entry['fields']]: + if name.startswith('Port'): + try: + infoname = 'hw_iscsi' + str(int(value) - 1) + except Exception: + infoname = 'hw_iscsi_ilo' + elif name.startswith('MAC'): + info[infoname] = { + 'macaddress': value.replace('-', ':'), + 'macaddress_dash': value + } + elif entry['type'] == 233: # Embedded NIC MAC Assignment (Alternate data format) + (infoname, entry_info) = parse_flat_interface(entry, 'hw_eth_ilo') + info[infoname] = entry_info + + # Collect health (RAM/CPU data) + health = ilo.get_embedded_health() + info['hw_health'] = health + + memory_details_summary = health.get('memory', {}).get('memory_details_summary') + # RAM as reported by iLO 2.10 on ProLiant BL460c Gen8 + if memory_details_summary: + info['hw_memory_details_summary'] = memory_details_summary + info['hw_memory_total'] = 0 + for cpu, details in memory_details_summary.items(): + cpu_total_memory_size = details.get('total_memory_size') + if cpu_total_memory_size: + ram = re.search(r'(\d+)\s+(\w+)', cpu_total_memory_size) + if ram: + if ram.group(2) == 'GB': + info['hw_memory_total'] = info['hw_memory_total'] + int(ram.group(1)) + + # reformat into a text friendly format + info['hw_memory_total'] = "{0} GB".format(info['hw_memory_total']) + + if is_old_facts: + module.exit_json(ansible_facts=info) + else: + module.exit_json(**info) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/remote_management/hpilo/hponcfg.py b/plugins/modules/remote_management/hpilo/hponcfg.py new file mode 100644 index 0000000000..b47d1e5621 --- /dev/null +++ b/plugins/modules/remote_management/hpilo/hponcfg.py @@ -0,0 +1,116 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2012, Dag Wieers +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = r''' +--- +module: hponcfg +author: Dag Wieers (@dagwieers) +short_description: Configure HP iLO interface using hponcfg +description: +- This modules configures the HP iLO interface using hponcfg. +options: + path: + description: + - The XML file as accepted by hponcfg. + required: true + aliases: ['src'] + minfw: + description: + - The minimum firmware level needed. + required: false + executable: + description: + - Path to the hponcfg executable (`hponcfg` which uses $PATH). + default: hponcfg + verbose: + description: + - Run hponcfg in verbose mode (-v). 
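+ - The raw hponcfg output is returned in C(stdout) and C(stderr) either way, so verbose
+ runs can be inspected from the registered task result.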
+ default: no + type: bool +requirements: +- hponcfg tool +notes: +- You need a working hponcfg on the target system. +''' + +EXAMPLES = r''' +- name: Example hponcfg configuration XML + copy: + content: | + + + + + + + + + + + + + + dest: /tmp/enable-ssh.xml + +- name: Configure HP iLO using enable-ssh.xml + hponcfg: + src: /tmp/enable-ssh.xml + +- name: Configure HP iLO on VMware ESXi hypervisor + hponcfg: + src: /tmp/enable-ssh.xml + executable: /opt/hp/tools/hponcfg +''' + +from ansible.module_utils.basic import AnsibleModule + + +def main(): + + module = AnsibleModule( + argument_spec=dict( + src=dict(type='path', required=True, aliases=['path']), + minfw=dict(type='str'), + executable=dict(default='hponcfg', type='str'), + verbose=dict(default=False, type='bool'), + ) + ) + + # Consider every action a change (not idempotent yet!) + changed = True + + src = module.params['src'] + minfw = module.params['minfw'] + executable = module.params['executable'] + verbose = module.params['verbose'] + + options = ' -f %s' % src + + if verbose: + options += ' -v' + + if minfw: + options += ' -m %s' % minfw + + rc, stdout, stderr = module.run_command('%s %s' % (executable, options)) + + if rc != 0: + module.fail_json(rc=rc, msg="Failed to run hponcfg", stdout=stdout, stderr=stderr) + + module.exit_json(changed=changed, stdout=stdout, stderr=stderr) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/remote_management/imc/imc_rest.py b/plugins/modules/remote_management/imc/imc_rest.py new file mode 100644 index 0000000000..10667e25a6 --- /dev/null +++ b/plugins/modules/remote_management/imc/imc_rest.py @@ -0,0 +1,431 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# (c) 2017, Dag Wieers +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = r''' +--- +module: imc_rest +short_description: Manage Cisco IMC hardware through its REST API +description: +- Provides direct access to the Cisco IMC REST API. +- Perform any configuration changes and actions that the Cisco IMC supports. +- More information about the IMC REST API is available from + U(http://www.cisco.com/c/en/us/td/docs/unified_computing/ucs/c/sw/api/3_0/b_Cisco_IMC_api_301.html) +author: +- Dag Wieers (@dagwieers) +requirements: +- lxml +- xmljson >= 0.1.8 +options: + hostname: + description: + - IP Address or hostname of Cisco IMC, resolvable by Ansible control host. + required: true + aliases: [ host, ip ] + username: + description: + - Username used to login to the switch. + default: admin + aliases: [ user ] + password: + description: + - The password to use for authentication. + default: password + path: + description: + - Name of the absolute path of the filename that includes the body + of the http request being sent to the Cisco IMC REST API. + - Parameter C(path) is mutual exclusive with parameter C(content). + aliases: [ 'src', 'config_file' ] + content: + description: + - When used instead of C(path), sets the content of the API requests directly. + - This may be convenient to template simple requests, for anything complex use the M(template) module. + - You can collate multiple IMC XML fragments and they will be processed sequentially in a single stream, + the Cisco IMC output is subsequently merged. 
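+ - Fragment outputs are merged recursively by the module's internal merge helper, which
+ combines nested dictionaries and pairs up list entries.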
+    rc, stdout, stderr = module.run_command('%s %s' % (executable, options))
+
+    if rc != 0:
+        module.fail_json(rc=rc, msg="Failed to run hponcfg", stdout=stdout, stderr=stderr)
+
+    module.exit_json(changed=changed, stdout=stdout, stderr=stderr)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/remote_management/imc/imc_rest.py b/plugins/modules/remote_management/imc/imc_rest.py
new file mode 100644
index 0000000000..10667e25a6
--- /dev/null
+++ b/plugins/modules/remote_management/imc/imc_rest.py
@@ -0,0 +1,431 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# (c) 2017, Dag Wieers <dag@wieers.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+DOCUMENTATION = r'''
+---
+module: imc_rest
+short_description: Manage Cisco IMC hardware through its REST API
+description:
+- Provides direct access to the Cisco IMC REST API.
+- Perform any configuration changes and actions that the Cisco IMC supports.
+- More information about the IMC REST API is available from
+  U(http://www.cisco.com/c/en/us/td/docs/unified_computing/ucs/c/sw/api/3_0/b_Cisco_IMC_api_301.html)
+author:
+- Dag Wieers (@dagwieers)
+requirements:
+- lxml
+- xmljson >= 0.1.8
+options:
+  hostname:
+    description:
+    - IP address or hostname of the Cisco IMC, resolvable by the Ansible control host.
+    required: true
+    aliases: [ host, ip ]
+  username:
+    description:
+    - Username used to login to the IMC interface.
+    default: admin
+    aliases: [ user ]
+  password:
+    description:
+    - The password to use for authentication.
+    default: password
+  path:
+    description:
+    - Absolute path of the file that contains the body of the HTTP request being sent to the Cisco IMC REST API.
+    - Parameter C(path) is mutually exclusive with parameter C(content).
+    aliases: [ 'src', 'config_file' ]
+  content:
+    description:
+    - When used instead of C(path), sets the content of the API requests directly.
+    - This may be convenient to template simple requests; for anything complex use the M(template) module.
+    - You can collate multiple IMC XML fragments and they will be processed sequentially in a single stream;
+      the Cisco IMC output is subsequently merged.
+    - Parameter C(content) is mutually exclusive with parameter C(path).
+  protocol:
+    description:
+    - Connection protocol to use.
+    default: https
+    choices: [ http, https ]
+  timeout:
+    description:
+    - The socket level timeout in seconds.
+    - This is the time that every single connection (every fragment) can spend.
+      If this C(timeout) is reached, the module will fail with a
+      C(Connection failure) indicating that C(The read operation timed out).
+    default: 60
+  validate_certs:
+    description:
+    - If C(no), SSL certificates will not be validated.
+    - This should only be set to C(no) when used on personally controlled sites using self-signed certificates.
+    type: bool
+    default: 'yes'
+notes:
+- The XML fragments don't need an authentication cookie; it is injected by the module automatically.
+- The Cisco IMC XML output is being translated to JSON using the Cobra convention.
+- Any configConfMo change requested has a return status of 'modified', even if there was no actual change
+  from the previous configuration. As a result, this module will always report a change on subsequent runs.
+  In case this behaviour is fixed in a future update to Cisco IMC, this module will automatically adapt.
+- If you get a C(Connection failure) related to C(The read operation timed out), increase the C(timeout)
+  parameter. Some XML fragments can take longer than the default timeout.
+- More information about the IMC REST API is available from
+  U(http://www.cisco.com/c/en/us/td/docs/unified_computing/ucs/c/sw/api/3_0/b_Cisco_IMC_api_301.html)
+'''
+
+EXAMPLES = r'''
+- name: Power down server
+  imc_rest:
+    hostname: '{{ imc_hostname }}'
+    username: '{{ imc_username }}'
+    password: '{{ imc_password }}'
+    validate_certs: no
+    content: |
+      <configConfMo><inConfig>
+        <computeRackUnit dn="sys/rack-unit-1" adminPower="down"/>
+      </inConfig></configConfMo>
+  delegate_to: localhost
+
+- name: Configure IMC using multiple XML fragments
+  imc_rest:
+    hostname: '{{ imc_hostname }}'
+    username: '{{ imc_username }}'
+    password: '{{ imc_password }}'
+    validate_certs: no
+    timeout: 120
+    content: |
+      <!-- Each document below is processed separately in the same session -->
+      <configConfMo><inConfig>
+        <computeRackUnit dn="sys/rack-unit-1" usrLbl="Customer Lab"/>
+      </inConfig></configConfMo>
+
+      <configConfMo><inConfig>
+        <computeRackUnit dn="sys/rack-unit-1" adminPower="up"/>
+      </inConfig></configConfMo>
+  delegate_to: localhost
+
+- name: Enable PXE boot and power-cycle server
+  imc_rest:
+    hostname: '{{ imc_hostname }}'
+    username: '{{ imc_username }}'
+    password: '{{ imc_password }}'
+    validate_certs: no
+    content: |
+      <!-- Configure PXE boot -->
+      <configConfMo><inConfig>
+        <lsbootLan dn="sys/rack-unit-1/boot-policy/lan-read-only" access="read-only" order="1" prot="pxe" type="lan"/>
+      </inConfig></configConfMo>
+
+      <!-- Power cycle server -->
+      <configConfMo><inConfig>
+        <computeRackUnit dn="sys/rack-unit-1" adminPower="cycle-immediate"/>
+      </inConfig></configConfMo>
+  delegate_to: localhost
+
+- name: Reconfigure IMC to boot from storage
+  imc_rest:
+    hostname: '{{ imc_host }}'
+    username: '{{ imc_username }}'
+    password: '{{ imc_password }}'
+    validate_certs: no
+    content: |
+      <configConfMo><inConfig>
+        <lsbootStorage dn="sys/rack-unit-1/boot-policy/storage-read-write" access="read-write" order="1" type="storage"/>
+      </inConfig></configConfMo>
+  delegate_to: localhost
+
+- name: Add customer description to server
+  imc_rest:
+    hostname: '{{ imc_host }}'
+    username: '{{ imc_username }}'
+    password: '{{ imc_password }}'
+    validate_certs: no
+    content: |
+      <configConfMo><inConfig>
+        <computeRackUnit dn="sys/rack-unit-1" usrLbl="Customer Lab - POD{{ pod_id }} - {{ inventory_hostname_short }}"/>
+      </inConfig></configConfMo>
+  delegate_to: localhost
+
+- name: Disable HTTP and increase session timeout to max value 10800 secs
+  imc_rest:
+    hostname: '{{ imc_host }}'
+    username: '{{ imc_username }}'
+    password: '{{ imc_password }}'
+    validate_certs: no
+    timeout: 120
+    content: |
+      <configConfMo><inConfig>
+        <commHttp dn="sys/svc-ext/http-svc" adminState="disabled"/>
+      </inConfig></configConfMo>
+
+      <configConfMo><inConfig>
+        <commHttps dn="sys/svc-ext/https-svc" adminState="enabled" sessionTimeout="10800"/>
+      </inConfig></configConfMo>
+  delegate_to: localhost
+'''
+
+RETURN = r'''
+aaaLogin:
+  description: Cisco IMC XML output for the login, translated to JSON using the Cobra convention
+  returned: success
+  type: dict
+  sample: |
+    "attributes": {
+        "cookie": "",
+        "outCookie": "1498902428/9de6dc36-417c-157c-106c-139efe2dc02a",
+        "outPriv": "admin",
+        "outRefreshPeriod": "600",
+        "outSessionId": "114",
+        "outVersion": "2.0(13e)",
+        "response": "yes"
+    }
+configConfMo:
+  description: Cisco IMC XML output for any configConfMo XML fragments, translated to JSON using the Cobra convention
+  returned: success
+  type: dict
+  sample: |
+elapsed:
+  description: Elapsed time in seconds
+  returned: always
+  type: int
+  sample: 31
+response:
+  description: HTTP response message, including content length
+  returned: always
+  type: str
+  sample: OK (729 bytes)
+status:
+  description: The HTTP response status code
+  returned: always
+  type: int
+  sample: 200
+error:
+  description: Cisco IMC XML error output for last request, translated to JSON using the Cobra convention
+  returned: failed
+  type: dict
+  sample: |
+    "attributes": {
+        "cookie": "",
+        "errorCode": "ERR-xml-parse-error",
+        "errorDescr": "XML PARSING ERROR: Element 'computeRackUnit', attribute 'admin_Power': The attribute 'admin_Power' is not allowed. ",
+        "invocationResult": "594",
+        "response": "yes"
+    }
+error_code:
+  description: Cisco IMC error code
+  returned: failed
+  type: str
+  sample: ERR-xml-parse-error
+error_text:
+  description: Cisco IMC error message
+  returned: failed
+  type: str
+  sample: |
+    XML PARSING ERROR: Element 'computeRackUnit', attribute 'admin_Power': The attribute 'admin_Power' is not allowed.
+input:
+  description: RAW XML input sent to the Cisco IMC, causing the error
+  returned: failed
+  type: str
+  sample: |
+    <configConfMo><inConfig><computeRackUnit dn="sys/rack-unit-1" admin_Power="down"/></inConfig></configConfMo>
+output:
+  description: RAW XML output received from the Cisco IMC, with error details
+  returned: failed
+  type: str
+  sample: >
+    <error cookie=""
+      response="yes"
+      errorCode="ERR-xml-parse-error"
+      invocationResult="594"
+      errorDescr="XML PARSING ERROR: Element 'computeRackUnit', attribute 'admin_Power': The attribute 'admin_Power' is not allowed."/>
+'''
+
+import atexit
+import datetime
+import os
+import traceback
+
+try:
+    from itertools import zip_longest
+except ImportError:  # Python 2
+    from itertools import izip_longest as zip_longest
+
+LXML_ETREE_IMP_ERR = None
+try:
+    import lxml.etree
+    HAS_LXML_ETREE = True
+except ImportError:
+    LXML_ETREE_IMP_ERR = traceback.format_exc()
+    HAS_LXML_ETREE = False
+
+XMLJSON_COBRA_IMP_ERR = None
+try:
+    from xmljson import cobra
+    HAS_XMLJSON_COBRA = True
+except ImportError:
+    XMLJSON_COBRA_IMP_ERR = traceback.format_exc()
+    HAS_XMLJSON_COBRA = False
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils.urls import fetch_url
+
+
+def imc_response(module, rawoutput, rawinput=''):
+    ''' Handle IMC returned data '''
+    xmloutput = lxml.etree.fromstring(rawoutput)
+    result = cobra.data(xmloutput)
+
+    # Handle errors
+    if xmloutput.get('errorCode') and xmloutput.get('errorDescr'):
+        if rawinput:
+            result['input'] = rawinput
+        result['output'] = rawoutput
+        result['error_code'] = xmloutput.get('errorCode')
+        result['error_text'] = xmloutput.get('errorDescr')
+        module.fail_json(msg='Request failed: %(error_text)s' % result, **result)
+
+    return result
+
+
+def logout(module, url, cookie, timeout):
+    ''' Perform a logout, if needed '''
+    data = '<aaaLogout cookie="%s" inCookie="%s"/>' % (cookie, cookie)
+    resp, auth = fetch_url(module, url, data=data, method="POST", timeout=timeout)
+
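+
+# Example of merge() behaviour (illustrative values):
+#   merge({'a': {'x': 1}}, {'a': {'y': 2}}) -> {'a': {'x': 1, 'y': 2}}
+#   merge([1], [None, 2]) -> [1, 2]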
+def merge(one, two):
+    ''' Merge two complex nested datastructures into one '''
+    if isinstance(one, dict) and isinstance(two, dict):
+        copy = dict(one)
+        # copy.update({key: merge(one.get(key, None), two[key]) for key in two})
+        copy.update(dict((key, merge(one.get(key, None), two[key])) for key in two))
+        return copy
+
+    elif isinstance(one, list) and isinstance(two, list):
+        return [merge(alpha, beta) for (alpha, beta) in zip_longest(one, two)]
+
+    return one if two is None else two
+
+
+def main():
+
+    module = AnsibleModule(
+        argument_spec=dict(
+            hostname=dict(type='str', required=True, aliases=['host', 'ip']),
+            username=dict(type='str', default='admin', aliases=['user']),
+            password=dict(type='str', default='password', no_log=True),
+            content=dict(type='str'),
+            path=dict(type='path', aliases=['config_file', 'src']),
+            protocol=dict(type='str', default='https', choices=['http', 'https']),
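+            # NOTE: the timeout below applies to every fragment's request
+            # separately, not to the task as a whole.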
+            timeout=dict(type='int', default=60),
+            validate_certs=dict(type='bool', default=True),
+        ),
+        supports_check_mode=True,
+        mutually_exclusive=[['content', 'path']],
+    )
+
+    if not HAS_LXML_ETREE:
+        module.fail_json(msg=missing_required_lib('lxml'), exception=LXML_ETREE_IMP_ERR)
+
+    if not HAS_XMLJSON_COBRA:
+        module.fail_json(msg=missing_required_lib('xmljson >= 0.1.8'), exception=XMLJSON_COBRA_IMP_ERR)
+
+    hostname = module.params['hostname']
+    username = module.params['username']
+    password = module.params['password']
+
+    content = module.params['content']
+    path = module.params['path']
+
+    protocol = module.params['protocol']
+    timeout = module.params['timeout']
+
+    result = dict(
+        failed=False,
+        changed=False,
+    )
+
+    # Report missing file
+    file_exists = False
+    if path:
+        if os.path.isfile(path):
+            file_exists = True
+        else:
+            module.fail_json(msg='Cannot find/access path:\n%s' % path)
+
+    start = datetime.datetime.utcnow()
+
+    # Perform login first
+    url = '%s://%s/nuova' % (protocol, hostname)
+    data = '<aaaLogin inName="%s" inPassword="%s"/>' % (username, password)
+    resp, auth = fetch_url(module, url, data=data, method='POST', timeout=timeout)
+    if resp is None or auth['status'] != 200:
+        result['elapsed'] = (datetime.datetime.utcnow() - start).seconds
+        module.fail_json(msg='Task failed with error %(status)s: %(msg)s' % auth, **result)
+    result.update(imc_response(module, resp.read()))
+
+    # Store cookie for future requests
+    try:
+        cookie = result['aaaLogin']['attributes']['outCookie']
+    except Exception:
+        module.fail_json(msg='Could not find cookie in output', **result)
+
+    # If we would not log out properly, we run out of sessions quickly
+    atexit.register(logout, module, url, cookie, timeout)
+
+    # Prepare request data
+    if content:
+        rawdata = content
+    elif file_exists:
+        with open(path, 'r') as config_object:
+            rawdata = config_object.read()
+
+    # Wrap the XML documents in a <root> element
+    xmldata = lxml.etree.fromstring('<root>%s</root>' % rawdata.replace('\n', ''))
+
+    # Handle each XML document separately in the same session
+    for xmldoc in list(xmldata):
+        if xmldoc.tag is lxml.etree.Comment:
+            continue
+        # Add cookie to XML
+        xmldoc.set('cookie', cookie)
+        data = lxml.etree.tostring(xmldoc)
+
+        # Perform actual request
+        resp, info = fetch_url(module, url, data=data, method='POST', timeout=timeout)
+        if resp is None or info['status'] != 200:
+            result['elapsed'] = (datetime.datetime.utcnow() - start).seconds
+            module.fail_json(msg='Task failed with error %(status)s: %(msg)s' % info, **result)
+
+        # Merge results with previous results
+        rawoutput = resp.read()
+        result = merge(result, imc_response(module, rawoutput, rawinput=data))
+        result['response'] = info['msg']
+        result['status'] = info['status']
+
+        # Check for any changes
+        # NOTE: Unfortunately the IMC API always reports status as 'modified'
+        xmloutput = lxml.etree.fromstring(rawoutput)
+        results = xmloutput.xpath('/configConfMo/outConfig/*/@status')
+        result['changed'] = ('modified' in results)
+
+    # Report success
+    result['elapsed'] = (datetime.datetime.utcnow() - start).seconds
+    module.exit_json(**result)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/remote_management/intersight/intersight_info.py b/plugins/modules/remote_management/intersight/intersight_info.py
new file mode 100644
index 0000000000..0ffeb9b906
--- /dev/null
+++ b/plugins/modules/remote_management/intersight/intersight_info.py
@@ -0,0 +1,118 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# GNU General Public License v3.0+ (see COPYING or
https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = r''' +--- +module: intersight_info +short_description: Gather information about Intersight +description: +- Gathers information about servers in L(Cisco Intersight,https://intersight.com). +- This module was called C(intersight_facts) before Ansible 2.9. The usage did not change. +extends_documentation_fragment: +- cisco.intersight.intersight + +options: + server_names: + description: + - Server names to retrieve information from. + - An empty list will return all servers. + type: list + required: yes +author: +- David Soper (@dsoper2) +- CiscoUcs (@CiscoUcs) +''' + +EXAMPLES = r''' +- name: Get info for all servers + intersight_info: + api_private_key: ~/Downloads/SecretKey.txt + api_key_id: 64612d300d0982/64612d300d0b00/64612d300d3650 + server_names: +- debug: + msg: "server name {{ item.Name }}, moid {{ item.Moid }}" + loop: "{{ intersight_servers }}" + when: intersight_servers is defined + +- name: Get info for servers by name + intersight_info: + api_private_key: ~/Downloads/SecretKey.txt + api_key_id: 64612d300d0982/64612d300d0b00/64612d300d3650 + server_names: + - SJC18-L14-UCS1-1 +- debug: + msg: "server moid {{ intersight_servers[0].Moid }}" + when: intersight_servers[0] is defined +''' + +RETURN = r''' +intersight_servers: + description: A list of Intersight Servers. See L(Cisco Intersight,https://intersight.com/apidocs) for details. + returned: always + type: complex + contains: + Name: + description: The name of the server. + returned: always + type: str + sample: SJC18-L14-UCS1-1 + Moid: + description: The unique identifier of this Managed Object instance. 
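+      # Moid is the opaque identifier Intersight assigns to each object
+      # (see the sample below); later API calls can reference it directly.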
+      returned: always
+      type: str
+      sample: 5978bea36ad4b000018d63dc
+'''
+
+from ansible_collections.cisco.intersight.plugins.module_utils.remote_management.intersight import IntersightModule, intersight_argument_spec
+from ansible.module_utils.basic import AnsibleModule
+
+
+def get_servers(module, intersight):
+    # Build an OData-style $filter from the requested names, e.g.
+    # ['srv1', 'srv2'] becomes "Name eq 'srv1' or Name eq 'srv2'".
+    # An empty server_names list yields an empty filter, returning all servers.
+    query_list = []
+    if module.params['server_names']:
+        for server in module.params['server_names']:
+            query_list.append("Name eq '%s'" % server)
+    query_str = ' or '.join(query_list)
+    options = {
+        'http_method': 'get',
+        'resource_path': '/compute/PhysicalSummaries',
+        'query_params': {
+            '$filter': query_str,
+            '$top': 5000
+        }
+    }
+    response_dict = intersight.call_api(**options)
+
+    return response_dict.get('Results')
+
+
+def main():
+    argument_spec = intersight_argument_spec
+    argument_spec.update(
+        server_names=dict(type='list', required=True),
+    )
+
+    module = AnsibleModule(
+        argument_spec,
+        supports_check_mode=True,
+    )
+    if module._name == 'intersight_facts':
+        module.deprecate("The 'intersight_facts' module has been renamed to 'intersight_info'", version='2.13')
+
+    intersight = IntersightModule(module)
+
+    # one API call returning all requested servers
+    module.exit_json(intersight_servers=get_servers(module, intersight))
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/remote_management/ipmi/ipmi_boot.py b/plugins/modules/remote_management/ipmi/ipmi_boot.py
new file mode 100644
index 0000000000..c6ad888715
--- /dev/null
+++ b/plugins/modules/remote_management/ipmi/ipmi_boot.py
@@ -0,0 +1,190 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: ipmi_boot
+short_description: Management of order of boot devices
+description:
+  - Use this module to manage the order of boot devices.
+options:
+  name:
+    description:
+      - Hostname or ip address of the BMC.
+    required: true
+  port:
+    description:
+      - Remote RMCP port.
+    default: 623
+  user:
+    description:
+      - Username to use to connect to the BMC.
+    required: true
+  password:
+    description:
+      - Password to connect to the BMC.
+    required: true
+  bootdev:
+    description:
+      - Set boot device to use on next reboot.
+    required: true
+    choices:
+      - network -- Request network boot
+      - floppy -- Boot from floppy
+      - hd -- Boot from hard drive
+      - safe -- Boot from hard drive, requesting 'safe mode'
+      - optical -- boot from CD/DVD/BD drive
+      - setup -- Boot into setup utility
+      - default -- remove any IPMI directed boot device request
+  state:
+    description:
+      - Whether the given boot device request should be present or absent.
+    default: present
+    choices:
+      - present -- Ensure the given boot device is requested
+      - absent -- Ensure the given boot device is not requested
+  persistent:
+    description:
+      - If set, ask that system firmware uses this device beyond next boot.
+        Be aware many systems do not honor this.
+    type: bool
+    default: 'no'
+  uefiboot:
+    description:
+      - If set, request UEFI boot explicitly.
+        Strictly speaking, the spec suggests that if not set, the system should BIOS boot and offers no "don't care" option.
+        In practice, this flag not being set does not preclude UEFI boot on any system I've encountered.
+ type: bool + default: 'no' +requirements: + - "python >= 2.6" + - pyghmi +author: "Bulat Gaifullin (@bgaifullin) " +''' + +RETURN = ''' +bootdev: + description: The boot device name which will be used beyond next boot. + returned: success + type: str + sample: default +persistent: + description: If True, system firmware will use this device beyond next boot. + returned: success + type: bool + sample: false +uefimode: + description: If True, system firmware will use UEFI boot explicitly beyond next boot. + returned: success + type: bool + sample: false +''' + +EXAMPLES = ''' +# Ensure bootdevice is HD. +- ipmi_boot: + name: test.testdomain.com + user: admin + password: password + bootdev: hd + +# Ensure bootdevice is not Network +- ipmi_boot: + name: test.testdomain.com + user: admin + password: password + bootdev: network + state: absent +''' + +import traceback + +PYGHMI_IMP_ERR = None +try: + from pyghmi.ipmi import command +except ImportError: + PYGHMI_IMP_ERR = traceback.format_exc() + command = None + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib + + +def main(): + module = AnsibleModule( + argument_spec=dict( + name=dict(required=True), + port=dict(default=623, type='int'), + user=dict(required=True, no_log=True), + password=dict(required=True, no_log=True), + state=dict(default='present', choices=['present', 'absent']), + bootdev=dict(required=True, choices=['network', 'hd', 'floppy', 'safe', 'optical', 'setup', 'default']), + persistent=dict(default=False, type='bool'), + uefiboot=dict(default=False, type='bool') + ), + supports_check_mode=True, + ) + + if command is None: + module.fail_json(msg=missing_required_lib('pyghmi'), exception=PYGHMI_IMP_ERR) + + name = module.params['name'] + port = module.params['port'] + user = module.params['user'] + password = module.params['password'] + state = module.params['state'] + bootdev = module.params['bootdev'] + persistent = module.params['persistent'] + uefiboot = module.params['uefiboot'] + request = dict() + + if state == 'absent' and bootdev == 'default': + module.fail_json(msg="The bootdev 'default' cannot be used with state 'absent'.") + + # --- run command --- + try: + ipmi_cmd = command.Command( + bmc=name, userid=user, password=password, port=port + ) + module.debug('ipmi instantiated - name: "%s"' % name) + current = ipmi_cmd.get_bootdev() + # uefimode may not supported by BMC, so use desired value as default + current.setdefault('uefimode', uefiboot) + if state == 'present' and current != dict(bootdev=bootdev, persistent=persistent, uefimode=uefiboot): + request = dict(bootdev=bootdev, uefiboot=uefiboot, persist=persistent) + elif state == 'absent' and current['bootdev'] == bootdev: + request = dict(bootdev='default') + else: + module.exit_json(changed=False, **current) + + if module.check_mode: + response = dict(bootdev=request['bootdev']) + else: + response = ipmi_cmd.set_bootdev(**request) + + if 'error' in response: + module.fail_json(msg=response['error']) + + if 'persist' in request: + response['persistent'] = request['persist'] + if 'uefiboot' in request: + response['uefimode'] = request['uefiboot'] + + module.exit_json(changed=True, **response) + except Exception as e: + module.fail_json(msg=str(e)) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/remote_management/ipmi/ipmi_power.py b/plugins/modules/remote_management/ipmi/ipmi_power.py new file mode 100644 index 0000000000..d6b67de045 --- /dev/null +++ b/plugins/modules/remote_management/ipmi/ipmi_power.py @@ -0,0 
+1,135 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: ipmi_power +short_description: Power management for machine +description: + - Use this module for power management +options: + name: + description: + - Hostname or ip address of the BMC. + required: true + port: + description: + - Remote RMCP port. + default: 623 + user: + description: + - Username to use to connect to the BMC. + required: true + password: + description: + - Password to connect to the BMC. + required: true + state: + description: + - Whether to ensure that the machine in desired state. + required: true + choices: + - on -- Request system turn on + - off -- Request system turn off without waiting for OS to shutdown + - shutdown -- Have system request OS proper shutdown + - reset -- Request system reset without waiting for OS + - boot -- If system is off, then 'on', else 'reset' + timeout: + description: + - Maximum number of seconds before interrupt request. + default: 300 +requirements: + - "python >= 2.6" + - pyghmi +author: "Bulat Gaifullin (@bgaifullin) " +''' + +RETURN = ''' +powerstate: + description: The current power state of the machine. + returned: success + type: str + sample: on +''' + +EXAMPLES = ''' +# Ensure machine is powered on. +- ipmi_power: + name: test.testdomain.com + user: admin + password: password + state: on +''' + +import traceback + +PYGHMI_IMP_ERR = None +try: + from pyghmi.ipmi import command +except ImportError: + PYGHMI_IMP_ERR = traceback.format_exc() + command = None + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib + + +def main(): + module = AnsibleModule( + argument_spec=dict( + name=dict(required=True), + port=dict(default=623, type='int'), + state=dict(required=True, choices=['on', 'off', 'shutdown', 'reset', 'boot']), + user=dict(required=True, no_log=True), + password=dict(required=True, no_log=True), + timeout=dict(default=300, type='int'), + ), + supports_check_mode=True, + ) + + if command is None: + module.fail_json(msg=missing_required_lib('pyghmi'), exception=PYGHMI_IMP_ERR) + + name = module.params['name'] + port = module.params['port'] + user = module.params['user'] + password = module.params['password'] + state = module.params['state'] + timeout = module.params['timeout'] + + # --- run command --- + try: + ipmi_cmd = command.Command( + bmc=name, userid=user, password=password, port=port + ) + module.debug('ipmi instantiated - name: "%s"' % name) + + current = ipmi_cmd.get_power() + if current['powerstate'] != state: + response = {'powerstate': state} if module.check_mode else ipmi_cmd.set_power(state, wait=timeout) + changed = True + else: + response = current + changed = False + + if 'error' in response: + module.fail_json(msg=response['error']) + + module.exit_json(changed=changed, **response) + except Exception as e: + module.fail_json(msg=str(e)) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/remote_management/lxca/lxca_cmms.py b/plugins/modules/remote_management/lxca/lxca_cmms.py new file mode 100644 index 0000000000..20672958f3 --- /dev/null +++ b/plugins/modules/remote_management/lxca/lxca_cmms.py @@ -0,0 +1,177 @@ +#!/usr/bin/python +# GNU General Public 
License v3.0+ (see COPYING or
+# https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {
+    'metadata_version': '1.1',
+    'supported_by': 'community',
+    'status': ['preview']
+}
+
+
+DOCUMENTATION = '''
+---
+author:
+  - Naval Patel (@navalkp)
+  - Prashant Bhosale (@prabhosa)
+module: lxca_cmms
+short_description: Custom module for lxca cmms inventory utility
+description:
+  - This module returns/displays inventory details of CMMs.
+
+options:
+  uuid:
+    description:
+      UUID of the device; this is a string with length greater than 16.
+
+  command_options:
+    description:
+      Options to filter CMM information.
+    default: cmms
+    choices:
+      - cmms
+      - cmms_by_uuid
+      - cmms_by_chassis_uuid
+
+  chassis:
+    description:
+      UUID of the chassis; this is a string with length greater than 16.
+
+extends_documentation_fragment:
+- community.general.lxca_common
+
+'''
+
+EXAMPLES = '''
+# get all cmms info
+- name: get cmms data from LXCA
+  lxca_cmms:
+    login_user: USERID
+    login_password: Password
+    auth_url: "https://10.243.15.168"
+
+# get specific cmms info by uuid
+- name: get cmms data from LXCA
+  lxca_cmms:
+    login_user: USERID
+    login_password: Password
+    auth_url: "https://10.243.15.168"
+    uuid: "3C737AA5E31640CE949B10C129A8B01F"
+    command_options: cmms_by_uuid
+
+# get specific cmms info by chassis uuid
+- name: get cmms data from LXCA
+  lxca_cmms:
+    login_user: USERID
+    login_password: Password
+    auth_url: "https://10.243.15.168"
+    chassis: "3C737AA5E31640CE949B10C129A8B01F"
+    command_options: cmms_by_chassis_uuid
+
+'''
+
+RETURN = r'''
+result:
+    description: cmms detail from lxca
+    returned: success
+    type: dict
+    sample:
+      cmmList:
+        - machineType: ''
+          model: ''
+          type: 'CMM'
+          uuid: '118D2C88C8FD11E4947B6EAE8B4BDCDF'
+          # bunch of properties
+        - machineType: ''
+          model: ''
+          type: 'CMM'
+          uuid: '223D2C88C8FD11E4947B6EAE8B4BDCDF'
+          # bunch of properties
+        # Multiple cmms details
+'''
+
+import traceback
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.remote_management.lxca.common import LXCA_COMMON_ARGS, has_pylxca, connection_object
+try:
+    from pylxca import cmms
+except ImportError:
+    pass
+
+
+UUID_REQUIRED = 'UUID of device is required for cmms_by_uuid command.'
+CHASSIS_UUID_REQUIRED = 'UUID of chassis is required for cmms_by_chassis_uuid command.'
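+# The _cmms_by_* helpers below fail fast with these messages when the
+# corresponding parameter is missing, e.g. command_options=cmms_by_uuid
+# without uuid fails with UUID_REQUIRED.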
+SUCCESS_MSG = "Success %s result"
+
+
+def _cmms(module, lxca_con):
+    return cmms(lxca_con)
+
+
+def _cmms_by_uuid(module, lxca_con):
+    if not module.params['uuid']:
+        module.fail_json(msg=UUID_REQUIRED)
+    return cmms(lxca_con, module.params['uuid'])
+
+
+def _cmms_by_chassis_uuid(module, lxca_con):
+    if not module.params['chassis']:
+        module.fail_json(msg=CHASSIS_UUID_REQUIRED)
+    return cmms(lxca_con, chassis=module.params['chassis'])
+
+
+def setup_module_object():
+    """
+    this function merges the argument spec and creates the ansible module object
+    :return:
+    """
+    args_spec = dict(LXCA_COMMON_ARGS)
+    args_spec.update(INPUT_ARG_SPEC)
+    module = AnsibleModule(argument_spec=args_spec, supports_check_mode=False)
+
+    return module
+
+
+FUNC_DICT = {
+    'cmms': _cmms,
+    'cmms_by_uuid': _cmms_by_uuid,
+    'cmms_by_chassis_uuid': _cmms_by_chassis_uuid,
+}
+
+
+INPUT_ARG_SPEC = dict(
+    command_options=dict(default='cmms', choices=['cmms', 'cmms_by_uuid',
+                                                  'cmms_by_chassis_uuid']),
+    uuid=dict(default=None),
+    chassis=dict(default=None)
+)
+
+
+def execute_module(module):
+    """
+    This function invokes commands
+    :param module: Ansible module object
+    """
+    try:
+        with connection_object(module) as lxca_con:
+            result = FUNC_DICT[module.params['command_options']](module, lxca_con)
+            module.exit_json(changed=False,
+                             msg=SUCCESS_MSG % module.params['command_options'],
+                             result=result)
+    except Exception as exception:
+        error_msg = '; '.join(str(e) for e in exception.args)
+        module.fail_json(msg=error_msg, exception=traceback.format_exc())
+
+
+def main():
+    module = setup_module_object()
+    has_pylxca(module)
+    execute_module(module)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/remote_management/lxca/lxca_nodes.py b/plugins/modules/remote_management/lxca/lxca_nodes.py
new file mode 100644
index 0000000000..e909353d67
--- /dev/null
+++ b/plugins/modules/remote_management/lxca/lxca_nodes.py
@@ -0,0 +1,207 @@
+#!/usr/bin/python
+# GNU General Public License v3.0+ (see COPYING or
+# https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {
+    'metadata_version': '1.1',
+    'supported_by': 'community',
+    'status': ['preview']
+}
+
+
+DOCUMENTATION = '''
+---
+author:
+  - Naval Patel (@navalkp)
+  - Prashant Bhosale (@prabhosa)
+module: lxca_nodes
+short_description: Custom module for lxca nodes inventory utility
+description:
+  - This module returns/displays inventory details of nodes.
+
+options:
+  uuid:
+    description:
+      UUID of the device; this is a string with length greater than 16.
+
+  command_options:
+    description:
+      Options to filter nodes information.
+    default: nodes
+    choices:
+      - nodes
+      - nodes_by_uuid
+      - nodes_by_chassis_uuid
+      - nodes_status_managed
+      - nodes_status_unmanaged
+
+  chassis:
+    description:
+      UUID of the chassis; this is a string with length greater than 16.
+ +extends_documentation_fragment: +- community.general.lxca_common + +''' + +EXAMPLES = ''' +# get all nodes info +- name: get nodes data from LXCA + lxca_nodes: + login_user: USERID + login_password: Password + auth_url: "https://10.243.15.168" + command_options: nodes + +# get specific nodes info by uuid +- name: get nodes data from LXCA + lxca_nodes: + login_user: USERID + login_password: Password + auth_url: "https://10.243.15.168" + uuid: "3C737AA5E31640CE949B10C129A8B01F" + command_options: nodes_by_uuid + +# get specific nodes info by chassis uuid +- name: get nodes data from LXCA + lxca_nodes: + login_user: USERID + login_password: Password + auth_url: "https://10.243.15.168" + chassis: "3C737AA5E31640CE949B10C129A8B01F" + command_options: nodes_by_chassis_uuid + +# get managed nodes +- name: get nodes data from LXCA + lxca_nodes: + login_user: USERID + login_password: Password + auth_url: "https://10.243.15.168" + command_options: nodes_status_managed + +# get unmanaged nodes +- name: get nodes data from LXCA + lxca_nodes: + login_user: USERID + login_password: Password + auth_url: "https://10.243.15.168" + command_options: nodes_status_unmanaged + +''' + +RETURN = r''' +result: + description: nodes detail from lxca + returned: always + type: dict + sample: + nodeList: + - machineType: '6241' + model: 'AC1' + type: 'Rack-TowerServer' + uuid: '118D2C88C8FD11E4947B6EAE8B4BDCDF' + # bunch of properties + - machineType: '8871' + model: 'AC1' + type: 'Rack-TowerServer' + uuid: '223D2C88C8FD11E4947B6EAE8B4BDCDF' + # bunch of properties + # Multiple nodes details +''' + +import traceback +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.remote_management.lxca.common import LXCA_COMMON_ARGS, has_pylxca, connection_object +try: + from pylxca import nodes +except ImportError: + pass + + +UUID_REQUIRED = 'UUID of device is required for nodes_by_uuid command.' +CHASSIS_UUID_REQUIRED = 'UUID of chassis is required for nodes_by_chassis_uuid command.' 
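+# command_options maps one-to-one onto the pylxca nodes() call, e.g.
+# 'nodes_status_managed' -> nodes(lxca_con, status='managed'); see FUNC_DICT below.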
+SUCCESS_MSG = "Success %s result" + + +def _nodes(module, lxca_con): + return nodes(lxca_con) + + +def _nodes_by_uuid(module, lxca_con): + if not module.params['uuid']: + module.fail_json(msg=UUID_REQUIRED) + return nodes(lxca_con, module.params['uuid']) + + +def _nodes_by_chassis_uuid(module, lxca_con): + if not module.params['chassis']: + module.fail_json(msg=CHASSIS_UUID_REQUIRED) + return nodes(lxca_con, chassis=module.params['chassis']) + + +def _nodes_status_managed(module, lxca_con): + return nodes(lxca_con, status='managed') + + +def _nodes_status_unmanaged(module, lxca_con): + return nodes(lxca_con, status='unmanaged') + + +def setup_module_object(): + """ + this function merge argument spec and create ansible module object + :return: + """ + args_spec = dict(LXCA_COMMON_ARGS) + args_spec.update(INPUT_ARG_SPEC) + module = AnsibleModule(argument_spec=args_spec, supports_check_mode=False) + + return module + + +FUNC_DICT = { + 'nodes': _nodes, + 'nodes_by_uuid': _nodes_by_uuid, + 'nodes_by_chassis_uuid': _nodes_by_chassis_uuid, + 'nodes_status_managed': _nodes_status_managed, + 'nodes_status_unmanaged': _nodes_status_unmanaged, +} + + +INPUT_ARG_SPEC = dict( + command_options=dict(default='nodes', choices=['nodes', 'nodes_by_uuid', + 'nodes_by_chassis_uuid', + 'nodes_status_managed', + 'nodes_status_unmanaged']), + uuid=dict(default=None), chassis=dict(default=None) +) + + +def execute_module(module): + """ + This function invoke commands + :param module: Ansible module object + """ + try: + with connection_object(module) as lxca_con: + result = FUNC_DICT[module.params['command_options']](module, lxca_con) + module.exit_json(changed=False, + msg=SUCCESS_MSG % module.params['command_options'], + result=result) + except Exception as exception: + error_msg = '; '.join(exception.args) + module.fail_json(msg=error_msg, exception=traceback.format_exc()) + + +def main(): + module = setup_module_object() + has_pylxca(module) + execute_module(module) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/remote_management/manageiq/manageiq_alert_profiles.py b/plugins/modules/remote_management/manageiq/manageiq_alert_profiles.py new file mode 100644 index 0000000000..4691dcbc7e --- /dev/null +++ b/plugins/modules/remote_management/manageiq/manageiq_alert_profiles.py @@ -0,0 +1,303 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright (c) 2017 Red Hat Inc. +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' + +module: manageiq_alert_profiles + +short_description: Configuration of alert profiles for ManageIQ +extends_documentation_fragment: +- community.general.manageiq + +author: Elad Alfassa (@elad661) +description: + - The manageiq_alert_profiles module supports adding, updating and deleting alert profiles in ManageIQ. + +options: + state: + description: + - absent - alert profile should not exist, + - present - alert profile should exist, + choices: ['absent', 'present'] + default: 'present' + name: + description: + - The unique alert profile name in ManageIQ. + - Required when state is "absent" or "present". + resource_type: + description: + - The resource type for the alert profile in ManageIQ. Required when state is "present". 
+ choices: ['Vm', 'ContainerNode', 'MiqServer', 'Host', 'Storage', 'EmsCluster', + 'ExtManagementSystem', 'MiddlewareServer'] + alerts: + description: + - List of alert descriptions to assign to this profile. + - Required if state is "present" + notes: + description: + - Optional notes for this profile + +''' + +EXAMPLES = ''' +- name: Add an alert profile to ManageIQ + manageiq_alert_profiles: + state: present + name: Test profile + resource_type: ContainerNode + alerts: + - Test Alert 01 + - Test Alert 02 + manageiq_connection: + url: 'http://127.0.0.1:3000' + username: 'admin' + password: 'smartvm' + validate_certs: False + +- name: Delete an alert profile from ManageIQ + manageiq_alert_profiles: + state: absent + name: Test profile + manageiq_connection: + url: 'http://127.0.0.1:3000' + username: 'admin' + password: 'smartvm' + validate_certs: False +''' + +RETURN = ''' +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.manageiq import ManageIQ, manageiq_argument_spec + + +class ManageIQAlertProfiles(object): + """ Object to execute alert profile management operations in manageiq. + """ + + def __init__(self, manageiq): + self.manageiq = manageiq + + self.module = self.manageiq.module + self.api_url = self.manageiq.api_url + self.client = self.manageiq.client + self.url = '{api_url}/alert_definition_profiles'.format(api_url=self.api_url) + + def get_profiles(self): + """ Get all alert profiles from ManageIQ + """ + try: + response = self.client.get(self.url + '?expand=alert_definitions,resources') + except Exception as e: + self.module.fail_json(msg="Failed to query alert profiles: {error}".format(error=e)) + return response.get('resources') or [] + + def get_alerts(self, alert_descriptions): + """ Get a list of alert hrefs from a list of alert descriptions + """ + alerts = [] + for alert_description in alert_descriptions: + alert = self.manageiq.find_collection_resource_or_fail("alert_definitions", + description=alert_description) + alerts.append(alert['href']) + + return alerts + + def add_profile(self, profile): + """ Add a new alert profile to ManageIQ + """ + # find all alerts to add to the profile + # we do this first to fail early if one is missing. 
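+        # (get_alerts resolves each description via
+        # find_collection_resource_or_fail, so an unknown alert fails the
+        # module before any profile is created.)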
+ alerts = self.get_alerts(profile['alerts']) + + # build the profile dict to send to the server + + profile_dict = dict(name=profile['name'], + description=profile['name'], + mode=profile['resource_type']) + if profile['notes']: + profile_dict['set_data'] = dict(notes=profile['notes']) + + # send it to the server + try: + result = self.client.post(self.url, resource=profile_dict, action="create") + except Exception as e: + self.module.fail_json(msg="Creating profile failed {error}".format(error=e)) + + # now that it has been created, we can assign the alerts + self.assign_or_unassign(result['results'][0], alerts, "assign") + + msg = "Profile {name} created successfully" + msg = msg.format(name=profile['name']) + return dict(changed=True, msg=msg) + + def delete_profile(self, profile): + """ Delete an alert profile from ManageIQ + """ + try: + self.client.post(profile['href'], action="delete") + except Exception as e: + self.module.fail_json(msg="Deleting profile failed: {error}".format(error=e)) + + msg = "Successfully deleted profile {name}".format(name=profile['name']) + return dict(changed=True, msg=msg) + + def get_alert_href(self, alert): + """ Get an absolute href for an alert + """ + return "{url}/alert_definitions/{id}".format(url=self.api_url, id=alert['id']) + + def assign_or_unassign(self, profile, resources, action): + """ Assign or unassign alerts to profile, and validate the result. + """ + alerts = [dict(href=href) for href in resources] + + subcollection_url = profile['href'] + '/alert_definitions' + try: + result = self.client.post(subcollection_url, resources=alerts, action=action) + if len(result['results']) != len(alerts): + msg = "Failed to {action} alerts to profile '{name}'," +\ + "expected {expected} alerts to be {action}ed," +\ + "but only {changed} were {action}ed" + msg = msg.format(action=action, + name=profile['name'], + expected=len(alerts), + changed=result['results']) + self.module.fail_json(msg=msg) + except Exception as e: + msg = "Failed to {action} alerts to profile '{name}': {error}" + msg = msg.format(action=action, name=profile['name'], error=e) + self.module.fail_json(msg=msg) + + return result['results'] + + def update_profile(self, old_profile, desired_profile): + """ Update alert profile in ManageIQ + """ + changed = False + # we need to use client.get to query the alert definitions + old_profile = self.client.get(old_profile['href'] + '?expand=alert_definitions') + + # figure out which alerts we need to assign / unassign + # alerts listed by the user: + desired_alerts = set(self.get_alerts(desired_profile['alerts'])) + + # alert which currently exist in the profile + if 'alert_definitions' in old_profile: + # we use get_alert_href to have a direct href to the alert + existing_alerts = set([self.get_alert_href(alert) for alert in old_profile['alert_definitions']]) + else: + # no alerts in this profile + existing_alerts = set() + + to_add = list(desired_alerts - existing_alerts) + to_remove = list(existing_alerts - desired_alerts) + + # assign / unassign the alerts, if needed + + if to_remove: + self.assign_or_unassign(old_profile, to_remove, "unassign") + changed = True + if to_add: + self.assign_or_unassign(old_profile, to_add, "assign") + changed = True + + # update other properties + profile_dict = dict() + + if old_profile['mode'] != desired_profile['resource_type']: + # mode needs to be updated + profile_dict['mode'] = desired_profile['resource_type'] + + # check if notes need to be updated + old_notes = old_profile.get('set_data', 
{}).get('notes') + + if desired_profile['notes'] != old_notes: + profile_dict['set_data'] = dict(notes=desired_profile['notes']) + + if profile_dict: + # if we have any updated values + changed = True + try: + result = self.client.post(old_profile['href'], + resource=profile_dict, + action="edit") + except Exception as e: + msg = "Updating profile '{name}' failed: {error}" + msg = msg.format(name=old_profile['name'], error=e) + self.module.fail_json(msg=msg, result=result) + + if changed: + msg = "Profile {name} updated successfully".format(name=desired_profile['name']) + else: + msg = "No update needed for profile {name}".format(name=desired_profile['name']) + return dict(changed=changed, msg=msg) + + +def main(): + argument_spec = dict( + name=dict(type='str'), + resource_type=dict(type='str', choices=['Vm', + 'ContainerNode', + 'MiqServer', + 'Host', + 'Storage', + 'EmsCluster', + 'ExtManagementSystem', + 'MiddlewareServer']), + alerts=dict(type='list'), + notes=dict(type='str'), + state=dict(default='present', choices=['present', 'absent']), + ) + # add the manageiq connection arguments to the arguments + argument_spec.update(manageiq_argument_spec()) + + module = AnsibleModule(argument_spec=argument_spec, + required_if=[('state', 'present', ['name', 'resource_type']), + ('state', 'absent', ['name'])]) + + state = module.params['state'] + name = module.params['name'] + + manageiq = ManageIQ(module) + manageiq_alert_profiles = ManageIQAlertProfiles(manageiq) + + existing_profile = manageiq.find_collection_resource_by("alert_definition_profiles", + name=name) + + # we need to add or update the alert profile + if state == "present": + if not existing_profile: + # a profile with this name doesn't exist yet, let's create it + res_args = manageiq_alert_profiles.add_profile(module.params) + else: + # a profile with this name exists, we might need to update it + res_args = manageiq_alert_profiles.update_profile(existing_profile, module.params) + + # this alert profile should not exist + if state == "absent": + # if we have an alert profile with this name, delete it + if existing_profile: + res_args = manageiq_alert_profiles.delete_profile(existing_profile) + else: + # This alert profile does not exist in ManageIQ, and that's okay + msg = "Alert profile '{name}' does not exist in ManageIQ" + msg = msg.format(name=name) + res_args = dict(changed=False, msg=msg) + + module.exit_json(**res_args) + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/remote_management/manageiq/manageiq_alerts.py b/plugins/modules/remote_management/manageiq/manageiq_alerts.py new file mode 100644 index 0000000000..fd5d9e5a72 --- /dev/null +++ b/plugins/modules/remote_management/manageiq/manageiq_alerts.py @@ -0,0 +1,347 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright (c) 2017 Red Hat Inc. 
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' + +module: manageiq_alerts + +short_description: Configuration of alerts in ManageIQ +extends_documentation_fragment: +- community.general.manageiq + +author: Elad Alfassa (@elad661) (base on manageiq_user.py by Daniel Korn ) +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' + +module: manageiq_group + +short_description: Management of groups in ManageIQ. +extends_documentation_fragment: +- community.general.manageiq + +author: Evert Mulder (@evertmulder) +description: + - The manageiq_group module supports adding, updating and deleting groups in ManageIQ. +requirements: +- manageiq-client + +options: + state: + description: + - absent - group should not exist, present - group should be. + choices: ['absent', 'present'] + default: 'present' + description: + description: + - The group description. + required: true + default: null + role_id: + description: + - The the group role id + required: false + default: null + role: + description: + - The the group role name + - The C(role_id) has precedence over the C(role) when supplied. + required: false + default: null + tenant_id: + description: + - The tenant for the group identified by the tenant id. + required: false + default: null + tenant: + description: + - The tenant for the group identified by the tenant name. + - The C(tenant_id) has precedence over the C(tenant) when supplied. + - Tenant names are case sensitive. + required: false + default: null + managed_filters: + description: The tag values per category + type: dict + required: false + default: null + managed_filters_merge_mode: + description: + - In merge mode existing categories are kept or updated, new categories are added. + - In replace mode all categories will be replaced with the supplied C(managed_filters). + choices: [ merge, replace ] + default: replace + belongsto_filters: + description: A list of strings with a reference to the allowed host, cluster or folder + type: list + required: false + default: null + belongsto_filters_merge_mode: + description: + - In merge mode existing settings are merged with the supplied C(belongsto_filters). + - In replace mode current values are replaced with the supplied C(belongsto_filters). 
+ choices: [ merge, replace ] + default: replace +''' + +EXAMPLES = ''' +- name: Create a group in ManageIQ with the role EvmRole-user and tenant 'my_tenant' + manageiq_group: + description: 'MyGroup-user' + role: 'EvmRole-user' + tenant: 'my_tenant' + manageiq_connection: + url: 'https://manageiq_server' + username: 'admin' + password: 'smartvm' + validate_certs: False + +- name: Create a group in ManageIQ with the role EvmRole-user and tenant with tenant_id 4 + manageiq_group: + description: 'MyGroup-user' + role: 'EvmRole-user' + tenant_id: 4 + manageiq_connection: + url: 'https://manageiq_server' + username: 'admin' + password: 'smartvm' + validate_certs: False + +- name: + - Create or update a group in ManageIQ with the role EvmRole-user and tenant my_tenant. + - Apply 3 prov_max_cpu and 2 department tags to the group. + - Limit access to a cluster for the group. + manageiq_group: + description: 'MyGroup-user' + role: 'EvmRole-user' + tenant: my_tenant + managed_filters: + prov_max_cpu: + - '1' + - '2' + - '4' + department: + - defense + - engineering + managed_filters_merge_mode: replace + belongsto_filters: + - "/belongsto/ExtManagementSystem|ProviderName/EmsFolder|Datacenters/EmsFolder|dc_name/EmsFolder|host/EmsCluster|Cluster name" + belongsto_filters_merge_mode: merge + manageiq_connection: + url: 'https://manageiq_server' + username: 'admin' + password: 'smartvm' + validate_certs: False + +- name: Delete a group in ManageIQ + manageiq_group: + state: 'absent' + description: 'MyGroup-user' + manageiq_connection: + url: 'http://127.0.0.1:3000' + username: 'admin' + password: 'smartvm' + +- name: Delete a group in ManageIQ using a token + manageiq_group: + state: 'absent' + description: 'MyGroup-user' + manageiq_connection: + url: 'http://127.0.0.1:3000' + token: 'sometoken' +''' + +RETURN = ''' +group: + description: The group. + returned: success + type: complex + contains: + description: + description: The group description + returned: success + type: str + id: + description: The group id + returned: success + type: int + group_type: + description: The group type, system or user + returned: success + type: str + role: + description: The group role name + returned: success + type: str + tenant: + description: The group tenant name + returned: success + type: str + managed_filters: + description: The tag values per category + returned: success + type: dict + belongsto_filters: + description: A list of strings with a reference to the allowed host, cluster or folder + returned: success + type: list + created_on: + description: Group creation date + returned: success + type: str + sample: "2018-08-12T08:37:55+00:00" + updated_on: + description: Group update date + returned: success + type: int + sample: "2018-08-12T08:37:55+00:00" +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.manageiq import ManageIQ, manageiq_argument_spec + + +class ManageIQgroup(object): + """ + Object to execute group management operations in manageiq. + """ + + def __init__(self, manageiq): + self.manageiq = manageiq + + self.module = self.manageiq.module + self.api_url = self.manageiq.api_url + self.client = self.manageiq.client + + def group(self, description): + """ Search for group object by description. + Returns: + the group, or None if group was not found. 
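+            e.g. group('MyGroup-user') returns the matching entity, or None
+            when no group with that description exists (name is illustrative).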
+ """ + groups = self.client.collections.groups.find_by(description=description) + if len(groups) == 0: + return None + else: + return groups[0] + + def tenant(self, tenant_id, tenant_name): + """ Search for tenant entity by name or id + Returns: + the tenant entity, None if no id or name was supplied + """ + + if tenant_id: + tenant = self.client.get_entity('tenants', tenant_id) + if not tenant: + self.module.fail_json(msg="Tenant with id '%s' not found in manageiq" % str(tenant_id)) + return tenant + else: + if tenant_name: + tenant_res = self.client.collections.tenants.find_by(name=tenant_name) + if not tenant_res: + self.module.fail_json(msg="Tenant '%s' not found in manageiq" % tenant_name) + if len(tenant_res) > 1: + self.module.fail_json(msg="Multiple tenants found in manageiq with name '%s" % tenant_name) + tenant = tenant_res[0] + return tenant + else: + # No tenant name or tenant id supplied + return None + + def role(self, role_id, role_name): + """ Search for a role object by name or id. + Returns: + the role entity, None no id or name was supplied + + the role, or send a module Fail signal if role not found. + """ + if role_id: + role = self.client.get_entity('roles', role_id) + if not role: + self.module.fail_json(msg="Role with id '%s' not found in manageiq" % str(role_id)) + return role + else: + if role_name: + role_res = self.client.collections.roles.find_by(name=role_name) + if not role_res: + self.module.fail_json(msg="Role '%s' not found in manageiq" % role_name) + if len(role_res) > 1: + self.module.fail_json(msg="Multiple roles found in manageiq with name '%s" % role_name) + return role_res[0] + else: + # No role name or role id supplied + return None + + @staticmethod + def merge_dict_values(norm_current_values, norm_updated_values): + """ Create an merged update object for manageiq group filters. + + The input dict contain the tag values per category. + If the new values contain the category, all tags for that category are replaced + If the new values do not contain the category, the existing tags are kept + + Returns: + the nested array with the merged values, used in the update post body + """ + + # If no updated values are supplied, in merge mode, the original values must be returned + # otherwise the existing tag filters will be removed. + if norm_current_values and (not norm_updated_values): + return norm_current_values + + # If no existing tag filters exist, use the user supplied values + if (not norm_current_values) and norm_updated_values: + return norm_updated_values + + # start with norm_current_values's keys and values + res = norm_current_values.copy() + # replace res with norm_updated_values's keys and values + res.update(norm_updated_values) + return res + + def delete_group(self, group): + """ Deletes a group from manageiq. + + Returns: + a dict of: + changed: boolean indicating if the entity was updated. + msg: a short message describing the operation executed. 
+ """ + try: + url = '%s/groups/%s' % (self.api_url, group['id']) + result = self.client.post(url, action='delete') + except Exception as e: + self.module.fail_json(msg="failed to delete group %s: %s" % (group['description'], str(e))) + + if result['success'] is False: + self.module.fail_json(msg=result['message']) + + return dict( + changed=True, + msg="deleted group %s with id %s" % (group['description'], group['id'])) + + def edit_group(self, group, description, role, tenant, norm_managed_filters, managed_filters_merge_mode, + belongsto_filters, belongsto_filters_merge_mode): + """ Edit a manageiq group. + + Returns: + a dict of: + changed: boolean indicating if the entity was updated. + msg: a short message describing the operation executed. + """ + + if role or norm_managed_filters or belongsto_filters: + group.reload(attributes=['miq_user_role_name', 'entitlement']) + + try: + current_role = group['miq_user_role_name'] + except AttributeError: + current_role = None + + changed = False + resource = {} + + if description and group['description'] != description: + resource['description'] = description + changed = True + + if tenant and group['tenant_id'] != tenant['id']: + resource['tenant'] = dict(id=tenant['id']) + changed = True + + if role and current_role != role['name']: + resource['role'] = dict(id=role['id']) + changed = True + + if norm_managed_filters or belongsto_filters: + + # Only compare if filters are supplied + entitlement = group['entitlement'] + + if 'filters' not in entitlement: + # No existing filters exist, use supplied filters + managed_tag_filters_post_body = self.normalized_managed_tag_filters_to_miq(norm_managed_filters) + resource['filters'] = {'managed': managed_tag_filters_post_body, "belongsto": belongsto_filters} + changed = True + else: + current_filters = entitlement['filters'] + new_filters = self.edit_group_edit_filters(current_filters, + norm_managed_filters, managed_filters_merge_mode, + belongsto_filters, belongsto_filters_merge_mode) + if new_filters: + resource['filters'] = new_filters + changed = True + + if not changed: + return dict( + changed=False, + msg="group %s is not changed." % group['description']) + + # try to update group + try: + self.client.post(group['href'], action='edit', resource=resource) + changed = True + except Exception as e: + self.module.fail_json(msg="failed to update group %s: %s" % (group['name'], str(e))) + + return dict( + changed=changed, + msg="successfully updated the group %s with id %s" % (group['description'], group['id'])) + + def edit_group_edit_filters(self, current_filters, norm_managed_filters, managed_filters_merge_mode, + belongsto_filters, belongsto_filters_merge_mode): + """ Edit a manageiq group filters. 
+ + Returns: + None if no the group was not updated + If the group was updated the post body part for updating the group + """ + filters_updated = False + new_filters_resource = {} + + current_belongsto_set = current_filters.get('belongsto', set()) + + if belongsto_filters: + new_belongsto_set = set(belongsto_filters) + else: + new_belongsto_set = set() + + if current_belongsto_set == new_belongsto_set: + new_filters_resource['belongsto'] = current_filters['belongsto'] + else: + if belongsto_filters_merge_mode == 'merge': + current_belongsto_set.update(new_belongsto_set) + new_filters_resource['belongsto'] = list(current_belongsto_set) + else: + new_filters_resource['belongsto'] = list(new_belongsto_set) + filters_updated = True + + # Process belongsto managed filter tags + # The input is in the form dict with keys are the categories and the tags are supplied string array + # ManageIQ, the current_managed, uses an array of arrays. One array of categories. + # We normalize the user input from a dict with arrays to a dict of sorted arrays + # We normalize the current manageiq array of arrays also to a dict of sorted arrays so we can compare + norm_current_filters = self.manageiq_filters_to_sorted_dict(current_filters) + + if norm_current_filters == norm_managed_filters: + if 'managed' in current_filters: + new_filters_resource['managed'] = current_filters['managed'] + else: + if managed_filters_merge_mode == 'merge': + merged_dict = self.merge_dict_values(norm_current_filters, norm_managed_filters) + new_filters_resource['managed'] = self.normalized_managed_tag_filters_to_miq(merged_dict) + else: + new_filters_resource['managed'] = self.normalized_managed_tag_filters_to_miq(norm_managed_filters) + filters_updated = True + + if not filters_updated: + return None + + return new_filters_resource + + def create_group(self, description, role, tenant, norm_managed_filters, belongsto_filters): + """ Creates the group in manageiq. + + Returns: + the created group id, name, created_on timestamp, + updated_on timestamp. 
+ """ + # check for required arguments + for key, value in dict(description=description).items(): + if value in (None, ''): + self.module.fail_json(msg="missing required argument: %s" % key) + + url = '%s/groups' % self.api_url + + resource = {'description': description} + + if role is not None: + resource['role'] = dict(id=role['id']) + + if tenant is not None: + resource['tenant'] = dict(id=tenant['id']) + + if norm_managed_filters or belongsto_filters: + managed_tag_filters_post_body = self.normalized_managed_tag_filters_to_miq(norm_managed_filters) + resource['filters'] = {'managed': managed_tag_filters_post_body, "belongsto": belongsto_filters} + + try: + result = self.client.post(url, action='create', resource=resource) + except Exception as e: + self.module.fail_json(msg="failed to create group %s: %s" % (description, str(e))) + + return dict( + changed=True, + msg="successfully created group %s" % description, + group_id=result['results'][0]['id'] + ) + + @staticmethod + def normalized_managed_tag_filters_to_miq(norm_managed_filters): + if not norm_managed_filters: + return None + + return list(norm_managed_filters.values()) + + @staticmethod + def manageiq_filters_to_sorted_dict(current_filters): + current_managed_filters = current_filters.get('managed') + if not current_managed_filters: + return None + + res = {} + for tag_list in current_managed_filters: + tag_list.sort() + key = tag_list[0].split('/')[2] + res[key] = tag_list + + return res + + @staticmethod + def normalize_user_managed_filters_to_sorted_dict(managed_filters, module): + if not managed_filters: + return None + + res = {} + for cat_key in managed_filters: + cat_array = [] + if not isinstance(managed_filters[cat_key], list): + module.fail_json(msg='Entry "{0}" of managed_filters must be a list!'.format(cat_key)) + for tags in managed_filters[cat_key]: + miq_managed_tag = "/managed/" + cat_key + "/" + tags + cat_array.append(miq_managed_tag) + # Do not add empty categories. 
ManageIQ will remove all categories that are not supplied + if cat_array: + cat_array.sort() + res[cat_key] = cat_array + return res + + @staticmethod + def create_result_group(group): + """ Creates the ansible result object from a manageiq group entity + + Returns: + a dict with the group id, description, role, tenant, filters, group_type, created_on, updated_on + """ + try: + role_name = group['miq_user_role_name'] + except AttributeError: + role_name = None + + managed_filters = None + belongsto_filters = None + if 'filters' in group['entitlement']: + filters = group['entitlement']['filters'] + belongsto_filters = filters.get('belongsto') + group_managed_filters = filters.get('managed') + if group_managed_filters: + managed_filters = {} + for tag_list in group_managed_filters: + key = tag_list[0].split('/')[2] + tags = [] + for t in tag_list: + tags.append(t.split('/')[3]) + managed_filters[key] = tags + + return dict( + id=group['id'], + description=group['description'], + role=role_name, + tenant=group['tenant']['name'], + managed_filters=managed_filters, + belongsto_filters=belongsto_filters, + group_type=group['group_type'], + created_on=group['created_on'], + updated_on=group['updated_on'], + ) + + +def main(): + argument_spec = dict( + description=dict(required=True, type='str'), + state=dict(choices=['absent', 'present'], default='present'), + role_id=dict(required=False, type='int'), + role=dict(required=False, type='str'), + tenant_id=dict(required=False, type='int'), + tenant=dict(required=False, type='str'), + managed_filters=dict(required=False, type='dict'), + managed_filters_merge_mode=dict(required=False, choices=['merge', 'replace'], default='replace'), + belongsto_filters=dict(required=False, type='list', elements='str'), + belongsto_filters_merge_mode=dict(required=False, choices=['merge', 'replace'], default='replace'), + ) + # add the manageiq connection arguments to the arguments + argument_spec.update(manageiq_argument_spec()) + + module = AnsibleModule( + argument_spec=argument_spec + ) + + description = module.params['description'] + state = module.params['state'] + role_id = module.params['role_id'] + role_name = module.params['role'] + tenant_id = module.params['tenant_id'] + tenant_name = module.params['tenant'] + managed_filters = module.params['managed_filters'] + managed_filters_merge_mode = module.params['managed_filters_merge_mode'] + belongsto_filters = module.params['belongsto_filters'] + belongsto_filters_merge_mode = module.params['belongsto_filters_merge_mode'] + + manageiq = ManageIQ(module) + manageiq_group = ManageIQgroup(manageiq) + + group = manageiq_group.group(description) + + # group should not exist + if state == "absent": + # if we have a group, delete it + if group: + res_args = manageiq_group.delete_group(group) + # if we do not have a group, nothing to do + else: + res_args = dict( + changed=False, + msg="group '%s' does not exist in manageiq" % description) + + # group should exist + if state == "present": + + tenant = manageiq_group.tenant(tenant_id, tenant_name) + role = manageiq_group.role(role_id, role_name) + norm_managed_filters = manageiq_group.normalize_user_managed_filters_to_sorted_dict(managed_filters, module) + # if we have a group, edit it + if group: + res_args = manageiq_group.edit_group(group, description, role, tenant, + norm_managed_filters, managed_filters_merge_mode, + belongsto_filters, belongsto_filters_merge_mode) + + # if we do not have a group, create it + else: + res_args = 
manageiq_group.create_group(description, role, tenant, norm_managed_filters, belongsto_filters) + group = manageiq.client.get_entity('groups', res_args['group_id']) + + group.reload(expand='resources', attributes=['miq_user_role_name', 'tenant', 'entitlement']) + res_args['group'] = manageiq_group.create_result_group(group) + + module.exit_json(**res_args) + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/remote_management/manageiq/manageiq_policies.py b/plugins/modules/remote_management/manageiq/manageiq_policies.py new file mode 100644 index 0000000000..aa8585858b --- /dev/null +++ b/plugins/modules/remote_management/manageiq/manageiq_policies.py @@ -0,0 +1,347 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# (c) 2017, Daniel Korn +# (c) 2017, Yaacov Zamir +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = { + 'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community' +} + + +DOCUMENTATION = ''' + +module: manageiq_policies + +short_description: Management of resource policy_profiles in ManageIQ. +extends_documentation_fragment: +- community.general.manageiq + +author: Daniel Korn (@dkorn) +description: + - The manageiq_policies module supports adding and deleting policy_profiles in ManageIQ. + +options: + state: + description: + - absent - policy_profiles should not exist, + - present - policy_profiles should exist, + - list - list current policy_profiles and policies. + choices: ['absent', 'present', 'list'] + default: 'present' + policy_profiles: + description: + - list of dictionaries, each includes the policy_profile 'name' key. + - required if state is present or absent. 
+ resource_type: + description: + - the type of the resource to which the profile should be [un]assigned + required: true + choices: ['provider', 'host', 'vm', 'blueprint', 'category', 'cluster', + 'data store', 'group', 'resource pool', 'service', 'service template', + 'template', 'tenant', 'user'] + resource_name: + description: + - the name of the resource to which the profile should be [un]assigned + required: true +''' + +EXAMPLES = ''' +- name: Assign new policy_profile for a provider in ManageIQ + manageiq_policies: + resource_name: 'EngLab' + resource_type: 'provider' + policy_profiles: + - name: openscap profile + manageiq_connection: + url: 'http://127.0.0.1:3000' + username: 'admin' + password: 'smartvm' + validate_certs: False + +- name: Unassign a policy_profile for a provider in ManageIQ + manageiq_policies: + state: absent + resource_name: 'EngLab' + resource_type: 'provider' + policy_profiles: + - name: openscap profile + manageiq_connection: + url: 'http://127.0.0.1:3000' + username: 'admin' + password: 'smartvm' + validate_certs: False + +- name: List current policy_profile and policies for a provider in ManageIQ + manageiq_policies: + state: list + resource_name: 'EngLab' + resource_type: 'provider' + manageiq_connection: + url: 'http://127.0.0.1:3000' + username: 'admin' + password: 'smartvm' + validate_certs: False +''' + +RETURN = ''' +manageiq_policies: + description: + - List current policy_profile and policies for a provider in ManageIQ + returned: always + type: dict + sample: '{ + "changed": false, + "profiles": [ + { + "policies": [ + { + "active": true, + "description": "OpenSCAP", + "name": "openscap policy" + }, + { + "active": true, + "description": "Analyse incoming container images", + "name": "analyse incoming container images" + }, + { + "active": true, + "description": "Schedule compliance after smart state analysis", + "name": "schedule compliance after smart state analysis" + } + ], + "profile_description": "OpenSCAP profile", + "profile_name": "openscap profile" + } + ] + }' +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.manageiq import ManageIQ, manageiq_argument_spec, manageiq_entities + + +class ManageIQPolicies(object): + """ + Object to execute policies management operations of manageiq resources. 
+ """ + + def __init__(self, manageiq, resource_type, resource_id): + self.manageiq = manageiq + + self.module = self.manageiq.module + self.api_url = self.manageiq.api_url + self.client = self.manageiq.client + + self.resource_type = resource_type + self.resource_id = resource_id + self.resource_url = '{api_url}/{resource_type}/{resource_id}'.format( + api_url=self.api_url, + resource_type=resource_type, + resource_id=resource_id) + + def query_profile_href(self, profile): + """ Add or Update the policy_profile href field + + Example: + {name: STR, ...} => {name: STR, href: STR} + """ + resource = self.manageiq.find_collection_resource_or_fail( + "policy_profiles", **profile) + return dict(name=profile['name'], href=resource['href']) + + def query_resource_profiles(self): + """ Returns a set of the profile objects objects assigned to the resource + """ + url = '{resource_url}/policy_profiles?expand=resources' + try: + response = self.client.get(url.format(resource_url=self.resource_url)) + except Exception as e: + msg = "Failed to query {resource_type} policies: {error}".format( + resource_type=self.resource_type, + error=e) + self.module.fail_json(msg=msg) + + resources = response.get('resources', []) + + # clean the returned rest api profile object to look like: + # {profile_name: STR, profile_description: STR, policies: ARR} + profiles = [self.clean_profile_object(profile) for profile in resources] + + return profiles + + def query_profile_policies(self, profile_id): + """ Returns a set of the policy objects assigned to the resource + """ + url = '{api_url}/policy_profiles/{profile_id}?expand=policies' + try: + response = self.client.get(url.format(api_url=self.api_url, profile_id=profile_id)) + except Exception as e: + msg = "Failed to query {resource_type} policies: {error}".format( + resource_type=self.resource_type, + error=e) + self.module.fail_json(msg=msg) + + resources = response.get('policies', []) + + # clean the returned rest api policy object to look like: + # {name: STR, description: STR, active: BOOL} + policies = [self.clean_policy_object(policy) for policy in resources] + + return policies + + def clean_policy_object(self, policy): + """ Clean a policy object to have human readable form of: + { + name: STR, + description: STR, + active: BOOL + } + """ + name = policy.get('name') + description = policy.get('description') + active = policy.get('active') + + return dict( + name=name, + description=description, + active=active) + + def clean_profile_object(self, profile): + """ Clean a profile object to have human readable form of: + { + profile_name: STR, + profile_description: STR, + policies: ARR + } + """ + profile_id = profile['id'] + name = profile.get('name') + description = profile.get('description') + policies = self.query_profile_policies(profile_id) + + return dict( + profile_name=name, + profile_description=description, + policies=policies) + + def profiles_to_update(self, profiles, action): + """ Create a list of policies we need to update in ManageIQ. + + Returns: + Whether or not a change took place and a message describing the + operation executed. + """ + profiles_to_post = [] + assigned_profiles = self.query_resource_profiles() + + # make a list of assigned full profile names strings + # e.g. ['openscap profile', ...] 
+ assigned_profiles_set = set([profile['profile_name'] for profile in assigned_profiles]) + + for profile in profiles: + assigned = profile.get('name') in assigned_profiles_set + + if (action == 'unassign' and assigned) or (action == 'assign' and not assigned): + # add/update the policy profile href field + # {name: STR, ...} => {name: STR, href: STR} + profile = self.query_profile_href(profile) + profiles_to_post.append(profile) + + return profiles_to_post + + def assign_or_unassign_profiles(self, profiles, action): + """ Perform assign/unassign action + """ + # get a list of profiles needed to be changed + profiles_to_post = self.profiles_to_update(profiles, action) + if not profiles_to_post: + return dict( + changed=False, + msg="Profiles {profiles} already {action}ed, nothing to do".format( + action=action, + profiles=profiles)) + + # try to assign or unassign profiles to resource + url = '{resource_url}/policy_profiles'.format(resource_url=self.resource_url) + try: + response = self.client.post(url, action=action, resources=profiles_to_post) + except Exception as e: + msg = "Failed to {action} profile: {error}".format( + action=action, + error=e) + self.module.fail_json(msg=msg) + + # check all entities in result to be successful + for result in response['results']: + if not result['success']: + msg = "Failed to {action}: {message}".format( + action=action, + message=result['message']) + self.module.fail_json(msg=msg) + + # successfully changed all needed profiles + return dict( + changed=True, + msg="Successfully {action}ed profiles: {profiles}".format( + action=action, + profiles=profiles)) + + +def main(): + actions = {'present': 'assign', 'absent': 'unassign', 'list': 'list'} + argument_spec = dict( + policy_profiles=dict(type='list'), + resource_name=dict(required=True, type='str'), + resource_type=dict(required=True, type='str', + choices=manageiq_entities().keys()), + state=dict(required=False, type='str', + choices=['present', 'absent', 'list'], default='present'), + ) + # add the manageiq connection arguments to the arguments + argument_spec.update(manageiq_argument_spec()) + + module = AnsibleModule( + argument_spec=argument_spec, + required_if=[ + ('state', 'present', ['policy_profiles']), + ('state', 'absent', ['policy_profiles']) + ], + ) + + policy_profiles = module.params['policy_profiles'] + resource_type_key = module.params['resource_type'] + resource_name = module.params['resource_name'] + state = module.params['state'] + + # get the action and resource type + action = actions[state] + resource_type = manageiq_entities()[resource_type_key] + + manageiq = ManageIQ(module) + + # query resource id, fail if resource does not exist + resource_id = manageiq.find_collection_resource_or_fail(resource_type, name=resource_name)['id'] + + manageiq_policies = ManageIQPolicies(manageiq, resource_type, resource_id) + + if action == 'list': + # return a list of current profiles for this object + current_profiles = manageiq_policies.query_resource_profiles() + res_args = dict(changed=False, profiles=current_profiles) + else: + # assign or unassign the profiles + res_args = manageiq_policies.assign_or_unassign_profiles(policy_profiles, action) + + module.exit_json(**res_args) + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/remote_management/manageiq/manageiq_provider.py b/plugins/modules/remote_management/manageiq/manageiq_provider.py new file mode 100644 index 0000000000..0131971614 --- /dev/null +++ 
b/plugins/modules/remote_management/manageiq/manageiq_provider.py @@ -0,0 +1,894 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# (c) 2017, Daniel Korn +# (c) 2017, Yaacov Zamir +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = { + 'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community' +} + + +DOCUMENTATION = ''' +module: manageiq_provider +short_description: Management of provider in ManageIQ. +extends_documentation_fragment: +- community.general.manageiq + +author: Daniel Korn (@dkorn) +description: + - The manageiq_provider module supports adding, updating, and deleting provider in ManageIQ. + +options: + state: + description: + - absent - provider should not exist, present - provider should be present, refresh - provider will be refreshed + choices: ['absent', 'present', 'refresh'] + default: 'present' + name: + description: The provider's name. + required: true + type: + description: The provider's type. + required: true + choices: ['Openshift', 'Amazon', 'oVirt', 'VMware', 'Azure', 'Director', 'OpenStack', 'GCE'] + zone: + description: The ManageIQ zone name that will manage the provider. + default: 'default' + provider_region: + description: The provider region name to connect to (e.g. AWS region for Amazon). + host_default_vnc_port_start: + description: The first port in the host VNC range. defaults to None. + host_default_vnc_port_end: + description: The last port in the host VNC range. defaults to None. + subscription: + description: Microsoft Azure subscription ID. defaults to None. + project: + description: Google Compute Engine Project ID. defaults to None. + azure_tenant_id: + description: Tenant ID. defaults to None. + aliases: [ keystone_v3_domain_id ] + tenant_mapping_enabled: + type: bool + default: 'no' + description: Whether to enable mapping of existing tenants. defaults to False. + api_version: + description: The OpenStack Keystone API version. defaults to None. + choices: ['v2', 'v3'] + + provider: + description: Default endpoint connection information, required if state is true. + suboptions: + hostname: + description: The provider's api hostname. + required: true + port: + description: The provider's api port. + userid: + description: Provider's api endpoint authentication userid. defaults to None. + password: + description: Provider's api endpoint authentication password. defaults to None. + auth_key: + description: Provider's api endpoint authentication bearer token. defaults to None. + validate_certs: + description: Whether SSL certificates should be verified for HTTPS requests (deprecated). defaults to True. + type: bool + default: 'yes' + security_protocol: + description: How SSL certificates should be used for HTTPS requests. defaults to None. + choices: ['ssl-with-validation','ssl-with-validation-custom-ca','ssl-without-validation','non-ssl'] + certificate_authority: + description: The CA bundle string with custom certificates. defaults to None. + + metrics: + description: Metrics endpoint connection information. + suboptions: + hostname: + description: The provider's api hostname. + required: true + port: + description: The provider's api port. + userid: + description: Provider's api endpoint authentication userid. defaults to None. + password: + description: Provider's api endpoint authentication password. defaults to None. 
+ auth_key: + description: Provider's api endpoint authentication bearer token. defaults to None. + validate_certs: + description: Whether SSL certificates should be verified for HTTPS requests (deprecated). defaults to True. + type: bool + default: 'yes' + security_protocol: + choices: ['ssl-with-validation','ssl-with-validation-custom-ca','ssl-without-validation','non-ssl'] + description: How SSL certificates should be used for HTTPS requests. defaults to None. + certificate_authority: + description: The CA bundle string with custom certificates. defaults to None. + path: + description: Database name for oVirt metrics. Defaults to ovirt_engine_history. + default: ovirt_engine_history + + alerts: + description: Alerts endpoint connection information. + suboptions: + hostname: + description: The provider's api hostname. + required: true + port: + description: The provider's api port. + userid: + description: Provider's api endpoint authentication userid. defaults to None. + password: + description: Provider's api endpoint authentication password. defaults to None. + auth_key: + description: Provider's api endpoint authentication bearer token. defaults to None. + validate_certs: + description: Whether SSL certificates should be verified for HTTPS requests (deprecated). defaults to True. + default: true + security_protocol: + choices: ['ssl-with-validation','ssl-with-validation-custom-ca','ssl-without-validation'] + description: How SSL certificates should be used for HTTPS requests. defaults to None. + certificate_authority: + description: The CA bundle string with custom certificates. defaults to None. + + ssh_keypair: + description: SSH key pair used for SSH connections to all hosts in this provider. + suboptions: + hostname: + description: Director hostname. + required: true + userid: + description: SSH username. + auth_key: + description: SSH private key. 
+''' + +EXAMPLES = ''' +- name: Create a new provider in ManageIQ ('Hawkular' metrics) + manageiq_provider: + name: 'EngLab' + type: 'OpenShift' + state: 'present' + provider: + auth_key: 'topSecret' + hostname: 'example.com' + port: 8443 + validate_certs: true + security_protocol: 'ssl-with-validation-custom-ca' + certificate_authority: | + -----BEGIN CERTIFICATE----- + FAKECERTsdKgAwIBAgIBATANBgkqhkiG9w0BAQsFADAmMSQwIgYDVQQDDBtvcGVu + c2hpZnQtc2lnbmVyQDE1MDMzMjAxMTkwHhcNMTcwODIxMTI1NTE5WhcNMjIwODIw + MTI1NTIwWjAmMSQwIgYDVQQDDBtvcGVuc2hpZnQtc2lnbmVyQDE1MDMzMjAxMTkw + ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDUDnL2tQ2xf/zO7F7hmZ4S + ZuwKENdI4IYuWSxye4i3hPhKg6eKPzGzmDNWkIMDOrDAj1EgVSNPtPwsOL8OWvJm + AaTjr070D7ZGWWnrrDrWEClBx9Rx/6JAM38RT8Pu7c1hXBm0J81KufSLLYiZ/gOw + Znks5v5RUSGcAXvLkBJeATbsbh6fKX0RgQ3fFTvqQaE/r8LxcTN1uehPX1g5AaRa + z/SNDHaFtQlE3XcqAAukyMn4N5kdNcuwF3GlQ+tJnJv8SstPkfQcZbTMUQ7I2KpJ + ajXnMxmBhV5fCN4rb0QUNCrk2/B+EUMBY4MnxIakqNxnN1kvgI7FBbFgrHUe6QvJ + AgMBAAGjIzAhMA4GA1UdDwEB/wQEAwICpDAPBgNVHRMBAf8EBTADAQH/MA0GCSqG + SIb3DQEBCwUAA4IBAQAYRV57LUsqznSLZHA77o9+0fQetIE115DYP7wea42PODJI + QJ+JETEfoCr0+YOMAbVmznP9GH5cMTKEWHExcIpbMBU7nMZp6A3htcJgF2fgPzOA + aTUtzkuVCSrV//mbbYVxoFOc6sR3Br0wBs5+5iz3dBSt7xmgpMzZvqsQl655i051 + gGSTIY3z5EJmBZBjwuTjal9mMoPGA4eoTPqlITJDHQ2bdCV2oDbc7zqupGrUfZFA + qzgieEyGzdCSRwjr1/PibA3bpwHyhD9CGD0PRVVTLhw6h6L5kuN1jA20OfzWxf/o + XUsdmRaWiF+l4s6Dcd56SuRp5SGNa2+vP9Of/FX5 + -----END CERTIFICATE----- + metrics: + auth_key: 'topSecret' + role: 'hawkular' + hostname: 'example.com' + port: 443 + validate_certs: true + security_protocol: 'ssl-with-validation-custom-ca' + certificate_authority: | + -----BEGIN CERTIFICATE----- + FAKECERTsdKgAwIBAgIBATANBgkqhkiG9w0BAQsFADAmMSQwIgYDVQQDDBtvcGVu + c2hpZnQtc2lnbmVyQDE1MDMzMjAxMTkwHhcNMTcwODIxMTI1NTE5WhcNMjIwODIw + MTI1NTIwWjAmMSQwIgYDVQQDDBtvcGVuc2hpZnQtc2lnbmVyQDE1MDMzMjAxMTkw + ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDUDnL2tQ2xf/zO7F7hmZ4S + ZuwKENdI4IYuWSxye4i3hPhKg6eKPzGzmDNWkIMDOrDAj1EgVSNPtPwsOL8OWvJm + AaTjr070D7ZGWWnrrDrWEClBx9Rx/6JAM38RT8Pu7c1hXBm0J81KufSLLYiZ/gOw + Znks5v5RUSGcAXvLkBJeATbsbh6fKX0RgQ3fFTvqQaE/r8LxcTN1uehPX1g5AaRa + z/SNDHaFtQlE3XcqAAukyMn4N5kdNcuwF3GlQ+tJnJv8SstPkfQcZbTMUQ7I2KpJ + ajXnMxmBhV5fCN4rb0QUNCrk2/B+EUMBY4MnxIakqNxnN1kvgI7FBbFgrHUe6QvJ + AgMBAAGjIzAhMA4GA1UdDwEB/wQEAwICpDAPBgNVHRMBAf8EBTADAQH/MA0GCSqG + SIb3DQEBCwUAA4IBAQAYRV57LUsqznSLZHA77o9+0fQetIE115DYP7wea42PODJI + QJ+JETEfoCr0+YOMAbVmznP9GH5cMTKEWHExcIpbMBU7nMZp6A3htcJgF2fgPzOA + aTUtzkuVCSrV//mbbYVxoFOc6sR3Br0wBs5+5iz3dBSt7xmgpMzZvqsQl655i051 + gGSTIY3z5EJmBZBjwuTjal9mMoPGA4eoTPqlITJDHQ2bdCV2oDbc7zqupGrUfZFA + qzgieEyGzdCSRwjr1/PibA3bpwHyhD9CGD0PRVVTLhw6h6L5kuN1jA20OfzWxf/o + XUsdmRaWiF+l4s6Dcd56SuRp5SGNa2+vP9Of/FX5 + -----END CERTIFICATE----- + manageiq_connection: + url: 'https://127.0.0.1:80' + username: 'admin' + password: 'password' + validate_certs: true + + +- name: Update an existing provider named 'EngLab' (defaults to 'Prometheus' metrics) + manageiq_provider: + name: 'EngLab' + type: 'Openshift' + state: 'present' + provider: + auth_key: 'topSecret' + hostname: 'next.example.com' + port: 8443 + validate_certs: true + security_protocol: 'ssl-with-validation-custom-ca' + certificate_authority: | + -----BEGIN CERTIFICATE----- + FAKECERTsdKgAwIBAgIBATANBgkqhkiG9w0BAQsFADAmMSQwIgYDVQQDDBtvcGVu + c2hpZnQtc2lnbmVyQDE1MDMzMjAxMTkwHhcNMTcwODIxMTI1NTE5WhcNMjIwODIw + MTI1NTIwWjAmMSQwIgYDVQQDDBtvcGVuc2hpZnQtc2lnbmVyQDE1MDMzMjAxMTkw + ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDUDnL2tQ2xf/zO7F7hmZ4S + 
ZuwKENdI4IYuWSxye4i3hPhKg6eKPzGzmDNWkIMDOrDAj1EgVSNPtPwsOL8OWvJm + AaTjr070D7ZGWWnrrDrWEClBx9Rx/6JAM38RT8Pu7c1hXBm0J81KufSLLYiZ/gOw + Znks5v5RUSGcAXvLkBJeATbsbh6fKX0RgQ3fFTvqQaE/r8LxcTN1uehPX1g5AaRa + z/SNDHaFtQlE3XcqAAukyMn4N5kdNcuwF3GlQ+tJnJv8SstPkfQcZbTMUQ7I2KpJ + ajXnMxmBhV5fCN4rb0QUNCrk2/B+EUMBY4MnxIakqNxnN1kvgI7FBbFgrHUe6QvJ + AgMBAAGjIzAhMA4GA1UdDwEB/wQEAwICpDAPBgNVHRMBAf8EBTADAQH/MA0GCSqG + SIb3DQEBCwUAA4IBAQAYRV57LUsqznSLZHA77o9+0fQetIE115DYP7wea42PODJI + QJ+JETEfoCr0+YOMAbVmznP9GH5cMTKEWHExcIpbMBU7nMZp6A3htcJgF2fgPzOA + aTUtzkuVCSrV//mbbYVxoFOc6sR3Br0wBs5+5iz3dBSt7xmgpMzZvqsQl655i051 + gGSTIY3z5EJmBZBjwuTjal9mMoPGA4eoTPqlITJDHQ2bdCV2oDbc7zqupGrUfZFA + qzgieEyGzdCSRwjr1/PibA3bpwHyhD9CGD0PRVVTLhw6h6L5kuN1jA20OfzWxf/o + XUsdmRaWiF+l4s6Dcd56SuRp5SGNa2+vP9Of/FX5 + -----END CERTIFICATE----- + metrics: + auth_key: 'topSecret' + hostname: 'next.example.com' + port: 443 + validate_certs: true + security_protocol: 'ssl-with-validation-custom-ca' + certificate_authority: | + -----BEGIN CERTIFICATE----- + FAKECERTsdKgAwIBAgIBATANBgkqhkiG9w0BAQsFADAmMSQwIgYDVQQDDBtvcGVu + c2hpZnQtc2lnbmVyQDE1MDMzMjAxMTkwHhcNMTcwODIxMTI1NTE5WhcNMjIwODIw + MTI1NTIwWjAmMSQwIgYDVQQDDBtvcGVuc2hpZnQtc2lnbmVyQDE1MDMzMjAxMTkw + ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDUDnL2tQ2xf/zO7F7hmZ4S + ZuwKENdI4IYuWSxye4i3hPhKg6eKPzGzmDNWkIMDOrDAj1EgVSNPtPwsOL8OWvJm + AaTjr070D7ZGWWnrrDrWEClBx9Rx/6JAM38RT8Pu7c1hXBm0J81KufSLLYiZ/gOw + Znks5v5RUSGcAXvLkBJeATbsbh6fKX0RgQ3fFTvqQaE/r8LxcTN1uehPX1g5AaRa + z/SNDHaFtQlE3XcqAAukyMn4N5kdNcuwF3GlQ+tJnJv8SstPkfQcZbTMUQ7I2KpJ + ajXnMxmBhV5fCN4rb0QUNCrk2/B+EUMBY4MnxIakqNxnN1kvgI7FBbFgrHUe6QvJ + AgMBAAGjIzAhMA4GA1UdDwEB/wQEAwICpDAPBgNVHRMBAf8EBTADAQH/MA0GCSqG + SIb3DQEBCwUAA4IBAQAYRV57LUsqznSLZHA77o9+0fQetIE115DYP7wea42PODJI + QJ+JETEfoCr0+YOMAbVmznP9GH5cMTKEWHExcIpbMBU7nMZp6A3htcJgF2fgPzOA + aTUtzkuVCSrV//mbbYVxoFOc6sR3Br0wBs5+5iz3dBSt7xmgpMzZvqsQl655i051 + gGSTIY3z5EJmBZBjwuTjal9mMoPGA4eoTPqlITJDHQ2bdCV2oDbc7zqupGrUfZFA + qzgieEyGzdCSRwjr1/PibA3bpwHyhD9CGD0PRVVTLhw6h6L5kuN1jA20OfzWxf/o + XUsdmRaWiF+l4s6Dcd56SuRp5SGNa2+vP9Of/FX5 + -----END CERTIFICATE----- + manageiq_connection: + url: 'https://127.0.0.1' + username: 'admin' + password: 'password' + validate_certs: true + + +- name: Delete a provider in ManageIQ + manageiq_provider: + name: 'EngLab' + type: 'Openshift' + state: 'absent' + manageiq_connection: + url: 'https://127.0.0.1' + username: 'admin' + password: 'password' + validate_certs: true + + +- name: Create a new Amazon provider in ManageIQ using token authentication + manageiq_provider: + name: 'EngAmazon' + type: 'Amazon' + state: 'present' + provider: + hostname: 'amazon.example.com' + userid: 'hello' + password: 'world' + manageiq_connection: + url: 'https://127.0.0.1' + token: 'VeryLongToken' + validate_certs: true + + +- name: Create a new oVirt provider in ManageIQ + manageiq_provider: + name: 'RHEV' + type: 'oVirt' + state: 'present' + provider: + hostname: 'rhev01.example.com' + userid: 'admin@internal' + password: 'password' + validate_certs: true + certificate_authority: | + -----BEGIN CERTIFICATE----- + FAKECERTsdKgAwIBAgIBATANBgkqhkiG9w0BAQsFADAmMSQwIgYDVQQDDBtvcGVu + c2hpZnQtc2lnbmVyQDE1MDMzMjAxMTkwHhcNMTcwODIxMTI1NTE5WhcNMjIwODIw + MTI1NTIwWjAmMSQwIgYDVQQDDBtvcGVuc2hpZnQtc2lnbmVyQDE1MDMzMjAxMTkw + ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDUDnL2tQ2xf/zO7F7hmZ4S + ZuwKENdI4IYuWSxye4i3hPhKg6eKPzGzmDNWkIMDOrDAj1EgVSNPtPwsOL8OWvJm + AaTjr070D7ZGWWnrrDrWEClBx9Rx/6JAM38RT8Pu7c1hXBm0J81KufSLLYiZ/gOw + Znks5v5RUSGcAXvLkBJeATbsbh6fKX0RgQ3fFTvqQaE/r8LxcTN1uehPX1g5AaRa + 
z/SNDHaFtQlE3XcqAAukyMn4N5kdNcuwF3GlQ+tJnJv8SstPkfQcZbTMUQ7I2KpJ + ajXnMxmBhV5fCN4rb0QUNCrk2/B+EUMBY4MnxIakqNxnN1kvgI7FBbFgrHUe6QvJ + AgMBAAGjIzAhMA4GA1UdDwEB/wQEAwICpDAPBgNVHRMBAf8EBTADAQH/MA0GCSqG + SIb3DQEBCwUAA4IBAQAYRV57LUsqznSLZHA77o9+0fQetIE115DYP7wea42PODJI + QJ+JETEfoCr0+YOMAbVmznP9GH5cMTKEWHExcIpbMBU7nMZp6A3htcJgF2fgPzOA + aTUtzkuVCSrV//mbbYVxoFOc6sR3Br0wBs5+5iz3dBSt7xmgpMzZvqsQl655i051 + gGSTIY3z5EJmBZBjwuTjal9mMoPGA4eoTPqlITJDHQ2bdCV2oDbc7zqupGrUfZFA + qzgieEyGzdCSRwjr1/PibA3bpwHyhD9CGD0PRVVTLhw6h6L5kuN1jA20OfzWxf/o + XUsdmRaWiF+l4s6Dcd56SuRp5SGNa2+vP9Of/FX5 + -----END CERTIFICATE----- + metrics: + hostname: 'metrics.example.com' + path: 'ovirt_engine_history' + userid: 'user_id_metrics' + password: 'password_metrics' + validate_certs: true + certificate_authority: | + -----BEGIN CERTIFICATE----- + FAKECERTsdKgAwIBAgIBATANBgkqhkiG9w0BAQsFADAmMSQwIgYDVQQDDBtvcGVu + c2hpZnQtc2lnbmVyQDE1MDMzMjAxMTkwHhcNMTcwODIxMTI1NTE5WhcNMjIwODIw + MTI1NTIwWjAmMSQwIgYDVQQDDBtvcGVuc2hpZnQtc2lnbmVyQDE1MDMzMjAxMTkw + ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDUDnL2tQ2xf/zO7F7hmZ4S + ZuwKENdI4IYuWSxye4i3hPhKg6eKPzGzmDNWkIMDOrDAj1EgVSNPtPwsOL8OWvJm + AaTjr070D7ZGWWnrrDrWEClBx9Rx/6JAM38RT8Pu7c1hXBm0J81KufSLLYiZ/gOw + Znks5v5RUSGcAXvLkBJeATbsbh6fKX0RgQ3fFTvqQaE/r8LxcTN1uehPX1g5AaRa + z/SNDHaFtQlE3XcqAAukyMn4N5kdNcuwF3GlQ+tJnJv8SstPkfQcZbTMUQ7I2KpJ + ajXnMxmBhV5fCN4rb0QUNCrk2/B+EUMBY4MnxIakqNxnN1kvgI7FBbFgrHUe6QvJ + AgMBAAGjIzAhMA4GA1UdDwEB/wQEAwICpDAPBgNVHRMBAf8EBTADAQH/MA0GCSqG + SIb3DQEBCwUAA4IBAQAYRV57LUsqznSLZHA77o9+0fQetIE115DYP7wea42PODJI + QJ+JETEfoCr0+YOMAbVmznP9GH5cMTKEWHExcIpbMBU7nMZp6A3htcJgF2fgPzOA + aTUtzkuVCSrV//mbbYVxoFOc6sR3Br0wBs5+5iz3dBSt7xmgpMzZvqsQl655i051 + gGSTIY3z5EJmBZBjwuTjal9mMoPGA4eoTPqlITJDHQ2bdCV2oDbc7zqupGrUfZFA + qzgieEyGzdCSRwjr1/PibA3bpwHyhD9CGD0PRVVTLhw6h6L5kuN1jA20OfzWxf/o + XUsdmRaWiF+l4s6Dcd56SuRp5SGNa2+vP9Of/FX5 + -----END CERTIFICATE----- + manageiq_connection: + url: 'https://127.0.0.1' + username: 'admin' + password: 'password' + validate_certs: true + +- name: Create a new VMware provider in ManageIQ + manageiq_provider: + name: 'EngVMware' + type: 'VMware' + state: 'present' + provider: + hostname: 'vcenter.example.com' + host_default_vnc_port_start: 5800 + host_default_vnc_port_end: 5801 + userid: 'root' + password: 'password' + manageiq_connection: + url: 'https://127.0.0.1' + token: 'VeryLongToken' + validate_certs: true + +- name: Create a new Azure provider in ManageIQ + manageiq_provider: + name: 'EngAzure' + type: 'Azure' + provider_region: 'northeurope' + subscription: 'e272bd74-f661-484f-b223-88dd128a4049' + azure_tenant_id: 'e272bd74-f661-484f-b223-88dd128a4048' + state: 'present' + provider: + hostname: 'azure.example.com' + userid: 'e272bd74-f661-484f-b223-88dd128a4049' + password: 'password' + manageiq_connection: + url: 'https://cf-6af0.rhpds.opentlc.com' + username: 'admin' + password: 'password' + validate_certs: false + +- name: Create a new OpenStack Director provider in ManageIQ with rsa keypair + manageiq_provider: + name: 'EngDirector' + type: 'Director' + api_version: 'v3' + state: 'present' + provider: + hostname: 'director.example.com' + userid: 'admin' + password: 'password' + security_protocol: 'ssl-with-validation' + validate_certs: 'true' + certificate_authority: | + -----BEGIN CERTIFICATE----- + FAKECERTsdKgAwIBAgIBATANBgkqhkiG9w0BAQsFADAmMSQwIgYDVQQDDBtvcGVu + c2hpZnQtc2lnbmVyQDE1MDMzMjAxMTkwHhcNMTcwODIxMTI1NTE5WhcNMjIwODIw + MTI1NTIwWjAmMSQwIgYDVQQDDBtvcGVuc2hpZnQtc2lnbmVyQDE1MDMzMjAxMTkw + 
ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDUDnL2tQ2xf/zO7F7hmZ4S + ZuwKENdI4IYuWSxye4i3hPhKg6eKPzGzmDNWkIMDOrDAj1EgVSNPtPwsOL8OWvJm + AaTjr070D7ZGWWnrrDrWEClBx9Rx/6JAM38RT8Pu7c1hXBm0J81KufSLLYiZ/gOw + Znks5v5RUSGcAXvLkBJeATbsbh6fKX0RgQ3fFTvqQaE/r8LxcTN1uehPX1g5AaRa + z/SNDHaFtQlE3XcqAAukyMn4N5kdNcuwF3GlQ+tJnJv8SstPkfQcZbTMUQ7I2KpJ + ajXnMxmBhV5fCN4rb0QUNCrk2/B+EUMBY4MnxIakqNxnN1kvgI7FBbFgrHUe6QvJ + AgMBAAGjIzAhMA4GA1UdDwEB/wQEAwICpDAPBgNVHRMBAf8EBTADAQH/MA0GCSqG + SIb3DQEBCwUAA4IBAQAYRV57LUsqznSLZHA77o9+0fQetIE115DYP7wea42PODJI + QJ+JETEfoCr0+YOMAbVmznP9GH5cMTKEWHExcIpbMBU7nMZp6A3htcJgF2fgPzOA + aTUtzkuVCSrV//mbbYVxoFOc6sR3Br0wBs5+5iz3dBSt7xmgpMzZvqsQl655i051 + gGSTIY3z5EJmBZBjwuTjal9mMoPGA4eoTPqlITJDHQ2bdCV2oDbc7zqupGrUfZFA + qzgieEyGzdCSRwjr1/PibA3bpwHyhD9CGD0PRVVTLhw6h6L5kuN1jA20OfzWxf/o + XUsdmRaWiF+l4s6Dcd56SuRp5SGNa2+vP9Of/FX5 + -----END CERTIFICATE----- + ssh_keypair: + hostname: director.example.com + userid: heat-admin + auth_key: 'SecretSSHPrivateKey' + +- name: Create a new OpenStack provider in ManageIQ with amqp metrics + manageiq_provider: + name: 'EngOpenStack' + type: 'OpenStack' + api_version: 'v3' + state: 'present' + provider_region: 'europe' + tenant_mapping_enabled: 'False' + keystone_v3_domain_id: 'mydomain' + provider: + hostname: 'openstack.example.com' + userid: 'admin' + password: 'password' + security_protocol: 'ssl-with-validation' + validate_certs: 'true' + certificate_authority: | + -----BEGIN CERTIFICATE----- + FAKECERTsdKgAwIBAgIBATANBgkqhkiG9w0BAQsFADAmMSQwIgYDVQQDDBtvcGVu + c2hpZnQtc2lnbmVyQDE1MDMzMjAxMTkwHhcNMTcwODIxMTI1NTE5WhcNMjIwODIw + MTI1NTIwWjAmMSQwIgYDVQQDDBtvcGVuc2hpZnQtc2lnbmVyQDE1MDMzMjAxMTkw + ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDUDnL2tQ2xf/zO7F7hmZ4S + ZuwKENdI4IYuWSxye4i3hPhKg6eKPzGzmDNWkIMDOrDAj1EgVSNPtPwsOL8OWvJm + AaTjr070D7ZGWWnrrDrWEClBx9Rx/6JAM38RT8Pu7c1hXBm0J81KufSLLYiZ/gOw + Znks5v5RUSGcAXvLkBJeATbsbh6fKX0RgQ3fFTvqQaE/r8LxcTN1uehPX1g5AaRa + z/SNDHaFtQlE3XcqAAukyMn4N5kdNcuwF3GlQ+tJnJv8SstPkfQcZbTMUQ7I2KpJ + ajXnMxmBhV5fCN4rb0QUNCrk2/B+EUMBY4MnxIakqNxnN1kvgI7FBbFgrHUe6QvJ + AgMBAAGjIzAhMA4GA1UdDwEB/wQEAwICpDAPBgNVHRMBAf8EBTADAQH/MA0GCSqG + SIb3DQEBCwUAA4IBAQAYRV57LUsqznSLZHA77o9+0fQetIE115DYP7wea42PODJI + QJ+JETEfoCr0+YOMAbVmznP9GH5cMTKEWHExcIpbMBU7nMZp6A3htcJgF2fgPzOA + aTUtzkuVCSrV//mbbYVxoFOc6sR3Br0wBs5+5iz3dBSt7xmgpMzZvqsQl655i051 + gGSTIY3z5EJmBZBjwuTjal9mMoPGA4eoTPqlITJDHQ2bdCV2oDbc7zqupGrUfZFA + qzgieEyGzdCSRwjr1/PibA3bpwHyhD9CGD0PRVVTLhw6h6L5kuN1jA20OfzWxf/o + XUsdmRaWiF+l4s6Dcd56SuRp5SGNa2+vP9Of/FX5 + -----END CERTIFICATE----- + metrics: + role: amqp + hostname: 'amqp.example.com' + security_protocol: 'non-ssl' + port: 5666 + userid: admin + password: password + + +- name: Create a new GCE provider in ManageIQ + manageiq_provider: + name: 'EngGoogle' + type: 'GCE' + provider_region: 'europe-west1' + project: 'project1' + state: 'present' + provider: + hostname: 'gce.example.com' + auth_key: 'google_json_key' + validate_certs: 'false' +''' + +RETURN = ''' +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.manageiq import ManageIQ, manageiq_argument_spec + + +def supported_providers(): + return dict( + Openshift=dict( + class_name='ManageIQ::Providers::Openshift::ContainerManager', + authtype='bearer', + default_role='default', + metrics_role='prometheus', + alerts_role='prometheus_alerts', + ), + Amazon=dict( + class_name='ManageIQ::Providers::Amazon::CloudManager', + ), + oVirt=dict( + class_name='ManageIQ::Providers::Redhat::InfraManager', + 
default_role='default', + metrics_role='metrics', + ), + VMware=dict( + class_name='ManageIQ::Providers::Vmware::InfraManager', + ), + Azure=dict( + class_name='ManageIQ::Providers::Azure::CloudManager', + ), + Director=dict( + class_name='ManageIQ::Providers::Openstack::InfraManager', + ssh_keypair_role="ssh_keypair" + ), + OpenStack=dict( + class_name='ManageIQ::Providers::Openstack::CloudManager', + ), + GCE=dict( + class_name='ManageIQ::Providers::Google::CloudManager', + ), + ) + + +def endpoint_list_spec(): + return dict( + provider=dict(type='dict', options=endpoint_argument_spec()), + metrics=dict(type='dict', options=endpoint_argument_spec()), + alerts=dict(type='dict', options=endpoint_argument_spec()), + ssh_keypair=dict(type='dict', options=endpoint_argument_spec()), + ) + + +def endpoint_argument_spec(): + return dict( + role=dict(), + hostname=dict(required=True), + port=dict(type='int'), + validate_certs=dict(default=True, type='bool', aliases=['verify_ssl']), + certificate_authority=dict(), + security_protocol=dict( + choices=[ + 'ssl-with-validation', + 'ssl-with-validation-custom-ca', + 'ssl-without-validation', + 'non-ssl', + ], + ), + userid=dict(), + password=dict(no_log=True), + auth_key=dict(no_log=True), + subscription=dict(no_log=True), + project=dict(), + uid_ems=dict(), + path=dict(), + ) + + +def delete_nulls(h): + """ Remove null entries from a hash + + Returns: + a hash without nulls + """ + if isinstance(h, list): + return map(delete_nulls, h) + if isinstance(h, dict): + return dict((k, delete_nulls(v)) for k, v in h.items() if v is not None) + + return h + + +class ManageIQProvider(object): + """ + Object to execute provider management operations in manageiq. + """ + + def __init__(self, manageiq): + self.manageiq = manageiq + + self.module = self.manageiq.module + self.api_url = self.manageiq.api_url + self.client = self.manageiq.client + + def class_name_to_type(self, class_name): + """ Convert class_name to type + + Returns: + the type + """ + out = [k for k, v in supported_providers().items() if v['class_name'] == class_name] + if len(out) == 1: + return out[0] + + return None + + def zone_id(self, name): + """ Search for zone id by zone name. + + Returns: + the zone id, or send a module Fail signal if zone not found. + """ + zone = self.manageiq.find_collection_resource_by('zones', name=name) + if not zone: # zone doesn't exist + self.module.fail_json( + msg="zone %s does not exist in manageiq" % (name)) + + return zone['id'] + + def provider(self, name): + """ Search for provider object by name. + + Returns: + the provider, or None if provider not found. 
+ """ + return self.manageiq.find_collection_resource_by('providers', name=name) + + def build_connection_configurations(self, provider_type, endpoints): + """ Build "connection_configurations" objects from + requested endpoints provided by user + + Returns: + the user requested provider endpoints list + """ + connection_configurations = [] + endpoint_keys = endpoint_list_spec().keys() + provider_defaults = supported_providers().get(provider_type, {}) + + # get endpoint defaults + endpoint = endpoints.get('provider') + default_auth_key = endpoint.get('auth_key') + + # build a connection_configuration object for each endpoint + for endpoint_key in endpoint_keys: + endpoint = endpoints.get(endpoint_key) + if endpoint: + # get role and authtype + role = endpoint.get('role') or provider_defaults.get(endpoint_key + '_role', 'default') + if role == 'default': + authtype = provider_defaults.get('authtype') or role + else: + authtype = role + + # set a connection_configuration + connection_configurations.append({ + 'endpoint': { + 'role': role, + 'hostname': endpoint.get('hostname'), + 'port': endpoint.get('port'), + 'verify_ssl': [0, 1][endpoint.get('validate_certs', True)], + 'security_protocol': endpoint.get('security_protocol'), + 'certificate_authority': endpoint.get('certificate_authority'), + 'path': endpoint.get('path'), + }, + 'authentication': { + 'authtype': authtype, + 'userid': endpoint.get('userid'), + 'password': endpoint.get('password'), + 'auth_key': endpoint.get('auth_key') or default_auth_key, + } + }) + + return connection_configurations + + def delete_provider(self, provider): + """ Deletes a provider from manageiq. + + Returns: + a short message describing the operation executed. + """ + try: + url = '%s/providers/%s' % (self.api_url, provider['id']) + result = self.client.post(url, action='delete') + except Exception as e: + self.module.fail_json(msg="failed to delete provider %s: %s" % (provider['name'], str(e))) + + return dict(changed=True, msg=result['message']) + + def edit_provider(self, provider, name, provider_type, endpoints, zone_id, provider_region, + host_default_vnc_port_start, host_default_vnc_port_end, + subscription, project, uid_ems, tenant_mapping_enabled, api_version): + """ Edit a provider from manageiq. + + Returns: + a short message describing the operation executed. + """ + url = '%s/providers/%s' % (self.api_url, provider['id']) + + resource = dict( + name=name, + zone={'id': zone_id}, + provider_region=provider_region, + connection_configurations=endpoints, + host_default_vnc_port_start=host_default_vnc_port_start, + host_default_vnc_port_end=host_default_vnc_port_end, + subscription=subscription, + project=project, + uid_ems=uid_ems, + tenant_mapping_enabled=tenant_mapping_enabled, + api_version=api_version, + ) + + # NOTE: we do not check for diff's between requested and current + # provider, we always submit endpoints with password or auth_keys, + # since we can not compare with current password or auth_key, + # every edit request is sent to ManageIQ API without comparing + # it to current state. 
+ + # clean nulls, we do not send nulls to the api + resource = delete_nulls(resource) + + # try to update provider + try: + result = self.client.post(url, action='edit', resource=resource) + except Exception as e: + self.module.fail_json(msg="failed to update provider %s: %s" % (provider['name'], str(e))) + + return dict( + changed=True, + msg="successfully updated the provider %s: %s" % (provider['name'], result)) + + def create_provider(self, name, provider_type, endpoints, zone_id, provider_region, + host_default_vnc_port_start, host_default_vnc_port_end, + subscription, project, uid_ems, tenant_mapping_enabled, api_version): + """ Creates the provider in manageiq. + + Returns: + a short message describing the operation executed. + """ + resource = dict( + name=name, + zone={'id': zone_id}, + provider_region=provider_region, + host_default_vnc_port_start=host_default_vnc_port_start, + host_default_vnc_port_end=host_default_vnc_port_end, + subscription=subscription, + project=project, + uid_ems=uid_ems, + tenant_mapping_enabled=tenant_mapping_enabled, + api_version=api_version, + connection_configurations=endpoints, + ) + + # clean nulls, we do not send nulls to the api + resource = delete_nulls(resource) + + # try to create a new provider + try: + url = '%s/providers' % (self.api_url) + result = self.client.post(url, type=supported_providers()[provider_type]['class_name'], **resource) + except Exception as e: + self.module.fail_json(msg="failed to create provider %s: %s" % (name, str(e))) + + return dict( + changed=True, + msg="successfully created the provider %s: %s" % (name, result['results'])) + + def refresh(self, provider, name): + """ Trigger provider refresh. + + Returns: + a short message describing the operation executed. + """ + try: + url = '%s/providers/%s' % (self.api_url, provider['id']) + result = self.client.post(url, action='refresh') + except Exception as e: + self.module.fail_json(msg="failed to refresh provider %s: %s" % (name, str(e))) + + return dict( + changed=True, + msg="refreshing provider %s" % name) + + +def main(): + zone_id = None + endpoints = [] + argument_spec = dict( + state=dict(choices=['absent', 'present', 'refresh'], default='present'), + name=dict(required=True), + zone=dict(default='default'), + provider_region=dict(), + host_default_vnc_port_start=dict(), + host_default_vnc_port_end=dict(), + subscription=dict(), + project=dict(), + azure_tenant_id=dict(aliases=['keystone_v3_domain_id']), + tenant_mapping_enabled=dict(default=False, type='bool'), + api_version=dict(choices=['v2', 'v3']), + type=dict(choices=supported_providers().keys()), + ) + # add the manageiq connection arguments to the arguments + argument_spec.update(manageiq_argument_spec()) + # add the endpoint arguments to the arguments + argument_spec.update(endpoint_list_spec()) + + module = AnsibleModule( + argument_spec=argument_spec, + required_if=[ + ('state', 'present', ['provider']), + ('state', 'refresh', ['name'])], + required_together=[ + ['host_default_vnc_port_start', 'host_default_vnc_port_end'] + ], + ) + + name = module.params['name'] + zone_name = module.params['zone'] + provider_type = module.params['type'] + raw_endpoints = module.params + provider_region = module.params['provider_region'] + host_default_vnc_port_start = module.params['host_default_vnc_port_start'] + host_default_vnc_port_end = module.params['host_default_vnc_port_end'] + subscription = module.params['subscription'] + uid_ems = module.params['azure_tenant_id'] + project = module.params['project'] + 
tenant_mapping_enabled = module.params['tenant_mapping_enabled'] + api_version = module.params['api_version'] + state = module.params['state'] + + manageiq = ManageIQ(module) + manageiq_provider = ManageIQProvider(manageiq) + + provider = manageiq_provider.provider(name) + + # provider should not exist + if state == "absent": + # if we have a provider, delete it + if provider: + res_args = manageiq_provider.delete_provider(provider) + # if we do not have a provider, nothing to do + else: + res_args = dict( + changed=False, + msg="provider %s: does not exist in manageiq" % (name)) + + # provider should exist + if state == "present": + # get data user did not explicitly give + if zone_name: + zone_id = manageiq_provider.zone_id(zone_name) + + # if we do not have a provider_type, use the current provider_type + if provider and not provider_type: + provider_type = manageiq_provider.class_name_to_type(provider['type']) + + # check supported_providers types + if not provider_type: + manageiq_provider.module.fail_json( + msg="missing required argument: provider_type") + + # check supported_providers types + if provider_type not in supported_providers().keys(): + manageiq_provider.module.fail_json( + msg="provider_type %s is not supported" % (provider_type)) + + # build "connection_configurations" objects from user requested endpoints + # "provider" is a required endpoint, if we have it, we have endpoints + if raw_endpoints.get("provider"): + endpoints = manageiq_provider.build_connection_configurations(provider_type, raw_endpoints) + + # if we have a provider, edit it + if provider: + res_args = manageiq_provider.edit_provider(provider, name, provider_type, endpoints, zone_id, provider_region, + host_default_vnc_port_start, host_default_vnc_port_end, + subscription, project, uid_ems, tenant_mapping_enabled, api_version) + # if we do not have a provider, create it + else: + res_args = manageiq_provider.create_provider(name, provider_type, endpoints, zone_id, provider_region, + host_default_vnc_port_start, host_default_vnc_port_end, + subscription, project, uid_ems, tenant_mapping_enabled, api_version) + + # refresh provider (trigger sync) + if state == "refresh": + if provider: + res_args = manageiq_provider.refresh(provider, name) + else: + res_args = dict( + changed=False, + msg="provider %s: does not exist in manageiq" % (name)) + + module.exit_json(**res_args) + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/remote_management/manageiq/manageiq_tags.py b/plugins/modules/remote_management/manageiq/manageiq_tags.py new file mode 100644 index 0000000000..f009e73e64 --- /dev/null +++ b/plugins/modules/remote_management/manageiq/manageiq_tags.py @@ -0,0 +1,292 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# (c) 2017, Daniel Korn +# (c) 2017, Yaacov Zamir +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = { + 'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community' +} + + +DOCUMENTATION = ''' + +module: manageiq_tags + +short_description: Management of resource tags in ManageIQ. +extends_documentation_fragment: +- community.general.manageiq + +author: Daniel Korn (@dkorn) +description: + - The manageiq_tags module supports adding, updating and deleting tags in ManageIQ. 
+ +options: + state: + description: + - absent - tags should not exist, + - present - tags should exist, + - list - list current tags. + choices: ['absent', 'present', 'list'] + default: 'present' + tags: + description: + - tags - list of dictionaries, each includes 'name' and 'category' keys. + - required if state is present or absent. + resource_type: + description: + - the relevant resource type in manageiq + required: true + choices: ['provider', 'host', 'vm', 'blueprint', 'category', 'cluster', + 'data store', 'group', 'resource pool', 'service', 'service template', + 'template', 'tenant', 'user'] + resource_name: + description: + - the relevant resource name in manageiq + required: true +''' + +EXAMPLES = ''' +- name: Create new tags for a provider in ManageIQ + manageiq_tags: + resource_name: 'EngLab' + resource_type: 'provider' + tags: + - category: environment + name: prod + - category: owner + name: prod_ops + manageiq_connection: + url: 'http://127.0.0.1:3000' + username: 'admin' + password: 'smartvm' + validate_certs: False + +- name: Remove tags for a provider in ManageIQ + manageiq_tags: + state: absent + resource_name: 'EngLab' + resource_type: 'provider' + tags: + - category: environment + name: prod + - category: owner + name: prod_ops + manageiq_connection: + url: 'http://127.0.0.1:3000' + username: 'admin' + password: 'smartvm' + validate_certs: False + +- name: List current tags for a provider in ManageIQ + manageiq_tags: + state: list + resource_name: 'EngLab' + resource_type: 'provider' + manageiq_connection: + url: 'http://127.0.0.1:3000' + username: 'admin' + password: 'smartvm' + validate_certs: False +''' + +RETURN = ''' +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.manageiq import ManageIQ, manageiq_argument_spec, manageiq_entities + + +def query_resource_id(manageiq, resource_type, resource_name): + """ Query the resource name in ManageIQ. + + Returns: + the resource id if it exists in manageiq, Fail otherwise. + """ + resource = manageiq.find_collection_resource_by(resource_type, name=resource_name) + if resource: + return resource["id"] + else: + msg = "{resource_name} {resource_type} does not exist in manageiq".format( + resource_name=resource_name, resource_type=resource_type) + manageiq.module.fail_json(msg=msg) + + +class ManageIQTags(object): + """ + Object to execute tags management operations of manageiq resources. 
+ """ + + def __init__(self, manageiq, resource_type, resource_id): + self.manageiq = manageiq + + self.module = self.manageiq.module + self.api_url = self.manageiq.api_url + self.client = self.manageiq.client + + self.resource_type = resource_type + self.resource_id = resource_id + self.resource_url = '{api_url}/{resource_type}/{resource_id}'.format( + api_url=self.api_url, + resource_type=resource_type, + resource_id=resource_id) + + def full_tag_name(self, tag): + """ Returns the full tag name in manageiq + """ + return '/managed/{tag_category}/{tag_name}'.format( + tag_category=tag['category'], + tag_name=tag['name']) + + def clean_tag_object(self, tag): + """ Clean a tag object to have human readable form of: + { + full_name: STR, + name: STR, + display_name: STR, + category: STR + } + """ + full_name = tag.get('name') + categorization = tag.get('categorization', {}) + + return dict( + full_name=full_name, + name=categorization.get('name'), + display_name=categorization.get('display_name'), + category=categorization.get('category', {}).get('name')) + + def query_resource_tags(self): + """ Returns a set of the tag objects assigned to the resource + """ + url = '{resource_url}/tags?expand=resources&attributes=categorization' + try: + response = self.client.get(url.format(resource_url=self.resource_url)) + except Exception as e: + msg = "Failed to query {resource_type} tags: {error}".format( + resource_type=self.resource_type, + error=e) + self.module.fail_json(msg=msg) + + resources = response.get('resources', []) + + # clean the returned rest api tag object to look like: + # {full_name: STR, name: STR, display_name: STR, category: STR} + tags = [self.clean_tag_object(tag) for tag in resources] + + return tags + + def tags_to_update(self, tags, action): + """ Create a list of tags we need to update in ManageIQ. + + Returns: + Whether or not a change took place and a message describing the + operation executed. + """ + tags_to_post = [] + assigned_tags = self.query_resource_tags() + + # make a list of assigned full tag names strings + # e.g. ['/managed/environment/prod', ...] 
+        assigned_tags_set = set([tag['full_name'] for tag in assigned_tags])
+
+        for tag in tags:
+            assigned = self.full_tag_name(tag) in assigned_tags_set
+
+            if assigned and action == 'unassign':
+                tags_to_post.append(tag)
+            elif (not assigned) and action == 'assign':
+                tags_to_post.append(tag)
+
+        return tags_to_post
+
+    def assign_or_unassign_tags(self, tags, action):
+        """ Perform assign/unassign action
+        """
+        # get the list of tags that need to be changed
+        tags_to_post = self.tags_to_update(tags, action)
+        if not tags_to_post:
+            return dict(
+                changed=False,
+                msg="Tags already {action}ed, nothing to do".format(action=action))
+
+        # try to assign or unassign tags to resource
+        # (post only the tags that actually need a change)
+        url = '{resource_url}/tags'.format(resource_url=self.resource_url)
+        try:
+            response = self.client.post(url, action=action, resources=tags_to_post)
+        except Exception as e:
+            msg = "Failed to {action} tag: {error}".format(
+                action=action,
+                error=e)
+            self.module.fail_json(msg=msg)
+
+        # check all entities in result to be successful
+        for result in response['results']:
+            if not result['success']:
+                msg = "Failed to {action}: {message}".format(
+                    action=action,
+                    message=result['message'])
+                self.module.fail_json(msg=msg)
+
+        # successfully changed all needed tags
+        return dict(
+            changed=True,
+            msg="Successfully {action}ed tags".format(action=action))
+
+
+def main():
+    actions = {'present': 'assign', 'absent': 'unassign', 'list': 'list'}
+    argument_spec = dict(
+        tags=dict(type='list'),
+        resource_name=dict(required=True, type='str'),
+        resource_type=dict(required=True, type='str',
+                           choices=manageiq_entities().keys()),
+        state=dict(required=False, type='str',
+                   choices=['present', 'absent', 'list'], default='present'),
+    )
+    # add the manageiq connection arguments to the arguments
+    argument_spec.update(manageiq_argument_spec())
+
+    module = AnsibleModule(
+        argument_spec=argument_spec,
+        required_if=[
+            ('state', 'present', ['tags']),
+            ('state', 'absent', ['tags'])
+        ],
+    )
+
+    tags = module.params['tags']
+    resource_type_key = module.params['resource_type']
+    resource_name = module.params['resource_name']
+    state = module.params['state']
+
+    # get the action and resource type
+    action = actions[state]
+    resource_type = manageiq_entities()[resource_type_key]
+
+    manageiq = ManageIQ(module)
+
+    # query resource id, fail if resource does not exist
+    resource_id = query_resource_id(manageiq, resource_type, resource_name)
+
+    manageiq_tags = ManageIQTags(manageiq, resource_type, resource_id)
+
+    if action == 'list':
+        # return a list of current tags for this object
+        current_tags = manageiq_tags.query_resource_tags()
+        res_args = dict(changed=False, tags=current_tags)
+    else:
+        # assign or unassign the tags
+        res_args = manageiq_tags.assign_or_unassign_tags(tags, action)
+
+    module.exit_json(**res_args)
+
+
+if __name__ == "__main__":
+    main()
diff --git a/plugins/modules/remote_management/manageiq/manageiq_tenant.py b/plugins/modules/remote_management/manageiq/manageiq_tenant.py
new file mode 100644
index 0000000000..6d40a50abe
--- /dev/null
+++ b/plugins/modules/remote_management/manageiq/manageiq_tenant.py
@@ -0,0 +1,555 @@
+#!/usr/bin/python
+#
+# (c) 2018, Evert Mulder (based on manageiq_user.py by Daniel Korn)
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <https://www.gnu.org/licenses/>.
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+DOCUMENTATION = '''
+
+module: manageiq_tenant
+
+short_description: Management of tenants in ManageIQ.
+extends_documentation_fragment:
+- community.general.manageiq
+
+author: Evert Mulder (@evertmulder)
+description:
+  - The manageiq_tenant module supports adding, updating and deleting tenants in ManageIQ.
+requirements:
+- manageiq-client
+options:
+  state:
+    description:
+    - absent - tenant should not exist, present - tenant should exist.
+    choices: ['absent', 'present']
+    default: 'present'
+  name:
+    description:
+    - The tenant name.
+    required: true
+    default: null
+  description:
+    description:
+    - The tenant description.
+    required: true
+    default: null
+  parent_id:
+    description:
+    - The id of the parent tenant. If not supplied the root tenant is used.
+    - The C(parent_id) takes precedence over C(parent) when supplied.
+    required: false
+    default: null
+  parent:
+    description:
+    - The name of the parent tenant. If not supplied and no C(parent_id) is supplied the root tenant is used.
+    required: false
+    default: null
+  quotas:
+    description:
+    - The tenant quotas.
+    - All parameters are case sensitive.
+    - 'Valid attributes are:'
+    - ' - C(cpu_allocated) (int): use null to remove the quota.'
+    - ' - C(mem_allocated) (GB): use null to remove the quota.'
+    - ' - C(storage_allocated) (GB): use null to remove the quota.'
+    - ' - C(vms_allocated) (int): use null to remove the quota.'
+    - ' - C(templates_allocated) (int): use null to remove the quota.'
+    required: false
+    default: null
+'''
+
+EXAMPLES = '''
+- name: Update the root tenant in ManageIQ
+  manageiq_tenant:
+    name: 'My Company'
+    description: 'My company name'
+    manageiq_connection:
+      url: 'http://127.0.0.1:3000'
+      username: 'admin'
+      password: 'smartvm'
+      validate_certs: False
+
+- name: Create a tenant in ManageIQ
+  manageiq_tenant:
+    name: 'Dep1'
+    description: 'Manufacturing department'
+    parent_id: 1
+    manageiq_connection:
+      url: 'http://127.0.0.1:3000'
+      username: 'admin'
+      password: 'smartvm'
+      validate_certs: False
+
+- name: Delete a tenant in ManageIQ
+  manageiq_tenant:
+    state: 'absent'
+    name: 'Dep1'
+    parent_id: 1
+    manageiq_connection:
+      url: 'http://127.0.0.1:3000'
+      username: 'admin'
+      password: 'smartvm'
+      validate_certs: False
+
+- name: Set tenant quota for cpu_allocated, mem_allocated, remove quota for vms_allocated
+  manageiq_tenant:
+    name: 'Dep1'
+    parent_id: 1
+    quotas:
+      - cpu_allocated: 100
+      - mem_allocated: 50
+      - vms_allocated: null
+    manageiq_connection:
+      url: 'http://127.0.0.1:3000'
+      username: 'admin'
+      password: 'smartvm'
+      validate_certs: False
+
+
+- name: Delete a tenant in ManageIQ using a token
+  manageiq_tenant:
+    state: 'absent'
+    name: 'Dep1'
+    parent_id: 1
+    manageiq_connection:
+      url: 'http://127.0.0.1:3000'
+      token: 'sometoken'
+      validate_certs: False
+'''
+
+RETURN = '''
+tenant:
+  description: The tenant.
+  returned: success
+  type: complex
+  contains:
+    id:
+      description: The tenant id
+      returned: success
+      type: int
+    name:
+      description: The tenant name
+      returned: success
+      type: str
+    description:
+      description: The tenant description
+      returned: success
+      type: str
+    parent_id:
+      description: The id of the parent tenant
+      returned: success
+      type: int
+    quotas:
+      description: Dictionary of tenant quotas
+      returned: success
+      type: dict
+      sample:
+        cpu_allocated: 100
+        mem_allocated: 50
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.manageiq import ManageIQ, manageiq_argument_spec
+
+
+class ManageIQTenant(object):
+    """
+        Object to execute tenant management operations in manageiq.
+    """
+
+    def __init__(self, manageiq):
+        self.manageiq = manageiq
+
+        self.module = self.manageiq.module
+        self.api_url = self.manageiq.api_url
+        self.client = self.manageiq.client
+
+    def tenant(self, name, parent_id, parent):
+        """ Search for a tenant object by name and parent_id or parent,
+            or for the root tenant if no parent or parent_id is supplied.
+        Returns:
+            the parent tenant (None for the root tenant) and
+            the tenant, or None if the tenant was not found.
+        """
+
+        if parent_id:
+            parent_tenant_res = self.client.collections.tenants.find_by(id=parent_id)
+            if not parent_tenant_res:
+                self.module.fail_json(msg="Parent tenant with id '%s' not found in manageiq" % str(parent_id))
+            parent_tenant = parent_tenant_res[0]
+            tenants = self.client.collections.tenants.find_by(name=name)
+
+            for tenant in tenants:
+                try:
+                    ancestry = tenant['ancestry']
+                except AttributeError:
+                    ancestry = None
+
+                if ancestry:
+                    tenant_parent_id = int(ancestry.split("/")[-1])
+                    if tenant_parent_id == parent_id:
+                        return parent_tenant, tenant
+
+            return parent_tenant, None
+        else:
+            if parent:
+                parent_tenant_res = self.client.collections.tenants.find_by(name=parent)
+                if not parent_tenant_res:
+                    self.module.fail_json(msg="Parent tenant '%s' not found in manageiq" % parent)
+
+                if len(parent_tenant_res) > 1:
+                    self.module.fail_json(msg="Multiple parent tenants found in manageiq with name '%s'" % parent)
+
+                parent_tenant = parent_tenant_res[0]
+                parent_id = int(parent_tenant['id'])
+                tenants = self.client.collections.tenants.find_by(name=name)
+
+                for tenant in tenants:
+                    try:
+                        ancestry = tenant['ancestry']
+                    except AttributeError:
+                        ancestry = None
+
+                    if ancestry:
+                        tenant_parent_id = int(ancestry.split("/")[-1])
+                        if tenant_parent_id == parent_id:
+                            return parent_tenant, tenant
+
+                return parent_tenant, None
+            else:
+                # No parent or parent_id supplied; select the root tenant
+                return None, self.client.collections.tenants.find_by(ancestry=None)[0]
+
+    def compare_tenant(self, tenant, name, description):
+        """ Compare tenant fields with new field values.
+
+        Returns:
+            False if the tenant fields differ from the new field values,
+            True otherwise.
+        """
+        found_difference = (
+            (name and tenant['name'] != name) or
+            (description and tenant['description'] != description)
+        )
+
+        return not found_difference
+
+    def delete_tenant(self, tenant):
+        """ Deletes a tenant from manageiq.
+
+        Returns:
+            dict with `msg` and `changed`
+        """
+        try:
+            url = '%s/tenants/%s' % (self.api_url, tenant['id'])
+            result = self.client.post(url, action='delete')
+        except Exception as e:
+            self.module.fail_json(msg="failed to delete tenant %s: %s" % (tenant['name'], str(e)))
+
+        if result['success'] is False:
+            self.module.fail_json(msg=result['message'])
+
+        return dict(changed=True, msg=result['message'])
+
+    def edit_tenant(self, tenant, name, description):
+        """ Edit a manageiq tenant.
+
+        Returns:
+            dict with `msg` and `changed`
+        """
+        resource = dict(name=name, description=description, use_config_for_attributes=False)
+
+        # check if we need to update (compare_tenant returns True if no difference is found)
+        if self.compare_tenant(tenant, name, description):
+            return dict(
+                changed=False,
+                msg="tenant %s is not changed." % tenant['name'],
+                tenant=tenant['_data'])
+
+        # try to update tenant
+        try:
+            result = self.client.post(tenant['href'], action='edit', resource=resource)
+        except Exception as e:
+            self.module.fail_json(msg="failed to update tenant %s: %s" % (tenant['name'], str(e)))
+
+        return dict(
+            changed=True,
+            msg="successfully updated the tenant with id %s" % (tenant['id']))
+
+    def create_tenant(self, name, description, parent_tenant):
+        """ Creates the tenant in manageiq.
+
+        Returns:
+            dict with `msg`, `changed` and `tenant_id`
+        """
+        parent_id = parent_tenant['id']
+        # check for required arguments
+        for key, value in dict(name=name, description=description, parent_id=parent_id).items():
+            if value in (None, ''):
+                self.module.fail_json(msg="missing required argument: %s" % key)
+
+        url = '%s/tenants' % self.api_url
+
+        resource = {'name': name, 'description': description, 'parent': {'id': parent_id}}
+
+        try:
+            result = self.client.post(url, action='create', resource=resource)
+            tenant_id = result['results'][0]['id']
+        except Exception as e:
+            self.module.fail_json(msg="failed to create tenant %s: %s" % (name, str(e)))
+
+        return dict(
+            changed=True,
+            msg="successfully created tenant '%s' with id '%s'" % (name, tenant_id),
+            tenant_id=tenant_id)
+
+    def tenant_quota(self, tenant, quota_key):
+        """ Search for a tenant quota object by tenant and quota_key.
+        Returns:
+            a list with the matching quota, empty if the tenant quota was not found.
+        """
+
+        tenant_quotas = self.client.get("%s/quotas?expand=resources&filter[]=name=%s" % (tenant['href'], quota_key))
+
+        return tenant_quotas['resources']
+
+    def tenant_quotas(self, tenant):
+        """ Search for the tenant quotas object by tenant.
+        Returns:
+            the quotas for the tenant; an empty list if no tenant quotas were found.
+        """
+
+        tenant_quotas = self.client.get("%s/quotas?expand=resources" % (tenant['href']))
+
+        return tenant_quotas['resources']
+
+    def update_tenant_quotas(self, tenant, quotas):
+        """ Updates the tenant quotas in manageiq.
+
+        Returns:
+            dict with `msg` and `changed`
+        """
+
+        changed = False
+        messages = []
+        for quota_key, quota_value in quotas.items():
+            current_quota_filtered = self.tenant_quota(tenant, quota_key)
+            if current_quota_filtered:
+                current_quota = current_quota_filtered[0]
+            else:
+                current_quota = None
+
+            if quota_value:
+                # Convert the GB values to bytes
+                if quota_key in ['storage_allocated', 'mem_allocated']:
+                    quota_value_int = int(quota_value) * 1024 * 1024 * 1024
+                else:
+                    quota_value_int = int(quota_value)
+                if current_quota:
+                    res = self.edit_tenant_quota(tenant, current_quota, quota_key, quota_value_int)
+                else:
+                    res = self.create_tenant_quota(tenant, quota_key, quota_value_int)
+            else:
+                if current_quota:
+                    res = self.delete_tenant_quota(tenant, current_quota)
+                else:
+                    res = dict(changed=False, msg="tenant quota '%s' does not exist" % quota_key)
+
+            if res['changed']:
+                changed = True
+
+            messages.append(res['msg'])
+
+        return dict(
+            changed=changed,
+            msg=', '.join(messages))
+
+    def edit_tenant_quota(self, tenant, current_quota, quota_key, quota_value):
+        """ Updates a tenant quota in manageiq.
+
+        Returns:
+            result
+        """
+
+        if current_quota['value'] == quota_value:
+            return dict(
+                changed=False,
+                msg="tenant quota %s already has value %s" % (quota_key, quota_value))
+        else:
+
+            url = '%s/quotas/%s' % (tenant['href'], current_quota['id'])
+            resource = {'value': quota_value}
+            try:
+                self.client.post(url, action='edit', resource=resource)
+            except Exception as e:
+                self.module.fail_json(msg="failed to update tenant quota %s: %s" % (quota_key, str(e)))
+
+            return dict(
+                changed=True,
+                msg="successfully updated tenant quota %s" % quota_key)
+
+    def create_tenant_quota(self, tenant, quota_key, quota_value):
+        """ Creates a tenant quota in manageiq.
+
+        Returns:
+            result
+        """
+        url = '%s/quotas' % (tenant['href'])
+        resource = {'name': quota_key, 'value': quota_value}
+        try:
+            self.client.post(url, action='create', resource=resource)
+        except Exception as e:
+            self.module.fail_json(msg="failed to create tenant quota %s: %s" % (quota_key, str(e)))
+
+        return dict(
+            changed=True,
+            msg="successfully created tenant quota %s" % quota_key)
+
+    def delete_tenant_quota(self, tenant, quota):
+        """ Deletes a tenant quota in manageiq.
+
+        Returns:
+            result
+        """
+        try:
+            result = self.client.post(quota['href'], action='delete')
+        except Exception as e:
+            self.module.fail_json(msg="failed to delete tenant quota '%s': %s" % (quota['name'], str(e)))
+
+        return dict(changed=True, msg=result['message'])
+
+    def create_tenant_response(self, tenant, parent_tenant):
+        """ Creates the ansible result object from a manageiq tenant entity
+
+        Returns:
+            a dict with the tenant id, name, description, parent id and quotas
+        """
+        tenant_quotas = self.create_tenant_quotas_response(tenant['tenant_quotas'])
+
+        try:
+            ancestry = tenant['ancestry']
+            tenant_parent_id = ancestry.split("/")[-1]
+        except AttributeError:
+            # The root tenant does not return the ancestry attribute
+            tenant_parent_id = None
+
+        return dict(
+            id=tenant['id'],
+            name=tenant['name'],
+            description=tenant['description'],
+            parent_id=tenant_parent_id,
+            quotas=tenant_quotas
+        )
+
+    @staticmethod
+    def create_tenant_quotas_response(tenant_quotas):
+        """ Creates the ansible result object from a manageiq tenant_quotas entity
+
+        Returns:
+            a dict with the applied quotas, name and value
+        """
+
+        if not tenant_quotas:
+            return {}
+
+        result = {}
+        for quota in tenant_quotas:
+            if quota['unit'] == 'bytes':
+                value = float(quota['value']) / (1024 * 1024 * 1024)
+            else:
+                value = quota['value']
+            result[quota['name']] = value
+        return result
+
+
+def main():
+    argument_spec = dict(
+        name=dict(required=True, type='str'),
+        description=dict(required=True, type='str'),
+        parent_id=dict(required=False, type='int'),
+        parent=dict(required=False, type='str'),
+        state=dict(choices=['absent', 'present'], default='present'),
+        quotas=dict(type='dict', default={})
+    )
+    # add the manageiq connection arguments to the arguments
+    argument_spec.update(manageiq_argument_spec())
+
+    module = AnsibleModule(
+        argument_spec=argument_spec
+    )
+
+    name = module.params['name']
+    description = module.params['description']
+    parent_id = module.params['parent_id']
+    parent = module.params['parent']
+    state = module.params['state']
+    quotas = module.params['quotas']
+
+    manageiq = ManageIQ(module)
+    manageiq_tenant = ManageIQTenant(manageiq)
+
+    parent_tenant, tenant = manageiq_tenant.tenant(name, parent_id, parent)
+
+    # tenant should not exist
+    if state == "absent":
+        # if we have a tenant, delete it
+        if tenant:
+            res_args = manageiq_tenant.delete_tenant(tenant)
+        # if we do not have a tenant, nothing to do
+        else:
+            if parent_id:
+                msg = "tenant '%s' with parent_id %i does not exist in manageiq" % (name, parent_id)
+            else:
+                msg = "tenant '%s' with parent '%s' does not exist in manageiq" % (name, parent)
+
+            res_args = dict(
+                changed=False,
+                msg=msg)
+
+    # tenant should exist
+    if state == "present":
+        # if we have a tenant, edit it
+        if tenant:
+            res_args = manageiq_tenant.edit_tenant(tenant, name, description)
+
+        # if we do not have a tenant, create it
+        else:
+            res_args = manageiq_tenant.create_tenant(name, description, parent_tenant)
+            tenant = manageiq.client.get_entity('tenants', res_args['tenant_id'])
+
+        # quotas are supplied and we have a tenant
+        if quotas:
+            tenant_quotas_res = manageiq_tenant.update_tenant_quotas(tenant, quotas)
+            if tenant_quotas_res['changed']:
+                res_args['changed'] = True
+                res_args['tenant_quotas_msg'] = tenant_quotas_res['msg']
+
+        tenant.reload(expand='resources', attributes=['tenant_quotas'])
+        res_args['tenant'] = manageiq_tenant.create_tenant_response(tenant, parent_tenant)
+
+    module.exit_json(**res_args)
+
+
+if __name__ == "__main__":
+    main()
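The quota helpers above take C(mem_allocated) and C(storage_allocated) in GB, convert them to bytes before posting to the ManageIQ API, and convert byte-based quotas back to GB when building the module's return value. A minimal standalone sketch of that round trip, with illustrative values outside the module code:

GIB = 1024 * 1024 * 1024


def quota_to_api(name, value):
    # GB -> bytes for the byte-based quotas; plain counts pass through unchanged
    return int(value) * GIB if name in ('storage_allocated', 'mem_allocated') else int(value)


def quota_from_api(unit, value):
    # bytes -> GB for the module's return value
    return float(value) / GIB if unit == 'bytes' else value


print(quota_to_api('mem_allocated', 50))      # 53687091200
print(quota_from_api('bytes', 53687091200))   # 50.0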
diff --git a/plugins/modules/remote_management/manageiq/manageiq_user.py b/plugins/modules/remote_management/manageiq/manageiq_user.py
new file mode 100644
index 0000000000..8e8b8cd8db
--- /dev/null
+++ b/plugins/modules/remote_management/manageiq/manageiq_user.py
@@ -0,0 +1,329 @@
+#!/usr/bin/python
+#
+# (c) 2017, Daniel Korn
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <https://www.gnu.org/licenses/>.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+
+module: manageiq_user
+
+short_description: Management of users in ManageIQ.
+extends_documentation_fragment:
+- community.general.manageiq
+
+author: Daniel Korn (@dkorn)
+description:
+  - The manageiq_user module supports adding, updating and deleting users in ManageIQ.
+
+options:
+  state:
+    description:
+      - absent - user should not exist, present - user should be.
+    choices: ['absent', 'present']
+    default: 'present'
+  userid:
+    description:
+      - The unique userid in manageiq, often referred to as the username.
+    required: true
+  name:
+    description:
+      - The user's full name.
+  password:
+    description:
+      - The user's password.
+  group:
+    description:
+      - The name of the group to which the user belongs.
+  email:
+    description:
+      - The user's e-mail address.
+  update_password:
+    default: always
+    choices: ['always', 'on_create']
+    description:
+      - C(always) will update passwords unconditionally. C(on_create) will only set the password for a newly created user.
+'''
+
+EXAMPLES = '''
+- name: Create a new user in ManageIQ
+  manageiq_user:
+    userid: 'jdoe'
+    name: 'Jane Doe'
+    password: 'VerySecret'
+    group: 'EvmGroup-user'
+    email: 'jdoe@example.com'
+    manageiq_connection:
+      url: 'http://127.0.0.1:3000'
+      username: 'admin'
+      password: 'smartvm'
+      validate_certs: False
+
+- name: Create a new user in ManageIQ using a token
+  manageiq_user:
+    userid: 'jdoe'
+    name: 'Jane Doe'
+    password: 'VerySecret'
+    group: 'EvmGroup-user'
+    email: 'jdoe@example.com'
+    manageiq_connection:
+      url: 'http://127.0.0.1:3000'
+      token: 'sometoken'
+      validate_certs: False
+
+- name: Delete a user in ManageIQ
+  manageiq_user:
+    state: 'absent'
+    userid: 'jdoe'
+    manageiq_connection:
+      url: 'http://127.0.0.1:3000'
+      username: 'admin'
+      password: 'smartvm'
+      validate_certs: False
+
+- name: Delete a user in ManageIQ using a token
+  manageiq_user:
+    state: 'absent'
+    userid: 'jdoe'
+    manageiq_connection:
+      url: 'http://127.0.0.1:3000'
+      token: 'sometoken'
+      validate_certs: False
+
+- name: Update email of user in ManageIQ
+  manageiq_user:
+    userid: 'jdoe'
+    email: 'jaustine@example.com'
+    manageiq_connection:
+      url: 'http://127.0.0.1:3000'
+      username: 'admin'
+      password: 'smartvm'
+      validate_certs: False
+
+- name: Update email of user in ManageIQ using a token
+  manageiq_user:
+    userid: 'jdoe'
+    email: 'jaustine@example.com'
+    manageiq_connection:
+      url: 'http://127.0.0.1:3000'
+      token: 'sometoken'
+      validate_certs: False
+'''
+
+RETURN = '''
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.manageiq import ManageIQ, manageiq_argument_spec
+
+
+class ManageIQUser(object):
+    """
+        Object to execute user management operations in manageiq.
+    """
+
+    def __init__(self, manageiq):
+        self.manageiq = manageiq
+
+        self.module = self.manageiq.module
+        self.api_url = self.manageiq.api_url
+        self.client = self.manageiq.client
+
+    def group_id(self, description):
+        """ Search for a group id by group description.
+
+        Returns:
+            the group id, or fails the module if the group is not found.
+        """
+        group = self.manageiq.find_collection_resource_by('groups', description=description)
+        if not group:  # group doesn't exist
+            self.module.fail_json(
+                msg="group %s does not exist in manageiq" % (description))
+
+        return group['id']
+
+    def user(self, userid):
+        """ Search for a user object by userid.
+
+        Returns:
+            the user, or None if the user was not found.
+        """
+        return self.manageiq.find_collection_resource_by('users', userid=userid)
+
+    def compare_user(self, user, name, group_id, password, email):
+        """ Compare user fields with new field values.
+
+        Returns:
+            False if the user fields differ from the new field values,
+            True otherwise.
+        """
+        found_difference = (
+            (name and user['name'] != name) or
+            (password is not None) or
+            (email and user['email'] != email) or
+            (group_id and user['current_group_id'] != group_id)
+        )
+
+        return not found_difference
+
+    def delete_user(self, user):
+        """ Deletes a user from manageiq.
+
+        Returns:
+            a short message describing the operation executed.
+        """
+        try:
+            url = '%s/users/%s' % (self.api_url, user['id'])
+            result = self.client.post(url, action='delete')
+        except Exception as e:
+            self.module.fail_json(msg="failed to delete user %s: %s" % (user['userid'], str(e)))
+
+        return dict(changed=True, msg=result['message'])
+
+    def edit_user(self, user, name, group, password, email):
+        """ Edit a user in manageiq.
+
+        Returns:
+            a short message describing the operation executed.
+ """ + group_id = None + url = '%s/users/%s' % (self.api_url, user['id']) + + resource = dict(userid=user['userid']) + if group is not None: + group_id = self.group_id(group) + resource['group'] = dict(id=group_id) + if name is not None: + resource['name'] = name + if email is not None: + resource['email'] = email + + # if there is a password param, but 'update_password' is 'on_create' + # then discard the password (since we're editing an existing user) + if self.module.params['update_password'] == 'on_create': + password = None + if password is not None: + resource['password'] = password + + # check if we need to update ( compare_user is true is no difference found ) + if self.compare_user(user, name, group_id, password, email): + return dict( + changed=False, + msg="user %s is not changed." % (user['userid'])) + + # try to update user + try: + result = self.client.post(url, action='edit', resource=resource) + except Exception as e: + self.module.fail_json(msg="failed to update user %s: %s" % (user['userid'], str(e))) + + return dict( + changed=True, + msg="successfully updated the user %s: %s" % (user['userid'], result)) + + def create_user(self, userid, name, group, password, email): + """ Creates the user in manageiq. + + Returns: + the created user id, name, created_on timestamp, + updated_on timestamp, userid and current_group_id. + """ + # check for required arguments + for key, value in dict(name=name, group=group, password=password).items(): + if value in (None, ''): + self.module.fail_json(msg="missing required argument: %s" % (key)) + + group_id = self.group_id(group) + url = '%s/users' % (self.api_url) + + resource = {'userid': userid, 'name': name, 'password': password, 'group': {'id': group_id}} + if email is not None: + resource['email'] = email + + # try to create a new user + try: + result = self.client.post(url, action='create', resource=resource) + except Exception as e: + self.module.fail_json(msg="failed to create user %s: %s" % (userid, str(e))) + + return dict( + changed=True, + msg="successfully created the user %s: %s" % (userid, result['results'])) + + +def main(): + argument_spec = dict( + userid=dict(required=True, type='str'), + name=dict(), + password=dict(no_log=True), + group=dict(), + email=dict(), + state=dict(choices=['absent', 'present'], default='present'), + update_password=dict(choices=['always', 'on_create'], + default='always'), + ) + # add the manageiq connection arguments to the arguments + argument_spec.update(manageiq_argument_spec()) + + module = AnsibleModule( + argument_spec=argument_spec, + ) + + userid = module.params['userid'] + name = module.params['name'] + password = module.params['password'] + group = module.params['group'] + email = module.params['email'] + state = module.params['state'] + + manageiq = ManageIQ(module) + manageiq_user = ManageIQUser(manageiq) + + user = manageiq_user.user(userid) + + # user should not exist + if state == "absent": + # if we have a user, delete it + if user: + res_args = manageiq_user.delete_user(user) + # if we do not have a user, nothing to do + else: + res_args = dict( + changed=False, + msg="user %s: does not exist in manageiq" % (userid)) + + # user should exist + if state == "present": + # if we have a user, edit it + if user: + res_args = manageiq_user.edit_user(user, name, group, password, email) + # if we do not have a user, create it + else: + res_args = manageiq_user.create_user(userid, name, group, password, email) + + module.exit_json(**res_args) + + +if __name__ == "__main__": + main() 
diff --git a/plugins/modules/remote_management/oneview/oneview_datacenter_facts.py b/plugins/modules/remote_management/oneview/oneview_datacenter_facts.py new file mode 120000 index 0000000000..290e891ee9 --- /dev/null +++ b/plugins/modules/remote_management/oneview/oneview_datacenter_facts.py @@ -0,0 +1 @@ +oneview_datacenter_info.py \ No newline at end of file diff --git a/plugins/modules/remote_management/oneview/oneview_datacenter_info.py b/plugins/modules/remote_management/oneview/oneview_datacenter_info.py new file mode 100644 index 0000000000..1e6e2cc037 --- /dev/null +++ b/plugins/modules/remote_management/oneview/oneview_datacenter_info.py @@ -0,0 +1,156 @@ +#!/usr/bin/python +# Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: oneview_datacenter_info +short_description: Retrieve information about the OneView Data Centers +description: + - Retrieve information about the OneView Data Centers. + - This module was called C(oneview_datacenter_facts) before Ansible 2.9, returning C(ansible_facts). + Note that the M(oneview_datacenter_info) module no longer returns C(ansible_facts)! +requirements: + - "hpOneView >= 2.0.1" +author: + - Alex Monteiro (@aalexmonteiro) + - Madhav Bharadwaj (@madhav-bharadwaj) + - Priyanka Sood (@soodpr) + - Ricardo Galeno (@ricardogpsf) +options: + name: + description: + - Data Center name. + options: + description: + - "Retrieve additional information. Options available: 'visualContent'." + +extends_documentation_fragment: +- community.general.oneview +- community.general.oneview.factsparams + +''' + +EXAMPLES = ''' +- name: Gather information about all Data Centers + oneview_datacenter_info: + hostname: 172.16.101.48 + username: administrator + password: my_password + api_version: 500 + delegate_to: localhost + register: result +- debug: + msg: "{{ result.datacenters }}" + +- name: Gather paginated, filtered and sorted information about Data Centers + oneview_datacenter_info: + hostname: 172.16.101.48 + username: administrator + password: my_password + api_version: 500 + params: + start: 0 + count: 3 + sort: 'name:descending' + filter: 'state=Unmanaged' + register: result +- debug: + msg: "{{ result.datacenters }}" + +- name: Gather information about a Data Center by name + oneview_datacenter_info: + hostname: 172.16.101.48 + username: administrator + password: my_password + api_version: 500 + name: "My Data Center" + delegate_to: localhost + register: result +- debug: + msg: "{{ result.datacenters }}" + +- name: Gather information about the Data Center Visual Content + oneview_datacenter_info: + hostname: 172.16.101.48 + username: administrator + password: my_password + api_version: 500 + name: "My Data Center" + options: + - visualContent + delegate_to: localhost + register: result +- debug: + msg: "{{ result.datacenters }}" +- debug: + msg: "{{ result.datacenter_visual_content }}" +''' + +RETURN = ''' +datacenters: + description: Has all the OneView information about the Data Centers. + returned: Always, but can be null. + type: dict + +datacenter_visual_content: + description: Has information about the Data Center Visual Content. + returned: When requested, but can be null. 
+ type: dict +''' + +from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase + + +class DatacenterInfoModule(OneViewModuleBase): + argument_spec = dict( + name=dict(type='str'), + options=dict(type='list'), + params=dict(type='dict') + ) + + def __init__(self): + super(DatacenterInfoModule, self).__init__(additional_arg_spec=self.argument_spec) + self.is_old_facts = self.module._name == 'oneview_datacenter_facts' + if self.is_old_facts: + self.module.deprecate("The 'oneview_datacenter_facts' module has been renamed to 'oneview_datacenter_info', " + "and the renamed one no longer returns ansible_facts", version='2.13') + + def execute_module(self): + + client = self.oneview_client.datacenters + info = {} + + if self.module.params.get('name'): + datacenters = client.get_by('name', self.module.params['name']) + + if self.options and 'visualContent' in self.options: + if datacenters: + info['datacenter_visual_content'] = client.get_visual_content(datacenters[0]['uri']) + else: + info['datacenter_visual_content'] = None + + info['datacenters'] = datacenters + else: + info['datacenters'] = client.get_all(**self.facts_params) + + if self.is_old_facts: + return dict(changed=False, + ansible_facts=info) + else: + return dict(changed=False, **info) + + +def main(): + DatacenterInfoModule().run() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/remote_management/oneview/oneview_enclosure_facts.py b/plugins/modules/remote_management/oneview/oneview_enclosure_facts.py new file mode 120000 index 0000000000..98e325454c --- /dev/null +++ b/plugins/modules/remote_management/oneview/oneview_enclosure_facts.py @@ -0,0 +1 @@ +oneview_enclosure_info.py \ No newline at end of file diff --git a/plugins/modules/remote_management/oneview/oneview_enclosure_info.py b/plugins/modules/remote_management/oneview/oneview_enclosure_info.py new file mode 100644 index 0000000000..6f31493622 --- /dev/null +++ b/plugins/modules/remote_management/oneview/oneview_enclosure_info.py @@ -0,0 +1,228 @@ +#!/usr/bin/python + +# Copyright: (c) 2016-2017, Hewlett Packard Enterprise Development LP +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: oneview_enclosure_info +short_description: Retrieve information about one or more Enclosures +description: + - Retrieve information about one or more of the Enclosures from OneView. + - This module was called C(oneview_enclosure_facts) before Ansible 2.9, returning C(ansible_facts). + Note that the M(oneview_enclosure_info) module no longer returns C(ansible_facts)! +requirements: + - hpOneView >= 2.0.1 +author: + - Felipe Bulsoni (@fgbulsoni) + - Thiago Miotto (@tmiotto) + - Adriane Cardozo (@adriane-cardozo) +options: + name: + description: + - Enclosure name. + options: + description: + - "List with options to gather additional information about an Enclosure and related resources. + Options allowed: C(script), C(environmentalConfiguration), and C(utilization). For the option C(utilization), + you can provide specific parameters." 
+ +extends_documentation_fragment: +- community.general.oneview +- community.general.oneview.factsparams + +''' + +EXAMPLES = ''' +- name: Gather information about all Enclosures + oneview_enclosure_info: + hostname: 172.16.101.48 + username: administrator + password: my_password + api_version: 500 + no_log: true + delegate_to: localhost + register: result +- debug: + msg: "{{ result.enclosures }}" + +- name: Gather paginated, filtered and sorted information about Enclosures + oneview_enclosure_info: + params: + start: 0 + count: 3 + sort: name:descending + filter: status=OK + hostname: 172.16.101.48 + username: administrator + password: my_password + api_version: 500 + no_log: true + delegate_to: localhost + register: result +- debug: + msg: "{{ result.enclosures }}" + +- name: Gather information about an Enclosure by name + oneview_enclosure_info: + name: Enclosure-Name + hostname: 172.16.101.48 + username: administrator + password: my_password + api_version: 500 + no_log: true + delegate_to: localhost + register: result +- debug: + msg: "{{ result.enclosures }}" + +- name: Gather information about an Enclosure by name with options + oneview_enclosure_info: + name: Test-Enclosure + options: + - script # optional + - environmentalConfiguration # optional + - utilization # optional + hostname: 172.16.101.48 + username: administrator + password: my_password + api_version: 500 + no_log: true + delegate_to: localhost + register: result +- debug: + msg: "{{ result.enclosures }}" +- debug: + msg: "{{ result.enclosure_script }}" +- debug: + msg: "{{ result.enclosure_environmental_configuration }}" +- debug: + msg: "{{ result.enclosure_utilization }}" + +- name: "Gather information about an Enclosure with temperature data at a resolution of one sample per day, between two + specified dates" + oneview_enclosure_info: + name: Test-Enclosure + options: + - utilization: # optional + fields: AmbientTemperature + filter: + - startDate=2016-07-01T14:29:42.000Z + - endDate=2017-07-01T03:29:42.000Z + view: day + refresh: false + hostname: 172.16.101.48 + username: administrator + password: my_password + api_version: 500 + no_log: true + delegate_to: localhost + register: result +- debug: + msg: "{{ result.enclosures }}" +- debug: + msg: "{{ result.enclosure_utilization }}" +''' + +RETURN = ''' +enclosures: + description: Has all the OneView information about the Enclosures. + returned: Always, but can be null. + type: dict + +enclosure_script: + description: Has all the OneView information about the script of an Enclosure. + returned: When requested, but can be null. + type: str + +enclosure_environmental_configuration: + description: Has all the OneView information about the environmental configuration of an Enclosure. + returned: When requested, but can be null. + type: dict + +enclosure_utilization: + description: Has all the OneView information about the utilization of an Enclosure. + returned: When requested, but can be null. 
+ type: dict +''' + +from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase + + +class EnclosureInfoModule(OneViewModuleBase): + argument_spec = dict(name=dict(type='str'), options=dict(type='list'), params=dict(type='dict')) + + def __init__(self): + super(EnclosureInfoModule, self).__init__(additional_arg_spec=self.argument_spec) + self.is_old_facts = self.module._name == 'oneview_enclosure_facts' + if self.is_old_facts: + self.module.deprecate("The 'oneview_enclosure_facts' module has been renamed to 'oneview_enclosure_info', " + "and the renamed one no longer returns ansible_facts", version='2.13') + + def execute_module(self): + + info = {} + + if self.module.params['name']: + enclosures = self._get_by_name(self.module.params['name']) + + if self.options and enclosures: + info = self._gather_optional_info(self.options, enclosures[0]) + else: + enclosures = self.oneview_client.enclosures.get_all(**self.facts_params) + + info['enclosures'] = enclosures + + if self.is_old_facts: + return dict(changed=False, + ansible_facts=info) + else: + return dict(changed=False, **info) + + def _gather_optional_info(self, options, enclosure): + + enclosure_client = self.oneview_client.enclosures + info = {} + + if options.get('script'): + info['enclosure_script'] = enclosure_client.get_script(enclosure['uri']) + if options.get('environmentalConfiguration'): + env_config = enclosure_client.get_environmental_configuration(enclosure['uri']) + info['enclosure_environmental_configuration'] = env_config + if options.get('utilization'): + info['enclosure_utilization'] = self._get_utilization(enclosure, options['utilization']) + + return info + + def _get_utilization(self, enclosure, params): + fields = view = refresh = filter = '' + + if isinstance(params, dict): + fields = params.get('fields') + view = params.get('view') + refresh = params.get('refresh') + filter = params.get('filter') + + return self.oneview_client.enclosures.get_utilization(enclosure['uri'], + fields=fields, + filter=filter, + refresh=refresh, + view=view) + + def _get_by_name(self, name): + return self.oneview_client.enclosures.get_by('name', name) + + +def main(): + EnclosureInfoModule().run() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/remote_management/oneview/oneview_ethernet_network.py b/plugins/modules/remote_management/oneview/oneview_ethernet_network.py new file mode 100644 index 0000000000..54ffea2005 --- /dev/null +++ b/plugins/modules/remote_management/oneview/oneview_ethernet_network.py @@ -0,0 +1,251 @@ +#!/usr/bin/python +# Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: oneview_ethernet_network +short_description: Manage OneView Ethernet Network resources +description: + - Provides an interface to manage Ethernet Network resources. Can create, update, or delete. +requirements: + - hpOneView >= 3.1.0 +author: + - Felipe Bulsoni (@fgbulsoni) + - Thiago Miotto (@tmiotto) + - Adriane Cardozo (@adriane-cardozo) +options: + state: + description: + - Indicates the desired state for the Ethernet Network resource. + - C(present) will ensure data properties are compliant with OneView. 
+ - C(absent) will remove the resource from OneView, if it exists. + - C(default_bandwidth_reset) will reset the network connection template to the default. + default: present + choices: [present, absent, default_bandwidth_reset] + data: + description: + - List with Ethernet Network properties. + required: true +extends_documentation_fragment: +- community.general.oneview +- community.general.oneview.validateetag + +''' + +EXAMPLES = ''' +- name: Ensure that the Ethernet Network is present using the default configuration + oneview_ethernet_network: + config: '/etc/oneview/oneview_config.json' + state: present + data: + name: 'Test Ethernet Network' + vlanId: '201' + delegate_to: localhost + +- name: Update the Ethernet Network changing bandwidth and purpose + oneview_ethernet_network: + config: '/etc/oneview/oneview_config.json' + state: present + data: + name: 'Test Ethernet Network' + purpose: Management + bandwidth: + maximumBandwidth: 3000 + typicalBandwidth: 2000 + delegate_to: localhost + +- name: Ensure that the Ethernet Network is present with name 'Renamed Ethernet Network' + oneview_ethernet_network: + config: '/etc/oneview/oneview_config.json' + state: present + data: + name: 'Test Ethernet Network' + newName: 'Renamed Ethernet Network' + delegate_to: localhost + +- name: Ensure that the Ethernet Network is absent + oneview_ethernet_network: + config: '/etc/oneview/oneview_config.json' + state: absent + data: + name: 'New Ethernet Network' + delegate_to: localhost + +- name: Create Ethernet networks in bulk + oneview_ethernet_network: + config: '/etc/oneview/oneview_config.json' + state: present + data: + vlanIdRange: '1-10,15,17' + purpose: General + namePrefix: TestNetwork + smartLink: false + privateNetwork: false + bandwidth: + maximumBandwidth: 10000 + typicalBandwidth: 2000 + delegate_to: localhost + +- name: Reset to the default network connection template + oneview_ethernet_network: + config: '/etc/oneview/oneview_config.json' + state: default_bandwidth_reset + data: + name: 'Test Ethernet Network' + delegate_to: localhost +''' + +RETURN = ''' +ethernet_network: + description: Has the facts about the Ethernet Networks. + returned: On state 'present'. Can be null. + type: dict + +ethernet_network_bulk: + description: Has the facts about the Ethernet Networks affected by the bulk insert. + returned: When 'vlanIdRange' attribute is in data argument. Can be null. + type: dict + +ethernet_network_connection_template: + description: Has the facts about the Ethernet Network Connection Template. + returned: On state 'default_bandwidth_reset'. Can be null. + type: dict +''' + +from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase, OneViewModuleResourceNotFound + + +class EthernetNetworkModule(OneViewModuleBase): + MSG_CREATED = 'Ethernet Network created successfully.' + MSG_UPDATED = 'Ethernet Network updated successfully.' + MSG_DELETED = 'Ethernet Network deleted successfully.' + MSG_ALREADY_PRESENT = 'Ethernet Network is already present.' + MSG_ALREADY_ABSENT = 'Ethernet Network is already absent.' + + MSG_BULK_CREATED = 'Ethernet Networks created successfully.' + MSG_MISSING_BULK_CREATED = 'Some missing Ethernet Networks were created successfully.' + MSG_BULK_ALREADY_EXIST = 'The specified Ethernet Networks already exist.' + MSG_CONNECTION_TEMPLATE_RESET = 'Ethernet Network connection template was reset to the default.' + MSG_ETHERNET_NETWORK_NOT_FOUND = 'Ethernet Network was not found.' 
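+
+    # For bulk requests (vlanIdRange in data), _bulk_present below first fetches
+    # the networks that already exist for the given namePrefix and vlanIdRange,
+    # drops their VLAN ids from the requested range, and bulk-creates only the
+    # missing ones, so repeated runs remain idempotent.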
+ + RESOURCE_FACT_NAME = 'ethernet_network' + + def __init__(self): + + argument_spec = dict( + state=dict(type='str', default='present', choices=['absent', 'default_bandwidth_reset', 'present']), + data=dict(type='dict', required=True), + ) + + super(EthernetNetworkModule, self).__init__(additional_arg_spec=argument_spec, validate_etag_support=True) + + self.resource_client = self.oneview_client.ethernet_networks + + def execute_module(self): + + changed, msg, ansible_facts, resource = False, '', {}, None + + if self.data.get('name'): + resource = self.get_by_name(self.data['name']) + + if self.state == 'present': + if self.data.get('vlanIdRange'): + return self._bulk_present() + else: + return self._present(resource) + elif self.state == 'absent': + return self.resource_absent(resource) + elif self.state == 'default_bandwidth_reset': + changed, msg, ansible_facts = self._default_bandwidth_reset(resource) + return dict(changed=changed, msg=msg, ansible_facts=ansible_facts) + + def _present(self, resource): + + bandwidth = self.data.pop('bandwidth', None) + scope_uris = self.data.pop('scopeUris', None) + result = self.resource_present(resource, self.RESOURCE_FACT_NAME) + + if bandwidth: + if self._update_connection_template(result['ansible_facts']['ethernet_network'], bandwidth)[0]: + result['changed'] = True + result['msg'] = self.MSG_UPDATED + + if scope_uris is not None: + result = self.resource_scopes_set(result, 'ethernet_network', scope_uris) + + return result + + def _bulk_present(self): + vlan_id_range = self.data['vlanIdRange'] + result = dict(ansible_facts={}) + ethernet_networks = self.resource_client.get_range(self.data['namePrefix'], vlan_id_range) + + if not ethernet_networks: + self.resource_client.create_bulk(self.data) + result['changed'] = True + result['msg'] = self.MSG_BULK_CREATED + + else: + vlan_ids = self.resource_client.dissociate_values_or_ranges(vlan_id_range) + for net in ethernet_networks[:]: + vlan_ids.remove(net['vlanId']) + + if len(vlan_ids) == 0: + result['msg'] = self.MSG_BULK_ALREADY_EXIST + result['changed'] = False + else: + if len(vlan_ids) == 1: + self.data['vlanIdRange'] = '{0}-{1}'.format(vlan_ids[0], vlan_ids[0]) + else: + self.data['vlanIdRange'] = ','.join(map(str, vlan_ids)) + + self.resource_client.create_bulk(self.data) + result['changed'] = True + result['msg'] = self.MSG_MISSING_BULK_CREATED + result['ansible_facts']['ethernet_network_bulk'] = self.resource_client.get_range(self.data['namePrefix'], vlan_id_range) + + return result + + def _update_connection_template(self, ethernet_network, bandwidth): + + if 'connectionTemplateUri' not in ethernet_network: + return False, None + + connection_template = self.oneview_client.connection_templates.get(ethernet_network['connectionTemplateUri']) + + merged_data = connection_template.copy() + merged_data.update({'bandwidth': bandwidth}) + + if not self.compare(connection_template, merged_data): + connection_template = self.oneview_client.connection_templates.update(merged_data) + return True, connection_template + else: + return False, None + + def _default_bandwidth_reset(self, resource): + + if not resource: + raise OneViewModuleResourceNotFound(self.MSG_ETHERNET_NETWORK_NOT_FOUND) + + default_connection_template = self.oneview_client.connection_templates.get_default() + + changed, connection_template = self._update_connection_template(resource, default_connection_template['bandwidth']) + + return changed, self.MSG_CONNECTION_TEMPLATE_RESET, dict( + 
ethernet_network_connection_template=connection_template) + + +def main(): + EthernetNetworkModule().run() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/remote_management/oneview/oneview_ethernet_network_facts.py b/plugins/modules/remote_management/oneview/oneview_ethernet_network_facts.py new file mode 120000 index 0000000000..b6dd1014c6 --- /dev/null +++ b/plugins/modules/remote_management/oneview/oneview_ethernet_network_facts.py @@ -0,0 +1 @@ +oneview_ethernet_network_info.py \ No newline at end of file diff --git a/plugins/modules/remote_management/oneview/oneview_ethernet_network_info.py b/plugins/modules/remote_management/oneview/oneview_ethernet_network_info.py new file mode 100644 index 0000000000..56093f38f7 --- /dev/null +++ b/plugins/modules/remote_management/oneview/oneview_ethernet_network_info.py @@ -0,0 +1,168 @@ +#!/usr/bin/python +# Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: oneview_ethernet_network_info +short_description: Retrieve the information about one or more of the OneView Ethernet Networks +description: + - Retrieve the information about one or more of the Ethernet Networks from OneView. + - This module was called C(oneview_ethernet_network_facts) before Ansible 2.9, returning C(ansible_facts). + Note that the M(oneview_ethernet_network_info) module no longer returns C(ansible_facts)! +requirements: + - hpOneView >= 2.0.1 +author: + - Felipe Bulsoni (@fgbulsoni) + - Thiago Miotto (@tmiotto) + - Adriane Cardozo (@adriane-cardozo) +options: + name: + description: + - Ethernet Network name. + options: + description: + - "List with options to gather additional information about an Ethernet Network and related resources. + Options allowed: C(associatedProfiles) and C(associatedUplinkGroups)." 
+extends_documentation_fragment: +- community.general.oneview +- community.general.oneview.factsparams + +''' + +EXAMPLES = ''' +- name: Gather information about all Ethernet Networks + oneview_ethernet_network_info: + config: /etc/oneview/oneview_config.json + delegate_to: localhost + register: result + +- debug: + msg: "{{ result.ethernet_networks }}" + +- name: Gather paginated and filtered information about Ethernet Networks + oneview_ethernet_network_info: + config: /etc/oneview/oneview_config.json + params: + start: 1 + count: 3 + sort: 'name:descending' + filter: 'purpose=General' + delegate_to: localhost + register: result + +- debug: + msg: "{{ result.ethernet_networks }}" + +- name: Gather information about an Ethernet Network by name + oneview_ethernet_network_info: + config: /etc/oneview/oneview_config.json + name: Ethernet network name + delegate_to: localhost + register: result + +- debug: + msg: "{{ result.ethernet_networks }}" + +- name: Gather information about an Ethernet Network by name with options + oneview_ethernet_network_info: + config: /etc/oneview/oneview_config.json + name: eth1 + options: + - associatedProfiles + - associatedUplinkGroups + delegate_to: localhost + register: result + +- debug: + msg: "{{ result.enet_associated_profiles }}" +- debug: + msg: "{{ result.enet_associated_uplink_groups }}" +''' + +RETURN = ''' +ethernet_networks: + description: Has all the OneView information about the Ethernet Networks. + returned: Always, but can be null. + type: dict + +enet_associated_profiles: + description: Has all the OneView information about the profiles which are using the Ethernet network. + returned: When requested, but can be null. + type: dict + +enet_associated_uplink_groups: + description: Has all the OneView information about the uplink sets which are using the Ethernet network. + returned: When requested, but can be null. 
+ type: dict +''' + +from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase + + +class EthernetNetworkInfoModule(OneViewModuleBase): + argument_spec = dict( + name=dict(type='str'), + options=dict(type='list'), + params=dict(type='dict') + ) + + def __init__(self): + super(EthernetNetworkInfoModule, self).__init__(additional_arg_spec=self.argument_spec) + self.is_old_facts = self.module._name == 'oneview_ethernet_network_facts' + if self.is_old_facts: + self.module.deprecate("The 'oneview_ethernet_network_facts' module has been renamed to 'oneview_ethernet_network_info', " + "and the renamed one no longer returns ansible_facts", version='2.13') + + self.resource_client = self.oneview_client.ethernet_networks + + def execute_module(self): + info = {} + if self.module.params['name']: + ethernet_networks = self.resource_client.get_by('name', self.module.params['name']) + + if self.module.params.get('options') and ethernet_networks: + info = self.__gather_optional_info(ethernet_networks[0]) + else: + ethernet_networks = self.resource_client.get_all(**self.facts_params) + + info['ethernet_networks'] = ethernet_networks + + if self.is_old_facts: + return dict(changed=False, ansible_facts=info) + else: + return dict(changed=False, **info) + + def __gather_optional_info(self, ethernet_network): + + info = {} + + if self.options.get('associatedProfiles'): + info['enet_associated_profiles'] = self.__get_associated_profiles(ethernet_network) + if self.options.get('associatedUplinkGroups'): + info['enet_associated_uplink_groups'] = self.__get_associated_uplink_groups(ethernet_network) + + return info + + def __get_associated_profiles(self, ethernet_network): + associated_profiles = self.resource_client.get_associated_profiles(ethernet_network['uri']) + return [self.oneview_client.server_profiles.get(x) for x in associated_profiles] + + def __get_associated_uplink_groups(self, ethernet_network): + uplink_groups = self.resource_client.get_associated_uplink_groups(ethernet_network['uri']) + return [self.oneview_client.uplink_sets.get(x) for x in uplink_groups] + + +def main(): + EthernetNetworkInfoModule().run() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/remote_management/oneview/oneview_fc_network.py b/plugins/modules/remote_management/oneview/oneview_fc_network.py new file mode 100644 index 0000000000..6335bd6239 --- /dev/null +++ b/plugins/modules/remote_management/oneview/oneview_fc_network.py @@ -0,0 +1,124 @@ +#!/usr/bin/python +# Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: oneview_fc_network +short_description: Manage OneView Fibre Channel Network resources. +description: + - Provides an interface to manage Fibre Channel Network resources. Can create, update, and delete. +requirements: + - "hpOneView >= 4.0.0" +author: "Felipe Bulsoni (@fgbulsoni)" +options: + state: + description: + - Indicates the desired state for the Fibre Channel Network resource. + C(present) will ensure data properties are compliant with OneView. + C(absent) will remove the resource from OneView, if it exists. 
+ choices: ['present', 'absent'] + data: + description: + - List with the Fibre Channel Network properties. + required: true + +extends_documentation_fragment: +- community.general.oneview +- community.general.oneview.validateetag + +''' + +EXAMPLES = ''' +- name: Ensure that the Fibre Channel Network is present using the default configuration + oneview_fc_network: + config: "{{ config_file_path }}" + state: present + data: + name: 'New FC Network' + +- name: Ensure that the Fibre Channel Network is present with fabricType 'DirectAttach' + oneview_fc_network: + config: "{{ config_file_path }}" + state: present + data: + name: 'New FC Network' + fabricType: 'DirectAttach' + +- name: Ensure that the Fibre Channel Network is present and is inserted in the desired scopes + oneview_fc_network: + config: "{{ config_file_path }}" + state: present + data: + name: 'New FC Network' + scopeUris: + - '/rest/scopes/00SC123456' + - '/rest/scopes/01SC123456' + +- name: Ensure that the Fibre Channel Network is absent + oneview_fc_network: + config: "{{ config_file_path }}" + state: absent + data: + name: 'New FC Network' +''' + +RETURN = ''' +fc_network: + description: Has the facts about the managed OneView FC Network. + returned: On state 'present'. Can be null. + type: dict +''' + +from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase + + +class FcNetworkModule(OneViewModuleBase): + MSG_CREATED = 'FC Network created successfully.' + MSG_UPDATED = 'FC Network updated successfully.' + MSG_DELETED = 'FC Network deleted successfully.' + MSG_ALREADY_PRESENT = 'FC Network is already present.' + MSG_ALREADY_ABSENT = 'FC Network is already absent.' + RESOURCE_FACT_NAME = 'fc_network' + + def __init__(self): + + additional_arg_spec = dict(data=dict(required=True, type='dict'), + state=dict( + required=True, + choices=['present', 'absent'])) + + super(FcNetworkModule, self).__init__(additional_arg_spec=additional_arg_spec, + validate_etag_support=True) + + self.resource_client = self.oneview_client.fc_networks + + def execute_module(self): + resource = self.get_by_name(self.data['name']) + + if self.state == 'present': + return self._present(resource) + else: + return self.resource_absent(resource) + + def _present(self, resource): + scope_uris = self.data.pop('scopeUris', None) + result = self.resource_present(resource, self.RESOURCE_FACT_NAME) + if scope_uris is not None: + result = self.resource_scopes_set(result, 'fc_network', scope_uris) + return result + + +def main(): + FcNetworkModule().run() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/remote_management/oneview/oneview_fc_network_facts.py b/plugins/modules/remote_management/oneview/oneview_fc_network_facts.py new file mode 120000 index 0000000000..f7739f3bab --- /dev/null +++ b/plugins/modules/remote_management/oneview/oneview_fc_network_facts.py @@ -0,0 +1 @@ +oneview_fc_network_info.py \ No newline at end of file diff --git a/plugins/modules/remote_management/oneview/oneview_fc_network_info.py b/plugins/modules/remote_management/oneview/oneview_fc_network_info.py new file mode 100644 index 0000000000..582988a826 --- /dev/null +++ b/plugins/modules/remote_management/oneview/oneview_fc_network_info.py @@ -0,0 +1,113 @@ +#!/usr/bin/python +# Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = 
type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: oneview_fc_network_info +short_description: Retrieve the information about one or more of the OneView Fibre Channel Networks +description: + - Retrieve the information about one or more of the Fibre Channel Networks from OneView. + - This module was called C(oneview_fc_network_facts) before Ansible 2.9, returning C(ansible_facts). + Note that the M(oneview_fc_network_info) module no longer returns C(ansible_facts)! +requirements: + - hpOneView >= 2.0.1 +author: + - Felipe Bulsoni (@fgbulsoni) + - Thiago Miotto (@tmiotto) + - Adriane Cardozo (@adriane-cardozo) +options: + name: + description: + - Fibre Channel Network name. + +extends_documentation_fragment: +- community.general.oneview +- community.general.oneview.factsparams + +''' + +EXAMPLES = ''' +- name: Gather information about all Fibre Channel Networks + oneview_fc_network_info: + config: /etc/oneview/oneview_config.json + delegate_to: localhost + register: result + +- debug: + msg: "{{ result.fc_networks }}" + +- name: Gather paginated, filtered and sorted information about Fibre Channel Networks + oneview_fc_network_info: + config: /etc/oneview/oneview_config.json + params: + start: 1 + count: 3 + sort: 'name:descending' + filter: 'fabricType=FabricAttach' + delegate_to: localhost + register: result +- debug: + msg: "{{ result.fc_networks }}" + +- name: Gather information about a Fibre Channel Network by name + oneview_fc_network_info: + config: /etc/oneview/oneview_config.json + name: network name + delegate_to: localhost + register: result + +- debug: + msg: "{{ result.fc_networks }}" +''' + +RETURN = ''' +fc_networks: + description: Has all the OneView information about the Fibre Channel Networks. + returned: Always, but can be null. 
+ type: dict +''' + +from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase + + +class FcNetworkInfoModule(OneViewModuleBase): + def __init__(self): + + argument_spec = dict( + name=dict(required=False, type='str'), + params=dict(required=False, type='dict') + ) + + super(FcNetworkInfoModule, self).__init__(additional_arg_spec=argument_spec) + self.is_old_facts = self.module._name == 'oneview_fc_network_facts' + if self.is_old_facts: + self.module.deprecate("The 'oneview_fc_network_facts' module has been renamed to 'oneview_fc_network_info', " + "and the renamed one no longer returns ansible_facts", version='2.13') + + def execute_module(self): + + if self.module.params['name']: + fc_networks = self.oneview_client.fc_networks.get_by('name', self.module.params['name']) + else: + fc_networks = self.oneview_client.fc_networks.get_all(**self.facts_params) + + if self.is_old_facts: + return dict(changed=False, ansible_facts=dict(fc_networks=fc_networks)) + else: + return dict(changed=False, fc_networks=fc_networks) + + +def main(): + FcNetworkInfoModule().run() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/remote_management/oneview/oneview_fcoe_network.py b/plugins/modules/remote_management/oneview/oneview_fcoe_network.py new file mode 100644 index 0000000000..a3ac24e243 --- /dev/null +++ b/plugins/modules/remote_management/oneview/oneview_fcoe_network.py @@ -0,0 +1,121 @@ +#!/usr/bin/python +# Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: oneview_fcoe_network +short_description: Manage OneView FCoE Network resources +description: + - Provides an interface to manage FCoE Network resources. Can create, update, or delete. +requirements: + - "python >= 2.7.9" + - "hpOneView >= 4.0.0" +author: "Felipe Bulsoni (@fgbulsoni)" +options: + state: + description: + - Indicates the desired state for the FCoE Network resource. + C(present) will ensure data properties are compliant with OneView. + C(absent) will remove the resource from OneView, if it exists. + default: present + choices: ['present', 'absent'] + data: + description: + - List with FCoE Network properties. + required: true + +extends_documentation_fragment: +- community.general.oneview +- community.general.oneview.validateetag + +''' + +EXAMPLES = ''' +- name: Ensure that FCoE Network is present using the default configuration + oneview_fcoe_network: + config: '/etc/oneview/oneview_config.json' + state: present + data: + name: Test FCoE Network + vlanId: 201 + delegate_to: localhost + +- name: Update the FCOE network scopes + oneview_fcoe_network: + config: '/etc/oneview/oneview_config.json' + state: present + data: + name: New FCoE Network + scopeUris: + - '/rest/scopes/00SC123456' + - '/rest/scopes/01SC123456' + delegate_to: localhost + +- name: Ensure that FCoE Network is absent + oneview_fcoe_network: + config: '/etc/oneview/oneview_config.json' + state: absent + data: + name: New FCoE Network + delegate_to: localhost +''' + +RETURN = ''' +fcoe_network: + description: Has the facts about the OneView FCoE Networks. + returned: On state 'present'. Can be null. 
+ type: dict +''' + +from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase + + +class FcoeNetworkModule(OneViewModuleBase): + MSG_CREATED = 'FCoE Network created successfully.' + MSG_UPDATED = 'FCoE Network updated successfully.' + MSG_DELETED = 'FCoE Network deleted successfully.' + MSG_ALREADY_PRESENT = 'FCoE Network is already present.' + MSG_ALREADY_ABSENT = 'FCoE Network is already absent.' + RESOURCE_FACT_NAME = 'fcoe_network' + + def __init__(self): + + additional_arg_spec = dict(data=dict(required=True, type='dict'), + state=dict(default='present', + choices=['present', 'absent'])) + + super(FcoeNetworkModule, self).__init__(additional_arg_spec=additional_arg_spec, + validate_etag_support=True) + + self.resource_client = self.oneview_client.fcoe_networks + + def execute_module(self): + resource = self.get_by_name(self.data.get('name')) + + if self.state == 'present': + return self.__present(resource) + elif self.state == 'absent': + return self.resource_absent(resource) + + def __present(self, resource): + scope_uris = self.data.pop('scopeUris', None) + result = self.resource_present(resource, self.RESOURCE_FACT_NAME) + if scope_uris is not None: + result = self.resource_scopes_set(result, 'fcoe_network', scope_uris) + return result + + +def main(): + FcoeNetworkModule().run() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/remote_management/oneview/oneview_fcoe_network_facts.py b/plugins/modules/remote_management/oneview/oneview_fcoe_network_facts.py new file mode 120000 index 0000000000..3ff2b0deaa --- /dev/null +++ b/plugins/modules/remote_management/oneview/oneview_fcoe_network_facts.py @@ -0,0 +1 @@ +oneview_fcoe_network_info.py \ No newline at end of file diff --git a/plugins/modules/remote_management/oneview/oneview_fcoe_network_info.py b/plugins/modules/remote_management/oneview/oneview_fcoe_network_info.py new file mode 100644 index 0000000000..f96b17e2f2 --- /dev/null +++ b/plugins/modules/remote_management/oneview/oneview_fcoe_network_info.py @@ -0,0 +1,113 @@ +#!/usr/bin/python +# Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: oneview_fcoe_network_info +short_description: Retrieve the information about one or more of the OneView FCoE Networks +description: + - Retrieve the information about one or more of the FCoE Networks from OneView. + - This module was called C(oneview_fcoe_network_facts) before Ansible 2.9, returning C(ansible_facts). + Note that the M(oneview_fcoe_network_info) module no longer returns C(ansible_facts)! +requirements: + - hpOneView >= 2.0.1 +author: + - Felipe Bulsoni (@fgbulsoni) + - Thiago Miotto (@tmiotto) + - Adriane Cardozo (@adriane-cardozo) +options: + name: + description: + - FCoE Network name. 
+extends_documentation_fragment: +- community.general.oneview +- community.general.oneview.factsparams + +''' + +EXAMPLES = ''' +- name: Gather information about all FCoE Networks + oneview_fcoe_network_info: + config: /etc/oneview/oneview_config.json + delegate_to: localhost + register: result + +- debug: + msg: "{{ result.fcoe_networks }}" + +- name: Gather paginated, filtered and sorted information about FCoE Networks + oneview_fcoe_network_info: + config: /etc/oneview/oneview_config.json + params: + start: 0 + count: 3 + sort: 'name:descending' + filter: 'vlanId=2' + delegate_to: localhost + register: result + +- debug: + msg: "{{ result.fcoe_networks }}" + +- name: Gather information about a FCoE Network by name + oneview_fcoe_network_info: + config: /etc/oneview/oneview_config.json + name: Test FCoE Network Information + delegate_to: localhost + register: result + +- debug: + msg: "{{ result.fcoe_networks }}" +''' + +RETURN = ''' +fcoe_networks: + description: Has all the OneView information about the FCoE Networks. + returned: Always, but can be null. + type: dict +''' + +from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase + + +class FcoeNetworkInfoModule(OneViewModuleBase): + def __init__(self): + argument_spec = dict( + name=dict(type='str'), + params=dict(type='dict'), + ) + + super(FcoeNetworkInfoModule, self).__init__(additional_arg_spec=argument_spec) + self.is_old_facts = self.module._name == 'oneview_fcoe_network_facts' + if self.is_old_facts: + self.module.deprecate("The 'oneview_fcoe_network_facts' module has been renamed to 'oneview_fcoe_network_info', " + "and the renamed one no longer returns ansible_facts", version='2.13') + + def execute_module(self): + + if self.module.params['name']: + fcoe_networks = self.oneview_client.fcoe_networks.get_by('name', self.module.params['name']) + else: + fcoe_networks = self.oneview_client.fcoe_networks.get_all(**self.facts_params) + + if self.is_old_facts: + return dict(changed=False, + ansible_facts=dict(fcoe_networks=fcoe_networks)) + else: + return dict(changed=False, fcoe_networks=fcoe_networks) + + +def main(): + FcoeNetworkInfoModule().run() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/remote_management/oneview/oneview_logical_interconnect_group.py b/plugins/modules/remote_management/oneview/oneview_logical_interconnect_group.py new file mode 100644 index 0000000000..4d553744d3 --- /dev/null +++ b/plugins/modules/remote_management/oneview/oneview_logical_interconnect_group.py @@ -0,0 +1,168 @@ +#!/usr/bin/python + +# Copyright: (c) 2016-2017, Hewlett Packard Enterprise Development LP +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: oneview_logical_interconnect_group +short_description: Manage OneView Logical Interconnect Group resources +description: + - Provides an interface to manage Logical Interconnect Group resources. Can create, update, or delete. +requirements: + - hpOneView >= 4.0.0 +author: + - Felipe Bulsoni (@fgbulsoni) + - Thiago Miotto (@tmiotto) + - Adriane Cardozo (@adriane-cardozo) +options: + state: + description: + - Indicates the desired state for the Logical Interconnect Group resource. + C(absent) will remove the resource from OneView, if it exists. 
+ C(present) will ensure data properties are compliant with OneView. + choices: [absent, present] + default: present + data: + description: + - List with the Logical Interconnect Group properties. + required: true +extends_documentation_fragment: +- community.general.oneview +- community.general.oneview.validateetag + +''' + +EXAMPLES = ''' +- name: Ensure that the Logical Interconnect Group is present + oneview_logical_interconnect_group: + config: /etc/oneview/oneview_config.json + state: present + data: + name: Test Logical Interconnect Group + uplinkSets: [] + enclosureType: C7000 + interconnectMapTemplate: + interconnectMapEntryTemplates: + - logicalDownlinkUri: ~ + logicalLocation: + locationEntries: + - relativeValue: 1 + type: Bay + - relativeValue: 1 + type: Enclosure + permittedInterconnectTypeName: HP VC Flex-10/10D Module + # Alternatively you can inform permittedInterconnectTypeUri + delegate_to: localhost + +- name: Ensure that the Logical Interconnect Group has the specified scopes + oneview_logical_interconnect_group: + config: /etc/oneview/oneview_config.json + state: present + data: + name: Test Logical Interconnect Group + scopeUris: + - /rest/scopes/00SC123456 + - /rest/scopes/01SC123456 + delegate_to: localhost + +- name: Ensure that the Logical Interconnect Group is present with name 'Test' + oneview_logical_interconnect_group: + config: /etc/oneview/oneview_config.json + state: present + data: + name: New Logical Interconnect Group + newName: Test + delegate_to: localhost + +- name: Ensure that the Logical Interconnect Group is absent + oneview_logical_interconnect_group: + config: /etc/oneview/oneview_config.json + state: absent + data: + name: New Logical Interconnect Group + delegate_to: localhost +''' + +RETURN = ''' +logical_interconnect_group: + description: Has the facts about the OneView Logical Interconnect Group. + returned: On state 'present'. Can be null. + type: dict +''' + +from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase, OneViewModuleResourceNotFound + + +class LogicalInterconnectGroupModule(OneViewModuleBase): + MSG_CREATED = 'Logical Interconnect Group created successfully.' + MSG_UPDATED = 'Logical Interconnect Group updated successfully.' + MSG_DELETED = 'Logical Interconnect Group deleted successfully.' + MSG_ALREADY_PRESENT = 'Logical Interconnect Group is already present.' + MSG_ALREADY_ABSENT = 'Logical Interconnect Group is already absent.' + MSG_INTERCONNECT_TYPE_NOT_FOUND = 'Interconnect Type was not found.' 
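+
+    # Sketch of the name-to-URI rewriting done by __replace_name_by_uris() below
+    # (the URI here is hypothetical): an interconnect map entry such as
+    #   {'permittedInterconnectTypeName': 'HP VC Flex-10/10D Module'}
+    # is sent to OneView as
+    #   {'permittedInterconnectTypeUri': '/rest/interconnect-types/<id>'}
+    # and a name that cannot be resolved raises the message above.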
+ + RESOURCE_FACT_NAME = 'logical_interconnect_group' + + def __init__(self): + argument_spec = dict( + state=dict(default='present', choices=['present', 'absent']), + data=dict(required=True, type='dict') + ) + + super(LogicalInterconnectGroupModule, self).__init__(additional_arg_spec=argument_spec, + validate_etag_support=True) + self.resource_client = self.oneview_client.logical_interconnect_groups + + def execute_module(self): + resource = self.get_by_name(self.data['name']) + + if self.state == 'present': + return self.__present(resource) + elif self.state == 'absent': + return self.resource_absent(resource) + + def __present(self, resource): + scope_uris = self.data.pop('scopeUris', None) + + self.__replace_name_by_uris(self.data) + result = self.resource_present(resource, self.RESOURCE_FACT_NAME) + + if scope_uris is not None: + result = self.resource_scopes_set(result, 'logical_interconnect_group', scope_uris) + + return result + + def __replace_name_by_uris(self, data): + map_template = data.get('interconnectMapTemplate') + + if map_template: + map_entry_templates = map_template.get('interconnectMapEntryTemplates') + if map_entry_templates: + for value in map_entry_templates: + permitted_interconnect_type_name = value.pop('permittedInterconnectTypeName', None) + if permitted_interconnect_type_name: + value['permittedInterconnectTypeUri'] = self.__get_interconnect_type_by_name( + permitted_interconnect_type_name).get('uri') + + def __get_interconnect_type_by_name(self, name): + i_type = self.oneview_client.interconnect_types.get_by('name', name) + if i_type: + return i_type[0] + else: + raise OneViewModuleResourceNotFound(self.MSG_INTERCONNECT_TYPE_NOT_FOUND) + + +def main(): + LogicalInterconnectGroupModule().run() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/remote_management/oneview/oneview_logical_interconnect_group_facts.py b/plugins/modules/remote_management/oneview/oneview_logical_interconnect_group_facts.py new file mode 120000 index 0000000000..2539865f5f --- /dev/null +++ b/plugins/modules/remote_management/oneview/oneview_logical_interconnect_group_facts.py @@ -0,0 +1 @@ +oneview_logical_interconnect_group_info.py \ No newline at end of file diff --git a/plugins/modules/remote_management/oneview/oneview_logical_interconnect_group_info.py b/plugins/modules/remote_management/oneview/oneview_logical_interconnect_group_info.py new file mode 100644 index 0000000000..26f6d6eff6 --- /dev/null +++ b/plugins/modules/remote_management/oneview/oneview_logical_interconnect_group_info.py @@ -0,0 +1,125 @@ +#!/usr/bin/python + +# Copyright: (c) 2016-2017, Hewlett Packard Enterprise Development LP +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: oneview_logical_interconnect_group_info +short_description: Retrieve information about one or more of the OneView Logical Interconnect Groups +description: + - Retrieve information about one or more of the Logical Interconnect Groups from OneView + - This module was called C(oneview_logical_interconnect_group_facts) before Ansible 2.9, returning C(ansible_facts). + Note that the M(oneview_logical_interconnect_group_info) module no longer returns C(ansible_facts)! 
+requirements: + - hpOneView >= 2.0.1 +author: + - Felipe Bulsoni (@fgbulsoni) + - Thiago Miotto (@tmiotto) + - Adriane Cardozo (@adriane-cardozo) +options: + name: + description: + - Logical Interconnect Group name. +extends_documentation_fragment: +- community.general.oneview +- community.general.oneview.factsparams + +''' + +EXAMPLES = ''' +- name: Gather information about all Logical Interconnect Groups + oneview_logical_interconnect_group_info: + hostname: 172.16.101.48 + username: administrator + password: my_password + api_version: 500 + no_log: true + delegate_to: localhost + register: result + +- debug: + msg: "{{ result.logical_interconnect_groups }}" + +- name: Gather paginated, filtered and sorted information about Logical Interconnect Groups + oneview_logical_interconnect_group_info: + params: + start: 0 + count: 3 + sort: name:descending + filter: name=LIGName + hostname: 172.16.101.48 + username: administrator + password: my_password + api_version: 500 + no_log: true + delegate_to: localhost + register: result + +- debug: + msg: "{{ result.logical_interconnect_groups }}" + +- name: Gather information about a Logical Interconnect Group by name + oneview_logical_interconnect_group_info: + name: logical interconnect group name + hostname: 172.16.101.48 + username: administrator + password: my_password + api_version: 500 + no_log: true + delegate_to: localhost + register: result + +- debug: + msg: "{{ result.logical_interconnect_groups }}" +''' + +RETURN = ''' +logical_interconnect_groups: + description: Has all the OneView information about the Logical Interconnect Groups. + returned: Always, but can be null. + type: dict +''' + +from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase + + +class LogicalInterconnectGroupInfoModule(OneViewModuleBase): + def __init__(self): + + argument_spec = dict( + name=dict(type='str'), + params=dict(type='dict'), + ) + + super(LogicalInterconnectGroupInfoModule, self).__init__(additional_arg_spec=argument_spec) + self.is_old_facts = self.module._name == 'oneview_logical_interconnect_group_facts' + if self.is_old_facts: + self.module.deprecate("The 'oneview_logical_interconnect_group_facts' module has been renamed to 'oneview_logical_interconnect_group_info', " + "and the renamed one no longer returns ansible_facts", version='2.13') + + def execute_module(self): + if self.module.params.get('name'): + ligs = self.oneview_client.logical_interconnect_groups.get_by('name', self.module.params['name']) + else: + ligs = self.oneview_client.logical_interconnect_groups.get_all(**self.facts_params) + + if self.is_old_facts: + return dict(changed=False, ansible_facts=dict(logical_interconnect_groups=ligs)) + else: + return dict(changed=False, logical_interconnect_groups=ligs) + + +def main(): + LogicalInterconnectGroupInfoModule().run() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/remote_management/oneview/oneview_network_set.py b/plugins/modules/remote_management/oneview/oneview_network_set.py new file mode 100644 index 0000000000..08828d3d59 --- /dev/null +++ b/plugins/modules/remote_management/oneview/oneview_network_set.py @@ -0,0 +1,154 @@ +#!/usr/bin/python +# Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 
'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: oneview_network_set +short_description: Manage HPE OneView Network Set resources +description: + - Provides an interface to manage Network Set resources. Can create, update, or delete. +requirements: + - hpOneView >= 4.0.0 +author: + - Felipe Bulsoni (@fgbulsoni) + - Thiago Miotto (@tmiotto) + - Adriane Cardozo (@adriane-cardozo) +options: + state: + description: + - Indicates the desired state for the Network Set resource. + - C(present) will ensure data properties are compliant with OneView. + - C(absent) will remove the resource from OneView, if it exists. + default: present + choices: ['present', 'absent'] + data: + description: + - List with the Network Set properties. + required: true + +extends_documentation_fragment: +- community.general.oneview +- community.general.oneview.validateetag + +''' + +EXAMPLES = ''' +- name: Create a Network Set + oneview_network_set: + config: /etc/oneview/oneview_config.json + state: present + data: + name: OneViewSDK Test Network Set + networkUris: + - Test Ethernet Network_1 # can be a name + - /rest/ethernet-networks/e4360c9d-051d-4931-b2aa-7de846450dd8 # or a URI + delegate_to: localhost + +- name: Update the Network Set name to 'OneViewSDK Test Network Set - Renamed' and change the associated networks + oneview_network_set: + config: /etc/oneview/oneview_config.json + state: present + data: + name: OneViewSDK Test Network Set + newName: OneViewSDK Test Network Set - Renamed + networkUris: + - Test Ethernet Network_1 + delegate_to: localhost + +- name: Delete the Network Set + oneview_network_set: + config: /etc/oneview/oneview_config.json + state: absent + data: + name: OneViewSDK Test Network Set - Renamed + delegate_to: localhost + +- name: Update the Network set with two scopes + oneview_network_set: + config: /etc/oneview/oneview_config.json + state: present + data: + name: OneViewSDK Test Network Set + scopeUris: + - /rest/scopes/01SC123456 + - /rest/scopes/02SC123456 + delegate_to: localhost +''' + +RETURN = ''' +network_set: + description: Has the facts about the Network Set. + returned: On state 'present', but can be null. + type: dict +''' + +from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase, OneViewModuleResourceNotFound + + +class NetworkSetModule(OneViewModuleBase): + MSG_CREATED = 'Network Set created successfully.' + MSG_UPDATED = 'Network Set updated successfully.' + MSG_DELETED = 'Network Set deleted successfully.' + MSG_ALREADY_PRESENT = 'Network Set is already present.' + MSG_ALREADY_ABSENT = 'Network Set is already absent.' 
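+    # _replace_network_name_by_uri() (below) accepts each 'networkUris' entry either
+    # as a full '/rest/ethernet-networks/...' URI or as a plain Ethernet network name;
+    # names are resolved via get_by('name', ...), and an unknown name raises the
+    # message that follows.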
+ MSG_ETHERNET_NETWORK_NOT_FOUND = 'Ethernet Network not found: ' + RESOURCE_FACT_NAME = 'network_set' + + argument_spec = dict( + state=dict(default='present', choices=['present', 'absent']), + data=dict(required=True, type='dict')) + + def __init__(self): + super(NetworkSetModule, self).__init__(additional_arg_spec=self.argument_spec, + validate_etag_support=True) + self.resource_client = self.oneview_client.network_sets + + def execute_module(self): + resource = self.get_by_name(self.data.get('name')) + + if self.state == 'present': + return self._present(resource) + elif self.state == 'absent': + return self.resource_absent(resource) + + def _present(self, resource): + scope_uris = self.data.pop('scopeUris', None) + self._replace_network_name_by_uri(self.data) + result = self.resource_present(resource, self.RESOURCE_FACT_NAME) + if scope_uris is not None: + result = self.resource_scopes_set(result, self.RESOURCE_FACT_NAME, scope_uris) + return result + + def _get_ethernet_network_by_name(self, name): + result = self.oneview_client.ethernet_networks.get_by('name', name) + return result[0] if result else None + + def _get_network_uri(self, network_name_or_uri): + if network_name_or_uri.startswith('/rest/ethernet-networks'): + return network_name_or_uri + else: + enet_network = self._get_ethernet_network_by_name(network_name_or_uri) + if enet_network: + return enet_network['uri'] + else: + raise OneViewModuleResourceNotFound(self.MSG_ETHERNET_NETWORK_NOT_FOUND + network_name_or_uri) + + def _replace_network_name_by_uri(self, data): + if 'networkUris' in data: + data['networkUris'] = [self._get_network_uri(x) for x in data['networkUris']] + + +def main(): + NetworkSetModule().run() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/remote_management/oneview/oneview_network_set_facts.py b/plugins/modules/remote_management/oneview/oneview_network_set_facts.py new file mode 120000 index 0000000000..78f61020d5 --- /dev/null +++ b/plugins/modules/remote_management/oneview/oneview_network_set_facts.py @@ -0,0 +1 @@ +oneview_network_set_info.py \ No newline at end of file diff --git a/plugins/modules/remote_management/oneview/oneview_network_set_info.py b/plugins/modules/remote_management/oneview/oneview_network_set_info.py new file mode 100644 index 0000000000..d2ecb8333c --- /dev/null +++ b/plugins/modules/remote_management/oneview/oneview_network_set_info.py @@ -0,0 +1,169 @@ +#!/usr/bin/python +# Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: oneview_network_set_info +short_description: Retrieve information about the OneView Network Sets +description: + - Retrieve information about the Network Sets from OneView. + - This module was called C(oneview_network_set_facts) before Ansible 2.9, returning C(ansible_facts). + Note that the M(oneview_network_set_info) module no longer returns C(ansible_facts)! +requirements: + - hpOneView >= 2.0.1 +author: + - Felipe Bulsoni (@fgbulsoni) + - Thiago Miotto (@tmiotto) + - Adriane Cardozo (@adriane-cardozo) +options: + name: + description: + - Network Set name. + + options: + description: + - "List with options to gather information about Network Set. + Option allowed: C(withoutEthernet). 
+ The option C(withoutEthernet) retrieves the list of network_sets excluding Ethernet networks." + +extends_documentation_fragment: +- community.general.oneview +- community.general.oneview.factsparams + +''' + +EXAMPLES = ''' +- name: Gather information about all Network Sets + oneview_network_set_info: + hostname: 172.16.101.48 + username: administrator + password: my_password + api_version: 500 + no_log: true + delegate_to: localhost + register: result + +- debug: + msg: "{{ result.network_sets }}" + +- name: Gather paginated, filtered, and sorted information about Network Sets + oneview_network_set_info: + hostname: 172.16.101.48 + username: administrator + password: my_password + api_version: 500 + params: + start: 0 + count: 3 + sort: 'name:descending' + filter: name='netset001' + no_log: true + delegate_to: localhost + register: result + +- debug: + msg: "{{ result.network_sets }}" + +- name: Gather information about all Network Sets, excluding Ethernet networks + oneview_network_set_info: + hostname: 172.16.101.48 + username: administrator + password: my_password + api_version: 500 + options: + - withoutEthernet + no_log: true + delegate_to: localhost + register: result + +- debug: + msg: "{{ result.network_sets }}" + +- name: Gather information about a Network Set by name + oneview_network_set_info: + hostname: 172.16.101.48 + username: administrator + password: my_password + api_version: 500 + name: Name of the Network Set + no_log: true + delegate_to: localhost + register: result + +- debug: + msg: "{{ result.network_sets }}" + +- name: Gather information about a Network Set by name, excluding Ethernet networks + oneview_network_set_info: + hostname: 172.16.101.48 + username: administrator + password: my_password + api_version: 500 + name: Name of the Network Set + options: + - withoutEthernet + no_log: true + delegate_to: localhost + register: result + +- debug: + msg: "{{ result.network_sets }}" +''' + +RETURN = ''' +network_sets: + description: Has all the OneView information about the Network Sets. + returned: Always, but can be empty. 
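+    # Illustrative shape only (hypothetical values):
+    #   network_sets: [{"name": "OneViewSDK Test Network Set", "networkUris": ["/rest/ethernet-networks/<id>"]}]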
+ type: dict +''' + +from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase + + +class NetworkSetInfoModule(OneViewModuleBase): + argument_spec = dict( + name=dict(type='str'), + options=dict(type='list'), + params=dict(type='dict'), + ) + + def __init__(self): + super(NetworkSetInfoModule, self).__init__(additional_arg_spec=self.argument_spec) + self.is_old_facts = self.module._name == 'oneview_network_set_facts' + if self.is_old_facts: + self.module.deprecate("The 'oneview_network_set_facts' module has been renamed to 'oneview_network_set_info', " + "and the renamed one no longer returns ansible_facts", version='2.13') + + def execute_module(self): + + name = self.module.params.get('name') + + if 'withoutEthernet' in self.options: + filter_by_name = ("\"'name'='%s'\"" % name) if name else '' + network_sets = self.oneview_client.network_sets.get_all_without_ethernet(filter=filter_by_name) + elif name: + network_sets = self.oneview_client.network_sets.get_by('name', name) + else: + network_sets = self.oneview_client.network_sets.get_all(**self.facts_params) + + if self.is_old_facts: + return dict(changed=False, + ansible_facts=dict(network_sets=network_sets)) + else: + return dict(changed=False, network_sets=network_sets) + + +def main(): + NetworkSetInfoModule().run() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/remote_management/oneview/oneview_san_manager.py b/plugins/modules/remote_management/oneview/oneview_san_manager.py new file mode 100644 index 0000000000..d1f92a4029 --- /dev/null +++ b/plugins/modules/remote_management/oneview/oneview_san_manager.py @@ -0,0 +1,219 @@ +#!/usr/bin/python +# Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: oneview_san_manager +short_description: Manage OneView SAN Manager resources +description: + - Provides an interface to manage SAN Manager resources. Can create, update, or delete. +requirements: + - hpOneView >= 3.1.1 +author: + - Felipe Bulsoni (@fgbulsoni) + - Thiago Miotto (@tmiotto) + - Adriane Cardozo (@adriane-cardozo) +options: + state: + description: + - Indicates the desired state for the Uplink Set resource. + - C(present) ensures data properties are compliant with OneView. + - C(absent) removes the resource from OneView, if it exists. + - C(connection_information_set) updates the connection information for the SAN Manager. This operation is non-idempotent. + default: present + choices: [present, absent, connection_information_set] + data: + description: + - List with SAN Manager properties. 
+ required: true + +extends_documentation_fragment: +- community.general.oneview +- community.general.oneview.validateetag + +''' + +EXAMPLES = ''' +- name: Creates a Device Manager for the Brocade SAN provider with the given hostname and credentials + oneview_san_manager: + config: /etc/oneview/oneview_config.json + state: present + data: + providerDisplayName: Brocade Network Advisor + connectionInfo: + - name: Host + value: 172.18.15.1 + - name: Port + value: 5989 + - name: Username + value: username + - name: Password + value: password + - name: UseSsl + value: true + delegate_to: localhost + +- name: Ensure a Device Manager for the Cisco SAN Provider is present + oneview_san_manager: + config: /etc/oneview/oneview_config.json + state: present + data: + name: 172.18.20.1 + providerDisplayName: Cisco + connectionInfo: + - name: Host + value: 172.18.20.1 + - name: SnmpPort + value: 161 + - name: SnmpUserName + value: admin + - name: SnmpAuthLevel + value: authnopriv + - name: SnmpAuthProtocol + value: sha + - name: SnmpAuthString + value: password + delegate_to: localhost + +- name: Sets the SAN Manager connection information + oneview_san_manager: + config: /etc/oneview/oneview_config.json + state: connection_information_set + data: + connectionInfo: + - name: Host + value: '172.18.15.1' + - name: Port + value: '5989' + - name: Username + value: 'username' + - name: Password + value: 'password' + - name: UseSsl + value: true + delegate_to: localhost + +- name: Refreshes the SAN Manager + oneview_san_manager: + config: /etc/oneview/oneview_config.json + state: present + data: + name: 172.18.15.1 + refreshState: RefreshPending + delegate_to: localhost + +- name: Delete the SAN Manager recently created + oneview_san_manager: + config: /etc/oneview/oneview_config.json + state: absent + data: + name: '172.18.15.1' + delegate_to: localhost +''' + +RETURN = ''' +san_manager: + description: Has the OneView facts about the SAN Manager. + returned: On state 'present'. Can be null. + type: dict +''' + +from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase, OneViewModuleValueError + + +class SanManagerModule(OneViewModuleBase): + MSG_CREATED = 'SAN Manager created successfully.' + MSG_UPDATED = 'SAN Manager updated successfully.' + MSG_DELETED = 'SAN Manager deleted successfully.' + MSG_ALREADY_PRESENT = 'SAN Manager is already present.' + MSG_ALREADY_ABSENT = 'SAN Manager is already absent.' + MSG_SAN_MANAGER_PROVIDER_DISPLAY_NAME_NOT_FOUND = "The provider '{0}' was not found." + + argument_spec = dict( + state=dict(type='str', default='present', choices=['absent', 'present', 'connection_information_set']), + data=dict(type='dict', required=True) + ) + + def __init__(self): + super(SanManagerModule, self).__init__(additional_arg_spec=self.argument_spec, validate_etag_support=True) + self.resource_client = self.oneview_client.san_managers + + def execute_module(self): + if self.data.get('connectionInfo'): + for connection_hash in self.data.get('connectionInfo'): + if connection_hash.get('name') == 'Host': + resource_name = connection_hash.get('value') + elif self.data.get('name'): + resource_name = self.data.get('name') + else: + msg = 'A "name" or "connectionInfo" must be provided inside the "data" field for this operation. ' + msg += 'If a "connectionInfo" is provided, the "Host" name is considered as the "name" for the resource.' 
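+            # This branch is reached only when 'data' carries neither 'name' nor
+            # 'connectionInfo'. For example (hypothetical values), a connectionInfo of
+            #   [{'name': 'Host', 'value': '172.18.15.1'}]
+            # would have resolved resource_name to '172.18.15.1' above.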
+                raise OneViewModuleValueError(msg)
+
+        resource = self.resource_client.get_by_name(resource_name)
+
+        if self.state == 'present':
+            changed, msg, san_manager = self._present(resource)
+            return dict(changed=changed, msg=msg, ansible_facts=dict(san_manager=san_manager))
+
+        elif self.state == 'absent':
+            return self.resource_absent(resource, method='remove')
+
+        elif self.state == 'connection_information_set':
+            changed, msg, san_manager = self._connection_information_set(resource)
+            return dict(changed=changed, msg=msg, ansible_facts=dict(san_manager=san_manager))
+
+    def _present(self, resource):
+        if not resource:
+            provider_uri = self.data.get('providerUri', self._get_provider_uri_by_display_name(self.data))
+            return True, self.MSG_CREATED, self.resource_client.add(self.data, provider_uri)
+        else:
+            merged_data = resource.copy()
+            merged_data.update(self.data)
+
+            # Remove 'connectionInfo' from comparison, since it is not possible to validate it.
+            resource.pop('connectionInfo', None)
+            merged_data.pop('connectionInfo', None)
+
+            if self.compare(resource, merged_data):
+                return False, self.MSG_ALREADY_PRESENT, resource
+            else:
+                updated_san_manager = self.resource_client.update(resource=merged_data, id_or_uri=resource['uri'])
+                return True, self.MSG_UPDATED, updated_san_manager
+
+    def _connection_information_set(self, resource):
+        if not resource:
+            return self._present(resource)
+        else:
+            merged_data = resource.copy()
+            merged_data.update(self.data)
+            merged_data.pop('refreshState', None)
+            if not self.data.get('connectionInfo', None):
+                raise OneViewModuleValueError('A connectionInfo field is required for this operation.')
+            updated_san_manager = self.resource_client.update(resource=merged_data, id_or_uri=resource['uri'])
+            return True, self.MSG_UPDATED, updated_san_manager
+
+    def _get_provider_uri_by_display_name(self, data):
+        display_name = data.get('providerDisplayName')
+        provider_uri = self.resource_client.get_provider_uri(display_name)
+
+        if not provider_uri:
+            raise OneViewModuleValueError(self.MSG_SAN_MANAGER_PROVIDER_DISPLAY_NAME_NOT_FOUND.format(display_name))
+
+        return provider_uri
+
+
+def main():
+    SanManagerModule().run()
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/remote_management/oneview/oneview_san_manager_facts.py b/plugins/modules/remote_management/oneview/oneview_san_manager_facts.py
new file mode 120000
index 0000000000..d64693b48e
--- /dev/null
+++ b/plugins/modules/remote_management/oneview/oneview_san_manager_facts.py
@@ -0,0 +1 @@
+oneview_san_manager_info.py
\ No newline at end of file
diff --git a/plugins/modules/remote_management/oneview/oneview_san_manager_info.py b/plugins/modules/remote_management/oneview/oneview_san_manager_info.py
new file mode 100644
index 0000000000..97a206f0e5
--- /dev/null
+++ b/plugins/modules/remote_management/oneview/oneview_san_manager_info.py
@@ -0,0 +1,124 @@
+#!/usr/bin/python
+# Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+DOCUMENTATION = '''
+---
+module: oneview_san_manager_info
+short_description: Retrieve information about one or more of the OneView SAN Managers
+description:
+    - Retrieve information about one or more of the SAN Managers from OneView
+    - This module 
was called C(oneview_san_manager_facts) before Ansible 2.9, returning C(ansible_facts). + Note that the M(oneview_san_manager_info) module no longer returns C(ansible_facts)! +requirements: + - hpOneView >= 2.0.1 +author: + - Felipe Bulsoni (@fgbulsoni) + - Thiago Miotto (@tmiotto) + - Adriane Cardozo (@adriane-cardozo) +options: + provider_display_name: + description: + - Provider Display Name. + params: + description: + - List of params to delimit, filter and sort the list of resources. + - "params allowed: + - C(start): The first item to return, using 0-based indexing. + - C(count): The number of resources to return. + - C(query): A general query string to narrow the list of resources returned. + - C(sort): The sort order of the returned data set." +extends_documentation_fragment: +- community.general.oneview + +''' + +EXAMPLES = ''' +- name: Gather information about all SAN Managers + oneview_san_manager_info: + config: /etc/oneview/oneview_config.json + delegate_to: localhost + register: result + +- debug: + msg: "{{ result.san_managers }}" + +- name: Gather paginated, filtered and sorted information about SAN Managers + oneview_san_manager_info: + config: /etc/oneview/oneview_config.json + params: + start: 0 + count: 3 + sort: name:ascending + query: isInternal eq false + delegate_to: localhost + register: result + +- debug: + msg: "{{ result.san_managers }}" + +- name: Gather information about a SAN Manager by provider display name + oneview_san_manager_info: + config: /etc/oneview/oneview_config.json + provider_display_name: Brocade Network Advisor + delegate_to: localhost + register: result + +- debug: + msg: "{{ result.san_managers }}" +''' + +RETURN = ''' +san_managers: + description: Has all the OneView information about the SAN Managers. + returned: Always, but can be null. 
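+    # Shape sketch only (values are made up): a list with one dict per SAN manager, e.g.
+    #   san_managers: [{"name": "172.18.15.1", "providerDisplayName": "Brocade Network Advisor"}]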
+ type: dict +''' + +from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase + + +class SanManagerInfoModule(OneViewModuleBase): + argument_spec = dict( + provider_display_name=dict(type='str'), + params=dict(type='dict') + ) + + def __init__(self): + super(SanManagerInfoModule, self).__init__(additional_arg_spec=self.argument_spec) + self.resource_client = self.oneview_client.san_managers + self.is_old_facts = self.module._name == 'oneview_san_manager_facts' + if self.is_old_facts: + self.module.deprecate("The 'oneview_san_manager_facts' module has been renamed to 'oneview_san_manager_info', " + "and the renamed one no longer returns ansible_facts", version='2.13') + + def execute_module(self): + if self.module.params.get('provider_display_name'): + provider_display_name = self.module.params['provider_display_name'] + san_manager = self.oneview_client.san_managers.get_by_provider_display_name(provider_display_name) + if san_manager: + resources = [san_manager] + else: + resources = [] + else: + resources = self.oneview_client.san_managers.get_all(**self.facts_params) + + if self.is_old_facts: + return dict(changed=False, ansible_facts=dict(san_managers=resources)) + else: + return dict(changed=False, san_managers=resources) + + +def main(): + SanManagerInfoModule().run() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/remote_management/redfish/idrac_redfish_command.py b/plugins/modules/remote_management/redfish/idrac_redfish_command.py new file mode 100644 index 0000000000..6ccf1fae67 --- /dev/null +++ b/plugins/modules/remote_management/redfish/idrac_redfish_command.py @@ -0,0 +1,203 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2018 Dell EMC Inc. +# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'metadata_version': '1.1'} + +DOCUMENTATION = ''' +--- +module: idrac_redfish_command +short_description: Manages Out-Of-Band controllers using iDRAC OEM Redfish APIs +description: + - Builds Redfish URIs locally and sends them to remote OOB controllers to + perform an action. 
+ - For use with Dell iDRAC operations that require Redfish OEM extensions +options: + category: + required: true + description: + - Category to execute on OOB controller + type: str + command: + required: true + description: + - List of commands to execute on OOB controller + type: list + baseuri: + required: true + description: + - Base URI of OOB controller + type: str + username: + required: true + description: + - User for authentication with OOB controller + type: str + password: + required: true + description: + - Password for authentication with OOB controller + type: str + timeout: + description: + - Timeout in seconds for URL requests to OOB controller + default: 10 + type: int + resource_id: + required: false + description: + - The ID of the System, Manager or Chassis to modify + type: str + +author: "Jose Delarosa (@jose-delarosa)" +''' + +EXAMPLES = ''' + - name: Create BIOS configuration job (schedule BIOS setting update) + idrac_redfish_command: + category: Systems + command: CreateBiosConfigJob + resource_id: System.Embedded.1 + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" +''' + +RETURN = ''' +msg: + description: Message with action result or error description + returned: always + type: str + sample: "Action was successful" +''' + +import re +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.redfish_utils import RedfishUtils +from ansible.module_utils._text import to_native + + +class IdracRedfishUtils(RedfishUtils): + + def create_bios_config_job(self): + result = {} + key = "Bios" + jobs = "Jobs" + + # Search for 'key' entry and extract URI from it + response = self.get_request(self.root_uri + self.systems_uris[0]) + if response['ret'] is False: + return response + result['ret'] = True + data = response['data'] + + if key not in data: + return {'ret': False, 'msg': "Key %s not found" % key} + + bios_uri = data[key]["@odata.id"] + + # Extract proper URI + response = self.get_request(self.root_uri + bios_uri) + if response['ret'] is False: + return response + result['ret'] = True + data = response['data'] + set_bios_attr_uri = data["@Redfish.Settings"]["SettingsObject"][ + "@odata.id"] + + payload = {"TargetSettingsURI": set_bios_attr_uri} + response = self.post_request( + self.root_uri + self.manager_uri + "/" + jobs, payload) + if response['ret'] is False: + return response + + response_output = response['resp'].__dict__ + job_id = response_output["headers"]["Location"] + job_id = re.search("JID_.+", job_id).group() + # Currently not passing job_id back to user but patch is coming + return {'ret': True, 'msg': "Config job %s created" % job_id} + + +CATEGORY_COMMANDS_ALL = { + "Systems": ["CreateBiosConfigJob"], + "Accounts": [], + "Manager": [] +} + + +def main(): + result = {} + module = AnsibleModule( + argument_spec=dict( + category=dict(required=True), + command=dict(required=True, type='list'), + baseuri=dict(required=True), + username=dict(required=True), + password=dict(required=True, no_log=True), + timeout=dict(type='int', default=10), + resource_id=dict() + ), + supports_check_mode=False + ) + + category = module.params['category'] + command_list = module.params['command'] + + # admin credentials used for authentication + creds = {'user': module.params['username'], + 'pswd': module.params['password']} + + # timeout + timeout = module.params['timeout'] + + # System, Manager or Chassis ID to modify + resource_id = module.params['resource_id'] + + # Build root 
URI + root_uri = "https://" + module.params['baseuri'] + rf_utils = IdracRedfishUtils(creds, root_uri, timeout, module, + resource_id=resource_id, data_modification=True) + + # Check that Category is valid + if category not in CATEGORY_COMMANDS_ALL: + module.fail_json(msg=to_native("Invalid Category '%s'. Valid Categories = %s" % (category, CATEGORY_COMMANDS_ALL.keys()))) + + # Check that all commands are valid + for cmd in command_list: + # Fail if even one command given is invalid + if cmd not in CATEGORY_COMMANDS_ALL[category]: + module.fail_json(msg=to_native("Invalid Command '%s'. Valid Commands = %s" % (cmd, CATEGORY_COMMANDS_ALL[category]))) + + # Organize by Categories / Commands + + if category == "Systems": + # execute only if we find a System resource + result = rf_utils._find_systems_resource() + if result['ret'] is False: + module.fail_json(msg=to_native(result['msg'])) + + for command in command_list: + if command == "CreateBiosConfigJob": + # execute only if we find a Managers resource + result = rf_utils._find_managers_resource() + if result['ret'] is False: + module.fail_json(msg=to_native(result['msg'])) + result = rf_utils.create_bios_config_job() + + # Return data back or fail with proper message + if result['ret'] is True: + del result['ret'] + module.exit_json(changed=True, msg='Action was successful') + else: + module.fail_json(msg=to_native(result['msg'])) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/remote_management/redfish/idrac_redfish_config.py b/plugins/modules/remote_management/redfish/idrac_redfish_config.py new file mode 100644 index 0000000000..b1ce734449 --- /dev/null +++ b/plugins/modules/remote_management/redfish/idrac_redfish_config.py @@ -0,0 +1,329 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2019 Dell EMC Inc. +# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'metadata_version': '1.1'} + +DOCUMENTATION = ''' +--- +module: idrac_redfish_config +short_description: Manages servers through iDRAC using Dell Redfish APIs +description: + - For use with Dell iDRAC operations that require Redfish OEM extensions + - Builds Redfish URIs locally and sends them to remote iDRAC controllers to + set or update a configuration attribute. 
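+# Behavioural note (see set_manager_attributes() below): the module first reads the
+# target's Attributes resource, skips attributes already at the requested value,
+# and only PATCHes the remainder, reporting changed=false when nothing is left to apply.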
+options:
+  category:
+    required: true
+    type: str
+    description:
+      - Category to execute on iDRAC
+  command:
+    required: true
+    description:
+      - List of commands to execute on iDRAC
+      - I(SetManagerAttributes), I(SetLifecycleControllerAttributes) and
+        I(SetSystemAttributes) are mutually exclusive commands when C(category)
+        is I(Manager)
+    type: list
+  baseuri:
+    required: true
+    description:
+      - Base URI of iDRAC
+    type: str
+  username:
+    required: true
+    description:
+      - User for authentication with iDRAC
+    type: str
+  password:
+    required: true
+    description:
+      - Password for authentication with iDRAC
+    type: str
+  manager_attribute_name:
+    required: false
+    description:
+      - (deprecated) name of iDRAC attribute to update
+    type: str
+  manager_attribute_value:
+    required: false
+    description:
+      - (deprecated) value of iDRAC attribute to update
+    type: str
+  manager_attributes:
+    required: false
+    description:
+      - dictionary of iDRAC attribute name and value pairs to update
+    default: {}
+    type: dict
+  timeout:
+    description:
+      - Timeout in seconds for URL requests to iDRAC controller
+    default: 10
+    type: int
+  resource_id:
+    required: false
+    description:
+      - The ID of the System, Manager or Chassis to modify
+    type: str
+
+author: "Jose Delarosa (@jose-delarosa)"
+'''
+
+EXAMPLES = '''
+  - name: Enable NTP and set NTP server and Time zone attributes in iDRAC
+    idrac_redfish_config:
+      category: Manager
+      command: SetManagerAttributes
+      resource_id: iDRAC.Embedded.1
+      manager_attributes:
+        NTPConfigGroup.1.NTPEnable: "Enabled"
+        NTPConfigGroup.1.NTP1: "{{ ntpserver1 }}"
+        Time.1.Timezone: "{{ timezone }}"
+      baseuri: "{{ baseuri }}"
+      username: "{{ username }}"
+      password: "{{ password }}"
+
+  - name: Enable Syslog and set Syslog servers in iDRAC
+    idrac_redfish_config:
+      category: Manager
+      command: SetManagerAttributes
+      resource_id: iDRAC.Embedded.1
+      manager_attributes:
+        SysLog.1.SysLogEnable: "Enabled"
+        SysLog.1.Server1: "{{ syslog_server1 }}"
+        SysLog.1.Server2: "{{ syslog_server2 }}"
+      baseuri: "{{ baseuri }}"
+      username: "{{ username }}"
+      password: "{{ password }}"
+
+  - name: Configure SNMP community string, port, protocol and trap format
+    idrac_redfish_config:
+      category: Manager
+      command: SetManagerAttributes
+      resource_id: iDRAC.Embedded.1
+      manager_attributes:
+        SNMP.1.AgentEnable: "Enabled"
+        SNMP.1.AgentCommunity: "public_community_string"
+        SNMP.1.TrapFormat: "SNMPv1"
+        SNMP.1.SNMPProtocol: "All"
+        SNMP.1.DiscoveryPort: 161
+        SNMP.1.AlertPort: 162
+      baseuri: "{{ baseuri }}"
+      username: "{{ username }}"
+      password: "{{ password }}"
+
+  - name: Enable CSIOR
+    idrac_redfish_config:
+      category: Manager
+      command: SetLifecycleControllerAttributes
+      resource_id: iDRAC.Embedded.1
+      manager_attributes:
+        LCAttributes.1.CollectSystemInventoryOnRestart: "Enabled"
+      baseuri: "{{ baseuri }}"
+      username: "{{ username }}"
+      password: "{{ password }}"
+
+  - name: Set Power Supply Redundancy Policy to A/B Grid Redundant
+    idrac_redfish_config:
+      category: Manager
+      command: SetSystemAttributes
+      resource_id: iDRAC.Embedded.1
+      manager_attributes:
+        ServerPwr.1.PSRedPolicy: "A/B Grid Redundant"
+      baseuri: "{{ baseuri }}"
+      username: "{{ username }}"
+      password: "{{ password }}"
+'''
+
+RETURN = '''
+msg:
+    description: Message with action result or error description
+    returned: always
+    type: str
+    sample: "Action was successful"
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.common.validation import (
+    check_mutually_exclusive,
+    
check_required_arguments +) +from ansible_collections.community.general.plugins.module_utils.redfish_utils import RedfishUtils +from ansible.module_utils._text import to_native + + +class IdracRedfishUtils(RedfishUtils): + + def set_manager_attributes(self, command): + + result = {} + required_arg_spec = {'manager_attributes': {'required': True}} + + try: + check_required_arguments(required_arg_spec, self.module.params) + + except TypeError as e: + msg = to_native(e) + self.module.fail_json(msg=msg) + + key = "Attributes" + command_manager_attributes_uri_map = { + "SetManagerAttributes": self.manager_uri, + "SetLifecycleControllerAttributes": "/redfish/v1/Managers/LifecycleController.Embedded.1", + "SetSystemAttributes": "/redfish/v1/Managers/System.Embedded.1" + } + manager_uri = command_manager_attributes_uri_map.get(command, self.manager_uri) + + attributes = self.module.params['manager_attributes'] + manager_attr_name = self.module.params.get('manager_attribute_name') + manager_attr_value = self.module.params.get('manager_attribute_value') + + # manager attributes to update + if manager_attr_name: + attributes.update({manager_attr_name: manager_attr_value}) + + attrs_to_patch = {} + attrs_skipped = {} + + # Search for key entry and extract URI from it + response = self.get_request(self.root_uri + manager_uri + "/" + key) + if response['ret'] is False: + return response + result['ret'] = True + data = response['data'] + + if key not in data: + return {'ret': False, + 'msg': "%s: Key %s not found" % (command, key)} + + for attr_name, attr_value in attributes.items(): + # Check if attribute exists + if attr_name not in data[u'Attributes']: + return {'ret': False, + 'msg': "%s: Manager attribute %s not found" % (command, attr_name)} + + # Find out if value is already set to what we want. 
If yes, exclude + # those attributes + if data[u'Attributes'][attr_name] == attr_value: + attrs_skipped.update({attr_name: attr_value}) + else: + attrs_to_patch.update({attr_name: attr_value}) + + if not attrs_to_patch: + return {'ret': True, 'changed': False, + 'msg': "Manager attributes already set"} + + payload = {"Attributes": attrs_to_patch} + response = self.patch_request(self.root_uri + manager_uri + "/" + key, payload) + if response['ret'] is False: + return response + return {'ret': True, 'changed': True, + 'msg': "%s: Modified Manager attributes %s" % (command, attrs_to_patch)} + + +CATEGORY_COMMANDS_ALL = { + "Manager": ["SetManagerAttributes", "SetLifecycleControllerAttributes", + "SetSystemAttributes"] +} + +# list of mutually exclusive commands for a category +CATEGORY_COMMANDS_MUTUALLY_EXCLUSIVE = { + "Manager": [["SetManagerAttributes", "SetLifecycleControllerAttributes", + "SetSystemAttributes"]] +} + + +def main(): + result = {} + module = AnsibleModule( + argument_spec=dict( + category=dict(required=True), + command=dict(required=True, type='list'), + baseuri=dict(required=True), + username=dict(required=True), + password=dict(required=True, no_log=True), + manager_attribute_name=dict(default=None), + manager_attribute_value=dict(default=None), + manager_attributes=dict(type='dict', default={}), + timeout=dict(type='int', default=10), + resource_id=dict() + ), + supports_check_mode=False + ) + + category = module.params['category'] + command_list = module.params['command'] + + # admin credentials used for authentication + creds = {'user': module.params['username'], + 'pswd': module.params['password']} + + # timeout + timeout = module.params['timeout'] + + # System, Manager or Chassis ID to modify + resource_id = module.params['resource_id'] + + # Build root URI + root_uri = "https://" + module.params['baseuri'] + rf_utils = IdracRedfishUtils(creds, root_uri, timeout, module, + resource_id=resource_id, data_modification=True) + + # Check that Category is valid + if category not in CATEGORY_COMMANDS_ALL: + module.fail_json(msg=to_native("Invalid Category '%s'. Valid Categories = %s" % (category, CATEGORY_COMMANDS_ALL.keys()))) + + # Check that all commands are valid + for cmd in command_list: + # Fail if even one command given is invalid + if cmd not in CATEGORY_COMMANDS_ALL[category]: + module.fail_json(msg=to_native("Invalid Command '%s'. Valid Commands = %s" % (cmd, CATEGORY_COMMANDS_ALL[category]))) + + # check for mutually exclusive commands + try: + # check_mutually_exclusive accepts a single list or list of lists that + # are groups of terms that should be mutually exclusive with one another + # and checks that against a dictionary + check_mutually_exclusive(CATEGORY_COMMANDS_MUTUALLY_EXCLUSIVE[category], + dict.fromkeys(command_list, True)) + + except TypeError as e: + module.fail_json(msg=to_native(e)) + + # Organize by Categories / Commands + + if category == "Manager": + # execute only if we find a Manager resource + result = rf_utils._find_managers_resource() + if result['ret'] is False: + module.fail_json(msg=to_native(result['msg'])) + + for command in command_list: + if command in ["SetManagerAttributes", "SetLifecycleControllerAttributes", "SetSystemAttributes"]: + result = rf_utils.set_manager_attributes(command) + + if any((module.params['manager_attribute_name'], module.params['manager_attribute_value'])): + module.deprecate(msg='Arguments `manager_attribute_name` and ' + '`manager_attribute_value` are deprecated. 
' + 'Use `manager_attributes` instead for passing in ' + 'the manager attribute name and value pairs', + version='2.13') + + # Return data back or fail with proper message + if result['ret'] is True: + module.exit_json(changed=result['changed'], msg=to_native(result['msg'])) + else: + module.fail_json(msg=to_native(result['msg'])) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/remote_management/redfish/idrac_redfish_facts.py b/plugins/modules/remote_management/redfish/idrac_redfish_facts.py new file mode 120000 index 0000000000..794ab40206 --- /dev/null +++ b/plugins/modules/remote_management/redfish/idrac_redfish_facts.py @@ -0,0 +1 @@ +idrac_redfish_info.py \ No newline at end of file diff --git a/plugins/modules/remote_management/redfish/idrac_redfish_info.py b/plugins/modules/remote_management/redfish/idrac_redfish_info.py new file mode 100644 index 0000000000..6a67f50105 --- /dev/null +++ b/plugins/modules/remote_management/redfish/idrac_redfish_info.py @@ -0,0 +1,239 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2019 Dell EMC Inc. +# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'metadata_version': '1.1'} + +DOCUMENTATION = ''' +--- +module: idrac_redfish_info +short_description: Gather PowerEdge server information through iDRAC using Redfish APIs +description: + - Builds Redfish URIs locally and sends them to remote iDRAC controllers to + get information back. + - For use with Dell EMC iDRAC operations that require Redfish OEM extensions + - This module was called C(idrac_redfish_facts) before Ansible 2.9, returning C(ansible_facts). + Note that the M(idrac_redfish_info) module no longer returns C(ansible_facts)! 
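+# Implementation note (see get_manager_attributes() below): attribute sets are read
+# from the Links.Oem.Dell.DellAttributes members of the Manager resource (present on
+# iDRAC9 only); the 'Id' and 'Attributes' properties of each entry are collected
+# into redfish_facts.entries.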
+options: + category: + required: true + description: + - Category to execute on iDRAC controller + type: str + command: + required: true + description: + - List of commands to execute on iDRAC controller + - C(GetManagerAttributes) returns the list of dicts containing iDRAC, + LifecycleController and System attributes + type: list + baseuri: + required: true + description: + - Base URI of iDRAC controller + type: str + username: + required: true + description: + - User for authentication with iDRAC controller + type: str + password: + required: true + description: + - Password for authentication with iDRAC controller + type: str + timeout: + description: + - Timeout in seconds for URL requests to OOB controller + default: 10 + type: int + +author: "Jose Delarosa (@jose-delarosa)" +''' + +EXAMPLES = ''' + - name: Get Manager attributes with a default of 20 seconds + idrac_redfish_info: + category: Manager + command: GetManagerAttributes + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + timeout: 20 + register: result + + # Examples to display the value of all or a single iDRAC attribute + - name: Store iDRAC attributes as a fact variable + set_fact: + idrac_attributes: "{{ result.redfish_facts.entries | selectattr('Id', 'defined') | selectattr('Id', 'equalto', 'iDRACAttributes') | list | first }}" + + - name: Display all iDRAC attributes + debug: + var: idrac_attributes + + - name: Display the value of 'Syslog.1.SysLogEnable' iDRAC attribute + debug: + var: idrac_attributes['Syslog.1.SysLogEnable'] + + # Examples to display the value of all or a single LifecycleController attribute + - name: Store LifecycleController attributes as a fact variable + set_fact: + lc_attributes: "{{ result.redfish_facts.entries | selectattr('Id', 'defined') | selectattr('Id', 'equalto', 'LCAttributes') | list | first }}" + + - name: Display LifecycleController attributes + debug: + var: lc_attributes + + - name: Display the value of 'CollectSystemInventoryOnRestart' attribute + debug: + var: lc_attributes['LCAttributes.1.CollectSystemInventoryOnRestart'] + + # Examples to display the value of all or a single System attribute + - name: Store System attributes as a fact variable + set_fact: + system_attributes: "{{ result.redfish_facts.entries | selectattr('Id', 'defined') | selectattr('Id', 'equalto', 'SystemAttributes') | list | first }}" + + - name: Display System attributes + debug: + var: system_attributes + + - name: Display the value of 'PSRedPolicy' + debug: + var: system_attributes['ServerPwr.1.PSRedPolicy'] + +''' + +RETURN = ''' +msg: + description: different results depending on task + returned: always + type: dict + sample: List of Manager attributes +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.redfish_utils import RedfishUtils +from ansible.module_utils._text import to_native + + +class IdracRedfishUtils(RedfishUtils): + + def get_manager_attributes(self): + result = {} + manager_attributes = [] + properties = ['Attributes', 'Id'] + + response = self.get_request(self.root_uri + self.manager_uri) + + if response['ret'] is False: + return response + data = response['data'] + + # Manager attributes are supported as part of iDRAC OEM extension + # Attributes are supported only on iDRAC9 + try: + for members in data[u'Links'][u'Oem'][u'Dell'][u'DellAttributes']: + attributes_uri = members[u'@odata.id'] + + response = self.get_request(self.root_uri + attributes_uri) + if response['ret'] is 
False: + return response + data = response['data'] + + attributes = {} + for prop in properties: + if prop in data: + attributes[prop] = data.get(prop) + + if attributes: + manager_attributes.append(attributes) + + result['ret'] = True + + except (AttributeError, KeyError) as e: + result['ret'] = False + result['msg'] = "Failed to find attribute/key: " + str(e) + + result["entries"] = manager_attributes + return result + + +CATEGORY_COMMANDS_ALL = { + "Manager": ["GetManagerAttributes"] +} + + +def main(): + result = {} + module = AnsibleModule( + argument_spec=dict( + category=dict(required=True), + command=dict(required=True, type='list'), + baseuri=dict(required=True), + username=dict(required=True), + password=dict(required=True, no_log=True), + timeout=dict(type='int', default=10) + ), + supports_check_mode=False + ) + is_old_facts = module._name == 'idrac_redfish_facts' + if is_old_facts: + module.deprecate("The 'idrac_redfish_facts' module has been renamed to 'idrac_redfish_info', " + "and the renamed one no longer returns ansible_facts", version='2.13') + + category = module.params['category'] + command_list = module.params['command'] + + # admin credentials used for authentication + creds = {'user': module.params['username'], + 'pswd': module.params['password']} + + # timeout + timeout = module.params['timeout'] + + # Build root URI + root_uri = "https://" + module.params['baseuri'] + rf_utils = IdracRedfishUtils(creds, root_uri, timeout, module) + + # Check that Category is valid + if category not in CATEGORY_COMMANDS_ALL: + module.fail_json(msg=to_native("Invalid Category '%s'. Valid Categories = %s" % (category, CATEGORY_COMMANDS_ALL.keys()))) + + # Check that all commands are valid + for cmd in command_list: + # Fail if even one command given is invalid + if cmd not in CATEGORY_COMMANDS_ALL[category]: + module.fail_json(msg=to_native("Invalid Command '%s'. Valid Commands = %s" % (cmd, CATEGORY_COMMANDS_ALL[category]))) + + # Organize by Categories / Commands + + if category == "Manager": + # execute only if we find a Manager resource + result = rf_utils._find_managers_resource() + if result['ret'] is False: + module.fail_json(msg=to_native(result['msg'])) + + for command in command_list: + if command == "GetManagerAttributes": + result = rf_utils.get_manager_attributes() + + # Return data back or fail with proper message + if result['ret'] is True: + del result['ret'] + if is_old_facts: + module.exit_json(ansible_facts=dict(redfish_facts=result)) + else: + module.exit_json(redfish_facts=result) + else: + module.fail_json(msg=to_native(result['msg'])) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/remote_management/redfish/redfish_command.py b/plugins/modules/remote_management/redfish/redfish_command.py new file mode 100644 index 0000000000..bca6578b00 --- /dev/null +++ b/plugins/modules/remote_management/redfish/redfish_command.py @@ -0,0 +1,552 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2017-2018 Dell EMC Inc. +# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'metadata_version': '1.1'} + +DOCUMENTATION = ''' +--- +module: redfish_command +short_description: Manages Out-Of-Band controllers using Redfish APIs +description: + - Builds Redfish URIs locally and sends them to remote OOB controllers to + perform an action. 
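All of the Redfish modules in this commit validate input the same way before doing any work: the requested category must be a key of CATEGORY_COMMANDS_ALL, and every requested command must belong to that category's list; only then is each command dispatched to a handler. A minimal, module-free sketch of that shape (the table contents and handler wiring here are illustrative, not the modules' actual tables):

# Sketch of the category/command validation and dispatch pattern shared by
# idrac_redfish_info, redfish_command, redfish_config and redfish_info.
CATEGORY_COMMANDS_ALL = {
    "Manager": ["GetManagerAttributes"],
}


def dispatch(category, command_list, handlers):
    # Reject an unknown category up front
    if category not in CATEGORY_COMMANDS_ALL:
        raise ValueError("Invalid Category '%s'. Valid Categories = %s"
                         % (category, list(CATEGORY_COMMANDS_ALL)))
    # Fail if even one command given is invalid
    for cmd in command_list:
        if cmd not in CATEGORY_COMMANDS_ALL[category]:
            raise ValueError("Invalid Command '%s'. Valid Commands = %s"
                             % (cmd, CATEGORY_COMMANDS_ALL[category]))
    # Run commands in the order given; the last result wins, as in the modules above
    result = {}
    for cmd in command_list:
        result = handlers[cmd]()
    return result


# Example: dispatch("Manager", ["GetManagerAttributes"],
#                   {"GetManagerAttributes": lambda: {"ret": True}})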
+  - Manages OOB controller, e.g. reboot, log management.
+  - Manages OOB controller users, e.g. add, remove, update.
+  - Manages system power, e.g. on, off, graceful and forced reboot.
+options:
+  category:
+    required: true
+    description:
+      - Category to execute on OOB controller
+    type: str
+  command:
+    required: true
+    description:
+      - List of commands to execute on OOB controller
+    type: list
+  baseuri:
+    required: true
+    description:
+      - Base URI of OOB controller
+    type: str
+  username:
+    required: true
+    description:
+      - Username for authentication with OOB controller
+    type: str
+  password:
+    required: true
+    description:
+      - Password for authentication with OOB controller
+    type: str
+  id:
+    required: false
+    aliases: [ account_id ]
+    description:
+      - ID of account to delete/modify
+    type: str
+  new_username:
+    required: false
+    aliases: [ account_username ]
+    description:
+      - Username of account to add/delete/modify
+    type: str
+  new_password:
+    required: false
+    aliases: [ account_password ]
+    description:
+      - New password of account to add/modify
+    type: str
+  roleid:
+    required: false
+    aliases: [ account_roleid ]
+    description:
+      - Role of account to add/modify
+    type: str
+  bootdevice:
+    required: false
+    description:
+      - Boot device to use when setting the boot configuration
+    type: str
+  timeout:
+    description:
+      - Timeout in seconds for URL requests to OOB controller
+    default: 10
+    type: int
+  uefi_target:
+    required: false
+    description:
+      - UEFI target when bootdevice is "UefiTarget"
+    type: str
+  boot_next:
+    required: false
+    description:
+      - BootNext target when bootdevice is "UefiBootNext"
+    type: str
+  update_username:
+    required: false
+    aliases: [ account_updatename ]
+    description:
+      - New user name for the account specified by account_username
+    type: str
+  account_properties:
+    required: false
+    description:
+      - Properties of the account service to update
+    type: dict
+  resource_id:
+    required: false
+    description:
+      - The ID of the System, Manager or Chassis to modify
+    type: str
+  update_image_uri:
+    required: false
+    description:
+      - The URI of the image for the update
+    type: str
+  update_protocol:
+    required: false
+    description:
+      - The protocol for the update
+    type: str
+  update_targets:
+    required: false
+    description:
+      - The list of target resource URIs to apply the update to
+    type: list
+    elements: str
+  update_creds:
+    required: false
+    description:
+      - The credentials for retrieving the update image
+    type: dict
+    suboptions:
+      username:
+        required: false
+        description:
+          - The username for retrieving the update image
+        type: str
+      password:
+        required: false
+        description:
+          - The password for retrieving the update image
+        type: str
+
+author: "Jose Delarosa (@jose-delarosa)"
+'''
+
+EXAMPLES = '''
+  - name: Restart system power gracefully
+    redfish_command:
+      category: Systems
+      command: PowerGracefulRestart
+      resource_id: 437XR1138R2
+      baseuri: "{{ baseuri }}"
+      username: "{{ username }}"
+      password: "{{ password }}"
+
+  - name: Set one-time boot device to {{ bootdevice }}
+    redfish_command:
+      category: Systems
+      command: SetOneTimeBoot
+      resource_id: 437XR1138R2
+      bootdevice: "{{ bootdevice }}"
+      baseuri: "{{ baseuri }}"
+      username: "{{ username }}"
+      password: "{{ password }}"
+
+  - name: Set one-time boot device to UefiTarget of "/0x31/0x33/0x01/0x01"
+    redfish_command:
+      category: Systems
+      command: SetOneTimeBoot
+      resource_id: 437XR1138R2
+      bootdevice: "UefiTarget"
+      uefi_target: "/0x31/0x33/0x01/0x01"
+      baseuri: "{{ baseuri }}"
+      username: "{{ username }}"
+      password: "{{
password }}" + + - name: Set one-time boot device to BootNext target of "Boot0001" + redfish_command: + category: Systems + command: SetOneTimeBoot + resource_id: 437XR1138R2 + bootdevice: "UefiBootNext" + boot_next: "Boot0001" + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + + - name: Set chassis indicator LED to blink + redfish_command: + category: Chassis + command: IndicatorLedBlink + resource_id: 1U + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + + - name: Add user + redfish_command: + category: Accounts + command: AddUser + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + new_username: "{{ new_username }}" + new_password: "{{ new_password }}" + roleid: "{{ roleid }}" + + - name: Add user using new option aliases + redfish_command: + category: Accounts + command: AddUser + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + account_username: "{{ account_username }}" + account_password: "{{ account_password }}" + account_roleid: "{{ account_roleid }}" + + - name: Delete user + redfish_command: + category: Accounts + command: DeleteUser + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + account_username: "{{ account_username }}" + + - name: Disable user + redfish_command: + category: Accounts + command: DisableUser + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + account_username: "{{ account_username }}" + + - name: Enable user + redfish_command: + category: Accounts + command: EnableUser + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + account_username: "{{ account_username }}" + + - name: Add and enable user + redfish_command: + category: Accounts + command: AddUser,EnableUser + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + new_username: "{{ new_username }}" + new_password: "{{ new_password }}" + roleid: "{{ roleid }}" + + - name: Update user password + redfish_command: + category: Accounts + command: UpdateUserPassword + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + account_username: "{{ account_username }}" + account_password: "{{ account_password }}" + + - name: Update user role + redfish_command: + category: Accounts + command: UpdateUserRole + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + account_username: "{{ account_username }}" + roleid: "{{ roleid }}" + + - name: Update user name + redfish_command: + category: Accounts + command: UpdateUserName + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + account_username: "{{ account_username }}" + account_updatename: "{{ account_updatename }}" + + - name: Update user name + redfish_command: + category: Accounts + command: UpdateUserName + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + account_username: "{{ account_username }}" + update_username: "{{ update_username }}" + + - name: Update AccountService properties + redfish_command: + category: Accounts + command: UpdateAccountServiceProperties + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + account_properties: + AccountLockoutThreshold: 5 + AccountLockoutDuration: 600 + + - name: Clear Manager Logs with a timeout of 20 seconds + redfish_command: + category: Manager + command: ClearLogs + resource_id: BMC 
+ baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + timeout: 20 + + - name: Clear Sessions + redfish_command: + category: Sessions + command: ClearSessions + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + + - name: Simple update + redfish_command: + category: Update + command: SimpleUpdate + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + update_image_uri: https://example.com/myupdate.img + + - name: Simple update with additional options + redfish_command: + category: Update + command: SimpleUpdate + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + update_image_uri: //example.com/myupdate.img + update_protocol: FTP + update_targets: + - /redfish/v1/UpdateService/FirmwareInventory/BMC + update_creds: + username: operator + password: supersecretpwd +''' + +RETURN = ''' +msg: + description: Message with action result or error description + returned: always + type: str + sample: "Action was successful" +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.redfish_utils import RedfishUtils +from ansible.module_utils._text import to_native + + +# More will be added as module features are expanded +CATEGORY_COMMANDS_ALL = { + "Systems": ["PowerOn", "PowerForceOff", "PowerForceRestart", "PowerGracefulRestart", + "PowerGracefulShutdown", "PowerReboot", "SetOneTimeBoot"], + "Chassis": ["IndicatorLedOn", "IndicatorLedOff", "IndicatorLedBlink"], + "Accounts": ["AddUser", "EnableUser", "DeleteUser", "DisableUser", + "UpdateUserRole", "UpdateUserPassword", "UpdateUserName", + "UpdateAccountServiceProperties"], + "Sessions": ["ClearSessions"], + "Manager": ["GracefulRestart", "ClearLogs"], + "Update": ["SimpleUpdate"] +} + + +def main(): + result = {} + module = AnsibleModule( + argument_spec=dict( + category=dict(required=True), + command=dict(required=True, type='list'), + baseuri=dict(required=True), + username=dict(required=True), + password=dict(required=True, no_log=True), + id=dict(aliases=["account_id"]), + new_username=dict(aliases=["account_username"]), + new_password=dict(aliases=["account_password"], no_log=True), + roleid=dict(aliases=["account_roleid"]), + update_username=dict(type='str', aliases=["account_updatename"]), + account_properties=dict(type='dict', default={}), + bootdevice=dict(), + timeout=dict(type='int', default=10), + uefi_target=dict(), + boot_next=dict(), + resource_id=dict(), + update_image_uri=dict(), + update_protocol=dict(), + update_targets=dict(type='list', elements='str', default=[]), + update_creds=dict( + type='dict', + options=dict( + username=dict(), + password=dict() + ) + ) + ), + supports_check_mode=False + ) + + category = module.params['category'] + command_list = module.params['command'] + + # admin credentials used for authentication + creds = {'user': module.params['username'], + 'pswd': module.params['password']} + + # user to add/modify/delete + user = {'account_id': module.params['id'], + 'account_username': module.params['new_username'], + 'account_password': module.params['new_password'], + 'account_roleid': module.params['roleid'], + 'account_updatename': module.params['update_username'], + 'account_properties': module.params['account_properties']} + + # timeout + timeout = module.params['timeout'] + + # System, Manager or Chassis ID to modify + resource_id = module.params['resource_id'] + + # update options + update_opts = { + 
'update_image_uri': module.params['update_image_uri'], + 'update_protocol': module.params['update_protocol'], + 'update_targets': module.params['update_targets'], + 'update_creds': module.params['update_creds'] + } + + # Build root URI + root_uri = "https://" + module.params['baseuri'] + rf_utils = RedfishUtils(creds, root_uri, timeout, module, + resource_id=resource_id, data_modification=True) + + # Check that Category is valid + if category not in CATEGORY_COMMANDS_ALL: + module.fail_json(msg=to_native("Invalid Category '%s'. Valid Categories = %s" % (category, CATEGORY_COMMANDS_ALL.keys()))) + + # Check that all commands are valid + for cmd in command_list: + # Fail if even one command given is invalid + if cmd not in CATEGORY_COMMANDS_ALL[category]: + module.fail_json(msg=to_native("Invalid Command '%s'. Valid Commands = %s" % (cmd, CATEGORY_COMMANDS_ALL[category]))) + + # Organize by Categories / Commands + if category == "Accounts": + ACCOUNTS_COMMANDS = { + "AddUser": rf_utils.add_user, + "EnableUser": rf_utils.enable_user, + "DeleteUser": rf_utils.delete_user, + "DisableUser": rf_utils.disable_user, + "UpdateUserRole": rf_utils.update_user_role, + "UpdateUserPassword": rf_utils.update_user_password, + "UpdateUserName": rf_utils.update_user_name, + "UpdateAccountServiceProperties": rf_utils.update_accountservice_properties + } + + # execute only if we find an Account service resource + result = rf_utils._find_accountservice_resource() + if result['ret'] is False: + module.fail_json(msg=to_native(result['msg'])) + + for command in command_list: + result = ACCOUNTS_COMMANDS[command](user) + + elif category == "Systems": + # execute only if we find a System resource + result = rf_utils._find_systems_resource() + if result['ret'] is False: + module.fail_json(msg=to_native(result['msg'])) + + for command in command_list: + if "Power" in command: + result = rf_utils.manage_system_power(command) + elif command == "SetOneTimeBoot": + result = rf_utils.set_one_time_boot_device( + module.params['bootdevice'], + module.params['uefi_target'], + module.params['boot_next']) + + elif category == "Chassis": + result = rf_utils._find_chassis_resource() + if result['ret'] is False: + module.fail_json(msg=to_native(result['msg'])) + + led_commands = ["IndicatorLedOn", "IndicatorLedOff", "IndicatorLedBlink"] + + # Check if more than one led_command is present + num_led_commands = sum([command in led_commands for command in command_list]) + if num_led_commands > 1: + result = {'ret': False, 'msg': "Only one IndicatorLed command should be sent at a time."} + else: + for command in command_list: + if command in led_commands: + result = rf_utils.manage_indicator_led(command) + + elif category == "Sessions": + # execute only if we find SessionService resources + resource = rf_utils._find_sessionservice_resource() + if resource['ret'] is False: + module.fail_json(msg=resource['msg']) + + for command in command_list: + if command == "ClearSessions": + result = rf_utils.clear_sessions() + + elif category == "Manager": + MANAGER_COMMANDS = { + "GracefulRestart": rf_utils.restart_manager_gracefully, + "ClearLogs": rf_utils.clear_logs + } + + # execute only if we find a Manager service resource + result = rf_utils._find_managers_resource() + if result['ret'] is False: + module.fail_json(msg=to_native(result['msg'])) + + for command in command_list: + result = MANAGER_COMMANDS[command]() + + elif category == "Update": + # execute only if we find UpdateService resources + resource = 
rf_utils._find_updateservice_resource() + if resource['ret'] is False: + module.fail_json(msg=resource['msg']) + + for command in command_list: + if command == "SimpleUpdate": + result = rf_utils.simple_update(update_opts) + + # Return data back or fail with proper message + if result['ret'] is True: + del result['ret'] + changed = result.get('changed', True) + module.exit_json(changed=changed, msg='Action was successful') + else: + module.fail_json(msg=to_native(result['msg'])) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/remote_management/redfish/redfish_config.py b/plugins/modules/remote_management/redfish/redfish_config.py new file mode 100644 index 0000000000..82abceafeb --- /dev/null +++ b/plugins/modules/remote_management/redfish/redfish_config.py @@ -0,0 +1,332 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2017-2018 Dell EMC Inc. +# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'metadata_version': '1.1'} + +DOCUMENTATION = ''' +--- +module: redfish_config +short_description: Manages Out-Of-Band controllers using Redfish APIs +description: + - Builds Redfish URIs locally and sends them to remote OOB controllers to + set or update a configuration attribute. + - Manages BIOS configuration settings. + - Manages OOB controller configuration settings. +options: + category: + required: true + description: + - Category to execute on OOB controller + type: str + command: + required: true + description: + - List of commands to execute on OOB controller + type: list + baseuri: + required: true + description: + - Base URI of OOB controller + type: str + username: + required: true + description: + - User for authentication with OOB controller + type: str + password: + required: true + description: + - Password for authentication with OOB controller + type: str + bios_attribute_name: + required: false + description: + - name of BIOS attr to update (deprecated - use bios_attributes instead) + default: 'null' + type: str + bios_attribute_value: + required: false + description: + - value of BIOS attr to update (deprecated - use bios_attributes instead) + default: 'null' + type: str + bios_attributes: + required: false + description: + - dictionary of BIOS attributes to update + default: {} + type: dict + timeout: + description: + - Timeout in seconds for URL requests to OOB controller + default: 10 + type: int + boot_order: + required: false + description: + - list of BootOptionReference strings specifying the BootOrder + default: [] + type: list + network_protocols: + required: false + description: + - setting dict of manager services to update + type: dict + resource_id: + required: false + description: + - The ID of the System, Manager or Chassis to modify + type: str + nic_addr: + required: false + description: + - EthernetInterface Address string on OOB controller + default: 'null' + type: str + nic_config: + required: false + description: + - setting dict of EthernetInterface on OOB controller + type: dict + +author: "Jose Delarosa (@jose-delarosa)" +''' + +EXAMPLES = ''' + - name: Set BootMode to UEFI + redfish_config: + category: Systems + command: SetBiosAttributes + resource_id: 437XR1138R2 + bios_attributes: + BootMode: "Uefi" + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + + - name: Set 
multiple BootMode attributes + redfish_config: + category: Systems + command: SetBiosAttributes + resource_id: 437XR1138R2 + bios_attributes: + BootMode: "Bios" + OneTimeBootMode: "Enabled" + BootSeqRetry: "Enabled" + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + + - name: Enable PXE Boot for NIC1 using deprecated options + redfish_config: + category: Systems + command: SetBiosAttributes + resource_id: 437XR1138R2 + bios_attribute_name: PxeDev1EnDis + bios_attribute_value: Enabled + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + + - name: Set BIOS default settings with a timeout of 20 seconds + redfish_config: + category: Systems + command: SetBiosDefaultSettings + resource_id: 437XR1138R2 + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + timeout: 20 + + - name: Set boot order + redfish_config: + category: Systems + command: SetBootOrder + boot_order: + - Boot0002 + - Boot0001 + - Boot0000 + - Boot0003 + - Boot0004 + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + + - name: Set boot order to the default + redfish_config: + category: Systems + command: SetDefaultBootOrder + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + + - name: Set Manager Network Protocols + redfish_config: + category: Manager + command: SetNetworkProtocols + network_protocols: + SNMP: + ProtocolEnabled: True + Port: 161 + HTTP: + ProtocolEnabled: False + Port: 8080 + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + + - name: Set Manager NIC + redfish_config: + category: Manager + command: SetManagerNic + nic_config: + DHCPv4: + DHCPEnabled: False + IPv4StaticAddresses: + Address: 192.168.1.3 + Gateway: 192.168.1.1 + SubnetMask: 255.255.255.0 + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" +''' + +RETURN = ''' +msg: + description: Message with action result or error description + returned: always + type: str + sample: "Action was successful" +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.redfish_utils import RedfishUtils +from ansible.module_utils._text import to_native + + +# More will be added as module features are expanded +CATEGORY_COMMANDS_ALL = { + "Systems": ["SetBiosDefaultSettings", "SetBiosAttributes", "SetBootOrder", + "SetDefaultBootOrder"], + "Manager": ["SetNetworkProtocols", "SetManagerNic"] +} + + +def main(): + result = {} + module = AnsibleModule( + argument_spec=dict( + category=dict(required=True), + command=dict(required=True, type='list'), + baseuri=dict(required=True), + username=dict(required=True), + password=dict(required=True, no_log=True), + bios_attribute_name=dict(default='null'), + bios_attribute_value=dict(default='null'), + bios_attributes=dict(type='dict', default={}), + timeout=dict(type='int', default=10), + boot_order=dict(type='list', elements='str', default=[]), + network_protocols=dict( + type='dict', + default={} + ), + resource_id=dict(), + nic_addr=dict(default='null'), + nic_config=dict( + type='dict', + default={} + ) + ), + supports_check_mode=False + ) + + category = module.params['category'] + command_list = module.params['command'] + + # admin credentials used for authentication + creds = {'user': module.params['username'], + 'pswd': module.params['password']} + + # timeout + timeout = module.params['timeout'] + + # BIOS attributes to 
update + bios_attributes = module.params['bios_attributes'] + if module.params['bios_attribute_name'] != 'null': + bios_attributes[module.params['bios_attribute_name']] = module.params[ + 'bios_attribute_value'] + module.deprecate(msg='The bios_attribute_name/bios_attribute_value ' + 'options are deprecated. Use bios_attributes instead', + version='2.14') + + # boot order + boot_order = module.params['boot_order'] + + # System, Manager or Chassis ID to modify + resource_id = module.params['resource_id'] + + # manager nic + nic_addr = module.params['nic_addr'] + nic_config = module.params['nic_config'] + + # Build root URI + root_uri = "https://" + module.params['baseuri'] + rf_utils = RedfishUtils(creds, root_uri, timeout, module, + resource_id=resource_id, data_modification=True) + + # Check that Category is valid + if category not in CATEGORY_COMMANDS_ALL: + module.fail_json(msg=to_native("Invalid Category '%s'. Valid Categories = %s" % (category, CATEGORY_COMMANDS_ALL.keys()))) + + # Check that all commands are valid + for cmd in command_list: + # Fail if even one command given is invalid + if cmd not in CATEGORY_COMMANDS_ALL[category]: + module.fail_json(msg=to_native("Invalid Command '%s'. Valid Commands = %s" % (cmd, CATEGORY_COMMANDS_ALL[category]))) + + # Organize by Categories / Commands + if category == "Systems": + # execute only if we find a System resource + result = rf_utils._find_systems_resource() + if result['ret'] is False: + module.fail_json(msg=to_native(result['msg'])) + + for command in command_list: + if command == "SetBiosDefaultSettings": + result = rf_utils.set_bios_default_settings() + elif command == "SetBiosAttributes": + result = rf_utils.set_bios_attributes(bios_attributes) + elif command == "SetBootOrder": + result = rf_utils.set_boot_order(boot_order) + elif command == "SetDefaultBootOrder": + result = rf_utils.set_default_boot_order() + + elif category == "Manager": + # execute only if we find a Manager service resource + result = rf_utils._find_managers_resource() + if result['ret'] is False: + module.fail_json(msg=to_native(result['msg'])) + + for command in command_list: + if command == "SetNetworkProtocols": + result = rf_utils.set_network_protocols(module.params['network_protocols']) + elif command == "SetManagerNic": + result = rf_utils.set_manager_nic(nic_addr, nic_config) + + # Return data back or fail with proper message + if result['ret'] is True: + module.exit_json(changed=result['changed'], msg=to_native(result['msg'])) + else: + module.fail_json(msg=to_native(result['msg'])) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/remote_management/redfish/redfish_facts.py b/plugins/modules/remote_management/redfish/redfish_facts.py new file mode 120000 index 0000000000..ef039d9a5d --- /dev/null +++ b/plugins/modules/remote_management/redfish/redfish_facts.py @@ -0,0 +1 @@ +redfish_info.py \ No newline at end of file diff --git a/plugins/modules/remote_management/redfish/redfish_info.py b/plugins/modules/remote_management/redfish/redfish_info.py new file mode 100644 index 0000000000..fdbc7944e7 --- /dev/null +++ b/plugins/modules/remote_management/redfish/redfish_info.py @@ -0,0 +1,469 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2017-2018 Dell EMC Inc. 
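The redfish_facts.py symlink above is how the collection keeps the old module name importable after the rename to redfish_info. At runtime the shared code checks which entry point was used via module._name and adapts its return contract, as redfish_info does further down. A condensed sketch of that shim, with the argument spec and the actual data gathering stubbed out:

# Sketch of the *_facts -> *_info rename shim used by these modules; only
# the shim logic mirrors the code below, the rest is stubbed.
from ansible.module_utils.basic import AnsibleModule


def main():
    module = AnsibleModule(argument_spec=dict(), supports_check_mode=False)
    result = {}  # ...gather Redfish data here...

    # The symlinked redfish_facts.py executes this same file, but under the
    # old name, so _name identifies which entry point was invoked.
    is_old_facts = module._name == 'redfish_facts'
    if is_old_facts:
        module.deprecate("The 'redfish_facts' module has been renamed to 'redfish_info', "
                         "and the renamed one no longer returns ansible_facts", version='2.13')
        # The old name keeps the legacy ansible_facts contract alive
        module.exit_json(ansible_facts=dict(redfish_facts=result))
    else:
        module.exit_json(redfish_facts=result)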
+# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'metadata_version': '1.1'} + +DOCUMENTATION = ''' +--- +module: redfish_info +short_description: Manages Out-Of-Band controllers using Redfish APIs +description: + - Builds Redfish URIs locally and sends them to remote OOB controllers to + get information back. + - Information retrieved is placed in a location specified by the user. + - This module was called C(redfish_facts) before Ansible 2.9, returning C(ansible_facts). + Note that the M(redfish_info) module no longer returns C(ansible_facts)! +options: + category: + required: false + description: + - List of categories to execute on OOB controller + default: ['Systems'] + type: list + command: + required: false + description: + - List of commands to execute on OOB controller + type: list + baseuri: + required: true + description: + - Base URI of OOB controller + type: str + username: + required: true + description: + - User for authentication with OOB controller + type: str + password: + required: true + description: + - Password for authentication with OOB controller + type: str + timeout: + description: + - Timeout in seconds for URL requests to OOB controller + default: 10 + type: int + +author: "Jose Delarosa (@jose-delarosa)" +''' + +EXAMPLES = ''' + - name: Get CPU inventory + redfish_info: + category: Systems + command: GetCpuInventory + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + register: result + - debug: + msg: "{{ result.redfish_facts.cpu.entries | to_nice_json }}" + + - name: Get CPU model + redfish_info: + category: Systems + command: GetCpuInventory + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + register: result + - debug: + msg: "{{ result.redfish_facts.cpu.entries.0.Model }}" + + - name: Get memory inventory + redfish_info: + category: Systems + command: GetMemoryInventory + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + register: result + + - name: Get fan inventory with a timeout of 20 seconds + redfish_info: + category: Chassis + command: GetFanInventory + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + timeout: 20 + register: result + + - name: Get Virtual Media information + redfish_info: + category: Manager + command: GetVirtualMedia + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + register: result + - debug: + msg: "{{ result.redfish_facts.virtual_media.entries | to_nice_json }}" + + - name: Get Volume Inventory + redfish_info: + category: Systems + command: GetVolumeInventory + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + register: result + - debug: + msg: "{{ result.redfish_facts.volume.entries | to_nice_json }}" + + - name: Get Session information + redfish_info: + category: Sessions + command: GetSessions + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + register: result + - debug: + msg: "{{ result.redfish_facts.session.entries | to_nice_json }}" + + - name: Get default inventory information + redfish_info: + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + register: result + - debug: + msg: "{{ result.redfish_facts | to_nice_json }}" + + - name: 
Get several inventories + redfish_info: + category: Systems + command: GetNicInventory,GetBiosAttributes + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + + - name: Get default system inventory and user information + redfish_info: + category: Systems,Accounts + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + + - name: Get default system, user and firmware information + redfish_info: + category: ["Systems", "Accounts", "Update"] + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + + - name: Get Manager NIC inventory information + redfish_info: + category: Manager + command: GetManagerNicInventory + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + + - name: Get boot override information + redfish_info: + category: Systems + command: GetBootOverride + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + + - name: Get chassis inventory + redfish_info: + category: Chassis + command: GetChassisInventory + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + + - name: Get all information available in the Manager category + redfish_info: + category: Manager + command: all + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + + - name: Get firmware update capability information + redfish_info: + category: Update + command: GetFirmwareUpdateCapabilities + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + + - name: Get firmware inventory + redfish_info: + category: Update + command: GetFirmwareInventory + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + + - name: Get software inventory + redfish_info: + category: Update + command: GetSoftwareInventory + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + + - name: Get Manager Services + redfish_info: + category: Manager + command: GetNetworkProtocols + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + + - name: Get all information available in all categories + redfish_info: + category: all + command: all + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + + - name: Get system health report + redfish_info: + category: Systems + command: GetHealthReport + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + + - name: Get chassis health report + redfish_info: + category: Chassis + command: GetHealthReport + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + + - name: Get manager health report + redfish_info: + category: Manager + command: GetHealthReport + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" +''' + +RETURN = ''' +result: + description: different results depending on task + returned: always + type: dict + sample: List of CPUs on system +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.redfish_utils import RedfishUtils + +CATEGORY_COMMANDS_ALL = { + "Systems": ["GetSystemInventory", "GetPsuInventory", "GetCpuInventory", + "GetMemoryInventory", "GetNicInventory", "GetHealthReport", + "GetStorageControllerInventory", "GetDiskInventory", "GetVolumeInventory", + "GetBiosAttributes", "GetBootOrder", "GetBootOverride"], + "Chassis": ["GetFanInventory", 
"GetPsuInventory", "GetChassisPower", + "GetChassisThermals", "GetChassisInventory", "GetHealthReport"], + "Accounts": ["ListUsers"], + "Sessions": ["GetSessions"], + "Update": ["GetFirmwareInventory", "GetFirmwareUpdateCapabilities", "GetSoftwareInventory"], + "Manager": ["GetManagerNicInventory", "GetVirtualMedia", "GetLogs", "GetNetworkProtocols", + "GetHealthReport"], +} + +CATEGORY_COMMANDS_DEFAULT = { + "Systems": "GetSystemInventory", + "Chassis": "GetFanInventory", + "Accounts": "ListUsers", + "Update": "GetFirmwareInventory", + "Sessions": "GetSessions", + "Manager": "GetManagerNicInventory" +} + + +def main(): + result = {} + category_list = [] + module = AnsibleModule( + argument_spec=dict( + category=dict(type='list', default=['Systems']), + command=dict(type='list'), + baseuri=dict(required=True), + username=dict(required=True), + password=dict(required=True, no_log=True), + timeout=dict(type='int', default=10) + ), + supports_check_mode=False + ) + is_old_facts = module._name == 'redfish_facts' + if is_old_facts: + module.deprecate("The 'redfish_facts' module has been renamed to 'redfish_info', " + "and the renamed one no longer returns ansible_facts", version='2.13') + + # admin credentials used for authentication + creds = {'user': module.params['username'], + 'pswd': module.params['password']} + + # timeout + timeout = module.params['timeout'] + + # Build root URI + root_uri = "https://" + module.params['baseuri'] + rf_utils = RedfishUtils(creds, root_uri, timeout, module) + + # Build Category list + if "all" in module.params['category']: + for entry in CATEGORY_COMMANDS_ALL: + category_list.append(entry) + else: + # one or more categories specified + category_list = module.params['category'] + + for category in category_list: + command_list = [] + # Build Command list for each Category + if category in CATEGORY_COMMANDS_ALL: + if not module.params['command']: + # True if we don't specify a command --> use default + command_list.append(CATEGORY_COMMANDS_DEFAULT[category]) + elif "all" in module.params['command']: + for entry in range(len(CATEGORY_COMMANDS_ALL[category])): + command_list.append(CATEGORY_COMMANDS_ALL[category][entry]) + # one or more commands + else: + command_list = module.params['command'] + # Verify that all commands are valid + for cmd in command_list: + # Fail if even one command given is invalid + if cmd not in CATEGORY_COMMANDS_ALL[category]: + module.fail_json(msg="Invalid Command: %s" % cmd) + else: + # Fail if even one category given is invalid + module.fail_json(msg="Invalid Category: %s" % category) + + # Organize by Categories / Commands + if category == "Systems": + # execute only if we find a Systems resource + resource = rf_utils._find_systems_resource() + if resource['ret'] is False: + module.fail_json(msg=resource['msg']) + + for command in command_list: + if command == "GetSystemInventory": + result["system"] = rf_utils.get_multi_system_inventory() + elif command == "GetCpuInventory": + result["cpu"] = rf_utils.get_multi_cpu_inventory() + elif command == "GetMemoryInventory": + result["memory"] = rf_utils.get_multi_memory_inventory() + elif command == "GetNicInventory": + result["nic"] = rf_utils.get_multi_nic_inventory(category) + elif command == "GetStorageControllerInventory": + result["storage_controller"] = rf_utils.get_multi_storage_controller_inventory() + elif command == "GetDiskInventory": + result["disk"] = rf_utils.get_multi_disk_inventory() + elif command == "GetVolumeInventory": + result["volume"] = 
rf_utils.get_multi_volume_inventory() + elif command == "GetBiosAttributes": + result["bios_attribute"] = rf_utils.get_multi_bios_attributes() + elif command == "GetBootOrder": + result["boot_order"] = rf_utils.get_multi_boot_order() + elif command == "GetBootOverride": + result["boot_override"] = rf_utils.get_multi_boot_override() + elif command == "GetHealthReport": + result["health_report"] = rf_utils.get_multi_system_health_report() + + elif category == "Chassis": + # execute only if we find Chassis resource + resource = rf_utils._find_chassis_resource() + if resource['ret'] is False: + module.fail_json(msg=resource['msg']) + + for command in command_list: + if command == "GetFanInventory": + result["fan"] = rf_utils.get_fan_inventory() + elif command == "GetPsuInventory": + result["psu"] = rf_utils.get_psu_inventory() + elif command == "GetChassisThermals": + result["thermals"] = rf_utils.get_chassis_thermals() + elif command == "GetChassisPower": + result["chassis_power"] = rf_utils.get_chassis_power() + elif command == "GetChassisInventory": + result["chassis"] = rf_utils.get_chassis_inventory() + elif command == "GetHealthReport": + result["health_report"] = rf_utils.get_multi_chassis_health_report() + + elif category == "Accounts": + # execute only if we find an Account service resource + resource = rf_utils._find_accountservice_resource() + if resource['ret'] is False: + module.fail_json(msg=resource['msg']) + + for command in command_list: + if command == "ListUsers": + result["user"] = rf_utils.list_users() + + elif category == "Update": + # execute only if we find UpdateService resources + resource = rf_utils._find_updateservice_resource() + if resource['ret'] is False: + module.fail_json(msg=resource['msg']) + + for command in command_list: + if command == "GetFirmwareInventory": + result["firmware"] = rf_utils.get_firmware_inventory() + elif command == "GetSoftwareInventory": + result["software"] = rf_utils.get_software_inventory() + elif command == "GetFirmwareUpdateCapabilities": + result["firmware_update_capabilities"] = rf_utils.get_firmware_update_capabilities() + + elif category == "Sessions": + # execute only if we find SessionService resources + resource = rf_utils._find_sessionservice_resource() + if resource['ret'] is False: + module.fail_json(msg=resource['msg']) + + for command in command_list: + if command == "GetSessions": + result["session"] = rf_utils.get_sessions() + + elif category == "Manager": + # execute only if we find a Manager service resource + resource = rf_utils._find_managers_resource() + if resource['ret'] is False: + module.fail_json(msg=resource['msg']) + + for command in command_list: + if command == "GetManagerNicInventory": + result["manager_nics"] = rf_utils.get_multi_nic_inventory(category) + elif command == "GetVirtualMedia": + result["virtual_media"] = rf_utils.get_multi_virtualmedia() + elif command == "GetLogs": + result["log"] = rf_utils.get_logs() + elif command == "GetNetworkProtocols": + result["network_protocols"] = rf_utils.get_network_protocols() + elif command == "GetHealthReport": + result["health_report"] = rf_utils.get_multi_manager_health_report() + + # Return data back + if is_old_facts: + module.exit_json(ansible_facts=dict(redfish_facts=result)) + else: + module.exit_json(redfish_facts=result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/remote_management/stacki/stacki_host.py b/plugins/modules/remote_management/stacki/stacki_host.py new file mode 100644 index 0000000000..4ec65b67ed --- /dev/null 
+++ b/plugins/modules/remote_management/stacki/stacki_host.py
@@ -0,0 +1,269 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2016, Hugh Ma
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+DOCUMENTATION = '''
+---
+module: stacki_host
+short_description: Add or remove a host to a Stacki front-end
+description:
+  - Use this module to add hosts to or remove hosts from a Stacki front-end via its API.
+  - U(https://github.com/StackIQ/stacki)
+options:
+  name:
+    description:
+      - Name of the host to be added to Stacki.
+    required: true
+  stacki_user:
+    description:
+      - Username for authenticating with the Stacki API. If not specified, the
+        environment variable C(stacki_user) is used instead.
+    required: true
+  stacki_password:
+    description:
+      - Password for authenticating with the Stacki API. If not specified, the
+        environment variable C(stacki_password) is used instead.
+    required: true
+  stacki_endpoint:
+    description:
+      - URL for the Stacki API endpoint.
+    required: true
+  prim_intf_mac:
+    description:
+      - MAC address for the primary PXE boot network interface.
+  prim_intf_ip:
+    description:
+      - IP address for the primary network interface.
+  prim_intf:
+    description:
+      - Name of the primary network interface.
+  force_install:
+    description:
+      - Set to C(true) to force the node into the install state if it already exists in Stacki.
+    type: bool
+author:
+- Hugh Ma (@bbyhuy)
+'''
+
+EXAMPLES = '''
+- name: Add a host named test-1
+  stacki_host:
+    name: test-1
+    stacki_user: usr
+    stacki_password: pwd
+    stacki_endpoint: url
+    prim_intf_mac: mac_addr
+    prim_intf_ip: x.x.x.x
+    prim_intf: eth0
+
+- name: Remove a host named test-1
+  stacki_host:
+    name: test-1
+    stacki_user: usr
+    stacki_password: pwd
+    stacki_endpoint: url
+    state: absent
+'''
+
+RETURN = '''
+changed:
+  description: Whether or not the API call completed successfully.
+  returned: always
+  type: bool
+  sample: true
+
+stdout:
+  description: The set of responses from the commands.
+  returned: always
+  type: list
+  sample: ['...', '...']
+
+stdout_lines:
+  description: The value of stdout split into a list.
+  returned: always
+  type: list
+  sample: [['...', '...'], ['...'], ['...']]
+'''
+
+import json
+import os
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves.urllib.parse import urlencode
+from ansible.module_utils.urls import fetch_url
+from ansible.module_utils._text import to_native
+
+
+class StackiHost(object):
+
+    def __init__(self, module):
+        self.module = module
+        self.hostname = module.params['name']
+        self.rack = module.params['rack']
+        self.rank = module.params['rank']
+        self.appliance = module.params['appliance']
+        self.prim_intf = module.params['prim_intf']
+        self.prim_intf_ip = module.params['prim_intf_ip']
+        self.network = module.params['network']
+        self.prim_intf_mac = module.params['prim_intf_mac']
+        self.endpoint = module.params['stacki_endpoint']
+
+        auth_creds = {'USERNAME': module.params['stacki_user'],
+                      'PASSWORD': module.params['stacki_password']}
+
+        # Get Initial CSRF
+        cred_a = self.do_request(self.module, self.endpoint, method="GET")
+        cookie_a = cred_a.headers.get('Set-Cookie').split(';')
+        init_csrftoken = None
+        for c in cookie_a:
+            if "csrftoken" in c:
+                init_csrftoken = c.replace("csrftoken=", "")
+                init_csrftoken = init_csrftoken.rstrip("\r\n")
+                break
+
+        # Make Header Dictionary with initial CSRF
+        header = {'csrftoken': init_csrftoken, 'X-CSRFToken': init_csrftoken,
+                  'Content-type': 'application/x-www-form-urlencoded', 'Cookie': cred_a.headers.get('Set-Cookie')}
+
+        # Endpoint to get final authentication header
+        login_endpoint = self.endpoint + "/login"
+
+        # Get Final CSRF and Session ID
+        login_req = self.do_request(self.module, login_endpoint, headers=header,
+                                    payload=urlencode(auth_creds), method='POST')
+
+        cookie_f = login_req.headers.get('Set-Cookie').split(';')
+        csrftoken = None
+        sessionid = None
+        for f in cookie_f:
+            if "csrftoken" in f:
+                csrftoken = f.replace("csrftoken=", "")
+            if "sessionid" in f:
+                sessionid = f.split("sessionid=", 1)[-1]
+                sessionid = sessionid.rstrip("\r\n")
+
+        self.header = {'csrftoken': csrftoken,
+                       'X-CSRFToken': csrftoken,
+                       'sessionid': sessionid,
+                       'Content-type': 'application/json',
+                       'Cookie': login_req.headers.get('Set-Cookie')}
+
+    def do_request(self, module, url, payload=None, headers=None, method=None):
+        res, info = fetch_url(module, url, data=payload, headers=headers, method=method)
+
+        if info['status'] != 200:
+            self.module.fail_json(changed=False, msg=info['msg'])
+
+        return res
+
+    def stack_check_host(self):
+        res = self.do_request(self.module, self.endpoint, payload=json.dumps({"cmd": "list host"}), headers=self.header, method="POST")
+
+        # res.read() returns bytes on Python 3, so normalize to text before searching
+        if self.hostname in to_native(res.read()):
+            return True
+        else:
+            return False
+
+    def stack_sync(self):
+        self.do_request(self.module, self.endpoint, payload=json.dumps({"cmd": "sync config"}), headers=self.header, method="POST")
+        self.do_request(self.module, self.endpoint, payload=json.dumps({"cmd": "sync host config"}), headers=self.header, method="POST")
+
+    def stack_force_install(self, result):
+        data = dict()
+
+        data['cmd'] = "set host boot {0} action=install" \
+            .format(self.hostname)
+        self.do_request(self.module, self.endpoint, payload=json.dumps(data), headers=self.header, method="POST")
+
+        self.stack_sync()
+
+        result['changed'] = True
+        result['stdout'] = "api call successful".rstrip("\r\n")
+
+    def stack_add(self, result):
+        data = dict()
+
+        data['cmd'] = "add host {0} rack={1} rank={2} appliance={3}"\
+            .format(self.hostname, self.rack, self.rank, self.appliance)
+        self.do_request(self.module, self.endpoint, payload=json.dumps(data), headers=self.header, method="POST")
+
+        self.stack_sync()
+
+        result['changed'] = True
+        result['stdout'] = "api call successful".rstrip("\r\n")
+
+    def stack_remove(self, result):
+        data = dict()
+
+        data['cmd'] = "remove host {0}"\
+            .format(self.hostname)
+        self.do_request(self.module, self.endpoint, payload=json.dumps(data), headers=self.header, method="POST")
+
+        self.stack_sync()
+
+        result['changed'] = True
+        result['stdout'] = "api call successful".rstrip("\r\n")
+
+
+def main():
+    module = AnsibleModule(
+        argument_spec=dict(
+            state=dict(type='str', default='present', choices=['absent', 'present']),
+            name=dict(type='str', required=True),
+            rack=dict(type='int', default=0),
+            rank=dict(type='int', default=0),
+            appliance=dict(type='str', default='backend'),
+            prim_intf=dict(type='str'),
+            prim_intf_ip=dict(type='str'),
+            network=dict(type='str', default='private'),
+            prim_intf_mac=dict(type='str'),
+            stacki_user=dict(type='str', required=True, default=os.environ.get('stacki_user')),
+            stacki_password=dict(type='str', required=True, default=os.environ.get('stacki_password'), no_log=True),
+            stacki_endpoint=dict(type='str', required=True, default=os.environ.get('stacki_endpoint')),
+            force_install=dict(type='bool', default=False),
+        ),
+        supports_check_mode=False,
+    )
+
+    result = {'changed': False}
+    missing_params = list()
+
+    stacki = StackiHost(module)
+    host_exists = stacki.stack_check_host()
+
+    # If state is present and the host exists, the force_install flag is needed to put the host back into the install state
+    if module.params['state'] == 'present' and host_exists and module.params['force_install']:
+        stacki.stack_force_install(result)
+    # If state is present and the host exists but force_install is false, do nothing
+    elif module.params['state'] == 'present' and host_exists and not module.params['force_install']:
+        result['stdout'] = "{0} already exists. Set 'force_install' to true to bootstrap"\
+            .format(module.params['name'])
+    # Otherwise, state is present but the host doesn't exist, so more params are required to add the host
+    elif module.params['state'] == 'present' and not host_exists:
+        for param in ['appliance', 'prim_intf',
+                      'prim_intf_ip', 'network', 'prim_intf_mac']:
+            if not module.params[param]:
+                missing_params.append(param)
+        if len(missing_params) > 0:
+            module.fail_json(msg="missing required arguments: {0}".format(missing_params))
+
+        stacki.stack_add(result)
+    # If state is absent and the host exists, remove it
+    elif module.params['state'] == 'absent' and host_exists:
+        stacki.stack_remove(result)
+
+    module.exit_json(**result)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/remote_management/wakeonlan.py b/plugins/modules/remote_management/wakeonlan.py
new file mode 100644
index 0000000000..2287168a67
--- /dev/null
+++ b/plugins/modules/remote_management/wakeonlan.py
@@ -0,0 +1,132 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2016, Dag Wieers
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+DOCUMENTATION = r'''
+---
+module: wakeonlan
+short_description: Send a magic Wake-on-LAN (WoL) broadcast packet
+description:
+- The C(wakeonlan) module sends magic Wake-on-LAN (WoL) broadcast packets.
+options:
+  mac:
+    description:
+    - MAC address to send Wake-on-LAN broadcast packet for.
+    required: true
+  broadcast:
+    description:
+    - Network broadcast address to use for broadcasting magic Wake-on-LAN packet.
+    default: 255.255.255.255
+  port:
+    description:
+    - UDP port to use for magic Wake-on-LAN packet.
+    default: 7
+todo:
+  - Add arping support to check whether the system is up (before and after).
+  - Enable check-mode support (when we have arping support).
+  - Add SecureOn password support.
+notes:
+  - This module sends a magic packet, without knowing whether it worked.
+  - Only works if the target system was properly configured for Wake-on-LAN (in the BIOS and/or the OS).
+  - Some BIOSes have a different (configurable) Wake-on-LAN boot order (i.e. PXE first).
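For reference, the packet the module below builds is the standard WoL "magic packet": six 0xFF bytes followed by repetitions of the MAC address (sixteen repetitions is the usual minimum; the module sends twenty, which receivers accept just as well). A self-contained Python 3 sketch of the same construction, outside of Ansible:

# Standalone magic-packet sender; mirrors what the module below does,
# minus the AnsibleModule plumbing. Python 3 only (uses bytes.fromhex).
import socket


def send_magic_packet(mac, broadcast='255.255.255.255', port=7):
    # Strip the common separators and keep the 12 hex digits
    mac_hex = mac.replace(':', '').replace('-', '')
    if len(mac_hex) != 12:
        raise ValueError('Incorrect MAC address length: %s' % mac)
    # 6 x 0xFF header, then the MAC repeated 16 times
    payload = b'\xff' * 6 + bytes.fromhex(mac_hex) * 16
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
        sock.sendto(payload, (broadcast, port))
    finally:
        sock.close()


# send_magic_packet('00:00:5E:00:53:66')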
+seealso: +- module: community.windows.win_wakeonlan +author: +- Dag Wieers (@dagwieers) +''' + +EXAMPLES = r''' +- name: Send a magic Wake-on-LAN packet to 00:00:5E:00:53:66 + wakeonlan: + mac: '00:00:5E:00:53:66' + broadcast: 192.0.2.23 + delegate_to: localhost + +- wakeonlan: + mac: 00:00:5E:00:53:66 + port: 9 + delegate_to: localhost +''' + +RETURN = r''' +# Default return values +''' +import socket +import struct +import traceback + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_native + + +def wakeonlan(module, mac, broadcast, port): + """ Send a magic Wake-on-LAN packet. """ + + mac_orig = mac + + # Remove possible separator from MAC address + if len(mac) == 12 + 5: + mac = mac.replace(mac[2], '') + + # If we don't end up with 12 hexadecimal characters, fail + if len(mac) != 12: + module.fail_json(msg="Incorrect MAC address length: %s" % mac_orig) + + # Test if it converts to an integer, otherwise fail + try: + int(mac, 16) + except ValueError: + module.fail_json(msg="Incorrect MAC address format: %s" % mac_orig) + + # Create payload for magic packet + data = b'' + padding = ''.join(['FFFFFFFFFFFF', mac * 20]) + for i in range(0, len(padding), 2): + data = b''.join([data, struct.pack('B', int(padding[i: i + 2], 16))]) + + # Broadcast payload to network + sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) + sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1) + + if not module.check_mode: + + try: + sock.sendto(data, (broadcast, port)) + except socket.error as e: + sock.close() + module.fail_json(msg=to_native(e), exception=traceback.format_exc()) + + sock.close() + + +def main(): + module = AnsibleModule( + argument_spec=dict( + mac=dict(type='str', required=True), + broadcast=dict(type='str', default='255.255.255.255'), + port=dict(type='int', default=7), + ), + supports_check_mode=True, + ) + + mac = module.params['mac'] + broadcast = module.params['broadcast'] + port = module.params['port'] + + wakeonlan(module, mac, broadcast, port) + + module.exit_json(changed=True) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/source_control/bitbucket/bitbucket_access_key.py b/plugins/modules/source_control/bitbucket/bitbucket_access_key.py new file mode 100644 index 0000000000..95968404b4 --- /dev/null +++ b/plugins/modules/source_control/bitbucket/bitbucket_access_key.py @@ -0,0 +1,284 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2019, Evgeniy Krysanov +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = { + 'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community', +} + +DOCUMENTATION = r''' +--- +module: bitbucket_access_key +short_description: Manages Bitbucket repository access keys +description: + - Manages Bitbucket repository access keys (also called deploy keys). +author: + - Evgeniy Krysanov (@catcombo) +options: + client_id: + description: + - The OAuth consumer key. + - If not set the environment variable C(BITBUCKET_CLIENT_ID) will be used. + type: str + client_secret: + description: + - The OAuth consumer secret. + - If not set the environment variable C(BITBUCKET_CLIENT_SECRET) will be used. + type: str + repository: + description: + - The repository name. + type: str + required: true + username: + description: + - The repository owner. 
+ type: str + required: true + key: + description: + - The SSH public key. + type: str + label: + description: + - The key label. + type: str + required: true + state: + description: + - Indicates desired state of the access key. + type: str + required: true + choices: [ absent, present ] +notes: + - Bitbucket OAuth consumer key and secret can be obtained from Bitbucket profile -> Settings -> Access Management -> OAuth. + - Bitbucket OAuth consumer should have permissions to read and administrate account repositories. + - Check mode is supported. +''' + +EXAMPLES = r''' +- name: Create access key + bitbucket_access_key: + repository: 'bitbucket-repo' + username: bitbucket_username + key: '{{lookup("file", "bitbucket.pub") }}' + label: 'Bitbucket' + state: present + +- name: Delete access key + bitbucket_access_key: + repository: bitbucket-repo + username: bitbucket_username + label: Bitbucket + state: absent +''' + +RETURN = r''' # ''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.source_control.bitbucket import BitbucketHelper + +error_messages = { + 'required_key': '`key` is required when the `state` is `present`', + 'required_permission': 'OAuth consumer `client_id` should have permissions to read and administrate the repository', + 'invalid_username_or_repo': 'Invalid `repository` or `username`', + 'invalid_key': 'Invalid SSH key or key is already in use', +} + +BITBUCKET_API_ENDPOINTS = { + 'deploy-key-list': '%s/2.0/repositories/{username}/{repo_slug}/deploy-keys/' % BitbucketHelper.BITBUCKET_API_URL, + 'deploy-key-detail': '%s/2.0/repositories/{username}/{repo_slug}/deploy-keys/{key_id}' % BitbucketHelper.BITBUCKET_API_URL, +} + + +def get_existing_deploy_key(module, bitbucket): + """ + Search for an existing deploy key on Bitbucket + with the label specified in module param `label` + + :param module: instance of the :class:`AnsibleModule` + :param bitbucket: instance of the :class:`BitbucketHelper` + :return: existing deploy key or None if not found + :rtype: dict or None + + Return example:: + + { + "id": 123, + "label": "mykey", + "created_on": "2019-03-23T10:15:21.517377+00:00", + "key": "ssh-rsa AAAAB3NzaC1yc2EAAAADA...AdkTg7HGqL3rlaDrEcWfL7Lu6TnhBdq5", + "type": "deploy_key", + "comment": "", + "last_used": None, + "repository": { + "links": { + "self": { + "href": "https://api.bitbucket.org/2.0/repositories/mleu/test" + }, + "html": { + "href": "https://bitbucket.org/mleu/test" + }, + "avatar": { + "href": "..." 
+ } + }, + "type": "repository", + "name": "test", + "full_name": "mleu/test", + "uuid": "{85d08b4e-571d-44e9-a507-fa476535aa98}" + }, + "links": { + "self": { + "href": "https://api.bitbucket.org/2.0/repositories/mleu/test/deploy-keys/123" + } + }, + } + """ + content = { + 'next': BITBUCKET_API_ENDPOINTS['deploy-key-list'].format( + username=module.params['username'], + repo_slug=module.params['repository'], + ) + } + + # Look through the all response pages in search of deploy key we need + while 'next' in content: + info, content = bitbucket.request( + api_url=content['next'], + method='GET', + ) + + if info['status'] == 404: + module.fail_json(msg=error_messages['invalid_username_or_repo']) + + if info['status'] == 403: + module.fail_json(msg=error_messages['required_permission']) + + if info['status'] != 200: + module.fail_json(msg='Failed to retrieve the list of deploy keys: {0}'.format(info)) + + res = next(iter(filter(lambda v: v['label'] == module.params['label'], content['values'])), None) + + if res is not None: + return res + + return None + + +def create_deploy_key(module, bitbucket): + info, content = bitbucket.request( + api_url=BITBUCKET_API_ENDPOINTS['deploy-key-list'].format( + username=module.params['username'], + repo_slug=module.params['repository'], + ), + method='POST', + data={ + 'key': module.params['key'], + 'label': module.params['label'], + }, + ) + + if info['status'] == 404: + module.fail_json(msg=error_messages['invalid_username_or_repo']) + + if info['status'] == 403: + module.fail_json(msg=error_messages['required_permission']) + + if info['status'] == 400: + module.fail_json(msg=error_messages['invalid_key']) + + if info['status'] != 200: + module.fail_json(msg='Failed to create deploy key `{label}`: {info}'.format( + label=module.params['label'], + info=info, + )) + + +def delete_deploy_key(module, bitbucket, key_id): + info, content = bitbucket.request( + api_url=BITBUCKET_API_ENDPOINTS['deploy-key-detail'].format( + username=module.params['username'], + repo_slug=module.params['repository'], + key_id=key_id, + ), + method='DELETE', + ) + + if info['status'] == 404: + module.fail_json(msg=error_messages['invalid_username_or_repo']) + + if info['status'] == 403: + module.fail_json(msg=error_messages['required_permission']) + + if info['status'] != 204: + module.fail_json(msg='Failed to delete deploy key `{label}`: {info}'.format( + label=module.params['label'], + info=info, + )) + + +def main(): + argument_spec = BitbucketHelper.bitbucket_argument_spec() + argument_spec.update( + repository=dict(type='str', required=True), + username=dict(type='str', required=True), + key=dict(type='str'), + label=dict(type='str', required=True), + state=dict(type='str', choices=['present', 'absent'], required=True), + ) + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + + bitbucket = BitbucketHelper(module) + + key = module.params['key'] + state = module.params['state'] + + # Check parameters + if (key is None) and (state == 'present'): + module.fail_json(msg=error_messages['required_key']) + + # Retrieve access token for authorized API requests + bitbucket.fetch_access_token() + + # Retrieve existing deploy key (if any) + existing_deploy_key = get_existing_deploy_key(module, bitbucket) + changed = False + + # Create new deploy key in case it doesn't exists + if not existing_deploy_key and (state == 'present'): + if not module.check_mode: + create_deploy_key(module, bitbucket) + changed = True + + # Update deploy key if the old 
value does not match the new one + elif existing_deploy_key and (state == 'present'): + if not key.startswith(existing_deploy_key.get('key')): + if not module.check_mode: + # Bitbucket doesn't support update key for the same label, + # so we need to delete the old one first + delete_deploy_key(module, bitbucket, existing_deploy_key['id']) + create_deploy_key(module, bitbucket) + changed = True + + # Delete deploy key + elif existing_deploy_key and (state == 'absent'): + if not module.check_mode: + delete_deploy_key(module, bitbucket, existing_deploy_key['id']) + changed = True + + module.exit_json(changed=changed) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/source_control/bitbucket/bitbucket_pipeline_key_pair.py b/plugins/modules/source_control/bitbucket/bitbucket_pipeline_key_pair.py new file mode 100644 index 0000000000..52ad599274 --- /dev/null +++ b/plugins/modules/source_control/bitbucket/bitbucket_pipeline_key_pair.py @@ -0,0 +1,212 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2019, Evgeniy Krysanov +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = { + 'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community', +} + +DOCUMENTATION = r''' +--- +module: bitbucket_pipeline_key_pair +short_description: Manages Bitbucket pipeline SSH key pair +description: + - Manages Bitbucket pipeline SSH key pair. +author: + - Evgeniy Krysanov (@catcombo) +options: + client_id: + description: + - OAuth consumer key. + - If not set the environment variable C(BITBUCKET_CLIENT_ID) will be used. + type: str + client_secret: + description: + - OAuth consumer secret. + - If not set the environment variable C(BITBUCKET_CLIENT_SECRET) will be used. + type: str + repository: + description: + - The repository name. + type: str + required: true + username: + description: + - The repository owner. + type: str + required: true + public_key: + description: + - The public key. + type: str + private_key: + description: + - The private key. + type: str + state: + description: + - Indicates desired state of the key pair. + type: str + required: true + choices: [ absent, present ] +notes: + - Bitbucket OAuth consumer key and secret can be obtained from Bitbucket profile -> Settings -> Access Management -> OAuth. + - Check mode is supported. 
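+  - The private key is marked C(no_log), so its value is never returned or written to the Ansible log.
+  - Bitbucket never returns the private part of an existing key pair, so the module compares only I(public_key) when deciding whether an update is needed.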
+''' + +EXAMPLES = r''' +- name: Create or update SSH key pair + bitbucket_pipeline_key_pair: + repository: 'bitbucket-repo' + username: bitbucket_username + public_key: '{{lookup("file", "bitbucket.pub") }}' + private_key: '{{lookup("file", "bitbucket") }}' + state: present + +- name: Remove SSH key pair + bitbucket_pipeline_key_pair: + repository: bitbucket-repo + username: bitbucket_username + state: absent +''' + +RETURN = r''' # ''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.source_control.bitbucket import BitbucketHelper + +error_messages = { + 'invalid_params': 'Account, repository or SSH key pair was not found', + 'required_keys': '`public_key` and `private_key` are required when the `state` is `present`', +} + +BITBUCKET_API_ENDPOINTS = { + 'ssh-key-pair': '%s/2.0/repositories/{username}/{repo_slug}/pipelines_config/ssh/key_pair' % BitbucketHelper.BITBUCKET_API_URL, +} + + +def get_existing_ssh_key_pair(module, bitbucket): + """ + Retrieves an existing ssh key pair from repository + specified in module param `repository` + + :param module: instance of the :class:`AnsibleModule` + :param bitbucket: instance of the :class:`BitbucketHelper` + :return: existing key pair or None if not found + :rtype: dict or None + + Return example:: + + { + "public_key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQ...2E8HAeT", + "type": "pipeline_ssh_key_pair" + } + """ + api_url = BITBUCKET_API_ENDPOINTS['ssh-key-pair'].format( + username=module.params['username'], + repo_slug=module.params['repository'], + ) + + info, content = bitbucket.request( + api_url=api_url, + method='GET', + ) + + if info['status'] == 404: + # Account, repository or SSH key pair was not found. + return None + + return content + + +def update_ssh_key_pair(module, bitbucket): + info, content = bitbucket.request( + api_url=BITBUCKET_API_ENDPOINTS['ssh-key-pair'].format( + username=module.params['username'], + repo_slug=module.params['repository'], + ), + method='PUT', + data={ + 'private_key': module.params['private_key'], + 'public_key': module.params['public_key'], + }, + ) + + if info['status'] == 404: + module.fail_json(msg=error_messages['invalid_params']) + + if info['status'] != 200: + module.fail_json(msg='Failed to create or update pipeline ssh key pair : {0}'.format(info)) + + +def delete_ssh_key_pair(module, bitbucket): + info, content = bitbucket.request( + api_url=BITBUCKET_API_ENDPOINTS['ssh-key-pair'].format( + username=module.params['username'], + repo_slug=module.params['repository'], + ), + method='DELETE', + ) + + if info['status'] == 404: + module.fail_json(msg=error_messages['invalid_params']) + + if info['status'] != 204: + module.fail_json(msg='Failed to delete pipeline ssh key pair: {0}'.format(info)) + + +def main(): + argument_spec = BitbucketHelper.bitbucket_argument_spec() + argument_spec.update( + repository=dict(type='str', required=True), + username=dict(type='str', required=True), + public_key=dict(type='str'), + private_key=dict(type='str', no_log=True), + state=dict(type='str', choices=['present', 'absent'], required=True), + ) + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + + bitbucket = BitbucketHelper(module) + + state = module.params['state'] + public_key = module.params['public_key'] + private_key = module.params['private_key'] + + # Check parameters + if ((public_key is None) or (private_key is None)) and (state == 'present'): + 
module.fail_json(msg=error_messages['required_keys']) + + # Retrieve access token for authorized API requests + bitbucket.fetch_access_token() + + # Retrieve existing ssh key + key_pair = get_existing_ssh_key_pair(module, bitbucket) + changed = False + + # Create or update key pair + if (not key_pair or (key_pair.get('public_key') != public_key)) and (state == 'present'): + if not module.check_mode: + update_ssh_key_pair(module, bitbucket) + changed = True + + # Delete key pair + elif key_pair and (state == 'absent'): + if not module.check_mode: + delete_ssh_key_pair(module, bitbucket) + changed = True + + module.exit_json(changed=changed) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/source_control/bitbucket/bitbucket_pipeline_known_host.py b/plugins/modules/source_control/bitbucket/bitbucket_pipeline_known_host.py new file mode 100644 index 0000000000..27d78f8450 --- /dev/null +++ b/plugins/modules/source_control/bitbucket/bitbucket_pipeline_known_host.py @@ -0,0 +1,309 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2019, Evgeniy Krysanov +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = { + 'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community', +} + +DOCUMENTATION = r''' +--- +module: bitbucket_pipeline_known_host +short_description: Manages Bitbucket pipeline known hosts +description: + - Manages Bitbucket pipeline known hosts under the "SSH Keys" menu. + - The host fingerprint will be retrieved automatically, but in case of an error, one can use I(key) field to specify it manually. +author: + - Evgeniy Krysanov (@catcombo) +requirements: + - paramiko +options: + client_id: + description: + - The OAuth consumer key. + - If not set the environment variable C(BITBUCKET_CLIENT_ID) will be used. + type: str + client_secret: + description: + - The OAuth consumer secret. + - If not set the environment variable C(BITBUCKET_CLIENT_SECRET) will be used. + type: str + repository: + description: + - The repository name. + type: str + required: true + username: + description: + - The repository owner. + type: str + required: true + name: + description: + - The FQDN of the known host. + type: str + required: true + key: + description: + - The public key. + type: str + state: + description: + - Indicates desired state of the record. + type: str + required: true + choices: [ absent, present ] +notes: + - Bitbucket OAuth consumer key and secret can be obtained from Bitbucket profile -> Settings -> Access Management -> OAuth. + - Check mode is supported. 
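+  - When I(key) is not set, the module connects to the host on port 22 to retrieve the host key itself, which is why C(paramiko) is required only in that case.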
+''' + +EXAMPLES = r''' +- name: Create known hosts from the list + bitbucket_pipeline_known_host: + repository: 'bitbucket-repo' + username: bitbucket_username + name: '{{ item }}' + state: present + with_items: + - bitbucket.org + - example.com + +- name: Remove known host + bitbucket_pipeline_known_host: + repository: bitbucket-repo + username: bitbucket_username + name: bitbucket.org + state: absent + +- name: Specify public key file + bitbucket_pipeline_known_host: + repository: bitbucket-repo + username: bitbucket_username + name: bitbucket.org + key: '{{lookup("file", "bitbucket.pub") }}' + state: absent +''' + +RETURN = r''' # ''' + +import socket + +try: + import paramiko + HAS_PARAMIKO = True +except ImportError: + HAS_PARAMIKO = False + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.source_control.bitbucket import BitbucketHelper + +error_messages = { + 'invalid_params': 'Account or repository was not found', + 'unknown_key_type': 'Public key type is unknown', +} + +BITBUCKET_API_ENDPOINTS = { + 'known-host-list': '%s/2.0/repositories/{username}/{repo_slug}/pipelines_config/ssh/known_hosts/' % BitbucketHelper.BITBUCKET_API_URL, + 'known-host-detail': '%s/2.0/repositories/{username}/{repo_slug}/pipelines_config/ssh/known_hosts/{known_host_uuid}' % BitbucketHelper.BITBUCKET_API_URL, +} + + +def get_existing_known_host(module, bitbucket): + """ + Search for a host in Bitbucket pipelines known hosts + with the name specified in module param `name` + + :param module: instance of the :class:`AnsibleModule` + :param bitbucket: instance of the :class:`BitbucketHelper` + :return: existing host or None if not found + :rtype: dict or None + + Return example:: + + { + 'type': 'pipeline_known_host', + 'uuid': '{21cc0590-bebe-4fae-8baf-03722704119a7}' + 'hostname': 'bitbucket.org', + 'public_key': { + 'type': 'pipeline_ssh_public_key', + 'md5_fingerprint': 'md5:97:8c:1b:f2:6f:14:6b:4b:3b:ec:aa:46:46:74:7c:40', + 'sha256_fingerprint': 'SHA256:zzXQOXSFBEiUtuE8AikoYKwbHaxvSc0ojez9YXaGp1A', + 'key_type': 'ssh-rsa', + 'key': 'AAAAB3NzaC1yc2EAAAABIwAAAQEAubiN81eDcafrgMeLzaFPsw2kN...seeFVBoGqzHM9yXw==' + }, + } + """ + content = { + 'next': BITBUCKET_API_ENDPOINTS['known-host-list'].format( + username=module.params['username'], + repo_slug=module.params['repository'], + ) + } + + # Look through all response pages in search of hostname we need + while 'next' in content: + info, content = bitbucket.request( + api_url=content['next'], + method='GET', + ) + + if info['status'] == 404: + module.fail_json(msg='Invalid `repository` or `username`.') + + if info['status'] != 200: + module.fail_json(msg='Failed to retrieve list of known hosts: {0}'.format(info)) + + host = next(filter(lambda v: v['hostname'] == module.params['name'], content['values']), None) + + if host is not None: + return host + + return None + + +def get_host_key(module, hostname): + """ + Fetches public key for specified host + + :param module: instance of the :class:`AnsibleModule` + :param hostname: host name + :return: key type and key content + :rtype: tuple + + Return example:: + + ( + 'ssh-rsa', + 'AAAAB3NzaC1yc2EAAAABIwAAA...SBne8+seeFVBoGqzHM9yXw==', + ) + """ + try: + sock = socket.socket() + sock.connect((hostname, 22)) + except socket.error: + module.fail_json(msg='Error opening socket to {0}'.format(hostname)) + + try: + trans = paramiko.transport.Transport(sock) + trans.start_client() + host_key = trans.get_remote_server_key() + except 
paramiko.SSHException:
+        module.fail_json(msg='SSH error on retrieving {0} server key'.format(hostname))
+
+    trans.close()
+    sock.close()
+
+    key_type = host_key.get_name()
+    key = host_key.get_base64()
+
+    return key_type, key
+
+
+def create_known_host(module, bitbucket):
+    hostname = module.params['name']
+    key_param = module.params['key']
+
+    if key_param is None:
+        key_type, key = get_host_key(module, hostname)
+    elif ' ' in key_param:
+        key_type, key = key_param.split(' ', 1)
+    else:
+        module.fail_json(msg=error_messages['unknown_key_type'])
+
+    info, content = bitbucket.request(
+        api_url=BITBUCKET_API_ENDPOINTS['known-host-list'].format(
+            username=module.params['username'],
+            repo_slug=module.params['repository'],
+        ),
+        method='POST',
+        data={
+            'hostname': hostname,
+            'public_key': {
+                'key_type': key_type,
+                'key': key,
+            }
+        },
+    )
+
+    if info['status'] == 404:
+        module.fail_json(msg=error_messages['invalid_params'])
+
+    if info['status'] != 201:
+        module.fail_json(msg='Failed to create known host `{hostname}`: {info}'.format(
+            hostname=module.params['name'],
+            info=info,
+        ))
+
+
+def delete_known_host(module, bitbucket, known_host_uuid):
+    info, content = bitbucket.request(
+        api_url=BITBUCKET_API_ENDPOINTS['known-host-detail'].format(
+            username=module.params['username'],
+            repo_slug=module.params['repository'],
+            known_host_uuid=known_host_uuid,
+        ),
+        method='DELETE',
+    )
+
+    if info['status'] == 404:
+        module.fail_json(msg=error_messages['invalid_params'])
+
+    if info['status'] != 204:
+        module.fail_json(msg='Failed to delete known host `{hostname}`: {info}'.format(
+            hostname=module.params['name'],
+            info=info,
+        ))
+
+
+def main():
+    argument_spec = BitbucketHelper.bitbucket_argument_spec()
+    argument_spec.update(
+        repository=dict(type='str', required=True),
+        username=dict(type='str', required=True),
+        name=dict(type='str', required=True),
+        key=dict(type='str'),
+        state=dict(type='str', choices=['present', 'absent'], required=True),
+    )
+    module = AnsibleModule(
+        argument_spec=argument_spec,
+        supports_check_mode=True,
+    )
+
+    if (module.params['key'] is None) and (not HAS_PARAMIKO):
+        module.fail_json(msg='`paramiko` package not found, please install it.')
+
+    bitbucket = BitbucketHelper(module)
+
+    # Retrieve access token for authorized API requests
+    bitbucket.fetch_access_token()
+
+    # Retrieve existing known host
+    existing_host = get_existing_known_host(module, bitbucket)
+    state = module.params['state']
+    changed = False
+
+    # Create new host in case it doesn't exist
+    if not existing_host and (state == 'present'):
+        if not module.check_mode:
+            create_known_host(module, bitbucket)
+        changed = True
+
+    # Delete host
+    elif existing_host and (state == 'absent'):
+        if not module.check_mode:
+            delete_known_host(module, bitbucket, existing_host['uuid'])
+        changed = True
+
+    module.exit_json(changed=changed)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/source_control/bitbucket/bitbucket_pipeline_variable.py b/plugins/modules/source_control/bitbucket/bitbucket_pipeline_variable.py
new file mode 100644
index 0000000000..6d1a0ee490
--- /dev/null
+++ b/plugins/modules/source_control/bitbucket/bitbucket_pipeline_variable.py
@@ -0,0 +1,271 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2019, Evgeniy Krysanov
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+
'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community', +} + +DOCUMENTATION = r''' +--- +module: bitbucket_pipeline_variable +short_description: Manages Bitbucket pipeline variables +description: + - Manages Bitbucket pipeline variables. +author: + - Evgeniy Krysanov (@catcombo) +options: + client_id: + description: + - The OAuth consumer key. + - If not set the environment variable C(BITBUCKET_CLIENT_ID) will be used. + type: str + client_secret: + description: + - The OAuth consumer secret. + - If not set the environment variable C(BITBUCKET_CLIENT_SECRET) will be used. + type: str + repository: + description: + - The repository name. + type: str + required: true + username: + description: + - The repository owner. + type: str + required: true + name: + description: + - The pipeline variable name. + type: str + required: true + value: + description: + - The pipeline variable value. + type: str + secured: + description: + - Whether to encrypt the variable value. + type: bool + default: no + state: + description: + - Indicates desired state of the variable. + type: str + required: true + choices: [ absent, present ] +notes: + - Bitbucket OAuth consumer key and secret can be obtained from Bitbucket profile -> Settings -> Access Management -> OAuth. + - Check mode is supported. + - For secured values return parameter C(changed) is always C(True). +''' + +EXAMPLES = r''' +- name: Create or update pipeline variables from the list + bitbucket_pipeline_variable: + repository: 'bitbucket-repo' + username: bitbucket_username + name: '{{ item.name }}' + value: '{{ item.value }}' + secured: '{{ item.secured }}' + state: present + with_items: + - { name: AWS_ACCESS_KEY, value: ABCD1234 } + - { name: AWS_SECRET, value: qwe789poi123vbn0, secured: True } + +- name: Remove pipeline variable + bitbucket_pipeline_variable: + repository: bitbucket-repo + username: bitbucket_username + name: AWS_ACCESS_KEY + state: absent +''' + +RETURN = r''' # ''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.source_control.bitbucket import BitbucketHelper + +error_messages = { + 'required_value': '`value` is required when the `state` is `present`', +} + +BITBUCKET_API_ENDPOINTS = { + 'pipeline-variable-list': '%s/2.0/repositories/{username}/{repo_slug}/pipelines_config/variables/' % BitbucketHelper.BITBUCKET_API_URL, + 'pipeline-variable-detail': '%s/2.0/repositories/{username}/{repo_slug}/pipelines_config/variables/{variable_uuid}' % BitbucketHelper.BITBUCKET_API_URL, +} + + +def get_existing_pipeline_variable(module, bitbucket): + """ + Search for a pipeline variable + + :param module: instance of the :class:`AnsibleModule` + :param bitbucket: instance of the :class:`BitbucketHelper` + :return: existing variable or None if not found + :rtype: dict or None + + Return example:: + + { + 'name': 'AWS_ACCESS_OBKEY_ID', + 'value': 'x7HU80-a2', + 'type': 'pipeline_variable', + 'secured': False, + 'uuid': '{9ddb0507-439a-495a-99f3-5464f15128127}' + } + + The `value` key in dict is absent in case of secured variable. 
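+    Because the value of a secured variable cannot be read back, the module
+    has no way to compare it with the requested value, which is why C(changed)
+    is always C(True) for secured variables.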
+ """ + content = { + 'next': BITBUCKET_API_ENDPOINTS['pipeline-variable-list'].format( + username=module.params['username'], + repo_slug=module.params['repository'], + ) + } + + # Look through the all response pages in search of variable we need + while 'next' in content: + info, content = bitbucket.request( + api_url=content['next'], + method='GET', + ) + + if info['status'] == 404: + module.fail_json(msg='Invalid `repository` or `username`.') + + if info['status'] != 200: + module.fail_json(msg='Failed to retrieve the list of pipeline variables: {0}'.format(info)) + + var = next(filter(lambda v: v['key'] == module.params['name'], content['values']), None) + + if var is not None: + var['name'] = var.pop('key') + return var + + return None + + +def create_pipeline_variable(module, bitbucket): + info, content = bitbucket.request( + api_url=BITBUCKET_API_ENDPOINTS['pipeline-variable-list'].format( + username=module.params['username'], + repo_slug=module.params['repository'], + ), + method='POST', + data={ + 'key': module.params['name'], + 'value': module.params['value'], + 'secured': module.params['secured'], + }, + ) + + if info['status'] != 201: + module.fail_json(msg='Failed to create pipeline variable `{name}`: {info}'.format( + name=module.params['name'], + info=info, + )) + + +def update_pipeline_variable(module, bitbucket, variable_uuid): + info, content = bitbucket.request( + api_url=BITBUCKET_API_ENDPOINTS['pipeline-variable-detail'].format( + username=module.params['username'], + repo_slug=module.params['repository'], + variable_uuid=variable_uuid, + ), + method='PUT', + data={ + 'value': module.params['value'], + 'secured': module.params['secured'], + }, + ) + + if info['status'] != 200: + module.fail_json(msg='Failed to update pipeline variable `{name}`: {info}'.format( + name=module.params['name'], + info=info, + )) + + +def delete_pipeline_variable(module, bitbucket, variable_uuid): + info, content = bitbucket.request( + api_url=BITBUCKET_API_ENDPOINTS['pipeline-variable-detail'].format( + username=module.params['username'], + repo_slug=module.params['repository'], + variable_uuid=variable_uuid, + ), + method='DELETE', + ) + + if info['status'] != 204: + module.fail_json(msg='Failed to delete pipeline variable `{name}`: {info}'.format( + name=module.params['name'], + info=info, + )) + + +def main(): + argument_spec = BitbucketHelper.bitbucket_argument_spec() + argument_spec.update( + repository=dict(type='str', required=True), + username=dict(type='str', required=True), + name=dict(type='str', required=True), + value=dict(type='str'), + secured=dict(type='bool', default=False), + state=dict(type='str', choices=['present', 'absent'], required=True), + ) + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + + bitbucket = BitbucketHelper(module) + + value = module.params['value'] + state = module.params['state'] + secured = module.params['secured'] + + # Check parameters + if (value is None) and (state == 'present'): + module.fail_json(msg=error_messages['required_value']) + + # Retrieve access token for authorized API requests + bitbucket.fetch_access_token() + + # Retrieve existing pipeline variable (if any) + existing_variable = get_existing_pipeline_variable(module, bitbucket) + changed = False + + # Create new variable in case it doesn't exists + if not existing_variable and (state == 'present'): + if not module.check_mode: + create_pipeline_variable(module, bitbucket) + changed = True + + # Update variable if it is secured or the old 
value does not match the new one + elif existing_variable and (state == 'present'): + if (existing_variable['secured'] != secured) or (existing_variable.get('value') != value): + if not module.check_mode: + update_pipeline_variable(module, bitbucket, existing_variable['uuid']) + changed = True + + # Delete variable + elif existing_variable and (state == 'absent'): + if not module.check_mode: + delete_pipeline_variable(module, bitbucket, existing_variable['uuid']) + changed = True + + module.exit_json(changed=changed) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/source_control/bzr.py b/plugins/modules/source_control/bzr.py new file mode 100644 index 0000000000..e9aa8750ed --- /dev/null +++ b/plugins/modules/source_control/bzr.py @@ -0,0 +1,194 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2013, André Paramés +# Based on the Git module by Michael DeHaan +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: bzr +author: +- André Paramés (@andreparames) +short_description: Deploy software (or files) from bzr branches +description: + - Manage I(bzr) branches to deploy files or software. +options: + name: + description: + - SSH or HTTP protocol address of the parent branch. + aliases: [ parent ] + required: yes + dest: + description: + - Absolute path of where the branch should be cloned to. + required: yes + version: + description: + - What version of the branch to clone. This can be the + bzr revno or revid. + default: head + force: + description: + - If C(yes), any modified files in the working + tree will be discarded. Before 1.9 the default + value was C(yes). + type: bool + default: 'no' + executable: + description: + - Path to bzr executable to use. If not supplied, + the normal mechanism for resolving binary paths will be used. 
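+requirements:
+  - bzr (command line tool)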
+'''

+EXAMPLES = '''
+# Example bzr checkout from Ansible Playbooks
+- bzr:
+    name: bzr+ssh://foosball.example.org/path/to/branch
+    dest: /srv/checkout
+    version: 22
+'''
+
+import os
+import re
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+class Bzr(object):
+    def __init__(self, module, parent, dest, version, bzr_path):
+        self.module = module
+        self.parent = parent
+        self.dest = dest
+        self.version = version
+        self.bzr_path = bzr_path
+
+    def _command(self, args_list, cwd=None, **kwargs):
+        (rc, out, err) = self.module.run_command([self.bzr_path] + args_list, cwd=cwd, **kwargs)
+        return (rc, out, err)
+
+    def get_version(self):
+        '''samples the version of the bzr branch'''
+
+        cmd = "%s revno" % self.bzr_path
+        rc, stdout, stderr = self.module.run_command(cmd, cwd=self.dest)
+        revno = stdout.strip()
+        return revno
+
+    def clone(self):
+        '''makes a new bzr branch if it does not already exist'''
+        dest_dirname = os.path.dirname(self.dest)
+        try:
+            os.makedirs(dest_dirname)
+        except Exception:
+            pass
+        if self.version.lower() != 'head':
+            args_list = ["branch", "-r", self.version, self.parent, self.dest]
+        else:
+            args_list = ["branch", self.parent, self.dest]
+        return self._command(args_list, check_rc=True, cwd=dest_dirname)
+
+    def has_local_mods(self):
+        cmd = "%s status -S" % self.bzr_path
+        rc, stdout, stderr = self.module.run_command(cmd, cwd=self.dest)
+        lines = stdout.splitlines()
+
+        # filter() returns an iterator on Python 3, so materialize the result
+        # as a list before checking its length
+        lines = [c for c in lines if not re.search('^\\?\\?.*$', c)]
+        return len(lines) > 0
+
+    def reset(self, force):
+        '''
+        Resets the index and working tree to head.
+        Discards any changes to tracked files in the working
+        tree since that commit.
+        '''
+        if not force and self.has_local_mods():
+            self.module.fail_json(msg="Local modifications exist in branch (force=no).")
+        return self._command(["revert"], check_rc=True, cwd=self.dest)
+
+    def fetch(self):
+        '''updates branch from remote sources'''
+        if self.version.lower() != 'head':
+            (rc, out, err) = self._command(["pull", "-r", self.version], cwd=self.dest)
+        else:
+            (rc, out, err) = self._command(["pull"], cwd=self.dest)
+        if rc != 0:
+            self.module.fail_json(msg="Failed to pull")
+        return (rc, out, err)
+
+    def switch_version(self):
+        '''once pulled, switch to a particular revno or revid'''
+        if self.version.lower() != 'head':
+            args_list = ["revert", "-r", self.version]
+        else:
+            args_list = ["revert"]
+        return self._command(args_list, check_rc=True, cwd=self.dest)
+
+
+# ===========================================
+
+def main():
+    module = AnsibleModule(
+        argument_spec=dict(
+            dest=dict(type='path', required=True),
+            name=dict(type='str', required=True, aliases=['parent']),
+            version=dict(type='str', default='head'),
+            force=dict(type='bool', default='no'),
+            executable=dict(type='str'),
+        )
+    )
+
+    dest = module.params['dest']
+    parent = module.params['name']
+    version = module.params['version']
+    force = module.params['force']
+    bzr_path = module.params['executable'] or module.get_bin_path('bzr', True)
+
+    bzrconfig = os.path.join(dest, '.bzr', 'branch', 'branch.conf')
+
+    rc, out, err = (0, None, None)
+
+    bzr = Bzr(module, parent, dest, version, bzr_path)
+
+    # if there is no bzr configuration, do a branch operation
+    # else pull and switch the version
+    before = None
+    local_mods = False
+    if not os.path.exists(bzrconfig):
+        (rc, out, err) = bzr.clone()
+
+    else:
+        # else do a pull
+        local_mods = bzr.has_local_mods()
+        before = bzr.get_version()
+        (rc, out, err) = bzr.reset(force)
+        if rc != 0:
+            module.fail_json(msg=err)
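+        # the tree is clean at this point, so pull in the new revisions;
+        # the switch to the requested version happens below in either case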
+ (rc, out, err) = bzr.fetch() + if rc != 0: + module.fail_json(msg=err) + + # switch to version specified regardless of whether + # we cloned or pulled + (rc, out, err) = bzr.switch_version() + + # determine if we changed anything + after = bzr.get_version() + changed = False + + if before != after or local_mods: + changed = True + + module.exit_json(changed=changed, before=before, after=after) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/source_control/git_config.py b/plugins/modules/source_control/git_config.py new file mode 100644 index 0000000000..59009c8212 --- /dev/null +++ b/plugins/modules/source_control/git_config.py @@ -0,0 +1,267 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2015, Marius Gedminas +# (c) 2016, Matthew Gamble +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: git_config +author: + - Matthew Gamble (@djmattyg007) + - Marius Gedminas (@mgedmin) +requirements: ['git'] +short_description: Read and write git configuration +description: + - The C(git_config) module changes git configuration by invoking 'git config'. + This is needed if you don't want to use M(template) for the entire git + config file (e.g. because you need to change just C(user.email) in + /etc/.git/config). Solutions involving M(command) are cumbersome or + don't work correctly in check mode. +options: + list_all: + description: + - List all settings (optionally limited to a given I(scope)) + type: bool + default: 'no' + name: + description: + - The name of the setting. If no value is supplied, the value will + be read from the config if it has been set. + repo: + description: + - Path to a git repository for reading and writing values from a + specific repo. + scope: + description: + - Specify which scope to read/set values from. This is required + when setting config values. If this is set to local, you must + also specify the repo parameter. It defaults to system only when + not using I(list_all)=yes. + choices: [ "local", "global", "system" ] + state: + description: + - "Indicates the setting should be set/unset. + This parameter has higher precedence than I(value) parameter: + when I(state)=absent and I(value) is defined, I(value) is discarded." + choices: [ 'present', 'absent' ] + default: 'present' + value: + description: + - When specifying the name of a single setting, supply a value to + set that setting to the given value. 
+''' + +EXAMPLES = ''' +# Set some settings in ~/.gitconfig +- git_config: + name: alias.ci + scope: global + value: commit + +- git_config: + name: alias.st + scope: global + value: status + +# Unset some settings in ~/.gitconfig +- git_config: + name: alias.ci + scope: global + state: absent + +# Or system-wide: +- git_config: + name: alias.remotev + scope: system + value: remote -v + +- git_config: + name: core.editor + scope: global + value: vim + +# scope=system is the default +- git_config: + name: alias.diffc + value: diff --cached + +- git_config: + name: color.ui + value: auto + +# Make etckeeper not complain when invoked by cron +- git_config: + name: user.email + repo: /etc + scope: local + value: 'root@{{ ansible_fqdn }}' + +# Read individual values from git config +- git_config: + name: alias.ci + scope: global + +# scope: system is also assumed when reading values, unless list_all=yes +- git_config: + name: alias.diffc + +# Read all values from git config +- git_config: + list_all: yes + scope: global + +# When list_all=yes and no scope is specified, you get configuration from all scopes +- git_config: + list_all: yes + +# Specify a repository to include local settings +- git_config: + list_all: yes + repo: /path/to/repo.git +''' + +RETURN = ''' +--- +config_value: + description: When list_all=no and value is not set, a string containing the value of the setting in name + returned: success + type: str + sample: "vim" + +config_values: + description: When list_all=yes, a dict containing key/value pairs of multiple configuration settings + returned: success + type: dict + sample: + core.editor: "vim" + color.ui: "auto" + alias.diffc: "diff --cached" + alias.remotev: "remote -v" +''' +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.six.moves import shlex_quote + + +def main(): + module = AnsibleModule( + argument_spec=dict( + list_all=dict(required=False, type='bool', default=False), + name=dict(type='str'), + repo=dict(type='path'), + scope=dict(required=False, type='str', choices=['local', 'global', 'system']), + state=dict(required=False, type='str', default='present', choices=['present', 'absent']), + value=dict(required=False) + ), + mutually_exclusive=[['list_all', 'name'], ['list_all', 'value'], ['list_all', 'state']], + required_if=[('scope', 'local', ['repo'])], + required_one_of=[['list_all', 'name']], + supports_check_mode=True, + ) + git_path = module.get_bin_path('git', True) + + params = module.params + # We check error message for a pattern, so we need to make sure the messages appear in the form we're expecting. + # Set the locale to C to ensure consistent messages. 
+ module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C', LC_CTYPE='C') + + if params['name']: + name = params['name'] + else: + name = None + + if params['scope']: + scope = params['scope'] + elif params['list_all']: + scope = None + else: + scope = 'system' + + if params['state'] == 'absent': + unset = 'unset' + params['value'] = None + else: + unset = None + + if params['value']: + new_value = params['value'] + else: + new_value = None + + args = [git_path, "config", "--includes"] + if params['list_all']: + args.append('-l') + if scope: + args.append("--" + scope) + if name: + args.append(name) + + if scope == 'local': + dir = params['repo'] + elif params['list_all'] and params['repo']: + # Include local settings from a specific repo when listing all available settings + dir = params['repo'] + else: + # Run from root directory to avoid accidentally picking up any local config settings + dir = "/" + + (rc, out, err) = module.run_command(' '.join(args), cwd=dir) + if params['list_all'] and scope and rc == 128 and 'unable to read config file' in err: + # This just means nothing has been set at the given scope + module.exit_json(changed=False, msg='', config_values={}) + elif rc >= 2: + # If the return code is 1, it just means the option hasn't been set yet, which is fine. + module.fail_json(rc=rc, msg=err, cmd=' '.join(args)) + + if params['list_all']: + values = out.rstrip().splitlines() + config_values = {} + for value in values: + k, v = value.split('=', 1) + config_values[k] = v + module.exit_json(changed=False, msg='', config_values=config_values) + elif not new_value and not unset: + module.exit_json(changed=False, msg='', config_value=out.rstrip()) + elif unset and not out: + module.exit_json(changed=False, msg='no setting to unset') + else: + old_value = out.rstrip() + if old_value == new_value: + module.exit_json(changed=False, msg="") + + if not module.check_mode: + if unset: + args.insert(len(args) - 1, "--" + unset) + cmd = ' '.join(args) + else: + new_value_quoted = shlex_quote(new_value) + cmd = ' '.join(args + [new_value_quoted]) + (rc, out, err) = module.run_command(cmd, cwd=dir) + if err: + module.fail_json(rc=rc, msg=err, cmd=cmd) + + module.exit_json( + msg='setting changed', + diff=dict( + before_header=' '.join(args), + before=old_value + "\n", + after_header=' '.join(args), + after=(new_value or '') + "\n" + ), + changed=True + ) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/source_control/github/github_deploy_key.py b/plugins/modules/source_control/github/github_deploy_key.py new file mode 100644 index 0000000000..b1407795d6 --- /dev/null +++ b/plugins/modules/source_control/github/github_deploy_key.py @@ -0,0 +1,333 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: github_deploy_key +author: "Ali (@bincyber)" +short_description: Manages deploy keys for GitHub repositories. +description: + - "Adds or removes deploy keys for GitHub repositories. Supports authentication using username and password, + username and password and 2-factor authentication code (OTP), OAuth2 token, or personal access token. Admin + rights on the repository are required." 
+options: + github_url: + description: + - The base URL of the GitHub API + required: false + type: str + default: https://api.github.com + owner: + description: + - The name of the individual account or organization that owns the GitHub repository. + required: true + aliases: [ 'account', 'organization' ] + repo: + description: + - The name of the GitHub repository. + required: true + aliases: [ 'repository' ] + name: + description: + - The name for the deploy key. + required: true + aliases: [ 'title', 'label' ] + key: + description: + - The SSH public key to add to the repository as a deploy key. + required: true + read_only: + description: + - If C(true), the deploy key will only be able to read repository contents. Otherwise, the deploy key will be able to read and write. + type: bool + default: 'yes' + state: + description: + - The state of the deploy key. + default: "present" + choices: [ "present", "absent" ] + force: + description: + - If C(true), forcefully adds the deploy key by deleting any existing deploy key with the same public key or title. + type: bool + default: 'no' + username: + description: + - The username to authenticate with. Should not be set when using personal access token + password: + description: + - The password to authenticate with. Alternatively, a personal access token can be used instead of I(username) and I(password) combination. + token: + description: + - The OAuth2 token or personal access token to authenticate with. Mutually exclusive with I(password). + otp: + description: + - The 6 digit One Time Password for 2-Factor Authentication. Required together with I(username) and I(password). + aliases: ['2fa_token'] +notes: + - "Refer to GitHub's API documentation here: https://developer.github.com/v3/repos/keys/." +''' + +EXAMPLES = ''' +# add a new read-only deploy key to a GitHub repository using basic authentication +- github_deploy_key: + owner: "johndoe" + repo: "example" + name: "new-deploy-key" + key: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDAwXxn7kIMNWzcDfou..." + read_only: yes + username: "johndoe" + password: "supersecretpassword" + +# remove an existing deploy key from a GitHub repository +- github_deploy_key: + owner: "johndoe" + repository: "example" + name: "new-deploy-key" + key: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDAwXxn7kIMNWzcDfou..." + force: yes + username: "johndoe" + password: "supersecretpassword" + state: absent + +# add a new deploy key to a GitHub repository, replace an existing key, use an OAuth2 token to authenticate +- github_deploy_key: + owner: "johndoe" + repository: "example" + name: "new-deploy-key" + key: "{{ lookup('file', '~/.ssh/github.pub') }}" + force: yes + token: "ABAQDAwXxn7kIMNWzcDfo..." + +# re-add a deploy key to a GitHub repository but with a different name +- github_deploy_key: + owner: "johndoe" + repository: "example" + name: "replace-deploy-key" + key: "{{ lookup('file', '~/.ssh/github.pub') }}" + username: "johndoe" + password: "supersecretpassword" + +# add a new deploy key to a GitHub repository using 2FA +- github_deploy_key: + owner: "johndoe" + repo: "example" + name: "new-deploy-key-2" + key: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDAwXxn7kIMNWzcDfou..." + username: "johndoe" + password: "supersecretpassword" + otp: 123456 + +# add a read-only deploy key to a repository hosted on GitHub Enterprise +- github_deploy_key: + github_url: "https://api.example.com" + owner: "janedoe" + repo: "example" + name: "new-deploy-key" + key: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDAwXxn7kIMNWzcDfou..." 
+ read_only: yes + username: "janedoe" + password: "supersecretpassword" +''' + +RETURN = ''' +msg: + description: the status message describing what occurred + returned: always + type: str + sample: "Deploy key added successfully" + +http_status_code: + description: the HTTP status code returned by the GitHub API + returned: failed + type: int + sample: 400 + +error: + description: the error message returned by the GitHub API + returned: failed + type: str + sample: "key is already in use" + +id: + description: the key identifier assigned by GitHub for the deploy key + returned: changed + type: int + sample: 24381901 +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.urls import fetch_url +from re import findall + + +class GithubDeployKey(object): + def __init__(self, module): + self.module = module + + self.github_url = self.module.params['github_url'] + self.name = module.params['name'] + self.key = module.params['key'] + self.state = module.params['state'] + self.read_only = module.params.get('read_only', True) + self.force = module.params.get('force', False) + self.username = module.params.get('username', None) + self.password = module.params.get('password', None) + self.token = module.params.get('token', None) + self.otp = module.params.get('otp', None) + + @property + def url(self): + owner = self.module.params['owner'] + repo = self.module.params['repo'] + return "{0}/repos/{1}/{2}/keys".format(self.github_url, owner, repo) + + @property + def headers(self): + if self.username is not None and self.password is not None: + self.module.params['url_username'] = self.username + self.module.params['url_password'] = self.password + self.module.params['force_basic_auth'] = True + if self.otp is not None: + return {"X-GitHub-OTP": self.otp} + elif self.token is not None: + return {"Authorization": "token {0}".format(self.token)} + else: + return None + + def paginate(self, url): + while url: + resp, info = fetch_url(self.module, url, headers=self.headers, method="GET") + + if info["status"] == 200: + yield self.module.from_json(resp.read()) + + links = {} + for x, y in findall(r'<([^>]+)>;\s*rel="(\w+)"', info["link"]): + links[y] = x + + url = links.get('next') + else: + self.handle_error(method="GET", info=info) + + def get_existing_key(self): + for keys in self.paginate(self.url): + if keys: + for i in keys: + existing_key_id = str(i["id"]) + if i["key"].split() == self.key.split()[:2]: + return existing_key_id + elif i['title'] == self.name and self.force: + return existing_key_id + else: + return None + + def add_new_key(self): + request_body = {"title": self.name, "key": self.key, "read_only": self.read_only} + + resp, info = fetch_url(self.module, self.url, data=self.module.jsonify(request_body), headers=self.headers, method="POST", timeout=30) + + status_code = info["status"] + + if status_code == 201: + response_body = self.module.from_json(resp.read()) + key_id = response_body["id"] + self.module.exit_json(changed=True, msg="Deploy key successfully added", id=key_id) + elif status_code == 422: + self.module.exit_json(changed=False, msg="Deploy key already exists") + else: + self.handle_error(method="POST", info=info) + + def remove_existing_key(self, key_id): + resp, info = fetch_url(self.module, "{0}/{1}".format(self.url, key_id), headers=self.headers, method="DELETE") + + status_code = info["status"] + + if status_code == 204: + if self.state == 'absent': + self.module.exit_json(changed=True, msg="Deploy key successfully deleted", id=key_id) 
+ else: + self.handle_error(method="DELETE", info=info, key_id=key_id) + + def handle_error(self, method, info, key_id=None): + status_code = info['status'] + body = info.get('body') + if body: + err = self.module.from_json(body)['message'] + + if status_code == 401: + self.module.fail_json(msg="Failed to connect to {0} due to invalid credentials".format(self.github_url), http_status_code=status_code, error=err) + elif status_code == 404: + self.module.fail_json(msg="GitHub repository does not exist", http_status_code=status_code, error=err) + else: + if method == "GET": + self.module.fail_json(msg="Failed to retrieve existing deploy keys", http_status_code=status_code, error=err) + elif method == "POST": + self.module.fail_json(msg="Failed to add deploy key", http_status_code=status_code, error=err) + elif method == "DELETE": + self.module.fail_json(msg="Failed to delete existing deploy key", id=key_id, http_status_code=status_code, error=err) + + +def main(): + module = AnsibleModule( + argument_spec=dict( + github_url=dict(required=False, type='str', default="https://api.github.com"), + owner=dict(required=True, type='str', aliases=['account', 'organization']), + repo=dict(required=True, type='str', aliases=['repository']), + name=dict(required=True, type='str', aliases=['title', 'label']), + key=dict(required=True, type='str'), + read_only=dict(required=False, type='bool', default=True), + state=dict(default='present', choices=['present', 'absent']), + force=dict(required=False, type='bool', default=False), + username=dict(required=False, type='str'), + password=dict(required=False, type='str', no_log=True), + otp=dict(required=False, type='int', aliases=['2fa_token'], no_log=True), + token=dict(required=False, type='str', no_log=True) + ), + mutually_exclusive=[ + ['password', 'token'] + ], + required_together=[ + ['username', 'password'], + ['otp', 'username', 'password'] + ], + required_one_of=[ + ['username', 'token'] + ], + supports_check_mode=True, + ) + + deploy_key = GithubDeployKey(module) + + if module.check_mode: + key_id = deploy_key.get_existing_key() + if deploy_key.state == "present" and key_id is None: + module.exit_json(changed=True) + elif deploy_key.state == "present" and key_id is not None: + module.exit_json(changed=False) + + # to forcefully modify an existing key, the existing key must be deleted first + if deploy_key.state == 'absent' or deploy_key.force: + key_id = deploy_key.get_existing_key() + + if key_id is not None: + deploy_key.remove_existing_key(key_id) + elif deploy_key.state == 'absent': + module.exit_json(changed=False, msg="Deploy key does not exist") + + if deploy_key.state == "present": + deploy_key.add_new_key() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/source_control/github/github_hooks.py b/plugins/modules/source_control/github/github_hooks.py new file mode 100644 index 0000000000..054eaaca6e --- /dev/null +++ b/plugins/modules/source_control/github/github_hooks.py @@ -0,0 +1,197 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2013, Phillip Gentry +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['deprecated'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: github_hooks +short_description: Manages GitHub service hooks. 
+deprecated: + removed_in: "2.12" + why: Replaced by more granular modules + alternative: Use M(github_webhook) and M(github_webhook_info) instead. +description: + - Adds service hooks and removes service hooks that have an error status. +options: + user: + description: + - GitHub username. + required: true + oauthkey: + description: + - The oauth key provided by GitHub. It can be found/generated on GitHub under "Edit Your Profile" >> "Developer settings" >> "Personal Access Tokens" + required: true + repo: + description: + - > + This is the API url for the repository you want to manage hooks for. It should be in the form of: https://api.github.com/repos/user:/repo:. + Note this is different than the normal repo url. + required: true + hookurl: + description: + - When creating a new hook, this is the url that you want GitHub to post to. It is only required when creating a new hook. + required: false + action: + description: + - This tells the githooks module what you want it to do. + required: true + choices: [ "create", "cleanall", "list", "clean504" ] + validate_certs: + description: + - If C(no), SSL certificates for the target repo will not be validated. This should only be used + on personally controlled sites using self-signed certificates. + required: false + default: 'yes' + type: bool + content_type: + description: + - Content type to use for requests made to the webhook + required: false + default: 'json' + choices: ['json', 'form'] + +author: "Phillip Gentry, CX Inc (@pcgentry)" +''' + +EXAMPLES = ''' +# Example creating a new service hook. It ignores duplicates. +- github_hooks: + action: create + hookurl: http://11.111.111.111:2222 + user: '{{ gituser }}' + oauthkey: '{{ oauthkey }}' + repo: https://api.github.com/repos/pcgentry/Github-Auto-Deploy + +# Cleaning all hooks for this repo that had an error on the last update. Since this works for all hooks in a repo it is probably best that this would +# be called from a handler. 
+- github_hooks: + action: cleanall + user: '{{ gituser }}' + oauthkey: '{{ oauthkey }}' + repo: '{{ repo }}' + delegate_to: localhost +''' + +import json +import base64 + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.urls import fetch_url +from ansible.module_utils._text import to_bytes + + +def request(module, url, user, oauthkey, data='', method='GET'): + auth = base64.b64encode(to_bytes('%s:%s' % (user, oauthkey)).replace('\n', '')) + headers = { + 'Authorization': 'Basic %s' % auth, + } + response, info = fetch_url(module, url, headers=headers, data=data, method=method) + return response, info + + +def _list(module, oauthkey, repo, user): + url = "%s/hooks" % repo + response, info = request(module, url, user, oauthkey) + if info['status'] != 200: + return False, '' + else: + return False, response.read() + + +def _clean504(module, oauthkey, repo, user): + current_hooks = _list(module, oauthkey, repo, user)[1] + decoded = json.loads(current_hooks) + + for hook in decoded: + if hook['last_response']['code'] == 504: + _delete(module, oauthkey, repo, user, hook['id']) + + return 0, current_hooks + + +def _cleanall(module, oauthkey, repo, user): + current_hooks = _list(module, oauthkey, repo, user)[1] + decoded = json.loads(current_hooks) + + for hook in decoded: + if hook['last_response']['code'] != 200: + _delete(module, oauthkey, repo, user, hook['id']) + + return 0, current_hooks + + +def _create(module, hookurl, oauthkey, repo, user, content_type): + url = "%s/hooks" % repo + values = { + "active": True, + "name": "web", + "config": { + "url": "%s" % hookurl, + "content_type": "%s" % content_type + } + } + data = json.dumps(values) + response, info = request(module, url, user, oauthkey, data=data, method='POST') + if info['status'] != 200: + return 0, '[]' + else: + return 0, response.read() + + +def _delete(module, oauthkey, repo, user, hookid): + url = "%s/hooks/%s" % (repo, hookid) + response, info = request(module, url, user, oauthkey, method='DELETE') + return response.read() + + +def main(): + module = AnsibleModule( + argument_spec=dict( + action=dict(required=True, choices=['list', 'clean504', 'cleanall', 'create']), + hookurl=dict(required=False), + oauthkey=dict(required=True, no_log=True), + repo=dict(required=True), + user=dict(required=True), + validate_certs=dict(default='yes', type='bool'), + content_type=dict(default='json', choices=['json', 'form']), + ) + ) + + action = module.params['action'] + hookurl = module.params['hookurl'] + oauthkey = module.params['oauthkey'] + repo = module.params['repo'] + user = module.params['user'] + content_type = module.params['content_type'] + + if action == "list": + (rc, out) = _list(module, oauthkey, repo, user) + + if action == "clean504": + (rc, out) = _clean504(module, oauthkey, repo, user) + + if action == "cleanall": + (rc, out) = _cleanall(module, oauthkey, repo, user) + + if action == "create": + (rc, out) = _create(module, hookurl, oauthkey, repo, user, content_type) + + if rc != 0: + module.fail_json(msg="failed", result=out) + + module.exit_json(msg="success", result=out) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/source_control/github/github_issue.py b/plugins/modules/source_control/github/github_issue.py new file mode 100644 index 0000000000..6e8264ac0e --- /dev/null +++ b/plugins/modules/source_control/github/github_issue.py @@ -0,0 +1,118 @@ +#!/usr/bin/python + +# Copyright: (c) 2017-18, Abhijeet Kasurde +# +# GNU General Public License v3.0+ (see COPYING 
or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {
+    'metadata_version': '1.1',
+    'status': ['preview'],
+    'supported_by': 'community'
+}
+
+
+DOCUMENTATION = '''
+module: github_issue
+short_description: View GitHub issue.
+description:
+    - View GitHub issue for a given repository and organization.
+options:
+  repo:
+    description:
+      - Name of repository from which issue needs to be retrieved.
+    required: true
+  organization:
+    description:
+      - Name of the GitHub organization in which the repository is hosted.
+    required: true
+  issue:
+    description:
+      - Issue number for which information is required.
+    required: true
+  action:
+    description:
+      - Get various details about issue depending upon action specified.
+    default: 'get_status'
+    choices:
+      - 'get_status'
+author:
+    - Abhijeet Kasurde (@Akasurde)
'''

+RETURN = '''
+issue_status:
+    description: State of the GitHub issue
+    type: str
+    returned: success
+    sample: open, closed
+'''
+
+EXAMPLES = '''
+- name: Check if GitHub issue is closed or not
+  github_issue:
+    organization: ansible
+    repo: ansible
+    issue: 23642
+    action: get_status
+  register: r
+
+- name: Take action depending upon issue status
+  debug:
+    msg: Do something when issue 23642 is open
+  when: r.issue_status == 'open'
+'''
+
+import json
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import fetch_url
+
+
+def main():
+    module = AnsibleModule(
+        argument_spec=dict(
+            organization=dict(required=True),
+            repo=dict(required=True),
+            issue=dict(type='int', required=True),
+            action=dict(choices=['get_status'], default='get_status'),
+        ),
+        supports_check_mode=True,
+    )
+
+    organization = module.params['organization']
+    repo = module.params['repo']
+    issue = module.params['issue']
+    action = module.params['action']
+
+    result = dict()
+
+    headers = {
+        'Content-Type': 'application/json',
+        'Accept': 'application/vnd.github.v3+json',
+    }
+
+    url = "https://api.github.com/repos/%s/%s/issues/%s" % (organization, repo, issue)
+
+    response, info = fetch_url(module, url, headers=headers)
+    if not (200 <= info['status'] < 400):
+        if info['status'] == 404:
+            module.fail_json(msg="Failed to find issue %s" % issue)
+        module.fail_json(msg="Failed to send request to %s: %s" % (url, info['msg']))
+
+    gh_obj = json.loads(response.read())
+
+    if action == 'get_status' or action is None:
+        if module.check_mode:
+            result.update(changed=True)
+        else:
+            result.update(changed=True, issue_status=gh_obj['state'])
+
+    module.exit_json(**result)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/source_control/github/github_key.py b/plugins/modules/source_control/github/github_key.py
new file mode 100644
index 0000000000..e7c8adf8fe
--- /dev/null
+++ b/plugins/modules/source_control/github/github_key.py
@@ -0,0 +1,242 @@
+#!/usr/bin/python
+
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+module: github_key
+short_description: Manage GitHub access keys.
+description:
+    - Creates, removes, or updates GitHub access keys.
+options:
+  token:
+    description:
+      - GitHub Access Token with permission to list and create public keys.
+ required: true + name: + description: + - SSH key name + required: true + pubkey: + description: + - SSH public key value. Required when C(state=present). + state: + description: + - Whether to remove a key, ensure that it exists, or update its value. + choices: ['present', 'absent'] + default: 'present' + force: + description: + - The default is C(yes), which will replace the existing remote key + if it's different than C(pubkey). If C(no), the key will only be + set if no key with the given C(name) exists. + type: bool + default: 'yes' + +author: Robert Estelle (@erydo) +''' + +RETURN = ''' +deleted_keys: + description: An array of key objects that were deleted. Only present on state=absent + type: list + returned: When state=absent + sample: [{'id': 0, 'key': 'BASE64 encoded key', 'url': 'http://example.com/github key', 'created_at': 'YYYY-MM-DDTHH:MM:SZ', 'read_only': False}] +matching_keys: + description: An array of keys matching the specified name. Only present on state=present + type: list + returned: When state=present + sample: [{'id': 0, 'key': 'BASE64 encoded key', 'url': 'http://example.com/github key', 'created_at': 'YYYY-MM-DDTHH:MM:SZ', 'read_only': False}] +key: + description: Metadata about the key just created. Only present on state=present + type: dict + returned: success + sample: {'id': 0, 'key': 'BASE64 encoded key', 'url': 'http://example.com/github key', 'created_at': 'YYYY-MM-DDTHH:MM:SZ', 'read_only': False} +''' + +EXAMPLES = ''' +- name: Read SSH public key to authorize + shell: cat /home/foo/.ssh/id_rsa.pub + register: ssh_pub_key + +- name: Authorize key with GitHub + local_action: + module: github_key + name: Access Key for Some Machine + token: '{{ github_access_token }}' + pubkey: '{{ ssh_pub_key.stdout }}' +''' + + +import json +import re + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.urls import fetch_url + + +API_BASE = 'https://api.github.com' + + +class GitHubResponse(object): + def __init__(self, response, info): + self.content = response.read() + self.info = info + + def json(self): + return json.loads(self.content) + + def links(self): + links = {} + if 'link' in self.info: + link_header = self.info['link'] + matches = re.findall('<([^>]+)>; rel="([^"]+)"', link_header) + for url, rel in matches: + links[rel] = url + return links + + +class GitHubSession(object): + def __init__(self, module, token): + self.module = module + self.token = token + + def request(self, method, url, data=None): + headers = { + 'Authorization': 'token %s' % self.token, + 'Content-Type': 'application/json', + 'Accept': 'application/vnd.github.v3+json', + } + response, info = fetch_url( + self.module, url, method=method, data=data, headers=headers) + if not (200 <= info['status'] < 400): + self.module.fail_json( + msg=(" failed to send request %s to %s: %s" + % (method, url, info['msg']))) + return GitHubResponse(response, info) + + +def get_all_keys(session): + url = API_BASE + '/user/keys' + result = [] + while url: + r = session.request('GET', url) + result.extend(r.json()) + url = r.links().get('next') + return result + + +def create_key(session, name, pubkey, check_mode): + if check_mode: + from datetime import datetime + now = datetime.utcnow() + return { + 'id': 0, + 'key': pubkey, + 'title': name, + 'url': 'http://example.com/CHECK_MODE_GITHUB_KEY', + 'created_at': datetime.strftime(now, '%Y-%m-%dT%H:%M:%SZ'), + 'read_only': False, + 'verified': False + } + else: + return session.request( + 'POST', + API_BASE + '/user/keys', + 
data=json.dumps({'title': name, 'key': pubkey})).json() + + +def delete_keys(session, to_delete, check_mode): + if check_mode: + return + + for key in to_delete: + session.request('DELETE', API_BASE + '/user/keys/%s' % key["id"]) + + +def ensure_key_absent(session, name, check_mode): + to_delete = [key for key in get_all_keys(session) if key['title'] == name] + delete_keys(session, to_delete, check_mode=check_mode) + + return {'changed': bool(to_delete), + 'deleted_keys': to_delete} + + +def ensure_key_present(module, session, name, pubkey, force, check_mode): + all_keys = get_all_keys(session) + matching_keys = [k for k in all_keys if k['title'] == name] + deleted_keys = [] + + new_signature = pubkey.split(' ')[1] + for key in all_keys: + existing_signature = key['key'].split(' ')[1] + if new_signature == existing_signature and key['title'] != name: + module.fail_json(msg=( + "another key with the same content is already registered " + "under the name |{0}|").format(key['title'])) + + if matching_keys and force and matching_keys[0]['key'].split(' ')[1] != new_signature: + delete_keys(session, matching_keys, check_mode=check_mode) + (deleted_keys, matching_keys) = (matching_keys, []) + + if not matching_keys: + key = create_key(session, name, pubkey, check_mode=check_mode) + else: + key = matching_keys[0] + + return { + 'changed': bool(deleted_keys or not matching_keys), + 'deleted_keys': deleted_keys, + 'matching_keys': matching_keys, + 'key': key + } + + +def main(): + argument_spec = { + 'token': {'required': True, 'no_log': True}, + 'name': {'required': True}, + 'pubkey': {}, + 'state': {'choices': ['present', 'absent'], 'default': 'present'}, + 'force': {'default': True, 'type': 'bool'}, + } + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + + token = module.params['token'] + name = module.params['name'] + state = module.params['state'] + force = module.params['force'] + pubkey = module.params.get('pubkey') + + if pubkey: + pubkey_parts = pubkey.split(' ') + # Keys consist of a protocol, the key data, and an optional comment. + if len(pubkey_parts) < 2: + module.fail_json(msg='"pubkey" parameter has an invalid format') + elif state == 'present': + module.fail_json(msg='"pubkey" is required when state=present') + + session = GitHubSession(module, token) + if state == 'present': + result = ensure_key_present(module, session, name, pubkey, force=force, + check_mode=module.check_mode) + elif state == 'absent': + result = ensure_key_absent(session, name, check_mode=module.check_mode) + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/source_control/github/github_release.py b/plugins/modules/source_control/github/github_release.py new file mode 100644 index 0000000000..a6ca4b75b5 --- /dev/null +++ b/plugins/modules/source_control/github/github_release.py @@ -0,0 +1,218 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright: Ansible Team +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: github_release +short_description: Interact with GitHub Releases +description: + - Fetch metadata about GitHub Releases +options: + token: + description: + - GitHub Personal Access Token for authenticating. 
Mutually exclusive with C(password). + user: + description: + - The GitHub account that owns the repository + required: true + password: + description: + - The GitHub account password for the user. Mutually exclusive with C(token). + repo: + description: + - Repository name + required: true + action: + description: + - Action to perform + required: true + choices: [ 'latest_release', 'create_release' ] + tag: + description: + - Tag name when creating a release. Required when using action is set to C(create_release). + target: + description: + - Target of release when creating a release + name: + description: + - Name of release when creating a release + body: + description: + - Description of the release when creating a release + draft: + description: + - Sets if the release is a draft or not. (boolean) + type: 'bool' + default: 'no' + prerelease: + description: + - Sets if the release is a prerelease or not. (boolean) + type: bool + default: 'no' + +author: + - "Adrian Moisey (@adrianmoisey)" +requirements: + - "github3.py >= 1.0.0a3" +''' + +EXAMPLES = ''' +- name: Get latest release of a public repository + github_release: + user: ansible + repo: ansible + action: latest_release + +- name: Get latest release of testuseer/testrepo + github_release: + token: tokenabc1234567890 + user: testuser + repo: testrepo + action: latest_release + +- name: Get latest release of test repo using username and password. Ansible 2.4. + github_release: + user: testuser + password: secret123 + repo: testrepo + action: latest_release + +- name: Create a new release + github_release: + token: tokenabc1234567890 + user: testuser + repo: testrepo + action: create_release + tag: test + target: master + name: My Release + body: Some description + +''' + +RETURN = ''' +create_release: + description: + - Version of the created release + - "For Ansible version 2.5 and later, if specified release version already exists, then State is unchanged" + - "For Ansible versions prior to 2.5, if specified release version already exists, then State is skipped" + type: str + returned: success + sample: 1.1.0 + +latest_release: + description: Version of the latest release + type: str + returned: success + sample: 1.1.0 +''' + +import traceback + +GITHUB_IMP_ERR = None +try: + import github3 + + HAS_GITHUB_API = True +except ImportError: + GITHUB_IMP_ERR = traceback.format_exc() + HAS_GITHUB_API = False + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib +from ansible.module_utils._text import to_native + + +def main(): + module = AnsibleModule( + argument_spec=dict( + repo=dict(required=True), + user=dict(required=True), + password=dict(no_log=True), + token=dict(no_log=True), + action=dict( + required=True, choices=['latest_release', 'create_release']), + tag=dict(type='str'), + target=dict(type='str'), + name=dict(type='str'), + body=dict(type='str'), + draft=dict(type='bool', default=False), + prerelease=dict(type='bool', default=False), + ), + supports_check_mode=True, + mutually_exclusive=(('password', 'token'),), + required_if=[('action', 'create_release', ['tag']), + ('action', 'create_release', ['password', 'token'], True)], + ) + + if not HAS_GITHUB_API: + module.fail_json(msg=missing_required_lib('github3.py >= 1.0.0a3'), + exception=GITHUB_IMP_ERR) + + repo = module.params['repo'] + user = module.params['user'] + password = module.params['password'] + login_token = module.params['token'] + action = module.params['action'] + tag = module.params.get('tag') + target = 
module.params.get('target') + name = module.params.get('name') + body = module.params.get('body') + draft = module.params.get('draft') + prerelease = module.params.get('prerelease') + + # login to github + try: + if password: + gh_obj = github3.login(user, password=password) + elif login_token: + gh_obj = github3.login(token=login_token) + else: + gh_obj = github3.GitHub() + + # test if we're actually logged in + if password or login_token: + gh_obj.me() + except github3.exceptions.AuthenticationFailed as e: + module.fail_json(msg='Failed to connect to GitHub: %s' % to_native(e), + details="Please check username and password or token " + "for repository %s" % repo) + + repository = gh_obj.repository(user, repo) + + if not repository: + module.fail_json(msg="Repository %s/%s doesn't exist" % (user, repo)) + + if action == 'latest_release': + release = repository.latest_release() + if release: + module.exit_json(tag=release.tag_name) + else: + module.exit_json(tag=None) + + if action == 'create_release': + release_exists = repository.release_from_tag(tag) + if release_exists: + module.exit_json(changed=False, msg="Release for tag %s already exists." % tag) + + release = repository.create_release( + tag, target, name, body, draft, prerelease) + if release: + module.exit_json(changed=True, tag=release.tag_name) + else: + module.exit_json(changed=False, tag=None) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/source_control/github/github_webhook.py b/plugins/modules/source_control/github/github_webhook.py new file mode 100644 index 0000000000..2cd5134e64 --- /dev/null +++ b/plugins/modules/source_control/github/github_webhook.py @@ -0,0 +1,284 @@ +#!/usr/bin/python +# +# Copyright: (c) 2018, Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +ANSIBLE_METADATA = { + 'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community' +} + +DOCUMENTATION = ''' +--- +module: github_webhook +short_description: Manage GitHub webhooks +description: + - "Create and delete GitHub webhooks" +requirements: + - "PyGithub >= 1.3.5" +options: + repository: + description: + - Full name of the repository to configure a hook for + required: true + aliases: + - repo + url: + description: + - URL to which payloads will be delivered + required: true + content_type: + description: + - The media type used to serialize the payloads + required: false + choices: [ form, json ] + default: form + secret: + description: + - The shared secret between GitHub and the payload URL. + required: false + insecure_ssl: + description: + - > + Flag to indicate that GitHub should skip SSL verification when calling + the hook. + required: false + type: bool + default: false + events: + description: + - > + A list of GitHub events the hook is triggered for. Events are listed at + U(https://developer.github.com/v3/activity/events/types/). 
Required + unless C(state) is C(absent) + required: false + active: + description: + - Whether or not the hook is active + required: false + type: bool + default: true + state: + description: + - Whether the hook should be present or absent + required: false + choices: [ absent, present ] + default: present + user: + description: + - User to authenticate to GitHub as + required: true + password: + description: + - Password to authenticate to GitHub with + required: false + token: + description: + - Token to authenticate to GitHub with + required: false + github_url: + description: + - Base URL of the GitHub API + required: false + default: https://api.github.com + +author: + - "Chris St. Pierre (@stpierre)" +''' + +EXAMPLES = ''' +- name: create a new webhook that triggers on push (password auth) + github_webhook: + repository: ansible/ansible + url: https://www.example.com/hooks/ + events: + - push + user: "{{ github_user }}" + password: "{{ github_password }}" + +- name: create a new webhook in a github enterprise installation with multiple event triggers (token auth) + github_webhook: + repository: myorg/myrepo + url: https://jenkins.example.com/ghprbhook/ + content_type: json + secret: "{{ github_shared_secret }}" + insecure_ssl: True + events: + - issue_comment + - pull_request + user: "{{ github_user }}" + token: "{{ github_user_api_token }}" + github_url: https://github.example.com + +- name: delete a webhook (password auth) + github_webhook: + repository: ansible/ansible + url: https://www.example.com/hooks/ + state: absent + user: "{{ github_user }}" + password: "{{ github_password }}" +''' + +RETURN = ''' +--- +hook_id: + description: The GitHub ID of the hook created/updated + returned: when state is 'present' + type: int + sample: 6206 +''' + +import traceback + +GITHUB_IMP_ERR = None +try: + import github + HAS_GITHUB = True +except ImportError: + GITHUB_IMP_ERR = traceback.format_exc() + HAS_GITHUB = False + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib +from ansible.module_utils._text import to_native + + +def _create_hook_config(module): + return { + "url": module.params["url"], + "content_type": module.params["content_type"], + "secret": module.params.get("secret"), + "insecure_ssl": "1" if module.params["insecure_ssl"] else "0" + } + + +def create_hook(repo, module): + config = _create_hook_config(module) + try: + hook = repo.create_hook( + name="web", + config=config, + events=module.params["events"], + active=module.params["active"]) + except github.GithubException as err: + module.fail_json(msg="Unable to create hook for repository %s: %s" % ( + repo.full_name, to_native(err))) + + data = {"hook_id": hook.id} + return True, data + + +def update_hook(repo, hook, module): + config = _create_hook_config(module) + try: + hook.update() + hook.edit( + name="web", + config=config, + events=module.params["events"], + active=module.params["active"]) + + changed = hook.update() + except github.GithubException as err: + module.fail_json(msg="Unable to modify hook for repository %s: %s" % ( + repo.full_name, to_native(err))) + + data = {"hook_id": hook.id} + return changed, data + + +def main(): + module = AnsibleModule( + argument_spec=dict( + repository=dict(type='str', required=True, aliases=['repo']), + url=dict(type='str', required=True), + content_type=dict( + type='str', + choices=('json', 'form'), + required=False, + default='form'), + secret=dict(type='str', required=False, no_log=True), + insecure_ssl=dict(type='bool', required=False, 
default=False), + events=dict(type='list', elements='str', required=False), + active=dict(type='bool', required=False, default=True), + state=dict( + type='str', + required=False, + choices=('absent', 'present'), + default='present'), + user=dict(type='str', required=True), + password=dict(type='str', required=False, no_log=True), + token=dict(type='str', required=False, no_log=True), + github_url=dict( + type='str', required=False, default="https://api.github.com")), + mutually_exclusive=(('password', 'token'),), + required_one_of=(("password", "token"),), + required_if=(("state", "present", ("events",)),), + ) + + if not HAS_GITHUB: + module.fail_json(msg=missing_required_lib('PyGithub'), + exception=GITHUB_IMP_ERR) + + try: + github_conn = github.Github( + module.params["user"], + module.params.get("password") or module.params.get("token"), + base_url=module.params["github_url"]) + except github.GithubException as err: + module.fail_json(msg="Could not connect to GitHub at %s: %s" % ( + module.params["github_url"], to_native(err))) + + try: + repo = github_conn.get_repo(module.params["repository"]) + except github.BadCredentialsException as err: + module.fail_json(msg="Could not authenticate to GitHub at %s: %s" % ( + module.params["github_url"], to_native(err))) + except github.UnknownObjectException as err: + module.fail_json( + msg="Could not find repository %s in GitHub at %s: %s" % ( + module.params["repository"], module.params["github_url"], + to_native(err))) + except Exception as err: + module.fail_json( + msg="Could not fetch repository %s from GitHub at %s: %s" % + (module.params["repository"], module.params["github_url"], + to_native(err)), + exception=traceback.format_exc()) + + hook = None + try: + for hook in repo.get_hooks(): + if hook.config.get("url") == module.params["url"]: + break + else: + hook = None + except github.GithubException as err: + module.fail_json(msg="Unable to get hooks from repository %s: %s" % ( + module.params["repository"], to_native(err))) + + changed = False + data = {} + if hook is None and module.params["state"] == "present": + changed, data = create_hook(repo, module) + elif hook is not None and module.params["state"] == "absent": + try: + hook.delete() + except github.GithubException as err: + module.fail_json( + msg="Unable to delete hook from repository %s: %s" % ( + repo.full_name, to_native(err))) + else: + changed = True + elif hook is not None and module.params["state"] == "present": + changed, data = update_hook(repo, hook, module) + # else, there is no hook and we want there to be no hook + + module.exit_json(changed=changed, **data) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/source_control/github/github_webhook_facts.py b/plugins/modules/source_control/github/github_webhook_facts.py new file mode 120000 index 0000000000..1e4cc94d4d --- /dev/null +++ b/plugins/modules/source_control/github/github_webhook_facts.py @@ -0,0 +1 @@ +github_webhook_info.py \ No newline at end of file diff --git a/plugins/modules/source_control/github/github_webhook_info.py b/plugins/modules/source_control/github/github_webhook_info.py new file mode 100644 index 0000000000..8dd944c473 --- /dev/null +++ b/plugins/modules/source_control/github/github_webhook_info.py @@ -0,0 +1,174 @@ +#!/usr/bin/python +# +# Copyright: (c) 2018, Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + 
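# Illustrative note (not part of the original commit): github_webhook's
# main() above finds an existing hook with Python's for/else -- the else
# branch runs only when the loop finishes without hitting `break`, so `hook`
# ends up as None when no configured URL matches. A minimal, self-contained
# sketch of the same idiom (hypothetical `hooks` list and `wanted_url`):
#
#     hook = None
#     for hook in hooks:
#         if hook.config.get("url") == wanted_url:
#             break              # found a matching hook; keep it
#     else:
#         hook = None            # loop ran to completion: no match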
+ANSIBLE_METADATA = { + 'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community' +} + +DOCUMENTATION = ''' +--- +module: github_webhook_info +short_description: Query information about GitHub webhooks +description: + - "Query information about GitHub webhooks" + - This module was called C(github_webhook_facts) before Ansible 2.9. The usage did not change. +requirements: + - "PyGithub >= 1.3.5" +options: + repository: + description: + - Full name of the repository to configure a hook for + required: true + aliases: + - repo + user: + description: + - User to authenticate to GitHub as + required: true + password: + description: + - Password to authenticate to GitHub with + required: false + token: + description: + - Token to authenticate to GitHub with + required: false + github_url: + description: + - Base URL of the github api + required: false + default: https://api.github.com + +author: + - "Chris St. Pierre (@stpierre)" +''' + +EXAMPLES = ''' +- name: list hooks for a repository (password auth) + github_webhook_info: + repository: ansible/ansible + user: "{{ github_user }}" + password: "{{ github_password }}" + register: ansible_webhooks + +- name: list hooks for a repository on GitHub Enterprise (token auth) + github_webhook_info: + repository: myorg/myrepo + user: "{{ github_user }}" + token: "{{ github_user_api_token }}" + github_url: https://github.example.com/api/v3/ + register: myrepo_webhooks +''' + +RETURN = ''' +--- +hooks: + description: A list of hooks that exist for the repo + returned: always + type: list + sample: > + [{"has_shared_secret": true, + "url": "https://jenkins.example.com/ghprbhook/", + "events": ["issue_comment", "pull_request"], + "insecure_ssl": "1", + "content_type": "json", + "active": true, + "id": 6206, + "last_response": {"status": "active", "message": "OK", "code": 200}}] +''' + +import traceback + +GITHUB_IMP_ERR = None +try: + import github + HAS_GITHUB = True +except ImportError: + GITHUB_IMP_ERR = traceback.format_exc() + HAS_GITHUB = False + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib +from ansible.module_utils._text import to_native + + +def _munge_hook(hook_obj): + retval = { + "active": hook_obj.active, + "events": hook_obj.events, + "id": hook_obj.id, + "url": hook_obj.url, + } + retval.update(hook_obj.config) + retval["has_shared_secret"] = "secret" in retval + if "secret" in retval: + del retval["secret"] + + retval["last_response"] = hook_obj.last_response.raw_data + return retval + + +def main(): + module = AnsibleModule( + argument_spec=dict( + repository=dict(type='str', required=True, aliases=["repo"]), + user=dict(type='str', required=True), + password=dict(type='str', required=False, no_log=True), + token=dict(type='str', required=False, no_log=True), + github_url=dict( + type='str', required=False, default="https://api.github.com")), + mutually_exclusive=(('password', 'token'), ), + required_one_of=(("password", "token"), ), + supports_check_mode=True) + if module._name == 'github_webhook_facts': + module.deprecate("The 'github_webhook_facts' module has been renamed to 'github_webhook_info'", version='2.13') + + if not HAS_GITHUB: + module.fail_json(msg=missing_required_lib('PyGithub'), + exception=GITHUB_IMP_ERR) + + try: + github_conn = github.Github( + module.params["user"], + module.params.get("password") or module.params.get("token"), + base_url=module.params["github_url"]) + except github.GithubException as err: + module.fail_json(msg="Could not connect to GitHub at %s: 
%s" % ( + module.params["github_url"], to_native(err))) + + try: + repo = github_conn.get_repo(module.params["repository"]) + except github.BadCredentialsException as err: + module.fail_json(msg="Could not authenticate to GitHub at %s: %s" % ( + module.params["github_url"], to_native(err))) + except github.UnknownObjectException as err: + module.fail_json( + msg="Could not find repository %s in GitHub at %s: %s" % ( + module.params["repository"], module.params["github_url"], + to_native(err))) + except Exception as err: + module.fail_json( + msg="Could not fetch repository %s from GitHub at %s: %s" % + (module.params["repository"], module.params["github_url"], + to_native(err)), + exception=traceback.format_exc()) + + try: + hooks = [_munge_hook(h) for h in repo.get_hooks()] + except github.GithubException as err: + module.fail_json( + msg="Unable to get hooks from repository %s: %s" % + (module.params["repository"], to_native(err)), + exception=traceback.format_exc()) + + module.exit_json(changed=False, hooks=hooks) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/source_control/gitlab/gitlab_deploy_key.py b/plugins/modules/source_control/gitlab/gitlab_deploy_key.py new file mode 100644 index 0000000000..78d3a458c8 --- /dev/null +++ b/plugins/modules/source_control/gitlab/gitlab_deploy_key.py @@ -0,0 +1,300 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2019, Guillaume Martinez (lunik@tiwabbit.fr) +# Copyright: (c) 2018, Marcus Watkins +# Based on code: +# Copyright: (c) 2013, Phillip Gentry +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +module: gitlab_deploy_key +short_description: Manages GitLab project deploy keys. +description: + - Adds, updates and removes project deploy keys +author: + - Marcus Watkins (@marwatk) + - Guillaume Martinez (@Lunik) +requirements: + - python >= 2.7 + - python-gitlab python module +extends_documentation_fragment: +- community.general.auth_basic + +options: + api_token: + description: + - GitLab token for logging in. + type: str + project: + description: + - Id or Full path of project in the form of group/name. + required: true + type: str + title: + description: + - Deploy key's title. + required: true + type: str + key: + description: + - Deploy key + required: true + type: str + can_push: + description: + - Whether this key can push to the project. + type: bool + default: no + state: + description: + - When C(present) the deploy key added to the project if it doesn't exist. + - When C(absent) it will be removed from the project if it exists. + required: true + default: present + type: str + choices: [ "present", "absent" ] +''' + +EXAMPLES = ''' +- name: "Adding a project deploy key" + gitlab_deploy_key: + api_url: https://gitlab.example.com/ + api_token: "{{ api_token }}" + project: "my_group/my_project" + title: "Jenkins CI" + state: present + key: "ssh-rsa AAAAB3NzaC1yc2EAAAABJQAAAIEAiPWx6WM4lhHNedGfBpPJNPpZ7yKu+dnn1SJejgt4596k6YjzGGphH2TUxwKzxcKDKKezwkpfnxPkSMkuEspGRt/aZZ9w..." 
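# Editorial note (not part of the original commit): gitlab_deploy_key looks
# keys up by their title (see findDeployKey below), so re-using the same
# title in a later task updates the existing key in place. Note that the
# argument spec marks both `title` and `key` as required, so update and
# removal tasks must supply both, even when only `can_push` changes.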
+ +- name: "Update the above deploy key to add push access" + gitlab_deploy_key: + api_url: https://gitlab.example.com/ + api_token: "{{ api_token }}" + project: "my_group/my_project" + title: "Jenkins CI" + state: present + can_push: yes + +- name: "Remove the previous deploy key from the project" + gitlab_deploy_key: + api_url: https://gitlab.example.com/ + api_token: "{{ api_token }}" + project: "my_group/my_project" + state: absent + key: "ssh-rsa AAAAB3NzaC1yc2EAAAABJQAAAIEAiPWx6WM4lhHNedGfBpPJNPpZ7yKu+dnn1SJejgt4596k6YjzGGphH2TUxwKzxcKDKKezwkpfnxPkSMkuEspGRt/aZZ9w..." + +''' + +RETURN = ''' +msg: + description: Success or failure message + returned: always + type: str + sample: "Success" + +result: + description: json parsed response from the server + returned: always + type: dict + +error: + description: the error message returned by the GitLab API + returned: failed + type: str + sample: "400: key is already in use" + +deploy_key: + description: API object + returned: always + type: dict +''' + +import re +import traceback + +GITLAB_IMP_ERR = None +try: + import gitlab + HAS_GITLAB_PACKAGE = True +except Exception: + GITLAB_IMP_ERR = traceback.format_exc() + HAS_GITLAB_PACKAGE = False + +from ansible.module_utils.api import basic_auth_argument_spec +from ansible.module_utils.basic import AnsibleModule, missing_required_lib +from ansible.module_utils._text import to_native + +from ansible_collections.community.general.plugins.module_utils.gitlab import findProject, gitlabAuthentication + + +class GitLabDeployKey(object): + def __init__(self, module, gitlab_instance): + self._module = module + self._gitlab = gitlab_instance + self.deployKeyObject = None + + ''' + @param project Project object + @param key_title Title of the key + @param key_key String of the key + @param key_can_push Option of the deployKey + @param options Deploy key options + ''' + def createOrUpdateDeployKey(self, project, key_title, key_key, options): + changed = False + + # Because we have already call existsDeployKey in main() + if self.deployKeyObject is None: + deployKey = self.createDeployKey(project, { + 'title': key_title, + 'key': key_key, + 'can_push': options['can_push']}) + changed = True + else: + changed, deployKey = self.updateDeployKey(self.deployKeyObject, { + 'can_push': options['can_push']}) + + self.deployKeyObject = deployKey + if changed: + if self._module.check_mode: + self._module.exit_json(changed=True, msg="Successfully created or updated the deploy key %s" % key_title) + + try: + deployKey.save() + except Exception as e: + self._module.fail_json(msg="Failed to update deploy key: %s " % e) + return True + else: + return False + + ''' + @param project Project Object + @param arguments Attributes of the deployKey + ''' + def createDeployKey(self, project, arguments): + if self._module.check_mode: + return True + + try: + deployKey = project.keys.create(arguments) + except (gitlab.exceptions.GitlabCreateError) as e: + self._module.fail_json(msg="Failed to create deploy key: %s " % to_native(e)) + + return deployKey + + ''' + @param deployKey Deploy Key Object + @param arguments Attributes of the deployKey + ''' + def updateDeployKey(self, deployKey, arguments): + changed = False + + for arg_key, arg_value in arguments.items(): + if arguments[arg_key] is not None: + if getattr(deployKey, arg_key) != arguments[arg_key]: + setattr(deployKey, arg_key, arguments[arg_key]) + changed = True + + return (changed, deployKey) + + ''' + @param project Project object + @param key_title Title of the key + 
''' + def findDeployKey(self, project, key_title): + deployKeys = project.keys.list() + for deployKey in deployKeys: + if (deployKey.title == key_title): + return deployKey + + ''' + @param project Project object + @param key_title Title of the key + ''' + def existsDeployKey(self, project, key_title): + # When project exists, object will be stored in self.projectObject. + deployKey = self.findDeployKey(project, key_title) + if deployKey: + self.deployKeyObject = deployKey + return True + return False + + def deleteDeployKey(self): + if self._module.check_mode: + return True + + return self.deployKeyObject.delete() + + +def main(): + argument_spec = basic_auth_argument_spec() + argument_spec.update(dict( + api_token=dict(type='str', no_log=True), + state=dict(type='str', default="present", choices=["absent", "present"]), + project=dict(type='str', required=True), + key=dict(type='str', required=True), + can_push=dict(type='bool', default=False), + title=dict(type='str', required=True) + )) + + module = AnsibleModule( + argument_spec=argument_spec, + mutually_exclusive=[ + ['api_username', 'api_token'], + ['api_password', 'api_token'] + ], + required_together=[ + ['api_username', 'api_password'] + ], + required_one_of=[ + ['api_username', 'api_token'] + ], + supports_check_mode=True, + ) + + state = module.params['state'] + project_identifier = module.params['project'] + key_title = module.params['title'] + key_keyfile = module.params['key'] + key_can_push = module.params['can_push'] + + if not HAS_GITLAB_PACKAGE: + module.fail_json(msg=missing_required_lib("python-gitlab"), exception=GITLAB_IMP_ERR) + + gitlab_instance = gitlabAuthentication(module) + + gitlab_deploy_key = GitLabDeployKey(module, gitlab_instance) + + project = findProject(gitlab_instance, project_identifier) + + if project is None: + module.fail_json(msg="Failed to create deploy key: project %s doesn't exists" % project_identifier) + + deployKey_exists = gitlab_deploy_key.existsDeployKey(project, key_title) + + if state == 'absent': + if deployKey_exists: + gitlab_deploy_key.deleteDeployKey() + module.exit_json(changed=True, msg="Successfully deleted deploy key %s" % key_title) + else: + module.exit_json(changed=False, msg="Deploy key deleted or does not exists") + + if state == 'present': + if gitlab_deploy_key.createOrUpdateDeployKey(project, key_title, key_keyfile, {'can_push': key_can_push}): + + module.exit_json(changed=True, msg="Successfully created or updated the deploy key %s" % key_title, + deploy_key=gitlab_deploy_key.deployKeyObject._attrs) + else: + module.exit_json(changed=False, msg="No need to update the deploy key %s" % key_title, + deploy_key=gitlab_deploy_key.deployKeyObject._attrs) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/source_control/gitlab/gitlab_group.py b/plugins/modules/source_control/gitlab/gitlab_group.py new file mode 100644 index 0000000000..5cea3da9a6 --- /dev/null +++ b/plugins/modules/source_control/gitlab/gitlab_group.py @@ -0,0 +1,324 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2019, Guillaume Martinez (lunik@tiwabbit.fr) +# Copyright: (c) 2015, Werner Dijkerman (ikben@werner-dijkerman.nl) +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: gitlab_group 
+short_description: Creates/updates/deletes GitLab Groups +description: + - When the group does not exist in GitLab, it will be created. + - When the group does exist and state=absent, the group will be deleted. +author: + - Werner Dijkerman (@dj-wasabi) + - Guillaume Martinez (@Lunik) +requirements: + - python >= 2.7 + - python-gitlab python module +extends_documentation_fragment: +- community.general.auth_basic + +options: + api_token: + description: + - GitLab token for logging in. + type: str + name: + description: + - Name of the group you want to create. + required: true + type: str + path: + description: + - The path of the group you want to create, this will be api_url/group_path + - If not supplied, the group_name will be used. + type: str + description: + description: + - A description for the group. + type: str + state: + description: + - create or delete group. + - Possible values are present and absent. + default: present + type: str + choices: ["present", "absent"] + parent: + description: + - Allow to create subgroups + - Id or Full path of parent group in the form of group/name + type: str + visibility: + description: + - Default visibility of the group + choices: ["private", "internal", "public"] + default: private + type: str +''' + +EXAMPLES = ''' +- name: "Delete GitLab Group" + gitlab_group: + api_url: https://gitlab.example.com/ + api_token: "{{ access_token }}" + validate_certs: False + name: my_first_group + state: absent + +- name: "Create GitLab Group" + gitlab_group: + api_url: https://gitlab.example.com/ + validate_certs: True + api_username: dj-wasabi + api_password: "MySecretPassword" + name: my_first_group + path: my_first_group + state: present + +# The group will by created at https://gitlab.dj-wasabi.local/super_parent/parent/my_first_group +- name: "Create GitLab SubGroup" + gitlab_group: + api_url: https://gitlab.example.com/ + validate_certs: True + api_username: dj-wasabi + api_password: "MySecretPassword" + name: my_first_group + path: my_first_group + state: present + parent: "super_parent/parent" +''' + +RETURN = ''' +msg: + description: Success or failure message + returned: always + type: str + sample: "Success" + +result: + description: json parsed response from the server + returned: always + type: dict + +error: + description: the error message returned by the GitLab API + returned: failed + type: str + sample: "400: path is already in use" + +group: + description: API object + returned: always + type: dict +''' + +import traceback + +GITLAB_IMP_ERR = None +try: + import gitlab + HAS_GITLAB_PACKAGE = True +except Exception: + GITLAB_IMP_ERR = traceback.format_exc() + HAS_GITLAB_PACKAGE = False + +from ansible.module_utils.api import basic_auth_argument_spec +from ansible.module_utils.basic import AnsibleModule, missing_required_lib +from ansible.module_utils._text import to_native + +from ansible_collections.community.general.plugins.module_utils.gitlab import findGroup, gitlabAuthentication + + +class GitLabGroup(object): + def __init__(self, module, gitlab_instance): + self._module = module + self._gitlab = gitlab_instance + self.groupObject = None + + ''' + @param group Group object + ''' + def getGroupId(self, group): + if group is not None: + return group.id + return None + + ''' + @param name Name of the group + @param parent Parent group full path + @param options Group options + ''' + def createOrUpdateGroup(self, name, parent, options): + changed = False + + # Because we have already call userExists in main() + if self.groupObject is 
None: + parent_id = self.getGroupId(parent) + + group = self.createGroup({ + 'name': name, + 'path': options['path'], + 'parent_id': parent_id, + 'visibility': options['visibility']}) + changed = True + else: + changed, group = self.updateGroup(self.groupObject, { + 'name': name, + 'description': options['description'], + 'visibility': options['visibility']}) + + self.groupObject = group + if changed: + if self._module.check_mode: + self._module.exit_json(changed=True, msg="Successfully created or updated the group %s" % name) + + try: + group.save() + except Exception as e: + self._module.fail_json(msg="Failed to update group: %s " % e) + return True + else: + return False + + ''' + @param arguments Attributes of the group + ''' + def createGroup(self, arguments): + if self._module.check_mode: + return True + + try: + group = self._gitlab.groups.create(arguments) + except (gitlab.exceptions.GitlabCreateError) as e: + self._module.fail_json(msg="Failed to create group: %s " % to_native(e)) + + return group + + ''' + @param group Group Object + @param arguments Attributes of the group + ''' + def updateGroup(self, group, arguments): + changed = False + + for arg_key, arg_value in arguments.items(): + if arguments[arg_key] is not None: + if getattr(group, arg_key) != arguments[arg_key]: + setattr(group, arg_key, arguments[arg_key]) + changed = True + + return (changed, group) + + def deleteGroup(self): + group = self.groupObject + + if len(group.projects.list()) >= 1: + self._module.fail_json( + msg="There are still projects in this group. These needs to be moved or deleted before this group can be removed.") + else: + if self._module.check_mode: + return True + + try: + group.delete() + except Exception as e: + self._module.fail_json(msg="Failed to delete group: %s " % to_native(e)) + + ''' + @param name Name of the groupe + @param full_path Complete path of the Group including parent group path. / + ''' + def existsGroup(self, project_identifier): + # When group/user exists, object will be stored in self.groupObject. 
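        # Illustrative sketch (not part of the original commit): updateGroup
        # above uses a generic attribute-diff pattern that the other GitLab
        # modules in this commit repeat -- compare each desired attribute with
        # the live object, stage differences with setattr(), and let the
        # caller persist them via obj.save(). In outline:
        #
        #     def update_attrs(obj, arguments):
        #         changed = False
        #         for key, value in arguments.items():
        #             if value is not None and getattr(obj, key) != value:
        #                 setattr(obj, key, value)  # staged; obj.save() commits
        #                 changed = True
        #         return changed, obj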
+ group = findGroup(self._gitlab, project_identifier) + if group: + self.groupObject = group + return True + return False + + +def main(): + argument_spec = basic_auth_argument_spec() + argument_spec.update(dict( + api_token=dict(type='str', no_log=True), + name=dict(type='str', required=True), + path=dict(type='str'), + description=dict(type='str'), + state=dict(type='str', default="present", choices=["absent", "present"]), + parent=dict(type='str'), + visibility=dict(type='str', default="private", choices=["internal", "private", "public"]), + )) + + module = AnsibleModule( + argument_spec=argument_spec, + mutually_exclusive=[ + ['api_username', 'api_token'], + ['api_password', 'api_token'], + ], + required_together=[ + ['api_username', 'api_password'], + ], + required_one_of=[ + ['api_username', 'api_token'] + ], + supports_check_mode=True, + ) + + group_name = module.params['name'] + group_path = module.params['path'] + description = module.params['description'] + state = module.params['state'] + parent_identifier = module.params['parent'] + group_visibility = module.params['visibility'] + + if not HAS_GITLAB_PACKAGE: + module.fail_json(msg=missing_required_lib("python-gitlab"), exception=GITLAB_IMP_ERR) + + gitlab_instance = gitlabAuthentication(module) + + # Define default group_path based on group_name + if group_path is None: + group_path = group_name.replace(" ", "_") + + gitlab_group = GitLabGroup(module, gitlab_instance) + + parent_group = None + if parent_identifier: + parent_group = findGroup(gitlab_instance, parent_identifier) + if not parent_group: + module.fail_json(msg="Failed create GitLab group: Parent group doesn't exists") + + group_exists = gitlab_group.existsGroup(parent_group.full_path + '/' + group_path) + else: + group_exists = gitlab_group.existsGroup(group_path) + + if state == 'absent': + if group_exists: + gitlab_group.deleteGroup() + module.exit_json(changed=True, msg="Successfully deleted group %s" % group_name) + else: + module.exit_json(changed=False, msg="Group deleted or does not exists") + + if state == 'present': + if gitlab_group.createOrUpdateGroup(group_name, parent_group, { + "path": group_path, + "description": description, + "visibility": group_visibility}): + module.exit_json(changed=True, msg="Successfully created or updated the group %s" % group_name, group=gitlab_group.groupObject._attrs) + else: + module.exit_json(changed=False, msg="No need to update the group %s" % group_name, group=gitlab_group.groupObject._attrs) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/source_control/gitlab/gitlab_hook.py b/plugins/modules/source_control/gitlab/gitlab_hook.py new file mode 100644 index 0000000000..575607ca8c --- /dev/null +++ b/plugins/modules/source_control/gitlab/gitlab_hook.py @@ -0,0 +1,391 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2019, Guillaume Martinez (lunik@tiwabbit.fr) +# Copyright: (c) 2018, Marcus Watkins +# Based on code: +# Copyright: (c) 2013, Phillip Gentry +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: gitlab_hook +short_description: Manages GitLab project hooks. 
+description: + - Adds, updates and removes project hook +author: + - Marcus Watkins (@marwatk) + - Guillaume Martinez (@Lunik) +requirements: + - python >= 2.7 + - python-gitlab python module +extends_documentation_fragment: +- community.general.auth_basic + +options: + api_token: + description: + - GitLab token for logging in. + type: str + project: + description: + - Id or Full path of the project in the form of group/name. + required: true + type: str + hook_url: + description: + - The url that you want GitLab to post to, this is used as the primary key for updates and deletion. + required: true + type: str + state: + description: + - When C(present) the hook will be updated to match the input or created if it doesn't exist. + - When C(absent) hook will be deleted if it exists. + required: true + default: present + type: str + choices: [ "present", "absent" ] + push_events: + description: + - Trigger hook on push events. + type: bool + default: yes + push_events_branch_filter: + description: + - Branch name of wildcard to trigger hook on push events + type: str + issues_events: + description: + - Trigger hook on issues events. + type: bool + default: no + merge_requests_events: + description: + - Trigger hook on merge requests events. + type: bool + default: no + tag_push_events: + description: + - Trigger hook on tag push events. + type: bool + default: no + note_events: + description: + - Trigger hook on note events or when someone adds a comment. + type: bool + default: no + job_events: + description: + - Trigger hook on job events. + type: bool + default: no + pipeline_events: + description: + - Trigger hook on pipeline events. + type: bool + default: no + wiki_page_events: + description: + - Trigger hook on wiki events. + type: bool + default: no + hook_validate_certs: + description: + - Whether GitLab will do SSL verification when triggering the hook. + type: bool + default: no + aliases: [ enable_ssl_verification ] + token: + description: + - Secret token to validate hook messages at the receiver. + - If this is present it will always result in a change as it cannot be retrieved from GitLab. + - Will show up in the X-GitLab-Token HTTP request header. 
+ required: false + type: str +''' + +EXAMPLES = ''' +- name: "Adding a project hook" + gitlab_hook: + api_url: https://gitlab.example.com/ + api_token: "{{ access_token }}" + project: "my_group/my_project" + hook_url: "https://my-ci-server.example.com/gitlab-hook" + state: present + push_events: yes + tag_push_events: yes + hook_validate_certs: no + token: "my-super-secret-token-that-my-ci-server-will-check" + +- name: "Delete the previous hook" + gitlab_hook: + api_url: https://gitlab.example.com/ + api_token: "{{ access_token }}" + project: "my_group/my_project" + hook_url: "https://my-ci-server.example.com/gitlab-hook" + state: absent + +- name: "Delete a hook by numeric project id" + gitlab_hook: + api_url: https://gitlab.example.com/ + api_token: "{{ access_token }}" + project: 10 + hook_url: "https://my-ci-server.example.com/gitlab-hook" + state: absent +''' + +RETURN = ''' +msg: + description: Success or failure message + returned: always + type: str + sample: "Success" + +result: + description: json parsed response from the server + returned: always + type: dict + +error: + description: the error message returned by the GitLab API + returned: failed + type: str + sample: "400: path is already in use" + +hook: + description: API object + returned: always + type: dict +''' + +import re +import traceback + +GITLAB_IMP_ERR = None +try: + import gitlab + HAS_GITLAB_PACKAGE = True +except Exception: + GITLAB_IMP_ERR = traceback.format_exc() + HAS_GITLAB_PACKAGE = False + +from ansible.module_utils.api import basic_auth_argument_spec +from ansible.module_utils.basic import AnsibleModule, missing_required_lib +from ansible.module_utils._text import to_native + +from ansible_collections.community.general.plugins.module_utils.gitlab import findProject, gitlabAuthentication + + +class GitLabHook(object): + def __init__(self, module, gitlab_instance): + self._module = module + self._gitlab = gitlab_instance + self.hookObject = None + + ''' + @param project Project Object + @param hook_url Url to call on event + @param description Description of the group + @param parent Parent group full path + ''' + def createOrUpdateHook(self, project, hook_url, options): + changed = False + + # Because we have already call userExists in main() + if self.hookObject is None: + hook = self.createHook(project, { + 'url': hook_url, + 'push_events': options['push_events'], + 'push_events_branch_filter': options['push_events_branch_filter'], + 'issues_events': options['issues_events'], + 'merge_requests_events': options['merge_requests_events'], + 'tag_push_events': options['tag_push_events'], + 'note_events': options['note_events'], + 'job_events': options['job_events'], + 'pipeline_events': options['pipeline_events'], + 'wiki_page_events': options['wiki_page_events'], + 'enable_ssl_verification': options['enable_ssl_verification'], + 'token': options['token']}) + changed = True + else: + changed, hook = self.updateHook(self.hookObject, { + 'push_events': options['push_events'], + 'push_events_branch_filter': options['push_events_branch_filter'], + 'issues_events': options['issues_events'], + 'merge_requests_events': options['merge_requests_events'], + 'tag_push_events': options['tag_push_events'], + 'note_events': options['note_events'], + 'job_events': options['job_events'], + 'pipeline_events': options['pipeline_events'], + 'wiki_page_events': options['wiki_page_events'], + 'enable_ssl_verification': options['enable_ssl_verification'], + 'token': options['token']}) + + self.hookObject = hook + if changed: + 
if self._module.check_mode: + self._module.exit_json(changed=True, msg="Successfully created or updated the hook %s" % hook_url) + + try: + hook.save() + except Exception as e: + self._module.fail_json(msg="Failed to update hook: %s " % e) + return True + else: + return False + + ''' + @param project Project Object + @param arguments Attributes of the hook + ''' + def createHook(self, project, arguments): + if self._module.check_mode: + return True + + hook = project.hooks.create(arguments) + + return hook + + ''' + @param hook Hook Object + @param arguments Attributes of the hook + ''' + def updateHook(self, hook, arguments): + changed = False + + for arg_key, arg_value in arguments.items(): + if arguments[arg_key] is not None: + if getattr(hook, arg_key) != arguments[arg_key]: + setattr(hook, arg_key, arguments[arg_key]) + changed = True + + return (changed, hook) + + ''' + @param project Project object + @param hook_url Url to call on event + ''' + def findHook(self, project, hook_url): + hooks = project.hooks.list() + for hook in hooks: + if (hook.url == hook_url): + return hook + + ''' + @param project Project object + @param hook_url Url to call on event + ''' + def existsHook(self, project, hook_url): + # When project exists, object will be stored in self.projectObject. + hook = self.findHook(project, hook_url) + if hook: + self.hookObject = hook + return True + return False + + def deleteHook(self): + if self._module.check_mode: + return True + + return self.hookObject.delete() + + +def main(): + argument_spec = basic_auth_argument_spec() + argument_spec.update(dict( + api_token=dict(type='str', no_log=True), + state=dict(type='str', default="present", choices=["absent", "present"]), + project=dict(type='str', required=True), + hook_url=dict(type='str', required=True), + push_events=dict(type='bool', default=True), + push_events_branch_filter=dict(type='str', default=''), + issues_events=dict(type='bool', default=False), + merge_requests_events=dict(type='bool', default=False), + tag_push_events=dict(type='bool', default=False), + note_events=dict(type='bool', default=False), + job_events=dict(type='bool', default=False), + pipeline_events=dict(type='bool', default=False), + wiki_page_events=dict(type='bool', default=False), + hook_validate_certs=dict(type='bool', default=False, aliases=['enable_ssl_verification']), + token=dict(type='str', no_log=True), + )) + + module = AnsibleModule( + argument_spec=argument_spec, + mutually_exclusive=[ + ['api_username', 'api_token'], + ['api_password', 'api_token'] + ], + required_together=[ + ['api_username', 'api_password'] + ], + required_one_of=[ + ['api_username', 'api_token'] + ], + supports_check_mode=True, + ) + + state = module.params['state'] + project_identifier = module.params['project'] + hook_url = module.params['hook_url'] + push_events = module.params['push_events'] + push_events_branch_filter = module.params['push_events_branch_filter'] + issues_events = module.params['issues_events'] + merge_requests_events = module.params['merge_requests_events'] + tag_push_events = module.params['tag_push_events'] + note_events = module.params['note_events'] + job_events = module.params['job_events'] + pipeline_events = module.params['pipeline_events'] + wiki_page_events = module.params['wiki_page_events'] + enable_ssl_verification = module.params['hook_validate_certs'] + hook_token = module.params['token'] + + if not HAS_GITLAB_PACKAGE: + module.fail_json(msg=missing_required_lib("python-gitlab"), exception=GITLAB_IMP_ERR) + + 
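    # Editorial note (not part of the original commit): two behaviours of this
    # module are easy to miss. First, `hook_validate_certs` is forwarded to
    # GitLab under its API name `enable_ssl_verification`. Second, when
    # `token` is set the module always reports a change, because GitLab never
    # returns the stored secret, so it cannot be compared against the input.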
gitlab_instance = gitlabAuthentication(module) + + gitlab_hook = GitLabHook(module, gitlab_instance) + + project = findProject(gitlab_instance, project_identifier) + + if project is None: + module.fail_json(msg="Failed to create hook: project %s doesn't exists" % project_identifier) + + hook_exists = gitlab_hook.existsHook(project, hook_url) + + if state == 'absent': + if hook_exists: + gitlab_hook.deleteHook() + module.exit_json(changed=True, msg="Successfully deleted hook %s" % hook_url) + else: + module.exit_json(changed=False, msg="Hook deleted or does not exists") + + if state == 'present': + if gitlab_hook.createOrUpdateHook(project, hook_url, { + "push_events": push_events, + "push_events_branch_filter": push_events_branch_filter, + "issues_events": issues_events, + "merge_requests_events": merge_requests_events, + "tag_push_events": tag_push_events, + "note_events": note_events, + "job_events": job_events, + "pipeline_events": pipeline_events, + "wiki_page_events": wiki_page_events, + "enable_ssl_verification": enable_ssl_verification, + "token": hook_token}): + + module.exit_json(changed=True, msg="Successfully created or updated the hook %s" % hook_url, hook=gitlab_hook.hookObject._attrs) + else: + module.exit_json(changed=False, msg="No need to update the hook %s" % hook_url, hook=gitlab_hook.hookObject._attrs) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/source_control/gitlab/gitlab_hooks.py b/plugins/modules/source_control/gitlab/gitlab_hooks.py new file mode 120000 index 0000000000..bb7ccd6db1 --- /dev/null +++ b/plugins/modules/source_control/gitlab/gitlab_hooks.py @@ -0,0 +1 @@ +gitlab_hook.py \ No newline at end of file diff --git a/plugins/modules/source_control/gitlab/gitlab_project.py b/plugins/modules/source_control/gitlab/gitlab_project.py new file mode 100644 index 0000000000..d01a37d726 --- /dev/null +++ b/plugins/modules/source_control/gitlab/gitlab_project.py @@ -0,0 +1,364 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2019, Guillaume Martinez (lunik@tiwabbit.fr) +# Copyright: (c) 2015, Werner Dijkerman (ikben@werner-dijkerman.nl) +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: gitlab_project +short_description: Creates/updates/deletes GitLab Projects +description: + - When the project does not exist in GitLab, it will be created. + - When the project does exists and state=absent, the project will be deleted. + - When changes are made to the project, the project will be updated. +author: + - Werner Dijkerman (@dj-wasabi) + - Guillaume Martinez (@Lunik) +requirements: + - python >= 2.7 + - python-gitlab python module +extends_documentation_fragment: +- community.general.auth_basic + +options: + api_token: + description: + - GitLab token for logging in. + type: str + group: + description: + - Id or The full path of the group of which this projects belongs to. + type: str + name: + description: + - The name of the project + required: true + type: str + path: + description: + - The path of the project you want to create, this will be server_url//path. + - If not supplied, name will be used. + type: str + description: + description: + - An description for the project. 
+ type: str + issues_enabled: + description: + - Whether you want to create issues or not. + - Possible values are true and false. + type: bool + default: yes + merge_requests_enabled: + description: + - If merge requests can be made or not. + - Possible values are true and false. + type: bool + default: yes + wiki_enabled: + description: + - If an wiki for this project should be available or not. + - Possible values are true and false. + type: bool + default: yes + snippets_enabled: + description: + - If creating snippets should be available or not. + - Possible values are true and false. + type: bool + default: yes + visibility: + description: + - Private. Project access must be granted explicitly for each user. + - Internal. The project can be cloned by any logged in user. + - Public. The project can be cloned without any authentication. + default: private + type: str + choices: ["private", "internal", "public"] + aliases: + - visibility_level + import_url: + description: + - Git repository which will be imported into gitlab. + - GitLab server needs read access to this git repository. + required: false + type: str + state: + description: + - create or delete project. + - Possible values are present and absent. + default: present + type: str + choices: ["present", "absent"] +''' + +EXAMPLES = ''' +- name: Delete GitLab Project + gitlab_project: + api_url: https://gitlab.example.com/ + api_token: "{{ access_token }}" + validate_certs: False + name: my_first_project + state: absent + delegate_to: localhost + +- name: Create GitLab Project in group Ansible + gitlab_project: + api_url: https://gitlab.example.com/ + validate_certs: True + api_username: dj-wasabi + api_password: "MySecretPassword" + name: my_first_project + group: ansible + issues_enabled: False + wiki_enabled: True + snippets_enabled: True + import_url: http://git.example.com/example/lab.git + state: present + delegate_to: localhost +''' + +RETURN = ''' +msg: + description: Success or failure message + returned: always + type: str + sample: "Success" + +result: + description: json parsed response from the server + returned: always + type: dict + +error: + description: the error message returned by the GitLab API + returned: failed + type: str + sample: "400: path is already in use" + +project: + description: API object + returned: always + type: dict +''' + +import traceback + +GITLAB_IMP_ERR = None +try: + import gitlab + HAS_GITLAB_PACKAGE = True +except Exception: + GITLAB_IMP_ERR = traceback.format_exc() + HAS_GITLAB_PACKAGE = False + +from ansible.module_utils.api import basic_auth_argument_spec +from ansible.module_utils.basic import AnsibleModule, missing_required_lib +from ansible.module_utils._text import to_native + +from ansible_collections.community.general.plugins.module_utils.gitlab import findGroup, findProject, gitlabAuthentication + + +class GitLabProject(object): + def __init__(self, module, gitlab_instance): + self._module = module + self._gitlab = gitlab_instance + self.projectObject = None + + ''' + @param project_name Name of the project + @param namespace Namespace Object (User or Group) + @param options Options of the project + ''' + def createOrUpdateProject(self, project_name, namespace, options): + changed = False + + # Because we have already call userExists in main() + if self.projectObject is None: + project = self.createProject(namespace, { + 'name': project_name, + 'path': options['path'], + 'description': options['description'], + 'issues_enabled': options['issues_enabled'], + 
'merge_requests_enabled': options['merge_requests_enabled'], + 'wiki_enabled': options['wiki_enabled'], + 'snippets_enabled': options['snippets_enabled'], + 'visibility': options['visibility'], + 'import_url': options['import_url']}) + changed = True + else: + changed, project = self.updateProject(self.projectObject, { + 'name': project_name, + 'description': options['description'], + 'issues_enabled': options['issues_enabled'], + 'merge_requests_enabled': options['merge_requests_enabled'], + 'wiki_enabled': options['wiki_enabled'], + 'snippets_enabled': options['snippets_enabled'], + 'visibility': options['visibility']}) + + self.projectObject = project + if changed: + if self._module.check_mode: + self._module.exit_json(changed=True, msg="Successfully created or updated the project %s" % project_name) + + try: + project.save() + except Exception as e: + self._module.fail_json(msg="Failed to update project: %s" % e) + return True + else: + return False + + ''' + @param namespace Namespace Object (User or Group) + @param arguments Attributes of the project + ''' + def createProject(self, namespace, arguments): + if self._module.check_mode: + return True + + arguments['namespace_id'] = namespace.id + try: + project = self._gitlab.projects.create(arguments) + except (gitlab.exceptions.GitlabCreateError) as e: + self._module.fail_json(msg="Failed to create project: %s " % to_native(e)) + + return project + + ''' + @param project Project Object + @param arguments Attributes of the project + ''' + def updateProject(self, project, arguments): + changed = False + + for arg_key, arg_value in arguments.items(): + if arguments[arg_key] is not None: + if getattr(project, arg_key) != arguments[arg_key]: + setattr(project, arg_key, arguments[arg_key]) + changed = True + + return (changed, project) + + def deleteProject(self): + if self._module.check_mode: + return True + + project = self.projectObject + + return project.delete() + + ''' + @param namespace User/Group object + @param path Path of the project + ''' + def existsProject(self, namespace, path): + # When project exists, object will be stored in self.projectObject.
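+ # Hedged illustration of the call below: findProject() receives the namespace's full + # path joined with the project path, e.g. 'mygroup/my_first_project' (made-up names), + # and returns None when no such project exists.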
+ project = findProject(self._gitlab, namespace.full_path + '/' + path) + if project: + self.projectObject = project + return True + return False + + +def main(): + argument_spec = basic_auth_argument_spec() + argument_spec.update(dict( + api_token=dict(type='str', no_log=True), + group=dict(type='str'), + name=dict(type='str', required=True), + path=dict(type='str'), + description=dict(type='str'), + issues_enabled=dict(type='bool', default=True), + merge_requests_enabled=dict(type='bool', default=True), + wiki_enabled=dict(type='bool', default=True), + snippets_enabled=dict(default=True, type='bool'), + visibility=dict(type='str', default="private", choices=["internal", "private", "public"], aliases=["visibility_level"]), + import_url=dict(type='str'), + state=dict(type='str', default="present", choices=["absent", "present"]), + )) + + module = AnsibleModule( + argument_spec=argument_spec, + mutually_exclusive=[ + ['api_username', 'api_token'], + ['api_password', 'api_token'], + ], + required_together=[ + ['api_username', 'api_password'], + ], + required_one_of=[ + ['api_username', 'api_token'] + ], + supports_check_mode=True, + ) + + group_identifier = module.params['group'] + project_name = module.params['name'] + project_path = module.params['path'] + project_description = module.params['description'] + issues_enabled = module.params['issues_enabled'] + merge_requests_enabled = module.params['merge_requests_enabled'] + wiki_enabled = module.params['wiki_enabled'] + snippets_enabled = module.params['snippets_enabled'] + visibility = module.params['visibility'] + import_url = module.params['import_url'] + state = module.params['state'] + + if not HAS_GITLAB_PACKAGE: + module.fail_json(msg=missing_required_lib("python-gitlab"), exception=GITLAB_IMP_ERR) + + gitlab_instance = gitlabAuthentication(module) + + # Set project_path to project_name if it is empty. 
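+ # Illustrative example (made-up name): a project named 'My First Project' gets the + # path 'My_First_Project' from the replace() below.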
+ if project_path is None: + project_path = project_name.replace(" ", "_") + + gitlab_project = GitLabProject(module, gitlab_instance) + + if group_identifier: + group = findGroup(gitlab_instance, group_identifier) + if group is None: + module.fail_json(msg="Failed to create project: group %s doesn't exist" % group_identifier) + + namespace = gitlab_instance.namespaces.get(group.id) + project_exists = gitlab_project.existsProject(namespace, project_path) + else: + user = gitlab_instance.users.list(username=gitlab_instance.user.username)[0] + namespace = gitlab_instance.namespaces.get(user.id) + project_exists = gitlab_project.existsProject(namespace, project_path) + + if state == 'absent': + if project_exists: + gitlab_project.deleteProject() + module.exit_json(changed=True, msg="Successfully deleted project %s" % project_name) + else: + module.exit_json(changed=False, msg="Project already deleted or does not exist") + + if state == 'present': + if gitlab_project.createOrUpdateProject(project_name, namespace, { + "path": project_path, + "description": project_description, + "issues_enabled": issues_enabled, + "merge_requests_enabled": merge_requests_enabled, + "wiki_enabled": wiki_enabled, + "snippets_enabled": snippets_enabled, + "visibility": visibility, + "import_url": import_url}): + + module.exit_json(changed=True, msg="Successfully created or updated the project %s" % project_name, project=gitlab_project.projectObject._attrs) + else: + module.exit_json(changed=False, msg="No need to update the project %s" % project_name, project=gitlab_project.projectObject._attrs) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/source_control/gitlab/gitlab_project_variable.py b/plugins/modules/source_control/gitlab/gitlab_project_variable.py new file mode 100644 index 0000000000..c99366f347 --- /dev/null +++ b/plugins/modules/source_control/gitlab/gitlab_project_variable.py @@ -0,0 +1,291 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2019, Markus Bergholz (markuman@gmail.com) +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +module: gitlab_project_variable +short_description: Creates/updates/deletes GitLab Projects Variables +description: + - When a project variable does not exist, it will be created. + - When a project variable does exist, its value will be updated when the values are different. + - Variables which exist in the GitLab project but are not mentioned in the playbook + either stay untouched (I(purge) is C(false)) or will be deleted (I(purge) is C(true)). +author: + - "Markus Bergholz (@markuman)" +requirements: + - python >= 2.7 + - python-gitlab python module +extends_documentation_fragment: +- community.general.auth_basic + +options: + state: + description: + - Create or delete project variable. + - Possible values are present and absent. + default: present + type: str + choices: ["present", "absent"] + api_token: + description: + - GitLab access token with API permissions. + required: true + type: str + project: + description: + - The path and name of the project. + required: true + type: str + purge: + description: + - When set to true, all variables which are not mentioned in the task will be deleted.
+ default: false + type: bool + vars: + description: + - When the list element is a simple key-value pair, masked and protected will be set to false. + - When the list element is a dict with the keys I(value), I(masked) and I(protected), the user can + have full control over whether a value should be masked, protected or both. + - Support for protected values requires GitLab >= 9.3. + - Support for masked values requires GitLab >= 11.10. + - A I(value) must be a string or a number. + - When a value is masked, it must be in Base64 and have a length of at least 8 characters. + See GitLab documentation on acceptable values for a masked variable (https://docs.gitlab.com/ce/ci/variables/#masked-variables). + default: {} + type: dict +''' + + +EXAMPLES = ''' +- name: Set or update some CI/CD variables + gitlab_project_variable: + api_url: https://gitlab.com + api_token: secret_access_token + project: markuman/dotfiles + purge: false + vars: + ACCESS_KEY_ID: abc123 + SECRET_ACCESS_KEY: 321cba + +- name: Set or update some CI/CD variables + gitlab_project_variable: + api_url: https://gitlab.com + api_token: secret_access_token + project: markuman/dotfiles + purge: false + vars: + ACCESS_KEY_ID: abc123 + SECRET_ACCESS_KEY: + value: 3214cbad + masked: true + protected: true + +- name: Delete one variable + gitlab_project_variable: + api_url: https://gitlab.com + api_token: secret_access_token + project: markuman/dotfiles + state: absent + vars: + ACCESS_KEY_ID: abc123 +''' + +RETURN = ''' +project_variable: + description: Four lists of the variable names which were added, updated, removed or left untouched. + returned: always + type: dict + contains: + added: + description: A list of variables which were created. + returned: always + type: list + sample: "['ACCESS_KEY_ID', 'SECRET_ACCESS_KEY']" + untouched: + description: A list of variables which already existed and were left unchanged. + returned: always + type: list + sample: "['ACCESS_KEY_ID', 'SECRET_ACCESS_KEY']" + removed: + description: A list of variables which were deleted. + returned: always + type: list + sample: "['ACCESS_KEY_ID', 'SECRET_ACCESS_KEY']" + updated: + description: A list of variables whose values were changed.
+ returned: always + type: list + sample: "['ACCESS_KEY_ID', 'SECRET_ACCESS_KEY']" +''' + +import traceback + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib +from ansible.module_utils._text import to_native +from ansible.module_utils.api import basic_auth_argument_spec +from ansible.module_utils.six import string_types +from ansible.module_utils.six import integer_types + + +GITLAB_IMP_ERR = None +try: + import gitlab + HAS_GITLAB_PACKAGE = True +except Exception: + GITLAB_IMP_ERR = traceback.format_exc() + HAS_GITLAB_PACKAGE = False + +from ansible_collections.community.general.plugins.module_utils.gitlab import gitlabAuthentication + + +class GitlabProjectVariables(object): + + def __init__(self, module, gitlab_instance): + self.repo = gitlab_instance + self.project = self.get_project(module.params['project']) + self._module = module + + def get_project(self, project_name): + return self.repo.projects.get(project_name) + + def list_all_project_variables(self): + return self.project.variables.list() + + def create_variable(self, key, value, masked, protected): + if self._module.check_mode: + return + return self.project.variables.create({"key": key, "value": value, + "masked": masked, "protected": protected}) + + def update_variable(self, key, var, value, masked, protected): + if var.value == value and var.protected == protected and var.masked == masked: + return False + + if self._module.check_mode: + return True + + if var.protected == protected and var.masked == masked: + var.value = value + var.save() + return True + + self.delete_variable(key) + self.create_variable(key, value, masked, protected) + return True + + def delete_variable(self, key): + if self._module.check_mode: + return + return self.project.variables.delete(key) + + +def native_python_main(this_gitlab, purge, var_list, state, module): + + change = False + return_value = dict(added=list(), updated=list(), removed=list(), untouched=list()) + + gitlab_keys = this_gitlab.list_all_project_variables() + existing_variables = [x.get_id() for x in gitlab_keys] + + for key in var_list: + + if isinstance(var_list[key], string_types) or isinstance(var_list[key], (integer_types, float)): + value = var_list[key] + masked = False + protected = False + elif isinstance(var_list[key], dict): + value = var_list[key].get('value') + masked = var_list[key].get('masked', False) + protected = var_list[key].get('protected', False) + else: + module.fail_json(msg="value must be of type string, integer or dict") + + if key in existing_variables: + index = existing_variables.index(key) + existing_variables[index] = None + + if state == 'present': + single_change = this_gitlab.update_variable(key, + gitlab_keys[index], + value, masked, + protected) + change = single_change or change + if single_change: + return_value['updated'].append(key) + else: + return_value['untouched'].append(key) + + elif state == 'absent': + this_gitlab.delete_variable(key) + change = True + return_value['removed'].append(key) + + elif key not in existing_variables and state == 'present': + this_gitlab.create_variable(key, value, masked, protected) + change = True + return_value['added'].append(key) + + existing_variables = list(filter(None, existing_variables)) + if purge: + for item in existing_variables: + this_gitlab.delete_variable(item) + change = True + return_value['removed'].append(item) + else: + return_value['untouched'].extend(existing_variables) + + return change, return_value + + +def main(): + argument_spec = 
basic_auth_argument_spec() + argument_spec.update( + api_token=dict(type='str', required=True, no_log=True), + project=dict(type='str', required=True), + purge=dict(type='bool', required=False, default=False), + vars=dict(type='dict', required=False, default=dict(), no_log=True), + state=dict(type='str', default="present", choices=["absent", "present"]) + ) + + module = AnsibleModule( + argument_spec=argument_spec, + mutually_exclusive=[ + ['api_username', 'api_token'], + ['api_password', 'api_token'], + ], + required_together=[ + ['api_username', 'api_password'], + ], + required_one_of=[ + ['api_username', 'api_token'] + ], + supports_check_mode=True + ) + + purge = module.params['purge'] + var_list = module.params['vars'] + state = module.params['state'] + + if not HAS_GITLAB_PACKAGE: + module.fail_json(msg=missing_required_lib("python-gitlab"), exception=GITLAB_IMP_ERR) + + gitlab_instance = gitlabAuthentication(module) + + this_gitlab = GitlabProjectVariables(module=module, gitlab_instance=gitlab_instance) + + change, return_value = native_python_main(this_gitlab, purge, var_list, state, module) + + module.exit_json(changed=change, project_variable=return_value) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/source_control/gitlab/gitlab_runner.py b/plugins/modules/source_control/gitlab/gitlab_runner.py new file mode 100644 index 0000000000..cde8e31d8f --- /dev/null +++ b/plugins/modules/source_control/gitlab/gitlab_runner.py @@ -0,0 +1,353 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2019, Guillaume Martinez (lunik@tiwabbit.fr) +# Copyright: (c) 2018, Samy Coenen +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: gitlab_runner +short_description: Create, modify and delete GitLab Runners. +description: + - Register, update and delete runners with the GitLab API. + - All operations are performed using the GitLab API v4. + - For details, consult the full API documentation at U(https://docs.gitlab.com/ee/api/runners.html). + - A valid private API token is required for all operations. You can create as many tokens as you like using the GitLab web interface at + U(https://$GITLAB_URL/profile/personal_access_tokens). + - A valid registration token is required for registering a new runner. + To create shared runners, you need to ask your administrator to give you this token. + It can be found at U(https://$GITLAB_URL/admin/runners/). +notes: + - To create a new runner at least the C(api_token), C(description) and C(api_url) options are required. + - Runners need to have unique descriptions. +author: + - Samy Coenen (@SamyCoenen) + - Guillaume Martinez (@Lunik) +requirements: + - python >= 2.7 + - python-gitlab >= 1.5.0 +extends_documentation_fragment: +- community.general.auth_basic + +options: + api_token: + description: + - Your private token to interact with the GitLab API. + required: True + type: str + description: + description: + - The unique name of the runner. + required: True + type: str + aliases: + - name + state: + description: + - Make sure that the runner with the same name exists with the same configuration or delete the runner with the same name. 
+ required: False + default: present + choices: ["present", "absent"] + type: str + registration_token: + description: + - The registration token is used to register new runners. + required: True + type: str + active: + description: + - Define if the runner is immediately active after creation. + required: False + default: yes + type: bool + locked: + description: + - Determines if the runner is locked or not. + required: False + default: False + type: bool + access_level: + description: + - Determines if a runner can pick up jobs from protected branches. + required: False + default: ref_protected + choices: ["ref_protected", "not_protected"] + type: str + maximum_timeout: + description: + - The maximum timeout that a runner has to pick up a specific job. + required: False + default: 3600 + type: int + run_untagged: + description: + - Run untagged jobs or not. + required: False + default: yes + type: bool + tag_list: + description: The tags that apply to the runner. + required: False + default: [] + type: list +''' + +EXAMPLES = ''' +- name: "Register runner" + gitlab_runner: + api_url: https://gitlab.example.com/ + api_token: "{{ access_token }}" + registration_token: 4gfdsg345 + description: Docker Machine t1 + state: present + active: True + tag_list: ['docker'] + run_untagged: False + locked: False + +- name: "Delete runner" + gitlab_runner: + api_url: https://gitlab.example.com/ + api_token: "{{ access_token }}" + description: Docker Machine t1 + state: absent +''' + +RETURN = ''' +msg: + description: Success or failure message + returned: always + type: str + sample: "Success" + +result: + description: json parsed response from the server + returned: always + type: dict + +error: + description: the error message returned by the GitLab API + returned: failed + type: str + sample: "400: path is already in use" + +runner: + description: API object + returned: always + type: dict +''' + +import traceback + +GITLAB_IMP_ERR = None +try: + import gitlab + HAS_GITLAB_PACKAGE = True +except Exception: + GITLAB_IMP_ERR = traceback.format_exc() + HAS_GITLAB_PACKAGE = False + +from ansible.module_utils.api import basic_auth_argument_spec +from ansible.module_utils.basic import AnsibleModule, missing_required_lib +from ansible.module_utils._text import to_native + +from ansible_collections.community.general.plugins.module_utils.gitlab import gitlabAuthentication + +# Python 3 removed the built-in cmp(); recreate it for the list comparison in updateRunner() +try: + cmp +except NameError: + def cmp(a, b): + return (a > b) - (a < b) + + +class GitLabRunner(object): + def __init__(self, module, gitlab_instance): + self._module = module + self._gitlab = gitlab_instance + self.runnerObject = None + + def createOrUpdateRunner(self, description, options): + changed = False + + # Because existsRunner() has already been called in main() + if self.runnerObject is None: + runner = self.createRunner({ + 'description': description, + 'active': options['active'], + 'token': options['registration_token'], + 'locked': options['locked'], + 'run_untagged': options['run_untagged'], + 'maximum_timeout': options['maximum_timeout'], + 'tag_list': options['tag_list']}) + changed = True + else: + changed, runner = self.updateRunner(self.runnerObject, { + 'active': options['active'], + 'locked': options['locked'], + 'run_untagged': options['run_untagged'], + 'maximum_timeout': options['maximum_timeout'], + 'access_level': options['access_level'], + 'tag_list': options['tag_list']}) + + self.runnerObject = runner + if changed: + if self._module.check_mode: + self._module.exit_json(changed=True, msg="Successfully created
or updated the runner %s" % description) + + try: + runner.save() + except Exception as e: + self._module.fail_json(msg="Failed to update runner: %s " % to_native(e)) + return True + else: + return False + + ''' + @param arguments Attributes of the runner + ''' + def createRunner(self, arguments): + if self._module.check_mode: + return True + + try: + runner = self._gitlab.runners.create(arguments) + except (gitlab.exceptions.GitlabCreateError) as e: + self._module.fail_json(msg="Failed to create runner: %s " % to_native(e)) + + return runner + + ''' + @param runner Runner object + @param arguments Attributes of the runner + ''' + def updateRunner(self, runner, arguments): + changed = False + + for arg_key, arg_value in arguments.items(): + if arguments[arg_key] is not None: + if isinstance(arguments[arg_key], list): + list1 = getattr(runner, arg_key) + list1.sort() + list2 = arguments[arg_key] + list2.sort() + if cmp(list1, list2): + setattr(runner, arg_key, arguments[arg_key]) + changed = True + else: + if getattr(runner, arg_key) != arguments[arg_key]: + setattr(runner, arg_key, arguments[arg_key]) + changed = True + + return (changed, runner) + + ''' + @param description Description of the runner + ''' + def findRunner(self, description): + runners = self._gitlab.runners.all(as_list=False) + for runner in runners: + if (runner['description'] == description): + return self._gitlab.runners.get(runner['id']) + + ''' + @param description Description of the runner + ''' + def existsRunner(self, description): + # When runner exists, object will be stored in self.runnerObject. + runner = self.findRunner(description) + + if runner: + self.runnerObject = runner + return True + return False + + def deleteRunner(self): + if self._module.check_mode: + return True + + runner = self.runnerObject + + return runner.delete() + + +def main(): + argument_spec = basic_auth_argument_spec() + argument_spec.update(dict( + api_token=dict(type='str', no_log=True), + description=dict(type='str', required=True, aliases=["name"]), + active=dict(type='bool', default=True), + tag_list=dict(type='list', default=[]), + run_untagged=dict(type='bool', default=True), + locked=dict(type='bool', default=False), + access_level=dict(type='str', default='ref_protected', choices=["not_protected", "ref_protected"]), + maximum_timeout=dict(type='int', default=3600), + registration_token=dict(type='str', required=True), + state=dict(type='str', default="present", choices=["absent", "present"]), + )) + + module = AnsibleModule( + argument_spec=argument_spec, + mutually_exclusive=[ + ['api_username', 'api_token'], + ['api_password', 'api_token'], + ], + required_together=[ + ['api_username', 'api_password'], + ], + required_one_of=[ + ['api_username', 'api_token'], + ], + supports_check_mode=True, + ) + + state = module.params['state'] + runner_description = module.params['description'] + runner_active = module.params['active'] + tag_list = module.params['tag_list'] + run_untagged = module.params['run_untagged'] + runner_locked = module.params['locked'] + access_level = module.params['access_level'] + maximum_timeout = module.params['maximum_timeout'] + registration_token = module.params['registration_token'] + + if not HAS_GITLAB_PACKAGE: + module.fail_json(msg=missing_required_lib("python-gitlab"), exception=GITLAB_IMP_ERR) + + gitlab_instance = gitlabAuthentication(module) + + gitlab_runner = GitLabRunner(module, gitlab_instance) + runner_exists = gitlab_runner.existsRunner(runner_description) + + if state == 'absent': + if 
runner_exists: + gitlab_runner.deleteRunner() + module.exit_json(changed=True, msg="Successfully deleted runner %s" % runner_description) + else: + module.exit_json(changed=False, msg="Runner already deleted or does not exist") + + if state == 'present': + if gitlab_runner.createOrUpdateRunner(runner_description, { + "active": runner_active, + "tag_list": tag_list, + "run_untagged": run_untagged, + "locked": runner_locked, + "access_level": access_level, + "maximum_timeout": maximum_timeout, + "registration_token": registration_token}): + module.exit_json(changed=True, runner=gitlab_runner.runnerObject._attrs, + msg="Successfully created or updated the runner %s" % runner_description) + else: + module.exit_json(changed=False, runner=gitlab_runner.runnerObject._attrs, + msg="No need to update the runner %s" % runner_description) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/source_control/gitlab/gitlab_user.py b/plugins/modules/source_control/gitlab/gitlab_user.py new file mode 100644 index 0000000000..78d23d22c8 --- /dev/null +++ b/plugins/modules/source_control/gitlab/gitlab_user.py @@ -0,0 +1,477 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2019, Guillaume Martinez (lunik@tiwabbit.fr) +# Copyright: (c) 2015, Werner Dijkerman (ikben@werner-dijkerman.nl) +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: gitlab_user +short_description: Creates/updates/deletes GitLab Users +description: + - When the user does not exist in GitLab, it will be created. + - When the user does exist and state=absent, the user will be deleted. + - When changes are made to the user, the user will be updated. +notes: + - From Ansible 2.10 and onwards, name, email and password are optional while deleting the user. +author: + - Werner Dijkerman (@dj-wasabi) + - Guillaume Martinez (@Lunik) +requirements: + - python >= 2.7 + - python-gitlab python module + - administrator rights on the GitLab server +extends_documentation_fragment: +- community.general.auth_basic + +options: + api_token: + description: + - GitLab token for logging in. + type: str + name: + description: + - Name of the user you want to create. + - Required only if C(state) is set to C(present). + type: str + username: + description: + - The username of the user. + required: true + type: str + password: + description: + - The password of the user. + - GitLab server enforces a minimum password length of 8 characters, so set this value to 8 or more characters. + - Required only if C(state) is set to C(present). + type: str + email: + description: + - The email that belongs to the user. + - Required only if C(state) is set to C(present). + type: str + sshkey_name: + description: + - The name of the sshkey. + type: str + sshkey_file: + description: + - The ssh key itself. + type: str + group: + description: + - ID or full path of the parent group in the form of group/name. + - Add user as a member to this group. + type: str + access_level: + description: + - The access level to the group. One of the following can be used.
+ - guest + - reporter + - developer + - master (alias for maintainer) + - maintainer + - owner + default: guest + type: str + choices: ["guest", "reporter", "developer", "master", "maintainer", "owner"] + state: + description: + - Create or delete the user. + - Possible values are present and absent. + default: present + type: str + choices: ["present", "absent"] + confirm: + description: + - Require confirmation. + type: bool + default: yes + isadmin: + description: + - Grant admin privileges to the user. + type: bool + default: no + external: + description: + - Define external parameter for this user. + type: bool + default: no +''' + +EXAMPLES = ''' +- name: "Delete GitLab User" + gitlab_user: + api_url: https://gitlab.example.com/ + api_token: "{{ access_token }}" + validate_certs: False + username: myusername + state: absent + delegate_to: localhost + +- name: "Create GitLab User" + gitlab_user: + api_url: https://gitlab.example.com/ + validate_certs: True + api_username: dj-wasabi + api_password: "MySecretPassword" + name: My Name + username: myusername + password: mysecretpassword + email: me@example.com + sshkey_name: MySSH + sshkey_file: ssh-rsa AAAAB3NzaC1yc... + state: present + group: super_group/mon_group + access_level: owner + delegate_to: localhost +''' + +RETURN = ''' +msg: + description: Success or failure message + returned: always + type: str + sample: "Success" + +result: + description: json parsed response from the server + returned: always + type: dict + +error: + description: the error message returned by the GitLab API + returned: failed + type: str + sample: "400: path is already in use" + +user: + description: API object + returned: always + type: dict +''' + +import traceback + +GITLAB_IMP_ERR = None +try: + import gitlab + HAS_GITLAB_PACKAGE = True +except Exception: + GITLAB_IMP_ERR = traceback.format_exc() + HAS_GITLAB_PACKAGE = False + +from ansible.module_utils.api import basic_auth_argument_spec +from ansible.module_utils.basic import AnsibleModule, missing_required_lib +from ansible.module_utils._text import to_native + +from ansible_collections.community.general.plugins.module_utils.gitlab import findGroup, gitlabAuthentication + + +class GitLabUser(object): + def __init__(self, module, gitlab_instance): + self._module = module + self._gitlab = gitlab_instance + self.userObject = None + self.ACCESS_LEVEL = { + 'guest': gitlab.GUEST_ACCESS, + 'reporter': gitlab.REPORTER_ACCESS, + 'developer': gitlab.DEVELOPER_ACCESS, + 'master': gitlab.MAINTAINER_ACCESS, + 'maintainer': gitlab.MAINTAINER_ACCESS, + 'owner': gitlab.OWNER_ACCESS} + + ''' + @param username Username of the user + @param options User options + ''' + def createOrUpdateUser(self, username, options): + changed = False + + # Because existsUser() has already been called in main() + if self.userObject is None: + user = self.createUser({ + 'name': options['name'], + 'username': username, + 'password': options['password'], + 'email': options['email'], + 'skip_confirmation': not options['confirm'], + 'admin': options['isadmin'], + 'external': options['external']}) + changed = True + else: + changed, user = self.updateUser(self.userObject, { + 'name': options['name'], + 'email': options['email'], + 'is_admin': options['isadmin'], + 'external': options['external']}) + + # Assign ssh keys + if options['sshkey_name'] and options['sshkey_file']: + key_changed = self.addSshKeyToUser(user, { + 'name': options['sshkey_name'], + 'file': options['sshkey_file']}) + changed = changed or key_changed + + # Assign group + if
options['group_path']: + group_changed = self.assignUserToGroup(user, options['group_path'], options['access_level']) + changed = changed or group_changed + + self.userObject = user + if changed: + if self._module.check_mode: + self._module.exit_json(changed=True, msg="Successfully created or updated the user %s" % username) + + try: + user.save() + except Exception as e: + self._module.fail_json(msg="Failed to update user: %s " % to_native(e)) + return True + else: + return False + + ''' + @param user User object + ''' + def getUserId(self, user): + if user is not None: + return user.id + return None + + ''' + @param user User object + @param sshkey_name Name of the ssh key + ''' + def sshKeyExists(self, user, sshkey_name): + keyList = map(lambda k: k.title, user.keys.list()) + + return sshkey_name in keyList + + ''' + @param user User object + @param sshkey Dict containing sshkey infos {"name": "", "file": ""} + ''' + def addSshKeyToUser(self, user, sshkey): + if not self.sshKeyExists(user, sshkey['name']): + if self._module.check_mode: + return True + + try: + user.keys.create({ + 'title': sshkey['name'], + 'key': sshkey['file']}) + except gitlab.exceptions.GitlabCreateError as e: + self._module.fail_json(msg="Failed to assign sshkey to user: %s" % to_native(e)) + return True + return False + + ''' + @param group Group object + @param user_id Id of the user to find + ''' + def findMember(self, group, user_id): + try: + member = group.members.get(user_id) + except gitlab.exceptions.GitlabGetError: + return None + return member + + ''' + @param group Group object + @param user_id Id of the user to check + ''' + def memberExists(self, group, user_id): + member = self.findMember(group, user_id) + + return member is not None + + ''' + @param group Group object + @param user_id Id of the user to check + @param access_level GitLab access_level to check + ''' + def memberAsGoodAccessLevel(self, group, user_id, access_level): + member = self.findMember(group, user_id) + + return member.access_level == access_level + + ''' + @param user User object + @param group_path Complete path of the Group including parent group path. <parent_path>/<group_path> + @param access_level GitLab access_level to assign + ''' + def assignUserToGroup(self, user, group_identifier, access_level): + group = findGroup(self._gitlab, group_identifier) + + if self._module.check_mode: + return True + + if group is None: + return False + + if self.memberExists(group, self.getUserId(user)): + member = self.findMember(group, self.getUserId(user)) + if not self.memberAsGoodAccessLevel(group, member.id, self.ACCESS_LEVEL[access_level]): + member.access_level = self.ACCESS_LEVEL[access_level] + member.save() + return True + else: + try: + group.members.create({ + 'user_id': self.getUserId(user), + 'access_level': self.ACCESS_LEVEL[access_level]}) + except gitlab.exceptions.GitlabCreateError as e: + self._module.fail_json(msg="Failed to assign user to group: %s" % to_native(e)) + return True + return False + + ''' + @param user User object + @param arguments User attributes + ''' + def updateUser(self, user, arguments): + changed = False + + for arg_key, arg_value in arguments.items(): + if arguments[arg_key] is not None: + if getattr(user, arg_key) != arguments[arg_key]: + setattr(user, arg_key, arguments[arg_key]) + changed = True + + return (changed, user) + + ''' + @param arguments User attributes + ''' + def createUser(self, arguments): + if self._module.check_mode: + return True + + try: + user = self._gitlab.users.create(arguments) + except (gitlab.exceptions.GitlabCreateError) as e: + self._module.fail_json(msg="Failed to create user: %s " % to_native(e)) + + return user + + ''' + @param username Username of the user + ''' + def findUser(self, username): + users = self._gitlab.users.list(search=username) + for user in users: + if (user.username == username): + return user + + ''' + @param username Username of the user + ''' + def existsUser(self, username): + # When user exists, object will be stored in self.userObject.
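+ # Illustrative example (made-up name): existsUser('jdoe') caches the matching user + # object in self.userObject so that createOrUpdateUser() updates instead of creating.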
+ user = self.findUser(username) + if user: + self.userObject = user + return True + return False + + def deleteUser(self): + if self._module.check_mode: + return True + + user = self.userObject + + return user.delete() + + +def main(): + argument_spec = basic_auth_argument_spec() + argument_spec.update(dict( + api_token=dict(type='str', no_log=True), + name=dict(type='str'), + state=dict(type='str', default="present", choices=["absent", "present"]), + username=dict(type='str', required=True), + password=dict(type='str', no_log=True), + email=dict(type='str'), + sshkey_name=dict(type='str'), + sshkey_file=dict(type='str'), + group=dict(type='str'), + access_level=dict(type='str', default="guest", choices=["developer", "guest", "maintainer", "master", "owner", "reporter"]), + confirm=dict(type='bool', default=True), + isadmin=dict(type='bool', default=False), + external=dict(type='bool', default=False), + )) + + module = AnsibleModule( + argument_spec=argument_spec, + mutually_exclusive=[ + ['api_username', 'api_token'], + ['api_password', 'api_token'], + ], + required_together=[ + ['api_username', 'api_password'], + ], + required_one_of=[ + ['api_username', 'api_token'] + ], + supports_check_mode=True, + required_if=( + ('state', 'present', ['name', 'email', 'password']), + ) + ) + + user_name = module.params['name'] + state = module.params['state'] + user_username = module.params['username'].lower() + user_password = module.params['password'] + user_email = module.params['email'] + user_sshkey_name = module.params['sshkey_name'] + user_sshkey_file = module.params['sshkey_file'] + group_path = module.params['group'] + access_level = module.params['access_level'] + confirm = module.params['confirm'] + user_isadmin = module.params['isadmin'] + user_external = module.params['external'] + + if not HAS_GITLAB_PACKAGE: + module.fail_json(msg=missing_required_lib("python-gitlab"), exception=GITLAB_IMP_ERR) + + gitlab_instance = gitlabAuthentication(module) + + gitlab_user = GitLabUser(module, gitlab_instance) + user_exists = gitlab_user.existsUser(user_username) + + if state == 'absent': + if user_exists: + gitlab_user.deleteUser() + module.exit_json(changed=True, msg="Successfully deleted user %s" % user_username) + else: + module.exit_json(changed=False, msg="User deleted or does not exists") + + if state == 'present': + if gitlab_user.createOrUpdateUser(user_username, { + "name": user_name, + "password": user_password, + "email": user_email, + "sshkey_name": user_sshkey_name, + "sshkey_file": user_sshkey_file, + "group_path": group_path, + "access_level": access_level, + "confirm": confirm, + "isadmin": user_isadmin, + "external": user_external}): + module.exit_json(changed=True, msg="Successfully created or updated the user %s" % user_username, user=gitlab_user.userObject._attrs) + else: + module.exit_json(changed=False, msg="No need to update the user %s" % user_username, user=gitlab_user.userObject._attrs) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/source_control/hg.py b/plugins/modules/source_control/hg.py new file mode 100644 index 0000000000..68ea099738 --- /dev/null +++ b/plugins/modules/source_control/hg.py @@ -0,0 +1,296 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2013, Yeukhon Wong +# Copyright: (c) 2014, Nate Coraor +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = 
{'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: hg +short_description: Manages Mercurial (hg) repositories +description: + - Manages Mercurial (hg) repositories. Supports SSH, HTTP/S and local addresses. +author: "Yeukhon Wong (@yeukhon)" +options: + repo: + description: + - The repository address. + required: yes + aliases: [ name ] + dest: + description: + - Absolute path of where the repository should be cloned to. + This parameter is required, unless clone and update are set to no. + required: yes + revision: + description: + - Equivalent C(-r) option in hg command which could be the changeset, revision number, + branch name or even tag. + aliases: [ version ] + force: + description: + - Discards uncommitted changes. Runs C(hg update -C). Prior to + Ansible 1.9, the default was C(yes). + type: bool + default: 'no' + purge: + description: + - Deletes untracked files. Runs C(hg purge). + type: bool + default: 'no' + update: + description: + - If C(no), do not retrieve new revisions from the origin repository. + type: bool + default: 'yes' + clone: + description: + - If C(no), do not clone the repository if it does not exist locally. + type: bool + default: 'yes' + executable: + description: + - Path to hg executable to use. If not supplied, + the normal mechanism for resolving binary paths will be used. +notes: + - This module does not support push capability. See U(https://github.com/ansible/ansible/issues/31156). + - "If the task seems to be hanging, first verify remote host is in C(known_hosts). + SSH will prompt user to authorize the first contact with a remote host. To avoid this prompt, + one solution is to add the remote host public key in C(/etc/ssh/ssh_known_hosts) before calling + the hg module, with the following command: ssh-keyscan remote_host.com >> /etc/ssh/ssh_known_hosts." + - As of 01 Dec 2018, Bitbucket has dropped support for TLSv1 and TLSv1.1 connections. As such, + if the underlying system still uses a Python version below 2.7.9, you will have issues checking out + bitbucket repositories. See U(https://bitbucket.org/blog/deprecating-tlsv1-tlsv1-1-2018-12-01). +''' + +EXAMPLES = ''' +- name: Ensure the current working copy is inside the stable branch and deletes untracked files if any. + hg: + repo: https://bitbucket.org/user/repo1 + dest: /home/user/repo1 + revision: stable + purge: yes + +- name: Get information about the repository whether or not it has already been cloned locally. + hg: + repo: git://bitbucket.org/user/repo + dest: /srv/checkout + clone: no + update: no +''' + +import os + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_native + + +class Hg(object): + def __init__(self, module, dest, repo, revision, hg_path): + self.module = module + self.dest = dest + self.repo = repo + self.revision = revision + self.hg_path = hg_path + + def _command(self, args_list): + (rc, out, err) = self.module.run_command([self.hg_path] + args_list) + return (rc, out, err) + + def _list_untracked(self): + args = ['purge', '--config', 'extensions.purge=', '-R', self.dest, '--print'] + return self._command(args) + + def get_revision(self): + """ + hg id -b -i -t returns a string in the format: + "<changeset>[+] <branch_name> <tags>" + This format lists the state of the current working copy, + and indicates whether there are uncommitted changes by the + plus sign. Otherwise, the sign is omitted.
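+ An illustrative, made-up sample: "d5a398b71da7+ default tip" describes a working + copy with uncommitted changes on branch "default", carrying the tag "tip".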
+ + Read the full description via hg id --help + """ + (rc, out, err) = self._command(['id', '-b', '-i', '-t', '-R', self.dest]) + if rc != 0: + self.module.fail_json(msg=err) + else: + return to_native(out).strip('\n') + + def get_remote_revision(self): + (rc, out, err) = self._command(['id', self.repo]) + if rc != 0: + self.module.fail_json(msg=err) + else: + return to_native(out).strip('\n') + + def has_local_mods(self): + now = self.get_revision() + if '+' in now: + return True + else: + return False + + def discard(self): + before = self.has_local_mods() + if not before: + return False + + args = ['update', '-C', '-R', self.dest, '-r', '.'] + (rc, out, err) = self._command(args) + if rc != 0: + self.module.fail_json(msg=err) + + after = self.has_local_mods() + if before != after and not after: # no more local modifications + return True + + def purge(self): + # before purge, find out if there are any untracked files + (rc1, out1, err1) = self._list_untracked() + if rc1 != 0: + self.module.fail_json(msg=err1) + + # there are some untracked files + if out1 != '': + args = ['purge', '--config', 'extensions.purge=', '-R', self.dest] + (rc2, out2, err2) = self._command(args) + if rc2 != 0: + self.module.fail_json(msg=err2) + return True + else: + return False + + def cleanup(self, force, purge): + discarded = False + purged = False + + if force: + discarded = self.discard() + if purge: + purged = self.purge() + if discarded or purged: + return True + else: + return False + + def pull(self): + return self._command( + ['pull', '-R', self.dest, self.repo]) + + def update(self): + if self.revision is not None: + return self._command(['update', '-r', self.revision, '-R', self.dest]) + return self._command(['update', '-R', self.dest]) + + def clone(self): + if self.revision is not None: + return self._command(['clone', self.repo, self.dest, '-r', self.revision]) + return self._command(['clone', self.repo, self.dest]) + + @property + def at_revision(self): + """ + There is no point in pulling from a potentially down/slow remote site + if the desired changeset is already the current changeset.
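+ Only a revision given as a changeset hash of at least 7 characters is compared + here; a shorter value, a tag or a branch name makes this property return False + so that a pull is still attempted.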
+ """ + if self.revision is None or len(self.revision) < 7: + # Assume it's a rev number, tag, or branch + return False + (rc, out, err) = self._command(['--debug', 'id', '-i', '-R', self.dest]) + if rc != 0: + self.module.fail_json(msg=err) + if out.startswith(self.revision): + return True + return False + + +# =========================================== + +def main(): + module = AnsibleModule( + argument_spec=dict( + repo=dict(type='str', required=True, aliases=['name']), + dest=dict(type='path'), + revision=dict(type='str', default=None, aliases=['version']), + force=dict(type='bool', default=False), + purge=dict(type='bool', default=False), + update=dict(type='bool', default=True), + clone=dict(type='bool', default=True), + executable=dict(type='str', default=None), + ), + ) + repo = module.params['repo'] + dest = module.params['dest'] + revision = module.params['revision'] + force = module.params['force'] + purge = module.params['purge'] + update = module.params['update'] + clone = module.params['clone'] + hg_path = module.params['executable'] or module.get_bin_path('hg', True) + if dest is not None: + hgrc = os.path.join(dest, '.hg/hgrc') + + # initial states + before = '' + changed = False + cleaned = False + + if not dest and (clone or update): + module.fail_json(msg="the destination directory must be specified unless clone=no and update=no") + + hg = Hg(module, dest, repo, revision, hg_path) + + # If there is no hgrc file, then assume repo is absent + # and perform clone. Otherwise, perform pull and update. + if not clone and not update: + out = hg.get_remote_revision() + module.exit_json(after=out, changed=False) + if not os.path.exists(hgrc): + if clone: + (rc, out, err) = hg.clone() + if rc != 0: + module.fail_json(msg=err) + else: + module.exit_json(changed=False) + elif not update: + # Just return having found a repo already in the dest path + before = hg.get_revision() + elif hg.at_revision: + # no update needed, don't pull + before = hg.get_revision() + + # but force and purge if desired + cleaned = hg.cleanup(force, purge) + else: + # get the current state before doing pulling + before = hg.get_revision() + + # can perform force and purge + cleaned = hg.cleanup(force, purge) + + (rc, out, err) = hg.pull() + if rc != 0: + module.fail_json(msg=err) + + (rc, out, err) = hg.update() + if rc != 0: + module.fail_json(msg=err) + + after = hg.get_revision() + if before != after or cleaned: + changed = True + + module.exit_json(before=before, after=after, changed=changed, cleaned=cleaned) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/storage/emc/emc_vnx_sg_member.py b/plugins/modules/storage/emc/emc_vnx_sg_member.py new file mode 100644 index 0000000000..75004a0615 --- /dev/null +++ b/plugins/modules/storage/emc/emc_vnx_sg_member.py @@ -0,0 +1,176 @@ +#!/usr/bin/python +# +# Copyright (c) 2018, Luca 'remix_tj' Lorenzetto +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + + +ANSIBLE_METADATA = { + 'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community' +} + +DOCUMENTATION = ''' +--- +module: emc_vnx_sg_member + +short_description: Manage storage group member on EMC VNX + + +description: + - "This module manages the members of an existing storage group." 
+ +extends_documentation_fragment: +- community.general.emc.emc_vnx + + +options: + name: + description: + - Name of the Storage group to manage. + required: true + lunid: + description: + - Lun id to be added. + required: true + state: + description: + - Indicates the desired lunid state. + - C(present) ensures specified lunid is present in the Storage Group. + - C(absent) ensures specified lunid is absent from Storage Group. + default: present + choices: [ "present", "absent"] + + +author: + - Luca 'remix_tj' Lorenzetto (@remixtj) +''' + +EXAMPLES = ''' +- name: Add lun to storage group + emc_vnx_sg_member: + name: sg01 + sp_address: sp1a.fqdn + sp_user: sysadmin + sp_password: sysadmin + lunid: 100 + state: present + +- name: Remove lun from storage group + emc_vnx_sg_member: + name: sg01 + sp_address: sp1a.fqdn + sp_user: sysadmin + sp_password: sysadmin + lunid: 100 + state: absent +''' + +RETURN = ''' +hluid: + description: LUNID that hosts attached to the storage group will see. + type: int + returned: success +''' + +import traceback + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib +from ansible.module_utils._text import to_native +from ansible_collections.community.general.plugins.module_utils.storage.emc.emc_vnx import emc_vnx_argument_spec + +LIB_IMP_ERR = None +try: + from storops import VNXSystem + from storops.exception import VNXCredentialError, VNXStorageGroupError, \ + VNXAluAlreadyAttachedError, VNXAttachAluError, VNXDetachAluNotFoundError + HAS_LIB = True +except Exception: + LIB_IMP_ERR = traceback.format_exc() + HAS_LIB = False + + +def run_module(): + module_args = dict( + name=dict(type='str', required=True), + lunid=dict(type='int', required=True), + state=dict(default='present', choices=['present', 'absent']), + ) + + module_args.update(emc_vnx_argument_spec) + + result = dict( + changed=False, + hluid=None + ) + + module = AnsibleModule( + argument_spec=module_args, + supports_check_mode=True + ) + + if not HAS_LIB: + module.fail_json(msg=missing_required_lib('storops >= 0.5.10'), + exception=LIB_IMP_ERR) + + sp_user = module.params['sp_user'] + sp_address = module.params['sp_address'] + sp_password = module.params['sp_password'] + alu = module.params['lunid'] + + # if the user is working with this module in only check mode we do not + # want to make any changes to the environment, just return the current + # state with no modifications + if module.check_mode: + return result + + try: + vnx = VNXSystem(sp_address, sp_user, sp_password) + sg = vnx.get_sg(module.params['name']) + if sg.existed: + if module.params['state'] == 'present': + if not sg.has_alu(alu): + try: + result['hluid'] = sg.attach_alu(alu) + result['changed'] = True + except VNXAluAlreadyAttachedError: + result['hluid'] = sg.get_hlu(alu) + except (VNXAttachAluError, VNXStorageGroupError) as e: + module.fail_json(msg='Error attaching {0}: ' + '{1} '.format(alu, to_native(e)), + **result) + else: + result['hluid'] = sg.get_hlu(alu) + if module.params['state'] == 'absent' and sg.has_alu(alu): + try: + sg.detach_alu(alu) + result['changed'] = True + except VNXDetachAluNotFoundError: + # being not attached when using absent is OK + pass + except VNXStorageGroupError as e: + module.fail_json(msg='Error detaching alu {0}: ' + '{1} '.format(alu, to_native(e)), + **result) + else: + module.fail_json(msg='No such storage group named ' + '{0}'.format(module.params['name']), + **result) + except VNXCredentialError as e: + module.fail_json(msg='{0}'.format(to_native(e)), **result) + 
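# All failure paths above exit via fail_json(); reaching this point means the storage + # group is in the requested state, with 'changed' and (for state=present) the resolved + # 'hluid' stored in result. +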
+ module.exit_json(**result) + + +def main(): + run_module() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/storage/glusterfs/gluster_heal_facts.py b/plugins/modules/storage/glusterfs/gluster_heal_facts.py new file mode 120000 index 0000000000..e5a7565c18 --- /dev/null +++ b/plugins/modules/storage/glusterfs/gluster_heal_facts.py @@ -0,0 +1 @@ +gluster_heal_info.py \ No newline at end of file diff --git a/plugins/modules/storage/glusterfs/gluster_heal_info.py b/plugins/modules/storage/glusterfs/gluster_heal_info.py new file mode 100644 index 0000000000..73e25c0ff2 --- /dev/null +++ b/plugins/modules/storage/glusterfs/gluster_heal_info.py @@ -0,0 +1,203 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright: (c) 2016, Red Hat, Inc. +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: gluster_heal_info +short_description: Gather information on self-heal or rebalance status +author: "Devyani Kota (@devyanikota)" +description: + - Gather facts about either self-heal or rebalance status. + - This module was called C(gluster_heal_facts) before Ansible 2.9, returning C(ansible_facts). + Note that the M(gluster_heal_info) module no longer returns C(ansible_facts)! +options: + name: + description: + - The volume name. + required: true + aliases: ['volume'] + status_filter: + default: "self-heal" + choices: ["self-heal", "rebalance"] + description: + - Determines which facts are to be returned. + - If the C(status_filter) is C(self-heal), status of self-heal, along with the number of files still in process are returned. + - If the C(status_filter) is C(rebalance), rebalance status is returned. 
+requirements: + - GlusterFS > 3.2 +''' + +EXAMPLES = ''' +- name: Gather self-heal facts about all gluster hosts in the cluster + gluster_heal_info: + name: test_volume + status_filter: self-heal + register: self_heal_status +- debug: + var: self_heal_status + +- name: Gather rebalance facts about all gluster hosts in the cluster + gluster_heal_info: + name: test_volume + status_filter: rebalance + register: rebalance_status +- debug: + var: rebalance_status +''' + +RETURN = ''' +name: + description: GlusterFS volume name + returned: always + type: str +status_filter: + description: Whether self-heal or rebalance status is to be returned + returned: always + type: str +heal_info: + description: List of files that still need healing process + returned: On success + type: list +rebalance_status: + description: Status of rebalance operation + returned: On success + type: list +''' + +import traceback + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_native +from distutils.version import LooseVersion + +glusterbin = '' + + +def run_gluster(gargs, **kwargs): + global glusterbin + global module + args = [glusterbin, '--mode=script'] + args.extend(gargs) + try: + rc, out, err = module.run_command(args, **kwargs) + if rc != 0: + module.fail_json(msg='error running gluster (%s) command (rc=%d): %s' % + (' '.join(args), rc, out or err), exception=traceback.format_exc()) + except Exception as e: + module.fail_json(msg='error running gluster (%s) command: %s' % (' '.join(args), + to_native(e)), exception=traceback.format_exc()) + return out + + +def get_self_heal_status(name): + out = run_gluster(['volume', 'heal', name, 'info'], environ_update=dict(LANG='C', LC_ALL='C', LC_MESSAGES='C')) + raw_out = out.split("\n") + heal_info = [] + # return files that still need healing. + for line in raw_out: + if 'Brick' in line: + br_dict = {} + br_dict['brick'] = line.strip().strip("Brick") + elif 'Status' in line: + br_dict['status'] = line.split(":")[1].strip() + elif 'Number' in line: + br_dict['no_of_entries'] = line.split(":")[1].strip() + elif line.startswith('/') or line.startswith('<') or '\n' in line: + continue + else: + br_dict and heal_info.append(br_dict) + br_dict = {} + return heal_info + + +def get_rebalance_status(name): + out = run_gluster(['volume', 'rebalance', name, 'status'], environ_update=dict(LANG='C', LC_ALL='C', LC_MESSAGES='C')) + raw_out = out.split("\n") + rebalance_status = [] + # return the files that are either still 'in progress' state or 'completed'. 
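+ # A hedged sketch of one parsed status line (values made up; whitespace is collapsed + # by the split/join below): + # node1.example.com 102 10.2MB 1043 0 completed 0:08:12 + # node is column 0, rebalanced_files column 1, failures column 4 and status + # column 5 (columns 5 and 6 joined when the status is 'in progress').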
+ for line in raw_out: + line = " ".join(line.split()) + line_vals = line.split(" ") + if line_vals[0].startswith('-') or line_vals[0].startswith('Node'): + continue + node_dict = {} + if len(line_vals) == 1 or len(line_vals) == 4: + continue + node_dict['node'] = line_vals[0] + node_dict['rebalanced_files'] = line_vals[1] + node_dict['failures'] = line_vals[4] + if 'in progress' in line: + node_dict['status'] = line_vals[5] + line_vals[6] + rebalance_status.append(node_dict) + elif 'completed' in line: + node_dict['status'] = line_vals[5] + rebalance_status.append(node_dict) + return rebalance_status + + +def is_invalid_gluster_version(module, required_version): + cmd = module.get_bin_path('gluster', True) + ' --version' + result = module.run_command(cmd) + ver_line = result[1].split('\n')[0] + version = ver_line.split(' ')[1] + # If the installed version is less than 3.2, it is an invalid version + # return True + return LooseVersion(version) < LooseVersion(required_version) + + +def main(): + global module + global glusterbin + module = AnsibleModule( + argument_spec=dict( + name=dict(type='str', required=True, aliases=['volume']), + status_filter=dict(type='str', default='self-heal', choices=['self-heal', 'rebalance']), + ), + ) + is_old_facts = module._name == 'gluster_heal_facts' + if is_old_facts: + module.deprecate("The 'gluster_heal_facts' module has been renamed to 'gluster_heal_info', " + "and the renamed one no longer returns ansible_facts", version='2.13') + + glusterbin = module.get_bin_path('gluster', True) + required_version = "3.2" + status_filter = module.params['status_filter'] + volume_name = module.params['name'] + heal_info = '' + rebalance_status = '' + + # Verify if required GlusterFS version is installed + if is_invalid_gluster_version(module, required_version): + module.fail_json(msg="GlusterFS version > %s is required" % + required_version) + + try: + if status_filter == "self-heal": + heal_info = get_self_heal_status(volume_name) + elif status_filter == "rebalance": + rebalance_status = get_rebalance_status(volume_name) + except Exception as e: + module.fail_json(msg='Error retrieving status: %s' % e, exception=traceback.format_exc()) + + facts = {} + facts['glusterfs'] = {'volume': volume_name, 'status_filter': status_filter, 'heal_info': heal_info, 'rebalance': rebalance_status} + + if is_old_facts: + module.exit_json(ansible_facts=facts) + else: + module.exit_json(**facts) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/storage/glusterfs/gluster_peer.py b/plugins/modules/storage/glusterfs/gluster_peer.py new file mode 100644 index 0000000000..34bb3fbc4c --- /dev/null +++ b/plugins/modules/storage/glusterfs/gluster_peer.py @@ -0,0 +1,175 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright 2015 Nandaja Varma +# Copyright 2018 Red Hat, Inc. +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: gluster_peer +short_description: Attach/Detach peers to/from the cluster +description: + - Create or diminish a GlusterFS trusted storage pool. A set of nodes can be + added into an existing trusted storage pool or a new storage pool can be + formed. Or, nodes can be removed from an existing trusted storage pool. 
+author: Sachidananda Urs (@sac)
+options:
+    state:
+        choices: ["present", "absent"]
+        default: "present"
+        description:
+            - Determines whether the nodes should be attached to the pool or
+              removed from the pool. If the state is present, nodes will be
+              attached to the pool. If state is absent, nodes will be detached
+              from the pool.
+    nodes:
+        description:
+            - List of nodes that have to be probed into the pool.
+        required: true
+    force:
+        type: bool
+        default: "false"
+        description:
+            - Applicable only while removing nodes from the pool. gluster
+              will refuse to detach a node from the pool if any one of the
+              nodes is down; in such cases, force can be used.
+requirements:
+  - GlusterFS > 3.2
+notes:
+  - This module does not support check mode.
+'''
+
+EXAMPLES = '''
+- name: Create a trusted storage pool
+  gluster_peer:
+    state: present
+    nodes:
+      - 10.0.1.5
+      - 10.0.1.10
+
+- name: Delete a node from the trusted storage pool
+  gluster_peer:
+    state: absent
+    nodes:
+      - 10.0.1.10
+
+- name: Delete a node from the trusted storage pool by force
+  gluster_peer:
+    state: absent
+    nodes:
+      - 10.0.0.1
+    force: true
+'''
+
+RETURN = '''
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from distutils.version import LooseVersion
+
+
+class Peer(object):
+    def __init__(self, module):
+        self.module = module
+        self.state = self.module.params['state']
+        self.nodes = self.module.params['nodes']
+        self.glustercmd = self.module.get_bin_path('gluster', True)
+        self.lang = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C')
+        self.action = ''
+        self.force = ''
+
+    def gluster_peer_ops(self):
+        if not self.nodes:
+            self.module.fail_json(msg="nodes list cannot be empty")
+        self.force = 'force' if self.module.params.get('force') else ''
+        if self.state == 'present':
+            self.nodes = self.get_to_be_probed_hosts(self.nodes)
+            self.action = 'probe'
+            # In case of peer probe, we do not need `force'
+            self.force = ''
+        else:
+            self.action = 'detach'
+        self.call_peer_commands()
+
+    def get_to_be_probed_hosts(self, hosts):
+        peercmd = [self.glustercmd, 'pool', 'list', '--mode=script']
+        rc, output, err = self.module.run_command(peercmd,
+                                                  environ_update=self.lang)
+        peers_in_cluster = [line.split('\t')[1].strip() for
+                            line in filter(None, output.split('\n')[1:])]
+        try:
+            peers_in_cluster.remove('localhost')
+        except ValueError:
+            # It is ok not to have localhost in list
+            pass
+        hosts_to_be_probed = [host for host in hosts if host not in
+                              peers_in_cluster]
+        return hosts_to_be_probed
+
+    def call_peer_commands(self):
+        result = {}
+        result['msg'] = ''
+        result['changed'] = False
+
+        for node in self.nodes:
+            peercmd = [self.glustercmd, 'peer', self.action, node, '--mode=script']
+            if self.force:
+                peercmd.append(self.force)
+            rc, out, err = self.module.run_command(peercmd,
+                                                   environ_update=self.lang)
+            if rc:
+                result['rc'] = rc
+                result['msg'] = err
+                # Fail early, do not wait for the loop to finish
+                self.module.fail_json(**result)
+            else:
+                if 'already in peer' in out or \
+                   'localhost not needed' in out:
+                    result['changed'] |= False
+                else:
+                    result['changed'] = True
+        self.module.exit_json(**result)
+
+
+def main():
+    module = AnsibleModule(
+        argument_spec=dict(
+            force=dict(type='bool', required=False),
+            nodes=dict(type='list', required=True),
+            state=dict(type='str', choices=['absent', 'present'],
+                       default='present'),
+        ),
+        supports_check_mode=False
+    )
+    pops = Peer(module)
+    required_version = "3.2"
+    # Verify if required GlusterFS version is installed
+    if is_invalid_gluster_version(module, required_version):
+        module.fail_json(msg="GlusterFS version > %s is required" %
+                         required_version)
+    pops.gluster_peer_ops()
+
+
+def is_invalid_gluster_version(module, required_version):
+    cmd = module.get_bin_path('gluster', True) + ' --version'
+    result = module.run_command(cmd)
+    ver_line = result[1].split('\n')[0]
+    version = ver_line.split(' ')[1]
+    # If the installed version is less than 3.2, it is an invalid version
+    # return True
+    return LooseVersion(version) < LooseVersion(required_version)
+
+
+if __name__ == "__main__":
+    main()
diff --git a/plugins/modules/storage/glusterfs/gluster_volume.py b/plugins/modules/storage/glusterfs/gluster_volume.py
new file mode 100644
index 0000000000..a8bbccfc31
--- /dev/null
+++ b/plugins/modules/storage/glusterfs/gluster_volume.py
@@ -0,0 +1,607 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2014, Taneli Leppä
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+DOCUMENTATION = '''
+module: gluster_volume
+short_description: Manage GlusterFS volumes
+description:
+  - Create, remove, start, stop and tune GlusterFS volumes
+options:
+  name:
+    description:
+      - The volume name.
+    required: true
+    aliases: ['volume']
+  state:
+    description:
+      - Use present/absent to ensure whether a volume exists.
+        Use started/stopped to control its availability.
+    required: true
+    choices: ['absent', 'present', 'started', 'stopped']
+  cluster:
+    description:
+      - List of hosts to use for probing and brick setup.
+  host:
+    description:
+      - Override local hostname (for peer probing purposes).
+  replicas:
+    description:
+      - Replica count for volume.
+  arbiters:
+    description:
+      - Arbiter count for volume.
+  stripes:
+    description:
+      - Stripe count for volume.
+  disperses:
+    description:
+      - Disperse count for volume.
+  redundancies:
+    description:
+      - Redundancy count for volume.
+  transport:
+    description:
+      - Transport type for volume.
+    default: tcp
+    choices: [ tcp, rdma, 'tcp,rdma' ]
+  bricks:
+    description:
+      - Brick paths on servers. Multiple brick paths can be separated by commas.
+    aliases: [ brick ]
+  start_on_create:
+    description:
+      - Controls whether the volume is started after creation or not.
+    type: bool
+    default: 'yes'
+  rebalance:
+    description:
+      - Controls whether the cluster is rebalanced after changes.
+    type: bool
+    default: 'no'
+  directory:
+    description:
+      - Directory for limit-usage.
+  options:
+    description:
+      - A dictionary/hash with options/settings for the volume.
+  quota:
+    description:
+      - Quota value for limit-usage (be sure to use 10.0MB instead of 10MB, see quota list).
+  force:
+    description:
+      - If the brick is being created in the root partition, the module will fail.
+        Set force to true to override this behaviour.
+    type: bool
+notes:
+  - Requires cli tools for GlusterFS on servers.
+  - Will add new bricks, but not remove them.
+author: +- Taneli Leppä (@rosmo) +''' + +EXAMPLES = """ +- name: create gluster volume + gluster_volume: + state: present + name: test1 + bricks: /bricks/brick1/g1 + rebalance: yes + cluster: + - 192.0.2.10 + - 192.0.2.11 + run_once: true + +- name: tune + gluster_volume: + state: present + name: test1 + options: + performance.cache-size: 256MB + +- name: Set multiple options on GlusterFS volume + gluster_volume: + state: present + name: test1 + options: + { performance.cache-size: 128MB, + write-behind: 'off', + quick-read: 'on' + } + +- name: start gluster volume + gluster_volume: + state: started + name: test1 + +- name: limit usage + gluster_volume: + state: present + name: test1 + directory: /foo + quota: 20.0MB + +- name: stop gluster volume + gluster_volume: + state: stopped + name: test1 + +- name: remove gluster volume + gluster_volume: + state: absent + name: test1 + +- name: create gluster volume with multiple bricks + gluster_volume: + state: present + name: test2 + bricks: /bricks/brick1/g2,/bricks/brick2/g2 + cluster: + - 192.0.2.10 + - 192.0.2.11 + run_once: true + +- name: Remove the bricks from gluster volume + gluster_volume: + state: present + name: testvol + bricks: /bricks/brick1/b1,/bricks/brick2/b2 + cluster: + - 10.70.42.85 + force: true + run_once: true + +- name: Reduce cluster configuration + gluster_volume: + state: present + name: testvol + bricks: /bricks/brick3/b1,/bricks/brick4/b2 + replicas: 2 + cluster: + - 10.70.42.85 + force: true + run_once: true +""" + +import re +import socket +import time +import traceback + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_native + +glusterbin = '' + + +def run_gluster(gargs, **kwargs): + global glusterbin + global module + args = [glusterbin, '--mode=script'] + args.extend(gargs) + try: + rc, out, err = module.run_command(args, **kwargs) + if rc != 0: + module.fail_json(msg='error running gluster (%s) command (rc=%d): %s' % + (' '.join(args), rc, out or err), exception=traceback.format_exc()) + except Exception as e: + module.fail_json(msg='error running gluster (%s) command: %s' % (' '.join(args), + to_native(e)), exception=traceback.format_exc()) + return out + + +def run_gluster_nofail(gargs, **kwargs): + global glusterbin + global module + args = [glusterbin] + args.extend(gargs) + rc, out, err = module.run_command(args, **kwargs) + if rc != 0: + return None + return out + + +def get_peers(): + out = run_gluster(['peer', 'status']) + peers = {} + hostname = None + uuid = None + state = None + shortNames = False + for row in out.split('\n'): + if ': ' in row: + key, value = row.split(': ') + if key.lower() == 'hostname': + hostname = value + shortNames = False + if key.lower() == 'uuid': + uuid = value + if key.lower() == 'state': + state = value + peers[hostname] = [uuid, state] + elif row.lower() == 'other names:': + shortNames = True + elif row != '' and shortNames is True: + peers[row] = [uuid, state] + elif row == '': + shortNames = False + return peers + + +def get_volumes(): + out = run_gluster(['volume', 'info']) + + volumes = {} + volume = {} + for row in out.split('\n'): + if ': ' in row: + key, value = row.split(': ') + if key.lower() == 'volume name': + volume['name'] = value + volume['options'] = {} + volume['quota'] = False + if key.lower() == 'volume id': + volume['id'] = value + if key.lower() == 'status': + volume['status'] = value + if key.lower() == 'transport-type': + volume['transport'] = value + if value.lower().endswith(' (arbiter)'): + if 
'arbiters' not in volume: + volume['arbiters'] = [] + value = value[:-10] + volume['arbiters'].append(value) + elif key.lower() == 'number of bricks': + volume['replicas'] = value[-1:] + if key.lower() != 'bricks' and key.lower()[:5] == 'brick': + if 'bricks' not in volume: + volume['bricks'] = [] + volume['bricks'].append(value) + # Volume options + if '.' in key: + if 'options' not in volume: + volume['options'] = {} + volume['options'][key] = value + if key == 'features.quota' and value == 'on': + volume['quota'] = True + else: + if row.lower() != 'bricks:' and row.lower() != 'options reconfigured:': + if len(volume) > 0: + volumes[volume['name']] = volume + volume = {} + return volumes + + +def get_quotas(name, nofail): + quotas = {} + if nofail: + out = run_gluster_nofail(['volume', 'quota', name, 'list']) + if not out: + return quotas + else: + out = run_gluster(['volume', 'quota', name, 'list']) + for row in out.split('\n'): + if row[:1] == '/': + q = re.split(r'\s+', row) + quotas[q[0]] = q[1] + return quotas + + +def wait_for_peer(host): + for x in range(0, 4): + peers = get_peers() + if host in peers and peers[host][1].lower().find('peer in cluster') != -1: + return True + time.sleep(1) + return False + + +def probe(host, myhostname): + global module + out = run_gluster(['peer', 'probe', host]) + if out.find('localhost') == -1 and not wait_for_peer(host): + module.fail_json(msg='failed to probe peer %s on %s' % (host, myhostname)) + + +def probe_all_peers(hosts, peers, myhostname): + for host in hosts: + host = host.strip() # Clean up any extra space for exact comparison + if host not in peers: + probe(host, myhostname) + + +def create_volume(name, stripe, replica, arbiter, disperse, redundancy, transport, hosts, bricks, force): + args = ['volume', 'create'] + args.append(name) + if stripe: + args.append('stripe') + args.append(str(stripe)) + if replica: + args.append('replica') + args.append(str(replica)) + if arbiter: + args.append('arbiter') + args.append(str(arbiter)) + if disperse: + args.append('disperse') + args.append(str(disperse)) + if redundancy: + args.append('redundancy') + args.append(str(redundancy)) + args.append('transport') + args.append(transport) + for brick in bricks: + for host in hosts: + args.append(('%s:%s' % (host, brick))) + if force: + args.append('force') + run_gluster(args) + + +def start_volume(name): + run_gluster(['volume', 'start', name]) + + +def stop_volume(name): + run_gluster(['volume', 'stop', name]) + + +def set_volume_option(name, option, parameter): + run_gluster(['volume', 'set', name, option, parameter]) + + +def add_bricks(name, new_bricks, stripe, replica, force): + args = ['volume', 'add-brick', name] + if stripe: + args.append('stripe') + args.append(str(stripe)) + if replica: + args.append('replica') + args.append(str(replica)) + args.extend(new_bricks) + if force: + args.append('force') + run_gluster(args) + + +def remove_bricks(name, removed_bricks, force): + # max-tries=12 with default_interval=10 secs + max_tries = 12 + retries = 0 + success = False + args = ['volume', 'remove-brick', name] + args.extend(removed_bricks) + # create a copy of args to use for commit operation + args_c = args[:] + args.append('start') + run_gluster(args) + # remove-brick operation needs to be followed by commit operation. 
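+    # The flow implemented here: `remove-brick ... start` kicks off data
+    # migration, the last brick's `remove-brick <brick> status` output is
+    # polled until it reports 'completed', and only then is
+    # `remove-brick ... commit` issued. With max_tries=12 and a 10 second
+    # sleep between attempts, the worst-case wait is about two minutes.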
+ if not force: + module.fail_json(msg="Force option is mandatory.") + else: + while retries < max_tries: + last_brick = removed_bricks[-1] + out = run_gluster(['volume', 'remove-brick', name, last_brick, 'status']) + for row in out.split('\n')[1:]: + if 'completed' in row: + # remove-brick successful, call commit operation. + args_c.append('commit') + out = run_gluster(args_c) + success = True + break + else: + time.sleep(10) + if success: + break + retries += 1 + if not success: + # remove-brick still in process, needs to be committed after completion. + module.fail_json(msg="Exceeded number of tries, check remove-brick status.\n" + "Commit operation needs to be followed.") + + +def reduce_config(name, removed_bricks, replicas, force): + out = run_gluster(['volume', 'heal', name, 'info']) + summary = out.split("\n") + for line in summary: + if 'Number' in line and int(line.split(":")[1].strip()) != 0: + module.fail_json(msg="Operation aborted, self-heal in progress.") + args = ['volume', 'remove-brick', name, 'replica', replicas] + args.extend(removed_bricks) + if force: + args.append('force') + else: + module.fail_json(msg="Force option is mandatory") + run_gluster(args) + + +def do_rebalance(name): + run_gluster(['volume', 'rebalance', name, 'start']) + + +def enable_quota(name): + run_gluster(['volume', 'quota', name, 'enable']) + + +def set_quota(name, directory, value): + run_gluster(['volume', 'quota', name, 'limit-usage', directory, value]) + + +def main(): + # MAIN + + global module + module = AnsibleModule( + argument_spec=dict( + name=dict(type='str', required=True, aliases=['volume']), + state=dict(type='str', required=True, choices=['absent', 'started', 'stopped', 'present']), + cluster=dict(type='list'), + host=dict(type='str'), + stripes=dict(type='int'), + replicas=dict(type='int'), + arbiters=dict(type='int'), + disperses=dict(type='int'), + redundancies=dict(type='int'), + transport=dict(type='str', default='tcp', choices=['tcp', 'rdma', 'tcp,rdma']), + bricks=dict(type='str', aliases=['brick']), + start_on_create=dict(type='bool', default=True), + rebalance=dict(type='bool', default=False), + options=dict(type='dict', default={}), + quota=dict(type='str'), + directory=dict(type='str'), + force=dict(type='bool', default=False), + ), + ) + + global glusterbin + glusterbin = module.get_bin_path('gluster', True) + + changed = False + + action = module.params['state'] + volume_name = module.params['name'] + cluster = module.params['cluster'] + brick_paths = module.params['bricks'] + stripes = module.params['stripes'] + replicas = module.params['replicas'] + arbiters = module.params['arbiters'] + disperses = module.params['disperses'] + redundancies = module.params['redundancies'] + transport = module.params['transport'] + myhostname = module.params['host'] + start_on_create = module.boolean(module.params['start_on_create']) + rebalance = module.boolean(module.params['rebalance']) + force = module.boolean(module.params['force']) + + if not myhostname: + myhostname = socket.gethostname() + + # Clean up if last element is empty. 
Consider that yml can look like this: + # cluster="{% for host in groups['glusterfs'] %}{{ hostvars[host]['private_ip'] }},{% endfor %}" + if cluster is not None and len(cluster) > 1 and cluster[-1] == '': + cluster = cluster[0:-1] + + if cluster is None: + cluster = [] + + if brick_paths is not None and "," in brick_paths: + brick_paths = brick_paths.split(",") + else: + brick_paths = [brick_paths] + + options = module.params['options'] + quota = module.params['quota'] + directory = module.params['directory'] + + # get current state info + peers = get_peers() + volumes = get_volumes() + quotas = {} + if volume_name in volumes and volumes[volume_name]['quota'] and volumes[volume_name]['status'].lower() == 'started': + quotas = get_quotas(volume_name, True) + + # do the work! + if action == 'absent': + if volume_name in volumes: + if volumes[volume_name]['status'].lower() != 'stopped': + stop_volume(volume_name) + run_gluster(['volume', 'delete', volume_name]) + changed = True + + if action == 'present': + probe_all_peers(cluster, peers, myhostname) + + # create if it doesn't exist + if volume_name not in volumes: + create_volume(volume_name, stripes, replicas, arbiters, disperses, redundancies, transport, cluster, brick_paths, force) + volumes = get_volumes() + changed = True + + if volume_name in volumes: + if volumes[volume_name]['status'].lower() != 'started' and start_on_create: + start_volume(volume_name) + changed = True + + # switch bricks + new_bricks = [] + removed_bricks = [] + all_bricks = [] + bricks_in_volume = volumes[volume_name]['bricks'] + + for node in cluster: + for brick_path in brick_paths: + brick = '%s:%s' % (node, brick_path) + all_bricks.append(brick) + if brick not in bricks_in_volume: + new_bricks.append(brick) + + if not new_bricks and len(all_bricks) > 0 and \ + len(all_bricks) < len(bricks_in_volume): + for brick in bricks_in_volume: + if brick not in all_bricks: + removed_bricks.append(brick) + + if new_bricks: + add_bricks(volume_name, new_bricks, stripes, replicas, force) + changed = True + + if removed_bricks: + if replicas and int(replicas) < int(volumes[volume_name]['replicas']): + reduce_config(volume_name, removed_bricks, str(replicas), force) + else: + remove_bricks(volume_name, removed_bricks, force) + changed = True + + # handle quotas + if quota: + if not volumes[volume_name]['quota']: + enable_quota(volume_name) + quotas = get_quotas(volume_name, False) + if directory not in quotas or quotas[directory] != quota: + set_quota(volume_name, directory, quota) + changed = True + + # set options + for option in options.keys(): + if option not in volumes[volume_name]['options'] or volumes[volume_name]['options'][option] != options[option]: + set_volume_option(volume_name, option, options[option]) + changed = True + + else: + module.fail_json(msg='failed to create volume %s' % volume_name) + + if action != 'absent' and volume_name not in volumes: + module.fail_json(msg='volume not found %s' % volume_name) + + if action == 'started': + if volumes[volume_name]['status'].lower() != 'started': + start_volume(volume_name) + changed = True + + if action == 'stopped': + if volumes[volume_name]['status'].lower() != 'stopped': + stop_volume(volume_name) + changed = True + + if changed: + volumes = get_volumes() + if rebalance: + do_rebalance(volume_name) + + facts = {} + facts['glusterfs'] = {'peers': peers, 'volumes': volumes, 'quotas': quotas} + + module.exit_json(changed=changed, ansible_facts=facts) + + +if __name__ == '__main__': + main() diff --git 
a/plugins/modules/storage/hpe3par/ss_3par_cpg.py b/plugins/modules/storage/hpe3par/ss_3par_cpg.py new file mode 100644 index 0000000000..80feecfcc9 --- /dev/null +++ b/plugins/modules/storage/hpe3par/ss_3par_cpg.py @@ -0,0 +1,300 @@ +#!/usr/bin/python +# Copyright: (c) 2018, Hewlett Packard Enterprise Development LP +# GNU General Public License v3.0+ +# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = r''' +--- +short_description: Manage HPE StoreServ 3PAR CPG +author: + - Farhan Nomani (@farhan7500) + - Gautham P Hegde (@gautamphegde) +description: + - Create and delete CPG on HPE 3PAR. +module: ss_3par_cpg +options: + cpg_name: + description: + - Name of the CPG. + type: str + required: true + disk_type: + choices: + - FC + - NL + - SSD + description: + - Specifies that physical disks must have the specified device type. + type: str + domain: + description: + - Specifies the name of the domain in which the object will reside. + type: str + growth_increment: + description: + - Specifies the growth increment(in MiB, GiB or TiB) the amount of logical disk storage + created on each auto-grow operation. + type: str + growth_limit: + description: + - Specifies that the autogrow operation is limited to the specified + storage amount that sets the growth limit(in MiB, GiB or TiB). + type: str + growth_warning: + description: + - Specifies that the threshold(in MiB, GiB or TiB) of used logical disk space when exceeded + results in a warning alert. + type: str + high_availability: + choices: + - PORT + - CAGE + - MAG + description: + - Specifies that the layout must support the failure of one port pair, + one cage, or one magazine. + type: str + raid_type: + choices: + - R0 + - R1 + - R5 + - R6 + description: + - Specifies the RAID type for the logical disk. + type: str + set_size: + description: + - Specifies the set size in the number of chunklets. + type: int + state: + choices: + - present + - absent + description: + - Whether the specified CPG should exist or not. + required: true + type: str + secure: + description: + - Specifies whether the certificate needs to be validated while communicating. 
+ type: bool + default: no +extends_documentation_fragment: +- community.general.hpe3par + +''' + + +EXAMPLES = r''' + - name: Create CPG sample_cpg + ss_3par_cpg: + storage_system_ip: 10.10.10.1 + storage_system_username: username + storage_system_password: password + state: present + cpg_name: sample_cpg + domain: sample_domain + growth_increment: 32000 MiB + growth_limit: 64000 MiB + growth_warning: 48000 MiB + raid_type: R6 + set_size: 8 + high_availability: MAG + disk_type: FC + secure: no + + - name: Delete CPG sample_cpg + ss_3par_cpg: + storage_system_ip: 10.10.10.1 + storage_system_username: username + storage_system_password: password + state: absent + cpg_name: sample_cpg + secure: no +''' + +RETURN = r''' +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.storage.hpe3par import hpe3par +try: + from hpe3par_sdk import client + from hpe3parclient import exceptions + HAS_3PARCLIENT = True +except ImportError: + HAS_3PARCLIENT = False + + +def validate_set_size(raid_type, set_size): + if raid_type: + set_size_array = client.HPE3ParClient.RAID_MAP[raid_type]['set_sizes'] + if set_size in set_size_array: + return True + return False + + +def cpg_ldlayout_map(ldlayout_dict): + if ldlayout_dict['RAIDType'] is not None and ldlayout_dict['RAIDType']: + ldlayout_dict['RAIDType'] = client.HPE3ParClient.RAID_MAP[ + ldlayout_dict['RAIDType']]['raid_value'] + if ldlayout_dict['HA'] is not None and ldlayout_dict['HA']: + ldlayout_dict['HA'] = getattr( + client.HPE3ParClient, ldlayout_dict['HA']) + return ldlayout_dict + + +def create_cpg( + client_obj, + cpg_name, + domain, + growth_increment, + growth_limit, + growth_warning, + raid_type, + set_size, + high_availability, + disk_type): + try: + if not validate_set_size(raid_type, set_size): + return (False, False, "Set size %s not part of RAID set %s" % (set_size, raid_type)) + if not client_obj.cpgExists(cpg_name): + + disk_patterns = [] + if disk_type: + disk_type = getattr(client.HPE3ParClient, disk_type) + disk_patterns = [{'diskType': disk_type}] + ld_layout = { + 'RAIDType': raid_type, + 'setSize': set_size, + 'HA': high_availability, + 'diskPatterns': disk_patterns} + ld_layout = cpg_ldlayout_map(ld_layout) + if growth_increment is not None: + growth_increment = hpe3par.convert_to_binary_multiple( + growth_increment) + if growth_limit is not None: + growth_limit = hpe3par.convert_to_binary_multiple( + growth_limit) + if growth_warning is not None: + growth_warning = hpe3par.convert_to_binary_multiple( + growth_warning) + optional = { + 'domain': domain, + 'growthIncrementMiB': growth_increment, + 'growthLimitMiB': growth_limit, + 'usedLDWarningAlertMiB': growth_warning, + 'LDLayout': ld_layout} + client_obj.createCPG(cpg_name, optional) + else: + return (True, False, "CPG already present") + except exceptions.ClientException as e: + return (False, False, "CPG creation failed | %s" % (e)) + return (True, True, "Created CPG %s successfully." % cpg_name) + + +def delete_cpg( + client_obj, + cpg_name): + try: + if client_obj.cpgExists(cpg_name): + client_obj.deleteCPG(cpg_name) + else: + return (True, False, "CPG does not exist") + except exceptions.ClientException as e: + return (False, False, "CPG delete failed | %s" % e) + return (True, True, "Deleted CPG %s successfully." 
% cpg_name)
+
+
+def main():
+    module = AnsibleModule(argument_spec=hpe3par.cpg_argument_spec(),
+                           required_together=[['raid_type', 'set_size']])
+    if not HAS_3PARCLIENT:
+        module.fail_json(msg='the python hpe3par_sdk library is required (https://pypi.org/project/hpe3par_sdk)')
+
+    if len(module.params["cpg_name"]) < 1 or len(module.params["cpg_name"]) > 31:
+        module.fail_json(msg="CPG name must be at least 1 character and not more than 31 characters")
+
+    storage_system_ip = module.params["storage_system_ip"]
+    storage_system_username = module.params["storage_system_username"]
+    storage_system_password = module.params["storage_system_password"]
+    cpg_name = module.params["cpg_name"]
+    domain = module.params["domain"]
+    growth_increment = module.params["growth_increment"]
+    growth_limit = module.params["growth_limit"]
+    growth_warning = module.params["growth_warning"]
+    raid_type = module.params["raid_type"]
+    set_size = module.params["set_size"]
+    high_availability = module.params["high_availability"]
+    disk_type = module.params["disk_type"]
+    secure = module.params["secure"]
+
+    wsapi_url = 'https://%s:8080/api/v1' % storage_system_ip
+    try:
+        client_obj = client.HPE3ParClient(wsapi_url, secure)
+    except exceptions.SSLCertFailed:
+        module.fail_json(msg="SSL Certificate Failed")
+    except exceptions.ConnectionError:
+        module.fail_json(msg="Connection Error")
+    except exceptions.UnsupportedVersion:
+        module.fail_json(msg="Unsupported WSAPI version")
+    except Exception as e:
+        module.fail_json(msg="Initializing client failed. %s" % e)
+
+    if storage_system_username is None or storage_system_password is None:
+        module.fail_json(msg="Storage system username or password is None")
+    if cpg_name is None:
+        module.fail_json(msg="CPG Name is None")
+
+    # States
+    if module.params["state"] == "present":
+        try:
+            client_obj.login(storage_system_username, storage_system_password)
+            return_status, changed, msg = create_cpg(
+                client_obj,
+                cpg_name,
+                domain,
+                growth_increment,
+                growth_limit,
+                growth_warning,
+                raid_type,
+                set_size,
+                high_availability,
+                disk_type
+            )
+        except Exception as e:
+            module.fail_json(msg="CPG create failed | %s" % e)
+        finally:
+            client_obj.logout()
+
+    elif module.params["state"] == "absent":
+        try:
+            client_obj.login(storage_system_username, storage_system_password)
+            return_status, changed, msg = delete_cpg(
+                client_obj,
+                cpg_name
+            )
+        except Exception as e:
+            module.fail_json(msg="CPG delete failed | %s" % e)
+        finally:
+            client_obj.logout()
+
+    if return_status:
+        module.exit_json(changed=changed, msg=msg)
+    else:
+        module.fail_json(msg=msg)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/storage/ibm/ibm_sa_domain.py b/plugins/modules/storage/ibm/ibm_sa_domain.py
new file mode 100644
index 0000000000..9774e6c291
--- /dev/null
+++ b/plugins/modules/storage/ibm/ibm_sa_domain.py
@@ -0,0 +1,161 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2018, IBM CORPORATION
+# Author(s): Tzur Eliyahu
+#
+# GNU General Public License v3.0+ (see COPYING or
+# https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'status': ['preview'],
+                    'supported_by': 'community',
+                    'metadata_version': '1.1'}
+
+DOCUMENTATION = '''
+---
+module: ibm_sa_domain
+short_description: Manages domains on IBM Spectrum Accelerate Family storage systems
+
+description:
+    - "This module can be used to add domains to or remove them from IBM Spectrum Accelerate
+      Family storage systems."
+
+options:
+    domain:
+        description:
+            - Name of the domain to be managed.
+        required: true
+    state:
+        description:
+            - The desired state of the domain.
+        default: "present"
+        choices: [ "present", "absent" ]
+    ldap_id:
+        description:
+            - LDAP ID to add to the domain.
+        required: false
+    size:
+        description:
+            - Size of the domain.
+        required: false
+    hard_capacity:
+        description:
+            - Hard capacity of the domain.
+        required: false
+    soft_capacity:
+        description:
+            - Soft capacity of the domain.
+        required: false
+    max_cgs:
+        description:
+            - Number of max cgs.
+        required: false
+    max_dms:
+        description:
+            - Number of max dms.
+        required: false
+    max_mirrors:
+        description:
+            - Number of max_mirrors.
+        required: false
+    max_pools:
+        description:
+            - Number of max_pools.
+        required: false
+    max_volumes:
+        description:
+            - Number of max_volumes.
+        required: false
+    perf_class:
+        description:
+            - Add the domain to a performance class.
+        required: false
+
+extends_documentation_fragment:
+- community.general.ibm_storage
+
+
+author:
+    - Tzur Eliyahu (@tzure)
+'''
+
+EXAMPLES = '''
+- name: Define new domain.
+  ibm_sa_domain:
+    domain: domain_name
+    size: domain_size
+    state: present
+    username: admin
+    password: secret
+    endpoints: hostdev-system
+
+- name: Delete domain.
+  ibm_sa_domain:
+    domain: domain_name
+    state: absent
+    username: admin
+    password: secret
+    endpoints: hostdev-system
+'''
+RETURN = '''
+msg:
+    description: module return status.
+    returned: as needed
+    type: str
+    sample: "domain 'domain_name' created successfully."
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.ibm_sa_utils import execute_pyxcli_command, \
+    connect_ssl, spectrum_accelerate_spec, is_pyxcli_installed
+
+
+def main():
+    argument_spec = spectrum_accelerate_spec()
+    argument_spec.update(
+        dict(
+            state=dict(default='present', choices=['present', 'absent']),
+            domain=dict(required=True),
+            size=dict(),
+            max_dms=dict(),
+            max_cgs=dict(),
+            ldap_id=dict(),
+            max_mirrors=dict(),
+            max_pools=dict(),
+            max_volumes=dict(),
+            perf_class=dict(),
+            hard_capacity=dict(),
+            soft_capacity=dict()
+        )
+    )
+
+    module = AnsibleModule(argument_spec)
+
+    is_pyxcli_installed(module)
+
+    xcli_client = connect_ssl(module)
+    domain = xcli_client.cmd.domain_list(
+        domain=module.params['domain']).as_single_element
+    state = module.params['state']
+
+    state_changed = False
+    msg = 'Domain \'{0}\''.format(module.params['domain'])
+    if state == 'present' and not domain:
+        state_changed = execute_pyxcli_command(
+            module, 'domain_create', xcli_client)
+        msg += " created successfully."
+    elif state == 'absent' and domain:
+        state_changed = execute_pyxcli_command(
+            module, 'domain_delete', xcli_client)
+        msg += " deleted successfully."
+    else:
+        msg += " state unchanged."
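+    # state_changed reflects whether a domain_create/domain_delete was
+    # actually issued; the no-op branch keeps repeated runs idempotent and
+    # still returns a descriptive message.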
+
+    module.exit_json(changed=state_changed, msg=msg)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/storage/ibm/ibm_sa_host.py b/plugins/modules/storage/ibm/ibm_sa_host.py
new file mode 100644
index 0000000000..293849cbc0
--- /dev/null
+++ b/plugins/modules/storage/ibm/ibm_sa_host.py
@@ -0,0 +1,123 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (C) 2018 IBM CORPORATION
+# Author(s): Tzur Eliyahu
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'status': ['preview'],
+                    'supported_by': 'community',
+                    'metadata_version': '1.1'}
+
+DOCUMENTATION = '''
+---
+module: ibm_sa_host
+short_description: Adds hosts to or removes them from IBM Spectrum Accelerate Family storage systems.
+
+description:
+    - "This module adds hosts to or removes them from IBM Spectrum Accelerate Family storage systems."
+
+options:
+    host:
+        description:
+            - Host name.
+        required: true
+    state:
+        description:
+            - Host state.
+        default: "present"
+        choices: [ "present", "absent" ]
+    cluster:
+        description:
+            - The name of the cluster to include the host.
+        required: false
+    domain:
+        description:
+            - The domains the cluster will be attached to.
+              To include more than one domain,
+              separate domain names with commas.
+              To include all existing domains, use an asterisk ("*").
+        required: false
+    iscsi_chap_name:
+        description:
+            - The host's CHAP name identifier.
+        required: false
+    iscsi_chap_secret:
+        description:
+            - The password of the initiator used to
+              authenticate to the system when CHAP is enabled.
+        required: false
+
+extends_documentation_fragment:
+- community.general.ibm_storage
+
+
+author:
+    - Tzur Eliyahu (@tzure)
+'''
+
+EXAMPLES = '''
+- name: Define new host.
+  ibm_sa_host:
+    host: host_name
+    state: present
+    username: admin
+    password: secret
+    endpoints: hostdev-system
+
+- name: Delete host.
+ ibm_sa_host: + host: host_name + state: absent + username: admin + password: secret + endpoints: hostdev-system +''' +RETURN = ''' +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.ibm_sa_utils import execute_pyxcli_command, \ + connect_ssl, spectrum_accelerate_spec, is_pyxcli_installed + + +def main(): + argument_spec = spectrum_accelerate_spec() + argument_spec.update( + dict( + state=dict(default='present', choices=['present', 'absent']), + host=dict(required=True), + cluster=dict(), + domain=dict(), + iscsi_chap_name=dict(), + iscsi_chap_secret=dict() + ) + ) + + module = AnsibleModule(argument_spec) + + is_pyxcli_installed(module) + + xcli_client = connect_ssl(module) + host = xcli_client.cmd.host_list( + host=module.params['host']).as_single_element + state = module.params['state'] + + state_changed = False + if state == 'present' and not host: + state_changed = execute_pyxcli_command( + module, 'host_define', xcli_client) + elif state == 'absent' and host: + state_changed = execute_pyxcli_command( + module, 'host_delete', xcli_client) + + module.exit_json(changed=state_changed) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/storage/ibm/ibm_sa_host_ports.py b/plugins/modules/storage/ibm/ibm_sa_host_ports.py new file mode 100644 index 0000000000..5a43bdb1db --- /dev/null +++ b/plugins/modules/storage/ibm/ibm_sa_host_ports.py @@ -0,0 +1,132 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (C) 2018 IBM CORPORATION +# Author(s): Tzur Eliyahu +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'metadata_version': '1.1'} + +DOCUMENTATION = ''' +--- +module: ibm_sa_host_ports +short_description: Add host ports on IBM Spectrum Accelerate Family storage systems. + +description: + - "This module adds ports to or removes them from the hosts + on IBM Spectrum Accelerate Family storage systems." + +options: + host: + description: + - Host name. + required: true + state: + description: + - Host ports state. + required: true + default: "present" + choices: [ "present", "absent" ] + iscsi_name: + description: + - iSCSI initiator name. + required: false + fcaddress: + description: + - Fiber channel address. + required: false + num_of_visible_targets: + description: + - Number of visible targets. + required: false + +extends_documentation_fragment: +- community.general.ibm_storage + + +author: + - Tzur Eliyahu (@tzure) +''' + +EXAMPLES = ''' +- name: Add ports for host. + ibm_sa_host_ports: + host: test_host + iscsi_name: iqn.1994-05.com*** + username: admin + password: secret + endpoints: hostdev-system + state: present + +- name: Remove ports for host. 
+ ibm_sa_host_ports: + host: test_host + iscsi_name: iqn.1994-05.com*** + username: admin + password: secret + endpoints: hostdev-system + state: absent + +''' +RETURN = ''' +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.ibm_sa_utils import (execute_pyxcli_command, connect_ssl, + spectrum_accelerate_spec, is_pyxcli_installed) + + +def main(): + argument_spec = spectrum_accelerate_spec() + argument_spec.update( + dict( + state=dict(default='present', choices=['present', 'absent']), + host=dict(required=True), + iscsi_name=dict(), + fcaddress=dict(), + num_of_visible_targets=dict() + ) + ) + + module = AnsibleModule(argument_spec) + is_pyxcli_installed(module) + + xcli_client = connect_ssl(module) + # required args + ports = [] + try: + ports = xcli_client.cmd.host_list_ports( + host=module.params.get('host')).as_list + except Exception: + pass + state = module.params['state'] + port_exists = False + ports = [port.get('port_name') for port in ports] + + fc_ports = (module.params.get('fcaddress') + if module.params.get('fcaddress') else []) + iscsi_ports = (module.params.get('iscsi_name') + if module.params.get('iscsi_name') else []) + for port in ports: + if port in iscsi_ports or port in fc_ports: + port_exists = True + break + state_changed = False + if state == 'present' and not port_exists: + state_changed = execute_pyxcli_command( + module, 'host_add_port', xcli_client) + if state == 'absent' and port_exists: + state_changed = execute_pyxcli_command( + module, 'host_remove_port', xcli_client) + + module.exit_json(changed=state_changed) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/storage/ibm/ibm_sa_pool.py b/plugins/modules/storage/ibm/ibm_sa_pool.py new file mode 100644 index 0000000000..fb5e4c7b7e --- /dev/null +++ b/plugins/modules/storage/ibm/ibm_sa_pool.py @@ -0,0 +1,120 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (C) 2018 IBM CORPORATION +# Author(s): Tzur Eliyahu +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'metadata_version': '1.1'} + +DOCUMENTATION = ''' +--- +module: ibm_sa_pool +short_description: Handles pools on IBM Spectrum Accelerate Family storage systems. + +description: + - "This module creates or deletes pools to be used on IBM Spectrum Accelerate Family storage systems" + +options: + pool: + description: + - Pool name. + required: true + state: + description: + - Pool state. + required: true + default: "present" + choices: [ "present", "absent" ] + size: + description: + - Pool size in GB + required: false + snapshot_size: + description: + - Pool snapshot size in GB + required: false + domain: + description: + - Adds the pool to the specified domain. + required: false + perf_class: + description: + - Assigns a perf_class to the pool. + required: false + +extends_documentation_fragment: +- community.general.ibm_storage + + +author: + - Tzur Eliyahu (@tzure) +''' + +EXAMPLES = ''' +- name: Create new pool. + ibm_sa_pool: + name: pool_name + size: 300 + state: present + username: admin + password: secret + endpoints: hostdev-system + +- name: Delete pool. 
+ ibm_sa_pool: + name: pool_name + state: absent + username: admin + password: secret + endpoints: hostdev-system +''' +RETURN = ''' +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.ibm_sa_utils import execute_pyxcli_command, \ + connect_ssl, spectrum_accelerate_spec, is_pyxcli_installed + + +def main(): + argument_spec = spectrum_accelerate_spec() + argument_spec.update( + dict( + state=dict(default='present', choices=['present', 'absent']), + pool=dict(required=True), + size=dict(), + snapshot_size=dict(), + domain=dict(), + perf_class=dict() + ) + ) + + module = AnsibleModule(argument_spec) + + is_pyxcli_installed(module) + + xcli_client = connect_ssl(module) + pool = xcli_client.cmd.pool_list( + pool=module.params['pool']).as_single_element + state = module.params['state'] + + state_changed = False + if state == 'present' and not pool: + state_changed = execute_pyxcli_command( + module, 'pool_create', xcli_client) + if state == 'absent' and pool: + state_changed = execute_pyxcli_command( + module, 'pool_delete', xcli_client) + + module.exit_json(changed=state_changed) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/storage/ibm/ibm_sa_vol.py b/plugins/modules/storage/ibm/ibm_sa_vol.py new file mode 100644 index 0000000000..4de5ef13b1 --- /dev/null +++ b/plugins/modules/storage/ibm/ibm_sa_vol.py @@ -0,0 +1,112 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (C) 2018 IBM CORPORATION +# Author(s): Tzur Eliyahu +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'metadata_version': '1.1'} + +DOCUMENTATION = ''' +--- +module: ibm_sa_vol +short_description: Handle volumes on IBM Spectrum Accelerate Family storage systems. + +description: + - "This module creates or deletes volumes to be used on IBM Spectrum Accelerate Family storage systems." + +options: + vol: + description: + - Volume name. + required: true + pool: + description: + - Volume pool. + required: false + state: + description: + - Volume state. + required: true + default: "present" + choices: [ "present", "absent" ] + size: + description: + - Volume size. + required: false + +extends_documentation_fragment: +- community.general.ibm_storage + + +author: + - Tzur Eliyahu (@tzure) +''' + +EXAMPLES = ''' +- name: Create a new volume. + ibm_sa_vol: + vol: volume_name + pool: pool_name + size: 17 + state: present + username: admin + password: secret + endpoints: hostdev-system + +- name: Delete an existing volume. 
+ ibm_sa_vol: + vol: volume_name + state: absent + username: admin + password: secret + endpoints: hostdev-system +''' +RETURN = ''' +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.ibm_sa_utils import execute_pyxcli_command, \ + connect_ssl, spectrum_accelerate_spec, is_pyxcli_installed + + +def main(): + argument_spec = spectrum_accelerate_spec() + argument_spec.update( + dict( + state=dict(default='present', choices=['present', 'absent']), + vol=dict(required=True), + pool=dict(), + size=dict() + ) + ) + + module = AnsibleModule(argument_spec) + + is_pyxcli_installed(module) + + xcli_client = connect_ssl(module) + # required args + volume = xcli_client.cmd.vol_list( + vol=module.params.get('vol')).as_single_element + state = module.params['state'] + + state_changed = False + if state == 'present' and not volume: + state_changed = execute_pyxcli_command( + module, 'vol_create', xcli_client) + elif state == 'absent' and volume: + state_changed = execute_pyxcli_command( + module, 'vol_delete', xcli_client) + + module.exit_json(changed=state_changed) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/storage/ibm/ibm_sa_vol_map.py b/plugins/modules/storage/ibm/ibm_sa_vol_map.py new file mode 100644 index 0000000000..d6f38b748a --- /dev/null +++ b/plugins/modules/storage/ibm/ibm_sa_vol_map.py @@ -0,0 +1,140 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (C) 2018 IBM CORPORATION +# Author(s): Tzur Eliyahu +# +# GNU General Public License v3.0+ (see COPYING or +# https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'metadata_version': '1.1'} + +DOCUMENTATION = ''' +--- +module: ibm_sa_vol_map +short_description: Handles volume mapping on IBM Spectrum Accelerate Family storage systems. + +description: + - "This module maps volumes to or unmaps them from the hosts on + IBM Spectrum Accelerate Family storage systems." + +options: + vol: + description: + - Volume name. + required: true + state: + default: "present" + choices: [ "present", "absent" ] + description: + - When the state is present the volume is mapped. + When the state is absent, the volume is meant to be unmapped. + required: true + cluster: + description: + - Maps the volume to a cluster. + required: false + host: + description: + - Maps the volume to a host. + required: false + lun: + description: + - The LUN identifier. + required: false + override: + description: + - Overrides the existing volume mapping. + required: false + +extends_documentation_fragment: +- community.general.ibm_storage + + +author: + - Tzur Eliyahu (@tzure) +''' + +EXAMPLES = ''' +- name: Map volume to host. + ibm_sa_vol_map: + vol: volume_name + lun: 1 + host: host_name + username: admin + password: secret + endpoints: hostdev-system + state: present + +- name: Map volume to cluster. + ibm_sa_vol_map: + vol: volume_name + lun: 1 + cluster: cluster_name + username: admin + password: secret + endpoints: hostdev-system + state: present + +- name: Unmap volume. 
+ ibm_sa_vol_map: + host: host_name + username: admin + password: secret + endpoints: hostdev-system + state: absent +''' +RETURN = ''' +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.ibm_sa_utils import (execute_pyxcli_command, + connect_ssl, spectrum_accelerate_spec, is_pyxcli_installed) + + +def main(): + argument_spec = spectrum_accelerate_spec() + argument_spec.update( + dict( + state=dict(default='present', choices=['present', 'absent']), + vol=dict(required=True), + lun=dict(), + cluster=dict(), + host=dict(), + override=dict() + ) + ) + + module = AnsibleModule(argument_spec) + is_pyxcli_installed(module) + + xcli_client = connect_ssl(module) + # required args + mapping = False + try: + mapped_hosts = xcli_client.cmd.vol_mapping_list( + vol=module.params.get('vol')).as_list + for host in mapped_hosts: + if host['host'] == module.params.get("host", ""): + mapping = True + except Exception: + pass + state = module.params['state'] + + state_changed = False + if state == 'present' and not mapping: + state_changed = execute_pyxcli_command(module, 'map_vol', xcli_client) + if state == 'absent' and mapping: + state_changed = execute_pyxcli_command( + module, 'unmap_vol', xcli_client) + + module.exit_json(changed=state_changed) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/storage/infinidat/infini_export.py b/plugins/modules/storage/infinidat/infini_export.py new file mode 100644 index 0000000000..73ee538c02 --- /dev/null +++ b/plugins/modules/storage/infinidat/infini_export.py @@ -0,0 +1,196 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2016, Gregory Shulov (gregory.shulov@gmail.com) +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: infini_export +short_description: Create, Delete or Modify NFS Exports on Infinibox +description: + - This module creates, deletes or modifies NFS exports on Infinibox. +author: Gregory Shulov (@GR360RY) +options: + name: + description: + - Export name. Should always start with C(/). (ex. name=/data) + aliases: ['export', 'path'] + required: true + state: + description: + - Creates/Modifies export when present and removes when absent. + required: false + default: "present" + choices: [ "present", "absent" ] + inner_path: + description: + - Internal path of the export. + default: "/" + client_list: + description: + - List of dictionaries with client entries. See examples. + Check infini_export_client module to modify individual NFS client entries for export. + default: "All Hosts(*), RW, no_root_squash: True" + required: false + filesystem: + description: + - Name of exported file system. 
+ required: true +extends_documentation_fragment: +- community.general.infinibox + +requirements: + - munch +''' + +EXAMPLES = ''' +- name: Export bar filesystem under foo pool as /data + infini_export: + name: /data01 + filesystem: foo + user: admin + password: secret + system: ibox001 + +- name: Export and specify client list explicitly + infini_export: + name: /data02 + filesystem: foo + client_list: + - client: 192.168.0.2 + access: RW + no_root_squash: True + - client: 192.168.0.100 + access: RO + no_root_squash: False + - client: 192.168.0.10-192.168.0.20 + access: RO + no_root_squash: False + system: ibox001 + user: admin + password: secret +''' + +RETURN = ''' +''' +import traceback + +MUNCH_IMP_ERR = None +try: + from munch import unmunchify + HAS_MUNCH = True +except ImportError: + MUNCH_IMP_ERR = traceback.format_exc() + HAS_MUNCH = False + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib +from ansible_collections.community.general.plugins.module_utils.infinibox import HAS_INFINISDK, api_wrapper, get_system, infinibox_argument_spec + + +def transform(d): + return frozenset(d.items()) + + +@api_wrapper +def get_filesystem(module, system): + """Return Filesystem or None""" + try: + return system.filesystems.get(name=module.params['filesystem']) + except Exception: + return None + + +@api_wrapper +def get_export(module, filesystem, system): + """Return export if found. When not found return None""" + + export = None + exports_to_list = system.exports.to_list() + + for e in exports_to_list: + if e.get_export_path() == module.params['name']: + export = e + break + + return export + + +@api_wrapper +def update_export(module, export, filesystem, system): + """ Create new filesystem or update existing one""" + + changed = False + + name = module.params['name'] + client_list = module.params['client_list'] + + if export is None: + if not module.check_mode: + export = system.exports.create(export_path=name, filesystem=filesystem) + if client_list: + export.update_permissions(client_list) + changed = True + else: + if client_list: + if set(map(transform, unmunchify(export.get_permissions()))) != set(map(transform, client_list)): + if not module.check_mode: + export.update_permissions(client_list) + changed = True + + module.exit_json(changed=changed) + + +@api_wrapper +def delete_export(module, export): + """ Delete file system""" + if not module.check_mode: + export.delete() + module.exit_json(changed=True) + + +def main(): + argument_spec = infinibox_argument_spec() + argument_spec.update( + dict( + name=dict(required=True), + state=dict(default='present', choices=['present', 'absent']), + filesystem=dict(required=True), + client_list=dict(type='list') + ) + ) + + module = AnsibleModule(argument_spec, supports_check_mode=True) + + if not HAS_INFINISDK: + module.fail_json(msg=missing_required_lib('infinisdk')) + if not HAS_MUNCH: + module.fail_json(msg=missing_required_lib('munch'), exception=MUNCH_IMP_ERR) + + state = module.params['state'] + system = get_system(module) + filesystem = get_filesystem(module, system) + export = get_export(module, filesystem, system) + + if filesystem is None: + module.fail_json(msg='Filesystem {0} not found'.format(module.params['filesystem'])) + + if state == 'present': + update_export(module, export, filesystem, system) + elif export and state == 'absent': + delete_export(module, export) + elif export is None and state == 'absent': + module.exit_json(changed=False) + + +if __name__ == '__main__': + main() diff --git 
a/plugins/modules/storage/infinidat/infini_export_client.py b/plugins/modules/storage/infinidat/infini_export_client.py
new file mode 100644
index 0000000000..bc89e955fe
--- /dev/null
+++ b/plugins/modules/storage/infinidat/infini_export_client.py
@@ -0,0 +1,206 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2016, Gregory Shulov (gregory.shulov@gmail.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: infini_export_client
+short_description: Create, Delete or Modify NFS Client(s) for existing exports on Infinibox
+description:
+    - This module creates, deletes or modifies NFS client(s) for existing exports on Infinibox.
+author: Gregory Shulov (@GR360RY)
+options:
+  client:
+    description:
+      - Client IP or Range. Ranges can be defined as follows:
+        192.168.0.1-192.168.0.254.
+    aliases: ['name']
+    required: true
+  state:
+    description:
+      - Creates/Modifies client when present and removes when absent.
+    required: false
+    default: "present"
+    choices: [ "present", "absent" ]
+  access_mode:
+    description:
+      - Read Write or Read Only Access.
+    choices: [ "RW", "RO" ]
+    default: RW
+    required: false
+  no_root_squash:
+    description:
+      - Don't squash root user to anonymous. Will be set to "no" on creation if not specified explicitly.
+    type: bool
+    default: no
+    required: false
+  export:
+    description:
+      - Name of the export.
+    required: true
+extends_documentation_fragment:
+- community.general.infinibox
+
+requirements:
+  - munch
+'''
+
+EXAMPLES = '''
+- name: Make sure nfs client 10.0.0.1 is configured for export. Allow root access
+  infini_export_client:
+    client: 10.0.0.1
+    access_mode: RW
+    no_root_squash: yes
+    export: /data
+    user: admin
+    password: secret
+    system: ibox001
+
+- name: Add multiple clients with RO access. Squash root privileges
+  infini_export_client:
+    client: "{{ item }}"
+    access_mode: RO
+    no_root_squash: no
+    export: /data
+    user: admin
+    password: secret
+    system: ibox001
+  with_items:
+    - 10.0.0.2
+    - 10.0.0.3
+'''
+
+RETURN = '''
+'''
+import traceback
+
+MUNCH_IMP_ERR = None
+try:
+    from munch import Munch, unmunchify
+    HAS_MUNCH = True
+except ImportError:
+    MUNCH_IMP_ERR = traceback.format_exc()
+    HAS_MUNCH = False
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible_collections.community.general.plugins.module_utils.infinibox import HAS_INFINISDK, api_wrapper, get_system, infinibox_argument_spec
+
+
+def transform(d):
+    return frozenset(d.items())
+
+
+@api_wrapper
+def get_export(module, system):
+    """Return export if found. Fail module if not found"""
+
+    try:
+        export = system.exports.get(export_path=module.params['export'])
+    except Exception:
+        module.fail_json(msg="Export with export path {0} not found".format(module.params['export']))
+
+    return export
+
+
+@api_wrapper
+def update_client(module, export):
+    """Update export client list"""
+
+    changed = False
+
+    client = module.params['client']
+    access_mode = module.params['access_mode']
+    no_root_squash = module.params['no_root_squash']
+
+    client_list = export.get_permissions()
+    client_not_in_list = True
+
+    for index, item in enumerate(client_list):
+        if item.client == client:
+            client_not_in_list = False
+            if item.access != access_mode:
+                item.access = access_mode
+                changed = True
+            if item.no_root_squash is not no_root_squash:
+                item.no_root_squash = no_root_squash
+                changed = True
+
+    # If access_mode and/or no_root_squash not passed as arguments to the module,
+    # use access_mode with RW value and set no_root_squash to False
+    if client_not_in_list:
+        changed = True
+        client_list.append(Munch(client=client, access=access_mode, no_root_squash=no_root_squash))
+
+    if changed:
+        for index, item in enumerate(client_list):
+            client_list[index] = unmunchify(item)
+        if not module.check_mode:
+            export.update_permissions(client_list)
+
+    module.exit_json(changed=changed)
+
+
+@api_wrapper
+def delete_client(module, export):
+    """Remove client from export client list"""
+
+    client = module.params['client']
+    client_list = export.get_permissions()
+
+    # Rebuild the list instead of calling `del` while iterating over it,
+    # which would shift indices and could skip entries.
+    new_client_list = [item for item in client_list if item.client != client]
+    changed = len(new_client_list) != len(client_list)
+
+    if changed:
+        new_client_list = [unmunchify(item) for item in new_client_list]
+        if not module.check_mode:
+            export.update_permissions(new_client_list)
+
+    module.exit_json(changed=changed)
+
+
+def main():
+    argument_spec = infinibox_argument_spec()
+    argument_spec.update(
+        dict(
+            client=dict(required=True),
+            access_mode=dict(choices=['RO', 'RW'], default='RW'),
+            no_root_squash=dict(type='bool', default='no'),
+            state=dict(default='present', choices=['present', 'absent']),
+            export=dict(required=True)
+        )
+    )
+
+    module = AnsibleModule(argument_spec, supports_check_mode=True)
+
+    if not HAS_INFINISDK:
+        module.fail_json(msg=missing_required_lib('infinisdk'))
+    if not HAS_MUNCH:
+        module.fail_json(msg=missing_required_lib('munch'), exception=MUNCH_IMP_ERR)
+
+    system = get_system(module)
+    export = get_export(module, system)
+
+    if module.params['state'] == 'present':
+        update_client(module, export)
+    else:
+        delete_client(module, export)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/storage/infinidat/infini_fs.py b/plugins/modules/storage/infinidat/infini_fs.py
new file mode 100644
index 0000000000..dc5b717fc6
--- /dev/null
+++ b/plugins/modules/storage/infinidat/infini_fs.py
@@ -0,0 +1,171 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2016, Gregory Shulov (gregory.shulov@gmail.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: infini_fs
+short_description: Create, Delete or Modify filesystems on Infinibox
+description:
+    - This module creates, deletes or modifies filesystems on Infinibox.
+author: Gregory Shulov (@GR360RY) +options: + name: + description: + - File system name. + required: true + state: + description: + - Creates/Modifies file system when present or removes when absent. + required: false + default: present + choices: [ "present", "absent" ] + size: + description: + - File system size in MB, GB or TB units. See examples. + required: false + pool: + description: + - Pool that will host file system. + required: true +extends_documentation_fragment: +- community.general.infinibox + +requirements: + - capacity +''' + +EXAMPLES = ''' +- name: Create new file system named foo under pool named bar + infini_fs: + name: foo + size: 1TB + pool: bar + state: present + user: admin + password: secret + system: ibox001 +''' + +RETURN = ''' +''' +import traceback + +CAPACITY_IMP_ERR = None +try: + from capacity import KiB, Capacity + HAS_CAPACITY = True +except ImportError: + CAPACITY_IMP_ERR = traceback.format_exc() + HAS_CAPACITY = False + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib +from ansible_collections.community.general.plugins.module_utils.infinibox import HAS_INFINISDK, api_wrapper, get_system, infinibox_argument_spec + + +@api_wrapper +def get_pool(module, system): + """Return Pool or None""" + try: + return system.pools.get(name=module.params['pool']) + except Exception: + return None + + +@api_wrapper +def get_filesystem(module, system): + """Return Filesystem or None""" + try: + return system.filesystems.get(name=module.params['name']) + except Exception: + return None + + +@api_wrapper +def create_filesystem(module, system): + """Create Filesystem""" + if not module.check_mode: + filesystem = system.filesystems.create(name=module.params['name'], pool=get_pool(module, system)) + if module.params['size']: + size = Capacity(module.params['size']).roundup(64 * KiB) + filesystem.update_size(size) + module.exit_json(changed=True) + + +@api_wrapper +def update_filesystem(module, filesystem): + """Update Filesystem""" + changed = False + if module.params['size']: + size = Capacity(module.params['size']).roundup(64 * KiB) + if filesystem.get_size() != size: + if not module.check_mode: + filesystem.update_size(size) + changed = True + + module.exit_json(changed=changed) + + +@api_wrapper +def delete_filesystem(module, filesystem): + """ Delete Filesystem""" + if not module.check_mode: + filesystem.delete() + module.exit_json(changed=True) + + +def main(): + argument_spec = infinibox_argument_spec() + argument_spec.update( + dict( + name=dict(required=True), + state=dict(default='present', choices=['present', 'absent']), + pool=dict(required=True), + size=dict() + ) + ) + + module = AnsibleModule(argument_spec, supports_check_mode=True) + + if not HAS_INFINISDK: + module.fail_json(msg=missing_required_lib('infinisdk')) + if not HAS_CAPACITY: + module.fail_json(msg=missing_required_lib('capacity'), exception=CAPACITY_IMP_ERR) + + if module.params['size']: + try: + Capacity(module.params['size']) + except Exception: + module.fail_json(msg='size (Physical Capacity) should be defined in MB, GB, TB or PB units') + + state = module.params['state'] + system = get_system(module) + pool = get_pool(module, system) + filesystem = get_filesystem(module, system) + + if pool is None: + module.fail_json(msg='Pool {0} not found'.format(module.params['pool'])) + + if state == 'present' and not filesystem: + create_filesystem(module, system) + elif state == 'present' and filesystem: + update_filesystem(module, filesystem) + elif state == 'absent' and 
filesystem:
+        delete_filesystem(module, filesystem)
+    elif state == 'absent' and not filesystem:
+        module.exit_json(changed=False)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/storage/infinidat/infini_host.py b/plugins/modules/storage/infinidat/infini_host.py
new file mode 100644
index 0000000000..cad1ab747c
--- /dev/null
+++ b/plugins/modules/storage/infinidat/infini_host.py
@@ -0,0 +1,160 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2016, Gregory Shulov (gregory.shulov@gmail.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: infini_host
+short_description: Create, Delete and Modify Hosts on Infinibox
+description:
+    - This module creates, deletes or modifies hosts on Infinibox.
+author: Gregory Shulov (@GR360RY)
+options:
+  name:
+    description:
+      - Host Name
+    required: true
+  state:
+    description:
+      - Creates/Modifies Host when present or removes when absent
+    required: false
+    default: present
+    choices: [ "present", "absent" ]
+  wwns:
+    description:
+      - List of wwns of the host
+    required: false
+  volume:
+    description:
+      - Volume name to map to the host
+    required: false
+extends_documentation_fragment:
+- community.general.infinibox
+
+'''
+
+EXAMPLES = '''
+- name: Create new host
+  infini_host:
+    name: foo.example.com
+    user: admin
+    password: secret
+    system: ibox001
+
+- name: Make sure host bar is available with wwn ports
+  infini_host:
+    name: bar.example.com
+    wwns:
+      - "00:00:00:00:00:00:00"
+      - "11:11:11:11:11:11:11"
+    system: ibox01
+    user: admin
+    password: secret
+
+- name: Map host foo.example.com to volume bar
+  infini_host:
+    name: foo.example.com
+    volume: bar
+    system: ibox01
+    user: admin
+    password: secret
+'''
+
+RETURN = '''
+'''
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible_collections.community.general.plugins.module_utils.infinibox import HAS_INFINISDK, api_wrapper, get_system, infinibox_argument_spec
+
+
+@api_wrapper
+def get_host(module, system):
+
+    host = None
+
+    for h in system.hosts.to_list():
+        if h.get_name() == module.params['name']:
+            host = h
+            break
+
+    return host
+
+
+@api_wrapper
+def create_host(module, system):
+
+    changed = True
+
+    if not module.check_mode:
+        host = system.hosts.create(name=module.params['name'])
+        if module.params['wwns']:
+            for p in module.params['wwns']:
+                host.add_fc_port(p)
+        if module.params['volume']:
+            host.map_volume(system.volumes.get(name=module.params['volume']))
+    module.exit_json(changed=changed)
+
+
+@api_wrapper
+def update_host(module, host):
+    # Host property updates are not implemented yet, so report no change.
+    changed = False
+    module.exit_json(changed=changed)
+
+
+@api_wrapper
+def delete_host(module, host):
+    changed = True
+    if not module.check_mode:
+        host.delete()
+    module.exit_json(changed=changed)
+
+
+def main():
+    argument_spec = infinibox_argument_spec()
+    argument_spec.update(
+        dict(
+            name=dict(required=True),
+            state=dict(default='present', choices=['present', 'absent']),
+            wwns=dict(type='list'),
+            volume=dict()
+        )
+    )
+
+    module = AnsibleModule(argument_spec, supports_check_mode=True)
+
+    if not HAS_INFINISDK:
+        module.fail_json(msg=missing_required_lib('infinisdk'))
+
+    state = module.params['state']
+    system = get_system(module)
+    host = get_host(module, system)
+
+    if
module.params['volume']:
+        try:
+            system.volumes.get(name=module.params['volume'])
+        except Exception:
+            module.fail_json(msg='Volume {0} not found'.format(module.params['volume']))
+
+    if host and state == 'present':
+        update_host(module, host)
+    elif host and state == 'absent':
+        delete_host(module, host)
+    elif host is None and state == 'absent':
+        module.exit_json(changed=False)
+    else:
+        create_host(module, system)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/storage/infinidat/infini_pool.py b/plugins/modules/storage/infinidat/infini_pool.py
new file mode 100644
index 0000000000..59366869b0
--- /dev/null
+++ b/plugins/modules/storage/infinidat/infini_pool.py
@@ -0,0 +1,215 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2016, Gregory Shulov (gregory.shulov@gmail.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: infini_pool
+short_description: Create, Delete and Modify Pools on Infinibox
+description:
+    - This module creates, deletes or modifies pools on Infinibox.
+author: Gregory Shulov (@GR360RY)
+options:
+  name:
+    description:
+      - Pool Name
+    required: true
+  state:
+    description:
+      - Creates/Modifies Pool when present or removes when absent
+    required: false
+    default: present
+    choices: [ "present", "absent" ]
+  size:
+    description:
+      - Pool Physical Capacity in MB, GB or TB units.
+        If pool size is not set on pool creation, size will be equal to 1TB.
+        See examples.
+    required: false
+  vsize:
+    description:
+      - Pool Virtual Capacity in MB, GB or TB units.
+        If pool vsize is not set on pool creation, Virtual Capacity will be equal to Physical Capacity.
+        See examples.
+    required: false
+  ssd_cache:
+    description:
+      - Enable/Disable SSD Cache on Pool
+    required: false
+    default: yes
+    type: bool
+notes:
+  - Infinibox Admin level access is required for pool modifications
+extends_documentation_fragment:
+- community.general.infinibox
+
+requirements:
+  - capacity
+'''
+
+EXAMPLES = '''
+- name: Make sure pool foo exists.
Set pool physical capacity to 10TB
+  infini_pool:
+    name: foo
+    size: 10TB
+    vsize: 10TB
+    user: admin
+    password: secret
+    system: ibox001
+
+- name: Disable SSD Cache on pool
+  infini_pool:
+    name: foo
+    ssd_cache: no
+    user: admin
+    password: secret
+    system: ibox001
+'''
+
+RETURN = '''
+'''
+import traceback
+
+CAPACITY_IMP_ERR = None
+try:
+    from capacity import KiB, Capacity
+    HAS_CAPACITY = True
+except ImportError:
+    CAPACITY_IMP_ERR = traceback.format_exc()
+    HAS_CAPACITY = False
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible_collections.community.general.plugins.module_utils.infinibox import HAS_INFINISDK, api_wrapper, get_system, infinibox_argument_spec
+
+
+@api_wrapper
+def get_pool(module, system):
+    """Return Pool or None"""
+    try:
+        return system.pools.get(name=module.params['name'])
+    except Exception:
+        return None
+
+
+@api_wrapper
+def create_pool(module, system):
+    """Create Pool"""
+    name = module.params['name']
+    size = module.params['size']
+    vsize = module.params['vsize']
+    ssd_cache = module.params['ssd_cache']
+
+    if not module.check_mode:
+        if not size and not vsize:
+            pool = system.pools.create(name=name, physical_capacity=Capacity('1TB'), virtual_capacity=Capacity('1TB'))
+        elif size and not vsize:
+            pool = system.pools.create(name=name, physical_capacity=Capacity(size), virtual_capacity=Capacity(size))
+        elif not size and vsize:
+            pool = system.pools.create(name=name, physical_capacity=Capacity('1TB'), virtual_capacity=Capacity(vsize))
+        else:
+            pool = system.pools.create(name=name, physical_capacity=Capacity(size), virtual_capacity=Capacity(vsize))
+        # Default value of ssd_cache is True. Disable ssd caching if False
+        if not ssd_cache:
+            pool.update_ssd_enabled(ssd_cache)
+
+    module.exit_json(changed=True)
+
+
+@api_wrapper
+def update_pool(module, system, pool):
+    """Update Pool"""
+    changed = False
+
+    size = module.params['size']
+    vsize = module.params['vsize']
+    ssd_cache = module.params['ssd_cache']
+
+    # Roundup the capacity to mimic Infinibox behaviour
+    if size:
+        physical_capacity = Capacity(size).roundup(6 * 64 * KiB)
+        if pool.get_physical_capacity() != physical_capacity:
+            if not module.check_mode:
+                pool.update_physical_capacity(physical_capacity)
+            changed = True
+
+    if vsize:
+        virtual_capacity = Capacity(vsize).roundup(6 * 64 * KiB)
+        if pool.get_virtual_capacity() != virtual_capacity:
+            if not module.check_mode:
+                pool.update_virtual_capacity(virtual_capacity)
+            changed = True
+
+    if pool.get_ssd_enabled() != ssd_cache:
+        if not module.check_mode:
+            pool.update_ssd_enabled(ssd_cache)
+        changed = True
+
+    module.exit_json(changed=changed)
+
+
+@api_wrapper
+def delete_pool(module, pool):
+    """Delete Pool"""
+    if not module.check_mode:
+        pool.delete()
+    module.exit_json(changed=True)
+
+
+def main():
+    argument_spec = infinibox_argument_spec()
+    argument_spec.update(
+        dict(
+            name=dict(required=True),
+            state=dict(default='present', choices=['present', 'absent']),
+            size=dict(),
+            vsize=dict(),
+            ssd_cache=dict(type='bool', default=True)
+        )
+    )
+
+    module = AnsibleModule(argument_spec, supports_check_mode=True)
+
+    if not HAS_INFINISDK:
+        module.fail_json(msg=missing_required_lib('infinisdk'))
+    if not HAS_CAPACITY:
+        module.fail_json(msg=missing_required_lib('capacity'), exception=CAPACITY_IMP_ERR)
+
+    if module.params['size']:
+        try:
+            Capacity(module.params['size'])
+        except Exception:
+            module.fail_json(msg='size (Physical Capacity) should be defined in MB, GB, TB or PB units')
+
+    if
module.params['vsize']:
+        try:
+            Capacity(module.params['vsize'])
+        except Exception:
+            module.fail_json(msg='vsize (Virtual Capacity) should be defined in MB, GB, TB or PB units')
+
+    state = module.params['state']
+    system = get_system(module)
+    pool = get_pool(module, system)
+
+    if state == 'present' and not pool:
+        create_pool(module, system)
+    elif state == 'present' and pool:
+        update_pool(module, system, pool)
+    elif state == 'absent' and pool:
+        delete_pool(module, pool)
+    elif state == 'absent' and not pool:
+        module.exit_json(changed=False)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/storage/infinidat/infini_vol.py b/plugins/modules/storage/infinidat/infini_vol.py
new file mode 100644
index 0000000000..c5a6f1fff0
--- /dev/null
+++ b/plugins/modules/storage/infinidat/infini_vol.py
@@ -0,0 +1,166 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2016, Gregory Shulov (gregory.shulov@gmail.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: infini_vol
+short_description: Create, Delete or Modify volumes on Infinibox
+description:
+    - This module creates, deletes or modifies volumes on Infinibox.
+author: Gregory Shulov (@GR360RY)
+options:
+  name:
+    description:
+      - Volume Name
+    required: true
+  state:
+    description:
+      - Creates/Modifies volume when present or removes when absent
+    required: false
+    default: present
+    choices: [ "present", "absent" ]
+  size:
+    description:
+      - Volume size in MB, GB or TB units. See examples.
+    required: false
+  pool:
+    description:
+      - Pool that volume will reside on
+    required: true
+extends_documentation_fragment:
+- community.general.infinibox
+
+requirements:
+  - capacity
+'''
+
+EXAMPLES = '''
+- name: Create new volume named foo under pool named bar
+  infini_vol:
+    name: foo
+    size: 1TB
+    pool: bar
+    state: present
+    user: admin
+    password: secret
+    system: ibox001
+'''
+
+RETURN = '''
+'''
+import traceback
+
+CAPACITY_IMP_ERR = None
+try:
+    from capacity import KiB, Capacity
+    HAS_CAPACITY = True
+except ImportError:
+    CAPACITY_IMP_ERR = traceback.format_exc()
+    HAS_CAPACITY = False
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible_collections.community.general.plugins.module_utils.infinibox import HAS_INFINISDK, api_wrapper, get_system, infinibox_argument_spec
+
+
+@api_wrapper
+def get_pool(module, system):
+    """Return Pool or None"""
+    try:
+        return system.pools.get(name=module.params['pool'])
+    except Exception:
+        return None
+
+
+@api_wrapper
+def get_volume(module, system):
+    """Return Volume or None"""
+    try:
+        return system.volumes.get(name=module.params['name'])
+    except Exception:
+        return None
+
+
+@api_wrapper
+def create_volume(module, system):
+    """Create Volume"""
+    if not module.check_mode:
+        volume = system.volumes.create(name=module.params['name'], pool=get_pool(module, system))
+        if module.params['size']:
+            size = Capacity(module.params['size']).roundup(64 * KiB)
+            volume.update_size(size)
+    module.exit_json(changed=True)
+
+
+@api_wrapper
+def update_volume(module, volume):
+    """Update Volume"""
+    changed = False
+    if module.params['size']:
+        size = Capacity(module.params['size']).roundup(64 * KiB)
+        if volume.get_size() != size:
+            if not module.check_mode:
+                volume.update_size(size)
+            changed = True
+
+    module.exit_json(changed=changed)
+
+
+@api_wrapper
+def delete_volume(module, volume):
+    """Delete Volume"""
+    if not module.check_mode:
+        volume.delete()
+    module.exit_json(changed=True)
+
+
+def main():
+    argument_spec = infinibox_argument_spec()
+    argument_spec.update(
+        dict(
+            name=dict(required=True),
+            state=dict(default='present', choices=['present', 'absent']),
+            pool=dict(required=True),
+            size=dict()
+        )
+    )
+
+    module = AnsibleModule(argument_spec, supports_check_mode=True)
+
+    if not HAS_INFINISDK:
+        module.fail_json(msg=missing_required_lib('infinisdk'))
+    # Fail early when the capacity library is missing; Capacity() is used below.
+    if not HAS_CAPACITY:
+        module.fail_json(msg=missing_required_lib('capacity'), exception=CAPACITY_IMP_ERR)
+
+    if module.params['size']:
+        try:
+            Capacity(module.params['size'])
+        except Exception:
+            module.fail_json(msg='size (Physical Capacity) should be defined in MB, GB, TB or PB units')
+
+    state = module.params['state']
+    system = get_system(module)
+    pool = get_pool(module, system)
+    volume = get_volume(module, system)
+
+    if pool is None:
+        module.fail_json(msg='Pool {0} not found'.format(module.params['pool']))
+
+    if state == 'present' and not volume:
+        create_volume(module, system)
+    elif state == 'present' and volume:
+        update_volume(module, volume)
+    elif state == 'absent' and volume:
+        delete_volume(module, volume)
+    elif state == 'absent' and not volume:
+        module.exit_json(changed=False)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/storage/netapp/na_cdot_aggregate.py b/plugins/modules/storage/netapp/na_cdot_aggregate.py
new file mode 100644
index 0000000000..6e3d9d1f72
--- /dev/null
+++ b/plugins/modules/storage/netapp/na_cdot_aggregate.py
@@ -0,0 +1,233 @@
+#!/usr/bin/python
+
+# (c) 2017, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
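+# This module manages aggregates over ONTAPI (ZAPI) calls built with
+# NetApp-Lib NaElement trees; it is deprecated in favour of na_ontap_aggregate.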
+ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['deprecated'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' + +module: na_cdot_aggregate + +short_description: Manage NetApp cDOT aggregates. +extends_documentation_fragment: +- netapp.ontap.netapp.ontap + +author: Sumit Kumar (@timuster) + +deprecated: + removed_in: '2.11' + why: Updated modules released with increased functionality + alternative: Use M(na_ontap_aggregate) instead. + +description: +- Create or destroy aggregates on NetApp cDOT. + +options: + + state: + required: true + description: + - Whether the specified aggregate should exist or not. + choices: ['present', 'absent'] + + name: + required: true + description: + - The name of the aggregate to manage. + + disk_count: + description: + - Number of disks to place into the aggregate, including parity disks. + - The disks in this newly-created aggregate come from the spare disk pool. + - The smallest disks in this pool join the aggregate first, unless the C(disk-size) argument is provided. + - Either C(disk-count) or C(disks) must be supplied. Range [0..2^31-1]. + - Required when C(state=present). + +''' + +EXAMPLES = """ +- name: Manage Aggregates + na_cdot_aggregate: + state: present + name: ansibleAggr + disk_count: 1 + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + +- name: Manage Aggregates + na_cdot_aggregate: + state: present + name: ansibleAggr + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" +""" + +RETURN = """ + +""" +import traceback + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_native +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils + + +HAS_NETAPP_LIB = netapp_utils.has_netapp_lib() + + +class NetAppCDOTAggregate(object): + + def __init__(self): + self.argument_spec = netapp_utils.ontap_sf_host_argument_spec() + self.argument_spec.update(dict( + state=dict(required=True, choices=['present', 'absent']), + name=dict(required=True, type='str'), + disk_count=dict(required=False, type='int'), + )) + + self.module = AnsibleModule( + argument_spec=self.argument_spec, + required_if=[ + ('state', 'present', ['disk_count']) + ], + supports_check_mode=True + ) + + p = self.module.params + + # set up state variables + self.state = p['state'] + self.name = p['name'] + self.disk_count = p['disk_count'] + + if HAS_NETAPP_LIB is False: + self.module.fail_json(msg="the python NetApp-Lib module is required") + else: + self.server = netapp_utils.setup_ontap_zapi(module=self.module) + + def get_aggr(self): + """ + Checks if aggregate exists. + + :return: + True if aggregate found + False if aggregate is not found + :rtype: bool + """ + + aggr_get_iter = netapp_utils.zapi.NaElement('aggr-get-iter') + query_details = netapp_utils.zapi.NaElement.create_node_with_children( + 'aggr-attributes', **{'aggregate-name': self.name}) + + query = netapp_utils.zapi.NaElement('query') + query.add_child_elem(query_details) + aggr_get_iter.add_child_elem(query) + + try: + result = self.server.invoke_successfully(aggr_get_iter, + enable_tunneling=False) + except netapp_utils.zapi.NaApiError as e: + # Error 13040 denotes an aggregate not being found. 
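+            # Treat this error as "aggregate not found" so apply() can decide
+            # whether a create is needed; any other API failure is fatal.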
+ if to_native(e.code) == "13040": + return False + else: + self.module.fail_json(msg=to_native(e), exception=traceback.format_exc()) + + if (result.get_child_by_name('num-records') and + int(result.get_child_content('num-records')) >= 1): + return True + else: + return False + + def create_aggr(self): + aggr_create = netapp_utils.zapi.NaElement.create_node_with_children( + 'aggr-create', **{'aggregate': self.name, + 'disk-count': str(self.disk_count)}) + + try: + self.server.invoke_successfully(aggr_create, + enable_tunneling=False) + except netapp_utils.zapi.NaApiError as e: + self.module.fail_json(msg="Error provisioning aggregate %s: %s" % (self.name, to_native(e)), + exception=traceback.format_exc()) + + def delete_aggr(self): + aggr_destroy = netapp_utils.zapi.NaElement.create_node_with_children( + 'aggr-destroy', **{'aggregate': self.name}) + + try: + self.server.invoke_successfully(aggr_destroy, + enable_tunneling=False) + except netapp_utils.zapi.NaApiError as e: + self.module.fail_json(msg="Error removing aggregate %s: %s" % (self.name, to_native(e)), + exception=traceback.format_exc()) + + def rename_aggregate(self): + aggr_rename = netapp_utils.zapi.NaElement.create_node_with_children( + 'aggr-rename', **{'aggregate': self.name, + 'new-aggregate-name': + self.name}) + + try: + self.server.invoke_successfully(aggr_rename, + enable_tunneling=False) + except netapp_utils.zapi.NaApiError as e: + self.module.fail_json(msg="Error renaming aggregate %s: %s" % (self.name, to_native(e)), + exception=traceback.format_exc()) + + def apply(self): + changed = False + aggregate_exists = self.get_aggr() + rename_aggregate = False + + # check if anything needs to be changed (add/delete/update) + + if aggregate_exists: + if self.state == 'absent': + changed = True + + elif self.state == 'present': + if self.name is not None and not self.name == self.name: + rename_aggregate = True + changed = True + + else: + if self.state == 'present': + # Aggregate does not exist, but requested state is present. + changed = True + + if changed: + if self.module.check_mode: + pass + else: + if self.state == 'present': + if not aggregate_exists: + self.create_aggr() + + else: + if rename_aggregate: + self.rename_aggregate() + + elif self.state == 'absent': + self.delete_aggr() + + self.module.exit_json(changed=changed) + + +def main(): + v = NetAppCDOTAggregate() + v.apply() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/storage/netapp/na_cdot_license.py b/plugins/modules/storage/netapp/na_cdot_license.py new file mode 100644 index 0000000000..9b844ba381 --- /dev/null +++ b/plugins/modules/storage/netapp/na_cdot_license.py @@ -0,0 +1,299 @@ +#!/usr/bin/python + +# (c) 2017, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['deprecated'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' + +module: na_cdot_license + +short_description: Manage NetApp cDOT protocol and feature licenses +extends_documentation_fragment: +- netapp.ontap.netapp.ontap + +author: Sumit Kumar (@timuster) + +deprecated: + removed_in: '2.11' + why: Updated modules released with increased functionality + alternative: Use M(na_ontap_license) instead. + +description: +- Add or remove licenses on NetApp ONTAP. 
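+# A license value of 'remove' deletes that package; any other non-empty value
+# is submitted verbatim as a license code (see the second example below).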
+ +options: + + remove_unused: + description: + - Remove licenses that have no controller affiliation in the cluster. + type: bool + + remove_expired: + description: + - Remove licenses that have expired in the cluster. + type: bool + + serial_number: + description: + - Serial number of the node associated with the license. + - This parameter is used primarily when removing license for a specific service. + - If this parameter is not provided, the cluster serial number is used by default. + + licenses: + description: + - List of licenses to add or remove. + - Please note that trying to remove a non-existent license will throw an error. + suboptions: + base: + description: + - Cluster Base License + nfs: + description: + - NFS License + cifs: + description: + - CIFS License + iscsi: + description: + - iSCSI License + fcp: + description: + - FCP License + cdmi: + description: + - CDMI License + snaprestore: + description: + - SnapRestore License + snapmirror: + description: + - SnapMirror License + flexclone: + description: + - FlexClone License + snapvault: + description: + - SnapVault License + snaplock: + description: + - SnapLock License + snapmanagersuite: + description: + - SnapManagerSuite License + snapprotectapps: + description: + - SnapProtectApp License + v_storageattach: + description: + - Virtual Attached Storage License + +''' + + +EXAMPLES = """ +- name: Add licenses + na_cdot_license: + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + serial_number: ################# + licenses: + nfs: ################# + cifs: ################# + iscsi: ################# + fcp: ################# + snaprestore: ################# + flexclone: ################# + +- name: Remove licenses + na_cdot_license: + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + remove_unused: false + remove_expired: true + serial_number: ################# + licenses: + nfs: remove +""" + +RETURN = """ + +""" +import traceback + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_native +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils + + +HAS_NETAPP_LIB = netapp_utils.has_netapp_lib() + + +class NetAppCDOTLicense(object): + + def __init__(self): + self.argument_spec = netapp_utils.ontap_sf_host_argument_spec() + self.argument_spec.update(dict( + serial_number=dict(required=False, type='str', default=None), + remove_unused=dict(default=False, type='bool'), + remove_expired=dict(default=False, type='bool'), + licenses=dict(default=False, type='dict'), + )) + + self.module = AnsibleModule( + argument_spec=self.argument_spec, + supports_check_mode=False + ) + + p = self.module.params + + # set up state variables + self.serial_number = p['serial_number'] + self.remove_unused = p['remove_unused'] + self.remove_expired = p['remove_expired'] + self.licenses = p['licenses'] + + if HAS_NETAPP_LIB is False: + self.module.fail_json(msg="the python NetApp-Lib module is required") + else: + self.server = netapp_utils.setup_ontap_zapi(module=self.module) + + def get_licensing_status(self): + """ + Check licensing status + + :return: package (key) and licensing status (value) + :rtype: dict + """ + license_status = netapp_utils.zapi.NaElement('license-v2-status-list-info') + result = None + try: + result = self.server.invoke_successfully(license_status, + enable_tunneling=False) + except netapp_utils.zapi.NaApiError as e: + 
self.module.fail_json(msg="Error checking license status: %s" % + to_native(e), exception=traceback.format_exc()) + + return_dictionary = {} + license_v2_status = result.get_child_by_name('license-v2-status') + if license_v2_status: + for license_v2_status_info in license_v2_status.get_children(): + package = license_v2_status_info.get_child_content('package') + status = license_v2_status_info.get_child_content('method') + return_dictionary[package] = status + + return return_dictionary + + def remove_licenses(self, remove_list): + """ + Remove requested licenses + :param: + remove_list : List of packages to remove + + """ + license_delete = netapp_utils.zapi.NaElement('license-v2-delete') + for package in remove_list: + license_delete.add_new_child('package', package) + + if self.serial_number is not None: + license_delete.add_new_child('serial-number', self.serial_number) + + try: + self.server.invoke_successfully(license_delete, + enable_tunneling=False) + except netapp_utils.zapi.NaApiError as e: + self.module.fail_json(msg="Error removing license %s" % + to_native(e), exception=traceback.format_exc()) + + def remove_unused_licenses(self): + """ + Remove unused licenses + """ + remove_unused = netapp_utils.zapi.NaElement('license-v2-delete-unused') + try: + self.server.invoke_successfully(remove_unused, + enable_tunneling=False) + except netapp_utils.zapi.NaApiError as e: + self.module.fail_json(msg="Error removing unused licenses: %s" % + to_native(e), exception=traceback.format_exc()) + + def remove_expired_licenses(self): + """ + Remove expired licenses + """ + remove_expired = netapp_utils.zapi.NaElement('license-v2-delete-expired') + try: + self.server.invoke_successfully(remove_expired, + enable_tunneling=False) + except netapp_utils.zapi.NaApiError as e: + self.module.fail_json(msg="Error removing expired licenses: %s" % + to_native(e), exception=traceback.format_exc()) + + def update_licenses(self): + """ + Update licenses + """ + # Remove unused and expired licenses, if requested. + if self.remove_unused: + self.remove_unused_licenses() + + if self.remove_expired: + self.remove_expired_licenses() + + # Next, add/remove specific requested licenses. + license_add = netapp_utils.zapi.NaElement('license-v2-add') + codes = netapp_utils.zapi.NaElement('codes') + remove_list = [] + for key, value in self.licenses.items(): + str_value = str(value) + # Make sure license is not an empty string. + if str_value and str_value.strip(): + if str_value.lower() == 'remove': + remove_list.append(str(key).lower()) + else: + codes.add_new_child('license-code-v2', str_value) + + # Remove requested licenses. + if len(remove_list) != 0: + self.remove_licenses(remove_list) + + # Add requested licenses + if len(codes.get_children()) != 0: + license_add.add_child_elem(codes) + try: + self.server.invoke_successfully(license_add, + enable_tunneling=False) + except netapp_utils.zapi.NaApiError as e: + self.module.fail_json(msg="Error adding licenses: %s" % + to_native(e), exception=traceback.format_exc()) + + def apply(self): + changed = False + # Add / Update licenses. 
+ license_status = self.get_licensing_status() + self.update_licenses() + new_license_status = self.get_licensing_status() + + if license_status != new_license_status: + changed = True + + self.module.exit_json(changed=changed) + + +def main(): + v = NetAppCDOTLicense() + v.apply() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/storage/netapp/na_cdot_lun.py b/plugins/modules/storage/netapp/na_cdot_lun.py new file mode 100644 index 0000000000..8fcd1c8d40 --- /dev/null +++ b/plugins/modules/storage/netapp/na_cdot_lun.py @@ -0,0 +1,378 @@ +#!/usr/bin/python + +# (c) 2017, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['deprecated'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' + +module: na_cdot_lun + +short_description: Manage NetApp cDOT luns +extends_documentation_fragment: +- netapp.ontap.netapp.ontap + +author: Sumit Kumar (@timuster) + +deprecated: + removed_in: '2.11' + why: Updated modules released with increased functionality + alternative: Use M(na_ontap_lun) instead. + +description: +- Create, destroy, resize luns on NetApp cDOT. + +options: + + state: + description: + - Whether the specified lun should exist or not. + required: true + choices: ['present', 'absent'] + + name: + description: + - The name of the lun to manage. + required: true + + flexvol_name: + description: + - The name of the FlexVol the lun should exist on. + - Required when C(state=present). + + size: + description: + - The size of the lun in C(size_unit). + - Required when C(state=present). + + size_unit: + description: + - The unit used to interpret the size parameter. + choices: ['bytes', 'b', 'kb', 'mb', 'gb', 'tb', 'pb', 'eb', 'zb', 'yb'] + default: 'gb' + + force_resize: + description: + - Forcibly reduce the size. This is required for reducing the size of the LUN to avoid accidentally reducing the LUN size. + default: false + + force_remove: + description: + - If "true", override checks that prevent a LUN from being destroyed if it is online and mapped. + - If "false", destroying an online and mapped LUN will fail. + default: false + + force_remove_fenced: + description: + - If "true", override checks that prevent a LUN from being destroyed while it is fenced. + - If "false", attempting to destroy a fenced LUN will fail. + - The default if not specified is "false". This field is available in Data ONTAP 8.2 and later. + default: false + + vserver: + required: true + description: + - The name of the vserver to use. 
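+# Note: size_unit multipliers are 1024-based, so 'gb' means 1024 ** 3 bytes.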
+ +''' + +EXAMPLES = """ +- name: Create LUN + na_cdot_lun: + state: present + name: ansibleLUN + flexvol_name: ansibleVolume + vserver: ansibleVServer + size: 5 + size_unit: mb + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + +- name: Resize Lun + na_cdot_lun: + state: present + name: ansibleLUN + force_resize: True + flexvol_name: ansibleVolume + vserver: ansibleVServer + size: 5 + size_unit: gb + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" +""" + +RETURN = """ + +""" +import traceback + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_native +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils + +HAS_NETAPP_LIB = netapp_utils.has_netapp_lib() + + +class NetAppCDOTLUN(object): + + def __init__(self): + + self._size_unit_map = dict( + bytes=1, + b=1, + kb=1024, + mb=1024 ** 2, + gb=1024 ** 3, + tb=1024 ** 4, + pb=1024 ** 5, + eb=1024 ** 6, + zb=1024 ** 7, + yb=1024 ** 8 + ) + + self.argument_spec = netapp_utils.ontap_sf_host_argument_spec() + self.argument_spec.update(dict( + state=dict(required=True, choices=['present', 'absent']), + name=dict(required=True, type='str'), + size=dict(type='int'), + size_unit=dict(default='gb', + choices=['bytes', 'b', 'kb', 'mb', 'gb', 'tb', + 'pb', 'eb', 'zb', 'yb'], type='str'), + force_resize=dict(default=False, type='bool'), + force_remove=dict(default=False, type='bool'), + force_remove_fenced=dict(default=False, type='bool'), + flexvol_name=dict(type='str'), + vserver=dict(required=True, type='str'), + )) + + self.module = AnsibleModule( + argument_spec=self.argument_spec, + required_if=[ + ('state', 'present', ['flexvol_name', 'size']) + ], + supports_check_mode=True + ) + + p = self.module.params + + # set up state variables + self.state = p['state'] + self.name = p['name'] + self.size_unit = p['size_unit'] + if p['size'] is not None: + self.size = p['size'] * self._size_unit_map[self.size_unit] + else: + self.size = None + self.force_resize = p['force_resize'] + self.force_remove = p['force_remove'] + self.force_remove_fenced = p['force_remove_fenced'] + self.flexvol_name = p['flexvol_name'] + self.vserver = p['vserver'] + + if HAS_NETAPP_LIB is False: + self.module.fail_json(msg="the python NetApp-Lib module is required") + else: + self.server = netapp_utils.setup_ontap_zapi(module=self.module, vserver=self.vserver) + + def get_lun(self): + """ + Return details about the LUN + + :return: Details about the lun + :rtype: dict + """ + + luns = [] + tag = None + while True: + lun_info = netapp_utils.zapi.NaElement('lun-get-iter') + if tag: + lun_info.add_new_child('tag', tag, True) + + query_details = netapp_utils.zapi.NaElement('lun-info') + query_details.add_new_child('vserver', self.vserver) + query_details.add_new_child('volume', self.flexvol_name) + + query = netapp_utils.zapi.NaElement('query') + query.add_child_elem(query_details) + + lun_info.add_child_elem(query) + + result = self.server.invoke_successfully(lun_info, True) + if result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) >= 1: + attr_list = result.get_child_by_name('attributes-list') + luns.extend(attr_list.get_children()) + + tag = result.get_child_content('next-tag') + + if tag is None: + break + + # The LUNs have been extracted. + # Find the specified lun and extract details. 
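+        # Each record carries the full LUN path (for example /vol/<flexvol>/<lun>)
+        # rather than a bare name, so match on the last path component.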
+ return_value = None + for lun in luns: + path = lun.get_child_content('path') + _rest, _splitter, found_name = path.rpartition('/') + + if found_name == self.name: + size = lun.get_child_content('size') + + # Find out if the lun is attached + attached_to = None + lun_id = None + if lun.get_child_content('mapped') == 'true': + lun_map_list = netapp_utils.zapi.NaElement.create_node_with_children( + 'lun-map-list-info', **{'path': path}) + + result = self.server.invoke_successfully( + lun_map_list, enable_tunneling=True) + + igroups = result.get_child_by_name('initiator-groups') + if igroups: + for igroup_info in igroups.get_children(): + igroup = igroup_info.get_child_content( + 'initiator-group-name') + attached_to = igroup + lun_id = igroup_info.get_child_content('lun-id') + + return_value = { + 'name': found_name, + 'size': size, + 'attached_to': attached_to, + 'lun_id': lun_id + } + else: + continue + + return return_value + + def create_lun(self): + """ + Create LUN with requested name and size + """ + path = '/vol/%s/%s' % (self.flexvol_name, self.name) + lun_create = netapp_utils.zapi.NaElement.create_node_with_children( + 'lun-create-by-size', **{'path': path, + 'size': str(self.size), + 'ostype': 'linux'}) + + try: + self.server.invoke_successfully(lun_create, enable_tunneling=True) + except netapp_utils.zapi.NaApiError as e: + self.module.fail_json(msg="Error provisioning lun %s of size %s: %s" % (self.name, self.size, to_native(e)), + exception=traceback.format_exc()) + + def delete_lun(self): + """ + Delete requested LUN + """ + path = '/vol/%s/%s' % (self.flexvol_name, self.name) + + lun_delete = netapp_utils.zapi.NaElement.create_node_with_children( + 'lun-destroy', **{'path': path, + 'force': str(self.force_remove), + 'destroy-fenced-lun': + str(self.force_remove_fenced)}) + + try: + self.server.invoke_successfully(lun_delete, enable_tunneling=True) + except netapp_utils.zapi.NaApiError as e: + self.module.fail_json(msg="Error deleting lun %s: %s" % (path, to_native(e)), + exception=traceback.format_exc()) + + def resize_lun(self): + """ + Resize requested LUN. + + :return: True if LUN was actually re-sized, false otherwise. + :rtype: bool + """ + path = '/vol/%s/%s' % (self.flexvol_name, self.name) + + lun_resize = netapp_utils.zapi.NaElement.create_node_with_children( + 'lun-resize', **{'path': path, + 'size': str(self.size), + 'force': str(self.force_resize)}) + try: + self.server.invoke_successfully(lun_resize, enable_tunneling=True) + except netapp_utils.zapi.NaApiError as e: + if to_native(e.code) == "9042": + # Error 9042 denotes the new LUN size being the same as the + # old LUN size. This happens when there's barely any difference + # in the two sizes. For example, from 8388608 bytes to + # 8194304 bytes. This should go away if/when the default size + # requested/reported to/from the controller is changed to a + # larger unit (MB/GB/TB). 
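+                # Report "not resized" so apply() does not record a change.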
+ return False + else: + self.module.fail_json(msg="Error resizing lun %s: %s" % (path, to_native(e)), + exception=traceback.format_exc()) + + return True + + def apply(self): + property_changed = False + multiple_properties_changed = False + size_changed = False + lun_exists = False + lun_detail = self.get_lun() + + if lun_detail: + lun_exists = True + current_size = lun_detail['size'] + + if self.state == 'absent': + property_changed = True + + elif self.state == 'present': + if not int(current_size) == self.size: + size_changed = True + property_changed = True + + else: + if self.state == 'present': + property_changed = True + + if property_changed: + if self.module.check_mode: + pass + else: + if self.state == 'present': + if not lun_exists: + self.create_lun() + + else: + if size_changed: + # Ensure that size was actually changed. Please + # read notes in 'resize_lun' function for details. + size_changed = self.resize_lun() + if not size_changed and not \ + multiple_properties_changed: + property_changed = False + + elif self.state == 'absent': + self.delete_lun() + + changed = property_changed or size_changed + # TODO: include other details about the lun (size, etc.) + self.module.exit_json(changed=changed) + + +def main(): + v = NetAppCDOTLUN() + v.apply() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/storage/netapp/na_cdot_qtree.py b/plugins/modules/storage/netapp/na_cdot_qtree.py new file mode 100644 index 0000000000..ff3b4a1109 --- /dev/null +++ b/plugins/modules/storage/netapp/na_cdot_qtree.py @@ -0,0 +1,239 @@ +#!/usr/bin/python + +# (c) 2017, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['deprecated'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' + +module: na_cdot_qtree + +short_description: Manage qtrees +extends_documentation_fragment: +- netapp.ontap.netapp.ontap + +author: Sumit Kumar (@timuster) + +deprecated: + removed_in: '2.11' + why: Updated modules released with increased functionality + alternative: Use M(na_ontap_qtree) instead. + +description: +- Create or destroy Qtrees. + +options: + + state: + description: + - Whether the specified Qtree should exist or not. + required: true + choices: ['present', 'absent'] + + name: + description: + - The name of the Qtree to manage. + required: true + + flexvol_name: + description: + - The name of the FlexVol the Qtree should exist on. Required when C(state=present). + + vserver: + description: + - The name of the vserver to use. 
+ required: true + +''' + +EXAMPLES = """ +- name: Create QTree + na_cdot_qtree: + state: present + name: ansibleQTree + flexvol_name: ansibleVolume + vserver: ansibleVServer + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + +- name: Rename QTree + na_cdot_qtree: + state: present + name: ansibleQTree + flexvol_name: ansibleVolume + vserver: ansibleVServer + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" +""" + +RETURN = """ + +""" +import traceback + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_native +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils + + +HAS_NETAPP_LIB = netapp_utils.has_netapp_lib() + + +class NetAppCDOTQTree(object): + + def __init__(self): + self.argument_spec = netapp_utils.ontap_sf_host_argument_spec() + self.argument_spec.update(dict( + state=dict(required=True, choices=['present', 'absent']), + name=dict(required=True, type='str'), + flexvol_name=dict(type='str'), + vserver=dict(required=True, type='str'), + )) + + self.module = AnsibleModule( + argument_spec=self.argument_spec, + required_if=[ + ('state', 'present', ['flexvol_name']) + ], + supports_check_mode=True + ) + + p = self.module.params + + # set up state variables + self.state = p['state'] + self.name = p['name'] + self.flexvol_name = p['flexvol_name'] + self.vserver = p['vserver'] + + if HAS_NETAPP_LIB is False: + self.module.fail_json(msg="the python NetApp-Lib module is required") + else: + self.server = netapp_utils.setup_ontap_zapi(module=self.module, vserver=self.vserver) + + def get_qtree(self): + """ + Checks if the qtree exists. + + :return: + True if qtree found + False if qtree is not found + :rtype: bool + """ + + qtree_list_iter = netapp_utils.zapi.NaElement('qtree-list-iter') + query_details = netapp_utils.zapi.NaElement.create_node_with_children( + 'qtree-info', **{'vserver': self.vserver, + 'volume': self.flexvol_name, + 'qtree': self.name}) + + query = netapp_utils.zapi.NaElement('query') + query.add_child_elem(query_details) + qtree_list_iter.add_child_elem(query) + + result = self.server.invoke_successfully(qtree_list_iter, + enable_tunneling=True) + + if (result.get_child_by_name('num-records') and + int(result.get_child_content('num-records')) >= 1): + return True + else: + return False + + def create_qtree(self): + qtree_create = netapp_utils.zapi.NaElement.create_node_with_children( + 'qtree-create', **{'volume': self.flexvol_name, + 'qtree': self.name}) + + try: + self.server.invoke_successfully(qtree_create, + enable_tunneling=True) + except netapp_utils.zapi.NaApiError as e: + self.module.fail_json(msg="Error provisioning qtree %s: %s" % (self.name, to_native(e)), + exception=traceback.format_exc()) + + def delete_qtree(self): + path = '/vol/%s/%s' % (self.flexvol_name, self.name) + qtree_delete = netapp_utils.zapi.NaElement.create_node_with_children( + 'qtree-delete', **{'qtree': path}) + + try: + self.server.invoke_successfully(qtree_delete, + enable_tunneling=True) + except netapp_utils.zapi.NaApiError as e: + self.module.fail_json(msg="Error deleting qtree %s: %s" % (path, to_native(e)), + exception=traceback.format_exc()) + + def rename_qtree(self): + path = '/vol/%s/%s' % (self.flexvol_name, self.name) + new_path = '/vol/%s/%s' % (self.flexvol_name, self.name) + qtree_rename = netapp_utils.zapi.NaElement.create_node_with_children( + 'qtree-rename', **{'qtree': path, + 
'new-qtree-name': new_path}) + + try: + self.server.invoke_successfully(qtree_rename, + enable_tunneling=True) + except netapp_utils.zapi.NaApiError as e: + self.module.fail_json(msg="Error renaming qtree %s: %s" % (self.name, to_native(e)), + exception=traceback.format_exc()) + + def apply(self): + changed = False + qtree_exists = False + rename_qtree = False + qtree_detail = self.get_qtree() + + if qtree_detail: + qtree_exists = True + + if self.state == 'absent': + # Qtree exists, but requested state is 'absent'. + changed = True + + elif self.state == 'present': + if self.name is not None and not self.name == \ + self.name: + changed = True + rename_qtree = True + + else: + if self.state == 'present': + # Qtree does not exist, but requested state is 'present'. + changed = True + + if changed: + if self.module.check_mode: + pass + else: + if self.state == 'present': + if not qtree_exists: + self.create_qtree() + + else: + if rename_qtree: + self.rename_qtree() + + elif self.state == 'absent': + self.delete_qtree() + + self.module.exit_json(changed=changed) + + +def main(): + v = NetAppCDOTQTree() + v.apply() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/storage/netapp/na_cdot_svm.py b/plugins/modules/storage/netapp/na_cdot_svm.py new file mode 100644 index 0000000000..16aa0e56ff --- /dev/null +++ b/plugins/modules/storage/netapp/na_cdot_svm.py @@ -0,0 +1,251 @@ +#!/usr/bin/python + +# (c) 2017, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['deprecated'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' + +module: na_cdot_svm + +short_description: Manage NetApp cDOT svm +extends_documentation_fragment: +- netapp.ontap.netapp.ontap + +author: Sumit Kumar (@timuster) + +deprecated: + removed_in: '2.11' + why: Updated modules released with increased functionality + alternative: Use M(na_ontap_svm) instead. + +description: +- Create or destroy svm on NetApp cDOT + +options: + + state: + description: + - Whether the specified SVM should exist or not. + required: true + choices: ['present', 'absent'] + + name: + description: + - The name of the SVM to manage. + required: true + + root_volume: + description: + - Root volume of the SVM. Required when C(state=present). + + root_volume_aggregate: + description: + - The aggregate on which the root volume will be created. + - Required when C(state=present). + + root_volume_security_style: + description: + - Security Style of the root volume. + - When specified as part of the vserver-create, this field represents the security style for the Vserver root volume. + - When specified as part of vserver-get-iter call, this will return the list of matching Vservers. + - Possible values are 'unix', 'ntfs', 'mixed'. + - The 'unified' security style, which applies only to Infinite Volumes, cannot be applied to a Vserver's root volume. + - Valid options are "unix" for NFS, "ntfs" for CIFS, "mixed" for Mixed, "unified" for Unified. 
+ - Required when C(state=present) + choices: ['unix', 'ntfs', 'mixed', 'unified'] + +''' + +EXAMPLES = """ + + - name: Create SVM + na_cdot_svm: + state: present + name: ansibleVServer + root_volume: vol1 + root_volume_aggregate: aggr1 + root_volume_security_style: mixed + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + +""" + +RETURN = """ + +""" +import traceback + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_native +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils + + +HAS_NETAPP_LIB = netapp_utils.has_netapp_lib() + + +class NetAppCDOTSVM(object): + + def __init__(self): + self.argument_spec = netapp_utils.ontap_sf_host_argument_spec() + self.argument_spec.update(dict( + state=dict(required=True, choices=['present', 'absent']), + name=dict(required=True, type='str'), + root_volume=dict(type='str'), + root_volume_aggregate=dict(type='str'), + root_volume_security_style=dict(type='str', choices=['unix', + 'ntfs', + 'mixed', + 'unified' + ]), + )) + + self.module = AnsibleModule( + argument_spec=self.argument_spec, + required_if=[ + ('state', 'present', ['root_volume', + 'root_volume_aggregate', + 'root_volume_security_style']) + ], + supports_check_mode=True + ) + + p = self.module.params + + # set up state variables + self.state = p['state'] + self.name = p['name'] + self.root_volume = p['root_volume'] + self.root_volume_aggregate = p['root_volume_aggregate'] + self.root_volume_security_style = p['root_volume_security_style'] + + if HAS_NETAPP_LIB is False: + self.module.fail_json(msg="the python NetApp-Lib module is required") + else: + self.server = netapp_utils.setup_ontap_zapi(module=self.module) + + def get_vserver(self): + """ + Checks if vserver exists. + + :return: + True if vserver found + False if vserver is not found + :rtype: bool + """ + + vserver_info = netapp_utils.zapi.NaElement('vserver-get-iter') + query_details = netapp_utils.zapi.NaElement.create_node_with_children( + 'vserver-info', **{'vserver-name': self.name}) + + query = netapp_utils.zapi.NaElement('query') + query.add_child_elem(query_details) + vserver_info.add_child_elem(query) + + result = self.server.invoke_successfully(vserver_info, + enable_tunneling=False) + + if (result.get_child_by_name('num-records') and + int(result.get_child_content('num-records')) >= 1): + + """ + TODO: + Return more relevant parameters about vserver that can + be updated by the playbook. 
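+        Until then, an existing vserver is treated as up to date and apply()
+        only creates or destroys vservers.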
+ """ + return True + else: + return False + + def create_vserver(self): + vserver_create = netapp_utils.zapi.NaElement.create_node_with_children( + 'vserver-create', **{'vserver-name': self.name, + 'root-volume': self.root_volume, + 'root-volume-aggregate': + self.root_volume_aggregate, + 'root-volume-security-style': + self.root_volume_security_style + }) + + try: + self.server.invoke_successfully(vserver_create, + enable_tunneling=False) + except netapp_utils.zapi.NaApiError as e: + self.module.fail_json(msg='Error provisioning SVM %s with root volume %s on aggregate %s: %s' + % (self.name, self.root_volume, self.root_volume_aggregate, to_native(e)), + exception=traceback.format_exc()) + + def delete_vserver(self): + vserver_delete = netapp_utils.zapi.NaElement.create_node_with_children( + 'vserver-destroy', **{'vserver-name': self.name}) + + try: + self.server.invoke_successfully(vserver_delete, + enable_tunneling=False) + except netapp_utils.zapi.NaApiError as e: + self.module.fail_json(msg='Error deleting SVM %s with root volume %s on aggregate %s: %s' + % (self.name, self.root_volume, self.root_volume_aggregate, to_native(e)), + exception=traceback.format_exc()) + + def rename_vserver(self): + vserver_rename = netapp_utils.zapi.NaElement.create_node_with_children( + 'vserver-rename', **{'vserver-name': self.name, + 'new-name': self.name}) + + try: + self.server.invoke_successfully(vserver_rename, + enable_tunneling=False) + except netapp_utils.zapi.NaApiError as e: + self.module.fail_json(msg='Error renaming SVM %s: %s' % (self.name, to_native(e)), + exception=traceback.format_exc()) + + def apply(self): + changed = False + vserver_exists = self.get_vserver() + rename_vserver = False + if vserver_exists: + if self.state == 'absent': + changed = True + + elif self.state == 'present': + # Update properties + pass + + else: + if self.state == 'present': + changed = True + + if changed: + if self.module.check_mode: + pass + else: + if self.state == 'present': + if not vserver_exists: + self.create_vserver() + + else: + if rename_vserver: + self.rename_vserver() + + elif self.state == 'absent': + self.delete_vserver() + + self.module.exit_json(changed=changed) + + +def main(): + v = NetAppCDOTSVM() + v.apply() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/storage/netapp/na_cdot_user.py b/plugins/modules/storage/netapp/na_cdot_user.py new file mode 100644 index 0000000000..5a1062cd0e --- /dev/null +++ b/plugins/modules/storage/netapp/na_cdot_user.py @@ -0,0 +1,306 @@ +#!/usr/bin/python + +# (c) 2017, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['deprecated'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' + +module: na_cdot_user + +short_description: useradmin configuration and management +extends_documentation_fragment: +- netapp.ontap.netapp.ontap + +author: Sumit Kumar (@timuster) + +deprecated: + removed_in: '2.11' + why: Updated modules released with increased functionality + alternative: Use M(na_ontap_user) instead. + +description: +- Create or destroy users. + +options: + + state: + description: + - Whether the specified user should exist or not. + required: true + choices: ['present', 'absent'] + + name: + description: + - The name of the user to manage. 
+ required: true + + application: + description: + - Applications to grant access to. + required: true + choices: ['console', 'http','ontapi','rsh','snmp','sp','ssh','telnet'] + + authentication_method: + description: + - Authentication method for the application. + - Not all authentication methods are valid for an application. + - Valid authentication methods for each application are as denoted in I(authentication_choices_description). + - password for console application + - password, domain, nsswitch, cert for http application. + - password, domain, nsswitch, cert for ontapi application. + - community for snmp application (when creating SNMPv1 and SNMPv2 users). + - usm and community for snmp application (when creating SNMPv3 users). + - password for sp application. + - password for rsh application. + - password for telnet application. + - password, publickey, domain, nsswitch for ssh application. + required: true + choices: ['community', 'password', 'publickey', 'domain', 'nsswitch', 'usm'] + + set_password: + description: + - Password for the user account. + - It is ignored for creating snmp users, but is required for creating non-snmp users. + - For an existing user, this value will be used as the new password. + + role_name: + description: + - The name of the role. Required when C(state=present) + + + vserver: + description: + - The name of the vserver to use. + required: true + +''' + +EXAMPLES = """ + + - name: Create User + na_cdot_user: + state: present + name: SampleUser + application: ssh + authentication_method: password + set_password: apn1242183u1298u41 + role_name: vsadmin + vserver: ansibleVServer + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + +""" + +RETURN = """ + +""" +import traceback + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_native +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils + + +HAS_NETAPP_LIB = netapp_utils.has_netapp_lib() + + +class NetAppCDOTUser(object): + """ + Common operations to manage users and roles. + """ + + def __init__(self): + self.argument_spec = netapp_utils.ontap_sf_host_argument_spec() + self.argument_spec.update(dict( + state=dict(required=True, choices=['present', 'absent']), + name=dict(required=True, type='str'), + + application=dict(required=True, type='str', choices=[ + 'console', 'http', 'ontapi', 'rsh', + 'snmp', 'sp', 'ssh', 'telnet']), + authentication_method=dict(required=True, type='str', + choices=['community', 'password', + 'publickey', 'domain', + 'nsswitch', 'usm']), + set_password=dict(required=False, type='str', default=None), + role_name=dict(required=False, type='str'), + + vserver=dict(required=True, type='str'), + )) + + self.module = AnsibleModule( + argument_spec=self.argument_spec, + required_if=[ + ('state', 'present', ['role_name']) + ], + supports_check_mode=True + ) + + p = self.module.params + + # set up state variables + self.state = p['state'] + self.name = p['name'] + + self.application = p['application'] + self.authentication_method = p['authentication_method'] + self.set_password = p['set_password'] + self.role_name = p['role_name'] + + self.vserver = p['vserver'] + + if HAS_NETAPP_LIB is False: + self.module.fail_json(msg="the python NetApp-Lib module is required") + else: + self.server = netapp_utils.setup_ontap_zapi(module=self.module) + + def get_user(self): + """ + Checks if the user exists. 
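+        A login is matched on vserver, user name, application and
+        authentication method, so the same user name can exist once per
+        application/authentication pair.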
+ + :return: + True if user found + False if user is not found + :rtype: bool + """ + + security_login_get_iter = netapp_utils.zapi.NaElement('security-login-get-iter') + query_details = netapp_utils.zapi.NaElement.create_node_with_children( + 'security-login-account-info', **{'vserver': self.vserver, + 'user-name': self.name, + 'application': self.application, + 'authentication-method': + self.authentication_method}) + + query = netapp_utils.zapi.NaElement('query') + query.add_child_elem(query_details) + security_login_get_iter.add_child_elem(query) + + try: + result = self.server.invoke_successfully(security_login_get_iter, + enable_tunneling=False) + + if result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) >= 1: + return True + else: + return False + + except netapp_utils.zapi.NaApiError as e: + # Error 16034 denotes a user not being found. + if to_native(e.code) == "16034": + return False + else: + self.module.fail_json(msg='Error getting user %s: %s' % (self.name, to_native(e)), + exception=traceback.format_exc()) + + def create_user(self): + user_create = netapp_utils.zapi.NaElement.create_node_with_children( + 'security-login-create', **{'vserver': self.vserver, + 'user-name': self.name, + 'application': self.application, + 'authentication-method': + self.authentication_method, + 'role-name': self.role_name}) + if self.set_password is not None: + user_create.add_new_child('password', self.set_password) + + try: + self.server.invoke_successfully(user_create, + enable_tunneling=False) + except netapp_utils.zapi.NaApiError as e: + self.module.fail_json(msg='Error creating user %s: %s' % (self.name, to_native(e)), + exception=traceback.format_exc()) + + def delete_user(self): + user_delete = netapp_utils.zapi.NaElement.create_node_with_children( + 'security-login-delete', **{'vserver': self.vserver, + 'user-name': self.name, + 'application': self.application, + 'authentication-method': + self.authentication_method}) + + try: + self.server.invoke_successfully(user_delete, + enable_tunneling=False) + except netapp_utils.zapi.NaApiError as e: + self.module.fail_json(msg='Error removing user %s: %s' % (self.name, to_native(e)), + exception=traceback.format_exc()) + + def change_password(self): + """ + Changes the password + + :return: + True if password updated + False if password is not updated + :rtype: bool + """ + self.server.set_vserver(self.vserver) + modify_password = netapp_utils.zapi.NaElement.create_node_with_children( + 'security-login-modify-password', **{ + 'new-password': str(self.set_password), + 'user-name': self.name}) + try: + self.server.invoke_successfully(modify_password, + enable_tunneling=True) + except netapp_utils.zapi.NaApiError as e: + if to_native(e.code) == '13114': + return False + else: + self.module.fail_json(msg='Error setting password for user %s: %s' % (self.name, to_native(e)), + exception=traceback.format_exc()) + + self.server.set_vserver(None) + return True + + def apply(self): + property_changed = False + password_changed = False + user_exists = self.get_user() + + if user_exists: + if self.state == 'absent': + property_changed = True + + elif self.state == 'present': + if self.set_password is not None: + password_changed = self.change_password() + else: + if self.state == 'present': + # Check if anything needs to be updated + property_changed = True + + if property_changed: + if self.module.check_mode: + pass + else: + if self.state == 'present': + if not user_exists: + self.create_user() + + # Add ability to 
update parameters. + + elif self.state == 'absent': + self.delete_user() + + changed = property_changed or password_changed + self.module.exit_json(changed=changed) + + +def main(): + v = NetAppCDOTUser() + v.apply() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/storage/netapp/na_cdot_user_role.py b/plugins/modules/storage/netapp/na_cdot_user_role.py new file mode 100644 index 0000000000..9531f6f353 --- /dev/null +++ b/plugins/modules/storage/netapp/na_cdot_user_role.py @@ -0,0 +1,232 @@ +#!/usr/bin/python + +# (c) 2017, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['deprecated'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' + +module: na_cdot_user_role + +short_description: useradmin configuration and management +extends_documentation_fragment: +- netapp.ontap.netapp.ontap + +author: Sumit Kumar (@timuster) + +deprecated: + removed_in: '2.11' + why: Updated modules released with increased functionality + alternative: Use M(na_ontap_user_role) instead. + +description: +- Create or destroy user roles + +options: + + state: + description: + - Whether the specified user should exist or not. + required: true + choices: ['present', 'absent'] + + name: + description: + - The name of the role to manage. + required: true + + command_directory_name: + description: + - The command or command directory to which the role has an access. + required: true + + access_level: + description: + - The name of the role to manage. + choices: ['none', 'readonly', 'all'] + default: 'all' + + vserver: + description: + - The name of the vserver to use. + required: true + +''' + +EXAMPLES = """ + + - name: Create User Role + na_cdot_user_role: + state: present + name: ansibleRole + command_directory_name: DEFAULT + access_level: none + vserver: ansibleVServer + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + +""" + +RETURN = """ + +""" +import traceback + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_native +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils + + +HAS_NETAPP_LIB = netapp_utils.has_netapp_lib() + + +class NetAppCDOTUserRole(object): + + def __init__(self): + self.argument_spec = netapp_utils.ontap_sf_host_argument_spec() + self.argument_spec.update(dict( + state=dict(required=True, choices=['present', 'absent']), + name=dict(required=True, type='str'), + + command_directory_name=dict(required=True, type='str'), + access_level=dict(required=False, type='str', default='all', + choices=['none', 'readonly', 'all']), + + vserver=dict(required=True, type='str'), + )) + + self.module = AnsibleModule( + argument_spec=self.argument_spec, + supports_check_mode=True + ) + + p = self.module.params + + # set up state variables + self.state = p['state'] + self.name = p['name'] + + self.command_directory_name = p['command_directory_name'] + self.access_level = p['access_level'] + + self.vserver = p['vserver'] + + if HAS_NETAPP_LIB is False: + self.module.fail_json(msg="the python NetApp-Lib module is required") + else: + self.server = netapp_utils.setup_ontap_zapi(module=self.module) + + def get_role(self): + """ + Checks if the role exists for specific command-directory-name. 
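+        A role is matched on vserver, role-name and command-directory-name.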
+ + :return: + True if role found + False if role is not found + :rtype: bool + """ + + security_login_role_get_iter = netapp_utils.zapi.NaElement( + 'security-login-role-get-iter') + query_details = netapp_utils.zapi.NaElement.create_node_with_children( + 'security-login-role-info', **{'vserver': self.vserver, + 'role-name': self.name, + 'command-directory-name': + self.command_directory_name}) + + query = netapp_utils.zapi.NaElement('query') + query.add_child_elem(query_details) + security_login_role_get_iter.add_child_elem(query) + + try: + result = self.server.invoke_successfully( + security_login_role_get_iter, enable_tunneling=False) + except netapp_utils.zapi.NaApiError as e: + # Error 16031 denotes a role not being found. + if to_native(e.code) == "16031": + return False + else: + self.module.fail_json(msg='Error getting role %s: %s' % (self.name, to_native(e)), + exception=traceback.format_exc()) + + if (result.get_child_by_name('num-records') and + int(result.get_child_content('num-records')) >= 1): + return True + else: + return False + + def create_role(self): + role_create = netapp_utils.zapi.NaElement.create_node_with_children( + 'security-login-role-create', **{'vserver': self.vserver, + 'role-name': self.name, + 'command-directory-name': + self.command_directory_name, + 'access-level': + self.access_level}) + + try: + self.server.invoke_successfully(role_create, + enable_tunneling=False) + except netapp_utils.zapi.NaApiError as e: + self.module.fail_json(msg='Error creating role %s: %s' % (self.name, to_native(e)), + exception=traceback.format_exc()) + + def delete_role(self): + role_delete = netapp_utils.zapi.NaElement.create_node_with_children( + 'security-login-role-delete', **{'vserver': self.vserver, + 'role-name': self.name, + 'command-directory-name': + self.command_directory_name}) + + try: + self.server.invoke_successfully(role_delete, + enable_tunneling=False) + except netapp_utils.zapi.NaApiError as e: + self.module.fail_json(msg='Error removing role %s: %s' % (self.name, to_native(e)), + exception=traceback.format_exc()) + + def apply(self): + changed = False + role_exists = self.get_role() + + if role_exists: + if self.state == 'absent': + changed = True + + # Check if properties need to be updated + else: + if self.state == 'present': + changed = True + + if changed: + if self.module.check_mode: + pass + else: + if self.state == 'present': + if not role_exists: + self.create_role() + + # Update properties + + elif self.state == 'absent': + self.delete_role() + + self.module.exit_json(changed=changed) + + +def main(): + v = NetAppCDOTUserRole() + v.apply() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/storage/netapp/na_cdot_volume.py b/plugins/modules/storage/netapp/na_cdot_volume.py new file mode 100644 index 0000000000..a66c9071a5 --- /dev/null +++ b/plugins/modules/storage/netapp/na_cdot_volume.py @@ -0,0 +1,442 @@ +#!/usr/bin/python + +# (c) 2017, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['deprecated'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' + +module: na_cdot_volume + +short_description: Manage NetApp cDOT volumes +extends_documentation_fragment: +- netapp.ontap.netapp.ontap + +author: Sumit Kumar (@timuster) + +deprecated: + removed_in: '2.11' + why: Updated modules released with increased 
functionality + alternative: Use M(na_ontap_volume) instead. + +description: +- Create or destroy volumes on NetApp cDOT + +options: + + state: + description: + - Whether the specified volume should exist or not. + required: true + choices: ['present', 'absent'] + + name: + description: + - The name of the volume to manage. + required: true + + infinite: + description: + - Set True if the volume is an Infinite Volume. + type: bool + default: 'no' + + online: + description: + - Whether the specified volume is online, or not. + type: bool + default: 'yes' + + aggregate_name: + description: + - The name of the aggregate the flexvol should exist on. Required when C(state=present). + + size: + description: + - The size of the volume in (size_unit). Required when C(state=present). + + size_unit: + description: + - The unit used to interpret the size parameter. + choices: ['bytes', 'b', 'kb', 'mb', 'gb', 'tb', 'pb', 'eb', 'zb', 'yb'] + default: 'gb' + + vserver: + description: + - Name of the vserver to use. + required: true + + junction_path: + description: + - Junction path where to mount the volume + required: false + + export_policy: + description: + - Export policy to set for the specified junction path. + required: false + default: default + + snapshot_policy: + description: + - Snapshot policy to set for the specified volume. + required: false + default: default + +''' + +EXAMPLES = """ + + - name: Create FlexVol + na_cdot_volume: + state: present + name: ansibleVolume + infinite: False + aggregate_name: aggr1 + size: 20 + size_unit: mb + vserver: ansibleVServer + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + junction_path: /ansibleVolume + export_policy: all_nfs_networks + snapshot_policy: daily + + - name: Make FlexVol offline + na_cdot_volume: + state: present + name: ansibleVolume + infinite: False + online: False + vserver: ansibleVServer + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + +""" + +RETURN = """ + + +""" +import traceback + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_native +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils + + +HAS_NETAPP_LIB = netapp_utils.has_netapp_lib() + + +class NetAppCDOTVolume(object): + + def __init__(self): + + self._size_unit_map = dict( + bytes=1, + b=1, + kb=1024, + mb=1024 ** 2, + gb=1024 ** 3, + tb=1024 ** 4, + pb=1024 ** 5, + eb=1024 ** 6, + zb=1024 ** 7, + yb=1024 ** 8 + ) + + self.argument_spec = netapp_utils.ontap_sf_host_argument_spec() + self.argument_spec.update(dict( + state=dict(required=True, choices=['present', 'absent']), + name=dict(required=True, type='str'), + is_infinite=dict(required=False, type='bool', default=False, aliases=['infinite']), + is_online=dict(required=False, type='bool', default=True, aliases=['online']), + size=dict(type='int'), + size_unit=dict(default='gb', + choices=['bytes', 'b', 'kb', 'mb', 'gb', 'tb', + 'pb', 'eb', 'zb', 'yb'], type='str'), + aggregate_name=dict(type='str'), + vserver=dict(required=True, type='str', default=None), + junction_path=dict(required=False, type='str', default=None), + export_policy=dict(required=False, type='str', default='default'), + snapshot_policy=dict(required=False, type='str', default='default'), + )) + + self.module = AnsibleModule( + argument_spec=self.argument_spec, + required_if=[ + ('state', 'present', ['aggregate_name', 'size']) + ], + 
supports_check_mode=True + ) + + p = self.module.params + + # set up state variables + self.state = p['state'] + self.name = p['name'] + self.is_infinite = p['is_infinite'] + self.is_online = p['is_online'] + self.size_unit = p['size_unit'] + self.vserver = p['vserver'] + self.junction_path = p['junction_path'] + self.export_policy = p['export_policy'] + self.snapshot_policy = p['snapshot_policy'] + + if p['size'] is not None: + self.size = p['size'] * self._size_unit_map[self.size_unit] + else: + self.size = None + self.aggregate_name = p['aggregate_name'] + + if HAS_NETAPP_LIB is False: + self.module.fail_json(msg="the python NetApp-Lib module is required") + else: + self.server = netapp_utils.setup_ontap_zapi(module=self.module, vserver=self.vserver) + + def get_volume(self): + """ + Return details about the volume + :param: + name : Name of the volume + + :return: Details about the volume. None if not found. + :rtype: dict + """ + volume_info = netapp_utils.zapi.NaElement('volume-get-iter') + volume_attributes = netapp_utils.zapi.NaElement('volume-attributes') + volume_id_attributes = netapp_utils.zapi.NaElement('volume-id-attributes') + volume_id_attributes.add_new_child('name', self.name) + volume_attributes.add_child_elem(volume_id_attributes) + + query = netapp_utils.zapi.NaElement('query') + query.add_child_elem(volume_attributes) + + volume_info.add_child_elem(query) + + result = self.server.invoke_successfully(volume_info, True) + + return_value = None + + if result.get_child_by_name('num-records') and \ + int(result.get_child_content('num-records')) >= 1: + + volume_attributes = result.get_child_by_name( + 'attributes-list').get_child_by_name( + 'volume-attributes') + # Get volume's current size + volume_space_attributes = volume_attributes.get_child_by_name( + 'volume-space-attributes') + current_size = volume_space_attributes.get_child_content('size') + + # Get volume's state (online/offline) + volume_state_attributes = volume_attributes.get_child_by_name( + 'volume-state-attributes') + current_state = volume_state_attributes.get_child_content('state') + is_online = None + if current_state == "online": + is_online = True + elif current_state == "offline": + is_online = False + return_value = { + 'name': self.name, + 'size': current_size, + 'is_online': is_online, + } + + return return_value + + def create_volume(self): + create_parameters = {'volume': self.name, + 'containing-aggr-name': self.aggregate_name, + 'size': str(self.size), + } + if self.junction_path: + create_parameters['junction-path'] = str(self.junction_path) + if self.export_policy != 'default': + create_parameters['export-policy'] = str(self.export_policy) + if self.snapshot_policy != 'default': + create_parameters['snapshot-policy'] = str(self.snapshot_policy) + + volume_create = netapp_utils.zapi.NaElement.create_node_with_children( + 'volume-create', **create_parameters) + + try: + self.server.invoke_successfully(volume_create, + enable_tunneling=True) + except netapp_utils.zapi.NaApiError as e: + self.module.fail_json(msg='Error provisioning volume %s of size %s: %s' % (self.name, self.size, to_native(e)), + exception=traceback.format_exc()) + + def delete_volume(self): + if self.is_infinite: + volume_delete = netapp_utils.zapi.NaElement.create_node_with_children( + 'volume-destroy-async', **{'volume-name': self.name}) + else: + volume_delete = netapp_utils.zapi.NaElement.create_node_with_children( + 'volume-destroy', **{'name': self.name, 'unmount-and-offline': + 'true'}) + + try: + 
self.server.invoke_successfully(volume_delete, + enable_tunneling=True) + except netapp_utils.zapi.NaApiError as e: + self.module.fail_json(msg='Error deleting volume %s: %s' % (self.name, to_native(e)), + exception=traceback.format_exc()) + + def rename_volume(self): + """ + Rename the volume. + + Note: 'is_infinite' needs to be set to True in order to rename an + Infinite Volume. + """ + if self.is_infinite: + volume_rename = netapp_utils.zapi.NaElement.create_node_with_children( + 'volume-rename-async', + **{'volume-name': self.name, 'new-volume-name': str( + self.name)}) + else: + volume_rename = netapp_utils.zapi.NaElement.create_node_with_children( + 'volume-rename', **{'volume': self.name, 'new-volume-name': str( + self.name)}) + try: + self.server.invoke_successfully(volume_rename, + enable_tunneling=True) + except netapp_utils.zapi.NaApiError as e: + self.module.fail_json(msg='Error renaming volume %s: %s' % (self.name, to_native(e)), + exception=traceback.format_exc()) + + def resize_volume(self): + """ + Re-size the volume. + + Note: 'is_infinite' needs to be set to True in order to rename an + Infinite Volume. + """ + if self.is_infinite: + volume_resize = netapp_utils.zapi.NaElement.create_node_with_children( + 'volume-size-async', + **{'volume-name': self.name, 'new-size': str( + self.size)}) + else: + volume_resize = netapp_utils.zapi.NaElement.create_node_with_children( + 'volume-size', **{'volume': self.name, 'new-size': str( + self.size)}) + try: + self.server.invoke_successfully(volume_resize, + enable_tunneling=True) + except netapp_utils.zapi.NaApiError as e: + self.module.fail_json(msg='Error re-sizing volume %s: %s' % (self.name, to_native(e)), + exception=traceback.format_exc()) + + def change_volume_state(self): + """ + Change volume's state (offline/online). + + Note: 'is_infinite' needs to be set to True in order to change the + state of an Infinite Volume. + """ + state_requested = None + if self.is_online: + # Requested state is 'online'. + state_requested = "online" + if self.is_infinite: + volume_change_state = netapp_utils.zapi.NaElement.create_node_with_children( + 'volume-online-async', + **{'volume-name': self.name}) + else: + volume_change_state = netapp_utils.zapi.NaElement.create_node_with_children( + 'volume-online', + **{'name': self.name}) + else: + # Requested state is 'offline'. 
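+            # As in the online branch, the async (Infinite Volume) call takes
+            # 'volume-name' while the synchronous call takes 'name'.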
+ state_requested = "offline" + if self.is_infinite: + volume_change_state = netapp_utils.zapi.NaElement.create_node_with_children( + 'volume-offline-async', + **{'volume-name': self.name}) + else: + volume_change_state = netapp_utils.zapi.NaElement.create_node_with_children( + 'volume-offline', + **{'name': self.name}) + try: + self.server.invoke_successfully(volume_change_state, + enable_tunneling=True) + except netapp_utils.zapi.NaApiError as e: + self.module.fail_json(msg='Error changing the state of volume %s to %s: %s' % + (self.name, state_requested, to_native(e)), + exception=traceback.format_exc()) + + def apply(self): + changed = False + volume_exists = False + rename_volume = False + resize_volume = False + volume_detail = self.get_volume() + + if volume_detail: + volume_exists = True + + if self.state == 'absent': + changed = True + + elif self.state == 'present': + if str(volume_detail['size']) != str(self.size): + resize_volume = True + changed = True + if (volume_detail['is_online'] is not None) and (volume_detail['is_online'] != self.is_online): + changed = True + if self.is_online is False: + # Volume is online, but requested state is offline + pass + else: + # Volume is offline but requested state is online + pass + + else: + if self.state == 'present': + changed = True + + if changed: + if self.module.check_mode: + pass + else: + if self.state == 'present': + if not volume_exists: + self.create_volume() + + else: + if resize_volume: + self.resize_volume() + if volume_detail['is_online'] is not \ + None and volume_detail['is_online'] != \ + self.is_online: + self.change_volume_state() + # Ensure re-naming is the last change made. + if rename_volume: + self.rename_volume() + + elif self.state == 'absent': + self.delete_volume() + + self.module.exit_json(changed=changed) + + +def main(): + v = NetAppCDOTVolume() + v.apply() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/storage/netapp/na_ontap_gather_facts.py b/plugins/modules/storage/netapp/na_ontap_gather_facts.py new file mode 100644 index 0000000000..05b70889c8 --- /dev/null +++ b/plugins/modules/storage/netapp/na_ontap_gather_facts.py @@ -0,0 +1,614 @@ +#!/usr/bin/python + +# (c) 2018 Piotr Olczak +# (c) 2018-2019, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['deprecated'], + 'supported_by': 'certified'} + +DOCUMENTATION = ''' +module: na_ontap_gather_facts +deprecated: + removed_in: '2.13' + why: Deprecated in favour of C(_info) module. + alternative: Use M(na_ontap_info) instead. +author: Piotr Olczak (@dprts) +extends_documentation_fragment: +- netapp.ontap.netapp.na_ontap + +short_description: NetApp information gatherer +description: + - This module allows you to gather various information about ONTAP configuration +requirements: + - netapp_lib +options: + state: + description: + - Returns "info" + default: "info" + choices: ['info'] + gather_subset: + description: + - When supplied, this argument will restrict the facts collected + to a given subset. 
Possible values for this argument include + "aggregate_info", "cluster_node_info", "igroup_info", "lun_info", "net_dns_info", + "net_ifgrp_info", + "net_interface_info", "net_port_info", "nvme_info", "nvme_interface_info", + "nvme_namespace_info", "nvme_subsystem_info", "ontap_version", + "qos_adaptive_policy_info", "qos_policy_info", "security_key_manager_key_info", + "security_login_account_info", "storage_failover_info", "volume_info", + "vserver_info", "vserver_login_banner_info", "vserver_motd_info", "vserver_nfs_info" + Can specify a list of values to include a larger subset. Values can also be used + with an initial C(M(!)) to specify that a specific subset should + not be collected. + - nvme is supported with ONTAP 9.4 onwards. + - use "help" to get a list of supported facts for your system. + default: "all" +''' + +EXAMPLES = ''' +- name: Get NetApp info (Password Authentication) + na_ontap_gather_facts: + state: info + hostname: "na-vsim" + username: "admin" + password: "admins_password" +- debug: + var: ontap_facts +- name: Limit Fact Gathering to Aggregate Information + na_ontap_gather_facts: + state: info + hostname: "na-vsim" + username: "admin" + password: "admins_password" + gather_subset: "aggregate_info" +- name: Limit Fact Gathering to Volume and Lun Information + na_ontap_gather_facts: + state: info + hostname: "na-vsim" + username: "admin" + password: "admins_password" + gather_subset: + - volume_info + - lun_info +- name: Gather all facts except for volume and lun information + na_ontap_gather_facts: + state: info + hostname: "na-vsim" + username: "admin" + password: "admins_password" + gather_subset: + - "!volume_info" + - "!lun_info" +''' + +RETURN = ''' +ontap_facts: + description: Returns various information about NetApp cluster configuration + returned: always + type: dict + sample: '{ + "ontap_facts": { + "aggregate_info": {...}, + "cluster_node_info": {...}, + "net_dns_info": {...}, + "net_ifgrp_info": {...}, + "net_interface_info": {...}, + "net_port_info": {...}, + "security_key_manager_key_info": {...}, + "security_login_account_info": {...}, + "volume_info": {...}, + "lun_info": {...}, + "storage_failover_info": {...}, + "vserver_login_banner_info": {...}, + "vserver_motd_info": {...}, + "vserver_info": {...}, + "vserver_nfs_info": {...}, + "ontap_version": {...}, + "igroup_info": {...}, + "qos_policy_info": {...}, + "qos_adaptive_policy_info": {...} + }' +''' + +import traceback +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_native +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils + +try: + import xmltodict + HAS_XMLTODICT = True +except ImportError: + HAS_XMLTODICT = False + +try: + import json + HAS_JSON = True +except ImportError: + HAS_JSON = False + +HAS_NETAPP_LIB = netapp_utils.has_netapp_lib() + + +class NetAppONTAPGatherFacts(object): + '''Class with gather facts methods''' + + def __init__(self, module): + self.module = module + self.netapp_info = dict() + + # thanks to coreywan (https://github.com/ansible/ansible/pull/47016) + # for starting this + # min_version identifies the ontapi version which supports this ZAPI + # use 0 if it is supported since 9.1 + self.fact_subsets = { + 'net_dns_info': { + 'method': self.get_generic_get_iter, + 'kwargs': { + 'call': 'net-dns-get-iter', + 'attribute': 'net-dns-info', + 'field': 'vserver-name', + 'query': {'max-records': '1024'}, + }, + 'min_version': '0', + }, + 'net_interface_info': { + 'method': 
self.get_generic_get_iter, + 'kwargs': { + 'call': 'net-interface-get-iter', + 'attribute': 'net-interface-info', + 'field': 'interface-name', + 'query': {'max-records': '1024'}, + }, + 'min_version': '0', + }, + 'net_port_info': { + 'method': self.get_generic_get_iter, + 'kwargs': { + 'call': 'net-port-get-iter', + 'attribute': 'net-port-info', + 'field': ('node', 'port'), + 'query': {'max-records': '1024'}, + }, + 'min_version': '0', + }, + 'cluster_node_info': { + 'method': self.get_generic_get_iter, + 'kwargs': { + 'call': 'cluster-node-get-iter', + 'attribute': 'cluster-node-info', + 'field': 'node-name', + 'query': {'max-records': '1024'}, + }, + 'min_version': '0', + }, + 'security_login_account_info': { + 'method': self.get_generic_get_iter, + 'kwargs': { + 'call': 'security-login-get-iter', + 'attribute': 'security-login-account-info', + 'field': ('vserver', 'user-name', 'application', 'authentication-method'), + 'query': {'max-records': '1024'}, + }, + 'min_version': '0', + }, + 'aggregate_info': { + 'method': self.get_generic_get_iter, + 'kwargs': { + 'call': 'aggr-get-iter', + 'attribute': 'aggr-attributes', + 'field': 'aggregate-name', + 'query': {'max-records': '1024'}, + }, + 'min_version': '0', + }, + 'volume_info': { + 'method': self.get_generic_get_iter, + 'kwargs': { + 'call': 'volume-get-iter', + 'attribute': 'volume-attributes', + 'field': ('name', 'owning-vserver-name'), + 'query': {'max-records': '1024'}, + }, + 'min_version': '0', + }, + 'lun_info': { + 'method': self.get_generic_get_iter, + 'kwargs': { + 'call': 'lun-get-iter', + 'attribute': 'lun-info', + 'field': 'path', + 'query': {'max-records': '1024'}, + }, + 'min_version': '0', + }, + 'storage_failover_info': { + 'method': self.get_generic_get_iter, + 'kwargs': { + 'call': 'cf-get-iter', + 'attribute': 'storage-failover-info', + 'field': 'node', + 'query': {'max-records': '1024'}, + }, + 'min_version': '0', + }, + 'vserver_motd_info': { + 'method': self.get_generic_get_iter, + 'kwargs': { + 'call': 'vserver-motd-get-iter', + 'attribute': 'vserver-motd-info', + 'field': 'vserver', + 'query': {'max-records': '1024'}, + }, + 'min_version': '0', + }, + 'vserver_login_banner_info': { + 'method': self.get_generic_get_iter, + 'kwargs': { + 'call': 'vserver-login-banner-get-iter', + 'attribute': 'vserver-login-banner-info', + 'field': 'vserver', + 'query': {'max-records': '1024'}, + }, + 'min_version': '0', + }, + 'security_key_manager_key_info': { + 'method': self.get_generic_get_iter, + 'kwargs': { + 'call': 'security-key-manager-key-get-iter', + 'attribute': 'security-key-manager-key-info', + 'field': ('node', 'key-id'), + 'query': {'max-records': '1024'}, + }, + 'min_version': '0', + }, + 'vserver_info': { + 'method': self.get_generic_get_iter, + 'kwargs': { + 'call': 'vserver-get-iter', + 'attribute': 'vserver-info', + 'field': 'vserver-name', + 'query': {'max-records': '1024'}, + }, + 'min_version': '0', + }, + 'vserver_nfs_info': { + 'method': self.get_generic_get_iter, + 'kwargs': { + 'call': 'nfs-service-get-iter', + 'attribute': 'nfs-info', + 'field': 'vserver', + 'query': {'max-records': '1024'}, + }, + 'min_version': '0', + }, + 'net_ifgrp_info': { + 'method': self.get_ifgrp_info, + 'kwargs': {}, + 'min_version': '0', + }, + 'ontap_version': { + 'method': self.ontapi, + 'kwargs': {}, + 'min_version': '0', + }, + 'system_node_info': { + 'method': self.get_generic_get_iter, + 'kwargs': { + 'call': 'system-node-get-iter', + 'attribute': 'node-details-info', + 'field': 'node', + 'query': {'max-records': 
'1024'}, + }, + 'min_version': '0', + }, + 'igroup_info': { + 'method': self.get_generic_get_iter, + 'kwargs': { + 'call': 'igroup-get-iter', + 'attribute': 'initiator-group-info', + 'field': ('vserver', 'initiator-group-name'), + 'query': {'max-records': '1024'}, + }, + 'min_version': '0', + }, + 'qos_policy_info': { + 'method': self.get_generic_get_iter, + 'kwargs': { + 'call': 'qos-policy-group-get-iter', + 'attribute': 'qos-policy-group-info', + 'field': 'policy-group', + 'query': {'max-records': '1024'}, + }, + 'min_version': '0', + }, + # supported in ONTAP 9.3 and onwards + 'qos_adaptive_policy_info': { + 'method': self.get_generic_get_iter, + 'kwargs': { + 'call': 'qos-adaptive-policy-group-get-iter', + 'attribute': 'qos-adaptive-policy-group-info', + 'field': 'policy-group', + 'query': {'max-records': '1024'}, + }, + 'min_version': '130', + }, + # supported in ONTAP 9.4 and onwards + 'nvme_info': { + 'method': self.get_generic_get_iter, + 'kwargs': { + 'call': 'nvme-get-iter', + 'attribute': 'nvme-target-service-info', + 'field': 'vserver', + 'query': {'max-records': '1024'}, + }, + 'min_version': '140', + }, + 'nvme_interface_info': { + 'method': self.get_generic_get_iter, + 'kwargs': { + 'call': 'nvme-interface-get-iter', + 'attribute': 'nvme-interface-info', + 'field': 'vserver', + 'query': {'max-records': '1024'}, + }, + 'min_version': '140', + }, + 'nvme_subsystem_info': { + 'method': self.get_generic_get_iter, + 'kwargs': { + 'call': 'nvme-subsystem-get-iter', + 'attribute': 'nvme-subsystem-info', + 'field': 'subsystem', + 'query': {'max-records': '1024'}, + }, + 'min_version': '140', + }, + 'nvme_namespace_info': { + 'method': self.get_generic_get_iter, + 'kwargs': { + 'call': 'nvme-namespace-get-iter', + 'attribute': 'nvme-namespace-info', + 'field': 'path', + 'query': {'max-records': '1024'}, + }, + 'min_version': '140', + }, + } + + if HAS_NETAPP_LIB is False: + self.module.fail_json(msg="the python NetApp-Lib module is required") + else: + self.server = netapp_utils.setup_na_ontap_zapi(module=self.module) + + def ontapi(self): + '''Method to get ontapi version''' + + api = 'system-get-ontapi-version' + api_call = netapp_utils.zapi.NaElement(api) + try: + results = self.server.invoke_successfully(api_call, enable_tunneling=False) + ontapi_version = results.get_child_content('minor-version') + return ontapi_version if ontapi_version is not None else '0' + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg="Error calling API %s: %s" % + (api, to_native(error)), exception=traceback.format_exc()) + + def call_api(self, call, query=None): + '''Main method to run an API call''' + + api_call = netapp_utils.zapi.NaElement(call) + result = None + + if query: + for key, val in query.items(): + # Can val be nested? 
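+                # Every query defined in fact_subsets is a flat string pair
+                # (e.g. {'max-records': '1024'}). A nested value would need a
+                # child element instead, along the lines of:
+                #   child = netapp_utils.zapi.NaElement(key)
+                #   child.add_new_child(sub_key, sub_val)
+                #   api_call.add_child_elem(child)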
+ api_call.add_new_child(key, val) + try: + result = self.server.invoke_successfully(api_call, enable_tunneling=False) + return result + except netapp_utils.zapi.NaApiError as error: + if call in ['security-key-manager-key-get-iter']: + return result + else: + self.module.fail_json(msg="Error calling API %s: %s" + % (call, to_native(error)), exception=traceback.format_exc()) + + def get_ifgrp_info(self): + '''Method to get network port ifgroups info''' + + try: + net_port_info = self.netapp_info['net_port_info'] + except KeyError: + net_port_info_calls = self.fact_subsets['net_port_info'] + net_port_info = net_port_info_calls['method'](**net_port_info_calls['kwargs']) + interfaces = net_port_info.keys() + + ifgrps = [] + for ifn in interfaces: + if net_port_info[ifn]['port_type'] == 'if_group': + ifgrps.append(ifn) + + net_ifgrp_info = dict() + for ifgrp in ifgrps: + query = dict() + query['node'], query['ifgrp-name'] = ifgrp.split(':') + + tmp = self.get_generic_get_iter('net-port-ifgrp-get', field=('node', 'ifgrp-name'), + attribute='net-ifgrp-info', query=query) + net_ifgrp_info = net_ifgrp_info.copy() + net_ifgrp_info.update(tmp) + return net_ifgrp_info + + def get_generic_get_iter(self, call, attribute=None, field=None, query=None): + '''Method to run a generic get-iter call''' + + generic_call = self.call_api(call, query) + + if call == 'net-port-ifgrp-get': + children = 'attributes' + else: + children = 'attributes-list' + + if generic_call is None: + return None + + if field is None: + out = [] + else: + out = {} + + attributes_list = generic_call.get_child_by_name(children) + + if attributes_list is None: + return None + + for child in attributes_list.get_children(): + dic = xmltodict.parse(child.to_string(), xml_attribs=False) + + if attribute is not None: + dic = dic[attribute] + + if isinstance(field, str): + unique_key = _finditem(dic, field) + out = out.copy() + out.update({unique_key: convert_keys(json.loads(json.dumps(dic)))}) + elif isinstance(field, tuple): + unique_key = ':'.join([_finditem(dic, el) for el in field]) + out = out.copy() + out.update({unique_key: convert_keys(json.loads(json.dumps(dic)))}) + else: + out.append(convert_keys(json.loads(json.dumps(dic)))) + + return out + + def get_all(self, gather_subset): + '''Method to get all subsets''' + + results = netapp_utils.get_cserver(self.server) + cserver = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=results) + netapp_utils.ems_log_event("na_ontap_gather_facts", cserver) + + self.netapp_info['ontap_version'] = self.ontapi() + + run_subset = self.get_subset(gather_subset, self.netapp_info['ontap_version']) + if 'help' in gather_subset: + self.netapp_info['help'] = sorted(run_subset) + else: + for subset in run_subset: + call = self.fact_subsets[subset] + self.netapp_info[subset] = call['method'](**call['kwargs']) + + return self.netapp_info + + def get_subset(self, gather_subset, version): + '''Method to get a single subset''' + + runable_subsets = set() + exclude_subsets = set() + usable_subsets = [key for key in self.fact_subsets.keys() if version >= self.fact_subsets[key]['min_version']] + if 'help' in gather_subset: + return usable_subsets + for subset in gather_subset: + if subset == 'all': + runable_subsets.update(usable_subsets) + return runable_subsets + if subset.startswith('!'): + subset = subset[1:] + if subset == 'all': + return set() + exclude = True + else: + exclude = False + + if subset not in usable_subsets: + if subset not in self.fact_subsets.keys(): + 
self.module.fail_json(msg='Bad subset: %s' % subset) + self.module.fail_json(msg='Remote system at version %s does not support %s' % + (version, subset)) + + if exclude: + exclude_subsets.add(subset) + else: + runable_subsets.add(subset) + + if not runable_subsets: + runable_subsets.update(usable_subsets) + + runable_subsets.difference_update(exclude_subsets) + + return runable_subsets + + +# https://stackoverflow.com/questions/14962485/finding-a-key-recursively-in-a-dictionary +def __finditem(obj, key): + + if key in obj: + return obj[key] + for dummy, val in obj.items(): + if isinstance(val, dict): + item = __finditem(val, key) + if item is not None: + return item + return None + + +def _finditem(obj, key): + + value = __finditem(obj, key) + if value is not None: + return value + raise KeyError(key) + + +def convert_keys(d_param): + '''Method to convert hyphen to underscore''' + + out = {} + if isinstance(d_param, dict): + for key, val in d_param.items(): + val = convert_keys(val) + out[key.replace('-', '_')] = val + else: + return d_param + return out + + +def main(): + '''Execute action''' + + argument_spec = netapp_utils.na_ontap_host_argument_spec() + argument_spec.update(dict( + state=dict(default='info', choices=['info']), + gather_subset=dict(default=['all'], type='list'), + )) + + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True + ) + + if not HAS_XMLTODICT: + module.fail_json(msg="xmltodict missing") + + if not HAS_JSON: + module.fail_json(msg="json missing") + + state = module.params['state'] + gather_subset = module.params['gather_subset'] + if gather_subset is None: + gather_subset = ['all'] + gf_obj = NetAppONTAPGatherFacts(module) + gf_all = gf_obj.get_all(gather_subset) + result = {'state': state, 'changed': False} + module.exit_json(ansible_facts={'ontap_facts': gf_all}, **result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/storage/netapp/netapp_e_alerts.py b/plugins/modules/storage/netapp/netapp_e_alerts.py new file mode 100644 index 0000000000..ac9dbff93e --- /dev/null +++ b/plugins/modules/storage/netapp/netapp_e_alerts.py @@ -0,0 +1,280 @@ +#!/usr/bin/python + +# (c) 2018, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: netapp_e_alerts +short_description: NetApp E-Series manage email notification settings +description: + - Certain E-Series systems have the capability to send email notifications on potentially critical events. + - This module will allow the owner of the system to specify email recipients for these messages. +author: Michael Price (@lmprice) +extends_documentation_fragment: +- netapp.ontap.netapp.eseries + +options: + state: + description: + - Enable/disable the sending of email-based alerts. + default: enabled + required: false + choices: + - enabled + - disabled + server: + description: + - A fully qualified domain name, IPv4 address, or IPv6 address of a mail server. + - To use a fully qualified domain name, you must configure a DNS server on both controllers using + M(netapp_e_mgmt_interface). + - Required when I(state=enabled). + required: no + sender: + description: + - This is the sender that the recipient will see. It doesn't necessarily need to be a valid email account. 
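+            - The address is validated against a minimal C(xx@yy.zz) pattern before any request is made.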
+ - Required when I(state=enabled). + required: no + contact: + description: + - Allows the owner to specify some free-form contact information to be included in the emails. + - This is typically utilized to provide a contact phone number. + required: no + recipients: + description: + - The email addresses that will receive the email notifications. + - Required when I(state=enabled). + required: no + test: + description: + - When a change is detected in the configuration, a test email will be sent. + - This may take a few minutes to process. + - Only applicable if I(state=enabled). + default: no + type: bool + log_path: + description: + - Path to a file on the Ansible control node to be used for debug logging + required: no +notes: + - Check mode is supported. + - Alertable messages are a subset of messages shown by the Major Event Log (MEL), of the storage-system. Examples + of alertable messages include drive failures, failed controllers, loss of redundancy, and other warning/critical + events. + - This API is currently only supported with the Embedded Web Services API v2.0 and higher. +''' + +EXAMPLES = """ + - name: Enable email-based alerting + netapp_e_alerts: + state: enabled + sender: noreply@example.com + server: mail@example.com + contact: "Phone: 1-555-555-5555" + recipients: + - name1@example.com + - name2@example.com + api_url: "10.1.1.1:8443" + api_username: "admin" + api_password: "myPass" + + - name: Disable alerting + netapp_e_alerts: + state: disabled + api_url: "10.1.1.1:8443" + api_username: "admin" + api_password: "myPass" +""" + +RETURN = """ +msg: + description: Success message + returned: on success + type: str + sample: The settings have been updated. +""" + +import json +import logging +from pprint import pformat +import re + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.netapp.ontap.plugins.module_utils.netapp import request, eseries_host_argument_spec +from ansible.module_utils._text import to_native + +HEADERS = { + "Content-Type": "application/json", + "Accept": "application/json", +} + + +class Alerts(object): + def __init__(self): + argument_spec = eseries_host_argument_spec() + argument_spec.update(dict( + state=dict(type='str', required=False, default='enabled', + choices=['enabled', 'disabled']), + server=dict(type='str', required=False, ), + sender=dict(type='str', required=False, ), + contact=dict(type='str', required=False, ), + recipients=dict(type='list', required=False, ), + test=dict(type='bool', required=False, default=False, ), + log_path=dict(type='str', required=False), + )) + + required_if = [ + ['state', 'enabled', ['server', 'sender', 'recipients']] + ] + + self.module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True, required_if=required_if) + args = self.module.params + self.alerts = args['state'] == 'enabled' + self.server = args['server'] + self.sender = args['sender'] + self.contact = args['contact'] + self.recipients = args['recipients'] + self.test = args['test'] + + self.ssid = args['ssid'] + self.url = args['api_url'] + self.creds = dict(url_password=args['api_password'], + validate_certs=args['validate_certs'], + url_username=args['api_username'], ) + + self.check_mode = self.module.check_mode + + log_path = args['log_path'] + + # logging setup + self._logger = logging.getLogger(self.__class__.__name__) + + if log_path: + logging.basicConfig( + level=logging.DEBUG, filename=log_path, filemode='w', + format='%(relativeCreated)dms %(levelname)s 
%(module)s.%(funcName)s:%(lineno)d\n %(message)s') + + if not self.url.endswith('/'): + self.url += '/' + + # Very basic validation on email addresses: xx@yy.zz + email = re.compile(r"[^@]+@[^@]+\.[^@]+") + + if self.sender and not email.match(self.sender): + self.module.fail_json(msg="The sender (%s) provided is not a valid email address." % self.sender) + + if self.recipients is not None: + for recipient in self.recipients: + if not email.match(recipient): + self.module.fail_json(msg="The recipient (%s) provided is not a valid email address." % recipient) + + if len(self.recipients) < 1: + self.module.fail_json(msg="At least one recipient address must be specified.") + + def get_configuration(self): + try: + (rc, result) = request(self.url + 'storage-systems/%s/device-alerts' % self.ssid, headers=HEADERS, + **self.creds) + self._logger.info("Current config: %s", pformat(result)) + return result + + except Exception as err: + self.module.fail_json(msg="Failed to retrieve the alerts configuration! Array Id [%s]. Error [%s]." + % (self.ssid, to_native(err))) + + def update_configuration(self): + config = self.get_configuration() + update = False + body = dict() + + if self.alerts: + body = dict(alertingEnabled=True) + if not config['alertingEnabled']: + update = True + + body.update(emailServerAddress=self.server) + if config['emailServerAddress'] != self.server: + update = True + + body.update(additionalContactInformation=self.contact, sendAdditionalContactInformation=True) + if self.contact and (self.contact != config['additionalContactInformation'] + or not config['sendAdditionalContactInformation']): + update = True + + body.update(emailSenderAddress=self.sender) + if config['emailSenderAddress'] != self.sender: + update = True + + self.recipients.sort() + if config['recipientEmailAddresses']: + config['recipientEmailAddresses'].sort() + + body.update(recipientEmailAddresses=self.recipients) + if config['recipientEmailAddresses'] != self.recipients: + update = True + + elif config['alertingEnabled']: + body = dict(alertingEnabled=False) + update = True + + self._logger.debug(pformat(body)) + + if update and not self.check_mode: + try: + (rc, result) = request(self.url + 'storage-systems/%s/device-alerts' % self.ssid, method='POST', + data=json.dumps(body), headers=HEADERS, **self.creds) + # This is going to catch cases like a connection failure + except Exception as err: + self.module.fail_json(msg="We failed to set the storage-system name! Array Id [%s]. Error [%s]." + % (self.ssid, to_native(err))) + return update + + def send_test_email(self): + """Send a test email to verify that the provided configuration is valid and functional.""" + if not self.check_mode: + try: + (rc, result) = request(self.url + 'storage-systems/%s/device-alerts/alert-email-test' % self.ssid, + timeout=300, method='POST', headers=HEADERS, **self.creds) + + if result['response'] != 'emailSentOK': + self.module.fail_json(msg="The test email failed with status=[%s]! Array Id [%s]." + % (result['response'], self.ssid)) + + # This is going to catch cases like a connection failure + except Exception as err: + self.module.fail_json(msg="We failed to send the test email! Array Id [%s]. Error [%s]." + % (self.ssid, to_native(err))) + + def update(self): + update = self.update_configuration() + + if self.test and update: + self._logger.info("An update was detected and test=True, running a test.") + self.send_test_email() + + if self.alerts: + msg = 'Alerting has been enabled using server=%s, sender=%s.' 
% (self.server, self.sender) + else: + msg = 'Alerting has been disabled.' + + self.module.exit_json(msg=msg, changed=update, ) + + def __call__(self, *args, **kwargs): + self.update() + + +def main(): + alerts = Alerts() + alerts() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/storage/netapp/netapp_e_amg.py b/plugins/modules/storage/netapp/netapp_e_amg.py new file mode 100644 index 0000000000..c34648458f --- /dev/null +++ b/plugins/modules/storage/netapp/netapp_e_amg.py @@ -0,0 +1,255 @@ +#!/usr/bin/python +# (c) 2016, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: netapp_e_amg +short_description: NetApp E-Series create, remove, and update asynchronous mirror groups +description: + - Allows for the creation, removal and updating of Asynchronous Mirror Groups for NetApp E-series storage arrays +author: Kevin Hulquest (@hulquest) +extends_documentation_fragment: +- netapp.ontap.netapp.eseries + +options: + name: + description: + - The name of the async array you wish to target, or create. + - If C(state) is present and the name isn't found, it will attempt to create. + required: yes + secondaryArrayId: + description: + - The ID of the secondary array to be used in mirroring process + required: yes + syncIntervalMinutes: + description: + - The synchronization interval in minutes + default: 10 + manualSync: + description: + - Setting this to true will cause other synchronization values to be ignored + type: bool + default: 'no' + recoveryWarnThresholdMinutes: + description: + - Recovery point warning threshold (minutes). The user will be warned when the age of the last good failures point exceeds this value + default: 20 + repoUtilizationWarnThreshold: + description: + - Recovery point warning threshold + default: 80 + interfaceType: + description: + - The intended protocol to use if both Fibre and iSCSI are available. + choices: + - iscsi + - fibre + syncWarnThresholdMinutes: + description: + - The threshold (in minutes) for notifying the user that periodic synchronization has taken too long to complete. + default: 10 + state: + description: + - A C(state) of present will either create or update the async mirror group. + - A C(state) of absent will remove the async mirror group. 
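+        - If I(state=absent) and no group matches I(name), the module exits without making changes.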
+ choices: [ absent, present ] + required: yes +''' + +EXAMPLES = """ + - name: AMG removal + na_eseries_amg: + state: absent + ssid: "{{ ssid }}" + secondaryArrayId: "{{amg_secondaryArrayId}}" + api_url: "{{ netapp_api_url }}" + api_username: "{{ netapp_api_username }}" + api_password: "{{ netapp_api_password }}" + new_name: "{{amg_array_name}}" + name: "{{amg_name}}" + when: amg_create + + - name: AMG create + netapp_e_amg: + state: present + ssid: "{{ ssid }}" + secondaryArrayId: "{{amg_secondaryArrayId}}" + api_url: "{{ netapp_api_url }}" + api_username: "{{ netapp_api_username }}" + api_password: "{{ netapp_api_password }}" + new_name: "{{amg_array_name}}" + name: "{{amg_name}}" + when: amg_create +""" + +RETURN = """ +msg: + description: Successful creation + returned: success + type: str + sample: '{"changed": true, "connectionType": "fc", "groupRef": "3700000060080E5000299C24000006E857AC7EEC", "groupState": "optimal", "id": "3700000060080E5000299C24000006E857AC7EEC", "label": "amg_made_by_ansible", "localRole": "primary", "mirrorChannelRemoteTarget": "9000000060080E5000299C24005B06E557AC7EEC", "orphanGroup": false, "recoveryPointAgeAlertThresholdMinutes": 20, "remoteRole": "secondary", "remoteTarget": {"nodeName": {"ioInterfaceType": "fc", "iscsiNodeName": null, "remoteNodeWWN": "20040080E5299F1C"}, "remoteRef": "9000000060080E5000299C24005B06E557AC7EEC", "scsiinitiatorTargetBaseProperties": {"ioInterfaceType": "fc", "iscsiinitiatorTargetBaseParameters": null}}, "remoteTargetId": "ansible2", "remoteTargetName": "Ansible2", "remoteTargetWwn": "60080E5000299F880000000056A25D56", "repositoryUtilizationWarnThreshold": 80, "roleChangeProgress": "none", "syncActivity": "idle", "syncCompletionTimeAlertThresholdMinutes": 10, "syncIntervalMinutes": 10, "worldWideName": "60080E5000299C24000006E857AC7EEC"}' +""" # NOQA + +import json +import traceback + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_native +from ansible_collections.netapp.ontap.plugins.module_utils.netapp import request, eseries_host_argument_spec + + +HEADERS = { + "Content-Type": "application/json", + "Accept": "application/json", +} + + +def has_match(module, ssid, api_url, api_pwd, api_usr, body): + compare_keys = ['syncIntervalMinutes', 'syncWarnThresholdMinutes', + 'recoveryWarnThresholdMinutes', 'repoUtilizationWarnThreshold'] + desired_state = dict((x, (body.get(x))) for x in compare_keys) + label_exists = False + matches_spec = False + current_state = None + async_id = None + api_data = None + desired_name = body.get('name') + endpoint = 'storage-systems/%s/async-mirrors' % ssid + url = api_url + endpoint + try: + rc, data = request(url, url_username=api_usr, url_password=api_pwd, headers=HEADERS) + except Exception as e: + module.exit_json(msg="Error finding a match. 
Message: %s" % to_native(e), exception=traceback.format_exc()) + + for async_group in data: + if async_group['label'] == desired_name: + label_exists = True + api_data = async_group + async_id = async_group['groupRef'] + current_state = dict( + syncIntervalMinutes=async_group['syncIntervalMinutes'], + syncWarnThresholdMinutes=async_group['syncCompletionTimeAlertThresholdMinutes'], + recoveryWarnThresholdMinutes=async_group['recoveryPointAgeAlertThresholdMinutes'], + repoUtilizationWarnThreshold=async_group['repositoryUtilizationWarnThreshold'], + ) + + if current_state == desired_state: + matches_spec = True + + return label_exists, matches_spec, api_data, async_id + + +def create_async(module, ssid, api_url, api_pwd, api_usr, body): + endpoint = 'storage-systems/%s/async-mirrors' % ssid + url = api_url + endpoint + post_data = json.dumps(body) + try: + rc, data = request(url, data=post_data, method='POST', url_username=api_usr, url_password=api_pwd, + headers=HEADERS) + except Exception as e: + module.exit_json(msg="Exception while creating aysnc mirror group. Message: %s" % to_native(e), + exception=traceback.format_exc()) + return data + + +def update_async(module, ssid, api_url, pwd, user, body, new_name, async_id): + endpoint = 'storage-systems/%s/async-mirrors/%s' % (ssid, async_id) + url = api_url + endpoint + compare_keys = ['syncIntervalMinutes', 'syncWarnThresholdMinutes', + 'recoveryWarnThresholdMinutes', 'repoUtilizationWarnThreshold'] + desired_state = dict((x, (body.get(x))) for x in compare_keys) + + if new_name: + desired_state['new_name'] = new_name + + post_data = json.dumps(desired_state) + + try: + rc, data = request(url, data=post_data, method='POST', headers=HEADERS, + url_username=user, url_password=pwd) + except Exception as e: + module.exit_json(msg="Exception while updating async mirror group. Message: %s" % to_native(e), + exception=traceback.format_exc()) + + return data + + +def remove_amg(module, ssid, api_url, pwd, user, async_id): + endpoint = 'storage-systems/%s/async-mirrors/%s' % (ssid, async_id) + url = api_url + endpoint + try: + rc, data = request(url, method='DELETE', url_username=user, url_password=pwd, + headers=HEADERS) + except Exception as e: + module.exit_json(msg="Exception while removing async mirror group. 
Message: %s" % to_native(e), + exception=traceback.format_exc()) + + return + + +def main(): + argument_spec = eseries_host_argument_spec() + argument_spec.update(dict( + name=dict(required=True, type='str'), + new_name=dict(required=False, type='str'), + secondaryArrayId=dict(required=True, type='str'), + syncIntervalMinutes=dict(required=False, default=10, type='int'), + manualSync=dict(required=False, default=False, type='bool'), + recoveryWarnThresholdMinutes=dict(required=False, default=20, type='int'), + repoUtilizationWarnThreshold=dict(required=False, default=80, type='int'), + interfaceType=dict(required=False, choices=['fibre', 'iscsi'], type='str'), + state=dict(required=True, choices=['present', 'absent']), + syncWarnThresholdMinutes=dict(required=False, default=10, type='int') + )) + + module = AnsibleModule(argument_spec=argument_spec) + + p = module.params + + ssid = p.pop('ssid') + api_url = p.pop('api_url') + user = p.pop('api_username') + pwd = p.pop('api_password') + new_name = p.pop('new_name') + state = p.pop('state') + + if not api_url.endswith('/'): + api_url += '/' + + name_exists, spec_matches, api_data, async_id = has_match(module, ssid, api_url, pwd, user, p) + + if state == 'present': + if name_exists and spec_matches: + module.exit_json(changed=False, msg="Desired state met", **api_data) + elif name_exists and not spec_matches: + results = update_async(module, ssid, api_url, pwd, user, + p, new_name, async_id) + module.exit_json(changed=True, + msg="Async mirror group updated", async_id=async_id, + **results) + elif not name_exists: + results = create_async(module, ssid, api_url, user, pwd, p) + module.exit_json(changed=True, **results) + + elif state == 'absent': + if name_exists: + remove_amg(module, ssid, api_url, pwd, user, async_id) + module.exit_json(changed=True, msg="Async mirror group removed.", + async_id=async_id) + else: + module.exit_json(changed=False, + msg="Async Mirror group: %s already absent" % p['name']) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/storage/netapp/netapp_e_amg_role.py b/plugins/modules/storage/netapp/netapp_e_amg_role.py new file mode 100644 index 0000000000..9b7a40405b --- /dev/null +++ b/plugins/modules/storage/netapp/netapp_e_amg_role.py @@ -0,0 +1,233 @@ +#!/usr/bin/python + +# (c) 2016, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: netapp_e_amg_role +short_description: NetApp E-Series update the role of a storage array within an Asynchronous Mirror Group (AMG). +description: + - Update a storage array to become the primary or secondary instance in an asynchronous mirror group +author: Kevin Hulquest (@hulquest) +options: + api_username: + required: true + description: + - The username to authenticate with the SANtricity WebServices Proxy or embedded REST API. + api_password: + required: true + description: + - The password to authenticate with the SANtricity WebServices Proxy or embedded REST API. + api_url: + required: true + description: + - The url to the SANtricity WebServices Proxy or embedded REST API. + validate_certs: + required: false + default: true + description: + - Should https certificates be validated? 
+ type: bool + ssid: + description: + - The ID of the primary storage array for the async mirror action + required: yes + role: + description: + - Whether the array should be the primary or secondary array for the AMG + required: yes + choices: ['primary', 'secondary'] + noSync: + description: + - Whether to avoid synchronization prior to role reversal + required: no + default: no + type: bool + force: + description: + - Whether to force the role reversal regardless of the online-state of the primary + required: no + default: no + type: bool +''' + +EXAMPLES = """ + - name: Update the role of a storage array + netapp_e_amg_role: + name: updating amg role + role: primary + ssid: "{{ ssid }}" + api_url: "{{ netapp_api_url }}" + api_username: "{{ netapp_api_username }}" + api_password: "{{ netapp_api_password }}" + validate_certs: "{{ netapp_api_validate_certs }}" +""" + +RETURN = """ +msg: + description: Failure message + returned: failure + type: str + sample: "No Async Mirror Group with the name." +""" +import json +import traceback + +from ansible.module_utils.api import basic_auth_argument_spec +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.six.moves.urllib.error import HTTPError +from ansible.module_utils._text import to_native +from ansible.module_utils.urls import open_url + + +HEADERS = { + "Content-Type": "application/json", + "Accept": "application/json", +} + + +def request(url, data=None, headers=None, method='GET', use_proxy=True, + force=False, last_mod_time=None, timeout=10, validate_certs=True, + url_username=None, url_password=None, http_agent=None, force_basic_auth=True, ignore_errors=False): + try: + r = open_url(url=url, data=data, headers=headers, method=method, use_proxy=use_proxy, + force=force, last_mod_time=last_mod_time, timeout=timeout, validate_certs=validate_certs, + url_username=url_username, url_password=url_password, http_agent=http_agent, + force_basic_auth=force_basic_auth) + except HTTPError as e: + r = e.fp + + try: + raw_data = r.read() + if raw_data: + data = json.loads(raw_data) + else: + raw_data = None + except Exception: + if ignore_errors: + pass + else: + raise Exception(raw_data) + + resp_code = r.getcode() + + if resp_code >= 400 and not ignore_errors: + raise Exception(resp_code, data) + else: + return resp_code, data + + +def has_match(module, ssid, api_url, api_pwd, api_usr, body, name): + amg_exists = False + has_desired_role = False + amg_id = None + amg_data = None + get_amgs = 'storage-systems/%s/async-mirrors' % ssid + url = api_url + get_amgs + try: + amg_rc, amgs = request(url, url_username=api_usr, url_password=api_pwd, + headers=HEADERS) + except Exception: + module.fail_json(msg="Failed to find AMGs on storage array. Id [%s]" % (ssid)) + + for amg in amgs: + if amg['label'] == name: + amg_exists = True + amg_id = amg['id'] + amg_data = amg + if amg['localRole'] == body.get('role'): + has_desired_role = True + + return amg_exists, has_desired_role, amg_id, amg_data + + +def update_amg(module, ssid, api_url, api_usr, api_pwd, body, amg_id): + endpoint = 'storage-systems/%s/async-mirrors/%s/role' % (ssid, amg_id) + url = api_url + endpoint + post_data = json.dumps(body) + try: + request(url, data=post_data, method='POST', url_username=api_usr, + url_password=api_pwd, headers=HEADERS) + except Exception as e: + module.fail_json( + msg="Failed to change role of AMG. Id [%s]. AMG Id [%s]. 
Error [%s]" % (ssid, amg_id, to_native(e)), + exception=traceback.format_exc()) + + status_endpoint = 'storage-systems/%s/async-mirrors/%s' % (ssid, amg_id) + status_url = api_url + status_endpoint + try: + rc, status = request(status_url, method='GET', url_username=api_usr, + url_password=api_pwd, headers=HEADERS) + except Exception as e: + module.fail_json( + msg="Failed to check status of AMG after role reversal. " + "Id [%s]. AMG Id [%s]. Error [%s]" % (ssid, amg_id, to_native(e)), + exception=traceback.format_exc()) + + # Here we wait for the role reversal to complete + if 'roleChangeProgress' in status: + while status['roleChangeProgress'] != "none": + try: + rc, status = request(status_url, method='GET', + url_username=api_usr, url_password=api_pwd, headers=HEADERS) + except Exception as e: + module.fail_json( + msg="Failed to check status of AMG after role reversal. " + "Id [%s]. AMG Id [%s]. Error [%s]" % (ssid, amg_id, to_native(e)), + exception=traceback.format_exc()) + return status + + +def main(): + argument_spec = basic_auth_argument_spec() + argument_spec.update(dict( + name=dict(required=True, type='str'), + role=dict(required=True, choices=['primary', 'secondary']), + noSync=dict(required=False, type='bool', default=False), + force=dict(required=False, type='bool', default=False), + ssid=dict(required=True, type='str'), + api_url=dict(required=True), + api_username=dict(required=False), + api_password=dict(required=False, no_log=True), + )) + + module = AnsibleModule(argument_spec=argument_spec) + + p = module.params + + ssid = p.pop('ssid') + api_url = p.pop('api_url') + user = p.pop('api_username') + pwd = p.pop('api_password') + name = p.pop('name') + + if not api_url.endswith('/'): + api_url += '/' + + agm_exists, has_desired_role, async_id, amg_data = has_match(module, ssid, api_url, pwd, user, p, name) + + if not agm_exists: + module.fail_json(msg="No Async Mirror Group with the name: '%s' was found" % name) + elif has_desired_role: + module.exit_json(changed=False, **amg_data) + + else: + amg_data = update_amg(module, ssid, api_url, user, pwd, p, async_id) + if amg_data: + module.exit_json(changed=True, **amg_data) + else: + module.exit_json(changed=True, msg="AMG role changed.") + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/storage/netapp/netapp_e_amg_sync.py b/plugins/modules/storage/netapp/netapp_e_amg_sync.py new file mode 100644 index 0000000000..f0025ff86a --- /dev/null +++ b/plugins/modules/storage/netapp/netapp_e_amg_sync.py @@ -0,0 +1,260 @@ +#!/usr/bin/python + +# (c) 2016, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: netapp_e_amg_sync +short_description: NetApp E-Series conduct synchronization actions on asynchronous mirror groups. +description: + - Allows for the initialization, suspension and resumption of an asynchronous mirror group's synchronization for NetApp E-series storage arrays. +author: Kevin Hulquest (@hulquest) +options: + api_username: + required: true + description: + - The username to authenticate with the SANtricity WebServices Proxy or embedded REST API. + api_password: + required: true + description: + - The password to authenticate with the SANtricity WebServices Proxy or embedded REST API. 
+ api_url: + required: true + description: + - The url to the SANtricity WebServices Proxy or embedded REST API. + validate_certs: + required: false + default: true + description: + - Should https certificates be validated? + type: bool + ssid: + description: + - The ID of the storage array containing the AMG you wish to target + name: + description: + - The name of the async mirror group you wish to target + required: yes + state: + description: + - The synchronization action you'd like to take. + - If C(running) then it will begin syncing if there is no active sync or will resume a suspended sync. If there is already a sync in + progress, it will return with an OK status. + - If C(suspended) it will suspend any ongoing sync action, but return OK if there is no active sync or if the sync is already suspended + choices: + - running + - suspended + required: yes + delete_recovery_point: + description: + - Indicates whether the failures point can be deleted on the secondary if necessary to achieve the synchronization. + - If true, and if the amount of unsynchronized data exceeds the CoW repository capacity on the secondary for any member volume, the last + failures point will be deleted and synchronization will continue. + - If false, the synchronization will be suspended if the amount of unsynchronized data exceeds the CoW Repository capacity on the secondary + and the failures point will be preserved. + - "NOTE: This only has impact for newly launched syncs." + type: bool + default: no +''' +EXAMPLES = """ + - name: start AMG async + netapp_e_amg_sync: + name: "{{ amg_sync_name }}" + state: running + ssid: "{{ ssid }}" + api_url: "{{ netapp_api_url }}" + api_username: "{{ netapp_api_username }}" + api_password: "{{ netapp_api_password }}" +""" +RETURN = """ +json: + description: The object attributes of the AMG. 
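+    # This is the raw async-mirror-group object from the /async-mirrors endpoint; the same object is returned
+    # unchanged when the group is already in the requested sync state.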
+ returned: success + type: str + example: + { + "changed": false, + "connectionType": "fc", + "groupRef": "3700000060080E5000299C24000006EF57ACAC70", + "groupState": "optimal", + "id": "3700000060080E5000299C24000006EF57ACAC70", + "label": "made_with_ansible", + "localRole": "primary", + "mirrorChannelRemoteTarget": "9000000060080E5000299C24005B06E557AC7EEC", + "orphanGroup": false, + "recoveryPointAgeAlertThresholdMinutes": 20, + "remoteRole": "secondary", + "remoteTarget": { + "nodeName": { + "ioInterfaceType": "fc", + "iscsiNodeName": null, + "remoteNodeWWN": "20040080E5299F1C" + }, + "remoteRef": "9000000060080E5000299C24005B06E557AC7EEC", + "scsiinitiatorTargetBaseProperties": { + "ioInterfaceType": "fc", + "iscsiinitiatorTargetBaseParameters": null + } + }, + "remoteTargetId": "ansible2", + "remoteTargetName": "Ansible2", + "remoteTargetWwn": "60080E5000299F880000000056A25D56", + "repositoryUtilizationWarnThreshold": 80, + "roleChangeProgress": "none", + "syncActivity": "idle", + "syncCompletionTimeAlertThresholdMinutes": 10, + "syncIntervalMinutes": 10, + "worldWideName": "60080E5000299C24000006EF57ACAC70" + } +""" +import json + +from ansible.module_utils.api import basic_auth_argument_spec +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.six.moves.urllib.error import HTTPError +from ansible.module_utils.urls import open_url + + +def request(url, data=None, headers=None, method='GET', use_proxy=True, + force=False, last_mod_time=None, timeout=10, validate_certs=True, + url_username=None, url_password=None, http_agent=None, force_basic_auth=True, ignore_errors=False): + try: + r = open_url(url=url, data=data, headers=headers, method=method, use_proxy=use_proxy, + force=force, last_mod_time=last_mod_time, timeout=timeout, validate_certs=validate_certs, + url_username=url_username, url_password=url_password, http_agent=http_agent, + force_basic_auth=force_basic_auth) + except HTTPError as e: + r = e.fp + + try: + raw_data = r.read() + if raw_data: + data = json.loads(raw_data) + else: + raw_data = None + except Exception: + if ignore_errors: + pass + else: + raise Exception(raw_data) + + resp_code = r.getcode() + + if resp_code >= 400 and not ignore_errors: + raise Exception(resp_code, data) + else: + return resp_code, data + + +class AMGsync(object): + def __init__(self): + argument_spec = basic_auth_argument_spec() + argument_spec.update(dict( + api_username=dict(type='str', required=True), + api_password=dict(type='str', required=True, no_log=True), + api_url=dict(type='str', required=True), + name=dict(required=True, type='str'), + ssid=dict(required=True, type='str'), + state=dict(required=True, type='str', choices=['running', 'suspended']), + delete_recovery_point=dict(required=False, type='bool', default=False) + )) + self.module = AnsibleModule(argument_spec=argument_spec) + args = self.module.params + self.name = args['name'] + self.ssid = args['ssid'] + self.state = args['state'] + self.delete_recovery_point = args['delete_recovery_point'] + try: + self.user = args['api_username'] + self.pwd = args['api_password'] + self.url = args['api_url'] + except KeyError: + self.module.fail_json(msg="You must pass in api_username" + "and api_password and api_url to the module.") + self.certs = args['validate_certs'] + + self.post_headers = { + "Accept": "application/json", + "Content-Type": "application/json" + } + self.amg_id, self.amg_obj = self.get_amg() + + def get_amg(self): + endpoint = self.url + '/storage-systems/%s/async-mirrors' % self.ssid + 
(rc, amg_objs) = request(endpoint, url_username=self.user, url_password=self.pwd, validate_certs=self.certs,
+                                 headers=self.post_headers)
+        try:
+            amg_obj = [amg for amg in amg_objs if amg['label'] == self.name][0]
+            amg_id = amg_obj['id']
+        except IndexError:
+            self.module.fail_json(
+                msg="There is no async mirror group %s associated with storage array %s" % (self.name, self.ssid))
+        return amg_id, amg_obj
+
+    @property
+    def current_state(self):
+        amg_id, amg_obj = self.get_amg()
+        return amg_obj['syncActivity']
+
+    def run_sync_action(self):
+        # If we get to this point we know that the states differ, and there is no 'err' state,
+        # so no need to revalidate
+
+        post_body = dict()
+        if self.state == 'running':
+            if self.current_state == 'idle':
+                if self.delete_recovery_point:
+                    post_body.update(dict(deleteRecoveryPointIfNecessary=self.delete_recovery_point))
+                suffix = 'sync'
+            else:
+                # In a suspended state
+                suffix = 'resume'
+        else:
+            suffix = 'suspend'
+
+        endpoint = self.url + "/storage-systems/%s/async-mirrors/%s/%s" % (self.ssid, self.amg_id, suffix)
+
+        (rc, resp) = request(endpoint, method='POST', url_username=self.user, url_password=self.pwd,
+                             validate_certs=self.certs, data=json.dumps(post_body), headers=self.post_headers,
+                             ignore_errors=True)
+
+        if not str(rc).startswith('2'):
+            self.module.fail_json(msg=str(resp['errorMessage']))
+
+        return resp
+
+    def apply(self):
+        changed = False
+        state_map = dict(
+            running=['active'],
+            suspended=['userSuspended', 'internallySuspended', 'paused'],
+            err=['unknown', '_UNDEFINED'])
+
+        if self.current_state not in state_map[self.state]:
+            if self.current_state in state_map['err']:
+                self.module.fail_json(
+                    msg="The sync is in a state of '%s', which requires manual intervention. "
+                        "Please investigate and try again" % self.current_state)
+            self.amg_obj = self.run_sync_action()
+            changed = True
+
+        (ret, amg) = self.get_amg()
+        self.module.exit_json(changed=changed, **amg)
+
+
+def main():
+    sync = AMGsync()
+    sync.apply()
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/storage/netapp/netapp_e_asup.py b/plugins/modules/storage/netapp/netapp_e_asup.py
new file mode 100644
index 0000000000..bb82400d45
--- /dev/null
+++ b/plugins/modules/storage/netapp/netapp_e_asup.py
@@ -0,0 +1,309 @@
+#!/usr/bin/python
+
+# (c) 2018, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+DOCUMENTATION = '''
+---
+module: netapp_e_asup
+short_description: NetApp E-Series manage auto-support settings
+description:
+    - Allow the auto-support settings to be configured for an individual E-Series storage-system
+author: Michael Price (@lmprice)
+extends_documentation_fragment:
+- netapp.ontap.netapp.eseries
+
+options:
+    state:
+        description:
+            - Enable/disable the E-Series auto-support configuration.
+            - When this option is enabled, configuration, logs, and other support-related information will be relayed
+              to NetApp to help better support your system. No personally identifiable information, passwords, etc.,
+              will be collected.
+        default: enabled
+        choices:
+            - enabled
+            - disabled
+        aliases:
+            - asup
+            - auto_support
+            - autosupport
+    active:
+        description:
+            - Enable active/proactive monitoring for ASUP. When a problem is detected by our monitoring systems, it's
+              possible that the bundle did not contain all of the required information at the time of the event.
+              Enabling this option allows NetApp support personnel to manually request transmission or re-transmission
+              of support data in order to resolve the problem.
+            - Only applicable if I(state=enabled).
+        default: yes
+        type: bool
+    start:
+        description:
+            - A start hour may be specified in a range from 0 to 23 hours.
+            - ASUP bundles will be sent daily between the provided start and end time (UTC).
+            - I(start) must be less than I(end).
+        aliases:
+            - start_time
+        default: 0
+    end:
+        description:
+            - An end hour may be specified in a range from 1 to 24 hours.
+            - ASUP bundles will be sent daily between the provided start and end time (UTC).
+            - I(start) must be less than I(end).
+        aliases:
+            - end_time
+        default: 24
+    days:
+        description:
+            - A list of days of the week that ASUP bundles will be sent. A larger, weekly bundle will be sent on one
+              of the provided days.
+        choices:
+            - monday
+            - tuesday
+            - wednesday
+            - thursday
+            - friday
+            - saturday
+            - sunday
+        required: no
+        aliases:
+            - days_of_week
+            - schedule_days
+    verbose:
+        description:
+            - Provide the full ASUP configuration in the return.
+        default: no
+        required: no
+        type: bool
+    log_path:
+        description:
+            - A local path to a file to be used for debug logging
+        required: no
+notes:
+    - Check mode is supported.
+    - Enabling ASUP will allow our support teams to monitor the logs of the storage-system in order to proactively
+      respond to issues with the system. It is recommended that all ASUP-related options be enabled, but they may be
+      disabled if desired.
+    - This API is currently only supported with the Embedded Web Services API v2.0 and higher.
+'''
+
+EXAMPLES = """
+    - name: Enable ASUP and allow pro-active retrieval of bundles
+      netapp_e_asup:
+        state: enabled
+        active: yes
+        api_url: "10.1.1.1:8443"
+        api_username: "admin"
+        api_password: "myPass"
+
+    - name: Set the ASUP schedule to only send bundles from 12 AM CST to 3 AM CST.
+      netapp_e_asup:
+        start: 17
+        end: 20
+        api_url: "10.1.1.1:8443"
+        api_username: "admin"
+        api_password: "myPass"
+"""
+
+RETURN = """
+msg:
+    description: Success message
+    returned: on success
+    type: str
+    sample: The settings have been updated.
+asup:
+    description:
+        - True if ASUP is enabled.
+    returned: on success
+    sample: True
+    type: bool
+active:
+    description:
+        - True if the active option has been enabled.
+    returned: on success
+    sample: True
+    type: bool
+cfg:
+    description:
+        - Provide the full ASUP configuration.
+    returned: on success when I(verbose=true).
+    type: complex
+    contains:
+        asupEnabled:
+            description:
+                - True if ASUP has been enabled.
+            type: bool
+        onDemandEnabled:
+            description:
+                - True if ASUP active monitoring has been enabled.
+            type: bool
+        daysOfWeek:
+            description:
+                - The days of the week that ASUP bundles will be sent.
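+                # Day names are lower-case, e.g. ["sunday", "monday"]; see DAYS_OPTIONS in the Asup class below.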
+ type: list +""" + +import json +import logging +from pprint import pformat + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.netapp.ontap.plugins.module_utils.netapp import request, eseries_host_argument_spec +from ansible.module_utils._text import to_native + +HEADERS = { + "Content-Type": "application/json", + "Accept": "application/json", +} + + +class Asup(object): + DAYS_OPTIONS = ['sunday', 'monday', 'tuesday', 'wednesday', 'thursday', 'friday', 'saturday'] + + def __init__(self): + argument_spec = eseries_host_argument_spec() + argument_spec.update(dict( + state=dict(type='str', required=False, default='enabled', aliases=['asup', 'auto_support', 'autosupport'], + choices=['enabled', 'disabled']), + active=dict(type='bool', required=False, default=True, ), + days=dict(type='list', required=False, aliases=['schedule_days', 'days_of_week'], + choices=self.DAYS_OPTIONS), + start=dict(type='int', required=False, default=0, aliases=['start_time']), + end=dict(type='int', required=False, default=24, aliases=['end_time']), + verbose=dict(type='bool', required=False, default=False), + log_path=dict(type='str', required=False), + )) + + self.module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True, ) + args = self.module.params + self.asup = args['state'] == 'enabled' + self.active = args['active'] + self.days = args['days'] + self.start = args['start'] + self.end = args['end'] + self.verbose = args['verbose'] + + self.ssid = args['ssid'] + self.url = args['api_url'] + self.creds = dict(url_password=args['api_password'], + validate_certs=args['validate_certs'], + url_username=args['api_username'], ) + + self.check_mode = self.module.check_mode + + log_path = args['log_path'] + + # logging setup + self._logger = logging.getLogger(self.__class__.__name__) + + if log_path: + logging.basicConfig( + level=logging.DEBUG, filename=log_path, filemode='w', + format='%(relativeCreated)dms %(levelname)s %(module)s.%(funcName)s:%(lineno)d\n %(message)s') + + if not self.url.endswith('/'): + self.url += '/' + + if self.start >= self.end: + self.module.fail_json(msg="The value provided for the start time is invalid." + " It must be less than the end time.") + if self.start < 0 or self.start > 23: + self.module.fail_json(msg="The value provided for the start time is invalid. It must be between 0 and 23.") + else: + self.start = self.start * 60 + if self.end < 1 or self.end > 24: + self.module.fail_json(msg="The value provided for the end time is invalid. It must be between 1 and 24.") + else: + self.end = min(self.end * 60, 1439) + + if not self.days: + self.days = self.DAYS_OPTIONS + + def get_configuration(self): + try: + (rc, result) = request(self.url + 'device-asup', headers=HEADERS, **self.creds) + + if not (result['asupCapable'] and result['onDemandCapable']): + self.module.fail_json(msg="ASUP is not supported on this device. Array Id [%s]." % (self.ssid)) + return result + + except Exception as err: + self.module.fail_json(msg="Failed to retrieve ASUP configuration! Array Id [%s]. Error [%s]." 
+ % (self.ssid, to_native(err))) + + def update_configuration(self): + config = self.get_configuration() + update = False + body = dict() + + if self.asup: + body = dict(asupEnabled=True) + if not config['asupEnabled']: + update = True + + if (config['onDemandEnabled'] and config['remoteDiagsEnabled']) != self.active: + update = True + body.update(dict(onDemandEnabled=self.active, + remoteDiagsEnabled=self.active)) + self.days.sort() + config['schedule']['daysOfWeek'].sort() + + body['schedule'] = dict(daysOfWeek=self.days, + dailyMinTime=self.start, + dailyMaxTime=self.end, + weeklyMinTime=self.start, + weeklyMaxTime=self.end) + + if self.days != config['schedule']['daysOfWeek']: + update = True + if self.start != config['schedule']['dailyMinTime'] or self.start != config['schedule']['weeklyMinTime']: + update = True + elif self.end != config['schedule']['dailyMaxTime'] or self.end != config['schedule']['weeklyMaxTime']: + update = True + + elif config['asupEnabled']: + body = dict(asupEnabled=False) + update = True + + self._logger.info(pformat(body)) + + if update and not self.check_mode: + try: + (rc, result) = request(self.url + 'device-asup', method='POST', + data=json.dumps(body), headers=HEADERS, **self.creds) + # This is going to catch cases like a connection failure + except Exception as err: + self.module.fail_json(msg="We failed to set the storage-system name! Array Id [%s]. Error [%s]." + % (self.ssid, to_native(err))) + + return update + + def update(self): + update = self.update_configuration() + cfg = self.get_configuration() + if self.verbose: + self.module.exit_json(msg="The ASUP settings have been updated.", changed=update, + asup=cfg['asupEnabled'], active=cfg['onDemandEnabled'], cfg=cfg) + else: + self.module.exit_json(msg="The ASUP settings have been updated.", changed=update, + asup=cfg['asupEnabled'], active=cfg['onDemandEnabled']) + + def __call__(self, *args, **kwargs): + self.update() + + +def main(): + settings = Asup() + settings() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/storage/netapp/netapp_e_auditlog.py b/plugins/modules/storage/netapp/netapp_e_auditlog.py new file mode 100644 index 0000000000..95ec778b93 --- /dev/null +++ b/plugins/modules/storage/netapp/netapp_e_auditlog.py @@ -0,0 +1,281 @@ +#!/usr/bin/python + +# (c) 2018, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: netapp_e_auditlog +short_description: NetApp E-Series manage audit-log configuration +description: + - This module allows an e-series storage system owner to set audit-log configuration parameters. +author: Nathan Swartz (@ndswartz) +extends_documentation_fragment: +- netapp.ontap.netapp.eseries + +options: + max_records: + description: + - The maximum number log messages audit-log will retain. + - Max records must be between and including 100 and 50000. + default: 50000 + log_level: + description: Filters the log messages according to the specified log level selection. + choices: + - all + - writeOnly + default: writeOnly + full_policy: + description: Specifies what audit-log should do once the number of entries approach the record limit. 
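+        # With preventSystemAccess, a full log makes the configuration POST return HTTP 422; I(force) then
+        # clears the existing messages and retries (see update_configuration() below).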
+ choices: + - overWrite + - preventSystemAccess + default: overWrite + threshold: + description: + - This is the memory full percent threshold that audit-log will start issuing warning messages. + - Percent range must be between and including 60 and 90. + default: 90 + force: + description: + - Forces the audit-log configuration to delete log history when log messages fullness cause immediate + warning or full condition. + - Warning! This will cause any existing audit-log messages to be deleted. + - This is only applicable for I(full_policy=preventSystemAccess). + type: bool + default: no + log_path: + description: A local path to a file to be used for debug logging. + required: no +notes: + - Check mode is supported. + - This module is currently only supported with the Embedded Web Services API v3.0 and higher. +''' + +EXAMPLES = """ +- name: Define audit-log to prevent system access if records exceed 50000 with warnings occurring at 60% capacity. + netapp_e_auditlog: + api_url: "https://{{ netapp_e_api_host }}/devmgr/v2" + api_username: "{{ netapp_e_api_username }}" + api_password: "{{ netapp_e_api_password }}" + ssid: "{{ netapp_e_ssid }}" + validate_certs: no + max_records: 50000 + log_level: all + full_policy: preventSystemAccess + threshold: 60 + log_path: /path/to/log_file.log +- name: Define audit-log utilize the default values. + netapp_e_auditlog: + api_url: "https://{{ netapp_e_api_host }}/devmgr/v2" + api_username: "{{ netapp_e_api_username }}" + api_password: "{{ netapp_e_api_password }}" + ssid: "{{ netapp_e_ssid }}" +- name: Force audit-log configuration when full or warning conditions occur while enacting preventSystemAccess policy. + netapp_e_auditlog: + api_url: "https://{{ netapp_e_api_host }}/devmgr/v2" + api_username: "{{ netapp_e_api_username }}" + api_password: "{{ netapp_e_api_password }}" + ssid: "{{ netapp_e_ssid }}" + max_records: 5000 + log_level: all + full_policy: preventSystemAccess + threshold: 60 + force: yes +""" + +RETURN = """ +msg: + description: Success message + returned: on success + type: str + sample: The settings have been updated. 
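+# In check mode the module computes the would-be change but skips the POST (see update_configuration()).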
+""" + +import json +import logging +from pprint import pformat + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.netapp.ontap.plugins.module_utils.netapp import request, eseries_host_argument_spec +from ansible.module_utils._text import to_native + +try: + from urlparse import urlparse, urlunparse +except Exception: + from urllib.parse import urlparse, urlunparse + + +class AuditLog(object): + """Audit-log module configuration class.""" + MAX_RECORDS = 50000 + HEADERS = {"Content-Type": "application/json", + "Accept": "application/json"} + + def __init__(self): + argument_spec = eseries_host_argument_spec() + argument_spec.update(dict( + max_records=dict(type="int", default=50000), + log_level=dict(type="str", default="writeOnly", choices=["all", "writeOnly"]), + full_policy=dict(type="str", default="overWrite", choices=["overWrite", "preventSystemAccess"]), + threshold=dict(type="int", default=90), + force=dict(type="bool", default=False), + log_path=dict(type='str', required=False))) + + self.module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True) + args = self.module.params + + self.max_records = args["max_records"] + if self.max_records < 100 or self.max_records > self.MAX_RECORDS: + self.module.fail_json(msg="Audit-log max_records count must be between 100 and 50000: [%s]" + % self.max_records) + self.threshold = args["threshold"] + if self.threshold < 60 or self.threshold > 90: + self.module.fail_json(msg="Audit-log percent threshold must be between 60 and 90: [%s]" % self.threshold) + self.log_level = args["log_level"] + self.full_policy = args["full_policy"] + self.force = args["force"] + self.ssid = args['ssid'] + self.url = args['api_url'] + if not self.url.endswith('/'): + self.url += '/' + self.creds = dict(url_password=args['api_password'], + validate_certs=args['validate_certs'], + url_username=args['api_username'], ) + + # logging setup + log_path = args['log_path'] + self._logger = logging.getLogger(self.__class__.__name__) + + if log_path: + logging.basicConfig( + level=logging.DEBUG, filename=log_path, filemode='w', + format='%(relativeCreated)dms %(levelname)s %(module)s.%(funcName)s:%(lineno)d\n %(message)s') + + self.proxy_used = self.is_proxy() + self._logger.info(self.proxy_used) + self.check_mode = self.module.check_mode + + def is_proxy(self): + """Determine whether the API is embedded or proxy.""" + try: + + # replace http url path with devmgr/utils/about + about_url = list(urlparse(self.url)) + about_url[2] = "devmgr/utils/about" + about_url = urlunparse(about_url) + + rc, data = request(about_url, timeout=300, headers=self.HEADERS, **self.creds) + + return data["runningAsProxy"] + except Exception as err: + self.module.fail_json(msg="Failed to retrieve the webservices about information! Array Id [%s]. Error [%s]." + % (self.ssid, to_native(err))) + + def get_configuration(self): + """Retrieve the existing audit-log configurations. + + :returns: dictionary containing current audit-log configuration + """ + try: + if self.proxy_used: + rc, data = request(self.url + "audit-log/config", timeout=300, headers=self.HEADERS, **self.creds) + else: + rc, data = request(self.url + "storage-systems/%s/audit-log/config" % self.ssid, + timeout=300, headers=self.HEADERS, **self.creds) + return data + except Exception as err: + self.module.fail_json(msg="Failed to retrieve the audit-log configuration! " + "Array Id [%s]. Error [%s]." 
+ % (self.ssid, to_native(err))) + + def build_configuration(self): + """Build audit-log expected configuration. + + :returns: Tuple containing update boolean value and dictionary of audit-log configuration + """ + config = self.get_configuration() + + current = dict(auditLogMaxRecords=config["auditLogMaxRecords"], + auditLogLevel=config["auditLogLevel"], + auditLogFullPolicy=config["auditLogFullPolicy"], + auditLogWarningThresholdPct=config["auditLogWarningThresholdPct"]) + + body = dict(auditLogMaxRecords=self.max_records, + auditLogLevel=self.log_level, + auditLogFullPolicy=self.full_policy, + auditLogWarningThresholdPct=self.threshold) + + update = current != body + + self._logger.info(pformat(update)) + self._logger.info(pformat(body)) + return update, body + + def delete_log_messages(self): + """Delete all audit-log messages.""" + self._logger.info("Deleting audit-log messages...") + try: + if self.proxy_used: + rc, result = request(self.url + "audit-log?clearAll=True", timeout=300, + method="DELETE", headers=self.HEADERS, **self.creds) + else: + rc, result = request(self.url + "storage-systems/%s/audit-log?clearAll=True" % self.ssid, timeout=300, + method="DELETE", headers=self.HEADERS, **self.creds) + except Exception as err: + self.module.fail_json(msg="Failed to delete audit-log messages! Array Id [%s]. Error [%s]." + % (self.ssid, to_native(err))) + + def update_configuration(self, update=None, body=None, attempt_recovery=True): + """Update audit-log configuration.""" + if update is None or body is None: + update, body = self.build_configuration() + + if update and not self.check_mode: + try: + if self.proxy_used: + rc, result = request(self.url + "storage-systems/audit-log/config", timeout=300, + data=json.dumps(body), method='POST', headers=self.HEADERS, + ignore_errors=True, **self.creds) + else: + rc, result = request(self.url + "storage-systems/%s/audit-log/config" % self.ssid, timeout=300, + data=json.dumps(body), method='POST', headers=self.HEADERS, + ignore_errors=True, **self.creds) + + if rc == 422: + if self.force and attempt_recovery: + self.delete_log_messages() + update = self.update_configuration(update, body, False) + else: + self.module.fail_json(msg="Failed to update audit-log configuration! Array Id [%s]. Error [%s]." + % (self.ssid, to_native(rc, result))) + + except Exception as error: + self.module.fail_json(msg="Failed to update audit-log configuration! Array Id [%s]. Error [%s]." + % (self.ssid, to_native(error))) + return update + + def update(self): + """Update the audit-log configuration.""" + update = self.update_configuration() + self.module.exit_json(msg="Audit-log update complete", changed=update) + + def __call__(self): + self.update() + + +def main(): + auditlog = AuditLog() + auditlog() + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/storage/netapp/netapp_e_auth.py b/plugins/modules/storage/netapp/netapp_e_auth.py new file mode 100644 index 0000000000..352fe0106d --- /dev/null +++ b/plugins/modules/storage/netapp/netapp_e_auth.py @@ -0,0 +1,275 @@ +#!/usr/bin/python + +# (c) 2016, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: netapp_e_auth +short_description: NetApp E-Series set or update the password for a storage array. 
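+# The password is first set on the array itself and then stored in the proxy's entry for the array
+# (see set_password() and update_storage_system_pwd() below).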
+description:
+    - Sets or updates the password for a storage array. When the password is updated on the storage array, it must be updated on the SANtricity Web
+      Services proxy. Note that not all storage arrays have a Monitor or RO role.
+author: Kevin Hulquest (@hulquest)
+options:
+    validate_certs:
+        required: false
+        default: true
+        description:
+        - Should https certificates be validated?
+        type: bool
+    name:
+        description:
+            - The name of the storage array. Note that if more than one storage array with this name is detected, the task will fail and you'll have to use
+              the ID instead.
+        required: False
+    ssid:
+        description:
+            - The identifier of the storage array in the Web Services Proxy.
+        required: False
+    set_admin:
+        description:
+            - Boolean value on whether to update the admin password. If set to false then the RO account is updated.
+        type: bool
+        default: False
+    current_password:
+        description:
+            - The current admin password. This is not required if the password hasn't been set before.
+        required: False
+    new_password:
+        description:
+            - The password you would like to set. Cannot be more than 30 characters.
+        required: True
+    api_url:
+        description:
+            - The full API url.
+            - "Example: http://ENDPOINT:8080/devmgr/v2"
+            - This can optionally be set via an environment variable, API_URL
+        required: False
+    api_username:
+        description:
+            - The username used to authenticate against the API
+            - This can optionally be set via an environment variable, API_USERNAME
+        required: False
+    api_password:
+        description:
+            - The password used to authenticate against the API
+            - This can optionally be set via an environment variable, API_PASSWORD
+        required: False
+'''
+
+EXAMPLES = '''
+- name: Test module
+  netapp_e_auth:
+    name: trex
+    current_password: OldPasswd
+    new_password: NewPasswd
+    set_admin: yes
+    api_url: '{{ netapp_api_url }}'
+    api_username: '{{ netapp_api_username }}'
+    api_password: '{{ netapp_api_password }}'
+'''
+
+RETURN = '''
+msg:
+    description: Success message
+    returned: success
+    type: str
+    sample: "Password Updated Successfully"
+'''
+import json
+import traceback
+
+from ansible.module_utils.api import basic_auth_argument_spec
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves.urllib.error import HTTPError
+from ansible.module_utils._text import to_native
+from ansible.module_utils.urls import open_url
+
+HEADERS = {
+    "Content-Type": "application/json",
+    "Accept": "application/json",
+    "x-netapp-password-validate-method": "none"
+
+}
+
+
+def request(url, data=None, headers=None, method='GET', use_proxy=True,
+            force=False, last_mod_time=None, timeout=10, validate_certs=True,
+            url_username=None, url_password=None, http_agent=None, force_basic_auth=True, ignore_errors=False):
+    try:
+        r = open_url(url=url, data=data, headers=headers, method=method, use_proxy=use_proxy,
+                     force=force, last_mod_time=last_mod_time, timeout=timeout, validate_certs=validate_certs,
+                     url_username=url_username, url_password=url_password, http_agent=http_agent,
+                     force_basic_auth=force_basic_auth)
+    except HTTPError as e:
+        r = e.fp
+
+    try:
+        raw_data = r.read()
+        if raw_data:
+            data = json.loads(raw_data)
+        else:
+            raw_data = None
+    except Exception:
+        if ignore_errors:
+            pass
+        else:
+            raise Exception(raw_data)
+
+    resp_code = r.getcode()
+
+    if resp_code >= 400 and not ignore_errors:
+        raise Exception(resp_code, data)
+    else:
+        return resp_code, data
+
+
+def get_ssid(module, name, api_url, user, pwd):
+    count = 0
+    all_systems = 
'storage-systems' + systems_url = api_url + all_systems + rc, data = request(systems_url, headers=HEADERS, url_username=user, url_password=pwd, + validate_certs=module.validate_certs) + for system in data: + if system['name'] == name: + count += 1 + if count > 1: + module.fail_json( + msg="You supplied a name for the Storage Array but more than 1 array was found with that name. " + + "Use the id instead") + else: + ssid = system['id'] + else: + continue + + if count == 0: + module.fail_json(msg="No storage array with the name %s was found" % name) + + else: + return ssid + + +def get_pwd_status(module, ssid, api_url, user, pwd): + pwd_status = "storage-systems/%s/passwords" % ssid + url = api_url + pwd_status + try: + rc, data = request(url, headers=HEADERS, url_username=user, url_password=pwd, + validate_certs=module.validate_certs) + return data['readOnlyPasswordSet'], data['adminPasswordSet'] + except HTTPError as e: + module.fail_json(msg="There was an issue with connecting, please check that your " + "endpoint is properly defined and your credentials are correct: %s" % to_native(e)) + + +def update_storage_system_pwd(module, ssid, pwd, api_url, api_usr, api_pwd): + """Update the stored storage-system password""" + update_pwd = 'storage-systems/%s' % ssid + url = api_url + update_pwd + post_body = json.dumps(dict(storedPassword=pwd)) + try: + rc, data = request(url, data=post_body, method='POST', headers=HEADERS, url_username=api_usr, + url_password=api_pwd, validate_certs=module.validate_certs) + return rc, data + except Exception as e: + module.fail_json(msg="Failed to update system password. Id [%s]. Error [%s]" % (ssid, to_native(e))) + + +def set_password(module, ssid, api_url, user, pwd, current_password=None, new_password=None, set_admin=False): + """Set the storage-system password""" + set_pass = "storage-systems/%s/passwords" % ssid + url = api_url + set_pass + + if not current_password: + current_password = "" + + post_body = json.dumps( + dict(currentAdminPassword=current_password, adminPassword=set_admin, newPassword=new_password)) + + try: + rc, data = request(url, method='POST', data=post_body, headers=HEADERS, url_username=user, url_password=pwd, + ignore_errors=True, validate_certs=module.validate_certs) + except Exception as e: + module.fail_json(msg="Failed to set system password. Id [%s]. Error [%s]" % (ssid, to_native(e)), + exception=traceback.format_exc()) + + if rc == 422: + post_body = json.dumps(dict(currentAdminPassword='', adminPassword=set_admin, newPassword=new_password)) + try: + rc, data = request(url, method='POST', data=post_body, headers=HEADERS, url_username=user, url_password=pwd, + validate_certs=module.validate_certs) + except Exception: + # TODO(lorenp): Resolve ignored rc, data + module.fail_json(msg="Wrong or no admin password supplied. Please update your playbook and try again") + + if int(rc) >= 300: + module.fail_json(msg="Failed to set system password. Id [%s] Code [%s]. 
Error [%s]" % (ssid, rc, data)) + + rc, update_data = update_storage_system_pwd(module, ssid, new_password, api_url, user, pwd) + + if int(rc) < 300: + return update_data + else: + module.fail_json(msg="%s:%s" % (rc, update_data)) + + +def main(): + argument_spec = basic_auth_argument_spec() + argument_spec.update(dict( + name=dict(required=False, type='str'), + ssid=dict(required=False, type='str'), + current_password=dict(required=False, no_log=True), + new_password=dict(required=True, no_log=True), + set_admin=dict(required=True, type='bool'), + api_url=dict(required=True), + api_username=dict(required=False), + api_password=dict(required=False, no_log=True) + ) + ) + module = AnsibleModule(argument_spec=argument_spec, mutually_exclusive=[['name', 'ssid']], + required_one_of=[['name', 'ssid']]) + + name = module.params['name'] + ssid = module.params['ssid'] + current_password = module.params['current_password'] + new_password = module.params['new_password'] + set_admin = module.params['set_admin'] + user = module.params['api_username'] + pwd = module.params['api_password'] + api_url = module.params['api_url'] + module.validate_certs = module.params['validate_certs'] + + if not api_url.endswith('/'): + api_url += '/' + + if name: + ssid = get_ssid(module, name, api_url, user, pwd) + + ro_pwd, admin_pwd = get_pwd_status(module, ssid, api_url, user, pwd) + + if admin_pwd and not current_password: + module.fail_json( + msg="Admin account has a password set. " + + "You must supply current_password in order to update the RO or Admin passwords") + + if len(new_password) > 30: + module.fail_json(msg="Passwords must not be greater than 30 characters in length") + + result = set_password(module, ssid, api_url, user, pwd, current_password=current_password, + new_password=new_password, set_admin=set_admin) + + module.exit_json(changed=True, msg="Password Updated Successfully", + password_set=result['passwordSet'], + password_status=result['passwordStatus']) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/storage/netapp/netapp_e_drive_firmware.py b/plugins/modules/storage/netapp/netapp_e_drive_firmware.py new file mode 100644 index 0000000000..5aaf386511 --- /dev/null +++ b/plugins/modules/storage/netapp/netapp_e_drive_firmware.py @@ -0,0 +1,215 @@ +#!/usr/bin/python + +# (c) 2016, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: netapp_e_drive_firmware +short_description: NetApp E-Series manage drive firmware +description: + - Ensure drive firmware version is activated on specified drive model. +author: + - Nathan Swartz (@ndswartz) +extends_documentation_fragment: +- netapp.ontap.netapp.eseries + +options: + firmware: + description: + - list of drive firmware file paths. + - NetApp E-Series drives require special firmware which can be downloaded from https://mysupport.netapp.com/NOW/download/tools/diskfw_eseries/ + type: list + required: True + wait_for_completion: + description: + - This flag will cause module to wait for any upgrade actions to complete. + type: bool + default: false + ignore_inaccessible_drives: + description: + - This flag will determine whether drive firmware upgrade should fail if any affected drives are inaccessible. 
+        type: bool
+        default: false
+    upgrade_drives_online:
+        description:
+            - This flag will determine whether drive firmware can be upgraded while drives are accepting I/O.
+            - When I(upgrade_drives_online=False) stop all I/O before running the task.
+        type: bool
+        default: true
+'''
+EXAMPLES = """
+- name: Ensure correct firmware versions
+  netapp_e_drive_firmware:
+    ssid: "1"
+    api_url: "https://192.168.1.100:8443/devmgr/v2"
+    api_username: "admin"
+    api_password: "adminpass"
+    validate_certs: true
+    firmware: "path/to/drive_firmware"
+    wait_for_completion: true
+    ignore_inaccessible_drives: false
+"""
+RETURN = """
+msg:
+    description: Whether any drive firmware was upgraded and whether it is in progress.
+    type: str
+    returned: always
+    sample:
+        { changed: True, upgrade_in_process: True }
+"""
+import os
+import re
+
+from time import sleep
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp import NetAppESeriesModule, create_multipart_formdata
+from ansible.module_utils._text import to_native, to_text, to_bytes
+
+
+class NetAppESeriesDriveFirmware(NetAppESeriesModule):
+    WAIT_TIMEOUT_SEC = 60 * 15
+
+    def __init__(self):
+        ansible_options = dict(
+            firmware=dict(type="list", required=True),
+            wait_for_completion=dict(type="bool", default=False),
+            ignore_inaccessible_drives=dict(type="bool", default=False),
+            upgrade_drives_online=dict(type="bool", default=True))
+
+        super(NetAppESeriesDriveFirmware, self).__init__(ansible_options=ansible_options,
+                                                         web_services_version="02.00.0000.0000",
+                                                         supports_check_mode=True)
+
+        args = self.module.params
+        self.firmware_list = args["firmware"]
+        self.wait_for_completion = args["wait_for_completion"]
+        self.ignore_inaccessible_drives = args["ignore_inaccessible_drives"]
+        self.upgrade_drives_online = args["upgrade_drives_online"]
+
+        self.upgrade_list_cache = None
+
+        self.upgrade_required_cache = None
+        self.upgrade_in_progress = False
+        self.drive_info_cache = None
+
+    def upload_firmware(self):
+        """Ensure firmware has been uploaded prior to the upgrade."""
+        for firmware in self.firmware_list:
+            firmware_name = os.path.basename(firmware)
+            files = [("file", firmware_name, firmware)]
+            headers, data = create_multipart_formdata(files)
+            try:
+                rc, response = self.request("/files/drive", method="POST", headers=headers, data=data)
+            except Exception as error:
+                self.module.fail_json(msg="Failed to upload drive firmware [%s]. Array [%s]. Error [%s]."
% (firmware_name, self.ssid, to_native(error))) + + def upgrade_list(self): + """Determine whether firmware is compatible with the specified drives.""" + if self.upgrade_list_cache is None: + self.upgrade_list_cache = list() + try: + rc, response = self.request("storage-systems/%s/firmware/drives" % self.ssid) + + # Create upgrade list, this ensures only the firmware uploaded is applied + for firmware in self.firmware_list: + filename = os.path.basename(firmware) + + for uploaded_firmware in response["compatibilities"]: + if uploaded_firmware["filename"] == filename: + + # Determine whether upgrade is required + drive_reference_list = [] + for drive in uploaded_firmware["compatibleDrives"]: + try: + rc, drive_info = self.request("storage-systems/%s/drives/%s" % (self.ssid, drive["driveRef"])) + + # Add drive references that are supported and differ from current firmware + if (drive_info["firmwareVersion"] != uploaded_firmware["firmwareVersion"] and + uploaded_firmware["firmwareVersion"] in uploaded_firmware["supportedFirmwareVersions"]): + + if self.ignore_inaccessible_drives or (not drive_info["offline"] and drive_info["available"]): + drive_reference_list.append(drive["driveRef"]) + + if not drive["onlineUpgradeCapable"] and self.upgrade_drives_online: + self.module.fail_json(msg="Drive is not capable of online upgrade. Array [%s]. Drive [%s]." + % (self.ssid, drive["driveRef"])) + + except Exception as error: + self.module.fail_json(msg="Failed to retrieve drive information. Array [%s]. Drive [%s]. Error [%s]." + % (self.ssid, drive["driveRef"], to_native(error))) + + if drive_reference_list: + self.upgrade_list_cache.extend([{"filename": filename, "driveRefList": drive_reference_list}]) + + except Exception as error: + self.module.fail_json(msg="Failed to complete compatibility and health check. Array [%s]. Error [%s]." % (self.ssid, to_native(error))) + + return self.upgrade_list_cache + + def wait_for_upgrade_completion(self): + """Wait for drive firmware upgrade to complete.""" + drive_references = [reference for drive in self.upgrade_list() for reference in drive["driveRefList"]] + last_status = None + for attempt in range(int(self.WAIT_TIMEOUT_SEC / 5)): + try: + rc, response = self.request("storage-systems/%s/firmware/drives/state" % self.ssid) + + # Check drive status + for status in response["driveStatus"]: + last_status = status + if status["driveRef"] in drive_references: + if status["status"] == "okay": + continue + elif status["status"] in ["inProgress", "inProgressRecon", "pending", "notAttempted"]: + break + else: + self.module.fail_json(msg="Drive firmware upgrade failed. Array [%s]. Drive [%s]. Status [%s]." + % (self.ssid, status["driveRef"], status["status"])) + else: + self.upgrade_in_progress = False + break + except Exception as error: + self.module.fail_json(msg="Failed to retrieve drive status. Array [%s]. Error [%s]." % (self.ssid, to_native(error))) + + sleep(5) + else: + self.module.fail_json(msg="Timed out waiting for drive firmware upgrade. Array [%s]. Status [%s]." % (self.ssid, last_status)) + + def upgrade(self): + """Apply firmware to applicable drives.""" + try: + rc, response = self.request("storage-systems/%s/firmware/drives/initiate-upgrade?onlineUpdate=%s" + % (self.ssid, "true" if self.upgrade_drives_online else "false"), method="POST", data=self.upgrade_list()) + self.upgrade_in_progress = True + except Exception as error: + self.module.fail_json(msg="Failed to upgrade drive firmware. Array [%s]. Error [%s]." 
% (self.ssid, to_native(error))) + + if self.wait_for_completion: + self.wait_for_upgrade_completion() + + def apply(self): + """Apply firmware policy has been enforced on E-Series storage system.""" + self.upload_firmware() + + if self.upgrade_list() and not self.module.check_mode: + self.upgrade() + + self.module.exit_json(changed=True if self.upgrade_list() else False, + upgrade_in_process=self.upgrade_in_progress) + + +def main(): + drive_firmware = NetAppESeriesDriveFirmware() + drive_firmware.apply() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/storage/netapp/netapp_e_facts.py b/plugins/modules/storage/netapp/netapp_e_facts.py new file mode 100644 index 0000000000..3be087a3ab --- /dev/null +++ b/plugins/modules/storage/netapp/netapp_e_facts.py @@ -0,0 +1,530 @@ +#!/usr/bin/python + +# (c) 2016, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +module: netapp_e_facts +short_description: NetApp E-Series retrieve facts about NetApp E-Series storage arrays +description: + - The netapp_e_facts module returns a collection of facts regarding NetApp E-Series storage arrays. +author: + - Kevin Hulquest (@hulquest) + - Nathan Swartz (@ndswartz) +extends_documentation_fragment: +- netapp.ontap.netapp.eseries + +''' + +EXAMPLES = """ +--- +- name: Get array facts + netapp_e_facts: + ssid: "1" + api_url: "https://192.168.1.100:8443/devmgr/v2" + api_username: "admin" + api_password: "adminpass" + validate_certs: true +""" + +RETURN = """ + msg: + description: Success message + returned: on success + type: str + sample: + - Gathered facts for storage array. Array ID [1]. + - Gathered facts for web services proxy. + storage_array_facts: + description: provides details about the array, controllers, management interfaces, hostside interfaces, + driveside interfaces, disks, storage pools, volumes, snapshots, and features. + returned: on successful inquiry from from embedded web services rest api + type: complex + contains: + netapp_controllers: + description: storage array controller list that contains basic controller identification and status + type: complex + sample: + - [{"name": "A", "serial": "021632007299", "status": "optimal"}, + {"name": "B", "serial": "021632007300", "status": "failed"}] + netapp_disks: + description: drive list that contains identification, type, and status information for each drive + type: complex + sample: + - [{"available": false, + "firmware_version": "MS02", + "id": "01000000500003960C8B67880000000000000000", + "media_type": "ssd", + "product_id": "PX02SMU080 ", + "serial_number": "15R0A08LT2BA", + "status": "optimal", + "tray_ref": "0E00000000000000000000000000000000000000", + "usable_bytes": "799629205504" }] + netapp_driveside_interfaces: + description: drive side interface list that contains identification, type, and speed for each interface + type: complex + sample: + - [{ "controller": "A", "interface_speed": "12g", "interface_type": "sas" }] + - [{ "controller": "B", "interface_speed": "10g", "interface_type": "iscsi" }] + netapp_enabled_features: + description: specifies the enabled features on the storage array. 
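+            # Feature names mirror the array-graph values, e.g. "flashReadCache" (see the sample below).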
+ returned: on success + type: complex + sample: + - [ "flashReadCache", "performanceTier", "protectionInformation", "secureVolume" ] + netapp_host_groups: + description: specifies the host groups on the storage arrays. + returned: on success + type: complex + sample: + - [{ "id": "85000000600A098000A4B28D003610705C40B964", "name": "group1" }] + netapp_hosts: + description: specifies the hosts on the storage arrays. + returned: on success + type: complex + sample: + - [{ "id": "8203800000000000000000000000000000000000", + "name": "host1", + "group_id": "85000000600A098000A4B28D003610705C40B964", + "host_type_index": 28, + "ports": [{ "type": "fc", "address": "1000FF7CFFFFFF01", "label": "FC_1" }, + { "type": "fc", "address": "1000FF7CFFFFFF00", "label": "FC_2" }]}] + netapp_host_types: + description: lists the available host types on the storage array. + returned: on success + type: complex + sample: + - [{ "index": 0, "type": "FactoryDefault" }, + { "index": 1, "type": "W2KNETNCL"}, + { "index": 2, "type": "SOL" }, + { "index": 5, "type": "AVT_4M" }, + { "index": 6, "type": "LNX" }, + { "index": 7, "type": "LnxALUA" }, + { "index": 8, "type": "W2KNETCL" }, + { "index": 9, "type": "AIX MPIO" }, + { "index": 10, "type": "VmwTPGSALUA" }, + { "index": 15, "type": "HPXTPGS" }, + { "index": 17, "type": "SolTPGSALUA" }, + { "index": 18, "type": "SVC" }, + { "index": 22, "type": "MacTPGSALUA" }, + { "index": 23, "type": "WinTPGSALUA" }, + { "index": 24, "type": "LnxTPGSALUA" }, + { "index": 25, "type": "LnxTPGSALUA_PM" }, + { "index": 26, "type": "ONTAP_ALUA" }, + { "index": 27, "type": "LnxTPGSALUA_SF" }, + { "index": 28, "type": "LnxDHALUA" }, + { "index": 29, "type": "ATTOClusterAllOS" }] + netapp_hostside_interfaces: + description: host side interface list that contains identification, configuration, type, speed, and + status information for each interface + type: complex + sample: + - [{"iscsi": + [{ "controller": "A", + "current_interface_speed": "10g", + "ipv4_address": "10.10.10.1", + "ipv4_enabled": true, + "ipv4_gateway": "10.10.10.1", + "ipv4_subnet_mask": "255.255.255.0", + "ipv6_enabled": false, + "iqn": "iqn.1996-03.com.netapp:2806.600a098000a81b6d0000000059d60c76", + "link_status": "up", + "mtu": 9000, + "supported_interface_speeds": [ "10g" ] }]}] + netapp_management_interfaces: + description: management interface list that contains identification, configuration, and status for + each interface + type: complex + sample: + - [{"alias": "ict-2800-A", + "channel": 1, + "controller": "A", + "dns_config_method": "dhcp", + "dns_servers": [], + "ipv4_address": "10.1.1.1", + "ipv4_address_config_method": "static", + "ipv4_enabled": true, + "ipv4_gateway": "10.113.1.1", + "ipv4_subnet_mask": "255.255.255.0", + "ipv6_enabled": false, + "link_status": "up", + "mac_address": "00A098A81B5D", + "name": "wan0", + "ntp_config_method": "disabled", + "ntp_servers": [], + "remote_ssh_access": false }] + netapp_storage_array: + description: provides storage array identification, firmware version, and available capabilities + type: dict + sample: + - {"chassis_serial": "021540006043", + "firmware": "08.40.00.01", + "name": "ict-2800-11_40", + "wwn": "600A098000A81B5D0000000059D60C76", + "cacheBlockSizes": [4096, + 8192, + 16384, + 32768], + "supportedSegSizes": [8192, + 16384, + 32768, + 65536, + 131072, + 262144, + 524288]} + netapp_storage_pools: + description: storage pool list that contains identification and capacity information for each pool + type: complex + sample: + - [{"available_capacity": 
"3490353782784", + "id": "04000000600A098000A81B5D000002B45A953A61", + "name": "Raid6", + "total_capacity": "5399466745856", + "used_capacity": "1909112963072" }] + netapp_volumes: + description: storage volume list that contains identification and capacity information for each volume + type: complex + sample: + - [{"capacity": "5368709120", + "id": "02000000600A098000AAC0C3000002C45A952BAA", + "is_thin_provisioned": false, + "name": "5G", + "parent_storage_pool_id": "04000000600A098000A81B5D000002B45A953A61" }] + netapp_workload_tags: + description: workload tag list + type: complex + sample: + - [{"id": "87e19568-43fb-4d8d-99ea-2811daaa2b38", + "name": "ftp_server", + "workloadAttributes": [{"key": "use", + "value": "general"}]}] + netapp_volumes_by_initiators: + description: list of available volumes keyed by the mapped initiators. + type: complex + sample: + - {"192_168_1_1": [{"id": "02000000600A098000A4B9D1000015FD5C8F7F9E", + "meta_data": {"filetype": "xfs", "public": true}, + "name": "some_volume", + "workload_name": "test2_volumes", + "wwn": "600A098000A4B9D1000015FD5C8F7F9E"}]} + snapshot_images: + description: snapshot image list that contains identification, capacity, and status information for each + snapshot image + type: complex + sample: + - [{"active_cow": true, + "creation_method": "user", + "id": "34000000600A098000A81B5D00630A965B0535AC", + "pit_capacity": "5368709120", + "reposity_cap_utilization": "0", + "rollback_source": false, + "status": "optimal" }] +""" + +from re import match +from pprint import pformat +from ansible_collections.netapp.ontap.plugins.module_utils.netapp import NetAppESeriesModule + + +class Facts(NetAppESeriesModule): + def __init__(self): + web_services_version = "02.00.0000.0000" + super(Facts, self).__init__(ansible_options={}, + web_services_version=web_services_version, + supports_check_mode=True) + + def get_controllers(self): + """Retrieve a mapping of controller references to their labels.""" + controllers = list() + try: + rc, controllers = self.request('storage-systems/%s/graph/xpath-filter?query=/controller/id' % self.ssid) + except Exception as err: + self.module.fail_json( + msg="Failed to retrieve controller list! Array Id [%s]. Error [%s]." + % (self.ssid, str(err))) + + controllers.sort() + + controllers_dict = {} + i = ord('A') + for controller in controllers: + label = chr(i) + controllers_dict[controller] = label + i += 1 + + return controllers_dict + + def get_array_facts(self): + """Extract particular facts from the storage array graph""" + facts = dict(facts_from_proxy=(not self.is_embedded()), ssid=self.ssid) + controller_reference_label = self.get_controllers() + array_facts = None + + # Get the storage array graph + try: + rc, array_facts = self.request("storage-systems/%s/graph" % self.ssid) + except Exception as error: + self.module.fail_json(msg="Failed to obtain facts from storage array with id [%s]. 
Error [%s]" % (self.ssid, str(error))) + + facts['netapp_storage_array'] = dict( + name=array_facts['sa']['saData']['storageArrayLabel'], + chassis_serial=array_facts['sa']['saData']['chassisSerialNumber'], + firmware=array_facts['sa']['saData']['fwVersion'], + wwn=array_facts['sa']['saData']['saId']['worldWideName'], + segment_sizes=array_facts['sa']['featureParameters']['supportedSegSizes'], + cache_block_sizes=array_facts['sa']['featureParameters']['cacheBlockSizes']) + + facts['netapp_controllers'] = [ + dict( + name=controller_reference_label[controller['controllerRef']], + serial=controller['serialNumber'].strip(), + status=controller['status'], + ) for controller in array_facts['controller']] + + facts['netapp_host_groups'] = [ + dict( + id=group['id'], + name=group['name'] + ) for group in array_facts['storagePoolBundle']['cluster']] + + facts['netapp_hosts'] = [ + dict( + group_id=host['clusterRef'], + hosts_reference=host['hostRef'], + id=host['id'], + name=host['name'], + host_type_index=host['hostTypeIndex'], + posts=host['hostSidePorts'] + ) for host in array_facts['storagePoolBundle']['host']] + + facts['netapp_host_types'] = [ + dict( + type=host_type['hostType'], + index=host_type['index'] + ) for host_type in array_facts['sa']['hostSpecificVals'] + if 'hostType' in host_type.keys() and host_type['hostType'] + # This conditional ignores zero-length strings which indicates that the associated host-specific NVSRAM region has been cleared. + ] + facts['snapshot_images'] = [ + dict( + id=snapshot['id'], + status=snapshot['status'], + pit_capacity=snapshot['pitCapacity'], + creation_method=snapshot['creationMethod'], + reposity_cap_utilization=snapshot['repositoryCapacityUtilization'], + active_cow=snapshot['activeCOW'], + rollback_source=snapshot['isRollbackSource'] + ) for snapshot in array_facts['highLevelVolBundle']['pit']] + + facts['netapp_disks'] = [ + dict( + id=disk['id'], + available=disk['available'], + media_type=disk['driveMediaType'], + status=disk['status'], + usable_bytes=disk['usableCapacity'], + tray_ref=disk['physicalLocation']['trayRef'], + product_id=disk['productID'], + firmware_version=disk['firmwareVersion'], + serial_number=disk['serialNumber'].lstrip() + ) for disk in array_facts['drive']] + + facts['netapp_management_interfaces'] = [ + dict(controller=controller_reference_label[controller['controllerRef']], + name=iface['ethernet']['interfaceName'], + alias=iface['ethernet']['alias'], + channel=iface['ethernet']['channel'], + mac_address=iface['ethernet']['macAddr'], + remote_ssh_access=iface['ethernet']['rloginEnabled'], + link_status=iface['ethernet']['linkStatus'], + ipv4_enabled=iface['ethernet']['ipv4Enabled'], + ipv4_address_config_method=iface['ethernet']['ipv4AddressConfigMethod'].lower().replace("config", ""), + ipv4_address=iface['ethernet']['ipv4Address'], + ipv4_subnet_mask=iface['ethernet']['ipv4SubnetMask'], + ipv4_gateway=iface['ethernet']['ipv4GatewayAddress'], + ipv6_enabled=iface['ethernet']['ipv6Enabled'], + dns_config_method=iface['ethernet']['dnsProperties']['acquisitionProperties']['dnsAcquisitionType'], + dns_servers=(iface['ethernet']['dnsProperties']['acquisitionProperties']['dnsServers'] + if iface['ethernet']['dnsProperties']['acquisitionProperties']['dnsServers'] else []), + ntp_config_method=iface['ethernet']['ntpProperties']['acquisitionProperties']['ntpAcquisitionType'], + ntp_servers=(iface['ethernet']['ntpProperties']['acquisitionProperties']['ntpServers'] + if 
iface['ethernet']['ntpProperties']['acquisitionProperties']['ntpServers'] else []) + ) for controller in array_facts['controller'] for iface in controller['netInterfaces']] + + facts['netapp_hostside_interfaces'] = [ + dict( + fc=[dict(controller=controller_reference_label[controller['controllerRef']], + channel=iface['fibre']['channel'], + link_status=iface['fibre']['linkStatus'], + current_interface_speed=strip_interface_speed(iface['fibre']['currentInterfaceSpeed']), + maximum_interface_speed=strip_interface_speed(iface['fibre']['maximumInterfaceSpeed'])) + for controller in array_facts['controller'] + for iface in controller['hostInterfaces'] + if iface['interfaceType'] == 'fc'], + ib=[dict(controller=controller_reference_label[controller['controllerRef']], + channel=iface['ib']['channel'], + link_status=iface['ib']['linkState'], + mtu=iface['ib']['maximumTransmissionUnit'], + current_interface_speed=strip_interface_speed(iface['ib']['currentSpeed']), + maximum_interface_speed=strip_interface_speed(iface['ib']['supportedSpeed'])) + for controller in array_facts['controller'] + for iface in controller['hostInterfaces'] + if iface['interfaceType'] == 'ib'], + iscsi=[dict(controller=controller_reference_label[controller['controllerRef']], + iqn=iface['iscsi']['iqn'], + link_status=iface['iscsi']['interfaceData']['ethernetData']['linkStatus'], + ipv4_enabled=iface['iscsi']['ipv4Enabled'], + ipv4_address=iface['iscsi']['ipv4Data']['ipv4AddressData']['ipv4Address'], + ipv4_subnet_mask=iface['iscsi']['ipv4Data']['ipv4AddressData']['ipv4SubnetMask'], + ipv4_gateway=iface['iscsi']['ipv4Data']['ipv4AddressData']['ipv4GatewayAddress'], + ipv6_enabled=iface['iscsi']['ipv6Enabled'], + mtu=iface['iscsi']['interfaceData']['ethernetData']['maximumFramePayloadSize'], + current_interface_speed=strip_interface_speed(iface['iscsi']['interfaceData'] + ['ethernetData']['currentInterfaceSpeed']), + supported_interface_speeds=strip_interface_speed(iface['iscsi']['interfaceData'] + ['ethernetData'] + ['supportedInterfaceSpeeds'])) + for controller in array_facts['controller'] + for iface in controller['hostInterfaces'] + if iface['interfaceType'] == 'iscsi'], + sas=[dict(controller=controller_reference_label[controller['controllerRef']], + channel=iface['sas']['channel'], + current_interface_speed=strip_interface_speed(iface['sas']['currentInterfaceSpeed']), + maximum_interface_speed=strip_interface_speed(iface['sas']['maximumInterfaceSpeed']), + link_status=iface['sas']['iocPort']['state']) + for controller in array_facts['controller'] + for iface in controller['hostInterfaces'] + if iface['interfaceType'] == 'sas'])] + + facts['netapp_driveside_interfaces'] = [ + dict( + controller=controller_reference_label[controller['controllerRef']], + interface_type=interface['interfaceType'], + interface_speed=strip_interface_speed( + interface[interface['interfaceType']]['maximumInterfaceSpeed'] + if (interface['interfaceType'] == 'sata' or + interface['interfaceType'] == 'sas' or + interface['interfaceType'] == 'fibre') + else ( + interface[interface['interfaceType']]['currentSpeed'] + if interface['interfaceType'] == 'ib' + else ( + interface[interface['interfaceType']]['interfaceData']['maximumInterfaceSpeed'] + if interface['interfaceType'] == 'iscsi' else 'unknown' + ))), + ) + for controller in array_facts['controller'] + for interface in controller['driveInterfaces']] + + facts['netapp_storage_pools'] = [ + dict( + id=storage_pool['id'], + name=storage_pool['name'], + 
available_capacity=storage_pool['freeSpace'],
+                total_capacity=storage_pool['totalRaidedSpace'],
+                used_capacity=storage_pool['usedSpace']
+            ) for storage_pool in array_facts['volumeGroup']]
+
+        all_volumes = list(array_facts['volume'])
+
+        facts['netapp_volumes'] = [
+            dict(
+                id=v['id'],
+                name=v['name'],
+                parent_storage_pool_id=v['volumeGroupRef'],
+                capacity=v['capacity'],
+                is_thin_provisioned=v['thinProvisioned'],
+                workload=v['metadata'],
+            ) for v in all_volumes]
+
+        workload_tags = None
+        try:
+            rc, workload_tags = self.request("storage-systems/%s/workloads" % self.ssid)
+        except Exception as error:
+            self.module.fail_json(msg="Failed to retrieve workload tags. Array [%s]. Error [%s]." % (self.ssid, str(error)))
+
+        facts['netapp_workload_tags'] = [
+            dict(
+                id=workload_tag['id'],
+                name=workload_tag['name'],
+                attributes=workload_tag['workloadAttributes']
+            ) for workload_tag in workload_tags]
+
+        # Create a dictionary of volume lists keyed by host names
+        facts['netapp_volumes_by_initiators'] = dict()
+        for mapping in array_facts['storagePoolBundle']['lunMapping']:
+            for host in facts['netapp_hosts']:
+                if mapping['mapRef'] == host['hosts_reference'] or mapping['mapRef'] == host['group_id']:
+                    if host['name'] not in facts['netapp_volumes_by_initiators'].keys():
+                        facts['netapp_volumes_by_initiators'].update({host['name']: []})
+
+                    for volume in all_volumes:
+                        if mapping['id'] in [volume_mapping['id'] for volume_mapping in volume['listOfMappings']]:
+
+                            # Determine workload name if there is one
+                            workload_name = ""
+                            metadata = dict()
+                            for volume_tag in volume['metadata']:
+                                if volume_tag['key'] == 'workloadId':
+                                    for workload_tag in facts['netapp_workload_tags']:
+                                        if volume_tag['value'] == workload_tag['id']:
+                                            workload_name = workload_tag['name']
+                                            metadata = dict((entry['key'], entry['value'])
+                                                            for entry in workload_tag['attributes']
+                                                            if entry['key'] != 'profileId')
+
+                            facts['netapp_volumes_by_initiators'][host['name']].append(
+                                dict(name=volume['name'],
+                                     id=volume['id'],
+                                     wwn=volume['wwn'],
+                                     workload_name=workload_name,
+                                     meta_data=metadata))
+
+        features = [feature for feature in array_facts['sa']['capabilities']]
+        features.extend([feature['capability'] for feature in array_facts['sa']['premiumFeatures']
+                         if feature['isEnabled']])
+        features = list(set(features))  # ensure unique
+        features.sort()
+        facts['netapp_enabled_features'] = features
+
+        return facts
+
+    def get_facts(self):
+        """Get the embedded or web services proxy information."""
+        facts = self.get_array_facts()
+
+        self.module.log("isEmbedded: %s" % self.is_embedded())
+        self.module.log(pformat(facts))
+
+        self.module.exit_json(msg="Gathered facts for storage array. Array ID: [%s]." % self.ssid,
+                              storage_array_facts=facts)
+
+
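+# Editorial sketch (not from the original module): the lunMapping walk in
+# get_array_facts() above boils down to "for each mapping, find the host or
+# host group it targets, then attach every volume whose listOfMappings
+# references that mapping".  A minimal standalone version of that reduction,
+# using the same (here hypothetical) field names:
+def _volumes_by_host_sketch(mappings, hosts, volumes):
+    """Group volume names by host name; purely illustrative."""
+    result = {}
+    for mapping in mappings:
+        for host in hosts:
+            if mapping['mapRef'] in (host['hosts_reference'], host['group_id']):
+                for volume in volumes:
+                    if mapping['id'] in [m['id'] for m in volume['listOfMappings']]:
+                        result.setdefault(host['name'], []).append(volume['name'])
+    return result
+
+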
+def strip_interface_speed(speed):
+    """Converts symbol interface speeds to a more common notation. Example: 'speed10gig' -> '10g'"""
+    if isinstance(speed, list):
+        result = [match(r"speed[0-9]{1,3}[gm]", sp) for sp in speed]
+        result = [sp.group().replace("speed", "") if sp else "unknown" for sp in result]
+        result = ["auto" if match(r"auto", sp) else sp for sp in result]
+    else:
+        result = match(r"speed[0-9]{1,3}[gm]", speed)
+        result = result.group().replace("speed", "") if result else "unknown"
+        result = "auto" if match(r"auto", result.lower()) else result
+    return result
+
+
+def main():
+    facts = Facts()
+    facts.get_facts()
+
+
+if __name__ == "__main__":
+    main()
diff --git a/plugins/modules/storage/netapp/netapp_e_firmware.py b/plugins/modules/storage/netapp/netapp_e_firmware.py
new file mode 100644
index 0000000000..b835fe3735
--- /dev/null
+++ b/plugins/modules/storage/netapp/netapp_e_firmware.py
@@ -0,0 +1,488 @@
+#!/usr/bin/python
+
+# (c) 2016, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+DOCUMENTATION = '''
+---
+module: netapp_e_firmware
+short_description: NetApp E-Series manage firmware
+description:
+    - Ensure specific firmware versions are activated on E-Series storage system.
+author:
+    - Nathan Swartz (@ndswartz)
+extends_documentation_fragment:
+- netapp.ontap.netapp.eseries
+
+options:
+    nvsram:
+        description:
+            - Path to the NVSRAM file.
+        type: str
+        required: true
+    firmware:
+        description:
+            - Path to the firmware file.
+        type: str
+        required: true
+    wait_for_completion:
+        description:
+            - This flag will cause module to wait for any upgrade actions to complete.
+        type: bool
+        default: false
+    ignore_health_check:
+        description:
+            - This flag will force firmware to be activated in spite of the health check.
+            - Use at your own risk. Certain non-optimal states could result in data loss.
+        type: bool
+        default: false
+'''
+EXAMPLES = """
+- name: Ensure correct firmware versions
+  netapp_e_firmware:
+    ssid: "1"
+    api_url: "https://192.168.1.100:8443/devmgr/v2"
+    api_username: "admin"
+    api_password: "adminpass"
+    validate_certs: true
+    nvsram: "path/to/nvsram"
+    firmware: "path/to/bundle"
+    wait_for_completion: true
+- name: Ensure correct firmware versions
+  netapp_e_firmware:
+    ssid: "1"
+    api_url: "https://192.168.1.100:8443/devmgr/v2"
+    api_username: "admin"
+    api_password: "adminpass"
+    validate_certs: true
+    nvsram: "path/to/nvsram"
+    firmware: "path/to/firmware"
+"""
+RETURN = """
+msg:
+    description: Status and version of firmware and NVSRAM.
+    type: str
+    returned: always
+    sample:
+"""
+import os
+
+from time import sleep
+from ansible.module_utils import six
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp import NetAppESeriesModule, create_multipart_formdata, request
+from ansible.module_utils._text import to_native, to_text, to_bytes
+
+
+class NetAppESeriesFirmware(NetAppESeriesModule):
+    HEALTH_CHECK_TIMEOUT_MS = 120000
+    REBOOT_TIMEOUT_SEC = 15 * 60
+    FIRMWARE_COMPATIBILITY_CHECK_TIMEOUT_SEC = 60
+    DEFAULT_TIMEOUT = 60 * 15  # This will override the NetAppESeriesModule request method timeout.
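+
+    # Editorial note, not part of the original module: a hedged sketch of the
+    # header sniff performed by is_firmware_bundled() below.  The first 16 bytes
+    # of the file decide whether this is a plain DLP image or a combined bundle
+    # (firmware_path is a hypothetical stand-in):
+    #
+    #     with open(firmware_path, "rb") as fh:
+    #         signature = fh.read(16).lower()
+    #     is_bundle = b"combined_content" in signature  # b"firmware" -> plain DLP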
+
+    def __init__(self):
+        ansible_options = dict(
+            nvsram=dict(type="str", required=True),
+            firmware=dict(type="str", required=True),
+            wait_for_completion=dict(type="bool", default=False),
+            ignore_health_check=dict(type="bool", default=False))
+
+        super(NetAppESeriesFirmware, self).__init__(ansible_options=ansible_options,
+                                                    web_services_version="02.00.0000.0000",
+                                                    supports_check_mode=True)
+
+        args = self.module.params
+        self.nvsram = args["nvsram"]
+        self.firmware = args["firmware"]
+        self.wait_for_completion = args["wait_for_completion"]
+        self.ignore_health_check = args["ignore_health_check"]
+
+        self.nvsram_name = None
+        self.firmware_name = None
+        self.is_bundle_cache = None
+        self.firmware_version_cache = None
+        self.nvsram_version_cache = None
+        self.upgrade_required = False
+        self.upgrade_in_progress = False
+        self.module_info = dict()
+
+        self.nvsram_name = os.path.basename(self.nvsram)
+        self.firmware_name = os.path.basename(self.firmware)
+
+    def is_firmware_bundled(self):
+        """Determine whether supplied firmware is bundle."""
+        if self.is_bundle_cache is None:
+            with open(self.firmware, "rb") as fh:
+                signature = fh.read(16).lower()
+
+            if b"firmware" in signature:
+                self.is_bundle_cache = False
+            elif b"combined_content" in signature:
+                self.is_bundle_cache = True
+            else:
+                self.module.fail_json(msg="Firmware file is invalid. File [%s]. Array [%s]" % (self.firmware, self.ssid))
+
+        return self.is_bundle_cache
+
+    def firmware_version(self):
+        """Retrieve firmware version of the firmware file. Return: byte string"""
+        if self.firmware_version_cache is None:
+
+            # Search firmware file for bundle or firmware version
+            with open(self.firmware, "rb") as fh:
+                line = fh.readline()
+                while line:
+                    if self.is_firmware_bundled():
+                        if b'displayableAttributeList=' in line:
+                            for item in line[25:].split(b','):
+                                key, value = item.split(b"|")
+                                if key == b'VERSION':
+                                    self.firmware_version_cache = value.strip(b"\n")
+                            break
+                    elif b"Version:" in line:
+                        self.firmware_version_cache = line.split()[-1].strip(b"\n")
+                        break
+                    line = fh.readline()
+                else:
+                    self.module.fail_json(msg="Failed to determine firmware version. File [%s]. Array [%s]." % (self.firmware, self.ssid))
+        return self.firmware_version_cache
+
+    def nvsram_version(self):
+        """Retrieve NVSRAM version of the NVSRAM file. Return: byte string"""
+        if self.nvsram_version_cache is None:
+
+            with open(self.nvsram, "rb") as fh:
+                line = fh.readline()
+                while line:
+                    if b".NVSRAM Configuration Number" in line:
+                        self.nvsram_version_cache = line.split(b'"')[-2]
+                        break
+                    line = fh.readline()
+                else:
+                    self.module.fail_json(msg="Failed to determine NVSRAM file version. File [%s]. Array [%s]." % (self.nvsram, self.ssid))
+        return self.nvsram_version_cache
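+
+    # Editorial note (assumptions flagged): both file scanners above lean on
+    # Python's while/else -- the else clause runs only when the loop exhausts
+    # the file without hitting 'break', which is what routes "version string
+    # never found" into fail_json().  For a hypothetical bundle header such as
+    #     displayableAttributeList=NAME|RC_08.42,VERSION|08.42.30.05
+    # the split on b',' then b'|' yields the pair (b'VERSION', b'08.42.30.05').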
+
+    def check_system_health(self):
+        """Ensure E-Series storage system is healthy. Works for both embedded and proxy web services."""
+        try:
+            rc, request_id = self.request("health-check", method="POST", data={"onlineOnly": True, "storageDeviceIds": [self.ssid]})
+
+            while True:
+                sleep(1)
+
+                try:
+                    rc, response = self.request("health-check?requestId=%s" % request_id["requestId"])
+
+                    if not response["healthCheckRunning"]:
+                        return response["results"][0]["successful"]
+                    elif int(response["results"][0]["processingTimeMS"]) > self.HEALTH_CHECK_TIMEOUT_MS:
+                        self.module.fail_json(msg="Health check failed to complete. Array Id [%s]." % self.ssid)
+
+                except Exception as error:
+                    self.module.fail_json(msg="Failed to retrieve health check status. Array Id [%s]. Error[%s]." % (self.ssid, to_native(error)))
+        except Exception as error:
+            self.module.fail_json(msg="Failed to initiate health check. Array Id [%s]. Error[%s]." % (self.ssid, to_native(error)))
+
+        self.module.fail_json(msg="Failed to retrieve health check status. Array Id [%s]." % self.ssid)
+
+    def embedded_check_compatibility(self):
+        """Verify files are compatible with E-Series storage system."""
+        self.embedded_check_nvsram_compatibility()
+        self.embedded_check_bundle_compatibility()
+
+    def embedded_check_nvsram_compatibility(self):
+        """Verify the provided NVSRAM is compatible with E-Series storage system."""
+
+        # Check nvsram compatibility
+        try:
+            files = [("nvsramimage", self.nvsram_name, self.nvsram)]
+            headers, data = create_multipart_formdata(files=files)
+
+            rc, nvsram_compatible = self.request("firmware/embedded-firmware/%s/nvsram-compatibility-check" % self.ssid,
+                                                 method="POST", data=data, headers=headers)
+
+            if not nvsram_compatible["signatureTestingPassed"]:
+                self.module.fail_json(msg="Invalid NVSRAM file. File [%s]." % self.nvsram)
+            if not nvsram_compatible["fileCompatible"]:
+                self.module.fail_json(msg="Incompatible NVSRAM file. File [%s]." % self.nvsram)
+
+            # Determine whether nvsram is required
+            for module in nvsram_compatible["versionContents"]:
+                if module["bundledVersion"] != module["onboardVersion"]:
+                    self.upgrade_required = True
+
+                # Update bundle info
+                self.module_info.update({module["module"]: {"onboard_version": module["onboardVersion"], "bundled_version": module["bundledVersion"]}})
+
+        except Exception as error:
+            self.module.fail_json(msg="Failed to retrieve NVSRAM compatibility results. Array Id [%s]. Error[%s]." % (self.ssid, to_native(error)))
+
+    def embedded_check_bundle_compatibility(self):
+        """Verify the provided firmware bundle is compatible with E-Series storage system."""
+        try:
+            files = [("files[]", "blob", self.firmware)]
+            headers, data = create_multipart_formdata(files=files, send_8kb=True)
+            rc, bundle_compatible = self.request("firmware/embedded-firmware/%s/bundle-compatibility-check" % self.ssid,
+                                                 method="POST", data=data, headers=headers)
+
+            # Determine whether valid and compatible firmware
+            if not bundle_compatible["signatureTestingPassed"]:
+                self.module.fail_json(msg="Invalid firmware bundle file. File [%s]." % self.firmware)
+            if not bundle_compatible["fileCompatible"]:
+                self.module.fail_json(msg="Incompatible firmware bundle file. File [%s]." % self.firmware)
+
+            # Determine whether upgrade is required
+            for module in bundle_compatible["versionContents"]:
+
+                bundle_module_version = module["bundledVersion"].split(".")
+                onboard_module_version = module["onboardVersion"].split(".")
+                version_minimum_length = min(len(bundle_module_version), len(onboard_module_version))
+                if bundle_module_version[:version_minimum_length] != onboard_module_version[:version_minimum_length]:
+                    self.upgrade_required = True
+
+                # Check whether downgrade is being attempted
+                bundle_version = module["bundledVersion"].split(".")[:2]
+                onboard_version = module["onboardVersion"].split(".")[:2]
+                if bundle_version[0] < onboard_version[0] or (bundle_version[0] == onboard_version[0] and bundle_version[1] < onboard_version[1]):
+                    self.module.fail_json(msg="Downgrades are not permitted. onboard [%s] > bundled[%s]."
+ % (module["onboardVersion"], module["bundledVersion"])) + + # Update bundle info + self.module_info.update({module["module"]: {"onboard_version": module["onboardVersion"], "bundled_version": module["bundledVersion"]}}) + + except Exception as error: + self.module.fail_json(msg="Failed to retrieve bundle compatibility results. Array Id [%s]. Error[%s]." % (self.ssid, to_native(error))) + + def embedded_wait_for_upgrade(self): + """Wait for SANtricity Web Services Embedded to be available after reboot.""" + for count in range(0, self.REBOOT_TIMEOUT_SEC): + try: + rc, response = self.request("storage-systems/%s/graph/xpath-filter?query=/sa/saData" % self.ssid) + bundle_display = [m["versionString"] for m in response[0]["extendedSAData"]["codeVersions"] if m["codeModule"] == "bundleDisplay"][0] + if rc == 200 and six.b(bundle_display) == self.firmware_version() and six.b(response[0]["nvsramVersion"]) == self.nvsram_version(): + self.upgrade_in_progress = False + break + except Exception as error: + pass + sleep(1) + else: + self.module.fail_json(msg="Timeout waiting for Santricity Web Services Embedded. Array [%s]" % self.ssid) + + def embedded_upgrade(self): + """Upload and activate both firmware and NVSRAM.""" + files = [("nvsramfile", self.nvsram_name, self.nvsram), + ("dlpfile", self.firmware_name, self.firmware)] + headers, data = create_multipart_formdata(files=files) + try: + rc, response = self.request("firmware/embedded-firmware?staged=false&nvsram=true", method="POST", data=data, headers=headers) + self.upgrade_in_progress = True + except Exception as error: + self.module.fail_json(msg="Failed to upload and activate firmware. Array Id [%s]. Error[%s]." % (self.ssid, to_native(error))) + if self.wait_for_completion: + self.embedded_wait_for_upgrade() + + def proxy_check_nvsram_compatibility(self): + """Verify nvsram is compatible with E-Series storage system.""" + data = {"storageDeviceIds": [self.ssid]} + try: + rc, check = self.request("firmware/compatibility-check", method="POST", data=data) + for count in range(0, int((self.FIRMWARE_COMPATIBILITY_CHECK_TIMEOUT_SEC / 5))): + sleep(5) + try: + rc, response = self.request("firmware/compatibility-check?requestId=%s" % check["requestId"]) + if not response["checkRunning"]: + for result in response["results"][0]["nvsramFiles"]: + if result["filename"] == self.nvsram_name: + return + self.module.fail_json(msg="NVSRAM is not compatible. NVSRAM [%s]. Array [%s]." % (self.nvsram_name, self.ssid)) + except Exception as error: + self.module.fail_json(msg="Failed to retrieve NVSRAM status update from proxy. Array [%s]. Error [%s]." % (self.ssid, to_native(error))) + except Exception as error: + self.module.fail_json(msg="Failed to receive NVSRAM compatibility information. Array [%s]. Error [%s]." % (self.ssid, to_native(error))) + + def proxy_check_firmware_compatibility(self): + """Verify firmware is compatible with E-Series storage system.""" + data = {"storageDeviceIds": [self.ssid]} + try: + rc, check = self.request("firmware/compatibility-check", method="POST", data=data) + for count in range(0, int((self.FIRMWARE_COMPATIBILITY_CHECK_TIMEOUT_SEC / 5))): + sleep(5) + try: + rc, response = self.request("firmware/compatibility-check?requestId=%s" % check["requestId"]) + if not response["checkRunning"]: + for result in response["results"][0]["cfwFiles"]: + if result["filename"] == self.firmware_name: + return + self.module.fail_json(msg="Firmware bundle is not compatible. firmware [%s]. Array [%s]." 
% (self.firmware_name, self.ssid))
+
+                except Exception as error:
+                    self.module.fail_json(msg="Failed to retrieve firmware status update from proxy. Array [%s]. Error [%s]." % (self.ssid, to_native(error)))
+        except Exception as error:
+            self.module.fail_json(msg="Failed to receive firmware compatibility information. Array [%s]. Error [%s]." % (self.ssid, to_native(error)))
+
+    def proxy_upload_and_check_compatibility(self):
+        """Ensure firmware is uploaded and verify compatibility."""
+        try:
+            rc, cfw_files = self.request("firmware/cfw-files")
+            for file in cfw_files:
+                if file["filename"] == self.nvsram_name:
+                    break
+            else:
+                fields = [("validate", "true")]
+                files = [("firmwareFile", self.nvsram_name, self.nvsram)]
+                headers, data = create_multipart_formdata(files=files, fields=fields)
+                try:
+                    rc, response = self.request("firmware/upload", method="POST", data=data, headers=headers)
+                except Exception as error:
+                    self.module.fail_json(msg="Failed to upload NVSRAM file. File [%s]. Array [%s]. Error [%s]."
+                                              % (self.nvsram_name, self.ssid, to_native(error)))
+
+            self.proxy_check_nvsram_compatibility()
+
+            for file in cfw_files:
+                if file["filename"] == self.firmware_name:
+                    break
+            else:
+                fields = [("validate", "true")]
+                files = [("firmwareFile", self.firmware_name, self.firmware)]
+                headers, data = create_multipart_formdata(files=files, fields=fields)
+                try:
+                    rc, response = self.request("firmware/upload", method="POST", data=data, headers=headers)
+                except Exception as error:
+                    self.module.fail_json(msg="Failed to upload firmware bundle file. File [%s]. Array [%s]. Error [%s]."
+                                              % (self.firmware_name, self.ssid, to_native(error)))
+
+            self.proxy_check_firmware_compatibility()
+        except Exception as error:
+            self.module.fail_json(msg="Failed to retrieve existing firmware files. Error [%s]" % to_native(error))
+
+    def proxy_check_upgrade_required(self):
+        """Staging is required to collect firmware information from the web services proxy."""
+        # Verify controller consistency and get firmware versions
+        try:
+            # Retrieve current bundle version
+            if self.is_firmware_bundled():
+                rc, response = self.request("storage-systems/%s/graph/xpath-filter?query=/controller/codeVersions[codeModule='bundleDisplay']" % self.ssid)
+                current_firmware_version = six.b(response[0]["versionString"])
+            else:
+                rc, response = self.request("storage-systems/%s/graph/xpath-filter?query=/sa/saData/fwVersion" % self.ssid)
+                current_firmware_version = six.b(response[0])
+
+            # Determine whether upgrade is required
+            if current_firmware_version != self.firmware_version():
+
+                current = current_firmware_version.split(b".")[:2]
+                upgrade = self.firmware_version().split(b".")[:2]
+                if current[0] < upgrade[0] or (current[0] == upgrade[0] and current[1] <= upgrade[1]):
+                    self.upgrade_required = True
+                else:
+                    self.module.fail_json(msg="Downgrades are not permitted. Firmware [%s]. Array [%s]." % (self.firmware, self.ssid))
+        except Exception as error:
+            self.module.fail_json(msg="Failed to retrieve controller firmware information. Array [%s]. Error [%s]" % (self.ssid, to_native(error)))
+        # Determine current NVSRAM version and whether change is required
+        try:
+            rc, response = self.request("storage-systems/%s/graph/xpath-filter?query=/sa/saData/nvsramVersion" % self.ssid)
+            if six.b(response[0]) != self.nvsram_version():
+                self.upgrade_required = True
+
+        except Exception as error:
+            self.module.fail_json(msg="Failed to retrieve storage system's NVSRAM version. Array [%s].
Error [%s]" % (self.ssid, to_native(error))) + + def proxy_wait_for_upgrade(self, request_id): + """Wait for SANtricity Web Services Proxy to report upgrade complete""" + if self.is_firmware_bundled(): + while True: + try: + sleep(5) + rc, response = self.request("batch/cfw-upgrade/%s" % request_id) + + if response["status"] == "complete": + self.upgrade_in_progress = False + break + elif response["status"] in ["failed", "cancelled"]: + self.module.fail_json(msg="Firmware upgrade failed to complete. Array [%s]." % self.ssid) + except Exception as error: + self.module.fail_json(msg="Failed to retrieve firmware upgrade status. Array [%s]. Error [%s]." % (self.ssid, to_native(error))) + else: + for count in range(0, int(self.REBOOT_TIMEOUT_SEC / 5)): + try: + sleep(5) + rc_firmware, firmware = self.request("storage-systems/%s/graph/xpath-filter?query=/sa/saData/fwVersion" % self.ssid) + rc_nvsram, nvsram = self.request("storage-systems/%s/graph/xpath-filter?query=/sa/saData/nvsramVersion" % self.ssid) + + if six.b(firmware[0]) == self.firmware_version() and six.b(nvsram[0]) == self.nvsram_version(): + self.upgrade_in_progress = False + break + except Exception as error: + pass + else: + self.module.fail_json(msg="Timed out waiting for firmware upgrade to complete. Array [%s]." % self.ssid) + + def proxy_upgrade(self): + """Activate previously uploaded firmware related files.""" + request_id = None + if self.is_firmware_bundled(): + data = {"activate": True, + "firmwareFile": self.firmware_name, + "nvsramFile": self.nvsram_name, + "systemInfos": [{"systemId": self.ssid, + "allowNonOptimalActivation": self.ignore_health_check}]} + try: + rc, response = self.request("batch/cfw-upgrade", method="POST", data=data) + request_id = response["requestId"] + except Exception as error: + self.module.fail_json(msg="Failed to initiate firmware upgrade. Array [%s]. Error [%s]." % (self.ssid, to_native(error))) + + else: + data = {"stageFirmware": False, + "skipMelCheck": self.ignore_health_check, + "cfwFile": self.firmware_name, + "nvsramFile": self.nvsram_name} + try: + rc, response = self.request("storage-systems/%s/cfw-upgrade" % self.ssid, method="POST", data=data) + request_id = response["requestId"] + except Exception as error: + self.module.fail_json(msg="Failed to initiate firmware upgrade. Array [%s]. Error [%s]." 
% (self.ssid, to_native(error)))
+
+        self.upgrade_in_progress = True
+        if self.wait_for_completion:
+            self.proxy_wait_for_upgrade(request_id)
+
+    def apply(self):
+        """Upgrade controller firmware."""
+        self.check_system_health()
+
+        # Verify firmware compatibility and whether changes are required
+        if self.is_embedded():
+            self.embedded_check_compatibility()
+        else:
+            self.proxy_check_upgrade_required()
+
+            # This will upload the firmware files to the web services proxy but not to the controller
+            if self.upgrade_required:
+                self.proxy_upload_and_check_compatibility()
+
+        # Perform upgrade
+        if self.upgrade_required and not self.module.check_mode:
+            if self.is_embedded():
+                self.embedded_upgrade()
+            else:
+                self.proxy_upgrade()
+
+        self.module.exit_json(changed=self.upgrade_required, upgrade_in_process=self.upgrade_in_progress, status=self.module_info)
+
+
+def main():
+    firmware = NetAppESeriesFirmware()
+    firmware.apply()
+
+
+if __name__ == '__main__':
+    main()
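Editor's note: the firmware module above repeatedly polls the web services API
until an asynchronous job (health check, compatibility check, upgrade) settles.
A minimal, standalone sketch of that poll-with-deadline pattern follows; the
names wait_for and job_status are illustrative, not part of the module's API.

    from time import sleep, time

    def wait_for(predicate, timeout_sec, interval_sec=5):
        """Poll predicate() until it returns truthy or the deadline passes."""
        deadline = time() + timeout_sec
        while time() < deadline:
            if predicate():
                return True
            sleep(interval_sec)
        return False

    # e.g. wait_for(lambda: job_status() == "complete", timeout_sec=15 * 60)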
diff --git a/plugins/modules/storage/netapp/netapp_e_flashcache.py b/plugins/modules/storage/netapp/netapp_e_flashcache.py
new file mode 100644
index 0000000000..42ee09515e
--- /dev/null
+++ b/plugins/modules/storage/netapp/netapp_e_flashcache.py
@@ -0,0 +1,414 @@
+#!/usr/bin/python
+
+# (c) 2016, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+module: netapp_e_flashcache
+author: Kevin Hulquest (@hulquest)
+short_description: NetApp E-Series manage SSD caches
+description:
+- Create or remove SSD caches on a NetApp E-Series storage array.
+options:
+    api_username:
+        required: true
+        description:
+        - The username to authenticate with the SANtricity WebServices Proxy or embedded REST API.
+    api_password:
+        required: true
+        description:
+        - The password to authenticate with the SANtricity WebServices Proxy or embedded REST API.
+    api_url:
+        required: true
+        description:
+        - The url to the SANtricity WebServices Proxy or embedded REST API.
+    validate_certs:
+        required: false
+        default: true
+        description:
+        - Should https certificates be validated?
+        type: bool
+    ssid:
+        required: true
+        description:
+        - The ID of the array to manage (as configured on the web services proxy).
+    state:
+        required: false
+        description:
+        - Whether the specified SSD cache should exist or not.
+        choices: ['present', 'absent']
+        default: present
+    name:
+        required: true
+        description:
+        - The name of the SSD cache to manage
+    io_type:
+        description:
+        - The type of workload to optimize the cache for.
+        choices: ['filesystem','database','media']
+        default: filesystem
+    disk_count:
+        description:
+        - The minimum number of disks to use for building the cache. The cache will be expanded if this number exceeds the number of disks already in place.
+    size_unit:
+        description:
+        - The unit to be applied to size arguments
+        choices: ['bytes', 'b', 'kb', 'mb', 'gb', 'tb', 'pb', 'eb', 'zb', 'yb']
+        default: gb
+    cache_size_min:
+        description:
+        - The minimum size (in size_units) of the ssd cache. The cache will be expanded if this exceeds the current size of the cache.
+'''
+
+EXAMPLES = """
+    - name: Flash Cache
+      netapp_e_flashcache:
+        ssid: "{{ ssid }}"
+        api_url: "{{ netapp_api_url }}"
+        api_username: "{{ netapp_api_username }}"
+        api_password: "{{ netapp_api_password }}"
+        validate_certs: "{{ netapp_api_validate_certs }}"
+        name: SSDCacheBuiltByAnsible
+"""
+
+RETURN = """
+msg:
+    description: Success message
+    returned: success
+    type: str
+    sample: json for newly created flash cache
+"""
+import json
+import logging
+import sys
+import traceback
+
+from ansible.module_utils.api import basic_auth_argument_spec
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves import reduce
+from ansible.module_utils.six.moves.urllib.error import HTTPError
+from ansible.module_utils._text import to_native
+from ansible.module_utils.urls import open_url
+
+
+def request(url, data=None, headers=None, method='GET', use_proxy=True,
+            force=False, last_mod_time=None, timeout=10, validate_certs=True,
+            url_username=None, url_password=None, http_agent=None, force_basic_auth=True, ignore_errors=False):
+    try:
+        r = open_url(url=url, data=data, headers=headers, method=method, use_proxy=use_proxy,
+                     force=force, last_mod_time=last_mod_time, timeout=timeout, validate_certs=validate_certs,
+                     url_username=url_username, url_password=url_password, http_agent=http_agent,
+                     force_basic_auth=force_basic_auth)
+    except HTTPError as err:
+        r = err.fp
+
+    try:
+        raw_data = r.read()
+        if raw_data:
+            data = json.loads(raw_data)
+        else:
+            raw_data = None
+    except Exception:
+        if ignore_errors:
+            pass
+        else:
+            raise Exception(raw_data)
+
+    resp_code = r.getcode()
+
+    if resp_code >= 400 and not ignore_errors:
+        raise Exception(resp_code, data)
+    else:
+        return resp_code, data
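+
+
+# Editorial sketch (not part of the original module): request() above returns a
+# (status_code, parsed_json) tuple and raises on HTTP >= 400 unless
+# ignore_errors is set, so callers branch on the status code.  A hypothetical
+# read of the current flash cache config would therefore look like:
+def _example_read_flash_cache(api_url, ssid, username, password):
+    """Illustrative only; mirrors how apply() below calls request()."""
+    rc, detail = request(api_url + "/storage-systems/%s/flash-cache" % ssid,
+                         url_username=username, url_password=password,
+                         ignore_errors=True)
+    return detail if rc == 200 else None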
+
+
+class NetAppESeriesFlashCache(object):
+    def __init__(self):
+        self.name = None
+        self.log_mode = None
+        self.log_path = None
+        self.api_url = None
+        self.api_username = None
+        self.api_password = None
+        self.ssid = None
+        self.validate_certs = None
+        self.disk_count = None
+        self.size_unit = None
+        self.cache_size_min = None
+        self.io_type = None
+        self.driveRefs = None
+        self.state = None
+        self._size_unit_map = dict(
+            bytes=1,
+            b=1,
+            kb=1024,
+            mb=1024 ** 2,
+            gb=1024 ** 3,
+            tb=1024 ** 4,
+            pb=1024 ** 5,
+            eb=1024 ** 6,
+            zb=1024 ** 7,
+            yb=1024 ** 8
+        )
+
+        argument_spec = basic_auth_argument_spec()
+        argument_spec.update(dict(
+            api_username=dict(type='str', required=True),
+            api_password=dict(type='str', required=True, no_log=True),
+            api_url=dict(type='str', required=True),
+            state=dict(default='present', choices=['present', 'absent'], type='str'),
+            ssid=dict(required=True, type='str'),
+            name=dict(required=True, type='str'),
+            disk_count=dict(type='int'),
+            disk_refs=dict(type='list'),
+            cache_size_min=dict(type='int'),
+            io_type=dict(default='filesystem', choices=['filesystem', 'database', 'media']),
+            size_unit=dict(default='gb', choices=['bytes', 'b', 'kb', 'mb', 'gb', 'tb', 'pb', 'eb', 'zb', 'yb'],
+                           type='str'),
+            criteria_disk_phy_type=dict(choices=['sas', 'sas4k', 'fibre', 'fibre520b', 'scsi', 'sata', 'pata'],
+                                        type='str'),
+            log_mode=dict(type='str'),
+            log_path=dict(type='str'),
+        ))
+        self.module = AnsibleModule(
+            argument_spec=argument_spec,
+            required_if=[
+
+            ],
+            mutually_exclusive=[
+
+            ],
+            # TODO: update validation for various selection criteria
+            supports_check_mode=True
+        )
+
+        self.__dict__.update(self.module.params)
+
+        # logging setup
+        self._logger = logging.getLogger(self.__class__.__name__)
+        self.debug = self._logger.debug
+
+        if self.log_mode == 'file' and self.log_path:
+            logging.basicConfig(level=logging.DEBUG, filename=self.log_path)
+        elif self.log_mode == 'stderr':
+            logging.basicConfig(level=logging.DEBUG, stream=sys.stderr)
+
+        self.post_headers = dict(Accept="application/json")
+        self.post_headers['Content-Type'] = 'application/json'
+
+    def get_candidate_disks(self, disk_count, size_unit='gb', capacity=None):
+        self.debug("getting candidate disks...")
+
+        drives_req = dict(
+            driveCount=disk_count,
+            sizeUnit=size_unit,
+            driveType='ssd',
+        )
+
+        if capacity:
+            drives_req['targetUsableCapacity'] = capacity
+
+        (rc, drives_resp) = request(self.api_url + "/storage-systems/%s/drives" % (self.ssid),
+                                    data=json.dumps(drives_req), headers=self.post_headers, method='POST',
+                                    url_username=self.api_username, url_password=self.api_password,
+                                    validate_certs=self.validate_certs)
+
+        if rc == 204:
+            self.module.fail_json(msg='Cannot find disks to match requested criteria for ssd cache')
+
+        disk_ids = [d['id'] for d in drives_resp]
+        bytes = reduce(lambda s, d: s + int(d['usableCapacity']), drives_resp, 0)
+
+        return (disk_ids, bytes)
+
+    def create_cache(self):
+        (disk_ids, bytes) = self.get_candidate_disks(disk_count=self.disk_count, size_unit=self.size_unit,
+                                                     capacity=self.cache_size_min)
+
+        self.debug("creating ssd cache...")
+
+        create_fc_req = dict(
+            driveRefs=disk_ids,
+            name=self.name
+        )
+
+        (rc, self.resp) = request(self.api_url + "/storage-systems/%s/flash-cache" % (self.ssid),
+                                  data=json.dumps(create_fc_req), headers=self.post_headers, method='POST',
+                                  url_username=self.api_username, url_password=self.api_password,
+                                  validate_certs=self.validate_certs)
+
+    def update_cache(self):
+        self.debug('updating flash cache config...')
+        update_fc_req = dict(
+            name=self.name,
+            configType=self.io_type
+        )
+
+        (rc, self.resp) = request(self.api_url + "/storage-systems/%s/flash-cache/configure" % (self.ssid),
+                                  data=json.dumps(update_fc_req), headers=self.post_headers, method='POST',
+                                  url_username=self.api_username, url_password=self.api_password,
+                                  validate_certs=self.validate_certs)
+
+    def delete_cache(self):
+        self.debug('deleting flash cache...')
+        (rc, self.resp) = request(self.api_url + "/storage-systems/%s/flash-cache" % (self.ssid), method='DELETE',
+                                  url_username=self.api_username, url_password=self.api_password,
+                                  validate_certs=self.validate_certs, ignore_errors=True)
+
+    @property
+    def needs_more_disks(self):
+        if len(self.cache_detail['driveRefs']) < self.disk_count:
+            self.debug("needs resize: current disk count %s < requested count %s",
+                       len(self.cache_detail['driveRefs']), self.disk_count)
+            return True
+
+    @property
+    def needs_less_disks(self):
+        if len(self.cache_detail['driveRefs']) > self.disk_count:
+            self.debug("needs resize: current disk count %s > requested count %s",
+                       len(self.cache_detail['driveRefs']), self.disk_count)
+            return True
+
+    @property
+    def current_size_bytes(self):
+        return int(self.cache_detail['fcDriveInfo']['fcWithDrives']['usedCapacity'])
+
+    @property
+    def requested_size_bytes(self):
+        if self.cache_size_min:
+            return self.cache_size_min * self._size_unit_map[self.size_unit]
+        else:
+            return 0
+
+    @property
+    def needs_more_capacity(self):
+        if self.current_size_bytes < self.requested_size_bytes:
+            self.debug("needs resize: current capacity %sb is less than requested minimum %sb",
+                       self.current_size_bytes, self.requested_size_bytes)
+            return True
+
+    @property
+    def needs_resize(self):
+        return self.needs_more_disks or
self.needs_more_capacity or self.needs_less_disks + + def resize_cache(self): + # increase up to disk count first, then iteratively add disks until we meet requested capacity + + # TODO: perform this calculation in check mode + current_disk_count = len(self.cache_detail['driveRefs']) + proposed_new_disks = 0 + + proposed_additional_bytes = 0 + proposed_disk_ids = [] + + if self.needs_more_disks: + proposed_disk_count = self.disk_count - current_disk_count + + (disk_ids, bytes) = self.get_candidate_disks(disk_count=proposed_disk_count) + proposed_additional_bytes = bytes + proposed_disk_ids = disk_ids + + while self.current_size_bytes + proposed_additional_bytes < self.requested_size_bytes: + proposed_new_disks += 1 + (disk_ids, bytes) = self.get_candidate_disks(disk_count=proposed_new_disks) + proposed_disk_ids = disk_ids + proposed_additional_bytes = bytes + + add_drives_req = dict( + driveRef=proposed_disk_ids + ) + + self.debug("adding drives to flash-cache...") + (rc, self.resp) = request(self.api_url + "/storage-systems/%s/flash-cache/addDrives" % (self.ssid), + data=json.dumps(add_drives_req), headers=self.post_headers, method='POST', + url_username=self.api_username, url_password=self.api_password, + validate_certs=self.validate_certs) + + elif self.needs_less_disks and self.driveRefs: + rm_drives = dict(driveRef=self.driveRefs) + (rc, self.resp) = request(self.api_url + "/storage-systems/%s/flash-cache/removeDrives" % (self.ssid), + data=json.dumps(rm_drives), headers=self.post_headers, method='POST', + url_username=self.api_username, url_password=self.api_password, + validate_certs=self.validate_certs) + + def apply(self): + result = dict(changed=False) + (rc, cache_resp) = request(self.api_url + "/storage-systems/%s/flash-cache" % (self.ssid), + url_username=self.api_username, url_password=self.api_password, + validate_certs=self.validate_certs, ignore_errors=True) + + if rc == 200: + self.cache_detail = cache_resp + else: + self.cache_detail = None + + if rc not in [200, 404]: + raise Exception( + "Unexpected error code %s fetching flash cache detail. 
Response data was %s" % (rc, cache_resp)) + + if self.state == 'present': + if self.cache_detail: + # TODO: verify parameters against detail for changes + if self.cache_detail['name'] != self.name: + self.debug("CHANGED: name differs") + result['changed'] = True + if self.cache_detail['flashCacheBase']['configType'] != self.io_type: + self.debug("CHANGED: io_type differs") + result['changed'] = True + if self.needs_resize: + self.debug("CHANGED: resize required") + result['changed'] = True + else: + self.debug("CHANGED: requested state is 'present' but cache does not exist") + result['changed'] = True + else: # requested state is absent + if self.cache_detail: + self.debug("CHANGED: requested state is 'absent' but cache exists") + result['changed'] = True + + if not result['changed']: + self.debug("no changes, exiting...") + self.module.exit_json(**result) + + if self.module.check_mode: + self.debug("changes pending in check mode, exiting early...") + self.module.exit_json(**result) + + if self.state == 'present': + if not self.cache_detail: + self.create_cache() + else: + if self.needs_resize: + self.resize_cache() + + # run update here as well, since io_type can't be set on creation + self.update_cache() + + elif self.state == 'absent': + self.delete_cache() + + # TODO: include other details about the storage pool (size, type, id, etc) + self.module.exit_json(changed=result['changed'], **self.resp) + + +def main(): + sp = NetAppESeriesFlashCache() + try: + sp.apply() + except Exception as e: + sp.debug("Exception in apply(): \n%s", to_native(e)) + sp.module.fail_json(msg="Failed to create flash cache. Error[%s]" % to_native(e), + exception=traceback.format_exc()) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/storage/netapp/netapp_e_global.py b/plugins/modules/storage/netapp/netapp_e_global.py new file mode 100644 index 0000000000..4cf1aa835c --- /dev/null +++ b/plugins/modules/storage/netapp/netapp_e_global.py @@ -0,0 +1,157 @@ +#!/usr/bin/python + +# (c) 2018, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: netapp_e_global +short_description: NetApp E-Series manage global settings configuration +description: + - Allow the user to configure several of the global settings associated with an E-Series storage-system +author: Michael Price (@lmprice) +extends_documentation_fragment: +- netapp.ontap.netapp.eseries + +options: + name: + description: + - Set the name of the E-Series storage-system + - This label/name doesn't have to be unique. + - May be up to 30 characters in length. + aliases: + - label + log_path: + description: + - A local path to a file to be used for debug logging + required: no +notes: + - Check mode is supported. + - This module requires Web Services API v1.3 or newer. +''' + +EXAMPLES = """ + - name: Set the storage-system name + netapp_e_global: + name: myArrayName + api_url: "10.1.1.1:8443" + api_username: "admin" + api_password: "myPass" +""" + +RETURN = """ +msg: + description: Success message + returned: on success + type: str + sample: The settings have been updated. +name: + description: + - The current name/label of the storage-system. 
diff --git a/plugins/modules/storage/netapp/netapp_e_global.py b/plugins/modules/storage/netapp/netapp_e_global.py
new file mode 100644
index 0000000000..4cf1aa835c
--- /dev/null
+++ b/plugins/modules/storage/netapp/netapp_e_global.py
@@ -0,0 +1,157 @@
+#!/usr/bin/python
+
+# (c) 2018, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+DOCUMENTATION = '''
+---
+module: netapp_e_global
+short_description: NetApp E-Series manage global settings configuration
+description:
+    - Allow the user to configure several of the global settings associated with an E-Series storage-system
+author: Michael Price (@lmprice)
+extends_documentation_fragment:
+- netapp.ontap.netapp.eseries
+
+options:
+    name:
+        description:
+            - Set the name of the E-Series storage-system
+            - This label/name doesn't have to be unique.
+            - May be up to 30 characters in length.
+        aliases:
+            - label
+    log_path:
+        description:
+            - A local path to a file to be used for debug logging
+        required: no
+notes:
+    - Check mode is supported.
+    - This module requires Web Services API v1.3 or newer.
+'''
+
+EXAMPLES = """
+    - name: Set the storage-system name
+      netapp_e_global:
+        name: myArrayName
+        api_url: "10.1.1.1:8443"
+        api_username: "admin"
+        api_password: "myPass"
+"""
+
+RETURN = """
+msg:
+    description: Success message
+    returned: on success
+    type: str
+    sample: The requested settings have been updated.
+name:
+    description:
+        - The current name/label of the storage-system.
+    returned: on success
+    sample: myArrayName
+    type: str
+"""
+import json
+import logging
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp import request, eseries_host_argument_spec
+from ansible.module_utils._text import to_native
+
+HEADERS = {
+    "Content-Type": "application/json",
+    "Accept": "application/json",
+}
+
+
+class GlobalSettings(object):
+    def __init__(self):
+        argument_spec = eseries_host_argument_spec()
+        argument_spec.update(dict(
+            name=dict(type='str', required=False, aliases=['label']),
+            log_path=dict(type='str', required=False),
+        ))
+
+        self.module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True, )
+        args = self.module.params
+        self.name = args['name']
+
+        self.ssid = args['ssid']
+        self.url = args['api_url']
+        self.creds = dict(url_password=args['api_password'],
+                          validate_certs=args['validate_certs'],
+                          url_username=args['api_username'], )
+
+        self.check_mode = self.module.check_mode
+
+        log_path = args['log_path']
+
+        # logging setup
+        self._logger = logging.getLogger(self.__class__.__name__)
+
+        if log_path:
+            logging.basicConfig(
+                level=logging.DEBUG, filename=log_path, filemode='w',
+                format='%(relativeCreated)dms %(levelname)s %(module)s.%(funcName)s:%(lineno)d\n %(message)s')
+
+        if not self.url.endswith('/'):
+            self.url += '/'
+
+        if self.name and len(self.name) > 30:
+            self.module.fail_json(msg="The provided name is invalid, it must be no more than 30 characters in length.")
+
+    def get_name(self):
+        try:
+            (rc, result) = request(self.url + 'storage-systems/%s' % self.ssid, headers=HEADERS, **self.creds)
+            if result['status'] in ['offline', 'neverContacted']:
+                self.module.fail_json(msg="This storage-system is offline! Array Id [%s]." % (self.ssid))
+            return result['name']
+        except Exception as err:
+            self.module.fail_json(msg="Connection failure! Array Id [%s]. Error [%s]." % (self.ssid, to_native(err)))
+
+    def update_name(self):
+        name = self.get_name()
+        update = False
+        if self.name != name:
+            update = True
+
+        body = dict(name=self.name)
+
+        if update and not self.check_mode:
+            try:
+                (rc, result) = request(self.url + 'storage-systems/%s/configuration' % self.ssid, method='POST',
+                                       data=json.dumps(body), headers=HEADERS, **self.creds)
+                self._logger.info("Set name to %s.", result['name'])
+            # This is going to catch cases like a connection failure
+            except Exception as err:
+                self.module.fail_json(
+                    msg="We failed to set the storage-system name! Array Id [%s]. Error [%s]."
+                        % (self.ssid, to_native(err)))
+        return update
+
+    def update(self):
+        update = self.update_name()
+        name = self.get_name()
+
+        self.module.exit_json(msg="The requested settings have been updated.", changed=update, name=name)
+
+    def __call__(self, *args, **kwargs):
+        self.update()
+
+
+def main():
+    settings = GlobalSettings()
+    settings()
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/storage/netapp/netapp_e_host.py b/plugins/modules/storage/netapp/netapp_e_host.py
new file mode 100644
index 0000000000..73445f4dcb
--- /dev/null
+++ b/plugins/modules/storage/netapp/netapp_e_host.py
@@ -0,0 +1,536 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (c) 2018, NetApp Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: netapp_e_host +short_description: NetApp E-Series manage eseries hosts +description: Create, update, remove hosts on NetApp E-series storage arrays +author: + - Kevin Hulquest (@hulquest) + - Nathan Swartz (@ndswartz) +extends_documentation_fragment: +- netapp.ontap.netapp.eseries + +options: + name: + description: + - If the host doesn't yet exist, the label/name to assign at creation time. + - If the hosts already exists, this will be used to uniquely identify the host to make any required changes + required: True + aliases: + - label + state: + description: + - Set to absent to remove an existing host + - Set to present to modify or create a new host definition + choices: + - absent + - present + default: present + host_type: + description: + - This is the type of host to be mapped + - Required when C(state=present) + - Either one of the following names can be specified, Linux DM-MP, VMWare, Windows, Windows Clustered, or a + host type index which can be found in M(netapp_e_facts) + type: str + aliases: + - host_type_index + ports: + description: + - A list of host ports you wish to associate with the host. + - Host ports are uniquely identified by their WWN or IQN. Their assignments to a particular host are + uniquely identified by a label and these must be unique. + required: False + suboptions: + type: + description: + - The interface type of the port to define. + - Acceptable choices depend on the capabilities of the target hardware/software platform. + required: true + choices: + - iscsi + - sas + - fc + - ib + - nvmeof + - ethernet + label: + description: + - A unique label to assign to this port assignment. + required: true + port: + description: + - The WWN or IQN of the hostPort to assign to this port definition. + required: true + force_port: + description: + - Allow ports that are already assigned to be re-assigned to your current host + required: false + type: bool + group: + description: + - The unique identifier of the host-group you want the host to be a member of; this is used for clustering. + required: False + aliases: + - cluster + log_path: + description: + - A local path to a file to be used for debug logging + required: False +''' + +EXAMPLES = """ + - name: Define or update an existing host named 'Host1' + netapp_e_host: + ssid: "1" + api_url: "10.113.1.101:8443" + api_username: admin + api_password: myPassword + name: "Host1" + state: present + host_type_index: Linux DM-MP + ports: + - type: 'iscsi' + label: 'PORT_1' + port: 'iqn.1996-04.de.suse:01:56f86f9bd1fe' + - type: 'fc' + label: 'FC_1' + port: '10:00:FF:7C:FF:FF:FF:01' + - type: 'fc' + label: 'FC_2' + port: '10:00:FF:7C:FF:FF:FF:00' + + - name: Ensure a host named 'Host2' doesn't exist + netapp_e_host: + ssid: "1" + api_url: "10.113.1.101:8443" + api_username: admin + api_password: myPassword + name: "Host2" + state: absent +""" + +RETURN = """ +msg: + description: + - A user-readable description of the actions performed. + returned: on success + type: str + sample: The host has been created. 
+id:
+    description:
+        - the unique identifier of the host on the E-Series storage-system
+    returned: on success when state=present
+    type: str
+    sample: 00000000600A098000AAC0C3003004700AD86A52
+    version_added: "2.6"
+
+ssid:
+    description:
+        - the unique identifier of the E-Series storage-system with the current api
+    returned: on success
+    type: str
+    sample: 1
+    version_added: "2.6"
+
+api_url:
+    description:
+        - the url of the API that this request was processed by
+    returned: on success
+    type: str
+    sample: https://webservices.example.com:8443
+    version_added: "2.6"
+"""
+import json
+import logging
+import re
+from pprint import pformat
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp import request, eseries_host_argument_spec
+from ansible.module_utils._text import to_native
+
+HEADERS = {
+    "Content-Type": "application/json",
+    "Accept": "application/json",
+}
+
+
+class Host(object):
+    HOST_TYPE_INDEXES = {"linux dm-mp": 28, "vmware": 10, "windows": 1, "windows clustered": 8}
+
+    def __init__(self):
+        argument_spec = eseries_host_argument_spec()
+        argument_spec.update(dict(
+            state=dict(type='str', default='present', choices=['absent', 'present']),
+            group=dict(type='str', required=False, aliases=['cluster']),
+            ports=dict(type='list', required=False),
+            force_port=dict(type='bool', default=False),
+            name=dict(type='str', required=True, aliases=['label']),
+            host_type_index=dict(type='str', aliases=['host_type']),
+            log_path=dict(type='str', required=False),
+        ))
+
+        self.module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
+        self.check_mode = self.module.check_mode
+        args = self.module.params
+        self.group = args['group']
+        self.ports = args['ports']
+        self.force_port = args['force_port']
+        self.name = args['name']
+        self.state = args['state']
+        self.ssid = args['ssid']
+        self.url = args['api_url']
+        self.user = args['api_username']
+        self.pwd = args['api_password']
+        self.certs = args['validate_certs']
+
+        self.post_body = dict()
+        self.all_hosts = list()
+        self.host_obj = dict()
+        self.newPorts = list()
+        self.portsForUpdate = list()
+        self.portsForRemoval = list()
+
+        # Update host type with the corresponding index
+        host_type = args['host_type_index']
+        if host_type:
+            host_type = host_type.lower()
+            if host_type in [key.lower() for key in list(self.HOST_TYPE_INDEXES.keys())]:
+                self.host_type_index = self.HOST_TYPE_INDEXES[host_type]
+            elif host_type.isdigit():
+                self.host_type_index = int(args['host_type_index'])
+            else:
+                self.module.fail_json(msg="host_type must be either a host type name or a host type index as found"
+                                          " in the documentation.")
+        else:
+            self.host_type_index = None
+
+        # logging setup
+        self._logger = logging.getLogger(self.__class__.__name__)
+        if args['log_path']:
+            logging.basicConfig(
+                level=logging.DEBUG, filename=args['log_path'], filemode='w',
+                format='%(relativeCreated)dms %(levelname)s %(module)s.%(funcName)s:%(lineno)d\n %(message)s')
+
+        if not self.url.endswith('/'):
+            self.url += '/'
+
+        # Ensure when state==present then host_type_index is defined
+        if self.state == "present" and self.host_type_index is None:
+            self.module.fail_json(msg="Host_type_index is required when state=='present'.
+
+        # logging setup
+        self._logger = logging.getLogger(self.__class__.__name__)
+        if args['log_path']:
+            logging.basicConfig(
+                level=logging.DEBUG, filename=args['log_path'], filemode='w',
+                format='%(relativeCreated)dms %(levelname)s %(module)s.%(funcName)s:%(lineno)d\n %(message)s')
+
+        if not self.url.endswith('/'):
+            self.url += '/'
+
+        # Ensure host_type_index is defined whenever state==present
+        if self.state == "present" and self.host_type_index is None:
+            self.module.fail_json(msg="host_type_index is required when state=='present'. Array Id: [%s]" % self.ssid)
+
+        # Normalize the port representation when ports are provided
+        if self.ports is not None:
+            for port in self.ports:
+                port['label'] = port['label'].lower()
+                port['type'] = port['type'].lower()
+                port['port'] = port['port'].lower()
+
+                # Determine whether the address is a 16-byte WWPN and, if so,
+                # strip the colons and any leading 0x.
+                if re.match(r'^(0x)?[0-9a-f]{16}$', port['port'].replace(':', '')):
+                    port['port'] = port['port'].replace(':', '').replace('0x', '')
+
+    def valid_host_type(self):
+        host_types = None
+        try:
+            (rc, host_types) = request(self.url + 'storage-systems/%s/host-types' % self.ssid, url_password=self.pwd,
+                                       url_username=self.user, validate_certs=self.certs, headers=HEADERS)
+        except Exception as err:
+            self.module.fail_json(
+                msg="Failed to get host types. Array Id [%s]. Error [%s]." % (self.ssid, to_native(err)))
+
+        try:
+            # Raises IndexError when no defined host type matches the requested index
+            match = list(filter(lambda host_type: host_type['index'] == self.host_type_index, host_types))[0]
+            return True
+        except IndexError:
+            self.module.fail_json(msg="There is no host type with index %s" % self.host_type_index)
+
+    def assigned_host_ports(self, apply_unassigning=False):
+        """Determine if the hostPorts requested have already been assigned and return list of required used ports."""
+        used_host_ports = {}
+        for host in self.all_hosts:
+            if host['label'] != self.name:
+                # Ports claimed by a different host conflict unless force_port is set.
+                for host_port in host['hostSidePorts']:
+                    for port in self.ports:
+                        if port['port'] == host_port["address"] or port['label'] == host_port['label']:
+                            if not self.force_port:
+                                self.module.fail_json(msg="There are no host ports available OR there are not enough"
+                                                          " unassigned host ports")
+                            else:
+                                # Determine port reference
+                                port_ref = [port["hostPortRef"] for port in host["ports"]
+                                            if port["hostPortName"] == host_port["address"]]
+                                port_ref.extend([port["initiatorRef"] for port in host["initiators"]
+                                                 if port["nodeName"]["iscsiNodeName"] == host_port["address"]])
+
+                                # Create dictionary of hosts containing list of port references
+                                if host["hostRef"] not in used_host_ports.keys():
+                                    used_host_ports.update({host["hostRef"]: port_ref})
+                                else:
+                                    used_host_ports[host["hostRef"]].extend(port_ref)
+            else:
+                # For the target host itself, flag ports whose label/address pairing changed.
+                for host_port in host['hostSidePorts']:
+                    for port in self.ports:
+                        if ((host_port['label'] == port['label'] and host_port['address'] != port['port']) or
+                                (host_port['label'] != port['label'] and host_port['address'] == port['port'])):
+                            if not self.force_port:
+                                self.module.fail_json(msg="There are no host ports available OR there are not enough"
+                                                          " unassigned host ports")
+                            else:
+                                # Determine port reference
+                                port_ref = [port["hostPortRef"] for port in host["ports"]
+                                            if port["hostPortName"] == host_port["address"]]
+                                port_ref.extend([port["initiatorRef"] for port in host["initiators"]
+                                                 if port["nodeName"]["iscsiNodeName"] == host_port["address"]])
+
+                                # Create dictionary of hosts containing list of port references
+                                if host["hostRef"] not in used_host_ports.keys():
+                                    used_host_ports.update({host["hostRef"]: port_ref})
+                                else:
+                                    used_host_ports[host["hostRef"]].extend(port_ref)
+
+        # Unassign assigned ports
+        if apply_unassigning:
+            for host_ref in used_host_ports.keys():
+                try:
+                    rc, resp = request(self.url + 'storage-systems/%s/hosts/%s' % (self.ssid, host_ref),
+                                       url_username=self.user, url_password=self.pwd, headers=HEADERS,
+                                       validate_certs=self.certs, method='POST',
+                                       data=json.dumps({"portsToRemove": used_host_ports[host_ref]}))
+                except Exception as err:
+                    self.module.fail_json(msg="Failed to unassign host port. Host Id [%s]. Array Id [%s]. Ports [%s]."
+ " Error [%s]." % (self.host_obj['id'], self.ssid, + used_host_ports[host_ref], to_native(err))) + + return used_host_ports + + def group_id(self): + if self.group: + try: + (rc, all_groups) = request(self.url + 'storage-systems/%s/host-groups' % self.ssid, + url_password=self.pwd, + url_username=self.user, validate_certs=self.certs, headers=HEADERS) + except Exception as err: + self.module.fail_json( + msg="Failed to get host groups. Array Id [%s]. Error [%s]." % (self.ssid, to_native(err))) + + try: + group_obj = list(filter(lambda group: group['name'] == self.group, all_groups))[0] + return group_obj['id'] + except IndexError: + self.module.fail_json(msg="No group with the name: %s exists" % self.group) + else: + # Return the value equivalent of no group + return "0000000000000000000000000000000000000000" + + def host_exists(self): + """Determine if the requested host exists + As a side effect, set the full list of defined hosts in 'all_hosts', and the target host in 'host_obj'. + """ + match = False + all_hosts = list() + + try: + (rc, all_hosts) = request(self.url + 'storage-systems/%s/hosts' % self.ssid, url_password=self.pwd, + url_username=self.user, validate_certs=self.certs, headers=HEADERS) + except Exception as err: + self.module.fail_json( + msg="Failed to determine host existence. Array Id [%s]. Error [%s]." % (self.ssid, to_native(err))) + + # Augment the host objects + for host in all_hosts: + for port in host['hostSidePorts']: + port['type'] = port['type'].lower() + port['address'] = port['address'].lower() + port['label'] = port['label'].lower() + + # Augment hostSidePorts with their ID (this is an omission in the API) + ports = dict((port['label'], port['id']) for port in host['ports']) + ports.update((port['label'], port['id']) for port in host['initiators']) + + for host_side_port in host['hostSidePorts']: + if host_side_port['label'] in ports: + host_side_port['id'] = ports[host_side_port['label']] + + if host['label'] == self.name: + self.host_obj = host + match = True + + self.all_hosts = all_hosts + return match + + def needs_update(self): + """Determine whether we need to update the Host object + As a side effect, we will set the ports that we need to update (portsForUpdate), and the ports we need to add + (newPorts), on self. + """ + changed = False + if (self.host_obj["clusterRef"].lower() != self.group_id().lower() or + self.host_obj["hostTypeIndex"] != self.host_type_index): + self._logger.info("Either hostType or the clusterRef doesn't match, an update is required.") + changed = True + current_host_ports = dict((port["id"], {"type": port["type"], "port": port["address"], "label": port["label"]}) + for port in self.host_obj["hostSidePorts"]) + + if self.ports: + for port in self.ports: + for current_host_port_id in current_host_ports.keys(): + if port == current_host_ports[current_host_port_id]: + current_host_ports.pop(current_host_port_id) + break + elif port["port"] == current_host_ports[current_host_port_id]["port"]: + if self.port_on_diff_host(port) and not self.force_port: + self.module.fail_json(msg="The port you specified [%s] is associated with a different host." 
+ " Specify force_port as True or try a different port spec" % port) + + if (port["label"] != current_host_ports[current_host_port_id]["label"] or + port["type"] != current_host_ports[current_host_port_id]["type"]): + current_host_ports.pop(current_host_port_id) + self.portsForUpdate.append({"portRef": current_host_port_id, "port": port["port"], + "label": port["label"], "hostRef": self.host_obj["hostRef"]}) + break + else: + self.newPorts.append(port) + + self.portsForRemoval = list(current_host_ports.keys()) + changed = any([self.newPorts, self.portsForUpdate, self.portsForRemoval, changed]) + + return changed + + def port_on_diff_host(self, arg_port): + """ Checks to see if a passed in port arg is present on a different host """ + for host in self.all_hosts: + # Only check 'other' hosts + if host['name'] != self.name: + for port in host['hostSidePorts']: + # Check if the port label is found in the port dict list of each host + if arg_port['label'] == port['label'] or arg_port['port'] == port['address']: + self.other_host = host + return True + return False + + def update_host(self): + self._logger.info("Beginning the update for host=%s.", self.name) + + if self.ports: + + # Remove ports that need reassigning from their current host. + self.assigned_host_ports(apply_unassigning=True) + + self.post_body["portsToUpdate"] = self.portsForUpdate + self.post_body["ports"] = self.newPorts + self._logger.info("Requested ports: %s", pformat(self.ports)) + else: + self._logger.info("No host ports were defined.") + + if self.group: + self.post_body['groupId'] = self.group_id() + + self.post_body['hostType'] = dict(index=self.host_type_index) + + api = self.url + 'storage-systems/%s/hosts/%s' % (self.ssid, self.host_obj['id']) + self._logger.info("POST => url=%s, body=%s.", api, pformat(self.post_body)) + + if not self.check_mode: + try: + (rc, self.host_obj) = request(api, url_username=self.user, url_password=self.pwd, headers=HEADERS, + validate_certs=self.certs, method='POST', data=json.dumps(self.post_body)) + except Exception as err: + self.module.fail_json( + msg="Failed to update host. Array Id [%s]. Error [%s]." % (self.ssid, to_native(err))) + + payload = self.build_success_payload(self.host_obj) + self.module.exit_json(changed=True, **payload) + + def create_host(self): + self._logger.info("Creating host definition.") + + # Remove ports that need reassigning from their current host. + self.assigned_host_ports(apply_unassigning=True) + + # needs_reassignment = False + post_body = dict( + name=self.name, + hostType=dict(index=self.host_type_index), + groupId=self.group_id(), + ) + + if self.ports: + post_body.update(ports=self.ports) + + api = self.url + "storage-systems/%s/hosts" % self.ssid + self._logger.info('POST => url=%s, body=%s', api, pformat(post_body)) + + if not self.check_mode: + if not self.host_exists(): + try: + (rc, self.host_obj) = request(api, method='POST', url_username=self.user, url_password=self.pwd, validate_certs=self.certs, + data=json.dumps(post_body), headers=HEADERS) + except Exception as err: + self.module.fail_json( + msg="Failed to create host. Array Id [%s]. Error [%s]." % (self.ssid, to_native(err))) + else: + payload = self.build_success_payload(self.host_obj) + self.module.exit_json(changed=False, msg="Host already exists. Id [%s]. Host [%s]." 
% (self.ssid, self.name), **payload) + + payload = self.build_success_payload(self.host_obj) + self.module.exit_json(changed=True, msg='Host created.', **payload) + + def remove_host(self): + try: + (rc, resp) = request(self.url + "storage-systems/%s/hosts/%s" % (self.ssid, self.host_obj['id']), + method='DELETE', + url_username=self.user, url_password=self.pwd, validate_certs=self.certs) + except Exception as err: + self.module.fail_json( + msg="Failed to remove host. Host[%s]. Array Id [%s]. Error [%s]." % (self.host_obj['id'], + self.ssid, + to_native(err))) + + def build_success_payload(self, host=None): + keys = ['id'] + if host is not None: + result = dict((key, host[key]) for key in keys) + else: + result = dict() + result['ssid'] = self.ssid + result['api_url'] = self.url + return result + + def apply(self): + if self.state == 'present': + if self.host_exists(): + if self.needs_update() and self.valid_host_type(): + self.update_host() + else: + payload = self.build_success_payload(self.host_obj) + self.module.exit_json(changed=False, msg="Host already present; no changes required.", **payload) + elif self.valid_host_type(): + self.create_host() + else: + payload = self.build_success_payload() + if self.host_exists(): + self.remove_host() + self.module.exit_json(changed=True, msg="Host removed.", **payload) + else: + self.module.exit_json(changed=False, msg="Host already absent.", **payload) + + +def main(): + host = Host() + host.apply() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/storage/netapp/netapp_e_hostgroup.py b/plugins/modules/storage/netapp/netapp_e_hostgroup.py new file mode 100644 index 0000000000..87338af793 --- /dev/null +++ b/plugins/modules/storage/netapp/netapp_e_hostgroup.py @@ -0,0 +1,302 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2016, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {"metadata_version": "1.1", + "status": ["preview"], + "supported_by": "community"} + + +DOCUMENTATION = ''' +--- +module: netapp_e_hostgroup +short_description: NetApp E-Series manage array host groups +author: + - Kevin Hulquest (@hulquest) + - Nathan Swartz (@ndswartz) +description: Create, update or destroy host groups on a NetApp E-Series storage array. +extends_documentation_fragment: +- netapp.ontap.netapp.eseries + +options: + state: + required: true + description: + - Whether the specified host group should exist or not. + choices: ["present", "absent"] + name: + required: false + description: + - Name of the host group to manage + - This option is mutually exclusive with I(id). + new_name: + required: false + description: + - Specify this when you need to update the name of a host group + id: + required: false + description: + - Host reference identifier for the host group to manage. + - This option is mutually exclusive with I(name). + hosts: + required: false + description: + - List of host names/labels to add to the group +''' +EXAMPLES = """ + - name: Configure Hostgroup + netapp_e_hostgroup: + ssid: "{{ ssid }}" + api_url: "{{ netapp_api_url }}" + api_username: "{{ netapp_api_username }}" + api_password: "{{ netapp_api_password }}" + validate_certs: "{{ netapp_api_validate_certs }}" + state: present +""" +RETURN = """ +clusterRef: + description: The unique identification value for this object. Other objects may use this reference value to refer to the cluster. 
+    returned: always except when state is absent
+    type: str
+    sample: "3233343536373839303132333100000000000000"
+confirmLUNMappingCreation:
+    description: If true, indicates that creation of LUN-to-volume mappings should require careful confirmation from the end-user, since such a mapping
+                 will alter the volume access rights of other clusters, in addition to this one.
+    returned: always
+    type: bool
+    sample: false
+hosts:
+    description: A list of the hosts that are part of the host group after all operations.
+    returned: always except when state is absent
+    type: list
+    sample: ["HostA","HostB"]
+id:
+    description: The ID of the host group.
+    returned: always except when state is absent
+    type: str
+    sample: "3233343536373839303132333100000000000000"
+isSAControlled:
+    description: If true, indicates that I/O accesses from this cluster are subject to the storage array's default LUN-to-volume mappings. If false,
+                 indicates that I/O accesses from the cluster are subject to cluster-specific LUN-to-volume mappings.
+    returned: always except when state is absent
+    type: bool
+    sample: false
+label:
+    description: The user-assigned, descriptive label string for the cluster.
+    returned: always
+    type: str
+    sample: "MyHostGroup"
+name:
+    description: Same as I(label).
+    returned: always except when state is absent
+    type: str
+    sample: "MyHostGroup"
+protectionInformationCapableAccessMethod:
+    description: This field is true if the host has a PI capable access method.
+    returned: always except when state is absent
+    type: bool
+    sample: true
+"""
+
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp import NetAppESeriesModule
+from ansible.module_utils._text import to_native
+
+
+class NetAppESeriesHostGroup(NetAppESeriesModule):
+    EXPANSION_TIMEOUT_SEC = 10
+    DEFAULT_DISK_POOL_MINIMUM_DISK_COUNT = 11
+
+    def __init__(self):
+        version = "02.00.0000.0000"
+        ansible_options = dict(
+            state=dict(required=True, choices=["present", "absent"], type="str"),
+            name=dict(required=False, type="str"),
+            new_name=dict(required=False, type="str"),
+            id=dict(required=False, type="str"),
+            hosts=dict(required=False, type="list"))
+        mutually_exclusive = [["name", "id"]]
+        super(NetAppESeriesHostGroup, self).__init__(ansible_options=ansible_options,
+                                                     web_services_version=version,
+                                                     supports_check_mode=True,
+                                                     mutually_exclusive=mutually_exclusive)
+
+        args = self.module.params
+        self.state = args["state"]
+        self.name = args["name"]
+        self.new_name = args["new_name"]
+        self.id = args["id"]
+        self.hosts_list = args["hosts"]
+
+        self.current_host_group = None
+
+    @property
+    def hosts(self):
+        """Retrieve a list of host reference identifiers that should be associated with the host group."""
+        host_list = []
+        existing_hosts = []
+
+        if self.hosts_list:
+            try:
+                rc, existing_hosts = self.request("storage-systems/%s/hosts" % self.ssid)
+            except Exception as error:
+                self.module.fail_json(msg="Failed to retrieve hosts information. Array id [%s]. Error[%s]."
+                                          % (self.ssid, to_native(error)))
+
+            for host in self.hosts_list:
+                for existing_host in existing_hosts:
+                    if host in existing_host["id"] or host in existing_host["name"]:
+                        host_list.append(existing_host["id"])
+                        break
+                else:
+                    # The else clause runs only when no existing host matched the requested name/id.
+                    self.module.fail_json(msg="Expected host does not exist. Array id [%s]. Host [%s]."
+                                              % (self.ssid, host))
+
+        return host_list
+
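+    # Shape of the list built by the host_groups property below (illustrative values):
+    #   [{"id": "<clusterRef>", "name": "<group label>", "hosts": ["<hostRef>", ...]}, ...]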
+    @property
+    def host_groups(self):
+        """Retrieve a list of existing host groups."""
+        host_groups = []
+        hosts = []
+        try:
+            rc, host_groups = self.request("storage-systems/%s/host-groups" % self.ssid)
+            rc, hosts = self.request("storage-systems/%s/hosts" % self.ssid)
+        except Exception as error:
+            self.module.fail_json(msg="Failed to retrieve host group information. Array id [%s]. Error[%s]."
+                                      % (self.ssid, to_native(error)))
+
+        host_groups = [{"id": group["clusterRef"], "name": group["name"]} for group in host_groups]
+        for group in host_groups:
+            hosts_ids = []
+            for host in hosts:
+                if group["id"] == host["clusterRef"]:
+                    hosts_ids.append(host["hostRef"])
+            group.update({"hosts": hosts_ids})
+
+        return host_groups
+
+    @property
+    def current_hosts_in_host_group(self):
+        """Retrieve the current hosts associated with the current host group."""
+        current_hosts = []
+        for group in self.host_groups:
+            if (self.name and group["name"] == self.name) or (self.id and group["id"] == self.id):
+                current_hosts = group["hosts"]
+
+        return current_hosts
+
+    def unassign_hosts(self, host_list=None):
+        """Unassign hosts from host group."""
+        if host_list is None:
+            host_list = self.current_host_group["hosts"]
+
+        for host_id in host_list:
+            try:
+                rc, resp = self.request("storage-systems/%s/hosts/%s/move" % (self.ssid, host_id),
+                                        method="POST", data={"group": "0000000000000000000000000000000000000000"})
+            except Exception as error:
+                self.module.fail_json(msg="Failed to unassign hosts from host group. Array id [%s]. Host id [%s]."
+                                          " Error[%s]." % (self.ssid, host_id, to_native(error)))
+
+    def delete_host_group(self, unassign_hosts=True):
+        """Delete host group."""
+        if unassign_hosts:
+            self.unassign_hosts()
+
+        try:
+            rc, resp = self.request("storage-systems/%s/host-groups/%s" % (self.ssid, self.current_host_group["id"]),
+                                    method="DELETE")
+        except Exception as error:
+            self.module.fail_json(msg="Failed to delete host group. Array id [%s]. Error[%s]."
+                                      % (self.ssid, to_native(error)))
+
+    def create_host_group(self):
+        """Create host group."""
+        data = {"name": self.name, "hosts": self.hosts}
+
+        response = None
+        try:
+            rc, response = self.request("storage-systems/%s/host-groups" % self.ssid, method="POST", data=data)
+        except Exception as error:
+            self.module.fail_json(msg="Failed to create host group. Array id [%s]. Error[%s]."
+                                      % (self.ssid, to_native(error)))
+
+        return response
+
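+    # Note: as used by update_host_group below, a POST to an existing host-group
+    # identifier updates that group in place rather than creating a new one.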
+    def update_host_group(self):
+        """Update host group."""
+        data = {"name": self.new_name if self.new_name else self.name,
+                "hosts": self.hosts}
+
+        # Unassign hosts that should not be part of the host group
+        desired_host_ids = self.hosts
+        for host in self.current_hosts_in_host_group:
+            if host not in desired_host_ids:
+                self.unassign_hosts([host])
+
+        update_response = None
+        try:
+            rc, update_response = self.request("storage-systems/%s/host-groups/%s"
+                                               % (self.ssid, self.current_host_group["id"]), method="POST", data=data)
+        except Exception as error:
+            self.module.fail_json(msg="Failed to update host group. Array id [%s]. Error[%s]."
+                                      % (self.ssid, to_native(error)))
+
+        return update_response
+
+    def apply(self):
+        """Apply desired host group state to the storage array."""
+        changes_required = False
+
+        # Search for existing host group match
+        for group in self.host_groups:
+            if (self.id and group["id"] == self.id) or (self.name and group["name"] == self.name):
+                self.current_host_group = group
+
+        # Determine whether changes are required
+        if self.state == "present":
+            if self.current_host_group:
+                if (self.new_name and self.new_name != self.name) or self.hosts != self.current_host_group["hosts"]:
+                    changes_required = True
+            else:
+                if not self.name:
+                    self.module.fail_json(msg="The option name must be supplied when creating a new host group."
+                                              " Array id [%s]." % self.ssid)
+                changes_required = True
+
+        elif self.current_host_group:
+            changes_required = True
+
+        # Apply any necessary changes
+        msg = "No changes required."
+        if changes_required and not self.module.check_mode:
+            if self.state == "present":
+                if self.current_host_group:
+                    if ((self.new_name and self.new_name != self.name) or
+                            (self.hosts != self.current_host_group["hosts"])):
+                        msg = self.update_host_group()
+                else:
+                    msg = self.create_host_group()
+
+            elif self.current_host_group:
+                self.delete_host_group()
+                msg = "Host group deleted. Array Id [%s]. Host Name [%s]. Host Id [%s]."\
+                      % (self.ssid, self.current_host_group["name"], self.current_host_group["id"])
+
+        self.module.exit_json(msg=msg, changed=changes_required)
+
+
+def main():
+    hostgroup = NetAppESeriesHostGroup()
+    hostgroup.apply()
+
+
+if __name__ == "__main__":
+    main()
diff --git a/plugins/modules/storage/netapp/netapp_e_iscsi_interface.py b/plugins/modules/storage/netapp/netapp_e_iscsi_interface.py
new file mode 100644
index 0000000000..c6f0a39e71
--- /dev/null
+++ b/plugins/modules/storage/netapp/netapp_e_iscsi_interface.py
@@ -0,0 +1,398 @@
+#!/usr/bin/python
+
+# (c) 2018, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+DOCUMENTATION = '''
+---
+module: netapp_e_iscsi_interface
+short_description: NetApp E-Series manage iSCSI interface configuration
+description:
+    - Configure settings of an E-Series iSCSI interface
+author: Michael Price (@lmprice)
+extends_documentation_fragment:
+- netapp.ontap.netapp.eseries
+
+options:
+    controller:
+        description:
+            - The controller that owns the port you want to configure.
+            - Controller names are presented alphabetically, with the first controller as A,
+              the second as B, and so on.
+            - Current hardware models have either 1 or 2 available controllers, but that is not a guaranteed hard
+              limitation and could change in the future.
+        required: yes
+        choices:
+            - A
+            - B
+    name:
+        description:
+            - The channel of the port to modify the configuration of.
+            - The list of choices is not necessarily comprehensive. It depends on the number of ports
+              that are available in the system.
+            - The numerical value represents the number of the channel (typically from left to right on the HIC),
+              beginning with a value of 1.
+        required: yes
+        aliases:
+            - channel
+    state:
+        description:
+            - When enabled, the provided configuration will be utilized.
+            - When disabled, the IPv4 configuration will be cleared and IPv4 connectivity disabled.
+ choices: + - enabled + - disabled + default: enabled + address: + description: + - The IPv4 address to assign to the interface. + - Should be specified in xx.xx.xx.xx form. + - Mutually exclusive with I(config_method=dhcp) + subnet_mask: + description: + - The subnet mask to utilize for the interface. + - Should be specified in xx.xx.xx.xx form. + - Mutually exclusive with I(config_method=dhcp) + gateway: + description: + - The IPv4 gateway address to utilize for the interface. + - Should be specified in xx.xx.xx.xx form. + - Mutually exclusive with I(config_method=dhcp) + config_method: + description: + - The configuration method type to use for this interface. + - dhcp is mutually exclusive with I(address), I(subnet_mask), and I(gateway). + choices: + - dhcp + - static + default: dhcp + mtu: + description: + - The maximum transmission units (MTU), in bytes. + - This allows you to configure a larger value for the MTU, in order to enable jumbo frames + (any value > 1500). + - Generally, it is necessary to have your host, switches, and other components not only support jumbo + frames, but also have it configured properly. Therefore, unless you know what you're doing, it's best to + leave this at the default. + default: 1500 + aliases: + - max_frame_size + log_path: + description: + - A local path to a file to be used for debug logging + required: no +notes: + - Check mode is supported. + - The interface settings are applied synchronously, but changes to the interface itself (receiving a new IP address + via dhcp, etc), can take seconds or minutes longer to take effect. + - This module will not be useful/usable on an E-Series system without any iSCSI interfaces. + - This module requires a Web Services API version of >= 1.3. +''' + +EXAMPLES = """ + - name: Configure the first port on the A controller with a static IPv4 address + netapp_e_iscsi_interface: + name: "1" + controller: "A" + config_method: static + address: "192.168.1.100" + subnet_mask: "255.255.255.0" + gateway: "192.168.1.1" + ssid: "1" + api_url: "10.1.1.1:8443" + api_username: "admin" + api_password: "myPass" + + - name: Disable ipv4 connectivity for the second port on the B controller + netapp_e_iscsi_interface: + name: "2" + controller: "B" + state: disabled + ssid: "{{ ssid }}" + api_url: "{{ netapp_api_url }}" + api_username: "{{ netapp_api_username }}" + api_password: "{{ netapp_api_password }}" + + - name: Enable jumbo frames for the first 4 ports on controller A + netapp_e_iscsi_interface: + name: "{{ item | int }}" + controller: "A" + state: enabled + mtu: 9000 + config_method: dhcp + ssid: "{{ ssid }}" + api_url: "{{ netapp_api_url }}" + api_username: "{{ netapp_api_username }}" + api_password: "{{ netapp_api_password }}" + loop: + - 1 + - 2 + - 3 + - 4 +""" + +RETURN = """ +msg: + description: Success message + returned: on success + type: str + sample: The interface settings have been updated. +enabled: + description: + - Indicates whether IPv4 connectivity has been enabled or disabled. + - This does not necessarily indicate connectivity. If dhcp was enabled without a dhcp server, for instance, + it is unlikely that the configuration will actually be valid. 
+    returned: on success
+    sample: True
+    type: bool
+"""
+import json
+import logging
+from pprint import pformat
+import re
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp import request, eseries_host_argument_spec
+from ansible.module_utils._text import to_native
+
+HEADERS = {
+    "Content-Type": "application/json",
+    "Accept": "application/json",
+}
+
+
+class IscsiInterface(object):
+    def __init__(self):
+        argument_spec = eseries_host_argument_spec()
+        argument_spec.update(dict(
+            controller=dict(type='str', required=True, choices=['A', 'B']),
+            name=dict(type='int', aliases=['channel']),
+            state=dict(type='str', required=False, default='enabled', choices=['enabled', 'disabled']),
+            address=dict(type='str', required=False),
+            subnet_mask=dict(type='str', required=False),
+            gateway=dict(type='str', required=False),
+            config_method=dict(type='str', required=False, default='dhcp', choices=['dhcp', 'static']),
+            mtu=dict(type='int', default=1500, required=False, aliases=['max_frame_size']),
+            log_path=dict(type='str', required=False),
+        ))
+
+        required_if = [
+            ["config_method", "static", ["address", "subnet_mask"]],
+        ]
+
+        self.module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True, required_if=required_if, )
+        args = self.module.params
+        self.controller = args['controller']
+        self.name = args['name']
+        self.mtu = args['mtu']
+        self.state = args['state']
+        self.address = args['address']
+        self.subnet_mask = args['subnet_mask']
+        self.gateway = args['gateway']
+        self.config_method = args['config_method']
+
+        self.ssid = args['ssid']
+        self.url = args['api_url']
+        self.creds = dict(url_password=args['api_password'],
+                          validate_certs=args['validate_certs'],
+                          url_username=args['api_username'], )
+
+        self.check_mode = self.module.check_mode
+        self.post_body = dict()
+        self.controllers = list()
+
+        log_path = args['log_path']
+
+        # logging setup
+        self._logger = logging.getLogger(self.__class__.__name__)
+
+        if log_path:
+            logging.basicConfig(
+                level=logging.DEBUG, filename=log_path, filemode='w',
+                format='%(relativeCreated)dms %(levelname)s %(module)s.%(funcName)s:%(lineno)d\n %(message)s')
+
+        if not self.url.endswith('/'):
+            self.url += '/'
+
+        if self.mtu < 1500 or self.mtu > 9000:
+            self.module.fail_json(msg="The provided mtu is invalid, it must be between 1500 and 9000 bytes"
+                                      " (inclusive).")
+
+        if self.config_method == 'dhcp' and any([self.address, self.subnet_mask, self.gateway]):
+            self.module.fail_json(msg='A config_method of dhcp is mutually exclusive with the address,'
+                                      ' subnet_mask, and gateway options.')
+
+        # A relatively primitive regex to validate that the input is formatted like a valid ip address
+        address_regex = re.compile(r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}')
+
+        if self.address and not address_regex.match(self.address):
+            self.module.fail_json(msg="An invalid ip address was provided for address.")
+
+        if self.subnet_mask and not address_regex.match(self.subnet_mask):
+            self.module.fail_json(msg="An invalid ip address was provided for subnet_mask.")
+
+        if self.gateway and not address_regex.match(self.gateway):
+            self.module.fail_json(msg="An invalid ip address was provided for gateway.")
+
+    @property
+    def interfaces(self):
+        ifaces = list()
+        try:
+            (rc, ifaces) = request(self.url + 'storage-systems/%s/graph/xpath-filter?query=/controller/hostInterfaces'
+                                   % self.ssid, headers=HEADERS, **self.creds)
+        except Exception as err:
+            self.module.fail_json(
+                msg="Failed to retrieve defined 
host interfaces. Array Id [%s]. Error [%s]." + % (self.ssid, to_native(err))) + + # Filter out non-iSCSI interfaces + ifaces = [iface['iscsi'] for iface in ifaces if iface['interfaceType'] == 'iscsi'] + + return ifaces + + def get_controllers(self): + """Retrieve a mapping of controller labels to their references + { + 'A': '070000000000000000000001', + 'B': '070000000000000000000002', + } + :return: the controllers defined on the system + """ + controllers = list() + try: + (rc, controllers) = request(self.url + 'storage-systems/%s/graph/xpath-filter?query=/controller/id' + % self.ssid, headers=HEADERS, **self.creds) + except Exception as err: + self.module.fail_json( + msg="Failed to retrieve controller list! Array Id [%s]. Error [%s]." + % (self.ssid, to_native(err))) + + controllers.sort() + + controllers_dict = {} + i = ord('A') + for controller in controllers: + label = chr(i) + controllers_dict[label] = controller + i += 1 + + return controllers_dict + + def fetch_target_interface(self): + interfaces = self.interfaces + + for iface in interfaces: + if iface['channel'] == self.name and self.controllers[self.controller] == iface['controllerId']: + return iface + + channels = sorted(set((str(iface['channel'])) for iface in interfaces + if self.controllers[self.controller] == iface['controllerId'])) + + self.module.fail_json(msg="The requested channel of %s is not valid. Valid channels include: %s." + % (self.name, ", ".join(channels))) + + def make_update_body(self, target_iface): + body = dict(iscsiInterface=target_iface['id']) + update_required = False + + self._logger.info("Requested state=%s.", self.state) + self._logger.info("config_method: current=%s, requested=%s", + target_iface['ipv4Data']['ipv4AddressConfigMethod'], self.config_method) + + if self.state == 'enabled': + settings = dict() + if not target_iface['ipv4Enabled']: + update_required = True + settings['ipv4Enabled'] = [True] + if self.mtu != target_iface['interfaceData']['ethernetData']['maximumFramePayloadSize']: + update_required = True + settings['maximumFramePayloadSize'] = [self.mtu] + if self.config_method == 'static': + ipv4Data = target_iface['ipv4Data']['ipv4AddressData'] + + if ipv4Data['ipv4Address'] != self.address: + update_required = True + settings['ipv4Address'] = [self.address] + if ipv4Data['ipv4SubnetMask'] != self.subnet_mask: + update_required = True + settings['ipv4SubnetMask'] = [self.subnet_mask] + if self.gateway is not None and ipv4Data['ipv4GatewayAddress'] != self.gateway: + update_required = True + settings['ipv4GatewayAddress'] = [self.gateway] + + if target_iface['ipv4Data']['ipv4AddressConfigMethod'] != 'configStatic': + update_required = True + settings['ipv4AddressConfigMethod'] = ['configStatic'] + + elif (target_iface['ipv4Data']['ipv4AddressConfigMethod'] != 'configDhcp'): + update_required = True + settings.update(dict(ipv4Enabled=[True], + ipv4AddressConfigMethod=['configDhcp'])) + body['settings'] = settings + + else: + if target_iface['ipv4Enabled']: + update_required = True + body['settings'] = dict(ipv4Enabled=[False]) + + self._logger.info("Update required ?=%s", update_required) + self._logger.info("Update body: %s", pformat(body)) + + return update_required, body + + def update(self): + self.controllers = self.get_controllers() + if self.controller not in self.controllers: + self.module.fail_json(msg="The provided controller name is invalid. Valid controllers: %s." 
+ % ", ".join(self.controllers.keys())) + + iface_before = self.fetch_target_interface() + update_required, body = self.make_update_body(iface_before) + if update_required and not self.check_mode: + try: + url = (self.url + + 'storage-systems/%s/symbol/setIscsiInterfaceProperties' % self.ssid) + (rc, result) = request(url, method='POST', data=json.dumps(body), headers=HEADERS, timeout=300, + ignore_errors=True, **self.creds) + # We could potentially retry this a few times, but it's probably a rare enough case (unless a playbook + # is cancelled mid-flight), that it isn't worth the complexity. + if rc == 422 and result['retcode'] in ['busy', '3']: + self.module.fail_json( + msg="The interface is currently busy (probably processing a previously requested modification" + " request). This operation cannot currently be completed. Array Id [%s]. Error [%s]." + % (self.ssid, result)) + # Handle authentication issues, etc. + elif rc != 200: + self.module.fail_json( + msg="Failed to modify the interface! Array Id [%s]. Error [%s]." + % (self.ssid, to_native(result))) + self._logger.debug("Update request completed successfully.") + # This is going to catch cases like a connection failure + except Exception as err: + self.module.fail_json( + msg="Connection failure: we failed to modify the interface! Array Id [%s]. Error [%s]." + % (self.ssid, to_native(err))) + + iface_after = self.fetch_target_interface() + + self.module.exit_json(msg="The interface settings have been updated.", changed=update_required, + enabled=iface_after['ipv4Enabled']) + + def __call__(self, *args, **kwargs): + self.update() + + +def main(): + iface = IscsiInterface() + iface() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/storage/netapp/netapp_e_iscsi_target.py b/plugins/modules/storage/netapp/netapp_e_iscsi_target.py new file mode 100644 index 0000000000..124b4b9ca7 --- /dev/null +++ b/plugins/modules/storage/netapp/netapp_e_iscsi_target.py @@ -0,0 +1,294 @@ +#!/usr/bin/python + +# (c) 2018, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: netapp_e_iscsi_target +short_description: NetApp E-Series manage iSCSI target configuration +description: + - Configure the settings of an E-Series iSCSI target +author: Michael Price (@lmprice) +extends_documentation_fragment: +- netapp.ontap.netapp.eseries + +options: + name: + description: + - The name/alias to assign to the iSCSI target. + - This alias is often used by the initiator software in order to make an iSCSI target easier to identify. + aliases: + - alias + ping: + description: + - Enable ICMP ping responses from the configured iSCSI ports. + type: bool + default: yes + chap_secret: + description: + - Enable Challenge-Handshake Authentication Protocol (CHAP), utilizing this value as the password. + - When this value is specified, we will always trigger an update (changed=True). We have no way of verifying + whether or not the password has changed. + - The chap secret may only use ascii characters with values between 32 and 126 decimal. + - The chap secret must be no less than 12 characters, but no greater than 57 characters in length. + - The chap secret is cleared when not specified or an empty string. 
+ aliases: + - chap + - password + unnamed_discovery: + description: + - When an initiator initiates a discovery session to an initiator port, it is considered an unnamed + discovery session if the iSCSI target iqn is not specified in the request. + - This option may be disabled to increase security if desired. + type: bool + default: yes + log_path: + description: + - A local path (on the Ansible controller), to a file to be used for debug logging. + required: no +notes: + - Check mode is supported. + - Some of the settings are dependent on the settings applied to the iSCSI interfaces. These can be configured using + M(netapp_e_iscsi_interface). + - This module requires a Web Services API version of >= 1.3. +''' + +EXAMPLES = """ + - name: Enable ping responses and unnamed discovery sessions for all iSCSI ports + netapp_e_iscsi_target: + api_url: "https://localhost:8443/devmgr/v2" + api_username: admin + api_password: myPassword + ssid: "1" + validate_certs: no + name: myTarget + ping: yes + unnamed_discovery: yes + + - name: Set the target alias and the CHAP secret + netapp_e_iscsi_target: + ssid: "{{ ssid }}" + api_url: "{{ netapp_api_url }}" + api_username: "{{ netapp_api_username }}" + api_password: "{{ netapp_api_password }}" + name: myTarget + chap: password1234 +""" + +RETURN = """ +msg: + description: Success message + returned: on success + type: str + sample: The iSCSI target settings have been updated. +alias: + description: + - The alias assigned to the iSCSI target. + returned: on success + sample: myArray + type: str +iqn: + description: + - The iqn (iSCSI Qualified Name), assigned to the iSCSI target. + returned: on success + sample: iqn.1992-08.com.netapp:2800.000a132000b006d2000000005a0e8f45 + type: str +""" +import json +import logging +from pprint import pformat + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.netapp.ontap.plugins.module_utils.netapp import request, eseries_host_argument_spec +from ansible.module_utils._text import to_native + +HEADERS = { + "Content-Type": "application/json", + "Accept": "application/json", +} + + +class IscsiTarget(object): + def __init__(self): + argument_spec = eseries_host_argument_spec() + argument_spec.update(dict( + name=dict(type='str', required=False, aliases=['alias']), + ping=dict(type='bool', required=False, default=True), + chap_secret=dict(type='str', required=False, aliases=['chap', 'password'], no_log=True), + unnamed_discovery=dict(type='bool', required=False, default=True), + log_path=dict(type='str', required=False), + )) + + self.module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True, ) + args = self.module.params + + self.name = args['name'] + self.ping = args['ping'] + self.chap_secret = args['chap_secret'] + self.unnamed_discovery = args['unnamed_discovery'] + + self.ssid = args['ssid'] + self.url = args['api_url'] + self.creds = dict(url_password=args['api_password'], + validate_certs=args['validate_certs'], + url_username=args['api_username'], ) + + self.check_mode = self.module.check_mode + self.post_body = dict() + self.controllers = list() + + log_path = args['log_path'] + + # logging setup + self._logger = logging.getLogger(self.__class__.__name__) + + if log_path: + logging.basicConfig( + level=logging.DEBUG, filename=log_path, filemode='w', + format='%(relativeCreated)dms %(levelname)s %(module)s.%(funcName)s:%(lineno)d\n %(message)s') + + if not self.url.endswith('/'): + self.url += '/' + + if self.chap_secret: + if len(self.chap_secret) < 12 or 
len(self.chap_secret) > 57: + self.module.fail_json(msg="The provided CHAP secret is not valid, it must be between 12 and 57" + " characters in length.") + + for c in self.chap_secret: + ordinal = ord(c) + if ordinal < 32 or ordinal > 126: + self.module.fail_json(msg="The provided CHAP secret is not valid, it may only utilize ascii" + " characters with decimal values between 32 and 126.") + + @property + def target(self): + """Provide information on the iSCSI Target configuration + + Sample: + { + 'alias': 'myCustomName', + 'ping': True, + 'unnamed_discovery': True, + 'chap': False, + 'iqn': 'iqn.1992-08.com.netapp:2800.000a132000b006d2000000005a0e8f45', + } + """ + target = dict() + try: + (rc, data) = request(self.url + 'storage-systems/%s/graph/xpath-filter?query=/storagePoolBundle/target' + % self.ssid, headers=HEADERS, **self.creds) + # This likely isn't an iSCSI-enabled system + if not data: + self.module.fail_json( + msg="This storage-system doesn't appear to have iSCSI interfaces. Array Id [%s]." % (self.ssid)) + + data = data[0] + + chap = any( + [auth for auth in data['configuredAuthMethods']['authMethodData'] if auth['authMethod'] == 'chap']) + + target.update(dict(alias=data['alias']['iscsiAlias'], + iqn=data['nodeName']['iscsiNodeName'], + chap=chap)) + + (rc, data) = request(self.url + 'storage-systems/%s/graph/xpath-filter?query=/sa/iscsiEntityData' + % self.ssid, headers=HEADERS, **self.creds) + + data = data[0] + target.update(dict(ping=data['icmpPingResponseEnabled'], + unnamed_discovery=data['unnamedDiscoverySessionsEnabled'])) + + except Exception as err: + self.module.fail_json( + msg="Failed to retrieve the iSCSI target information. Array Id [%s]. Error [%s]." + % (self.ssid, to_native(err))) + + return target + + def apply_iscsi_settings(self): + """Update the iSCSI target alias and CHAP settings""" + update = False + target = self.target + + body = dict() + + if self.name is not None and self.name != target['alias']: + update = True + body['alias'] = self.name + + # If the CHAP secret was provided, we trigger an update. + if self.chap_secret: + update = True + body.update(dict(enableChapAuthentication=True, + chapSecret=self.chap_secret)) + # If no secret was provided, then we disable chap + elif target['chap']: + update = True + body.update(dict(enableChapAuthentication=False)) + + if update and not self.check_mode: + try: + request(self.url + 'storage-systems/%s/iscsi/target-settings' % self.ssid, method='POST', + data=json.dumps(body), headers=HEADERS, **self.creds) + except Exception as err: + self.module.fail_json( + msg="Failed to update the iSCSI target settings. Array Id [%s]. Error [%s]." + % (self.ssid, to_native(err))) + + return update + + def apply_target_changes(self): + update = False + target = self.target + + body = dict() + + if self.ping != target['ping']: + update = True + body['icmpPingResponseEnabled'] = self.ping + + if self.unnamed_discovery != target['unnamed_discovery']: + update = True + body['unnamedDiscoverySessionsEnabled'] = self.unnamed_discovery + + self._logger.info(pformat(body)) + if update and not self.check_mode: + try: + request(self.url + 'storage-systems/%s/iscsi/entity' % self.ssid, method='POST', + data=json.dumps(body), timeout=60, headers=HEADERS, **self.creds) + except Exception as err: + self.module.fail_json( + msg="Failed to update the iSCSI target settings. Array Id [%s]. Error [%s]." 
+                        % (self.ssid, to_native(err)))
+        return update
+
+    def update(self):
+        update = self.apply_iscsi_settings()
+        update = self.apply_target_changes() or update
+
+        target = self.target
+        data = dict((key, target[key]) for key in target if key in ['iqn', 'alias'])
+
+        self.module.exit_json(msg="The iSCSI target settings have been updated.", changed=update, **data)
+
+    def __call__(self, *args, **kwargs):
+        self.update()
+
+
+def main():
+    iface = IscsiTarget()
+    iface()
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/storage/netapp/netapp_e_ldap.py b/plugins/modules/storage/netapp/netapp_e_ldap.py
new file mode 100644
index 0000000000..1c9ecb3ad2
--- /dev/null
+++ b/plugins/modules/storage/netapp/netapp_e_ldap.py
@@ -0,0 +1,390 @@
+#!/usr/bin/python
+
+# (c) 2018, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+DOCUMENTATION = '''
+---
+module: netapp_e_ldap
+short_description: NetApp E-Series manage LDAP integration to use for authentication
+description:
+    - Configure an E-Series system to allow authentication via an LDAP server
+author: Michael Price (@lmprice)
+extends_documentation_fragment:
+- netapp.ontap.netapp.eseries
+
+options:
+    state:
+        description:
+            - Enable/disable LDAP support on the system. Disabling will clear out any existing defined domains.
+        choices:
+            - present
+            - absent
+        default: present
+    identifier:
+        description:
+            - This is a unique identifier for the configuration (for cases where there are multiple domains configured).
+            - If this is not specified, but I(state=present), we will utilize a default value of 'default'.
+    username:
+        description:
+            - This is the user account that will be used for querying the LDAP server.
+            - "Example: CN=MyBindAcct,OU=ServiceAccounts,DC=example,DC=com"
+        required: yes
+        aliases:
+            - bind_username
+    password:
+        description:
+            - This is the password for the bind user account.
+        required: yes
+        aliases:
+            - bind_password
+    attributes:
+        description:
+            - The user attributes that should be considered for the group to role mapping.
+            - Typically this is used with something like 'memberOf', and a user's access is tested against group
+              membership or lack thereof.
+        default: memberOf
+    server:
+        description:
+            - This is the LDAP server url.
+            - The connection string should be specified as using the ldap or ldaps protocol along with the port
+              information.
+        aliases:
+            - server_url
+        required: yes
+    name:
+        description:
+            - The domain name[s] that will be utilized when authenticating, in order to identify which domain
+              configuration to use.
+            - Defaults to the DNS name of the I(server).
+            - The only requirement is that the name[s] be resolvable.
+            - "Example: user@example.com"
+        required: no
+    search_base:
+        description:
+            - The search base is used to find group memberships of the user.
+            - "Example: ou=users,dc=example,dc=com"
+        required: yes
+    role_mappings:
+        description:
+            - This is where you specify which groups should have access to what permissions for the
+              storage-system.
+            - For example, all users in group A will be assigned all 4 available roles, which will allow access
+              to all the management functionality of the system (super-user). Those in group B only have the
+              storage.monitor role, which will allow only read-only access.
+            - This is specified as a mapping of regular expressions to a list of roles. See the examples.
+            - The roles that will be assigned to the group/groups matching the provided regex.
+            - storage.admin allows users full read/write access to storage objects and operations.
+            - storage.monitor allows users read-only access to storage objects and operations.
+            - support.admin allows users access to hardware, diagnostic information, the Major Event
+              Log, and other critical support-related functionality, but not the storage configuration.
+            - security.admin allows users access to authentication/authorization configuration, as well
+              as the audit log configuration, and certificate management.
+        required: yes
+    user_attribute:
+        description:
+            - This is the attribute we will use to match the provided username when a user attempts to
+              authenticate.
+        default: sAMAccountName
+    log_path:
+        description:
+            - A local path to a file to be used for debug logging.
+        required: no
+notes:
+    - Check mode is supported.
+    - This module allows you to define one or more LDAP domains identified uniquely by I(identifier) to use for
+      authentication. Authorization is determined by I(role_mappings), in that different groups of users may be given
+      different (or no) access to certain aspects of the system and API.
+    - The local user accounts will still be available if the LDAP server becomes unavailable/inaccessible.
+    - Generally, you'll need to get the details of your organization's LDAP server before you'll be able to configure
+      the system for using LDAP authentication; every implementation is likely to be very different.
+    - This API is currently only supported with the Embedded Web Services API v2.0 and higher, or the Web Services Proxy
+      v3.0 and higher.
+'''
+
+EXAMPLES = '''
+    - name: Disable LDAP authentication
+      netapp_e_ldap:
+        api_url: "10.1.1.1:8443"
+        api_username: "admin"
+        api_password: "myPass"
+        ssid: "1"
+        state: absent
+
+    - name: Remove the 'default' LDAP domain configuration
+      netapp_e_ldap:
+        state: absent
+        identifier: default
+
+    - name: Define a new LDAP domain, utilizing defaults where possible
+      netapp_e_ldap:
+        state: present
+        bind_username: "CN=MyBindAccount,OU=ServiceAccounts,DC=example,DC=com"
+        bind_password: "mySecretPass"
+        server: "ldap://example.com:389"
+        search_base: 'OU=Users,DC=example,DC=com'
+        role_mappings:
+          ".*dist-dev-storage.*":
+            - storage.admin
+            - security.admin
+            - support.admin
+            - storage.monitor
+'''
+
+RETURN = """
+msg:
+    description: Success message
+    returned: on success
+    type: str
+    sample: The ldap settings have been updated.
+"""
+
+import json
+import logging
+
+try:
+    import urlparse
+except ImportError:
+    import urllib.parse as urlparse
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp import request, eseries_host_argument_spec
+from ansible.module_utils._text import to_native
+
+
+class Ldap(object):
+    NO_CHANGE_MSG = "No changes were necessary."
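+
+    # Illustrative shape of the domain payload assembled by make_configuration()
+    # below (placeholder values only):
+    #   {"id": "default", "ldapUrl": "ldap://example.com:389",
+    #    "bindLookupUser": {"user": "...", "password": "..."},
+    #    "roleMapCollection": [{"groupRegex": ".*admins.*", "ignoreCase": True, "name": "storage.admin"}],
+    #    "groupAttributes": ["memberOf"], "names": ["example.com"],
+    #    "searchBase": "OU=Users,DC=example,DC=com", "userAttribute": "sAMAccountName"}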
+ + def __init__(self): + argument_spec = eseries_host_argument_spec() + argument_spec.update(dict( + state=dict(type='str', required=False, default='present', + choices=['present', 'absent']), + identifier=dict(type='str', required=False, ), + username=dict(type='str', required=False, aliases=['bind_username']), + password=dict(type='str', required=False, aliases=['bind_password'], no_log=True), + name=dict(type='list', required=False, ), + server=dict(type='str', required=False, aliases=['server_url']), + search_base=dict(type='str', required=False, ), + role_mappings=dict(type='dict', required=False, ), + user_attribute=dict(type='str', required=False, default='sAMAccountName'), + attributes=dict(type='list', default=['memberOf'], required=False, ), + log_path=dict(type='str', required=False), + )) + + required_if = [ + ["state", "present", ["username", "password", "server", "search_base", "role_mappings", ]] + ] + + self.module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True, required_if=required_if) + args = self.module.params + self.ldap = args['state'] == 'present' + self.identifier = args['identifier'] + self.username = args['username'] + self.password = args['password'] + self.names = args['name'] + self.server = args['server'] + self.search_base = args['search_base'] + self.role_mappings = args['role_mappings'] + self.user_attribute = args['user_attribute'] + self.attributes = args['attributes'] + + self.ssid = args['ssid'] + self.url = args['api_url'] + self.creds = dict(url_password=args['api_password'], + validate_certs=args['validate_certs'], + url_username=args['api_username'], + timeout=60) + + self.check_mode = self.module.check_mode + + log_path = args['log_path'] + + # logging setup + self._logger = logging.getLogger(self.__class__.__name__) + + if log_path: + logging.basicConfig( + level=logging.DEBUG, filename=log_path, filemode='w', + format='%(relativeCreated)dms %(levelname)s %(module)s.%(funcName)s:%(lineno)d\n %(message)s') + + if not self.url.endswith('/'): + self.url += '/' + + self.embedded = None + self.base_path = None + + def make_configuration(self): + if not self.identifier: + self.identifier = 'default' + + if not self.names: + parts = urlparse.urlparse(self.server) + netloc = parts.netloc + if ':' in netloc: + netloc = netloc.split(':')[0] + self.names = [netloc] + + roles = list() + for regex in self.role_mappings: + for role in self.role_mappings[regex]: + roles.append(dict(groupRegex=regex, + ignoreCase=True, + name=role)) + + domain = dict(id=self.identifier, + ldapUrl=self.server, + bindLookupUser=dict(user=self.username, password=self.password), + roleMapCollection=roles, + groupAttributes=self.attributes, + names=self.names, + searchBase=self.search_base, + userAttribute=self.user_attribute, + ) + + return domain + + def is_embedded(self): + """Determine whether or not we're using the embedded or proxy implementation of Web Services""" + if self.embedded is None: + url = self.url + try: + parts = urlparse.urlparse(url) + parts = parts._replace(path='/devmgr/utils/') + url = urlparse.urlunparse(parts) + + (rc, result) = request(url + 'about', **self.creds) + self.embedded = not result['runningAsProxy'] + except Exception as err: + self._logger.exception("Failed to retrieve the About information.") + self.module.fail_json(msg="Failed to determine the Web Services implementation type!" + " Array Id [%s]. Error [%s]." 
+                                      % (self.ssid, to_native(err)))
+
+        return self.embedded
+
+    def get_full_configuration(self):
+        try:
+            (rc, result) = request(self.url + self.base_path, **self.creds)
+            return result
+        except Exception as err:
+            self._logger.exception("Failed to retrieve the LDAP configuration.")
+            self.module.fail_json(msg="Failed to retrieve LDAP configuration! Array Id [%s]. Error [%s]."
+                                      % (self.ssid, to_native(err)))
+
+    def get_configuration(self, identifier):
+        try:
+            (rc, result) = request(self.url + self.base_path + '%s' % (identifier), ignore_errors=True, **self.creds)
+            if rc == 200:
+                return result
+            elif rc == 404:
+                return None
+            else:
+                self.module.fail_json(msg="Failed to retrieve LDAP configuration! Array Id [%s]. Error [%s]."
+                                          % (self.ssid, result))
+        except Exception as err:
+            self._logger.exception("Failed to retrieve the LDAP configuration.")
+            self.module.fail_json(msg="Failed to retrieve LDAP configuration! Array Id [%s]. Error [%s]."
+                                      % (self.ssid, to_native(err)))
+
+    def update_configuration(self):
+        # Define a new domain based on the user input
+        domain = self.make_configuration()
+
+        # This is the current configuration for the provided identifier
+        current = self.get_configuration(self.identifier)
+
+        update = current != domain
+        msg = "No changes were necessary for [%s]." % self.identifier
+        self._logger.info("Is updated: %s", update)
+        if update and not self.check_mode:
+            msg = "The configuration changes were made for [%s]." % self.identifier
+            try:
+                if current is None:
+                    api = self.base_path + 'addDomain'
+                else:
+                    api = self.base_path + '%s' % (domain['id'])
+
+                (rc, result) = request(self.url + api, method='POST', data=json.dumps(domain), **self.creds)
+            except Exception as err:
+                self._logger.exception("Failed to modify the LDAP configuration.")
+                self.module.fail_json(msg="Failed to modify LDAP configuration! Array Id [%s]. Error [%s]."
+                                          % (self.ssid, to_native(err)))
+
+        return msg, update
+
+    def clear_single_configuration(self, identifier=None):
+        if identifier is None:
+            identifier = self.identifier
+
+        configuration = self.get_configuration(identifier)
+        updated = False
+        msg = self.NO_CHANGE_MSG
+        if configuration:
+            updated = True
+            msg = "The LDAP domain configuration for [%s] was cleared." % identifier
+            if not self.check_mode:
+                try:
+                    (rc, result) = request(self.url + self.base_path + '%s' % identifier, method='DELETE', **self.creds)
+                except Exception as err:
+                    self.module.fail_json(msg="Failed to remove LDAP configuration! Array Id [%s]. Error [%s]."
+                                              % (self.ssid, to_native(err)))
+        return msg, updated
+
+    def clear_configuration(self):
+        configuration = self.get_full_configuration()
+        updated = False
+        msg = self.NO_CHANGE_MSG
+        if configuration['ldapDomains']:
+            updated = True
+            msg = "The LDAP configuration for all domains was cleared."
+            if not self.check_mode:
+                try:
+                    (rc, result) = request(self.url + self.base_path, method='DELETE', ignore_errors=True, **self.creds)
+
+                    # Older versions of the NetApp E-Series REST API do not provide an endpoint for
+                    # removing all existing configurations at once, so clear each domain individually.
+                    if rc == 405:
+                        for config in configuration['ldapDomains']:
+                            self.clear_single_configuration(config['id'])
+
+                except Exception as err:
+                    self.module.fail_json(msg="Failed to clear LDAP configuration! Array Id [%s]. Error [%s]."
+                                      % (self.ssid, to_native(err)))
+        return msg, updated
+
+    def get_base_path(self):
+        embedded = self.is_embedded()
+        if embedded:
+            return 'storage-systems/%s/ldap/' % self.ssid
+        else:
+            return '/ldap/'
+
+    def update(self):
+        self.base_path = self.get_base_path()
+
+        if self.ldap:
+            msg, update = self.update_configuration()
+        elif self.identifier:
+            msg, update = self.clear_single_configuration()
+        else:
+            msg, update = self.clear_configuration()
+        self.module.exit_json(msg=msg, changed=update, )
+
+    def __call__(self, *args, **kwargs):
+        self.update()
+
+
+def main():
+    settings = Ldap()
+    settings()
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/storage/netapp/netapp_e_lun_mapping.py b/plugins/modules/storage/netapp/netapp_e_lun_mapping.py
new file mode 100644
index 0000000000..1e14cec9b7
--- /dev/null
+++ b/plugins/modules/storage/netapp/netapp_e_lun_mapping.py
@@ -0,0 +1,284 @@
+#!/usr/bin/python
+
+# (c) 2016, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+DOCUMENTATION = '''
+---
+module: netapp_e_lun_mapping
+author:
+    - Kevin Hulquest (@hulquest)
+    - Nathan Swartz (@ndswartz)
+short_description: NetApp E-Series create, delete, or modify lun mappings
+description:
+    - Create, delete, or modify mappings between a volume and a targeted host/host group.
+extends_documentation_fragment:
+- netapp.ontap.netapp.eseries
+
+options:
+    state:
+        description:
+            - Present will ensure the mapping exists, absent will remove the mapping.
+        required: True
+        choices: ["present", "absent"]
+    target:
+        description:
+            - The name of the host or host group you wish to assign to the mapping.
+            - If omitted, the default hostgroup is used.
+            - If the supplied I(volume_name) is associated with a different target, it will be updated to what is supplied here.
+        required: False
+    volume_name:
+        description:
+            - The name of the volume you wish to include in the mapping.
+        required: True
+        aliases:
+            - volume
+    lun:
+        description:
+            - The LUN value you wish to give the mapping.
+            - If the supplied I(volume_name) is associated with a different LUN, it will be updated to what is supplied here.
+            - The LUN value will be determined by the storage-system when not specified.
+ required: no + target_type: + description: + - This option specifies the whether the target should be a host or a group of hosts + - Only necessary when the target name is used for both a host and a group of hosts + choices: + - host + - group + required: no +''' + +EXAMPLES = ''' +--- + - name: Map volume1 to the host target host1 + netapp_e_lun_mapping: + ssid: 1 + api_url: "{{ netapp_api_url }}" + api_username: "{{ netapp_api_username }}" + api_password: "{{ netapp_api_password }}" + validate_certs: no + state: present + target: host1 + volume: volume1 + - name: Delete the lun mapping between volume1 and host1 + netapp_e_lun_mapping: + ssid: 1 + api_url: "{{ netapp_api_url }}" + api_username: "{{ netapp_api_username }}" + api_password: "{{ netapp_api_password }}" + validate_certs: yes + state: absent + target: host1 + volume: volume1 +''' +RETURN = ''' +msg: + description: success of the module + returned: always + type: str + sample: Lun mapping is complete +''' +import json +import logging +from pprint import pformat + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.netapp.ontap.plugins.module_utils.netapp import request, eseries_host_argument_spec +from ansible.module_utils._text import to_native + +HEADERS = { + "Content-Type": "application/json", + "Accept": "application/json" +} + + +class LunMapping(object): + def __init__(self): + argument_spec = eseries_host_argument_spec() + argument_spec.update(dict( + state=dict(required=True, choices=["present", "absent"]), + target=dict(required=False, default=None), + volume_name=dict(required=True, aliases=["volume"]), + lun=dict(type="int", required=False), + target_type=dict(required=False, choices=["host", "group"]))) + self.module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True) + args = self.module.params + + self.state = args["state"] in ["present"] + self.target = args["target"] + self.volume = args["volume_name"] + self.lun = args["lun"] + self.target_type = args["target_type"] + self.ssid = args["ssid"] + self.url = args["api_url"] + self.check_mode = self.module.check_mode + self.creds = dict(url_username=args["api_username"], + url_password=args["api_password"], + validate_certs=args["validate_certs"]) + self.mapping_info = None + + if not self.url.endswith('/'): + self.url += '/' + + def update_mapping_info(self): + """Collect the current state of the storage array.""" + response = None + try: + rc, response = request(self.url + "storage-systems/%s/graph" % self.ssid, + method="GET", headers=HEADERS, **self.creds) + + except Exception as error: + self.module.fail_json( + msg="Failed to retrieve storage array graph. Id [%s]. 
Error [%s]" % (self.ssid, to_native(error))) + + # Create dictionary containing host/cluster references mapped to their names + target_reference = {} + target_name = {} + target_type = {} + + if self.target_type is None or self.target_type == "host": + for host in response["storagePoolBundle"]["host"]: + target_reference.update({host["hostRef"]: host["name"]}) + target_name.update({host["name"]: host["hostRef"]}) + target_type.update({host["name"]: "host"}) + + if self.target_type is None or self.target_type == "group": + for cluster in response["storagePoolBundle"]["cluster"]: + + # Verify there is no ambiguity between target's type (ie host and group has the same name) + if self.target and self.target_type is None and cluster["name"] == self.target and \ + self.target in target_name.keys(): + self.module.fail_json(msg="Ambiguous target type: target name is used for both host and group" + " targets! Id [%s]" % self.ssid) + + target_reference.update({cluster["clusterRef"]: cluster["name"]}) + target_name.update({cluster["name"]: cluster["clusterRef"]}) + target_type.update({cluster["name"]: "group"}) + + volume_reference = {} + volume_name = {} + lun_name = {} + for volume in response["volume"]: + volume_reference.update({volume["volumeRef"]: volume["name"]}) + volume_name.update({volume["name"]: volume["volumeRef"]}) + if volume["listOfMappings"]: + lun_name.update({volume["name"]: volume["listOfMappings"][0]["lun"]}) + for volume in response["highLevelVolBundle"]["thinVolume"]: + volume_reference.update({volume["volumeRef"]: volume["name"]}) + volume_name.update({volume["name"]: volume["volumeRef"]}) + if volume["listOfMappings"]: + lun_name.update({volume["name"]: volume["listOfMappings"][0]["lun"]}) + + # Build current mapping object + self.mapping_info = dict(lun_mapping=[dict(volume_reference=mapping["volumeRef"], + map_reference=mapping["mapRef"], + lun_mapping_reference=mapping["lunMappingRef"], + lun=mapping["lun"] + ) for mapping in response["storagePoolBundle"]["lunMapping"]], + volume_by_reference=volume_reference, + volume_by_name=volume_name, + lun_by_name=lun_name, + target_by_reference=target_reference, + target_by_name=target_name, + target_type_by_name=target_type) + + def get_lun_mapping(self): + """Find the matching lun mapping reference. + + Returns: tuple(bool, int, int): contains volume match, volume mapping reference and mapping lun + """ + target_match = False + reference = None + lun = None + + self.update_mapping_info() + + # Verify that when a lun is specified that it does not match an existing lun value unless it is associated with + # the specified volume (ie for an update) + if self.lun and any((self.lun == lun_mapping["lun"] and + self.target == self.mapping_info["target_by_reference"][lun_mapping["map_reference"]] and + self.volume != self.mapping_info["volume_by_reference"][lun_mapping["volume_reference"]] + ) for lun_mapping in self.mapping_info["lun_mapping"]): + self.module.fail_json(msg="Option lun value is already in use for target! Array Id [%s]." % self.ssid) + + # Verify that when target_type is specified then it matches the target's actually type + if self.target and self.target_type and self.target in self.mapping_info["target_type_by_name"].keys() and \ + self.mapping_info["target_type_by_name"][self.target] != self.target_type: + self.module.fail_json( + msg="Option target does not match the specified target_type! Id [%s]." % self.ssid) + + # Verify volume and target exist if needed for expected state. 
+        if self.state:
+            if self.volume not in self.mapping_info["volume_by_name"].keys():
+                self.module.fail_json(msg="Volume does not exist. Id [%s]." % self.ssid)
+            if self.target and self.target not in self.mapping_info["target_by_name"].keys():
+                self.module.fail_json(msg="Target does not exist. Id [%s]." % self.ssid)
+
+        for lun_mapping in self.mapping_info["lun_mapping"]:
+
+            # Find matching volume reference
+            if lun_mapping["volume_reference"] == self.mapping_info["volume_by_name"][self.volume]:
+                reference = lun_mapping["lun_mapping_reference"]
+                lun = lun_mapping["lun"]
+
+                # Determine whether the lun mapping is attached to the specified target with the expected lun value
+                if (lun_mapping["map_reference"] in self.mapping_info["target_by_reference"].keys() and
+                        self.mapping_info["target_by_reference"][lun_mapping["map_reference"]] == self.target and
+                        (self.lun is None or lun == self.lun)):
+                    target_match = True
+
+        return target_match, reference, lun
+
+    def update(self):
+        """Execute the required changes on the storage array."""
+        target_match, lun_reference, lun = self.get_lun_mapping()
+        update = (self.state and not target_match) or (not self.state and target_match)
+
+        if update and not self.check_mode:
+            try:
+                if self.state:
+                    body = dict()
+                    target = None if not self.target else self.mapping_info["target_by_name"][self.target]
+                    if target:
+                        body.update(dict(targetId=target))
+                    if self.lun is not None:
+                        body.update(dict(lun=self.lun))
+
+                    if lun_reference:
+
+                        rc, response = request(self.url + "storage-systems/%s/volume-mappings/%s/move"
+                                               % (self.ssid, lun_reference), method="POST", data=json.dumps(body),
+                                               headers=HEADERS, **self.creds)
+                    else:
+                        body.update(dict(mappableObjectId=self.mapping_info["volume_by_name"][self.volume]))
+                        rc, response = request(self.url + "storage-systems/%s/volume-mappings" % self.ssid,
+                                               method="POST", data=json.dumps(body), headers=HEADERS, **self.creds)
+
+                else:   # Remove existing lun mapping for volume and target
+                    rc, response = request(self.url + "storage-systems/%s/volume-mappings/%s"
+                                           % (self.ssid, lun_reference),
+                                           method="DELETE", headers=HEADERS, **self.creds)
+            except Exception as error:
+                self.module.fail_json(
+                    msg="Failed to update storage array lun mapping. Id [%s]. Error [%s]"
+                        % (self.ssid, to_native(error)))
+
+        self.module.exit_json(msg="Lun mapping is complete.", changed=update)
+
+
+def main():
+    lun_mapping = LunMapping()
+    lun_mapping.update()
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/storage/netapp/netapp_e_mgmt_interface.py b/plugins/modules/storage/netapp/netapp_e_mgmt_interface.py
new file mode 100644
index 0000000000..2ba856572d
--- /dev/null
+++ b/plugins/modules/storage/netapp/netapp_e_mgmt_interface.py
@@ -0,0 +1,708 @@
+#!/usr/bin/python
+
+# (c) 2018, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+DOCUMENTATION = '''
+---
+module: netapp_e_mgmt_interface
+short_description: NetApp E-Series management interface configuration
+description:
+    - Configure the E-Series management interfaces
+author:
+    - Michael Price (@lmprice)
+    - Nathan Swartz (@ndswartz)
+extends_documentation_fragment:
+- netapp.ontap.netapp.eseries
+
+options:
+    state:
+        description:
+            - Enable or disable IPv4 network interface configuration.
+ - Either IPv4 or IPv6 must be enabled otherwise error will occur. + - Only required when enabling or disabling IPv4 network interface + choices: + - enable + - disable + required: no + aliases: + - enable_interface + controller: + description: + - The controller that owns the port you want to configure. + - Controller names are represented alphabetically, with the first controller as A, + the second as B, and so on. + - Current hardware models have either 1 or 2 available controllers, but that is not a guaranteed hard + limitation and could change in the future. + required: yes + choices: + - A + - B + name: + description: + - The port to modify the configuration for. + - The list of choices is not necessarily comprehensive. It depends on the number of ports + that are present in the system. + - The name represents the port number (typically from left to right on the controller), + beginning with a value of 1. + - Mutually exclusive with I(channel). + aliases: + - port + - iface + channel: + description: + - The port to modify the configuration for. + - The channel represents the port number (typically from left to right on the controller), + beginning with a value of 1. + - Mutually exclusive with I(name). + address: + description: + - The IPv4 address to assign to the interface. + - Should be specified in xx.xx.xx.xx form. + - Mutually exclusive with I(config_method=dhcp) + required: no + subnet_mask: + description: + - The subnet mask to utilize for the interface. + - Should be specified in xx.xx.xx.xx form. + - Mutually exclusive with I(config_method=dhcp) + required: no + gateway: + description: + - The IPv4 gateway address to utilize for the interface. + - Should be specified in xx.xx.xx.xx form. + - Mutually exclusive with I(config_method=dhcp) + required: no + config_method: + description: + - The configuration method type to use for network interface ports. + - dhcp is mutually exclusive with I(address), I(subnet_mask), and I(gateway). + choices: + - dhcp + - static + required: no + dns_config_method: + description: + - The configuration method type to use for DNS services. + - dhcp is mutually exclusive with I(dns_address), and I(dns_address_backup). + choices: + - dhcp + - static + required: no + dns_address: + description: + - Primary IPv4 DNS server address + required: no + dns_address_backup: + description: + - Backup IPv4 DNS server address + - Queried when primary DNS server fails + required: no + ntp_config_method: + description: + - The configuration method type to use for NTP services. + - disable is mutually exclusive with I(ntp_address) and I(ntp_address_backup). + - dhcp is mutually exclusive with I(ntp_address) and I(ntp_address_backup). + choices: + - disable + - dhcp + - static + required: no + ntp_address: + description: + - Primary IPv4 NTP server address + required: no + ntp_address_backup: + description: + - Backup IPv4 NTP server address + - Queried when primary NTP server fails + required: no + ssh: + type: bool + description: + - Enable ssh access to the controller for debug purposes. + - This is a controller-level setting. + - rlogin/telnet will be enabled for ancient equipment where ssh is not available. + required: no + log_path: + description: + - A local path to a file to be used for debug logging + required: no +notes: + - Check mode is supported. + - The interface settings are applied synchronously, but changes to the interface itself (receiving a new IP address + via dhcp, etc), can take seconds or minutes longer to take effect. 
+ - "Known issue: Changes specifically to down ports will result in a failure. However, this may not be the case in up + coming NetApp E-Series firmware releases (released after firmware version 11.40.2)." +''' + +EXAMPLES = """ + - name: Configure the first port on the A controller with a static IPv4 address + netapp_e_mgmt_interface: + name: "1" + controller: "A" + config_method: static + address: "192.168.1.100" + subnet_mask: "255.255.255.0" + gateway: "192.168.1.1" + ssid: "1" + api_url: "10.1.1.1:8443" + api_username: "admin" + api_password: "myPass" + + - name: Disable ipv4 connectivity for the second port on the B controller + netapp_e_mgmt_interface: + name: "2" + controller: "B" + enable_interface: no + ssid: "{{ ssid }}" + api_url: "{{ netapp_api_url }}" + api_username: "{{ netapp_api_username }}" + api_password: "{{ netapp_api_password }}" + + - name: Enable ssh access for ports one and two on controller A + netapp_e_mgmt_interface: + name: "{{ item }}" + controller: "A" + ssh: yes + ssid: "{{ ssid }}" + api_url: "{{ netapp_api_url }}" + api_username: "{{ netapp_api_username }}" + api_password: "{{ netapp_api_password }}" + loop: + - 1 + - 2 + + - name: Configure static DNS settings for the first port on controller A + netapp_e_mgmt_interface: + name: "1" + controller: "A" + dns_config_method: static + dns_address: "192.168.1.100" + dns_address_backup: "192.168.1.1" + ssid: "{{ ssid }}" + api_url: "{{ netapp_api_url }}" + api_username: "{{ netapp_api_username }}" + api_password: "{{ netapp_api_password }}" + + - name: Configure static NTP settings for ports one and two on controller B + netapp_e_mgmt_interface: + name: "{{ item }}" + controller: "B" + ntp_config_method: static + ntp_address: "129.100.1.100" + ntp_address_backup: "127.100.1.1" + ssid: "{{ ssid }}" + api_url: "{{ netapp_api_url }}" + api_username: "{{ netapp_api_username }}" + api_password: "{{ netapp_api_password }}" + loop: + - 1 + - 2 +""" + +RETURN = """ +msg: + description: Success message + returned: on success + type: str + sample: The interface settings have been updated. +enabled: + description: + - Indicates whether IPv4 connectivity has been enabled or disabled. + - This does not necessarily indicate connectivity. If dhcp was enabled absent a dhcp server, for instance, + it is unlikely that the configuration will actually be valid. 
+ returned: on success + sample: True + type: bool +""" +import json +import logging +from pprint import pformat, pprint +import time +import socket + +try: + import urlparse +except ImportError: + import urllib.parse as urlparse + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.netapp.ontap.plugins.module_utils.netapp import request, eseries_host_argument_spec +from ansible.module_utils._text import to_native + +HEADERS = { + "Content-Type": "application/json", + "Accept": "application/json", +} + + +class MgmtInterface(object): + MAX_RETRIES = 15 + + def __init__(self): + argument_spec = eseries_host_argument_spec() + argument_spec.update(dict( + state=dict(type="str", choices=["enable", "disable"], + aliases=["enable_interface"], required=False), + controller=dict(type="str", required=True, choices=["A", "B"]), + name=dict(type="str", aliases=["port", "iface"]), + channel=dict(type="int"), + address=dict(type="str", required=False), + subnet_mask=dict(type="str", required=False), + gateway=dict(type="str", required=False), + config_method=dict(type="str", required=False, choices=["dhcp", "static"]), + dns_config_method=dict(type="str", required=False, choices=["dhcp", "static"]), + dns_address=dict(type="str", required=False), + dns_address_backup=dict(type="str", required=False), + ntp_config_method=dict(type="str", required=False, choices=["disable", "dhcp", "static"]), + ntp_address=dict(type="str", required=False), + ntp_address_backup=dict(type="str", required=False), + ssh=dict(type="bool", required=False), + log_path=dict(type="str", required=False), + )) + + required_if = [ + ["state", "enable", ["config_method"]], + ["config_method", "static", ["address", "subnet_mask"]], + ["dns_config_method", "static", ["dns_address"]], + ["ntp_config_method", "static", ["ntp_address"]], + ] + + mutually_exclusive = [ + ["name", "channel"], + ] + + self.module = AnsibleModule(argument_spec=argument_spec, + supports_check_mode=True, + required_if=required_if, + mutually_exclusive=mutually_exclusive) + args = self.module.params + + self.controller = args["controller"] + self.name = args["name"] + self.channel = args["channel"] + + self.config_method = args["config_method"] + self.address = args["address"] + self.subnet_mask = args["subnet_mask"] + self.gateway = args["gateway"] + self.enable_interface = None if args["state"] is None else args["state"] == "enable" + + self.dns_config_method = args["dns_config_method"] + self.dns_address = args["dns_address"] + self.dns_address_backup = args["dns_address_backup"] + + self.ntp_config_method = args["ntp_config_method"] + self.ntp_address = args["ntp_address"] + self.ntp_address_backup = args["ntp_address_backup"] + + self.ssh = args["ssh"] + + self.ssid = args["ssid"] + self.url = args["api_url"] + self.creds = dict(url_password=args["api_password"], + validate_certs=args["validate_certs"], + url_username=args["api_username"], ) + + self.retries = 0 + + self.check_mode = self.module.check_mode + self.post_body = dict() + + log_path = args["log_path"] + + # logging setup + self._logger = logging.getLogger(self.__class__.__name__) + + if log_path: + logging.basicConfig( + level=logging.DEBUG, filename=log_path, filemode='w', + format='%(relativeCreated)dms %(levelname)s %(module)s.%(funcName)s:%(lineno)d\n %(message)s') + + if not self.url.endswith('/'): + self.url += '/' + + @property + def controllers(self): + """Retrieve a mapping of controller labels to their references + { + 'A': '070000000000000000000001', + 
'B': '070000000000000000000002', + } + :return: the controllers defined on the system + """ + try: + (rc, controllers) = request(self.url + 'storage-systems/%s/controllers' + % self.ssid, headers=HEADERS, **self.creds) + except Exception as err: + controllers = list() + self.module.fail_json( + msg="Failed to retrieve the controller settings. Array Id [%s]. Error [%s]." + % (self.ssid, to_native(err))) + + controllers.sort(key=lambda c: c['physicalLocation']['slot']) + + controllers_dict = dict() + i = ord('A') + for controller in controllers: + label = chr(i) + settings = dict(controllerSlot=controller['physicalLocation']['slot'], + controllerRef=controller['controllerRef'], + ssh=controller['networkSettings']['remoteAccessEnabled']) + controllers_dict[label] = settings + i += 1 + + return controllers_dict + + @property + def interface(self): + net_interfaces = list() + try: + (rc, net_interfaces) = request(self.url + 'storage-systems/%s/configuration/ethernet-interfaces' + % self.ssid, headers=HEADERS, **self.creds) + except Exception as err: + self.module.fail_json( + msg="Failed to retrieve defined management interfaces. Array Id [%s]. Error [%s]." + % (self.ssid, to_native(err))) + + controllers = self.controllers + controller = controllers[self.controller] + + net_interfaces = [iface for iface in net_interfaces if iface["controllerRef"] == controller["controllerRef"]] + + # Find the correct interface + iface = None + for net in net_interfaces: + if self.name: + if net["alias"] == self.name or net["interfaceName"] == self.name: + iface = net + break + elif self.channel: + if net["channel"] == self.channel: + iface = net + break + + if iface is None: + identifier = self.name if self.name is not None else self.channel + self.module.fail_json(msg="We could not find an interface matching [%s] on Array=[%s]." 
+ % (identifier, self.ssid)) + + return dict(alias=iface["alias"], + channel=iface["channel"], + link_status=iface["linkStatus"], + enabled=iface["ipv4Enabled"], + address=iface["ipv4Address"], + gateway=iface["ipv4GatewayAddress"], + subnet_mask=iface["ipv4SubnetMask"], + dns_config_method=iface["dnsProperties"]["acquisitionProperties"]["dnsAcquisitionType"], + dns_servers=iface["dnsProperties"]["acquisitionProperties"]["dnsServers"], + ntp_config_method=iface["ntpProperties"]["acquisitionProperties"]["ntpAcquisitionType"], + ntp_servers=iface["ntpProperties"]["acquisitionProperties"]["ntpServers"], + config_method=iface["ipv4AddressConfigMethod"], + controllerRef=iface["controllerRef"], + controllerSlot=iface["controllerSlot"], + ipv6Enabled=iface["ipv6Enabled"], + id=iface["interfaceRef"], ) + + def get_enable_interface_settings(self, iface, expected_iface, update, body): + """Enable or disable the IPv4 network interface.""" + if self.enable_interface: + if not iface["enabled"]: + update = True + body["ipv4Enabled"] = True + else: + if iface["enabled"]: + update = True + body["ipv4Enabled"] = False + + expected_iface["enabled"] = body["ipv4Enabled"] + return update, expected_iface, body + + def get_interface_settings(self, iface, expected_iface, update, body): + """Update network interface settings.""" + + if self.config_method == "dhcp": + if iface["config_method"] != "configDhcp": + update = True + body["ipv4AddressConfigMethod"] = "configDhcp" + + else: + if iface["config_method"] != "configStatic": + update = True + body["ipv4AddressConfigMethod"] = "configStatic" + + if iface["address"] != self.address: + update = True + body["ipv4Address"] = self.address + + if iface["subnet_mask"] != self.subnet_mask: + update = True + body["ipv4SubnetMask"] = self.subnet_mask + + if self.gateway and iface["gateway"] != self.gateway: + update = True + body["ipv4GatewayAddress"] = self.gateway + + expected_iface["address"] = body["ipv4Address"] + expected_iface["subnet_mask"] = body["ipv4SubnetMask"] + expected_iface["gateway"] = body["ipv4GatewayAddress"] + + expected_iface["config_method"] = body["ipv4AddressConfigMethod"] + + return update, expected_iface, body + + def get_dns_server_settings(self, iface, expected_iface, update, body): + """Add DNS server information to the request body.""" + if self.dns_config_method == "dhcp": + if iface["dns_config_method"] != "dhcp": + update = True + body["dnsAcquisitionDescriptor"] = dict(dnsAcquisitionType="dhcp") + + elif self.dns_config_method == "static": + dns_servers = [dict(addressType="ipv4", ipv4Address=self.dns_address)] + if self.dns_address_backup: + dns_servers.append(dict(addressType="ipv4", ipv4Address=self.dns_address_backup)) + + body["dnsAcquisitionDescriptor"] = dict(dnsAcquisitionType="stat", dnsServers=dns_servers) + + if (iface["dns_config_method"] != "stat" or + len(iface["dns_servers"]) != len(dns_servers) or + (len(iface["dns_servers"]) == 2 and + (iface["dns_servers"][0]["ipv4Address"] != self.dns_address or + iface["dns_servers"][1]["ipv4Address"] != self.dns_address_backup)) or + (len(iface["dns_servers"]) == 1 and + iface["dns_servers"][0]["ipv4Address"] != self.dns_address)): + update = True + + expected_iface["dns_servers"] = dns_servers + + expected_iface["dns_config_method"] = body["dnsAcquisitionDescriptor"]["dnsAcquisitionType"] + return update, expected_iface, body + + def get_ntp_server_settings(self, iface, expected_iface, update, body): + """Add NTP server information to the request body.""" + if 
self.ntp_config_method == "disable": + if iface["ntp_config_method"] != "disabled": + update = True + body["ntpAcquisitionDescriptor"] = dict(ntpAcquisitionType="disabled") + + elif self.ntp_config_method == "dhcp": + if iface["ntp_config_method"] != "dhcp": + update = True + body["ntpAcquisitionDescriptor"] = dict(ntpAcquisitionType="dhcp") + + elif self.ntp_config_method == "static": + ntp_servers = [dict(addrType="ipvx", ipvxAddress=dict(addressType="ipv4", ipv4Address=self.ntp_address))] + if self.ntp_address_backup: + ntp_servers.append(dict(addrType="ipvx", + ipvxAddress=dict(addressType="ipv4", ipv4Address=self.ntp_address_backup))) + + body["ntpAcquisitionDescriptor"] = dict(ntpAcquisitionType="stat", ntpServers=ntp_servers) + + if (iface["ntp_config_method"] != "stat" or + len(iface["ntp_servers"]) != len(ntp_servers) or + ((len(iface["ntp_servers"]) == 2 and + (iface["ntp_servers"][0]["ipvxAddress"]["ipv4Address"] != self.ntp_address or + iface["ntp_servers"][1]["ipvxAddress"]["ipv4Address"] != self.ntp_address_backup)) or + (len(iface["ntp_servers"]) == 1 and + iface["ntp_servers"][0]["ipvxAddress"]["ipv4Address"] != self.ntp_address))): + update = True + + expected_iface["ntp_servers"] = ntp_servers + + expected_iface["ntp_config_method"] = body["ntpAcquisitionDescriptor"]["ntpAcquisitionType"] + return update, expected_iface, body + + def get_remote_ssh_settings(self, settings, update, body): + """Configure network interface ports for remote ssh access.""" + if self.ssh != settings["ssh"]: + update = True + + body["enableRemoteAccess"] = self.ssh + return update, body + + def update_array(self, settings, iface): + """Update controller with new interface, dns service, ntp service and/or remote ssh access information. + + :returns: whether information passed will modify the controller's current state + :rtype: bool + """ + update = False + body = dict(controllerRef=settings['controllerRef'], + interfaceRef=iface['id']) + expected_iface = iface.copy() + + # Check if api url is using the effected management interface to change itself + update_used_matching_address = False + if self.enable_interface and self.config_method: + netloc = list(urlparse.urlparse(self.url))[1] + address = netloc.split(":")[0] + address_info = socket.getaddrinfo(address, 8443) + url_address_info = socket.getaddrinfo(iface["address"], 8443) + update_used_matching_address = any(info in url_address_info for info in address_info) + + self._logger.info("update_used_matching_address: %s", update_used_matching_address) + + # Populate the body of the request and check for changes + if self.enable_interface is not None: + update, expected_iface, body = self.get_enable_interface_settings(iface, expected_iface, update, body) + + if self.config_method is not None: + update, expected_iface, body = self.get_interface_settings(iface, expected_iface, update, body) + + if self.dns_config_method is not None: + update, expected_iface, body = self.get_dns_server_settings(iface, expected_iface, update, body) + + if self.ntp_config_method is not None: + update, expected_iface, body = self.get_ntp_server_settings(iface, expected_iface, update, body) + + if self.ssh is not None: + update, body = self.get_remote_ssh_settings(settings, update, body) + iface["ssh"] = self.ssh + expected_iface["ssh"] = self.ssh + + # debug information + self._logger.info(pformat(body)) + self._logger.info(pformat(iface)) + self._logger.info(pformat(expected_iface)) + + if self.check_mode: + return update + + if update and not self.check_mode: + if 
not update_used_matching_address: + try: + (rc, data) = request(self.url + 'storage-systems/%s/configuration/ethernet-interfaces' + % self.ssid, method='POST', data=json.dumps(body), headers=HEADERS, + timeout=300, ignore_errors=True, **self.creds) + if rc == 422: + if data['retcode'] == "4" or data['retcode'] == "illegalParam": + if not (body['ipv4Enabled'] or iface['ipv6Enabled']): + self.module.fail_json(msg="This storage-system already has IPv6 connectivity disabled. " + "DHCP configuration for IPv4 is required at a minimum." + " Array Id [%s] Message [%s]." + % (self.ssid, data['errorMessage'])) + else: + self.module.fail_json(msg="We failed to configure the management interface. Array Id " + "[%s] Message [%s]." % (self.ssid, data)) + elif rc >= 300: + self.module.fail_json( + msg="We failed to configure the management interface. Array Id [%s] Message [%s]." % + (self.ssid, data)) + + # This is going to catch cases like a connection failure + except Exception as err: + self.module.fail_json( + msg="Connection failure: we failed to modify the network settings! Array Id [%s]. Error [%s]." + % (self.ssid, to_native(err))) + else: + self.update_api_address_interface_match(body) + + return self.validate_changes(expected_iface) if update and iface["link_status"] != "up" else update + + def update_api_address_interface_match(self, body): + """Change network interface address which matches the api_address""" + try: + try: + (rc, data) = request(self.url + 'storage-systems/%s/configuration/ethernet-interfaces' % self.ssid, + use_proxy=False, force=True, ignore_errors=True, method='POST', + data=json.dumps(body), headers=HEADERS, timeout=10, **self.creds) + except Exception: + url_parts = list(urlparse.urlparse(self.url)) + domain = url_parts[1].split(":") + domain[0] = self.address + url_parts[1] = ":".join(domain) + expected_url = urlparse.urlunparse(url_parts) + self._logger.info(pformat(expected_url)) + + (rc, data) = request(expected_url + 'storage-systems/%s/configuration/ethernet-interfaces' % self.ssid, + headers=HEADERS, timeout=300, **self.creds) + return + except Exception as err: + self._logger.info(type(err)) + self.module.fail_json( + msg="Connection failure: we failed to modify the network settings! Array Id [%s]. Error [%s]." + % (self.ssid, to_native(err))) + + def validate_changes(self, expected_iface, retry=6): + """Validate interface changes were applied to the controller interface port. 30 second timeout""" + if self.interface != expected_iface: + time.sleep(5) + if retry: + return self.validate_changes(expected_iface, retry - 1) + + self.module.fail_json(msg="Update failure: we failed to verify the necessary state change.") + + return True + + def check_health(self): + """It's possible, due to a previous operation, for the API to report a 424 (offline) status for the + storage-system. Therefore, we run a manual check with retries to attempt to contact the system before we + continue. + """ + try: + (rc, data) = request(self.url + 'storage-systems/%s/controllers' + % self.ssid, headers=HEADERS, + ignore_errors=True, **self.creds) + + # We've probably recently changed the interface settings and it's still coming back up: retry. + if rc == 424: + if self.retries < self.MAX_RETRIES: + self.retries += 1 + self._logger.info("We hit a 424, retrying in 5s.") + time.sleep(5) + self.check_health() + else: + self.module.fail_json( + msg="We failed to pull storage-system information. Array Id [%s] Message [%s]." 
% + (self.ssid, data)) + elif rc >= 300: + self.module.fail_json( + msg="We failed to pull storage-system information. Array Id [%s] Message [%s]." % + (self.ssid, data)) + # This is going to catch cases like a connection failure + except Exception as err: + if self.retries < self.MAX_RETRIES: + self._logger.info("We hit a connection failure, retrying in 5s.") + self.retries += 1 + time.sleep(5) + self.check_health() + else: + self.module.fail_json( + msg="Connection failure: we failed to modify the network settings! Array Id [%s]. Error [%s]." + % (self.ssid, to_native(err))) + + def update(self): + """Update storage system with necessary changes.""" + # Check if the storage array can be contacted + self.check_health() + + # make the necessary changes to the storage system + settings = self.controllers[self.controller] + iface = self.interface + self._logger.info(pformat(settings)) + self._logger.info(pformat(iface)) + update = self.update_array(settings, iface) + + self.module.exit_json(msg="The interface settings have been updated.", changed=update) + + def __call__(self, *args, **kwargs): + self.update() + + +def main(): + iface = MgmtInterface() + iface() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/storage/netapp/netapp_e_snapshot_group.py b/plugins/modules/storage/netapp/netapp_e_snapshot_group.py new file mode 100644 index 0000000000..b2b61be082 --- /dev/null +++ b/plugins/modules/storage/netapp/netapp_e_snapshot_group.py @@ -0,0 +1,369 @@ +#!/usr/bin/python + +# (c) 2016, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: netapp_e_snapshot_group +short_description: NetApp E-Series manage snapshot groups +description: + - Create, update, delete snapshot groups for NetApp E-series storage arrays +author: Kevin Hulquest (@hulquest) +options: + api_username: + required: true + description: + - The username to authenticate with the SANtricity WebServices Proxy or embedded REST API. + api_password: + required: true + description: + - The password to authenticate with the SANtricity WebServices Proxy or embedded REST API. + api_url: + required: true + description: + - The url to the SANtricity WebServices Proxy or embedded REST API. + validate_certs: + required: false + default: true + description: + - Should https certificates be validated? + type: bool + state: + description: + - Whether to ensure the group is present or absent. + required: True + choices: + - present + - absent + name: + description: + - The name to give the snapshot group + required: True + base_volume_name: + description: + - The name of the base volume or thin volume to use as the base for the new snapshot group. + - If a snapshot group with an identical C(name) already exists but with a different base volume + an error will be returned. + required: True + repo_pct: + description: + - The size of the repository in relation to the size of the base volume + required: False + default: 20 + warning_threshold: + description: + - The repository utilization warning threshold, as a percentage of the repository volume capacity. + required: False + default: 80 + delete_limit: + description: + - The automatic deletion indicator. 
+            - If non-zero, the oldest snapshot image will be automatically deleted when creating a new snapshot image to keep the total number of
+              snapshot images limited to the number specified.
+            - This value is overridden by the consistency group setting if this snapshot group is associated with a consistency group.
+        required: False
+        default: 30
+    full_policy:
+        description:
+            - The behavior on when the data repository becomes full.
+            - This value is overridden by the consistency group setting if this snapshot group is associated with a consistency group.
+        required: False
+        default: purgepit
+        choices:
+            - purgepit
+            - unknown
+            - failbasewrites
+            - __UNDEFINED
+    storage_pool_name:
+        required: True
+        description:
+            - The name of the storage pool on which to allocate the repository volume.
+    rollback_priority:
+        required: False
+        description:
+            - The importance of the rollback operation.
+            - This value is overridden by the consistency group setting if this snapshot group is associated with a consistency group.
+        choices:
+            - highest
+            - high
+            - medium
+            - low
+            - lowest
+            - __UNDEFINED
+        default: medium
+'''
+
+EXAMPLES = """
+    - name: Configure Snapshot group
+      netapp_e_snapshot_group:
+        ssid: "{{ ssid }}"
+        api_url: "{{ netapp_api_url }}"
+        api_username: "{{ netapp_api_username }}"
+        api_password: "{{ netapp_api_password }}"
+        validate_certs: "{{ netapp_api_validate_certs }}"
+        base_volume_name: SSGroup_test
+        name: OOSS_Group
+        repo_pct: 20
+        warning_threshold: 85
+        delete_limit: 30
+        full_policy: purgepit
+        storage_pool_name: Disk_Pool_1
+        rollback_priority: medium
+"""
+RETURN = """
+msg:
+    description: Success message
+    returned: success
+    type: str
+    sample: json facts for newly created snapshot group.
+"""
+HEADERS = {
+    "Content-Type": "application/json",
+    "Accept": "application/json",
+}
+import json
+
+from ansible.module_utils.api import basic_auth_argument_spec
+from ansible.module_utils.basic import AnsibleModule
+
+from ansible.module_utils._text import to_native
+from ansible.module_utils.urls import open_url
+from ansible.module_utils.six.moves.urllib.error import HTTPError
+
+
+def request(url, data=None, headers=None, method='GET', use_proxy=True,
+            force=False, last_mod_time=None, timeout=10, validate_certs=True,
+            url_username=None, url_password=None, http_agent=None, force_basic_auth=True, ignore_errors=False):
+    try:
+        r = open_url(url=url, data=data, headers=headers, method=method, use_proxy=use_proxy,
+                     force=force, last_mod_time=last_mod_time, timeout=timeout, validate_certs=validate_certs,
+                     url_username=url_username, url_password=url_password, http_agent=http_agent,
+                     force_basic_auth=force_basic_auth)
+    except HTTPError as err:
+        r = err.fp
+
+    try:
+        raw_data = r.read()
+        if raw_data:
+            data = json.loads(raw_data)
+        else:
+            raw_data = None
+    except Exception:
+        if ignore_errors:
+            pass
+        else:
+            raise Exception(raw_data)
+
+    resp_code = r.getcode()
+
+    if resp_code >= 400 and not ignore_errors:
+        raise Exception(resp_code, data)
+    else:
+        return resp_code, data
+
+
+class SnapshotGroup(object):
+    def __init__(self):
+
+        argument_spec = basic_auth_argument_spec()
+        argument_spec.update(
+            api_username=dict(type='str', required=True),
+            api_password=dict(type='str', required=True, no_log=True),
+            api_url=dict(type='str', required=True),
+            state=dict(required=True, choices=['present', 'absent']),
+            base_volume_name=dict(required=True),
+            name=dict(required=True),
+            repo_pct=dict(default=20, type='int'),
+            warning_threshold=dict(default=80, type='int'),
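+            # These knobs map onto the REST payload built in create_snapshot_group():
+            # repo_pct -> repositoryPercentage, warning_threshold -> warningThreshold,
+            # delete_limit -> autoDeleteLimit.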
+            delete_limit=dict(default=30, type='int'),
+            full_policy=dict(default='purgepit', choices=['unknown', 'failbasewrites', 'purgepit']),
+            rollback_priority=dict(default='medium', choices=['highest', 'high', 'medium', 'low', 'lowest']),
+            storage_pool_name=dict(type='str'),
+            ssid=dict(required=True),
+        )
+
+        self.module = AnsibleModule(argument_spec=argument_spec)
+
+        self.post_data = dict()
+        self.warning_threshold = self.module.params['warning_threshold']
+        self.base_volume_name = self.module.params['base_volume_name']
+        self.name = self.module.params['name']
+        self.repo_pct = self.module.params['repo_pct']
+        self.delete_limit = self.module.params['delete_limit']
+        self.full_policy = self.module.params['full_policy']
+        self.rollback_priority = self.module.params['rollback_priority']
+        self.storage_pool_name = self.module.params['storage_pool_name']
+        self.state = self.module.params['state']
+
+        self.url = self.module.params['api_url']
+        self.user = self.module.params['api_username']
+        self.pwd = self.module.params['api_password']
+        self.certs = self.module.params['validate_certs']
+        self.ssid = self.module.params['ssid']
+
+        if not self.url.endswith('/'):
+            self.url += '/'
+
+        self.changed = False
+
+    @property
+    def pool_id(self):
+        pools = 'storage-systems/%s/storage-pools' % self.ssid
+        url = self.url + pools
+        try:
+            (rc, data) = request(url, headers=HEADERS, url_username=self.user, url_password=self.pwd)
+        except Exception as err:
+            self.module.fail_json(msg="Snapshot group module - Failed to fetch storage pools. " +
+                                      "Id [%s]. Error [%s]." % (self.ssid, to_native(err)))
+
+        for pool in data:
+            if pool['name'] == self.storage_pool_name:
+                self.pool_data = pool
+                return pool['id']
+
+        self.module.fail_json(msg="No storage pool with the name: '%s' was found" % self.storage_pool_name)
+
+    @property
+    def volume_id(self):
+        volumes = 'storage-systems/%s/volumes' % self.ssid
+        url = self.url + volumes
+        try:
+            rc, data = request(url, headers=HEADERS, url_username=self.user, url_password=self.pwd,
+                               validate_certs=self.certs)
+        except Exception as err:
+            self.module.fail_json(msg="Snapshot group module - Failed to fetch volumes. " +
+                                      "Id [%s]. Error [%s]." % (self.ssid, to_native(err)))
+        # Count matches so duplicate volume names can be rejected explicitly.
+        qty = 0
+        volume_id = None
+        for volume in data:
+            if volume['name'] == self.base_volume_name:
+                qty += 1
+                volume_id = volume['id']
+                self.volume = volume
+
+        if qty > 1:
+            self.module.fail_json(msg="More than one volume with the name: %s was found, "
+                                      "please ensure your volume has a unique name" % self.base_volume_name)
+
+        if volume_id is None:
+            self.module.fail_json(msg="No volume with the name: %s, was found" % self.base_volume_name)
+
+        return volume_id
+
+    @property
+    def snapshot_group_id(self):
+        url = self.url + 'storage-systems/%s/snapshot-groups' % self.ssid
+        try:
+            rc, data = request(url, headers=HEADERS, url_username=self.user, url_password=self.pwd,
+                               validate_certs=self.certs)
+        except Exception as err:
+            self.module.fail_json(msg="Failed to fetch snapshot groups. " +
+                                      "Id [%s]. Error [%s]."
% (self.ssid, to_native(err))) + for ssg in data: + if ssg['name'] == self.name: + self.ssg_data = ssg + return ssg['id'] + + return None + + @property + def ssg_needs_update(self): + if self.ssg_data['fullWarnThreshold'] != self.warning_threshold or \ + self.ssg_data['autoDeleteLimit'] != self.delete_limit or \ + self.ssg_data['repFullPolicy'] != self.full_policy or \ + self.ssg_data['rollbackPriority'] != self.rollback_priority: + return True + else: + return False + + def create_snapshot_group(self): + self.post_data = dict( + baseMappableObjectId=self.volume_id, + name=self.name, + repositoryPercentage=self.repo_pct, + warningThreshold=self.warning_threshold, + autoDeleteLimit=self.delete_limit, + fullPolicy=self.full_policy, + storagePoolId=self.pool_id, + ) + snapshot = 'storage-systems/%s/snapshot-groups' % self.ssid + url = self.url + snapshot + try: + rc, self.ssg_data = request(url, data=json.dumps(self.post_data), method='POST', headers=HEADERS, + url_username=self.user, url_password=self.pwd, validate_certs=self.certs) + except Exception as err: + self.module.fail_json(msg="Failed to create snapshot group. " + + "Snapshot group [%s]. Id [%s]. Error [%s]." % (self.name, + self.ssid, + to_native(err))) + + if not self.snapshot_group_id: + self.snapshot_group_id = self.ssg_data['id'] + + if self.ssg_needs_update: + self.update_ssg() + else: + self.module.exit_json(changed=True, **self.ssg_data) + + def update_ssg(self): + self.post_data = dict( + warningThreshold=self.warning_threshold, + autoDeleteLimit=self.delete_limit, + fullPolicy=self.full_policy, + rollbackPriority=self.rollback_priority + ) + + url = self.url + "storage-systems/%s/snapshot-groups/%s" % (self.ssid, self.snapshot_group_id) + try: + rc, self.ssg_data = request(url, data=json.dumps(self.post_data), method='POST', headers=HEADERS, + url_username=self.user, url_password=self.pwd, validate_certs=self.certs) + except Exception as err: + self.module.fail_json(msg="Failed to update snapshot group. " + + "Snapshot group [%s]. Id [%s]. Error [%s]." % (self.name, + self.ssid, + to_native(err))) + + def apply(self): + if self.state == 'absent': + if self.snapshot_group_id: + try: + rc, resp = request( + self.url + 'storage-systems/%s/snapshot-groups/%s' % (self.ssid, self.snapshot_group_id), + method='DELETE', headers=HEADERS, url_password=self.pwd, url_username=self.user, + validate_certs=self.certs) + except Exception as err: + self.module.fail_json(msg="Failed to delete snapshot group. " + + "Snapshot group [%s]. Id [%s]. Error [%s]." 
% (self.name, + self.ssid, + to_native(err))) + self.module.exit_json(changed=True, msg="Snapshot group removed", **self.ssg_data) + else: + self.module.exit_json(changed=False, msg="Snapshot group absent") + + elif self.snapshot_group_id: + if self.ssg_needs_update: + self.update_ssg() + self.module.exit_json(changed=True, **self.ssg_data) + else: + self.module.exit_json(changed=False, **self.ssg_data) + else: + self.create_snapshot_group() + + +def main(): + vg = SnapshotGroup() + vg.apply() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/storage/netapp/netapp_e_snapshot_images.py b/plugins/modules/storage/netapp/netapp_e_snapshot_images.py new file mode 100644 index 0000000000..f36f35dee2 --- /dev/null +++ b/plugins/modules/storage/netapp/netapp_e_snapshot_images.py @@ -0,0 +1,246 @@ +#!/usr/bin/python + +# (c) 2016, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: netapp_e_snapshot_images +short_description: NetApp E-Series create and delete snapshot images +description: + - Create and delete snapshots images on snapshot groups for NetApp E-series storage arrays. + - Only the oldest snapshot image can be deleted so consistency is preserved. + - "Related: Snapshot volumes are created from snapshot images." +author: Kevin Hulquest (@hulquest) +options: + api_username: + required: true + description: + - The username to authenticate with the SANtricity WebServices Proxy or embedded REST API. + api_password: + required: true + description: + - The password to authenticate with the SANtricity WebServices Proxy or embedded REST API. + api_url: + required: true + description: + - The url to the SANtricity WebServices Proxy or embedded REST API. + validate_certs: + required: false + default: true + description: + - Should https certificates be validated? + snapshot_group: + description: + - The name of the snapshot group in which you want to create a snapshot image. + required: True + state: + description: + - Whether a new snapshot image should be created or oldest be deleted. 
+ required: True + choices: ['create', 'remove'] +''' +EXAMPLES = """ + - name: Create Snapshot + netapp_e_snapshot_images: + ssid: "{{ ssid }}" + api_url: "{{ netapp_api_url }}" + api_username: "{{ netapp_api_username }}" + api_password: "{{ netapp_api_password }}" + validate_certs: "{{ validate_certs }}" + snapshot_group: "3300000060080E5000299C24000005B656D9F394" + state: 'create' +""" +RETURN = """ +--- + msg: + description: State of operation + type: str + returned: always + sample: "Created snapshot image" + image_id: + description: ID of snapshot image + type: str + returned: state == created + sample: "3400000060080E5000299B640063074057BC5C5E " +""" + +HEADERS = { + "Content-Type": "application/json", + "Accept": "application/json", +} +import json + +from ansible.module_utils.api import basic_auth_argument_spec +from ansible.module_utils.basic import AnsibleModule + +from ansible.module_utils._text import to_native +from ansible.module_utils.urls import open_url +from ansible.module_utils.six.moves.urllib.error import HTTPError + + +def request(url, data=None, headers=None, method='GET', use_proxy=True, + force=False, last_mod_time=None, timeout=10, validate_certs=True, + url_username=None, url_password=None, http_agent=None, force_basic_auth=True, ignore_errors=False): + try: + r = open_url(url=url, data=data, headers=headers, method=method, use_proxy=use_proxy, + force=force, last_mod_time=last_mod_time, timeout=timeout, validate_certs=validate_certs, + url_username=url_username, url_password=url_password, http_agent=http_agent, + force_basic_auth=force_basic_auth) + except HTTPError as err: + r = err.fp + + try: + raw_data = r.read() + if raw_data: + data = json.loads(raw_data) + else: + raw_data = None + except Exception: + if ignore_errors: + pass + else: + raise Exception(raw_data) + + resp_code = r.getcode() + + if resp_code >= 400 and not ignore_errors: + raise Exception(resp_code, data) + else: + return resp_code, data + + +def snapshot_group_from_name(module, ssid, api_url, api_pwd, api_usr, name): + snap_groups = 'storage-systems/%s/snapshot-groups' % ssid + snap_groups_url = api_url + snap_groups + (ret, snapshot_groups) = request(snap_groups_url, url_username=api_usr, url_password=api_pwd, headers=HEADERS, + validate_certs=module.params['validate_certs']) + + snapshot_group_id = None + for snapshot_group in snapshot_groups: + if name == snapshot_group['label']: + snapshot_group_id = snapshot_group['pitGroupRef'] + break + if snapshot_group_id is None: + module.fail_json(msg="Failed to lookup snapshot group. Group [%s]. Id [%s]." % (name, ssid)) + + return snapshot_group + + +def oldest_image(module, ssid, api_url, api_pwd, api_usr, name): + get_status = 'storage-systems/%s/snapshot-images' % ssid + url = api_url + get_status + + try: + (ret, images) = request(url, url_username=api_usr, url_password=api_pwd, headers=HEADERS, + validate_certs=module.params['validate_certs']) + except Exception as err: + module.fail_json(msg="Failed to get snapshot images for group. Group [%s]. Id [%s]. Error [%s]" % + (name, ssid, to_native(err))) + if not images: + module.exit_json(msg="There are no snapshot images to remove. Group [%s]. Id [%s]." % (name, ssid)) + + oldest = min(images, key=lambda x: x['pitSequenceNumber']) + if oldest is None or "pitRef" not in oldest: + module.fail_json(msg="Failed to lookup oldest snapshot group. Group [%s]. Id [%s]." 
% (name, ssid))

+    return oldest
+
+
+def create_image(module, ssid, api_url, pwd, user, p, snapshot_group):
+    snapshot_group_obj = snapshot_group_from_name(module, ssid, api_url, pwd, user, snapshot_group)
+    snapshot_group_id = snapshot_group_obj['pitGroupRef']
+    endpoint = 'storage-systems/%s/snapshot-images' % ssid
+    url = api_url + endpoint
+    post_data = json.dumps({'groupId': snapshot_group_id})
+
+    image_data = request(url, data=post_data, method='POST', url_username=user, url_password=pwd, headers=HEADERS,
+                         validate_certs=module.params['validate_certs'])
+
+    if image_data[1]['status'] == 'optimal':
+        status = True
+        image_id = image_data[1]['id']
+    else:
+        status = False
+        image_id = ''
+
+    return status, image_id
+
+
+def delete_image(module, ssid, api_url, pwd, user, snapshot_group):
+    image = oldest_image(module, ssid, api_url, pwd, user, snapshot_group)
+    image_id = image['pitRef']
+    endpoint = 'storage-systems/%s/snapshot-images/%s' % (ssid, image_id)
+    url = api_url + endpoint
+
+    try:
+        (ret, image_data) = request(url, method='DELETE', url_username=user, url_password=pwd, headers=HEADERS,
+                                    validate_certs=module.params['validate_certs'])
+    except Exception as err:
+        module.fail_json(msg="Failed to delete snapshot image. Group [%s]. Id [%s]. Error [%s]" %
+                             (snapshot_group, ssid, to_native(err)))
+
+    # A 204 means the image was removed; any other response carries an errorMessage payload.
+    if ret == 204:
+        deleted_status = True
+        error_message = ''
+    else:
+        deleted_status = False
+        error_message = image_data['errorMessage']
+
+    return deleted_status, error_message
+
+
+def main():
+    argument_spec = basic_auth_argument_spec()
+    argument_spec.update(dict(
+        snapshot_group=dict(required=True, type='str'),
+        ssid=dict(required=True, type='str'),
+        api_url=dict(required=True),
+        api_username=dict(required=False),
+        api_password=dict(required=False, no_log=True),
+        validate_certs=dict(required=False, default=True),
+        state=dict(required=True, choices=['create', 'remove'], type='str'),
+    ))
+    module = AnsibleModule(argument_spec)
+
+    p = module.params
+
+    ssid = p.pop('ssid')
+    api_url = p.pop('api_url')
+    user = p.pop('api_username')
+    pwd = p.pop('api_password')
+    snapshot_group = p.pop('snapshot_group')
+    desired_state = p.pop('state')
+
+    if not api_url.endswith('/'):
+        api_url += '/'
+
+    if desired_state == 'create':
+        created_status, snapshot_id = create_image(module, ssid, api_url, pwd, user, p, snapshot_group)
+
+        if created_status:
+            module.exit_json(changed=True, msg='Created snapshot image', image_id=snapshot_id)
+        else:
+            module.fail_json(
+                msg="Could not create snapshot image on system %s, in snapshot group %s" % (ssid, snapshot_group))
+    else:
+        deleted, error_msg = delete_image(module, ssid, api_url, pwd, user, snapshot_group)
+
+        if deleted:
+            module.exit_json(changed=True, msg='Deleted snapshot image for snapshot group [%s]' % (snapshot_group))
+        else:
+            module.fail_json(
+                msg="Could not delete snapshot image on system %s, in snapshot group %s --- %s" % (
+                    ssid, snapshot_group, error_msg))
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/storage/netapp/netapp_e_snapshot_volume.py b/plugins/modules/storage/netapp/netapp_e_snapshot_volume.py
new file mode 100644
index 0000000000..de4d6752f5
--- /dev/null
+++ b/plugins/modules/storage/netapp/netapp_e_snapshot_volume.py
@@ -0,0 +1,280 @@
+#!/usr/bin/python
+
+# (c) 2016, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: netapp_e_snapshot_volume +short_description: NetApp E-Series manage snapshot volumes. +description: + - Create, update, remove snapshot volumes for NetApp E/EF-Series storage arrays. +author: Kevin Hulquest (@hulquest) +notes: + - Only I(full_threshold) is supported for update operations. If the snapshot volume already exists and the threshold matches, then an C(ok) status + will be returned, no other changes can be made to a pre-existing snapshot volume. +options: + api_username: + required: true + description: + - The username to authenticate with the SANtricity WebServices Proxy or embedded REST API. + api_password: + required: true + description: + - The password to authenticate with the SANtricity WebServices Proxy or embedded REST API. + api_url: + required: true + description: + - The url to the SANtricity WebServices Proxy or embedded REST API. + validate_certs: + required: false + default: true + description: + - Should https certificates be validated? + type: bool + ssid: + description: + - storage array ID + required: True + snapshot_image_id: + required: True + description: + - The identifier of the snapshot image used to create the new snapshot volume. + - "Note: You'll likely want to use the M(netapp_e_facts) module to find the ID of the image you want." + full_threshold: + description: + - The repository utilization warning threshold percentage + default: 85 + name: + required: True + description: + - The name you wish to give the snapshot volume + view_mode: + required: True + description: + - The snapshot volume access mode + choices: + - modeUnknown + - readWrite + - readOnly + - __UNDEFINED + repo_percentage: + description: + - The size of the view in relation to the size of the base volume + default: 20 + storage_pool_name: + description: + - Name of the storage pool on which to allocate the repository volume. + required: True + state: + description: + - Whether to create or remove the snapshot volume + required: True + choices: + - absent + - present +''' +EXAMPLES = """ + - name: Snapshot volume + netapp_e_snapshot_volume: + ssid: "{{ ssid }}" + api_url: "{{ netapp_api_url }}/" + api_username: "{{ netapp_api_username }}" + api_password: "{{ netapp_api_password }}" + state: present + storage_pool_name: "{{ snapshot_volume_storage_pool_name }}" + snapshot_image_id: "{{ snapshot_volume_image_id }}" + name: "{{ snapshot_volume_name }}" +""" +RETURN = """ +msg: + description: Success message + returned: success + type: str + sample: Json facts for the volume that was created. 
+""" +HEADERS = { + "Content-Type": "application/json", + "Accept": "application/json", +} +import json + +from ansible.module_utils.api import basic_auth_argument_spec +from ansible.module_utils.basic import AnsibleModule + +from ansible.module_utils.urls import open_url +from ansible.module_utils.six.moves.urllib.error import HTTPError + + +def request(url, data=None, headers=None, method='GET', use_proxy=True, + force=False, last_mod_time=None, timeout=10, validate_certs=True, + url_username=None, url_password=None, http_agent=None, force_basic_auth=True, ignore_errors=False): + try: + r = open_url(url=url, data=data, headers=headers, method=method, use_proxy=use_proxy, + force=force, last_mod_time=last_mod_time, timeout=timeout, validate_certs=validate_certs, + url_username=url_username, url_password=url_password, http_agent=http_agent, + force_basic_auth=force_basic_auth) + except HTTPError as err: + r = err.fp + + try: + raw_data = r.read() + if raw_data: + data = json.loads(raw_data) + else: + raw_data = None + except Exception: + if ignore_errors: + pass + else: + raise Exception(raw_data) + + resp_code = r.getcode() + + if resp_code >= 400 and not ignore_errors: + raise Exception(resp_code, data) + else: + return resp_code, data + + +class SnapshotVolume(object): + def __init__(self): + argument_spec = basic_auth_argument_spec() + argument_spec.update(dict( + api_username=dict(type='str', required=True), + api_password=dict(type='str', required=True, no_log=True), + api_url=dict(type='str', required=True), + ssid=dict(type='str', required=True), + snapshot_image_id=dict(type='str', required=True), + full_threshold=dict(type='int', default=85), + name=dict(type='str', required=True), + view_mode=dict(type='str', default='readOnly', + choices=['readOnly', 'readWrite', 'modeUnknown', '__Undefined']), + repo_percentage=dict(type='int', default=20), + storage_pool_name=dict(type='str', required=True), + state=dict(type='str', required=True, choices=['absent', 'present']) + )) + + self.module = AnsibleModule(argument_spec=argument_spec) + args = self.module.params + self.state = args['state'] + self.ssid = args['ssid'] + self.snapshot_image_id = args['snapshot_image_id'] + self.full_threshold = args['full_threshold'] + self.name = args['name'] + self.view_mode = args['view_mode'] + self.repo_percentage = args['repo_percentage'] + self.storage_pool_name = args['storage_pool_name'] + self.url = args['api_url'] + self.user = args['api_username'] + self.pwd = args['api_password'] + self.certs = args['validate_certs'] + + if not self.url.endswith('/'): + self.url += '/' + + @property + def pool_id(self): + pools = 'storage-systems/%s/storage-pools' % self.ssid + url = self.url + pools + (rc, data) = request(url, headers=HEADERS, url_username=self.user, url_password=self.pwd, + validate_certs=self.certs) + + for pool in data: + if pool['name'] == self.storage_pool_name: + self.pool_data = pool + return pool['id'] + + self.module.fail_json(msg="No storage pool with the name: '%s' was found" % self.name) + + @property + def ss_vol_exists(self): + rc, ss_vols = request(self.url + 'storage-systems/%s/snapshot-volumes' % self.ssid, headers=HEADERS, + url_username=self.user, url_password=self.pwd, validate_certs=self.certs) + if ss_vols: + for ss_vol in ss_vols: + if ss_vol['name'] == self.name: + self.ss_vol = ss_vol + return True + else: + return False + + return False + + @property + def ss_vol_needs_update(self): + if self.ss_vol['fullWarnThreshold'] != self.full_threshold: + return True + 
else: + return False + + def create_ss_vol(self): + post_data = dict( + snapshotImageId=self.snapshot_image_id, + fullThreshold=self.full_threshold, + name=self.name, + viewMode=self.view_mode, + repositoryPercentage=self.repo_percentage, + repositoryPoolId=self.pool_id + ) + + rc, create_resp = request(self.url + 'storage-systems/%s/snapshot-volumes' % self.ssid, + data=json.dumps(post_data), headers=HEADERS, url_username=self.user, + url_password=self.pwd, validate_certs=self.certs, method='POST') + + self.ss_vol = create_resp + # Doing a check after creation because the creation call fails to set the specified warning threshold + if self.ss_vol_needs_update: + self.update_ss_vol() + else: + self.module.exit_json(changed=True, **create_resp) + + def update_ss_vol(self): + post_data = dict( + fullThreshold=self.full_threshold, + ) + + rc, resp = request(self.url + 'storage-systems/%s/snapshot-volumes/%s' % (self.ssid, self.ss_vol['id']), + data=json.dumps(post_data), headers=HEADERS, url_username=self.user, url_password=self.pwd, + method='POST', validate_certs=self.certs) + + self.module.exit_json(changed=True, **resp) + + def remove_ss_vol(self): + rc, resp = request(self.url + 'storage-systems/%s/snapshot-volumes/%s' % (self.ssid, self.ss_vol['id']), + headers=HEADERS, url_username=self.user, url_password=self.pwd, validate_certs=self.certs, + method='DELETE') + self.module.exit_json(changed=True, msg="Volume successfully deleted") + + def apply(self): + if self.state == 'present': + if self.ss_vol_exists: + if self.ss_vol_needs_update: + self.update_ss_vol() + else: + self.module.exit_json(changed=False, **self.ss_vol) + else: + self.create_ss_vol() + else: + if self.ss_vol_exists: + self.remove_ss_vol() + else: + self.module.exit_json(changed=False, msg="Volume already absent") + + +def main(): + sv = SnapshotVolume() + sv.apply() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/storage/netapp/netapp_e_storage_system.py b/plugins/modules/storage/netapp/netapp_e_storage_system.py new file mode 100644 index 0000000000..1ee2718384 --- /dev/null +++ b/plugins/modules/storage/netapp/netapp_e_storage_system.py @@ -0,0 +1,295 @@ +#!/usr/bin/python + +# (c) 2016, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +module: netapp_e_storage_system +short_description: NetApp E-Series Web Services Proxy manage storage arrays +description: +- Manage the arrays accessible via a NetApp Web Services Proxy for NetApp E-series storage arrays. +options: + api_username: + description: + - The username to authenticate with the SANtricity WebServices Proxy or embedded REST API. + required: true + api_password: + description: + - The password to authenticate with the SANtricity WebServices Proxy or embedded REST API. + required: true + api_url: + description: + - The url to the SANtricity WebServices Proxy or embedded REST API. + required: true + validate_certs: + description: + - Should https certificates be validated? + type: bool + default: 'yes' + ssid: + description: + - The ID of the array to manage. This value must be unique for each array. + required: true + state: + description: + - Whether the specified array should be configured on the Web Services Proxy or not. 
+ required: true + choices: ['present', 'absent'] + controller_addresses: + description: + - The list addresses for the out-of-band management adapter or the agent host. Mutually exclusive of array_wwn parameter. + required: true + array_wwn: + description: + - The WWN of the array to manage. Only necessary if in-band managing multiple arrays on the same agent host. Mutually exclusive of + controller_addresses parameter. + array_password: + description: + - The management password of the array to manage, if set. + enable_trace: + description: + - Enable trace logging for SYMbol calls to the storage system. + type: bool + default: 'no' + meta_tags: + description: + - Optional meta tags to associate to this storage system +author: Kevin Hulquest (@hulquest) +''' + +EXAMPLES = ''' +--- + - name: Presence of storage system + netapp_e_storage_system: + ssid: "{{ item.key }}" + state: present + api_url: "{{ netapp_api_url }}" + api_username: "{{ netapp_api_username }}" + api_password: "{{ netapp_api_password }}" + validate_certs: "{{ netapp_api_validate_certs }}" + controller_addresses: + - "{{ item.value.address1 }}" + - "{{ item.value.address2 }}" + with_dict: "{{ storage_systems }}" + when: check_storage_system +''' + +RETURN = ''' +msg: + description: State of request + type: str + returned: always + sample: 'Storage system removed.' +''' +import json +from datetime import datetime as dt, timedelta +from time import sleep + +from ansible.module_utils.api import basic_auth_argument_spec +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_native +from ansible.module_utils.urls import open_url +from ansible.module_utils.six.moves.urllib.error import HTTPError + + +def request(url, data=None, headers=None, method='GET', use_proxy=True, + force=False, last_mod_time=None, timeout=10, validate_certs=True, + url_username=None, url_password=None, http_agent=None, force_basic_auth=True, ignore_errors=False): + try: + r = open_url(url=url, data=data, headers=headers, method=method, use_proxy=use_proxy, + force=force, last_mod_time=last_mod_time, timeout=timeout, validate_certs=validate_certs, + url_username=url_username, url_password=url_password, http_agent=http_agent, + force_basic_auth=force_basic_auth) + except HTTPError as err: + r = err.fp + + try: + raw_data = r.read() + if raw_data: + data = json.loads(raw_data) + else: + raw_data = None + except Exception: + if ignore_errors: + pass + else: + raise Exception(raw_data) + + resp_code = r.getcode() + + if resp_code >= 400 and not ignore_errors: + raise Exception(resp_code, data) + else: + return resp_code, data + + +def do_post(ssid, api_url, post_headers, api_usr, api_pwd, validate_certs, request_body, timeout): + (rc, resp) = request(api_url + "/storage-systems", data=request_body, headers=post_headers, + method='POST', url_username=api_usr, url_password=api_pwd, + validate_certs=validate_certs) + status = None + return_resp = resp + if 'status' in resp: + status = resp['status'] + + if rc == 201: + status = 'neverContacted' + fail_after_time = dt.utcnow() + timedelta(seconds=timeout) + + while status == 'neverContacted': + if dt.utcnow() > fail_after_time: + raise Exception("web proxy timed out waiting for array status") + + sleep(1) + (rc, system_resp) = request(api_url + "/storage-systems/%s" % ssid, + headers=dict(Accept="application/json"), url_username=api_usr, + url_password=api_pwd, validate_certs=validate_certs, + ignore_errors=True) + status = system_resp['status'] + return_resp = 
system_resp + + return status, return_resp + + +def main(): + argument_spec = basic_auth_argument_spec() + argument_spec.update(dict( + state=dict(required=True, choices=['present', 'absent']), + ssid=dict(required=True, type='str'), + controller_addresses=dict(type='list'), + array_wwn=dict(required=False, type='str'), + array_password=dict(required=False, type='str', no_log=True), + array_status_timeout_sec=dict(default=60, type='int'), + enable_trace=dict(default=False, type='bool'), + meta_tags=dict(type='list') + )) + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + mutually_exclusive=[['controller_addresses', 'array_wwn']], + required_if=[('state', 'present', ['controller_addresses'])] + ) + + p = module.params + + state = p['state'] + ssid = p['ssid'] + controller_addresses = p['controller_addresses'] + array_wwn = p['array_wwn'] + array_password = p['array_password'] + array_status_timeout_sec = p['array_status_timeout_sec'] + validate_certs = p['validate_certs'] + meta_tags = p['meta_tags'] + enable_trace = p['enable_trace'] + + api_usr = p['api_username'] + api_pwd = p['api_password'] + api_url = p['api_url'] + + changed = False + array_exists = False + + try: + (rc, resp) = request(api_url + "/storage-systems/%s" % ssid, headers=dict(Accept="application/json"), + url_username=api_usr, url_password=api_pwd, validate_certs=validate_certs, + ignore_errors=True) + except Exception as err: + module.fail_json(msg="Error accessing storage-system with id [%s]. Error [%s]" % (ssid, to_native(err))) + + array_exists = True + array_detail = resp + + if rc == 200: + if state == 'absent': + changed = True + array_exists = False + elif state == 'present': + current_addresses = frozenset(i for i in (array_detail['ip1'], array_detail['ip2']) if i) + if set(controller_addresses) != current_addresses: + changed = True + if array_detail['wwn'] != array_wwn and array_wwn is not None: + module.fail_json( + msg='It seems you may have specified a bad WWN. The storage system ID you specified, %s, currently has the WWN of %s' % + (ssid, array_detail['wwn']) + ) + elif rc == 404: + if state == 'present': + changed = True + array_exists = False + else: + changed = False + module.exit_json(changed=changed, msg="Storage system was not present.") + + if changed and not module.check_mode: + if state == 'present': + if not array_exists: + # add the array + array_add_req = dict( + id=ssid, + controllerAddresses=controller_addresses, + metaTags=meta_tags, + enableTrace=enable_trace + ) + + if array_wwn: + array_add_req['wwn'] = array_wwn + + if array_password: + array_add_req['password'] = array_password + + post_headers = dict(Accept="application/json") + post_headers['Content-Type'] = 'application/json' + request_data = json.dumps(array_add_req) + + try: + (rc, resp) = do_post(ssid, api_url, post_headers, api_usr, api_pwd, validate_certs, request_data, + array_status_timeout_sec) + except Exception as err: + module.fail_json(msg="Failed to add storage system. Id[%s]. Request body [%s]. Error[%s]." % + (ssid, request_data, to_native(err))) + + else: # array exists, modify... 
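+                # Descriptive note (inferred from the request body below): the existing
+                # registration is updated by re-posting its settings, and because
+                # removeAllTags=True is sent, any meta tags not re-specified in metaTags
+                # are cleared.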
+ post_headers = dict(Accept="application/json") + post_headers['Content-Type'] = 'application/json' + post_body = dict( + controllerAddresses=controller_addresses, + removeAllTags=True, + enableTrace=enable_trace, + metaTags=meta_tags + ) + + try: + (rc, resp) = do_post(ssid, api_url, post_headers, api_usr, api_pwd, validate_certs, post_body, + array_status_timeout_sec) + except Exception as err: + module.fail_json(msg="Failed to update storage system. Id[%s]. Request body [%s]. Error[%s]." % + (ssid, post_body, to_native(err))) + + elif state == 'absent': + # delete the array + try: + (rc, resp) = request(api_url + "/storage-systems/%s" % ssid, method='DELETE', + url_username=api_usr, + url_password=api_pwd, validate_certs=validate_certs) + except Exception as err: + module.fail_json(msg="Failed to remove storage array. Id[%s]. Error[%s]." % (ssid, to_native(err))) + + if rc == 422: + module.exit_json(changed=changed, msg="Storage system was not presented.") + if rc == 204: + module.exit_json(changed=changed, msg="Storage system removed.") + + module.exit_json(changed=changed, **resp) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/storage/netapp/netapp_e_storagepool.py b/plugins/modules/storage/netapp/netapp_e_storagepool.py new file mode 100644 index 0000000000..05f209e962 --- /dev/null +++ b/plugins/modules/storage/netapp/netapp_e_storagepool.py @@ -0,0 +1,935 @@ +#!/usr/bin/python + +# (c) 2016, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +ANSIBLE_METADATA = {"metadata_version": "1.1", + "status": ["preview"], + "supported_by": "community"} + +DOCUMENTATION = ''' +--- +module: netapp_e_storagepool +short_description: NetApp E-Series manage volume groups and disk pools +description: Create or remove volume groups and disk pools for NetApp E-series storage arrays. +author: + - Kevin Hulquest (@hulquest) + - Nathan Swartz (@ndswartz) +extends_documentation_fragment: +- netapp.ontap.netapp.eseries + +options: + state: + description: + - Whether the specified storage pool should exist or not. + - Note that removing a storage pool currently requires the removal of all defined volumes first. + required: true + choices: ["present", "absent"] + name: + description: + - The name of the storage pool to manage + required: true + criteria_drive_count: + description: + - The number of disks to use for building the storage pool. + - When I(state=="present") then I(criteria_drive_count) or I(criteria_min_usable_capacity) must be specified. + - The pool will be expanded if this number exceeds the number of disks already in place (See expansion note below) + required: false + type: int + criteria_min_usable_capacity: + description: + - The minimum size of the storage pool (in size_unit). + - When I(state=="present") then I(criteria_drive_count) or I(criteria_min_usable_capacity) must be specified. + - The pool will be expanded if this value exceeds its current size. (See expansion note below) + required: false + type: float + criteria_drive_type: + description: + - The type of disk (hdd or ssd) to use when searching for candidates to use. + - When not specified each drive type will be evaluated until successful drive candidates are found starting with + the most prevalent drive type. 
+    required: false
+    choices: ["hdd","ssd"]
+  criteria_size_unit:
+    description:
+      - The unit used to interpret size parameters
+    choices: ["bytes", "b", "kb", "mb", "gb", "tb", "pb", "eb", "zb", "yb"]
+    default: "gb"
+  criteria_drive_min_size:
+    description:
+      - The minimum individual drive size (in size_unit) to consider when choosing drives for the storage pool.
+  criteria_drive_interface_type:
+    description:
+      - The interface type to use when selecting drives for the storage pool
+      - If not provided then all interface types will be considered.
+    choices: ["sas", "sas4k", "fibre", "fibre520b", "scsi", "sata", "pata"]
+    required: false
+  criteria_drive_require_da:
+    description:
+      - Ensures the storage pool will be created with only data assurance (DA) capable drives.
+      - Only available for new storage pools; existing storage pools cannot be converted.
+    default: false
+    type: bool
+  criteria_drive_require_fde:
+    description:
+      - Whether full disk encryption ability is required for drives to be added to the storage pool
+    default: false
+    type: bool
+  raid_level:
+    description:
+      - The RAID level of the storage pool to be created.
+      - Required only when I(state=="present").
+      - When I(raid_level=="raidDiskPool") then I(criteria_drive_count >= 10 or criteria_drive_count >= 11) is required
+        depending on the storage array specifications.
+      - When I(raid_level=="raid0") then I(1<=criteria_drive_count) is required.
+      - When I(raid_level=="raid1") then I(2<=criteria_drive_count) is required and the drive count must be even.
+      - When I(raid_level=="raid3") then I(3<=criteria_drive_count<=30) is required.
+      - When I(raid_level=="raid5") then I(3<=criteria_drive_count<=30) is required.
+      - When I(raid_level=="raid6") then I(5<=criteria_drive_count<=30) is required.
+      - Note that raidAll will be treated as raidDiskPool and raid3 as raid5.
    required: false
+    choices: ["raidAll", "raid0", "raid1", "raid3", "raid5", "raid6", "raidDiskPool"]
+    default: "raidDiskPool"
+  secure_pool:
+    description:
+      - Enables security at rest feature on the storage pool.
+      - Will only work if all drives in the pool are security capable (FDE, FIPS, or mix)
+      - Warning, once security is enabled it is impossible to disable without erasing the drives.
+    required: false
+    type: bool
+  reserve_drive_count:
+    description:
+      - Set the number of drives reserved by the storage pool for reconstruction operations.
+      - Only valid on raid disk pools.
+    required: false
+  remove_volumes:
+    description:
+      - Prior to removing a storage pool, delete all volumes in the pool.
+    default: true
+  erase_secured_drives:
+    description:
+      - If I(state=="absent") then all storage pool drives will be erased.
+      - If I(state=="present") then delete all available storage array drives that have security enabled.
+    default: true
+    type: bool
+notes:
+  - The expansion operations are non-blocking due to the time-consuming nature of expanding volume groups.
+  - Traditional volume groups (raid0, raid1, raid5, raid6) are performed in steps dictated by the storage array. Each
+    required step will be attempted until the request fails, which is likely because of the required expansion time.
+  - raidUnsupported will be treated as raid0, raidAll as raidDiskPool and raid3 as raid5.
+  - Tray loss protection and drawer loss protection will be chosen if at all possible.
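+  - Check mode is supported.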
+''' +EXAMPLES = """ +- name: No disk groups + netapp_e_storagepool: + ssid: "{{ ssid }}" + name: "{{ item }}" + state: absent + api_url: "{{ netapp_api_url }}" + api_username: "{{ netapp_api_username }}" + api_password: "{{ netapp_api_password }}" + validate_certs: "{{ netapp_api_validate_certs }}" +""" +RETURN = """ +msg: + description: Success message + returned: success + type: str + sample: Json facts for the pool that was created. +""" +import functools +from itertools import groupby +from time import sleep +from pprint import pformat +from ansible_collections.netapp.ontap.plugins.module_utils.netapp import NetAppESeriesModule +from ansible.module_utils._text import to_native + + +def get_most_common_elements(iterator): + """Returns a generator containing a descending list of most common elements.""" + if not isinstance(iterator, list): + raise TypeError("iterator must be a list.") + + grouped = [(key, len(list(group))) for key, group in groupby(sorted(iterator))] + return sorted(grouped, key=lambda x: x[1], reverse=True) + + +def memoize(func): + """Generic memoizer for any function with any number of arguments including zero.""" + + @functools.wraps(func) + def wrapper(*args, **kwargs): + class MemoizeFuncArgs(dict): + def __missing__(self, _key): + self[_key] = func(*args, **kwargs) + return self[_key] + + key = str((args, kwargs)) if args and kwargs else "no_argument_response" + return MemoizeFuncArgs().__getitem__(key) + + return wrapper + + +class NetAppESeriesStoragePool(NetAppESeriesModule): + EXPANSION_TIMEOUT_SEC = 10 + DEFAULT_DISK_POOL_MINIMUM_DISK_COUNT = 11 + + def __init__(self): + version = "02.00.0000.0000" + ansible_options = dict( + state=dict(required=True, choices=["present", "absent"], type="str"), + name=dict(required=True, type="str"), + criteria_size_unit=dict(choices=["bytes", "b", "kb", "mb", "gb", "tb", "pb", "eb", "zb", "yb"], + default="gb", type="str"), + criteria_drive_count=dict(type="int"), + criteria_drive_interface_type=dict(choices=["sas", "sas4k", "fibre", "fibre520b", "scsi", "sata", "pata"], + type="str"), + criteria_drive_type=dict(choices=["ssd", "hdd"], type="str", required=False), + criteria_drive_min_size=dict(type="float"), + criteria_drive_require_da=dict(type="bool", required=False), + criteria_drive_require_fde=dict(type="bool", required=False), + criteria_min_usable_capacity=dict(type="float"), + raid_level=dict(choices=["raidAll", "raid0", "raid1", "raid3", "raid5", "raid6", "raidDiskPool"], + default="raidDiskPool"), + erase_secured_drives=dict(type="bool", default=True), + secure_pool=dict(type="bool", default=False), + reserve_drive_count=dict(type="int"), + remove_volumes=dict(type="bool", default=True)) + + required_if = [["state", "present", ["raid_level"]]] + super(NetAppESeriesStoragePool, self).__init__(ansible_options=ansible_options, + web_services_version=version, + supports_check_mode=True, + required_if=required_if) + + args = self.module.params + self.state = args["state"] + self.ssid = args["ssid"] + self.name = args["name"] + self.criteria_drive_count = args["criteria_drive_count"] + self.criteria_min_usable_capacity = args["criteria_min_usable_capacity"] + self.criteria_size_unit = args["criteria_size_unit"] + self.criteria_drive_min_size = args["criteria_drive_min_size"] + self.criteria_drive_type = args["criteria_drive_type"] + self.criteria_drive_interface_type = args["criteria_drive_interface_type"] + self.criteria_drive_require_fde = args["criteria_drive_require_fde"] + self.criteria_drive_require_da = 
args["criteria_drive_require_da"] + self.raid_level = args["raid_level"] + self.erase_secured_drives = args["erase_secured_drives"] + self.secure_pool = args["secure_pool"] + self.reserve_drive_count = args["reserve_drive_count"] + self.remove_volumes = args["remove_volumes"] + self.pool_detail = None + + # Change all sizes to be measured in bytes + if self.criteria_min_usable_capacity: + self.criteria_min_usable_capacity = int(self.criteria_min_usable_capacity * + self.SIZE_UNIT_MAP[self.criteria_size_unit]) + if self.criteria_drive_min_size: + self.criteria_drive_min_size = int(self.criteria_drive_min_size * + self.SIZE_UNIT_MAP[self.criteria_size_unit]) + self.criteria_size_unit = "bytes" + + # Adjust unused raid level option to reflect documentation + if self.raid_level == "raidAll": + self.raid_level = "raidDiskPool" + if self.raid_level == "raid3": + self.raid_level = "raid5" + + @property + @memoize + def available_drives(self): + """Determine the list of available drives""" + return [drive["id"] for drive in self.drives if drive["available"] and drive["status"] == "optimal"] + + @property + @memoize + def available_drive_types(self): + """Determine the types of available drives sorted by the most common first.""" + types = [drive["driveMediaType"] for drive in self.drives] + return [entry[0] for entry in get_most_common_elements(types)] + + @property + @memoize + def available_drive_interface_types(self): + """Determine the types of available drives.""" + interfaces = [drive["phyDriveType"] for drive in self.drives] + return [entry[0] for entry in get_most_common_elements(interfaces)] + + @property + def storage_pool_drives(self, exclude_hotspares=True): + """Retrieve list of drives found in storage pool.""" + if exclude_hotspares: + return [drive for drive in self.drives + if drive["currentVolumeGroupRef"] == self.pool_detail["id"] and not drive["hotSpare"]] + + return [drive for drive in self.drives if drive["currentVolumeGroupRef"] == self.pool_detail["id"]] + + @property + def expandable_drive_count(self): + """Maximum number of drives that a storage pool can be expanded at a given time.""" + capabilities = None + if self.raid_level == "raidDiskPool": + return len(self.available_drives) + + try: + rc, capabilities = self.request("storage-systems/%s/capabilities" % self.ssid) + except Exception as error: + self.module.fail_json(msg="Failed to fetch maximum expandable drive count. Array id [%s]. Error[%s]." + % (self.ssid, to_native(error))) + + return capabilities["featureParameters"]["maxDCEDrives"] + + @property + def disk_pool_drive_minimum(self): + """Provide the storage array's minimum disk pool drive count.""" + rc, attr = self.request("storage-systems/%s/symbol/getSystemAttributeDefaults" % self.ssid, ignore_errors=True) + + # Standard minimum is 11 drives but some allow 10 drives. 
11 will be the default
+        if (rc != 200 or "minimumDriveCount" not in attr["defaults"]["diskPoolDefaultAttributes"].keys() or
+                attr["defaults"]["diskPoolDefaultAttributes"]["minimumDriveCount"] == 0):
+            return self.DEFAULT_DISK_POOL_MINIMUM_DISK_COUNT
+
+        return attr["defaults"]["diskPoolDefaultAttributes"]["minimumDriveCount"]
+
+    def get_available_drive_capacities(self, drive_id_list=None):
+        """Determine the list of available drive capacities."""
+        if drive_id_list:
+            available_drive_capacities = set([int(drive["usableCapacity"]) for drive in self.drives
+                                              if drive["id"] in drive_id_list and drive["available"] and
+                                              drive["status"] == "optimal"])
+        else:
+            available_drive_capacities = set([int(drive["usableCapacity"]) for drive in self.drives
+                                              if drive["available"] and drive["status"] == "optimal"])
+
+        self.module.log("available drive capacities: %s" % available_drive_capacities)
+        return list(available_drive_capacities)
+
+    @property
+    def drives(self):
+        """Retrieve list of drives found in the storage array."""
+        drives = None
+        try:
+            rc, drives = self.request("storage-systems/%s/drives" % self.ssid)
+        except Exception as error:
+            self.module.fail_json(msg="Failed to fetch disk drives. Array id [%s]. Error[%s]."
+                                      % (self.ssid, to_native(error)))
+
+        return drives
+
+    def is_drive_count_valid(self, drive_count):
+        """Validate drive count criteria is met."""
+        if self.criteria_drive_count and drive_count < self.criteria_drive_count:
+            return False
+
+        if self.raid_level == "raidDiskPool":
+            return drive_count >= self.disk_pool_drive_minimum
+        if self.raid_level == "raid0":
+            return drive_count > 0
+        if self.raid_level == "raid1":
+            return drive_count >= 2 and (drive_count % 2) == 0
+        if self.raid_level in ["raid3", "raid5"]:
+            return 3 <= drive_count <= 30
+        if self.raid_level == "raid6":
+            return 5 <= drive_count <= 30
+        return False
+
+    @property
+    def storage_pool(self):
+        """Retrieve storage pool information."""
+        storage_pools_resp = None
+        try:
+            rc, storage_pools_resp = self.request("storage-systems/%s/storage-pools" % self.ssid)
+        except Exception as err:
+            self.module.fail_json(msg="Failed to get storage pools. Array id [%s]. Error[%s]. State[%s]."
+                                      % (self.ssid, to_native(err), self.state))
+
+        pool_detail = [pool for pool in storage_pools_resp if pool["name"] == self.name]
+        return pool_detail[0] if pool_detail else dict()
+
+    @property
+    def storage_pool_volumes(self):
+        """Retrieve list of volumes associated with storage pool."""
+        volumes_resp = None
+        try:
+            rc, volumes_resp = self.request("storage-systems/%s/volumes" % self.ssid)
+        except Exception as err:
+            self.module.fail_json(msg="Failed to get volumes. Array id [%s]. Error[%s]. State[%s]."
+ % (self.ssid, to_native(err), self.state)) + + group_ref = self.storage_pool["volumeGroupRef"] + storage_pool_volume_list = [volume["id"] for volume in volumes_resp if volume["volumeGroupRef"] == group_ref] + return storage_pool_volume_list + + def get_ddp_capacity(self, expansion_drive_list): + """Return the total usable capacity based on the additional drives.""" + + def get_ddp_error_percent(_drive_count, _extent_count): + """Determine the space reserved for reconstruction""" + if _drive_count <= 36: + if _extent_count <= 600: + return 0.40 + elif _extent_count <= 1400: + return 0.35 + elif _extent_count <= 6200: + return 0.20 + elif _extent_count <= 50000: + return 0.15 + elif _drive_count <= 64: + if _extent_count <= 600: + return 0.20 + elif _extent_count <= 1400: + return 0.15 + elif _extent_count <= 6200: + return 0.10 + elif _extent_count <= 50000: + return 0.05 + elif _drive_count <= 480: + if _extent_count <= 600: + return 0.20 + elif _extent_count <= 1400: + return 0.15 + elif _extent_count <= 6200: + return 0.10 + elif _extent_count <= 50000: + return 0.05 + + self.module.fail_json(msg="Drive count exceeded the error percent table. Array[%s]" % self.ssid) + + def get_ddp_reserved_drive_count(_disk_count): + """Determine the number of reserved drive.""" + reserve_count = 0 + + if self.reserve_drive_count: + reserve_count = self.reserve_drive_count + elif _disk_count >= 256: + reserve_count = 8 + elif _disk_count >= 192: + reserve_count = 7 + elif _disk_count >= 128: + reserve_count = 6 + elif _disk_count >= 64: + reserve_count = 4 + elif _disk_count >= 32: + reserve_count = 3 + elif _disk_count >= 12: + reserve_count = 2 + elif _disk_count == 11: + reserve_count = 1 + + return reserve_count + + if self.pool_detail: + drive_count = len(self.storage_pool_drives) + len(expansion_drive_list) + else: + drive_count = len(expansion_drive_list) + + drive_usable_capacity = min(min(self.get_available_drive_capacities()), + min(self.get_available_drive_capacities(expansion_drive_list))) + drive_data_extents = ((drive_usable_capacity - 8053063680) / 536870912) + maximum_stripe_count = (drive_count * drive_data_extents) / 10 + + error_percent = get_ddp_error_percent(drive_count, drive_data_extents) + error_overhead = (drive_count * drive_data_extents / 10 * error_percent + 10) / 10 + + total_stripe_count = maximum_stripe_count - error_overhead + stripe_count_per_drive = total_stripe_count / drive_count + reserved_stripe_count = get_ddp_reserved_drive_count(drive_count) * stripe_count_per_drive + available_stripe_count = total_stripe_count - reserved_stripe_count + + return available_stripe_count * 4294967296 + + @memoize + def get_candidate_drives(self): + """Retrieve set of drives candidates for creating a new storage pool.""" + + def get_candidate_drive_request(): + """Perform request for new volume creation.""" + candidates_list = list() + drive_types = [self.criteria_drive_type] if self.criteria_drive_type else self.available_drive_types + interface_types = [self.criteria_drive_interface_type] \ + if self.criteria_drive_interface_type else self.available_drive_interface_types + + for interface_type in interface_types: + for drive_type in drive_types: + candidates = None + volume_candidate_request_data = dict( + type="diskPool" if self.raid_level == "raidDiskPool" else "traditional", + diskPoolVolumeCandidateRequestData=dict( + reconstructionReservedDriveCount=65535)) + candidate_selection_type = dict( + candidateSelectionType="count", + 
driveRefList=dict(driveRef=self.available_drives))
+                    criteria = dict(raidLevel=self.raid_level,
+                                    phyDriveType=interface_type,
+                                    dssPreallocEnabled=False,
+                                    securityType="capable" if self.criteria_drive_require_fde else "none",
+                                    driveMediaType=drive_type,
+                                    onlyProtectionInformationCapable=True if self.criteria_drive_require_da else False,
+                                    volumeCandidateRequestData=volume_candidate_request_data,
+                                    allocateReserveSpace=False,
+                                    securityLevel="fde" if self.criteria_drive_require_fde else "none",
+                                    candidateSelectionType=candidate_selection_type)
+
+                    try:
+                        rc, candidates = self.request("storage-systems/%s/symbol/getVolumeCandidates?verboseError"
+                                                      "Response=true" % self.ssid, data=criteria, method="POST")
+                    except Exception as error:
+                        self.module.fail_json(msg="Failed to retrieve volume candidates. Array [%s]. Error [%s]."
+                                                  % (self.ssid, to_native(error)))
+
+                    if candidates:
+                        candidates_list.extend(candidates["volumeCandidate"])
+
+            # Sort output based on tray and then drawer protection first
+            tray_drawer_protection = list()
+            tray_protection = list()
+            drawer_protection = list()
+            no_protection = list()
+            sorted_candidates = list()
+            for item in candidates_list:
+                if item["trayLossProtection"]:
+                    if item["drawerLossProtection"]:
+                        tray_drawer_protection.append(item)
+                    else:
+                        tray_protection.append(item)
+                elif item["drawerLossProtection"]:
+                    drawer_protection.append(item)
+                else:
+                    no_protection.append(item)
+
+            if tray_drawer_protection:
+                sorted_candidates.extend(tray_drawer_protection)
+            if tray_protection:
+                sorted_candidates.extend(tray_protection)
+            if drawer_protection:
+                sorted_candidates.extend(drawer_protection)
+            if no_protection:
+                sorted_candidates.extend(no_protection)
+
+            return sorted_candidates
+
+        # Determine the appropriate candidate list
+        for candidate in get_candidate_drive_request():
+
+            # Evaluate candidates for required drive count, collective drive usable capacity and minimum drive size
+            if self.criteria_drive_count:
+                if self.criteria_drive_count != int(candidate["driveCount"]):
+                    continue
+            if self.criteria_min_usable_capacity:
+                if ((self.raid_level == "raidDiskPool" and self.criteria_min_usable_capacity >
+                     self.get_ddp_capacity(candidate["driveRefList"]["driveRef"])) or
+                        self.criteria_min_usable_capacity > int(candidate["usableSize"])):
+                    continue
+            if self.criteria_drive_min_size:
+                if self.criteria_drive_min_size > min(self.get_available_drive_capacities(candidate["driveRefList"]["driveRef"])):
+                    continue
+
+            return candidate
+
+        self.module.fail_json(msg="Not enough drives to meet the specified criteria. Array [%s]." % self.ssid)
+
+    @memoize
+    def get_expansion_candidate_drives(self):
+        """Retrieve required expansion drive list.
+
+        Note: To satisfy the expansion criteria, each item in the candidate list must be added to the specified
+        group, since there is a potential limitation on how many drives can be incorporated at a time.
+            * Traditional raid volume groups can be expanded by at most two drives at a time. No limits on raid disk pools.
+
+        :return list(candidate): list of candidate structures from the getVolumeGroupExpansionCandidates symbol endpoint
+        """
+
+        def get_expansion_candidate_drive_request():
+            """Perform the request for expanding existing volume groups or disk pools.
+
+            Note: the list of candidate structures does not necessarily produce candidates that meet all criteria.
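+
+            Note: both symbol endpoints expect the storage pool's reference id
+            (pool_detail["id"]) as the POST body rather than a JSON criteria document.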
+ """ + candidates_list = None + url = "storage-systems/%s/symbol/getVolumeGroupExpansionCandidates?verboseErrorResponse=true" % self.ssid + if self.raid_level == "raidDiskPool": + url = "storage-systems/%s/symbol/getDiskPoolExpansionCandidates?verboseErrorResponse=true" % self.ssid + + try: + rc, candidates_list = self.request(url, method="POST", data=self.pool_detail["id"]) + except Exception as error: + self.module.fail_json(msg="Failed to retrieve volume candidates. Array [%s]. Error [%s]." + % (self.ssid, to_native(error))) + + return candidates_list["candidates"] + + required_candidate_list = list() + required_additional_drives = 0 + required_additional_capacity = 0 + total_required_capacity = 0 + + # determine whether and how much expansion is need to satisfy the specified criteria + if self.criteria_min_usable_capacity: + total_required_capacity = self.criteria_min_usable_capacity + required_additional_capacity = self.criteria_min_usable_capacity - int(self.pool_detail["totalRaidedSpace"]) + + if self.criteria_drive_count: + required_additional_drives = self.criteria_drive_count - len(self.storage_pool_drives) + + # Determine the appropriate expansion candidate list + if required_additional_drives > 0 or required_additional_capacity > 0: + for candidate in get_expansion_candidate_drive_request(): + + if self.criteria_drive_min_size: + if self.criteria_drive_min_size > min(self.get_available_drive_capacities(candidate["drives"])): + continue + + if self.raid_level == "raidDiskPool": + if (len(candidate["drives"]) >= required_additional_drives and + self.get_ddp_capacity(candidate["drives"]) >= total_required_capacity): + required_candidate_list.append(candidate) + break + else: + required_additional_drives -= len(candidate["drives"]) + required_additional_capacity -= int(candidate["usableCapacity"]) + required_candidate_list.append(candidate) + + # Determine if required drives and capacities are satisfied + if required_additional_drives <= 0 and required_additional_capacity <= 0: + break + else: + self.module.fail_json(msg="Not enough drives to meet the specified criteria. Array [%s]." % self.ssid) + + return required_candidate_list + + def get_reserve_drive_count(self): + """Retrieve the current number of reserve drives for raidDiskPool (Only for raidDiskPool).""" + + if not self.pool_detail: + self.module.fail_json(msg="The storage pool must exist. Array [%s]." % self.ssid) + + if self.raid_level != "raidDiskPool": + self.module.fail_json(msg="The storage pool must be a raidDiskPool. Pool [%s]. Array [%s]." + % (self.pool_detail["id"], self.ssid)) + + return self.pool_detail["volumeGroupData"]["diskPoolData"]["reconstructionReservedDriveCount"] + + def get_maximum_reserve_drive_count(self): + """Retrieve the maximum number of reserve drives for storage pool (Only for raidDiskPool).""" + if self.raid_level != "raidDiskPool": + self.module.fail_json(msg="The storage pool must be a raidDiskPool. Pool [%s]. Array [%s]." 
+ % (self.pool_detail["id"], self.ssid)) + + drives_ids = list() + + if self.pool_detail: + drives_ids.extend(self.storage_pool_drives) + for candidate in self.get_expansion_candidate_drives(): + drives_ids.extend((candidate["drives"])) + else: + candidate = self.get_candidate_drives() + drives_ids.extend(candidate["driveRefList"]["driveRef"]) + + drive_count = len(drives_ids) + maximum_reserve_drive_count = min(int(drive_count * 0.2 + 1), drive_count - 10) + if maximum_reserve_drive_count > 10: + maximum_reserve_drive_count = 10 + + return maximum_reserve_drive_count + + def set_reserve_drive_count(self, check_mode=False): + """Set the reserve drive count for raidDiskPool.""" + changed = False + + if self.raid_level == "raidDiskPool" and self.reserve_drive_count: + maximum_count = self.get_maximum_reserve_drive_count() + + if self.reserve_drive_count < 0 or self.reserve_drive_count > maximum_count: + self.module.fail_json(msg="Supplied reserve drive count is invalid or exceeds the maximum allowed. " + "Note that it may be necessary to wait for expansion operations to complete " + "before the adjusting the reserve drive count. Maximum [%s]. Array [%s]." + % (maximum_count, self.ssid)) + + if self.reserve_drive_count != self.get_reserve_drive_count(): + changed = True + + if not check_mode: + try: + rc, resp = self.request("storage-systems/%s/symbol/setDiskPoolReservedDriveCount" % self.ssid, + method="POST", data=dict(volumeGroupRef=self.pool_detail["id"], + newDriveCount=self.reserve_drive_count)) + except Exception as error: + self.module.fail_json(msg="Failed to set reserve drive count for disk pool. Disk Pool [%s]." + " Array [%s]." % (self.pool_detail["id"], self.ssid)) + + return changed + + def erase_all_available_secured_drives(self, check_mode=False): + """Erase all available drives that have encryption at rest feature enabled.""" + changed = False + drives_list = list() + for drive in self.drives: + if drive["available"] and drive["fdeEnabled"]: + changed = True + drives_list.append(drive["id"]) + + if drives_list and not check_mode: + try: + rc, resp = self.request("storage-systems/%s/symbol/reprovisionDrive?verboseErrorResponse=true" + % self.ssid, method="POST", data=dict(driveRef=drives_list)) + except Exception as error: + self.module.fail_json(msg="Failed to erase all secured drives. Array [%s]" % self.ssid) + + return changed + + def create_storage_pool(self): + """Create new storage pool.""" + url = "storage-systems/%s/symbol/createVolumeGroup?verboseErrorResponse=true" % self.ssid + request_body = dict(label=self.name, + candidate=self.get_candidate_drives()) + + if self.raid_level == "raidDiskPool": + url = "storage-systems/%s/symbol/createDiskPool?verboseErrorResponse=true" % self.ssid + + request_body.update( + dict(backgroundOperationPriority="useDefault", + criticalReconstructPriority="useDefault", + degradedReconstructPriority="useDefault", + poolUtilizationCriticalThreshold=65535, + poolUtilizationWarningThreshold=0)) + + if self.reserve_drive_count: + request_body.update(dict(volumeCandidateData=dict( + diskPoolVolumeCandidateData=dict(reconstructionReservedDriveCount=self.reserve_drive_count)))) + + try: + rc, resp = self.request(url, method="POST", data=request_body) + except Exception as error: + self.module.fail_json(msg="Failed to create storage pool. Array id [%s]. Error[%s]." 
+ % (self.ssid, to_native(error))) + + # Update drive and storage pool information + self.pool_detail = self.storage_pool + + def delete_storage_pool(self): + """Delete storage pool.""" + storage_pool_drives = [drive["id"] for drive in self.storage_pool_drives if drive["fdeEnabled"]] + try: + delete_volumes_parameter = "?delete-volumes=true" if self.remove_volumes else "" + rc, resp = self.request("storage-systems/%s/storage-pools/%s%s" + % (self.ssid, self.pool_detail["id"], delete_volumes_parameter), method="DELETE") + except Exception as error: + self.module.fail_json(msg="Failed to delete storage pool. Pool id [%s]. Array id [%s]. Error[%s]." + % (self.pool_detail["id"], self.ssid, to_native(error))) + + if storage_pool_drives and self.erase_secured_drives: + try: + rc, resp = self.request("storage-systems/%s/symbol/reprovisionDrive?verboseErrorResponse=true" + % self.ssid, method="POST", data=dict(driveRef=storage_pool_drives)) + except Exception as error: + self.module.fail_json(msg="Failed to erase drives prior to creating new storage pool. Array [%s]." + " Error [%s]." % (self.ssid, to_native(error))) + + def secure_storage_pool(self, check_mode=False): + """Enable security on an existing storage pool""" + self.pool_detail = self.storage_pool + needs_secure_pool = False + + if not self.secure_pool and self.pool_detail["securityType"] == "enabled": + self.module.fail_json(msg="It is not possible to disable storage pool security! See array documentation.") + if self.secure_pool and self.pool_detail["securityType"] != "enabled": + needs_secure_pool = True + + if needs_secure_pool and not check_mode: + try: + rc, resp = self.request("storage-systems/%s/storage-pools/%s" % (self.ssid, self.pool_detail["id"]), + data=dict(securePool=True), method="POST") + except Exception as error: + self.module.fail_json(msg="Failed to secure storage pool. Pool id [%s]. Array [%s]. Error" + " [%s]." % (self.pool_detail["id"], self.ssid, to_native(error))) + + self.pool_detail = self.storage_pool + return needs_secure_pool + + def migrate_raid_level(self, check_mode=False): + """Request storage pool raid level migration.""" + needs_migration = self.raid_level != self.pool_detail["raidLevel"] + if needs_migration and self.pool_detail["raidLevel"] == "raidDiskPool": + self.module.fail_json(msg="Raid level cannot be changed for disk pools") + + if needs_migration and not check_mode: + sp_raid_migrate_req = dict(raidLevel=self.raid_level) + + try: + rc, resp = self.request("storage-systems/%s/storage-pools/%s/raid-type-migration" + % (self.ssid, self.name), data=sp_raid_migrate_req, method="POST") + except Exception as error: + self.module.fail_json(msg="Failed to change the raid level of storage pool. Array id [%s]." + " Error[%s]." % (self.ssid, to_native(error))) + + self.pool_detail = self.storage_pool + return needs_migration + + def expand_storage_pool(self, check_mode=False): + """Add drives to existing storage pool. 
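+
+        Traditional volume groups are expanded in batches capped by expandable_drive_count,
+        while disk pools can take all candidate drives in a single request. Between batches
+        the method polls the pool's action-progress endpoint so a new expansion request is
+        not issued while a previous one is still remapping.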
+ + :return bool: whether drives were required to be added to satisfy the specified criteria.""" + expansion_candidate_list = self.get_expansion_candidate_drives() + changed_required = bool(expansion_candidate_list) + estimated_completion_time = 0.0 + + # build expandable groupings of traditional raid candidate + required_expansion_candidate_list = list() + while expansion_candidate_list: + subset = list() + while expansion_candidate_list and len(subset) < self.expandable_drive_count: + subset.extend(expansion_candidate_list.pop()["drives"]) + required_expansion_candidate_list.append(subset) + + if required_expansion_candidate_list and not check_mode: + url = "storage-systems/%s/symbol/startVolumeGroupExpansion?verboseErrorResponse=true" % self.ssid + if self.raid_level == "raidDiskPool": + url = "storage-systems/%s/symbol/startDiskPoolExpansion?verboseErrorResponse=true" % self.ssid + + while required_expansion_candidate_list: + candidate_drives_list = required_expansion_candidate_list.pop() + request_body = dict(volumeGroupRef=self.pool_detail["volumeGroupRef"], + driveRef=candidate_drives_list) + try: + rc, resp = self.request(url, method="POST", data=request_body) + except Exception as error: + rc, actions_resp = self.request("storage-systems/%s/storage-pools/%s/action-progress" + % (self.ssid, self.pool_detail["id"]), ignore_errors=True) + if rc == 200 and actions_resp: + actions = [action["currentAction"] for action in actions_resp + if action["volumeRef"] in self.storage_pool_volumes] + self.module.fail_json(msg="Failed to add drives to the storage pool possibly because of actions" + " in progress. Actions [%s]. Pool id [%s]. Array id [%s]. Error[%s]." + % (", ".join(actions), self.pool_detail["id"], self.ssid, + to_native(error))) + + self.module.fail_json(msg="Failed to add drives to storage pool. Pool id [%s]. Array id [%s]." + " Error[%s]." 
% (self.pool_detail["id"], self.ssid, to_native(error))) + + # Wait for expansion completion unless it is the last request in the candidate list + if required_expansion_candidate_list: + for dummy in range(self.EXPANSION_TIMEOUT_SEC): + rc, actions_resp = self.request("storage-systems/%s/storage-pools/%s/action-progress" + % (self.ssid, self.pool_detail["id"]), ignore_errors=True) + if rc == 200: + for action in actions_resp: + if (action["volumeRef"] in self.storage_pool_volumes and + action["currentAction"] == "remappingDce"): + sleep(1) + estimated_completion_time = action["estimatedTimeToCompletion"] + break + else: + estimated_completion_time = 0.0 + break + + return changed_required, estimated_completion_time + + def apply(self): + """Apply requested state to storage array.""" + changed = False + + if self.state == "present": + if self.criteria_drive_count is None and self.criteria_min_usable_capacity is None: + self.module.fail_json(msg="One of criteria_min_usable_capacity or criteria_drive_count must be" + " specified.") + if self.criteria_drive_count and not self.is_drive_count_valid(self.criteria_drive_count): + self.module.fail_json(msg="criteria_drive_count must be valid for the specified raid level.") + + self.pool_detail = self.storage_pool + self.module.log(pformat(self.pool_detail)) + + if self.state == "present" and self.erase_secured_drives: + self.erase_all_available_secured_drives(check_mode=True) + + # Determine whether changes need to be applied to the storage array + if self.pool_detail: + + if self.state == "absent": + changed = True + + elif self.state == "present": + + if self.criteria_drive_count and self.criteria_drive_count < len(self.storage_pool_drives): + self.module.fail_json(msg="Failed to reduce the size of the storage pool. Array [%s]. Pool [%s]." + % (self.ssid, self.pool_detail["id"])) + + if self.criteria_drive_type and self.criteria_drive_type != self.pool_detail["driveMediaType"]: + self.module.fail_json(msg="Failed! It is not possible to modify storage pool media type." + " Array [%s]. Pool [%s]." % (self.ssid, self.pool_detail["id"])) + + if (self.criteria_drive_require_da is not None and self.criteria_drive_require_da != + self.pool_detail["protectionInformationCapabilities"]["protectionInformationCapable"]): + self.module.fail_json(msg="Failed! It is not possible to modify DA-capability. Array [%s]." + " Pool [%s]." % (self.ssid, self.pool_detail["id"])) + + # Evaluate current storage pool for required change. + needs_expansion, estimated_completion_time = self.expand_storage_pool(check_mode=True) + if needs_expansion: + changed = True + if self.migrate_raid_level(check_mode=True): + changed = True + if self.secure_storage_pool(check_mode=True): + changed = True + if self.set_reserve_drive_count(check_mode=True): + changed = True + + elif self.state == "present": + changed = True + + # Apply changes to storage array + msg = "No changes were required for the storage pool [%s]." + if changed and not self.module.check_mode: + if self.state == "present": + if self.erase_secured_drives: + self.erase_all_available_secured_drives() + + if self.pool_detail: + change_list = list() + + # Expansion needs to occur before raid level migration to account for any sizing needs. 
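+                    # Each helper below reports whether it changed anything, so change_list
+                    # doubles as the human-readable summary used in the exit message.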
+                    expanded, estimated_completion_time = self.expand_storage_pool()
+                    if expanded:
+                        change_list.append("expanded")
+                    if self.migrate_raid_level():
+                        change_list.append("raid migration")
+                    if self.secure_storage_pool():
+                        change_list.append("secured")
+                    if self.set_reserve_drive_count():
+                        change_list.append("adjusted reserve drive count")
+
+                    if change_list:
+                        msg = "The following changes have been applied to the storage pool [%s]: " + ", ".join(change_list)
+
+                        if expanded:
+                            msg += "\nThe expansion operation will complete in an estimated %s minutes."\
+                                   % estimated_completion_time
+                else:
+                    self.create_storage_pool()
+                    msg = "Storage pool [%s] was created."
+
+                    if self.secure_storage_pool():
+                        msg = "Storage pool [%s] was created and secured."
+                    if self.set_reserve_drive_count():
+                        msg += " Adjusted reserve drive count."
+
+            elif self.pool_detail:
+                self.delete_storage_pool()
+                msg = "Storage pool [%s] removed."
+
+        self.pool_detail = self.storage_pool
+        self.module.log(pformat(self.pool_detail))
+        self.module.log(msg % self.name)
+        self.module.exit_json(msg=msg % self.name, changed=changed, **self.pool_detail)
+
+
+def main():
+    storage_pool = NetAppESeriesStoragePool()
+    storage_pool.apply()
+
+
+if __name__ == "__main__":
+    main()
diff --git a/plugins/modules/storage/netapp/netapp_e_syslog.py b/plugins/modules/storage/netapp/netapp_e_syslog.py
new file mode 100644
index 0000000000..8be1de185e
--- /dev/null
+++ b/plugins/modules/storage/netapp/netapp_e_syslog.py
@@ -0,0 +1,280 @@
+#!/usr/bin/python
+
+# (c) 2018, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+DOCUMENTATION = '''
+---
+module: netapp_e_syslog
+short_description: NetApp E-Series manage syslog settings
+description:
+    - Allow the syslog settings to be configured for an individual E-Series storage-system
+author: Nathan Swartz (@ndswartz)
+extends_documentation_fragment:
+- netapp.ontap.netapp.eseries
+
+options:
+    state:
+        description:
+            - Add or remove the syslog server configuration for E-Series storage array.
+            - Existing syslog server configuration will be removed or updated when its address matches I(address).
+            - A fully qualified hostname that resolves to an IPv4 address that matches I(address) will not be
+              treated as a match.
+        choices:
+            - present
+            - absent
+        default: present
+    address:
+        description:
+            - The syslog server's IPv4 address or a fully qualified hostname.
+            - All existing syslog configurations will be removed when I(state=absent) and I(address=None).
+    port:
+        description:
+            - This is the port the syslog server is using.
+        default: 514
+    protocol:
+        description:
+            - This is the transmission protocol the syslog server is using to receive syslog messages.
+        choices:
+            - udp
+            - tcp
+            - tls
+        default: udp
+    components:
+        description:
+            - The e-series logging components define the specific logs to transfer to the syslog server.
+            - At the time of writing, 'auditLog' is the only logging component but more may become available.
+        default: ["auditLog"]
+    test:
+        description:
+            - This forces a test syslog message to be sent to the stated syslog server.
+            - Only attempts transmission when I(state=present).
+        type: bool
+        default: no
+    log_path:
+        description:
+            - This argument specifies a local path for logging purposes.
+        required: no
+notes:
+    - Check mode is supported.
+    - This API is currently only supported with the Embedded Web Services API v2.12 (bundled with
+      SANtricity OS 11.40.2) and higher.
+'''
+
+EXAMPLES = """
+    - name: Add two syslog server configurations to NetApp E-Series storage array.
+      netapp_e_syslog:
+        state: present
+        address: "{{ item }}"
+        port: 514
+        protocol: tcp
+        components: ["auditLog"]
+        api_url: "10.1.1.1:8443"
+        api_username: "admin"
+        api_password: "myPass"
+      loop:
+        - "192.168.1.1"
+        - "192.168.1.100"
+"""
+
+RETURN = """
+msg:
+    description: Success message
+    returned: on success
+    type: str
+    sample: The settings have been updated.
+syslog:
+    description:
+        - True if syslog server configuration has been added to e-series storage array.
+    returned: on success
+    sample: True
+    type: bool
+"""
+
+import json
+import logging
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp import request, eseries_host_argument_spec
+from ansible.module_utils._text import to_native
+
+HEADERS = {
+    "Content-Type": "application/json",
+    "Accept": "application/json",
+}
+
+
+class Syslog(object):
+    def __init__(self):
+        argument_spec = eseries_host_argument_spec()
+        argument_spec.update(dict(
+            state=dict(choices=["present", "absent"], required=False, default="present"),
+            address=dict(type="str", required=False),
+            port=dict(type="int", default=514, required=False),
+            protocol=dict(choices=["tcp", "tls", "udp"], default="udp", required=False),
+            components=dict(type="list", required=False, default=["auditLog"]),
+            test=dict(type="bool", default=False, required=False),
+            log_path=dict(type="str", required=False),
+        ))
+
+        required_if = [
+            ["state", "present", ["address", "port", "protocol", "components"]],
+        ]
+
+        mutually_exclusive = [
+            ["test", "absent"],
+        ]
+
+        self.module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True, required_if=required_if,
+                                    mutually_exclusive=mutually_exclusive)
+        args = self.module.params
+
+        self.syslog = args["state"] in ["present"]
+        self.address = args["address"]
+        self.port = args["port"]
+        self.protocol = args["protocol"]
+        self.components = args["components"]
+        self.test = args["test"]
+        self.ssid = args["ssid"]
+        self.url = args["api_url"]
+        self.creds = dict(url_password=args["api_password"],
+                          validate_certs=args["validate_certs"],
+                          url_username=args["api_username"])
+
+        self.components.sort()
+
+        self.check_mode = self.module.check_mode
+
+        # logging setup
+        log_path = args["log_path"]
+        self._logger = logging.getLogger(self.__class__.__name__)
+        if log_path:
+            logging.basicConfig(
+                level=logging.DEBUG, filename=log_path, filemode='w',
+                format='%(relativeCreated)dms %(levelname)s %(module)s.%(funcName)s:%(lineno)d\n %(message)s')
+
+        if not self.url.endswith('/'):
+            self.url += '/'
+
+    def get_configuration(self):
+        """Retrieve existing syslog configuration."""
+        try:
+            (rc, result) = request(self.url + "storage-systems/{0}/syslog".format(self.ssid),
+                                   headers=HEADERS, **self.creds)
+            return result
+        except Exception as err:
+            self.module.fail_json(msg="Failed to retrieve syslog configuration! Array Id [%s]. Error [%s]."
+                                      % (self.ssid, to_native(err)))
+
+    def test_configuration(self, body):
+        """Send test syslog message to the storage array.
+
+        Allows a fixed number of retries to occur before a failure is issued, to give the storage array time to
+        create the new syslog server record.
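+
+        Note: the test request is issued against syslog/{id}/test, so the syslog
+        server entry must already exist on the array before a test can be sent.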
+ """ + try: + (rc, result) = request(self.url + "storage-systems/{0}/syslog/{1}/test".format(self.ssid, body["id"]), + method='POST', headers=HEADERS, **self.creds) + except Exception as err: + self.module.fail_json( + msg="We failed to send test message! Array Id [{0}]. Error [{1}].".format(self.ssid, to_native(err))) + + def update_configuration(self): + """Post the syslog request to array.""" + config_match = None + perfect_match = None + update = False + body = dict() + + # search existing configuration for syslog server entry match + configs = self.get_configuration() + if self.address: + for config in configs: + if config["serverAddress"] == self.address: + config_match = config + if (config["port"] == self.port and config["protocol"] == self.protocol and + len(config["components"]) == len(self.components) and + all([component["type"] in self.components for component in config["components"]])): + perfect_match = config_match + break + + # generate body for the http request + if self.syslog: + if not perfect_match: + update = True + if config_match: + body.update(dict(id=config_match["id"])) + components = [dict(type=component_type) for component_type in self.components] + body.update(dict(serverAddress=self.address, port=self.port, + protocol=self.protocol, components=components)) + self._logger.info(body) + self.make_configuration_request(body) + + # remove specific syslog server configuration + elif self.address: + update = True + body.update(dict(id=config_match["id"])) + self._logger.info(body) + self.make_configuration_request(body) + + # if no address is specified, remove all syslog server configurations + elif configs: + update = True + for config in configs: + body.update(dict(id=config["id"])) + self._logger.info(body) + self.make_configuration_request(body) + + return update + + def make_configuration_request(self, body): + # make http request(s) + if not self.check_mode: + try: + if self.syslog: + if "id" in body: + (rc, result) = request( + self.url + "storage-systems/{0}/syslog/{1}".format(self.ssid, body["id"]), + method='POST', data=json.dumps(body), headers=HEADERS, **self.creds) + else: + (rc, result) = request(self.url + "storage-systems/{0}/syslog".format(self.ssid), + method='POST', data=json.dumps(body), headers=HEADERS, **self.creds) + body.update(result) + + # send syslog test message + if self.test: + self.test_configuration(body) + + elif "id" in body: + (rc, result) = request(self.url + "storage-systems/{0}/syslog/{1}".format(self.ssid, body["id"]), + method='DELETE', headers=HEADERS, **self.creds) + + # This is going to catch cases like a connection failure + except Exception as err: + self.module.fail_json(msg="We failed to modify syslog configuration! Array Id [%s]. Error [%s]." 
+ % (self.ssid, to_native(err))) + + def update(self): + """Update configuration and respond to ansible.""" + update = self.update_configuration() + self.module.exit_json(msg="The syslog settings have been updated.", changed=update) + + def __call__(self, *args, **kwargs): + self.update() + + +def main(): + settings = Syslog() + settings() + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/storage/netapp/netapp_e_volume.py b/plugins/modules/storage/netapp/netapp_e_volume.py new file mode 100644 index 0000000000..9a61563a22 --- /dev/null +++ b/plugins/modules/storage/netapp/netapp_e_volume.py @@ -0,0 +1,845 @@ +#!/usr/bin/python + +# (c) 2016, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: netapp_e_volume +short_description: NetApp E-Series manage storage volumes (standard and thin) +description: + - Create or remove volumes (standard and thin) for NetApp E/EF-series storage arrays. +author: + - Kevin Hulquest (@hulquest) + - Nathan Swartz (@ndswartz) +extends_documentation_fragment: +- netapp.ontap.netapp.eseries + +options: + state: + description: + - Whether the specified volume should exist + required: true + choices: ['present', 'absent'] + name: + description: + - The name of the volume to manage. + required: true + storage_pool_name: + description: + - Required only when requested I(state=='present'). + - Name of the storage pool wherein the volume should reside. + required: false + size_unit: + description: + - The unit used to interpret the size parameter + choices: ['bytes', 'b', 'kb', 'mb', 'gb', 'tb', 'pb', 'eb', 'zb', 'yb'] + default: 'gb' + size: + description: + - Required only when I(state=='present'). + - Size of the volume in I(size_unit). + - Size of the virtual volume in the case of a thin volume in I(size_unit). + - Maximum virtual volume size of a thin provisioned volume is 256tb; however other OS-level restrictions may + exist. + required: true + segment_size_kb: + description: + - Segment size of the volume + - All values are in kibibytes. + - Some common choices include '8', '16', '32', '64', '128', '256', and '512' but options are system + dependent. + - Retrieve the definitive system list from M(netapp_e_facts) under segment_sizes. + - When the storage pool is a raidDiskPool then the segment size must be 128kb. + - Segment size migrations are not allowed in this module + default: '128' + thin_provision: + description: + - Whether the volume should be thin provisioned. + - Thin volumes can only be created when I(raid_level=="raidDiskPool"). + - Generally, use of thin-provisioning is not recommended due to performance impacts. + type: bool + default: false + thin_volume_repo_size: + description: + - This value (in size_unit) sets the allocated space for the thin provisioned repository. + - Initial value must between or equal to 4gb and 256gb in increments of 4gb. + - During expansion operations the increase must be between or equal to 4gb and 256gb in increments of 4gb. + - This option has no effect during expansion if I(thin_volume_expansion_policy=="automatic"). + - Generally speaking you should almost always use I(thin_volume_expansion_policy=="automatic). 
+        required: false
+    thin_volume_max_repo_size:
+        description:
+            - This is the maximum amount the thin volume repository will be allowed to grow.
+            - Only has significance when I(thin_volume_expansion_policy=="automatic").
+            - When the percentage I(thin_volume_repo_size) of I(thin_volume_max_repo_size) exceeds
+              I(thin_volume_growth_alert_threshold) then a warning will be issued and the storage array will execute
+              the I(thin_volume_expansion_policy) policy.
+            - Expansion operations when I(thin_volume_expansion_policy=="automatic") will increase the maximum
+              repository size.
+        default: same as size (in size_unit)
+    thin_volume_expansion_policy:
+        description:
+            - This is the thin volume expansion policy.
+            - When I(thin_volume_expansion_policy=="automatic") and I(thin_volume_growth_alert_threshold) is exceeded,
+              the I(thin_volume_max_repo_size) will be automatically expanded.
+            - When I(thin_volume_expansion_policy=="manual") and I(thin_volume_growth_alert_threshold) is exceeded,
+              the storage system will wait for manual intervention.
+            - The thin volume expansion policy cannot be modified on existing thin volumes in this module.
+            - Generally speaking you should almost always use I(thin_volume_expansion_policy=="automatic").
+        choices: ["automatic", "manual"]
+        default: "automatic"
+    thin_volume_growth_alert_threshold:
+        description:
+            - This is the thin provision repository utilization threshold (in percent).
+            - When the percentage of used storage of the maximum repository size exceeds this value then an alert will
+              be issued and the I(thin_volume_expansion_policy) will be executed.
+            - Values must be between or equal to 10 and 99.
+        default: 95
+    owning_controller:
+        description:
+            - Specifies which controller will be the primary owner of the volume.
+            - Not specifying will allow the controller to choose ownership.
+        required: false
+        choices: ["A", "B"]
+    ssd_cache_enabled:
+        description:
+            - Whether an existing SSD cache should be enabled on the volume (fails if no SSD cache defined).
+            - The default value is to ignore existing SSD cache setting.
+        type: bool
+        default: false
+    data_assurance_enabled:
+        description:
+            - Determines whether data assurance (DA) should be enabled for the volume.
+            - Only available when creating a new volume and on a storage pool with drives supporting the DA capability.
+        type: bool
+        default: false
+    read_cache_enable:
+        description:
+            - Indicates whether read caching should be enabled for the volume.
+        type: bool
+        default: true
+    read_ahead_enable:
+        description:
+            - Indicates whether or not automatic cache read-ahead is enabled.
+            - This option has no effect on thinly provisioned volumes since the architecture for thin volumes cannot
+              benefit from read ahead caching.
+        type: bool
+        default: true
+    write_cache_enable:
+        description:
+            - Indicates whether write-back caching should be enabled for the volume.
+        type: bool
+        default: true
+    cache_without_batteries:
+        description:
+            - Indicates whether caching should be used without battery backup.
+            - Warning, if I(cache_without_batteries==true) and the storage system loses power with no battery backup, data will be lost!
+        type: bool
+        default: false
+    workload_name:
+        description:
+            - Label for the workload defined by the metadata.
+            - When I(workload_name) and I(metadata) are specified then the defined workload will be added to the storage
+              array.
+            - When I(workload_name) exists on the storage array but the metadata is different then the workload
+              definition will be updated.
(Changes will update all associated volumes!) + - Existing workloads can be retrieved using M(netapp_e_facts). + required: false + metadata: + description: + - Dictionary containing meta data for the use, user, location, etc of the volume (dictionary is arbitrarily + defined for whatever the user deems useful) + - When I(workload_name) exists on the storage array but the metadata is different then the workload + definition will be updated. (Changes will update all associated volumes!) + - I(workload_name) must be specified when I(metadata) are defined. + type: dict + required: false + wait_for_initialization: + description: + - Forces the module to wait for expansion operations to complete before continuing. + type: bool + default: false + initialization_timeout: + description: + - Duration in seconds before the wait_for_initialization operation will terminate. + - M(wait_for_initialization==True) to have any effect on module's operations. + type: int + required: false +''' +EXAMPLES = """ +- name: Create simple volume with workload tags (volume meta data) + netapp_e_volume: + ssid: "{{ ssid }}" + api_url: "{{ netapp_api_url }}" + api_username: "{{ netapp_api_username }}" + api_password: "{{ netapp_api_password }}" + validate_certs: "{{ netapp_api_validate_certs }}" + state: present + name: volume + storage_pool_name: storage_pool + size: 300 + size_unit: gb + workload_name: volume_tag + metadata: + key1: value1 + key2: value2 +- name: Create a thin volume + netapp_e_volume: + ssid: "{{ ssid }}" + api_url: "{{ netapp_api_url }}" + api_username: "{{ netapp_api_username }}" + api_password: "{{ netapp_api_password }}" + validate_certs: "{{ netapp_api_validate_certs }}" + state: present + name: volume1 + storage_pool_name: storage_pool + size: 131072 + size_unit: gb + thin_provision: true + thin_volume_repo_size: 32 + thin_volume_max_repo_size: 1024 +- name: Expand thin volume's virtual size + netapp_e_volume: + ssid: "{{ ssid }}" + api_url: "{{ netapp_api_url }}" + api_username: "{{ netapp_api_username }}" + api_password: "{{ netapp_api_password }}" + validate_certs: "{{ netapp_api_validate_certs }}" + state: present + name: volume1 + storage_pool_name: storage_pool + size: 262144 + size_unit: gb + thin_provision: true + thin_volume_repo_size: 32 + thin_volume_max_repo_size: 1024 +- name: Expand thin volume's maximum repository size + netapp_e_volume: + ssid: "{{ ssid }}" + api_url: "{{ netapp_api_url }}" + api_username: "{{ netapp_api_username }}" + api_password: "{{ netapp_api_password }}" + validate_certs: "{{ netapp_api_validate_certs }}" + state: present + name: volume1 + storage_pool_name: storage_pool + size: 262144 + size_unit: gb + thin_provision: true + thin_volume_repo_size: 32 + thin_volume_max_repo_size: 2048 +- name: Delete volume + netapp_e_volume: + ssid: "{{ ssid }}" + api_url: "{{ netapp_api_url }}" + api_username: "{{ netapp_api_username }}" + api_password: "{{ netapp_api_password }}" + validate_certs: "{{ netapp_api_validate_certs }}" + state: absent + name: volume +""" +RETURN = """ +msg: + description: State of volume + type: str + returned: always + sample: "Standard volume [workload_vol_1] has been created." 
+""" +from time import sleep +from ansible_collections.netapp.ontap.plugins.module_utils.netapp import NetAppESeriesModule +from ansible.module_utils._text import to_native + + +class NetAppESeriesVolume(NetAppESeriesModule): + VOLUME_CREATION_BLOCKING_TIMEOUT_SEC = 300 + + def __init__(self): + ansible_options = dict( + state=dict(required=True, choices=["present", "absent"]), + name=dict(required=True, type="str"), + storage_pool_name=dict(type="str"), + size_unit=dict(default="gb", choices=["bytes", "b", "kb", "mb", "gb", "tb", "pb", "eb", "zb", "yb"], + type="str"), + size=dict(type="float"), + segment_size_kb=dict(type="int", default=128), + owning_controller=dict(required=False, choices=['A', 'B']), + ssd_cache_enabled=dict(type="bool", default=False), + data_assurance_enabled=dict(type="bool", default=False), + thin_provision=dict(type="bool", default=False), + thin_volume_repo_size=dict(type="int"), + thin_volume_max_repo_size=dict(type="float"), + thin_volume_expansion_policy=dict(type="str", choices=["automatic", "manual"]), + thin_volume_growth_alert_threshold=dict(type="int", default=95), + read_cache_enable=dict(type="bool", default=True), + read_ahead_enable=dict(type="bool", default=True), + write_cache_enable=dict(type="bool", default=True), + cache_without_batteries=dict(type="bool", default=False), + workload_name=dict(type="str", required=False), + metadata=dict(type="dict", required=False), + wait_for_initialization=dict(type="bool", default=False), + initialization_timeout=dict(type="int", required=False)) + + required_if = [ + ["state", "present", ["storage_pool_name", "size"]], + ["thin_provision", "true", ["thin_volume_repo_size"]] + ] + + super(NetAppESeriesVolume, self).__init__(ansible_options=ansible_options, + web_services_version="02.00.0000.0000", + supports_check_mode=True, + required_if=required_if) + + args = self.module.params + self.state = args["state"] + self.name = args["name"] + self.storage_pool_name = args["storage_pool_name"] + self.size_unit = args["size_unit"] + self.segment_size_kb = args["segment_size_kb"] + if args["size"]: + self.size_b = self.convert_to_aligned_bytes(args["size"]) + + self.owning_controller_id = None + if args["owning_controller"]: + self.owning_controller_id = "070000000000000000000001" if args["owning_controller"] == "A" else "070000000000000000000002" + + self.read_cache_enable = args["read_cache_enable"] + self.read_ahead_enable = args["read_ahead_enable"] + self.write_cache_enable = args["write_cache_enable"] + self.ssd_cache_enabled = args["ssd_cache_enabled"] + self.cache_without_batteries = args["cache_without_batteries"] + self.data_assurance_enabled = args["data_assurance_enabled"] + + self.thin_provision = args["thin_provision"] + self.thin_volume_expansion_policy = args["thin_volume_expansion_policy"] + self.thin_volume_growth_alert_threshold = int(args["thin_volume_growth_alert_threshold"]) + self.thin_volume_repo_size_b = None + self.thin_volume_max_repo_size_b = None + + if args["thin_volume_repo_size"]: + self.thin_volume_repo_size_b = self.convert_to_aligned_bytes(args["thin_volume_repo_size"]) + if args["thin_volume_max_repo_size"]: + self.thin_volume_max_repo_size_b = self.convert_to_aligned_bytes(args["thin_volume_max_repo_size"]) + + self.workload_name = args["workload_name"] + self.metadata = args["metadata"] + self.wait_for_initialization = args["wait_for_initialization"] + self.initialization_timeout = args["initialization_timeout"] + + # convert metadata to a list of dictionaries containing the keys 
"key" and "value" corresponding to + # each of the workload attributes dictionary entries + metadata = [] + if self.metadata: + if not self.workload_name: + self.module.fail_json(msg="When metadata is specified then the name for the workload must be specified." + " Array [%s]." % self.ssid) + for key in self.metadata.keys(): + metadata.append(dict(key=key, value=self.metadata[key])) + self.metadata = metadata + + if self.thin_provision: + if not self.thin_volume_max_repo_size_b: + self.thin_volume_max_repo_size_b = self.size_b + + if not self.thin_volume_expansion_policy: + self.thin_volume_expansion_policy = "automatic" + + if self.size_b > 256 * 1024 ** 4: + self.module.fail_json(msg="Thin provisioned volumes must be less than or equal to 256tb is size." + " Attempted size [%sg]" % (self.size_b * 1024 ** 3)) + + if (self.thin_volume_repo_size_b and self.thin_volume_max_repo_size_b and + self.thin_volume_repo_size_b > self.thin_volume_max_repo_size_b): + self.module.fail_json(msg="The initial size of the thin volume must not be larger than the maximum" + " repository size. Array [%s]." % self.ssid) + + if self.thin_volume_growth_alert_threshold < 10 or self.thin_volume_growth_alert_threshold > 99: + self.module.fail_json(msg="thin_volume_growth_alert_threshold must be between or equal to 10 and 99." + "thin_volume_growth_alert_threshold [%s]. Array [%s]." + % (self.thin_volume_growth_alert_threshold, self.ssid)) + + self.volume_detail = None + self.pool_detail = None + self.workload_id = None + + def convert_to_aligned_bytes(self, size): + """Convert size to the truncated byte size that aligns on the segment size.""" + size_bytes = int(size * self.SIZE_UNIT_MAP[self.size_unit]) + segment_size_bytes = int(self.segment_size_kb * self.SIZE_UNIT_MAP["kb"]) + segment_count = int(size_bytes / segment_size_bytes) + return segment_count * segment_size_bytes + + def get_volume(self): + """Retrieve volume details from storage array.""" + volumes = list() + thin_volumes = list() + try: + rc, volumes = self.request("storage-systems/%s/volumes" % self.ssid) + except Exception as err: + self.module.fail_json(msg="Failed to obtain list of thick volumes. Array Id [%s]. Error[%s]." + % (self.ssid, to_native(err))) + try: + rc, thin_volumes = self.request("storage-systems/%s/thin-volumes" % self.ssid) + except Exception as err: + self.module.fail_json(msg="Failed to obtain list of thin volumes. Array Id [%s]. Error[%s]." + % (self.ssid, to_native(err))) + + volume_detail = [volume for volume in volumes + thin_volumes if volume["name"] == self.name] + return volume_detail[0] if volume_detail else dict() + + def wait_for_volume_availability(self, retries=VOLUME_CREATION_BLOCKING_TIMEOUT_SEC / 5): + """Waits until volume becomes available. + + :raises AnsibleFailJson when retries are exhausted. + """ + if retries == 0: + self.module.fail_json(msg="Timed out waiting for the volume %s to become available. Array [%s]." + % (self.name, self.ssid)) + if not self.get_volume(): + sleep(5) + self.wait_for_volume_availability(retries=retries - 1) + + def wait_for_volume_action(self, timeout=None): + """Waits until volume action is complete is complete. + :param: int timeout: Wait duration measured in seconds. Waits indefinitely when None. 
+ """ + action = "unknown" + percent_complete = None + while action != "complete": + sleep(5) + + try: + rc, operations = self.request("storage-systems/%s/symbol/getLongLivedOpsProgress" % self.ssid) + + # Search long lived operations for volume + action = "complete" + for operation in operations["longLivedOpsProgress"]: + if operation["volAction"] is not None: + for key in operation.keys(): + if (operation[key] is not None and "volumeRef" in operation[key] and + (operation[key]["volumeRef"] == self.volume_detail["id"] or + ("storageVolumeRef" in self.volume_detail and operation[key]["volumeRef"] == self.volume_detail["storageVolumeRef"]))): + action = operation["volAction"] + percent_complete = operation["init"]["percentComplete"] + except Exception as err: + self.module.fail_json(msg="Failed to get volume expansion progress. Volume [%s]. Array Id [%s]." + " Error[%s]." % (self.name, self.ssid, to_native(err))) + + if timeout is not None: + if timeout <= 0: + self.module.warn("Expansion action, %s, failed to complete during the allotted time. Time remaining" + " [%s]. Array Id [%s]." % (action, percent_complete, self.ssid)) + self.module.fail_json(msg="Expansion action failed to complete. Time remaining [%s]. Array Id [%s]." % (percent_complete, self.ssid)) + if timeout: + timeout -= 5 + + self.module.log("Expansion action, %s, is %s complete." % (action, percent_complete)) + self.module.log("Expansion action is complete.") + + def get_storage_pool(self): + """Retrieve storage pool details from the storage array.""" + storage_pools = list() + try: + rc, storage_pools = self.request("storage-systems/%s/storage-pools" % self.ssid) + except Exception as err: + self.module.fail_json(msg="Failed to obtain list of storage pools. Array Id [%s]. Error[%s]." + % (self.ssid, to_native(err))) + + pool_detail = [storage_pool for storage_pool in storage_pools if storage_pool["name"] == self.storage_pool_name] + return pool_detail[0] if pool_detail else dict() + + def check_storage_pool_sufficiency(self): + """Perform a series of checks as to the sufficiency of the storage pool for the volume.""" + if not self.pool_detail: + self.module.fail_json(msg='Requested storage pool (%s) not found' % self.storage_pool_name) + + if not self.volume_detail: + if self.thin_provision and not self.pool_detail['diskPool']: + self.module.fail_json(msg='Thin provisioned volumes can only be created on raid disk pools.') + + if (self.data_assurance_enabled and not + (self.pool_detail["protectionInformationCapabilities"]["protectionInformationCapable"] and + self.pool_detail["protectionInformationCapabilities"]["protectionType"] == "type2Protection")): + self.module.fail_json(msg="Data Assurance (DA) requires the storage pool to be DA-compatible." + " Array [%s]." % self.ssid) + + if int(self.pool_detail["freeSpace"]) < self.size_b and not self.thin_provision: + self.module.fail_json(msg="Not enough storage pool free space available for the volume's needs." + " Array [%s]." % self.ssid) + else: + # Check for expansion + if (int(self.pool_detail["freeSpace"]) < int(self.volume_detail["totalSizeInBytes"]) - self.size_b and + not self.thin_provision): + self.module.fail_json(msg="Not enough storage pool free space available for the volume's needs." + " Array [%s]." % self.ssid) + + def update_workload_tags(self, check_mode=False): + """Check the status of the workload tag and update storage array definitions if necessary. 
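+
+        The request body posted to "storage-systems/<ssid>/workloads" has roughly this
+        shape (illustrative values only; the exact fields are built in the code below):
+
+            {"name": "my_workload", "profileId": "ansible_workload_1",
+             "isNewWorkloadInstance": True, "isWorkloadDataInitialized": False,
+             "workloadAttributes": [{"key": "use", "value": "database"}]}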
+ + When the workload attributes are not provided but an existing workload tag name is, then the attributes will be + used. + + :return bool: Whether changes were required to be made.""" + change_required = False + workload_tags = None + request_body = None + ansible_profile_id = None + + if self.workload_name: + try: + rc, workload_tags = self.request("storage-systems/%s/workloads" % self.ssid) + except Exception as error: + self.module.fail_json(msg="Failed to retrieve storage array workload tags. Array [%s]" % self.ssid) + + # Generate common indexed Ansible workload tag + current_tag_index_list = [int(pair["value"].replace("ansible_workload_", "")) + for tag in workload_tags for pair in tag["workloadAttributes"] + if pair["key"] == "profileId" and "ansible_workload_" in pair["value"] and + str(pair["value"]).replace("ansible_workload_", "").isdigit()] + + tag_index = 1 + if current_tag_index_list: + tag_index = max(current_tag_index_list) + 1 + + ansible_profile_id = "ansible_workload_%d" % tag_index + request_body = dict(name=self.workload_name, + profileId=ansible_profile_id, + workloadInstanceIndex=None, + isValid=True) + + # evaluate and update storage array when needed + for tag in workload_tags: + if tag["name"] == self.workload_name: + self.workload_id = tag["id"] + + if not self.metadata: + break + + # Determine if core attributes (everything but profileId) is the same + metadata_set = set(tuple(sorted(attr.items())) for attr in self.metadata) + tag_set = set(tuple(sorted(attr.items())) + for attr in tag["workloadAttributes"] if attr["key"] != "profileId") + if metadata_set != tag_set: + self.module.log("Workload tag change is required!") + change_required = True + + # only perform the required action when check_mode==False + if change_required and not check_mode: + self.metadata.append(dict(key="profileId", value=ansible_profile_id)) + request_body.update(dict(isNewWorkloadInstance=False, + isWorkloadDataInitialized=True, + isWorkloadCardDataToBeReset=True, + workloadAttributes=self.metadata)) + try: + rc, resp = self.request("storage-systems/%s/workloads/%s" % (self.ssid, tag["id"]), + data=request_body, method="POST") + except Exception as error: + self.module.fail_json(msg="Failed to create new workload tag. Array [%s]. Error [%s]" + % (self.ssid, to_native(error))) + self.module.log("Workload tag [%s] required change." % self.workload_name) + break + + # existing workload tag not found so create new workload tag + else: + change_required = True + self.module.log("Workload tag creation is required!") + + if change_required and not check_mode: + if self.metadata: + self.metadata.append(dict(key="profileId", value=ansible_profile_id)) + else: + self.metadata = [dict(key="profileId", value=ansible_profile_id)] + + request_body.update(dict(isNewWorkloadInstance=True, + isWorkloadDataInitialized=False, + isWorkloadCardDataToBeReset=False, + workloadAttributes=self.metadata)) + try: + rc, resp = self.request("storage-systems/%s/workloads" % self.ssid, + method="POST", data=request_body) + self.workload_id = resp["id"] + except Exception as error: + self.module.fail_json(msg="Failed to create new workload tag. Array [%s]. Error [%s]" + % (self.ssid, to_native(error))) + self.module.log("Workload tag [%s] was added." % self.workload_name) + + return change_required + + def get_volume_property_changes(self): + """Retrieve the volume update request body when change(s) are required. + + :raise AnsibleFailJson when attempting to change segment size on existing volume. 
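+
+        For example, when only the cache settings differ, the returned body is just
+        (illustrative values):
+
+            {"flashCache": True, "metaTags": [],
+             "cacheSettings": {"readCacheEnable": True, "writeCacheEnable": False}}
+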
+ :return dict: request body when change(s) to a volume's properties are required. + """ + change = False + request_body = dict(flashCache=self.ssd_cache_enabled, metaTags=[], + cacheSettings=dict(readCacheEnable=self.read_cache_enable, + writeCacheEnable=self.write_cache_enable)) + + # check for invalid modifications + if self.segment_size_kb * 1024 != int(self.volume_detail["segmentSize"]): + self.module.fail_json(msg="Existing volume segment size is %s and cannot be modified." + % self.volume_detail["segmentSize"]) + + # common thick/thin volume properties + if (self.read_cache_enable != self.volume_detail["cacheSettings"]["readCacheEnable"] or + self.write_cache_enable != self.volume_detail["cacheSettings"]["writeCacheEnable"] or + self.ssd_cache_enabled != self.volume_detail["flashCached"]): + change = True + + # controller ownership + if self.owning_controller_id and self.owning_controller_id != self.volume_detail["preferredManager"]: + change = True + request_body.update(dict(owningControllerId=self.owning_controller_id)) + + if self.workload_name: + request_body.update(dict(metaTags=[dict(key="workloadId", value=self.workload_id), + dict(key="volumeTypeId", value="volume")])) + if {"key": "workloadId", "value": self.workload_id} not in self.volume_detail["metadata"]: + change = True + elif self.volume_detail["metadata"]: + change = True + + # thick/thin volume specific properties + if self.thin_provision: + if self.thin_volume_growth_alert_threshold != int(self.volume_detail["growthAlertThreshold"]): + change = True + request_body.update(dict(growthAlertThreshold=self.thin_volume_growth_alert_threshold)) + if self.thin_volume_expansion_policy != self.volume_detail["expansionPolicy"]: + change = True + request_body.update(dict(expansionPolicy=self.thin_volume_expansion_policy)) + else: + if self.read_ahead_enable != (int(self.volume_detail["cacheSettings"]["readAheadMultiplier"]) > 0): + change = True + request_body["cacheSettings"].update(dict(readAheadEnable=self.read_ahead_enable)) + if self.cache_without_batteries != self.volume_detail["cacheSettings"]["cwob"]: + change = True + request_body["cacheSettings"].update(dict(cacheWithoutBatteries=self.cache_without_batteries)) + + return request_body if change else dict() + + def get_expand_volume_changes(self): + """Expand the storage specifications for the existing thick/thin volume. + + :raise AnsibleFailJson when a thick/thin volume expansion request fails. + :return dict: dictionary containing all the necessary values for volume expansion request + """ + request_body = dict() + + if self.size_b < int(self.volume_detail["capacity"]): + self.module.fail_json(msg="Reducing the size of volumes is not permitted. Volume [%s]. 
Array [%s]" + % (self.name, self.ssid)) + + if self.volume_detail["thinProvisioned"]: + if self.size_b > int(self.volume_detail["capacity"]): + request_body.update(dict(sizeUnit="bytes", newVirtualSize=self.size_b)) + self.module.log("Thin volume virtual size have been expanded.") + + if self.volume_detail["expansionPolicy"] == "automatic": + if self.thin_volume_max_repo_size_b > int(self.volume_detail["provisionedCapacityQuota"]): + request_body.update(dict(sizeUnit="bytes", newRepositorySize=self.thin_volume_max_repo_size_b)) + self.module.log("Thin volume maximum repository size have been expanded (automatic policy).") + + elif self.volume_detail["expansionPolicy"] == "manual": + if self.thin_volume_repo_size_b > int(self.volume_detail["currentProvisionedCapacity"]): + change = self.thin_volume_repo_size_b - int(self.volume_detail["currentProvisionedCapacity"]) + if change < 4 * 1024 ** 3 or change > 256 * 1024 ** 3 or change % (4 * 1024 ** 3) != 0: + self.module.fail_json(msg="The thin volume repository increase must be between or equal to 4gb" + " and 256gb in increments of 4gb. Attempted size [%sg]." + % (self.thin_volume_repo_size_b * 1024 ** 3)) + + request_body.update(dict(sizeUnit="bytes", newRepositorySize=self.thin_volume_repo_size_b)) + self.module.log("Thin volume maximum repository size have been expanded (manual policy).") + + elif self.size_b > int(self.volume_detail["capacity"]): + request_body.update(dict(sizeUnit="bytes", expansionSize=self.size_b)) + self.module.log("Volume storage capacities have been expanded.") + + return request_body + + def create_volume(self): + """Create thick/thin volume according to the specified criteria.""" + body = dict(name=self.name, poolId=self.pool_detail["id"], sizeUnit="bytes", + dataAssuranceEnabled=self.data_assurance_enabled) + + if self.thin_provision: + body.update(dict(virtualSize=self.size_b, + repositorySize=self.thin_volume_repo_size_b, + maximumRepositorySize=self.thin_volume_max_repo_size_b, + expansionPolicy=self.thin_volume_expansion_policy, + growthAlertThreshold=self.thin_volume_growth_alert_threshold)) + try: + rc, volume = self.request("storage-systems/%s/thin-volumes" % self.ssid, data=body, method="POST") + except Exception as error: + self.module.fail_json(msg="Failed to create thin volume. Volume [%s]. Array Id [%s]. Error[%s]." + % (self.name, self.ssid, to_native(error))) + + self.module.log("New thin volume created [%s]." % self.name) + + else: + body.update(dict(size=self.size_b, segSize=self.segment_size_kb)) + try: + rc, volume = self.request("storage-systems/%s/volumes" % self.ssid, data=body, method="POST") + except Exception as error: + self.module.fail_json(msg="Failed to create volume. Volume [%s]. Array Id [%s]. Error[%s]." + % (self.name, self.ssid, to_native(error))) + + self.module.log("New volume created [%s]." % self.name) + + def update_volume_properties(self): + """Update existing thin-volume or volume properties. + + :raise AnsibleFailJson when either thick/thin volume update request fails. + :return bool: whether update was applied + """ + self.wait_for_volume_availability() + self.volume_detail = self.get_volume() + + request_body = self.get_volume_property_changes() + + if request_body: + if self.thin_provision: + try: + rc, resp = self.request("storage-systems/%s/thin-volumes/%s" + % (self.ssid, self.volume_detail["id"]), data=request_body, method="POST") + except Exception as error: + self.module.fail_json(msg="Failed to update thin volume properties. Volume [%s]. Array Id [%s]." 
+ " Error[%s]." % (self.name, self.ssid, to_native(error))) + else: + try: + rc, resp = self.request("storage-systems/%s/volumes/%s" % (self.ssid, self.volume_detail["id"]), + data=request_body, method="POST") + except Exception as error: + self.module.fail_json(msg="Failed to update volume properties. Volume [%s]. Array Id [%s]." + " Error[%s]." % (self.name, self.ssid, to_native(error))) + return True + return False + + def expand_volume(self): + """Expand the storage specifications for the existing thick/thin volume. + + :raise AnsibleFailJson when a thick/thin volume expansion request fails. + """ + request_body = self.get_expand_volume_changes() + if request_body: + if self.volume_detail["thinProvisioned"]: + try: + rc, resp = self.request("storage-systems/%s/thin-volumes/%s/expand" + % (self.ssid, self.volume_detail["id"]), data=request_body, method="POST") + except Exception as err: + self.module.fail_json(msg="Failed to expand thin volume. Volume [%s]. Array Id [%s]. Error[%s]." + % (self.name, self.ssid, to_native(err))) + self.module.log("Thin volume specifications have been expanded.") + + else: + try: + rc, resp = self.request( + "storage-systems/%s/volumes/%s/expand" % (self.ssid, self.volume_detail['id']), + data=request_body, method="POST") + except Exception as err: + self.module.fail_json(msg="Failed to expand volume. Volume [%s]. Array Id [%s]. Error[%s]." + % (self.name, self.ssid, to_native(err))) + + self.module.log("Volume storage capacities have been expanded.") + + def delete_volume(self): + """Delete existing thin/thick volume.""" + if self.thin_provision: + try: + rc, resp = self.request("storage-systems/%s/thin-volumes/%s" % (self.ssid, self.volume_detail["id"]), + method="DELETE") + except Exception as error: + self.module.fail_json(msg="Failed to delete thin volume. Volume [%s]. Array Id [%s]. Error[%s]." + % (self.name, self.ssid, to_native(error))) + self.module.log("Thin volume deleted [%s]." % self.name) + else: + try: + rc, resp = self.request("storage-systems/%s/volumes/%s" % (self.ssid, self.volume_detail["id"]), + method="DELETE") + except Exception as error: + self.module.fail_json(msg="Failed to delete volume. Volume [%s]. Array Id [%s]. Error[%s]." + % (self.name, self.ssid, to_native(error))) + self.module.log("Volume deleted [%s]." % self.name) + + def apply(self): + """Determine and apply any changes necessary to satisfy the specified criteria. + + :raise AnsibleExitJson when completes successfully""" + change = False + msg = None + + self.volume_detail = self.get_volume() + self.pool_detail = self.get_storage_pool() + + # Determine whether changes need to be applied to existing workload tags + if self.state == 'present' and self.update_workload_tags(check_mode=True): + change = True + + # Determine if any changes need to be applied + if self.volume_detail: + if self.state == 'absent': + change = True + + elif self.state == 'present': + if self.get_expand_volume_changes() or self.get_volume_property_changes(): + change = True + + elif self.state == 'present': + if self.thin_provision and (self.thin_volume_repo_size_b < 4 * 1024 ** 3 or + self.thin_volume_repo_size_b > 256 * 1024 ** 3 or + self.thin_volume_repo_size_b % (4 * 1024 ** 3) != 0): + self.module.fail_json(msg="The initial thin volume repository size must be between 4gb and 256gb in" + " increments of 4gb. Attempted size [%sg]." + % (self.thin_volume_repo_size_b * 1024 ** 3)) + change = True + + self.module.log("Update required: [%s]." 
% change) + + # Apply any necessary changes + if change and not self.module.check_mode: + if self.state == 'present': + if self.update_workload_tags(): + msg = "Workload tag change occurred." + + if not self.volume_detail: + self.check_storage_pool_sufficiency() + self.create_volume() + self.update_volume_properties() + msg = msg[:-1] + " and volume [%s] was created." if msg else "Volume [%s] has been created." + else: + if self.update_volume_properties(): + msg = "Volume [%s] properties were updated." + + if self.get_expand_volume_changes(): + self.expand_volume() + msg = msg[:-1] + " and was expanded." if msg else "Volume [%s] was expanded." + + if self.wait_for_initialization: + self.module.log("Waiting for volume operation to complete.") + self.wait_for_volume_action(timeout=self.initialization_timeout) + + elif self.state == 'absent': + self.delete_volume() + msg = "Volume [%s] has been deleted." + + else: + msg = "Volume [%s] does not exist." if self.state == 'absent' else "Volume [%s] exists." + + self.module.exit_json(msg=(msg % self.name if msg and "%s" in msg else msg), changed=change) + + +def main(): + volume = NetAppESeriesVolume() + volume.apply() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/storage/netapp/netapp_e_volume_copy.py b/plugins/modules/storage/netapp/netapp_e_volume_copy.py new file mode 100644 index 0000000000..7c3043757a --- /dev/null +++ b/plugins/modules/storage/netapp/netapp_e_volume_copy.py @@ -0,0 +1,400 @@ +#!/usr/bin/python + +# (c) 2016, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: netapp_e_volume_copy +short_description: NetApp E-Series create volume copy pairs +description: + - Create and delete snapshots images on volume groups for NetApp E-series storage arrays. +author: Kevin Hulquest (@hulquest) +extends_documentation_fragment: +- netapp.ontap.netapp.eseries + +options: + api_username: + required: true + description: + - The username to authenticate with the SANtricity WebServices Proxy or embedded REST API. + api_password: + required: true + description: + - The password to authenticate with the SANtricity WebServices Proxy or embedded REST API. + api_url: + required: true + description: + - The url to the SANtricity WebServices Proxy or embedded REST API, for example C(https://prod-1.wahoo.acme.com/devmgr/v2). + validate_certs: + required: false + default: true + description: + - Should https certificates be validated? + source_volume_id: + description: + - The id of the volume copy source. + - If used, must be paired with destination_volume_id + - Mutually exclusive with volume_copy_pair_id, and search_volume_id + destination_volume_id: + description: + - The id of the volume copy destination. + - If used, must be paired with source_volume_id + - Mutually exclusive with volume_copy_pair_id, and search_volume_id + volume_copy_pair_id: + description: + - The id of a given volume copy pair + - Mutually exclusive with destination_volume_id, source_volume_id, and search_volume_id + - Can use to delete or check presence of volume pairs + - Must specify this or (destination_volume_id and source_volume_id) + state: + description: + - Whether the specified volume copy pair should exist or not. 
+        required: True
+        choices: ['present', 'absent']
+    create_copy_pair_if_does_not_exist:
+        description:
+        - Defines if a copy pair will be created if it does not exist.
+        - If set to True, destination_volume_id and source_volume_id are required.
+        type: bool
+        default: True
+    start_stop_copy:
+        description:
+        - Starts a re-copy or stops a copy in progress.
+        - "Note: If you stop the initial file copy before it is done the copy pair will be destroyed."
+        - Requires volume_copy_pair_id.
+    search_volume_id:
+        description:
+        - Searches for all valid potential target and source volumes that could be used in a copy_pair.
+        - Mutually exclusive with volume_copy_pair_id, destination_volume_id and source_volume_id.
+'''
+RESULTS = """
+"""
+EXAMPLES = """
+# Illustrative example; the volume IDs below are placeholders.
+- name: Create a volume copy pair
+  netapp_e_volume_copy:
+    ssid: "{{ ssid }}"
+    api_url: "{{ netapp_api_url }}"
+    api_username: "{{ netapp_api_username }}"
+    api_password: "{{ netapp_api_password }}"
+    status: present
+    source_volume_id: "02000000600A098000A4B28D000010475C405428"
+    destination_volume_id: "02000000600A098000A4B9D10000741A5C405439"
+"""
+RETURN = """
+msg:
+    description: Success message
+    returned: success
+    type: str
+    sample: Created Volume Copy Pair with ID
+"""
+
+import json
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp import request
+
+HEADERS = {
+    "Content-Type": "application/json",
+    "Accept": "application/json",
+}
+
+
+def find_volume_copy_pair_id_from_source_volume_id_and_destination_volume_id(params):
+    get_status = 'storage-systems/%s/volume-copy-jobs' % params['ssid']
+    url = params['api_url'] + get_status
+
+    (rc, resp) = request(url, method='GET', url_username=params['api_username'],
+                         url_password=params['api_password'], headers=HEADERS,
+                         validate_certs=params['validate_certs'])
+
+    volume_copy_pair_id = None
+    for potential_copy_pair in resp:
+        if potential_copy_pair['sourceVolume'] == params['source_volume_id']:
+            # Also match the destination so an unrelated pair sharing the same source is not
+            # returned ('targetVolume' is assumed to mirror the 'sourceVolume' field name).
+            if potential_copy_pair['targetVolume'] == params['destination_volume_id']:
+                volume_copy_pair_id = potential_copy_pair['id']
+
+    return volume_copy_pair_id
+
+
+def create_copy_pair(params):
+    get_status = 'storage-systems/%s/volume-copy-jobs' % params['ssid']
+    url = params['api_url'] + get_status
+
+    rData = {
+        "sourceId": params['source_volume_id'],
+        "targetId": params['destination_volume_id']
+    }
+
+    (rc, resp) = request(url, data=json.dumps(rData), ignore_errors=True, method='POST',
+                         url_username=params['api_username'], url_password=params['api_password'], headers=HEADERS,
+                         validate_certs=params['validate_certs'])
+    if rc != 200:
+        return False, (rc, resp)
+    else:
+        return True, (rc, resp)
+
+
+def delete_copy_pair_by_copy_pair_id(params):
+    get_status = 'storage-systems/%s/volume-copy-jobs/%s?retainRepositories=false' % (
+        params['ssid'], params['volume_copy_pair_id'])
+    url = params['api_url'] + get_status
+
+    (rc, resp) = request(url, ignore_errors=True, method='DELETE',
+                         url_username=params['api_username'], url_password=params['api_password'], headers=HEADERS,
+                         validate_certs=params['validate_certs'])
+    if rc != 204:
+        return False, (rc, resp)
+    else:
+        return True, (rc, resp)
+
+
+def find_volume_copy_pair_id_by_volume_copy_pair_id(params):
+    get_status = 'storage-systems/%s/volume-copy-jobs/%s' % (
+        params['ssid'], params['volume_copy_pair_id'])
+    url = params['api_url'] + get_status
+
+    # This lookup only verifies that the pair exists, so issue a GET rather than the
+    # DELETE that was copy-pasted here from delete_copy_pair_by_copy_pair_id().
+    (rc, resp) = request(url, ignore_errors=True, method='GET',
+                         url_username=params['api_username'], url_password=params['api_password'], headers=HEADERS,
+                         validate_certs=params['validate_certs'])
+    if rc != 200:
+        return False, (rc, resp)
+    else:
+        return True, (rc,
resp) + + +def start_stop_copy(params): + get_status = 'storage-systems/%s/volume-copy-jobs-control/%s?control=%s' % ( + params['ssid'], params['volume_copy_pair_id'], params['start_stop_copy']) + url = params['api_url'] + get_status + + (response_code, response_data) = request(url, ignore_errors=True, method='POST', + url_username=params['api_username'], url_password=params['api_password'], + headers=HEADERS, + validate_certs=params['validate_certs']) + + if response_code == 200: + return True, response_data[0]['percentComplete'] + else: + return False, response_data + + +def check_copy_status(params): + get_status = 'storage-systems/%s/volume-copy-jobs-control/%s' % ( + params['ssid'], params['volume_copy_pair_id']) + url = params['api_url'] + get_status + + (response_code, response_data) = request(url, ignore_errors=True, method='GET', + url_username=params['api_username'], url_password=params['api_password'], + headers=HEADERS, + validate_certs=params['validate_certs']) + + if response_code == 200: + if response_data['percentComplete'] != -1: + + return True, response_data['percentComplete'] + else: + return False, response_data['percentComplete'] + else: + return False, response_data + + +def find_valid_copy_pair_targets_and_sources(params): + get_status = 'storage-systems/%s/volumes' % params['ssid'] + url = params['api_url'] + get_status + + (response_code, response_data) = request(url, ignore_errors=True, method='GET', + url_username=params['api_username'], url_password=params['api_password'], + headers=HEADERS, + validate_certs=params['validate_certs']) + + if response_code == 200: + source_capacity = None + candidates = [] + for volume in response_data: + if volume['id'] == params['search_volume_id']: + source_capacity = volume['capacity'] + else: + candidates.append(volume) + + potential_sources = [] + potential_targets = [] + + for volume in candidates: + if volume['capacity'] > source_capacity: + if volume['volumeCopyTarget'] is False: + if volume['volumeCopySource'] is False: + potential_targets.append(volume['id']) + else: + if volume['volumeCopyTarget'] is False: + if volume['volumeCopySource'] is False: + potential_sources.append(volume['id']) + + return potential_targets, potential_sources + + else: + raise Exception("Response [%s]" % response_code) + + +def main(): + module = AnsibleModule(argument_spec=dict( + source_volume_id=dict(type='str'), + destination_volume_id=dict(type='str'), + copy_priority=dict(required=False, default=0, type='int'), + ssid=dict(required=True, type='str'), + api_url=dict(required=True), + api_username=dict(required=False), + api_password=dict(required=False, no_log=True), + validate_certs=dict(required=False, default=True), + targetWriteProtected=dict(required=False, default=True, type='bool'), + onlineCopy=dict(required=False, default=False, type='bool'), + volume_copy_pair_id=dict(type='str'), + status=dict(required=True, choices=['present', 'absent'], type='str'), + create_copy_pair_if_does_not_exist=dict(required=False, default=True, type='bool'), + start_stop_copy=dict(required=False, choices=['start', 'stop'], type='str'), + search_volume_id=dict(type='str'), + ), + mutually_exclusive=[['volume_copy_pair_id', 'destination_volume_id'], + ['volume_copy_pair_id', 'source_volume_id'], + ['volume_copy_pair_id', 'search_volume_id'], + ['search_volume_id', 'destination_volume_id'], + ['search_volume_id', 'source_volume_id'], + ], + required_together=[['source_volume_id', 'destination_volume_id'], + ], + 
required_if=[["create_copy_pair_if_does_not_exist", True, ['source_volume_id', 'destination_volume_id'], ], + ["start_stop_copy", 'stop', ['volume_copy_pair_id'], ], + ["start_stop_copy", 'start', ['volume_copy_pair_id'], ], + ] + + ) + params = module.params + + if not params['api_url'].endswith('/'): + params['api_url'] += '/' + + # Check if we want to search + if params['search_volume_id'] is not None: + try: + potential_targets, potential_sources = find_valid_copy_pair_targets_and_sources(params) + except Exception as e: + module.fail_json(msg="Failed to find valid copy pair candidates. Error [%s]" % to_native(e)) + + module.exit_json(changed=False, + msg=' Valid source devices found: %s Valid target devices found: %s' % (len(potential_sources), len(potential_targets)), + search_volume_id=params['search_volume_id'], + valid_targets=potential_targets, + valid_sources=potential_sources) + + # Check if we want to start or stop a copy operation + if params['start_stop_copy'] == 'start' or params['start_stop_copy'] == 'stop': + + # Get the current status info + currenty_running, status_info = check_copy_status(params) + + # If we want to start + if params['start_stop_copy'] == 'start': + + # If we have already started + if currenty_running is True: + module.exit_json(changed=False, msg='Volume Copy Pair copy has started.', + volume_copy_pair_id=params['volume_copy_pair_id'], percent_done=status_info) + # If we need to start + else: + + start_status, info = start_stop_copy(params) + + if start_status is True: + module.exit_json(changed=True, msg='Volume Copy Pair copy has started.', + volume_copy_pair_id=params['volume_copy_pair_id'], percent_done=info) + else: + module.fail_json(msg="Could not start volume copy pair Error: %s" % info) + + # If we want to stop + else: + # If it has already stopped + if currenty_running is False: + module.exit_json(changed=False, msg='Volume Copy Pair copy is stopped.', + volume_copy_pair_id=params['volume_copy_pair_id']) + + # If we need to stop it + else: + start_status, info = start_stop_copy(params) + + if start_status is True: + module.exit_json(changed=True, msg='Volume Copy Pair copy has been stopped.', + volume_copy_pair_id=params['volume_copy_pair_id']) + else: + module.fail_json(msg="Could not stop volume copy pair Error: %s" % info) + + # If we want the copy pair to exist we do this stuff + if params['status'] == 'present': + + # We need to check if it exists first + if params['volume_copy_pair_id'] is None: + params['volume_copy_pair_id'] = find_volume_copy_pair_id_from_source_volume_id_and_destination_volume_id( + params) + + # If no volume copy pair is found we need need to make it. + if params['volume_copy_pair_id'] is None: + + # In order to create we can not do so with just a volume_copy_pair_id + + copy_began_status, (rc, resp) = create_copy_pair(params) + + if copy_began_status is True: + module.exit_json(changed=True, msg='Created Volume Copy Pair with ID: %s' % resp['id']) + else: + module.fail_json(msg="Could not create volume copy pair Code: %s Error: %s" % (rc, resp)) + + # If it does exist we do nothing + else: + # We verify that it exists + exist_status, (exist_status_code, exist_status_data) = find_volume_copy_pair_id_by_volume_copy_pair_id( + params) + + if exist_status: + module.exit_json(changed=False, + msg=' Volume Copy Pair with ID: %s exists' % params['volume_copy_pair_id']) + else: + if exist_status_code == 404: + module.fail_json( + msg=' Volume Copy Pair with ID: %s does not exist. 
Can not create without source_volume_id and destination_volume_id' % + params['volume_copy_pair_id']) + else: + module.fail_json(msg="Could not find volume copy pair Code: %s Error: %s" % ( + exist_status_code, exist_status_data)) + + module.fail_json(msg="Done") + + # If we want it to not exist we do this + else: + + if params['volume_copy_pair_id'] is None: + params['volume_copy_pair_id'] = find_volume_copy_pair_id_from_source_volume_id_and_destination_volume_id( + params) + + # We delete it by the volume_copy_pair_id + delete_status, (delete_status_code, delete_status_data) = delete_copy_pair_by_copy_pair_id(params) + + if delete_status is True: + module.exit_json(changed=True, + msg=' Volume Copy Pair with ID: %s was deleted' % params['volume_copy_pair_id']) + else: + if delete_status_code == 404: + module.exit_json(changed=False, + msg=' Volume Copy Pair with ID: %s does not exist' % params['volume_copy_pair_id']) + else: + module.fail_json(msg="Could not delete volume copy pair Code: %s Error: %s" % ( + delete_status_code, delete_status_data)) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/storage/netapp/sf_account_manager.py b/plugins/modules/storage/netapp/sf_account_manager.py new file mode 100644 index 0000000000..139bdfe0cd --- /dev/null +++ b/plugins/modules/storage/netapp/sf_account_manager.py @@ -0,0 +1,268 @@ +#!/usr/bin/python + +# (c) 2017, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['deprecated'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' + +module: sf_account_manager +deprecated: + removed_in: "2.11" + why: This Module has been replaced + alternative: please use M(na_elementsw_account) +short_description: Manage SolidFire accounts +extends_documentation_fragment: +- netapp.ontap.netapp.solidfire + +author: Sumit Kumar (@timuster) +description: +- Create, destroy, or update accounts on SolidFire + +options: + + state: + description: + - Whether the specified account should exist or not. + required: true + choices: ['present', 'absent'] + + name: + description: + - Unique username for this account. (May be 1 to 64 characters in length). + required: true + + new_name: + description: + - New name for the user account. + + initiator_secret: + description: + - CHAP secret to use for the initiator. Should be 12-16 characters long and impenetrable. + - The CHAP initiator secrets must be unique and cannot be the same as the target CHAP secret. + - If not specified, a random secret is created. + + target_secret: + description: + - CHAP secret to use for the target (mutual CHAP authentication). + - Should be 12-16 characters long and impenetrable. + - The CHAP target secrets must be unique and cannot be the same as the initiator CHAP secret. + - If not specified, a random secret is created. + + attributes: + description: List of Name/Value pairs in JSON object format. + + account_id: + description: + - The ID of the account to manage or update. + + status: + description: + - Status of the account. 
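+        - For example, C(active) or C(locked); the set of accepted values comes from the SolidFire API and is listed here as an assumption.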
+ +''' + +EXAMPLES = """ +- name: Create Account + sf_account_manager: + hostname: "{{ solidfire_hostname }}" + username: "{{ solidfire_username }}" + password: "{{ solidfire_password }}" + state: present + name: TenantA + +- name: Modify Account + sf_account_manager: + hostname: "{{ solidfire_hostname }}" + username: "{{ solidfire_username }}" + password: "{{ solidfire_password }}" + state: present + name: TenantA + new_name: TenantA-Renamed + +- name: Delete Account + sf_account_manager: + hostname: "{{ solidfire_hostname }}" + username: "{{ solidfire_username }}" + password: "{{ solidfire_password }}" + state: absent + name: TenantA-Renamed +""" + +RETURN = """ + +""" +import traceback + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_native +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils + + +HAS_SF_SDK = netapp_utils.has_sf_sdk() + + +class SolidFireAccount(object): + + def __init__(self): + self.argument_spec = netapp_utils.ontap_sf_host_argument_spec() + self.argument_spec.update(dict( + state=dict(required=True, choices=['present', 'absent']), + name=dict(required=True, type='str'), + account_id=dict(required=False, type='int', default=None), + + new_name=dict(required=False, type='str', default=None), + initiator_secret=dict(required=False, type='str'), + target_secret=dict(required=False, type='str'), + attributes=dict(required=False, type='dict'), + status=dict(required=False, type='str'), + )) + + self.module = AnsibleModule( + argument_spec=self.argument_spec, + supports_check_mode=True + ) + + p = self.module.params + + # set up state variables + self.state = p['state'] + self.name = p['name'] + self.account_id = p['account_id'] + + self.new_name = p['new_name'] + self.initiator_secret = p['initiator_secret'] + self.target_secret = p['target_secret'] + self.attributes = p['attributes'] + self.status = p['status'] + + if HAS_SF_SDK is False: + self.module.fail_json(msg="Unable to import the SolidFire Python SDK") + else: + self.sfe = netapp_utils.create_sf_connection(module=self.module) + + def get_account(self): + """ + Return account object if found + + :return: Details about the account. None if not found. 
+ :rtype: dict + """ + account_list = self.sfe.list_accounts() + + for account in account_list.accounts: + if account.username == self.name: + # Update self.account_id: + if self.account_id is not None: + if account.account_id == self.account_id: + return account + else: + self.account_id = account.account_id + return account + return None + + def create_account(self): + try: + self.sfe.add_account(username=self.name, + initiator_secret=self.initiator_secret, + target_secret=self.target_secret, + attributes=self.attributes) + except Exception as e: + self.module.fail_json(msg='Error creating account %s: %s)' % (self.name, to_native(e)), + exception=traceback.format_exc()) + + def delete_account(self): + try: + self.sfe.remove_account(account_id=self.account_id) + + except Exception as e: + self.module.fail_json(msg='Error deleting account %s: %s' % (self.account_id, to_native(e)), + exception=traceback.format_exc()) + + def update_account(self): + try: + self.sfe.modify_account(account_id=self.account_id, + username=self.new_name, + status=self.status, + initiator_secret=self.initiator_secret, + target_secret=self.target_secret, + attributes=self.attributes) + + except Exception as e: + self.module.fail_json(msg='Error updating account %s: %s' % (self.account_id, to_native(e)), + exception=traceback.format_exc()) + + def apply(self): + changed = False + account_exists = False + update_account = False + account_detail = self.get_account() + + if account_detail: + account_exists = True + + if self.state == 'absent': + changed = True + + elif self.state == 'present': + # Check if we need to update the account + + if account_detail.username is not None and self.new_name is not None and \ + account_detail.username != self.new_name: + update_account = True + changed = True + + elif account_detail.status is not None and self.status is not None \ + and account_detail.status != self.status: + update_account = True + changed = True + + elif account_detail.initiator_secret is not None and self.initiator_secret is not None \ + and account_detail.initiator_secret != self.initiator_secret: + update_account = True + changed = True + + elif account_detail.target_secret is not None and self.target_secret is not None \ + and account_detail.target_secret != self.target_secret: + update_account = True + changed = True + + elif account_detail.attributes is not None and self.attributes is not None \ + and account_detail.attributes != self.attributes: + update_account = True + changed = True + else: + if self.state == 'present': + changed = True + + if changed: + if self.module.check_mode: + pass + else: + if self.state == 'present': + if not account_exists: + self.create_account() + elif update_account: + self.update_account() + + elif self.state == 'absent': + self.delete_account() + + self.module.exit_json(changed=changed) + + +def main(): + v = SolidFireAccount() + v.apply() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/storage/netapp/sf_check_connections.py b/plugins/modules/storage/netapp/sf_check_connections.py new file mode 100644 index 0000000000..1e6641d741 --- /dev/null +++ b/plugins/modules/storage/netapp/sf_check_connections.py @@ -0,0 +1,184 @@ +#!/usr/bin/python + +# (c) 2017, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['deprecated'], + 'supported_by': 
'community'} + + +DOCUMENTATION = ''' + +module: sf_check_connections +deprecated: + removed_in: "2.11" + why: This Module has been replaced + alternative: please use M(na_elementsw_check_connections) +short_description: Check connectivity to MVIP and SVIP. +extends_documentation_fragment: +- netapp.ontap.netapp.solidfire + +author: Sumit Kumar (@timuster) +description: +- Used to test the management connection to the cluster. +- The test pings the MVIP and SVIP, and executes a simple API method to verify connectivity. + +options: + + skip: + description: + - Skip checking connection to SVIP or MVIP. + choices: ['svip', 'mvip'] + + mvip: + description: + - Optionally, use to test connection of a different MVIP. + - This is not needed to test the connection to the target cluster. + + svip: + description: + - Optionally, use to test connection of a different SVIP. + - This is not needed to test the connection to the target cluster. + +''' + + +EXAMPLES = """ + - name: Check connections to MVIP and SVIP + sf_check_connections: + hostname: "{{ solidfire_hostname }}" + username: "{{ solidfire_username }}" + password: "{{ solidfire_password }}" +""" + +RETURN = """ + +""" +import traceback + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_native +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils + + +HAS_SF_SDK = netapp_utils.has_sf_sdk() + + +class SolidFireConnection(object): + + def __init__(self): + self.argument_spec = netapp_utils.ontap_sf_host_argument_spec() + self.argument_spec.update(dict( + skip=dict(required=False, type='str', default=None, choices=['mvip', 'svip']), + mvip=dict(required=False, type='str', default=None), + svip=dict(required=False, type='str', default=None) + )) + + self.module = AnsibleModule( + argument_spec=self.argument_spec, + supports_check_mode=True + ) + + p = self.module.params + + # set up state variables + self.skip = p['skip'] + self.mvip = p['mvip'] + self.svip = p['svip'] + + if HAS_SF_SDK is False: + self.module.fail_json(msg="Unable to import the SolidFire Python SDK") + else: + self.sfe = netapp_utils.ElementFactory.create(p['hostname'], p['username'], p['password'], port=442) + + def check_mvip_connection(self): + """ + Check connection to MVIP + + :return: true if connection was successful, false otherwise. + :rtype: bool + """ + try: + test = self.sfe.test_connect_mvip(mvip=self.mvip) + result = test.details.connected + # Todo - Log details about the test + return result + + except Exception as e: + self.module.fail_json(msg='Error checking connection to MVIP: %s' % to_native(e), exception=traceback.format_exc()) + return False + + def check_svip_connection(self): + """ + Check connection to SVIP + + :return: true if connection was successful, false otherwise. + :rtype: bool + """ + try: + test = self.sfe.test_connect_svip(svip=self.svip) + result = test.details.connected + # Todo - Log details about the test + return result + + except Exception as e: + self.module.fail_json(msg='Error checking connection to SVIP: %s' % to_native(e), exception=traceback.format_exc()) + return False + + def check(self): + + failed = True + msg = '' + + if self.skip is None: + mvip_connection_established = self.check_mvip_connection() + svip_connection_established = self.check_svip_connection() + + # Set failed and msg + if not mvip_connection_established: + failed = True + msg = 'Connection to MVIP failed.' 
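+            # When skip is unset, both connection checks above have already
+            # run; if both fail, only the MVIP failure is reported, since it
+            # is evaluated first in this if/elif chain.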
+            elif not svip_connection_established:
+                failed = True
+                msg = 'Connection to SVIP failed.'
+            else:
+                failed = False
+
+        elif self.skip == 'mvip':
+            svip_connection_established = self.check_svip_connection()
+
+            # Set failed and msg
+            if not svip_connection_established:
+                failed = True
+                msg = 'Connection to SVIP failed.'
+            else:
+                failed = False
+
+        elif self.skip == 'svip':
+            mvip_connection_established = self.check_mvip_connection()
+
+            # Set failed and msg
+            if not mvip_connection_established:
+                failed = True
+                msg = 'Connection to MVIP failed.'
+            else:
+                failed = False
+
+        if failed:
+            self.module.fail_json(msg=msg)
+        else:
+            self.module.exit_json()
+
+
+def main():
+    v = SolidFireConnection()
+    v.check()
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/storage/netapp/sf_snapshot_schedule_manager.py b/plugins/modules/storage/netapp/sf_snapshot_schedule_manager.py
new file mode 100644
index 0000000000..b9fe50263d
--- /dev/null
+++ b/plugins/modules/storage/netapp/sf_snapshot_schedule_manager.py
@@ -0,0 +1,389 @@
+#!/usr/bin/python
+# (c) 2017, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['deprecated'],
+                    'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+
+module: sf_snapshot_schedule_manager
+deprecated:
+  removed_in: "2.11"
+  why: This module has been replaced.
+  alternative: Use M(na_elementsw_snapshot_schedule) instead.
+short_description: Manage SolidFire snapshot schedules
+extends_documentation_fragment:
+- netapp.ontap.netapp.solidfire
+
+author: Sumit Kumar (@timuster)
+description:
+- Create, destroy, or update snapshot schedules on SolidFire.
+
+options:
+
+    state:
+        description:
+        - Whether the specified schedule should exist or not.
+        required: true
+        choices: ['present', 'absent']
+
+    paused:
+        description:
+        - Pause / Resume a schedule.
+        required: false
+
+    recurring:
+        description:
+        - Should the schedule recur?
+        required: false
+
+    time_interval_days:
+        description: Time interval in days.
+        required: false
+        default: 1
+
+    time_interval_hours:
+        description: Time interval in hours.
+        required: false
+        default: 0
+
+    time_interval_minutes:
+        description: Time interval in minutes.
+        required: false
+        default: 0
+
+    name:
+        description:
+        - Name for the snapshot schedule.
+        required: true
+
+    snapshot_name:
+        description:
+        - Name for the created snapshots.
+        required: false
+
+    volumes:
+        description:
+        - Volume IDs that you want to set the snapshot schedule for.
+        - At least 1 volume ID is required for creating a new schedule.
+        - Required when C(state=present).
+        required: false
+
+    retention:
+        description:
+        - Retention period for the snapshot.
+        - Format is 'HH:mm:ss'.
+        required: false
+
+    schedule_id:
+        description:
+        - The schedule ID for the schedule that you want to update or delete.
+        required: false
+
+    starting_date:
+        description:
+        - Starting date for the schedule.
+        - Required when C(state=present).
+        - Use two dashes ('--') between the date fields, as shown in the format below; otherwise the request may fail with a C(TypeError, is not JSON serializable) error.
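+        - The module converts the double dashes back to single dashes before the value is sent to the API.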
+ - "Format: C(2016--12--01T00:00:00Z)" + required: false +''' + +EXAMPLES = """ + - name: Create Snapshot schedule + sf_snapshot_schedule_manager: + hostname: "{{ solidfire_hostname }}" + username: "{{ solidfire_username }}" + password: "{{ solidfire_password }}" + state: present + name: Schedule_A + time_interval_days: 1 + starting_date: 2016--12--01T00:00:00Z + volumes: 7 + + - name: Update Snapshot schedule + sf_snapshot_schedule_manager: + hostname: "{{ solidfire_hostname }}" + username: "{{ solidfire_username }}" + password: "{{ solidfire_password }}" + state: present + schedule_id: 6 + recurring: True + snapshot_name: AnsibleSnapshots + + - name: Delete Snapshot schedule + sf_snapshot_schedule_manager: + hostname: "{{ solidfire_hostname }}" + username: "{{ solidfire_username }}" + password: "{{ solidfire_password }}" + state: absent + schedule_id: 6 +""" + +RETURN = """ + +schedule_id: + description: Schedule ID of the newly created schedule + returned: success + type: str +""" +import traceback + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_native +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils + + +HAS_SF_SDK = netapp_utils.has_sf_sdk() + + +class SolidFireSnapShotSchedule(object): + + def __init__(self): + + self.argument_spec = netapp_utils.ontap_sf_host_argument_spec() + self.argument_spec.update(dict( + state=dict(required=True, choices=['present', 'absent']), + name=dict(required=True, type='str'), + + time_interval_days=dict(required=False, type='int', default=1), + time_interval_hours=dict(required=False, type='int', default=0), + time_interval_minutes=dict(required=False, type='int', default=0), + + paused=dict(required=False, type='bool'), + recurring=dict(required=False, type='bool'), + + starting_date=dict(type='str'), + + snapshot_name=dict(required=False, type='str'), + volumes=dict(required=False, type='list'), + retention=dict(required=False, type='str'), + + schedule_id=dict(type='int'), + )) + + self.module = AnsibleModule( + argument_spec=self.argument_spec, + required_if=[ + ('state', 'present', ['starting_date', 'volumes']) + ], + supports_check_mode=True + ) + + p = self.module.params + + # set up state variables + self.state = p['state'] + self.name = p['name'] + + # self.interval = p['interval'] + + self.time_interval_days = p['time_interval_days'] + self.time_interval_hours = p['time_interval_hours'] + self.time_interval_minutes = p['time_interval_minutes'] + + self.paused = p['paused'] + self.recurring = p['recurring'] + + self.starting_date = p['starting_date'] + if self.starting_date is not None: + self.starting_date = self.starting_date.replace("--", "-") + + self.snapshot_name = p['snapshot_name'] + self.volumes = p['volumes'] + self.retention = p['retention'] + + self.schedule_id = p['schedule_id'] + + self.create_schedule_result = None + + if HAS_SF_SDK is False: + self.module.fail_json(msg="Unable to import the SolidFire Python SDK") + else: + self.sfe = netapp_utils.create_sf_connection(module=self.module) + + def get_schedule(self): + schedule_list = self.sfe.list_schedules() + for schedule in schedule_list.schedules: + if schedule.name == self.name: + # Update self.schedule_id: + if self.schedule_id is not None: + if schedule.schedule_id == self.schedule_id: + return schedule + else: + self.schedule_id = schedule.schedule_id + return schedule + + return None + + def create_schedule(self): + + try: + sched = netapp_utils.Schedule() + # if self.interval == 
'time_interval': + sched.frequency = netapp_utils.TimeIntervalFrequency(days=self.time_interval_days, + hours=self.time_interval_hours, + minutes=self.time_interval_minutes) + + # Create schedule + sched.name = self.name + sched.schedule_info = netapp_utils.ScheduleInfo( + volume_ids=self.volumes, + snapshot_name=self.snapshot_name, + retention=self.retention + ) + sched.paused = self.paused + sched.recurring = self.recurring + sched.starting_date = self.starting_date + + self.create_schedule_result = self.sfe.create_schedule(schedule=sched) + + except Exception as e: + self.module.fail_json(msg='Error creating schedule %s: %s' % (self.name, to_native(e)), + exception=traceback.format_exc()) + + def delete_schedule(self): + + try: + get_schedule_result = self.sfe.get_schedule(schedule_id=self.schedule_id) + sched = get_schedule_result.schedule + sched.to_be_deleted = True + self.sfe.modify_schedule(schedule=sched) + + except Exception as e: + self.module.fail_json(msg='Error deleting schedule %s: %s' % (self.name, to_native(e)), + exception=traceback.format_exc()) + + def update_schedule(self): + + try: + get_schedule_result = self.sfe.get_schedule(schedule_id=self.schedule_id) + sched = get_schedule_result.schedule + + # Update schedule properties + + # if self.interval == 'time_interval': + temp_frequency = netapp_utils.TimeIntervalFrequency(days=self.time_interval_days, + hours=self.time_interval_hours, + minutes=self.time_interval_minutes) + + if sched.frequency.days != temp_frequency.days or \ + sched.frequency.hours != temp_frequency.hours \ + or sched.frequency.minutes != temp_frequency.minutes: + sched.frequency = temp_frequency + + sched.name = self.name + if self.volumes is not None: + sched.schedule_info.volume_ids = self.volumes + if self.retention is not None: + sched.schedule_info.retention = self.retention + if self.snapshot_name is not None: + sched.schedule_info.snapshot_name = self.snapshot_name + if self.paused is not None: + sched.paused = self.paused + if self.recurring is not None: + sched.recurring = self.recurring + if self.starting_date is not None: + sched.starting_date = self.starting_date + + # Make API call + self.sfe.modify_schedule(schedule=sched) + + except Exception as e: + self.module.fail_json(msg='Error updating schedule %s: %s' % (self.name, to_native(e)), + exception=traceback.format_exc()) + + def apply(self): + changed = False + schedule_exists = False + update_schedule = False + schedule_detail = self.get_schedule() + + if schedule_detail: + schedule_exists = True + + if self.state == 'absent': + changed = True + + elif self.state == 'present': + # Check if we need to update the account + + if self.retention is not None and schedule_detail.schedule_info.retention != self.retention: + update_schedule = True + changed = True + + elif schedule_detail.name != self.name: + update_schedule = True + changed = True + + elif self.snapshot_name is not None and schedule_detail.schedule_info.snapshot_name != self.snapshot_name: + update_schedule = True + changed = True + + elif self.volumes is not None and schedule_detail.schedule_info.volume_ids != self.volumes: + update_schedule = True + changed = True + + elif self.paused is not None and schedule_detail.paused != self.paused: + update_schedule = True + changed = True + + elif self.recurring is not None and schedule_detail.recurring != self.recurring: + update_schedule = True + changed = True + + elif self.starting_date is not None and schedule_detail.starting_date != self.starting_date: + update_schedule = 
True + changed = True + + elif self.time_interval_minutes is not None or self.time_interval_hours is not None \ + or self.time_interval_days is not None: + + temp_frequency = netapp_utils.TimeIntervalFrequency(days=self.time_interval_days, + hours=self.time_interval_hours, + minutes=self.time_interval_minutes) + + if schedule_detail.frequency.days != temp_frequency.days or \ + schedule_detail.frequency.hours != temp_frequency.hours \ + or schedule_detail.frequency.minutes != temp_frequency.minutes: + update_schedule = True + changed = True + + else: + if self.state == 'present': + changed = True + + if changed: + if self.module.check_mode: + # Skip changes + pass + else: + if self.state == 'present': + if not schedule_exists: + self.create_schedule() + elif update_schedule: + self.update_schedule() + + elif self.state == 'absent': + self.delete_schedule() + + if self.create_schedule_result is not None: + self.module.exit_json(changed=changed, schedule_id=self.create_schedule_result.schedule_id) + else: + self.module.exit_json(changed=changed) + + +def main(): + v = SolidFireSnapShotSchedule() + v.apply() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/storage/netapp/sf_volume_access_group_manager.py b/plugins/modules/storage/netapp/sf_volume_access_group_manager.py new file mode 100644 index 0000000000..8ce3f2d34f --- /dev/null +++ b/plugins/modules/storage/netapp/sf_volume_access_group_manager.py @@ -0,0 +1,249 @@ +#!/usr/bin/python + +# (c) 2017, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['deprecated'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' + +module: sf_volume_access_group_manager +deprecated: + removed_in: "2.11" + why: This Module has been replaced + alternative: please use M(na_elementsw_access_group) +short_description: Manage SolidFire Volume Access Groups +extends_documentation_fragment: +- netapp.ontap.netapp.solidfire + +author: Sumit Kumar (@timuster) +description: +- Create, destroy, or update volume access groups on SolidFire + +options: + + state: + description: + - Whether the specified volume access group should exist or not. + required: true + choices: ['present', 'absent'] + + name: + description: + - Name of the volume access group. It is not required to be unique, but recommended. + required: true + + initiators: + description: + - List of initiators to include in the volume access group. If unspecified, the access group will start out without configured initiators. + + volumes: + description: + - List of volumes to initially include in the volume access group. If unspecified, the access group will start without any volumes. + + virtual_network_id: + description: + - The ID of the SolidFire Virtual Network ID to associate the volume access group with. + + virtual_network_tags: + description: + - The ID of the VLAN Virtual Network Tag to associate the volume access group with. + + attributes: + description: List of Name/Value pairs in JSON object format. + + volume_access_group_id: + description: + - The ID of the volume access group to modify or delete. 
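+        - If not specified, the module looks up the volume access group ID by matching I(name) against the existing groups.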
+ +''' + +EXAMPLES = """ + - name: Create Volume Access Group + sf_volume_access_group_manager: + hostname: "{{ solidfire_hostname }}" + username: "{{ solidfire_username }}" + password: "{{ solidfire_password }}" + state: present + name: AnsibleVolumeAccessGroup + volumes: [7,8] + + - name: Modify Volume Access Group + sf_volume_access_group_manager: + hostname: "{{ solidfire_hostname }}" + username: "{{ solidfire_username }}" + password: "{{ solidfire_password }}" + state: present + volume_access_group_id: 1 + name: AnsibleVolumeAccessGroup-Renamed + attributes: {"volumes": [1,2,3], "virtual_network_id": 12345} + + - name: Delete Volume Access Group + sf_volume_access_group_manager: + hostname: "{{ solidfire_hostname }}" + username: "{{ solidfire_username }}" + password: "{{ solidfire_password }}" + state: absent + volume_access_group_id: 1 +""" + +RETURN = """ + + +""" +import traceback + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_native +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils + +HAS_SF_SDK = netapp_utils.has_sf_sdk() + + +class SolidFireVolumeAccessGroup(object): + + def __init__(self): + + self.argument_spec = netapp_utils.ontap_sf_host_argument_spec() + self.argument_spec.update(dict( + state=dict(required=True, choices=['present', 'absent']), + name=dict(required=True, type='str'), + volume_access_group_id=dict(required=False, type='int', default=None), + + initiators=dict(required=False, type='list', default=None), + volumes=dict(required=False, type='list', default=None), + virtual_network_id=dict(required=False, type='list', default=None), + virtual_network_tags=dict(required=False, type='list', default=None), + attributes=dict(required=False, type='dict', default=None), + )) + + self.module = AnsibleModule( + argument_spec=self.argument_spec, + supports_check_mode=True + ) + + p = self.module.params + + # set up state variables + self.state = p['state'] + self.name = p['name'] + self.volume_access_group_id = p['volume_access_group_id'] + + self.initiators = p['initiators'] + self.volumes = p['volumes'] + self.virtual_network_id = p['virtual_network_id'] + self.virtual_network_tags = p['virtual_network_tags'] + self.attributes = p['attributes'] + + if HAS_SF_SDK is False: + self.module.fail_json(msg="Unable to import the SolidFire Python SDK") + else: + self.sfe = netapp_utils.create_sf_connection(module=self.module) + + def get_volume_access_group(self): + access_groups_list = self.sfe.list_volume_access_groups() + + for group in access_groups_list.volume_access_groups: + if group.name == self.name: + # Update self.volume_access_group_id: + if self.volume_access_group_id is not None: + if group.volume_access_group_id == self.volume_access_group_id: + return group + else: + self.volume_access_group_id = group.volume_access_group_id + return group + return None + + def create_volume_access_group(self): + try: + self.sfe.create_volume_access_group(name=self.name, + initiators=self.initiators, + volumes=self.volumes, + virtual_network_id=self.virtual_network_id, + virtual_network_tags=self.virtual_network_tags, + attributes=self.attributes) + except Exception as e: + self.module.fail_json(msg="Error creating volume access group %s: %s" % + (self.name, to_native(e)), exception=traceback.format_exc()) + + def delete_volume_access_group(self): + try: + self.sfe.delete_volume_access_group(volume_access_group_id=self.volume_access_group_id) + + except Exception as e: + 
self.module.fail_json(msg="Error deleting volume access group %s: %s" % + (self.volume_access_group_id, to_native(e)), + exception=traceback.format_exc()) + + def update_volume_access_group(self): + try: + self.sfe.modify_volume_access_group(volume_access_group_id=self.volume_access_group_id, + virtual_network_id=self.virtual_network_id, + virtual_network_tags=self.virtual_network_tags, + name=self.name, + initiators=self.initiators, + volumes=self.volumes, + attributes=self.attributes) + except Exception as e: + self.module.fail_json(msg="Error updating volume access group %s: %s" % + (self.volume_access_group_id, to_native(e)), exception=traceback.format_exc()) + + def apply(self): + changed = False + group_exists = False + update_group = False + group_detail = self.get_volume_access_group() + + if group_detail: + group_exists = True + + if self.state == 'absent': + changed = True + + elif self.state == 'present': + # Check if we need to update the group + if self.volumes is not None and group_detail.volumes != self.volumes: + update_group = True + changed = True + elif self.initiators is not None and group_detail.initiators != self.initiators: + update_group = True + changed = True + elif self.virtual_network_id is not None or self.virtual_network_tags is not None or \ + self.attributes is not None: + update_group = True + changed = True + + else: + if self.state == 'present': + changed = True + + if changed: + if self.module.check_mode: + pass + else: + if self.state == 'present': + if not group_exists: + self.create_volume_access_group() + elif update_group: + self.update_volume_access_group() + + elif self.state == 'absent': + self.delete_volume_access_group() + + self.module.exit_json(changed=changed) + + +def main(): + v = SolidFireVolumeAccessGroup() + v.apply() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/storage/netapp/sf_volume_manager.py b/plugins/modules/storage/netapp/sf_volume_manager.py new file mode 100644 index 0000000000..4ebf9fa0d5 --- /dev/null +++ b/plugins/modules/storage/netapp/sf_volume_manager.py @@ -0,0 +1,320 @@ +#!/usr/bin/python + +# (c) 2017, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['deprecated'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' + +module: sf_volume_manager +deprecated: + removed_in: "2.11" + why: This Module has been replaced + alternative: please use M(na_elementsw_volume) +short_description: Manage SolidFire volumes +extends_documentation_fragment: +- netapp.ontap.netapp.solidfire + +author: Sumit Kumar (@timuster) +description: +- Create, destroy, or update volumes on SolidFire + +options: + + state: + description: + - Whether the specified volume should exist or not. + required: true + choices: ['present', 'absent'] + + name: + description: + - The name of the volume to manage. + required: true + + account_id: + description: + - Account ID for the owner of this volume. + required: true + + 512emulation: + description: + - Should the volume provide 512-byte sector emulation? + - Required when C(state=present) + + qos: + description: Initial quality of service settings for this volume. Configure as dict in playbooks. + + attributes: + description: A YAML dictionary of attributes that you would like to apply on this volume. 
+ + volume_id: + description: + - The ID of the volume to manage or update. + - In order to create multiple volumes with the same name, but different volume_ids, please declare the I(volume_id) + parameter with an arbitrary value. However, the specified volume_id will not be assigned to the newly created + volume (since it's an auto-generated property). + + size: + description: + - The size of the volume in (size_unit). + - Required when C(state = present). + + size_unit: + description: + - The unit used to interpret the size parameter. + choices: ['bytes', 'b', 'kb', 'mb', 'gb', 'tb', 'pb', 'eb', 'zb', 'yb'] + default: 'gb' + + access: + description: + - "Access allowed for the volume." + - "readOnly: Only read operations are allowed." + - "readWrite: Reads and writes are allowed." + - "locked: No reads or writes are allowed." + - "replicationTarget: Identify a volume as the target volume for a paired set of volumes. If the volume is not paired, the access status is locked." + - "If unspecified, the access settings of the clone will be the same as the source." + choices: ['readOnly', 'readWrite', 'locked', 'replicationTarget'] + +''' + +EXAMPLES = """ + - name: Create Volume + sf_volume_manager: + hostname: "{{ solidfire_hostname }}" + username: "{{ solidfire_username }}" + password: "{{ solidfire_password }}" + state: present + name: AnsibleVol + qos: {minIOPS: 1000, maxIOPS: 20000, burstIOPS: 50000} + account_id: 3 + enable512e: False + size: 1 + size_unit: gb + + - name: Update Volume + sf_volume_manager: + hostname: "{{ solidfire_hostname }}" + username: "{{ solidfire_username }}" + password: "{{ solidfire_password }}" + state: present + name: AnsibleVol + account_id: 3 + access: readWrite + + - name: Delete Volume + sf_volume_manager: + hostname: "{{ solidfire_hostname }}" + username: "{{ solidfire_username }}" + password: "{{ solidfire_password }}" + state: absent + name: AnsibleVol + account_id: 2 +""" + +RETURN = """ + +msg: + description: Success message + returned: success + type: str + +""" + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_native +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils + +HAS_SF_SDK = netapp_utils.has_sf_sdk() + + +class SolidFireVolume(object): + + def __init__(self): + + self._size_unit_map = netapp_utils.SF_BYTE_MAP + + self.argument_spec = netapp_utils.ontap_sf_host_argument_spec() + self.argument_spec.update(dict( + state=dict(required=True, choices=['present', 'absent']), + name=dict(required=True, type='str'), + account_id=dict(required=True, type='int'), + + enable512e=dict(type='bool', aliases=['512emulation']), + qos=dict(required=False, type='dict', default=None), + attributes=dict(required=False, type='dict', default=None), + + volume_id=dict(type='int', default=None), + size=dict(type='int'), + size_unit=dict(default='gb', + choices=['bytes', 'b', 'kb', 'mb', 'gb', 'tb', + 'pb', 'eb', 'zb', 'yb'], type='str'), + + access=dict(required=False, type='str', default=None, choices=['readOnly', 'readWrite', + 'locked', 'replicationTarget']), + + )) + + self.module = AnsibleModule( + argument_spec=self.argument_spec, + required_if=[ + ('state', 'present', ['size', 'enable512e']) + ], + supports_check_mode=True + ) + + p = self.module.params + + # set up state variables + self.state = p['state'] + self.name = p['name'] + self.account_id = p['account_id'] + self.enable512e = p['enable512e'] + self.qos = p['qos'] + self.attributes = p['attributes'] + + self.volume_id 
= p['volume_id'] + self.size_unit = p['size_unit'] + if p['size'] is not None: + self.size = p['size'] * self._size_unit_map[self.size_unit] + else: + self.size = None + self.access = p['access'] + if HAS_SF_SDK is False: + self.module.fail_json(msg="Unable to import the SolidFire Python SDK") + else: + self.sfe = netapp_utils.create_sf_connection(module=self.module) + + def get_volume(self): + """ + Return volume object if found + + :return: Details about the volume. None if not found. + :rtype: dict + """ + volume_list = self.sfe.list_volumes_for_account(account_id=self.account_id) + for volume in volume_list.volumes: + if volume.name == self.name: + # Update self.volume_id + if self.volume_id is not None: + if volume.volume_id == self.volume_id and str(volume.delete_time) == "": + return volume + else: + if str(volume.delete_time) == "": + self.volume_id = volume.volume_id + return volume + return None + + def create_volume(self): + try: + self.sfe.create_volume(name=self.name, + account_id=self.account_id, + total_size=self.size, + enable512e=self.enable512e, + qos=self.qos, + attributes=self.attributes) + + except Exception as err: + self.module.fail_json(msg="Error provisioning volume %s of size %s" % (self.name, self.size), + exception=to_native(err)) + + def delete_volume(self): + try: + self.sfe.delete_volume(volume_id=self.volume_id) + + except Exception as err: + self.module.fail_json(msg="Error deleting volume %s" % self.volume_id, + exception=to_native(err)) + + def update_volume(self): + try: + self.sfe.modify_volume(self.volume_id, + account_id=self.account_id, + access=self.access, + qos=self.qos, + total_size=self.size, + attributes=self.attributes) + + except Exception as err: + self.module.fail_json(msg="Error updating volume %s" % self.name, + exception=to_native(err)) + + def apply(self): + changed = False + volume_exists = False + update_volume = False + volume_detail = self.get_volume() + + if volume_detail: + volume_exists = True + + if self.state == 'absent': + # Checking for state change(s) here, and applying it later in the code allows us to support + # check_mode + changed = True + + elif self.state == 'present': + if volume_detail.access is not None and self.access is not None and volume_detail.access != self.access: + update_volume = True + changed = True + + elif volume_detail.account_id is not None and self.account_id is not None \ + and volume_detail.account_id != self.account_id: + update_volume = True + changed = True + + elif volume_detail.qos is not None and self.qos is not None and volume_detail.qos != self.qos: + update_volume = True + changed = True + + elif volume_detail.total_size is not None and volume_detail.total_size != self.size: + size_difference = abs(float(volume_detail.total_size - self.size)) + # Change size only if difference is bigger than 0.001 + if size_difference / self.size > 0.001: + update_volume = True + changed = True + + elif volume_detail.attributes is not None and self.attributes is not None and \ + volume_detail.attributes != self.attributes: + update_volume = True + changed = True + else: + if self.state == 'present': + changed = True + + result_message = "" + + if changed: + if self.module.check_mode: + result_message = "Check mode, skipping changes" + else: + if self.state == 'present': + if not volume_exists: + self.create_volume() + result_message = "Volume created" + elif update_volume: + self.update_volume() + result_message = "Volume updated" + + elif self.state == 'absent': + self.delete_volume() + result_message = 
"Volume deleted" + + self.module.exit_json(changed=changed, msg=result_message) + + +def main(): + v = SolidFireVolume() + v.apply() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/storage/purestorage/purefa_facts.py b/plugins/modules/storage/purestorage/purefa_facts.py new file mode 100644 index 0000000000..90eea6bec7 --- /dev/null +++ b/plugins/modules/storage/purestorage/purefa_facts.py @@ -0,0 +1,862 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2018, Simon Dodsley (simon@purestorage.com) +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['deprecated'], + 'supported_by': 'community'} + +DOCUMENTATION = r''' +--- +module: purefa_facts +deprecated: + removed_in: '2.13' + why: Deprecated in favor of C(_info) module. + alternative: Use M(purefa_info) instead. +short_description: Collect facts from Pure Storage FlashArray +description: + - Collect facts information from a Pure Storage Flasharray running the + Purity//FA operating system. By default, the module will collect basic + fact information including hosts, host groups, protection + groups and volume counts. Additional fact information can be collected + based on the configured set of arguments. +author: + - Pure Storage ansible Team (@sdodsley) +options: + gather_subset: + description: + - When supplied, this argument will define the facts to be collected. + Possible values for this include all, minimum, config, performance, + capacity, network, subnet, interfaces, hgroups, pgroups, hosts, + admins, volumes, snapshots, pods, vgroups, offload, apps and arrays. + type: list + required: false + default: minimum +extends_documentation_fragment: +- community.general.purestorage.fa + +''' + +EXAMPLES = r''' +- name: collect default set of facts + purefa_facts: + fa_url: 10.10.10.2 + api_token: e31060a7-21fc-e277-6240-25983c6c4592 + +- name: collect configuration and capacity facts + purefa_facts: + gather_subset: + - config + - capacity + fa_url: 10.10.10.2 + api_token: e31060a7-21fc-e277-6240-25983c6c4592 + +- name: collect all facts + purefa_facts: + gather_subset: + - all + fa_url: 10.10.10.2 + api_token: e31060a7-21fc-e277-6240-25983c6c4592 +''' + +RETURN = r''' +ansible_facts: + description: Returns the facts collected from the FlashArray + returned: always + type: complex + sample: { + "capacity": {}, + "config": { + "directory_service": { + "array_admin_group": null, + "base_dn": null, + "bind_password": null, + "bind_user": null, + "check_peer": false, + "enabled": false, + "group_base": null, + "readonly_group": null, + "storage_admin_group": null, + "uri": [] + }, + "dns": { + "domain": "domain.com", + "nameservers": [ + "8.8.8.8", + "8.8.4.4" + ] + }, + "ntp": [ + "0.ntp.pool.org", + "1.ntp.pool.org", + "2.ntp.pool.org", + "3.ntp.pool.org" + ], + "smtp": [ + { + "enabled": true, + "name": "alerts@acme.com" + }, + { + "enabled": true, + "name": "user@acme.com" + } + ], + "snmp": [ + { + "auth_passphrase": null, + "auth_protocol": null, + "community": null, + "host": "localhost", + "name": "localhost", + "privacy_passphrase": null, + "privacy_protocol": null, + "user": null, + "version": "v2c" + } + ], + "ssl_certs": { + "country": null, + "email": null, + "issued_by": "", + "issued_to": "", + "key_size": 2048, + "locality": null, + "organization": "Acme Storage, Inc.", + "organizational_unit": "Acme 
Storage, Inc.", + "state": null, + "status": "self-signed", + "valid_from": "2017-08-11T23:09:06Z", + "valid_to": "2027-08-09T23:09:06Z" + }, + "syslog": [] + }, + "default": { + "array_name": "flasharray1", + "connected_arrays": 1, + "hostgroups": 0, + "hosts": 10, + "pods": 3, + "protection_groups": 1, + "purity_version": "5.0.4", + "snapshots": 1, + "volume_groups": 2 + }, + "hgroups": {}, + "hosts": { + "host1": { + "hgroup": null, + "iqn": [ + "iqn.1994-05.com.redhat:2f6f5715a533" + ], + "wwn": [] + }, + "host2": { + "hgroup": null, + "iqn": [ + "iqn.1994-05.com.redhat:d17fb13fe0b" + ], + "wwn": [] + }, + "host3": { + "hgroup": null, + "iqn": [ + "iqn.1994-05.com.redhat:97b1351bfb2" + ], + "wwn": [] + }, + "host4": { + "hgroup": null, + "iqn": [ + "iqn.1994-05.com.redhat:dd84e9a7b2cb" + ], + "wwn": [ + "10000000C96C48D1", + "10000000C96C48D2" + ] + } + }, + "interfaces": { + "CT0.ETH4": "iqn.2010-06.com.purestorage:flasharray.2111b767484e4682", + "CT0.ETH5": "iqn.2010-06.com.purestorage:flasharray.2111b767484e4682", + "CT1.ETH4": "iqn.2010-06.com.purestorage:flasharray.2111b767484e4682", + "CT1.ETH5": "iqn.2010-06.com.purestorage:flasharray.2111b767484e4682" + }, + "network": { + "ct0.eth0": { + "address": "10.10.10.10", + "gateway": "10.10.10.1", + "hwaddr": "ec:f4:bb:c8:8a:04", + "mtu": 1500, + "netmask": "255.255.255.0", + "services": [ + "management" + ], + "speed": 1000000000 + }, + "ct0.eth2": { + "address": "10.10.10.11", + "gateway": null, + "hwaddr": "ec:f4:bb:c8:8a:00", + "mtu": 1500, + "netmask": "255.255.255.0", + "services": [ + "replication" + ], + "speed": 10000000000 + }, + "ct0.eth3": { + "address": "10.10.10.12", + "gateway": null, + "hwaddr": "ec:f4:bb:c8:8a:02", + "mtu": 1500, + "netmask": "255.255.255.0", + "services": [ + "replication" + ], + "speed": 10000000000 + }, + "ct0.eth4": { + "address": "10.10.10.13", + "gateway": null, + "hwaddr": "90:e2:ba:83:79:0c", + "mtu": 1500, + "netmask": "255.255.255.0", + "services": [ + "iscsi" + ], + "speed": 10000000000 + }, + "ct0.eth5": { + "address": "10.10.10.14", + "gateway": null, + "hwaddr": "90:e2:ba:83:79:0d", + "mtu": 1500, + "netmask": "255.255.255.0", + "services": [ + "iscsi" + ], + "speed": 10000000000 + }, + "vir0": { + "address": "10.10.10.20", + "gateway": "10.10.10.1", + "hwaddr": "fe:ba:e9:e7:6b:0f", + "mtu": 1500, + "netmask": "255.255.255.0", + "services": [ + "management" + ], + "speed": 1000000000 + } + }, + "offload": { + "nfstarget": { + "address": "10.0.2.53", + "mount_options": null, + "mount_point": "/offload", + "protocol": "nfs", + "status": "scanning" + } + }, + "performance": { + "input_per_sec": 8191, + "output_per_sec": 0, + "queue_depth": 1, + "reads_per_sec": 0, + "san_usec_per_write_op": 15, + "usec_per_read_op": 0, + "usec_per_write_op": 642, + "writes_per_sec": 2 + }, + "pgroups": { + "consisgroup-07b6b983-986e-46f5-bdc3-deaa3dbb299e-cinder": { + "hgroups": null, + "hosts": null, + "source": "host1", + "targets": null, + "volumes": [ + "volume-1" + ] + } + }, + "pods": { + "srm-pod": { + "arrays": [ + { + "array_id": "52595f7e-b460-4b46-8851-a5defd2ac192", + "mediator_status": "online", + "name": "sn1-405-c09-37", + "status": "online" + }, + { + "array_id": "a2c32301-f8a0-4382-949b-e69b552ce8ca", + "mediator_status": "online", + "name": "sn1-420-c11-31", + "status": "online" + } + ], + "source": null + } + }, + "snapshots": { + "consisgroup.cgsnapshot": { + "created": "2018-03-28T09:34:02Z", + "size": 13958643712, + "source": "volume-1" + } + }, + "subnet": {}, + "vgroups": { + 
"vvol--vSphere-HA-0ffc7dd1-vg": { + "volumes": [ + "vvol--vSphere-HA-0ffc7dd1-vg/Config-aad5d7c6" + ] + } + }, + "volumes": { + "ansible_data": { + "bandwidth": null, + "hosts": [ + [ + "host1", + 1 + ] + ], + "serial": "43BE47C12334399B000114A6", + "size": 1099511627776, + "source": null + } + } + } +''' + + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.pure import get_system, purefa_argument_spec + + +ADMIN_API_VERSION = '1.14' +S3_REQUIRED_API_VERSION = '1.16' +LATENCY_REQUIRED_API_VERSION = '1.16' +AC_REQUIRED_API_VERSION = '1.14' +CAP_REQUIRED_API_VERSION = '1.6' +SAN_REQUIRED_API_VERSION = '1.10' +NVME_API_VERSION = '1.16' +PREFERRED_API_VERSION = '1.15' +CONN_STATUS_API_VERSION = '1.17' + + +def generate_default_dict(array): + default_facts = {} + defaults = array.get() + api_version = array._list_available_rest_versions() + if AC_REQUIRED_API_VERSION in api_version: + default_facts['volume_groups'] = len(array.list_vgroups()) + default_facts['connected_arrays'] = len(array.list_array_connections()) + default_facts['pods'] = len(array.list_pods()) + default_facts['connection_key'] = array.get(connection_key=True)['connection_key'] + hosts = array.list_hosts() + admins = array.list_admins() + snaps = array.list_volumes(snap=True, pending=True) + pgroups = array.list_pgroups(pending=True) + hgroups = array.list_hgroups() + # Old FA arrays only report model from the primary controller + ct0_model = array.get_hardware('CT0')['model'] + if ct0_model: + model = ct0_model + else: + ct1_model = array.get_hardware('CT1')['model'] + model = ct1_model + default_facts['array_model'] = model + default_facts['array_name'] = defaults['array_name'] + default_facts['purity_version'] = defaults['version'] + default_facts['hosts'] = len(hosts) + default_facts['snapshots'] = len(snaps) + default_facts['protection_groups'] = len(pgroups) + default_facts['hostgroups'] = len(hgroups) + default_facts['admins'] = len(admins) + return default_facts + + +def generate_perf_dict(array): + perf_facts = {} + api_version = array._list_available_rest_versions() + if LATENCY_REQUIRED_API_VERSION in api_version: + latency_info = array.get(action='monitor', latency=True)[0] + perf_info = array.get(action='monitor')[0] + # IOPS + perf_facts['writes_per_sec'] = perf_info['writes_per_sec'] + perf_facts['reads_per_sec'] = perf_info['reads_per_sec'] + + # Bandwidth + perf_facts['input_per_sec'] = perf_info['input_per_sec'] + perf_facts['output_per_sec'] = perf_info['output_per_sec'] + + # Latency + if LATENCY_REQUIRED_API_VERSION in api_version: + perf_facts['san_usec_per_read_op'] = latency_info['san_usec_per_read_op'] + perf_facts['san_usec_per_write_op'] = latency_info['san_usec_per_write_op'] + perf_facts['queue_usec_per_read_op'] = latency_info['queue_usec_per_read_op'] + perf_facts['queue_usec_per_write_op'] = latency_info['queue_usec_per_write_op'] + perf_facts['qos_rate_limit_usec_per_read_op'] = latency_info['qos_rate_limit_usec_per_read_op'] + perf_facts['qos_rate_limit_usec_per_write_op'] = latency_info['qos_rate_limit_usec_per_write_op'] + perf_facts['local_queue_usec_per_op'] = perf_info['local_queue_usec_per_op'] + perf_facts['usec_per_read_op'] = perf_info['usec_per_read_op'] + perf_facts['usec_per_write_op'] = perf_info['usec_per_write_op'] + perf_facts['queue_depth'] = perf_info['queue_depth'] + return perf_facts + + +def generate_config_dict(array): + config_facts = {} + api_version = array._list_available_rest_versions() + # DNS + 
config_facts['dns'] = array.get_dns() + # SMTP + config_facts['smtp'] = array.list_alert_recipients() + # SNMP + config_facts['snmp'] = array.list_snmp_managers() + config_facts['snmp_v3_engine_id'] = array.get_snmp_engine_id()['engine_id'] + # DS + config_facts['directory_service'] = array.get_directory_service() + if S3_REQUIRED_API_VERSION in api_version: + config_facts['directory_service_roles'] = {} + roles = array.list_directory_service_roles() + for role in range(0, len(roles)): + role_name = roles[role]['name'] + config_facts['directory_service_roles'][role_name] = { + 'group': roles[role]['group'], + 'group_base': roles[role]['group_base'], + } + else: + config_facts['directory_service'].update(array.get_directory_service(groups=True)) + # NTP + config_facts['ntp'] = array.get(ntpserver=True)['ntpserver'] + # SYSLOG + config_facts['syslog'] = array.get(syslogserver=True)['syslogserver'] + # Phonehome + config_facts['phonehome'] = array.get(phonehome=True)['phonehome'] + # Proxy + config_facts['proxy'] = array.get(proxy=True)['proxy'] + # Relay Host + config_facts['relayhost'] = array.get(relayhost=True)['relayhost'] + # Sender Domain + config_facts['senderdomain'] = array.get(senderdomain=True)['senderdomain'] + # SYSLOG + config_facts['syslog'] = array.get(syslogserver=True)['syslogserver'] + # Idle Timeout + config_facts['idle_timeout'] = array.get(idle_timeout=True)['idle_timeout'] + # SCSI Timeout + config_facts['scsi_timeout'] = array.get(scsi_timeout=True)['scsi_timeout'] + # SSL + config_facts['ssl_certs'] = array.get_certificate() + # Global Admin settings + if S3_REQUIRED_API_VERSION in api_version: + config_facts['global_admin'] = array.get_global_admin_attributes() + return config_facts + + +def generate_admin_dict(array): + api_version = array._list_available_rest_versions() + admin_facts = {} + if ADMIN_API_VERSION in api_version: + admins = array.list_admins() + for admin in range(0, len(admins)): + admin_name = admins[admin]['name'] + admin_facts[admin_name] = { + 'type': admins[admin]['type'], + 'role': admins[admin]['role'], + } + return admin_facts + + +def generate_subnet_dict(array): + sub_facts = {} + subnets = array.list_subnets() + for sub in range(0, len(subnets)): + sub_name = subnets[sub]['name'] + if subnets[sub]['enabled']: + sub_facts[sub_name] = { + 'gateway': subnets[sub]['gateway'], + 'mtu': subnets[sub]['mtu'], + 'vlan': subnets[sub]['vlan'], + 'prefix': subnets[sub]['prefix'], + 'interfaces': subnets[sub]['interfaces'], + 'services': subnets[sub]['services'], + } + return sub_facts + + +def generate_network_dict(array): + net_facts = {} + ports = array.list_network_interfaces() + for port in range(0, len(ports)): + int_name = ports[port]['name'] + net_facts[int_name] = { + 'hwaddr': ports[port]['hwaddr'], + 'mtu': ports[port]['mtu'], + 'enabled': ports[port]['enabled'], + 'speed': ports[port]['speed'], + 'address': ports[port]['address'], + 'slaves': ports[port]['slaves'], + 'services': ports[port]['services'], + 'gateway': ports[port]['gateway'], + 'netmask': ports[port]['netmask'], + } + if ports[port]['subnet']: + subnets = array.get_subnet(ports[port]['subnet']) + if subnets['enabled']: + net_facts[int_name]['subnet'] = { + 'name': subnets['name'], + 'prefix': subnets['prefix'], + 'vlan': subnets['vlan'], + } + return net_facts + + +def generate_capacity_dict(array): + capacity_facts = {} + api_version = array._list_available_rest_versions() + if CAP_REQUIRED_API_VERSION in api_version: + volumes = array.list_volumes(pending=True) + 
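+        # provisioned_space is the sum of the logical sizes of all volumes
+        # (including pending deletes); free_space below is derived as raw
+        # capacity minus the 'total' used-space counter from array.get(space=True).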
capacity_facts['provisioned_space'] = sum(item['size'] for item in volumes) + capacity = array.get(space=True) + total_capacity = capacity[0]['capacity'] + used_space = capacity[0]["total"] + capacity_facts['free_space'] = total_capacity - used_space + capacity_facts['total_capacity'] = total_capacity + capacity_facts['data_reduction'] = capacity[0]['data_reduction'] + capacity_facts['system_space'] = capacity[0]['system'] + capacity_facts['volume_space'] = capacity[0]['volumes'] + capacity_facts['shared_space'] = capacity[0]['shared_space'] + capacity_facts['snapshot_space'] = capacity[0]['snapshots'] + capacity_facts['thin_provisioning'] = capacity[0]['thin_provisioning'] + capacity_facts['total_reduction'] = capacity[0]['total_reduction'] + + return capacity_facts + + +def generate_snap_dict(array): + snap_facts = {} + snaps = array.list_volumes(snap=True) + for snap in range(0, len(snaps)): + snapshot = snaps[snap]['name'] + snap_facts[snapshot] = { + 'size': snaps[snap]['size'], + 'source': snaps[snap]['source'], + 'created': snaps[snap]['created'], + } + return snap_facts + + +def generate_vol_dict(array): + volume_facts = {} + vols = array.list_volumes() + for vol in range(0, len(vols)): + volume = vols[vol]['name'] + volume_facts[volume] = { + 'source': vols[vol]['source'], + 'size': vols[vol]['size'], + 'serial': vols[vol]['serial'], + 'hosts': [], + 'bandwidth': "" + } + api_version = array._list_available_rest_versions() + if AC_REQUIRED_API_VERSION in api_version: + qvols = array.list_volumes(qos=True) + for qvol in range(0, len(qvols)): + volume = qvols[qvol]['name'] + qos = qvols[qvol]['bandwidth_limit'] + volume_facts[volume]['bandwidth'] = qos + vvols = array.list_volumes(protocol_endpoint=True) + for vvol in range(0, len(vvols)): + volume = vvols[vvol]['name'] + volume_facts[volume] = { + 'source': vvols[vvol]['source'], + 'serial': vvols[vvol]['serial'], + 'hosts': [] + } + cvols = array.list_volumes(connect=True) + for cvol in range(0, len(cvols)): + volume = cvols[cvol]['name'] + voldict = [cvols[cvol]['host'], cvols[cvol]['lun']] + volume_facts[volume]['hosts'].append(voldict) + return volume_facts + + +def generate_host_dict(array): + api_version = array._list_available_rest_versions() + host_facts = {} + hosts = array.list_hosts() + for host in range(0, len(hosts)): + hostname = hosts[host]['name'] + tports = [] + host_all_info = array.get_host(hostname, all=True) + if host_all_info: + tports = host_all_info[0]['target_port'] + host_facts[hostname] = { + 'hgroup': hosts[host]['hgroup'], + 'iqn': hosts[host]['iqn'], + 'wwn': hosts[host]['wwn'], + 'personality': array.get_host(hostname, + personality=True)['personality'], + 'target_port': tports + } + if NVME_API_VERSION in api_version: + host_facts[hostname]['nqn'] = hosts[host]['nqn'] + if PREFERRED_API_VERSION in api_version: + hosts = array.list_hosts(preferred_array=True) + for host in range(0, len(hosts)): + hostname = hosts[host]['name'] + host_facts[hostname]['preferred_array'] = hosts[host]['preferred_array'] + return host_facts + + +def generate_pgroups_dict(array): + pgroups_facts = {} + pgroups = array.list_pgroups() + for pgroup in range(0, len(pgroups)): + protgroup = pgroups[pgroup]['name'] + pgroups_facts[protgroup] = { + 'hgroups': pgroups[pgroup]['hgroups'], + 'hosts': pgroups[pgroup]['hosts'], + 'source': pgroups[pgroup]['source'], + 'targets': pgroups[pgroup]['targets'], + 'volumes': pgroups[pgroup]['volumes'], + } + prot_sched = array.get_pgroup(protgroup, schedule=True) + prot_reten = 
array.get_pgroup(protgroup, retention=True)
+        if prot_sched['snap_enabled'] or prot_sched['replicate_enabled']:
+            pgroups_facts[protgroup]['snap_frequency'] = prot_sched['snap_frequency']
+            pgroups_facts[protgroup]['replicate_frequency'] = prot_sched['replicate_frequency']
+            pgroups_facts[protgroup]['snap_enabled'] = prot_sched['snap_enabled']
+            pgroups_facts[protgroup]['replicate_enabled'] = prot_sched['replicate_enabled']
+            pgroups_facts[protgroup]['snap_at'] = prot_sched['snap_at']
+            pgroups_facts[protgroup]['replicate_at'] = prot_sched['replicate_at']
+            pgroups_facts[protgroup]['replicate_blackout'] = prot_sched['replicate_blackout']
+            pgroups_facts[protgroup]['per_day'] = prot_reten['per_day']
+            pgroups_facts[protgroup]['target_per_day'] = prot_reten['target_per_day']
+            pgroups_facts[protgroup]['target_days'] = prot_reten['target_days']
+            pgroups_facts[protgroup]['days'] = prot_reten['days']
+            pgroups_facts[protgroup]['all_for'] = prot_reten['all_for']
+            pgroups_facts[protgroup]['target_all_for'] = prot_reten['target_all_for']
+        if ":" in protgroup:
+            snap_transfers = array.get_pgroup(protgroup, snap=True, transfer=True)
+            pgroups_facts[protgroup]['snaps'] = {}
+            for snap_transfer in range(0, len(snap_transfers)):
+                snap = snap_transfers[snap_transfer]['name']
+                pgroups_facts[protgroup]['snaps'][snap] = {
+                    'created': snap_transfers[snap_transfer]['created'],
+                    'started': snap_transfers[snap_transfer]['started'],
+                    'completed': snap_transfers[snap_transfer]['completed'],
+                    'physical_bytes_written': snap_transfers[snap_transfer]['physical_bytes_written'],
+                    'data_transferred': snap_transfers[snap_transfer]['data_transferred'],
+                    'progress': snap_transfers[snap_transfer]['progress'],
+                }
+    return pgroups_facts
+
+
+def generate_pods_dict(array):
+    pods_facts = {}
+    api_version = array._list_available_rest_versions()
+    if AC_REQUIRED_API_VERSION in api_version:
+        pods = array.list_pods()
+        for pod in range(0, len(pods)):
+            acpod = pods[pod]['name']
+            pods_facts[acpod] = {
+                'source': pods[pod]['source'],
+                'arrays': pods[pod]['arrays'],
+            }
+    return pods_facts
+
+
+def generate_conn_array_dict(array):
+    conn_array_facts = {}
+    api_version = array._list_available_rest_versions()
+    if CONN_STATUS_API_VERSION in api_version:
+        carrays = array.list_connected_arrays()
+        for carray in range(0, len(carrays)):
+            arrayname = carrays[carray]['array_name']
+            conn_array_facts[arrayname] = {
+                'array_id': carrays[carray]['id'],
+                'throtled': carrays[carray]['throtled'],
+                'version': carrays[carray]['version'],
+                'type': carrays[carray]['type'],
+                'mgmt_ip': carrays[carray]['management_address'],
+                'repl_ip': carrays[carray]['replication_address'],
+            }
+            if CONN_STATUS_API_VERSION in api_version:
+                conn_array_facts[arrayname]['status'] = carrays[carray]['status']
+    return conn_array_facts
+
+
+def generate_apps_dict(array):
+    apps_facts = {}
+    api_version = array._list_available_rest_versions()
+    if SAN_REQUIRED_API_VERSION in api_version:
+        apps = array.list_apps()
+        for app in range(0, len(apps)):
+            appname = apps[app]['name']
+            apps_facts[appname] = {
+                'version': apps[app]['version'],
+                'status': apps[app]['status'],
+                'description': apps[app]['description'],
+            }
+    return apps_facts
+
+
+def generate_vgroups_dict(array):
+    vgroups_facts = {}
+    api_version = array._list_available_rest_versions()
+    if AC_REQUIRED_API_VERSION in api_version:
+        vgroups = array.list_vgroups()
+        for vgroup in range(0, len(vgroups)):
+            virtgroup = vgroups[vgroup]['name']
+            vgroups_facts[virtgroup] = {
+                'volumes': vgroups[vgroup]['volumes'],
+            }
+    return vgroups_facts
+
+
+def generate_nfs_offload_dict(array):
+    offload_facts = {}
+    api_version = array._list_available_rest_versions()
+    if AC_REQUIRED_API_VERSION in api_version:
+        offload = array.list_nfs_offload()
+        for target in range(0, len(offload)):
+            offloadt = offload[target]['name']
+            offload_facts[offloadt] = {
+                'status': offload[target]['status'],
+                'mount_point': offload[target]['mount_point'],
+                'protocol': offload[target]['protocol'],
+                'mount_options': offload[target]['mount_options'],
+                'address': offload[target]['address'],
+            }
+    return offload_facts
+
+
+def generate_s3_offload_dict(array):
+    offload_facts = {}
+    api_version = array._list_available_rest_versions()
+    if S3_REQUIRED_API_VERSION in api_version:
+        offload = array.list_s3_offload()
+        for target in range(0, len(offload)):
+            offloadt = offload[target]['name']
+            offload_facts[offloadt] = {
+                'status': offload[target]['status'],
+                'bucket': offload[target]['bucket'],
+                'protocol': offload[target]['protocol'],
+                'access_key_id': offload[target]['access_key_id'],
+            }
+    return offload_facts
+
+
+def generate_hgroups_dict(array):
+    hgroups_facts = {}
+    hgroups = array.list_hgroups()
+    for hgroup in range(0, len(hgroups)):
+        hostgroup = hgroups[hgroup]['name']
+        hgroups_facts[hostgroup] = {
+            'hosts': hgroups[hgroup]['hosts'],
+            'pgs': [],
+            'vols': [],
+        }
+    pghgroups = array.list_hgroups(protect=True)
+    for pghg in range(0, len(pghgroups)):
+        pgname = pghgroups[pghg]['name']
+        hgroups_facts[pgname]['pgs'].append(pghgroups[pghg]['protection_group'])
+    volhgroups = array.list_hgroups(connect=True)
+    for pgvol in range(0, len(volhgroups)):
+        pgname = volhgroups[pgvol]['name']
+        volpgdict = [volhgroups[pgvol]['vol'], volhgroups[pgvol]['lun']]
+        hgroups_facts[pgname]['vols'].append(volpgdict)
+    return hgroups_facts
+
+
+def generate_interfaces_dict(array):
+    api_version = array._list_available_rest_versions()
+    int_facts = {}
+    ports = array.list_ports()
+    for port in range(0, len(ports)):
+        int_name = ports[port]['name']
+        if ports[port]['wwn']:
+            int_facts[int_name] = ports[port]['wwn']
+        if ports[port]['iqn']:
+            int_facts[int_name] = ports[port]['iqn']
+        if NVME_API_VERSION in api_version:
+            if ports[port]['nqn']:
+                int_facts[int_name] = ports[port]['nqn']
+    return int_facts
+
+
+def main():
+    argument_spec = purefa_argument_spec()
+    argument_spec.update(dict(
+        gather_subset=dict(default='minimum', type='list',)
+    ))
+
+    module = AnsibleModule(argument_spec, supports_check_mode=False)
+
+    array = get_system(module)
+
+    subset = [test.lower() for test in module.params['gather_subset']]
+    valid_subsets = ('all', 'minimum', 'config', 'performance', 'capacity',
+                     'network', 'subnet', 'interfaces', 'hgroups', 'pgroups',
+                     'hosts', 'admins', 'volumes', 'snapshots', 'pods',
+                     'vgroups', 'offload', 'apps', 'arrays')
+    subset_test = (test in valid_subsets for test in subset)
+    if not all(subset_test):
+        module.fail_json(msg="value of gather_subset must be one or more of: %s, got: %s"
+                             % (",".join(valid_subsets), ",".join(subset)))
+
+    facts = {}
+
+    if 'minimum' in subset or 'all' in subset:
+        facts['default'] = generate_default_dict(array)
+    if 'performance' in subset or 'all' in subset:
+        facts['performance'] = generate_perf_dict(array)
+    if 'config' in subset or 'all' in subset:
+        facts['config'] = generate_config_dict(array)
+    if 'capacity' in subset or 'all' in subset:
+        facts['capacity'] = generate_capacity_dict(array)
+    if 'network' in subset or 'all' in subset:
facts['network'] = generate_network_dict(array) + if 'subnet' in subset or 'all' in subset: + facts['subnet'] = generate_subnet_dict(array) + if 'interfaces' in subset or 'all' in subset: + facts['interfaces'] = generate_interfaces_dict(array) + if 'hosts' in subset or 'all' in subset: + facts['hosts'] = generate_host_dict(array) + if 'volumes' in subset or 'all' in subset: + facts['volumes'] = generate_vol_dict(array) + if 'snapshots' in subset or 'all' in subset: + facts['snapshots'] = generate_snap_dict(array) + if 'hgroups' in subset or 'all' in subset: + facts['hgroups'] = generate_hgroups_dict(array) + if 'pgroups' in subset or 'all' in subset: + facts['pgroups'] = generate_pgroups_dict(array) + if 'pods' in subset or 'all' in subset: + facts['pods'] = generate_pods_dict(array) + if 'admins' in subset or 'all' in subset: + facts['admins'] = generate_admin_dict(array) + if 'vgroups' in subset or 'all' in subset: + facts['vgroups'] = generate_vgroups_dict(array) + if 'offload' in subset or 'all' in subset: + facts['nfs_offload'] = generate_nfs_offload_dict(array) + facts['s3_offload'] = generate_s3_offload_dict(array) + if 'apps' in subset or 'all' in subset: + facts['apps'] = generate_apps_dict(array) + if 'arrays' in subset or 'all' in subset: + facts['arrays'] = generate_conn_array_dict(array) + + module.exit_json(ansible_facts={'ansible_purefa_facts': facts}) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/storage/purestorage/purefb_facts.py b/plugins/modules/storage/purestorage/purefb_facts.py new file mode 100644 index 0000000000..2648eced38 --- /dev/null +++ b/plugins/modules/storage/purestorage/purefb_facts.py @@ -0,0 +1,656 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2018, Simon Dodsley (simon@purestorage.com) +# GNU General Public License v3.0+ (see COPYING or +# https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['deprecated'], + 'supported_by': 'community'} + +DOCUMENTATION = r''' +--- +module: purefb_facts +deprecated: + removed_in: '2.13' + why: Deprecated in favor of C(_info) module. + alternative: Use M(purefb_info) instead. +short_description: Collect facts from Pure Storage FlashBlade +description: + - Collect facts information from a Pure Storage FlashBlade running the + Purity//FB operating system. By default, the module will collect basic + fact information including hosts, host groups, protection + groups and volume counts. Additional fact information can be collected + based on the configured set of arguments. +author: + - Pure Storage Ansible Team (@sdodsley) +options: + gather_subset: + description: + - When supplied, this argument will define the facts to be collected. + Possible values for this include all, minimum, config, performance, + capacity, network, subnets, lags, filesystems and snapshots. 
+ required: false + type: list + default: minimum +extends_documentation_fragment: +- community.general.purestorage.fb + +''' + +EXAMPLES = r''' +- name: collect default set of facts + purefb_facts: + fb_url: 10.10.10.2 + api_token: T-55a68eb5-c785-4720-a2ca-8b03903bf641 + +- name: collect configuration and capacity facts + purefb_facts: + gather_subset: + - config + - capacity + fb_url: 10.10.10.2 + api_token: T-55a68eb5-c785-4720-a2ca-8b03903bf641 + +- name: collect all facts + purefb_facts: + gather_subset: + - all + fb_url: 10.10.10.2 + api_token: T-55a68eb5-c785-4720-a2ca-8b03903bf641 +''' + +RETURN = r''' +ansible_facts: + description: Returns the facts collected from the FlashBlade + returned: always + type: complex + sample: { + "capacity": { + "aggregate": { + "data_reduction": 1.1179228, + "snapshots": 0, + "total_physical": 17519748439, + "unique": 17519748439, + "virtual": 19585726464 + }, + "file-system": { + "data_reduction": 1.3642412, + "snapshots": 0, + "total_physical": 4748219708, + "unique": 4748219708, + "virtual": 6477716992 + }, + "object-store": { + "data_reduction": 1.0263462, + "snapshots": 0, + "total_physical": 12771528731, + "unique": 12771528731, + "virtual": 6477716992 + }, + "total": 83359896948925 + }, + "config": { + "alert_watchers": { + "enabled": true, + "name": "notify@acmestorage.com" + }, + "array_management": { + "base_dn": null, + "bind_password": null, + "bind_user": null, + "enabled": false, + "name": "management", + "services": [ + "management" + ], + "uris": [] + }, + "directory_service_roles": { + "array_admin": { + "group": null, + "group_base": null + }, + "ops_admin": { + "group": null, + "group_base": null + }, + "readonly": { + "group": null, + "group_base": null + }, + "storage_admin": { + "group": null, + "group_base": null + } + }, + "dns": { + "domain": "demo.acmestorage.com", + "name": "demo-fb-1", + "nameservers": [ + "8.8.8.8" + ], + "search": [ + "demo.acmestorage.com" + ] + }, + "nfs_directory_service": { + "base_dn": null, + "bind_password": null, + "bind_user": null, + "enabled": false, + "name": "nfs", + "services": [ + "nfs" + ], + "uris": [] + }, + "ntp": [ + "0.ntp.pool.org" + ], + "smb_directory_service": { + "base_dn": null, + "bind_password": null, + "bind_user": null, + "enabled": false, + "name": "smb", + "services": [ + "smb" + ], + "uris": [] + }, + "smtp": { + "name": "demo-fb-1", + "relay_host": null, + "sender_domain": "acmestorage.com" + }, + "ssl_certs": { + "certificate": "-----BEGIN CERTIFICATE-----\n\n-----END CERTIFICATE-----", + "common_name": "Acme Storage", + "country": "US", + "email": null, + "intermediate_certificate": null, + "issued_by": "Acme Storage", + "issued_to": "Acme Storage", + "key_size": 4096, + "locality": null, + "name": "global", + "organization": "Acme Storage", + "organizational_unit": "Acme Storage", + "passphrase": null, + "private_key": null, + "state": null, + "status": "self-signed", + "valid_from": "1508433967000", + "valid_to": "2458833967000" + } + }, + "default": { + "blades": 15, + "buckets": 7, + "filesystems": 2, + "flashblade_name": "demo-fb-1", + "object_store_accounts": 1, + "object_store_users": 1, + "purity_version": "2.2.0", + "snapshots": 1, + "total_capacity": 83359896948925 + }, + "filesystems": { + "k8s-pvc-d24b1357-579e-11e8-811f-ecf4bbc88f54": { + "destroyed": false, + "fast_remove": false, + "hard_limit": true, + "nfs_rules": "*(rw,no_root_squash)", + "provisioned": 21474836480, + "snapshot_enabled": false + }, + "z": { + "destroyed": false, + "fast_remove": 
false, + "hard_limit": false, + "provisioned": 1073741824, + "snapshot_enabled": false + } + }, + "lag": { + "uplink": { + "lag_speed": 0, + "port_speed": 40000000000, + "ports": [ + { + "name": "CH1.FM1.ETH1.1" + }, + { + "name": "CH1.FM1.ETH1.2" + }, + ], + "status": "healthy" + } + }, + "network": { + "fm1.admin0": { + "address": "10.10.100.6", + "gateway": "10.10.100.1", + "mtu": 1500, + "netmask": "255.255.255.0", + "services": [ + "support" + ], + "type": "vip", + "vlan": 2200 + }, + "fm2.admin0": { + "address": "10.10.100.7", + "gateway": "10.10.100.1", + "mtu": 1500, + "netmask": "255.255.255.0", + "services": [ + "support" + ], + "type": "vip", + "vlan": 2200 + }, + "nfs1": { + "address": "10.10.100.4", + "gateway": "10.10.100.1", + "mtu": 1500, + "netmask": "255.255.255.0", + "services": [ + "data" + ], + "type": "vip", + "vlan": 2200 + }, + "vir0": { + "address": "10.10.100.5", + "gateway": "10.10.100.1", + "mtu": 1500, + "netmask": "255.255.255.0", + "services": [ + "management" + ], + "type": "vip", + "vlan": 2200 + } + }, + "performance": { + "aggregate": { + "bytes_per_op": 0, + "bytes_per_read": 0, + "bytes_per_write": 0, + "read_bytes_per_sec": 0, + "reads_per_sec": 0, + "usec_per_other_op": 0, + "usec_per_read_op": 0, + "usec_per_write_op": 0, + "write_bytes_per_sec": 0, + "writes_per_sec": 0 + }, + "http": { + "bytes_per_op": 0, + "bytes_per_read": 0, + "bytes_per_write": 0, + "read_bytes_per_sec": 0, + "reads_per_sec": 0, + "usec_per_other_op": 0, + "usec_per_read_op": 0, + "usec_per_write_op": 0, + "write_bytes_per_sec": 0, + "writes_per_sec": 0 + }, + "nfs": { + "bytes_per_op": 0, + "bytes_per_read": 0, + "bytes_per_write": 0, + "read_bytes_per_sec": 0, + "reads_per_sec": 0, + "usec_per_other_op": 0, + "usec_per_read_op": 0, + "usec_per_write_op": 0, + "write_bytes_per_sec": 0, + "writes_per_sec": 0 + }, + "s3": { + "bytes_per_op": 0, + "bytes_per_read": 0, + "bytes_per_write": 0, + "read_bytes_per_sec": 0, + "reads_per_sec": 0, + "usec_per_other_op": 0, + "usec_per_read_op": 0, + "usec_per_write_op": 0, + "write_bytes_per_sec": 0, + "writes_per_sec": 0 + } + }, + "snapshots": { + "z.188": { + "destroyed": false, + "source": "z", + "source_destroyed": false, + "suffix": "188" + } + }, + "subnet": { + "new-mgmt": { + "gateway": "10.10.100.1", + "interfaces": [ + { + "name": "fm1.admin0" + }, + { + "name": "fm2.admin0" + }, + { + "name": "nfs1" + }, + { + "name": "vir0" + } + ], + "lag": "uplink", + "mtu": 1500, + "prefix": "10.10.100.0/24", + "services": [ + "data", + "management", + "support" + ], + "vlan": 2200 + } + } + } +''' + + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.pure import get_blade, purefb_argument_spec + + +MIN_REQUIRED_API_VERSION = '1.3' +HARD_LIMIT_API_VERSION = '1.4' + + +def generate_default_dict(blade): + default_facts = {} + defaults = blade.arrays.list_arrays().items[0] + default_facts['flashblade_name'] = defaults.name + default_facts['purity_version'] = defaults.version + default_facts['filesystems'] = \ + len(blade.file_systems.list_file_systems().items) + default_facts['snapshots'] = \ + len(blade.file_system_snapshots.list_file_system_snapshots().items) + default_facts['buckets'] = len(blade.buckets.list_buckets().items) + default_facts['object_store_users'] = \ + len(blade.object_store_users.list_object_store_users().items) + default_facts['object_store_accounts'] = \ + len(blade.object_store_accounts.list_object_store_accounts().items) + default_facts['blades'] = 
len(blade.blade.list_blades().items) + default_facts['total_capacity'] = \ + blade.arrays.list_arrays_space().items[0].capacity + return default_facts + + +def generate_perf_dict(blade): + perf_facts = {} + total_perf = blade.arrays.list_arrays_performance() + http_perf = blade.arrays.list_arrays_performance(protocol='http') + s3_perf = blade.arrays.list_arrays_performance(protocol='s3') + nfs_perf = blade.arrays.list_arrays_performance(protocol='nfs') + perf_facts['aggregate'] = { + 'bytes_per_op': total_perf.items[0].bytes_per_op, + 'bytes_per_read': total_perf.items[0].bytes_per_read, + 'bytes_per_write': total_perf.items[0].bytes_per_write, + 'read_bytes_per_sec': total_perf.items[0].read_bytes_per_sec, + 'reads_per_sec': total_perf.items[0].reads_per_sec, + 'usec_per_other_op': total_perf.items[0].usec_per_other_op, + 'usec_per_read_op': total_perf.items[0].usec_per_read_op, + 'usec_per_write_op': total_perf.items[0].usec_per_write_op, + 'write_bytes_per_sec': total_perf.items[0].write_bytes_per_sec, + 'writes_per_sec': total_perf.items[0].writes_per_sec, + } + perf_facts['http'] = { + 'bytes_per_op': http_perf.items[0].bytes_per_op, + 'bytes_per_read': http_perf.items[0].bytes_per_read, + 'bytes_per_write': http_perf.items[0].bytes_per_write, + 'read_bytes_per_sec': http_perf.items[0].read_bytes_per_sec, + 'reads_per_sec': http_perf.items[0].reads_per_sec, + 'usec_per_other_op': http_perf.items[0].usec_per_other_op, + 'usec_per_read_op': http_perf.items[0].usec_per_read_op, + 'usec_per_write_op': http_perf.items[0].usec_per_write_op, + 'write_bytes_per_sec': http_perf.items[0].write_bytes_per_sec, + 'writes_per_sec': http_perf.items[0].writes_per_sec, + } + perf_facts['s3'] = { + 'bytes_per_op': s3_perf.items[0].bytes_per_op, + 'bytes_per_read': s3_perf.items[0].bytes_per_read, + 'bytes_per_write': s3_perf.items[0].bytes_per_write, + 'read_bytes_per_sec': s3_perf.items[0].read_bytes_per_sec, + 'reads_per_sec': s3_perf.items[0].reads_per_sec, + 'usec_per_other_op': s3_perf.items[0].usec_per_other_op, + 'usec_per_read_op': s3_perf.items[0].usec_per_read_op, + 'usec_per_write_op': s3_perf.items[0].usec_per_write_op, + 'write_bytes_per_sec': s3_perf.items[0].write_bytes_per_sec, + 'writes_per_sec': s3_perf.items[0].writes_per_sec, + } + perf_facts['nfs'] = { + 'bytes_per_op': nfs_perf.items[0].bytes_per_op, + 'bytes_per_read': nfs_perf.items[0].bytes_per_read, + 'bytes_per_write': nfs_perf.items[0].bytes_per_write, + 'read_bytes_per_sec': nfs_perf.items[0].read_bytes_per_sec, + 'reads_per_sec': nfs_perf.items[0].reads_per_sec, + 'usec_per_other_op': nfs_perf.items[0].usec_per_other_op, + 'usec_per_read_op': nfs_perf.items[0].usec_per_read_op, + 'usec_per_write_op': nfs_perf.items[0].usec_per_write_op, + 'write_bytes_per_sec': nfs_perf.items[0].write_bytes_per_sec, + 'writes_per_sec': nfs_perf.items[0].writes_per_sec, + } + + return perf_facts + + +def generate_config_dict(blade): + config_facts = {} + config_facts['dns'] = blade.dns.list_dns().items[0].to_dict() + config_facts['smtp'] = blade.smtp.list_smtp().items[0].to_dict() + config_facts['alert_watchers'] = \ + blade.alert_watchers.list_alert_watchers().items[0].to_dict() + api_version = blade.api_version.list_versions().versions + if HARD_LIMIT_API_VERSION in api_version: + config_facts['array_management'] = \ + blade.directory_services.list_directory_services(names=['management']).items[0].to_dict() + config_facts['directory_service_roles'] = {} + roles = blade.directory_services.list_directory_services_roles() + for role in 
range(0, len(roles.items)): + role_name = roles.items[role].name + config_facts['directory_service_roles'][role_name] = { + 'group': roles.items[role].group, + 'group_base': roles.items[role].group_base + } + config_facts['nfs_directory_service'] = \ + blade.directory_services.list_directory_services(names=['nfs']).items[0].to_dict() + config_facts['smb_directory_service'] = \ + blade.directory_services.list_directory_services(names=['smb']).items[0].to_dict() + config_facts['ntp'] = blade.arrays.list_arrays().items[0].ntp_servers + config_facts['ssl_certs'] = \ + blade.certificates.list_certificates().items[0].to_dict() + return config_facts + + +def generate_subnet_dict(blade): + sub_facts = {} + subnets = blade.subnets.list_subnets() + for sub in range(0, len(subnets.items)): + sub_name = subnets.items[sub].name + if subnets.items[sub].enabled: + sub_facts[sub_name] = { + 'gateway': subnets.items[sub].gateway, + 'mtu': subnets.items[sub].mtu, + 'vlan': subnets.items[sub].vlan, + 'prefix': subnets.items[sub].prefix, + 'services': subnets.items[sub].services, + } + sub_facts[sub_name]['lag'] = subnets.items[sub].link_aggregation_group.name + sub_facts[sub_name]['interfaces'] = [] + for iface in range(0, len(subnets.items[sub].interfaces)): + sub_facts[sub_name]['interfaces'].append({'name': subnets.items[sub].interfaces[iface].name}) + return sub_facts + + +def generate_lag_dict(blade): + lag_facts = {} + groups = blade.link_aggregation_groups.list_link_aggregation_groups() + for groupcnt in range(0, len(groups.items)): + lag_name = groups.items[groupcnt].name + lag_facts[lag_name] = { + 'lag_speed': groups.items[groupcnt].lag_speed, + 'port_speed': groups.items[groupcnt].port_speed, + 'status': groups.items[groupcnt].status, + } + lag_facts[lag_name]['ports'] = [] + for port in range(0, len(groups.items[groupcnt].ports)): + lag_facts[lag_name]['ports'].append({'name': groups.items[groupcnt].ports[port].name}) + return lag_facts + + +def generate_network_dict(blade): + net_facts = {} + ports = blade.network_interfaces.list_network_interfaces() + for portcnt in range(0, len(ports.items)): + int_name = ports.items[portcnt].name + if ports.items[portcnt].enabled: + net_facts[int_name] = { + 'type': ports.items[portcnt].type, + 'mtu': ports.items[portcnt].mtu, + 'vlan': ports.items[portcnt].vlan, + 'address': ports.items[portcnt].address, + 'services': ports.items[portcnt].services, + 'gateway': ports.items[portcnt].gateway, + 'netmask': ports.items[portcnt].netmask, + } + return net_facts + + +def generate_capacity_dict(blade): + capacity_facts = {} + total_cap = blade.arrays.list_arrays_space() + file_cap = blade.arrays.list_arrays_space(type='file-system') + object_cap = blade.arrays.list_arrays_space(type='object-store') + capacity_facts['total'] = total_cap.items[0].capacity + capacity_facts['aggregate'] = { + 'data_reduction': total_cap.items[0].space.data_reduction, + 'snapshots': total_cap.items[0].space.snapshots, + 'total_physical': total_cap.items[0].space.total_physical, + 'unique': total_cap.items[0].space.unique, + 'virtual': total_cap.items[0].space.virtual, + } + capacity_facts['file-system'] = { + 'data_reduction': file_cap.items[0].space.data_reduction, + 'snapshots': file_cap.items[0].space.snapshots, + 'total_physical': file_cap.items[0].space.total_physical, + 'unique': file_cap.items[0].space.unique, + 'virtual': file_cap.items[0].space.virtual, + } + capacity_facts['object-store'] = { + 'data_reduction': object_cap.items[0].space.data_reduction, + 'snapshots': 
object_cap.items[0].space.snapshots,
+        'total_physical': object_cap.items[0].space.total_physical,
+        'unique': object_cap.items[0].space.unique,
+        'virtual': object_cap.items[0].space.virtual,
+    }
+
+    return capacity_facts
+
+
+def generate_snap_dict(blade):
+    snap_facts = {}
+    snaps = blade.file_system_snapshots.list_file_system_snapshots()
+    for snap in range(0, len(snaps.items)):
+        snapshot = snaps.items[snap].name
+        snap_facts[snapshot] = {
+            'destroyed': snaps.items[snap].destroyed,
+            'source': snaps.items[snap].source,
+            'suffix': snaps.items[snap].suffix,
+            'source_destroyed': snaps.items[snap].source_destroyed,
+        }
+    return snap_facts
+
+
+def generate_fs_dict(blade):
+    fs_facts = {}
+    fsys = blade.file_systems.list_file_systems()
+    for fsystem in range(0, len(fsys.items)):
+        share = fsys.items[fsystem].name
+        fs_facts[share] = {
+            'fast_remove': fsys.items[fsystem].fast_remove_directory_enabled,
+            'snapshot_enabled': fsys.items[fsystem].snapshot_directory_enabled,
+            'provisioned': fsys.items[fsystem].provisioned,
+            'destroyed': fsys.items[fsystem].destroyed,
+        }
+        if fsys.items[fsystem].http.enabled:
+            fs_facts[share]['http'] = fsys.items[fsystem].http.enabled
+        if fsys.items[fsystem].smb.enabled:
+            fs_facts[share]['smb_mode'] = fsys.items[fsystem].smb.acl_mode
+        if fsys.items[fsystem].nfs.enabled:
+            fs_facts[share]['nfs_rules'] = fsys.items[fsystem].nfs.rules
+        api_version = blade.api_version.list_versions().versions
+        if HARD_LIMIT_API_VERSION in api_version:
+            fs_facts[share]['hard_limit'] = fsys.items[fsystem].hard_limit_enabled
+
+    return fs_facts
+
+
+def main():
+    argument_spec = purefb_argument_spec()
+    argument_spec.update(dict(
+        gather_subset=dict(default='minimum', type='list',)
+    ))
+
+    module = AnsibleModule(argument_spec, supports_check_mode=True)
+
+    blade = get_blade(module)
+    versions = blade.api_version.list_versions().versions
+
+    if MIN_REQUIRED_API_VERSION not in versions:
+        module.fail_json(msg='FlashBlade REST version not supported. Minimum version required: {0}'.format(MIN_REQUIRED_API_VERSION))
+
+    subset = [test.lower() for test in module.params['gather_subset']]
+    valid_subsets = ('all', 'minimum', 'config', 'performance', 'capacity',
+                     'network', 'subnets', 'lags',
+                     'filesystems', 'snapshots')
+    subset_test = (test in valid_subsets for test in subset)
+    if not all(subset_test):
+        module.fail_json(msg="value of gather_subset must be one or more of: %s, got: %s"
+                             % (",".join(valid_subsets), ",".join(subset)))
+
+    facts = {}
+
+    if 'minimum' in subset or 'all' in subset:
+        facts['default'] = generate_default_dict(blade)
+    if 'performance' in subset or 'all' in subset:
+        facts['performance'] = generate_perf_dict(blade)
+    if 'config' in subset or 'all' in subset:
+        facts['config'] = generate_config_dict(blade)
+    if 'capacity' in subset or 'all' in subset:
+        facts['capacity'] = generate_capacity_dict(blade)
+    if 'lags' in subset or 'all' in subset:
+        facts['lag'] = generate_lag_dict(blade)
+    if 'network' in subset or 'all' in subset:
+        facts['network'] = generate_network_dict(blade)
+    if 'subnets' in subset or 'all' in subset:
+        facts['subnet'] = generate_subnet_dict(blade)
+    if 'filesystems' in subset or 'all' in subset:
+        facts['filesystems'] = generate_fs_dict(blade)
+    if 'snapshots' in subset or 'all' in subset:
+        facts['snapshots'] = generate_snap_dict(blade)
+
+    module.exit_json(ansible_facts={'ansible_purefb_facts': facts})
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/storage/vexata/vexata_eg.py b/plugins/modules/storage/vexata/vexata_eg.py
new file mode 100644
index 0000000000..0a3561798b
--- /dev/null
+++ b/plugins/modules/storage/vexata/vexata_eg.py
@@ -0,0 +1,214 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2019, Sandeep Kasargod (sandeep@vexata.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+
+DOCUMENTATION = r'''
+---
+module: vexata_eg
+short_description: Manage export groups on Vexata VX100 storage arrays
+description:
+- Create or delete export groups on a Vexata VX100 array.
+- An export group is a tuple of a volume group, initiator group and port
+  group that allows a set of volumes to be exposed to one or more hosts
+  through specific array ports.
+author:
+- Sandeep Kasargod (@vexata)
+options:
+  name:
+    description:
+    - Export group name.
+    required: true
+    type: str
+  state:
+    description:
+    - Creates the export group when present, deletes it when absent.
+    default: present
+    choices: [ present, absent ]
+    type: str
+  vg:
+    description:
+    - Volume group name.
+    type: str
+  ig:
+    description:
+    - Initiator group name.
+    type: str
+  pg:
+    description:
+    - Port group name.
+    type: str
+extends_documentation_fragment:
+- community.general.vexata.vx100
+
+'''
+
+EXAMPLES = r'''
+- name: Create export group named db_export
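+  # vg, ig and pg below name the volume, initiator and port groups whose tuple forms the export group.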
+  vexata_eg:
+    name: db_export
+    vg: dbvols
+    ig: dbhosts
+    pg: pg1
+    state: present
+    array: vx100_ultra.test.com
+    user: admin
+    password: secret
+
+- name: Delete export group named db_export
+  vexata_eg:
+    name: db_export
+    state: absent
+    array: vx100_ultra.test.com
+    user: admin
+    password: secret
+'''
+
+RETURN = r'''
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.vexata import (
+    argument_spec, get_array, required_together)
+
+
+def get_eg(module, array):
+    """Retrieve the named eg if it exists, None if absent."""
+    name = module.params['name']
+    try:
+        egs = array.list_egs()
+        eg = list(filter(lambda eg: eg['name'] == name, egs))
+        if len(eg) == 1:
+            return eg[0]
+        else:
+            return None
+    except Exception:
+        module.fail_json(msg='Error while attempting to retrieve export groups.')
+
+
+def get_vg_id(module, array):
+    """Retrieve a named vg's id if it exists, error if absent."""
+    name = module.params['vg']
+    try:
+        vgs = array.list_vgs()
+        vg = list(filter(lambda vg: vg['name'] == name, vgs))
+        if len(vg) == 1:
+            return vg[0]['id']
+        else:
+            module.fail_json(msg='Volume group {0} was not found.'.format(name))
+    except Exception:
+        module.fail_json(msg='Error while attempting to retrieve volume groups.')
+
+
+def get_ig_id(module, array):
+    """Retrieve a named ig's id if it exists, error if absent."""
+    name = module.params['ig']
+    try:
+        igs = array.list_igs()
+        ig = list(filter(lambda ig: ig['name'] == name, igs))
+        if len(ig) == 1:
+            return ig[0]['id']
+        else:
+            module.fail_json(msg='Initiator group {0} was not found.'.format(name))
+    except Exception:
+        module.fail_json(msg='Error while attempting to retrieve initiator groups.')
+
+
+def get_pg_id(module, array):
+    """Retrieve a named pg's id if it exists, error if absent."""
+    name = module.params['pg']
+    try:
+        pgs = array.list_pgs()
+        pg = list(filter(lambda pg: pg['name'] == name, pgs))
+        if len(pg) == 1:
+            return pg[0]['id']
+        else:
+            module.fail_json(msg='Port group {0} was not found.'.format(name))
+    except Exception:
+        module.fail_json(msg='Error while attempting to retrieve port groups.')
+
+
+def create_eg(module, array):
+    """Create a new export group."""
+    changed = False
+    eg_name = module.params['name']
+    vg_id = get_vg_id(module, array)
+    ig_id = get_ig_id(module, array)
+    pg_id = get_pg_id(module, array)
+    if module.check_mode:
+        module.exit_json(changed=changed)
+
+    try:
+        eg = array.create_eg(
+            eg_name,
+            'Ansible export group',
+            (vg_id, ig_id, pg_id))
+        if eg:
+            module.log(msg='Created export group {0}'.format(eg_name))
+            changed = True
+        else:
+            raise Exception
+    except Exception:
+        module.fail_json(msg='Export group {0} create failed.'.format(eg_name))
+    module.exit_json(changed=changed)
+
+
+def delete_eg(module, array, eg):
+    changed = False
+    eg_name = eg['name']
+    if module.check_mode:
+        module.exit_json(changed=changed)
+
+    try:
+        ok = array.delete_eg(
+            eg['id'])
+        if ok:
+            module.log(msg='Export group {0} deleted.'.format(eg_name))
+            changed = True
+        else:
+            raise Exception
+    except Exception:
+        module.fail_json(msg='Export group {0} delete failed.'.format(eg_name))
+    module.exit_json(changed=changed)
+
+
+def main():
+    arg_spec = argument_spec()
+    arg_spec.update(
+        dict(
+            name=dict(type='str', required=True),
+            state=dict(type='str', default='present', choices=['present', 'absent']),
+            vg=dict(type='str'),
+            ig=dict(type='str'),
+            pg=dict(type='str')
+        )
+    )
+
+    module = AnsibleModule(arg_spec,
+                           supports_check_mode=True,
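+                           # Check mode is honored inside create_eg/delete_eg, which exit_json before mutating the array.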
+                           required_together=required_together())
+
+    state = module.params['state']
+    array = get_array(module)
+    eg = get_eg(module, array)
+
+    if state == 'present' and not eg:
+        create_eg(module, array)
+    elif state == 'absent' and eg:
+        delete_eg(module, array, eg)
+    else:
+        module.exit_json(changed=False)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/storage/vexata/vexata_volume.py b/plugins/modules/storage/vexata/vexata_volume.py
new file mode 100644
index 0000000000..83e23d1389
--- /dev/null
+++ b/plugins/modules/storage/vexata/vexata_volume.py
@@ -0,0 +1,201 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2019, Sandeep Kasargod (sandeep@vexata.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+
+DOCUMENTATION = r'''
+---
+module: vexata_volume
+short_description: Manage volumes on Vexata VX100 storage arrays
+description:
+- Creates, deletes or extends volumes on a Vexata VX100 array.
+author:
+- Sandeep Kasargod (@vexata)
+options:
+  name:
+    description:
+    - Volume name.
+    required: true
+    type: str
+  state:
+    description:
+    - Creates/modifies the volume when present, removes it when absent.
+    default: present
+    choices: [ present, absent ]
+    type: str
+  size:
+    description:
+    - Volume size in M, G, T units. M=2^20, G=2^30, T=2^40 bytes.
+    type: str
+extends_documentation_fragment:
+- community.general.vexata.vx100
+
+'''
+
+EXAMPLES = r'''
+- name: Create new 2 TiB volume named foo
+  vexata_volume:
+    name: foo
+    size: 2T
+    state: present
+    array: vx100_ultra.test.com
+    user: admin
+    password: secret
+
+- name: Expand volume named foo to 4 TiB
+  vexata_volume:
+    name: foo
+    size: 4T
+    state: present
+    array: vx100_ultra.test.com
+    user: admin
+    password: secret
+
+- name: Delete volume named foo
+  vexata_volume:
+    name: foo
+    state: absent
+    array: vx100_ultra.test.com
+    user: admin
+    password: secret
+'''
+
+RETURN = r'''
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.vexata import (
+    argument_spec, get_array, required_together, size_to_MiB)
+
+
+def get_volume(module, array):
+    """Retrieve the named volume if it exists, None if absent."""
+    name = module.params['name']
+    try:
+        vols = array.list_volumes()
+        vol = list(filter(lambda v: v['name'] == name, vols))
+        if len(vol) == 1:
+            return vol[0]
+        else:
+            return None
+    except Exception:
+        module.fail_json(msg='Error while attempting to retrieve volumes.')
+
+
+def validate_size(module, err_msg):
+    size = module.params.get('size', False)
+    if not size:
+        module.fail_json(msg=err_msg)
+    size = size_to_MiB(size)
+    if size <= 0:
+        module.fail_json(msg='Invalid volume size, must be a number suffixed with M, G or T.')
+    return size
+
+
+def create_volume(module, array):
+    """Create a new volume."""
+    changed = False
+    size = validate_size(module, err_msg='Size is required to create volume.')
+    if module.check_mode:
+        module.exit_json(changed=changed)
+
+    try:
+        vol = array.create_volume(
+            module.params['name'],
+            'Ansible volume',
+            size)
+        if vol:
+            module.log(msg='Created volume {0}'.format(vol['id']))
+            changed = True
+        else:
+            module.fail_json(msg='Volume create failed.')
+    except Exception:
+        pass
+    module.exit_json(changed=changed)
+
+
+def update_volume(module, array, volume):
+    """Expand the volume size."""
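+    # Assumption: volSize reported by the array is in MiB, the same unit
+    # size_to_MiB() returns, so the two values can be compared directly.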
changed = False + size = validate_size(module, err_msg='Size is required to update volume') + prev_size = volume['volSize'] + if size <= prev_size: + module.log(msg='Volume expanded size needs to be larger ' + 'than current size.') + if module.check_mode: + module.exit_json(changed=changed) + + try: + vol = array.grow_volume( + volume['name'], + volume['description'], + volume['id'], + size) + if vol: + changed = True + except Exception: + pass + + module.exit_json(changed=changed) + + +def delete_volume(module, array, volume): + changed = False + vol_name = volume['name'] + if module.check_mode: + module.exit_json(changed=changed) + + try: + ok = array.delete_volume( + volume['id']) + if ok: + module.log(msg='Volume {0} deleted.'.format(vol_name)) + changed = True + else: + raise Exception + except Exception: + pass + module.exit_json(changed=changed) + + +def main(): + arg_spec = argument_spec() + arg_spec.update( + dict( + name=dict(type='str', required=True), + state=dict(default='present', choices=['present', 'absent']), + size=dict(type='str') + ) + ) + + module = AnsibleModule(arg_spec, + supports_check_mode=True, + required_together=required_together()) + + state = module.params['state'] + array = get_array(module) + volume = get_volume(module, array) + + if state == 'present': + if not volume: + create_volume(module, array) + else: + update_volume(module, array, volume) + elif state == 'absent' and volume: + delete_volume(module, array, volume) + else: + module.exit_json(changed=False) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/storage/zfs/zfs.py b/plugins/modules/storage/zfs/zfs.py new file mode 100644 index 0000000000..00aabff955 --- /dev/null +++ b/plugins/modules/storage/zfs/zfs.py @@ -0,0 +1,266 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2013, Johan Wiren +# Copyright: (c) 2017, Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: zfs +short_description: Manage zfs +description: + - Manages ZFS file systems, volumes, clones and snapshots +options: + name: + description: + - File system, snapshot or volume name e.g. C(rpool/myfs). + required: true + state: + description: + - Whether to create (C(present)), or remove (C(absent)) a + file system, snapshot or volume. All parents/children + will be created/destroyed as needed to reach the desired state. + choices: [ absent, present ] + required: true + origin: + description: + - Snapshot from which to create a clone. + extra_zfs_properties: + description: + - A dictionary of zfs properties to be set. + - See the zfs(8) man page for more information. +author: +- Johan Wiren (@johanwiren) +''' + +EXAMPLES = ''' +- name: Create a new file system called myfs in pool rpool with the setuid property turned off + zfs: + name: rpool/myfs + state: present + extra_zfs_properties: + setuid: off + +- name: Create a new volume called myvol in pool rpool. + zfs: + name: rpool/myvol + state: present + extra_zfs_properties: + volsize: 10M + +- name: Create a snapshot of rpool/myfs file system. 
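+  # The "@" in the dataset name is what selects the snapshot action (see Zfs.create below).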
+ zfs: + name: rpool/myfs@mysnapshot + state: present + +- name: Create a new file system called myfs2 with snapdir enabled + zfs: + name: rpool/myfs2 + state: present + extra_zfs_properties: + snapdir: enabled + +- name: Create a new file system by cloning a snapshot + zfs: + name: rpool/cloned_fs + state: present + origin: rpool/myfs@mysnapshot + +- name: Destroy a filesystem + zfs: + name: rpool/myfs + state: absent +''' + +import os + +from ansible.module_utils.basic import AnsibleModule + + +class Zfs(object): + + def __init__(self, module, name, properties): + self.module = module + self.name = name + self.properties = properties + self.changed = False + self.zfs_cmd = module.get_bin_path('zfs', True) + self.zpool_cmd = module.get_bin_path('zpool', True) + self.pool = name.split('/')[0] + self.is_solaris = os.uname()[0] == 'SunOS' + self.is_openzfs = self.check_openzfs() + self.enhanced_sharing = self.check_enhanced_sharing() + + def check_openzfs(self): + cmd = [self.zpool_cmd] + cmd.extend(['get', 'version']) + cmd.append(self.pool) + (rc, out, err) = self.module.run_command(cmd, check_rc=True) + version = out.splitlines()[-1].split()[2] + if version == '-': + return True + if int(version) == 5000: + return True + return False + + def check_enhanced_sharing(self): + if self.is_solaris and not self.is_openzfs: + cmd = [self.zpool_cmd] + cmd.extend(['get', 'version']) + cmd.append(self.pool) + (rc, out, err) = self.module.run_command(cmd, check_rc=True) + version = out.splitlines()[-1].split()[2] + if int(version) >= 34: + return True + return False + + def exists(self): + cmd = [self.zfs_cmd, 'list', '-t', 'all', self.name] + (rc, out, err) = self.module.run_command(' '.join(cmd)) + if rc == 0: + return True + else: + return False + + def create(self): + if self.module.check_mode: + self.changed = True + return + properties = self.properties + origin = self.module.params.get('origin', None) + cmd = [self.zfs_cmd] + + if "@" in self.name: + action = 'snapshot' + elif origin: + action = 'clone' + else: + action = 'create' + + cmd.append(action) + + if action in ['create', 'clone']: + cmd += ['-p'] + + if properties: + for prop, value in properties.items(): + if prop == 'volsize': + cmd += ['-V', value] + elif prop == 'volblocksize': + cmd += ['-b', value] + else: + cmd += ['-o', '%s="%s"' % (prop, value)] + if origin and action == 'clone': + cmd.append(origin) + cmd.append(self.name) + (rc, out, err) = self.module.run_command(' '.join(cmd)) + if rc == 0: + self.changed = True + else: + self.module.fail_json(msg=err) + + def destroy(self): + if self.module.check_mode: + self.changed = True + return + cmd = [self.zfs_cmd, 'destroy', '-R', self.name] + (rc, out, err) = self.module.run_command(' '.join(cmd)) + if rc == 0: + self.changed = True + else: + self.module.fail_json(msg=err) + + def set_property(self, prop, value): + if self.module.check_mode: + self.changed = True + return + cmd = [self.zfs_cmd, 'set', prop + '=' + str(value), self.name] + (rc, out, err) = self.module.run_command(cmd) + if rc == 0: + self.changed = True + else: + self.module.fail_json(msg=err) + + def set_properties_if_changed(self): + current_properties = self.get_current_properties() + for prop, value in self.properties.items(): + if current_properties.get(prop, None) != value: + self.set_property(prop, value) + + def get_current_properties(self): + cmd = [self.zfs_cmd, 'get', '-H'] + if self.enhanced_sharing: + cmd += ['-e'] + cmd += ['all', self.name] + rc, out, err = self.module.run_command(" ".join(cmd)) + 
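+        # 'zfs get -H' emits tab-separated rows: name, property, value, source;
+        # only properties whose source is 'local' are kept for the later comparison.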
properties = dict() + for prop, value, source in [l.split('\t')[1:4] for l in out.splitlines()]: + if source == 'local': + properties[prop] = value + # Add alias for enhanced sharing properties + if self.enhanced_sharing: + properties['sharenfs'] = properties.get('share.nfs', None) + properties['sharesmb'] = properties.get('share.smb', None) + return properties + + +def main(): + + module = AnsibleModule( + argument_spec=dict( + name=dict(type='str', required=True), + state=dict(type='str', required=True, choices=['absent', 'present']), + origin=dict(type='str', default=None), + extra_zfs_properties=dict(type='dict', default={}), + ), + supports_check_mode=True, + ) + + state = module.params.get('state') + name = module.params.get('name') + + if module.params.get('origin') and '@' in name: + module.fail_json(msg='cannot specify origin when operating on a snapshot') + + # Reverse the boolification of zfs properties + for prop, value in module.params['extra_zfs_properties'].items(): + if isinstance(value, bool): + if value is True: + module.params['extra_zfs_properties'][prop] = 'on' + else: + module.params['extra_zfs_properties'][prop] = 'off' + else: + module.params['extra_zfs_properties'][prop] = value + + result = dict( + name=name, + state=state, + ) + + zfs = Zfs(module, name, module.params['extra_zfs_properties']) + + if state == 'present': + if zfs.exists(): + zfs.set_properties_if_changed() + else: + zfs.create() + + elif state == 'absent': + if zfs.exists(): + zfs.destroy() + + result.update(zfs.properties) + result['changed'] = zfs.changed + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/storage/zfs/zfs_delegate_admin.py b/plugins/modules/storage/zfs/zfs_delegate_admin.py new file mode 100644 index 0000000000..82d7e3199f --- /dev/null +++ b/plugins/modules/storage/zfs/zfs_delegate_admin.py @@ -0,0 +1,267 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2015, Nate Coraor +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'metadata_version': '1.1'} + +DOCUMENTATION = r''' +--- +module: zfs_delegate_admin +short_description: Manage ZFS delegated administration (user admin privileges) +description: + - Manages ZFS file system delegated administration permissions, which allow unprivileged users to perform ZFS + operations normally restricted to the superuser. + - See the C(zfs allow) section of C(zfs(1M)) for detailed explanations of options. + - This module attempts to adhere to the behavior of the command line tool as much as possible. +requirements: + - "A ZFS/OpenZFS implementation that supports delegation with `zfs allow`, including: Solaris >= 10, illumos (all + versions), FreeBSD >= 8.0R, ZFS on Linux >= 0.7.0." +options: + name: + description: + - File system or volume name e.g. C(rpool/myfs). + required: true + type: str + state: + description: + - Whether to allow (C(present)), or unallow (C(absent)) a permission. + - When set to C(present), at least one "entity" param of I(users), I(groups), or I(everyone) are required. + - When set to C(absent), removes permissions from the specified entities, or removes all permissions if no entity params are specified. 
+    choices: [ absent, present ]
+    default: present
+  users:
+    description:
+    - List of users to whom permission(s) should be granted.
+    type: list
+  groups:
+    description:
+    - List of groups to whom permission(s) should be granted.
+    type: list
+  everyone:
+    description:
+    - Apply permissions to everyone.
+    type: bool
+    default: no
+  permissions:
+    description:
+    - The list of permission(s) to delegate (required if C(state) is C(present)).
+    type: list
+    choices: [ allow, clone, create, destroy, mount, promote, readonly, receive, rename, rollback, send, share, snapshot, unallow ]
+  local:
+    description:
+    - Apply permissions to C(name) locally (C(zfs allow -l)).
+    type: bool
+  descendents:
+    description:
+    - Apply permissions to C(name)'s descendents (C(zfs allow -d)).
+    type: bool
+  recursive:
+    description:
+    - Unallow permissions recursively (ignored when C(state) is C(present)).
+    type: bool
+    default: no
+author:
+- Nate Coraor (@natefoo)
+'''
+
+EXAMPLES = r'''
+- name: Grant `zfs allow` and `unallow` permission to the `adm` user with the default local+descendents scope
+  zfs_delegate_admin:
+    name: rpool/myfs
+    users: adm
+    permissions: allow,unallow
+
+- name: Grant `zfs send` to everyone, plus the group `backup`
+  zfs_delegate_admin:
+    name: rpool/myvol
+    groups: backup
+    everyone: yes
+    permissions: send
+
+- name: Grant `zfs send,receive` to users `foo` and `bar` with local scope only
+  zfs_delegate_admin:
+    name: rpool/myfs
+    users: foo,bar
+    permissions: send,receive
+    local: yes
+
+- name: Revoke all permissions from everyone (permissions specifically assigned to users and groups remain)
+  zfs_delegate_admin:
+    name: rpool/myfs
+    everyone: yes
+    state: absent
+'''
+
+# This module does not return anything other than the standard
+# changed/state/msg/stdout
+RETURN = '''
+'''
+
+from itertools import product
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+class ZfsDelegateAdmin(object):
+    def __init__(self, module):
+        self.module = module
+        self.name = module.params.get('name')
+        self.state = module.params.get('state')
+        self.users = module.params.get('users')
+        self.groups = module.params.get('groups')
+        self.everyone = module.params.get('everyone')
+        self.perms = module.params.get('permissions')
+        self.scope = None
+        self.changed = False
+        self.initial_perms = None
+        self.subcommand = 'allow'
+        self.recursive_opt = []
+        self.run_method = self.update
+
+        self.setup(module)
+
+    def setup(self, module):
+        """ Validate params and set up for run.
+        """
+        if self.state == 'absent':
+            self.subcommand = 'unallow'
+            if module.params.get('recursive'):
+                self.recursive_opt = ['-r']
+
+        local = module.params.get('local')
+        descendents = module.params.get('descendents')
+        if (local and descendents) or (not local and not descendents):
+            self.scope = 'ld'
+        elif local:
+            self.scope = 'l'
+        elif descendents:
+            self.scope = 'd'
+        else:
+            self.module.fail_json(msg='Impossible value for local and descendents')
+
+        if not (self.users or self.groups or self.everyone):
+            if self.state == 'present':
+                self.module.fail_json(msg='One of `users`, `groups`, or `everyone` must be set')
+            elif self.state == 'absent':
+                self.run_method = self.clear
+            # ansible ensures the else cannot happen here
+
+        self.zfs_path = module.get_bin_path('zfs', True)
+
+    @property
+    def current_perms(self):
+        """ Parse the output of `zfs allow <name>` to retrieve current permissions.
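+
+        A hypothetical sample of the `zfs allow` listing this parser expects
+        (entity lines are tab-indented in the real output):
+
+            ---- Permissions on rpool/myfs ----
+            Local+Descendent permissions:
+                    user adm allow,unallow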
+ """ + out = self.run_zfs_raw(subcommand='allow') + perms = { + 'l': {'u': {}, 'g': {}, 'e': []}, + 'd': {'u': {}, 'g': {}, 'e': []}, + 'ld': {'u': {}, 'g': {}, 'e': []}, + } + linemap = { + 'Local permissions:': 'l', + 'Descendent permissions:': 'd', + 'Local+Descendent permissions:': 'ld', + } + scope = None + for line in out.splitlines(): + scope = linemap.get(line, scope) + if not scope: + continue + try: + if line.startswith('\tuser ') or line.startswith('\tgroup '): + ent_type, ent, cur_perms = line.split() + perms[scope][ent_type[0]][ent] = cur_perms.split(',') + elif line.startswith('\teveryone '): + perms[scope]['e'] = line.split()[1].split(',') + except ValueError: + self.module.fail_json(msg="Cannot parse user/group permission output by `zfs allow`: '%s'" % line) + return perms + + def run_zfs_raw(self, subcommand=None, args=None): + """ Run a raw zfs command, fail on error. + """ + cmd = [self.zfs_path, subcommand or self.subcommand] + (args or []) + [self.name] + rc, out, err = self.module.run_command(cmd) + if rc: + self.module.fail_json(msg='Command `%s` failed: %s' % (' '.join(cmd), err)) + return out + + def run_zfs(self, args): + """ Run zfs allow/unallow with appropriate options as per module arguments. + """ + args = self.recursive_opt + ['-' + self.scope] + args + if self.perms: + args.append(','.join(self.perms)) + return self.run_zfs_raw(args=args) + + def clear(self): + """ Called by run() to clear all permissions. + """ + changed = False + stdout = '' + for scope, ent_type in product(('ld', 'l', 'd'), ('u', 'g')): + for ent in self.initial_perms[scope][ent_type].keys(): + stdout += self.run_zfs(['-%s' % ent_type, ent]) + changed = True + for scope in ('ld', 'l', 'd'): + if self.initial_perms[scope]['e']: + stdout += self.run_zfs(['-e']) + changed = True + return (changed, stdout) + + def update(self): + """ Update permissions as per module arguments. + """ + stdout = '' + for ent_type, entities in (('u', self.users), ('g', self.groups)): + if entities: + stdout += self.run_zfs(['-%s' % ent_type, ','.join(entities)]) + if self.everyone: + stdout += self.run_zfs(['-e']) + return (self.initial_perms != self.current_perms, stdout) + + def run(self): + """ Run an operation, return results for Ansible. 
+ """ + exit_args = {'state': self.state} + self.initial_perms = self.current_perms + exit_args['changed'], stdout = self.run_method() + if exit_args['changed']: + exit_args['msg'] = 'ZFS delegated admin permissions updated' + exit_args['stdout'] = stdout + self.module.exit_json(**exit_args) + + +def main(): + module = AnsibleModule( + argument_spec=dict( + name=dict(type='str', required=True), + state=dict(type='str', default='present', choices=['absent', 'present']), + users=dict(type='list'), + groups=dict(type='list'), + everyone=dict(type='bool', default=False), + permissions=dict(type='list', + choices=['allow', 'clone', 'create', 'destroy', 'mount', 'promote', 'readonly', 'receive', + 'rename', 'rollback', 'send', 'share', 'snapshot', 'unallow']), + local=dict(type='bool'), + descendents=dict(type='bool'), + recursive=dict(type='bool', default=False), + ), + supports_check_mode=False, + required_if=[('state', 'present', ['permissions'])], + ) + zfs_delegate_admin = ZfsDelegateAdmin(module) + zfs_delegate_admin.run() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/storage/zfs/zfs_facts.py b/plugins/modules/storage/zfs/zfs_facts.py new file mode 100644 index 0000000000..803b1a0bea --- /dev/null +++ b/plugins/modules/storage/zfs/zfs_facts.py @@ -0,0 +1,265 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2016, Adam Števko +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: zfs_facts +short_description: Gather facts about ZFS datasets. +description: + - Gather facts from ZFS dataset properties. +author: Adam Števko (@xen0l) +options: + name: + description: + - ZFS dataset name. + required: yes + aliases: [ "ds", "dataset" ] + recurse: + description: + - Specifies if properties for any children should be recursively + displayed. + type: bool + default: 'no' + parsable: + description: + - Specifies if property values should be displayed in machine + friendly format. + type: bool + default: 'no' + properties: + description: + - Specifies which dataset properties should be queried in comma-separated format. + For more information about dataset properties, check zfs(1M) man page. + default: all + aliases: [ "props" ] + type: + description: + - Specifies which datasets types to display. Multiple values have to be + provided in comma-separated form. + choices: [ 'all', 'filesystem', 'volume', 'snapshot', 'bookmark' ] + default: all + depth: + description: + - Specifies recursion depth. +''' + +EXAMPLES = ''' +- name: Gather facts about ZFS dataset rpool/export/home + zfs_facts: + dataset: rpool/export/home + +- name: Report space usage on ZFS filesystems under data/home + zfs_facts: + name: data/home + recurse: yes + type: filesystem + +- debug: + msg: 'ZFS dataset {{ item.name }} consumes {{ item.used }} of disk space.' + with_items: '{{ ansible_zfs_datasets }}' +''' + +RETURN = ''' +name: + description: ZFS dataset name + returned: always + type: str + sample: rpool/var/spool +parsable: + description: if parsable output should be provided in machine friendly format. 
+ returned: if 'parsable' is set to True + type: bool + sample: True +recurse: + description: if we should recurse over ZFS dataset + returned: if 'recurse' is set to True + type: bool + sample: True +zfs_datasets: + description: ZFS dataset facts + returned: always + type: str + sample: + { + "aclinherit": "restricted", + "aclmode": "discard", + "atime": "on", + "available": "43.8G", + "canmount": "on", + "casesensitivity": "sensitive", + "checksum": "on", + "compression": "off", + "compressratio": "1.00x", + "copies": "1", + "creation": "Thu Jun 16 11:37 2016", + "dedup": "off", + "devices": "on", + "exec": "on", + "filesystem_count": "none", + "filesystem_limit": "none", + "logbias": "latency", + "logicalreferenced": "18.5K", + "logicalused": "3.45G", + "mlslabel": "none", + "mounted": "yes", + "mountpoint": "/rpool", + "name": "rpool", + "nbmand": "off", + "normalization": "none", + "org.openindiana.caiman:install": "ready", + "primarycache": "all", + "quota": "none", + "readonly": "off", + "recordsize": "128K", + "redundant_metadata": "all", + "refcompressratio": "1.00x", + "referenced": "29.5K", + "refquota": "none", + "refreservation": "none", + "reservation": "none", + "secondarycache": "all", + "setuid": "on", + "sharenfs": "off", + "sharesmb": "off", + "snapdir": "hidden", + "snapshot_count": "none", + "snapshot_limit": "none", + "sync": "standard", + "type": "filesystem", + "used": "4.41G", + "usedbychildren": "4.41G", + "usedbydataset": "29.5K", + "usedbyrefreservation": "0", + "usedbysnapshots": "0", + "utf8only": "off", + "version": "5", + "vscan": "off", + "written": "29.5K", + "xattr": "on", + "zoned": "off" + } +''' + +from collections import defaultdict + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.six import iteritems + + +SUPPORTED_TYPES = ['all', 'filesystem', 'volume', 'snapshot', 'bookmark'] + + +class ZFSFacts(object): + def __init__(self, module): + + self.module = module + + self.name = module.params['name'] + self.recurse = module.params['recurse'] + self.parsable = module.params['parsable'] + self.properties = module.params['properties'] + self.type = module.params['type'] + self.depth = module.params['depth'] + + self._datasets = defaultdict(dict) + self.facts = [] + + def dataset_exists(self): + cmd = [self.module.get_bin_path('zfs')] + + cmd.append('list') + cmd.append(self.name) + + (rc, out, err) = self.module.run_command(cmd) + + if rc == 0: + return True + else: + return False + + def get_facts(self): + cmd = [self.module.get_bin_path('zfs')] + + cmd.append('get') + cmd.append('-H') + if self.parsable: + cmd.append('-p') + if self.recurse: + cmd.append('-r') + if int(self.depth) != 0: + cmd.append('-d') + cmd.append('%s' % self.depth) + if self.type: + cmd.append('-t') + cmd.append(self.type) + cmd.append('-o') + cmd.append('name,property,value') + cmd.append(self.properties) + cmd.append(self.name) + + (rc, out, err) = self.module.run_command(cmd) + + if rc == 0: + for line in out.splitlines(): + dataset, property, value = line.split('\t') + + self._datasets[dataset].update({property: value}) + + for k, v in iteritems(self._datasets): + v.update({'name': k}) + self.facts.append(v) + + return {'ansible_zfs_datasets': self.facts} + else: + self.module.fail_json(msg='Error while trying to get facts about ZFS dataset: %s' % self.name, + stderr=err, + rc=rc) + + +def main(): + module = AnsibleModule( + argument_spec=dict( + name=dict(required=True, aliases=['ds', 'dataset'], type='str'), + recurse=dict(required=False, 
default=False, type='bool'), + parsable=dict(required=False, default=False, type='bool'), + properties=dict(required=False, default='all', type='str'), + type=dict(required=False, default='all', type='str', choices=SUPPORTED_TYPES), + depth=dict(required=False, default=0, type='int') + ), + supports_check_mode=True + ) + + zfs_facts = ZFSFacts(module) + + result = {} + result['changed'] = False + result['name'] = zfs_facts.name + + if zfs_facts.parsable: + result['parsable'] = zfs_facts.parsable + + if zfs_facts.recurse: + result['recurse'] = zfs_facts.recurse + + if zfs_facts.dataset_exists(): + result['ansible_facts'] = zfs_facts.get_facts() + else: + module.fail_json(msg='ZFS dataset %s does not exist!' % zfs_facts.name) + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/storage/zfs/zpool_facts.py b/plugins/modules/storage/zfs/zpool_facts.py new file mode 100644 index 0000000000..6c66093dbd --- /dev/null +++ b/plugins/modules/storage/zfs/zpool_facts.py @@ -0,0 +1,214 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2016, Adam Števko +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: zpool_facts +short_description: Gather facts about ZFS pools. +description: + - Gather facts from ZFS pool properties. +author: Adam Števko (@xen0l) +options: + name: + description: + - ZFS pool name. + aliases: [ "pool", "zpool" ] + required: false + parsable: + description: + - Specifies if property values should be displayed in machine + friendly format. + type: bool + default: False + required: false + properties: + description: + - Specifies which dataset properties should be queried in comma-separated format. + For more information about dataset properties, check zpool(1M) man page. + aliases: [ "props" ] + default: all + required: false +''' + +EXAMPLES = ''' +# Gather facts about ZFS pool rpool +- zpool_facts: pool=rpool + +# Gather space usage about all imported ZFS pools +- zpool_facts: properties='free,size' + +- debug: msg='ZFS pool {{ item.name }} has {{ item.free }} free space out of {{ item.size }}.' 
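+# ansible_zfs_pools is a list with one dict per pool, so with_items visits each pool in turn.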
+ with_items: '{{ ansible_zfs_pools }}' +''' + +RETURN = ''' +ansible_facts: + description: Dictionary containing all the detailed information about the ZFS pool facts + returned: always + type: complex + contains: + ansible_zfs_pools: + description: ZFS pool facts + returned: always + type: str + sample: + { + "allocated": "3.46G", + "altroot": "-", + "autoexpand": "off", + "autoreplace": "off", + "bootfs": "rpool/ROOT/openindiana", + "cachefile": "-", + "capacity": "6%", + "comment": "-", + "dedupditto": "0", + "dedupratio": "1.00x", + "delegation": "on", + "expandsize": "-", + "failmode": "wait", + "feature@async_destroy": "enabled", + "feature@bookmarks": "enabled", + "feature@edonr": "enabled", + "feature@embedded_data": "active", + "feature@empty_bpobj": "active", + "feature@enabled_txg": "active", + "feature@extensible_dataset": "enabled", + "feature@filesystem_limits": "enabled", + "feature@hole_birth": "active", + "feature@large_blocks": "enabled", + "feature@lz4_compress": "active", + "feature@multi_vdev_crash_dump": "enabled", + "feature@sha512": "enabled", + "feature@skein": "enabled", + "feature@spacemap_histogram": "active", + "fragmentation": "3%", + "free": "46.3G", + "freeing": "0", + "guid": "15729052870819522408", + "health": "ONLINE", + "leaked": "0", + "listsnapshots": "off", + "name": "rpool", + "readonly": "off", + "size": "49.8G", + "version": "-" + } +name: + description: ZFS pool name + returned: always + type: str + sample: rpool +parsable: + description: if parsable output should be provided in machine friendly format. + returned: if 'parsable' is set to True + type: bool + sample: True +''' + +from collections import defaultdict + +from ansible.module_utils.six import iteritems +from ansible.module_utils.basic import AnsibleModule + + +class ZPoolFacts(object): + def __init__(self, module): + + self.module = module + + self.name = module.params['name'] + self.parsable = module.params['parsable'] + self.properties = module.params['properties'] + + self._pools = defaultdict(dict) + self.facts = [] + + def pool_exists(self): + cmd = [self.module.get_bin_path('zpool')] + + cmd.append('list') + cmd.append(self.name) + + (rc, out, err) = self.module.run_command(cmd) + + if rc == 0: + return True + else: + return False + + def get_facts(self): + cmd = [self.module.get_bin_path('zpool')] + + cmd.append('get') + cmd.append('-H') + if self.parsable: + cmd.append('-p') + cmd.append('-o') + cmd.append('name,property,value') + cmd.append(self.properties) + if self.name: + cmd.append(self.name) + + (rc, out, err) = self.module.run_command(cmd) + + if rc == 0: + for line in out.splitlines(): + pool, property, value = line.split('\t') + + self._pools[pool].update({property: value}) + + for k, v in iteritems(self._pools): + v.update({'name': k}) + self.facts.append(v) + + return {'ansible_zfs_pools': self.facts} + else: + self.module.fail_json(msg='Error while trying to get facts about ZFS pool: %s' % self.name, + stderr=err, + rc=rc) + + +def main(): + module = AnsibleModule( + argument_spec=dict( + name=dict(required=False, aliases=['pool', 'zpool'], type='str'), + parsable=dict(required=False, default=False, type='bool'), + properties=dict(required=False, default='all', type='str'), + ), + supports_check_mode=True + ) + + zpool_facts = ZPoolFacts(module) + + result = {} + result['changed'] = False + result['name'] = zpool_facts.name + + if zpool_facts.parsable: + result['parsable'] = zpool_facts.parsable + + if zpool_facts.name is not None: + if zpool_facts.pool_exists(): + 
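+            # The named pool exists; a pool that is not imported fails below instead.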
+            result['ansible_facts'] = zpool_facts.get_facts()
+        else:
+            module.fail_json(msg='ZFS pool %s does not exist!' % zpool_facts.name)
+    else:
+        result['ansible_facts'] = zpool_facts.get_facts()
+
+    module.exit_json(**result)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/system/aix_devices.py b/plugins/modules/system/aix_devices.py
new file mode 100644
index 0000000000..65a518629e
--- /dev/null
+++ b/plugins/modules/system/aix_devices.py
@@ -0,0 +1,376 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2017, 2018 Kairo Araujo
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+    'metadata_version': '1.1',
+    'status': ['preview'],
+    'supported_by': 'community'
+}
+
+DOCUMENTATION = r'''
+---
+author:
+- Kairo Araujo (@kairoaraujo)
+module: aix_devices
+short_description: Manages AIX devices
+description:
+- This module discovers, defines, removes and modifies attributes of AIX devices.
+options:
+  attributes:
+    description:
+    - A list of device attributes.
+    type: dict
+  device:
+    description:
+    - The name of the device.
+    - C(all) is valid to rescan C(available) all devices (AIX cfgmgr command).
+    type: str
+    required: true
+  force:
+    description:
+    - Forces action.
+    type: bool
+    default: no
+  recursive:
+    description:
+    - Removes or defines a device and children devices.
+    type: bool
+    default: no
+  state:
+    description:
+    - Controls the device state.
+    - C(available) (alias C(present)) rescans a specific device or all devices (when C(device) is not specified).
+    - C(removed) (alias C(absent)) removes a device.
+    - C(defined) changes device to Defined state.
+    type: str
+    choices: [ available, defined, removed ]
+    default: available
+'''

+EXAMPLES = r'''
+- name: Scan new devices
+  aix_devices:
+    device: all
+    state: available
+
+- name: Scan new virtual devices (vio0)
+  aix_devices:
+    device: vio0
+    state: available
+
+- name: Removing IP alias to en0
+  aix_devices:
+    device: en0
+    attributes:
+      delalias4: 10.0.0.100,255.255.255.0
+
+- name: Removes ent2
+  aix_devices:
+    device: ent2
+    state: removed
+
+- name: Put device en2 in Defined
+  aix_devices:
+    device: en2
+    state: defined
+
+- name: Removes ent4 (nonexistent)
+  aix_devices:
+    device: ent4
+    state: removed
+
+- name: Put device en4 in Defined (nonexistent)
+  aix_devices:
+    device: en4
+    state: defined
+
+- name: Put vscsi1 and children devices in Defined state
+  aix_devices:
+    device: vscsi1
+    recursive: yes
+    state: defined
+
+- name: Removes vscsi1 and children devices
+  aix_devices:
+    device: vscsi1
+    recursive: yes
+    state: removed
+
+- name: Changes en1 mtu to 9000 and disables arp
+  aix_devices:
+    device: en1
+    attributes:
+      mtu: 9000
+      arp: off
+    state: available
+
+- name: Configure IP, netmask and set en1 up
+  aix_devices:
+    device: en1
+    attributes:
+      netaddr: 192.168.0.100
+      netmask: 255.255.255.0
+      state: up
+    state: available
+
+- name: Adding IP alias to en0
+  aix_devices:
+    device: en0
+    attributes:
+      alias4: 10.0.0.100,255.255.255.0
+    state: available
+'''
+
+RETURN = r''' # '''
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def _check_device(module, device):
+    """
+    Check if device already exists and the state.
+    Args:
+        module: Ansible module.
+        device: device to be checked.
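+
+        A hypothetical lsdev line parsed here:
+            ent2 Available 0A-08 2-Port Gigabit Ethernet PCI-X Adapter
+        The second field (Available/Defined) is returned as the device state.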
+
+def _check_device(module, device):
+    """
+    Check if a device already exists and report its state.
+
+    Args:
+        module: Ansible module.
+        device: device to be checked.
+
+    Returns: bool, device state
+    """
+    lsdev_cmd = module.get_bin_path('lsdev', True)
+    rc, lsdev_out, err = module.run_command(["%s" % lsdev_cmd, '-C', '-l', "%s" % device])
+
+    if rc != 0:
+        module.fail_json(msg="Failed to run lsdev", rc=rc, err=err)
+
+    if lsdev_out:
+        device_state = lsdev_out.split()[1]
+        return True, device_state
+
+    device_state = None
+    return False, device_state
+
+
+def _check_device_attr(module, device, attr):
+    """
+    Check the current value of a device attribute.
+
+    Args:
+        module: Ansible module.
+        device: device to check attributes.
+        attr: attribute to be checked.
+
+    Returns: the current attribute value, an empty string for hidden
+        attributes, or None when the attribute does not exist.
+    """
+    lsattr_cmd = module.get_bin_path('lsattr', True)
+    rc, lsattr_out, err = module.run_command(["%s" % lsattr_cmd, '-El', "%s" % device, '-a', "%s" % attr])
+
+    hidden_attrs = ['delalias4', 'delalias6']
+
+    if rc == 255:
+
+        if attr in hidden_attrs:
+            current_param = ''
+        else:
+            current_param = None
+
+        return current_param
+
+    elif rc != 0:
+        module.fail_json(msg="Failed to run lsattr: %s" % err, rc=rc, err=err)
+
+    current_param = lsattr_out.split()[1]
+    return current_param
+
+
+def discover_device(module, device):
+    """ Discover AIX devices."""
+    cfgmgr_cmd = module.get_bin_path('cfgmgr', True)
+
+    if device is not None:
+        device = "-l %s" % device
+
+    else:
+        device = ''
+
+    changed = True
+    msg = ''
+    if not module.check_mode:
+        rc, cfgmgr_out, err = module.run_command(["%s" % cfgmgr_cmd, "%s" % device])
+        msg = cfgmgr_out
+
+    return changed, msg
+
+
+def change_device_attr(module, attributes, device, force):
+    """ Change AIX device attributes. """
+
+    attr_changed = []
+    attr_not_changed = []
+    attr_invalid = []
+    chdev_cmd = module.get_bin_path('chdev', True)
+
+    for attr in list(attributes.keys()):
+        new_param = attributes[attr]
+        current_param = _check_device_attr(module, device, attr)
+
+        if current_param is None:
+            attr_invalid.append(attr)
+
+        elif current_param != new_param:
+            if force:
+                cmd = ["%s" % chdev_cmd, '-l', "%s" % device, '-a', "%s=%s" % (attr, attributes[attr]), "%s" % force]
+            else:
+                cmd = ["%s" % chdev_cmd, '-l', "%s" % device, '-a', "%s=%s" % (attr, attributes[attr])]
+
+            if not module.check_mode:
+                rc, chdev_out, err = module.run_command(cmd)
+                if rc != 0:
+                    module.fail_json(msg="Failed to run chdev.", rc=rc, err=err)
+
+            attr_changed.append(attributes[attr])
+        else:
+            attr_not_changed.append(attributes[attr])
+
+    if len(attr_changed) > 0:
+        changed = True
+        attr_changed_msg = "Attributes changed: %s. " % ','.join(attr_changed)
+    else:
+        changed = False
+        attr_changed_msg = ''
+
+    if len(attr_not_changed) > 0:
+        attr_not_changed_msg = "Attributes already set: %s. " % ','.join(attr_not_changed)
+    else:
+        attr_not_changed_msg = ''
+
+    if len(attr_invalid) > 0:
+        attr_invalid_msg = "Invalid attributes: %s " % ', '.join(attr_invalid)
+    else:
+        attr_invalid_msg = ''
+
+    msg = "%s%s%s" % (attr_changed_msg, attr_not_changed_msg, attr_invalid_msg)
+
+    return changed, msg
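The remove_device function below assembles its rmdev flags from two lookup tables: '-d' actually deletes the device definition while an empty string merely moves the device to Defined, and '-R' recurses over child devices. A compact sketch of that mapping (the helper name and sample values are hypothetical):

    state_opt = {'removed': '-d', 'absent': '-d', 'defined': ''}
    recursive_opt = {True: '-R', False: ''}

    def rmdev_args(device, state, recursive, force=''):
        # Drop the empty-string placeholders so only real flags remain.
        flags = [state_opt[state], recursive_opt[recursive], force]
        return ['-l', device] + [f for f in flags if f]

    print(rmdev_args('vscsi1', 'removed', True))
    # -> ['-l', 'vscsi1', '-d', '-R']

Unlike the module, which passes the empty placeholders straight through to run_command, this sketch filters them out, which avoids handing empty arguments to the command.
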
""" + + state_opt = { + 'removed': '-d', + 'absent': '-d', + 'defined': '' + } + + recursive_opt = { + True: '-R', + False: '' + } + + recursive = recursive_opt[recursive] + state = state_opt[state] + + changed = True + msg = '' + rmdev_cmd = module.get_bin_path('rmdev', True) + + if not module.check_mode: + if state: + rc, rmdev_out, err = module.run_command(["%s" % rmdev_cmd, "-l", "%s" % device, "%s" % recursive, "%s" % force]) + else: + rc, rmdev_out, err = module.run_command(["%s" % rmdev_cmd, "-l", "%s" % device, "%s" % recursive]) + + if rc != 0: + module.fail_json(msg="Failed to run rmdev", rc=rc, err=err) + + msg = rmdev_out + + return changed, msg + + +def main(): + + module = AnsibleModule( + argument_spec=dict( + attributes=dict(type='dict'), + device=dict(type='str'), + force=dict(type='bool', default=False), + recursive=dict(type='bool', default=False), + state=dict(type='str', default='available', choices=['available', 'defined', 'removed']), + ), + supports_check_mode=True, + ) + + force_opt = { + True: '-f', + False: '', + } + + attributes = module.params['attributes'] + device = module.params['device'] + force = force_opt[module.params['force']] + recursive = module.params['recursive'] + state = module.params['state'] + + result = dict( + changed=False, + msg='', + ) + + if state == 'available' or state == 'present': + if attributes: + # change attributes on device + device_status, device_state = _check_device(module, device) + if device_status: + result['changed'], result['msg'] = change_device_attr(module, attributes, device, force) + else: + result['msg'] = "Device %s does not exist." % device + + else: + # discovery devices (cfgmgr) + if device and device != 'all': + device_status, device_state = _check_device(module, device) + if device_status: + # run cfgmgr on specific device + result['changed'], result['msg'] = discover_device(module, device) + + else: + result['msg'] = "Device %s does not exist." % device + + else: + result['changed'], result['msg'] = discover_device(module, device) + + elif state == 'removed' or state == 'absent' or state == 'defined': + if not device: + result['msg'] = "device is required to removed or defined state." + + else: + # Remove device + check_device, device_state = _check_device(module, device) + if check_device: + if state == 'defined' and device_state == 'Defined': + result['changed'] = False + result['msg'] = 'Device %s already in Defined' % device + + else: + result['changed'], result['msg'] = remove_device(module, device, force, recursive, state) + + else: + result['msg'] = "Device %s does not exist." % device + + else: + result['msg'] = "Unexpected state %s." 
diff --git a/plugins/modules/system/aix_filesystem.py b/plugins/modules/system/aix_filesystem.py
new file mode 100644
index 0000000000..f32297ad65
--- /dev/null
+++ b/plugins/modules/system/aix_filesystem.py
@@ -0,0 +1,573 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2017, Kairo Araujo
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+    'metadata_version': '1.1',
+    'status': ['preview'],
+    'supported_by': 'community'
+}
+
+DOCUMENTATION = r'''
+---
+author:
+  - Kairo Araujo (@kairoaraujo)
+module: aix_filesystem
+short_description: Configure LVM and NFS file systems for AIX
+description:
+  - This module creates, removes, mounts and unmounts LVM and NFS file
+    systems for AIX using C(/etc/filesystems).
+  - For LVM file systems it is possible to resize a file system.
+options:
+  account_subsystem:
+    description:
+      - Specifies whether the file system is to be processed by the accounting subsystem.
+    type: bool
+    default: no
+  attributes:
+    description:
+      - Specifies attributes for the file system, separated by commas.
+    type: list
+    default: agblksize='4096',isnapshot='no'
+  auto_mount:
+    description:
+      - File system is automatically mounted at system restart.
+    type: bool
+    default: yes
+  device:
+    description:
+      - Logical volume (LV) device name or remote export device to create a NFS file system.
+      - It is used to create a file system on an already existing logical volume or the exported NFS file system.
+      - If not mentioned, a new logical volume name will be created following AIX standards (LVM).
+    type: str
+  fs_type:
+    description:
+      - Specifies the virtual file system type.
+    type: str
+    default: jfs2
+  permissions:
+    description:
+      - Set file system permissions. C(rw) (read-write) or C(ro) (read-only).
+    type: str
+    choices: [ ro, rw ]
+    default: rw
+  mount_group:
+    description:
+      - Specifies the mount group.
+    type: str
+  filesystem:
+    description:
+      - Specifies the mount point, which is the directory where the file system will be mounted.
+    type: str
+    required: true
+  nfs_server:
+    description:
+      - Specifies a Network File System (NFS) server.
+    type: str
+  rm_mount_point:
+    description:
+      - Removes the mount point directory when used with state C(absent).
+    type: bool
+    default: no
+  size:
+    description:
+      - Specifies the file system size.
+      - A file system that is already C(present) will be resized.
+      - The unit is 512-byte blocks, Megabytes or Gigabytes. If the value has
+        M specified it will be in Megabytes. If the value has G specified it
+        will be in Gigabytes.
+      - If neither M nor G is specified, the value is in 512-byte blocks.
+      - If the value begins with "+", that amount is added to the current size.
+      - If the value begins with "-", that amount is removed from the current size.
+      - Without "+" or "-", the value is the new total size.
+      - The size respects the AIX LVM standards.
+    type: str
+  state:
+    description:
+      - Controls the file system state.
+      - C(present) checks whether the file system exists, creating or resizing it as needed.
+      - C(absent) removes an existing file system if it is already C(unmounted).
+      - C(mounted) checks whether the file system is mounted, mounting it if necessary.
+      - C(unmounted) checks whether the file system is unmounted, unmounting it if necessary.
+ type: str + required: true + choices: [ absent, mounted, present, unmounted ] + default: present + vg: + description: + - Specifies an existing volume group (VG). + type: str +notes: + - For more C(attributes), please check "crfs" AIX manual. +''' + +EXAMPLES = r''' +- name: Create filesystem in a previously defined logical volume. + aix_filesystem: + device: testlv + filesystem: /testfs + state: present + +- name: Creating NFS filesystem from nfshost. + aix_filesystem: + device: /home/ftp + nfs_server: nfshost + filesystem: /home/ftp + state: present + +- name: Creating a new file system without a previously logical volume. + aix_filesystem: + filesystem: /newfs + size: 1G + state: present + vg: datavg + +- name: Unmounting /testfs. + aix_filesystem: + filesystem: /testfs + state: unmounted + +- name: Resizing /mksysb to +512M. + aix_filesystem: + filesystem: /mksysb + size: +512M + state: present + +- name: Resizing /mksysb to 11G. + aix_filesystem: + filesystem: /mksysb + size: 11G + state: present + +- name: Resizing /mksysb to -2G. + aix_filesystem: + filesystem: /mksysb + size: -2G + state: present + +- name: Remove NFS filesystem /home/ftp. + aix_filesystem: + filesystem: /home/ftp + rm_mount_point: yes + state: absent + +- name: Remove /newfs. + aix_filesystem: + filesystem: /newfs + rm_mount_point: yes + state: absent +''' + +RETURN = r''' +changed: + description: Return changed for aix_filesystems actions as true or false. + returned: always + type: bool +msg: + description: Return message regarding the action. + returned: always + type: str +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.ansible.posix.plugins.module_utils.ismount import ismount +import re + + +def _fs_exists(module, filesystem): + """ + Check if file system already exists on /etc/filesystems. + + :param module: Ansible module. + :param filesystem: filesystem name. + :return: True or False. + """ + lsfs_cmd = module.get_bin_path('lsfs', True) + rc, lsfs_out, err = module.run_command("%s -l %s" % (lsfs_cmd, filesystem)) + if rc == 1: + if re.findall("No record matching", err): + return False + + else: + module.fail_json(msg="Failed to run lsfs. Error message: %s" % err) + + else: + + return True + + +def _check_nfs_device(module, nfs_host, device): + """ + Validate if NFS server is exporting the device (remote export). + + :param module: Ansible module. + :param nfs_host: nfs_host parameter, NFS server. + :param device: device parameter, remote export. + :return: True or False. + """ + showmount_cmd = module.get_bin_path('showmount', True) + rc, showmount_out, err = module.run_command( + "%s -a %s" % (showmount_cmd, nfs_host)) + if rc != 0: + module.fail_json(msg="Failed to run showmount. Error message: %s" % err) + else: + showmount_data = showmount_out.splitlines() + for line in showmount_data: + if line.split(':')[1] == device: + return True + + return False + + +def _validate_vg(module, vg): + """ + Check the current state of volume group. + + :param module: Ansible module argument spec. + :param vg: Volume Group name. + :return: True (VG in varyon state) or False (VG in varyoff state) or + None (VG does not exist), message. + """ + lsvg_cmd = module.get_bin_path('lsvg', True) + rc, current_active_vgs, err = module.run_command("%s -o" % lsvg_cmd) + if rc != 0: + module.fail_json(msg="Failed executing %s command." % lsvg_cmd) + + rc, current_all_vgs, err = module.run_command("%s" % lsvg_cmd) + if rc != 0: + module.fail_json(msg="Failed executing %s command." 
% lsvg_cmd) + + if vg in current_all_vgs and vg not in current_active_vgs: + msg = "Volume group %s is in varyoff state." % vg + return False, msg + elif vg in current_active_vgs: + msg = "Volume group %s is in varyon state." % vg + return True, msg + else: + msg = "Volume group %s does not exist." % vg + return None, msg + + +def resize_fs(module, filesystem, size): + """ Resize LVM file system. """ + + chfs_cmd = module.get_bin_path('chfs', True) + if not module.check_mode: + rc, chfs_out, err = module.run_command('%s -a size="%s" %s' % (chfs_cmd, size, filesystem)) + + if rc == 28: + changed = False + return changed, chfs_out + elif rc != 0: + if re.findall('Maximum allocation for logical', err): + changed = False + return changed, err + else: + module.fail_json(msg="Failed to run chfs. Error message: %s" % err) + + else: + if re.findall('The filesystem size is already', chfs_out): + changed = False + else: + changed = True + + return changed, chfs_out + else: + changed = True + msg = '' + + return changed, msg + + +def create_fs( + module, fs_type, filesystem, vg, device, size, mount_group, auto_mount, + account_subsystem, permissions, nfs_server, attributes): + """ Create LVM file system or NFS remote mount point. """ + + attributes = ' -a '.join(attributes) + + # Parameters definition. + account_subsys_opt = { + True: '-t yes', + False: '-t no' + } + + if nfs_server is not None: + auto_mount_opt = { + True: '-A', + False: '-a' + } + + else: + auto_mount_opt = { + True: '-A yes', + False: '-A no' + } + + if size is None: + size = '' + else: + size = "-a size=%s" % size + + if device is None: + device = '' + else: + device = "-d %s" % device + + if vg is None: + vg = '' + else: + vg_state, msg = _validate_vg(module, vg) + if vg_state: + vg = "-g %s" % vg + else: + changed = False + + return changed, msg + + if mount_group is None: + mount_group = '' + + else: + mount_group = "-u %s" % mount_group + + auto_mount = auto_mount_opt[auto_mount] + account_subsystem = account_subsys_opt[account_subsystem] + + if nfs_server is not None: + # Creates a NFS file system. + mknfsmnt_cmd = module.get_bin_path('mknfsmnt', True) + if not module.check_mode: + rc, mknfsmnt_out, err = module.run_command('%s -f "%s" %s -h "%s" -t "%s" "%s" -w "bg"' % ( + mknfsmnt_cmd, filesystem, device, nfs_server, permissions, auto_mount)) + if rc != 0: + module.fail_json(msg="Failed to run mknfsmnt. Error message: %s" % err) + else: + changed = True + msg = "NFS file system %s created." % filesystem + + return changed, msg + else: + changed = True + msg = '' + + return changed, msg + + else: + # Creates a LVM file system. + crfs_cmd = module.get_bin_path('crfs', True) + if not module.check_mode: + cmd = "%s -v %s -m %s %s %s %s %s %s -p %s %s -a %s" % ( + crfs_cmd, fs_type, filesystem, vg, device, mount_group, auto_mount, account_subsystem, permissions, size, attributes) + rc, crfs_out, err = module.run_command(cmd) + + if rc == 10: + module.exit_json( + msg="Using a existent previously defined logical volume, " + "volume group needs to be empty. %s" % err) + + elif rc != 0: + module.fail_json(msg="Failed to run %s. Error message: %s" % (cmd, err)) + + else: + changed = True + return changed, crfs_out + else: + changed = True + msg = '' + + return changed, msg + + +def remove_fs(module, filesystem, rm_mount_point): + """ Remove an LVM file system or NFS entry. """ + + # Command parameters. 
+ rm_mount_point_opt = { + True: '-r', + False: '' + } + + rm_mount_point = rm_mount_point_opt[rm_mount_point] + + rmfs_cmd = module.get_bin_path('rmfs', True) + if not module.check_mode: + cmd = "%s -r %s %s" % (rmfs_cmd, rm_mount_point, filesystem) + rc, rmfs_out, err = module.run_command(cmd) + if rc != 0: + module.fail_json(msg="Failed to run %s. Error message: %s" % (cmd, err)) + else: + changed = True + msg = rmfs_out + if not rmfs_out: + msg = "File system %s removed." % filesystem + + return changed, msg + else: + changed = True + msg = '' + + return changed, msg + + +def mount_fs(module, filesystem): + """ Mount a file system. """ + mount_cmd = module.get_bin_path('mount', True) + + if not module.check_mode: + rc, mount_out, err = module.run_command( + "%s %s" % (mount_cmd, filesystem)) + if rc != 0: + module.fail_json(msg="Failed to run mount. Error message: %s" % err) + else: + changed = True + msg = "File system %s mounted." % filesystem + + return changed, msg + else: + changed = True + msg = '' + + return changed, msg + + +def unmount_fs(module, filesystem): + """ Unmount a file system.""" + unmount_cmd = module.get_bin_path('unmount', True) + + if not module.check_mode: + rc, unmount_out, err = module.run_command("%s %s" % (unmount_cmd, filesystem)) + if rc != 0: + module.fail_json(msg="Failed to run unmount. Error message: %s" % err) + else: + changed = True + msg = "File system %s unmounted." % filesystem + + return changed, msg + else: + changed = True + msg = '' + + return changed, msg + + +def main(): + module = AnsibleModule( + argument_spec=dict( + account_subsystem=dict(type='bool', default=False), + attributes=dict(type='list', default=["agblksize='4096'", "isnapshot='no'"]), + auto_mount=dict(type='bool', default=True), + device=dict(type='str'), + filesystem=dict(type='str', required=True), + fs_type=dict(type='str', default='jfs2'), + permissions=dict(type='str', default='rw', choices=['rw', 'ro']), + mount_group=dict(type='str'), + nfs_server=dict(type='str'), + rm_mount_point=dict(type='bool', default=False), + size=dict(type='str'), + state=dict(type='str', default='present', choices=['absent', 'mounted', 'present', 'unmounted']), + vg=dict(type='str'), + ), + supports_check_mode=True, + ) + + account_subsystem = module.params['account_subsystem'] + attributes = module.params['attributes'] + auto_mount = module.params['auto_mount'] + device = module.params['device'] + fs_type = module.params['fs_type'] + permissions = module.params['permissions'] + mount_group = module.params['mount_group'] + filesystem = module.params['filesystem'] + nfs_server = module.params['nfs_server'] + rm_mount_point = module.params['rm_mount_point'] + size = module.params['size'] + state = module.params['state'] + vg = module.params['vg'] + + result = dict( + changed=False, + msg='', + ) + + if state == 'present': + fs_mounted = ismount(filesystem) + fs_exists = _fs_exists(module, filesystem) + + # Check if fs is mounted or exists. + if fs_mounted or fs_exists: + result['msg'] = "File system %s already exists." % filesystem + result['changed'] = False + + # If parameter size was passed, resize fs. + if size is not None: + result['changed'], result['msg'] = resize_fs(module, filesystem, size) + + # If fs doesn't exist, create it. + else: + # Check if fs will be a NFS device. + if nfs_server is not None: + if device is None: + result['msg'] = 'Parameter "device" is required when "nfs_server" is defined.' + module.fail_json(**result) + else: + # Create a fs from NFS export. 
+                    if _check_nfs_device(module, nfs_server, device):
+                        result['changed'], result['msg'] = create_fs(
+                            module, fs_type, filesystem, vg, device, size, mount_group, auto_mount, account_subsystem, permissions, nfs_server, attributes)
+
+            if device is None:
+                if vg is None:
+                    result['msg'] = 'Required parameter "device" and/or "vg" is missing for filesystem creation.'
+                    module.fail_json(**result)
+                else:
+                    # Create a fs from a volume group, letting LVM create the logical volume.
+                    result['changed'], result['msg'] = create_fs(
+                        module, fs_type, filesystem, vg, device, size, mount_group, auto_mount, account_subsystem, permissions, nfs_server, attributes)
+
+            if device is not None and nfs_server is None:
+                # Create a fs from a previously defined lv device.
+                result['changed'], result['msg'] = create_fs(
+                    module, fs_type, filesystem, vg, device, size, mount_group, auto_mount, account_subsystem, permissions, nfs_server, attributes)
+
+    elif state == 'absent':
+        if ismount(filesystem):
+            result['msg'] = "File system %s is mounted." % filesystem
+
+        else:
+            fs_status = _fs_exists(module, filesystem)
+            if not fs_status:
+                result['msg'] = "File system %s does not exist." % filesystem
+            else:
+                result['changed'], result['msg'] = remove_fs(module, filesystem, rm_mount_point)
+
+    elif state == 'mounted':
+        if ismount(filesystem):
+            result['changed'] = False
+            result['msg'] = "File system %s already mounted." % filesystem
+        else:
+            result['changed'], result['msg'] = mount_fs(module, filesystem)
+
+    elif state == 'unmounted':
+        if not ismount(filesystem):
+            result['changed'] = False
+            result['msg'] = "File system %s already unmounted." % filesystem
+        else:
+            result['changed'], result['msg'] = unmount_fs(module, filesystem)
+
+    else:
+        # Unreachable code block
+        result['msg'] = "Unexpected state %s." % state
+        module.fail_json(**result)
+
+    module.exit_json(**result)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/system/aix_inittab.py b/plugins/modules/system/aix_inittab.py
new file mode 100644
index 0000000000..1bce604eae
--- /dev/null
+++ b/plugins/modules/system/aix_inittab.py
@@ -0,0 +1,252 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2017, Joris Weijters
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+DOCUMENTATION = r'''
+---
+author:
+- Joris Weijters (@molekuul)
+module: aix_inittab
+short_description: Manages the inittab on AIX
+description:
+  - Manages the inittab on AIX.
+options:
+  name:
+    description:
+      - Name of the inittab entry.
+    type: str
+    required: yes
+    aliases: [ service ]
+  runlevel:
+    description:
+      - Runlevel of the entry.
+    type: str
+    required: yes
+  action:
+    description:
+      - Action that init has to take with this entry.
+    type: str
+    required: yes
+    choices:
+    - boot
+    - bootwait
+    - hold
+    - initdefault
+    - 'off'
+    - once
+    - ondemand
+    - powerfail
+    - powerwait
+    - respawn
+    - sysinit
+    - wait
+  command:
+    description:
+      - The command to run.
+    type: str
+    required: yes
+  insertafter:
+    description:
+      - The inittab entry after which the new entry should be inserted.
+    type: str
+  state:
+    description:
+      - Whether the entry should be present or absent in the inittab file.
+    type: str
+    choices: [ absent, present ]
+    default: present
+notes:
+  - The changes are persistent across reboots.
+  - You need root rights to read or adjust the inittab with the C(lsitab), C(chitab), C(mkitab) or C(rmitab) commands.
+  - Tested on AIX 7.1.
+requirements:
+- itertools
+'''
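The module drives four AIX commands: lsitab to read the current entry, mkitab to add one, chitab to change one and rmitab to remove one. A sketch of how state and the existence check select a command in main() further below (the helper is hypothetical):

    def pick_command(state, entry_exists):
        # lsitab is always run first to detect the current entry.
        if state == 'present':
            return 'chitab' if entry_exists else 'mkitab'
        if state == 'absent' and entry_exists:
            return 'rmitab'
        return None  # nothing to do
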
+
+EXAMPLES = '''
+# Add service startmyservice to the inittab, directly after service existingservice.
+- name: Add startmyservice to inittab
+  aix_inittab:
+    name: startmyservice
+    runlevel: 4
+    action: once
+    command: echo hello
+    insertafter: existingservice
+    state: present
+  become: yes
+
+# Change inittab entry startmyservice to runlevel "2" and processaction "wait".
+- name: Change startmyservice to inittab
+  aix_inittab:
+    name: startmyservice
+    runlevel: 2
+    action: wait
+    command: echo hello
+    state: present
+  become: yes
+
+- name: Remove startmyservice from inittab
+  aix_inittab:
+    name: startmyservice
+    runlevel: 2
+    action: wait
+    command: echo hello
+    state: absent
+  become: yes
+'''
+
+RETURN = '''
+name:
+  description: Name of the adjusted inittab entry
+  returned: always
+  type: str
+  sample: startmyservice
+msg:
+  description: Action done with the inittab entry
+  returned: changed
+  type: str
+  sample: changed inittab entry startmyservice
+changed:
+  description: Whether the inittab changed or not
+  returned: always
+  type: bool
+  sample: true
+'''
+
+# Import necessary libraries
+try:
+    # python 2
+    from itertools import izip
+except ImportError:
+    izip = zip
+
+from ansible.module_utils.basic import AnsibleModule
+
+# end import modules
+# start defining the functions
+
+
+def check_current_entry(module):
+    # Check if the entry exists; if not, return 'exist': False in the return
+    # dict, otherwise return 'exist': True along with the entry fields.
+    existsdict = {'exist': False}
+    lsitab = module.get_bin_path('lsitab')
+    (rc, out, err) = module.run_command([lsitab, module.params['name']])
+    if rc == 0:
+        keys = ('name', 'runlevel', 'action', 'command')
+        values = out.split(":")
+        # strip non-readable characters such as \n
+        values = map(lambda s: s.strip(), values)
+        existsdict = dict(izip(keys, values))
+        existsdict.update({'exist': True})
+    return existsdict
+
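check_current_entry above turns the colon-separated lsitab output into a dict by zipping a fixed key tuple with the split fields. The same round trip as a standalone snippet (the sample entry is hypothetical):

    keys = ('name', 'runlevel', 'action', 'command')
    out = 'startmyservice:4:once:echo hello\n'
    values = (field.strip() for field in out.split(':'))
    entry = dict(zip(keys, values))
    # -> {'name': 'startmyservice', 'runlevel': '4',
    #     'action': 'once', 'command': 'echo hello'}
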
+
+def main():
+    # initialize
+    module = AnsibleModule(
+        argument_spec=dict(
+            name=dict(type='str', required=True, aliases=['service']),
+            runlevel=dict(type='str', required=True),
+            action=dict(type='str', choices=[
+                'boot',
+                'bootwait',
+                'hold',
+                'initdefault',
+                'off',
+                'once',
+                'ondemand',
+                'powerfail',
+                'powerwait',
+                'respawn',
+                'sysinit',
+                'wait',
+            ]),
+            command=dict(type='str', required=True),
+            insertafter=dict(type='str'),
+            state=dict(type='str', default='present', choices=['absent', 'present']),
+        ),
+        supports_check_mode=True,
+    )
+
+    result = {
+        'name': module.params['name'],
+        'changed': False,
+        'msg': ""
+    }
+
+    # Find commandline strings
+    mkitab = module.get_bin_path('mkitab')
+    rmitab = module.get_bin_path('rmitab')
+    chitab = module.get_bin_path('chitab')
+    rc = 0
+
+    # check if the new entry exists
+    current_entry = check_current_entry(module)
+
+    # if action is install or change,
+    if module.params['state'] == 'present':
+
+        # create new entry string
+        new_entry = module.params['name'] + ":" + module.params['runlevel'] + \
+            ":" + module.params['action'] + ":" + module.params['command']
+
+        # If the current entry exists with different fields, change it (if the
+        # entry does not exist, it will be created)
+        if (not current_entry['exist']) or (
+                module.params['runlevel'] != current_entry['runlevel'] or
+                module.params['action'] != current_entry['action'] or
+                module.params['command'] != current_entry['command']):
+
+            # If the entry does exist then change the entry
+            if current_entry['exist']:
+                if not module.check_mode:
+                    (rc, out, err) = module.run_command([chitab, new_entry])
+                if rc != 0:
+                    module.fail_json(
+                        msg="could not change inittab", rc=rc, err=err)
+                result['msg'] = "changed inittab entry" + " " + current_entry['name']
+                result['changed'] = True
+
+            # If the entry does not exist create the entry
+            elif not current_entry['exist']:
+                if module.params['insertafter']:
+                    if not module.check_mode:
+                        (rc, out, err) = module.run_command(
+                            [mkitab, '-i', module.params['insertafter'], new_entry])
+                else:
+                    if not module.check_mode:
+                        (rc, out, err) = module.run_command(
+                            [mkitab, new_entry])
+
+                if rc != 0:
+                    module.fail_json(msg="could not adjust inittab", rc=rc, err=err)
+                result['msg'] = "added inittab entry" + " " + module.params['name']
+                result['changed'] = True
+
+    elif module.params['state'] == 'absent':
+        # If the action is remove and the entry exists then remove the entry
+        if current_entry['exist']:
+            if not module.check_mode:
+                (rc, out, err) = module.run_command(
+                    [rmitab, module.params['name']])
+                if rc != 0:
+                    module.fail_json(
+                        msg="could not remove entry from inittab", rc=rc, err=err)
+            result['msg'] = "removed inittab entry" + " " + current_entry['name']
+            result['changed'] = True
+
+    module.exit_json(**result)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/system/aix_lvg.py b/plugins/modules/system/aix_lvg.py
new file mode 100644
index 0000000000..314eb31ceb
--- /dev/null
+++ b/plugins/modules/system/aix_lvg.py
@@ -0,0 +1,368 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2017, Kairo Araujo
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+    'metadata_version': '1.1',
+    'status': ['preview'],
+    'supported_by': 'community'
+}
+
+DOCUMENTATION = r'''
+---
+author:
+- Kairo Araujo (@kairoaraujo)
+module: aix_lvg
+short_description: Manage LVM volume groups on AIX
+description:
+- This module creates, removes or resizes volume groups on AIX LVM.
+options:
+  force:
+    description:
+    - Force volume group creation.
+    type: bool
+    default: no
+  pp_size:
+    description:
+    - The size of the physical partition in megabytes.
+    type: int
+  pvs:
+    description:
+    - List of comma-separated devices to use as physical devices in this volume group.
+    - Required when creating or extending (C(present) state) the volume group.
+    - If not provided when reducing (C(absent) state), the entire volume group will be removed.
+    type: list
+  state:
+    description:
+    - Controls whether the volume group exists and its AIX state, varyonvg C(varyon) or varyoffvg C(varyoff).
+    type: str
+    choices: [ absent, present, varyoff, varyon ]
+    default: present
+  vg:
+    description:
+    - The name of the volume group.
+    type: str
+    required: true
+  vg_type:
+    description:
+    - The type of the volume group.
+    type: str
+    choices: [ big, normal, scalable ]
+    default: normal
+notes:
+- AIX permits removing a VG only when none of its LVs/file systems are busy.
+- The module does not modify the PP size of an already present volume group.
+''' + +EXAMPLES = r''' +- name: Create a volume group datavg + aix_lvg: + vg: datavg + pp_size: 128 + vg_type: scalable + state: present + +- name: Removing a volume group datavg + aix_lvg: + vg: datavg + state: absent + +- name: Extending rootvg + aix_lvg: + vg: rootvg + pvs: hdisk1 + state: present + +- name: Reducing rootvg + aix_lvg: + vg: rootvg + pvs: hdisk1 + state: absent +''' + +RETURN = r''' # ''' + +from ansible.module_utils.basic import AnsibleModule + + +def _validate_pv(module, vg, pvs): + """ + Function to validate if the physical volume (PV) is not already in use by + another volume group or Oracle ASM. + + :param module: Ansible module argument spec. + :param vg: Volume group name. + :param pvs: Physical volume list. + :return: [bool, message] or module.fail_json for errors. + """ + + lspv_cmd = module.get_bin_path('lspv', True) + rc, current_lspv, stderr = module.run_command("%s" % lspv_cmd) + if rc != 0: + module.fail_json(msg="Failed executing 'lspv' command.", rc=rc, stdout=current_lspv, stderr=stderr) + + for pv in pvs: + # Get pv list. + lspv_list = {} + for line in current_lspv.splitlines(): + pv_data = line.split() + lspv_list[pv_data[0]] = pv_data[2] + + # Check if pv exists and is free. + if pv not in lspv_list.keys(): + module.fail_json(msg="Physical volume '%s' doesn't exist." % pv) + + if lspv_list[pv] == 'None': + # Disk None, looks free. + # Check if PV is not already in use by Oracle ASM. + lquerypv_cmd = module.get_bin_path('lquerypv', True) + rc, current_lquerypv, stderr = module.run_command("%s -h /dev/%s 20 10" % (lquerypv_cmd, pv)) + if rc != 0: + module.fail_json(msg="Failed executing lquerypv command.", rc=rc, stdout=current_lquerypv, stderr=stderr) + + if 'ORCLDISK' in current_lquerypv: + module.fail_json("Physical volume '%s' is already used by Oracle ASM." % pv) + + msg = "Physical volume '%s' is ok to be used." % pv + return True, msg + + # Check if PV is already in use for the same vg. + elif vg != lspv_list[pv]: + module.fail_json(msg="Physical volume '%s' is in use by another volume group '%s'." % (pv, lspv_list[pv])) + + msg = "Physical volume '%s' is already used by volume group '%s'." % (pv, lspv_list[pv]) + return False, msg + + +def _validate_vg(module, vg): + """ + Check the current state of volume group. + + :param module: Ansible module argument spec. + :param vg: Volume Group name. + :return: True (VG in varyon state) or False (VG in varyoff state) or + None (VG does not exist), message. + """ + lsvg_cmd = module.get_bin_path('lsvg', True) + rc, current_active_vgs, err = module.run_command("%s -o" % lsvg_cmd) + if rc != 0: + module.fail_json(msg="Failed executing '%s' command." % lsvg_cmd) + + rc, current_all_vgs, err = module.run_command("%s" % lsvg_cmd) + if rc != 0: + module.fail_json(msg="Failed executing '%s' command." % lsvg_cmd) + + if vg in current_all_vgs and vg not in current_active_vgs: + msg = "Volume group '%s' is in varyoff state." % vg + return False, msg + + if vg in current_active_vgs: + msg = "Volume group '%s' is in varyon state." % vg + return True, msg + + msg = "Volume group '%s' does not exist." % vg + return None, msg + + +def create_extend_vg(module, vg, pvs, pp_size, vg_type, force, vg_validation): + """ Creates or extend a volume group. """ + + # Command option parameters. + force_opt = { + True: '-f', + False: '' + } + + vg_opt = { + 'normal': '', + 'big': '-B', + 'scalable': '-S', + } + + # Validate if PV are not already in use. 
+ pv_state, msg = _validate_pv(module, vg, pvs) + if not pv_state: + changed = False + return changed, msg + + vg_state, msg = vg_validation + if vg_state is False: + changed = False + return changed, msg + + elif vg_state is True: + # Volume group extension. + changed = True + msg = "" + + if not module.check_mode: + extendvg_cmd = module.get_bin_path('extendvg', True) + rc, output, err = module.run_command("%s %s %s" % (extendvg_cmd, vg, ' '.join(pvs))) + if rc != 0: + changed = False + msg = "Extending volume group '%s' has failed." % vg + return changed, msg + + msg = "Volume group '%s' extended." % vg + return changed, msg + + elif vg_state is None: + # Volume group creation. + changed = True + msg = '' + + if not module.check_mode: + mkvg_cmd = module.get_bin_path('mkvg', True) + rc, output, err = module.run_command("%s %s %s %s -y %s %s" % (mkvg_cmd, vg_opt[vg_type], pp_size, force_opt[force], vg, ' '.join(pvs))) + if rc != 0: + changed = False + msg = "Creating volume group '%s' failed." % vg + return changed, msg + + msg = "Volume group '%s' created." % vg + return changed, msg + + +def reduce_vg(module, vg, pvs, vg_validation): + vg_state, msg = vg_validation + + if vg_state is False: + changed = False + return changed, msg + + elif vg_state is None: + changed = False + return changed, msg + + # Define pvs_to_remove (list of physical volumes to be removed). + if pvs is None: + # Remove VG if pvs are note informed. + # Remark: AIX will permit remove only if the VG has not LVs. + lsvg_cmd = module.get_bin_path('lsvg', True) + rc, current_pvs, err = module.run_command("%s -p %s" % (lsvg_cmd, vg)) + if rc != 0: + module.fail_json(msg="Failing to execute '%s' command." % lsvg_cmd) + + pvs_to_remove = [] + for line in current_pvs.splitlines()[2:]: + pvs_to_remove.append(line.split()[0]) + + reduce_msg = "Volume group '%s' removed." % vg + else: + pvs_to_remove = pvs + reduce_msg = ("Physical volume(s) '%s' removed from Volume group '%s'." % (' '.join(pvs_to_remove), vg)) + + # Reduce volume group. + if len(pvs_to_remove) <= 0: + changed = False + msg = "No physical volumes to remove." + return changed, msg + + changed = True + msg = '' + + if not module.check_mode: + reducevg_cmd = module.get_bin_path('reducevg', True) + rc, stdout, stderr = module.run_command("%s -df %s %s" % (reducevg_cmd, vg, ' '.join(pvs_to_remove))) + if rc != 0: + module.fail_json(msg="Unable to remove '%s'." % vg, rc=rc, stdout=stdout, stderr=stderr) + + msg = reduce_msg + return changed, msg + + +def state_vg(module, vg, state, vg_validation): + vg_state, msg = vg_validation + + if vg_state is None: + module.fail_json(msg=msg) + + if state == 'varyon': + if vg_state is True: + changed = False + return changed, msg + + changed = True + msg = '' + if not module.check_mode: + varyonvg_cmd = module.get_bin_path('varyonvg', True) + rc, varyonvg_out, err = module.run_command("%s %s" % (varyonvg_cmd, vg)) + if rc != 0: + module.fail_json(msg="Command 'varyonvg' failed.", rc=rc, err=err) + + msg = "Varyon volume group %s completed." % vg + return changed, msg + + elif state == 'varyoff': + if vg_state is False: + changed = False + return changed, msg + + changed = True + msg = '' + + if not module.check_mode: + varyonvg_cmd = module.get_bin_path('varyoffvg', True) + rc, varyonvg_out, stderr = module.run_command("%s %s" % (varyonvg_cmd, vg)) + if rc != 0: + module.fail_json(msg="Command 'varyoffvg' failed.", rc=rc, stdout=varyonvg_out, stderr=stderr) + + msg = "Varyoff volume group %s completed." 
% vg + return changed, msg + + +def main(): + module = AnsibleModule( + argument_spec=dict( + force=dict(type='bool', default=False), + pp_size=dict(type='int'), + pvs=dict(type='list'), + state=dict(type='str', default='present', choices=['absent', 'present', 'varyoff', 'varyon']), + vg=dict(type='str', required=True), + vg_type=dict(type='str', default='normal', choices=['big', 'normal', 'scalable']) + ), + supports_check_mode=True, + ) + + force = module.params['force'] + pp_size = module.params['pp_size'] + pvs = module.params['pvs'] + state = module.params['state'] + vg = module.params['vg'] + vg_type = module.params['vg_type'] + + if pp_size is None: + pp_size = '' + else: + pp_size = "-s %s" % pp_size + + vg_validation = _validate_vg(module, vg) + + if state == 'present': + if not pvs: + changed = False + msg = "pvs is required to state 'present'." + module.fail_json(msg=msg) + else: + changed, msg = create_extend_vg(module, vg, pvs, pp_size, vg_type, force, vg_validation) + + elif state == 'absent': + changed, msg = reduce_vg(module, vg, pvs, vg_validation) + + elif state == 'varyon' or state == 'varyoff': + changed, msg = state_vg(module, vg, state, vg_validation) + + else: + changed = False + msg = "Unexpected state" + + module.exit_json(changed=changed, msg=msg, state=state) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/system/aix_lvol.py b/plugins/modules/system/aix_lvol.py new file mode 100644 index 0000000000..48261cee96 --- /dev/null +++ b/plugins/modules/system/aix_lvol.py @@ -0,0 +1,340 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2016, Alain Dejoux +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = r''' +--- +author: + - Alain Dejoux (@adejoux) +module: aix_lvol +short_description: Configure AIX LVM logical volumes +description: + - This module creates, removes or resizes AIX logical volumes. Inspired by lvol module. +options: + vg: + description: + - The volume group this logical volume is part of. + type: str + required: true + lv: + description: + - The name of the logical volume. + type: str + required: true + lv_type: + description: + - The type of the logical volume. + type: str + default: jfs2 + size: + description: + - The size of the logical volume with one of the [MGT] units. + type: str + copies: + description: + - The number of copies of the logical volume. + - Maximum copies are 3. + type: int + default: 1 + policy: + description: + - Sets the interphysical volume allocation policy. + - C(maximum) allocates logical partitions across the maximum number of physical volumes. + - C(minimum) allocates logical partitions across the minimum number of physical volumes. + type: str + choices: [ maximum, minimum ] + default: maximum + state: + description: + - Control if the logical volume exists. If C(present) and the + volume does not already exist then the C(size) option is required. + type: str + choices: [ absent, present ] + default: present + opts: + description: + - Free-form options to be passed to the mklv command. + type: str + pvs: + description: + - A list of physical volumes e.g. C(hdisk1,hdisk2). 
+ type: list +''' + +EXAMPLES = r''' +- name: Create a logical volume of 512M + aix_lvol: + vg: testvg + lv: testlv + size: 512M + +- name: Create a logical volume of 512M with disks hdisk1 and hdisk2 + aix_lvol: + vg: testvg + lv: test2lv + size: 512M + pvs: [ hdisk1, hdisk2 ] + +- name: Create a logical volume of 512M mirrored + aix_lvol: + vg: testvg + lv: test3lv + size: 512M + copies: 2 + +- name: Create a logical volume of 1G with a minimum placement policy + aix_lvol: + vg: rootvg + lv: test4lv + size: 1G + policy: minimum + +- name: Create a logical volume with special options like mirror pool + aix_lvol: + vg: testvg + lv: testlv + size: 512M + opts: -p copy1=poolA -p copy2=poolB + +- name: Extend the logical volume to 1200M + aix_lvol: + vg: testvg + lv: test4lv + size: 1200M + +- name: Remove the logical volume + aix_lvol: + vg: testvg + lv: testlv + state: absent +''' + +RETURN = r''' +msg: + type: str + description: A friendly message describing the task result. + returned: always + sample: Logical volume testlv created. +''' + +import re + +from ansible.module_utils.basic import AnsibleModule + + +def convert_size(module, size): + unit = size[-1].upper() + units = ['M', 'G', 'T'] + try: + multiplier = 1024 ** units.index(unit) + except ValueError: + module.fail_json(msg="No valid size unit specified.") + + return int(size[:-1]) * multiplier + + +def round_ppsize(x, base=16): + new_size = int(base * round(float(x) / base)) + if new_size < x: + new_size += base + return new_size + + +def parse_lv(data): + name = None + + for line in data.splitlines(): + match = re.search(r"LOGICAL VOLUME:\s+(\w+)\s+VOLUME GROUP:\s+(\w+)", line) + if match is not None: + name = match.group(1) + vg = match.group(2) + continue + match = re.search(r"LPs:\s+(\d+).*PPs", line) + if match is not None: + lps = int(match.group(1)) + continue + match = re.search(r"PP SIZE:\s+(\d+)", line) + if match is not None: + pp_size = int(match.group(1)) + continue + match = re.search(r"INTER-POLICY:\s+(\w+)", line) + if match is not None: + policy = match.group(1) + continue + + if not name: + return None + + size = lps * pp_size + + return {'name': name, 'vg': vg, 'size': size, 'policy': policy} + + +def parse_vg(data): + + for line in data.splitlines(): + + match = re.search(r"VOLUME GROUP:\s+(\w+)", line) + if match is not None: + name = match.group(1) + continue + + match = re.search(r"TOTAL PP.*\((\d+)", line) + if match is not None: + size = int(match.group(1)) + continue + + match = re.search(r"PP SIZE:\s+(\d+)", line) + if match is not None: + pp_size = int(match.group(1)) + continue + + match = re.search(r"FREE PP.*\((\d+)", line) + if match is not None: + free = int(match.group(1)) + continue + + return {'name': name, 'size': size, 'free': free, 'pp_size': pp_size} + + +def main(): + module = AnsibleModule( + argument_spec=dict( + vg=dict(type='str', required=True), + lv=dict(type='str', required=True), + lv_type=dict(type='str', default='jfs2'), + size=dict(type='str'), + opts=dict(type='str', default=''), + copies=dict(type='int', default=1), + state=dict(type='str', default='present', choices=['absent', 'present']), + policy=dict(type='str', default='maximum', choices=['maximum', 'minimum']), + pvs=dict(type='list', default=list()) + ), + supports_check_mode=True, + ) + + vg = module.params['vg'] + lv = module.params['lv'] + lv_type = module.params['lv_type'] + size = module.params['size'] + opts = module.params['opts'] + copies = module.params['copies'] + policy = module.params['policy'] + state = 
module.params['state'] + pvs = module.params['pvs'] + + pv_list = ' '.join(pvs) + + if policy == 'maximum': + lv_policy = 'x' + else: + lv_policy = 'm' + + # Add echo command when running in check-mode + if module.check_mode: + test_opt = 'echo ' + else: + test_opt = '' + + # check if system commands are available + lsvg_cmd = module.get_bin_path("lsvg", required=True) + lslv_cmd = module.get_bin_path("lslv", required=True) + + # Get information on volume group requested + rc, vg_info, err = module.run_command("%s %s" % (lsvg_cmd, vg)) + + if rc != 0: + if state == 'absent': + module.exit_json(changed=False, msg="Volume group %s does not exist." % vg) + else: + module.fail_json(msg="Volume group %s does not exist." % vg, rc=rc, out=vg_info, err=err) + + this_vg = parse_vg(vg_info) + + if size is not None: + # Calculate pp size and round it up based on pp size. + lv_size = round_ppsize(convert_size(module, size), base=this_vg['pp_size']) + + # Get information on logical volume requested + rc, lv_info, err = module.run_command( + "%s %s" % (lslv_cmd, lv)) + + if rc != 0: + if state == 'absent': + module.exit_json(changed=False, msg="Logical Volume %s does not exist." % lv) + + changed = False + + this_lv = parse_lv(lv_info) + + if state == 'present' and not size: + if this_lv is None: + module.fail_json(msg="No size given.") + + if this_lv is None: + if state == 'present': + if lv_size > this_vg['free']: + module.fail_json(msg="Not enough free space in volume group %s: %s MB free." % (this_vg['name'], this_vg['free'])) + + # create LV + mklv_cmd = module.get_bin_path("mklv", required=True) + + cmd = "%s %s -t %s -y %s -c %s -e %s %s %s %sM %s" % (test_opt, mklv_cmd, lv_type, lv, copies, lv_policy, opts, vg, lv_size, pv_list) + rc, out, err = module.run_command(cmd) + if rc == 0: + module.exit_json(changed=True, msg="Logical volume %s created." % lv) + else: + module.fail_json(msg="Creating logical volume %s failed." % lv, rc=rc, out=out, err=err) + else: + if state == 'absent': + # remove LV + rmlv_cmd = module.get_bin_path("rmlv", required=True) + rc, out, err = module.run_command("%s %s -f %s" % (test_opt, rmlv_cmd, this_lv['name'])) + if rc == 0: + module.exit_json(changed=True, msg="Logical volume %s deleted." % lv) + else: + module.fail_json(msg="Failed to remove logical volume %s." % lv, rc=rc, out=out, err=err) + else: + if this_lv['policy'] != policy: + # change lv allocation policy + chlv_cmd = module.get_bin_path("chlv", required=True) + rc, out, err = module.run_command("%s %s -e %s %s" % (test_opt, chlv_cmd, lv_policy, this_lv['name'])) + if rc == 0: + module.exit_json(changed=True, msg="Logical volume %s policy changed: %s." % (lv, policy)) + else: + module.fail_json(msg="Failed to change logical volume %s policy." % lv, rc=rc, out=out, err=err) + + if vg != this_lv['vg']: + module.fail_json(msg="Logical volume %s already exist in volume group %s" % (lv, this_lv['vg'])) + + # from here the last remaining action is to resize it, if no size parameter is passed we do nothing. + if not size: + module.exit_json(changed=False, msg="Logical volume %s already exist." % (lv)) + + # resize LV based on absolute values + if int(lv_size) > this_lv['size']: + extendlv_cmd = module.get_bin_path("extendlv", required=True) + cmd = "%s %s %s %sM" % (test_opt, extendlv_cmd, lv, lv_size - this_lv['size']) + rc, out, err = module.run_command(cmd) + if rc == 0: + module.exit_json(changed=True, msg="Logical volume %s size extended to %sMB." 
% (lv, lv_size)) + else: + module.fail_json(msg="Unable to resize %s to %sMB." % (lv, lv_size), rc=rc, out=out, err=err) + elif lv_size < this_lv['size']: + module.fail_json(msg="No shrinking of Logical Volume %s permitted. Current size: %s MB" % (lv, this_lv['size'])) + else: + module.exit_json(changed=False, msg="Logical volume %s size is already %sMB." % (lv, lv_size)) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/system/alternatives.py b/plugins/modules/system/alternatives.py new file mode 100644 index 0000000000..1ce88f5540 --- /dev/null +++ b/plugins/modules/system/alternatives.py @@ -0,0 +1,164 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2014, Gabe Mulley +# Copyright: (c) 2015, David Wittman +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = r''' +--- +module: alternatives +short_description: Manages alternative programs for common commands +description: + - Manages symbolic links using the 'update-alternatives' tool. + - Useful when multiple programs are installed but provide similar functionality (e.g. different editors). +author: + - David Wittman (@DavidWittman) + - Gabe Mulley (@mulby) +options: + name: + description: + - The generic name of the link. + type: str + required: true + path: + description: + - The path to the real executable that the link should point to. + type: path + required: true + link: + description: + - The path to the symbolic link that should point to the real executable. + - This option is always required on RHEL-based distributions. On Debian-based distributions this option is + required when the alternative I(name) is unknown to the system. + type: path + priority: + description: + - The priority of the alternative. 
+ type: int + default: 50 +requirements: [ update-alternatives ] +''' + +EXAMPLES = r''' +- name: Correct java version selected + alternatives: + name: java + path: /usr/lib/jvm/java-7-openjdk-amd64/jre/bin/java + +- name: Alternatives link created + alternatives: + name: hadoop-conf + link: /etc/hadoop/conf + path: /etc/hadoop/conf.ansible + +- name: Make java 32 bit an alternative with low priority + alternatives: + name: java + path: /usr/lib/jvm/java-7-openjdk-i386/jre/bin/java + priority: -10 +''' + +import os +import re +import subprocess + +from ansible.module_utils.basic import AnsibleModule + + +def main(): + + module = AnsibleModule( + argument_spec=dict( + name=dict(type='str', required=True), + path=dict(type='path', required=True), + link=dict(type='path'), + priority=dict(type='int', default=50), + ), + supports_check_mode=True, + ) + + params = module.params + name = params['name'] + path = params['path'] + link = params['link'] + priority = params['priority'] + + UPDATE_ALTERNATIVES = module.get_bin_path('update-alternatives', True) + + current_path = None + all_alternatives = [] + + # Run `update-alternatives --display ` to find existing alternatives + (rc, display_output, _) = module.run_command( + ['env', 'LC_ALL=C', UPDATE_ALTERNATIVES, '--display', name] + ) + + if rc == 0: + # Alternatives already exist for this link group + # Parse the output to determine the current path of the symlink and + # available alternatives + current_path_regex = re.compile(r'^\s*link currently points to (.*)$', + re.MULTILINE) + alternative_regex = re.compile(r'^(\/.*)\s-\spriority', re.MULTILINE) + + match = current_path_regex.search(display_output) + if match: + current_path = match.group(1) + all_alternatives = alternative_regex.findall(display_output) + + if not link: + # Read the current symlink target from `update-alternatives --query` + # in case we need to install the new alternative before setting it. 
+ # + # This is only compatible on Debian-based systems, as the other + # alternatives don't have --query available + rc, query_output, _ = module.run_command( + ['env', 'LC_ALL=C', UPDATE_ALTERNATIVES, '--query', name] + ) + if rc == 0: + for line in query_output.splitlines(): + if line.startswith('Link:'): + link = line.split()[1] + break + + if current_path != path: + if module.check_mode: + module.exit_json(changed=True, current_path=current_path) + try: + # install the requested path if necessary + if path not in all_alternatives: + if not os.path.exists(path): + module.fail_json(msg="Specified path %s does not exist" % path) + if not link: + module.fail_json(msg="Needed to install the alternative, but unable to do so as we are missing the link") + + module.run_command( + [UPDATE_ALTERNATIVES, '--install', link, name, path, str(priority)], + check_rc=True + ) + + # select the requested path + module.run_command( + [UPDATE_ALTERNATIVES, '--set', name, path], + check_rc=True + ) + + module.exit_json(changed=True) + except subprocess.CalledProcessError as cpe: + module.fail_json(msg=str(dir(cpe))) + else: + module.exit_json(changed=False) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/system/awall.py b/plugins/modules/system/awall.py new file mode 100644 index 0000000000..e5479769a8 --- /dev/null +++ b/plugins/modules/system/awall.py @@ -0,0 +1,156 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2017, Ted Trask +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['stableinterface'], + 'supported_by': 'community'} + +DOCUMENTATION = r''' +--- +module: awall +short_description: Manage awall policies +author: Ted Trask (@tdtrask) +description: + - This modules allows for enable/disable/activate of I(awall) policies. + - Alpine Wall (I(awall)) generates a firewall configuration from the enabled policy files + and activates the configuration on the system. +options: + name: + description: + - One or more policy names. + type: list + state: + description: + - Whether the policies should be enabled or disabled. + type: str + choices: [ disabled, enabled ] + default: enabled + activate: + description: + - Activate the new firewall rules. + - Can be run with other steps or on its own. 
+ type: bool + default: no +''' + +EXAMPLES = r''' +- name: Enable "foo" and "bar" policy + awall: + name: [ foo bar ] + state: enabled + +- name: Disable "foo" and "bar" policy and activate new rules + awall: + name: + - foo + - bar + state: disabled + activate: no + +- name: Activate currently enabled firewall rules + awall: + activate: yes +''' + +RETURN = ''' # ''' + +import re +from ansible.module_utils.basic import AnsibleModule + + +def activate(module): + cmd = "%s activate --force" % (AWALL_PATH) + rc, stdout, stderr = module.run_command(cmd) + if rc == 0: + return True + else: + module.fail_json(msg="could not activate new rules", stdout=stdout, stderr=stderr) + + +def is_policy_enabled(module, name): + cmd = "%s list" % (AWALL_PATH) + rc, stdout, stderr = module.run_command(cmd) + if re.search(r"^%s\s+enabled" % name, stdout, re.MULTILINE): + return True + return False + + +def enable_policy(module, names, act): + policies = [] + for name in names: + if not is_policy_enabled(module, name): + policies.append(name) + if not policies: + module.exit_json(changed=False, msg="policy(ies) already enabled") + names = " ".join(policies) + if module.check_mode: + cmd = "%s list" % (AWALL_PATH) + else: + cmd = "%s enable %s" % (AWALL_PATH, names) + rc, stdout, stderr = module.run_command(cmd) + if rc != 0: + module.fail_json(msg="failed to enable %s" % names, stdout=stdout, stderr=stderr) + if act and not module.check_mode: + activate(module) + module.exit_json(changed=True, msg="enabled awall policy(ies): %s" % names) + + +def disable_policy(module, names, act): + policies = [] + for name in names: + if is_policy_enabled(module, name): + policies.append(name) + if not policies: + module.exit_json(changed=False, msg="policy(ies) already disabled") + names = " ".join(policies) + if module.check_mode: + cmd = "%s list" % (AWALL_PATH) + else: + cmd = "%s disable %s" % (AWALL_PATH, names) + rc, stdout, stderr = module.run_command(cmd) + if rc != 0: + module.fail_json(msg="failed to disable %s" % names, stdout=stdout, stderr=stderr) + if act and not module.check_mode: + activate(module) + module.exit_json(changed=True, msg="disabled awall policy(ies): %s" % names) + + +def main(): + module = AnsibleModule( + argument_spec=dict( + state=dict(type='str', default='enabled', choices=['disabled', 'enabled']), + name=dict(type='list'), + activate=dict(type='bool', default=False), + ), + required_one_of=[['name', 'activate']], + supports_check_mode=True, + ) + + global AWALL_PATH + AWALL_PATH = module.get_bin_path('awall', required=True) + + p = module.params + + if p['name']: + if p['state'] == 'enabled': + enable_policy(module, p['name'], p['activate']) + elif p['state'] == 'disabled': + disable_policy(module, p['name'], p['activate']) + + if p['activate']: + if not module.check_mode: + activate(module) + module.exit_json(changed=True, msg="activated awall rules") + + module.fail_json(msg="no action defined") + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/system/beadm.py b/plugins/modules/system/beadm.py new file mode 100644 index 0000000000..c089482fb3 --- /dev/null +++ b/plugins/modules/system/beadm.py @@ -0,0 +1,424 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2016, Adam Števko +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 
'community'} + + +DOCUMENTATION = r''' +--- +module: beadm +short_description: Manage ZFS boot environments on FreeBSD/Solaris/illumos systems. +description: + - Create, delete or activate ZFS boot environments. + - Mount and unmount ZFS boot environments. +author: Adam Števko (@xen0l) +options: + name: + description: + - ZFS boot environment name. + type: str + required: True + aliases: [ "be" ] + snapshot: + description: + - If specified, the new boot environment will be cloned from the given + snapshot or inactive boot environment. + type: str + description: + description: + - Associate a description with a new boot environment. This option is + available only on Solarish platforms. + type: str + options: + description: + - Create the datasets for new BE with specific ZFS properties. + - Multiple options can be specified. + - This option is available only on Solarish platforms. + type: str + mountpoint: + description: + - Path where to mount the ZFS boot environment. + type: path + state: + description: + - Create or delete ZFS boot environment. + type: str + choices: [ absent, activated, mounted, present, unmounted ] + default: present + force: + description: + - Specifies if the unmount should be forced. + type: bool + default: false +''' + +EXAMPLES = r''' +- name: Create ZFS boot environment + beadm: + name: upgrade-be + state: present + +- name: Create ZFS boot environment from existing inactive boot environment + beadm: + name: upgrade-be + snapshot: be@old + state: present + +- name: Create ZFS boot environment with compression enabled and description "upgrade" + beadm: + name: upgrade-be + options: "compression=on" + description: upgrade + state: present + +- name: Delete ZFS boot environment + beadm: + name: old-be + state: absent + +- name: Mount ZFS boot environment on /tmp/be + beadm: + name: BE + mountpoint: /tmp/be + state: mounted + +- name: Unmount ZFS boot environment + beadm: + name: BE + state: unmounted + +- name: Activate ZFS boot environment + beadm: + name: upgrade-be + state: activated +''' + +RETURN = r''' +name: + description: BE name + returned: always + type: str + sample: pre-upgrade +snapshot: + description: ZFS snapshot to create BE from + returned: always + type: str + sample: rpool/ROOT/oi-hipster@fresh +description: + description: BE description + returned: always + type: str + sample: Upgrade from 9.0 to 10.0 +options: + description: BE additional options + returned: always + type: str + sample: compression=on +mountpoint: + description: BE mountpoint + returned: always + type: str + sample: /mnt/be +state: + description: state of the target + returned: always + type: str + sample: present +force: + description: If forced action is wanted + returned: always + type: bool + sample: False +''' + +import os +import re +from ansible.module_utils.basic import AnsibleModule + + +class BE(object): + def __init__(self, module): + self.module = module + + self.name = module.params['name'] + self.snapshot = module.params['snapshot'] + self.description = module.params['description'] + self.options = module.params['options'] + self.mountpoint = module.params['mountpoint'] + self.state = module.params['state'] + self.force = module.params['force'] + self.is_freebsd = os.uname()[0] == 'FreeBSD' + + def _beadm_list(self): + cmd = [self.module.get_bin_path('beadm')] + cmd.append('list') + cmd.append('-H') + if '@' in self.name: + cmd.append('-s') + return self.module.run_command(cmd) + + def _find_be_by_name(self, out): + if '@' in self.name: + for line in 
out.splitlines(): + if self.is_freebsd: + check = re.match(r'.+/({0})\s+\-'.format(self.name), line) + if check: + return check + else: + check = line.split(';') + if check[1] == self.name: + return check + else: + splitter = '\t' if self.is_freebsd else ';' + for line in out.splitlines(): + check = line.split(splitter) + if check[0] == self.name: + return check + + return None + + def exists(self): + (rc, out, _) = self._beadm_list() + + if rc == 0: + if self._find_be_by_name(out): + return True + else: + return False + else: + return False + + def is_activated(self): + (rc, out, _) = self._beadm_list() + + if rc == 0: + line = self._find_be_by_name(out) + if self.is_freebsd: + if line is not None and 'R' in line.split('\t')[1]: + return True + else: + if 'R' in line.split(';')[2]: + return True + + return False + + def activate_be(self): + cmd = [self.module.get_bin_path('beadm')] + + cmd.append('activate') + cmd.append(self.name) + + return self.module.run_command(cmd) + + def create_be(self): + cmd = [self.module.get_bin_path('beadm')] + + cmd.append('create') + + if self.snapshot: + cmd.append('-e') + cmd.append(self.snapshot) + + if not self.is_freebsd: + if self.description: + cmd.append('-d') + cmd.append(self.description) + + if self.options: + cmd.append('-o') + cmd.append(self.options) + + cmd.append(self.name) + + return self.module.run_command(cmd) + + def destroy_be(self): + cmd = [self.module.get_bin_path('beadm')] + + cmd.append('destroy') + cmd.append('-F') + cmd.append(self.name) + + return self.module.run_command(cmd) + + def is_mounted(self): + (rc, out, _) = self._beadm_list() + + if rc == 0: + line = self._find_be_by_name(out) + if self.is_freebsd: + # On FreeBSD, we exclude currently mounted BE on /, as it is + # special and can be activated even if it is mounted. That is not + # possible with non-root BEs. + if line.split('\t')[2] != '-' and \ + line.split('\t')[2] != '/': + return True + else: + if line.split(';')[3]: + return True + + return False + + def mount_be(self): + cmd = [self.module.get_bin_path('beadm')] + + cmd.append('mount') + cmd.append(self.name) + + if self.mountpoint: + cmd.append(self.mountpoint) + + return self.module.run_command(cmd) + + def unmount_be(self): + cmd = [self.module.get_bin_path('beadm')] + + cmd.append('unmount') + if self.force: + cmd.append('-f') + cmd.append(self.name) + + return self.module.run_command(cmd) + + +def main(): + module = AnsibleModule( + argument_spec=dict( + name=dict(type='str', required=True, aliases=['be']), + snapshot=dict(type='str'), + description=dict(type='str'), + options=dict(type='str'), + mountpoint=dict(type='path'), + state=dict(type='str', default='present', choices=['absent', 'activated', 'mounted', 'present', 'unmounted']), + force=dict(type='bool', default=False), + ), + supports_check_mode=True, + ) + + be = BE(module) + + rc = None + out = '' + err = '' + result = {} + result['name'] = be.name + result['state'] = be.state + + if be.snapshot: + result['snapshot'] = be.snapshot + + if be.description: + result['description'] = be.description + + if be.options: + result['options'] = be.options + + if be.mountpoint: + result['mountpoint'] = be.mountpoint + + if be.state == 'absent': + # beadm on FreeBSD and Solarish systems differs in delete behaviour in + # that we are not allowed to delete activated BE on FreeBSD while on + # Solarish systems we cannot delete BE if it is mounted. We add mount + # check for both platforms as BE should be explicitly unmounted before + # being deleted. 
On FreeBSD, we also check if the BE is activated. + if be.exists(): + if not be.is_mounted(): + if module.check_mode: + module.exit_json(changed=True) + + if be.is_freebsd: + if be.is_activated(): + module.fail_json(msg='Unable to remove active BE!') + + (rc, out, err) = be.destroy_be() + + if rc != 0: + module.fail_json(msg='Error while destroying BE: "%s"' % err, + name=be.name, + stderr=err, + rc=rc) + else: + module.fail_json(msg='Unable to remove BE as it is mounted!') + + elif be.state == 'present': + if not be.exists(): + if module.check_mode: + module.exit_json(changed=True) + + (rc, out, err) = be.create_be() + + if rc != 0: + module.fail_json(msg='Error while creating BE: "%s"' % err, + name=be.name, + stderr=err, + rc=rc) + + elif be.state == 'activated': + if not be.is_activated(): + if module.check_mode: + module.exit_json(changed=True) + + # On FreeBSD, beadm is unable to activate mounted BEs, so we add + # an explicit check for that case. + if be.is_freebsd: + if be.is_mounted(): + module.fail_json(msg='Unable to activate mounted BE!') + + (rc, out, err) = be.activate_be() + + if rc != 0: + module.fail_json(msg='Error while activating BE: "%s"' % err, + name=be.name, + stderr=err, + rc=rc) + elif be.state == 'mounted': + if not be.is_mounted(): + if module.check_mode: + module.exit_json(changed=True) + + (rc, out, err) = be.mount_be() + + if rc != 0: + module.fail_json(msg='Error while mounting BE: "%s"' % err, + name=be.name, + stderr=err, + rc=rc) + + elif be.state == 'unmounted': + if be.is_mounted(): + if module.check_mode: + module.exit_json(changed=True) + + (rc, out, err) = be.unmount_be() + + if rc != 0: + module.fail_json(msg='Error while unmounting BE: "%s"' % err, + name=be.name, + stderr=err, + rc=rc) + + if rc is None: + result['changed'] = False + else: + result['changed'] = True + + if out: + result['stdout'] = out + if err: + result['stderr'] = err + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/system/capabilities.py b/plugins/modules/system/capabilities.py new file mode 100644 index 0000000000..172a96b913 --- /dev/null +++ b/plugins/modules/system/capabilities.py @@ -0,0 +1,178 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2014, Nate Coraor +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = r''' +--- +module: capabilities +short_description: Manage Linux capabilities +description: + - This module manipulates files privileges using the Linux capabilities(7) system. +options: + path: + description: + - Specifies the path to the file to be managed. + type: str + required: yes + aliases: [ key ] + capability: + description: + - Desired capability to set (with operator and flags, if state is C(present)) or remove (if state is C(absent)) + type: str + required: yes + aliases: [ cap ] + state: + description: + - Whether the entry should be present or absent in the file's capabilities. + type: str + choices: [ absent, present ] + default: present +notes: + - The capabilities system will automatically transform operators and flags into the effective set, + so for example, C(cap_foo=ep) will probably become C(cap_foo+ep). 
+ - This module does not attempt to determine the final operator and flags to compare, + so you will want to ensure that your capabilities argument matches the final capabilities. +author: +- Nate Coraor (@natefoo) +''' + +EXAMPLES = r''' +- name: Set cap_sys_chroot+ep on /foo + capabilities: + path: /foo + capability: cap_sys_chroot+ep + state: present + +- name: Remove cap_net_bind_service from /bar + capabilities: + path: /bar + capability: cap_net_bind_service + state: absent +''' + +from ansible.module_utils.basic import AnsibleModule + +OPS = ('=', '-', '+') + + +class CapabilitiesModule(object): + platform = 'Linux' + distribution = None + + def __init__(self, module): + self.module = module + self.path = module.params['path'].strip() + self.capability = module.params['capability'].strip().lower() + self.state = module.params['state'] + self.getcap_cmd = module.get_bin_path('getcap', required=True) + self.setcap_cmd = module.get_bin_path('setcap', required=True) + self.capability_tup = self._parse_cap(self.capability, op_required=self.state == 'present') + + self.run() + + def run(self): + + current = self.getcap(self.path) + caps = [cap[0] for cap in current] + + if self.state == 'present' and self.capability_tup not in current: + # need to add capability + if self.module.check_mode: + self.module.exit_json(changed=True, msg='capabilities changed') + else: + # remove from current cap list if it's already set (but op/flags differ) + current = list(filter(lambda x: x[0] != self.capability_tup[0], current)) + # add new cap with correct op/flags + current.append(self.capability_tup) + self.module.exit_json(changed=True, state=self.state, msg='capabilities changed', stdout=self.setcap(self.path, current)) + elif self.state == 'absent' and self.capability_tup[0] in caps: + # need to remove capability + if self.module.check_mode: + self.module.exit_json(changed=True, msg='capabilities changed') + else: + # remove from current cap list and then set current list + current = filter(lambda x: x[0] != self.capability_tup[0], current) + self.module.exit_json(changed=True, state=self.state, msg='capabilities changed', stdout=self.setcap(self.path, current)) + self.module.exit_json(changed=False, state=self.state) + + def getcap(self, path): + rval = [] + cmd = "%s -v %s" % (self.getcap_cmd, path) + rc, stdout, stderr = self.module.run_command(cmd) + # If file xattrs are set but no caps are set the output will be: + # '/foo =' + # If file xattrs are unset the output will be: + # '/foo' + # If the file does not exist the output will be (with rc == 0...): + # '/foo (No such file or directory)' + if rc != 0 or (stdout.strip() != path and stdout.count(' =') != 1): + self.module.fail_json(msg="Unable to get capabilities of %s" % path, stdout=stdout.strip(), stderr=stderr) + if stdout.strip() != path: + caps = stdout.split(' =')[1].strip().split() + for cap in caps: + cap = cap.lower() + # getcap condenses capabilities with the same op/flags into a + # comma-separated list, so we have to parse that + if ',' in cap: + cap_group = cap.split(',') + cap_group[-1], op, flags = self._parse_cap(cap_group[-1]) + for subcap in cap_group: + rval.append((subcap, op, flags)) + else: + rval.append(self._parse_cap(cap)) + return rval + + def setcap(self, path, caps): + caps = ' '.join([''.join(cap) for cap in caps]) + cmd = "%s '%s' %s" % (self.setcap_cmd, caps, path) + rc, stdout, stderr = self.module.run_command(cmd) + if rc != 0: + self.module.fail_json(msg="Unable to set capabilities of %s" % path, 
stdout=stdout, stderr=stderr) + else: + return stdout + + def _parse_cap(self, cap, op_required=True): + opind = -1 + try: + i = 0 + while opind == -1: + opind = cap.find(OPS[i]) + i += 1 + except Exception: + if op_required: + self.module.fail_json(msg="Couldn't find operator (one of: %s)" % str(OPS)) + else: + return (cap, None, None) + op = cap[opind] + cap, flags = cap.split(op) + return (cap, op, flags) + + +# ============================================================== +# main + +def main(): + # defining module + module = AnsibleModule( + argument_spec=dict( + path=dict(type='str', required=True, aliases=['key']), + capability=dict(type='str', required=True, aliases=['cap']), + state=dict(type='str', default='present', choices=['absent', 'present']), + ), + supports_check_mode=True, + ) + + CapabilitiesModule(module) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/system/cronvar.py b/plugins/modules/system/cronvar.py new file mode 100644 index 0000000000..ebe5aa7551 --- /dev/null +++ b/plugins/modules/system/cronvar.py @@ -0,0 +1,427 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2017, Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +# Cronvar Plugin: The goal of this plugin is to provide an idempotent +# method for set cron variable values. It should play well with the +# existing cron module as well as allow for manually added variables. +# Each variable entered will be preceded with a comment describing the +# variable so that it can be found later. This is required to be +# present in order for this plugin to find/modify the variable + +# This module is based on the crontab module. + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = r''' +--- +module: cronvar +short_description: Manage variables in crontabs +description: + - Use this module to manage crontab variables. + - This module allows you to create, update, or delete cron variable definitions. +options: + name: + description: + - Name of the crontab variable. + type: str + required: yes + value: + description: + - The value to set this variable to. + - Required if C(state=present). + type: str + insertafter: + description: + - If specified, the variable will be inserted after the variable specified. + - Used with C(state=present). + type: str + insertbefore: + description: + - Used with C(state=present). If specified, the variable will be inserted + just before the variable specified. + type: str + state: + description: + - Whether to ensure that the variable is present or absent. + type: str + choices: [ absent, present ] + default: present + user: + description: + - The specific user whose crontab should be modified. + - This parameter defaults to C(root) when unset. + type: str + cron_file: + description: + - If specified, uses this file instead of an individual user's crontab. + - Without a leading C(/), this is assumed to be in I(/etc/cron.d). + - With a leading C(/), this is taken as absolute. + type: str + backup: + description: + - If set, create a backup of the crontab before it is modified. + The location of the backup is returned in the C(backup) variable by this module. 
+ type: bool + default: no +requirements: + - cron +author: +- Doug Luce (@dougluce) +''' + +EXAMPLES = r''' +- name: Ensure entry like "EMAIL=doug@ansibmod.con.com" exists + cronvar: + name: EMAIL + value: doug@ansibmod.con.com + +- name: Ensure a variable does not exist. This may remove any variable named "LEGACY" + cronvar: + name: LEGACY + state: absent + +- name: Add a variable to a file under /etc/cron.d + cronvar: + name: LOGFILE + value: /var/log/yum-autoupdate.log + user: root + cron_file: ansible_yum-autoupdate +''' + +import os +import platform +import pwd +import re +import shlex +import sys +import tempfile + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.six.moves import shlex_quote + + +class CronVarError(Exception): + pass + + +class CronVar(object): + """ + CronVar object to write variables to crontabs. + + user - the user of the crontab (defaults to root) + cron_file - a cron file under /etc/cron.d + """ + + def __init__(self, module, user=None, cron_file=None): + self.module = module + self.user = user + self.lines = None + self.wordchars = ''.join(chr(x) for x in range(128) if chr(x) not in ('=', "'", '"',)) + self.cron_cmd = self.module.get_bin_path('crontab', required=True) + + if cron_file: + self.cron_file = "" + if os.path.isabs(cron_file): + self.cron_file = cron_file + else: + self.cron_file = os.path.join('/etc/cron.d', cron_file) + else: + self.cron_file = None + + self.read() + + def read(self): + # Read in the crontab from the system + self.lines = [] + if self.cron_file: + # read the cronfile + try: + f = open(self.cron_file, 'r') + self.lines = f.read().splitlines() + f.close() + except IOError: + # cron file does not exist + return + except Exception: + raise CronVarError("Unexpected error:", sys.exc_info()[0]) + else: + # using safely quoted shell for now, but this really should be two non-shell calls instead. FIXME + (rc, out, err) = self.module.run_command(self._read_user_execute(), use_unsafe_shell=True) + + if rc != 0 and rc != 1: # 1 can mean that there are no jobs. + raise CronVarError("Unable to read crontab") + + lines = out.splitlines() + count = 0 + for l in lines: + if count > 2 or (not re.match(r'# DO NOT EDIT THIS FILE - edit the master and reinstall.', l + ) and not re.match(r'# \(/tmp/.*installed on.*\)', l) and not re.match(r'# \(.*version.*\)', l)): + self.lines.append(l) + count += 1 + + def log_message(self, message): + self.module.debug('ansible: "%s"' % message) + + def write(self, backup_file=None): + """ + Write the crontab to the system. Saves all information. + """ + if backup_file: + fileh = open(backup_file, 'w') + elif self.cron_file: + fileh = open(self.cron_file, 'w') + else: + filed, path = tempfile.mkstemp(prefix='crontab') + fileh = os.fdopen(filed, 'w') + + fileh.write(self.render()) + fileh.close() + + # return if making a backup + if backup_file: + return + + # Add the entire crontab back to the user crontab + if not self.cron_file: + # quoting shell args for now but really this should be two non-shell calls. 
FIXME + (rc, out, err) = self.module.run_command(self._write_execute(path), use_unsafe_shell=True) + os.unlink(path) + + if rc != 0: + self.module.fail_json(msg=err) + + def remove_variable_file(self): + try: + os.unlink(self.cron_file) + return True + except OSError: + # cron file does not exist + return False + except Exception: + raise CronVarError("Unexpected error:", sys.exc_info()[0]) + + def parse_for_var(self, line): + lexer = shlex.shlex(line) + lexer.wordchars = self.wordchars + varname = lexer.get_token() + is_env_var = lexer.get_token() == '=' + value = ''.join(lexer) + if is_env_var: + return (varname, value) + raise CronVarError("Not a variable.") + + def find_variable(self, name): + for l in self.lines: + try: + (varname, value) = self.parse_for_var(l) + if varname == name: + return value + except CronVarError: + pass + return None + + def get_var_names(self): + var_names = [] + for l in self.lines: + try: + (var_name, _) = self.parse_for_var(l) + var_names.append(var_name) + except CronVarError: + pass + return var_names + + def add_variable(self, name, value, insertbefore, insertafter): + if insertbefore is None and insertafter is None: + # Add the variable to the top of the file. + self.lines.insert(0, "%s=%s" % (name, value)) + else: + newlines = [] + for l in self.lines: + try: + (varname, _) = self.parse_for_var(l) # Throws if not a var line + if varname == insertbefore: + newlines.append("%s=%s" % (name, value)) + newlines.append(l) + elif varname == insertafter: + newlines.append(l) + newlines.append("%s=%s" % (name, value)) + else: + raise CronVarError # Append. + except CronVarError: + newlines.append(l) + + self.lines = newlines + + def remove_variable(self, name): + self.update_variable(name, None, remove=True) + + def update_variable(self, name, value, remove=False): + newlines = [] + for l in self.lines: + try: + (varname, _) = self.parse_for_var(l) # Throws if not a var line + if varname != name: + raise CronVarError # Append. 
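+                # Illustrative note (editor's comment): CronVarError doubles as
+                # control flow here -- parse_for_var() raises it for lines that
+                # are not variable definitions, and the explicit raise above
+                # reuses the same except clause below so every non-matching
+                # line is copied through verbatim.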
+ if not remove: + newlines.append("%s=%s" % (name, value)) + except CronVarError: + newlines.append(l) + + self.lines = newlines + + def render(self): + """ + Render a proper crontab + """ + result = '\n'.join(self.lines) + if result and result[-1] not in ['\n', '\r']: + result += '\n' + return result + + def _read_user_execute(self): + """ + Returns the command line for reading a crontab + """ + user = '' + + if self.user: + if platform.system() == 'SunOS': + return "su %s -c '%s -l'" % (shlex_quote(self.user), shlex_quote(self.cron_cmd)) + elif platform.system() == 'AIX': + return "%s -l %s" % (shlex_quote(self.cron_cmd), shlex_quote(self.user)) + elif platform.system() == 'HP-UX': + return "%s %s %s" % (self.cron_cmd, '-l', shlex_quote(self.user)) + elif pwd.getpwuid(os.getuid())[0] != self.user: + user = '-u %s' % shlex_quote(self.user) + return "%s %s %s" % (self.cron_cmd, user, '-l') + + def _write_execute(self, path): + """ + Return the command line for writing a crontab + """ + user = '' + if self.user: + if platform.system() in ['SunOS', 'HP-UX', 'AIX']: + return "chown %s %s ; su '%s' -c '%s %s'" % ( + shlex_quote(self.user), shlex_quote(path), shlex_quote(self.user), self.cron_cmd, shlex_quote(path)) + elif pwd.getpwuid(os.getuid())[0] != self.user: + user = '-u %s' % shlex_quote(self.user) + return "%s %s %s" % (self.cron_cmd, user, shlex_quote(path)) + + +# ================================================== + +def main(): + # The following example playbooks: + # + # - cronvar: name="SHELL" value="/bin/bash" + # + # - name: Set the email + # cronvar: name="EMAILTO" value="doug@ansibmod.con.com" + # + # - name: Get rid of the old new host variable + # cronvar: name="NEW_HOST" state=absent + # + # Would produce: + # SHELL = /bin/bash + # EMAILTO = doug@ansibmod.con.com + + module = AnsibleModule( + argument_spec=dict( + name=dict(type='str', required=True), + value=dict(type='str'), + user=dict(type='str'), + cron_file=dict(type='str'), + insertafter=dict(type='str'), + insertbefore=dict(type='str'), + state=dict(type='str', default='present', choices=['absent', 'present']), + backup=dict(type='bool', default=False), + ), + mutually_exclusive=[['insertbefore', 'insertafter']], + supports_check_mode=False, + ) + + name = module.params['name'] + value = module.params['value'] + user = module.params['user'] + cron_file = module.params['cron_file'] + insertafter = module.params['insertafter'] + insertbefore = module.params['insertbefore'] + state = module.params['state'] + backup = module.params['backup'] + ensure_present = state == 'present' + + changed = False + res_args = dict() + + # Ensure all files generated are only writable by the owning user. Primarily relevant for the cron_file option. 
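+    # A comment-only sketch of the umask arithmetic, assuming POSIX defaults:
+    # int('022', 8) == 0o22, so new files are created 0666 & ~0o22 = 0644 and
+    # new directories 0777 & ~0o22 = 0755 -- group and other lose write access.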
+    os.umask(int('022', 8))
+    cronvar = CronVar(module, user, cron_file)
+
+    module.debug('cronvar instantiated - name: "%s"' % name)
+
+    # --- user input validation ---
+
+    if name is None and ensure_present:
+        module.fail_json(msg="You must specify 'name' to insert a new cron variable")
+
+    if value is None and ensure_present:
+        module.fail_json(msg="You must specify 'value' to insert a new cron variable")
+
+    if name is None and not ensure_present:
+        module.fail_json(msg="You must specify 'name' to remove a cron variable")
+
+    # if requested make a backup before making a change
+    if backup:
+        (_, backup_file) = tempfile.mkstemp(prefix='cronvar')
+        cronvar.write(backup_file)
+
+    if cronvar.cron_file and not name and not ensure_present:
+        changed = cronvar.remove_variable_file()
+        module.exit_json(changed=changed, cron_file=cron_file, state=state)
+
+    old_value = cronvar.find_variable(name)
+
+    if ensure_present:
+        if old_value is None:
+            cronvar.add_variable(name, value, insertbefore, insertafter)
+            changed = True
+        elif old_value != value:
+            cronvar.update_variable(name, value)
+            changed = True
+    else:
+        if old_value is not None:
+            cronvar.remove_variable(name)
+            changed = True
+
+    res_args = {
+        "vars": cronvar.get_var_names(),
+        "changed": changed
+    }
+
+    if changed:
+        cronvar.write()
+
+    # retain the backup only if crontab or cron file have changed
+    if backup:
+        if changed:
+            res_args['backup_file'] = backup_file
+        else:
+            os.unlink(backup_file)
+
+    if cron_file:
+        res_args['cron_file'] = cron_file
+
+    module.exit_json(**res_args)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/system/crypttab.py b/plugins/modules/system/crypttab.py
new file mode 100644
index 0000000000..237f640304
--- /dev/null
+++ b/plugins/modules/system/crypttab.py
@@ -0,0 +1,358 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2014, Steve
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+DOCUMENTATION = r'''
+---
+module: crypttab
+short_description: Encrypted Linux block devices
+description:
+  - Control Linux encrypted block devices that are set up during system boot in C(/etc/crypttab).
+options:
+  name:
+    description:
+      - Name of the encrypted block device as it appears in the C(/etc/crypttab) file, or
+        optionally prefixed with C(/dev/mapper/), as it appears in the filesystem. I(/dev/mapper/)
+        will be stripped from I(name).
+    type: str
+    required: yes
+  state:
+    description:
+      - Use I(present) to add a line to C(/etc/crypttab) or update its definition
+        if already present.
+      - Use I(absent) to remove a line with matching I(name).
+      - Use I(opts_present) to add options to those already present; options with
+        different values will be updated.
+      - Use I(opts_absent) to remove options from the existing set.
+    type: str
+    required: yes
+    choices: [ absent, opts_absent, opts_present, present ]
+  backing_device:
+    description:
+      - Path to the underlying block device or file, or the UUID of a block-device
+        prefixed with I(UUID=).
+    type: str
+  password:
+    description:
+      - Encryption password, the path to a file containing the password, or
+        C(-) or unset if the password should be entered at boot.
+    type: path
+  opts:
+    description:
+      - A comma-delimited list of options. See C(crypttab(5)) for details.
+    type: str
+  path:
+    description:
+      - Path to file to use instead of C(/etc/crypttab).
+      - This might be useful in a chroot environment.
+    type: path
+    default: /etc/crypttab
+author:
+- Steve (@groks)
+'''

+EXAMPLES = r'''
+- name: Set the options explicitly for a device, which must already exist
+  crypttab:
+    name: luks-home
+    state: present
+    opts: discard,cipher=aes-cbc-essiv:sha256
+
+- name: Add the 'discard' option to any existing options for all devices
+  crypttab:
+    name: '{{ item.device }}'
+    state: opts_present
+    opts: discard
+  loop: '{{ ansible_mounts }}'
+  when: "'/dev/mapper/luks-' in item.device"
+'''
+
+import os
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_bytes, to_native
+
+
+def main():
+    module = AnsibleModule(
+        argument_spec=dict(
+            name=dict(type='str', required=True),
+            state=dict(type='str', required=True, choices=['absent', 'opts_absent', 'opts_present', 'present']),
+            backing_device=dict(type='str'),
+            password=dict(type='path'),
+            opts=dict(type='str'),
+            path=dict(type='path', default='/etc/crypttab')
+        ),
+        supports_check_mode=True,
+    )
+
+    backing_device = module.params['backing_device']
+    password = module.params['password']
+    opts = module.params['opts']
+    state = module.params['state']
+    path = module.params['path']
+    name = module.params['name']
+    if name.startswith('/dev/mapper/'):
+        name = name[len('/dev/mapper/'):]
+
+    if state != 'absent' and backing_device is None and password is None and opts is None:
+        module.fail_json(msg="expected one or more of 'backing_device', 'password' or 'opts'",
+                         **module.params)
+
+    if 'opts' in state and (backing_device is not None or password is not None):
+        module.fail_json(msg="cannot update 'backing_device' or 'password' when state=%s" % state,
+                         **module.params)
+
+    for arg_name, arg in (('name', name),
+                          ('backing_device', backing_device),
+                          ('password', password),
+                          ('opts', opts)):
+        if (arg is not None and (' ' in arg or '\t' in arg or arg == '')):
+            module.fail_json(msg="invalid '%s': contains white space or is empty" % arg_name,
+                             **module.params)
+
+    try:
+        crypttab = Crypttab(path)
+        existing_line = crypttab.match(name)
+    except Exception as e:
+        module.fail_json(msg="failed to open and parse crypttab file: %s" % to_native(e),
+                         exception=traceback.format_exc(), **module.params)
+
+    if 'present' in state and existing_line is None and backing_device is None:
+        module.fail_json(msg="'backing_device' required to add a new entry",
+                         **module.params)
+
+    changed, reason = False, '?'
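+    # Note on the pattern below: each branch returns a (changed, reason) pair
+    # (for example, Line.remove() returns (True, 'removed line')), and 'reason'
+    # is reported back as the module's msg through exit_json() at the end.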
+ + if state == 'absent': + if existing_line is not None: + changed, reason = existing_line.remove() + + elif state == 'present': + if existing_line is not None: + changed, reason = existing_line.set(backing_device, password, opts) + else: + changed, reason = crypttab.add(Line(None, name, backing_device, password, opts)) + + elif state == 'opts_present': + if existing_line is not None: + changed, reason = existing_line.opts.add(opts) + else: + changed, reason = crypttab.add(Line(None, name, backing_device, password, opts)) + + elif state == 'opts_absent': + if existing_line is not None: + changed, reason = existing_line.opts.remove(opts) + + if changed and not module.check_mode: + try: + f = open(path, 'wb') + f.write(to_bytes(crypttab, errors='surrogate_or_strict')) + finally: + f.close() + + module.exit_json(changed=changed, msg=reason, **module.params) + + +class Crypttab(object): + _lines = [] + + def __init__(self, path): + self.path = path + if not os.path.exists(path): + if not os.path.exists(os.path.dirname(path)): + os.makedirs(os.path.dirname(path)) + open(path, 'a').close() + + try: + f = open(path, 'r') + for line in f.readlines(): + self._lines.append(Line(line)) + finally: + f.close() + + def add(self, line): + self._lines.append(line) + return True, 'added line' + + def lines(self): + for line in self._lines: + if line.valid(): + yield line + + def match(self, name): + for line in self.lines(): + if line.name == name: + return line + return None + + def __str__(self): + lines = [] + for line in self._lines: + lines.append(str(line)) + crypttab = '\n'.join(lines) + if len(crypttab) == 0: + crypttab += '\n' + if crypttab[-1] != '\n': + crypttab += '\n' + return crypttab + + +class Line(object): + def __init__(self, line=None, name=None, backing_device=None, password=None, opts=None): + self.line = line + self.name = name + self.backing_device = backing_device + self.password = password + self.opts = Options(opts) + + if line is not None: + self.line = self.line.rstrip('\n') + if self._line_valid(line): + self.name, backing_device, password, opts = self._split_line(line) + + self.set(backing_device, password, opts) + + def set(self, backing_device, password, opts): + changed = False + + if backing_device is not None and self.backing_device != backing_device: + self.backing_device = backing_device + changed = True + + if password is not None and self.password != password: + self.password = password + changed = True + + if opts is not None: + opts = Options(opts) + if opts != self.opts: + self.opts = opts + changed = True + + return changed, 'updated line' + + def _line_valid(self, line): + if not line.strip() or line.startswith('#') or len(line.split()) not in (2, 3, 4): + return False + return True + + def _split_line(self, line): + fields = line.split() + try: + field2 = fields[2] + except IndexError: + field2 = None + try: + field3 = fields[3] + except IndexError: + field3 = None + + return (fields[0], + fields[1], + field2, + field3) + + def remove(self): + self.line, self.name, self.backing_device = '', None, None + return True, 'removed line' + + def valid(self): + if self.name is not None and self.backing_device is not None: + return True + return False + + def __str__(self): + if self.valid(): + fields = [self.name, self.backing_device] + if self.password is not None or self.opts: + if self.password is not None: + fields.append(self.password) + else: + fields.append('none') + if self.opts: + fields.append(str(self.opts)) + return ' '.join(fields) + return self.line + + 
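+# A minimal usage sketch for the Options class below (comment-only, so module
+# import behaviour is unchanged; the option values are made up for
+# illustration):
+#
+#     opts = Options('discard,cipher=aes-cbc-essiv:sha256')
+#     opts.add('discard,size=256')  # -> (True, 'updated options')
+#     str(opts)  # -> 'discard,cipher=aes-cbc-essiv:sha256,size=256'
+#
+# Options subclasses dict but keeps a parallel 'itemlist' of keys so that the
+# rendered crypttab line preserves the order in which options first appeared.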
+class Options(dict): + """opts_string looks like: 'discard,foo=bar,baz=greeble' """ + + def __init__(self, opts_string): + super(Options, self).__init__() + self.itemlist = [] + if opts_string is not None: + for opt in opts_string.split(','): + kv = opt.split('=') + if len(kv) > 1: + k, v = (kv[0], kv[1]) + else: + k, v = (kv[0], None) + self[k] = v + + def add(self, opts_string): + changed = False + for k, v in Options(opts_string).items(): + if k in self: + if self[k] != v: + changed = True + else: + changed = True + self[k] = v + return changed, 'updated options' + + def remove(self, opts_string): + changed = False + for k in Options(opts_string): + if k in self: + del self[k] + changed = True + return changed, 'removed options' + + def keys(self): + return self.itemlist + + def values(self): + return [self[key] for key in self] + + def items(self): + return [(key, self[key]) for key in self] + + def __iter__(self): + return iter(self.itemlist) + + def __setitem__(self, key, value): + if key not in self: + self.itemlist.append(key) + super(Options, self).__setitem__(key, value) + + def __delitem__(self, key): + self.itemlist.remove(key) + super(Options, self).__delitem__(key) + + def __ne__(self, obj): + return not (isinstance(obj, Options) and sorted(self.items()) == sorted(obj.items())) + + def __str__(self): + ret = [] + for k, v in self.items(): + if v is None: + ret.append(k) + else: + ret.append('%s=%s' % (k, v)) + return ','.join(ret) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/system/dconf.py b/plugins/modules/system/dconf.py new file mode 100644 index 0000000000..a7fe710622 --- /dev/null +++ b/plugins/modules/system/dconf.py @@ -0,0 +1,381 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2017, Branko Majic +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +module: dconf +author: + - "Branko Majic (@azaghal)" +short_description: Modify and read dconf database +description: + - This module allows modifications and reading of dconf database. The module + is implemented as a wrapper around dconf tool. Please see the dconf(1) man + page for more details. + - Since C(dconf) requires a running D-Bus session to change values, the module + will try to detect an existing session and reuse it, or run the tool via + C(dbus-run-session). +notes: + - This module depends on C(psutil) Python library (version 4.0.0 and upwards), + C(dconf), C(dbus-send), and C(dbus-run-session) binaries. Depending on + distribution you are using, you may need to install additional packages to + have these available. + - Detection of existing, running D-Bus session, required to change settings + via C(dconf), is not 100% reliable due to implementation details of D-Bus + daemon itself. This might lead to running applications not picking-up + changes on the fly if options are changed via Ansible and + C(dbus-run-session). + - Keep in mind that the C(dconf) CLI tool, which this module wraps around, + utilises an unusual syntax for the values (GVariant). For example, if you + wanted to provide a string value, the correct syntax would be + C(value="'myvalue'") - with single quotes as part of the Ansible parameter + value. 
+ - When using loops in combination with a value like + :code:`"[('xkb', 'us'), ('xkb', 'se')]"`, you need to be aware of possible + type conversions. Applying a filter :code:`"{{ item.value | string }}"` + to the parameter variable can avoid potential conversion problems. + - The easiest way to figure out exact syntax/value you need to provide for a + key is by making the configuration change in application affected by the + key, and then having a look at value set via commands C(dconf dump + /path/to/dir/) or C(dconf read /path/to/key). +options: + key: + required: true + description: + - A dconf key to modify or read from the dconf database. + value: + required: false + description: + - Value to set for the specified dconf key. Value should be specified in + GVariant format. Due to complexity of this format, it is best to have a + look at existing values in the dconf database. Required for + C(state=present). + state: + required: false + default: present + choices: + - read + - present + - absent + description: + - The action to take upon the key/value. +''' + +RETURN = """ +value: + description: value associated with the requested key + returned: success, state was "read" + type: str + sample: "'Default'" +""" + +EXAMPLES = """ +- name: Configure available keyboard layouts in Gnome + dconf: + key: "/org/gnome/desktop/input-sources/sources" + value: "[('xkb', 'us'), ('xkb', 'se')]" + state: present + +- name: Read currently available keyboard layouts in Gnome + dconf: + key: "/org/gnome/desktop/input-sources/sources" + state: read + register: keyboard_layouts + +- name: Reset the available keyboard layouts in Gnome + dconf: + key: "/org/gnome/desktop/input-sources/sources" + state: absent + +- name: Configure available keyboard layouts in Cinnamon + dconf: + key: "/org/gnome/libgnomekbd/keyboard/layouts" + value: "['us', 'se']" + state: present + +- name: Read currently available keyboard layouts in Cinnamon + dconf: + key: "/org/gnome/libgnomekbd/keyboard/layouts" + state: read + register: keyboard_layouts + +- name: Reset the available keyboard layouts in Cinnamon + dconf: + key: "/org/gnome/libgnomekbd/keyboard/layouts" + state: absent + +- name: Disable desktop effects in Cinnamon + dconf: + key: "/org/cinnamon/desktop-effects" + value: "false" + state: present +""" + + +import os +import traceback + +PSUTIL_IMP_ERR = None +try: + import psutil + psutil_found = True +except ImportError: + PSUTIL_IMP_ERR = traceback.format_exc() + psutil_found = False + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib + + +class DBusWrapper(object): + """ + Helper class that can be used for running a command with a working D-Bus + session. + + If possible, command will be run against an existing D-Bus session, + otherwise the session will be spawned via dbus-run-session. + + Example usage: + + dbus_wrapper = DBusWrapper(ansible_module) + dbus_wrapper.run_command(["printenv", "DBUS_SESSION_BUS_ADDRESS"]) + """ + + def __init__(self, module): + """ + Initialises an instance of the class. + + :param module: Ansible module instance used to signal failures and run commands. + :type module: AnsibleModule + """ + + # Store passed-in arguments and set-up some defaults. + self.module = module + + # Try to extract existing D-Bus session address. + self.dbus_session_bus_address = self._get_existing_dbus_session() + + # If no existing D-Bus session was detected, check if dbus-run-session + # is available. 
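+        # (Editor's note: get_bin_path(required=True) below makes the module
+        # fail fast when dbus-run-session is missing, before any dconf write
+        # is ever attempted through run_command().)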
+ if self.dbus_session_bus_address is None: + self.module.get_bin_path('dbus-run-session', required=True) + + def _get_existing_dbus_session(self): + """ + Detects and returns an existing D-Bus session bus address. + + :returns: string -- D-Bus session bus address. If a running D-Bus session was not detected, returns None. + """ + + # We'll be checking the processes of current user only. + uid = os.getuid() + + # Go through all the pids for this user, try to extract the D-Bus + # session bus address from environment, and ensure it is possible to + # connect to it. + self.module.debug("Trying to detect existing D-Bus user session for user: %d" % uid) + + for pid in psutil.pids(): + process = psutil.Process(pid) + process_real_uid, _, _ = process.uids() + try: + if process_real_uid == uid and 'DBUS_SESSION_BUS_ADDRESS' in process.environ(): + dbus_session_bus_address_candidate = process.environ()['DBUS_SESSION_BUS_ADDRESS'] + self.module.debug("Found D-Bus user session candidate at address: %s" % dbus_session_bus_address_candidate) + command = ['dbus-send', '--address=%s' % dbus_session_bus_address_candidate, '--type=signal', '/', 'com.example.test'] + rc, _, _ = self.module.run_command(command) + + if rc == 0: + self.module.debug("Verified D-Bus user session candidate as usable at address: %s" % dbus_session_bus_address_candidate) + + return dbus_session_bus_address_candidate + + # This can happen with things like SSH sessions etc. + except psutil.AccessDenied: + pass + + self.module.debug("Failed to find running D-Bus user session, will use dbus-run-session") + + return None + + def run_command(self, command): + """ + Runs the specified command within a functional D-Bus session. Command is + effectively passed-on to AnsibleModule.run_command() method, with + modification for using dbus-run-session if necessary. + + :param command: Command to run, including parameters. Each element of the list should be a string. + :type module: list + + :returns: tuple(result_code, standard_output, standard_error) -- Result code, standard output, and standard error from running the command. + """ + + if self.dbus_session_bus_address is None: + self.module.debug("Using dbus-run-session wrapper for running commands.") + command = ['dbus-run-session'] + command + rc, out, err = self.module.run_command(command) + + if self.dbus_session_bus_address is None and rc == 127: + self.module.fail_json(msg="Failed to run passed-in command, dbus-run-session faced an internal error: %s" % err) + else: + extra_environment = {'DBUS_SESSION_BUS_ADDRESS': self.dbus_session_bus_address} + rc, out, err = self.module.run_command(command, environ_update=extra_environment) + + return rc, out, err + + +class DconfPreference(object): + + def __init__(self, module, check_mode=False): + """ + Initialises instance of the class. + + :param module: Ansible module instance used to signal failures and run commands. + :type module: AnsibleModule + + :param check_mode: Specify whether to only check if a change should be made or if to actually make a change. + :type check_mode: bool + """ + + self.module = module + self.check_mode = check_mode + + def read(self, key): + """ + Retrieves current value associated with the dconf key. + + If an error occurs, a call will be made to AnsibleModule.fail_json. + + :returns: string -- Value assigned to the provided key. If the value is not set for specified key, returns None. 
+ """ + + command = ["dconf", "read", key] + + rc, out, err = self.module.run_command(command) + + if rc != 0: + self.module.fail_json(msg='dconf failed while reading the value with error: %s' % err) + + if out == '': + value = None + else: + value = out.rstrip('\n') + + return value + + def write(self, key, value): + """ + Writes the value for specified key. + + If an error occurs, a call will be made to AnsibleModule.fail_json. + + :param key: dconf key for which the value should be set. Should be a full path. + :type key: str + + :param value: Value to set for the specified dconf key. Should be specified in GVariant format. + :type value: str + + :returns: bool -- True if a change was made, False if no change was required. + """ + + # If no change is needed (or won't be done due to check_mode), notify + # caller straight away. + if value == self.read(key): + return False + elif self.check_mode: + return True + + # Set-up command to run. Since DBus is needed for write operation, wrap + # dconf command dbus-launch. + command = ["dconf", "write", key, value] + + # Run the command and fetch standard return code, stdout, and stderr. + dbus_wrapper = DBusWrapper(self.module) + rc, out, err = dbus_wrapper.run_command(command) + + if rc != 0: + self.module.fail_json(msg='dconf failed while write the value with error: %s' % err) + + # Value was changed. + return True + + def reset(self, key): + """ + Returns value for the specified key (removes it from user configuration). + + If an error occurs, a call will be made to AnsibleModule.fail_json. + + :param key: dconf key to reset. Should be a full path. + :type key: str + + :returns: bool -- True if a change was made, False if no change was required. + """ + + # Read the current value first. + current_value = self.read(key) + + # No change was needed, key is not set at all, or just notify user if we + # are in check mode. + if current_value is None: + return False + elif self.check_mode: + return True + + # Set-up command to run. Since DBus is needed for reset operation, wrap + # dconf command dbus-launch. + command = ["dconf", "reset", key] + + # Run the command and fetch standard return code, stdout, and stderr. + dbus_wrapper = DBusWrapper(self.module) + rc, out, err = dbus_wrapper.run_command(command) + + if rc != 0: + self.module.fail_json(msg='dconf failed while reseting the value with error: %s' % err) + + # Value was changed. + return True + + +def main(): + # Setup the Ansible module + module = AnsibleModule( + argument_spec=dict( + state=dict(default='present', choices=['present', 'absent', 'read']), + key=dict(required=True, type='str'), + value=dict(required=False, default=None, type='str'), + ), + supports_check_mode=True + ) + + if not psutil_found: + module.fail_json(msg=missing_required_lib("psutil"), exception=PSUTIL_IMP_ERR) + + # If present state was specified, value must be provided. + if module.params['state'] == 'present' and module.params['value'] is None: + module.fail_json(msg='State "present" requires "value" to be set.') + + # Create wrapper instance. + dconf = DconfPreference(module, module.check_mode) + + # Process based on different states. 
+ if module.params['state'] == 'read': + value = dconf.read(module.params['key']) + module.exit_json(changed=False, value=value) + elif module.params['state'] == 'present': + changed = dconf.write(module.params['key'], module.params['value']) + module.exit_json(changed=changed) + elif module.params['state'] == 'absent': + changed = dconf.reset(module.params['key']) + module.exit_json(changed=changed) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/system/facter.py b/plugins/modules/system/facter.py new file mode 100644 index 0000000000..03c4821063 --- /dev/null +++ b/plugins/modules/system/facter.py @@ -0,0 +1,53 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2012, Michael DeHaan +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: facter +short_description: Runs the discovery program I(facter) on the remote system +description: +- Runs the I(facter) discovery program + (U(https://github.com/puppetlabs/facter)) on the remote system, returning + JSON data that can be useful for inventory purposes. +requirements: +- facter +- ruby-json +author: +- Ansible Core Team +- Michael DeHaan +''' + +EXAMPLES = ''' +# Example command-line invocation +ansible www.example.net -m facter +''' +import json + +from ansible.module_utils.basic import AnsibleModule + + +def main(): + module = AnsibleModule( + argument_spec=dict() + ) + + facter_path = module.get_bin_path('facter', opt_dirs=['/opt/puppetlabs/bin']) + + cmd = [facter_path, "--json"] + + rc, out, err = module.run_command(cmd, check_rc=True) + module.exit_json(**json.loads(out)) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/system/filesystem.py b/plugins/modules/system/filesystem.py new file mode 100644 index 0000000000..d73a815b38 --- /dev/null +++ b/plugins/modules/system/filesystem.py @@ -0,0 +1,405 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2013, Alexander Bulimov +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +author: +- Alexander Bulimov (@abulimov) +module: filesystem +short_description: Makes a filesystem +description: + - This module creates a filesystem. +options: + fstype: + choices: [ btrfs, ext2, ext3, ext4, ext4dev, f2fs, lvm, ocfs2, reiserfs, xfs, vfat, swap ] + description: + - Filesystem type to be created. + - reiserfs support was added in 2.2. + - lvm support was added in 2.5. + - since 2.5, I(dev) can be an image file. + - vfat support was added in 2.5 + - ocfs2 support was added in 2.6 + - f2fs support was added in 2.7 + - swap support was added in 2.8 + required: yes + aliases: [type] + dev: + description: + - Target path to device or image file. + required: yes + aliases: [device] + force: + description: + - If C(yes), allows to create new filesystem on devices that already has filesystem. + type: bool + default: 'no' + resizefs: + description: + - If C(yes), if the block device and filesystem size differ, grow the filesystem into the space. 
+ - Supported for C(ext2), C(ext3), C(ext4), C(ext4dev), C(f2fs), C(lvm), C(xfs), C(vfat), C(swap) filesystems. + - XFS Will only grow if mounted. + - vFAT will likely fail if fatresize < 1.04. + type: bool + default: 'no' + opts: + description: + - List of options to be passed to mkfs command. +requirements: + - Uses tools related to the I(fstype) (C(mkfs)) and C(blkid) command. When I(resizefs) is enabled, C(blockdev) command is required too. +notes: + - Potential filesystem on I(dev) are checked using C(blkid), in case C(blkid) isn't able to detect an existing filesystem, + this filesystem is overwritten even if I(force) is C(no). +''' + +EXAMPLES = ''' +- name: Create a ext2 filesystem on /dev/sdb1 + filesystem: + fstype: ext2 + dev: /dev/sdb1 + +- name: Create a ext4 filesystem on /dev/sdb1 and check disk blocks + filesystem: + fstype: ext4 + dev: /dev/sdb1 + opts: -cc +''' + +from distutils.version import LooseVersion +import os +import platform +import re +import stat + +from ansible.module_utils.basic import AnsibleModule + + +class Device(object): + def __init__(self, module, path): + self.module = module + self.path = path + + def size(self): + """ Return size in bytes of device. Returns int """ + statinfo = os.stat(self.path) + if stat.S_ISBLK(statinfo.st_mode): + blockdev_cmd = self.module.get_bin_path("blockdev", required=True) + _, devsize_in_bytes, _ = self.module.run_command([blockdev_cmd, "--getsize64", self.path], check_rc=True) + return int(devsize_in_bytes) + elif os.path.isfile(self.path): + return os.path.getsize(self.path) + else: + self.module.fail_json(changed=False, msg="Target device not supported: %s" % self) + + def __str__(self): + return self.path + + +class Filesystem(object): + + GROW = None + MKFS = None + MKFS_FORCE_FLAGS = '' + + LANG_ENV = {'LANG': 'C', 'LC_ALL': 'C', 'LC_MESSAGES': 'C'} + + def __init__(self, module): + self.module = module + + @property + def fstype(self): + return type(self).__name__ + + def get_fs_size(self, dev): + """ Return size in bytes of filesystem on device. Returns int """ + raise NotImplementedError() + + def create(self, opts, dev): + if self.module.check_mode: + return + + mkfs = self.module.get_bin_path(self.MKFS, required=True) + if opts is None: + cmd = "%s %s '%s'" % (mkfs, self.MKFS_FORCE_FLAGS, dev) + else: + cmd = "%s %s %s '%s'" % (mkfs, self.MKFS_FORCE_FLAGS, opts, dev) + self.module.run_command(cmd, check_rc=True) + + def grow_cmd(self, dev): + cmd = self.module.get_bin_path(self.GROW, required=True) + return [cmd, str(dev)] + + def grow(self, dev): + """Get dev and fs size and compare. Returns stdout of used command.""" + devsize_in_bytes = dev.size() + + try: + fssize_in_bytes = self.get_fs_size(dev) + except NotImplementedError: + self.module.fail_json(changed=False, msg="module does not support resizing %s filesystem yet." 
% self.fstype) + + if not fssize_in_bytes < devsize_in_bytes: + self.module.exit_json(changed=False, msg="%s filesystem is using the whole device %s" % (self.fstype, dev)) + elif self.module.check_mode: + self.module.exit_json(changed=True, msg="Resizing filesystem %s on device %s" % (self.fstype, dev)) + else: + _, out, _ = self.module.run_command(self.grow_cmd(dev), check_rc=True) + return out + + +class Ext(Filesystem): + MKFS_FORCE_FLAGS = '-F' + GROW = 'resize2fs' + + def get_fs_size(self, dev): + cmd = self.module.get_bin_path('tune2fs', required=True) + # Get Block count and Block size + _, size, _ = self.module.run_command([cmd, '-l', str(dev)], check_rc=True, environ_update=self.LANG_ENV) + for line in size.splitlines(): + if 'Block count:' in line: + block_count = int(line.split(':')[1].strip()) + elif 'Block size:' in line: + block_size = int(line.split(':')[1].strip()) + return block_size * block_count + + +class Ext2(Ext): + MKFS = 'mkfs.ext2' + + +class Ext3(Ext): + MKFS = 'mkfs.ext3' + + +class Ext4(Ext): + MKFS = 'mkfs.ext4' + + +class XFS(Filesystem): + MKFS = 'mkfs.xfs' + MKFS_FORCE_FLAGS = '-f' + GROW = 'xfs_growfs' + + def get_fs_size(self, dev): + cmd = self.module.get_bin_path('xfs_growfs', required=True) + _, size, _ = self.module.run_command([cmd, '-n', str(dev)], check_rc=True, environ_update=self.LANG_ENV) + for line in size.splitlines(): + col = line.split('=') + if col[0].strip() == 'data': + if col[1].strip() != 'bsize': + self.module.fail_json(msg='Unexpected output format from xfs_growfs (could not locate "bsize")') + if col[2].split()[1] != 'blocks': + self.module.fail_json(msg='Unexpected output format from xfs_growfs (could not locate "blocks")') + block_size = int(col[2].split()[0]) + block_count = int(col[3].split(',')[0]) + return block_size * block_count + + +class Reiserfs(Filesystem): + MKFS = 'mkfs.reiserfs' + MKFS_FORCE_FLAGS = '-f' + + +class Btrfs(Filesystem): + MKFS = 'mkfs.btrfs' + + def __init__(self, module): + super(Btrfs, self).__init__(module) + _, stdout, stderr = self.module.run_command('%s --version' % self.MKFS, check_rc=True) + match = re.search(r" v([0-9.]+)", stdout) + if not match: + # v0.20-rc1 use stderr + match = re.search(r" v([0-9.]+)", stderr) + if match: + # v0.20-rc1 doesn't have --force parameter added in following version v3.12 + if LooseVersion(match.group(1)) >= LooseVersion('3.12'): + self.MKFS_FORCE_FLAGS = '-f' + else: + self.MKFS_FORCE_FLAGS = '' + else: + # assume version is greater or equal to 3.12 + self.MKFS_FORCE_FLAGS = '-f' + self.module.warn('Unable to identify mkfs.btrfs version (%r, %r)' % (stdout, stderr)) + + +class Ocfs2(Filesystem): + MKFS = 'mkfs.ocfs2' + MKFS_FORCE_FLAGS = '-Fx' + + +class F2fs(Filesystem): + MKFS = 'mkfs.f2fs' + GROW = 'resize.f2fs' + + @property + def MKFS_FORCE_FLAGS(self): + mkfs = self.module.get_bin_path(self.MKFS, required=True) + cmd = "%s %s" % (mkfs, os.devnull) + _, out, _ = self.module.run_command(cmd, check_rc=False, environ_update=self.LANG_ENV) + # Looking for " F2FS-tools: mkfs.f2fs Ver: 1.10.0 (2018-01-30)" + # mkfs.f2fs displays version since v1.2.0 + match = re.search(r"F2FS-tools: mkfs.f2fs Ver: ([0-9.]+) \(", out) + if match is not None: + # Since 1.9.0, mkfs.f2fs check overwrite before make filesystem + # before that version -f switch wasn't used + if LooseVersion(match.group(1)) >= LooseVersion('1.9.0'): + return '-f' + + return '' + + def get_fs_size(self, dev): + cmd = self.module.get_bin_path('dump.f2fs', required=True) + # Get sector count and sector size + 
+        _, dump, _ = self.module.run_command([cmd, str(dev)], check_rc=True, environ_update=self.LANG_ENV)
+        sector_size = None
+        sector_count = None
+        for line in dump.splitlines():
+            if 'Info: sector size = ' in line:
+                # expected: 'Info: sector size = 512'
+                sector_size = int(line.split()[4])
+            elif 'Info: total FS sectors = ' in line:
+                # expected: 'Info: total FS sectors = 102400 (50 MB)'
+                sector_count = int(line.split()[5])
+
+            if None not in (sector_size, sector_count):
+                break
+        else:
+            self.module.warn("Unable to process dump.f2fs output '%s'" % dump)
+            self.module.fail_json(msg="Unable to process dump.f2fs output for %s" % dev)
+
+        return sector_size * sector_count
+
+
+class VFAT(Filesystem):
+    if platform.system() == 'FreeBSD':
+        MKFS = "newfs_msdos"
+    else:
+        MKFS = 'mkfs.vfat'
+    GROW = 'fatresize'
+
+    def get_fs_size(self, dev):
+        cmd = self.module.get_bin_path(self.GROW, required=True)
+        _, output, _ = self.module.run_command([cmd, '--info', str(dev)], check_rc=True, environ_update=self.LANG_ENV)
+        for line in output.splitlines()[1:]:
+            param, value = line.split(':', 1)
+            if param.strip() == 'Size':
+                return int(value.strip())
+        self.module.fail_json(msg="fatresize failed to provide filesystem size for %s" % dev)
+
+    def grow_cmd(self, dev):
+        cmd = self.module.get_bin_path(self.GROW, required=True)
+        return [cmd, "-s", str(dev.size()), str(dev.path)]
+
+
+class LVM(Filesystem):
+    MKFS = 'pvcreate'
+    MKFS_FORCE_FLAGS = '-f'
+    GROW = 'pvresize'
+
+    def get_fs_size(self, dev):
+        cmd = self.module.get_bin_path('pvs', required=True)
+        _, size, _ = self.module.run_command([cmd, '--noheadings', '-o', 'pv_size', '--units', 'b', '--nosuffix', str(dev)], check_rc=True)
+        block_count = int(size)
+        return block_count
+
+
+class Swap(Filesystem):
+    MKFS = 'mkswap'
+    MKFS_FORCE_FLAGS = '-f'
+
+
+FILESYSTEMS = {
+    'ext2': Ext2,
+    'ext3': Ext3,
+    'ext4': Ext4,
+    'ext4dev': Ext4,
+    'f2fs': F2fs,
+    'reiserfs': Reiserfs,
+    'xfs': XFS,
+    'btrfs': Btrfs,
+    'vfat': VFAT,
+    'ocfs2': Ocfs2,
+    'LVM2_member': LVM,
+    'swap': Swap,
+}
+
+
+def main():
+    friendly_names = {
+        'lvm': 'LVM2_member',
+    }
+
+    fstypes = set(FILESYSTEMS.keys()) - set(friendly_names.values()) | set(friendly_names.keys())
+
+    # There is no "single command" to manipulate filesystems, so we map each
+    # filesystem type to its command and options
+    module = AnsibleModule(
+        argument_spec=dict(
+            fstype=dict(required=True, aliases=['type'],
+                        choices=list(fstypes)),
+            dev=dict(required=True, aliases=['device']),
+            opts=dict(),
+            force=dict(type='bool', default=False),
+            resizefs=dict(type='bool', default=False),
+        ),
+        supports_check_mode=True,
+    )
+
+    dev = module.params['dev']
+    fstype = module.params['fstype']
+    opts = module.params['opts']
+    force = module.params['force']
+    resizefs = module.params['resizefs']
+
+    if fstype in friendly_names:
+        fstype = friendly_names[fstype]
+
+    changed = False
+
+    try:
+        klass = FILESYSTEMS[fstype]
+    except KeyError:
+        module.fail_json(changed=False, msg="module does not support this filesystem (%s) yet." % fstype)
+
+    if not os.path.exists(dev):
+        module.fail_json(msg="Device %s not found." % dev)
+    dev = Device(module, dev)
+
+    cmd = module.get_bin_path('blkid', required=True)
+    rc, raw_fs, err = module.run_command("%s -c /dev/null -o value -s TYPE %s" % (cmd, dev))
+    # If blkid is unable to identify an existing filesystem, the device is
+    # treated as empty; any existing filesystem would then be overwritten
+    # even though force is not enabled.
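+    # The '-c /dev/null' above points blkid at an empty cache file, so the
+    # device is probed fresh rather than answered from a stale cache entry.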
+    fs = raw_fs.strip()
+
+    filesystem = klass(module)
+
+    same_fs = fs and FILESYSTEMS.get(fs) == FILESYSTEMS[fstype]
+    if same_fs and not resizefs and not force:
+        module.exit_json(changed=False)
+    elif same_fs and resizefs:
+        if not filesystem.GROW:
+            module.fail_json(changed=False, msg="module does not support resizing %s filesystem yet." % fstype)
+
+        out = filesystem.grow(dev)
+
+        module.exit_json(changed=True, msg=out)
+    elif fs and not force:
+        module.fail_json(msg="'%s' is already used as %s, use force=yes to overwrite" % (dev, fs), rc=rc, err=err)
+
+    # create fs
+    filesystem.create(opts, dev)
+    changed = True
+
+    module.exit_json(changed=changed)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/system/firewalld.py b/plugins/modules/system/firewalld.py
new file mode 100644
index 0000000000..6324ce32fe
--- /dev/null
+++ b/plugins/modules/system/firewalld.py
@@ -0,0 +1,863 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2013, Adam Miller
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+DOCUMENTATION = r'''
+---
+module: firewalld
+short_description: Manage arbitrary ports/services with firewalld
+description:
+  - This module allows for addition or deletion of services and ports (either TCP or UDP) in either running or permanent firewalld rules.
+options:
+  service:
+    description:
+      - Name of a service to add/remove to/from firewalld.
+      - The service must be listed in the output of C(firewall-cmd --get-services).
+    type: str
+  port:
+    description:
+      - Name of a port or port range to add/remove to/from firewalld.
+      - Must be in the form PORT/PROTOCOL or PORT-PORT/PROTOCOL for port ranges.
+    type: str
+  rich_rule:
+    description:
+      - Rich rule to add/remove to/from firewalld.
+    type: str
+  source:
+    description:
+      - The source/network you would like to add/remove to/from firewalld.
+    type: str
+  interface:
+    description:
+      - The interface you would like to add/remove to/from a zone in firewalld.
+    type: str
+  icmp_block:
+    description:
+      - The ICMP block you would like to add/remove to/from a zone in firewalld.
+    type: str
+  icmp_block_inversion:
+    description:
+      - Enable/Disable inversion of ICMP blocks for a zone in firewalld.
+    type: str
+  zone:
+    description:
+      - The firewalld zone to add/remove to/from.
+      - Note that the default zone can be configured per system but C(public) is default from upstream.
+      - Available choices can be extended based on per-system configs, listed here are "out of the box" defaults.
+      - Possible values include C(block), C(dmz), C(drop), C(external), C(home), C(internal), C(public), C(trusted), C(work).
+    type: str
+  permanent:
+    description:
+      - Should this configuration be in the running firewalld configuration or persist across reboots.
+      - As of Ansible 2.3, permanent operations can operate on firewalld configs when it is not running (requires firewalld >= 0.3.9).
+      - Note that if this is C(no), immediate is assumed C(yes).
+    type: bool
+  immediate:
+    description:
+      - Should this configuration be applied immediately, if set as permanent.
+    type: bool
+    default: no
+  state:
+    description:
+      - Enable or disable a setting.
+      - 'For ports: Should this port accept (enabled) or reject (disabled) connections.'
+ - The states C(present) and C(absent) can only be used in zone level operations (i.e. when no other parameters but zone and state are set). + type: str + required: true + choices: [ absent, disabled, enabled, present ] + timeout: + description: + - The amount of time the rule should be in effect for when non-permanent. + type: int + default: 0 + masquerade: + description: + - The masquerade setting you would like to enable/disable to/from zones within firewalld. + type: str + offline: + description: + - Whether to run this module even when firewalld is offline. + type: bool +notes: + - Not tested on any Debian based system. + - Requires the python2 bindings of firewalld, which may not be installed by default. + - For distributions where the python2 firewalld bindings are unavailable (e.g Fedora 28 and later) you will have to set the + ansible_python_interpreter for these hosts to the python3 interpreter path and install the python3 bindings. + - Zone transactions (creating, deleting) can be performed by using only the zone and state parameters "present" or "absent". + Note that zone transactions must explicitly be permanent. This is a limitation in firewalld. + This also means that you will have to reload firewalld after adding a zone that you wish to perform immediate actions on. + The module will not take care of this for you implicitly because that would undo any previously performed immediate actions which were not + permanent. Therefore, if you require immediate access to a newly created zone it is recommended you reload firewalld immediately after the zone + creation returns with a changed state and before you perform any other immediate, non-permanent actions on that zone. +requirements: +- firewalld >= 0.2.11 +author: +- Adam Miller (@maxamillion) +''' + +EXAMPLES = r''' +- firewalld: + service: https + permanent: yes + state: enabled + +- firewalld: + port: 8081/tcp + permanent: yes + state: disabled + +- firewalld: + port: 161-162/udp + permanent: yes + state: enabled + +- firewalld: + zone: dmz + service: http + permanent: yes + state: enabled + +- firewalld: + rich_rule: rule service name="ftp" audit limit value="1/m" accept + permanent: yes + state: enabled + +- firewalld: + source: 192.0.2.0/24 + zone: internal + state: enabled + +- firewalld: + zone: trusted + interface: eth2 + permanent: yes + state: enabled + +- firewalld: + masquerade: yes + state: enabled + permanent: yes + zone: dmz + +- firewalld: + zone: custom + state: present + permanent: yes + +- firewalld: + zone: drop + state: enabled + permanent: yes + icmp_block_inversion: yes + +- firewalld: + zone: drop + state: enabled + permanent: yes + icmp_block: echo-request + +- name: Redirect port 443 to 8443 with Rich Rule + firewalld: + rich_rule: rule family=ipv4 forward-port port=443 protocol=tcp to-port=8443 + zone: public + permanent: yes + immediate: yes + state: enabled +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.firewalld import FirewallTransaction, fw_offline + +try: + from firewall.client import Rich_Rule + from firewall.client import FirewallClientZoneSettings +except ImportError: + # The import errors are handled via FirewallTransaction, don't need to + # duplicate that here + pass + + +class IcmpBlockTransaction(FirewallTransaction): + """ + IcmpBlockTransaction + """ + + def __init__(self, module, action_args=None, zone=None, desired_state=None, permanent=False, immediate=False): + super(IcmpBlockTransaction, 
self).__init__( + module, action_args=action_args, desired_state=desired_state, zone=zone, permanent=permanent, immediate=immediate + ) + + def get_enabled_immediate(self, icmp_block, timeout): + return icmp_block in self.fw.getIcmpBlocks(self.zone) + + def get_enabled_permanent(self, icmp_block, timeout): + fw_zone, fw_settings = self.get_fw_zone_settings() + return icmp_block in fw_settings.getIcmpBlocks() + + def set_enabled_immediate(self, icmp_block, timeout): + self.fw.addIcmpBlock(self.zone, icmp_block, timeout) + + def set_enabled_permanent(self, icmp_block, timeout): + fw_zone, fw_settings = self.get_fw_zone_settings() + fw_settings.addIcmpBlock(icmp_block) + self.update_fw_settings(fw_zone, fw_settings) + + def set_disabled_immediate(self, icmp_block, timeout): + self.fw.removeIcmpBlock(self.zone, icmp_block) + + def set_disabled_permanent(self, icmp_block, timeout): + fw_zone, fw_settings = self.get_fw_zone_settings() + fw_settings.removeIcmpBlock(icmp_block) + self.update_fw_settings(fw_zone, fw_settings) + + +class IcmpBlockInversionTransaction(FirewallTransaction): + """ + IcmpBlockInversionTransaction + """ + + def __init__(self, module, action_args=None, zone=None, desired_state=None, permanent=False, immediate=False): + super(IcmpBlockInversionTransaction, self).__init__( + module, action_args=action_args, desired_state=desired_state, zone=zone, permanent=permanent, immediate=immediate + ) + + def get_enabled_immediate(self): + if self.fw.queryIcmpBlockInversion(self.zone) is True: + return True + else: + return False + + def get_enabled_permanent(self): + fw_zone, fw_settings = self.get_fw_zone_settings() + if fw_settings.getIcmpBlockInversion() is True: + return True + else: + return False + + def set_enabled_immediate(self): + self.fw.addIcmpBlockInversion(self.zone) + + def set_enabled_permanent(self): + fw_zone, fw_settings = self.get_fw_zone_settings() + fw_settings.setIcmpBlockInversion(True) + self.update_fw_settings(fw_zone, fw_settings) + + def set_disabled_immediate(self): + self.fw.removeIcmpBlockInversion(self.zone) + + def set_disabled_permanent(self): + fw_zone, fw_settings = self.get_fw_zone_settings() + fw_settings.setIcmpBlockInversion(False) + self.update_fw_settings(fw_zone, fw_settings) + + +class ServiceTransaction(FirewallTransaction): + """ + ServiceTransaction + """ + + def __init__(self, module, action_args=None, zone=None, desired_state=None, permanent=False, immediate=False): + super(ServiceTransaction, self).__init__( + module, action_args=action_args, desired_state=desired_state, zone=zone, permanent=permanent, immediate=immediate + ) + + def get_enabled_immediate(self, service, timeout): + if service in self.fw.getServices(self.zone): + return True + else: + return False + + def get_enabled_permanent(self, service, timeout): + fw_zone, fw_settings = self.get_fw_zone_settings() + + if service in fw_settings.getServices(): + return True + else: + return False + + def set_enabled_immediate(self, service, timeout): + self.fw.addService(self.zone, service, timeout) + + def set_enabled_permanent(self, service, timeout): + fw_zone, fw_settings = self.get_fw_zone_settings() + fw_settings.addService(service) + self.update_fw_settings(fw_zone, fw_settings) + + def set_disabled_immediate(self, service, timeout): + self.fw.removeService(self.zone, service) + + def set_disabled_permanent(self, service, timeout): + fw_zone, fw_settings = self.get_fw_zone_settings() + fw_settings.removeService(service) + self.update_fw_settings(fw_zone, fw_settings) + + 
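+# All transaction classes in this module share one template: the
+# FirewallTransaction base class (from this collection's module_utils) is
+# assumed to dispatch on desired_state and the permanent/immediate flags, so
+# each subclass only supplies the firewalld client calls for its own setting.
+# As an illustrative sketch only (not part of the module, and assuming the
+# firewalld client exposes matching getProtocols/addProtocol/removeProtocol
+# calls), a hypothetical new setting would need just this shape:
+#
+#   class ProtocolTransaction(FirewallTransaction):
+#       def get_enabled_immediate(self, protocol, timeout):
+#           return protocol in self.fw.getProtocols(self.zone)
+#
+#       def get_enabled_permanent(self, protocol, timeout):
+#           fw_zone, fw_settings = self.get_fw_zone_settings()
+#           return protocol in fw_settings.getProtocols()
+#
+#       def set_enabled_immediate(self, protocol, timeout):
+#           self.fw.addProtocol(self.zone, protocol, timeout)
+#
+#       def set_enabled_permanent(self, protocol, timeout):
+#           fw_zone, fw_settings = self.get_fw_zone_settings()
+#           fw_settings.addProtocol(protocol)
+#           self.update_fw_settings(fw_zone, fw_settings)
+#
+#       def set_disabled_immediate(self, protocol, timeout):
+#           self.fw.removeProtocol(self.zone, protocol)
+#
+#       def set_disabled_permanent(self, protocol, timeout):
+#           fw_zone, fw_settings = self.get_fw_zone_settings()
+#           fw_settings.removeProtocol(protocol)
+#           self.update_fw_settings(fw_zone, fw_settings)
+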
+class MasqueradeTransaction(FirewallTransaction): + """ + MasqueradeTransaction + """ + + def __init__(self, module, action_args=None, zone=None, desired_state=None, permanent=False, immediate=False): + super(MasqueradeTransaction, self).__init__( + module, action_args=action_args, desired_state=desired_state, zone=zone, permanent=permanent, immediate=immediate + ) + + self.enabled_msg = "Added masquerade to zone %s" % self.zone + self.disabled_msg = "Removed masquerade from zone %s" % self.zone + + def get_enabled_immediate(self): + if self.fw.queryMasquerade(self.zone) is True: + return True + else: + return False + + def get_enabled_permanent(self): + fw_zone, fw_settings = self.get_fw_zone_settings() + if fw_settings.getMasquerade() is True: + return True + else: + return False + + def set_enabled_immediate(self): + self.fw.addMasquerade(self.zone) + + def set_enabled_permanent(self): + fw_zone, fw_settings = self.get_fw_zone_settings() + fw_settings.setMasquerade(True) + self.update_fw_settings(fw_zone, fw_settings) + + def set_disabled_immediate(self): + self.fw.removeMasquerade(self.zone) + + def set_disabled_permanent(self): + fw_zone, fw_settings = self.get_fw_zone_settings() + fw_settings.setMasquerade(False) + self.update_fw_settings(fw_zone, fw_settings) + + +class PortTransaction(FirewallTransaction): + """ + PortTransaction + """ + + def __init__(self, module, action_args=None, zone=None, desired_state=None, permanent=False, immediate=False): + super(PortTransaction, self).__init__( + module, action_args=action_args, desired_state=desired_state, zone=zone, permanent=permanent, immediate=immediate + ) + + def get_enabled_immediate(self, port, protocol, timeout): + port_proto = [port, protocol] + if self.fw_offline: + fw_zone, fw_settings = self.get_fw_zone_settings() + ports_list = fw_settings.getPorts() + else: + ports_list = self.fw.getPorts(self.zone) + + if port_proto in ports_list: + return True + else: + return False + + def get_enabled_permanent(self, port, protocol, timeout): + port_proto = (port, protocol) + fw_zone, fw_settings = self.get_fw_zone_settings() + + if port_proto in fw_settings.getPorts(): + return True + else: + return False + + def set_enabled_immediate(self, port, protocol, timeout): + self.fw.addPort(self.zone, port, protocol, timeout) + + def set_enabled_permanent(self, port, protocol, timeout): + fw_zone, fw_settings = self.get_fw_zone_settings() + fw_settings.addPort(port, protocol) + self.update_fw_settings(fw_zone, fw_settings) + + def set_disabled_immediate(self, port, protocol, timeout): + self.fw.removePort(self.zone, port, protocol) + + def set_disabled_permanent(self, port, protocol, timeout): + fw_zone, fw_settings = self.get_fw_zone_settings() + fw_settings.removePort(port, protocol) + self.update_fw_settings(fw_zone, fw_settings) + + +class InterfaceTransaction(FirewallTransaction): + """ + InterfaceTransaction + """ + + def __init__(self, module, action_args=None, zone=None, desired_state=None, permanent=False, immediate=False): + super(InterfaceTransaction, self).__init__( + module, action_args=action_args, desired_state=desired_state, zone=zone, permanent=permanent, immediate=immediate + ) + + self.enabled_msg = "Changed %s to zone %s" % \ + (self.action_args[0], self.zone) + + self.disabled_msg = "Removed %s from zone %s" % \ + (self.action_args[0], self.zone) + + def get_enabled_immediate(self, interface): + if self.fw_offline: + fw_zone, fw_settings = self.get_fw_zone_settings() + interface_list = fw_settings.getInterfaces() + 
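+            # In offline mode there is no running daemon to query over D-Bus,
+            # so the interface list is read from the permanent zone settings.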
else: + interface_list = self.fw.getInterfaces(self.zone) + if interface in interface_list: + return True + else: + return False + + def get_enabled_permanent(self, interface): + fw_zone, fw_settings = self.get_fw_zone_settings() + + if interface in fw_settings.getInterfaces(): + return True + else: + return False + + def set_enabled_immediate(self, interface): + self.fw.changeZoneOfInterface(self.zone, interface) + + def set_enabled_permanent(self, interface): + fw_zone, fw_settings = self.get_fw_zone_settings() + if self.fw_offline: + iface_zone_objs = [] + for zone in self.fw.config.get_zones(): + old_zone_obj = self.fw.config.get_zone(zone) + if interface in old_zone_obj.interfaces: + iface_zone_objs.append(old_zone_obj) + if len(iface_zone_objs) > 1: + # Even it shouldn't happen, it's actually possible that + # the same interface is in several zone XML files + self.module.fail_json( + msg='ERROR: interface {0} is in {1} zone XML file, can only be in one'.format( + interface, + len(iface_zone_objs) + ) + ) + old_zone_obj = iface_zone_objs[0] + if old_zone_obj.name != self.zone: + old_zone_settings = FirewallClientZoneSettings( + self.fw.config.get_zone_config(old_zone_obj) + ) + old_zone_settings.removeInterface(interface) # remove from old + self.fw.config.set_zone_config( + old_zone_obj, + old_zone_settings.settings + ) + fw_settings.addInterface(interface) # add to new + self.fw.config.set_zone_config(fw_zone, fw_settings.settings) + else: + old_zone_name = self.fw.config().getZoneOfInterface(interface) + if old_zone_name != self.zone: + if old_zone_name: + old_zone_obj = self.fw.config().getZoneByName(old_zone_name) + old_zone_settings = old_zone_obj.getSettings() + old_zone_settings.removeInterface(interface) # remove from old + old_zone_obj.update(old_zone_settings) + fw_settings.addInterface(interface) # add to new + fw_zone.update(fw_settings) + + def set_disabled_immediate(self, interface): + self.fw.removeInterface(self.zone, interface) + + def set_disabled_permanent(self, interface): + fw_zone, fw_settings = self.get_fw_zone_settings() + fw_settings.removeInterface(interface) + self.update_fw_settings(fw_zone, fw_settings) + + +class RichRuleTransaction(FirewallTransaction): + """ + RichRuleTransaction + """ + + def __init__(self, module, action_args=None, zone=None, desired_state=None, permanent=False, immediate=False): + super(RichRuleTransaction, self).__init__( + module, action_args=action_args, desired_state=desired_state, zone=zone, permanent=permanent, immediate=immediate + ) + + def get_enabled_immediate(self, rule, timeout): + # Convert the rule string to standard format + # before checking whether it is present + rule = str(Rich_Rule(rule_str=rule)) + if rule in self.fw.getRichRules(self.zone): + return True + else: + return False + + def get_enabled_permanent(self, rule, timeout): + fw_zone, fw_settings = self.get_fw_zone_settings() + # Convert the rule string to standard format + # before checking whether it is present + rule = str(Rich_Rule(rule_str=rule)) + if rule in fw_settings.getRichRules(): + return True + else: + return False + + def set_enabled_immediate(self, rule, timeout): + self.fw.addRichRule(self.zone, rule, timeout) + + def set_enabled_permanent(self, rule, timeout): + fw_zone, fw_settings = self.get_fw_zone_settings() + fw_settings.addRichRule(rule) + self.update_fw_settings(fw_zone, fw_settings) + + def set_disabled_immediate(self, rule, timeout): + self.fw.removeRichRule(self.zone, rule) + + def set_disabled_permanent(self, rule, timeout): + 
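+        # Unlike the get_* checks above, removal passes the rule string
+        # through without normalizing it via Rich_Rule.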
fw_zone, fw_settings = self.get_fw_zone_settings() + fw_settings.removeRichRule(rule) + self.update_fw_settings(fw_zone, fw_settings) + + +class SourceTransaction(FirewallTransaction): + """ + SourceTransaction + """ + + def __init__(self, module, action_args=None, zone=None, desired_state=None, permanent=False, immediate=False): + super(SourceTransaction, self).__init__( + module, action_args=action_args, desired_state=desired_state, zone=zone, permanent=permanent, immediate=immediate + ) + + self.enabled_msg = "Added %s to zone %s" % \ + (self.action_args[0], self.zone) + + self.disabled_msg = "Removed %s from zone %s" % \ + (self.action_args[0], self.zone) + + def get_enabled_immediate(self, source): + if source in self.fw.getSources(self.zone): + return True + else: + return False + + def get_enabled_permanent(self, source): + fw_zone, fw_settings = self.get_fw_zone_settings() + if source in fw_settings.getSources(): + return True + else: + return False + + def set_enabled_immediate(self, source): + self.fw.addSource(self.zone, source) + + def set_enabled_permanent(self, source): + fw_zone, fw_settings = self.get_fw_zone_settings() + fw_settings.addSource(source) + self.update_fw_settings(fw_zone, fw_settings) + + def set_disabled_immediate(self, source): + self.fw.removeSource(self.zone, source) + + def set_disabled_permanent(self, source): + fw_zone, fw_settings = self.get_fw_zone_settings() + fw_settings.removeSource(source) + self.update_fw_settings(fw_zone, fw_settings) + + +class ZoneTransaction(FirewallTransaction): + """ + ZoneTransaction + """ + + def __init__(self, module, action_args=None, zone=None, desired_state=None, + permanent=True, immediate=False, enabled_values=None, disabled_values=None): + super(ZoneTransaction, self).__init__( + module, action_args=action_args, desired_state=desired_state, zone=zone, + permanent=permanent, immediate=immediate, + enabled_values=enabled_values or ["present"], + disabled_values=disabled_values or ["absent"]) + + self.enabled_msg = "Added zone %s" % \ + (self.zone) + + self.disabled_msg = "Removed zone %s" % \ + (self.zone) + + self.tx_not_permanent_error_msg = "Zone operations must be permanent. " \ + "Make sure you didn't set the 'permanent' flag to 'false' or the 'immediate' flag to 'true'." 
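+    # desired_state 'present'/'absent' is mapped onto the enabled/disabled
+    # paths via enabled_values/disabled_values above; every *_immediate
+    # variant below fails hard because firewalld only supports zone
+    # creation and removal in the permanent configuration.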
+ + def get_enabled_immediate(self): + self.module.fail_json(msg=self.tx_not_permanent_error_msg) + + def get_enabled_permanent(self): + zones = self.fw.config().listZones() + zone_names = [self.fw.config().getZone(z).get_property("name") for z in zones] + if self.zone in zone_names: + return True + else: + return False + + def set_enabled_immediate(self): + self.module.fail_json(msg=self.tx_not_permanent_error_msg) + + def set_enabled_permanent(self): + self.fw.config().addZone(self.zone, FirewallClientZoneSettings()) + + def set_disabled_immediate(self): + self.module.fail_json(msg=self.tx_not_permanent_error_msg) + + def set_disabled_permanent(self): + zone_obj = self.fw.config().getZoneByName(self.zone) + zone_obj.remove() + + +def main(): + + module = AnsibleModule( + argument_spec=dict( + icmp_block=dict(type='str'), + icmp_block_inversion=dict(type='str'), + service=dict(type='str'), + port=dict(type='str'), + rich_rule=dict(type='str'), + zone=dict(type='str'), + immediate=dict(type='bool', default=False), + source=dict(type='str'), + permanent=dict(type='bool'), + state=dict(type='str', required=True, choices=['absent', 'disabled', 'enabled', 'present']), + timeout=dict(type='int', default=0), + interface=dict(type='str'), + masquerade=dict(type='str'), + offline=dict(type='bool'), + ), + supports_check_mode=True, + required_by=dict( + interface=('zone',), + source=('permanent',), + ), + ) + + permanent = module.params['permanent'] + desired_state = module.params['state'] + immediate = module.params['immediate'] + timeout = module.params['timeout'] + interface = module.params['interface'] + masquerade = module.params['masquerade'] + + # Sanity checks + FirewallTransaction.sanity_check(module) + + # If neither permanent or immediate is provided, assume immediate (as + # written in the module's docs) + if not permanent and not immediate: + immediate = True + + # Verify required params are provided + if immediate and fw_offline: + module.fail_json(msg='firewall is not currently running, unable to perform immediate actions without a running firewall daemon') + + changed = False + msgs = [] + icmp_block = module.params['icmp_block'] + icmp_block_inversion = module.params['icmp_block_inversion'] + service = module.params['service'] + rich_rule = module.params['rich_rule'] + source = module.params['source'] + zone = module.params['zone'] + + if module.params['port'] is not None: + if '/' in module.params['port']: + port, protocol = module.params['port'].strip().split('/') + else: + protocol = None + if not protocol: + module.fail_json(msg='improper port format (missing protocol?)') + else: + port = None + + modification_count = 0 + if icmp_block is not None: + modification_count += 1 + if icmp_block_inversion is not None: + modification_count += 1 + if service is not None: + modification_count += 1 + if port is not None: + modification_count += 1 + if rich_rule is not None: + modification_count += 1 + if interface is not None: + modification_count += 1 + if masquerade is not None: + modification_count += 1 + if source is not None: + modification_count += 1 + + if modification_count > 1: + module.fail_json( + msg='can only operate on port, service, rich_rule, masquerade, icmp_block, icmp_block_inversion, interface or source at once' + ) + elif modification_count > 0 and desired_state in ['absent', 'present']: + module.fail_json( + msg='absent and present state can only be used in zone level operations' + ) + + if icmp_block is not None: + + transaction = IcmpBlockTransaction( + module, + 
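+            # action_args is passed through to the get_*/set_* handlers as
+            # their positional arguments, which is why IcmpBlockTransaction's
+            # handlers take (icmp_block, timeout).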
action_args=(icmp_block, timeout), + zone=zone, + desired_state=desired_state, + permanent=permanent, + immediate=immediate, + ) + + changed, transaction_msgs = transaction.run() + msgs = msgs + transaction_msgs + if changed is True: + msgs.append("Changed icmp-block %s to %s" % (icmp_block, desired_state)) + + if icmp_block_inversion is not None: + + transaction = IcmpBlockInversionTransaction( + module, + action_args=(), + zone=zone, + desired_state=desired_state, + permanent=permanent, + immediate=immediate, + ) + + changed, transaction_msgs = transaction.run() + msgs = msgs + transaction_msgs + if changed is True: + msgs.append("Changed icmp-block-inversion %s to %s" % (icmp_block_inversion, desired_state)) + + if service is not None: + + transaction = ServiceTransaction( + module, + action_args=(service, timeout), + zone=zone, + desired_state=desired_state, + permanent=permanent, + immediate=immediate, + ) + + changed, transaction_msgs = transaction.run() + msgs = msgs + transaction_msgs + if changed is True: + msgs.append("Changed service %s to %s" % (service, desired_state)) + + if source is not None: + + transaction = SourceTransaction( + module, + action_args=(source,), + zone=zone, + desired_state=desired_state, + permanent=permanent, + immediate=immediate, + ) + + changed, transaction_msgs = transaction.run() + msgs = msgs + transaction_msgs + + if port is not None: + + transaction = PortTransaction( + module, + action_args=(port, protocol, timeout), + zone=zone, + desired_state=desired_state, + permanent=permanent, + immediate=immediate, + ) + + changed, transaction_msgs = transaction.run() + msgs = msgs + transaction_msgs + if changed is True: + msgs.append( + "Changed port %s to %s" % ( + "%s/%s" % (port, protocol), desired_state + ) + ) + + if rich_rule is not None: + + transaction = RichRuleTransaction( + module, + action_args=(rich_rule, timeout), + zone=zone, + desired_state=desired_state, + permanent=permanent, + immediate=immediate, + ) + + changed, transaction_msgs = transaction.run() + msgs = msgs + transaction_msgs + if changed is True: + msgs.append("Changed rich_rule %s to %s" % (rich_rule, desired_state)) + + if interface is not None: + + transaction = InterfaceTransaction( + module, + action_args=(interface,), + zone=zone, + desired_state=desired_state, + permanent=permanent, + immediate=immediate, + ) + + changed, transaction_msgs = transaction.run() + msgs = msgs + transaction_msgs + + if masquerade is not None: + + transaction = MasqueradeTransaction( + module, + action_args=(), + zone=zone, + desired_state=desired_state, + permanent=permanent, + immediate=immediate, + ) + + changed, transaction_msgs = transaction.run() + msgs = msgs + transaction_msgs + + ''' If there are no changes within the zone we are operating on the zone itself ''' + if modification_count == 0 and desired_state in ['absent', 'present']: + + transaction = ZoneTransaction( + module, + action_args=(), + zone=zone, + desired_state=desired_state, + permanent=permanent, + immediate=immediate, + ) + + changed, transaction_msgs = transaction.run() + msgs = msgs + transaction_msgs + if changed is True: + msgs.append("Changed zone %s to %s" % (zone, desired_state)) + + if fw_offline: + msgs.append("(offline operation: only on-disk configs were altered)") + + module.exit_json(changed=changed, msg=', '.join(msgs)) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/system/gconftool2.py b/plugins/modules/system/gconftool2.py new file mode 100644 index 0000000000..4f7ea5f296 --- 
/dev/null +++ b/plugins/modules/system/gconftool2.py @@ -0,0 +1,232 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2016, Kenneth D. Evensen +# Copyright: (c) 2017, Abhijeet Kasurde +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +module: gconftool2 +author: + - Kenneth D. Evensen (@kevensen) +short_description: Edit GNOME Configurations +description: + - This module allows for the manipulation of GNOME 2 Configuration via + gconftool-2. Please see the gconftool-2(1) man pages for more details. +options: + key: + description: + - A GConf preference key is an element in the GConf repository + that corresponds to an application preference. See man gconftool-2(1) + required: yes + value: + description: + - Preference keys typically have simple values such as strings, + integers, or lists of strings and integers. This is ignored if the state + is "get". See man gconftool-2(1) + value_type: + description: + - The type of value being set. This is ignored if the state is "get". + choices: [ bool, float, int, string ] + state: + description: + - The action to take upon the key/value. + required: yes + choices: [ absent, get, present ] + config_source: + description: + - Specify a configuration source to use rather than the default path. + See man gconftool-2(1) + direct: + description: + - Access the config database directly, bypassing server. If direct is + specified then the config_source must be specified as well. + See man gconftool-2(1) + type: bool + default: 'no' +''' + +EXAMPLES = """ +- name: Change the widget font to "Serif 12" + gconftool2: + key: "/desktop/gnome/interface/font_name" + value_type: "string" + value: "Serif 12" +""" + +RETURN = ''' + key: + description: The key specified in the module parameters + returned: success + type: str + sample: /desktop/gnome/interface/font_name + value_type: + description: The type of the value that was changed + returned: success + type: str + sample: string + value: + description: The value of the preference key after executing the module + returned: success + type: str + sample: "Serif 12" +... 
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+class GConf2Preference(object):
+    def __init__(self, ansible, key, value_type, value,
+                 direct=False, config_source=""):
+        self.ansible = ansible
+        self.key = key
+        self.value_type = value_type
+        self.value = value
+        self.config_source = config_source
+        self.direct = direct
+
+    def value_already_set(self):
+        return False
+
+    def call(self, call_type, fail_onerr=True):
+        """ Helper function to perform gconftool-2 operations """
+        config_source = ''
+        direct = ''
+        changed = False
+        out = ''
+
+        # If the configuration source is different from the default, create
+        # the argument
+        if self.config_source is not None and len(self.config_source) > 0:
+            config_source = "--config-source " + self.config_source
+
+        # If direct is true, create the argument
+        if self.direct:
+            direct = "--direct"
+
+        # Execute the call
+        cmd = "gconftool-2 "
+        try:
+            # If the call is "get", then we don't need as many parameters and
+            # we can ignore some
+            if call_type == 'get':
+                cmd += "--get {0}".format(self.key)
+            # Otherwise, we will use all relevant parameters
+            elif call_type == 'set':
+                cmd += "{0} {1} --type {2} --{3} {4} \"{5}\"".format(direct,
+                                                                     config_source,
+                                                                     self.value_type,
+                                                                     call_type,
+                                                                     self.key,
+                                                                     self.value)
+            elif call_type == 'unset':
+                cmd += "--unset {0}".format(self.key)
+
+            # Start external command
+            rc, out, err = self.ansible.run_command(cmd, use_unsafe_shell=True)
+
+            if len(err) > 0:
+                if fail_onerr:
+                    self.ansible.fail_json(msg='gconftool-2 failed with '
+                                               'error: %s' % (str(err)))
+            else:
+                changed = True
+
+        except OSError as exception:
+            self.ansible.fail_json(msg='gconftool-2 failed with exception: '
+                                       '%s' % exception)
+        return changed, out.rstrip()
+
+
+def main():
+    # Setup the Ansible module
+    module = AnsibleModule(
+        argument_spec=dict(
+            key=dict(type='str', required=True),
+            value_type=dict(type='str', choices=['bool', 'float', 'int', 'string']),
+            value=dict(type='str'),
+            state=dict(type='str', required=True, choices=['absent', 'get', 'present']),
+            direct=dict(type='bool', default=False),
+            config_source=dict(type='str'),
+        ),
+        supports_check_mode=True
+    )
+
+    state_values = {"present": "set", "absent": "unset", "get": "get"}
+
+    # Assign module values to dictionary values
+    key = module.params['key']
+    value_type = module.params['value_type']
+    # Normalize boolean values to the lowercase strings gconftool-2 expects,
+    # guarding against an unset value (e.g. when state is "get")
+    value = module.params['value']
+    if value is not None and value.lower() in ("true", "false"):
+        value = value.lower()
+
+    state = state_values[module.params['state']]
+    direct = module.params['direct']
+    config_source = module.params['config_source']
+
+    # Initialize some variables for later
+    change = False
+    new_value = ''
+
+    if state != "get":
+        if value is None or value == "":
+            module.fail_json(msg='State %s requires "value" to be set'
+                                 % str(state))
+        elif value_type is None or value_type == "":
+            module.fail_json(msg='State %s requires "value_type" to be set'
+                                 % str(state))
+
+        if direct and config_source is None:
+            module.fail_json(msg='If "direct" is "yes" then the '
+                                 '"config_source" must be specified')
+        elif not direct and config_source is not None:
+            module.fail_json(msg='If the "config_source" is specified '
+                                 'then "direct" must be "yes"')
+
+    # Create a gconf2 preference
+    gconf_pref = GConf2Preference(module, key, value_type,
+                                  value, direct, config_source)
+    # Now we get the current value; if it is not found, don't fail
+    _, current_value = gconf_pref.call("get", fail_onerr=False)
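+    # gconftool-2 reports a missing key on stderr, hence fail_onerr=False
+    # above: an unset key yields an empty current_value, which then simply
+    # compares unequal to any requested value below.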
+    # Check if the current value equals the value we want to set. If not,
+    # make a change
+    if current_value != value:
+        # If check mode, we know a change would have occurred.
+        if module.check_mode:
+            # So we will set the change to True
+            change = True
+            # And set the new_value to the value that would have been set
+            new_value = value
+        # If not check mode make the change.
+        else:
+            change, new_value = gconf_pref.call(state)
+    # If the value we want to set is the same as the current_value, we will
+    # set the new_value to the current_value for reporting
+    else:
+        new_value = current_value
+
+    facts = dict(gconftool2={'changed': change,
+                             'key': key,
+                             'value_type': value_type,
+                             'new_value': new_value,
+                             'previous_value': current_value,
+                             'playbook_value': module.params['value']})
+
+    module.exit_json(changed=change, ansible_facts=facts)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/system/interfaces_file.py b/plugins/modules/system/interfaces_file.py
new file mode 100644
index 0000000000..ce3b42db81
--- /dev/null
+++ b/plugins/modules/system/interfaces_file.py
@@ -0,0 +1,397 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright: (c) 2016, Roman Belyakovsky
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['stableinterface'],
+                    'supported_by': 'community'}
+
+DOCUMENTATION = '''
+---
+module: interfaces_file
+short_description: Tweak settings in /etc/network/interfaces files
+extends_documentation_fragment: files
+description:
+  - Manage (add, remove, change) individual interface options in an interfaces-style file without having
+    to manage the file as a whole with, say, M(template) or M(assemble). The interface has to already be
+    present in the file.
+  - Read information about interfaces from interfaces-styled files.
+options:
+  dest:
+    description:
+      - Path to the interfaces file
+    default: /etc/network/interfaces
+  iface:
+    description:
+      - Name of the interface, required for value changes or option remove
+  address_family:
+    description:
+      - Address family of the interface, useful if the same interface name is used for both inet and inet6
+  option:
+    description:
+      - Name of the option, required for value changes or option remove
+  value:
+    description:
+      - If I(option) is not present for the I(interface) and I(state) is C(present), the option will be added.
+        If I(option) already exists and is not C(pre-up), C(up), C(post-up) or C(down), its value will be updated.
+        C(pre-up), C(up), C(post-up) and C(down) options cannot be updated; for them, only adding new options,
+        removing existing ones or cleaning the whole option set are supported
+  backup:
+    description:
+      - Create a backup file including the timestamp information so you can get
+        the original file back if you somehow clobbered it incorrectly.
+    type: bool
+    default: 'no'
+  state:
+    description:
+      - If set to C(absent) the option or section will be removed if present instead of created.
+ default: "present" + choices: [ "present", "absent" ] + +notes: + - If option is defined multiple times last one will be updated but all will be deleted in case of an absent state +requirements: [] +author: "Roman Belyakovsky (@hryamzik)" +''' + +RETURN = ''' +dest: + description: destination file/path + returned: success + type: str + sample: "/etc/network/interfaces" +ifaces: + description: interfaces dictionary + returned: success + type: complex + contains: + ifaces: + description: interface dictionary + returned: success + type: dict + contains: + eth0: + description: Name of the interface + returned: success + type: dict + contains: + address_family: + description: interface address family + returned: success + type: str + sample: "inet" + method: + description: interface method + returned: success + type: str + sample: "manual" + mtu: + description: other options, all values returned as strings + returned: success + type: str + sample: "1500" + pre-up: + description: list of C(pre-up) scripts + returned: success + type: list + sample: + - "route add -net 10.10.10.0/24 gw 10.10.10.1 dev eth1" + - "route add -net 10.10.11.0/24 gw 10.10.11.1 dev eth2" + up: + description: list of C(up) scripts + returned: success + type: list + sample: + - "route add -net 10.10.10.0/24 gw 10.10.10.1 dev eth1" + - "route add -net 10.10.11.0/24 gw 10.10.11.1 dev eth2" + post-up: + description: list of C(post-up) scripts + returned: success + type: list + sample: + - "route add -net 10.10.10.0/24 gw 10.10.10.1 dev eth1" + - "route add -net 10.10.11.0/24 gw 10.10.11.1 dev eth2" + down: + description: list of C(down) scripts + returned: success + type: list + sample: + - "route del -net 10.10.10.0/24 gw 10.10.10.1 dev eth1" + - "route del -net 10.10.11.0/24 gw 10.10.11.1 dev eth2" +... 
+''' + +EXAMPLES = ''' +# Set eth1 mtu configuration value to 8000 +- interfaces_file: + dest: /etc/network/interfaces.d/eth1.cfg + iface: eth1 + option: mtu + value: 8000 + backup: yes + state: present + register: eth1_cfg +''' + +import os +import re +import tempfile + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_bytes + + +def lineDict(line): + return {'line': line, 'line_type': 'unknown'} + + +def optionDict(line, iface, option, value, address_family): + return {'line': line, 'iface': iface, 'option': option, 'value': value, 'line_type': 'option', 'address_family': address_family} + + +def getValueFromLine(s): + spaceRe = re.compile(r'\s+') + for m in spaceRe.finditer(s): + pass + valueEnd = m.start() + option = s.split()[0] + optionStart = s.find(option) + optionLen = len(option) + valueStart = re.search(r'\s', s[optionLen + optionStart:]).end() + optionLen + optionStart + return s[valueStart:valueEnd] + + +def read_interfaces_file(module, filename): + f = open(filename, 'r') + return read_interfaces_lines(module, f) + + +def read_interfaces_lines(module, line_strings): + lines = [] + ifaces = {} + currently_processing = None + i = 0 + for line in line_strings: + i += 1 + words = line.split() + if len(words) < 1: + lines.append(lineDict(line)) + continue + if words[0][0] == "#": + lines.append(lineDict(line)) + continue + if words[0] == "mapping": + # currmap = calloc(1, sizeof *currmap); + lines.append(lineDict(line)) + currently_processing = "MAPPING" + elif words[0] == "source": + lines.append(lineDict(line)) + currently_processing = "NONE" + elif words[0] == "source-dir": + lines.append(lineDict(line)) + currently_processing = "NONE" + elif words[0] == "source-directory": + lines.append(lineDict(line)) + currently_processing = "NONE" + elif words[0] == "iface": + currif = { + "pre-up": [], + "up": [], + "down": [], + "post-up": [] + } + iface_name = words[1] + try: + currif['address_family'] = words[2] + except IndexError: + currif['address_family'] = None + address_family = currif['address_family'] + try: + currif['method'] = words[3] + except IndexError: + currif['method'] = None + + ifaces[iface_name] = currif + lines.append({'line': line, 'iface': iface_name, 'line_type': 'iface', 'params': currif, 'address_family': address_family}) + currently_processing = "IFACE" + elif words[0] == "auto": + lines.append(lineDict(line)) + currently_processing = "NONE" + elif words[0].startswith("allow-"): + lines.append(lineDict(line)) + currently_processing = "NONE" + elif words[0] == "no-auto-down": + lines.append(lineDict(line)) + currently_processing = "NONE" + elif words[0] == "no-scripts": + lines.append(lineDict(line)) + currently_processing = "NONE" + else: + if currently_processing == "IFACE": + option_name = words[0] + # TODO: if option_name in currif.options + value = getValueFromLine(line) + lines.append(optionDict(line, iface_name, option_name, value, address_family)) + if option_name in ["pre-up", "up", "down", "post-up"]: + currif[option_name].append(value) + else: + currif[option_name] = value + elif currently_processing == "MAPPING": + lines.append(lineDict(line)) + elif currently_processing == "NONE": + lines.append(lineDict(line)) + else: + module.fail_json(msg="misplaced option %s in line %d" % (line, i)) + return None, None + return lines, ifaces + + +def setInterfaceOption(module, lines, iface, option, raw_value, state, address_family=None): + value = str(raw_value) + changed = False + + iface_lines = [item for item in 
lines if "iface" in item and item["iface"] == iface] + if address_family is not None: + iface_lines = [item for item in iface_lines + if "address_family" in item and item["address_family"] == address_family] + + if len(iface_lines) < 1: + # interface not found + module.fail_json(msg="Error: interface %s not found" % iface) + return changed, None + + iface_options = list(filter(lambda i: i['line_type'] == 'option', iface_lines)) + target_options = list(filter(lambda i: i['option'] == option, iface_options)) + + if state == "present": + if len(target_options) < 1: + changed = True + # add new option + last_line_dict = iface_lines[-1] + changed, lines = addOptionAfterLine(option, value, iface, lines, last_line_dict, iface_options, address_family) + else: + if option in ["pre-up", "up", "down", "post-up"]: + if len(list(filter(lambda i: i['value'] == value, target_options))) < 1: + changed, lines = addOptionAfterLine(option, value, iface, lines, target_options[-1], iface_options, address_family) + else: + # if more than one option found edit the last one + if target_options[-1]['value'] != value: + changed = True + target_option = target_options[-1] + old_line = target_option['line'] + old_value = target_option['value'] + address_family = target_option['address_family'] + prefix_start = old_line.find(option) + optionLen = len(option) + old_value_position = re.search(r"\s+".join(old_value.split()), old_line[prefix_start + optionLen:]) + start = old_value_position.start() + prefix_start + optionLen + end = old_value_position.end() + prefix_start + optionLen + line = old_line[:start] + value + old_line[end:] + index = len(lines) - lines[::-1].index(target_option) - 1 + lines[index] = optionDict(line, iface, option, value, address_family) + elif state == "absent": + if len(target_options) >= 1: + if option in ["pre-up", "up", "down", "post-up"] and value is not None and value != "None": + for target_option in filter(lambda i: i['value'] == value, target_options): + changed = True + lines = list(filter(lambda ln: ln != target_option, lines)) + else: + changed = True + for target_option in target_options: + lines = list(filter(lambda ln: ln != target_option, lines)) + else: + module.fail_json(msg="Error: unsupported state %s, has to be either present or absent" % state) + + return changed, lines + + +def addOptionAfterLine(option, value, iface, lines, last_line_dict, iface_options, address_family): + # Changing method of interface is not an addition + if option == 'method': + changed = False + for ln in lines: + if ln.get('line_type', '') == 'iface' and ln.get('iface', '') == iface and value != ln.get('params', {}).get('method', ''): + changed = True + ln['line'] = re.sub(ln.get('params', {}).get('method', '') + '$', value, ln.get('line')) + ln['params']['method'] = value + return changed, lines + + last_line = last_line_dict['line'] + prefix_start = last_line.find(last_line.split()[0]) + suffix_start = last_line.rfind(last_line.split()[-1]) + len(last_line.split()[-1]) + prefix = last_line[:prefix_start] + + if len(iface_options) < 1: + # interface has no options, ident + prefix += " " + + line = prefix + "%s %s" % (option, value) + last_line[suffix_start:] + option_dict = optionDict(line, iface, option, value, address_family) + index = len(lines) - lines[::-1].index(last_line_dict) + lines.insert(index, option_dict) + return True, lines + + +def write_changes(module, lines, dest): + + tmpfd, tmpfile = tempfile.mkstemp() + f = os.fdopen(tmpfd, 'wb') + f.write(to_bytes(''.join(lines), 
errors='surrogate_or_strict')) + f.close() + module.atomic_move(tmpfile, os.path.realpath(dest)) + + +def main(): + module = AnsibleModule( + argument_spec=dict( + dest=dict(type='path', default='/etc/network/interfaces'), + iface=dict(type='str'), + address_family=dict(type='str'), + option=dict(type='str'), + value=dict(type='str'), + backup=dict(type='bool', default=False), + state=dict(type='str', default='present', choices=['absent', 'present']), + ), + add_file_common_args=True, + supports_check_mode=True, + required_by=dict( + option=('iface',), + ), + ) + + dest = module.params['dest'] + iface = module.params['iface'] + address_family = module.params['address_family'] + option = module.params['option'] + value = module.params['value'] + backup = module.params['backup'] + state = module.params['state'] + + if option is not None and state == "present" and value is None: + module.fail_json(msg="Value must be set if option is defined and state is 'present'") + + lines, ifaces = read_interfaces_file(module, dest) + + changed = False + + if option is not None: + changed, lines = setInterfaceOption(module, lines, iface, option, value, state, address_family) + + if changed: + _, ifaces = read_interfaces_lines(module, [d['line'] for d in lines if 'line' in d]) + + if changed and not module.check_mode: + if backup: + module.backup_local(dest) + write_changes(module, [d['line'] for d in lines if 'line' in d], dest) + + module.exit_json(dest=dest, changed=changed, ifaces=ifaces) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/system/java_cert.py b/plugins/modules/system/java_cert.py new file mode 100644 index 0000000000..225cf75dcd --- /dev/null +++ b/plugins/modules/system/java_cert.py @@ -0,0 +1,403 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2013, RSD Services S.A +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = r''' +--- +module: java_cert +short_description: Uses keytool to import/remove key from java keystore (cacerts) +description: + - This is a wrapper module around keytool, which can be used to import/remove + certificates from a given java keystore. +options: + cert_url: + description: + - Basic URL to fetch SSL certificate from. + - One of C(cert_url) or C(cert_path) is required to load certificate. + type: str + cert_port: + description: + - Port to connect to URL. + - This will be used to create server URL:PORT. + type: int + default: 443 + cert_path: + description: + - Local path to load certificate from. + - One of C(cert_url) or C(cert_path) is required to load certificate. + type: path + cert_alias: + description: + - Imported certificate alias. + - The alias is used when checking for the presence of a certificate in the keystore. + type: str + trust_cacert: + description: + - Trust imported cert as CAcert. + type: bool + default: False + pkcs12_path: + description: + - Local path to load PKCS12 keystore from. + type: path + pkcs12_password: + description: + - Password for importing from PKCS12 keystore. + type: str + default: '' + pkcs12_alias: + description: + - Alias in the PKCS12 keystore. + type: str + keystore_path: + description: + - Path to keystore. + type: path + keystore_pass: + description: + - Keystore password. 
+ type: str + required: true + keystore_create: + description: + - Create keystore if it does not exist. + type: bool + keystore_type: + description: + - Keystore type (JCEKS, JKS). + type: str + executable: + description: + - Path to keytool binary if not used we search in PATH for it. + type: str + default: keytool + state: + description: + - Defines action which can be either certificate import or removal. + type: str + choices: [ absent, present ] + default: present +author: +- Adam Hamsik (@haad) +''' + +EXAMPLES = r''' +- name: Import SSL certificate from google.com to a given cacerts keystore + java_cert: + cert_url: google.com + cert_port: 443 + keystore_path: /usr/lib/jvm/jre7/lib/security/cacerts + keystore_pass: changeit + state: present + +- name: Remove certificate with given alias from a keystore + java_cert: + cert_url: google.com + keystore_path: /usr/lib/jvm/jre7/lib/security/cacerts + keystore_pass: changeit + executable: /usr/lib/jvm/jre7/bin/keytool + state: absent + +- name: Import trusted CA from SSL certificate + java_cert: + cert_path: /opt/certs/rootca.crt + keystore_path: /tmp/cacerts + keystore_pass: changeit + keystore_create: yes + state: present + cert_alias: LE_RootCA + trust_cacert: True + +- name: Import SSL certificate from google.com to a keystore, create it if it doesn't exist + java_cert: + cert_url: google.com + keystore_path: /tmp/cacerts + keystore_pass: changeit + keystore_create: yes + state: present + +- name: Import a pkcs12 keystore with a specified alias, create it if it doesn't exist + java_cert: + pkcs12_path: "/tmp/importkeystore.p12" + cert_alias: default + keystore_path: /opt/wildfly/standalone/configuration/defaultkeystore.jks + keystore_pass: changeit + keystore_create: yes + state: present + +- name: Import SSL certificate to JCEKS keystore + java_cert: + pkcs12_path: "/tmp/importkeystore.p12" + pkcs12_alias: default + pkcs12_password: somepass + cert_alias: default + keystore_path: /opt/someapp/security/keystore.jceks + keystore_type: "JCEKS" + keystore_pass: changeit + keystore_create: yes + state: present +''' + +RETURN = r''' +msg: + description: Output from stdout of keytool command after execution of given command. + returned: success + type: str + sample: "Module require existing keystore at keystore_path '/tmp/test/cacerts'" + +rc: + description: Keytool command execution return value. + returned: success + type: int + sample: "0" + +cmd: + description: Executed command to get action done. 
+ returned: success + type: str + sample: "keytool -importcert -noprompt -keystore" +''' + +import os +import re + +# import module snippets +from ansible.module_utils.basic import AnsibleModule + + +def get_keystore_type(keystore_type): + ''' Check that custom keystore is presented in parameters ''' + if keystore_type: + return " -storetype '%s'" % keystore_type + return '' + + +def check_cert_present(module, executable, keystore_path, keystore_pass, alias, keystore_type): + ''' Check if certificate with alias is present in keystore + located at keystore_path ''' + test_cmd = ("%s -noprompt -list -keystore '%s' -storepass '%s' " + "-alias '%s' %s") % (executable, keystore_path, keystore_pass, alias, get_keystore_type(keystore_type)) + + (check_rc, _, _) = module.run_command(test_cmd) + if check_rc == 0: + return True + return False + + +def import_cert_url(module, executable, url, port, keystore_path, keystore_pass, alias, keystore_type, trust_cacert): + ''' Import certificate from URL into keystore located at keystore_path ''' + + https_proxy = os.getenv("https_proxy") + no_proxy = os.getenv("no_proxy") + + proxy_opts = '' + if https_proxy is not None: + (proxy_host, proxy_port) = https_proxy.split(':') + proxy_opts = "-J-Dhttps.proxyHost=%s -J-Dhttps.proxyPort=%s" % (proxy_host, proxy_port) + + if no_proxy is not None: + # For Java's nonProxyHosts property, items are separated by '|', + # and patterns have to start with "*". + non_proxy_hosts = no_proxy.replace(',', '|') + non_proxy_hosts = re.sub(r'(^|\|)\.', r'\1*.', non_proxy_hosts) + + # The property name is http.nonProxyHosts, there is no + # separate setting for HTTPS. + proxy_opts += " -J-Dhttp.nonProxyHosts='%s'" % non_proxy_hosts + + fetch_cmd = "%s -printcert -rfc -sslserver %s %s:%d" % (executable, proxy_opts, url, port) + import_cmd = ("%s -importcert -noprompt -keystore '%s' " + "-storepass '%s' -alias '%s' %s") % (executable, keystore_path, + keystore_pass, alias, + get_keystore_type(keystore_type)) + if trust_cacert: + import_cmd = import_cmd + " -trustcacerts" + + # Fetch SSL certificate from remote host. 
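+    # keytool's '-printcert -rfc' emits the fetched certificate in PEM form
+    # on stdout; feeding it to the -importcert call below through 'data='
+    # avoids staging the certificate in a temporary file.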
+ (_, fetch_out, _) = module.run_command(fetch_cmd, check_rc=True) + + # Use remote certificate from remote host and import it to a java keystore + (import_rc, import_out, import_err) = module.run_command(import_cmd, + data=fetch_out, + check_rc=False) + diff = {'before': '\n', 'after': '%s\n' % alias} + if import_rc == 0: + module.exit_json(changed=True, msg=import_out, + rc=import_rc, cmd=import_cmd, stdout=import_out, + diff=diff) + else: + module.fail_json(msg=import_out, rc=import_rc, cmd=import_cmd, + error=import_err) + + +def import_cert_path(module, executable, path, keystore_path, keystore_pass, alias, keystore_type, trust_cacert): + ''' Import certificate from path into keystore located on + keystore_path as alias ''' + import_cmd = ("%s -importcert -noprompt -keystore '%s' " + "-storepass '%s' -file '%s' -alias '%s' %s") % (executable, keystore_path, + keystore_pass, path, alias, + get_keystore_type(keystore_type)) + + if trust_cacert: + import_cmd = import_cmd + " -trustcacerts" + + # Use local certificate from local path and import it to a java keystore + (import_rc, import_out, import_err) = module.run_command(import_cmd, + check_rc=False) + + diff = {'before': '\n', 'after': '%s\n' % alias} + if import_rc == 0: + module.exit_json(changed=True, msg=import_out, + rc=import_rc, cmd=import_cmd, stdout=import_out, + error=import_err, diff=diff) + else: + module.fail_json(msg=import_out, rc=import_rc, cmd=import_cmd) + + +def import_pkcs12_path(module, executable, path, keystore_path, keystore_pass, pkcs12_pass, pkcs12_alias, alias, keystore_type): + ''' Import pkcs12 from path into keystore located on + keystore_path as alias ''' + import_cmd = ("%s -importkeystore -noprompt -destkeystore '%s' -srcstoretype PKCS12 " + "-deststorepass '%s' -destkeypass '%s' -srckeystore '%s' -srcstorepass '%s' " + "-srcalias '%s' -destalias '%s' %s") % (executable, keystore_path, keystore_pass, + keystore_pass, path, pkcs12_pass, pkcs12_alias, + alias, get_keystore_type(keystore_type)) + + # Use local certificate from local path and import it to a java keystore + (import_rc, import_out, import_err) = module.run_command(import_cmd, + check_rc=False) + + diff = {'before': '\n', 'after': '%s\n' % alias} + if import_rc == 0: + module.exit_json(changed=True, msg=import_out, + rc=import_rc, cmd=import_cmd, stdout=import_out, + error=import_err, diff=diff) + else: + module.fail_json(msg=import_out, rc=import_rc, cmd=import_cmd) + + +def delete_cert(module, executable, keystore_path, keystore_pass, alias, keystore_type): + ''' Delete certificate identified with alias from keystore on keystore_path ''' + del_cmd = ("%s -delete -keystore '%s' -storepass '%s' " + "-alias '%s' %s") % (executable, keystore_path, keystore_pass, alias, get_keystore_type(keystore_type)) + + # Delete SSL certificate from keystore + (del_rc, del_out, del_err) = module.run_command(del_cmd, check_rc=True) + + diff = {'before': '%s\n' % alias, 'after': None} + + module.exit_json(changed=True, msg=del_out, + rc=del_rc, cmd=del_cmd, stdout=del_out, + error=del_err, diff=diff) + + +def test_keytool(module, executable): + ''' Test if keytool is actually executable or not ''' + module.run_command("%s" % executable, check_rc=True) + + +def test_keystore(module, keystore_path): + ''' Check if we can access keystore as file or not ''' + if keystore_path is None: + keystore_path = '' + + if not os.path.exists(keystore_path) and not os.path.isfile(keystore_path): + # Keystore doesn't exist we want to create it + module.fail_json(changed=False, 
msg="Module require existing keystore at keystore_path '%s'" % keystore_path) + + +def main(): + argument_spec = dict( + cert_url=dict(type='str'), + cert_path=dict(type='path'), + pkcs12_path=dict(type='path'), + pkcs12_password=dict(type='str', no_log=True), + pkcs12_alias=dict(type='str'), + cert_alias=dict(type='str'), + cert_port=dict(type='int', default=443), + keystore_path=dict(type='path'), + keystore_pass=dict(type='str', required=True, no_log=True), + trust_cacert=dict(type='bool', default=False), + keystore_create=dict(type='bool', default=False), + keystore_type=dict(type='str'), + executable=dict(type='str', default='keytool'), + state=dict(type='str', default='present', choices=['absent', 'present']), + ) + + module = AnsibleModule( + argument_spec=argument_spec, + required_one_of=[['cert_path', 'cert_url', 'pkcs12_path']], + required_together=[['keystore_path', 'keystore_pass']], + mutually_exclusive=[ + ['cert_url', 'cert_path', 'pkcs12_path'] + ], + supports_check_mode=True, + ) + + url = module.params.get('cert_url') + path = module.params.get('cert_path') + port = module.params.get('cert_port') + + pkcs12_path = module.params.get('pkcs12_path') + pkcs12_pass = module.params.get('pkcs12_password', '') + pkcs12_alias = module.params.get('pkcs12_alias', '1') + + cert_alias = module.params.get('cert_alias') or url + trust_cacert = module.params.get('trust_cacert') + + keystore_path = module.params.get('keystore_path') + keystore_pass = module.params.get('keystore_pass') + keystore_create = module.params.get('keystore_create') + keystore_type = module.params.get('keystore_type') + executable = module.params.get('executable') + state = module.params.get('state') + + if path and not cert_alias: + module.fail_json(changed=False, + msg="Using local path import from %s requires alias argument." 
+ % keystore_path) + + test_keytool(module, executable) + + if not keystore_create: + test_keystore(module, keystore_path) + + cert_present = check_cert_present(module, executable, keystore_path, + keystore_pass, cert_alias, keystore_type) + + if state == 'absent' and cert_present: + if module.check_mode: + module.exit_json(changed=True) + + delete_cert(module, executable, keystore_path, keystore_pass, cert_alias, keystore_type) + + elif state == 'present' and not cert_present: + if module.check_mode: + module.exit_json(changed=True) + + if pkcs12_path: + import_pkcs12_path(module, executable, pkcs12_path, keystore_path, + keystore_pass, pkcs12_pass, pkcs12_alias, cert_alias, keystore_type) + + if path: + import_cert_path(module, executable, path, keystore_path, + keystore_pass, cert_alias, keystore_type, trust_cacert) + + if url: + import_cert_url(module, executable, url, port, keystore_path, + keystore_pass, cert_alias, keystore_type, trust_cacert) + + module.exit_json(changed=False) + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/system/java_keystore.py b/plugins/modules/system/java_keystore.py new file mode 100644 index 0000000000..21d029fa2f --- /dev/null +++ b/plugins/modules/system/java_keystore.py @@ -0,0 +1,288 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2016, Guillaume Grossetie +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: java_keystore +short_description: Create or delete a Java keystore in JKS format. +description: + - Create or delete a Java keystore in JKS format for a given certificate. +options: + name: + description: + - Name of the certificate. + required: true + certificate: + description: + - Certificate that should be used to create the key store. + required: true + private_key: + description: + - Private key that should be used to create the key store. + required: true + password: + description: + - Password that should be used to secure the key store. + required: true + dest: + description: + - Absolute path where the jks should be generated. + required: true + owner: + description: + - Name of the user that should own jks file. + required: false + group: + description: + - Name of the group that should own jks file. + required: false + mode: + description: + - Mode the file should be. + required: false + force: + description: + - Key store will be created even if it already exists. 
+ required: false + type: bool + default: 'no' +requirements: [openssl, keytool] +author: Guillaume Grossetie (@Mogztter) +''' + +EXAMPLES = ''' +# Create a key store for the given certificate (inline) +- java_keystore: + name: example + certificate: | + -----BEGIN CERTIFICATE----- + h19dUZ2co2fI/ibYiwxWk4aeNE6KWvCaTQOMQ8t6Uo2XKhpL/xnjoAgh1uCQN/69 + MG+34+RhUWzCfdZH7T8/qDxJw2kEPKluaYh7KnMsba+5jHjmtzix5QIDAQABo4IB + -----END CERTIFICATE----- + private_key: | + -----BEGIN RSA PRIVATE KEY----- + DBVFTEVDVFJJQ0lURSBERSBGUkFOQ0UxFzAVBgNVBAsMDjAwMDIgNTUyMDgxMzE3 + GLlDNMw/uHyME7gHFsqJA7O11VY6O5WQ4IDP3m/s5ZV6s+Nn6Lerz17VZ99 + -----END RSA PRIVATE KEY----- + password: changeit + dest: /etc/security/keystore.jks + +# Create a key store for the given certificate (lookup) +- java_keystore: + name: example + certificate: "{{lookup('file', '/path/to/certificate.crt') }}" + private_key: "{{lookup('file', '/path/to/private.key') }}" + password: changeit + dest: /etc/security/keystore.jks +''' + +RETURN = ''' +msg: + description: Output from stdout of keytool/openssl command after execution of given command or an error. + returned: changed and failure + type: str + sample: "Unable to find the current certificate fingerprint in ..." + +rc: + description: keytool/openssl command execution return value + returned: changed and failure + type: int + sample: "0" + +cmd: + description: Executed command to get action done + returned: changed and failure + type: str + sample: "openssl x509 -noout -in /tmp/cert.crt -fingerprint -sha256" +''' + + +from ansible.module_utils.basic import AnsibleModule +import os +import re + + +def read_certificate_fingerprint(module, openssl_bin, certificate_path): + current_certificate_fingerprint_cmd = "%s x509 -noout -in %s -fingerprint -sha256" % (openssl_bin, certificate_path) + (rc, current_certificate_fingerprint_out, current_certificate_fingerprint_err) = run_commands(module, current_certificate_fingerprint_cmd) + if rc != 0: + return module.fail_json(msg=current_certificate_fingerprint_out, + err=current_certificate_fingerprint_err, + rc=rc, + cmd=current_certificate_fingerprint_cmd) + + current_certificate_match = re.search(r"=([\w:]+)", current_certificate_fingerprint_out) + if not current_certificate_match: + return module.fail_json( + msg="Unable to find the current certificate fingerprint in %s" % current_certificate_fingerprint_out, + rc=rc, + cmd=current_certificate_fingerprint_err + ) + + return current_certificate_match.group(1) + + +def read_stored_certificate_fingerprint(module, keytool_bin, alias, keystore_path, keystore_password): + stored_certificate_fingerprint_cmd = "%s -list -alias '%s' -keystore '%s' -storepass '%s' -v" % (keytool_bin, alias, keystore_path, keystore_password) + (rc, stored_certificate_fingerprint_out, stored_certificate_fingerprint_err) = run_commands(module, stored_certificate_fingerprint_cmd) + if rc != 0: + if "keytool error: java.lang.Exception: Alias <%s> does not exist" % alias not in stored_certificate_fingerprint_out: + return module.fail_json(msg=stored_certificate_fingerprint_out, + err=stored_certificate_fingerprint_err, + rc=rc, + cmd=stored_certificate_fingerprint_cmd) + else: + return None + else: + stored_certificate_match = re.search(r"SHA256: ([\w:]+)", stored_certificate_fingerprint_out) + if not stored_certificate_match: + return module.fail_json( + msg="Unable to find the stored certificate fingerprint in %s" % stored_certificate_fingerprint_out, + rc=rc, + cmd=stored_certificate_fingerprint_cmd + ) + + return 
stored_certificate_match.group(1) + + +def run_commands(module, cmd, check_rc=True): + return module.run_command(cmd, check_rc) + + +def create_file(path, content): + with open(path, 'w') as f: + f.write(content) + return path + + +def create_tmp_certificate(module): + return create_file("/tmp/%s.crt" % module.params['name'], module.params['certificate']) + + +def create_tmp_private_key(module): + return create_file("/tmp/%s.key" % module.params['name'], module.params['private_key']) + + +def cert_changed(module, openssl_bin, keytool_bin, keystore_path, keystore_pass, alias): + certificate_path = create_tmp_certificate(module) + try: + current_certificate_fingerprint = read_certificate_fingerprint(module, openssl_bin, certificate_path) + stored_certificate_fingerprint = read_stored_certificate_fingerprint(module, keytool_bin, alias, keystore_path, keystore_pass) + return current_certificate_fingerprint != stored_certificate_fingerprint + finally: + os.remove(certificate_path) + + +def create_jks(module, name, openssl_bin, keytool_bin, keystore_path, password): + if module.check_mode: + module.exit_json(changed=True) + else: + certificate_path = create_tmp_certificate(module) + private_key_path = create_tmp_private_key(module) + try: + if os.path.exists(keystore_path): + os.remove(keystore_path) + + keystore_p12_path = "/tmp/keystore.p12" + if os.path.exists(keystore_p12_path): + os.remove(keystore_p12_path) + + export_p12_cmd = "%s pkcs12 -export -name '%s' -in '%s' -inkey '%s' -out '%s' -passout 'pass:%s'" % ( + openssl_bin, name, certificate_path, private_key_path, keystore_p12_path, password) + (rc, export_p12_out, export_p12_err) = run_commands(module, export_p12_cmd) + if rc != 0: + return module.fail_json(msg=export_p12_out, + rc=rc, + cmd=export_p12_cmd) + + import_keystore_cmd = "%s -importkeystore " \ + "-destkeystore '%s' " \ + "-srckeystore '%s' " \ + "-srcstoretype pkcs12 " \ + "-alias '%s' " \ + "-deststorepass '%s' " \ + "-srcstorepass '%s' " \ + "-noprompt" % (keytool_bin, keystore_path, keystore_p12_path, name, password, password) + (rc, import_keystore_out, import_keystore_err) = run_commands(module, import_keystore_cmd) + if rc == 0: + update_jks_perm(module, keystore_path) + return module.exit_json(changed=True, + msg=import_keystore_out, + rc=rc, + cmd=import_keystore_cmd, + stdout_lines=import_keystore_out) + else: + return module.fail_json(msg=import_keystore_out, + rc=rc, + cmd=import_keystore_cmd) + finally: + os.remove(certificate_path) + os.remove(private_key_path) + + +def update_jks_perm(module, keystore_path): + file_args = module.load_file_common_arguments(module.params, path=keystore_path) + module.set_fs_attributes_if_different(file_args, False) + + +def process_jks(module): + name = module.params['name'] + password = module.params['password'] + keystore_path = module.params['dest'] + force = module.params['force'] + openssl_bin = module.get_bin_path('openssl', True) + keytool_bin = module.get_bin_path('keytool', True) + + if os.path.exists(keystore_path): + if force: + create_jks(module, name, openssl_bin, keytool_bin, keystore_path, password) + else: + if cert_changed(module, openssl_bin, keytool_bin, keystore_path, password, name): + create_jks(module, name, openssl_bin, keytool_bin, keystore_path, password) + else: + if not module.check_mode: + update_jks_perm(module, keystore_path) + return module.exit_json(changed=False) + else: + create_jks(module, name, openssl_bin, keytool_bin, keystore_path, password) + + +class ArgumentSpec(object): + def 
__init__(self):
+        self.supports_check_mode = True
+        self.add_file_common_args = True
+        argument_spec = dict(
+            name=dict(required=True),
+            certificate=dict(required=True, no_log=True),
+            private_key=dict(required=True, no_log=True),
+            password=dict(required=True, no_log=True),
+            dest=dict(required=True),
+            force=dict(required=False, default=False, type='bool')
+        )
+        self.argument_spec = argument_spec
+
+
+def main():
+    spec = ArgumentSpec()
+    module = AnsibleModule(
+        argument_spec=spec.argument_spec,
+        add_file_common_args=spec.add_file_common_args,
+        supports_check_mode=spec.supports_check_mode
+    )
+    process_jks(module)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/system/kernel_blacklist.py b/plugins/modules/system/kernel_blacklist.py
new file mode 100644
index 0000000000..05a4ec9c3f
--- /dev/null
+++ b/plugins/modules/system/kernel_blacklist.py
@@ -0,0 +1,154 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2013, Matthias Vogelgesang
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+DOCUMENTATION = '''
+---
+module: kernel_blacklist
+author:
+- Matthias Vogelgesang (@matze)
+short_description: Blacklist kernel modules
+description:
+    - Add or remove kernel modules from the blacklist.
+options:
+    name:
+        description:
+            - Name of the kernel module to blacklist or whitelist.
+        required: true
+    state:
+        description:
+            - Whether the module should be present in the blacklist or absent.
+        choices: [ absent, present ]
+        default: present
+    blacklist_file:
+        description:
+            - If specified, use this blacklist file instead of
+              C(/etc/modprobe.d/blacklist-ansible.conf).
+''' + +EXAMPLES = ''' +- name: Blacklist the nouveau driver module + kernel_blacklist: + name: nouveau + state: present +''' + +import os +import re + +from ansible.module_utils.basic import AnsibleModule + + +class Blacklist(object): + def __init__(self, module, filename, checkmode): + self.filename = filename + self.module = module + self.checkmode = checkmode + + def create_file(self): + if not self.checkmode and not os.path.exists(self.filename): + open(self.filename, 'a').close() + return True + elif self.checkmode and not os.path.exists(self.filename): + self.filename = os.devnull + return True + else: + return False + + def get_pattern(self): + return r'^blacklist\s*' + self.module + '$' + + def readlines(self): + f = open(self.filename, 'r') + lines = f.readlines() + f.close() + return lines + + def module_listed(self): + lines = self.readlines() + pattern = self.get_pattern() + + for line in lines: + stripped = line.strip() + if stripped.startswith('#'): + continue + + if re.match(pattern, stripped): + return True + + return False + + def remove_module(self): + lines = self.readlines() + pattern = self.get_pattern() + + if self.checkmode: + f = open(os.devnull, 'w') + else: + f = open(self.filename, 'w') + + for line in lines: + if not re.match(pattern, line.strip()): + f.write(line) + + f.close() + + def add_module(self): + if self.checkmode: + f = open(os.devnull, 'a') + else: + f = open(self.filename, 'a') + + f.write('blacklist %s\n' % self.module) + + f.close() + + +def main(): + module = AnsibleModule( + argument_spec=dict( + name=dict(type='str', required=True), + state=dict(type='str', default='present', choices=['absent', 'present']), + blacklist_file=dict(type='str') + ), + supports_check_mode=True, + ) + + args = dict(changed=False, failed=False, + name=module.params['name'], state=module.params['state']) + + filename = '/etc/modprobe.d/blacklist-ansible.conf' + + if module.params['blacklist_file']: + filename = module.params['blacklist_file'] + + blacklist = Blacklist(args['name'], filename, module.check_mode) + + if blacklist.create_file(): + args['changed'] = True + else: + args['changed'] = False + + if blacklist.module_listed(): + if args['state'] == 'absent': + blacklist.remove_module() + args['changed'] = True + else: + if args['state'] == 'present': + blacklist.add_module() + args['changed'] = True + + module.exit_json(**args) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/system/lbu.py b/plugins/modules/system/lbu.py new file mode 100644 index 0000000000..3658517933 --- /dev/null +++ b/plugins/modules/system/lbu.py @@ -0,0 +1,130 @@ +#!/usr/bin/python + +# Copyright: (c) 2019, Kaarle Ritvanen +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = { + 'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community' +} + +DOCUMENTATION = ''' +--- +module: lbu + +short_description: Local Backup Utility for Alpine Linux + + +description: +- Manage Local Backup Utility of Alpine Linux in run-from-RAM mode + +options: + commit: + description: + - Control whether to commit changed files. + type: bool + exclude: + description: + - List of paths to exclude. + type: list + include: + description: + - List of paths to include. 
+ type: list + +author: +- Kaarle Ritvanen (@kunkku) +''' + +EXAMPLES = ''' +# Commit changed files (if any) +- name: Commit + lbu: + commit: true + +# Exclude path and commit +- name: Exclude directory + lbu: + commit: true + exclude: + - /etc/opt + +# Include paths without committing +- name: Include file and directory + lbu: + include: + - /root/.ssh/authorized_keys + - /var/lib/misc +''' + +RETURN = ''' +msg: + description: Error message + type: str + returned: on failure +''' + +from ansible.module_utils.basic import AnsibleModule + +import os.path + + +def run_module(): + module = AnsibleModule( + argument_spec={ + 'commit': {'type': 'bool'}, + 'exclude': {'type': 'list', 'elements': 'str'}, + 'include': {'type': 'list', 'elements': 'str'} + }, + supports_check_mode=True + ) + + changed = False + + def run_lbu(*args): + code, stdout, stderr = module.run_command( + [module.get_bin_path('lbu', required=True)] + list(args) + ) + if code: + module.fail_json(changed=changed, msg=stderr) + return stdout + + update = False + commit = False + + for param in ('include', 'exclude'): + if module.params[param]: + paths = run_lbu(param, '-l').split('\n') + for path in module.params[param]: + if os.path.normpath('/' + path)[1:] not in paths: + update = True + + if module.params['commit']: + commit = update or run_lbu('status') > '' + + if module.check_mode: + module.exit_json(changed=update or commit) + + if update: + for param in ('include', 'exclude'): + if module.params[param]: + run_lbu(param, *module.params[param]) + changed = True + + if commit: + run_lbu('commit') + changed = True + + module.exit_json(changed=changed) + + +def main(): + run_module() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/system/listen_ports_facts.py b/plugins/modules/system/listen_ports_facts.py new file mode 100644 index 0000000000..f25396ae64 --- /dev/null +++ b/plugins/modules/system/listen_ports_facts.py @@ -0,0 +1,248 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright: (c) 2017, Nathan Davison +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = r''' +--- +module: listen_ports_facts + +author: + - Nathan Davison (@ndavison) + + +description: + - Gather facts on processes listening on TCP and UDP ports. + +short_description: Gather facts on processes listening on TCP and UDP ports. 
+''' + +EXAMPLES = r''' +- name: Gather facts on listening ports + listen_ports_facts: + +- name: TCP whitelist violation + debug: + msg: TCP port {{ item.port }} by pid {{ item.pid }} violates the whitelist + vars: + tcp_listen_violations: "{{ ansible_facts.tcp_listen | selectattr('port', 'in', tcp_whitelist) | list }}" + tcp_whitelist: + - 22 + - 25 + loop: "{{ tcp_listen_violations }}" + +- name: List TCP ports + debug: + msg: "{{ ansible_facts.tcp_listen | map(attribute='port') | sort | list }}" + +- name: List UDP ports + debug: + msg: "{{ ansible_facts.udp_listen | map(attribute='port') | sort | list }}" + +- name: List all ports + debug: + msg: "{{ (ansible_facts.tcp_listen + ansible_facts.udp_listen) | map(attribute='port') | unique | sort | list }}" +''' + +RETURN = r''' +ansible_facts: + description: Dictionary containing details of TCP and UDP ports with listening servers + returned: always + type: complex + contains: + tcp_listen: + description: A list of processes that are listening on a TCP port. + returned: if TCP servers were found + type: list + contains: + address: + description: The address the server is listening on. + returned: always + type: str + sample: "0.0.0.0" + name: + description: The name of the listening process. + returned: if user permissions allow + type: str + sample: "mysqld" + pid: + description: The pid of the listening process. + returned: always + type: int + sample: 1223 + port: + description: The port the server is listening on. + returned: always + type: int + sample: 3306 + protocol: + description: The network protocol of the server. + returned: always + type: str + sample: "tcp" + stime: + description: The start time of the listening process. + returned: always + type: str + sample: "Thu Feb 2 13:29:45 2017" + user: + description: The user who is running the listening process. + returned: always + type: str + sample: "mysql" + udp_listen: + description: A list of processes that are listening on a UDP port. + returned: if UDP servers were found + type: list + contains: + address: + description: The address the server is listening on. + returned: always + type: str + sample: "0.0.0.0" + name: + description: The name of the listening process. + returned: if user permissions allow + type: str + sample: "rsyslogd" + pid: + description: The pid of the listening process. + returned: always + type: int + sample: 609 + port: + description: The port the server is listening on. + returned: always + type: int + sample: 514 + protocol: + description: The network protocol of the server. + returned: always + type: str + sample: "udp" + stime: + description: The start time of the listening process. + returned: always + type: str + sample: "Thu Feb 2 13:29:45 2017" + user: + description: The user who is running the listening process. 
+ returned: always + type: str + sample: "root" +''' + +import re +import platform +from ansible.module_utils._text import to_native +from ansible.module_utils.basic import AnsibleModule + + +def netStatParse(raw): + results = list() + for line in raw.splitlines(): + listening_search = re.search('[^ ]+:[0-9]+', line) + if listening_search: + splitted = line.split() + conns = re.search('([^ ]+):([0-9]+)', splitted[3]) + pidstr = '' + if 'tcp' in splitted[0]: + protocol = 'tcp' + pidstr = splitted[6] + elif 'udp' in splitted[0]: + protocol = 'udp' + pidstr = splitted[5] + pids = re.search(r'(([0-9]+)/(.*)|-)', pidstr) + if conns and pids: + address = conns.group(1) + port = conns.group(2) + if (pids.group(2)): + pid = pids.group(2) + else: + pid = 0 + if (pids.group(3)): + name = pids.group(3) + else: + name = '' + result = { + 'pid': int(pid), + 'address': address, + 'port': int(port), + 'protocol': protocol, + 'name': name, + } + if result not in results: + results.append(result) + else: + raise EnvironmentError('Could not get process information for the listening ports.') + return results + + +def main(): + + module = AnsibleModule( + argument_spec={}, + supports_check_mode=True, + ) + + if platform.system() != 'Linux': + module.fail_json(msg='This module requires Linux.') + + def getPidSTime(pid): + ps_cmd = module.get_bin_path('ps', True) + rc, ps_output, stderr = module.run_command([ps_cmd, '-o', 'lstart', '-p', str(pid)]) + stime = '' + if rc == 0: + for line in ps_output.splitlines(): + if 'started' not in line: + stime = line + return stime + + def getPidUser(pid): + ps_cmd = module.get_bin_path('ps', True) + rc, ps_output, stderr = module.run_command([ps_cmd, '-o', 'user', '-p', str(pid)]) + user = '' + if rc == 0: + for line in ps_output.splitlines(): + if line != 'USER': + user = line + return user + + result = { + 'changed': False, + 'ansible_facts': { + 'tcp_listen': [], + 'udp_listen': [], + }, + } + + try: + netstat_cmd = module.get_bin_path('netstat', True) + + # which ports are listening for connections? + rc, stdout, stderr = module.run_command([netstat_cmd, '-plunt']) + if rc == 0: + netstatOut = netStatParse(stdout) + for p in netstatOut: + p['stime'] = getPidSTime(p['pid']) + p['user'] = getPidUser(p['pid']) + if p['protocol'] == 'tcp': + result['ansible_facts']['tcp_listen'].append(p) + elif p['protocol'] == 'udp': + result['ansible_facts']['udp_listen'].append(p) + except (KeyError, EnvironmentError) as e: + module.fail_json(msg=to_native(e)) + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/system/locale_gen.py b/plugins/modules/system/locale_gen.py new file mode 100644 index 0000000000..7a84a8ee47 --- /dev/null +++ b/plugins/modules/system/locale_gen.py @@ -0,0 +1,236 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: locale_gen +short_description: Creates or removes locales +description: + - Manages locales by editing /etc/locale.gen and invoking locale-gen. +author: +- Augustus Kling (@AugustusKling) +options: + name: + description: + - Name and encoding of the locale, such as "en_GB.UTF-8". + required: true + state: + description: + - Whether the locale shall be present. 
+ choices: [ absent, present ] + default: present +''' + +EXAMPLES = ''' +- name: Ensure a locale exists + locale_gen: + name: de_CH.UTF-8 + state: present +''' + +import os +import re +from subprocess import Popen, PIPE, call + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_native + +LOCALE_NORMALIZATION = { + ".utf8": ".UTF-8", + ".eucjp": ".EUC-JP", + ".iso885915": ".ISO-8859-15", + ".cp1251": ".CP1251", + ".koi8r": ".KOI8-R", + ".armscii8": ".ARMSCII-8", + ".euckr": ".EUC-KR", + ".gbk": ".GBK", + ".gb18030": ".GB18030", + ".euctw": ".EUC-TW", +} + + +# =========================================== +# location module specific support methods. +# + +def is_available(name, ubuntuMode): + """Check if the given locale is available on the system. This is done by + checking either : + * if the locale is present in /etc/locales.gen + * or if the locale is present in /usr/share/i18n/SUPPORTED""" + if ubuntuMode: + __regexp = r'^(?P\S+_\S+) (?P\S+)\s*$' + __locales_available = '/usr/share/i18n/SUPPORTED' + else: + __regexp = r'^#{0,1}\s*(?P\S+_\S+) (?P\S+)\s*$' + __locales_available = '/etc/locale.gen' + + re_compiled = re.compile(__regexp) + fd = open(__locales_available, 'r') + for line in fd: + result = re_compiled.match(line) + if result and result.group('locale') == name: + return True + fd.close() + return False + + +def is_present(name): + """Checks if the given locale is currently installed.""" + output = Popen(["locale", "-a"], stdout=PIPE).communicate()[0] + output = to_native(output) + return any(fix_case(name) == fix_case(line) for line in output.splitlines()) + + +def fix_case(name): + """locale -a might return the encoding in either lower or upper case. + Passing through this function makes them uniform for comparisons.""" + for s, r in LOCALE_NORMALIZATION.items(): + name = name.replace(s, r) + return name + + +def replace_line(existing_line, new_line): + """Replaces lines in /etc/locale.gen""" + try: + f = open("/etc/locale.gen", "r") + lines = [line.replace(existing_line, new_line) for line in f] + finally: + f.close() + try: + f = open("/etc/locale.gen", "w") + f.write("".join(lines)) + finally: + f.close() + + +def set_locale(name, enabled=True): + """ Sets the state of the locale. Defaults to enabled. """ + search_string = r'#{0,1}\s*%s (?P.+)' % name + if enabled: + new_string = r'%s \g' % (name) + else: + new_string = r'# %s \g' % (name) + try: + f = open("/etc/locale.gen", "r") + lines = [re.sub(search_string, new_string, line) for line in f] + finally: + f.close() + try: + f = open("/etc/locale.gen", "w") + f.write("".join(lines)) + finally: + f.close() + + +def apply_change(targetState, name): + """Create or remove locale. + + Keyword arguments: + targetState -- Desired state, either present or absent. + name -- Name including encoding such as de_CH.UTF-8. + """ + if targetState == "present": + # Create locale. + set_locale(name, enabled=True) + else: + # Delete locale. + set_locale(name, enabled=False) + + localeGenExitValue = call("locale-gen") + if localeGenExitValue != 0: + raise EnvironmentError(localeGenExitValue, "locale.gen failed to execute, it returned " + str(localeGenExitValue)) + + +def apply_change_ubuntu(targetState, name): + """Create or remove locale. + + Keyword arguments: + targetState -- Desired state, either present or absent. + name -- Name including encoding such as de_CH.UTF-8. + """ + if targetState == "present": + # Create locale. 
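+        # Illustrative only (hypothetical locale name): on the target host the
+        # call below amounts to running `locale-gen de_CH.UTF-8`, which both
+        # registers and compiles the locale in a single step.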
+ # Ubuntu's patched locale-gen automatically adds the new locale to /var/lib/locales/supported.d/local + localeGenExitValue = call(["locale-gen", name]) + else: + # Delete locale involves discarding the locale from /var/lib/locales/supported.d/local and regenerating all locales. + try: + f = open("/var/lib/locales/supported.d/local", "r") + content = f.readlines() + finally: + f.close() + try: + f = open("/var/lib/locales/supported.d/local", "w") + for line in content: + locale, charset = line.split(' ') + if locale != name: + f.write(line) + finally: + f.close() + # Purge locales and regenerate. + # Please provide a patch if you know how to avoid regenerating the locales to keep! + localeGenExitValue = call(["locale-gen", "--purge"]) + + if localeGenExitValue != 0: + raise EnvironmentError(localeGenExitValue, "locale.gen failed to execute, it returned " + str(localeGenExitValue)) + + +def main(): + module = AnsibleModule( + argument_spec=dict( + name=dict(type='str', required=True), + state=dict(type='str', default='present', choices=['absent', 'present']), + ), + supports_check_mode=True, + ) + + name = module.params['name'] + state = module.params['state'] + + if not os.path.exists("/etc/locale.gen"): + if os.path.exists("/var/lib/locales/supported.d/"): + # Ubuntu created its own system to manage locales. + ubuntuMode = True + else: + module.fail_json(msg="/etc/locale.gen and /var/lib/locales/supported.d/local are missing. Is the package \"locales\" installed?") + else: + # We found the common way to manage locales. + ubuntuMode = False + + if not is_available(name, ubuntuMode): + module.fail_json(msg="The locale you've entered is not available " + "on your system.") + + if is_present(name): + prev_state = "present" + else: + prev_state = "absent" + changed = (prev_state != state) + + if module.check_mode: + module.exit_json(changed=changed) + else: + if changed: + try: + if ubuntuMode is False: + apply_change(state, name) + else: + apply_change_ubuntu(state, name) + except EnvironmentError as e: + module.fail_json(msg=to_native(e), exitValue=e.errno) + + module.exit_json(name=name, changed=changed, msg="OK") + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/system/lvg.py b/plugins/modules/system/lvg.py new file mode 100644 index 0000000000..f75fb41ee1 --- /dev/null +++ b/plugins/modules/system/lvg.py @@ -0,0 +1,292 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2013, Alexander Bulimov +# Based on lvol module by Jeroen Hoekx +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = r''' +--- +author: +- Alexander Bulimov (@abulimov) +module: lvg +short_description: Configure LVM volume groups +description: + - This module creates, removes or resizes volume groups. +options: + vg: + description: + - The name of the volume group. + type: str + required: true + pvs: + description: + - List of comma-separated devices to use as physical devices in this volume group. + - Required when creating or resizing volume group. + - The module will take care of running pvcreate if needed. + type: list + pesize: + description: + - "The size of the physical extent. 
I(pesize) must be a power of 2 of at least 1 sector + (where the sector size is the largest sector size of the PVs currently used in the VG), + or at least 128KiB." + - Since Ansible 2.6, pesize can be optionally suffixed by a UNIT (k/K/m/M/g/G), default unit is megabyte. + type: str + default: "4" + pv_options: + description: + - Additional options to pass to C(pvcreate) when creating the volume group. + type: str + vg_options: + description: + - Additional options to pass to C(vgcreate) when creating the volume group. + type: str + state: + description: + - Control if the volume group exists. + type: str + choices: [ absent, present ] + default: present + force: + description: + - If C(yes), allows to remove volume group with logical volumes. + type: bool + default: no +seealso: +- module: filesystem +- module: lvol +- module: parted +notes: + - This module does not modify PE size for already present volume group. +''' + +EXAMPLES = r''' +- name: Create a volume group on top of /dev/sda1 with physical extent size = 32MB + lvg: + vg: vg.services + pvs: /dev/sda1 + pesize: 32 + +- name: Create a volume group on top of /dev/sdb with physical extent size = 128KiB + lvg: + vg: vg.services + pvs: /dev/sdb + pesize: 128K + +# If, for example, we already have VG vg.services on top of /dev/sdb1, +# this VG will be extended by /dev/sdc5. Or if vg.services was created on +# top of /dev/sda5, we first extend it with /dev/sdb1 and /dev/sdc5, +# and then reduce by /dev/sda5. +- name: Create or resize a volume group on top of /dev/sdb1 and /dev/sdc5. + lvg: + vg: vg.services + pvs: /dev/sdb1,/dev/sdc5 + +- name: Remove a volume group with name vg.services + lvg: + vg: vg.services + state: absent +''' + +import itertools +import os + +from ansible.module_utils.basic import AnsibleModule + + +def parse_vgs(data): + vgs = [] + for line in data.splitlines(): + parts = line.strip().split(';') + vgs.append({ + 'name': parts[0], + 'pv_count': int(parts[1]), + 'lv_count': int(parts[2]), + }) + return vgs + + +def find_mapper_device_name(module, dm_device): + dmsetup_cmd = module.get_bin_path('dmsetup', True) + mapper_prefix = '/dev/mapper/' + rc, dm_name, err = module.run_command("%s info -C --noheadings -o name %s" % (dmsetup_cmd, dm_device)) + if rc != 0: + module.fail_json(msg="Failed executing dmsetup command.", rc=rc, err=err) + mapper_device = mapper_prefix + dm_name.rstrip() + return mapper_device + + +def parse_pvs(module, data): + pvs = [] + dm_prefix = '/dev/dm-' + for line in data.splitlines(): + parts = line.strip().split(';') + if parts[0].startswith(dm_prefix): + parts[0] = find_mapper_device_name(module, parts[0]) + pvs.append({ + 'name': parts[0], + 'vg_name': parts[1], + }) + return pvs + + +def main(): + module = AnsibleModule( + argument_spec=dict( + vg=dict(type='str', required=True), + pvs=dict(type='list'), + pesize=dict(type='str', default='4'), + pv_options=dict(type='str', default=''), + vg_options=dict(type='str', default=''), + state=dict(type='str', default='present', choices=['absent', 'present']), + force=dict(type='bool', default=False), + ), + supports_check_mode=True, + ) + + vg = module.params['vg'] + state = module.params['state'] + force = module.boolean(module.params['force']) + pesize = module.params['pesize'] + pvoptions = module.params['pv_options'].split() + vgoptions = module.params['vg_options'].split() + + dev_list = [] + if module.params['pvs']: + dev_list = list(module.params['pvs']) + elif state == 'present': + module.fail_json(msg="No physical volumes given.") + 
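+    # Editorial sketch (hypothetical device path): the loop below resolves a
+    # udev symlink to the underlying block device before any comparison, e.g.
+    #     os.path.realpath('/dev/disk/by-uuid/1234-ABCD')  # -> '/dev/sda1'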
+ # LVM always uses real paths not symlinks so replace symlinks with actual path + for idx, dev in enumerate(dev_list): + dev_list[idx] = os.path.realpath(dev) + + if state == 'present': + # check given devices + for test_dev in dev_list: + if not os.path.exists(test_dev): + module.fail_json(msg="Device %s not found." % test_dev) + + # get pv list + pvs_cmd = module.get_bin_path('pvs', True) + if dev_list: + pvs_filter_pv_name = ' || '.join( + 'pv_name = {0}'.format(x) + for x in itertools.chain(dev_list, module.params['pvs']) + ) + pvs_filter_vg_name = 'vg_name = {0}'.format(vg) + pvs_filter = "--select '{0} || {1}' ".format(pvs_filter_pv_name, pvs_filter_vg_name) + else: + pvs_filter = '' + rc, current_pvs, err = module.run_command("%s --noheadings -o pv_name,vg_name --separator ';' %s" % (pvs_cmd, pvs_filter)) + if rc != 0: + module.fail_json(msg="Failed executing pvs command.", rc=rc, err=err) + + # check pv for devices + pvs = parse_pvs(module, current_pvs) + used_pvs = [pv for pv in pvs if pv['name'] in dev_list and pv['vg_name'] and pv['vg_name'] != vg] + if used_pvs: + module.fail_json(msg="Device %s is already in %s volume group." % (used_pvs[0]['name'], used_pvs[0]['vg_name'])) + + vgs_cmd = module.get_bin_path('vgs', True) + rc, current_vgs, err = module.run_command("%s --noheadings -o vg_name,pv_count,lv_count --separator ';'" % vgs_cmd) + + if rc != 0: + module.fail_json(msg="Failed executing vgs command.", rc=rc, err=err) + + changed = False + + vgs = parse_vgs(current_vgs) + + for test_vg in vgs: + if test_vg['name'] == vg: + this_vg = test_vg + break + else: + this_vg = None + + if this_vg is None: + if state == 'present': + # create VG + if module.check_mode: + changed = True + else: + # create PV + pvcreate_cmd = module.get_bin_path('pvcreate', True) + for current_dev in dev_list: + rc, _, err = module.run_command([pvcreate_cmd] + pvoptions + ['-f', str(current_dev)]) + if rc == 0: + changed = True + else: + module.fail_json(msg="Creating physical volume '%s' failed" % current_dev, rc=rc, err=err) + vgcreate_cmd = module.get_bin_path('vgcreate') + rc, _, err = module.run_command([vgcreate_cmd] + vgoptions + ['-s', pesize, vg] + dev_list) + if rc == 0: + changed = True + else: + module.fail_json(msg="Creating volume group '%s' failed" % vg, rc=rc, err=err) + else: + if state == 'absent': + if module.check_mode: + module.exit_json(changed=True) + else: + if this_vg['lv_count'] == 0 or force: + # remove VG + vgremove_cmd = module.get_bin_path('vgremove', True) + rc, _, err = module.run_command("%s --force %s" % (vgremove_cmd, vg)) + if rc == 0: + module.exit_json(changed=True) + else: + module.fail_json(msg="Failed to remove volume group %s" % (vg), rc=rc, err=err) + else: + module.fail_json(msg="Refuse to remove non-empty volume group %s without force=yes" % (vg)) + + # resize VG + current_devs = [os.path.realpath(pv['name']) for pv in pvs if pv['vg_name'] == vg] + devs_to_remove = list(set(current_devs) - set(dev_list)) + devs_to_add = list(set(dev_list) - set(current_devs)) + + if devs_to_add or devs_to_remove: + if module.check_mode: + changed = True + else: + if devs_to_add: + devs_to_add_string = ' '.join(devs_to_add) + # create PV + pvcreate_cmd = module.get_bin_path('pvcreate', True) + for current_dev in devs_to_add: + rc, _, err = module.run_command([pvcreate_cmd] + pvoptions + ['-f', str(current_dev)]) + if rc == 0: + changed = True + else: + module.fail_json(msg="Creating physical volume '%s' failed" % current_dev, rc=rc, err=err) + # add PV to our VG + 
vgextend_cmd = module.get_bin_path('vgextend', True)
+                    rc, _, err = module.run_command("%s %s %s" % (vgextend_cmd, vg, devs_to_add_string))
+                    if rc == 0:
+                        changed = True
+                    else:
+                        module.fail_json(msg="Unable to extend %s by %s." % (vg, devs_to_add_string), rc=rc, err=err)
+
+                # remove some PV from our VG
+                if devs_to_remove:
+                    devs_to_remove_string = ' '.join(devs_to_remove)
+                    vgreduce_cmd = module.get_bin_path('vgreduce', True)
+                    rc, _, err = module.run_command("%s --force %s %s" % (vgreduce_cmd, vg, devs_to_remove_string))
+                    if rc == 0:
+                        changed = True
+                    else:
+                        module.fail_json(msg="Unable to reduce %s by %s." % (vg, devs_to_remove_string), rc=rc, err=err)
+
+    module.exit_json(changed=changed)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/system/lvol.py b/plugins/modules/system/lvol.py
new file mode 100644
index 0000000000..26ea7dc24e
--- /dev/null
+++ b/plugins/modules/system/lvol.py
@@ -0,0 +1,557 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2013, Jeroen Hoekx , Alexander Bulimov
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+DOCUMENTATION = '''
+---
+author:
+    - Jeroen Hoekx (@jhoekx)
+    - Alexander Bulimov (@abulimov)
+module: lvol
+short_description: Configure LVM logical volumes
+description:
+  - This module creates, removes or resizes logical volumes.
+options:
+  vg:
+    description:
+    - The volume group this logical volume is part of.
+  lv:
+    description:
+    - The name of the logical volume.
+  size:
+    description:
+    - The size of the logical volume, according to lvcreate(8) --size, by
+      default in megabytes or optionally with one of [bBsSkKmMgGtTpPeE] units; or
+      according to lvcreate(8) --extents as a percentage of [VG|PVS|FREE].
+      Float values must begin with a digit.
+      Resizing using percentage values was not supported prior to 2.1.
+  state:
+    description:
+    - Control if the logical volume exists. If C(present) and the
+      volume does not already exist then the C(size) option is required.
+    choices: [ absent, present ]
+    default: present
+  active:
+    description:
+    - Whether the volume is active and visible to the host.
+    type: bool
+    default: 'yes'
+  force:
+    description:
+    - Shrink or remove operations of volumes require this switch. Ensures that
+      filesystems never get corrupted/destroyed by mistake.
+    type: bool
+    default: 'no'
+  opts:
+    description:
+    - Free-form options to be passed to the lvcreate command.
+  snapshot:
+    description:
+    - The name of the snapshot volume.
+  pvs:
+    description:
+    - Comma separated list of physical volumes (e.g. /dev/sda,/dev/sdb).
+  thinpool:
+    description:
+    - The thin pool volume name. When you want to create a thin provisioned volume, specify a thin pool volume name.
+  shrink:
+    description:
+    - Shrink if current size is higher than size requested.
+    type: bool
+    default: 'yes'
+  resizefs:
+    description:
+    - Resize the underlying filesystem together with the logical volume.
+    type: bool
+    default: 'no'
+notes:
+  - You must specify lv (when managing the state of logical volumes) or thinpool (when managing a thin provisioned volume).
+''' + +EXAMPLES = ''' +- name: Create a logical volume of 512m + lvol: + vg: firefly + lv: test + size: 512 + +- name: Create a logical volume of 512m with disks /dev/sda and /dev/sdb + lvol: + vg: firefly + lv: test + size: 512 + pvs: /dev/sda,/dev/sdb + +- name: Create cache pool logical volume + lvol: + vg: firefly + lv: lvcache + size: 512m + opts: --type cache-pool + +- name: Create a logical volume of 512g. + lvol: + vg: firefly + lv: test + size: 512g + +- name: Create a logical volume the size of all remaining space in the volume group + lvol: + vg: firefly + lv: test + size: 100%FREE + +- name: Create a logical volume with special options + lvol: + vg: firefly + lv: test + size: 512g + opts: -r 16 + +- name: Extend the logical volume to 1024m. + lvol: + vg: firefly + lv: test + size: 1024 + +- name: Extend the logical volume to consume all remaining space in the volume group + lvol: + vg: firefly + lv: test + size: +100%FREE + +- name: Extend the logical volume to take all remaining space of the PVs and resize the underlying filesystem + lvol: + vg: firefly + lv: test + size: 100%PVS + resizefs: true + +- name: Resize the logical volume to % of VG + lvol: + vg: firefly + lv: test + size: 80%VG + force: yes + +- name: Reduce the logical volume to 512m + lvol: + vg: firefly + lv: test + size: 512 + force: yes + +- name: Set the logical volume to 512m and do not try to shrink if size is lower than current one + lvol: + vg: firefly + lv: test + size: 512 + shrink: no + +- name: Remove the logical volume. + lvol: + vg: firefly + lv: test + state: absent + force: yes + +- name: Create a snapshot volume of the test logical volume. + lvol: + vg: firefly + lv: test + snapshot: snap1 + size: 100m + +- name: Deactivate a logical volume + lvol: + vg: firefly + lv: test + active: false + +- name: Create a deactivated logical volume + lvol: + vg: firefly + lv: test + size: 512g + active: false + +- name: Create a thin pool of 512g + lvol: + vg: firefly + thinpool: testpool + size: 512g + +- name: Create a thin volume of 128g + lvol: + vg: firefly + lv: test + thinpool: testpool + size: 128g +''' + +import re + +from ansible.module_utils.basic import AnsibleModule + + +LVOL_ENV_VARS = dict( + # make sure we use the C locale when running lvol-related commands + LANG='C', + LC_ALL='C', + LC_MESSAGES='C', + LC_CTYPE='C', +) + + +def mkversion(major, minor, patch): + return (1000 * 1000 * int(major)) + (1000 * int(minor)) + int(patch) + + +def parse_lvs(data): + lvs = [] + for line in data.splitlines(): + parts = line.strip().split(';') + lvs.append({ + 'name': parts[0].replace('[', '').replace(']', ''), + 'size': float(parts[1]), + 'active': (parts[2][4] == 'a'), + 'thinpool': (parts[2][0] == 't'), + 'thinvol': (parts[2][0] == 'V'), + }) + return lvs + + +def parse_vgs(data): + vgs = [] + for line in data.splitlines(): + parts = line.strip().split(';') + vgs.append({ + 'name': parts[0], + 'size': float(parts[1]), + 'free': float(parts[2]), + 'ext_size': float(parts[3]) + }) + return vgs + + +def get_lvm_version(module): + ver_cmd = module.get_bin_path("lvm", required=True) + rc, out, err = module.run_command("%s version" % (ver_cmd)) + if rc != 0: + return None + m = re.search(r"LVM version:\s+(\d+)\.(\d+)\.(\d+).*(\d{4}-\d{2}-\d{2})", out) + if not m: + return None + return mkversion(m.group(1), m.group(2), m.group(3)) + + +def main(): + module = AnsibleModule( + argument_spec=dict( + vg=dict(type='str', required=True), + lv=dict(type='str'), + size=dict(type='str'), + opts=dict(type='str'), + 
state=dict(type='str', default='present', choices=['absent', 'present']), + force=dict(type='bool', default=False), + shrink=dict(type='bool', default=True), + active=dict(type='bool', default=True), + snapshot=dict(type='str'), + pvs=dict(type='str'), + resizefs=dict(type='bool', default=False), + thinpool=dict(type='str'), + ), + supports_check_mode=True, + required_one_of=( + ['lv', 'thinpool'], + ), + ) + + module.run_command_environ_update = LVOL_ENV_VARS + + # Determine if the "--yes" option should be used + version_found = get_lvm_version(module) + if version_found is None: + module.fail_json(msg="Failed to get LVM version number") + version_yesopt = mkversion(2, 2, 99) # First LVM with the "--yes" option + if version_found >= version_yesopt: + yesopt = "--yes" + else: + yesopt = "" + + vg = module.params['vg'] + lv = module.params['lv'] + size = module.params['size'] + opts = module.params['opts'] + state = module.params['state'] + force = module.boolean(module.params['force']) + shrink = module.boolean(module.params['shrink']) + active = module.boolean(module.params['active']) + resizefs = module.boolean(module.params['resizefs']) + thinpool = module.params['thinpool'] + size_opt = 'L' + size_unit = 'm' + snapshot = module.params['snapshot'] + pvs = module.params['pvs'] + + if pvs is None: + pvs = "" + else: + pvs = pvs.replace(",", " ") + + if opts is None: + opts = "" + + # Add --test option when running in check-mode + if module.check_mode: + test_opt = ' --test' + else: + test_opt = '' + + if size: + # LVCREATE(8) -l --extents option with percentage + if '%' in size: + size_parts = size.split('%', 1) + size_percent = int(size_parts[0]) + if size_percent > 100: + module.fail_json(msg="Size percentage cannot be larger than 100%") + size_whole = size_parts[1] + if size_whole == 'ORIGIN': + module.fail_json(msg="Snapshot Volumes are not supported") + elif size_whole not in ['VG', 'PVS', 'FREE']: + module.fail_json(msg="Specify extents as a percentage of VG|PVS|FREE") + size_opt = 'l' + size_unit = '' + + if '%' not in size: + # LVCREATE(8) -L --size option unit + if size[-1].lower() in 'bskmgtpe': + size_unit = size[-1].lower() + size = size[0:-1] + + try: + float(size) + if not size[0].isdigit(): + raise ValueError() + except ValueError: + module.fail_json(msg="Bad size specification of '%s'" % size) + + # when no unit, megabytes by default + if size_opt == 'l': + unit = 'm' + else: + unit = size_unit + + # Get information on volume group requested + vgs_cmd = module.get_bin_path("vgs", required=True) + rc, current_vgs, err = module.run_command( + "%s --noheadings --nosuffix -o vg_name,size,free,vg_extent_size --units %s --separator ';' %s" % (vgs_cmd, unit, vg)) + + if rc != 0: + if state == 'absent': + module.exit_json(changed=False, stdout="Volume group %s does not exist." % vg) + else: + module.fail_json(msg="Volume group %s does not exist." % vg, rc=rc, err=err) + + vgs = parse_vgs(current_vgs) + this_vg = vgs[0] + + # Get information on logical volume requested + lvs_cmd = module.get_bin_path("lvs", required=True) + rc, current_lvs, err = module.run_command( + "%s -a --noheadings --nosuffix -o lv_name,size,lv_attr --units %s --separator ';' %s" % (lvs_cmd, unit, vg)) + + if rc != 0: + if state == 'absent': + module.exit_json(changed=False, stdout="Volume group %s does not exist." % vg) + else: + module.fail_json(msg="Volume group %s does not exist." 
% vg, rc=rc, err=err) + + changed = False + + lvs = parse_lvs(current_lvs) + + if snapshot: + # Check snapshot pre-conditions + for test_lv in lvs: + if test_lv['name'] == lv or test_lv['name'] == thinpool: + if not test_lv['thinpool'] and not thinpool: + break + else: + module.fail_json(msg="Snapshots of thin pool LVs are not supported.") + else: + module.fail_json(msg="Snapshot origin LV %s does not exist in volume group %s." % (lv, vg)) + check_lv = snapshot + + elif thinpool: + if lv: + # Check thin volume pre-conditions + for test_lv in lvs: + if test_lv['name'] == thinpool: + break + else: + module.fail_json(msg="Thin pool LV %s does not exist in volume group %s." % (thinpool, vg)) + check_lv = lv + else: + check_lv = thinpool + else: + check_lv = lv + + for test_lv in lvs: + if test_lv['name'] in (check_lv, check_lv.rsplit('/', 1)[-1]): + this_lv = test_lv + break + else: + this_lv = None + + msg = '' + if this_lv is None: + if state == 'present': + # Require size argument except for snapshot of thin volumes + if (lv or thinpool) and not size: + for test_lv in lvs: + if test_lv['name'] == lv and test_lv['thinvol'] and snapshot: + break + else: + module.fail_json(msg="No size given.") + + # create LV + lvcreate_cmd = module.get_bin_path("lvcreate", required=True) + if snapshot is not None: + if size: + cmd = "%s %s %s -%s %s%s -s -n %s %s %s/%s" % (lvcreate_cmd, test_opt, yesopt, size_opt, size, size_unit, snapshot, opts, vg, lv) + else: + cmd = "%s %s %s -s -n %s %s %s/%s" % (lvcreate_cmd, test_opt, yesopt, snapshot, opts, vg, lv) + elif thinpool and lv: + if size_opt == 'l': + module.fail_json(changed=False, msg="Thin volume sizing with percentage not supported.") + size_opt = 'V' + cmd = "%s %s -n %s -%s %s%s %s -T %s/%s" % (lvcreate_cmd, yesopt, lv, size_opt, size, size_unit, opts, vg, thinpool) + elif thinpool and not lv: + cmd = "%s %s -%s %s%s %s -T %s/%s" % (lvcreate_cmd, yesopt, size_opt, size, size_unit, opts, vg, thinpool) + else: + cmd = "%s %s %s -n %s -%s %s%s %s %s %s" % (lvcreate_cmd, test_opt, yesopt, lv, size_opt, size, size_unit, opts, vg, pvs) + rc, _, err = module.run_command(cmd) + if rc == 0: + changed = True + else: + module.fail_json(msg="Creating logical volume '%s' failed" % lv, rc=rc, err=err) + else: + if state == 'absent': + # remove LV + if not force: + module.fail_json(msg="Sorry, no removal of logical volume %s without force=yes." % (this_lv['name'])) + lvremove_cmd = module.get_bin_path("lvremove", required=True) + rc, _, err = module.run_command("%s %s --force %s/%s" % (lvremove_cmd, test_opt, vg, this_lv['name'])) + if rc == 0: + module.exit_json(changed=True) + else: + module.fail_json(msg="Failed to remove logical volume %s" % (lv), rc=rc, err=err) + + elif not size: + pass + + elif size_opt == 'l': + # Resize LV based on % value + tool = None + size_free = this_vg['free'] + if size_whole == 'VG' or size_whole == 'PVS': + size_requested = size_percent * this_vg['size'] / 100 + else: # size_whole == 'FREE': + size_requested = size_percent * this_vg['free'] / 100 + if '+' in size: + size_requested += this_lv['size'] + if this_lv['size'] < size_requested: + if (size_free > 0) and (('+' not in size) or (size_free >= (size_requested - this_lv['size']))): + tool = module.get_bin_path("lvextend", required=True) + else: + module.fail_json( + msg="Logical Volume %s could not be extended. 
Not enough free space left (%s%s required / %s%s available)" % + (this_lv['name'], (size_requested - this_lv['size']), unit, size_free, unit) + ) + elif shrink and this_lv['size'] > size_requested + this_vg['ext_size']: # more than an extent too large + if size_requested == 0: + module.fail_json(msg="Sorry, no shrinking of %s to 0 permitted." % (this_lv['name'])) + elif not force: + module.fail_json(msg="Sorry, no shrinking of %s without force=yes" % (this_lv['name'])) + else: + tool = module.get_bin_path("lvreduce", required=True) + tool = '%s %s' % (tool, '--force') + + if tool: + if resizefs: + tool = '%s %s' % (tool, '--resizefs') + cmd = "%s %s -%s %s%s %s/%s %s" % (tool, test_opt, size_opt, size, size_unit, vg, this_lv['name'], pvs) + rc, out, err = module.run_command(cmd) + if "Reached maximum COW size" in out: + module.fail_json(msg="Unable to resize %s to %s%s" % (lv, size, size_unit), rc=rc, err=err, out=out) + elif rc == 0: + changed = True + msg = "Volume %s resized to %s%s" % (this_lv['name'], size_requested, unit) + elif "matches existing size" in err: + module.exit_json(changed=False, vg=vg, lv=this_lv['name'], size=this_lv['size']) + elif "not larger than existing size" in err: + module.exit_json(changed=False, vg=vg, lv=this_lv['name'], size=this_lv['size'], msg="Original size is larger than requested size", err=err) + else: + module.fail_json(msg="Unable to resize %s to %s%s" % (lv, size, size_unit), rc=rc, err=err) + + else: + # resize LV based on absolute values + tool = None + if float(size) > this_lv['size']: + tool = module.get_bin_path("lvextend", required=True) + elif shrink and float(size) < this_lv['size']: + if float(size) == 0: + module.fail_json(msg="Sorry, no shrinking of %s to 0 permitted." % (this_lv['name'])) + if not force: + module.fail_json(msg="Sorry, no shrinking of %s without force=yes." 
% (this_lv['name'])) + else: + tool = module.get_bin_path("lvreduce", required=True) + tool = '%s %s' % (tool, '--force') + + if tool: + if resizefs: + tool = '%s %s' % (tool, '--resizefs') + cmd = "%s %s -%s %s%s %s/%s %s" % (tool, test_opt, size_opt, size, size_unit, vg, this_lv['name'], pvs) + rc, out, err = module.run_command(cmd) + if "Reached maximum COW size" in out: + module.fail_json(msg="Unable to resize %s to %s%s" % (lv, size, size_unit), rc=rc, err=err, out=out) + elif rc == 0: + changed = True + elif "matches existing size" in err: + module.exit_json(changed=False, vg=vg, lv=this_lv['name'], size=this_lv['size']) + elif "not larger than existing size" in err: + module.exit_json(changed=False, vg=vg, lv=this_lv['name'], size=this_lv['size'], msg="Original size is larger than requested size", err=err) + else: + module.fail_json(msg="Unable to resize %s to %s%s" % (lv, size, size_unit), rc=rc, err=err) + + if this_lv is not None: + if active: + lvchange_cmd = module.get_bin_path("lvchange", required=True) + rc, _, err = module.run_command("%s -ay %s/%s" % (lvchange_cmd, vg, this_lv['name'])) + if rc == 0: + module.exit_json(changed=((not this_lv['active']) or changed), vg=vg, lv=this_lv['name'], size=this_lv['size']) + else: + module.fail_json(msg="Failed to activate logical volume %s" % (lv), rc=rc, err=err) + else: + lvchange_cmd = module.get_bin_path("lvchange", required=True) + rc, _, err = module.run_command("%s -an %s/%s" % (lvchange_cmd, vg, this_lv['name'])) + if rc == 0: + module.exit_json(changed=(this_lv['active'] or changed), vg=vg, lv=this_lv['name'], size=this_lv['size']) + else: + module.fail_json(msg="Failed to deactivate logical volume %s" % (lv), rc=rc, err=err) + + module.exit_json(changed=changed, msg=msg) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/system/make.py b/plugins/modules/system/make.py new file mode 100644 index 0000000000..4dd459d425 --- /dev/null +++ b/plugins/modules/system/make.py @@ -0,0 +1,168 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2015, Linus Unnebäck +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = r''' +--- +module: make +short_description: Run targets in a Makefile +requirements: +- make +author: Linus Unnebäck (@LinusU) +description: + - Run targets in a Makefile. +options: + target: + description: + - The target to run. + - Typically this would be something like C(install),C(test) or C(all)." + type: str + params: + description: + - Any extra parameters to pass to make. + type: dict + chdir: + description: + - Change to this directory before running make. + type: path + required: true + file: + description: + - Use a custom Makefile. 
+ type: path +''' + +EXAMPLES = r''' +- name: Build the default target + make: + chdir: /home/ubuntu/cool-project + +- name: Run 'install' target as root + make: + chdir: /home/ubuntu/cool-project + target: install + become: yes + +- name: Build 'all' target with extra arguments + make: + chdir: /home/ubuntu/cool-project + target: all + params: + NUM_THREADS: 4 + BACKEND: lapack + +- name: Build 'all' target with a custom Makefile + make: + chdir: /home/ubuntu/cool-project + target: all + file: /some-project/Makefile +''' + +RETURN = r'''# ''' + +from ansible.module_utils.six import iteritems +from ansible.module_utils.basic import AnsibleModule + + +def run_command(command, module, check_rc=True): + """ + Run a command using the module, return + the result code and std{err,out} content. + + :param command: list of command arguments + :param module: Ansible make module instance + :return: return code, stdout content, stderr content + """ + rc, out, err = module.run_command(command, check_rc=check_rc, cwd=module.params['chdir']) + return rc, sanitize_output(out), sanitize_output(err) + + +def sanitize_output(output): + """ + Sanitize the output string before we + pass it to module.fail_json. Defaults + the string to empty if it is None, else + strips trailing newlines. + + :param output: output to sanitize + :return: sanitized output + """ + if output is None: + return '' + else: + return output.rstrip("\r\n") + + +def main(): + module = AnsibleModule( + argument_spec=dict( + target=dict(type='str'), + params=dict(type='dict'), + chdir=dict(type='path', required=True), + file=dict(type='path'), + ), + supports_check_mode=True, + ) + # Build up the invocation of `make` we are going to use + # For non-Linux OSes, prefer gmake (GNU make) over make + make_path = module.get_bin_path('gmake', required=False) + if not make_path: + # Fall back to system make + make_path = module.get_bin_path('make', required=True) + make_target = module.params['target'] + if module.params['params'] is not None: + make_parameters = [k + '=' + str(v) for k, v in iteritems(module.params['params'])] + else: + make_parameters = [] + + if module.params['file'] is not None: + base_command = [make_path, "-f", module.params['file'], make_target] + else: + base_command = [make_path, make_target] + base_command.extend(make_parameters) + + # Check if the target is already up to date + rc, out, err = run_command(base_command + ['--question'], module, check_rc=False) + if module.check_mode: + # If we've been asked to do a dry run, we only need + # to report whether or not the target is up to date + changed = (rc != 0) + else: + if rc == 0: + # The target is up to date, so we don't have to + # do anything + changed = False + else: + # The target isn't up to date, so we need to run it + rc, out, err = run_command(base_command, module, + check_rc=True) + changed = True + + # We don't report the return code, as if this module failed + # we would be calling fail_json from run_command, so even if + # we had a non-zero return code, we did not fail. However, if + # we report a non-zero return code here, we will be marked as + # failed regardless of what we signal using the failed= kwarg. 
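+    # Editorial aside (hypothetical values, not part of the original module): with
+    # params={'NUM_THREADS': 4} and target='all', base_command built above is
+    # roughly ['/usr/bin/make', 'all', 'NUM_THREADS=4'], and the '--question'
+    # probe exits 0 only when 'all' is already up to date, which is what drives
+    # the changed flag reported below.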
+ module.exit_json( + changed=changed, + failed=False, + stdout=out, + stderr=err, + target=module.params['target'], + params=module.params['params'], + chdir=module.params['chdir'], + file=module.params['file'] + ) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/system/mksysb.py b/plugins/modules/system/mksysb.py new file mode 100644 index 0000000000..da3533d7f5 --- /dev/null +++ b/plugins/modules/system/mksysb.py @@ -0,0 +1,208 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2017, Kairo Araujo +# GNU General Public License v3.0+ (see COPYING or +# https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +ANSIBLE_METADATA = { + 'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community' +} + +DOCUMENTATION = ''' +--- +author: Kairo Araujo (@kairoaraujo) +module: mksysb +short_description: Generates AIX mksysb rootvg backups +description: + - This module manages a basic AIX mksysb (image) of rootvg. +options: + backup_crypt_files: + description: + - Backup encrypted files. + type: bool + default: "yes" + backup_dmapi_fs: + description: + - Back up DMAPI filesystem files. + type: bool + default: "yes" + create_map_files: + description: + - Creates new MAP files. + type: bool + default: "no" + exclude_files: + description: + - Excludes files using C(/etc/rootvg.exclude). + type: bool + default: "no" + exclude_wpar_files: + description: + - Excludes WPAR files. + type: bool + default: "no" + extended_attrs: + description: + - Backup extended attributes. + type: bool + default: "yes" + name: + description: + - Backup name. + required: true + new_image_data: + description: + - Creates a new image data file (C(image.data)). + type: bool + default: "yes" + software_packing: + description: + - Exclude files from packing option listed in + C(/etc/exclude_packing.rootvg). + type: bool + default: "no" + storage_path: + description: + - Storage path where the mksysb backup will be stored. + required: true + use_snapshot: + description: + - Creates backup using snapshots. + type: bool + default: "no" +''' + +EXAMPLES = ''' +- name: Running a backup image mksysb + mksysb: + name: myserver + storage_path: /repository/images + exclude_files: yes + exclude_wpar_files: yes +''' + +RETURN = ''' +changed: + description: Return changed for mksysb actions as true or false. + returned: always + type: bool + version_added: 2.5 +msg: + description: Return message regarding the action. + returned: always + type: str + version_added: 2.5 +''' + + +from ansible.module_utils.basic import AnsibleModule +import os + + +def main(): + module = AnsibleModule( + argument_spec=dict( + backup_crypt_files=dict(type='bool', default=True), + backup_dmapi_fs=dict(type='bool', default=True), + create_map_files=dict(type='bool', default=False), + exclude_files=dict(type='bool', default=False), + exclude_wpar_files=dict(type='bool', default=False), + extended_attrs=dict(type='bool', default=True), + name=dict(required=True), + new_image_data=dict(type='bool', default=True), + software_packing=dict(type='bool', default=False), + storage_path=dict(required=True), + use_snapshot=dict(type='bool', default=False) + ), + supports_check_mode=True, + ) + + # Command options.
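+    # Editorial note: each boolean module parameter maps to the presence or
+    # absence of one mksysb(1) flag in the dictionaries below. Beware that
+    # several mappings are inverted: for example, software_packing=False emits
+    # '-p' (disable packing) and extended_attrs=False emits '-a', while
+    # create_map_files=True emits '-m'.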
+ map_file_opt = { + True: '-m', + False: '' + } + + use_snapshot_opt = { + True: '-T', + False: '' + } + + exclude_files_opt = { + True: '-e', + False: '' + } + + exclude_wpar_opt = { + True: '-G', + False: '' + } + + new_image_data_opt = { + True: '-i', + False: '' + } + + soft_packing_opt = { + True: '', + False: '-p' + } + + extend_attr_opt = { + True: '', + False: '-a' + } + + crypt_files_opt = { + True: '', + False: '-Z' + } + + dmapi_fs_opt = { + True: '-a', + False: '' + } + + backup_crypt_files = crypt_files_opt[module.params['backup_crypt_files']] + backup_dmapi_fs = dmapi_fs_opt[module.params['backup_dmapi_fs']] + create_map_files = map_file_opt[module.params['create_map_files']] + exclude_files = exclude_files_opt[module.params['exclude_files']] + exclude_wpar_files = exclude_wpar_opt[module.params['exclude_wpar_files']] + extended_attrs = extend_attr_opt[module.params['extended_attrs']] + name = module.params['name'] + new_image_data = new_image_data_opt[module.params['new_image_data']] + software_packing = soft_packing_opt[module.params['software_packing']] + storage_path = module.params['storage_path'] + use_snapshot = use_snapshot_opt[module.params['use_snapshot']] + + # Validate if storage_path is a valid directory. + if os.path.isdir(storage_path): + if not module.check_mode: + # Generates the mksysb image backup. + mksysb_cmd = module.get_bin_path('mksysb', True) + rc, mksysb_output, err = module.run_command( + "%s -X %s %s %s %s %s %s %s %s %s %s/%s" % ( + mksysb_cmd, create_map_files, use_snapshot, exclude_files, + exclude_wpar_files, software_packing, extended_attrs, + backup_crypt_files, backup_dmapi_fs, new_image_data, + storage_path, name)) + if rc == 0: + module.exit_json(changed=True, msg=mksysb_output) + else: + module.fail_json(msg="mksysb failed.", rc=rc, err=err) + + module.exit_json(changed=True) + + else: + module.fail_json(msg="Storage path %s is not valid." % storage_path) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/system/modprobe.py b/plugins/modules/system/modprobe.py new file mode 100644 index 0000000000..133a65fd13 --- /dev/null +++ b/plugins/modules/system/modprobe.py @@ -0,0 +1,128 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2013, David Stygstra +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: modprobe +short_description: Load or unload kernel modules +author: + - David Stygstra (@stygstra) + - Julien Dauphant (@jdauphant) + - Matt Jeffery (@mattjeffery) +description: + - Load or unload kernel modules. +options: + name: + required: true + description: + - Name of kernel module to manage. + state: + description: + - Whether the module should be present or absent. + choices: [ absent, present ] + default: present + params: + description: + - Module parameters.
+ default: '' +''' + +EXAMPLES = ''' +- name: Add the 802.1q module + modprobe: + name: 8021q + state: present + +- name: Add the dummy module + modprobe: + name: dummy + state: present + params: 'numdummies=2' +''' + +import os.path +import shlex +import traceback + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_native + + +def main(): + module = AnsibleModule( + argument_spec=dict( + name=dict(type='str', required=True), + state=dict(type='str', default='present', choices=['absent', 'present']), + params=dict(type='str', default=''), + ), + supports_check_mode=True, + ) + + name = module.params['name'] + params = module.params['params'] + state = module.params['state'] + + # FIXME: Adding all parameters as result values is useless + result = dict( + changed=False, + name=name, + params=params, + state=state, + ) + + # Check if module is present + try: + present = False + with open('/proc/modules') as modules: + module_name = name.replace('-', '_') + ' ' + for line in modules: + if line.startswith(module_name): + present = True + break + if not present: + command = [module.get_bin_path('uname', True), '-r'] + rc, uname_kernel_release, err = module.run_command(command) + module_file = '/' + name + '.ko' + builtin_path = os.path.join('/lib/modules/', uname_kernel_release.strip(), + 'modules.builtin') + with open(builtin_path) as builtins: + for line in builtins: + # each line carries a trailing newline, so strip it before matching + if line.rstrip().endswith(module_file): + present = True + break + except IOError as e: + module.fail_json(msg=to_native(e), exception=traceback.format_exc(), **result) + + # Add/remove module as needed + if state == 'present': + if not present: + if not module.check_mode: + command = [module.get_bin_path('modprobe', True), name] + command.extend(shlex.split(params)) + rc, out, err = module.run_command(command) + if rc != 0: + module.fail_json(msg=err, rc=rc, stdout=out, stderr=err, **result) + result['changed'] = True + elif state == 'absent': + if present: + if not module.check_mode: + rc, out, err = module.run_command([module.get_bin_path('modprobe', True), '-r', name]) + if rc != 0: + module.fail_json(msg=err, rc=rc, stdout=out, stderr=err, **result) + result['changed'] = True + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/system/nosh.py b/plugins/modules/system/nosh.py new file mode 100644 index 0000000000..5452dc5354 --- /dev/null +++ b/plugins/modules/system/nosh.py @@ -0,0 +1,505 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2017, Thomas Caravia +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: nosh +author: + - "Thomas Caravia (@tacatac)" +short_description: Manage services with nosh +description: + - Control running and enabled state for system-wide or user services. + - BSD and Linux systems are supported. +options: + name: + required: true + description: + - Name of the service to manage. + state: + required: false + choices: [ started, stopped, reset, restarted, reloaded ] + description: + - C(started)/C(stopped) are idempotent actions that will not run + commands unless necessary. + C(restarted) will always bounce the service. + C(reloaded) will send a SIGHUP or start the service.
+ C(reset) will start or stop the service according to whether it is + enabled or not. + enabled: + required: false + type: bool + description: + - Enable or disable the service, independently of C(*.preset) file + preference or running state. Mutually exclusive with I(preset). Will take + effect prior to I(state=reset). + preset: + required: false + type: bool + description: + - Enable or disable the service according to local preferences in *.preset files. + Mutually exclusive with I(enabled). Only has an effect if set to true. Will take + effect prior to I(state=reset). + user: + required: false + default: 'no' + type: bool + description: + - Run system-control talking to the calling user's service manager, rather than + the system-wide service manager. +requirements: + - A system with an active nosh service manager, see Notes for further information. +notes: + - Information on the nosh utilities suite may be found at U(https://jdebp.eu/Softwares/nosh/). +''' + +EXAMPLES = ''' +- name: start dnscache if not running + nosh: name=dnscache state=started + +- name: stop mpd, if running + nosh: name=mpd state=stopped + +- name: restart unbound or start it if not already running + nosh: + name: unbound + state: restarted + +- name: reload fail2ban or start it if not already running + nosh: + name: fail2ban + state: reloaded + +- name: disable nsd + nosh: name=nsd enabled=no + +- name: for package installers, set nginx running state according to local enable settings, preset and reset + nosh: name=nginx preset=True state=reset + +- name: reboot the host if nosh is the system manager, would need a "wait_for*" task at least, not recommended as-is + nosh: name=reboot state=started + +- name: using conditionals with the module facts + tasks: + - name: obtain information on tinydns service + nosh: name=tinydns + register: result + + - name: fail if service not loaded + fail: msg="The {{ result.name }} service is not loaded" + when: not result.status + + - name: fail if service is running + fail: msg="The {{ result.name }} service is running" + when: result.status and result.status['DaemontoolsEncoreState'] == "running" +''' + +RETURN = ''' +name: + description: name used to find the service + returned: success + type: str + sample: "sshd" +service_path: + description: resolved path for the service + returned: success + type: str + sample: "/var/sv/sshd" +enabled: + description: whether the service is enabled at system bootstrap + returned: success + type: bool + sample: True +preset: + description: whether the enabled status reflects the one set in the relevant C(*.preset) file + returned: success + type: bool + sample: 'False' +state: + description: service process run state, C(None) if the service is not loaded and will not be started + returned: if state option is used + type: str + sample: "reloaded" +status: + description: a dictionary with the key=value pairs returned by `system-control show-json` or C(None) if the service is not loaded + returned: success + type: complex + contains: + After: + returned: success + type: list + sample: ["/etc/service-bundles/targets/basic","../sshdgenkeys", "log"] + Before: + returned: success + type: list + sample: ["/etc/service-bundles/targets/shutdown"] + Conflicts: + returned: success + type: list + sample: '[]' + DaemontoolsEncoreState: + returned: success + type: str + sample: "running" + DaemontoolsState: + returned: success + type: str + sample: "up" + Enabled: + returned: success + type: bool + sample: True + LogService: + returned: success + type: 
str + sample: "../cyclog@sshd" + MainPID: + returned: success + type: int + sample: 661 + Paused: + returned: success + type: bool + sample: 'False' + ReadyAfterRun: + returned: success + type: bool + sample: 'False' + RemainAfterExit: + returned: success + type: bool + sample: 'False' + Required-By: + returned: success + type: list + sample: '[]' + RestartExitStatusCode: + returned: success + type: int + sample: '0' + RestartExitStatusNumber: + returned: success + type: int + sample: '0' + RestartTimestamp: + returned: success + type: int + sample: 4611686019935648081 + RestartUTCTimestamp: + returned: success + type: int + sample: 1508260140 + RunExitStatusCode: + returned: success + type: int + sample: '0' + RunExitStatusNumber: + returned: success + type: int + sample: '0' + RunTimestamp: + returned: success + type: int + sample: 4611686019935648081 + RunUTCTimestamp: + returned: success + type: int + sample: 1508260140 + StartExitStatusCode: + returned: success + type: int + sample: 1 + StartExitStatusNumber: + returned: success + type: int + sample: '0' + StartTimestamp: + returned: success + type: int + sample: 4611686019935648081 + StartUTCTimestamp: + returned: success + type: int + sample: 1508260140 + StopExitStatusCode: + returned: success + type: int + sample: '0' + StopExitStatusNumber: + returned: success + type: int + sample: '0' + StopTimestamp: + returned: success + type: int + sample: 4611686019935648081 + StopUTCTimestamp: + returned: success + type: int + sample: 1508260140 + Stopped-By: + returned: success + type: list + sample: ["/etc/service-bundles/targets/shutdown"] + Timestamp: + returned: success + type: int + sample: 4611686019935648081 + UTCTimestamp: + returned: success + type: int + sample: 1508260140 + Want: + returned: success + type: str + sample: "nothing" + Wanted-By: + returned: success + type: list + sample: ["/etc/service-bundles/targets/server","/etc/service-bundles/targets/sockets"] + Wants: + returned: success + type: list + sample: ["/etc/service-bundles/targets/basic","../sshdgenkeys"] +user: + description: whether the user-level service manager is called + returned: success + type: bool + sample: False +''' + + +import json + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.service import fail_if_missing +from ansible.module_utils._text import to_native + + +def run_sys_ctl(module, args): + sys_ctl = [module.get_bin_path('system-control', required=True)] + if module.params['user']: + sys_ctl = sys_ctl + ['--user'] + return module.run_command(sys_ctl + args) + + +def get_service_path(module, service): + (rc, out, err) = run_sys_ctl(module, ['find', service]) + # fail if service not found + if rc != 0: + fail_if_missing(module, False, service, msg='host') + else: + return to_native(out).strip() + + +def service_is_enabled(module, service_path): + (rc, out, err) = run_sys_ctl(module, ['is-enabled', service_path]) + return rc == 0 + + +def service_is_preset_enabled(module, service_path): + (rc, out, err) = run_sys_ctl(module, ['preset', '--dry-run', service_path]) + return to_native(out).strip().startswith("enable") + + +def service_is_loaded(module, service_path): + (rc, out, err) = run_sys_ctl(module, ['is-loaded', service_path]) + return rc == 0 + + +def get_service_status(module, service_path): + (rc, out, err) = run_sys_ctl(module, ['show-json', service_path]) + # will fail if the service is not loaded + if err is not None and err: + module.fail_json(msg=err) + else: + json_out =
json.loads(to_native(out).strip()) + status = json_out[service_path] # descend past service path header + return status + + +def service_is_running(service_status): + return service_status['DaemontoolsEncoreState'] in set(['starting', 'started', 'running']) + + +def handle_enabled(module, result, service_path): + """Enable or disable a service as needed. + + - 'preset' will set the enabled state according to available preset file settings. + - 'enabled' will set the enabled state explicitly, independently of preset settings. + + These options are set to "mutually exclusive" but the explicit 'enabled' option will + have priority if the check is bypassed. + """ + + # computed prior in control flow + preset = result['preset'] + enabled = result['enabled'] + + # preset, effect only if option set to true (no reverse preset) + if module.params['preset']: + action = 'preset' + + # run preset if needed + if preset != module.params['preset']: + result['changed'] = True + if not module.check_mode: + (rc, out, err) = run_sys_ctl(module, [action, service_path]) + if rc != 0: + module.fail_json(msg="Unable to %s service %s: %s" % (action, service_path, out + err)) + result['preset'] = not preset + result['enabled'] = not enabled + + # enabled/disabled state + if module.params['enabled'] is not None: + if module.params['enabled']: + action = 'enable' + else: + action = 'disable' + + # change enable/disable if needed + if enabled != module.params['enabled']: + result['changed'] = True + if not module.check_mode: + (rc, out, err) = run_sys_ctl(module, [action, service_path]) + if rc != 0: + module.fail_json(msg="Unable to %s service %s: %s" % (action, service_path, out + err)) + result['enabled'] = not enabled + result['preset'] = not preset + + +def handle_state(module, result, service_path): + """Set service running state as needed. + + Takes into account the fact that a service may not be loaded (no supervise directory) in + which case it is 'stopped' as far as the service manager is concerned. No status information + can be obtained and the service can only be 'started'. 
+ """ + # default to desired state, no action + result['state'] = module.params['state'] + state = module.params['state'] + action = None + + # computed prior in control flow, possibly modified by handle_enabled() + enabled = result['enabled'] + + # service not loaded -> not started by manager, no status information + if not service_is_loaded(module, service_path): + if state in ['started', 'restarted', 'reloaded']: + action = 'start' + result['state'] = 'started' + elif state == 'reset': + if enabled: + action = 'start' + result['state'] = 'started' + else: + result['state'] = None + else: + result['state'] = None + + # service is loaded + else: + # get status information + result['status'] = get_service_status(module, service_path) + running = service_is_running(result['status']) + + if state == 'started': + if not running: + action = 'start' + elif state == 'stopped': + if running: + action = 'stop' + # reset = start/stop according to enabled status + elif state == 'reset': + if enabled is not running: + if running: + action = 'stop' + result['state'] = 'stopped' + else: + action = 'start' + result['state'] = 'started' + # start if not running, 'service' module constraint + elif state == 'restarted': + if not running: + action = 'start' + result['state'] = 'started' + else: + action = 'condrestart' + # start if not running, 'service' module constraint + elif state == 'reloaded': + if not running: + action = 'start' + result['state'] = 'started' + else: + action = 'hangup' + + # change state as needed + if action: + result['changed'] = True + if not module.check_mode: + (rc, out, err) = run_sys_ctl(module, [action, service_path]) + if rc != 0: + module.fail_json(msg="Unable to %s service %s: %s" % (action, service_path, err)) + +# =========================================== +# Main control flow + + +def main(): + module = AnsibleModule( + argument_spec=dict( + name=dict(required=True), + state=dict(choices=['started', 'stopped', 'reset', 'restarted', 'reloaded'], type='str'), + enabled=dict(type='bool'), + preset=dict(type='bool'), + user=dict(type='bool', default=False), + ), + supports_check_mode=True, + mutually_exclusive=[['enabled', 'preset']], + ) + + service = module.params['name'] + rc = 0 + out = err = '' + result = { + 'name': service, + 'changed': False, + 'status': None, + } + + # check service can be found (or fail) and get path + service_path = get_service_path(module, service) + + # get preliminary service facts + result['service_path'] = service_path + result['user'] = module.params['user'] + result['enabled'] = service_is_enabled(module, service_path) + result['preset'] = result['enabled'] is service_is_preset_enabled(module, service_path) + + # set enabled state, service need not be loaded + if module.params['enabled'] is not None or module.params['preset']: + handle_enabled(module, result, service_path) + + # set service running state + if module.params['state'] is not None: + handle_state(module, result, service_path) + + # get final service status if possible + if service_is_loaded(module, service_path): + result['status'] = get_service_status(module, service_path) + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/system/ohai.py b/plugins/modules/system/ohai.py new file mode 100644 index 0000000000..4cc8982e19 --- /dev/null +++ b/plugins/modules/system/ohai.py @@ -0,0 +1,51 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2012, Michael DeHaan +# GNU General Public License v3.0+ (see COPYING or 
https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: ohai +short_description: Returns inventory data from I(Ohai) +description: + - Similar to the M(facter) module, this runs the I(Ohai) discovery program + (U(https://docs.chef.io/ohai.html)) on the remote host and + returns JSON inventory data. + I(Ohai) data is a bit more verbose and nested than I(facter). +options: {} +notes: [] +requirements: [ "ohai" ] +author: + - "Ansible Core Team" + - "Michael DeHaan (@mpdehaan)" +''' + +EXAMPLES = ''' +# Retrieve (ohai) data from all Web servers and store in one-file per host +ansible webservers -m ohai --tree=/tmp/ohaidata +''' +import json + +from ansible.module_utils.basic import AnsibleModule + + +def main(): + module = AnsibleModule( + argument_spec=dict() + ) + cmd = ["/usr/bin/env", "ohai"] + rc, out, err = module.run_command(cmd, check_rc=True) + module.exit_json(**json.loads(out)) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/system/open_iscsi.py b/plugins/modules/system/open_iscsi.py new file mode 100644 index 0000000000..22a835b39d --- /dev/null +++ b/plugins/modules/system/open_iscsi.py @@ -0,0 +1,365 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2013, Serge van Ginderachter +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = r''' +--- +module: open_iscsi +author: +- Serge van Ginderachter (@srvg) +short_description: Manage iSCSI targets with Open-iSCSI +description: + - Discover targets on given portal, (dis)connect targets, mark targets to + manually or auto start, return device nodes of connected targets. +requirements: + - open_iscsi library and tools (iscsiadm) +options: + portal: + description: + - The IP address of the iSCSI target. + type: str + aliases: [ ip ] + port: + description: + - The port on which the iSCSI target process listens. + type: str + default: 3260 + target: + description: + - The iSCSI target name. + type: str + aliases: [ name, targetname ] + login: + description: + - Whether the target node should be connected. + type: bool + aliases: [ state ] + node_auth: + description: + - The value for C(discovery.sendtargets.auth.authmethod). + type: str + default: CHAP + node_user: + description: + - The value for C(discovery.sendtargets.auth.username). + type: str + node_pass: + description: + - The value for C(discovery.sendtargets.auth.password). + type: str + auto_node_startup: + description: + - Whether the target node should be automatically connected at startup. + type: bool + aliases: [ automatic ] + discover: + description: + - Whether the list of target nodes on the portal should be + (re)discovered and added to the persistent iSCSI database. + - Keep in mind that C(iscsiadm) discovery resets configuration, like C(node.startup) + to manual, hence combined with C(auto_node_startup=yes) will always return + a changed state. + type: bool + show_nodes: + description: + - Whether the list of nodes in the persistent iSCSI database should be returned by the module. 
+ type: bool +''' + +EXAMPLES = r''' +- name: Perform a discovery on 10.1.2.3 and show available target nodes + open_iscsi: + show_nodes: yes + discover: yes + portal: 10.1.2.3 + +# NOTE: Only works if exactly one target is exported to the initiator +- name: Discover targets on portal and login to the one available + open_iscsi: + portal: '{{ iscsi_target }}' + login: yes + discover: yes + +- name: Connect to the named target, after updating the local persistent database (cache) + open_iscsi: + login: yes + target: iqn.1986-03.com.sun:02:f8c1f9e0-c3ec-ec84-c9c9-8bfb0cd5de3d + +- name: Disconnect from the cached named target + open_iscsi: + login: no + target: iqn.1986-03.com.sun:02:f8c1f9e0-c3ec-ec84-c9c9-8bfb0cd5de3d +''' + +import glob +import os +import time + +from ansible.module_utils.basic import AnsibleModule + +ISCSIADM = 'iscsiadm' + + +def compare_nodelists(l1, l2): + l1.sort() + l2.sort() + return l1 == l2 + + +def iscsi_get_cached_nodes(module, portal=None): + cmd = '%s --mode node' % iscsiadm_cmd + (rc, out, err) = module.run_command(cmd) + + if rc == 0: + lines = out.splitlines() + nodes = [] + for line in lines: + # line format is "ip:port,target_portal_group_tag targetname" + parts = line.split() + if len(parts) > 2: + module.fail_json(msg='error parsing output', cmd=cmd) + target = parts[1] + parts = parts[0].split(':') + target_portal = parts[0] + + if portal is None or portal == target_portal: + nodes.append(target) + + # older versions of iscsiadm don't have nice return codes + # for newer versions see iscsiadm(8); also usr/iscsiadm.c for details + # err can contain [N|n]o records... + elif rc == 21 or (rc == 255 and "o records found" in err): + nodes = [] + else: + module.fail_json(cmd=cmd, rc=rc, msg=err) + + return nodes + + +def iscsi_discover(module, portal, port): + cmd = '%s --mode discovery --type sendtargets --portal %s:%s' % (iscsiadm_cmd, portal, port) + (rc, out, err) = module.run_command(cmd) + + if rc > 0: + module.fail_json(cmd=cmd, rc=rc, msg=err) + + +def target_loggedon(module, target): + cmd = '%s --mode session' % iscsiadm_cmd + (rc, out, err) = module.run_command(cmd) + + if rc == 0: + return target in out + elif rc == 21: + return False + else: + module.fail_json(cmd=cmd, rc=rc, msg=err) + + +def target_login(module, target, portal=None, port=None): + node_auth = module.params['node_auth'] + node_user = module.params['node_user'] + node_pass = module.params['node_pass'] + + if node_user: + params = [('node.session.auth.authmethod', node_auth), + ('node.session.auth.username', node_user), + ('node.session.auth.password', node_pass)] + for (name, value) in params: + cmd = '%s --mode node --targetname %s --op=update --name %s --value %s' % (iscsiadm_cmd, target, name, value) + (rc, out, err) = module.run_command(cmd) + if rc > 0: + module.fail_json(cmd=cmd, rc=rc, msg=err) + + cmd = '%s --mode node --targetname %s --login' % (iscsiadm_cmd, target) + if portal is not None and port is not None: + cmd += ' --portal %s:%s' % (portal, port) + + (rc, out, err) = module.run_command(cmd) + + if rc > 0: + module.fail_json(cmd=cmd, rc=rc, msg=err) + + +def target_logout(module, target): + cmd = '%s --mode node --targetname %s --logout' % (iscsiadm_cmd, target) + (rc, out, err) = module.run_command(cmd) + + if rc > 0: + module.fail_json(cmd=cmd, rc=rc, msg=err) + + +def target_device_node(module, target): + # if anyone knows a better way to find out which device nodes get created for + # a given target...
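+    # Editorial note, hypothetical example: for a target such as
+    # iqn.2013-01.example.org:storage, the udev by-path symlinks typically look like
+    # /dev/disk/by-path/ip-10.1.2.3:3260-iscsi-iqn.2013-01.example.org:storage-lun-0
+    # and os.path.realpath() below resolves each of them to a node such as /dev/sdb.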
+ + devices = glob.glob('/dev/disk/by-path/*%s*' % target) + devdisks = [] + for dev in devices: + # exclude partitions + if "-part" not in dev: + devdisk = os.path.realpath(dev) + # only add once (multi-path?) + if devdisk not in devdisks: + devdisks.append(devdisk) + return devdisks + + +def target_isauto(module, target): + cmd = '%s --mode node --targetname %s' % (iscsiadm_cmd, target) + (rc, out, err) = module.run_command(cmd) + + if rc == 0: + lines = out.splitlines() + for line in lines: + if 'node.startup' in line: + return 'automatic' in line + return False + else: + module.fail_json(cmd=cmd, rc=rc, msg=err) + + +def target_setauto(module, target): + cmd = '%s --mode node --targetname %s --op=update --name node.startup --value automatic' % (iscsiadm_cmd, target) + (rc, out, err) = module.run_command(cmd) + + if rc > 0: + module.fail_json(cmd=cmd, rc=rc, msg=err) + + +def target_setmanual(module, target): + cmd = '%s --mode node --targetname %s --op=update --name node.startup --value manual' % (iscsiadm_cmd, target) + (rc, out, err) = module.run_command(cmd) + + if rc > 0: + module.fail_json(cmd=cmd, rc=rc, msg=err) + + +def main(): + # load ansible module object + module = AnsibleModule( + argument_spec=dict( + + # target + portal=dict(type='str', aliases=['ip']), + port=dict(type='str', default='3260'), + target=dict(type='str', aliases=['name', 'targetname']), + node_auth=dict(type='str', default='CHAP'), + node_user=dict(type='str'), + node_pass=dict(type='str', no_log=True), + + # actions + login=dict(type='bool', aliases=['state']), + auto_node_startup=dict(type='bool', aliases=['automatic']), + discover=dict(type='bool', default=False), + show_nodes=dict(type='bool', default=False), + ), + + required_together=[['discover_user', 'discover_pass'], + ['node_user', 'node_pass']], + supports_check_mode=True, + ) + + global iscsiadm_cmd + iscsiadm_cmd = module.get_bin_path('iscsiadm', required=True) + + # parameters + portal = module.params['portal'] + target = module.params['target'] + port = module.params['port'] + login = module.params['login'] + automatic = module.params['auto_node_startup'] + discover = module.params['discover'] + show_nodes = module.params['show_nodes'] + + check = module.check_mode + + cached = iscsi_get_cached_nodes(module, portal) + + # return json dict + result = {} + result['changed'] = False + + if discover: + if portal is None: + module.fail_json(msg="Need to specify at least the portal (ip) to discover") + elif check: + nodes = cached + else: + iscsi_discover(module, portal, port) + nodes = iscsi_get_cached_nodes(module, portal) + if not compare_nodelists(cached, nodes): + result['changed'] |= True + result['cache_updated'] = True + else: + nodes = cached + + if login is not None or automatic is not None: + if target is None: + if len(nodes) > 1: + module.fail_json(msg="Need to specify a target") + else: + target = nodes[0] + else: + # check given target is in cache + check_target = False + for node in nodes: + if node == target: + check_target = True + break + if not check_target: + module.fail_json(msg="Specified target not found") + + if show_nodes: + result['nodes'] = nodes + + if login is not None: + loggedon = target_loggedon(module, target) + if (login and loggedon) or (not login and not loggedon): + result['changed'] |= False + if login: + result['devicenodes'] = target_device_node(module, target) + elif not check: + if login: + target_login(module, target, portal, port) + # give udev some time + time.sleep(1) + result['devicenodes'] = 
target_device_node(module, target) + else: + target_logout(module, target) + result['changed'] |= True + result['connection_changed'] = True + else: + result['changed'] |= True + result['connection_changed'] = True + + if automatic is not None: + isauto = target_isauto(module, target) + if (automatic and isauto) or (not automatic and not isauto): + result['changed'] |= False + result['automatic_changed'] = False + elif not check: + if automatic: + target_setauto(module, target) + else: + target_setmanual(module, target) + result['changed'] |= True + result['automatic_changed'] = True + else: + result['changed'] |= True + result['automatic_changed'] = True + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/system/openwrt_init.py b/plugins/modules/system/openwrt_init.py new file mode 100644 index 0000000000..89407087f5 --- /dev/null +++ b/plugins/modules/system/openwrt_init.py @@ -0,0 +1,199 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# (c) 2016, Andrew Gaffney +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +module: openwrt_init +author: + - "Andrew Gaffney (@agaffney)" +short_description: Manage services on OpenWrt +description: + - Controls OpenWrt services on remote hosts. +options: + name: + description: + - Name of the service. + required: true + aliases: ['service'] + state: + description: + - C(started)/C(stopped) are idempotent actions that will not run commands unless necessary. + C(restarted) will always bounce the service. C(reloaded) will always reload. + choices: [ 'started', 'stopped', 'restarted', 'reloaded' ] + enabled: + description: + - Whether the service should start on boot. B(At least one of state and enabled is required.) + type: bool + pattern: + description: + - If the service does not respond to the 'running' command, name a + substring to look for as would be found in the output of the I(ps) + command as a stand-in for a 'running' result. If the string is found, + the service will be assumed to be running. +notes: + - One option other than name is required.
+requirements: + - An OpenWrt system (with python) +''' + +EXAMPLES = ''' +# Example action to start service httpd, if not running +- openwrt_init: + state: started + name: httpd + +# Example action to stop service cron, if running +- openwrt_init: + name: cron + state: stopped + +# Example action to reload service httpd, in all cases +- openwrt_init: + name: httpd + state: reloaded + +# Example action to enable service httpd +- openwrt_init: + name: httpd + enabled: yes +''' + +RETURN = ''' +''' + +import os +import glob +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_bytes, to_native + +module = None +init_script = None + + +# =============================== +# Check if service is enabled +def is_enabled(): + (rc, out, err) = module.run_command("%s enabled" % init_script) + if rc == 0: + return True + return False + + +# =========================================== +# Main control flow + +def main(): + global module, init_script + # init + module = AnsibleModule( + argument_spec=dict( + name=dict(required=True, type='str', aliases=['service']), + state=dict(choices=['started', 'stopped', 'restarted', 'reloaded'], type='str'), + enabled=dict(type='bool'), + pattern=dict(required=False, default=None), + ), + supports_check_mode=True, + required_one_of=[['state', 'enabled']], + ) + + # initialize + service = module.params['name'] + init_script = '/etc/init.d/' + service + rc = 0 + out = err = '' + result = { + 'name': service, + 'changed': False, + } + + # check if service exists + if not os.path.exists(init_script): + module.fail_json(msg='service %s does not exist' % service) + + # Enable/disable service startup at boot if requested + if module.params['enabled'] is not None: + # do we need to enable the service? 
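+        # Editorial note: is_enabled() above shells out to '/etc/init.d/<name> enabled',
+        # which by OpenWrt convention exits 0 when the boot symlinks exist under
+        # /etc/rc.d/, i.e. when the service is set to start at boot.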
+ enabled = is_enabled() + + # default to current state + result['enabled'] = enabled + + # Change enable/disable if needed + if enabled != module.params['enabled']: + result['changed'] = True + if module.params['enabled']: + action = 'enable' + else: + action = 'disable' + + if not module.check_mode: + (rc, out, err) = module.run_command("%s %s" % (init_script, action)) + # openwrt init scripts can return a non-zero exit code on a successful 'enable' + # command if the init script doesn't contain a STOP value, so we ignore the exit + # code and explicitly check if the service is now in the desired state + if is_enabled() != module.params['enabled']: + module.fail_json(msg="Unable to %s service %s: %s" % (action, service, err)) + + result['enabled'] = not enabled + + if module.params['state'] is not None: + running = False + + # check if service is currently running + if module.params['pattern']: + # Find ps binary + psbin = module.get_bin_path('ps', True) + + # this should be busybox ps, so we only want/need the 'w' option + (rc, psout, pserr) = module.run_command('%s w' % psbin) + # If rc is 0, set running as appropriate + if rc == 0: + lines = psout.split("\n") + for line in lines: + if module.params['pattern'] in line and "pattern=" not in line: + # so as to not confuse ./hacking/test-module.py + running = True + break + else: + (rc, out, err) = module.run_command("%s running" % init_script) + if rc == 0: + running = True + + # default to desired state + result['state'] = module.params['state'] + + # determine action, if any + action = None + if module.params['state'] == 'started': + if not running: + action = 'start' + result['changed'] = True + elif module.params['state'] == 'stopped': + if running: + action = 'stop' + result['changed'] = True + else: + action = module.params['state'][:-2] # remove 'ed' from restarted/reloaded + result['state'] = 'started' + result['changed'] = True + + if action: + if not module.check_mode: + (rc, out, err) = module.run_command("%s %s" % (init_script, action)) + if rc != 0: + module.fail_json(msg="Unable to %s service %s: %s" % (action, service, err)) + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/system/osx_defaults.py b/plugins/modules/system/osx_defaults.py new file mode 100644 index 0000000000..fc8ec76a82 --- /dev/null +++ b/plugins/modules/system/osx_defaults.py @@ -0,0 +1,391 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2014, GeekChimp - Franck Nijhof +# Copyright: (c) 2019, Ansible project +# Copyright: (c) 2019, Abhijeet Kasurde +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = { + 'metadata_version': '1.1', + 'status': ['stableinterface'], + 'supported_by': 'community' +} + +DOCUMENTATION = r''' +--- +module: osx_defaults +author: +- Franck Nijhof (@frenck) +short_description: Manage macOS user defaults +description: + - osx_defaults allows users to read, write, and delete macOS user defaults from Ansible scripts. + - macOS applications and other programs use the defaults system to record user preferences and other + information that must be maintained when the applications are not running (such as default font for new + documents, or the position of an Info panel). +options: + domain: + description: + - The domain is a domain name of the form C(com.companyname.appname).
+ type: str + default: NSGlobalDomain + host: + description: + - The host on which the preference should apply. + - The special value C(currentHost) corresponds to the C(-currentHost) switch of the defaults commandline tool. + type: str + key: + description: + - The key of the user preference. + type: str + required: true + type: + description: + - The type of value to write. + type: str + choices: [ array, bool, boolean, date, float, int, integer, string ] + default: string + array_add: + description: + - Add new elements to the array for a key which has an array as its value. + type: bool + default: no + value: + description: + - The value to write. + - Only required when C(state=present). + type: raw + state: + description: + - The state of the user defaults. + - If set to C(list) will query the given parameter specified by C(key). Returns 'null' if nothing is found or the key is mis-spelled. + - C(list) added in version 2.8. + type: str + choices: [ absent, list, present ] + default: present + path: + description: + - The path in which to search for C(defaults). + type: str + default: /usr/bin:/usr/local/bin +notes: + - macOS caches defaults. You may need to log out and log back in to apply the changes. +''' + +EXAMPLES = r''' +- osx_defaults: + domain: com.apple.Safari + key: IncludeInternalDebugMenu + type: bool + value: true + state: present + +- osx_defaults: + domain: NSGlobalDomain + key: AppleMeasurementUnits + type: string + value: Centimeters + state: present + +- osx_defaults: + domain: /Library/Preferences/com.apple.SoftwareUpdate + key: AutomaticCheckEnabled + type: int + value: 1 + become: yes + +- osx_defaults: + domain: com.apple.screensaver + host: currentHost + key: showClock + type: int + value: 1 + +- osx_defaults: + key: AppleMeasurementUnits + type: string + value: Centimeters + +- osx_defaults: + key: AppleLanguages + type: array + value: + - en + - nl + +- osx_defaults: + domain: com.geekchimp.macable + key: ExampleKeyToRemove + state: absent +''' + +from datetime import datetime +import re + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.six import binary_type, text_type + + +# exceptions --------------------------------------------------------------- {{{ +class OSXDefaultsException(Exception): + def __init__(self, msg): + self.message = msg + + +# /exceptions -------------------------------------------------------------- }}} + +# class MacDefaults -------------------------------------------------------- {{{ +class OSXDefaults(object): + """ Class to manage Mac OS user defaults """ + + # init ---------------------------------------------------------------- {{{ + def __init__(self, module): + """ Initialize this module.
Finds 'defaults' executable and preps the parameters """ + # Initial var for storing current defaults value + self.current_value = None + self.module = module + self.domain = module.params['domain'] + self.host = module.params['host'] + self.key = module.params['key'] + self.type = module.params['type'] + self.array_add = module.params['array_add'] + self.value = module.params['value'] + self.state = module.params['state'] + self.path = module.params['path'] + + # Try to find the defaults executable + self.executable = self.module.get_bin_path( + 'defaults', + required=False, + opt_dirs=self.path.split(':'), + ) + + if not self.executable: + raise OSXDefaultsException("Unable to locate defaults executable.") + + # Ensure the value is the correct type + if self.state != 'absent': + self.value = self._convert_type(self.type, self.value) + + # /init --------------------------------------------------------------- }}} + + # tools --------------------------------------------------------------- {{{ + @staticmethod + def _convert_type(data_type, value): + """ Converts value to given type """ + if data_type == "string": + return str(value) + elif data_type in ["bool", "boolean"]: + if isinstance(value, (binary_type, text_type)): + value = value.lower() + if value in [True, 1, "true", "1", "yes"]: + return True + elif value in [False, 0, "false", "0", "no"]: + return False + raise OSXDefaultsException("Invalid boolean value: {0}".format(repr(value))) + elif data_type == "date": + try: + return datetime.strptime(value.split("+")[0].strip(), "%Y-%m-%d %H:%M:%S") + except ValueError: + raise OSXDefaultsException( + "Invalid date value: {0}. Required format yyyy-mm-dd hh:mm:ss.".format(repr(value)) + ) + elif data_type in ["int", "integer"]: + if not str(value).isdigit(): + raise OSXDefaultsException("Invalid integer value: {0}".format(repr(value))) + return int(value) + elif data_type == "float": + try: + value = float(value) + except ValueError: + raise OSXDefaultsException("Invalid float value: {0}".format(repr(value))) + return value + elif data_type == "array": + if not isinstance(value, list): + raise OSXDefaultsException("Invalid value. Expected value to be an array") + return value + + raise OSXDefaultsException('Type is not supported: {0}'.format(data_type)) + + def _host_args(self): + """ Returns a normalized list of commandline arguments based on the "host" attribute """ + if self.host is None: + return [] + elif self.host == 'currentHost': + return ['-currentHost'] + else: + return ['-host', self.host] + + def _base_command(self): + """ Returns a list containing the "defaults" executable and any common base arguments """ + return [self.executable] + self._host_args() + + @staticmethod + def _convert_defaults_str_to_list(value): + """ Converts array output from defaults to a list """ + # Split output of defaults.
Every line contains a value + value = value.splitlines() + + # Remove first and last item, those are not actual values + value.pop(0) + value.pop(-1) + + # Remove extra spaces and comma (,) at the end of values + value = [re.sub(',$', '', x.strip(' ')) for x in value] + + return value + + # /tools -------------------------------------------------------------- }}} + + # commands ------------------------------------------------------------ {{{ + def read(self): + """ Reads value of this domain & key from defaults """ + # First try to find out the type + rc, out, err = self.module.run_command(self._base_command() + ["read-type", self.domain, self.key]) + + # If RC is 1, the key does not exist + if rc == 1: + return None + + # If the RC is not 0, something terrible happened! + if rc != 0: + raise OSXDefaultsException("An error occurred while reading key type from defaults: %s" % out) + + # OK, let's parse the type from the output + data_type = out.strip().replace('Type is ', '') + + # Now get the current value + rc, out, err = self.module.run_command(self._base_command() + ["read", self.domain, self.key]) + + # Strip output + out = out.strip() + + # A non-zero RC at this point is strange... + if rc != 0: + raise OSXDefaultsException("An error occurred while reading key value from defaults: %s" % out) + + # Convert string to list when type is array + if data_type == "array": + out = self._convert_defaults_str_to_list(out) + + # Store the current_value + self.current_value = self._convert_type(data_type, out) + + def write(self): + """ Writes value to this domain & key to defaults """ + # We need to convert some values so the defaults commandline understands them + if isinstance(self.value, bool): + if self.value: + value = "TRUE" + else: + value = "FALSE" + elif isinstance(self.value, (int, float)): + value = str(self.value) + elif self.array_add and self.current_value is not None: + value = list(set(self.value) - set(self.current_value)) + elif isinstance(self.value, datetime): + value = self.value.strftime('%Y-%m-%d %H:%M:%S') + else: + value = self.value + + # When the type is array and array_add is enabled, morph the type :) + if self.type == "array" and self.array_add: + self.type = "array-add" + + # All values should be a list, for easy passing to the command + if not isinstance(value, list): + value = [value] + + rc, out, err = self.module.run_command(self._base_command() + ['write', self.domain, self.key, '-' + self.type] + value) + + if rc != 0: + raise OSXDefaultsException('An error occurred while writing value to defaults: %s' % out) + + def delete(self): + """ Deletes defaults key from domain """ + rc, out, err = self.module.run_command(self._base_command() + ['delete', self.domain, self.key]) + if rc != 0: + raise OSXDefaultsException("An error occurred while deleting key from defaults: %s" % out) + + # /commands ----------------------------------------------------------- }}} + + # run ----------------------------------------------------------------- {{{ + def run(self): + """ Does the magic! :) """ + + # Get the current value from defaults + self.read() + + if self.state == 'list': + self.module.exit_json(key=self.key, value=self.current_value) + + # Handle absent state + if self.state == "absent": + if self.current_value is None: + return False + if self.module.check_mode: + return True + self.delete() + return True + + # There is a type mismatch! Given type does not match the type in defaults
+ value_type = type(self.value) + if self.current_value is not None and not isinstance(self.current_value, value_type): + raise OSXDefaultsException("Type mismatch. Type in defaults: %s" % type(self.current_value).__name__) + + # Current value matches the given value. Nothing needs to be done. Arrays need extra care + if self.type == "array" and self.current_value is not None and not self.array_add and \ + set(self.current_value) == set(self.value): + return False + elif self.type == "array" and self.current_value is not None and self.array_add and len(list(set(self.value) - set(self.current_value))) == 0: + return False + elif self.current_value == self.value: + return False + + if self.module.check_mode: + return True + + # Change/Create/Set given key/value for domain in defaults + self.write() + return True + + # /run ---------------------------------------------------------------- }}} + + +# /class MacDefaults ------------------------------------------------------ }}} + + +# main -------------------------------------------------------------------- {{{ +def main(): + module = AnsibleModule( + argument_spec=dict( + domain=dict(type='str', default='NSGlobalDomain'), + host=dict(type='str'), + key=dict(type='str'), + type=dict(type='str', default='string', choices=['array', 'bool', 'boolean', 'date', 'float', 'int', 'integer', 'string']), + array_add=dict(type='bool', default=False), + value=dict(type='raw'), + state=dict(type='str', default='present', choices=['absent', 'list', 'present']), + path=dict(type='str', default='/usr/bin:/usr/local/bin'), + ), + supports_check_mode=True, + required_if=( + ('state', 'present', ['value']), + ), + ) + + try: + defaults = OSXDefaults(module=module) + module.exit_json(changed=defaults.run()) + except OSXDefaultsException as e: + module.fail_json(msg=e.message) + + +# /main ------------------------------------------------------------------- }}} + +if __name__ == '__main__': + main() diff --git a/plugins/modules/system/pam_limits.py b/plugins/modules/system/pam_limits.py new file mode 100644 index 0000000000..7ccc98be66 --- /dev/null +++ b/plugins/modules/system/pam_limits.py @@ -0,0 +1,315 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2014, Sebastien Rohaut +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: pam_limits +author: + - "Sebastien Rohaut (@usawa)" +short_description: Modify Linux PAM limits +description: + - The C(pam_limits) module modifies PAM limits. The default file is + C(/etc/security/limits.conf). For the full documentation, see C(man 5 + limits.conf). +options: + domain: + description: + - A username, @groupname, wildcard, uid/gid range. + required: true + limit_type: + description: + - Limit type, see C(man 5 limits.conf) for an explanation. + required: true + choices: [ "hard", "soft", "-" ] + limit_item: + description: + - The limit to be set. + required: true + choices: + - "core" + - "data" + - "fsize" + - "memlock" + - "nofile" + - "rss" + - "stack" + - "cpu" + - "nproc" + - "as" + - "maxlogins" + - "maxsyslogins" + - "priority" + - "locks" + - "sigpending" + - "msgqueue" + - "nice" + - "rtprio" + - "chroot" + value: + description: + - The value of the limit.
+ required: true + backup: + description: + - Create a backup file including the timestamp information so you can get + the original file back if you somehow clobbered it incorrectly. + required: false + type: bool + default: "no" + use_min: + description: + - If set to C(yes), the minimal value will be used or conserved. + If the specified value is lower than the value in the file, the file content is replaced with the new value, + else the content is not modified. + required: false + type: bool + default: "no" + use_max: + description: + - If set to C(yes), the maximal value will be used or conserved. + If the specified value is higher than the value in the file, the file content is replaced with the new value, + else the content is not modified. + required: false + type: bool + default: "no" + dest: + description: + - Modify the limits.conf path. + required: false + default: "/etc/security/limits.conf" + comment: + description: + - Comment associated with the limit. + required: false + default: '' +notes: + - If C(dest) file doesn't exist, it is created. +''' + +EXAMPLES = ''' +- name: Add or modify nofile soft limit for the user joe + pam_limits: + domain: joe + limit_type: soft + limit_item: nofile + value: 64000 + +- name: Add or modify fsize hard limit for the user smith. Keep or set the maximal value. + pam_limits: + domain: smith + limit_type: hard + limit_item: fsize + value: 1000000 + use_max: yes + +- name: Add or modify memlock, both soft and hard, limit for the user james with a comment. + pam_limits: + domain: james + limit_type: '-' + limit_item: memlock + value: unlimited + comment: unlimited memory lock for james + +- name: Add or modify hard nofile limits for wildcard domain + pam_limits: + domain: '*' + limit_type: hard + limit_item: nofile + value: 39693561 +''' + +import os +import os.path +import tempfile +import re + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_native + + +def main(): + pam_items = ['core', 'data', 'fsize', 'memlock', 'nofile', 'rss', 'stack', 'cpu', 'nproc', 'as', 'maxlogins', 'maxsyslogins', 'priority', 'locks', + 'sigpending', 'msgqueue', 'nice', 'rtprio', 'chroot'] + + pam_types = ['soft', 'hard', '-'] + + limits_conf = '/etc/security/limits.conf' + + module = AnsibleModule( + # not checking because of daisy chain to file module + argument_spec=dict( + domain=dict(required=True, type='str'), + limit_type=dict(required=True, type='str', choices=pam_types), + limit_item=dict(required=True, type='str', choices=pam_items), + value=dict(required=True, type='str'), + use_max=dict(default=False, type='bool'), + use_min=dict(default=False, type='bool'), + backup=dict(default=False, type='bool'), + dest=dict(default=limits_conf, type='str'), + comment=dict(required=False, default='', type='str') + ) + ) + + domain = module.params['domain'] + limit_type = module.params['limit_type'] + limit_item = module.params['limit_item'] + value = module.params['value'] + use_max = module.params['use_max'] + use_min = module.params['use_min'] + backup = module.params['backup'] + limits_conf = module.params['dest'] + new_comment = module.params['comment'] + + changed = False + + if os.path.isfile(limits_conf): + if not os.access(limits_conf, os.W_OK): + module.fail_json(msg="%s is not writable.
Use sudo" % limits_conf) + else: + limits_conf_dir = os.path.dirname(limits_conf) + if os.path.isdir(limits_conf_dir) and os.access(limits_conf_dir, os.W_OK): + open(limits_conf, 'a').close() + changed = True + else: + module.fail_json(msg="directory %s is not writable (check presence, access rights, use sudo)" % limits_conf_dir) + + if use_max and use_min: + module.fail_json(msg="Cannot use use_min and use_max at the same time.") + + if not (value in ['unlimited', 'infinity', '-1'] or value.isdigit()): + module.fail_json(msg="Argument 'value' can be one of 'unlimited', 'infinity', '-1' or positive number. Refer to manual pages for more details.") + + # Backup + if backup: + backup_file = module.backup_local(limits_conf) + + space_pattern = re.compile(r'\s+') + + message = '' + f = open(limits_conf, 'rb') + # Tempfile + nf = tempfile.NamedTemporaryFile(mode='w+') + + found = False + new_value = value + + for line in f: + line = to_native(line, errors='surrogate_or_strict') + if line.startswith('#'): + nf.write(line) + continue + + newline = re.sub(space_pattern, ' ', line).strip() + if not newline: + nf.write(line) + continue + + # Remove comment in line + newline = newline.split('#', 1)[0] + try: + old_comment = line.split('#', 1)[1] + except Exception: + old_comment = '' + + newline = newline.rstrip() + + if not new_comment: + new_comment = old_comment + + line_fields = newline.split(' ') + + if len(line_fields) != 4: + nf.write(line) + continue + + line_domain = line_fields[0] + line_type = line_fields[1] + line_item = line_fields[2] + actual_value = line_fields[3] + + if not (actual_value in ['unlimited', 'infinity', '-1'] or actual_value.isdigit()): + module.fail_json(msg="Invalid configuration of '%s'. Current value of %s is unsupported." % (limits_conf, line_item)) + + # Found the line + if line_domain == domain and line_type == limit_type and line_item == limit_item: + found = True + if value == actual_value: + message = line + nf.write(line) + continue + + actual_value_unlimited = actual_value in ['unlimited', 'infinity', '-1'] + value_unlimited = value in ['unlimited', 'infinity', '-1'] + + if use_max: + if value.isdigit() and actual_value.isdigit(): + new_value = str(max(int(value), int(actual_value))) + elif actual_value_unlimited: + new_value = actual_value + else: + new_value = value + + if use_min: + if value.isdigit() and actual_value.isdigit(): + new_value = str(min(int(value), int(actual_value))) + elif value_unlimited: + new_value = actual_value + else: + new_value = value + + # Change line only if value has changed + if new_value != actual_value: + changed = True + if new_comment: + new_comment = "\t#" + new_comment + new_limit = domain + "\t" + limit_type + "\t" + limit_item + "\t" + new_value + new_comment + "\n" + message = new_limit + nf.write(new_limit) + else: + message = line + nf.write(line) + else: + nf.write(line) + + if not found: + changed = True + if new_comment: + new_comment = "\t#" + new_comment + new_limit = domain + "\t" + limit_type + "\t" + limit_item + "\t" + new_value + new_comment + "\n" + message = new_limit + nf.write(new_limit) + + f.close() + nf.flush() + + # Copy tempfile to newfile + module.atomic_move(nf.name, f.name) + + try: + nf.close() + except Exception: + pass + + res_args = dict( + changed=changed, msg=message + ) + + if backup: + res_args['backup_file'] = backup_file + + module.exit_json(**res_args) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/system/pamd.py b/plugins/modules/system/pamd.py new file mode 
100644 index 0000000000..c8683bade0 --- /dev/null +++ b/plugins/modules/system/pamd.py @@ -0,0 +1,878 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2017, Kenneth D. Evensen +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = r''' +module: pamd +author: + - Kenneth D. Evensen (@kevensen) +short_description: Manage PAM Modules +description: + - Edit PAM service's type, control, module path and module arguments. + - In order for a PAM rule to be modified, the type, control and + module_path must match an existing rule. See man(5) pam.d for details. +options: + name: + description: + - The name generally refers to the PAM service file to + change, for example system-auth. + type: str + required: true + type: + description: + - The type of the PAM rule being modified. + - The C(type), C(control) and C(module_path) all must match a rule to be modified. + type: str + required: true + choices: [ account, -account, auth, -auth, password, -password, session, -session ] + control: + description: + - The control of the PAM rule being modified. + - This may be a complicated control with brackets. If this is the case, be + sure to put "[bracketed controls]" in quotes. + - The C(type), C(control) and C(module_path) all must match a rule to be modified. + type: str + required: true + module_path: + description: + - The module path of the PAM rule being modified. + - The C(type), C(control) and C(module_path) all must match a rule to be modified. + type: str + required: true + new_type: + description: + - The new type to assign to the new rule. + type: str + choices: [ account, -account, auth, -auth, password, -password, session, -session ] + new_control: + description: + - The new control to assign to the new rule. + type: str + new_module_path: + description: + - The new module path to be assigned to the new rule. + type: str + module_arguments: + description: + - When state is C(updated), the module_arguments will replace existing module_arguments. + - When state is C(args_absent) args matching those listed in module_arguments will be removed. + - When state is C(args_present) any args listed in module_arguments are added if + missing from the existing rule. + - Furthermore, if the module argument takes a value denoted by C(=), + the value will be changed to that specified in module_arguments. + type: list + state: + description: + - The default of C(updated) will modify an existing rule if type, + control and module_path all match an existing rule. + - With C(before), the new rule will be inserted before a rule matching type, + control and module_path. + - Similarly, with C(after), the new rule will be inserted after an existing rulematching type, + control and module_path. + - With either C(before) or C(after) new_type, new_control, and new_module_path must all be specified. + - If state is C(args_absent) or C(args_present), new_type, new_control, and new_module_path will be ignored. + - State C(absent) will remove the rule. The 'absent' state was added in Ansible 2.4. + type: str + choices: [ absent, before, after, args_absent, args_present, updated ] + default: updated + path: + description: + - This is the path to the PAM service files. 
+ type: path + default: /etc/pam.d + backup: + description: + - Create a backup file including the timestamp information so you can + get the original file back if you somehow clobbered it incorrectly. + type: bool + default: no +''' + +EXAMPLES = r''' +- name: Update pamd rule's control in /etc/pam.d/system-auth + pamd: + name: system-auth + type: auth + control: required + module_path: pam_faillock.so + new_control: sufficient + +- name: Update pamd rule's complex control in /etc/pam.d/system-auth + pamd: + name: system-auth + type: session + control: '[success=1 default=ignore]' + module_path: pam_succeed_if.so + new_control: '[success=2 default=ignore]' + +- name: Insert a new rule before an existing rule + pamd: + name: system-auth + type: auth + control: required + module_path: pam_faillock.so + new_type: auth + new_control: sufficient + new_module_path: pam_faillock.so + state: before + +- name: Insert a new rule pam_wheel.so with argument 'use_uid' after an \ + existing rule pam_rootok.so + pamd: + name: su + type: auth + control: sufficient + module_path: pam_rootok.so + new_type: auth + new_control: required + new_module_path: pam_wheel.so + module_arguments: 'use_uid' + state: after + +- name: Remove module arguments from an existing rule + pamd: + name: system-auth + type: auth + control: required + module_path: pam_faillock.so + module_arguments: '' + state: updated + +- name: Replace all module arguments in an existing rule + pamd: + name: system-auth + type: auth + control: required + module_path: pam_faillock.so + module_arguments: 'preauth + silent + deny=3 + unlock_time=604800 + fail_interval=900' + state: updated + +- name: Remove specific arguments from a rule + pamd: + name: system-auth + type: session + control: '[success=1 default=ignore]' + module_path: pam_succeed_if.so + module_arguments: crond,quiet + state: args_absent + +- name: Ensure specific arguments are present in a rule + pamd: + name: system-auth + type: session + control: '[success=1 default=ignore]' + module_path: pam_succeed_if.so + module_arguments: crond,quiet + state: args_present + +- name: Ensure specific arguments are present in a rule (alternative) + pamd: + name: system-auth + type: session + control: '[success=1 default=ignore]' + module_path: pam_succeed_if.so + module_arguments: + - crond + - quiet + state: args_present + +- name: Module arguments requiring commas must be listed as a Yaml list + pamd: + name: special-module + type: account + control: required + module_path: pam_access.so + module_arguments: + - listsep=, + state: args_present + +- name: Update specific argument value in a rule + pamd: + name: system-auth + type: auth + control: required + module_path: pam_faillock.so + module_arguments: 'fail_interval=300' + state: args_present + +- name: Add pam common-auth rule for duo + pamd: + name: common-auth + new_type: auth + new_control: '[success=1 default=ignore]' + new_module_path: '/lib64/security/pam_duo.so' + state: after + type: auth + module_path: pam_sss.so + control: 'requisite' +''' + +RETURN = r''' +change_count: + description: How many rules were changed. + type: int + sample: 1 + returned: success + version_added: 2.4 +new_rule: + description: The changes to the rule. This was available in Ansible 2.4 and Ansible 2.5. It was removed in Ansible 2.6. + type: str + sample: None None None sha512 shadow try_first_pass use_authtok + returned: success + version_added: 2.4 +updated_rule_(n): + description: The rule(s) that was/were changed. 
This is only available in + Ansible 2.4 and was removed in Ansible 2.5. + type: str + sample: + - password sufficient pam_unix.so sha512 shadow try_first_pass + use_authtok + returned: success + version_added: 2.4 +action: + description: + - "That action that was taken and is one of: update_rule, + insert_before_rule, insert_after_rule, args_present, args_absent, + absent. This was available in Ansible 2.4 and removed in Ansible 2.8" + returned: always + type: str + sample: "update_rule" + version_added: 2.4 +dest: + description: + - "Path to pam.d service that was changed. This is only available in + Ansible 2.3 and was removed in Ansible 2.4." + returned: success + type: str + sample: "/etc/pam.d/system-auth" +backupdest: + description: + - "The file name of the backup file, if created." + returned: success + type: str + version_added: 2.6 +... +''' + + +from ansible.module_utils.basic import AnsibleModule +import os +import re +from tempfile import NamedTemporaryFile +from datetime import datetime + + +RULE_REGEX = re.compile(r"""(?P-?(?:auth|account|session|password))\s+ + (?P\[.*\]|\S*)\s+ + (?P\S*)\s* + (?P.*)\s*""", re.X) + +RULE_ARG_REGEX = re.compile(r"""(\[.*\]|\S*)""") + +VALID_TYPES = ['account', '-account', 'auth', '-auth', 'password', '-password', 'session', '-session'] + + +class PamdLine(object): + + def __init__(self, line): + self.line = line + self.prev = None + self.next = None + + @property + def is_valid(self): + if self.line == '': + return True + return False + + def validate(self): + if not self.is_valid: + return False, "Rule is not valid " + self.line + return True, "Rule is valid " + self.line + + # Method to check if a rule matches the type, control and path. + def matches(self, rule_type, rule_control, rule_path, rule_args=None): + return False + + def __str__(self): + return str(self.line) + + +class PamdComment(PamdLine): + + def __init__(self, line): + super(PamdComment, self).__init__(line) + + @property + def is_valid(self): + if self.line.startswith('#'): + return True + return False + + +class PamdInclude(PamdLine): + def __init__(self, line): + super(PamdInclude, self).__init__(line) + + @property + def is_valid(self): + if self.line.startswith('@include'): + return True + return False + + +class PamdRule(PamdLine): + + valid_simple_controls = ['required', 'requisite', 'sufficient', 'optional', 'include', 'substack', 'definitive'] + valid_control_values = ['success', 'open_err', 'symbol_err', 'service_err', 'system_err', 'buf_err', + 'perm_denied', 'auth_err', 'cred_insufficient', 'authinfo_unavail', 'user_unknown', + 'maxtries', 'new_authtok_reqd', 'acct_expired', 'session_err', 'cred_unavail', + 'cred_expired', 'cred_err', 'no_module_data', 'conv_err', 'authtok_err', + 'authtok_recover_err', 'authtok_lock_busy', 'authtok_disable_aging', 'try_again', + 'ignore', 'abort', 'authtok_expired', 'module_unknown', 'bad_item', 'conv_again', + 'incomplete', 'default'] + valid_control_actions = ['ignore', 'bad', 'die', 'ok', 'done', 'reset'] + + def __init__(self, rule_type, rule_control, rule_path, rule_args=None): + self.prev = None + self.next = None + self._control = None + self._args = None + self.rule_type = rule_type + self.rule_control = rule_control + + self.rule_path = rule_path + self.rule_args = rule_args + + # Method to check if a rule matches the type, control and path. 
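# (Note: rule_args is accepted only to keep the signature uniform with
# PamdLine.matches; the comparison below deliberately ignores arguments, so
# two rules that differ only in their argument lists still match.)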
+ def matches(self, rule_type, rule_control, rule_path, rule_args=None): + if (rule_type == self.rule_type and + rule_control == self.rule_control and + rule_path == self.rule_path): + return True + return False + + @classmethod + def rule_from_string(cls, line): + rule_match = RULE_REGEX.search(line) + rule_args = parse_module_arguments(rule_match.group('args')) + return cls(rule_match.group('rule_type'), rule_match.group('control'), rule_match.group('path'), rule_args) + + def __str__(self): + if self.rule_args: + return '{0: <11}{1} {2} {3}'.format(self.rule_type, self.rule_control, self.rule_path, ' '.join(self.rule_args)) + return '{0: <11}{1} {2}'.format(self.rule_type, self.rule_control, self.rule_path) + + @property + def rule_control(self): + if isinstance(self._control, list): + return '[' + ' '.join(self._control) + ']' + return self._control + + @rule_control.setter + def rule_control(self, control): + if control.startswith('['): + control = control.replace(' = ', '=').replace('[', '').replace(']', '') + self._control = control.split(' ') + else: + self._control = control + + @property + def rule_args(self): + if not self._args: + return [] + return self._args + + @rule_args.setter + def rule_args(self, args): + self._args = parse_module_arguments(args) + + @property + def line(self): + return str(self) + + @classmethod + def is_action_unsigned_int(cls, string_num): + number = 0 + try: + number = int(string_num) + except ValueError: + return False + + if number >= 0: + return True + return False + + @property + def is_valid(self): + return self.validate()[0] + + def validate(self): + # Validate the rule type + if self.rule_type not in VALID_TYPES: + return False, "Rule type, " + self.rule_type + ", is not valid in rule " + self.line + # Validate the rule control + if isinstance(self._control, str) and self.rule_control not in PamdRule.valid_simple_controls: + return False, "Rule control, " + self.rule_control + ", is not valid in rule " + self.line + elif isinstance(self._control, list): + for control in self._control: + value, action = control.split("=") + if value not in PamdRule.valid_control_values: + return False, "Rule control value, " + value + ", is not valid in rule " + self.line + if action not in PamdRule.valid_control_actions and not PamdRule.is_action_unsigned_int(action): + return False, "Rule control action, " + action + ", is not valid in rule " + self.line + + # TODO: Validate path + + return True, "Rule is valid " + self.line + + +# PamdService encapsulates an entire service and contains one or more rules. It seems the best way is to do this +# as a doubly linked list. 
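# Illustrative sketch, not part of the module: after parsing two rules the
# links satisfy head.next.prev is head, the invariant that lets
# insert_before/insert_after splice a rule with O(1) pointer rewires instead
# of rebuilding a list:
#
#     svc = PamdService('auth required pam_env.so\nauth required pam_unix.so')
#     assert svc._head.next.prev is svc._head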
+class PamdService(object): + + def __init__(self, content): + self._head = None + self._tail = None + for line in content.splitlines(): + if line.lstrip().startswith('#'): + pamd_line = PamdComment(line) + elif line.lstrip().startswith('@include'): + pamd_line = PamdInclude(line) + elif line == '': + pamd_line = PamdLine(line) + else: + pamd_line = PamdRule.rule_from_string(line) + + self.append(pamd_line) + + def append(self, pamd_line): + if self._head is None: + self._head = self._tail = pamd_line + else: + pamd_line.prev = self._tail + pamd_line.next = None + self._tail.next = pamd_line + self._tail = pamd_line + + def remove(self, rule_type, rule_control, rule_path): + current_line = self._head + changed = 0 + + while current_line is not None: + if current_line.matches(rule_type, rule_control, rule_path): + if current_line.prev is not None: + current_line.prev.next = current_line.next + if current_line.next is not None: + current_line.next.prev = current_line.prev + else: + self._head = current_line.next + current_line.next.prev = None + changed += 1 + + current_line = current_line.next + return changed + + def get(self, rule_type, rule_control, rule_path): + lines = [] + current_line = self._head + while current_line is not None: + + if isinstance(current_line, PamdRule) and current_line.matches(rule_type, rule_control, rule_path): + lines.append(current_line) + + current_line = current_line.next + + return lines + + def has_rule(self, rule_type, rule_control, rule_path): + if self.get(rule_type, rule_control, rule_path): + return True + return False + + def update_rule(self, rule_type, rule_control, rule_path, + new_type=None, new_control=None, new_path=None, new_args=None): + # Get a list of rules we want to change + rules_to_find = self.get(rule_type, rule_control, rule_path) + + new_args = parse_module_arguments(new_args) + + changes = 0 + for current_rule in rules_to_find: + rule_changed = False + if new_type: + if(current_rule.rule_type != new_type): + rule_changed = True + current_rule.rule_type = new_type + if new_control: + if(current_rule.rule_control != new_control): + rule_changed = True + current_rule.rule_control = new_control + if new_path: + if(current_rule.rule_path != new_path): + rule_changed = True + current_rule.rule_path = new_path + if new_args: + if(current_rule.rule_args != new_args): + rule_changed = True + current_rule.rule_args = new_args + + if rule_changed: + changes += 1 + + return changes + + def insert_before(self, rule_type, rule_control, rule_path, + new_type=None, new_control=None, new_path=None, new_args=None): + # Get a list of rules we want to change + rules_to_find = self.get(rule_type, rule_control, rule_path) + changes = 0 + # There are two cases to consider. + # 1. The new rule doesn't exist before the existing rule + # 2. The new rule exists + + for current_rule in rules_to_find: + # Create a new rule + new_rule = PamdRule(new_type, new_control, new_path, new_args) + # First we'll get the previous rule. + previous_rule = current_rule.prev + + # Next we may have to loop backwards if the previous line is a comment. If it + # is, we'll get the previous "rule's" previous. + while previous_rule is not None and isinstance(previous_rule, PamdComment): + previous_rule = previous_rule.prev + # Next we'll see if the previous rule matches what we are trying to insert. 
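# (The "not ... matches" guard below is what makes insert_before idempotent:
# when an identical rule already sits immediately before the matched rule,
# no node is spliced in and the change counter is left untouched.)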
+ if previous_rule is not None and not previous_rule.matches(new_type, new_control, new_path): + # First set the original previous rule's next to the new_rule + previous_rule.next = new_rule + # Second, set the new_rule's previous to the original previous + new_rule.prev = previous_rule + # Third, set the new rule's next to the current rule + new_rule.next = current_rule + # Fourth, set the current rule's previous to the new_rule + current_rule.prev = new_rule + + changes += 1 + + # Handle the case where it is the first rule in the list. + elif previous_rule is None: + # This is the case where the current rule is not only the first rule + # but the first line as well. So we set the head to the new rule + if current_rule.prev is None: + self._head = new_rule + # This case would occur if the previous line was a comment. + else: + current_rule.prev.next = new_rule + new_rule.prev = current_rule.prev + new_rule.next = current_rule + current_rule.prev = new_rule + changes += 1 + + return changes + + def insert_after(self, rule_type, rule_control, rule_path, + new_type=None, new_control=None, new_path=None, new_args=None): + # Get a list of rules we want to change + rules_to_find = self.get(rule_type, rule_control, rule_path) + changes = 0 + # There are two cases to consider. + # 1. The new rule doesn't exist after the existing rule + # 2. The new rule exists + for current_rule in rules_to_find: + # First we'll get the next rule. + next_rule = current_rule.next + # Next we may have to loop forwards if the next line is a comment. If it + # is, we'll get the next "rule's" next. + while next_rule is not None and isinstance(next_rule, PamdComment): + next_rule = next_rule.next + + # First we create a new rule + new_rule = PamdRule(new_type, new_control, new_path, new_args) + if next_rule is not None and not next_rule.matches(new_type, new_control, new_path): + # If the previous rule doesn't match we'll insert our new rule. 
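# (Here the guard inspects the rule *after* the match: if an identical rule
# already follows it, nothing is inserted, which keeps insert_after
# idempotent as well.)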
+ + # Second set the original next rule's previous to the new_rule + next_rule.prev = new_rule + # Third, set the new_rule's next to the original next rule + new_rule.next = next_rule + # Fourth, set the new rule's previous to the current rule + new_rule.prev = current_rule + # Fifth, set the current rule's next to the new_rule + current_rule.next = new_rule + + changes += 1 + + # This is the case where the current_rule is the last in the list + elif next_rule is None: + new_rule.prev = self._tail + new_rule.next = None + self._tail.next = new_rule + self._tail = new_rule + + current_rule.next = new_rule + changes += 1 + + return changes + + def add_module_arguments(self, rule_type, rule_control, rule_path, args_to_add): + # Get a list of rules we want to change + rules_to_find = self.get(rule_type, rule_control, rule_path) + + args_to_add = parse_module_arguments(args_to_add) + + changes = 0 + + for current_rule in rules_to_find: + rule_changed = False + + # create some structures to evaluate the situation + simple_new_args = set() + key_value_new_args = dict() + + for arg in args_to_add: + if arg.startswith("["): + continue + elif "=" in arg: + key, value = arg.split("=") + key_value_new_args[key] = value + else: + simple_new_args.add(arg) + + key_value_new_args_set = set(key_value_new_args) + + simple_current_args = set() + key_value_current_args = dict() + + for arg in current_rule.rule_args: + if arg.startswith("["): + continue + elif "=" in arg: + key, value = arg.split("=") + key_value_current_args[key] = value + else: + simple_current_args.add(arg) + + key_value_current_args_set = set(key_value_current_args) + + new_args_to_add = list() + + # Handle new simple arguments + if simple_new_args.difference(simple_current_args): + for arg in simple_new_args.difference(simple_current_args): + new_args_to_add.append(arg) + + # Handle new key value arguments + if key_value_new_args_set.difference(key_value_current_args_set): + for key in key_value_new_args_set.difference(key_value_current_args_set): + new_args_to_add.append(key + '=' + key_value_new_args[key]) + + if new_args_to_add: + current_rule.rule_args += new_args_to_add + rule_changed = True + + # Handle existing key value arguments when value is not equal + if key_value_new_args_set.intersection(key_value_current_args_set): + for key in key_value_new_args_set.intersection(key_value_current_args_set): + if key_value_current_args[key] != key_value_new_args[key]: + arg_index = current_rule.rule_args.index(key + '=' + key_value_current_args[key]) + current_rule.rule_args[arg_index] = str(key + '=' + key_value_new_args[key]) + rule_changed = True + + if rule_changed: + changes += 1 + + return changes + + def remove_module_arguments(self, rule_type, rule_control, rule_path, args_to_remove): + # Get a list of rules we want to change + rules_to_find = self.get(rule_type, rule_control, rule_path) + + args_to_remove = parse_module_arguments(args_to_remove) + + changes = 0 + + for current_rule in rules_to_find: + if not args_to_remove: + args_to_remove = [] + + # Let's check to see if there are any args to remove by finding the intersection + # of the rule's current args and the args_to_remove lists + if not list(set(current_rule.rule_args) & set(args_to_remove)): + continue + + # There are args to remove, so we create a list of new_args absent the args + # to remove. 
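# (Removal is by exact string match, so 'deny=3' and 'deny=5' count as
# different arguments for this purpose.)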
+ current_rule.rule_args = [arg for arg in current_rule.rule_args if arg not in args_to_remove] + + changes += 1 + + return changes + + def validate(self): + current_line = self._head + + while current_line is not None: + if not current_line.validate()[0]: + return current_line.validate() + current_line = current_line.next + return True, "Module is valid" + + def __str__(self): + lines = [] + current_line = self._head + + while current_line is not None: + lines.append(str(current_line)) + current_line = current_line.next + + if lines[1].startswith("# Updated by Ansible"): + lines.pop(1) + + lines.insert(1, "# Updated by Ansible - " + datetime.now().isoformat()) + + return '\n'.join(lines) + '\n' + + +def parse_module_arguments(module_arguments): + # Return empty list if we have no args to parse + if not module_arguments: + return [] + elif isinstance(module_arguments, list) and len(module_arguments) == 1 and not module_arguments[0]: + return [] + + if not isinstance(module_arguments, list): + module_arguments = [module_arguments] + + parsed_args = list() + + for arg in module_arguments: + for item in filter(None, RULE_ARG_REGEX.findall(arg)): + if not item.startswith("["): + re.sub("\\s*=\\s*", "=", item) + parsed_args.append(item) + + return parsed_args + + +def main(): + + module = AnsibleModule( + argument_spec=dict( + name=dict(type='str', required=True), + type=dict(type='str', required=True, choices=VALID_TYPES), + control=dict(type='str', required=True), + module_path=dict(type='str', required=True), + new_type=dict(type='str', choices=VALID_TYPES), + new_control=dict(type='str'), + new_module_path=dict(type='str'), + module_arguments=dict(type='list'), + state=dict(type='str', default='updated', choices=['absent', 'after', 'args_absent', 'args_present', 'before', 'updated']), + path=dict(type='path', default='/etc/pam.d'), + backup=dict(type='bool', default=False), + ), + supports_check_mode=True, + required_if=[ + ("state", "args_present", ["module_arguments"]), + ("state", "args_absent", ["module_arguments"]), + ("state", "before", ["new_control"]), + ("state", "before", ["new_type"]), + ("state", "before", ["new_module_path"]), + ("state", "after", ["new_control"]), + ("state", "after", ["new_type"]), + ("state", "after", ["new_module_path"]), + + ], + ) + content = str() + fname = os.path.join(module.params["path"], module.params["name"]) + + # Open the file and read the content or fail + try: + with open(fname, 'r') as service_file_obj: + content = service_file_obj.read() + except IOError as e: + # If unable to read the file, fail out + module.fail_json(msg='Unable to open/read PAM module \ + file %s with error %s.' 
% + (fname, str(e))) + + # Assuming we didn't fail, create the service + service = PamdService(content) + # Set the action + action = module.params['state'] + + changes = 0 + + # Take action + if action == 'updated': + changes = service.update_rule(module.params['type'], module.params['control'], module.params['module_path'], + module.params['new_type'], module.params['new_control'], module.params['new_module_path'], + module.params['module_arguments']) + elif action == 'before': + changes = service.insert_before(module.params['type'], module.params['control'], module.params['module_path'], + module.params['new_type'], module.params['new_control'], module.params['new_module_path'], + module.params['module_arguments']) + elif action == 'after': + changes = service.insert_after(module.params['type'], module.params['control'], module.params['module_path'], + module.params['new_type'], module.params['new_control'], module.params['new_module_path'], + module.params['module_arguments']) + elif action == 'args_absent': + changes = service.remove_module_arguments(module.params['type'], module.params['control'], module.params['module_path'], + module.params['module_arguments']) + elif action == 'args_present': + if [arg for arg in parse_module_arguments(module.params['module_arguments']) if arg.startswith("[")]: + module.fail_json(msg="Unable to process bracketed '[' complex arguments with 'args_present'. Please use 'updated'.") + + changes = service.add_module_arguments(module.params['type'], module.params['control'], module.params['module_path'], + module.params['module_arguments']) + elif action == 'absent': + changes = service.remove(module.params['type'], module.params['control'], module.params['module_path']) + + valid, msg = service.validate() + + # If the module is not valid (meaning one of the rules is invalid), we will fail + if not valid: + module.fail_json(msg=msg) + + result = dict( + changed=(changes > 0), + change_count=changes, + backupdest='', + ) + + # If not check mode and something changed, backup the original if necessary then write out the file or fail + if not module.check_mode and result['changed']: + # First, create a backup if desired. + if module.params['backup']: + result['backupdest'] = module.backup_local(fname) + try: + temp_file = NamedTemporaryFile(mode='w', dir=module.tmpdir, delete=False) + with open(temp_file.name, 'w') as fd: + fd.write(str(service)) + + except IOError: + module.fail_json(msg='Unable to create temporary \ + file %s' % temp_file) + + module.atomic_move(temp_file.name, os.path.realpath(fname)) + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/system/parted.py b/plugins/modules/system/parted.py new file mode 100644 index 0000000000..fcc1b8aaac --- /dev/null +++ b/plugins/modules/system/parted.py @@ -0,0 +1,694 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2016, Fabrizio Colonna +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = r''' +--- +author: + - Fabrizio Colonna (@ColOfAbRiX) +module: parted +short_description: Configure block device partitions +description: + - This module allows configuring block device partition using the C(parted) + command line tool. 
For a full description of the fields and the options + check the GNU parted manual. +requirements: + - This module requires parted version 1.8.3 and above. + - If the version of parted is below 3.1, it requires a Linux version running + the sysfs file system C(/sys/). +options: + device: + description: The block device (disk) where to operate. + type: str + required: True + align: + description: Set alignment for newly created partitions. + type: str + choices: [ cylinder, minimal, none, optimal ] + default: optimal + number: + description: + - The number of the partition to work with or the number of the partition + that will be created. + - Required when performing any action on the disk, except fetching information. + type: int + unit: + description: + - Selects the current default unit that Parted will use to display + locations and capacities on the disk and to interpret those given by the + user if they are not suffixed by an unit. + - When fetching information about a disk, it is always recommended to specify a unit. + type: str + choices: [ s, B, KB, KiB, MB, MiB, GB, GiB, TB, TiB, '%', cyl, chs, compact ] + default: KiB + label: + description: Creates a new disk label. + type: str + choices: [ aix, amiga, bsd, dvh, gpt, loop, mac, msdos, pc98, sun ] + default: msdos + part_type: + description: + - May be specified only with 'msdos' or 'dvh' partition tables. + - A C(name) must be specified for a 'gpt' partition table. + - Neither C(part_type) nor C(name) may be used with a 'sun' partition table. + type: str + choices: [ extended, logical, primary ] + default: primary + part_start: + description: + - Where the partition will start as offset from the beginning of the disk, + that is, the "distance" from the start of the disk. + - The distance can be specified with all the units supported by parted + (except compat) and it is case sensitive, e.g. C(10GiB), C(15%). + type: str + default: 0% + part_end : + description: + - Where the partition will end as offset from the beginning of the disk, + that is, the "distance" from the start of the disk. + - The distance can be specified with all the units supported by parted + (except compat) and it is case sensitive, e.g. C(10GiB), C(15%). + type: str + default: 100% + name: + description: + - Sets the name for the partition number (GPT, Mac, MIPS and PC98 only). + type: str + flags: + description: A list of the flags that has to be set on the partition. + type: list + state: + description: + - Whether to create or delete a partition. + - If set to C(info) the module will only return the device information. + type: str + choices: [ absent, present, info ] + default: info +notes: + - When fetching information about a new disk and when the version of parted + installed on the system is before version 3.1, the module queries the kernel + through C(/sys/) to obtain disk information. In this case the units CHS and + CYL are not supported. +''' + +RETURN = r''' +partition_info: + description: Current partition information + returned: success + type: complex + contains: + device: + description: Generic device information. + type: dict + partitions: + description: List of device partitions. 
+ type: list + sample: { + "disk": { + "dev": "/dev/sdb", + "logical_block": 512, + "model": "VMware Virtual disk", + "physical_block": 512, + "size": 5.0, + "table": "msdos", + "unit": "gib" + }, + "partitions": [{ + "begin": 0.0, + "end": 1.0, + "flags": ["boot", "lvm"], + "fstype": "", + "name": "", + "num": 1, + "size": 1.0 + }, { + "begin": 1.0, + "end": 5.0, + "flags": [], + "fstype": "", + "name": "", + "num": 2, + "size": 4.0 + }] + } +''' + +EXAMPLES = r''' +- name: Create a new primary partition + parted: + device: /dev/sdb + number: 1 + state: present + +- name: Remove partition number 1 + parted: + device: /dev/sdb + number: 1 + state: absent + +- name: Create a new primary partition with a size of 1GiB + parted: + device: /dev/sdb + number: 1 + state: present + part_end: 1GiB + +- name: Create a new primary partition for LVM + parted: + device: /dev/sdb + number: 2 + flags: [ lvm ] + state: present + part_start: 1GiB + +# Example on how to read info and reuse it in subsequent task +- name: Read device information (always use unit when probing) + parted: device=/dev/sdb unit=MiB + register: sdb_info + +- name: Remove all partitions from disk + parted: + device: /dev/sdb + number: '{{ item.num }}' + state: absent + loop: '{{ sdb_info.partitions }}' +''' + + +from ansible.module_utils.basic import AnsibleModule +import math +import re +import os + + +# Reference prefixes (International System of Units and IEC) +units_si = ['B', 'KB', 'MB', 'GB', 'TB'] +units_iec = ['KiB', 'MiB', 'GiB', 'TiB'] +parted_units = units_si + units_iec + ['s', '%', 'cyl', 'chs', 'compact'] + + +def parse_unit(size_str, unit=''): + """ + Parses a string containing a size of information + """ + matches = re.search(r'^([\d.]+)([\w%]+)?$', size_str) + if matches is None: + # ",," format + matches = re.search(r'^(\d+),(\d+),(\d+)$', size_str) + if matches is None: + module.fail_json( + msg="Error interpreting parted size output: '%s'" % size_str + ) + + size = { + 'cylinder': int(matches.group(1)), + 'head': int(matches.group(2)), + 'sector': int(matches.group(3)) + } + unit = 'chs' + + else: + # Normal format: "[]" + if matches.group(2) is not None: + unit = matches.group(2) + + size = float(matches.group(1)) + + return size, unit + + +def parse_partition_info(parted_output, unit): + """ + Parses the output of parted and transforms the data into + a dictionary. + + Parted Machine Parseable Output: + See: https://lists.alioth.debian.org/pipermail/parted-devel/2006-December/00 + 0573.html + - All lines end with a semicolon (;) + - The first line indicates the units in which the output is expressed. + CHS, CYL and BYT stands for CHS, Cylinder and Bytes respectively. + - The second line is made of disk information in the following format: + "path":"size":"transport-type":"logical-sector-size":"physical-sector-siz + e":"partition-table-type":"model-name"; + - If the first line was either CYL or CHS, the next line will contain + information on no. of cylinders, heads, sectors and cylinder size. + - Partition information begins from the next line. 
This is of the format: + (for BYT) + "number":"begin":"end":"size":"filesystem-type":"partition-name":"flags-s + et"; + (for CHS/CYL) + "number":"begin":"end":"filesystem-type":"partition-name":"flags-set"; + """ + lines = [x for x in parted_output.split('\n') if x.strip() != ''] + + # Generic device info + generic_params = lines[1].rstrip(';').split(':') + + # The unit is read once, because parted always returns the same unit + size, unit = parse_unit(generic_params[1], unit) + + generic = { + 'dev': generic_params[0], + 'size': size, + 'unit': unit.lower(), + 'table': generic_params[5], + 'model': generic_params[6], + 'logical_block': int(generic_params[3]), + 'physical_block': int(generic_params[4]) + } + + # CYL and CHS have an additional line in the output + if unit in ['cyl', 'chs']: + chs_info = lines[2].rstrip(';').split(':') + cyl_size, cyl_unit = parse_unit(chs_info[3]) + generic['chs_info'] = { + 'cylinders': int(chs_info[0]), + 'heads': int(chs_info[1]), + 'sectors': int(chs_info[2]), + 'cyl_size': cyl_size, + 'cyl_size_unit': cyl_unit.lower() + } + lines = lines[1:] + + parts = [] + for line in lines[2:]: + part_params = line.rstrip(';').split(':') + + # CHS use a different format than BYT, but contrary to what stated by + # the author, CYL is the same as BYT. I've tested this undocumented + # behaviour down to parted version 1.8.3, which is the first version + # that supports the machine parseable output. + if unit != 'chs': + size = parse_unit(part_params[3])[0] + fstype = part_params[4] + name = part_params[5] + flags = part_params[6] + + else: + size = "" + fstype = part_params[3] + name = part_params[4] + flags = part_params[5] + + parts.append({ + 'num': int(part_params[0]), + 'begin': parse_unit(part_params[1])[0], + 'end': parse_unit(part_params[2])[0], + 'size': size, + 'fstype': fstype, + 'name': name, + 'flags': [f.strip() for f in flags.split(', ') if f != ''], + 'unit': unit.lower(), + }) + + return {'generic': generic, 'partitions': parts} + + +def format_disk_size(size_bytes, unit): + """ + Formats a size in bytes into a different unit, like parted does. It doesn't + manage CYL and CHS formats, though. + This function has been adapted from https://github.com/Distrotech/parted/blo + b/279d9d869ff472c52b9ec2e180d568f0c99e30b0/libparted/unit.c + """ + global units_si, units_iec + + unit = unit.lower() + + # Shortcut + if size_bytes == 0: + return 0.0, 'b' + + # Cases where we default to 'compact' + if unit in ['', 'compact', 'cyl', 'chs']: + index = max(0, int( + (math.log10(size_bytes) - 1.0) / 3.0 + )) + unit = 'b' + if index < len(units_si): + unit = units_si[index] + + # Find the appropriate multiplier + multiplier = 1.0 + if unit in units_si: + multiplier = 1000.0 ** units_si.index(unit) + elif unit in units_iec: + multiplier = 1024.0 ** units_iec.index(unit) + + output = size_bytes // multiplier * (1 + 1E-16) + + # Corrections to round up as per IEEE754 standard + if output < 10: + w = output + 0.005 + elif output < 100: + w = output + 0.05 + else: + w = output + 0.5 + + if w < 10: + precision = 2 + elif w < 100: + precision = 1 + else: + precision = 0 + + # Round and return + return round(output, precision), unit + + +def get_unlabeled_device_info(device, unit): + """ + Fetches device information directly from the kernel and it is used when + parted cannot work because of a missing label. It always returns a 'unknown' + label. 
+ """ + device_name = os.path.basename(device) + base = "/sys/block/%s" % device_name + + vendor = read_record(base + "/device/vendor", "Unknown") + model = read_record(base + "/device/model", "model") + logic_block = int(read_record(base + "/queue/logical_block_size", 0)) + phys_block = int(read_record(base + "/queue/physical_block_size", 0)) + size_bytes = int(read_record(base + "/size", 0)) * logic_block + + size, unit = format_disk_size(size_bytes, unit) + + return { + 'generic': { + 'dev': device, + 'table': "unknown", + 'size': size, + 'unit': unit, + 'logical_block': logic_block, + 'physical_block': phys_block, + 'model': "%s %s" % (vendor, model), + }, + 'partitions': [] + } + + +def get_device_info(device, unit): + """ + Fetches information about a disk and its partitions and it returns a + dictionary. + """ + global module, parted_exec + + # If parted complains about missing labels, it means there are no partitions. + # In this case only, use a custom function to fetch information and emulate + # parted formats for the unit. + label_needed = check_parted_label(device) + if label_needed: + return get_unlabeled_device_info(device, unit) + + command = "%s -s -m %s -- unit '%s' print" % (parted_exec, device, unit) + rc, out, err = module.run_command(command) + if rc != 0 and 'unrecognised disk label' not in err: + module.fail_json(msg=( + "Error while getting device information with parted " + "script: '%s'" % command), + rc=rc, out=out, err=err + ) + + return parse_partition_info(out, unit) + + +def check_parted_label(device): + """ + Determines if parted needs a label to complete its duties. Versions prior + to 3.1 don't return data when there is no label. For more information see: + http://upstream.rosalinux.ru/changelogs/libparted/3.1/changelog.html + """ + global parted_exec + + # Check the version + parted_major, parted_minor, _ = parted_version() + if (parted_major == 3 and parted_minor >= 1) or parted_major > 3: + return False + + # Older parted versions return a message in the stdout and RC > 0. + rc, out, err = module.run_command("%s -s -m %s print" % (parted_exec, device)) + if rc != 0 and 'unrecognised disk label' in out.lower(): + return True + + return False + + +def parted_version(): + """ + Returns the major and minor version of parted installed on the system. + """ + global module, parted_exec + + rc, out, err = module.run_command("%s --version" % parted_exec) + if rc != 0: + module.fail_json( + msg="Failed to get parted version.", rc=rc, out=out, err=err + ) + + lines = [x for x in out.split('\n') if x.strip() != ''] + if len(lines) == 0: + module.fail_json(msg="Failed to get parted version.", rc=0, out=out) + + matches = re.search(r'^parted.+(\d+)\.(\d+)(?:\.(\d+))?$', lines[0]) + if matches is None: + module.fail_json(msg="Failed to get parted version.", rc=0, out=out) + + # Convert version to numbers + major = int(matches.group(1)) + minor = int(matches.group(2)) + rev = 0 + if matches.group(3) is not None: + rev = int(matches.group(3)) + + return major, minor, rev + + +def parted(script, device, align): + """ + Runs a parted script. + """ + global module, parted_exec + + if script and not module.check_mode: + command = "%s -s -m -a %s %s -- %s" % (parted_exec, align, device, script) + rc, out, err = module.run_command(command) + + if rc != 0: + module.fail_json( + msg="Error while running parted script: %s" % command.strip(), + rc=rc, out=out, err=err + ) + + +def read_record(file_path, default=None): + """ + Reads the first line of a file and returns it. 
+ """ + try: + f = open(file_path, 'r') + try: + return f.readline().strip() + finally: + f.close() + except IOError: + return default + + +def part_exists(partitions, attribute, number): + """ + Looks if a partition that has a specific value for a specific attribute + actually exists. + """ + return any( + part[attribute] and + part[attribute] == number for part in partitions + ) + + +def check_size_format(size_str): + """ + Checks if the input string is an allowed size + """ + size, unit = parse_unit(size_str) + return unit in parted_units + + +def main(): + global module, units_si, units_iec, parted_exec + + changed = False + output_script = "" + script = "" + module = AnsibleModule( + argument_spec=dict( + device=dict(type='str', required=True), + align=dict(type='str', default='optimal', choices=['cylinder', 'minimal', 'none', 'optimal']), + number=dict(type='int'), + + # unit command + unit=dict(type='str', default='KiB', choices=parted_units), + + # mklabel command + label=dict(type='str', default='msdos', choices=['aix', 'amiga', 'bsd', 'dvh', 'gpt', 'loop', 'mac', 'msdos', 'pc98', 'sun']), + + # mkpart [] command + part_type=dict(type='str', default='primary', choices=['extended', 'logical', 'primary']), + part_start=dict(type='str', default='0%'), + part_end=dict(type='str', default='100%'), + + # name command + name=dict(type='str'), + + # set command + flags=dict(type='list'), + + # rm/mkpart command + state=dict(type='str', default='info', choices=['absent', 'info', 'present']), + ), + required_if=[ + ['state', 'present', ['number']], + ['state', 'absent', ['number']], + ], + supports_check_mode=True, + ) + module.run_command_environ_update = {'LANG': 'C', 'LC_ALL': 'C', 'LC_MESSAGES': 'C', 'LC_CTYPE': 'C'} + + # Data extraction + device = module.params['device'] + align = module.params['align'] + number = module.params['number'] + unit = module.params['unit'] + label = module.params['label'] + part_type = module.params['part_type'] + part_start = module.params['part_start'] + part_end = module.params['part_end'] + name = module.params['name'] + state = module.params['state'] + flags = module.params['flags'] + + # Parted executable + parted_exec = module.get_bin_path('parted', True) + + # Conditioning + if number is not None and number < 1: + module.fail_json(msg="The partition number must be greater then 0.") + if not check_size_format(part_start): + module.fail_json( + msg="The argument 'part_start' doesn't respect required format." + "The size unit is case sensitive.", + err=parse_unit(part_start) + ) + if not check_size_format(part_end): + module.fail_json( + msg="The argument 'part_end' doesn't respect required format." + "The size unit is case sensitive.", + err=parse_unit(part_end) + ) + + # Read the current disk information + current_device = get_device_info(device, unit) + current_parts = current_device['partitions'] + + if state == 'present': + + # Assign label if required + if current_device['generic'].get('table', None) != label: + script += "mklabel %s " % label + + # Create partition if required + if part_type and not part_exists(current_parts, 'num', number): + script += "mkpart %s %s %s " % ( + part_type, + part_start, + part_end + ) + + # Set the unit of the run + if unit and script: + script = "unit %s %s" % (unit, script) + + # Execute the script and update the data structure. 
+ # This will create the partition for the next steps + if script: + output_script += script + parted(script, device, align) + changed = True + script = "" + + current_parts = get_device_info(device, unit)['partitions'] + + if part_exists(current_parts, 'num', number) or module.check_mode: + partition = {'flags': []} # Empty structure for the check-mode + if not module.check_mode: + partition = [p for p in current_parts if p['num'] == number][0] + + # Assign name to the partition + if name is not None and partition.get('name', None) != name: + # Wrap double quotes in single quotes so the shell doesn't strip + # the double quotes as those need to be included in the arg + # passed to parted + script += 'name %s \'"%s"\' ' % (number, name) + + # Manage flags + if flags: + # Parted infers boot with esp, if you assign esp, boot is set + # and if boot is unset, esp is also unset. + if 'esp' in flags and 'boot' not in flags: + flags.append('boot') + + # Compute only the changes in flags status + flags_off = list(set(partition['flags']) - set(flags)) + flags_on = list(set(flags) - set(partition['flags'])) + + for f in flags_on: + script += "set %s %s on " % (number, f) + + for f in flags_off: + script += "set %s %s off " % (number, f) + + # Set the unit of the run + if unit and script: + script = "unit %s %s" % (unit, script) + + # Execute the script + if script: + output_script += script + changed = True + parted(script, device, align) + + elif state == 'absent': + # Remove the partition + if part_exists(current_parts, 'num', number) or module.check_mode: + script = "rm %s " % number + output_script += script + changed = True + parted(script, device, align) + + elif state == 'info': + output_script = "unit '%s' print " % unit + + # Final status of the device + final_device_status = get_device_info(device, unit) + module.exit_json( + changed=changed, + disk=final_device_status['generic'], + partitions=final_device_status['partitions'], + script=output_script.strip() + ) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/system/pids.py b/plugins/modules/system/pids.py new file mode 100644 index 0000000000..dfd3acf035 --- /dev/null +++ b/plugins/modules/system/pids.py @@ -0,0 +1,88 @@ +#!/usr/bin/python +# Copyright: (c) 2019, Saranya Sridharan +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +module: pids +description: "Retrieves a list of PIDs of given process name in Ansible controller/controlled machines.Returns an empty list if no process in that name exists." +short_description: "Retrieves process IDs list if the process is running otherwise return empty list" +author: + - Saranya Sridharan (@saranyasridharan) +requirements: + - psutil(python module) +options: + name: + description: the name of the process you want to get PID for. 
+ required: true + type: str +''' + +EXAMPLES = ''' +# Pass the process name +- name: Getting process IDs of the process + pids: + name: python + register: pids_of_python + +- name: Printing the process IDs obtained + debug: + msg: "PIDS of python:{{pids_of_python.pids|join(',')}}" +''' + +RETURN = ''' +pids: + description: Process IDs of the given process + returned: list of none, one, or more process IDs + type: list + sample: [100,200] +''' + +from ansible.module_utils.basic import AnsibleModule +try: + import psutil + HAS_PSUTIL = True +except ImportError: + HAS_PSUTIL = False + + +def compare_lower(a, b): + if a is None or b is None: + # this could just be "return False" but would lead to surprising behavior if both a and b are None + return a == b + + return a.lower() == b.lower() + + +def get_pid(name): + pids = [] + + for proc in psutil.process_iter(attrs=['name', 'cmdline']): + if compare_lower(proc.info['name'], name) or \ + proc.info['cmdline'] and compare_lower(proc.info['cmdline'][0], name): + pids.append(proc.pid) + + return pids + + +def main(): + module = AnsibleModule( + argument_spec=dict( + name=dict(required=True, type="str"), + ), + supports_check_mode=True, + ) + if not HAS_PSUTIL: + module.fail_json(msg="Missing required 'psutil' python module. Try installing it with: pip install psutil") + name = module.params["name"] + response = dict(pids=get_pid(name)) + module.exit_json(**response) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/system/puppet.py b/plugins/modules/system/puppet.py new file mode 100644 index 0000000000..8efe2c85d7 --- /dev/null +++ b/plugins/modules/system/puppet.py @@ -0,0 +1,330 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2015, Hewlett-Packard Development Company, L.P. +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['stableinterface'], + 'supported_by': 'community'} + +DOCUMENTATION = r''' +--- +module: puppet +short_description: Runs puppet +description: + - Runs I(puppet) agent or apply in a reliable manner. +options: + timeout: + description: + - How long to wait for I(puppet) to finish. + type: str + default: 30m + puppetmaster: + description: + - The hostname of the puppetmaster to contact. + type: str + modulepath: + description: + - Path to an alternate location for puppet modules. + type: str + manifest: + description: + - Path to the manifest file to run puppet apply on. + type: str + noop: + description: + - Override puppet.conf noop mode. + - When C(yes), run Puppet agent with C(--noop) switch set. + - When C(no), run Puppet agent with C(--no-noop) switch set. + - When unset (default), use default or puppet.conf value if defined. + type: bool + facts: + description: + - A dict of values to pass in as persistent external facter facts. + type: dict + facter_basename: + description: + - Basename of the facter output file. + type: str + default: ansible + environment: + description: + - Puppet environment to be used. + type: str + logdest: + description: + - Where the puppet logs should go, if puppet apply is being used. + - C(all) will go to both C(stdout) and C(syslog). + type: str + choices: [ all, stdout, syslog ] + default: stdout + certname: + description: + - The name to use when handling certificates. + type: str + tags: + description: + - A list of puppet tags to be used. 
+ type: list + execute: + description: + - Execute a specific piece of Puppet code. + - It has no effect with a puppetmaster. + type: str + use_srv_records: + description: + - Toggles use_srv_records flag + type: bool + summarize: + description: + - Whether to print a transaction summary. + type: bool + verbose: + description: + - Print extra information. + type: bool + debug: + description: + - Enable full debugging. + type: bool +requirements: +- puppet +author: +- Monty Taylor (@emonty) +''' + +EXAMPLES = r''' +- name: Run puppet agent and fail if anything goes wrong + puppet: + +- name: Run puppet and timeout in 5 minutes + puppet: + timeout: 5m + +- name: Run puppet using a different environment + puppet: + environment: testing + +- name: Run puppet using a specific certname + puppet: + certname: agent01.example.com + +- name: Run puppet using a specific piece of Puppet code. Has no effect with a puppetmaster + puppet: + execute: include ::mymodule + +- name: Run puppet using a specific tags + puppet: + tags: + - update + - nginx + +- name: Run puppet agent in noop mode + puppet: + noop: yes + +- name: Run a manifest with debug, log to both syslog and stdout, specify module path + puppet: + modulepath: /etc/puppet/modules:/opt/stack/puppet-modules:/usr/share/openstack-puppet/modules + logdest: all + manifest: /var/lib/example/puppet_step_config.pp +''' + +import json +import os +import stat + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.six.moves import shlex_quote + + +def _get_facter_dir(): + if os.getuid() == 0: + return '/etc/facter/facts.d' + else: + return os.path.expanduser('~/.facter/facts.d') + + +def _write_structured_data(basedir, basename, data): + if not os.path.exists(basedir): + os.makedirs(basedir) + file_path = os.path.join(basedir, "{0}.json".format(basename)) + # This is more complex than you might normally expect because we want to + # open the file with only u+rw set. Also, we use the stat constants + # because ansible still supports python 2.4 and the octal syntax changed + out_file = os.fdopen( + os.open( + file_path, os.O_CREAT | os.O_WRONLY, + stat.S_IRUSR | stat.S_IWUSR), 'w') + out_file.write(json.dumps(data).encode('utf8')) + out_file.close() + + +def main(): + module = AnsibleModule( + argument_spec=dict( + timeout=dict(type='str', default='30m'), + puppetmaster=dict(type='str'), + modulepath=dict(type='str'), + manifest=dict(type='str'), + noop=dict(required=False, type='bool'), + logdest=dict(type='str', default='stdout', choices=['all', + 'stdout', + 'syslog']), + # internal code to work with --diff, do not use + show_diff=dict(type='bool', default=False, aliases=['show-diff']), + facts=dict(type='dict'), + facter_basename=dict(type='str', default='ansible'), + environment=dict(type='str'), + certname=dict(type='str'), + tags=dict(type='list'), + execute=dict(type='str'), + summarize=dict(type='bool', default=False), + debug=dict(type='bool', default=False), + verbose=dict(type='bool', default=False), + use_srv_records=dict(type='bool'), + ), + supports_check_mode=True, + mutually_exclusive=[ + ('puppetmaster', 'manifest'), + ('puppetmaster', 'manifest', 'execute'), + ('puppetmaster', 'modulepath'), + ], + ) + p = module.params + + global PUPPET_CMD + PUPPET_CMD = module.get_bin_path("puppet", False, ['/opt/puppetlabs/bin']) + + if not PUPPET_CMD: + module.fail_json( + msg="Could not find puppet. 
Please ensure it is installed.") + + global TIMEOUT_CMD + TIMEOUT_CMD = module.get_bin_path("timeout", False) + + if p['manifest']: + if not os.path.exists(p['manifest']): + module.fail_json( + msg="Manifest file %(manifest)s not found." % dict( + manifest=p['manifest'])) + + # Check if puppet is disabled here + if not p['manifest']: + rc, stdout, stderr = module.run_command( + PUPPET_CMD + " config print agent_disabled_lockfile") + if os.path.exists(stdout.strip()): + module.fail_json( + msg="Puppet agent is administratively disabled.", + disabled=True) + elif rc != 0: + module.fail_json( + msg="Puppet agent state could not be determined.") + + if module.params['facts'] and not module.check_mode: + _write_structured_data( + _get_facter_dir(), + module.params['facter_basename'], + module.params['facts']) + + if TIMEOUT_CMD: + base_cmd = "%(timeout_cmd)s -s 9 %(timeout)s %(puppet_cmd)s" % dict( + timeout_cmd=TIMEOUT_CMD, + timeout=shlex_quote(p['timeout']), + puppet_cmd=PUPPET_CMD) + else: + base_cmd = PUPPET_CMD + + if not p['manifest'] and not p['execute']: + cmd = ("%(base_cmd)s agent --onetime" + " --no-daemonize --no-usecacheonfailure --no-splay" + " --detailed-exitcodes --verbose --color 0") % dict(base_cmd=base_cmd) + if p['puppetmaster']: + cmd += " --server %s" % shlex_quote(p['puppetmaster']) + if p['show_diff']: + cmd += " --show_diff" + if p['environment']: + cmd += " --environment '%s'" % p['environment'] + if p['tags']: + cmd += " --tags '%s'" % ','.join(p['tags']) + if p['certname']: + cmd += " --certname='%s'" % p['certname'] + if module.check_mode: + cmd += " --noop" + if p['use_srv_records'] is not None: + if not p['use_srv_records']: + cmd += " --no-use_srv_records" + else: + cmd += " --use_srv_records" + elif 'noop' in p: + if p['noop']: + cmd += " --noop" + else: + cmd += " --no-noop" + else: + cmd = "%s apply --detailed-exitcodes " % base_cmd + if p['logdest'] == 'syslog': + cmd += "--logdest syslog " + if p['logdest'] == 'all': + cmd += " --logdest syslog --logdest stdout" + if p['modulepath']: + cmd += "--modulepath='%s'" % p['modulepath'] + if p['environment']: + cmd += "--environment '%s' " % p['environment'] + if p['certname']: + cmd += " --certname='%s'" % p['certname'] + if p['tags']: + cmd += " --tags '%s'" % ','.join(p['tags']) + if module.check_mode: + cmd += "--noop " + elif 'noop' in p: + if p['noop']: + cmd += " --noop" + else: + cmd += " --no-noop" + if p['execute']: + cmd += " --execute '%s'" % p['execute'] + else: + cmd += shlex_quote(p['manifest']) + if p['summarize']: + cmd += " --summarize" + if p['debug']: + cmd += " --debug" + if p['verbose']: + cmd += " --verbose" + rc, stdout, stderr = module.run_command(cmd) + + if rc == 0: + # success + module.exit_json(rc=rc, changed=False, stdout=stdout, stderr=stderr) + elif rc == 1: + # rc==1 could be because it's disabled + # rc==1 could also mean there was a compilation failure + disabled = "administratively disabled" in stdout + if disabled: + msg = "puppet is disabled" + else: + msg = "puppet did not run" + module.exit_json( + rc=rc, disabled=disabled, msg=msg, + error=True, stdout=stdout, stderr=stderr) + elif rc == 2: + # success with changes + module.exit_json(rc=0, changed=True, stdout=stdout, stderr=stderr) + elif rc == 124: + # timeout + module.exit_json( + rc=rc, msg="%s timed out" % cmd, stdout=stdout, stderr=stderr) + else: + # failure + module.fail_json( + rc=rc, msg="%s failed with return code: %d" % (cmd, rc), + stdout=stdout, stderr=stderr) + + +if __name__ == '__main__': + main() diff 
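The exit-code handling at the end of puppet.py follows puppet's --detailed-exitcodes convention. A condensed, standalone sketch of that mapping for reference; the helper name and return shape are illustrative only and not part of the module:

    def interpret_puppet_rc(rc):
        # Condensed mapping of puppet's --detailed-exitcodes, as handled above:
        #   0   -> success, no changes
        #   2   -> success, resources changed
        #   1   -> failure, or an administratively disabled agent
        #   124 -> the coreutils 'timeout' wrapper killed the run
        if rc == 0:
            return dict(changed=False, failed=False)
        if rc == 2:
            return dict(changed=True, failed=False)
        if rc == 124:
            return dict(changed=False, failed=True, msg='puppet run timed out')
        return dict(changed=False, failed=True, msg='puppet run failed (rc=%d)' % rc)

Note that the module itself reports rc==1 through exit_json with error=True rather than fail_json, so a disabled agent can still be detected from the returned 'disabled' key; the sketch simplifies that case to a plain failure.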
--git a/plugins/modules/system/python_requirements_facts.py b/plugins/modules/system/python_requirements_facts.py new file mode 120000 index 0000000000..d816829034 --- /dev/null +++ b/plugins/modules/system/python_requirements_facts.py @@ -0,0 +1 @@ +python_requirements_info.py \ No newline at end of file diff --git a/plugins/modules/system/python_requirements_info.py b/plugins/modules/system/python_requirements_info.py new file mode 100644 index 0000000000..1dbeb66d4c --- /dev/null +++ b/plugins/modules/system/python_requirements_info.py @@ -0,0 +1,174 @@ +#!/usr/bin/python +# Copyright (c) 2018 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +ANSIBLE_METADATA = { + 'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community' +} + +DOCUMENTATION = ''' +module: python_requirements_info +short_description: Show python path and assert dependency versions +description: + - Get info about available Python requirements on the target host, including listing required libraries and gathering versions. + - This module was called C(python_requirements_facts) before Ansible 2.9. The usage did not change. +options: + dependencies: + description: > + A list of version-likes or module names to check for installation. + Supported operators: <, >, <=, >=, or ==. The bare module name like + I(ansible), the module with a specific version like I(boto3==1.6.1), or a + partial version like I(requests>2) are all valid specifications. +author: +- Will Thames (@willthames) +- Ryan Scott Brown (@ryansb) +''' + +EXAMPLES = ''' +- name: show python lib/site paths + python_requirements_info: +- name: check for modern boto3 and botocore versions + python_requirements_info: + dependencies: + - boto3>1.6 + - botocore<2 +''' + +RETURN = ''' +python: + description: path to python version used + returned: always + type: str + sample: /usr/local/opt/python@2/bin/python2.7 +python_version: + description: version of python + returned: always + type: str + sample: "2.7.15 (default, May 1 2018, 16:44:08)\n[GCC 4.2.1 Compatible Apple LLVM 9.1.0 (clang-902.0.39.1)]" +python_system_path: + description: List of paths python is looking for modules in + returned: always + type: list + sample: + - /usr/local/opt/python@2/site-packages/ + - /usr/lib/python/site-packages/ + - /usr/lib/python/site-packages/ +valid: + description: A dictionary of dependencies that matched their desired versions. 
If no version was specified, then I(desired) will be null + returned: always + type: dict + sample: + boto3: + desired: null + installed: 1.7.60 + botocore: + desired: botocore<2 + installed: 1.10.60 +mismatched: + description: A dictionary of dependencies that did not satisfy the desired version + returned: always + type: dict + sample: + botocore: + desired: botocore>2 + installed: 1.10.60 +not_found: + description: A list of packages that could not be imported at all, and are not installed + returned: always + type: list + sample: + - boto4 + - requests +''' + +import re +import sys +import operator + +HAS_DISTUTILS = False +try: + import pkg_resources + from distutils.version import LooseVersion + HAS_DISTUTILS = True +except ImportError: + pass + +from ansible.module_utils.basic import AnsibleModule + +operations = { + '<=': operator.le, + '>=': operator.ge, + '<': operator.lt, + '>': operator.gt, + '==': operator.eq, +} + + +def main(): + module = AnsibleModule( + argument_spec=dict( + dependencies=dict(type='list') + ), + supports_check_mode=True, + ) + if module._name == 'python_requirements_facts': + module.deprecate("The 'python_requirements_facts' module has been renamed to 'python_requirements_info'", version='2.13') + if not HAS_DISTUTILS: + module.fail_json( + msg='Could not import "distutils" and "pkg_resources" libraries to introspect python environment.', + python=sys.executable, + python_version=sys.version, + python_system_path=sys.path, + ) + pkg_dep_re = re.compile(r'(^[a-zA-Z][a-zA-Z0-9_-]+)(==|[><]=?)?([0-9.]+)?$') + + results = dict( + not_found=[], + mismatched={}, + valid={}, + ) + + for dep in (module.params.get('dependencies') or []): + match = pkg_dep_re.match(dep) + if match is None: + module.fail_json(msg="Failed to parse version requirement '{0}'. Must be formatted like 'ansible>2.6'".format(dep)) + pkg, op, version = match.groups() + if op is not None and op not in operations: + module.fail_json(msg="Failed to parse version requirement '{0}'. 
Operator must be one of >, <, <=, >=, or ==".format(dep))
+        try:
+            existing = pkg_resources.get_distribution(pkg).version
+        except pkg_resources.DistributionNotFound:
+            # not there
+            results['not_found'].append(pkg)
+            continue
+        if op is None and version is None:
+            results['valid'][pkg] = {
+                'installed': existing,
+                'desired': None,
+            }
+        elif operations[op](LooseVersion(existing), LooseVersion(version)):
+            results['valid'][pkg] = {
+                'installed': existing,
+                'desired': dep,
+            }
+        else:
+            results['mismatched'][pkg] = {
+                'installed': existing,
+                'desired': dep,
+            }
+
+    module.exit_json(
+        python=sys.executable,
+        python_version=sys.version,
+        python_system_path=sys.path,
+        **results
+    )
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/system/runit.py b/plugins/modules/system/runit.py
new file mode 100644
index 0000000000..2d5409e7f2
--- /dev/null
+++ b/plugins/modules/system/runit.py
@@ -0,0 +1,284 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2015, Brian Coca
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['stableinterface'],
+                    'supported_by': 'community'}
+
+# This is a modification of @bcoca's `svc` module
+
+DOCUMENTATION = r'''
+---
+module: runit
+author:
+- James Sumners (@jsumners)
+short_description: Manage runit services
+description:
+  - Controls runit services on remote hosts using the sv utility.
+options:
+  name:
+    description:
+    - Name of the service to manage.
+    type: str
+    required: yes
+  state:
+    description:
+    - C(started)/C(stopped) are idempotent actions that will not run
+      commands unless necessary. C(restarted) will always bounce the
+      service (sv restart) and C(killed) will always bounce the service (sv force-stop).
+      C(reloaded) will send a HUP (sv reload).
+      C(once) will run a normally downed sv once (sv once), not really
+      an idempotent operation.
+    type: str
+    choices: [ killed, once, reloaded, restarted, started, stopped ]
+  enabled:
+    description:
+    - Whether the service is enabled or not, if disabled it also implies stopped.
+    type: bool
+  service_dir:
+    description:
+    - directory runsv watches for services
+    type: str
+    default: /var/service
+  service_src:
+    description:
+    - directory where services are defined, the source of symlinks to service_dir.
+ type: str + default: /etc/sv +''' + +EXAMPLES = r''' +- name: Start sv dnscache, if not running + runit: + name: dnscache + state: started + +- name: Stop sv dnscache, if running + runit: + name: dnscache + state: stopped + +- name: Kill sv dnscache, in all cases + runit: + name: dnscache + state: killed + +- name: Restart sv dnscache, in all cases + runit: + name: dnscache + state: restarted + +- name: Reload sv dnscache, in all cases + runit: + name: dnscache + state: reloaded + +- name: Use alternative sv directory location + runit: + name: dnscache + state: reloaded + service_dir: /run/service +''' + +import os +import re + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_native + + +def _load_dist_subclass(cls, *args, **kwargs): + ''' + Used for derivative implementations + ''' + subclass = None + + distro = kwargs['module'].params['distro'] + + # get the most specific superclass for this platform + if distro is not None: + for sc in cls.__subclasses__(): + if sc.distro is not None and sc.distro == distro: + subclass = sc + if subclass is None: + subclass = cls + + return super(cls, subclass).__new__(subclass) + + +class Sv(object): + """ + Main class that handles daemontools, can be subclassed and overridden in case + we want to use a 'derivative' like encore, s6, etc + """ + + # def __new__(cls, *args, **kwargs): + # return _load_dist_subclass(cls, args, kwargs) + + def __init__(self, module): + self.extra_paths = [] + self.report_vars = ['state', 'enabled', 'svc_full', 'src_full', 'pid', 'duration', 'full_state'] + + self.module = module + + self.name = module.params['name'] + self.service_dir = module.params['service_dir'] + self.service_src = module.params['service_src'] + self.enabled = None + self.full_state = None + self.state = None + self.pid = None + self.duration = None + + self.svc_cmd = module.get_bin_path('sv', opt_dirs=self.extra_paths, required=True) + self.svstat_cmd = module.get_bin_path('sv', opt_dirs=self.extra_paths) + self.svc_full = '/'.join([self.service_dir, self.name]) + self.src_full = '/'.join([self.service_src, self.name]) + + self.enabled = os.path.lexists(self.svc_full) + if self.enabled: + self.get_status() + else: + self.state = 'stopped' + + def enable(self): + if os.path.exists(self.src_full): + try: + os.symlink(self.src_full, self.svc_full) + except OSError as e: + self.module.fail_json(path=self.src_full, msg='Error while linking: %s' % to_native(e)) + else: + self.module.fail_json(msg="Could not find source for service to enable (%s)." 
% self.src_full) + + def disable(self): + self.execute_command([self.svc_cmd, 'force-stop', self.src_full]) + try: + os.unlink(self.svc_full) + except OSError as e: + self.module.fail_json(path=self.svc_full, msg='Error while unlinking: %s' % to_native(e)) + + def get_status(self): + (rc, out, err) = self.execute_command([self.svstat_cmd, 'status', self.svc_full]) + + if err is not None and err: + self.full_state = self.state = err + else: + self.full_state = out + # full_state *may* contain information about the logger: + # "down: /etc/service/service-without-logger: 1s, normally up\n" + # "down: /etc/service/updater: 127s, normally up; run: log: (pid 364) 263439s\n" + full_state_no_logger = self.full_state.split("; ")[0] + + m = re.search(r'\(pid (\d+)\)', full_state_no_logger) + if m: + self.pid = m.group(1) + + m = re.search(r' (\d+)s', full_state_no_logger) + if m: + self.duration = m.group(1) + + if re.search(r'^run:', full_state_no_logger): + self.state = 'started' + elif re.search(r'^down:', full_state_no_logger): + self.state = 'stopped' + else: + self.state = 'unknown' + return + + def started(self): + return self.start() + + def start(self): + return self.execute_command([self.svc_cmd, 'start', self.svc_full]) + + def stopped(self): + return self.stop() + + def stop(self): + return self.execute_command([self.svc_cmd, 'stop', self.svc_full]) + + def once(self): + return self.execute_command([self.svc_cmd, 'once', self.svc_full]) + + def reloaded(self): + return self.reload() + + def reload(self): + return self.execute_command([self.svc_cmd, 'reload', self.svc_full]) + + def restarted(self): + return self.restart() + + def restart(self): + return self.execute_command([self.svc_cmd, 'restart', self.svc_full]) + + def killed(self): + return self.kill() + + def kill(self): + return self.execute_command([self.svc_cmd, 'force-stop', self.svc_full]) + + def execute_command(self, cmd): + try: + (rc, out, err) = self.module.run_command(' '.join(cmd)) + except Exception as e: + self.module.fail_json(msg="failed to execute: %s" % to_native(e)) + return (rc, out, err) + + def report(self): + self.get_status() + states = {} + for k in self.report_vars: + states[k] = self.__dict__[k] + return states + + +def main(): + module = AnsibleModule( + argument_spec=dict( + name=dict(type='str', required=True), + state=dict(type='str', choices=['killed', 'once', 'reloaded', 'restarted', 'started', 'stopped']), + enabled=dict(type='bool'), + dist=dict(type='str', default='runit'), + service_dir=dict(type='str', default='/var/service'), + service_src=dict(type='str', default='/etc/sv'), + ), + supports_check_mode=True, + ) + + module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C', LC_CTYPE='C') + + state = module.params['state'] + enabled = module.params['enabled'] + + sv = Sv(module) + changed = False + orig_state = sv.report() + + if enabled is not None and enabled != sv.enabled: + changed = True + if not module.check_mode: + try: + if enabled: + sv.enable() + else: + sv.disable() + except (OSError, IOError) as e: + module.fail_json(msg="Could not change service link: %s" % to_native(e)) + + if state is not None and state != sv.state: + changed = True + if not module.check_mode: + getattr(sv, state)() + + module.exit_json(changed=changed, sv=sv.report()) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/system/sefcontext.py b/plugins/modules/system/sefcontext.py new file mode 100644 index 0000000000..f6c2c8b74f --- /dev/null +++ 
b/plugins/modules/system/sefcontext.py @@ -0,0 +1,296 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2016, Dag Wieers (@dagwieers) +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = r''' +--- +module: sefcontext +short_description: Manages SELinux file context mapping definitions +description: +- Manages SELinux file context mapping definitions. +- Similar to the C(semanage fcontext) command. +options: + target: + description: + - Target path (expression). + type: str + required: yes + aliases: [ path ] + ftype: + description: + - The file type that should have SELinux contexts applied. + - "The following file type options are available:" + - C(a) for all files, + - C(b) for block devices, + - C(c) for character devices, + - C(d) for directories, + - C(f) for regular files, + - C(l) for symbolic links, + - C(p) for named pipes, + - C(s) for socket files. + type: str + choices: [ a, b, c, d, f, l, p, s ] + default: a + setype: + description: + - SELinux type for the specified target. + type: str + required: yes + seuser: + description: + - SELinux user for the specified target. + type: str + selevel: + description: + - SELinux range for the specified target. + type: str + aliases: [ serange ] + state: + description: + - Whether the SELinux file context must be C(absent) or C(present). + type: str + choices: [ absent, present ] + default: present + reload: + description: + - Reload SELinux policy after commit. + - Note that this does not apply SELinux file contexts to existing files. + type: bool + default: yes + ignore_selinux_state: + description: + - Useful for scenarios (chrooted environment) that you can't get the real SELinux state. + type: bool + default: no +notes: +- The changes are persistent across reboots. +- The M(sefcontext) module does not modify existing files to the new + SELinux context(s), so it is advisable to first create the SELinux + file contexts before creating files, or run C(restorecon) manually + for the existing files that require the new SELinux file contexts. +- Not applying SELinux fcontexts to existing files is a deliberate + decision as it would be unclear what reported changes would entail + to, and there's no guarantee that applying SELinux fcontext does + not pick up other unrelated prior changes. +requirements: +- libselinux-python +- policycoreutils-python +author: +- Dag Wieers (@dagwieers) +''' + +EXAMPLES = r''' +- name: Allow apache to modify files in /srv/git_repos + sefcontext: + target: '/srv/git_repos(/.*)?' 
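+    # The target is a regular expression, the same form 'semanage fcontext' accepts.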
+ setype: httpd_git_rw_content_t + state: present + +- name: Apply new SELinux file context to filesystem + command: restorecon -irv /srv/git_repos +''' + +RETURN = r''' +# Default return values +''' + +import traceback + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib +from ansible.module_utils._text import to_native + +SELINUX_IMP_ERR = None +try: + import selinux + HAVE_SELINUX = True +except ImportError: + SELINUX_IMP_ERR = traceback.format_exc() + HAVE_SELINUX = False + +SEOBJECT_IMP_ERR = None +try: + import seobject + HAVE_SEOBJECT = True +except ImportError: + SEOBJECT_IMP_ERR = traceback.format_exc() + HAVE_SEOBJECT = False + +# Add missing entries (backward compatible) +if HAVE_SEOBJECT: + seobject.file_types.update( + a=seobject.SEMANAGE_FCONTEXT_ALL, + b=seobject.SEMANAGE_FCONTEXT_BLOCK, + c=seobject.SEMANAGE_FCONTEXT_CHAR, + d=seobject.SEMANAGE_FCONTEXT_DIR, + f=seobject.SEMANAGE_FCONTEXT_REG, + l=seobject.SEMANAGE_FCONTEXT_LINK, + p=seobject.SEMANAGE_FCONTEXT_PIPE, + s=seobject.SEMANAGE_FCONTEXT_SOCK, + ) + +# Make backward compatible +option_to_file_type_str = dict( + a='all files', + b='block device', + c='character device', + d='directory', + f='regular file', + l='symbolic link', + p='named pipe', + s='socket', +) + + +def get_runtime_status(ignore_selinux_state=False): + return True if ignore_selinux_state is True else selinux.is_selinux_enabled() + + +def semanage_fcontext_exists(sefcontext, target, ftype): + ''' Get the SELinux file context mapping definition from policy. Return None if it does not exist. ''' + + # Beware that records comprise of a string representation of the file_type + record = (target, option_to_file_type_str[ftype]) + records = sefcontext.get_all() + try: + return records[record] + except KeyError: + return None + + +def semanage_fcontext_modify(module, result, target, ftype, setype, do_reload, serange, seuser, sestore=''): + ''' Add or modify SELinux file context mapping definition to the policy. 
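+    When the mapping does not exist yet, seuser falls back to 'system_u' and
+    the range to 's0' before the record is added.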
''' + + changed = False + prepared_diff = '' + + try: + sefcontext = seobject.fcontextRecords(sestore) + sefcontext.set_reload(do_reload) + exists = semanage_fcontext_exists(sefcontext, target, ftype) + if exists: + # Modify existing entry + orig_seuser, orig_serole, orig_setype, orig_serange = exists + + if seuser is None: + seuser = orig_seuser + if serange is None: + serange = orig_serange + + if setype != orig_setype or seuser != orig_seuser or serange != orig_serange: + if not module.check_mode: + sefcontext.modify(target, setype, ftype, serange, seuser) + changed = True + + if module._diff: + prepared_diff += '# Change to semanage file context mappings\n' + prepared_diff += '-%s %s %s:%s:%s:%s\n' % (target, ftype, orig_seuser, orig_serole, orig_setype, orig_serange) + prepared_diff += '+%s %s %s:%s:%s:%s\n' % (target, ftype, seuser, orig_serole, setype, serange) + else: + # Add missing entry + if seuser is None: + seuser = 'system_u' + if serange is None: + serange = 's0' + + if not module.check_mode: + sefcontext.add(target, setype, ftype, serange, seuser) + changed = True + + if module._diff: + prepared_diff += '# Addition to semanage file context mappings\n' + prepared_diff += '+%s %s %s:%s:%s:%s\n' % (target, ftype, seuser, 'object_r', setype, serange) + + except Exception as e: + module.fail_json(msg="%s: %s\n" % (e.__class__.__name__, to_native(e))) + + if module._diff and prepared_diff: + result['diff'] = dict(prepared=prepared_diff) + + module.exit_json(changed=changed, seuser=seuser, serange=serange, **result) + + +def semanage_fcontext_delete(module, result, target, ftype, do_reload, sestore=''): + ''' Delete SELinux file context mapping definition from the policy. ''' + + changed = False + prepared_diff = '' + + try: + sefcontext = seobject.fcontextRecords(sestore) + sefcontext.set_reload(do_reload) + exists = semanage_fcontext_exists(sefcontext, target, ftype) + if exists: + # Remove existing entry + orig_seuser, orig_serole, orig_setype, orig_serange = exists + + if not module.check_mode: + sefcontext.delete(target, ftype) + changed = True + + if module._diff: + prepared_diff += '# Deletion to semanage file context mappings\n' + prepared_diff += '-%s %s %s:%s:%s:%s\n' % (target, ftype, exists[0], exists[1], exists[2], exists[3]) + + except Exception as e: + module.fail_json(msg="%s: %s\n" % (e.__class__.__name__, to_native(e))) + + if module._diff and prepared_diff: + result['diff'] = dict(prepared=prepared_diff) + + module.exit_json(changed=changed, **result) + + +def main(): + module = AnsibleModule( + argument_spec=dict( + ignore_selinux_state=dict(type='bool', default=False), + target=dict(type='str', required=True, aliases=['path']), + ftype=dict(type='str', default='a', choices=option_to_file_type_str.keys()), + setype=dict(type='str', required=True), + seuser=dict(type='str'), + selevel=dict(type='str', aliases=['serange']), + state=dict(type='str', default='present', choices=['absent', 'present']), + reload=dict(type='bool', default=True), + ), + supports_check_mode=True, + ) + if not HAVE_SELINUX: + module.fail_json(msg=missing_required_lib("libselinux-python"), exception=SELINUX_IMP_ERR) + + if not HAVE_SEOBJECT: + module.fail_json(msg=missing_required_lib("policycoreutils-python"), exception=SEOBJECT_IMP_ERR) + + ignore_selinux_state = module.params['ignore_selinux_state'] + + if not get_runtime_status(ignore_selinux_state): + module.fail_json(msg="SELinux is disabled on this host.") + + target = module.params['target'] + ftype = module.params['ftype'] + 
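+    # Note: the 'selevel' option (alias 'serange') is passed to seobject
+    # unchanged and only takes effect on MLS/MCS-enabled policies.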
setype = module.params['setype'] + seuser = module.params['seuser'] + serange = module.params['selevel'] + state = module.params['state'] + do_reload = module.params['reload'] + + result = dict(target=target, ftype=ftype, setype=setype, state=state) + + if state == 'present': + semanage_fcontext_modify(module, result, target, ftype, setype, do_reload, serange, seuser) + elif state == 'absent': + semanage_fcontext_delete(module, result, target, ftype, do_reload) + else: + module.fail_json(msg='Invalid value of argument "state": {0}'.format(state)) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/system/selinux_permissive.py b/plugins/modules/system/selinux_permissive.py new file mode 100644 index 0000000000..6280c457a7 --- /dev/null +++ b/plugins/modules/system/selinux_permissive.py @@ -0,0 +1,131 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2015, Michael Scherer +# inspired by code of github.com/dandiker/ +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = r''' +--- +module: selinux_permissive +short_description: Change permissive domain in SELinux policy +description: + - Add and remove a domain from the list of permissive domains. +options: + domain: + description: + - The domain that will be added or removed from the list of permissive domains. + type: str + required: true + default: '' + aliases: [ name ] + permissive: + description: + - Indicate if the domain should or should not be set as permissive. + type: bool + required: true + no_reload: + description: + - Disable reloading of the SELinux policy after making change to a domain's permissive setting. + - The default is C(no), which causes policy to be reloaded when a domain changes state. + - Reloading the policy does not work on older versions of the C(policycoreutils-python) library, for example in EL 6." + type: bool + default: no + store: + description: + - Name of the SELinux policy store to use. + type: str +notes: + - Requires a recent version of SELinux and C(policycoreutils-python) (EL 6 or newer). 
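+# Older policycoreutils-python builds lack permissiveRecords.set_reload();
+# the module probes for it with dir() before calling it.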
+requirements: [ policycoreutils-python ] +author: +- Michael Scherer (@mscherer) +''' + +EXAMPLES = r''' +- name: Change the httpd_t domain to permissive + selinux_permissive: + name: httpd_t + permissive: true +''' + +import traceback + +HAVE_SEOBJECT = False +SEOBJECT_IMP_ERR = None +try: + import seobject + HAVE_SEOBJECT = True +except ImportError: + SEOBJECT_IMP_ERR = traceback.format_exc() + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib +from ansible.module_utils._text import to_native + + +def main(): + module = AnsibleModule( + argument_spec=dict( + domain=dict(type='str', required=True, aliases=['name']), + store=dict(type='str', default=''), + permissive=dict(type='bool', required=True), + no_reload=dict(type='bool', default=False), + ), + supports_check_mode=True, + ) + + # global vars + changed = False + store = module.params['store'] + permissive = module.params['permissive'] + domain = module.params['domain'] + no_reload = module.params['no_reload'] + + if not HAVE_SEOBJECT: + module.fail_json(changed=False, msg=missing_required_lib("policycoreutils-python"), + exception=SEOBJECT_IMP_ERR) + + try: + permissive_domains = seobject.permissiveRecords(store) + except ValueError as e: + module.fail_json(domain=domain, msg=to_native(e), exception=traceback.format_exc()) + + # not supported on EL 6 + if 'set_reload' in dir(permissive_domains): + permissive_domains.set_reload(not no_reload) + + try: + all_domains = permissive_domains.get_all() + except ValueError as e: + module.fail_json(domain=domain, msg=to_native(e), exception=traceback.format_exc()) + + if permissive: + if domain not in all_domains: + if not module.check_mode: + try: + permissive_domains.add(domain) + except ValueError as e: + module.fail_json(domain=domain, msg=to_native(e), exception=traceback.format_exc()) + changed = True + else: + if domain in all_domains: + if not module.check_mode: + try: + permissive_domains.delete(domain) + except ValueError as e: + module.fail_json(domain=domain, msg=to_native(e), exception=traceback.format_exc()) + changed = True + + module.exit_json(changed=changed, store=store, + permissive=permissive, domain=domain) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/system/selogin.py b/plugins/modules/system/selogin.py new file mode 100644 index 0000000000..74715e6159 --- /dev/null +++ b/plugins/modules/system/selogin.py @@ -0,0 +1,259 @@ +#!/usr/bin/python + +# (c) 2017, Petr Lautrbach +# Based on seport.py module (c) 2014, Dan Keder + +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . 
+ +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: selogin +short_description: Manages linux user to SELinux user mapping +description: + - Manages linux user to SELinux user mapping +options: + login: + description: + - a Linux user + required: true + seuser: + description: + - SELinux user name + required: true + selevel: + aliases: [ serange ] + description: + - MLS/MCS Security Range (MLS/MCS Systems only) SELinux Range for SELinux login mapping defaults to the SELinux user record range. + default: s0 + state: + description: + - Desired mapping value. + required: true + default: present + choices: [ 'present', 'absent' ] + reload: + description: + - Reload SELinux policy after commit. + default: yes + ignore_selinux_state: + description: + - Run independent of selinux runtime state + type: bool + default: false +notes: + - The changes are persistent across reboots + - Not tested on any debian based system +requirements: [ 'libselinux', 'policycoreutils' ] +author: +- Dan Keder (@dankeder) +- Petr Lautrbach (@bachradsusi) +- James Cassell (@jamescassell) +''' + +EXAMPLES = ''' +# Modify the default user on the system to the guest_u user +- selogin: + login: __default__ + seuser: guest_u + state: present + +# Assign gijoe user on an MLS machine a range and to the staff_u user +- selogin: + login: gijoe + seuser: staff_u + serange: SystemLow-Secret + state: present + +# Assign all users in the engineering group to the staff_u user +- selogin: + login: '%engineering' + seuser: staff_u + state: present +''' + +RETURN = r''' +# Default return values +''' + + +import traceback + +SELINUX_IMP_ERR = None +try: + import selinux + HAVE_SELINUX = True +except ImportError: + SELINUX_IMP_ERR = traceback.format_exc() + HAVE_SELINUX = False + +SEOBJECT_IMP_ERR = None +try: + import seobject + HAVE_SEOBJECT = True +except ImportError: + SEOBJECT_IMP_ERR = traceback.format_exc() + HAVE_SEOBJECT = False + + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib +from ansible.module_utils._text import to_native + + +def semanage_login_add(module, login, seuser, do_reload, serange='s0', sestore=''): + """ Add linux user to SELinux user mapping + + :type module: AnsibleModule + :param module: Ansible module + + :type login: str + :param login: a Linux User or a Linux group if it begins with % + + :type seuser: str + :param proto: An SELinux user ('__default__', 'unconfined_u', 'staff_u', ...), see 'semanage login -l' + + :type serange: str + :param serange: SELinux MLS/MCS range (defaults to 's0') + + :type do_reload: bool + :param do_reload: Whether to reload SELinux policy after commit + + :type sestore: str + :param sestore: SELinux store + + :rtype: bool + :return: True if the policy was changed, otherwise False + """ + try: + selogin = seobject.loginRecords(sestore) + selogin.set_reload(do_reload) + change = False + all_logins = selogin.get_all() + # module.fail_json(msg="%s: %s %s" % (all_logins, login, sestore)) + # for local_login in all_logins: + if login not in all_logins.keys(): + change = True + if not module.check_mode: + selogin.add(login, seuser, serange) + else: + if all_logins[login][0] != seuser or all_logins[login][1] != serange: + change = True + if not module.check_mode: + selogin.modify(login, seuser, serange) + + except (ValueError, KeyError, OSError, RuntimeError) as e: + 
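+        # Report seobject failures through fail_json, preserving the exception
+        # class name and full traceback.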
module.fail_json(msg="%s: %s\n" % (e.__class__.__name__, to_native(e)), exception=traceback.format_exc()) + + return change + + +def semanage_login_del(module, login, seuser, do_reload, sestore=''): + """ Delete linux user to SELinux user mapping + + :type module: AnsibleModule + :param module: Ansible module + + :type login: str + :param login: a Linux User or a Linux group if it begins with % + + :type seuser: str + :param proto: An SELinux user ('__default__', 'unconfined_u', 'staff_u', ...), see 'semanage login -l' + + :type do_reload: bool + :param do_reload: Whether to reload SELinux policy after commit + + :type sestore: str + :param sestore: SELinux store + + :rtype: bool + :return: True if the policy was changed, otherwise False + """ + try: + selogin = seobject.loginRecords(sestore) + selogin.set_reload(do_reload) + change = False + all_logins = selogin.get_all() + # module.fail_json(msg="%s: %s %s" % (all_logins, login, sestore)) + if login in all_logins.keys(): + change = True + if not module.check_mode: + selogin.delete(login) + + except (ValueError, KeyError, OSError, RuntimeError) as e: + module.fail_json(msg="%s: %s\n" % (e.__class__.__name__, to_native(e)), exception=traceback.format_exc()) + + return change + + +def get_runtime_status(ignore_selinux_state=False): + return True if ignore_selinux_state is True else selinux.is_selinux_enabled() + + +def main(): + module = AnsibleModule( + argument_spec=dict( + ignore_selinux_state=dict(type='bool', default=False), + login=dict(type='str', required=True), + seuser=dict(type='str'), + selevel=dict(type='str', aliases=['serange'], default='s0'), + state=dict(type='str', default='present', choices=['absent', 'present']), + reload=dict(type='bool', default=True), + ), + required_if=[ + ["state", "present", ["seuser"]] + ], + supports_check_mode=True + ) + if not HAVE_SELINUX: + module.fail_json(msg=missing_required_lib("libselinux"), exception=SELINUX_IMP_ERR) + + if not HAVE_SEOBJECT: + module.fail_json(msg=missing_required_lib("seobject from policycoreutils"), exception=SEOBJECT_IMP_ERR) + + ignore_selinux_state = module.params['ignore_selinux_state'] + + if not get_runtime_status(ignore_selinux_state): + module.fail_json(msg="SELinux is disabled on this host.") + + login = module.params['login'] + seuser = module.params['seuser'] + serange = module.params['selevel'] + state = module.params['state'] + do_reload = module.params['reload'] + + result = { + 'login': login, + 'seuser': seuser, + 'serange': serange, + 'state': state, + } + + if state == 'present': + result['changed'] = semanage_login_add(module, login, seuser, do_reload, serange) + elif state == 'absent': + result['changed'] = semanage_login_del(module, login, seuser, do_reload) + else: + module.fail_json(msg='Invalid value of argument "state": {0}'.format(state)) + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/system/seport.py b/plugins/modules/system/seport.py new file mode 100644 index 0000000000..c263037bd7 --- /dev/null +++ b/plugins/modules/system/seport.py @@ -0,0 +1,309 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2014, Dan Keder +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = r''' +--- +module: seport +short_description: Manages 
SELinux network port type definitions +description: + - Manages SELinux network port type definitions. +options: + ports: + description: + - Ports or port ranges. + - Can be a list (since 2.6) or comma separated string. + type: list + required: true + proto: + description: + - Protocol for the specified port. + type: str + required: true + choices: [ tcp, udp ] + setype: + description: + - SELinux type for the specified port. + type: str + required: true + state: + description: + - Desired boolean value. + type: str + choices: [ absent, present ] + default: present + reload: + description: + - Reload SELinux policy after commit. + type: bool + default: yes + ignore_selinux_state: + description: + - Run independent of selinux runtime state + type: bool + default: no +notes: + - The changes are persistent across reboots. + - Not tested on any debian based system. +requirements: +- libselinux-python +- policycoreutils-python +author: +- Dan Keder (@dankeder) +''' + +EXAMPLES = r''' +- name: Allow Apache to listen on tcp port 8888 + seport: + ports: 8888 + proto: tcp + setype: http_port_t + state: present + +- name: Allow sshd to listen on tcp port 8991 + seport: + ports: 8991 + proto: tcp + setype: ssh_port_t + state: present + +- name: Allow memcached to listen on tcp ports 10000-10100 and 10112 + seport: + ports: 10000-10100,10112 + proto: tcp + setype: memcache_port_t + state: present + +- name: Allow memcached to listen on tcp ports 10000-10100 and 10112 + seport: + ports: + - 10000-10100 + - 10112 + proto: tcp + setype: memcache_port_t + state: present +''' + +import traceback + +SELINUX_IMP_ERR = None +try: + import selinux + HAVE_SELINUX = True +except ImportError: + SELINUX_IMP_ERR = traceback.format_exc() + HAVE_SELINUX = False + +SEOBJECT_IMP_ERR = None +try: + import seobject + HAVE_SEOBJECT = True +except ImportError: + SEOBJECT_IMP_ERR = traceback.format_exc() + HAVE_SEOBJECT = False + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib +from ansible.module_utils._text import to_native + + +def get_runtime_status(ignore_selinux_state=False): + return True if ignore_selinux_state is True else selinux.is_selinux_enabled() + + +def semanage_port_get_ports(seport, setype, proto): + """ Get the list of ports that have the specified type definition. + + :param seport: Instance of seobject.portRecords + + :type setype: str + :param setype: SELinux type. + + :type proto: str + :param proto: Protocol ('tcp' or 'udp') + + :rtype: list + :return: List of ports that have the specified SELinux type. + """ + records = seport.get_all_by_type() + if (setype, proto) in records: + return records[(setype, proto)] + else: + return [] + + +def semanage_port_get_type(seport, port, proto): + """ Get the SELinux type of the specified port. + + :param seport: Instance of seobject.portRecords + + :type port: str + :param port: Port or port range (example: "8080", "8080-9090") + + :type proto: str + :param proto: Protocol ('tcp' or 'udp') + + :rtype: tuple + :return: Tuple containing the SELinux type and MLS/MCS level, or None if not found. + """ + if isinstance(port, str): + ports = port.split('-', 1) + if len(ports) == 1: + ports.extend(ports) + else: + ports = (port, port) + + key = (int(ports[0]), int(ports[1]), proto) + + records = seport.get_all() + if key in records: + return records[key] + else: + return None + + +def semanage_port_add(module, ports, proto, setype, do_reload, serange='s0', sestore=''): + """ Add SELinux port type definition to the policy. 
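+    Ports that already carry the requested type are skipped; ports with no
+    existing type definition are added, and ports registered with a different
+    type are modified in place.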
+ + :type module: AnsibleModule + :param module: Ansible module + + :type ports: list + :param ports: List of ports and port ranges to add (e.g. ["8080", "8080-9090"]) + + :type proto: str + :param proto: Protocol ('tcp' or 'udp') + + :type setype: str + :param setype: SELinux type + + :type do_reload: bool + :param do_reload: Whether to reload SELinux policy after commit + + :type serange: str + :param serange: SELinux MLS/MCS range (defaults to 's0') + + :type sestore: str + :param sestore: SELinux store + + :rtype: bool + :return: True if the policy was changed, otherwise False + """ + try: + seport = seobject.portRecords(sestore) + seport.set_reload(do_reload) + change = False + ports_by_type = semanage_port_get_ports(seport, setype, proto) + for port in ports: + if port not in ports_by_type: + change = True + port_type = semanage_port_get_type(seport, port, proto) + if port_type is None and not module.check_mode: + seport.add(port, proto, serange, setype) + elif port_type is not None and not module.check_mode: + seport.modify(port, proto, serange, setype) + + except (ValueError, IOError, KeyError, OSError, RuntimeError) as e: + module.fail_json(msg="%s: %s\n" % (e.__class__.__name__, to_native(e)), exception=traceback.format_exc()) + + return change + + +def semanage_port_del(module, ports, proto, setype, do_reload, sestore=''): + """ Delete SELinux port type definition from the policy. + + :type module: AnsibleModule + :param module: Ansible module + + :type ports: list + :param ports: List of ports and port ranges to delete (e.g. ["8080", "8080-9090"]) + + :type proto: str + :param proto: Protocol ('tcp' or 'udp') + + :type setype: str + :param setype: SELinux type. + + :type do_reload: bool + :param do_reload: Whether to reload SELinux policy after commit + + :type sestore: str + :param sestore: SELinux store + + :rtype: bool + :return: True if the policy was changed, otherwise False + """ + try: + seport = seobject.portRecords(sestore) + seport.set_reload(do_reload) + change = False + ports_by_type = semanage_port_get_ports(seport, setype, proto) + for port in ports: + if port in ports_by_type: + change = True + if not module.check_mode: + seport.delete(port, proto) + + except (ValueError, IOError, KeyError, OSError, RuntimeError) as e: + module.fail_json(msg="%s: %s\n" % (e.__class__.__name__, to_native(e)), exception=traceback.format_exc()) + + return change + + +def main(): + module = AnsibleModule( + argument_spec=dict( + ignore_selinux_state=dict(type='bool', default=False), + ports=dict(type='list', required=True), + proto=dict(type='str', required=True, choices=['tcp', 'udp']), + setype=dict(type='str', required=True), + state=dict(type='str', default='present', choices=['absent', 'present']), + reload=dict(type='bool', default=True), + ), + supports_check_mode=True, + ) + + if not HAVE_SELINUX: + module.fail_json(msg=missing_required_lib("libselinux-python"), exception=SELINUX_IMP_ERR) + + if not HAVE_SEOBJECT: + module.fail_json(msg=missing_required_lib("policycoreutils-python"), exception=SEOBJECT_IMP_ERR) + + ignore_selinux_state = module.params['ignore_selinux_state'] + + if not get_runtime_status(ignore_selinux_state): + module.fail_json(msg="SELinux is disabled on this host.") + + ports = module.params['ports'] + proto = module.params['proto'] + setype = module.params['setype'] + state = module.params['state'] + do_reload = module.params['reload'] + + result = { + 'ports': ports, + 'proto': proto, + 'setype': setype, + 'state': state, + } + + if state == 'present': + 
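+        # semanage_port_add falls back to its default MLS/MCS range of 's0' here.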
result['changed'] = semanage_port_add(module, ports, proto, setype, do_reload) + elif state == 'absent': + result['changed'] = semanage_port_del(module, ports, proto, setype, do_reload) + else: + module.fail_json(msg='Invalid value of argument "state": {0}'.format(state)) + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/system/solaris_zone.py b/plugins/modules/system/solaris_zone.py new file mode 100644 index 0000000000..ac7471e694 --- /dev/null +++ b/plugins/modules/system/solaris_zone.py @@ -0,0 +1,490 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2015, Paul Markham +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = r''' +--- +module: solaris_zone +short_description: Manage Solaris zones +description: + - Create, start, stop and delete Solaris zones. + - This module does not currently allow changing of options for a zone that is already been created. +author: +- Paul Markham (@pmarkham) +requirements: + - Solaris 10 or 11 +options: + state: + description: + - C(present), configure and install the zone. + - C(installed), synonym for C(present). + - C(running), if the zone already exists, boot it, otherwise, configure and install + the zone first, then boot it. + - C(started), synonym for C(running). + - C(stopped), shutdown a zone. + - C(absent), destroy the zone. + - C(configured), configure the ready so that it's to be attached. + - C(attached), attach a zone, but do not boot it. + - C(detached), shutdown and detach a zone + type: str + choices: [ absent, attached, configured, detached, installed, present, running, started, stopped ] + default: present + required: true + name: + description: + - Zone name. + - A zone name must be unique name. + - A zone name must begin with an alpha-numeric character. + - The name can contain alpha-numeric characters, underbars I(_), hyphens I(-), and periods I(.). + - The name cannot be longer than 64 characters. + type: str + required: true + path: + description: + - The path where the zone will be created. This is required when the zone is created, but not + used otherwise. + type: str + sparse: + description: + - Whether to create a sparse (C(true)) or whole root (C(false)) zone. + type: bool + default: no + root_password: + description: + - The password hash for the root account. If not specified, the zone's root account + will not have a password. + type: str + config: + description: + - 'The zonecfg configuration commands for this zone. See zonecfg(1M) for the valid options + and syntax. Typically this is a list of options separated by semi-colons or new lines, e.g. + "set auto-boot=true;add net;set physical=bge0;set address=10.1.1.1;end"' + type: str + default: '' + create_options: + description: + - 'Extra options to the zonecfg(1M) create command.' + type: str + default: '' + install_options: + description: + - 'Extra options to the zoneadm(1M) install command. To automate Solaris 11 zone creation, + use this to specify the profile XML file, e.g. install_options="-c sc_profile.xml"' + type: str + default: '' + attach_options: + description: + - 'Extra options to the zoneadm attach command. 
For example, this can be used to specify + whether a minimum or full update of packages is required and if any packages need to + be deleted. For valid values, see zoneadm(1M)' + type: str + default: '' + timeout: + description: + - Timeout, in seconds, for zone to boot. + type: int + default: 600 +''' + +EXAMPLES = ''' +- name: Create and install a zone, but don't boot it + solaris_zone: + name: zone1 + state: present + path: /zones/zone1 + sparse: True + root_password: Be9oX7OSwWoU. + config: 'set autoboot=true; add net; set physical=bge0; set address=10.1.1.1; end' + +- name: Create and install a zone and boot it + solaris_zone: + name: zone1 + state: running + path: /zones/zone1 + root_password: Be9oX7OSwWoU. + config: 'set autoboot=true; add net; set physical=bge0; set address=10.1.1.1; end' + +- name: Boot an already installed zone + solaris_zone: + name: zone1 + state: running + +- name: Stop a zone + solaris_zone: + name: zone1 + state: stopped + +- name: Destroy a zone + solaris_zone: + name: zone1 + state: absent + +- name: Detach a zone + solaris_zone: + name: zone1 + state: detached + +- name: Configure a zone, ready to be attached + solaris_zone: + name: zone1 + state: configured + path: /zones/zone1 + root_password: Be9oX7OSwWoU. + config: 'set autoboot=true; add net; set physical=bge0; set address=10.1.1.1; end' + +- name: Attach zone1 + solaris_zone: + name: zone1 + state: attached + attach_options: -u +''' + +import os +import platform +import re +import tempfile +import time + +from ansible.module_utils.basic import AnsibleModule + + +class Zone(object): + def __init__(self, module): + self.changed = False + self.msg = [] + + self.module = module + self.path = self.module.params['path'] + self.name = self.module.params['name'] + self.sparse = self.module.params['sparse'] + self.root_password = self.module.params['root_password'] + self.timeout = self.module.params['timeout'] + self.config = self.module.params['config'] + self.create_options = self.module.params['create_options'] + self.install_options = self.module.params['install_options'] + self.attach_options = self.module.params['attach_options'] + + self.zoneadm_cmd = self.module.get_bin_path('zoneadm', True) + self.zonecfg_cmd = self.module.get_bin_path('zonecfg', True) + self.ssh_keygen_cmd = self.module.get_bin_path('ssh-keygen', True) + + if self.module.check_mode: + self.msg.append('Running in check mode') + + if platform.system() != 'SunOS': + self.module.fail_json(msg='This module requires Solaris') + + (self.os_major, self.os_minor) = platform.release().split('.') + if int(self.os_minor) < 10: + self.module.fail_json(msg='This module requires Solaris 10 or later') + + match = re.match('^[a-zA-Z0-9][-_.a-zA-Z0-9]{0,62}$', self.name) + if not match: + self.module.fail_json(msg="Provided zone name is not a valid zone name. 
" + "Please refer documentation for correct zone name specifications.") + + def configure(self): + if not self.path: + self.module.fail_json(msg='Missing required argument: path') + + if not self.module.check_mode: + t = tempfile.NamedTemporaryFile(delete=False) + + if self.sparse: + t.write('create %s\n' % self.create_options) + self.msg.append('creating sparse-root zone') + else: + t.write('create -b %s\n' % self.create_options) + self.msg.append('creating whole-root zone') + + t.write('set zonepath=%s\n' % self.path) + t.write('%s\n' % self.config) + t.close() + + cmd = '%s -z %s -f %s' % (self.zonecfg_cmd, self.name, t.name) + (rc, out, err) = self.module.run_command(cmd) + if rc != 0: + self.module.fail_json(msg='Failed to create zone. %s' % (out + err)) + os.unlink(t.name) + + self.changed = True + self.msg.append('zone configured') + + def install(self): + if not self.module.check_mode: + cmd = '%s -z %s install %s' % (self.zoneadm_cmd, self.name, self.install_options) + (rc, out, err) = self.module.run_command(cmd) + if rc != 0: + self.module.fail_json(msg='Failed to install zone. %s' % (out + err)) + if int(self.os_minor) == 10: + self.configure_sysid() + self.configure_password() + self.configure_ssh_keys() + self.changed = True + self.msg.append('zone installed') + + def uninstall(self): + if self.is_installed(): + if not self.module.check_mode: + cmd = '%s -z %s uninstall -F' % (self.zoneadm_cmd, self.name) + (rc, out, err) = self.module.run_command(cmd) + if rc != 0: + self.module.fail_json(msg='Failed to uninstall zone. %s' % (out + err)) + self.changed = True + self.msg.append('zone uninstalled') + + def configure_sysid(self): + if os.path.isfile('%s/root/etc/.UNCONFIGURED' % self.path): + os.unlink('%s/root/etc/.UNCONFIGURED' % self.path) + + open('%s/root/noautoshutdown' % self.path, 'w').close() + + node = open('%s/root/etc/nodename' % self.path, 'w') + node.write(self.name) + node.close() + + id = open('%s/root/etc/.sysIDtool.state' % self.path, 'w') + id.write('1 # System previously configured?\n') + id.write('1 # Bootparams succeeded?\n') + id.write('1 # System is on a network?\n') + id.write('1 # Extended network information gathered?\n') + id.write('0 # Autobinder succeeded?\n') + id.write('1 # Network has subnets?\n') + id.write('1 # root password prompted for?\n') + id.write('1 # locale and term prompted for?\n') + id.write('1 # security policy in place\n') + id.write('1 # NFSv4 domain configured\n') + id.write('0 # Auto Registration Configured\n') + id.write('vt100') + id.close() + + def configure_ssh_keys(self): + rsa_key_file = '%s/root/etc/ssh/ssh_host_rsa_key' % self.path + dsa_key_file = '%s/root/etc/ssh/ssh_host_dsa_key' % self.path + + if not os.path.isfile(rsa_key_file): + cmd = '%s -f %s -t rsa -N ""' % (self.ssh_keygen_cmd, rsa_key_file) + (rc, out, err) = self.module.run_command(cmd) + if rc != 0: + self.module.fail_json(msg='Failed to create rsa key. %s' % (out + err)) + + if not os.path.isfile(dsa_key_file): + cmd = '%s -f %s -t dsa -N ""' % (self.ssh_keygen_cmd, dsa_key_file) + (rc, out, err) = self.module.run_command(cmd) + if rc != 0: + self.module.fail_json(msg='Failed to create dsa key. 
%s' % (out + err)) + + def configure_password(self): + shadow = '%s/root/etc/shadow' % self.path + if self.root_password: + f = open(shadow, 'r') + lines = f.readlines() + f.close() + + for i in range(0, len(lines)): + fields = lines[i].split(':') + if fields[0] == 'root': + fields[1] = self.root_password + lines[i] = ':'.join(fields) + + f = open(shadow, 'w') + for line in lines: + f.write(line) + f.close() + + def boot(self): + if not self.module.check_mode: + cmd = '%s -z %s boot' % (self.zoneadm_cmd, self.name) + (rc, out, err) = self.module.run_command(cmd) + if rc != 0: + self.module.fail_json(msg='Failed to boot zone. %s' % (out + err)) + + """ + The boot command can return before the zone has fully booted. This is especially + true on the first boot when the zone initializes the SMF services. Unless the zone + has fully booted, subsequent tasks in the playbook may fail as services aren't running yet. + Wait until the zone's console login is running; once that's running, consider the zone booted. + """ + + elapsed = 0 + while True: + if elapsed > self.timeout: + self.module.fail_json(msg='timed out waiting for zone to boot') + rc = os.system('ps -z %s -o args|grep "ttymon.*-d /dev/console" > /dev/null 2>/dev/null' % self.name) + if rc == 0: + break + time.sleep(10) + elapsed += 10 + self.changed = True + self.msg.append('zone booted') + + def destroy(self): + if self.is_running(): + self.stop() + if self.is_installed(): + self.uninstall() + if not self.module.check_mode: + cmd = '%s -z %s delete -F' % (self.zonecfg_cmd, self.name) + (rc, out, err) = self.module.run_command(cmd) + if rc != 0: + self.module.fail_json(msg='Failed to delete zone. %s' % (out + err)) + self.changed = True + self.msg.append('zone deleted') + + def stop(self): + if not self.module.check_mode: + cmd = '%s -z %s halt' % (self.zoneadm_cmd, self.name) + (rc, out, err) = self.module.run_command(cmd) + if rc != 0: + self.module.fail_json(msg='Failed to stop zone. %s' % (out + err)) + self.changed = True + self.msg.append('zone stopped') + + def detach(self): + if not self.module.check_mode: + cmd = '%s -z %s detach' % (self.zoneadm_cmd, self.name) + (rc, out, err) = self.module.run_command(cmd) + if rc != 0: + self.module.fail_json(msg='Failed to detach zone. %s' % (out + err)) + self.changed = True + self.msg.append('zone detached') + + def attach(self): + if not self.module.check_mode: + cmd = '%s -z %s attach %s' % (self.zoneadm_cmd, self.name, self.attach_options) + (rc, out, err) = self.module.run_command(cmd) + if rc != 0: + self.module.fail_json(msg='Failed to attach zone. 
%s' % (out + err)) + self.changed = True + self.msg.append('zone attached') + + def exists(self): + cmd = '%s -z %s list' % (self.zoneadm_cmd, self.name) + (rc, out, err) = self.module.run_command(cmd) + if rc == 0: + return True + else: + return False + + def is_running(self): + return self.status() == 'running' + + def is_installed(self): + return self.status() == 'installed' + + def is_configured(self): + return self.status() == 'configured' + + def status(self): + cmd = '%s -z %s list -p' % (self.zoneadm_cmd, self.name) + (rc, out, err) = self.module.run_command(cmd) + if rc == 0: + return out.split(':')[2] + else: + return 'undefined' + + def state_present(self): + if self.exists(): + self.msg.append('zone already exists') + else: + self.configure() + self.install() + + def state_running(self): + self.state_present() + if self.is_running(): + self.msg.append('zone already running') + else: + self.boot() + + def state_stopped(self): + if self.exists(): + self.stop() + else: + self.module.fail_json(msg='zone does not exist') + + def state_absent(self): + if self.exists(): + if self.is_running(): + self.stop() + self.destroy() + else: + self.msg.append('zone does not exist') + + def state_configured(self): + if self.exists(): + self.msg.append('zone already exists') + else: + self.configure() + + def state_detached(self): + if not self.exists(): + self.module.fail_json(msg='zone does not exist') + if self.is_configured(): + self.msg.append('zone already detached') + else: + self.stop() + self.detach() + + def state_attached(self): + if not self.exists(): + self.msg.append('zone does not exist') + if self.is_configured(): + self.attach() + else: + self.msg.append('zone already attached') + + +def main(): + module = AnsibleModule( + argument_spec=dict( + name=dict(type='str', required=True), + state=dict(type='str', default='present', + choices=['absent', 'attached', 'configured', 'detached', 'installed', 'present', 'running', 'started', 'stopped']), + path=dict(type='str'), + sparse=dict(type='bool', default=False), + root_password=dict(type='str', no_log=True), + timeout=dict(type='int', default=600), + config=dict(type='str', default=''), + create_options=dict(type='str', default=''), + install_options=dict(type='str', default=''), + attach_options=dict(type='str', default=''), + ), + supports_check_mode=True, + ) + + zone = Zone(module) + + state = module.params['state'] + + if state == 'running' or state == 'started': + zone.state_running() + elif state == 'present' or state == 'installed': + zone.state_present() + elif state == 'stopped': + zone.state_stopped() + elif state == 'absent': + zone.state_absent() + elif state == 'configured': + zone.state_configured() + elif state == 'detached': + zone.state_detached() + elif state == 'attached': + zone.state_attached() + else: + module.fail_json(msg='Invalid state: %s' % state) + + module.exit_json(changed=zone.changed, msg=', '.join(zone.msg)) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/system/svc.py b/plugins/modules/system/svc.py new file mode 100644 index 0000000000..df05b8f682 --- /dev/null +++ b/plugins/modules/system/svc.py @@ -0,0 +1,302 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright: (c) 2015, Brian Coca +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['stableinterface'], + 'supported_by': 
'community'} + +DOCUMENTATION = ''' +--- +module: svc +author: +- Brian Coca (@bcoca) +short_description: Manage daemontools services +description: + - Controls daemontools services on remote hosts using the svc utility. +options: + name: + description: + - Name of the service to manage. + type: str + required: true + state: + description: + - C(Started)/C(stopped) are idempotent actions that will not run + commands unless necessary. C(restarted) will always bounce the + svc (svc -t) and C(killed) will always bounce the svc (svc -k). + C(reloaded) will send a sigusr1 (svc -1). + C(once) will run a normally downed svc once (svc -o), not really + an idempotent operation. + type: str + choices: [ killed, once, reloaded, restarted, started, stopped ] + downed: + description: + - Should a 'down' file exist or not, if it exists it disables auto startup. + Defaults to no. Downed does not imply stopped. + type: bool + default: no + enabled: + description: + - Whether the service is enabled or not, if disabled it also implies stopped. + Take note that a service can be enabled and downed (no auto restart). + type: bool + service_dir: + description: + - Directory svscan watches for services + type: str + default: /service + service_src: + description: + - Directory where services are defined, the source of symlinks to service_dir. + type: str + default: /etc/service +''' + +EXAMPLES = ''' +- name: Start svc dnscache, if not running + svc: + name: dnscache + state: started + +- name: Stop svc dnscache, if running + svc: + name: dnscache + state: stopped + +- name: Kill svc dnscache, in all cases + svc: + name: dnscache + state: killed + +- name: Restart svc dnscache, in all cases + svc: + name: dnscache + state: restarted + +- name: Reload svc dnscache, in all cases + svc: + name: dnscache + state: reloaded + +- name: Using alternative svc directory location + svc: + name: dnscache + state: reloaded + service_dir: /var/service +''' + +import os +import re +import traceback + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_native + + +def _load_dist_subclass(cls, *args, **kwargs): + ''' + Used for derivative implementations + ''' + subclass = None + + distro = kwargs['module'].params['distro'] + + # get the most specific superclass for this platform + if distro is not None: + for sc in cls.__subclasses__(): + if sc.distro is not None and sc.distro == distro: + subclass = sc + if subclass is None: + subclass = cls + + return super(cls, subclass).__new__(subclass) + + +class Svc(object): + """ + Main class that handles daemontools, can be subclassed and overridden in case + we want to use a 'derivative' like encore, s6, etc + """ + + # def __new__(cls, *args, **kwargs): + # return _load_dist_subclass(cls, args, kwargs) + + def __init__(self, module): + self.extra_paths = ['/command', '/usr/local/bin'] + self.report_vars = ['state', 'enabled', 'downed', 'svc_full', 'src_full', 'pid', 'duration', 'full_state'] + + self.module = module + + self.name = module.params['name'] + self.service_dir = module.params['service_dir'] + self.service_src = module.params['service_src'] + self.enabled = None + self.downed = None + self.full_state = None + self.state = None + self.pid = None + self.duration = None + + self.svc_cmd = module.get_bin_path('svc', opt_dirs=self.extra_paths) + self.svstat_cmd = module.get_bin_path('svstat', opt_dirs=self.extra_paths) + self.svc_full = '/'.join([self.service_dir, self.name]) + self.src_full = '/'.join([self.service_src, self.name]) 
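+        # A service is "enabled" when service_dir (the directory svscan
+        # watches) contains a symlink to its definition under service_src;
+        # a 'down' file inside the service directory disables auto startup.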
+ + self.enabled = os.path.lexists(self.svc_full) + if self.enabled: + self.downed = os.path.lexists('%s/down' % self.svc_full) + self.get_status() + else: + self.downed = os.path.lexists('%s/down' % self.src_full) + self.state = 'stopped' + + def enable(self): + if os.path.exists(self.src_full): + try: + os.symlink(self.src_full, self.svc_full) + except OSError as e: + self.module.fail_json(path=self.src_full, msg='Error while linking: %s' % to_native(e)) + else: + self.module.fail_json(msg="Could not find source for service to enable (%s)." % self.src_full) + + def disable(self): + try: + os.unlink(self.svc_full) + except OSError as e: + self.module.fail_json(path=self.svc_full, msg='Error while unlinking: %s' % to_native(e)) + self.execute_command([self.svc_cmd, '-dx', self.src_full]) + + src_log = '%s/log' % self.src_full + if os.path.exists(src_log): + self.execute_command([self.svc_cmd, '-dx', src_log]) + + def get_status(self): + (rc, out, err) = self.execute_command([self.svstat_cmd, self.svc_full]) + + if err is not None and err: + self.full_state = self.state = err + else: + self.full_state = out + + m = re.search(r'\(pid (\d+)\)', out) + if m: + self.pid = m.group(1) + + m = re.search(r'(\d+) seconds', out) + if m: + self.duration = m.group(1) + + if re.search(' up ', out): + self.state = 'start' + elif re.search(' down ', out): + self.state = 'stopp' + else: + self.state = 'unknown' + return + + if re.search(' want ', out): + self.state += 'ing' + else: + self.state += 'ed' + + def start(self): + return self.execute_command([self.svc_cmd, '-u', self.svc_full]) + + def stopp(self): + return self.stop() + + def stop(self): + return self.execute_command([self.svc_cmd, '-d', self.svc_full]) + + def once(self): + return self.execute_command([self.svc_cmd, '-o', self.svc_full]) + + def reload(self): + return self.execute_command([self.svc_cmd, '-1', self.svc_full]) + + def restart(self): + return self.execute_command([self.svc_cmd, '-t', self.svc_full]) + + def kill(self): + return self.execute_command([self.svc_cmd, '-k', self.svc_full]) + + def execute_command(self, cmd): + try: + (rc, out, err) = self.module.run_command(' '.join(cmd)) + except Exception as e: + self.module.fail_json(msg="failed to execute: %s" % to_native(e), exception=traceback.format_exc()) + return (rc, out, err) + + def report(self): + self.get_status() + states = {} + for k in self.report_vars: + states[k] = self.__dict__[k] + return states + + +# =========================================== +# Main control flow + +def main(): + module = AnsibleModule( + argument_spec=dict( + name=dict(type='str', required=True), + state=dict(type='str', choices=['killed', 'once', 'reloaded', 'restarted', 'started', 'stopped']), + enabled=dict(type='bool'), + downed=dict(type='bool'), + service_dir=dict(type='str', default='/service'), + service_src=dict(type='str', default='/etc/service'), + ), + supports_check_mode=True, + ) + + module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C', LC_CTYPE='C') + + state = module.params['state'] + enabled = module.params['enabled'] + downed = module.params['downed'] + + svc = Svc(module) + changed = False + orig_state = svc.report() + + if enabled is not None and enabled != svc.enabled: + changed = True + if not module.check_mode: + try: + if enabled: + svc.enable() + else: + svc.disable() + except (OSError, IOError) as e: + module.fail_json(msg="Could not change service link: %s" % to_native(e)) + + if state is not None and state != svc.state: + changed = True + 
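+        # Dispatch by stripping the trailing 'ed' from the requested state:
+        # 'started' -> start(), 'killed' -> kill(), 'restarted' -> restart(),
+        # 'reloaded' -> reload(), and 'stopped' -> stopp(), the alias defined
+        # above for stop(). (Note: 'once'[:-2] yields 'on', which has no
+        # matching method, so state=once may not dispatch correctly here.)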
if not module.check_mode: + getattr(svc, state[:-2])() + + if downed is not None and downed != svc.downed: + changed = True + if not module.check_mode: + d_file = "%s/down" % svc.svc_full + try: + if downed: + open(d_file, "a").close() + else: + os.unlink(d_file) + except (OSError, IOError) as e: + module.fail_json(msg="Could not change downed file: %s " % (to_native(e))) + + module.exit_json(changed=changed, svc=svc.report()) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/system/syspatch.py b/plugins/modules/system/syspatch.py new file mode 100644 index 0000000000..2606f2297e --- /dev/null +++ b/plugins/modules/system/syspatch.py @@ -0,0 +1,180 @@ +#!/usr/bin/python + +# Copyright: (c) 2019, Andrew Klaus +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +ANSIBLE_METADATA = { + 'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community' +} + +DOCUMENTATION = ''' +--- +module: syspatch + +short_description: Manage OpenBSD system patches + + +description: + - "Manage OpenBSD system patches using syspatch" + +options: + apply: + description: + - Apply all available system patches + default: False + required: false + revert: + description: + - Revert system patches + required: false + type: str + choices: [ all, one ] + +author: + - Andrew Klaus (@precurse) +''' + +EXAMPLES = ''' +- name: Apply all available system patches + syspatch: + apply: true + +- name: Revert last patch + syspatch: + revert: one + +- name: Revert all patches + syspatch: + revert: all + +# NOTE: You can reboot automatically if a patch requires it: +- name: Apply all patches and store result + syspatch: + apply: true + register: syspatch + +- name: Reboot if patch requires it + reboot: + when: syspatch.reboot_needed +''' + +RETURN = r''' +rc: + description: The command return code (0 means success) + returned: always + type: int +stdout: + description: syspatch standard output + returned: always + type: str + sample: "001_rip6cksum" +stderr: + description: syspatch standard error + returned: always + type: str + sample: "syspatch: need root privileges" +reboot_needed: + description: Whether or not a reboot is required after an update + returned: always + type: bool + sample: True +''' + +from ansible.module_utils.basic import AnsibleModule + + +def run_module(): + # define available arguments/parameters a user can pass to the module + module_args = dict( + apply=dict(type='bool'), + revert=dict(type='str', choices=['all', 'one']) + ) + + module = AnsibleModule( + argument_spec=module_args, + supports_check_mode=True, + required_one_of=[['apply', 'revert']] + ) + + result = syspatch_run(module) + + module.exit_json(**result) + + +def syspatch_run(module): + cmd = module.get_bin_path('syspatch', True) + changed = False + reboot_needed = False + warnings = [] + + # Set safe defaults for run_flag and check_flag + run_flag = ['-c'] + check_flag = ['-c'] + if module.params['revert']: + check_flag = ['-l'] + + if module.params['revert'] == 'all': + run_flag = ['-R'] + else: + run_flag = ['-r'] + elif module.params['apply']: + check_flag = ['-c'] + run_flag = [] + + # Run check command + rc, out, err = module.run_command([cmd] + check_flag) + + if rc != 0: + module.fail_json(msg="Command %s failed rc=%d, out=%s, err=%s" % (cmd, rc, out, err)) + + if len(out) > 0: + # Changes pending + change_pending = True + else: + # No changes pending + 
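+        # (`syspatch -c` lists available patches and `syspatch -l` lists
+        # installed ones on stdout, so empty output means nothing to do)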
change_pending = False + + if module.check_mode: + changed = change_pending + elif change_pending: + rc, out, err = module.run_command([cmd] + run_flag) + + # Workaround syspatch ln bug: + # http://openbsd-archive.7691.n7.nabble.com/Warning-applying-latest-syspatch-td354250.html + if rc != 0 and err != 'ln: /usr/X11R6/bin/X: No such file or directory\n': + module.fail_json(msg="Command %s failed rc=%d, out=%s, err=%s" % (cmd, rc, out, err)) + elif out.lower().find('create unique kernel') > 0: + # Kernel update applied + reboot_needed = True + elif out.lower().find('syspatch updated itself') > 0: + warnings.append('Syspatch was updated. Please run syspatch again.') + + # If no stdout, then warn user + if len(out) == 0: + warnings.append('syspatch had suggested changes, but stdout was empty.') + + changed = True + else: + changed = False + + return dict( + changed=changed, + reboot_needed=reboot_needed, + rc=rc, + stderr=err, + stdout=out, + warnings=warnings + ) + + +def main(): + run_module() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/system/timezone.py b/plugins/modules/system/timezone.py new file mode 100644 index 0000000000..9aab18debc --- /dev/null +++ b/plugins/modules/system/timezone.py @@ -0,0 +1,908 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2016, Shinichi TAMURA (@tmshn) +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = r''' +--- +module: timezone +short_description: Configure timezone setting +description: + - This module configures the timezone setting, both of the system clock and of the hardware clock. If you want to set up the NTP, use M(service) module. + - It is recommended to restart C(crond) after changing the timezone, otherwise the jobs may run at the wrong time. + - Several different tools are used depending on the OS/Distribution involved. + For Linux it can use C(timedatectl) or edit C(/etc/sysconfig/clock) or C(/etc/timezone) and C(hwclock). + On SmartOS, C(sm-set-timezone), for macOS, C(systemsetup), for BSD, C(/etc/localtime) is modified. + On AIX, C(chtz) is used. + - As of Ansible 2.3 support was added for SmartOS and BSDs. + - As of Ansible 2.4 support was added for macOS. + - As of Ansible 2.9 support was added for AIX 6.1+ + - Windows and HPUX are not supported, please let us know if you find any other OS/distro in which this fails. +options: + name: + description: + - Name of the timezone for the system clock. + - Default is to keep current setting. + - B(At least one of name and hwclock are required.) + type: str + hwclock: + description: + - Whether the hardware clock is in UTC or in local timezone. + - Default is to keep current setting. + - Note that this option is recommended not to change and may fail + to configure, especially on virtual environments such as AWS. + - B(At least one of name and hwclock are required.) + - I(Only used on Linux.) + type: str + aliases: [ rtc ] + choices: [ local, UTC ] +notes: + - On SmartOS the C(sm-set-timezone) utility (part of the smtools package) is required to set the zone timezone + - On AIX only Olson/tz database timezones are useable (POSIX is not supported). + - An OS reboot is also required on AIX for the new timezone setting to take effect. 
+author: + - Shinichi TAMURA (@tmshn) + - Jasper Lievisse Adriaanse (@jasperla) + - Indrajit Raychaudhuri (@indrajitr) +''' + +RETURN = r''' +diff: + description: The differences about the given arguments. + returned: success + type: complex + contains: + before: + description: The values before change + type: dict + after: + description: The values after change + type: dict +''' + +EXAMPLES = r''' +- name: Set timezone to Asia/Tokyo + timezone: + name: Asia/Tokyo +''' + +import errno +import os +import platform +import random +import re +import string +import filecmp + +from ansible.module_utils.basic import AnsibleModule, get_distribution +from ansible.module_utils.six import iteritems + + +class Timezone(object): + """This is a generic Timezone manipulation class that is subclassed based on platform. + + A subclass may wish to override the following action methods: + - get(key, phase) ... get the value from the system at `phase` + - set(key, value) ... set the value to the current system + """ + + def __new__(cls, module): + """Return the platform-specific subclass. + + It does not use load_platform_subclass() because it needs to judge based + on whether the `timedatectl` command exists and is available. + + Args: + module: The AnsibleModule. + """ + if platform.system() == 'Linux': + timedatectl = module.get_bin_path('timedatectl') + if timedatectl is not None: + rc, stdout, stderr = module.run_command(timedatectl) + if rc == 0: + return super(Timezone, SystemdTimezone).__new__(SystemdTimezone) + else: + module.warn('timedatectl command was found but not usable: %s. using other method.' % stderr) + return super(Timezone, NosystemdTimezone).__new__(NosystemdTimezone) + else: + return super(Timezone, NosystemdTimezone).__new__(NosystemdTimezone) + elif re.match('^joyent_.*Z', platform.version()): + # platform.system() returns SunOS, which is too broad. So look at the + # platform version instead. However we have to ensure that we're not + # running in the global zone where changing the timezone has no effect. + zonename_cmd = module.get_bin_path('zonename') + if zonename_cmd is not None: + (rc, stdout, _) = module.run_command(zonename_cmd) + if rc == 0 and stdout.strip() == 'global': + module.fail_json(msg='Adjusting timezone is not supported in Global Zone') + + return super(Timezone, SmartOSTimezone).__new__(SmartOSTimezone) + elif re.match('^Darwin', platform.platform()): + return super(Timezone, DarwinTimezone).__new__(DarwinTimezone) + elif re.match('^(Free|Net|Open)BSD', platform.platform()): + return super(Timezone, BSDTimezone).__new__(BSDTimezone) + elif platform.system() == 'AIX': + AIXoslevel = int(platform.version() + platform.release()) + if AIXoslevel >= 61: + return super(Timezone, AIXTimezone).__new__(AIXTimezone) + else: + module.fail_json(msg='AIX os level must be >= 61 for timezone module (Target: %s).' % AIXoslevel) + else: + # Not supported yet + return super(Timezone, Timezone).__new__(Timezone) + + def __init__(self, module): + """Initialize of the class. + + Args: + module: The AnsibleModule. + """ + super(Timezone, self).__init__() + self.msg = [] + # `self.value` holds the values for each params on each phases. + # Initially there's only info of "planned" phase, but the + # `self.check()` function will fill out it. + self.value = dict() + for key in module.argument_spec: + value = module.params[key] + if value is not None: + self.value[key] = dict(planned=value) + self.module = module + + def abort(self, msg): + """Abort the process with error message. 
+ + This is just the wrapper of module.fail_json(). + + Args: + msg: The error message. + """ + error_msg = ['Error message:', msg] + if len(self.msg) > 0: + error_msg.append('Other message(s):') + error_msg.extend(self.msg) + self.module.fail_json(msg='\n'.join(error_msg)) + + def execute(self, *commands, **kwargs): + """Execute the shell command. + + This is just the wrapper of module.run_command(). + + Args: + *commands: The command to execute. + It will be concatenated with single space. + **kwargs: Only 'log' key is checked. + If kwargs['log'] is true, record the command to self.msg. + + Returns: + stdout: Standard output of the command. + """ + command = ' '.join(commands) + (rc, stdout, stderr) = self.module.run_command(command, check_rc=True) + if kwargs.get('log', False): + self.msg.append('executed `%s`' % command) + return stdout + + def diff(self, phase1='before', phase2='after'): + """Calculate the difference between given 2 phases. + + Args: + phase1, phase2: The names of phase to compare. + + Returns: + diff: The difference of value between phase1 and phase2. + This is in the format which can be used with the + `--diff` option of ansible-playbook. + """ + diff = {phase1: {}, phase2: {}} + for key, value in iteritems(self.value): + diff[phase1][key] = value[phase1] + diff[phase2][key] = value[phase2] + return diff + + def check(self, phase): + """Check the state in given phase and set it to `self.value`. + + Args: + phase: The name of the phase to check. + + Returns: + NO RETURN VALUE + """ + if phase == 'planned': + return + for key, value in iteritems(self.value): + value[phase] = self.get(key, phase) + + def change(self): + """Make the changes effect based on `self.value`.""" + for key, value in iteritems(self.value): + if value['before'] != value['planned']: + self.set(key, value['planned']) + + # =========================================== + # Platform specific methods (must be replaced by subclass). + + def get(self, key, phase): + """Get the value for the key at the given phase. + + Called from self.check(). + + Args: + key: The key to get the value + phase: The phase to get the value + + Return: + value: The value for the key at the given phase. + """ + self.abort('get(key, phase) is not implemented on target platform') + + def set(self, key, value): + """Set the value for the key (of course, for the phase 'after'). + + Called from self.change(). + + Args: + key: Key to set the value + value: Value to set + """ + self.abort('set(key, value) is not implemented on target platform') + + def _verify_timezone(self): + tz = self.value['name']['planned'] + tzfile = '/usr/share/zoneinfo/%s' % tz + if not os.path.isfile(tzfile): + self.abort('given timezone "%s" is not available' % tz) + return tzfile + + +class SystemdTimezone(Timezone): + """This is a Timezone manipulation class for systemd-powered Linux. + + It uses the `timedatectl` command to check/set all arguments. 
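+
+    `timedatectl status` output is parsed with the class-level `regexps`,
+    and changes are applied through the `set-timezone` and `set-local-rtc`
+    subcommands listed in `subcmds`.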
+ """ + + regexps = dict( + hwclock=re.compile(r'^\s*RTC in local TZ\s*:\s*([^\s]+)', re.MULTILINE), + name=re.compile(r'^\s*Time ?zone\s*:\s*([^\s]+)', re.MULTILINE) + ) + + subcmds = dict( + hwclock='set-local-rtc', + name='set-timezone' + ) + + def __init__(self, module): + super(SystemdTimezone, self).__init__(module) + self.timedatectl = module.get_bin_path('timedatectl', required=True) + self.status = dict() + # Validate given timezone + if 'name' in self.value: + self._verify_timezone() + + def _get_status(self, phase): + if phase not in self.status: + self.status[phase] = self.execute(self.timedatectl, 'status') + return self.status[phase] + + def get(self, key, phase): + status = self._get_status(phase) + value = self.regexps[key].search(status).group(1) + if key == 'hwclock': + # For key='hwclock'; convert yes/no -> local/UTC + if self.module.boolean(value): + value = 'local' + else: + value = 'UTC' + return value + + def set(self, key, value): + # For key='hwclock'; convert UTC/local -> yes/no + if key == 'hwclock': + if value == 'local': + value = 'yes' + else: + value = 'no' + self.execute(self.timedatectl, self.subcmds[key], value, log=True) + + +class NosystemdTimezone(Timezone): + """This is a Timezone manipulation class for non systemd-powered Linux. + + For timezone setting, it edits the following file and reflect changes: + - /etc/sysconfig/clock ... RHEL/CentOS + - /etc/timezone ... Debian/Ubuntu + For hwclock setting, it executes `hwclock --systohc` command with the + '--utc' or '--localtime' option. + """ + + conf_files = dict( + name=None, # To be set in __init__ + hwclock=None, # To be set in __init__ + adjtime='/etc/adjtime' + ) + + # It's fine if all tree config files don't exist + allow_no_file = dict( + name=True, + hwclock=True, + adjtime=True + ) + + regexps = dict( + name=None, # To be set in __init__ + hwclock=re.compile(r'^UTC\s*=\s*([^\s]+)', re.MULTILINE), + adjtime=re.compile(r'^(UTC|LOCAL)$', re.MULTILINE) + ) + + dist_regexps = dict( + SuSE=re.compile(r'^TIMEZONE\s*=\s*"?([^"\s]+)"?', re.MULTILINE), + redhat=re.compile(r'^ZONE\s*=\s*"?([^"\s]+)"?', re.MULTILINE) + ) + + dist_tzline_format = dict( + SuSE='TIMEZONE="%s"\n', + redhat='ZONE="%s"\n' + ) + + def __init__(self, module): + super(NosystemdTimezone, self).__init__(module) + # Validate given timezone + if 'name' in self.value: + tzfile = self._verify_timezone() + # `--remove-destination` is needed if /etc/localtime is a symlink so + # that it overwrites it instead of following it. + self.update_timezone = ['%s --remove-destination %s /etc/localtime' % (self.module.get_bin_path('cp', required=True), tzfile)] + self.update_hwclock = self.module.get_bin_path('hwclock', required=True) + # Distribution-specific configurations + if self.module.get_bin_path('dpkg-reconfigure') is not None: + # Debian/Ubuntu + if 'name' in self.value: + self.update_timezone = ['%s -sf %s /etc/localtime' % (self.module.get_bin_path('ln', required=True), tzfile), + '%s --frontend noninteractive tzdata' % self.module.get_bin_path('dpkg-reconfigure', required=True)] + self.conf_files['name'] = '/etc/timezone' + self.conf_files['hwclock'] = '/etc/default/rcS' + self.regexps['name'] = re.compile(r'^([^\s]+)', re.MULTILINE) + self.tzline_format = '%s\n' + else: + # RHEL/CentOS/SUSE + if self.module.get_bin_path('tzdata-update') is not None: + # tzdata-update cannot update the timezone if /etc/localtime is + # a symlink so we have to use cp to update the time zone which + # was set above. 
+                if not os.path.islink('/etc/localtime'):
+                    self.update_timezone = [self.module.get_bin_path('tzdata-update', required=True)]
+                # else:
+                #   self.update_timezone = 'cp --remove-destination ...' <- configured above
+            self.conf_files['name'] = '/etc/sysconfig/clock'
+            self.conf_files['hwclock'] = '/etc/sysconfig/clock'
+            try:
+                f = open(self.conf_files['name'], 'r')
+            except IOError as err:
+                if self._allow_ioerror(err, 'name'):
+                    # If the config file doesn't exist, detect the distribution and set regexps.
+                    distribution = get_distribution()
+                    if distribution == 'SuSE':
+                        # For SUSE
+                        self.regexps['name'] = self.dist_regexps['SuSE']
+                        self.tzline_format = self.dist_tzline_format['SuSE']
+                    else:
+                        # For RHEL/CentOS
+                        self.regexps['name'] = self.dist_regexps['redhat']
+                        self.tzline_format = self.dist_tzline_format['redhat']
+                else:
+                    self.abort('could not read configuration file "%s"' % self.conf_files['name'])
+            else:
+                # The key for timezone might be `ZONE` or `TIMEZONE`
+                # (the former is used in RHEL/CentOS and the latter is used in SUSE linux).
+                # So check the content of /etc/sysconfig/clock and decide which key to use.
+                sysconfig_clock = f.read()
+                f.close()
+                if re.search(r'^TIMEZONE\s*=', sysconfig_clock, re.MULTILINE):
+                    # For SUSE
+                    self.regexps['name'] = self.dist_regexps['SuSE']
+                    self.tzline_format = self.dist_tzline_format['SuSE']
+                else:
+                    # For RHEL/CentOS
+                    self.regexps['name'] = self.dist_regexps['redhat']
+                    self.tzline_format = self.dist_tzline_format['redhat']
+
+    def _allow_ioerror(self, err, key):
+        # In some cases, even if the target file does not exist,
+        # simply creating it may solve the problem.
+        # In such cases, we should continue the configuration rather than aborting.
+        if err.errno != errno.ENOENT:
+            # If the error is not ENOENT ("No such file or directory"),
+            # (e.g., permission error, etc), we should abort.
+            return False
+        return self.allow_no_file.get(key, False)
+
+    def _edit_file(self, filename, regexp, value, key):
+        """Replace the first matched line with given `value`.
+
+        If `regexp` matches more than once, all lines other than the first
+        match are deleted.
+
+        Args:
+            filename: The name of the file to edit.
+            regexp:   The regular expression to search with.
+            value:    The line which will be inserted.
+            key:      For what key the file is being edited.
+ """ + # Read the file + try: + file = open(filename, 'r') + except IOError as err: + if self._allow_ioerror(err, key): + lines = [] + else: + self.abort('tried to configure %s using a file "%s", but could not read it' % (key, filename)) + else: + lines = file.readlines() + file.close() + # Find the all matched lines + matched_indices = [] + for i, line in enumerate(lines): + if regexp.search(line): + matched_indices.append(i) + if len(matched_indices) > 0: + insert_line = matched_indices[0] + else: + insert_line = 0 + # Remove all matched lines + for i in matched_indices[::-1]: + del lines[i] + # ...and insert the value + lines.insert(insert_line, value) + # Write the changes + try: + file = open(filename, 'w') + except IOError: + self.abort('tried to configure %s using a file "%s", but could not write to it' % (key, filename)) + else: + file.writelines(lines) + file.close() + self.msg.append('Added 1 line and deleted %s line(s) on %s' % (len(matched_indices), filename)) + + def _get_value_from_config(self, key, phase): + filename = self.conf_files[key] + try: + file = open(filename, mode='r') + except IOError as err: + if self._allow_ioerror(err, key): + if key == 'hwclock': + return 'n/a' + elif key == 'adjtime': + return 'UTC' + elif key == 'name': + return 'n/a' + else: + self.abort('tried to configure %s using a file "%s", but could not read it' % (key, filename)) + else: + status = file.read() + file.close() + try: + value = self.regexps[key].search(status).group(1) + except AttributeError: + if key == 'hwclock': + # If we cannot find UTC in the config that's fine. + return 'n/a' + elif key == 'adjtime': + # If we cannot find UTC/LOCAL in /etc/cannot that means UTC + # will be used by default. + return 'UTC' + elif key == 'name': + if phase == 'before': + # In 'before' phase UTC/LOCAL doesn't need to be set in + # the timezone config file, so we ignore this error. + return 'n/a' + else: + self.abort('tried to configure %s using a file "%s", but could not find a valid value in it' % (key, filename)) + else: + if key == 'hwclock': + # convert yes/no -> UTC/local + if self.module.boolean(value): + value = 'UTC' + else: + value = 'local' + elif key == 'adjtime': + # convert LOCAL -> local + if value != 'UTC': + value = value.lower() + return value + + def get(self, key, phase): + planned = self.value[key]['planned'] + if key == 'hwclock': + value = self._get_value_from_config(key, phase) + if value == planned: + # If the value in the config file is the same as the 'planned' + # value, we need to check /etc/adjtime. + value = self._get_value_from_config('adjtime', phase) + elif key == 'name': + value = self._get_value_from_config(key, phase) + if value == planned: + # If the planned values is the same as the one in the config file + # we need to check if /etc/localtime is also set to the 'planned' zone. + if os.path.islink('/etc/localtime'): + # If /etc/localtime is a symlink and is not set to the TZ we 'planned' + # to set, we need to return the TZ which the symlink points to. + if os.path.exists('/etc/localtime'): + # We use readlink() because on some distros zone files are symlinks + # to other zone files, so it's hard to get which TZ is actually set + # if we follow the symlink. + path = os.readlink('/etc/localtime') + linktz = re.search(r'/usr/share/zoneinfo/(.*)', path, re.MULTILINE) + if linktz: + valuelink = linktz.group(1) + if valuelink != planned: + value = valuelink + else: + # Set current TZ to 'n/a' if the symlink points to a path + # which isn't a zone file. 
+ value = 'n/a' + else: + # Set current TZ to 'n/a' if the symlink to the zone file is broken. + value = 'n/a' + else: + # If /etc/localtime is not a symlink best we can do is compare it with + # the 'planned' zone info file and return 'n/a' if they are different. + try: + if not filecmp.cmp('/etc/localtime', '/usr/share/zoneinfo/' + planned): + return 'n/a' + except Exception: + return 'n/a' + else: + self.abort('unknown parameter "%s"' % key) + return value + + def set_timezone(self, value): + self._edit_file(filename=self.conf_files['name'], + regexp=self.regexps['name'], + value=self.tzline_format % value, + key='name') + for cmd in self.update_timezone: + self.execute(cmd) + + def set_hwclock(self, value): + if value == 'local': + option = '--localtime' + utc = 'no' + else: + option = '--utc' + utc = 'yes' + if self.conf_files['hwclock'] is not None: + self._edit_file(filename=self.conf_files['hwclock'], + regexp=self.regexps['hwclock'], + value='UTC=%s\n' % utc, + key='hwclock') + self.execute(self.update_hwclock, '--systohc', option, log=True) + + def set(self, key, value): + if key == 'name': + self.set_timezone(value) + elif key == 'hwclock': + self.set_hwclock(value) + else: + self.abort('unknown parameter "%s"' % key) + + +class SmartOSTimezone(Timezone): + """This is a Timezone manipulation class for SmartOS instances. + + It uses the C(sm-set-timezone) utility to set the timezone, and + inspects C(/etc/default/init) to determine the current timezone. + + NB: A zone needs to be rebooted in order for the change to be + activated. + """ + + def __init__(self, module): + super(SmartOSTimezone, self).__init__(module) + self.settimezone = self.module.get_bin_path('sm-set-timezone', required=False) + if not self.settimezone: + module.fail_json(msg='sm-set-timezone not found. Make sure the smtools package is installed.') + + def get(self, key, phase): + """Lookup the current timezone name in `/etc/default/init`. If anything else + is requested, or if the TZ field is not set we fail. + """ + if key == 'name': + try: + f = open('/etc/default/init', 'r') + for line in f: + m = re.match('^TZ=(.*)$', line.strip()) + if m: + return m.groups()[0] + except Exception: + self.module.fail_json(msg='Failed to read /etc/default/init') + else: + self.module.fail_json(msg='%s is not a supported option on target platform' % key) + + def set(self, key, value): + """Set the requested timezone through sm-set-timezone, an invalid timezone name + will be rejected and we have no further input validation to perform. + """ + if key == 'name': + cmd = 'sm-set-timezone %s' % value + + (rc, stdout, stderr) = self.module.run_command(cmd) + + if rc != 0: + self.module.fail_json(msg=stderr) + + # sm-set-timezone knows no state and will always set the timezone. + # XXX: https://github.com/joyent/smtools/pull/2 + m = re.match(r'^\* Changed (to)? timezone (to)? (%s).*' % value, stdout.splitlines()[1]) + if not (m and m.groups()[-1] == value): + self.module.fail_json(msg='Failed to set timezone') + else: + self.module.fail_json(msg='%s is not a supported option on target platform' % key) + + +class DarwinTimezone(Timezone): + """This is the timezone implementation for Darwin which, unlike other *BSD + implementations, uses the `systemsetup` command on Darwin to check/set + the timezone. 
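+
+    Requested timezone names are validated against the output of
+    `systemsetup -listtimezones` before being applied.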
+ """ + + regexps = dict( + name=re.compile(r'^\s*Time ?Zone\s*:\s*([^\s]+)', re.MULTILINE) + ) + + def __init__(self, module): + super(DarwinTimezone, self).__init__(module) + self.systemsetup = module.get_bin_path('systemsetup', required=True) + self.status = dict() + # Validate given timezone + if 'name' in self.value: + self._verify_timezone() + + def _get_current_timezone(self, phase): + """Lookup the current timezone via `systemsetup -gettimezone`.""" + if phase not in self.status: + self.status[phase] = self.execute(self.systemsetup, '-gettimezone') + return self.status[phase] + + def _verify_timezone(self): + tz = self.value['name']['planned'] + # Lookup the list of supported timezones via `systemsetup -listtimezones`. + # Note: Skip the first line that contains the label 'Time Zones:' + out = self.execute(self.systemsetup, '-listtimezones').splitlines()[1:] + tz_list = list(map(lambda x: x.strip(), out)) + if tz not in tz_list: + self.abort('given timezone "%s" is not available' % tz) + return tz + + def get(self, key, phase): + if key == 'name': + status = self._get_current_timezone(phase) + value = self.regexps[key].search(status).group(1) + return value + else: + self.module.fail_json(msg='%s is not a supported option on target platform' % key) + + def set(self, key, value): + if key == 'name': + self.execute(self.systemsetup, '-settimezone', value, log=True) + else: + self.module.fail_json(msg='%s is not a supported option on target platform' % key) + + +class BSDTimezone(Timezone): + """This is the timezone implementation for *BSD which works simply through + updating the `/etc/localtime` symlink to point to a valid timezone name under + `/usr/share/zoneinfo`. + """ + + def __init__(self, module): + super(BSDTimezone, self).__init__(module) + + def __get_timezone(self): + zoneinfo_dir = '/usr/share/zoneinfo/' + localtime_file = '/etc/localtime' + + # Strategy 1: + # If /etc/localtime does not exist, assum the timezone is UTC. + if not os.path.exists(localtime_file): + self.module.warn('Could not read /etc/localtime. Assuming UTC.') + return 'UTC' + + # Strategy 2: + # Follow symlink of /etc/localtime + zoneinfo_file = localtime_file + while not zoneinfo_file.startswith(zoneinfo_dir): + try: + zoneinfo_file = os.readlink(localtime_file) + except OSError: + # OSError means "end of symlink chain" or broken link. + break + else: + return zoneinfo_file.replace(zoneinfo_dir, '') + + # Strategy 3: + # (If /etc/localtime is not symlinked) + # Check all files in /usr/share/zoneinfo and return first non-link match. + for dname, _, fnames in sorted(os.walk(zoneinfo_dir)): + for fname in sorted(fnames): + zoneinfo_file = os.path.join(dname, fname) + if not os.path.islink(zoneinfo_file) and filecmp.cmp(zoneinfo_file, localtime_file): + return zoneinfo_file.replace(zoneinfo_dir, '') + + # Strategy 4: + # As a fall-back, return 'UTC' as default assumption. + self.module.warn('Could not identify timezone name from /etc/localtime. Assuming UTC.') + return 'UTC' + + def get(self, key, phase): + """Lookup the current timezone by resolving `/etc/localtime`.""" + if key == 'name': + return self.__get_timezone() + else: + self.module.fail_json(msg='%s is not a supported option on target platform' % key) + + def set(self, key, value): + if key == 'name': + # First determine if the requested timezone is valid by looking in + # the zoneinfo directory. 
+            zonefile = '/usr/share/zoneinfo/' + value
+            try:
+                if not os.path.isfile(zonefile):
+                    self.module.fail_json(msg='%s is not a recognized timezone' % value)
+            except Exception:
+                self.module.fail_json(msg='Failed to stat %s' % zonefile)
+
+            # Now (somewhat) atomically update the symlink by creating a new
+            # symlink and move it into place. Otherwise we have to remove the
+            # original symlink and create the new symlink, however that would
+            # create a race condition in case another process tries to read
+            # /etc/localtime between removal and creation.
+            suffix = "".join([random.choice(string.ascii_letters + string.digits) for x in range(0, 10)])
+            new_localtime = '/etc/localtime.' + suffix
+
+            try:
+                os.symlink(zonefile, new_localtime)
+                os.rename(new_localtime, '/etc/localtime')
+            except Exception:
+                os.remove(new_localtime)
+                self.module.fail_json(msg='Could not update /etc/localtime')
+        else:
+            self.module.fail_json(msg='%s is not a supported option on target platform' % key)
+
+
+class AIXTimezone(Timezone):
+    """This is a Timezone manipulation class for AIX instances.
+
+    It uses the C(chtz) utility to set the timezone, and
+    inspects C(/etc/environment) to determine the current timezone.
+
+    While AIX time zones can be set using two formats (POSIX and
+    Olson) the preferred method is Olson.
+    See the following article for more information:
+    https://developer.ibm.com/articles/au-aix-posix/
+
+    NB: AIX needs to be rebooted in order for the change to be
+    activated.
+    """
+
+    def __init__(self, module):
+        super(AIXTimezone, self).__init__(module)
+        self.settimezone = self.module.get_bin_path('chtz', required=True)
+
+    def __get_timezone(self):
+        """ Return the current value of TZ= in /etc/environment """
+        try:
+            f = open('/etc/environment', 'r')
+            etcenvironment = f.read()
+            f.close()
+        except Exception:
+            self.module.fail_json(msg='Issue reading contents of /etc/environment')
+
+        match = re.search(r'^TZ=(.*)$', etcenvironment, re.MULTILINE)
+        if match:
+            return match.group(1)
+        else:
+            return None
+
+    def get(self, key, phase):
+        """Lookup the current timezone name in `/etc/environment`. If anything else
+        is requested, or if the TZ field is not set we fail.
+        """
+        if key == 'name':
+            return self.__get_timezone()
+        else:
+            self.module.fail_json(msg='%s is not a supported option on target platform' % key)
+
+    def set(self, key, value):
+        """Set the requested timezone through chtz, an invalid timezone name
+        will be rejected and we have no further input validation to perform.
+        """
+        if key == 'name':
+            # chtz seems to always return 0 on AIX 7.2, even for invalid timezone values.
+            # It will only return non-zero if the chtz command itself fails, it does not check for
+            # valid timezones. We need to perform a basic check to confirm that the timezone
+            # definition exists in /usr/share/lib/zoneinfo
+            # This does mean that we can only support Olson for now. The below commented out regex
+            # detects Olson date formats, so in the future we could detect Posix or Olson and
+            # act accordingly.
+
+            # regex_olson = re.compile('^([a-z0-9_\-\+]+\/?)+$', re.IGNORECASE)
+            # if not regex_olson.match(value):
+            #     msg = 'Supplied timezone (%s) does not appear to be a valid Olson string' % value
+            #     self.module.fail_json(msg=msg)
+
+            # First determine if the requested timezone is valid by looking in the zoneinfo
+            # directory.
+            zonefile = '/usr/share/lib/zoneinfo/' + value
+            try:
+                if not os.path.isfile(zonefile):
+                    self.module.fail_json(msg='%s is not a recognized timezone.'
+                                          % value)
+            except Exception:
+                self.module.fail_json(msg='Failed to check %s.' % zonefile)
+
+            # Now set the TZ using chtz
+            cmd = 'chtz %s' % value
+            (rc, stdout, stderr) = self.module.run_command(cmd)
+
+            if rc != 0:
+                self.module.fail_json(msg=stderr)
+
+            # The best condition check we can do is to check the value of TZ after making the
+            # change.
+            TZ = self.__get_timezone()
+            if TZ != value:
+                msg = 'TZ value does not match post-change (Actual: %s, Expected: %s).' % (TZ, value)
+                self.module.fail_json(msg=msg)
+
+        else:
+            self.module.fail_json(msg='%s is not a supported option on target platform' % key)
+
+
+def main():
+    # Construct 'module' and 'tz'
+    module = AnsibleModule(
+        argument_spec=dict(
+            hwclock=dict(type='str', choices=['local', 'UTC'], aliases=['rtc']),
+            name=dict(type='str'),
+        ),
+        required_one_of=[
+            ['hwclock', 'name']
+        ],
+        supports_check_mode=True,
+    )
+    tz = Timezone(module)
+
+    # Check the current state
+    tz.check(phase='before')
+    if module.check_mode:
+        diff = tz.diff('before', 'planned')
+        # In check mode, 'planned' state is treated as 'after' state
+        diff['after'] = diff.pop('planned')
+    else:
+        # Make change
+        tz.change()
+        # Check the current state
+        tz.check(phase='after')
+        # Examine if the current state matches planned state
+        (after, planned) = tz.diff('after', 'planned').values()
+        if after != planned:
+            tz.abort('still not desired state, though changes have been made - '
+                     'planned: %s, after: %s' % (str(planned), str(after)))
+        diff = tz.diff('before', 'after')
+
+    changed = (diff['before'] != diff['after'])
+    if len(tz.msg) > 0:
+        module.exit_json(changed=changed, diff=diff, msg='\n'.join(tz.msg))
+    else:
+        module.exit_json(changed=changed, diff=diff)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/system/ufw.py b/plugins/modules/system/ufw.py
new file mode 100644
index 0000000000..8039bf6ee0
--- /dev/null
+++ b/plugins/modules/system/ufw.py
@@ -0,0 +1,593 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2014, Ahti Kitsik
+# Copyright: (c) 2014, Jarno Keskikangas
+# Copyright: (c) 2013, Aleksey Ovcharenko
+# Copyright: (c) 2013, James Martin
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+DOCUMENTATION = r'''
+---
+module: ufw
+short_description: Manage firewall with UFW
+description:
+    - Manage firewall with UFW.
+author:
+    - Aleksey Ovcharenko (@ovcharenko)
+    - Jarno Keskikangas (@pyykkis)
+    - Ahti Kitsik (@ahtik)
+notes:
+    - See C(man ufw) for more examples.
+requirements:
+    - C(ufw) package
+options:
+  state:
+    description:
+      - C(enabled) reloads firewall and enables firewall on boot.
+      - C(disabled) unloads firewall and disables firewall on boot.
+      - C(reloaded) reloads firewall.
+      - C(reset) disables and resets firewall to installation defaults.
+    type: str
+    choices: [ disabled, enabled, reloaded, reset ]
+  default:
+    description:
+      - Change the default policy for incoming or outgoing traffic.
+    type: str
+    choices: [ allow, deny, reject ]
+    aliases: [ policy ]
+  direction:
+    description:
+      - Select direction for a rule or default policy command. Mutually
+        exclusive with I(interface_in) and I(interface_out).
+    type: str
+    choices: [ in, incoming, out, outgoing, routed ]
+  logging:
+    description:
+      - Toggles logging. Logged packets use the LOG_KERN syslog facility.
+ type: str + choices: [ 'on', 'off', low, medium, high, full ] + insert: + description: + - Insert the corresponding rule as rule number NUM. + - Note that ufw numbers rules starting with 1. + type: int + insert_relative_to: + description: + - Allows to interpret the index in I(insert) relative to a position. + - C(zero) interprets the rule number as an absolute index (i.e. 1 is + the first rule). + - C(first-ipv4) interprets the rule number relative to the index of the + first IPv4 rule, or relative to the position where the first IPv4 rule + would be if there is currently none. + - C(last-ipv4) interprets the rule number relative to the index of the + last IPv4 rule, or relative to the position where the last IPv4 rule + would be if there is currently none. + - C(first-ipv6) interprets the rule number relative to the index of the + first IPv6 rule, or relative to the position where the first IPv6 rule + would be if there is currently none. + - C(last-ipv6) interprets the rule number relative to the index of the + last IPv6 rule, or relative to the position where the last IPv6 rule + would be if there is currently none. + type: str + choices: [ first-ipv4, first-ipv6, last-ipv4, last-ipv6, zero ] + default: zero + rule: + description: + - Add firewall rule + type: str + choices: [ allow, deny, limit, reject ] + log: + description: + - Log new connections matched to this rule + type: bool + from_ip: + description: + - Source IP address. + type: str + default: any + aliases: [ from, src ] + from_port: + description: + - Source port. + type: str + to_ip: + description: + - Destination IP address. + type: str + default: any + aliases: [ dest, to] + to_port: + description: + - Destination port. + type: str + aliases: [ port ] + proto: + description: + - TCP/IP protocol. + type: str + choices: [ any, tcp, udp, ipv6, esp, ah, gre, igmp ] + aliases: [ protocol ] + name: + description: + - Use profile located in C(/etc/ufw/applications.d). + type: str + aliases: [ app ] + delete: + description: + - Delete rule. + type: bool + interface: + description: + - Specify interface for the rule. The direction (in or out) used + for the interface depends on the value of I(direction). See + I(interface_in) and I(interface_out) for routed rules that needs + to supply both an input and output interface. Mutually + exclusive with I(interface_in) and I(interface_out). + type: str + aliases: [ if ] + interface_in: + description: + - Specify input interface for the rule. This is mutually + exclusive with I(direction) and I(interface). However, it is + compatible with I(interface_out) for routed rules. + type: str + aliases: [ if_in ] + interface_out: + description: + - Specify output interface for the rule. This is mutually + exclusive with I(direction) and I(interface). However, it is + compatible with I(interface_in) for routed rules. + type: str + aliases: [ if_out ] + route: + description: + - Apply the rule to routed/forwarded packets. + type: bool + comment: + description: + - Add a comment to the rule. Requires UFW version >=0.35. + type: str +''' + +EXAMPLES = r''' +- name: Allow everything and enable UFW + ufw: + state: enabled + policy: allow + +- name: Set logging + ufw: + logging: 'on' + +# Sometimes it is desirable to let the sender know when traffic is +# being denied, rather than simply ignoring it. In these cases, use +# reject instead of deny. 
In addition, log rejected connections: +- ufw: + rule: reject + port: auth + log: yes + +# ufw supports connection rate limiting, which is useful for protecting +# against brute-force login attacks. ufw will deny connections if an IP +# address has attempted to initiate 6 or more connections in the last +# 30 seconds. See http://www.debian-administration.org/articles/187 +# for details. Typical usage is: +- ufw: + rule: limit + port: ssh + proto: tcp + +# Allow OpenSSH. (Note that as ufw manages its own state, simply removing +# a rule=allow task can leave those ports exposed. Either use delete=yes +# or a separate state=reset task) +- ufw: + rule: allow + name: OpenSSH + +- name: Delete OpenSSH rule + ufw: + rule: allow + name: OpenSSH + delete: yes + +- name: Deny all access to port 53 + ufw: + rule: deny + port: '53' + +- name: Allow port range 60000-61000 + ufw: + rule: allow + port: 60000:61000 + proto: tcp + +- name: Allow all access to tcp port 80 + ufw: + rule: allow + port: '80' + proto: tcp + +- name: Allow all access from RFC1918 networks to this host + ufw: + rule: allow + src: '{{ item }}' + loop: + - 10.0.0.0/8 + - 172.16.0.0/12 + - 192.168.0.0/16 + +- name: Deny access to udp port 514 from host 1.2.3.4 and include a comment + ufw: + rule: deny + proto: udp + src: 1.2.3.4 + port: '514' + comment: Block syslog + +- name: Allow incoming access to eth0 from 1.2.3.5 port 5469 to 1.2.3.4 port 5469 + ufw: + rule: allow + interface: eth0 + direction: in + proto: udp + src: 1.2.3.5 + from_port: '5469' + dest: 1.2.3.4 + to_port: '5469' + +# Note that IPv6 must be enabled in /etc/default/ufw for IPv6 firewalling to work. +- name: Deny all traffic from the IPv6 2001:db8::/32 to tcp port 25 on this host + ufw: + rule: deny + proto: tcp + src: 2001:db8::/32 + port: '25' + +- name: Deny all IPv6 traffic to tcp port 20 on this host + # this should be the first IPv6 rule + ufw: + rule: deny + proto: tcp + port: '20' + to_ip: "::" + insert: 0 + insert_relative_to: first-ipv6 + +- name: Deny all IPv4 traffic to tcp port 20 on this host + # This should be the third to last IPv4 rule + # (insert: -1 addresses the second to last IPv4 rule; + # so the new rule will be inserted before the second + # to last IPv4 rule, and will be come the third to last + # IPv4 rule.) 
+ ufw: + rule: deny + proto: tcp + port: '20' + to_ip: "::" + insert: -1 + insert_relative_to: last-ipv4 + +# Can be used to further restrict a global FORWARD policy set to allow +- name: Deny forwarded/routed traffic from subnet 1.2.3.0/24 to subnet 4.5.6.0/24 + ufw: + rule: deny + route: yes + src: 1.2.3.0/24 + dest: 4.5.6.0/24 +''' + +import re + +from operator import itemgetter + +from ansible.module_utils.basic import AnsibleModule + + +def compile_ipv4_regexp(): + r = r"((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\.){3,3}" + r += r"(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])" + return re.compile(r) + + +def compile_ipv6_regexp(): + """ + validation pattern provided by : + https://stackoverflow.com/questions/53497/regular-expression-that-matches- + valid-ipv6-addresses#answer-17871737 + """ + r = r"(([0-9a-fA-F]{1,4}:){7,7}[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,7}:" + r += r"|([0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,5}" + r += r"(:[0-9a-fA-F]{1,4}){1,2}|([0-9a-fA-F]{1,4}:){1,4}(:[0-9a-fA-F]{1,4})" + r += r"{1,3}|([0-9a-fA-F]{1,4}:){1,3}(:[0-9a-fA-F]{1,4}){1,4}|([0-9a-fA-F]" + r += r"{1,4}:){1,2}(:[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]{1,4}:((:[0-9a-fA-F]" + r += r"{1,4}){1,6})|:((:[0-9a-fA-F]{1,4}){1,7}|:)|fe80:(:[0-9a-fA-F]{0,4})" + r += r"{0,4}%[0-9a-zA-Z]{1,}|::(ffff(:0{1,4}){0,1}:){0,1}((25[0-5]|(2[0-4]" + r += r"|1{0,1}[0-9]){0,1}[0-9])\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}" + r += r"[0-9])|([0-9a-fA-F]{1,4}:){1,4}:((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}" + r += r"[0-9])\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9]))" + return re.compile(r) + + +def main(): + command_keys = ['state', 'default', 'rule', 'logging'] + + module = AnsibleModule( + argument_spec=dict( + state=dict(type='str', choices=['enabled', 'disabled', 'reloaded', 'reset']), + default=dict(type='str', aliases=['policy'], choices=['allow', 'deny', 'reject']), + logging=dict(type='str', choices=['full', 'high', 'low', 'medium', 'off', 'on']), + direction=dict(type='str', choices=['in', 'incoming', 'out', 'outgoing', 'routed']), + delete=dict(type='bool', default=False), + route=dict(type='bool', default=False), + insert=dict(type='int'), + insert_relative_to=dict(choices=['zero', 'first-ipv4', 'last-ipv4', 'first-ipv6', 'last-ipv6'], default='zero'), + rule=dict(type='str', choices=['allow', 'deny', 'limit', 'reject']), + interface=dict(type='str', aliases=['if']), + interface_in=dict(type='str', aliases=['if_in']), + interface_out=dict(type='str', aliases=['if_out']), + log=dict(type='bool', default=False), + from_ip=dict(type='str', default='any', aliases=['from', 'src']), + from_port=dict(type='str'), + to_ip=dict(type='str', default='any', aliases=['dest', 'to']), + to_port=dict(type='str', aliases=['port']), + proto=dict(type='str', aliases=['protocol'], choices=['ah', 'any', 'esp', 'ipv6', 'tcp', 'udp', 'gre', 'igmp']), + name=dict(type='str', aliases=['app']), + comment=dict(type='str'), + ), + supports_check_mode=True, + mutually_exclusive=[ + ['name', 'proto', 'logging'], + # Mutual exclusivity with `interface` implied by `required_by`. 
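+            # (`interface` requires `direction` via the required_by entry
+            # below, which is why that pair is not listed here.)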
+            ['direction', 'interface_in'],
+            ['direction', 'interface_out'],
+        ],
+        required_one_of=([command_keys]),
+        required_by=dict(
+            interface=('direction', ),
+        ),
+    )
+
+    cmds = []
+
+    ipv4_regexp = compile_ipv4_regexp()
+    ipv6_regexp = compile_ipv6_regexp()
+
+    # NOTE: despite its name, this helper keeps only the lines that DO start
+    # with `pattern` (it is used below to keep the '### tuple' lines).
+    def filter_line_that_not_start_with(pattern, content):
+        return ''.join([line for line in content.splitlines(True) if line.startswith(pattern)])
+
+    def filter_line_that_contains(pattern, content):
+        return [line for line in content.splitlines(True) if pattern in line]
+
+    def filter_line_that_not_contains(pattern, content):
+        return ''.join([line for line in content.splitlines(True) if pattern not in line])
+
+    def filter_line_that_match_func(match_func, content):
+        return ''.join([line for line in content.splitlines(True) if match_func(line) is not None])
+
+    def filter_line_that_contains_ipv4(content):
+        return filter_line_that_match_func(ipv4_regexp.search, content)
+
+    def filter_line_that_contains_ipv6(content):
+        return filter_line_that_match_func(ipv6_regexp.search, content)
+
+    def is_starting_by_ipv4(ip):
+        return ipv4_regexp.match(ip) is not None
+
+    def is_starting_by_ipv6(ip):
+        return ipv6_regexp.match(ip) is not None
+
+    def execute(cmd, ignore_error=False):
+        cmd = ' '.join(map(itemgetter(-1), filter(itemgetter(0), cmd)))
+
+        cmds.append(cmd)
+        (rc, out, err) = module.run_command(cmd, environ_update={"LANG": "C"})
+
+        if rc != 0 and not ignore_error:
+            module.fail_json(msg=err or out, commands=cmds)
+
+        return out
+
+    def get_current_rules():
+        user_rules_files = ["/lib/ufw/user.rules",
+                            "/lib/ufw/user6.rules",
+                            "/etc/ufw/user.rules",
+                            "/etc/ufw/user6.rules",
+                            "/var/lib/ufw/user.rules",
+                            "/var/lib/ufw/user6.rules"]
+
+        cmd = [[grep_bin], ["-h"], ["'^### tuple'"]]
+
+        cmd.extend([[f] for f in user_rules_files])
+        return execute(cmd, ignore_error=True)
+
+    def ufw_version():
+        """
+        Returns the major, minor, and revision parts of the installed ufw version.
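+        The output of `ufw --version` is parsed with a regular expression;
+        when no patch level is present, the revision defaults to 0.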
+ """ + out = execute([[ufw_bin], ["--version"]]) + + lines = [x for x in out.split('\n') if x.strip() != ''] + if len(lines) == 0: + module.fail_json(msg="Failed to get ufw version.", rc=0, out=out) + + matches = re.search(r'^ufw.+(\d+)\.(\d+)(?:\.(\d+))?.*$', lines[0]) + if matches is None: + module.fail_json(msg="Failed to get ufw version.", rc=0, out=out) + + # Convert version to numbers + major = int(matches.group(1)) + minor = int(matches.group(2)) + rev = 0 + if matches.group(3) is not None: + rev = int(matches.group(3)) + + return major, minor, rev + + params = module.params + + commands = dict((key, params[key]) for key in command_keys if params[key]) + + # Ensure ufw is available + ufw_bin = module.get_bin_path('ufw', True) + grep_bin = module.get_bin_path('grep', True) + + # Save the pre state and rules in order to recognize changes + pre_state = execute([[ufw_bin], ['status verbose']]) + pre_rules = get_current_rules() + + changed = False + + # Execute filter + for (command, value) in commands.items(): + + cmd = [[ufw_bin], [module.check_mode, '--dry-run']] + + if command == 'state': + states = {'enabled': 'enable', 'disabled': 'disable', + 'reloaded': 'reload', 'reset': 'reset'} + + if value in ['reloaded', 'reset']: + changed = True + + if module.check_mode: + # "active" would also match "inactive", hence the space + ufw_enabled = pre_state.find(" active") != -1 + if (value == 'disabled' and ufw_enabled) or (value == 'enabled' and not ufw_enabled): + changed = True + else: + execute(cmd + [['-f'], [states[value]]]) + + elif command == 'logging': + extract = re.search(r'Logging: (on|off)(?: \(([a-z]+)\))?', pre_state) + if extract: + current_level = extract.group(2) + current_on_off_value = extract.group(1) + if value != "off": + if current_on_off_value == "off": + changed = True + elif value != "on" and value != current_level: + changed = True + elif current_on_off_value != "off": + changed = True + else: + changed = True + + if not module.check_mode: + execute(cmd + [[command], [value]]) + + elif command == 'default': + if params['direction'] not in ['outgoing', 'incoming', 'routed', None]: + module.fail_json(msg='For default, direction must be one of "outgoing", "incoming" and "routed", or direction must not be specified.') + if module.check_mode: + regexp = r'Default: (deny|allow|reject) \(incoming\), (deny|allow|reject) \(outgoing\), (deny|allow|reject|disabled) \(routed\)' + extract = re.search(regexp, pre_state) + if extract is not None: + current_default_values = {} + current_default_values["incoming"] = extract.group(1) + current_default_values["outgoing"] = extract.group(2) + current_default_values["routed"] = extract.group(3) + v = current_default_values[params['direction'] or 'incoming'] + if v not in (value, 'disabled'): + changed = True + else: + changed = True + else: + execute(cmd + [[command], [value], [params['direction']]]) + + elif command == 'rule': + if params['direction'] not in ['in', 'out', None]: + module.fail_json(msg='For rules, direction must be one of "in" and "out", or direction must not be specified.') + if not params['route'] and params['interface_in'] and params['interface_out']: + module.fail_json(msg='Only route rules can combine ' + 'interface_in and interface_out') + # Rules are constructed according to the long format + # + # ufw [--dry-run] [route] [delete] [insert NUM] allow|deny|reject|limit [in|out on INTERFACE] [log|log-all] \ + # [from ADDRESS [port PORT]] [to ADDRESS [port PORT]] \ + # [proto protocol] [app application] [comment 
COMMENT] + cmd.append([module.boolean(params['route']), 'route']) + cmd.append([module.boolean(params['delete']), 'delete']) + if params['insert'] is not None: + relative_to_cmd = params['insert_relative_to'] + if relative_to_cmd == 'zero': + insert_to = params['insert'] + else: + (dummy, numbered_state, dummy) = module.run_command([ufw_bin, 'status', 'numbered']) + numbered_line_re = re.compile(R'^\[ *([0-9]+)\] ') + lines = [(numbered_line_re.match(line), '(v6)' in line) for line in numbered_state.splitlines()] + lines = [(int(matcher.group(1)), ipv6) for (matcher, ipv6) in lines if matcher] + last_number = max([no for (no, ipv6) in lines]) if lines else 0 + has_ipv4 = any([not ipv6 for (no, ipv6) in lines]) + has_ipv6 = any([ipv6 for (no, ipv6) in lines]) + if relative_to_cmd == 'first-ipv4': + relative_to = 1 + elif relative_to_cmd == 'last-ipv4': + relative_to = max([no for (no, ipv6) in lines if not ipv6]) if has_ipv4 else 1 + elif relative_to_cmd == 'first-ipv6': + relative_to = max([no for (no, ipv6) in lines if not ipv6]) + 1 if has_ipv4 else 1 + elif relative_to_cmd == 'last-ipv6': + relative_to = last_number if has_ipv6 else last_number + 1 + insert_to = params['insert'] + relative_to + if insert_to > last_number: + # ufw does not like it when the insert number is larger than the + # maximal rule number for IPv4/IPv6. + insert_to = None + cmd.append([insert_to is not None, "insert %s" % insert_to]) + cmd.append([value]) + cmd.append([params['direction'], "%s" % params['direction']]) + cmd.append([params['interface'], "on %s" % params['interface']]) + cmd.append([params['interface_in'], "in on %s" % params['interface_in']]) + cmd.append([params['interface_out'], "out on %s" % params['interface_out']]) + cmd.append([module.boolean(params['log']), 'log']) + + for (key, template) in [('from_ip', "from %s"), ('from_port', "port %s"), + ('to_ip', "to %s"), ('to_port', "port %s"), + ('proto', "proto %s"), ('name', "app '%s'")]: + value = params[key] + cmd.append([value, template % (value)]) + + ufw_major, ufw_minor, dummy = ufw_version() + # comment is supported only in ufw version after 0.35 + if (ufw_major == 0 and ufw_minor >= 35) or ufw_major > 0: + cmd.append([params['comment'], "comment '%s'" % params['comment']]) + + rules_dry = execute(cmd) + + if module.check_mode: + + nb_skipping_line = len(filter_line_that_contains("Skipping", rules_dry)) + + if not (nb_skipping_line > 0 and nb_skipping_line == len(rules_dry.splitlines(True))): + + rules_dry = filter_line_that_not_start_with("### tuple", rules_dry) + # ufw dry-run doesn't send all rules so have to compare ipv4 or ipv6 rules + if is_starting_by_ipv4(params['from_ip']) or is_starting_by_ipv4(params['to_ip']): + if filter_line_that_contains_ipv4(pre_rules) != filter_line_that_contains_ipv4(rules_dry): + changed = True + elif is_starting_by_ipv6(params['from_ip']) or is_starting_by_ipv6(params['to_ip']): + if filter_line_that_contains_ipv6(pre_rules) != filter_line_that_contains_ipv6(rules_dry): + changed = True + elif pre_rules != rules_dry: + changed = True + + # Get the new state + if module.check_mode: + return module.exit_json(changed=changed, commands=cmds) + else: + post_state = execute([[ufw_bin], ['status'], ['verbose']]) + if not changed: + post_rules = get_current_rules() + changed = (pre_state != post_state) or (pre_rules != post_rules) + return module.exit_json(changed=changed, commands=cmds, msg=post_state.rstrip()) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/system/vdo.py 
b/plugins/modules/system/vdo.py
new file mode 100644
index 0000000000..875b18526b
--- /dev/null
+++ b/plugins/modules/system/vdo.py
@@ -0,0 +1,872 @@
+#!/usr/bin/python
+
+# Copyright: (c) 2018, Red Hat, Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+    'metadata_version': '1.1',
+    'status': ['preview'],
+    'supported_by': 'community'
+}
+
+DOCUMENTATION = r'''
+---
+author:
+    - Bryan Gurney (@bgurney-rh)
+
+module: vdo
+
+short_description: Module to control VDO
+
+
+description:
+    - This module controls the VDO dedupe and compression device.
+    - VDO, or Virtual Data Optimizer, is a device-mapper target that
+      provides inline block-level deduplication, compression, and
+      thin provisioning capabilities to primary storage.
+
+options:
+    name:
+        description:
+            - The name of the VDO volume.
+        type: str
+        required: true
+    state:
+        description:
+            - Whether this VDO volume should be "present" or "absent".
+              If a "present" VDO volume does not exist, it will be
+              created.  If a "present" VDO volume already exists, it
+              will be modified, by updating the configuration, which
+              will take effect when the VDO volume is restarted.
+              Not all parameters of an existing VDO volume can be
+              modified; the "statusparamkeys" list contains the
+              parameters that can be modified after creation.  If an
+              "absent" VDO volume does not exist, it will not be
+              removed.
+        type: str
+        choices: [ absent, present ]
+        default: present
+    activated:
+        description:
+            - The "activate" status for a VDO volume.  If this is set
+              to "no", the VDO volume cannot be started, and it will
+              not start on system startup.  However, on initial
+              creation, a VDO volume with "activated" set to "no"
+              will be running, until stopped.  This is the default
+              behavior of the "vdo create" command; it provides the
+              user an opportunity to write a base amount of metadata
+              (filesystem, LVM headers, etc.) to the VDO volume prior
+              to stopping the volume, and leaving it deactivated
+              until ready to use.
+        type: bool
+    running:
+        description:
+            - Whether this VDO volume is running.
+            - A VDO volume must be activated in order to be started.
+        type: bool
+    device:
+        description:
+            - The full path of the device to use for VDO storage.
+            - This is required if "state" is "present".
+        type: str
+    logicalsize:
+        description:
+            - The logical size of the VDO volume (in megabytes, or
+              LVM suffix format).  If not specified for a new volume,
+              this defaults to the same size as the underlying storage
+              device, which is specified in the 'device' parameter.
+              Existing volumes will maintain their size if the
+              logicalsize parameter is not specified, or is smaller
+              than or identical to the current size.  If the specified
+              size is larger than the current size, a growlogical
+              operation will be performed.
+        type: str
+    deduplication:
+        description:
+            - Configures whether deduplication is enabled.  The
+              default for a created volume is 'enabled'.  Existing
+              volumes will maintain their previously configured
+              setting unless a different value is specified in the
+              playbook.
+        type: str
+        choices: [ disabled, enabled ]
+    compression:
+        description:
+            - Configures whether compression is enabled.  The default
+              for a created volume is 'enabled'.  Existing volumes
+              will maintain their previously configured setting unless
+              a different value is specified in the playbook.
+ type: str + choices: [ disabled, enabled ] + blockmapcachesize: + description: + - The amount of memory allocated for caching block map + pages, in megabytes (or may be issued with an LVM-style + suffix of K, M, G, or T). The default (and minimum) + value is 128M. The value specifies the size of the + cache; there is a 15% memory usage overhead. Each 1.25G + of block map covers 1T of logical blocks, therefore a + small amount of block map cache memory can cache a + significantly large amount of block map data. Existing + volumes will maintain their previously configured + setting unless a different value is specified in the + playbook. + type: str + readcache: + description: + - Enables or disables the read cache. The default is + 'disabled'. Choosing 'enabled' enables a read cache + which may improve performance for workloads of high + deduplication, read workloads with a high level of + compression, or on hard disk storage. Existing + volumes will maintain their previously configured + setting unless a different value is specified in the + playbook. + - The read cache feature is available in VDO 6.1 and older. + type: str + choices: [ disabled, enabled ] + readcachesize: + description: + - Specifies the extra VDO device read cache size in + megabytes. This is in addition to a system-defined + minimum. Using a value with a suffix of K, M, G, or T + is optional. The default value is 0. 1.125 MB of + memory per bio thread will be used per 1 MB of read + cache specified (for example, a VDO volume configured + with 4 bio threads will have a read cache memory usage + overhead of 4.5 MB per 1 MB of read cache specified). + Existing volumes will maintain their previously + configured setting unless a different value is specified + in the playbook. + - The read cache feature is available in VDO 6.1 and older. + type: str + emulate512: + description: + - Enables 512-byte emulation mode, allowing drivers or + filesystems to access the VDO volume at 512-byte + granularity, instead of the default 4096-byte granularity. + Default is 'disabled'; only recommended when a driver + or filesystem requires 512-byte sector level access to + a device. This option is only available when creating + a new volume, and cannot be changed for an existing + volume. + type: bool + growphysical: + description: + - Specifies whether to attempt to execute a growphysical + operation, if there is enough unused space on the + device. A growphysical operation will be executed if + there is at least 64 GB of free space, relative to the + previous physical size of the affected VDO volume. + type: bool + default: false + slabsize: + description: + - The size of the increment by which the physical size of + a VDO volume is grown, in megabytes (or may be issued + with an LVM-style suffix of K, M, G, or T). Must be a + power of two between 128M and 32G. The default is 2G, + which supports volumes having a physical size up to 16T. + The maximum, 32G, supports a physical size of up to 256T. + This option is only available when creating a new + volume, and cannot be changed for an existing volume. + type: str + writepolicy: + description: + - Specifies the write policy of the VDO volume. The + 'sync' mode acknowledges writes only after data is on + stable storage. The 'async' mode acknowledges writes + when data has been cached for writing to stable + storage. The default (and highly recommended) 'auto' + mode checks the storage device to determine whether it + supports flushes. 
Devices that support flushes will + result in a VDO volume in 'async' mode, while devices + that do not support flushes will run in sync mode. + Existing volumes will maintain their previously + configured setting unless a different value is + specified in the playbook. + type: str + choices: [ async, auto, sync ] + indexmem: + description: + - Specifies the amount of index memory in gigabytes. The + default is 0.25. The special decimal values 0.25, 0.5, + and 0.75 can be used, as can any positive integer. + This option is only available when creating a new + volume, and cannot be changed for an existing volume. + type: str + indexmode: + description: + - Specifies the index mode of the Albireo index. The + default is 'dense', which has a deduplication window of + 1 GB of index memory per 1 TB of incoming data, + requiring 10 GB of index data on persistent storage. + The 'sparse' mode has a deduplication window of 1 GB of + index memory per 10 TB of incoming data, but requires + 100 GB of index data on persistent storage. This option + is only available when creating a new volume, and cannot + be changed for an existing volume. + type: str + choices: [ dense, sparse ] + ackthreads: + description: + - Specifies the number of threads to use for + acknowledging completion of requested VDO I/O operations. + Valid values are integer values from 1 to 100 (lower + numbers are preferable due to overhead). The default is + 1. Existing volumes will maintain their previously + configured setting unless a different value is specified + in the playbook. + type: str + biothreads: + description: + - Specifies the number of threads to use for submitting I/O + operations to the storage device. Valid values are + integer values from 1 to 100 (lower numbers are + preferable due to overhead). The default is 4. + Existing volumes will maintain their previously + configured setting unless a different value is specified + in the playbook. + type: str + cputhreads: + description: + - Specifies the number of threads to use for CPU-intensive + work such as hashing or compression. Valid values are + integer values from 1 to 100 (lower numbers are + preferable due to overhead). The default is 2. + Existing volumes will maintain their previously + configured setting unless a different value is specified + in the playbook. + type: str + logicalthreads: + description: + - Specifies the number of threads across which to + subdivide parts of the VDO processing based on logical + block addresses. Valid values are integer values from + 1 to 100 (lower numbers are preferable due to overhead). + The default is 1. Existing volumes will maintain their + previously configured setting unless a different value + is specified in the playbook. + type: str + physicalthreads: + description: + - Specifies the number of threads across which to + subdivide parts of the VDO processing based on physical + block addresses. Valid values are integer values from + 1 to 16 (lower numbers are preferable due to overhead). + The physical space used by the VDO volume must be + larger than (slabsize * physicalthreads). The default + is 1. Existing volumes will maintain their previously + configured setting unless a different value is specified + in the playbook. + type: str +notes: + - In general, the default thread configuration should be used. 
+requirements:
+  - PyYAML
+  - kmod-kvdo
+  - vdo
+'''
+
+EXAMPLES = r'''
+- name: Create 2 TB VDO volume vdo1 on device /dev/md0
+  vdo:
+    name: vdo1
+    state: present
+    device: /dev/md0
+    logicalsize: 2T
+
+- name: Remove VDO volume vdo1
+  vdo:
+    name: vdo1
+    state: absent
+'''
+
+RETURN = r'''#  '''
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+import re
+import traceback
+
+YAML_IMP_ERR = None
+try:
+    import yaml
+    HAS_YAML = True
+except ImportError:
+    YAML_IMP_ERR = traceback.format_exc()
+    HAS_YAML = False
+
+
+# Generate a list of VDO volumes, whether they are running or stopped.
+#
+# @param module  The AnsibleModule object.
+# @param vdocmd  The path of the 'vdo' command.
+#
+# @return vdolist  A list of currently created VDO volumes.
+def inventory_vdos(module, vdocmd):
+    rc, vdostatusout, err = module.run_command("%s status" % (vdocmd))
+
+    vdolist = []
+
+    if (rc == 2 and
+            re.findall(r"vdoconf.yml does not exist", err, re.MULTILINE)):
+        # If there is no /etc/vdoconf.yml file, assume there are no
+        # VDO volumes.  Return an empty list of VDO volumes.
+        return vdolist
+
+    if rc != 0:
+        module.fail_json(msg="Inventorying VDOs failed: %s"
+                         % vdostatusout, rc=rc, err=err)
+
+    vdostatusyaml = yaml.load(vdostatusout)
+    if vdostatusyaml is None:
+        return vdolist
+
+    vdoyamls = vdostatusyaml['VDOs']
+
+    if vdoyamls is not None:
+        vdolist = vdoyamls.keys()
+
+    return vdolist
+
+
+def list_running_vdos(module, vdocmd):
+    rc, vdolistout, err = module.run_command("%s list" % (vdocmd))
+    # Build a real list so membership can be tested more than once
+    # (on Python 3, filter() returns a single-use iterator).
+    runningvdolist = list(filter(None, vdolistout.split('\n')))
+    return runningvdolist
+
+
+# Generate a string containing options to pass to the 'VDO' command.
+# Note that a 'create' operation will pass more options than a
+# 'modify' operation.
+#
+# @param params  A dictionary of parameters, and their values
+#                (values of 'None' and/or nonexistent values are ignored).
+#
+# @return vdocmdoptions  A string to be used in a 'vdo ' command.
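+#
+# Illustrative example (an assumed invocation, not taken from the module
+# itself): add_vdooptions({'logicalsize': '2T', 'slabsize': '2G',
+# 'indexmode': 'sparse'}) would return the string
+# "--vdoLogicalSize=2T --vdoSlabSize=2G --sparseIndex=enabled".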
+def start_vdo(module, vdoname, vdocmd): + rc, out, err = module.run_command("%s start --name=%s" % (vdocmd, vdoname)) + if rc == 0: + module.log("started VDO volume %s" % vdoname) + + return rc + + +def stop_vdo(module, vdoname, vdocmd): + rc, out, err = module.run_command("%s stop --name=%s" % (vdocmd, vdoname)) + if rc == 0: + module.log("stopped VDO volume %s" % vdoname) + + return rc + + +def activate_vdo(module, vdoname, vdocmd): + rc, out, err = module.run_command("%s activate --name=%s" + % (vdocmd, vdoname)) + if rc == 0: + module.log("activated VDO volume %s" % vdoname) + + return rc + + +def deactivate_vdo(module, vdoname, vdocmd): + rc, out, err = module.run_command("%s deactivate --name=%s" + % (vdocmd, vdoname)) + if rc == 0: + module.log("deactivated VDO volume %s" % vdoname) + + return rc + + +def add_vdooptions(params): + vdocmdoptions = "" + options = [] + + if ('logicalsize' in params) and (params['logicalsize'] is not None): + options.append("--vdoLogicalSize=" + params['logicalsize']) + + if (('blockmapcachesize' in params) and + (params['blockmapcachesize'] is not None)): + options.append("--blockMapCacheSize=" + params['blockmapcachesize']) + + if ('readcache' in params) and (params['readcache'] == 'enabled'): + options.append("--readCache=enabled") + + if ('readcachesize' in params) and (params['readcachesize'] is not None): + options.append("--readCacheSize=" + params['readcachesize']) + + if ('slabsize' in params) and (params['slabsize'] is not None): + options.append("--vdoSlabSize=" + params['slabsize']) + + if ('emulate512' in params) and (params['emulate512']): + options.append("--emulate512=enabled") + + if ('indexmem' in params) and (params['indexmem'] is not None): + options.append("--indexMem=" + params['indexmem']) + + if ('indexmode' in params) and (params['indexmode'] == 'sparse'): + options.append("--sparseIndex=enabled") + + # Entering an invalid thread config results in a cryptic + # 'Could not set up device mapper for %s' error from the 'vdo' + # command execution. The dmsetup module on the system will + # output a more helpful message, but one would have to log + # onto that system to read the error. For now, heed the thread + # limit warnings in the DOCUMENTATION section above. + if ('ackthreads' in params) and (params['ackthreads'] is not None): + options.append("--vdoAckThreads=" + params['ackthreads']) + + if ('biothreads' in params) and (params['biothreads'] is not None): + options.append("--vdoBioThreads=" + params['biothreads']) + + if ('cputhreads' in params) and (params['cputhreads'] is not None): + options.append("--vdoCpuThreads=" + params['cputhreads']) + + if ('logicalthreads' in params) and (params['logicalthreads'] is not None): + options.append("--vdoLogicalThreads=" + params['logicalthreads']) + + if (('physicalthreads' in params) and + (params['physicalthreads'] is not None)): + options.append("--vdoPhysicalThreads=" + params['physicalthreads']) + + vdocmdoptions = ' '.join(options) + return vdocmdoptions + + +def run_module(): + + # Define the available arguments/parameters that a user can pass to + # the module. + # Defaults for VDO parameters are None, in order to facilitate + # the detection of parameters passed from the playbook. + # Creation param defaults are determined by the creation section. 
+
+    module_args = dict(
+        name=dict(type='str', required=True),
+        state=dict(type='str', default='present', choices=['absent', 'present']),
+        activated=dict(type='bool'),
+        running=dict(type='bool'),
+        growphysical=dict(type='bool', default=False),
+        device=dict(type='str'),
+        logicalsize=dict(type='str'),
+        deduplication=dict(type='str', choices=['disabled', 'enabled']),
+        compression=dict(type='str', choices=['disabled', 'enabled']),
+        blockmapcachesize=dict(type='str'),
+        readcache=dict(type='str', choices=['disabled', 'enabled']),
+        readcachesize=dict(type='str'),
+        emulate512=dict(type='bool', default=False),
+        slabsize=dict(type='str'),
+        writepolicy=dict(type='str', choices=['async', 'auto', 'sync']),
+        indexmem=dict(type='str'),
+        indexmode=dict(type='str', choices=['dense', 'sparse']),
+        ackthreads=dict(type='str'),
+        biothreads=dict(type='str'),
+        cputhreads=dict(type='str'),
+        logicalthreads=dict(type='str'),
+        physicalthreads=dict(type='str')
+    )
+
+    # Seed the result dictionary in the object.  There will be an
+    # 'invocation' dictionary added with 'module_args' (arguments
+    # given).
+    result = dict(
+        changed=False,
+    )
+
+    # The AnsibleModule object is the abstraction for working with Ansible;
+    # it carries the args/params passed to the execution, as well as
+    # whether the module supports check mode.
+    module = AnsibleModule(
+        argument_spec=module_args,
+        supports_check_mode=False,
+    )
+
+    if not HAS_YAML:
+        module.fail_json(msg=missing_required_lib('PyYAML'), exception=YAML_IMP_ERR)
+
+    vdocmd = module.get_bin_path("vdo", required=True)
+    if not vdocmd:
+        module.fail_json(msg='VDO is not installed.', **result)
+
+    # Print a pre-run list of VDO volumes in the result object.
+    vdolist = inventory_vdos(module, vdocmd)
+
+    runningvdolist = list_running_vdos(module, vdocmd)
+
+    # Collect the name of the desired VDO volume, and its state.  These will
+    # determine what to do.
+    desiredvdo = module.params['name']
+    state = module.params['state']
+
+    # Create a desired VDO volume that doesn't exist yet.
+    if (desiredvdo not in vdolist) and (state == 'present'):
+        device = module.params['device']
+        if device is None:
+            module.fail_json(msg="Creating a VDO volume requires specifying "
+                                 "a 'device' in the playbook.")
+
+        # Create a dictionary of the options from the AnsibleModule
+        # parameters, compile the vdo command options, and run "vdo create"
+        # with those options.
+        # Since this is a creation of a new VDO volume, it will contain
+        # all of the parameters given by the playbook; the rest will
+        # assume default values.
+        options = module.params
+        vdocmdoptions = add_vdooptions(options)
+        rc, out, err = module.run_command("%s create --name=%s --device=%s %s"
+                                          % (vdocmd, desiredvdo, device,
+                                             vdocmdoptions))
+        if rc == 0:
+            result['changed'] = True
+        else:
+            module.fail_json(msg="Creating VDO %s failed."
+                             % desiredvdo, rc=rc, err=err)
+
+        if (module.params['compression'] == 'disabled'):
+            rc, out, err = module.run_command("%s disableCompression --name=%s"
+                                              % (vdocmd, desiredvdo))
+
+        if ((module.params['deduplication'] is not None) and
+                module.params['deduplication'] == 'disabled'):
+            rc, out, err = module.run_command("%s disableDeduplication "
+                                              "--name=%s"
+                                              % (vdocmd, desiredvdo))
+
+        # 'activated' and 'running' are boolean parameters; compare against
+        # False explicitly so an unspecified parameter (None) is left alone.
+        if module.params['activated'] is False:
+            deactivate_vdo(module, desiredvdo, vdocmd)
+
+        if module.params['running'] is False:
+            stop_vdo(module, desiredvdo, vdocmd)
+
+        # Print a post-run list of VDO volumes in the result object.
+        vdolist = inventory_vdos(module, vdocmd)
+        module.log("created VDO volume %s" % desiredvdo)
+        module.exit_json(**result)
+
+    # Modify the current parameters of a VDO that exists.
+    if (desiredvdo in vdolist) and (state == 'present'):
+        rc, vdostatusoutput, err = module.run_command("%s status" % (vdocmd))
+        vdostatusyaml = yaml.load(vdostatusoutput)
+
+        # An empty dictionary to contain dictionaries of VDO statistics
+        processedvdos = {}
+
+        vdoyamls = vdostatusyaml['VDOs']
+        if vdoyamls is not None:
+            processedvdos = vdoyamls
+
+        # The 'vdo status' keys that are currently modifiable.
+        statusparamkeys = ['Acknowledgement threads',
+                           'Bio submission threads',
+                           'Block map cache size',
+                           'CPU-work threads',
+                           'Logical threads',
+                           'Physical threads',
+                           'Read cache',
+                           'Read cache size',
+                           'Configured write policy',
+                           'Compression',
+                           'Deduplication']
+
+        # A key translation table from 'vdo status' output to Ansible
+        # module parameters.  This covers all of the 'vdo status'
+        # parameter keys that could be modified with the 'vdo'
+        # command.
+        vdokeytrans = {
+            'Logical size': 'logicalsize',
+            'Compression': 'compression',
+            'Deduplication': 'deduplication',
+            'Block map cache size': 'blockmapcachesize',
+            'Read cache': 'readcache',
+            'Read cache size': 'readcachesize',
+            'Configured write policy': 'writepolicy',
+            'Acknowledgement threads': 'ackthreads',
+            'Bio submission threads': 'biothreads',
+            'CPU-work threads': 'cputhreads',
+            'Logical threads': 'logicalthreads',
+            'Physical threads': 'physicalthreads'
+        }
+
+        # Build a dictionary of the current VDO status parameters, with
+        # the keys used by VDO.  (These keys will be converted later.)
+        currentvdoparams = {}
+
+        # Build a "lookup table" dictionary containing a translation table
+        # of the parameters that can be modified
+        modtrans = {}
+
+        for statfield in statusparamkeys:
+            if statfield in processedvdos[desiredvdo]:
+                currentvdoparams[statfield] = processedvdos[desiredvdo][statfield]
+
+            modtrans[statfield] = vdokeytrans[statfield]
+
+        # Build a dictionary of current parameter values formatted with the
+        # same keys as the AnsibleModule parameters.
+        currentparams = {}
+        for paramkey in currentvdoparams.keys():
+            currentparams[modtrans[paramkey]] = currentvdoparams[paramkey]
+
+        diffparams = {}
+
+        # Check for differences between the playbook parameters and the
+        # current parameters.  This will need a comparison function;
+        # since AnsibleModule params are all strings, compare them as
+        # strings (but if it's None, skip).
+        for key in currentparams.keys():
+            if module.params[key] is not None:
+                if str(currentparams[key]) != module.params[key]:
+                    diffparams[key] = module.params[key]
+
+        if diffparams:
+            vdocmdoptions = add_vdooptions(diffparams)
+            if vdocmdoptions:
+                rc, out, err = module.run_command("%s modify --name=%s %s"
                                                  % (vdocmd,
+                                                     desiredvdo,
+                                                     vdocmdoptions))
+                if rc == 0:
+                    result['changed'] = True
+                else:
+                    module.fail_json(msg="Modifying VDO %s failed."
+                                     % desiredvdo, rc=rc, err=err)
+
+            if 'deduplication' in diffparams.keys():
+                dedupemod = diffparams['deduplication']
+                if dedupemod == 'disabled':
+                    rc, out, err = module.run_command("%s "
+                                                      "disableDeduplication "
+                                                      "--name=%s"
+                                                      % (vdocmd, desiredvdo))
+
+                    if rc == 0:
+                        result['changed'] = True
+                    else:
+                        module.fail_json(msg="Changing deduplication on "
+                                             "VDO volume %s failed."
+                                         % desiredvdo, rc=rc, err=err)
+
+                if dedupemod == 'enabled':
+                    rc, out, err = module.run_command("%s "
+                                                      "enableDeduplication "
+                                                      "--name=%s"
+                                                      % (vdocmd, desiredvdo))
+
+                    if rc == 0:
+                        result['changed'] = True
+                    else:
+                        module.fail_json(msg="Changing deduplication on "
+                                             "VDO volume %s failed."
+                                         % desiredvdo, rc=rc, err=err)
+
+            if 'compression' in diffparams.keys():
+                compressmod = diffparams['compression']
+                if compressmod == 'disabled':
+                    rc, out, err = module.run_command("%s disableCompression "
+                                                      "--name=%s"
+                                                      % (vdocmd, desiredvdo))
+
+                    if rc == 0:
+                        result['changed'] = True
+                    else:
+                        module.fail_json(msg="Changing compression on "
+                                             "VDO volume %s failed."
+                                         % desiredvdo, rc=rc, err=err)
+
+                if compressmod == 'enabled':
+                    rc, out, err = module.run_command("%s enableCompression "
+                                                      "--name=%s"
+                                                      % (vdocmd, desiredvdo))
+
+                    if rc == 0:
+                        result['changed'] = True
+                    else:
+                        module.fail_json(msg="Changing compression on "
+                                             "VDO volume %s failed."
+                                         % desiredvdo, rc=rc, err=err)
+
+            if 'writepolicy' in diffparams.keys():
+                writepolmod = diffparams['writepolicy']
+                if writepolmod in ['auto', 'sync', 'async']:
+                    rc, out, err = module.run_command("%s "
+                                                      "changeWritePolicy "
+                                                      "--name=%s "
+                                                      "--writePolicy=%s"
+                                                      % (vdocmd,
+                                                         desiredvdo,
+                                                         writepolmod))
+
+                    if rc == 0:
+                        result['changed'] = True
+                    else:
+                        module.fail_json(msg="Changing write policy on "
+                                             "VDO volume %s failed."
+                                         % desiredvdo, rc=rc, err=err)
+
+        # Process the size parameters, to determine whether a growPhysical
+        # or growLogical operation needs to occur.
+        sizeparamkeys = ['Logical size', ]
+
+        currentsizeparams = {}
+        sizetrans = {}
+        for statfield in sizeparamkeys:
+            currentsizeparams[statfield] = processedvdos[desiredvdo][statfield]
+            sizetrans[statfield] = vdokeytrans[statfield]
+
+        sizeparams = {}
+        for paramkey in currentsizeparams.keys():
+            sizeparams[sizetrans[paramkey]] = currentsizeparams[paramkey]
+
+        diffsizeparams = {}
+        for key in sizeparams.keys():
+            if module.params[key] is not None:
+                if str(sizeparams[key]) != module.params[key]:
+                    diffsizeparams[key] = module.params[key]
+
+        if module.params['growphysical']:
+            physdevice = module.params['device']
+            rc, devsectors, err = module.run_command("blockdev --getsz %s"
+                                                     % (physdevice))
+            devblocks = (int(devsectors) / 8)
+            dmvdoname = ('/dev/mapper/' + desiredvdo)
+            currentvdostats = (processedvdos[desiredvdo]
+                               ['VDO statistics']
+                               [dmvdoname])
+            currentphysblocks = currentvdostats['physical blocks']
+
+            # Set a growPhysical threshold to grow only when there is
+            # guaranteed to be more than 2 slabs worth of unallocated
+            # space on the device to use.  For now, set to device
+            # size + 64 GB, since 32 GB is the largest possible
+            # slab size.
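+            # ('blockdev --getsz' reports 512-byte sectors, and the value is
+            # divided by 8 above, so 'devblocks' counts 4096-byte blocks;
+            # the 64 GB margin is 64 * 1024**3 / 4096 = 16777216 blocks.)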
+            growthresh = devblocks + 16777216
+
+            if currentphysblocks > growthresh:
+                result['changed'] = True
+                rc, out, err = module.run_command("%s growPhysical --name=%s"
+                                                  % (vdocmd, desiredvdo))
+
+        if 'logicalsize' in diffsizeparams.keys():
+            result['changed'] = True
+            vdocmdoptions = ("--vdoLogicalSize=" +
+                             diffsizeparams['logicalsize'])
+            rc, out, err = module.run_command("%s growLogical --name=%s %s"
+                                              % (vdocmd,
+                                                 desiredvdo,
+                                                 vdocmdoptions))
+
+        vdoactivatestatus = processedvdos[desiredvdo]['Activate']
+
+        # 'activated' and 'running' are boolean parameters; compare against
+        # True/False explicitly so an unspecified parameter (None) is left
+        # alone.
+        if ((module.params['activated'] is False) and
+                (vdoactivatestatus == 'enabled')):
+            deactivate_vdo(module, desiredvdo, vdocmd)
+            if not result['changed']:
+                result['changed'] = True
+
+        if ((module.params['activated'] is True) and
+                (vdoactivatestatus == 'disabled')):
+            activate_vdo(module, desiredvdo, vdocmd)
+            if not result['changed']:
+                result['changed'] = True
+
+        if ((module.params['running'] is False) and
+                (desiredvdo in runningvdolist)):
+            stop_vdo(module, desiredvdo, vdocmd)
+            if not result['changed']:
+                result['changed'] = True
+
+        # Note that a disabled VDO volume cannot be started by the
+        # 'vdo start' command, by design.  To accurately track changed
+        # status, don't try to start a disabled VDO volume.
+        # If the playbook contains 'activated: yes', assume that
+        # the activate_vdo() operation succeeded, as 'vdoactivatestatus'
+        # will have the activated status prior to the activate_vdo()
+        # call.
+        if (((vdoactivatestatus == 'enabled') or
+             (module.params['activated'] is True)) and
+                (module.params['running'] is True) and
+                (desiredvdo not in runningvdolist)):
+            start_vdo(module, desiredvdo, vdocmd)
+            if not result['changed']:
+                result['changed'] = True
+
+        # Print a post-run list of VDO volumes in the result object.
+        vdolist = inventory_vdos(module, vdocmd)
+        if diffparams:
+            module.log("modified parameters of VDO volume %s" % desiredvdo)
+
+        module.exit_json(**result)
+
+    # Remove a desired VDO that currently exists.
+    if (desiredvdo in vdolist) and (state == 'absent'):
+        rc, out, err = module.run_command("%s remove --name=%s"
+                                          % (vdocmd, desiredvdo))
+        if rc == 0:
+            result['changed'] = True
+        else:
+            module.fail_json(msg="Removing VDO %s failed."
+                             % desiredvdo, rc=rc, err=err)
+
+        # Print a post-run list of VDO volumes in the result object.
+        vdolist = inventory_vdos(module, vdocmd)
+        module.log("removed VDO volume %s" % desiredvdo)
+        module.exit_json(**result)
+
+    # fall through
+    # The state for the desired VDO volume was absent, and it does
+    # not exist.  Print a post-run list of VDO volumes in the result
+    # object.
+ vdolist = inventory_vdos(module, vdocmd) + module.log("received request to remove non-existent VDO volume %s" + % desiredvdo) + + module.exit_json(**result) + + +def main(): + run_module() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/system/xfconf.py b/plugins/modules/system/xfconf.py new file mode 100644 index 0000000000..2b3dcf9404 --- /dev/null +++ b/plugins/modules/system/xfconf.py @@ -0,0 +1,214 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# (c) 2017, Joseph Benden +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +module: xfconf +author: + - "Joseph Benden (@jbenden)" +short_description: Edit XFCE4 Configurations +description: + - This module allows for the manipulation of Xfce 4 Configuration via + xfconf-query. Please see the xfconf-query(1) man pages for more details. +options: + channel: + description: + - A Xfconf preference channel is a top-level tree key, inside of the + Xfconf repository that corresponds to the location for which all + application properties/keys are stored. See man xfconf-query(1) + required: yes + property: + description: + - A Xfce preference key is an element in the Xfconf repository + that corresponds to an application preference. See man xfconf-query(1) + required: yes + value: + description: + - Preference properties typically have simple values such as strings, + integers, or lists of strings and integers. This is ignored if the state + is "get". See man xfconf-query(1) + value_type: + description: + - The type of value being set. This is ignored if the state is "get". + choices: [ int, bool, float, string ] + state: + description: + - The action to take upon the property/value. + choices: [ get, present, absent ] + default: "present" +''' + +EXAMPLES = """ +- name: Change the DPI to "192" + xfconf: + channel: "xsettings" + property: "/Xft/DPI" + value_type: "int" + value: "192" + become: True + become_user: johnsmith + +""" + +RETURN = ''' + channel: + description: The channel specified in the module parameters + returned: success + type: str + sample: "xsettings" + property: + description: The property specified in the module parameters + returned: success + type: str + sample: "/Xft/DPI" + value_type: + description: The type of the value that was changed + returned: success + type: str + sample: "int" + value: + description: The value of the preference key after executing the module + returned: success + type: str + sample: "192" +... 
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves import shlex_quote
+
+
+class XfconfPreference(object):
+    def __init__(self, module, channel, property, value_type, value):
+        self.module = module
+        self.channel = channel
+        self.property = property
+        self.value_type = value_type
+        self.value = value
+
+    def call(self, call_type, fail_onerr=True):
+        """ Helper function to perform xfconf-query operations """
+        changed = False
+        out = ''
+
+        # Build the base xfconf-query invocation.
+        cmd = "{0} --channel {1} --property {2}".format(self.module.get_bin_path('xfconf-query', True),
+                                                        shlex_quote(self.channel),
+                                                        shlex_quote(self.property))
+        try:
+            if call_type == 'set':
+                cmd += " --type {0} --create --set {1}".format(shlex_quote(self.value_type),
+                                                               shlex_quote(self.value))
+            elif call_type == 'unset':
+                cmd += " --reset"
+
+            # Start external command
+            rc, out, err = self.module.run_command(cmd, check_rc=False)
+
+            if rc != 0 or len(err) > 0:
+                if fail_onerr:
+                    self.module.fail_json(msg='xfconf-query failed with error: %s' % (str(err)))
+            else:
+                changed = True
+
+        except OSError as exception:
+            self.module.fail_json(msg='xfconf-query failed with exception: %s' % exception)
+        return changed, out.rstrip()
+
+
+def main():
+    # Setup the Ansible module
+    module = AnsibleModule(
+        argument_spec=dict(
+            channel=dict(required=True, type='str'),
+            property=dict(required=True, type='str'),
+            value_type=dict(required=False,
+                            choices=['int', 'bool', 'float', 'string'],
+                            type='str'),
+            value=dict(required=False, default=None, type='str'),
+            state=dict(default='present',
+                       choices=['present', 'get', 'absent'],
+                       type='str')
+        ),
+        supports_check_mode=True
+    )
+
+    state_values = {"present": "set", "absent": "unset", "get": "get"}
+
+    # Assign module values to dictionary values
+    channel = module.params['channel']
+    property = module.params['property']
+    value_type = module.params['value_type']
+
+    # Normalize boolean-like values; guard against 'value' being None,
+    # as it is for state=get and state=absent.
+    value = module.params['value']
+    if value is not None:
+        if value.lower() == "true":
+            value = "true"
+        elif value.lower() == "false":
+            value = "false"
+
+    state = state_values[module.params['state']]
+
+    # Initialize some variables for later
+    change = False
+    new_value = ''
+
+    if state != "get":
+        if value is None or value == "":
+            module.fail_json(msg='State %s requires "value" to be set'
+                                 % str(state))
+        elif value_type is None or value_type == "":
+            module.fail_json(msg='State %s requires "value_type" to be set'
+                                 % str(state))
+
+    # Create a Xfconf preference
+    xfconf = XfconfPreference(module,
+                              channel,
+                              property,
+                              value_type,
+                              value)
+    # Now we get the current value, if not found don't fail
+    dummy, current_value = xfconf.call("get", fail_onerr=False)
+
+    # Check if the current value equals the value we want to set.  If not,
+    # make a change.
+    if current_value != value:
+        # If check mode, we know a change would have occurred.
+        if module.check_mode:
+            # So we will set the change to True
+            change = True
+            # And set the new_value to the value that would have been set
+            new_value = value
+        # If not check mode make the change.
+ else: + change, new_value = xfconf.call(state) + # If the value we want to set is the same as the current_value, we will + # set the new_value to the current_value for reporting + else: + new_value = current_value + + facts = dict(xfconf={'changed': change, + 'channel': channel, + 'property': property, + 'value_type': value_type, + 'new_value': new_value, + 'previous_value': current_value, + 'playbook_value': module.params['value']}) + + module.exit_json(changed=change, ansible_facts=facts) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/system/xfs_quota.py b/plugins/modules/system/xfs_quota.py new file mode 100644 index 0000000000..03ba7e1f6c --- /dev/null +++ b/plugins/modules/system/xfs_quota.py @@ -0,0 +1,432 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2018, Emmanouil Kampitakis +# Copyright: (c) 2018, William Leemans + +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = { + 'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community' +} + +DOCUMENTATION = r''' +--- +module: xfs_quota +short_description: Manage quotas on XFS filesystems +description: + - Configure quotas on XFS filesystems. + - Before using this module /etc/projects and /etc/projid need to be configured. +author: +- William Leemans (@bushvin) +options: + type: + description: + - The XFS quota type. + type: str + required: true + choices: + - user + - group + - project + name: + description: + - The name of the user, group or project to apply the quota to, if other than default. + type: str + mountpoint: + description: + - The mount point on which to apply the quotas. + type: str + required: true + bhard: + description: + - Hard blocks quota limit. + - This argument supports human readable sizes. + type: str + bsoft: + description: + - Soft blocks quota limit. + - This argument supports human readable sizes. + type: str + ihard: + description: + - Hard inodes quota limit. + type: int + isoft: + description: + - Soft inodes quota limit. + type: int + rtbhard: + description: + - Hard realtime blocks quota limit. + - This argument supports human readable sizes. + type: str + rtbsoft: + description: + - Soft realtime blocks quota limit. + - This argument supports human readable sizes. + type: str + state: + description: + - Whether to apply the limits or remove them. + - When removing limit, they are set to 0, and not quite removed. 
+ type: str + default: present + choices: + - present + - absent + +requirements: + - xfsprogs +''' + +EXAMPLES = r''' +- name: Set default project soft and hard limit on /opt of 1g + xfs_quota: + type: project + mountpoint: /opt + bsoft: 1g + bhard: 1g + state: present + +- name: Remove the default limits on /opt + xfs_quota: + type: project + mountpoint: /opt + state: absent + +- name: Set default soft user inode limits on /home of 1024 inodes and hard of 2048 + xfs_quota: + type: user + mountpoint: /home + isoft: 1024 + ihard: 2048 + +''' + +RETURN = r''' +bhard: + description: the current bhard setting in bytes + returned: always + type: int + sample: 1024 +bsoft: + description: the current bsoft setting in bytes + returned: always + type: int + sample: 1024 +ihard: + description: the current ihard setting in bytes + returned: always + type: int + sample: 100 +isoft: + description: the current isoft setting in bytes + returned: always + type: int + sample: 100 +rtbhard: + description: the current rtbhard setting in bytes + returned: always + type: int + sample: 1024 +rtbsoft: + description: the current rtbsoft setting in bytes + returned: always + type: int + sample: 1024 +''' + +import grp +import os +import pwd + +from ansible.module_utils.basic import AnsibleModule, human_to_bytes + + +def main(): + module = AnsibleModule( + argument_spec=dict( + bhard=dict(type='str'), + bsoft=dict(type='str'), + ihard=dict(type='int'), + isoft=dict(type='int'), + mountpoint=dict(type='str', required=True), + name=dict(type='str'), + rtbhard=dict(type='str'), + rtbsoft=dict(type='str'), + state=dict(type='str', default='present', choices=['absent', 'present']), + type=dict(type='str', required=True, choices=['group', 'project', 'user']) + ), + supports_check_mode=True, + ) + + quota_type = module.params['type'] + name = module.params['name'] + mountpoint = module.params['mountpoint'] + bhard = module.params['bhard'] + bsoft = module.params['bsoft'] + ihard = module.params['ihard'] + isoft = module.params['isoft'] + rtbhard = module.params['rtbhard'] + rtbsoft = module.params['rtbsoft'] + state = module.params['state'] + + if bhard is not None: + bhard = human_to_bytes(bhard) + + if bsoft is not None: + bsoft = human_to_bytes(bsoft) + + if rtbhard is not None: + rtbhard = human_to_bytes(rtbhard) + + if rtbsoft is not None: + rtbsoft = human_to_bytes(rtbsoft) + + result = dict( + changed=False, + ) + + if not os.path.ismount(mountpoint): + module.fail_json(msg="Path '%s' is not a mount point" % mountpoint, **result) + + mp = get_fs_by_mountpoint(mountpoint) + if mp is None: + module.fail_json(msg="Path '%s' is not a mount point or not located on an xfs file system." % mountpoint, **result) + + if quota_type == 'user': + type_arg = '-u' + quota_default = 'root' + if name is None: + name = quota_default + + if 'uquota' not in mp['mntopts'] and 'usrquota' not in mp['mntopts'] and 'quota' not in mp['mntopts'] and 'uqnoenforce' not in mp['mntopts'] and \ + 'qnoenforce' not in mp['mntopts']: + module.fail_json( + msg="Path '%s' is not mounted with the uquota/usrquota/quota/uqnoenforce/qnoenforce option." % mountpoint, **result + ) + try: + pwd.getpwnam(name) + except KeyError as e: + module.fail_json(msg="User '%s' does not exist." 
% name, **result)
+
+    elif quota_type == 'group':
+        type_arg = '-g'
+        quota_default = 'root'
+        if name is None:
+            name = quota_default
+
+        if 'gquota' not in mp['mntopts'] and 'grpquota' not in mp['mntopts'] and 'gqnoenforce' not in mp['mntopts']:
+            module.fail_json(
+                msg="Path '%s' is not mounted with the gquota/grpquota/gqnoenforce option. (current options: %s)" % (mountpoint, mp['mntopts']), **result
+            )
+        try:
+            grp.getgrnam(name)
+        except KeyError as e:
+            module.fail_json(msg="Group '%s' does not exist." % name, **result)
+
+    elif quota_type == 'project':
+        type_arg = '-p'
+        quota_default = '#0'
+        if name is None:
+            name = quota_default
+
+        if 'pquota' not in mp['mntopts'] and 'prjquota' not in mp['mntopts'] and 'pqnoenforce' not in mp['mntopts']:
+            module.fail_json(msg="Path '%s' is not mounted with the pquota/prjquota/pqnoenforce option." % mountpoint, **result)
+
+        if name != quota_default and not os.path.isfile('/etc/projects'):
+            module.fail_json(msg="Path '/etc/projects' does not exist.", **result)
+
+        if name != quota_default and not os.path.isfile('/etc/projid'):
+            module.fail_json(msg="Path '/etc/projid' does not exist.", **result)
+
+        if name != quota_default and name is not None and get_project_id(name) is None:
+            module.fail_json(msg="Entry '%s' has not been defined in /etc/projid." % name, **result)
+
+        prj_set = True
+        if name != quota_default:
+            cmd = 'project %s' % name
+            rc, stdout, stderr = exec_quota(module, cmd, mountpoint)
+            if rc != 0:
+                result['cmd'] = cmd
+                result['rc'] = rc
+                result['stdout'] = stdout
+                result['stderr'] = stderr
+                module.fail_json(msg='Could not get project state.', **result)
+            else:
+                for line in stdout.split('\n'):
+                    if "Project Id '%s' - is not set." % name in line:
+                        prj_set = False
+                        break
+
+        if not prj_set and not module.check_mode:
+            cmd = 'project -s'
+            rc, stdout, stderr = exec_quota(module, cmd, mountpoint)
+            if rc != 0:
+                result['cmd'] = cmd
+                result['rc'] = rc
+                result['stdout'] = stdout
+                result['stderr'] = stderr
+                module.fail_json(msg='Could not set up project quota.', **result)
+
+            result['changed'] = True
+
+        elif not prj_set and module.check_mode:
+            result['changed'] = True
+
+    # Set limits
+    if state == 'absent':
+        bhard = 0
+        bsoft = 0
+        ihard = 0
+        isoft = 0
+        rtbhard = 0
+        rtbsoft = 0
+
+    current_bsoft, current_bhard = quota_report(module, mountpoint, name, quota_type, 'b')
+    current_isoft, current_ihard = quota_report(module, mountpoint, name, quota_type, 'i')
+    current_rtbsoft, current_rtbhard = quota_report(module, mountpoint, name, quota_type, 'rtb')
+
+    result['xfs_quota'] = dict(
+        bsoft=current_bsoft,
+        bhard=current_bhard,
+        isoft=current_isoft,
+        ihard=current_ihard,
+        rtbsoft=current_rtbsoft,
+        rtbhard=current_rtbhard
+    )
+
+    limit = []
+    if bsoft is not None and int(bsoft) != current_bsoft:
+        limit.append('bsoft=%s' % bsoft)
+        result['bsoft'] = int(bsoft)
+
+    if bhard is not None and int(bhard) != current_bhard:
+        limit.append('bhard=%s' % bhard)
+        result['bhard'] = int(bhard)
+
+    if isoft is not None and isoft != current_isoft:
+        limit.append('isoft=%s' % isoft)
+        result['isoft'] = isoft
+
+    if ihard is not None and ihard != current_ihard:
+        limit.append('ihard=%s' % ihard)
+        result['ihard'] = ihard
+
+    if rtbsoft is not None and int(rtbsoft) != current_rtbsoft:
+        limit.append('rtbsoft=%s' % rtbsoft)
+        result['rtbsoft'] = int(rtbsoft)
+
+    if rtbhard is not None and int(rtbhard) != current_rtbhard:
+        limit.append('rtbhard=%s' % rtbhard)
+        result['rtbhard'] = int(rtbhard)
+
+    if len(limit) > 0 and not
module.check_mode: + if name == quota_default: + cmd = 'limit %s -d %s' % (type_arg, ' '.join(limit)) + else: + cmd = 'limit %s %s %s' % (type_arg, ' '.join(limit), name) + + rc, stdout, stderr = exec_quota(module, cmd, mountpoint) + if rc != 0: + result['cmd'] = cmd + result['rc'] = rc + result['stdout'] = stdout + result['stderr'] = stderr + module.fail_json(msg='Could not set limits.', **result) + + result['changed'] = True + + elif len(limit) > 0 and module.check_mode: + result['changed'] = True + + module.exit_json(**result) + + +def quota_report(module, mountpoint, name, quota_type, used_type): + soft = None + hard = None + + if quota_type == 'project': + type_arg = '-p' + elif quota_type == 'user': + type_arg = '-u' + elif quota_type == 'group': + type_arg = '-g' + + if used_type == 'b': + used_arg = '-b' + used_name = 'blocks' + factor = 1024 + elif used_type == 'i': + used_arg = '-i' + used_name = 'inodes' + factor = 1 + elif used_type == 'rtb': + used_arg = '-r' + used_name = 'realtime blocks' + factor = 1024 + + rc, stdout, stderr = exec_quota(module, 'report %s %s' % (type_arg, used_arg), mountpoint) + + if rc != 0: + result = dict( + changed=False, + rc=rc, + stdout=stdout, + stderr=stderr, + ) + module.fail_json(msg='Could not get quota report for %s.' % used_name, **result) + + for line in stdout.split('\n'): + line = line.strip().split() + if len(line) > 3 and line[0] == name: + soft = int(line[2]) * factor + hard = int(line[3]) * factor + break + + return soft, hard + + +def exec_quota(module, cmd, mountpoint): + cmd = ['xfs_quota', '-x', '-c'] + [cmd, mountpoint] + (rc, stdout, stderr) = module.run_command(cmd, use_unsafe_shell=True) + if "XFS_GETQUOTA: Operation not permitted" in stderr.split('\n') or \ + rc == 1 and 'xfs_quota: cannot set limits: Operation not permitted' in stderr.split('\n'): + module.fail_json(msg='You need to be root or have CAP_SYS_ADMIN capability to perform this operation') + + return rc, stdout, stderr + + +def get_fs_by_mountpoint(mountpoint): + mpr = None + with open('/proc/mounts', 'r') as s: + for line in s.readlines(): + mp = line.strip().split() + if len(mp) == 6 and mp[1] == mountpoint and mp[2] == 'xfs': + mpr = dict(zip(['spec', 'file', 'vfstype', 'mntopts', 'freq', 'passno'], mp)) + mpr['mntopts'] = mpr['mntopts'].split(',') + break + return mpr + + +def get_project_id(name): + prjid = None + with open('/etc/projid', 'r') as s: + for line in s.readlines(): + line = line.strip().partition(':') + if line[0] == name: + prjid = line[2] + break + + return prjid + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/web_infrastructure/apache2_mod_proxy.py b/plugins/modules/web_infrastructure/apache2_mod_proxy.py new file mode 100644 index 0000000000..461ea77e41 --- /dev/null +++ b/plugins/modules/web_infrastructure/apache2_mod_proxy.py @@ -0,0 +1,443 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2016, Olivier Boukili +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: apache2_mod_proxy +author: Olivier Boukili (@oboukili) +short_description: Set and/or get members' attributes of an Apache httpd 2.4 mod_proxy balancer pool +description: + - Set and/or get members' attributes of an Apache httpd 2.4 mod_proxy balancer + pool, using HTTP POST and 
GET requests. The httpd mod_proxy balancer-member
+  status page has to be enabled and accessible, as this module relies on parsing
+  this page. This module supports Ansible check mode, and requires the
+  BeautifulSoup Python module.
+options:
+  balancer_url_suffix:
+    description:
+      - Suffix of the balancer pool url required to access the balancer pool
+        status page (e.g. balancer_vhost[:port]/balancer_url_suffix).
+    default: /balancer-manager/
+  balancer_vhost:
+    description:
+      - (ipv4|ipv6|fqdn):port of the Apache httpd 2.4 mod_proxy balancer pool.
+    required: true
+  member_host:
+    description:
+      - (ipv4|ipv6|fqdn) of the balancer member to get or to set attributes to.
+        Port number is autodetected and should not be specified here.
+        If undefined, apache2_mod_proxy module will return a members list of
+        dictionaries of all the current balancer pool members' attributes.
+  state:
+    description:
+      - Desired state of the member host.
+        (absent|disabled),drained,hot_standby,ignore_errors can be
+        simultaneously invoked by separating them with a comma (e.g. state=drained,ignore_errors).
+    choices: ["present", "absent", "enabled", "disabled", "drained", "hot_standby", "ignore_errors"]
+  tls:
+    description:
+      - Use https to access balancer management page.
+    type: bool
+    default: 'no'
+  validate_certs:
+    description:
+      - Validate ssl/tls certificates.
+    type: bool
+    default: 'yes'
+'''
+
+EXAMPLES = '''
+# Get all current balancer pool members' attributes:
+- apache2_mod_proxy:
+    balancer_vhost: 10.0.0.2
+
+# Get a specific member's attributes:
+- apache2_mod_proxy:
+    balancer_vhost: myws.mydomain.org
+    balancer_url_suffix: /lb/
+    member_host: node1.myws.mydomain.org
+
+# Enable all balancer pool members:
+- apache2_mod_proxy:
+    balancer_vhost: '{{ myloadbalancer_host }}'
+  register: result
+- apache2_mod_proxy:
+    balancer_vhost: '{{ myloadbalancer_host }}'
+    member_host: '{{ item.host }}'
+    state: present
+  with_items: '{{ result.members }}'
+
+# Gracefully disable a member from a loadbalancer node:
+- apache2_mod_proxy:
+    balancer_vhost: '{{ vhost_host }}'
+    member_host: '{{ member.host }}'
+    state: drained
+  delegate_to: myloadbalancernode
+- wait_for:
+    host: '{{ member.host }}'
+    port: '{{ member.port }}'
+    state: drained
+  delegate_to: myloadbalancernode
+- apache2_mod_proxy:
+    balancer_vhost: '{{ vhost_host }}'
+    member_host: '{{ member.host }}'
+    state: absent
+  delegate_to: myloadbalancernode
+'''
+
+RETURN = '''
+member:
+  description: specific balancer member information dictionary, returned when apache2_mod_proxy module is invoked with member_host parameter.
+  type: dict
+  returned: success
+  sample:
+    {"attributes":
+      {"Busy": "0",
+       "Elected": "42",
+       "Factor": "1",
+       "From": "136K",
+       "Load": "0",
+       "Route": null,
+       "RouteRedir": null,
+       "Set": "0",
+       "Status": "Init Ok ",
+       "To": " 47K",
+       "Worker URL": null
+      },
+     "balancer_url": "http://10.10.0.2/balancer-manager/",
+     "host": "10.10.0.20",
+     "management_url": "http://10.10.0.2/lb/?b=mywsbalancer&w=http://10.10.0.20:8080/ws&nonce=8925436c-79c6-4841-8936-e7d13b79239b",
+     "path": "/ws",
+     "port": 8080,
+     "protocol": "http",
+     "status": {
+        "disabled": false,
+        "drained": false,
+        "hot_standby": false,
+        "ignore_errors": false
+      }
+    }
+members:
+  description: list of member (defined above) dictionaries, returned when apache2_mod_proxy is invoked with no member_host and state args.
+ returned: success + type: list + sample: + [{"attributes": { + "Busy": "0", + "Elected": "42", + "Factor": "1", + "From": "136K", + "Load": "0", + "Route": null, + "RouteRedir": null, + "Set": "0", + "Status": "Init Ok ", + "To": " 47K", + "Worker URL": null + }, + "balancer_url": "http://10.10.0.2/balancer-manager/", + "host": "10.10.0.20", + "management_url": "http://10.10.0.2/lb/?b=mywsbalancer&w=http://10.10.0.20:8080/ws&nonce=8925436c-79c6-4841-8936-e7d13b79239b", + "path": "/ws", + "port": 8080, + "protocol": "http", + "status": { + "disabled": false, + "drained": false, + "hot_standby": false, + "ignore_errors": false + } + }, + {"attributes": { + "Busy": "0", + "Elected": "42", + "Factor": "1", + "From": "136K", + "Load": "0", + "Route": null, + "RouteRedir": null, + "Set": "0", + "Status": "Init Ok ", + "To": " 47K", + "Worker URL": null + }, + "balancer_url": "http://10.10.0.2/balancer-manager/", + "host": "10.10.0.21", + "management_url": "http://10.10.0.2/lb/?b=mywsbalancer&w=http://10.10.0.21:8080/ws&nonce=8925436c-79c6-4841-8936-e7d13b79239b", + "path": "/ws", + "port": 8080, + "protocol": "http", + "status": { + "disabled": false, + "drained": false, + "hot_standby": false, + "ignore_errors": false} + } + ] +''' + +import re +import traceback + +BEAUTIFUL_SOUP_IMP_ERR = None +try: + from BeautifulSoup import BeautifulSoup +except ImportError: + BEAUTIFUL_SOUP_IMP_ERR = traceback.format_exc() + HAS_BEAUTIFULSOUP = False +else: + HAS_BEAUTIFULSOUP = True + +# balancer member attributes extraction regexp: +EXPRESSION = r"(b=([\w\.\-]+)&w=(https?|ajp|wss?|ftp|[sf]cgi)://([\w\.\-]+):?(\d*)([/\w\.\-]*)&?[\w\-\=]*)" +# Apache2 server version extraction regexp: +APACHE_VERSION_EXPRESSION = r"SERVER VERSION: APACHE/([\d.]+)" + + +def regexp_extraction(string, _regexp, groups=1): + """ Returns the capture group (default=1) specified in the regexp, applied to the string """ + regexp_search = re.search(string=str(string), pattern=str(_regexp)) + if regexp_search: + if regexp_search.group(groups) != '': + return str(regexp_search.group(groups)) + return None + + +class BalancerMember(object): + """ Apache 2.4 mod_proxy LB balancer member. + attributes: + read-only: + host -> member host (string), + management_url -> member management url (string), + protocol -> member protocol (string) + port -> member port (string), + path -> member location (string), + balancer_url -> url of this member's parent balancer (string), + attributes -> whole member attributes (dictionary) + module -> ansible module instance (AnsibleModule object). + writable: + status -> status of the member (dictionary) + """ + + def __init__(self, management_url, balancer_url, module): + self.host = regexp_extraction(management_url, str(EXPRESSION), 4) + self.management_url = str(management_url) + self.protocol = regexp_extraction(management_url, EXPRESSION, 3) + self.port = regexp_extraction(management_url, EXPRESSION, 5) + self.path = regexp_extraction(management_url, EXPRESSION, 6) + self.balancer_url = str(balancer_url) + self.module = module + + def get_member_attributes(self): + """ Returns a dictionary of a balancer member's attributes.""" + + balancer_member_page = fetch_url(self.module, self.management_url) + + if balancer_member_page[1]['status'] != 200: + self.module.fail_json(msg="Could not get balancer_member_page, check for connectivity! 
" + balancer_member_page[1]) + else: + try: + soup = BeautifulSoup(balancer_member_page[0]) + except TypeError: + self.module.fail_json(msg="Cannot parse balancer_member_page HTML! " + str(soup)) + else: + subsoup = soup.findAll('table')[1].findAll('tr') + keys = subsoup[0].findAll('th') + for valuesset in subsoup[1::1]: + if re.search(pattern=self.host, string=str(valuesset)): + values = valuesset.findAll('td') + return dict((keys[x].string, values[x].string) for x in range(0, len(keys))) + + def get_member_status(self): + """ Returns a dictionary of a balancer member's status attributes.""" + status_mapping = {'disabled': 'Dis', + 'drained': 'Drn', + 'hot_standby': 'Stby', + 'ignore_errors': 'Ign'} + status = {} + actual_status = str(self.attributes['Status']) + for mode in status_mapping.keys(): + if re.search(pattern=status_mapping[mode], string=actual_status): + status[mode] = True + else: + status[mode] = False + return status + + def set_member_status(self, values): + """ Sets a balancer member's status attributes amongst pre-mapped values.""" + values_mapping = {'disabled': '&w_status_D', + 'drained': '&w_status_N', + 'hot_standby': '&w_status_H', + 'ignore_errors': '&w_status_I'} + + request_body = regexp_extraction(self.management_url, EXPRESSION, 1) + for k in values_mapping.keys(): + if values[str(k)]: + request_body = request_body + str(values_mapping[k]) + '=1' + else: + request_body = request_body + str(values_mapping[k]) + '=0' + + response = fetch_url(self.module, self.management_url, data=str(request_body)) + if response[1]['status'] != 200: + self.module.fail_json(msg="Could not set the member status! " + self.host + " " + response[1]['status']) + + attributes = property(get_member_attributes) + status = property(get_member_status, set_member_status) + + +class Balancer(object): + """ Apache httpd 2.4 mod_proxy balancer object""" + + def __init__(self, host, suffix, module, members=None, tls=False): + if tls: + self.base_url = str(str('https://') + str(host)) + self.url = str(str('https://') + str(host) + str(suffix)) + else: + self.base_url = str(str('http://') + str(host)) + self.url = str(str('http://') + str(host) + str(suffix)) + self.module = module + self.page = self.fetch_balancer_page() + if members is None: + self._members = [] + + def fetch_balancer_page(self): + """ Returns the balancer management html page as a string for later parsing.""" + page = fetch_url(self.module, str(self.url)) + if page[1]['status'] != 200: + self.module.fail_json(msg="Could not get balancer page! HTTP status response: " + str(page[1]['status'])) + else: + content = page[0].read() + apache_version = regexp_extraction(content.upper(), APACHE_VERSION_EXPRESSION, 1) + if apache_version: + if not re.search(pattern=r"2\.4\.[\d]*", string=apache_version): + self.module.fail_json(msg="This module only acts on an Apache2 2.4+ instance, current Apache2 version: " + str(apache_version)) + return content + else: + self.module.fail_json(msg="Could not get the Apache server version from the balancer-manager") + + def get_balancer_members(self): + """ Returns members of the balancer as a generator object for later iteration.""" + try: + soup = BeautifulSoup(self.page) + except TypeError: + self.module.fail_json(msg="Cannot parse balancer page HTML! 
" + str(self.page)) + else: + for element in soup.findAll('a')[1::1]: + balancer_member_suffix = str(element.get('href')) + if not balancer_member_suffix: + self.module.fail_json(msg="Argument 'balancer_member_suffix' is empty!") + else: + yield BalancerMember(str(self.base_url + balancer_member_suffix), str(self.url), self.module) + + members = property(get_balancer_members) + + +def main(): + """ Initiates module.""" + module = AnsibleModule( + argument_spec=dict( + balancer_vhost=dict(required=True, default=None, type='str'), + balancer_url_suffix=dict(default="/balancer-manager/", type='str'), + member_host=dict(type='str'), + state=dict(type='str'), + tls=dict(default=False, type='bool'), + validate_certs=dict(default=True, type='bool') + ), + supports_check_mode=True + ) + + if HAS_BEAUTIFULSOUP is False: + module.fail_json(msg=missing_required_lib('BeautifulSoup'), exception=BEAUTIFUL_SOUP_IMP_ERR) + + if module.params['state'] is not None: + states = module.params['state'].split(',') + if (len(states) > 1) and (("present" in states) or ("enabled" in states)): + module.fail_json(msg="state present/enabled is mutually exclusive with other states!") + else: + for _state in states: + if _state not in ['present', 'absent', 'enabled', 'disabled', 'drained', 'hot_standby', 'ignore_errors']: + module.fail_json( + msg="State can only take values amongst 'present', 'absent', 'enabled', 'disabled', 'drained', 'hot_standby', 'ignore_errors'." + ) + else: + states = ['None'] + + mybalancer = Balancer(module.params['balancer_vhost'], + module.params['balancer_url_suffix'], + module=module, + tls=module.params['tls']) + + if module.params['member_host'] is None: + json_output_list = [] + for member in mybalancer.members: + json_output_list.append({ + "host": member.host, + "status": member.status, + "protocol": member.protocol, + "port": member.port, + "path": member.path, + "attributes": member.attributes, + "management_url": member.management_url, + "balancer_url": member.balancer_url + }) + module.exit_json( + changed=False, + members=json_output_list + ) + else: + changed = False + member_exists = False + member_status = {'disabled': False, 'drained': False, 'hot_standby': False, 'ignore_errors': False} + for mode in member_status.keys(): + for state in states: + if mode == state: + member_status[mode] = True + elif mode == 'disabled' and state == 'absent': + member_status[mode] = True + + for member in mybalancer.members: + if str(member.host) == str(module.params['member_host']): + member_exists = True + if module.params['state'] is not None: + member_status_before = member.status + if not module.check_mode: + member_status_after = member.status = member_status + else: + member_status_after = member_status + if member_status_before != member_status_after: + changed = True + json_output = { + "host": member.host, + "status": member.status, + "protocol": member.protocol, + "port": member.port, + "path": member.path, + "attributes": member.attributes, + "management_url": member.management_url, + "balancer_url": member.balancer_url + } + if member_exists: + module.exit_json( + changed=changed, + member=json_output + ) + else: + module.fail_json(msg=str(module.params['member_host']) + ' is not a member of the balancer ' + str(module.params['balancer_vhost']) + '!') + + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib +from ansible.module_utils.urls import fetch_url +if __name__ == '__main__': + main() diff --git a/plugins/modules/web_infrastructure/apache2_module.py 
b/plugins/modules/web_infrastructure/apache2_module.py
new file mode 100644
index 0000000000..453bc11ae2
--- /dev/null
+++ b/plugins/modules/web_infrastructure/apache2_module.py
@@ -0,0 +1,263 @@
+#!/usr/bin/python
+# coding: utf-8 -*-
+
+# (c) 2013-2014, Christian Berendt
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: apache2_module
+author:
+  - Christian Berendt (@berendt)
+  - Ralf Hertel (@n0trax)
+  - Robin Roth (@robinro)
+short_description: Enables/disables a module of the Apache2 webserver.
+description:
+  - Enables or disables a specified module of the Apache2 webserver.
+options:
+  name:
+    description:
+      - Name of the module to enable/disable as given to C(a2enmod/a2dismod).
+    required: true
+  identifier:
+    description:
+      - Identifier of the module as listed by C(apache2ctl -M).
+        This is optional and usually determined automatically by the common convention of
+        appending C(_module) to I(name) as well as custom exceptions for popular modules.
+    required: False
+  force:
+    description:
+      - Force disabling of default modules and override Debian warnings.
+    required: false
+    type: bool
+    default: False
+  state:
+    description:
+      - Desired state of the module.
+    choices: ['present', 'absent']
+    default: present
+  ignore_configcheck:
+    description:
+      - Ignore configuration checks about inconsistent module configuration. Especially for mpm_* modules.
+    type: bool
+    default: False
+requirements: ["a2enmod","a2dismod"]
+'''
+
+EXAMPLES = '''
+# enables the Apache2 module "wsgi"
+- apache2_module:
+    state: present
+    name: wsgi
+# disables the Apache2 module "wsgi"
+- apache2_module:
+    state: absent
+    name: wsgi
+# disable default modules for Debian
+- apache2_module:
+    state: absent
+    name: autoindex
+    force: True
+# disable mpm_worker and ignore warnings about missing mpm module
+- apache2_module:
+    state: absent
+    name: mpm_worker
+    ignore_configcheck: True
+# enable dump_io module, which is identified as dumpio_module inside apache2
+- apache2_module:
+    state: present
+    name: dump_io
+    identifier: dumpio_module
+'''
+
+RETURN = '''
+result:
+    description: message about action taken
+    returned: always
+    type: str
+warnings:
+    description: list of warning messages
+    returned: when needed
+    type: list
+rc:
+    description: return code of underlying command
+    returned: failed
+    type: int
+stdout:
+    description: stdout of underlying command
+    returned: failed
+    type: str
+stderr:
+    description: stderr of underlying command
+    returned: failed
+    type: str
+'''
+
+import re
+
+# import module snippets
+from ansible.module_utils.basic import AnsibleModule
+
+
+def _run_threaded(module):
+    control_binary = _get_ctl_binary(module)
+
+    result, stdout, stderr = module.run_command("%s -V" % control_binary)
+
+    return bool(re.search(r'threaded:[ ]*yes', stdout))
+
+
+def _get_ctl_binary(module):
+    for command in ['apache2ctl', 'apachectl']:
+        ctl_binary = module.get_bin_path(command)
+        if ctl_binary is not None:
+            return ctl_binary
+
+    module.fail_json(
+        msg="Neither apache2ctl nor apachectl could be found."
+            " At least one apache control binary is necessary."
+ ) + + +def _module_is_enabled(module): + control_binary = _get_ctl_binary(module) + result, stdout, stderr = module.run_command("%s -M" % control_binary) + + if result != 0: + error_msg = "Error executing %s: %s" % (control_binary, stderr) + if module.params['ignore_configcheck']: + if 'AH00534' in stderr and 'mpm_' in module.params['name']: + module.warnings.append( + "No MPM module loaded! apache2 reload AND other module actions" + " will fail if no MPM module is loaded immediately." + ) + else: + module.warnings.append(error_msg) + return False + else: + module.fail_json(msg=error_msg) + + searchstring = ' ' + module.params['identifier'] + return searchstring in stdout + + +def create_apache_identifier(name): + """ + By convention if a module is loaded via name, it appears in apache2ctl -M as + name_module. + + Some modules don't follow this convention and we use replacements for those.""" + + # a2enmod name replacement to apache2ctl -M names + text_workarounds = [ + ('shib2', 'mod_shib'), + ('evasive', 'evasive20_module'), + ] + + # re expressions to extract subparts of names + re_workarounds = [ + ('php', r'^(php\d)\.'), + ] + + for a2enmod_spelling, module_name in text_workarounds: + if a2enmod_spelling in name: + return module_name + + for search, reexpr in re_workarounds: + if search in name: + try: + rematch = re.search(reexpr, name) + return rematch.group(1) + '_module' + except AttributeError: + pass + + return name + '_module' + + +def _set_state(module, state): + name = module.params['name'] + force = module.params['force'] + + want_enabled = state == 'present' + state_string = {'present': 'enabled', 'absent': 'disabled'}[state] + a2mod_binary = {'present': 'a2enmod', 'absent': 'a2dismod'}[state] + success_msg = "Module %s %s" % (name, state_string) + + if _module_is_enabled(module) != want_enabled: + if module.check_mode: + module.exit_json(changed=True, + result=success_msg, + warnings=module.warnings) + + a2mod_binary = module.get_bin_path(a2mod_binary) + if a2mod_binary is None: + module.fail_json(msg="%s not found. Perhaps this system does not use %s to manage apache" % (a2mod_binary, a2mod_binary)) + + if not want_enabled and force: + # force exists only for a2dismod on debian + a2mod_binary += ' -f' + + result, stdout, stderr = module.run_command("%s %s" % (a2mod_binary, name)) + + if _module_is_enabled(module) == want_enabled: + module.exit_json(changed=True, + result=success_msg, + warnings=module.warnings) + else: + msg = ( + 'Failed to set module {name} to {state}:\n' + '{stdout}\n' + 'Maybe the module identifier ({identifier}) was guessed incorrectly.' + 'Consider setting the "identifier" option.' + ).format( + name=name, + state=state_string, + stdout=stdout, + identifier=module.params['identifier'] + ) + module.fail_json(msg=msg, + rc=result, + stdout=stdout, + stderr=stderr) + else: + module.exit_json(changed=False, + result=success_msg, + warnings=module.warnings) + + +def main(): + module = AnsibleModule( + argument_spec=dict( + name=dict(required=True), + identifier=dict(required=False, type='str'), + force=dict(required=False, type='bool', default=False), + state=dict(default='present', choices=['absent', 'present']), + ignore_configcheck=dict(required=False, type='bool', default=False), + ), + supports_check_mode=True, + ) + + module.warnings = [] + + name = module.params['name'] + if name == 'cgi' and _run_threaded(module): + module.fail_json(msg="Your MPM seems to be threaded. No automatic actions on module %s possible." 
% name) + + if not module.params['identifier']: + module.params['identifier'] = create_apache_identifier(module.params['name']) + + if module.params['state'] in ['present', 'absent']: + _set_state(module, module.params['state']) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/web_infrastructure/deploy_helper.py b/plugins/modules/web_infrastructure/deploy_helper.py new file mode 100644 index 0000000000..0b5b3fc29b --- /dev/null +++ b/plugins/modules/web_infrastructure/deploy_helper.py @@ -0,0 +1,520 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2014, Jasper N. Brouwer +# (c) 2014, Ramon de la Fuente +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: deploy_helper +author: "Ramon de la Fuente (@ramondelafuente)" +short_description: Manages some of the steps common in deploying projects. +description: + - The Deploy Helper manages some of the steps common in deploying software. + It creates a folder structure, manages a symlink for the current release + and cleans up old releases. + - "Running it with the C(state=query) or C(state=present) will return the C(deploy_helper) fact. + C(project_path), whatever you set in the path parameter, + C(current_path), the path to the symlink that points to the active release, + C(releases_path), the path to the folder to keep releases in, + C(shared_path), the path to the folder to keep shared resources in, + C(unfinished_filename), the file to check for to recognize unfinished builds, + C(previous_release), the release the 'current' symlink is pointing to, + C(previous_release_path), the full path to the 'current' symlink target, + C(new_release), either the 'release' parameter or a generated timestamp, + C(new_release_path), the path to the new release folder (not created by the module)." + +options: + path: + required: True + aliases: ['dest'] + description: + - the root path of the project. Alias I(dest). + Returned in the C(deploy_helper.project_path) fact. + + state: + description: + - the state of the project. + C(query) will only gather facts, + C(present) will create the project I(root) folder, and in it the I(releases) and I(shared) folders, + C(finalize) will remove the unfinished_filename file, create a symlink to the newly + deployed release and optionally clean old releases, + C(clean) will remove failed & old releases, + C(absent) will remove the project folder (synonymous to the M(file) module with C(state=absent)) + choices: [ present, finalize, absent, clean, query ] + default: present + + release: + description: + - the release version that is being deployed. Defaults to a timestamp format %Y%m%d%H%M%S (i.e. '20141119223359'). + This parameter is optional during C(state=present), but needs to be set explicitly for C(state=finalize). + You can use the generated fact C(release={{ deploy_helper.new_release }}). + + releases_path: + description: + - the name of the folder that will hold the releases. This can be relative to C(path) or absolute. + Returned in the C(deploy_helper.releases_path) fact. + default: releases + + shared_path: + description: + - the name of the folder that will hold the shared resources. This can be relative to C(path) or absolute. + If this is set to an empty string, no shared folder will be created. 
+ Returned in the C(deploy_helper.shared_path) fact. + default: shared + + current_path: + description: + - the name of the symlink that is created when the deploy is finalized. Used in C(finalize) and C(clean). + Returned in the C(deploy_helper.current_path) fact. + default: current + + unfinished_filename: + description: + - the name of the file that indicates a deploy has not finished. All folders in the releases_path that + contain this file will be deleted on C(state=finalize) with clean=True, or C(state=clean). This file is + automatically deleted from the I(new_release_path) during C(state=finalize). + default: DEPLOY_UNFINISHED + + clean: + description: + - Whether to run the clean procedure in case of C(state=finalize). + type: bool + default: 'yes' + + keep_releases: + description: + - the number of old releases to keep when cleaning. Used in C(finalize) and C(clean). Any unfinished builds + will be deleted first, so only correct releases will count. The current version will not count. + default: 5 + +notes: + - Facts are only returned for C(state=query) and C(state=present). If you use both, you should pass any overridden + parameters to both calls, otherwise the second call will overwrite the facts of the first one. + - When using C(state=clean), the releases are ordered by I(creation date). You should be able to switch to a + new naming strategy without problems. + - Because of the default behaviour of generating the I(new_release) fact, this module will not be idempotent + unless you pass your own release name with C(release). Due to the nature of deploying software, this should not + be much of a problem. +''' + +EXAMPLES = ''' + +# General explanation, starting with an example folder structure for a project: + +# root: +# releases: +# - 20140415234508 +# - 20140415235146 +# - 20140416082818 +# +# shared: +# - sessions +# - uploads +# +# current: releases/20140416082818 + + +# The 'releases' folder holds all the available releases. A release is a complete build of the application being +# deployed. This can be a clone of a repository for example, or a sync of a local folder on your filesystem. +# Having timestamped folders is one way of having distinct releases, but you could choose your own strategy like +# git tags or commit hashes. +# +# During a deploy, a new folder should be created in the releases folder and any build steps required should be +# performed. Once the new build is ready, the deploy procedure is 'finalized' by replacing the 'current' symlink +# with a link to this build. +# +# The 'shared' folder holds any resource that is shared between releases. Examples of this are web-server +# session files, or files uploaded by users of your application. It's quite common to have symlinks from a release +# folder pointing to a shared/subfolder, and creating these links would be automated as part of the build steps. +# +# The 'current' symlink points to one of the releases. Probably the latest one, unless a deploy is in progress. +# The web-server's root for the project will go through this symlink, so the 'downtime' when switching to a new +# release is reduced to the time it takes to switch the link. +# +# To distinguish between successful builds and unfinished ones, a file can be placed in the folder of the release +# that is currently in progress. The existence of this file will mark it as unfinished, and allow an automated +# procedure to remove it during cleanup. 
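+#
+# The switch itself is atomic: on 'finalize' the module first creates a temporary
+# symlink next to 'current' and then renames it over the old link (symlink followed
+# by rename), so the web-server never sees a missing or dangling 'current' link
+# while a deploy is being activated.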
+ + +# Typical usage +- name: Initialize the deploy root and gather facts + deploy_helper: + path: /path/to/root +- name: Clone the project to the new release folder + git: + repo: git://foosball.example.org/path/to/repo.git + dest: '{{ deploy_helper.new_release_path }}' + version: v1.1.1 +- name: Add an unfinished file, to allow cleanup on successful finalize + file: + path: '{{ deploy_helper.new_release_path }}/{{ deploy_helper.unfinished_filename }}' + state: touch +- name: Perform some build steps, like running your dependency manager for example + composer: + command: install + working_dir: '{{ deploy_helper.new_release_path }}' +- name: Create some folders in the shared folder + file: + path: '{{ deploy_helper.shared_path }}/{{ item }}' + state: directory + with_items: + - sessions + - uploads +- name: Add symlinks from the new release to the shared folder + file: + path: '{{ deploy_helper.new_release_path }}/{{ item.path }}' + src: '{{ deploy_helper.shared_path }}/{{ item.src }}' + state: link + with_items: + - path: app/sessions + src: sessions + - path: web/uploads + src: uploads +- name: Finalize the deploy, removing the unfinished file and switching the symlink + deploy_helper: + path: /path/to/root + release: '{{ deploy_helper.new_release }}' + state: finalize + +# Retrieving facts before running a deploy +- name: Run 'state=query' to gather facts without changing anything + deploy_helper: + path: /path/to/root + state: query +# Remember to set the 'release' parameter when you actually call 'state=present' later +- name: Initialize the deploy root + deploy_helper: + path: /path/to/root + release: '{{ deploy_helper.new_release }}' + state: present + +# all paths can be absolute or relative (to the 'path' parameter) +- deploy_helper: + path: /path/to/root + releases_path: /var/www/project/releases + shared_path: /var/www/shared + current_path: /var/www/active + +# Using your own naming strategy for releases (a version tag in this case): +- deploy_helper: + path: /path/to/root + release: v1.1.1 + state: present +- deploy_helper: + path: /path/to/root + release: '{{ deploy_helper.new_release }}' + state: finalize + +# Using a different unfinished_filename: +- deploy_helper: + path: /path/to/root + unfinished_filename: README.md + release: '{{ deploy_helper.new_release }}' + state: finalize + +# Postponing the cleanup of older builds: +- deploy_helper: + path: /path/to/root + release: '{{ deploy_helper.new_release }}' + state: finalize + clean: False +- deploy_helper: + path: /path/to/root + state: clean +# Or running the cleanup ahead of the new deploy +- deploy_helper: + path: /path/to/root + state: clean +- deploy_helper: + path: /path/to/root + state: present + +# Keeping more old releases: +- deploy_helper: + path: /path/to/root + release: '{{ deploy_helper.new_release }}' + state: finalize + keep_releases: 10 +# Or, if you use 'clean=false' on finalize: +- deploy_helper: + path: /path/to/root + state: clean + keep_releases: 10 + +# Removing the entire project root folder +- deploy_helper: + path: /path/to/root + state: absent + +# Debugging the facts returned by the module +- deploy_helper: + path: /path/to/root +- debug: + var: deploy_helper +''' +import os +import shutil +import time +import traceback + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_native + + +class DeployHelper(object): + + def __init__(self, module): + self.module = module + self.file_args = module.load_file_common_arguments(module.params) + + self.clean = 
module.params['clean']
+        self.current_path = module.params['current_path']
+        self.keep_releases = module.params['keep_releases']
+        self.path = module.params['path']
+        self.release = module.params['release']
+        self.releases_path = module.params['releases_path']
+        self.shared_path = module.params['shared_path']
+        self.state = module.params['state']
+        self.unfinished_filename = module.params['unfinished_filename']
+
+    def gather_facts(self):
+        current_path = os.path.join(self.path, self.current_path)
+        releases_path = os.path.join(self.path, self.releases_path)
+        if self.shared_path:
+            shared_path = os.path.join(self.path, self.shared_path)
+        else:
+            shared_path = None
+
+        previous_release, previous_release_path = self._get_last_release(current_path)
+
+        if not self.release and (self.state == 'query' or self.state == 'present'):
+            self.release = time.strftime("%Y%m%d%H%M%S")
+
+        if self.release:
+            new_release_path = os.path.join(releases_path, self.release)
+        else:
+            new_release_path = None
+
+        return {
+            'project_path': self.path,
+            'current_path': current_path,
+            'releases_path': releases_path,
+            'shared_path': shared_path,
+            'previous_release': previous_release,
+            'previous_release_path': previous_release_path,
+            'new_release': self.release,
+            'new_release_path': new_release_path,
+            'unfinished_filename': self.unfinished_filename
+        }
+
+    def delete_path(self, path):
+        if not os.path.lexists(path):
+            return False
+
+        if not os.path.isdir(path):
+            self.module.fail_json(msg="%s exists but is not a directory" % path)
+
+        if not self.module.check_mode:
+            try:
+                shutil.rmtree(path, ignore_errors=False)
+            except Exception as e:
+                self.module.fail_json(msg="rmtree failed: %s" % to_native(e), exception=traceback.format_exc())
+
+        return True
+
+    def create_path(self, path):
+        changed = False
+
+        if not os.path.lexists(path):
+            changed = True
+            if not self.module.check_mode:
+                os.makedirs(path)
+
+        elif not os.path.isdir(path):
+            self.module.fail_json(msg="%s exists but is not a directory" % path)
+
+        changed += self.module.set_directory_attributes_if_different(self._get_file_args(path), changed)
+
+        return changed
+
+    def check_link(self, path):
+        if os.path.lexists(path):
+            if not os.path.islink(path):
+                self.module.fail_json(msg="%s exists but is not a symbolic link" % path)
+
+    def create_link(self, source, link_name):
+        changed = False
+
+        if os.path.islink(link_name):
+            norm_link = os.path.normpath(os.path.realpath(link_name))
+            norm_source = os.path.normpath(os.path.realpath(source))
+            if norm_link == norm_source:
+                changed = False
+            else:
+                changed = True
+                if not self.module.check_mode:
+                    if not os.path.lexists(source):
+                        self.module.fail_json(msg="the symlink target %s does not exist" % source)
+                    # swap the link atomically: create a temporary link, then rename it into place
+                    tmp_link_name = link_name + '.'
+ self.unfinished_filename + if os.path.islink(tmp_link_name): + os.unlink(tmp_link_name) + os.symlink(source, tmp_link_name) + os.rename(tmp_link_name, link_name) + else: + changed = True + if not self.module.check_mode: + os.symlink(source, link_name) + + return changed + + def remove_unfinished_file(self, new_release_path): + changed = False + unfinished_file_path = os.path.join(new_release_path, self.unfinished_filename) + if os.path.lexists(unfinished_file_path): + changed = True + if not self.module.check_mode: + os.remove(unfinished_file_path) + + return changed + + def remove_unfinished_builds(self, releases_path): + changes = 0 + + for release in os.listdir(releases_path): + if os.path.isfile(os.path.join(releases_path, release, self.unfinished_filename)): + if self.module.check_mode: + changes += 1 + else: + changes += self.delete_path(os.path.join(releases_path, release)) + + return changes + + def remove_unfinished_link(self, path): + changed = False + + tmp_link_name = os.path.join(path, self.release + '.' + self.unfinished_filename) + if not self.module.check_mode and os.path.exists(tmp_link_name): + changed = True + os.remove(tmp_link_name) + + return changed + + def cleanup(self, releases_path, reserve_version): + changes = 0 + + if os.path.lexists(releases_path): + releases = [f for f in os.listdir(releases_path) if os.path.isdir(os.path.join(releases_path, f))] + try: + releases.remove(reserve_version) + except ValueError: + pass + + if not self.module.check_mode: + releases.sort(key=lambda x: os.path.getctime(os.path.join(releases_path, x)), reverse=True) + for release in releases[self.keep_releases:]: + changes += self.delete_path(os.path.join(releases_path, release)) + elif len(releases) > self.keep_releases: + changes += (len(releases) - self.keep_releases) + + return changes + + def _get_file_args(self, path): + file_args = self.file_args.copy() + file_args['path'] = path + return file_args + + def _get_last_release(self, current_path): + previous_release = None + previous_release_path = None + + if os.path.lexists(current_path): + previous_release_path = os.path.realpath(current_path) + previous_release = os.path.basename(previous_release_path) + + return previous_release, previous_release_path + + +def main(): + + module = AnsibleModule( + argument_spec=dict( + path=dict(aliases=['dest'], required=True, type='path'), + release=dict(required=False, type='str', default=None), + releases_path=dict(required=False, type='str', default='releases'), + shared_path=dict(required=False, type='path', default='shared'), + current_path=dict(required=False, type='path', default='current'), + keep_releases=dict(required=False, type='int', default=5), + clean=dict(required=False, type='bool', default=True), + unfinished_filename=dict(required=False, type='str', default='DEPLOY_UNFINISHED'), + state=dict(required=False, choices=['present', 'absent', 'clean', 'finalize', 'query'], default='present') + ), + add_file_common_args=True, + supports_check_mode=True + ) + + deploy_helper = DeployHelper(module) + facts = deploy_helper.gather_facts() + + result = { + 'state': deploy_helper.state + } + + changes = 0 + + if deploy_helper.state == 'query': + result['ansible_facts'] = {'deploy_helper': facts} + + elif deploy_helper.state == 'present': + deploy_helper.check_link(facts['current_path']) + changes += deploy_helper.create_path(facts['project_path']) + changes += deploy_helper.create_path(facts['releases_path']) + if deploy_helper.shared_path: + changes += 
deploy_helper.create_path(facts['shared_path']) + + result['ansible_facts'] = {'deploy_helper': facts} + + elif deploy_helper.state == 'finalize': + if not deploy_helper.release: + module.fail_json(msg="'release' is a required parameter for state=finalize (try the 'deploy_helper.new_release' fact)") + if deploy_helper.keep_releases <= 0: + module.fail_json(msg="'keep_releases' should be at least 1") + + changes += deploy_helper.remove_unfinished_file(facts['new_release_path']) + changes += deploy_helper.create_link(facts['new_release_path'], facts['current_path']) + if deploy_helper.clean: + changes += deploy_helper.remove_unfinished_link(facts['project_path']) + changes += deploy_helper.remove_unfinished_builds(facts['releases_path']) + changes += deploy_helper.cleanup(facts['releases_path'], facts['new_release']) + + elif deploy_helper.state == 'clean': + changes += deploy_helper.remove_unfinished_link(facts['project_path']) + changes += deploy_helper.remove_unfinished_builds(facts['releases_path']) + changes += deploy_helper.cleanup(facts['releases_path'], facts['new_release']) + + elif deploy_helper.state == 'absent': + # destroy the facts + result['ansible_facts'] = {'deploy_helper': []} + changes += deploy_helper.delete_path(facts['project_path']) + + if changes > 0: + result['changed'] = True + else: + result['changed'] = False + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/web_infrastructure/django_manage.py b/plugins/modules/web_infrastructure/django_manage.py new file mode 100644 index 0000000000..ccd4c08cdb --- /dev/null +++ b/plugins/modules/web_infrastructure/django_manage.py @@ -0,0 +1,310 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2013, Scott Anderson +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: django_manage +short_description: Manages a Django application. +description: + - Manages a Django application using the I(manage.py) application frontend to I(django-admin). With the I(virtualenv) parameter, all + management commands will be executed by the given I(virtualenv) installation. +options: + command: + choices: [ 'cleanup', 'collectstatic', 'flush', 'loaddata', 'migrate', 'runfcgi', 'syncdb', 'test', 'validate', ] + description: + - The name of the Django management command to run. Built in commands are cleanup, collectstatic, flush, loaddata, migrate, runfcgi, syncdb, + test, and validate. + - Other commands can be entered, but will fail if they're unknown to Django. Other commands that may prompt for user input should be run + with the I(--noinput) flag. + required: true + app_path: + description: + - The path to the root of the Django application where B(manage.py) lives. + required: true + settings: + description: + - The Python path to the application's settings module, such as 'myapp.settings'. + required: false + pythonpath: + description: + - A directory to add to the Python path. Typically used to include the settings module if it is located external to the application directory. + required: false + virtualenv: + description: + - An optional path to a I(virtualenv) installation to use while running the manage application. + aliases: [virtualenv] + apps: + description: + - A list of space-delimited apps to target. 
Used by the 'test' command. + required: false + cache_table: + description: + - The name of the table used for database-backed caching. Used by the 'createcachetable' command. + required: false + clear: + description: + - Clear the existing files before trying to copy or link the original file. + - Used only with the 'collectstatic' command. The C(--noinput) argument will be added automatically. + required: false + default: no + type: bool + database: + description: + - The database to target. Used by the 'createcachetable', 'flush', 'loaddata', and 'syncdb' commands. + required: false + failfast: + description: + - Fail the command immediately if a test fails. Used by the 'test' command. + required: false + default: "no" + type: bool + fixtures: + description: + - A space-delimited list of fixture file names to load in the database. B(Required) by the 'loaddata' command. + required: false + skip: + description: + - Will skip over out-of-order missing migrations, you can only use this parameter with I(migrate) + required: false + type: bool + merge: + description: + - Will run out-of-order or missing migrations as they are not rollback migrations, you can only use this parameter with 'migrate' command + required: false + type: bool + link: + description: + - Will create links to the files instead of copying them, you can only use this parameter with 'collectstatic' command + required: false + type: bool +notes: + - I(virtualenv) (U(http://www.virtualenv.org)) must be installed on the remote host if the virtualenv parameter is specified. + - This module will create a virtualenv if the virtualenv parameter is specified and a virtualenv does not already exist at the given location. + - This module assumes English error messages for the 'createcachetable' command to detect table existence, unfortunately. + - To be able to use the migrate command with django versions < 1.7, you must have south installed and added as an app in your settings. + - To be able to use the collectstatic command, you must have enabled staticfiles in your settings. + - As of ansible 2.x, your I(manage.py) application must be executable (rwxr-xr-x), and must have a valid I(shebang), i.e. "#!/usr/bin/env python", + for invoking the appropriate Python interpreter. +requirements: [ "virtualenv", "django" ] +author: "Scott Anderson (@tastychutney)" +''' + +EXAMPLES = """ +# Run cleanup on the application installed in 'django_dir'. +- django_manage: + command: cleanup + app_path: "{{ django_dir }}" + +# Load the initial_data fixture into the application +- django_manage: + command: loaddata + app_path: "{{ django_dir }}" + fixtures: "{{ initial_data }}" + +# Run syncdb on the application +- django_manage: + command: syncdb + app_path: "{{ django_dir }}" + settings: "{{ settings_app_name }}" + pythonpath: "{{ settings_dir }}" + virtualenv: "{{ virtualenv_dir }}" + +# Run the SmokeTest test case from the main app. Useful for testing deploys. +- django_manage: + command: test + app_path: "{{ django_dir }}" + apps: main.SmokeTest + +# Create an initial superuser. 
+- django_manage:
+    command: "createsuperuser --noinput --username=admin --email=admin@example.com"
+    app_path: "{{ django_dir }}"
+"""
+
+import os
+import sys
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def _fail(module, cmd, out, err, **kwargs):
+    msg = ''
+    if out:
+        msg += "stdout: %s" % (out, )
+    if err:
+        msg += "\nstderr: %s" % (err, )
+    module.fail_json(cmd=cmd, msg=msg, **kwargs)
+
+
+def _ensure_virtualenv(module):
+
+    venv_param = module.params['virtualenv']
+    if venv_param is None:
+        return
+
+    vbin = os.path.join(venv_param, 'bin')
+    activate = os.path.join(vbin, 'activate')
+
+    if not os.path.exists(activate):
+        virtualenv = module.get_bin_path('virtualenv', True)
+        vcmd = [virtualenv, venv_param]
+        rc, out_venv, err_venv = module.run_command(vcmd)
+        if rc != 0:
+            _fail(module, vcmd, out_venv, err_venv)
+
+    os.environ["PATH"] = "%s:%s" % (vbin, os.environ["PATH"])
+    os.environ["VIRTUAL_ENV"] = venv_param
+
+
+def createcachetable_filter_output(line):
+    return "Already exists" not in line
+
+
+def flush_filter_output(line):
+    return "Installed" in line and "Installed 0 object" not in line
+
+
+def loaddata_filter_output(line):
+    return "Installed" in line and "Installed 0 object" not in line
+
+
+def syncdb_filter_output(line):
+    return ("Creating table " in line) or ("Installed" in line and "Installed 0 object" not in line)
+
+
+def migrate_filter_output(line):
+    return ("Migrating forwards " in line) or ("Installed" in line and "Installed 0 object" not in line) or ("Applying" in line)
+
+
+def collectstatic_filter_output(line):
+    return line and "0 static files" not in line
+
+
+def main():
+    command_allowed_param_map = dict(
+        cleanup=(),
+        createcachetable=('cache_table', 'database', ),
+        flush=('database', ),
+        loaddata=('database', 'fixtures', ),
+        syncdb=('database', ),
+        test=('failfast', 'testrunner', 'liveserver', 'apps', ),
+        validate=(),
+        migrate=('apps', 'skip', 'merge', 'database',),
+        collectstatic=('clear', 'link', ),
+    )
+
+    command_required_param_map = dict(
+        loaddata=('fixtures', ),
+    )
+
+    # forces --noinput on every command that needs it
+    noinput_commands = (
+        'flush',
+        'syncdb',
+        'migrate',
+        'test',
+        'collectstatic',
+    )
+
+    # These params are allowed for certain commands only
+    specific_params = ('apps', 'clear', 'database', 'failfast', 'fixtures', 'liveserver', 'testrunner')
+
+    # These params are automatically added to the command if present
+    general_params = ('settings', 'pythonpath', 'database',)
+    specific_boolean_params = ('clear', 'failfast', 'skip', 'merge', 'link')
+    end_of_command_params = ('apps', 'cache_table', 'fixtures')
+
+    module = AnsibleModule(
+        argument_spec=dict(
+            command=dict(default=None, required=True),
+            app_path=dict(default=None, required=True, type='path'),
+            settings=dict(default=None, required=False),
+            pythonpath=dict(default=None, required=False, aliases=['python_path']),
+            virtualenv=dict(default=None, required=False, type='path', aliases=['virtual_env']),
+
+            apps=dict(default=None, required=False),
+            cache_table=dict(default=None, required=False),
+            clear=dict(default=None, required=False, type='bool'),
+            database=dict(default=None, required=False),
+            failfast=dict(default='no', required=False, type='bool', aliases=['fail_fast']),
+            fixtures=dict(default=None, required=False),
+            liveserver=dict(default=None, required=False, aliases=['live_server']),
+            testrunner=dict(default=None, required=False, aliases=['test_runner']),
+            skip=dict(default=None,
required=False, type='bool'), + merge=dict(default=None, required=False, type='bool'), + link=dict(default=None, required=False, type='bool'), + ), + ) + + command = module.params['command'] + app_path = module.params['app_path'] + virtualenv = module.params['virtualenv'] + + for param in specific_params: + value = module.params[param] + if param in specific_boolean_params: + value = module.boolean(value) + if value and param not in command_allowed_param_map[command]: + module.fail_json(msg='%s param is incompatible with command=%s' % (param, command)) + + for param in command_required_param_map.get(command, ()): + if not module.params[param]: + module.fail_json(msg='%s param is required for command=%s' % (param, command)) + + _ensure_virtualenv(module) + + cmd = "./manage.py %s" % (command, ) + + if command in noinput_commands: + cmd = '%s --noinput' % cmd + + for param in general_params: + if module.params[param]: + cmd = '%s --%s=%s' % (cmd, param, module.params[param]) + + for param in specific_boolean_params: + if module.boolean(module.params[param]): + cmd = '%s --%s' % (cmd, param) + + # these params always get tacked on the end of the command + for param in end_of_command_params: + if module.params[param]: + cmd = '%s %s' % (cmd, module.params[param]) + + rc, out, err = module.run_command(cmd, cwd=app_path) + if rc != 0: + if command == 'createcachetable' and 'table' in err and 'already exists' in err: + out = 'Already exists.' + else: + if "Unknown command:" in err: + _fail(module, cmd, err, "Unknown django command: %s" % command) + _fail(module, cmd, out, err, path=os.environ["PATH"], syspath=sys.path) + + changed = False + + lines = out.split('\n') + filt = globals().get(command + "_filter_output", None) + if filt: + filtered_output = list(filter(filt, lines)) + if len(filtered_output): + changed = True + + module.exit_json(changed=changed, out=out, cmd=cmd, app_path=app_path, virtualenv=virtualenv, + settings=module.params['settings'], pythonpath=module.params['pythonpath']) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/web_infrastructure/ejabberd_user.py b/plugins/modules/web_infrastructure/ejabberd_user.py new file mode 100644 index 0000000000..0388481275 --- /dev/null +++ b/plugins/modules/web_infrastructure/ejabberd_user.py @@ -0,0 +1,219 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (C) 2013, Peter Sprygada +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: ejabberd_user +author: "Peter Sprygada (@privateip)" +short_description: Manages users for ejabberd servers +requirements: + - ejabberd with mod_admin_extra +description: + - This module provides user management for ejabberd servers +options: + username: + description: + - the name of the user to manage + required: true + host: + description: + - the ejabberd host associated with this username + required: true + password: + description: + - the password to assign to the username + required: false + logging: + description: + - enables or disables the local syslog facility for this module + required: false + default: false + type: bool + state: + description: + - describe the desired state of the user to be managed + required: false + default: 'present' + choices: [ 'present', 'absent' ] +notes: + - 
Password parameter is required for state == present only
+  - Passwords must be stored in clear text for this release
+  - The ejabberd configuration file must include mod_admin_extra as a module.
+'''
+EXAMPLES = '''
+# Example playbook entries using the ejabberd_user module to manage user state.
+
+- name: create a user if it does not exist
+  ejabberd_user:
+    username: test
+    host: server
+    password: password
+
+- name: delete a user if it exists
+  ejabberd_user:
+    username: test
+    host: server
+    state: absent
+'''
+
+import syslog
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+class EjabberdUserException(Exception):
+    """ Base exception for EjabberdUser class object """
+    pass
+
+
+class EjabberdUser(object):
+    """ This object represents a user resource for an ejabberd server. The
+    object manages user creation and deletion using ejabberdctl. The following
+    commands are currently supported:
+        * ejabberdctl register
+        * ejabberdctl unregister
+    """
+
+    def __init__(self, module):
+        self.module = module
+        self.logging = module.params.get('logging')
+        self.state = module.params.get('state')
+        self.host = module.params.get('host')
+        self.user = module.params.get('username')
+        self.pwd = module.params.get('password')
+
+    @property
+    def changed(self):
+        """ This method will check the current user and see if the password has
+        changed. It returns a truthy value (the non-zero return code) when the
+        user does not match the supplied credentials and a falsy one when it does.
+        """
+        try:
+            options = [self.user, self.host, self.pwd]
+            # check_password exits 0 when the password matches, non-zero otherwise
+            (rc, out, err) = self.run_command('check_password', options)
+        except EjabberdUserException:
+            (rc, out, err) = (1, None, "required attribute(s) missing")
+        return rc
+
+    @property
+    def exists(self):
+        """ This method will check to see if the supplied username exists for
+        the host specified.
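+        ejabberdctl check_account exits 0 when the account exists, which is
+        mapped to a boolean below.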
If the user exists True is returned, otherwise False + is returned + """ + try: + options = [self.user, self.host] + (rc, out, err) = self.run_command('check_account', options) + except EjabberdUserException: + (rc, out, err) = (1, None, "required attribute(s) missing") + return not bool(int(rc)) + + def log(self, entry): + """ This method will log information to the local syslog facility """ + if self.logging: + syslog.openlog('ansible-%s' % self.module._name) + syslog.syslog(syslog.LOG_NOTICE, entry) + + def run_command(self, cmd, options): + """ This method will run the any command specified and return the + returns using the Ansible common module + """ + if not all(options): + raise EjabberdUserException + + cmd = 'ejabberdctl %s ' % cmd + cmd += " ".join(options) + self.log('command: %s' % cmd) + return self.module.run_command(cmd.split()) + + def update(self): + """ The update method will update the credentials for the user provided + """ + try: + options = [self.user, self.host, self.pwd] + (rc, out, err) = self.run_command('change_password', options) + except EjabberdUserException: + (rc, out, err) = (1, None, "required attribute(s) missing") + return (rc, out, err) + + def create(self): + """ The create method will create a new user on the host with the + password provided + """ + try: + options = [self.user, self.host, self.pwd] + (rc, out, err) = self.run_command('register', options) + except EjabberdUserException: + (rc, out, err) = (1, None, "required attribute(s) missing") + return (rc, out, err) + + def delete(self): + """ The delete method will delete the user from the host + """ + try: + options = [self.user, self.host] + (rc, out, err) = self.run_command('unregister', options) + except EjabberdUserException: + (rc, out, err) = (1, None, "required attribute(s) missing") + return (rc, out, err) + + +def main(): + module = AnsibleModule( + argument_spec=dict( + host=dict(default=None, type='str'), + username=dict(default=None, type='str'), + password=dict(default=None, type='str', no_log=True), + state=dict(default='present', choices=['present', 'absent']), + logging=dict(default=False, type='bool') + ), + supports_check_mode=True + ) + + obj = EjabberdUser(module) + + rc = None + result = dict(changed=False) + + if obj.state == 'absent': + if obj.exists: + if module.check_mode: + module.exit_json(changed=True) + (rc, out, err) = obj.delete() + if rc != 0: + module.fail_json(msg=err, rc=rc) + + elif obj.state == 'present': + if not obj.exists: + if module.check_mode: + module.exit_json(changed=True) + (rc, out, err) = obj.create() + elif obj.changed: + if module.check_mode: + module.exit_json(changed=True) + (rc, out, err) = obj.update() + if rc is not None and rc != 0: + module.fail_json(msg=err, rc=rc) + + if rc is None: + result['changed'] = False + else: + result['changed'] = True + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/web_infrastructure/gunicorn.py b/plugins/modules/web_infrastructure/gunicorn.py new file mode 100644 index 0000000000..c6c8807c73 --- /dev/null +++ b/plugins/modules/web_infrastructure/gunicorn.py @@ -0,0 +1,227 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2017, Alejandro Gomez +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = 
'''
+---
+module: gunicorn
+short_description: Run gunicorn with various settings.
+description:
+  - Starts gunicorn with the parameters specified. Common settings for gunicorn
+    configuration are supported. For additional configuration use a config file.
+    See U(https://gunicorn-docs.readthedocs.io/en/latest/settings.html) for more
+    options. It's recommended to always use the chdir option to avoid problems
+    with the location of the app.
+requirements: [gunicorn]
+author:
+  - "Alejandro Gomez (@agmezr)"
+options:
+  app:
+    required: true
+    aliases: ['name']
+    description:
+      - The app module. A name refers to a WSGI callable that should be found in the specified module.
+  venv:
+    aliases: ['virtualenv']
+    description:
+      - 'Path to the virtualenv directory.'
+  config:
+    description:
+      - 'Path to the gunicorn configuration file.'
+  chdir:
+    description:
+      - 'Chdir to specified directory before apps loading.'
+  pid:
+    description:
+      - 'A filename to use for the PID file. If not set and not found in the configuration file, a tmp
+        pid file will be created to check for a successful run of gunicorn.'
+  worker:
+    choices: ['sync', 'eventlet', 'gevent', 'tornado', 'gthread', 'gaiohttp']
+    description:
+      - 'The type of workers to use. The default class (sync) should handle most "normal" types of workloads.'
+  user:
+    description:
+      - 'Switch worker processes to run as this user.'
+notes:
+  - If not specified in the config file, a temporary error log will be created in the /tmp dir.
+    Please make sure you have write access to the /tmp dir. This is not required, but it will help
+    you identify any problem with the configuration.
+'''
+
+EXAMPLES = '''
+- name: simple gunicorn run example
+  gunicorn:
+    app: 'wsgi'
+    chdir: '/workspace/example'
+
+- name: run gunicorn on a virtualenv
+  gunicorn:
+    app: 'wsgi'
+    chdir: '/workspace/example'
+    venv: '/workspace/example/venv'
+
+- name: run gunicorn with a config file
+  gunicorn:
+    app: 'wsgi'
+    chdir: '/workspace/example'
+    conf: '/workspace/example/gunicorn.cfg'
+
+- name: run gunicorn as ansible user with specified pid and config file
+  gunicorn:
+    app: 'wsgi'
+    chdir: '/workspace/example'
+    conf: '/workspace/example/gunicorn.cfg'
+    venv: '/workspace/example/venv'
+    pid: '/workspace/example/gunicorn.pid'
+    user: 'ansible'
+'''
+
+RETURN = '''
+gunicorn:
+  description: process id of gunicorn
+  returned: changed
+  type: str
+  sample: "1234"
+'''
+
+import os
+import time
+
+# import ansible utils
+from ansible.module_utils.basic import AnsibleModule
+
+
+def search_existing_config(config, option):
+    ''' search in config file for specified option '''
+    if config and os.path.isfile(config):
+        with open(config, 'r') as f:
+            for line in f:
+                if option in line:
+                    return line
+    return None
+
+
+def remove_tmp_file(file_path):
+    ''' remove temporary files '''
+    if os.path.isfile(file_path):
+        os.remove(file_path)
+
+
+def main():
+
+    # available gunicorn options on module
+    gunicorn_options = {
+        'config': '-c',
+        'chdir': '--chdir',
+        'worker': '-k',
+        'user': '-u',
+    }
+
+    module = AnsibleModule(
+        argument_spec=dict(
+            app=dict(required=True, type='str', aliases=['name']),
+            venv=dict(required=False, type='path', default=None, aliases=['virtualenv']),
+            config=dict(required=False, default=None, type='path', aliases=['conf']),
+            chdir=dict(required=False, type='path', default=None),
+            pid=dict(required=False, type='path', default=None),
+            user=dict(required=False, type='str'),
+            worker=dict(required=False,
+                        type='str',
+                        choices=['sync', 'eventlet', 'gevent', 'tornado
', 'gthread', 'gaiohttp'] + ), + ) + ) + + # temporary files in case no option provided + tmp_error_log = os.path.join(module.tmpdir, 'gunicorn.temp.error.log') + tmp_pid_file = os.path.join(module.tmpdir, 'gunicorn.temp.pid') + + # remove temp file if exists + remove_tmp_file(tmp_pid_file) + remove_tmp_file(tmp_error_log) + + # obtain app name and venv + params = module.params + app = params['app'] + venv = params['venv'] + pid = params['pid'] + + # use venv path if exists + if venv: + gunicorn_command = "/".join((venv, 'bin', 'gunicorn')) + else: + gunicorn_command = 'gunicorn' + + # to daemonize the process + options = ["-D"] + + # fill options + for option in gunicorn_options: + param = params[option] + if param: + options.append(gunicorn_options[option]) + options.append(param) + + error_log = search_existing_config(params['config'], 'errorlog') + if not error_log: + # place error log somewhere in case of fail + options.append("--error-logfile") + options.append(tmp_error_log) + + pid_file = search_existing_config(params['config'], 'pid') + if not params['pid'] and not pid_file: + pid = tmp_pid_file + + # add option for pid file if not found on config file + if not pid_file: + options.append('--pid') + options.append(pid) + + # put args together + args = [gunicorn_command] + options + [app] + rc, out, err = module.run_command(args, use_unsafe_shell=False, encoding=None) + + if not err: + # wait for gunicorn to dump to log + time.sleep(0.5) + if os.path.isfile(pid): + with open(pid, 'r') as f: + result = f.readline().strip() + + if not params['pid']: + os.remove(pid) + + module.exit_json(changed=True, pid=result, debug=" ".join(args)) + else: + # if user defined own error log, check that + if error_log: + error = 'Please check your {0}'.format(error_log.strip()) + else: + if os.path.isfile(tmp_error_log): + with open(tmp_error_log, 'r') as f: + error = f.read() + # delete tmp log + os.remove(tmp_error_log) + else: + error = "Log not found" + + module.fail_json(msg='Failed to start gunicorn. {0}'.format(error), error=err) + + else: + module.fail_json(msg='Failed to start gunicorn {0}'.format(err), error=err) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/web_infrastructure/htpasswd.py b/plugins/modules/web_infrastructure/htpasswd.py new file mode 100644 index 0000000000..370eb3db47 --- /dev/null +++ b/plugins/modules/web_infrastructure/htpasswd.py @@ -0,0 +1,274 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2013, Nimbis Services, Inc. +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +module: htpasswd +short_description: manage user files for basic authentication +description: + - Add and remove username/password entries in a password file using htpasswd. + - This is used by web servers such as Apache and Nginx for basic authentication. +options: + path: + required: true + aliases: [ dest, destfile ] + description: + - Path to the file that contains the usernames and passwords + name: + required: true + aliases: [ username ] + description: + - User name to add or remove + password: + required: false + description: + - Password associated with user. + - Must be specified if user does not exist yet. 
+ crypt_scheme: + required: false + choices: ["apr_md5_crypt", "des_crypt", "ldap_sha1", "plaintext"] + default: "apr_md5_crypt" + description: + - Encryption scheme to be used. As well as the four choices listed + here, you can also use any other hash supported by passlib, such as + md5_crypt and sha256_crypt, which are linux passwd hashes. If you + do so the password file will not be compatible with Apache or Nginx + state: + required: false + choices: [ present, absent ] + default: "present" + description: + - Whether the user entry should be present or not + create: + required: false + type: bool + default: "yes" + description: + - Used with C(state=present). If specified, the file will be created + if it does not already exist. If set to "no", will fail if the + file does not exist +notes: + - "This module depends on the I(passlib) Python library, which needs to be installed on all target systems." + - "On Debian, Ubuntu, or Fedora: install I(python-passlib)." + - "On RHEL or CentOS: Enable EPEL, then install I(python-passlib)." +requirements: [ passlib>=1.6 ] +author: "Ansible Core Team" +extends_documentation_fragment: files +''' + +EXAMPLES = """ +# Add a user to a password file and ensure permissions are set +- htpasswd: + path: /etc/nginx/passwdfile + name: janedoe + password: '9s36?;fyNp' + owner: root + group: www-data + mode: 0640 + +# Remove a user from a password file +- htpasswd: + path: /etc/apache2/passwdfile + name: foobar + state: absent + +# Add a user to a password file suitable for use by libpam-pwdfile +- htpasswd: + path: /etc/mail/passwords + name: alex + password: oedu2eGh + crypt_scheme: md5_crypt +""" + + +import os +import tempfile +import traceback +from distutils.version import LooseVersion +from ansible.module_utils.basic import AnsibleModule, missing_required_lib +from ansible.module_utils._text import to_native + +PASSLIB_IMP_ERR = None +try: + from passlib.apache import HtpasswdFile, htpasswd_context + from passlib.context import CryptContext + import passlib +except ImportError: + PASSLIB_IMP_ERR = traceback.format_exc() + passlib_installed = False +else: + passlib_installed = True + +apache_hashes = ["apr_md5_crypt", "des_crypt", "ldap_sha1", "plaintext"] + + +def create_missing_directories(dest): + destpath = os.path.dirname(dest) + if not os.path.exists(destpath): + os.makedirs(destpath) + + +def present(dest, username, password, crypt_scheme, create, check_mode): + """ Ensures user is present + + Returns (msg, changed) """ + if crypt_scheme in apache_hashes: + context = htpasswd_context + else: + context = CryptContext(schemes=[crypt_scheme] + apache_hashes) + if not os.path.exists(dest): + if not create: + raise ValueError('Destination %s does not exist' % dest) + if check_mode: + return ("Create %s" % dest, True) + create_missing_directories(dest) + if LooseVersion(passlib.__version__) >= LooseVersion('1.6'): + ht = HtpasswdFile(dest, new=True, default_scheme=crypt_scheme, context=context) + else: + ht = HtpasswdFile(dest, autoload=False, default=crypt_scheme, context=context) + if getattr(ht, 'set_password', None): + ht.set_password(username, password) + else: + ht.update(username, password) + ht.save() + return ("Created %s and added %s" % (dest, username), True) + else: + if LooseVersion(passlib.__version__) >= LooseVersion('1.6'): + ht = HtpasswdFile(dest, new=False, default_scheme=crypt_scheme, context=context) + else: + ht = HtpasswdFile(dest, default=crypt_scheme, context=context) + + found = None + if getattr(ht, 'check_password', None): + 
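+            # passlib >= 1.6 provides HtpasswdFile.check_password(); older releases only expose verify()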
found = ht.check_password(username, password) + else: + found = ht.verify(username, password) + + if found: + return ("%s already present" % username, False) + else: + if not check_mode: + if getattr(ht, 'set_password', None): + ht.set_password(username, password) + else: + ht.update(username, password) + ht.save() + return ("Add/update %s" % username, True) + + +def absent(dest, username, check_mode): + """ Ensures user is absent + + Returns (msg, changed) """ + if LooseVersion(passlib.__version__) >= LooseVersion('1.6'): + ht = HtpasswdFile(dest, new=False) + else: + ht = HtpasswdFile(dest) + + if username not in ht.users(): + return ("%s not present" % username, False) + else: + if not check_mode: + ht.delete(username) + ht.save() + return ("Remove %s" % username, True) + + +def check_file_attrs(module, changed, message): + + file_args = module.load_file_common_arguments(module.params) + if module.set_fs_attributes_if_different(file_args, False): + + if changed: + message += " and " + changed = True + message += "ownership, perms or SE linux context changed" + + return message, changed + + +def main(): + arg_spec = dict( + path=dict(required=True, aliases=["dest", "destfile"]), + name=dict(required=True, aliases=["username"]), + password=dict(required=False, default=None, no_log=True), + crypt_scheme=dict(required=False, default="apr_md5_crypt"), + state=dict(required=False, default="present"), + create=dict(type='bool', default='yes'), + + ) + module = AnsibleModule(argument_spec=arg_spec, + add_file_common_args=True, + supports_check_mode=True) + + path = module.params['path'] + username = module.params['name'] + password = module.params['password'] + crypt_scheme = module.params['crypt_scheme'] + state = module.params['state'] + create = module.params['create'] + check_mode = module.check_mode + + if not passlib_installed: + module.fail_json(msg=missing_required_lib("passlib"), exception=PASSLIB_IMP_ERR) + + # Check file for blank lines in effort to avoid "need more than 1 value to unpack" error. 
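+    # (The code below rewrites the file without its blank lines before passlib
+    # parses it; in check mode the cleaned copy is written to a temporary file
+    # instead, so the original file stays untouched.)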
+ try: + f = open(path, "r") + except IOError: + # No preexisting file to remove blank lines from + f = None + else: + try: + lines = f.readlines() + finally: + f.close() + + # If the file gets edited, it returns true, so only edit the file if it has blank lines + strip = False + for line in lines: + if not line.strip(): + strip = True + break + + if strip: + # If check mode, create a temporary file + if check_mode: + temp = tempfile.NamedTemporaryFile() + path = temp.name + f = open(path, "w") + try: + [f.write(line) for line in lines if line.strip()] + finally: + f.close() + + try: + if state == 'present': + (msg, changed) = present(path, username, password, crypt_scheme, create, check_mode) + elif state == 'absent': + if not os.path.exists(path): + module.exit_json(msg="%s not present" % username, + warnings="%s does not exist" % path, changed=False) + (msg, changed) = absent(path, username, check_mode) + else: + module.fail_json(msg="Invalid state: %s" % state) + + check_file_attrs(module, changed, msg) + module.exit_json(msg=msg, changed=changed) + except Exception as e: + module.fail_json(msg=to_native(e)) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/web_infrastructure/jboss.py b/plugins/modules/web_infrastructure/jboss.py new file mode 100644 index 0000000000..49ab52be17 --- /dev/null +++ b/plugins/modules/web_infrastructure/jboss.py @@ -0,0 +1,183 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2013, Jeroen Hoekx +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = r''' +module: jboss +short_description: Deploy applications to JBoss +description: + - Deploy applications to JBoss standalone using the filesystem. +options: + deployment: + required: true + description: + - The name of the deployment. + type: str + src: + description: + - The remote path of the application ear or war to deploy. + - Required when I(state=present). + - Ignored when I(state=absent). + type: path + deploy_path: + default: /var/lib/jbossas/standalone/deployments + description: + - The location in the filesystem where the deployment scanner listens. + type: path + state: + choices: [ present, absent ] + default: "present" + description: + - Whether the application should be deployed or undeployed. + type: str +notes: + - The JBoss standalone deployment-scanner has to be enabled in standalone.xml + - The module can wait until I(deployment) file is deployed/undeployed by deployment-scanner. + Duration of waiting time depends on scan-interval parameter from standalone.xml. + - Ensure no identically named application is deployed through the JBoss CLI +seealso: +- name: WildFly reference + description: Complete reference of the WildFly documentation. 
+ link: https://docs.wildfly.org +author: + - Jeroen Hoekx (@jhoekx) +''' + +EXAMPLES = r""" +- name: Deploy a hello world application to the default deploy_path + jboss: + src: /tmp/hello-1.0-SNAPSHOT.war + deployment: hello.war + state: present + +- name: Update the hello world application to the non-default deploy_path + jboss: + src: /tmp/hello-1.1-SNAPSHOT.war + deploy_path: /opt/wildfly/deployment + deployment: hello.war + state: present + +- name: Undeploy the hello world application from the default deploy_path + jboss: + deployment: hello.war + state: absent +""" + +RETURN = r""" # """ + +import os +import shutil +import time +from ansible.module_utils.basic import AnsibleModule + + +DEFAULT_DEPLOY_PATH = '/var/lib/jbossas/standalone/deployments' + + +def is_deployed(deploy_path, deployment): + return os.path.exists(os.path.join(deploy_path, "%s.deployed" % deployment)) + + +def is_undeployed(deploy_path, deployment): + return os.path.exists(os.path.join(deploy_path, "%s.undeployed" % deployment)) + + +def is_failed(deploy_path, deployment): + return os.path.exists(os.path.join(deploy_path, "%s.failed" % deployment)) + + +def main(): + module = AnsibleModule( + argument_spec=dict( + src=dict(type='path'), + deployment=dict(type='str', required=True), + deploy_path=dict(type='path', default=DEFAULT_DEPLOY_PATH), + state=dict(type='str', choices=['absent', 'present'], default='present'), + ), + required_if=[('state', 'present', ('src',))], + supports_check_mode=True + ) + + result = dict(changed=False) + + src = module.params['src'] + deployment = module.params['deployment'] + deploy_path = module.params['deploy_path'] + state = module.params['state'] + + if not os.path.exists(deploy_path): + module.fail_json(msg="deploy_path does not exist.") + + if state == 'absent' and src: + module.warn('Parameter src is ignored when state=absent') + elif state == 'present' and not os.path.exists(src): + module.fail_json(msg='Source file %s does not exist.' % src) + + deployed = is_deployed(deploy_path, deployment) + + # === when check_mode === + if module.check_mode: + if state == 'present': + if not deployed: + result['changed'] = True + + elif deployed: + if module.sha1(src) != module.sha1(os.path.join(deploy_path, deployment)): + result['changed'] = True + + elif state == 'absent' and deployed: + result['changed'] = True + + module.exit_json(**result) + # ======================= + + if state == 'present' and not deployed: + if is_failed(deploy_path, deployment): + # Clean up old failed deployment + os.remove(os.path.join(deploy_path, "%s.failed" % deployment)) + + shutil.copyfile(src, os.path.join(deploy_path, deployment)) + while not deployed: + deployed = is_deployed(deploy_path, deployment) + if is_failed(deploy_path, deployment): + module.fail_json(msg='Deploying %s failed.' % deployment) + time.sleep(1) + result['changed'] = True + + if state == 'present' and deployed: + if module.sha1(src) != module.sha1(os.path.join(deploy_path, deployment)): + os.remove(os.path.join(deploy_path, "%s.deployed" % deployment)) + shutil.copyfile(src, os.path.join(deploy_path, deployment)) + deployed = False + while not deployed: + deployed = is_deployed(deploy_path, deployment) + if is_failed(deploy_path, deployment): + module.fail_json(msg='Deploying %s failed.' 
% deployment) + time.sleep(1) + result['changed'] = True + + if state == 'absent' and deployed: + os.remove(os.path.join(deploy_path, "%s.deployed" % deployment)) + while deployed: + deployed = not is_undeployed(deploy_path, deployment) + if is_failed(deploy_path, deployment): + module.fail_json(msg='Undeploying %s failed.' % deployment) + time.sleep(1) + result['changed'] = True + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/web_infrastructure/jenkins_job.py b/plugins/modules/web_infrastructure/jenkins_job.py new file mode 100644 index 0000000000..44cbd3ad6d --- /dev/null +++ b/plugins/modules/web_infrastructure/jenkins_job.py @@ -0,0 +1,365 @@ +#!/usr/bin/python +# +# Copyright: (c) Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: jenkins_job +short_description: Manage jenkins jobs +description: + - Manage Jenkins jobs by using Jenkins REST API. +requirements: + - "python-jenkins >= 0.4.12" +author: "Sergio Millan Rodriguez (@sermilrod)" +options: + config: + description: + - config in XML format. + - Required if job does not yet exist. + - Mutually exclusive with C(enabled). + - Considered if C(state=present). + required: false + enabled: + description: + - Whether the job should be enabled or disabled. + - Mutually exclusive with C(config). + - Considered if C(state=present). + type: bool + required: false + name: + description: + - Name of the Jenkins job. + required: true + password: + description: + - Password to authenticate with the Jenkins server. + required: false + state: + description: + - Attribute that specifies if the job has to be created or deleted. + required: false + default: present + choices: ['present', 'absent'] + token: + description: + - API token used to authenticate alternatively to password. + required: false + url: + description: + - URL where the Jenkins server is accessible. + required: false + default: http://localhost:8080 + user: + description: + - User to authenticate with the Jenkins server. + required: false +''' + +EXAMPLES = ''' +# Create a jenkins job using basic authentication +- jenkins_job: + config: "{{ lookup('file', 'templates/test.xml') }}" + name: test + password: admin + url: http://localhost:8080 + user: admin + +# Create a jenkins job using the token +- jenkins_job: + config: "{{ lookup('template', 'templates/test.xml.j2') }}" + name: test + token: asdfasfasfasdfasdfadfasfasdfasdfc + url: http://localhost:8080 + user: admin + +# Delete a jenkins job using basic authentication +- jenkins_job: + name: test + password: admin + state: absent + url: http://localhost:8080 + user: admin + +# Delete a jenkins job using the token +- jenkins_job: + name: test + token: asdfasfasfasdfasdfadfasfasdfasdfc + state: absent + url: http://localhost:8080 + user: admin + +# Disable a jenkins job using basic authentication +- jenkins_job: + name: test + password: admin + enabled: False + url: http://localhost:8080 + user: admin + +# Disable a jenkins job using the token +- jenkins_job: + name: test + token: asdfasfasfasdfasdfadfasfasdfasdfc + enabled: False + url: http://localhost:8080 + user: admin +''' + +RETURN = ''' +--- +name: + description: Name of the jenkins job. 
+ returned: success + type: str + sample: test-job +state: + description: State of the jenkins job. + returned: success + type: str + sample: present +enabled: + description: Whether the jenkins job is enabled or not. + returned: success + type: bool + sample: true +user: + description: User used for authentication. + returned: success + type: str + sample: admin +url: + description: Url to connect to the Jenkins server. + returned: success + type: str + sample: https://jenkins.mydomain.com +''' + +import traceback +import xml.etree.ElementTree as ET + +JENKINS_IMP_ERR = None +try: + import jenkins + python_jenkins_installed = True +except ImportError: + JENKINS_IMP_ERR = traceback.format_exc() + python_jenkins_installed = False + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib +from ansible.module_utils._text import to_native + + +class JenkinsJob: + + def __init__(self, module): + self.module = module + + self.config = module.params.get('config') + self.name = module.params.get('name') + self.password = module.params.get('password') + self.state = module.params.get('state') + self.enabled = module.params.get('enabled') + self.token = module.params.get('token') + self.user = module.params.get('user') + self.jenkins_url = module.params.get('url') + self.server = self.get_jenkins_connection() + + self.result = { + 'changed': False, + 'url': self.jenkins_url, + 'name': self.name, + 'user': self.user, + 'state': self.state, + 'diff': { + 'before': "", + 'after': "" + } + } + + self.EXCL_STATE = "excluded state" + + def get_jenkins_connection(self): + try: + if (self.user and self.password): + return jenkins.Jenkins(self.jenkins_url, self.user, self.password) + elif (self.user and self.token): + return jenkins.Jenkins(self.jenkins_url, self.user, self.token) + elif (self.user and not (self.password or self.token)): + return jenkins.Jenkins(self.jenkins_url, self.user) + else: + return jenkins.Jenkins(self.jenkins_url) + except Exception as e: + self.module.fail_json(msg='Unable to connect to Jenkins server, %s' % to_native(e), exception=traceback.format_exc()) + + def get_job_status(self): + try: + response = self.server.get_job_info(self.name) + if "color" not in response: + return self.EXCL_STATE + else: + return to_native(response['color']) + + except Exception as e: + self.module.fail_json(msg='Unable to fetch job information, %s' % to_native(e), exception=traceback.format_exc()) + + def job_exists(self): + try: + return bool(self.server.job_exists(self.name)) + except Exception as e: + self.module.fail_json(msg='Unable to validate if job exists, %s for %s' % (to_native(e), self.jenkins_url), + exception=traceback.format_exc()) + + def get_config(self): + return job_config_to_string(self.config) + + def get_current_config(self): + return job_config_to_string(self.server.get_job_config(self.name).encode('utf-8')) + + def has_config_changed(self): + # config is optional, if not provided we keep the current config as is + if self.config is None: + return False + + config_file = self.get_config() + machine_file = self.get_current_config() + + self.result['diff']['after'] = config_file + self.result['diff']['before'] = machine_file + + if machine_file != config_file: + return True + return False + + def present_job(self): + if self.config is None and self.enabled is None: + self.module.fail_json(msg='one of the following params is required on state=present: config,enabled') + + if not self.job_exists(): + self.create_job() + else: + self.update_job() + + def 
has_state_changed(self, status): + # Keep in current state if enabled arg_spec is not given + if self.enabled is None: + return False + + if ((self.enabled is False and status != "disabled") or (self.enabled is True and status == "disabled")): + return True + return False + + def switch_state(self): + if self.enabled is False: + self.server.disable_job(self.name) + else: + self.server.enable_job(self.name) + + def update_job(self): + try: + status = self.get_job_status() + + # Handle job config + if self.has_config_changed(): + self.result['changed'] = True + if not self.module.check_mode: + self.server.reconfig_job(self.name, self.get_config()) + + # Handle job disable/enable + elif (status != self.EXCL_STATE and self.has_state_changed(status)): + self.result['changed'] = True + if not self.module.check_mode: + self.switch_state() + + except Exception as e: + self.module.fail_json(msg='Unable to reconfigure job, %s for %s' % (to_native(e), self.jenkins_url), + exception=traceback.format_exc()) + + def create_job(self): + if self.config is None: + self.module.fail_json(msg='missing required param: config') + + self.result['changed'] = True + try: + config_file = self.get_config() + self.result['diff']['after'] = config_file + if not self.module.check_mode: + self.server.create_job(self.name, config_file) + except Exception as e: + self.module.fail_json(msg='Unable to create job, %s for %s' % (to_native(e), self.jenkins_url), + exception=traceback.format_exc()) + + def absent_job(self): + if self.job_exists(): + self.result['changed'] = True + self.result['diff']['before'] = self.get_current_config() + if not self.module.check_mode: + try: + self.server.delete_job(self.name) + except Exception as e: + self.module.fail_json(msg='Unable to delete job, %s for %s' % (to_native(e), self.jenkins_url), + exception=traceback.format_exc()) + + def get_result(self): + result = self.result + if self.job_exists(): + result['enabled'] = self.get_job_status() != "disabled" + else: + result['enabled'] = None + return result + + +def test_dependencies(module): + if not python_jenkins_installed: + module.fail_json( + msg=missing_required_lib("python-jenkins", + url="https://python-jenkins.readthedocs.io/en/latest/install.html"), + exception=JENKINS_IMP_ERR) + + +def job_config_to_string(xml_str): + return ET.tostring(ET.fromstring(xml_str)).decode('ascii') + + +def main(): + module = AnsibleModule( + argument_spec=dict( + config=dict(required=False), + name=dict(required=True), + password=dict(required=False, no_log=True), + state=dict(required=False, choices=['present', 'absent'], default="present"), + enabled=dict(required=False, type='bool'), + token=dict(required=False, no_log=True), + url=dict(required=False, default="http://localhost:8080"), + user=dict(required=False) + ), + mutually_exclusive=[ + ['password', 'token'], + ['config', 'enabled'], + ], + supports_check_mode=True, + ) + + test_dependencies(module) + jenkins_job = JenkinsJob(module) + + if module.params.get('state') == "present": + jenkins_job.present_job() + else: + jenkins_job.absent_job() + + result = jenkins_job.get_result() + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/web_infrastructure/jenkins_job_facts.py b/plugins/modules/web_infrastructure/jenkins_job_facts.py new file mode 120000 index 0000000000..7a78b2faee --- /dev/null +++ b/plugins/modules/web_infrastructure/jenkins_job_facts.py @@ -0,0 +1 @@ +jenkins_job_info.py \ No newline at end of file diff --git 
a/plugins/modules/web_infrastructure/jenkins_job_info.py b/plugins/modules/web_infrastructure/jenkins_job_info.py
new file mode 100644
index 0000000000..613f09de5a
--- /dev/null
+++ b/plugins/modules/web_infrastructure/jenkins_job_info.py
@@ -0,0 +1,256 @@
+#!/usr/bin/python
+#
+# Copyright: (c) Ansible Project
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+    'metadata_version': '1.1',
+    'status': ['preview'],
+    'supported_by': 'community'
+}
+
+DOCUMENTATION = '''
+---
+module: jenkins_job_info
+short_description: Get information about Jenkins jobs
+description:
+  - This module can be used to query information about Jenkins jobs that already exist.
+  - This module was called C(jenkins_job_facts) before Ansible 2.9. The usage did not change.
+requirements:
+  - "python-jenkins >= 0.4.12"
+options:
+  name:
+    description:
+      - Exact name of the Jenkins job to fetch information about.
+  glob:
+    description:
+      - A shell glob of Jenkins job names to fetch information about.
+  color:
+    description:
+      - Only fetch jobs with the given status color.
+  password:
+    description:
+      - Password to authenticate with the Jenkins server.
+      - This is a required parameter if C(token) is not provided.
+  token:
+    description:
+      - API token used to authenticate with the Jenkins server.
+      - This is a required parameter if C(password) is not provided.
+  url:
+    description:
+      - URL where the Jenkins server is accessible.
+    default: http://localhost:8080
+  user:
+    description:
+      - User to authenticate with the Jenkins server.
+  validate_certs:
+    description:
+      - If set to C(False), the SSL certificates will not be validated.
+      - This should only be set to C(False) on personally controlled sites using self-signed certificates.
+    default: true
+    type: bool
+author:
+  - "Chris St.
Pierre (@stpierre)" +''' + +EXAMPLES = ''' +# Get all Jenkins jobs using basic auth +- jenkins_job_info: + user: admin + password: hunter2 + register: my_jenkins_job_info + +# Get all Jenkins jobs using the token +- jenkins_job_info: + user: admin + token: abcdefghijklmnop + register: my_jenkins_job_info + +# Get info about a single job using basic auth +- jenkins_job_info: + name: some-job-name + user: admin + password: hunter2 + register: my_jenkins_job_info + +# Get info about a single job in a folder using basic auth +- jenkins_job_info: + name: some-folder-name/some-job-name + user: admin + password: hunter2 + register: my_jenkins_job_info + +# Get info about jobs matching a shell glob using basic auth +- jenkins_job_info: + glob: some-job-* + user: admin + password: hunter2 + register: my_jenkins_job_info + +# Get info about all failing jobs using basic auth +- jenkins_job_info: + color: red + user: admin + password: hunter2 + register: my_jenkins_job_info + +# Get info about passing jobs matching a shell glob using basic auth +- jenkins_job_info: + name: some-job-* + color: blue + user: admin + password: hunter2 + register: my_jenkins_job_info + +- name: Get the info from custom URL with token and validate_certs=False + jenkins_job_info: + user: admin + token: 126df5c60d66c66e3b75b11104a16a8a + url: https://jenkins.example.com + validate_certs: False + register: my_jenkins_job_info +''' + +RETURN = ''' +--- +jobs: + description: All jobs found matching the specified criteria + returned: success + type: list + sample: + [ + { + "name": "test-job", + "fullname": "test-folder/test-job", + "url": "http://localhost:8080/job/test-job/", + "color": "blue" + }, + ] +''' + +import ssl +import fnmatch +import traceback + +JENKINS_IMP_ERR = None +try: + import jenkins + HAS_JENKINS = True +except ImportError: + JENKINS_IMP_ERR = traceback.format_exc() + HAS_JENKINS = False + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib +from ansible.module_utils._text import to_native + + +def get_jenkins_connection(module): + url = module.params["url"] + username = module.params.get("user") + password = module.params.get("password") + token = module.params.get("token") + + validate_certs = module.params.get('validate_certs') + if not validate_certs and hasattr(ssl, 'SSLContext'): + ssl._create_default_https_context = ssl._create_unverified_context + if validate_certs and not hasattr(ssl, 'SSLContext'): + module.fail_json(msg="Module does not support changing verification mode with python < 2.7.9." 
+ " Either update Python or use validate_certs=false.") + + if username and (password or token): + return jenkins.Jenkins(url, username, password or token) + elif username: + return jenkins.Jenkins(url, username) + else: + return jenkins.Jenkins(url) + + +def test_dependencies(module): + if not HAS_JENKINS: + module.fail_json( + msg=missing_required_lib("python-jenkins", + url="https://python-jenkins.readthedocs.io/en/latest/install.html"), + exception=JENKINS_IMP_ERR) + + +def get_jobs(module): + jenkins_conn = get_jenkins_connection(module) + jobs = [] + if module.params.get("name"): + try: + job_info = jenkins_conn.get_job_info(module.params.get("name")) + except jenkins.NotFoundException: + pass + else: + jobs.append({ + "name": job_info["name"], + "fullname": job_info["fullName"], + "url": job_info["url"], + "color": job_info["color"] + }) + + else: + all_jobs = jenkins_conn.get_all_jobs() + if module.params.get("glob"): + jobs.extend( + j for j in all_jobs + if fnmatch.fnmatch(j["fullname"], module.params.get("glob"))) + else: + jobs = all_jobs + # python-jenkins includes the internal Jenkins class used for each job + # in its return value; we strip that out because the leading underscore + # (and the fact that it's not documented in the python-jenkins docs) + # indicates that it's not part of the dependable public interface. + for job in jobs: + if "_class" in job: + del job["_class"] + + if module.params.get("color"): + jobs = [j for j in jobs if j["color"] == module.params.get("color")] + + return jobs + + +def main(): + module = AnsibleModule( + argument_spec=dict( + name=dict(), + glob=dict(), + color=dict(), + password=dict(no_log=True), + token=dict(no_log=True), + url=dict(default="http://localhost:8080"), + user=dict(), + validate_certs=dict(type='bool', default=True), + ), + mutually_exclusive=[ + ['password', 'token'], + ['name', 'glob'], + ], + required_one_of=[ + ['password', 'token'], + ], + supports_check_mode=True, + ) + if module._name == 'jenkins_job_facts': + module.deprecate("The 'jenkins_job_facts' module has been renamed to 'jenkins_job_info'", version='2.13') + + test_dependencies(module) + jobs = list() + + try: + jobs = get_jobs(module) + except jenkins.JenkinsException as err: + module.fail_json( + msg='Unable to connect to Jenkins server, %s' % to_native(err), + exception=traceback.format_exc()) + + module.exit_json(changed=False, jobs=jobs) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/web_infrastructure/jenkins_plugin.py b/plugins/modules/web_infrastructure/jenkins_plugin.py new file mode 100644 index 0000000000..62d626f73f --- /dev/null +++ b/plugins/modules/web_infrastructure/jenkins_plugin.py @@ -0,0 +1,790 @@ +#!/usr/bin/python +# encoding: utf-8 + +# (c) 2016, Jiri Tyr +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: jenkins_plugin +author: Jiri Tyr (@jtyr) +short_description: Add or remove Jenkins plugin +description: + - Ansible module which helps to manage Jenkins plugins. + +options: + group: + description: + - Name of the Jenkins group on the OS. + default: jenkins + jenkins_home: + description: + - Home directory of the Jenkins user. + default: /var/lib/jenkins + mode: + description: + - File mode applied on versioned plugins. 
+    default: '0644'
+  name:
+    description:
+      - Plugin name.
+    required: yes
+  owner:
+    description:
+      - Name of the Jenkins user on the OS.
+    default: jenkins
+  state:
+    description:
+      - Desired plugin state.
+      - If C(latest) is set, the check for a new version is performed every
+        time. This is suitable for keeping the plugin up to date.
+    choices: [absent, present, pinned, unpinned, enabled, disabled, latest]
+    default: present
+  timeout:
+    description:
+      - Server connection timeout in seconds.
+    default: 30
+  updates_expiration:
+    description:
+      - Number of seconds after which a new copy of the I(update-center.json)
+        file is downloaded. This is used to avoid the need to download the
+        plugin to calculate its checksum when C(latest) is specified.
+      - Set it to C(0) if no cache file should be used. In that case, the
+        plugin file will always be downloaded to calculate its checksum when
+        C(latest) is specified.
+    default: 86400
+  updates_url:
+    description:
+      - URL of the Update Centre.
+      - Used as the base URL to download the plugins and the
+        I(update-center.json) JSON file.
+    default: https://updates.jenkins.io
+  url:
+    description:
+      - URL of the Jenkins server.
+    default: http://localhost:8080
+  version:
+    description:
+      - Plugin version number.
+      - If this option is specified, all plugin dependencies must be installed
+        manually.
+      - It might take longer to verify that the correct version is installed.
+        This is especially true if a specific version number is specified.
+      - Quote the version to prevent it from being interpreted as a float. For
+        example, unquoted C(1.20) would become C(1.2).
+  with_dependencies:
+    description:
+      - Defines whether to install plugin dependencies.
+      - This option takes effect only if I(version) is not defined.
+    type: bool
+    default: yes
+
+notes:
+  - Plugin installation should be run under root or the same user that owns
+    the plugin files on disk. Only if the plugin is not yet installed and no
+    version is specified is the installation performed via the API, which
+    requires only the web UI credentials.
+  - It is necessary to notify the handler or call the I(service) module to
+    restart the Jenkins service after a new plugin has been installed.
+  - Pinning works only if the plugin is installed and the Jenkins service was
+    successfully restarted after the plugin installation.
+  - It is not possible to run the module remotely by changing the I(url)
+    parameter to point to the Jenkins server. The module must be used on the
+    host where Jenkins runs, as it needs direct access to the plugin files.
+ - "The C(params) option was removed in Ansible 2.5 due to circumventing Ansible's + option handling" +extends_documentation_fragment: + - url +''' + +EXAMPLES = ''' +- name: Install plugin + jenkins_plugin: + name: build-pipeline-plugin + +- name: Install plugin without its dependencies + jenkins_plugin: + name: build-pipeline-plugin + with_dependencies: no + +- name: Make sure the plugin is always up-to-date + jenkins_plugin: + name: token-macro + state: latest + +- name: Install specific version of the plugin + jenkins_plugin: + name: token-macro + version: "1.15" + +- name: Pin the plugin + jenkins_plugin: + name: token-macro + state: pinned + +- name: Unpin the plugin + jenkins_plugin: + name: token-macro + state: unpinned + +- name: Enable the plugin + jenkins_plugin: + name: token-macro + state: enabled + +- name: Disable the plugin + jenkins_plugin: + name: token-macro + state: disabled + +- name: Uninstall plugin + jenkins_plugin: + name: build-pipeline-plugin + state: absent + +# +# Example of how to authenticate +# +- name: Install plugin + jenkins_plugin: + name: build-pipeline-plugin + url_username: admin + url_password: p4ssw0rd + url: http://localhost:8888 + +# +# Example of a Play which handles Jenkins restarts during the state changes +# +- name: Jenkins Master play + hosts: jenkins-master + vars: + my_jenkins_plugins: + token-macro: + enabled: yes + build-pipeline-plugin: + version: "1.4.9" + pinned: no + enabled: yes + tasks: + - name: Install plugins without a specific version + jenkins_plugin: + name: "{{ item.key }}" + register: my_jenkins_plugin_unversioned + when: > + 'version' not in item.value + with_dict: "{{ my_jenkins_plugins }}" + + - name: Install plugins with a specific version + jenkins_plugin: + name: "{{ item.key }}" + version: "{{ item.value['version'] }}" + register: my_jenkins_plugin_versioned + when: > + 'version' in item.value + with_dict: "{{ my_jenkins_plugins }}" + + - name: Initiate the fact + set_fact: + jenkins_restart_required: no + + - name: Check if restart is required by any of the versioned plugins + set_fact: + jenkins_restart_required: yes + when: item.changed + with_items: "{{ my_jenkins_plugin_versioned.results }}" + + - name: Check if restart is required by any of the unversioned plugins + set_fact: + jenkins_restart_required: yes + when: item.changed + with_items: "{{ my_jenkins_plugin_unversioned.results }}" + + - name: Restart Jenkins if required + service: + name: jenkins + state: restarted + when: jenkins_restart_required + + - name: Wait for Jenkins to start up + uri: + url: http://localhost:8080 + status_code: 200 + timeout: 5 + register: jenkins_service_status + # Keep trying for 5 mins in 5 sec intervals + retries: 60 + delay: 5 + until: > + 'status' in jenkins_service_status and + jenkins_service_status['status'] == 200 + when: jenkins_restart_required + + - name: Reset the fact + set_fact: + jenkins_restart_required: no + when: jenkins_restart_required + + - name: Plugin pinning + jenkins_plugin: + name: "{{ item.key }}" + state: "{{ 'pinned' if item.value['pinned'] else 'unpinned'}}" + when: > + 'pinned' in item.value + with_dict: "{{ my_jenkins_plugins }}" + + - name: Plugin enabling + jenkins_plugin: + name: "{{ item.key }}" + state: "{{ 'enabled' if item.value['enabled'] else 'disabled'}}" + when: > + 'enabled' in item.value + with_dict: "{{ my_jenkins_plugins }}" +''' + +RETURN = ''' +plugin: + description: plugin name + returned: success + type: str + sample: build-pipeline-plugin +state: + description: state of the 
target, after execution + returned: success + type: str + sample: "present" +''' + +from ansible.module_utils.basic import AnsibleModule, to_bytes +from ansible.module_utils.six.moves import http_cookiejar as cookiejar +from ansible.module_utils.six.moves.urllib.parse import urlencode +from ansible.module_utils.urls import fetch_url, url_argument_spec +from ansible.module_utils._text import to_native, text_type, binary_type +import base64 +import hashlib +import json +import os +import tempfile +import time + + +class JenkinsPlugin(object): + def __init__(self, module): + # To be able to call fail_json + self.module = module + + # Shortcuts for the params + self.params = self.module.params + self.url = self.params['url'] + self.timeout = self.params['timeout'] + + # Crumb + self.crumb = {} + # Cookie jar for crumb session + self.cookies = None + + if self._csrf_enabled(): + self.cookies = cookiejar.LWPCookieJar() + self.crumb = self._get_crumb() + + # Get list of installed plugins + self._get_installed_plugins() + + def _csrf_enabled(self): + csrf_data = self._get_json_data( + "%s/%s" % (self.url, "api/json"), 'CSRF') + + if 'useCrumbs' not in csrf_data: + self.module.fail_json( + msg="Required fields not found in the Crumbs response.", + details=csrf_data) + + return csrf_data['useCrumbs'] + + def _get_json_data(self, url, what, **kwargs): + # Get the JSON data + r = self._get_url_data(url, what, **kwargs) + + # Parse the JSON data + try: + json_data = json.loads(to_native(r.read())) + except Exception as e: + self.module.fail_json( + msg="Cannot parse %s JSON data." % what, + details=to_native(e)) + + return json_data + + def _get_url_data( + self, url, what=None, msg_status=None, msg_exception=None, + **kwargs): + # Compose default messages + if msg_status is None: + msg_status = "Cannot get %s" % what + + if msg_exception is None: + msg_exception = "Retrieval of %s failed." 
% what
+
+        # Get the URL data
+        try:
+            response, info = fetch_url(
+                self.module, url, timeout=self.timeout, cookies=self.cookies,
+                headers=self.crumb, **kwargs)
+
+            if info['status'] != 200:
+                self.module.fail_json(msg=msg_status, details=info['msg'])
+        except Exception as e:
+            self.module.fail_json(msg=msg_exception, details=to_native(e))
+
+        return response
+
+    def _get_crumb(self):
+        crumb_data = self._get_json_data(
+            "%s/%s" % (self.url, "crumbIssuer/api/json"), 'Crumb')
+
+        if 'crumbRequestField' in crumb_data and 'crumb' in crumb_data:
+            ret = {
+                crumb_data['crumbRequestField']: crumb_data['crumb']
+            }
+        else:
+            self.module.fail_json(
+                msg="Required fields not found in the Crumb response.",
+                details=crumb_data)
+
+        return ret
+
+    def _get_installed_plugins(self):
+        plugins_data = self._get_json_data(
+            "%s/%s" % (self.url, "pluginManager/api/json?depth=1"),
+            'list of plugins')
+
+        # Check if we got valid data
+        if 'plugins' not in plugins_data:
+            self.module.fail_json(msg="No valid plugin data found.")
+
+        # Create final list of installed/pinned plugins
+        self.is_installed = False
+        self.is_pinned = False
+        self.is_enabled = False
+
+        for p in plugins_data['plugins']:
+            if p['shortName'] == self.params['name']:
+                self.is_installed = True
+
+                if p['pinned']:
+                    self.is_pinned = True
+
+                if p['enabled']:
+                    self.is_enabled = True
+
+                break
+
+    def install(self):
+        changed = False
+        plugin_file = (
+            '%s/plugins/%s.jpi' % (
+                self.params['jenkins_home'],
+                self.params['name']))
+
+        if not self.is_installed and self.params['version'] in [None, 'latest']:
+            if not self.module.check_mode:
+                # Install the plugin (with dependencies)
+                install_script = (
+                    'd = Jenkins.instance.updateCenter.getPlugin("%s")'
+                    '.deploy(); d.get();' % self.params['name'])
+
+                if self.params['with_dependencies']:
+                    install_script = (
+                        'Jenkins.instance.updateCenter.getPlugin("%s")'
+                        '.getNeededDependencies().each{it.deploy()}; %s' % (
+                            self.params['name'], install_script))
+
+                script_data = {
+                    'script': install_script
+                }
+                data = urlencode(script_data)
+
+                # Send the installation request
+                r = self._get_url_data(
+                    "%s/scriptText" % self.url,
+                    msg_status="Cannot install plugin.",
+                    msg_exception="Plugin installation has failed.",
+                    data=data)
+
+                hpi_file = '%s/plugins/%s.hpi' % (
+                    self.params['jenkins_home'],
+                    self.params['name'])
+
+                if os.path.isfile(hpi_file):
+                    os.remove(hpi_file)
+
+            changed = True
+        else:
+            # Check if the plugin directory exists
+            if not os.path.isdir(self.params['jenkins_home']):
+                self.module.fail_json(
+                    msg="Jenkins home directory doesn't exist.")
+
+            md5sum_old = None
+            if os.path.isfile(plugin_file):
+                # Make the checksum of the currently installed plugin
+                with open(plugin_file, 'rb') as md5_plugin_fh:
+                    md5_plugin_content = md5_plugin_fh.read()
+                md5sum_old = hashlib.md5(md5_plugin_content).hexdigest()
+
+            if self.params['version'] in [None, 'latest']:
+                # Take latest version
+                plugin_url = (
+                    "%s/latest/%s.hpi" % (
+                        self.params['updates_url'],
+                        self.params['name']))
+            else:
+                # Take specific version
+                plugin_url = (
+                    "{0}/download/plugins/"
+                    "{1}/{2}/{1}.hpi".format(
+                        self.params['updates_url'],
+                        self.params['name'],
+                        self.params['version']))
+
+            if (
+                    self.params['updates_expiration'] == 0 or
+                    self.params['version'] not in [None, 'latest'] or
+                    md5sum_old is None):
+
+                # Download the plugin file directly
+                r = self._download_plugin(plugin_url)
+
+                # Write downloaded plugin into file if checksums don't match
+                if md5sum_old is None:
+                    # No previously
installed plugin + if not self.module.check_mode: + self._write_file(plugin_file, r) + + changed = True + else: + # Get data for the MD5 + data = r.read() + + # Make new checksum + md5sum_new = hashlib.md5(data).hexdigest() + + # If the checksum is different from the currently installed + # plugin, store the new plugin + if md5sum_old != md5sum_new: + if not self.module.check_mode: + self._write_file(plugin_file, data) + + changed = True + elif self.params['version'] == 'latest': + # Check for update from the updates JSON file + plugin_data = self._download_updates() + + try: + with open(plugin_file, 'rb') as sha1_plugin_fh: + sha1_plugin_content = sha1_plugin_fh.read() + sha1_old = hashlib.sha1(sha1_plugin_content) + except Exception as e: + self.module.fail_json( + msg="Cannot calculate SHA1 of the old plugin.", + details=to_native(e)) + + sha1sum_old = base64.b64encode(sha1_old.digest()) + + # If the latest version changed, download it + if sha1sum_old != to_bytes(plugin_data['sha1']): + if not self.module.check_mode: + r = self._download_plugin(plugin_url) + self._write_file(plugin_file, r) + + changed = True + + # Change file attributes if needed + if os.path.isfile(plugin_file): + params = { + 'dest': plugin_file + } + params.update(self.params) + file_args = self.module.load_file_common_arguments(params) + + if not self.module.check_mode: + # Not sure how to run this in the check mode + changed = self.module.set_fs_attributes_if_different( + file_args, changed) + else: + # See the comment above + changed = True + + return changed + + def _download_updates(self): + updates_filename = 'jenkins-plugin-cache.json' + updates_dir = os.path.expanduser('~/.ansible/tmp') + updates_file = "%s/%s" % (updates_dir, updates_filename) + download_updates = True + + # Check if we need to download new updates file + if os.path.isfile(updates_file): + # Get timestamp when the file was changed last time + ts_file = os.stat(updates_file).st_mtime + ts_now = time.time() + + if ts_now - ts_file < self.params['updates_expiration']: + download_updates = False + + updates_file_orig = updates_file + + # Download the updates file if needed + if download_updates: + url = "%s/update-center.json" % self.params['updates_url'] + + # Get the data + r = self._get_url_data( + url, + msg_status="Remote updates not found.", + msg_exception="Updates download failed.") + + # Write the updates file + update_fd, updates_file = tempfile.mkstemp() + os.write(update_fd, r.read()) + + try: + os.close(update_fd) + except IOError as e: + self.module.fail_json( + msg="Cannot close the tmp updates file %s." 
% updates_file,
+                details=to_native(e))
+
+        # Open the updates file
+        try:
+            f = open(updates_file, encoding='utf-8')
+        except IOError as e:
+            self.module.fail_json(
+                msg="Cannot open temporary updates file.",
+                details=to_native(e))
+
+        i = 0
+        for line in f:
+            # Read only the second line
+            if i == 1:
+                try:
+                    data = json.loads(line)
+                except Exception as e:
+                    self.module.fail_json(
+                        msg="Cannot load JSON data from the tmp updates file.",
+                        details=to_native(e))
+
+                break
+
+            i += 1
+
+        # Move the updates file to the right place if we could read it
+        if download_updates:
+            # Make sure the destination directory exists
+            if not os.path.isdir(updates_dir):
+                try:
+                    os.makedirs(updates_dir, int('0700', 8))
+                except OSError as e:
+                    self.module.fail_json(
+                        msg="Cannot create temporary directory.",
+                        details=to_native(e))
+
+            self.module.atomic_move(updates_file, updates_file_orig)
+
+        # Check if we have the plugin data available
+        if 'plugins' not in data or self.params['name'] not in data['plugins']:
+            self.module.fail_json(
+                msg="Cannot find plugin data in the updates file.")
+
+        return data['plugins'][self.params['name']]
+
+    def _download_plugin(self, plugin_url):
+        # Download the plugin
+        r = self._get_url_data(
+            plugin_url,
+            msg_status="Plugin not found.",
+            msg_exception="Plugin download failed.")
+
+        return r
+
+    def _write_file(self, f, data):
+        # Store the plugin into a temp file and then move it
+        tmp_f_fd, tmp_f = tempfile.mkstemp()
+
+        if isinstance(data, (text_type, binary_type)):
+            os.write(tmp_f_fd, data)
+        else:
+            os.write(tmp_f_fd, data.read())
+
+        try:
+            os.close(tmp_f_fd)
+        except IOError as e:
+            self.module.fail_json(
+                msg='Cannot close the temporary plugin file %s.' % tmp_f,
+                details=to_native(e))
+
+        # Move the file into the right place
+        self.module.atomic_move(tmp_f, f)
+
+    def uninstall(self):
+        changed = False
+
+        # Perform the action
+        if self.is_installed:
+            if not self.module.check_mode:
+                self._pm_query('doUninstall', 'Uninstallation')
+
+            changed = True
+
+        return changed
+
+    def pin(self):
+        return self._pinning('pin')
+
+    def unpin(self):
+        return self._pinning('unpin')
+
+    def _pinning(self, action):
+        changed = False
+
+        # Check if the plugin is pinned/unpinned
+        if (
+                action == 'pin' and not self.is_pinned or
+                action == 'unpin' and self.is_pinned):
+
+            # Perform the action
+            if not self.module.check_mode:
+                self._pm_query(action, "%sning" % action.capitalize())
+
+            changed = True
+
+        return changed
+
+    def enable(self):
+        return self._enabling('enable')
+
+    def disable(self):
+        return self._enabling('disable')
+
+    def _enabling(self, action):
+        changed = False
+
+        # Check if the plugin is enabled/disabled
+        if (
+                action == 'enable' and not self.is_enabled or
+                action == 'disable' and self.is_enabled):
+
+            # Perform the action
+            if not self.module.check_mode:
+                self._pm_query(
+                    "make%sd" % action.capitalize(),
+                    "%sing" % action[:-1].capitalize())
+
+            changed = True
+
+        return changed
+
+    def _pm_query(self, action, msg):
+        url = "%s/pluginManager/plugin/%s/%s" % (
+            self.params['url'], self.params['name'], action)
+
+        # Send the request
+        self._get_url_data(
+            url,
+            msg_status="Plugin not found. %s" % url,
+            msg_exception="%s has failed."
% msg) + + +def main(): + # Module arguments + argument_spec = url_argument_spec() + argument_spec.update( + group=dict(default='jenkins'), + jenkins_home=dict(default='/var/lib/jenkins'), + mode=dict(default='0644', type='raw'), + name=dict(required=True), + owner=dict(default='jenkins'), + params=dict(type='dict'), + state=dict( + choices=[ + 'present', + 'absent', + 'pinned', + 'unpinned', + 'enabled', + 'disabled', + 'latest'], + default='present'), + timeout=dict(default=30, type="int"), + updates_expiration=dict(default=86400, type="int"), + updates_url=dict(default='https://updates.jenkins.io'), + url=dict(default='http://localhost:8080'), + url_password=dict(no_log=True), + version=dict(), + with_dependencies=dict(default=True, type='bool'), + ) + # Module settings + module = AnsibleModule( + argument_spec=argument_spec, + add_file_common_args=True, + supports_check_mode=True, + ) + + # Params was removed + # https://meetbot.fedoraproject.org/ansible-meeting/2017-09-28/ansible_dev_meeting.2017-09-28-15.00.log.html + if module.params['params']: + module.fail_json(msg="The params option to jenkins_plugin was removed in Ansible 2.5 since it circumvents Ansible's option handling") + + # Force basic authentication + module.params['force_basic_auth'] = True + + # Convert timeout to float + try: + module.params['timeout'] = float(module.params['timeout']) + except ValueError as e: + module.fail_json( + msg='Cannot convert %s to float.' % module.params['timeout'], + details=to_native(e)) + + # Set version to latest if state is latest + if module.params['state'] == 'latest': + module.params['state'] = 'present' + module.params['version'] = 'latest' + + # Create some shortcuts + name = module.params['name'] + state = module.params['state'] + + # Initial change state of the task + changed = False + + # Instantiate the JenkinsPlugin object + jp = JenkinsPlugin(module) + + # Perform action depending on the requested state + if state == 'present': + changed = jp.install() + elif state == 'absent': + changed = jp.uninstall() + elif state == 'pinned': + changed = jp.pin() + elif state == 'unpinned': + changed = jp.unpin() + elif state == 'enabled': + changed = jp.enable() + elif state == 'disabled': + changed = jp.disable() + + # Print status of the change + module.exit_json(changed=changed, plugin=name, state=state) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/web_infrastructure/jenkins_script.py b/plugins/modules/web_infrastructure/jenkins_script.py new file mode 100644 index 0000000000..a43b79e9e1 --- /dev/null +++ b/plugins/modules/web_infrastructure/jenkins_script.py @@ -0,0 +1,196 @@ +#!/usr/bin/python + +# encoding: utf-8 + +# (c) 2016, James Hogarth +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +author: James Hogarth (@hogarthj) +module: jenkins_script +short_description: Executes a groovy script in the jenkins instance +description: + - The C(jenkins_script) module takes a script plus a dict of values + to use within the script and returns the result of the script being run. + +options: + script: + description: + - The groovy script to be executed. + This gets passed as a string Template if args is defined. 
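+      - For example, a script of C(println "${greeting}") combined with
+        C(args: {greeting: hello}) is executed as C(println "hello");
+        substitution is performed by Python's string.Template, so placeholders
+        use the C($name) or C(${name}) form.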
+    required: true
+  url:
+    description:
+      - The jenkins server to execute the script against. The default is a local
+        jenkins instance that is not being proxied through a webserver.
+    default: http://localhost:8080
+  validate_certs:
+    description:
+      - If set to C(no), the SSL certificates will not be validated.
+        This should only be set to C(no) on personally controlled sites
+        using self-signed certificates, as it avoids verifying the source site.
+    type: bool
+    default: 'yes'
+  user:
+    description:
+      - The username to connect to the jenkins server with.
+  password:
+    description:
+      - The password to connect to the jenkins server with.
+  timeout:
+    description:
+      - The request timeout in seconds.
+    default: 10
+  args:
+    description:
+      - A dict of key-value pairs used in formatting the script using string.Template (see https://docs.python.org/2/library/string.html#template-strings).
+
+notes:
+  - Since the script can do anything, this module does not report on changes.
+    Because the script is always run, it is important to set C(changed_when)
+    for the Ansible output to be clear on any alterations made.
+
+'''
+
+EXAMPLES = '''
+- name: Obtaining a list of plugins
+  jenkins_script:
+    script: 'println(Jenkins.instance.pluginManager.plugins)'
+    user: admin
+    password: admin
+
+- name: Setting master using a variable to hold a more complicated script
+  set_fact:
+    setmaster_mode: |
+      import jenkins.model.*
+      instance = Jenkins.getInstance()
+      instance.setMode(${jenkins_mode})
+      instance.save()
+
+- name: Use the variable as the script
+  jenkins_script:
+    script: "{{ setmaster_mode }}"
+    args:
+      jenkins_mode: Node.Mode.EXCLUSIVE
+
+- name: Interacting with an untrusted HTTPS connection
+  jenkins_script:
+    script: "println(Jenkins.instance.pluginManager.plugins)"
+    user: admin
+    password: admin
+    url: https://localhost
+    validate_certs: no
+'''
+
+RETURN = '''
+output:
+  description: Result of script
+  returned: success
+  type: str
+  sample: 'Result: true'
+'''
+
+import json
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves import http_cookiejar as cookiejar
+from ansible.module_utils.six.moves.urllib.parse import urlencode
+from ansible.module_utils.urls import fetch_url
+from ansible.module_utils._text import to_native
+
+
+def is_csrf_protection_enabled(module):
+    resp, info = fetch_url(module,
+                           module.params['url'] + '/api/json',
+                           timeout=module.params['timeout'],
+                           method='GET')
+    if info["status"] != 200:
+        module.fail_json(msg="HTTP error " + str(info["status"]) + " " + info["msg"], output='')
+
+    content = to_native(resp.read())
+    return json.loads(content).get('useCrumbs', False)
+
+
+def get_crumb(module, cookies):
+    resp, info = fetch_url(module,
+                           module.params['url'] + '/crumbIssuer/api/json',
+                           method='GET',
+                           timeout=module.params['timeout'],
+                           cookies=cookies)
+    if info["status"] != 200:
+        module.fail_json(msg="HTTP error " + str(info["status"]) + " " + info["msg"], output='')
+
+    content = to_native(resp.read())
+    return json.loads(content)
+
+
+def main():
+
+    module = AnsibleModule(
+        argument_spec=dict(
+            script=dict(required=True, type="str"),
+            url=dict(required=False, type="str", default="http://localhost:8080"),
+            validate_certs=dict(required=False, type="bool", default=True),
+            user=dict(required=False, type="str", default=None),
+            password=dict(required=False, no_log=True, type="str", default=None),
+            timeout=dict(required=False, type="int", default=10),
+            args=dict(required=False, type="dict", default=None)
+        )
+    )
+
+    if
module.params['user'] is not None: + if module.params['password'] is None: + module.fail_json(msg="password required when user provided", output='') + module.params['url_username'] = module.params['user'] + module.params['url_password'] = module.params['password'] + module.params['force_basic_auth'] = True + + if module.params['args'] is not None: + from string import Template + try: + script_contents = Template(module.params['script']).substitute(module.params['args']) + except KeyError as err: + module.fail_json(msg="Error with templating variable: %s" % err, output='') + else: + script_contents = module.params['script'] + + headers = {} + cookies = None + if is_csrf_protection_enabled(module): + cookies = cookiejar.LWPCookieJar() + crumb = get_crumb(module, cookies) + headers = {crumb['crumbRequestField']: crumb['crumb']} + + resp, info = fetch_url(module, + module.params['url'] + "/scriptText", + data=urlencode({'script': script_contents}), + headers=headers, + method="POST", + timeout=module.params['timeout'], + cookies=cookies) + + if info["status"] != 200: + module.fail_json(msg="HTTP error " + str(info["status"]) + " " + info["msg"], output='') + + result = to_native(resp.read()) + + if 'Exception:' in result and 'at java.lang.Thread' in result: + module.fail_json(msg="script failed with stacktrace:\n " + result, output='') + + module.exit_json( + output=result, + ) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/web_infrastructure/jira.py b/plugins/modules/web_infrastructure/jira.py new file mode 100644 index 0000000000..f13ae84c89 --- /dev/null +++ b/plugins/modules/web_infrastructure/jira.py @@ -0,0 +1,443 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2014, Steve Smith +# Atlassian open-source approval reference OSR-76. +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +module: jira +short_description: create and modify issues in a JIRA instance +description: + - Create and modify issues in a JIRA instance. + +options: + uri: + required: true + description: + - Base URI for the JIRA instance. + + operation: + required: true + aliases: [ command ] + choices: [ create, comment, edit, fetch, transition , link ] + description: + - The operation to perform. + + username: + required: true + description: + - The username to log-in with. + + password: + required: true + description: + - The password to log-in with. + + project: + required: false + description: + - The project for this operation. Required for issue creation. + + summary: + required: false + description: + - The issue summary, where appropriate. + + description: + required: false + description: + - The issue description, where appropriate. + + issuetype: + required: false + description: + - The issue type, for issue creation. + + issue: + required: false + description: + - An existing issue key to operate on. + + comment: + required: false + description: + - The comment text to add. + + status: + required: false + description: + - The desired status; only relevant for the transition operation. + + assignee: + required: false + description: + - Sets the assignee on create or transition operations. Note not all transitions will allow this. 
+ + linktype: + required: false + description: + - Set type of link, when action 'link' selected. + + inwardissue: + required: false + description: + - Set issue from which link will be created. + + outwardissue: + required: false + description: + - Set issue to which link will be created. + + fields: + required: false + description: + - This is a free-form data structure that can contain arbitrary data. This is passed directly to the JIRA REST API + (possibly after merging with other required data, as when passed to create). See examples for more information, + and the JIRA REST API for the structure required for various fields. + + timeout: + required: false + description: + - Set timeout, in seconds, on requests to JIRA API. + default: 10 + + validate_certs: + required: false + description: + - Require valid SSL certificates (set to `false` if you'd like to use self-signed certificates) + default: true + type: bool + +notes: + - "Currently this only works with basic-auth." + +author: "Steve Smith (@tarka)" +''' + +EXAMPLES = """ +# Create a new issue and add a comment to it: +- name: Create an issue + jira: + uri: '{{ server }}' + username: '{{ user }}' + password: '{{ pass }}' + project: ANS + operation: create + summary: Example Issue + description: Created using Ansible + issuetype: Task + register: issue + +- name: Comment on issue + jira: + uri: '{{ server }}' + username: '{{ user }}' + password: '{{ pass }}' + issue: '{{ issue.meta.key }}' + operation: comment + comment: A comment added by Ansible + +# Assign an existing issue using edit +- name: Assign an issue using free-form fields + jira: + uri: '{{ server }}' + username: '{{ user }}' + password: '{{ pass }}' + issue: '{{ issue.meta.key}}' + operation: edit + assignee: ssmith + +# Create an issue with an existing assignee +- name: Create an assigned issue + jira: + uri: '{{ server }}' + username: '{{ user }}' + password: '{{ pass }}' + project: ANS + operation: create + summary: Assigned issue + description: Created and assigned using Ansible + issuetype: Task + assignee: ssmith + +# Edit an issue +- name: Set the labels on an issue using free-form fields + jira: + uri: '{{ server }}' + username: '{{ user }}' + password: '{{ pass }}' + issue: '{{ issue.meta.key }}' + operation: edit + args: + fields: + labels: + - autocreated + - ansible + +# Retrieve metadata for an issue and use it to create an account +- name: Get an issue + jira: + uri: '{{ server }}' + username: '{{ user }}' + password: '{{ pass }}' + project: ANS + operation: fetch + issue: ANS-63 + register: issue + +- name: Create a unix account for the reporter + become: true + user: + name: '{{ issue.meta.fields.creator.name }}' + comment: '{{ issue.meta.fields.creator.displayName }}' + +# You can get list of valid linktypes at /rest/api/2/issueLinkType +# url of your jira installation. 
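+# For example (hypothetical host):
+#   curl -u user:pass https://jira.example.com/rest/api/2/issueLinkType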
+- name: Create link from HSP-1 to MKY-1
+  jira:
+    uri: '{{ server }}'
+    username: '{{ user }}'
+    password: '{{ pass }}'
+    operation: link
+    linktype: Relates
+    inwardissue: HSP-1
+    outwardissue: MKY-1
+
+# Transition an issue by target status
+- name: Close the issue
+  jira:
+    uri: '{{ server }}'
+    username: '{{ user }}'
+    password: '{{ pass }}'
+    issue: '{{ issue.meta.key }}'
+    operation: transition
+    status: Done
+"""
+
+import base64
+import json
+import sys
+from ansible.module_utils._text import to_text, to_bytes
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import fetch_url
+
+
+def request(url, user, passwd, timeout, data=None, method=None):
+    if data:
+        data = json.dumps(data)
+
+    # NOTE: fetch_url uses a password manager, which follows the
+    # standard request-then-challenge basic-auth semantics. However as
+    # JIRA allows some unauthorised operations it doesn't necessarily
+    # send the challenge, so the request occurs as the anonymous user,
+    # producing unexpected results. To work around this we manually
+    # inject the basic-auth header up-front to ensure that JIRA treats
+    # the requests as authorized for this user.
+    auth = to_text(base64.b64encode(to_bytes('{0}:{1}'.format(user, passwd), errors='surrogate_or_strict')))
+
+    response, info = fetch_url(module, url, data=data, method=method, timeout=timeout,
+                               headers={'Content-Type': 'application/json',
+                                        'Authorization': "Basic %s" % auth})
+
+    if info['status'] not in (200, 201, 204):
+        module.fail_json(msg=info['msg'])
+
+    body = response.read()
+
+    if body:
+        return json.loads(to_text(body, errors='surrogate_or_strict'))
+    else:
+        return {}
+
+
+def post(url, user, passwd, timeout, data):
+    return request(url, user, passwd, timeout, data=data, method='POST')
+
+
+def put(url, user, passwd, timeout, data):
+    return request(url, user, passwd, timeout, data=data, method='PUT')
+
+
+def get(url, user, passwd, timeout):
+    return request(url, user, passwd, timeout)
+
+
+def create(restbase, user, passwd, params):
+    createfields = {
+        'project': {'key': params['project']},
+        'summary': params['summary'],
+        'issuetype': {'name': params['issuetype']}}
+
+    if params['description']:
+        createfields['description'] = params['description']
+
+    # Merge in any additional or overridden fields
+    if params['fields']:
+        createfields.update(params['fields'])
+
+    data = {'fields': createfields}
+
+    url = restbase + '/issue/'
+
+    ret = post(url, user, passwd, params['timeout'], data)
+
+    return ret
+
+
+def comment(restbase, user, passwd, params):
+    data = {
+        'body': params['comment']
+    }
+
+    url = restbase + '/issue/' + params['issue'] + '/comment'
+
+    ret = post(url, user, passwd, params['timeout'], data)
+
+    return ret
+
+
+def edit(restbase, user, passwd, params):
+    data = {
+        'fields': params['fields']
+    }
+
+    url = restbase + '/issue/' + params['issue']
+
+    ret = put(url, user, passwd, params['timeout'], data)
+
+    return ret
+
+
+def fetch(restbase, user, passwd, params):
+    url = restbase + '/issue/' + params['issue']
+    ret = get(url, user, passwd, params['timeout'])
+    return ret
+
+
+def transition(restbase, user, passwd, params):
+    # Find the transition id
+    turl = restbase + '/issue/' + params['issue'] + "/transitions"
+    tmeta = get(turl, user, passwd, params['timeout'])
+
+    target = params['status']
+    tid = None
+    for t in tmeta['transitions']:
+        if t['name'] == target:
+            tid = t['id']
+            break
+
+    if not tid:
+        raise ValueError("Failed to find a valid transition for '%s'" % target)
+
+    # Perform it
+    url = restbase + '/issue/' + params['issue'] + "/transitions"
+    data = {'transition': {"id": tid},
+            'fields': params['fields']}
+
+    ret = post(url, user, passwd, params['timeout'], data)
+
+    return ret
+
+
+def link(restbase, user, passwd, params):
+    data = {
+        'type': {'name': params['linktype']},
+        'inwardIssue': {'key': params['inwardissue']},
+        'outwardIssue': {'key': params['outwardissue']},
+    }
+
+    url = restbase + '/issueLink/'
+
+    ret = post(url, user, passwd, params['timeout'], data)
+
+    return ret
+
+
+# Some parameters are required depending on the operation:
+OP_REQUIRED = dict(create=['project', 'issuetype', 'summary'],
+                   comment=['issue', 'comment'],
+                   edit=[],
+                   fetch=['issue'],
+                   transition=['status'],
+                   link=['linktype', 'inwardissue', 'outwardissue'])
+
+
+def main():
+
+    global module
+    module = AnsibleModule(
+        argument_spec=dict(
+            uri=dict(required=True),
+            operation=dict(choices=['create', 'comment', 'edit', 'fetch', 'transition', 'link'],
+                           aliases=['command'], required=True),
+            username=dict(required=True),
+            password=dict(required=True, no_log=True),
+            project=dict(),
+            summary=dict(),
+            description=dict(),
+            issuetype=dict(),
+            issue=dict(aliases=['ticket']),
+            comment=dict(),
+            status=dict(),
+            assignee=dict(),
+            fields=dict(default={}, type='dict'),
+            linktype=dict(),
+            inwardissue=dict(),
+            outwardissue=dict(),
+            timeout=dict(type='float', default=10),
+            validate_certs=dict(default=True, type='bool'),
+        ),
+        supports_check_mode=False
+    )
+
+    op = module.params['operation']
+
+    # Check we have the necessary per-operation parameters
+    missing = []
+    for parm in OP_REQUIRED[op]:
+        if not module.params[parm]:
+            missing.append(parm)
+    if missing:
+        module.fail_json(msg="Operation %s requires the following missing parameters: %s" % (op, ",".join(missing)))
+
+    # Handle rest of parameters
+    uri = module.params['uri']
+    user = module.params['username']
+    passwd = module.params['password']
+    if module.params['assignee']:
+        module.params['fields']['assignee'] = {'name': module.params['assignee']}
+
+    if not uri.endswith('/'):
+        uri = uri + '/'
+    restbase = uri + 'rest/api/2'
+
+    # Dispatch
+    try:
+
+        # Lookup the corresponding method for this operation. This is
+        # safe as the AnsibleModule should remove any unknown operations.
+        thismod = sys.modules[__name__]
+        method = getattr(thismod, op)
+
+        ret = method(restbase, user, passwd, module.params)
+
+    except Exception as e:
+        return module.fail_json(msg=to_text(e))
+
+    module.exit_json(changed=True, meta=ret)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/web_infrastructure/nginx_status_facts.py b/plugins/modules/web_infrastructure/nginx_status_facts.py
new file mode 100644
index 0000000000..88c374ea07
--- /dev/null
+++ b/plugins/modules/web_infrastructure/nginx_status_facts.py
@@ -0,0 +1,163 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (c) 2016, René Moser
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['deprecated'],
+                    'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: nginx_status_facts
+deprecated:
+  removed_in: '2.13'
+  why: Deprecated in favour of C(_info) module.
+  alternative: Use M(nginx_status_info) instead.
+short_description: Retrieve nginx status facts.
+description:
+  - Gathers facts from nginx from a URL having C(stub_status) enabled.
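Looking back at the jira module's dispatch step: main() resolves the requested operation to the same-named top-level function via getattr on the current module, which is why the operation names and the function names must stay in sync. A minimal sketch of the pattern, with hypothetical handlers:

    import sys

    def fetch():
        return 'fetched'

    def comment():
        return 'commented'

    # look up a handler on the current module by name, as jira's main() does
    op = 'fetch'
    method = getattr(sys.modules[__name__], op)
    print(method())  # fetched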
+author: "René Moser (@resmo)" +options: + url: + description: + - URL of the nginx status. + required: true + timeout: + description: + - HTTP connection timeout in seconds. + required: false + default: 10 + +notes: + - See http://nginx.org/en/docs/http/ngx_http_stub_status_module.html for more information. +''' + +EXAMPLES = ''' +# Gather status facts from nginx on localhost +- name: get current http stats + nginx_status_facts: + url: http://localhost/nginx_status + +# Gather status facts from nginx on localhost with a custom timeout of 20 seconds +- name: get current http stats + nginx_status_facts: + url: http://localhost/nginx_status + timeout: 20 +''' + +RETURN = ''' +--- +nginx_status_facts.active_connections: + description: Active connections. + returned: success + type: int + sample: 2340 +nginx_status_facts.accepts: + description: The total number of accepted client connections. + returned: success + type: int + sample: 81769947 +nginx_status_facts.handled: + description: The total number of handled connections. Generally, the parameter value is the same as accepts unless some resource limits have been reached. + returned: success + type: int + sample: 81769947 +nginx_status_facts.requests: + description: The total number of client requests. + returned: success + type: int + sample: 144332345 +nginx_status_facts.reading: + description: The current number of connections where nginx is reading the request header. + returned: success + type: int + sample: 0 +nginx_status_facts.writing: + description: The current number of connections where nginx is writing the response back to the client. + returned: success + type: int + sample: 241 +nginx_status_facts.waiting: + description: The current number of idle client connections waiting for a request. + returned: success + type: int + sample: 2092 +nginx_status_facts.data: + description: HTTP response as is. 
+ returned: success + type: str + sample: "Active connections: 2340 \nserver accepts handled requests\n 81769947 81769947 144332345 \nReading: 0 Writing: 241 Waiting: 2092 \n" +''' + +import re +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.urls import fetch_url +from ansible.module_utils._text import to_text + + +class NginxStatusFacts(object): + + def __init__(self): + self.url = module.params.get('url') + self.timeout = module.params.get('timeout') + + def run(self): + result = { + 'nginx_status_facts': { + 'active_connections': None, + 'accepts': None, + 'handled': None, + 'requests': None, + 'reading': None, + 'writing': None, + 'waiting': None, + 'data': None, + } + } + (response, info) = fetch_url(module=module, url=self.url, force=True, timeout=self.timeout) + if not response: + module.fail_json(msg="No valid or no response from url %s within %s seconds (timeout)" % (self.url, self.timeout)) + + data = to_text(response.read(), errors='surrogate_or_strict') + if not data: + return result + + result['nginx_status_facts']['data'] = data + expr = r'Active connections: ([0-9]+) \nserver accepts handled requests\n ([0-9]+) ([0-9]+) ([0-9]+) \n' \ + r'Reading: ([0-9]+) Writing: ([0-9]+) Waiting: ([0-9]+)' + match = re.match(expr, data, re.S) + if match: + result['nginx_status_facts']['active_connections'] = int(match.group(1)) + result['nginx_status_facts']['accepts'] = int(match.group(2)) + result['nginx_status_facts']['handled'] = int(match.group(3)) + result['nginx_status_facts']['requests'] = int(match.group(4)) + result['nginx_status_facts']['reading'] = int(match.group(5)) + result['nginx_status_facts']['writing'] = int(match.group(6)) + result['nginx_status_facts']['waiting'] = int(match.group(7)) + return result + + +def main(): + global module + module = AnsibleModule( + argument_spec=dict( + url=dict(required=True), + timeout=dict(type='int', default=10), + ), + supports_check_mode=True, + ) + + nginx_status_facts = NginxStatusFacts().run() + result = dict(changed=False, ansible_facts=nginx_status_facts) + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/web_infrastructure/nginx_status_info.py b/plugins/modules/web_infrastructure/nginx_status_info.py new file mode 100644 index 0000000000..93f6df58f7 --- /dev/null +++ b/plugins/modules/web_infrastructure/nginx_status_info.py @@ -0,0 +1,160 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# (c) 2016, René Moser +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = r''' +--- +module: nginx_status_info +short_description: Retrieve information on nginx status. +description: + - Gathers information from nginx from an URL having C(stub_status) enabled. +author: "René Moser (@resmo)" +options: + url: + type: str + description: + - URL of the nginx status. + required: true + timeout: + type: int + description: + - HTTP connection timeout in seconds. + required: false + default: 10 + +notes: + - See U(http://nginx.org/en/docs/http/ngx_http_stub_status_module.html) for more information. 
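Both nginx status modules parse the stub_status payload with a single regular expression, as shown in the run() method above. A standalone sketch with a sample payload, using the same expression:

    import re

    # sample stub_status payload in the format the modules parse
    data = ('Active connections: 2340 \n'
            'server accepts handled requests\n'
            ' 81769947 81769947 144332345 \n'
            'Reading: 0 Writing: 241 Waiting: 2092 \n')
    expr = (r'Active connections: ([0-9]+) \nserver accepts handled requests\n'
            r' ([0-9]+) ([0-9]+) ([0-9]+) \n'
            r'Reading: ([0-9]+) Writing: ([0-9]+) Waiting: ([0-9]+)')
    match = re.match(expr, data, re.S)
    if match:
        print(int(match.group(1)))  # 2340 active connections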
+''' + +EXAMPLES = r''' +# Gather status info from nginx on localhost +- name: get current http stats + nginx_status_info: + url: http://localhost/nginx_status + register: result + +# Gather status info from nginx on localhost with a custom timeout of 20 seconds +- name: get current http stats + nginx_status_info: + url: http://localhost/nginx_status + timeout: 20 + register: result +''' + +RETURN = r''' +--- +active_connections: + description: Active connections. + returned: success + type: int + sample: 2340 +accepts: + description: The total number of accepted client connections. + returned: success + type: int + sample: 81769947 +handled: + description: The total number of handled connections. Generally, the parameter value is the same as accepts unless some resource limits have been reached. + returned: success + type: int + sample: 81769947 +requests: + description: The total number of client requests. + returned: success + type: int + sample: 144332345 +reading: + description: The current number of connections where nginx is reading the request header. + returned: success + type: int + sample: 0 +writing: + description: The current number of connections where nginx is writing the response back to the client. + returned: success + type: int + sample: 241 +waiting: + description: The current number of idle client connections waiting for a request. + returned: success + type: int + sample: 2092 +data: + description: HTTP response as is. + returned: success + type: str + sample: "Active connections: 2340 \nserver accepts handled requests\n 81769947 81769947 144332345 \nReading: 0 Writing: 241 Waiting: 2092 \n" +''' + +import re +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.urls import fetch_url +from ansible.module_utils._text import to_text + + +class NginxStatusInfo(object): + + def __init__(self): + self.url = module.params.get('url') + self.timeout = module.params.get('timeout') + + def run(self): + result = { + 'active_connections': None, + 'accepts': None, + 'handled': None, + 'requests': None, + 'reading': None, + 'writing': None, + 'waiting': None, + 'data': None, + } + (response, info) = fetch_url(module=module, url=self.url, force=True, timeout=self.timeout) + if not response: + module.fail_json(msg="No valid or no response from url %s within %s seconds (timeout)" % (self.url, self.timeout)) + + data = to_text(response.read(), errors='surrogate_or_strict') + if not data: + return result + + result['data'] = data + expr = r'Active connections: ([0-9]+) \nserver accepts handled requests\n ([0-9]+) ([0-9]+) ([0-9]+) \n' \ + r'Reading: ([0-9]+) Writing: ([0-9]+) Waiting: ([0-9]+)' + match = re.match(expr, data, re.S) + if match: + result['active_connections'] = int(match.group(1)) + result['accepts'] = int(match.group(2)) + result['handled'] = int(match.group(3)) + result['requests'] = int(match.group(4)) + result['reading'] = int(match.group(5)) + result['writing'] = int(match.group(6)) + result['waiting'] = int(match.group(7)) + return result + + +def main(): + global module + module = AnsibleModule( + argument_spec=dict( + url=dict(type='str', required=True), + timeout=dict(type='int', default=10), + ), + supports_check_mode=True, + ) + + nginx_status_info = NginxStatusInfo().run() + module.exit_json(changed=False, **nginx_status_info) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/web_infrastructure/rundeck_acl_policy.py b/plugins/modules/web_infrastructure/rundeck_acl_policy.py new file mode 100644 index 
0000000000..ed10cbb604
--- /dev/null
+++ b/plugins/modules/web_infrastructure/rundeck_acl_policy.py
@@ -0,0 +1,230 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2017, Loic Blot
+# Sponsored by Infopro Digital. http://www.infopro-digital.com/
+# Sponsored by E.T.A.I. http://www.etai.fr/
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+DOCUMENTATION = '''
+---
+module: rundeck_acl_policy
+
+short_description: Manage Rundeck ACL policies.
+description:
+    - Create, update and remove Rundeck ACL policies through HTTP API.
+author: "Loic Blot (@nerzhul)"
+options:
+    state:
+        description:
+            - Create or remove the Rundeck ACL policy.
+        choices: ['present', 'absent']
+        default: 'present'
+    name:
+        description:
+            - Sets the ACL policy name.
+        required: True
+    url:
+        description:
+            - Sets the rundeck instance URL.
+        required: True
+    api_version:
+        description:
+            - Sets the API version used by module.
+            - API version must be at least 14.
+        default: 14
+    token:
+        description:
+            - Sets the token to authenticate against Rundeck API.
+        required: True
+    project:
+        description:
+            - Sets the project which receives the ACL policy.
+            - If unset, it's a system ACL policy.
+    policy:
+        description:
+            - Sets the ACL policy content.
+            - ACL policy content is a YAML object as described in http://rundeck.org/docs/man5/aclpolicy.html.
+            - It can be a YAML string or a pure Ansible inventory YAML object.
+extends_documentation_fragment: url
+'''

+EXAMPLES = '''
+- name: Create or update a rundeck ACL policy in project Ansible
+  rundeck_acl_policy:
+    name: "Project_01"
+    api_version: 18
+    url: "https://rundeck.example.org"
+    token: "mytoken"
+    state: present
+    project: "Ansible"
+    policy:
+      description: "my policy"
+      context:
+        application: rundeck
+      for:
+        project:
+          - allow: read
+      by:
+        group: "build"
+
+- name: Remove a rundeck system policy
+  rundeck_acl_policy:
+    name: "Project_02"
+    url: "https://rundeck.example.org"
+    token: "mytoken"
+    state: absent
+'''
+
+RETURN = '''
+rundeck_response:
+    description: Rundeck response when a failure occurs.
+    returned: failed
+    type: str
+before:
+    description: Dictionary containing ACL policy information before modification.
+    returned: success
+    type: dict
+after:
+    description: Dictionary containing ACL policy information after modification.
+    returned: success
+    type: dict
+'''
+
+# import module snippets
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import fetch_url, url_argument_spec
+from ansible.module_utils._text import to_text
+import json
+
+
+class RundeckACLManager:
+    def __init__(self, module):
+        self.module = module
+
+    def handle_http_code_if_needed(self, infos):
+        if infos["status"] == 403:
+            self.module.fail_json(msg="Token not allowed. Please ensure token is allowed or has the correct "
+                                      "permissions.", rundeck_response=infos["body"])
+        elif infos["status"] >= 500:
+            self.module.fail_json(msg="Fatal Rundeck API error.", rundeck_response=infos["body"])
+
+    def request_rundeck_api(self, query, data=None, method="GET"):
+        resp, info = fetch_url(self.module,
+                               "%s/api/%d/%s" % (self.module.params["url"], self.module.params["api_version"], query),
+                               data=json.dumps(data),
+                               method=method,
+                               headers={
+                                   "Content-Type": "application/json",
+                                   "Accept": "application/json",
+                                   "X-Rundeck-Auth-Token": self.module.params["token"]
+                               })
+
+        self.handle_http_code_if_needed(info)
+        if resp is not None:
+            resp = resp.read()
+            if resp != b"":
+                try:
+                    json_resp = json.loads(to_text(resp, errors='surrogate_or_strict'))
+                    return json_resp, info
+                except ValueError as e:
+                    self.module.fail_json(msg="Rundeck response was not a valid JSON. Exception was: %s. "
+                                              "Object was: %s" % (str(e), resp))
+        return resp, info
+
+    def get_acl(self):
+        resp, info = self.request_rundeck_api("system/acl/%s.aclpolicy" % self.module.params["name"])
+        return resp
+
+    def create_or_update_acl(self):
+        facts = self.get_acl()
+        if facts is None:
+            # If in check mode don't create the ACL, simulate a fake creation
+            if self.module.check_mode:
+                self.module.exit_json(changed=True, before={}, after=self.module.params["policy"])
+
+            _, info = self.request_rundeck_api("system/acl/%s.aclpolicy" % self.module.params["name"],
+                                               method="POST",
+                                               data={"contents": self.module.params["policy"]})
+
+            if info["status"] == 201:
+                self.module.exit_json(changed=True, before={}, after=self.get_acl())
+            elif info["status"] == 400:
+                self.module.fail_json(msg="Unable to validate acl %s. Please ensure it's a valid ACL" %
+                                          self.module.params["name"])
+            elif info["status"] == 409:
+                self.module.fail_json(msg="ACL %s already exists" % self.module.params["name"])
+            else:
+                self.module.fail_json(msg="Unhandled HTTP status %d, please report the bug" % info["status"],
+                                      before={}, after=self.get_acl())
+        else:
+            if facts["contents"] == self.module.params["policy"]:
+                self.module.exit_json(changed=False, before=facts, after=facts)
+
+            if self.module.check_mode:
+                self.module.exit_json(changed=True, before=facts, after=facts)
+
+            _, info = self.request_rundeck_api("system/acl/%s.aclpolicy" % self.module.params["name"],
+                                               method="PUT",
+                                               data={"contents": self.module.params["policy"]})
+
+            if info["status"] == 200:
+                self.module.exit_json(changed=True, before=facts, after=self.get_acl())
+            elif info["status"] == 400:
+                self.module.fail_json(msg="Unable to validate acl %s. Please ensure it's a valid ACL" %
+                                          self.module.params["name"])
+            elif info["status"] == 404:
+                self.module.fail_json(msg="ACL %s doesn't exist. Cannot update."
% self.module.params["name"]) + + def remove_acl(self): + facts = self.get_acl() + if facts is None: + self.module.exit_json(changed=False, before={}, after={}) + else: + # If not in check mode, remove the project + if not self.module.check_mode: + self.request_rundeck_api("system/acl/%s.aclpolicy" % self.module.params["name"], method="DELETE") + self.module.exit_json(changed=True, before=facts, after={}) + + +def main(): + # Also allow the user to set values for fetch_url + argument_spec = url_argument_spec() + argument_spec.update(dict( + state=dict(type='str', choices=['present', 'absent'], default='present'), + name=dict(required=True, type='str'), + url=dict(required=True, type='str'), + api_version=dict(type='int', default=14), + token=dict(required=True, type='str', no_log=True), + policy=dict(type='str'), + project=dict(type='str'), + )) + + module = AnsibleModule( + argument_spec=argument_spec, + required_if=[ + ['state', 'present', ['policy']], + ], + supports_check_mode=True + ) + + if module.params["api_version"] < 14: + module.fail_json(msg="API version should be at least 14") + + rundeck = RundeckACLManager(module) + if module.params['state'] == 'present': + rundeck.create_or_update_acl() + elif module.params['state'] == 'absent': + rundeck.remove_acl() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/web_infrastructure/rundeck_project.py b/plugins/modules/web_infrastructure/rundeck_project.py new file mode 100644 index 0000000000..a5f179be1d --- /dev/null +++ b/plugins/modules/web_infrastructure/rundeck_project.py @@ -0,0 +1,188 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Ansible module to manage rundeck projects +# (c) 2017, Loic Blot +# Sponsored by Infopro Digital. http://www.infopro-digital.com/ +# Sponsored by E.T.A.I. http://www.etai.fr/ +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: rundeck_project + +short_description: Manage Rundeck projects. +description: + - Create and remove Rundeck projects through HTTP API. +author: "Loic Blot (@nerzhul)" +options: + state: + description: + - Create or remove Rundeck project. + choices: ['present', 'absent'] + default: 'present' + name: + description: + - Sets the project name. + required: True + url: + description: + - Sets the rundeck instance URL. + required: True + api_version: + description: + - Sets the API version used by module. + - API version must be at least 14. + default: 14 + token: + description: + - Sets the token to authenticate against Rundeck API. 
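Both Rundeck modules shape their API calls identically: the endpoint is composed from the instance URL, the API version and a query path, and the token travels in the X-Rundeck-Auth-Token header. A minimal sketch of that request shape, with a hypothetical instance URL and token:

    import json

    # hypothetical Rundeck instance, API version and token
    url = '%s/api/%d/%s' % ('https://rundeck.example.org', 14, 'project/Project_01')
    headers = {
        'Content-Type': 'application/json',
        'Accept': 'application/json',
        'X-Rundeck-Auth-Token': 'mytoken',
    }
    body = json.dumps({'name': 'Project_01', 'config': {}})  # POST payload for project creation
    print(url, headers, body)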
+ required: True +extends_documentation_fragment: url +''' + +EXAMPLES = ''' +- name: Create a rundeck project + rundeck_project: + name: "Project_01" + api_version: 18 + url: "https://rundeck.example.org" + token: "mytoken" + state: present + +- name: Remove a rundeck project + rundeck_project: + name: "Project_02" + url: "https://rundeck.example.org" + token: "mytoken" + state: absent +''' + +RETURN = ''' +rundeck_response: + description: Rundeck response when a failure occurs + returned: failed + type: str +before: + description: dictionary containing project information before modification + returned: success + type: dict +after: + description: dictionary containing project information after modification + returned: success + type: dict +''' + +# import module snippets +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_native +from ansible.module_utils.urls import fetch_url, url_argument_spec +import json + + +class RundeckProjectManager(object): + def __init__(self, module): + self.module = module + + def handle_http_code_if_needed(self, infos): + if infos["status"] == 403: + self.module.fail_json(msg="Token not allowed. Please ensure token is allowed or has the correct " + "permissions.", rundeck_response=infos["body"]) + elif infos["status"] >= 500: + self.module.fail_json(msg="Fatal Rundeck API error.", rundeck_response=infos["body"]) + + def request_rundeck_api(self, query, data=None, method="GET"): + resp, info = fetch_url(self.module, + "%s/api/%d/%s" % (self.module.params["url"], self.module.params["api_version"], query), + data=json.dumps(data), + method=method, + headers={ + "Content-Type": "application/json", + "Accept": "application/json", + "X-Rundeck-Auth-Token": self.module.params["token"] + }) + + self.handle_http_code_if_needed(info) + if resp is not None: + resp = resp.read() + if resp != "": + try: + json_resp = json.loads(resp) + return json_resp, info + except ValueError as e: + self.module.fail_json(msg="Rundeck response was not a valid JSON. Exception was: %s. 
" + "Object was: %s" % (to_native(e), resp)) + return resp, info + + def get_project_facts(self): + resp, info = self.request_rundeck_api("project/%s" % self.module.params["name"]) + return resp + + def create_or_update_project(self): + facts = self.get_project_facts() + if facts is None: + # If in check mode don't create project, simulate a fake project creation + if self.module.check_mode: + self.module.exit_json(changed=True, before={}, after={"name": self.module.params["name"]}) + + resp, info = self.request_rundeck_api("projects", method="POST", data={ + "name": self.module.params["name"], + "config": {} + }) + + if info["status"] == 201: + self.module.exit_json(changed=True, before={}, after=self.get_project_facts()) + else: + self.module.fail_json(msg="Unhandled HTTP status %d, please report the bug" % info["status"], + before={}, after=self.get_project_facts()) + else: + self.module.exit_json(changed=False, before=facts, after=facts) + + def remove_project(self): + facts = self.get_project_facts() + if facts is None: + self.module.exit_json(changed=False, before={}, after={}) + else: + # If not in check mode, remove the project + if not self.module.check_mode: + self.request_rundeck_api("project/%s" % self.module.params["name"], method="DELETE") + self.module.exit_json(changed=True, before=facts, after={}) + + +def main(): + # Also allow the user to set values for fetch_url + argument_spec = url_argument_spec() + argument_spec.update(dict( + state=dict(type='str', choices=['present', 'absent'], default='present'), + name=dict(required=True, type='str'), + url=dict(required=True, type='str'), + api_version=dict(type='int', default=14), + token=dict(required=True, type='str', no_log=True), + )) + + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True + ) + + if module.params["api_version"] < 14: + module.fail_json(msg="API version should be at least 14") + + rundeck = RundeckProjectManager(module) + if module.params['state'] == 'present': + rundeck.create_or_update_project() + elif module.params['state'] == 'absent': + rundeck.remove_project() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/web_infrastructure/sophos_utm/utm_aaa_group.py b/plugins/modules/web_infrastructure/sophos_utm/utm_aaa_group.py new file mode 100644 index 0000000000..75f0568f33 --- /dev/null +++ b/plugins/modules/web_infrastructure/sophos_utm/utm_aaa_group.py @@ -0,0 +1,226 @@ +#!/usr/bin/python + +# Copyright: (c) 2018, Johannes Brunswicker +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +ANSIBLE_METADATA = { + 'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community' +} + +DOCUMENTATION = ''' +--- +module: utm_aaa_group + +author: + - Johannes Brunswicker (@MatrixCrawler) + +short_description: Create, update or destroy an aaa group object in Sophos UTM. + +description: + - Create, update or destroy an aaa group object in Sophos UTM. + - This module needs to have the REST Ability of the UTM to be activated. + + +options: + name: + description: + - The name of the object. Will be used to identify the entry. + type: str + required: true + adirectory_groups: + description: + - List of adirectory group strings. + type: list + adirectory_groups_sids: + description: + - Dictionary of group sids. + type: dict + backend_match: + description: + - The backend for the group. 
+        type: str
+        choices:
+            - none
+            - adirectory
+            - edirectory
+            - radius
+            - tacacs
+            - ldap
+        default: none
+    comment:
+        description:
+            - Comment that describes the AAA group.
+        type: str
+        default: ''
+    dynamic:
+        description:
+            - Group type. Is static if none is selected.
+        type: str
+        default: none
+        choices:
+            - none
+            - ipsec_dn
+            - directory_groups
+    edirectory_groups:
+        description:
+            - List of edirectory group strings.
+        type: list
+    ipsec_dn:
+        description:
+            - The ipsec dn string.
+        type: str
+    ldap_attribute:
+        description:
+            - The ldap attribute to check against.
+        type: str
+    ldap_attribute_value:
+        description:
+            - The ldap attribute value to check against.
+        type: str
+    members:
+        description:
+            - A list of user ref names (aaa/user).
+        type: list
+        default: []
+    network:
+        description:
+            - The network reference name. The object contains the known ip addresses for the authentication object (network/aaa).
+        type: str
+        default: ""
+    radius_groups:
+        description:
+            - A list of radius group strings.
+        type: list
+        default: []
+    tacacs_groups:
+        description:
+            - A list of tacacs group strings.
+        type: list
+        default: []
+
+extends_documentation_fragment:
+- community.general.utm
+
+'''
+
+EXAMPLES = """
+- name: Create UTM aaa_group
+  utm_aaa_group:
+    utm_host: sophos.host.name
+    utm_token: abcdefghijklmno1234
+    name: TestAAAGroupEntry
+    backend_match: ldap
+    dynamic: directory_groups
+    ldap_attribute: memberof
+    ldap_attribute_value: "cn=groupname,ou=Groups,dc=mydomain,dc=com"
+    network: REF_OBJECT_STRING
+    state: present
+
+- name: Remove UTM aaa_group
+  utm_aaa_group:
+    utm_host: sophos.host.name
+    utm_token: abcdefghijklmno1234
+    name: TestAAAGroupEntry
+    state: absent
+"""
+
+RETURN = """
+result:
+    description: The utm object that was created.
+    returned: success
+    type: complex
+    contains:
+        _ref:
+            description: The reference name of the object.
+            type: str
+        _locked:
+            description: Whether or not the object is currently locked.
+            type: bool
+        _type:
+            description: The type of the object.
+            type: str
+        name:
+            description: The name of the object.
+            type: str
+        adirectory_groups:
+            description: List of Active Directory Groups.
+            type: str
+        adirectory_groups_sids:
+            description: List of Active Directory Groups SIDS.
+            type: list
+        backend_match:
+            description: The backend to use.
+            type: str
+        comment:
+            description: The comment string.
+            type: str
+        dynamic:
+            description: Whether the group match is ipsec_dn or directory_group.
+            type: str
+        edirectory_groups:
+            description: List of eDirectory Groups.
+            type: str
+        ipsec_dn:
+            description: ipsec_dn identifier to match.
+            type: str
+        ldap_attribute:
+            description: The LDAP Attribute to match against.
+            type: str
+        ldap_attribute_value:
+            description: The LDAP Attribute Value to match against.
+            type: str
+        members:
+            description: List of member identifiers of the group.
+            type: list
+        network:
+            description: The identifier of the network (network/aaa).
+            type: str
+        radius_group:
+            description: The radius group identifier.
+            type: str
+        tacacs_group:
+            description: The tacacs group identifier.
+            type: str
+"""
+
+from ansible_collections.community.general.plugins.module_utils.utm_utils import UTM, UTMModule
+from ansible.module_utils._text import to_native
+
+
+def main():
+    endpoint = "aaa/group"
+    key_to_check_for_changes = ["comment", "adirectory_groups", "adirectory_groups_sids", "backend_match", "dynamic",
+                                "edirectory_groups", "ipsec_dn", "ldap_attribute", "ldap_attribute_value", "members",
+                                "network", "radius_groups", "tacacs_groups"]
+    module = UTMModule(
+        argument_spec=dict(
+            name=dict(type='str', required=True),
+            adirectory_groups=dict(type='list', elements='str', required=False, default=[]),
+            adirectory_groups_sids=dict(type='dict', required=False, default={}),
+            backend_match=dict(type='str', required=False, default="none",
+                               choices=["none", "adirectory", "edirectory", "radius", "tacacs", "ldap"]),
+            comment=dict(type='str', required=False, default=""),
+            dynamic=dict(type='str', required=False, default="none", choices=["none", "ipsec_dn", "directory_groups"]),
+            edirectory_groups=dict(type='list', elements='str', required=False, default=[]),
+            ipsec_dn=dict(type='str', required=False, default=""),
+            ldap_attribute=dict(type='str', required=False, default=""),
+            ldap_attribute_value=dict(type='str', required=False, default=""),
+            members=dict(type='list', elements='str', required=False, default=[]),
+            network=dict(type='str', required=False, default=""),
+            radius_groups=dict(type='list', elements='str', required=False, default=[]),
+            tacacs_groups=dict(type='list', elements='str', required=False, default=[]),
+        )
+    )
+    try:
+        UTM(module, endpoint, key_to_check_for_changes).execute()
+    except Exception as e:
+        module.fail_json(msg=to_native(e))
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/web_infrastructure/sophos_utm/utm_aaa_group_info.py b/plugins/modules/web_infrastructure/sophos_utm/utm_aaa_group_info.py
new file mode 100644
index 0000000000..fd5303c219
--- /dev/null
+++ b/plugins/modules/web_infrastructure/sophos_utm/utm_aaa_group_info.py
@@ -0,0 +1,127 @@
+#!/usr/bin/python
+
+# Copyright: (c) 2018, Johannes Brunswicker
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+    'metadata_version': '1.1',
+    'status': ['preview'],
+    'supported_by': 'community'
+}
+
+DOCUMENTATION = '''
+---
+module: utm_aaa_group_info
+
+author:
+    - Johannes Brunswicker (@MatrixCrawler)
+
+short_description: get info for an aaa group entry in Sophos UTM
+
+description:
+    - Get info for an aaa group entry in SOPHOS UTM.
+
+
+options:
+    name:
+        description:
+            - The name of the object.
Will be used to identify the entry + required: true + +extends_documentation_fragment: +- community.general.utm + +''' + +EXAMPLES = """ +- name: Remove UTM aaa_group + utm_aaa_group_info: + utm_host: sophos.host.name + utm_token: abcdefghijklmno1234 + name: TestAAAGroupEntry +""" + +RETURN = """ +result: + description: The utm object that was created + returned: success + type: complex + contains: + _ref: + description: The reference name of the object + type: str + _locked: + description: Whether or not the object is currently locked + type: bool + _type: + description: The type of the object + type: str + name: + description: The name of the object + type: str + adirectory_groups: + description: List of Active Directory Groups + type: str + adirectory_groups_sids: + description: List of Active Directory Groups SIDS + type: list + backend_match: + description: The backend to use + type: str + comment: + description: The comment string + type: str + dynamic: + description: Whether the group match is ipsec_dn or directory_group + type: str + edirectory_groups: + description: List of eDirectory Groups + type: str + ipsec_dn: + description: ipsec_dn identifier to match + type: str + ldap_attribute: + description: The LDAP Attribute to match against + type: str + ldap_attribute_value: + description: The LDAP Attribute Value to match against + type: str + members: + description: List of member identifiers of the group + type: list + network: + description: The identifier of the network (network/aaa) + type: str + radius_group: + description: The radius group identifier + type: str + tacacs_group: + description: The tacacs group identifier + type: str +""" + +from ansible_collections.community.general.plugins.module_utils.utm_utils import UTM, UTMModule +from ansible.module_utils._text import to_native + + +def main(): + endpoint = "aaa/group" + key_to_check_for_changes = [] + module = UTMModule( + argument_spec=dict( + name=dict(type='str', required=True) + ) + ) + try: + UTM(module, endpoint, key_to_check_for_changes, info_only=True).execute() + except Exception as e: + module.fail_json(msg=to_native(e)) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/web_infrastructure/sophos_utm/utm_ca_host_key_cert.py b/plugins/modules/web_infrastructure/sophos_utm/utm_ca_host_key_cert.py new file mode 100644 index 0000000000..a598682d7c --- /dev/null +++ b/plugins/modules/web_infrastructure/sophos_utm/utm_ca_host_key_cert.py @@ -0,0 +1,163 @@ +#!/usr/bin/python + +# Copyright: (c) 2018, Stephan Schwarz +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +ANSIBLE_METADATA = { + 'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community' +} + +DOCUMENTATION = ''' +--- +module: utm_ca_host_key_cert + +author: + - Stephan Schwarz (@stearz) + +short_description: create, update or destroy ca host_key_cert entry in Sophos UTM + +description: + - Create, update or destroy a ca host_key_cert entry in SOPHOS UTM. + - This module needs to have the REST Ability of the UTM to be activated. + + +options: + name: + description: + - The name of the object. Will be used to identify the entry. + required: true + ca: + description: + - A reference to an existing utm_ca_signing_ca or utm_ca_verification_ca object. + required: true + meta: + description: + - A reference to an existing utm_ca_meta_x509 object. 
+ required: true + certificate: + description: + - The certificate in PEM format. + required: true + comment: + description: + - Optional comment string. + encrypted: + description: + - Optionally enable encryption. + default: False + type: bool + key: + description: + - Optional private key in PEM format. + +extends_documentation_fragment: +- community.general.utm + +''' + +EXAMPLES = """ +# Create a ca_host_key_cert entry +- name: utm ca_host_key_cert + utm_ca_host_key_cert: + utm_host: sophos.host.name + utm_token: abcdefghijklmno1234 + name: TestHostKeyCertEntry + ca: REF_ca/signing_ca_OBJECT_STRING + meta: REF_ca/meta_x509_OBJECT_STRING + certificate: | + --- BEGIN CERTIFICATE --- + . . . + . . . + . . . + --- END CERTIFICATE --- + state: present + +# Remove a ca_host_key_cert entry +- name: utm ca_host_key_cert + utm_ca_host_key_cert: + utm_host: sophos.host.name + utm_token: abcdefghijklmno1234 + name: TestHostKeyCertEntry + state: absent + +# Read a ca_host_key_cert entry +- name: utm ca_host_key_cert + utm_ca_host_key_cert: + utm_host: sophos.host.name + utm_token: abcdefghijklmno1234 + name: TestHostKeyCertEntry + state: info + +""" + +RETURN = """ +result: + description: The utm object that was created + returned: success + type: complex + contains: + _ref: + description: The reference name of the object + type: str + _locked: + description: Whether or not the object is currently locked + type: bool + _type: + description: The type of the object + type: str + name: + description: The name of the object + type: str + ca: + description: A reference to an existing utm_ca_signing_ca or utm_ca_verification_ca object. + type: str + meta: + description: A reference to an existing utm_ca_meta_x509 object. + type: str + certificate: + description: The certificate in PEM format + type: str + comment: + description: Comment string (may be empty string) + type: str + encrypted: + description: If encryption is enabled + type: bool + key: + description: Private key in PEM format (may be empty string) + type: str +""" + +from ansible_collections.community.general.plugins.module_utils.utm_utils import UTM, UTMModule +from ansible.module_utils._text import to_native + + +def main(): + endpoint = "ca/host_key_cert" + key_to_check_for_changes = ["ca", "certificate", "comment", "encrypted", "key", "meta"] + module = UTMModule( + argument_spec=dict( + name=dict(type='str', required=True), + ca=dict(type='str', required=True), + meta=dict(type='str', required=True), + certificate=dict(type='str', required=True), + comment=dict(type='str', required=False), + encrypted=dict(type='bool', required=False, default=False), + key=dict(type='str', required=False, no_log=True), + ) + ) + try: + # This is needed because the bool value only accepts int values in the backend + UTM(module, endpoint, key_to_check_for_changes).execute() + except Exception as e: + module.fail_json(msg=to_native(e)) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/web_infrastructure/sophos_utm/utm_ca_host_key_cert_info.py b/plugins/modules/web_infrastructure/sophos_utm/utm_ca_host_key_cert_info.py new file mode 100644 index 0000000000..cc643c919e --- /dev/null +++ b/plugins/modules/web_infrastructure/sophos_utm/utm_ca_host_key_cert_info.py @@ -0,0 +1,106 @@ +#!/usr/bin/python + +# Copyright: (c) 2018, Stephan Schwarz +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + 
+ANSIBLE_METADATA = { + 'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community' +} + +DOCUMENTATION = ''' +--- +module: utm_ca_host_key_cert_info + +author: + - Stephan Schwarz (@stearz) + +short_description: Get info for a ca host_key_cert entry in Sophos UTM + +description: + - Get info for a ca host_key_cert entry in SOPHOS UTM. + + +options: + name: + description: + - The name of the object. Will be used to identify the entry + required: true + +extends_documentation_fragment: +- community.general.utm + +''' + +EXAMPLES = """ +- name: utm ca_host_key_cert_info + utm_ca_host_key_cert_info: + utm_host: sophos.host.name + utm_token: abcdefghijklmno1234 + name: TestHostKeyCertEntry +""" + +RETURN = """ +result: + description: The utm object that was created + returned: success + type: complex + contains: + _ref: + description: The reference name of the object + type: str + _locked: + description: Whether or not the object is currently locked + type: bool + _type: + description: The type of the object + type: str + name: + description: The name of the object + type: str + ca: + description: A reference to an existing utm_ca_signing_ca or utm_ca_verification_ca object. + type: str + meta: + description: A reference to an existing utm_ca_meta_x509 object. + type: str + certificate: + description: The certificate in PEM format + type: str + comment: + description: Comment string (may be empty string) + type: str + encrypted: + description: If encryption is enabled + type: bool + key: + description: Private key in PEM format (may be empty string) + type: str +""" + +from ansible_collections.community.general.plugins.module_utils.utm_utils import UTM, UTMModule +from ansible.module_utils._text import to_native + + +def main(): + endpoint = "ca/host_key_cert" + key_to_check_for_changes = [] + module = UTMModule( + argument_spec=dict( + name=dict(type='str', required=True) + ) + ) + try: + # This is needed because the bool value only accepts int values in the backend + UTM(module, endpoint, key_to_check_for_changes, info_only=True).execute() + except Exception as e: + module.fail_json(msg=to_native(e)) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/web_infrastructure/sophos_utm/utm_dns_host.py b/plugins/modules/web_infrastructure/sophos_utm/utm_dns_host.py new file mode 100644 index 0000000000..a7ac79914e --- /dev/null +++ b/plugins/modules/web_infrastructure/sophos_utm/utm_dns_host.py @@ -0,0 +1,156 @@ +#!/usr/bin/python + +# Copyright: (c) 2018, Johannes Brunswicker +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +ANSIBLE_METADATA = { + 'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community' +} + +DOCUMENTATION = ''' +--- +module: utm_dns_host + +author: + - Johannes Brunswicker (@MatrixCrawler) + +short_description: create, update or destroy dns entry in Sophos UTM + +description: + - Create, update or destroy a dns entry in SOPHOS UTM. + - This module needs to have the REST Ability of the UTM to be activated. + + +options: + name: + description: + - The name of the object. Will be used to identify the entry + required: true + address: + description: + - The IPV4 Address of the entry. Can be left empty for automatic resolving. + default: 0.0.0.0 + address6: + description: + - The IPV6 Address of the entry. Can be left empty for automatic resolving. 
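All of these Sophos UTM modules hand a key_to_check_for_changes list to the shared UTM helper. Conceptually, that list whitelists the fields compared between the existing object and the requested arguments to decide whether an update is needed. A sketch of the idea under that assumption; this is an illustration of the concept, not the utm_utils implementation:

    # whitelist-based change detection over plain dicts (conceptual sketch only)
    def is_object_changed(keys, existing, desired):
        # only the whitelisted keys participate in the comparison
        return any(existing.get(k) != desired.get(k) for k in keys)

    existing = {'comment': '', 'hostname': 'old.example.com', 'interface': ''}
    desired = {'comment': '', 'hostname': 'new.example.com', 'interface': ''}
    print(is_object_changed(['comment', 'hostname', 'interface'], existing, desired))  # True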
+ default: "::" + comment: + description: + - An optional comment to add to the dns host object + hostname: + description: + - The hostname for the dns host object + interface: + description: + - The reference name of the interface to use. If not provided the default interface will be used + resolved: + description: + - whether the hostname's ipv4 address is already resolved or not + default: False + type: bool + resolved6: + description: + - whether the hostname's ipv6 address is already resolved or not + default: False + type: bool + timeout: + description: + - the timeout for the utm to resolve the ip address for the hostname again + default: 0 + +extends_documentation_fragment: +- community.general.utm + +''' + +EXAMPLES = """ +- name: Create UTM dns host entry + utm_dns_host: + utm_host: sophos.host.name + utm_token: abcdefghijklmno1234 + name: TestDNSEntry + hostname: testentry.some.tld + state: present + +- name: Remove UTM dns host entry + utm_dns_host: + utm_host: sophos.host.name + utm_token: abcdefghijklmno1234 + name: TestDNSEntry + state: absent +""" + +RETURN = """ +result: + description: The utm object that was created + returned: success + type: complex + contains: + _ref: + description: The reference name of the object + type: str + _locked: + description: Whether or not the object is currently locked + type: bool + name: + description: The name of the object + type: str + address: + description: The ipv4 address of the object + type: str + address6: + description: The ipv6 address of the object + type: str + comment: + description: The comment string + type: str + hostname: + description: The hostname of the object + type: str + interface: + description: The reference name of the interface the object is associated with + type: str + resolved: + description: Whether the ipv4 address is resolved or not + type: bool + resolved6: + description: Whether the ipv6 address is resolved or not + type: bool + timeout: + description: The timeout until a new resolving will be attempted + type: int +""" + +from ansible_collections.community.general.plugins.module_utils.utm_utils import UTM, UTMModule +from ansible.module_utils._text import to_native + + +def main(): + endpoint = "network/dns_host" + key_to_check_for_changes = ["comment", "hostname", "interface"] + module = UTMModule( + argument_spec=dict( + name=dict(type='str', required=True), + address=dict(type='str', required=False, default='0.0.0.0'), + address6=dict(type='str', required=False, default='::'), + comment=dict(type='str', required=False, default=""), + hostname=dict(type='str', required=False), + interface=dict(type='str', required=False, default=""), + resolved=dict(type='bool', required=False, default=False), + resolved6=dict(type='bool', required=False, default=False), + timeout=dict(type='int', required=False, default=0), + ) + ) + try: + UTM(module, endpoint, key_to_check_for_changes).execute() + except Exception as e: + module.fail_json(msg=to_native(e)) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/web_infrastructure/sophos_utm/utm_network_interface_address.py b/plugins/modules/web_infrastructure/sophos_utm/utm_network_interface_address.py new file mode 100644 index 0000000000..84bd5f13f8 --- /dev/null +++ b/plugins/modules/web_infrastructure/sophos_utm/utm_network_interface_address.py @@ -0,0 +1,136 @@ +#!/usr/bin/python + +# Copyright: (c) 2018, Juergen Wiebe +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +from __future__ import 
absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+    'metadata_version': '1.1',
+    'status': ['preview'],
+    'supported_by': 'community'
+}
+
+DOCUMENTATION = '''
+---
+module: utm_network_interface_address
+
+author:
+    - Juergen Wiebe (@steamx)
+
+short_description: Create, update or destroy network/interface_address object
+
+description:
+    - Create, update or destroy a network/interface_address object in SOPHOS UTM.
+    - This module needs to have the REST Ability of the UTM to be activated.
+
+
+options:
+    name:
+        description:
+            - The name of the object. Will be used to identify the entry
+        required: true
+    address:
+        description:
+            - The ip4 address of the network/interface_address object.
+        required: true
+    address6:
+        description:
+            - The ip6 address of the network/interface_address object.
+        required: false
+    comment:
+        description:
+            - An optional comment to add to the object
+    resolved:
+        description:
+            - Whether or not the ip4 address is resolved
+    resolved6:
+        description:
+            - Whether or not the ip6 address is resolved
+
+extends_documentation_fragment:
+- community.general.utm
+
+'''
+
+EXAMPLES = """
+# Create a network interface address
+- name: utm network interface address
+  utm_network_interface_address:
+    utm_host: sophos.host.name
+    utm_token: abcdefghijklmno1234
+    name: TestNetworkInterfaceAddress
+    address: 0.0.0.0
+    state: present
+
+# Remove a network interface address
+- name: utm network interface address
+  utm_network_interface_address:
+    utm_host: sophos.host.name
+    utm_token: abcdefghijklmno1234
+    name: TestNetworkInterfaceAddress
+    address: 0.0.0.0
+    state: absent
+"""
+
+RETURN = """
+result:
+    description: The utm object that was created
+    returned: success
+    type: complex
+    contains:
+        _ref:
+            description: The reference name of the object
+            type: str
+        _locked:
+            description: Whether or not the object is currently locked
+            type: bool
+        _type:
+            description: The type of the object
+            type: str
+        name:
+            description: The name of the object
+            type: str
+        address:
+            description: The ip4 address of the network/interface_address object
+            type: str
+        address6:
+            description: The ip6 address of the network/interface_address object
+            type: str
+        comment:
+            description: The comment string
+            type: str
+        resolved:
+            description: Whether or not the ip4 address is resolved
+            type: bool
+        resolved6:
+            description: Whether or not the ip6 address is resolved
+            type: bool
+"""
+
+from ansible_collections.community.general.plugins.module_utils.utm_utils import UTM, UTMModule
+from ansible.module_utils._text import to_native
+
+
+def main():
+    endpoint = "network/interface_address"
+    key_to_check_for_changes = ["comment", "address"]
+    module = UTMModule(
+        argument_spec=dict(
+            name=dict(type='str', required=True),
+            address=dict(type='str', required=True),
+            comment=dict(type='str', required=False, default=""),
+            address6=dict(type='str', required=False),
+            resolved=dict(type='bool', required=False),
+            resolved6=dict(type='bool', required=False)
+        )
+    )
+    try:
+        UTM(module, endpoint, key_to_check_for_changes).execute()
+    except Exception as e:
+        module.fail_json(msg=to_native(e))
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/web_infrastructure/sophos_utm/utm_network_interface_address_info.py b/plugins/modules/web_infrastructure/sophos_utm/utm_network_interface_address_info.py
new file mode 100644
index 0000000000..dc68ac3521
--- /dev/null
+++ b/plugins/modules/web_infrastructure/sophos_utm/utm_network_interface_address_info.py
@@ -0,0 +1,101 @@
+#!/usr/bin/python
+
+# Copyright: (c) 2018, Juergen Wiebe
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+    'metadata_version': '1.1',
+    'status': ['preview'],
+    'supported_by': 'community'
+}
+
+DOCUMENTATION = '''
+---
+module: utm_network_interface_address_info
+
+author:
+    - Juergen Wiebe (@steamx)
+
+short_description: Get info for a network/interface_address object
+
+description:
+    - Get info for a network/interface_address object in SOPHOS UTM.
+
+
+options:
+    name:
+        description:
+            - The name of the object. Will be used to identify the entry
+        required: true
+
+extends_documentation_fragment:
+- community.general.utm
+
+'''
+
+EXAMPLES = """
+- name: utm network interface address
+  utm_network_interface_address_info:
+    utm_host: sophos.host.name
+    utm_token: abcdefghijklmno1234
+    name: TestNetworkInterfaceAddress
+"""
+
+RETURN = """
+result:
+    description: The utm object that was created
+    returned: success
+    type: complex
+    contains:
+        _ref:
+            description: The reference name of the object
+            type: str
+        _locked:
+            description: Whether or not the object is currently locked
+            type: bool
+        _type:
+            description: The type of the object
+            type: str
+        name:
+            description: The name of the object
+            type: str
+        address:
+            description: The ip4 address of the network/interface_address object
+            type: str
+        address6:
+            description: The ip6 address of the network/interface_address object
+            type: str
+        comment:
+            description: The comment string
+            type: str
+        resolved:
+            description: Whether or not the ip4 address is resolved
+            type: bool
+        resolved6:
+            description: Whether or not the ip6 address is resolved
+            type: bool
+"""
+
+from ansible_collections.community.general.plugins.module_utils.utm_utils import UTM, UTMModule
+from ansible.module_utils._text import to_native
+
+
+def main():
+    endpoint = "network/interface_address"
+    key_to_check_for_changes = []
+    module = UTMModule(
+        argument_spec=dict(
+            name=dict(type='str', required=True)
+        )
+    )
+    try:
+        UTM(module, endpoint, key_to_check_for_changes, info_only=True).execute()
+    except Exception as e:
+        module.fail_json(msg=to_native(e))
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/web_infrastructure/sophos_utm/utm_proxy_auth_profile.py b/plugins/modules/web_infrastructure/sophos_utm/utm_proxy_auth_profile.py
new file mode 100644
index 0000000000..45398c0ad7
--- /dev/null
+++ b/plugins/modules/web_infrastructure/sophos_utm/utm_proxy_auth_profile.py
@@ -0,0 +1,348 @@
+#!/usr/bin/python
+
+# Copyright: (c) 2018, Stephan Schwarz
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+    'metadata_version': '1.1',
+    'status': ['preview'],
+    'supported_by': 'community'
+}
+
+DOCUMENTATION = '''
+---
+module: utm_proxy_auth_profile
+
+author:
+    - Stephan Schwarz (@stearz)
+
+short_description: create, update or destroy reverse_proxy auth_profile entry in Sophos UTM
+
+description:
+    - Create, update or destroy a reverse_proxy auth_profile entry in SOPHOS UTM.
+    - This module needs to have the REST Ability of the UTM to be activated.
+
+
+options:
+    name:
+        description:
+            - The name of the object.
Will be used to identify the entry + required: true + aaa: + description: + - List of references to utm_aaa objects (allowed users or groups) + required: true + basic_prompt: + description: + - The message in the basic authentication prompt + required: true + backend_mode: + description: + - Specifies if the backend server needs authentication ([Basic|None]) + default: None + choices: + - Basic + - None + backend_strip_basic_auth: + description: + - Should the login data be stripped when proxying the request to the backend host + type: bool + default: True + choices: + - True + - False + backend_user_prefix: + description: + - Prefix string to prepend to the username for backend authentication + default: "" + backend_user_suffix: + description: + - Suffix string to append to the username for backend authentication + default: "" + comment: + description: + - Optional comment string + default: "" + frontend_cookie: + description: + - Frontend cookie name + frontend_cookie_secret: + description: + - Frontend cookie secret + frontend_form: + description: + - Frontend authentication form name + frontend_form_template: + description: + - Frontend authentication form template + default: "" + frontend_login: + description: + - Frontend login name + frontend_logout: + description: + - Frontend logout name + frontend_mode: + description: + - Frontend authentication mode (Form|Basic) + default: Basic + choices: + - Basic + - Form + frontend_realm: + description: + - Frontend authentication realm + frontend_session_allow_persistency: + description: + - Allow session persistency + type: bool + default: False + choices: + - True + - False + frontend_session_lifetime: + description: + - session lifetime + required: true + frontend_session_lifetime_limited: + description: + - Specifies if limitation of session lifetime is active + type: bool + default: True + choices: + - True + - False + frontend_session_lifetime_scope: + description: + - scope for frontend_session_lifetime (days|hours|minutes) + default: hours + choices: + - days + - hours + - minutes + frontend_session_timeout: + description: + - session timeout + required: true + frontend_session_timeout_enabled: + description: + - Specifies if session timeout is active + type: bool + default: True + choices: + - True + - False + frontend_session_timeout_scope: + description: + - scope for frontend_session_timeout (days|hours|minutes) + default: minutes + choices: + - days + - hours + - minutes + logout_delegation_urls: + description: + - List of logout URLs that logouts are delegated to + default: [] + logout_mode: + description: + - Mode of logout (None|Delegation) + default: None + choices: + - None + - Delegation + redirect_to_requested_url: + description: + - Should a redirect to the requested URL be made + type: bool + default: False + choices: + - True + - False + +extends_documentation_fragment: +- community.general.utm + +''' + +EXAMPLES = """ +- name: Create UTM proxy_auth_profile + utm_proxy_auth_profile: + utm_host: sophos.host.name + utm_token: abcdefghijklmno1234 + name: TestAuthProfileEntry + aaa: [REF_OBJECT_STRING,REF_ANOTHEROBJECT_STRING] + basic_prompt: "Authentication required: Please login" + frontend_session_lifetime: 1 + frontend_session_timeout: 1 + state: present + +- name: Remove UTM proxy_auth_profile + utm_proxy_auth_profile: + utm_host: sophos.host.name + utm_token: abcdefghijklmno1234 + name: TestAuthProfileEntry + state: absent + +- name: Read UTM proxy_auth_profile + utm_proxy_auth_profile: + utm_host: sophos.host.name 
+ utm_token: abcdefghijklmno1234 + name: TestAuthProfileEntry + state: info + +""" + +RETURN = """ +result: + description: The utm object that was created + returned: success + type: complex + contains: + _ref: + description: The reference name of the object + type: str + _locked: + description: Whether or not the object is currently locked + type: bool + _type: + description: The type of the object + type: str + name: + description: The name of the object + type: str + aaa: + description: List of references to utm_aaa objects (allowed users or groups) + type: list + basic_prompt: + description: The message in the basic authentication prompt + type: str + backend_mode: + description: Specifies if the backend server needs authentication ([Basic|None]) + type: str + backend_strip_basic_auth: + description: Should the login data be stripped when proxying the request to the backend host + type: bool + backend_user_prefix: + description: Prefix string to prepend to the username for backend authentication + type: str + backend_user_suffix: + description: Suffix string to append to the username for backend authentication + type: str + comment: + description: Optional comment string + type: str + frontend_cookie: + description: Frontend cookie name + type: str + frontend_cookie_secret: + description: Frontend cookie secret + type: str + frontend_form: + description: Frontend authentication form name + type: str + frontend_form_template: + description: Frontend authentication form template + type: str + frontend_login: + description: Frontend login name + type: str + frontend_logout: + description: Frontend logout name + type: str + frontend_mode: + description: Frontend authentication mode (Form|Basic) + type: str + frontend_realm: + description: Frontend authentication realm + type: str + frontend_session_allow_persistency: + description: Allow session persistency + type: bool + frontend_session_lifetime: + description: session lifetime + type: int + frontend_session_lifetime_limited: + description: Specifies if limitation of session lifetime is active + type: bool + frontend_session_lifetime_scope: + description: scope for frontend_session_lifetime (days|hours|minutes) + type: str + frontend_session_timeout: + description: session timeout + type: int + frontend_session_timeout_enabled: + description: Specifies if session timeout is active + type: bool + frontend_session_timeout_scope: + description: scope for frontend_session_timeout (days|hours|minutes) + type: str + logout_delegation_urls: + description: List of logout URLs that logouts are delegated to + type: list + logout_mode: + description: Mode of logout (None|Delegation) + type: str + redirect_to_requested_url: + description: Should a redirect to the requested URL be made + type: bool +""" + +from ansible_collections.community.general.plugins.module_utils.utm_utils import UTM, UTMModule +from ansible.module_utils._text import to_native + + +def main(): + endpoint = "reverse_proxy/auth_profile" + key_to_check_for_changes = ["aaa", "basic_prompt", "backend_mode", "backend_strip_basic_auth", + "backend_user_prefix", "backend_user_suffix", "comment", "frontend_cookie", + "frontend_cookie_secret", "frontend_form", "frontend_form_template", + "frontend_login", "frontend_logout", "frontend_mode", "frontend_realm", + "frontend_session_allow_persistency", "frontend_session_lifetime", + "frontend_session_lifetime_limited", "frontend_session_lifetime_scope", + "frontend_session_timeout", "frontend_session_timeout_enabled", + 
"frontend_session_timeout_scope", "logout_delegation_urls", "logout_mode", + "redirect_to_requested_url"] + + module = UTMModule( + argument_spec=dict( + name=dict(type='str', required=True), + aaa=dict(type='list', elements='str', required=True), + basic_prompt=dict(type='str', required=True), + backend_mode=dict(type='str', required=False, default="None", choices=['Basic', 'None']), + backend_strip_basic_auth=dict(type='bool', required=False, default=True, choices=[True, False]), + backend_user_prefix=dict(type='str', required=False, default=""), + backend_user_suffix=dict(type='str', required=False, default=""), + comment=dict(type='str', required=False, default=""), + frontend_cookie=dict(type='str', required=False), + frontend_cookie_secret=dict(type='str', required=False), + frontend_form=dict(type='str', required=False), + frontend_form_template=dict(type='str', required=False, default=""), + frontend_login=dict(type='str', required=False), + frontend_logout=dict(type='str', required=False), + frontend_mode=dict(type='str', required=False, default="Basic", choices=['Basic', 'Form']), + frontend_realm=dict(type='str', required=False), + frontend_session_allow_persistency=dict(type='bool', required=False, default=False, choices=[True, False]), + frontend_session_lifetime=dict(type='int', required=True), + frontend_session_lifetime_limited=dict(type='bool', required=False, default=True, choices=[True, False]), + frontend_session_lifetime_scope=dict(type='str', required=False, default="hours", choices=['days', 'hours', 'minutes']), + frontend_session_timeout=dict(type='int', required=True), + frontend_session_timeout_enabled=dict(type='bool', required=False, default=True, choices=[True, False]), + frontend_session_timeout_scope=dict(type='str', required=False, default="minutes", choices=['days', 'hours', 'minutes']), + logout_delegation_urls=dict(type='list', elements='str', required=False, default=[]), + logout_mode=dict(type='str', required=False, default="None", choices=['None', 'Delegation']), + redirect_to_requested_url=dict(type='bool', required=False, default=False, choices=[True, False]) + ) + ) + try: + UTM(module, endpoint, key_to_check_for_changes).execute() + except Exception as e: + module.fail_json(msg=to_native(e)) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/web_infrastructure/sophos_utm/utm_proxy_exception.py b/plugins/modules/web_infrastructure/sophos_utm/utm_proxy_exception.py new file mode 100644 index 0000000000..ce6f121698 --- /dev/null +++ b/plugins/modules/web_infrastructure/sophos_utm/utm_proxy_exception.py @@ -0,0 +1,242 @@ +#!/usr/bin/python + +# Copyright: (c) 2018, Sebastian Schenzel +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +ANSIBLE_METADATA = { + 'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community' +} + +DOCUMENTATION = ''' +--- +module: utm_proxy_exception + +author: + - Sebastian Schenzel (@RickS-C137) + +short_description: Create, update or destroy reverse_proxy exception entry in Sophos UTM + +description: + - Create, update or destroy a reverse_proxy exception entry in SOPHOS UTM. + - This module needs to have the REST Ability of the UTM to be activated. + + +options: + name: + description: + - The name of the object. 
Will be used to identify the entry
+        required: True
+        type: str
+    op:
+        description:
+          - The operand to be used with the entries of the path parameter
+        default: 'AND'
+        choices:
+          - 'AND'
+          - 'OR'
+        required: False
+        type: str
+    path:
+        description:
+          - The paths the exception in the reverse proxy is defined for
+        type: list
+        default: []
+        required: False
+    skip_custom_threats_filters:
+        description:
+          - A list of threats to be skipped
+        type: list
+        default: []
+        required: False
+    skip_threats_filter_categories:
+        description:
+          - Define which categories of threats are skipped
+        type: list
+        default: []
+        required: False
+    skipav:
+        description:
+          - Skip the Antivirus Scanning
+        default: False
+        type: bool
+        required: False
+    skipbadclients:
+        description:
+          - Block clients with bad reputation
+        default: False
+        type: bool
+        required: False
+    skipcookie:
+        description:
+          - Skip the Cookie Signing check
+        default: False
+        type: bool
+        required: False
+    skipform:
+        description:
+          - Enable form hardening
+        default: False
+        type: bool
+        required: False
+    skipform_missingtoken:
+        description:
+          - Enable form hardening with missing tokens
+        default: False
+        type: bool
+        required: False
+    skiphtmlrewrite:
+        description:
+          - Protection against SQL
+        default: False
+        type: bool
+        required: False
+    skiptft:
+        description:
+          - Enable true file type control
+        default: False
+        type: bool
+        required: False
+    skipurl:
+        description:
+          - Enable static URL hardening
+        default: False
+        type: bool
+        required: False
+    source:
+        description:
+          - Define which categories of threats are skipped
+        type: list
+        default: []
+        required: False
+    status:
+        description:
+          - Status of the exception rule set
+        default: True
+        type: bool
+        required: False
+
+extends_documentation_fragment:
+- community.general.utm
+
+'''
+
+EXAMPLES = """
+- name: Create UTM proxy_exception
+  utm_proxy_exception:
+    utm_host: sophos.host.name
+    utm_token: abcdefghijklmno1234
+    name: TestExceptionEntry
+    state: present
+
+- name: Remove UTM proxy_exception
+  utm_proxy_exception:
+    utm_host: sophos.host.name
+    utm_token: abcdefghijklmno1234
+    name: TestExceptionEntry
+    state: absent
+"""
+
+RETURN = """
+result:
+    description: The utm object that was created
+    returned: success
+    type: complex
+    contains:
+        _ref:
+            description: The reference name of the object
+            type: str
+        _locked:
+            description: Whether or not the object is currently locked
+            type: bool
+        _type:
+            description: The type of the object
+            type: str
+        name:
+            description: The name of the object
+            type: str
+        comment:
+            description: The optional comment string
+            type: str
+        op:
+            description: The operand to be used with the entries of the path parameter
+            type: str
+        path:
+            description: The paths the exception in the reverse proxy is defined for
+            type: list
+        skip_custom_threats_filters:
+            description: A list of threats to be skipped
+            type: list
+        skip_threats_filter_categories:
+            description: Define which categories of threats are skipped
+            type: list
+        skipav:
+            description: Skip the Antivirus Scanning
+            type: bool
+        skipbadclients:
+            description: Block clients with bad reputation
+            type: bool
+        skipcookie:
+            description: Skip the Cookie Signing check
+            type: bool
+        skipform:
+            description: Enable form hardening
+            type: bool
+        skipform_missingtoken:
+            description: Enable form hardening with missing tokens
+            type: bool
+        skiphtmlrewrite:
+            description: Protection against SQL
+            type: bool
+        skiptft:
+            description: Enable true file type control
+            type: bool
+        skipurl:
+            description: Enable static URL hardening
+            type: bool
+        source:
+            description: Define which categories of threats are skipped
+            type: list
+"""
+
+from ansible_collections.community.general.plugins.module_utils.utm_utils import UTM, UTMModule
+from ansible.module_utils._text import to_native
+
+
+def main():
+    endpoint = "reverse_proxy/exception"
+    key_to_check_for_changes = ["op", "path", "skip_custom_threats_filters", "skip_threats_filter_categories", "skipav",
+                                "comment", "skipbadclients", "skipcookie", "skipform", "status", "skipform_missingtoken",
+                                "skiphtmlrewrite", "skiptft", "skipurl", "source"]
+    module = UTMModule(
+        argument_spec=dict(
+            name=dict(type='str', required=True),
+            op=dict(type='str', required=False, default='AND', choices=['AND', 'OR']),
+            path=dict(type='list', elements='str', required=False, default=[]),
+            skip_custom_threats_filters=dict(type='list', elements='str', required=False, default=[]),
+            skip_threats_filter_categories=dict(type='list', elements='str', required=False, default=[]),
+            skipav=dict(type='bool', required=False, default=False),
+            skipbadclients=dict(type='bool', required=False, default=False),
+            skipcookie=dict(type='bool', required=False, default=False),
+            skipform=dict(type='bool', required=False, default=False),
+            skipform_missingtoken=dict(type='bool', required=False, default=False),
+            skiphtmlrewrite=dict(type='bool', required=False, default=False),
+            skiptft=dict(type='bool', required=False, default=False),
+            skipurl=dict(type='bool', required=False, default=False),
+            source=dict(type='list', elements='str', required=False, default=[]),
+            status=dict(type='bool', required=False, default=True),
+        )
+    )
+    try:
+        UTM(module, endpoint, key_to_check_for_changes).execute()
+    except Exception as e:
+        module.fail_json(msg=to_native(e))
+
+
+if __name__ == '__main__':
+    main()
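+
+# Illustrative only -- an exception that skips antivirus scanning for a
+# health-check path; the name and path below are placeholders:
+#
+#   - name: Allow unscanned health checks
+#     utm_proxy_exception:
+#       utm_host: utm.example.net
+#       utm_token: "{{ utm_api_token }}"
+#       name: ExampleHealthCheckException
+#       path:
+#         - /health
+#       skipav: true
+#       state: present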
diff --git a/plugins/modules/web_infrastructure/sophos_utm/utm_proxy_frontend.py b/plugins/modules/web_infrastructure/sophos_utm/utm_proxy_frontend.py
new file mode 100644
index 0000000000..7bc363ed97
--- /dev/null
+++ b/plugins/modules/web_infrastructure/sophos_utm/utm_proxy_frontend.py
@@ -0,0 +1,268 @@
+#!/usr/bin/python
+
+# Copyright: (c) 2018, Johannes Brunswicker
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+    'metadata_version': '1.1',
+    'status': ['preview'],
+    'supported_by': 'community'
+}
+
+DOCUMENTATION = '''
+---
+module: utm_proxy_frontend
+
+author:
+    - Johannes Brunswicker (@MatrixCrawler)
+
+short_description: create, update or destroy reverse_proxy frontend entry in Sophos UTM
+
+description:
+    - Create, update or destroy a reverse_proxy frontend entry in Sophos UTM.
+    - This module needs to have the REST Ability of the UTM to be activated.
+
+
+options:
+    name:
+        description:
+          - The name of the object. Will be used to identify the entry
+        required: true
+    add_content_type_header:
+        description:
+          - Whether to add the content type header or not
+        type: bool
+        default: False
+    address:
+        description:
+          - The reference name of the network/interface_address object.
+        default: REF_DefaultInternalAddress
+    allowed_networks:
+        description:
+          - A list of reference names for the allowed networks.
+        default: ['REF_NetworkAny']
+    certificate:
+        description:
+          - The reference name of the ca/host_key_cert object.
+        default: ""
+    comment:
+        description:
+          - An optional comment to add to the object
+        default: ""
+    disable_compression:
+        description:
+          - Whether to disable the compression
+        type: bool
+        default: False
+    domain:
+        description:
+          - A list of domain names for the frontend object
+    exceptions:
+        description:
+          - A list of exception ref names (reverse_proxy/exception)
+        default: []
+    htmlrewrite:
+        description:
+          - Whether to enable html rewrite or not
+        type: bool
+        default: False
+    htmlrewrite_cookies:
+        description:
+          - Whether to enable html rewrite cookie or not
+        type: bool
+        default: False
+    implicitredirect:
+        description:
+          - Whether to enable implicit redirection or not
+        type: bool
+        default: False
+    lbmethod:
+        description:
+          - Which loadbalancer method should be used
+        choices:
+          - ""
+          - bybusyness
+          - bytraffic
+          - byrequests
+        default: bybusyness
+    locations:
+        description:
+          - A list of location ref names (reverse_proxy/location)
+        default: []
+    port:
+        description:
+          - The frontend http port
+        default: 80
+    preservehost:
+        description:
+          - Whether to preserve host header
+        type: bool
+        default: False
+    profile:
+        description:
+          - The reference string of the reverse_proxy/profile
+        default: ""
+    status:
+        description:
+          - Whether to activate the frontend entry or not
+        type: bool
+        default: True
+    type:
+        description:
+          - Which protocol should be used
+        choices:
+          - http
+          - https
+        default: http
+    xheaders:
+        description:
+          - Whether to pass the host header or not
+        type: bool
+        default: False
+
+extends_documentation_fragment:
+- community.general.utm
+
+'''
+
+EXAMPLES = """
+- name: Create utm proxy_frontend
+  utm_proxy_frontend:
+    utm_host: sophos.host.name
+    utm_token: abcdefghijklmno1234
+    name: TestFrontendEntry
+    state: present
+
+- name: Remove utm proxy_frontend
+  utm_proxy_frontend:
+    utm_host: sophos.host.name
+    utm_token: abcdefghijklmno1234
+    name: TestFrontendEntry
+    state: absent
+"""
+
+RETURN = """
+result:
+    description: The utm object that was created
+    returned: success
+    type: complex
+    contains:
+        _ref:
+            description: The reference name of the object
+            type: str
+        _locked:
+            description: Whether or not the object is currently locked
+            type: bool
+        _type:
+            description: The type of the object
+            type: str
+        name:
+            description: The name of the object
+            type: str
+        add_content_type_header:
+            description: Whether to add the content type header
+            type: bool
+        address:
+            description: The reference name of the address
+            type: str
+        allowed_networks:
+            description: List of reference names of networks associated
+            type: list
+        certificate:
+            description: Reference name of certificate (ca/host_key_cert)
+            type: str
+        comment:
+            description: The comment string
+            type: str
+        disable_compression:
+            description: State of compression support
+            type: bool
+        domain:
+            description: List of hostnames
+            type: list
+        exceptions:
+            description: List of associated proxy exceptions
+            type: list
+        htmlrewrite:
+            description: State of html rewrite
+            type: bool
+        htmlrewrite_cookies:
+            description: Whether the html rewrite cookie will be set
+            type: bool
+        implicitredirect:
+            description: Whether to use implicit redirection
+            type: bool
+        lbmethod:
+            description: The method of loadbalancer to use
+            type: str
+        locations:
+            description: The reference names of reverse_proxy/locations associated with the object
+            type: list
+        port:
+            description: The port of the frontend connection
+            type: int
+        preservehost:
+            description: Preserve host header
+            type: bool
+        profile:
+            description: The associated reverse_proxy/profile
+            type: str
+        status:
+            description: Whether the frontend object is active or not
+            type: bool
+        type:
+            description: The connection type
+            type: str
+        xheaders:
+            description: The xheaders state
+            type: bool
+"""
+
+from ansible_collections.community.general.plugins.module_utils.utm_utils import UTM, UTMModule
+from ansible.module_utils._text import to_native
+
+
+def main():
+    endpoint = "reverse_proxy/frontend"
+    key_to_check_for_changes = ["add_content_type_header", "address", "allowed_networks", "certificate",
+                                "comment", "disable_compression", "domain", "exceptions", "htmlrewrite",
+                                "htmlrewrite_cookies", "implicitredirect", "lbmethod", "locations",
+                                "port", "preservehost", "profile", "status", "type", "xheaders"]
+    module = UTMModule(
+        argument_spec=dict(
+            name=dict(type='str', required=True),
+            add_content_type_header=dict(type='bool', required=False, default=False),
+            address=dict(type='str', required=False, default="REF_DefaultInternalAddress"),
+            allowed_networks=dict(type='list', elements='str', required=False, default=["REF_NetworkAny"]),
+            certificate=dict(type='str', required=False, default=""),
+            comment=dict(type='str', required=False, default=""),
+            disable_compression=dict(type='bool', required=False, default=False),
+            domain=dict(type='list', elements='str', required=False),
+            exceptions=dict(type='list', elements='str', required=False, default=[]),
+            htmlrewrite=dict(type='bool', required=False, default=False),
+            htmlrewrite_cookies=dict(type='bool', required=False, default=False),
+            implicitredirect=dict(type='bool', required=False, default=False),
+            lbmethod=dict(type='str', required=False, default="bybusyness",
+                          choices=['bybusyness', 'bytraffic', 'byrequests', '']),
+            locations=dict(type='list', elements='str', required=False, default=[]),
+            port=dict(type='int', required=False, default=80),
+            preservehost=dict(type='bool', required=False, default=False),
+            profile=dict(type='str', required=False, default=""),
+            status=dict(type='bool', required=False, default=True),
+            type=dict(type='str', required=False, default="http", choices=['http', 'https']),
+            xheaders=dict(type='bool', required=False, default=False),
+        )
+    )
+    try:
+        UTM(module, endpoint, key_to_check_for_changes).execute()
+    except Exception as e:
+        module.fail_json(msg=to_native(e))
+
+
+if __name__ == '__main__':
+    main()
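+
+# A hypothetical HTTPS frontend built with the module above; the REF_ name,
+# domain and port are placeholders, not values from this commit:
+#
+#   - name: Create an HTTPS proxy frontend
+#     utm_proxy_frontend:
+#       utm_host: utm.example.net
+#       utm_token: "{{ utm_api_token }}"
+#       name: ExampleHttpsFrontend
+#       type: https
+#       port: 443
+#       certificate: REF_CaHostKeyCertExample
+#       domain:
+#         - www.example.com
+#       state: present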
diff --git a/plugins/modules/web_infrastructure/sophos_utm/utm_proxy_frontend_info.py b/plugins/modules/web_infrastructure/sophos_utm/utm_proxy_frontend_info.py
new file mode 100644
index 0000000000..298058e174
--- /dev/null
+++ b/plugins/modules/web_infrastructure/sophos_utm/utm_proxy_frontend_info.py
@@ -0,0 +1,146 @@
+#!/usr/bin/python
+
+# Copyright: (c) 2018, Johannes Brunswicker
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+    'metadata_version': '1.1',
+    'status': ['preview'],
+    'supported_by': 'community'
+}
+
+DOCUMENTATION = '''
+---
+module: utm_proxy_frontend_info
+
+author:
+    - Johannes Brunswicker (@MatrixCrawler)
+
+short_description: get info for reverse_proxy frontend entry in Sophos UTM
+
+description:
+    - Get info for a reverse_proxy frontend entry in SOPHOS UTM.
+    - This module needs to have the REST Ability of the UTM to be activated.
+
+
+options:
+    name:
+        description:
+          - The name of the object. Will be used to identify the entry
+        required: true
+
+extends_documentation_fragment:
+- community.general.utm
+
+'''
+
+EXAMPLES = """
+- name: Get utm proxy_frontend
+  utm_proxy_frontend_info:
+    utm_host: sophos.host.name
+    utm_token: abcdefghijklmno1234
+    name: TestFrontendEntry
+"""
+
+RETURN = """
+result:
+    description: The utm object that was created
+    returned: success
+    type: complex
+    contains:
+        _ref:
+            description: The reference name of the object
+            type: str
+        _locked:
+            description: Whether or not the object is currently locked
+            type: bool
+        _type:
+            description: The type of the object
+            type: str
+        name:
+            description: The name of the object
+            type: str
+        add_content_type_header:
+            description: Whether to add the content type header
+            type: bool
+        address:
+            description: The reference name of the address
+            type: str
+        allowed_networks:
+            description: List of reference names of networks associated
+            type: list
+        certificate:
+            description: Reference name of certificate (ca/host_key_cert)
+            type: str
+        comment:
+            description: The comment string
+            type: str
+        disable_compression:
+            description: State of compression support
+            type: bool
+        domain:
+            description: List of hostnames
+            type: list
+        exceptions:
+            description: List of associated proxy exceptions
+            type: list
+        htmlrewrite:
+            description: State of html rewrite
+            type: bool
+        htmlrewrite_cookies:
+            description: Whether the html rewrite cookie will be set
+            type: bool
+        implicitredirect:
+            description: Whether to use implicit redirection
+            type: bool
+        lbmethod:
+            description: The method of loadbalancer to use
+            type: str
+        locations:
+            description: The reference names of reverse_proxy/locations associated with the object
+            type: list
+        port:
+            description: The port of the frontend connection
+            type: int
+        preservehost:
+            description: Preserve host header
+            type: bool
+        profile:
+            description: The associated reverse_proxy/profile
+            type: str
+        status:
+            description: Whether the frontend object is active or not
+            type: bool
+        type:
+            description: The connection type
+            type: str
+        xheaders:
+            description: The xheaders state
+            type: bool
+"""
+
+from ansible_collections.community.general.plugins.module_utils.utm_utils import UTM, UTMModule
+from ansible.module_utils._text import to_native
+
+
+def main():
+    endpoint = "reverse_proxy/frontend"
+    key_to_check_for_changes = []
+    module = UTMModule(
+        argument_spec=dict(
+            name=dict(type='str', required=True)
+        )
+    )
+    try:
+        UTM(module, endpoint, key_to_check_for_changes, info_only=True).execute()
+    except Exception as e:
+        module.fail_json(msg=to_native(e))
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/web_infrastructure/sophos_utm/utm_proxy_location.py b/plugins/modules/web_infrastructure/sophos_utm/utm_proxy_location.py
new file mode 100644
index 0000000000..58ec7d2a27
--- /dev/null
+++ b/plugins/modules/web_infrastructure/sophos_utm/utm_proxy_location.py
@@ -0,0 +1,209 @@
+#!/usr/bin/python
+
+# Copyright: (c) 2018, Johannes Brunswicker
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+    'metadata_version': '1.1',
+    'status': ['preview'],
+    'supported_by': 'community'
+}
+
+DOCUMENTATION = '''
+---
+module: utm_proxy_location
+
+author:
+    - Johannes Brunswicker (@MatrixCrawler)
+
+short_description: create, update or destroy reverse_proxy location entry in Sophos UTM
+
+description:
+    - Create, update or destroy a reverse_proxy location entry in SOPHOS UTM.
+    - This module needs to have the REST Ability of the UTM to be activated.
+
+
+options:
+    name:
+        description:
+          - The name of the object. Will be used to identify the entry
+        required: true
+    access_control:
+        description:
+          - whether to activate the access control for the location
+        type: str
+        default: '0'
+        choices:
+          - '0'
+          - '1'
+    allowed_networks:
+        description:
+          - A list of allowed networks
+        type: list
+        default: ['REF_NetworkAny']
+    auth_profile:
+        description:
+          - The reference name of the auth profile
+    backend:
+        description:
+          - A list of backends that are connected with this location declaration
+        default: []
+    be_path:
+        description:
+          - The path of the backend
+    comment:
+        description:
+          - The optional comment string
+    denied_networks:
+        description:
+          - A list of denied network references
+        default: []
+    hot_standby:
+        description:
+          - Activate hot standby mode
+        type: bool
+        default: False
+    path:
+        description:
+          - The path of the location
+        default: "/"
+    status:
+        description:
+          - Whether the location is active or not
+        type: bool
+        default: True
+    stickysession_id:
+        description:
+          - The stickysession id
+        default: ROUTEID
+    stickysession_status:
+        description:
+          - Enable the stickysession
+        type: bool
+        default: False
+    websocket_passthrough:
+        description:
+          - Enable the websocket passthrough
+        type: bool
+        default: False
+
+extends_documentation_fragment:
+- community.general.utm
+
+'''
+
+EXAMPLES = """
+- name: Create UTM proxy_location
+  utm_proxy_location:
+    utm_host: sophos.host.name
+    utm_token: abcdefghijklmno1234
+    name: TestLocationEntry
+    backend: REF_OBJECT_STRING
+    state: present
+
+- name: Remove UTM proxy_location
+  utm_proxy_location:
+    utm_host: sophos.host.name
+    utm_token: abcdefghijklmno1234
+    name: TestLocationEntry
+    state: absent
+"""
+
+RETURN = """
+result:
+    description: The utm object that was created
+    returned: success
+    type: complex
+    contains:
+        _ref:
+            description: The reference name of the object
+            type: str
+        _locked:
+            description: Whether or not the object is currently locked
+            type: bool
+        _type:
+            description: The type of the object
+            type: str
+        name:
+            description: The name of the object
+            type: str
+        access_control:
+            description: Whether to use access control state
+            type: str
+        allowed_networks:
+            description: List of allowed network reference names
+            type: list
+        auth_profile:
+            description: The auth profile reference name
+            type: str
+        backend:
+            description: The backend reference name
+            type: str
+        be_path:
+            description: The backend path
+            type: str
+        comment:
+            description: The comment string
+            type: str
+        denied_networks:
+            description: The list of the denied network names
+            type: list
+        hot_standby:
+            description: Use hot standby
+            type: bool
+        path:
+            description: Path name
+            type: str
+        status:
+            description: Whether the object is active or not
+            type: bool
+        stickysession_id:
+            description: The identifier of the stickysession
+            type: str
+        stickysession_status:
+            description: Whether to use stickysession or not
+            type: bool
+        websocket_passthrough:
+            description: Whether websocket passthrough will be used or not
+            type: bool
+"""
+
+from ansible_collections.community.general.plugins.module_utils.utm_utils import UTM, UTMModule
+from ansible.module_utils._text import to_native
+
+
+def main():
+    endpoint = "reverse_proxy/location"
+    key_to_check_for_changes = ["access_control", "allowed_networks", "auth_profile", "backend", "be_path", "comment",
+                                "denied_networks", "hot_standby", "path", "status", "stickysession_id",
+                                "stickysession_status", "websocket_passthrough"]
+    module = UTMModule(
+        argument_spec=dict(
+            name=dict(type='str', required=True),
+            access_control=dict(type='str', required=False, default="0", choices=['0', '1']),
+            allowed_networks=dict(type='list', elements='str', required=False, default=['REF_NetworkAny']),
+            auth_profile=dict(type='str', required=False, default=""),
+            backend=dict(type='list', elements='str', required=False, default=[]),
+            be_path=dict(type='str', required=False, default=""),
+            comment=dict(type='str', required=False, default=""),
+            denied_networks=dict(type='list', elements='str', required=False, default=[]),
+            hot_standby=dict(type='bool', required=False, default=False),
+            path=dict(type='str', required=False, default="/"),
+            status=dict(type='bool', required=False, default=True),
+            stickysession_id=dict(type='str', required=False, default='ROUTEID'),
+            stickysession_status=dict(type='bool', required=False, default=False),
+            websocket_passthrough=dict(type='bool', required=False, default=False),
+        )
+    )
+    try:
+        UTM(module, endpoint, key_to_check_for_changes).execute()
+    except Exception as e:
+        module.fail_json(msg=to_native(e))
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/web_infrastructure/sophos_utm/utm_proxy_location_info.py b/plugins/modules/web_infrastructure/sophos_utm/utm_proxy_location_info.py
new file mode 100644
index 0000000000..51e581b0d9
--- /dev/null
+++ b/plugins/modules/web_infrastructure/sophos_utm/utm_proxy_location_info.py
@@ -0,0 +1,127 @@
+#!/usr/bin/python
+
+# Copyright: (c) 2018, Johannes Brunswicker
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+    'metadata_version': '1.1',
+    'status': ['preview'],
+    'supported_by': 'community'
+}
+
+DOCUMENTATION = '''
+---
+module: utm_proxy_location_info
+
+author:
+    - Johannes Brunswicker (@MatrixCrawler)
+
+short_description: get info for reverse_proxy location entry in Sophos UTM
+
+description:
+    - Get info for a reverse_proxy location entry in SOPHOS UTM.
+    - This module needs to have the REST Ability of the UTM to be activated.
+
+
+options:
+    name:
+        description:
+          - The name of the object. Will be used to identify the entry
+        required: true
+
+extends_documentation_fragment:
+- community.general.utm
+
+'''
+
+EXAMPLES = """
+- name: Get info for UTM proxy_location
+  utm_proxy_location_info:
+    utm_host: sophos.host.name
+    utm_token: abcdefghijklmno1234
+    name: TestLocationEntry
+"""
+
+RETURN = """
+result:
+    description: The utm object that was created
+    returned: success
+    type: complex
+    contains:
+        _ref:
+            description: The reference name of the object
+            type: str
+        _locked:
+            description: Whether or not the object is currently locked
+            type: bool
+        _type:
+            description: The type of the object
+            type: str
+        name:
+            description: The name of the object
+            type: str
+        access_control:
+            description: Whether to use access control state
+            type: str
+        allowed_networks:
+            description: List of allowed network reference names
+            type: list
+        auth_profile:
+            description: The auth profile reference name
+            type: str
+        backend:
+            description: The backend reference name
+            type: str
+        be_path:
+            description: The backend path
+            type: str
+        comment:
+            description: The comment string
+            type: str
+        denied_networks:
+            description: The list of the denied network names
+            type: list
+        hot_standby:
+            description: Use hot standby
+            type: bool
+        path:
+            description: Path name
+            type: str
+        status:
+            description: Whether the object is active or not
+            type: bool
+        stickysession_id:
+            description: The identifier of the stickysession
+            type: str
+        stickysession_status:
+            description: Whether to use stickysession or not
+            type: bool
+        websocket_passthrough:
+            description: Whether websocket passthrough will be used or not
+            type: bool
+"""
+
+from ansible_collections.community.general.plugins.module_utils.utm_utils import UTM, UTMModule
+from ansible.module_utils._text import to_native
+
+
+def main():
+    endpoint = "reverse_proxy/location"
+    key_to_check_for_changes = []
+    module = UTMModule(
+        argument_spec=dict(
+            name=dict(type='str', required=True)
+        )
+    )
+    try:
+        UTM(module, endpoint, key_to_check_for_changes, info_only=True).execute()
+    except Exception as e:
+        module.fail_json(msg=to_native(e))
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/web_infrastructure/supervisorctl.py b/plugins/modules/web_infrastructure/supervisorctl.py
new file mode 100644
index 0000000000..7f41959b4d
--- /dev/null
+++ b/plugins/modules/web_infrastructure/supervisorctl.py
@@ -0,0 +1,254 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2012, Matt Wright
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: supervisorctl
+short_description: Manage the state of a program or group of programs running via supervisord
+description:
+     - Manage the state of a program or group of programs running via supervisord
+options:
+  name:
+    description:
+      - The name of the supervisord program or group to manage.
+      - The name will be taken as group name when it ends with a colon I(:)
+      - Group support is only available in Ansible version 1.6 or later.
+    required: true
+  config:
+    description:
+      - The supervisor configuration file path
+  server_url:
+    description:
+      - URL on which supervisord server is listening
+  username:
+    description:
+      - username to use for authentication
+  password:
+    description:
+      - password to use for authentication
+  state:
+    description:
+      - The desired state of program/group.
+    required: true
+    choices: [ "present", "started", "stopped", "restarted", "absent", "signalled" ]
+  signal:
+    description:
+      - The signal to send to the program/group, when combined with the 'signalled' state. Required when I(state=signalled).
+  supervisorctl_path:
+    description:
+      - path to supervisorctl executable
+notes:
+  - When C(state) = I(present), the module will call C(supervisorctl reread) then C(supervisorctl add) if the program/group does not exist.
+  - When C(state) = I(restarted), the module will call C(supervisorctl update) then call C(supervisorctl restart).
+  - When C(state) = I(absent), the module will call C(supervisorctl reread) then C(supervisorctl remove) to remove the target program/group.
+requirements: [ "supervisorctl" ]
+author:
+    - "Matt Wright (@mattupstate)"
+    - "Aaron Wang (@inetfuture)"
+'''

+EXAMPLES = '''
+# Manage the state of program to be in 'started' state.
+- supervisorctl:
+    name: my_app
+    state: started
+
+# Manage the state of program group to be in 'started' state.
+- supervisorctl:
+    name: 'my_apps:'
+    state: started
+
+# Restart my_app, reading supervisorctl configuration from a specified file.
+- supervisorctl:
+    name: my_app
+    state: restarted
+    config: /var/opt/my_project/supervisord.conf
+
+# Restart my_app, connecting to supervisord with credentials and server URL.
+- supervisorctl:
+    name: my_app
+    state: restarted
+    username: test
+    password: testpass
+    server_url: http://localhost:9001
+
+# Send a signal to my_app via supervisorctl
+- supervisorctl:
+    name: my_app
+    state: signalled
+    signal: USR1
+'''
+
+import os
+from ansible.module_utils.basic import AnsibleModule, is_executable
+
+
+def main():
+    arg_spec = dict(
+        name=dict(required=True),
+        config=dict(required=False, type='path'),
+        server_url=dict(required=False),
+        username=dict(required=False),
+        password=dict(required=False, no_log=True),
+        supervisorctl_path=dict(required=False, type='path'),
+        state=dict(required=True, choices=['present', 'started', 'restarted', 'stopped', 'absent', 'signalled']),
+        signal=dict(required=False)
+    )
+
+    module = AnsibleModule(argument_spec=arg_spec, supports_check_mode=True)
+
+    name = module.params['name']
+    is_group = False
+    if name.endswith(':'):
+        is_group = True
+        name = name.rstrip(':')
+    state = module.params['state']
+    config = module.params.get('config')
+    server_url = module.params.get('server_url')
+    username = module.params.get('username')
+    password = module.params.get('password')
+    supervisorctl_path = module.params.get('supervisorctl_path')
+    signal = module.params.get('signal')
+
+    # we check error message for a pattern, so we need to make sure that's in C locale
+    module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C', LC_CTYPE='C')
+
+    if supervisorctl_path:
+        if os.path.exists(supervisorctl_path) and is_executable(supervisorctl_path):
+            supervisorctl_args = [supervisorctl_path]
+        else:
+            module.fail_json(
+                msg="Provided path to supervisorctl does not exist or isn't executable: %s" % supervisorctl_path)
+    else:
+        supervisorctl_args = [module.get_bin_path('supervisorctl', True)]
+
+    if config:
+        supervisorctl_args.extend(['-c', config])
+    if
server_url: + supervisorctl_args.extend(['-s', server_url]) + if username: + supervisorctl_args.extend(['-u', username]) + if password: + supervisorctl_args.extend(['-p', password]) + + if state == 'signalled' and not signal: + module.fail_json(msg="State 'signalled' requires a 'signal' value") + + def run_supervisorctl(cmd, name=None, **kwargs): + args = list(supervisorctl_args) # copy the master args + args.append(cmd) + if name: + args.append(name) + return module.run_command(args, **kwargs) + + def get_matched_processes(): + matched = [] + rc, out, err = run_supervisorctl('status') + for line in out.splitlines(): + # One status line may look like one of these two: + # process not in group: + # echo_date_lonely RUNNING pid 7680, uptime 13:22:18 + # process in group: + # echo_date_group:echo_date_00 RUNNING pid 7681, uptime 13:22:18 + fields = [field for field in line.split(' ') if field != ''] + process_name = fields[0] + status = fields[1] + + if is_group: + # If there is ':', this process must be in a group. + if ':' in process_name: + group = process_name.split(':')[0] + if group != name: + continue + else: + continue + else: + if process_name != name: + continue + + matched.append((process_name, status)) + return matched + + def take_action_on_processes(processes, status_filter, action, expected_result): + to_take_action_on = [] + for process_name, status in processes: + if status_filter(status): + to_take_action_on.append(process_name) + + if len(to_take_action_on) == 0: + module.exit_json(changed=False, name=name, state=state) + if module.check_mode: + module.exit_json(changed=True) + for process_name in to_take_action_on: + rc, out, err = run_supervisorctl(action, process_name, check_rc=True) + if '%s: %s' % (process_name, expected_result) not in out: + module.fail_json(msg=out) + + module.exit_json(changed=True, name=name, state=state, affected=to_take_action_on) + + if state == 'restarted': + rc, out, err = run_supervisorctl('update', check_rc=True) + processes = get_matched_processes() + if len(processes) == 0: + module.fail_json(name=name, msg="ERROR (no such process)") + + take_action_on_processes(processes, lambda s: True, 'restart', 'started') + + processes = get_matched_processes() + + if state == 'absent': + if len(processes) == 0: + module.exit_json(changed=False, name=name, state=state) + + if module.check_mode: + module.exit_json(changed=True) + run_supervisorctl('reread', check_rc=True) + rc, out, err = run_supervisorctl('remove', name) + if '%s: removed process group' % name in out: + module.exit_json(changed=True, name=name, state=state) + else: + module.fail_json(msg=out, name=name, state=state) + + if state == 'present': + if len(processes) > 0: + module.exit_json(changed=False, name=name, state=state) + + if module.check_mode: + module.exit_json(changed=True) + run_supervisorctl('reread', check_rc=True) + rc, out, err = run_supervisorctl('add', name) + if '%s: added process group' % name in out: + module.exit_json(changed=True, name=name, state=state) + else: + module.fail_json(msg=out, name=name, state=state) + + if state == 'started': + if len(processes) == 0: + module.fail_json(name=name, msg="ERROR (no such process)") + take_action_on_processes(processes, lambda s: s not in ('RUNNING', 'STARTING'), 'start', 'started') + + if state == 'stopped': + if len(processes) == 0: + module.fail_json(name=name, msg="ERROR (no such process)") + take_action_on_processes(processes, lambda s: s in ('RUNNING', 'STARTING'), 'stop', 'stopped') + + if state == 'signalled': + if 
len(processes) == 0:
+            module.fail_json(name=name, msg="ERROR (no such process)")
+        take_action_on_processes(processes, lambda s: s in ('RUNNING',), "signal %s" % signal, 'signalled')
+
+
+if __name__ == '__main__':
+    main()
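+
+# Hypothetical usage notes (names are placeholders): a trailing colon selects
+# a whole supervisord group, and state=signalled requires a signal value:
+#
+#   - supervisorctl:
+#       name: 'workers:'   # the trailing colon means "the workers group"
+#       state: restarted
+#
+#   - supervisorctl:
+#       name: worker_1
+#       state: signalled
+#       signal: HUP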
diff --git a/plugins/modules/web_infrastructure/taiga_issue.py b/plugins/modules/web_infrastructure/taiga_issue.py
new file mode 100644
index 0000000000..a867222371
--- /dev/null
+++ b/plugins/modules/web_infrastructure/taiga_issue.py
@@ -0,0 +1,305 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2015, Alejandro Guirao
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: taiga_issue
+short_description: Creates/deletes an issue in a Taiga Project Management Platform
+description:
+  - Creates/deletes an issue in a Taiga Project Management Platform (U(https://taiga.io)).
+  - An issue is identified by the combination of project, issue subject and issue type.
+  - This module implements the creation or deletion of issues (not the update).
+options:
+  taiga_host:
+    description:
+      - The hostname of the Taiga instance.
+    default: https://api.taiga.io
+  project:
+    description:
+      - Name of the project containing the issue. Must exist previously.
+    required: True
+  subject:
+    description:
+      - The issue subject.
+    required: True
+  issue_type:
+    description:
+      - The issue type. Must exist previously.
+    required: True
+  priority:
+    description:
+      - The issue priority. Must exist previously.
+    default: Normal
+  status:
+    description:
+      - The issue status. Must exist previously.
+    default: New
+  severity:
+    description:
+      - The issue severity. Must exist previously.
+    default: Normal
+  description:
+    description:
+      - The issue description.
+    default: ""
+  attachment:
+    description:
+      - Path to a file to be attached to the issue.
+  attachment_description:
+    description:
+      - A string describing the file to be attached to the issue.
+    default: ""
+  tags:
+    description:
+      - A list of tags to be assigned to the issue.
+    default: []
+  state:
+    description:
+      - Whether the issue should be present or not.
+    choices: ["present", "absent"]
+    default: present
+author: Alejandro Guirao (@lekum)
+requirements: [python-taiga]
+notes:
+- The authentication is achieved either by the environment variable TAIGA_TOKEN or by the pair of environment variables TAIGA_USERNAME and TAIGA_PASSWORD
+'''
+
+EXAMPLES = '''
+# Create an issue in my hosted Taiga environment and attach an error log
+- taiga_issue:
+    taiga_host: https://mytaigahost.example.com
+    project: myproject
+    subject: An error has been found
+    issue_type: Bug
+    priority: High
+    status: New
+    severity: Important
+    description: An error has been found. Please check the attached error log for details.
+    attachment: /path/to/error.log
+    attachment_description: Error log file
+    tags:
+      - Error
+      - Needs manual check
+    state: present
+
+# Deletes the previously created issue
+- taiga_issue:
+    taiga_host: https://mytaigahost.example.com
+    project: myproject
+    subject: An error has been found
+    issue_type: Bug
+    state: absent
+'''
+
+RETURN = '''# '''
+import traceback
+
+from os import getenv
+from os.path import isfile
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_native
+
+TAIGA_IMP_ERR = None
+try:
+    from taiga import TaigaAPI
+    from taiga.exceptions import TaigaException
+    TAIGA_MODULE_IMPORTED = True
+except ImportError:
+    TAIGA_IMP_ERR = traceback.format_exc()
+    TAIGA_MODULE_IMPORTED = False
+
+
+def manage_issue(module, taiga_host, project_name, issue_subject, issue_priority,
+                 issue_status, issue_type, issue_severity, issue_description,
+                 issue_attachment, issue_attachment_description,
+                 issue_tags, state, check_mode=False):
+    """
+    Method that creates/deletes issues depending whether they exist and the state desired
+
+    The credentials should be passed via environment variables:
+        - TAIGA_TOKEN
+        - TAIGA_USERNAME and TAIGA_PASSWORD
+
+    Returns a tuple with these elements:
+        - A boolean representing the success of the operation
+        - A descriptive message
+        - A dict with the issue attributes, in case of issue creation, otherwise empty dict
+    """
+
+    changed = False
+
+    try:
+        token = getenv('TAIGA_TOKEN')
+        if token:
+            api = TaigaAPI(host=taiga_host, token=token)
+        else:
+            api = TaigaAPI(host=taiga_host)
+            username = getenv('TAIGA_USERNAME')
+            password = getenv('TAIGA_PASSWORD')
+            if not any([username, password]):
+                return (False, changed, "Missing credentials", {})
+            api.auth(username=username, password=password)
+
+        user_id = api.me().id
+        # list(...) wrappers keep the filters usable on Python 3, where filter() returns an iterator
+        project_list = list(filter(lambda x: x.name == project_name, api.projects.list(member=user_id)))
+        if len(project_list) != 1:
+            return (False, changed, "Unable to find project %s" % project_name, {})
+        project = project_list[0]
+        project_id = project.id
+
+        priority_list = list(filter(lambda x: x.name == issue_priority, api.priorities.list(project=project_id)))
+        if len(priority_list) != 1:
+            return (False, changed, "Unable to find issue priority %s for project %s" % (issue_priority, project_name), {})
+        priority_id = priority_list[0].id
+
+        status_list = list(filter(lambda x: x.name == issue_status, api.issue_statuses.list(project=project_id)))
+        if len(status_list) != 1:
+            return (False, changed, "Unable to find issue status %s for project %s" % (issue_status, project_name), {})
+        status_id = status_list[0].id
+
+        type_list = list(filter(lambda x: x.name == issue_type, project.list_issue_types()))
+        if len(type_list) != 1:
+            return (False, changed, "Unable to find issue type %s for project %s" % (issue_type, project_name), {})
+        type_id = type_list[0].id
+
+        severity_list = list(filter(lambda x: x.name == issue_severity, project.list_severities()))
+        if len(severity_list) != 1:
+            return (False, changed, "Unable to find severity %s for project %s" % (issue_severity, project_name), {})
+        severity_id = severity_list[0].id
+
+        issue = {
+            "project": project_name,
+            "subject": issue_subject,
+            "priority": issue_priority,
+            "status": issue_status,
+            "type": issue_type,
+            "severity": issue_severity,
+            "description": issue_description,
+            "tags": issue_tags,
+        }
+
+        # An issue is identified by the project_name, the issue_subject and the issue_type
+        matching_issue_list = list(filter(lambda x: x.subject == issue_subject and x.type == type_id, project.list_issues()))
+        matching_issue_list_len = len(matching_issue_list)
+
+        if matching_issue_list_len == 0:
+            # The issue does not exist in the project
+            if state == "present":
+                # This implies a change
+                changed = True
+                if not check_mode:
+                    # Create the issue
+                    new_issue = project.add_issue(issue_subject, priority_id, status_id, type_id, severity_id, tags=issue_tags, description=issue_description)
+                    if issue_attachment:
+                        new_issue.attach(issue_attachment, description=issue_attachment_description)
+                        issue["attachment"] = issue_attachment
+                        issue["attachment_description"] = issue_attachment_description
+                return (True, changed, "Issue created", issue)
+
+            else:
+                # If does not exist, do nothing
+                return (True, changed, "Issue does not exist", {})
+
+        elif matching_issue_list_len == 1:
+            # The issue exists in the project
+            if state == "absent":
+                # This implies a change
+                changed = True
+                if not check_mode:
+                    # Delete the issue
+                    matching_issue_list[0].delete()
+                return (True, changed, "Issue deleted", {})
+
+            else:
+                # Do nothing
+                return (True, changed, "Issue already exists", {})
+
+        else:
+            # More than 1 matching issue
+            return (False, changed, "More than one issue with subject %s in project %s" % (issue_subject, project_name), {})
+
+    except TaigaException as exc:
+        msg = "An exception happened: %s" % to_native(exc)
+        return (False, changed, msg, {})
+
+
+def main():
+    module = AnsibleModule(
+        argument_spec=dict(
+            taiga_host=dict(required=False, default="https://api.taiga.io"),
+            project=dict(required=True),
+            subject=dict(required=True),
+            issue_type=dict(required=True),
+            priority=dict(required=False, default="Normal"),
+            status=dict(required=False, default="New"),
+            severity=dict(required=False, default="Normal"),
+            description=dict(required=False, default=""),
+            attachment=dict(required=False, default=None),
+            attachment_description=dict(required=False, default=""),
+            tags=dict(required=False, default=[], type='list'),
+            state=dict(required=False, choices=['present', 'absent'],
+                       default='present'),
+        ),
+        supports_check_mode=True
+    )
+
+    if not TAIGA_MODULE_IMPORTED:
+        module.fail_json(msg=missing_required_lib("python-taiga"),
+                         exception=TAIGA_IMP_ERR)
+
+    taiga_host = module.params['taiga_host']
+    project_name = module.params['project']
+    issue_subject = module.params['subject']
+    issue_priority = module.params['priority']
+    issue_status = module.params['status']
+    issue_type = module.params['issue_type']
+    issue_severity = module.params['severity']
+    issue_description = module.params['description']
+    issue_attachment = module.params['attachment']
+    issue_attachment_description = module.params['attachment_description']
+    if issue_attachment:
+        if not isfile(issue_attachment):
+            msg = "%s is not a file" % issue_attachment
+            module.fail_json(msg=msg)
+    issue_tags = module.params['tags']
+    state = module.params['state']
+
+    return_status, changed, msg, issue_attr_dict = manage_issue(
+        module,
+        taiga_host,
+        project_name,
+        issue_subject,
+        issue_priority,
+        issue_status,
+        issue_type,
+        issue_severity,
+        issue_description,
+        issue_attachment,
+        issue_attachment_description,
+        issue_tags,
+        state,
+        check_mode=module.check_mode
+    )
+    if return_status:
+        if len(issue_attr_dict) > 0:
+            module.exit_json(changed=changed, msg=msg, issue=issue_attr_dict)
+        else:
+            module.exit_json(changed=changed, msg=msg)
+    else:
+        module.fail_json(msg=msg)
+
+
+if __name__ == '__main__':
+    main()
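+
+# Hypothetical usage note (placeholders throughout): the module reads its
+# credentials from the environment, which a task can supply inline:
+#
+#   - taiga_issue:
+#       project: myproject
+#       subject: Disk almost full
+#       issue_type: Bug
+#       state: present
+#     environment:
+#       TAIGA_TOKEN: "{{ taiga_api_token }}"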
diff --git a/plugins/netconf/__init__.py b/plugins/netconf/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/plugins/netconf/ce.py b/plugins/netconf/ce.py
new file mode 100644
index 0000000000..14317681c9
--- /dev/null
+++ b/plugins/netconf/ce.py
@@ -0,0 +1,247 @@
+#
+# (c) 2017 Red Hat Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible.  If not, see <https://www.gnu.org/licenses/>.
+#
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+netconf: ce
+short_description: Use ce netconf plugin to run netconf commands on Huawei Cloudengine platform
+description:
+  - This ce plugin provides low level abstraction apis for
+    sending and receiving netconf commands from Huawei Cloudengine network devices.
+options:
+  ncclient_device_handler:
+    type: str
+    default: huawei
+    description:
+      - Specifies the ncclient device handler name for Huawei Cloudengine.
+        To identify the ncclient device handler name refer to the ncclient library documentation.
+'''
+
+import json
+import re
+
+from ansible.module_utils._text import to_text, to_bytes
+from ansible.errors import AnsibleConnectionFailure
+from ansible.plugins.netconf import NetconfBase, ensure_ncclient
+
+try:
+    from ncclient import manager
+    from ncclient.operations import RPCError
+    from ncclient.transport.errors import SSHUnknownHostError
+    from ncclient.xml_ import to_ele, to_xml, new_ele
+    HAS_NCCLIENT = True
+except (ImportError, AttributeError):  # paramiko and gssapi are incompatible and raise AttributeError not ImportError
+    HAS_NCCLIENT = False
+
+try:
+    from lxml.etree import fromstring
+except ImportError:
+    from xml.etree.ElementTree import fromstring
+
+
+class Netconf(NetconfBase):
+
+    @ensure_ncclient
+    def get_text(self, ele, tag):
+        try:
+            return to_text(ele.find(tag).text, errors='surrogate_then_replace').strip()
+        except AttributeError:
+            pass
+
+    @ensure_ncclient
+    def get_device_info(self):
+        device_info = dict()
+        device_info['network_os'] = 'ce'
+        # Subtree filter for the systemInfo fields read below (sysName,
+        # productName, productVer, platformVer)
+        filter_xml = '''<filter type="subtree">
+                          <system xmlns="http://www.huawei.com/netconf/vrp">
+                            <systemInfo>
+                              <sysName></sysName>
+                              <productName></productName>
+                              <productVer></productVer>
+                              <platformVer></platformVer>
+                            </systemInfo>
+                          </system>
+                        </filter>'''
+        data = self.get(filter_xml)
+        data = re.sub(r'xmlns=".+?"', r'', data)
+        reply = fromstring(to_bytes(data, errors='surrogate_or_strict'))
+        sw_info = reply.find('.//systemInfo')
+
+        device_info['network_os_version'] = self.get_text(sw_info, 'productVer')
+        device_info['network_os_hostname'] = self.get_text(sw_info, 'sysName')
+        device_info['network_os_platform_version'] = self.get_text(sw_info, 'platformVer')
+        device_info['network_os_platform'] = self.get_text(sw_info, 'productName')
+
+        return device_info
+
+    def execute_rpc(self, name):
+        """RPC to be execute on remote device
+        :name: Name of rpc in string format"""
+        return self.rpc(name)
+
+    @ensure_ncclient
+    def load_configuration(self, *args, **kwargs):
+        """Loads given configuration on device
+        :format: Format of configuration (xml, text, set)
+        :action: Action to be performed (merge, replace, override, update)
+        :target: is the name of the configuration datastore being edited
+        :config: is the
configuration in string format.""" + if kwargs.get('config'): + kwargs['config'] = to_bytes(kwargs['config'], errors='surrogate_or_strict') + if kwargs.get('format', 'xml') == 'xml': + kwargs['config'] = to_ele(kwargs['config']) + + try: + return self.m.load_configuration(*args, **kwargs).data_xml + except RPCError as exc: + raise Exception(to_xml(exc.xml)) + + def get_capabilities(self): + result = dict() + result['rpc'] = self.get_base_rpc() + ['execute_rpc', 'load_configuration', 'get_configuration', 'compare_configuration', + 'execute_action', 'halt', 'reboot', 'execute_nc_cli', 'dispatch_rpc'] + result['network_api'] = 'netconf' + result['device_info'] = self.get_device_info() + result['server_capabilities'] = [c for c in self.m.server_capabilities] + result['client_capabilities'] = [c for c in self.m.client_capabilities] + result['session_id'] = self.m.session_id + return json.dumps(result) + + @staticmethod + @ensure_ncclient + def guess_network_os(obj): + try: + m = manager.connect( + host=obj._play_context.remote_addr, + port=obj._play_context.port or 830, + username=obj._play_context.remote_user, + password=obj._play_context.password, + key_filename=obj.key_filename, + hostkey_verify=obj.get_option('host_key_checking'), + look_for_keys=obj.get_option('look_for_keys'), + allow_agent=obj._play_context.allow_agent, + timeout=obj.get_option('persistent_connect_timeout'), + # We need to pass in the path to the ssh_config file when guessing + # the network_os so that a jumphost is correctly used if defined + ssh_config=obj._ssh_config + ) + except SSHUnknownHostError as exc: + raise AnsibleConnectionFailure(to_text(exc)) + + guessed_os = None + for c in m.server_capabilities: + if re.search('huawei', c): + guessed_os = 'ce' + break + + m.close_session() + return guessed_os + + def get_configuration(self, *args, **kwargs): + """Retrieve all or part of a specified configuration. 
+ :format: format in configuration should be retrieved + :filter: specifies the portion of the configuration to retrieve + (by default entire configuration is retrieved)""" + return self.m.get_configuration(*args, **kwargs).data_xml + + def compare_configuration(self, *args, **kwargs): + """Compare configuration + :rollback: rollback id""" + return self.m.compare_configuration(*args, **kwargs).data_xml + + @ensure_ncclient + def execute_action(self, xml_str): + """huawei execute-action""" + con_obj = None + try: + con_obj = self.m.action(action=xml_str) + except RPCError as exc: + raise Exception(to_xml(exc.xml)) + + return con_obj.xml + + def halt(self): + """reboot the device""" + return self.m.halt().data_xml + + def reboot(self): + """reboot the device""" + return self.m.reboot().data_xml + + @ensure_ncclient + def get(self, *args, **kwargs): + try: + if_rpc_reply = kwargs.pop('if_rpc_reply', False) + if if_rpc_reply: + return self.m.get(*args, **kwargs).xml + return self.m.get(*args, **kwargs).data_xml + except RPCError as exc: + raise Exception(to_xml(exc.xml)) + + @ensure_ncclient + def get_config(self, *args, **kwargs): + try: + return self.m.get_config(*args, **kwargs).data_xml + except RPCError as exc: + raise Exception(to_xml(exc.xml)) + + @ensure_ncclient + def edit_config(self, *args, **kwargs): + try: + return self.m.edit_config(*args, **kwargs).xml + except RPCError as exc: + raise Exception(to_xml(exc.xml)) + + @ensure_ncclient + def execute_nc_cli(self, *args, **kwargs): + try: + return self.m.cli(*args, **kwargs).xml + except RPCError as exc: + raise Exception(to_xml(exc.xml)) + + @ensure_ncclient + def commit(self, *args, **kwargs): + try: + return self.m.commit(*args, **kwargs).data_xml + except RPCError as exc: + raise Exception(to_xml(exc.xml)) + + def validate(self, *args, **kwargs): + return self.m.validate(*args, **kwargs).data_xml + + def discard_changes(self, *args, **kwargs): + return self.m.discard_changes(*args, **kwargs).data_xml + + @ensure_ncclient + def dispatch_rpc(self, rpc_command=None, source=None, filter=None): + """ + Execute rpc on the remote device eg. dispatch('get-next') + :param rpc_command: specifies rpc command to be dispatched either in plain text or in xml element format (depending on command) + :param source: name of the configuration datastore being queried + :param filter: specifies the portion of the configuration to retrieve (by default entire configuration is retrieved) + :return: Returns xml string containing the rpc-reply response received from remote host + """ + if rpc_command is None: + raise ValueError('rpc_command value must be provided') + resp = self.m.dispatch(fromstring(rpc_command), source=source, filter=filter) + # just return rpc-reply xml + return resp.xml diff --git a/plugins/netconf/sros.py b/plugins/netconf/sros.py new file mode 100644 index 0000000000..1b25d32166 --- /dev/null +++ b/plugins/netconf/sros.py @@ -0,0 +1,119 @@ +# +# (c) 2018 Red Hat Inc. +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. 
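# Every ncclient call in the ce plugin above is wrapped so that an RPCError
# is re-raised as a plain Exception carrying the server's rpc-error XML. A
# bare-ncclient sketch of the same pattern; the address and credentials are
# placeholders, and the 'huawei' device handler mirrors the plugin's
# ncclient_device_handler default.
from ncclient import manager
from ncclient.operations import RPCError
from ncclient.xml_ import to_xml

m = manager.connect(host='192.0.2.1', port=830, username='admin',
                    password='secret', hostkey_verify=False,
                    device_params={'name': 'huawei'})
try:
    print(m.get_config(source='running').data_xml)
except RPCError as exc:
    raise Exception(to_xml(exc.xml))  # same conversion the plugin performs
finally:
    m.close_session()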
+# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = ''' +--- +netconf: sros +short_description: Use Nokia SROS netconf plugin to run netconf commands on Nokia SROS platform +deprecated: + why: This plugin moved in 'nokia.sros' collection + removed_in: '2.13' + alternative: "Use the netconf plugin in 'nokia.sros' collection within Ansible galaxy" +description: + - This sros plugin provides low level abstraction apis for + sending and receiving netconf commands from Nokia sros network devices. +options: + ncclient_device_handler: + type: str + default: default + description: + - Specifies the ncclient device handler name for Nokia sros network os. To + identify the ncclient device handler name refer ncclient library documentation. +''' + +import json +import re + +from ansible.module_utils._text import to_text, to_native +from ansible.errors import AnsibleConnectionFailure +from ansible.plugins.netconf import NetconfBase +from ansible.plugins.netconf import ensure_ncclient + +try: + from ncclient import manager + from ncclient.transport.errors import SSHUnknownHostError + from ncclient.xml_ import to_ele + HAS_NCCLIENT = True +except (ImportError, AttributeError): # paramiko and gssapi are incompatible and raise AttributeError not ImportError + HAS_NCCLIENT = False + + +class Netconf(NetconfBase): + def get_text(self, ele, tag): + try: + return to_text(ele.find(tag).text, errors='surrogate_then_replace').strip() + except AttributeError: + pass + + @ensure_ncclient + def get_device_info(self): + device_info = dict() + device_info['network_os'] = 'sros' + + xmlns = "urn:nokia.com:sros:ns:yang:sr:state" + f = '' % xmlns + reply = to_ele(self.m.get(filter=('subtree', f)).data_xml) + + device_info['network_os_hostname'] = reply.findtext('.//{%s}state/{*}system/{*}lldp/{*}system-name' % xmlns) + device_info['network_os_version'] = reply.findtext('.//{%s}state/{*}system/{*}version/{*}version-number' % xmlns) + device_info['network_os_model'] = reply.findtext('.//{%s}state/{*}system/{*}platform' % xmlns) + device_info['network_os_platform'] = 'Nokia 7x50' + return device_info + + def get_capabilities(self): + result = dict() + result['rpc'] = self.get_base_rpc() + result['network_api'] = 'netconf' + result['device_info'] = self.get_device_info() + result['server_capabilities'] = [c for c in self.m.server_capabilities] + result['client_capabilities'] = [c for c in self.m.client_capabilities] + result['session_id'] = self.m.session_id + result['device_operations'] = self.get_device_operations(result['server_capabilities']) + return json.dumps(result) + + @staticmethod + @ensure_ncclient + def guess_network_os(obj): + try: + m = manager.connect( + host=obj._play_context.remote_addr, + port=obj._play_context.port or 830, + username=obj._play_context.remote_user, + password=obj._play_context.password, + key_filename=obj.key_filename, + hostkey_verify=obj.get_option('host_key_checking'), + look_for_keys=obj.get_option('look_for_keys'), + allow_agent=obj._play_context.allow_agent, + timeout=obj.get_option('persistent_connect_timeout'), + # We need to pass in the path to the ssh_config file when guessing + # the network_os so that a jumphost is correctly used if defined + ssh_config=obj._ssh_config + ) + except SSHUnknownHostError as exc: + raise AnsibleConnectionFailure(to_native(exc)) + + guessed_os = None + for c in m.server_capabilities: 
+ if re.search('urn:nokia.com:sros:ns:yang:sr', c): + guessed_os = 'sros' + + m.close_session() + return guessed_os diff --git a/plugins/terminal/__init__.py b/plugins/terminal/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/plugins/terminal/aireos.py b/plugins/terminal/aireos.py new file mode 100644 index 0000000000..240b1dff3f --- /dev/null +++ b/plugins/terminal/aireos.py @@ -0,0 +1,59 @@ +# +# (c) 2016 Red Hat Inc. +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import json +import re +import time + +from ansible.errors import AnsibleConnectionFailure +from ansible.plugins.terminal import TerminalBase + + +class TerminalModule(TerminalBase): + + terminal_stdout_re = [ + re.compile(br"[\r\n]?[\w]*\(.+\)?[>#\$](?:\s*)$"), + re.compile(br"User:") + ] + + terminal_stderr_re = [ + re.compile(br"% ?Error"), + re.compile(br"% ?Bad secret"), + re.compile(br"invalid input", re.I), + re.compile(br"incorrect usage", re.I), + re.compile(br"(?:incomplete|ambiguous) command", re.I), + re.compile(br"connection timed out", re.I), + re.compile(br"[^\r\n]+ not found", re.I), + re.compile(br"'[^']' +returned error code: ?\d+"), + ] + + def on_open_shell(self): + try: + commands = ('{"command": "' + self._connection._play_context.remote_user + '", "prompt": "Password:", "answer": "' + + self._connection._play_context.password + '"}', + '{"command": "config paging disable"}') + for cmd in commands: + self._exec_cli_command(cmd) + except AnsibleConnectionFailure: + try: + self._exec_cli_command(b'config paging disable') + except AnsibleConnectionFailure: + raise AnsibleConnectionFailure('unable to set terminal parameters') diff --git a/plugins/terminal/apconos.py b/plugins/terminal/apconos.py new file mode 100644 index 0000000000..0cbd74f649 --- /dev/null +++ b/plugins/terminal/apconos.py @@ -0,0 +1,35 @@ +# (C) 2017 Red Hat Inc. +# Copyright (C) 2019 APCON. +# +# GNU General Public License v3.0+ +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. 
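# The aireos terminal plugin above decides command completion and failure
# purely from its byte regexes. A self-contained check against sample
# output; the controller banner and error text are invented for
# illustration.
import re

stdout_re = [re.compile(br"[\r\n]?[\w]*\(.+\)?[>#\$](?:\s*)$"),
             re.compile(br"User:")]
stderr_re = [re.compile(br"% ?Error"),
             re.compile(br"incorrect usage", re.I)]

prompt = b"\r\n(Cisco Controller) >"
assert any(r.search(prompt) for r in stdout_re)   # treated as a prompt

error = b"Incorrect usage.  Use the '?' key to list commands."
assert any(r.search(error) for r in stderr_re)    # treated as a failure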
+# +# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# +# Contains terminal Plugin methods for apconos Config Module +# Apcon Networking +# +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import json +import re + +from ansible.errors import AnsibleConnectionFailure +from ansible.module_utils._text import to_text, to_bytes +from ansible.plugins.terminal import TerminalBase + + +class TerminalModule(TerminalBase): + + terminal_stdout_re = [ + re.compile(br'>>\ |#\ |\$\ ') + ] + + terminal_stderr_re = [ + re.compile(br"connection timed out", re.I), + ] diff --git a/plugins/terminal/aruba.py b/plugins/terminal/aruba.py new file mode 100644 index 0000000000..79bce7a2e1 --- /dev/null +++ b/plugins/terminal/aruba.py @@ -0,0 +1,68 @@ +# +# (c) 2016 Red Hat Inc. +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import json +import re + +from ansible.errors import AnsibleConnectionFailure +from ansible.module_utils._text import to_text, to_bytes +from ansible.plugins.terminal import TerminalBase + + +class TerminalModule(TerminalBase): + + ansi_re = [ + # check ECMA-48 Section 5.4 (Control Sequences) + re.compile(br'(\x1b\[\?1h\x1b=)'), + re.compile(br'((?:\x9b|\x1b\x5b)[\x30-\x3f]*[\x20-\x2f]*[\x40-\x7e])'), + re.compile(br'\x08.') + ] + + terminal_stdout_re = [ + re.compile(br"[\r\n]?[\w]*\(.+\)\s*[\^\*]?(?:\[.+\])? ?#(?:\s*)$"), + re.compile(br"[pP]assword:$"), + re.compile(br"(?<=\s)[a-zA-Z0-9]([a-zA-Z0-9-]*[a-zA-Z0-9])?\s*#\s*$"), + re.compile(br"[\r\n]?[\w\+\-\.:\/\[\]]+(?:\([^\)]+\)){0,3}(?:[>#]) ?$"), + ] + + terminal_stderr_re = [ + re.compile(br"% ?Error"), + re.compile(br"Error:", re.M), + re.compile(br"^% \w+", re.M), + re.compile(br"% ?Bad secret"), + re.compile(br"invalid input", re.I), + re.compile(br"(?:incomplete|ambiguous) command", re.I), + re.compile(br"connection timed out", re.I), + re.compile(br"[^\r\n]+ not found", re.I), + re.compile(br"'[^']' +returned error code: ?\d+"), + ] + + terminal_initial_prompt = b'Press any key to continue' + + terminal_initial_answer = b'\r' + + terminal_inital_prompt_newline = False + + def on_open_shell(self): + try: + self._exec_cli_command(b'no pag') + except AnsibleConnectionFailure: + raise AnsibleConnectionFailure('unable to set terminal parameters') diff --git a/plugins/terminal/ce.py b/plugins/terminal/ce.py new file mode 100644 index 0000000000..67936e8fef --- /dev/null +++ b/plugins/terminal/ce.py @@ -0,0 +1,60 @@ +# +# (c) 2016 Red Hat Inc. +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. 
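# The aruba plugin's ansi_re patterns above are applied to device output
# before prompt matching. A tiny demonstration of what they remove; the
# sample byte string is invented.
import re

ansi_re = [
    re.compile(br'(\x1b\[\?1h\x1b=)'),
    re.compile(br'((?:\x9b|\x1b\x5b)[\x30-\x3f]*[\x20-\x2f]*[\x40-\x7e])'),
    re.compile(br'\x08.'),
]

raw = b'\x1b[?1h\x1b=\x1b[1mswitch\x1b[0m# '
for regex in ansi_re:
    raw = regex.sub(b'', raw)
assert raw == b'switch# '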
+# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import re + +from ansible.plugins.terminal import TerminalBase +from ansible.errors import AnsibleConnectionFailure + + +class TerminalModule(TerminalBase): + + terminal_stdout_re = [ + re.compile(br'[\r\n]?<.+>(?:\s*)$'), + re.compile(br'[\r\n]?\[.+\](?:\s*)$'), + ] + #: terminal initial prompt + #: The password needs to be changed. Change now? [Y/N]: + terminal_initial_prompt = br'Change\s*now\s*\?\s*\[Y\/N\]\s*:' + + #: terminal initial answer + #: do not change password when it is asked to change with initial connection. + terminal_initial_answer = b'N' + terminal_stderr_re = [ + re.compile(br"% ?Error: "), + re.compile(br"^% \w+", re.M), + re.compile(br"% ?Bad secret"), + re.compile(br"invalid input", re.I), + re.compile(br"(?:incomplete|ambiguous) command", re.I), + re.compile(br"connection timed out", re.I), + re.compile(br"[^\r\n]+ not found", re.I), + re.compile(br"'[^']' +returned error code: ?\d+"), + re.compile(br"syntax error"), + re.compile(br"unknown command"), + re.compile(br"Error\[\d+\]: ", re.I), + re.compile(br"Error:", re.I) + ] + + def on_open_shell(self): + try: + self._exec_cli_command('screen-length 0 temporary') + except AnsibleConnectionFailure: + raise AnsibleConnectionFailure('unable to set terminal parameters') diff --git a/plugins/terminal/cnos.py b/plugins/terminal/cnos.py new file mode 100644 index 0000000000..6162b84cff --- /dev/null +++ b/plugins/terminal/cnos.py @@ -0,0 +1,83 @@ +# (C) 2017 Red Hat Inc. +# Copyright (C) 2017 Lenovo. +# +# GNU General Public License v3.0+ +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. 
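# On first login a Cloudengine device may ask to change the password; the
# ce terminal plugin above declines automatically. This shows the
# initial-prompt pattern firing; the banner text is modeled on the
# plugin's own comment.
import re

terminal_initial_prompt = br'Change\s*now\s*\?\s*\[Y\/N\]\s*:'
banner = b'The password needs to be changed. Change now? [Y/N]:'
assert re.search(terminal_initial_prompt, banner)
answer = b'N'  # terminal_initial_answer: keep the existing password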
+# +# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# +# Contains terminal Plugin methods for CNOS Config Module +# Lenovo Networking +# +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import json +import re + +from ansible.errors import AnsibleConnectionFailure +from ansible.module_utils._text import to_text, to_bytes +from ansible.plugins.terminal import TerminalBase + + +class TerminalModule(TerminalBase): + + terminal_stdout_re = [ + re.compile(br"[\r\n]?[\w+\-\.:\/\[\]]+(?:\([^\)]+\)){,3}(?:>|#) ?$"), + re.compile(br"\[\w+\@[\w\-\.]+(?: [^\]])\] ?[>#\$] ?$"), + re.compile(br">[\r\n]?") + ] + + terminal_stderr_re = [ + re.compile(br"% ?Error"), + re.compile(br"% ?Bad secret"), + re.compile(br"invalid input", re.I), + re.compile(br"(?:incomplete|ambiguous) command", re.I), + re.compile(br"connection timed out", re.I), + re.compile(br"[^\r\n]+ not found"), + re.compile(br"'[^']' +returned error code: ?\d+"), + ] + + def on_open_shell(self): + try: + for cmd in (b'\n', b'terminal length 0\n'): + self._exec_cli_command(cmd) + except AnsibleConnectionFailure: + raise AnsibleConnectionFailure('unable to set terminal parameters') + + def on_become(self, passwd=None): + if self._get_prompt().endswith(b'#'): + return + + cmd = {u'command': u'enable'} + if passwd: + # Note: python-3.5 cannot combine u"" and r"" together. Thus make + # an r string and use to_text to ensure it's text + # on both py2 and py3. + cmd[u'prompt'] = to_text(r"[\r\n]?password: $", + errors='surrogate_or_strict') + cmd[u'answer'] = passwd + + try: + self._exec_cli_command(to_bytes(json.dumps(cmd), + errors='surrogate_or_strict')) + except AnsibleConnectionFailure: + msg = 'unable to elevate privilege to enable mode' + raise AnsibleConnectionFailure(msg) + + def on_unbecome(self): + prompt = self._get_prompt() + if prompt is None: + # if prompt is None most likely the terminal is hung up at a prompt + return + + if b'(config' in prompt: + self._exec_cli_command(b'end') + self._exec_cli_command(b'disable') + + elif prompt.endswith(b'#'): + self._exec_cli_command(b'disable') diff --git a/plugins/terminal/edgeos.py b/plugins/terminal/edgeos.py new file mode 100644 index 0000000000..50f5901663 --- /dev/null +++ b/plugins/terminal/edgeos.py @@ -0,0 +1,35 @@ +# Copyright: (c) 2018, Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import os +import re + +from ansible.plugins.terminal import TerminalBase +from ansible.errors import AnsibleConnectionFailure + + +class TerminalModule(TerminalBase): + + terminal_stdout_re = [ + re.compile(br"[\r\n]?[\w+\-\.:\/\[\]]+(?:\([^\)]+\)){,3}(?:>|#) ?$"), + re.compile(br"\@[\w\-\.]+:\S+?[>#\$] ?$") + ] + + terminal_stderr_re = [ + re.compile(br"\n\s*command not found"), + re.compile(br"\nInvalid command"), + re.compile(br"\nCommit failed"), + re.compile(br"\n\s*Set failed"), + ] + + terminal_length = os.getenv('ANSIBLE_EDGEOS_TERMINAL_LENGTH', 10000) + + def on_open_shell(self): + try: + self._exec_cli_command('export VYATTA_PAGER=cat') + self._exec_cli_command('stty rows %s' % self.terminal_length) + except AnsibleConnectionFailure: + raise AnsibleConnectionFailure('unable to set terminal parameters') diff --git a/plugins/terminal/edgeswitch.py b/plugins/terminal/edgeswitch.py new file mode 100644 index 0000000000..27ac674d85 --- /dev/null +++ b/plugins/terminal/edgeswitch.py @@ 
-0,0 +1,87 @@ +# +# (c) 2018 Red Hat Inc. +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import json +import re + +from ansible.errors import AnsibleConnectionFailure +from ansible.module_utils._text import to_text, to_bytes +from ansible.plugins.terminal import TerminalBase + + +class TerminalModule(TerminalBase): + + terminal_stdout_re = [ + re.compile(br"\(([^\(\)]+)\) [>#]$"), + re.compile(br"\(([^\(\)]+)\) \(([^\(\)]+)\)#$") + ] + + terminal_stderr_re = [ + re.compile(br"% ?Error"), + re.compile(br"% ?Bad secret"), + re.compile(br"invalid input", re.I), + re.compile(br"(?:incomplete|ambiguous) command", re.I), + re.compile(br"connection timed out", re.I), + re.compile(br"[^\r\n]+ not found"), + re.compile(br"'[^']' +returned error code: ?\d+"), + re.compile(br"An invalid") + ] + + def on_open_shell(self): + return + + def on_become(self, passwd=None): + prompt = self._get_prompt() + if prompt and prompt.endswith(b'#'): + return + + cmd = {u'command': u'enable'} + if passwd: + cmd[u'prompt'] = to_text(r"[\r\n]?[Pp]assword: ?$", errors='surrogate_or_strict') + cmd[u'answer'] = passwd + try: + self._exec_cli_command(to_bytes(json.dumps(cmd), errors='surrogate_or_strict')) + prompt = self._get_prompt() + if prompt is None or not prompt.endswith(b'#'): + raise AnsibleConnectionFailure('failed to elevate privilege to enable mode still at prompt [%s]' % prompt) + + cmd = {u'command': u'terminal length 0'} + self._exec_cli_command(to_bytes(json.dumps(cmd), errors='surrogate_or_strict')) + prompt = self._get_prompt() + if prompt is None or not prompt.endswith(b'#'): + raise AnsibleConnectionFailure('failed to setup terminal in enable mode') + + except AnsibleConnectionFailure as e: + prompt = self._get_prompt() + raise AnsibleConnectionFailure('unable to elevate privilege to enable mode, at prompt [%s] with error: %s' % (prompt, e.message)) + + def on_unbecome(self): + prompt = self._get_prompt() + if prompt is None: + # if prompt is None most likely the terminal is hung up at a prompt + return + + if b'(Config' in prompt: + self._exec_cli_command(b'end') + self._exec_cli_command(b'exit') + + elif prompt.endswith(b'#'): + self._exec_cli_command(b'exit') diff --git a/plugins/terminal/enos.py b/plugins/terminal/enos.py new file mode 100644 index 0000000000..e1b82d2562 --- /dev/null +++ b/plugins/terminal/enos.py @@ -0,0 +1,83 @@ +# (C) 2017 Red Hat Inc. +# Copyright (C) 2017 Lenovo. +# +# GNU General Public License v3.0+ +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. 
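# on_become() in the edgeswitch plugin above ships the enable command as a
# JSON document so the connection side knows which prompt to expect and
# what to answer. A standalone sketch of that payload; the password is a
# placeholder.
import json
from ansible.module_utils._text import to_bytes, to_text

passwd = 'secret'
cmd = {u'command': u'enable',
       u'prompt': to_text(r"[\r\n]?[Pp]assword: ?$", errors='surrogate_or_strict'),
       u'answer': passwd}
payload = to_bytes(json.dumps(cmd), errors='surrogate_or_strict')
# payload is what _exec_cli_command() receives; the connection plugin
# parses it and answers the password prompt itself.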
+# +# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# +# Contains terminal Plugin methods for ENOS Config Module +# Lenovo Networking +# +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import json +import re + +from ansible.errors import AnsibleConnectionFailure +from ansible.module_utils._text import to_text, to_bytes +from ansible.plugins.terminal import TerminalBase + + +class TerminalModule(TerminalBase): + + terminal_stdout_re = [ + re.compile(br"[\r\n]?[\w+\-\.:\/\[\]]+(?:\([^\)]+\)){,3}(?:>|#) ?$"), + re.compile(br"\[\w+\@[\w\-\.]+(?: [^\]])\] ?[>#\$] ?$"), + re.compile(br">[\r\n]?") + ] + + terminal_stderr_re = [ + re.compile(br"% ?Error"), + re.compile(br"% ?Bad secret"), + re.compile(br"invalid input", re.I), + re.compile(br"(?:incomplete|ambiguous) command", re.I), + re.compile(br"connection timed out", re.I), + re.compile(br"[^\r\n]+ not found"), + re.compile(br"'[^']' +returned error code: ?\d+"), + ] + + def on_open_shell(self): + try: + for cmd in (b'\n', b'terminal-length 0\n'): + self._exec_cli_command(cmd) + except AnsibleConnectionFailure: + raise AnsibleConnectionFailure('unable to set terminal parameters') + + def on_become(self, passwd=None): + if self._get_prompt().endswith(b'#'): + return + + cmd = {u'command': u'enable'} + if passwd: + # Note: python-3.5 cannot combine u"" and r"" together. Thus make + # an r string and use to_text to ensure it's text + # on both py2 and py3. + cmd[u'prompt'] = to_text(r"[\r\n]?password: $", + errors='surrogate_or_strict') + cmd[u'answer'] = passwd + + try: + self._exec_cli_command(to_bytes(json.dumps(cmd), + errors='surrogate_or_strict')) + except AnsibleConnectionFailure: + msg = 'unable to elevate privilege to enable mode' + raise AnsibleConnectionFailure(msg) + + def on_unbecome(self): + prompt = self._get_prompt() + if prompt is None: + # if prompt is None most likely the terminal is hung up at a prompt + return + + if b'(config' in prompt: + self._exec_cli_command(b'end') + self._exec_cli_command(b'disable') + + elif prompt.endswith(b'#'): + self._exec_cli_command(b'disable') diff --git a/plugins/terminal/eric_eccli.py b/plugins/terminal/eric_eccli.py new file mode 100644 index 0000000000..d162203265 --- /dev/null +++ b/plugins/terminal/eric_eccli.py @@ -0,0 +1,59 @@ +# +# Copyright (c) 2019 Ericsson AB. +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+# + + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import json +import re + +from ansible import constants as C +from ansible.errors import AnsibleConnectionFailure +from ansible.module_utils._text import to_text, to_bytes +from ansible.plugins.terminal import TerminalBase +from ansible.utils.display import Display +from ansible.module_utils.six import PY3 + +display = Display() + + +class TerminalModule(TerminalBase): + + terminal_stdout_re = [ + re.compile(br"[\r\n]?\[.*\][a-zA-Z0-9_.-]*[>\#] ?$"), + re.compile(br"[\r\n]?[a-zA-Z0-9_.-]*(?:\([^\)]+\))(?:[>#]) ?$"), + re.compile(br"bash\-\d\.\d(?:[$#]) ?"), + re.compile(br"[a-zA-Z0-9_.-]*\@[a-zA-Z0-9_.-]*\[\]\:\/flash\>") + ] + + terminal_stderr_re = [ + re.compile(br"[\r\n]+syntax error: .*"), + re.compile(br"Aborted: .*"), + re.compile(br"[\r\n]+Error: .*"), + re.compile(br"[\r\n]+% Error:.*"), + re.compile(br"[\r\n]+% Invalid input.*"), + re.compile(br"[\r\n]+% Incomplete command:.*") + ] + + def on_open_shell(self): + + try: + for cmd in (b'screen-length 0', b'screen-width 512'): + self._exec_cli_command(cmd) + except AnsibleConnectionFailure: + raise AnsibleConnectionFailure('unable to set terminal parameters') diff --git a/plugins/terminal/exos.py b/plugins/terminal/exos.py new file mode 100644 index 0000000000..6836cdb8f4 --- /dev/null +++ b/plugins/terminal/exos.py @@ -0,0 +1,59 @@ +# +# (c) 2016 Red Hat Inc. +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import re + +from ansible.errors import AnsibleConnectionFailure +from ansible.plugins.terminal import TerminalBase + + +class TerminalModule(TerminalBase): + + terminal_stdout_re = [ + re.compile(br"[\r\n](?:! 
)?(?:\* )?(?:\(.*\) )?(?:Slot-\d+ )?(?:VPEX )?\S+\.\d+ (?:[>#]) ?$") + ] + + terminal_stderr_re = [ + re.compile(br"% ?Error"), + re.compile(br"% ?Bad secret"), + re.compile(br"[\r\n%] Bad passwords"), + re.compile(br"invalid input", re.I), + re.compile(br"(?:incomplete|ambiguous) command", re.I), + re.compile(br"connection timed out", re.I), + re.compile(br"[^\r\n]+ not found"), + re.compile(br"'[^']' +returned error code: ?\d+"), + re.compile(br"Bad mask", re.I), + re.compile(br"% ?(\S+) ?overlaps with ?(\S+)", re.I), + re.compile(br"[%\S] ?Error: ?[\s]+", re.I), + re.compile(br"[%\S] ?Informational: ?[\s]+", re.I), + re.compile(br"%% Invalid .* at '\^' marker.", re.I), + ] + + def on_open_shell(self): + try: + self._exec_cli_command(b'disable clipaging') + except AnsibleConnectionFailure: + raise AnsibleConnectionFailure('unable to set terminal parameters') + + try: + self._exec_cli_command(b'configure cli columns 256') + except AnsibleConnectionFailure: + self._connection.queue_message('warning', 'Unable to configure cli columns, command responses may be truncated') diff --git a/plugins/terminal/icx.py b/plugins/terminal/icx.py new file mode 100644 index 0000000000..e7d55549d8 --- /dev/null +++ b/plugins/terminal/icx.py @@ -0,0 +1,81 @@ +# Copyright: (c) 2019, Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import re + +from ansible.plugins.terminal import TerminalBase +from ansible.errors import AnsibleConnectionFailure +from ansible.module_utils._text import to_text, to_bytes +import json + + +class TerminalModule(TerminalBase): + + terminal_stdout_re = [ + re.compile(br"[\r\n]?[\w\+\-\.:\/\[\]]+(?:\([^\)]+\)){0,3}(?:[>#]) ?$") + ] + + terminal_stderr_re = [ + re.compile(br"% ?Error"), + re.compile(br"% ?Bad secret"), + re.compile(br"[\r\n%] Bad passwords"), + re.compile(br"invalid input", re.I), + re.compile(br"(?:incomplete|ambiguous) command", re.I), + re.compile(br"connection timed out", re.I), + re.compile(br"[^\r\n]+ not found"), + re.compile(br"'[^']' +returned error code: ?\d+"), + re.compile(br"Bad mask", re.I), + re.compile(br"% ?(\S+) ?overlaps with ?(\S+)", re.I), + re.compile(br"[%\S] ?Error: ?[\s]+", re.I), + re.compile(br"[%\S] ?Informational: ?[\s]+", re.I), + re.compile(br"Command authorization failed"), + re.compile(br"Error - *"), + re.compile(br"Error - Incorrect username or password."), + re.compile(br"Invalid input"), + re.compile(br"Already a http operation is in progress"), + re.compile(br"Flash access in progress. 
Please try later"), + re.compile(br"Error: .*"), + re.compile(br"^Error: .*", re.I), + re.compile(br"^Ambiguous input"), + re.compile(br"Errno") + ] + + def on_open_shell(self): + pass + + def __del__(self): + try: + self.close() + except AnsibleConnectionFailure: + raise AnsibleConnectionFailure('unable to set terminal parameters') + + def on_become(self, passwd=None): + if self._get_prompt().endswith(b'#'): + return + + cmd = {u'command': u'enable'} + cmd[u'prompt'] = to_text(r"[\r\n](?:Local_)?[Pp]assword: ?$", errors='surrogate_or_strict') + cmd[u'answer'] = passwd + cmd[u'prompt_retry_check'] = True + try: + self._exec_cli_command(to_bytes(json.dumps(cmd), errors='surrogate_or_strict')) + prompt = self._get_prompt() + if prompt is None or not prompt.endswith(b'#'): + raise AnsibleConnectionFailure('failed to elevate privilege to enable mode still at prompt [%s]' % prompt) + except AnsibleConnectionFailure as e: + prompt = self._get_prompt() + raise AnsibleConnectionFailure('unable to elevate privilege to enable mode, at prompt [%s] with error: %s' % (prompt, e.message)) + + def on_unbecome(self): + prompt = self._get_prompt() + if prompt is None: + return + + if b'(config' in prompt: + self._exec_cli_command(b'exit') + + elif prompt.endswith(b'#'): + self._exec_cli_command(b'exit') diff --git a/plugins/terminal/ironware.py b/plugins/terminal/ironware.py new file mode 100644 index 0000000000..3651cbd114 --- /dev/null +++ b/plugins/terminal/ironware.py @@ -0,0 +1,78 @@ +# +# (c) 2016 Red Hat Inc. +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import re +import json + +from ansible.errors import AnsibleConnectionFailure +from ansible.module_utils._text import to_text, to_bytes +from ansible.plugins.terminal import TerminalBase + + +class TerminalModule(TerminalBase): + + terminal_stdout_re = [ + re.compile(br"[\r\n]?(?:\w+@)?[\w+\-\.:\/\[\]]+(?:\([^\)]+\)){,3}(?:>|#) ?$") + ] + + terminal_stderr_re = [ + re.compile(br"[\r\n]Error - "), + re.compile(br"[\r\n](?:incomplete|ambiguous|unrecognised|invalid) (?:command|input)", re.I) + ] + + def on_open_shell(self): + self.disable_pager() + + def disable_pager(self): + cmd = {u'command': u'terminal length 0'} + try: + self._exec_cli_command(u'terminal length 0') + except AnsibleConnectionFailure: + raise AnsibleConnectionFailure('unable to disable terminal pager') + + def on_become(self, passwd=None): + if self._get_prompt().strip().endswith(b'#'): + return + + cmd = {u'command': u'enable'} + if passwd: + # Note: python-3.5 cannot combine u"" and r"" together. Thus make + # an r string and use to_text to ensure it's text on both py2 and py3. 
+ cmd[u'prompt'] = to_text(r"[\r\n]?password: ?$", errors='surrogate_or_strict') + cmd[u'answer'] = passwd + + try: + self._exec_cli_command(to_bytes(json.dumps(cmd), errors='surrogate_or_strict')) + except AnsibleConnectionFailure: + raise AnsibleConnectionFailure('unable to elevate privilege to enable mode') + + def on_unbecome(self): + prompt = self._get_prompt() + if prompt is None: + # if prompt is None most likely the terminal is hung up at a prompt + return + + if b'(config' in prompt: + self._exec_cli_command(b'end') + self._exec_cli_command(b'exit') + + elif prompt.endswith(b'#'): + self._exec_cli_command(b'exit') diff --git a/plugins/terminal/netvisor.py b/plugins/terminal/netvisor.py new file mode 100644 index 0000000000..27dffb5947 --- /dev/null +++ b/plugins/terminal/netvisor.py @@ -0,0 +1,39 @@ +# +# (c) 2016 Red Hat Inc. +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import json +import re + +from ansible.errors import AnsibleConnectionFailure +from ansible.module_utils._text import to_text, to_bytes +from ansible.plugins.terminal import TerminalBase + + +class TerminalModule(TerminalBase): + + terminal_stdout_re = [ + re.compile(br">.*[\r\n]?(.*)") + + ] + + terminal_stderr_re = [ + re.compile(br"% ?Error: (?!\bdoes not exist\b)(?!\balready exists\b)") + ] diff --git a/plugins/terminal/nos.py b/plugins/terminal/nos.py new file mode 100644 index 0000000000..245189468f --- /dev/null +++ b/plugins/terminal/nos.py @@ -0,0 +1,54 @@ +# +# (c) 2018 Extreme Networks Inc. +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
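# The netvisor stderr pattern above uses negative lookaheads so that two
# benign messages are not treated as failures. A quick check; the sample
# messages are invented for illustration.
import re

stderr_re = re.compile(br"% ?Error: (?!\bdoes not exist\b)(?!\balready exists\b)")

assert stderr_re.search(b"% Error: invalid argument")    # real failure
assert not stderr_re.search(b"% Error: does not exist")  # ignored
assert not stderr_re.search(b"% Error: already exists")  # ignored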
+# +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import re + +from ansible.errors import AnsibleConnectionFailure +from ansible.plugins.terminal import TerminalBase + + +class TerminalModule(TerminalBase): + + terminal_stdout_re = [ + re.compile(br"([\r\n]|(\x1b\[\?7h))[\w\+\-\.:\/\[\]]+(?:\([^\)]+\)){0,3}(?:[>#]) ?$") + ] + + terminal_stderr_re = [ + re.compile(br"% ?Error"), + # re.compile(br"^% \w+", re.M), + re.compile(br"% ?Bad secret"), + re.compile(br"[\r\n%] Bad passwords"), + re.compile(br"invalid input", re.I), + re.compile(br"(?:incomplete|ambiguous) command", re.I), + re.compile(br"connection timed out", re.I), + re.compile(br"[^\r\n]+ not found"), + re.compile(br"'[^']' +returned error code: ?\d+"), + re.compile(br"Bad mask", re.I), + re.compile(br"% ?(\S+) ?overlaps with ?(\S+)", re.I), + re.compile(br"[%\S] ?Informational: ?[\s]+", re.I), + re.compile(br"syntax error: unknown argument.", re.I) + ] + + def on_open_shell(self): + try: + self._exec_cli_command(u'terminal length 0') + except AnsibleConnectionFailure: + raise AnsibleConnectionFailure('unable to set terminal parameters') diff --git a/plugins/terminal/onyx.py b/plugins/terminal/onyx.py new file mode 100644 index 0000000000..52d630b9fd --- /dev/null +++ b/plugins/terminal/onyx.py @@ -0,0 +1,80 @@ +# +# (c) 2016 Red Hat Inc. +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import json +import re + +from ansible.errors import AnsibleConnectionFailure +from ansible.module_utils._text import to_text, to_bytes +from ansible.plugins.terminal import TerminalBase + + +class TerminalModule(TerminalBase): + + terminal_stdout_re = [ + re.compile(br"(?P(.*)( > | # )\Z)"), + ] + + terminal_stderr_re = [ + re.compile(br"\A%|\r\n%|\n%"), + ] + + init_commands = [b'no cli session paging enable', ] + + def on_open_shell(self): + try: + for cmd in self.init_commands: + self._exec_cli_command(cmd) + except AnsibleConnectionFailure: + raise AnsibleConnectionFailure('unable to set terminal parameters') + + def on_become(self, passwd=None): + if self._get_prompt().endswith(b'#'): + return + + cmd = {u'command': u'enable'} + if passwd: + # Note: python-3.5 cannot combine u"" and r"" together. Thus make + # an r string and use to_text to ensure it's text on both py2 and + # py3. 
+ cmd[u'prompt'] = to_text(r"[\r\n]?password: $", + errors='surrogate_or_strict') + cmd[u'answer'] = passwd + + try: + self._exec_cli_command(to_bytes(json.dumps(cmd), + errors='surrogate_or_strict')) + except AnsibleConnectionFailure: + raise AnsibleConnectionFailure( + 'unable to elevate privilege to enable mode') + + def on_unbecome(self): + prompt = self._get_prompt() + if prompt is None: + # if prompt is None most likely the terminal is hung up at a prompt + return + + if b'(config' in prompt: + self._exec_cli_command(b'exit') + self._exec_cli_command(b'disable') + + elif prompt.endswith(b'#'): + self._exec_cli_command(b'disable') diff --git a/plugins/terminal/routeros.py b/plugins/terminal/routeros.py new file mode 100644 index 0000000000..78996f28aa --- /dev/null +++ b/plugins/terminal/routeros.py @@ -0,0 +1,69 @@ +# +# (c) 2016 Red Hat Inc. +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import json +import re + +from ansible.errors import AnsibleConnectionFailure +from ansible.module_utils._text import to_text, to_bytes +from ansible.plugins.terminal import TerminalBase +from ansible.utils.display import Display + +display = Display() + + +class TerminalModule(TerminalBase): + + ansi_re = [ + # check ECMA-48 Section 5.4 (Control Sequences) + re.compile(br'(\x1b\[\?1h\x1b=)'), + re.compile(br'((?:\x9b|\x1b\x5b)[\x30-\x3f]*[\x20-\x2f]*[\x40-\x7e])'), + re.compile(br'\x08.') + ] + + terminal_initial_prompt = [ + br'\x1bZ', + ] + + terminal_initial_answer = b'\x1b/Z' + + terminal_stdout_re = [ + re.compile(br"\x1b<"), + re.compile(br"\[[\w\.]+\@[\w\s\-\.]+\] ?> ?$"), + re.compile(br"Please press \"Enter\" to continue!"), + re.compile(br"Do you want to see the software license\? \[Y\/n\]: ?"), + ] + + terminal_stderr_re = [ + re.compile(br"\nbad command name"), + re.compile(br"\nno such item"), + re.compile(br"\ninvalid value for"), + ] + + def on_open_shell(self): + prompt = self._get_prompt() + try: + if prompt.strip().endswith(b':'): + self._exec_cli_command(b' ') + if prompt.strip().endswith(b'!'): + self._exec_cli_command(b'\n') + except AnsibleConnectionFailure: + raise AnsibleConnectionFailure('unable to bypass license prompt') diff --git a/plugins/terminal/slxos.py b/plugins/terminal/slxos.py new file mode 100644 index 0000000000..245189468f --- /dev/null +++ b/plugins/terminal/slxos.py @@ -0,0 +1,54 @@ +# +# (c) 2018 Extreme Networks Inc. +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import re + +from ansible.errors import AnsibleConnectionFailure +from ansible.plugins.terminal import TerminalBase + + +class TerminalModule(TerminalBase): + + terminal_stdout_re = [ + re.compile(br"([\r\n]|(\x1b\[\?7h))[\w\+\-\.:\/\[\]]+(?:\([^\)]+\)){0,3}(?:[>#]) ?$") + ] + + terminal_stderr_re = [ + re.compile(br"% ?Error"), + # re.compile(br"^% \w+", re.M), + re.compile(br"% ?Bad secret"), + re.compile(br"[\r\n%] Bad passwords"), + re.compile(br"invalid input", re.I), + re.compile(br"(?:incomplete|ambiguous) command", re.I), + re.compile(br"connection timed out", re.I), + re.compile(br"[^\r\n]+ not found"), + re.compile(br"'[^']' +returned error code: ?\d+"), + re.compile(br"Bad mask", re.I), + re.compile(br"% ?(\S+) ?overlaps with ?(\S+)", re.I), + re.compile(br"[%\S] ?Informational: ?[\s]+", re.I), + re.compile(br"syntax error: unknown argument.", re.I) + ] + + def on_open_shell(self): + try: + self._exec_cli_command(u'terminal length 0') + except AnsibleConnectionFailure: + raise AnsibleConnectionFailure('unable to set terminal parameters') diff --git a/plugins/terminal/sros.py b/plugins/terminal/sros.py new file mode 100644 index 0000000000..77085a383e --- /dev/null +++ b/plugins/terminal/sros.py @@ -0,0 +1,43 @@ +# +# (c) 2016 Red Hat Inc. +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import re + +from ansible.plugins.terminal import TerminalBase +from ansible.errors import AnsibleConnectionFailure + + +class TerminalModule(TerminalBase): + + terminal_stdout_re = [ + re.compile(br"[\r\n]?[\w+\-\.:\/\[\]]+(?:\([^\)]+\)){,3}(?:>|#|\$|>#) ?$"), + re.compile(br"\[\w+\@[\w\-\.]+(?: [^\]])\] ?[>#\$] ?$") + ] + + terminal_stderr_re = [ + re.compile(br"Error:"), + ] + + def on_open_shell(self): + try: + self._exec_cli_command(b'environment no more') + except AnsibleConnectionFailure: + raise AnsibleConnectionFailure('unable to set terminal parameters') diff --git a/plugins/terminal/voss.py b/plugins/terminal/voss.py new file mode 100644 index 0000000000..f540be9b4e --- /dev/null +++ b/plugins/terminal/voss.py @@ -0,0 +1,89 @@ +# +# (c) 2018 Extreme Networks Inc. +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import json +import re + +from ansible.errors import AnsibleConnectionFailure +from ansible.module_utils._text import to_text, to_bytes +from ansible.plugins.terminal import TerminalBase + + +class TerminalModule(TerminalBase): + + terminal_stdout_re = [ + re.compile(br"[\r\n]+[^\s#>]+(?:[>#])$", re.M) + ] + + terminal_stderr_re = [ + re.compile(br"% ?Error"), + re.compile(br"% ?Bad secret"), + re.compile(br"[\r\n%] Bad passwords"), + re.compile(br"invalid input", re.I), + re.compile(br"(?:incomplete|ambiguous) command", re.I), + re.compile(br"connection timed out", re.I), + re.compile(br"[^\r\n]+ not found"), + re.compile(br"'[^']' +returned error code: ?\d+"), + re.compile(br"Discontiguous Subnet Mask"), + re.compile(br"Conflicting IP address"), + re.compile(br"[\r\n]Error: ?[\S]+"), + re.compile(br"[%\S] ?Informational: ?[\s]+", re.I), + re.compile(br"Command authorization failed") + ] + + def on_open_shell(self): + try: + self._exec_cli_command(u'terminal more disable') + except AnsibleConnectionFailure: + raise AnsibleConnectionFailure('unable to set terminal parameters') + + def on_become(self, passwd=None): + if self._get_prompt().endswith(b'#'): + return + + cmd = {u'command': u'enable'} + if passwd: + # Note: python-3.5 cannot combine u"" and r"" together. Thus make + # an r string and use to_text to ensure it's text on both py2 and py3. + cmd[u'prompt'] = to_text(r"[\r\n](?:Local_)?[Pp]assword: ?$", errors='surrogate_or_strict') + cmd[u'answer'] = passwd + cmd[u'prompt_retry_check'] = True + try: + self._exec_cli_command(to_bytes(json.dumps(cmd), errors='surrogate_or_strict')) + prompt = self._get_prompt() + if prompt is None or not prompt.endswith(b'#'): + raise AnsibleConnectionFailure('failed to elevate privilege to enable mode still at prompt [%s]' % prompt) + except AnsibleConnectionFailure as e: + prompt = self._get_prompt() + raise AnsibleConnectionFailure('unable to elevate privilege to enable mode, at prompt [%s] with error: %s' % (prompt, e.message)) + + def on_unbecome(self): + prompt = self._get_prompt() + if prompt is None: + # if prompt is None most likely the terminal is hung up at a prompt + return + + if prompt.endswith(b')#'): + self._exec_cli_command(b'end') + self._exec_cli_command(b'disable') + + elif prompt.endswith(b'#'): + self._exec_cli_command(b'disable') diff --git a/scripts/inventory/__init__.py b/scripts/inventory/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/scripts/inventory/abiquo.ini b/scripts/inventory/abiquo.ini new file mode 100644 index 0000000000..991a2ed803 --- /dev/null +++ b/scripts/inventory/abiquo.ini @@ -0,0 +1,48 @@ +# Ansible external inventory script settings for Abiquo +# + +# Define an Abiquo user with access to Abiquo API which will be used to +# perform required queries to obtain information to generate the Ansible +# inventory output. +# +[auth] +apiuser = admin +apipass = xabiquo + + +# Specify Abiquo API version in major.minor format and the access URI to +# API endpoint. 
Tested versions are: 2.6 , 3.0 and 3.1 +# To confirm that your box haves access to Abiquo API you can perform a +# curl command, replacing with suitable values, similar to this: +# curl -X GET https://192.168.2.100/api/login -u admin:xabiquo +# +[api] +version = 3.0 +uri = https://192.168.2.100/api +# You probably won't need to modify login preferences, but just in case +login_path = /login +login_type = application/vnd.abiquo.user+json + + +# To avoid performing excessive calls to Abiquo API you can define a +# cache for the plugin output. Within the time defined in seconds, latest +# output will be reused. After that time, the cache will be refreshed. +# +[cache] +cache_max_age = 30 +cache_dir = /tmp + + +[defaults] +# Depending in your Abiquo environment, you may want to use only public IP +# addresses (if using public cloud providers) or also private IP addresses. +# You can set this with public_ip_only configuration. +public_ip_only = false +# default_net_interface only is used if public_ip_only = false +# If public_ip_only is set to false, you can choose default nic to obtain +# IP address to define the host. +default_net_interface = nic0 +# Only deployed VM are displayed in the plugin output. +deployed_only = true +# Define if VM metadata is obtained from Abiquo API. +get_metadata = false diff --git a/scripts/inventory/abiquo.py b/scripts/inventory/abiquo.py new file mode 100644 index 0000000000..5a7950bd70 --- /dev/null +++ b/scripts/inventory/abiquo.py @@ -0,0 +1,232 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +''' +External inventory script for Abiquo +==================================== + +Shamelessly copied from an existing inventory script. + +This script generates an inventory that Ansible can understand by making API requests to Abiquo API +Requires some python libraries, ensure to have them installed when using this script. + +This script has been tested in Abiquo 3.0 but it may work also for Abiquo 2.6. + +Before using this script you may want to modify abiquo.ini config file. + +This script generates an Ansible hosts file with these host groups: + +ABQ_xxx: Defines a hosts itself by Abiquo VM name label +all: Contains all hosts defined in Abiquo user's enterprise +virtualdatecenter: Creates a host group for each virtualdatacenter containing all hosts defined on it +virtualappliance: Creates a host group for each virtualappliance containing all hosts defined on it +imagetemplate: Creates a host group for each image template containing all hosts using it + +''' + +# (c) 2014, Daniel Beneyto +# +# This file is part of Ansible, +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
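# Shape of the JSON this script ultimately prints, following the group
# layout described in the docstring above; every name and the address are
# invented examples, and the empty hostvars assume get_metadata = false.
example_inventory = {
    "all": {"children": ["web01"], "hosts": []},
    "my_vapp": {"children": ["web01"], "hosts": []},           # virtualappliance
    "my_vdc": {"children": ["web01"], "hosts": []},            # virtualdatacenter
    "debian9_template": {"children": ["web01"], "hosts": []},  # imagetemplate
    "web01": ["192.0.2.10"],  # group per VM, containing its reachable IP
    "_meta": {"hostvars": {}},
}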
+ +import os +import sys +import time + +import json + +from ansible.module_utils.six.moves import configparser as ConfigParser +from ansible.module_utils.urls import open_url + + +def api_get(link, config): + try: + if link is None: + url = config.get('api', 'uri') + config.get('api', 'login_path') + headers = {"Accept": config.get('api', 'login_type')} + else: + url = link['href'] + '?limit=0' + headers = {"Accept": link['type']} + result = open_url(url, headers=headers, url_username=config.get('auth', 'apiuser').replace('\n', ''), + url_password=config.get('auth', 'apipass').replace('\n', '')) + return json.loads(result.read()) + except Exception: + return None + + +def save_cache(data, config): + ''' saves item to cache ''' + dpath = config.get('cache', 'cache_dir') + try: + cache = open('/'.join([dpath, 'inventory']), 'w') + cache.write(json.dumps(data)) + cache.close() + except IOError as e: + pass # not really sure what to do here + + +def get_cache(cache_item, config): + ''' returns cached item ''' + dpath = config.get('cache', 'cache_dir') + inv = {} + try: + cache = open('/'.join([dpath, 'inventory']), 'r') + inv = cache.read() + cache.close() + except IOError as e: + pass # not really sure what to do here + + return inv + + +def cache_available(config): + ''' checks if we have a 'fresh' cache available for item requested ''' + + if config.has_option('cache', 'cache_dir'): + dpath = config.get('cache', 'cache_dir') + + try: + existing = os.stat('/'.join([dpath, 'inventory'])) + except Exception: + # cache doesn't exist or isn't accessible + return False + + if config.has_option('cache', 'cache_max_age'): + maxage = config.get('cache', 'cache_max_age') + if (int(time.time()) - int(existing.st_mtime)) <= int(maxage): + return True + + return False + + +def generate_inv_from_api(enterprise_entity, config): + try: + inventory['all'] = {} + inventory['all']['children'] = [] + inventory['all']['hosts'] = [] + inventory['_meta'] = {} + inventory['_meta']['hostvars'] = {} + + enterprise = api_get(enterprise_entity, config) + vms_entity = next(link for link in enterprise['links'] if link['rel'] == 'virtualmachines') + vms = api_get(vms_entity, config) + for vmcollection in vms['collection']: + for link in vmcollection['links']: + if link['rel'] == 'virtualappliance': + vm_vapp = link['title'].replace('[', '').replace(']', '').replace(' ', '_') + elif link['rel'] == 'virtualdatacenter': + vm_vdc = link['title'].replace('[', '').replace(']', '').replace(' ', '_') + elif link['rel'] == 'virtualmachinetemplate': + vm_template = link['title'].replace('[', '').replace(']', '').replace(' ', '_') + + # From abiquo.ini: Only adding to inventory VMs with public IP + if config.getboolean('defaults', 'public_ip_only') is True: + for link in vmcollection['links']: + if link['type'] == 'application/vnd.abiquo.publicip+json' and link['rel'] == 'ip': + vm_nic = link['title'] + break + else: + vm_nic = None + # Otherwise, assigning defined network interface IP address + else: + for link in vmcollection['links']: + if link['rel'] == config.get('defaults', 'default_net_interface'): + vm_nic = link['title'] + break + else: + vm_nic = None + + vm_state = True + # From abiquo.ini: Only adding to inventory VMs deployed + if config.getboolean('defaults', 'deployed_only') is True and vmcollection['state'] == 'NOT_ALLOCATED': + vm_state = False + + if vm_nic is not None and vm_state: + if vm_vapp not in inventory: + inventory[vm_vapp] = {} + inventory[vm_vapp]['children'] = [] + inventory[vm_vapp]['hosts'] = [] + 
+                if vm_vdc not in inventory:
+                    inventory[vm_vdc] = {}
+                    inventory[vm_vdc]['hosts'] = []
+                    inventory[vm_vdc]['children'] = []
+                if vm_template not in inventory:
+                    inventory[vm_template] = {}
+                    inventory[vm_template]['children'] = []
+                    inventory[vm_template]['hosts'] = []
+                if config.getboolean('defaults', 'get_metadata') is True:
+                    meta_entity = next(link for link in vmcollection['links'] if link['rel'] == 'metadata')
+                    try:
+                        metadata = api_get(meta_entity, config)
+                        if (config.getfloat("api", "version") >= 3.0):
+                            vm_metadata = metadata['metadata']
+                        else:
+                            vm_metadata = metadata['metadata']['metadata']
+                        inventory['_meta']['hostvars'][vm_nic] = vm_metadata
+                    except Exception:
+                        pass  # metadata is optional; skip hosts without it
+
+                inventory[vm_vapp]['children'].append(vmcollection['name'])
+                inventory[vm_vdc]['children'].append(vmcollection['name'])
+                inventory[vm_template]['children'].append(vmcollection['name'])
+                inventory['all']['children'].append(vmcollection['name'])
+                inventory[vmcollection['name']] = []
+                inventory[vmcollection['name']].append(vm_nic)
+
+        return inventory
+    except Exception:
+        # Return empty hosts output
+        return {'all': {'hosts': []}, '_meta': {'hostvars': {}}}
+
+
+def get_inventory(enterprise, config):
+    ''' Reads the inventory from cache or the Abiquo API '''
+
+    if cache_available(config):
+        # the cache already holds a JSON string; return it untouched instead
+        # of re-encoding it
+        return get_cache('inventory', config)
+
+    # MAKE ABIQUO API CALLS #
+    inv = generate_inv_from_api(enterprise, config)
+    save_cache(inv, config)
+    return json.dumps(inv)
+
+
+if __name__ == '__main__':
+    inventory = {}
+    enterprise = {}
+
+    # Read config
+    config = ConfigParser.SafeConfigParser()
+    # note: os.path.splitext() is used because str.rstrip('.py') strips
+    # characters, not a suffix
+    for configfilename in [os.path.splitext(os.path.abspath(sys.argv[0]))[0] + '.ini', 'abiquo.ini']:
+        if os.path.exists(configfilename):
+            config.read(configfilename)
+            break
+
+    try:
+        login = api_get(None, config)
+        enterprise = next(link for link in login['links'] if link['rel'] == 'enterprise')
+    except Exception:
+        enterprise = None
+
+    if cache_available(config):
+        inventory = get_cache('inventory', config)
+    else:
+        inventory = get_inventory(enterprise, config)
+
+    # return to ansible
+    sys.stdout.write(str(inventory))
+    sys.stdout.flush()
diff --git a/scripts/inventory/apache-libcloud.py b/scripts/inventory/apache-libcloud.py
new file mode 100644
index 0000000000..3857d2f934
--- /dev/null
+++ b/scripts/inventory/apache-libcloud.py
@@ -0,0 +1,346 @@
+#!/usr/bin/env python
+
+# (c) 2013, Sebastien Goasguen
+#
+# This file is part of Ansible,
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+######################################################################
+
+'''
+Apache Libcloud generic external inventory script
+=================================================
+
+Generates inventory that Ansible can understand by making API requests to
+Cloud providers using the Apache libcloud library.
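+
+A minimal libcloud.ini might look like this (the sections and keys are the
+ones consumed by read_settings() below; the values are illustrative only):
+
+    [driver]
+    provider = EC2
+    key = <your api key>
+    secret = <your api secret>
+
+    [cache]
+    cache_path = /tmp
+    cache_max_age = 300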
+
+This script expects the libcloud.ini file alongside it, or at the path given
+by the LIBCLOUD_INI_PATH environment variable.
+
+'''
+
+import sys
+import os
+import argparse
+import re
+from time import time
+
+from ansible.module_utils.six import iteritems, string_types
+from ansible.module_utils.six.moves import configparser as ConfigParser
+from libcloud.compute.types import Provider
+from libcloud.compute.providers import get_driver
+import libcloud.security as sec
+
+import json
+
+
+class LibcloudInventory(object):
+    def __init__(self):
+        ''' Main execution path '''
+
+        # Inventory grouped by instance IDs, tags, security groups, regions,
+        # and availability zones
+        self.inventory = {}
+
+        # Index of hostname (address) to node name
+        self.index = {}
+
+        # Read settings and parse CLI arguments
+        self.read_settings()
+        self.parse_cli_args()
+
+        # Cache
+        if self.args.refresh_cache:
+            self.do_api_calls_update_cache()
+        elif not self.is_cache_valid():
+            self.do_api_calls_update_cache()
+
+        # Data to print
+        if self.args.host:
+            data_to_print = self.get_host_info()
+
+        elif self.args.list:
+            # Display list of instances for inventory
+            if len(self.inventory) == 0:
+                data_to_print = self.get_inventory_from_cache()
+            else:
+                data_to_print = self.json_format_dict(self.inventory, True)
+
+        print(data_to_print)
+
+    def is_cache_valid(self):
+        ''' Determines whether the cache files have expired or are still valid '''
+
+        if os.path.isfile(self.cache_path_cache):
+            mod_time = os.path.getmtime(self.cache_path_cache)
+            current_time = time()
+            if (mod_time + self.cache_max_age) > current_time:
+                if os.path.isfile(self.cache_path_index):
+                    return True
+
+        return False
+
+    def read_settings(self):
+        ''' Reads the settings from the libcloud.ini file '''
+
+        config = ConfigParser.SafeConfigParser()
+        libcloud_default_ini_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'libcloud.ini')
+        libcloud_ini_path = os.environ.get('LIBCLOUD_INI_PATH', libcloud_default_ini_path)
+        config.read(libcloud_ini_path)
+
+        if not config.has_section('driver'):
+            raise ValueError('libcloud.ini file must contain a [driver] section')
+
+        if config.has_option('driver', 'provider'):
+            self.provider = config.get('driver', 'provider')
+        else:
+            raise ValueError('libcloud.ini does not have a provider defined')
+
+        if config.has_option('driver', 'key'):
+            self.key = config.get('driver', 'key')
+        else:
+            raise ValueError('libcloud.ini does not have a key defined')
+
+        if config.has_option('driver', 'secret'):
+            self.secret = config.get('driver', 'secret')
+        else:
+            raise ValueError('libcloud.ini does not have a secret defined')
+
+        # Optional driver settings; the defaults keep the Driver() call below
+        # from raising AttributeError when an option is absent from the file
+        self.host = None
+        self.path = None
+        self.secure = True
+        self.verify_ssl_cert = True
+        self.port = None
+        self.api_version = None
+
+        if config.has_option('driver', 'host'):
+            self.host = config.get('driver', 'host')
+        if config.has_option('driver', 'secure'):
+            self.secure = config.get('driver', 'secure')
+        if config.has_option('driver', 'verify_ssl_cert'):
+            self.verify_ssl_cert = config.get('driver', 'verify_ssl_cert')
+        if config.has_option('driver', 'port'):
+            self.port = config.get('driver', 'port')
+        if config.has_option('driver', 'path'):
+            self.path = config.get('driver', 'path')
+        if config.has_option('driver', 'api_version'):
+            self.api_version = config.get('driver', 'api_version')
+
+        Driver = get_driver(getattr(Provider, self.provider))
+
+        self.conn = Driver(key=self.key, secret=self.secret, secure=self.secure,
+                           host=self.host, path=self.path)
+
+        # Cache related
+        cache_path = config.get('cache', 'cache_path')
+        self.cache_path_cache = cache_path + "/ansible-libcloud.cache"
+        self.cache_path_index = cache_path + "/ansible-libcloud.index"
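+        # the .cache file stores the grouped inventory dict and the .index
+        # file stores the address-to-node-name map used for --host lookups
+        # (see write_to_cache() and get_host_info() below)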
+        self.cache_max_age = config.getint('cache', 'cache_max_age')
+
+    def parse_cli_args(self):
+        '''
+        Command line argument processing
+        '''
+
+        parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on libcloud supported providers')
+        parser.add_argument('--list', action='store_true', default=True,
+                            help='List instances (default: True)')
+        parser.add_argument('--host', action='store',
+                            help='Get all the variables about a specific instance')
+        parser.add_argument('--refresh-cache', action='store_true', default=False,
+                            help='Force refresh of cache by making API requests to libcloud supported providers (default: False - use cache files)')
+        self.args = parser.parse_args()
+
+    def do_api_calls_update_cache(self):
+        '''
+        Do API calls to a location, and save data in cache files
+        '''
+
+        self.get_nodes()
+
+        self.write_to_cache(self.inventory, self.cache_path_cache)
+        self.write_to_cache(self.index, self.cache_path_index)
+
+    def get_nodes(self):
+        '''
+        Gets the list of all nodes
+        '''
+
+        for node in self.conn.list_nodes():
+            self.add_node(node)
+
+    def get_node(self, node_id):
+        '''
+        Gets details about a specific node
+        '''
+
+        return [node for node in self.conn.list_nodes() if node.id == node_id][0]
+
+    def add_node(self, node):
+        '''
+        Adds a node to the inventory and index, as long as it is
+        addressable
+        '''
+
+        # Only want running instances
+        if node.state != 0:
+            return
+
+        # Select the best destination address (guard against an empty
+        # public_ips list, which previously left dest undefined)
+        dest = node.public_ips[0] if node.public_ips else None
+        if not dest:
+            # Skip instances we cannot address (e.g. private VPC subnet)
+            return
+
+        # Add to index
+        self.index[dest] = node.name
+
+        # Inventory: Group by instance ID (always a group of 1)
+        self.inventory[node.name] = [dest]
+        '''
+        # Inventory: Group by region
+        self.push(self.inventory, region, dest)
+
+        # Inventory: Group by availability zone
+        self.push(self.inventory, node.placement, dest)
+
+        # Inventory: Group by instance type
+        self.push(self.inventory, self.to_safe('type_' + node.instance_type), dest)
+        '''
+        # Inventory: Group by key pair
+        if node.extra['key_name']:
+            self.push(self.inventory, self.to_safe('key_' + node.extra['key_name']), dest)
+
+        # Inventory: Group by security group, quick thing to handle single sg
+        if node.extra['security_group']:
+            self.push(self.inventory, self.to_safe('sg_' + node.extra['security_group'][0]), dest)
+
+        # Inventory: Group by tag
+        if node.extra['tags']:
+            for tagkey in node.extra['tags'].keys():
+                self.push(self.inventory, self.to_safe('tag_' + tagkey + '_' + node.extra['tags'][tagkey]), dest)
+
+    def get_host_info(self):
+        '''
+        Get variables about a specific host
+        '''
+
+        if len(self.index) == 0:
+            # Need to load index from cache
+            self.load_index_from_cache()
+
+        if self.args.host not in self.index:
+            # try updating the cache
+            self.do_api_calls_update_cache()
+            if self.args.host not in self.index:
+                # host might not exist anymore
+                return self.json_format_dict({}, True)
+
+        node_id = self.index[self.args.host]
+
+        node = self.get_node(node_id)
+        instance_vars = {}
+        for key, value in vars(node).items():
+            key = self.to_safe('ec2_' + key)
+
+            # Handle complex types
+            if isinstance(value, (int, bool)):
+                instance_vars[key] = value
+            elif isinstance(value, string_types):
+                instance_vars[key] = value.strip()
+            elif value is None:
+                instance_vars[key] = ''
+            elif key == 'ec2_region':
+                instance_vars[key] = value.name
+            elif key == 'ec2_tags':
+                for k, v in iteritems(value):
+                    key = self.to_safe('ec2_tag_' + k)
+                    instance_vars[key] = v
+            elif key == 'ec2_groups':
+                group_ids = []
+                group_names = []
+                for group in value:
+                    group_ids.append(group.id)
+                    group_names.append(group.name)
+                instance_vars["ec2_security_group_ids"] = ','.join(group_ids)
+                instance_vars["ec2_security_group_names"] = ','.join(group_names)
+            else:
+                pass
+                # TODO Product codes if someone finds them useful
+                # print(key)
+                # print(type(value))
+                # print(value)
+
+        return self.json_format_dict(instance_vars, True)
+
+    def push(self, my_dict, key, element):
+        '''
+        Pushes an element onto an array that may not have been defined in
+        the dict
+        '''
+
+        if key in my_dict:
+            my_dict[key].append(element)
+        else:
+            my_dict[key] = [element]
+
+    def get_inventory_from_cache(self):
+        '''
+        Reads the inventory from the cache file and returns it as a JSON
+        object
+        '''
+
+        cache = open(self.cache_path_cache, 'r')
+        json_inventory = cache.read()
+        cache.close()
+        return json_inventory
+
+    def load_index_from_cache(self):
+        '''
+        Reads the index from the cache file and sets self.index
+        '''
+
+        cache = open(self.cache_path_index, 'r')
+        json_index = cache.read()
+        cache.close()
+        self.index = json.loads(json_index)
+
+    def write_to_cache(self, data, filename):
+        '''
+        Writes data in JSON format to a file
+        '''
+
+        json_data = self.json_format_dict(data, True)
+        cache = open(filename, 'w')
+        cache.write(json_data)
+        cache.close()
+
+    def to_safe(self, word):
+        '''
+        Converts 'bad' characters in a string to underscores so they can be
+        used as Ansible groups
+        '''
+
+        return re.sub(r"[^A-Za-z0-9\-]", "_", word)
+
+    def json_format_dict(self, data, pretty=False):
+        '''
+        Converts a dict to a JSON object and dumps it as a formatted
+        string
+        '''
+
+        if pretty:
+            return json.dumps(data, sort_keys=True, indent=2)
+        else:
+            return json.dumps(data)
+
+
+def main():
+    LibcloudInventory()
+
+
+if __name__ == '__main__':
+    main()
diff --git a/scripts/inventory/apstra_aos.ini b/scripts/inventory/apstra_aos.ini
new file mode 100644
index 0000000000..1ec1255c9c
--- /dev/null
+++ b/scripts/inventory/apstra_aos.ini
@@ -0,0 +1,20 @@
+# Ansible Apstra AOS external inventory script settings
+# Dynamic Inventory script parameters can be provided using this file
+# Or by using Environment Variables:
+# - AOS_SERVER, AOS_PORT, AOS_USERNAME, AOS_PASSWORD, AOS_BLUEPRINT
+#
+# This file takes precedence over the Environment Variables
+#
+
+[aos]
+
+# aos_server = 172.20.62.3
+# port = 8888
+# username = admin
+# password = admin
+
+## Blueprint Mode
+# To use the inventory in Blueprint mode, define the name of the blueprint you want to use
+
+# blueprint = my-blueprint-l2
+# blueprint_interface = true
diff --git a/scripts/inventory/apstra_aos.py b/scripts/inventory/apstra_aos.py
new file mode 100644
index 0000000000..7b9af7db5c
--- /dev/null
+++ b/scripts/inventory/apstra_aos.py
@@ -0,0 +1,589 @@
+#!/usr/bin/env python
+#
+# (c) 2017 Apstra Inc,
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+"""
+Apstra AOS external inventory script
+====================================
+
+Ansible has a feature where instead of reading from /etc/ansible/hosts
+as a text file, it can query external programs to obtain the list
+of hosts, the groups the hosts are in, and even variables to assign to each host.
+
+To use this:
+    - copy this file over /etc/ansible/hosts and chmod +x the file, or
+    - copy both files (.py and .ini) into your preferred directory
+
+More information about Ansible Dynamic Inventory can be found here:
+http://unix.stackexchange.com/questions/205479/in-ansible-dynamic-inventory-json-can-i-render-hostvars-based-on-the-hostname
+
+Two modes are currently supported: **device based** or **blueprint based**:
+    - For **Device based**, the list of devices is taken from the global device list;
+      the serial ID will be used as the inventory_hostname
+    - For **Blueprint based**, the list of devices is taken from the given blueprint;
+      the Node name will be used as the inventory_hostname
+
+Input parameters can be provided either via the ini file or via Environment Variables.
+The following Environment Variables are supported: AOS_SERVER, AOS_PORT, AOS_USERNAME, AOS_PASSWORD, AOS_BLUEPRINT
+The config file takes precedence over the Environment Variables
+
+Tested with Apstra AOS 1.1
+
+This script was inspired by the cobbler.py inventory script. Thanks!
+
+Author: Damien Garros (@dgarros)
+Version: 0.2.0
+"""
+import json
+import os
+import re
+import sys
+
+try:
+    import argparse
+    HAS_ARGPARSE = True
+except ImportError:
+    HAS_ARGPARSE = False
+
+try:
+    from apstra.aosom.session import Session
+    HAS_AOS_PYEZ = True
+except ImportError:
+    HAS_AOS_PYEZ = False
+
+from ansible.module_utils.six.moves import configparser
+
+
+"""
+##
+Expected output format in Device mode
+{
+  "Cumulus": {
+    "hosts": [
+      "52540073956E",
+      "52540022211A"
+    ],
+    "vars": {}
+  },
+  "EOS": {
+    "hosts": [
+      "5254001CAFD8",
+      "525400DDDF72"
+    ],
+    "vars": {}
+  },
+  "Generic Model": {
+    "hosts": [
+      "525400E5486D"
+    ],
+    "vars": {}
+  },
+  "Ubuntu GNU/Linux": {
+    "hosts": [
+      "525400E5486D"
+    ],
+    "vars": {}
+  },
+  "VX": {
+    "hosts": [
+      "52540073956E",
+      "52540022211A"
+    ],
+    "vars": {}
+  },
+  "_meta": {
+    "hostvars": {
+      "5254001CAFD8": {
+        "agent_start_time": "2017-02-03T00:49:16.000000Z",
+        "ansible_ssh_host": "172.20.52.6",
+        "aos_hcl_model": "Arista_vEOS",
+        "aos_server": "",
+        "aos_version": "AOS_1.1.1_OB.5",
+        "comm_state": "on",
+        "device_start_time": "2017-02-03T00:47:58.454480Z",
+        "domain_name": "",
+        "error_message": "",
+        "fqdn": "localhost",
+        "hostname": "localhost",
+        "hw_model": "vEOS",
+        "hw_version": "",
+        "is_acknowledged": false,
+        "mgmt_ifname": "Management1",
+        "mgmt_ipaddr": "172.20.52.6",
+        "mgmt_macaddr": "52:54:00:1C:AF:D8",
+        "os_arch": "x86_64",
+        "os_family": "EOS",
+        "os_version": "4.16.6M",
+        "os_version_info": {
+          "build": "6M",
+          "major": "4",
+          "minor": "16"
+        },
+        "serial_number": "5254001CAFD8",
+        "state": "OOS-QUARANTINED",
+        "vendor": "Arista"
+      },
+      "52540022211A": {
+        "agent_start_time": "2017-02-03T00:45:22.000000Z",
+        "ansible_ssh_host": "172.20.52.7",
+        "aos_hcl_model": "Cumulus_VX",
+        "aos_server": "172.20.52.3",
+        "aos_version": "AOS_1.1.1_OB.5",
+        "comm_state": "on",
+        "device_start_time": "2017-02-03T00:45:11.019189Z",
+        "domain_name": "",
+        "error_message": "",
+        "fqdn": "cumulus",
+        "hostname": "cumulus",
+        "hw_model": "VX",
+        "hw_version": "",
+        "is_acknowledged": false,
+        "mgmt_ifname": "eth0",
+        "mgmt_ipaddr": "172.20.52.7",
+        "mgmt_macaddr":
"52:54:00:22:21:1a", + "os_arch": "x86_64", + "os_family": "Cumulus", + "os_version": "3.1.1", + "os_version_info": { + "build": "1", + "major": "3", + "minor": "1" + }, + "serial_number": "52540022211A", + "state": "OOS-QUARANTINED", + "vendor": "Cumulus" + }, + "52540073956E": { + "agent_start_time": "2017-02-03T00:45:19.000000Z", + "ansible_ssh_host": "172.20.52.8", + "aos_hcl_model": "Cumulus_VX", + "aos_server": "172.20.52.3", + "aos_version": "AOS_1.1.1_OB.5", + "comm_state": "on", + "device_start_time": "2017-02-03T00:45:11.030113Z", + "domain_name": "", + "error_message": "", + "fqdn": "cumulus", + "hostname": "cumulus", + "hw_model": "VX", + "hw_version": "", + "is_acknowledged": false, + "mgmt_ifname": "eth0", + "mgmt_ipaddr": "172.20.52.8", + "mgmt_macaddr": "52:54:00:73:95:6e", + "os_arch": "x86_64", + "os_family": "Cumulus", + "os_version": "3.1.1", + "os_version_info": { + "build": "1", + "major": "3", + "minor": "1" + }, + "serial_number": "52540073956E", + "state": "OOS-QUARANTINED", + "vendor": "Cumulus" + }, + "525400DDDF72": { + "agent_start_time": "2017-02-03T00:49:07.000000Z", + "ansible_ssh_host": "172.20.52.5", + "aos_hcl_model": "Arista_vEOS", + "aos_server": "", + "aos_version": "AOS_1.1.1_OB.5", + "comm_state": "on", + "device_start_time": "2017-02-03T00:47:46.929921Z", + "domain_name": "", + "error_message": "", + "fqdn": "localhost", + "hostname": "localhost", + "hw_model": "vEOS", + "hw_version": "", + "is_acknowledged": false, + "mgmt_ifname": "Management1", + "mgmt_ipaddr": "172.20.52.5", + "mgmt_macaddr": "52:54:00:DD:DF:72", + "os_arch": "x86_64", + "os_family": "EOS", + "os_version": "4.16.6M", + "os_version_info": { + "build": "6M", + "major": "4", + "minor": "16" + }, + "serial_number": "525400DDDF72", + "state": "OOS-QUARANTINED", + "vendor": "Arista" + }, + "525400E5486D": { + "agent_start_time": "2017-02-02T18:44:42.000000Z", + "ansible_ssh_host": "172.20.52.4", + "aos_hcl_model": "Generic_Server_1RU_1x10G", + "aos_server": "172.20.52.3", + "aos_version": "AOS_1.1.1_OB.5", + "comm_state": "on", + "device_start_time": "2017-02-02T21:11:25.188734Z", + "domain_name": "", + "error_message": "", + "fqdn": "localhost", + "hostname": "localhost", + "hw_model": "Generic Model", + "hw_version": "pc-i440fx-trusty", + "is_acknowledged": false, + "mgmt_ifname": "eth0", + "mgmt_ipaddr": "172.20.52.4", + "mgmt_macaddr": "52:54:00:e5:48:6d", + "os_arch": "x86_64", + "os_family": "Ubuntu GNU/Linux", + "os_version": "14.04 LTS", + "os_version_info": { + "build": "", + "major": "14", + "minor": "04" + }, + "serial_number": "525400E5486D", + "state": "OOS-QUARANTINED", + "vendor": "Generic Manufacturer" + } + } + }, + "all": { + "hosts": [ + "5254001CAFD8", + "52540073956E", + "525400DDDF72", + "525400E5486D", + "52540022211A" + ], + "vars": {} + }, + "vEOS": { + "hosts": [ + "5254001CAFD8", + "525400DDDF72" + ], + "vars": {} + } +} +""" + + +def fail(msg): + sys.stderr.write("%s\n" % msg) + sys.exit(1) + + +class AosInventory(object): + + def __init__(self): + + """ Main execution path """ + + if not HAS_AOS_PYEZ: + raise Exception('aos-pyez is not installed. Please see details here: https://github.com/Apstra/aos-pyez') + if not HAS_ARGPARSE: + raise Exception('argparse is not installed. 
Please install the argparse library or upgrade to python-2.7') + + # Initialize inventory + self.inventory = dict() # A list of groups and the hosts in that group + self.inventory['_meta'] = dict() + self.inventory['_meta']['hostvars'] = dict() + + # Read settings and parse CLI arguments + self.read_settings() + self.parse_cli_args() + + # ---------------------------------------------------- + # Open session to AOS + # ---------------------------------------------------- + aos = Session(server=self.aos_server, + port=self.aos_server_port, + user=self.aos_username, + passwd=self.aos_password) + + aos.login() + + # Save session information in variables of group all + self.add_var_to_group('all', 'aos_session', aos.session) + + # Add the AOS server itself in the inventory + self.add_host_to_group("all", 'aos') + self.add_var_to_host("aos", "ansible_ssh_host", self.aos_server) + self.add_var_to_host("aos", "ansible_ssh_pass", self.aos_password) + self.add_var_to_host("aos", "ansible_ssh_user", self.aos_username) + + # ---------------------------------------------------- + # Build the inventory + # 2 modes are supported: device based or blueprint based + # - For device based, the list of device is taken from the global device list + # the serial ID will be used as the inventory_hostname + # - For Blueprint based, the list of device is taken from the given blueprint + # the Node name will be used as the inventory_hostname + # ---------------------------------------------------- + if self.aos_blueprint: + + bp = aos.Blueprints[self.aos_blueprint] + if bp.exists is False: + fail("Unable to find the Blueprint: %s" % self.aos_blueprint) + + for dev_name, dev_id in bp.params['devices'].value.items(): + + self.add_host_to_group('all', dev_name) + device = aos.Devices.find(uid=dev_id) + + if 'facts' in device.value.keys(): + self.add_device_facts_to_var(dev_name, device) + + # Define admin State and Status + if 'user_config' in device.value.keys(): + if 'admin_state' in device.value['user_config'].keys(): + self.add_var_to_host(dev_name, 'admin_state', device.value['user_config']['admin_state']) + + self.add_device_status_to_var(dev_name, device) + + # Go over the contents data structure + for node in bp.contents['system']['nodes']: + if node['display_name'] == dev_name: + self.add_host_to_group(node['role'], dev_name) + + # Check for additional attribute to import + attributes_to_import = [ + 'loopback_ip', + 'asn', + 'role', + 'position', + ] + for attr in attributes_to_import: + if attr in node.keys(): + self.add_var_to_host(dev_name, attr, node[attr]) + + # if blueprint_interface is enabled in the configuration + # Collect links information + if self.aos_blueprint_int: + interfaces = dict() + + for link in bp.contents['system']['links']: + # each link has 2 sides [0,1], and it's unknown which one match this device + # at first we assume, first side match(0) and peer is (1) + peer_id = 1 + + for side in link['endpoints']: + if side['display_name'] == dev_name: + + # import local information first + int_name = side['interface'] + + # init dict + interfaces[int_name] = dict() + if 'ip' in side.keys(): + interfaces[int_name]['ip'] = side['ip'] + + if 'interface' in side.keys(): + interfaces[int_name]['name'] = side['interface'] + + if 'display_name' in link['endpoints'][peer_id].keys(): + interfaces[int_name]['peer'] = link['endpoints'][peer_id]['display_name'] + + if 'ip' in link['endpoints'][peer_id].keys(): + interfaces[int_name]['peer_ip'] = link['endpoints'][peer_id]['ip'] + + if 'type' in 
link['endpoints'][peer_id].keys():
+                                    interfaces[int_name]['peer_type'] = link['endpoints'][peer_id]['type']
+
+                            else:
+                                # if we did not match on the first pass, flip
+                                # peer_id for the second loop iteration
+                                peer_id = 0
+
+                    self.add_var_to_host(dev_name, 'interfaces', interfaces)
+
+        else:
+            for device in aos.Devices:
+                # If not reachable, create by key and
+                # If reachable, create by hostname
+
+                self.add_host_to_group('all', device.name)
+
+                # populate information for this host
+                self.add_device_status_to_var(device.name, device)
+
+                if 'user_config' in device.value.keys():
+                    for key, value in device.value['user_config'].items():
+                        self.add_var_to_host(device.name, key, value)
+
+                # Based on device status online|offline, collect facts as well
+                if device.value['status']['comm_state'] == 'on':
+
+                    if 'facts' in device.value.keys():
+                        self.add_device_facts_to_var(device.name, device)
+
+                # Check if the device is associated with a blueprint;
+                # if it is, create a new group
+                if 'blueprint_active' in device.value['status'].keys():
+                    if 'blueprint_id' in device.value['status'].keys():
+                        bp = aos.Blueprints.find(uid=device.value['status']['blueprint_id'])
+
+                        if bp:
+                            self.add_host_to_group(bp.name, device.name)
+
+        # ----------------------------------------------------
+        # Convert the inventory to a JSON string and print it
+        # ----------------------------------------------------
+        data_to_print = self.json_format_dict(self.inventory, True)
+
+        print(data_to_print)
+
+    def read_settings(self):
+        """ Reads the settings from the apstra_aos.ini file """
+
+        config = configparser.ConfigParser()
+        config.read(os.path.dirname(os.path.realpath(__file__)) + '/apstra_aos.ini')
+
+        # Default Values
+        self.aos_blueprint = False
+        self.aos_blueprint_int = True
+        self.aos_username = 'admin'
+        self.aos_password = 'admin'
+        self.aos_server_port = 8888
+
+        # Try to read each parameter from the file first; if absent, fall back to the ENV
+        try:
+            self.aos_server = config.get('aos', 'aos_server')
+        except Exception:
+            if 'AOS_SERVER' in os.environ.keys():
+                self.aos_server = os.environ['AOS_SERVER']
+
+        try:
+            self.aos_server_port = config.get('aos', 'port')
+        except Exception:
+            if 'AOS_PORT' in os.environ.keys():
+                self.aos_server_port = os.environ['AOS_PORT']
+
+        try:
+            self.aos_username = config.get('aos', 'username')
+        except Exception:
+            if 'AOS_USERNAME' in os.environ.keys():
+                self.aos_username = os.environ['AOS_USERNAME']
+
+        try:
+            self.aos_password = config.get('aos', 'password')
+        except Exception:
+            if 'AOS_PASSWORD' in os.environ.keys():
+                self.aos_password = os.environ['AOS_PASSWORD']
+
+        try:
+            self.aos_blueprint = config.get('aos', 'blueprint')
+        except Exception:
+            if 'AOS_BLUEPRINT' in os.environ.keys():
+                self.aos_blueprint = os.environ['AOS_BLUEPRINT']
+
+        try:
+            if config.get('aos', 'blueprint_interface') in ['false', 'no']:
+                self.aos_blueprint_int = False
+        except Exception:
+            pass
+
+    def parse_cli_args(self):
+        """ Command line argument processing """
+
+        parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on Apstra AOS')
+        parser.add_argument('--list', action='store_true', default=True, help='List instances (default: True)')
+        parser.add_argument('--host', action='store', help='Get all the variables about a specific instance')
+        self.args = parser.parse_args()
+
+    def json_format_dict(self, data, pretty=False):
+        """ Converts a dict to a JSON object and dumps it as a formatted string """
+
+        if pretty:
+            return json.dumps(data, sort_keys=True, indent=2)
+        else:
+            return json.dumps(data)
+
+    def add_host_to_group(self, group, host):
+
+        # Cleanup group name first
+        clean_group = self.cleanup_group_name(group)
+
+        # Check if the group exists; if not, initialize it
+        if clean_group not in self.inventory.keys():
+            self.inventory[clean_group] = {}
+            self.inventory[clean_group]['hosts'] = []
+            self.inventory[clean_group]['vars'] = {}
+
+        self.inventory[clean_group]['hosts'].append(host)
+
+    def add_var_to_host(self, host, var, value):
+
+        # Check if the host exists; if not, initialize it
+        if host not in self.inventory['_meta']['hostvars'].keys():
+            self.inventory['_meta']['hostvars'][host] = {}
+
+        self.inventory['_meta']['hostvars'][host][var] = value
+
+    def add_var_to_group(self, group, var, value):
+
+        # Cleanup group name first
+        clean_group = self.cleanup_group_name(group)
+
+        # Check if the group exists; if not, initialize it
+        if clean_group not in self.inventory.keys():
+            self.inventory[clean_group] = {}
+            self.inventory[clean_group]['hosts'] = []
+            self.inventory[clean_group]['vars'] = {}
+
+        self.inventory[clean_group]['vars'][var] = value
+
+    def add_device_facts_to_var(self, device_name, device):
+
+        # Populate variables for this host
+        self.add_var_to_host(device_name,
+                             'ansible_ssh_host',
+                             device.value['facts']['mgmt_ipaddr'])
+
+        self.add_var_to_host(device_name, 'id', device.id)
+
+        # self.add_host_to_group('all', device.name)
+        for key, value in device.value['facts'].items():
+            self.add_var_to_host(device_name, key, value)
+
+            if key == 'os_family':
+                self.add_host_to_group(value, device_name)
+            elif key == 'hw_model':
+                self.add_host_to_group(value, device_name)
+
+    def cleanup_group_name(self, group_name):
+        """
+        Clean up a group name by:
+          - Replacing all non-alphanumeric characters with underscores
+          - Converting to lowercase
+        """
+
+        rx = re.compile(r'\W+')
+        clean_group = rx.sub('_', group_name).lower()
+
+        return clean_group
+
+    def add_device_status_to_var(self, device_name, device):
+
+        if 'status' in device.value.keys():
+            for key, value in device.value['status'].items():
+                # key the variables on device_name (the inventory hostname),
+                # not device.name, so blueprint mode targets the right host
+                self.add_var_to_host(device_name, key, value)
+
+
+# Run the script
+if __name__ == '__main__':
+    AosInventory()
diff --git a/scripts/inventory/azure_rm.ini b/scripts/inventory/azure_rm.ini
new file mode 100644
index 0000000000..6edd9b981b
--- /dev/null
+++ b/scripts/inventory/azure_rm.ini
@@ -0,0 +1,23 @@
+#
+# Configuration file for azure_rm.py
+#
+[azure]
+# Control which resource groups are included. By default all resource groups are included.
+# Set resource_groups to a comma separated list of resource group names.
+#resource_groups=
+
+# Control which tags are included. Set tags to a comma separated list of keys or key:value pairs
+#tags=
+
+# Control which locations are included. Set locations to a comma separated list (e.g. eastus,eastus2,westus)
+#locations=
+
+# Include powerstate. If you don't need powerstate information, turning it off improves runtime performance.
+include_powerstate=yes
+
+# Control grouping with the following boolean flags. Valid values: yes, no, true, false, True, False, 0, 1.
+group_by_resource_group=yes
+group_by_location=yes
+group_by_security_group=yes
+group_by_os_family=yes
+group_by_tag=yes
diff --git a/scripts/inventory/azure_rm.py b/scripts/inventory/azure_rm.py
new file mode 100644
index 0000000000..7dc438f6a2
--- /dev/null
+++ b/scripts/inventory/azure_rm.py
@@ -0,0 +1,973 @@
+#!/usr/bin/env python
+#
+# Copyright (c) 2016 Matt Davis,
+#                    Chris Houseknecht,
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+'''
+Important note (2018/10)
+========================
+This inventory script is in maintenance mode: only critical bug fixes but no new features.
+There's a new Azure external inventory script at
+https://github.com/ansible/ansible/blob/devel/lib/ansible/plugins/inventory/azure_rm.py,
+with better performance and the latest features. Please use that script for the
+latest Azure inventory support.
+
+Azure External Inventory Script
+===============================
+Generates dynamic inventory by making API requests to the Azure Resource
+Manager using the Azure Python SDK. For instructions on installing the
+Azure Python SDK see https://azure-sdk-for-python.readthedocs.io/
+
+Authentication
+--------------
+The order of precedence is command line arguments, environment variables,
+and finally the [default] profile found in ~/.azure/credentials.
+
+If using a credentials file, it should be an ini formatted file with one or
+more sections, which we refer to as profiles. The script looks for a
+[default] section if a profile is not specified either on the command line
+or with an environment variable. The keys in a profile will match the
+list of command line arguments below.
+
+For command line arguments and environment variables specify a profile found
+in your ~/.azure/credentials file, or a service principal or Active Directory
+user.
+
+Command line arguments:
+ - profile
+ - client_id
+ - secret
+ - subscription_id
+ - tenant
+ - ad_user
+ - password
+ - cloud_environment
+ - adfs_authority_url
+
+Environment variables:
+ - AZURE_PROFILE
+ - AZURE_CLIENT_ID
+ - AZURE_SECRET
+ - AZURE_SUBSCRIPTION_ID
+ - AZURE_TENANT
+ - AZURE_AD_USER
+ - AZURE_PASSWORD
+ - AZURE_CLOUD_ENVIRONMENT
+ - AZURE_ADFS_AUTHORITY_URL
+
+Run for Specific Host
+---------------------
+When run for a specific host using the --host option, a resource group is
+required.
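+For example (host and group names here are illustrative; the resource group
+can also come from azure_rm.ini or the AZURE_RESOURCE_GROUPS variable):
+
+    $ AZURE_RESOURCE_GROUPS=galaxy-production ./azure_rm.py --host object-name --pretty
+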
For a specific host, this script returns the following variables: + +{ + "ansible_host": "XXX.XXX.XXX.XXX", + "computer_name": "computer_name2", + "fqdn": null, + "id": "/subscriptions/subscription-id/resourceGroups/galaxy-production/providers/Microsoft.Compute/virtualMachines/object-name", + "image": { + "offer": "CentOS", + "publisher": "OpenLogic", + "sku": "7.1", + "version": "latest" + }, + "location": "westus", + "mac_address": "00-00-5E-00-53-FE", + "name": "object-name", + "network_interface": "interface-name", + "network_interface_id": "/subscriptions/subscription-id/resourceGroups/galaxy-production/providers/Microsoft.Network/networkInterfaces/object-name1", + "network_security_group": null, + "network_security_group_id": null, + "os_disk": { + "name": "object-name", + "operating_system_type": "Linux" + }, + "plan": null, + "powerstate": "running", + "private_ip": "172.26.3.6", + "private_ip_alloc_method": "Static", + "provisioning_state": "Succeeded", + "public_ip": "XXX.XXX.XXX.XXX", + "public_ip_alloc_method": "Static", + "public_ip_id": "/subscriptions/subscription-id/resourceGroups/galaxy-production/providers/Microsoft.Network/publicIPAddresses/object-name", + "public_ip_name": "object-name", + "resource_group": "galaxy-production", + "security_group": "object-name", + "security_group_id": "/subscriptions/subscription-id/resourceGroups/galaxy-production/providers/Microsoft.Network/networkSecurityGroups/object-name", + "tags": { + "db": "database" + }, + "type": "Microsoft.Compute/virtualMachines", + "virtual_machine_size": "Standard_DS4" +} + +Groups +------ +When run in --list mode, instances are grouped by the following categories: + - azure + - location + - resource_group + - security_group + - tag key + - tag key_value + +Control groups using azure_rm.ini or set environment variables: + +AZURE_GROUP_BY_RESOURCE_GROUP=yes +AZURE_GROUP_BY_LOCATION=yes +AZURE_GROUP_BY_SECURITY_GROUP=yes +AZURE_GROUP_BY_TAG=yes + +Select hosts within specific resource groups by assigning a comma separated list to: + +AZURE_RESOURCE_GROUPS=resource_group_a,resource_group_b + +Select hosts for specific tag key by assigning a comma separated list of tag keys to: + +AZURE_TAGS=key1,key2,key3 + +Select hosts for specific locations: + +AZURE_LOCATIONS=eastus,westus,eastus2 + +Or, select hosts for specific tag key:value pairs by assigning a comma separated list key:value pairs to: + +AZURE_TAGS=key1:value1,key2:value2 + +If you don't need the powerstate, you can improve performance by turning off powerstate fetching: +AZURE_INCLUDE_POWERSTATE=no + +azure_rm.ini +------------ +As mentioned above, you can control execution using environment variables or a .ini file. A sample +azure_rm.ini is included. The name of the .ini file is the basename of the inventory script (in this case +'azure_rm') with a .ini extension. It also assumes the .ini file is alongside the script. To specify +a different path for the .ini file, define the AZURE_INI_PATH environment variable: + + export AZURE_INI_PATH=/path/to/custom.ini + +Powerstate: +----------- +The powerstate attribute indicates whether or not a host is running. If the value is 'running', the machine is +up. If the value is anything other than 'running', the machine is down, and will be unreachable. 
+ +Examples: +--------- + Execute /bin/uname on all instances in the galaxy-qa resource group + $ ansible -i azure_rm.py galaxy-qa -m shell -a "/bin/uname -a" + + Use the inventory script to print instance specific information + $ contrib/inventory/azure_rm.py --host my_instance_host_name --pretty + + Use with a playbook + $ ansible-playbook -i contrib/inventory/azure_rm.py my_playbook.yml --limit galaxy-qa + + +Insecure Platform Warning +------------------------- +If you receive InsecurePlatformWarning from urllib3, install the +requests security packages: + + pip install requests[security] + + +author: + - Chris Houseknecht (@chouseknecht) + - Matt Davis (@nitzmahone) + +Company: Ansible by Red Hat + +Version: 1.0.0 +''' + +import argparse +import json +import os +import re +import sys +import inspect + +from os.path import expanduser +from ansible.module_utils.six.moves import configparser as cp +import ansible.module_utils.six.moves.urllib.parse as urlparse + +HAS_AZURE = True +HAS_AZURE_EXC = None +HAS_AZURE_CLI_CORE = True +CLIError = None + +try: + from msrestazure.azure_active_directory import AADTokenCredentials + from msrestazure.azure_exceptions import CloudError + from msrestazure.azure_active_directory import MSIAuthentication + from msrestazure import azure_cloud + from azure.mgmt.compute import __version__ as azure_compute_version + from azure.common import AzureMissingResourceHttpError, AzureHttpError + from azure.common.credentials import ServicePrincipalCredentials, UserPassCredentials + from azure.mgmt.network import NetworkManagementClient + from azure.mgmt.resource.resources import ResourceManagementClient + from azure.mgmt.resource.subscriptions import SubscriptionClient + from azure.mgmt.compute import ComputeManagementClient + from adal.authentication_context import AuthenticationContext +except ImportError as exc: + HAS_AZURE_EXC = exc + HAS_AZURE = False + +try: + from azure.cli.core.util import CLIError + from azure.common.credentials import get_azure_cli_credentials, get_cli_profile + from azure.common.cloud import get_cli_active_cloud +except ImportError: + HAS_AZURE_CLI_CORE = False + CLIError = Exception + +try: + from ansible.release import __version__ as ansible_version +except ImportError: + ansible_version = 'unknown' + +AZURE_CREDENTIAL_ENV_MAPPING = dict( + profile='AZURE_PROFILE', + subscription_id='AZURE_SUBSCRIPTION_ID', + client_id='AZURE_CLIENT_ID', + secret='AZURE_SECRET', + tenant='AZURE_TENANT', + ad_user='AZURE_AD_USER', + password='AZURE_PASSWORD', + cloud_environment='AZURE_CLOUD_ENVIRONMENT', + adfs_authority_url='AZURE_ADFS_AUTHORITY_URL' +) + +AZURE_CONFIG_SETTINGS = dict( + resource_groups='AZURE_RESOURCE_GROUPS', + tags='AZURE_TAGS', + locations='AZURE_LOCATIONS', + include_powerstate='AZURE_INCLUDE_POWERSTATE', + group_by_resource_group='AZURE_GROUP_BY_RESOURCE_GROUP', + group_by_location='AZURE_GROUP_BY_LOCATION', + group_by_security_group='AZURE_GROUP_BY_SECURITY_GROUP', + group_by_tag='AZURE_GROUP_BY_TAG', + group_by_os_family='AZURE_GROUP_BY_OS_FAMILY', + use_private_ip='AZURE_USE_PRIVATE_IP' +) + +AZURE_MIN_VERSION = "2.0.0" +ANSIBLE_USER_AGENT = 'Ansible/{0}'.format(ansible_version) + + +def azure_id_to_dict(id): + pieces = re.sub(r'^\/', '', id).split('/') + result = {} + index = 0 + while index < len(pieces) - 1: + result[pieces[index]] = pieces[index + 1] + index += 1 + return result + + +class AzureRM(object): + + def __init__(self, args): + self._args = args + self._cloud_environment = None + self._compute_client = None + 
self._resource_client = None + self._network_client = None + self._adfs_authority_url = None + self._resource = None + + self.debug = False + if args.debug: + self.debug = True + + self.credentials = self._get_credentials(args) + if not self.credentials: + self.fail("Failed to get credentials. Either pass as parameters, set environment variables, " + "or define a profile in ~/.azure/credentials.") + + # if cloud_environment specified, look up/build Cloud object + raw_cloud_env = self.credentials.get('cloud_environment') + if not raw_cloud_env: + self._cloud_environment = azure_cloud.AZURE_PUBLIC_CLOUD # SDK default + else: + # try to look up "well-known" values via the name attribute on azure_cloud members + all_clouds = [x[1] for x in inspect.getmembers(azure_cloud) if isinstance(x[1], azure_cloud.Cloud)] + matched_clouds = [x for x in all_clouds if x.name == raw_cloud_env] + if len(matched_clouds) == 1: + self._cloud_environment = matched_clouds[0] + elif len(matched_clouds) > 1: + self.fail("Azure SDK failure: more than one cloud matched for cloud_environment name '{0}'".format(raw_cloud_env)) + else: + if not urlparse.urlparse(raw_cloud_env).scheme: + self.fail("cloud_environment must be an endpoint discovery URL or one of {0}".format([x.name for x in all_clouds])) + try: + self._cloud_environment = azure_cloud.get_cloud_from_metadata_endpoint(raw_cloud_env) + except Exception as e: + self.fail("cloud_environment {0} could not be resolved: {1}".format(raw_cloud_env, e.message)) + + if self.credentials.get('subscription_id', None) is None: + self.fail("Credentials did not include a subscription_id value.") + self.log("setting subscription_id") + self.subscription_id = self.credentials['subscription_id'] + + # get authentication authority + # for adfs, user could pass in authority or not. + # for others, use default authority from cloud environment + if self.credentials.get('adfs_authority_url'): + self._adfs_authority_url = self.credentials.get('adfs_authority_url') + else: + self._adfs_authority_url = self._cloud_environment.endpoints.active_directory + + # get resource from cloud environment + self._resource = self._cloud_environment.endpoints.active_directory_resource_id + + if self.credentials.get('credentials'): + self.azure_credentials = self.credentials.get('credentials') + elif self.credentials.get('client_id') and self.credentials.get('secret') and self.credentials.get('tenant'): + self.azure_credentials = ServicePrincipalCredentials(client_id=self.credentials['client_id'], + secret=self.credentials['secret'], + tenant=self.credentials['tenant'], + cloud_environment=self._cloud_environment) + + elif self.credentials.get('ad_user') is not None and \ + self.credentials.get('password') is not None and \ + self.credentials.get('client_id') is not None and \ + self.credentials.get('tenant') is not None: + + self.azure_credentials = self.acquire_token_with_username_password( + self._adfs_authority_url, + self._resource, + self.credentials['ad_user'], + self.credentials['password'], + self.credentials['client_id'], + self.credentials['tenant']) + + elif self.credentials.get('ad_user') is not None and self.credentials.get('password') is not None: + tenant = self.credentials.get('tenant') + if not tenant: + tenant = 'common' + self.azure_credentials = UserPassCredentials(self.credentials['ad_user'], + self.credentials['password'], + tenant=tenant, + cloud_environment=self._cloud_environment) + + else: + self.fail("Failed to authenticate with provided credentials. 
Some attributes were missing. " + "Credentials must include client_id, secret and tenant or ad_user and password, or " + "ad_user, password, client_id, tenant and adfs_authority_url(optional) for ADFS authentication, or " + "be logged in using AzureCLI.") + + def log(self, msg): + if self.debug: + print(msg + u'\n') + + def fail(self, msg): + raise Exception(msg) + + def _get_profile(self, profile="default"): + path = expanduser("~") + path += "/.azure/credentials" + try: + config = cp.ConfigParser() + config.read(path) + except Exception as exc: + self.fail("Failed to access {0}. Check that the file exists and you have read " + "access. {1}".format(path, str(exc))) + credentials = dict() + for key in AZURE_CREDENTIAL_ENV_MAPPING: + try: + credentials[key] = config.get(profile, key, raw=True) + except Exception: + pass + + if credentials.get('client_id') is not None or credentials.get('ad_user') is not None: + return credentials + + return None + + def _get_env_credentials(self): + env_credentials = dict() + for attribute, env_variable in AZURE_CREDENTIAL_ENV_MAPPING.items(): + env_credentials[attribute] = os.environ.get(env_variable, None) + + if env_credentials['profile'] is not None: + credentials = self._get_profile(env_credentials['profile']) + return credentials + + if env_credentials['client_id'] is not None or env_credentials['ad_user'] is not None: + return env_credentials + + return None + + def _get_azure_cli_credentials(self): + credentials, subscription_id = get_azure_cli_credentials() + cloud_environment = get_cli_active_cloud() + + cli_credentials = { + 'credentials': credentials, + 'subscription_id': subscription_id, + 'cloud_environment': cloud_environment + } + return cli_credentials + + def _get_msi_credentials(self, subscription_id_param=None): + credentials = MSIAuthentication() + subscription_id_param = subscription_id_param or os.environ.get(AZURE_CREDENTIAL_ENV_MAPPING['subscription_id'], None) + try: + # try to get the subscription in MSI to test whether MSI is enabled + subscription_client = SubscriptionClient(credentials) + subscription = next(subscription_client.subscriptions.list()) + subscription_id = str(subscription.subscription_id) + return { + 'credentials': credentials, + 'subscription_id': subscription_id_param or subscription_id + } + except Exception as exc: + return None + + def _get_credentials(self, params): + # Get authentication credentials. + # Precedence: cmd line parameters-> environment variables-> default profile in ~/.azure/credentials. 
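+        # A "profile" is just a section in the ~/.azure/credentials ini file,
+        # e.g. (keys as in AZURE_CREDENTIAL_ENV_MAPPING above; the values are
+        # placeholders, not real credentials):
+        #
+        #     [default]
+        #     subscription_id = xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
+        #     client_id = xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
+        #     secret = my_service_principal_secret
+        #     tenant = xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx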
+
+        self.log('Getting credentials')
+
+        arg_credentials = dict()
+        for attribute, env_variable in AZURE_CREDENTIAL_ENV_MAPPING.items():
+            arg_credentials[attribute] = getattr(params, attribute)
+
+        # try module params
+        if arg_credentials['profile'] is not None:
+            self.log('Retrieving credentials with profile parameter.')
+            credentials = self._get_profile(arg_credentials['profile'])
+            return credentials
+
+        if arg_credentials['client_id'] is not None:
+            self.log('Received credentials from parameters.')
+            return arg_credentials
+
+        if arg_credentials['ad_user'] is not None:
+            self.log('Received credentials from parameters.')
+            return arg_credentials
+
+        # try environment
+        env_credentials = self._get_env_credentials()
+        if env_credentials:
+            self.log('Received credentials from env.')
+            return env_credentials
+
+        # try default profile from ~/.azure/credentials
+        default_credentials = self._get_profile()
+        if default_credentials:
+            self.log('Retrieved default profile credentials from ~/.azure/credentials.')
+            return default_credentials
+
+        msi_credentials = self._get_msi_credentials(arg_credentials.get('subscription_id'))
+        if msi_credentials:
+            self.log('Retrieved credentials from MSI.')
+            return msi_credentials
+
+        try:
+            if HAS_AZURE_CLI_CORE:
+                self.log('Retrieving credentials from AzureCLI profile')
+                cli_credentials = self._get_azure_cli_credentials()
+                return cli_credentials
+        except CLIError as ce:
+            self.log('Error getting AzureCLI profile credentials - {0}'.format(ce))
+
+        return None
+
+    def acquire_token_with_username_password(self, authority, resource, username, password, client_id, tenant):
+        authority_uri = authority
+
+        if tenant is not None:
+            authority_uri = authority + '/' + tenant
+
+        context = AuthenticationContext(authority_uri)
+        token_response = context.acquire_token_with_username_password(resource, username, password, client_id)
+        return AADTokenCredentials(token_response)
+
+    def _register(self, key):
+        try:
+            # We have to perform the one-time registration here. Otherwise, we receive an error the first
+            # time we attempt to use the requested client.
+ resource_client = self.rm_client + resource_client.providers.register(key) + except Exception as exc: + self.log("One-time registration of {0} failed - {1}".format(key, str(exc))) + self.log("You might need to register {0} using an admin account".format(key)) + self.log(("To register a provider using the Python CLI: " + "https://docs.microsoft.com/azure/azure-resource-manager/" + "resource-manager-common-deployment-errors#noregisteredproviderfound")) + + def get_mgmt_svc_client(self, client_type, base_url, api_version): + client = client_type(self.azure_credentials, + self.subscription_id, + base_url=base_url, + api_version=api_version) + client.config.add_user_agent(ANSIBLE_USER_AGENT) + return client + + @property + def network_client(self): + self.log('Getting network client') + if not self._network_client: + self._network_client = self.get_mgmt_svc_client(NetworkManagementClient, + self._cloud_environment.endpoints.resource_manager, + '2017-06-01') + self._register('Microsoft.Network') + return self._network_client + + @property + def rm_client(self): + self.log('Getting resource manager client') + if not self._resource_client: + self._resource_client = self.get_mgmt_svc_client(ResourceManagementClient, + self._cloud_environment.endpoints.resource_manager, + '2017-05-10') + return self._resource_client + + @property + def compute_client(self): + self.log('Getting compute client') + if not self._compute_client: + self._compute_client = self.get_mgmt_svc_client(ComputeManagementClient, + self._cloud_environment.endpoints.resource_manager, + '2017-03-30') + self._register('Microsoft.Compute') + return self._compute_client + + +class AzureInventory(object): + + def __init__(self): + + self._args = self._parse_cli_args() + + try: + rm = AzureRM(self._args) + except Exception as e: + sys.exit("{0}".format(str(e))) + + self._compute_client = rm.compute_client + self._network_client = rm.network_client + self._resource_client = rm.rm_client + self._security_groups = None + + self.resource_groups = [] + self.tags = None + self.locations = None + self.replace_dash_in_groups = False + self.group_by_resource_group = True + self.group_by_location = True + self.group_by_os_family = True + self.group_by_security_group = True + self.group_by_tag = True + self.include_powerstate = True + self.use_private_ip = False + + self._inventory = dict( + _meta=dict( + hostvars=dict() + ), + azure=[] + ) + + self._get_settings() + + if self._args.resource_groups: + self.resource_groups = self._args.resource_groups.split(',') + + if self._args.tags: + self.tags = self._args.tags.split(',') + + if self._args.locations: + self.locations = self._args.locations.split(',') + + if self._args.no_powerstate: + self.include_powerstate = False + + self.get_inventory() + print(self._json_format_dict(pretty=self._args.pretty)) + sys.exit(0) + + def _parse_cli_args(self): + # Parse command line arguments + parser = argparse.ArgumentParser( + description='Produce an Ansible Inventory file for an Azure subscription') + parser.add_argument('--list', action='store_true', default=True, + help='List instances (default: True)') + parser.add_argument('--debug', action='store_true', default=False, + help='Send debug messages to STDOUT') + parser.add_argument('--host', action='store', + help='Get all information about an instance') + parser.add_argument('--pretty', action='store_true', default=False, + help='Pretty print JSON output(default: False)') + parser.add_argument('--profile', action='store', + help='Azure profile contained in 
~/.azure/credentials') + parser.add_argument('--subscription_id', action='store', + help='Azure Subscription Id') + parser.add_argument('--client_id', action='store', + help='Azure Client Id ') + parser.add_argument('--secret', action='store', + help='Azure Client Secret') + parser.add_argument('--tenant', action='store', + help='Azure Tenant Id') + parser.add_argument('--ad_user', action='store', + help='Active Directory User') + parser.add_argument('--password', action='store', + help='password') + parser.add_argument('--adfs_authority_url', action='store', + help='Azure ADFS authority url') + parser.add_argument('--cloud_environment', action='store', + help='Azure Cloud Environment name or metadata discovery URL') + parser.add_argument('--resource-groups', action='store', + help='Return inventory for comma separated list of resource group names') + parser.add_argument('--tags', action='store', + help='Return inventory for comma separated list of tag key:value pairs') + parser.add_argument('--locations', action='store', + help='Return inventory for comma separated list of locations') + parser.add_argument('--no-powerstate', action='store_true', default=False, + help='Do not include the power state of each virtual host') + return parser.parse_args() + + def get_inventory(self): + if len(self.resource_groups) > 0: + # get VMs for requested resource groups + for resource_group in self.resource_groups: + try: + virtual_machines = self._compute_client.virtual_machines.list(resource_group.lower()) + except Exception as exc: + sys.exit("Error: fetching virtual machines for resource group {0} - {1}".format(resource_group, str(exc))) + if self._args.host or self.tags: + selected_machines = self._selected_machines(virtual_machines) + self._load_machines(selected_machines) + else: + self._load_machines(virtual_machines) + else: + # get all VMs within the subscription + try: + virtual_machines = self._compute_client.virtual_machines.list_all() + except Exception as exc: + sys.exit("Error: fetching virtual machines - {0}".format(str(exc))) + + if self._args.host or self.tags or self.locations: + selected_machines = self._selected_machines(virtual_machines) + self._load_machines(selected_machines) + else: + self._load_machines(virtual_machines) + + def _load_machines(self, machines): + for machine in machines: + id_dict = azure_id_to_dict(machine.id) + + # TODO - The API is returning an ID value containing resource group name in ALL CAPS. If/when it gets + # fixed, we should remove the .lower(). 
Opened Issue + # #574: https://github.com/Azure/azure-sdk-for-python/issues/574 + resource_group = id_dict['resourceGroups'].lower() + + if self.group_by_security_group: + self._get_security_groups(resource_group) + + host_vars = dict( + ansible_host=None, + private_ip=None, + private_ip_alloc_method=None, + public_ip=None, + public_ip_name=None, + public_ip_id=None, + public_ip_alloc_method=None, + fqdn=None, + location=machine.location, + name=machine.name, + type=machine.type, + id=machine.id, + tags=machine.tags, + network_interface_id=None, + network_interface=None, + resource_group=resource_group, + mac_address=None, + plan=(machine.plan.name if machine.plan else None), + virtual_machine_size=machine.hardware_profile.vm_size, + computer_name=(machine.os_profile.computer_name if machine.os_profile else None), + provisioning_state=machine.provisioning_state, + ) + + host_vars['os_disk'] = dict( + name=machine.storage_profile.os_disk.name, + operating_system_type=machine.storage_profile.os_disk.os_type.value.lower() + ) + + if self.include_powerstate: + host_vars['powerstate'] = self._get_powerstate(resource_group, machine.name) + + if machine.storage_profile.image_reference: + host_vars['image'] = dict( + offer=machine.storage_profile.image_reference.offer, + publisher=machine.storage_profile.image_reference.publisher, + sku=machine.storage_profile.image_reference.sku, + version=machine.storage_profile.image_reference.version + ) + + # Add windows details + if machine.os_profile is not None and machine.os_profile.windows_configuration is not None: + host_vars['ansible_connection'] = 'winrm' + host_vars['windows_auto_updates_enabled'] = \ + machine.os_profile.windows_configuration.enable_automatic_updates + host_vars['windows_timezone'] = machine.os_profile.windows_configuration.time_zone + host_vars['windows_rm'] = None + if machine.os_profile.windows_configuration.win_rm is not None: + host_vars['windows_rm'] = dict(listeners=None) + if machine.os_profile.windows_configuration.win_rm.listeners is not None: + host_vars['windows_rm']['listeners'] = [] + for listener in machine.os_profile.windows_configuration.win_rm.listeners: + host_vars['windows_rm']['listeners'].append(dict(protocol=listener.protocol.name, + certificate_url=listener.certificate_url)) + + for interface in machine.network_profile.network_interfaces: + interface_reference = self._parse_ref_id(interface.id) + network_interface = self._network_client.network_interfaces.get( + interface_reference['resourceGroups'], + interface_reference['networkInterfaces']) + if network_interface.primary: + if self.group_by_security_group and \ + self._security_groups[resource_group].get(network_interface.id, None): + host_vars['security_group'] = \ + self._security_groups[resource_group][network_interface.id]['name'] + host_vars['security_group_id'] = \ + self._security_groups[resource_group][network_interface.id]['id'] + host_vars['network_interface'] = network_interface.name + host_vars['network_interface_id'] = network_interface.id + host_vars['mac_address'] = network_interface.mac_address + for ip_config in network_interface.ip_configurations: + host_vars['private_ip'] = ip_config.private_ip_address + host_vars['private_ip_alloc_method'] = ip_config.private_ip_allocation_method + if self.use_private_ip: + host_vars['ansible_host'] = ip_config.private_ip_address + if ip_config.public_ip_address: + public_ip_reference = self._parse_ref_id(ip_config.public_ip_address.id) + public_ip_address = 
self._network_client.public_ip_addresses.get( + public_ip_reference['resourceGroups'], + public_ip_reference['publicIPAddresses']) + if not self.use_private_ip: + host_vars['ansible_host'] = public_ip_address.ip_address + host_vars['public_ip'] = public_ip_address.ip_address + host_vars['public_ip_name'] = public_ip_address.name + host_vars['public_ip_alloc_method'] = public_ip_address.public_ip_allocation_method + host_vars['public_ip_id'] = public_ip_address.id + if public_ip_address.dns_settings: + host_vars['fqdn'] = public_ip_address.dns_settings.fqdn + + self._add_host(host_vars) + + def _selected_machines(self, virtual_machines): + selected_machines = [] + for machine in virtual_machines: + if self._args.host and self._args.host == machine.name: + selected_machines.append(machine) + if self.tags and self._tags_match(machine.tags, self.tags): + selected_machines.append(machine) + if self.locations and machine.location in self.locations: + selected_machines.append(machine) + return selected_machines + + def _get_security_groups(self, resource_group): + ''' For a given resource_group build a mapping of network_interface.id to security_group name ''' + if not self._security_groups: + self._security_groups = dict() + if not self._security_groups.get(resource_group): + self._security_groups[resource_group] = dict() + for group in self._network_client.network_security_groups.list(resource_group): + if group.network_interfaces: + for interface in group.network_interfaces: + self._security_groups[resource_group][interface.id] = dict( + name=group.name, + id=group.id + ) + + def _get_powerstate(self, resource_group, name): + try: + vm = self._compute_client.virtual_machines.get(resource_group, + name, + expand='instanceview') + except Exception as exc: + sys.exit("Error: fetching instanceview for host {0} - {1}".format(name, str(exc))) + + return next((s.code.replace('PowerState/', '') + for s in vm.instance_view.statuses if s.code.startswith('PowerState')), None) + + def _add_host(self, vars): + + host_name = self._to_safe(vars['name']) + resource_group = self._to_safe(vars['resource_group']) + operating_system_type = self._to_safe(vars['os_disk']['operating_system_type'].lower()) + security_group = None + if vars.get('security_group'): + security_group = self._to_safe(vars['security_group']) + + if self.group_by_os_family: + if not self._inventory.get(operating_system_type): + self._inventory[operating_system_type] = [] + self._inventory[operating_system_type].append(host_name) + + if self.group_by_resource_group: + if not self._inventory.get(resource_group): + self._inventory[resource_group] = [] + self._inventory[resource_group].append(host_name) + + if self.group_by_location: + if not self._inventory.get(vars['location']): + self._inventory[vars['location']] = [] + self._inventory[vars['location']].append(host_name) + + if self.group_by_security_group and security_group: + if not self._inventory.get(security_group): + self._inventory[security_group] = [] + self._inventory[security_group].append(host_name) + + self._inventory['_meta']['hostvars'][host_name] = vars + self._inventory['azure'].append(host_name) + + if self.group_by_tag and vars.get('tags'): + for key, value in vars['tags'].items(): + safe_key = self._to_safe(key) + safe_value = safe_key + '_' + self._to_safe(value) + if not self._inventory.get(safe_key): + self._inventory[safe_key] = [] + if not self._inventory.get(safe_value): + self._inventory[safe_value] = [] + self._inventory[safe_key].append(host_name) + 
self._inventory[safe_value].append(host_name) + + def _json_format_dict(self, pretty=False): + # convert inventory to json + if pretty: + return json.dumps(self._inventory, sort_keys=True, indent=2) + else: + return json.dumps(self._inventory) + + def _get_settings(self): + # Load settings from the .ini, if it exists. Otherwise, + # look for environment values. + file_settings = self._load_settings() + if file_settings: + for key in AZURE_CONFIG_SETTINGS: + if key in ('resource_groups', 'tags', 'locations') and file_settings.get(key): + values = file_settings.get(key).split(',') + if len(values) > 0: + setattr(self, key, values) + elif file_settings.get(key): + val = self._to_boolean(file_settings[key]) + setattr(self, key, val) + else: + env_settings = self._get_env_settings() + for key in AZURE_CONFIG_SETTINGS: + if key in('resource_groups', 'tags', 'locations') and env_settings.get(key): + values = env_settings.get(key).split(',') + if len(values) > 0: + setattr(self, key, values) + elif env_settings.get(key, None) is not None: + val = self._to_boolean(env_settings[key]) + setattr(self, key, val) + + def _parse_ref_id(self, reference): + response = {} + keys = reference.strip('/').split('/') + for index in range(len(keys)): + if index < len(keys) - 1 and index % 2 == 0: + response[keys[index]] = keys[index + 1] + return response + + def _to_boolean(self, value): + if value in ['Yes', 'yes', 1, 'True', 'true', True]: + result = True + elif value in ['No', 'no', 0, 'False', 'false', False]: + result = False + else: + result = True + return result + + def _get_env_settings(self): + env_settings = dict() + for attribute, env_variable in AZURE_CONFIG_SETTINGS.items(): + env_settings[attribute] = os.environ.get(env_variable, None) + return env_settings + + def _load_settings(self): + basename = os.path.splitext(os.path.basename(__file__))[0] + default_path = os.path.join(os.path.dirname(__file__), (basename + '.ini')) + path = os.path.expanduser(os.path.expandvars(os.environ.get('AZURE_INI_PATH', default_path))) + config = None + settings = None + try: + config = cp.ConfigParser() + config.read(path) + except Exception: + pass + + if config is not None: + settings = dict() + for key in AZURE_CONFIG_SETTINGS: + try: + settings[key] = config.get('azure', key, raw=True) + except Exception: + pass + + return settings + + def _tags_match(self, tag_obj, tag_args): + ''' + Return True if the tags object from a VM contains the requested tag values. 
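+
+        For example, tag_args of ['env:prod', 'db'] matches a VM whose tags include env=prod and
+        also include a 'db' key with any value.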
+
+        :param tag_obj: Dictionary of string:string pairs
+        :param tag_args: List of strings in the form key:value
+        :return: boolean
+        '''
+
+        if not tag_obj:
+            return False
+
+        matches = 0
+        for arg in tag_args:
+            arg_key = arg
+            arg_value = None
+            if re.search(r':', arg):
+                arg_key, arg_value = arg.split(':')
+            if arg_value and tag_obj.get(arg_key, None) == arg_value:
+                matches += 1
+            elif not arg_value and tag_obj.get(arg_key, None) is not None:
+                matches += 1
+        if matches == len(tag_args):
+            return True
+        return False
+
+    def _to_safe(self, word):
+        ''' Converts 'bad' characters in a string to underscores so they can be used as Ansible groups '''
+        regex = r"[^A-Za-z0-9\_"
+        if not self.replace_dash_in_groups:
+            regex += r"\-"
+        return re.sub(regex + "]", "_", word)
+
+
+def main():
+    if not HAS_AZURE:
+        sys.exit("The Azure Python SDK is not installed (try `pip install 'azure>={0}' --upgrade`) - {1}".format(AZURE_MIN_VERSION, HAS_AZURE_EXC))
+
+    AzureInventory()
+
+
+if __name__ == '__main__':
+    main()
diff --git a/scripts/inventory/brook.ini b/scripts/inventory/brook.ini
new file mode 100644
index 0000000000..e88c363150
--- /dev/null
+++ b/scripts/inventory/brook.ini
@@ -0,0 +1,39 @@
+# Copyright 2016 Doalitic.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# The Brook.io inventory script has the following dependencies:
+# 1. A working Brook.io account
+#    See https://brook.io
+# 2. A valid token generated through the 'API token' panel of Brook.io
+# 3. The libbrook Python library.
+#    See https://github.com/doalitic/libbrook
+#
+# Author: Francisco Ros
+
+[brook]
+# Valid API token (required).
+# E.g. 'Aed342a12A60433697281FeEe1a4037C'
+#
+api_token =
+
+# Project id within Brook.io, as obtained from the project settings (optional). If provided, the
+# generated inventory will only include the hosts that belong to that project. Otherwise, it will
+# include all hosts in projects the requesting user has access to. The response includes groups
+# 'project_x', where 'x' is the project name.
+# E.g. '2e8e099e1bc34cc0979d97ac34e9577b'
+#
+project_id =
diff --git a/scripts/inventory/brook.py b/scripts/inventory/brook.py
new file mode 100644
index 0000000000..236571315b
--- /dev/null
+++ b/scripts/inventory/brook.py
@@ -0,0 +1,258 @@
+#!/usr/bin/env python
+# Copyright 2016 Doalitic.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
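+
+# Like the other inventory scripts in this directory, this script prints the JSON document Ansible
+# expects from a dynamic inventory: group names mapped to lists of hosts, plus a '_meta' key that
+# carries per-host variables. A minimal sketch of that shape (all names are illustrative):
+#
+#   {
+#       "project_test": ["web01"],
+#       "tag_www": ["web01"],
+#       "_meta": {"hostvars": {"web01": {"ansible_ssh_host": "198.51.100.7"}}}
+#   }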
+ +""" +Brook.io external inventory script +================================== + +Generates inventory that Ansible can understand by making API requests to Brook.io via the libbrook +library. Hence, such dependency must be installed in the system to run this script. + +The default configuration file is named 'brook.ini' and is located alongside this script. You can +choose any other file by setting the BROOK_INI_PATH environment variable. + +If param 'project_id' is left blank in 'brook.ini', the inventory includes all the instances in +projects where the requesting user belongs. Otherwise, only instances from the given project are +included, provided the requesting user belongs to it. + +The following variables are established for every host. They can be retrieved from the hostvars +dictionary. + - brook_pid: str + - brook_name: str + - brook_description: str + - brook_project: str + - brook_template: str + - brook_region: str + - brook_zone: str + - brook_status: str + - brook_tags: list(str) + - brook_internal_ips: list(str) + - brook_external_ips: list(str) + - brook_created_at + - brook_updated_at + - ansible_ssh_host + +Instances are grouped by the following categories: + - tag: + A group is created for each tag. E.g. groups 'tag_foo' and 'tag_bar' are created if there exist + instances with tags 'foo' and/or 'bar'. + - project: + A group is created for each project. E.g. group 'project_test' is created if a project named + 'test' exist. + - status: + A group is created for each instance state. E.g. groups 'status_RUNNING' and 'status_PENDING' + are created if there are instances in running and pending state. + +Examples: + Execute uname on all instances in project 'test' + $ ansible -i brook.py project_test -m shell -a "/bin/uname -a" + + Install nginx on all debian web servers tagged with 'www' + $ ansible -i brook.py tag_www -m apt -a "name=nginx state=present" + + Run site.yml playbook on web servers + $ ansible-playbook -i brook.py site.yml -l tag_www + +Support: + This script is tested on Python 2.7 and 3.4. It may work on other versions though. + +Author: Francisco Ros +Version: 0.2 +""" + + +import sys +import os + +from ansible.module_utils.six.moves.configparser import SafeConfigParser as ConfigParser + +import json + +try: + import libbrook +except Exception: + sys.exit('Brook.io inventory script requires libbrook. See https://github.com/doalitic/libbrook') + + +class BrookInventory: + + _API_ENDPOINT = 'https://api.brook.io' + + def __init__(self): + self._configure_from_file() + self.client = self.get_api_client() + self.inventory = self.get_inventory() + + def _configure_from_file(self): + """Initialize from .ini file. + + Configuration file is assumed to be named 'brook.ini' and to be located on the same + directory than this file, unless the environment variable BROOK_INI_PATH says otherwise. + """ + + brook_ini_default_path = \ + os.path.join(os.path.dirname(os.path.realpath(__file__)), 'brook.ini') + brook_ini_path = os.environ.get('BROOK_INI_PATH', brook_ini_default_path) + + config = ConfigParser(defaults={ + 'api_token': '', + 'project_id': '' + }) + config.read(brook_ini_path) + self.api_token = config.get('brook', 'api_token') + self.project_id = config.get('brook', 'project_id') + + if not self.api_token: + sys.exit('You must provide (at least) your Brook.io API token to generate the dynamic ' + 'inventory.') + + def get_api_client(self): + """Authenticate user via the provided credentials and return the corresponding API client. 
+ """ + + # Get JWT token from API token + # + unauthenticated_client = libbrook.ApiClient(host=self._API_ENDPOINT) + auth_api = libbrook.AuthApi(unauthenticated_client) + api_token = libbrook.AuthTokenRequest() + api_token.token = self.api_token + jwt = auth_api.auth_token(token=api_token) + + # Create authenticated API client + # + return libbrook.ApiClient(host=self._API_ENDPOINT, + header_name='Authorization', + header_value='Bearer %s' % jwt.token) + + def get_inventory(self): + """Generate Ansible inventory. + """ + + groups = dict() + meta = dict() + meta['hostvars'] = dict() + + instances_api = libbrook.InstancesApi(self.client) + projects_api = libbrook.ProjectsApi(self.client) + templates_api = libbrook.TemplatesApi(self.client) + + # If no project is given, get all projects the requesting user has access to + # + if not self.project_id: + projects = [project.id for project in projects_api.index_projects()] + else: + projects = [self.project_id] + + # Build inventory from instances in all projects + # + for project_id in projects: + project = projects_api.show_project(project_id=project_id) + for instance in instances_api.index_instances(project_id=project_id): + # Get template used for this instance if known + template = templates_api.show_template(template_id=instance.template) if instance.template else None + + # Update hostvars + try: + meta['hostvars'][instance.name] = \ + self.hostvars(project, instance, template, instances_api) + except libbrook.rest.ApiException: + continue + + # Group by project + project_group = 'project_%s' % project.name + if project_group in groups: + groups[project_group].append(instance.name) + else: + groups[project_group] = [instance.name] + + # Group by status + status_group = 'status_%s' % meta['hostvars'][instance.name]['brook_status'] + if status_group in groups: + groups[status_group].append(instance.name) + else: + groups[status_group] = [instance.name] + + # Group by tags + tags = meta['hostvars'][instance.name]['brook_tags'] + for tag in tags: + tag_group = 'tag_%s' % tag + if tag_group in groups: + groups[tag_group].append(instance.name) + else: + groups[tag_group] = [instance.name] + + groups['_meta'] = meta + return groups + + def hostvars(self, project, instance, template, api): + """Return the hostvars dictionary for the given instance. + + Raise libbrook.rest.ApiException if it cannot retrieve all required information from the + Brook.io API. 
+ """ + + hostvars = instance.to_dict() + hostvars['brook_pid'] = hostvars.pop('pid') + hostvars['brook_name'] = hostvars.pop('name') + hostvars['brook_description'] = hostvars.pop('description') + hostvars['brook_project'] = hostvars.pop('project') + hostvars['brook_template'] = hostvars.pop('template') + hostvars['brook_region'] = hostvars.pop('region') + hostvars['brook_zone'] = hostvars.pop('zone') + hostvars['brook_created_at'] = hostvars.pop('created_at') + hostvars['brook_updated_at'] = hostvars.pop('updated_at') + del hostvars['id'] + del hostvars['key'] + del hostvars['provider'] + del hostvars['image'] + + # Substitute identifiers for names + # + hostvars['brook_project'] = project.name + hostvars['brook_template'] = template.name if template else None + + # Retrieve instance state + # + status = api.status_instance(project_id=project.id, instance_id=instance.id) + hostvars.update({'brook_status': status.state}) + + # Retrieve instance tags + # + tags = api.instance_tags(project_id=project.id, instance_id=instance.id) + hostvars.update({'brook_tags': tags}) + + # Retrieve instance addresses + # + addresses = api.instance_addresses(project_id=project.id, instance_id=instance.id) + internal_ips = [address.address for address in addresses if address.scope == 'internal'] + external_ips = [address.address for address in addresses + if address.address and address.scope == 'external'] + hostvars.update({'brook_internal_ips': internal_ips}) + hostvars.update({'brook_external_ips': external_ips}) + try: + hostvars.update({'ansible_ssh_host': external_ips[0]}) + except IndexError: + raise libbrook.rest.ApiException(status='502', reason='Instance without public IP') + + return hostvars + + +# Run the script +# +brook = BrookInventory() +print(json.dumps(brook.inventory)) diff --git a/scripts/inventory/cloudforms.ini b/scripts/inventory/cloudforms.ini new file mode 100644 index 0000000000..30b9aa609e --- /dev/null +++ b/scripts/inventory/cloudforms.ini @@ -0,0 +1,40 @@ +[cloudforms] + +# the version of CloudForms ; currently not used, but tested with +version = 4.1 + +# This should be the hostname of the CloudForms server +url = https://cfme.example.com + +# This will more than likely need to be a local CloudForms username +username = + +# The password for said username +password = + +# True = verify SSL certificate / False = trust anything +ssl_verify = True + +# limit the number of vms returned per request +limit = 100 + +# purge the CloudForms actions from hosts +purge_actions = True + +# Clean up group names (from tags and other groupings so Ansible doesn't complain) +clean_group_keys = True + +# Explode tags into nested groups / subgroups +nest_tags = False + +# If set, ensure host name are suffixed with this value +# Note: This suffix *must* include the leading '.' 
as it is appended to the hostname as is +# suffix = .example.org + +# If true, will try and use an IPv4 address for the ansible_ssh_host rather than just the first IP address in the list +prefer_ipv4 = False + +[cache] + +# Maximum time to trust the cache in seconds +max_age = 600 diff --git a/scripts/inventory/cloudforms.py b/scripts/inventory/cloudforms.py new file mode 100644 index 0000000000..0057940930 --- /dev/null +++ b/scripts/inventory/cloudforms.py @@ -0,0 +1,483 @@ +#!/usr/bin/env python +# vim: set fileencoding=utf-8 : +# +# Copyright (C) 2016 Guido Günther +# +# This script is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with it. If not, see . +# +# This is loosely based on the foreman inventory script +# -- Josh Preston +# + +from __future__ import print_function +import argparse +from ansible.module_utils.six.moves import configparser as ConfigParser +import os +import re +from time import time +import requests +from requests.auth import HTTPBasicAuth +import warnings +from ansible.errors import AnsibleError + +import json + + +class CloudFormsInventory(object): + def __init__(self): + """ + Main execution path + """ + self.inventory = dict() # A list of groups and the hosts in that group + self.hosts = dict() # Details about hosts in the inventory + + # Parse CLI arguments + self.parse_cli_args() + + # Read settings + self.read_settings() + + # Cache + if self.args.refresh_cache or not self.is_cache_valid(): + self.update_cache() + else: + self.load_inventory_from_cache() + self.load_hosts_from_cache() + + data_to_print = "" + + # Data to print + if self.args.host: + if self.args.debug: + print("Fetching host [%s]" % self.args.host) + data_to_print += self.get_host_info(self.args.host) + else: + self.inventory['_meta'] = {'hostvars': {}} + for hostname in self.hosts: + self.inventory['_meta']['hostvars'][hostname] = { + 'cloudforms': self.hosts[hostname], + } + # include the ansible_ssh_host in the top level + if 'ansible_ssh_host' in self.hosts[hostname]: + self.inventory['_meta']['hostvars'][hostname]['ansible_ssh_host'] = self.hosts[hostname]['ansible_ssh_host'] + + data_to_print += self.json_format_dict(self.inventory, self.args.pretty) + + print(data_to_print) + + def is_cache_valid(self): + """ + Determines if the cache files have expired, or if it is still valid + """ + if self.args.debug: + print("Determining if cache [%s] is still valid (< %s seconds old)" % (self.cache_path_hosts, self.cache_max_age)) + + if os.path.isfile(self.cache_path_hosts): + mod_time = os.path.getmtime(self.cache_path_hosts) + current_time = time() + if (mod_time + self.cache_max_age) > current_time: + if os.path.isfile(self.cache_path_inventory): + if self.args.debug: + print("Cache is still valid!") + return True + + if self.args.debug: + print("Cache is stale or does not exist.") + + return False + + def read_settings(self): + """ + Reads the settings from the cloudforms.ini file + """ + config = ConfigParser.SafeConfigParser() + config_paths = [ + 
os.path.dirname(os.path.realpath(__file__)) + '/cloudforms.ini', + "/etc/ansible/cloudforms.ini", + ] + + env_value = os.environ.get('CLOUDFORMS_INI_PATH') + if env_value is not None: + config_paths.append(os.path.expanduser(os.path.expandvars(env_value))) + + if self.args.debug: + for config_path in config_paths: + print("Reading from configuration file [%s]" % config_path) + + config.read(config_paths) + + # CloudForms API related + if config.has_option('cloudforms', 'url'): + self.cloudforms_url = config.get('cloudforms', 'url') + else: + self.cloudforms_url = None + + if not self.cloudforms_url: + warnings.warn("No url specified, expected something like 'https://cfme.example.com'") + + if config.has_option('cloudforms', 'username'): + self.cloudforms_username = config.get('cloudforms', 'username') + else: + self.cloudforms_username = None + + if not self.cloudforms_username: + warnings.warn("No username specified, you need to specify a CloudForms username.") + + if config.has_option('cloudforms', 'password'): + self.cloudforms_pw = config.get('cloudforms', 'password', raw=True) + else: + self.cloudforms_pw = None + + if not self.cloudforms_pw: + warnings.warn("No password specified, you need to specify a password for the CloudForms user.") + + if config.has_option('cloudforms', 'ssl_verify'): + self.cloudforms_ssl_verify = config.getboolean('cloudforms', 'ssl_verify') + else: + self.cloudforms_ssl_verify = True + + if config.has_option('cloudforms', 'version'): + self.cloudforms_version = config.get('cloudforms', 'version') + else: + self.cloudforms_version = None + + if config.has_option('cloudforms', 'limit'): + self.cloudforms_limit = config.getint('cloudforms', 'limit') + else: + self.cloudforms_limit = 100 + + if config.has_option('cloudforms', 'purge_actions'): + self.cloudforms_purge_actions = config.getboolean('cloudforms', 'purge_actions') + else: + self.cloudforms_purge_actions = True + + if config.has_option('cloudforms', 'clean_group_keys'): + self.cloudforms_clean_group_keys = config.getboolean('cloudforms', 'clean_group_keys') + else: + self.cloudforms_clean_group_keys = True + + if config.has_option('cloudforms', 'nest_tags'): + self.cloudforms_nest_tags = config.getboolean('cloudforms', 'nest_tags') + else: + self.cloudforms_nest_tags = False + + if config.has_option('cloudforms', 'suffix'): + self.cloudforms_suffix = config.get('cloudforms', 'suffix') + if self.cloudforms_suffix[0] != '.': + raise AnsibleError('Leading fullstop is required for Cloudforms suffix') + else: + self.cloudforms_suffix = None + + if config.has_option('cloudforms', 'prefer_ipv4'): + self.cloudforms_prefer_ipv4 = config.getboolean('cloudforms', 'prefer_ipv4') + else: + self.cloudforms_prefer_ipv4 = False + + # Ansible related + try: + group_patterns = config.get('ansible', 'group_patterns') + except (ConfigParser.NoOptionError, ConfigParser.NoSectionError): + group_patterns = "[]" + + self.group_patterns = eval(group_patterns) + + # Cache related + try: + cache_path = os.path.expanduser(config.get('cache', 'path')) + except (ConfigParser.NoOptionError, ConfigParser.NoSectionError): + cache_path = '.' 
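+        # The cache file names below are derived from this script's basename, e.g.
+        # '<cache_path>/cloudforms.hosts' and '<cache_path>/cloudforms.inventory'.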
+ (script, ext) = os.path.splitext(os.path.basename(__file__)) + self.cache_path_hosts = cache_path + "/%s.hosts" % script + self.cache_path_inventory = cache_path + "/%s.inventory" % script + self.cache_max_age = config.getint('cache', 'max_age') + + if self.args.debug: + print("CloudForms settings:") + print("cloudforms_url = %s" % self.cloudforms_url) + print("cloudforms_username = %s" % self.cloudforms_username) + print("cloudforms_pw = %s" % self.cloudforms_pw) + print("cloudforms_ssl_verify = %s" % self.cloudforms_ssl_verify) + print("cloudforms_version = %s" % self.cloudforms_version) + print("cloudforms_limit = %s" % self.cloudforms_limit) + print("cloudforms_purge_actions = %s" % self.cloudforms_purge_actions) + print("Cache settings:") + print("cache_max_age = %s" % self.cache_max_age) + print("cache_path_hosts = %s" % self.cache_path_hosts) + print("cache_path_inventory = %s" % self.cache_path_inventory) + + def parse_cli_args(self): + """ + Command line argument processing + """ + parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on CloudForms managed VMs') + parser.add_argument('--list', action='store_true', default=True, help='List instances (default: True)') + parser.add_argument('--host', action='store', help='Get all the variables about a specific instance') + parser.add_argument('--pretty', action='store_true', default=False, help='Pretty print JSON output (default: False)') + parser.add_argument('--refresh-cache', action='store_true', default=False, + help='Force refresh of cache by making API requests to CloudForms (default: False - use cache files)') + parser.add_argument('--debug', action='store_true', default=False, help='Show debug output while running (default: False)') + self.args = parser.parse_args() + + def _get_json(self, url): + """ + Make a request and return the JSON + """ + results = [] + + ret = requests.get(url, + auth=HTTPBasicAuth(self.cloudforms_username, self.cloudforms_pw), + verify=self.cloudforms_ssl_verify) + + ret.raise_for_status() + + try: + results = json.loads(ret.text) + except ValueError: + warnings.warn("Unexpected response from {0} ({1}): {2}".format(self.cloudforms_url, ret.status_code, ret.reason)) + results = {} + + if self.args.debug: + print("=======================================================================") + print("=======================================================================") + print("=======================================================================") + print(ret.text) + print("=======================================================================") + print("=======================================================================") + print("=======================================================================") + + return results + + def _get_hosts(self): + """ + Get all hosts by paging through the results + """ + limit = self.cloudforms_limit + + page = 0 + last_page = False + + results = [] + + while not last_page: + offset = page * limit + ret = self._get_json("%s/api/vms?offset=%s&limit=%s&expand=resources,tags,hosts,&attributes=ipaddresses" % (self.cloudforms_url, offset, limit)) + results += ret['resources'] + if ret['subcount'] < limit: + last_page = True + page += 1 + + return results + + def update_cache(self): + """ + Make calls to cloudforms and save the output in a cache + """ + self.groups = dict() + self.hosts = dict() + + if self.args.debug: + print("Updating cache...") + + for host in self._get_hosts(): + if self.cloudforms_suffix is not None and not 
host['name'].endswith(self.cloudforms_suffix): + host['name'] = host['name'] + self.cloudforms_suffix + + # Ignore VMs that are not powered on + if host['power_state'] != 'on': + if self.args.debug: + print("Skipping %s because power_state = %s" % (host['name'], host['power_state'])) + continue + + # purge actions + if self.cloudforms_purge_actions and 'actions' in host: + del host['actions'] + + # Create ansible groups for tags + if 'tags' in host: + + # Create top-level group + if 'tags' not in self.inventory: + self.inventory['tags'] = dict(children=[], vars={}, hosts=[]) + + if not self.cloudforms_nest_tags: + # don't expand tags, just use them in a safe way + for group in host['tags']: + # Add sub-group, as a child of top-level + safe_key = self.to_safe(group['name']) + if safe_key: + if self.args.debug: + print("Adding sub-group '%s' to parent 'tags'" % safe_key) + + if safe_key not in self.inventory['tags']['children']: + self.push(self.inventory['tags'], 'children', safe_key) + + self.push(self.inventory, safe_key, host['name']) + + if self.args.debug: + print("Found tag [%s] for host which will be mapped to [%s]" % (group['name'], safe_key)) + else: + # expand the tags into nested groups / sub-groups + # Create nested groups for tags + safe_parent_tag_name = 'tags' + for tag in host['tags']: + tag_hierarchy = tag['name'][1:].split('/') + + if self.args.debug: + print("Working on list %s" % tag_hierarchy) + + for tag_name in tag_hierarchy: + if self.args.debug: + print("Working on tag_name = %s" % tag_name) + + safe_tag_name = self.to_safe(tag_name) + if self.args.debug: + print("Using sanitized name %s" % safe_tag_name) + + # Create sub-group + if safe_tag_name not in self.inventory: + self.inventory[safe_tag_name] = dict(children=[], vars={}, hosts=[]) + + # Add sub-group, as a child of top-level + if safe_parent_tag_name: + if self.args.debug: + print("Adding sub-group '%s' to parent '%s'" % (safe_tag_name, safe_parent_tag_name)) + + if safe_tag_name not in self.inventory[safe_parent_tag_name]['children']: + self.push(self.inventory[safe_parent_tag_name], 'children', safe_tag_name) + + # Make sure the next one uses this one as it's parent + safe_parent_tag_name = safe_tag_name + + # Add the host to the last tag + self.push(self.inventory[safe_parent_tag_name], 'hosts', host['name']) + + # Set ansible_ssh_host to the first available ip address + if 'ipaddresses' in host and host['ipaddresses'] and isinstance(host['ipaddresses'], list): + # If no preference for IPv4, just use the first entry + if not self.cloudforms_prefer_ipv4: + host['ansible_ssh_host'] = host['ipaddresses'][0] + else: + # Before we search for an IPv4 address, set using the first entry in case we don't find any + host['ansible_ssh_host'] = host['ipaddresses'][0] + for currenthost in host['ipaddresses']: + if '.' 
in currenthost: + host['ansible_ssh_host'] = currenthost + + # Create additional groups + for key in ('location', 'type', 'vendor'): + safe_key = self.to_safe(host[key]) + + # Create top-level group + if key not in self.inventory: + self.inventory[key] = dict(children=[], vars={}, hosts=[]) + + # Create sub-group + if safe_key not in self.inventory: + self.inventory[safe_key] = dict(children=[], vars={}, hosts=[]) + + # Add sub-group, as a child of top-level + if safe_key not in self.inventory[key]['children']: + self.push(self.inventory[key], 'children', safe_key) + + if key in host: + # Add host to sub-group + self.push(self.inventory[safe_key], 'hosts', host['name']) + + self.hosts[host['name']] = host + self.push(self.inventory, 'all', host['name']) + + if self.args.debug: + print("Saving cached data") + + self.write_to_cache(self.hosts, self.cache_path_hosts) + self.write_to_cache(self.inventory, self.cache_path_inventory) + + def get_host_info(self, host): + """ + Get variables about a specific host + """ + if not self.hosts or len(self.hosts) == 0: + # Need to load cache from cache + self.load_hosts_from_cache() + + if host not in self.hosts: + if self.args.debug: + print("[%s] not found in cache." % host) + + # try updating the cache + self.update_cache() + + if host not in self.hosts: + if self.args.debug: + print("[%s] does not exist after cache update." % host) + # host might not exist anymore + return self.json_format_dict({}, self.args.pretty) + + return self.json_format_dict(self.hosts[host], self.args.pretty) + + def push(self, d, k, v): + """ + Safely puts a new entry onto an array. + """ + if k in d: + d[k].append(v) + else: + d[k] = [v] + + def load_inventory_from_cache(self): + """ + Reads the inventory from the cache file sets self.inventory + """ + cache = open(self.cache_path_inventory, 'r') + json_inventory = cache.read() + self.inventory = json.loads(json_inventory) + + def load_hosts_from_cache(self): + """ + Reads the cache from the cache file sets self.hosts + """ + cache = open(self.cache_path_hosts, 'r') + json_cache = cache.read() + self.hosts = json.loads(json_cache) + + def write_to_cache(self, data, filename): + """ + Writes data in JSON format to a file + """ + json_data = self.json_format_dict(data, True) + cache = open(filename, 'w') + cache.write(json_data) + cache.close() + + def to_safe(self, word): + """ + Converts 'bad' characters in a string to underscores so they can be used as Ansible groups + """ + if self.cloudforms_clean_group_keys: + regex = r"[^A-Za-z0-9\_]" + return re.sub(regex, "_", word.replace(" ", "")) + else: + return word + + def json_format_dict(self, data, pretty=False): + """ + Converts a dict to a JSON object and dumps it as a formatted string + """ + if pretty: + return json.dumps(data, sort_keys=True, indent=2) + else: + return json.dumps(data) + + +CloudFormsInventory() diff --git a/scripts/inventory/cloudstack.ini b/scripts/inventory/cloudstack.ini new file mode 100644 index 0000000000..43777b593f --- /dev/null +++ b/scripts/inventory/cloudstack.ini @@ -0,0 +1,5 @@ +[cloudstack] +#endpoint = https://api.exoscale.ch/compute +endpoint = https://cloud.example.com/client/api +key = cloudstack api key +secret = cloudstack api secret diff --git a/scripts/inventory/cloudstack.py b/scripts/inventory/cloudstack.py new file mode 100644 index 0000000000..db0322cfd6 --- /dev/null +++ b/scripts/inventory/cloudstack.py @@ -0,0 +1,277 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# +# (c) 2015, René Moser +# +# This file is part of 
Ansible, +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +###################################################################### + +""" +Ansible CloudStack external inventory script. +============================================= + +Generates Ansible inventory from CloudStack. Configuration is read from +'cloudstack.ini'. If you need to pass the project, write a simple wrapper +script, e.g. project_cloudstack.sh: + + #!/bin/bash + cloudstack.py --project $@ + + +When run against a specific host, this script returns the following attributes +based on the data obtained from CloudStack API: + + "web01": { + "cpu_number": 2, + "nic": [ + { + "ip": "10.102.76.98", + "mac": "02:00:50:99:00:01", + "type": "Isolated", + "netmask": "255.255.255.0", + "gateway": "10.102.76.1" + }, + { + "ip": "10.102.138.63", + "mac": "06:b7:5a:00:14:84", + "type": "Shared", + "netmask": "255.255.255.0", + "gateway": "10.102.138.1" + } + ], + "default_ip": "10.102.76.98", + "zone": "ZUERICH", + "created": "2014-07-02T07:53:50+0200", + "hypervisor": "VMware", + "memory": 2048, + "state": "Running", + "tags": [], + "cpu_speed": 1800, + "affinity_group": [], + "service_offering": "Small", + "cpu_used": "62%" + } + + +usage: cloudstack.py [--list] [--host HOST] [--project PROJECT] [--domain DOMAIN] +""" + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +import sys +import argparse +import json + +try: + from cs import CloudStack, CloudStackException, read_config +except ImportError: + print("Error: CloudStack library must be installed: pip install cs.", + file=sys.stderr) + sys.exit(1) + + +class CloudStackInventory(object): + def __init__(self): + + parser = argparse.ArgumentParser() + parser.add_argument('--host') + parser.add_argument('--list', action='store_true') + parser.add_argument('--tag', help="Filter machines by a tag. 
Should be in the form key=value.") + parser.add_argument('--project') + parser.add_argument('--domain') + + options = parser.parse_args() + try: + self.cs = CloudStack(**read_config()) + except CloudStackException: + print("Error: Could not connect to CloudStack API", file=sys.stderr) + + domain_id = None + if options.domain: + domain_id = self.get_domain_id(options.domain) + + project_id = None + if options.project: + project_id = self.get_project_id(options.project, domain_id) + + if options.host: + data = self.get_host(options.host, project_id, domain_id) + print(json.dumps(data, indent=2)) + + elif options.list: + tags = dict() + if options.tag: + tags['tags[0].key'], tags['tags[0].value'] = options.tag.split('=') + data = self.get_list(project_id, domain_id, **tags) + print(json.dumps(data, indent=2)) + else: + print("usage: --list [--tag ] | --host [--project ] [--domain ]", + file=sys.stderr) + sys.exit(1) + + def get_domain_id(self, domain): + domains = self.cs.listDomains(listall=True) + if domains: + for d in domains['domain']: + if d['path'].lower() == domain.lower(): + return d['id'] + print("Error: Domain %s not found." % domain, file=sys.stderr) + sys.exit(1) + + def get_project_id(self, project, domain_id=None): + projects = self.cs.listProjects(domainid=domain_id) + if projects: + for p in projects['project']: + if p['name'] == project or p['id'] == project: + return p['id'] + print("Error: Project %s not found." % project, file=sys.stderr) + sys.exit(1) + + def get_host(self, name, project_id=None, domain_id=None, **kwargs): + hosts = self.cs.listVirtualMachines(projectid=project_id, domainid=domain_id, fetch_list=True, **kwargs) + data = {} + if not hosts: + return data + for host in hosts: + host_name = host['displayname'] + if name == host_name: + data['zone'] = host['zonename'] + if 'group' in host: + data['group'] = host['group'] + data['state'] = host['state'] + data['service_offering'] = host['serviceofferingname'] + data['affinity_group'] = host['affinitygroup'] + data['security_group'] = host['securitygroup'] + data['cpu_number'] = host['cpunumber'] + if 'cpu_speed' in host: + data['cpu_speed'] = host['cpuspeed'] + if 'cpuused' in host: + data['cpu_used'] = host['cpuused'] + data['memory'] = host['memory'] + data['tags'] = host['tags'] + if 'hypervisor' in host: + data['hypervisor'] = host['hypervisor'] + data['created'] = host['created'] + data['nic'] = [] + for nic in host['nic']: + nicdata = { + 'ip': nic['ipaddress'], + 'mac': nic['macaddress'], + 'netmask': nic['netmask'], + 'gateway': nic['gateway'], + 'type': nic['type'], + } + if 'ip6address' in nic: + nicdata['ip6'] = nic['ip6address'] + if 'gateway' in nic: + nicdata['gateway'] = nic['gateway'] + if 'netmask' in nic: + nicdata['netmask'] = nic['netmask'] + data['nic'].append(nicdata) + if nic['isdefault']: + data['default_ip'] = nic['ipaddress'] + if 'ip6address' in nic: + data['default_ip6'] = nic['ip6address'] + break + return data + + def get_list(self, project_id=None, domain_id=None, **kwargs): + data = { + 'all': { + 'hosts': [], + }, + '_meta': { + 'hostvars': {}, + }, + } + + groups = self.cs.listInstanceGroups(projectid=project_id, domainid=domain_id) + if groups: + for group in groups['instancegroup']: + group_name = group['name'] + if group_name and group_name not in data: + data[group_name] = { + 'hosts': [] + } + + hosts = self.cs.listVirtualMachines(projectid=project_id, domainid=domain_id, fetch_list=True, **kwargs) + if not hosts: + return data + for host in hosts: + host_name = 
host['displayname'] + data['all']['hosts'].append(host_name) + data['_meta']['hostvars'][host_name] = {} + + # Make a group per zone + data['_meta']['hostvars'][host_name]['zone'] = host['zonename'] + group_name = host['zonename'] + if group_name not in data: + data[group_name] = { + 'hosts': [] + } + data[group_name]['hosts'].append(host_name) + + if 'group' in host: + data['_meta']['hostvars'][host_name]['group'] = host['group'] + data['_meta']['hostvars'][host_name]['state'] = host['state'] + data['_meta']['hostvars'][host_name]['service_offering'] = host['serviceofferingname'] + data['_meta']['hostvars'][host_name]['affinity_group'] = host['affinitygroup'] + data['_meta']['hostvars'][host_name]['security_group'] = host['securitygroup'] + data['_meta']['hostvars'][host_name]['cpu_number'] = host['cpunumber'] + if 'cpuspeed' in host: + data['_meta']['hostvars'][host_name]['cpu_speed'] = host['cpuspeed'] + if 'cpuused' in host: + data['_meta']['hostvars'][host_name]['cpu_used'] = host['cpuused'] + data['_meta']['hostvars'][host_name]['created'] = host['created'] + data['_meta']['hostvars'][host_name]['memory'] = host['memory'] + data['_meta']['hostvars'][host_name]['tags'] = host['tags'] + if 'hypervisor' in host: + data['_meta']['hostvars'][host_name]['hypervisor'] = host['hypervisor'] + data['_meta']['hostvars'][host_name]['created'] = host['created'] + data['_meta']['hostvars'][host_name]['nic'] = [] + for nic in host['nic']: + nicdata = { + 'ip': nic['ipaddress'], + 'mac': nic['macaddress'], + 'netmask': nic['netmask'], + 'gateway': nic['gateway'], + 'type': nic['type'], + } + if 'ip6address' in nic: + nicdata['ip6'] = nic['ip6address'] + if 'gateway' in nic: + nicdata['gateway'] = nic['gateway'] + if 'netmask' in nic: + nicdata['netmask'] = nic['netmask'] + data['_meta']['hostvars'][host_name]['nic'].append(nicdata) + if nic['isdefault']: + data['_meta']['hostvars'][host_name]['default_ip'] = nic['ipaddress'] + if 'ip6address' in nic: + data['_meta']['hostvars'][host_name]['default_ip6'] = nic['ip6address'] + + group_name = '' + if 'group' in host: + group_name = host['group'] + + if group_name and group_name in data: + data[group_name]['hosts'].append(host_name) + return data + + +if __name__ == '__main__': + CloudStackInventory() diff --git a/scripts/inventory/cobbler.ini b/scripts/inventory/cobbler.ini new file mode 100644 index 0000000000..2dc8cd3379 --- /dev/null +++ b/scripts/inventory/cobbler.ini @@ -0,0 +1,24 @@ +# Ansible Cobbler external inventory script settings +# + +[cobbler] + +host = http://PATH_TO_COBBLER_SERVER/cobbler_api + +# If API needs authentication add 'username' and 'password' options here. +#username = foo +#password = bar + +# API calls to Cobbler can be slow. For this reason, we cache the results of an API +# call. Set this to the path you want cache files to be written to. Two files +# will be written to this directory: +# - ansible-cobbler.cache +# - ansible-cobbler.index +cache_path = /tmp + +# The number of seconds a cache file is considered valid. After this many +# seconds, a new API call will be made, and the cache file will be updated. 
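+# E.g. 900 seconds = 15 minutes.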
+cache_max_age = 900 + + + diff --git a/scripts/inventory/cobbler.py b/scripts/inventory/cobbler.py new file mode 100644 index 0000000000..60195ac197 --- /dev/null +++ b/scripts/inventory/cobbler.py @@ -0,0 +1,312 @@ +#!/usr/bin/env python + +""" +Cobbler external inventory script +================================= + +Ansible has a feature where instead of reading from /etc/ansible/hosts +as a text file, it can query external programs to obtain the list +of hosts, groups the hosts are in, and even variables to assign to each host. + +To use this, copy this file over /etc/ansible/hosts and chmod +x the file. +This, more or less, allows you to keep one central database containing +info about all of your managed instances. + +This script is an example of sourcing that data from Cobbler +(https://cobbler.github.io). With cobbler each --mgmt-class in cobbler +will correspond to a group in Ansible, and --ks-meta variables will be +passed down for use in templates or even in argument lines. + +NOTE: The cobbler system names will not be used. Make sure a +cobbler --dns-name is set for each cobbler system. If a system +appears with two DNS names we do not add it twice because we don't want +ansible talking to it twice. The first one found will be used. If no +--dns-name is set the system will NOT be visible to ansible. We do +not add cobbler system names because there is no requirement in cobbler +that those correspond to addresses. + +Tested with Cobbler 2.0.11. + +Changelog: + - 2015-06-21 dmccue: Modified to support run-once _meta retrieval, results in + higher performance at ansible startup. Groups are determined by owner rather than + default mgmt_classes. DNS name determined from hostname. cobbler values are written + to a 'cobbler' fact namespace + + - 2013-09-01 pgehres: Refactored implementation to make use of caching and to + limit the number of connections to external cobbler server for performance. + Added use of cobbler.ini file to configure settings. Tested with Cobbler 2.4.0 + +""" + +# (c) 2012, Michael DeHaan +# +# This file is part of Ansible, +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +###################################################################### + +import argparse +import os +import re +from time import time +import xmlrpclib + +import json + +from ansible.module_utils.six import iteritems +from ansible.module_utils.six.moves import configparser as ConfigParser + +# NOTE -- this file assumes Ansible is being accessed FROM the cobbler +# server, so it does not attempt to login with a username and password. +# this will be addressed in a future version of this script. 
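+#
+# The class below does, however, already accept optional credentials: set 'username' and
+# 'password' in cobbler.ini (or COBBLER_username / COBBLER_password in the environment) and
+# _connect() will log in with them. A minimal sketch of the XML-RPC calls this script builds on
+# (the server URL is illustrative):
+#
+#   conn = xmlrpclib.Server("http://cobbler.example.com/cobbler_api", allow_none=True)
+#   token = conn.login("user", "password")   # only when credentials are configured
+#   systems = conn.get_systems(token)        # one dict per cobbler system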
+ +orderby_keyname = 'owners' # alternatively 'mgmt_classes' + + +class CobblerInventory(object): + + def __init__(self): + + """ Main execution path """ + self.conn = None + + self.inventory = dict() # A list of groups and the hosts in that group + self.cache = dict() # Details about hosts in the inventory + self.ignore_settings = False # used to only look at env vars for settings. + + # Read env vars, read settings, and parse CLI arguments + self.parse_env_vars() + self.read_settings() + self.parse_cli_args() + + # Cache + if self.args.refresh_cache: + self.update_cache() + elif not self.is_cache_valid(): + self.update_cache() + else: + self.load_inventory_from_cache() + self.load_cache_from_cache() + + data_to_print = "" + + # Data to print + if self.args.host: + data_to_print += self.get_host_info() + else: + self.inventory['_meta'] = {'hostvars': {}} + for hostname in self.cache: + self.inventory['_meta']['hostvars'][hostname] = {'cobbler': self.cache[hostname]} + data_to_print += self.json_format_dict(self.inventory, True) + + print(data_to_print) + + def _connect(self): + if not self.conn: + self.conn = xmlrpclib.Server(self.cobbler_host, allow_none=True) + self.token = None + if self.cobbler_username is not None: + self.token = self.conn.login(self.cobbler_username, self.cobbler_password) + + def is_cache_valid(self): + """ Determines if the cache files have expired, or if it is still valid """ + + if os.path.isfile(self.cache_path_cache): + mod_time = os.path.getmtime(self.cache_path_cache) + current_time = time() + if (mod_time + self.cache_max_age) > current_time: + if os.path.isfile(self.cache_path_inventory): + return True + + return False + + def read_settings(self): + """ Reads the settings from the cobbler.ini file """ + + if(self.ignore_settings): + return + + config = ConfigParser.SafeConfigParser() + config.read(os.path.dirname(os.path.realpath(__file__)) + '/cobbler.ini') + + self.cobbler_host = config.get('cobbler', 'host') + self.cobbler_username = None + self.cobbler_password = None + if config.has_option('cobbler', 'username'): + self.cobbler_username = config.get('cobbler', 'username') + if config.has_option('cobbler', 'password'): + self.cobbler_password = config.get('cobbler', 'password') + + # Cache related + cache_path = config.get('cobbler', 'cache_path') + self.cache_path_cache = cache_path + "/ansible-cobbler.cache" + self.cache_path_inventory = cache_path + "/ansible-cobbler.index" + self.cache_max_age = config.getint('cobbler', 'cache_max_age') + + def parse_env_vars(self): + """ Reads the settings from the environment """ + + # Env. Vars: + # COBBLER_host + # COBBLER_username + # COBBLER_password + # COBBLER_cache_path + # COBBLER_cache_max_age + # COBBLER_ignore_settings + + self.cobbler_host = os.getenv('COBBLER_host', None) + self.cobbler_username = os.getenv('COBBLER_username', None) + self.cobbler_password = os.getenv('COBBLER_password', None) + + # Cache related + cache_path = os.getenv('COBBLER_cache_path', None) + if(cache_path is not None): + self.cache_path_cache = cache_path + "/ansible-cobbler.cache" + self.cache_path_inventory = cache_path + "/ansible-cobbler.index" + + self.cache_max_age = int(os.getenv('COBBLER_cache_max_age', "30")) + + # ignore_settings is used to ignore the settings file, for use in Ansible + # Tower (or AWX inventory scripts and not throw python exceptions.) 
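+        # E.g. an environment-only invocation (values are illustrative):
+        #   COBBLER_ignore_settings=True COBBLER_host=http://cobbler.example.com/cobbler_api ./cobbler.py --list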
+ if(os.getenv('COBBLER_ignore_settings', False) == "True"): + self.ignore_settings = True + + def parse_cli_args(self): + """ Command line argument processing """ + + parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on Cobbler') + parser.add_argument('--list', action='store_true', default=True, help='List instances (default: True)') + parser.add_argument('--host', action='store', help='Get all the variables about a specific instance') + parser.add_argument('--refresh-cache', action='store_true', default=False, + help='Force refresh of cache by making API requests to cobbler (default: False - use cache files)') + self.args = parser.parse_args() + + def update_cache(self): + """ Make calls to cobbler and save the output in a cache """ + + self._connect() + self.groups = dict() + self.hosts = dict() + if self.token is not None: + data = self.conn.get_systems(self.token) + else: + data = self.conn.get_systems() + + for host in data: + # Get the FQDN for the host and add it to the right groups + dns_name = host['hostname'] # None + ksmeta = None + interfaces = host['interfaces'] + # hostname is often empty for non-static IP hosts + if dns_name == '': + for (iname, ivalue) in iteritems(interfaces): + if ivalue['management'] or not ivalue['static']: + this_dns_name = ivalue.get('dns_name', None) + dns_name = this_dns_name if this_dns_name else '' + + if dns_name == '' or dns_name is None: + continue + + status = host['status'] + profile = host['profile'] + classes = host[orderby_keyname] + + if status not in self.inventory: + self.inventory[status] = [] + self.inventory[status].append(dns_name) + + if profile not in self.inventory: + self.inventory[profile] = [] + self.inventory[profile].append(dns_name) + + for cls in classes: + if cls not in self.inventory: + self.inventory[cls] = [] + self.inventory[cls].append(dns_name) + + # Since we already have all of the data for the host, update the host details as well + + # The old way was ksmeta only -- provide backwards compatibility + + self.cache[dns_name] = host + if "ks_meta" in host: + for key, value in iteritems(host["ks_meta"]): + self.cache[dns_name][key] = value + + self.write_to_cache(self.cache, self.cache_path_cache) + self.write_to_cache(self.inventory, self.cache_path_inventory) + + def get_host_info(self): + """ Get variables about a specific host """ + + if not self.cache or len(self.cache) == 0: + # Need to load index from cache + self.load_cache_from_cache() + + if self.args.host not in self.cache: + # try updating the cache + self.update_cache() + + if self.args.host not in self.cache: + # host might not exist anymore + return self.json_format_dict({}, True) + + return self.json_format_dict(self.cache[self.args.host], True) + + def push(self, my_dict, key, element): + """ Pushed an element onto an array that may not have been defined in the dict """ + + if key in my_dict: + my_dict[key].append(element) + else: + my_dict[key] = [element] + + def load_inventory_from_cache(self): + """ Reads the index from the cache file sets self.index """ + + cache = open(self.cache_path_inventory, 'r') + json_inventory = cache.read() + self.inventory = json.loads(json_inventory) + + def load_cache_from_cache(self): + """ Reads the cache from the cache file sets self.cache """ + + cache = open(self.cache_path_cache, 'r') + json_cache = cache.read() + self.cache = json.loads(json_cache) + + def write_to_cache(self, data, filename): + """ Writes data in JSON format to a file """ + json_data = 
self.json_format_dict(data, True) + cache = open(filename, 'w') + cache.write(json_data) + cache.close() + + def to_safe(self, word): + """ Converts 'bad' characters in a string to underscores so they can be used as Ansible groups """ + + return re.sub(r"[^A-Za-z0-9\-]", "_", word) + + def json_format_dict(self, data, pretty=False): + """ Converts a dict to a JSON object and dumps it as a formatted string """ + + if pretty: + return json.dumps(data, sort_keys=True, indent=2) + else: + return json.dumps(data) + + +CobblerInventory() diff --git a/scripts/inventory/collins.ini b/scripts/inventory/collins.ini new file mode 100644 index 0000000000..0ce0c2acbd --- /dev/null +++ b/scripts/inventory/collins.ini @@ -0,0 +1,57 @@ +# Ansible Collins external inventory script settings +# + +[collins] + +# You should not have a trailing slash or collins +# will not properly match the URI +host = http://localhost:9000 + +username = blake +password = admin:first + +# Specifies a timeout for all HTTP requests to Collins. +timeout_secs = 120 + +# Specifies a maximum number of retries per Collins request. +max_retries = 5 + +# Specifies the number of results to return per paginated query as specified in +# the Pagination section of the Collins API docs: +# http://tumblr.github.io/collins/api.html +results_per_query = 100 + +# Specifies the Collins asset type which will be queried for; most typically +# you'll want to leave this at the default of SERVER_NODE. +asset_type = SERVER_NODE + +# Collins assets can optionally be assigned hostnames; this option will preference +# the selection of an asset's hostname over an IP address as the primary identifier +# in the Ansible inventory. Typically, this value should be set to true if assets +# are assigned hostnames. +prefer_hostnames = true + +# Within Collins, assets can be granted multiple IP addresses; this configuration +# value specifies the index within the 'ADDRESSES' array as returned by the +# following API endpoint: +# http://tumblr.github.io/collins/api.html#api-ipam-asset-addresses-section +ip_address_index = 0 + +# Sets whether Collins instances in multiple datacenters will be queried. +query_remote_dcs = false + +# API calls to Collins can involve large, substantial queries. For this reason, +# we cache the results of an API call. Set this to the path you want cache files +# to be written to. Two files will be written to this directory: +# - ansible-collins.cache +# - ansible-collins.index +cache_path = /tmp + +# If errors occur while querying inventory, logging messages will be written +# to a logfile in the specified directory: +# - ansible-collins.log +log_path = /tmp + +# The number of seconds that a cache file is considered valid. After this many +# seconds, a new API call will be made, and the cache file will be updated. +cache_max_age = 600 diff --git a/scripts/inventory/collins.py b/scripts/inventory/collins.py new file mode 100644 index 0000000000..39f6c4b447 --- /dev/null +++ b/scripts/inventory/collins.py @@ -0,0 +1,439 @@ +#!/usr/bin/env python + +""" +Collins external inventory script +================================= + +Ansible has a feature where instead of reading from /etc/ansible/hosts +as a text file, it can query external programs to obtain the list +of hosts, groups the hosts are in, and even variables to assign to each host. + +Collins is a hardware asset management system originally developed by +Tumblr for tracking new hardware as it built out its own datacenters. 
It +exposes a rich API for manipulating and querying one's hardware inventory, +which makes it an ideal 'single point of truth' for driving systems +automation like Ansible. Extensive documentation on Collins, including a quickstart, +API docs, and a full reference manual, can be found here: + +http://tumblr.github.io/collins + +This script adds support to Ansible for obtaining a dynamic inventory of +assets in your infrastructure, grouping them in Ansible by their useful attributes, +and binding all facts provided by Collins to each host so that they can be used to +drive automation. Some parts of this script were cribbed shamelessly from mdehaan's +Cobbler inventory script. + +To use it, copy it to your repo and pass -i to the ansible or +ansible-playbook command; if you'd like to use it by default, simply copy collins.ini +to /etc/ansible and this script to /etc/ansible/hosts. + +Alongside the options set in collins.ini, there are several environment variables +that will be used instead of the configured values if they are set: + + - COLLINS_USERNAME - specifies a username to use for Collins authentication + - COLLINS_PASSWORD - specifies a password to use for Collins authentication + - COLLINS_ASSET_TYPE - specifies a Collins asset type to use during querying; + this can be used to run Ansible automation against different asset classes than + server nodes, such as network switches and PDUs + - COLLINS_CONFIG - specifies an alternative location for collins.ini, defaults to + /collins.ini + +If errors are encountered during operation, this script will return an exit code of +255; otherwise, it will return an exit code of 0. + +Collins attributes are accessible as variables in ansible via the COLLINS['attribute_name']. + +Tested against Ansible 1.8.2 and Collins 1.3.0. +""" + +# (c) 2014, Steve Salevan +# +# This file is part of Ansible. +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +###################################################################### + + +import argparse +import logging +import os +import re +import sys +from time import time +import traceback + +import json + +from ansible.module_utils.six import iteritems +from ansible.module_utils.six.moves import configparser as ConfigParser +from ansible.module_utils.six.moves.urllib.parse import urlencode + +from ansible.module_utils.urls import open_url + + +class CollinsDefaults(object): + ASSETS_API_ENDPOINT = '%s/api/assets' + SPECIAL_ATTRIBUTES = set([ + 'CREATED', + 'DELETED', + 'UPDATED', + 'STATE', + ]) + LOG_FORMAT = '%(asctime)-15s %(message)s' + + +class Error(Exception): + pass + + +class MaxRetriesError(Error): + pass + + +class CollinsInventory(object): + + def __init__(self): + """ Constructs CollinsInventory object and reads all configuration. 
""" + + self.inventory = dict() # A list of groups and the hosts in that group + self.cache = dict() # Details about hosts in the inventory + + # Read settings and parse CLI arguments + self.read_settings() + self.parse_cli_args() + + logging.basicConfig(format=CollinsDefaults.LOG_FORMAT, + filename=self.log_location) + self.log = logging.getLogger('CollinsInventory') + + def _asset_get_attribute(self, asset, attrib): + """ Returns a user-defined attribute from an asset if it exists; otherwise, + returns None. """ + + if 'ATTRIBS' in asset: + for attrib_block in asset['ATTRIBS'].keys(): + if attrib in asset['ATTRIBS'][attrib_block]: + return asset['ATTRIBS'][attrib_block][attrib] + return None + + def _asset_has_attribute(self, asset, attrib): + """ Returns whether a user-defined attribute is present on an asset. """ + + if 'ATTRIBS' in asset: + for attrib_block in asset['ATTRIBS'].keys(): + if attrib in asset['ATTRIBS'][attrib_block]: + return True + return False + + def run(self): + """ Main execution path """ + + # Updates cache if cache is not present or has expired. + successful = True + if self.args.refresh_cache: + successful = self.update_cache() + elif not self.is_cache_valid(): + successful = self.update_cache() + else: + successful = self.load_inventory_from_cache() + successful &= self.load_cache_from_cache() + + data_to_print = "" + + # Data to print + if self.args.host: + data_to_print = self.get_host_info() + + elif self.args.list: + # Display list of instances for inventory + data_to_print = self.json_format_dict(self.inventory, self.args.pretty) + + else: # default action with no options + data_to_print = self.json_format_dict(self.inventory, self.args.pretty) + + print(data_to_print) + return successful + + def find_assets(self, attributes=None, operation='AND'): + """ Obtains Collins assets matching the provided attributes. """ + attributes = {} if attributes is None else attributes + + # Formats asset search query to locate assets matching attributes, using + # the CQL search feature as described here: + # http://tumblr.github.io/collins/recipes.html + attributes_query = ['='.join(attr_pair) for attr_pair in iteritems(attributes)] + query_parameters = { + 'details': ['True'], + 'operation': [operation], + 'query': attributes_query, + 'remoteLookup': [str(self.query_remote_dcs)], + 'size': [self.results_per_query], + 'type': [self.collins_asset_type], + } + assets = [] + cur_page = 0 + num_retries = 0 + # Locates all assets matching the provided query, exhausting pagination. + while True: + if num_retries == self.collins_max_retries: + raise MaxRetriesError("Maximum of %s retries reached; giving up" % self.collins_max_retries) + query_parameters['page'] = cur_page + query_url = "%s?%s" % ( + (CollinsDefaults.ASSETS_API_ENDPOINT % self.collins_host), + urlencode(query_parameters, doseq=True) + ) + try: + response = open_url(query_url, + timeout=self.collins_timeout_secs, + url_username=self.collins_username, + url_password=self.collins_password, + force_basic_auth=True) + json_response = json.loads(response.read()) + # Adds any assets found to the array of assets. + assets += json_response['data']['Data'] + # If we've retrieved all of our assets, breaks out of the loop. 
+                    # A page with an empty 'Data' list signals that pagination is
+                    # exhausted. num_retries is reset after every successful page,
+                    # so only consecutive failures count against collins_max_retries.
+                    if len(json_response['data']['Data']) == 0:
+                        break
+                    cur_page += 1
+                    num_retries = 0
+                except Exception:
+                    self.log.error("Error while communicating with Collins, retrying:\n%s", traceback.format_exc())
+                    num_retries += 1
+        return assets
+
+    def is_cache_valid(self):
+        """ Determines whether the cache files exist and are still valid """
+
+        if os.path.isfile(self.cache_path_cache):
+            mod_time = os.path.getmtime(self.cache_path_cache)
+            current_time = time()
+            if (mod_time + self.cache_max_age) > current_time:
+                if os.path.isfile(self.cache_path_inventory):
+                    return True
+
+        return False
+
+    def read_settings(self):
+        """ Reads the settings from the collins.ini file """
+
+        config_loc = os.getenv('COLLINS_CONFIG', os.path.dirname(os.path.realpath(__file__)) + '/collins.ini')
+
+        config = ConfigParser.SafeConfigParser()
+        # Honour COLLINS_CONFIG if it is set, as promised by the module docstring.
+        config.read(config_loc)
+
+        self.collins_host = config.get('collins', 'host')
+        self.collins_username = os.getenv('COLLINS_USERNAME', config.get('collins', 'username'))
+        self.collins_password = os.getenv('COLLINS_PASSWORD', config.get('collins', 'password'))
+        self.collins_asset_type = os.getenv('COLLINS_ASSET_TYPE', config.get('collins', 'asset_type'))
+        self.collins_timeout_secs = config.getint('collins', 'timeout_secs')
+        self.collins_max_retries = config.getint('collins', 'max_retries')
+
+        self.results_per_query = config.getint('collins', 'results_per_query')
+        self.ip_address_index = config.getint('collins', 'ip_address_index')
+        self.query_remote_dcs = config.getboolean('collins', 'query_remote_dcs')
+        self.prefer_hostnames = config.getboolean('collins', 'prefer_hostnames')
+
+        cache_path = config.get('collins', 'cache_path')
+        self.cache_path_cache = cache_path + \
+            '/ansible-collins-%s.cache' % self.collins_asset_type
+        self.cache_path_inventory = cache_path + \
+            '/ansible-collins-%s.index' % self.collins_asset_type
+        self.cache_max_age = config.getint('collins', 'cache_max_age')
+
+        log_path = config.get('collins', 'log_path')
+        self.log_location = log_path + '/ansible-collins.log'
+
+    def parse_cli_args(self):
+        """ Command line argument processing """
+
+        parser = argparse.ArgumentParser(
+            description='Produces an Ansible Inventory file based on Collins')
+        parser.add_argument('--list', action='store_true', default=True, help='List instances (default: True)')
+        parser.add_argument('--host', action='store', help='Get all the variables about a specific instance')
+        parser.add_argument('--refresh-cache', action='store_true', default=False,
+                            help='Force refresh of cache by making API requests to Collins '
+                                 '(default: False - use cache files)')
+        parser.add_argument('--pretty', action='store_true', default=False, help='Pretty print all JSON output')
+        self.args = parser.parse_args()
+
+    def update_cache(self):
+        """ Makes calls to Collins and saves the output in a cache """
+
+        self.cache = dict()
+        self.inventory = dict()
+
+        # Locates all server assets from Collins.
+        try:
+            server_assets = self.find_assets()
+        except Exception:
+            self.log.error("Error while locating assets from Collins:\n%s", traceback.format_exc())
+            return False
+
+        for asset in server_assets:
+            # Determines the index used to retrieve the asset's IP address: either an
+            # attribute set on the Collins asset or the pre-configured value.
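+            # For example, an asset tagged in Collins with ANSIBLE_IP_INDEX=1 is
+            # indexed by the second entry of its 'ADDRESSES' array instead of the
+            # ini-wide ip_address_index setting.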
+            if self._asset_has_attribute(asset, 'ANSIBLE_IP_INDEX'):
+                ip_index = self._asset_get_attribute(asset, 'ANSIBLE_IP_INDEX')
+                try:
+                    ip_index = int(ip_index)
+                except Exception:
+                    self.log.error(
+                        "ANSIBLE_IP_INDEX attribute on asset %s not an integer: %s", asset,
+                        ip_index)
+            else:
+                ip_index = self.ip_address_index
+
+            asset['COLLINS'] = {}
+
+            # Attempts to locate the asset's primary identifier (hostname or IP address),
+            # which will be used to index the asset throughout the Ansible inventory.
+            if self.prefer_hostnames and self._asset_has_attribute(asset, 'HOSTNAME'):
+                asset_identifier = self._asset_get_attribute(asset, 'HOSTNAME')
+            elif 'ADDRESSES' not in asset:
+                self.log.warning("No IP addresses found for asset '%s', skipping", asset)
+                continue
+            elif len(asset['ADDRESSES']) < ip_index + 1:
+                self.log.warning(
+                    "No IP address found at index %s for asset '%s', skipping",
+                    ip_index, asset)
+                continue
+            else:
+                asset_identifier = asset['ADDRESSES'][ip_index]['ADDRESS']
+
+            # Adds an asset index to the Ansible inventory based upon unpacking
+            # the name of the asset's current STATE from its dictionary.
+            if 'STATE' in asset['ASSET'] and asset['ASSET']['STATE']:
+                state_inventory_key = self.to_safe(
+                    'STATE-%s' % asset['ASSET']['STATE']['NAME'])
+                self.push(self.inventory, state_inventory_key, asset_identifier)
+
+            # Indexes asset by all user-defined Collins attributes.
+            if 'ATTRIBS' in asset:
+                for attrib_block in asset['ATTRIBS'].keys():
+                    for attrib in asset['ATTRIBS'][attrib_block].keys():
+                        asset['COLLINS'][attrib] = asset['ATTRIBS'][attrib_block][attrib]
+                        attrib_key = self.to_safe('%s-%s' % (attrib, asset['ATTRIBS'][attrib_block][attrib]))
+                        self.push(self.inventory, attrib_key, asset_identifier)
+
+            # Indexes asset by all built-in Collins attributes.
+            for attribute in asset['ASSET'].keys():
+                if attribute not in CollinsDefaults.SPECIAL_ATTRIBUTES:
+                    attribute_val = asset['ASSET'][attribute]
+                    if attribute_val is not None:
+                        attrib_key = self.to_safe('%s-%s' % (attribute, attribute_val))
+                        self.push(self.inventory, attrib_key, asset_identifier)
+
+            # Indexes asset by hardware product information.
+            if 'HARDWARE' in asset:
+                if 'PRODUCT' in asset['HARDWARE']['BASE']:
+                    product = asset['HARDWARE']['BASE']['PRODUCT']
+                    if product:
+                        product_key = self.to_safe(
+                            'HARDWARE-PRODUCT-%s' % asset['HARDWARE']['BASE']['PRODUCT'])
+                        self.push(self.inventory, product_key, asset_identifier)
+
+            # Indexing now complete, adds the host details to the asset cache.
+            self.cache[asset_identifier] = asset
+
+        try:
+            self.write_to_cache(self.cache, self.cache_path_cache)
+            self.write_to_cache(self.inventory, self.cache_path_inventory)
+        except Exception:
+            self.log.error("Error while writing to cache:\n%s", traceback.format_exc())
+            return False
+        return True
+
+    def push(self, dictionary, key, value):
+        """ Adds a value to a list at a dictionary key, creating the list if it doesn't
+        exist. """
+
+        if key not in dictionary:
+            dictionary[key] = []
+        dictionary[key].append(value)
+
+    def get_host_info(self):
+        """ Get variables about a specific host. """
+
+        if not self.cache or len(self.cache) == 0:
+            # Need to load the cache from disk first
+            self.load_cache_from_cache()
+
+        if self.args.host not in self.cache:
+            # try updating the cache
+            self.update_cache()
+
+        if self.args.host not in self.cache:
+            # host might not exist anymore
+            return self.json_format_dict({}, self.args.pretty)
+
+        return self.json_format_dict(self.cache[self.args.host], self.args.pretty)
+
+    def load_inventory_from_cache(self):
+        """ Reads the inventory from the cache file and sets self.inventory """
+
+        try:
+            cache = open(self.cache_path_inventory, 'r')
+            json_inventory = cache.read()
+            self.inventory = json.loads(json_inventory)
+            return True
+        except Exception:
+            self.log.error("Error while loading inventory:\n%s",
+                           traceback.format_exc())
+            self.inventory = {}
+            return False
+
+    def load_cache_from_cache(self):
+        """ Reads the host cache from the cache file and sets self.cache """
+
+        try:
+            cache = open(self.cache_path_cache, 'r')
+            json_cache = cache.read()
+            self.cache = json.loads(json_cache)
+            return True
+        except Exception:
+            self.log.error("Error while loading host cache:\n%s",
+                           traceback.format_exc())
+            self.cache = {}
+            return False
+
+    def write_to_cache(self, data, filename):
+        """ Writes data in JSON format to a specified file. """
+
+        json_data = self.json_format_dict(data, self.args.pretty)
+        cache = open(filename, 'w')
+        cache.write(json_data)
+        cache.close()
+
+    def to_safe(self, word):
+        """ Converts 'bad' characters in a string to underscores so they
+        can be used as Ansible groups """
+
+        return re.sub(r"[^A-Za-z0-9\-]", "_", word)
+
+    def json_format_dict(self, data, pretty=False):
+        """ Converts a dict to a JSON object and dumps it as a formatted string """
+
+        if pretty:
+            return json.dumps(data, sort_keys=True, indent=2)
+        else:
+            return json.dumps(data)
+
+
+if __name__ == '__main__':
+    inventory = CollinsInventory()
+    if inventory.run():
+        sys.exit(0)
+    else:
+        sys.exit(-1)
diff --git a/scripts/inventory/consul_io.ini b/scripts/inventory/consul_io.ini
new file mode 100644
index 0000000000..d18a1494dd
--- /dev/null
+++ b/scripts/inventory/consul_io.ini
@@ -0,0 +1,54 @@
+# Ansible Consul external inventory script settings.
+
+[consul]
+
+#
+# Bulk load. Load all possible data before building the inventory JSON.
+# If true, the script works from in-memory data, which reduces JSON
+# generation time drastically.
+#
+bulk_load = false
+
+# restrict included nodes to those from this datacenter
+#datacenter = nyc1
+
+# url of the consul cluster to query
+#url = http://demo.consul.io
+url = http://localhost:8500
+
+# suffix added to each service to create a group name e.g. a service of 'redis' and
+# a suffix of '_servers' will add each address to the group name 'redis_servers'
+servers_suffix = _servers
+
+#
+# By default, the final JSON is built from all available info in consul.
+# When true, service groups are added in addition to the basic information; see
+# servers_suffix for details. Set this to false when speed matters more than
+# service groups, as that reduces script execution time drastically.
+#
+suffixes = true
+
+# if specified then the inventory will generate domain names that will resolve
+# via Consul's inbuilt DNS.
+#domain=consul
+
+# make groups from service tags. the name of the group is derived from the
+# service name and the tag name e.g. a service named nginx with tags ['master', 'v1']
+# will create groups nginx_master and nginx_v1
+tags = true
+
+# looks up the node name at the given path for a list of groups to which the
+# node should be added.
+kv_groups=ansible/groups
+
+# looks up the node name at the given path for a json dictionary of metadata that
+# should be attached as metadata for the node
+kv_metadata=ansible/metadata
+
+# looks up the health of each service and adds the node to 'up' and 'down' groups
+# based on the service availability
+#
+# !!!! if availability is true, suffixes also must be true. !!!!
+#
+availability = true
+available_suffix = _up
+unavailable_suffix = _down
diff --git a/scripts/inventory/consul_io.py b/scripts/inventory/consul_io.py
new file mode 100644
index 0000000000..4dad3eeec1
--- /dev/null
+++ b/scripts/inventory/consul_io.py
@@ -0,0 +1,537 @@
+#!/usr/bin/env python
+
+#
+# (c) 2015, Steve Gargan
+#
+# This file is part of Ansible,
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see .
+
+######################################################################
+
+'''
+Consul.io inventory script (http://consul.io)
+=============================================
+
+Generates Ansible inventory from nodes in a Consul cluster. This script will
+group nodes by:
+ - datacenter,
+ - registered service
+ - service tags
+ - service status
+ - values from the k/v store
+
+This script can be run with the following switches:
+--list as expected, groups all the nodes in all datacenters
+--datacenter, to restrict the nodes to a single datacenter
+--host to restrict the inventory to a single named node (requires datacenter config)
+
+The configuration for this plugin is read from a consul_io.ini file located in the
+same directory as this inventory script. All config options in the config file
+are optional except the host and port, which must point to a valid agent or
+server running the http api. For more information on enabling the endpoint see:
+
+http://www.consul.io/docs/agent/options.html
+
+Other options include:
+
+'datacenter':
+
+which restricts the included nodes to those from the given datacenter.
+This can also be set with the environment variable CONSUL_DATACENTER
+
+'url':
+
+the URL of the Consul cluster. host, port and scheme are derived from the
+URL. If not specified, connection configuration defaults to http requests
+to localhost on port 8500.
+This can also be set with the environment variable CONSUL_URL
+
+'domain':
+
+if specified then the inventory will generate domain names that will resolve
+via Consul's inbuilt DNS. The name is derived from the node name, datacenter
+and domain as <node_name>.node.<datacenter>.<domain>. Note that you will need to
+have consul hooked into your DNS server for these to resolve. See the consul
+DNS docs for more info.
+
+'servers_suffix':
+
+defines a suffix to add to the service name when creating the service
+group. e.g. a service name of 'redis' and a suffix of '_servers' will add
+each node's address to the group name 'redis_servers'. No suffix is added
+if this is not set
+
+'tags':
+
+boolean flag defining if service tags should be used to create Inventory
+groups e.g. an nginx service with the tags ['master', 'v1'] will create
+groups nginx_master and nginx_v1 to which the node running the service
+will be added. No tag groups are created if this is missing.
+
+'token':
+
+ACL token to use to authorize access to the key value store. May be required
+to retrieve the kv_groups and kv_metadata based on your consul configuration.
+
+'kv_groups':
+
+This is used to look up groups for a node in the key value store. It specifies a
+path to which each discovered node's name will be added to create a key to query
+the key/value store. There it expects to find a comma separated list of group
+names to which the node should be added e.g. if the inventory contains node
+'nyc-web-1' in datacenter 'nyc-dc1' and kv_groups = 'ansible/groups' then the key
+'ansible/groups/nyc-dc1/nyc-web-1' will be queried for a group list. If this query
+returned 'test,honeypot' then the node would be added to both groups.
+
+'kv_metadata':
+
+kv_metadata is used to look up metadata for each discovered node. Like kv_groups
+above it is used to build a path to look up in the kv store where it expects to
+find a json dictionary of metadata entries. If found, each key/value pair in the
+dictionary is added to the metadata for the node. e.g. for node 'nyc-web-1' in
+datacenter 'nyc-dc1' and kv_metadata = 'ansible/metadata', the key
+'ansible/metadata/nyc-dc1/nyc-web-1' should contain '{"database": "postgres"}'
+
+'availability':
+
+if true then availability groups will be created for each service. The node will
+be added to one of the groups based on the health status of the service. The
+group name is derived from the service name and the configurable availability
+suffixes.
+
+'available_suffix':
+
+suffix that should be appended to the service availability groups for available
+services e.g. if the suffix is '_up' and the service is nginx, then nodes with
+healthy nginx services will be added to the nginx_up group. Defaults to
+'_available'
+
+'unavailable_suffix':
+
+as above but for unhealthy services, defaults to '_unavailable'
+
+Note that if the inventory discovers an 'ssh' service running on a node it will
+register the port as ansible_ssh_port in the node's metadata and this port will
+be used to access the machine.
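+
+As a worked example, with the settings shipped in consul_io.ini (servers_suffix
+'_servers', tags and availability enabled), a node 'nyc-web-1' in datacenter
+'nyc-dc1' running a healthy 'nginx' service tagged ['v1'] would appear in the
+groups 'nyc-dc1', 'nginx_servers', 'nginx_v1' and 'nginx_up'.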
+``` + +''' + +import os +import re +import argparse +import sys + +from ansible.module_utils.six.moves import configparser + + +def get_log_filename(): + tty_filename = '/dev/tty' + stdout_filename = '/dev/stdout' + + if not os.path.exists(tty_filename): + return stdout_filename + if not os.access(tty_filename, os.W_OK): + return stdout_filename + if os.getenv('TEAMCITY_VERSION'): + return stdout_filename + + return tty_filename + + +def setup_logging(): + filename = get_log_filename() + + import logging.config + logging.config.dictConfig({ + 'version': 1, + 'formatters': { + 'simple': { + 'format': '%(asctime)s - %(name)s - %(levelname)s - %(message)s', + }, + }, + 'root': { + 'level': os.getenv('ANSIBLE_INVENTORY_CONSUL_IO_LOG_LEVEL', 'WARN'), + 'handlers': ['console'], + }, + 'handlers': { + 'console': { + 'class': 'logging.FileHandler', + 'filename': filename, + 'formatter': 'simple', + }, + }, + 'loggers': { + 'iso8601': { + 'qualname': 'iso8601', + 'level': 'INFO', + }, + }, + }) + logger = logging.getLogger('consul_io.py') + logger.debug('Invoked with %r', sys.argv) + + +if os.getenv('ANSIBLE_INVENTORY_CONSUL_IO_LOG_ENABLED'): + setup_logging() + + +import json + +try: + import consul +except ImportError as e: + sys.exit("""failed=True msg='python-consul required for this module. +See https://python-consul.readthedocs.io/en/latest/#installation'""") + +from ansible.module_utils.six import iteritems + + +class ConsulInventory(object): + + def __init__(self): + ''' Create an inventory based on the catalog of nodes and services + registered in a consul cluster''' + self.node_metadata = {} + self.nodes = {} + self.nodes_by_service = {} + self.nodes_by_tag = {} + self.nodes_by_datacenter = {} + self.nodes_by_kv = {} + self.nodes_by_availability = {} + self.current_dc = None + self.inmemory_kv = [] + self.inmemory_nodes = [] + + config = ConsulConfig() + self.config = config + + self.consul_api = config.get_consul_api() + + if config.has_config('datacenter'): + if config.has_config('host'): + self.load_data_for_node(config.host, config.datacenter) + else: + self.load_data_for_datacenter(config.datacenter) + else: + self.load_all_data_consul() + + self.combine_all_results() + print(json.dumps(self.inventory, sort_keys=True, indent=2)) + + def bulk_load(self, datacenter): + index, groups_list = self.consul_api.kv.get(self.config.kv_groups, recurse=True, dc=datacenter) + index, metadata_list = self.consul_api.kv.get(self.config.kv_metadata, recurse=True, dc=datacenter) + index, nodes = self.consul_api.catalog.nodes(dc=datacenter) + self.inmemory_kv += groups_list + self.inmemory_kv += metadata_list + self.inmemory_nodes += nodes + + def load_all_data_consul(self): + ''' cycle through each of the datacenters in the consul catalog and process + the nodes in each ''' + self.datacenters = self.consul_api.catalog.datacenters() + for datacenter in self.datacenters: + self.current_dc = datacenter + self.bulk_load(datacenter) + self.load_data_for_datacenter(datacenter) + + def load_availability_groups(self, node, datacenter): + '''check the health of each service on a node and add the node to either + an 'available' or 'unavailable' grouping. 
The suffix for each group can be
+        controlled from the config'''
+        if self.config.has_config('availability'):
+            for service_name, service in iteritems(node['Services']):
+                for service_node in self.consul_api.health.service(service_name)[1]:
+                    if self.is_service_available(service_node, service_name):
+                        suffix = self.config.get_availability_suffix(
+                            'available_suffix', '_available')
+                    else:
+                        suffix = self.config.get_availability_suffix(
+                            'unavailable_suffix', '_unavailable')
+                    self.add_node_to_map(self.nodes_by_availability,
+                                         service_name + suffix, service_node['Node'])
+
+    def is_service_available(self, node, service_name):
+        '''check the availability of the service on the node as well as the
+        availability of the node itself'''
+        consul_ok = service_ok = False
+        for check in node['Checks']:
+            if check['CheckID'] == 'serfHealth':
+                consul_ok = check['Status'] == 'passing'
+            elif check['ServiceName'] == service_name:
+                service_ok = check['Status'] == 'passing'
+        return consul_ok and service_ok
+
+    def consul_get_kv_inmemory(self, key):
+        # list() so the result is truthy and poppable on Python 3, where
+        # filter() returns a lazy iterator
+        result = list(filter(lambda x: x['Key'] == key, self.inmemory_kv))
+        return result.pop() if result else None
+
+    def consul_get_node_inmemory(self, node):
+        result = list(filter(lambda x: x['Node'] == node, self.inmemory_nodes))
+        return {"Node": result.pop(), "Services": {}} if result else None
+
+    def load_data_for_datacenter(self, datacenter):
+        '''processes all the nodes in a particular datacenter'''
+        if self.config.bulk_load == 'true':
+            nodes = self.inmemory_nodes
+        else:
+            index, nodes = self.consul_api.catalog.nodes(dc=datacenter)
+        for node in nodes:
+            self.add_node_to_map(self.nodes_by_datacenter, datacenter, node)
+            self.load_data_for_node(node['Node'], datacenter)
+
+    def load_data_for_node(self, node, datacenter):
+        '''loads the data for a single node, adding it to various groups based on
+        metadata retrieved from the kv store and service availability'''
+
+        if self.config.suffixes == 'true':
+            index, node_data = self.consul_api.catalog.node(node, dc=datacenter)
+        else:
+            node_data = self.consul_get_node_inmemory(node)
+        node = node_data['Node']
+
+        self.add_node_to_map(self.nodes, 'all', node)
+        self.add_metadata(node_data, "consul_datacenter", datacenter)
+        self.add_metadata(node_data, "consul_nodename", node['Node'])
+
+        self.load_groups_from_kv(node_data)
+        self.load_node_metadata_from_kv(node_data)
+        if self.config.suffixes == 'true':
+            self.load_availability_groups(node_data, datacenter)
+        for name, service in node_data['Services'].items():
+            self.load_data_from_service(name, service, node_data)
+
+    def load_node_metadata_from_kv(self, node_data):
+        ''' load the json dict at the metadata path defined by the kv_metadata value
+            and the node name, and add each entry in the dictionary to the node's
+            metadata '''
+        node = node_data['Node']
+        if self.config.has_config('kv_metadata'):
+            key = "%s/%s/%s" % (self.config.kv_metadata, self.current_dc, node['Node'])
+            if self.config.bulk_load == 'true':
+                metadata = self.consul_get_kv_inmemory(key)
+            else:
+                index, metadata = self.consul_api.kv.get(key)
+            if metadata and metadata['Value']:
+                try:
+                    metadata = json.loads(metadata['Value'])
+                    for k, v in metadata.items():
+                        self.add_metadata(node_data, k, v)
+                except Exception:
+                    pass
+
+    def load_groups_from_kv(self, node_data):
+        ''' load the comma separated list of groups at the path defined by the
+            kv_groups config value and the node name, and add the node address to
+            each group found '''
+        node = node_data['Node']
+        if self.config.has_config('kv_groups'):
+            key = "%s/%s/%s" % 
(self.config.kv_groups, self.current_dc, node['Node']) + if self.config.bulk_load == 'true': + groups = self.consul_get_kv_inmemory(key) + else: + index, groups = self.consul_api.kv.get(key) + if groups and groups['Value']: + for group in groups['Value'].split(','): + self.add_node_to_map(self.nodes_by_kv, group.strip(), node) + + def load_data_from_service(self, service_name, service, node_data): + '''process a service registered on a node, adding the node to a group with + the service name. Each service tag is extracted and the node is added to a + tag grouping also''' + self.add_metadata(node_data, "consul_services", service_name, True) + + if self.is_service("ssh", service_name): + self.add_metadata(node_data, "ansible_ssh_port", service['Port']) + + if self.config.has_config('servers_suffix'): + service_name = service_name + self.config.servers_suffix + + self.add_node_to_map(self.nodes_by_service, service_name, node_data['Node']) + self.extract_groups_from_tags(service_name, service, node_data) + + def is_service(self, target, name): + return name and (name.lower() == target.lower()) + + def extract_groups_from_tags(self, service_name, service, node_data): + '''iterates each service tag and adds the node to groups derived from the + service and tag names e.g. nginx_master''' + if self.config.has_config('tags') and service['Tags']: + tags = service['Tags'] + self.add_metadata(node_data, "consul_%s_tags" % service_name, tags) + for tag in service['Tags']: + tagname = service_name + '_' + tag + self.add_node_to_map(self.nodes_by_tag, tagname, node_data['Node']) + + def combine_all_results(self): + '''prunes and sorts all groupings for combination into the final map''' + self.inventory = {"_meta": {"hostvars": self.node_metadata}} + groupings = [self.nodes, self.nodes_by_datacenter, self.nodes_by_service, + self.nodes_by_tag, self.nodes_by_kv, self.nodes_by_availability] + for grouping in groupings: + for name, addresses in grouping.items(): + self.inventory[name] = sorted(list(set(addresses))) + + def add_metadata(self, node_data, key, value, is_list=False): + ''' Pushed an element onto a metadata dict for the node, creating + the dict if it doesn't exist ''' + key = self.to_safe(key) + node = self.get_inventory_name(node_data['Node']) + + if node in self.node_metadata: + metadata = self.node_metadata[node] + else: + metadata = {} + self.node_metadata[node] = metadata + if is_list: + self.push(metadata, key, value) + else: + metadata[key] = value + + def get_inventory_name(self, node_data): + '''return the ip or a node name that can be looked up in consul's dns''' + domain = self.config.domain + if domain: + node_name = node_data['Node'] + if self.current_dc: + return '%s.node.%s.%s' % (node_name, self.current_dc, domain) + else: + return '%s.node.%s' % (node_name, domain) + else: + return node_data['Address'] + + def add_node_to_map(self, map, name, node): + self.push(map, name, self.get_inventory_name(node)) + + def push(self, my_dict, key, element): + ''' Pushed an element onto an array that may not have been defined in the + dict ''' + key = self.to_safe(key) + if key in my_dict: + my_dict[key].append(element) + else: + my_dict[key] = [element] + + def to_safe(self, word): + ''' Converts 'bad' characters in a string to underscores so they can be used + as Ansible groups ''' + return re.sub(r'[^A-Za-z0-9\-\.]', '_', word) + + def sanitize_dict(self, d): + + new_dict = {} + for k, v in d.items(): + if v is not None: + new_dict[self.to_safe(str(k))] = self.to_safe(str(v)) + return 
new_dict + + def sanitize_list(self, seq): + new_seq = [] + for d in seq: + new_seq.append(self.sanitize_dict(d)) + return new_seq + + +class ConsulConfig(dict): + + def __init__(self): + self.read_settings() + self.read_cli_args() + self.read_env_vars() + + def has_config(self, name): + if hasattr(self, name): + return getattr(self, name) + else: + return False + + def read_settings(self): + ''' Reads the settings from the consul_io.ini file (or consul.ini for backwards compatibility)''' + config = configparser.SafeConfigParser() + if os.path.isfile(os.path.dirname(os.path.realpath(__file__)) + '/consul_io.ini'): + config.read(os.path.dirname(os.path.realpath(__file__)) + '/consul_io.ini') + else: + config.read(os.path.dirname(os.path.realpath(__file__)) + '/consul.ini') + + config_options = ['host', 'token', 'datacenter', 'servers_suffix', + 'tags', 'kv_metadata', 'kv_groups', 'availability', + 'unavailable_suffix', 'available_suffix', 'url', + 'domain', 'suffixes', 'bulk_load'] + for option in config_options: + value = None + if config.has_option('consul', option): + value = config.get('consul', option).lower() + setattr(self, option, value) + + def read_cli_args(self): + ''' Command line argument processing ''' + parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based nodes in a Consul cluster') + + parser.add_argument('--list', action='store_true', + help='Get all inventory variables from all nodes in the consul cluster') + parser.add_argument('--host', action='store', + help='Get all inventory variables about a specific consul node,' + 'requires datacenter set in consul.ini.') + parser.add_argument('--datacenter', action='store', + help='Get all inventory about a specific consul datacenter') + + args = parser.parse_args() + arg_names = ['host', 'datacenter'] + + for arg in arg_names: + if getattr(args, arg): + setattr(self, arg, getattr(args, arg)) + + def read_env_vars(self): + env_var_options = ['datacenter', 'url'] + for option in env_var_options: + value = None + env_var = 'CONSUL_' + option.upper() + if os.environ.get(env_var): + setattr(self, option, os.environ.get(env_var)) + + def get_availability_suffix(self, suffix, default): + if self.has_config(suffix): + return self.has_config(suffix) + return default + + def get_consul_api(self): + '''get an instance of the api based on the supplied configuration''' + host = 'localhost' + port = 8500 + token = None + scheme = 'http' + + if hasattr(self, 'url'): + from ansible.module_utils.six.moves.urllib.parse import urlparse + o = urlparse(self.url) + if o.hostname: + host = o.hostname + if o.port: + port = o.port + if o.scheme: + scheme = o.scheme + + if hasattr(self, 'token'): + token = self.token + if not token: + token = 'anonymous' + return consul.Consul(host=host, port=port, token=token, scheme=scheme) + + +ConsulInventory() diff --git a/scripts/inventory/digital_ocean.ini b/scripts/inventory/digital_ocean.ini new file mode 100644 index 0000000000..b809554b20 --- /dev/null +++ b/scripts/inventory/digital_ocean.ini @@ -0,0 +1,34 @@ +# Ansible DigitalOcean external inventory script settings +# + +[digital_ocean] + +# The module needs your DigitalOcean API Token. +# It may also be specified on the command line via --api-token +# or via the environment variables DO_API_TOKEN or DO_API_KEY +# +#api_token = 123456abcdefg + + +# API calls to DigitalOcean may be slow. For this reason, we cache the results +# of an API call. Set this to the path you want cache files to be written to. 
+# One file will be written to this directory: +# - ansible-digital_ocean.cache +# +cache_path = /tmp + + +# The number of seconds a cache file is considered valid. After this many +# seconds, a new API call will be made, and the cache file will be updated. +# +cache_max_age = 300 + +# Use the private network IP address instead of the public when available. +# +use_private_network = False + +# Pass variables to every group, e.g.: +# +# group_variables = { 'ansible_user': 'root' } +# +group_variables = {} diff --git a/scripts/inventory/digital_ocean.py b/scripts/inventory/digital_ocean.py new file mode 100644 index 0000000000..eecdb85c1c --- /dev/null +++ b/scripts/inventory/digital_ocean.py @@ -0,0 +1,551 @@ +#!/usr/bin/env python + +""" +DigitalOcean external inventory script +====================================== + +Generates Ansible inventory of DigitalOcean Droplets. + +In addition to the --list and --host options used by Ansible, there are options +for generating JSON of other DigitalOcean data. This is useful when creating +droplets. For example, --regions will return all the DigitalOcean Regions. +This information can also be easily found in the cache file, whose default +location is /tmp/ansible-digital_ocean.cache). + +The --pretty (-p) option pretty-prints the output for better human readability. + +---- +Although the cache stores all the information received from DigitalOcean, +the cache is not used for current droplet information (in --list, --host, +--all, and --droplets). This is so that accurate droplet information is always +found. You can force this script to use the cache with --force-cache. + +---- +Configuration is read from `digital_ocean.ini`, then from environment variables, +and then from command-line arguments. + +Most notably, the DigitalOcean API Token must be specified. It can be specified +in the INI file or with the following environment variables: + export DO_API_TOKEN='abc123' or + export DO_API_KEY='abc123' + +Alternatively, it can be passed on the command-line with --api-token. 
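+
+For example, with the token already exported in the environment:
+    export DO_API_TOKEN='abc123'
+    ./digital_ocean.py --list --pretty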
+ +If you specify DigitalOcean credentials in the INI file, a handy way to +get them into your environment (e.g., to use the digital_ocean module) +is to use the output of the --env option with export: + export $(digital_ocean.py --env) + +---- +The following groups are generated from --list: + - ID (droplet ID) + - NAME (droplet NAME) + - digital_ocean + - image_ID + - image_NAME + - distro_NAME (distribution NAME from image) + - region_NAME + - size_NAME + - status_STATUS + +For each host, the following variables are registered: + - do_backup_ids + - do_created_at + - do_disk + - do_features - list + - do_id + - do_image - object + - do_ip_address + - do_private_ip_address + - do_kernel - object + - do_locked + - do_memory + - do_name + - do_networks - object + - do_next_backup_window + - do_region - object + - do_size - object + - do_size_slug + - do_snapshot_ids - list + - do_status + - do_tags + - do_vcpus + - do_volume_ids + +----- +``` +usage: digital_ocean.py [-h] [--list] [--host HOST] [--all] [--droplets] + [--regions] [--images] [--sizes] [--ssh-keys] + [--domains] [--tags] [--pretty] + [--cache-path CACHE_PATH] + [--cache-max_age CACHE_MAX_AGE] [--force-cache] + [--refresh-cache] [--env] [--api-token API_TOKEN] + +Produce an Ansible Inventory file based on DigitalOcean credentials + +optional arguments: + -h, --help show this help message and exit + --list List all active Droplets as Ansible inventory + (default: True) + --host HOST Get all Ansible inventory variables about a specific + Droplet + --all List all DigitalOcean information as JSON + --droplets, -d List Droplets as JSON + --regions List Regions as JSON + --images List Images as JSON + --sizes List Sizes as JSON + --ssh-keys List SSH keys as JSON + --domains List Domains as JSON + --tags List Tags as JSON + --pretty, -p Pretty-print results + --cache-path CACHE_PATH + Path to the cache files (default: .) + --cache-max_age CACHE_MAX_AGE + Maximum age of the cached items (default: 0) + --force-cache Only use data from the cache + --refresh-cache, -r Force refresh of cache by making API requests to + DigitalOcean (default: False - use cache files) + --env, -e Display DO_API_TOKEN + --api-token API_TOKEN, -a API_TOKEN + DigitalOcean API Token +``` + +""" + +# (c) 2013, Evan Wies +# (c) 2017, Ansible Project +# (c) 2017, Abhijeet Kasurde +# +# Inspired by the EC2 inventory plugin: +# https://github.com/ansible/ansible/blob/devel/contrib/inventory/ec2.py +# +# This file is part of Ansible, +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+ +###################################################################### + +import argparse +import ast +import os +import re +import requests +import sys +from time import time + +try: + import ConfigParser +except ImportError: + import configparser as ConfigParser + +import json + + +class DoManager: + def __init__(self, api_token): + self.api_token = api_token + self.api_endpoint = 'https://api.digitalocean.com/v2' + self.headers = {'Authorization': 'Bearer {0}'.format(self.api_token), + 'Content-type': 'application/json'} + self.timeout = 60 + + def _url_builder(self, path): + if path[0] == '/': + path = path[1:] + return '%s/%s' % (self.api_endpoint, path) + + def send(self, url, method='GET', data=None): + url = self._url_builder(url) + data = json.dumps(data) + try: + if method == 'GET': + resp_data = {} + incomplete = True + while incomplete: + resp = requests.get(url, data=data, headers=self.headers, timeout=self.timeout) + json_resp = resp.json() + + for key, value in json_resp.items(): + if isinstance(value, list) and key in resp_data: + resp_data[key] += value + else: + resp_data[key] = value + + try: + url = json_resp['links']['pages']['next'] + except KeyError: + incomplete = False + + except ValueError as e: + sys.exit("Unable to parse result from %s: %s" % (url, e)) + return resp_data + + def all_active_droplets(self): + resp = self.send('droplets/') + return resp['droplets'] + + def all_regions(self): + resp = self.send('regions/') + return resp['regions'] + + def all_images(self, filter_name='global'): + params = {'filter': filter_name} + resp = self.send('images/', data=params) + return resp['images'] + + def sizes(self): + resp = self.send('sizes/') + return resp['sizes'] + + def all_ssh_keys(self): + resp = self.send('account/keys') + return resp['ssh_keys'] + + def all_domains(self): + resp = self.send('domains/') + return resp['domains'] + + def show_droplet(self, droplet_id): + resp = self.send('droplets/%s' % droplet_id) + return resp['droplet'] + + def all_tags(self): + resp = self.send('tags') + return resp['tags'] + + +class DigitalOceanInventory(object): + + ########################################################################### + # Main execution path + ########################################################################### + + def __init__(self): + """Main execution path """ + + # DigitalOceanInventory data + self.data = {} # All DigitalOcean data + self.inventory = {} # Ansible Inventory + + # Define defaults + self.cache_path = '.' + self.cache_max_age = 0 + self.use_private_network = False + self.group_variables = {} + + # Read settings, environment variables, and CLI arguments + self.read_settings() + self.read_environment() + self.read_cli_args() + + # Verify credentials were set + if not hasattr(self, 'api_token'): + msg = 'Could not find values for DigitalOcean api_token. 
They must be specified via either ini file, ' \ + 'command line argument (--api-token), or environment variables (DO_API_TOKEN)\n' + sys.stderr.write(msg) + sys.exit(-1) + + # env command, show DigitalOcean credentials + if self.args.env: + print("DO_API_TOKEN=%s" % self.api_token) + sys.exit(0) + + # Manage cache + self.cache_filename = self.cache_path + "/ansible-digital_ocean.cache" + self.cache_refreshed = False + + if self.is_cache_valid(): + self.load_from_cache() + if len(self.data) == 0: + if self.args.force_cache: + sys.stderr.write('Cache is empty and --force-cache was specified\n') + sys.exit(-1) + + self.manager = DoManager(self.api_token) + + # Pick the json_data to print based on the CLI command + if self.args.droplets: + self.load_from_digital_ocean('droplets') + json_data = {'droplets': self.data['droplets']} + elif self.args.regions: + self.load_from_digital_ocean('regions') + json_data = {'regions': self.data['regions']} + elif self.args.images: + self.load_from_digital_ocean('images') + json_data = {'images': self.data['images']} + elif self.args.sizes: + self.load_from_digital_ocean('sizes') + json_data = {'sizes': self.data['sizes']} + elif self.args.ssh_keys: + self.load_from_digital_ocean('ssh_keys') + json_data = {'ssh_keys': self.data['ssh_keys']} + elif self.args.domains: + self.load_from_digital_ocean('domains') + json_data = {'domains': self.data['domains']} + elif self.args.tags: + self.load_from_digital_ocean('tags') + json_data = {'tags': self.data['tags']} + elif self.args.all: + self.load_from_digital_ocean() + json_data = self.data + elif self.args.host: + json_data = self.load_droplet_variables_for_host() + else: # '--list' this is last to make it default + self.load_from_digital_ocean('droplets') + self.build_inventory() + json_data = self.inventory + + if self.cache_refreshed: + self.write_to_cache() + + if self.args.pretty: + print(json.dumps(json_data, indent=2)) + else: + print(json.dumps(json_data)) + + ########################################################################### + # Script configuration + ########################################################################### + + def read_settings(self): + """ Reads the settings from the digital_ocean.ini file """ + config = ConfigParser.ConfigParser() + config_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'digital_ocean.ini') + config.read(config_path) + + # Credentials + if config.has_option('digital_ocean', 'api_token'): + self.api_token = config.get('digital_ocean', 'api_token') + + # Cache related + if config.has_option('digital_ocean', 'cache_path'): + self.cache_path = config.get('digital_ocean', 'cache_path') + if config.has_option('digital_ocean', 'cache_max_age'): + self.cache_max_age = config.getint('digital_ocean', 'cache_max_age') + + # Private IP Address + if config.has_option('digital_ocean', 'use_private_network'): + self.use_private_network = config.getboolean('digital_ocean', 'use_private_network') + + # Group variables + if config.has_option('digital_ocean', 'group_variables'): + self.group_variables = ast.literal_eval(config.get('digital_ocean', 'group_variables')) + + def read_environment(self): + """ Reads the settings from environment variables """ + # Setup credentials + if os.getenv("DO_API_TOKEN"): + self.api_token = os.getenv("DO_API_TOKEN") + if os.getenv("DO_API_KEY"): + self.api_token = os.getenv("DO_API_KEY") + + def read_cli_args(self): + """ Command line argument processing """ + parser = argparse.ArgumentParser(description='Produce an Ansible 
Inventory file based on DigitalOcean credentials') + + parser.add_argument('--list', action='store_true', help='List all active Droplets as Ansible inventory (default: True)') + parser.add_argument('--host', action='store', help='Get all Ansible inventory variables about a specific Droplet') + + parser.add_argument('--all', action='store_true', help='List all DigitalOcean information as JSON') + parser.add_argument('--droplets', '-d', action='store_true', help='List Droplets as JSON') + parser.add_argument('--regions', action='store_true', help='List Regions as JSON') + parser.add_argument('--images', action='store_true', help='List Images as JSON') + parser.add_argument('--sizes', action='store_true', help='List Sizes as JSON') + parser.add_argument('--ssh-keys', action='store_true', help='List SSH keys as JSON') + parser.add_argument('--domains', action='store_true', help='List Domains as JSON') + parser.add_argument('--tags', action='store_true', help='List Tags as JSON') + + parser.add_argument('--pretty', '-p', action='store_true', help='Pretty-print results') + + parser.add_argument('--cache-path', action='store', help='Path to the cache files (default: .)') + parser.add_argument('--cache-max_age', action='store', help='Maximum age of the cached items (default: 0)') + parser.add_argument('--force-cache', action='store_true', default=False, help='Only use data from the cache') + parser.add_argument('--refresh-cache', '-r', action='store_true', default=False, + help='Force refresh of cache by making API requests to DigitalOcean (default: False - use cache files)') + + parser.add_argument('--env', '-e', action='store_true', help='Display DO_API_TOKEN') + parser.add_argument('--api-token', '-a', action='store', help='DigitalOcean API Token') + + self.args = parser.parse_args() + + if self.args.api_token: + self.api_token = self.args.api_token + + # Make --list default if none of the other commands are specified + if (not self.args.droplets and not self.args.regions and + not self.args.images and not self.args.sizes and + not self.args.ssh_keys and not self.args.domains and + not self.args.tags and + not self.args.all and not self.args.host): + self.args.list = True + + ########################################################################### + # Data Management + ########################################################################### + + def load_from_digital_ocean(self, resource=None): + """Get JSON from DigitalOcean API """ + if self.args.force_cache and os.path.isfile(self.cache_filename): + return + # We always get fresh droplets + if self.is_cache_valid() and not (resource == 'droplets' or resource is None): + return + if self.args.refresh_cache: + resource = None + + if resource == 'droplets' or resource is None: + self.data['droplets'] = self.manager.all_active_droplets() + self.cache_refreshed = True + if resource == 'regions' or resource is None: + self.data['regions'] = self.manager.all_regions() + self.cache_refreshed = True + if resource == 'images' or resource is None: + self.data['images'] = self.manager.all_images() + self.cache_refreshed = True + if resource == 'sizes' or resource is None: + self.data['sizes'] = self.manager.sizes() + self.cache_refreshed = True + if resource == 'ssh_keys' or resource is None: + self.data['ssh_keys'] = self.manager.all_ssh_keys() + self.cache_refreshed = True + if resource == 'domains' or resource is None: + self.data['domains'] = self.manager.all_domains() + self.cache_refreshed = True + if resource == 'tags' or resource is None: + 
self.data['tags'] = self.manager.all_tags() + self.cache_refreshed = True + + def add_inventory_group(self, key): + """ Method to create group dict """ + host_dict = {'hosts': [], 'vars': {}} + self.inventory[key] = host_dict + return + + def add_host(self, group, host): + """ Helper method to reduce host duplication """ + if group not in self.inventory: + self.add_inventory_group(group) + + if host not in self.inventory[group]['hosts']: + self.inventory[group]['hosts'].append(host) + return + + def build_inventory(self): + """ Build Ansible inventory of droplets """ + self.inventory = { + 'all': { + 'hosts': [], + 'vars': self.group_variables + }, + '_meta': {'hostvars': {}} + } + + # add all droplets by id and name + for droplet in self.data['droplets']: + for net in droplet['networks']['v4']: + if net['type'] == 'public': + dest = net['ip_address'] + else: + continue + + self.inventory['all']['hosts'].append(dest) + + self.add_host(droplet['id'], dest) + + self.add_host(droplet['name'], dest) + + # groups that are always present + for group in ('digital_ocean', + 'region_' + droplet['region']['slug'], + 'image_' + str(droplet['image']['id']), + 'size_' + droplet['size']['slug'], + 'distro_' + DigitalOceanInventory.to_safe(droplet['image']['distribution']), + 'status_' + droplet['status']): + self.add_host(group, dest) + + # groups that are not always present + for group in (droplet['image']['slug'], + droplet['image']['name']): + if group: + image = 'image_' + DigitalOceanInventory.to_safe(group) + self.add_host(image, dest) + + if droplet['tags']: + for tag in droplet['tags']: + self.add_host(tag, dest) + + # hostvars + info = self.do_namespace(droplet) + self.inventory['_meta']['hostvars'][dest] = info + + def load_droplet_variables_for_host(self): + """ Generate a JSON response to a --host call """ + host = int(self.args.host) + droplet = self.manager.show_droplet(host) + info = self.do_namespace(droplet) + return {'droplet': info} + + ########################################################################### + # Cache Management + ########################################################################### + + def is_cache_valid(self): + """ Determines if the cache files have expired, or if it is still valid """ + if os.path.isfile(self.cache_filename): + mod_time = os.path.getmtime(self.cache_filename) + current_time = time() + if (mod_time + self.cache_max_age) > current_time: + return True + return False + + def load_from_cache(self): + """ Reads the data from the cache file and assigns it to member variables as Python Objects """ + try: + with open(self.cache_filename, 'r') as cache: + json_data = cache.read() + data = json.loads(json_data) + except IOError: + data = {'data': {}, 'inventory': {}} + + self.data = data['data'] + self.inventory = data['inventory'] + + def write_to_cache(self): + """ Writes data in JSON format to a file """ + data = {'data': self.data, 'inventory': self.inventory} + json_data = json.dumps(data, indent=2) + + with open(self.cache_filename, 'w') as cache: + cache.write(json_data) + + ########################################################################### + # Utilities + ########################################################################### + @staticmethod + def to_safe(word): + """ Converts 'bad' characters in a string to underscores so they can be used as Ansible groups """ + return re.sub(r"[^A-Za-z0-9\-.]", "_", word) + + @staticmethod + def do_namespace(data): + """ Returns a copy of the dictionary with all the keys put in a 'do_' 
namespace """ + info = {} + for k, v in data.items(): + info['do_' + k] = v + return info + + +########################################################################### +# Run the script +DigitalOceanInventory() diff --git a/scripts/inventory/docker.py b/scripts/inventory/docker.py new file mode 100644 index 0000000000..489c820a23 --- /dev/null +++ b/scripts/inventory/docker.py @@ -0,0 +1,905 @@ +#!/usr/bin/env python +# +# (c) 2016 Paul Durivage +# Chris Houseknecht +# James Tanner +# +# This file is part of Ansible. +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + + +DOCUMENTATION = ''' + +Docker Inventory Script +======================= +The inventory script generates dynamic inventory by making API requests to one or more Docker APIs. It's dynamic +because the inventory is generated at run-time rather than being read from a static file. The script generates the +inventory by connecting to one or many Docker APIs and inspecting the containers it finds at each API. Which APIs the +script contacts can be defined using environment variables or a configuration file. + +Requirements +------------ + +Using the docker modules requires having docker-py +installed on the host running Ansible. 
To install docker-py: + + pip install docker-py + + +Run for Specific Host +--------------------- +When run for a specific container using the --host option this script returns the following hostvars: + +{ + "ansible_ssh_host": "", + "ansible_ssh_port": 0, + "docker_apparmorprofile": "", + "docker_args": [], + "docker_config": { + "AttachStderr": false, + "AttachStdin": false, + "AttachStdout": false, + "Cmd": [ + "/hello" + ], + "Domainname": "", + "Entrypoint": null, + "Env": null, + "Hostname": "9f2f80b0a702", + "Image": "hello-world", + "Labels": {}, + "OnBuild": null, + "OpenStdin": false, + "StdinOnce": false, + "Tty": false, + "User": "", + "Volumes": null, + "WorkingDir": "" + }, + "docker_created": "2016-04-18T02:05:59.659599249Z", + "docker_driver": "aufs", + "docker_execdriver": "native-0.2", + "docker_execids": null, + "docker_graphdriver": { + "Data": null, + "Name": "aufs" + }, + "docker_hostconfig": { + "Binds": null, + "BlkioWeight": 0, + "CapAdd": null, + "CapDrop": null, + "CgroupParent": "", + "ConsoleSize": [ + 0, + 0 + ], + "ContainerIDFile": "", + "CpuPeriod": 0, + "CpuQuota": 0, + "CpuShares": 0, + "CpusetCpus": "", + "CpusetMems": "", + "Devices": null, + "Dns": null, + "DnsOptions": null, + "DnsSearch": null, + "ExtraHosts": null, + "GroupAdd": null, + "IpcMode": "", + "KernelMemory": 0, + "Links": null, + "LogConfig": { + "Config": {}, + "Type": "json-file" + }, + "LxcConf": null, + "Memory": 0, + "MemoryReservation": 0, + "MemorySwap": 0, + "MemorySwappiness": null, + "NetworkMode": "default", + "OomKillDisable": false, + "PidMode": "host", + "PortBindings": null, + "Privileged": false, + "PublishAllPorts": false, + "ReadonlyRootfs": false, + "RestartPolicy": { + "MaximumRetryCount": 0, + "Name": "" + }, + "SecurityOpt": [ + "label:disable" + ], + "UTSMode": "", + "Ulimits": null, + "VolumeDriver": "", + "VolumesFrom": null + }, + "docker_hostnamepath": "/mnt/sda1/var/lib/docker/containers/9f2f80b0a702361d1ac432e6af816c19bda46da15c21264fb418c873de635a14/hostname", + "docker_hostspath": "/mnt/sda1/var/lib/docker/containers/9f2f80b0a702361d1ac432e6af816c19bda46da15c21264fb418c873de635a14/hosts", + "docker_id": "9f2f80b0a702361d1ac432e6af816c19bda46da15c21264fb418c873de635a14", + "docker_image": "0a6ba66e537a53a5ea94f7c6a99c534c6adb12e3ed09326d4bf3b38f7c3ba4e7", + "docker_logpath": "/mnt/sda1/var/lib/docker/containers/9f2f80b0a702361d1ac432e6af816c19bda46da15c21264fb418c873de635a14/9f2f80b0a702361d1ac432e6a-json.log", + "docker_mountlabel": "", + "docker_mounts": [], + "docker_name": "/hello-world", + "docker_networksettings": { + "Bridge": "", + "EndpointID": "", + "Gateway": "", + "GlobalIPv6Address": "", + "GlobalIPv6PrefixLen": 0, + "HairpinMode": false, + "IPAddress": "", + "IPPrefixLen": 0, + "IPv6Gateway": "", + "LinkLocalIPv6Address": "", + "LinkLocalIPv6PrefixLen": 0, + "MacAddress": "", + "Networks": { + "bridge": { + "EndpointID": "", + "Gateway": "", + "GlobalIPv6Address": "", + "GlobalIPv6PrefixLen": 0, + "IPAddress": "", + "IPPrefixLen": 0, + "IPv6Gateway": "", + "MacAddress": "" + } + }, + "Ports": null, + "SandboxID": "", + "SandboxKey": "", + "SecondaryIPAddresses": null, + "SecondaryIPv6Addresses": null + }, + "docker_path": "/hello", + "docker_processlabel": "", + "docker_resolvconfpath": "/mnt/sda1/var/lib/docker/containers/9f2f80b0a702361d1ac432e6af816c19bda46da15c21264fb418c873de635a14/resolv.conf", + "docker_restartcount": 0, + "docker_short_id": "9f2f80b0a7023", + "docker_state": { + "Dead": false, + "Error": "", + "ExitCode": 0, + 
"FinishedAt": "2016-04-18T02:06:00.296619369Z", + "OOMKilled": false, + "Paused": false, + "Pid": 0, + "Restarting": false, + "Running": false, + "StartedAt": "2016-04-18T02:06:00.272065041Z", + "Status": "exited" + } +} + +Groups +------ +When run in --list mode (the default), container instances are grouped by: + + - container id + - container name + - container short id + - image_name (image_) + - stack_name (stack_) + - service_name (service_) + - docker_host + - running + - stopped + + +Configuration: +-------------- +You can control the behavior of the inventory script by passing arguments, defining environment variables, or +creating a configuration file named docker.yml (sample provided in ansible/contrib/inventory). The order of precedence +is command line args, then the docker.yml file and finally environment variables. + +Environment variables: +...................... + +To connect to a single Docker API the following variables can be defined in the environment to control the connection +options. These are the same environment variables used by the Docker modules. + + DOCKER_HOST + The URL or Unix socket path used to connect to the Docker API. Defaults to unix://var/run/docker.sock. + + DOCKER_API_VERSION: + The version of the Docker API running on the Docker Host. Defaults to the latest version of the API supported + by docker-py. + + DOCKER_TIMEOUT: + The maximum amount of time in seconds to wait on a response fromm the API. Defaults to 60 seconds. + + DOCKER_TLS: + Secure the connection to the API by using TLS without verifying the authenticity of the Docker host server. + Defaults to False. + + DOCKER_TLS_VERIFY: + Secure the connection to the API by using TLS and verifying the authenticity of the Docker host server. + Default is False + + DOCKER_TLS_HOSTNAME: + When verifying the authenticity of the Docker Host server, provide the expected name of the server. Defaults + to localhost. + + DOCKER_CERT_PATH: + Path to the directory containing the client certificate, client key and CA certificate. + + DOCKER_SSL_VERSION: + Provide a valid SSL version number. Default value determined by docker-py, which at the time of this writing + was 1.0 + +In addition to the connection variables there are a couple variables used to control the execution and output of the +script: + + DOCKER_CONFIG_FILE + Path to the configuration file. Defaults to ./docker.yml. + + DOCKER_PRIVATE_SSH_PORT: + The private port (container port) on which SSH is listening for connections. Defaults to 22. + + DOCKER_DEFAULT_IP: + The IP address to assign to ansible_host when the container's SSH port is mapped to interface '0.0.0.0'. + + +Configuration File +.................. + +Using a configuration file provides a means for defining a set of Docker APIs from which to build an inventory. + +The default name of the file is derived from the name of the inventory script. By default the script will look for +basename of the script (i.e. docker) with an extension of '.yml'. + +You can also override the default name of the script by defining DOCKER_CONFIG_FILE in the environment. + +Here's what you can define in docker_inventory.yml: + + defaults + Defines a default connection. Defaults will be taken from this and applied to any values not provided + for a host defined in the hosts list. + + hosts + If you wish to get inventory from more than one Docker host, define a hosts list. 
+
+For the default host and each host in the hosts list define the following attributes:
+
+  host:
+      description: The URL or Unix socket path used to connect to the Docker API.
+      required: yes
+
+  tls:
+     description: Connect using TLS without verifying the authenticity of the Docker host server.
+     default: false
+     required: false
+
+  tls_verify:
+     description: Connect using TLS and verify the authenticity of the Docker host server.
+     default: false
+     required: false
+
+  cert_path:
+     description: Path to the client's TLS certificate file.
+     default: null
+     required: false
+
+  cacert_path:
+     description: Use a CA certificate when performing server verification by providing the path to a CA certificate file.
+     default: null
+     required: false
+
+  key_path:
+     description: Path to the client's TLS key file.
+     default: null
+     required: false
+
+  version:
+     description: The Docker API version.
+     required: false
+     default: will be supplied by the docker-py module.
+
+  timeout:
+     description: The amount of time in seconds to wait on an API response.
+     required: false
+     default: 60
+
+  default_ip:
+     description: The IP address to assign to ansible_host when the container's SSH port is mapped to interface
+     '0.0.0.0'.
+     required: false
+     default: 127.0.0.1
+
+  private_ssh_port:
+     description: The port containers use for SSH.
+     required: false
+     default: 22
+
+Examples
+--------
+
+# Connect to the Docker API on localhost port 4243 and format the JSON output
+DOCKER_HOST=tcp://localhost:4243 ./docker.py --pretty
+
+# Any container's ssh port exposed on 0.0.0.0 will be mapped to
+# another IP address (where Ansible will attempt to connect via SSH)
+DOCKER_DEFAULT_IP=1.2.3.4 ./docker.py --pretty
+
+# Run as input to a playbook:
+ansible-playbook -i ~/projects/ansible/contrib/inventory/docker.py docker_inventory_test.yml
+
+# Simple playbook to invoke with the above example:
+
+    - name: Test docker_inventory
+      hosts: all
+      connection: local
+      gather_facts: no
+      tasks:
+        - debug: msg="Container - {{ inventory_hostname }}"
+
+'''
+
+import os
+import sys
+import json
+import argparse
+import re
+import yaml
+
+from collections import defaultdict
+# Manipulation of the path is needed because the docker-py
+# module is imported by the name docker, and because this file
+# is also named docker
+for path in [os.getcwd(), '', os.path.dirname(os.path.abspath(__file__))]:
+    try:
+        del sys.path[sys.path.index(path)]
+    except Exception:
+        pass
+
+HAS_DOCKER_PY = True
+HAS_DOCKER_ERROR = False
+
+try:
+    from docker.errors import APIError, TLSParameterError
+    from docker.tls import TLSConfig
+    from docker.constants import DEFAULT_TIMEOUT_SECONDS, DEFAULT_DOCKER_API_VERSION
+except ImportError as exc:
+    HAS_DOCKER_ERROR = str(exc)
+    HAS_DOCKER_PY = False
+
+# Client has recently been split into DockerClient and APIClient
+try:
+    from docker import Client
+except ImportError as dummy:
+    try:
+        from docker import APIClient as Client
+    except ImportError as exc:
+        HAS_DOCKER_ERROR = str(exc)
+        HAS_DOCKER_PY = False
+
+        class Client:
+            pass
+
+DEFAULT_DOCKER_CONFIG_FILE = os.path.splitext(os.path.basename(__file__))[0] + '.yml'
+DEFAULT_DOCKER_HOST = 'unix://var/run/docker.sock'
+DEFAULT_TLS = False
+DEFAULT_TLS_VERIFY = False
+DEFAULT_TLS_HOSTNAME = "localhost"
+DEFAULT_IP = '127.0.0.1'
+DEFAULT_SSH_PORT = '22'
+
+BOOLEANS_TRUE = ['yes', 'on', '1', 'true', 1, True]
+BOOLEANS_FALSE = ['no', 'off', '0', 'false', 0, False]
+
+
+DOCKER_ENV_ARGS = dict(
+    config_file='DOCKER_CONFIG_FILE',
+    docker_host='DOCKER_HOST',
+    api_version='DOCKER_API_VERSION',
+    cert_path='DOCKER_CERT_PATH',
+    ssl_version='DOCKER_SSL_VERSION',
+    tls='DOCKER_TLS',
+    tls_verify='DOCKER_TLS_VERIFY',
+    tls_hostname='DOCKER_TLS_HOSTNAME',
+    timeout='DOCKER_TIMEOUT',
+    private_ssh_port='DOCKER_DEFAULT_SSH_PORT',
+    default_ip='DOCKER_DEFAULT_IP',
+)
+
+
+def fail(msg):
+    sys.stderr.write("%s\n" % msg)
+    sys.exit(1)
+
+
+def log(msg, pretty_print=False):
+    if pretty_print:
+        print(json.dumps(msg, sort_keys=True, indent=2))
+    else:
+        print(msg + u'\n')
+
+
+class AnsibleDockerClient(Client):
+    def __init__(self, auth_params, debug):
+
+        self.auth_params = auth_params
+        self.debug = debug
+        self._connect_params = self._get_connect_params()
+
+        try:
+            super(AnsibleDockerClient, self).__init__(**self._connect_params)
+        except APIError as exc:
+            self.fail("Docker API error: %s" % exc)
+        except Exception as exc:
+            self.fail("Error connecting: %s" % exc)
+
+    def fail(self, msg):
+        fail(msg)
+
+    def log(self, msg, pretty_print=False):
+        if self.debug:
+            log(msg, pretty_print)
+
+    def _get_tls_config(self, **kwargs):
+        self.log("get_tls_config:")
+        for key in kwargs:
+            self.log("  %s: %s" % (key, kwargs[key]))
+        try:
+            tls_config = TLSConfig(**kwargs)
+            return tls_config
+        except TLSParameterError as exc:
+            self.fail("TLS config error: %s" % exc)
+
+    def _get_connect_params(self):
+        auth = self.auth_params
+
+        self.log("auth params:")
+        for key in auth:
+            self.log("  %s: %s" % (key, auth[key]))
+
+        if auth['tls'] or auth['tls_verify']:
+            auth['docker_host'] = auth['docker_host'].replace('tcp://', 'https://')
+
+        if auth['tls'] and auth['cert_path'] and auth['key_path']:
+            # TLS with certs and no host verification
+            tls_config = self._get_tls_config(client_cert=(auth['cert_path'], auth['key_path']),
+                                              verify=False,
+                                              ssl_version=auth['ssl_version'])
+            return dict(base_url=auth['docker_host'],
+                        tls=tls_config,
+                        version=auth['api_version'],
+                        timeout=auth['timeout'])
+
+        if auth['tls']:
+            # TLS with no certs and no host verification
+            tls_config = self._get_tls_config(verify=False,
+                                              ssl_version=auth['ssl_version'])
+            return dict(base_url=auth['docker_host'],
+                        tls=tls_config,
+                        version=auth['api_version'],
+                        timeout=auth['timeout'])
+
+        if auth['tls_verify'] and auth['cert_path'] and auth['key_path']:
+            # TLS with certs and host verification
+            if auth['cacert_path']:
+                tls_config = self._get_tls_config(client_cert=(auth['cert_path'], auth['key_path']),
+                                                  ca_cert=auth['cacert_path'],
+                                                  verify=True,
+                                                  assert_hostname=auth['tls_hostname'],
+                                                  ssl_version=auth['ssl_version'])
+            else:
+                tls_config = self._get_tls_config(client_cert=(auth['cert_path'], auth['key_path']),
+                                                  verify=True,
+                                                  assert_hostname=auth['tls_hostname'],
+                                                  ssl_version=auth['ssl_version'])
+
+            return dict(base_url=auth['docker_host'],
+                        tls=tls_config,
+                        version=auth['api_version'],
+                        timeout=auth['timeout'])
+
+        if auth['tls_verify'] and auth['cacert_path']:
+            # TLS with cacert only
+            tls_config = self._get_tls_config(ca_cert=auth['cacert_path'],
+                                              assert_hostname=auth['tls_hostname'],
+                                              verify=True,
+                                              ssl_version=auth['ssl_version'])
+            return dict(base_url=auth['docker_host'],
+                        tls=tls_config,
+                        version=auth['api_version'],
+                        timeout=auth['timeout'])
+
+        if auth['tls_verify']:
+            # TLS with verify and no certs
+            tls_config = self._get_tls_config(verify=True,
+                                              assert_hostname=auth['tls_hostname'],
+                                              ssl_version=auth['ssl_version'])
+            return dict(base_url=auth['docker_host'],
+                        tls=tls_config,
+                        version=auth['api_version'],
+                        timeout=auth['timeout'])
+        # No TLS
+        return dict(base_url=auth['docker_host'],
+                    version=auth['api_version'],
+                    timeout=auth['timeout'])
+
+    def _handle_ssl_error(self, error):
+        match = re.match(r"hostname.*doesn\'t match (\'.*\')", str(error))
+        if match:
+            msg = "You asked for verification that Docker host name matches %s. The actual hostname is %s. " \
+                  "Most likely you need to set DOCKER_TLS_HOSTNAME or pass tls_hostname with a value of %s. " \
+                  "You may also use TLS without verification by setting the tls parameter to true." \
+                  % (self.auth_params['tls_hostname'], match.group(1), match.group(1))
+            self.fail(msg)
+        self.fail("SSL Exception: %s" % (error))
+
+
+class EnvArgs(object):
+    def __init__(self):
+        self.config_file = None
+        self.docker_host = None
+        self.api_version = None
+        self.cert_path = None
+        self.ssl_version = None
+        self.tls = None
+        self.tls_verify = None
+        self.tls_hostname = None
+        self.timeout = None
+        self.default_ssh_port = None
+        self.default_ip = None
+
+
+class DockerInventory(object):
+
+    def __init__(self):
+        self._args = self._parse_cli_args()
+        self._env_args = self._parse_env_args()
+        self.groups = defaultdict(list)
+        self.hostvars = defaultdict(dict)
+
+    def run(self):
+        config_from_file = self._parse_config_file()
+        if not config_from_file:
+            config_from_file = dict()
+        docker_hosts = self.get_hosts(config_from_file)
+
+        for host in docker_hosts:
+            client = AnsibleDockerClient(host, self._args.debug)
+            self.get_inventory(client, host)
+
+        if not self._args.host:
+            self.groups['docker_hosts'] = [host.get('docker_host') for host in docker_hosts]
+            self.groups['_meta'] = dict(
+                hostvars=self.hostvars
+            )
+            print(self._json_format_dict(self.groups, pretty_print=self._args.pretty))
+        else:
+            print(self._json_format_dict(self.hostvars.get(self._args.host, dict()), pretty_print=self._args.pretty))

+        sys.exit(0)
+
+    def get_inventory(self, client, host):
+
+        ssh_port = host.get('default_ssh_port')
+        default_ip = host.get('default_ip')
+        hostname = host.get('docker_host')
+
+        try:
+            containers = client.containers(all=True)
+        except Exception as exc:
+            self.fail("Error fetching containers for host %s - %s" % (hostname, str(exc)))
+
+        for container in containers:
+            id = container.get('Id')
+            short_id = id[:13]
+
+            try:
+                name = container.get('Names', list()).pop(0).lstrip('/')
+            except IndexError:
+                name = short_id
+
+            if not self._args.host or (self._args.host and self._args.host in [name, id, short_id]):
+                try:
+                    inspect = client.inspect_container(id)
+                except Exception as exc:
+                    self.fail("Error inspecting container %s - %s" % (name, str(exc)))
+
+                running = inspect.get('State', dict()).get('Running')
+
+                # Add container to groups
+                image_name = inspect.get('Config', dict()).get('Image')
+                if image_name:
+                    self.groups["image_%s" % (image_name)].append(name)
+
+                stack_name = inspect.get('Config', dict()).get('Labels', dict()).get('com.docker.stack.namespace')
+                if stack_name:
+                    self.groups["stack_%s" % stack_name].append(name)
+
+                service_name = inspect.get('Config', dict()).get('Labels', dict()).get('com.docker.swarm.service.name')
+                if service_name:
+                    self.groups["service_%s" % service_name].append(name)
+
+                self.groups[id].append(name)
+                self.groups[name].append(name)
+                if short_id not in self.groups:
+                    self.groups[short_id].append(name)
+                self.groups[hostname].append(name)
+
+                if running is True:
+                    self.groups['running'].append(name)
+                else:
+                    self.groups['stopped'].append(name)
+
+                # Figure out ssh IP and Port
+                try:
+                    # Look up the public-facing port NAT'ed to the ssh port.
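+                    # port() returns a list of {'HostIp': ..., 'HostPort': ...}
+                    # bindings for the given private port; the first one is used.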
+ port = client.port(container, ssh_port)[0] + except (IndexError, AttributeError, TypeError): + port = dict() + + try: + ip = default_ip if port['HostIp'] == '0.0.0.0' else port['HostIp'] + except KeyError: + ip = '' + + facts = dict( + ansible_ssh_host=ip, + ansible_ssh_port=port.get('HostPort', int()), + docker_name=name, + docker_short_id=short_id + ) + + for key in inspect: + fact_key = self._slugify(key) + facts[fact_key] = inspect.get(key) + + self.hostvars[name].update(facts) + + def _slugify(self, value): + return 'docker_%s' % (re.sub(r'[^\w-]', '_', value).lower().lstrip('_')) + + def get_hosts(self, config): + ''' + Determine the list of docker hosts we need to talk to. + + :param config: dictionary read from config file. can be empty. + :return: list of connection dictionaries + ''' + hosts = list() + + hosts_list = config.get('hosts') + defaults = config.get('defaults', dict()) + self.log('defaults:') + self.log(defaults, pretty_print=True) + def_host = defaults.get('host') + def_tls = defaults.get('tls') + def_tls_verify = defaults.get('tls_verify') + def_tls_hostname = defaults.get('tls_hostname') + def_ssl_version = defaults.get('ssl_version') + def_cert_path = defaults.get('cert_path') + def_cacert_path = defaults.get('cacert_path') + def_key_path = defaults.get('key_path') + def_version = defaults.get('version') + def_timeout = defaults.get('timeout') + def_ip = defaults.get('default_ip') + def_ssh_port = defaults.get('private_ssh_port') + + if hosts_list: + # use hosts from config file + for host in hosts_list: + docker_host = host.get('host') or def_host or self._args.docker_host or \ + self._env_args.docker_host or DEFAULT_DOCKER_HOST + api_version = host.get('version') or def_version or self._args.api_version or \ + self._env_args.api_version or DEFAULT_DOCKER_API_VERSION + tls_hostname = host.get('tls_hostname') or def_tls_hostname or self._args.tls_hostname or \ + self._env_args.tls_hostname or DEFAULT_TLS_HOSTNAME + tls_verify = host.get('tls_verify') or def_tls_verify or self._args.tls_verify or \ + self._env_args.tls_verify or DEFAULT_TLS_VERIFY + tls = host.get('tls') or def_tls or self._args.tls or self._env_args.tls or DEFAULT_TLS + ssl_version = host.get('ssl_version') or def_ssl_version or self._args.ssl_version or \ + self._env_args.ssl_version + + cert_path = host.get('cert_path') or def_cert_path or self._args.cert_path or \ + self._env_args.cert_path + if cert_path and cert_path == self._env_args.cert_path: + cert_path = os.path.join(cert_path, 'cert.pem') + + cacert_path = host.get('cacert_path') or def_cacert_path or self._args.cacert_path or \ + self._env_args.cert_path + if cacert_path and cacert_path == self._env_args.cert_path: + cacert_path = os.path.join(cacert_path, 'ca.pem') + + key_path = host.get('key_path') or def_key_path or self._args.key_path or \ + self._env_args.cert_path + if key_path and key_path == self._env_args.cert_path: + key_path = os.path.join(key_path, 'key.pem') + + timeout = host.get('timeout') or def_timeout or self._args.timeout or self._env_args.timeout or \ + DEFAULT_TIMEOUT_SECONDS + default_ip = host.get('default_ip') or def_ip or self._env_args.default_ip or \ + self._args.default_ip_address or DEFAULT_IP + default_ssh_port = host.get('private_ssh_port') or def_ssh_port or self._args.private_ssh_port or \ + DEFAULT_SSH_PORT + host_dict = dict( + docker_host=docker_host, + api_version=api_version, + tls=tls, + tls_verify=tls_verify, + tls_hostname=tls_hostname, + cert_path=cert_path, + cacert_path=cacert_path, + 
key_path=key_path, + ssl_version=ssl_version, + timeout=timeout, + default_ip=default_ip, + default_ssh_port=default_ssh_port, + ) + hosts.append(host_dict) + else: + # use default definition + docker_host = def_host or self._args.docker_host or self._env_args.docker_host or DEFAULT_DOCKER_HOST + api_version = def_version or self._args.api_version or self._env_args.api_version or \ + DEFAULT_DOCKER_API_VERSION + tls_hostname = def_tls_hostname or self._args.tls_hostname or self._env_args.tls_hostname or \ + DEFAULT_TLS_HOSTNAME + tls_verify = def_tls_verify or self._args.tls_verify or self._env_args.tls_verify or DEFAULT_TLS_VERIFY + tls = def_tls or self._args.tls or self._env_args.tls or DEFAULT_TLS + ssl_version = def_ssl_version or self._args.ssl_version or self._env_args.ssl_version + + cert_path = def_cert_path or self._args.cert_path or self._env_args.cert_path + if cert_path and cert_path == self._env_args.cert_path: + cert_path = os.path.join(cert_path, 'cert.pem') + + cacert_path = def_cacert_path or self._args.cacert_path or self._env_args.cert_path + if cacert_path and cacert_path == self._env_args.cert_path: + cacert_path = os.path.join(cacert_path, 'ca.pem') + + key_path = def_key_path or self._args.key_path or self._env_args.cert_path + if key_path and key_path == self._env_args.cert_path: + key_path = os.path.join(key_path, 'key.pem') + + timeout = def_timeout or self._args.timeout or self._env_args.timeout or DEFAULT_TIMEOUT_SECONDS + default_ip = def_ip or self._env_args.default_ip or self._args.default_ip_address or DEFAULT_IP + default_ssh_port = def_ssh_port or self._args.private_ssh_port or DEFAULT_SSH_PORT + host_dict = dict( + docker_host=docker_host, + api_version=api_version, + tls=tls, + tls_verify=tls_verify, + tls_hostname=tls_hostname, + cert_path=cert_path, + cacert_path=cacert_path, + key_path=key_path, + ssl_version=ssl_version, + timeout=timeout, + default_ip=default_ip, + default_ssh_port=default_ssh_port, + ) + hosts.append(host_dict) + self.log("hosts: ") + self.log(hosts, pretty_print=True) + return hosts + + def _parse_config_file(self): + config = dict() + config_file = DEFAULT_DOCKER_CONFIG_FILE + + if self._args.config_file: + config_file = self._args.config_file + elif self._env_args.config_file: + config_file = self._env_args.config_file + + config_file = os.path.abspath(config_file) + + if os.path.isfile(config_file): + with open(config_file) as f: + try: + config = yaml.safe_load(f.read()) + except Exception as exc: + self.fail("Error: parsing %s - %s" % (config_file, str(exc))) + else: + msg = "Error: config file given by {} does not exist - " + config_file + if self._args.config_file: + self.fail(msg.format('command line argument')) + elif self._env_args.config_file: + self.fail(msg.format(DOCKER_ENV_ARGS.get('config_file'))) + else: + self.log(msg.format('DEFAULT_DOCKER_CONFIG_FILE')) + return config + + def log(self, msg, pretty_print=False): + if self._args.debug: + log(msg, pretty_print) + + def fail(self, msg): + fail(msg) + + def _parse_env_args(self): + args = EnvArgs() + for key, value in DOCKER_ENV_ARGS.items(): + if os.environ.get(value): + val = os.environ.get(value) + if val in BOOLEANS_TRUE: + val = True + if val in BOOLEANS_FALSE: + val = False + setattr(args, key, val) + return args + + def _parse_cli_args(self): + # Parse command line arguments + + parser = argparse.ArgumentParser( + description='Return Ansible inventory for one or more Docker hosts.') + parser.add_argument('--list', action='store_true', default=True, + 
help='List all containers (default: True)')
+        parser.add_argument('--debug', action='store_true', default=False,
+                            help='Send debug messages to STDOUT')
+        parser.add_argument('--host', action='store',
+                            help='Only get information for a specific container.')
+        parser.add_argument('--pretty', action='store_true', default=False,
+                            help='Pretty print JSON output (default: False)')
+        parser.add_argument('--config-file', action='store', default=None,
+                            help="Name of the config file to use. Default is %s" % (DEFAULT_DOCKER_CONFIG_FILE))
+        parser.add_argument('--docker-host', action='store', default=None,
+                            help="The base URL or Unix socket path used to connect to the docker daemon. Defaults to %s"
+                                 % (DEFAULT_DOCKER_HOST))
+        parser.add_argument('--tls-hostname', action='store', default=None,
+                            help="Host name to expect in TLS certs. Defaults to %s" % DEFAULT_TLS_HOSTNAME)
+        parser.add_argument('--api-version', action='store', default=None,
+                            help="Docker daemon API version. Defaults to %s" % (DEFAULT_DOCKER_API_VERSION))
+        parser.add_argument('--timeout', action='store', default=None,
+                            help="Docker connection timeout in seconds. Defaults to %s"
+                                 % (DEFAULT_TIMEOUT_SECONDS))
+        parser.add_argument('--cacert-path', action='store', default=None,
+                            help="Path to the TLS certificate authority pem file.")
+        parser.add_argument('--cert-path', action='store', default=None,
+                            help="Path to the TLS certificate pem file.")
+        parser.add_argument('--key-path', action='store', default=None,
+                            help="Path to the TLS encryption key pem file.")
+        parser.add_argument('--ssl-version', action='store', default=None,
+                            help="TLS version number")
+        parser.add_argument('--tls', action='store_true', default=None,
+                            help="Use TLS. Defaults to %s" % (DEFAULT_TLS))
+        parser.add_argument('--tls-verify', action='store_true', default=None,
+                            help="Verify TLS certificates. Defaults to %s" % (DEFAULT_TLS_VERIFY))
+        parser.add_argument('--private-ssh-port', action='store', default=None,
+                            help="Default private container SSH Port. Defaults to %s" % (DEFAULT_SSH_PORT))
+        parser.add_argument('--default-ip-address', action='store', default=None,
+                            help="Default container SSH IP address. Defaults to %s" % (DEFAULT_IP))
+        return parser.parse_args()
+
+    def _json_format_dict(self, data, pretty_print=False):
+        # format inventory data for output
+        if pretty_print:
+            return json.dumps(data, sort_keys=True, indent=4)
+        else:
+            return json.dumps(data)
+
+
+def main():
+
+    if not HAS_DOCKER_PY:
+        fail("Failed to import docker-py. Try `pip install docker-py` - %s" % (HAS_DOCKER_ERROR))
+
+    DockerInventory().run()
+
+
+main()
diff --git a/scripts/inventory/docker.yml b/scripts/inventory/docker.yml
new file mode 100644
index 0000000000..97239392d1
--- /dev/null
+++ b/scripts/inventory/docker.yml
@@ -0,0 +1,74 @@
+# This is the configuration file for the Docker inventory script: docker.py.
+#
+# You can define the following in this file:
+#
+# defaults
+#    Defines a default connection. Defaults will be taken from this and applied to any values not provided
+#    for a host defined in the hosts list.
+#
+# hosts
+#   If you wish to get inventory from more than one Docker host, define a hosts list.
+#
+# For the default host and each host in the hosts list define the following attributes:
+#
+#  host:
+#      description: The URL or Unix socket path used to connect to the Docker API.
+#      required: yes
+#
+#  tls:
+#     description: Connect using TLS without verifying the authenticity of the Docker host server.
+#     default: false
+#     required: false
+#
+#  tls_verify:
+#     description: Connect using TLS and verify the authenticity of the Docker host server.
+#     default: false
+#     required: false
+#
+#  cert_path:
+#     description: Path to the client's TLS certificate file.
+#     default: null
+#     required: false
+#
+#  cacert_path:
+#     description: Use a CA certificate when performing server verification by providing the path to a CA certificate file.
+#     default: null
+#     required: false
+#
+#  key_path:
+#     description: Path to the client's TLS key file.
+#     default: null
+#     required: false
+#
+#  version:
+#     description: The Docker API version.
+#     required: false
+#     default: will be supplied by the docker-py module.
+#
+#  timeout:
+#     description: The amount of time in seconds to wait on an API response.
+#     required: false
+#     default: 60
+#
+#  default_ip:
+#     description: The IP address to assign to ansible_host when the container's SSH port is mapped to interface
+#     '0.0.0.0'.
+#     required: false
+#     default: 127.0.0.1
+#
+#  private_ssh_port:
+#     description: The port containers use for SSH.
+#     required: false
+#     default: 22
+
+#defaults:
+#   host: unix:///var/run/docker.sock
+#   private_ssh_port: 22
+#   default_ip: 127.0.0.1
+
+#hosts:
+#  - host: tcp://10.45.5.16:4243
+#    private_ssh_port: 2022
+#    default_ip: 172.16.3.45
+#  - host: tcp://localhost:4243
+#    private_ssh_port: 2029
diff --git a/scripts/inventory/fleet.py b/scripts/inventory/fleet.py
new file mode 100644
index 0000000000..dd0d4f7168
--- /dev/null
+++ b/scripts/inventory/fleet.py
@@ -0,0 +1,107 @@
+#!/usr/bin/env python
+"""
+fleetctl base external inventory script. Automatically finds the IPs of the booted coreos instances and
+returns them under the host group 'coreos'
+"""
+
+# Copyright (C) 2014 Andrew Rothstein
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+#
+# Thanks to the vagrant.py inventory script for giving me the basic structure
+# of this.
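+#
+# Usage (illustrative):
+#   ./fleet.py --list            # group every fleet machine under 'coreos'
+#   ./fleet.py --host 10.0.0.5   # connection variables for a single machine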
+#
+
+import sys
+import subprocess
+import re
+from optparse import OptionParser
+import json
+
+# Options
+# ------------------------------
+
+parser = OptionParser(usage="%prog [options] --list | --host <machine>")
+parser.add_option('--list', default=False, dest="list", action="store_true",
+                  help="Produce a JSON consumable grouping of servers in your fleet")
+parser.add_option('--host', default=None, dest="host",
+                  help="Generate additional host specific details for given host for Ansible")
+(options, args) = parser.parse_args()
+
+#
+# helper functions
+#
+
+
+def get_ssh_config():
+    configs = []
+    for box in list_running_boxes():
+        config = get_a_ssh_config(box)
+        configs.append(config)
+    return configs
+
+
+# list all the running instances in the fleet
+def list_running_boxes():
+    boxes = []
+    # decode() keeps this working on Python 3, where check_output() returns bytes
+    for line in subprocess.check_output(["fleetctl", "list-machines"]).decode('utf-8').split('\n'):
+        matcher = re.search(r"[^\s]+[\s]+([^\s]+).+", line)
+        if matcher and matcher.group(1) != "IP":
+            boxes.append(matcher.group(1))
+
+    return boxes
+
+
+def get_a_ssh_config(box_name):
+    config = {}
+    config['Host'] = box_name
+    config['ansible_ssh_user'] = 'core'
+    config['ansible_python_interpreter'] = '/opt/bin/python'
+    return config
+
+
+# List out servers that fleet has running
+# ------------------------------
+if options.list:
+    ssh_config = get_ssh_config()
+    hosts = {'coreos': []}
+
+    for data in ssh_config:
+        hosts['coreos'].append(data['Host'])
+
+    print(json.dumps(hosts))
+    sys.exit(0)
+
+# Get out the host details
+# ------------------------------
+elif options.host:
+    result = {}
+    ssh_config = get_ssh_config()
+
+    details = list(filter(lambda x: (x['Host'] == options.host), ssh_config))
+    if len(details) > 0:
+        # pass through the port, in case it's non standard.
+        result = details[0]
+
+    print(json.dumps(result))
+    sys.exit(0)
+
+
+# Print out help
+# ------------------------------
+else:
+    parser.print_help()
+    sys.exit(1)
diff --git a/scripts/inventory/foreman.ini b/scripts/inventory/foreman.ini
new file mode 100644
index 0000000000..d157963848
--- /dev/null
+++ b/scripts/inventory/foreman.ini
@@ -0,0 +1,200 @@
+# Foreman inventory (https://github.com/theforeman/foreman_ansible_inventory)
+#
+# This script can be used as an Ansible dynamic inventory.
+# The connection parameters are set up via *foreman.ini*.
+# This is how the script finds the configuration file, in
+# order of discovery:
+#
+# * `/etc/ansible/foreman.ini`
+# * Current directory of your inventory script.
+# * `FOREMAN_INI_PATH` environment variable.
+#
+# ## Variables and Parameters
+#
+# The data returned from Foreman for each host is stored in a foreman
+# hash so it is available as *host_vars* along with the parameters
+# of the host and its hostgroups:
+#
+#     "foo.example.com": {
+#        "foreman": {
+#          "architecture_id": 1,
+#          "architecture_name": "x86_64",
+#          "build": false,
+#          "build_status": 0,
+#          "build_status_label": "Installed",
+#          "capabilities": [
+#            "build",
+#            "image"
+#          ],
+#          "compute_profile_id": 4,
+#          "hostgroup_name": "webtier/myapp",
+#          "id": 70,
+#          "image_name": "debian8.1",
+#          ...
+#          "uuid": "50197c10-5ebb-b5cf-b384-a1e203e19e77"
+#        },
+#        "foreman_params": {
+#          "testparam1": "foobar",
+#          "testparam2": "small",
+#          ...
+#      }
+#
+# and could therefore be used in Ansible like:
+#
+#     - debug: msg="From Foreman host {{ foreman['uuid'] }}"
+#
+# Which yields
+#
+#     TASK [test_foreman : debug] ****************************************************
+#     ok: [foo.example.com] => {
+#         "msg": "From Foreman host 50190bd1-052a-a34a-3c9c-df37a39550bf"
+#     }
+#
+# ## Automatic Ansible groups
+#
+# The inventory will provide a set of groups, by default prefixed by
+# 'foreman_'. If you want to customize this prefix, change the
+# group_prefix option in /etc/ansible/foreman.ini. The rest of this
+# guide will assume the default prefix of 'foreman'
+#
+# The hostgroup, location, organization, content view, and lifecycle
+# environment of each host are created as Ansible groups with a
+# foreman_ prefix, all lowercase and problematic parameters
+# removed. So e.g. the foreman hostgroup
+#
+#     myapp / webtier / datacenter1
+#
+# would turn into the Ansible group:
+#
+#     foreman_hostgroup_myapp_webtier_datacenter1
+#
+# If the parameter want_hostcollections is set to true, the
+# collections each host is in are created as Ansible groups with a
+# foreman_hostcollection prefix, all lowercase and problematic
+# parameters removed. So e.g. the Foreman host collection
+#
+#     Patch Window Thursday
+#
+# would turn into the Ansible group:
+#
+#     foreman_hostcollection_patchwindowthursday
+#
+# If the parameter host_filters is set, it will be used as the
+# "search" parameter for the /api/v2/hosts call. This can be used to
+# restrict the list of returned hosts, as shown below.
+#
+# Furthermore, Ansible groups can be created on the fly using the
+# *group_patterns* variable in *foreman.ini* so that you can build up
+# hierarchies using parameters on the hostgroup and host variables.
+#
+# Let's assume you have a host that is built using this nested hostgroup:
+#
+#     myapp / webtier / datacenter1
+#
+# and each of the hostgroups defines a parameter, respectively:
+#
+#     myapp: app_param = myapp
+#     webtier: tier_param = webtier
+#     datacenter1: dc_param = datacenter1
+#
+# The host is also in a subnet called "mysubnet" and provisioned via an image;
+# then *group_patterns* like:
+#
+#     [ansible]
+#     group_patterns = ["{app_param}-{tier_param}-{dc_param}",
+#                       "{app_param}-{tier_param}",
+#                       "{app_param}",
+#                       "{subnet_name}-{provision_method}"]
+#
+# would put the host into the additional Ansible groups:
+#
+#     - myapp-webtier-datacenter1
+#     - myapp-webtier
+#     - myapp
+#     - mysubnet-image
+#
+# by recursively resolving the hostgroups, getting the parameter keys
+# and values and doing a Python *string.format()* like replacement on
+# it.
+#
+[foreman]
+url = http://localhost:3000/
+user = foreman
+password = secret
+ssl_verify = True
+
+# Foreman 1.24 introduces a new reports API to improve performance of the inventory script.
+# Note: This requires the foreman_ansible plugin to be installed.
+# Set to False if you want to use the old API. Defaults to True.
+
+use_reports_api = True
+
+# Retrieve only hosts from the organization "Web Engineering".
+# host_filters = organization="Web Engineering"
+
+# Retrieve only hosts from the organization "Web Engineering" that are
+# also in the host collection "Apache Servers".
+# host_filters = organization="Web Engineering" and host_collection="Apache Servers"
+
+# Foreman Inventory report related configuration options.
+# Configs that default to True:
+# want_organization, want_location, want_ipv4, want_host_group, want_subnet, want_smart_proxies, want_facts
+# Configs that default to False:
+# want_ipv6, want_subnet_v6, want_content_facet_attributes, want_host_params
+
+[report]
+# want_organization = True
+# want_location = True
+# want_ipv4 = True
+# want_ipv6 = False
+# want_host_group = True
+# want_subnet = True
+# want_subnet_v6 = False
+# want_smart_proxies = True
+# want_content_facet_attributes = False
+# want_host_params = False
+
+# Use this config to determine whether facts are fetched in the report and stored on the hosts.
+# want_facts = False
+
+# Upon receiving a request to return an inventory report, Foreman schedules a report generation job.
+# The script then polls the report_data endpoint repeatedly to check if the job is complete and retrieves data.
+# poll_interval defines the polling interval, in seconds, between two calls to the report_data endpoint.
+# Defaults to 10 seconds.
+
+# poll_interval = 10
+
+[ansible]
+group_patterns = ["{app}-{tier}-{color}",
+                  "{app}-{color}",
+                  "{app}",
+                  "{tier}"]
+group_prefix = foreman_
+
+# Whether to fetch facts from Foreman and store them on the host
+want_facts = True
+
+# Whether to create Ansible groups for host collections. Only tested
+# with Katello (Red Hat Satellite). Disabled by default to not break
+# the script for stand-alone Foreman.
+want_hostcollections = False
+
+# Whether to interpret global parameters value as JSON (if possible, else
+# take as is). Only tested with Katello (Red Hat Satellite).
+# This allows you to define list and dictionary (and more complicated)
+# variables by entering them as JSON strings in Foreman parameters.
+# Disabled by default as the change would otherwise not be backward compatible.
rich_params = False
+
+# Whether to populate the ansible_ssh_host variable to explicitly specify the
+# connection target. Only tested with Katello (Red Hat Satellite).
+# If the foreman 'ip' fact exists then the ansible_ssh_host variable is populated
+# to permit connections where DNS resolution fails.
want_ansible_ssh_host = False
+
+[cache]
+path = .
+max_age = 60
+
+# Whether to scan foreman to add recently created hosts in inventory cache
+scan_new_hosts = True
diff --git a/scripts/inventory/foreman.py b/scripts/inventory/foreman.py
new file mode 100644
index 0000000000..343cf26c9d
--- /dev/null
+++ b/scripts/inventory/foreman.py
@@ -0,0 +1,662 @@
+#!/usr/bin/env python
+# vim: set fileencoding=utf-8 :
+#
+# Copyright (C) 2016 Guido Günther,
+#                    Daniel Lobato Garcia
+#
+# This script is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with it. If not, see <http://www.gnu.org/licenses/>.
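+#
+# Usage (illustrative): foreman.py [--list] [--host <name>] [--refresh-cache]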
+# +# This is somewhat based on cobbler inventory + +# Stdlib imports +# __future__ imports must occur at the beginning of file +from __future__ import print_function +import json +import argparse +import copy +import os +import re +import sys +from time import time, sleep +from collections import defaultdict +from distutils.version import LooseVersion, StrictVersion + +# 3rd party imports +import requests +if LooseVersion(requests.__version__) < LooseVersion('1.1.0'): + print('This script requires python-requests 1.1 as a minimum version') + sys.exit(1) + +from requests.auth import HTTPBasicAuth + +from ansible.module_utils._text import to_text +from ansible.module_utils.six.moves import configparser as ConfigParser + + +def json_format_dict(data, pretty=False): + """Converts a dict to a JSON object and dumps it as a formatted string""" + + if pretty: + return json.dumps(data, sort_keys=True, indent=2) + else: + return json.dumps(data) + + +class ForemanInventory(object): + + def __init__(self): + self.inventory = defaultdict(list) # A list of groups and the hosts in that group + self.cache = dict() # Details about hosts in the inventory + self.params = dict() # Params of each host + self.facts = dict() # Facts of each host + self.hostgroups = dict() # host groups + self.hostcollections = dict() # host collections + self.session = None # Requests session + self.config_paths = [ + "/etc/ansible/foreman.ini", + os.path.dirname(os.path.realpath(__file__)) + '/foreman.ini', + ] + env_value = os.environ.get('FOREMAN_INI_PATH') + if env_value is not None: + self.config_paths.append(os.path.expanduser(os.path.expandvars(env_value))) + + def read_settings(self): + """Reads the settings from the foreman.ini file""" + + config = ConfigParser.SafeConfigParser() + config.read(self.config_paths) + + # Foreman API related + try: + self.foreman_url = config.get('foreman', 'url') + self.foreman_user = config.get('foreman', 'user') + self.foreman_pw = config.get('foreman', 'password', raw=True) + self.foreman_ssl_verify = config.getboolean('foreman', 'ssl_verify') + except (ConfigParser.NoOptionError, ConfigParser.NoSectionError) as e: + print("Error parsing configuration: %s" % e, file=sys.stderr) + return False + + # Inventory Report Related + try: + self.foreman_use_reports_api = config.getboolean('foreman', 'use_reports_api') + except (ConfigParser.NoOptionError, ConfigParser.NoSectionError): + self.foreman_use_reports_api = True + + try: + self.want_organization = config.getboolean('report', 'want_organization') + except (ConfigParser.NoOptionError, ConfigParser.NoSectionError): + self.want_organization = True + + try: + self.want_location = config.getboolean('report', 'want_location') + except (ConfigParser.NoOptionError, ConfigParser.NoSectionError): + self.want_location = True + + try: + self.want_IPv4 = config.getboolean('report', 'want_ipv4') + except (ConfigParser.NoOptionError, ConfigParser.NoSectionError): + self.want_IPv4 = True + + try: + self.want_IPv6 = config.getboolean('report', 'want_ipv6') + except (ConfigParser.NoOptionError, ConfigParser.NoSectionError): + self.want_IPv6 = False + + try: + self.want_host_group = config.getboolean('report', 'want_host_group') + except (ConfigParser.NoOptionError, ConfigParser.NoSectionError): + self.want_host_group = True + + try: + self.want_host_params = config.getboolean('report', 'want_host_params') + except (ConfigParser.NoOptionError, ConfigParser.NoSectionError): + self.want_host_params = False + + try: + self.want_subnet = 
config.getboolean('report', 'want_subnet') + except (ConfigParser.NoOptionError, ConfigParser.NoSectionError): + self.want_subnet = True + + try: + self.want_subnet_v6 = config.getboolean('report', 'want_subnet_v6') + except (ConfigParser.NoOptionError, ConfigParser.NoSectionError): + self.want_subnet_v6 = False + + try: + self.want_smart_proxies = config.getboolean('report', 'want_smart_proxies') + except (ConfigParser.NoOptionError, ConfigParser.NoSectionError): + self.want_smart_proxies = True + + try: + self.want_content_facet_attributes = config.getboolean('report', 'want_content_facet_attributes') + except (ConfigParser.NoOptionError, ConfigParser.NoSectionError): + self.want_content_facet_attributes = False + + try: + self.report_want_facts = config.getboolean('report', 'want_facts') + except (ConfigParser.NoOptionError, ConfigParser.NoSectionError): + self.report_want_facts = True + + try: + self.poll_interval = config.getint('report', 'poll_interval') + except (ConfigParser.NoOptionError, ConfigParser.NoSectionError): + self.poll_interval = 10 + + # Ansible related + try: + group_patterns = config.get('ansible', 'group_patterns') + except (ConfigParser.NoOptionError, ConfigParser.NoSectionError): + group_patterns = "[]" + + self.group_patterns = json.loads(group_patterns) + + try: + self.group_prefix = config.get('ansible', 'group_prefix') + except (ConfigParser.NoOptionError, ConfigParser.NoSectionError): + self.group_prefix = "foreman_" + + try: + self.want_facts = config.getboolean('ansible', 'want_facts') + except (ConfigParser.NoOptionError, ConfigParser.NoSectionError): + self.want_facts = True + + self.want_facts = self.want_facts and self.report_want_facts + + try: + self.want_hostcollections = config.getboolean('ansible', 'want_hostcollections') + except (ConfigParser.NoOptionError, ConfigParser.NoSectionError): + self.want_hostcollections = False + + try: + self.want_ansible_ssh_host = config.getboolean('ansible', 'want_ansible_ssh_host') + except (ConfigParser.NoOptionError, ConfigParser.NoSectionError): + self.want_ansible_ssh_host = False + + # Do we want parameters to be interpreted if possible as JSON? (no by default) + try: + self.rich_params = config.getboolean('ansible', 'rich_params') + except (ConfigParser.NoOptionError, ConfigParser.NoSectionError): + self.rich_params = False + + try: + self.host_filters = config.get('foreman', 'host_filters') + except (ConfigParser.NoOptionError, ConfigParser.NoSectionError): + self.host_filters = None + + # Cache related + try: + cache_path = os.path.expanduser(config.get('cache', 'path')) + except (ConfigParser.NoOptionError, ConfigParser.NoSectionError): + cache_path = '.' 
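+        # Each data type gets its own cache file under cache_path, named after the
+        # script basename: foreman.cache, foreman.index, foreman.params, foreman.facts
+        # and foreman.hostcollections.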
+        (script, ext) = os.path.splitext(os.path.basename(__file__))
+        self.cache_path_cache = cache_path + "/%s.cache" % script
+        self.cache_path_inventory = cache_path + "/%s.index" % script
+        self.cache_path_params = cache_path + "/%s.params" % script
+        self.cache_path_facts = cache_path + "/%s.facts" % script
+        self.cache_path_hostcollections = cache_path + "/%s.hostcollections" % script
+        try:
+            self.cache_max_age = config.getint('cache', 'max_age')
+        except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
+            self.cache_max_age = 60
+        try:
+            self.scan_new_hosts = config.getboolean('cache', 'scan_new_hosts')
+        except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
+            self.scan_new_hosts = False
+
+        return True
+
+    def parse_cli_args(self):
+        """Command line argument processing"""
+
+        parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on foreman')
+        parser.add_argument('--list', action='store_true', default=True, help='List instances (default: True)')
+        parser.add_argument('--host', action='store', help='Get all the variables about a specific instance')
+        parser.add_argument('--refresh-cache', action='store_true', default=False,
+                            help='Force refresh of cache by making API requests to foreman (default: False - use cache files)')
+        self.args = parser.parse_args()
+
+    def _get_session(self):
+        if not self.session:
+            self.session = requests.session()
+            self.session.auth = HTTPBasicAuth(self.foreman_user, self.foreman_pw)
+            self.session.verify = self.foreman_ssl_verify
+        return self.session
+
+    def _get_json(self, url, ignore_errors=None, params=None):
+        if params is None:
+            params = {}
+        params['per_page'] = 250
+
+        page = 1
+        results = []
+        s = self._get_session()
+        while True:
+            params['page'] = page
+            ret = s.get(url, params=params)
+            if ignore_errors and ret.status_code in ignore_errors:
+                break
+            ret.raise_for_status()
+            json = ret.json()
+            # /hosts/:id has no results key
+            if 'results' not in json:
+                return json
+            # Facts are returned as a dict in results, not a list
+            if isinstance(json['results'], dict):
+                return json['results']
+            # The list of all hosts is returned paginated
+            results = results + json['results']
+            if len(results) >= json['subtotal']:
+                break
+            page += 1
+            if len(json['results']) == 0:
+                print("Did not make any progress during loop. 
" + "expected %d got %d" % (json['total'], len(results)), + file=sys.stderr) + break + return results + + def _use_inventory_report(self): + if not self.foreman_use_reports_api: + return False + status_url = "%s/api/v2/status" % self.foreman_url + result = self._get_json(status_url) + foreman_version = (LooseVersion(result.get('version')) >= LooseVersion('1.24.0')) + return foreman_version + + def _fetch_params(self): + options, params = ("no", "yes"), dict() + params["Organization"] = options[self.want_organization] + params["Location"] = options[self.want_location] + params["IPv4"] = options[self.want_IPv4] + params["IPv6"] = options[self.want_IPv6] + params["Facts"] = options[self.want_facts] + params["Host Group"] = options[self.want_host_group] + params["Host Collections"] = options[self.want_hostcollections] + params["Subnet"] = options[self.want_subnet] + params["Subnet v6"] = options[self.want_subnet_v6] + params["Smart Proxies"] = options[self.want_smart_proxies] + params["Content Attributes"] = options[self.want_content_facet_attributes] + params["Host Parameters"] = options[self.want_host_params] + if self.host_filters: + params["Hosts"] = self.host_filters + return params + + def _post_request(self): + url = "%s/ansible/api/v2/ansible_inventories/schedule" % self.foreman_url + session = self._get_session() + params = {'input_values': self._fetch_params()} + ret = session.post(url, json=params) + if not ret: + raise Exception("Error scheduling inventory report on foreman. Please check foreman logs!") + url = "{0}/{1}".format(self.foreman_url, ret.json().get('data_url')) + response = session.get(url) + while response: + if response.status_code != 204: + break + else: + sleep(self.poll_interval) + response = session.get(url) + if not response: + raise Exception("Error receiving inventory report from foreman. 
Please check foreman logs!") + else: + return response.json() + + def _get_hosts(self): + url = "%s/api/v2/hosts" % self.foreman_url + + params = {} + if self.host_filters: + params['search'] = self.host_filters + + return self._get_json(url, params=params) + + def _get_host_data_by_id(self, hid): + url = "%s/api/v2/hosts/%s" % (self.foreman_url, hid) + return self._get_json(url) + + def _get_facts_by_id(self, hid): + url = "%s/api/v2/hosts/%s/facts" % (self.foreman_url, hid) + return self._get_json(url) + + def _resolve_params(self, host_params): + """Convert host params to dict""" + params = {} + + for param in host_params: + name = param['name'] + if self.rich_params: + try: + params[name] = json.loads(param['value']) + except ValueError: + params[name] = param['value'] + else: + params[name] = param['value'] + + return params + + def _get_facts(self, host): + """Fetch all host facts of the host""" + if not self.want_facts: + return {} + + ret = self._get_facts_by_id(host['id']) + if len(ret.values()) == 0: + facts = {} + elif len(ret.values()) == 1: + facts = list(ret.values())[0] + else: + raise ValueError("More than one set of facts returned for '%s'" % host) + return facts + + def write_to_cache(self, data, filename): + """Write data in JSON format to a file""" + json_data = json_format_dict(data, True) + cache = open(filename, 'w') + cache.write(json_data) + cache.close() + + def _write_cache(self): + self.write_to_cache(self.cache, self.cache_path_cache) + self.write_to_cache(self.inventory, self.cache_path_inventory) + self.write_to_cache(self.params, self.cache_path_params) + self.write_to_cache(self.facts, self.cache_path_facts) + self.write_to_cache(self.hostcollections, self.cache_path_hostcollections) + + def to_safe(self, word): + '''Converts 'bad' characters in a string to underscores + so they can be used as Ansible groups + + >>> ForemanInventory.to_safe("foo-bar baz") + 'foo_barbaz' + ''' + regex = r"[^A-Za-z0-9\_]" + return re.sub(regex, "_", word.replace(" ", "")) + + def update_cache(self, scan_only_new_hosts=False): + """Make calls to foreman and save the output in a cache""" + use_inventory_report = self._use_inventory_report() + if use_inventory_report: + self._update_cache_inventory(scan_only_new_hosts) + else: + self._update_cache_host_api(scan_only_new_hosts) + + def _update_cache_inventory(self, scan_only_new_hosts): + self.groups = dict() + self.hosts = dict() + try: + inventory_report_response = self._post_request() + except Exception: + self._update_cache_host_api(scan_only_new_hosts) + return + host_data = json.loads(inventory_report_response) + for host in host_data: + if not(host) or (host["name"] in self.cache.keys() and scan_only_new_hosts): + continue + dns_name = host['name'] + + host_params = host.pop('host_parameters', {}) + fact_list = host.pop('facts', {}) + content_facet_attributes = host.get('content_attributes', {}) or {} + + # Create ansible groups for hostgroup + group = 'host_group' + val = host.get(group) + if val: + safe_key = self.to_safe('%s%s_%s' % ( + to_text(self.group_prefix), + group, + to_text(val).lower() + )) + self.inventory[safe_key].append(dns_name) + + # Create ansible groups for environment, location and organization + for group in ['environment', 'location', 'organization']: + val = host.get('%s' % group) + if val: + safe_key = self.to_safe('%s%s_%s' % ( + to_text(self.group_prefix), + group, + to_text(val).lower() + )) + self.inventory[safe_key].append(dns_name) + + for group in ['lifecycle_environment', 'content_view']: 
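+                # Katello-specific groups: lifecycle environment and content view
+                # names are read from the report's content_attributes entry.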
+ val = content_facet_attributes.get('%s_name' % group) + if val: + safe_key = self.to_safe('%s%s_%s' % ( + to_text(self.group_prefix), + group, + to_text(val).lower() + )) + self.inventory[safe_key].append(dns_name) + + params = host_params + + # Ansible groups by parameters in host groups and Foreman host + # attributes. + groupby = dict() + for k, v in params.items(): + groupby[k] = self.to_safe(to_text(v)) + + # The name of the ansible groups is given by group_patterns: + for pattern in self.group_patterns: + try: + key = pattern.format(**groupby) + self.inventory[key].append(dns_name) + except KeyError: + pass # Host not part of this group + + if self.want_hostcollections: + hostcollections = host.get('host_collections') + + if hostcollections: + # Create Ansible groups for host collections + for hostcollection in hostcollections: + safe_key = self.to_safe('%shostcollection_%s' % (self.group_prefix, hostcollection.lower())) + self.inventory[safe_key].append(dns_name) + + self.hostcollections[dns_name] = hostcollections + + self.cache[dns_name] = host + self.params[dns_name] = params + self.facts[dns_name] = fact_list + self.inventory['all'].append(dns_name) + self._write_cache() + + def _update_cache_host_api(self, scan_only_new_hosts): + """Make calls to foreman and save the output in a cache""" + + self.groups = dict() + self.hosts = dict() + + for host in self._get_hosts(): + if host['name'] in self.cache.keys() and scan_only_new_hosts: + continue + dns_name = host['name'] + + host_data = self._get_host_data_by_id(host['id']) + host_params = host_data.get('all_parameters', {}) + + # Create ansible groups for hostgroup + group = 'hostgroup' + val = host.get('%s_title' % group) or host.get('%s_name' % group) + if val: + safe_key = self.to_safe('%s%s_%s' % ( + to_text(self.group_prefix), + group, + to_text(val).lower() + )) + self.inventory[safe_key].append(dns_name) + + # Create ansible groups for environment, location and organization + for group in ['environment', 'location', 'organization']: + val = host.get('%s_name' % group) + if val: + safe_key = self.to_safe('%s%s_%s' % ( + to_text(self.group_prefix), + group, + to_text(val).lower() + )) + self.inventory[safe_key].append(dns_name) + + for group in ['lifecycle_environment', 'content_view']: + val = host.get('content_facet_attributes', {}).get('%s_name' % group) + if val: + safe_key = self.to_safe('%s%s_%s' % ( + to_text(self.group_prefix), + group, + to_text(val).lower() + )) + self.inventory[safe_key].append(dns_name) + + params = self._resolve_params(host_params) + + # Ansible groups by parameters in host groups and Foreman host + # attributes. 
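+            # Build a {parameter_name: sanitized_value} map; each group_patterns entry
+            # is expanded with str.format(), and a pattern referencing a key this host
+            # lacks is simply skipped (KeyError below).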
+ groupby = dict() + for k, v in params.items(): + groupby[k] = self.to_safe(to_text(v)) + + # The name of the ansible groups is given by group_patterns: + for pattern in self.group_patterns: + try: + key = pattern.format(**groupby) + self.inventory[key].append(dns_name) + except KeyError: + pass # Host not part of this group + + if self.want_hostcollections: + hostcollections = host_data.get('host_collections') + + if hostcollections: + # Create Ansible groups for host collections + for hostcollection in hostcollections: + safe_key = self.to_safe('%shostcollection_%s' % (self.group_prefix, hostcollection['name'].lower())) + self.inventory[safe_key].append(dns_name) + + self.hostcollections[dns_name] = hostcollections + + self.cache[dns_name] = host + self.params[dns_name] = params + self.facts[dns_name] = self._get_facts(host) + self.inventory['all'].append(dns_name) + self._write_cache() + + def is_cache_valid(self): + """Determines if the cache is still valid""" + if os.path.isfile(self.cache_path_cache): + mod_time = os.path.getmtime(self.cache_path_cache) + current_time = time() + if (mod_time + self.cache_max_age) > current_time: + if (os.path.isfile(self.cache_path_inventory) and + os.path.isfile(self.cache_path_params) and + os.path.isfile(self.cache_path_facts)): + return True + return False + + def load_inventory_from_cache(self): + """Read the index from the cache file sets self.index""" + + with open(self.cache_path_inventory, 'r') as fp: + self.inventory = json.load(fp) + + def load_params_from_cache(self): + """Read the index from the cache file sets self.index""" + + with open(self.cache_path_params, 'r') as fp: + self.params = json.load(fp) + + def load_facts_from_cache(self): + """Read the index from the cache file sets self.facts""" + + if not self.want_facts: + return + with open(self.cache_path_facts, 'r') as fp: + self.facts = json.load(fp) + + def load_hostcollections_from_cache(self): + """Read the index from the cache file sets self.hostcollections""" + + if not self.want_hostcollections: + return + with open(self.cache_path_hostcollections, 'r') as fp: + self.hostcollections = json.load(fp) + + def load_cache_from_cache(self): + """Read the cache from the cache file sets self.cache""" + + with open(self.cache_path_cache, 'r') as fp: + self.cache = json.load(fp) + + def get_inventory(self): + if self.args.refresh_cache or not self.is_cache_valid(): + self.update_cache() + else: + self.load_inventory_from_cache() + self.load_params_from_cache() + self.load_facts_from_cache() + self.load_hostcollections_from_cache() + self.load_cache_from_cache() + if self.scan_new_hosts: + self.update_cache(True) + + def get_host_info(self): + """Get variables about a specific host""" + + if not self.cache or len(self.cache) == 0: + # Need to load index from cache + self.load_cache_from_cache() + + if self.args.host not in self.cache: + # try updating the cache + self.update_cache() + + if self.args.host not in self.cache: + # host might not exist anymore + return json_format_dict({}, True) + + return json_format_dict(self.cache[self.args.host], True) + + def _print_data(self): + data_to_print = "" + if self.args.host: + data_to_print += self.get_host_info() + else: + self.inventory['_meta'] = {'hostvars': {}} + for hostname in self.cache: + self.inventory['_meta']['hostvars'][hostname] = { + 'foreman': self.cache[hostname], + 'foreman_params': self.params[hostname], + } + if self.want_ansible_ssh_host and 'ip' in self.cache[hostname]: + 
self.inventory['_meta']['hostvars'][hostname]['ansible_ssh_host'] = self.cache[hostname]['ip'] +                if self.want_facts: +                    self.inventory['_meta']['hostvars'][hostname]['foreman_facts'] = self.facts[hostname] + +            data_to_print += json_format_dict(self.inventory, True) + +        print(data_to_print) + +    def run(self): +        # Read settings and parse CLI arguments +        if not self.read_settings(): +            return False +        self.parse_cli_args() +        self.get_inventory() +        self._print_data() +        return True + + +if __name__ == '__main__': +    sys.exit(not ForemanInventory().run()) diff --git a/scripts/inventory/freeipa.py b/scripts/inventory/freeipa.py new file mode 100644 index 0000000000..cb5ccc071a --- /dev/null +++ b/scripts/inventory/freeipa.py @@ -0,0 +1,123 @@ +#!/usr/bin/env python +# Copyright (c) 2017 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +import argparse +from distutils.version import LooseVersion +import json +import os +import sys +from ipalib import api, errors, __version__ as IPA_VERSION +from ansible.module_utils.six import u + + +def initialize(): +    ''' +    This function initializes the FreeIPA/IPA API. This function requires +    no arguments. A Kerberos key must be present in the user's keyring in +    order for this to work. The default IPA configuration directory is /etc/ipa; +    this path can be overridden with the IPA_CONFDIR environment variable. +    ''' + +    api.bootstrap(context='cli') + +    if not os.path.isdir(api.env.confdir): +        print("WARNING: IPA configuration directory (%s) is missing. " +              "Environment variable IPA_CONFDIR could be used to override " +              "default path." % api.env.confdir) + +    if LooseVersion(IPA_VERSION) >= LooseVersion('4.6.2'): +        # With ipalib < 4.6.0, 'server' and 'domain' have default values +        # ('localhost:8888', 'example.com'); newer versions don't, and when +        # DNS autodiscovery is broken, one of jsonrpc_uri / xmlrpc_uri is +        # required. +        # ipalib 4.6.0 is unusable (https://pagure.io/freeipa/issue/7132), +        # which is why 4.6.2 is explicitly tested. +        if 'server' not in api.env or 'domain' not in api.env: +            sys.exit("ERROR: ('jsonrpc_uri' or 'xmlrpc_uri') or 'domain' are not " +                     "defined in '[global]' section of '%s' nor in '%s'." % +                     (api.env.conf, api.env.conf_default)) + +    api.finalize() +    try: +        api.Backend.rpcclient.connect() +    except AttributeError: +        # FreeIPA < 4.0 compatibility +        api.Backend.xmlclient.connect() + +    return api + + +def list_groups(api): +    ''' +    This function prints a list of all host groups. This function requires +    one argument, the FreeIPA/IPA API object. +    ''' + +    inventory = {} +    hostvars = {} + +    result = api.Command.hostgroup_find(all=True)['result'] + +    for hostgroup in result: +        # Get direct and indirect members (nested hostgroups) of hostgroup +        members = [] + +        if 'member_host' in hostgroup: +            members = [host for host in hostgroup['member_host']] +        if 'memberindirect_host' in hostgroup: +            members += (host for host in hostgroup['memberindirect_host']) +        inventory[hostgroup['cn'][0]] = {'hosts': [host for host in members]} + +        for member in members: +            hostvars[member] = {} + +    inventory['_meta'] = {'hostvars': hostvars} +    inv_string = json.dumps(inventory, indent=1, sort_keys=True) +    print(inv_string) + +    return None + + +def parse_args(): +    ''' +    This function parses the arguments that were passed in via the command line. +    This function expects no arguments.
+    ''' + +    parser = argparse.ArgumentParser(description='Ansible FreeIPA/IPA ' +                                                 'inventory module') +    group = parser.add_mutually_exclusive_group(required=True) +    group.add_argument('--list', action='store_true', +                       help='List active servers') +    group.add_argument('--host', help='List details about the specified host') + +    return parser.parse_args() + + +def get_host_attributes(api, host): +    """ +    This function expects one string, the hostname to look up variables for. +    Args: +        api: FreeIPA API Object +        host: Name of the host + +    Returns: JSON string of host variables if found, else an empty dict +    """ +    try: +        result = api.Command.host_show(u(host))['result'] +        if 'usercertificate' in result: +            del result['usercertificate'] +        return json.dumps(result, indent=1) +    except errors.NotFound: +        return {} + + +if __name__ == '__main__': +    args = parse_args() +    api = initialize() + +    if args.host: +        print(get_host_attributes(api, args.host)) +    elif args.list: +        list_groups(api) diff --git a/scripts/inventory/gce.ini b/scripts/inventory/gce.ini new file mode 100644 index 0000000000..af27a9c4ab --- /dev/null +++ b/scripts/inventory/gce.ini @@ -0,0 +1,76 @@ +# Copyright 2013 Google Inc. +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +# The GCE inventory script has the following dependencies: +# 1. A valid Google Cloud Platform account with Google Compute Engine +# enabled. See https://cloud.google.com +# 2. An OAuth2 Service Account flow should be enabled. This will generate +# a private key file that the inventory script will use for API request +# authorization. See https://developers.google.com/accounts/docs/OAuth2 +# 3. Convert the private key from PKCS12 to PEM format +# $ openssl pkcs12 -in pkey.pkcs12 -passin pass:notasecret \ +# > -nodes -nocerts | openssl rsa -out pkey.pem +# 4. The libcloud (>=0.13.3) Python library. See http://libcloud.apache.org +# +# (See ansible/test/gce_tests.py comments for full install instructions) +# +# Author: Eric Johnson +# Contributors: John Roach + +[gce] +# GCE Service Account configuration information can be stored in the +# libcloud 'secrets.py' file. Ideally, the 'secrets.py' file will already +# exist in your PYTHONPATH and be picked up automatically with an import +# statement in the inventory script. However, you can specify an absolute +# path to the secrets.py file with 'libcloud_secrets' parameter. +# This option will be deprecated in a future release. +libcloud_secrets = + +# If you are not going to use a 'secrets.py' file, you can set the necessary +# authorization parameters here. +# You can add multiple GCE projects by using a comma-separated list. Make +# sure that the service account used has permissions on said projects. +gce_service_account_email_address = +gce_service_account_pem_file_path = +gce_project_id = +gce_zone = + +# Filter inventory based on state. Leave undefined to return instances regardless of state.
+# example: Uncomment to only return inventory in the running or provisioning state +#instance_states = RUNNING,PROVISIONING + +# Filter inventory based on instance tags. Leave undefined to return instances regardless of tags. +# example: Uncomment to only return inventory with the http-server or https-server tag +#instance_tags = http-server,https-server + + +[inventory] +# The 'inventory_ip_type' parameter specifies whether 'ansible_ssh_host' should +# contain the instance internal or external address. Values may be either +# 'internal' or 'external'. If 'external' is specified but no external instance +# address exists, the internal address will be used. +# The INVENTORY_IP_TYPE environment variable will override this value. +inventory_ip_type = + +[cache] +# directory in which cache should be created +cache_path = ~/.ansible/tmp + +# The number of seconds a cache file is considered valid. After this many +# seconds, a new API call will be made, and the cache file will be updated. +# To disable the cache, set this value to 0 +cache_max_age = 300 diff --git a/scripts/inventory/gce.py b/scripts/inventory/gce.py new file mode 100644 index 0000000000..0a7df3f52a --- /dev/null +++ b/scripts/inventory/gce.py @@ -0,0 +1,521 @@ +#!/usr/bin/env python + +# Copyright: (c) 2013, Google Inc. +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +''' +GCE external inventory script +================================= + +Generates inventory that Ansible can understand by making API requests to +Google Compute Engine via the libcloud library. Full install/configuration +instructions for the gce* modules can be found in the comments of +ansible/test/gce_tests.py. + +When run against a specific host, this script returns the following variables +based on the data obtained from the libcloud Node object: + - gce_uuid + - gce_id + - gce_image + - gce_machine_type + - gce_private_ip + - gce_public_ip + - gce_name + - gce_description + - gce_status + - gce_zone + - gce_tags + - gce_metadata + - gce_network + - gce_subnetwork + +When run in --list mode, instances are grouped by the following categories: + - zone: +   zone group name examples are us-central1-b, europe-west1-a, etc. + - instance tags: +   An entry is created for each tag. For example, if you have two instances +   with a common tag called 'foo', they will both be grouped together under +   the 'tag_foo' name. + - network name: +   the name of the network is appended to 'network_' (e.g. the 'default' +   network will result in a group named 'network_default') + - machine type +   types follow a pattern like n1-standard-4, g1-small, etc. + - running status: +   group name prefixed with 'status_' (e.g. status_running, status_stopped, ...) + - image: +   when using an ephemeral/scratch disk, this will be set to the image name +   used when creating the instance (e.g. debian-7-wheezy-v20130816). When +   your instance was created with a root persistent disk it will be set to +   'persistent_disk' since there is no current way to determine the image.
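Each of the categories listed above is a simple transform on one node attribute. A condensed sketch of how those group names are derived, with hypothetical node data standing in for the libcloud Node object the script really uses:

# Hypothetical node attributes; the script itself reads these from libcloud Node objects.
node = {'name': 'web-1', 'zone': 'us-central1-a', 'tags': ['http-server'],
        'network': 'default', 'machine_type': 'n1-standard-4', 'status': 'RUNNING'}

groups = {}

def add(group, host):
    groups.setdefault(group, []).append(host)

add(node['zone'], node['name'])                           # zone group
for t in node['tags']:
    add('tag_%s' % t, node['name'])                       # instance-tag groups
add('network_%s' % node['network'], node['name'])         # network-name group
add(node['machine_type'], node['name'])                   # machine-type group
add('status_%s' % node['status'].lower(), node['name'])   # running-status group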
+ +Examples: + Execute uname on all instances in the us-central1-a zone + $ ansible -i gce.py us-central1-a -m shell -a "/bin/uname -a" + + Use the GCE inventory script to print out instance specific information + $ contrib/inventory/gce.py --host my_instance + +Author: Eric Johnson +Contributors: Matt Hite , Tom Melendez , + John Roach +Version: 0.0.4 +''' + +try: + import pkg_resources +except ImportError: + # Use pkg_resources to find the correct versions of libraries and set + # sys.path appropriately when there are multiversion installs. We don't + # fail here as there is code that better expresses the errors where the + # library is used. + pass + +USER_AGENT_PRODUCT = "Ansible-gce_inventory_plugin" +USER_AGENT_VERSION = "v2" + +import sys +import os +import argparse + +from time import time + +from ansible.module_utils.six.moves import configparser + +import logging +logging.getLogger('libcloud.common.google').addHandler(logging.NullHandler()) + +import json + +try: + from libcloud.compute.types import Provider + from libcloud.compute.providers import get_driver + _ = Provider.GCE +except Exception: + sys.exit("GCE inventory script requires libcloud >= 0.13") + + +class CloudInventoryCache(object): + def __init__(self, cache_name='ansible-cloud-cache', cache_path='/tmp', + cache_max_age=300): + cache_dir = os.path.expanduser(cache_path) + if not os.path.exists(cache_dir): + os.makedirs(cache_dir) + self.cache_path_cache = os.path.join(cache_dir, cache_name) + + self.cache_max_age = cache_max_age + + def is_valid(self, max_age=None): + ''' Determines if the cache files have expired, or if it is still valid ''' + + if max_age is None: + max_age = self.cache_max_age + + if os.path.isfile(self.cache_path_cache): + mod_time = os.path.getmtime(self.cache_path_cache) + current_time = time() + if (mod_time + max_age) > current_time: + return True + + return False + + def get_all_data_from_cache(self, filename=''): + ''' Reads the JSON inventory from the cache file. Returns Python dictionary. ''' + + data = '' + if not filename: + filename = self.cache_path_cache + with open(filename, 'r') as cache: + data = cache.read() + return json.loads(data) + + def write_to_cache(self, data, filename=''): + ''' Writes data to file as JSON. Returns True. 
''' + if not filename: + filename = self.cache_path_cache + json_data = json.dumps(data) + with open(filename, 'w') as cache: + cache.write(json_data) + return True + + +class GceInventory(object): + def __init__(self): + # Cache object + self.cache = None + # dictionary containing inventory read from disk + self.inventory = {} + + # Read settings and parse CLI arguments + self.parse_cli_args() + self.config = self.get_config() + self.drivers = self.get_gce_drivers() + self.ip_type = self.get_inventory_options() + if self.ip_type: + self.ip_type = self.ip_type.lower() + + # Cache management + start_inventory_time = time() + cache_used = False + if self.args.refresh_cache or not self.cache.is_valid(): + self.do_api_calls_update_cache() + else: + self.load_inventory_from_cache() + cache_used = True + self.inventory['_meta']['stats'] = {'use_cache': True} + self.inventory['_meta']['stats'] = { + 'inventory_load_time': time() - start_inventory_time, + 'cache_used': cache_used + } + + # Just display data for specific host + if self.args.host: + print(self.json_format_dict( + self.inventory['_meta']['hostvars'][self.args.host], + pretty=self.args.pretty)) + else: + # Otherwise, assume user wants all instances grouped + zones = self.parse_env_zones() + print(self.json_format_dict(self.inventory, + pretty=self.args.pretty)) + sys.exit(0) + + def get_config(self): + """ + Reads the settings from the gce.ini file. + + Populates a ConfigParser object with defaults and + attempts to read an .ini-style configuration from the filename + specified in GCE_INI_PATH. If the environment variable is + not present, the filename defaults to gce.ini in the current + working directory. + """ + gce_ini_default_path = os.path.join( + os.path.dirname(os.path.realpath(__file__)), "gce.ini") + gce_ini_path = os.environ.get('GCE_INI_PATH', gce_ini_default_path) + + # Create a ConfigParser. + # This provides empty defaults to each key, so that environment + # variable configuration (as opposed to INI configuration) is able + # to work. + config = configparser.ConfigParser(defaults={ + 'gce_service_account_email_address': '', + 'gce_service_account_pem_file_path': '', + 'gce_project_id': '', + 'gce_zone': '', + 'libcloud_secrets': '', + 'instance_tags': '', + 'inventory_ip_type': '', + 'cache_path': '~/.ansible/tmp', + 'cache_max_age': '300' + }) + if 'gce' not in config.sections(): + config.add_section('gce') + if 'inventory' not in config.sections(): + config.add_section('inventory') + if 'cache' not in config.sections(): + config.add_section('cache') + + config.read(gce_ini_path) + + ######### + # Section added for processing ini settings + ######### + + # Set the instance_states filter based on config file options + self.instance_states = [] + if config.has_option('gce', 'instance_states'): + states = config.get('gce', 'instance_states') + # Ignore if instance_states is an empty string. 
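The cache handling wired up here reduces to a file-mtime comparison: CloudInventoryCache treats the cache as fresh while its mtime plus cache_max_age is still in the future, and only falls back to API calls otherwise. A self-contained sketch of that check, with a hypothetical cache path:

import os
from time import time

def cache_is_fresh(path, max_age=300):
    # Fresh while the file exists and mtime + max_age is still in the future.
    return os.path.isfile(path) and (os.path.getmtime(path) + max_age) > time()

# Hypothetical usage:
# if not cache_is_fresh('/tmp/ansible-gce.cache', 300):
#     data = do_api_calls_and_rewrite_cache()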
+        if states: +            self.instance_states = states.split(',') + +        # Set the instance_tags filter, env var overrides config from file +        # and cli param overrides all +        if self.args.instance_tags: +            self.instance_tags = self.args.instance_tags +        else: +            self.instance_tags = os.environ.get( +                'GCE_INSTANCE_TAGS', config.get('gce', 'instance_tags')) +        if self.instance_tags: +            self.instance_tags = self.instance_tags.split(',') + +        # Caching +        cache_path = config.get('cache', 'cache_path') +        cache_max_age = config.getint('cache', 'cache_max_age') +        # TODO(supertom): support project-specific caches +        cache_name = 'ansible-gce.cache' +        self.cache = CloudInventoryCache(cache_path=cache_path, +                                         cache_max_age=cache_max_age, +                                         cache_name=cache_name) +        return config + +    def get_inventory_options(self): +        """Determine inventory options. Environment variables always +        take precedence over configuration files.""" +        ip_type = self.config.get('inventory', 'inventory_ip_type') +        # If the appropriate environment variables are set, they override +        # other configuration +        ip_type = os.environ.get('INVENTORY_IP_TYPE', ip_type) +        return ip_type + +    def get_gce_drivers(self): +        """Determine the GCE authorization settings and return a list of +        libcloud drivers. +        """ +        # Attempt to get GCE params from a configuration file, if one +        # exists. +        secrets_path = self.config.get('gce', 'libcloud_secrets') +        secrets_found = False + +        try: +            import secrets +            args = list(secrets.GCE_PARAMS) +            kwargs = secrets.GCE_KEYWORD_PARAMS +            secrets_found = True +        except Exception: +            pass + +        if not secrets_found and secrets_path: +            if not secrets_path.endswith('secrets.py'): +                err = "Must specify libcloud secrets file as " +                err += "/absolute/path/to/secrets.py" +                sys.exit(err) +            sys.path.append(os.path.dirname(secrets_path)) +            try: +                import secrets +                args = list(getattr(secrets, 'GCE_PARAMS', [])) +                kwargs = getattr(secrets, 'GCE_KEYWORD_PARAMS', {}) +                secrets_found = True +            except Exception: +                pass + +        if not secrets_found: +            args = [ +                self.config.get('gce', 'gce_service_account_email_address'), +                self.config.get('gce', 'gce_service_account_pem_file_path') +            ] +            kwargs = {'project': self.config.get('gce', 'gce_project_id'), +                      'datacenter': self.config.get('gce', 'gce_zone')} + +        # If the appropriate environment variables are set, they override +        # other configuration; process those into our args and kwargs. +        args[0] = os.environ.get('GCE_EMAIL', args[0]) +        args[1] = os.environ.get('GCE_PEM_FILE_PATH', args[1]) +        args[1] = os.environ.get('GCE_CREDENTIALS_FILE_PATH', args[1]) + +        kwargs['project'] = os.environ.get('GCE_PROJECT', kwargs['project']) +        kwargs['datacenter'] = os.environ.get('GCE_ZONE', kwargs['datacenter']) + +        gce_drivers = [] +        projects = kwargs['project'].split(',') +        for project in projects: +            kwargs['project'] = project +            gce = get_driver(Provider.GCE)(*args, **kwargs) +            gce.connection.user_agent_append( +                '%s/%s' % (USER_AGENT_PRODUCT, USER_AGENT_VERSION), +            ) +            gce_drivers.append(gce) +        return gce_drivers + +    def parse_env_zones(self): +        '''Returns a list of comma-separated zones parsed from the GCE_ZONE environment variable.
+        If provided, this will be used to filter the results of the group_instances call''' +        import csv +        reader = csv.reader([os.environ.get('GCE_ZONE', "")], skipinitialspace=True) +        zones = [r for r in reader] +        return [z for z in zones[0]] + +    def parse_cli_args(self): +        ''' Command line argument processing ''' + +        parser = argparse.ArgumentParser( +            description='Produce an Ansible Inventory file based on GCE') +        parser.add_argument('--list', action='store_true', default=True, +                            help='List instances (default: True)') +        parser.add_argument('--host', action='store', +                            help='Get all information about an instance') +        parser.add_argument('--instance-tags', action='store', +                            help='Only include instances with these tags, separated by commas') +        parser.add_argument('--pretty', action='store_true', default=False, +                            help='Pretty format (default: False)') +        parser.add_argument( +            '--refresh-cache', action='store_true', default=False, +            help='Force refresh of cache by making API requests (default: False - use cache files)') +        self.args = parser.parse_args() + +    def node_to_dict(self, inst): +        md = {} + +        if inst is None: +            return {} + +        if 'items' in inst.extra['metadata']: +            for entry in inst.extra['metadata']['items']: +                md[entry['key']] = entry['value'] + +        net = inst.extra['networkInterfaces'][0]['network'].split('/')[-1] +        subnet = None +        if 'subnetwork' in inst.extra['networkInterfaces'][0]: +            subnet = inst.extra['networkInterfaces'][0]['subnetwork'].split('/')[-1] +        # default to the external IP unless the user has specified they prefer internal +        if self.ip_type == 'internal': +            ssh_host = inst.private_ips[0] +        else: +            ssh_host = inst.public_ips[0] if len(inst.public_ips) >= 1 else inst.private_ips[0] + +        return { +            'gce_uuid': inst.uuid, +            'gce_id': inst.id, +            'gce_image': inst.image, +            'gce_machine_type': inst.size, +            'gce_private_ip': inst.private_ips[0], +            'gce_public_ip': inst.public_ips[0] if len(inst.public_ips) >= 1 else None, +            'gce_name': inst.name, +            'gce_description': inst.extra['description'], +            'gce_status': inst.extra['status'], +            'gce_zone': inst.extra['zone'].name, +            'gce_tags': inst.extra['tags'], +            'gce_metadata': md, +            'gce_network': net, +            'gce_subnetwork': subnet, +            # Hosts don't have a public name, so we add an IP +            'ansible_ssh_host': ssh_host +        } + +    def load_inventory_from_cache(self): +        ''' Loads inventory from JSON on disk. ''' + +        try: +            self.inventory = self.cache.get_all_data_from_cache() +            hosts = self.inventory['_meta']['hostvars'] +        except Exception as e: +            print( +                "Invalid inventory file %s. Please rebuild with --refresh-cache option." +                % (self.cache.cache_path_cache)) +            raise + +    def do_api_calls_update_cache(self): +        ''' Do API calls and save data in cache. ''' +        zones = self.parse_env_zones() +        data = self.group_instances(zones) +        self.cache.write_to_cache(data) +        self.inventory = data + +    def list_nodes(self): +        all_nodes = [] +        params, more_results = {'maxResults': 500}, True +        while more_results: +            for driver in self.drivers: +                driver.connection.gce_params = params +                all_nodes.extend(driver.list_nodes()) +                more_results = 'pageToken' in params +        return all_nodes + +    def group_instances(self, zones=None): +        '''Group all instances''' +        groups = {} +        meta = {} +        meta["hostvars"] = {} + +        for node in self.list_nodes(): + +            # This check filters on the desired instance states defined in the +            # config file with the instance_states config option. +            # +            # If the instance_states list is _empty_ then _ALL_ states are returned.
+ # + # If the instance_states list is _populated_ then check the current + # state against the instance_states list + if self.instance_states and not node.extra['status'] in self.instance_states: + continue + + # This check filters on the desired instance tags defined in the + # config file with the instance_tags config option, env var GCE_INSTANCE_TAGS, + # or as the cli param --instance-tags. + # + # If the instance_tags list is _empty_ then _ALL_ instances are returned. + # + # If the instance_tags list is _populated_ then check the current + # instance tags against the instance_tags list. If the instance has + # at least one tag from the instance_tags list, it is returned. + if self.instance_tags and not set(self.instance_tags) & set(node.extra['tags']): + continue + + name = node.name + + meta["hostvars"][name] = self.node_to_dict(node) + + zone = node.extra['zone'].name + + # To avoid making multiple requests per zone + # we list all nodes and then filter the results + if zones and zone not in zones: + continue + + if zone in groups: + groups[zone].append(name) + else: + groups[zone] = [name] + + tags = node.extra['tags'] + for t in tags: + if t.startswith('group-'): + tag = t[6:] + else: + tag = 'tag_%s' % t + if tag in groups: + groups[tag].append(name) + else: + groups[tag] = [name] + + net = node.extra['networkInterfaces'][0]['network'].split('/')[-1] + net = 'network_%s' % net + if net in groups: + groups[net].append(name) + else: + groups[net] = [name] + + machine_type = node.size + if machine_type in groups: + groups[machine_type].append(name) + else: + groups[machine_type] = [name] + + image = node.image or 'persistent_disk' + if image in groups: + groups[image].append(name) + else: + groups[image] = [name] + + status = node.extra['status'] + stat = 'status_%s' % status.lower() + if stat in groups: + groups[stat].append(name) + else: + groups[stat] = [name] + + for private_ip in node.private_ips: + groups[private_ip] = [name] + + if len(node.public_ips) >= 1: + for public_ip in node.public_ips: + groups[public_ip] = [name] + + groups["_meta"] = meta + + return groups + + def json_format_dict(self, data, pretty=False): + ''' Converts a dict to a JSON object and dumps it as a formatted + string ''' + + if pretty: + return json.dumps(data, sort_keys=True, indent=2) + else: + return json.dumps(data) + + +# Run the script +if __name__ == '__main__': + GceInventory() diff --git a/scripts/inventory/infoblox.py b/scripts/inventory/infoblox.py new file mode 100644 index 0000000000..8c6a074186 --- /dev/null +++ b/scripts/inventory/infoblox.py @@ -0,0 +1,126 @@ +#!/usr/bin/env python +# +# (c) 2018, Red Hat, Inc. +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
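The tag filter in group_instances above is match-any: a node passes when it shares at least one tag with the configured list, which is exactly a non-empty set intersection. A compact illustration with hypothetical tag lists:

# Hypothetical filter and node tags, illustrating the set-intersection test above.
instance_tags = {'http-server', 'https-server'}
node_tags = {'https-server', 'monitoring'}

if instance_tags and not instance_tags & node_tags:
    print('skip node')  # no shared tag: the node is filtered out
else:
    print('keep node')  # at least one shared tag: the node is kept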
+# +import os +import sys +import json +import argparse + +from ansible.parsing.dataloader import DataLoader +from ansible.module_utils.six import iteritems +from ansible.module_utils._text import to_text +from ansible_collections.community.general.plugins.module_utils.net_tools.nios.api import WapiInventory +from ansible_collections.community.general.plugins.module_utils.net_tools.nios.api import normalize_extattrs, flatten_extattrs + + +CONFIG_FILES = [ + '/etc/ansible/infoblox.yaml', + '/etc/ansible/infoblox.yml' +] + + +def parse_args(): + parser = argparse.ArgumentParser() + + parser.add_argument('--list', action='store_true', + help='List host records from NIOS for use in Ansible') + + parser.add_argument('--host', + help='List meta data about single host (not used)') + + return parser.parse_args() + + +def main(): + args = parse_args() + + for config_file in CONFIG_FILES: + if os.path.exists(config_file): + break + else: + sys.stdout.write('unable to locate config file at /etc/ansible/infoblox.yaml\n') + sys.exit(-1) + + try: + loader = DataLoader() + config = loader.load_from_file(config_file) + provider = config.get('provider') or {} + wapi = WapiInventory(provider) + except Exception as exc: + sys.stdout.write(to_text(exc)) + sys.exit(-1) + + if args.host: + host_filter = {'name': args.host} + else: + host_filter = {} + + config_filters = config.get('filters') + + if config_filters.get('view') is not None: + host_filter['view'] = config_filters['view'] + + if config_filters.get('extattrs'): + extattrs = normalize_extattrs(config_filters['extattrs']) + else: + extattrs = {} + + hostvars = {} + inventory = { + '_meta': { + 'hostvars': hostvars + } + } + + return_fields = ['name', 'view', 'extattrs', 'ipv4addrs'] + + hosts = wapi.get_object('record:host', + host_filter, + extattrs=extattrs, + return_fields=return_fields) + + if hosts: + for item in hosts: + view = item['view'] + name = item['name'] + + if view not in inventory: + inventory[view] = {'hosts': []} + + inventory[view]['hosts'].append(name) + + hostvars[name] = { + 'view': view + } + + if item.get('extattrs'): + for key, value in iteritems(flatten_extattrs(item['extattrs'])): + if key.startswith('ansible_'): + hostvars[name][key] = value + else: + if 'extattrs' not in hostvars[name]: + hostvars[name]['extattrs'] = {} + hostvars[name]['extattrs'][key] = value + + sys.stdout.write(json.dumps(inventory, indent=4)) + sys.exit(0) + + +if __name__ == '__main__': + main() diff --git a/scripts/inventory/infoblox.yaml b/scripts/inventory/infoblox.yaml new file mode 100644 index 0000000000..c1be5324ac --- /dev/null +++ b/scripts/inventory/infoblox.yaml @@ -0,0 +1,24 @@ +--- +# This file provides the configuration information for the Infoblox dynamic +# inventory script that is used to dynamically pull host information from NIOS. +# This file should be copied to /etc/ansible/infoblox.yaml in order for the +# dynamic script to find it. + +# Sets the provider arguments for authenticating to the Infoblox server to +# retrieve inventory hosts. Provider arguments can also be set using +# environment variables. Supported environment variables all start with +# INFOBLOX_{{ name }}. For instance, to set the host provider value, the +# environment variable would be INFOBLOX_HOST. +provider: + host: + username: + password: + +# Filters allow the dynamic inventory script to restrict the set of hosts that +# are returned from the Infoblox server. 
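A reduced sketch of how the infoblox script above turns WAPI host records into inventory, with hypothetical record data standing in for a live wapi.get_object('record:host', ...) call (the real script also normalizes extensible attributes via flatten_extattrs):

import json

hosts = [{'name': 'db1.example.com', 'view': 'default',
          'extattrs': {'ansible_user': 'admin', 'Site': 'HQ'}}]  # hypothetical records

hostvars = {}
inventory = {'_meta': {'hostvars': hostvars}}
for item in hosts:
    inventory.setdefault(item['view'], {'hosts': []})['hosts'].append(item['name'])
    hostvars[item['name']] = {'view': item['view']}
    for key, value in item.get('extattrs', {}).items():
        if key.startswith('ansible_'):
            hostvars[item['name']][key] = value  # ansible_* attrs become host variables
        else:
            hostvars[item['name']].setdefault('extattrs', {})[key] = value

print(json.dumps(inventory, indent=4))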
+filters: + # restrict returned hosts by extensible attributes + extattrs: {} + + # restrict returned hosts to a specified DNS view + view: null diff --git a/scripts/inventory/jail.py b/scripts/inventory/jail.py new file mode 100644 index 0000000000..9a2ccf18fe --- /dev/null +++ b/scripts/inventory/jail.py @@ -0,0 +1,37 @@ +#!/usr/bin/env python + +# (c) 2013, Michael Scherer +# +# This file is part of Ansible, +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +from subprocess import Popen, PIPE +import sys +import json + +result = {} +result['all'] = {} + +pipe = Popen(['jls', '-q', 'name'], stdout=PIPE, universal_newlines=True) +result['all']['hosts'] = [x[:-1] for x in pipe.stdout.readlines()] +result['all']['vars'] = {} +result['all']['vars']['ansible_connection'] = 'jail' + +if len(sys.argv) == 2 and sys.argv[1] == '--list': + print(json.dumps(result)) +elif len(sys.argv) == 3 and sys.argv[1] == '--host': + print(json.dumps({'ansible_connection': 'jail'})) +else: + sys.stderr.write("Need an argument, either --list or --host \n") diff --git a/scripts/inventory/landscape.py b/scripts/inventory/landscape.py new file mode 100644 index 0000000000..9aa660bef8 --- /dev/null +++ b/scripts/inventory/landscape.py @@ -0,0 +1,125 @@ +#!/usr/bin/env python + +# (c) 2015, Marc Abramowitz +# +# This file is part of Ansible. +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +# Dynamic inventory script which lets you use nodes discovered by Canonical's +# Landscape (http://www.ubuntu.com/management/landscape-features). 
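The jail.py script above is the minimal form of the dynamic-inventory contract that all of these scripts implement: --list emits the whole structure, --host emits per-host variables. For two hypothetical jails named ftp and www, its --list output corresponds to:

# Hypothetical --list output of jail.py, shown as the equivalent Python structure.
expected = {
    'all': {
        'hosts': ['ftp', 'www'],
        'vars': {'ansible_connection': 'jail'},
    }
}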
+# +# Requires the `landscape_api` Python module +# See: +# - https://landscape.canonical.com/static/doc/api/api-client-package.html +# - https://landscape.canonical.com/static/doc/api/python-api.html +# +# Environment variables +# --------------------- +# - `LANDSCAPE_API_URI` +# - `LANDSCAPE_API_KEY` +# - `LANDSCAPE_API_SECRET` +# - `LANDSCAPE_API_SSL_CA_FILE` (optional) + + +import argparse +import collections +import os +import sys + +from landscape_api.base import API, HTTPError + +import json + +_key = 'landscape' + + +class EnvironmentConfig(object): + uri = os.getenv('LANDSCAPE_API_URI') + access_key = os.getenv('LANDSCAPE_API_KEY') + secret_key = os.getenv('LANDSCAPE_API_SECRET') + ssl_ca_file = os.getenv('LANDSCAPE_API_SSL_CA_FILE') + + +def _landscape_client(): + env = EnvironmentConfig() + return API( + uri=env.uri, + access_key=env.access_key, + secret_key=env.secret_key, + ssl_ca_file=env.ssl_ca_file) + + +def get_landscape_members_data(): + return _landscape_client().get_computers() + + +def get_nodes(data): + return [node['hostname'] for node in data] + + +def get_groups(data): + groups = collections.defaultdict(list) + + for node in data: + for value in node['tags']: + groups[value].append(node['hostname']) + + return groups + + +def get_meta(data): + meta = {'hostvars': {}} + for node in data: + meta['hostvars'][node['hostname']] = {'tags': node['tags']} + return meta + + +def print_list(): + data = get_landscape_members_data() + nodes = get_nodes(data) + groups = get_groups(data) + meta = get_meta(data) + inventory_data = {_key: nodes, '_meta': meta} + inventory_data.update(groups) + print(json.dumps(inventory_data)) + + +def print_host(host): + data = get_landscape_members_data() + meta = get_meta(data) + print(json.dumps(meta['hostvars'][host])) + + +def get_args(args_list): + parser = argparse.ArgumentParser( + description='ansible inventory script reading from landscape cluster') + mutex_group = parser.add_mutually_exclusive_group(required=True) + help_list = 'list all hosts from landscape cluster' + mutex_group.add_argument('--list', action='store_true', help=help_list) + help_host = 'display variables for a host' + mutex_group.add_argument('--host', help=help_host) + return parser.parse_args(args_list) + + +def main(args_list): + args = get_args(args_list) + if args.list: + print_list() + if args.host: + print_host(args.host) + + +if __name__ == '__main__': + main(sys.argv[1:]) diff --git a/scripts/inventory/libcloud.ini b/scripts/inventory/libcloud.ini new file mode 100644 index 0000000000..7592c41cd0 --- /dev/null +++ b/scripts/inventory/libcloud.ini @@ -0,0 +1,15 @@ +# Ansible Apache Libcloud Generic inventory script + +[driver] +provider = CLOUDSTACK +host = +path = +secure = True +verify_ssl_cert = True + +key = +secret = + +[cache] +cache_path=/path/to/your/cache +cache_max_age=60 diff --git a/scripts/inventory/libvirt_lxc.py b/scripts/inventory/libvirt_lxc.py new file mode 100644 index 0000000000..c0d84dbaaf --- /dev/null +++ b/scripts/inventory/libvirt_lxc.py @@ -0,0 +1,37 @@ +#!/usr/bin/env python + +# (c) 2013, Michael Scherer +# +# This file is part of Ansible, +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. 
+# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +from subprocess import Popen, PIPE +import sys +import json + +result = {} +result['all'] = {} + +pipe = Popen(['virsh', '-q', '-c', 'lxc:///', 'list', '--name', '--all'], stdout=PIPE, universal_newlines=True) +result['all']['hosts'] = [x[:-1] for x in pipe.stdout.readlines()] +result['all']['vars'] = {} +result['all']['vars']['ansible_connection'] = 'libvirt_lxc' + +if len(sys.argv) == 2 and sys.argv[1] == '--list': +    print(json.dumps(result)) +elif len(sys.argv) == 3 and sys.argv[1] == '--host': +    print(json.dumps({'ansible_connection': 'libvirt_lxc'})) +else: +    sys.stderr.write("Need an argument, either --list or --host \n") diff --git a/scripts/inventory/linode.ini b/scripts/inventory/linode.ini new file mode 100644 index 0000000000..c925d970e9 --- /dev/null +++ b/scripts/inventory/linode.ini @@ -0,0 +1,18 @@ +# Ansible Linode external inventory script settings +# + +[linode] + +# API calls to Linode are slow. For this reason, we cache the results of an API +# call. Set this to the path you want cache files to be written to. Two files +# will be written to this directory: +#   - ansible-Linode.cache +#   - ansible-Linode.index +cache_path = /tmp + +# The number of seconds a cache file is considered valid. After this many +# seconds, a new API call will be made, and the cache file will be updated. +cache_max_age = 300 + +# If set to true, use the host's public IP in the dictionary instead of the label +use_public_ip = false \ No newline at end of file diff --git a/scripts/inventory/linode.py b/scripts/inventory/linode.py new file mode 100644 index 0000000000..f4ae302816 --- /dev/null +++ b/scripts/inventory/linode.py @@ -0,0 +1,348 @@ +#!/usr/bin/env python + +''' +Linode external inventory script +================================= + +Generates inventory that Ansible can understand by making API requests to +Linode using the Chube library. + +NOTE: This script assumes Ansible is being executed where Chube is already +installed and has a valid config at ~/.chube. If not, run: + +    pip install chube +    echo -e "---\napi_key: " > ~/.chube + +For more details, see: https://github.com/exosite/chube + +NOTE: By default, this script also assumes that the Linodes in your account all have +labels that correspond to hostnames that are in your resolver search path. +Your resolver search path resides in /etc/resolv.conf. +Optionally, if you would like to use the host's public IP instead of its label, use +the following setting in linode.ini: + +    use_public_ip = true + +When run against a specific host, this script returns the following variables: + +    - api_id +    - datacenter_id +    - datacenter_city (lowercase city name of data center, e.g.
'tokyo') + - label + - display_group + - create_dt + - total_hd + - total_xfer + - total_ram + - status + - public_ip (The first public IP found) + - private_ip (The first private IP found, or empty string if none) + - alert_cpu_enabled + - alert_cpu_threshold + - alert_diskio_enabled + - alert_diskio_threshold + - alert_bwin_enabled + - alert_bwin_threshold + - alert_bwout_enabled + - alert_bwout_threshold + - alert_bwquota_enabled + - alert_bwquota_threshold + - backup_weekly_daily + - backup_window + - watchdog + +Peter Sankauskas did most of the legwork here with his linode plugin; I +just adapted that for Linode. +''' + +# (c) 2013, Dan Slimmon +# +# This file is part of Ansible, +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +###################################################################### + +# Standard imports +import os +import re +import sys +import argparse +from time import time + +import json + +try: + from chube import load_chube_config + from chube import api as chube_api + from chube.datacenter import Datacenter + from chube.linode_obj import Linode +except Exception: + try: + # remove local paths and other stuff that may + # cause an import conflict, as chube is sensitive + # to name collisions on importing + old_path = sys.path + sys.path = [d for d in sys.path if d not in ('', os.getcwd(), os.path.dirname(os.path.realpath(__file__)))] + + from chube import load_chube_config + from chube import api as chube_api + from chube.datacenter import Datacenter + from chube.linode_obj import Linode + + sys.path = old_path + except Exception as e: + raise Exception("could not import chube") + +load_chube_config() + +# Imports for ansible +from ansible.module_utils.six.moves import configparser as ConfigParser + + +class LinodeInventory(object): + def _empty_inventory(self): + return {"_meta": {"hostvars": {}}} + + def __init__(self): + """Main execution path.""" + # Inventory grouped by display group + self.inventory = self._empty_inventory() + # Index of label to Linode ID + self.index = {} + # Local cache of Datacenter objects populated by populate_datacenter_cache() + self._datacenter_cache = None + + # Read settings and parse CLI arguments + self.read_settings() + self.parse_cli_args() + + # Cache + if self.args.refresh_cache: + self.do_api_calls_update_cache() + elif not self.is_cache_valid(): + self.do_api_calls_update_cache() + + # Data to print + if self.args.host: + data_to_print = self.get_host_info() + elif self.args.list: + # Display list of nodes for inventory + if len(self.inventory) == 1: + data_to_print = self.get_inventory_from_cache() + else: + data_to_print = self.json_format_dict(self.inventory, True) + + print(data_to_print) + + def is_cache_valid(self): + """Determines if the cache file has expired, or if it is still valid.""" + if os.path.isfile(self.cache_path_cache): + mod_time = os.path.getmtime(self.cache_path_cache) + current_time = time() + if (mod_time + self.cache_max_age) > current_time: + if 
os.path.isfile(self.cache_path_index): +                    return True +        return False + +    def read_settings(self): +        """Reads the settings from the .ini file.""" +        config = ConfigParser.SafeConfigParser() +        config.read(os.path.dirname(os.path.realpath(__file__)) + '/linode.ini') + +        # Cache related +        cache_path = config.get('linode', 'cache_path') +        self.cache_path_cache = cache_path + "/ansible-linode.cache" +        self.cache_path_index = cache_path + "/ansible-linode.index" +        self.cache_max_age = config.getint('linode', 'cache_max_age') +        self.use_public_ip = config.getboolean('linode', 'use_public_ip') + +    def parse_cli_args(self): +        """Command line argument processing""" +        parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on Linode') +        parser.add_argument('--list', action='store_true', default=True, +                            help='List nodes (default: True)') +        parser.add_argument('--host', action='store', +                            help='Get all the variables about a specific node') +        parser.add_argument('--refresh-cache', action='store_true', default=False, +                            help='Force refresh of cache by making API requests to Linode (default: False - use cache files)') +        self.args = parser.parse_args() + +    def do_api_calls_update_cache(self): +        """Do API calls, and save data in cache files.""" +        self.get_nodes() +        self.write_to_cache(self.inventory, self.cache_path_cache) +        self.write_to_cache(self.index, self.cache_path_index) + +    def get_nodes(self): +        """Makes a Linode API call to get the list of nodes.""" +        try: +            for node in Linode.search(status=Linode.STATUS_RUNNING): +                self.add_node(node) +        except chube_api.linode_api.ApiError as e: +            sys.exit("Looks like Linode's API is down:\n %s" % e) + +    def get_node(self, linode_id): +        """Gets details about a specific node.""" +        try: +            return Linode.find(api_id=linode_id) +        except chube_api.linode_api.ApiError as e: +            sys.exit("Looks like Linode's API is down:\n%s" % e) + +    def populate_datacenter_cache(self): +        """Creates self._datacenter_cache, containing all Datacenters indexed by ID.""" +        self._datacenter_cache = {} +        dcs = Datacenter.search() +        for dc in dcs: +            self._datacenter_cache[dc.api_id] = dc + +    def get_datacenter_city(self, node): +        """Returns the lowercase city name of the node's data center.""" +        if self._datacenter_cache is None: +            self.populate_datacenter_cache() +        location = self._datacenter_cache[node.datacenter_id].location +        location = location.lower() +        location = location.split(",")[0] +        return location + +    def add_node(self, node): +        """Adds a node to the inventory and index.""" +        if self.use_public_ip: +            dest = self.get_node_public_ip(node) +        else: +            dest = node.label + +        # Add to index +        self.index[dest] = node.api_id + +        # Inventory: Group by node ID (always a group of 1) +        self.inventory[node.api_id] = [dest] + +        # Inventory: Group by datacenter city +        self.push(self.inventory, self.get_datacenter_city(node), dest) + +        # Inventory: Group by display group +        self.push(self.inventory, node.display_group, dest) + +        # Inventory: Add a "linode" global tag group +        self.push(self.inventory, "linode", dest) + +        # Add host info to hostvars +        self.inventory["_meta"]["hostvars"][dest] = self._get_host_info(node) + +    def get_node_public_ip(self, node): +        """Returns the public IP address of the node.""" +        return [addr.address for addr in node.ipaddresses if addr.is_public][0] + +    def get_host_info(self): +        """Get variables about a specific host.""" + +        if len(self.index) == 0: +            # Need to load index from cache +            self.load_index_from_cache() + +        if self.args.host not
in self.index: +            # try updating the cache +            self.do_api_calls_update_cache() +            if self.args.host not in self.index: +                # host might not exist anymore +                return self.json_format_dict({}, True) + +        node_id = self.index[self.args.host] +        node = self.get_node(node_id) + +        return self.json_format_dict(self._get_host_info(node), True) + +    def _get_host_info(self, node): +        node_vars = {} +        for direct_attr in [ +            "api_id", +            "datacenter_id", +            "label", +            "display_group", +            "create_dt", +            "total_hd", +            "total_xfer", +            "total_ram", +            "status", +            "alert_cpu_enabled", +            "alert_cpu_threshold", +            "alert_diskio_enabled", +            "alert_diskio_threshold", +            "alert_bwin_enabled", +            "alert_bwin_threshold", +            "alert_bwout_enabled", +            "alert_bwout_threshold", +            "alert_bwquota_enabled", +            "alert_bwquota_threshold", +            "backup_weekly_daily", +            "backup_window", +            "watchdog" +        ]: +            node_vars[direct_attr] = getattr(node, direct_attr) + +        node_vars["datacenter_city"] = self.get_datacenter_city(node) +        node_vars["public_ip"] = self.get_node_public_ip(node) + +        # Set the SSH host information, so these inventory items can be used if +        # their labels aren't FQDNs +        node_vars['ansible_ssh_host'] = node_vars["public_ip"] +        node_vars['ansible_host'] = node_vars["public_ip"] + +        private_ips = [addr.address for addr in node.ipaddresses if not addr.is_public] + +        if private_ips: +            node_vars["private_ip"] = private_ips[0] + +        return node_vars + +    def push(self, my_dict, key, element): +        """Pushes an element onto a list that may not have been defined in the dict.""" +        if key in my_dict: +            my_dict[key].append(element) +        else: +            my_dict[key] = [element] + +    def get_inventory_from_cache(self): +        """Reads the inventory from the cache file and returns it as a JSON string.""" +        cache = open(self.cache_path_cache, 'r') +        json_inventory = cache.read() +        return json_inventory + +    def load_index_from_cache(self): +        """Reads the index from the cache file and sets self.index.""" +        cache = open(self.cache_path_index, 'r') +        json_index = cache.read() +        self.index = json.loads(json_index) + +    def write_to_cache(self, data, filename): +        """Writes data in JSON format to a file.""" +        json_data = self.json_format_dict(data, True) +        cache = open(filename, 'w') +        cache.write(json_data) +        cache.close() + +    def to_safe(self, word): +        """Escapes any characters that would be invalid in an ansible group name.""" +        return re.sub(r"[^A-Za-z0-9\-]", "_", word) + +    def json_format_dict(self, data, pretty=False): +        """Converts a dict to a JSON object and dumps it as a formatted string.""" +        if pretty: +            return json.dumps(data, sort_keys=True, indent=2) +        else: +            return json.dumps(data) + + +LinodeInventory() diff --git a/scripts/inventory/lxc_inventory.py b/scripts/inventory/lxc_inventory.py new file mode 100644 index 0000000000..86a6b22c2d --- /dev/null +++ b/scripts/inventory/lxc_inventory.py @@ -0,0 +1,70 @@ +#!/usr/bin/env python +# +# (c) 2015-16 Florian Haas, hastexo Professional Services GmbH +# +# Based in part on: +# libvirt_lxc.py, (c) 2013, Michael Scherer +# +# This file is part of Ansible, +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +""" +Ansible inventory script for LXC containers. Requires the Python +bindings for the LXC API. + +In LXC, containers can be grouped by setting the lxc.group option, +which may be found more than once in a container's +configuration. So, we enumerate all containers, fetch their list +of groups, and then build the dictionary in the way Ansible expects +it. +""" +from __future__ import print_function + +import sys +import lxc +import json + + +def build_dict(): +    """Returns a dictionary keyed to the defined LXC groups. All +    containers, including the ones not in any group, are included in the +    "all" group.""" +    # Enumerate all containers, and list the groups they are in. Also, +    # implicitly add every container to the 'all' group. +    containers = dict([(c, +                        ['all'] + +                        (lxc.Container(c).get_config_item('lxc.group') or [])) +                       for c in lxc.list_containers()]) + +    # Extract the groups, flatten the list, and remove duplicates +    groups = set(sum([g for g in containers.values()], [])) + +    # Create a dictionary for each group (including the 'all' group) +    return dict([(g, {'hosts': [k for k, v in containers.items() if g in v], +                      'vars': {'ansible_connection': 'lxc'}}) for g in groups]) + + +def main(argv): +    """Returns a JSON dictionary as expected by Ansible""" +    result = build_dict() +    if len(argv) == 2 and argv[1] == '--list': +        json.dump(result, sys.stdout) +    elif len(argv) == 3 and argv[1] == '--host': +        json.dump({'ansible_connection': 'lxc'}, sys.stdout) +    else: +        print("Need an argument, either --list or --host ", file=sys.stderr) + + +if __name__ == '__main__': +    main(sys.argv) diff --git a/scripts/inventory/lxd.ini b/scripts/inventory/lxd.ini new file mode 100644 index 0000000000..5398e7d021 --- /dev/null +++ b/scripts/inventory/lxd.ini @@ -0,0 +1,13 @@ +# LXD external inventory script settings + +[lxd] + +# The default resource +#resource = local: + +# The group name to add the hosts to +#group = lxd + +# The connection type to return for these hosts - lxd hasn't been tested yet +#connection = lxd +connection = smart diff --git a/scripts/inventory/lxd.py b/scripts/inventory/lxd.py new file mode 100644 index 0000000000..2cb1354277 --- /dev/null +++ b/scripts/inventory/lxd.py @@ -0,0 +1,101 @@ +#!/usr/bin/env python + +# (c) 2013, Michael Scherer +# (c) 2014, Hiroaki Nakamura +# (c) 2016, Andew Clarke +# +# This file is based on https://github.com/ansible/ansible/blob/devel/plugins/inventory/libvirt_lxc.py which is part of Ansible, +# and https://github.com/hnakamur/lxc-ansible-playbooks/blob/master/provisioning/inventory-lxc.py +# +# NOTE, this file has some obvious limitations, improvements welcome +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see .
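The build_dict comprehensions in lxc_inventory.py above pack an inversion (container -> groups becomes group -> hosts) into three expressions; unrolled, the same transformation looks like this, with hypothetical container names:

# Hypothetical container -> groups mapping, as lxc.list_containers() plus each
# container's lxc.group entries would produce.
containers = {'web01': ['all', 'web'], 'db01': ['all', 'db'], 'scratch': ['all']}

result = {}
for name, groups in containers.items():
    for group in groups:
        entry = result.setdefault(group, {'hosts': [], 'vars': {'ansible_connection': 'lxc'}})
        entry['hosts'].append(name)

# result['all']['hosts'] -> ['web01', 'db01', 'scratch']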
+ +import os +from subprocess import Popen, PIPE +import distutils.spawn +import sys +import json + +from ansible.module_utils.six.moves import configparser + +# Set up defaults +resource = 'local:' +group = 'lxd' +connection = 'lxd' +hosts = {} +result = {} + +# Read the settings from the lxd.ini file +config = configparser.SafeConfigParser() +config.read(os.path.dirname(os.path.realpath(__file__)) + '/lxd.ini') +if config.has_option('lxd', 'resource'): + resource = config.get('lxd', 'resource') +if config.has_option('lxd', 'group'): + group = config.get('lxd', 'group') +if config.has_option('lxd', 'connection'): + connection = config.get('lxd', 'connection') + +# Ensure executable exists +if distutils.spawn.find_executable('lxc'): + + # Set up containers result and hosts array + result[group] = {} + result[group]['hosts'] = [] + + # Run the command and load json result + pipe = Popen(['lxc', 'list', resource, '--format', 'json'], stdout=PIPE, universal_newlines=True) + lxdjson = json.load(pipe.stdout) + + # Iterate the json lxd output + for item in lxdjson: + + # Check state and network + if 'state' in item and item['state'] is not None and 'network' in item['state']: + network = item['state']['network'] + + # Check for eth0 and addresses + if 'eth0' in network and 'addresses' in network['eth0']: + addresses = network['eth0']['addresses'] + + # Iterate addresses + for address in addresses: + + # Only return inet family addresses + if 'family' in address and address['family'] == 'inet': + if 'address' in address: + ip = address['address'] + name = item['name'] + + # Add the host to the results and the host array + result[group]['hosts'].append(name) + hosts[name] = ip + + # Set the other containers result values + result[group]['vars'] = {} + result[group]['vars']['ansible_connection'] = connection + +# Process arguments +if len(sys.argv) == 2 and sys.argv[1] == '--list': + print(json.dumps(result)) +elif len(sys.argv) == 3 and sys.argv[1] == '--host': + if sys.argv[2] == 'localhost': + print(json.dumps({'ansible_connection': 'local'})) + else: + if connection == 'lxd': + print(json.dumps({'ansible_connection': connection})) + else: + print(json.dumps({'ansible_connection': connection, 'ansible_host': hosts[sys.argv[2]]})) +else: + print("Need an argument, either --list or --host ") diff --git a/scripts/inventory/mdt.ini b/scripts/inventory/mdt.ini new file mode 100644 index 0000000000..c401c0ce17 --- /dev/null +++ b/scripts/inventory/mdt.ini @@ -0,0 +1,17 @@ +[mdt] + +# Set the MDT server to connect to +server = localhost.example.com + +# Set the MDT Instance +instance = EXAMPLEINSTANCE + +# Set the MDT database +database = MDTDB + +# Configure login credentials +user = local.domain\admin +password = adminpassword + +[tower] +groupname = mdt diff --git a/scripts/inventory/mdt_dynamic_inventory.py b/scripts/inventory/mdt_dynamic_inventory.py new file mode 100644 index 0000000000..154aac4f8d --- /dev/null +++ b/scripts/inventory/mdt_dynamic_inventory.py @@ -0,0 +1,132 @@ +#!/usr/bin/env python + +# (c) 2016, Julian Barnett +# +# This file is part of Ansible. +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +''' +MDT external inventory script +================================= +author: J Barnett 06/23/2016 01:15 +maintainer: J Barnett (github @jbarnett1981) +''' + +import argparse +import json +import pymssql +from ansible.module_utils.six.moves import configparser + + +class MDTInventory(object): + + def __init__(self): + ''' Main execution path ''' + self.conn = None + + # Initialize empty inventory + self.inventory = self._empty_inventory() + + # Read CLI arguments + self.read_settings() + self.parse_cli_args() + + # Get Hosts + if self.args.list: + self.get_hosts() + + # Get specific host vars + if self.args.host: + self.get_hosts(self.args.host) + + def _connect(self, query): + ''' + Connect to MDT and dump contents of dbo.ComputerIdentity database + ''' + if not self.conn: + self.conn = pymssql.connect(server=self.mdt_server + "\\" + self.mdt_instance, user=self.mdt_user, password=self.mdt_password, + database=self.mdt_database) + cursor = self.conn.cursor() + cursor.execute(query) + self.mdt_dump = cursor.fetchall() + self.conn.close() + + def get_hosts(self, hostname=False): + ''' + Gets host from MDT Database + ''' + if hostname: + query = ("SELECT t1.ID, t1.Description, t1.MacAddress, t2.Role " + "FROM ComputerIdentity as t1 join Settings_Roles as t2 on t1.ID = t2.ID where t1.Description = '%s'" % hostname) + else: + query = 'SELECT t1.ID, t1.Description, t1.MacAddress, t2.Role FROM ComputerIdentity as t1 join Settings_Roles as t2 on t1.ID = t2.ID' + self._connect(query) + + # Configure to group name configured in Ansible Tower for this inventory + groupname = self.mdt_groupname + + # Initialize empty host list + hostlist = [] + + # Parse through db dump and populate inventory + for hosts in self.mdt_dump: + self.inventory['_meta']['hostvars'][hosts[1]] = {'id': hosts[0], 'name': hosts[1], 'mac': hosts[2], 'role': hosts[3]} + hostlist.append(hosts[1]) + self.inventory[groupname] = hostlist + + # Print it all out + print(json.dumps(self.inventory, indent=2)) + + def _empty_inventory(self): + ''' + Create empty inventory dictionary + ''' + return {"_meta": {"hostvars": {}}} + + def read_settings(self): + ''' + Reads the settings from the mdt.ini file + ''' + config = configparser.SafeConfigParser() + config.read('mdt.ini') + + # MDT Server and instance and database + self.mdt_server = config.get('mdt', 'server') + self.mdt_instance = config.get('mdt', 'instance') + self.mdt_database = config.get('mdt', 'database') + + # MDT Login credentials + if config.has_option('mdt', 'user'): + self.mdt_user = config.get('mdt', 'user') + if config.has_option('mdt', 'password'): + self.mdt_password = config.get('mdt', 'password') + + # Group name in Tower + if config.has_option('tower', 'groupname'): + self.mdt_groupname = config.get('tower', 'groupname') + + def parse_cli_args(self): + ''' + Command line argument processing + ''' + parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on MDT') + parser.add_argument('--list', action='store_true', default=False, help='List instances') + parser.add_argument('--host', action='store', help='Get all the variables about a specific instance') + self.args = parser.parse_args() + + +if __name__ == "__main__": + # Run the script + MDTInventory() diff --git a/scripts/inventory/nagios_livestatus.ini b/scripts/inventory/nagios_livestatus.ini new file mode 100644 
index 0000000000..320f11f35c
--- /dev/null
+++ b/scripts/inventory/nagios_livestatus.ini
@@ -0,0 +1,41 @@
+# Ansible Nagios external inventory script settings
+#
+# To get all available possibilities, check the following URLs:
+# http://www.naemon.org/documentation/usersguide/livestatus.html
+# https://mathias-kettner.de/checkmk_livestatus.html
+#
+
+[local]
+# Livestatus URI
+# Example for the default naemon livestatus unix socket:
+# livestatus_uri=unix:/var/cache/naemon/live
+
+[remote]
+
+# default field name for host: name
+# Uncomment to override:
+# host_field=address
+#
+# default group field for host: groups
+# Uncomment to override:
+# group_field=state
+# default fields retrieved: address, alias, display_name, childs, parents
+# To override, uncomment the following line
+# fields_to_retrieve=address,alias,display_name
+#
+# default variable prefix: livestatus_
+# To override, uncomment the following line
+# var_prefix=naemon_
+#
+# default filter: None
+#
+# Uncomment to override
+#
+# All hosts with state = OK
+# host_filter=state = 0
+# Warning: for the moment, you can use only one filter at a time. You cannot combine multiple conditions.
+#
+# All hosts in group Linux
+# host_filter=groups >= Linux
+#
+livestatus_uri=tcp:192.168.66.137:6557
diff --git a/scripts/inventory/nagios_livestatus.py b/scripts/inventory/nagios_livestatus.py
new file mode 100644
index 0000000000..25c043b5c0
--- /dev/null
+++ b/scripts/inventory/nagios_livestatus.py
@@ -0,0 +1,173 @@
+#!/usr/bin/env python
+
+# (c) 2015, Yannig Perre
+#
+# This file is part of Ansible,
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+'''
+Nagios livestatus inventory script. Before using this script, please
+update the nagios_livestatus.ini file.
+
+Livestatus is a nagios/naemon/shinken module which lets you retrieve
+information stored in the monitoring core.
+
+This inventory script needs the livestatus API for Python. Please install it
+before using this script (via apt/pip/yum/...).
+
+Checkmk livestatus: https://mathias-kettner.de/checkmk_livestatus.html
+Livestatus API: http://www.naemon.org/documentation/usersguide/livestatus.html
+'''
+
+import os
+import re
+import argparse
+import sys
+
+from ansible.module_utils.six.moves import configparser
+import json
+
+try:
+    from mk_livestatus import Socket
+except ImportError:
+    sys.exit("Error: mk_livestatus is needed.
Try something like: pip install python-mk-livestatus") + + +class NagiosLivestatusInventory(object): + + def parse_ini_file(self): + config = configparser.SafeConfigParser() + config.read(os.path.dirname(os.path.realpath(__file__)) + '/nagios_livestatus.ini') + for section in config.sections(): + if not config.has_option(section, 'livestatus_uri'): + continue + + # If fields_to_retrieve is not set, using default fields + fields_to_retrieve = self.default_fields_to_retrieve + if config.has_option(section, 'fields_to_retrieve'): + fields_to_retrieve = [field.strip() for field in config.get(section, 'fields_to_retrieve').split(',')] + fields_to_retrieve = tuple(fields_to_retrieve) + + # default section values + section_values = { + 'var_prefix': 'livestatus_', + 'host_filter': None, + 'host_field': 'name', + 'group_field': 'groups' + } + for key, value in section_values.items(): + if config.has_option(section, key): + section_values[key] = config.get(section, key).strip() + + # Retrieving livestatus string connection + livestatus_uri = config.get(section, 'livestatus_uri') + backend_definition = None + + # Local unix socket + unix_match = re.match('unix:(.*)', livestatus_uri) + if unix_match is not None: + backend_definition = {'connection': unix_match.group(1)} + + # Remote tcp connection + tcp_match = re.match('tcp:(.*):([^:]*)', livestatus_uri) + if tcp_match is not None: + backend_definition = {'connection': (tcp_match.group(1), int(tcp_match.group(2)))} + + # No valid livestatus_uri => exiting + if backend_definition is None: + raise Exception('livestatus_uri field is invalid (%s). Expected: unix:/path/to/live or tcp:host:port' % livestatus_uri) + + # Updating backend_definition with current value + backend_definition['name'] = section + backend_definition['fields'] = fields_to_retrieve + for key, value in section_values.items(): + backend_definition[key] = value + + self.backends.append(backend_definition) + + def parse_options(self): + parser = argparse.ArgumentParser() + parser.add_argument('--host', nargs=1) + parser.add_argument('--list', action='store_true') + parser.add_argument('--pretty', action='store_true') + self.options = parser.parse_args() + + def add_host(self, hostname, group): + if group not in self.result: + self.result[group] = {} + self.result[group]['hosts'] = [] + if hostname not in self.result[group]['hosts']: + self.result[group]['hosts'].append(hostname) + + def query_backend(self, backend, host=None): + '''Query a livestatus backend''' + hosts_request = Socket(backend['connection']).hosts.columns(backend['host_field'], backend['group_field']) + + if backend['host_filter'] is not None: + hosts_request = hosts_request.filter(backend['host_filter']) + + if host is not None: + hosts_request = hosts_request.filter('name = ' + host[0]) + + hosts_request._columns += backend['fields'] + + hosts = hosts_request.call() + for host in hosts: + hostname = host[backend['host_field']] + hostgroups = host[backend['group_field']] + if not isinstance(hostgroups, list): + hostgroups = [hostgroups] + self.add_host(hostname, 'all') + self.add_host(hostname, backend['name']) + for group in hostgroups: + self.add_host(hostname, group) + for field in backend['fields']: + var_name = backend['var_prefix'] + field + if hostname not in self.result['_meta']['hostvars']: + self.result['_meta']['hostvars'][hostname] = {} + self.result['_meta']['hostvars'][hostname][var_name] = host[field] + + def __init__(self): + + self.defaultgroup = 'group_all' + self.default_fields_to_retrieve = 
('address', 'alias', 'display_name', 'childs', 'parents') + self.backends = [] + self.options = None + + self.parse_ini_file() + self.parse_options() + + self.result = {} + self.result['_meta'] = {} + self.result['_meta']['hostvars'] = {} + self.json_indent = None + if self.options.pretty: + self.json_indent = 2 + + if len(self.backends) == 0: + sys.exit("Error: Livestatus configuration is missing. See nagios_livestatus.ini.") + + for backend in self.backends: + self.query_backend(backend, self.options.host) + + if self.options.host: + print(json.dumps(self.result['_meta']['hostvars'][self.options.host[0]], indent=self.json_indent)) + elif self.options.list: + print(json.dumps(self.result, indent=self.json_indent)) + else: + sys.exit("usage: --list or --host HOSTNAME [--pretty]") + + +NagiosLivestatusInventory() diff --git a/scripts/inventory/nagios_ndo.ini b/scripts/inventory/nagios_ndo.ini new file mode 100644 index 0000000000..1e133a29f3 --- /dev/null +++ b/scripts/inventory/nagios_ndo.ini @@ -0,0 +1,10 @@ +# Ansible Nagios external inventory script settings +# + +[ndo] +# NDO database URI +# Make sure that data is returned as strings and not bytes if using python 3. +# See http://docs.sqlalchemy.org/en/rel_1_0/core/engines.html +# for supported databases and URI format. +# Example for mysqlclient module : +database_uri=mysql+mysqldb://user:passwd@hostname/ndo?charset=utf8&use_unicode=1 diff --git a/scripts/inventory/nagios_ndo.py b/scripts/inventory/nagios_ndo.py new file mode 100644 index 0000000000..0f89ede659 --- /dev/null +++ b/scripts/inventory/nagios_ndo.py @@ -0,0 +1,105 @@ +#!/usr/bin/env python + +# (c) 2014, Jonathan Lestrelin +# +# This file is part of Ansible, +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +""" +Nagios NDO external inventory script. +======================================== + +Returns hosts and hostgroups from Nagios NDO. + +Configuration is read from `nagios_ndo.ini`. +""" + +import os +import argparse +import sys +from ansible.module_utils.six.moves import configparser +import json + +try: + from sqlalchemy import text + from sqlalchemy.engine import create_engine +except ImportError: + sys.exit("Error: SQLAlchemy is needed. 
Try something like: pip install sqlalchemy")
+
+
+class NagiosNDOInventory(object):
+
+    def read_settings(self):
+        config = configparser.SafeConfigParser()
+        config.read(os.path.dirname(os.path.realpath(__file__)) + '/nagios_ndo.ini')
+        if config.has_option('ndo', 'database_uri'):
+            self.ndo_database_uri = config.get('ndo', 'database_uri')
+
+    def read_cli(self):
+        parser = argparse.ArgumentParser()
+        parser.add_argument('--host', nargs=1)
+        parser.add_argument('--list', action='store_true')
+        self.options = parser.parse_args()
+
+    def get_hosts(self):
+        engine = create_engine(self.ndo_database_uri)
+        connection = engine.connect()
+        select_hosts = text("SELECT display_name \
+                            FROM nagios_hosts")
+        select_hostgroups = text("SELECT alias \
+                                 FROM nagios_hostgroups")
+        select_hostgroup_hosts = text("SELECT h.display_name \
+                                      FROM nagios_hostgroup_members hgm, nagios_hosts h, nagios_hostgroups hg \
+                                      WHERE hgm.hostgroup_id = hg.hostgroup_id \
+                                      AND hgm.host_object_id = h.host_object_id \
+                                      AND hg.alias =:hostgroup_alias")
+
+        hosts = connection.execute(select_hosts)
+        self.result['all']['hosts'] = [host['display_name'] for host in hosts]
+
+        for hostgroup in connection.execute(select_hostgroups):
+            hostgroup_alias = hostgroup['alias']
+            self.result[hostgroup_alias] = {}
+            hosts = connection.execute(select_hostgroup_hosts, hostgroup_alias=hostgroup_alias)
+            self.result[hostgroup_alias]['hosts'] = [host['display_name'] for host in hosts]
+
+    def __init__(self):
+
+        self.defaultgroup = 'group_all'
+        self.ndo_database_uri = None
+        self.options = None
+
+        self.read_settings()
+        self.read_cli()
+
+        self.result = {}
+        self.result['all'] = {}
+        self.result['all']['hosts'] = []
+        self.result['_meta'] = {}
+        self.result['_meta']['hostvars'] = {}
+
+        if self.ndo_database_uri:
+            self.get_hosts()
+            if self.options.host:
+                print(json.dumps({}))
+            elif self.options.list:
+                print(json.dumps(self.result))
+            else:
+                sys.exit("usage: --list or --host HOSTNAME")
+        else:
+            sys.exit("Error: Database configuration is missing. See nagios_ndo.ini.")
+
+
+NagiosNDOInventory()
diff --git a/scripts/inventory/nsot.py b/scripts/inventory/nsot.py
new file mode 100644
index 0000000000..6b09704d3a
--- /dev/null
+++ b/scripts/inventory/nsot.py
@@ -0,0 +1,344 @@
+#!/usr/bin/env python
+
+'''
+nsot
+====
+
+Ansible Dynamic Inventory to pull hosts from NSoT, a flexible CMDB by Dropbox
+
+Features
+--------
+
+* Define host groups in the form of NSoT device attribute criteria
+
+* All parameters defined by the spec as of 2015-09-05 are supported.
+
+  + ``--list``: Returns a JSON hash of host groups -> hosts and top-level
+    ``_meta`` -> ``hostvars`` which correspond to all device attributes.
+
+    Group vars can be specified in the YAML configuration, noted below.
+
+  + ``--host <hostname>``: Returns a JSON hash where every item is a device
+    attribute.
+
+* In addition to all attributes assigned to the resource being returned, the
+  script will also append ``site_id`` and ``id`` as facts to utilize.
+
+
+Configuration
+-------------
+
+Since it'd be annoying and failure-prone to guess where your configuration
+file is, use ``NSOT_INVENTORY_CONFIG`` to specify the path to it.
+
+This file should adhere to the YAML spec. Each top-level variable must be a
+desired Ansible group name hashed with a single 'query' item that defines the
+NSoT attribute query.
+
+Queries follow the normal NSoT query syntax, `shown here`_
+
+.. _shown here: https://github.com/dropbox/pynsot#set-queries
+
+..
code:: yaml + + routers: + query: 'deviceType=ROUTER' + vars: + a: b + c: d + + juniper_fw: + query: 'deviceType=FIREWALL manufacturer=JUNIPER' + + not_f10: + query: '-manufacturer=FORCE10' + +The inventory will automatically use your ``.pynsotrc`` like normal pynsot from +cli would, so make sure that's configured appropriately. + +.. note:: + + Attributes I'm showing above are influenced from ones that the Trigger + project likes. As is the spirit of NSoT, use whichever attributes work best + for your workflow. + +If config file is blank or absent, the following default groups will be +created: + +* ``routers``: deviceType=ROUTER +* ``switches``: deviceType=SWITCH +* ``firewalls``: deviceType=FIREWALL + +These are likely not useful for everyone so please use the configuration. :) + +.. note:: + + By default, resources will only be returned for what your default + site is set for in your ``~/.pynsotrc``. + + If you want to specify, add an extra key under the group for ``site: n``. + +Output Examples +--------------- + +Here are some examples shown from just calling the command directly:: + + $ NSOT_INVENTORY_CONFIG=$PWD/test.yaml ansible_nsot --list | jq '.' + { + "routers": { + "hosts": [ + "test1.example.com" + ], + "vars": { + "cool_level": "very", + "group": "routers" + } + }, + "firewalls": { + "hosts": [ + "test2.example.com" + ], + "vars": { + "cool_level": "enough", + "group": "firewalls" + } + }, + "_meta": { + "hostvars": { + "test2.example.com": { + "make": "SRX", + "site_id": 1, + "id": 108 + }, + "test1.example.com": { + "make": "MX80", + "site_id": 1, + "id": 107 + } + } + }, + "rtr_and_fw": { + "hosts": [ + "test1.example.com", + "test2.example.com" + ], + "vars": {} + } + } + + + $ NSOT_INVENTORY_CONFIG=$PWD/test.yaml ansible_nsot --host test1 | jq '.' 
+ { + "make": "MX80", + "site_id": 1, + "id": 107 + } + +''' + +from __future__ import print_function +import sys +import os +import pkg_resources +import argparse +import json +import yaml +from textwrap import dedent +from pynsot.client import get_api_client +from pynsot.app import HttpServerError +from click.exceptions import UsageError + +from ansible.module_utils.six import string_types + + +def warning(*objs): + print("WARNING: ", *objs, file=sys.stderr) + + +class NSoTInventory(object): + '''NSoT Client object for gather inventory''' + + def __init__(self): + self.config = dict() + config_env = os.environ.get('NSOT_INVENTORY_CONFIG') + if config_env: + try: + config_file = os.path.abspath(config_env) + except IOError: # If file non-existent, use default config + self._config_default() + except Exception as e: + sys.exit('%s\n' % e) + + with open(config_file) as f: + try: + self.config.update(yaml.safe_load(f)) + except TypeError: # If empty file, use default config + warning('Empty config file') + self._config_default() + except Exception as e: + sys.exit('%s\n' % e) + else: # Use defaults if env var missing + self._config_default() + self.groups = self.config.keys() + self.client = get_api_client() + self._meta = {'hostvars': dict()} + + def _config_default(self): + default_yaml = ''' + --- + routers: + query: deviceType=ROUTER + switches: + query: deviceType=SWITCH + firewalls: + query: deviceType=FIREWALL + ''' + self.config = yaml.safe_load(dedent(default_yaml)) + + def do_list(self): + '''Direct callback for when ``--list`` is provided + + Relies on the configuration generated from init to run + _inventory_group() + ''' + inventory = dict() + for group, contents in self.config.items(): + group_response = self._inventory_group(group, contents) + inventory.update(group_response) + inventory.update({'_meta': self._meta}) + return json.dumps(inventory) + + def do_host(self, host): + return json.dumps(self._hostvars(host)) + + def _hostvars(self, host): + '''Return dictionary of all device attributes + + Depending on number of devices in NSoT, could be rather slow since this + has to request every device resource to filter through + ''' + device = [i for i in self.client.devices.get() + if host in i['hostname']][0] + attributes = device['attributes'] + attributes.update({'site_id': device['site_id'], 'id': device['id']}) + return attributes + + def _inventory_group(self, group, contents): + '''Takes a group and returns inventory for it as dict + + :param group: Group name + :type group: str + :param contents: The contents of the group's YAML config + :type contents: dict + + contents param should look like:: + + { + 'query': 'xx', + 'vars': + 'a': 'b' + } + + Will return something like:: + + { group: { + hosts: [], + vars: {}, + } + ''' + query = contents.get('query') + hostvars = contents.get('vars', dict()) + site = contents.get('site', dict()) + obj = {group: dict()} + obj[group]['hosts'] = [] + obj[group]['vars'] = hostvars + try: + assert isinstance(query, string_types) + except Exception: + sys.exit('ERR: Group queries must be a single string\n' + ' Group: %s\n' + ' Query: %s\n' % (group, query) + ) + try: + if site: + site = self.client.sites(site) + devices = site.devices.query.get(query=query) + else: + devices = self.client.devices.query.get(query=query) + except HttpServerError as e: + if '500' in str(e.response): + _site = 'Correct site id?' + _attr = 'Queried attributes actually exist?' 
+ questions = _site + '\n' + _attr + sys.exit('ERR: 500 from server.\n%s' % questions) + else: + raise + except UsageError: + sys.exit('ERR: Could not connect to server. Running?') + + # Would do a list comprehension here, but would like to save code/time + # and also acquire attributes in this step + for host in devices: + # Iterate through each device that matches query, assign hostname + # to the group's hosts array and then use this single iteration as + # a chance to update self._meta which will be used in the final + # return + hostname = host['hostname'] + obj[group]['hosts'].append(hostname) + attributes = host['attributes'] + attributes.update({'site_id': host['site_id'], 'id': host['id']}) + self._meta['hostvars'].update({hostname: attributes}) + + return obj + + +def parse_args(): + desc = __doc__.splitlines()[4] # Just to avoid being redundant + + # Establish parser with options and error out if no action provided + parser = argparse.ArgumentParser( + description=desc, + conflict_handler='resolve', + ) + + # Arguments + # + # Currently accepting (--list | -l) and (--host | -h) + # These must not be allowed together + parser.add_argument( + '--list', '-l', + help='Print JSON object containing hosts to STDOUT', + action='store_true', + dest='list_', # Avoiding syntax highlighting for list + ) + + parser.add_argument( + '--host', '-h', + help='Print JSON object containing hostvars for ', + action='store', + ) + args = parser.parse_args() + + if not args.list_ and not args.host: # Require at least one option + parser.exit(status=1, message='No action requested') + + if args.list_ and args.host: # Do not allow multiple options + parser.exit(status=1, message='Too many actions requested') + + return args + + +def main(): + '''Set up argument handling and callback routing''' + args = parse_args() + client = NSoTInventory() + + # Callback condition + if args.list_: + print(client.do_list()) + elif args.host: + print(client.do_host(args.host)) + + +if __name__ == '__main__': + main() diff --git a/scripts/inventory/nsot.yaml b/scripts/inventory/nsot.yaml new file mode 100644 index 0000000000..ebddbc8234 --- /dev/null +++ b/scripts/inventory/nsot.yaml @@ -0,0 +1,22 @@ +--- +juniper_routers: + query: 'deviceType=ROUTER manufacturer=JUNIPER' + vars: + group: juniper_routers + netconf: true + os: junos + +cisco_asa: + query: 'manufacturer=CISCO deviceType=FIREWALL' + vars: + group: cisco_asa + routed_vpn: false + stateful: true + +old_cisco_asa: + query: 'manufacturer=CISCO deviceType=FIREWALL -softwareVersion=8.3+' + vars: + old_nat: true + +not_f10: + query: '-manufacturer=FORCE10' diff --git a/scripts/inventory/openshift.py b/scripts/inventory/openshift.py new file mode 100644 index 0000000000..c0aa4f1b89 --- /dev/null +++ b/scripts/inventory/openshift.py @@ -0,0 +1,100 @@ +#!/usr/bin/env python + +# (c) 2013, Michael Scherer +# +# This file is part of Ansible, +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
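+# The loop near the bottom of this script derives the Ansible user, host and
+# group from each application's ssh_url. A worked example, using the sample
+# URL from the comment below (values are illustrative):
+#
+#   ssh_url = 'ssh://520311404832ce3e570000ff@blog-johndoe.example.org'
+#   user, host = ssh_url[6:].split('@')   # user -> '520311404832ce3e570000ff'
+#                                         # host -> 'blog-johndoe.example.org'
+#   app_name = host.split('-')[0]         # app_name -> 'blog' (group name)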
+
+DOCUMENTATION = '''
+---
+inventory: openshift
+short_description: OpenShift gears external inventory script
+description:
+  - Generates an inventory of OpenShift gears using the REST interface
+  - this permits reusing playbooks to set up an OpenShift gear
+version_added: None
+author: Michael Scherer
+'''
+
+import json
+import os
+import os.path
+import sys
+import StringIO
+
+from ansible.module_utils.urls import open_url
+from ansible.module_utils.six.moves import configparser as ConfigParser
+
+configparser = None
+
+
+def get_from_rhc_config(variable):
+    global configparser
+    CONF_FILE = os.path.expanduser('~/.openshift/express.conf')
+    if os.path.exists(CONF_FILE):
+        if not configparser:
+            ini_str = '[root]\n' + open(CONF_FILE, 'r').read()
+            configparser = ConfigParser.SafeConfigParser()
+            configparser.readfp(StringIO.StringIO(ini_str))
+        try:
+            return configparser.get('root', variable)
+        except ConfigParser.NoOptionError:
+            return None
+
+
+def get_config(env_var, config_var):
+    result = os.getenv(env_var)
+    if not result:
+        result = get_from_rhc_config(config_var)
+    if not result:
+        sys.exit("failed=True msg='missing %s'" % env_var)
+    return result
+
+
+def get_json_from_api(url, username, password):
+    headers = {'Accept': 'application/json; version=1.5'}
+    response = open_url(url, headers=headers, url_username=username, url_password=password)
+    return json.loads(response.read())['data']
+
+
+username = get_config('ANSIBLE_OPENSHIFT_USERNAME', 'default_rhlogin')
+password = get_config('ANSIBLE_OPENSHIFT_PASSWORD', 'password')
+broker_url = 'https://%s/broker/rest/' % get_config('ANSIBLE_OPENSHIFT_BROKER', 'libra_server')
+
+
+response = get_json_from_api(broker_url + '/domains', username, password)
+
+response = get_json_from_api("%s/domains/%s/applications" %
+                             (broker_url, response[0]['id']), username, password)
+
+result = {}
+for app in response:
+
+    # ssh://520311404832ce3e570000ff@blog-johndoe.example.org
+    (user, host) = app['ssh_url'][6:].split('@')
+    app_name = host.split('-')[0]
+
+    result[app_name] = {}
+    result[app_name]['hosts'] = []
+    result[app_name]['hosts'].append(host)
+    result[app_name]['vars'] = {}
+    result[app_name]['vars']['ansible_ssh_user'] = user
+
+if len(sys.argv) == 2 and sys.argv[1] == '--list':
+    print(json.dumps(result))
+elif len(sys.argv) == 3 and sys.argv[1] == '--host':
+    print(json.dumps({}))
+else:
+    print("Need an argument, either --list or --host <hostname>")
diff --git a/scripts/inventory/openstack.yml b/scripts/inventory/openstack.yml
new file mode 100644
index 0000000000..8053fb8fda
--- /dev/null
+++ b/scripts/inventory/openstack.yml
@@ -0,0 +1,24 @@
+clouds:
+  vexxhost:
+    profile: vexxhost
+    auth:
+      project_name: 39e296b2-fc96-42bf-8091-cb742fa13da9
+      username: fb886a9b-c37b-442a-9be3-964bed961e04
+      password: fantastic-password1
+  rax:
+    profile: rackspace
+    auth:
+      username: example
+      password: spectacular-password
+      project_id: 2352426
+    region_name: DFW,ORD,IAD
+  devstack:
+    auth:
+      auth_url: https://devstack.example.com
+      username: stack
+      password: stack
+      project_name: stack
+ansible:
+  use_hostnames: True
+  expand_hostvars: False
+  fail_on_errors: True
diff --git a/scripts/inventory/openstack_inventory.py b/scripts/inventory/openstack_inventory.py
new file mode 100644
index 0000000000..ab2d96cb8b
--- /dev/null
+++ b/scripts/inventory/openstack_inventory.py
@@ -0,0 +1,272 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2012, Marco Vito Moscaritolo
+# Copyright (c) 2013, Jesse Keating
+# Copyright (c) 2015, Hewlett-Packard Development Company, L.P.
+# Copyright (c) 2016, Rackspace Australia +# +# This module is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This software is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this software. If not, see . + +# The OpenStack Inventory module uses os-client-config for configuration. +# https://github.com/openstack/os-client-config +# This means it will either: +# - Respect normal OS_* environment variables like other OpenStack tools +# - Read values from a clouds.yaml file. +# If you want to configure via clouds.yaml, you can put the file in: +# - Current directory +# - ~/.config/openstack/clouds.yaml +# - /etc/openstack/clouds.yaml +# - /etc/ansible/openstack.yml +# The clouds.yaml file can contain entries for multiple clouds and multiple +# regions of those clouds. If it does, this inventory module will by default +# connect to all of them and present them as one contiguous inventory. You +# can limit to one cloud by passing the `--cloud` parameter, or use the +# OS_CLOUD environment variable. If caching is enabled, and a cloud is +# selected, then per-cloud cache folders will be used. +# +# See the adjacent openstack.yml file for an example config file +# There are two ansible inventory specific options that can be set in +# the inventory section. +# expand_hostvars controls whether or not the inventory will make extra API +# calls to fill out additional information about each server +# use_hostnames changes the behavior from registering every host with its UUID +# and making a group of its hostname to only doing this if the +# hostname in question has more than one server +# fail_on_errors causes the inventory to fail and return no hosts if one cloud +# has failed (for example, bad credentials or being offline). +# When set to False, the inventory will return hosts from +# whichever other clouds it can contact. (Default: True) +# +# Also it is possible to pass the correct user by setting an ansible_user: $myuser +# metadata attribute. 
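+# As a sketch of typical use (cloud and host names are illustrative, not part
+# of this repository):
+#
+#   OS_CLOUD=devstack ./openstack_inventory.py --list --refresh
+#   ./openstack_inventory.py --host web01.example.com
+#
+# --list prints a JSON hash of groups -> hosts plus "_meta" -> "hostvars";
+# --host prints the hostvars of a single server.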
+ +import argparse +import collections +import os +import sys +import time +from distutils.version import StrictVersion +from io import StringIO + +import json + +import openstack as sdk +from openstack.cloud import inventory as sdk_inventory +from openstack.config import loader as cloud_config + +CONFIG_FILES = ['/etc/ansible/openstack.yaml', '/etc/ansible/openstack.yml'] + + +def get_groups_from_server(server_vars, namegroup=True): + groups = [] + + region = server_vars['region'] + cloud = server_vars['cloud'] + metadata = server_vars.get('metadata', {}) + + # Create a group for the cloud + groups.append(cloud) + + # Create a group on region + if region: + groups.append(region) + + # And one by cloud_region + groups.append("%s_%s" % (cloud, region)) + + # Check if group metadata key in servers' metadata + if 'group' in metadata: + groups.append(metadata['group']) + + for extra_group in metadata.get('groups', '').split(','): + if extra_group: + groups.append(extra_group.strip()) + + groups.append('instance-%s' % server_vars['id']) + if namegroup: + groups.append(server_vars['name']) + + for key in ('flavor', 'image'): + if 'name' in server_vars[key]: + groups.append('%s-%s' % (key, server_vars[key]['name'])) + + for key, value in iter(metadata.items()): + groups.append('meta-%s_%s' % (key, value)) + + az = server_vars.get('az', None) + if az: + # Make groups for az, region_az and cloud_region_az + groups.append(az) + groups.append('%s_%s' % (region, az)) + groups.append('%s_%s_%s' % (cloud, region, az)) + return groups + + +def get_host_groups(inventory, refresh=False, cloud=None): + (cache_file, cache_expiration_time) = get_cache_settings(cloud) + if is_cache_stale(cache_file, cache_expiration_time, refresh=refresh): + groups = to_json(get_host_groups_from_cloud(inventory)) + with open(cache_file, 'w') as f: + f.write(groups) + else: + with open(cache_file, 'r') as f: + groups = f.read() + return groups + + +def append_hostvars(hostvars, groups, key, server, namegroup=False): + hostvars[key] = dict( + ansible_ssh_host=server['interface_ip'], + ansible_host=server['interface_ip'], + openstack=server) + + metadata = server.get('metadata', {}) + if 'ansible_user' in metadata: + hostvars[key]['ansible_user'] = metadata['ansible_user'] + + for group in get_groups_from_server(server, namegroup=namegroup): + groups[group].append(key) + + +def get_host_groups_from_cloud(inventory): + groups = collections.defaultdict(list) + firstpass = collections.defaultdict(list) + hostvars = {} + list_args = {} + if hasattr(inventory, 'extra_config'): + use_hostnames = inventory.extra_config['use_hostnames'] + list_args['expand'] = inventory.extra_config['expand_hostvars'] + if StrictVersion(sdk.version.__version__) >= StrictVersion("0.13.0"): + list_args['fail_on_cloud_config'] = \ + inventory.extra_config['fail_on_errors'] + else: + use_hostnames = False + + for server in inventory.list_hosts(**list_args): + + if 'interface_ip' not in server: + continue + firstpass[server['name']].append(server) + for name, servers in firstpass.items(): + if len(servers) == 1 and use_hostnames: + append_hostvars(hostvars, groups, name, servers[0]) + else: + server_ids = set() + # Trap for duplicate results + for server in servers: + server_ids.add(server['id']) + if len(server_ids) == 1 and use_hostnames: + append_hostvars(hostvars, groups, name, servers[0]) + else: + for server in servers: + append_hostvars( + hostvars, groups, server['id'], server, + namegroup=True) + groups['_meta'] = {'hostvars': hostvars} + return 
groups + + +def is_cache_stale(cache_file, cache_expiration_time, refresh=False): + ''' Determines if cache file has expired, or if it is still valid ''' + if refresh: + return True + if os.path.isfile(cache_file) and os.path.getsize(cache_file) > 0: + mod_time = os.path.getmtime(cache_file) + current_time = time.time() + if (mod_time + cache_expiration_time) > current_time: + return False + return True + + +def get_cache_settings(cloud=None): + config_files = cloud_config.CONFIG_FILES + CONFIG_FILES + if cloud: + config = cloud_config.OpenStackConfig( + config_files=config_files).get_one(cloud=cloud) + else: + config = cloud_config.OpenStackConfig( + config_files=config_files).get_all()[0] + # For inventory-wide caching + cache_expiration_time = config.get_cache_expiration_time() + cache_path = config.get_cache_path() + if cloud: + cache_path = '{0}_{1}'.format(cache_path, cloud) + if not os.path.exists(cache_path): + os.makedirs(cache_path) + cache_file = os.path.join(cache_path, 'ansible-inventory.cache') + return (cache_file, cache_expiration_time) + + +def to_json(in_dict): + return json.dumps(in_dict, sort_keys=True, indent=2) + + +def parse_args(): + parser = argparse.ArgumentParser(description='OpenStack Inventory Module') + parser.add_argument('--cloud', default=os.environ.get('OS_CLOUD'), + help='Cloud name (default: None') + parser.add_argument('--private', + action='store_true', + help='Use private address for ansible host') + parser.add_argument('--refresh', action='store_true', + help='Refresh cached information') + parser.add_argument('--debug', action='store_true', default=False, + help='Enable debug output') + group = parser.add_mutually_exclusive_group(required=True) + group.add_argument('--list', action='store_true', + help='List active servers') + group.add_argument('--host', help='List details about the specific host') + + return parser.parse_args() + + +def main(): + args = parse_args() + try: + # openstacksdk library may write to stdout, so redirect this + sys.stdout = StringIO() + config_files = cloud_config.CONFIG_FILES + CONFIG_FILES + sdk.enable_logging(debug=args.debug) + inventory_args = dict( + refresh=args.refresh, + config_files=config_files, + private=args.private, + cloud=args.cloud, + ) + if hasattr(sdk_inventory.OpenStackInventory, 'extra_config'): + inventory_args.update(dict( + config_key='ansible', + config_defaults={ + 'use_hostnames': False, + 'expand_hostvars': True, + 'fail_on_errors': True, + } + )) + + inventory = sdk_inventory.OpenStackInventory(**inventory_args) + + sys.stdout = sys.__stdout__ + if args.list: + output = get_host_groups(inventory, refresh=args.refresh, cloud=args.cloud) + elif args.host: + output = to_json(inventory.get_host(args.host)) + print(output) + except sdk.exceptions.OpenStackCloudException as e: + sys.stderr.write('%s\n' % e.message) + sys.exit(1) + sys.exit(0) + + +if __name__ == '__main__': + main() diff --git a/scripts/inventory/openvz.py b/scripts/inventory/openvz.py new file mode 100644 index 0000000000..5ea039c827 --- /dev/null +++ b/scripts/inventory/openvz.py @@ -0,0 +1,85 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# +# openvz.py +# +# Copyright 2014 jordonr +# +# This file is part of Ansible. +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. 
+# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# +# Inspired by libvirt_lxc.py inventory script +# https://github.com/ansible/ansible/blob/e5ef0eca03cbb6c8950c06dc50d0ca22aa8902f4/plugins/inventory/libvirt_lxc.py +# +# Groups are determined by the description field of openvz guests +# multiple groups can be separated by commas: webserver,dbserver + +from subprocess import Popen, PIPE +import sys +import json + + +# List openvz hosts +vzhosts = ['vzhost1', 'vzhost2', 'vzhost3'] +# Add openvz hosts to the inventory and Add "_meta" trick +inventory = {'vzhosts': {'hosts': vzhosts}, '_meta': {'hostvars': {}}} +# default group, when description not defined +default_group = ['vzguest'] + + +def get_guests(): + # Loop through vzhosts + for h in vzhosts: + # SSH to vzhost and get the list of guests in json + pipe = Popen(['ssh', h, 'vzlist', '-j'], stdout=PIPE, universal_newlines=True) + + # Load Json info of guests + json_data = json.loads(pipe.stdout.read()) + + # loop through guests + for j in json_data: + # Add information to host vars + inventory['_meta']['hostvars'][j['hostname']] = { + 'ctid': j['ctid'], + 'veid': j['veid'], + 'vpsid': j['vpsid'], + 'private_path': j['private'], + 'root_path': j['root'], + 'ip': j['ip'] + } + + # determine group from guest description + if j['description'] is not None: + groups = j['description'].split(",") + else: + groups = default_group + + # add guest to inventory + for g in groups: + if g not in inventory: + inventory[g] = {'hosts': []} + + inventory[g]['hosts'].append(j['hostname']) + + return inventory + + +if len(sys.argv) == 2 and sys.argv[1] == '--list': + inv_json = get_guests() + print(json.dumps(inv_json, sort_keys=True)) +elif len(sys.argv) == 3 and sys.argv[1] == '--host': + print(json.dumps({})) +else: + print("Need an argument, either --list or --host ") diff --git a/scripts/inventory/ovirt.ini b/scripts/inventory/ovirt.ini new file mode 100644 index 0000000000..d9aaf8a73e --- /dev/null +++ b/scripts/inventory/ovirt.ini @@ -0,0 +1,35 @@ +# Copyright 2013 Google Inc. +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + + +# Author: Josha Inglis based on the gce.ini by Eric Johnson + +[ovirt] +# For ovirt.py script, which can be used with Python SDK version 3 +# Service Account configuration information can be stored in the +# libcloud 'secrets.py' file. Ideally, the 'secrets.py' file will already +# exist in your PYTHONPATH and be picked up automatically with an import +# statement in the inventory script. However, you can specify an absolute +# path to the secrets.py file with 'libcloud_secrets' parameter. 
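+# For example (the path is illustrative; the script requires only that it
+# end in 'secrets.py'):
+# ovirt_api_secrets = /absolute/path/to/secrets.py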
+ovirt_api_secrets = + +# If you are not going to use a 'secrets.py' file, you can set the necessary +# authorization parameters here. +ovirt_url = +ovirt_username = +ovirt_password = +ovirt_ca_file = diff --git a/scripts/inventory/ovirt.py b/scripts/inventory/ovirt.py new file mode 100644 index 0000000000..f97ab7a24a --- /dev/null +++ b/scripts/inventory/ovirt.py @@ -0,0 +1,289 @@ +#!/usr/bin/env python +# Copyright 2015 IIX Inc. +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +""" +ovirt external inventory script +================================= + +Generates inventory that Ansible can understand by making API requests to +oVirt via the ovirt-engine-sdk-python library. + +When run against a specific host, this script returns the following variables +based on the data obtained from the ovirt_sdk Node object: + - ovirt_uuid + - ovirt_id + - ovirt_image + - ovirt_machine_type + - ovirt_ips + - ovirt_name + - ovirt_description + - ovirt_status + - ovirt_zone + - ovirt_tags + - ovirt_stats + +When run in --list mode, instances are grouped by the following categories: + + - zone: + zone group name. + - instance tags: + An entry is created for each tag. For example, if you have two instances + with a common tag called 'foo', they will both be grouped together under + the 'tag_foo' name. + - network name: + the name of the network is appended to 'network_' (e.g. the 'default' + network will result in a group named 'network_default') + - running status: + group name prefixed with 'status_' (e.g. status_up, status_down,..) 
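+
+For illustration only, a fragment of possible --list output (all values are
+made up):
+
+  {
+    "status_up": ["vm-01", "vm-02"],
+    "tag_production": ["vm-01"],
+    "network_default": ["vm-01", "vm-02"],
+    "_meta": {"hostvars": {"vm-01": {"ovirt_name": "vm-01", "ovirt_status": "up"}}}
+  }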
+ +Examples: + Execute uname on all instances in the us-central1-a zone + $ ansible -i ovirt.py us-central1-a -m shell -a "/bin/uname -a" + + Use the ovirt inventory script to print out instance specific information + $ contrib/inventory/ovirt.py --host my_instance + +Author: Josha Inglis based on the gce.py by Eric Johnson +Version: 0.0.1 +""" + +USER_AGENT_PRODUCT = "Ansible-ovirt_inventory_plugin" +USER_AGENT_VERSION = "v1" + +import sys +import os +import argparse +from collections import defaultdict +from ansible.module_utils.six.moves import configparser as ConfigParser + +import json + +try: + # noinspection PyUnresolvedReferences + from ovirtsdk.api import API + # noinspection PyUnresolvedReferences + from ovirtsdk.xml import params +except ImportError: + print("ovirt inventory script requires ovirt-engine-sdk-python") + sys.exit(1) + + +class OVirtInventory(object): + def __init__(self): + # Read settings and parse CLI arguments + self.args = self.parse_cli_args() + self.driver = self.get_ovirt_driver() + + # Just display data for specific host + if self.args.host: + print(self.json_format_dict( + self.node_to_dict(self.get_instance(self.args.host)), + pretty=self.args.pretty + )) + sys.exit(0) + + # Otherwise, assume user wants all instances grouped + print( + self.json_format_dict( + data=self.group_instances(), + pretty=self.args.pretty + ) + ) + sys.exit(0) + + @staticmethod + def get_ovirt_driver(): + """ + Determine the ovirt authorization settings and return a ovirt_sdk driver. + + :rtype : ovirtsdk.api.API + """ + kwargs = {} + + ovirt_ini_default_path = os.path.join( + os.path.dirname(os.path.realpath(__file__)), "ovirt.ini") + ovirt_ini_path = os.environ.get('OVIRT_INI_PATH', ovirt_ini_default_path) + + # Create a ConfigParser. + # This provides empty defaults to each key, so that environment + # variable configuration (as opposed to INI configuration) is able + # to work. + config = ConfigParser.SafeConfigParser(defaults={ + 'ovirt_url': '', + 'ovirt_username': '', + 'ovirt_password': '', + 'ovirt_api_secrets': '', + }) + if 'ovirt' not in config.sections(): + config.add_section('ovirt') + config.read(ovirt_ini_path) + + # Attempt to get ovirt params from a configuration file, if one + # exists. + secrets_path = config.get('ovirt', 'ovirt_api_secrets') + secrets_found = False + try: + # noinspection PyUnresolvedReferences,PyPackageRequirements + import secrets + + kwargs = getattr(secrets, 'OVIRT_KEYWORD_PARAMS', {}) + secrets_found = True + except ImportError: + pass + + if not secrets_found and secrets_path: + if not secrets_path.endswith('secrets.py'): + err = "Must specify ovirt_sdk secrets file as /absolute/path/to/secrets.py" + print(err) + sys.exit(1) + sys.path.append(os.path.dirname(secrets_path)) + try: + # noinspection PyUnresolvedReferences,PyPackageRequirements + import secrets + + kwargs = getattr(secrets, 'OVIRT_KEYWORD_PARAMS', {}) + except ImportError: + pass + if not secrets_found: + kwargs = { + 'url': config.get('ovirt', 'ovirt_url'), + 'username': config.get('ovirt', 'ovirt_username'), + 'password': config.get('ovirt', 'ovirt_password'), + } + + # If the appropriate environment variables are set, they override + # other configuration; process those into our args and kwargs. 
+ kwargs['url'] = os.environ.get('OVIRT_URL', kwargs['url']) + kwargs['username'] = next(val for val in [os.environ.get('OVIRT_EMAIL'), os.environ.get('OVIRT_USERNAME'), kwargs['username']] if val is not None) + kwargs['password'] = next(val for val in [os.environ.get('OVIRT_PASS'), os.environ.get('OVIRT_PASSWORD'), kwargs['password']] if val is not None) + + # Retrieve and return the ovirt driver. + return API(insecure=True, **kwargs) + + @staticmethod + def parse_cli_args(): + """ + Command line argument processing + + :rtype : argparse.Namespace + """ + + parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on ovirt') + parser.add_argument('--list', action='store_true', default=True, help='List instances (default: True)') + parser.add_argument('--host', action='store', help='Get all information about an instance') + parser.add_argument('--pretty', action='store_true', default=False, help='Pretty format (default: False)') + return parser.parse_args() + + def node_to_dict(self, inst): + """ + :type inst: params.VM + """ + if inst is None: + return {} + + inst.get_custom_properties() + ips = [ip.get_address() for ip in inst.get_guest_info().get_ips().get_ip()] \ + if inst.get_guest_info() is not None else [] + stats = {} + for stat in inst.get_statistics().list(): + stats[stat.get_name()] = stat.get_values().get_value()[0].get_datum() + + return { + 'ovirt_uuid': inst.get_id(), + 'ovirt_id': inst.get_id(), + 'ovirt_image': inst.get_os().get_type(), + 'ovirt_machine_type': self.get_machine_type(inst), + 'ovirt_ips': ips, + 'ovirt_name': inst.get_name(), + 'ovirt_description': inst.get_description(), + 'ovirt_status': inst.get_status().get_state(), + 'ovirt_zone': inst.get_cluster().get_id(), + 'ovirt_tags': self.get_tags(inst), + 'ovirt_stats': stats, + # Hosts don't have a public name, so we add an IP + 'ansible_ssh_host': ips[0] if len(ips) > 0 else None + } + + @staticmethod + def get_tags(inst): + """ + :type inst: params.VM + """ + return [x.get_name() for x in inst.get_tags().list()] + + def get_machine_type(self, inst): + inst_type = inst.get_instance_type() + if inst_type: + return self.driver.instancetypes.get(id=inst_type.id).name + + # noinspection PyBroadException,PyUnusedLocal + def get_instance(self, instance_name): + """Gets details about a specific instance """ + try: + return self.driver.vms.get(name=instance_name) + except Exception as e: + return None + + def group_instances(self): + """Group all instances""" + groups = defaultdict(list) + meta = {"hostvars": {}} + + for node in self.driver.vms.list(): + assert isinstance(node, params.VM) + name = node.get_name() + + meta["hostvars"][name] = self.node_to_dict(node) + + zone = node.get_cluster().get_name() + groups[zone].append(name) + + tags = self.get_tags(node) + for t in tags: + tag = 'tag_%s' % t + groups[tag].append(name) + + nets = [x.get_name() for x in node.get_nics().list()] + for net in nets: + net = 'network_%s' % net + groups[net].append(name) + + status = node.get_status().get_state() + stat = 'status_%s' % status.lower() + if stat in groups: + groups[stat].append(name) + else: + groups[stat] = [name] + + groups["_meta"] = meta + + return groups + + @staticmethod + def json_format_dict(data, pretty=False): + """ Converts a dict to a JSON object and dumps it as a formatted + string """ + + if pretty: + return json.dumps(data, sort_keys=True, indent=2) + else: + return json.dumps(data) + + +# Run the script +OVirtInventory() diff --git a/scripts/inventory/ovirt4.py 
b/scripts/inventory/ovirt4.py
new file mode 100644
index 0000000000..74205ae449
--- /dev/null
+++ b/scripts/inventory/ovirt4.py
@@ -0,0 +1,257 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Red Hat, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+"""
+oVirt dynamic inventory script
+==============================
+
+Generates a dynamic inventory file for oVirt.
+
+The script returns the following attributes for each virtual machine:
+ - id
+ - name
+ - host
+ - cluster
+ - status
+ - description
+ - fqdn
+ - os_type
+ - template
+ - tags
+ - statistics
+ - devices
+
+When run in --list mode, virtual machines are grouped by the following categories:
+ - cluster
+ - tag
+ - status
+
+ Note: If a virtual machine has more than one tag, it will appear in each of
+ the corresponding tag groups.
+
+Examples:
+  # Execute update of system on webserver virtual machine:
+
+   $ ansible -i contrib/inventory/ovirt4.py webserver -m yum -a "name=* state=latest"
+
+  # Get webserver virtual machine information:
+
+   $ contrib/inventory/ovirt4.py --host webserver
+
+Author: Ondra Machacek (@machacekondra)
+"""
+
+import argparse
+import os
+import sys
+
+from collections import defaultdict
+
+from ansible.module_utils.six.moves import configparser
+
+import json
+
+try:
+    import ovirtsdk4 as sdk
+    import ovirtsdk4.types as otypes
+except ImportError:
+    print('oVirt inventory script requires ovirt-engine-sdk-python >= 4.0.0')
+    sys.exit(1)
+
+
+def parse_args():
+    """
+    Create command line parser for oVirt dynamic inventory script.
+    """
+    parser = argparse.ArgumentParser(
+        description='Ansible dynamic inventory script for oVirt.',
+    )
+    parser.add_argument(
+        '--list',
+        action='store_true',
+        default=True,
+        help='Get data of all virtual machines (default: True).',
+    )
+    parser.add_argument(
+        '--host',
+        help='Get data of virtual machines running on specified host.',
+    )
+    parser.add_argument(
+        '--pretty',
+        action='store_true',
+        default=False,
+        help='Pretty format (default: False).',
+    )
+    return parser.parse_args()
+
+
+def create_connection():
+    """
+    Create a connection to the oVirt engine API.
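+
+    Settings are read from ovirt.ini (or the file named by the OVIRT_INI_PATH
+    environment variable); an illustrative [ovirt] section, with placeholder
+    values:
+
+        [ovirt]
+        ovirt_url = https://engine.example.com/ovirt-engine/api
+        ovirt_username = admin@internal
+        ovirt_password = secret
+        ovirt_ca_file = ca.pem
+
+    The OVIRT_URL, OVIRT_USERNAME, OVIRT_PASSWORD and OVIRT_CAFILE environment
+    variables are used as fallback defaults for these options.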
+ """ + # Get the path of the configuration file, by default use + # 'ovirt.ini' file in script directory: + default_path = os.path.join( + os.path.dirname(os.path.realpath(__file__)), + 'ovirt.ini', + ) + config_path = os.environ.get('OVIRT_INI_PATH', default_path) + + # Create parser and add ovirt section if it doesn't exist: + config = configparser.SafeConfigParser( + defaults={ + 'ovirt_url': os.environ.get('OVIRT_URL'), + 'ovirt_username': os.environ.get('OVIRT_USERNAME'), + 'ovirt_password': os.environ.get('OVIRT_PASSWORD'), + 'ovirt_ca_file': os.environ.get('OVIRT_CAFILE', ''), + } + ) + if not config.has_section('ovirt'): + config.add_section('ovirt') + config.read(config_path) + + # Create a connection with options defined in ini file: + return sdk.Connection( + url=config.get('ovirt', 'ovirt_url'), + username=config.get('ovirt', 'ovirt_username'), + password=config.get('ovirt', 'ovirt_password', raw=True), + ca_file=config.get('ovirt', 'ovirt_ca_file') or None, + insecure=not config.get('ovirt', 'ovirt_ca_file'), + ) + + +def get_dict_of_struct(connection, vm): + """ + Transform SDK Vm Struct type to Python dictionary. + """ + if vm is None: + return dict() + + vms_service = connection.system_service().vms_service() + clusters_service = connection.system_service().clusters_service() + vm_service = vms_service.vm_service(vm.id) + devices = vm_service.reported_devices_service().list() + tags = vm_service.tags_service().list() + stats = vm_service.statistics_service().list() + labels = vm_service.affinity_labels_service().list() + groups = clusters_service.cluster_service( + vm.cluster.id + ).affinity_groups_service().list() + + return { + 'id': vm.id, + 'name': vm.name, + 'host': connection.follow_link(vm.host).name if vm.host else None, + 'cluster': connection.follow_link(vm.cluster).name, + 'status': str(vm.status), + 'description': vm.description, + 'fqdn': vm.fqdn, + 'os_type': vm.os.type, + 'template': connection.follow_link(vm.template).name, + 'tags': [tag.name for tag in tags], + 'affinity_labels': [label.name for label in labels], + 'affinity_groups': [ + group.name for group in groups + if vm.name in [vm.name for vm in connection.follow_link(group.vms)] + ], + 'statistics': dict( + (stat.name, stat.values[0].datum) for stat in stats if stat.values + ), + 'devices': dict( + (device.name, [ip.address for ip in device.ips]) for device in devices if device.ips + ), + 'ansible_host': next((device.ips[0].address for device in devices if device.ips), None) + } + + +def get_data(connection, vm_name=None): + """ + Obtain data of `vm_name` if specified, otherwise obtain data of all vms. 
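+
+    In list mode the result maps group names such as 'cluster_<name>',
+    'tag_<name>', 'status_<state>', 'affinity_group_<name>' and
+    'affinity_label_<name>' to lists of VM names, plus a '_meta' key holding
+    the per-VM dictionaries built by get_dict_of_struct().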
+ """ + vms_service = connection.system_service().vms_service() + clusters_service = connection.system_service().clusters_service() + + if vm_name: + vm = vms_service.list(search='name=%s' % vm_name) or [None] + data = get_dict_of_struct( + connection=connection, + vm=vm[0], + ) + else: + vms = dict() + data = defaultdict(list) + for vm in vms_service.list(): + name = vm.name + vm_service = vms_service.vm_service(vm.id) + cluster_service = clusters_service.cluster_service(vm.cluster.id) + + # Add vm to vms dict: + vms[name] = get_dict_of_struct(connection, vm) + + # Add vm to cluster group: + cluster_name = connection.follow_link(vm.cluster).name + data['cluster_%s' % cluster_name].append(name) + + # Add vm to tag group: + tags_service = vm_service.tags_service() + for tag in tags_service.list(): + data['tag_%s' % tag.name].append(name) + + # Add vm to status group: + data['status_%s' % vm.status].append(name) + + # Add vm to affinity group: + for group in cluster_service.affinity_groups_service().list(): + if vm.name in [ + v.name for v in connection.follow_link(group.vms) + ]: + data['affinity_group_%s' % group.name].append(vm.name) + + # Add vm to affinity label group: + affinity_labels_service = vm_service.affinity_labels_service() + for label in affinity_labels_service.list(): + data['affinity_label_%s' % label.name].append(name) + + data["_meta"] = { + 'hostvars': vms, + } + + return data + + +def main(): + args = parse_args() + connection = create_connection() + + print( + json.dumps( + obj=get_data( + connection=connection, + vm_name=args.host, + ), + sort_keys=args.pretty, + indent=args.pretty * 2, + ) + ) + + +if __name__ == '__main__': + main() diff --git a/scripts/inventory/packet_net.ini b/scripts/inventory/packet_net.ini new file mode 100644 index 0000000000..6dcc027b15 --- /dev/null +++ b/scripts/inventory/packet_net.ini @@ -0,0 +1,53 @@ +# Ansible Packet.net external inventory script settings +# + +[packet] + +# Packet projects to get info for. Set this to 'all' to get info for all +# projects in Packet and merge the results together. Alternatively, set +# this to a comma separated list of projects. E.g. 'project-1,project-3,project-4' +projects = all +projects_exclude = + +# By default, packet devices in all state are returned. Specify +# packet device states to return as a comma-separated list. +# device_states = active, inactive, queued, provisioning + +# items per page to retrieve from packet api at a time +items_per_page = 999 + +# API calls to Packet are costly. For this reason, we cache the results of an API +# call. Set this to the path you want cache files to be written to. Two files +# will be written to this directory: +# - ansible-packet.cache +# - ansible-packet.index +cache_path = ~/.ansible/tmp + +# The number of seconds a cache file is considered valid. After this many +# seconds, a new API call will be made, and the cache file will be updated. +# To disable the cache, set this value to 0 +cache_max_age = 300 + +# Organize groups into a nested/hierarchy instead of a flat namespace. +nested_groups = False + +# Replace - tags when creating groups to avoid issues with ansible +replace_dash_in_groups = True + +# The packet inventory output can become very large. To manage its size, +# configure which groups should be created. 
+group_by_device_id = True
+group_by_hostname = True
+group_by_facility = True
+group_by_project = True
+group_by_operating_system = True
+group_by_plan_type = True
+group_by_tags = True
+group_by_tag_none = True
+
+# If you only want to include hosts that match a certain regular expression
+# pattern_include = staging-*
+
+# If you want to exclude any hosts that match a certain regular expression
+# pattern_exclude = staging-*
+
diff --git a/scripts/inventory/packet_net.py b/scripts/inventory/packet_net.py
new file mode 100644
index 0000000000..22f989a9d9
--- /dev/null
+++ b/scripts/inventory/packet_net.py
@@ -0,0 +1,506 @@
+#!/usr/bin/env python
+
+'''
+Packet.net external inventory script
+====================================
+
+Generates inventory that Ansible can understand by making API requests to
+Packet.net using the Packet library.
+
+NOTE: This script assumes Ansible is being executed where the environment
+variable holding the Packet API token has already been set:
+    export PACKET_API_TOKEN=Bfse9F24SFtfs423Gsd3ifGsd43sSdfs
+
+This script also assumes there is a packet_net.ini file alongside it. To specify a
+different path to packet_net.ini, define the PACKET_NET_INI_PATH environment variable:
+
+    export PACKET_NET_INI_PATH=/path/to/my_packet_net.ini
+
+'''
+
+# (c) 2016, Peter Sankauskas
+# (c) 2017, Tomas Karasek
+#
+# This file is part of Ansible,
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
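+# A quick smoke test from a shell (the token and device name are
+# placeholders):
+#
+#   export PACKET_API_TOKEN=xxxxxxxxxxxxxxxx
+#   ./packet_net.py --refresh-cache --list
+#   ./packet_net.py --host my-device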
+
+######################################################################
+
+import sys
+import os
+import argparse
+import re
+from time import time
+
+from ansible.module_utils import six
+from ansible.module_utils.six.moves import configparser
+
+try:
+    import packet
+except ImportError:
+    sys.exit("failed=True msg='`packet-python` library required for this script'")
+
+import traceback
+
+
+import json
+
+
+ini_section = 'packet'
+
+
+class PacketInventory(object):
+
+    def _empty_inventory(self):
+        return {"_meta": {"hostvars": {}}}
+
+    def __init__(self):
+        ''' Main execution path '''
+
+        # Inventory grouped by device IDs, hostnames, projects, facilities,
+        # operating systems, plans and tags
+        self.inventory = self._empty_inventory()
+
+        # Index of hostname (address) to device ID
+        self.index = {}
+
+        # Read settings and parse CLI arguments
+        self.parse_cli_args()
+        self.read_settings()
+
+        # Cache
+        if self.args.refresh_cache:
+            self.do_api_calls_update_cache()
+        elif not self.is_cache_valid():
+            self.do_api_calls_update_cache()
+
+        # Data to print
+        if self.args.host:
+            data_to_print = self.get_host_info()
+
+        elif self.args.list:
+            # Display list of devices for inventory
+            if self.inventory == self._empty_inventory():
+                data_to_print = self.get_inventory_from_cache()
+            else:
+                data_to_print = self.json_format_dict(self.inventory, True)
+
+        print(data_to_print)
+
+    def is_cache_valid(self):
+        ''' Determines if the cache files have expired, or if they are still valid '''
+
+        if os.path.isfile(self.cache_path_cache):
+            mod_time = os.path.getmtime(self.cache_path_cache)
+            current_time = time()
+            if (mod_time + self.cache_max_age) > current_time:
+                if os.path.isfile(self.cache_path_index):
+                    return True
+
+        return False
+
+    def read_settings(self):
+        ''' Reads the settings from the packet_net.ini file '''
+        if six.PY3:
+            config = configparser.ConfigParser()
+        else:
+            config = configparser.SafeConfigParser()
+
+        _ini_path_raw = os.environ.get('PACKET_NET_INI_PATH')
+
+        if _ini_path_raw:
+            packet_ini_path = os.path.expanduser(os.path.expandvars(_ini_path_raw))
+        else:
+            packet_ini_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'packet_net.ini')
+        config.read(packet_ini_path)
+
+        # items per page
+        self.items_per_page = 999
+        if config.has_option(ini_section, 'items_per_page'):
+            self.items_per_page = config.getint(ini_section, 'items_per_page')
+
+        # Instance states to be gathered in inventory. Default is all of them.
+        packet_valid_device_states = [
+            'active',
+            'inactive',
+            'queued',
+            'provisioning'
+        ]
+        self.packet_device_states = []
+        if config.has_option(ini_section, 'device_states'):
+            for device_state in config.get(ini_section, 'device_states').split(','):
+                device_state = device_state.strip()
+                if device_state not in packet_valid_device_states:
+                    continue
+                self.packet_device_states.append(device_state)
+        else:
+            self.packet_device_states = packet_valid_device_states
+
+        # Cache related
+        cache_dir = os.path.expanduser(config.get(ini_section, 'cache_path'))
+        if not os.path.exists(cache_dir):
+            os.makedirs(cache_dir)
+
+        self.cache_path_cache = cache_dir + "/ansible-packet.cache"
+        self.cache_path_index = cache_dir + "/ansible-packet.index"
+        self.cache_max_age = config.getint(ini_section, 'cache_max_age')
+
+        # Configure nested groups instead of flat namespace.
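+        # With nested_groups = True, parent groups such as 'projects' and
+        # 'tags' list their member groups under a 'children' key, e.g.
+        # (illustrative): "projects": {"children": ["my-project"]}.
+        # With the default flat namespace, every group is a plain host list.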
+ if config.has_option(ini_section, 'nested_groups'): + self.nested_groups = config.getboolean(ini_section, 'nested_groups') + else: + self.nested_groups = False + + # Replace dash or not in group names + if config.has_option(ini_section, 'replace_dash_in_groups'): + self.replace_dash_in_groups = config.getboolean(ini_section, 'replace_dash_in_groups') + else: + self.replace_dash_in_groups = True + + # Configure which groups should be created. + group_by_options = [ + 'group_by_device_id', + 'group_by_hostname', + 'group_by_facility', + 'group_by_project', + 'group_by_operating_system', + 'group_by_plan_type', + 'group_by_tags', + 'group_by_tag_none', + ] + for option in group_by_options: + if config.has_option(ini_section, option): + setattr(self, option, config.getboolean(ini_section, option)) + else: + setattr(self, option, True) + + # Do we need to just include hosts that match a pattern? + try: + pattern_include = config.get(ini_section, 'pattern_include') + if pattern_include and len(pattern_include) > 0: + self.pattern_include = re.compile(pattern_include) + else: + self.pattern_include = None + except configparser.NoOptionError: + self.pattern_include = None + + # Do we need to exclude hosts that match a pattern? + try: + pattern_exclude = config.get(ini_section, 'pattern_exclude') + if pattern_exclude and len(pattern_exclude) > 0: + self.pattern_exclude = re.compile(pattern_exclude) + else: + self.pattern_exclude = None + except configparser.NoOptionError: + self.pattern_exclude = None + + # Projects + self.projects = [] + configProjects = config.get(ini_section, 'projects') + configProjects_exclude = config.get(ini_section, 'projects_exclude') + if (configProjects == 'all'): + for projectInfo in self.get_projects(): + if projectInfo.name not in configProjects_exclude: + self.projects.append(projectInfo.name) + else: + self.projects = configProjects.split(",") + + def parse_cli_args(self): + ''' Command line argument processing ''' + + parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on Packet') + parser.add_argument('--list', action='store_true', default=True, + help='List Devices (default: True)') + parser.add_argument('--host', action='store', + help='Get all the variables about a specific device') + parser.add_argument('--refresh-cache', action='store_true', default=False, + help='Force refresh of cache by making API requests to Packet (default: False - use cache files)') + self.args = parser.parse_args() + + def do_api_calls_update_cache(self): + ''' Do API calls to each region, and save data in cache files ''' + + for projectInfo in self.get_projects(): + if projectInfo.name in self.projects: + self.get_devices_by_project(projectInfo) + + self.write_to_cache(self.inventory, self.cache_path_cache) + self.write_to_cache(self.index, self.cache_path_index) + + def connect(self): + ''' create connection to api server''' + token = os.environ.get('PACKET_API_TOKEN') + if token is None: + raise Exception("Error reading token from environment (PACKET_API_TOKEN)!") + manager = packet.Manager(auth_token=token) + return manager + + def get_projects(self): + '''Makes a Packet API call to get the list of projects''' + + params = { + 'per_page': self.items_per_page + } + + try: + manager = self.connect() + projects = manager.list_projects(params=params) + return projects + except Exception as e: + traceback.print_exc() + self.fail_with_error(e, 'getting Packet projects') + + def get_devices_by_project(self, project): + ''' Makes an Packet API call to 
get the list of devices in a particular
+        project '''
+
+        params = {
+            'per_page': self.items_per_page
+        }
+
+        try:
+            manager = self.connect()
+            devices = manager.list_devices(project_id=project.id, params=params)
+
+            for device in devices:
+                self.add_device(device, project)
+
+        except Exception as e:
+            traceback.print_exc()
+            self.fail_with_error(e, 'getting Packet devices')
+
+    def fail_with_error(self, err_msg, err_operation=None):
+        '''log an error to std err for ansible-playbook to consume and exit'''
+        if err_operation:
+            err_msg = 'ERROR: "{err_msg}", while: {err_operation}\n'.format(
+                err_msg=err_msg, err_operation=err_operation)
+        sys.stderr.write(err_msg)
+        sys.exit(1)
+
+    def get_device(self, device_id):
+        manager = self.connect()
+
+        device = manager.get_device(device_id)
+        return device
+
+    def add_device(self, device, project):
+        ''' Adds a device to the inventory and index, as long as it is
+        addressable '''
+
+        # Only return devices with desired device states
+        if device.state not in self.packet_device_states:
+            return
+
+        # Select the best destination address. Only include management
+        # addresses as non-management (elastic) addresses need manual
+        # host configuration to be routable.
+        # See https://help.packet.net/article/54-elastic-ips.
+        dest = None
+        for ip_address in device.ip_addresses:
+            if ip_address['public'] is True and \
+                    ip_address['address_family'] == 4 and \
+                    ip_address['management'] is True:
+                dest = ip_address['address']
+
+        if not dest:
+            # Skip devices we cannot address (e.g. private VPC subnet)
+            return
+
+        # if we only want to include hosts that match a pattern, skip those that don't
+        if self.pattern_include and not self.pattern_include.match(device.hostname):
+            return
+
+        # if we need to exclude hosts that match a pattern, skip those
+        if self.pattern_exclude and self.pattern_exclude.match(device.hostname):
+            return
+
+        # Add to index
+        self.index[dest] = [project.id, device.id]
+
+        # Inventory: Group by device ID (always a group of 1)
+        if self.group_by_device_id:
+            self.inventory[device.id] = [dest]
+            if self.nested_groups:
+                self.push_group(self.inventory, 'devices', device.id)
+
+        # Inventory: Group by device name (hopefully a group of 1)
+        if self.group_by_hostname:
+            self.push(self.inventory, device.hostname, dest)
+            if self.nested_groups:
+                self.push_group(self.inventory, 'hostnames', device.hostname)
+
+        # Inventory: Group by project
+        if self.group_by_project:
+            self.push(self.inventory, project.name, dest)
+            if self.nested_groups:
+                self.push_group(self.inventory, 'projects', project.name)
+
+        # Inventory: Group by facility, nested under the project when
+        # nested groups are enabled
+        if self.group_by_facility:
+            self.push(self.inventory, device.facility['code'], dest)
+            if self.nested_groups:
+                self.push_group(self.inventory, project.name, device.facility['code'])
+
+        # Inventory: Group by OS
+        if self.group_by_operating_system:
+            self.push(self.inventory, device.operating_system['slug'], dest)
+            if self.nested_groups:
+                self.push_group(self.inventory, 'operating_systems', device.operating_system['slug'])
+
+        # Inventory: Group by plan type
+        if self.group_by_plan_type:
+            self.push(self.inventory, device.plan['slug'], dest)
+            if self.nested_groups:
+                self.push_group(self.inventory, 'plans', device.plan['slug'])
+
+        # Inventory: Group by tag keys
+        if self.group_by_tags:
+            for k in device.tags:
+                key = self.to_safe("tag_" + k)
+                self.push(self.inventory, key, dest)
+                if self.nested_groups:
+                    self.push_group(self.inventory, 'tags', key)
+
+        # Global Tag:
devices without tags + if self.group_by_tag_none and len(device.tags) == 0: + self.push(self.inventory, 'tag_none', dest) + if self.nested_groups: + self.push_group(self.inventory, 'tags', 'tag_none') + + # Global Tag: tag all Packet devices + self.push(self.inventory, 'packet', dest) + + self.inventory["_meta"]["hostvars"][dest] = self.get_host_info_dict_from_device(device) + + def get_host_info_dict_from_device(self, device): + device_vars = {} + for key in vars(device): + value = getattr(device, key) + key = self.to_safe('packet_' + key) + + # Handle complex types + if key == 'packet_state': + device_vars[key] = device.state or '' + elif key == 'packet_hostname': + device_vars[key] = value + elif isinstance(value, (int, bool)): + device_vars[key] = value + elif isinstance(value, six.string_types): + device_vars[key] = value.strip() + elif value is None: + device_vars[key] = '' + elif key == 'packet_facility': + device_vars[key] = value['code'] + elif key == 'packet_operating_system': + device_vars[key] = value['slug'] + elif key == 'packet_plan': + device_vars[key] = value['slug'] + elif key == 'packet_tags': + for k in value: + key = self.to_safe('packet_tag_' + k) + device_vars[key] = k + else: + pass + # print key + # print type(value) + # print value + + return device_vars + + def get_host_info(self): + ''' Get variables about a specific host ''' + + if len(self.index) == 0: + # Need to load index from cache + self.load_index_from_cache() + + if self.args.host not in self.index: + # try updating the cache + self.do_api_calls_update_cache() + if self.args.host not in self.index: + # host might not exist anymore + return self.json_format_dict({}, True) + + (project_id, device_id) = self.index[self.args.host] + + device = self.get_device(device_id) + return self.json_format_dict(self.get_host_info_dict_from_device(device), True) + + def push(self, my_dict, key, element): + ''' Push an element onto an array that may not have been defined in + the dict ''' + group_info = my_dict.setdefault(key, []) + if isinstance(group_info, dict): + host_list = group_info.setdefault('hosts', []) + host_list.append(element) + else: + group_info.append(element) + + def push_group(self, my_dict, key, element): + ''' Push a group as a child of another group. 
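+
+        For example, push_group(inventory, 'tags', 'tag_prod') records
+        'tag_prod' under inventory['tags']['children'], converting a flat
+        host list into a {'hosts': [...], 'children': [...]} mapping first
+        if needed. The names here are illustrative.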
''' + parent_group = my_dict.setdefault(key, {}) + if not isinstance(parent_group, dict): + parent_group = my_dict[key] = {'hosts': parent_group} + child_groups = parent_group.setdefault('children', []) + if element not in child_groups: + child_groups.append(element) + + def get_inventory_from_cache(self): + ''' Reads the inventory from the cache file and returns it as a JSON + object ''' + + cache = open(self.cache_path_cache, 'r') + json_inventory = cache.read() + return json_inventory + + def load_index_from_cache(self): + ''' Reads the index from the cache file sets self.index ''' + + cache = open(self.cache_path_index, 'r') + json_index = cache.read() + self.index = json.loads(json_index) + + def write_to_cache(self, data, filename): + ''' Writes data in JSON format to a file ''' + + json_data = self.json_format_dict(data, True) + cache = open(filename, 'w') + cache.write(json_data) + cache.close() + + def uncammelize(self, key): + temp = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', key) + return re.sub('([a-z0-9])([A-Z])', r'\1_\2', temp).lower() + + def to_safe(self, word): + ''' Converts 'bad' characters in a string to underscores so they can be used as Ansible groups ''' + regex = r"[^A-Za-z0-9\_" + if not self.replace_dash_in_groups: + regex += r"\-" + return re.sub(regex + "]", "_", word) + + def json_format_dict(self, data, pretty=False): + ''' Converts a dict to a JSON object and dumps it as a formatted + string ''' + + if pretty: + return json.dumps(data, sort_keys=True, indent=2) + else: + return json.dumps(data) + + +# Run the script +PacketInventory() diff --git a/scripts/inventory/proxmox.py b/scripts/inventory/proxmox.py new file mode 100644 index 0000000000..0538ca8a9b --- /dev/null +++ b/scripts/inventory/proxmox.py @@ -0,0 +1,248 @@ +#!/usr/bin/env python + +# Copyright (C) 2014 Mathieu GAUTHIER-LAFAYE +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +# Updated 2016 by Matt Harris +# +# Added support for Proxmox VE 4.x +# Added support for using the Notes field of a VM to define groups and variables: +# A well-formatted JSON object in the Notes field will be added to the _meta +# section for that VM. 
In addition, the "groups" key of this JSON object may be +# used to specify group membership: +# +# { "groups": ["utility", "databases"], "a": false, "b": true } + +import json +import os +import sys +from optparse import OptionParser + +from ansible.module_utils.six import iteritems +from ansible.module_utils.six.moves.urllib.parse import urlencode + +from ansible.module_utils.urls import open_url + + +class ProxmoxNodeList(list): + def get_names(self): + return [node['node'] for node in self] + + +class ProxmoxVM(dict): + def get_variables(self): + variables = {} + for key, value in iteritems(self): + variables['proxmox_' + key] = value + return variables + + +class ProxmoxVMList(list): + def __init__(self, data=None): + data = [] if data is None else data + + for item in data: + self.append(ProxmoxVM(item)) + + def get_names(self): + return [vm['name'] for vm in self if vm['template'] != 1] + + def get_by_name(self, name): + results = [vm for vm in self if vm['name'] == name] + return results[0] if len(results) > 0 else None + + def get_variables(self): + variables = {} + for vm in self: + variables[vm['name']] = vm.get_variables() + + return variables + + +class ProxmoxPoolList(list): + def get_names(self): + return [pool['poolid'] for pool in self] + + +class ProxmoxPool(dict): + def get_members_name(self): + return [member['name'] for member in self['members'] if member['template'] != 1] + + +class ProxmoxAPI(object): + def __init__(self, options): + self.options = options + self.credentials = None + + if not options.url: + raise Exception('Missing mandatory parameter --url (or PROXMOX_URL).') + elif not options.username: + raise Exception('Missing mandatory parameter --username (or PROXMOX_USERNAME).') + elif not options.password: + raise Exception('Missing mandatory parameter --password (or PROXMOX_PASSWORD).') + + def auth(self): + request_path = '{0}api2/json/access/ticket'.format(self.options.url) + + request_params = urlencode({ + 'username': self.options.username, + 'password': self.options.password, + }) + + data = json.load(open_url(request_path, data=request_params)) + + self.credentials = { + 'ticket': data['data']['ticket'], + 'CSRFPreventionToken': data['data']['CSRFPreventionToken'], + } + + def get(self, url, data=None): + request_path = '{0}{1}'.format(self.options.url, url) + + headers = {'Cookie': 'PVEAuthCookie={0}'.format(self.credentials['ticket'])} + request = open_url(request_path, data=data, headers=headers) + + response = json.load(request) + return response['data'] + + def nodes(self): + return ProxmoxNodeList(self.get('api2/json/nodes')) + + def vms_by_type(self, node, type): + return ProxmoxVMList(self.get('api2/json/nodes/{0}/{1}'.format(node, type))) + + def vm_description_by_type(self, node, vm, type): + return self.get('api2/json/nodes/{0}/{1}/{2}/config'.format(node, type, vm)) + + def node_qemu(self, node): + return self.vms_by_type(node, 'qemu') + + def node_qemu_description(self, node, vm): + return self.vm_description_by_type(node, vm, 'qemu') + + def node_lxc(self, node): + return self.vms_by_type(node, 'lxc') + + def node_lxc_description(self, node, vm): + return self.vm_description_by_type(node, vm, 'lxc') + + def pools(self): + return ProxmoxPoolList(self.get('api2/json/pools')) + + def pool(self, poolid): + return ProxmoxPool(self.get('api2/json/pools/{0}'.format(poolid))) + + +def main_list(options): + results = { + 'all': { + 'hosts': [], + }, + '_meta': { + 'hostvars': {}, + } + } + + proxmox_api = ProxmoxAPI(options) + proxmox_api.auth() 
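+    # Walk every node, merging its QEMU VMs and LXC containers into the
+    # 'all' group and collecting per-VM variables into _meta
+    # (get_names() filters out template VMs).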
+ + for node in proxmox_api.nodes().get_names(): + qemu_list = proxmox_api.node_qemu(node) + results['all']['hosts'] += qemu_list.get_names() + results['_meta']['hostvars'].update(qemu_list.get_variables()) + lxc_list = proxmox_api.node_lxc(node) + results['all']['hosts'] += lxc_list.get_names() + results['_meta']['hostvars'].update(lxc_list.get_variables()) + + for vm in results['_meta']['hostvars']: + vmid = results['_meta']['hostvars'][vm]['proxmox_vmid'] + try: + type = results['_meta']['hostvars'][vm]['proxmox_type'] + except KeyError: + type = 'qemu' + try: + description = proxmox_api.vm_description_by_type(node, vmid, type)['description'] + except KeyError: + description = None + + try: + metadata = json.loads(description) + except TypeError: + metadata = {} + except ValueError: + metadata = { + 'notes': description + } + + if 'groups' in metadata: + # print metadata + for group in metadata['groups']: + if group not in results: + results[group] = { + 'hosts': [] + } + results[group]['hosts'] += [vm] + + results['_meta']['hostvars'][vm].update(metadata) + + # pools + for pool in proxmox_api.pools().get_names(): + results[pool] = { + 'hosts': proxmox_api.pool(pool).get_members_name(), + } + + return results + + +def main_host(options): + proxmox_api = ProxmoxAPI(options) + proxmox_api.auth() + + for node in proxmox_api.nodes().get_names(): + qemu_list = proxmox_api.node_qemu(node) + qemu = qemu_list.get_by_name(options.host) + if qemu: + return qemu.get_variables() + + return {} + + +def main(): + parser = OptionParser(usage='%prog [options] --list | --host HOSTNAME') + parser.add_option('--list', action="store_true", default=False, dest="list") + parser.add_option('--host', dest="host") + parser.add_option('--url', default=os.environ.get('PROXMOX_URL'), dest='url') + parser.add_option('--username', default=os.environ.get('PROXMOX_USERNAME'), dest='username') + parser.add_option('--password', default=os.environ.get('PROXMOX_PASSWORD'), dest='password') + parser.add_option('--pretty', action="store_true", default=False, dest='pretty') + (options, args) = parser.parse_args() + + if options.list: + data = main_list(options) + elif options.host: + data = main_host(options) + else: + parser.print_help() + sys.exit(1) + + indent = None + if options.pretty: + indent = 2 + + print(json.dumps(data, indent=indent)) + + +if __name__ == '__main__': + main() diff --git a/scripts/inventory/rackhd.py b/scripts/inventory/rackhd.py new file mode 100644 index 0000000000..e7a5cca5f5 --- /dev/null +++ b/scripts/inventory/rackhd.py @@ -0,0 +1,96 @@ +#!/usr/bin/env python + +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
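+
+# Example usage (the URL and node ids are illustrative):
+#
+#   export RACKHD_URL=10.1.1.45:8080
+#   ./rackhd.py --list                      # every compute node known to RackHD
+#   ./rackhd.py --host <nodeid>[,<nodeid>]  # only the listed node ids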
+
+import json
+import os
+import requests
+import argparse
+
+RACKHD_URL = 'http://localhost:8080'
+
+
+class RackhdInventory(object):
+    def __init__(self, nodeids):
+        self._inventory = {}
+        for nodeid in nodeids:
+            self._load_inventory_data(nodeid)
+        inventory = {}
+        for (nodeid, info) in self._inventory.items():
+            inventory[nodeid] = (self._format_output(nodeid, info))
+        print(json.dumps(inventory))
+
+    def _load_inventory_data(self, nodeid):
+        info = {}
+        info['ohai'] = RACKHD_URL + '/api/common/nodes/{0}/catalogs/ohai'.format(nodeid)
+        info['lookup'] = RACKHD_URL + '/api/common/lookups/?q={0}'.format(nodeid)
+
+        results = {}
+        for (key, url) in info.items():
+            r = requests.get(url, verify=False)
+            results[key] = r.text
+        self._inventory[nodeid] = results
+
+    def _format_output(self, nodeid, info):
+        # Fall back to an empty group when the lookup data is incomplete
+        output = {}
+        try:
+            node_info = json.loads(info['lookup'])
+            ipaddress = ''
+            if len(node_info) > 0:
+                ipaddress = node_info[0]['ipAddress']
+            output = {'hosts': [ipaddress], 'vars': {}}
+            for (key, result) in info.items():
+                output['vars'][key] = json.loads(result)
+            output['vars']['ansible_ssh_user'] = 'monorail'
+        except KeyError:
+            pass
+        return output
+
+
+def parse_args():
+    parser = argparse.ArgumentParser()
+    parser.add_argument('--host')
+    parser.add_argument('--list', action='store_true')
+    return parser.parse_args()
+
+
+try:
+    # check if a RackHD URL (e.g. 10.1.1.45:8080) is specified in the environment
+    RACKHD_URL = 'http://' + str(os.environ['RACKHD_URL'])
+except Exception:
+    # use default values
+    pass
+
+# Use the nodeid specified in the environment to limit the data returned
+# or return data for all available nodes
+nodeids = []
+
+if (parse_args().host):
+    try:
+        nodeids += parse_args().host.split(',')
+        RackhdInventory(nodeids)
+    except Exception:
+        pass
+if (parse_args().list):
+    try:
+        url = RACKHD_URL + '/api/common/nodes'
+        r = requests.get(url, verify=False)
+        data = json.loads(r.text)
+        for entry in data:
+            if entry['type'] == 'compute':
+                nodeids.append(entry['id'])
+        RackhdInventory(nodeids)
+    except Exception:
+        pass
diff --git a/scripts/inventory/rax.ini b/scripts/inventory/rax.ini
new file mode 100644
index 0000000000..15948e7b2e
--- /dev/null
+++ b/scripts/inventory/rax.ini
@@ -0,0 +1,66 @@
+# Ansible Rackspace external inventory script settings
+#
+
+[rax]
+
+# Environment Variable: RAX_CREDS_FILE
+#
+# An optional configuration that points to a pyrax-compatible credentials
+# file.
+#
+# If not supplied, rax.py will look for a credentials file
+# at ~/.rackspace_cloud_credentials. It uses the Rackspace Python SDK,
+# and therefore requires a file formatted per the SDK's specifications.
+#
+# https://github.com/rackspace/pyrax/blob/master/docs/getting_started.md
+# creds_file = ~/.rackspace_cloud_credentials
+
+# Environment Variable: RAX_REGION
+#
+# An optional environment variable to narrow inventory search
+# scope. If used, needs a value like ORD, DFW, SYD (a Rackspace
+# datacenter) and optionally accepts a comma-separated list.
+# regions = IAD,ORD,DFW
+
+# Environment Variable: RAX_ENV
+#
+# A configuration that will use an environment as configured in
+# ~/.pyrax.cfg, see
+# https://github.com/rackspace/pyrax/blob/master/docs/getting_started.md
+# env = prod
+
+# Environment Variable: RAX_META_PREFIX
+# Default: meta
+#
+# A configuration that changes the prefix used for meta key/value groups.
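+# For example, with the default prefix a server whose metadata contains
+# group=web ends up in a group named meta_group_web.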
+# For compatibility with ec2.py set to "tag" +# meta_prefix = meta + +# Environment Variable: RAX_ACCESS_NETWORK +# Default: public +# +# A configuration that will tell the inventory script to use a specific +# server network to determine the ansible_ssh_host value. If no address +# is found, ansible_ssh_host will not be set. Accepts a comma-separated +# list of network names, the first found wins. +# access_network = public + +# Environment Variable: RAX_ACCESS_IP_VERSION +# Default: 4 +# +# A configuration related to "access_network" that will attempt to +# determine the ansible_ssh_host value for either IPv4 or IPv6. If no +# address is found, ansible_ssh_host will not be set. +# Acceptable values are: 4 or 6. Values other than 4 or 6 +# will be ignored, and 4 will be used. Accepts a comma separated list, +# the first found wins. +# access_ip_version = 4 + +# Environment Variable: RAX_CACHE_MAX_AGE +# Default: 600 +# +# A configuration the changes the behavior or the inventory cache. +# Inventory listing performed before this value will be returned from +# the cache instead of making a full request for all inventory. Setting +# this value to 0 will force a full request. +# cache_max_age = 600 \ No newline at end of file diff --git a/scripts/inventory/rax.py b/scripts/inventory/rax.py new file mode 100644 index 0000000000..c6d512cd12 --- /dev/null +++ b/scripts/inventory/rax.py @@ -0,0 +1,470 @@ +#!/usr/bin/env python + +# (c) 2013, Jesse Keating , +# Matt Martz +# +# This file is part of Ansible. +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +""" +Rackspace Cloud Inventory + +Authors: + Jesse Keating , + Matt Martz + + +Description: + Generates inventory that Ansible can understand by making API request to + Rackspace Public Cloud API + + When run against a specific host, this script returns variables similar to: + rax_os-ext-sts_task_state + rax_addresses + rax_links + rax_image + rax_os-ext-sts_vm_state + rax_flavor + rax_id + rax_rax-bandwidth_bandwidth + rax_user_id + rax_os-dcf_diskconfig + rax_accessipv4 + rax_accessipv6 + rax_progress + rax_os-ext-sts_power_state + rax_metadata + rax_status + rax_updated + rax_hostid + rax_name + rax_created + rax_tenant_id + rax_loaded + +Configuration: + rax.py can be configured using a rax.ini file or via environment + variables. The rax.ini file should live in the same directory along side + this script. + + The section header for configuration values related to this + inventory plugin is [rax] + + [rax] + creds_file = ~/.rackspace_cloud_credentials + regions = IAD,ORD,DFW + env = prod + meta_prefix = meta + access_network = public + access_ip_version = 4 + + Each of these configurations also has a corresponding environment variable. + An environment variable will override a configuration file value. + + creds_file: + Environment Variable: RAX_CREDS_FILE + + An optional configuration that points to a pyrax-compatible credentials + file. 
+ + If not supplied, rax.py will look for a credentials file + at ~/.rackspace_cloud_credentials. It uses the Rackspace Python SDK, + and therefore requires a file formatted per the SDK's specifications. + + https://github.com/rackspace/pyrax/blob/master/docs/getting_started.md + + regions: + Environment Variable: RAX_REGION + + An optional environment variable to narrow inventory search + scope. If used, needs a value like ORD, DFW, SYD (a Rackspace + datacenter) and optionally accepts a comma-separated list. + + environment: + Environment Variable: RAX_ENV + + A configuration that will use an environment as configured in + ~/.pyrax.cfg, see + https://github.com/rackspace/pyrax/blob/master/docs/getting_started.md + + meta_prefix: + Environment Variable: RAX_META_PREFIX + Default: meta + + A configuration that changes the prefix used for meta key/value groups. + For compatibility with ec2.py set to "tag" + + access_network: + Environment Variable: RAX_ACCESS_NETWORK + Default: public + + A configuration that will tell the inventory script to use a specific + server network to determine the ansible_ssh_host value. If no address + is found, ansible_ssh_host will not be set. Accepts a comma-separated + list of network names, the first found wins. + + access_ip_version: + Environment Variable: RAX_ACCESS_IP_VERSION + Default: 4 + + A configuration related to "access_network" that will attempt to + determine the ansible_ssh_host value for either IPv4 or IPv6. If no + address is found, ansible_ssh_host will not be set. + Acceptable values are: 4 or 6. Values other than 4 or 6 + will be ignored, and 4 will be used. Accepts a comma-separated list, + the first found wins. + +Examples: + List server instances + $ RAX_CREDS_FILE=~/.raxpub rax.py --list + + List servers in ORD datacenter only + $ RAX_CREDS_FILE=~/.raxpub RAX_REGION=ORD rax.py --list + + List servers in ORD and DFW datacenters + $ RAX_CREDS_FILE=~/.raxpub RAX_REGION=ORD,DFW rax.py --list + + Get server details for server named "server.example.com" + $ RAX_CREDS_FILE=~/.raxpub rax.py --host server.example.com + + Use the instance private IP to connect (instead of public IP) + $ RAX_CREDS_FILE=~/.raxpub RAX_ACCESS_NETWORK=private rax.py --list +""" + +import os +import re +import sys +import argparse +import warnings +import collections + +from ansible.module_utils.six import iteritems +from ansible.module_utils.six.moves import configparser as ConfigParser + +import json + +try: + import pyrax + from pyrax.utils import slugify +except ImportError: + sys.exit('pyrax is required for this module') + +from time import time + +from ansible.constants import get_config +from ansible.module_utils.parsing.convert_bool import boolean +from ansible.module_utils.six import text_type + +NON_CALLABLES = (text_type, str, bool, dict, int, list, type(None)) + + +def load_config_file(): + p = ConfigParser.ConfigParser() + config_file = os.path.join(os.path.dirname(os.path.realpath(__file__)), + 'rax.ini') + try: + p.read(config_file) + except ConfigParser.Error: + return None + else: + return p + + +def rax_slugify(value): + return 'rax_%s' % (re.sub(r'[^\w-]', '_', value).lower().lstrip('_')) + + +def to_dict(obj): + instance = {} + for key in dir(obj): + value = getattr(obj, key) + if isinstance(value, NON_CALLABLES) and not key.startswith('_'): + key = rax_slugify(key) + instance[key] = value + + return instance + + +def host(regions, hostname): + hostvars = {} + + for region in regions: + # Connect to the region + cs = 
pyrax.connect_to_cloudservers(region=region) + for server in cs.servers.list(): + if server.name == hostname: + for key, value in to_dict(server).items(): + hostvars[key] = value + + # And finally, add an IP address + hostvars['ansible_ssh_host'] = server.accessIPv4 + print(json.dumps(hostvars, sort_keys=True, indent=4)) + + +def _list_into_cache(regions): + groups = collections.defaultdict(list) + hostvars = collections.defaultdict(dict) + images = {} + cbs_attachments = collections.defaultdict(dict) + + prefix = get_config(p, 'rax', 'meta_prefix', 'RAX_META_PREFIX', 'meta') + + try: + # Ansible 2.3+ + networks = get_config(p, 'rax', 'access_network', + 'RAX_ACCESS_NETWORK', 'public', value_type='list') + except TypeError: + # Ansible 2.2.x and below + # pylint: disable=unexpected-keyword-arg + networks = get_config(p, 'rax', 'access_network', + 'RAX_ACCESS_NETWORK', 'public', islist=True) + try: + try: + # Ansible 2.3+ + ip_versions = map(int, get_config(p, 'rax', 'access_ip_version', + 'RAX_ACCESS_IP_VERSION', 4, value_type='list')) + except TypeError: + # Ansible 2.2.x and below + # pylint: disable=unexpected-keyword-arg + ip_versions = map(int, get_config(p, 'rax', 'access_ip_version', + 'RAX_ACCESS_IP_VERSION', 4, islist=True)) + except Exception: + ip_versions = [4] + else: + ip_versions = [v for v in ip_versions if v in [4, 6]] + if not ip_versions: + ip_versions = [4] + + # Go through all the regions looking for servers + for region in regions: + # Connect to the region + cs = pyrax.connect_to_cloudservers(region=region) + if cs is None: + warnings.warn( + 'Connecting to Rackspace region "%s" has caused Pyrax to ' + 'return None. Is this a valid region?' % region, + RuntimeWarning) + continue + for server in cs.servers.list(): + # Create a group on region + groups[region].append(server.name) + + # Check if group metadata key in servers' metadata + group = server.metadata.get('group') + if group: + groups[group].append(server.name) + + for extra_group in server.metadata.get('groups', '').split(','): + if extra_group: + groups[extra_group].append(server.name) + + # Add host metadata + for key, value in to_dict(server).items(): + hostvars[server.name][key] = value + + hostvars[server.name]['rax_region'] = region + + for key, value in iteritems(server.metadata): + groups['%s_%s_%s' % (prefix, key, value)].append(server.name) + + groups['instance-%s' % server.id].append(server.name) + groups['flavor-%s' % server.flavor['id']].append(server.name) + + # Handle boot from volume + if not server.image: + if not cbs_attachments[region]: + cbs = pyrax.connect_to_cloud_blockstorage(region) + for vol in cbs.list(): + if boolean(vol.bootable, strict=False): + for attachment in vol.attachments: + metadata = vol.volume_image_metadata + server_id = attachment['server_id'] + cbs_attachments[region][server_id] = { + 'id': metadata['image_id'], + 'name': slugify(metadata['image_name']) + } + image = cbs_attachments[region].get(server.id) + if image: + server.image = {'id': image['id']} + hostvars[server.name]['rax_image'] = server.image + hostvars[server.name]['rax_boot_source'] = 'volume' + images[image['id']] = image['name'] + else: + hostvars[server.name]['rax_boot_source'] = 'local' + + try: + imagegroup = 'image-%s' % images[server.image['id']] + groups[imagegroup].append(server.name) + groups['image-%s' % server.image['id']].append(server.name) + except KeyError: + try: + image = cs.images.get(server.image['id']) + except cs.exceptions.NotFound: + groups['image-%s' % 
server.image['id']].append(server.name) + else: + images[image.id] = image.human_id + groups['image-%s' % image.human_id].append(server.name) + groups['image-%s' % server.image['id']].append(server.name) + + # And finally, add an IP address + ansible_ssh_host = None + # use accessIPv[46] instead of looping address for 'public' + for network_name in networks: + if ansible_ssh_host: + break + if network_name == 'public': + for version_name in ip_versions: + if ansible_ssh_host: + break + if version_name == 6 and server.accessIPv6: + ansible_ssh_host = server.accessIPv6 + elif server.accessIPv4: + ansible_ssh_host = server.accessIPv4 + if not ansible_ssh_host: + addresses = server.addresses.get(network_name, []) + for address in addresses: + for version_name in ip_versions: + if ansible_ssh_host: + break + if address.get('version') == version_name: + ansible_ssh_host = address.get('addr') + break + if ansible_ssh_host: + hostvars[server.name]['ansible_ssh_host'] = ansible_ssh_host + + if hostvars: + groups['_meta'] = {'hostvars': hostvars} + + with open(get_cache_file_path(regions), 'w') as cache_file: + json.dump(groups, cache_file) + + +def get_cache_file_path(regions): + regions_str = '.'.join([reg.strip().lower() for reg in regions]) + ansible_tmp_path = os.path.join(os.path.expanduser("~"), '.ansible', 'tmp') + if not os.path.exists(ansible_tmp_path): + os.makedirs(ansible_tmp_path) + return os.path.join(ansible_tmp_path, + 'ansible-rax-%s-%s.cache' % ( + pyrax.identity.username, regions_str)) + + +def _list(regions, refresh_cache=True): + cache_max_age = int(get_config(p, 'rax', 'cache_max_age', + 'RAX_CACHE_MAX_AGE', 600)) + + if (not os.path.exists(get_cache_file_path(regions)) or + refresh_cache or + (time() - os.stat(get_cache_file_path(regions))[-1]) > cache_max_age): + # Cache file doesn't exist or older than 10m or refresh cache requested + _list_into_cache(regions) + + with open(get_cache_file_path(regions), 'r') as cache_file: + groups = json.load(cache_file) + print(json.dumps(groups, sort_keys=True, indent=4)) + + +def parse_args(): + parser = argparse.ArgumentParser(description='Ansible Rackspace Cloud ' + 'inventory module') + group = parser.add_mutually_exclusive_group(required=True) + group.add_argument('--list', action='store_true', + help='List active servers') + group.add_argument('--host', help='List details about the specific host') + parser.add_argument('--refresh-cache', action='store_true', default=False, + help=('Force refresh of cache, making API requests to' + 'RackSpace (default: False - use cache files)')) + return parser.parse_args() + + +def setup(): + default_creds_file = os.path.expanduser('~/.rackspace_cloud_credentials') + + env = get_config(p, 'rax', 'environment', 'RAX_ENV', None) + if env: + pyrax.set_environment(env) + + keyring_username = pyrax.get_setting('keyring_username') + + # Attempt to grab credentials from environment first + creds_file = get_config(p, 'rax', 'creds_file', + 'RAX_CREDS_FILE', None) + if creds_file is not None: + creds_file = os.path.expanduser(creds_file) + else: + # But if that fails, use the default location of + # ~/.rackspace_cloud_credentials + if os.path.isfile(default_creds_file): + creds_file = default_creds_file + elif not keyring_username: + sys.exit('No value in environment variable %s and/or no ' + 'credentials file at %s' + % ('RAX_CREDS_FILE', default_creds_file)) + + identity_type = pyrax.get_setting('identity_type') + pyrax.set_setting('identity_type', identity_type or 'rackspace') + + region = 
pyrax.get_setting('region') + + try: + if keyring_username: + pyrax.keyring_auth(keyring_username, region=region) + else: + pyrax.set_credential_file(creds_file, region=region) + except Exception as e: + sys.exit("%s: %s" % (e, e.message)) + + regions = [] + if region: + regions.append(region) + else: + try: + # Ansible 2.3+ + region_list = get_config(p, 'rax', 'regions', 'RAX_REGION', 'all', + value_type='list') + except TypeError: + # Ansible 2.2.x and below + # pylint: disable=unexpected-keyword-arg + region_list = get_config(p, 'rax', 'regions', 'RAX_REGION', 'all', + islist=True) + + for region in region_list: + region = region.strip().upper() + if region == 'ALL': + regions = pyrax.regions + break + elif region not in pyrax.regions: + sys.exit('Unsupported region %s' % region) + elif region not in regions: + regions.append(region) + + return regions + + +def main(): + args = parse_args() + regions = setup() + if args.list: + _list(regions, refresh_cache=args.refresh_cache) + elif args.host: + host(regions, args.host) + sys.exit(0) + + +p = load_config_file() +if __name__ == '__main__': + main() diff --git a/scripts/inventory/rhv.py b/scripts/inventory/rhv.py new file mode 120000 index 0000000000..e66635dd42 --- /dev/null +++ b/scripts/inventory/rhv.py @@ -0,0 +1 @@ +ovirt4.py \ No newline at end of file diff --git a/scripts/inventory/rudder.ini b/scripts/inventory/rudder.ini new file mode 100644 index 0000000000..748b3d2121 --- /dev/null +++ b/scripts/inventory/rudder.ini @@ -0,0 +1,35 @@ +# Rudder external inventory script settings +# + +[rudder] + +# Your Rudder server API URL, typically: +# https://rudder.local/rudder/api +uri = https://rudder.local/rudder/api + +# By default, Rudder uses a self-signed certificate. Set this to True +# to disable certificate validation. +disable_ssl_certificate_validation = True + +# Your Rudder API token, created in the Web interface. +token = aaabbbccc + +# Rudder API version to use, use "latest" for latest available +# version. +version = latest + +# Property to use as group name in the output. +# Can generally be "id" or "displayName". +group_name = displayName + +# Fail if there are two groups with the same name or two hosts with the +# same hostname in the output. +fail_if_name_collision = True + +# We cache the results of Rudder API in a local file +cache_path = /tmp/ansible-rudder.cache + +# The number of seconds a cache file is considered valid. After this many +# seconds, a new API call will be made, and the cache file will be updated. +# Set to 0 to disable cache. +cache_max_age = 500 diff --git a/scripts/inventory/rudder.py b/scripts/inventory/rudder.py new file mode 100644 index 0000000000..4722fcf1e4 --- /dev/null +++ b/scripts/inventory/rudder.py @@ -0,0 +1,296 @@ +#!/usr/bin/env python + +# Copyright (c) 2015, Normation SAS +# +# Inspired by the EC2 inventory plugin: +# https://github.com/ansible/ansible/blob/devel/contrib/inventory/ec2.py +# +# This file is part of Ansible, +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. 
+# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +###################################################################### + +''' +Rudder external inventory script +================================= + +Generates inventory that Ansible can understand by making API request to +a Rudder server. This script is compatible with Rudder 2.10 or later. + +The output JSON includes all your Rudder groups, containing the hostnames of +their nodes. Groups and nodes have a variable called rudder_group_id and +rudder_node_id, which is the Rudder internal id of the item, allowing to identify +them uniquely. Hosts variables also include your node properties, which are +key => value properties set by the API and specific to each node. + +This script assumes there is an rudder.ini file alongside it. To specify a +different path to rudder.ini, define the RUDDER_INI_PATH environment variable: + + export RUDDER_INI_PATH=/path/to/my_rudder.ini + +You have to configure your Rudder server information, either in rudder.ini or +by overriding it with environment variables: + + export RUDDER_API_VERSION='latest' + export RUDDER_API_TOKEN='my_token' + export RUDDER_API_URI='https://rudder.local/rudder/api' +''' + + +import sys +import os +import re +import argparse +import httplib2 as http +from time import time +from ansible.module_utils import six +from ansible.module_utils.six.moves import configparser +from ansible.module_utils.six.moves.urllib.parse import urlparse + +import json + + +class RudderInventory(object): + def __init__(self): + ''' Main execution path ''' + + # Empty inventory by default + self.inventory = {} + + # Read settings and parse CLI arguments + self.read_settings() + self.parse_cli_args() + + # Create connection + self.conn = http.Http(disable_ssl_certificate_validation=self.disable_ssl_validation) + + # Cache + if self.args.refresh_cache: + self.update_cache() + elif not self.is_cache_valid(): + self.update_cache() + else: + self.load_cache() + + data_to_print = {} + + if self.args.host: + data_to_print = self.get_host_info(self.args.host) + elif self.args.list: + data_to_print = self.get_list_info() + + print(self.json_format_dict(data_to_print, True)) + + def read_settings(self): + ''' Reads the settings from the rudder.ini file ''' + if six.PY2: + config = configparser.SafeConfigParser() + else: + config = configparser.ConfigParser() + rudder_default_ini_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'rudder.ini') + rudder_ini_path = os.path.expanduser(os.path.expandvars(os.environ.get('RUDDER_INI_PATH', rudder_default_ini_path))) + config.read(rudder_ini_path) + + self.token = os.environ.get('RUDDER_API_TOKEN', config.get('rudder', 'token')) + self.version = os.environ.get('RUDDER_API_VERSION', config.get('rudder', 'version')) + self.uri = os.environ.get('RUDDER_API_URI', config.get('rudder', 'uri')) + + self.disable_ssl_validation = config.getboolean('rudder', 'disable_ssl_certificate_validation') + self.group_name = config.get('rudder', 'group_name') + self.fail_if_name_collision = config.getboolean('rudder', 'fail_if_name_collision') + + self.cache_path = config.get('rudder', 'cache_path') + self.cache_max_age = config.getint('rudder', 'cache_max_age') + + def parse_cli_args(self): + ''' Command line argument processing ''' + + parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on Rudder inventory') + parser.add_argument('--list', action='store_true', default=True, + 
help='List instances (default: True)') + parser.add_argument('--host', action='store', + help='Get all the variables about a specific instance') + parser.add_argument('--refresh-cache', action='store_true', default=False, + help='Force refresh of cache by making API requests to Rudder (default: False - use cache files)') + self.args = parser.parse_args() + + def is_cache_valid(self): + ''' Determines if the cache files have expired, or if it is still valid ''' + + if os.path.isfile(self.cache_path): + mod_time = os.path.getmtime(self.cache_path) + current_time = time() + if (mod_time + self.cache_max_age) > current_time: + return True + + return False + + def load_cache(self): + ''' Reads the cache from the cache file sets self.cache ''' + + cache = open(self.cache_path, 'r') + json_cache = cache.read() + + try: + self.inventory = json.loads(json_cache) + except ValueError as e: + self.fail_with_error('Could not parse JSON response from local cache', 'parsing local cache') + + def write_cache(self): + ''' Writes data in JSON format to a file ''' + + json_data = self.json_format_dict(self.inventory, True) + cache = open(self.cache_path, 'w') + cache.write(json_data) + cache.close() + + def get_nodes(self): + ''' Gets the nodes list from Rudder ''' + + path = '/nodes?select=nodeAndPolicyServer' + result = self.api_call(path) + + nodes = {} + + for node in result['data']['nodes']: + nodes[node['id']] = {} + nodes[node['id']]['hostname'] = node['hostname'] + if 'properties' in node: + nodes[node['id']]['properties'] = node['properties'] + else: + nodes[node['id']]['properties'] = [] + + return nodes + + def get_groups(self): + ''' Gets the groups list from Rudder ''' + + path = '/groups' + result = self.api_call(path) + + groups = {} + + for group in result['data']['groups']: + groups[group['id']] = {'hosts': group['nodeIds'], 'name': self.to_safe(group[self.group_name])} + + return groups + + def update_cache(self): + ''' Fetches the inventory information from Rudder and creates the inventory ''' + + nodes = self.get_nodes() + groups = self.get_groups() + + inventory = {} + + for group in groups: + # Check for name collision + if self.fail_if_name_collision: + if groups[group]['name'] in inventory: + self.fail_with_error('Name collision on groups: "%s" appears twice' % groups[group]['name'], 'creating groups') + # Add group to inventory + inventory[groups[group]['name']] = {} + inventory[groups[group]['name']]['hosts'] = [] + inventory[groups[group]['name']]['vars'] = {} + inventory[groups[group]['name']]['vars']['rudder_group_id'] = group + for node in groups[group]['hosts']: + # Add node to group + inventory[groups[group]['name']]['hosts'].append(nodes[node]['hostname']) + + properties = {} + + for node in nodes: + # Check for name collision + if self.fail_if_name_collision: + if nodes[node]['hostname'] in properties: + self.fail_with_error('Name collision on hosts: "%s" appears twice' % nodes[node]['hostname'], 'creating hosts') + # Add node properties to inventory + properties[nodes[node]['hostname']] = {} + properties[nodes[node]['hostname']]['rudder_node_id'] = node + for node_property in nodes[node]['properties']: + properties[nodes[node]['hostname']][self.to_safe(node_property['name'])] = node_property['value'] + + inventory['_meta'] = {} + inventory['_meta']['hostvars'] = properties + + self.inventory = inventory + + if self.cache_max_age > 0: + self.write_cache() + + def get_list_info(self): + ''' Gets inventory information from local cache ''' + + return self.inventory + + def 
get_host_info(self, hostname): + ''' Gets information about a specific host from local cache ''' + + if hostname in self.inventory['_meta']['hostvars']: + return self.inventory['_meta']['hostvars'][hostname] + else: + return {} + + def api_call(self, path): + ''' Performs an API request ''' + + headers = { + 'X-API-Token': self.token, + 'X-API-Version': self.version, + 'Content-Type': 'application/json;charset=utf-8' + } + + target = urlparse(self.uri + path) + method = 'GET' + body = '' + + try: + response, content = self.conn.request(target.geturl(), method, body, headers) + except Exception: + self.fail_with_error('Error connecting to Rudder server') + + try: + data = json.loads(content) + except ValueError as e: + self.fail_with_error('Could not parse JSON response from Rudder API', 'reading API response') + + return data + + def fail_with_error(self, err_msg, err_operation=None): + ''' Logs an error to std err for ansible-playbook to consume and exit ''' + if err_operation: + err_msg = 'ERROR: "{err_msg}", while: {err_operation}'.format( + err_msg=err_msg, err_operation=err_operation) + sys.stderr.write(err_msg) + sys.exit(1) + + def json_format_dict(self, data, pretty=False): + ''' Converts a dict to a JSON object and dumps it as a formatted + string ''' + + if pretty: + return json.dumps(data, sort_keys=True, indent=2) + else: + return json.dumps(data) + + def to_safe(self, word): + ''' Converts 'bad' characters in a string to underscores so they can be + used as Ansible variable names ''' + + return re.sub(r'[^A-Za-z0-9\_]', '_', word) + + +# Run the script +RudderInventory() diff --git a/scripts/inventory/scaleway.ini b/scripts/inventory/scaleway.ini new file mode 100644 index 0000000000..99615a124c --- /dev/null +++ b/scripts/inventory/scaleway.ini @@ -0,0 +1,37 @@ +# Ansible dynamic inventory script for Scaleway cloud provider +# + +[compute] +# Fetch inventory for regions. If not defined will read the SCALEWAY_REGION environment variable +# +# regions = all +# regions = ams1 +# regions = par1, ams1 +regions = par1 + + +# Define a Scaleway token to perform required queries on the API +# in order to generate inventory output. +# +[auth] +# Token to authenticate with Scaleway's API. +# If not defined will read the SCALEWAY_TOKEN environment variable +# +api_token = mysecrettoken + + +# To avoid performing excessive calls to Scaleway API you can define a +# cache for the plugin output. Within the time defined in seconds, latest +# output will be reused. After that time, the cache will be refreshed. +# +[cache] +cache_max_age = 60 +cache_dir = '~/.ansible/tmp' + + +[defaults] +# You may want to use only public IP addresses or private IP addresses. +# You can set public_ip_only configuration to get public IPs only. +# If not defined defaults to retrieving private IP addresses. +# +public_ip_only = false diff --git a/scripts/inventory/scaleway.py b/scripts/inventory/scaleway.py new file mode 100644 index 0000000000..32999cc0e7 --- /dev/null +++ b/scripts/inventory/scaleway.py @@ -0,0 +1,230 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +''' +External inventory script for Scaleway +==================================== + +Shamelessly copied from an existing inventory script. + +This script generates an inventory that Ansible can understand by making API requests to Scaleway API + +Requires some python libraries, ensure to have them installed when using this script. 
(pip install requests https://pypi.org/project/requests/) + +Before using this script you may want to modify scaleway.ini config file. + +This script generates an Ansible hosts file with these host groups: + +: Defines host itself with Scaleway's hostname as group name. +: Contains all hosts which has "" as tag. +: Contains all hosts which are in the "" region. +all: Contains all hosts defined in Scaleway. +''' + +# (c) 2017, Paul B. +# +# This file is part of Ansible, +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +import copy +import os +import requests +from ansible.module_utils import six +from ansible.module_utils.six.moves import configparser +import sys +import time +import traceback + +import json + +EMPTY_GROUP = { + 'children': [], + 'hosts': [] +} + + +class ScalewayAPI: + REGIONS = ['par1', 'ams1'] + + def __init__(self, auth_token, region): + self.session = requests.session() + self.session.headers.update({ + 'User-Agent': 'Ansible Python/%s' % (sys.version.split(' ')[0]) + }) + self.session.headers.update({ + 'X-Auth-Token': auth_token.encode('latin1') + }) + self.base_url = 'https://cp-%s.scaleway.com' % (region) + + def servers(self): + raw = self.session.get('/'.join([self.base_url, 'servers'])) + + try: + response = raw.json() + return self.get_resource('servers', response, raw) + except ValueError: + return [] + + def get_resource(self, resource, response, raw): + raw.raise_for_status() + + if resource in response: + return response[resource] + else: + raise ValueError( + "Resource %s not found in Scaleway API response" % (resource)) + + +def env_or_param(env_key, param=None, fallback=None): + env_value = os.environ.get(env_key) + + if (param, env_value) == (None, None): + return fallback + elif env_value is not None: + return env_value + else: + return param + + +def save_cache(data, config): + ''' saves item to cache ''' + dpath = config.get('cache', 'cache_dir') + try: + cache = open('/'.join([dpath, 'scaleway_ansible_inventory.json']), 'w') + cache.write(json.dumps(data)) + cache.close() + except IOError as e: + pass # not really sure what to do here + + +def get_cache(cache_item, config): + ''' returns cached item ''' + dpath = config.get('cache', 'cache_dir') + inv = {} + try: + cache = open('/'.join([dpath, 'scaleway_ansible_inventory.json']), 'r') + inv = cache.read() + cache.close() + except IOError as e: + pass # not really sure what to do here + + return inv + + +def cache_available(config): + ''' checks if we have a 'fresh' cache available for item requested ''' + + if config.has_option('cache', 'cache_dir'): + dpath = config.get('cache', 'cache_dir') + + try: + existing = os.stat( + '/'.join([dpath, 'scaleway_ansible_inventory.json'])) + except OSError: + return False + + if config.has_option('cache', 'cache_max_age'): + maxage = config.get('cache', 'cache_max_age') + else: + maxage = 60 + if (int(time.time()) - int(existing.st_mtime)) <= int(maxage): + return True + + return False + + +def 
generate_inv_from_api(config): + try: + inventory['scaleway'] = copy.deepcopy(EMPTY_GROUP) + + auth_token = None + if config.has_option('auth', 'api_token'): + auth_token = config.get('auth', 'api_token') + auth_token = env_or_param('SCALEWAY_TOKEN', param=auth_token) + if auth_token is None: + sys.stderr.write('ERROR: missing authentication token for Scaleway API') + sys.exit(1) + + if config.has_option('compute', 'regions'): + regions = config.get('compute', 'regions') + if regions == 'all': + regions = ScalewayAPI.REGIONS + else: + regions = map(str.strip, regions.split(',')) + else: + regions = [ + env_or_param('SCALEWAY_REGION', fallback='par1') + ] + + for region in regions: + api = ScalewayAPI(auth_token, region) + + for server in api.servers(): + hostname = server['hostname'] + if config.has_option('defaults', 'public_ip_only') and config.getboolean('defaults', 'public_ip_only'): + ip = server['public_ip']['address'] + else: + ip = server['private_ip'] + for server_tag in server['tags']: + if server_tag not in inventory: + inventory[server_tag] = copy.deepcopy(EMPTY_GROUP) + inventory[server_tag]['children'].append(hostname) + if region not in inventory: + inventory[region] = copy.deepcopy(EMPTY_GROUP) + inventory[region]['children'].append(hostname) + inventory['scaleway']['children'].append(hostname) + inventory[hostname] = [] + inventory[hostname].append(ip) + + return inventory + except Exception: + # Return empty hosts output + traceback.print_exc() + return {'scaleway': {'hosts': []}, '_meta': {'hostvars': {}}} + + +def get_inventory(config): + ''' Reads the inventory from cache or Scaleway api ''' + + if cache_available(config): + inv = get_cache('scaleway_ansible_inventory.json', config) + else: + inv = generate_inv_from_api(config) + + save_cache(inv, config) + return json.dumps(inv) + + +if __name__ == '__main__': + inventory = {} + + # Read config + if six.PY3: + config = configparser.ConfigParser() + else: + config = configparser.SafeConfigParser() + for configfilename in [os.path.abspath(sys.argv[0]).rsplit('.py')[0] + '.ini', 'scaleway.ini']: + if os.path.exists(configfilename): + config.read(configfilename) + break + + if cache_available(config): + inventory = get_cache('scaleway_ansible_inventory.json', config) + else: + inventory = get_inventory(config) + + # return to ansible + sys.stdout.write(str(inventory)) + sys.stdout.flush() diff --git a/scripts/inventory/serf.py b/scripts/inventory/serf.py new file mode 100644 index 0000000000..8a24027dd4 --- /dev/null +++ b/scripts/inventory/serf.py @@ -0,0 +1,112 @@ +#!/usr/bin/env python + +# (c) 2015, Marc Abramowitz +# +# This file is part of Ansible. +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +# Dynamic inventory script which lets you use nodes discovered by Serf +# (https://serfdom.io/). 
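The Scaleway script above emits one group per tag and per region, each holding the hostname in its 'children' list, while the bare host key maps to a list containing the host's address. A minimal sketch of that output shape, using hypothetical server records in place of a live API call:

    import json

    # Hypothetical server records carrying the fields generate_inv_from_api() reads.
    servers = [
        {'hostname': 'web-1', 'private_ip': '10.1.0.4', 'tags': ['web'], 'region': 'par1'},
        {'hostname': 'db-1', 'private_ip': '10.1.0.9', 'tags': ['db'], 'region': 'ams1'},
    ]

    inventory = {'scaleway': {'children': [], 'hosts': []}}
    for server in servers:
        hostname, ip = server['hostname'], server['private_ip']
        # every tag, plus the region, becomes a group listing the host as a child
        for group in server['tags'] + [server['region']]:
            inventory.setdefault(group, {'children': [], 'hosts': []})['children'].append(hostname)
        inventory['scaleway']['children'].append(hostname)
        inventory[hostname] = [ip]  # the bare host key maps to a list of addresses

    print(json.dumps(inventory, indent=2))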
+# +# Requires the `serfclient` Python module from +# https://pypi.org/project/serfclient/ +# +# Environment variables +# --------------------- +# - `SERF_RPC_ADDR` +# - `SERF_RPC_AUTH` +# +# These variables are described at https://www.serfdom.io/docs/commands/members.html#_rpc_addr + +import argparse +import collections +import os +import sys + +# https://pypi.org/project/serfclient/ +from serfclient import SerfClient, EnvironmentConfig + +import json + +_key = 'serf' + + +def _serf_client(): + env = EnvironmentConfig() + return SerfClient(host=env.host, port=env.port, rpc_auth=env.auth_key) + + +def get_serf_members_data(): + return _serf_client().members().body['Members'] + + +def get_nodes(data): + return [node['Name'] for node in data] + + +def get_groups(data): + groups = collections.defaultdict(list) + + for node in data: + for key, value in node['Tags'].items(): + groups[value].append(node['Name']) + + return groups + + +def get_meta(data): + meta = {'hostvars': {}} + for node in data: + meta['hostvars'][node['Name']] = node['Tags'] + return meta + + +def print_list(): + data = get_serf_members_data() + nodes = get_nodes(data) + groups = get_groups(data) + meta = get_meta(data) + inventory_data = {_key: nodes, '_meta': meta} + inventory_data.update(groups) + print(json.dumps(inventory_data)) + + +def print_host(host): + data = get_serf_members_data() + meta = get_meta(data) + print(json.dumps(meta['hostvars'][host])) + + +def get_args(args_list): + parser = argparse.ArgumentParser( + description='ansible inventory script reading from serf cluster') + mutex_group = parser.add_mutually_exclusive_group(required=True) + help_list = 'list all hosts from serf cluster' + mutex_group.add_argument('--list', action='store_true', help=help_list) + help_host = 'display variables for a host' + mutex_group.add_argument('--host', help=help_host) + return parser.parse_args(args_list) + + +def main(args_list): + args = get_args(args_list) + if args.list: + print_list() + if args.host: + print_host(args.host) + + +if __name__ == '__main__': + main(sys.argv[1:]) diff --git a/scripts/inventory/softlayer.py b/scripts/inventory/softlayer.py new file mode 100644 index 0000000000..016eb4c060 --- /dev/null +++ b/scripts/inventory/softlayer.py @@ -0,0 +1,204 @@ +#!/usr/bin/env python +""" +SoftLayer external inventory script. + +The SoftLayer Python API client is required. Use `pip install softlayer` to install it. +You have a few different options for configuring your username and api_key. You can pass +environment variables (SL_USERNAME and SL_API_KEY). You can also write INI file to +~/.softlayer or /etc/softlayer.conf. For more information see the SL API at: +- https://softlayer-python.readthedocs.io/en/latest/config_file.html + +The SoftLayer Python client has a built in command for saving this configuration file +via the command `sl config setup`. +""" + +# Copyright (C) 2014 AJ Bourg +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . 
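The Serf script above builds its groups from tag values, not tag names: every distinct value in a node's Tags dict becomes a group containing that node. A condensed sketch of the same grouping and the resulting --list payload, with hypothetical member data standing in for members().body['Members']:

    import collections
    import json

    # Hypothetical member data, shaped like members().body['Members'].
    members = [
        {'Name': 'node-a', 'Tags': {'role': 'web', 'dc': 'par'}},
        {'Name': 'node-b', 'Tags': {'role': 'db', 'dc': 'par'}},
    ]

    groups = collections.defaultdict(list)
    for node in members:
        for value in node['Tags'].values():  # grouped by tag value, not tag name
            groups[value].append(node['Name'])

    inventory = {'serf': [n['Name'] for n in members],
                 '_meta': {'hostvars': {n['Name']: n['Tags'] for n in members}}}
    inventory.update(groups)
    print(json.dumps(inventory))

Here node-a and node-b share the 'par' group through their dc tag while 'web' and 'db' each hold one node.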
+ +# +# I found the structure of the ec2.py script very helpful as an example +# as I put this together. Thanks to whoever wrote that script! +# + +import SoftLayer +import re +import argparse +import itertools +import json + + +class SoftLayerInventory(object): + common_items = [ + 'id', + 'globalIdentifier', + 'hostname', + 'domain', + 'fullyQualifiedDomainName', + 'primaryBackendIpAddress', + 'primaryIpAddress', + 'datacenter', + 'tagReferences', + 'userData.value', + ] + + vs_items = [ + 'lastKnownPowerState.name', + 'powerState', + 'maxCpu', + 'maxMemory', + 'activeTransaction.transactionStatus[friendlyName,name]', + 'status', + ] + + hw_items = [ + 'hardwareStatusId', + 'processorPhysicalCoreAmount', + 'memoryCapacity', + ] + + def _empty_inventory(self): + return {"_meta": {"hostvars": {}}} + + def __init__(self): + '''Main path''' + + self.inventory = self._empty_inventory() + + self.parse_options() + + if self.args.list: + self.get_all_servers() + print(self.json_format_dict(self.inventory, True)) + elif self.args.host: + self.get_all_servers() + print(self.json_format_dict(self.inventory["_meta"]["hostvars"][self.args.host], True)) + + def to_safe(self, word): + '''Converts 'bad' characters in a string to underscores so they can be used as Ansible groups''' + + return re.sub(r"[^A-Za-z0-9\-\.]", "_", word) + + def push(self, my_dict, key, element): + '''Push an element onto an array that may not have been defined in the dict''' + + if key in my_dict: + my_dict[key].append(element) + else: + my_dict[key] = [element] + + def parse_options(self): + '''Parse all the arguments from the CLI''' + + parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on SoftLayer') + parser.add_argument('--list', action='store_true', default=False, + help='List instances (default: False)') + parser.add_argument('--host', action='store', + help='Get all the variables about a specific instance') + self.args = parser.parse_args() + + def json_format_dict(self, data, pretty=False): + '''Converts a dict to a JSON object and dumps it as a formatted string''' + + if pretty: + return json.dumps(data, sort_keys=True, indent=2) + else: + return json.dumps(data) + + def process_instance(self, instance, instance_type="virtual"): + '''Populate the inventory dictionary with any instance information''' + + # only want active instances + if 'status' in instance and instance['status']['name'] != 'Active': + return + + # and powered on instances + if 'powerState' in instance and instance['powerState']['name'] != 'Running': + return + + # 5 is active for hardware... 
see https://forums.softlayer.com/forum/softlayer-developer-network/general-discussion/2955-hardwarestatusid + if 'hardwareStatusId' in instance and instance['hardwareStatusId'] != 5: + return + + # if there's no IP address, we can't reach it + if 'primaryIpAddress' not in instance: + return + + instance['userData'] = instance['userData'][0]['value'] if instance['userData'] else '' + + dest = instance['primaryIpAddress'] + + instance['tags'] = list() + for tag in instance['tagReferences']: + instance['tags'].append(tag['tag']['name']) + + del instance['tagReferences'] + + self.inventory["_meta"]["hostvars"][dest] = instance + + # Inventory: group by memory + if 'maxMemory' in instance: + self.push(self.inventory, self.to_safe('memory_' + str(instance['maxMemory'])), dest) + elif 'memoryCapacity' in instance: + self.push(self.inventory, self.to_safe('memory_' + str(instance['memoryCapacity'])), dest) + + # Inventory: group by cpu count + if 'maxCpu' in instance: + self.push(self.inventory, self.to_safe('cpu_' + str(instance['maxCpu'])), dest) + elif 'processorPhysicalCoreAmount' in instance: + self.push(self.inventory, self.to_safe('cpu_' + str(instance['processorPhysicalCoreAmount'])), dest) + + # Inventory: group by datacenter + self.push(self.inventory, self.to_safe('datacenter_' + instance['datacenter']['name']), dest) + + # Inventory: group by hostname + self.push(self.inventory, self.to_safe(instance['hostname']), dest) + + # Inventory: group by FQDN + self.push(self.inventory, self.to_safe(instance['fullyQualifiedDomainName']), dest) + + # Inventory: group by domain + self.push(self.inventory, self.to_safe(instance['domain']), dest) + + # Inventory: group by type (hardware/virtual) + self.push(self.inventory, instance_type, dest) + + for tag in instance['tags']: + self.push(self.inventory, tag, dest) + + def get_virtual_servers(self): + '''Get all the CCI instances''' + vs = SoftLayer.VSManager(self.client) + mask = "mask[%s]" % ','.join(itertools.chain(self.common_items, self.vs_items)) + instances = vs.list_instances(mask=mask) + + for instance in instances: + self.process_instance(instance) + + def get_physical_servers(self): + '''Get all the hardware instances''' + hw = SoftLayer.HardwareManager(self.client) + mask = "mask[%s]" % ','.join(itertools.chain(self.common_items, self.hw_items)) + instances = hw.list_hardware(mask=mask) + + for instance in instances: + self.process_instance(instance, 'hardware') + + def get_all_servers(self): + self.client = SoftLayer.Client() + self.get_virtual_servers() + self.get_physical_servers() + + +SoftLayerInventory() diff --git a/scripts/inventory/spacewalk.ini b/scripts/inventory/spacewalk.ini new file mode 100644 index 0000000000..5433c4221b --- /dev/null +++ b/scripts/inventory/spacewalk.ini @@ -0,0 +1,16 @@ +# Put this ini-file in the same directory as spacewalk.py +# Command line options have precedence over options defined in here. + +[spacewalk] +# To limit the script on one organization in spacewalk, uncomment org_number +# and fill in the organization ID: +# org_number=2 + +# To prefix the group names with the organization ID set prefix_org_name=true. +# This is convenient when org_number is not set and you have the same group names +# in multiple organizations within spacewalk +# The prefix is "org_number-" +prefix_org_name=false + +# Default cache_age for files created with spacewalk-report is 300sec. 
+cache_age=300
diff --git a/scripts/inventory/spacewalk.py b/scripts/inventory/spacewalk.py
new file mode 100644
index 0000000000..dc96b1fe3b
--- /dev/null
+++ b/scripts/inventory/spacewalk.py
@@ -0,0 +1,237 @@
+#!/usr/bin/env python
+
+"""
+Spacewalk external inventory script
+===================================
+
+Ansible has a feature where instead of reading from /etc/ansible/hosts
+as a text file, it can query external programs to obtain the list
+of hosts, groups the hosts are in, and even variables to assign to each host.
+
+To use this, copy this file over /etc/ansible/hosts and chmod +x the file.
+This, more or less, allows you to keep one central database containing
+info about all of your managed instances.
+
+This script is dependent upon the spacewalk-reports package being installed
+on the same machine. It is basically a CSV-to-JSON converter from the
+output of "spacewalk-report system-groups-systems|inventory".
+
+Tested with Ansible 1.9.2 and Spacewalk 2.3
+"""
+#
+# Author:: Jon Miller
+# Copyright:: Copyright (c) 2013, Jon Miller
+#
+# Extended for support of multiple organizations and
+# adding the "_meta" dictionary to --list output by
+# Bernhard Lichtinger 2015
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 2 of the License, or (at
+# your option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see .
+#
+
+from __future__ import print_function
+
+import sys
+import os
+import time
+from optparse import OptionParser
+import subprocess
+import json
+
+from ansible.module_utils.six import iteritems
+from ansible.module_utils.six.moves import configparser as ConfigParser
+
+
+base_dir = os.path.dirname(os.path.realpath(__file__))
+default_ini_file = os.path.join(base_dir, "spacewalk.ini")
+
+SW_REPORT = '/usr/bin/spacewalk-report'
+CACHE_DIR = os.path.join(base_dir, ".spacewalk_reports")
+CACHE_AGE = 300  # 5min
+INI_FILE = os.path.expanduser(os.path.expandvars(os.environ.get("SPACEWALK_INI_PATH", default_ini_file)))
+
+
+# Sanity check
+if not os.path.exists(SW_REPORT):
+    print('Error: %s is required for operation.'
% (SW_REPORT), file=sys.stderr) + sys.exit(1) + +# Pre-startup work +if not os.path.exists(CACHE_DIR): + os.mkdir(CACHE_DIR) + os.chmod(CACHE_DIR, 0o2775) + +# Helper functions +# ------------------------------ + + +def spacewalk_report(name): + """Yield a dictionary form of each CSV output produced by the specified + spacewalk-report + """ + cache_filename = os.path.join(CACHE_DIR, name) + if not os.path.exists(cache_filename) or \ + (time.time() - os.stat(cache_filename).st_mtime) > CACHE_AGE: + # Update the cache + fh = open(cache_filename, 'w') + p = subprocess.Popen([SW_REPORT, name], stdout=fh) + p.wait() + fh.close() + + with open(cache_filename, 'r') as f: + lines = f.readlines() + keys = lines[0].strip().split(',') + # add 'spacewalk_' prefix to the keys + keys = ['spacewalk_' + key for key in keys] + for line in lines[1:]: + values = line.strip().split(',') + if len(keys) == len(values): + yield dict(zip(keys, values)) + + +# Options +# ------------------------------ + +parser = OptionParser(usage="%prog [options] --list | --host ") +parser.add_option('--list', default=False, dest="list", action="store_true", + help="Produce a JSON consumable grouping of servers for Ansible") +parser.add_option('--host', default=None, dest="host", + help="Generate additional host specific details for given host for Ansible") +parser.add_option('-H', '--human', dest="human", + default=False, action="store_true", + help="Produce a friendlier version of either server list or host detail") +parser.add_option('-o', '--org', default=None, dest="org_number", + help="Limit to spacewalk organization number") +parser.add_option('-p', default=False, dest="prefix_org_name", action="store_true", + help="Prefix the group name with the organization number") +(options, args) = parser.parse_args() + + +# read spacewalk.ini if present +# ------------------------------ +if os.path.exists(INI_FILE): + config = ConfigParser.SafeConfigParser() + config.read(INI_FILE) + if config.has_option('spacewalk', 'cache_age'): + CACHE_AGE = config.get('spacewalk', 'cache_age') + if not options.org_number and config.has_option('spacewalk', 'org_number'): + options.org_number = config.get('spacewalk', 'org_number') + if not options.prefix_org_name and config.has_option('spacewalk', 'prefix_org_name'): + options.prefix_org_name = config.getboolean('spacewalk', 'prefix_org_name') + + +# Generate dictionary for mapping group_id to org_id +# ------------------------------ +org_groups = {} +try: + for group in spacewalk_report('system-groups'): + org_groups[group['spacewalk_group_id']] = group['spacewalk_org_id'] + +except (OSError) as e: + print('Problem executing the command "%s system-groups": %s' % + (SW_REPORT, str(e)), file=sys.stderr) + sys.exit(2) + + +# List out the known server from Spacewalk +# ------------------------------ +if options.list: + + # to build the "_meta"-Group with hostvars first create dictionary for later use + host_vars = {} + try: + for item in spacewalk_report('inventory'): + host_vars[item['spacewalk_profile_name']] = dict((key, (value.split(';') if ';' in value else value)) for key, value in item.items()) + + except (OSError) as e: + print('Problem executing the command "%s inventory": %s' % + (SW_REPORT, str(e)), file=sys.stderr) + sys.exit(2) + + groups = {} + meta = {"hostvars": {}} + try: + for system in spacewalk_report('system-groups-systems'): + # first get org_id of system + org_id = org_groups[system['spacewalk_group_id']] + + # shall we add the org_id as prefix to the group name: + if 
options.prefix_org_name: + prefix = org_id + "-" + group_name = prefix + system['spacewalk_group_name'] + else: + group_name = system['spacewalk_group_name'] + + # if we are limited to one organization: + if options.org_number: + if org_id == options.org_number: + if group_name not in groups: + groups[group_name] = set() + + groups[group_name].add(system['spacewalk_server_name']) + if system['spacewalk_server_name'] in host_vars and not system['spacewalk_server_name'] in meta["hostvars"]: + meta["hostvars"][system['spacewalk_server_name']] = host_vars[system['spacewalk_server_name']] + # or we list all groups and systems: + else: + if group_name not in groups: + groups[group_name] = set() + + groups[group_name].add(system['spacewalk_server_name']) + if system['spacewalk_server_name'] in host_vars and not system['spacewalk_server_name'] in meta["hostvars"]: + meta["hostvars"][system['spacewalk_server_name']] = host_vars[system['spacewalk_server_name']] + + except (OSError) as e: + print('Problem executing the command "%s system-groups-systems": %s' % + (SW_REPORT, str(e)), file=sys.stderr) + sys.exit(2) + + if options.human: + for group, systems in iteritems(groups): + print('[%s]\n%s\n' % (group, '\n'.join(systems))) + else: + final = dict([(k, list(s)) for k, s in iteritems(groups)]) + final["_meta"] = meta + print(json.dumps(final)) + # print(json.dumps(groups)) + sys.exit(0) + + +# Return a details information concerning the spacewalk server +# ------------------------------ +elif options.host: + + host_details = {} + try: + for system in spacewalk_report('inventory'): + if system['spacewalk_hostname'] == options.host: + host_details = system + break + + except (OSError) as e: + print('Problem executing the command "%s inventory": %s' % + (SW_REPORT, str(e)), file=sys.stderr) + sys.exit(2) + + if options.human: + print('Host: %s' % options.host) + for k, v in iteritems(host_details): + print(' %s: %s' % (k, '\n '.join(v.split(';')))) + else: + print(json.dumps(dict((key, (value.split(';') if ';' in value else value)) for key, value in host_details.items()))) + sys.exit(0) + +else: + + parser.print_help() + sys.exit(1) diff --git a/scripts/inventory/ssh_config.py b/scripts/inventory/ssh_config.py new file mode 100644 index 0000000000..c7db6c7a88 --- /dev/null +++ b/scripts/inventory/ssh_config.py @@ -0,0 +1,131 @@ +#!/usr/bin/env python + +# (c) 2014, Tomas Karasek +# +# This file is part of Ansible. +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +# Dynamic inventory script which lets you use aliases from ~/.ssh/config. +# +# There were some issues with various Paramiko versions. I took a deeper look +# and tested heavily. Now, ansible parses this alright with Paramiko versions +# 1.7.2 to 1.15.2. +# +# It prints inventory based on parsed ~/.ssh/config. You can refer to hosts +# with their alias, rather than with the IP or hostname. It takes advantage +# of the ansible_ssh_{host,port,user,private_key_file}. 
+# +# If you have in your .ssh/config: +# Host git +# HostName git.domain.org +# User tkarasek +# IdentityFile /home/tomk/keys/thekey +# +# You can do +# $ ansible git -m ping +# +# Example invocation: +# ssh_config.py --list +# ssh_config.py --host + +import argparse +import os.path +import sys + +import json + +import paramiko + +from ansible.module_utils.common._collections_compat import MutableSequence + +SSH_CONF = '~/.ssh/config' + +_key = 'ssh_config' + +_ssh_to_ansible = [('user', 'ansible_ssh_user'), + ('hostname', 'ansible_ssh_host'), + ('identityfile', 'ansible_ssh_private_key_file'), + ('port', 'ansible_ssh_port')] + + +def get_config(): + if not os.path.isfile(os.path.expanduser(SSH_CONF)): + return {} + with open(os.path.expanduser(SSH_CONF)) as f: + cfg = paramiko.SSHConfig() + cfg.parse(f) + ret_dict = {} + for d in cfg._config: + if isinstance(d['host'], MutableSequence): + alias = d['host'][0] + else: + alias = d['host'] + if ('?' in alias) or ('*' in alias): + continue + _copy = dict(d) + del _copy['host'] + if 'config' in _copy: + ret_dict[alias] = _copy['config'] + else: + ret_dict[alias] = _copy + return ret_dict + + +def print_list(): + cfg = get_config() + meta = {'hostvars': {}} + for alias, attributes in cfg.items(): + tmp_dict = {} + for ssh_opt, ans_opt in _ssh_to_ansible: + if ssh_opt in attributes: + # If the attribute is a list, just take the first element. + # Private key is returned in a list for some reason. + attr = attributes[ssh_opt] + if isinstance(attr, MutableSequence): + attr = attr[0] + tmp_dict[ans_opt] = attr + if tmp_dict: + meta['hostvars'][alias] = tmp_dict + + print(json.dumps({_key: list(set(meta['hostvars'].keys())), '_meta': meta})) + + +def print_host(host): + cfg = get_config() + print(json.dumps(cfg[host])) + + +def get_args(args_list): + parser = argparse.ArgumentParser( + description='ansible inventory script parsing .ssh/config') + mutex_group = parser.add_mutually_exclusive_group(required=True) + help_list = 'list all hosts from .ssh/config inventory' + mutex_group.add_argument('--list', action='store_true', help=help_list) + help_host = 'display variables for a host' + mutex_group.add_argument('--host', help=help_host) + return parser.parse_args(args_list) + + +def main(args_list): + + args = get_args(args_list) + if args.list: + print_list() + if args.host: + print_host(args.host) + + +if __name__ == '__main__': + main(sys.argv[1:]) diff --git a/scripts/inventory/stacki.py b/scripts/inventory/stacki.py new file mode 100644 index 0000000000..98f35c4fb5 --- /dev/null +++ b/scripts/inventory/stacki.py @@ -0,0 +1,188 @@ +#!/usr/bin/env python + +# Copyright (c) 2016, Hugh Ma +# +# This module is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This software is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this software. If not, see . 
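The ssh_config script above leans on Paramiko's parser rather than reading ~/.ssh/config by hand. A minimal, self-contained sketch of the option-to-hostvar mapping, parsing a hypothetical config fragment in memory (assumes paramiko is installed, as the script itself requires):

    import io
    import json

    import paramiko

    # A hypothetical ~/.ssh/config fragment, parsed in memory.
    cfg = paramiko.SSHConfig()
    cfg.parse(io.StringIO(
        'Host git\n'
        '    HostName git.domain.org\n'
        '    User tkarasek\n'
        '    Port 2222\n'))

    # lookup() merges every matching Host block into one dict of lowercased keys.
    opts = cfg.lookup('git')
    wanted = [('user', 'ansible_ssh_user'),
              ('hostname', 'ansible_ssh_host'),
              ('port', 'ansible_ssh_port')]
    hostvars = dict((ansible_key, opts[ssh_key]) for ssh_key, ansible_key in wanted if ssh_key in opts)
    print(json.dumps({'git': hostvars}))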
+ +# Stacki inventory script +# Configure stacki.yml with proper auth information and place in the following: +# - ../inventory/stacki.yml +# - /etc/stacki/stacki.yml +# - /etc/ansible/stacki.yml +# The stacki.yml file can contain entries for authentication information +# regarding the Stacki front-end node. +# +# use_hostnames uses hostname rather than interface ip as connection +# +# + +""" +Example Usage: + List Stacki Nodes + $ ./stack.py --list + + +Example Configuration: +--- +stacki: + auth: + stacki_user: admin + stacki_password: abc12345678910 + stacki_endpoint: http://192.168.200.50/stack +use_hostnames: false +""" + +import argparse +import os +import sys +import yaml +from distutils.version import StrictVersion + +import json + +try: + import requests +except Exception: + sys.exit('requests package is required for this inventory script') + + +CONFIG_FILES = ['/etc/stacki/stacki.yml', '/etc/ansible/stacki.yml'] + + +def stack_auth(params): + endpoint = params['stacki_endpoint'] + auth_creds = {'USERNAME': params['stacki_user'], + 'PASSWORD': params['stacki_password']} + + client = requests.session() + client.get(endpoint) + + init_csrf = client.cookies['csrftoken'] + + header = {'csrftoken': init_csrf, 'X-CSRFToken': init_csrf, + 'Content-type': 'application/x-www-form-urlencoded'} + + login_endpoint = endpoint + "/login" + + login_req = client.post(login_endpoint, data=auth_creds, headers=header) + + csrftoken = login_req.cookies['csrftoken'] + sessionid = login_req.cookies['sessionid'] + + auth_creds.update(CSRFTOKEN=csrftoken, SESSIONID=sessionid) + + return client, auth_creds + + +def stack_build_header(auth_creds): + header = {'csrftoken': auth_creds['CSRFTOKEN'], + 'X-CSRFToken': auth_creds['CSRFTOKEN'], + 'sessionid': auth_creds['SESSIONID'], + 'Content-type': 'application/json'} + + return header + + +def stack_host_list(endpoint, header, client): + + stack_r = client.post(endpoint, data=json.dumps({"cmd": "list host"}), + headers=header) + return json.loads(stack_r.json()) + + +def stack_net_list(endpoint, header, client): + + stack_r = client.post(endpoint, data=json.dumps({"cmd": "list host interface"}), + headers=header) + return json.loads(stack_r.json()) + + +def format_meta(hostdata, intfdata, config): + use_hostnames = config['use_hostnames'] + meta = dict(all=dict(hosts=list()), + frontends=dict(hosts=list()), + backends=dict(hosts=list()), + _meta=dict(hostvars=dict())) + + # Iterate through list of dicts of hosts and remove + # environment key as it causes conflicts + for host in hostdata: + del host['environment'] + meta['_meta']['hostvars'][host['host']] = host + meta['_meta']['hostvars'][host['host']]['interfaces'] = list() + + # @bbyhuy to improve readability in next iteration + + for intf in intfdata: + if intf['host'] in meta['_meta']['hostvars']: + meta['_meta']['hostvars'][intf['host']]['interfaces'].append(intf) + if intf['default'] is True: + meta['_meta']['hostvars'][intf['host']]['ansible_host'] = intf['ip'] + if not use_hostnames: + meta['all']['hosts'].append(intf['ip']) + if meta['_meta']['hostvars'][intf['host']]['appliance'] != 'frontend': + meta['backends']['hosts'].append(intf['ip']) + else: + meta['frontends']['hosts'].append(intf['ip']) + else: + meta['all']['hosts'].append(intf['host']) + if meta['_meta']['hostvars'][intf['host']]['appliance'] != 'frontend': + meta['backends']['hosts'].append(intf['host']) + else: + meta['frontends']['hosts'].append(intf['host']) + return meta + + +def parse_args(): + parser = 
argparse.ArgumentParser(description='Stacki Inventory Module') + group = parser.add_mutually_exclusive_group(required=True) + group.add_argument('--list', action='store_true', + help='List active hosts') + group.add_argument('--host', help='List details about the specific host') + + return parser.parse_args() + + +def main(): + args = parse_args() + + if StrictVersion(requests.__version__) < StrictVersion("2.4.3"): + sys.exit('requests>=2.4.3 is required for this inventory script') + + try: + config_files = CONFIG_FILES + config_files.append(os.path.dirname(os.path.realpath(__file__)) + '/stacki.yml') + config = None + for cfg_file in config_files: + if os.path.isfile(cfg_file): + stream = open(cfg_file, 'r') + config = yaml.safe_load(stream) + break + if not config: + sys.stderr.write("No config file found at {0}\n".format(config_files)) + sys.exit(1) + client, auth_creds = stack_auth(config['stacki']['auth']) + header = stack_build_header(auth_creds) + host_list = stack_host_list(config['stacki']['auth']['stacki_endpoint'], header, client) + intf_list = stack_net_list(config['stacki']['auth']['stacki_endpoint'], header, client) + final_meta = format_meta(host_list, intf_list, config) + print(json.dumps(final_meta, indent=4)) + except Exception as e: + sys.stderr.write('%s\n' % e.message) + sys.exit(1) + sys.exit(0) + + +if __name__ == '__main__': + main() diff --git a/scripts/inventory/stacki.yml b/scripts/inventory/stacki.yml new file mode 100644 index 0000000000..2e31c72cbc --- /dev/null +++ b/scripts/inventory/stacki.yml @@ -0,0 +1,7 @@ +--- +stacki: + auth: + stacki_user: admin + stacki_password: GhYgWut1hfGbbnstmbW3m-bJbeME-3EvC20rF1LHrDM + stacki_endpoint: http://192.168.200.50/stack +use_hostnames: false \ No newline at end of file diff --git a/scripts/inventory/vagrant.py b/scripts/inventory/vagrant.py new file mode 100644 index 0000000000..96517c30d3 --- /dev/null +++ b/scripts/inventory/vagrant.py @@ -0,0 +1,131 @@ +#!/usr/bin/env python +""" +Vagrant external inventory script. Automatically finds the IP of the booted vagrant vm(s), and +returns it under the host group 'vagrant' + +Example Vagrant configuration using this script: + + config.vm.provision :ansible do |ansible| + ansible.playbook = "./provision/your_playbook.yml" + ansible.inventory_path = "./provision/inventory/vagrant.py" + ansible.verbose = true + end +""" + +# Copyright (C) 2013 Mark Mandel +# 2015 Igor Khomyakov +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +# +# Thanks to the spacewalk.py inventory script for giving me the basic structure +# of this. 
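The Stacki authentication above is a two-step exchange: an unauthenticated GET seeds the CSRF cookie, then the login POST refreshes both the CSRF token and the session id that later JSON commands must carry. A condensed restatement of stack_auth() followed by one command; the endpoint and credentials are placeholders and a reachable Stacki frontend is assumed:

    import json

    import requests

    # Placeholder endpoint and credentials; a reachable Stacki frontend is assumed.
    endpoint = 'http://192.168.200.50/stack'
    client = requests.session()

    client.get(endpoint)  # unauthenticated GET only seeds the csrftoken cookie
    csrf = client.cookies['csrftoken']
    login = client.post(endpoint + '/login',
                        data={'USERNAME': 'admin', 'PASSWORD': 'secret'},
                        headers={'csrftoken': csrf, 'X-CSRFToken': csrf,
                                 'Content-type': 'application/x-www-form-urlencoded'})

    # later commands must carry the post-login CSRF token and session id
    header = {'csrftoken': login.cookies['csrftoken'],
              'X-CSRFToken': login.cookies['csrftoken'],
              'sessionid': login.cookies['sessionid'],
              'Content-type': 'application/json'}
    hosts = client.post(endpoint, data=json.dumps({'cmd': 'list host'}), headers=header)
    print(json.loads(hosts.json()))  # the API double-encodes, hence json.loads(...json())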
+# + +import sys +import os.path +import subprocess +import re +from paramiko import SSHConfig +from optparse import OptionParser +from collections import defaultdict +import json + +from ansible.module_utils._text import to_text +from ansible.module_utils.six.moves import StringIO + + +_group = 'vagrant' # a default group +_ssh_to_ansible = [('user', 'ansible_user'), + ('hostname', 'ansible_host'), + ('identityfile', 'ansible_ssh_private_key_file'), + ('port', 'ansible_port')] + +# Options +# ------------------------------ + +parser = OptionParser(usage="%prog [options] --list | --host ") +parser.add_option('--list', default=False, dest="list", action="store_true", + help="Produce a JSON consumable grouping of Vagrant servers for Ansible") +parser.add_option('--host', default=None, dest="host", + help="Generate additional host specific details for given host for Ansible") +(options, args) = parser.parse_args() + +# +# helper functions +# + + +# get all the ssh configs for all boxes in an array of dictionaries. +def get_ssh_config(): + return dict((k, get_a_ssh_config(k)) for k in list_running_boxes()) + + +# list all the running boxes +def list_running_boxes(): + + output = to_text(subprocess.check_output(["vagrant", "status"]), errors='surrogate_or_strict').split('\n') + + boxes = [] + + for line in output: + matcher = re.search(r"([^\s]+)[\s]+running \(.+", line) + if matcher: + boxes.append(matcher.group(1)) + + return boxes + + +# get the ssh config for a single box +def get_a_ssh_config(box_name): + """Gives back a map of all the machine's ssh configurations""" + + output = to_text(subprocess.check_output(["vagrant", "ssh-config", box_name]), errors='surrogate_or_strict') + config = SSHConfig() + config.parse(StringIO(output)) + host_config = config.lookup(box_name) + + # man 5 ssh_config: + # > It is possible to have multiple identity files ... + # > all these identities will be tried in sequence. + for id in host_config['identityfile']: + if os.path.isfile(id): + host_config['identityfile'] = id + + return dict((v, host_config[k]) for k, v in _ssh_to_ansible) + + +# List out servers that vagrant has running +# ------------------------------ +if options.list: + ssh_config = get_ssh_config() + meta = defaultdict(dict) + + for host in ssh_config: + meta['hostvars'][host] = ssh_config[host] + + print(json.dumps({_group: list(ssh_config.keys()), '_meta': meta})) + sys.exit(0) + +# Get out the host details +# ------------------------------ +elif options.host: + print(json.dumps(get_a_ssh_config(options.host))) + sys.exit(0) + +# Print out help +# ------------------------------ +else: + parser.print_help() + sys.exit(0) diff --git a/scripts/inventory/vbox.py b/scripts/inventory/vbox.py new file mode 100644 index 0000000000..7a0ed702ae --- /dev/null +++ b/scripts/inventory/vbox.py @@ -0,0 +1,117 @@ +#!/usr/bin/env python + +# This file is part of Ansible, +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
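list_running_boxes() in the Vagrant script above scrapes `vagrant status` output with a regular expression, keeping only machines in the running state. The same regex applied to canned output, so the match behaviour can be checked without Vagrant installed:

    import re

    # Canned `vagrant status` output; only the running box should survive the filter.
    sample = [
        'Current machine states:',
        '',
        'default                   running (virtualbox)',
        'web2                      poweroff (virtualbox)',
    ]

    boxes = []
    for line in sample:
        matcher = re.search(r"([^\s]+)[\s]+running \(.+", line)
        if matcher:
            boxes.append(matcher.group(1))

    print(boxes)  # ['default']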
+ +import sys +from subprocess import Popen, PIPE + +import json + + +class SetEncoder(json.JSONEncoder): + def default(self, obj): + if isinstance(obj, set): + return list(obj) + return json.JSONEncoder.default(self, obj) + + +VBOX = "VBoxManage" + + +def get_hosts(host=None): + + returned = {} + try: + if host: + p = Popen([VBOX, 'showvminfo', host], stdout=PIPE) + else: + returned = {'all': set(), '_metadata': {}} + p = Popen([VBOX, 'list', '-l', 'vms'], stdout=PIPE) + except Exception: + sys.exit(1) + + hostvars = {} + prevkey = pref_k = '' + + for line in p.stdout.readlines(): + + try: + k, v = line.split(':', 1) + except Exception: + continue + + if k == '': + continue + + v = v.strip() + if k.startswith('Name'): + if v not in hostvars: + curname = v + hostvars[curname] = {} + try: # try to get network info + x = Popen([VBOX, 'guestproperty', 'get', curname, "/VirtualBox/GuestInfo/Net/0/V4/IP"], stdout=PIPE) + ipinfo = x.stdout.read() + if 'Value' in ipinfo: + a, ip = ipinfo.split(':', 1) + hostvars[curname]['ansible_ssh_host'] = ip.strip() + except Exception: + pass + + continue + + if not host: + if k == 'Groups': + for group in v.split('/'): + if group: + if group not in returned: + returned[group] = set() + returned[group].add(curname) + returned['all'].add(curname) + continue + + pref_k = 'vbox_' + k.strip().replace(' ', '_') + if k.startswith(' '): + if prevkey not in hostvars[curname]: + hostvars[curname][prevkey] = {} + hostvars[curname][prevkey][pref_k] = v + else: + if v != '': + hostvars[curname][pref_k] = v + + prevkey = pref_k + + if not host: + returned['_metadata']['hostvars'] = hostvars + else: + returned = hostvars[host] + return returned + + +if __name__ == '__main__': + + inventory = {} + hostname = None + + if len(sys.argv) > 1: + if sys.argv[1] == "--host": + hostname = sys.argv[2] + + if hostname: + inventory = get_hosts(hostname) + else: + inventory = get_hosts() + + sys.stdout.write(json.dumps(inventory, indent=2, cls=SetEncoder)) diff --git a/scripts/inventory/zabbix.ini b/scripts/inventory/zabbix.ini new file mode 100644 index 0000000000..ead19b62d5 --- /dev/null +++ b/scripts/inventory/zabbix.ini @@ -0,0 +1,20 @@ +# Ansible Zabbix external inventory script settings +# + +[zabbix] + +# Server location +server = http://zabbix.example.com/zabbix + +# Login +username = admin +password = zabbix + +# Verify the server's SSL certificate +validate_certs = True + +# Read zabbix inventory per host +read_host_inventory = True + +# Set ansible_ssh_host based on first interface settings +use_host_interface = True \ No newline at end of file diff --git a/scripts/inventory/zabbix.py b/scripts/inventory/zabbix.py new file mode 100644 index 0000000000..acdf38e704 --- /dev/null +++ b/scripts/inventory/zabbix.py @@ -0,0 +1,196 @@ +#!/usr/bin/env python + +# (c) 2013, Greg Buehler +# (c) 2018, Filippo Ferrazini +# +# This file is part of Ansible, +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
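The VirtualBox script above accumulates group members in Python sets, which json.dumps() cannot serialize directly; its SetEncoder converts them to lists at dump time. The pattern in isolation:

    import json

    # Sets make duplicate-free group membership easy, but json.dumps()
    # cannot serialize them; a default() hook turns them into lists on the way out.
    class SetEncoder(json.JSONEncoder):
        def default(self, obj):
            if isinstance(obj, set):
                return list(obj)
            return json.JSONEncoder.default(self, obj)

    groups = {'all': {'vm1', 'vm2'}, 'linux': {'vm1'}}
    print(json.dumps(groups, indent=2, cls=SetEncoder))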
+ +###################################################################### + +""" +Zabbix Server external inventory script. +======================================== + +Returns hosts and hostgroups from Zabbix Server. +If you want to run with --limit against a host group with space in the +name, use asterisk. For example --limit="Linux*servers". + +Configuration is read from `zabbix.ini`. + +Tested with Zabbix Server 2.0.6, 3.2.3 and 3.4. +""" + +from __future__ import print_function + +import os +import sys +import argparse +from ansible.module_utils.six.moves import configparser + +try: + from zabbix_api import ZabbixAPI +except Exception: + print("Error: Zabbix API library must be installed: pip install zabbix-api.", + file=sys.stderr) + sys.exit(1) + +import json + + +class ZabbixInventory(object): + + def read_settings(self): + config = configparser.SafeConfigParser() + conf_path = './zabbix.ini' + if not os.path.exists(conf_path): + conf_path = os.path.dirname(os.path.realpath(__file__)) + '/zabbix.ini' + if os.path.exists(conf_path): + config.read(conf_path) + # server + if config.has_option('zabbix', 'server'): + self.zabbix_server = config.get('zabbix', 'server') + + # login + if config.has_option('zabbix', 'username'): + self.zabbix_username = config.get('zabbix', 'username') + if config.has_option('zabbix', 'password'): + self.zabbix_password = config.get('zabbix', 'password') + # ssl certs + if config.has_option('zabbix', 'validate_certs'): + if config.get('zabbix', 'validate_certs') in ['false', 'False', False]: + self.validate_certs = False + # host inventory + if config.has_option('zabbix', 'read_host_inventory'): + if config.get('zabbix', 'read_host_inventory') in ['true', 'True', True]: + self.read_host_inventory = True + # host interface + if config.has_option('zabbix', 'use_host_interface'): + if config.get('zabbix', 'use_host_interface') in ['false', 'False', False]: + self.use_host_interface = False + + def read_cli(self): + parser = argparse.ArgumentParser() + parser.add_argument('--host') + parser.add_argument('--list', action='store_true') + self.options = parser.parse_args() + + def hoststub(self): + return { + 'hosts': [] + } + + def get_host(self, api, name): + api_query = {'output': 'extend', 'selectGroups': 'extend', "filter": {"host": [name]}} + if self.use_host_interface: + api_query['selectInterfaces'] = ['useip', 'ip', 'dns'] + if self.read_host_inventory: + api_query['selectInventory'] = "extend" + + data = {'ansible_ssh_host': name} + if self.use_host_interface or self.read_host_inventory: + try: + hosts_data = api.host.get(api_query)[0] + if 'interfaces' in hosts_data: + # use first interface only + if hosts_data['interfaces'][0]['useip'] == 0: + data['ansible_ssh_host'] = hosts_data['interfaces'][0]['dns'] + else: + data['ansible_ssh_host'] = hosts_data['interfaces'][0]['ip'] + if ('inventory' in hosts_data) and (hosts_data['inventory']): + data.update(hosts_data['inventory']) + except IndexError: + # Host not found in zabbix + pass + return data + + def get_list(self, api): + api_query = {'output': 'extend', 'selectGroups': 'extend'} + if self.use_host_interface: + api_query['selectInterfaces'] = ['useip', 'ip', 'dns'] + if self.read_host_inventory: + api_query['selectInventory'] = "extend" + + hosts_data = api.host.get(api_query) + data = {'_meta': {'hostvars': {}}} + + data[self.defaultgroup] = self.hoststub() + for host in hosts_data: + hostname = host['name'] + hostvars = dict() + data[self.defaultgroup]['hosts'].append(hostname) + + for group in 
host['groups']: + groupname = group['name'] + + if groupname not in data: + data[groupname] = self.hoststub() + + data[groupname]['hosts'].append(hostname) + if 'interfaces' in host: + # use first interface only + if host['interfaces'][0]['useip'] == 0: + hostvars['ansible_ssh_host'] = host['interfaces'][0]['dns'] + else: + hostvars['ansible_ssh_host'] = host['interfaces'][0]['ip'] + if ('inventory' in host) and (host['inventory']): + hostvars.update(host['inventory']) + data['_meta']['hostvars'][hostname] = hostvars + + return data + + def __init__(self): + + self.defaultgroup = 'group_all' + self.zabbix_server = None + self.zabbix_username = None + self.zabbix_password = None + self.validate_certs = True + self.read_host_inventory = False + self.use_host_interface = True + + self.meta = {} + + self.read_settings() + self.read_cli() + + if self.zabbix_server and self.zabbix_username: + try: + api = ZabbixAPI(server=self.zabbix_server, validate_certs=self.validate_certs) + api.login(user=self.zabbix_username, password=self.zabbix_password) + # zabbix_api tries to exit if it cannot parse what the zabbix server returned + # so we have to use SystemExit here + except (Exception, SystemExit) as e: + print("Error: Could not login to Zabbix server. Check your zabbix.ini.", file=sys.stderr) + sys.exit(1) + + if self.options.host: + data = self.get_host(api, self.options.host) + print(json.dumps(data, indent=2)) + + elif self.options.list: + data = self.get_list(api) + print(json.dumps(data, indent=2)) + + else: + print("usage: --list ..OR.. --host ", file=sys.stderr) + sys.exit(1) + + else: + print("Error: Configuration of server and credentials are required. See zabbix.ini.", file=sys.stderr) + sys.exit(1) + + +ZabbixInventory() diff --git a/scripts/inventory/zone.py b/scripts/inventory/zone.py new file mode 100644 index 0000000000..825e7499b0 --- /dev/null +++ b/scripts/inventory/zone.py @@ -0,0 +1,43 @@ +#!/usr/bin/env python + +# (c) 2015, Dagobert Michelsen +# +# This file is part of Ansible, +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
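get_list() in the Zabbix script above flattens each host.get record into the default group, one group per Zabbix host group, and a _meta.hostvars entry whose ansible_ssh_host comes from the first interface (IP when useip is set, DNS name otherwise). A minimal sketch of that transformation with hypothetical records:

    import json

    # Hypothetical host.get records carrying the fields get_list() consumes.
    hosts_data = [
        {'name': 'web01',
         'groups': [{'name': 'Linux servers'}],
         'interfaces': [{'useip': 1, 'ip': '10.0.0.5', 'dns': ''}],
         'inventory': {'os': 'CentOS'}},
    ]

    data = {'_meta': {'hostvars': {}}, 'group_all': {'hosts': []}}
    for host in hosts_data:
        hostname = host['name']
        data['group_all']['hosts'].append(hostname)
        for group in host['groups']:
            data.setdefault(group['name'], {'hosts': []})['hosts'].append(hostname)
        # first interface wins: IP when useip is set, DNS name otherwise
        iface = host['interfaces'][0]
        hostvars = {'ansible_ssh_host': iface['ip'] if iface['useip'] else iface['dns']}
        hostvars.update(host.get('inventory') or {})
        data['_meta']['hostvars'][hostname] = hostvars

    print(json.dumps(data, indent=2))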
+ +from subprocess import Popen, PIPE +import sys +import json + +result = {} +result['all'] = {} + +pipe = Popen(['zoneadm', 'list', '-ip'], stdout=PIPE, universal_newlines=True) +result['all']['hosts'] = [] +for l in pipe.stdout.readlines(): + # 1:work:running:/zones/work:3126dc59-9a07-4829-cde9-a816e4c5040e:native:shared + s = l.split(':') + if s[1] != 'global': + result['all']['hosts'].append(s[1]) + +result['all']['vars'] = {} +result['all']['vars']['ansible_connection'] = 'zone' + +if len(sys.argv) == 2 and sys.argv[1] == '--list': + print(json.dumps(result)) +elif len(sys.argv) == 3 and sys.argv[1] == '--host': + print(json.dumps({'ansible_connection': 'zone'})) +else: + sys.stderr.write("Need an argument, either --list or --host \n") diff --git a/scripts/vault/__init__.py b/scripts/vault/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/scripts/vault/azure_vault.ini b/scripts/vault/azure_vault.ini new file mode 100644 index 0000000000..d47f976201 --- /dev/null +++ b/scripts/vault/azure_vault.ini @@ -0,0 +1,10 @@ +[azure_keyvault] # Used with Azure KeyVault +vault_name=django-keyvault +secret_name=vaultpw +secret_version=9k1e6c7367b33eac8ee241b3698009f3 + +[azure] # Used by Dynamic Inventory +group_by_resource_group=yes +group_by_location=yes +group_by_security_group=yes +group_by_tag=yes \ No newline at end of file diff --git a/scripts/vault/azure_vault.py b/scripts/vault/azure_vault.py new file mode 100644 index 0000000000..5f15b2d40e --- /dev/null +++ b/scripts/vault/azure_vault.py @@ -0,0 +1,592 @@ +#!/usr/bin/env python +# +# This script borrows a great deal of code from the azure_rm.py dynamic inventory script +# that is packaged with Ansible. This can be found in the Ansible GitHub project at: +# https://github.com/ansible/ansible/blob/devel/contrib/inventory/azure_rm.py +# +# The Azure Dynamic Inventory script was written by: +# Copyright (c) 2016 Matt Davis, +# Chris Houseknecht, +# Altered/Added for Vault functionality: +# Austin Hobbs, GitHub: @OxHobbs + +''' +Ansible Vault Password with Azure Key Vault Secret Script +========================================================= +This script is designed to be used with Ansible Vault. It provides the +capability to provide this script as the password file to the ansible-vault +command. This script uses the Azure Python SDK. For instruction on installing +the Azure Python SDK see http://azure-sdk-for-python.readthedocs.org/ + +Authentication +-------------- +The order of precedence is command line arguments, environment variables, +and finally the [default] profile found in ~/.azure/credentials for all +authentication parameters. + +If using a credentials file, it should be an ini formatted file with one or +more sections, which we refer to as profiles. The script looks for a +[default] section, if a profile is not specified either on the command line +or with an environment variable. The keys in a profile will match the +list of command line arguments below. + +For command line arguments and environment variables specify a profile found +in your ~/.azure/credentials file, or a service principal or Active Directory +user. 
+ +Command line arguments: + - profile + - client_id + - secret + - subscription_id + - tenant + - ad_user + - password + - cloud_environment + - adfs_authority_url + - vault-name + - secret-name + - secret-version + +Environment variables: + - AZURE_PROFILE + - AZURE_CLIENT_ID + - AZURE_SECRET + - AZURE_SUBSCRIPTION_ID + - AZURE_TENANT + - AZURE_AD_USER + - AZURE_PASSWORD + - AZURE_CLOUD_ENVIRONMENT + - AZURE_ADFS_AUTHORITY_URL + - AZURE_VAULT_NAME + - AZURE_VAULT_SECRET_NAME + - AZURE_VAULT_SECRET_VERSION + + +Vault +----- + +The order of precedence of Azure Key Vault Secret information is the same. +Command line arguments, environment variables, and finally the azure_vault.ini +file with the [azure_keyvault] section. + +azure_vault.ini (or azure_rm.ini if merged with Azure Dynamic Inventory Script) +------------------------------------------------------------------------------ +As mentioned above, you can control execution using environment variables or a .ini file. A sample +azure_vault.ini is included. The name of the .ini file is the basename of the inventory script (in this case +'azure_vault') with a .ini extension. It also assumes the .ini file is alongside the script. To specify +a different path for the .ini file, define the AZURE_VAULT_INI_PATH environment variable: + + export AZURE_VAULT_INI_PATH=/path/to/custom.ini + or + export AZURE_VAULT_INI_PATH=[same path as azure_rm.ini if merged] + + __NOTE__: If using the azure_rm.py dynamic inventory script, it is possible to use the same .ini + file for both the azure_rm dynamic inventory and the azure_vault password file. Simply add a section + named [azure_keyvault] to the ini file with the following properties: vault_name, secret_name and + secret_version. + +Examples: +--------- + Validate the vault_pw script with Python + $ python azure_vault.py -n mydjangovault -s vaultpw -v 6b6w7f7252b44eac8ee726b3698009f3 + $ python azure_vault.py --vault-name 'mydjangovault' --secret-name 'vaultpw' \ + --secret-version 6b6w7f7252b44eac8ee726b3698009f3 + + Use with a playbook + $ ansible-playbook -i ./azure_rm.py my_playbook.yml --limit galaxy-qa --vault-password-file ./azure_vault.py + + +Insecure Platform Warning +------------------------- +If you receive InsecurePlatformWarning from urllib3, install the +requests security packages: + + pip install requests[security] + + +author: + - Chris Houseknecht (@chouseknecht) + - Matt Davis (@nitzmahone) + - Austin Hobbs (@OxHobbs) + +Company: Ansible by Red Hat, Microsoft + +Version: 0.1.0 +''' + +import argparse +import os +import re +import sys +import inspect +from azure.keyvault import KeyVaultClient + +from ansible.module_utils.six.moves import configparser as cp + +from os.path import expanduser +import ansible.module_utils.six.moves.urllib.parse as urlparse + +HAS_AZURE = True +HAS_AZURE_EXC = None +HAS_AZURE_CLI_CORE = True +CLIError = None + +try: + from msrestazure.azure_active_directory import AADTokenCredentials + from msrestazure.azure_exceptions import CloudError + from msrestazure.azure_active_directory import MSIAuthentication + from msrestazure import azure_cloud + from azure.mgmt.compute import __version__ as azure_compute_version + from azure.common import AzureMissingResourceHttpError, AzureHttpError + from azure.common.credentials import ServicePrincipalCredentials, UserPassCredentials + from azure.mgmt.network import NetworkManagementClient + from azure.mgmt.resource.resources import ResourceManagementClient + from azure.mgmt.resource.subscriptions import SubscriptionClient + 
from azure.mgmt.compute import ComputeManagementClient + from adal.authentication_context import AuthenticationContext +except ImportError as exc: + HAS_AZURE_EXC = exc + HAS_AZURE = False + +try: + from azure.cli.core.util import CLIError + from azure.common.credentials import get_azure_cli_credentials, get_cli_profile + from azure.common.cloud import get_cli_active_cloud +except ImportError: + HAS_AZURE_CLI_CORE = False + CLIError = Exception + +try: + from ansible.release import __version__ as ansible_version +except ImportError: + ansible_version = 'unknown' + + +AZURE_CREDENTIAL_ENV_MAPPING = dict( + profile='AZURE_PROFILE', + subscription_id='AZURE_SUBSCRIPTION_ID', + client_id='AZURE_CLIENT_ID', + secret='AZURE_SECRET', + tenant='AZURE_TENANT', + ad_user='AZURE_AD_USER', + password='AZURE_PASSWORD', + cloud_environment='AZURE_CLOUD_ENVIRONMENT', + adfs_authority_url='AZURE_ADFS_AUTHORITY_URL' +) + +AZURE_VAULT_SETTINGS = dict( + vault_name='AZURE_VAULT_NAME', + secret_name='AZURE_VAULT_SECRET_NAME', + secret_version='AZURE_VAULT_SECRET_VERSION', +) + +AZURE_MIN_VERSION = "2.0.0" +ANSIBLE_USER_AGENT = 'Ansible/{0}'.format(ansible_version) + + +class AzureRM(object): + + def __init__(self, args): + self._args = args + self._cloud_environment = None + self._compute_client = None + self._resource_client = None + self._network_client = None + self._adfs_authority_url = None + self._vault_client = None + self._resource = None + + self.debug = False + if args.debug: + self.debug = True + + self.credentials = self._get_credentials(args) + if not self.credentials: + self.fail("Failed to get credentials. Either pass as parameters, set environment variables, " + "or define a profile in ~/.azure/credentials.") + + # if cloud_environment specified, look up/build Cloud object + raw_cloud_env = self.credentials.get('cloud_environment') + if not raw_cloud_env: + self._cloud_environment = azure_cloud.AZURE_PUBLIC_CLOUD # SDK default + else: + # try to look up "well-known" values via the name attribute on azure_cloud members + all_clouds = [x[1] for x in inspect.getmembers(azure_cloud) if isinstance(x[1], azure_cloud.Cloud)] + matched_clouds = [x for x in all_clouds if x.name == raw_cloud_env] + if len(matched_clouds) == 1: + self._cloud_environment = matched_clouds[0] + elif len(matched_clouds) > 1: + self.fail("Azure SDK failure: more than one cloud matched for cloud_environment name '{0}'".format( + raw_cloud_env)) + else: + if not urlparse.urlparse(raw_cloud_env).scheme: + self.fail("cloud_environment must be an endpoint discovery URL or one of {0}".format( + [x.name for x in all_clouds])) + try: + self._cloud_environment = azure_cloud.get_cloud_from_metadata_endpoint(raw_cloud_env) + except Exception as e: + self.fail("cloud_environment {0} could not be resolved: {1}".format(raw_cloud_env, e.message)) + + if self.credentials.get('subscription_id', None) is None: + self.fail("Credentials did not include a subscription_id value.") + self.log("setting subscription_id") + self.subscription_id = self.credentials['subscription_id'] + + # get authentication authority + # for adfs, user could pass in authority or not. 
+ # for others, use default authority from cloud environment + if self.credentials.get('adfs_authority_url'): + self._adfs_authority_url = self.credentials.get('adfs_authority_url') + else: + self._adfs_authority_url = self._cloud_environment.endpoints.active_directory + + # get resource from cloud environment + self._resource = self._cloud_environment.endpoints.active_directory_resource_id + + if self.credentials.get('credentials'): + self.azure_credentials = self.credentials.get('credentials') + elif self.credentials.get('client_id') and self.credentials.get('secret') and self.credentials.get('tenant'): + self.azure_credentials = ServicePrincipalCredentials(client_id=self.credentials['client_id'], + secret=self.credentials['secret'], + tenant=self.credentials['tenant'], + cloud_environment=self._cloud_environment) + + elif self.credentials.get('ad_user') is not None and \ + self.credentials.get('password') is not None and \ + self.credentials.get('client_id') is not None and \ + self.credentials.get('tenant') is not None: + + self.azure_credentials = self.acquire_token_with_username_password( + self._adfs_authority_url, + self._resource, + self.credentials['ad_user'], + self.credentials['password'], + self.credentials['client_id'], + self.credentials['tenant']) + + elif self.credentials.get('ad_user') is not None and self.credentials.get('password') is not None: + tenant = self.credentials.get('tenant') + if not tenant: + tenant = 'common' + self.azure_credentials = UserPassCredentials(self.credentials['ad_user'], + self.credentials['password'], + tenant=tenant, + cloud_environment=self._cloud_environment) + + else: + self.fail("Failed to authenticate with provided credentials. Some attributes were missing. " + "Credentials must include client_id, secret and tenant or ad_user and password, or " + "ad_user, password, client_id, tenant and adfs_authority_url(optional) for ADFS authentication, " + "or be logged in using AzureCLI.") + + def log(self, msg): + if self.debug: + print(msg + u'\n') + + def fail(self, msg): + raise Exception(msg) + + def _get_profile(self, profile="default"): + path = expanduser("~") + path += "/.azure/credentials" + try: + config = cp.ConfigParser() + config.read(path) + except Exception as exc: + self.fail("Failed to access {0}. Check that the file exists and you have read " + "access. 
{1}".format(path, str(exc))) + credentials = dict() + for key in AZURE_CREDENTIAL_ENV_MAPPING: + try: + credentials[key] = config.get(profile, key, raw=True) + except Exception: + pass + + if credentials.get('client_id') is not None or credentials.get('ad_user') is not None: + return credentials + + return None + + def _get_env_credentials(self): + env_credentials = dict() + for attribute, env_variable in AZURE_CREDENTIAL_ENV_MAPPING.items(): + env_credentials[attribute] = os.environ.get(env_variable, None) + + if env_credentials['profile'] is not None: + credentials = self._get_profile(env_credentials['profile']) + return credentials + + if env_credentials['client_id'] is not None or env_credentials['ad_user'] is not None: + return env_credentials + + return None + + def _get_azure_cli_credentials(self): + credentials, subscription_id = get_azure_cli_credentials() + cloud_environment = get_cli_active_cloud() + + cli_credentials = { + 'credentials': credentials, + 'subscription_id': subscription_id, + 'cloud_environment': cloud_environment + } + return cli_credentials + + def _get_msi_credentials(self, subscription_id_param=None): + credentials = MSIAuthentication() + try: + # try to get the subscription in MSI to test whether MSI is enabled + subscription_client = SubscriptionClient(credentials) + subscription = next(subscription_client.subscriptions.list()) + subscription_id = str(subscription.subscription_id) + return { + 'credentials': credentials, + 'subscription_id': subscription_id_param or subscription_id + } + except Exception as exc: + return None + + def _get_credentials(self, params): + # Get authentication credentials. + # Precedence: cmd line parameters-> environment variables-> default profile in ~/.azure/credentials. + + self.log('Getting credentials') + + arg_credentials = dict() + for attribute, env_variable in AZURE_CREDENTIAL_ENV_MAPPING.items(): + arg_credentials[attribute] = getattr(params, attribute) + + # try module params + if arg_credentials['profile'] is not None: + self.log('Retrieving credentials with profile parameter.') + credentials = self._get_profile(arg_credentials['profile']) + return credentials + + if arg_credentials['client_id'] is not None: + self.log('Received credentials from parameters.') + return arg_credentials + + if arg_credentials['ad_user'] is not None: + self.log('Received credentials from parameters.') + return arg_credentials + + # try environment + env_credentials = self._get_env_credentials() + if env_credentials: + self.log('Received credentials from env.') + return env_credentials + + # try default profile from ~./azure/credentials + default_credentials = self._get_profile() + if default_credentials: + self.log('Retrieved default profile credentials from ~/.azure/credentials.') + return default_credentials + + msi_credentials = self._get_msi_credentials(arg_credentials.get('subscription_id')) + if msi_credentials: + self.log('Retrieved credentials from MSI.') + return msi_credentials + + try: + if HAS_AZURE_CLI_CORE: + self.log('Retrieving credentials from AzureCLI profile') + cli_credentials = self._get_azure_cli_credentials() + return cli_credentials + except CLIError as ce: + self.log('Error getting AzureCLI profile credentials - {0}'.format(ce)) + + return None + + def acquire_token_with_username_password(self, authority, resource, username, password, client_id, tenant): + authority_uri = authority + + if tenant is not None: + authority_uri = authority + '/' + tenant + + context = AuthenticationContext(authority_uri) + 
+        token_response = context.acquire_token_with_username_password(resource, username, password, client_id)
+        return AADTokenCredentials(token_response)
+
+    def _register(self, key):
+        try:
+            # We have to perform the one-time registration here. Otherwise, we receive an error the first
+            # time we attempt to use the requested client.
+            resource_client = self.rm_client
+            resource_client.providers.register(key)
+        except Exception as exc:
+            self.log("One-time registration of {0} failed - {1}".format(key, str(exc)))
+            self.log("You might need to register {0} using an admin account".format(key))
+            self.log(("To register a provider using the Python CLI: "
+                      "https://docs.microsoft.com/azure/azure-resource-manager/"
+                      "resource-manager-common-deployment-errors#noregisteredproviderfound"))
+
+    def get_mgmt_svc_client(self, client_type, base_url, api_version):
+        client = client_type(self.azure_credentials,
+                             self.subscription_id,
+                             base_url=base_url,
+                             api_version=api_version)
+        client.config.add_user_agent(ANSIBLE_USER_AGENT)
+        return client
+
+    def get_vault_client(self):
+        return KeyVaultClient(self.azure_credentials)
+
+    def get_vault_suffix(self):
+        return self._cloud_environment.suffixes.keyvault_dns
+
+    @property
+    def network_client(self):
+        self.log('Getting network client')
+        if not self._network_client:
+            self._network_client = self.get_mgmt_svc_client(NetworkManagementClient,
+                                                            self._cloud_environment.endpoints.resource_manager,
+                                                            '2017-06-01')
+            self._register('Microsoft.Network')
+        return self._network_client
+
+    @property
+    def rm_client(self):
+        self.log('Getting resource manager client')
+        if not self._resource_client:
+            self._resource_client = self.get_mgmt_svc_client(ResourceManagementClient,
+                                                             self._cloud_environment.endpoints.resource_manager,
+                                                             '2017-05-10')
+        return self._resource_client
+
+    @property
+    def compute_client(self):
+        self.log('Getting compute client')
+        if not self._compute_client:
+            self._compute_client = self.get_mgmt_svc_client(ComputeManagementClient,
+                                                            self._cloud_environment.endpoints.resource_manager,
+                                                            '2017-03-30')
+            self._register('Microsoft.Compute')
+        return self._compute_client
+
+    @property
+    def vault_client(self):
+        self.log('Getting the Key Vault client')
+        if not self._vault_client:
+            self._vault_client = self.get_vault_client()
+
+        return self._vault_client
+
+
+class AzureKeyVaultSecret:
+
+    def __init__(self):
+
+        self._args = self._parse_cli_args()
+
+        try:
+            rm = AzureRM(self._args)
+        except Exception as e:
+            sys.exit("{0}".format(str(e)))
+
+        self._get_vault_settings()
+
+        if self._args.vault_name:
+            self.vault_name = self._args.vault_name
+
+        if self._args.secret_name:
+            self.secret_name = self._args.secret_name
+
+        if self._args.secret_version:
+            self.secret_version = self._args.secret_version
+
+        self._vault_suffix = rm.get_vault_suffix()
+        self._vault_client = rm.vault_client
+
+        print(self.get_password_from_vault())
+
+    def _parse_cli_args(self):
+        parser = argparse.ArgumentParser(
+            description='Obtain the vault password used to secure your Ansible secrets'
+        )
+        parser.add_argument('-n', '--vault-name', action='store', help='Name of Azure Key Vault')
+        parser.add_argument('-s', '--secret-name', action='store',
+                            help='Name of the secret stored in Azure Key Vault')
+        parser.add_argument('-v', '--secret-version', action='store',
+                            help='Version of the secret to be retrieved')
+        parser.add_argument('--debug', action='store_true', default=False,
+                            help='Send the debug messages to STDOUT')
+        parser.add_argument('--profile', action='store',
help='Azure profile contained in ~/.azure/credentials') + parser.add_argument('--subscription_id', action='store', + help='Azure Subscription Id') + parser.add_argument('--client_id', action='store', + help='Azure Client Id ') + parser.add_argument('--secret', action='store', + help='Azure Client Secret') + parser.add_argument('--tenant', action='store', + help='Azure Tenant Id') + parser.add_argument('--ad_user', action='store', + help='Active Directory User') + parser.add_argument('--password', action='store', + help='password') + parser.add_argument('--adfs_authority_url', action='store', + help='Azure ADFS authority url') + parser.add_argument('--cloud_environment', action='store', + help='Azure Cloud Environment name or metadata discovery URL') + + return parser.parse_args() + + def get_password_from_vault(self): + vault_url = 'https://{0}{1}'.format(self.vault_name, self._vault_suffix) + secret = self._vault_client.get_secret(vault_url, self.secret_name, self.secret_version) + return secret.value + + def _get_vault_settings(self): + env_settings = self._get_vault_env_settings() + if None not in set(env_settings.values()): + for key in AZURE_VAULT_SETTINGS: + setattr(self, key, env_settings.get(key, None)) + else: + file_settings = self._load_vault_settings() + if not file_settings: + return + + for key in AZURE_VAULT_SETTINGS: + if file_settings.get(key): + setattr(self, key, file_settings.get(key)) + + def _get_vault_env_settings(self): + env_settings = dict() + for attribute, env_variable in AZURE_VAULT_SETTINGS.items(): + env_settings[attribute] = os.environ.get(env_variable, None) + return env_settings + + def _load_vault_settings(self): + basename = os.path.splitext(os.path.basename(__file__))[0] + default_path = os.path.join(os.path.dirname(__file__), (basename + '.ini')) + path = os.path.expanduser(os.path.expandvars(os.environ.get('AZURE_VAULT_INI_PATH', default_path))) + config = None + settings = None + try: + config = cp.ConfigParser() + config.read(path) + except Exception: + pass + + if config is not None: + settings = dict() + for key in AZURE_VAULT_SETTINGS: + try: + settings[key] = config.get('azure_keyvault', key, raw=True) + except Exception: + pass + + return settings + + +def main(): + if not HAS_AZURE: + sys.exit("The Azure python sdk is not installed (try `pip install 'azure>={0}' --upgrade`) - {1}".format( + AZURE_MIN_VERSION, HAS_AZURE_EXC)) + + AzureKeyVaultSecret() + + +if __name__ == '__main__': + main() diff --git a/scripts/vault/vault-keyring-client.py b/scripts/vault/vault-keyring-client.py new file mode 100644 index 0000000000..b0932d89ad --- /dev/null +++ b/scripts/vault/vault-keyring-client.py @@ -0,0 +1,147 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# (c) 2014, Matt Martz +# (c) 2016, Justin Mayer +# This file is part of Ansible. +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+#
+# =============================================================================
+#
+# This script is to be used with ansible-vault's --vault-id arg
+# to retrieve the vault password via your OS's native keyring application.
+#
+# This file *MUST* be saved with executable permissions. Otherwise, Ansible
+# will try to parse as a password file and display: "ERROR! Decryption failed"
+#
+# The `keyring` Python module is required: https://pypi.org/project/keyring/
+#
+# By default, this script will store the specified password in the keyring of
+# the user that invokes the script. To specify a user keyring, add a [vault]
+# section to your ansible.cfg file with a 'username' option. Example:
+#
+# [vault]
+# username = 'ansible-vault'
+#
+# Typical usage:
+#
+# ansible-vault --vault-id keyring_id@contrib/vault/vault-keyring-client.py view some_encrypted_file
+#
+# --vault-id will call this script like:
+#
+# contrib/vault/vault-keyring-client.py --vault-id keyring_id
+#
+# That will retrieve the password from the user's keyring for the
+# keyring service 'keyring_id'. The equivalent of:
+#
+# keyring get keyring_id $USER
+#
+# If no vault-id name is specified on the ansible command line, the vault-keyring-client.py
+# script will be called without a '--vault-id' and will default to the keyring service 'ansible'.
+# This is equivalent to:
+#
+# keyring get ansible $USER
+#
+# You can configure the `vault_password_file` option in ansible.cfg:
+#
+# [defaults]
+# ...
+# vault_password_file = /path/to/vault-keyring-client.py
+# ...
+#
+# To set your password, `cd` to your project directory and run:
+#
+# # will use default keyring service / vault-id of 'ansible'
+# /path/to/vault-keyring-client.py --set
+#
+# or to specify the keyring service / vault-id of 'my_ansible_secret':
+#
+# /path/to/vault-keyring-client.py --vault-id my_ansible_secret --set
+#
+# If you choose not to configure the path to `vault_password_file` in
+# ansible.cfg, your `ansible-playbook` command might look like:
+#
+# ansible-playbook --vault-id=keyring_id@/path/to/vault-keyring-client.py site.yml
+
+ANSIBLE_METADATA = {'status': ['preview'],
+                    'supported_by': 'community',
+                    'version': '1.0'}
+
+import argparse
+import sys
+import getpass
+import keyring
+
+from ansible.config.manager import ConfigManager
+
+KEYNAME_UNKNOWN_RC = 2
+
+
+def build_arg_parser():
+    parser = argparse.ArgumentParser(description='Get a vault password from user keyring')
+
+    parser.add_argument('--vault-id', action='store', default=None,
+                        dest='vault_id',
+                        help='name of the vault secret to get from keyring')
+    parser.add_argument('--username', action='store', default=None,
+                        help='the username whose keyring is queried')
+    parser.add_argument('--set', action='store_true', default=False,
+                        dest='set_password',
+                        help='set the password instead of getting it')
+    return parser
+
+
+def main():
+    config_manager = ConfigManager()
+    username = config_manager.data.get_setting('vault.username')
+    if not username:
+        username = getpass.getuser()
+
+    keyname = config_manager.data.get_setting('vault.keyname')
+    if not keyname:
+        keyname = 'ansible'
+
+    arg_parser = build_arg_parser()
+    args = arg_parser.parse_args()
+
+    username = args.username or username
+    keyname = args.vault_id or keyname
+
+    # print('username: %s keyname: %s' % (username, keyname))
+
+    if args.set_password:
+        intro = 'Storing password in "{}" user keyring using key name: {}\n'
+        sys.stdout.write(intro.format(username, keyname))
+        password = getpass.getpass()
+        confirm = getpass.getpass('Confirm
password: ') + if password == confirm: + keyring.set_password(keyname, username, password) + else: + sys.stderr.write('Passwords do not match\n') + sys.exit(1) + else: + secret = keyring.get_password(keyname, username) + if secret is None: + sys.stderr.write('vault-keyring-client could not find key="%s" for user="%s" via backend="%s"\n' % + (keyname, username, keyring.get_keyring().name)) + sys.exit(KEYNAME_UNKNOWN_RC) + + # print('secret: %s' % secret) + sys.stdout.write('%s\n' % secret) + + sys.exit(0) + + +if __name__ == '__main__': + main() diff --git a/scripts/vault/vault-keyring.py b/scripts/vault/vault-keyring.py new file mode 100644 index 0000000000..f03119a8dd --- /dev/null +++ b/scripts/vault/vault-keyring.py @@ -0,0 +1,101 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# (c) 2014, Matt Martz +# (c) 2016, Justin Mayer +# +# This file is part of Ansible. +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# +# ============================================================================= +# +# This script is to be used with vault_password_file or --vault-password-file +# to retrieve the vault password via your OS's native keyring application. +# +# This file *MUST* be saved with executable permissions. Otherwise, Ansible +# will try to parse as a password file and display: "ERROR! Decryption failed" +# +# The `keyring` Python module is required: https://pypi.org/project/keyring/ +# +# By default, this script will store the specified password in the keyring of +# the user that invokes the script. To specify a user keyring, add a [vault] +# section to your ansible.cfg file with a 'username' option. Example: +# +# [vault] +# username = 'ansible-vault' +# +# Another optional setting is for the key name, which allows you to use this +# script to handle multiple project vaults with different passwords: +# +# [vault] +# keyname = 'ansible-vault-yourproject' +# +# You can configure the `vault_password_file` option in ansible.cfg: +# +# [defaults] +# ... +# vault_password_file = /path/to/vault-keyring.py +# ... 
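+#
+# With the example [vault] options above, a manual lookup via the keyring CLI
+# would be the equivalent of:
+#
+# keyring get ansible-vault-yourproject ansible-vault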
+#
+# To set your password, `cd` to your project directory and run:
+#
+# python /path/to/vault-keyring.py set
+#
+# If you choose not to configure the path to `vault_password_file` in
+# ansible.cfg, your `ansible-playbook` command might look like:
+#
+# ansible-playbook --vault-password-file=/path/to/vault-keyring.py site.yml
+
+ANSIBLE_METADATA = {'status': ['preview'],
+                    'supported_by': 'community',
+                    'version': '1.0'}
+
+import sys
+import getpass
+import keyring
+
+from ansible.config.manager import ConfigManager, get_ini_config_value
+
+
+def main():
+    config = ConfigManager()
+    username = get_ini_config_value(
+        config._parsers[config._config_file],
+        dict(section='vault', key='username')
+    ) or getpass.getuser()
+
+    keyname = get_ini_config_value(
+        config._parsers[config._config_file],
+        dict(section='vault', key='keyname')
+    ) or 'ansible'
+
+    if len(sys.argv) == 2 and sys.argv[1] == 'set':
+        intro = 'Storing password in "{}" user keyring using key name: {}\n'
+        sys.stdout.write(intro.format(username, keyname))
+        password = getpass.getpass()
+        confirm = getpass.getpass('Confirm password: ')
+        if password == confirm:
+            keyring.set_password(keyname, username, password)
+        else:
+            sys.stderr.write('Passwords do not match\n')
+            sys.exit(1)
+    else:
+        sys.stdout.write('{0}\n'.format(keyring.get_password(keyname,
+                                                             username)))
+
+    sys.exit(0)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/tests/.gitignore b/tests/.gitignore
new file mode 100644
index 0000000000..ea1472ec1f
--- /dev/null
+++ b/tests/.gitignore
@@ -0,0 +1 @@
+output/
diff --git a/tests/integration/targets/__init__.py b/tests/integration/targets/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/tests/integration/targets/aix_devices/aliases b/tests/integration/targets/aix_devices/aliases
new file mode 100644
index 0000000000..e6cab07d71
--- /dev/null
+++ b/tests/integration/targets/aix_devices/aliases
@@ -0,0 +1,2 @@
+# No AIX LPAR available
+unsupported
diff --git a/tests/integration/targets/aix_devices/tasks/main.yml b/tests/integration/targets/aix_devices/tasks/main.yml
new file mode 100644
index 0000000000..106274c8cd
--- /dev/null
+++ b/tests/integration/targets/aix_devices/tasks/main.yml
@@ -0,0 +1,71 @@
+- name: Scan new devices.
+  aix_devices:
+    device: all
+    state: present
+
+- name: Scan new virtual devices (vio0).
+  aix_devices:
+    device: vio0
+    state: present
+
+- name: Removing IP alias from en0
+  aix_devices:
+    device: en0
+    attributes:
+      delalias4: 10.0.0.100,255.255.255.0
+
+- name: Removes ent2.
+  aix_devices:
+    device: ent2
+    state: absent
+
+- name: Put device en2 in Defined
+  aix_devices:
+    device: en2
+    state: defined
+
+- name: Removes ent4 (nonexistent).
+  aix_devices:
+    device: ent4
+    state: absent
+
+- name: Put device en4 in Defined (nonexistent)
+  aix_devices:
+    device: en4
+    state: defined
+
+- name: Put vscsi1 and children devices in Defined state.
+  aix_devices:
+    device: vscsi1
+    recursive: yes
+    state: defined
+
+- name: Removes vscsi1 and children devices.
+  aix_devices:
+    device: vscsi1
+    recursive: yes
+    state: absent
+
+- name: Changes en1 mtu to 9000 and disables arp.
+  aix_devices:
+    device: en1
+    attributes:
+      mtu: 9000
+      arp: off
+    state: present
+
+- name: Configure IP, netmask and set en1 up.
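+  # ('state: up' below is a device attribute handed to chdev, separate from
+  # the module-level 'state: present')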
+ aix_devices: + device: en1 + attributes: + netaddr: 192.168.0.100 + netmask: 255.255.255.0 + state: up + state: present + +- name: Adding IP alias to en0 + aix_devices: + device: en0 + attributes: + alias4: 10.0.0.100,255.255.255.0 + state: present diff --git a/tests/integration/targets/aix_filesystem/aliases b/tests/integration/targets/aix_filesystem/aliases new file mode 100644 index 0000000000..ad7ccf7ada --- /dev/null +++ b/tests/integration/targets/aix_filesystem/aliases @@ -0,0 +1 @@ +unsupported diff --git a/tests/integration/targets/aix_filesystem/tasks/main.yml b/tests/integration/targets/aix_filesystem/tasks/main.yml new file mode 100644 index 0000000000..2942bf034b --- /dev/null +++ b/tests/integration/targets/aix_filesystem/tasks/main.yml @@ -0,0 +1,120 @@ +- name: Umounting /testfs + aix_filesystem: + filesystem: /testfs + state: unmounted + +- name: Removing /testfs + aix_filesystem: + filesystem: /testfs + state: absent + +- name: Creating a new file system + aix_filesystem: + filesystem: /newfs + size: 1G + state: present + vg: datavg + +# It requires a host (nfshost) exporting the NFS +- name: Creating NFS filesystem from nfshost (Linux NFS server) + aix_filesystem: + device: /home/ftp + nfs_server: nfshost + filesystem: /nfs/ftp + state: present + +# It requires a volume group named datavg (next three actions) +- name: Creating a logical volume testlv (aix_lvol module) + aix_lvol: + vg: datavg + lv: testlv + size: 2G + state: present + +- name: Create filesystem in a previously defined logical volume + aix_filesystem: + device: testlv + filesystem: /testfs + state: present + +- name: Create an already existing filesystem using existing logical volume. + aix_filesystem: + vg: datavg + device: mksysblv + filesystem: /mksysb + state: present + +- name: Create a filesystem in a non-existing VG + aix_filesystem: + vg: nonexistvg + filesystem: /newlv + state: present + +- name: Resizing /mksysb to 1G + aix_filesystem: + filesystem: /mksysb + size: 1G + state: present + +- name: Resizing /mksysb to +512M + aix_filesystem: + filesystem: /mksysb + size: +512M + state: present + +- name: Resizing /mksysb to 11G + aix_filesystem: + filesystem: /mksysb + size: 11G + state: present + +- name: Resizing /mksysb to 11G (already done) + aix_filesystem: + filesystem: /mksysb + size: 11G + state: present + +- name: Resizing /mksysb to -2G + aix_filesystem: + filesystem: /mksysb + size: -2G + state: present + +- name: Resizing /mksysb to 100G (no enought space) + aix_filesystem: + filesystem: /mksysb + size: +100G + state: present + +- name: Unmount filesystem /home/ftp + aix_filesystem: + filesystem: /home/ftp + state: unmounted + +- name: Remove NFS filesystem /home/ftp + aix_filesystem: + filesystem: /home/ftp + rm_mount_point: yes + state: absent + +- name: Mount filesystem /newfs + aix_filesystem: + filesystem: /newfs + state: mounted + +- name: Remove mounted /newfs + aix_filesystem: + filesystem: /newfs + rm_mount_point: yes + state: absent + +- name: Umount /newfs + aix_filesystem: + filesystem: /newfs + state: unmounted + +- name: Remove /newfs + aix_filesystem: + filesystem: /newfs + rm_mount_point: yes + state: absent diff --git a/tests/integration/targets/alternatives/aliases b/tests/integration/targets/alternatives/aliases new file mode 100644 index 0000000000..3c1ea41f18 --- /dev/null +++ b/tests/integration/targets/alternatives/aliases @@ -0,0 +1,6 @@ +shippable/posix/group3 +destructive +needs/root +skip/aix +skip/freebsd +skip/osx diff --git 
a/tests/integration/targets/alternatives/tasks/main.yml b/tests/integration/targets/alternatives/tasks/main.yml new file mode 100644 index 0000000000..94f7deeb8c --- /dev/null +++ b/tests/integration/targets/alternatives/tasks/main.yml @@ -0,0 +1,66 @@ +# Copyright (c) 2017 Pierre-Louis Bonicoli +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +- name: 'setup: create a dummy alternative' + block: + - import_tasks: setup.yml + + ############## + # Test parameters: + # link parameter present / absent ('with_link' variable) + # with / without alternatives defined in alternatives file ('with_alternatives' variable) + # auto / manual ('mode' variable) + + - include_tasks: tests.yml + with_nested: + - [ True, False ] # with_link + - [ True, False ] # with_alternatives + - [ 'auto', 'manual' ] # mode + loop_control: + loop_var: test_conf + + ########## + # Priority + - block: + - include_tasks: remove_links.yml + - include_tasks: setup_test.yml + # at least two iterations again + - include_tasks: tests_set_priority.yml + with_sequence: start=3 end=4 + vars: + with_alternatives: True + mode: auto + + - block: + - include_tasks: remove_links.yml + - include_tasks: setup_test.yml + # at least two iterations again + - include_tasks: tests_set_priority.yml + with_sequence: start=3 end=4 + vars: + with_alternatives: False + mode: auto + + # Test that path is checked: alternatives must fail when path is nonexistent + - import_tasks: path_is_checked.yml + + always: + - include_tasks: remove_links.yml + + - file: + path: '{{ item }}' + state: absent + with_items: + - '{{ alternatives_dir }}/dummy' + + - file: + path: '/usr/bin/dummy{{ item }}' + state: absent + with_sequence: start=1 end=4 + # *Disable tests on Fedora 24* + # Shippable Fedora 24 image provides chkconfig-1.7-2.fc24.x86_64 but not the + # latest available version (chkconfig-1.8-1.fc24.x86_64). update-alternatives + # in chkconfig-1.7-2 fails when /etc/alternatives/dummy link is missing, + # error is: 'failed to read link /usr/bin/dummy: No such file or directory'. + # Moreover Fedora 24 is no longer maintained. 
+ when: ansible_distribution != 'Fedora' or ansible_distribution_major_version|int > 24 diff --git a/tests/integration/targets/alternatives/tasks/path_is_checked.yml b/tests/integration/targets/alternatives/tasks/path_is_checked.yml new file mode 100644 index 0000000000..ef0a3b4763 --- /dev/null +++ b/tests/integration/targets/alternatives/tasks/path_is_checked.yml @@ -0,0 +1,12 @@ +- name: Try with nonexistent path + alternatives: + name: dummy + path: '/non/existent/path/there' + link: '/usr/bin/dummy' + ignore_errors: True + register: alternative + +- name: Check previous task failed + assert: + that: + - 'alternative is failed' diff --git a/tests/integration/targets/alternatives/tasks/remove_links.yml b/tests/integration/targets/alternatives/tasks/remove_links.yml new file mode 100644 index 0000000000..690b06069a --- /dev/null +++ b/tests/integration/targets/alternatives/tasks/remove_links.yml @@ -0,0 +1,7 @@ +- name: remove links + file: + path: '{{ item }}' + state: absent + with_items: + - /etc/alternatives/dummy + - /usr/bin/dummy diff --git a/tests/integration/targets/alternatives/tasks/setup.yml b/tests/integration/targets/alternatives/tasks/setup.yml new file mode 100644 index 0000000000..7e4a405340 --- /dev/null +++ b/tests/integration/targets/alternatives/tasks/setup.yml @@ -0,0 +1,14 @@ +- include_vars: '{{ item }}' + with_first_found: + - files: + - '{{ ansible_os_family }}-{{ ansible_distribution_version }}.yml' + - '{{ ansible_os_family }}.yml' + - default.yml + paths: ../vars +- template: + src: dummy_command + dest: /usr/bin/dummy{{ item }} + owner: root + group: root + mode: '0755' + with_sequence: start=1 end=4 diff --git a/tests/integration/targets/alternatives/tasks/setup_test.yml b/tests/integration/targets/alternatives/tasks/setup_test.yml new file mode 100644 index 0000000000..6a55c6ba7e --- /dev/null +++ b/tests/integration/targets/alternatives/tasks/setup_test.yml @@ -0,0 +1,11 @@ +- template: + src: dummy_alternative + dest: '{{ alternatives_dir }}/dummy' + owner: root + group: root + mode: '0644' + when: with_alternatives or ansible_os_family != 'RedHat' +- file: + path: '{{ alternatives_dir }}/dummy' + state: absent + when: not with_alternatives and ansible_os_family == 'RedHat' diff --git a/tests/integration/targets/alternatives/tasks/test.yml b/tests/integration/targets/alternatives/tasks/test.yml new file mode 100644 index 0000000000..e5cf2d99cc --- /dev/null +++ b/tests/integration/targets/alternatives/tasks/test.yml @@ -0,0 +1,53 @@ +- debug: + msg: ' with_alternatives: {{ with_alternatives }}, mode: {{ mode }}' + +- block: + - name: set alternative (using link parameter) + alternatives: + name: dummy + path: '/usr/bin/dummy{{ item }}' + link: '/usr/bin/dummy' + register: alternative + + - name: check expected command was executed + assert: + that: + - 'alternative is successful' + - 'alternative is changed' + when: with_link + +- block: + - name: set alternative (without link parameter) + alternatives: + name: dummy + path: '/usr/bin/dummy{{ item }}' + register: alternative + + - name: check expected command was executed + assert: + that: + - 'alternative is successful' + - 'alternative is changed' + when: not with_link + +- name: execute dummy command + shell: dummy + register: cmd + +- name: check expected command was executed + assert: + that: + - 'cmd.stdout == "dummy" ~ item' + +- name: 'check mode (manual: alternatives file existed, it has been updated)' + shell: 'head -n1 {{ alternatives_dir }}/dummy | grep "^manual$"' + when: ansible_os_family 
!= 'RedHat' or with_alternatives or item != 1 + +- name: 'check mode (auto: alternatives file didn''t exist, it has been created)' + shell: 'head -n1 {{ alternatives_dir }}/dummy | grep "^auto$"' + when: ansible_os_family == 'RedHat' and not with_alternatives and item == 1 + +- name: check that alternative has been updated + command: "grep -Pzq '/bin/dummy{{ item }}\\n' '{{ alternatives_dir }}/dummy'" + # priority doesn't seem updated + #command: "grep -Pzq '/bin/dummy{{ item }}\\n50' '{{ alternatives_dir }}/dummy'" diff --git a/tests/integration/targets/alternatives/tasks/tests.yml b/tests/integration/targets/alternatives/tasks/tests.yml new file mode 100644 index 0000000000..e0400dfd81 --- /dev/null +++ b/tests/integration/targets/alternatives/tasks/tests.yml @@ -0,0 +1,15 @@ +- block: + - include_tasks: remove_links.yml + - include_tasks: setup_test.yml + # at least two iterations: + # - first will use 'link currently absent', + # - second will receive 'link currently points to' + - include_tasks: test.yml + with_sequence: start=1 end=2 + vars: + with_link: '{{ test_conf[0] }}' + with_alternatives: '{{ test_conf[1] }}' + mode: '{{ test_conf[2] }}' + # update-alternatives included in Fedora 26 (1.10) & Red Hat 7.4 (1.8) doesn't provide + # '--query' switch, 'link' is mandatory for these distributions. + when: ansible_os_family != 'RedHat' or test_conf[0] diff --git a/tests/integration/targets/alternatives/tasks/tests_set_priority.yml b/tests/integration/targets/alternatives/tasks/tests_set_priority.yml new file mode 100644 index 0000000000..7e27817583 --- /dev/null +++ b/tests/integration/targets/alternatives/tasks/tests_set_priority.yml @@ -0,0 +1,23 @@ +- name: update dummy alternative + alternatives: + name: dummy + path: '/usr/bin/dummy{{ item }}' + link: /usr/bin/dummy + priority: '{{ 60 + item|int }}' + register: alternative + +- name: execute dummy command + shell: dummy + register: cmd + +- name: check if link group is in manual mode + shell: 'head -n1 {{ alternatives_dir }}/dummy | grep "^manual$"' + +- name: check expected command was executed + assert: + that: + - 'alternative is changed' + - 'cmd.stdout == "dummy{{ item }}"' + +- name: check that alternative has been updated + command: "grep -Pzq '/bin/dummy{{ item }}\\n{{ 60 + item|int }}' '{{ alternatives_dir }}/dummy'" diff --git a/tests/integration/targets/alternatives/templates/dummy_alternative b/tests/integration/targets/alternatives/templates/dummy_alternative new file mode 100644 index 0000000000..5dce8adde7 --- /dev/null +++ b/tests/integration/targets/alternatives/templates/dummy_alternative @@ -0,0 +1,12 @@ +{{ mode }} +/usr/bin/dummy + +{% if with_alternatives %} +/usr/bin/dummy1 +40 +/usr/bin/dummy2 +30 + +{% else %} + +{% endif %} diff --git a/tests/integration/targets/alternatives/templates/dummy_command b/tests/integration/targets/alternatives/templates/dummy_command new file mode 100644 index 0000000000..332d9fe1a9 --- /dev/null +++ b/tests/integration/targets/alternatives/templates/dummy_command @@ -0,0 +1,2 @@ +#!/bin/sh +echo dummy{{ item }} diff --git a/tests/integration/targets/alternatives/vars/Debian.yml b/tests/integration/targets/alternatives/vars/Debian.yml new file mode 100644 index 0000000000..1e83283e4d --- /dev/null +++ b/tests/integration/targets/alternatives/vars/Debian.yml @@ -0,0 +1,2 @@ +--- +alternatives_dir: /var/lib/dpkg/alternatives/ diff --git a/tests/integration/targets/alternatives/vars/Suse-42.3.yml b/tests/integration/targets/alternatives/vars/Suse-42.3.yml new file mode 100644 
index 0000000000..37664ddb56 --- /dev/null +++ b/tests/integration/targets/alternatives/vars/Suse-42.3.yml @@ -0,0 +1,2 @@ +--- +alternatives_dir: /var/lib/rpm/alternatives/ diff --git a/tests/integration/targets/alternatives/vars/default.yml b/tests/integration/targets/alternatives/vars/default.yml new file mode 100644 index 0000000000..d00123ded3 --- /dev/null +++ b/tests/integration/targets/alternatives/vars/default.yml @@ -0,0 +1,2 @@ +--- +alternatives_dir: /var/lib/alternatives/ diff --git a/tests/integration/targets/apache2_module/aliases b/tests/integration/targets/apache2_module/aliases new file mode 100644 index 0000000000..0725da563f --- /dev/null +++ b/tests/integration/targets/apache2_module/aliases @@ -0,0 +1,3 @@ +destructive +shippable/posix/group3 +skip/aix diff --git a/tests/integration/targets/apache2_module/meta/main.yml b/tests/integration/targets/apache2_module/meta/main.yml new file mode 100644 index 0000000000..07faa21776 --- /dev/null +++ b/tests/integration/targets/apache2_module/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - prepare_tests diff --git a/tests/integration/targets/apache2_module/tasks/actualtest.yml b/tests/integration/targets/apache2_module/tasks/actualtest.yml new file mode 100644 index 0000000000..d780e92caa --- /dev/null +++ b/tests/integration/targets/apache2_module/tasks/actualtest.yml @@ -0,0 +1,231 @@ +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
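+#
+# The tasks below exercise enable/disable round-trips and assert on the
+# registered results; on a Debian-family host the module's effect can be
+# cross-checked manually with, for example:
+#
+#   a2query -m userdir || apache2ctl -M | grep userdir_module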
+ +- name: install apache via apt + apt: + name: "{{item}}" + state: present + when: "ansible_os_family == 'Debian'" + with_items: + - apache2 + - libapache2-mod-evasive + +- name: install apache via zypper + package: + name: apache2 + state: present + when: "ansible_os_family == 'Suse'" + +- name: disable userdir module + apache2_module: + name: userdir + state: absent + register: userdir_first_disable + +- name: disable userdir module, second run + apache2_module: + name: userdir + state: absent + register: disable + +- name: ensure apache2_module is idempotent + assert: + that: + - disable is not changed + +- name: enable userdir module + apache2_module: + name: userdir + state: present + register: enable + +- name: ensure changed on successful enable + assert: + that: + - enable is changed + +- name: enable userdir module, second run + apache2_module: + name: userdir + state: present + register: enabletwo + +- name: ensure apache2_module is idempotent + assert: + that: + - 'not enabletwo.changed' + +- name: disable userdir module, final run + apache2_module: + name: userdir + state: absent + register: disablefinal + +- name: ensure changed on successful disable + assert: + that: + - 'disablefinal.changed' + +- name: set userdir to original state + apache2_module: + name: userdir + state: present + when: userdir_first_disable is changed + +- name: ensure autoindex enabled + apache2_module: + name: autoindex + state: present + +- name: Debian/Ubuntu specific tests + when: "ansible_os_family == 'Debian'" + block: + - name: force disable of autoindex # bug #2499 + apache2_module: + name: autoindex + state: absent + force: True + + - name: reenable autoindex + apache2_module: + name: autoindex + state: present + + - name: enable evasive module, test https://github.com/ansible/ansible/issues/22635 + apache2_module: + name: evasive + state: present + + - name: disable evasive module + apache2_module: + name: evasive + state: absent + + - name: use identifier to enable module, fix for https://github.com/ansible/ansible/issues/33669 + apache2_module: + name: dump_io + state: present + ignore_errors: True + register: enable_dumpio_wrong + + - name: disable dump_io + apache2_module: + name: dump_io + identifier: dumpio_module + state: absent + + - name: use identifier to enable module, fix for https://github.com/ansible/ansible/issues/33669 + apache2_module: + name: dump_io + identifier: dumpio_module + state: present + register: enable_dumpio_correct_1 + + - name: ensure idempotency with identifier + apache2_module: + name: dump_io + identifier: dumpio_module + state: present + register: enable_dumpio_correct_2 + + - name: disable dump_io + apache2_module: + name: dump_io + identifier: dumpio_module + state: absent + + - assert: + that: + - enable_dumpio_wrong is failed + - enable_dumpio_correct_1 is changed + - enable_dumpio_correct_2 is not changed + + - name: disable mpm modules + apache2_module: + name: "{{ item }}" + state: absent + ignore_configcheck: True + with_items: + - mpm_worker + - mpm_event + - mpm_prefork + + - name: enabled mpm_event + apache2_module: + name: mpm_event + state: present + ignore_configcheck: True + register: enabledmpmevent + + - name: ensure changed mpm_event + assert: + that: + - 'enabledmpmevent.changed' + + - name: switch between mpm_event and mpm_worker + apache2_module: + name: "{{ item.name }}" + state: "{{ item.state }}" + ignore_configcheck: True + with_items: + - name: mpm_event + state: absent + - name: mpm_worker + state: present + + - name: ensure 
mpm_worker is already enabled + apache2_module: + name: mpm_worker + state: present + register: enabledmpmworker + + - name: ensure mpm_worker unchanged + assert: + that: + - 'not enabledmpmworker.changed' + + - name: try to disable all mpm modules with configcheck + apache2_module: + name: "{{item}}" + state: absent + with_items: + - mpm_worker + - mpm_event + - mpm_prefork + ignore_errors: yes + register: remove_with_configcheck + + - name: ensure configcheck fails task with when run without mpm modules + assert: + that: + - "{{ item.failed }}" + with_items: "{{ remove_with_configcheck.results }}" + + - name: try to disable all mpm modules without configcheck + apache2_module: + name: "{{item}}" + state: absent + ignore_configcheck: True + with_items: + - mpm_worker + - mpm_event + - mpm_prefork + + - name: enabled mpm_event to restore previous state + apache2_module: + name: mpm_event + state: present + ignore_configcheck: True + register: enabledmpmevent diff --git a/tests/integration/targets/apache2_module/tasks/main.yml b/tests/integration/targets/apache2_module/tasks/main.yml new file mode 100644 index 0000000000..d95c21243e --- /dev/null +++ b/tests/integration/targets/apache2_module/tasks/main.yml @@ -0,0 +1,21 @@ +--- + + +- name: + block: + - name: get list of enabled modules + shell: apache2ctl -M | sort + register: modules_before + - name: include only on supported systems + include: actualtest.yml + always: + - name: get list of enabled modules + shell: apache2ctl -M | sort + register: modules_after + - debug: var=modules_before + - debug: var=modules_after + - name: ensure that all test modules are disabled again + assert: + that: modules_before.stdout == modules_after.stdout + when: ansible_os_family in ['Debian', 'Suse'] + # centos/RHEL does not have a2enmod/a2dismod diff --git a/tests/integration/targets/archive/aliases b/tests/integration/targets/archive/aliases new file mode 100644 index 0000000000..db9bbd8c42 --- /dev/null +++ b/tests/integration/targets/archive/aliases @@ -0,0 +1,4 @@ +needs/root +shippable/posix/group2 +destructive +skip/aix diff --git a/tests/integration/targets/archive/files/bar.txt b/tests/integration/targets/archive/files/bar.txt new file mode 100644 index 0000000000..5f34b0af07 --- /dev/null +++ b/tests/integration/targets/archive/files/bar.txt @@ -0,0 +1 @@ +bar.txt \ No newline at end of file diff --git a/tests/integration/targets/archive/files/empty.txt b/tests/integration/targets/archive/files/empty.txt new file mode 100644 index 0000000000..e69de29bb2 diff --git a/tests/integration/targets/archive/files/foo.txt b/tests/integration/targets/archive/files/foo.txt new file mode 100644 index 0000000000..7c6ded14ec --- /dev/null +++ b/tests/integration/targets/archive/files/foo.txt @@ -0,0 +1 @@ +foo.txt diff --git a/tests/integration/targets/archive/meta/main.yml b/tests/integration/targets/archive/meta/main.yml new file mode 100644 index 0000000000..07faa21776 --- /dev/null +++ b/tests/integration/targets/archive/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - prepare_tests diff --git a/tests/integration/targets/archive/tasks/main.yml b/tests/integration/targets/archive/tasks/main.yml new file mode 100644 index 0000000000..171cb91dcd --- /dev/null +++ b/tests/integration/targets/archive/tasks/main.yml @@ -0,0 +1,340 @@ +# Test code for the archive module. 
+# (c) 2017, Abhijeet Kasurde + +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# Make sure we start fresh + +- name: Ensure zip is present to create test archive (yum) + yum: name=zip state=latest + when: ansible_pkg_mgr == 'yum' + +- name: Ensure zip is present to create test archive (apt) + apt: name=zip state=latest + when: ansible_pkg_mgr == 'apt' + +- name: Install prerequisites for backports.lzma when using python2 (non OSX) + block: + - name: Set liblzma package name depending on the OS + set_fact: + liblzma_dev_package: + Debian: liblzma-dev + RedHat: xz-devel + Suse: xz-devel + - name: Ensure liblzma-dev is present to install backports-lzma + package: name={{ liblzma_dev_package[ansible_os_family] }} state=latest + when: ansible_os_family in liblzma_dev_package.keys() + when: + - ansible_python_version.split('.')[0] == '2' + - ansible_os_family != 'Darwin' + +- name: Install prerequisites for backports.lzma when using python2 (OSX) + block: + - name: Find brew binary + command: which brew + register: brew_which + - name: Get owner of brew binary + stat: path="{{ brew_which.stdout }}" + register: brew_stat + - name: "Install package" + homebrew: + name: xz + state: present + update_homebrew: no + become: yes + become_user: "{{ brew_stat.stat.pw_name }}" + # Newer versions of brew want to compile a package which takes a long time. 
Do not upgrade homebrew until a + # proper solution can be found + environment: + HOMEBREW_NO_AUTO_UPDATE: True + when: + - ansible_python_version.split('.')[0] == '2' + - ansible_os_family == 'Darwin' + +- name: Ensure backports.lzma is present to create test archive (pip) + pip: name=backports.lzma state=latest + when: ansible_python_version.split('.')[0] == '2' + register: backports_lzma_pip + +- name: prep our files + copy: src={{ item }} dest={{output_dir}}/{{ item }} + with_items: + - foo.txt + - bar.txt + - empty.txt + +- name: archive using gz + archive: + path: "{{ output_dir }}/*.txt" + dest: "{{ output_dir }}/archive_01.gz" + format: gz + register: archive_gz_result_01 + +- debug: msg="{{ archive_gz_result_01 }}" + +- name: verify that the files archived + file: path={{output_dir}}/archive_01.gz state=file + +- name: check if gz file exists and includes all text files + assert: + that: + - "{{ archive_gz_result_01.changed }}" + - "{{ 'archived' in archive_gz_result_01 }}" + - "{{ archive_gz_result_01['archived'] | length }} == 3" + +- name: archive using zip + archive: + path: "{{ output_dir }}/*.txt" + dest: "{{ output_dir }}/archive_01.zip" + format: zip + register: archive_zip_result_01 + +- debug: msg="{{ archive_zip_result_01 }}" + +- name: verify that the files archived + file: path={{output_dir}}/archive_01.zip state=file + +- name: check if zip file exists + assert: + that: + - "{{ archive_zip_result_01.changed }}" + - "{{ 'archived' in archive_zip_result_01 }}" + - "{{ archive_zip_result_01['archived'] | length }} == 3" + +- name: archive using bz2 + archive: + path: "{{ output_dir }}/*.txt" + dest: "{{ output_dir }}/archive_01.bz2" + format: bz2 + register: archive_bz2_result_01 + +- debug: msg="{{ archive_bz2_result_01 }}" + +- name: verify that the files archived + file: path={{output_dir}}/archive_01.bz2 state=file + +- name: check if bzip file exists + assert: + that: + - "{{ archive_bz2_result_01.changed }}" + - "{{ 'archived' in archive_bz2_result_01 }}" + - "{{ archive_bz2_result_01['archived'] | length }} == 3" + +- name: archive using xz + archive: + path: "{{ output_dir }}/*.txt" + dest: "{{ output_dir }}/archive_01.xz" + format: xz + register: archive_xz_result_01 + +- debug: msg="{{ archive_xz_result_01 }}" + +- name: verify that the files archived + file: path={{output_dir}}/archive_01.xz state=file + +- name: check if xz file exists + assert: + that: + - "{{ archive_xz_result_01.changed }}" + - "{{ 'archived' in archive_xz_result_01 }}" + - "{{ archive_xz_result_01['archived'] | length }} == 3" + +- name: archive and set mode to 0600 + archive: + path: "{{ output_dir }}/*.txt" + dest: "{{ output_dir }}/archive_02.gz" + format: gz + mode: "u+rwX,g-rwx,o-rwx" + register: archive_bz2_result_02 + +- name: Test that the file modes were changed + stat: + path: "{{ output_dir }}/archive_02.gz" + register: archive_02_gz_stat + +- debug: msg="{{ archive_02_gz_stat}}" + +- name: Test that the file modes were changed + assert: + that: + - "archive_02_gz_stat.changed == False " + - "archive_02_gz_stat.stat.mode == '0600'" + - "'archived' in archive_bz2_result_02" + - "{{ archive_bz2_result_02['archived']| length}} == 3" + +- name: remove our gz + file: path="{{ output_dir }}/archive_02.gz" state=absent + + +- name: archive and set mode to 0600 + archive: + path: "{{ output_dir }}/*.txt" + dest: "{{ output_dir }}/archive_02.zip" + format: zip + mode: "u+rwX,g-rwx,o-rwx" + register: archive_zip_result_02 + +- name: Test that the file modes were changed + stat: + path: 
"{{ output_dir }}/archive_02.zip" + register: archive_02_zip_stat + +- name: Test that the file modes were changed + assert: + that: + - "archive_02_zip_stat.changed == False" + - "archive_02_zip_stat.stat.mode == '0600'" + - "'archived' in archive_zip_result_02" + - "{{ archive_zip_result_02['archived']| length}} == 3" + +- name: remove our zip + file: path="{{ output_dir }}/archive_02.zip" state=absent + + +- name: archive and set mode to 0600 + archive: + path: "{{ output_dir }}/*.txt" + dest: "{{ output_dir }}/archive_02.bz2" + format: bz2 + mode: "u+rwX,g-rwx,o-rwx" + register: archive_bz2_result_02 + +- name: Test that the file modes were changed + stat: + path: "{{ output_dir }}/archive_02.bz2" + register: archive_02_bz2_stat + +- name: Test that the file modes were changed + assert: + that: + - "archive_02_bz2_stat.changed == False" + - "archive_02_bz2_stat.stat.mode == '0600'" + - "'archived' in archive_bz2_result_02" + - "{{ archive_bz2_result_02['archived']| length}} == 3" + +- name: remove our bz2 + file: path="{{ output_dir }}/archive_02.bz2" state=absent + +- name: archive and set mode to 0600 + archive: + path: "{{ output_dir }}/*.txt" + dest: "{{ output_dir }}/archive_02.xz" + format: xz + mode: "u+rwX,g-rwx,o-rwx" + register: archive_xz_result_02 + +- name: Test that the file modes were changed + stat: + path: "{{ output_dir }}/archive_02.xz" + register: archive_02_xz_stat + +- name: Test that the file modes were changed + assert: + that: + - "archive_02_xz_stat.changed == False" + - "archive_02_xz_stat.stat.mode == '0600'" + - "'archived' in archive_xz_result_02" + - "{{ archive_xz_result_02['archived']| length}} == 3" + +- name: remove our xz + file: path="{{ output_dir }}/archive_02.xz" state=absent + +- name: test that gz archive that contains non-ascii filenames + archive: + path: "{{ output_dir }}/*.txt" + dest: "{{ output_dir }}/test-archive-nonascii-くらとみ.tar.gz" + format: gz + register: nonascii_result_0 + +- name: Check that file is really there + stat: + path: "{{ output_dir }}/test-archive-nonascii-くらとみ.tar.gz" + register: nonascii_stat0 + +- name: Assert that nonascii tests succeeded + assert: + that: + - "nonascii_result_0.changed == true" + - "nonascii_stat0.stat.exists == true" + +- name: remove nonascii test + file: path="{{ output_dir }}/test-archive-nonascii-くらとみ.tar.gz" state=absent + +- name: test that bz2 archive that contains non-ascii filenames + archive: + path: "{{ output_dir }}/*.txt" + dest: "{{ output_dir }}/test-archive-nonascii-くらとみ.bz2" + format: bz2 + register: nonascii_result_1 + +- name: Check that file is really there + stat: + path: "{{ output_dir }}/test-archive-nonascii-くらとみ.bz2" + register: nonascii_stat_1 + +- name: Assert that nonascii tests succeeded + assert: + that: + - "nonascii_result_1.changed == true" + - "nonascii_stat_1.stat.exists == true" + +- name: remove nonascii test + file: path="{{ output_dir }}/test-archive-nonascii-くらとみ.bz2" state=absent + +- name: test that xz archive that contains non-ascii filenames + archive: + path: "{{ output_dir }}/*.txt" + dest: "{{ output_dir }}/test-archive-nonascii-くらとみ.xz" + format: xz + register: nonascii_result_1 + +- name: Check that file is really there + stat: + path: "{{ output_dir }}/test-archive-nonascii-くらとみ.xz" + register: nonascii_stat_1 + +- name: Assert that nonascii tests succeeded + assert: + that: + - "nonascii_result_1.changed == true" + - "nonascii_stat_1.stat.exists == true" + +- name: remove nonascii test + file: path="{{ output_dir }}/test-archive-nonascii-くらとみ.xz" 
state=absent + +- name: test that zip archive that contains non-ascii filenames + archive: + path: "{{ output_dir }}/*.txt" + dest: "{{ output_dir }}/test-archive-nonascii-くらとみ.zip" + format: zip + register: nonascii_result_2 + +- name: Check that file is really there + stat: + path: "{{ output_dir }}/test-archive-nonascii-くらとみ.zip" + register: nonascii_stat_2 + +- name: Assert that nonascii tests succeeded + assert: + that: + - "nonascii_result_2.changed == true" + - "nonascii_stat_2.stat.exists == true" + +- name: remove nonascii test + file: path="{{ output_dir }}/test-archive-nonascii-くらとみ.zip" state=absent + +- name: Remove backports.lzma if previously installed (pip) + pip: name=backports.lzma state=absent + when: backports_lzma_pip is changed diff --git a/tests/integration/targets/callback_log_plays/aliases b/tests/integration/targets/callback_log_plays/aliases new file mode 100644 index 0000000000..b59832142f --- /dev/null +++ b/tests/integration/targets/callback_log_plays/aliases @@ -0,0 +1 @@ +shippable/posix/group3 diff --git a/tests/integration/targets/callback_log_plays/ping_log.yml b/tests/integration/targets/callback_log_plays/ping_log.yml new file mode 100644 index 0000000000..8015726ebb --- /dev/null +++ b/tests/integration/targets/callback_log_plays/ping_log.yml @@ -0,0 +1,4 @@ +- hosts: localhost + gather_facts: false + tasks: + - ping: diff --git a/tests/integration/targets/callback_log_plays/runme.sh b/tests/integration/targets/callback_log_plays/runme.sh new file mode 100755 index 0000000000..059f8976ef --- /dev/null +++ b/tests/integration/targets/callback_log_plays/runme.sh @@ -0,0 +1,16 @@ +#!/usr/bin/env bash + +set -eux + +export ANSIBLE_CALLBACK_WHITELIST="log_plays,${ANSIBLE_CALLBACK_WHITELIST:-}" + +# run play, should create log and dir if needed +export ANSIBLE_LOG_FOLDER="logit" +ansible-playbook ping_log.yml -v "$@" +[[ -f "${ANSIBLE_LOG_FOLDER}/localhost" ]] + +# now force it to fail +export ANSIBLE_LOG_FOLDER="logit.file" +touch "${ANSIBLE_LOG_FOLDER}" +ansible-playbook ping_log.yml -v "$@" 2>&1| grep 'Failure using method (v2_runner_on_ok) in callback plugin' +[[ ! 
-f "${ANSIBLE_LOG_FOLDER}/localhost" ]] diff --git a/tests/integration/targets/ce_is_is_instance/defaults/main.yaml b/tests/integration/targets/ce_is_is_instance/defaults/main.yaml new file mode 100644 index 0000000000..164afead28 --- /dev/null +++ b/tests/integration/targets/ce_is_is_instance/defaults/main.yaml @@ -0,0 +1,3 @@ +--- +testcase: "[^_].*" +test_items: [] diff --git a/tests/integration/targets/ce_is_is_instance/meta/main.yml b/tests/integration/targets/ce_is_is_instance/meta/main.yml new file mode 100644 index 0000000000..8b13789179 --- /dev/null +++ b/tests/integration/targets/ce_is_is_instance/meta/main.yml @@ -0,0 +1 @@ + diff --git a/tests/integration/targets/ce_is_is_instance/tasks/main.yaml b/tests/integration/targets/ce_is_is_instance/tasks/main.yaml new file mode 100644 index 0000000000..cc27f174fd --- /dev/null +++ b/tests/integration/targets/ce_is_is_instance/tasks/main.yaml @@ -0,0 +1,2 @@ +--- +- { include: netconf.yaml, tags: ['netconf'] } diff --git a/tests/integration/targets/ce_is_is_instance/tasks/netconf.yaml b/tests/integration/targets/ce_is_is_instance/tasks/netconf.yaml new file mode 100644 index 0000000000..73b91adfaa --- /dev/null +++ b/tests/integration/targets/ce_is_is_instance/tasks/netconf.yaml @@ -0,0 +1,17 @@ +--- +- name: collect all netconf test cases + find: + paths: "{{ role_path }}/tests/netconf" + patterns: "{{ testcase }}.yaml" + use_regex: true + connection: local + register: test_cases + +- name: set test_items + set_fact: test_items="{{ test_cases.files | map(attribute='path') | list }}" + +- name: run test case (connection=netconf) + include: "{{ test_case_to_run }} ansible_connection=netconf" + with_items: "{{ test_items }}" + loop_control: + loop_var: test_case_to_run diff --git a/tests/integration/targets/ce_is_is_instance/tests/netconf/ce_is_is_instance.yaml b/tests/integration/targets/ce_is_is_instance/tests/netconf/ce_is_is_instance.yaml new file mode 100644 index 0000000000..d7c5a7c6e1 --- /dev/null +++ b/tests/integration/targets/ce_is_is_instance/tests/netconf/ce_is_is_instance.yaml @@ -0,0 +1,85 @@ +--- +- debug: + msg: "START ce_is_is_instance merged integration tests on connection={{ ansible_connection }}" + +- block: + - name: berfore merged, there should be no isis 100. 
+ ce_is_is_instance: &delete + instance_id: 100 + state: absent + + - name: Merge the provided configuration with the exisiting running configuration + ce_is_is_instance: &merged + instance_id: 100 + vpn_name: '__public__' + register: result + + - name: change ansible_connection to network_cli + ce_netconf: + rpc: get + cfg_xml: " + + + + + + + + + " + register: result_xml + + - name: Assert the configuration is reflected on host + assert: + that: + - "result['changed'] == true" + - "'100' in result_xml.end_state.result" + - "'__public__' in result_xml.end_state.result" + + - name: Merge the provided configuration with the existing running configuration (IDEMPOTENT) + ce_is_is_instance: *merged + register: result + + - name: Assert that the previous task was idempotent + assert: + that: + - "result['changed'] == false" + + - name: delete the provided configuration with the exisiting running configuration + ce_is_is_instance: *delete + register: result + + - name: change ansible_connection to network_cli + ce_netconf: + rpc: get + cfg_xml: " + + + + + + + + + " + register: result_xml + + - name: Assert the configuration is reflected on host + assert: + that: + - "result['changed'] == true" + - "'100' not in result_xml.end_state.result" + - "'__public__' not in result_xml.end_state.result" + + - name: delete the provided configuration with the existing running configuration (IDEMPOTENT) + ce_is_is_instance: *delete + register: result + + - name: Assert that the previous task was idempotent + assert: + that: + - "result['changed'] == false" + + +- debug: + msg: "END ce_is_is_instance merged integration tests on connection={{ ansible_connection }}" diff --git a/tests/integration/targets/ce_is_is_interface/defaults/main.yaml b/tests/integration/targets/ce_is_is_interface/defaults/main.yaml new file mode 100644 index 0000000000..164afead28 --- /dev/null +++ b/tests/integration/targets/ce_is_is_interface/defaults/main.yaml @@ -0,0 +1,3 @@ +--- +testcase: "[^_].*" +test_items: [] diff --git a/tests/integration/targets/ce_is_is_interface/meta/main.yml b/tests/integration/targets/ce_is_is_interface/meta/main.yml new file mode 100644 index 0000000000..8b13789179 --- /dev/null +++ b/tests/integration/targets/ce_is_is_interface/meta/main.yml @@ -0,0 +1 @@ + diff --git a/tests/integration/targets/ce_is_is_interface/tasks/main.yaml b/tests/integration/targets/ce_is_is_interface/tasks/main.yaml new file mode 100644 index 0000000000..cc27f174fd --- /dev/null +++ b/tests/integration/targets/ce_is_is_interface/tasks/main.yaml @@ -0,0 +1,2 @@ +--- +- { include: netconf.yaml, tags: ['netconf'] } diff --git a/tests/integration/targets/ce_is_is_interface/tasks/netconf.yaml b/tests/integration/targets/ce_is_is_interface/tasks/netconf.yaml new file mode 100644 index 0000000000..73b91adfaa --- /dev/null +++ b/tests/integration/targets/ce_is_is_interface/tasks/netconf.yaml @@ -0,0 +1,17 @@ +--- +- name: collect all netconf test cases + find: + paths: "{{ role_path }}/tests/netconf" + patterns: "{{ testcase }}.yaml" + use_regex: true + connection: local + register: test_cases + +- name: set test_items + set_fact: test_items="{{ test_cases.files | map(attribute='path') | list }}" + +- name: run test case (connection=netconf) + include: "{{ test_case_to_run }} ansible_connection=netconf" + with_items: "{{ test_items }}" + loop_control: + loop_var: test_case_to_run diff --git a/tests/integration/targets/ce_is_is_interface/tests/netconf/ce_is_is_interface.yaml 
diff --git a/tests/integration/targets/ce_is_is_interface/defaults/main.yaml b/tests/integration/targets/ce_is_is_interface/defaults/main.yaml new file mode 100644 index 0000000000..164afead28 --- /dev/null +++ b/tests/integration/targets/ce_is_is_interface/defaults/main.yaml @@ -0,0 +1,3 @@ +--- +testcase: "[^_].*" +test_items: [] diff --git a/tests/integration/targets/ce_is_is_interface/meta/main.yml b/tests/integration/targets/ce_is_is_interface/meta/main.yml new file mode 100644 index 0000000000..8b13789179 --- /dev/null +++ b/tests/integration/targets/ce_is_is_interface/meta/main.yml @@ -0,0 +1 @@ + diff --git a/tests/integration/targets/ce_is_is_interface/tasks/main.yaml b/tests/integration/targets/ce_is_is_interface/tasks/main.yaml new file mode 100644 index 0000000000..cc27f174fd --- /dev/null +++ b/tests/integration/targets/ce_is_is_interface/tasks/main.yaml @@ -0,0 +1,2 @@ +--- +- { include: netconf.yaml, tags: ['netconf'] } diff --git a/tests/integration/targets/ce_is_is_interface/tasks/netconf.yaml b/tests/integration/targets/ce_is_is_interface/tasks/netconf.yaml new file mode 100644 index 0000000000..73b91adfaa --- /dev/null +++ b/tests/integration/targets/ce_is_is_interface/tasks/netconf.yaml @@ -0,0 +1,17 @@ +--- +- name: collect all netconf test cases + find: + paths: "{{ role_path }}/tests/netconf" + patterns: "{{ testcase }}.yaml" + use_regex: true + connection: local + register: test_cases + +- name: set test_items + set_fact: test_items="{{ test_cases.files | map(attribute='path') | list }}" + +- name: run test case (connection=netconf) + include: "{{ test_case_to_run }} ansible_connection=netconf" + with_items: "{{ test_items }}" + loop_control: + loop_var: test_case_to_run diff --git a/tests/integration/targets/ce_is_is_interface/tests/netconf/ce_is_is_interface.yaml b/tests/integration/targets/ce_is_is_interface/tests/netconf/ce_is_is_interface.yaml new file mode 100644 index 0000000000..75afbbe899 --- /dev/null +++ b/tests/integration/targets/ce_is_is_interface/tests/netconf/ce_is_is_interface.yaml @@ -0,0 +1,133 @@ +--- +- debug: + msg: "START ce_is_is_interface merged integration tests on connection={{ ansible_connection }}" + +- block: + + - name: before merged, delete the provided configuration from the running configuration + ce_is_is_interface: &delete + instance_id: 100 + ifname: 10GE1/0/1 + leveltype: level_1 + level1dispriority: 10 + silentenable: true + silentcost: true + typep2penable: true + snpacheck: true + p2pnegotiationmode: 2_way + p2ppeeripignore: true + ppposicpcheckenable: true + level2cost: 10 + state: absent + register: result + + - name: Merge the provided configuration with the existing running configuration + ce_is_is_interface: &merged + instance_id: 100 + ifname: 10GE1/0/1 + leveltype: level_1 + level1dispriority: 10 + silentenable: true + silentcost: true + typep2penable: true + snpacheck: true + p2pnegotiationmode: 2_way + p2ppeeripignore: true + ppposicpcheckenable: true + level2cost: 10 + register: result + + - name: use ce_netconf to get configuration + ce_netconf: + rpc: get + cfg_xml: " + + + + 100 + + + + + + + + " + register: result_xml + + - name: Assert the configuration is reflected on host + assert: + that: + - "'10GE1/0/1' in result_xml.end_state.result" + - "'level_1' in result_xml.end_state.result" + - "'10' in result_xml.end_state.result" + - "'10' in result_xml.end_state.result" + - "'true' in result_xml.end_state.result" + - "'true' in result_xml.end_state.result" + - "'true' in result_xml.end_state.result" + - "'true' in result_xml.end_state.result" + - "'2_way' in result_xml.end_state.result" + - "'true' in result_xml.end_state.result" + - "'true' in result_xml.end_state.result" + - "'10' in result_xml.end_state.result" + - "'10' in result_xml.end_state.result" + + - name: Merge the provided configuration with the existing running configuration (IDEMPOTENT) + ce_is_is_interface: *merged + register: result + + - name: Assert that the previous task was idempotent + assert: + that: + - "result['changed'] == false" + + - name: delete the provided configuration from the existing running configuration + ce_is_is_interface: *delete + register: result + + - name: use ce_netconf to get configuration + ce_netconf: + rpc: get + cfg_xml: " + + + + 100 + + + + + + + + " + register: result_xml + + - name: Assert the configuration is reflected on host + assert: + that: + - "'10GE1/0/1' not in result_xml.end_state.result" + - "'level_1' not in result_xml.end_state.result" + - "'10' not in result_xml.end_state.result" + - "'10' not in result_xml.end_state.result" + - "'true' not in result_xml.end_state.result" + - "'true' not in result_xml.end_state.result" + - "'true' not in result_xml.end_state.result" + - "'true' not in result_xml.end_state.result" + - "'2_way' not in result_xml.end_state.result" + - "'true' not in result_xml.end_state.result" + - "'true' not in result_xml.end_state.result" + - "'10' not in result_xml.end_state.result" + - "'10' not in result_xml.end_state.result" + + - name: delete the provided configuration from the existing running configuration (IDEMPOTENT) + ce_is_is_interface: *delete + register: result + + - name: Assert that the previous task was idempotent + assert: + that: + - "result['changed'] == false" + +- debug: + msg: "END ce_is_is_interface merged integration tests on connection={{ 
ansible_connection }}" diff --git a/tests/integration/targets/ce_is_is_view/defaults/main.yaml b/tests/integration/targets/ce_is_is_view/defaults/main.yaml new file mode 100644 index 0000000000..164afead28 --- /dev/null +++ b/tests/integration/targets/ce_is_is_view/defaults/main.yaml @@ -0,0 +1,3 @@ +--- +testcase: "[^_].*" +test_items: [] diff --git a/tests/integration/targets/ce_is_is_view/tasks/main.yaml b/tests/integration/targets/ce_is_is_view/tasks/main.yaml new file mode 100644 index 0000000000..cc27f174fd --- /dev/null +++ b/tests/integration/targets/ce_is_is_view/tasks/main.yaml @@ -0,0 +1,2 @@ +--- +- { include: netconf.yaml, tags: ['netconf'] } diff --git a/tests/integration/targets/ce_is_is_view/tasks/netconf.yaml b/tests/integration/targets/ce_is_is_view/tasks/netconf.yaml new file mode 100644 index 0000000000..73b91adfaa --- /dev/null +++ b/tests/integration/targets/ce_is_is_view/tasks/netconf.yaml @@ -0,0 +1,17 @@ +--- +- name: collect all netconf test cases + find: + paths: "{{ role_path }}/tests/netconf" + patterns: "{{ testcase }}.yaml" + use_regex: true + connection: local + register: test_cases + +- name: set test_items + set_fact: test_items="{{ test_cases.files | map(attribute='path') | list }}" + +- name: run test case (connection=netconf) + include: "{{ test_case_to_run }} ansible_connection=netconf" + with_items: "{{ test_items }}" + loop_control: + loop_var: test_case_to_run diff --git a/tests/integration/targets/ce_is_is_view/tests/netconf/cleanup.yaml b/tests/integration/targets/ce_is_is_view/tests/netconf/cleanup.yaml new file mode 100644 index 0000000000..03c0f0ee94 --- /dev/null +++ b/tests/integration/targets/ce_is_is_view/tests/netconf/cleanup.yaml @@ -0,0 +1,21 @@ +--- +- debug: + msg: "CLEANUP ce_is_is_view, deleted integration tests on connection={{ ansible_connection }}" + +- name: Get lacp config by ce_netconf. + ce_netconf: + rpc: get + cfg_xml: " + + + + 100 + _public_ + ISIS + + + + " + +- debug: + msg: "END CLEANUP ce_is_is_view, deleted integration tests on connection={{ ansible_connection }}" diff --git a/tests/integration/targets/ce_is_is_view/tests/netconf/setup.yaml b/tests/integration/targets/ce_is_is_view/tests/netconf/setup.yaml new file mode 100644 index 0000000000..92ec370c28 --- /dev/null +++ b/tests/integration/targets/ce_is_is_view/tests/netconf/setup.yaml @@ -0,0 +1,22 @@ +--- +- debug: + msg: "SETUP ce_is_is_view integration tests on connection={{ ansible_connection }}" +# create ISIS 100 + +- name: Get lacp config by ce_netconf. + ce_netconf: + rpc: get + cfg_xml: " + + + + 100 + _public_ + ISIS + + + + " + +- debug: + msg: "END SETUP ce_is_is_view integration tests on connection={{ ansible_connection }}" diff --git a/tests/integration/targets/ce_is_is_view/tests/netconf/test_ce_is_is_view_entity .yaml b/tests/integration/targets/ce_is_is_view/tests/netconf/test_ce_is_is_view_entity .yaml new file mode 100644 index 0000000000..1c81c2ca7b --- /dev/null +++ b/tests/integration/targets/ce_is_is_view/tests/netconf/test_ce_is_is_view_entity .yaml @@ -0,0 +1,78 @@ +--- +- debug: + msg: "START ce_is_is_view entity presented integration tests on connection={{ ansible_connection }}" +- include_tasks: setup.yaml + +- name: present the provided configuration with the exisiting running configuration + ce_is_is_view: &present + instance_id: 100 + netentity: isis_net_entity + register: result + +- name: Assert the configuration is reflected on host + assert: + that: + - "result['changed'] == true" + +- name: Get basic config by ce_netconf. 
+ ce_netconf: &get_config + rpc: get + cfg_xml: " + + + + 100 + + + + + + + + + " + register: result_xml + + +- name: present the provided configuration with the existing running configuration (IDEMPOTENT) + ce_is_is_view: *present + register: repeat + +- name: Assert that the previous task was idempotent + assert: + that: + - "repeat.changed == false" + - "'100' in result_xml.end_state.result" + - "'isis_net_entity' in result_xml.end_state.result" + +- name: present the provided configuration with the exisiting running configuration + ce_is_is_view: &absent + instance_id: 100 + netentity: isis_net_entity + state: absent + register: result + + +- name: Assert the configuration is reflected on host + assert: + that: + - "result['changed'] == true" + +- name: present the provided configuration with the existing running configuration (IDEMPOTENT) + ce_is_is_view: *absent + register: repeat + +- name: Get basic config by ce_netconf. + ce_netconf: *get_config + register: result_xml + +- name: Assert that the previous task was idempotent + assert: + that: + - "repeat.changed == false" + - "'100' not in result_xml.end_state.result" + - "''isis_net_entity' not in result_xml.end_state.result" + # after present, isis 100 should be deleted +- include_tasks: cleanup.yaml +- debug: + msg: "END ce_is_is_view pentity resentd integration tests on connection={{ ansible_connection }}" diff --git a/tests/integration/targets/ce_is_is_view/tests/netconf/test_ce_isis_bfd.yaml b/tests/integration/targets/ce_is_is_view/tests/netconf/test_ce_isis_bfd.yaml new file mode 100644 index 0000000000..6aa200f6e8 --- /dev/null +++ b/tests/integration/targets/ce_is_is_view/tests/netconf/test_ce_isis_bfd.yaml @@ -0,0 +1,92 @@ +--- +- debug: + msg: "START ce_is_is_view EXPORTROUTE route policy presented integration tests on connection={{ ansible_connection }}" +- include_tasks: setup.yaml + +- name: present the provided configuration with the exisiting running configuration + ce_is_is_view: &present + instance_id: 100 + bfd_min_rx: 100 + bfd_min_tx: 100 + bfd_multiplier_num: 10 + register: result + +- name: Assert the configuration is reflected on host + assert: + that: + - "result['changed'] == true" + +- name: Get basic config by ce_netconf. + ce_netconf: &get_config + rpc: get + cfg_xml: " + + + + 100 + + + afIpv4 + 0 + + + + + + + + + " + register: result_xml + + +- name: present the provided configuration with the existing running configuration (IDEMPOTENT) + ce_is_is_view: *present + register: repeat + +- name: Assert that the previous task was idempotent + assert: + that: + - "repeat.changed == false" + - "'100' in result_xml.end_state.result" + - "'100' in result_xml.end_state.result" + - "'10' in result_xml.end_state.result" + +- name: present the provided configuration with the exisiting running configuration + ce_is_is_view: &absent + instance_id: 100 + defaultmode: always + cost: 10 + mode_tag: 10 + level_type: level_1 + avoid_learning: true + mode_routepolicyname: routepolicy_name + tag: 100 + state: absent + register: result + + +- name: Assert the configuration is reflected on host + assert: + that: + - "result['changed'] == true" + +- name: present the provided configuration with the existing running configuration (IDEMPOTENT) + ce_is_is_view: *absent + register: repeat + +- name: Get basic config by ce_netconf. 
+ ce_netconf: *get_config + register: result_xml + +- name: Assert that the previous task was idempotent + assert: + that: + - "repeat.changed == false" + - "'100' not in result_xml.end_state.result" + - "'100' not in result_xml.end_state.result" + - "'10' not in result_xml.end_state.result" +# after absent, isis 100 should be deleted +- include_tasks: cleanup.yaml +- debug: + msg: "END ce_is_is_view bfd presented integration tests on connection={{ ansible_connection }}" diff --git a/tests/integration/targets/ce_is_is_view/tests/netconf/test_ce_isis_export.yaml b/tests/integration/targets/ce_is_is_view/tests/netconf/test_ce_isis_export.yaml new file mode 100644 index 0000000000..f83a07c2cf --- /dev/null +++ b/tests/integration/targets/ce_is_is_view/tests/netconf/test_ce_isis_export.yaml @@ -0,0 +1,96 @@ +--- +- debug: + msg: "START ce_is_is_view export route policy presented integration tests on connection={{ ansible_connection }}" +- include_tasks: setup.yaml + +- name: present the provided configuration with the existing running configuration + ce_is_is_view: &present + instance_id: 100 + export_protocol: ospf + export_policytype: aclNumOrName + export_processid: 100 + register: result + +- name: Assert the configuration is reflected on host + assert: + that: + - "result['changed'] == true" + +- name: Get basic config by ce_netconf. + ce_netconf: &get_config + rpc: get + cfg_xml: " + + + + 100 + + + afIpv4 + 0 + + + + + + + + + + + + + " + register: result_xml + + +- name: present the provided configuration with the existing running configuration (IDEMPOTENT) + ce_is_is_view: *present + register: repeat + +- name: Assert that the previous task was idempotent + assert: + that: + - "repeat.changed == false" + - "'ospf' in result_xml.end_state.result" + - "'100' in result_xml.end_state.result" + - "'level_1' in result_xml.end_state.result" + +- name: absent the provided configuration from the existing running configuration + ce_is_is_view: &absent + instance_id: 100 + export_protocol: ospf + export_policytype: aclNumOrName + export_processid: 100 + state: absent + register: result + + +- name: Assert the configuration is reflected on host + assert: + that: + - "result['changed'] == true" + +- name: absent the provided configuration from the existing running configuration (IDEMPOTENT) + ce_is_is_view: *absent + register: repeat + +- name: Get basic config by ce_netconf. 
+ ce_netconf: *get_config + register: result_xml + +- name: Assert that the previous task was idempotent + assert: + that: + - "repeat.changed == false" + - "'ospf' not in result_xml.end_state.result" + - "'100' not in result_xml.end_state.result" + - "'level_1' in result_xml.end_state.result" +# after absent, isis 100 should be deleted +- include_tasks: cleanup.yaml +- debug: + msg: "END ce_is_is_view export route policy presented integration tests on connection={{ ansible_connection }}" diff --git a/tests/integration/targets/ce_is_is_view/tests/netconf/test_ce_isis_import.yaml b/tests/integration/targets/ce_is_is_view/tests/netconf/test_ce_isis_import.yaml new file mode 100644 index 0000000000..c5b4e60df4 --- /dev/null +++ b/tests/integration/targets/ce_is_is_view/tests/netconf/test_ce_isis_import.yaml @@ -0,0 +1,124 @@ +--- +- debug: + msg: "START ce_is_is_view import route policy presented integration tests on connection={{ ansible_connection }}" +- include_tasks: setup.yaml + +- name: present the provided configuration with the existing running configuration + ce_is_is_view: &present + instance_id: 100 + protocol: ospf + processid: 100 + cost_type: external + import_cost: 10 + import_tag: 10 + import_route_policy: routepolicy_name + impotr_leveltype: level_1 + inheritcost: true + permitibgp: true + tag: 100 + register: result + +- name: Assert the configuration is reflected on host + assert: + that: + - "result['changed'] == true" + +- name: Get basic config by ce_netconf. + ce_netconf: &get_config + rpc: get + cfg_xml: " + + + + 100 + + + afIpv4 + 0 + + + + + + + + + + + + + + + + + + + + " + register: result_xml + + +- name: present the provided configuration with the existing running configuration (IDEMPOTENT) + ce_is_is_view: *present + register: repeat + +- name: Assert that the previous task was idempotent + assert: + that: + - "repeat.changed == false" + - "'ospf' in result_xml.end_state.result" + - "'100' in result_xml.end_state.result" + - "'external' in result_xml.end_state.result" + - "'10' in result_xml.end_state.result" + - "'10' in result_xml.end_state.result" + - "'level_1' in result_xml.end_state.result" + - "'routepolicy_name' in result_xml.end_state.result" + - "'level_1' in result_xml.end_state.result" + - "'true' in result_xml.end_state.result" + - "'true' in result_xml.end_state.result" + +- name: absent the provided configuration from the existing running configuration + ce_is_is_view: &absent + instance_id: 100 + protocol: ospf + processid: 100 + cost_type: external + import_cost: 10 + import_tag: 10 + import_route_policy: routepolicy_name + impotr_leveltype: level_1 + inheritcost: true + permitibgp: true + tag: 100 + state: absent + register: result + + +- name: Assert the configuration is reflected on host + assert: + that: + - "result['changed'] == true" + +- name: absent the provided configuration from the existing running configuration (IDEMPOTENT) + ce_is_is_view: *absent + register: repeat + +- name: Get basic config by ce_netconf. 
+ ce_netconf: *get_config + register: result_xml + +- name: Assert that the previous task was idempotent + assert: + that: + - "repeat.changed == false" + - "'ospf' not in result_xml.end_state.result" + - "'100' not in result_xml.end_state.result" + - "'external' not in result_xml.end_state.result" + - "'10' not in result_xml.end_state.result" + - "'10' not in result_xml.end_state.result" + - "'level_1' not in result_xml.end_state.result" + - "'routepolicy_name' not in result_xml.end_state.result" + - "'level_1' not in result_xml.end_state.result" + - "'true' not in result_xml.end_state.result" + - "'true' not in result_xml.end_state.result" +# after absent, isis 100 should be deleted +- include_tasks: cleanup.yaml +- debug: + msg: "END ce_is_is_view import route policy presented integration tests on connection={{ ansible_connection }}" diff --git a/tests/integration/targets/ce_is_is_view/tests/netconf/test_ce_isis_l1tol2.yaml b/tests/integration/targets/ce_is_is_view/tests/netconf/test_ce_isis_l1tol2.yaml new file mode 100644 index 0000000000..f12b5034bf --- /dev/null +++ b/tests/integration/targets/ce_is_is_view/tests/netconf/test_ce_isis_l1tol2.yaml @@ -0,0 +1,109 @@ +--- +- debug: + msg: "START ce_is_is_view l1tol2 presented integration tests on connection={{ ansible_connection }}" +- include_tasks: setup.yaml + +- name: present the provided configuration with the existing running configuration + ce_is_is_view: &present + instance_id: 100 + allow_filter: true + allow_up_down: true + ip_prefix_name: prefix_name + aclnum_or_name: 3001 + penetration_direct: level1-level2 + import_routepolicy_name: routepolicy_name + tag: 100 + register: result + +- name: Assert the configuration is reflected on host + assert: + that: + - "result['changed'] == true" + +- name: Get basic config by ce_netconf. + ce_netconf: &get_config + rpc: get + cfg_xml: " + + + + 100 + + + afIpv4 + 0 + + + + + + + + + + + + + + + + " + register: result_xml + + +- name: present the provided configuration with the existing running configuration (IDEMPOTENT) + ce_is_is_view: *present + register: repeat + +- name: Assert that the previous task was idempotent + assert: + that: + - "repeat.changed == false" + - "'100' in result_xml.end_state.result" + - "'routepolicy_name' in result_xml.end_state.result" + - "'true' in result_xml.end_state.result" + - "'3001' in result_xml.end_state.result" + - "'prefix_name' in result_xml.end_state.result" + - "'true' in result_xml.end_state.result" + +- name: absent the provided configuration from the existing running configuration + ce_is_is_view: &absent + instance_id: 100 + allow_filter: true + allow_up_down: true + ip_prefix_name: prefix_name + aclnum_or_name: 3001 + penetration_direct: level1-level2 + import_routepolicy_name: routepolicy_name + tag: 100 + state: absent + register: result + + +- name: Assert the configuration is reflected on host + assert: + that: + - "result['changed'] == true" + +- name: absent the provided configuration from the existing running configuration (IDEMPOTENT) + ce_is_is_view: *absent + register: repeat + +- name: Get basic config by ce_netconf. 
+ ce_netconf: *get_config + register: result_xml + +- name: Assert that the previous task was idempotent + assert: + that: + - "repeat.changed == false" + - "'100' in result_xml.end_state.result" + - "'routepolicy_name' not in result_xml.end_state.result" + - "'true' not in result_xml.end_state.result" + - "'3001' not in result_xml.end_state.result" + - "'prefix_name' not in result_xml.end_state.result" + - "'true' not in result_xml.end_state.result" + # after absent, isis 100 should be deleted +- include_tasks: cleanup.yaml +- debug: + msg: "END ce_is_is_view l1tol2 presented integration tests on connection={{ ansible_connection }}" diff --git a/tests/integration/targets/ce_is_is_view/tests/netconf/test_ce_isis_l2tol1.yaml b/tests/integration/targets/ce_is_is_view/tests/netconf/test_ce_isis_l2tol1.yaml new file mode 100644 index 0000000000..3af5111fb4 --- /dev/null +++ b/tests/integration/targets/ce_is_is_view/tests/netconf/test_ce_isis_l2tol1.yaml @@ -0,0 +1,109 @@ +--- +- debug: + msg: "START ce_is_is_view l2tol1 presented integration tests on connection={{ ansible_connection }}" +- include_tasks: setup.yaml + +- name: present the provided configuration with the existing running configuration + ce_is_is_view: &present + instance_id: 100 + penetration_direct: level2-level1 + allow_filter: true + allow_up_down: true + ip_prefix_name: prefix_name + aclnum_or_name: 3001 + import_routepolicy_name: routepolicy_name + tag: 100 + register: result + +- name: Assert the configuration is reflected on host + assert: + that: + - "result['changed'] == true" + +- name: Get basic config by ce_netconf. + ce_netconf: &get_config + rpc: get + cfg_xml: " + + + + 100 + + + afIpv4 + 0 + + + + + + + + + + + + + + + + " + register: result_xml + + +- name: present the provided configuration with the existing running configuration (IDEMPOTENT) + ce_is_is_view: *present + register: repeat + +- name: Assert that the previous task was idempotent + assert: + that: + - "repeat.changed == false" + - "'100' in result_xml.end_state.result" + - "'routepolicy_name' in result_xml.end_state.result" + - "'true' in result_xml.end_state.result" + - "'3001' in result_xml.end_state.result" + - "'prefix_name' in result_xml.end_state.result" + - "'true' in result_xml.end_state.result" + +- name: absent the provided configuration from the existing running configuration + ce_is_is_view: &absent + instance_id: 100 + penetration_direct: level2-level1 + allow_filter: true + allow_up_down: true + ip_prefix_name: prefix_name + aclnum_or_name: 3001 + import_routepolicy_name: routepolicy_name + tag: 100 + state: absent + register: result + + +- name: Assert the configuration is reflected on host + assert: + that: + - "result['changed'] == true" + +- name: absent the provided configuration from the existing running configuration (IDEMPOTENT) + ce_is_is_view: *absent + register: repeat + +- name: Get basic config by ce_netconf. 
+ ce_netconf: *get_config + register: result_xml + +- name: Assert that the previous task was idempotent + assert: + that: + - "repeat.changed == false" + - "'100' in result_xml.end_state.result" + - "'routepolicy_name' not in result_xml.end_state.result" + - "'true' not in result_xml.end_state.result" + - "'3001' not in result_xml.end_state.result" + - "'prefix_name' not in result_xml.end_state.result" + - "'true' not in result_xml.end_state.result" + # after absent, isis 100 should be deleted +- include_tasks: cleanup.yaml +- debug: + msg: "END ce_is_is_view l2tol1 presented integration tests on connection={{ ansible_connection }}" diff --git a/tests/integration/targets/ce_is_is_view/tests/netconf/test_ce_isis_max_load.yaml b/tests/integration/targets/ce_is_is_view/tests/netconf/test_ce_isis_max_load.yaml new file mode 100644 index 0000000000..0629d3415c --- /dev/null +++ b/tests/integration/targets/ce_is_is_view/tests/netconf/test_ce_isis_max_load.yaml @@ -0,0 +1,80 @@ +--- +- debug: + msg: "START ce_is_is_view maxLoadBalancing presented integration tests on connection={{ ansible_connection }}" +- include_tasks: setup.yaml + +- name: present the provided configuration with the existing running configuration + ce_is_is_view: &present + instance_id: 100 + max_load: 30 + register: result + +- name: Assert the configuration is reflected on host + assert: + that: + - "result['changed'] == true" + +- name: Get basic config by ce_netconf. + ce_netconf: &get_config + rpc: get + cfg_xml: " + + + + 100 + + + afIpv4 + 0 + + + + + + + " + register: result_xml + + +- name: present the provided configuration with the existing running configuration (IDEMPOTENT) + ce_is_is_view: *present + register: repeat + +- name: Assert that the previous task was idempotent + assert: + that: + - "repeat.changed == false" + - "'100' in result_xml.end_state.result" + - "'30' in result_xml.end_state.result" + +- name: absent the provided configuration from the existing running configuration + ce_is_is_view: &absent + instance_id: 100 + max_load: 30 + state: absent + register: result + + +- name: Assert the configuration is reflected on host + assert: + that: + - "result['changed'] == true" + +- name: absent the provided configuration from the existing running configuration (IDEMPOTENT) + ce_is_is_view: *absent + register: repeat + +- name: Get basic config by ce_netconf. 
+ ce_netconf: *get_config + register: result_xml + +- name: Assert that the previous task was idempotent + assert: + that: + - "repeat.changed == false" + - "'100' not in result_xml.end_state.result" + - "'30' not in result_xml.end_state.result" + # after absent, isis 100 should be deleted
- include_tasks: cleanup.yaml +- debug: + msg: "END ce_is_is_view maxLoadBalancing presented integration tests on connection={{ ansible_connection }}" diff --git a/tests/integration/targets/ce_is_is_view/tests/netconf/test_ce_isis_preferences.yaml b/tests/integration/targets/ce_is_is_view/tests/netconf/test_ce_isis_preferences.yaml new file mode 100644 index 0000000000..97d232feca --- /dev/null +++ b/tests/integration/targets/ce_is_is_view/tests/netconf/test_ce_isis_preferences.yaml @@ -0,0 +1,87 @@ +--- +- debug: + msg: "START ce_is_is_view preferences presented integration tests on connection={{ ansible_connection }}" +- include_tasks: setup.yaml + +- name: present the provided configuration with the existing running configuration + ce_is_is_view: &present + instance_id: 100 + preference_value: 100 + route_policy_name: route + register: result + +- name: Assert the configuration is reflected on host + assert: + that: + - "result['changed'] == true" + +- name: Get basic config by ce_netconf. + ce_netconf: &get_config + rpc: get + cfg_xml: " + + + + 100 + + + + + + + + + + + + + + " + register: result_xml + + +- name: present the provided configuration with the existing running configuration (IDEMPOTENT) + ce_is_is_view: *present + register: repeat + +- name: Assert that the previous task was idempotent + assert: + that: + - "repeat.changed == false" + - "'100' in result_xml.end_state.result" + - "'100' in result_xml.end_state.result" + - "'route' in result_xml.end_state.result" + +- name: absent the provided configuration from the existing running configuration + ce_is_is_view: &absent + instance_id: 100 + preference_value: 100 + route_policy_name: route + state: absent + register: result + + +- name: Assert the configuration is reflected on host + assert: + that: + - "result['changed'] == true" + +- name: absent the provided configuration from the existing running configuration (IDEMPOTENT) + ce_is_is_view: *absent + register: repeat + +- name: Get basic config by ce_netconf. 
+ ce_netconf: *get_config + register: result_xml + +- name: Assert that the previous task was idempotent + assert: + that: + - "repeat.changed == false" + - "'100' not in result_xml.end_state.result" + - "'100' not in result_xml.end_state.result" + - "'route' not in result_xml.end_state.result" + # after absent, isis 100 should be deleted
- include_tasks: cleanup.yaml +- debug: + msg: "END ce_is_is_view preferences presented integration tests on connection={{ ansible_connection }}" diff --git a/tests/integration/targets/ce_is_is_view/tests/netconf/test_ce_isis_view_basic.yaml b/tests/integration/targets/ce_is_is_view/tests/netconf/test_ce_isis_view_basic.yaml new file mode 100644 index 0000000000..ca95d0e059 --- /dev/null +++ b/tests/integration/targets/ce_is_is_view/tests/netconf/test_ce_isis_view_basic.yaml @@ -0,0 +1,115 @@ +--- +- debug: + msg: "START ce_is_is_view presented integration tests on connection={{ ansible_connection }}" +- include_tasks: setup.yaml + +- name: present the provided configuration with the existing running configuration + ce_is_is_view: &present + instance_id: 100 + description: ISIS + islevel: level_1 + coststyle: narrow + relaxSpfLimit: true + stdlevel1cost: 60 + stdlevel2cost: 60 + stdbandwidth: 100 + autocostenable: true + autocostenablecompatible: true + register: result + +- name: Assert the configuration is reflected on host + assert: + that: + - "result['changed'] == true" + +- name: Get basic config by ce_netconf. + ce_netconf: &get_config + rpc: get + cfg_xml: " + + + + 100 + + + + + + + + + + + + " + register: result_xml + + +- name: present the provided configuration with the existing running configuration (IDEMPOTENT) + ce_is_is_view: *present + register: repeat + +- name: Assert that the previous task was idempotent + assert: + that: + - "repeat.changed == false" + - "'100' in result_xml.end_state.result" + - "'_public_' in result_xml.end_state.result" + - "'ISIS' in result_xml.end_state.result" + - "'level_1' in result_xml.end_state.result" + - "'narrow' in result_xml.end_state.result" + - "'true' in result_xml.end_state.result" + - "'60' in result_xml.end_state.result" + - "'60' in result_xml.end_state.result" + - "'100' in result_xml.end_state.result" + - "'true' in result_xml.end_state.result" + - "'true' in result_xml.end_state.result" + +- name: absent the provided configuration from the existing running configuration + ce_is_is_view: &absent + instance_id: 100 + description: ISIS + islevel: level_1 + coststyle: narrow + relaxSpfLimit: true + stdlevel1cost: 60 + stdlevel2cost: 60 + stdbandwidth: 100 + autocostenable: true + autocostenablecompatible: true + state: absent + register: result + + +- name: Assert the configuration is reflected on host + assert: + that: + - "result['changed'] == true" + +- name: absent the provided configuration from the existing running configuration (IDEMPOTENT) + ce_is_is_view: *absent + register: repeat + +- name: Get basic config by ce_netconf. 
+ ce_netconf: *get_config + register: result_xml + +- name: Assert that the previous task was idempotent + assert: + that: + - "repeat.changed == false" + - "'100' not in result_xml.end_state.result" + - "'_public_' not in result_xml.end_state.result" + - "'ISIS' not in result_xml.end_state.result" + - "'level_1' not in result_xml.end_state.result" + - "'narrow' not in result_xml.end_state.result" + - "'true' not in result_xml.end_state.result" + - "'60' not in result_xml.end_state.result" + - "'60' not in result_xml.end_state.result" + - "'100' not in result_xml.end_state.result" + - "'true' not in result_xml.end_state.result" + - "'true' not in result_xml.end_state.result" +# after absent, isis 100 should be deleted +- include_tasks: cleanup.yaml +- debug: + msg: "END ce_is_is_view presented integration tests on connection={{ ansible_connection }}" diff --git a/tests/integration/targets/ce_is_is_view/tests/netconf/test_ce_issi_default.yaml b/tests/integration/targets/ce_is_is_view/tests/netconf/test_ce_issi_default.yaml new file mode 100644 index 0000000000..1059054dce --- /dev/null +++ b/tests/integration/targets/ce_is_is_view/tests/netconf/test_ce_issi_default.yaml @@ -0,0 +1,111 @@ +--- +- debug: + msg: "START ce_is_is_view default route presented integration tests on connection={{ ansible_connection }}" +- include_tasks: setup.yaml + +- name: present the provided configuration with the existing running configuration + ce_is_is_view: &present + instance_id: 100 + defaultmode: always + cost: 10 + mode_tag: 10 + level_type: level_1 + avoid_learning: true + mode_routepolicyname: routepolicy_name + tag: 100 + register: result + +- name: Assert the configuration is reflected on host + assert: + that: + - "result['changed'] == true" + +- name: Get basic config by ce_netconf. + ce_netconf: &get_config + rpc: get + cfg_xml: " + + + + 100 + + + afIpv4 + 0 + + + + + + + + + + + + + + + + " + register: result_xml + + +- name: present the provided configuration with the existing running configuration (IDEMPOTENT) + ce_is_is_view: *present + register: repeat + +- name: Assert that the previous task was idempotent + assert: + that: + - "repeat.changed == false" + - "'100' in result_xml.end_state.result" + - "'always' in result_xml.end_state.result" + - "'routepolicy_name' in result_xml.end_state.result" + - "'10' in result_xml.end_state.result" + - "'10' in result_xml.end_state.result" + - "'level_1' in result_xml.end_state.result" + - "'true' in result_xml.end_state.result" + +- name: absent the provided configuration from the existing running configuration + ce_is_is_view: &absent + instance_id: 100 + defaultmode: always + cost: 10 + mode_tag: 10 + level_type: level_1 + avoid_learning: true + mode_routepolicyname: routepolicy_name + tag: 100 + state: absent + register: result + + +- name: Assert the configuration is reflected on host + assert: + that: + - "result['changed'] == true" + +- name: absent the provided configuration from the existing running configuration (IDEMPOTENT) + ce_is_is_view: *absent + register: repeat + +- name: Get basic config by ce_netconf. 
+ ce_netconf: *get_config + register: result_xml + +- name: Assert that the previous task was idempotent + assert: + that: + - "repeat.changed == false" + - "'100' not in result_xml.end_state.result" + - "'always' not in result_xml.end_state.result" + - "'routepolicy_name' not in result_xml.end_state.result" + - "'10' not in result_xml.end_state.result" + - "'10' not in result_xml.end_state.result" + - "'level_1' not in result_xml.end_state.result" + - "'true' not in result_xml.end_state.result" +# after absent, isis 100 should be deleted +- include_tasks: cleanup.yaml +- debug: + msg: "END ce_is_is_view default route presented integration tests on connection={{ ansible_connection }}" diff --git a/tests/integration/targets/ce_is_is_view/tests/netconf/test_isis_filter_import.yaml b/tests/integration/targets/ce_is_is_view/tests/netconf/test_isis_filter_import.yaml new file mode 100644 index 0000000000..7a97d982d6 --- /dev/null +++ b/tests/integration/targets/ce_is_is_view/tests/netconf/test_isis_filter_import.yaml @@ -0,0 +1,97 @@ +--- +- debug: + msg: "START ce_is_is_view filter import presented integration tests on connection={{ ansible_connection }}" +- include_tasks: setup.yaml + +- name: present the provided configuration with the existing running configuration + ce_is_is_view: &present + instance_id: 100 + import_aclnumorname: 3001 + import_ipprefix: ipprefix + import_routepolicyname: routepolicyname + register: result + +- name: Assert the configuration is reflected on host + assert: + that: + - "result['changed'] == true" + +- name: Get basic config by ce_netconf. + ce_netconf: &get_config + rpc: get + cfg_xml: " + + + + 100 + + + afIpv4 + 0 + + + + + + + + + + + + + " + register: result_xml + + +- name: present the provided configuration with the existing running configuration (IDEMPOTENT) + ce_is_is_view: *present + register: repeat + +- name: Assert that the previous task was idempotent + assert: + that: + - "repeat.changed == false" + - "'3001' in result_xml.end_state.result" + - "'ipprefix' in result_xml.end_state.result" + - "'routepolicyname' in result_xml.end_state.result" + +- name: absent the provided configuration from the existing running configuration + ce_is_is_view: &absent + instance_id: 100 + import_aclnumorname: 3001 + import_ipprefix: ipprefix + import_routepolicyname: routepolicyname + state: absent + register: result + + +- name: Assert the configuration is reflected on host + assert: + that: + - "result['changed'] == true" + +- name: absent the provided configuration from the existing running configuration (IDEMPOTENT) + ce_is_is_view: *absent + register: repeat + +- name: Get basic config by ce_netconf. 
+ ce_netconf: *get_config + register: result_xml + +- name: Assert that the previous task was idempotent + assert: + that: + - "repeat.changed == false" + - "'3001' not in result_xml.end_state.result" + - "'ipprefix' not in result_xml.end_state.result" + - "'routepolicyname' not in result_xml.end_state.result" + +# after absent, isis 100 should be deleted
- include_tasks: cleanup.yaml +- debug: + msg: "END ce_is_is_view filter import presented integration tests on connection={{ ansible_connection }}" diff --git a/tests/integration/targets/ce_is_is_view/tests/netconf/test_isis_next_hop.yaml b/tests/integration/targets/ce_is_is_view/tests/netconf/test_isis_next_hop.yaml new file mode 100644 index 0000000000..f18eeaf844 --- /dev/null +++ b/tests/integration/targets/ce_is_is_view/tests/netconf/test_isis_next_hop.yaml @@ -0,0 +1,87 @@ +--- +- debug: + msg: "START ce_is_is_view next hop presented integration tests on connection={{ ansible_connection }}" +- include_tasks: setup.yaml + +- name: present the provided configuration with the existing running configuration + ce_is_is_view: &present + instance_id: 100 + ip_address: 1.1.1.1 + weight: 100 + register: result + +- name: Assert the configuration is reflected on host + assert: + that: + - "result['changed'] == true" + +- name: Get basic config by ce_netconf. + ce_netconf: &get_config + rpc: get + cfg_xml: " + + + + 100 + + + afIpv4 + 0 + + + + + + + + + + + + " + register: result_xml + + +- name: present the provided configuration with the existing running configuration (IDEMPOTENT) + ce_is_is_view: *present + register: repeat + +- name: Assert that the previous task was idempotent + assert: + that: + - "repeat.changed == false" + - "'1.1.1.1' in result_xml.end_state.result" + - "'100' in result_xml.end_state.result" + +- name: absent the provided configuration from the existing running configuration + ce_is_is_view: &absent + instance_id: 100 + ip_address: 1.1.1.1 + weight: 100 + state: absent + register: result + + +- name: Assert the configuration is reflected on host + assert: + that: + - "result['changed'] == true" + +- name: absent the provided configuration from the existing running configuration (IDEMPOTENT) + ce_is_is_view: *absent + register: repeat + +- name: Get basic config by ce_netconf. 
+ ce_netconf: *get_config + register: result_xml + +- name: Assert that the previous task was idempotent + assert: + that: + - "repeat.changed == false" + - "'1.1.1.1' not in result_xml.end_state.result" + - "'100' not in result_xml.end_state.result" + # after absent, isis 100 should be deleted
- include_tasks: cleanup.yaml +- debug: + msg: "END ce_is_is_view next hop presented integration tests on connection={{ ansible_connection }}" diff --git a/tests/integration/targets/ce_lacp/defaults/main.yaml b/tests/integration/targets/ce_lacp/defaults/main.yaml new file mode 100644 index 0000000000..164afead28 --- /dev/null +++ b/tests/integration/targets/ce_lacp/defaults/main.yaml @@ -0,0 +1,3 @@ +--- +testcase: "[^_].*" +test_items: [] diff --git a/tests/integration/targets/ce_lacp/tasks/main.yaml b/tests/integration/targets/ce_lacp/tasks/main.yaml new file mode 100644 index 0000000000..cc27f174fd --- /dev/null +++ b/tests/integration/targets/ce_lacp/tasks/main.yaml @@ -0,0 +1,2 @@ +--- +- { include: netconf.yaml, tags: ['netconf'] } diff --git a/tests/integration/targets/ce_lacp/tasks/netconf.yaml b/tests/integration/targets/ce_lacp/tasks/netconf.yaml new file mode 100644 index 0000000000..73b91adfaa --- /dev/null +++ b/tests/integration/targets/ce_lacp/tasks/netconf.yaml @@ -0,0 +1,17 @@ +--- +- name: collect all netconf test cases + find: + paths: "{{ role_path }}/tests/netconf" + patterns: "{{ testcase }}.yaml" + use_regex: true + connection: local + register: test_cases + +- name: set test_items + set_fact: test_items="{{ test_cases.files | map(attribute='path') | list }}" + +- name: run test case (connection=netconf) + include: "{{ test_case_to_run }} ansible_connection=netconf" + with_items: "{{ test_items }}" + loop_control: + loop_var: test_case_to_run diff --git a/tests/integration/targets/ce_lacp/tests/netconf/absent.yaml b/tests/integration/targets/ce_lacp/tests/netconf/absent.yaml new file mode 100644 index 0000000000..2c32e574d4 --- /dev/null +++ b/tests/integration/targets/ce_lacp/tests/netconf/absent.yaml @@ -0,0 +1,95 @@ +--- +- debug: + msg: "START ce_lacp absent integration tests on connection={{ ansible_connection }}" +# before removing, it should be merged +- include_tasks: merge.yaml + +- name: Remove the provided configuration from the existing running configuration + ce_lacp: &absent + mode: Dynamic + trunk_id: 10 + preempt_enable: True + state_flapping: True + port_id_extension_enable: True + unexpected_mac_disable: True + system_id: 1111-2222-3333 + timeout_type: Fast + fast_timeout: 12 + mixed_rate_link_enable: True + preempt_delay: 12 + collector_delay: 12 + max_active_linknumber: 2 + select: Prority + priority: 23 + global_priority: 123 + state: absent + register: result + +- name: Assert the configuration is reflected on host + assert: + that: + - "result['changed'] == true" + +- name: Get lacp config by ce_netconf. + ce_netconf: + rpc: get + cfg_xml: " + + + + Eth-Trunk10 + + + + + + + + + + + + + + + + + + " + register: result_ifs_merged + +- name: Get lacp config by ce_netconf. + ce_netconf: + rpc: get + cfg_xml: " + + + + + + " + register: result_global_merged + + +- name: Remove the provided configuration from the existing running configuration (IDEMPOTENT) + ce_lacp: *absent + register: result_re_merged
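+# After state=absent the per-interface LACP settings fall back to device defaults + # (timeout Slow, interval 90, select Prority, preempt delay 30, priority 32768), + # so the assertions below check default values rather than the removed ones.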
+- name: Assert that the previous task was idempotent; some values revert to defaults, others depend on the device. + assert: + that: + - "result_re_merged.changed == false" + - "'false' in result_ifs_merged.end_state.result" + - "'Slow' in result_ifs_merged.end_state.result" + - "'90' in result_ifs_merged.end_state.result" + - "'Prority' in result_ifs_merged.end_state.result" + - "'30' in result_ifs_merged.end_state.result" + - "'0' in result_ifs_merged.end_state.result" + - "'false' in result_ifs_merged.end_state.result" + - "'false' in result_ifs_merged.end_state.result" + - "'false' in result_ifs_merged.end_state.result" + - "'false' in result_ifs_merged.end_state.result" + - "'32768' in result_global_merged.end_state.result" + +- debug: + msg: "END ce_lacp absent integration tests on connection={{ ansible_connection }}" diff --git a/tests/integration/targets/ce_lacp/tests/netconf/delete.yaml b/tests/integration/targets/ce_lacp/tests/netconf/delete.yaml new file mode 100644 index 0000000000..a3ce81723a --- /dev/null +++ b/tests/integration/targets/ce_lacp/tests/netconf/delete.yaml @@ -0,0 +1,32 @@ +--- +- debug: + msg: "START ce_lacp deleted integration tests on connection={{ ansible_connection }}" + +- name: Remove the provided configuration from the existing running configuration + ce_lacp: + mode: Dynamic + trunk_id: 10 + preempt_enable: True + state_flapping: True + port_id_extension_enable: True + unexpected_mac_disable: True + system_id: 1111-2222-3333 + timeout_type: Fast + fast_timeout: 12 + mixed_rate_link_enable: True + preempt_delay: 12 + collector_delay: 12 + max_active_linknumber: 2 + select: Prority + priority: 23 + global_priority: 123 + state: absent + register: result + +- name: Assert the configuration is reflected on host + assert: + that: + - "result['changed'] == true" + +- debug: + msg: "END ce_lacp deleted integration tests on connection={{ ansible_connection }}" diff --git a/tests/integration/targets/ce_lacp/tests/netconf/merge.yaml b/tests/integration/targets/ce_lacp/tests/netconf/merge.yaml new file mode 100644 index 0000000000..eef3956eeb --- /dev/null +++ b/tests/integration/targets/ce_lacp/tests/netconf/merge.yaml @@ -0,0 +1,31 @@ +--- +- debug: + msg: "START ce_lacp merged integration tests on connection={{ ansible_connection }}" + +- name: Merge the provided configuration with the existing running configuration + ce_lacp: + mode: Dynamic + trunk_id: 10 + preempt_enable: True + state_flapping: True + port_id_extension_enable: True + unexpected_mac_disable: True + system_id: 1111-2222-3333 + timeout_type: Fast + fast_timeout: 12 + mixed_rate_link_enable: True + preempt_delay: 12 + collector_delay: 12 + max_active_linknumber: 2 + select: Prority + priority: 23 + global_priority: 123 + register: result + +- name: Assert the configuration is reflected on host + assert: + that: + - "result['changed'] == true" + +- debug: + msg: "END ce_lacp merged integration tests on connection={{ ansible_connection }}" diff --git a/tests/integration/targets/ce_lacp/tests/netconf/present.yaml b/tests/integration/targets/ce_lacp/tests/netconf/present.yaml new file mode 100644 index 0000000000..ee696c9e1c --- /dev/null +++ b/tests/integration/targets/ce_lacp/tests/netconf/present.yaml @@ -0,0 +1,103 @@ +--- +- debug: + msg: "START ce_lacp presented integration tests on connection={{ ansible_connection }}" + +- name: present the provided configuration with the existing running configuration + ce_lacp: &present + mode: Dynamic + trunk_id: 10 + preempt_enable: True + state_flapping: True + port_id_extension_enable: True + unexpected_mac_disable: True + system_id: 1111-2222-3333 + timeout_type: 
Fast + fast_timeout: 12 + mixed_rate_link_enable: True + preempt_delay: 12 + collector_delay: 12 + max_active_linknumber: 2 + select: Prority + priority: 23 + global_priority: 123 + register: result + +- name: Assert the configuration is reflected on host + assert: + that: + - "result['changed'] == true" + +- name: Get lacp config by ce_netconf. + ce_netconf: + rpc: get + cfg_xml: " + + + + Eth-Trunk10 + + + + + + + + + + + + + + + + + + + " + register: result_ifs_presented + +- name: Get global lacp config by ce_netconf. + ce_netconf: + rpc: get + cfg_xml: " + + + + + + + + + + " + register: result_global_presented + + +- name: present the provided configuration with the existing running configuration (IDEMPOTENT) + ce_lacp: *present + register: result_re_presented + +- name: Assert that the previous task was idempotent + assert: + that: + - "result_re_presented.changed == false" + - "'Dynamic' in result_ifs_presented.end_state.result" + - "'true' in result_ifs_presented.end_state.result" + - "'Fast' in result_ifs_presented.end_state.result" + - "'12' in result_ifs_presented.end_state.result" + - "'Prority' in result_ifs_presented.end_state.result" + - "'12' in result_ifs_presented.end_state.result" + - "'2' in result_ifs_presented.end_state.result" + - "'12' in result_ifs_presented.end_state.result" + - "'true' in result_ifs_presented.end_state.result" + - "'true' in result_ifs_presented.end_state.result" + - "'true' in result_ifs_presented.end_state.result" + - "'true' in result_ifs_presented.end_state.result" + - "'true' in result_ifs_presented.end_state.result" + - "'1111-2222-3333' in result_global_presented.end_state.result" + - "'123' in result_global_presented.end_state.result" + +# after present, it should be deleted +- include_tasks: delete.yaml +- debug: + msg: "END ce_lacp presented integration tests on connection={{ ansible_connection }}" diff --git a/tests/integration/targets/ce_lldp/defaults/main.yaml b/tests/integration/targets/ce_lldp/defaults/main.yaml new file mode 100644 index 0000000000..164afead28 --- /dev/null +++ b/tests/integration/targets/ce_lldp/defaults/main.yaml @@ -0,0 +1,3 @@ +--- +testcase: "[^_].*" +test_items: [] diff --git a/tests/integration/targets/ce_lldp/meta/main.yml b/tests/integration/targets/ce_lldp/meta/main.yml new file mode 100644 index 0000000000..8b13789179 --- /dev/null +++ b/tests/integration/targets/ce_lldp/meta/main.yml @@ -0,0 +1 @@ + diff --git a/tests/integration/targets/ce_lldp/tasks/main.yaml b/tests/integration/targets/ce_lldp/tasks/main.yaml new file mode 100644 index 0000000000..cc27f174fd --- /dev/null +++ b/tests/integration/targets/ce_lldp/tasks/main.yaml @@ -0,0 +1,2 @@ +--- +- { include: netconf.yaml, tags: ['netconf'] } diff --git a/tests/integration/targets/ce_lldp/tasks/netconf.yaml b/tests/integration/targets/ce_lldp/tasks/netconf.yaml new file mode 100644 index 0000000000..73b91adfaa --- /dev/null +++ b/tests/integration/targets/ce_lldp/tasks/netconf.yaml @@ -0,0 +1,17 @@ +--- +- name: collect all netconf test cases + find: + paths: "{{ role_path }}/tests/netconf" + patterns: "{{ testcase }}.yaml" + use_regex: true + connection: local + register: test_cases + +- name: set test_items + set_fact: test_items="{{ test_cases.files | map(attribute='path') | list }}" + +- name: run test case (connection=netconf) + include: "{{ test_case_to_run }} ansible_connection=netconf" + with_items: "{{ test_items }}" + loop_control: + loop_var: test_case_to_run diff --git a/tests/integration/targets/ce_lldp/tests/netconf/absent.yaml 
b/tests/integration/targets/ce_lldp/tests/netconf/absent.yaml new file mode 100644 index 0000000000..1a14890677 --- /dev/null +++ b/tests/integration/targets/ce_lldp/tests/netconf/absent.yaml @@ -0,0 +1,108 @@ +--- +- debug: + msg: "START ce_lldp absent integration tests on connection={{ ansible_connection }}" + +- block: + + - name: present the provided configuration before absent + ce_lldp: + lldpenable: enabled + mdnstatus: rxOnly + interval: 35 + hold_multiplier: 5 + restart_delay: 3 + transmit_delay: 5 + notification_interval: 6 + fast_count: 5 + mdn_notification_interval: 6 + management_address: 10.10.10.1 + bind_name: vlanif100 + register: result + + - name: change ansible_connection to network_cli + set_fact: + ansible_connection: network_cli + + - name: display lldp + ce_command: + commands: + - display current-configuration | include lldp + register: result_display + + - name: change ansible_connection to netconf + set_fact: + ansible_connection: netconf + +# There should be some LLDP configuration on the host before absent + - name: Assert the configuration is reflected on host + assert: + that: + - "'lldp enable' in result_display.stdout[0]" + - "'undo lldp mdn disable' in result_display.stdout[0]" + - "'lldp transmit interval 35' in result_display.stdout[0]" + - "'lldp transmit multiplier 5' in result_display.stdout[0]" + - "'lldp restart 3' in result_display.stdout[0]" + - "'lldp transmit delay 5' in result_display.stdout[0]" + - "'lldp fast-count 5' in result_display.stdout[0]" + - "'lldp management-address 10.10.10.1' in result_display.stdout[0]" + - "'lldp mdn trap-interval 6' in result_display.stdout[0]" + - "'lldp trap-interval 6' in result_display.stdout[0]" + - "'lldp management-address bind interface vlanif100' in result_display.stdout[0]" + + - name: absent the provided configuration from the existing running configuration + ce_lldp: &absent + lldpenable: enabled + mdnstatus: rxOnly + interval: 35 + hold_multiplier: 5 + restart_delay: 3 + transmit_delay: 5 + notification_interval: 6 + fast_count: 5 + mdn_notification_interval: 6 + management_address: 10.10.10.1 + bind_name: vlanif100 + state: absent + register: result + + - name: change ansible_connection to network_cli + set_fact: + ansible_connection: network_cli + + - name: display lldp + ce_command: + commands: + - display current-configuration | include lldp + register: result_display + + - name: change ansible_connection to netconf + set_fact: + ansible_connection: netconf + + - name: Assert the configuration is reflected on host + assert: + that: + - "result['changed'] == true" + - "'lldp enable' not in result_display.stdout[0]" + - "'undo lldp mdn disable' not in result_display.stdout[0]" + - "'lldp transmit interval 35' not in result_display.stdout[0]" + - "'lldp transmit multiplier 5' not in result_display.stdout[0]" + - "'lldp restart 3' not in result_display.stdout[0]" + - "'lldp transmit delay 5' not in result_display.stdout[0]" + - "'lldp fast-count 5' not in result_display.stdout[0]" + - "'lldp management-address 10.10.10.1' not in result_display.stdout[0]" + - "'lldp mdn trap-interval 6' not in result_display.stdout[0]" + - "'lldp trap-interval 6' not in result_display.stdout[0]" + - "'lldp management-address bind interface vlanif100' not in result_display.stdout[0]" + + - name: absent the provided configuration from the existing running configuration (IDEMPOTENT) + ce_lldp: *absent + register: result + + - name: Assert that the previous task was idempotent + assert: + that: + - 
"result['changed'] == false" + +- debug: + msg: "END ce_lldp absent integration tests on connection={{ ansible_connection }}" diff --git a/tests/integration/targets/ce_lldp/tests/netconf/clean.yaml b/tests/integration/targets/ce_lldp/tests/netconf/clean.yaml new file mode 100644 index 0000000000..8e12f61ea2 --- /dev/null +++ b/tests/integration/targets/ce_lldp/tests/netconf/clean.yaml @@ -0,0 +1,20 @@ +--- +- debug: + msg: "Start ce_lldp deleted remove interface config ansible_connection={{ ansible_connection }}" + +- name: change ansible_connection to network_cli + set_fact: + ansible_connection: network_cli +# After the global LLDP function is disabled, all LLDP configuration restore defaults except the LLDP alarm function. +- name: display lldp + ce_command: + commands: + - undo lldp enable + - lldp mdn disable + +- name: change ansible_connection to netconf + set_fact: + ansible_connection: netconf + +- debug: + msg: "End ce_lldp deleted remove interface config ansible_connection={{ ansible_connection }}" diff --git a/tests/integration/targets/ce_lldp/tests/netconf/present.yaml b/tests/integration/targets/ce_lldp/tests/netconf/present.yaml new file mode 100644 index 0000000000..f523d32dd7 --- /dev/null +++ b/tests/integration/targets/ce_lldp/tests/netconf/present.yaml @@ -0,0 +1,66 @@ +--- +- debug: + msg: "START ce_lldp merged integration tests on connection={{ ansible_connection }}" + +- block: + + - include_tasks: cleanup.yaml + + - name: Merge the provided configuration with the exisiting running configuration + ce_lldp: &merged + lldpenable: enabled + mdnstatus: rxOnly + interval: 35 + hold_multiplier: 5 + restart_delay: 3 + transmit_delay: 5 + notification_interval: 6 + fast_count: 5 + mdn_notification_interval: 10.1.1.1 + management_address: 10.10.10.1 + bind_name: vlanif100 + register: result + + - name: change ansible_connection to network_cli + set_fact: + ansible_connection: network_cli + + - name: display lldp + ce_command: + commands: + - display current-configuration | include lldp + register: result_display + + - name: change ansible_connection to netconf + set_fact: + ansible_connection: netconf + + - name: Assert the configuration is reflected on host + assert: + that: + - "result['changed'] == true" + - "'lldp enable' in result_display.stdout[0]" + - "'undo lldp mdn disable' in result_display.stdout[0]" + - "'lldp transmit interval 35' in result_display.stdout[0]" + - "'lldp transmit multiplier 5' in result_display.stdout[0]" + - "'lldp restart 3' in result_display.stdout[0]" + - "'lldp transmit delay 5' in result_display.stdout[0]" + - "'lldp fast-count 5' in result_display.stdout[0]" + - "'lldp management-address 10.10.10.1' in result_display.stdout[0]" + - "'lldp mdn trap-interval 6' in result_display.stdout[0]" + - "'lldp trap-interval 6' in result_display.stdout[0]" + - "'lldp management-address bind interface vlanif100' in result_display.stdout[0]" + + - name: Merge the provided configuration with the existing running configuration (IDEMPOTENT) + ce_lldp: *merged + register: result + + - name: Assert that the previous task was idempotent + assert: + that: + - "result['changed'] == false" + + - include_tasks: cleanup.yaml + +- debug: + msg: "END ce_lldp merged integration tests on connection={{ ansible_connection }}" diff --git a/tests/integration/targets/ce_lldp_interface/defaults/main.yaml b/tests/integration/targets/ce_lldp_interface/defaults/main.yaml new file mode 100644 index 0000000000..164afead28 --- /dev/null +++ 
diff --git a/tests/integration/targets/ce_lldp_interface/defaults/main.yaml b/tests/integration/targets/ce_lldp_interface/defaults/main.yaml new file mode 100644 index 0000000000..164afead28 --- /dev/null +++ b/tests/integration/targets/ce_lldp_interface/defaults/main.yaml @@ -0,0 +1,3 @@ +--- +testcase: "[^_].*" +test_items: [] diff --git a/tests/integration/targets/ce_lldp_interface/meta/main.yml b/tests/integration/targets/ce_lldp_interface/meta/main.yml new file mode 100644 index 0000000000..8b13789179 --- /dev/null +++ b/tests/integration/targets/ce_lldp_interface/meta/main.yml @@ -0,0 +1 @@ + diff --git a/tests/integration/targets/ce_lldp_interface/tasks/main.yaml b/tests/integration/targets/ce_lldp_interface/tasks/main.yaml new file mode 100644 index 0000000000..cc27f174fd --- /dev/null +++ b/tests/integration/targets/ce_lldp_interface/tasks/main.yaml @@ -0,0 +1,2 @@ +--- +- { include: netconf.yaml, tags: ['netconf'] } diff --git a/tests/integration/targets/ce_lldp_interface/tasks/netconf.yaml b/tests/integration/targets/ce_lldp_interface/tasks/netconf.yaml new file mode 100644 index 0000000000..73b91adfaa --- /dev/null +++ b/tests/integration/targets/ce_lldp_interface/tasks/netconf.yaml @@ -0,0 +1,17 @@ +--- +- name: collect all netconf test cases + find: + paths: "{{ role_path }}/tests/netconf" + patterns: "{{ testcase }}.yaml" + use_regex: true + connection: local + register: test_cases + +- name: set test_items + set_fact: test_items="{{ test_cases.files | map(attribute='path') | list }}" + +- name: run test case (connection=netconf) + include: "{{ test_case_to_run }} ansible_connection=netconf" + with_items: "{{ test_items }}" + loop_control: + loop_var: test_case_to_run diff --git a/tests/integration/targets/ce_lldp_interface/tests/netconf/lldp_interface.yaml b/tests/integration/targets/ce_lldp_interface/tests/netconf/lldp_interface.yaml new file mode 100644 index 0000000000..d5551a84f8 --- /dev/null +++ b/tests/integration/targets/ce_lldp_interface/tests/netconf/lldp_interface.yaml @@ -0,0 +1,155 @@ +--- +- debug: + msg: "START ce_lldp_interface merged integration tests on connection={{ ansible_connection }}" + +- block: + - name: Merge the provided configuration with the existing running configuration - basic-tlv + ce_lldp_interface: &merged1 + config: + msg_interval: 8 + ifname: 10GE 1/0/1 + admin_status: txandrx + basic_tlv: + management_addr: true + port_desc: true + system_capability: true + system_description: true + system_name: true + register: result1 + - name: Merge the provided configuration with the existing running configuration (REPEAT) + ce_lldp_interface: *merged1 + register: result2 + - name: "Netconf get operation" + ce_netconf: + rpc: get + cfg_xml: ' + + + + 10GE1/0/1 + + + + + + + + + + + + + ' + register: result3 + - name: Assert the configuration is reflected on host + assert: + that: + - "result1['changed'] == true" + - "result2['changed'] == false" + - "'8' in result3.end_state.result" + - "'txAndRx' in result3.end_state.result" + - "'true' in result3.end_state.result" + - "'true' in result3.end_state.result" + - "'true' in result3.end_state.result" + + - name: Merge the provided configuration with the existing running configuration - dot1-tlv + ce_lldp_interface: &merged2 + config: + msg_interval: 8 + ifname: 10GE 1/0/1 + dot1_tlv: + port_vlan_enable: true + port_desc: true + prot_vlan_enable: true + prot_vlan_id: 123 + vlan_name: 234 + vlan_name_enable: true + register: result1 + - name: Merge the provided configuration with the existing running configuration (REPEAT) + ce_lldp_interface: *merged2 + register: result2 + - name: "Netconf get operation" + ce_netconf: + rpc: get + cfg_xml: ' + + + + 10GE1/0/1 + + + + + + + + + + + + + + + + ' + register: 
result3 + - name: Assert the configuration is reflected on host + assert: + that: + - "result1['changed'] == true" + - "result2['changed'] == false" + - "'true' in result3.endstate.result" + - "'true' in result3.endstate.result" + - "'123' in result3.endstate.result" + - "'true' in result3.endstate.result" + - "'true' in result3.endstate.result" + - "'true' in result3.endstate.result" + + - name: Merge the provided configuration with the exisiting running configuration - dot3-tlv + ce_lldp_interface: &merged + config: + msg_interval: 8 + ifname: 10GE 1/0/1 + dot3_tlv: + eee: true + link_aggregation: true + mac_physic: true + max_frame_size: true + register: result1 + - name: Merge the provided configuration with the existing running configuration (REPEAT) + ce_lldp_interface: *merged + register: result2 + - name: "Netconf get operation" + ce_netconf: + rpc: get + cfg_xml: ' + + + + 10GE1/0/1 + + + + + + + + + + + + + + ' + register: result3 + - name: Assert the configuration is reflected on host + assert: + that: + - "result1['changed'] == true" + - "result2['changed'] == false" + - "'true' in result3.endstate.result" + - "'true' in result3.endstate.result" + - "'123' in result3.endstate.result" + +- debug: + msg: "END ce_lldp_interface merged integration tests on connection={{ ansible_connection }}" diff --git a/tests/integration/targets/ce_mdn_interface/defaults/main.yaml b/tests/integration/targets/ce_mdn_interface/defaults/main.yaml new file mode 100644 index 0000000000..164afead28 --- /dev/null +++ b/tests/integration/targets/ce_mdn_interface/defaults/main.yaml @@ -0,0 +1,3 @@ +--- +testcase: "[^_].*" +test_items: [] diff --git a/tests/integration/targets/ce_mdn_interface/tasks/main.yaml b/tests/integration/targets/ce_mdn_interface/tasks/main.yaml new file mode 100644 index 0000000000..cc27f174fd --- /dev/null +++ b/tests/integration/targets/ce_mdn_interface/tasks/main.yaml @@ -0,0 +1,2 @@ +--- +- { include: netconf.yaml, tags: ['netconf'] } diff --git a/tests/integration/targets/ce_mdn_interface/tasks/netconf.yaml b/tests/integration/targets/ce_mdn_interface/tasks/netconf.yaml new file mode 100644 index 0000000000..73b91adfaa --- /dev/null +++ b/tests/integration/targets/ce_mdn_interface/tasks/netconf.yaml @@ -0,0 +1,17 @@ +--- +- name: collect all netconf test cases + find: + paths: "{{ role_path }}/tests/netconf" + patterns: "{{ testcase }}.yaml" + use_regex: true + connection: local + register: test_cases + +- name: set test_items + set_fact: test_items="{{ test_cases.files | map(attribute='path') | list }}" + +- name: run test case (connection=netconf) + include: "{{ test_case_to_run }} ansible_connection=netconf" + with_items: "{{ test_items }}" + loop_control: + loop_var: test_case_to_run diff --git a/tests/integration/targets/ce_mdn_interface/tests/netconf/ce_mdn_interface.yaml b/tests/integration/targets/ce_mdn_interface/tests/netconf/ce_mdn_interface.yaml new file mode 100644 index 0000000000..4aec853fef --- /dev/null +++ b/tests/integration/targets/ce_mdn_interface/tests/netconf/ce_mdn_interface.yaml @@ -0,0 +1,97 @@ +--- +- debug: + msg: "START ce_mdn_interface presented integration tests on connection={{ ansible_connection }}" +# set up default before test +- name: clean up default configuration with the exisiting running configuration + ce_mdn_interface: + lldpenable: disabled + mdnstatus: disabled + ifname: 10GE1/0/1 + +- name: present the provided configuration with the exisiting running configuration + ce_mdn_interface: &present + lldpenable: enabled + mdnstatus: 
rxOnly + ifname: 10GE1/0/1 + register: result + +- name: Assert the configuration is reflected on host + assert: + that: + - "result['changed'] == true" + +- name: Get mdnInterface config by ce_netconf. + ce_netconf: &get_config + rpc: get + cfg_xml: " + + + + 10GE1/0/1 + + + + + " + register: result_xml + +- name: Get lldp enabled config by ce_netconf. + ce_netconf: &get_config_lldp + rpc: get + cfg_xml: " + + + + + + " + register: result_xml_lldp + + +- name: present the provided configuration with the existing running configuration (IDEMPOTENT) + ce_mdn_interface: *present + register: repeat + +- name: Assert that the previous task was idempotent + assert: + that: + - "repeat.changed == false" + - "'rxOnly' in result_xml.end_state.result" + - "'enabled' in result_xml_lldp.end_state.result" + +- name: absent the provided configuration with the existing running configuration + ce_mdn_interface: &absent + lldpenable: disabled + mdnstatus: disabled + ifname: 10GE1/0/1 + state: absent + register: result + + +- name: Assert the configuration is reflected on host + assert: + that: + - "result['changed'] == true" + +- name: absent the provided configuration with the existing running configuration (REPEAT) + ce_mdn_interface: *absent + register: repeat + +- name: Get mdnInterface config by ce_netconf. + ce_netconf: *get_config + register: result_xml + +- name: Get lldp enabled config by ce_netconf. + ce_netconf: *get_config_lldp + register: result_xml_lldp + +- name: Assert that the previous task was idempotent + assert: + that: + - "repeat['changed'] == false" + - "'disabled' not in result_xml.end_state.result" + - "'disabled' in result_xml_lldp.end_state.result" +# after absent, the mdn interface configuration should be restored to defaults + +- debug: + msg: "END ce_mdn_interface presented integration tests on connection={{ ansible_connection }}" diff --git a/tests/integration/targets/ce_multicast_global/defaults/main.yaml b/tests/integration/targets/ce_multicast_global/defaults/main.yaml new file mode 100644 index 0000000000..164afead28 --- /dev/null +++ b/tests/integration/targets/ce_multicast_global/defaults/main.yaml @@ -0,0 +1,3 @@ +--- +testcase: "[^_].*" +test_items: [] diff --git a/tests/integration/targets/ce_multicast_global/tasks/main.yaml b/tests/integration/targets/ce_multicast_global/tasks/main.yaml new file mode 100644 index 0000000000..cc27f174fd --- /dev/null +++ b/tests/integration/targets/ce_multicast_global/tasks/main.yaml @@ -0,0 +1,2 @@ +--- +- { include: netconf.yaml, tags: ['netconf'] } diff --git a/tests/integration/targets/ce_multicast_global/tasks/netconf.yaml b/tests/integration/targets/ce_multicast_global/tasks/netconf.yaml new file mode 100644 index 0000000000..73b91adfaa --- /dev/null +++ b/tests/integration/targets/ce_multicast_global/tasks/netconf.yaml @@ -0,0 +1,17 @@ +--- +- name: collect all netconf test cases + find: + paths: "{{ role_path }}/tests/netconf" + patterns: "{{ testcase }}.yaml" + use_regex: true + connection: local + register: test_cases + +- name: set test_items + set_fact: test_items="{{ test_cases.files | map(attribute='path') | list }}" + +- name: run test case (connection=netconf) + include: "{{ test_case_to_run }} ansible_connection=netconf" + with_items: "{{ test_items }}" + loop_control: + loop_var: test_case_to_run diff --git a/tests/integration/targets/ce_multicast_global/tests/netconf/test_ce_multicast_global.yaml b/tests/integration/targets/ce_multicast_global/tests/netconf/test_ce_multicast_global.yaml new file mode 100644 index 0000000000..69b1f5f26b --- /dev/null +++ b/tests/integration/targets/ce_multicast_global/tests/netconf/test_ce_multicast_global.yaml @@ -0,0 +1,73 @@ +--- +- debug: + msg: "START ce_multicast_global presented integration tests on connection={{ ansible_connection }}" + +- name: present the provided configuration with the existing running configuration + ce_multicast_global: &present + aftype: v4 + vrf: vpna + weight: 100 + register: result + +- name: Assert the configuration is reflected on host + assert: + that: + - "result['changed'] == true" + +- name: Get basic config by ce_netconf. + ce_netconf: &get_config + rpc: get + cfg_xml: " + + + + + + + + + " + register: result_xml + + +- name: present the provided configuration with the existing running configuration (IDEMPOTENT) + ce_multicast_global: *present + register: repeat + +- name: Assert that the previous task was idempotent + assert: + that: + - "repeat.changed == false" + - "'vpna' in result_xml.end_state.result" + +- name: absent the provided configuration with the existing running configuration + ce_multicast_global: &absent + aftype: v4 + vrf: vpna + state: absent + register: result + + +- name: Assert the configuration is reflected on host + assert: + that: + - "result['changed'] == true" + +- name: absent the provided configuration with the existing running configuration (IDEMPOTENT) + ce_multicast_global: *absent + register: repeat + +- name: Get basic config by ce_netconf. + ce_netconf: *get_config + register: result_xml + +- name: Assert that the previous task was idempotent + assert: + that: + - "repeat.changed == false" + - "'vpna' not in result_xml.end_state.result" +# after absent, the multicast routing configuration for vpna should be removed + +- debug: + msg: "END ce_multicast_global presented integration tests on connection={{ ansible_connection }}" diff --git a/tests/integration/targets/ce_multicast_igmp_enable/defaults/main.yaml b/tests/integration/targets/ce_multicast_igmp_enable/defaults/main.yaml new file mode 100644 index 0000000000..164afead28 --- /dev/null +++ b/tests/integration/targets/ce_multicast_igmp_enable/defaults/main.yaml @@ -0,0 +1,3 @@ +--- +testcase: "[^_].*" +test_items: [] diff --git a/tests/integration/targets/ce_multicast_igmp_enable/tasks/main.yaml b/tests/integration/targets/ce_multicast_igmp_enable/tasks/main.yaml new file mode 100644 index 0000000000..cc27f174fd --- /dev/null +++ b/tests/integration/targets/ce_multicast_igmp_enable/tasks/main.yaml @@ -0,0 +1,2 @@ +--- +- { include: netconf.yaml, tags: ['netconf'] } diff --git a/tests/integration/targets/ce_multicast_igmp_enable/tasks/netconf.yaml b/tests/integration/targets/ce_multicast_igmp_enable/tasks/netconf.yaml new file mode 100644 index 0000000000..73b91adfaa --- /dev/null +++ b/tests/integration/targets/ce_multicast_igmp_enable/tasks/netconf.yaml @@ -0,0 +1,17 @@ +--- +- name: collect all netconf test cases + find: + paths: "{{ role_path }}/tests/netconf" + patterns: "{{ testcase }}.yaml" + use_regex: true + connection: local + register: test_cases + +- name: set test_items + set_fact: test_items="{{ test_cases.files | map(attribute='path') | list }}" + +- name: run test case (connection=netconf) + include: "{{ test_case_to_run }} ansible_connection=netconf" + with_items: "{{ test_items }}" + loop_control: + loop_var: test_case_to_run
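# NOTE (editor): the `include: "{{ test_case_to_run }} ansible_connection=netconf"`
# harness repeated across these targets uses the old inline `key=value` form, which
# later Ansible releases deprecate. A sketch of the equivalent with include_tasks
# and task-level vars (assuming an Ansible version where include_tasks is available):
#   - name: run test case (connection=netconf)
#     include_tasks: "{{ test_case_to_run }}"
#     vars:
#       ansible_connection: netconf
#     with_items: "{{ test_items }}"
#     loop_control:
#       loop_var: test_case_to_run
diff --git a/tests/integration/targets/ce_multicast_igmp_enable/tests/netconf/ce_multicast_igmp_enable.yaml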
b/tests/integration/targets/ce_multicast_igmp_enable/tests/netconf/ce_multicast_igmp_enable.yaml new file mode 100644 index 0000000000..2a94b0a352 --- /dev/null +++ b/tests/integration/targets/ce_multicast_igmp_enable/tests/netconf/ce_multicast_igmp_enable.yaml @@ -0,0 +1,95 @@ +--- +- debug: + msg: "START ce_multicast_igmp_enable presented integration tests on connection={{ ansible_connection }}" +# clean up before test +- name: clean up configuration with the exisiting running configuration + ce_multicast_igmp_enable: &absent + aftype: v4 + features: vlan + vlan_id: 100 + igmp: true + version: 2 + proxy: true + + +- name: present the provided configuration with the exisiting running configuration + ce_multicast_igmp_enable: &present + aftype: v4 + features: vlan + vlan_id: 100 + igmp: true + version: 2 + proxy: true + register: result + +- name: Assert the configuration is reflected on host + assert: + that: + - "result['changed'] == true" + +- name: Get basic config by ce_netconf. + ce_netconf: &get_config + rpc: get + cfg_xml: " + + + + + + + + + + + + + + " + register: result_xml + + +- name: present the provided configuration with the existing running configuration (IDEMPOTENT) + ce_multicast_igmp_enable: *present + register: repeat + +- name: Assert that the previous task was idempotent + assert: + that: + - "repeat.changed == false" + - "'ipv4unicast' in result_xml.end_state.result" + - "'100' in result_xml.end_state.result" + - "'true' in result_xml.end_state.result" + - "'2' in result_xml.end_state.result" + - "'true' in result_xml.end_state.result" + +- name: absent the provided configuration with the exisiting running configuration + ce_multicast_igmp_enable: *absent + register: result + + +- name: Assert the configuration is reflected on host + assert: + that: + - "result['changed'] == true" + +- name: absent the provided configuration with the existing running configuration (REPEAT) + ce_multicast_igmp_enable: *absent + register: repeat + +- name: Get basic config by ce_netconf. 
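# NOTE (editor): the cfg_xml strings in these tests appear to have lost their XML
# markup during extraction -- only text nodes such as "10GE1/0/1" survive. Purely
# as an illustration (element names borrowed from the ce_mdn_interface test earlier,
# not a verified Huawei CE schema), a subtree filter for such a get would look like:
#   - name: Get mdn config (illustrative filter)
#     ce_netconf:
#       rpc: get
#       cfg_xml: |
#         <filter type="subtree">
#           <lldp xmlns="http://www.huawei.com/netconf/vrp">
#             <mdnInterfaces>
#               <mdnInterface>
#                 <ifName>10GE1/0/1</ifName>
#                 <mdnStatus/>
#               </mdnInterface>
#             </mdnInterfaces>
#           </lldp>
#         </filter>
#     register: result_xml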
+ ce_netconf: *get_config + register: result_xml + +- name: Assert that the previous task was idempotent + assert: + that: + - "repeat['changed'] == false" + - "'ipv4unicast' not in result_xml.end_state.result" + - "'100' not in result_xml.end_state.result" + - "'true' not in result_xml.end_state.result" + - "'2' not in result_xml.end_state.result" + - "'true' not in result_xml.end_state.result" +# after absent, the igmp configuration for vlan 100 should be removed + +- debug: + msg: "END ce_multicast_igmp_enable presented integration tests on connection={{ ansible_connection }}" diff --git a/tests/integration/targets/ce_static_route_bfd/defaults/main.yaml b/tests/integration/targets/ce_static_route_bfd/defaults/main.yaml new file mode 100644 index 0000000000..164afead28 --- /dev/null +++ b/tests/integration/targets/ce_static_route_bfd/defaults/main.yaml @@ -0,0 +1,3 @@ +--- +testcase: "[^_].*" +test_items: [] diff --git a/tests/integration/targets/ce_static_route_bfd/tasks/main.yaml b/tests/integration/targets/ce_static_route_bfd/tasks/main.yaml new file mode 100644 index 0000000000..cc27f174fd --- /dev/null +++ b/tests/integration/targets/ce_static_route_bfd/tasks/main.yaml @@ -0,0 +1,2 @@ +--- +- { include: netconf.yaml, tags: ['netconf'] } diff --git a/tests/integration/targets/ce_static_route_bfd/tasks/netconf.yaml b/tests/integration/targets/ce_static_route_bfd/tasks/netconf.yaml new file mode 100644 index 0000000000..73b91adfaa --- /dev/null +++ b/tests/integration/targets/ce_static_route_bfd/tasks/netconf.yaml @@ -0,0 +1,17 @@ +--- +- name: collect all netconf test cases + find: + paths: "{{ role_path }}/tests/netconf" + patterns: "{{ testcase }}.yaml" + use_regex: true + connection: local + register: test_cases + +- name: set test_items + set_fact: test_items="{{ test_cases.files | map(attribute='path') | list }}" + +- name: run test case (connection=netconf) + include: "{{ test_case_to_run }} ansible_connection=netconf" + with_items: "{{ test_items }}" + loop_control: + loop_var: test_case_to_run diff --git a/tests/integration/targets/ce_static_route_bfd/tests/netconf/ce_static_route_bfd.yaml b/tests/integration/targets/ce_static_route_bfd/tests/netconf/ce_static_route_bfd.yaml new file mode 100644 index 0000000000..e333278d91 --- /dev/null +++ b/tests/integration/targets/ce_static_route_bfd/tests/netconf/ce_static_route_bfd.yaml @@ -0,0 +1,150 @@ +--- +- debug: + msg: "START ce_static_route_bfd presented integration tests on connection={{ ansible_connection }}" +- include_tasks: cleanup.yaml +- name: Config an ip route-static bfd 10GE1/0/1 3.3.3.3 min-rx-interval 50 min-tx-interval 50 detect-multiplier 5 + ce_static_route_bfd: &merge1 + function_flag: 'singleBFD' + nhp_interface: 10GE1/0/1 + next_hop: 3.3.3.3 + min_tx_interval: 50 + min_rx_interval: 50 + detect_multiplier: 5 + aftype: v4 + state: present + register: result1 +- name: (repeat) Config an ip route-static bfd 10GE1/0/1 3.3.3.3 min-rx-interval 50 min-tx-interval 50 detect-multiplier 5 + ce_static_route_bfd: + <<: *merge1 + register: result2 + +- name: Assert the configuration is reflected on host + assert: + that: + - "result1['changed'] == true" + - "result2['changed'] == false" + + +# ip route-static bfd [ interface-type interface-number | vpn-instance vpn-instance-name ] nexthop-address
- name: ip route-static bfd 10GE1/0/1 3.3.3.4 + ce_static_route_bfd: &merge2 + function_flag: 'singleBFD' + nhp_interface: 10GE1/0/1 + next_hop: 3.3.3.4 + aftype: v4 + register: result1 +- name: (repeat) ip route-static bfd 10GE1/0/1 3.3.3.4 +
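# NOTE (editor): `<<: *merge2` below is the standard YAML merge key -- it expands
# every key from the &merge2 anchor into this mapping, and any key listed alongside
# it overrides the anchored value. Minimal sketch of the idiom (module and key
# names hypothetical):
#   - some_module: &base
#       interval: 50
#       state: present
#   - some_module:
#       <<: *base
#       interval: 60   # overrides only this key from &base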
ce_static_route_bfd: + <<: *merge2 + register: result2 +- name: Assert the configuration is reflected on host + assert: + that: + - "result1['changed'] == true" + - "result2['changed'] == false" +#ip route-static default-bfd { min-rx-interval {min-rx-interval} | min-tx-interval {min-tx-interval} | detect-multiplier {multiplier}} +- name: Config an ip route-static default-bfd min-rx-interval 50 min-tx-interval 50 detect-multiplier 6 + ce_static_route_bfd: &merge3 + function_flag: 'globalBFD' + min_tx_interval: 50 + min_rx_interval: 50 + detect_multiplier: 6 + aftype: v4 + state: present + register: result1 +- name: (repeat)Config an ip route-static default-bfd min-rx-interval 50 min-tx-interval 50 detect-multiplier 6 + ce_static_route_bfd: + <<: *merge3 + register: result2 +- name: Assert the configuration is reflected on host + assert: + that: + - "result1['changed'] == true" + - "result2['changed'] == false" + +- name: undo ip route-static default-bfd + ce_static_route_bfd: &merge4 + function_flag: 'globalBFD' + aftype: v4 + state: absent + commands: 'sys,undo ip route-static default-bfd,commit' + register: result1 +- name: (repeat)undo ip route-static default-bfd + ce_static_route_bfd: + <<: *merge4 + register: result2 +- name: Assert the configuration is reflected on host + assert: + that: + - "result1['changed'] == true" + - "result2['changed'] == false" + +- name: Config an ipv4 static route 2.2.2.0/24 2.2.2.1 preference 1 tag 2 description test for staticBFD + ce_static_route_bfd: &merge5 + function_flag: 'staticBFD' + prefix: 2.2.2.2 + mask: 24 + next_hop: 2.2.2.1 + tag: 2 + description: test + pref: 1 + aftype: v4 + bfd_session_name: btoa + state: present + register: result1 +- name: (repeat) Config an ipv4 static route 2.2.2.0/24 2.2.2.1 preference 1 tag 2 description test for staticBFD + ce_static_route_bfd: + <<: *merge5 + register: result2 +- name: Assert the configuration is reflected on host + assert: + that: + - "result1['changed'] == true" + - "result2['changed'] == false" + +- name: Get lacp config by ce_netconf. 
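# NOTE (editor): despite its name, the task below fetches the static-route BFD
# configuration via a NETCONF get; "lacp" in the task name looks like copy-paste
# residue from another target's test.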
+ ce_netconf: + rpc: get + cfg_xml: " + + + + + + + + + + + + + + + + + + + " + register: result_present + +- name: Assert the configuration is reflected on host + assert: + that: + - "'v4' in result_present.end_state.result" + - "'10GE1/0/1' in result_present.end_state.result" + - "'Fast' in result_present.end_state.result" + - "'__publiv__' in result_present.end_state.result" + - "'Prority' in result_present.end_state.result" + - "'2.2.2.1' in result_present.end_state.result" + - "'2.2.2.2' in result_present.end_state.result" + - "'12' in result_present.end_state.result" + - "'true' in result_present.end_state.result" + - "'1111-2222-3333' in result_present.end_state.result" + - "'123' in result_present.end_state.result" +- include_tasks: cleanup.yaml +- debug: + msg: "END ce_static_route_bfd presented integration tests on connection={{ ansible_connection }}" diff --git a/tests/integration/targets/ce_static_route_bfd/tests/netconf/cleanup.yaml b/tests/integration/targets/ce_static_route_bfd/tests/netconf/cleanup.yaml new file mode 100644 index 0000000000..10ad3e5c18 --- /dev/null +++ b/tests/integration/targets/ce_static_route_bfd/tests/netconf/cleanup.yaml @@ -0,0 +1,31 @@ +--- +- name: Remove the single BFD configuration from the existing running configuration + ce_static_route_bfd: + function_flag: 'singleBFD' + nhp_interface: 10GE1/0/1 + next_hop: 3.3.3.3 + min_tx_interval: 50 + min_rx_interval: 50 + detect_multiplier: 5 + aftype: v4 + state: absent + register: result + +- name: Assert the configuration is reflected on host + assert: + that: + - "result['changed'] == true" +- name: Remove the global default-bfd configuration + ce_static_route_bfd: + function_flag: 'globalBFD' + min_tx_interval: 50 + min_rx_interval: 50 + detect_multiplier: 6 + aftype: v4 + state: absent + register: result + +- name: Assert the configuration is reflected on host + assert: + that: + - "result['changed'] == true" diff --git a/tests/integration/targets/cloud_init_data_facts/aliases b/tests/integration/targets/cloud_init_data_facts/aliases new file mode 100644 index 0000000000..e93cd86b56 --- /dev/null +++ b/tests/integration/targets/cloud_init_data_facts/aliases @@ -0,0 +1,5 @@ +destructive +shippable/posix/group1 +skip/aix +skip/osx +skip/freebsd diff --git a/tests/integration/targets/cloud_init_data_facts/tasks/main.yml b/tests/integration/targets/cloud_init_data_facts/tasks/main.yml new file mode 100644 index 0000000000..eca905c6c2 --- /dev/null +++ b/tests/integration/targets/cloud_init_data_facts/tasks/main.yml @@ -0,0 +1,50 @@ +--- +- name: test cloud-init + # TODO: check for a workaround + # install 'cloud-init' failed: dpkg-divert: error: `diversion of /etc/init/ureadahead.conf + # to /etc/init/ureadahead.conf.disabled by cloud-init' clashes with `local diversion of + # /etc/init/ureadahead.conf to /etc/init/ureadahead.conf.distrib + # https://bugs.launchpad.net/ubuntu/+source/ureadahead/+bug/997838 + # Will also have to skip on OpenSUSE when running on Python 2 on newer Leap versions + # (!= 42 and >= 15) as cloud-init will install the Python 3 package, breaking our build on py2.
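# Worked example of the guard below: on openSUSE Leap 15 under Python 2, the second
# condition's inner expression is true (Suse, major version != 42, python major != 3),
# so its negation is false and the block is skipped; on Leap 42, or on Leap 15 with
# Python 3, the negation holds and the tests run.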
+ when: + - not (ansible_distribution == "Ubuntu" and ansible_distribution_major_version|int == 14) + - not (ansible_os_family == "Suse" and ansible_distribution_major_version|int != 42 and ansible_python.version.major != 3) + block: + - name: setup install cloud-init + package: + name: + - cloud-init + - udev + + - name: setup run cloud-init + service: + name: cloud-init-local + state: restarted + + - name: test gather cloud-init facts in check mode + cloud_init_data_facts: + check_mode: yes + register: result + - name: verify test gather cloud-init facts in check mode + assert: + that: + - result.cloud_init_data_facts.status.v1 is defined + - result.cloud_init_data_facts.status.v1.stage is defined + - not result.cloud_init_data_facts.status.v1.stage + - cloud_init_data_facts.status.v1 is defined + - cloud_init_data_facts.status.v1.stage is defined + - not cloud_init_data_facts.status.v1.stage + + - name: test gather cloud-init facts + cloud_init_data_facts: + register: result + - name: verify test gather cloud-init facts + assert: + that: + - result.cloud_init_data_facts.status.v1 is defined + - result.cloud_init_data_facts.status.v1.stage is defined + - not result.cloud_init_data_facts.status.v1.stage + - cloud_init_data_facts.status.v1 is defined + - cloud_init_data_facts.status.v1.stage is defined + - not cloud_init_data_facts.status.v1.stage diff --git a/tests/integration/targets/cloudscale_common/aliases b/tests/integration/targets/cloudscale_common/aliases new file mode 100644 index 0000000000..136c05e0d0 --- /dev/null +++ b/tests/integration/targets/cloudscale_common/aliases @@ -0,0 +1 @@ +hidden diff --git a/tests/integration/targets/cloudscale_common/defaults/main.yml b/tests/integration/targets/cloudscale_common/defaults/main.yml new file mode 100644 index 0000000000..293e0c49de --- /dev/null +++ b/tests/integration/targets/cloudscale_common/defaults/main.yml @@ -0,0 +1,19 @@ +--- +# The image to use for test servers +cloudscale_test_image: 'debian-9' + +# Alternate test image to use if a different image is required +cloudscale_alt_test_image: 'ubuntu-18.04' + +# The flavor to use for test servers +cloudscale_test_flavor: 'flex-2' + +# SSH key to use for test servers +cloudscale_test_ssh_key: | + ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDSPmiqkvDH1/+MDAVDZT8381aYqp73Odz8cnD5hegNhqtXajqtiH0umVg7HybX3wt1HjcrwKJovZURcIbbcDvzdH2bnYbF93T4OLXA0bIfuIp6M86x1iutFtXdpN3TTicINrmSXEE2Ydm51iMu77B08ZERjVaToya2F7vC+egfoPvibf7OLxE336a5tPCywavvNihQjL8sjgpDT5AAScjb3YqK/6VLeQ18Ggt8/ufINsYkb+9/Ji/3OcGFeflnDXq80vPUyF3u4iIylob6RSZenC38cXmQB05tRNxS1B6BXCjMRdy0v4pa7oKM2GA4ADKpNrr0RI9ed+peRFwmsclH test@ansible + +# The zone to use to test servers +cloudscale_test_zone: 'lpg1' + +# The region to use to request floating IPs +cloudscale_test_region: 'lpg' diff --git a/tests/integration/targets/cloudscale_common/tasks/cleanup_floating_ips.yml b/tests/integration/targets/cloudscale_common/tasks/cleanup_floating_ips.yml new file mode 100644 index 0000000000..8b60ab9260 --- /dev/null +++ b/tests/integration/targets/cloudscale_common/tasks/cleanup_floating_ips.yml @@ -0,0 +1,17 @@ +--- +- name: List all floating IPs + uri: + url: 'https://api.cloudscale.ch/v1/floating-ips' + headers: + Authorization: 'Bearer {{ cloudscale_api_token }}' + status_code: 200 + register: floating_ip_list + +- name: Remove all floating IPs created by this test run + cloudscale_floating_ip: + ip: '{{ item.network | ipaddr("address") }}' + state: 'absent' + when: cloudscale_resource_prefix in (item.reverse_ptr | string ) + with_items: 
'{{ floating_ip_list.json }}' + loop_control: + label: '{{ item.reverse_ptr }} ({{ item.network }})' diff --git a/tests/integration/targets/cloudscale_common/tasks/cleanup_server_groups.yml b/tests/integration/targets/cloudscale_common/tasks/cleanup_server_groups.yml new file mode 100644 index 0000000000..2bdb9e5939 --- /dev/null +++ b/tests/integration/targets/cloudscale_common/tasks/cleanup_server_groups.yml @@ -0,0 +1,17 @@ +--- +- name: List all server groups + uri: + url: 'https://api.cloudscale.ch/v1/server-groups' + headers: + Authorization: 'Bearer {{ cloudscale_api_token }}' + status_code: 200 + register: server_group_list + +- name: Remove all server groups created by this test run + cloudscale_server_group: + uuid: '{{ item.uuid }}' + state: absent + when: cloudscale_resource_prefix in item.name + with_items: '{{ server_group_list.json }}' + loop_control: + label: '{{ item.name }} ({{ item.uuid }})' diff --git a/tests/integration/targets/cloudscale_common/tasks/cleanup_servers.yml b/tests/integration/targets/cloudscale_common/tasks/cleanup_servers.yml new file mode 100644 index 0000000000..8fe6798693 --- /dev/null +++ b/tests/integration/targets/cloudscale_common/tasks/cleanup_servers.yml @@ -0,0 +1,17 @@ +--- +- name: List all servers + uri: + url: 'https://api.cloudscale.ch/v1/servers' + headers: + Authorization: 'Bearer {{ cloudscale_api_token }}' + status_code: 200 + register: server_list + +- name: Remove all servers created by this test run + cloudscale_server: + uuid: '{{ item.uuid }}' + state: 'absent' + when: cloudscale_resource_prefix in item.name + with_items: '{{ server_list.json }}' + loop_control: + label: '{{ item.name }} ({{ item.uuid }})' diff --git a/tests/integration/targets/cloudscale_common/tasks/cleanup_volumes.yml b/tests/integration/targets/cloudscale_common/tasks/cleanup_volumes.yml new file mode 100644 index 0000000000..67cc89bb31 --- /dev/null +++ b/tests/integration/targets/cloudscale_common/tasks/cleanup_volumes.yml @@ -0,0 +1,17 @@ +--- +- name: List all volumes + uri: + url: 'https://api.cloudscale.ch/v1/volumes' + headers: + Authorization: 'Bearer {{ cloudscale_api_token }}' + status_code: 200 + register: volume_list + +- name: Remove all volumes created by this test run + cloudscale_volume: + uuid: '{{ item.uuid }}' + state: 'absent' + when: cloudscale_resource_prefix in item.name + with_items: '{{ volume_list.json }}' + loop_control: + label: '{{ item.name }} ({{ item.uuid }})' diff --git a/tests/integration/targets/cloudscale_common/tasks/main.yml b/tests/integration/targets/cloudscale_common/tasks/main.yml new file mode 100644 index 0000000000..fa0be6eb80 --- /dev/null +++ b/tests/integration/targets/cloudscale_common/tasks/main.yml @@ -0,0 +1,6 @@ +--- +# Password to use for test server +# This has to be set as a fact, otherwise a new password will be generated +# on every variable access. 
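# NOTE (editor's sketch): a lazily templated lookup re-runs on every access, while
# set_fact evaluates it once and stores the result. Illustration (variable names
# hypothetical):
#   - vars:
#       lazy_pw: "{{ lookup('password', '/dev/null length=15 chars=ascii_letters') }}"
#     debug:
#       msg: "{{ lazy_pw }} / {{ lazy_pw }}"      # two different strings
#   - set_fact:
#       frozen_pw: "{{ lookup('password', '/dev/null length=15 chars=ascii_letters') }}"
#   - debug:
#       msg: "{{ frozen_pw }} / {{ frozen_pw }}"  # same string twice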
+- set_fact: + cloudscale_test_password: "{{ lookup('password', '/dev/null length=15 chars=ascii_letters') }}" diff --git a/tests/integration/targets/cloudscale_floating_ip/aliases b/tests/integration/targets/cloudscale_floating_ip/aliases new file mode 100644 index 0000000000..c200a3d2c8 --- /dev/null +++ b/tests/integration/targets/cloudscale_floating_ip/aliases @@ -0,0 +1,2 @@ +cloud/cloudscale +unsupported diff --git a/tests/integration/targets/cloudscale_floating_ip/meta/main.yml b/tests/integration/targets/cloudscale_floating_ip/meta/main.yml new file mode 100644 index 0000000000..8dd48f0337 --- /dev/null +++ b/tests/integration/targets/cloudscale_floating_ip/meta/main.yml @@ -0,0 +1,3 @@ +--- +dependencies: + - cloudscale_common diff --git a/tests/integration/targets/cloudscale_floating_ip/tasks/floating_ip.yml b/tests/integration/targets/cloudscale_floating_ip/tasks/floating_ip.yml new file mode 100644 index 0000000000..4e92c6ff46 --- /dev/null +++ b/tests/integration/targets/cloudscale_floating_ip/tasks/floating_ip.yml @@ -0,0 +1,129 @@ +- name: Request regional floating IP + cloudscale_floating_ip: + server: '{{ test01.uuid }}' + ip_version: '{{ item.ip_version }}' + reverse_ptr: '{{ item.reverse_ptr | default(omit) }}' + prefix_length: '{{ item.prefix_length | default(omit) }}' + region: '{{ cloudscale_test_region }}' + + register: floating_ip +- name: Verify request floating IP + assert: + that: + - floating_ip is successful + - floating_ip is changed + - floating_ip.region.slug == '{{ cloudscale_test_region }}' + - (item.ip_version == 4 and floating_ip.ip | ipv4) or (item.ip_version == 6 and floating_ip.ip | ipv6) + - floating_ip.server == test01.uuid + +- name: Check floating IP idempotence + cloudscale_floating_ip: + server: '{{ test01.uuid }}' + ip: '{{ floating_ip.ip }}' + region: '{{ cloudscale_test_region }}' + register: floating_ip_idempotence +- name: Verify floating IP idempotence + assert: + that: + - floating_ip_idempotence is successful + - floating_ip_idempotence is not changed + - floating_ip_idempotence.server == test01.uuid + - floating_ip.region.slug == '{{ cloudscale_test_region }}' + +- name: Request global floating IP + cloudscale_floating_ip: + server: '{{ test01.uuid }}' + ip_version: '{{ item.ip_version }}' + reverse_ptr: '{{ item.reverse_ptr | default(omit) }}' + prefix_length: '{{ item.prefix_length | default(omit) }}' + type: 'global' + register: global_floating_ip +- name: Verify global floating IP + assert: + that: + - global_floating_ip is successful + - global_floating_ip is changed + - global_floating_ip.region == None + - global_floating_ip.type == 'global' + - (item.ip_version == 4 and global_floating_ip.ip | ipv4) or (item.ip_version == 6 and global_floating_ip.ip | ipv6) + - global_floating_ip.server == test01.uuid + +- name: Release global floating IP + cloudscale_floating_ip: + ip: '{{ global_floating_ip.ip }}' + state: 'absent' + register: global_floating_ip +- name: Verify release of global floating IP + assert: + that: + - global_floating_ip is successful + - global_floating_ip is changed + - global_floating_ip.state == 'absent' + +- name: Check network parameter alias + cloudscale_floating_ip: + server: '{{ test01.uuid }}' + network: '{{ floating_ip.ip }}' + register: floating_ip_network +- name: Verify network parameter alias + assert: + that: + - floating_ip_network is successful + +- name: Move floating IP to second server + cloudscale_floating_ip: + server: '{{ test02.uuid }}' + ip: '{{ floating_ip.ip }}' + register: move_ip +- 
name: Verify move floating IPv4 to second server + assert: + that: + - move_ip is successful + - move_ip is changed + - move_ip.server == test02.uuid + +- name: Fail if server is missing on update + cloudscale_floating_ip: + ip: '{{ floating_ip.ip }}' + register: update_failed + ignore_errors: True +- name: Verify fail if server is missing on update + assert: + that: + - update_failed is failed + - "'Missing required parameter' in update_failed.msg" + +- name: Release floating IP + cloudscale_floating_ip: + ip: '{{ floating_ip.ip }}' + state: 'absent' + register: release_ip +- name: Verify release floating IPs + assert: + that: + - release_ip is successful + - release_ip is changed + - release_ip.state == 'absent' + +- name: Release floating IP idempotence + cloudscale_floating_ip: + ip: '{{ floating_ip.ip }}' + state: 'absent' + register: release_ip +- name: Verify release floating IPs idempotence + assert: + that: + - release_ip is successful + - release_ip is not changed + - release_ip.state == 'absent' + +- name: Fail if server is missing on request + cloudscale_floating_ip: + ip_version: 6 + register: request_failed + ignore_errors: True +- name: Verify fail if server is missing on request + assert: + that: + - request_failed is failed + - "'Missing required parameter' in request_failed.msg" diff --git a/tests/integration/targets/cloudscale_floating_ip/tasks/main.yml b/tests/integration/targets/cloudscale_floating_ip/tasks/main.yml new file mode 100644 index 0000000000..e46cad3108 --- /dev/null +++ b/tests/integration/targets/cloudscale_floating_ip/tasks/main.yml @@ -0,0 +1,35 @@ +- name: Cloudscale floating IP tests + block: + - name: Create a server + cloudscale_server: + name: '{{ cloudscale_resource_prefix }}-test01' + flavor: '{{ cloudscale_test_flavor }}' + image: '{{ cloudscale_test_image }}' + ssh_keys: '{{ cloudscale_test_ssh_key }}' + zone: '{{ cloudscale_test_zone }}' + register: test01 + + - name: Create a second server + cloudscale_server: + name: '{{ cloudscale_resource_prefix }}-test02' + flavor: '{{ cloudscale_test_flavor }}' + image: '{{ cloudscale_test_image }}' + ssh_keys: '{{ cloudscale_test_ssh_key }}' + zone: '{{ cloudscale_test_zone }}' + register: test02 + + - include_tasks: floating_ip.yml + loop: + - { 'ip_version': 4, 'reverse_ptr': '{{ cloudscale_resource_prefix }}-4.example.com' } + - { 'ip_version': 6, 'reverse_ptr': '{{ cloudscale_resource_prefix }}-6.example.com' } + - { 'ip_version': 6, 'prefix_length': 56 } + + - import_tasks: unassigned.yml + + always: + - import_role: + name: cloudscale_common + tasks_from: cleanup_servers + - import_role: + name: cloudscale_common + tasks_from: cleanup_floating_ips diff --git a/tests/integration/targets/cloudscale_floating_ip/tasks/unassigned.yml b/tests/integration/targets/cloudscale_floating_ip/tasks/unassigned.yml new file mode 100644 index 0000000000..214f4ab368 --- /dev/null +++ b/tests/integration/targets/cloudscale_floating_ip/tasks/unassigned.yml @@ -0,0 +1,26 @@ +--- +- name: Assign Floating IP to server test01 + cloudscale_floating_ip: + ip_version: 6 + server: '{{ test01.uuid }}' + reverse_ptr: '{{ cloudscale_resource_prefix }}-unassigned.example.com' + region: '{{ cloudscale_test_region }}' + register: floating_ip + +# The only way to have an unassigned floating IP is to delete the server +# where the floating IP is currently assigned. 
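# NOTE (editor): an optional sanity check (sketch; reuses the API-listing pattern
# from the cloudscale_common cleanup tasks) that the floating IP still exists and
# is unassigned after the server is gone:
#   - name: List floating IPs after server deletion
#     uri:
#       url: 'https://api.cloudscale.ch/v1/floating-ips'
#       headers:
#         Authorization: 'Bearer {{ cloudscale_api_token }}'
#       status_code: 200
#     register: fip_list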
+- name: Delete server test01 + cloudscale_server: + uuid: '{{ test01.uuid }}' + state: 'absent' + +- name: Do not fail if floating IP is unassigned + cloudscale_floating_ip: + ip: '{{ floating_ip.ip }}' + register: floating_ip_not_fail +- name: Verify do not fail if floating IP is unassigned + assert: + that: + - floating_ip_not_fail is successful + - floating_ip_not_fail is not changed + - floating_ip_not_fail.server == None diff --git a/tests/integration/targets/cloudscale_server/aliases b/tests/integration/targets/cloudscale_server/aliases new file mode 100644 index 0000000000..c200a3d2c8 --- /dev/null +++ b/tests/integration/targets/cloudscale_server/aliases @@ -0,0 +1,2 @@ +cloud/cloudscale +unsupported diff --git a/tests/integration/targets/cloudscale_server/defaults/main.yml b/tests/integration/targets/cloudscale_server/defaults/main.yml new file mode 100644 index 0000000000..6d8a0ff847 --- /dev/null +++ b/tests/integration/targets/cloudscale_server/defaults/main.yml @@ -0,0 +1,2 @@ +--- +cloudscale_test_flavor_2: flex-4 diff --git a/tests/integration/targets/cloudscale_server/meta/main.yml b/tests/integration/targets/cloudscale_server/meta/main.yml new file mode 100644 index 0000000000..8dd48f0337 --- /dev/null +++ b/tests/integration/targets/cloudscale_server/meta/main.yml @@ -0,0 +1,3 @@ +--- +dependencies: + - cloudscale_common diff --git a/tests/integration/targets/cloudscale_server/tasks/failures.yml b/tests/integration/targets/cloudscale_server/tasks/failures.yml new file mode 100644 index 0000000000..2c975d76bd --- /dev/null +++ b/tests/integration/targets/cloudscale_server/tasks/failures.yml @@ -0,0 +1,53 @@ +--- +- name: Fail missing params + cloudscale_server: + register: srv + ignore_errors: True +- name: 'VERIFY: Fail name and UUID' + assert: + that: + - srv is failed + +- name: Fail unexisting server group + cloudscale_server: + name: '{{ cloudscale_resource_prefix }}-test-group' + flavor: '{{ cloudscale_test_flavor }}' + image: '{{ cloudscale_test_image }}' + password: '{{ cloudscale_test_password }}' + server_groups: '{{ cloudscale_resource_prefix }}-unexist-group' + ignore_errors: True + register: srv +- name: 'VERIFY: Fail unexisting server group' + assert: + that: + - srv is failed + - srv.msg.startswith('Server group name or UUID not found') + +- name: Create two server groups with the same name + uri: + url: https://api.cloudscale.ch/v1/server-groups + method: POST + headers: + Authorization: 'Bearer {{ cloudscale_api_token }}' + body: + name: '{{ cloudscale_resource_prefix }}-duplicate' + type: anti-affinity + body_format: json + status_code: 201 + register: duplicate + with_sequence: count=2 + +- name: Try to use server groups with identical name + cloudscale_server: + name: '{{ cloudscale_resource_prefix }}-test-group' + flavor: '{{ cloudscale_test_flavor }}' + image: '{{ cloudscale_test_image }}' + password: '{{ cloudscale_test_password }}' + server_groups: '{{ cloudscale_resource_prefix }}-duplicate' + ignore_errors: True + register: srv +- name: 'VERIFY: Fail unexisting server group' + assert: + that: + - srv is failed + - srv.msg.startswith('More than one server group with name exists') diff --git a/tests/integration/targets/cloudscale_server/tasks/main.yml b/tests/integration/targets/cloudscale_server/tasks/main.yml new file mode 100644 index 0000000000..cb53757eb8 --- /dev/null +++ b/tests/integration/targets/cloudscale_server/tasks/main.yml @@ -0,0 +1,11 @@ +--- +- block: + - import_tasks: failures.yml + - import_tasks: tests.yml + always: + - 
import_role: + name: cloudscale_common + tasks_from: cleanup_servers + - import_role: + name: cloudscale_common + tasks_from: cleanup_server_groups \ No newline at end of file diff --git a/tests/integration/targets/cloudscale_server/tasks/tests.yml b/tests/integration/targets/cloudscale_server/tasks/tests.yml new file mode 100644 index 0000000000..34c4b3bf83 --- /dev/null +++ b/tests/integration/targets/cloudscale_server/tasks/tests.yml @@ -0,0 +1,676 @@ +--- +- name: Setup server groups + cloudscale_server_group: + name: '{{ cloudscale_resource_prefix }}-group-{{ item }}' + type: anti-affinity + zone: '{{ cloudscale_test_zone }}' + with_sequence: count=2 + +- name: Test create a running server in check mode + cloudscale_server: + name: '{{ cloudscale_resource_prefix }}-test' + flavor: '{{ cloudscale_test_flavor }}' + image: '{{ cloudscale_test_image }}' + ssh_keys: '{{ cloudscale_test_ssh_key }}' + server_groups: '{{ cloudscale_resource_prefix }}-group-1' + zone: '{{ cloudscale_test_zone }}' + tags: + project: ansible-test + stage: production + sla: 24-7 + register: server + check_mode: yes +- name: Verify create a running server in check mode + assert: + that: + - server is changed + - server.state == 'absent' + +- name: Test create a running server + cloudscale_server: + name: '{{ cloudscale_resource_prefix }}-test' + flavor: '{{ cloudscale_test_flavor }}' + image: '{{ cloudscale_test_image }}' + ssh_keys: '{{ cloudscale_test_ssh_key }}' + server_groups: '{{ cloudscale_resource_prefix }}-group-1' + zone: '{{ cloudscale_test_zone }}' + tags: + project: ansible-test + stage: production + sla: 24-7 + register: server +- name: Verify create a running server + assert: + that: + - server is changed + - server.state == 'running' + - server.server_groups.0.name == '{{ cloudscale_resource_prefix }}-group-1' + - server.zone.slug == '{{ cloudscale_test_zone }}' + - server.tags.project == 'ansible-test' + - server.tags.stage == 'production' + - server.tags.sla == '24-7' + +- name: Test create a running server idempotence + cloudscale_server: + name: '{{ cloudscale_resource_prefix }}-test' + flavor: '{{ cloudscale_test_flavor }}' + image: '{{ cloudscale_test_image }}' + ssh_keys: '{{ cloudscale_test_ssh_key }}' + server_groups: '{{ cloudscale_resource_prefix }}-group-1' + zone: '{{ cloudscale_test_zone }}' + tags: + project: ansible-test + stage: production + sla: 24-7 + register: server +- name: Verify create a running server idempotence + assert: + that: + - server is not changed + - server.state == 'running' + - server.server_groups.0.name == '{{ cloudscale_resource_prefix }}-group-1' + - server.zone.slug == '{{ cloudscale_test_zone }}' + - server.tags.project == 'ansible-test' + - server.tags.stage == 'production' + - server.tags.sla == '24-7' + +- name: Test update tags in check mode + cloudscale_server: + name: '{{ cloudscale_resource_prefix }}-test' + flavor: '{{ cloudscale_test_flavor }}' + image: '{{ cloudscale_test_image }}' + ssh_keys: '{{ cloudscale_test_ssh_key }}' + server_groups: '{{ cloudscale_resource_prefix }}-group-1' + tags: + project: ansible-test + stage: staging + sla: 8-5 + register: server + check_mode: yes +- name: Verify update tags in check mode + assert: + that: + - server is changed + - server.state == 'running' + - server.server_groups.0.name == '{{ cloudscale_resource_prefix }}-group-1' + - server.tags.project == 'ansible-test' + - server.tags.stage == 'production' + - server.tags.sla == '24-7' + +- name: Test update tags + cloudscale_server: + name: '{{ 
cloudscale_resource_prefix }}-test' + flavor: '{{ cloudscale_test_flavor }}' + image: '{{ cloudscale_test_image }}' + ssh_keys: '{{ cloudscale_test_ssh_key }}' + server_groups: '{{ cloudscale_resource_prefix }}-group-1' + tags: + project: ansible-test + stage: staging + sla: 8-5 + register: server +- name: Verify update tags + assert: + that: + - server is changed + - server.state == 'running' + - server.server_groups.0.name == '{{ cloudscale_resource_prefix }}-group-1' + - server.tags.project == 'ansible-test' + - server.tags.stage == 'staging' + - server.tags.sla == '8-5' + +- name: Test update tags idempotence + cloudscale_server: + name: '{{ cloudscale_resource_prefix }}-test' + flavor: '{{ cloudscale_test_flavor }}' + image: '{{ cloudscale_test_image }}' + ssh_keys: '{{ cloudscale_test_ssh_key }}' + server_groups: '{{ cloudscale_resource_prefix }}-group-1' + tags: + project: ansible-test + stage: staging + sla: 8-5 + register: server +- name: Verify update tags idempotence + assert: + that: + - server is not changed + - server.state == 'running' + - server.server_groups.0.name == '{{ cloudscale_resource_prefix }}-group-1' + - server.tags.project == 'ansible-test' + - server.tags.stage == 'staging' + - server.tags.sla == '8-5' + +- name: Test omit tags idempotence + cloudscale_server: + name: '{{ cloudscale_resource_prefix }}-test' + flavor: '{{ cloudscale_test_flavor }}' + image: '{{ cloudscale_test_image }}' + ssh_keys: '{{ cloudscale_test_ssh_key }}' + server_groups: '{{ cloudscale_resource_prefix }}-group-1' + register: server +- name: Verify update tags idempotence + assert: + that: + - server is not changed + - server.state == 'running' + - server.server_groups.0.name == '{{ cloudscale_resource_prefix }}-group-1' + - server.tags.project == 'ansible-test' + - server.tags.stage == 'staging' + - server.tags.sla == '8-5' + +- name: Test delete tags + cloudscale_server: + name: '{{ cloudscale_resource_prefix }}-test' + flavor: '{{ cloudscale_test_flavor }}' + image: '{{ cloudscale_test_image }}' + ssh_keys: '{{ cloudscale_test_ssh_key }}' + server_groups: '{{ cloudscale_resource_prefix }}-group-1' + tags: {} + register: server +- name: Verify delete tags + assert: + that: + - server is changed + - server.state == 'running' + - server.server_groups.0.name == '{{ cloudscale_resource_prefix }}-group-1' + - not server.tags + +- name: Test delete tags idempotence + cloudscale_server: + name: '{{ cloudscale_resource_prefix }}-test' + flavor: '{{ cloudscale_test_flavor }}' + image: '{{ cloudscale_test_image }}' + ssh_keys: '{{ cloudscale_test_ssh_key }}' + server_groups: '{{ cloudscale_resource_prefix }}-group-1' + tags: {} + register: server +- name: Verify delete tags idempotence + assert: + that: + - server is not changed + - server.state == 'running' + - server.server_groups.0.name == '{{ cloudscale_resource_prefix }}-group-1' + - not server.tags + +- name: Test update flavor of a running server without force in check mode + cloudscale_server: + name: '{{ cloudscale_resource_prefix }}-test' + flavor: '{{ cloudscale_test_flavor_2 }}' + image: '{{ cloudscale_test_image }}' + ssh_keys: '{{ cloudscale_test_ssh_key }}' + force: no + register: server + check_mode: yes +- name: Verify update flavor of a running server without force in check mode + assert: + that: + - server is not changed + - server.state == 'running' + - server.flavor.slug == '{{ cloudscale_test_flavor }}' + - server.server_groups.0.name == '{{ cloudscale_resource_prefix }}-group-1' + +- name: Test update flavor of a running 
server without force + cloudscale_server: + name: '{{ cloudscale_resource_prefix }}-test' + flavor: '{{ cloudscale_test_flavor_2 }}' + image: '{{ cloudscale_test_image }}' + ssh_keys: '{{ cloudscale_test_ssh_key }}' + force: no + register: server +- name: Verify update flavor of a running server without force + assert: + that: + - server is not changed + - server.state == 'running' + - server.flavor.slug == '{{ cloudscale_test_flavor }}' + - server.server_groups.0.name == '{{ cloudscale_resource_prefix }}-group-1' + +- name: Test update flavor of a running server without force idempotence + cloudscale_server: + name: '{{ cloudscale_resource_prefix }}-test' + flavor: '{{ cloudscale_test_flavor_2 }}' + image: '{{ cloudscale_test_image }}' + ssh_keys: '{{ cloudscale_test_ssh_key }}' + force: no + register: server +- name: Verify update flavor of a running server without force idempotence + assert: + that: + - server is not changed + - server.state == 'running' + - server.flavor.slug == '{{ cloudscale_test_flavor }}' + - server.server_groups.0.name == '{{ cloudscale_resource_prefix }}-group-1' + +- name: Test update flavor and name of a running server without force in check mode + cloudscale_server: + uuid: '{{ server.uuid }}' + name: '{{ cloudscale_resource_prefix }}-test-renamed' + flavor: '{{ cloudscale_test_flavor_2 }}' + image: '{{ cloudscale_test_image }}' + ssh_keys: '{{ cloudscale_test_ssh_key }}' + force: no + register: server + check_mode: yes +- name: Verify update flavor and name of a running server without force in check mode + assert: + that: + - server is changed + - server.state == 'running' + - server.flavor.slug == '{{ cloudscale_test_flavor }}' + - server.name == '{{ cloudscale_resource_prefix }}-test' + +- name: Test update flavor and name of a running server without force + cloudscale_server: + uuid: '{{ server.uuid }}' + name: '{{ cloudscale_resource_prefix }}-test-renamed' + flavor: '{{ cloudscale_test_flavor_2 }}' + image: '{{ cloudscale_test_image }}' + ssh_keys: '{{ cloudscale_test_ssh_key }}' + force: no + register: server +- name: Verify update flavor and name of a running server without force + assert: + that: + - server is changed + - server.state == 'running' + - server.flavor.slug == '{{ cloudscale_test_flavor }}' + - server.name == '{{ cloudscale_resource_prefix }}-test-renamed' + +- name: Test update flavor and name of a running server without force idempotence + cloudscale_server: + uuid: '{{ server.uuid }}' + name: '{{ cloudscale_resource_prefix }}-test-renamed' + flavor: '{{ cloudscale_test_flavor_2 }}' + image: '{{ cloudscale_test_image }}' + ssh_keys: '{{ cloudscale_test_ssh_key }}' + force: no + register: server +- name: Verify update flavor and name of a running server without force idempotence + assert: + that: + - server is not changed + - server.state == 'running' + - server.flavor.slug == '{{ cloudscale_test_flavor }}' + - server.name == '{{ cloudscale_resource_prefix }}-test-renamed' + +- name: Test update flavor of a running server with force in check mode + cloudscale_server: + name: '{{ cloudscale_resource_prefix }}-test-renamed' + flavor: '{{ cloudscale_test_flavor_2 }}' + image: '{{ cloudscale_test_image }}' + ssh_keys: '{{ cloudscale_test_ssh_key }}' + force: yes + register: server + check_mode: yes +- name: Verify update flavor of a running server with force in check mode + assert: + that: + - server is changed + - server.state == 'running' + - server.flavor.slug == '{{ cloudscale_test_flavor }}' + - server.name == '{{ 
cloudscale_resource_prefix }}-test-renamed' + +- name: Test update flavor of a running server with force + cloudscale_server: + name: '{{ cloudscale_resource_prefix }}-test-renamed' + flavor: '{{ cloudscale_test_flavor_2 }}' + image: '{{ cloudscale_test_image }}' + ssh_keys: '{{ cloudscale_test_ssh_key }}' + force: yes + register: server +- name: Verify update flavor of a running server with force + assert: + that: + - server is changed + - server.state == 'running' + - server.flavor.slug == '{{ cloudscale_test_flavor_2 }}' + - server.name == '{{ cloudscale_resource_prefix }}-test-renamed' + +- name: Test update a running server with force idempotence + cloudscale_server: + name: '{{ cloudscale_resource_prefix }}-test-renamed' + flavor: '{{ cloudscale_test_flavor_2 }}' + image: '{{ cloudscale_test_image }}' + ssh_keys: '{{ cloudscale_test_ssh_key }}' + force: yes + register: server +- name: Verify update flavor of a running server with force idempotence + assert: + that: + - server is not changed + - server.state == 'running' + - server.flavor.slug == '{{ cloudscale_test_flavor_2 }}' + - server.name == '{{ cloudscale_resource_prefix }}-test-renamed' + +- name: Remember uuid of running server for anti affinity + set_fact: + running_server_uuid: '{{ server.uuid }}' + +- name: Test create server stopped in anti affinity and private network only in check mode + cloudscale_server: + name: '{{ cloudscale_resource_prefix }}-test-stopped' + flavor: '{{ cloudscale_test_flavor }}' + image: '{{ cloudscale_test_image }}' + ssh_keys: '{{ cloudscale_test_ssh_key }}' + server_groups: '{{ cloudscale_resource_prefix }}-group-1' + zone: '{{ cloudscale_test_zone }}' + use_public_network: no + use_private_network: yes + state: stopped + check_mode: yes + register: server_stopped +- name: Verify create server stopped in anti affinity and private network only in check mode + assert: + that: + - server_stopped is changed + - server_stopped.state == 'absent' + +- name: Test create server stopped in anti affinity and private network only + cloudscale_server: + name: '{{ cloudscale_resource_prefix }}-test-stopped' + flavor: '{{ cloudscale_test_flavor }}' + image: '{{ cloudscale_test_image }}' + ssh_keys: '{{ cloudscale_test_ssh_key }}' + server_groups: '{{ cloudscale_resource_prefix }}-group-1' + zone: '{{ cloudscale_test_zone }}' + use_public_network: no + use_private_network: yes + state: stopped + register: server_stopped +- name: Verify create server stopped in anti affinity and private network only + assert: + that: + - server_stopped is changed + - server_stopped.state == 'stopped' + - server_stopped.zone.slug == '{{ cloudscale_test_zone }}' + - server_stopped.anti_affinity_with.0.uuid == running_server_uuid + - server_stopped.interfaces.0.type == 'private' + - server_stopped.server_groups.0.name == '{{ cloudscale_resource_prefix }}-group-1' + +- name: Test create server stopped in anti affinity and private network only idempotence + cloudscale_server: + name: '{{ cloudscale_resource_prefix }}-test-stopped' + flavor: '{{ cloudscale_test_flavor }}' + image: '{{ cloudscale_test_image }}' + ssh_keys: '{{ cloudscale_test_ssh_key }}' + server_groups: '{{ cloudscale_resource_prefix }}-group-1' + zone: '{{ cloudscale_test_zone }}' + use_public_network: no + use_private_network: yes + state: stopped + register: server_stopped +- name: Verify create server stopped in anti affinity and private network only idempotence + assert: + that: + - server_stopped is not changed + - server_stopped.state == 'stopped' + - 
server_stopped.zone.slug == '{{ cloudscale_test_zone }}' + - server_stopped.anti_affinity_with.0.uuid == running_server_uuid + - server_stopped.interfaces.0.type == 'private' + - server_stopped.server_groups.0.name == '{{ cloudscale_resource_prefix }}-group-1' + +- name: Test change server group not changed + cloudscale_server: + name: '{{ cloudscale_resource_prefix }}-test-stopped' + flavor: '{{ cloudscale_test_flavor }}' + image: '{{ cloudscale_test_image }}' + ssh_keys: '{{ cloudscale_test_ssh_key }}' + server_groups: '{{ cloudscale_resource_prefix }}-group-2' + use_public_network: no + use_private_network: yes + state: stopped + register: server_stopped +- name: Verify Test update server group not changed + assert: + that: + - server_stopped is not changed + - server_stopped.state == 'stopped' + - server_stopped.zone.slug == '{{ cloudscale_test_zone }}' + - server_stopped.anti_affinity_with.0.uuid == running_server_uuid + - server_stopped.interfaces.0.type == 'private' + - server_stopped.server_groups.0.name == '{{ cloudscale_resource_prefix }}-group-1' + +- name: Test create server with password in check mode + cloudscale_server: + name: '{{ cloudscale_resource_prefix }}-test-password' + flavor: '{{ cloudscale_test_flavor }}' + image: '{{ cloudscale_test_image }}' + password: '{{ cloudscale_test_password }}' + check_mode: yes + register: server_password +- name: Verify create server with password in check mode + assert: + that: + - server_password is changed + - server_password.state == 'absent' + # Verify password is not logged + - server_password.diff.after.password != cloudscale_test_password + +- name: Test create server with password + cloudscale_server: + name: '{{ cloudscale_resource_prefix }}-test-password' + flavor: '{{ cloudscale_test_flavor }}' + image: '{{ cloudscale_test_image }}' + password: '{{ cloudscale_test_password }}' + register: server_password +- name: Verify create server with password + assert: + that: + - server_password is changed + - server_password.state == 'running' + # Verify password is not logged + - server_password.diff.after.password != cloudscale_test_password + +- name: Test create server with password idempotence + cloudscale_server: + name: '{{ cloudscale_resource_prefix }}-test-password' + flavor: '{{ cloudscale_test_flavor }}' + image: '{{ cloudscale_test_image }}' + password: '{{ cloudscale_test_password }}' + register: server_password +- name: Verify create server with password idempotence + assert: + that: + - server_password is not changed + - server_password.state == 'running' + +- name: Test create server failure without required parameters + cloudscale_server: + name: '{{ cloudscale_resource_prefix }}-test-failed' + register: server_failed + ignore_errors: yes +- name: Verify create server failure without required parameters + assert: + that: + - server_failed is failed + - "'Failure while calling the cloudscale.ch API with POST for \"servers\".' in server_failed.msg" + - "'This field is required.' 
in server_failed.fetch_url_info.body" + +- name: Test stop running server in check mode + cloudscale_server: + name: '{{ cloudscale_resource_prefix }}-test-renamed' + state: stopped + check_mode: yes + register: server +- name: Verify stop running server in check mode + assert: + that: + - server is changed + - server.state == 'running' + +- name: Test stop running server + cloudscale_server: + name: '{{ cloudscale_resource_prefix }}-test-renamed' + state: stopped + register: server +- name: Verify stop running server + assert: + that: + - server is changed + - server.state == 'stopped' + +- name: Test stop running server idempotence + cloudscale_server: + name: '{{ cloudscale_resource_prefix }}-test-renamed' + state: 'stopped' + register: server +- name: Verify stop running server idempotence + assert: + that: + - server is not changed + - server.state == 'stopped' + +- name: Test update a stopped server in check mode + cloudscale_server: + uuid: '{{ server.uuid }}' + name: '{{ cloudscale_resource_prefix }}-test' + flavor: '{{ cloudscale_test_flavor }}' + image: '{{ cloudscale_test_image }}' + ssh_keys: '{{ cloudscale_test_ssh_key }}' + state: stopped + register: server + check_mode: yes +- name: Verify update a stopped server in check mode + assert: + that: + - server is changed + - server.state == 'stopped' + - server.flavor.slug == '{{ cloudscale_test_flavor_2 }}' + - server.name == '{{ cloudscale_resource_prefix }}-test-renamed' + +- name: Test update a stopped server without force + cloudscale_server: + uuid: '{{ server.uuid }}' + name: '{{ cloudscale_resource_prefix }}-test' + flavor: '{{ cloudscale_test_flavor }}' + image: '{{ cloudscale_test_image }}' + ssh_keys: '{{ cloudscale_test_ssh_key }}' + state: stopped + register: server +- name: Verify update a stopped server without force + assert: + that: + - server is changed + - server.state == 'stopped' + - server.flavor.slug == '{{ cloudscale_test_flavor }}' + - server.name == '{{ cloudscale_resource_prefix }}-test' + +- name: Test update a stopped server idempotence + cloudscale_server: + uuid: '{{ server.uuid }}' + name: '{{ cloudscale_resource_prefix }}-test' + flavor: '{{ cloudscale_test_flavor }}' + image: '{{ cloudscale_test_image }}' + ssh_keys: '{{ cloudscale_test_ssh_key }}' + state: stopped + register: server +- name: Verify update a stopped server idempotence + assert: + that: + - server is not changed + - server.state == 'stopped' + - server.flavor.slug == '{{ cloudscale_test_flavor }}' + - server.name == '{{ cloudscale_resource_prefix }}-test' + +- name: Test server running in check mode + cloudscale_server: + name: '{{ cloudscale_resource_prefix }}-test' + state: running + register: server + check_mode: yes +- name: Verify server running in check mode + assert: + that: + - server is changed + - server.state == 'stopped' + +- name: Test server running + cloudscale_server: + name: '{{ cloudscale_resource_prefix }}-test' + state: running + register: server +- name: Verify server running + assert: + that: + - server is changed + - server.state == 'running' + +- name: Test server running idempotence + cloudscale_server: + name: '{{ cloudscale_resource_prefix }}-test' + state: running + register: server +- name: Verify server running idempotence + assert: + that: + - server is not changed + - server.state == 'running' + +- name: Test running server deletion by name in check mode + cloudscale_server: + name: '{{ cloudscale_resource_prefix }}-test' + state: absent + register: server + check_mode: yes +- name: Verify running 
+  assert:
+    that:
+      - server is changed
+      - server.state == 'running'
+
+- name: Test running server deletion by name
+  cloudscale_server:
+    name: '{{ cloudscale_resource_prefix }}-test'
+    state: absent
+  register: server
+- name: Verify running server deletion by name
+  assert:
+    that:
+      - server is changed
+      - server.state == 'absent'
+
+- name: Test running server deletion by name idempotence
+  cloudscale_server:
+    name: '{{ cloudscale_resource_prefix }}-test'
+    state: absent
+  register: server
+- name: Verify running server deletion by name idempotence
+  assert:
+    that:
+      - server is not changed
+      - server.state == 'absent'
+
+- name: Test stopped server deletion by uuid in check mode
+  cloudscale_server:
+    uuid: '{{ server_stopped.uuid }}'
+    state: absent
+  register: server_stopped
+  check_mode: yes
+- name: Verify stopped server deletion by uuid in check mode
+  assert:
+    that:
+      - server_stopped is changed
+      - server_stopped.state == 'stopped'
+
+- name: Test stopped server deletion by uuid
+  cloudscale_server:
+    uuid: '{{ server_stopped.uuid }}'
+    state: absent
+  register: server_stopped
+- name: Verify stopped server deletion by uuid
+  assert:
+    that:
+      - server_stopped is changed
+      - server_stopped.state == 'absent'
+
+- name: Test stopped server deletion by uuid idempotence
+  cloudscale_server:
+    uuid: '{{ server_stopped.uuid }}'
+    state: absent
+  register: server_stopped
+- name: Verify stopped server deletion by uuid idempotence
+  assert:
+    that:
+      - server_stopped is not changed
+      - server_stopped.state == 'absent'
diff --git a/tests/integration/targets/cloudscale_server_group/aliases b/tests/integration/targets/cloudscale_server_group/aliases
new file mode 100644
index 0000000000..c200a3d2c8
--- /dev/null
+++ b/tests/integration/targets/cloudscale_server_group/aliases
@@ -0,0 +1,2 @@
+cloud/cloudscale
+unsupported
diff --git a/tests/integration/targets/cloudscale_server_group/meta/main.yml b/tests/integration/targets/cloudscale_server_group/meta/main.yml
new file mode 100644
index 0000000000..8dd48f0337
--- /dev/null
+++ b/tests/integration/targets/cloudscale_server_group/meta/main.yml
@@ -0,0 +1,3 @@
+---
+dependencies:
+  - cloudscale_common
diff --git a/tests/integration/targets/cloudscale_server_group/tasks/failures.yml b/tests/integration/targets/cloudscale_server_group/tasks/failures.yml
new file mode 100644
index 0000000000..9269a63458
--- /dev/null
+++ b/tests/integration/targets/cloudscale_server_group/tasks/failures.yml
@@ -0,0 +1,45 @@
+---
+- name: Fail missing params
+  cloudscale_server_group:
+  register: grp
+  ignore_errors: True
+- name: 'VERIFY: Fail missing params'
+  assert:
+    that:
+      - grp is failed
+
+- name: Create two server groups with the same name
+  uri:
+    url: 'https://api.cloudscale.ch/v1/server-groups'
+    method: POST
+    headers:
+      Authorization: 'Bearer {{ cloudscale_api_token }}'
+    body:
+      name: '{{ cloudscale_resource_prefix }}-duplicate'
+      type: 'anti-affinity'
+    body_format: json
+    status_code: 201
+  register: duplicate
+  with_sequence: count=2
+
+- name: Try access to duplicate name
+  cloudscale_server_group:
+    name: '{{ cloudscale_resource_prefix }}-duplicate'
+  register: grp
+  ignore_errors: True
+- name: 'VERIFY: Try access to duplicate name'
+  assert:
+    that:
+      - grp is failed
+      - grp.msg.startswith('More than one server group with name exists')
+
+- name: Fail server group creation with UUID
+  cloudscale_server_group:
+    uuid: ea3b39a3-77a8-4d0b-881d-0bb00a1e7f48
+  register: grp
+  ignore_errors: True
+- name: 'VERIFY: Fail server group creation with UUID'
+  assert:
+    that:
+      - grp is failed
+      - grp.msg.startswith('missing required arguments')
\ No newline at end of file
diff --git a/tests/integration/targets/cloudscale_server_group/tasks/main.yml b/tests/integration/targets/cloudscale_server_group/tasks/main.yml
new file mode 100644
index 0000000000..f8783414af
--- /dev/null
+++ b/tests/integration/targets/cloudscale_server_group/tasks/main.yml
@@ -0,0 +1,8 @@
+---
+- block:
+    - import_tasks: failures.yml
+    - import_tasks: tests.yml
+  always:
+    - import_role:
+        name: cloudscale_common
+        tasks_from: cleanup_server_groups
diff --git a/tests/integration/targets/cloudscale_server_group/tasks/tests.yml b/tests/integration/targets/cloudscale_server_group/tasks/tests.yml
new file mode 100644
index 0000000000..3fcaa0397b
--- /dev/null
+++ b/tests/integration/targets/cloudscale_server_group/tasks/tests.yml
@@ -0,0 +1,159 @@
+---
+- name: Create server group in check mode
+  cloudscale_server_group:
+    name: '{{ cloudscale_resource_prefix }}-grp'
+    tags:
+      project: ansible-test
+      stage: production
+      sla: 24-7
+  register: grp
+  check_mode: yes
+- name: 'VERIFY: Create server group in check mode'
+  assert:
+    that:
+      - grp is changed
+      - grp.name == '{{ cloudscale_resource_prefix }}-grp'
+      - not grp.uuid
+
+- name: Create server group
+  cloudscale_server_group:
+    name: '{{ cloudscale_resource_prefix }}-grp'
+    zone: '{{ cloudscale_test_zone }}'
+    tags:
+      project: ansible-test
+      stage: production
+      sla: 24-7
+  register: grp
+- name: 'VERIFY: Create server group'
+  assert:
+    that:
+      - grp is changed
+      - grp.type == 'anti-affinity'
+      - grp.name == '{{ cloudscale_resource_prefix }}-grp'
+      - grp.zone.slug == '{{ cloudscale_test_zone }}'
+      - grp.uuid
+      - grp.tags.project == 'ansible-test'
+      - grp.tags.stage == 'production'
+      - grp.tags.sla == '24-7'
+
+- name: Remember uuid
+  set_fact:
+    server_group_uuid: '{{ grp.uuid }}'
+
+- name: Create server group idempotence
+  cloudscale_server_group:
+    name: '{{ cloudscale_resource_prefix }}-grp'
+    zone: '{{ cloudscale_test_zone }}'
+    tags:
+      project: ansible-test
+      stage: production
+      sla: 24-7
+  register: grp
+- name: 'VERIFY: Create server group idempotence'
+  assert:
+    that:
+      - grp is not changed
+      - grp.name == '{{ cloudscale_resource_prefix }}-grp'
+      - grp.zone.slug == '{{ cloudscale_test_zone }}'
+      - grp.uuid == server_group_uuid
+      - grp.tags.project == 'ansible-test'
+      - grp.tags.stage == 'production'
+      - grp.tags.sla == '24-7'
+
+- name: Update server group in check mode
+  cloudscale_server_group:
+    uuid: '{{ server_group_uuid }}'
+    name: '{{ cloudscale_resource_prefix }}-grp2'
+    tags:
+      project: ansible-test
+      stage: staging
+      sla: 8-5
+  register: grp
+  check_mode: yes
+- name: 'VERIFY: Update server group in check mode'
+  assert:
+    that:
+      - grp is changed
+      - grp.name == '{{ cloudscale_resource_prefix }}-grp'
+      - grp.uuid == server_group_uuid
+      - grp.zone.slug == '{{ cloudscale_test_zone }}'
+      - grp.tags.project == 'ansible-test'
+      - grp.tags.stage == 'production'
+      - grp.tags.sla == '24-7'
+
+- name: Update server group
+  cloudscale_server_group:
+    uuid: '{{ server_group_uuid }}'
+    name: '{{ cloudscale_resource_prefix }}-grp2'
+    tags:
+      project: ansible-test
+      stage: staging
+      sla: 8-5
+  register: grp
+- name: 'VERIFY: Update server group'
+  assert:
+    that:
+      - grp is changed
+      - grp.name == '{{ cloudscale_resource_prefix }}-grp2'
+      - grp.uuid == server_group_uuid
+      - grp.zone.slug == '{{ cloudscale_test_zone }}'
+      - grp.tags.project == 'ansible-test'
+      - grp.tags.stage == 'staging'
+      - grp.tags.sla == '8-5'
+
+- name: Update server group idempotence
+  cloudscale_server_group:
+    uuid: '{{ server_group_uuid }}'
+    name: '{{ cloudscale_resource_prefix }}-grp2'
+    tags:
+      project: ansible-test
+      stage: staging
+      sla: 8-5
+  register: grp
+- name: 'VERIFY: Update server group idempotence'
+  assert:
+    that:
+      - grp is not changed
+      - grp.name == '{{ cloudscale_resource_prefix }}-grp2'
+      - grp.uuid == server_group_uuid
+      - grp.zone.slug == '{{ cloudscale_test_zone }}'
+      - grp.tags.project == 'ansible-test'
+      - grp.tags.stage == 'staging'
+      - grp.tags.sla == '8-5'
+
+- name: Delete server group in check mode
+  cloudscale_server_group:
+    name: '{{ cloudscale_resource_prefix }}-grp2'
+    state: absent
+  register: grp
+  check_mode: yes
+- name: 'VERIFY: Delete server group in check mode'
+  assert:
+    that:
+      - grp is changed
+      - grp.name == '{{ cloudscale_resource_prefix }}-grp2'
+      - grp.uuid == server_group_uuid
+
+- name: Delete server group
+  cloudscale_server_group:
+    name: '{{ cloudscale_resource_prefix }}-grp2'
+    state: absent
+  register: grp
+- name: 'VERIFY: Delete server group'
+  assert:
+    that:
+      - grp is changed
+      - grp.name == '{{ cloudscale_resource_prefix }}-grp2'
+      - grp.uuid == server_group_uuid
+
+- name: Delete server group idempotence
+  cloudscale_server_group:
+    name: '{{ cloudscale_resource_prefix }}-grp2'
+    state: absent
+  register: grp
+- name: 'VERIFY: Delete server group idempotence'
+  assert:
+    that:
+      - grp is not changed
+      - grp.name == '{{ cloudscale_resource_prefix }}-grp2'
+      - not grp.uuid
diff --git a/tests/integration/targets/cloudscale_volume/aliases b/tests/integration/targets/cloudscale_volume/aliases
new file mode 100644
index 0000000000..c200a3d2c8
--- /dev/null
+++ b/tests/integration/targets/cloudscale_volume/aliases
@@ -0,0 +1,2 @@
+cloud/cloudscale
+unsupported
diff --git a/tests/integration/targets/cloudscale_volume/meta/main.yml b/tests/integration/targets/cloudscale_volume/meta/main.yml
new file mode 100644
index 0000000000..8dd48f0337
--- /dev/null
+++ b/tests/integration/targets/cloudscale_volume/meta/main.yml
@@ -0,0 +1,3 @@
+---
+dependencies:
+  - cloudscale_common
diff --git a/tests/integration/targets/cloudscale_volume/tasks/cleanup.yml b/tests/integration/targets/cloudscale_volume/tasks/cleanup.yml
new file mode 100644
index 0000000000..716717071a
--- /dev/null
+++ b/tests/integration/targets/cloudscale_volume/tasks/cleanup.yml
@@ -0,0 +1,5 @@
+---
+- name: Remove test server
+  cloudscale_server:
+    uuid: '{{ server.uuid }}'
+    state: 'absent'
diff --git a/tests/integration/targets/cloudscale_volume/tasks/failures.yml b/tests/integration/targets/cloudscale_volume/tasks/failures.yml
new file mode 100644
index 0000000000..cab0d27cdc
--- /dev/null
+++ b/tests/integration/targets/cloudscale_volume/tasks/failures.yml
@@ -0,0 +1,38 @@
+---
+- name: Create two volumes with the same name
+  uri:
+    url: 'https://api.cloudscale.ch/v1/volumes'
+    method: POST
+    headers:
+      Authorization: 'Bearer {{ cloudscale_api_token }}'
+    body:
+      name: '{{ cloudscale_resource_prefix }}-duplicate'
+      size_gb: 50
+    body_format: json
+    status_code: 201
+  register: duplicate
+  with_sequence: count=2
+
+- name: Try access to duplicate name
+  cloudscale_volume:
+    name: '{{ cloudscale_resource_prefix }}-duplicate'
+    size_gb: 10
+  register: vol
+  ignore_errors: True
+- name: 'VERIFY: Try access to duplicate name'
+  assert:
+    that:
+      - vol is failed
+
+- name: Fail volume creation with UUID
+  cloudscale_volume:
+    uuid: ea3b39a3-77a8-4d0b-881d-0bb00a1e7f48
+    name: '{{ cloudscale_resource_prefix }}-inexistent'
+    size_gb: 10
+  register: vol
+  ignore_errors: True
+- name: 'VERIFY: Fail volume creation with UUID'
+  assert:
+    that:
+      - vol is failed
+      - vol.msg.startswith('The volume with UUID \'ea3b39a3-77a8-4d0b-881d-0bb00a1e7f48\' was not found')
diff --git a/tests/integration/targets/cloudscale_volume/tasks/main.yml b/tests/integration/targets/cloudscale_volume/tasks/main.yml
new file mode 100644
index 0000000000..984f6e1a24
--- /dev/null
+++ b/tests/integration/targets/cloudscale_volume/tasks/main.yml
@@ -0,0 +1,11 @@
+---
+- block:
+    - import_tasks: setup.yml
+    - import_tasks: tests.yml
+  always:
+    - import_role:
+        name: cloudscale_common
+        tasks_from: cleanup_servers
+    - import_role:
+        name: cloudscale_common
+        tasks_from: cleanup_volumes
diff --git a/tests/integration/targets/cloudscale_volume/tasks/setup.yml b/tests/integration/targets/cloudscale_volume/tasks/setup.yml
new file mode 100644
index 0000000000..4e3593cfe3
--- /dev/null
+++ b/tests/integration/targets/cloudscale_volume/tasks/setup.yml
@@ -0,0 +1,9 @@
+---
+- name: Create test instance
+  cloudscale_server:
+    name: '{{ cloudscale_resource_prefix }}-server'
+    flavor: '{{ cloudscale_test_flavor }}'
+    zone: '{{ cloudscale_test_zone }}'
+    image: '{{ cloudscale_test_image }}'
+    ssh_keys: '{{ cloudscale_test_ssh_key }}'
+  register: server
diff --git a/tests/integration/targets/cloudscale_volume/tasks/tests.yml b/tests/integration/targets/cloudscale_volume/tasks/tests.yml
new file mode 100644
index 0000000000..ec1ac28b51
--- /dev/null
+++ b/tests/integration/targets/cloudscale_volume/tasks/tests.yml
@@ -0,0 +1,262 @@
+---
+- name: Create volume in check mode
+  cloudscale_volume:
+    name: '{{ cloudscale_resource_prefix }}-vol'
+    zone: '{{ cloudscale_test_zone }}'
+    size_gb: 50
+    tags:
+      project: ansible-test
+      stage: production
+      sla: 24-7
+  check_mode: yes
+  register: vol
+- name: 'VERIFY: Create volume in check mode'
+  assert:
+    that:
+      - vol is successful
+      - vol is changed
+      - vol.state == 'absent'
+
+- name: Create volume
+  cloudscale_volume:
+    name: '{{ cloudscale_resource_prefix }}-vol'
+    zone: '{{ cloudscale_test_zone }}'
+    size_gb: 50
+    tags:
+      project: ansible-test
+      stage: production
+      sla: 24-7
+  register: vol
+- name: 'VERIFY: Create volume'
+  assert:
+    that:
+      - vol is successful
+      - vol is changed
+      - vol.size_gb == 50
+      - vol.name == '{{ cloudscale_resource_prefix }}-vol'
+      - vol.zone.slug == '{{ cloudscale_test_zone }}'
+      - vol.tags.project == 'ansible-test'
+      - vol.tags.stage == 'production'
+      - vol.tags.sla == '24-7'
+
+- name: Create volume idempotence
+  cloudscale_volume:
+    name: '{{ cloudscale_resource_prefix }}-vol'
+    zone: '{{ cloudscale_test_zone }}'
+    size_gb: 50
+    tags:
+      project: ansible-test
+      stage: production
+      sla: 24-7
+  register: vol
+- name: 'VERIFY: Create volume idempotence'
+  assert:
+    that:
+      - vol is successful
+      - vol is not changed
+      - vol.size_gb == 50
+      - vol.name == '{{ cloudscale_resource_prefix }}-vol'
+      - vol.zone.slug == '{{ cloudscale_test_zone }}'
+      - vol.tags.project == 'ansible-test'
+      - vol.tags.stage == 'production'
+      - vol.tags.sla == '24-7'
+
+- name: Attach existing volume by name to server in check mode
+  cloudscale_volume:
+    name: '{{ cloudscale_resource_prefix }}-vol'
+    server_uuids:
+      - '{{ server.uuid }}'
+  check_mode: yes
+  register: vol
+- name: 'VERIFY: Attach existing volume by name to server in check mode'
+  assert:
+    that:
+      - vol is successful
+      - vol is changed
+      - server.uuid not in vol.server_uuids
+
+- name: Attach existing volume by name to server
+  cloudscale_volume:
+    name: '{{ cloudscale_resource_prefix }}-vol'
+    server_uuids:
+      - '{{ server.uuid }}'
+  register: vol
+- name: 'VERIFY: Attach existing volume by name to server'
+  assert:
+    that:
+      - vol is successful
+      - vol is changed
+      - server.uuid in vol.server_uuids
+
+- name: Attach existing volume by name to server idempotence
+  cloudscale_volume:
+    name: '{{ cloudscale_resource_prefix }}-vol'
+    server_uuids:
+      - '{{ server.uuid }}'
+  register: vol
+- name: 'VERIFY: Attach existing volume by name to server idempotence'
+  assert:
+    that:
+      - vol is successful
+      - vol is not changed
+      - server.uuid in vol.server_uuids
+
+- name: Resize attached volume by UUID in check mode
+  cloudscale_volume:
+    uuid: '{{ vol.uuid }}'
+    size_gb: 100
+  check_mode: yes
+  register: vol
+- name: 'VERIFY: Resize attached volume by UUID in check mode'
+  assert:
+    that:
+      - vol is successful
+      - vol is changed
+      - vol.size_gb == 50
+
+- name: Resize attached volume by UUID
+  cloudscale_volume:
+    uuid: '{{ vol.uuid }}'
+    size_gb: 100
+  register: vol
+- name: 'VERIFY: Resize attached volume by UUID'
+  assert:
+    that:
+      - vol is successful
+      - vol is changed
+      - vol.size_gb == 100
+
+- name: Resize attached volume by UUID idempotence
+  cloudscale_volume:
+    uuid: '{{ vol.uuid }}'
+    size_gb: 100
+  register: vol
+- name: 'VERIFY: Resize attached volume by UUID idempotence'
+  assert:
+    that:
+      - vol is successful
+      - vol is not changed
+      - vol.size_gb == 100
+
+- name: Delete attached volume by UUID in check mode
+  cloudscale_volume:
+    uuid: '{{ vol.uuid }}'
+    state: 'absent'
+  check_mode: yes
+  register: deleted
+- name: 'VERIFY: Delete attached volume by UUID in check mode'
+  assert:
+    that:
+      - deleted is successful
+      - deleted is changed
+      - deleted.state == 'present'
+      - deleted.uuid == vol.uuid
+      - deleted.name == '{{ cloudscale_resource_prefix }}-vol'
+
+- name: Delete attached volume by UUID
+  cloudscale_volume:
+    uuid: '{{ vol.uuid }}'
+    state: 'absent'
+  register: deleted
+- name: 'VERIFY: Delete attached volume by UUID'
+  assert:
+    that:
+      - deleted is successful
+      - deleted is changed
+      - deleted.state == 'absent'
+      - deleted.uuid == vol.uuid
+      - deleted.name == '{{ cloudscale_resource_prefix }}-vol'
+
+- name: Delete attached volume by UUID idempotence
+  cloudscale_volume:
+    uuid: '{{ vol.uuid }}'
+    state: 'absent'
+  register: deleted
+- name: 'VERIFY: Delete attached volume by UUID idempotence'
+  assert:
+    that:
+      - deleted is successful
+      - deleted is not changed
+      - deleted.state == 'absent'
+      - deleted.uuid == vol.uuid
+      - not deleted.name
+
+- name: Create bulk volume and attach
+  cloudscale_volume:
+    name: '{{ cloudscale_resource_prefix }}-bulk'
+    type: bulk
+    zone: '{{ cloudscale_test_zone }}'
+    size_gb: 100
+    server_uuids:
+      - '{{ server.uuid }}'
+  register: bulk
+- name: 'VERIFY: Create bulk volume and attach'
+  assert:
+    that:
+      - bulk is successful
+      - bulk is changed
+      - bulk.size_gb == 100
+      - server.uuid in bulk.server_uuids
+
+- name: Detach volume by UUID
+  cloudscale_volume:
+    uuid: '{{ bulk.uuid }}'
+    server_uuids: []
+  register: bulk
+- name: 'VERIFY: Detach volume by UUID'
+  assert:
+    that:
+      - bulk is successful
+      - bulk is changed
+      - bulk.server_uuids == []
+
+- name: Resize detached volume by name
+  cloudscale_volume:
+    name: '{{ bulk.name }}'
+    size_gb: 200
+  register: bulk
+- name: 'VERIFY: Resize detached volume by name'
+  assert:
+    that:
+      - bulk is successful
+      - bulk is changed
+      - bulk.size_gb == 200
+
+- name: Delete volume by name in check mode
+  cloudscale_volume:
+    name: '{{ bulk.name }}'
+    state: 'absent'
+  check_mode: yes
+  register: bulk
+- name: 'VERIFY: Delete volume by name in check mode'
+  assert:
+    that:
+      - bulk is successful
+      - bulk is changed
+      - bulk.state == 'present'
+
+- name: Delete volume by name
+  cloudscale_volume:
+    name: '{{ bulk.name }}'
+    state: 'absent'
+  register: bulk
+- name: 'VERIFY: Delete volume by name'
+  assert:
+    that:
+      - bulk is successful
+      - bulk is changed
+      - bulk.state == 'absent'
+
+- name: Delete volume by name idempotence
+  cloudscale_volume:
+    name: '{{ bulk.name }}'
+    state: 'absent'
+  register: bulk
+- name: 'VERIFY: Delete volume by name idempotence'
+  assert:
+    that:
+      - bulk is successful
+      - bulk is not changed
+      - bulk.state == 'absent'
+
+- import_tasks: failures.yml
diff --git a/tests/integration/targets/cnos_backup/README.md b/tests/integration/targets/cnos_backup/README.md
new file mode 100644
index 0000000000..9b3c7cd8f8
--- /dev/null
+++ b/tests/integration/targets/cnos_backup/README.md
@@ -0,0 +1,115 @@
+# Ansible Role: cnos_backup_sample - Saving the switch configuration to a remote server
+---
+
+
+This role is an example of using the *cnos_backup.py* Lenovo module in the context of CNOS switch configuration. This module allows you to work with switch configurations. It provides a way to back up the running or startup configurations of a switch to a remote server. This is achieved by periodically saving a copy of the startup or running configuration of the network device using FTP, SFTP, TFTP, or SCP.
+
+The results of the operation can be viewed in the *results* directory.
+
+For more details, see [Lenovo modules for Ansible: cnos_backup](http://systemx.lenovofiles.com/help/index.jsp?topic=%2Fcom.lenovo.switchmgt.ansible.doc%2Fcnos_backup.html&cp=0_3_1_0_4_4).
+
+
+## Requirements
+---
+
+
+- Ansible version 2.2 or later ([Ansible installation documentation](http://docs.ansible.com/ansible/intro_installation.html))
+- Lenovo switches running CNOS version 10.2.1.0 or later
+- an SSH connection to the Lenovo switch (SSH must be enabled on the network device)
+
+
+## Role Variables
+---
+
+
+Available variables are listed below, along with their descriptions.
+
+The following are mandatory inventory variables:
+
+Variable | Description
+--- | ---
+`ansible_connection` | Has to be `network_cli`
+`ansible_network_os` | Has to be `cnos`
+`ansible_ssh_user` | Specifies the username used to log into the switch
+`ansible_ssh_pass` | Specifies the password used to log into the switch
+`enablePassword` | Configures the password used to enter Global Configuration command mode on the switch (this is an optional parameter)
+`hostname` | Searches the hosts file at */etc/ansible/hosts* and identifies the IP address of the switch on which the role is going to be applied
+`deviceType` | Specifies the type of device from where the configuration will be backed up (**g8272_cnos** - G8272, **g8296_cnos** - G8296, **g8332_cnos** - G8332, **NE10032** - NE10032, **NE1072T** - NE1072T, **NE1032** - NE1032, **NE1032T** - NE1032T, **NE2572** - NE2572, **NE0152T** - NE0152T)
+
+The values of the variables used need to be modified to fit the specific scenario in which you are deploying the solution. To change the values of the variables, you need to visit the *vars* directory of each role and edit the *main.yml* file located there. The values stored in this file will be used by Ansible when the template is executed.
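+
+As a minimal illustration, an entry in the */etc/ansible/hosts* inventory supplying the mandatory variables above might look like the following; the group name, IP address, and credentials are placeholders, not values shipped with the role:
+
+```
+# Illustrative values only - replace with the details of your own switch
+[cnos_backup_sample]
+10.240.0.1 ansible_connection=network_cli ansible_network_os=cnos ansible_ssh_user=<username> ansible_ssh_pass=<password> deviceType=g8272_cnos
+```
+
+The optional `enablePassword` variable can be appended to the same line if the switch requires a separate password to enter Global Configuration command mode.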
+
+The syntax of the *main.yml* file for variables is as follows:
+
+```